diff --git a/.drone/docker-manifest.tmpl b/.drone/docker-manifest.tmpl deleted file mode 100644 index 32323adc382..00000000000 --- a/.drone/docker-manifest.tmpl +++ /dev/null @@ -1,13 +0,0 @@ -image: grafana/{{config.target}} -tags: - - latest - - {{build.tags}} -manifests: - - image: grafana/{{config.target}}:{{build.tags}}-amd64 - platform: - architecture: amd64 - os: linux - - image: grafana/{{config.target}}:{{build.tags}}-arm64 - platform: - architecture: arm64 - os: linux \ No newline at end of file diff --git a/.drone/drone.jsonnet b/.drone/drone.jsonnet deleted file mode 100644 index 1eb52962b56..00000000000 --- a/.drone/drone.jsonnet +++ /dev/null @@ -1,447 +0,0 @@ -local apps = ['tempo', 'tempo-vulture', 'tempo-query', 'tempo-cli']; -local archs = ['amd64', 'arm64']; - -//# Building blocks ## - -local pipeline(name, arch='amd64') = { - kind: 'pipeline', - name: name, - platform: { - os: 'linux', - arch: arch, - }, - steps: [], - depends_on: [], - trigger: { - ref: [ - 'refs/heads/main', - 'refs/tags/v*', - // weekly release branches - 'refs/heads/r?', - 'refs/heads/r??', - 'refs/heads/r???', - ], - }, -}; - -local secret(name, vault_path, vault_key) = { - kind: 'secret', - name: name, - get: { - path: vault_path, - name: vault_key, - }, -}; - -local docker_username_secret = secret('docker_username', 'infra/data/ci/docker_hub', 'username'); -local docker_password_secret = secret('docker_password', 'infra/data/ci/docker_hub', 'password'); - -// secrets for pushing serverless code packages -local image_upload_ops_tools_secret = secret('ops_tools_img_upload', 'infra/data/ci/tempo-ops-tools-function-upload', 'credentials.json'); - -// secret needed to access us.gcr.io in deploy_to_dev() -local docker_config_json_secret = secret('dockerconfigjson', 'secret/data/common/gcr', '.dockerconfigjson'); - -// secret needed for dep-tools -local gh_token_secret = secret('gh_token', 'infra/data/ci/github/grafanabot', 'pat'); -local tempo_app_id_secret = secret('tempo_app_id_secret', 'infra/data/ci/tempo/github-app', 'app-id'); -local tempo_app_installation_id_secret = secret('tempo_app_installation_id_secret', 'infra/data/ci/tempo/github-app', 'app-installation-id'); -local tempo_app_private_key_secret = secret('tempo_app_private_key_secret', 'infra/data/ci/tempo/github-app', 'app-private-key'); - -// secret to sign linux packages -local gpg_passphrase = secret('gpg_passphrase', 'infra/data/ci/packages-publish/gpg', 'passphrase'); -local gpg_private_key = secret('gpg_private_key', 'infra/data/ci/packages-publish/gpg', 'private-key'); - -local aws_dev_access_key_id = secret('AWS_ACCESS_KEY_ID-dev', 'infra/data/ci/tempo-dev/aws-credentials-drone', 'access_key_id'); -local aws_dev_secret_access_key = secret('AWS_SECRET_ACCESS_KEY-dev', 'infra/data/ci/tempo-dev/aws-credentials-drone', 'secret_access_key'); -local aws_prod_access_key_id = secret('AWS_ACCESS_KEY_ID-prod', 'infra/data/ci/tempo-prod/aws-credentials-drone', 'access_key_id'); -local aws_prod_secret_access_key = secret('AWS_SECRET_ACCESS_KEY-prod', 'infra/data/ci/tempo-prod/aws-credentials-drone', 'secret_access_key'); - -local aws_serverless_deployments = [ - { - env: 'dev', - bucket: 'dev-tempo-fn-source', - access_key_id: aws_dev_access_key_id.name, - secret_access_key: aws_dev_secret_access_key.name, - }, - { - env: 'prod', - bucket: 'prod-tempo-fn-source', - access_key_id: aws_prod_access_key_id.name, - secret_access_key: aws_prod_secret_access_key.name, - }, -]; - - -//# Steps ## - -// the alpine/git image has apk errors when run on 
aarch64, this is the most recent image that does not have this issue -// https://github.com/alpine-docker/git/issues/35 -local alpine_git_image = 'alpine/git:v2.30.2'; - -local image_tag(arch='') = { - name: 'image-tag', - image: alpine_git_image, - commands: [ - 'apk --update --no-cache add bash', - 'git fetch origin --tags', - ] + ( - if arch == '' then [ - 'echo $(./tools/image-tag) > .tags', - ] else [ - 'echo $(./tools/image-tag)-%s > .tags' % arch, - ] - ), -}; - -local image_tag_for_cd() = { - name: 'image-tag-for-cd', - image: alpine_git_image, - commands: [ - 'apk --update --no-cache add bash', - 'git fetch origin --tags', - 'echo "grafana/tempo:$(./tools/image-tag)" > .tags-for-cd-tempo', - 'echo "grafana/tempo-query:$(./tools/image-tag)" > .tags-for-cd-tempo_query', - 'echo "grafana/tempo-vulture:$(./tools/image-tag)" > .tags-for-cd-tempo_vulture', - ], -}; - -local build_binaries(arch) = { - name: 'build-tempo-binaries', - image: 'golang:1.23-alpine', - commands: [ - 'apk --update --no-cache add make git bash', - ] + [ - 'COMPONENT=%s GOARCH=%s make exe' % [app, arch] - for app in apps - ], -}; - -local docker_build(arch, app, dockerfile='') = { - name: 'build-%s-image' % app, - image: 'plugins/docker', - settings: { - dockerfile: if dockerfile != '' then dockerfile else 'cmd/%s/Dockerfile' % app, - repo: 'grafana/%s' % app, - username: { from_secret: docker_username_secret.name }, - password: { from_secret: docker_password_secret.name }, - platform: '%s/%s' % ['linux', arch], - build_args: [ - 'TARGETARCH=' + arch, - ], - }, -}; - -local docker_manifest(app) = { - name: 'manifest-%s' % app, - image: 'plugins/manifest:1.4.0', - settings: { - username: { from_secret: docker_username_secret.name }, - password: { from_secret: docker_password_secret.name }, - spec: '.drone/docker-manifest.tmpl', - target: app, - }, -}; - -local deploy_to_dev() = { - image: 'us.gcr.io/kubernetes-dev/drone/plugins/updater', - name: 'update-dev-images', - settings: { - config_json: std.manifestJsonEx( - { - destination_branch: 'master', - pull_request_branch_prefix: 'auto-merge/cd-tempo-dev', - pull_request_enabled: true, - pull_request_existing_strategy: "ignore", - repo_name: 'deployment_tools', - update_jsonnet_attribute_configs: [ - { - file_path: 'ksonnet/environments/tempo/dev-us-central-0.tempo-dev-01/images.libsonnet', - jsonnet_key: app, - jsonnet_value_file: '.tags-for-cd-' + app, - } - for app in ['tempo', 'tempo_query', 'tempo_vulture'] - ], - }, - ' ' - ), - github_app_id: { - from_secret: tempo_app_id_secret.name, - }, - github_app_installation_id: { - from_secret: tempo_app_installation_id_secret.name, - }, - github_app_private_key: { - from_secret: tempo_app_private_key_secret.name, - }, - }, -}; - -//# Pipelines & resources - -[ - // A pipeline to build Docker images for every app and for every arch - ( - pipeline('docker-' + arch, arch) { - steps+: [ - image_tag(arch), - build_binaries(arch), - ] + [ - docker_build(arch, app) - for app in apps - ], - } - ) - for arch in archs -] + [ - // Publish Docker manifests - pipeline('manifest') { - steps+: [ - image_tag(), - ] + [ - docker_manifest(app) - for app in apps - ], - depends_on+: [ - 'docker-%s' % arch - for arch in archs - ], - }, -] + [ - // Publish tools Docker manifests - ( - pipeline('docker-ci-tools-%s' % arch, arch) { - steps+: [ - image_tag(arch), - docker_build(arch, 'tempo-ci-tools', dockerfile='tools/Dockerfile'), - ], - } - ) - for arch in archs -] + [ - // Publish Docker manifests - pipeline('manifest-ci-tools') { - 
steps+: [ - image_tag(), - docker_manifest('tempo-ci-tools'), - ], - depends_on+: [ - 'docker-ci-tools-%s' % arch - for arch in archs - ], - }, -] + [ - // Continuously Deploy to dev env - pipeline('cd-to-dev-env') { - trigger: { - ref: [ - // always deploy tip of main to dev - 'refs/heads/main', - ], - }, - image_pull_secrets: [ - docker_config_json_secret.name, - ], - steps+: [ - image_tag_for_cd(), - ] + [ - deploy_to_dev(), - ], - depends_on+: [ - // wait for images to be published on dockerhub - 'manifest', - ], - }, -] + [ - // Build and deploy serverless code packages - pipeline('build-deploy-serverless') { - steps+: [ - { - name: 'build-tempo-serverless', - image: 'golang:1.23-alpine', - commands: [ - 'apk add make git zip bash', - './tools/image-tag | cut -d, -f 1 | tr A-Z a-z > .tags', // values in .tags are used by the next step when pushing the image - 'cd ./cmd/tempo-serverless', - 'make build-docker-gcr-binary', - 'make build-lambda-zip', - ], - }, - { - name: 'deploy-tempo-serverless-gcr', - image: 'plugins/gcr', - settings: { - repo: 'ops-tools-1203/tempo-serverless', - context: './cmd/tempo-serverless/cloud-run', - dockerfile: './cmd/tempo-serverless/cloud-run/Dockerfile', - json_key: { - from_secret: image_upload_ops_tools_secret.name, - }, - }, - }, - ] + - [ - { - name: 'deploy-tempo-%s-serverless-lambda' % d.env, - image: 'amazon/aws-cli', - environment: { - AWS_DEFAULT_REGION: 'us-east-2', - AWS_ACCESS_KEY_ID: { - from_secret: d.access_key_id, - }, - AWS_SECRET_ACCESS_KEY: { - from_secret: d.secret_access_key, - }, - }, - commands: [ - 'cd ./cmd/tempo-serverless/lambda', - 'aws s3 cp tempo-serverless*.zip s3://%s' % d.bucket, - ], - } - - for d in aws_serverless_deployments - ], - }, - - local ghTokenFilename = '/drone/src/gh-token.txt'; - // Build and release packages - // Tested by installing the packages on a systemd container - pipeline('release') { - trigger: { - event: ['tag', 'pull_request'], - }, - image_pull_secrets: [ - docker_config_json_secret.name, - ], - volumes+: [ - { - name: 'cgroup', - host: { - path: '/sys/fs/cgroup', - }, - }, - { - name: 'docker', - host: { - path: '/var/run/docker.sock', - }, - }, - ], - // Launch systemd containers to test the packages - services: [ - { - name: 'systemd-debian', - image: 'jrei/systemd-debian:12', - volumes: [ - { - name: 'cgroup', - path: '/sys/fs/cgroup', - }, - ], - privileged: true, - }, - { - name: 'systemd-centos', - image: 'jrei/systemd-centos:8', - volumes: [ - { - name: 'cgroup', - path: '/sys/fs/cgroup', - }, - ], - privileged: true, - }, - ], - steps+: [ - { - name: 'fetch', - image: 'docker:git', - commands: ['git fetch --tags'], - }, - { - name: 'Generate GitHub token', - image: 'us.gcr.io/kubernetes-dev/github-app-secret-writer:latest', - environment: { - GITHUB_APP_ID: { from_secret: tempo_app_id_secret.name }, - GITHUB_APP_INSTALLATION_ID: { from_secret: tempo_app_installation_id_secret.name }, - GITHUB_APP_PRIVATE_KEY: { from_secret: tempo_app_private_key_secret.name }, - }, - commands: [ - '/usr/bin/github-app-external-token > %s' % ghTokenFilename, - ], - }, - { - name: 'write-key', - image: 'golang:1.23', - commands: ['printf "%s" "$NFPM_SIGNING_KEY" > $NFPM_SIGNING_KEY_FILE'], - environment: { - NFPM_SIGNING_KEY: { from_secret: gpg_private_key.name }, - NFPM_SIGNING_KEY_FILE: '/drone/src/private-key.key', - }, - }, - { - name: 'test release', - image: 'golang:1.23', - commands: ['make release-snapshot'], - environment: { - NFPM_DEFAULT_PASSPHRASE: { from_secret: gpg_passphrase.name }, - 
NFPM_SIGNING_KEY_FILE: '/drone/src/private-key.key', - }, - }, - { - name: 'test deb package', - image: 'docker', - commands: ['./tools/packaging/verify-deb-install.sh'], - volumes: [ - { - name: 'docker', - path: '/var/run/docker.sock', - }, - ], - privileged: true, - }, - { - name: 'test rpm package', - image: 'docker', - commands: ['./tools/packaging/verify-rpm-install.sh'], - volumes: [ - { - name: 'docker', - path: '/var/run/docker.sock', - }, - ], - privileged: true, - }, - { - name: 'release', - image: 'golang:1.23', - commands: [ - 'export GITHUB_TOKEN=$(cat %s)' % ghTokenFilename, - 'make release' - ], - environment: { - NFPM_DEFAULT_PASSPHRASE: { from_secret: gpg_passphrase.name }, - NFPM_SIGNING_KEY_FILE: '/drone/src/private-key.key', - }, - when: { - event: ['tag'], - }, - }, - ], - }, -] + [ - docker_username_secret, - docker_password_secret, - docker_config_json_secret, - gh_token_secret, - tempo_app_id_secret, - tempo_app_installation_id_secret, - tempo_app_private_key_secret, - image_upload_ops_tools_secret, - aws_dev_access_key_id, - aws_dev_secret_access_key, - aws_prod_access_key_id, - aws_prod_secret_access_key, - gpg_private_key, - gpg_passphrase, -] diff --git a/.drone/drone.yml b/.drone/drone.yml deleted file mode 100644 index 2cf4926f943..00000000000 --- a/.drone/drone.yml +++ /dev/null @@ -1,602 +0,0 @@ ---- -depends_on: [] -kind: pipeline -name: docker-amd64 -platform: - arch: amd64 - os: linux -steps: -- commands: - - apk --update --no-cache add bash - - git fetch origin --tags - - echo $(./tools/image-tag)-amd64 > .tags - image: alpine/git:v2.30.2 - name: image-tag -- commands: - - apk --update --no-cache add make git bash - - COMPONENT=tempo GOARCH=amd64 make exe - - COMPONENT=tempo-vulture GOARCH=amd64 make exe - - COMPONENT=tempo-query GOARCH=amd64 make exe - - COMPONENT=tempo-cli GOARCH=amd64 make exe - image: golang:1.23-alpine - name: build-tempo-binaries -- image: plugins/docker - name: build-tempo-image - settings: - build_args: - - TARGETARCH=amd64 - dockerfile: cmd/tempo/Dockerfile - password: - from_secret: docker_password - platform: linux/amd64 - repo: grafana/tempo - username: - from_secret: docker_username -- image: plugins/docker - name: build-tempo-vulture-image - settings: - build_args: - - TARGETARCH=amd64 - dockerfile: cmd/tempo-vulture/Dockerfile - password: - from_secret: docker_password - platform: linux/amd64 - repo: grafana/tempo-vulture - username: - from_secret: docker_username -- image: plugins/docker - name: build-tempo-query-image - settings: - build_args: - - TARGETARCH=amd64 - dockerfile: cmd/tempo-query/Dockerfile - password: - from_secret: docker_password - platform: linux/amd64 - repo: grafana/tempo-query - username: - from_secret: docker_username -- image: plugins/docker - name: build-tempo-cli-image - settings: - build_args: - - TARGETARCH=amd64 - dockerfile: cmd/tempo-cli/Dockerfile - password: - from_secret: docker_password - platform: linux/amd64 - repo: grafana/tempo-cli - username: - from_secret: docker_username -trigger: - ref: - - refs/heads/main - - refs/tags/v* - - refs/heads/r? - - refs/heads/r?? - - refs/heads/r??? 
---- -depends_on: [] -kind: pipeline -name: docker-arm64 -platform: - arch: arm64 - os: linux -steps: -- commands: - - apk --update --no-cache add bash - - git fetch origin --tags - - echo $(./tools/image-tag)-arm64 > .tags - image: alpine/git:v2.30.2 - name: image-tag -- commands: - - apk --update --no-cache add make git bash - - COMPONENT=tempo GOARCH=arm64 make exe - - COMPONENT=tempo-vulture GOARCH=arm64 make exe - - COMPONENT=tempo-query GOARCH=arm64 make exe - - COMPONENT=tempo-cli GOARCH=arm64 make exe - image: golang:1.23-alpine - name: build-tempo-binaries -- image: plugins/docker - name: build-tempo-image - settings: - build_args: - - TARGETARCH=arm64 - dockerfile: cmd/tempo/Dockerfile - password: - from_secret: docker_password - platform: linux/arm64 - repo: grafana/tempo - username: - from_secret: docker_username -- image: plugins/docker - name: build-tempo-vulture-image - settings: - build_args: - - TARGETARCH=arm64 - dockerfile: cmd/tempo-vulture/Dockerfile - password: - from_secret: docker_password - platform: linux/arm64 - repo: grafana/tempo-vulture - username: - from_secret: docker_username -- image: plugins/docker - name: build-tempo-query-image - settings: - build_args: - - TARGETARCH=arm64 - dockerfile: cmd/tempo-query/Dockerfile - password: - from_secret: docker_password - platform: linux/arm64 - repo: grafana/tempo-query - username: - from_secret: docker_username -- image: plugins/docker - name: build-tempo-cli-image - settings: - build_args: - - TARGETARCH=arm64 - dockerfile: cmd/tempo-cli/Dockerfile - password: - from_secret: docker_password - platform: linux/arm64 - repo: grafana/tempo-cli - username: - from_secret: docker_username -trigger: - ref: - - refs/heads/main - - refs/tags/v* - - refs/heads/r? - - refs/heads/r?? - - refs/heads/r??? ---- -depends_on: -- docker-amd64 -- docker-arm64 -kind: pipeline -name: manifest -platform: - arch: amd64 - os: linux -steps: -- commands: - - apk --update --no-cache add bash - - git fetch origin --tags - - echo $(./tools/image-tag) > .tags - image: alpine/git:v2.30.2 - name: image-tag -- image: plugins/manifest:1.4.0 - name: manifest-tempo - settings: - password: - from_secret: docker_password - spec: .drone/docker-manifest.tmpl - target: tempo - username: - from_secret: docker_username -- image: plugins/manifest:1.4.0 - name: manifest-tempo-vulture - settings: - password: - from_secret: docker_password - spec: .drone/docker-manifest.tmpl - target: tempo-vulture - username: - from_secret: docker_username -- image: plugins/manifest:1.4.0 - name: manifest-tempo-query - settings: - password: - from_secret: docker_password - spec: .drone/docker-manifest.tmpl - target: tempo-query - username: - from_secret: docker_username -- image: plugins/manifest:1.4.0 - name: manifest-tempo-cli - settings: - password: - from_secret: docker_password - spec: .drone/docker-manifest.tmpl - target: tempo-cli - username: - from_secret: docker_username -trigger: - ref: - - refs/heads/main - - refs/tags/v* - - refs/heads/r? - - refs/heads/r?? - - refs/heads/r??? 
---- -depends_on: [] -kind: pipeline -name: docker-ci-tools-amd64 -platform: - arch: amd64 - os: linux -steps: -- commands: - - apk --update --no-cache add bash - - git fetch origin --tags - - echo $(./tools/image-tag)-amd64 > .tags - image: alpine/git:v2.30.2 - name: image-tag -- image: plugins/docker - name: build-tempo-ci-tools-image - settings: - build_args: - - TARGETARCH=amd64 - dockerfile: tools/Dockerfile - password: - from_secret: docker_password - platform: linux/amd64 - repo: grafana/tempo-ci-tools - username: - from_secret: docker_username -trigger: - ref: - - refs/heads/main - - refs/tags/v* - - refs/heads/r? - - refs/heads/r?? - - refs/heads/r??? ---- -depends_on: [] -kind: pipeline -name: docker-ci-tools-arm64 -platform: - arch: arm64 - os: linux -steps: -- commands: - - apk --update --no-cache add bash - - git fetch origin --tags - - echo $(./tools/image-tag)-arm64 > .tags - image: alpine/git:v2.30.2 - name: image-tag -- image: plugins/docker - name: build-tempo-ci-tools-image - settings: - build_args: - - TARGETARCH=arm64 - dockerfile: tools/Dockerfile - password: - from_secret: docker_password - platform: linux/arm64 - repo: grafana/tempo-ci-tools - username: - from_secret: docker_username -trigger: - ref: - - refs/heads/main - - refs/tags/v* - - refs/heads/r? - - refs/heads/r?? - - refs/heads/r??? ---- -depends_on: -- docker-ci-tools-amd64 -- docker-ci-tools-arm64 -kind: pipeline -name: manifest-ci-tools -platform: - arch: amd64 - os: linux -steps: -- commands: - - apk --update --no-cache add bash - - git fetch origin --tags - - echo $(./tools/image-tag) > .tags - image: alpine/git:v2.30.2 - name: image-tag -- image: plugins/manifest:1.4.0 - name: manifest-tempo-ci-tools - settings: - password: - from_secret: docker_password - spec: .drone/docker-manifest.tmpl - target: tempo-ci-tools - username: - from_secret: docker_username -trigger: - ref: - - refs/heads/main - - refs/tags/v* - - refs/heads/r? - - refs/heads/r?? - - refs/heads/r??? 
---- -depends_on: -- manifest -image_pull_secrets: -- dockerconfigjson -kind: pipeline -name: cd-to-dev-env -platform: - arch: amd64 - os: linux -steps: -- commands: - - apk --update --no-cache add bash - - git fetch origin --tags - - echo "grafana/tempo:$(./tools/image-tag)" > .tags-for-cd-tempo - - echo "grafana/tempo-query:$(./tools/image-tag)" > .tags-for-cd-tempo_query - - echo "grafana/tempo-vulture:$(./tools/image-tag)" > .tags-for-cd-tempo_vulture - image: alpine/git:v2.30.2 - name: image-tag-for-cd -- image: us.gcr.io/kubernetes-dev/drone/plugins/updater - name: update-dev-images - settings: - config_json: |- - { - "destination_branch": "master", - "pull_request_branch_prefix": "auto-merge/cd-tempo-dev", - "pull_request_enabled": true, - "pull_request_existing_strategy": "ignore", - "repo_name": "deployment_tools", - "update_jsonnet_attribute_configs": [ - { - "file_path": "ksonnet/environments/tempo/dev-us-central-0.tempo-dev-01/images.libsonnet", - "jsonnet_key": "tempo", - "jsonnet_value_file": ".tags-for-cd-tempo" - }, - { - "file_path": "ksonnet/environments/tempo/dev-us-central-0.tempo-dev-01/images.libsonnet", - "jsonnet_key": "tempo_query", - "jsonnet_value_file": ".tags-for-cd-tempo_query" - }, - { - "file_path": "ksonnet/environments/tempo/dev-us-central-0.tempo-dev-01/images.libsonnet", - "jsonnet_key": "tempo_vulture", - "jsonnet_value_file": ".tags-for-cd-tempo_vulture" - } - ] - } - github_app_id: - from_secret: tempo_app_id_secret - github_app_installation_id: - from_secret: tempo_app_installation_id_secret - github_app_private_key: - from_secret: tempo_app_private_key_secret -trigger: - ref: - - refs/heads/main ---- -depends_on: [] -kind: pipeline -name: build-deploy-serverless -platform: - arch: amd64 - os: linux -steps: -- commands: - - apk add make git zip bash - - ./tools/image-tag | cut -d, -f 1 | tr A-Z a-z > .tags - - cd ./cmd/tempo-serverless - - make build-docker-gcr-binary - - make build-lambda-zip - image: golang:1.23-alpine - name: build-tempo-serverless -- image: plugins/gcr - name: deploy-tempo-serverless-gcr - settings: - context: ./cmd/tempo-serverless/cloud-run - dockerfile: ./cmd/tempo-serverless/cloud-run/Dockerfile - json_key: - from_secret: ops_tools_img_upload - repo: ops-tools-1203/tempo-serverless -- commands: - - cd ./cmd/tempo-serverless/lambda - - aws s3 cp tempo-serverless*.zip s3://dev-tempo-fn-source - environment: - AWS_ACCESS_KEY_ID: - from_secret: AWS_ACCESS_KEY_ID-dev - AWS_DEFAULT_REGION: us-east-2 - AWS_SECRET_ACCESS_KEY: - from_secret: AWS_SECRET_ACCESS_KEY-dev - image: amazon/aws-cli - name: deploy-tempo-dev-serverless-lambda -- commands: - - cd ./cmd/tempo-serverless/lambda - - aws s3 cp tempo-serverless*.zip s3://prod-tempo-fn-source - environment: - AWS_ACCESS_KEY_ID: - from_secret: AWS_ACCESS_KEY_ID-prod - AWS_DEFAULT_REGION: us-east-2 - AWS_SECRET_ACCESS_KEY: - from_secret: AWS_SECRET_ACCESS_KEY-prod - image: amazon/aws-cli - name: deploy-tempo-prod-serverless-lambda -trigger: - ref: - - refs/heads/main - - refs/tags/v* - - refs/heads/r? - - refs/heads/r?? - - refs/heads/r??? 
---- -depends_on: [] -image_pull_secrets: -- dockerconfigjson -kind: pipeline -name: release -platform: - arch: amd64 - os: linux -services: -- image: jrei/systemd-debian:12 - name: systemd-debian - privileged: true - volumes: - - name: cgroup - path: /sys/fs/cgroup -- image: jrei/systemd-centos:8 - name: systemd-centos - privileged: true - volumes: - - name: cgroup - path: /sys/fs/cgroup -steps: -- commands: - - git fetch --tags - image: docker:git - name: fetch -- commands: - - /usr/bin/github-app-external-token > /drone/src/gh-token.txt - environment: - GITHUB_APP_ID: - from_secret: tempo_app_id_secret - GITHUB_APP_INSTALLATION_ID: - from_secret: tempo_app_installation_id_secret - GITHUB_APP_PRIVATE_KEY: - from_secret: tempo_app_private_key_secret - image: us.gcr.io/kubernetes-dev/github-app-secret-writer:latest - name: Generate GitHub token -- commands: - - printf "%s" "$NFPM_SIGNING_KEY" > $NFPM_SIGNING_KEY_FILE - environment: - NFPM_SIGNING_KEY: - from_secret: gpg_private_key - NFPM_SIGNING_KEY_FILE: /drone/src/private-key.key - image: golang:1.23 - name: write-key -- commands: - - make release-snapshot - environment: - NFPM_DEFAULT_PASSPHRASE: - from_secret: gpg_passphrase - NFPM_SIGNING_KEY_FILE: /drone/src/private-key.key - image: golang:1.23 - name: test release -- commands: - - ./tools/packaging/verify-deb-install.sh - image: docker - name: test deb package - privileged: true - volumes: - - name: docker - path: /var/run/docker.sock -- commands: - - ./tools/packaging/verify-rpm-install.sh - image: docker - name: test rpm package - privileged: true - volumes: - - name: docker - path: /var/run/docker.sock -- commands: - - export GITHUB_TOKEN=$(cat /drone/src/gh-token.txt) - - make release - environment: - NFPM_DEFAULT_PASSPHRASE: - from_secret: gpg_passphrase - NFPM_SIGNING_KEY_FILE: /drone/src/private-key.key - image: golang:1.23 - name: release - when: - event: - - tag -trigger: - event: - - tag - - pull_request -volumes: -- host: - path: /sys/fs/cgroup - name: cgroup -- host: - path: /var/run/docker.sock - name: docker ---- -get: - name: username - path: infra/data/ci/docker_hub -kind: secret -name: docker_username ---- -get: - name: password - path: infra/data/ci/docker_hub -kind: secret -name: docker_password ---- -get: - name: .dockerconfigjson - path: secret/data/common/gcr -kind: secret -name: dockerconfigjson ---- -get: - name: pat - path: infra/data/ci/github/grafanabot -kind: secret -name: gh_token ---- -get: - name: app-id - path: infra/data/ci/tempo/github-app -kind: secret -name: tempo_app_id_secret ---- -get: - name: app-installation-id - path: infra/data/ci/tempo/github-app -kind: secret -name: tempo_app_installation_id_secret ---- -get: - name: app-private-key - path: infra/data/ci/tempo/github-app -kind: secret -name: tempo_app_private_key_secret ---- -get: - name: credentials.json - path: infra/data/ci/tempo-ops-tools-function-upload -kind: secret -name: ops_tools_img_upload ---- -get: - name: access_key_id - path: infra/data/ci/tempo-dev/aws-credentials-drone -kind: secret -name: AWS_ACCESS_KEY_ID-dev ---- -get: - name: secret_access_key - path: infra/data/ci/tempo-dev/aws-credentials-drone -kind: secret -name: AWS_SECRET_ACCESS_KEY-dev ---- -get: - name: access_key_id - path: infra/data/ci/tempo-prod/aws-credentials-drone -kind: secret -name: AWS_ACCESS_KEY_ID-prod ---- -get: - name: secret_access_key - path: infra/data/ci/tempo-prod/aws-credentials-drone -kind: secret -name: AWS_SECRET_ACCESS_KEY-prod ---- -get: - name: private-key - path: 
infra/data/ci/packages-publish/gpg
-kind: secret
-name: gpg_private_key
----
-get:
-  name: passphrase
-  path: infra/data/ci/packages-publish/gpg
-kind: secret
-name: gpg_passphrase
----
-kind: signature
-hmac: 0265cd585d8c7fc444bebc8aa1164ec6aa7893c2aa16f3beb61503102b00a798
-
-...
diff --git a/.drone/readme.md b/.drone/readme.md
deleted file mode 100644
index b322e7a2fc9..00000000000
--- a/.drone/readme.md
+++ /dev/null
@@ -1,49 +0,0 @@
-# Drone
-
-Drone is used for building our official dockerhub images. It is broken into 3
-pipelines. Note that none of the pipelines include testing, so it's important that
-the codebase is otherwise tested when it begins this process. Currently we use GitHub
-Actions for testing every PR and only build the main branch, tags and weekly release
-branches (`r**`).
-
-# Pipelines
-
-The pipelines are `docker-amd64`, `docker-arm64`, and `manifest`. The two docker pipelines
-run concurrently and create images tagged like `tempo:<tag>-<arch>` or `tempo:<branch>-<sha>-<arch>`.
-E.g. `tempo:1.1.0-arm64` or `tempo:main-e2a314-amd64`. The manifest step then creates a manifest
-that combines the mentioned images into one multiarch image named as you would expect:
-`tempo:1.1.0` or `tempo:main-e2a314`.
-
-The documentation on the manifest step is basically non-existent. There's some very
-weak documentation in the Drone docs, but it's not even worth looking at. To understand
-how to use the manifest step I'd recommend looking at the code itself:
-
-https://github.com/drone-plugins/drone-manifest
-
-It is a very simple wrapper that takes the configuration options and runs the following
-cli tool:
-
-https://github.com/estesp/manifest-tool
-
-[`docker-manifest.tmpl`](./docker-manifest.tmpl) is pushed through the standard go templating library with access
-to these objects: https://github.com/drone-plugins/drone-manifest/blob/master/plugin.go#L23
-
-# Updating drone.yml
-
-`drone.yml` is generated based upon `drone.jsonnet`. To change the Drone pipelines edit
-`drone.jsonnet` and run:
-
-```
-make drone
-```
-
-# Signature
-
-`drone.yml` contains a signature that can only be generated with an access token from the Grafana
-Drone server. If you do not have an access token, the last step of `make drone` will fail. Feel free
-to still submit a PR; a Tempo maintainer can update the signature before merging the PR. 
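For reference, `make drone` is a thin wrapper around the Drone CLI; the sketch below shows the two underlying steps. This is an assumption based on the readme above, not the repo's exact Makefile recipe (the precise flags live in the Makefile):

```
# render the jsonnet pipeline definition to YAML
drone jsonnet --stream --format \
  --source .drone/drone.jsonnet --target .drone/drone.yml

# re-sign the rendered YAML; requires a token for the Grafana Drone server
drone sign --save grafana/tempo .drone/drone.yml
```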
To regenerate -the signature run: - -``` -make drone-signature -``` \ No newline at end of file diff --git a/.github/workflows/add-to-docs-project.yml b/.github/workflows/add-to-docs-project.yml index f5708738066..bf97c20b836 100644 --- a/.github/workflows/add-to-docs-project.yml +++ b/.github/workflows/add-to-docs-project.yml @@ -10,6 +10,6 @@ jobs: permissions: contents: read id-token: write - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - uses: grafana/writers-toolkit/add-to-docs-project@add-to-docs-project/v1 diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index 3504d609613..ef2441859cb 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -13,7 +13,7 @@ on: jobs: main: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - name: Checkout Actions uses: actions/checkout@v4 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e9766f45700..2156d5abdc0 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -19,8 +19,9 @@ concurrency: jobs: lint: name: Lint - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: + - name: Check out code uses: actions/checkout@v4 with: @@ -29,18 +30,32 @@ jobs: uses: actions/setup-go@v5 with: go-version-file: "go.mod" + - name: check-fmt run: make check-fmt - name: check-jsonnetfmt run: make check-jsonnetfmt + - name: Get year and week number + id: get-year-week-number + run: echo "date=$(date +"%Yweek%U")" >> $GITHUB_OUTPUT + + - name: cache golangci + uses: actions/cache@v4 + with: + path: .cache/golangci-lint + key: golangci-lint-${{ runner.os }}-${{ steps.get-year-week-number.outputs.date }}-${{ hashFiles('go.mod', '.golangci.yml') }} + - name: lint - run: make lint base=origin/${{github.base_ref}} + run: | + make lint base=origin/${{github.base_ref}} + sudo chown -R $(id -u):$(id -g) .cache/golangci-lint # needed to archive cache + unit-tests: name: Run Unit Tests - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 strategy: matrix: test-target: @@ -65,7 +80,7 @@ jobs: integration-tests: name: Run integration tests - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 strategy: matrix: test-target: @@ -73,7 +88,6 @@ jobs: test-e2e, test-integration-poller, test-e2e-deployments, - test-e2e-serverless, ] steps: @@ -90,7 +104,7 @@ jobs: build: name: Build - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - name: Check out code uses: actions/checkout@v4 @@ -103,6 +117,9 @@ jobs: - name: Build Tempo run: make tempo + - name: generate-manifest + run: make generate-manifest + - name: Build tempo-query run: make tempo-query @@ -114,7 +131,7 @@ jobs: benchmark: name: Benchmark - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - name: Check out code uses: actions/checkout@v4 @@ -129,7 +146,7 @@ jobs: vendor-check: name: Vendor check - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - name: Check out code uses: actions/checkout@v4 @@ -147,7 +164,7 @@ jobs: tempo-jsonnet: name: Check jsonnet & tempo-mixin - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - name: Check out code uses: actions/checkout@v4 @@ -168,7 +185,7 @@ jobs: build-technical-documentation: name: Build technical documentation - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - name: Check out code uses: actions/checkout@v4 diff --git a/.github/workflows/dependabot_serverless_gomod.yml b/.github/workflows/dependabot_serverless_gomod.yml deleted file mode 100644 index aaedaf4ddd8..00000000000 --- a/.github/workflows/dependabot_serverless_gomod.yml +++ /dev/null @@ -1,56 
+0,0 @@
-name: Dependabot
-on: pull_request
-permissions:
-  contents: write
-
-# NOTE: For this job, we are unable to use the GITHUB_TOKEN from the action
-# itself. The reason for this is that the tokens handed out for actions don't
-# contain the necessary permissions to kick off workflows in the case a new
-# commit is added, as is the case here. For this job, we use the GH_BOT token
-# secret in the repo, which has the necessary permissions. This allows the
-# final commit+push on this job to execute the rest of the workflow required by
-# the PR build configuration.
-
-jobs:
-  dependabot:
-    name: Serverless gomod update
-    runs-on: ubuntu-latest
-    if: ${{ github.event.pull_request.user.login == 'dependabot[bot]' }}
-    steps:
-      - name: Set up Go 1.23
-        uses: actions/setup-go@v5
-        with:
-          go-version: 1.23.3
-
-      - name: Get app token
-        uses: actions/create-github-app-token@v1
-        id: get-github-app-token
-        with:
-          app-id: ${{secrets.APP_ID}}
-          private-key: ${{secrets.APP_PRIVATE_KEY}}
-          owner: ${{ github.repository_owner }}
-
-      - name: Check out code
-        uses: actions/checkout@v4
-        with:
-          ref: ${{ github.head_ref }}
-          token: ${{ steps.get-github-app-token.outputs.token }}
-
-      - name: Update serverless gomod
-        run: make -C cmd/tempo-serverless update-mod
-
-      - name: Commit serverless gomod changes
-        env:
-          USER: x-access-token
-          TOKEN: ${{ steps.get-github-app-token.outputs.token }}
-        run: |
-          git config --global url."https://${USER}:${TOKEN}@github.com/grafana/tempo".insteadOf "https://github.com/grafana/tempo"
-          git add cmd/tempo-serverless/lambda/go.mod
-          git add cmd/tempo-serverless/lambda/go.sum
-          git add cmd/tempo-serverless/cloud-run/go.mod
-          git add cmd/tempo-serverless/cloud-run/go.sum
-          git diff --quiet --staged || git commit -m 'Update serverless gomod'
-
-      - name: Push changes
-        run: |
-          git push origin ${{ github.head_ref }}
diff --git a/.github/workflows/doc-validator.yml b/.github/workflows/doc-validator.yml
deleted file mode 100644
index 36c62ceaba8..00000000000
--- a/.github/workflows/doc-validator.yml
+++ /dev/null
@@ -1,34 +0,0 @@
-name: "doc-validator"
-on:
-  pull_request:
-    paths: ["docs/sources/**"]
-  workflow_dispatch:
-jobs:
-  doc-validator:
-    runs-on: "ubuntu-latest"
-    container:
-      image: "grafana/doc-validator:v5.2.0"
-    steps:
-      - name: "Checkout code"
-        uses: "actions/checkout@v4"
-      - name: "Run doc-validator"
-        run: |
-          rm -rf /hugo/content/docs
-          mkdir -p /hugo/content/docs/tempo
-          mkdir -p /hugo/content/docs/helm-charts/tempo-distributed
-          mv "$(pwd)/docs/sources/tempo" /hugo/content/docs/tempo/latest
-          mv "$(pwd)/docs/sources/helm-charts/tempo-distributed" /hugo/content/docs/helm-charts/tempo-distributed/next
-          doc-validator \
-            '--skip-checks=^image' \
-            /hugo/content/docs \
-            /docs/tempo/latest \
-          | sed -E s,/hugo/content/docs/tempo/latest,docs/sources/tempo,g \
-          | sed -E s,/hugo/content/docs/helm-charts/tempo-distributed/next,/docs/sources/helm-charts/tempo-distributed,g \
-          | reviewdog \
-            -f=rdjsonl \
-            --fail-on-error \
-            --filter-mode=nofilter \
-            --name=doc-validator \
-            --reporter=github-pr-review
-        env:
-          REVIEWDOG_GITHUB_API_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
diff --git a/.github/workflows/docker-ci-tools.yml b/.github/workflows/docker-ci-tools.yml
new file mode 100644
index 00000000000..ff10d32feea
--- /dev/null
+++ b/.github/workflows/docker-ci-tools.yml
@@ -0,0 +1,80 @@
+name: docker-ci-tools
+on:
+  push:
+    # If you define both branches/branches-ignore and paths/paths-ignore, the workflow will only run when both filters are satisfied. 
+ branches: + - 'main' + paths: + - 'tools/**' + +env: + IMAGE_NAME: grafana/tempo-ci-tools + +# Needed to login to DockerHub +permissions: + contents: read + id-token: write + +jobs: + + get-image-tag: + if: github.repository == 'grafana/tempo' # skip in forks + runs-on: ubuntu-24.04 + outputs: + tag: ${{ steps.get-tag.outputs.tag }} + steps: + - name: Checkout + uses: actions/checkout@v4 + + - id: get-tag + run: | + echo "tag=$(./tools/image-tag)" >> "$GITHUB_OUTPUT" + + docker-ci-tools: + if: github.repository == 'grafana/tempo' + needs: get-image-tag + strategy: + matrix: + runner_arch: [ { runner: ubuntu-24.04, arch: amd64 }, { runner: github-hosted-ubuntu-arm64, arch: arm64 } ] + runs-on: ${{ matrix.runner_arch.runner }} + env: + TAG: ${{ needs.get-image-tag.outputs.tag }} + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Login to DockerHub + uses: grafana/shared-workflows/actions/dockerhub-login@dockerhub-login-v1.0 + + - name: docker-build-and-push + run: | + TAG_ARCH="$TAG-${{ matrix.runner_arch.arch }}" + docker build -f tools/Dockerfile -t $IMAGE_NAME:$TAG_ARCH . + docker push $IMAGE_NAME:$TAG_ARCH + + manifest: + if: github.repository == 'grafana/tempo' + needs: ['get-image-tag', 'docker-ci-tools'] + runs-on: ubuntu-latest + env: + TAG: ${{ needs.get-image-tag.outputs.tag }} + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Login to DockerHub + uses: grafana/shared-workflows/actions/dockerhub-login@dockerhub-login-v1.0 + + - name: docker-manifest-create-and-push + run: | + docker manifest create \ + $IMAGE_NAME:$TAG \ + --amend $IMAGE_NAME:$TAG-amd64 \ + --amend $IMAGE_NAME:$TAG-arm64 + docker manifest push $IMAGE_NAME:$TAG + + docker manifest create \ + $IMAGE_NAME:latest \ + --amend $IMAGE_NAME:$TAG-amd64 \ + --amend $IMAGE_NAME:$TAG-arm64 + docker manifest push $IMAGE_NAME:latest diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml new file mode 100644 index 00000000000..330b1228c48 --- /dev/null +++ b/.github/workflows/docker.yml @@ -0,0 +1,169 @@ +name: docker +on: + push: + branches: + - 'main' + - 'r[0-9]+' + tags: + - 'v*' + +# Needed to login to DockerHub +permissions: + contents: read + id-token: write + +jobs: + + get-tag: + if: github.repository == 'grafana/tempo' # skip in forks + runs-on: ubuntu-24.04 + outputs: + tag: ${{ steps.get-tag.outputs.tag }} + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: fetch tags + run: git fetch --tags + + - id: get-tag + run: | + echo "tag=$(./tools/image-tag)" >> "$GITHUB_OUTPUT" + + docker: + if: github.repository == 'grafana/tempo' + needs: get-tag + strategy: + matrix: + component: [ tempo, tempo-vulture, tempo-query, tempo-cli ] + runner_arch: [ { runner: ubuntu-24.04, arch: amd64 }, { runner: github-hosted-ubuntu-arm64, arch: arm64 } ] + runs-on: ${{ matrix.runner_arch.runner }} + env: + TAG: ${{ needs.get-tag.outputs.tag }} + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: fetch tags + run: git fetch --tags + + - name: build-tempo-binaries + run: | + COMPONENT=${{ matrix.component }} GOARCH=${{ matrix.runner_arch.arch }} make exe + + - name: docker-build + run: | + TAG_ARCH="$TAG-${{ matrix.runner_arch.arch }}" + docker build -f cmd/${{ matrix.component }}/Dockerfile -t grafana/${{ matrix.component }}:$TAG_ARCH . 
+ + - name: Login to DockerHub + uses: grafana/shared-workflows/actions/dockerhub-login@dockerhub-login-v1.0 + + - name: docker-push + run: | + TAG_ARCH="$TAG-${{ matrix.runner_arch.arch }}" + docker push grafana/${{ matrix.component }}:$TAG_ARCH + + manifest: + if: github.repository == 'grafana/tempo' + needs: ['get-tag', 'docker'] + strategy: + matrix: + component: [ tempo, tempo-vulture, tempo-query, tempo-cli ] + runs-on: ubuntu-24.04 + env: + TAG: ${{ needs.get-tag.outputs.tag }} + IMAGE_NAME: grafana/${{ matrix.component }} + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Login to DockerHub + uses: grafana/shared-workflows/actions/dockerhub-login@dockerhub-login-v1.0 + + - name: docker-manifest-create-and-push + run: | + docker manifest create \ + $IMAGE_NAME:$TAG \ + --amend $IMAGE_NAME:$TAG-amd64 \ + --amend $IMAGE_NAME:$TAG-arm64 + docker manifest push $IMAGE_NAME:$TAG + + docker manifest create \ + $IMAGE_NAME:latest \ + --amend $IMAGE_NAME:$TAG-amd64 \ + --amend $IMAGE_NAME:$TAG-arm64 + docker manifest push $IMAGE_NAME:latest + + cd-to-dev-env: + # This job deploys the latest main commit to the dev environment + if: github.repository == 'grafana/tempo' && github.ref == 'refs/heads/main' + runs-on: ubuntu-24.04 + needs: manifest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: fetch tags + run: git fetch --tags + + - name: get-tag + run: | + echo "grafana/tempo:$(./tools/image-tag)" > tags_for_cd_tempo + echo "grafana/tempo-query:$(./tools/image-tag)" > tags_for_cd_tempo_query + echo "grafana/tempo-vulture:$(./tools/image-tag)" > tags_for_cd_tempo_vulture + + - name: Authenticate to GAR + uses: grafana/shared-workflows/actions/login-to-gar@main + id: login-to-gar + with: + registry: us-docker.pkg.dev + environment: prod + + - name: Get Vault secrets + uses: grafana/shared-workflows/actions/get-vault-secrets@main + with: + common_secrets: | + GITHUB_APP_ID=updater-app:app-id + GITHUB_APP_INSTALLATION_ID=updater-app:app-installation-id + GITHUB_APP_PRIVATE_KEY=updater-app:private-key + + - name: Update jsonnet + run: | + set -e -o pipefail + + cat << EOF > config.json + { + "destination_branch": "master", + "pull_request_branch_prefix": "auto-merge/cd-tempo-dev", + "pull_request_enabled": true, + "pull_request_existing_strategy": "ignore", + "repo_name": "deployment_tools", + "update_jsonnet_attribute_configs": [ + { + "file_path": "ksonnet/environments/tempo/dev-us-central-0.tempo-dev-01/images.libsonnet", + "jsonnet_key": "tempo", + "jsonnet_value_file": "tags_for_cd_tempo" + }, + { + "file_path": "ksonnet/environments/tempo/dev-us-central-0.tempo-dev-01/images.libsonnet", + "jsonnet_key": "tempo_query", + "jsonnet_value_file": "tags_for_cd_tempo_query" + }, + { + "file_path": "ksonnet/environments/tempo/dev-us-central-0.tempo-dev-01/images.libsonnet", + "jsonnet_key": "tempo_vulture", + "jsonnet_value_file": "tags_for_cd_tempo_vulture" + } + ] + } + EOF + + docker run --rm \ + -e GITHUB_APP_ID \ + -e GITHUB_APP_INSTALLATION_ID \ + -e GITHUB_APP_PRIVATE_KEY \ + -e CONFIG_JSON="$(cat config.json)" \ + -v ./tags_for_cd_tempo:/app/tags_for_cd_tempo \ + -v ./tags_for_cd_tempo_query:/app/tags_for_cd_tempo_query \ + -v ./tags_for_cd_tempo_vulture:/app/tags_for_cd_tempo_vulture us-docker.pkg.dev/grafanalabs-global/docker-deployment-tools-prod/updater diff --git a/.github/workflows/drone-signature-check.yml b/.github/workflows/drone-signature-check.yml deleted file mode 100644 index 6c07976761e..00000000000 --- 
a/.github/workflows/drone-signature-check.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-name: Check Drone CI Signature
-
-on:
-  push:
-    branches:
-      - "main"
-    paths:
-      - ".drone/drone.yml"
-  pull_request:
-    paths:
-      - ".drone/drone.yml"
-
-jobs:
-  drone-signature-check:
-    uses: grafana/shared-workflows/.github/workflows/check-drone-signature.yaml@main
-    with:
-      drone_config_path: .drone/drone.yml
diff --git a/.github/workflows/metrics-collector.yml b/.github/workflows/metrics-collector.yml
index 7c52ff4185d..174be165c2a 100644
--- a/.github/workflows/metrics-collector.yml
+++ b/.github/workflows/metrics-collector.yml
@@ -8,7 +8,7 @@ jobs:
    # this action keeps failing in all forks, only run in grafana/tempo.
    # stats collection action is only useful in main repo.
    if: github.repository == 'grafana/tempo'
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-24.04
    steps:
    - name: Get app token
      uses: actions/create-github-app-token@v1
diff --git a/.github/workflows/milestoned_to_project.yml b/.github/workflows/milestoned_to_project.yml
index 2198b8c24b2..e88b02f092e 100644
--- a/.github/workflows/milestoned_to_project.yml
+++ b/.github/workflows/milestoned_to_project.yml
@@ -9,7 +9,7 @@ jobs:
  build:
    # only run in grafana/tempo.
    if: github.repository == 'grafana/tempo'
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-24.04
    steps:
    - name: Get app token
diff --git a/.github/workflows/publish-technical-documentation-next.yml b/.github/workflows/publish-technical-documentation-next.yml
index bc262616d62..2c283dc9991 100644
--- a/.github/workflows/publish-technical-documentation-next.yml
+++ b/.github/workflows/publish-technical-documentation-next.yml
@@ -13,7 +13,7 @@ jobs:
    permissions:
      contents: read
      id-token: write
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-24.04
    steps:
    - uses: actions/checkout@v4
    - uses: grafana/writers-toolkit/publish-technical-documentation@publish-technical-documentation/v1
diff --git a/.github/workflows/publish-technical-documentation-release.yml b/.github/workflows/publish-technical-documentation-release.yml
index 334ca5ade5c..369ad27c74d 100644
--- a/.github/workflows/publish-technical-documentation-release.yml
+++ b/.github/workflows/publish-technical-documentation-release.yml
@@ -15,7 +15,7 @@ jobs:
    permissions:
      contents: read
      id-token: write
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-24.04
    steps:
    - uses: actions/checkout@v4
      with:
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
new file mode 100644
index 00000000000..35a66d871b2
--- /dev/null
+++ b/.github/workflows/release.yml
@@ -0,0 +1,62 @@
+name: release
+on:
+  push:
+    tags:
+      - 'v*'
+
+# Needed to login to DockerHub
+permissions:
+  contents: read
+  id-token: write
+
+jobs:
+
+  release:
+    if: github.repository == 'grafana/tempo' # skip in forks
+    runs-on: ubuntu-24.04
+    env:
+      NFPM_SIGNING_KEY_FILE: /tmp/nfpm-private-key.key
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+
+      - name: fetch tags
+        run: git fetch --tags
+
+      - id: "get-secrets"
+        name: "get nfpm signing keys"
+        uses: "grafana/shared-workflows/actions/get-vault-secrets@main"
+        with:
+          common_secrets: |
+            NFPM_SIGNING_KEY=packages-gpg:private-key
+            NFPM_DEFAULT_PASSPHRASE=packages-gpg:passphrase
+
+      - name: write-key
+        run: printenv NFPM_SIGNING_KEY > $NFPM_SIGNING_KEY_FILE
+
+      - name: test release
+        run: make release-snapshot
+
+      - name: test deb package
+        run: |
+          # podman makes it simpler to run systemd inside containers
+          podman run -d --name systemd-debian --privileged -v /sys/fs/cgroup:/sys/fs/cgroup:ro jrei/systemd-debian:12
+          podman 
cp ./dist/tempo_*_linux_amd64.deb systemd-debian:.
+          podman cp ./tools/packaging/verify-deb-install.sh systemd-debian:.
+          podman cp ./tools/packaging/wait-for-ready.sh systemd-debian:.
+          podman exec systemd-debian ./verify-deb-install.sh
+          podman rm -f systemd-debian
+
+      - name: test rpm package
+        run: |
+          podman run -d --name systemd-centos --privileged -v /sys/fs/cgroup:/sys/fs/cgroup:ro jrei/systemd-centos:8
+          podman cp ./dist/tempo_*_linux_amd64.rpm systemd-centos:.
+          podman cp ./tools/packaging/verify-rpm-install.sh systemd-centos:.
+          podman cp ./tools/packaging/wait-for-ready.sh systemd-centos:.
+          podman exec systemd-centos ./verify-rpm-install.sh
+          podman rm -f systemd-centos
+
+      - name: release
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        run: make release
diff --git a/.github/workflows/sbom-report.yml b/.github/workflows/sbom-report.yml
index 8c5e437c4a3..4b20038e284 100644
--- a/.github/workflows/sbom-report.yml
+++ b/.github/workflows/sbom-report.yml
@@ -7,14 +7,14 @@ on:

jobs:
  syft-sbom:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-24.04

    steps:
    - name: Checkout
      uses: actions/checkout@v4

    - name: Anchore SBOM Action
-      uses: anchore/sbom-action@v0.17.8
+      uses: anchore/sbom-action@v0.18.0

      with:
        artifact-name: ${{ github.event.repository.name }}-spdx.json
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
index 57508394bad..eecb541ac60 100644
--- a/.github/workflows/stale.yml
+++ b/.github/workflows/stale.yml
@@ -10,9 +10,9 @@ jobs:
  stale:
    # only run in grafana/tempo.
    if: github.repository == 'grafana/tempo'
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-24.04
    steps:
-      - uses: actions/stale@v9.0.0
+      - uses: actions/stale@v9.1.0
        with:
          close-issue-reason: not_planned
          operations-per-run: 100
diff --git a/.github/workflows/update-make-docs.yml b/.github/workflows/update-make-docs.yml
index 3700f7b47c2..95fc89487ad 100644
--- a/.github/workflows/update-make-docs.yml
+++ b/.github/workflows/update-make-docs.yml
@@ -6,7 +6,7 @@ on:
jobs:
  main:
    if: github.repository == 'grafana/tempo'
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-24.04
    steps:
    - uses: actions/checkout@v4
    - uses: grafana/writers-toolkit/update-make-docs@update-make-docs/v1
diff --git a/.gitignore b/.gitignore
index d46261d6155..6cb6f45bc2a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,7 +8,6 @@
/bin
/cmd/tempo-cli/tempo-cli
/cmd/tempo-query/tempo-query
-/cmd/tempo-serverless/vendor/
/dist
/example/docker-compose/**/gcs-data/
/example/docker-compose/**/tempo-data/
@@ -25,3 +24,4 @@ integration/e2e/deployments/e2e_integration_test[0-9]*
.tempo.yaml
/tmp
gh-token.txt
+.cache
diff --git a/.goreleaser.yml b/.goreleaser.yml
index 9304e5708d2..9ddce1cdaea 100644
--- a/.goreleaser.yml
+++ b/.goreleaser.yml
@@ -128,7 +128,7 @@ nfpms:
      - deb
      - rpm
    contents:
-      - src: ./example/docker-compose/shared/tempo.yaml
+      - src: ./tools/packaging/tempo.yaml
        dst: /etc/tempo/config.yml
        type: 'config|noreplace'
      - src: ./tools/packaging/tempo.service
diff --git a/CHANGELOG.md b/CHANGELOG.md
index f546454b62e..9a07e5a13cd 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,33 +1,96 @@
## main / unreleased

-* [FEATURE] tempo-cli: support dropping multiple traces in a single operation [#4266](https://github.com/grafana/tempo/pull/4266) (@ndk)
+* [CHANGE] **BREAKING CHANGE** Remove tempo serverless [#4599](https://github.com/grafana/tempo/pull/4599/) @electron0zero
+  The following config options are no longer valid; please remove them if you are using any of them in your Tempo config:
+  ```
+  querier:
+    search:
+      prefer_self: 
external_hedge_requests_at:
+      external_hedge_requests_up_to:
+      external_backend:
+      google_cloud_run:
+      external_endpoints:
+  ```
+  The Tempo serverless related metrics `tempo_querier_external_endpoint_duration_seconds`, `tempo_querier_external_endpoint_hedged_roundtrips_total` and `tempo_feature_enabled` are being removed.
+
+* [CHANGE] **BREAKING CHANGE** Removed `internal_error` as a reason from `tempo_discarded_spans_total`. [#4554](https://github.com/grafana/tempo/pull/4554) (@joe-elliott)
+* [ENHANCEMENT] Update minio to version [#4341](https://github.com/grafana/tempo/pull/4568) (@javiermolinar)
+* [ENHANCEMENT] Prevent queries in the ingester from blocking flushing traces to disk and causing memory spikes. [#4483](https://github.com/grafana/tempo/pull/4483) (@joe-elliott)
+* [ENHANCEMENT] Update tempo operational dashboard for new block-builder and v2 traces api [#4559](https://github.com/grafana/tempo/pull/4559) (@mdisibio)
+* [ENHANCEMENT] Improve block-builder performance by flushing blocks concurrently [#4565](https://github.com/grafana/tempo/pull/4565) (@mdisibio)
+* [ENHANCEMENT] Improve block-builder performance [#4596](https://github.com/grafana/tempo/pull/4596) (@mdisibio)
+* [ENHANCEMENT] Export new `tempo_ingest_group_partition_lag` metric from block-builders and metrics-generators [#4571](https://github.com/grafana/tempo/pull/4571) (@mdisibio)
+* [ENHANCEMENT] Use distroless base container images for improved security [#4556](https://github.com/grafana/tempo/pull/4556) (@carles-grafana)
+* [BUGFIX] Choose a default step for a gRPC streaming query range request if none is provided. [#4546](https://github.com/grafana/tempo/pull/4576) (@joe-elliott)
+  Correctly copy exemplars for metrics like `| rate()` when gRPC streaming.
+* [BUGFIX] Fix performance bottleneck and file cleanup in block builder [#4550](https://github.com/grafana/tempo/pull/4550) (@mdisibio)
+* [BUGFIX] TraceQL incorrect results for additional spanset filters after a select operation [#4600](https://github.com/grafana/tempo/pull/4600) (@mdisibio)
+* [BUGFIX] TraceQL results caching bug for floats ending in .0 [#4539](https://github.com/grafana/tempo/pull/4539) (@carles-grafana)
+
+# v2.7.0
+
+* [CHANGE] Disable gRPC compression in the querier and distributor for performance reasons [#4429](https://github.com/grafana/tempo/pull/4429) (@carles-grafana)
+  If you would like to re-enable it, we recommend 'snappy'. Use the following settings:
+```
+ingester_client:
+  grpc_client_config:
+    grpc_compression: "snappy"
+metrics_generator_client:
+  grpc_client_config:
+    grpc_compression: "snappy"
+querier:
+  frontend_worker:
+    grpc_client_config:
+      grpc_compression: "snappy"
+```
* [CHANGE] slo: include request cancellations within SLO [#4355](https://github.com/grafana/tempo/pull/4355) (@electron0zero)
  request cancellations are exposed under the `result` label in `tempo_query_frontend_queries_total` and `tempo_query_frontend_queries_within_slo_total` with `completed` or `canceled` values to differentiate between completed and canceled requests. 
* [CHANGE] update default config values to better align with production workloads [#4340](https://github.com/grafana/tempo/pull/4340) (@electron0zero)
* [CHANGE] fix deprecation warning by switching to DoBatchWithOptions [#4343](https://github.com/grafana/tempo/pull/4343) (@dastrobu)
* [CHANGE] **BREAKING CHANGE** The Tempo serverless is now deprecated and will be removed in an upcoming release [#4017](https://github.com/grafana/tempo/pull/4017/) @electron0zero
-* [CHANGE] **BREAKING CHANGE** Change the AWS Lambda serverless build tooling output from "main" to "bootstrap". Refer to https://aws.amazon.com/blogs/compute/migrating-aws-lambda-functions-from-the-go1-x-runtime-to-the-custom-runtime-on-amazon-linux-2/ for migration steps [#3852](https://github.com/grafana/tempo/pull/3852) (@zatlodan)
-* [CHANGE] Add throughput and SLO metrics in the tags and tag values endpoints [#4148](https://github.com/grafana/tempo/pull/4148) (@electron0zero)
* [CHANGE] tempo-cli: add support for /api/v2/traces endpoint [#4127](https://github.com/grafana/tempo/pull/4127) (@electron0zero)
  **BREAKING CHANGE** The `tempo-cli` now uses the `/api/v2/traces` endpoint by default, please use `--v1` flag to use `/api/traces` endpoint, which was the default in previous versions.
* [CHANGE] TraceByID: don't allow concurrent_shards greater than query_shards. [#4074](https://github.com/grafana/tempo/pull/4074) (@electron0zero)
* [CHANGE] **BREAKING CHANGE** The dynamic injection of X-Scope-OrgID header for metrics generator remote-writes is changed. If the header is already set in per-tenant overrides or global tempo configuration, then it is honored and not overwritten. [#4021](https://github.com/grafana/tempo/pull/4021) (@mdisibio)
-* [CHANGE] **BREAKING CHANGE** Migrate from OpenTracing to OpenTelemetry instrumentation. Removed the `use_otel_tracer` configuration option. Use the OpenTelemetry environment variables to configure the span exporter [#3646](https://github.com/grafana/tempo/pull/3646) (@andreasgerstmayr)
+* [CHANGE] **BREAKING CHANGE** Migrate from OpenTracing to OpenTelemetry instrumentation. Removed the `use_otel_tracer` configuration option. Use the OpenTelemetry environment variables to configure the span exporter [#4028](https://github.com/grafana/tempo/pull/4028),[#3646](https://github.com/grafana/tempo/pull/3646) (@andreasgerstmayr)
  To continue using the Jaeger exporter, use the following environment variable: `OTEL_TRACES_EXPORTER=jaeger`.
* [CHANGE] No longer send the final diff in GRPC streaming. Instead we rely on the streamed intermediate results. [#4062](https://github.com/grafana/tempo/pull/4062) (@joe-elliott)
* [CHANGE] Update Go to 1.23.3 [#4146](https://github.com/grafana/tempo/pull/4146) [#4147](https://github.com/grafana/tempo/pull/4147) [#4380](https://github.com/grafana/tempo/pull/4380) (@javiermolinar @mdisibio)
-* [CHANGE] TraceQL: Add range condition for byte predicates [#4198](https://github.com/grafana/tempo/pull/4198) (@ie-pham)
* [CHANGE] Return 422 for TRACE_TOO_LARGE queries [#4160](https://github.com/grafana/tempo/pull/4160) (@zalegrala)
* [CHANGE] Upgrade OTEL sdk to reduce allocs [#4243](https://github.com/grafana/tempo/pull/4243) (@joe-elliott)
+* [CHANGE] Update Alpine image version to 3.21 [#4504](https://github.com/grafana/tempo/pull/4504) (@mdisibio)
* [CHANGE] Tighten file permissions [#4251](https://github.com/grafana/tempo/pull/4251) (@zalegrala)
+* [CHANGE] Drop max live traces log message and rate limit trace too large. 
[#4418](https://github.com/grafana/tempo/pull/4418) (@joe-elliott)
+* [CHANGE] Update the Open-Telemetry dependencies to v0.116.0 [#4466](https://github.com/grafana/tempo/pull/4466) (@yvrhdn)
+  **BREAKING CHANGE** After this update the Open-Telemetry Collector receiver will connect to `localhost` instead of all interfaces `0.0.0.0`.
+  Due to this, Tempo installations running inside Docker have to update the address they listen on.
+  For more details on this change, see [#4465](https://github.com/grafana/tempo/issues/4465)
+  For more information about the security risk this change addresses, see https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/security-best-practices.md#safeguards-against-denial-of-service-attacks
+* [CHANGE] Replace `cespare/xxhash` with `cespare/xxhash/v2` [#4485](https://github.com/grafana/tempo/pull/4485) (@Juneezee)
+* [CHANGE] chore: remove gofakeit dependency [#4274](https://github.com/grafana/tempo/pull/4274) (@javiermolinar)
+* [CHANGE] Chore: delete spanlogger. [#4312](https://github.com/grafana/tempo/pull/4312) (@javiermolinar)
+* [CHANGE] Update Azurite image. [#4298](https://github.com/grafana/tempo/pull/4464) (@javiermolinar)
+* [CHANGE] Update golang.org/x/crypto [#4474](https://github.com/grafana/tempo/pull/4474) (@javiermolinar)
+* [CHANGE] Distributor shim: add test verifying receiver works (including metrics) [#4477](https://github.com/grafana/tempo/pull/4477) (@yvrhdn)
+* [CHANGE] Replace Grafana Agent example by Grafana Alloy [#4030](https://github.com/grafana/tempo/pull/4030) (@javiermolinar)
+* [CHANGE] Update to the latest dskit [#4341](https://github.com/grafana/tempo/pull/4341) (@dastrobu)
+* [FEATURE] tempo-cli: support dropping multiple traces in a single operation [#4266](https://github.com/grafana/tempo/pull/4266) (@ndk)
* [FEATURE] Discarded span logging `log_discarded_spans` [#3957](https://github.com/grafana/tempo/issues/3957) (@dastrobu)
* [FEATURE] TraceQL support for instrumentation scope [#3967](https://github.com/grafana/tempo/pull/3967) (@ie-pham)
* [FEATURE] Export cost attribution usage metrics from distributor [#4162](https://github.com/grafana/tempo/pull/4162) (@mdisibio)
* [FEATURE] TraceQL metrics: avg_over_time [#4073](https://github.com/grafana/tempo/pull/4073) (@javiermolinar)
-* [ENHANCEMENT] Update to the latest dskit [#4341](https://github.com/grafana/tempo/pull/4341) (@dastrobu)
+* [FEATURE] Limit tags and tag values search [#4320](https://github.com/grafana/tempo/pull/4320) (@javiermolinar)
+* [FEATURE] TraceQL metrics queries: add min_over_time [#3975](https://github.com/grafana/tempo/pull/3975) (@javiermolinar)
+* [FEATURE] TraceQL metrics queries: add max_over_time [#4065](https://github.com/grafana/tempo/pull/4065) (@javiermolinar)
+* [ENHANCEMENT] TraceQL: Add range condition for byte predicates [#4198](https://github.com/grafana/tempo/pull/4198) (@ie-pham)
+* [ENHANCEMENT] Add throughput and SLO metrics in the tags and tag values endpoints [#4148](https://github.com/grafana/tempo/pull/4148) (@electron0zero)
+* [ENHANCEMENT] **BREAKING CHANGE** Add maximum spans per span set. Users can set `max_spans_per_span_set` to 0 to obtain the old behavior. 
[#4383](https://github.com/grafana/tempo/pull/4383) (@carles-grafana) +* [ENHANCEMENT] Add query-frontend limit for max length of query expression [#4397](https://github.com/grafana/tempo/pull/4397) (@electron0zero) +* [ENHANCEMENT] distributor: return trace id length when it is invalid [#4407](https://github.com/grafana/tempo/pull/4407) (@carles-grafana) * [ENHANCEMENT] Changed log level from INFO to DEBUG for the TempoDB Find operation using traceId to reduce excessive/unwanted logs in log search. [#4179](https://github.com/grafana/tempo/pull/4179) (@Aki0x137) * [ENHANCEMENT] Pushdown collection of results from generators in the querier [#4119](https://github.com/grafana/tempo/pull/4119) (@electron0zero) -* [ENHANCEMENT] The span multiplier now also sources its value from the resource attributes. [#4210](https://github.com/grafana/tempo/pull/4210) +* [ENHANCEMENT] The span multiplier now also sources its value from the resource attributes. [#4210](https://github.com/grafana/tempo/pull/4210) (@JunhoeKim) * [ENHANCEMENT] TraceQL: Attribute iterators collect matched array values [#3867](https://github.com/grafana/tempo/pull/3867) (@electron0zero, @stoewer) * [ENHANCEMENT] Allow returning partial traces that exceed the MaxBytes limit for V2 [#3941](https://github.com/grafana/tempo/pull/3941) (@javiermolinar) * [ENHANCEMENT] Added new middleware to validate request query values [#3993](https://github.com/grafana/tempo/pull/3993) (@javiermolinar) @@ -36,29 +99,21 @@ * [ENHANCEMENT] Update metrics-generator config in Tempo distributed docker compose example to serve TraceQL metrics [#4003](https://github.com/grafana/tempo/pull/4003) (@javiermolinar) * [ENHANCEMENT] Reduce allocs related to marshalling dedicated columns repeatedly in the query frontend. [#4007](https://github.com/grafana/tempo/pull/4007) (@joe-elliott) * [ENHANCEMENT] Improve performance of TraceQL queries [#4114](https://github.com/grafana/tempo/pull/4114) (@mdisibio) -* [ENHANCEMENT] Improve performance of TraceQL queries [#4163](https://github.com/grafana/tempo/pull/4163) (@mdisibio) +* [ENHANCEMENT] Improve performance of TraceQL queries [#4163](https://github.com/grafana/tempo/pull/4163) (@mdisibio) +* [ENHANCEMENT] Improve performance of some TraceQL queries using select() operation [#4438](https://github.com/grafana/tempo/pull/4438) (@mdisibio) * [ENHANCEMENT] Reduce memory usage of classic histograms in the span-metrics and service-graphs processors [#4232](https://github.com/grafana/tempo/pull/4232) (@mdisibio) * [ENHANCEMENT] Implement simple Fetch by key for cache items [#4032](https://github.com/grafana/tempo/pull/4032) (@javiermolinar) -* [ENHANCEMENT] Replace Grafana Agent example by Grafana Alloy[#4030](https://github.com/grafana/tempo/pull/4030) (@javiermolinar) -* [ENHANCEMENT] Support exporting internal Tempo traces via OTLP exporter when `use_otel_tracer` is enabled. Use the OpenTelemetry SDK environment variables to configure the span exporter. 
[#4028](https://github.com/grafana/tempo/pull/4028) (@andreasgerstmayr) -* [ENHANCEMENT] TraceQL metrics queries: add min_over_time [#3975](https://github.com/grafana/tempo/pull/3975) (@javiermolinar) -* [ENHANCEMENT] TraceQL metrics queries: add max_over_time [#4065](https://github.com/grafana/tempo/pull/4065) (@javiermolinar) -* [ENHANCEMENT] Write tenantindex as proto and json with a prefernce for proto [#4072](https://github.com/grafana/tempo/pull/4072) (@zalegrala) -* [ENHANCEMENT] Pool zstd encoding/decoding for tmepodb/backend [#4208](https://github.com/grafana/tempo/pull/4208) (@zalegrala) -* [ENHANCEMENT] The span multiplier now also sources its value from the resource attributes. [#4210](https://github.com/grafana/tempo/pull/4210) -* [ENHANCEMENT] Changed log level from INFO to DEBUG for the TempoDB Find operation using traceId to reduce excessive/unwanted logs in log search. [#4179](https://github.com/grafana/tempo/pull/4179) (@Aki0x137) -* [ENHANCEMENT] Pushdown collection of results from generators in the querier [#4119](https://github.com/grafana/tempo/pull/4119) (@electron0zero) +* [ENHANCEMENT] Write tenantindex as proto and json with a preference for proto [#4072](https://github.com/grafana/tempo/pull/4072) (@zalegrala) +* [ENHANCEMENT] Pool zstd encoding/decoding for tempodb/backend [#4208](https://github.com/grafana/tempo/pull/4208) (@zalegrala) * [ENHANCEMENT] Send semver version in api/status/buildinfo for cloud deployments [#4110](https://github.com/grafana/tempo/pull/4110) (@Aki0x137) * [ENHANCEMENT] Add completed block validation on startup. [#4256](https://github.com/grafana/tempo/pull/4256) (@joe-elliott) * [ENHANCEMENT] Speedup DistinctString and ScopedDistinctString collectors [#4109](https://github.com/grafana/tempo/pull/4109) (@electron0zero) * [ENHANCEMENT] Speedup collection of results from ingesters in the querier [#4100](https://github.com/grafana/tempo/pull/4100) (@electron0zero) * [ENHANCEMENT] Speedup DistinctValue collector and exit early for ingesters [#4104](https://github.com/grafana/tempo/pull/4104) (@electron0zero) * [ENHANCEMENT] Add disk caching in ingester SearchTagValuesV2 for completed blocks [#4069](https://github.com/grafana/tempo/pull/4069) (@electron0zero) -* [ENHANCEMENT] chore: remove gofakeit dependency [#4274](https://github.com/grafana/tempo/pull/4274) (@javiermolinar) * [ENHANCEMENT] Add max flush attempts and a metric to the metrics generator [#4254](https://github.com/grafana/tempo/pull/4254) (@joe-elliott) * [ENHANCEMENT] Collection of query-frontend changes to reduce allocs. [#4242](https://github.com/grafana/tempo/pull/4242) (@joe-elliott) * [ENHANCEMENT] Added `insecure-skip-verify` option in tempo-cli to skip SSL certificate validation when connecting to the S3 backend. [#4259](https://github.com/grafana/tempo/pull/4259) (@faridtmammadov) -* [ENHANCEMENT] Chore: delete spanlogger. [4312](https://github.com/grafana/tempo/pull/4312) (@javiermolinar) * [ENHANCEMENT] Add `invalid_utf8` to reasons spanmetrics will discard spans. [#4293](https://github.com/grafana/tempo/pull/4293) (@zalegrala) * [ENHANCEMENT] Reduce frontend and querier allocations by dropping HTTP headers early in the pipeline. [#4298](https://github.com/grafana/tempo/pull/4298) (@joe-elliott) * [ENHANCEMENT] Reduce ingester working set by improving prealloc behavior. 
[#4344](https://github.com/grafana/tempo/pull/4344),[#4369](https://github.com/grafana/tempo/pull/4369) (@joe-elliott) @@ -68,18 +123,25 @@ * [ENHANCEMENT] Reuse generator code to better refuse "too large" traces. [#4365](https://github.com/grafana/tempo/pull/4365) (@joe-elliott) This will cause the ingester to more aggressively and correctly refuse traces. Also added two metrics to better track bytes consumed per tenant in the ingester: `tempo_metrics_generator_live_trace_bytes` and `tempo_ingester_live_trace_bytes`. +* [ENHANCEMENT] Reduce goroutines in all non-querier components. [#4484](https://github.com/grafana/tempo/pull/4484) (@joe-elliott) +* [ENHANCEMENT] Add option to enforce max span attribute size [#4335](https://github.com/grafana/tempo/pull/4335) (@ie-pham) +* [BUGFIX] Handle invalid TraceQL query filter in tag values v2 disk cache [#4392](https://github.com/grafana/tempo/pull/4392) (@electron0zero) * [BUGFIX] Replace hedged requests roundtrips total with a counter. [#4063](https://github.com/grafana/tempo/pull/4063) [#4078](https://github.com/grafana/tempo/pull/4078) (@galalen) * [BUGFIX] Metrics generators: Correctly drop from the ring before stopping ingestion to reduce drops during a rollout. [#4101](https://github.com/grafana/tempo/pull/4101) (@joe-elliott) * [BUGFIX] Correctly handle 400 Bad Request and 404 Not Found in gRPC streaming [#4144](https://github.com/grafana/tempo/pull/4144) (@mapno) +* [BUGFIX] Correctly handle Authorization header in gRPC streaming [#4419](https://github.com/grafana/tempo/pull/4419) (@mdisibio) * [BUGFIX] Pushes a 0 to classic histogram's counter when the series is new to allow Prometheus to start from a non-null value. [#4140](https://github.com/grafana/tempo/pull/4140) (@mapno) * [BUGFIX] Fix counter samples being downsampled by backdating the initial sample to the previous minute when the series is new [#4236](https://github.com/grafana/tempo/pull/4236) (@javiermolinar) +* [BUGFIX] Fix traceql metrics returning incorrect data for falsey queries and unscoped attributes [#4409](https://github.com/grafana/tempo/pull/4409) (@mdisibio) * [BUGFIX] Fix traceql metrics time range handling at the cutoff between recent and backend data [#4257](https://github.com/grafana/tempo/issues/4257) (@mdisibio) -* [BUGFIX] Fix several issues with exemplar values for traceql metrics [#4366](https://github.com/grafana/tempo/pull/4366) (@mdisibio) +* [BUGFIX] Fix several issues with exemplar values for traceql metrics [#4366](https://github.com/grafana/tempo/pull/4366) [#4404](https://github.com/grafana/tempo/pull/4404) (@mdisibio) * [BUGFIX] Skip computing exemplars for instant queries. [#4204](https://github.com/grafana/tempo/pull/4204) (@javiermolinar) * [BUGFIX] Gave context to orphaned spans related to various maintenance processes. [#4260](https://github.com/grafana/tempo/pull/4260) (@joe-elliott) * [BUGFIX] Initialize histogram buckets to 0 to avoid downsampling. [#4366](https://github.com/grafana/tempo/pull/4366) (@javiermolinar) * [BUGFIX] Utilize S3Pass and S3User parameters in tempo-cli options, which were previously unused in the code. [#4259](https://github.com/grafana/tempo/pull/4259) (@faridtmammadov) * [BUGFIX] Fixed an issue in the generator where the first batch was counted 2x against a trace's size. 
[#4365](https://github.com/grafana/tempo/pull/4365) (@joe-elliott) +* [BUGFIX] Fix compaction bug in SingleBinaryMode that could lead to 2x, 3x, etc. TraceQL metrics results [#4446](https://github.com/grafana/tempo/pull/4446) (@mdisibio) +* [BUGFIX] Unstable compactors can occasionally duplicate data. Check for job ownership during compaction and cancel a job if ownership changes. [#4420](https://github.com/grafana/tempo/pull/4420) (@joe-elliott) # v2.6.1 @@ -140,7 +202,6 @@ * [ENHANCEMENT] Reduce memory consumption of query-frontend [#3888](https://github.com/grafana/tempo/pull/3888) (@joe-elliott) * [ENHANCEMENT] Reduce log level verbosity for e2e tests [#3900](https://github.com/grafana/tempo/pull/3900) (@javiermolinar) * [ENHANCEMENT] Added new Traces api V2 [#3912](https://github.com/grafana/tempo/pull/3912) (@javiermolinar) -* [ENHANCEMENT] Update to the latest dskit [#3915](https://github.com/grafana/tempo/pull/3915) (@andreasgerstmayr) * [ENHANCEMENT] Reduce allocs building queriers sharded requests [#3932](https://github.com/grafana/tempo/pull/3932) (@javiermolinar) * [ENHANCEMENT] Allow compaction disablement per-tenant [#3965](https://github.com/grafana/tempo/pull/3965) (@zalegrala) * [ENHANCEMENT] Implement polling tenants concurrently [#3647](https://github.com/grafana/tempo/pull/3647) (@zalegrala) @@ -177,7 +238,7 @@ properly. A jsonnet example of an init container is included with the PR. This impacts all users of the `grafana/tempo` Docker image. * [CHANGE] Remove vParquet encoding [#3663](https://github.com/grafana/tempo/pull/3663) (@mdisibio) - **BREAKING CHANGE** In the last release vParquet (the first version) was deprecated and blocked from writes. Now, it's + **BREAKING CHANGE** In the last release vParquet (the first version) was deprecated and blocked from writes. Now, it's removed entirely. It will no longer be recognized as a valid encoding and cannot read any remaining vParquet blocks. Installations running with historical defaults should not require any changes as the default has been migrated for several releases. Installations with storage settings pinned to vParquet must run a previous release configured for vParquet2 or higher until all existing vParquet (1) blocks @@ -190,7 +251,7 @@ * [CHANGE] Add golangci to the tools image and update `lint` make target [#3610](https://github.com/grafana/tempo/pull/3610) (@zalegrala) * [CHANGE] Update Alpine image version to 3.20 [#3710](https://github.com/grafana/tempo/pull/3710) (@joe-elliott) * [FEATURE] Add TLS support for Memcached Client [#3585](https://github.com/grafana/tempo/pull/3585) (@sonisr) -* [FEATURE] TraceQL metrics queries: add quantile_over_time [#3605](https://github.com/grafana/tempo/pull/3605) [#3633](https://github.com/grafana/tempo/pull/3633) (@mdisibio) +* [FEATURE] TraceQL metrics queries: add quantile_over_time [#3605](https://github.com/grafana/tempo/pull/3605) [#3633](https://github.com/grafana/tempo/pull/3633) (@mdisibio) * [FEATURE] TraceQL metrics queries: add histogram_over_time [#3644](https://github.com/grafana/tempo/pull/3644) (@mdisibio) * [FEATURE] Added gRPC streaming endpoints for Tempo APIs. * Added gRPC streaming endpoints for all tag queries. 
[#3460](https://github.com/grafana/tempo/pull/3460) (@joe-elliott) @@ -323,13 +384,13 @@ * [CHANGE] Upgrade from deprecated [azure-storage-blob-go](https://github.com/Azure/azure-storage-blob-go) SDK to [azure-sdk-for-go](https://github.com/Azure/azure-sdk-for-go) [#2835](https://github.com/grafana/tempo/issues/2835) (@LasseHels) * [CHANGE] Metrics summary API validate the requested time range [#2902](https://github.com/grafana/tempo/pull/2902) (@mdisibio) * [CHANGE] Restructure Azure backends into versioned backends. Introduce `use_v2_sdk` config option for switching. [#2952](https://github.com/grafana/tempo/issues/2952) (@zalegrala) - v1: [azure-storage-blob-go](https://github.com/Azure/azure-storage-blob-go) original (now deprecated) SDK - v2: [azure-sdk-for-go](https://github.com/Azure/azure-sdk-for-go) + v1: [azure-storage-blob-go](https://github.com/Azure/azure-storage-blob-go) original (now deprecated) SDK + v2: [azure-sdk-for-go](https://github.com/Azure/azure-sdk-for-go) * [CHANGE] Adjust trace size estimation to better honor row group size settings. [#3038](https://github.com/grafana/tempo/pull/3038) (@joe-elliott) * [CHANGE] Update alpine image version to 3.18 to patch CVE-2022-48174. [#3046](https://github.com/grafana/tempo/pull/3046) (@joe-elliott) * [CHANGE] Overrides module refactor [#2688](https://github.com/grafana/tempo/pull/2688) (@mapno) - Added new `defaults` block to the overrides' module. Overrides change to indented syntax. - Old config: + Added new `defaults` block to the overrides' module. Overrides change to indented syntax. + Old config: ``` overrides: @@ -384,7 +445,7 @@ defaults: * [ENHANCEMENT] Add histogram buckets to metrics-generator config in user-configurable overrides [#2928](https://github.com/grafana/tempo/pull/2928) (@mar4uk) * [ENHANCEMENT] Adds websocket support for search streaming. [#2840](https://github.com/grafana/tempo/pull/2840) (@joe-elliott) * [ENHANCEMENT] Add new config block to distributors to produce debug metrics. [#3008](https://github.com/grafana/tempo/pull/3008) (@joe-elliott) - **Breaking Change** Removed deprecated config option: distributor.log_received_spans + **Breaking Change** Removed deprecated config option: distributor.log_received_spans * [ENHANCEMENT] added a metrics generator config option to enable/disable X-Scope-OrgID headers on remote write. [#2974](https://github.com/grafana/tempo/pull/2974) (@vineetjp) * [ENHANCEMENT] Correctly return RetryInfo to Otel Collector/Grafana Agent on ResourceExhausted. This allows the agents to honor their own retry settings. [#3019](https://github.com/grafana/tempo/pull/3019) (@joe-elliott) @@ -467,7 +528,7 @@ storage: * [FEATURE] New TraceQL structural operators descendant (>>), child (>), and sibling (~) [#2625](https://github.com/grafana/tempo/pull/2625) [#2660](https://github.com/grafana/tempo/pull/2660) (@mdisibio) * [FEATURE] Add user-configurable overrides module [#2543](https://github.com/grafana/tempo/pull/2543) [#2682](https://github.com/grafana/tempo/pull/2682) [#2681](https://github.com/grafana/tempo/pull/2681) (@electron0zero @kvrhdn) * [FEATURE] Add support for `q` query param in `/api/v2/search/<tag>/values` to filter results based on a TraceQL query [#2253](https://github.com/grafana/tempo/pull/2253) (@mapno) -To make use of filtering, configure `autocomplete_filtering_enabled`. + To make use of filtering, configure `autocomplete_filtering_enabled`. * [FEATURE] Add support for `by()` and `coalesce()` to TraceQL. 
[#2490](https://github.com/grafana/tempo/pull/2490) * [FEATURE] Add a GRPC streaming endpoint for traceql search [#2366](https://github.com/grafana/tempo/pull/2366) (@joe-elliott) * [FEATURE] Add new API to summarize span metrics from generators [#2481](https://github.com/grafana/tempo/pull/2481) (@zalegrala) @@ -592,7 +653,7 @@ To make use of filtering, configure `autocomplete_filtering_enabled`. ## v2.0.0 / 2023-01-31 * [CHANGE] **BREAKING CHANGE** Use snake case on Azure Storage config [#1879](https://github.com/grafana/tempo/issues/1879) (@faustodavid) -Example of using snake case on Azure Storage config: + Example of using snake case on Azure Storage config: ``` # config.yaml @@ -610,7 +671,7 @@ storage: * [CHANGE] Update Go to 1.19 [#1665](https://github.com/grafana/tempo/pull/1665) (@ie-pham) * [CHANGE] Remove unused scheduler frontend code [#1734](https://github.com/grafana/tempo/pull/1734) (@mapno) * [CHANGE] Deprecated `query-frontend.query_shards` in favor of `query_frontend.trace_by_id.query_shards`. -Old config will still work but will be removed in a future release. [#1735](https://github.com/grafana/tempo/pull/1735) (@mapno) + Old config will still work but will be removed in a future release. [#1735](https://github.com/grafana/tempo/pull/1735) (@mapno) * [CHANGE] Update alpine image version to 3.16. [#1784](https://github.com/grafana/tempo/pull/1784) (@zalegrala) * [CHANGE] Delete TempoRequestErrors alert from mixin [#1810](https://github.com/grafana/tempo/pull/1810) (@zalegrala) * **BREAKING CHANGE** Any jsonnet users relying on this alert should copy this into their own environment. @@ -680,7 +741,7 @@ Old config will still work but will be removed in a future release. [#1735](http * [FEATURE] TraceQL support * [FEATURE] Parquet backend is GA and default * [FEATURE] Add generic forwarder and implement otlpgrpc forwarder [#1775](https://github.com/grafana/tempo/pull/1775) (@Blinkuu) - New config options and example configuration: + New config options and example configuration: ``` # config.yaml @@ -744,7 +805,7 @@ overrides: * [ENHANCEMENT] Add query parameter to search API for traceQL queries [#1729](https://github.com/grafana/tempo/pull/1729) (@kvrhdn) * [ENHANCEMENT] metrics-generator: filter out older spans before metrics are aggregated [#1612](https://github.com/grafana/tempo/pull/1612) (@ie-pham) * [ENHANCEMENT] Add hedging to trace by ID lookups created by the frontend. [#1735](https://github.com/grafana/tempo/pull/1735) (@mapno) - New config options and defaults: + New config options and defaults: ``` query_frontend: @@ -755,7 +816,7 @@ query_frontend: * [ENHANCEMENT] Vulture now has improved distribution of the random traces it searches. [#1763](https://github.com/grafana/tempo/pull/1763) (@rfratto) * [ENHANCEMENT] Upgrade opentelemetry-proto submodule to v0.18.0 Internal types are updated to use `scope` instead of `instrumentation_library`. - This is a breaking change in trace by ID queries if JSON is requested. [#1754](https://github.com/grafana/tempo/pull/1754) (@mapno) + This is a breaking change in trace by ID queries if JSON is requested. 
[#1754](https://github.com/grafana/tempo/pull/1754) (@mapno) * [ENHANCEMENT] Add TLS support to the vulture [#1874](https://github.com/grafana/tempo/pull/1874) (@zalegrala) * [ENHANCEMENT] metrics-generator: extract `status_message` field from spans [#1786](https://github.com/grafana/tempo/pull/1786), [#1794](https://github.com/grafana/tempo/pull/1794) (@stoewer) * [ENHANCEMENT] metrics-generator: handle collisions between user defined and default dimensions [#1794](https://github.com/grafana/tempo/pull/1794) (@stoewer) @@ -789,7 +850,7 @@ query_frontend: * [CHANGE] Swapped out Google Cloud Functions serverless docs and build for Google Cloud Run. [#1483](https://github.com/grafana/tempo/pull/1483) (@joe-elliott) * [CHANGE] **BREAKING CHANGE** Change spanmetrics metric names and labels to match OTel conventions. [#1478](https://github.com/grafana/tempo/pull/1478) (@mapno) * [FEATURE] Add support for time picker in jaeger query plugin. [#1631](https://github.com/grafana/tempo/pull/1631) (@rubenvp8510) -Old metric names: + Old metric names: ``` traces_spanmetrics_duration_seconds_{sum,count,bucket} @@ -816,19 +877,19 @@ Additionally, default label `span_status` is renamed to `status_code`. ``` * [CHANGE] **BREAKING CHANGE** Include emptyDir for metrics generator wal storage in jsonnet [#1556](https://github.com/grafana/tempo/pull/1556) (@zalegrala) -Jsonnet users will now need to specify a storage request and limit for the generator wal. - _config+:: { - metrics_generator+: { - ephemeral_storage_request_size: '10Gi', - ephemeral_storage_limit_size: '11Gi', - }, - } + Jsonnet users will now need to specify a storage request and limit for the generator wal. + _config+:: { + metrics_generator+: { + ephemeral_storage_request_size: '10Gi', + ephemeral_storage_limit_size: '11Gi', + }, + } * [CHANGE] Two additional latency buckets added to the default settings for generated spanmetrics. Note that this will increase cardinality when using the defaults. [#1593](https://github.com/grafana/tempo/pull/1593) (@fredr) * [CHANGE] Mark `log_received_traces` as deprecated. New flag is `log_received_spans`. Extend distributor spans logger with optional features to include span attributes and a filter by error status. [#1465](https://github.com/grafana/tempo/pull/1465) (@faustodavid) * [FEATURE] Add parquet block format [#1479](https://github.com/grafana/tempo/pull/1479) [#1531](https://github.com/grafana/tempo/pull/1531) [#1564](https://github.com/grafana/tempo/pull/1564) (@annanay25, @mdisibio) * [FEATURE] Add anonymous usage reporting, enabled by default. [#1481](https://github.com/grafana/tempo/pull/1481) (@zalegrala) -**BREAKING CHANGE** As part of the usage stats inclusion, the distributor will also require access to the store. This is required so the distirbutor can know which cluster it should be reporting membership of. + **BREAKING CHANGE** As part of the usage stats inclusion, the distributor will also require access to the store. This is required so the distributor can know which cluster it should be reporting membership of. * [FEATURE] Include messaging systems and databases in service graphs. [#1576](https://github.com/grafana/tempo/pull/1576) (@kvrhdn) * [ENHANCEMENT] Added the ability to have a per tenant max search duration. 
[#1421](https://github.com/grafana/tempo/pull/1421) (@joe-elliott) * [ENHANCEMENT] metrics-generator: expose max_active_series as a metric [#1471](https://github.com/grafana/tempo/pull/1471) (@kvrhdn) diff --git a/CODEOWNERS b/CODEOWNERS index d8d3fb88d65..1a59dd31533 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -13,13 +13,13 @@ * @joe-elliott @mdisibio @mapno @yvrhdn @zalegrala @electron0zero @ie-pham @stoewer @javiermolinar -/docs/ @knylander-grafana @joe-elliott @mdisibio @mapno @yvrhdn @zalegrala @electron0zero @ie-pham @stoewer +/docs/ @knylander-grafana @joe-elliott @mdisibio @mapno @yvrhdn @zalegrala @electron0zero @ie-pham @stoewer @javiermolinar -/.github/backport.yml @jdbaldry @joe-elliott @mdisibio @mapno @yvrhdn @zalegrala @electron0zero @ie-pham @stoewer -/.github/update-make-docs.yml @jdbaldry @joe-elliott @mdisibio @mapno @yvrhdn @zalegrala @electron0zero @ie-pham @stoewer -/.github/website-next.yml @jdbaldry @joe-elliott @mdisibio @mapno @yvrhdn @zalegrala @electron0zero @ie-pham @stoewer -/.github/website-versioned.yml @jdbaldry @joe-elliott @mdisibio @mapno @yvrhdn @zalegrala @electron0zero @ie-pham @stoewer -/docs/docs.mk @jdbaldry @knylander-grafana @joe-elliott @mdisibio @mapno @yvrhdn @zalegrala @electron0zero @ie-pham @stoewer -/docs/make-docs @jdbaldry @knylander-grafana @joe-elliott @mdisibio @mapno @yvrhdn @zalegrala @electron0zero @ie-pham @stoewer -/docs/Makefile @jdbaldry @knylander-grafana @joe-elliott @mdisibio @mapno @yvrhdn @zalegrala @electron0zero @ie-pham @stoewer -/docs/variables.mk @jdbaldry @knylander-grafana @joe-elliott @mdisibio @mapno @yvrhdn @zalegrala @electron0zero @ie-pham @stoewer +/.github/backport.yml @jdbaldry @joe-elliott @mdisibio @mapno @yvrhdn @zalegrala @electron0zero @ie-pham @stoewer @javiermolinar +/.github/update-make-docs.yml @jdbaldry @joe-elliott @mdisibio @mapno @yvrhdn @zalegrala @electron0zero @ie-pham @stoewer @javiermolinar +/.github/website-next.yml @jdbaldry @joe-elliott @mdisibio @mapno @yvrhdn @zalegrala @electron0zero @ie-pham @stoewer @javiermolinar +/.github/website-versioned.yml @jdbaldry @joe-elliott @mdisibio @mapno @yvrhdn @zalegrala @electron0zero @ie-pham @stoewer @javiermolinar +/docs/docs.mk @jdbaldry @knylander-grafana @joe-elliott @mdisibio @mapno @yvrhdn @zalegrala @electron0zero @ie-pham @stoewer @javiermolinar +/docs/make-docs @jdbaldry @knylander-grafana @joe-elliott @mdisibio @mapno @yvrhdn @zalegrala @electron0zero @ie-pham @stoewer @javiermolinar +/docs/Makefile @jdbaldry @knylander-grafana @joe-elliott @mdisibio @mapno @yvrhdn @zalegrala @electron0zero @ie-pham @stoewer @javiermolinar +/docs/variables.mk @jdbaldry @knylander-grafana @joe-elliott @mdisibio @mapno @yvrhdn @zalegrala @electron0zero @ie-pham @stoewer @javiermolinar diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5ce1458500d..ff3ba6901c0 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -36,7 +36,6 @@ cmd/ tempo-cli/ - cli tool for directly inspecting blocks in the backend tempo-vulture/ - bird-themed consistency checker. optional. tempo-query/ - jaeger-query GRPC plugin (Apache2 licensed) - tempo-serverless/ - serverless handler for searching backend storage for traces docs/ example/ - great place to get started running Tempo docker-compose/ diff --git a/Makefile b/Makefile index 97de1af6eea..8447749fa1c 100644 --- a/Makefile +++ b/Makefile @@ -27,7 +27,6 @@ ALL_SRC := $(shell find . 
-name '*.go' \ -not -path './tools*/*' \ -not -path './vendor*/*' \ -not -path './integration/*' \ - -not -path './cmd/tempo-serverless/*' \ -type f | sort) # ALL_SRC but without pkg and tempodb packages @@ -35,7 +34,6 @@ OTHERS_SRC := $(shell find . -name '*.go' \ -not -path './tools*/*' \ -not -path './vendor*/*' \ -not -path './integration/*' \ - -not -path './cmd/tempo-serverless/*' \ -not -path './pkg*/*' \ -not -path './tempodb*/*' \ -type f | sort) @@ -84,11 +82,11 @@ tempo-vulture: .PHONY: exe ## Build exe exe: - GOOS=linux $(MAKE) $(COMPONENT) + GOOS=linux make $(COMPONENT) .PHONY: exe-debug ## Build exe-debug exe-debug: - BUILD_DEBUG=1 GOOS=linux $(MAKE) $(COMPONENT) + BUILD_DEBUG=1 GOOS=linux make $(COMPONENT) ##@ Testin' and Lintin' @@ -102,7 +100,7 @@ benchmark: tools ## Run benchmarks # Not used in CI, tests are split in pkg, tempodb, tempodb-wal and others in CI jobs .PHONY: test-with-cover -test-with-cover: tools test-serverless ## Run tests with code coverage +test-with-cover: tools ## Run tests with code coverage $(GOTEST) $(GOTEST_OPT_WITH_COVERAGE) $(ALL_PKGS) # tests in pkg @@ -122,7 +120,7 @@ test-with-cover-tempodb-wal: tools ## Test tempodb/wal with code coverage # all other tests (excluding pkg & tempodb) .PHONY: test-with-cover-others -test-with-cover-others: tools test-serverless ## Run other tests with code coverage +test-with-cover-others: tools ## Run other tests with code coverage $(GOTEST) $(GOTEST_OPT_WITH_COVERAGE) $(shell go list $(sort $(dir $(OTHERS_SRC)))) # runs e2e tests in the top level integration/e2e directory @@ -130,11 +128,6 @@ test-with-cover-others: tools test-serverless ## Run other tests with code cover test-e2e: tools docker-tempo docker-tempo-query ## Run end to end tests $(GOTEST) -v $(GOTEST_OPT) ./integration/e2e -# runs only serverless e2e tests -.PHONY: test-e2e-serverless -test-e2e-serverless: tools docker-tempo docker-serverless ## Run serverless end to end tests - $(GOTEST) -v $(GOTEST_OPT) ./integration/e2e/serverless - # runs only deployment modes e2e tests .PHONY: test-e2e-deployments test-e2e-deployments: tools docker-tempo docker-tempo-query ## Run end to end tests for deployments @@ -147,7 +140,7 @@ test-integration-poller: tools ## Run poller integration tests # test-all/bench use a docker image so build it first to make sure we're up to date .PHONY: test-all ## Run all tests -test-all: test-with-cover test-e2e test-e2e-serverless test-e2e-deployments test-integration-poller +test-all: test-with-cover test-e2e test-e2e-deployments test-integration-poller .PHONY: test-bench test-bench: tools docker-tempo ## Run all benchmarks @@ -171,9 +164,9 @@ check-jsonnetfmt: jsonnetfmt .PHONY: lint lint: # linting ifneq ($(base),) - $(TOOLS_CMD) $(LINT) run --config .golangci.yml --new-from-rev=$(base) + $(LINT_CMD) $(LINT) run --config .golangci.yml --new-from-rev=$(base) else - $(TOOLS_CMD) $(LINT) run --config .golangci.yml + $(LINT_CMD) $(LINT) run --config .golangci.yml endif ##@ Docker Images @@ -183,6 +176,12 @@ docker-component: check-component exe # not intended to be used directly docker build -t grafana/$(COMPONENT) --build-arg=TARGETARCH=$(GOARCH) -f ./cmd/$(COMPONENT)/Dockerfile . 
docker tag grafana/$(COMPONENT) $(COMPONENT) +.PHONY: docker-component-multi +docker-component-multi: check-component # not intended to be used directly + GOOS=linux GOARCH=amd64 make $(COMPONENT) + GOOS=linux GOARCH=arm64 make $(COMPONENT) + docker buildx build -t grafana/$(COMPONENT) --platform linux/amd64,linux/arm64 --output type=docker -f ./cmd/$(COMPONENT)/Dockerfile . + .PHONY: docker-component-debug docker-component-debug: check-component exe-debug docker build -t grafana/$(COMPONENT)-debug --build-arg=TARGETARCH=$(GOARCH) -f ./cmd/$(COMPONENT)/Dockerfile_debug . @@ -190,22 +189,26 @@ docker-component-debug: check-component exe-debug .PHONY: docker-tempo docker-tempo: ## Build tempo docker image - COMPONENT=tempo $(MAKE) docker-component + COMPONENT=tempo make docker-component + +.PHONY: docker-tempo-multi +docker-tempo-multi: ## Build multiarch image locally, requires containerd image store + COMPONENT=tempo make docker-component-multi docker-tempo-debug: ## Build tempo debug docker image - COMPONENT=tempo $(MAKE) docker-component-debug + COMPONENT=tempo make docker-component-debug .PHONY: docker-cli docker-tempo-cli: ## Build tempo cli docker image - COMPONENT=tempo-cli $(MAKE) docker-component + COMPONENT=tempo-cli make docker-component .PHONY: docker-tempo-query docker-tempo-query: ## Build tempo query docker image - COMPONENT=tempo-query $(MAKE) docker-component + COMPONENT=tempo-query make docker-component .PHONY: docker-tempo-vulture docker-tempo-vulture: ## Build tempo vulture docker image - COMPONENT=tempo-vulture $(MAKE) docker-component + COMPONENT=tempo-vulture make docker-component .PHONY: docker-images ## Build all docker images docker-images: docker-tempo docker-tempo-query docker-tempo-vulture @@ -294,12 +297,11 @@ vendor-check: gen-proto update-mod gen-traceql gen-parquet-query ## Keep up to d git diff --exit-code -- **/go.sum **/go.mod vendor/ pkg/tempopb/ pkg/traceql/ -### Tidy dependencies for tempo and tempo-serverless modules +### Tidy dependencies for tempo modules .PHONY: update-mod update-mod: tools-update-mod ## Update module go mod vendor go mod tidy -e - $(MAKE) -C cmd/tempo-serverless update-mod ### Release (intended to be used in the .github/workflows/release.yml) @@ -328,50 +330,25 @@ docs-test: ##@ jsonnet .PHONY: jsonnet jsonnet-check jsonnet-test jsonnet: tools-image ## Generate jsonnet - $(TOOLS_CMD) $(MAKE) -C operations/jsonnet-compiled/util gen + $(TOOLS_CMD) make -C operations/jsonnet-compiled/util gen jsonnet-check: tools-image ## Check jsonnet - $(TOOLS_CMD) $(MAKE) -C operations/jsonnet-compiled/util check + $(TOOLS_CMD) make -C operations/jsonnet-compiled/util check jsonnet-test: tools-image ## Test jsonnet - $(TOOLS_CMD) $(MAKE) -C operations/jsonnet/microservices test - -##@ serverless -.PHONY: docker-serverless test-serverless -docker-serverless: ## Build docker Tempo serverless - $(MAKE) -C cmd/tempo-serverless build-docker - -test-serverless: ## Run Tempo serverless tests - $(MAKE) -C cmd/tempo-serverless test + $(TOOLS_CMD) make -C operations/jsonnet/microservices test ### tempo-mixin .PHONY: tempo-mixin tempo-mixin-check tempo-mixin: tools-image - $(TOOLS_CMD) $(MAKE) -C operations/tempo-mixin all + $(TOOLS_CMD) make -C operations/tempo-mixin all tempo-mixin-check: tools-image - $(TOOLS_CMD) $(MAKE) -C operations/tempo-mixin check - -##@ drone -.PHONY: drone drone-jsonnet drone-signature -# this requires the drone-cli https://docs.drone.io/cli/install/ -drone: ## Run Drone targets - # piggyback on Loki's build image, this image 
contains a newer version of drone-cli than is - # released currently (1.4.0). The newer version of drone-clie keeps drone.yml human-readable. - # This will run 'make drone-jsonnet' from within the container - docker run -e DRONE_SERVER -e DRONE_TOKEN --rm -v $(shell pwd):/src/loki ${LOKI_BUILD_IMAGE} drone-jsonnet drone-signature - - drone lint .drone/drone.yml --trusted - -drone-jsonnet: - drone jsonnet --stream --format --source .drone/drone.jsonnet --target .drone/drone.yml - -drone-signature: -ifndef DRONE_TOKEN - $(error DRONE_TOKEN is not set, visit https://drone.grafana.net/account) -endif - DRONE_SERVER=https://drone.grafana.net drone sign --save grafana/tempo .drone/drone.yml + $(TOOLS_CMD) make -C operations/tempo-mixin check +.PHONY: generate-manifest +generate-manifest: + GO111MODULE=on CGO_ENABLED=0 go run -v pkg/docsgen/generate_manifest.go # Import fragments include build/tools.mk diff --git a/README.md b/README.md index 6a0a4b32c69..5f4ff2bb89d 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,7 @@ Grafana Tempo is an open source, easy-to-use, and high-scale distributed tracing Distributed tracing helps teams quickly pinpoint performance issues and understand the flow of requests across services. The Explore Traces UI simplifies this process by offering a user-friendly interface to view and analyze trace data, making it easier to identify and resolve issues without needing to write complex queries. -Refer to [Use traces to find solutions](https://grafana.com/docs/tempo/latest/introduction/solutions-with-traces/)t o learn more about how you can use distributed tracing to investigate and solve issues. +Refer to [Use traces to find solutions](https://grafana.com/docs/tempo/latest/introduction/solutions-with-traces/) to learn more about how you can use distributed tracing to investigate and solve issues. ## Explore Traces UI: A better way to get value from your tracing data We are excited to introduce the [Explore Traces app](https://github.com/grafana/explore-traces) as part of the Grafana Explore suite. This app provides a queryless and intuitive experience for analyzing tracing data, allowing teams to quickly identify performance issues, latency bottlenecks, and errors without needing to write complex queries or use TraceQL. diff --git a/RELEASES.MD b/RELEASES.MD index dbaf9842ed7..025cbc71589 100644 --- a/RELEASES.MD +++ b/RELEASES.MD @@ -5,7 +5,7 @@ - Push a semver tag to main on the merge commit above. Something like: - `git tag -a v1.2.0-rc.0` - `git push origin v1.2.0-rc.0` -- This will initiate the build process in Github Actions and Drone. The tagged docker image should +- This will initiate the build process in Github Actions. 
The tagged docker image should be available here shortly: https://hub.docker.com/r/grafana/tempo/tags?page=1&ordering=last_updated - A Github Release Draft should also be available here: https://github.com/grafana/tempo/releases - Copy over the CHANGELOG entries for the release diff --git a/build/tools.mk b/build/tools.mk index 888d799799a..a6bb8a22d35 100644 --- a/build/tools.mk +++ b/build/tools.mk @@ -12,11 +12,12 @@ TOOL_DIR ?= tools TOOL_CONFIG ?= $(TOOL_DIR)/tools.go TOOLS_IMAGE ?= grafana/tempo-ci-tools -TOOLS_IMAGE_TAG ?= main-e87b8d4 +TOOLS_IMAGE_TAG ?= main-d02611a GOTOOLS ?= $(shell cd $(TOOL_DIR) && go list -e -f '{{ .Imports }}' -tags tools |tr -d '[]') TOOLS_CMD = docker run --rm -t -v ${PWD}:/tools $(TOOLS_IMAGE):$(TOOLS_IMAGE_TAG) +LINT_CMD = docker run --rm -t -v ${PWD}:/tools -v ${PWD}/.cache/golangci-lint:/root/.cache/golangci-lint $(TOOLS_IMAGE):$(TOOLS_IMAGE_TAG) .PHONY: tools-image-build tools-image-build: diff --git a/cmd/tempo-cli/Dockerfile b/cmd/tempo-cli/Dockerfile index f54e475f717..518756b8444 100644 --- a/cmd/tempo-cli/Dockerfile +++ b/cmd/tempo-cli/Dockerfile @@ -1,5 +1,12 @@ -FROM alpine:3.20 as certs -RUN apk --update add ca-certificates +FROM alpine:latest AS ca-certificates +RUN apk add --update --no-cache ca-certificates + +FROM gcr.io/distroless/static-debian12:debug + +SHELL ["/busybox/sh", "-c"] + ARG TARGETARCH COPY bin/linux/tempo-cli-${TARGETARCH} /tempo-cli +COPY --from=ca-certificates /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ + ENTRYPOINT ["/tempo-cli"] diff --git a/cmd/tempo-cli/cmd-query-metrics-query-range.go b/cmd/tempo-cli/cmd-query-metrics-query-range.go index 7f15b766b81..15bc997f584 100644 --- a/cmd/tempo-cli/cmd-query-metrics-query-range.go +++ b/cmd/tempo-cli/cmd-query-metrics-query-range.go @@ -61,7 +61,6 @@ func (cmd *metricsQueryCmd) Run(_ *globalOptions) error { Query: cmd.TraceQL, Start: uint64(start), End: uint64(end), - Step: uint64(5 * time.Second), } if cmd.UseGRPC { diff --git a/cmd/tempo-cli/shared.go b/cmd/tempo-cli/shared.go index 1bd13a3df60..f5e5f2942ff 100644 --- a/cmd/tempo-cli/shared.go +++ b/cmd/tempo-cli/shared.go @@ -2,13 +2,14 @@ package main import ( "context" - "encoding/json" "errors" "fmt" "sort" "strconv" "time" + "github.com/gogo/protobuf/jsonpb" + "github.com/gogo/protobuf/proto" "github.com/google/uuid" "github.com/grafana/tempo/pkg/boundedwaitgroup" "github.com/grafana/tempo/tempodb/backend" @@ -124,8 +125,9 @@ func loadBlock(r backend.Reader, c backend.Compactor, tenantID string, id backen }, nil } -func printAsJSON[T any](value T) error { - traceJSON, err := json.Marshal(value) +func printAsJSON(pb proto.Message) error { + m := jsonpb.Marshaler{} + traceJSON, err := m.MarshalToString(pb) if err != nil { return err } diff --git a/cmd/tempo-query/Dockerfile b/cmd/tempo-query/Dockerfile index 44b473c1698..8e3b936bde0 100644 --- a/cmd/tempo-query/Dockerfile +++ b/cmd/tempo-query/Dockerfile @@ -1,13 +1,17 @@ -FROM alpine:3.20 as certs -RUN apk --update add ca-certificates -ARG TARGETARCH -COPY bin/linux/tempo-query-${TARGETARCH} /tempo-query +FROM alpine:latest AS ca-certificates +RUN apk add --update --no-cache ca-certificates -RUN addgroup -g 10001 -S tempo && \ - adduser -u 10001 -S tempo -G tempo +FROM gcr.io/distroless/static-debian12:debug -USER 10001:10001 +SHELL ["/busybox/sh", "-c"] + +RUN ["/busybox/addgroup", "-g", "10001", "-S", "tempo"] +RUN ["/busybox/adduser", "-u", "10001", "-S", "tempo", "-G", "tempo"] +ARG TARGETARCH COPY bin/linux/tempo-query-${TARGETARCH} /tempo-query +COPY 
--from=ca-certificates /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ + +USER 10001:10001 ENTRYPOINT ["/tempo-query"] diff --git a/cmd/tempo-query/tempo/plugin.go b/cmd/tempo-query/tempo/plugin.go index 25708d78275..ce84f3cea8f 100644 --- a/cmd/tempo-query/tempo/plugin.go +++ b/cmd/tempo-query/tempo/plugin.go @@ -18,18 +18,17 @@ import ( "github.com/gogo/protobuf/jsonpb" tlsCfg "github.com/grafana/dskit/crypto/tls" "github.com/grafana/dskit/user" - "github.com/grafana/tempo/pkg/tempopb" + jaeger "github.com/jaegertracing/jaeger/model" "github.com/jaegertracing/jaeger/proto-gen/storage_v1" + jaeger_spanstore "github.com/jaegertracing/jaeger/storage/spanstore" + ot_jaeger "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger" "go.opentelemetry.io/collector/pdata/ptrace" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/propagation" "go.uber.org/zap" "google.golang.org/grpc/metadata" - jaeger "github.com/jaegertracing/jaeger/model" - jaeger_spanstore "github.com/jaegertracing/jaeger/storage/spanstore" - - ot_jaeger "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger" + "github.com/grafana/tempo/pkg/tempopb" ) const ( @@ -245,10 +244,7 @@ func (b *Backend) getTrace(ctx context.Context, traceID jaeger.TraceID) (*jaeger return nil, fmt.Errorf("error unmarshalling body to otlp trace %v: %w", traceID, err) } - jaegerBatches, err := ot_jaeger.ProtoFromTraces(otTrace) - if err != nil { - return nil, fmt.Errorf("error translating to jaegerBatches %v: %w", traceID, err) - } + jaegerBatches := ot_jaeger.ProtoFromTraces(otTrace) jaegerTrace := &jaeger.Trace{ Spans: []*jaeger.Span{}, @@ -372,7 +368,7 @@ func (b *Backend) FindTraces(req *storage_v1.FindTracesRequest, stream storage_v for i := 0; i < len(resp.TraceIDs); i++ { result := <-results if result.err != nil { - //// TODO this seems to be an internal inconsistency error, ignore so we can still show the rest + // TODO this seems to be an internal inconsistency error, ignore so we can still show the rest b.logger.Info("failed to get a trace", zap.Error(err), zap.String("traceid", result.traceID.String())) span.AddEvent(fmt.Sprintf("could not get trace for traceID %v", result.traceID)) span.RecordError(err) diff --git a/cmd/tempo-serverless/Makefile b/cmd/tempo-serverless/Makefile deleted file mode 100644 index 78ddcd78c54..00000000000 --- a/cmd/tempo-serverless/Makefile +++ /dev/null @@ -1,53 +0,0 @@ -PACK=pack -VERSION=$(shell ../../tools/image-tag | cut -d, -f 1) - -IN_CLOUD_RUN=cd cloud-run && -IN_LAMBDA=cd lambda && - -LOWER_VERSION = `echo $(VERSION) | tr A-Z a-z` - -# -# build docker images for local testing and code zip files for google cloud run -# -.PHONY: build-docker -build-docker: build-docker-lambda-test build-docker-gcr - -# -# google cloud run -# -.PHONY: build-docker-gcr-binary -build-docker-gcr-binary: - $(IN_CLOUD_RUN) CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o main - -.PHONY: build-docker-gcr -build-docker-gcr: build-docker-gcr-binary - $(IN_CLOUD_RUN) docker build -f ./Dockerfile -t tempo-serverless:$(LOWER_VERSION) . - $(IN_CLOUD_RUN) rm main - docker tag tempo-serverless:$(LOWER_VERSION) tempo-serverless:latest - -# -# aws lambda -# -.PHONY: build-docker-lambda-test -build-docker-lambda-test: - $(IN_LAMBDA) CGO_ENABLED=0 go build -o ./lambda - $(IN_LAMBDA) docker build -f ./Dockerfile -t tempo-serverless-lambda . - -# Lambda zips expect a compiled executable in the root. The filename "bootstrap" is important here. 
-# The new AWS Lambda runtime expects an executable with the name "bootstrap" to be provided, the "handler" configuration is ignored when using the new runtime. -# See https://aws.amazon.com/blogs/compute/migrating-aws-lambda-functions-from-the-go1-x-runtime-to-the-custom-runtime-on-amazon-linux-2/ for more info. -.PHONY: build-lambda-zip -build-lambda-zip: - $(IN_LAMBDA) CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o bootstrap - $(IN_LAMBDA) zip tempo-serverless-$(VERSION).zip bootstrap - $(IN_LAMBDA) rm bootstrap - -.PHONY: test -test: - go test -v . - -### Tidy dependencies for tempo-serverless module -.PHONY: update-mod -update-mod: - $(IN_LAMBDA) go mod tidy -e - $(IN_CLOUD_RUN) go mod tidy -e diff --git a/cmd/tempo-serverless/cloud-run/Dockerfile b/cmd/tempo-serverless/cloud-run/Dockerfile deleted file mode 100644 index a79ceef06bd..00000000000 --- a/cmd/tempo-serverless/cloud-run/Dockerfile +++ /dev/null @@ -1,15 +0,0 @@ -# -# docker run -p 8080:8080 tempo-serverless -# -# to exercise the function -# curl http://localhost:8080/?start=1000&end=1001&... -# - -# -# build the serverless container image -# -# todo: FROM scratch saves ~5MB which could be meaningful in a serverless setting, but using scratch gave strange errors on query. -FROM alpine:3.20 as certs -RUN apk --update add ca-certificates -COPY ./main /main -ENTRYPOINT ["/main"] diff --git a/cmd/tempo-serverless/cloud-run/go.mod b/cmd/tempo-serverless/cloud-run/go.mod deleted file mode 100644 index d093aeadef8..00000000000 --- a/cmd/tempo-serverless/cloud-run/go.mod +++ /dev/null @@ -1,150 +0,0 @@ -module github.com/grafana/tempo/cmd/tempo-serverless/cloud-run - -go 1.23.3 - -require ( - github.com/gogo/protobuf v1.3.2 - github.com/grafana/tempo v1.2.0-rc.0.0.20211029120833-dee59ebe564c -) - -require ( - cloud.google.com/go v0.115.0 // indirect - cloud.google.com/go/auth v0.7.3 // indirect - cloud.google.com/go/auth/oauth2adapt v0.2.3 // indirect - cloud.google.com/go/compute/metadata v0.5.0 // indirect - cloud.google.com/go/iam v1.1.12 // indirect - cloud.google.com/go/storage v1.41.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect - github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 // indirect - github.com/andybalholm/brotli v1.1.1 // indirect - github.com/apache/thrift v0.20.0 // indirect - github.com/aws/aws-sdk-go v1.55.5 // indirect - github.com/beorn7/perks v1.0.1 // indirect - github.com/cespare/xxhash v1.1.0 // indirect - github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/coreos/go-systemd/v22 v22.5.0 // indirect - github.com/cristalhq/hedgedhttp v0.9.1 // indirect - github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect - github.com/dustin/go-humanize v1.0.1 // indirect - github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb // indirect - github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/go-ini/ini v1.67.0 // indirect - github.com/go-kit/log v0.2.1 // indirect - github.com/go-logfmt/logfmt v0.6.0 // indirect - github.com/go-logr/logr v1.4.2 // indirect - github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-redis/redis/v8 v8.11.5 // indirect - github.com/goccy/go-json v0.10.3 
// indirect - github.com/gogo/googleapis v1.4.1 // indirect - github.com/gogo/status v1.1.1 // indirect - github.com/golang-jwt/jwt/v5 v5.2.1 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.4 // indirect - github.com/golang/snappy v0.0.4 // indirect - github.com/google/s2a-go v0.1.8 // indirect - github.com/google/uuid v1.6.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.13.0 // indirect - github.com/gorilla/mux v1.8.1 // indirect - github.com/grafana/dskit v0.0.0-20241115082728-f2a7eb3aa0e9 // indirect - github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56 // indirect - github.com/grafana/pyroscope-go/godeltaprof v0.1.8 // indirect - github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect - github.com/hashicorp/hcl v1.0.0 // indirect - github.com/jaegertracing/jaeger v1.57.0 // indirect - github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/jpillora/backoff v1.0.0 // indirect - github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.11 // indirect - github.com/klauspost/cpuid/v2 v2.2.8 // indirect - github.com/kylelemons/godebug v1.1.0 // indirect - github.com/magiconair/properties v1.8.7 // indirect - github.com/mattn/go-runewidth v0.0.16 // indirect - github.com/miekg/dns v1.1.61 // indirect - github.com/minio/md5-simd v1.1.2 // indirect - github.com/minio/minio-go/v7 v7.0.80 // indirect - github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect - github.com/olekukonko/tablewriter v0.0.5 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.102.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.102.0 // indirect - github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e // indirect - github.com/opentracing-contrib/go-stdlib v1.0.0 // indirect - github.com/opentracing/opentracing-go v1.2.0 // indirect - github.com/parquet-go/parquet-go v0.23.1-0.20241011155651-6446d1d0d2fe // indirect - github.com/pelletier/go-toml/v2 v2.1.0 // indirect - github.com/pierrec/lz4/v4 v4.1.21 // indirect - github.com/pires/go-proxyproto v0.7.0 // indirect - github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect - github.com/pkg/errors v0.9.1 // indirect - github.com/prometheus/client_golang v1.19.1 // indirect - github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.55.0 // indirect - github.com/prometheus/exporter-toolkit v0.11.0 // indirect - github.com/prometheus/procfs v0.15.1 // indirect - github.com/prometheus/prometheus v0.54.0 // indirect - github.com/rivo/uniseg v0.4.7 // indirect - github.com/rs/xid v1.6.0 // indirect - github.com/sagikazarmark/locafero v0.4.0 // indirect - github.com/sagikazarmark/slog-shim v0.1.0 // indirect - github.com/sercand/kuberesolver/v5 v5.1.1 // indirect - github.com/sony/gobreaker v0.4.1 // indirect - github.com/sourcegraph/conc v0.3.0 // indirect - github.com/spaolacci/murmur3 v1.1.0 // indirect - github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.6.0 // indirect - 
github.com/spf13/pflag v1.0.5 // indirect - github.com/spf13/viper v1.18.2 // indirect - github.com/subosito/gotenv v1.6.0 // indirect - github.com/uber-go/atomic v1.4.0 // indirect - github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect - github.com/uber/jaeger-lib v2.4.1+incompatible // indirect - github.com/willf/bitset v1.1.11 // indirect - github.com/willf/bloom v2.0.3+incompatible // indirect - go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/collector/pdata v1.12.0 // indirect - go.opentelemetry.io/collector/semconv v0.105.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 // indirect - go.opentelemetry.io/otel v1.31.0 // indirect - go.opentelemetry.io/otel/metric v1.31.0 // indirect - go.opentelemetry.io/otel/trace v1.31.0 // indirect - go.uber.org/atomic v1.11.0 // indirect - go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.29.0 // indirect - golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect - golang.org/x/mod v0.19.0 // indirect - golang.org/x/net v0.31.0 // indirect - golang.org/x/oauth2 v0.21.0 // indirect - golang.org/x/sync v0.9.0 // indirect - golang.org/x/sys v0.27.0 // indirect - golang.org/x/text v0.20.0 // indirect - golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.23.0 // indirect - google.golang.org/api v0.190.0 // indirect - google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf // indirect - google.golang.org/grpc v1.65.0 // indirect - google.golang.org/protobuf v1.34.2 // indirect - gopkg.in/ini.v1 v1.67.0 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect -) - -replace github.com/grafana/tempo => ../../../ - -replace ( - github.com/bradfitz/gomemcache => github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab - k8s.io/api => k8s.io/api v0.20.4 - k8s.io/client-go => k8s.io/client-go v0.20.4 -) diff --git a/cmd/tempo-serverless/cloud-run/go.sum b/cmd/tempo-serverless/cloud-run/go.sum deleted file mode 100644 index 913d61f4b65..00000000000 --- a/cmd/tempo-serverless/cloud-run/go.sum +++ /dev/null @@ -1,474 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14= -cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU= -cloud.google.com/go/auth v0.7.3 h1:98Vr+5jMaCZ5NZk6e/uBgf60phTk/XN84r8QEWB9yjY= -cloud.google.com/go/auth v0.7.3/go.mod h1:HJtWUx1P5eqjy/f6Iq5KeytNpbAcGolPhOgyop2LlzA= -cloud.google.com/go/auth/oauth2adapt v0.2.3 h1:MlxF+Pd3OmSudg/b1yZ5lJwoXCEaeedAguodky1PcKI= -cloud.google.com/go/auth/oauth2adapt v0.2.3/go.mod h1:tMQXOfZzFuNuUxOypHlQEXgdfX5cuhwU+ffUuXRJE8I= -cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= -cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= -cloud.google.com/go/iam v1.1.12 h1:JixGLimRrNGcxvJEQ8+clfLxPlbeZA6MuRJ+qJNQ5Xw= -cloud.google.com/go/iam v1.1.12/go.mod h1:9LDX8J7dN5YRyzVHxwQzrQs9opFFqn0Mxs9nAeB+Hhg= -cloud.google.com/go/storage v1.41.0 h1:RusiwatSu6lHeEXe3kglxakAmAbfV+rhtPqA6i8RBx0= -cloud.google.com/go/storage v1.41.0/go.mod 
h1:J1WCa/Z2FcgdEDuPUY8DxT5I+d9mFKsCepp5vR6Sq80= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 h1:JZg6HRh6W6U4OLl6lk7BZ7BLisIzM9dG1R50zUk9C/M= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0/go.mod h1:YL1xnZ6QejvQHWJrX/AvhFl4WW4rqHVoKspWNVwFk0M= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 h1:B/dfvscEQtew9dVuoxqxrUKKv8Ih2f55PydknDamU+g= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0/go.mod h1:fiPSssYvltE08HJchL04dOy+RD4hgrjph0cwGGMntdI= -github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0 h1:+m0M/LFxN43KvULkDNfdXOgrjtg6UYJPFBJyuEcRCAw= -github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0/go.mod h1:PwOyop78lveYMRs6oCxjiVyBdyCgIYH6XHIVZO9/SFQ= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.2.0 h1:Ma67P/GGprNwsslzEH6+Kb8nybI8jpDTm4Wmzu2ReK8= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.2.0/go.mod h1:c+Lifp3EDEamAkPVzMooRNOK6CZjNSdEnf1A7jsI9u4= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 h1:gggzg0SUMs6SQbEw+3LoSsYf9YMjkupeAnHMX8O9mmY= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0/go.mod h1:+6KLcKIVgxoBDMqMO/Nvy7bZ9a0nbU3I1DtFQK3YvB4= -github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM= -github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= -github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= -github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 h1:t3eaIm0rUkzbrIewtiFmMK5RXHej2XnoXNhxVsAYUfg= -github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= -github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk= -github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= -github.com/alicebob/miniredis v2.5.0+incompatible h1:yBHoLpsyjupjz3NL3MhKMVkR41j82Yjf3KFv7ApYzUI= -github.com/alicebob/miniredis/v2 v2.21.0 h1:CdmwIlKUWFBDS+4464GtQiQ0R1vpzOgu4Vnd74rBL7M= -github.com/alicebob/miniredis/v2 v2.21.0/go.mod h1:XNqvJdQJv5mSuVMc0ynneafpnL/zv52acZ6kqeS0t88= -github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA= -github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA= -github.com/apache/thrift v0.20.0 h1:631+KvYbsBZxmuJjYwhezVsrfc/TbqtZV4QcxOX1fOI= -github.com/apache/thrift v0.20.0/go.mod h1:hOk1BQqcp2OLzGsyVXdfMk7YFlMxK3aoEVhjD06QhB8= -github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= 
-github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= -github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cristalhq/hedgedhttp v0.9.1 h1:g68L9cf8uUyQKQJwciD0A1Vgbsz+QgCjuB1I8FAsCDs= -github.com/cristalhq/hedgedhttp v0.9.1/go.mod h1:XkqWU6qVMutbhW68NnzjWrGtH8NUx1UfYqGYtHVKIsI= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= -github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= -github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM= -github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc= -github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= -github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= -github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= -github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= -github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/go-ini/ini v1.67.0 
h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= -github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= -github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= -github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= -github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= -github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= -github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= -github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= -github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= -github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0= -github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/gogo/status v1.1.1 h1:DuHXlSFHNKqTQ+/ACf5Vs6r4X/dH2EgIzR9Vr+H65kg= -github.com/gogo/status v1.1.1/go.mod h1:jpG3dM5QPcqu19Hg8lkUhBFBa3TcLs1DG7+2Jqci7oU= -github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= -github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.3/go.mod 
h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= -github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= -github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= -github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= -github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s= -github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= -github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= -github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= -github.com/grafana/dskit v0.0.0-20241115082728-f2a7eb3aa0e9 h1:Dx7+6aU/fhwD2vkMr0PUcyxGat1sjUssHAKQKaS7sDM= -github.com/grafana/dskit v0.0.0-20241115082728-f2a7eb3aa0e9/go.mod h1:SPLNCARd4xdjCkue0O6hvuoveuS1dGJjDnfxYe405YQ= -github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56 h1:X8IKQ0wu40wpvYcKfBcc5T4QnhdQjUhtUtB/1CY89lE= -github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56/go.mod h1:PGk3RjYHpxMM8HFPhKKo+vve3DdlPUELZLSDEFehPuU= -github.com/grafana/pyroscope-go/godeltaprof v0.1.8 h1:iwOtYXeeVSAeYefJNaxDytgjKtUuKQbJqgAIjlnicKg= -github.com/grafana/pyroscope-go/godeltaprof v0.1.8/go.mod h1:2+l7K7twW49Ct4wFluZD3tZ6e0SjanjcUUBPVD/UuGU= -github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248= -github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk= -github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= -github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= -github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= 
-github.com/hashicorp/go-plugin v1.6.0 h1:wgd4KxHJTVGGqWBq4QPB1i5BZNEx9BR8+OFmHDmTk8A= -github.com/hashicorp/go-plugin v1.6.0/go.mod h1:lBS5MtSSBZk0SHc66KACcjjlU6WzEVP/8pwz68aMkCI= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= -github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= -github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= -github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= -github.com/jaegertracing/jaeger v1.57.0 h1:3wDtUUPs6NRYH7+d+y8MilDkLHdpPrVlQ2wbcsA62bs= -github.com/jaegertracing/jaeger v1.57.0/go.mod h1:p/1fxIU9hKHl7qEhKC72p2ZYVhvvZvNB73y6V7YyuTs= -github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= -github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6 h1:IsMZxCuZqKuao2vNdfD82fjjgPLfyHLpR41Z88viRWs= -github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6/go.mod h1:3VeWNIJaW+O5xpRQbPp0Ybqu1vJd/pm7s2F473HRrkw= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= -github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= -github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= -github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= -github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= -github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= 
-github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= -github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs= -github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ= -github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= -github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.80 h1:2mdUHXEykRdY/BigLt3Iuu1otL0JTogT0Nmltg0wujk= -github.com/minio/minio-go/v7 v7.0.80/go.mod h1:84gmIilaX4zcvAWWzJ5Z1WI5axN+hAbM5w25xf8xvC0= -github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE= -github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= -github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= -github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= -github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/gomega v1.24.0 h1:+0glovB9Jd6z3VR+ScSwQqXVTIfJcGA9UBM8yzQxhqg= -github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.102.0 h1:qsM5HhWpAfIMg8LdO4u+CHofu4UuCuJwg/M+ySO9uZA= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.102.0/go.mod h1:wBJlGy9Wx6s7AxIMcSne2sGw73e5ZUy1AQ/duYwpFf8= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.102.0 h1:4VQidhCgkJiBvBDMOukr5ixrf5uP66iW5Hb+CFsb+4E= 
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.102.0/go.mod h1:nMto9zkv0vD8YI3oGZFZS2Uu7k2oHt1d+xUHN/ofUYo= -github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e h1:4cPxUYdgaGzZIT5/j0IfqOrrXmq6bG8AwvwisMXpdrg= -github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo= -github.com/opentracing-contrib/go-stdlib v1.0.0 h1:TBS7YuVotp8myLon4Pv7BtCBzOTo1DeZCld0Z63mW2w= -github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= -github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= -github.com/parquet-go/parquet-go v0.23.1-0.20241011155651-6446d1d0d2fe h1:oUJ5TPnrEK/z+/PeoLL+jCgfngAZIDMyhZASetRcYYg= -github.com/parquet-go/parquet-go v0.23.1-0.20241011155651-6446d1d0d2fe/go.mod h1:OqBBRGBl7+llplCvDMql8dEKaDqjaFA/VAPw+OJiNiw= -github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= -github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= -github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= -github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pires/go-proxyproto v0.7.0 h1:IukmRewDQFWC7kfnb66CSomk2q/seBuilHBYFwyq0Hs= -github.com/pires/go-proxyproto v0.7.0/go.mod h1:Vz/1JPY/OACxWGQNIRY2BeyDmpoaWmEP40O9LbuiFR4= -github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= -github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= -github.com/prometheus/exporter-toolkit v0.11.0 h1:yNTsuZ0aNCNFQ3aFTD2uhPOvr4iD7fdBvKPAEGkNf+g= -github.com/prometheus/exporter-toolkit v0.11.0/go.mod h1:BVnENhnNecpwoTLiABx7mrPB/OLRIgN74qlQbV+FK1Q= -github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= -github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/prometheus/prometheus v0.54.0 h1:6+VmEkohHcofl3W5LyRlhw1Lfm575w/aX6ZFyVAmzM0= 
-github.com/prometheus/prometheus v0.54.0/go.mod h1:xlLByHhk2g3ycakQGrMaU8K7OySZx98BzeCR99991NY= -github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4= -github.com/redis/go-redis/v9 v9.6.1/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA= -github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= -github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU= -github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= -github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= -github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= -github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= -github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= -github.com/sercand/kuberesolver/v5 v5.1.1 h1:CYH+d67G0sGBj7q5wLK61yzqJJ8gLLC8aeprPTHb6yY= -github.com/sercand/kuberesolver/v5 v5.1.1/go.mod h1:Fs1KbKhVRnB2aDWN12NjKCB+RgYMWZJ294T3BtmVCpQ= -github.com/sony/gobreaker v0.4.1 h1:oMnRNZXX5j85zso6xCPRNPtmAycat+WcoKbklScLDgQ= -github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= -github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= -github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= -github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= -github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= -github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= -github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= -github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= -github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= 
-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= -github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= -github.com/uber-go/atomic v1.4.0 h1:yOuPqEq4ovnhEjpHmfFwsqBXDYbQeT6Nb0bwD6XnD5o= -github.com/uber-go/atomic v1.4.0/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g= -github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= -github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= -github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg= -github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= -github.com/willf/bitset v1.1.11 h1:N7Z7E9UvjW+sGsEl7k/SJrvY2reP1A07MrGuCjIOjRE= -github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= -github.com/willf/bloom v2.0.3+incompatible h1:QDacWdqcAUI1MPOwIQZRy9kOR7yxfyEmxX8Wdm2/JPA= -github.com/willf/bloom v2.0.3+incompatible/go.mod h1:MmAltL9pDMNTrvUkxdg0k0q5I0suxmuwp3KbyrZLOZ8= -github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU= -github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/gopher-lua v0.0.0-20220504180219-658193537a64 h1:5mLPGnFdSsevFRFc9q3yYbBkB6tsm4aCwwQV/j1JQAQ= -github.com/yuin/gopher-lua v0.0.0-20220504180219-658193537a64/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= -go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector/pdata v1.12.0 h1:Xx5VK1p4VO0md8MWm2icwC1MnJ7f8EimKItMWw46BmA= -go.opentelemetry.io/collector/pdata v1.12.0/go.mod h1:MYeB0MmMAxeM0hstCFrCqWLzdyeYySim2dG6pDT6nYI= -go.opentelemetry.io/collector/semconv v0.105.0 h1:8p6dZ3JfxFTjbY38d8xlQGB1TQ3nPUvs+D0RERniZ1g= -go.opentelemetry.io/collector/semconv v0.105.0/go.mod h1:yMVUCNoQPZVq/IPfrHrnntZTWsLf5YGZ7qwKulIl5hw= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 h1:vS1Ao/R55RNV4O7TA2Qopok8yN+X0LIP6RVWLFkprck= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0/go.mod h1:BMsdeOxN04K0L5FNUBfjFdvwWGNe/rkmSwH4Aelu/X0= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 h1:ZIg3ZT/aQ7AfKqdwp7ECpOK6vHqquXXuyTjIO8ZdmPs= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0/go.mod h1:DQAwmETtZV00skUwgD6+0U89g80NKsJE3DCKeLLPQMI= -go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= -go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= -go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= -go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= -go.opentelemetry.io/otel/sdk v1.31.0 
h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= -go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= -go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= -go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= -go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= -go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= -go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= -go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= -go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= -go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= -go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ= -golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM= -golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= -golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo= -golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= -golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= -golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= -golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= -golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= 
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= -golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= -google.golang.org/api v0.190.0 h1:ASM+IhLY1zljNdLu19W1jTmU6A+gMk6M46Wlur61s+Q= -google.golang.org/api v0.190.0/go.mod h1:QIr6I9iedBLnfqoD6L6Vze1UvS5Hzj5r2aUBOaZnLHo= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf h1:OqdXDEakZCVtDiZTjcxfwbHPCT11ycCEsTKesBVKvyY= -google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:mCr1K1c8kX+1iSBREvU3Juo11CB+QOEWxbRS01wWl5M= -google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f h1:b1Ln/PG8orm0SsBbHZWke8dDp2lrCD4jSmfglFpTZbk= -google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f/go.mod h1:AHT0dDg3SoMOgZGnZk29b5xTbPHMoEC8qthmBLJCpys= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf h1:liao9UHurZLtiEwBgT9LMOnKYsHze6eA6w1KQCMVN2Q= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= -google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod 
h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= -gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/cmd/tempo-serverless/cloud-run/main.go b/cmd/tempo-serverless/cloud-run/main.go
deleted file mode 100644
index e99d2e63c31..00000000000
--- a/cmd/tempo-serverless/cloud-run/main.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package main
-
-import (
-	"log"
-	"net/http"
-	"os"
-
-	"github.com/gogo/protobuf/jsonpb"
-	serverless "github.com/grafana/tempo/cmd/tempo-serverless"
-)
-
-func main() {
-	log.Print("starting server...")
-	http.HandleFunc("/", handler)
-
-	// Determine port for HTTP service.
-	port := os.Getenv("PORT")
-	if port == "" {
-		port = "8080"
-		log.Printf("defaulting to port %s", port)
-	}
-
-	// Start HTTP server.
- log.Printf("listening on port %s", port) - if err := http.ListenAndServe(":"+port, nil); err != nil { - log.Fatal(err) - } -} - -func handler(w http.ResponseWriter, r *http.Request) { - resp, httpErr := serverless.Handler(r) - if httpErr != nil { - http.Error(w, httpErr.Err.Error(), httpErr.Status) - } - - marshaller := &jsonpb.Marshaler{} - err := marshaller.Marshal(w, resp) - if httpErr != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } -} diff --git a/cmd/tempo-serverless/handler.go b/cmd/tempo-serverless/handler.go deleted file mode 100644 index 13a877a1fc5..00000000000 --- a/cmd/tempo-serverless/handler.go +++ /dev/null @@ -1,250 +0,0 @@ -package serverless - -import ( - "bytes" - "context" - "fmt" - "net/http" - "reflect" - "runtime" - "strings" - "sync" - - "github.com/grafana/dskit/flagext" - "github.com/grafana/dskit/user" - "github.com/mitchellh/mapstructure" - "github.com/spf13/viper" - "gopkg.in/yaml.v2" - - "github.com/grafana/tempo/pkg/api" - "github.com/grafana/tempo/pkg/tempopb" - "github.com/grafana/tempo/pkg/traceql" - "github.com/grafana/tempo/tempodb" - "github.com/grafana/tempo/tempodb/backend" - "github.com/grafana/tempo/tempodb/backend/azure" - "github.com/grafana/tempo/tempodb/backend/gcs" - "github.com/grafana/tempo/tempodb/backend/local" - "github.com/grafana/tempo/tempodb/backend/s3" - "github.com/grafana/tempo/tempodb/encoding" - "github.com/grafana/tempo/tempodb/encoding/common" -) - -const ( - envConfigPrefix = "TEMPO" -) - -// used to initialize a reader one time -var ( - reader backend.Reader - readerErr error - readerConfig *tempodb.Config - readerOnce sync.Once -) - -type HTTPError struct { - Err error - Status int -} - -// Handler is the main entrypoint for the serverless handler. 
-// Handler is the main entrypoint for the serverless handler. It expects a tempopb.SearchBlockRequest
-// encoded in its parameters.
-func Handler(r *http.Request) (*tempopb.SearchResponse, *HTTPError) {
-	searchReq, err := api.ParseSearchBlockRequest(r)
-	if err != nil {
-		return nil, httpError("parsing search request", err, http.StatusBadRequest)
-	}
-
-	maxBytes, err := api.ExtractServerlessParams(r)
-	if err != nil {
-		return nil, httpError("extracting serverless params", err, http.StatusBadRequest)
-	}
-
-	// load config; fields are set through env vars prefixed with TEMPO_
-	reader, cfg, err := loadBackend()
-	if err != nil {
-		return nil, httpError("loading backend", err, http.StatusInternalServerError)
-	}
-
-	tenant, _, err := user.ExtractOrgIDFromHTTPRequest(r)
-	if err != nil {
-		return nil, httpError("extracting org id", err, http.StatusBadRequest)
-	}
-
-	blockID, err := backend.ParseUUID(searchReq.BlockID)
-	if err != nil {
-		return nil, httpError("parsing uuid", err, http.StatusBadRequest)
-	}
-
-	enc, err := backend.ParseEncoding(searchReq.Encoding)
-	if err != nil {
-		return nil, httpError("parsing encoding", err, http.StatusBadRequest)
-	}
-
-	dc, err := backend.DedicatedColumnsFromTempopb(searchReq.DedicatedColumns)
-	if err != nil {
-		return nil, httpError("parsing dedicated columns", err, http.StatusBadRequest)
-	}
-
-	// /giphy so meta
-	meta := &backend.BlockMeta{
-		Version:          searchReq.Version,
-		TenantID:         tenant,
-		Encoding:         enc,
-		IndexPageSize:    searchReq.IndexPageSize,
-		TotalRecords:     searchReq.TotalRecords,
-		BlockID:          blockID,
-		DataEncoding:     searchReq.DataEncoding,
-		Size_:            searchReq.Size_,
-		FooterSize:       searchReq.FooterSize,
-		DedicatedColumns: dc,
-	}
-
-	block, err := encoding.OpenBlock(meta, reader)
-	if err != nil {
-		return nil, httpError("creating backend block", err, http.StatusInternalServerError)
-	}
-
-	opts := common.SearchOptions{
-		StartPage:  int(searchReq.StartPage),
-		TotalPages: int(searchReq.PagesToSearch),
-		MaxBytes:   maxBytes,
-	}
-	cfg.Search.ApplyToOptions(&opts)
-
-	var resp *tempopb.SearchResponse
-
-	if api.IsTraceQLQuery(searchReq.SearchReq) {
-		engine := traceql.NewEngine()
-
-		spansetFetcher := traceql.NewSpansetFetcherWrapper(func(ctx context.Context, req traceql.FetchSpansRequest) (traceql.FetchSpansResponse, error) {
-			return block.Fetch(ctx, req, opts)
-		})
-		resp, err = engine.ExecuteSearch(r.Context(), searchReq.SearchReq, spansetFetcher)
-		if err != nil {
-			return nil, httpError("searching block", err, http.StatusInternalServerError)
-		}
-	} else {
-		resp, err = block.Search(r.Context(), searchReq.SearchReq, opts)
-		if err != nil {
-			return nil, httpError("searching block", err, http.StatusInternalServerError)
-		}
-	}
-
-	runtime.GC()
-
-	return resp, nil
-}
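Aside: the loadBackend function just below memoizes its work with sync.Once, so a warm serverless instance builds its backend reader exactly once and serves every later request from the cached value. A minimal, self-contained sketch of that pattern (all names here are illustrative, not Tempo's):

package main

import (
	"fmt"
	"sync"
)

var (
	client     string // stand-in for an expensive backend reader
	clientErr  error
	clientOnce sync.Once
)

// getClient runs the expensive setup exactly once, even when warm
// invocations race; both the result and any error are cached.
func getClient() (string, error) {
	clientOnce.Do(func() {
		// imagine credential loading and bucket-client construction here
		client, clientErr = "connected-reader", nil
	})
	return client, clientErr
}

func main() {
	for i := 0; i < 3; i++ {
		c, err := getClient() // the Do body only ran on the first call
		fmt.Println(c, err)
	}
}

One consequence worth noting: because the error is cached alongside the reader, a transient failure on the first call is never retried for the life of the process.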
-
-func loadBackend() (backend.Reader, *tempodb.Config, error) {
-	readerOnce.Do(func() {
-		cfg, err := loadConfig()
-		if err != nil {
-			readerErr = err
-			return
-		}
-
-		var r backend.RawReader
-
-		// Create the backend with NewNoConfirm() to prevent an extra call to the various backends on
-		// startup. This extra call exists just to confirm the bucket is accessible and force the
-		// standard Tempo components to fail during startup. If permissions are not correct this Lambda
-		// will fail instantly anyway, and in a heavy query environment the extra calls start to add up.
-		switch cfg.Backend {
-		case backend.Local:
-			err = fmt.Errorf("local backend not supported for serverless functions")
-		case backend.GCS:
-			r, _, _, err = gcs.NewNoConfirm(cfg.GCS)
-		case backend.S3:
-			r, _, _, err = s3.NewNoConfirm(cfg.S3)
-		case backend.Azure:
-			r, _, _, err = azure.NewNoConfirm(cfg.Azure)
-		default:
-			err = fmt.Errorf("unknown backend %s", cfg.Backend)
-		}
-		if err != nil {
-			readerErr = err
-			return
-		}
-
-		readerConfig = cfg
-		reader = backend.NewReader(r)
-	})
-
-	return reader, readerConfig, readerErr
-}
-
-func loadConfig() (*tempodb.Config, error) {
-	defaultConfig := &tempodb.Config{
-		Search: &tempodb.SearchConfig{
-			ChunkSizeBytes:     tempodb.DefaultSearchChunkSizeBytes,
-			PrefetchTraceCount: tempodb.DefaultPrefetchTraceCount,
-		},
-		Local: &local.Config{},
-		GCS:   &gcs.Config{},
-		S3:    &s3.Config{},
-		Azure: &azure.Config{},
-	}
-
-	// horrible viper dance since it won't unmarshal to a struct from env: https://github.com/spf13/viper/issues/188
-	v := viper.NewWithOptions()
-	b, err := yaml.Marshal(defaultConfig)
-	if err != nil {
-		return nil, fmt.Errorf("failed to marshal default config: %w", err)
-	}
-	v.SetConfigType("yaml")
-	if err = v.MergeConfig(bytes.NewReader(b)); err != nil {
-		return nil, fmt.Errorf("failed to merge config: %w", err)
-	}
-
-	v.AutomaticEnv()
-	v.SetEnvPrefix(envConfigPrefix)
-	v.SetEnvKeyReplacer(strings.NewReplacer(".", "_", "-", "_"))
-
-	cfg := &tempodb.Config{}
-	err = v.Unmarshal(cfg, setTagName, setDecodeHooks)
-	if err != nil {
-		return nil, fmt.Errorf("failed to unmarshal config: %w", err)
-	}
-
-	return cfg, nil
-}
-
-// this forces mapstructure to use the yaml tag that is already defined
-// for all config struct fields
-func setTagName(d *mapstructure.DecoderConfig) {
-	d.TagName = "yaml"
-}
-
-// install all required decode hooks so that viper will parse the yaml properly
-func setDecodeHooks(c *mapstructure.DecoderConfig) {
-	c.DecodeHook = mapstructure.ComposeDecodeHookFunc(
-		mapstructure.StringToTimeDurationHookFunc(),
-		mapstructure.StringToSliceHookFunc(","),
-		stringToFlagExt(),
-	)
-}
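For reference, the dance above boils down to four moves: marshal the defaults to yaml, merge that yaml into viper so it learns which keys exist, switch on prefixed env overrides, then unmarshal using the yaml tags. A trimmed-down, runnable sketch against a hypothetical two-field config (error handling elided):

package main

import (
	"bytes"
	"fmt"
	"strings"

	"github.com/mitchellh/mapstructure"
	"github.com/spf13/viper"
	"gopkg.in/yaml.v2"
)

type Config struct {
	Backend    string `yaml:"backend"`
	BucketName string `yaml:"bucket-name"`
}

func main() {
	defaults := &Config{Backend: "local"}

	// viper only applies env overrides to keys it already knows about,
	// so seed it with the marshaled defaults first (viper issue 188).
	v := viper.New()
	v.SetConfigType("yaml")
	b, _ := yaml.Marshal(defaults)
	_ = v.MergeConfig(bytes.NewReader(b))

	// TEMPO_BACKEND and TEMPO_BUCKET_NAME now override the defaults.
	v.AutomaticEnv()
	v.SetEnvPrefix("TEMPO")
	v.SetEnvKeyReplacer(strings.NewReplacer(".", "_", "-", "_"))

	cfg := &Config{}
	// reuse the yaml tags rather than mapstructure's defaults
	_ = v.Unmarshal(cfg, func(d *mapstructure.DecoderConfig) { d.TagName = "yaml" })
	fmt.Printf("%+v\n", cfg)
}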
-
-// stringToFlagExt returns a DecodeHookFunc that converts
-// strings to dskit.FlagExt values.
-func stringToFlagExt() mapstructure.DecodeHookFunc {
-	return func(
-		f reflect.Type,
-		t reflect.Type,
-		data interface{},
-	) (interface{}, error) {
-		if f.Kind() != reflect.String {
-			return data, nil
-		}
-		if t != reflect.TypeOf(flagext.Secret{}) {
-			return data, nil
-		}
-		return flagext.SecretWithValue(data.(string)), nil
-	}
-}
-
-func httpError(action string, err error, status int) *HTTPError {
-	return &HTTPError{
-		Err:    fmt.Errorf("serverless [%s]: %w", action, err),
-		Status: status,
-	}
-}
diff --git a/cmd/tempo-serverless/handler_test.go b/cmd/tempo-serverless/handler_test.go
deleted file mode 100644
index f00fac2df45..00000000000
--- a/cmd/tempo-serverless/handler_test.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package serverless
-
-import (
-	"os"
-	"testing"
-	"time"
-
-	"github.com/grafana/tempo/tempodb/backend"
-)
-
-func TestLoadConfig(t *testing.T) {
-	os.Setenv("TEMPO_GCS_BUCKET_NAME", "some-random-gcs-bucket")
-	os.Setenv("TEMPO_GCS_CHUNK_BUFFER_SIZE", "10")
-	os.Setenv("TEMPO_GCS_HEDGE_REQUESTS_AT", "400ms")
-	os.Setenv("TEMPO_BACKEND", backend.GCS)
-
-	// purposefully not using testify to reduce dependencies and keep the serverless packages small
-	cfg, err := loadConfig()
-	if err != nil {
-		t.Error("failed to load config:", err)
-		return
-	}
-	if cfg.Backend != backend.GCS {
-		t.Error("backend should be gcs", cfg.Backend)
-	}
-	if cfg.GCS.BucketName != "some-random-gcs-bucket" {
-		t.Error("gcs bucket name should be some-random-gcs-bucket", cfg.GCS.BucketName)
-	}
-	if cfg.GCS.ChunkBufferSize != 10 {
-		t.Error("gcs chunk buffer size should be 10", cfg.GCS.ChunkBufferSize)
-	}
-	if cfg.GCS.HedgeRequestsAt != 400*time.Millisecond {
-		t.Error("gcs hedge requests at should be 400ms", cfg.GCS.HedgeRequestsAt)
-	}
-}
-
-func TestLoadConfigS3(t *testing.T) {
-	os.Setenv("TEMPO_S3_BUCKET", "tempo")
-	os.Setenv("TEMPO_S3_ENDPOINT", "glerg")
-	os.Setenv("TEMPO_BACKEND", backend.S3)
-	os.Setenv("TEMPO_S3_ACCESS_KEY", "access")
-	os.Setenv("TEMPO_S3_SECRET_KEY", "secret")
-
-	cfg, err := loadConfig()
-	if err != nil {
-		t.Error("failed to load config", err)
-		return
-	}
-	if cfg.Backend != backend.S3 {
-		t.Error("backend should be s3", cfg.Backend)
-	}
-	if cfg.S3.Bucket != "tempo" {
-		t.Error("s3 bucket name should be tempo", cfg.S3.Bucket)
-	}
-	if cfg.S3.Endpoint != "glerg" {
-		t.Error("s3 endpoint should be glerg", cfg.S3.Endpoint)
-	}
-	if cfg.S3.AccessKey != "access" {
-		t.Error("s3 access key should be access", cfg.S3.AccessKey)
-	}
-	if cfg.S3.SecretKey.String() != "secret" {
-		t.Error("s3 secret key should be secret", cfg.S3.SecretKey)
-	}
-}
-
-func TestLoadConfigAzure(t *testing.T) {
-	os.Setenv("TEMPO_BACKEND", backend.Azure)
-	os.Setenv("TEMPO_AZURE_MAX_BUFFERS", "3")
-
-	cfg, err := loadConfig()
-	if err != nil {
-		t.Error("failed to load config", err)
-		return
-	}
-	if cfg.Backend != backend.Azure {
-		t.Error("backend should be azure", cfg.Backend)
-	}
-	if cfg.Azure.MaxBuffers != 3 {
-		t.Error("azure max buffers should be 3", cfg.Azure.MaxBuffers)
-	}
-}
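A small caveat on the tests above: os.Setenv mutates process-wide state and nothing restores it, so values can bleed between tests in the same package. On Go 1.17 and later, t.Setenv registers the cleanup automatically; a sketch of the GCS case rewritten that way, reusing the same loadConfig and fields shown above:

package serverless

import (
	"testing"

	"github.com/grafana/tempo/tempodb/backend"
)

// t.Setenv restores the previous value when the test finishes, so the
// env mutations cannot leak into the other loadConfig tests.
func TestLoadConfigGCSSetenv(t *testing.T) {
	t.Setenv("TEMPO_BACKEND", backend.GCS)
	t.Setenv("TEMPO_GCS_BUCKET_NAME", "some-random-gcs-bucket")

	cfg, err := loadConfig()
	if err != nil {
		t.Fatal("failed to load config:", err)
	}
	if cfg.GCS.BucketName != "some-random-gcs-bucket" {
		t.Error("gcs bucket name should be some-random-gcs-bucket", cfg.GCS.BucketName)
	}
}

(Note that t.Setenv is incompatible with t.Parallel, which these sequential env-driven tests do not use anyway.)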
diff --git a/cmd/tempo-serverless/lambda/Dockerfile b/cmd/tempo-serverless/lambda/Dockerfile
deleted file mode 100644
index ad1c6bb398a..00000000000
--- a/cmd/tempo-serverless/lambda/Dockerfile
+++ /dev/null
@@ -1,44 +0,0 @@
-#
-# docker run -p 8080:8080 -p 9000:9000 tempo-serverless
-#
-# to exercise the function after building using the raw lambda interface
-# curl -d '{}' http://localhost:8080/2015-03-31/functions/function/invocations
-# passed body should be serializable to: events.ALBTargetGroupRequest
-# response body will be: events.ALBTargetGroupResponse
-#
-# to exercise the function as if it were an http endpoint through an alb
-# curl http://localhost:9000/?start=1000&end=1001&...
-#

-#
-# build the lambda and retrieve the lambda-local-proxy
-#
-FROM golang:1.21.4 AS build
-
-# copy in the lambda. todo: build in container
-COPY lambda /
-
-# copy in https://github.com/treasure-data/lambda-local-proxy to include in the docker image.
-# this project acts as an "alb" and translates normal http requests into Lambda invokes
-RUN curl -L https://github.com/treasure-data/lambda-local-proxy/releases/download/v0.0.5/lambda-local-proxy_0.0.5_Linux_x86_64.tar.gz | tar xvz -C / \
-    && chmod +x /lambda-local-proxy
-
-#
-# https://docs.aws.amazon.com/lambda/latest/dg/images-create.html
-#
-FROM public.ecr.aws/lambda/go:1.2022.01.25.13
-
-COPY --from=build /lambda ${LAMBDA_TASK_ROOT}
-COPY --from=build /lambda-local-proxy /lambda-local-proxy
-
-RUN chmod +x /lambda-local-proxy
-
-RUN echo -e '\
-    #!/bin/bash \n\
-    AWS_REGION=dummy AWS_ACCESS_KEY_ID=dummy AWS_SECRET_ACCESS_KEY=dummy /lambda-local-proxy -e http://localhost:8080 -p 9000 -f function & \n\
-    exec /lambda-entrypoint.sh lambda \n\
-    ' > /run.sh
-RUN chmod +x /run.sh
-
-# run lambda-local-proxy and the actual lambda runtime itself.
-ENTRYPOINT /run.sh
diff --git a/cmd/tempo-serverless/lambda/go.mod b/cmd/tempo-serverless/lambda/go.mod deleted file mode 100644 index e4499b30619..00000000000 --- a/cmd/tempo-serverless/lambda/go.mod +++ /dev/null @@ -1,154 +0,0 @@ -module github.com/grafana/tempo/cmd/tempo-serverless/lambda - -go 1.23.3 - -require ( - github.com/aws/aws-lambda-go v1.28.0 - github.com/gogo/protobuf v1.3.2 - github.com/grafana/tempo v0.0.0-00010101000000-000000000000 - github.com/stretchr/testify v1.9.0 -) - -require ( - cloud.google.com/go v0.115.0 // indirect - cloud.google.com/go/auth v0.7.3 // indirect - cloud.google.com/go/auth/oauth2adapt v0.2.3 // indirect - cloud.google.com/go/compute/metadata v0.5.0 // indirect - cloud.google.com/go/iam v1.1.12 // indirect - cloud.google.com/go/storage v1.41.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect - github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 // indirect - github.com/andybalholm/brotli v1.1.1 // indirect - github.com/apache/thrift v0.20.0 // indirect - github.com/aws/aws-sdk-go v1.55.5 // indirect - github.com/beorn7/perks v1.0.1 // indirect - github.com/cespare/xxhash v1.1.0 // indirect - github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/coreos/go-systemd/v22 v22.5.0 // indirect - github.com/cristalhq/hedgedhttp v0.9.1 // indirect - github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect - github.com/dustin/go-humanize v1.0.1 // indirect - github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb // indirect - github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/go-ini/ini v1.67.0 // indirect - github.com/go-kit/log v0.2.1 // indirect - github.com/go-logfmt/logfmt v0.6.0 // indirect - github.com/go-logr/logr v1.4.2 // indirect - github.com/go-logr/stdr v1.2.2 // indirect -
github.com/go-redis/redis/v8 v8.11.5 // indirect - github.com/goccy/go-json v0.10.3 // indirect - github.com/gogo/googleapis v1.4.1 // indirect - github.com/gogo/status v1.1.1 // indirect - github.com/golang-jwt/jwt/v5 v5.2.1 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.4 // indirect - github.com/golang/snappy v0.0.4 // indirect - github.com/google/s2a-go v0.1.8 // indirect - github.com/google/uuid v1.6.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.13.0 // indirect - github.com/gorilla/mux v1.8.1 // indirect - github.com/grafana/dskit v0.0.0-20241115082728-f2a7eb3aa0e9 // indirect - github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56 // indirect - github.com/grafana/pyroscope-go/godeltaprof v0.1.8 // indirect - github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect - github.com/hashicorp/hcl v1.0.0 // indirect - github.com/jaegertracing/jaeger v1.57.0 // indirect - github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/jpillora/backoff v1.0.0 // indirect - github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.11 // indirect - github.com/klauspost/cpuid/v2 v2.2.8 // indirect - github.com/kylelemons/godebug v1.1.0 // indirect - github.com/magiconair/properties v1.8.7 // indirect - github.com/mattn/go-runewidth v0.0.16 // indirect - github.com/miekg/dns v1.1.61 // indirect - github.com/minio/md5-simd v1.1.2 // indirect - github.com/minio/minio-go/v7 v7.0.80 // indirect - github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect - github.com/olekukonko/tablewriter v0.0.5 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.102.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.102.0 // indirect - github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e // indirect - github.com/opentracing-contrib/go-stdlib v1.0.0 // indirect - github.com/opentracing/opentracing-go v1.2.0 // indirect - github.com/parquet-go/parquet-go v0.23.1-0.20241011155651-6446d1d0d2fe // indirect - github.com/pelletier/go-toml/v2 v2.1.0 // indirect - github.com/pierrec/lz4/v4 v4.1.21 // indirect - github.com/pires/go-proxyproto v0.7.0 // indirect - github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect - github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_golang v1.19.1 // indirect - github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.55.0 // indirect - github.com/prometheus/exporter-toolkit v0.11.0 // indirect - github.com/prometheus/procfs v0.15.1 // indirect - github.com/prometheus/prometheus v0.54.0 // indirect - github.com/rivo/uniseg v0.4.7 // indirect - github.com/rs/xid v1.6.0 // indirect - github.com/sagikazarmark/locafero v0.4.0 // indirect - github.com/sagikazarmark/slog-shim v0.1.0 // indirect - github.com/sercand/kuberesolver/v5 v5.1.1 // indirect - github.com/sony/gobreaker v0.4.1 // indirect - 
github.com/sourcegraph/conc v0.3.0 // indirect - github.com/spaolacci/murmur3 v1.1.0 // indirect - github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.6.0 // indirect - github.com/spf13/pflag v1.0.5 // indirect - github.com/spf13/viper v1.18.2 // indirect - github.com/subosito/gotenv v1.6.0 // indirect - github.com/uber-go/atomic v1.4.0 // indirect - github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect - github.com/uber/jaeger-lib v2.4.1+incompatible // indirect - github.com/willf/bitset v1.1.11 // indirect - github.com/willf/bloom v2.0.3+incompatible // indirect - go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/collector/pdata v1.12.0 // indirect - go.opentelemetry.io/collector/semconv v0.105.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 // indirect - go.opentelemetry.io/otel v1.31.0 // indirect - go.opentelemetry.io/otel/metric v1.31.0 // indirect - go.opentelemetry.io/otel/trace v1.31.0 // indirect - go.uber.org/atomic v1.11.0 // indirect - go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.29.0 // indirect - golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect - golang.org/x/mod v0.19.0 // indirect - golang.org/x/net v0.31.0 // indirect - golang.org/x/oauth2 v0.21.0 // indirect - golang.org/x/sync v0.9.0 // indirect - golang.org/x/sys v0.27.0 // indirect - golang.org/x/text v0.20.0 // indirect - golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.23.0 // indirect - google.golang.org/api v0.190.0 // indirect - google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf // indirect - google.golang.org/grpc v1.65.0 // indirect - google.golang.org/protobuf v1.34.2 // indirect - gopkg.in/ini.v1 v1.67.0 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect -) - -replace github.com/grafana/tempo => ../../../ - -replace ( - github.com/bradfitz/gomemcache => github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab - k8s.io/api => k8s.io/api v0.20.4 - k8s.io/client-go => k8s.io/client-go v0.20.4 -) diff --git a/cmd/tempo-serverless/lambda/go.sum b/cmd/tempo-serverless/lambda/go.sum deleted file mode 100644 index a34fbafcdee..00000000000 --- a/cmd/tempo-serverless/lambda/go.sum +++ /dev/null @@ -1,483 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14= -cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU= -cloud.google.com/go/auth v0.7.3 h1:98Vr+5jMaCZ5NZk6e/uBgf60phTk/XN84r8QEWB9yjY= -cloud.google.com/go/auth v0.7.3/go.mod h1:HJtWUx1P5eqjy/f6Iq5KeytNpbAcGolPhOgyop2LlzA= -cloud.google.com/go/auth/oauth2adapt v0.2.3 h1:MlxF+Pd3OmSudg/b1yZ5lJwoXCEaeedAguodky1PcKI= -cloud.google.com/go/auth/oauth2adapt v0.2.3/go.mod h1:tMQXOfZzFuNuUxOypHlQEXgdfX5cuhwU+ffUuXRJE8I= -cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= -cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= -cloud.google.com/go/iam v1.1.12 h1:JixGLimRrNGcxvJEQ8+clfLxPlbeZA6MuRJ+qJNQ5Xw= -cloud.google.com/go/iam v1.1.12/go.mod 
h1:9LDX8J7dN5YRyzVHxwQzrQs9opFFqn0Mxs9nAeB+Hhg= -cloud.google.com/go/storage v1.41.0 h1:RusiwatSu6lHeEXe3kglxakAmAbfV+rhtPqA6i8RBx0= -cloud.google.com/go/storage v1.41.0/go.mod h1:J1WCa/Z2FcgdEDuPUY8DxT5I+d9mFKsCepp5vR6Sq80= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 h1:JZg6HRh6W6U4OLl6lk7BZ7BLisIzM9dG1R50zUk9C/M= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0/go.mod h1:YL1xnZ6QejvQHWJrX/AvhFl4WW4rqHVoKspWNVwFk0M= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 h1:B/dfvscEQtew9dVuoxqxrUKKv8Ih2f55PydknDamU+g= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0/go.mod h1:fiPSssYvltE08HJchL04dOy+RD4hgrjph0cwGGMntdI= -github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0 h1:+m0M/LFxN43KvULkDNfdXOgrjtg6UYJPFBJyuEcRCAw= -github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0/go.mod h1:PwOyop78lveYMRs6oCxjiVyBdyCgIYH6XHIVZO9/SFQ= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.2.0 h1:Ma67P/GGprNwsslzEH6+Kb8nybI8jpDTm4Wmzu2ReK8= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.2.0/go.mod h1:c+Lifp3EDEamAkPVzMooRNOK6CZjNSdEnf1A7jsI9u4= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 h1:gggzg0SUMs6SQbEw+3LoSsYf9YMjkupeAnHMX8O9mmY= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0/go.mod h1:+6KLcKIVgxoBDMqMO/Nvy7bZ9a0nbU3I1DtFQK3YvB4= -github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM= -github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= -github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= -github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 h1:t3eaIm0rUkzbrIewtiFmMK5RXHej2XnoXNhxVsAYUfg= -github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= -github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk= -github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= -github.com/alicebob/miniredis v2.5.0+incompatible h1:yBHoLpsyjupjz3NL3MhKMVkR41j82Yjf3KFv7ApYzUI= -github.com/alicebob/miniredis/v2 v2.21.0 h1:CdmwIlKUWFBDS+4464GtQiQ0R1vpzOgu4Vnd74rBL7M= -github.com/alicebob/miniredis/v2 v2.21.0/go.mod h1:XNqvJdQJv5mSuVMc0ynneafpnL/zv52acZ6kqeS0t88= -github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA= -github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA= -github.com/apache/thrift v0.20.0 
h1:631+KvYbsBZxmuJjYwhezVsrfc/TbqtZV4QcxOX1fOI= -github.com/apache/thrift v0.20.0/go.mod h1:hOk1BQqcp2OLzGsyVXdfMk7YFlMxK3aoEVhjD06QhB8= -github.com/aws/aws-lambda-go v1.28.0 h1:fZiik1PZqW2IyAN4rj+Y0UBaO1IDFlsNo9Zz/XnArK4= -github.com/aws/aws-lambda-go v1.28.0/go.mod h1:jJmlefzPfGnckuHdXX7/80O3BvUUi12XOkbv4w9SGLU= -github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= -github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= -github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cristalhq/hedgedhttp v0.9.1 h1:g68L9cf8uUyQKQJwciD0A1Vgbsz+QgCjuB1I8FAsCDs= -github.com/cristalhq/hedgedhttp v0.9.1/go.mod h1:XkqWU6qVMutbhW68NnzjWrGtH8NUx1UfYqGYtHVKIsI= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= -github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= -github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM= -github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc= -github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= -github.com/fatih/color v1.16.0/go.mod 
h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= -github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= -github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= -github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= -github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= -github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= -github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= -github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= -github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= -github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= -github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= -github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= -github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0= -github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/gogo/status v1.1.1 h1:DuHXlSFHNKqTQ+/ACf5Vs6r4X/dH2EgIzR9Vr+H65kg= -github.com/gogo/status v1.1.1/go.mod h1:jpG3dM5QPcqu19Hg8lkUhBFBa3TcLs1DG7+2Jqci7oU= -github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= -github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod 
h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= -github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= -github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= -github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= -github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s= -github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= -github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= -github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= -github.com/grafana/dskit v0.0.0-20241115082728-f2a7eb3aa0e9 h1:Dx7+6aU/fhwD2vkMr0PUcyxGat1sjUssHAKQKaS7sDM= -github.com/grafana/dskit v0.0.0-20241115082728-f2a7eb3aa0e9/go.mod h1:SPLNCARd4xdjCkue0O6hvuoveuS1dGJjDnfxYe405YQ= -github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56 h1:X8IKQ0wu40wpvYcKfBcc5T4QnhdQjUhtUtB/1CY89lE= -github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56/go.mod h1:PGk3RjYHpxMM8HFPhKKo+vve3DdlPUELZLSDEFehPuU= -github.com/grafana/pyroscope-go/godeltaprof v0.1.8 h1:iwOtYXeeVSAeYefJNaxDytgjKtUuKQbJqgAIjlnicKg= 
-github.com/grafana/pyroscope-go/godeltaprof v0.1.8/go.mod h1:2+l7K7twW49Ct4wFluZD3tZ6e0SjanjcUUBPVD/UuGU= -github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248= -github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk= -github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= -github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= -github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= -github.com/hashicorp/go-plugin v1.6.0 h1:wgd4KxHJTVGGqWBq4QPB1i5BZNEx9BR8+OFmHDmTk8A= -github.com/hashicorp/go-plugin v1.6.0/go.mod h1:lBS5MtSSBZk0SHc66KACcjjlU6WzEVP/8pwz68aMkCI= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= -github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= -github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= -github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= -github.com/jaegertracing/jaeger v1.57.0 h1:3wDtUUPs6NRYH7+d+y8MilDkLHdpPrVlQ2wbcsA62bs= -github.com/jaegertracing/jaeger v1.57.0/go.mod h1:p/1fxIU9hKHl7qEhKC72p2ZYVhvvZvNB73y6V7YyuTs= -github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= -github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6 h1:IsMZxCuZqKuao2vNdfD82fjjgPLfyHLpR41Z88viRWs= -github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6/go.mod h1:3VeWNIJaW+O5xpRQbPp0Ybqu1vJd/pm7s2F473HRrkw= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= -github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= -github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= -github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kylelemons/godebug v1.1.0 
h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= -github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= -github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= -github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs= -github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ= -github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= -github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.80 h1:2mdUHXEykRdY/BigLt3Iuu1otL0JTogT0Nmltg0wujk= -github.com/minio/minio-go/v7 v7.0.80/go.mod h1:84gmIilaX4zcvAWWzJ5Z1WI5axN+hAbM5w25xf8xvC0= -github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE= -github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= -github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= -github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= -github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo 
v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/gomega v1.24.0 h1:+0glovB9Jd6z3VR+ScSwQqXVTIfJcGA9UBM8yzQxhqg= -github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.102.0 h1:qsM5HhWpAfIMg8LdO4u+CHofu4UuCuJwg/M+ySO9uZA= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.102.0/go.mod h1:wBJlGy9Wx6s7AxIMcSne2sGw73e5ZUy1AQ/duYwpFf8= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.102.0 h1:4VQidhCgkJiBvBDMOukr5ixrf5uP66iW5Hb+CFsb+4E= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.102.0/go.mod h1:nMto9zkv0vD8YI3oGZFZS2Uu7k2oHt1d+xUHN/ofUYo= -github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e h1:4cPxUYdgaGzZIT5/j0IfqOrrXmq6bG8AwvwisMXpdrg= -github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo= -github.com/opentracing-contrib/go-stdlib v1.0.0 h1:TBS7YuVotp8myLon4Pv7BtCBzOTo1DeZCld0Z63mW2w= -github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= -github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= -github.com/parquet-go/parquet-go v0.23.1-0.20241011155651-6446d1d0d2fe h1:oUJ5TPnrEK/z+/PeoLL+jCgfngAZIDMyhZASetRcYYg= -github.com/parquet-go/parquet-go v0.23.1-0.20241011155651-6446d1d0d2fe/go.mod h1:OqBBRGBl7+llplCvDMql8dEKaDqjaFA/VAPw+OJiNiw= -github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= -github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= -github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= -github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pires/go-proxyproto v0.7.0 h1:IukmRewDQFWC7kfnb66CSomk2q/seBuilHBYFwyq0Hs= -github.com/pires/go-proxyproto v0.7.0/go.mod h1:Vz/1JPY/OACxWGQNIRY2BeyDmpoaWmEP40O9LbuiFR4= -github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= -github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= 
-github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= -github.com/prometheus/exporter-toolkit v0.11.0 h1:yNTsuZ0aNCNFQ3aFTD2uhPOvr4iD7fdBvKPAEGkNf+g= -github.com/prometheus/exporter-toolkit v0.11.0/go.mod h1:BVnENhnNecpwoTLiABx7mrPB/OLRIgN74qlQbV+FK1Q= -github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= -github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/prometheus/prometheus v0.54.0 h1:6+VmEkohHcofl3W5LyRlhw1Lfm575w/aX6ZFyVAmzM0= -github.com/prometheus/prometheus v0.54.0/go.mod h1:xlLByHhk2g3ycakQGrMaU8K7OySZx98BzeCR99991NY= -github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4= -github.com/redis/go-redis/v9 v9.6.1/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA= -github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= -github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU= -github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= -github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= -github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= -github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= -github.com/sercand/kuberesolver/v5 v5.1.1 h1:CYH+d67G0sGBj7q5wLK61yzqJJ8gLLC8aeprPTHb6yY= -github.com/sercand/kuberesolver/v5 v5.1.1/go.mod h1:Fs1KbKhVRnB2aDWN12NjKCB+RgYMWZJ294T3BtmVCpQ= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sony/gobreaker v0.4.1 h1:oMnRNZXX5j85zso6xCPRNPtmAycat+WcoKbklScLDgQ= -github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= -github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= -github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= -github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= -github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= -github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= -github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= -github.com/spf13/viper v1.18.2/go.mod 
h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= -github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= -github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= -github.com/uber-go/atomic v1.4.0 h1:yOuPqEq4ovnhEjpHmfFwsqBXDYbQeT6Nb0bwD6XnD5o= -github.com/uber-go/atomic v1.4.0/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g= -github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= -github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= -github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg= -github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= -github.com/urfave/cli/v2 v2.2.0/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= -github.com/willf/bitset v1.1.11 h1:N7Z7E9UvjW+sGsEl7k/SJrvY2reP1A07MrGuCjIOjRE= -github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= -github.com/willf/bloom v2.0.3+incompatible h1:QDacWdqcAUI1MPOwIQZRy9kOR7yxfyEmxX8Wdm2/JPA= -github.com/willf/bloom v2.0.3+incompatible/go.mod h1:MmAltL9pDMNTrvUkxdg0k0q5I0suxmuwp3KbyrZLOZ8= -github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU= -github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/gopher-lua v0.0.0-20220504180219-658193537a64 h1:5mLPGnFdSsevFRFc9q3yYbBkB6tsm4aCwwQV/j1JQAQ= -github.com/yuin/gopher-lua v0.0.0-20220504180219-658193537a64/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= -go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector/pdata v1.12.0 h1:Xx5VK1p4VO0md8MWm2icwC1MnJ7f8EimKItMWw46BmA= -go.opentelemetry.io/collector/pdata v1.12.0/go.mod h1:MYeB0MmMAxeM0hstCFrCqWLzdyeYySim2dG6pDT6nYI= -go.opentelemetry.io/collector/semconv v0.105.0 h1:8p6dZ3JfxFTjbY38d8xlQGB1TQ3nPUvs+D0RERniZ1g= 
-go.opentelemetry.io/collector/semconv v0.105.0/go.mod h1:yMVUCNoQPZVq/IPfrHrnntZTWsLf5YGZ7qwKulIl5hw= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 h1:vS1Ao/R55RNV4O7TA2Qopok8yN+X0LIP6RVWLFkprck= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0/go.mod h1:BMsdeOxN04K0L5FNUBfjFdvwWGNe/rkmSwH4Aelu/X0= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 h1:ZIg3ZT/aQ7AfKqdwp7ECpOK6vHqquXXuyTjIO8ZdmPs= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0/go.mod h1:DQAwmETtZV00skUwgD6+0U89g80NKsJE3DCKeLLPQMI= -go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= -go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= -go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= -go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= -go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= -go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= -go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= -go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= -go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= -go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= -go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= -go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= -go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= -go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= -go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ= -golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM= -golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= -golang.org/x/mod 
v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo= -golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= -golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= -golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= -golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= -golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= -golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= -google.golang.org/api v0.190.0 h1:ASM+IhLY1zljNdLu19W1jTmU6A+gMk6M46Wlur61s+Q= -google.golang.org/api v0.190.0/go.mod h1:QIr6I9iedBLnfqoD6L6Vze1UvS5Hzj5r2aUBOaZnLHo= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf h1:OqdXDEakZCVtDiZTjcxfwbHPCT11ycCEsTKesBVKvyY= -google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:mCr1K1c8kX+1iSBREvU3Juo11CB+QOEWxbRS01wWl5M= -google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f h1:b1Ln/PG8orm0SsBbHZWke8dDp2lrCD4jSmfglFpTZbk= -google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f/go.mod h1:AHT0dDg3SoMOgZGnZk29b5xTbPHMoEC8qthmBLJCpys= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf h1:liao9UHurZLtiEwBgT9LMOnKYsHze6eA6w1KQCMVN2Q= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= -google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod 
h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
-google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
-google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
-gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
-gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
-gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
-gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
-gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/cmd/tempo-serverless/lambda/main.go b/cmd/tempo-serverless/lambda/main.go
deleted file mode 100644
index ff3b037d69e..00000000000
--- a/cmd/tempo-serverless/lambda/main.go
+++ /dev/null
@@ -1,116 +0,0 @@
-package main
-
-import (
-	"context"
-	"fmt"
-	"net/http"
-	"net/url"
-
-	"github.com/aws/aws-lambda-go/events"
"github.com/aws/aws-lambda-go/lambda" - "github.com/gogo/protobuf/jsonpb" - - serverless "github.com/grafana/tempo/cmd/tempo-serverless" -) - -func main() { - lambda.Start(HandleLambdaEvent) -} - -func HandleLambdaEvent(ctx context.Context, event events.ALBTargetGroupRequest) (events.ALBTargetGroupResponse, error) { - req, err := httpRequest(event) - if err != nil { - return events.ALBTargetGroupResponse{ - Body: err.Error(), - StatusCode: http.StatusInternalServerError, - Headers: map[string]string{}, // alb will 502 if we don't set this: https://github.com/awslabs/aws-lambda-go-api-proxy/issues/79 - }, nil - } - - resp, httpErr := serverless.Handler(req.WithContext(ctx)) - if httpErr != nil { - return events.ALBTargetGroupResponse{ - Body: httpErr.Err.Error(), - StatusCode: httpErr.Status, - Headers: map[string]string{}, - }, nil - } - - marshaller := &jsonpb.Marshaler{} - body, err := marshaller.MarshalToString(resp) - if err != nil { - return events.ALBTargetGroupResponse{ - Body: err.Error(), - StatusCode: http.StatusInternalServerError, - Headers: map[string]string{}, - }, nil - } - - return events.ALBTargetGroupResponse{ - Body: body, - StatusCode: http.StatusOK, - Headers: map[string]string{}, - }, nil -} - -// adapted with love from: https://github.com/akrylysov/algnhsa/blob/4c6f78589c506c0f060512adf96b97e8285ac80c/request.go#L65 -func httpRequest(event events.ALBTargetGroupRequest) (*http.Request, error) { - // params - params := url.Values{} - for k, v := range event.QueryStringParameters { - unescaped, err := url.QueryUnescape(v) - if err != nil { - return nil, fmt.Errorf("failed to unescape query string parameter %s: %s: %w", k, v, err) - } - params.Set(k, unescaped) - } - for k, vals := range event.MultiValueQueryStringParameters { - for i, v := range vals { - var err error - vals[i], err = url.QueryUnescape(v) - if err != nil { - return nil, fmt.Errorf("failed to unescape multi val query string parameter %s: %s: %w", k, v, err) - } - } - params[k] = vals - } - - // headers - headers := make(http.Header) - for k, v := range event.Headers { - headers.Set(k, v) - } - for k, vals := range event.MultiValueHeaders { - headers[http.CanonicalHeaderKey(k)] = vals - } - - // url - u := url.URL{ - Host: headers.Get("host"), - RawPath: event.Path, - RawQuery: params.Encode(), - } - - // Unescape request path - p, err := url.PathUnescape(u.RawPath) - if err != nil { - return nil, err - } - u.Path = p - - if u.Path == u.RawPath { - u.RawPath = "" - } - - // we don't use the body. 
-
-	req, err := http.NewRequest(event.HTTPMethod, u.String(), nil)
-	if err != nil {
-		return nil, err
-	}
-
-	req.RequestURI = u.RequestURI()
-	req.Header = headers
-
-	return req, nil
-}
diff --git a/cmd/tempo-serverless/lambda/main_test.go b/cmd/tempo-serverless/lambda/main_test.go
deleted file mode 100644
index 6517d12cda7..00000000000
--- a/cmd/tempo-serverless/lambda/main_test.go
+++ /dev/null
@@ -1,111 +0,0 @@
-package main
-
-import (
-	"net/http"
-	"net/url"
-	"testing"
-
-	"github.com/aws/aws-lambda-go/events"
-	"github.com/stretchr/testify/require"
-)
-
-// Test that httpRequest() works as expected
-func Test_httpRequest(t *testing.T) {
-	tests := []struct {
-		event   events.ALBTargetGroupRequest
-		want    *http.Request
-		wantErr string
-	}{
-		// empty
-		{
-			event: events.ALBTargetGroupRequest{},
-			want: &http.Request{
-				Method:     "GET",
-				RequestURI: "/",
-				URL:        &url.URL{},
-				Header:     map[string][]string{},
-			},
-		},
-		// rando
-		{
-			event: events.ALBTargetGroupRequest{
-				HTTPMethod: "GET",
-				Path:       "/",
-				Headers: map[string]string{
-					"Content-Type": "application/json",
-				},
-				QueryStringParameters: map[string]string{
-					"query": "test",
-				},
-				Body: "test",
-			},
-			want: &http.Request{
-				Method:     "GET",
-				RequestURI: "/?query=test",
-				URL: &url.URL{
-					Path:     "/",
-					RawQuery: "query=test",
-				},
-				Header: map[string][]string{
-					"Content-Type": {"application/json"},
-				},
-			},
-		},
-		// unescape params
-		{
-			event: events.ALBTargetGroupRequest{
-				QueryStringParameters: map[string]string{
-					"q": "%7B%20.foo%20%3D%20bar%20%7D",
-				},
-			},
-			want: &http.Request{
-				Method:     "GET",
-				RequestURI: "/?q=%7B+.foo+%3D+bar+%7D",
-				URL: &url.URL{
-					RawQuery: "q=%7B+.foo+%3D+bar+%7D",
-				},
-				Header: map[string][]string{},
-			},
-		},
-		// unescape multivalue params
-		{
-			event: events.ALBTargetGroupRequest{
-				MultiValueQueryStringParameters: map[string][]string{
-					"q2": {"%7B%20.foo%20%3D%20bar%20%7D", "%7B%20.foo%20%3D%20bar%20%7D"},
-				},
-			},
-			want: &http.Request{
-				Method:     "GET",
-				RequestURI: "/?q2=%7B+.foo+%3D+bar+%7D&q2=%7B+.foo+%3D+bar+%7D",
-				URL: &url.URL{
-					RawQuery: "q2=%7B+.foo+%3D+bar+%7D&q2=%7B+.foo+%3D+bar+%7D",
-				},
-				Header: map[string][]string{},
-			},
-		},
-		// unescape error
-		{
-			event: events.ALBTargetGroupRequest{
-				QueryStringParameters: map[string]string{
-					"q": "%7B%20.foo%20%3D%20bar%20%7D%ZZ",
-				},
-			},
-			wantErr: "failed to unescape query string parameter q: %7B%20.foo%20%3D%20bar%20%7D%ZZ: invalid URL escape \"%ZZ\"",
-		},
-	}
-
-	for _, tt := range tests {
-		got, err := httpRequest(tt.event)
-		if len(tt.wantErr) > 0 {
-			require.EqualError(t, err, tt.wantErr)
-			continue
-		} else {
-			require.NoError(t, err)
-		}
-
-		require.Equal(t, tt.want.Method, got.Method)
-		require.Equal(t, tt.want.RequestURI, got.RequestURI)
-		require.Equal(t, tt.want.URL, got.URL)
-		require.Equal(t, tt.want.Header, got.Header)
-	}
-}
diff --git a/cmd/tempo-serverless/readme.md b/cmd/tempo-serverless/readme.md
deleted file mode 100644
index bc3aaf30b39..00000000000
--- a/cmd/tempo-serverless/readme.md
+++ /dev/null
@@ -1,45 +0,0 @@
-# tempo-serverless
-
-## NOTE: The Tempo serverless is now deprecated and will be removed in an upcoming release.
-
-This folder is intended to wrap a simple handler for searching backend storage for traces. Subfolders
-`./cloud-run` and `./lambda` contain specific code necessary to run this handler in the respective
-environments.
-
-## Configuration
-The serverless handler is set up to be configured via environment variables starting with the `TEMPO_` prefix.
-```
-TEMPO_GCS_BUCKET_NAME=some-random-gcs-bucket
-TEMPO_GCS_CHUNK_BUFFER_SIZE=10485760
-TEMPO_GCS_HEDGE_REQUESTS_AT=400ms
-TEMPO_BACKEND=gcs
-```
-All fields in tempodb.Config are accessible using their all caps yaml names. Config objects can also be descended
-into using the `_` character. Note that in the above example `TEMPO_GCS_BUCKET_NAME` refers to tempodb.Config.GCS.BucketName.
-
-## Make
-
-### make build-docker
-
-Builds two docker images named `tempo-serverless` and `tempo-serverless-lambda` that are used for e2e testing.
-
-```
-docker run --rm -p 8080:8080 tempo-serverless
-curl http://localhost:8080
-```
-
-### make build-docker-gcr-binary
-### make build-docker-gcr
-
-Building the GCR docker image is split into two steps to make it easier to build as part of CI. (See /.drone/drone.jsonnet).
-Together these two steps build a binary outside of a container and then package that up using a Dockerfile. The docker image
-created in `build-docker-gcr` is the code artifact that is shipped to Google Cloud Run.
-
-### make build-docker-lambda-test
-
-Builds the docker image for lambda e2e testing. This image is not shipped anywhere.
-
-### make build-lambda-zip
-
-Builds a lambda zip file. This builds the code artifact that is shipped to AWS Lambda.
-
diff --git a/cmd/tempo-vulture/Dockerfile b/cmd/tempo-vulture/Dockerfile
index 6a7eb449861..122f4c14267 100644
--- a/cmd/tempo-vulture/Dockerfile
+++ b/cmd/tempo-vulture/Dockerfile
@@ -1,10 +1,16 @@
-FROM alpine:3.20 as certs
-RUN apk --update add ca-certificates
+FROM alpine:latest AS ca-certificates
+RUN apk add --update --no-cache ca-certificates
+
+FROM gcr.io/distroless/static-debian12:debug
+
+SHELL ["/busybox/sh", "-c"]
+
+RUN ["/busybox/addgroup", "-g", "10001", "-S", "tempo"]
+RUN ["/busybox/adduser", "-u", "10001", "-S", "tempo", "-G", "tempo"]
+
 ARG TARGETARCH
 COPY bin/linux/tempo-vulture-${TARGETARCH} /tempo-vulture
-
-RUN addgroup -g 10001 -S tempo && \
-    adduser -u 10001 -S tempo -G tempo
+COPY --from=ca-certificates /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
 
 USER 10001:10001
diff --git a/cmd/tempo/Dockerfile b/cmd/tempo/Dockerfile
index 8e77d926244..0e3a291ce91 100644
--- a/cmd/tempo/Dockerfile
+++ b/cmd/tempo/Dockerfile
@@ -1,13 +1,19 @@
-FROM alpine:3.20 AS certs
-RUN apk --update add ca-certificates
-ARG TARGETARCH
-COPY bin/linux/tempo-${TARGETARCH} /tempo
+FROM alpine:latest AS ca-certificates
+RUN apk add --update --no-cache ca-certificates
+
+FROM gcr.io/distroless/static-debian12:debug
 
-RUN addgroup -g 10001 -S tempo && \
-    adduser -u 10001 -S tempo -G tempo
+# we need this because some docker-compose files call chown assuming there's a shell
+SHELL ["/busybox/sh", "-c"]
 
-RUN mkdir -p /var/tempo -m 0700 && \
-    chown -R tempo:tempo /var/tempo
+RUN ["/busybox/addgroup", "-g", "10001", "-S", "tempo"]
+RUN ["/busybox/adduser", "-u", "10001", "-S", "tempo", "-G", "tempo"]
+RUN ["/busybox/mkdir", "-p", "/var/tempo", "-m", "0700"]
+RUN ["/busybox/chown", "-R", "tempo:tempo", "/var/tempo"]
+
+ARG TARGETARCH
+COPY bin/linux/tempo-${TARGETARCH} /tempo
+COPY --from=ca-certificates /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
 
 USER 10001:10001
diff --git a/cmd/tempo/Dockerfile_debug b/cmd/tempo/Dockerfile_debug
index 04311bdf43f..687427fb9a5 100644
--- a/cmd/tempo/Dockerfile_debug
+++ b/cmd/tempo/Dockerfile_debug
@@ -2,7 +2,7 @@ FROM golang:alpine AS build-dlv
 RUN go install github.com/go-delve/delve/cmd/dlv@latest
 
-FROM alpine:3.20 as certs
+FROM alpine:3.21 as certs
 RUN apk --update add ca-certificates bash
 
 ARG TARGETARCH
 COPY bin/linux/tempo-${TARGETARCH} /tempo
diff --git a/cmd/tempo/app/app.go b/cmd/tempo/app/app.go
index 732ec877336..73ccf58d7c5 100644
--- a/cmd/tempo/app/app.go
+++ b/cmd/tempo/app/app.go
@@ -21,8 +21,8 @@ import (
 	"github.com/grafana/dskit/server"
 	"github.com/grafana/dskit/services"
 	"github.com/grafana/dskit/signals"
+	"github.com/grafana/tempo/modules/blockbuilder"
 	"github.com/jedib0t/go-pretty/v6/table"
-	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/version"
 	"go.uber.org/atomic"
 	"google.golang.org/grpc"
@@ -53,13 +53,6 @@ const (
 )
 
 var (
-	metricConfigFeatDesc = prometheus.NewDesc(
-		"tempo_feature_enabled",
-		"Boolean for configuration variables",
-		[]string{"feature"},
-		nil,
-	)
-
 	statFeatureEnabledAuth         = usagestats.NewInt("feature_enabled_auth_stats")
 	statFeatureEnabledMultitenancy = usagestats.NewInt("feature_enabled_multitenancy")
 )
@@ -71,18 +64,21 @@ type App struct {
 	Server         TempoServer
 	InternalServer *server.Server
 
-	readRings     map[string]*ring.Ring
-	Overrides     overrides.Service
-	distributor   *distributor.Distributor
-	querier       *querier.Querier
-	frontend      *frontend_v1.Frontend
-	compactor     *compactor.Compactor
-	ingester      *ingester.Ingester
-	generator     *generator.Generator
-	store         storage.Store
-	usageReport   *usagestats.Reporter
-	cacheProvider cache.Provider
-	MemberlistKV  *memberlist.KVInitService
+	readRings            map[string]*ring.Ring
+	partitionRing        *ring.PartitionInstanceRing
+	partitionRingWatcher *ring.PartitionRingWatcher
+	Overrides            overrides.Service
+	distributor          *distributor.Distributor
+	querier              *querier.Querier
+	frontend             *frontend_v1.Frontend
+	compactor            *compactor.Compactor
+	ingester             *ingester.Ingester
+	generator            *generator.Generator
+	blockBuilder         *blockbuilder.BlockBuilder
+	store                storage.Store
+	usageReport          *usagestats.Reporter
+	cacheProvider        cache.Provider
+	MemberlistKV         *memberlist.KVInitService
 
 	HTTPAuthMiddleware       middleware.Interface
 	TracesConsumerMiddleware receiver.Middleware
@@ -270,7 +266,7 @@ func (t *App) writeStatusConfig(w io.Writer, r *http.Request) error {
 	mode := r.URL.Query().Get("mode")
 	switch mode {
 	case "diff":
-		defaultCfg := newDefaultConfig()
+		defaultCfg := NewDefaultConfig()
 
 		defaultCfgYaml, err := util.YAMLMarshalUnmarshal(defaultCfg)
 		if err != nil {
@@ -287,7 +283,7 @@ func (t *App) writeStatusConfig(w io.Writer, r *http.Request) error {
 			return err
 		}
 	case "defaults":
-		output = newDefaultConfig()
+		output = NewDefaultConfig()
 	case "":
 		output = t.cfg
 	default:
diff --git a/cmd/tempo/app/config.go b/cmd/tempo/app/config.go
index b0de9d276e9..7d32f8b46d1 100644
--- a/cmd/tempo/app/config.go
+++ b/cmd/tempo/app/config.go
@@ -8,8 +8,7 @@ import (
 	"github.com/grafana/dskit/flagext"
 	"github.com/grafana/dskit/kv/memberlist"
 	"github.com/grafana/dskit/server"
-	"github.com/prometheus/client_golang/prometheus"
-
+	"github.com/grafana/tempo/modules/blockbuilder"
 	"github.com/grafana/tempo/modules/cache"
 	"github.com/grafana/tempo/modules/compactor"
 	"github.com/grafana/tempo/modules/distributor"
@@ -21,6 +20,7 @@ import (
 	"github.com/grafana/tempo/modules/overrides"
 	"github.com/grafana/tempo/modules/querier"
 	"github.com/grafana/tempo/modules/storage"
+	"github.com/grafana/tempo/pkg/ingest"
 	internalserver "github.com/grafana/tempo/pkg/server"
 	"github.com/grafana/tempo/pkg/usagestats"
 	"github.com/grafana/tempo/pkg/util"
@@ -49,6 +49,8 @@ type Config struct {
 	Compactor     compactor.Config     `yaml:"compactor,omitempty"`
 	Ingester      ingester.Config      `yaml:"ingester,omitempty"`
`yaml:"metrics_generator,omitempty"` + Ingest ingest.Config `yaml:"ingest,omitempty"` + BlockBuilder blockbuilder.Config `yaml:"block_builder,omitempty"` StorageConfig storage.Config `yaml:"storage,omitempty"` Overrides overrides.Config `yaml:"overrides,omitempty"` MemberlistKV memberlist.KVConfig `yaml:"memberlist,omitempty"` @@ -56,7 +58,7 @@ type Config struct { CacheProvider cache.Config `yaml:"cache,omitempty"` } -func newDefaultConfig() *Config { +func NewDefaultConfig() *Config { defaultConfig := &Config{} defaultFS := flag.NewFlagSet("", flag.PanicOnError) defaultConfig.RegisterFlagsAndApplyDefaults("", defaultFS) @@ -116,14 +118,16 @@ func (c *Config) RegisterFlagsAndApplyDefaults(prefix string, f *flag.FlagSet) { // Everything else flagext.DefaultValues(&c.IngesterClient) - c.IngesterClient.GRPCClientConfig.GRPCCompression = "snappy" + c.IngesterClient.GRPCClientConfig.GRPCCompression = "" flagext.DefaultValues(&c.GeneratorClient) - c.GeneratorClient.GRPCClientConfig.GRPCCompression = "snappy" + c.GeneratorClient.GRPCClientConfig.GRPCCompression = "" c.Overrides.RegisterFlagsAndApplyDefaults(f) c.Distributor.RegisterFlagsAndApplyDefaults(util.PrefixConfig(prefix, "distributor"), f) c.Ingester.RegisterFlagsAndApplyDefaults(util.PrefixConfig(prefix, "ingester"), f) c.Generator.RegisterFlagsAndApplyDefaults(util.PrefixConfig(prefix, "generator"), f) + c.Ingest.RegisterFlagsAndApplyDefaults(util.PrefixConfig(prefix, "ingest"), f) + c.BlockBuilder.RegisterFlagsAndApplyDefaults(util.PrefixConfig(prefix, "block-builder"), f) c.Querier.RegisterFlagsAndApplyDefaults(util.PrefixConfig(prefix, "querier"), f) c.Frontend.RegisterFlagsAndApplyDefaults(util.PrefixConfig(prefix, "frontend"), f) c.Compactor.RegisterFlagsAndApplyDefaults(util.PrefixConfig(prefix, "compactor"), f) @@ -223,25 +227,11 @@ func (c *Config) CheckConfig() []ConfigWarning { warnings = append(warnings, warnTraceByIDConcurrentShards) } - return warnings -} - -func (c *Config) Describe(ch chan<- *prometheus.Desc) { - ch <- metricConfigFeatDesc -} - -func (c *Config) Collect(ch chan<- prometheus.Metric) { - features := map[string]int{ - "search_external_endpoints": 0, - } - - if len(c.Querier.Search.ExternalEndpoints) > 0 { - features["search_external_endpoints"] = 1 + if c.BlockBuilder.BlockConfig.BlockCfg.Version != c.BlockBuilder.WAL.Version { + warnings = append(warnings, warnBlockAndWALVersionMismatch) } - for label, value := range features { - ch <- prometheus.MustNewConstMetric(metricConfigFeatDesc, prometheus.GaugeValue, float64(value), label) - } + return warnings } // ConfigWarning bundles message and explanation strings in one structure. @@ -302,6 +292,11 @@ var ( Message: "c.Frontend.TraceByID.ConcurrentShards greater than query_shards is invalid. concurrent_shards will be set to query_shards", Explain: "Please remove ConcurrentShards or set it to a value less than or equal to QueryShards", } + + warnBlockAndWALVersionMismatch = ConfigWarning{ + Message: "c.BlockConfig.BlockCfg.Version != c.WAL.Version", + Explain: "Block version and WAL version must match. 
WAL version will be set to block version", + } ) func newV2Warning(setting string) ConfigWarning { diff --git a/cmd/tempo/app/config_test.go b/cmd/tempo/app/config_test.go index 1f2ccaa8c8c..85b1a93ae1a 100644 --- a/cmd/tempo/app/config_test.go +++ b/cmd/tempo/app/config_test.go @@ -25,7 +25,7 @@ func TestConfig_CheckConfig(t *testing.T) { }{ { name: "check default cfg and expect no warnings", - config: newDefaultConfig(), + config: NewDefaultConfig(), expect: nil, }, { @@ -76,7 +76,7 @@ func TestConfig_CheckConfig(t *testing.T) { { name: "hit local backend warnings", config: func() *Config { - cfg := newDefaultConfig() + cfg := NewDefaultConfig() cfg.StorageConfig.Trace = tempodb.Config{ Backend: backend.Local, BlocklistPollConcurrency: 1, @@ -92,7 +92,7 @@ func TestConfig_CheckConfig(t *testing.T) { { name: "warnings for v2 settings when they drift from default", config: func() *Config { - cfg := newDefaultConfig() + cfg := NewDefaultConfig() cfg.StorageConfig.Trace.Block.Version = vparquet4.VersionString cfg.StorageConfig.Trace.Block.IndexDownsampleBytes = 1 cfg.StorageConfig.Trace.Block.IndexPageSizeBytes = 1 @@ -112,7 +112,7 @@ func TestConfig_CheckConfig(t *testing.T) { { name: "no warnings for v2 settings when they drift from default and v2 is the block version", config: func() *Config { - cfg := newDefaultConfig() + cfg := NewDefaultConfig() cfg.StorageConfig.Trace.Block.Version = v2.VersionString cfg.StorageConfig.Trace.Block.IndexDownsampleBytes = 1 cfg.StorageConfig.Trace.Block.IndexPageSizeBytes = 1 @@ -126,7 +126,7 @@ func TestConfig_CheckConfig(t *testing.T) { { name: "trace storage conflicts with overrides storage - local", config: func() *Config { - cfg := newDefaultConfig() + cfg := NewDefaultConfig() cfg.StorageConfig.Trace.Backend = backend.Local cfg.StorageConfig.Trace.Local.Path = "/var/tempo" cfg.Overrides.UserConfigurableOverridesConfig.Client.Backend = backend.Local @@ -138,7 +138,7 @@ func TestConfig_CheckConfig(t *testing.T) { { name: "trace storage conflicts with overrides storage - gcs", config: func() *Config { - cfg := newDefaultConfig() + cfg := NewDefaultConfig() cfg.StorageConfig.Trace.Backend = backend.GCS cfg.StorageConfig.Trace.GCS.BucketName = "bucketname" cfg.StorageConfig.Trace.GCS.Prefix = "tempo" @@ -152,7 +152,7 @@ func TestConfig_CheckConfig(t *testing.T) { { name: "trace storage conflicts with overrides storage - different backends", config: func() *Config { - cfg := newDefaultConfig() + cfg := NewDefaultConfig() cfg.StorageConfig.Trace.Backend = backend.GCS cfg.StorageConfig.Trace.GCS.BucketName = "my-bucket" cfg.Overrides.UserConfigurableOverridesConfig.Client.Backend = backend.S3 diff --git a/cmd/tempo/app/modules.go b/cmd/tempo/app/modules.go index 2780ed31364..6d11e7e2592 100644 --- a/cmd/tempo/app/modules.go +++ b/cmd/tempo/app/modules.go @@ -12,7 +12,7 @@ import ( kitlog "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/grafana/dskit/dns" - "github.com/grafana/dskit/kv/codec" + "github.com/grafana/dskit/kv" "github.com/grafana/dskit/kv/memberlist" "github.com/grafana/dskit/middleware" "github.com/grafana/dskit/modules" @@ -23,6 +23,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/collectors" + "github.com/grafana/tempo/modules/blockbuilder" "github.com/grafana/tempo/modules/cache" "github.com/grafana/tempo/modules/compactor" "github.com/grafana/tempo/modules/distributor" @@ -65,6 +66,7 @@ const ( IngesterRing string = "ring" SecondaryIngesterRing string = 
"secondary-ring" MetricsGeneratorRing string = "metrics-generator-ring" + PartitionRing string = "partition-ring" // individual targets Distributor string = "distributor" @@ -73,6 +75,7 @@ const ( Querier string = "querier" QueryFrontend string = "query-frontend" Compactor string = "compactor" + BlockBuilder string = "block-builder" // composite targets SingleBinary string = "all" @@ -88,8 +91,6 @@ func (t *App) initServer() (services.Service, error) { t.cfg.Server.MetricsNamespace = metricsNamespace t.cfg.Server.ExcludeRequestInLog = true - prometheus.MustRegister(&t.cfg) - if t.cfg.EnableGoRuntimeMetrics { // unregister default Go collector prometheus.Unregister(collectors.NewGoCollector()) @@ -180,6 +181,25 @@ func (t *App) initReadRing(cfg ring.Config, name, key string) (*ring.Ring, error return ring, nil } +func (t *App) initPartitionRing() (services.Service, error) { + if !t.cfg.Ingest.Enabled { + return nil, nil + } + + kvClient, err := kv.NewClient(t.cfg.Ingester.IngesterPartitionRing.KVStore, ring.GetPartitionRingCodec(), kv.RegistererWithKVName(prometheus.DefaultRegisterer, ingester.PartitionRingName+"-watcher"), util_log.Logger) + if err != nil { + return nil, fmt.Errorf("creating KV store for ingester partitions ring watcher: %w", err) + } + + t.partitionRingWatcher = ring.NewPartitionRingWatcher(ingester.PartitionRingName, ingester.PartitionRingKey, kvClient, util_log.Logger, prometheus.WrapRegistererWithPrefix("tempo_", prometheus.DefaultRegisterer)) + t.partitionRing = ring.NewPartitionInstanceRing(t.partitionRingWatcher, t.readRings[ringIngester], t.cfg.Ingester.LifecyclerConfig.RingConfig.HeartbeatTimeout) + + // Expose a web page to view the partitions ring state. + t.Server.HTTPRouter().Path("/partition-ring").Methods("GET", "POST").Handler(ring.NewPartitionRingPageHandler(t.partitionRingWatcher, ring.NewPartitionRingEditor(ingester.PartitionRingKey, kvClient))) + + return t.partitionRingWatcher, nil +} + func (t *App) initOverrides() (services.Service, error) { o, err := overrides.NewOverrides(t.cfg.Overrides, newRuntimeConfigValidator(&t.cfg), prometheus.DefaultRegisterer) if err != nil { @@ -225,12 +245,16 @@ func (t *App) initOverridesAPI() (services.Service, error) { } func (t *App) initDistributor() (services.Service, error) { + t.cfg.Distributor.KafkaConfig = t.cfg.Ingest.Kafka + t.cfg.Distributor.KafkaWritePathEnabled = t.cfg.Ingest.Enabled // TODO: Don't mix config params + // todo: make ingester client a module instead of passing the config everywhere distributor, err := distributor.New(t.cfg.Distributor, t.cfg.IngesterClient, t.readRings[ringIngester], t.cfg.GeneratorClient, t.readRings[ringMetricsGenerator], + t.partitionRing, t.Overrides, t.TracesConsumerMiddleware, log.Logger, t.cfg.Server.LogLevel, prometheus.DefaultRegisterer) @@ -253,7 +277,13 @@ func (t *App) initDistributor() (services.Service, error) { func (t *App) initIngester() (services.Service, error) { t.cfg.Ingester.LifecyclerConfig.ListenPort = t.cfg.Server.GRPCListenPort t.cfg.Ingester.DedicatedColumns = t.cfg.StorageConfig.Trace.Block.DedicatedColumns - ingester, err := ingester.New(t.cfg.Ingester, t.store, t.Overrides, prometheus.DefaultRegisterer) + t.cfg.Ingester.IngestStorageConfig = t.cfg.Ingest + + // In SingleBinary mode don't try to discover parition from host name. Always use + // partition 0. This is for small installs or local/debugging setups. 
+ singlePartition := t.cfg.Target == SingleBinary + + ingester, err := ingester.New(t.cfg.Ingester, t.store, t.Overrides, prometheus.DefaultRegisterer, singlePartition) if err != nil { return nil, fmt.Errorf("failed to create ingester: %w", err) } @@ -273,7 +303,16 @@ func (t *App) initGenerator() (services.Service, error) { } t.cfg.Generator.Ring.ListenPort = t.cfg.Server.GRPCListenPort - genSvc, err := generator.New(&t.cfg.Generator, t.Overrides, prometheus.DefaultRegisterer, t.store, log.Logger) + + t.cfg.Generator.Ingest = t.cfg.Ingest + t.cfg.Generator.Ingest.Kafka.ConsumerGroup = generator.ConsumerGroup + + if t.cfg.Target == SingleBinary && len(t.cfg.Generator.AssignedPartitions) == 0 { + // In SingleBinary mode always use partition 0. This is for small installs or local/debugging setups. + t.cfg.Generator.AssignedPartitions = map[string][]int32{t.cfg.Generator.InstanceID: {0}} + } + + genSvc, err := generator.New(&t.cfg.Generator, t.Overrides, prometheus.DefaultRegisterer, t.partitionRing, t.store, log.Logger) if errors.Is(err, generator.ErrUnconfigured) && t.cfg.Target != MetricsGenerator { // just warn if we're not running the metrics-generator level.Warn(log.Logger).Log("msg", "metrics-generator is not configured.", "err", err) return services.NewIdleService(nil, nil), nil @@ -294,6 +333,24 @@ func (t *App) initGenerator() (services.Service, error) { return t.generator, nil } +func (t *App) initBlockBuilder() (services.Service, error) { + if !t.cfg.Ingest.Enabled { + return services.NewIdleService(nil, nil), nil + } + + t.cfg.BlockBuilder.IngestStorageConfig = t.cfg.Ingest + t.cfg.BlockBuilder.IngestStorageConfig.Kafka.ConsumerGroup = blockbuilder.ConsumerGroup + + if t.cfg.Target == SingleBinary && len(t.cfg.BlockBuilder.AssignedPartitions) == 0 { + // In SingleBinary mode always use partition 0. This is for small installs or local/debugging setups. + t.cfg.BlockBuilder.AssignedPartitions = map[string][]int32{t.cfg.BlockBuilder.InstanceID: {0}} + } + + t.blockBuilder = blockbuilder.New(t.cfg.BlockBuilder, log.Logger, t.partitionRing, t.Overrides, t.store) + + return t.blockBuilder, nil +} + func (t *App) initQuerier() (services.Service, error) { // validate worker config // if we're not in single binary mode and worker address is not specified - bail @@ -463,6 +520,13 @@ func (t *App) initOptionalStore() (services.Service, error) { } func (t *App) initStore() (services.Service, error) { + // the only component that needs a functioning tempodb pool are the queriers. all other components will just spin up + // hundreds of never used pool goroutines. set pool size to 0 here to avoid that. 
+ if t.cfg.Target != Querier && t.cfg.Target != SingleBinary && t.cfg.Target != ScalableSingleBinary { + t.cfg.StorageConfig.Trace.Pool.MaxWorkers = 0 + t.cfg.StorageConfig.Trace.Pool.QueueDepth = 0 + } + store, err := tempo_storage.NewStore(t.cfg.StorageConfig, t.cacheProvider, log.Logger) if err != nil { return nil, fmt.Errorf("failed to create store: %w", err) @@ -475,10 +539,11 @@ func (t *App) initStore() (services.Service, error) { func (t *App) initMemberlistKV() (services.Service, error) { reg := prometheus.DefaultRegisterer t.cfg.MemberlistKV.MetricsNamespace = metricsNamespace - t.cfg.MemberlistKV.Codecs = []codec.Codec{ + t.cfg.MemberlistKV.Codecs = append(t.cfg.MemberlistKV.Codecs, ring.GetCodec(), + ring.GetPartitionRingCodec(), usagestats.JSONCodec, - } + ) dnsProviderReg := prometheus.WrapRegistererWithPrefix( "tempo_", @@ -492,6 +557,7 @@ func (t *App) initMemberlistKV() (services.Service, error) { t.MemberlistKV = memberlist.NewKVInitService(&t.cfg.MemberlistKV, log.Logger, dnsProvider, reg) t.cfg.Ingester.LifecyclerConfig.RingConfig.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV + t.cfg.Ingester.IngesterPartitionRing.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV t.cfg.Generator.Ring.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV t.cfg.Distributor.DistributorRing.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV t.cfg.Compactor.ShardingRing.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV @@ -574,6 +640,7 @@ func (t *App) setupModuleManager() error { mm.RegisterModule(IngesterRing, t.initIngesterRing, modules.UserInvisibleModule) mm.RegisterModule(MetricsGeneratorRing, t.initGeneratorRing, modules.UserInvisibleModule) mm.RegisterModule(SecondaryIngesterRing, t.initSecondaryIngesterRing, modules.UserInvisibleModule) + mm.RegisterModule(PartitionRing, t.initPartitionRing, modules.UserInvisibleModule) mm.RegisterModule(Common, nil, modules.UserInvisibleModule) @@ -583,6 +650,7 @@ func (t *App) setupModuleManager() error { mm.RegisterModule(QueryFrontend, t.initQueryFrontend) mm.RegisterModule(Compactor, t.initCompactor) mm.RegisterModule(MetricsGenerator, t.initGenerator) + mm.RegisterModule(BlockBuilder, t.initBlockBuilder) mm.RegisterModule(SingleBinary, nil) mm.RegisterModule(ScalableSingleBinary, nil) @@ -599,18 +667,21 @@ func (t *App) setupModuleManager() error { IngesterRing: {Server, MemberlistKV}, SecondaryIngesterRing: {Server, MemberlistKV}, MetricsGeneratorRing: {Server, MemberlistKV}, + PartitionRing: {MemberlistKV, Server, IngesterRing}, Common: {UsageReport, Server, Overrides}, // individual targets QueryFrontend: {Common, Store, OverridesAPI}, - Distributor: {Common, IngesterRing, MetricsGeneratorRing}, - Ingester: {Common, Store, MemberlistKV}, - MetricsGenerator: {Common, OptionalStore, MemberlistKV}, + Distributor: {Common, IngesterRing, MetricsGeneratorRing, PartitionRing}, + Ingester: {Common, Store, MemberlistKV, PartitionRing}, + MetricsGenerator: {Common, OptionalStore, MemberlistKV, PartitionRing}, Querier: {Common, Store, IngesterRing, MetricsGeneratorRing, SecondaryIngesterRing}, Compactor: {Common, Store, MemberlistKV}, + BlockBuilder: {Common, Store, MemberlistKV, PartitionRing}, + // composite targets - SingleBinary: {Compactor, QueryFrontend, Querier, Ingester, Distributor, MetricsGenerator}, + SingleBinary: {Compactor, QueryFrontend, Querier, Ingester, Distributor, MetricsGenerator, BlockBuilder}, ScalableSingleBinary: {SingleBinary}, } diff --git a/docs/make-docs b/docs/make-docs index 2dc6726d919..ba448ce0727 
100755 --- a/docs/make-docs +++ b/docs/make-docs @@ -6,6 +6,20 @@ # [Semantic versioning](https://semver.org/) is used to help the reader identify the significance of changes. # Changes are relevant to this script and the support docs.mk GNU Make interface. # +# ## 8.3.0 (2024-12-27) +# +# ### Added +# +# - Debug output of the final command when DEBUG=true. +# +# Useful to inspect if the script is correctly constructing the final command. +# +# ## 8.2.0 (2024-12-22) +# +# ### Removed +# +# - Special cases for Oracle and Datadog plugins now that they exist in the plugins monorepo. +# # ## 8.1.0 (2024-08-22) # # ### Added @@ -13,7 +27,7 @@ # - Additional website mounts for projects that use the website repository. # # Mounts are required for `make docs` to work in the website repository or with the website project. -# The Makefile is also mounted for convenient development of the procedure that repository. +# The Makefile is also mounted for convenient development of the procedure in that repository. # # ## 8.0.1 (2024-07-01) # @@ -355,8 +369,6 @@ SOURCES_grafana_cloud_frontend_observability_faro_web_sdk='faro-web-sdk' SOURCES_helm_charts_mimir_distributed='mimir' SOURCES_helm_charts_tempo_distributed='tempo' SOURCES_opentelemetry='opentelemetry-docs' -SOURCES_plugins_grafana_datadog_datasource='datadog-datasource' -SOURCES_plugins_grafana_oracle_datasource='oracle-datasource' SOURCES_resources='website' VERSIONS_as_code='UNVERSIONED' @@ -367,8 +379,6 @@ VERSIONS_grafana_cloud_k6='UNVERSIONED' VERSIONS_grafana_cloud_data_configuration_integrations='UNVERSIONED' VERSIONS_grafana_cloud_frontend_observability_faro_web_sdk='UNVERSIONED' VERSIONS_opentelemetry='UNVERSIONED' -VERSIONS_plugins_grafana_datadog_datasource='latest' -VERSIONS_plugins_grafana_oracle_datasource='latest' VERSIONS_resources='UNVERSIONED' VERSIONS_technical_documentation='UNVERSIONED' VERSIONS_website='UNVERSIONED' @@ -378,8 +388,6 @@ PATHS_grafana_cloud='content/docs/grafana-cloud' PATHS_helm_charts_mimir_distributed='docs/sources/helm-charts/mimir-distributed' PATHS_helm_charts_tempo_distributed='docs/sources/helm-charts/tempo-distributed' PATHS_mimir='docs/sources/mimir' -PATHS_plugins_grafana_datadog_datasource='docs/sources' -PATHS_plugins_grafana_oracle_datasource='docs/sources' PATHS_resources='content' PATHS_tempo='docs/sources/tempo' PATHS_website='content' @@ -631,7 +639,7 @@ POSIX_HERESTRING case "${_project}" in # Workaround for arbitrary mounts where the version field is expected to be the local directory - # and the repo field is expected to be the container directory. + # and the repo field is expected to be the container directory. arbitrary) echo "${_project}^${_version}^${_repo}^" # TODO ;; @@ -801,6 +809,10 @@ case "${image}" in | sed "s#$(proj_dst "${proj}")#sources#" EOF + if [ -n "${DEBUG}" ]; then + debg "${cmd}" + fi + case "${OUTPUT_FORMAT}" in human) if ! 
command -v jq >/dev/null 2>&1; then
@@ -837,6 +849,10 @@ EOF
     /hugo/content/docs
 EOF
 
+  if [ -n "${DEBUG}" ]; then
+    debg "${cmd}"
+  fi
+
 case "${OUTPUT_FORMAT}" in
   human)
     ${cmd} --output=line \
diff --git a/docs/sources/helm-charts/tempo-distributed/get-started-helm-charts/_index.md b/docs/sources/helm-charts/tempo-distributed/get-started-helm-charts/_index.md
index 738ceba9bba..fc19b4d7b0d 100644
--- a/docs/sources/helm-charts/tempo-distributed/get-started-helm-charts/_index.md
+++ b/docs/sources/helm-charts/tempo-distributed/get-started-helm-charts/_index.md
@@ -464,6 +464,53 @@ To configure TLS with the Helm chart, you must have a TLS key-pair and CA certif
 
 For instructions, refer to [Configure TLS with Helm](https://grafana.com/docs/tempo/<TEMPO_VERSION>/configuration/network/tls/).
 
+### Optional: Use global or per-tenant overrides
+
+The `tempo-distributed` Helm chart provides a mechanism for users to set global or per-tenant override settings:
+
+* Global overrides come under the `global_overrides` property, which pertains to the standard overrides.
+* Per-tenant overrides come under the `overrides` property, and allow specific tenants to alter the configuration associated with them via tenant-specific runtime overrides. The Helm chart generates a `/runtime/overrides.yaml` configuration file for all per-tenant configuration.
+
+These overrides correlate to the standard (global) and tenant-specific (`per_tenant_override_config`) overrides in Tempo and GET configuration.
+For more information about overrides, refer to the [Overrides configuration](https://grafana.com/docs/tempo/<TEMPO_VERSION>/configuration/#overrides) documentation.
+
+Overrides can be used with both GET and Tempo.
+
+The following example configuration sets some global configuration options, as well as a set of options for a specific tenant:
+
+```yaml
+global_overrides:
+  defaults:
+    ingestion:
+      rate_limit_bytes: 5 * 1000 * 1000
+      burst_size_bytes: 5 * 1000 * 1000
+      max_traces_per_user: 1000
+    global:
+      max_bytes_per_trace: 10 * 1000 * 1000
+
+    metrics_generator:
+      processors: ['service-graphs', 'span-metrics']
+
+overrides:
+  '1234':
+    ingestion:
+      rate_limit_bytes: 2 * 1000 * 1000
+      burst_size_bytes: 2 * 1000 * 1000
+      max_traces_per_user: 400
+    global:
+      max_bytes_per_trace: 5 * 1000 * 1000
+```
+
+This configuration:
+
+* Enables the Span Metrics and Service Graph metrics-generator processors for all tenants
+* Sets an ingestion rate and burst-size limit of 5MB/s, a maximum trace size of 10MB, and a maximum of 1000 live traces in an ingester for all tenants
+* Overrides the '1234' tenant with a rate and burst-size limit of 2MB/s, a maximum trace size of 5MB, and a maximum of 400 live traces in an ingester
+
+{{< admonition type="note" >}}
+Runtime configurations should include all options for a specific tenant.
+{{< /admonition >}}
+
 ## Install Grafana Tempo using the Helm chart
 
 Use the following command to install Tempo using the configuration options you've specified in the `custom.yaml` file:
diff --git a/docs/sources/tempo/api_docs/_index.md b/docs/sources/tempo/api_docs/_index.md
index 346e85cf3d2..2729c636f51 100644
--- a/docs/sources/tempo/api_docs/_index.md
+++ b/docs/sources/tempo/api_docs/_index.md
@@ -7,6 +7,9 @@ weight: 800
 
 # Tempo HTTP API
 
+
+
+
 Tempo exposes an API for pushing and querying traces, and operating the cluster itself.
 For the sake of clarity, API endpoints are grouped by service.
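For example, two of the endpoints documented below make a quick spot-check of a local instance (3200 is the default Tempo HTTP port assumed throughout the examples in this document):

```bash
# Readiness probe: returns 200 when the instance is ready to serve traffic
curl -s http://localhost:3200/ready

# Build information: version, revision, branch, and Go version
curl -s http://localhost:3200/api/status/buildinfo | jq
```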
@@ -15,10 +18,11 @@ These endpoints are exposed both when running Tempo in microservices and monolit - **microservices**: each service exposes its own endpoints - **monolithic**: the Tempo process exposes all API endpoints for the services running internally -For externally supported GRPC API, [see below](#tempo-grpc-api). +For externally supported gRPC API, [refer to Tempo gRPC API](#tempo-grpc-api). ## Endpoints + | API | Service | Type | Endpoint | | --- | ------- | ---- | -------- | | [Readiness probe](#readiness-probe) | _All services_ | HTTP | `GET /ready` | @@ -48,6 +52,8 @@ For externally supported GRPC API, [see below](#tempo-grpc-api). _(*) This endpoint isn't always available, check the specific section for more details._ + + ### Readiness probe ``` @@ -63,9 +69,10 @@ GET /metrics ``` Returns the metrics for the running Tempo service in the Prometheus exposition format. - + ### Pprof + ``` GET /debug/pprof/heap GET /debug/pprof/block @@ -78,7 +85,9 @@ GET /debug/pprof/mutex Returns the runtime profiling data in the format expected by the pprof visualization tool. There are many things which can be profiled using this including heap, trace, goroutine, etc. -_For more information, please check out the official documentation of [pprof](https://golang.org/pkg/net/http/pprof/)._ +For more information, refer to the official documentation of [pprof](https://golang.org/pkg/net/http/pprof/). + + ### Ingest @@ -88,12 +97,12 @@ Agent, OpenTelemetry Collector, or Jaeger Agent. | Protocol | Type | Docs | | -------- | ---- | ---- | -| OpenTelemetry | GRPC | [Link](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/otlp.md) | +| OpenTelemetry | gRPC | [Link](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/otlp.md) | | OpenTelemetry | HTTP | [Link](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/otlp.md) | | Jaeger | Thrift Compact | [Link](https://www.jaegertracing.io/docs/latest/apis/#span-reporting-apis) | | Jaeger | Thrift Binary | [Link](https://www.jaegertracing.io/docs/latest/apis/#span-reporting-apis) | | Jaeger | Thrift HTTP | [Link](https://www.jaegertracing.io/docs/latest/apis/#span-reporting-apis) | -| Jaeger | GRPC | [Link](https://www.jaegertracing.io/docs/latest/apis/#span-reporting-apis) | +| Jaeger | gRPC | [Link](https://www.jaegertracing.io/docs/latest/apis/#span-reporting-apis) | | Zipkin | HTTP | [Link](https://zipkin.io/zipkin-api/) | For information on how to use the Zipkin endpoint with curl (for debugging purposes), refer to [Pushing spans with HTTP]({{< relref "./pushing-spans-with-http" >}}). @@ -240,7 +249,7 @@ Example of how to query Tempo using curl. This query returns all traces that have their status set to error. ```bash -$ curl -G -s http://localhost:3200/api/search --data-urlencode 'q={ status=error }' | jq +curl -G -s http://localhost:3200/api/search --data-urlencode 'q={ status=error }' | jq { "traces": [ { @@ -282,7 +291,7 @@ Example of how to query Tempo using curl. This query returns all traces that have a tag `service.name` containing `cartservice` and a minimum duration of 600 ms. 
```bash
-$ curl -G -s http://localhost:3200/api/search --data-urlencode 'tags=service.name=cartservice' --data-urlencode minDuration=600ms | jq
+curl -G -s http://localhost:3200/api/search --data-urlencode 'tags=service.name=cartservice' --data-urlencode minDuration=600ms | jq
 {
   "traces": [
     {
@@ -327,7 +336,7 @@ Example of how to query Tempo using curl.
 This query returns all discovered tag names.
 
 ```bash
-$ curl -G -s http://localhost:3200/api/search/tags?scope=span | jq
+curl -G -s http://localhost:3200/api/search/tags?scope=span | jq
 {
   "tagNames": [
     "host.name",
@@ -360,15 +369,21 @@ Parameters:
   Optional. Along with `end`, defines a time range from which tags should be returned.
 - `end = (unix epoch seconds)`
   Optional. Along with `start`, defines a time range from which tags should be returned. Providing both `start` and `end` includes blocks for the specified time range only.
-
+- `limit = (integer)`
+  Optional. Limits the maximum number of tag values.
+- `maxStaleValues = (integer)`
+  Optional. Limits the search for tag names. If the number of stale (already known) values reaches or exceeds this limit, the search stops. If Tempo processes `maxStaleValues` matches without finding a new tag name, the search returns early.
 
 ### Search tags V2
 
-Ingester configuration `complete_block_timeout` affects how long tags are available for search.If start or end aren't specified, it only fetches blocks that wasn't flushed to backend.
+Ingester configuration `complete_block_timeout` affects how long tags are available for search.
+If the start or end aren't specified, it only fetches blocks that weren't flushed to backend.
 
-This endpoint retrieves all discovered tag names that can be used in search. The endpoint is available in the query frontend service in
-a microservices deployment, or the Tempo endpoint in a monolithic mode deployment. The tags endpoint takes a scope that controls the kinds
-of tags or attributes returned. If nothing is provided, the endpoint returns all resource and span tags.
+This endpoint retrieves all discovered tag names that can be used in search.
+The endpoint is available in the query frontend service in
+a microservices deployment, or the Tempo endpoint in a monolithic mode deployment.
+The tags endpoint takes a scope that controls the kinds of tags or attributes returned.
+If nothing is provided, the endpoint returns all resource and span tags.
 
 ```bash
 GET /api/v2/search/tags?scope=<resource|span|intrinsic>
 ```
@@ -385,6 +400,10 @@ Parameters:
   Optional. Along with `end` define a time range from which tags should be returned.
 - `end = (unix epoch seconds)`
   Optional. Along with `start` define a time range from which tags should be returned. Providing both `start` and `end` includes blocks for the specified time range only.
+- `limit = (integer)`
+  Optional. Sets the maximum number of tag names allowed per scope. The query stops once this limit is reached for any scope.
+- `maxStaleValues = (integer)`
+  Optional. Limits the search for tag values. The search stops if the number of stale (already known) values reaches or exceeds this limit.
 
 #### Example
 
@@ -392,7 +411,7 @@ Example of how to query Tempo using curl.
 This query returns all discovered tag names.
 
 ```bash
-$ curl -G -s http://localhost:3200/api/v2/search/tags | jq
+curl -G -s http://localhost:3200/api/v2/search/tags | jq
 {
   "scopes": [
     {
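As a sketch of combining the new `limit` and `maxStaleValues` parameters with an existing scoped query (the values `50` and `1000` are illustrative, not defaults):

```bash
curl -G -s http://localhost:3200/api/v2/search/tags \
  --data-urlencode 'scope=span' \
  --data-urlencode 'limit=50' \
  --data-urlencode 'maxStaleValues=1000' | jq
```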
@@ -494,7 +513,7 @@ Example of how to query Tempo using curl.
 This query returns all discovered values for the tag `service.name`.
 
 ```bash
-$ curl -G -s http://localhost:3200/api/search/tag/service.name/values | jq
+curl -G -s http://localhost:3200/api/search/tag/service.name/values | jq
 {
   "tagValues": [
     "article-service",
@@ -515,20 +534,24 @@ Parameters:
   Optional. Along with `end`, defines a time range from which tags should be returned.
 - `end = (unix epoch seconds)`
   Optional. Along with `start`, defines a time range from which tags should be returned. Providing both `start` and `end` includes blocks for the specified time range only.
+- `limit = (integer)`
+  Optional. Limits the maximum number of tag values.
+- `maxStaleValues = (integer)`
+  Optional. Limits the search for tag values. If the number of stale (already known) values reaches or exceeds this limit, the search stops. If Tempo processes `maxStaleValues` matches without finding a new tag value, the search returns early.
 
 ### Search tag values V2
 
 This endpoint retrieves all discovered values and their data types for the given TraceQL identifier. The endpoint is available in the query frontend service in
 a microservices deployment, or the Tempo endpoint in a monolithic mode deployment. This endpoint is similar to `/api/search/tag/<tag>/values` but operates on TraceQL identifiers and types.
-See [TraceQL]({{< relref "../traceql" >}}) documentation for more information.
+Refer to [TraceQL]({{< relref "../traceql" >}}) documentation for more information.
 
 #### Example
 
 This example queries Tempo using curl and returns all discovered values for the tag `service.name`.
 
 ```bash
-$ curl -G -s http://localhost:3200/api/v2/search/tag/.service.name/values | jq
+curl -G -s http://localhost:3200/api/v2/search/tag/.service.name/values | jq
 {
   "tagValues": [
     {
@@ -561,7 +584,18 @@ $ curl -G -s http://localhost:3200/api/v2/search/tag/.service.name/values | jq
     }
   }
 }
 ```
-This endpoint can also receive `start` and `end` optional parameters. These parameters define the time range from which the tags are fetched
+
+Parameters:
+- `start = (unix epoch seconds)`
+  Optional. Along with `end`, defines a time range from which tag values should be returned.
+- `end = (unix epoch seconds)`
+  Optional. Along with `start`, defines a time range from which tag values should be returned. Providing both `start` and `end` includes blocks for the specified time range only.
+- `q = (traceql query)`
+  Optional. A TraceQL query to filter tag values by. Currently only works for a single spanset of `&&`ed conditions. For example: `{ span.foo = "bar" && resource.baz = "bat" ...}`. Refer to [Filtered tag values](#filtered-tag-values).
+- `limit = (integer)`
+  Optional. Limits the maximum number of tag values.
+- `maxStaleValues = (integer)`
+  Optional. Limits the search for tag values. If the number of stale (already known) values reaches or exceeds this limit, the search stops. If Tempo processes `maxStaleValues` matches without finding a new tag value, the search returns early.
 
 #### Filtered tag values
 
@@ -569,16 +603,21 @@ You can pass an optional URL query parameter, `q`, to your request.
 The `q` parameter is a URL-encoded [TraceQL query]({{< relref "../traceql" >}}).
 If provided, the tag values returned by the API are filtered to only return values seen on spans matching your filter parameters.
 
-Queries can be incomplete: for example, `{ .cluster = }`. Tempo extracts only the valid matchers and build a valid query.
+Queries can be incomplete: for example, `{ resource.cluster = }`.
+Tempo extracts only the valid matchers and builds a valid query.
+If an input is invalid, Tempo doesn't return an error. Instead,
+you'll see the whole, unfiltered list of values when parsing the input fails. This behavior helps with backwards compatibility.
 
 Only queries with a single selector `{}` and AND `&&` operators are supported.
-  - Example supported: `{ .cluster = "us-east-1" && .service = "frontend" }`
-  - Example unsupported: `{ .cluster = "us-east-1" || .service = "frontend" } && { .cluster = "us-east-2" }`
+  - Example supported: `{ resource.cluster = "us-east-1" && resource.service = "frontend" }`
+  - Example unsupported: `{ resource.cluster = "us-east-1" || resource.service = "frontend" } && { resource.cluster = "us-east-2" }`
+
+Unscoped attributes aren't supported for filtered tag values.
 
 The following request returns all discovered service names on spans with `span.http.method=GET`:
 
 ```
-GET /api/v2/search/tag/.service.name/values?q="{span.http.method='GET'}"
+GET /api/v2/search/tag/resource.service.name/values?q="{span.http.method='GET'}"
 ```
 
 If a particular service name (for example, `shopping-cart`) is only present on spans with `span.http.method=POST`, it won't be included in the list of values returned.
@@ -602,7 +641,7 @@ Parameters:
 - `step = (duration string)`
   Optional. Defines the granularity of the returned time-series. For example, `step=15s` returns a data point every 15s within the time range. If not specified, then the default behavior chooses a dynamic step based on the time range.
 - `exemplars = (integer)`
-  Optional. Defines the maximun number of exemplars for the query. It will be trimmed to max_exemplars if exceed it.
+  Optional. Defines the maximum number of exemplars for the query. It's trimmed to `max_exemplars` if it exceeds it.
 
 The API is available in the query frontend service in
 a microservices deployment, or the Tempo endpoint in a monolithic mode deployment.
 
 For example, the following request computes the rate of spans received for `myservice` over the last three hours, at 1 minute intervals.
 
 {{< admonition type="note" >}}
-Actual API parameters must be url-encoded. This example is left unencoded for readability.
+Actual API parameters must be URL-encoded. This example is left unencoded for readability.
 {{< /admonition >}}
 
 ```
 GET /api/metrics/query_range?q={resource.service.name="myservice"}|rate()&since=3h&step=1m
 ```
 
@@ -632,7 +671,7 @@ Parameters:
 - `end = (unix epoch seconds | unix epoch nanoseconds | RFC3339 string)`
   Optional. Along with `start` define the time range. Providing both `start` and `end` includes blocks for the specified time range only.
 - `since = (duration string)`
-  Optional. Can be used instead of `start` and `end` to define the time range in relative values. For example `since=15m` will query the last 15 minutes. Default is last 1 hour.
+  Optional. Can be used instead of `start` and `end` to define the time range in relative values. For example, `since=15m` queries the last 15 minutes. Default is last 1 hour.
 
 The API is available in the query frontend service in
 a microservices deployment, or the Tempo endpoint in a monolithic mode deployment.
 
 For example the following request computes the total number of failed spans over the last hour per service.
 
 {{< admonition type="note" >}}
-Actual API parameters must be url-encoded. This example is left unencoded for readability.
-{{% /admonition %}}
+Actual API parameters must be URL-encoded. This example is left unencoded for readability.
+{{< /admonition >}} ``` GET /api/metrics/query?q={status=error}|count_over_time()by(resource.service.name) @@ -657,11 +696,11 @@ Returns status code 200 and body `echo` when the query frontend is up and ready {{< admonition type="note" >}} Meant to be used in a Query Visualization UI like Grafana to test that the Tempo data source is working. -{{% /admonition %}} +{{< /admonition >}} ### Overrides API -For more information about user-configurable overrides API, refer to the [user-configurable overrides]({{< relref "../operations/user-configurable-overrides#api" >}}) documentation. +For more information about user-configurable overrides API, refer to the [user-configurable overrides](https://grafana.com/docs/tempo//operations/manage-advanced-systems/user-configurable-overrides/#api) documentation. ### Flush @@ -688,13 +727,13 @@ ingester service. {{< admonition type="note" >}} This is usually used at the time of scaling down a cluster. -{{% /admonition %}} +{{< /admonition >}} ### Usage metrics {{< admonition type="note" >}} This endpoint is only available when one or more usage trackers are enabled in [the distributor]({{< relref "../configuration#distributor" >}}). -{{% /admonition %}} +{{< /admonition >}} ``` GET /usage_metrics @@ -717,8 +756,8 @@ tempo_usage_tracker_bytes_received_total{service="service-A",tenant="single-tena ### Distributor ring status {{< admonition type="note" >}} -This endpoint is only available when Tempo is configured with [the global override strategy]({{< relref "../configuration#overrides" >}}). -{{% /admonition %}} +This endpoint is only available when Tempo is configured with [the global override strategy](https://grafana.com/docs/tempo//configuration/#overrides). +{{< /admonition >}} ``` GET /distributor/ring @@ -727,7 +766,7 @@ GET /distributor/ring Displays a web page with the distributor hash ring status, including the state, healthy, and last heartbeat time of each distributor. -_For more information, check the page on [consistent hash ring]({{< relref "../operations/consistent_hash_ring" >}})._ +For more information, refer to [consistent hash ring](https://grafana.com/docs/tempo//operations/manage-advanced-systems/consistent_hash_ring/). ### Ingesters ring status @@ -737,7 +776,7 @@ GET /ingester/ring Displays a web page with the ingesters hash ring status, including the state, healthy, and last heartbeat time of each ingester. -_For more information, check the page on [consistent hash ring]({{< relref "../operations/consistent_hash_ring" >}}). +For more information, refer to [consistent hash ring](http://grafana.com/docs/tempo//operations/manage-advanced-systems/consistent_hash_ring/). ### Metrics-generator ring status @@ -749,7 +788,7 @@ Displays a web page with the metrics-generator hash ring status, including the s This endpoint is only available when the metrics-generator is enabled. Refer to [metrics-generator]({{< relref "../configuration#metrics-generator" >}}). -For more information, refer to [consistent hash ring]({{< relref "../operations/consistent_hash_ring" >}}). +For more information, refer to [consistent hash ring](http://grafana.com/docs/tempo//operations/manage-advanced-systems/consistent_hash_ring/). ### Compactor ring status @@ -757,9 +796,9 @@ For more information, refer to [consistent hash ring]({{< relref "../operations/ GET /compactor/ring ``` -Displays a web page with the compactor hash ring status, including the state, healthy and last heartbeat time of each compactor. 
+Displays a web page with the compactor hash ring status, including the state, healthy, and last heartbeat time of each compactor.
 
-For more information, refer to [consistent hash ring]({{< relref "../operations/consistent_hash_ring" >}}).
+For more information, refer to [consistent hash ring](http://grafana.com/docs/tempo/<TEMPO_VERSION>/operations/manage-advanced-systems/consistent_hash_ring/).
 
 ### Status
 
@@ -793,7 +832,7 @@ GET /status/config
 
 Displays the configuration.
 
 Displays the configuration currently applied to Tempo (in YAML format), including default values and settings via CLI flags.
-Sensitive data is masked. Please be aware that the exported configuration **doesn't include the per-tenant overrides**.
+Sensitive data is masked. Be aware that the exported configuration **doesn't include the per-tenant overrides**.
 
 Optional query parameter:
 
@@ -834,18 +873,18 @@ GET /api/status/buildinfo
 ```
 
 Exposes the build information in a JSON object. The fields are `version`, `revision`, `branch`, `buildDate`, `buildUser`, and `goVersion`.
 
-## Tempo GRPC API
+## Tempo gRPC API
 
-Tempo uses GRPC to internally communicate with itself, but only has one externally supported client.
+Tempo uses [gRPC](https://grpc.io) to communicate internally, but it has only one externally supported client.
 The query-frontend component implements the streaming querier interface defined below.
 
-[See here](https://github.com/grafana/tempo/blob/main/pkg/tempopb/) for the complete proto definition and generated code.
+Refer to the [complete proto definition and generated code](https://github.com/grafana/tempo/blob/main/pkg/tempopb/).
 
-By default, this service is only offered over the GRPC port.
+By default, this service is only offered over the gRPC port.
 You can also use the streaming service over the HTTP port, which is what Grafana expects.
 To enable the streaming service over the HTTP port for use with Grafana, set the following:
 
-```
+```yaml
 stream_over_http_enabled: true
 ```
 
@@ -861,3 +900,4 @@ service StreamingQuerier {
   rpc MetricsQueryRange(QueryRangeRequest) returns (stream QueryRangeResponse) {}
 }
+
\ No newline at end of file
diff --git a/docs/sources/tempo/api_docs/metrics-summary.md b/docs/sources/tempo/api_docs/metrics-summary.md
index 4fac045d58c..f3fd8ed2553 100644
--- a/docs/sources/tempo/api_docs/metrics-summary.md
+++ b/docs/sources/tempo/api_docs/metrics-summary.md
@@ -12,7 +12,7 @@ weight: 600
 
 {{< admonition type="warning" >}}
 The metrics summary API is deprecated as of Tempo 2.7. Features powered by the metrics summary API, like the [Aggregate by table](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/tempo/query-editor/traceql-search/#optional-use-aggregate-by), are also deprecated in Grafana Cloud and Grafana 11.3 and later. It will be removed in a future release.
-{{% /admonition %}}
+{{< /admonition >}}
 
 This document explains how to use the metrics summary API in Tempo.
 This API returns RED metrics (span count, erroring span count, and latency information) for `kind=server` spans sent to Tempo in the last hour, grouped by a user-specified attribute.
 
@@ -122,7 +122,7 @@ The response is returned as JSON following [standard protobuf->JSON mapping rule
 
 {{< admonition type="note" >}}
 The `uint64` fields cannot be fully expressed by JSON numeric values so the fields are serialized as strings.
-{{% /admonition %}}
+{{< /admonition >}}
 
 Example:
diff --git a/docs/sources/tempo/configuration/_index.md b/docs/sources/tempo/configuration/_index.md
index 60cd6d59c98..442e409997b 100644
--- a/docs/sources/tempo/configuration/_index.md
+++ b/docs/sources/tempo/configuration/_index.md
@@ -11,7 +11,7 @@ This document explains the configuration options for Tempo as well as the detail
 
 {{< admonition type="tip" >}}
 Instructions for configuring Tempo data sources are available in the [Grafana Cloud](/docs/grafana-cloud/send-data/traces/) and [Grafana](/docs/grafana/latest/datasources/tempo/) documentation.
-{{% /admonition %}}
+{{< /admonition >}}
 
 The Tempo configuration options include:
 
@@ -19,9 +19,13 @@ The Tempo configuration options include:
   - [Use environment variables in the configuration](#use-environment-variables-in-the-configuration)
   - [Server](#server)
   - [Distributor](#distributor)
+    - [Set max attribute size to help control out of memory errors](#set-max-attribute-size-to-help-control-out-of-memory-errors)
   - [Ingester](#ingester)
   - [Metrics-generator](#metrics-generator)
   - [Query-frontend](#query-frontend)
+    - [Limit query size to improve performance and stability](#limit-query-size-to-improve-performance-and-stability)
+      - [Limit the spans per spanset](#limit-the-spans-per-spanset)
+      - [Cap the maximum query length](#cap-the-maximum-query-length)
   - [Querier](#querier)
   - [Compactor](#compactor)
   - [Storage](#storage)
@@ -229,6 +233,11 @@ distributor:
   # instruct the client how to retry.
   [retry_after_on_resource_exhausted: <duration> | default = '0' ]
 
+  # Optional.
+  # Configures the maximum size an attribute can be. Any key or value that exceeds this limit is truncated before storing.
+  # Setting this parameter to '0' disables this check against attribute size.
+  [max_span_attr_byte: <int> | default = '2048']
+
   # Optional.
   # Configures usage trackers in the distributor which expose metrics of ingested traffic grouped by configurable
   # attributes exposed on /usage_metrics.
@@ -243,6 +252,21 @@ distributor:
       [stale_duration: <duration> | default = 15m0s]
 ```
 
+### Set max attribute size to help control out of memory errors
+
+Tempo queriers can run out of memory when fetching traces that have spans with very large attributes.
+This issue has been observed when trying to fetch a single trace using the [`tracebyID` endpoint](https://grafana.com/docs/tempo/<TEMPO_VERSION>/api_docs/#query).
+While a trace might not have many spans (roughly 500), it can still be large (approximately 250KB)
+if some of its spans have attributes whose values are very large.
+
+To avoid these out-of-memory crashes, use `max_span_attr_byte` to limit the maximum allowable size of any individual attribute.
+Any keys or values that exceed the configured limit are truncated before storing.
+The default value is `2048`.
+
+Use the `tempo_distributor_attributes_truncated_total` metric to track how many attributes are truncated.
+
+For additional information, refer to [Troubleshoot out-of-memory errors](https://grafana.com/docs/tempo/<TEMPO_VERSION>/troubleshooting/out-of-memory-errors/).
+
 ## Ingester
 
 For more information on configuration options, refer to [this file](https://github.com/grafana/tempo/blob/main/modules/ingester/config.go).
 
@@ -307,7 +331,7 @@ If you want to enable metrics-generator for your Grafana Cloud account, refer to
 You can limit spans with end times that occur within a configured duration to be considered in metrics generation using `metrics_ingestion_time_range_slack`.
 In Grafana Cloud, this value defaults to 30 seconds so all spans sent to the metrics-generator more than 30 seconds in the past are discarded or rejected.
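If you run Tempo yourself and need a different slack window, a minimal sketch of the setting follows (the option name comes from the paragraph above; its placement under the top-level `metrics_generator` block is assumed, so verify it against your Tempo version):

```yaml
metrics_generator:
  # Spans whose end time is further in the past than this duration
  # are not considered for metrics generation.
  metrics_ingestion_time_range_slack: 30s
```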
-For more information about the `local-blocks` configuration option, refer to [TraceQL metrics](https://grafana.com/docs/tempo/latest/operations/traceql-metrics/#configure-the-local-blocks-processor).
+For more information about the `local-blocks` configuration option, refer to [TraceQL metrics](https://grafana.com/docs/tempo/<TEMPO_VERSION>/operations/traceql-metrics/#activate-and-configure-the-local-blocks-processor).
 
 ```yaml
 # Metrics-generator configuration block
@@ -593,6 +617,10 @@ query_frontend:
     # A list of regular expressions for refusing matching requests, these will apply for every request regardless of the endpoint.
     [url_deny_list: | default = ]]
 
+    # Max allowed TraceQL expression size, in bytes. Queries bigger than this size will be rejected.
+    # (default: 128 KiB)
+    [max_query_expression_size_bytes: <int> | default = 131072]
+
     search:
 
         # The number of concurrent jobs to execute when searching the backend.
@@ -640,14 +668,17 @@ query_frontend:
         # The number of shards to break ingester queries into.
         [ingester_shards: <int> | default = 3]
-
+
+        # The maximum allowed number of spans per span set. 0 disables this limit.
+        [max_spans_per_span_set: <int> | default = 100]
+
     # SLO configuration for Metadata (tags and tag values) endpoints.
     metadata_slo:
         # If set to a non-zero value, its value will be used to decide if a metadata query is within SLO or not.
        # Query is within SLO if it returned 200 within duration_slo seconds OR processed throughput_slo bytes/s data.
        # NOTE: Requires `duration_slo` AND `throughput_bytes_slo` to be configured.
        [duration_slo: <duration> | default = 0s ]
-
+
       # If set to a non-zero value, its value will be used to decide if a metadata query is within SLO or not.
       # Query is within SLO if it returned 200 within duration_slo seconds OR processed throughput_slo bytes/s data.
       [throughput_bytes_slo: <float> | default = 0 ]
@@ -680,7 +711,7 @@ query_frontend:
     # Maximum number of exemplars per range query. Limited to 100.
     [max_exemplars: <int> | default = 100 ]
-
+
     # query_backend_after controls where the query-frontend searches for traces.
     # Time ranges older than query_backend_after will be searched in the backend/object storage only.
     # Time ranges between query_backend_after and now will be queried from the metrics-generators.
@@ -700,6 +731,37 @@
 ```
 
+### Limit query size to improve performance and stability
+
+Querying large tracing data presents several challenges.
+Span sets with a large number of spans impact query performance and stability.
+Similarly, excessively large query results can also negatively impact query performance.
+
+#### Limit the spans per spanset
+
+You can set the maximum spans per spanset by setting `max_spans_per_span_set` for the query-frontend.
+The default value is 100.
+
+In Grafana or Grafana Cloud, you can use the **Span Limit** field in the [TraceQL query editor](https://grafana.com/docs/grafana-cloud/connect-externally-hosted/data-sources/tempo/query-editor/) in Grafana Explore.
+This field sets the maximum number of spans to return for each span set.
+The maximum value that you can set for the **Span Limit** value (or the `spss` query parameter) is controlled by `max_spans_per_span_set`.
+To disable the maximum spans per span set limit, set `max_spans_per_span_set` to `0`.
+When set to `0`, there is no maximum and users can put any value in **Span Limit**.
+However, this can only be set by a Tempo administrator, not by the user.
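A minimal sketch of the corresponding setting (the nesting under `query_frontend.search` follows the generated manifest later in this diff; `0` is the documented value that disables the limit):

```yaml
query_frontend:
  search:
    # 0 removes the per-spanset span cap entirely
    max_spans_per_span_set: 0
```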
+
+#### Cap the maximum query length
+
+You can set the maximum length of a query using the `query_frontend.max_query_expression_size_bytes` configuration parameter for the query-frontend. The default value is 128 KB.
+
+This limit protects the system's stability from abuse or mistakes when someone runs a large, potentially expensive query.
+
+You can set the value lower or higher in the `query_frontend` configuration section, for example:
+
+```yaml
+query_frontend:
+  max_query_expression_size_bytes: 10000
+```
+
 ## Querier
 
 For more information on configuration options, refer to [this file](https://github.com/grafana/tempo/blob/main/modules/querier/config.go).
 
@@ -739,42 +801,6 @@ querier:
     # Timeout for search requests
     [query_timeout: <duration> | default = 30s]
 
-    # NOTE: The Tempo serverless feature is now deprecated and will be removed in an upcoming release.
-    # A list of external endpoints that the querier will use to offload backend search requests. They must
-    # take and return the same value as /api/search endpoint on the querier. This is intended to be
-    # used with serverless technologies for massive parallelization of the search path.
-    # The default value of "" disables this feature.
-    [external_endpoints: <list of strings> | default = <empty list>]
-
-    # If search_external_endpoints is set then the querier will primarily act as a proxy for whatever serverless backend
-    # you have configured. This setting allows the operator to have the querier prefer itself for a configurable
-    # number of subqueries. In the default case of 2 the querier will process up to 2 search requests subqueries before starting
-    # to reach out to search_external_endpoints.
-    # Setting this to 0 will disable this feature and the querier will proxy all search subqueries to search_external_endpoints.
-    [prefer_self: <int> | default = 10 ]
-
-    # If set to a non-zero value a second request will be issued at the provided duration. Recommended to
-    # be set to p99 of external search requests to reduce long tail latency.
-    # (default: 8s)
-    [external_hedge_requests_at: <duration>]
-
-    # The maximum number of requests to execute when hedging. Requires hedge_requests_at to be set.
-    # (default: 2)
-    [external_hedge_requests_up_to: <int>]
-
-    # The serverless backend to use. If external_backend is set, then authorization credentials will be provided
-    # when querying the external endpoints. "google_cloud_run" is the only value supported at this time.
-    # The default value of "" omits credentials when querying the external backend.
-    [external_backend: <string> | default = ""]
-
-    # Google Cloud Run configuration. Will be used only if the value of external_backend is "google_cloud_run".
-    google_cloud_run:
-        # A list of external endpoints that the querier will use to offload backend search requests. They must
-        # take and return the same value as /api/search endpoint on the querier. This is intended to be
-        # used with serverless technologies for massive parallelization of the search path.
-        # The default value of "" disables this feature.
-        [external_endpoints: <list of strings> | default = <empty list>]
-
     # config of the worker that connects to the query frontend
     frontend_worker:
 
@@ -1740,7 +1766,7 @@ overrides:
         # Cost attribution usage tracker configuration
         cost_attribution:
             # List of attributes to group ingested data by. Map value is optional. Can be used to rename and
-            # combine attributes.
+            # combine attributes.
dimensions: @@ -1828,7 +1854,7 @@ overrides: These tenant-specific overrides are stored in an object store and can be modified using API requests. User-configurable overrides have priority over runtime overrides. -Refer to [user-configurable overrides]{{< relref "../operations/user-configurable-overrides" >}} for more details. +Refer to [user-configurable overrides](https://grafana.com/docs/tempo//operations/manage-advanced-systems/user-configurable-overrides/) for more details. #### Override strategies diff --git a/docs/sources/tempo/configuration/grafana-alloy/tail-based-sampling.md b/docs/sources/tempo/configuration/grafana-alloy/tail-based-sampling.md index 5586ded80ce..82ca68286e3 100644 --- a/docs/sources/tempo/configuration/grafana-alloy/tail-based-sampling.md +++ b/docs/sources/tempo/configuration/grafana-alloy/tail-based-sampling.md @@ -48,6 +48,23 @@ This overhead increases with the number of Alloy instances that share the same t

[Image: Tail-based sampling overview]

+### Sampling load balancing
+
+Tail sampling load balancing is usually carried out by running two layers of collectors: the first layer receives the telemetry data (in this case, trace spans) and then distributes it to the second layer, which carries out the sampling policies.
+
+Alloy includes a [load balancing exporter](https://grafana.com/docs/alloy/latest/reference/components/otelcol/otelcol.exporter.loadbalancing/) that can route to further collector targets based on a routing key (in the case of trace sampling, the `traceID` key).
+Alloy uses the OpenTelemetry load balancing exporter.
+
+The routing key ensures that a specific collector in the second layer always handles spans from the same trace ID, so sampling decisions are made correctly.
+You can configure the exporter with targets via a few different methods,
+including static IPs, multi-IP DNS A record entries, and a Kubernetes headless service resolver.
+This has the advantage of letting you scale the number of second-layer collectors up or down.
+
+There are some important points to note with the load balancer exporter around scaling and resilience, mostly around its eventual consistency model. For more information, refer to [Resilience and scaling considerations](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/loadbalancingexporter/README.md#resilience-and-scaling-considerations).
+The most important point for tail sampling is that routing is based on an algorithm that takes into account the number of backends available to the load balancer, which can affect the target for a trace ID's spans before eventual consistency is reached.
+
+For an example manifest for a two-layer OTel Collector deployment based around Kubernetes services, refer to the [K8s resolver README](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/exporter/loadbalancingexporter/example/k8s-resolver/README.md).
+
 ## Configure tail-based sampling
 
 To start using tail-based sampling, define a sampling policy in your configuration file.
diff --git a/docs/sources/tempo/configuration/manifest.md b/docs/sources/tempo/configuration/manifest.md
index ea9c1f8d682..0c88ee28b74 100644
--- a/docs/sources/tempo/configuration/manifest.md
+++ b/docs/sources/tempo/configuration/manifest.md
@@ -3,6 +3,8 @@ title: Manifest
 description: This manifest lists of all Tempo options and their defaults.
 weight: 110
 ---
+[//]: # 'THIS FILE IS GENERATED AUTOMATICALLY BY go run pkg/docsgen/generate_manifest.go'
+[//]: # 'DO NOT EDIT THIS FILE DIRECTLY'
 
 # Manifest
 
@@ -10,17 +12,8 @@ This document is a reference for all Tempo options and their defaults. If you ar
 started with Tempo, refer to [Tempo examples](https://github.com/grafana/tempo/tree/main/example/docker-compose) and other
 [configuration documentation]({{< relref "../configuration" >}}). Most installations will require only setting 10 to 20 of these options.
 
-It was generated by running Tempo with a minimal configuration and accessing the `/status/config` endpoint:
-```
-go run ./cmd/tempo --storage.trace.backend=local --storage.trace.local.path=/var/tempo/traces --storage.trace.wal.path=/var/tempo/wal
-```
-
 ## Complete configuration
 
-{{< admonition type="note" >}}
-This manifest was generated on 2024-11-18.
-{{% /admonition %}} - ```yaml target: all http_api_prefix: "" @@ -177,7 +170,7 @@ distributor: mirror_timeout: 2s heartbeat_period: 5s heartbeat_timeout: 5m0s - instance_id: local-instance + instance_id: hostname instance_interface_names: - eth0 - en0 @@ -188,11 +181,29 @@ distributor: forwarders: [] usage: cost_attribution: - enabled: false max_cardinality: 10000 stale_duration: 15m0s + kafka_write_path_enabled: false + kafka_config: + address: "" + topic: "" + client_id: "" + dial_timeout: 0s + write_timeout: 0s + sasl_username: "" + sasl_password: "" + consumer_group: "" + consumer_group_offset_commit_interval: 0s + last_produced_offset_retry_timeout: 0s + auto_create_topic_enabled: false + auto_create_topic_default_partitions: 0 + producer_max_record_size_bytes: 0 + producer_max_buffered_bytes: 0 + target_consumer_lag_at_startup: 0s + max_consumer_lag_at_startup: 0s extend_writes: true retry_after_on_resource_exhausted: 0s + max_span_attr_byte: 2048 ingester_client: pool_config: checkinterval: 15s @@ -203,7 +214,7 @@ ingester_client: grpc_client_config: max_recv_msg_size: 104857600 max_send_msg_size: 104857600 - grpc_compression: snappy + grpc_compression: "" rate_limit: 0 rate_limit_burst: 0 backoff_on_ratelimits: false @@ -234,7 +245,7 @@ metrics_generator_client: grpc_client_config: max_recv_msg_size: 104857600 max_send_msg_size: 104857600 - grpc_compression: snappy + grpc_compression: "" rate_limit: 0 rate_limit_burst: 0 backoff_on_ratelimits: false @@ -258,12 +269,6 @@ metrics_generator_client: querier: search: query_timeout: 30s - prefer_self: 10 - external_hedge_requests_at: 8s - external_hedge_requests_up_to: 2 - external_backend: "" - google_cloud_run: null - external_endpoints: [] trace_by_id: query_timeout: 10s metrics: @@ -271,7 +276,7 @@ querier: time_overlap_cutoff: 0.2 max_concurrent_queries: 20 frontend_worker: - frontend_address: 127.0.0.1:9095 + frontend_address: "" dns_lookup_duration: 10s parallelism: 2 match_max_concurrent: true @@ -279,7 +284,7 @@ querier: grpc_client_config: max_recv_msg_size: 104857600 max_send_msg_size: 16777216 - grpc_compression: gzip + grpc_compression: "" rate_limit: 0 rate_limit_burst: 0 backoff_on_ratelimits: false @@ -317,6 +322,7 @@ query_frontend: query_backend_after: 15m0s query_ingesters_until: 30m0s ingester_shards: 3 + max_spans_per_span_set: 100 trace_by_id: query_shards: 50 metrics: @@ -333,6 +339,7 @@ query_frontend: retry_with_weights: true max_traceql_conditions: 4 max_regex_conditions: 1 + max_query_expression_size_bytes: 131072 compactor: ring: kvstore: @@ -369,7 +376,7 @@ compactor: heartbeat_timeout: 1m0s wait_stability_min_duration: 1m0s wait_stability_max_duration: 5m0s - instance_id: local-instance + instance_id: hostname instance_interface_names: - eth0 - en0 @@ -394,7 +401,7 @@ ingester: lifecycler: ring: kvstore: - store: inmemory + store: memberlist prefix: collectors/ consul: host: localhost:8500 @@ -434,16 +441,50 @@ ingester: join_after: 0s min_ready_duration: 15s interface_names: - - en0 + - eth0 enable_inet6: false final_sleep: 0s tokens_file_path: "" availability_zone: "" unregister_on_shutdown: true readiness_check_ring_health: true - address: 127.0.0.1 + address: "" port: 0 - id: local-instance + id: hostname + partition_ring: + kvstore: + store: memberlist + prefix: collectors/ + consul: + host: localhost:8500 + acl_token: "" + http_client_timeout: 20s + consistent_reads: false + watch_rate_limit: 1 + watch_burst_size: 1 + cas_retry_delay: 1s + etcd: + endpoints: [] + dial_timeout: 10s + max_retries: 10 + 
tls_enabled: false + tls_cert_path: "" + tls_key_path: "" + tls_ca_path: "" + tls_server_name: "" + tls_insecure_skip_verify: false + tls_cipher_suites: "" + tls_min_version: "" + username: "" + password: "" + multi: + primary: "" + secondary: "" + mirror_enabled: false + mirror_timeout: 2s + min_partition_owners_count: 1 + min_partition_owners_duration: 10s + delete_inactive_partition_after: 13h0m0s concurrent_flushes: 4 flush_check_period: 10s flush_op_timeout: 5m0s @@ -456,7 +497,7 @@ ingester: metrics_generator: ring: kvstore: - store: inmemory + store: memberlist prefix: collectors/ consul: host: localhost:8500 @@ -487,11 +528,11 @@ metrics_generator: mirror_timeout: 2s heartbeat_period: 5s heartbeat_timeout: 1m0s - instance_id: local-instance + instance_id: hostname instance_interface_names: - eth0 - en0 - instance_addr: 127.0.0.1 + instance_addr: "" instance_port: 0 enable_inet6: false processor: @@ -602,9 +643,58 @@ metrics_generator: search_encoding: none ingestion_time_range_slack: 2m0s version: vParquet4 + traces_query_storage: + path: "" + v2_encoding: none + search_encoding: none + ingestion_time_range_slack: 2m0s + version: vParquet4 metrics_ingestion_time_range_slack: 30s query_timeout: 30s override_ring_key: metrics-generator + assigned_partitions: {} + instance_id: hostname +ingest: + enabled: false + kafka: + address: localhost:9092 + topic: "" + client_id: "" + dial_timeout: 2s + write_timeout: 10s + sasl_username: "" + sasl_password: "" + consumer_group: "" + consumer_group_offset_commit_interval: 1s + last_produced_offset_retry_timeout: 10s + auto_create_topic_enabled: true + auto_create_topic_default_partitions: 1000 + producer_max_record_size_bytes: 15983616 + producer_max_buffered_bytes: 1073741824 + target_consumer_lag_at_startup: 2s + max_consumer_lag_at_startup: 15s +block_builder: + instance_id: hostname + assigned_partitions: {} + consume_cycle_duration: 5m0s + block: + max_block_bytes: 20971520 + bloom_filter_false_positive: 0.01 + bloom_filter_shard_size_bytes: 102400 + version: vParquet4 + search_encoding: snappy + search_page_size_bytes: 1048576 + v2_index_downsample_bytes: 1048576 + v2_index_page_size_bytes: 256000 + v2_encoding: zstd + parquet_row_group_size_bytes: 100000000 + parquet_dedicated_columns: [] + wal: + path: /var/tempo/block-builder/traces + v2_encoding: none + search_encoding: none + ingestion_time_range_slack: 2m0s + version: vParquet4 storage: trace: pool: @@ -615,7 +705,6 @@ storage: v2_encoding: snappy search_encoding: none ingestion_time_range_slack: 2m0s - version: vParquet4 block: bloom_filter_false_positive: 0.01 bloom_filter_shard_size_bytes: 102400 @@ -647,9 +736,9 @@ storage: blocklist_poll_tolerate_tenant_failures: 1 empty_tenant_deletion_enabled: false empty_tenant_deletion_age: 0s - backend: local + backend: "" local: - path: /var/tempo/traces + path: "" gcs: bucket_name: "" prefix: "" @@ -799,6 +888,7 @@ memberlist: gossip_to_dead_nodes_time: 30s dead_node_reclaim_time: 0s compression_enabled: false + notify_interval: 0s advertise_addr: "" advertise_port: 7946 cluster_label: "" @@ -817,6 +907,8 @@ memberlist: bind_port: 7946 packet_dial_timeout: 2s packet_write_timeout: 5s + max_concurrent_writes: 3 + acquire_writer_timeout: 250ms tls_enabled: false tls_cert_path: "" tls_key_path: "" diff --git a/docs/sources/tempo/configuration/network/ipv6.md b/docs/sources/tempo/configuration/network/ipv6.md index 9a9b951ce1a..3caca765cc3 100644 --- a/docs/sources/tempo/configuration/network/ipv6.md +++ 
b/docs/sources/tempo/configuration/network/ipv6.md @@ -12,7 +12,7 @@ Tempo can be configured to communicate between the components using Internet Pro {{< admonition type="note" >}} The underlying infrastructure must support this address family. This configuration may be used in a single-stack IPv6 environment, or in a dual-stack environment where both IPv6 and IPv4 are present. In a dual-stack scenario, only one address family may be configured at a time, and all components must be configured for that address family. -{{% /admonition %}} +{{< /admonition >}} ## Protocol configuration diff --git a/docs/sources/tempo/configuration/network/tls.md b/docs/sources/tempo/configuration/network/tls.md index fc19048dcbf..ec031ce05a5 100644 --- a/docs/sources/tempo/configuration/network/tls.md +++ b/docs/sources/tempo/configuration/network/tls.md @@ -12,7 +12,7 @@ Tempo can be configured to communicate between the components using Transport La {{< admonition type="note" >}} The ciphers and TLS version here are for example purposes only. We are not recommending which ciphers or TLS versions for use in production environments. -{{% /admonition %}} +{{< /admonition >}} ## Server configuration diff --git a/docs/sources/tempo/configuration/use-trace-data.md b/docs/sources/tempo/configuration/use-trace-data.md index 0f3a6c3eff5..a5b9746d4af 100644 --- a/docs/sources/tempo/configuration/use-trace-data.md +++ b/docs/sources/tempo/configuration/use-trace-data.md @@ -16,7 +16,7 @@ If you are using Grafana on-prem, you need to [set up the Tempo data source](/do {{< admonition type="tip" >}} If you want to explore tracing data in Grafana, try the [Intro to Metrics, Logs, Traces, and Profiling example]({{< relref "../getting-started/docker-example" >}}). -{{% /admonition %}} +{{< /admonition >}} This video explains how to add data sources, including Loki, Tempo, and Mimir, to Grafana and Grafana Cloud. Tempo data source set up starts at 4:58 in the video. diff --git a/docs/sources/tempo/getting-started/_index.md b/docs/sources/tempo/getting-started/_index.md index 88bcb78aa9f..967d7b3774f 100644 --- a/docs/sources/tempo/getting-started/_index.md +++ b/docs/sources/tempo/getting-started/_index.md @@ -9,14 +9,29 @@ aliases: # Get started with Grafana Tempo +Grafana Tempo is an open source, easy-to-use, and high-scale distributed tracing backend. Tempo lets you search for traces, generate metrics from spans, and link your tracing data with logs and metrics. + Distributed tracing visualizes the lifecycle of a request as it passes through a set of applications. -For more information about traces, refer to [Introduction to traces]({{< relref "../introduction" >}}). +For more information about traces, refer to [Introduction to traces](https://grafana.com/docs/tempo//introduction/). -Grafana Tempo is an open source, easy-to-use, and high-scale distributed tracing backend. Tempo lets you search for traces, generate metrics from spans, and link your tracing data with logs and metrics. +Getting started with Tempo follows these basic steps. + +First, check out the [examples](https://grafana.com/docs/tempo//getting-started/example-demo-app/) for ideas on how to get started with Tempo. + +Next, review the [Setup documentation](https://grafana.com/docs/tempo//setup/) for step-by-step instructions for setting up Tempo and creating a test application. + +Tempo offers different deployment options, depending on your needs.
Refer to the [plan your deployment](https://grafana.com/docs/tempo//setup/deployment/) section for more information. + +{{< admonition type="note" >}} +Grafana Alloy is already set up to use Tempo. +Refer to [Grafana Alloy configuration for tracing](https://grafana.com/docs/tempo//configuration/grafana-alloy/). +{{< /admonition >}} {{< youtube id="zDrA7Ly3ovU" >}} +## Tracing pipeline components + To build a tracing pipeline, you need four major components: client instrumentation, pipeline, backend, and visualization. @@ -24,23 +39,23 @@ This diagram illustrates a tracing system configuration:

*Figure: Tracing pipeline overview*

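As a rough sketch of how the four components might be wired together, consider the following Docker Compose fragment. The application image, the Alloy configuration, and the OTLP endpoint are illustrative assumptions, not a tested manifest:

```yaml
# Illustrative sketch only: one container per tracing pipeline component.
services:
  app:                                  # 1. Client instrumentation (your service)
    image: my-instrumented-app:latest   # hypothetical application image
    environment:
      # Send OTLP traces to the pipeline (Alloy) over gRPC.
      OTEL_EXPORTER_OTLP_ENDPOINT: http://alloy:4317
  alloy:                                # 2. Pipeline
    image: grafana/alloy:latest         # assumes a config that receives OTLP and forwards to Tempo
  tempo:                                # 3. Backend
    image: grafana/tempo:latest
  grafana:                              # 4. Visualization
    image: grafana/grafana:latest
    ports:
      - "3000:3000"
```

In this sketch, the application exports spans to Alloy, Alloy forwards them to Tempo for storage, and Grafana queries Tempo to visualize the traces.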
-## Client instrumentation +### Client instrumentation Client instrumentation (1 in the diagram) is the first building block to a functioning distributed tracing visualization pipeline. Client instrumentation is the process of adding instrumentation points in the application that create and offload spans. {{< admonition type="note" >}} -To learn more about instrumentation, read the [Instrument for tracing]({{< relref "./instrumentation" >}}) documentation to learn how to instrument your favorite language for distributed tracing. -{{% /admonition %}} +To learn how to instrument your favorite language for distributed tracing, read the [Instrument for tracing](https://grafana.com/docs/tempo//getting-started/instrumentation/) documentation. +{{< /admonition >}} -## Pipeline (Grafana Alloy) +### Pipeline (Grafana Alloy) -Once your application is instrumented for tracing, the traces need to be sent +After you instrument your application for tracing, the traces are sent to a backend for storage and visualization. You can build a tracing pipeline that -offloads spans from your application, buffers them, and eventually forwards them to a backend. -Tracing pipelines are optional (most clients can send directly to Tempo), but the pipelines -become more critical the larger and more robust your tracing system is. +offloads spans from your application, buffers them, and forwards them to a backend. +Tracing pipelines are optional since most clients can send directly to Tempo. +The pipelines become more critical the larger and more robust your tracing system is. Grafana Alloy is a service that is deployed close to the application, either on the same node or within the same cluster (in Kubernetes) to quickly offload traces from the application and forward them to a storage backend. Alloy also abstracts features like trace batching to a remote trace backend store, including retries on write failures. To learn more about Grafana Alloy and how to set it up for tracing with Tempo, -refer to [Grafana Alloy configuration for tracing]({{< relref "../configuration/grafana-alloy" >}}). +refer to [Grafana Alloy configuration for tracing](https://grafana.com/docs/tempo//configuration/grafana-alloy/). {{< admonition type="note" >}} The [OpenTelemetry Collector](https://github.com/open-telemetry/opentelemetry-collector) / [Jaeger Agent](https://www.jaegertracing.io/docs/latest/deployment/) can also be used at the agent layer. Refer to [this blog post](/blog/2021/04/13/how-to-send-traces-to-grafana-clouds-tempo-service-with-opentelemetry-collector/) -to see how the OpenTelemetry Collector can be used with Tempo. -{{% /admonition %}} +to see how to use the OpenTelemetry Collector with Tempo. +{{< /admonition >}} -## Backend (Tempo) +### Backend (Tempo) Grafana Tempo is an easy-to-use and high-scale distributed tracing backend used to store and query traces. The tracing backend stores and retrieves traces on demand. -Getting started with Tempo is easy. - -First, check out the [examples]({{< relref "./example-demo-app" >}}) for ideas on how to get started with Tempo. - -Next, review the [Setup documentation]({{< relref "../setup" >}}) for step-by-step instructions for setting up Tempo and creating a test application. - -Tempo offers different deployment options, depending upon your needs.
Refer to the [plan your deployment]({{< relref "../setup/deployment" >}}) section for more information - -{{< admonition type="note" >}} -Grafana Alloy is already set up to use Tempo. -Refer to [Grafana Alloy configuration for tracing](https://grafana.com/docs/tempo//configuration/grafana-alloy). -{{% /admonition %}} - -## Visualization (Grafana) +## Visualize tracing data with Grafana -Grafana has a built-in Tempo data source that can be used to query Tempo and visualize traces. -For more information, refer to the [Tempo data source](/docs/grafana/latest/datasources/tempo) and the [Tempo in Grafana]({{< relref "./tempo-in-grafana" >}}) topics. +Grafana and Grafana Cloud have a built-in Tempo data source that you can use to query Tempo and visualize traces. +For more information, refer to the [Tempo data source](https://grafana.com/docs/grafana//datasources/tempo/) and the [Tempo in Grafana](https://grafana.com/docs/tempo//getting-started/tempo-in-grafana) topics. diff --git a/docs/sources/tempo/operations/best-practices.md b/docs/sources/tempo/getting-started/best-practices.md similarity index 80% rename from docs/sources/tempo/operations/best-practices.md rename to docs/sources/tempo/getting-started/best-practices.md index 2a9c0e2dfc2..7f868d31e58 100644 --- a/docs/sources/tempo/operations/best-practices.md +++ b/docs/sources/tempo/getting-started/best-practices.md @@ -2,6 +2,8 @@ title: Best practices for traces menuTitle: Best practices description: Learn about the best practices for traces +aliases: + - ../operations/best-practices/ # https://grafana.com/docs/tempo/latest/operations/best-practices/ weight: 20 --- diff --git a/docs/sources/tempo/getting-started/docker-example.md b/docs/sources/tempo/getting-started/docker-example.md index f28f2a4f116..2ce65bf9def 100644 --- a/docs/sources/tempo/getting-started/docker-example.md +++ b/docs/sources/tempo/getting-started/docker-example.md @@ -117,6 +117,44 @@ You can use Grafana to explore the traces generated by the k6-tracing service. + + +## Explore Traces plugin + +The [Explore Traces](https://grafana.com/docs/grafana/latest/explore/simplified-exploration/traces/) plugin offers an opinionated, non-query-based approach to exploring traces. Let's take a look at some of its key features and panels. + +1. Open a browser and navigate to [http://localhost:3000/a/grafana-exploretraces-app](http://localhost:3000/a/grafana-exploretraces-app). +2. Within the filter bar, there is a dropdown menu set to **Rate** of **Full traces**. Change this to **Duration** and **All spans**. + +This updated panel view looks like this: + +{{< figure align="center" src="/media/docs/tempo/explore-spans-error-view.png" alt="Explore Traces panel" >}} + +Breakdown of the view: +* The histogram at the top shows the distribution of span durations. The lighter the color, the more spans in that duration bucket. In this example, most spans fall within `537ms`, which is considered the average duration for the system. +* The high peaks in the histogram indicate spans that are taking longer than the average (as high as `2.15s`). These are likely to be the spans that are causing performance issues. You can investigate further to identify the root cause. + +Select the `Slow traces` tab in the navigation bar to view the slowest traces in the system. + +{{< figure align="center" src="/media/docs/tempo/slow-trace-view.png" alt="Slow traces panel" >}} + +`shop-backend` appears to be the primary culprit for the slow traces.
This happens when a user initiates the `article-to-cart` operation. From here, you can select the **Trace Name** to open the **Trace View** panel. + +{{< figure align="center" src="/media/docs/tempo/slow-trace-trace-view.png" alt="Trace View panel" >}} + +The **Trace View** panel provides a detailed view of the trace. The panel is divided into three sections: +* The top section shows the trace ID, duration, and the service that generated the trace. +* The middle section shows the trace timeline. Each span is represented as a horizontal bar. The color of the bar represents the span's status. The width of the bar represents the duration of the span. +* The bottom section shows the details of the selected span. This includes the span name, duration, and tags. + +Drilling into the `shop-backend` span, you can see that the `place-articles` operation has an exception event tied to it. This is likely the root cause of the slow trace. + +{{< figure align="center" src="/media/docs/tempo/slow-trace-root-cause-2.png" alt="Span View panel" >}} + +If you would like to dive deeper into the Explore Traces plugin and its panel concepts, refer to the [Explore Traces Concepts](https://grafana.com/docs/grafana/latest/explore/simplified-exploration/traces/concepts/). + + + ## Next steps diff --git a/docs/sources/tempo/getting-started/instrumentation.md b/docs/sources/tempo/getting-started/instrumentation.md index 6303b2e6a93..27c1e85caec 100644 --- a/docs/sources/tempo/getting-started/instrumentation.md +++ b/docs/sources/tempo/getting-started/instrumentation.md @@ -44,6 +44,10 @@ information from a client application with minimal manual instrumentation of the * [OpenTelemetry Python auto-instrumentation](https://github.com/open-telemetry/opentelemetry-python-contrib) * [OpenTelemetry Go auto-instrumentation](https://github.com/open-telemetry/opentelemetry-go-instrumentation) and [documentation](https://opentelemetry.io/docs/instrumentation/go/getting-started/) +{{< admonition type="note" >}} +Jaeger client libraries have been deprecated. For more information, refer to the [Deprecating Jaeger clients article](https://www.jaegertracing.io/docs/1.50/client-libraries/#deprecating-jaeger-clients). Jaeger now recommends using OpenTelemetry SDKs. +{{< /admonition >}} + ### Additional OTel resources - [Grafana Application Observability](/docs/grafana-cloud/monitor-applications/application-observability/) @@ -56,14 +60,6 @@ information from a client application with minimal manual instrumentation of the - [Zipkin Language Specific Instrumentation](https://zipkin.io/pages/tracers_instrumentation.html) -### Jaeger - -{{< admonition type="note" >}} -Jaeger client libraries have been deprecated. For more information, refer to the [Deprecating Jaeger clients article](https://www.jaegertracing.io/docs/1.50/client-libraries/#deprecating-jaeger-clients). Jaeger now recommends using OpenTelemetry SDKs. -{{% /admonition %}} - -- [Jaeger Language Specific Instrumentation](https://www.jaegertracing.io/docs/latest/client-libraries/) - ## Grafana Blog The Grafana blog periodically features instrumentation posts. 
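Whichever OpenTelemetry SDK you pick, auto-instrumentation can usually be pointed at Tempo (or at Alloy in front of it) through the standard OpenTelemetry environment variables alone. A minimal sketch, assuming a hypothetical `checkout` service and an OTLP gRPC endpoint reachable at `tempo:4317`:

```yaml
# Standard OpenTelemetry SDK environment variables, shown as a
# Compose/Kubernetes-style env block. Service name and endpoint are assumptions.
environment:
  OTEL_SERVICE_NAME: checkout
  OTEL_TRACES_EXPORTER: otlp
  OTEL_EXPORTER_OTLP_PROTOCOL: grpc
  OTEL_EXPORTER_OTLP_ENDPOINT: http://tempo:4317
```

Because these variables are part of the OpenTelemetry specification, the same block works across the language SDKs listed above without code changes.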
diff --git a/docs/sources/tempo/getting-started/metrics-from-traces.md b/docs/sources/tempo/getting-started/metrics-from-traces.md index ee918193175..de616999a25 100644 --- a/docs/sources/tempo/getting-started/metrics-from-traces.md +++ b/docs/sources/tempo/getting-started/metrics-from-traces.md @@ -26,7 +26,7 @@ Span metrics are of particular interest if your system is not monitored with met {{< admonition type="note" >}} Metrics generation is disabled by default. Contact Grafana Support to enable metrics generation in your organization. -{{% /admonition %}} +{{< /admonition >}} After the metrics-generator is enabled in your organization, refer to [Metrics-generator configuration]({{< relref "../configuration" >}}) for information about metrics-generator options. diff --git a/docs/sources/tempo/getting-started/tempo-in-grafana.md b/docs/sources/tempo/getting-started/tempo-in-grafana.md index 14df9afb2a1..a012ac2c0dc 100644 --- a/docs/sources/tempo/getting-started/tempo-in-grafana.md +++ b/docs/sources/tempo/getting-started/tempo-in-grafana.md @@ -10,75 +10,7 @@ Grafana has a built-in Tempo data source that can be used to query Tempo and vis This page describes the high-level features and their availability. Use the latest versions for best compatibility and stability. -## Use TraceQL to dig deep into trace data +[//]: # 'Shared content for best practices for traces' +[//]: # 'This content is located in /tempo/docs/sources/shared/tempo-in-grafana.md' -Inspired by PromQL and LogQL, TraceQL is a query language designed for selecting traces in Tempo. - -The default Tempo search reviews the whole trace. TraceQL provides a method for formulating precise queries so you can quickly identify the traces and spans that you need. Query results are returned faster because the queries limit what is searched. - -You can run a TraceQL query either by issuing it to Tempo’s `q` parameter of the [`search` API endpoint]({{< relref "../api_docs#search" >}}), or, for those using Tempo in conjunction with Grafana, by using the [TraceQL query editor]({{< relref "../traceql/query-editor" >}}). - -For details about how queries are constructed, read the [TraceQL documentation]({{< relref "../traceql" >}}). - -![TraceQL query editor showing span results](/media/docs/grafana/data-sources/tempo/query-editor/tempo-ds-query-ed-example-v11-a.png) - -The most basic functionality is to visualize a trace using its ID. Select the TraceQL tab and enter the ID to view it. This functionality is enabled by default and is available in all versions of Grafana. - -## Finding traces using Trace to logs - -Traces can be discovered by searching logs for entries containing trace IDs. -This is most useful when your application also logs relevant information about the trace that can also be searched, such as HTTP status code, customer ID, etc. -This feature requires a linked Loki data source, and a [traceID derived field](/docs/grafana//datasources/loki/#derived-fields). - -For more information, refer to the [Trace to logs](https://grafana.com/docs/grafana//datasources/tempo/configure-tempo-data-source/#trace-to-logs) documentation. - -![Trace to logs lets you link your tracing data with log data](/media/docs/grafana/data-sources/tempo/trace-to-logs-v11.png) - -## Find traces using Search query builder - -Search for traces using common dimensions such as time range, duration, span tags, service names, and more. Use the trace view to quickly diagnose errors and high-latency events in your system. 
- -![Showing how to build queries with common dimensions using the query builder](/media/docs/grafana/data-sources/tempo/query-editor/tempo-ds-query-builder-v11.png) - -### Non-deterministic search - -Most search functions are deterministic: using the same search criteria results in the same results. - -However, Tempo search is non-deterministic. -If you perform the same search twice, you’ll get different lists, assuming the possible number of results for your search is greater than the number of results you have your search set to return. - -When performing a search, Tempo does a massively parallel search over the given time range, and takes the first N results. -Even identical searches differ due to things like machine load and network latency. -This approach values speed over predictability and is quite simple; enforcing that the search results are consistent would introduce additional complexity (and increase the time the user spends waiting for results). -TraceQL follows the same behavior. - -## Service graph view - -Grafana provides a built-in service graph view available in Grafana Cloud and Grafana 9.1. -The service graph view visualizes the span metrics (traces data for rates, error rates, and durations (RED)) and service graphs. -Once the requirements are set up, this pre-configured view is immediately available in **Explore > Service Graphs**. - -For more information, refer to the [service graph view]({{< relref "../metrics-generator/service-graph-view" >}}). - -![Service graph view](/media/docs/grafana/data-sources/tempo/query-editor/tempo-ds-query-service-graph.png) - -## View JSON file - -A local JSON file containing a trace can be imported and viewed in the Grafana UI. This is useful in cases where access to the original Tempo data source is limited, or for preserving traces outside of Tempo. - -The JSON data can be downloaded via the Tempo API or the [Inspector panel](/docs/grafana/latest/explore/explore-inspector/) while viewing the trace in Grafana. - -{{< admonition type="note" >}} -To perform this action on Grafana 10.1 or later, select a Tempo data source, select **Explore** from the main menu, and then select **Import trace**. -{{% /admonition %}} - -## Link tracing data with profiles - -Using Trace to profiles, you can use Grafana’s ability to correlate different signals by adding the functionality to link between traces and profiles. - -Trace to profiles lets you link your [Grafana Pyroscope](/docs/pyroscope/) data source to tracing data in Grafana or Grafana Cloud. -When configured, this connection lets you run queries from a trace span into the profile data. - -For more information, refer to the [Traces to profiles documentation](https://grafana.com/docs/grafana//datasources/tempo/configure-tempo-data-source#trace-to-profiles) and the [Grafana Pyroscope data source documentation](https://docs/grafana//datasources/grafana-pyroscope/). - -{{< youtube id="AG8VzfFMLxo" >}} \ No newline at end of file +{{< docs/shared source="tempo" lookup="tempo-in-grafana.md" version="" >}} diff --git a/docs/sources/tempo/introduction/_index.md b/docs/sources/tempo/introduction/_index.md index f64ef0cf5e5..d34cb6a1f3f 100644 --- a/docs/sources/tempo/introduction/_index.md +++ b/docs/sources/tempo/introduction/_index.md @@ -14,46 +14,54 @@ weight: 120 # Introduction A trace represents the whole journey of a request or an action as it moves through all the nodes of a distributed system, especially containerized applications or microservices architectures. 
-This makes them the ideal observability signal for discovering bottlenecks and interconnection issues. +Traces are the ideal observability signal for discovering bottlenecks and interconnection issues. Traces are composed of one or more spans. -A span is a unit of work within a trace that has a start time relative to the beginning of the trace, a duration and an operation name for the unit of work. -It usually has a reference to a parent span (unless it's the first span, the root span, in a trace). +A span is a unit of work within a trace that has a start time relative to the beginning of the trace, a duration, and an operation name for the unit of work. +It usually has a reference to a parent span, unless it's the first, or root, span in a trace. It frequently includes key/value attributes that are relevant to the span itself, for example the HTTP method used in the request, as well as other metadata such as the service name, sub-span events, or links to other spans. -By definition, traces are never complete. You can always push a new batch of spans, even if days have passed since the last one. +By definition, traces are never complete. +You can always push another batch of spans, even if days have passed since the last one. When receiving a query requesting a stored trace, tracing backends like Tempo find all the spans for that specific trace and collate them into a returned result. -For that reason, issues can arise on retrieval of the trace data if traces are extremely large. +Retrieving trace data can have issues if traces are extremely large. {{< youtube id="ZirbR0ZJIOs" >}} ## Example of traces -Firstly, a user on your website enters their email address into a form to sign up for your mailing list. They click **Enter**. This initial transaction has a trace ID that's subsequently associated with every interaction in the chain of processes within the system. +Firstly, a user on your website enters their email address into a form to sign up for your mailing list. +They click **Enter**. This initial transaction has a trace ID that's subsequently associated with every interaction in the chain of processes within the system. Next, the user's email address is data that flows through your system. -In a cloud computing world, it's possible that clicking that one button triggers many downstream processes on various microservices operating across many different nodes in your compute infrastructure. +In a cloud computing world, it's possible that clicking that one button triggers many downstream processes on various microservices operating across many different nodes in your compute infrastructure. -As a result, the email address might be sent to a microservice responsible for verification. If the email passes this check, it is then stored in a database. +As a result, the email address goes to a microservice responsible for verification. If the email passes this check, then the database stores the address. -Along the way, an anonymization microservice strips personally identifying data from the address and adds additional metadata before sending it along to a marketing qualifying microservice which determines whether the request was sent from a targeted part of the internet. +Along the way, an anonymizing microservice strips personally identifying data from the address and adds additional metadata before sending it along to a marketing qualifying microservice. +This microservice determines whether the request came from a targeted part of the internet. 
-Services respond and data flows back from each, sometimes triggering new events across the system. Along the way, logs are written to the nodes on which those services run with a time stamp showing when the info passed through. +Services respond and data flows back from each, sometimes triggering additional events across the system. +Along the way, the nodes on which those services run write logs, with a timestamp showing when the information passed through. -Finally, the request and response activity ends. No other spans are added to that TraceID. +Finally, the request and response activity ends. +No other spans are added to that trace ID. ## Traces and trace IDs Setting up tracing adds an identifier, or trace ID, to all of these events. -The trace ID is generated when the request is initiated and that same trace ID is applied to every single span as the request and response generate activity across the system. +The trace ID is generated when the request initiates. +That same trace ID applies to every span as the request and response generate activity across the system. -That trace ID enables one to trace, or follow, a request as it flows from node to node, service to microservice to lambda function to wherever it goes in your chaotic, cloud computing system and back again. +The trace ID lets you trace, or follow, a request as it flows from node to node, service to microservice to lambda function to wherever it goes in your chaotic, cloud computing system and back again. This is recorded and displayed as spans. -Here's an example showing two pages in Grafana Cloud. The first, on the left (1), shows a query using the **Explore** feature. -In the query results you can see a **traceID** field that was added to an application. That field contains a **Tempo** trace ID. -The second page, on the right (2), uses the same Explore feature to perform a Tempo search using that **trace ID**. +Here's an example showing two pages in Grafana Cloud. +The first, numbered 1, shows a query using the **Explore** feature. +In the query results, you can see a **TraceID** field that was added to an application. +That field contains a **Tempo** trace ID. +The second page, numbered 2, uses the same **Explore** feature to perform a Tempo search using that **TraceID**. It then shows a set of spans as horizontal bars, each bar denoting a different part of the system. ![Traces example with query results and spans](/static/img/docs/tempo/screenshot-trace-explore-spans-g10.png) @@ -61,8 +69,8 @@ It then shows a set of spans as horizontal bars, each bar denoting a different p ## What are traces used for? Traces can help you find bottlenecks. -A trace can be visualized to give a graphic representation of how long it takes for each step in the data flow pathway to complete. -It can show where new requests are initiated and end, and how your system responds. +Applications like Grafana can visualize traces to give a graphic representation of how long it takes for each step in the data flow pathway to complete. +It can show where requests initiate and end, and how your system responds. This data helps you locate problem areas, often in places you never would have anticipated or found without this ability to trace the request flow.
@@ -72,6 +80,6 @@ This data helps you locate problem areas, often in places you never would have a For more information about traces, refer to: -* [Traces and telemetry]({{< relref "./telemetry" >}}) -* [User journeys: How tracing can help you]({{< relref "./solutions-with-traces" >}}) -* [Glossary]({{< relref "./glossary" >}}) \ No newline at end of file +* [Traces and telemetry](./telemetry) +* [User journeys: How tracing can help you](./solutions-with-traces) +* [Glossary](./glossary) \ No newline at end of file diff --git a/docs/sources/tempo/introduction/solutions-with-traces/_index.md b/docs/sources/tempo/introduction/solutions-with-traces/_index.md index 1e543f00638..26b064391ae 100644 --- a/docs/sources/tempo/introduction/solutions-with-traces/_index.md +++ b/docs/sources/tempo/introduction/solutions-with-traces/_index.md @@ -11,13 +11,12 @@ weight: 300 # Use traces to find solutions -Tracing is best used for analyzing the performance of your system, identifying bottlenecks, monitoring latency, and providing a complete picture of how requests are processed. - -* Decrease MTTR/MTTI: Tracing helps reduce Mean Time To Repair (MTTR) and Mean Time To Identify (MTTI) by pinpointing exactly where errors or latency are occurring within a transaction across multiple services. -* Optimization of bottlenecks and long-running code: By visualizing the path and duration of requests, tracing can help identify bottleneck operations and long-running pieces of code that could benefit from optimization. -* Metrics generation and RED signals: Tracing can help generate useful metrics related to Request rate, Error rate, and Duration of requests (RED). You can set alerts against these high-level signals to detect problems when they arise. -* Seamless telemetry correlation: Using tracing in conjunction with logs and metrics can help give you a comprehensive view of events over time during an active incident or postmorterm analysis by showing relationships between services and dependencies. -* Monitor compliance with policies: Business policy adherence ensures that services are correctly isolated using generated metrics and generated service graphs. +Tracing is best used for analyzing the performance of your system, identifying bottlenecks, monitoring latency, and providing a complete picture of request processing. +* Decrease mean time to repair and mean time to identify an issue by pinpointing exactly where errors or latency are occurring within a transaction across multiple services. +* Optimize bottlenecks and long-running code by visualizing the path and duration of requests. Tracing can help identify bottleneck operations and long-running pieces of code that could benefit from optimization. +* Detect issues with generated metrics. Tracing generates metrics related to request rate, error rate, and duration of requests. You can set alerts against these high-level signals to detect problems. +* Seamless telemetry correlation. Use tracing in conjunction with logs and metrics for a comprehensive view of events over time, during an active incident, or for root-cause analysis. Tracing shows relationships between services and dependencies. +* Monitor compliance with policies. Business policy adherence ensures that services are correctly isolated using generated metrics and generated service graphs. Each use case provides real-world examples, including the background of the use case and how tracing highlighted and helped resolve any issues.
\ No newline at end of file diff --git a/docs/sources/tempo/introduction/solutions-with-traces/traces-app-insights.md b/docs/sources/tempo/introduction/solutions-with-traces/traces-app-insights.md index ce35b6fffb2..4e03804e99d 100644 --- a/docs/sources/tempo/introduction/solutions-with-traces/traces-app-insights.md +++ b/docs/sources/tempo/introduction/solutions-with-traces/traces-app-insights.md @@ -7,6 +7,17 @@ keywords: title: Identify bottlenecks and establish SLOs menuTitle: Identify bottlenecks and establish SLOs weight: 320 +refs: + metrics-generator: + - pattern: /docs/tempo/ + destination: https://grafana.com/docs/tempo//metrics-generator/ + - pattern: /docs/enterprise-traces/ + destination: https://grafana.com/docs/enterprise-traces//metrics-generator/ + span-metrics: + - pattern: /docs/tempo/ + destination: https://grafana.com/docs/tempo//metrics-generator/span_metrics/ + - pattern: /docs/enterprise-traces/ + destination: https://grafana.com/docs/enterprise-traces//metrics-generator/span_metrics/ --- # Identify bottlenecks and establish SLOs @@ -19,34 +30,39 @@ Handy Site Corp, a fake website company, runs an ecommerce application that includes user authentication, a product catalog, order management, payment processing, and other services. ### Define realistic SLOs -Handy Site’s engineers start by establishing service level objectives (SLOs) around latency ensure that customers have a good experience when trying to complete the checkout process. + + +Handy Site’s engineers start by establishing service level objectives, or SLOs, around latency to ensure that customers have a good experience when trying to complete the checkout process. To do this, they use metrics generated from their span data. Their service level objective should be a realistic target based on previous history during times of normal operation. -Once they've agreed upon their service level objective, they will set up alerts to warn them when they are at risk of failing to meet that objective. +Once they've agreed upon their service level objective, they set up alerts to signal risk of failing to meet that objective. ### Utilize span metrics to define your SLO and SLI -After evaluating options, they decide to use [span metrics](https://grafana.com/docs/tempo/latest/metrics-generator/span_metrics/) as a service level indicator (SLI) to measure SLO compliance. +After evaluating options, they decide to use [span metrics](ref:span-metrics) as a service-level indicator (SLI) to measure SLO compliance. ![Metrics generator and exemplars](/media/docs/tempo/intro/traces-metrics-gen-exemplars.png) -Tempo can generate metrics using the [metrics-generator component](https://grafana.com/docs/tempo/latest/metrics-generator/). +Tempo can generate metrics using the [metrics-generator component](ref:metrics-generator). These metrics are created based on spans from incoming traces and demonstrate immediate usefulness with respect to application flow and overview. This includes rate, error, and duration (RED) metrics. - Span metrics also make it easy to use exemplars. -An [exemplar](https://grafana.com/docs/grafana/latest/basics/exemplars/) serves as a detailed example of one of the observations aggregated into a metric. An exemplar contains the observed value together with an optional timestamp and arbitrary trace IDs, which are typically used to reference a trace.
-Since traces and metrics co-exist in the metrics-generator, exemplars can be automatically added to those metrics, allowing you to quickly jump from a metric showing aggregate latency over time into an individual trace that represents a low, medium, or high latency request. Similarly, you can quickly jump from a metric showing error rate over time into an individual erroring trace. +An [exemplar](https://grafana.com/docs/grafana//basics/exemplars/) serves as a detailed example of one of the observations aggregated into a metric. An exemplar contains the observed value together with an optional timestamp and arbitrary trace IDs, which are typically used to reference a trace. +Since traces and metrics co-exist in the metrics-generator, exemplars can be automatically added to those metrics, allowing you to quickly jump from a metric showing aggregate latency over time into an individual trace that represents a low, medium, or high latency request. Similarly, you can quickly jump from a metric showing error rate over time into an individual erroring trace. ### Monitor latency Handy Site decides they're most interested in monitoring the latency of requests processed by their checkout service and want to set an objective that 99.5% of requests in a given month should complete within 2 seconds. -To define a service level indicator (SLI) that they can use to track their progress against their objective, they use the `traces_spanmetrics_latency` metric with the proper label selectors, such as `service name = checkoutservice`. -The metrics-generator adds a default set of labels to the metrics it generates, including `span_kind` and `status_code`. However, if they were interested in calculating checkout service latency per endpoint or per version of the software, they could change the configuration of the Tempo metrics-generator to add these custom dimensions as labels to their spanmetrics. +To define a service-level indicator (SLI) that they can use to track their progress against their objective, they use the `traces_spanmetrics_latency` metric with the proper label selectors, such as `service name = checkoutservice`. +The metrics-generator adds a default set of labels to the metrics it generates, including `span_kind` and `status_code`. +If they want to calculate checkout service latency per endpoint or per version of the software, they could change the configuration of the Tempo metrics-generator to add these custom dimensions as labels to their span metrics. -With all of this in place, Handy Site now opens the [Grafana SLO](https://grafana.com/docs/grafana-cloud/alerting-and-irm/slo/) application and follows the setup flow to establish an [SLI](https://grafana.com/docs/grafana-cloud/alerting-and-irm/slo/create/) for their checkout service around the `traces_spanmetrics_latency` metric.. -They can now be alerted to degradations in service quality that directly impact their end user experience. SLO-based alerting also ensures that they don't suffer from noisy alerts. Alerts are only triggered when the value of the SLI is such that the team is in danger of missing their SLO. +With all of this in place, Handy Site opens the [Grafana SLO](https://grafana.com/docs/grafana-cloud/alerting-and-irm/slo/) application and follows the setup flow to establish an [SLI](https://grafana.com/docs/grafana-cloud/alerting-and-irm/slo/create/) for their checkout service around the `traces_spanmetrics_latency` metric. 
+They can be alerted to degradations in service quality that directly impact their end user experience. +SLO-based alerting also ensures that they don't suffer from noisy alerts. +Alerts are only triggered when the value of the SLI is such that the team is in danger of missing their SLO. -![Latency SLO dashboard](/media/docs/tempo/intro/traces-metrics-gen-SLO.png) \ No newline at end of file +![Latency SLO dashboard](/media/docs/tempo/intro/traces-metrics-gen-SLO.png) + \ No newline at end of file diff --git a/docs/sources/tempo/introduction/solutions-with-traces/traces-diagnose-errors.md b/docs/sources/tempo/introduction/solutions-with-traces/traces-diagnose-errors.md index ebfaa2c317f..8c179353843 100644 --- a/docs/sources/tempo/introduction/solutions-with-traces/traces-diagnose-errors.md +++ b/docs/sources/tempo/introduction/solutions-with-traces/traces-diagnose-errors.md @@ -7,40 +7,52 @@ keywords: title: Diagnose errors with traces menuTitle: Diagnose errors with traces weight: 400 +refs: + traceql: + - pattern: /docs/tempo/ + destination: https://grafana.com/docs/tempo//traceql/ + - pattern: /docs/enterprise-traces/ + destination: https://grafana.com/docs/enterprise-traces//traceql/ --- # Diagnose errors with traces Traces allow you to quickly diagnose errors in your application, ensuring that you can perform Root Cause Analysis (RCA) on request failures. -Trace visualizations help you determine the spans in which errors occur, along with the context behind those errors, leading to a lower mean time to repair (MTTR). +Trace visualizations help you determine the spans in which errors occur, along with the context behind those errors, leading to a lower mean time to repair. ## Meet Handy Site Corp -Handy Site Corp, a fake website company, runs an ecommerce application that includes user authentication, a product catalog, order management, payment processing, and other services. +Handy Site Corp, a fake website company, runs an e-commerce application that includes user authentication, a product catalog, order management, payment processing, and other services. -Handy Site’s operations team receives several alerts for their error SLO for monitored endpoints in their services. Using their Grafana dashboards, they notice that there are several issues. The dashboard provides the percentages of errors: +Handy Site's operations team receives several alerts for their error service-level objectives, or SLOs, for monitored endpoints in their services. +Using their Grafana dashboards, they notice that there are several issues. +The dashboard provides the percentages of errors: ![Dashboard showing errors in services](/media/docs/tempo/intro/traces-error-SLO.png) More than 5% of the requests from users are resulting in an error across several endpoints, such as `/beholder`, `/owlbear`, and `/illithid`, which causes a degradation in performance and usability. -It’s imperative for the operations team at Handy Site to quickly troubleshoot the issue. The elevated error rates indicate that the Handy Site is unable to provide valid responses to their users’ requests, which in addition to threatening the operation team’s SLO error budget also affects profitability overall for Handy Site. +It's imperative for the operations team at Handy Site to quickly troubleshoot the issue. 
+The elevated error rates indicate that Handy Site is unable to provide valid responses to their users' requests, which, in addition to threatening the operations team's SLO error budget, also affects Handy Site's overall profitability. ## Use TraceQL to query data -Tempo has a traces-first query language, [TraceQL](https://grafana.com/docs/tempo/latest/traceql/), that provides a unique toolset for selecting and searching tracing data. TraceQL can match traces based on span and resource attributes, duration, and ancestor<>descendant relationships. It also can compute aggregate statistics (e.g., `rate`) over a set of spans. +Tempo has a traces-first query language, [TraceQL](ref:traceql), that provides a unique toolset for selecting and searching tracing data. +TraceQL can match traces based on span and resource attributes, duration, and ancestor<>descendant relationships. +The language can also compute aggregate statistics, such as `rate`, over a set of spans. -Handy Site’s services and applications are instrumented for tracing, so they can use TraceQL as a debugging tool. Using three TraceQL queries, the team identifies and validates the root cause of the issue. +Handy Site has instrumented their services and applications for tracing, so they can use TraceQL as a debugging tool. +Using three TraceQL queries, the team identifies and validates the root cause of the issue. ### Find HTTP errors -The top-level service, `mythical-requester` receives requests and returns responses to users. When it receives a request, it calls numerous downstream services whose responses it relies on in order to send a response to the user request. +The top-level service, `mythical-requester`, receives requests and returns responses to users. When it receives a request, it calls numerous downstream services whose responses it relies on to send a response to the user request. Using Grafana Explore, the operations team starts with a simple TraceQL query to find all traces from this top-level service, where an HTTP response code sent to a user is `400` or above. Status codes in this range include `Forbidden`, `Not found`, `Unauthorized`, and other client and server errors. ```traceql { resource.service.name = "mythical-requester" && span.http.status_code >= 400 } | select(span.http.target) ``` -The addition of the `select` statement to this query (after the `|`) ensures that the query response includes not only the set of matched spans, but also the `http.target` attribute (i.e. the endpoints for the SaaS service) for each of those spans. +Adding the `select` statement to this query (after the `|`) ensures that the query response includes not only the set of matched spans, but also the `http.target` attribute (for example, the endpoints for the SaaS service) for each of those spans. ![Query results showing http.target attribute](/media/docs/tempo/intro/traceql-http-target-handy-site.png) @@ -50,7 +62,8 @@ Looking at the set of returned spans, the most concerning ones are those with th The team decides to use structural operators to follow an error chain from the top-level `mythical-requester` service to any descendant spans that also have an error status. Descendant spans can be any span that's descended from the parent span, such as a child or a further child at any depth. -Using this query, the team can pinpoint the downstream service that might be causing the issue.
The query below says "Find me spans where `status = error` that that are descendants of spans from the `mythical-requester` service that have status code `500`." +Using this query, the team can pinpoint the downstream service that might be causing the issue. +The query below says "Find spans where `status = error` that are descendants of spans from the `mythical-requester` service that have status code `500`." ```traceql { resource.service.name = "mythical-requester" && span.http.status_code = 500 } >> { status = error } ``` ![TraceQL results showing expanded span](/media/docs/tempo/intro/traceql-error-insert-handy-site.png) -Expanding the erroring span that is a descendant of the span for the `mythical-server` service shows the team that there is a problem with the data being inserted into the database. +Expanding the span with errors that's a descendant of the span for the `mythical-server` service shows the team that there is a problem with the data inserted into the database. Specifically, the service is passing a `null` value for a column in a database table where `null` values are invalid. ![Error span for INSERT](/media/docs/tempo/intro/traceql-insert-postgres-handy-site.png) @@ -67,15 +80,16 @@ Specifically, the service is passing a `null` value for a column in a database t After identifying the specific cause of this internal server error, the team wants to know if there are errors in any database operations other than the `null` `INSERT` error found above. -Their updated query uses a negated regular expression to find any spans where the database statement either doesn’t exist, or doesn’t start with an `INSERT` clause. -This should expose any other issues causing an internal server error and filter out the class of issues that they already diagnosed. +Their updated query uses a negated regular expression to find any spans where the database statement either doesn't exist, or doesn't start with an `INSERT` clause. +This should expose any other issues causing an internal server error and filter out the class of issues that they already diagnosed. ```traceql { resource.service.name = "mythical-requester" && span.http.status_code = 500 } >> { status = error && span.db.statement !~ "INSERT.*" } ``` This query yields no results, suggesting that the root cause of the issues the operations team are seeing is exclusively due to the failing database `INSERT` statement. -At this point, they can roll back to a known working version of the service, or deploy a fix to ensure that `null` data being passed to the service is rejected appropriately. -Once that is complete, the issue can be marked resolved and the Handy team's error rate SLI should return back to acceptable levels. +At this point, they can roll back to a known working version of the service, or deploy a fix to ensure that the service rejects `null` data appropriately. +After that work completes, the team marks the issue resolved. +The Handy team's error rate SLI should return to acceptable levels.
![Empty query results](/media/docs/tempo/intro/traceql-no-results-handy-site.png) diff --git a/docs/sources/tempo/introduction/telemetry.md b/docs/sources/tempo/introduction/telemetry.md index 8024cddf5f7..09b6c467d65 100644 --- a/docs/sources/tempo/introduction/telemetry.md +++ b/docs/sources/tempo/introduction/telemetry.md @@ -21,37 +21,42 @@ Correlating between the four pillars of observability helps create a holistic vi ## Metrics Metrics provide a high level picture of the state of a system. -Because they are numeric values and therefore can easily be compared against known thresholds, metrics are the foundation of alerts, which constantly run in the background and trigger when a value is outside of an expected range. This is typically the first sign that something is going on and are where discovery first starts. +Metrics are the foundation of alerts because metrics are numeric values and can be compared against known thresholds. +Alerts constantly run in the background and trigger when a value is outside of an expected range. +This is typically the first sign that something is going on and where discovery first starts. Metrics indicate that something is happening. ## Logs Logs provide an audit trail of activity from a single process that create informational context. Logs act as atomic events, detailing what's occurring in the services in your application. -Whereas metrics are quantitative (numeric) and structured, logs are qualitative (textual) and unstructured or semi-structured. They offer a higher degree of detail, but also at the expense of creating significantly higher data volumes. +Whereas metrics are quantitative (numeric) and structured, logs are qualitative (textual) and unstructured or semi-structured. +They offer a higher degree of detail, but at the expense of creating significantly higher data volumes. Logs let you know what's happening to your application. - ## Traces -Traces add further to the observability picture by telling you what happens at each step or action in a data pathway. Traces provide the map–-the where–-something is going wrong. -A trace provides a graphic representation of how long each step in the data flow pathway (for example, HTTP request, database lookup, call to a third party service) takes to complete. -It can show where new requests are initiated and finished, as well as how your system responds. +Traces add further to the observability picture by telling you what happens at each step or action in a data pathway. Traces provide the map of where something is going wrong. +A trace provides a graphic representation of how long each step in the data flow pathway takes to complete. For example, how long an HTTP request, a database lookup, or a call to a third-party service takes. +It can show where requests initiate and finish, as well as how your system responds. This data helps you locate problem areas and assess their impact, often in places you never would have anticipated or found without this ability to trace the request flow. ## Profiles Profiles help you understand how your applications utilize compute resources such as CPU time and memory. -This allows you to identify specific lines of code or functions that can be optimized to improve application performance and efficiency. +This helps identify specific lines of code or functions to optimize and improve performance and efficiency. ## Why traces? Metrics in themselves aren't sufficient to find the root cause and solve complex issues.
The same can be said for logs, which can contain a significant amount of information but lack the context of the interactions and dependencies between the different components of your complex environment. -Each pillar of observability (metrics, logs, traces, profiles) has its own unique strength when it comes to root causing issues. +Each pillar of observability—metrics, logs, traces, profiles—has its own unique strength when it comes to root-causing issues. To get the most value out of your observability strategy, you need to be able to correlate them. -Traces have the unique ability to show relationships between services. They allow you to identify which services are upstream from your service, which is helpful when you want to understand which services might be negatively impacted by problems in your service. They also allow you to identify which services are downstream from your service; this is valuable since your application relies on their downstream services, and problems with those services may be the cause of elevated errors or latency being reported by your service. +Traces have the unique ability to show relationships between services. +They help identify which services are upstream from your service, which is helpful when you want to understand which services might be negatively impacted by problems in your service. +Traces also help identify which services are downstream from your service. +This is valuable since your application relies on these downstream services, and problems with those services may be the cause of elevated errors or latency reported by your service. For example, you can directly see the failing database and all impacted failing edge endpoints. Using traces and [exemplars](https://grafana.com/docs/grafana/next/fundamentals/exemplars/), you can go from a metric data point and get to an associated trace. diff --git a/docs/sources/tempo/metrics-generator/_index.md b/docs/sources/tempo/metrics-generator/_index.md index 9c8a7436cc2..f1c9518014b 100644 --- a/docs/sources/tempo/metrics-generator/_index.md +++ b/docs/sources/tempo/metrics-generator/_index.md @@ -1,7 +1,6 @@ --- aliases: - - ./server_side_metrics # /docs/tempo//server_side_metrics/ - - /docs/tempo//metrics-generator/ + - ../operations/server_side_metrics # https://grafana.com/docs/tempo//operations/server_side_metrics/ title: Metrics-generator description: Metrics-generator is an optional Tempo component that derives metrics from ingested traces. weight: 500 @@ -79,3 +78,8 @@ If you want to enable metrics-generator for your Grafana Cloud account, refer to Enabling metrics generation and remote writing them to Grafana Cloud Metrics produces extra active series that could impact your billing. For more information on billing, refer to [Billing and usage](/docs/grafana-cloud/billing-and-usage/). + +## Multitenancy + +Tempo supports multitenancy in the metrics-generator through the use of environment variables and per-tenant overrides. +Refer to the [Multitenant Support for Metrics-Generator]({{< relref "multitenancy" >}}) documentation for more information. diff --git a/docs/sources/tempo/metrics-generator/active-series.md b/docs/sources/tempo/metrics-generator/active-series.md index 9dab55cfeb9..ba3991d2d51 100644 --- a/docs/sources/tempo/metrics-generator/active-series.md +++ b/docs/sources/tempo/metrics-generator/active-series.md @@ -16,9 +16,9 @@ These capabilities rely on a set of generated span metrics and service metrics. Any spans that are ingested by Tempo can create many metric series.
However, this doesn't mean that every time a span is ingested that a new active series is created. -The number of active series generated depends on the label pairs generated from span data that are associated with the metrics, similar to other Prometheus-formated data. +The number of active series generated depends on the label pairs generated from span data that are associated with the metrics, similar to other Prometheus-formatted data. -For additional information, refer to the [Active series and DPM documentation](/docs/grafana-cloud/billing-and-usage/active-series-and-dpm/#active-series). +For additional information, refer to the [Active series and DPM documentation](https://grafana.com/docs/grafana-cloud/billing-and-usage/active-series-and-dpm/). ## Active series calculation diff --git a/docs/sources/tempo/metrics-generator/multitenancy.md b/docs/sources/tempo/metrics-generator/multitenancy.md new file mode 100644 index 00000000000..c82e881dbcf --- /dev/null +++ b/docs/sources/tempo/metrics-generator/multitenancy.md @@ -0,0 +1,48 @@ +--- +aliases: + - /docs/tempo/latest/metrics-generator/multitenancy +title: Multitenancy support +description: Learn about multitenancy support in the metrics-generator. +weight: 600 +--- + +# Multitenancy support + +Multitenancy is supported in the metrics-generator through the use of environment variables and per-tenant overrides. +This is useful when you want to propagate the multitenancy to the metrics backend, +keeping the data separated and secure. + +## Requirements + +- Tempo version 2.4.0 or later + +## Usage + +To use this feature, you need to define the `remote_write_headers` override for each tenant in your configuration. +You can also use environment variables in your configuration file, which will be expanded at runtime. +To make use of environment variables, you need to pass the `--config.expand-env` flag to Tempo. + +Example: + +```yaml +overrides: + team-traces-a: + metrics_generator: + processors: [ 'span-metrics' ] + remote_write_headers: + Authorization: ${PROM_A_BASIC_AUTH} + team-traces-b: + metrics_generator: + processors: [ 'span-metrics', 'service-graphs' ] + remote_write_headers: + Authorization: ${PROM_B_BEARER_AUTH} +``` + +```bash +export PROM_A_BASIC_AUTH="Basic $(echo "team-a:$(cat /token-prometheus-a)"|base64|tr -d '[:space:]')" +export PROM_B_BEARER_AUTH="Bearer $(cat /token-prometheus-b)" +``` + +In this example, `PROM_A_BASIC_AUTH` and `PROM_B_BEARER_AUTH` are environment variables that contain the respective tenants' authorization tokens. +The `remote_write_headers` override is used to specify the `Authorization` header for each tenant. +The `Authorization` header is used to authenticate the remote write request to the Prometheus remote write endpoint. 
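To tie this together, a minimal sketch of starting Tempo with environment expansion enabled (the configuration file path is an assumption for this example):

```bash
# Export the tenant credentials, then start Tempo with --config.expand-env
# so ${PROM_A_BASIC_AUTH} and ${PROM_B_BEARER_AUTH} are substituted at load time.
export PROM_A_BASIC_AUTH="Basic $(echo "team-a:$(cat /token-prometheus-a)" | base64 | tr -d '[:space:]')"
export PROM_B_BEARER_AUTH="Bearer $(cat /token-prometheus-b)"
tempo --config.file=/etc/tempo/config.yaml --config.expand-env=true
```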
\ No newline at end of file diff --git a/docs/sources/tempo/metrics-generator/service-graph-view.md b/docs/sources/tempo/metrics-generator/service-graph-view.md index 25babf08ff2..ec53c1c7de1 100644 --- a/docs/sources/tempo/metrics-generator/service-graph-view.md +++ b/docs/sources/tempo/metrics-generator/service-graph-view.md @@ -5,6 +5,17 @@ description: Grafana's service graph view utilizes metrics generated by the metr aliases: - ./app-performance-mgmt # /docs/tempo//metrics-generator/app-performance-mgmt weight: 400 +refs: + enable-service-graphs: + - pattern: /docs/tempo/ + destination: https://grafana.com/docs/tempo//metrics-generator/service_graphs/enable-service-graphs/ + - pattern: /docs/enterprise-traces/ + destination: https://grafana.com/docs/enterprise-traces//metrics-generator/service_graphs/enable-service-graphs/ + span-metrics: + - pattern: /docs/tempo/ + destination: https://grafana.com/docs/tempo//metrics-generator/span_metrics/ + - pattern: /docs/enterprise-traces/ + destination: https://grafana.com/docs/enterprise-traces//metrics-generator/span_metrics/ --- # Service graph view @@ -27,13 +38,13 @@ You have to enable span metrics and service graph generation on the Grafana back To use the service graph view, you need: -* Tempo or Grafana Cloud Traces with either 1) the metrics generator enabled and configured or 2) Grafana Agent or Grafana Alloy enabled and configured to send data to a Prometheus-compatible metrics store -* [Services graphs]({{< relref "../metrics-generator/service_graphs/enable-service-graphs" >}}), which are enabled by default in Grafana -* [Span metrics]({{< relref "../metrics-generator/span_metrics#how-to-run" >}}) enabled in your Tempo data source configuration +* Tempo or Grafana Cloud Traces with either the metrics generator enabled and configured, or Grafana Agent or Grafana Alloy enabled and configured to send data to a Prometheus-compatible metrics store +* [Service graphs](ref:enable-service-graphs), which are enabled by default in Grafana +* [Span metrics](ref:span-metrics) enabled in your Tempo data source configuration The service graph view can be derived from metrics generated by either the metrics-generator or by Grafana Agent or Grafana Alloy. -For information on how to configure these features, refer to the [Grafana Tempo data sources documentation](/docs/grafana/latest/datasources/tempo/). +For information on how to configure these features, refer to the [Tempo data sources documentation](/docs/grafana//datasources/tempo/). ## What does the service graph view show? @@ -46,7 +57,7 @@ The service graph view provides a span metrics visualization (table) and service You can select any information in the table that has an underline to show more detailed information. You can also select any node in the service graph to display additional information.
-![Service graph with extended informaiton](/media/docs/grafana/data-sources/tempo/query-editor/tempo-ds-query-service-graph-prom.png) +![Service graph with extended information](/media/docs/grafana/data-sources/tempo/query-editor/tempo-ds-query-service-graph-prom.png) ### Error rate example diff --git a/docs/sources/tempo/metrics-generator/service_graphs/_index.md b/docs/sources/tempo/metrics-generator/service_graphs/_index.md index 4003d2ebe53..7c6d45dca44 100644 --- a/docs/sources/tempo/metrics-generator/service_graphs/_index.md +++ b/docs/sources/tempo/metrics-generator/service_graphs/_index.md @@ -29,10 +29,10 @@ The metrics-generator processes traces and generates service graphs in the form Service graphs work by inspecting traces and looking for spans with parent-child relationships that represent a request. The processor uses the [OpenTelemetry semantic conventions](https://github.com/open-telemetry/semantic-conventions/blob/main/docs/general/trace.md) to detect a myriad of requests. -It currently supports the following requests: +It supports the following requests: - A direct request between two services where the outgoing and the incoming span must have [`span.kind`](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#spankind), `client`, and `server`, respectively. - A request across a messaging system where the outgoing and the incoming span must have `span.kind`, `producer`, and `consumer` respectively. -- A database request; in this case the processor looks for spans containing attributes `span.kind`=`client` as well as one of `db.name` or `db.system`. See below for how the name of the node is determined for a database request. +- A database request; in this case the processor looks for spans containing attributes `span.kind`=`client` as well as one of `db.name` or `db.system`. Refer to the following section for how the name of the node is determined for a database request. Every span that can be paired up to form a request is kept in an in-memory store, until its corresponding pair span is received or the maximum waiting time has passed. When either of these conditions is reached, the request is recorded and removed from the local store. @@ -46,14 +46,15 @@ Each emitted metrics series have the `client` and `server` label corresponding w ### Virtual nodes Virtual nodes are nodes that form part of the lifecycle of a trace, -but spans for them are not being collected because they're outside the user's reach (for example, an external service for payment processing) or are not instrumented (for example, a frontend application). +but spans for them aren't collected because they're outside the user's reach or aren't instrumented. +For example, you might not collect spans for an external service for payment processing that's outside user interaction. Virtual nodes can be detected in two different ways: - The root span has `span.kind` set to `server`. This indicates that the request was initiated by an external system that's not instrumented, like a frontend application or an engineer via `curl`. -A `client` span does not have its matching `server` span, but has a peer attribute present. In this case, we make the assumption that a call was made to an external service, for which Tempo won't receive spans. +A `client` span doesn't have its matching `server` span, but has a peer attribute present. In this case, assume that a call was made to an external service, for which Tempo won't receive spans.
- The default peer attributes are `peer.service`, `db.name` and `db.system`. - - The order of the attributes is important, as the first one that is present will be used as the virtual node name. + - The order of the attributes is important, as the first attribute present is used as the virtual node name. A database node is identified by the span having at least `db.name` or `db.system` attribute. @@ -63,15 +64,19 @@ The name of a database node is determined using the following span attributes in The following metrics are exported: + + | Metric | Type | Labels | Description | | ----------------------------------------------------- | --------- | ------------------------------- | ---------------------------------------------------------------------------------------------------------- | -| traces_service_graph_request_total | Counter | client, server, connection_type | Total count of requests between two nodes | -| traces_service_graph_request_failed_total | Counter | client, server, connection_type | Total count of failed requests between two nodes | -| traces_service_graph_request_server_seconds | Histogram | client, server, connection_type | Time for a request between two nodes as seen from the server | -| traces_service_graph_request_client_seconds | Histogram | client, server, connection_type | Time for a request between two nodes as seen from the client | -| traces_service_graph_request_messaging_system_seconds | Histogram | client, server, connection_type | (Off by default) Time between publisher and consumer for services communicating through a messaging system | -| traces_service_graph_unpaired_spans_total | Counter | client, server, connection_type | Total count of unpaired spans | -| traces_service_graph_dropped_spans_total | Counter | client, server, connection_type | Total count of dropped spans | +| `traces_service_graph_request_total` | Counter | client, server, connection_type | Total count of requests between two nodes | +| `traces_service_graph_request_failed_total` | Counter | client, server, connection_type | Total count of failed requests between two nodes | +| `traces_service_graph_request_server_seconds` | Histogram | client, server, connection_type | Time for a request between two nodes as seen from the server | +| `traces_service_graph_request_client_seconds` | Histogram | client, server, connection_type | Time for a request between two nodes as seen from the client | +| `traces_service_graph_request_messaging_system_seconds` | Histogram | client, server, connection_type | (Off by default) Time between publisher and consumer for services communicating through a messaging system | +| `traces_service_graph_unpaired_spans_total` | Counter | client, server, connection_type | Total count of unpaired spans | +| `traces_service_graph_dropped_spans_total` | Counter | client, server, connection_type | Total count of dropped spans | + + Duration is measured both from the client and the server sides. @@ -81,7 +86,7 @@ Additional labels can be included using the `dimensions` configuration option, o Since the service graph processor has to process both sides of an edge, it needs to process all spans of a trace to function properly. -If spans of a trace are spread out over multiple instances, spans are not paired up reliably. +If spans of a trace are spread out over multiple instances, spans aren't paired up reliably.
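One way to satisfy this requirement at the collection layer is trace-ID-aware load balancing, so that all spans of a trace land on the same instance. The following OpenTelemetry Collector snippet is a sketch under stated assumptions: it uses the `loadbalancing` exporter from the collector's contrib distribution, and the resolver hostname and port are placeholders:

```yaml
exporters:
  loadbalancing:
    routing_key: "traceID"        # keep every span of a trace on one backend
    protocol:
      otlp:
        tls:
          insecure: true
    resolver:
      dns:
        hostname: tempo-distributor.example.local   # assumed service address
        port: 4317
```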
#### Activate `enable_virtual_node_label` @@ -89,4 +94,4 @@ Activating this feature adds the following label and corresponding values: | Label | Possible Values | Description | |-------------------------|-----------------------------|--------------------------------------------------------------------------| -| virtual_node | `unset`, `client`, `server` | Explicitly indicates the side that is uninstrumented | +| `virtual_node` | `unset`, `client`, `server` | Explicitly indicates the uninstrumented side | diff --git a/docs/sources/tempo/metrics-generator/service_graphs/enable-service-graphs.md b/docs/sources/tempo/metrics-generator/service_graphs/enable-service-graphs.md index 5a01c425d9b..079a0e4246e 100644 --- a/docs/sources/tempo/metrics-generator/service_graphs/enable-service-graphs.md +++ b/docs/sources/tempo/metrics-generator/service_graphs/enable-service-graphs.md @@ -5,32 +5,38 @@ aliases: title: Enable service graphs description: Learn how to enable service graphs weight: 200 +refs: + cardinality: + - pattern: /docs/tempo/ + destination: https://grafana.com/docs/tempo//metrics-generator/cardinality/ + - pattern: /docs/enterprise-traces/ + destination: https://grafana.com/docs/enterprise-traces//metrics-generator/cardinality/ --- ## Enable service graphs Service graphs are generated in Tempo and pushed to a metrics storage. Then, they can be represented in Grafana as a graph. -You will need those components to fully use service graphs. +You need those components to fully use service graphs. {{< admonition type="note" >}} Cardinality can pose a problem when you have lots of services. -To learn more about cardinality and how to perform a dry run of the metrics generator, see the [Cardinality documentation]({{< relref "../cardinality" >}}). -{{% /admonition %}} +To learn more about cardinality and how to perform a dry run of the metrics-generator, refer to the [Cardinality documentation](ref:cardinality). +{{< /admonition >}} ### Enable service graphs in Tempo/GET To enable service graphs in Tempo/GET, enable the metrics generator and add an overrides section which enables the `service-graphs` generator. -For more information, refer to the [configuration details]({{< relref "../../configuration#metrics-generator" >}}). +For more information, refer to the [configuration details](https://grafana.com/docs/tempo//configuration#metrics-generator). -To enable service graphs when using Grafana Agent, refer to the [Grafana Agent and service graphs documentation]({{< relref "../../configuration/grafana-agent/service-graphs" >}}). +To enable service graphs when using Grafana Alloy, refer to the [Grafana Alloy and service graphs documentation](https://grafana.com/docs/tempo//configuration/grafana-alloy/service-graphs/). ### Enable service graphs in Grafana {{< admonition type="note" >}} -Since Grafana 9.0.4, service graphs have been enabled by default. Prior to Grafana 9.0.4, service graphs were hidden -under the [feature toggle](/docs/grafana/latest/setup-grafana/configure-grafana/#feature_toggles) `tempoServiceGraph`. -{{% /admonition %}} +Service graphs are enabled by default in Grafana. Prior to Grafana 9.0.4, service graphs were hidden +under the [feature toggle](/docs/grafana/latest/setup-grafana/configure-grafana) `tempoServiceGraph`. 
+{{< /admonition >}} Configure a Tempo data source's service graphs by linking to the Prometheus backend where metrics are being sent: diff --git a/docs/sources/tempo/metrics-generator/service_graphs/estimate-cardinality.md b/docs/sources/tempo/metrics-generator/service_graphs/estimate-cardinality.md index 3d0d6501d07..89931329947 100644 --- a/docs/sources/tempo/metrics-generator/service_graphs/estimate-cardinality.md +++ b/docs/sources/tempo/metrics-generator/service_graphs/estimate-cardinality.md @@ -44,7 +44,7 @@ Finally, we get the following cardinality estimation: Sum: [([2 * #hb] + 2) * #hops] + [2 * #services] ``` -{{% admonition type="note" %}} +{{< admonition type="note" >}} If `enable_messaging_system_latency_histogram` configuration is set to `true`, another histogram is produced: ``` @@ -57,8 +57,8 @@ In that case, the estimation formula would be: Sum: [([3 * #hb] + 2) * #hops] + [2 * #services] ``` -{{% /admonition %}} +{{< /admonition >}} {{< admonition type="note" >}} To estimate the number of metrics, refer to the [Dry run metrics generator]({{< relref "../cardinality" >}}) documentation. -{{% /admonition %}} \ No newline at end of file +{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/tempo/metrics-generator/span_metrics.md b/docs/sources/tempo/metrics-generator/span_metrics.md index 68430e3feca..88aba4a01b5 100644 --- a/docs/sources/tempo/metrics-generator/span_metrics.md +++ b/docs/sources/tempo/metrics-generator/span_metrics.md @@ -4,6 +4,12 @@ aliases: title: Span metrics description: The span metrics processor generates metrics from ingested tracing data, including request, error, and duration (RED) metrics. weight: 200 +refs: + cardinality: + - pattern: /docs/tempo/ + destination: https://grafana.com/docs/tempo//metrics-generator/cardinality/ + - pattern: /docs/enterprise-traces/ + destination: https://grafana.com/docs/enterprise-traces//metrics-generator/cardinality/ --- # Span metrics @@ -23,7 +29,7 @@ Even if you already have metrics, span metrics can provide in-depth monitoring o The generated metrics will show application level insight into your monitoring, as far as tracing gets propagated through your applications. -Last but not least, span metrics lower the entry barrier for using [exemplars](/docs/grafana/latest/basics/exemplars/). +Last but not least, span metrics lower the entry barrier for using [exemplars](https://grafana.com/docs/grafana//basics/exemplars/). An exemplar is a specific trace representative of measurement taken in a given time interval. Since traces and metrics co-exist in the metrics-generator, exemplars can be automatically added, providing additional value to these metrics. @@ -31,7 +37,7 @@ exemplars can be automatically added, providing additional value to these metric ## How to run To enable span metrics in Tempo or Grafana Enterprise Traces, enable the metrics generator and add an overrides section which enables the `span-metrics` processor. -Refer to [the configuration details]({{< relref "../configuration#metrics-generator" >}}). +Refer to [the configuration details](https://grafana.com/docs/tempo//configuration#metrics-generator). If you want to enable metrics-generator for your Grafana Cloud account, refer to the [Metrics-generator in Grafana Cloud](https://grafana.com/docs/grafana-cloud/send-data/traces/metrics-generator/) documentation. 
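As a minimal sketch of that overrides wiring (the remote-write URL and storage path are assumptions; refer to the configuration details linked above for the full option set):

```yaml
metrics_generator:
  storage:
    path: /var/tempo/generator/wal                 # assumed local WAL location
    remote_write:
      - url: http://prometheus:9090/api/v1/write   # assumed metrics backend

overrides:
  defaults:
    metrics_generator:
      processors: ['span-metrics']                 # enables the span-metrics processor
```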
@@ -44,8 +50,8 @@ This processor mirrored the implementation from the OpenTelemetry Collector of t The OTel `spanmetricsprocessor` has since been [deprecated](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/processor/spanmetricsprocessor/v0.95.0/processor/spanmetricsprocessor/README.md) and replaced with the [span metric connector](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/processor/spanmetricsprocessor/v0.95.0/connector/spanmetricsconnector/README.md). {{< admonition type="note" >}} -To learn more about cardinality and how to perform a dry run of the metrics generator, see the [Cardinality documentation]({{< relref "./cardinality" >}}). -{{% /admonition %}} +To learn more about cardinality and how to perform a dry run of the metrics generator, see the [Cardinality documentation](ref:cardinality). +{{< /admonition >}} ### Metrics @@ -59,32 +65,32 @@ The following metrics are exported: {{< admonition type="note" >}} In Tempo 1.4 and 1.4.1, the histogram metric was called `traces_spanmetrics_duration_seconds`. This was changed later to be consistent with the metrics generated by Grafana Agent and the OpenTelemetry Collector. -{{% /admonition %}} +{{< /admonition >}} By default, the metrics processor adds the following labels to each metric: `service`, `span_name`, `span_kind`, `status_code`, `status_message`, `job`, and `instance`. - `service` - The name of the service that generated the span - `span_name` - The unique name of the span - `span_kind` - The type of span, this can be one of five values: - - `SPAN_KIND_SERVER` - The span was generated by a call from another service - - `SPAN_KIND_CLIENT` - The span made a call to another service - - `SPAN_KIND_INTERNAL` - The span does not have interaction outside of the service it was generated in - - `SPAN_KIND_PUBLISHER` - The span created data that was pushed onto a bus or message broker - - `SPAN_KIND_CONSUMER` - The span consumed data that was on a bus or messaging system + - `SPAN_KIND_SERVER` - The span was generated by a call from another service + - `SPAN_KIND_CLIENT` - The span made a call to another service + - `SPAN_KIND_INTERNAL` - The span does not have interaction outside of the service it was generated in + - `SPAN_KIND_PUBLISHER` - The span created data that was pushed onto a bus or message broker + - `SPAN_KIND_CONSUMER` - The span consumed data that was on a bus or messaging system - `status_code` - The result of the span, this can be one of three values: - - `STATUS_CODE_UNSET` - Result of the span was unset/unknown - - `STATUS_CODE_OK` - The span operation completed successfully - - `STATUS_CODE_ERROR` - The span operation completed with an error + - `STATUS_CODE_UNSET` - Result of the span was unset/unknown + - `STATUS_CODE_OK` - The span operation completed successfully + - `STATUS_CODE_ERROR` - The span operation completed with an error - `status_message` (optionally enabled) - The message that details the reason for the `status_code` label - `job` - The name of the job, a combination of namespace and service; only added if `metrics_generator.processor.span_metrics.enable_target_info: true` - `instance` - The instance ID; only added if `metrics_generator.processor.span_metrics.enable_target_info: true` -Additional user defined labels can be created using the [`dimensions` configuration option]({{< relref "../configuration#metrics-generator" >}}). -When a configured dimension collides with one of the default labels (e.g. 
`status_code`), the label for the respective dimension is prefixed with double underscore (i.e. `__status_code`). +Additional user-defined labels can be created using the [`dimensions` configuration option](https://grafana.com/docs/tempo//configuration#metrics-generator). +When a configured dimension collides with one of the default labels (for example, `status_code`), the label for the respective dimension is prefixed with double underscore (for example, `__status_code`). -Custom labeling of dimensions is also supported using the [`dimension_mapping` configuration option]({{< relref "../configuration#metrics-generator" >}}). +Custom labeling of dimensions is also supported using the [`dimension_mapping` configuration option](https://grafana.com/docs/tempo//configuration#metrics-generator). -An optional metric called `traces_target_info` using all resource level attributes as dimensions can be enabled in the [`enable_target_info` configuration option]({{< relref "../configuration#metrics-generator" >}}). +An optional metric called `traces_target_info` using all resource level attributes as dimensions can be enabled in the [`enable_target_info` configuration option](https://grafana.com/docs/tempo//configuration#metrics-generator). If you use a ratio-based sampler, you can use the custom sampler below to not lose metric information. However, you also need to set `metrics_generator.processor.span_metrics.span_multiplier_key` to `"X-SampleRatio"`. diff --git a/docs/sources/tempo/operations/_index.md b/docs/sources/tempo/operations/_index.md index a1b3896ebb5..1b4307f5dd2 100644 --- a/docs/sources/tempo/operations/_index.md +++ b/docs/sources/tempo/operations/_index.md @@ -3,6 +3,8 @@ title: Manage Tempo menuTitle: Manage description: Learn how to manage and tune Tempo. weight: 450 +aliases: + - ./ingester_pvcs/ # https://grafana.com/docs/tempo/next/operations/ingester_pvcs/ --- # Manage Tempo diff --git a/docs/sources/tempo/operations/architecture.md b/docs/sources/tempo/operations/architecture.md index ed1bf16c522..ec813c59392 100644 --- a/docs/sources/tempo/operations/architecture.md +++ b/docs/sources/tempo/operations/architecture.md @@ -1,33 +1,31 @@ --- title: Tempo architecture -description: A collection of documents that detail Tempo architectural decisions and operational implications. +description: Learn about Tempo architectural decisions and operational implications. aliases: - - /docs/tempo/latest/architecture/architecture - - /docs/tempo/latest/architecture - - /docs/tempo/operations/architecture -weight: 10 + - ../architecture/architecture # https://grafana.com/docs/tempo//architecture/architecture/ + - ../architecture # https://grafana.com/docs/tempo//architecture/ +weight: 100 --- # Tempo architecture -This topic provides an overview of the major components of Tempo. Refer to the [example setups]({{< relref "../getting-started/example-demo-app" >}}) -or [deployment options]({{< relref "../setup/deployment" >}}) for help deploying. +This topic provides an overview of the major components of Tempo. Refer to the [example setups](https://grafana.com/docs/tempo//getting-started/example-demo-app/) +or [deployment options](https://grafana.com/docs/tempo//setup/deployment/) for help deploying.

*Tempo Architecture diagram*

- Tempo comprises the following top-level components. ## Distributor -The distributor accepts spans in multiple formats including Jaeger, OpenTelemetry, Zipkin. It routes spans to ingesters by hashing the `traceID` and using a [distributed consistent hash ring]({{< relref "./consistent_hash_ring" >}}). +The distributor accepts spans in multiple formats including Jaeger, OpenTelemetry, and Zipkin. It routes spans to ingesters by hashing the `traceID` and using a [distributed consistent hash ring](https://grafana.com/docs/tempo//operations/manage-advanced-systems/consistent_hash_ring/). The distributor uses the receiver layer from the [OpenTelemetry Collector](https://github.com/open-telemetry/opentelemetry-collector). -For best performance, it is recommended to ingest [OTel Proto](https://github.com/open-telemetry/opentelemetry-proto). For this reason -the [Grafana Agent](https://github.com/grafana/agent) uses the otlp exporter/receiver to send spans to Tempo. +For best performance, it's recommended to ingest [OTel Proto](https://github.com/open-telemetry/opentelemetry-proto). +For this reason, [Grafana Alloy](https://github.com/grafana/alloy/) uses the OTLP exporter/receiver to send spans to Tempo. ## Ingester -The [Ingester]({{< relref "../configuration#ingester" >}}) batches trace into blocks, creates bloom filters and indexes, and then flushes it all to the backend. +The [Ingester](https://grafana.com/docs/tempo//configuration/#ingester) batches traces into blocks, creates bloom filters and indexes, and then flushes it all to the backend. Blocks in the backend are generated in the following layout: ``` @@ -44,7 +42,7 @@ Blocks in the backend are generated in the following layout: The Query Frontend is responsible for sharding the search space for an incoming query. -Traces are exposed via a simple HTTP endpoint: +A simple HTTP endpoint exposes traces: `GET /api/traces/` Internally, the Query Frontend splits the blockID space into a configurable number of shards and queues these requests. @@ -52,12 +50,11 @@ Queriers connect to the Query Frontend via a streaming gRPC connection to proces ## Querier -The querier is responsible for finding the requested trace id in either the ingesters or the backend storage. Depending on -parameters it will query both the ingesters and pull bloom/indexes from the backend to search blocks in object -storage. +The querier finds the requested trace ID in either the ingesters or the backend storage. Depending on +parameters, the querier queries the ingesters for recently ingested traces and pulls the bloom filters and indexes from the backend storage to efficiently locate the traces within object storage blocks. The querier exposes an HTTP endpoint at: -`GET /querier/api/traces/`, but its not expected to be used directly. +`GET /querier/api/traces/`, but it's not intended for direct use. Queries should be sent to the Query Frontend. @@ -67,4 +64,4 @@ The Compactors stream blocks to and from the backend storage to reduce the total ## Metrics generator -This is an **optional** component that derives metrics from ingested traces and writes them to a metrics storage. Refer to the [metrics-generator documentation]({{< relref "../metrics-generator" >}}) to learn more. +This is an **optional** component that derives metrics from ingested traces and writes them to a metrics storage. Refer to the [metrics-generator documentation](https://grafana.com/docs/tempo//metrics-generator/) to learn more.
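To make the read path concrete, here's a hedged example of fetching a trace through the Query Frontend endpoint described above; the port assumes Tempo's default HTTP listen port of 3200, and the trace ID is a placeholder:

```bash
# GET /api/traces/<traceID> is served by the query-frontend,
# which shards the lookup across queriers.
curl -s http://localhost:3200/api/traces/2f3e0cee77ae5dc9c17ade3689eb2e54
```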
diff --git a/docs/sources/tempo/operations/backend_search.md b/docs/sources/tempo/operations/backend_search.md index 823b7a2b45c..f9a6be62aa6 100644 --- a/docs/sources/tempo/operations/backend_search.md +++ b/docs/sources/tempo/operations/backend_search.md @@ -2,7 +2,10 @@ title: Tune search performance menutitle: Tune search performance description: How to tune Tempo to improve search performance. -weight: 90 +weight: 500 +aliases: + - ./serverless_aws/ # https://grafana.com/docs/tempo/next/operations/serverless_aws/ + - ./serverless_gcp/ # https://grafana.com/docs/tempo/next/operations/serverless_gcp/ --- # Tune search performance @@ -111,34 +114,6 @@ querier: max_concurrent_queries: 20 ``` -With serverless technologies: - -{{< admonition type="caution" >}} -The Tempo serverless feature is now deprecated and will be removed in an upcoming release. -{{< /admonition >}} - -{{< admonition type="note" >}} -Serverless can be a nice way to reduce cost by using it as spare query capacity. -However, serverless tends to have higher variance then simply allowing the queriers to perform the searches themselves. -{{< /admonition >}} - -```yaml -querier: - - search: - # A list of endpoints to query. Load will be spread evenly across - # these multiple serverless functions. - external_endpoints: - - https:// - - # If set to a non-zero value a second request will be issued at the provided duration. Recommended to - # be set to p99 of search requests to reduce long tail latency. - external_hedge_requests_at: 8s - - # The maximum number of requests to execute when hedging. Requires hedge_requests_at to be set. - external_hedge_requests_up_to: 2 -``` - ### Query-frontend [Query frontend]({{< relref "../configuration#query-frontend" >}}) lists all configuration @@ -174,55 +149,13 @@ query_frontend: target_bytes_per_job: 50_000_000 ``` -### Serverless environment - -{{< admonition type="caution" >}} -The Tempo serverless feature is now deprecated and will be removed in an upcoming release. -{{< /admonition >}} - -Serverless isn't required, but with larger loads, serverless can be used to reduce costs. -Tempo has support for Google Cloud Run and AWS Lambda. -In both cases, you can use the following -settings to configure Tempo to use a serverless environment: - -```yaml -querier: - search: - # A list of external endpoints that the querier will use to offload backend search requests. They must - # take and return the same value as /api/search endpoint on the querier. This is intended to be - # used with serverless technologies for massive parallelization of the search path. - # The default value of "" disables this feature. - [external_endpoints: | default = ] - - # If external_endpoints is set then the querier will primarily act as a proxy for whatever serverless backend - # you have configured. This setting allows the operator to have the querier prefer itself for a configurable - # number of subqueries. In the default case of 2 the querier will process up to 2 search requests subqueries before starting - # to reach out to external_endpoints. - # Setting this to 0 will disable this feature and the querier will proxy all search subqueries to external_endpoints. - [prefer_self: | default = 2 ] - - # If set to a non-zero value a second request will be issued at the provided duration. Recommended to - # be set to p99 of external search requests to reduce long tail latency. - # (default: 4s) - [external_hedge_requests_at: ] - - # The maximum number of requests to execute when hedging. 
Requires hedge_requests_at to be set. - # (default: 3) - [external_hedge_requests_up_to: ] -``` - -For cloud-specific details: - -- [AWS Lambda]({{< relref "./serverless_aws" >}}) -- [Google Cloud Run]({{< relref "./serverless_gcp" >}}) - ## Settings that are safe to increase without major impact. Scaling up queriers is a safe way to add more query capacity. At Grafana Labs, we prefer to scale queriers horizontally by adding more replicas. If you see out of memory (OOM) errors, it might be worth scaling the queriers vertically. -We recommend running at least two replicates of query-frontend. These replicas should be scaled vertically instead of horizontally. +We recommend running at least two replicas of query-frontend. These replicas should be scaled vertically instead of horizontally. If you need to scale, scale query-frontends vertically by adding more CPU and RAM. Currently, query-frontends aren’t scaled horizontally, but this might change in the future. @@ -334,21 +267,6 @@ This option controls the upper limit on the size of a job, and can be used as a * In testing at Grafana Labs, 100MB to 200MB is a good range for this configuration, and works across different sizes of clusters. * We recommend keeping this fixed within the recommended range. -### `querier.search.prefer_self` parameter - -{{< admonition type="note" >}} -This configuration only applies to `tempo-serverless`. -{{< /admonition >}} - -This setting controls the number of job the querier will process before spilling over the `search_external_endpoints` (tempo-serverless). - -#### Guidelines - -* In testing at Grafana Labs, serverless suffered from cold starts problems. If your query load is predictable, serverless isn't recommended. -* Increase the value of `prefer_self` if you want to process more jobs in the querier and spill out in extreme cases. -* Setting this to a very big number is as good as turning it off because the querier tries to process all the jobs and it never spills over to serverless. -* If we set this to a low value, we spill more jobs to serverless, even when queriers have capacity to process the job, and due to cold start, query latency increases. - ### `querier.frontend_worker.parallelism` parameter Number of simultaneous queries to process per query-frontend or query-scheduler. This configuration controls the number of concurrent requests per query-frontend that a querier processes. diff --git a/docs/sources/tempo/operations/caching.md b/docs/sources/tempo/operations/caching.md index fc986014437..7c16e2eb79d 100644 --- a/docs/sources/tempo/operations/caching.md +++ b/docs/sources/tempo/operations/caching.md @@ -2,7 +2,7 @@ title: Improve performance with caching menuTitle: Improve performance with caching description: Learn how to improve query performance by using caching. -weight: 65 +weight: 600 --- # Improve performance with caching @@ -24,7 +24,7 @@ Refer to [Deploying Tempo]({{< relref "../setup/deployment" >}}). As a cluster grows in size, the number of instances of Tempo connecting to the cache servers also increases. By default, Memcached has a connection limit of 1024. -Memcached refuses new connections when this limit is surpassed. +Memcached refuses connections when this limit is surpassed. You can resolve this issue by increasing the connection limit of Memcached. You can use the `tempo_memcache_request_duration_seconds_count` metric to observe these errors.
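For example, when running Memcached directly, the connection limit can be raised with the `-c` flag; the values below are illustrative rather than a recommendation:

```bash
# -m sets cache memory in MB; -c raises the simultaneous-connection
# limit above the default of 1024.
memcached -m 1024 -c 4096
```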
diff --git a/docs/sources/tempo/operations/cross_tenant_query.md b/docs/sources/tempo/operations/cross_tenant_query.md deleted file mode 100644 index e4946d978fa..00000000000 --- a/docs/sources/tempo/operations/cross_tenant_query.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: Cross-tenant query federation -menuTitle: Cross-tenant query -description: Cross-tenant query federation -weight: 70 -aliases: -- /docs/tempo/operations/cross-tenant-query ---- - - -# Cross-tenant query federation - -{{< admonition type="note" >}} -You need to enable `multitenancy_enabled: true` in the cluster for multi-tenant querying to work. -Refer to [Enable multi-tenancy](/docs/tempo/latest/operations/multitenancy/) for more details and implications of `multitenancy_enabled: true`. -{{% /admonition %}} - -Tempo supports multi-tenant queries for search, search-tags, and trace-by-ID search operations. - -To perform multi-tenant queries, send tenant IDs separated by a `|` character in the `X-Scope-OrgID` header, for example, `foo|bar`. - -By default, cross-tenant query is enabled and can be controlled using `multi_tenant_queries_enabled` configuration setting. - -```yaml -query_frontend: - multi_tenant_queries_enabled: true -``` - -For more information on configuration options, refer to [Enable multitenancy](https://grafana.com/docs/tempo/latest/operations/multitenancy/). - -## TraceQL queries - -Queries performed using the cross-tenant configured data source, in either **Explore** or inside of dashboards, -are performed across all the tenants that you specified in the **X-Scope-OrgID** header. - -TraceQL queries that compare multiple spansets may not correctly return all traces in a cross-tenant query. For instance, - -``` -{ span.attr1 = "bar" } && { span.attr2 = "foo" } -``` - -TraceQL evaluates a contiguously stored trace. -If these two conditions are satisfied in separate tenants, then Tempo doesn't correctly return the trace. diff --git a/docs/sources/tempo/operations/dedicated_columns.md b/docs/sources/tempo/operations/dedicated_columns.md index 27fcdc19340..885ba5bf7fd 100644 --- a/docs/sources/tempo/operations/dedicated_columns.md +++ b/docs/sources/tempo/operations/dedicated_columns.md @@ -1,7 +1,7 @@ --- title: Dedicated attribute columns description: Learn how to use dedicated attribute columns to improve query performance. -weight: 42 +weight: 400 --- # Dedicated attribute columns @@ -60,7 +60,7 @@ Similarly, default overrides take precedence over storage block configuration. ## Usage Dedicated attribute columns are limited to 10 span attributes and 10 resource attributes with string values. -As a rule of thumb, good candidates for dedicated attribute columns are attributes that contribute the most to the block size, +As a rule of thumb, good candidates for dedicated attribute columns are attributes that contribute the most to the block size, even if they are not frequently queried. Reducing the generic attribute key-value list size significantly improves query performance. diff --git a/docs/sources/tempo/operations/generic_forwarding.md b/docs/sources/tempo/operations/generic_forwarding.md deleted file mode 100644 index b8596723f5c..00000000000 --- a/docs/sources/tempo/operations/generic_forwarding.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: Generic forwarding -menuTitle: Generic forwarding -description: Asynchronous replication of ingested traces -weight: 130 ---- - -# Generic forwarding - -Generic forwarding allows asynchronous replication of ingested traces. 
The distributor writes received spans to both the ingester and defined endpoints, if enabled. This feature works in a "best-effort" manner, meaning that no retries happen if an error occurs during replication. - -{{< admonition type="warning" >}} -Generic forwarding does not work retroactively. Once enabled, the distributor only replicates freshly ingested spans. -{{% /admonition %}} - - -## Configure generic forwarding - -Enabling generic forwarding requires the configuration of the `distributor` and `overrides`. - -1. First, define a list of forwarders in the `distributor` section. Each forwarder must specify a unique `name`, supported `backend`, and backend-specific configuration. - -1. Second, reference these forwarders in the `overrides` section. This allows for fine-grained control over forwarding and makes it possible to enable this feature globally or on a per-tenant basis. - -For a detailed view of all the config options for the generic forwarding feature, please refer to [distributor]({{< relref "../configuration#distributor" >}}) and [overrides]({{< relref "../configuration#overrides" >}}) configuration pages. diff --git a/docs/sources/tempo/operations/ingester_pvcs.md b/docs/sources/tempo/operations/ingester_pvcs.md deleted file mode 100644 index f0a02e4512e..00000000000 --- a/docs/sources/tempo/operations/ingester_pvcs.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: Resize ingester persistent volume operations -menuTitle: Resize ingester PVCs -description: Learn how to resize ingester persistent volume operations. -weight: 50 ---- - -# Resize ingester persistent volume operations - -Tempo ingesters make heavy use of local disks to store write-ahead logs and blocks before being flushed to the backend (GCS, S3, etc.). -It is important to monitor the free volume space as full disks can lead to data loss and other errors. -The amount of disk space available affects how much volume a Tempo ingester can process and the length of time an outage to the backend can be tolerated. - -Therefore it may be necessary to increase the disk space for ingesters as usage increases. - -We recommend using SSDs for local storage. - -## Increase PVC size - -When deployed as a StatefulSet with Persistent Volume Claims (PVC), some manual steps are required. -The following configuration has worked successfully on GKE with GCS: - -1. Edit the persistent volume claim (pvc) for each ingester to the new size. - - ```bash - kubectl patch pvc -n -p '{"spec": {"resources": {"requests": {"storage": "'15Gi'"}}}}' - ``` - - Check all disks have been upgraded by running: - - `kubectl get pvc -n ` - - A restart is not necessary as the pods will automatically detect the increased disk space. - -1. Delete the StatefulSet but leave the pods running: - - `kubectl delete sts --cascade=false -n ingester` - -1. Edit and recreate the Statefulset with the new size. This covers new pods. There are many ways to deploy Tempo to Kubernetes, these are examples for the popular ones: - * Raw YAML: `kubectl apply -f .yaml` - * Helm: `helm upgrade ... tempo ...` - * Tanka: `tk apply ...` diff --git a/docs/sources/tempo/operations/manage-advanced-systems/_index.md b/docs/sources/tempo/operations/manage-advanced-systems/_index.md new file mode 100644 index 00000000000..0b87a42fc2d --- /dev/null +++ b/docs/sources/tempo/operations/manage-advanced-systems/_index.md @@ -0,0 +1,12 @@ +--- +title: Manage advanced systems +menuTitle: Manage advanced systems +description: Learn how to manage and tune complex installations. 
+weight: 700 +--- + +# Manage advanced systems + +This section provides resources for managing and tuning advanced Tempo configurations. + +{{< section >}} diff --git a/docs/sources/tempo/operations/consistent_hash_ring.md b/docs/sources/tempo/operations/manage-advanced-systems/consistent_hash_ring.md similarity index 87% rename from docs/sources/tempo/operations/consistent_hash_ring.md rename to docs/sources/tempo/operations/manage-advanced-systems/consistent_hash_ring.md index d2d9d303e46..fecb8556d94 100644 --- a/docs/sources/tempo/operations/consistent_hash_ring.md +++ b/docs/sources/tempo/operations/manage-advanced-systems/consistent_hash_ring.md @@ -2,7 +2,9 @@ title: Tune the consistent hash rings menuTitle: Tune the consistent hash rings description: Optimize the consistent hash rings for Tempo. -weight: 40 +weight: 500 +aliases: + - ../consistent_hash_ring/ # https://grafana.com/docs/tempo//operations/consistent_hash_ring/ --- # Tune the consistent hash rings @@ -22,7 +24,7 @@ Each hash ring exists for a distinct reason. Unless you are running with limits, this ring does not impact Tempo operation. -This ring is only used when `global` rate limits are used. The distributors use it to count the other active distributors. Incoming traffic is assumed to be evenly spread across all distributors and (global_rate_limit / # of distributors) is used to rate limit locally. +This ring is only used when `global` rate limits are used. The distributors use it to count the other active distributors. Incoming traffic is assumed to be evenly spread across all distributors and `(global_rate_limit / # of distributors)` is used to rate limit locally. ## Ingester @@ -52,7 +54,7 @@ This ring is used by the compactors to shard compaction jobs. Jobs are hashed in ## Interacting with the rings Web pages are available at the following endpoints. They show every ring member, their tokens, and include the ability to "Forget" a ring member. "Forgetting" is useful when a -ring member leaves the ring without properly shutting down (and therefore leaves its tokens in the ring). +ring member leaves the ring without properly shutting down, and therefore leaves its tokens in the ring. ### Distributor @@ -88,4 +90,4 @@ Unhealthy compactors will allow the blocklist to grow significantly. If the comp ## Configuring the rings -Ring/Lifecycler configuration control how a component interacts with the ring. Refer to the [configuration]({{< relref "../configuration" >}}) topic for more details. +Ring/Lifecycler configuration controls how a component interacts with the ring. Refer to the [configuration](https://grafana.com/docs/tempo//configuration/) topic for more details. diff --git a/docs/sources/tempo/operations/manage-advanced-systems/cross_tenant_query.md b/docs/sources/tempo/operations/manage-advanced-systems/cross_tenant_query.md new file mode 100644 index 00000000000..45b46dbff85 --- /dev/null +++ b/docs/sources/tempo/operations/manage-advanced-systems/cross_tenant_query.md @@ -0,0 +1,43 @@ +--- +title: Cross-tenant query federation +menuTitle: Cross-tenant query +description: Cross-tenant query federation +weight: 300 +aliases: +- ../cross-tenant-query # https://grafana.com/docs/tempo//operations/cross-tenant-query/ +- ../cross_tenant_query # https://grafana.com/docs/tempo//operations/cross_tenant_query/ +--- + +# Cross-tenant query federation + +{{< admonition type="note" >}} +You need to enable `multitenancy_enabled: true` in the cluster for multi-tenant querying to work.
+Refer to [Enable multi-tenancy](https://grafana.com/docs/tempo//operations/manage-advanced-systems/multitenancy/) for more details and implications of `multitenancy_enabled: true`. +{{< /admonition >}} + +Tempo supports multi-tenant queries for search, search-tags, and trace-by-ID search operations. + +To perform multi-tenant queries, send tenant IDs separated by a `|` character in the `X-Scope-OrgID` header, for example, `foo|bar`. + +By default, cross-tenant query is enabled and can be controlled using the `multi_tenant_queries_enabled` configuration setting. + +```yaml +query_frontend: + multi_tenant_queries_enabled: true +``` + +For more information on configuration options, refer to [Enable multitenancy](https://grafana.com/docs/tempo//operations/manage-advanced-systems/multitenancy/). + +## TraceQL queries + +Queries performed using the cross-tenant configured data source, in either **Explore** or inside of dashboards, +are performed across all the tenants that you specified in the **X-Scope-OrgID** header. + +TraceQL queries that compare multiple spansets may not correctly return all traces in a cross-tenant query. For instance, + +``` +{ span.attr1 = "bar" } && { span.attr2 = "foo" } +``` + +TraceQL evaluates each trace as a single, contiguously stored unit. +If these two conditions are satisfied in separate tenants, then Tempo doesn't correctly return the trace. diff --git a/docs/sources/tempo/operations/manage-advanced-systems/generic_forwarding.md b/docs/sources/tempo/operations/manage-advanced-systems/generic_forwarding.md new file mode 100644 index 00000000000..4787b11169c --- /dev/null +++ b/docs/sources/tempo/operations/manage-advanced-systems/generic_forwarding.md @@ -0,0 +1,28 @@ +--- +title: Generic forwarding +menuTitle: Generic forwarding +description: Asynchronous replication of ingested traces +weight: 400 +aliases: + - ../generic_forwarding/ # https://grafana.com/docs/tempo//operations/generic_forwarding/ +--- + +# Generic forwarding + +Generic forwarding allows asynchronous replication of ingested traces. +The distributor writes received spans to both the ingester and defined endpoints, if enabled. +This feature works in a best-effort manner, meaning that no retries happen if an error occurs during replication. + +{{< admonition type="warning" >}} +Generic forwarding doesn't work retroactively. After it's enabled, the distributor only replicates freshly ingested spans. +{{< /admonition >}} + +## Configure generic forwarding + +Enabling generic forwarding requires the configuration of the `distributor` and `overrides`. + +1. Define a list of forwarders in the `distributor` section. Each forwarder must specify a unique `name`, supported `backend`, and backend-specific configuration. + +1. Reference these forwarders in the `overrides` section. This allows for fine-grained control over forwarding and makes it possible to enable this feature globally or on a per-tenant basis. + +For a detailed view of all the configuration options for the generic forwarding feature, refer to the [distributor](https://grafana.com/docs/tempo//configuration/#distributor) and [overrides](https://grafana.com/docs/tempo//configuration/#overrides) configuration pages.
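A minimal configuration sketch of both steps, assuming an OTLP gRPC forwarder; the forwarder name, endpoint, and tenant ID are illustrative:

```yaml
distributor:
  forwarders:
    - name: otel-forwarder          # unique name, referenced from overrides
      backend: otlpgrpc             # supported backend type
      otlpgrpc:
        endpoints:
          - otel-collector:4317     # assumed replication target
        tls:
          insecure: true

overrides:
  'tenant-a':
    forwarders: ['otel-forwarder']  # enable forwarding for this tenant
```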
diff --git a/docs/sources/tempo/operations/manage-advanced-systems/multitenancy.md b/docs/sources/tempo/operations/manage-advanced-systems/multitenancy.md new file mode 100644 index 00000000000..ef1292df17a --- /dev/null +++ b/docs/sources/tempo/operations/manage-advanced-systems/multitenancy.md @@ -0,0 +1,56 @@ +--- +title: Enable multi-tenancy +menuTitle: Enable multi-tenancy +weight: 100 +description: Enable multi-tenancy in Tempo using the X-Scope-OrgID header. +aliases: + - ../../configuration/multitenancy/ # https://grafana.com/docs/tempo//configuration/multitenancy/ + - ../multitenancy/ # https://grafana.com/docs/tempo//operations/multitenancy/ +--- + +# Enable multi-tenancy + +Tempo is a multi-tenant distributed tracing backend. It supports multi-tenancy through the use +of a header: `X-Scope-OrgID`. + +If you're interested in setting up multi-tenancy, consult the [multi-tenant example](https://github.com/grafana/tempo/tree/main/example/docker-compose/otel-collector-multitenant) +in the repository. This example uses the following settings to achieve multi-tenancy in Tempo. + +{{< admonition type="note" >}} +Multi-tenancy on ingestion is currently [only working](https://github.com/grafana/tempo/issues/495) with gRPC and this may never change. It's strongly recommended to use the OpenTelemetry Collector to support multi-tenancy. +{{< /admonition >}} + +## Configure multi-tenancy + +1. Configure the OTEL Collector to attach the `X-Scope-OrgID` header on push: + + ```yaml + exporters: + otlp: + headers: + x-scope-orgid: foo-bar-baz + ``` + +1. Configure the Tempo data source in Grafana to pass the tenant with the same header: + + ```yaml + - name: Tempo-Multitenant + jsonData: + httpHeaderName1: 'X-Scope-OrgID' + secureJsonData: + httpHeaderValue1: 'foo-bar-baz' + ``` + +1. Enable multi-tenancy on the Tempo backend by setting the following configuration value on all Tempo components: + + ```yaml + multitenancy_enabled: true + ``` + + or from the command line: + + ```bash + --multitenancy.enabled=true + ``` + + This option forces all Tempo components to require the `X-Scope-OrgID` header. diff --git a/docs/sources/tempo/operations/manage-advanced-systems/user-configurable-overrides.md b/docs/sources/tempo/operations/manage-advanced-systems/user-configurable-overrides.md new file mode 100644 index 00000000000..aaad254690f --- /dev/null +++ b/docs/sources/tempo/operations/manage-advanced-systems/user-configurable-overrides.md @@ -0,0 +1,194 @@ +--- +title: User-configurable overrides +menuTitle: Configure Tempo overrides through the user-configurable overrides API +description: Configure Tempo overrides through the user-configurable overrides API +weight: 300 +aliases: + - ../user-configurable-overrides/ # https://grafana.com/docs/tempo//operations/user-configurable-overrides/ +--- + +# User-configurable overrides + +User-configurable overrides in Tempo let you change overrides for your tenant using an API. +Instead of modifying a file or Kubernetes `configmap`, you (and other services relying on Tempo) can use this API to modify the overrides directly. + +## Architecture + +User-configurable overrides are stored in an object store bucket managed by Tempo. + +![user-configurable-overrides-architecture.png](/media/docs/tempo/user-configurable-overrides-architecture.png) + +{{< admonition type="note" >}} +We recommend using a different bucket for overrides and traces storage, but they can share a bucket if needed.
+When sharing a bucket, make sure any lifecycle rules are scoped correctly so they don't remove the data of the user-configurable overrides module.
+{{< /admonition >}}
+
+The overrides of every tenant are stored at `/{tenant name}/overrides.json`:
+
+```
+overrides/
+├── 1/
+│   └── overrides.json
+└── 2/
+    └── overrides.json
+```
+
+Tempo regularly polls this bucket and keeps a copy of the limits in-memory. When requesting the overrides for a tenant, the overrides module:
+
+1. Checks if the override is set in the user-configurable overrides. If so, it returns that value.
+2. Checks if the override is set in the runtime configuration (`configmap`). If so, it returns that value.
+3. Returns the default value.
+
+### Supported fields
+
+User-configurable overrides are designed to be a subset of the runtime overrides. Refer to [Overrides](https://grafana.com/docs/tempo//configuration/#overrides) for information about all overrides.
+When you set a field in both the user-configurable overrides and the runtime overrides, the value from the user-configurable overrides takes priority.
+
+{{< admonition type="note" >}}
+Note that `processors` is an exception. Tempo merges values from both user-configurable overrides and runtime overrides into a single list.
+{{< /admonition >}}
+
+```yaml
+[forwarders: <list of strings>]
+
+metrics_generator:
+
+  [processors: <list of strings>]
+  [collection_interval: <duration>]
+  [disable_collection: <bool> | default = false]
+
+  processor:
+
+    service_graphs:
+      [histogram_buckets: <list of float>]
+      [dimensions: <list of string>]
+      [peer_attributes: <list of string>]
+      [enable_client_server_prefix: <bool>]
+      [enable_messaging_system_latency_histogram: <bool>]
+
+    span_metrics:
+      [histogram_buckets: <list of float>]
+      [dimensions: <list of string>]
+      [intrinsic_dimensions: <map of string to bool>]
+      [filter_policies: [
+        [
+          include/exclude:
+            match_type: <string> # options: strict, regexp
+            attributes:
+              - key: <string>
+                value: <any>
+        ]
+      ]
+      [enable_target_info: <bool>]
+      [target_info_excluded_dimensions: <list of string>]
+```
+
+## API
+
+All API requests are handled on the `/api/overrides` endpoint. The module supports `GET`, `POST`, `PATCH`, and `DELETE` requests.
+
+This endpoint is tenant-specific. If Tempo is run in multi-tenant mode, all requests should have an appropriate `X-Scope-OrgID` header.
+
+If Tempo is run in distributed mode, only the query-frontend accepts API requests.
+
+### Operations
+
+#### GET /api/overrides
+
+Returns the current overrides and its version.
+
+Query parameters:
+- `scope`: whether to return only the overrides set through the API (`api`) or the overrides merged with the runtime overrides (`merged`). Defaults to `api`.
+
+Example:
+
+```shell
+$ curl -X GET -v -H "X-Scope-OrgID: 3" http://localhost:3100/tempo/api/overrides\?scope=merged
+```
+
+#### POST /api/overrides
+
+Update the overrides with the given payload. Note that this overwrites any existing overrides.
+
+Example:
+
+```shell
+curl -X POST -v -H "X-Scope-OrgID: 3" -H "If-Match: 1697726795401423" http://localhost:3100/api/overrides --data "{}"
+```
+
+#### PATCH /api/overrides
+
+Update the existing overrides by patching them with the payload.
+It follows the JSON merge patch protocol ([RFC 7386](https://datatracker.ietf.org/doc/html/rfc7386)).
+
+Example:
+
+```shell
+curl -X PATCH -v -H "X-Scope-OrgID: 3" -H "If-Match: 1697726795401423" http://localhost:3100/api/overrides --data "{\"forwarders\":null}"
+```
+
+#### DELETE /api/overrides
+
+Delete the existing overrides.
+
+Example:
+
+```shell
+curl -X DELETE -H "X-Scope-OrgID: 3" -H "If-Match: 1697726795401423" http://localhost:3100/api/overrides
+```
+
+### Versioning
+
+To handle concurrent read and write operations, the backend stores the overrides with a version.
+Whenever the overrides are returned, the response has an Etag header with the current version.
+
+```shell
+$ curl -v http://localhost:3100/api/overrides
+...
+< HTTP/1.1 200 OK
+< Content-Type: application/json
+< Etag: 1697726795401423
+< Date: Wed, 07 Feb 2024 17:49:04 GMT
+< Content-Length: 118
+...
+```
+
+Requests that modify or delete overrides need to pass the current version using the `If-Match` header:
+
+```shell
+$ curl -X POST -H "If-Match: 1697726795401423" http://localhost:3100/api/overrides --data "..."
+```
+
+This example uses the overrides in the `overrides.json` file in the current working directory:
+
+```shell
+$ curl -X POST -H "X-Scope-OrgID: 3" -H "If-Match: 1697726795401423" http://localhost:3100/api/overrides --data @overrides.json
+```
+
+If the version doesn't match the version in the backend, the request is rejected with HTTP error 412 (Precondition Failed).
+
+### Conflicting runtime overrides check
+
+Overrides set through the user-configurable overrides take priority over runtime overrides.
+This can lead to misleading scenarios because a value set in the runtime overrides is not actively being used.
+
+To warn users about preexisting runtime overrides, there is an optional check for conflicting runtime overrides.
+If the check is enabled, requests are rejected if:
+
+1. There are no user-configurable overrides yet for this tenant.
+2. There are runtime overrides set that contain overrides present in the user-configurable overrides.
+
+The check can be enabled in the configuration:
+
+```yaml
+overrides:
+  user_configurable_overrides:
+    api:
+      check_for_conflicting_runtime_overrides: true
+```
+
+You can bypass this check by setting the query parameter `skip-conflicting-overrides-check=true`:
+
+```shell
+$ curl -X POST -H "If-Match: 1697726795401423" http://localhost:3100/api/overrides?skip-conflicting-overrides-check=true --data "..."
+```
diff --git a/docs/sources/tempo/operations/manage-advanced-systems/zone-aware-ingesters.md b/docs/sources/tempo/operations/manage-advanced-systems/zone-aware-ingesters.md
new file mode 100644
index 00000000000..7c737c45717
--- /dev/null
+++ b/docs/sources/tempo/operations/manage-advanced-systems/zone-aware-ingesters.md
@@ -0,0 +1,28 @@
+---
+title: Zone-aware replication for ingesters
+menuTitle: Zone-aware ingesters
+description: Configure zone-aware ingesters for Tempo
+weight: 600
+aliases:
+  - ../zone-aware-ingesters/ # https://grafana.com/docs/tempo//operations/zone-aware-ingesters/
+---
+
+# Zone-aware replication for ingesters
+
+Zone awareness is a feature that ensures data is replicated across failure domains (which we refer to as “zones”) to provide greater reliability.
+A failure domain is whatever you define it to be, but is commonly an availability zone, data center, or server rack.
+
+When zone awareness is enabled for ingesters, incoming trace data is guaranteed to be replicated to ingesters in different zones.
+This allows the system to withstand the loss of one or more zones (depending on the replication factor).
+
+Example:
+
+```yaml
+# Use the following fields in the _config field of the JSonnet config to enable zone-aware ingesters.
+  multi_zone_ingester_enabled: false,
+  multi_zone_ingester_migration_enabled: false,
+  multi_zone_ingester_replicas: 0,
+  multi_zone_ingester_max_unavailable: 25,
+```
+
+For an example configuration, refer to the [JSonnet microservices operations example](https://github.com/grafana/tempo/blob/main/operations/jsonnet/microservices/README.md).
\ No newline at end of file
diff --git a/docs/sources/tempo/operations/monitor/_index.md b/docs/sources/tempo/operations/monitor/_index.md
index a2c68e7b03d..0dff065f3e4 100644
--- a/docs/sources/tempo/operations/monitor/_index.md
+++ b/docs/sources/tempo/operations/monitor/_index.md
@@ -2,7 +2,7 @@
title: Monitor Tempo
menuTitle: Monitor Tempo
description: Use polling, alerts, and dashboards to monitor Tempo in production.
-weight: 20
+weight: 200
aliases:
  - ./monitoring ## https://grafana.com/docs/tempo/latest/operations/monitoring/
---
diff --git a/docs/sources/tempo/operations/monitor/polling.md b/docs/sources/tempo/operations/monitor/polling.md
index ff74771e084..351dccbfdb8 100644
--- a/docs/sources/tempo/operations/monitor/polling.md
+++ b/docs/sources/tempo/operations/monitor/polling.md
@@ -28,7 +28,7 @@ During normal operation, it will stale by at most twice the configured `blocklist_poll` duration.

{{< admonition type="note" >}}
For details about configuring polling, refer to [polling configuration]({{< relref "../../configuration/polling" >}}).
-{{% /admonition %}}
+{{< /admonition >}}

## Monitor polling with dashboards and alerts
diff --git a/docs/sources/tempo/operations/monitor/set-up-monitoring.md b/docs/sources/tempo/operations/monitor/set-up-monitoring.md
index 06e595643c6..a5b99c6f972 100644
--- a/docs/sources/tempo/operations/monitor/set-up-monitoring.md
+++ b/docs/sources/tempo/operations/monitor/set-up-monitoring.md
@@ -24,7 +24,7 @@ Update any instructions in this document for your own deployment.
If you use the [Kubernetes integration Grafana Alloy Helm chart](https://grafana.com/docs/alloy//set-up/install/kubernetes/), you can use the Kubernetes scrape annotations to automatically scrape Tempo. You'll need to add the labels to all of the deployed components.
-{{% /admonition %}}
+{{< /admonition >}}

## Before you begin

@@ -51,7 +51,7 @@ In addition, the test app instructions explain how to configure a Tempo data sou

{{< admonition type="note" >}}
If you already have a Tempo environment, then there is no need to create a test app. This guide assumes that the Tempo and Grafana Alloy configurations are the same as or based on [these instructions to create a test application](https://grafana.com/docs/tempo/latest/setup/set-up-test-app/), as you'll augment those configurations to enable Tempo metrics monitoring.
-{{% /admonition %}}
+{{< /admonition >}}

In these examples, Tempo is installed in a namespace called `tempo`.
Change this namespace name in the examples as needed to fit your own environment.

@@ -140,8 +140,8 @@ rule {
This lets you create a configuration that scrapes metrics from Tempo components and writes the data to a Mimir instance of your choice.

This example provides a Helm `values.yaml` file that you can use for [Alloy deployed on Kubernetes](https://grafana.com/docs/alloy//configure/kubernetes/).
-The file configures the options Alloy uses to scrap a running instance of Tempo.
-Refer to the comments in the example for details.
+The file configures the options Alloy uses to scrape a running instance of Tempo.
+Refer to the comments in the example for details.
```yaml alloy: @@ -265,7 +265,7 @@ This contains a compiled version of the alert and recording rules, as well as th If you want to change any of the mixins, make your updates in the `operations/tempo-mixin` directory. Use the instructions in the [README](https://github.com/grafana/tempo/tree/main/operations/tempo-mixin) in that directory to regenerate the files. The mixins are generated in the `operations/tempo-mixin-compiled` directory. -{{% /admonition %}} +{{< /admonition >}} ### Import the dashboards to Grafana @@ -276,7 +276,7 @@ Refer to [Import a dashboard ](https://grafana.com/docs/grafana/latest/dashboard Install all six dashboards. You can only import one dashboard at a time. Create a new folder in the Dashboards area, for example “Tempo Monitoring”, as an easy location to save the imported dashboards. -{{% /admonition %}} +{{< /admonition >}} To create a folder: diff --git a/docs/sources/tempo/operations/multitenancy.md b/docs/sources/tempo/operations/multitenancy.md deleted file mode 100644 index 08f9d032374..00000000000 --- a/docs/sources/tempo/operations/multitenancy.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -title: Enable multi-tenancy -menuTitle: Enable multi-tenancy -weight: 60 -description: Enable multi-tenancy in Tempo using the X-Scope-OrgID header. -aliases: -- /docs/tempo/configuratioon/multitenancy -- /docs/tempo/operations/multitenancy ---- - -# Enable multi-tenancy - -Tempo is a multi-tenant distributed tracing backend. It supports multi-tenancy through the use -of a header: `X-Scope-OrgID`. - -If you're interested in setting up multi-tenancy, consult the [multi-tenant example](https://github.com/grafana/tempo/tree/main/example/docker-compose/otel-collector-multitenant) -in the repository. This example uses the following settings to achieve multi-tenancy in Tempo. - -{{< admonition type="note" >}} -Multi-tenancy on ingestion is currently [only working](https://github.com/grafana/tempo/issues/495) with GPRC and this may never change. It's strongly recommended to use the OpenTelemetry Collector to support multi-tenancy. -{{% /admonition %}} - -## Configure multi-tenancy - -1. Configure the OTEL Collector to attach the `X-Scope-OrgID` header on push: - - ``` - exporters: - otlp: - headers: - x-scope-orgid: foo-bar-baz - ``` - -1. Configure the Tempo data source in Grafana to pass the tenant with the same header: - - ``` - - name: Tempo-Multitenant - jsonData: - httpHeaderName1: 'X-Scope-OrgID' - secureJsonData: - httpHeaderValue1: 'foo-bar-baz' - ``` - -1. Enable multi-tenancy on the Tempo backend by setting the following configuration value on all Tempo components: - - ``` - multitenancy_enabled: true - ``` - - or from the command line: - - ``` - --multitenancy.enabled=true - ``` - - This option forces all Tempo components to require the `X-Scope-OrgID` header. - - - - diff --git a/docs/sources/tempo/operations/schema.md b/docs/sources/tempo/operations/schema.md index ca3350b5fad..1ac029f9981 100644 --- a/docs/sources/tempo/operations/schema.md +++ b/docs/sources/tempo/operations/schema.md @@ -2,13 +2,18 @@ title: Apache Parquet schema menuTitle: Parquet schema description: This document describes the schema used with the Parquet block format. -weight: 75 +weight: 900 aliases: - /docs/tempo/parquet/schema --- # Apache Parquet schema + + + + + Starting with Tempo 2.0, Apache Parquet is used as the default column-formatted block format. Refer to the [Parquet configuration options]({{< relref "../configuration/parquet.md" >}}) for more information. 
@@ -51,6 +56,8 @@ The table below uses these abbreviations: - `rs` - resource spans - `ss` - scope spans + + | | | | |:----|:----|:----| |Name|Type|Description| @@ -118,6 +125,8 @@ The table below uses these abbreviations: |rs.ss.Spans.Links|byte array|Protocol-buffer encoded span links if present, else null.| |rs.ss.Spans.TraceState|string|The span's TraceState value if present, else empty string.https://opentelemetry.io/docs/reference/specification/trace/api/#tracestate| + + To increase the readability table omits the groups `list.element` that are added for nested list types in Parquet. ### Block Schema display in Parquet Message format @@ -261,7 +270,7 @@ For speed and ease-of-use, we are projecting several values to columns at the tr ## Well-known attributes Projecting attributes to their own columns has benefits for search speed and size. -Therefore, we are taking an opinionated approach and store some well-known attributes to their own dedicated columns. +Therefore, we are taking an opinionated approach and store some well-known attributes to their own dedicated columns. All other attributes are stored in the generic key/value maps and are still searchable, but not as quickly. We chose these attributes based on the [OTEL semantic-conventions](https://github.com/open-telemetry/semantic-conventions/tree/main) and what we commonly use ourselves (scratching our own itch), but we think they will be useful to most workloads. @@ -317,9 +326,14 @@ Parquet has robust support for many compression algorithms and data encodings. W 1. Snappy Compression - Enable on all columns 1. Dictionary encoding - Enable on all string columns (including byte array ParentSpanID). Most strings are very repetitive so this works well to optimize storage size. However we can greatly speed up search by inspecting the dictionary first and eliminating pages with no matches. -1. Time and duration unix nanos - Delta encoding +1. Time and duration UNIX nanos - Delta encoding 1. Rarely used columns such as `DroppedAttributesCount` - These columns are usually all zeroes, RLE works well. ### Bloom filters Parquet has native support for bloom filters. However, Tempo does not use them at this time. Tempo already has sophisticated support for sharding and caching bloom filters. + + + + + \ No newline at end of file diff --git a/docs/sources/tempo/operations/server_side_metrics.md b/docs/sources/tempo/operations/server_side_metrics.md deleted file mode 100644 index 4ffd7c9e79f..00000000000 --- a/docs/sources/tempo/operations/server_side_metrics.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: Server-side metrics architecture -menuTitle: Server-side metrics architecture -description: Server-side metrics is a feature that derive metrics from ingested traces. -weight: 15 ---- - -# Server-side metrics architecture - -Server-side metrics is a feature that derive metrics from ingested traces. - -To generate metrics, it uses an additional component: the [metrics-generator]({{< relref "../metrics-generator" >}}). -If present, the distributor will write received spans to both the ingester and the metrics-generator. -The metrics-generator processes spans and writes metrics to a Prometheus datasource using the Prometheus remote write protocol. - -## Architecture - -Generating and writing metrics introduces a whole new domain to Tempo unlike any other functionality thus far. -For this reason, a component, the metrics-generator, is dedicated to working with metrics. 
-This results in a clean division of responsibility and limits the blast radius from a metrics processors or the Prometheus remote write exporter blowing up. - - -``` - │ - │ - Ingress - │ - ▼ - ┌──────────────────────┐ - │ │ - │ Distributor │ - │ │ - └──────────────────────┘ - 2│ │1 - │ │ - ┌──────────────────┘ └────────┐ - │ │ - ▼ ▼ -┌ ─ ─ ─ ─ ─ ─ ─ ─ ┏━━━━━━━━━━━━━━━━━━━━━━┓ ┌──────────────────────┐ - │ ┃ ┃ │ │ -│ Prometheus ◀────Prometheus ────┃ Metrics-generator ┃ │ Ingester │◀───Queries──── - │ Remote Write ┃ ┃ │ │ -└ ─ ─ ─ ─ ─ ─ ─ ─ ┗━━━━━━━━━━━━━━━━━━━━━━┛ └──────────────────────┘ - │ - │ - │ - ▼ - ┌─────────────────┐ - │ │ - │ Backend │ - │ │ - └─────────────────┘ -``` - -## Configuration - -For a detailed view of all the config options for the metrics generator, visit [its config page]({{< relref "../configuration#metrics-generator" >}}). diff --git a/docs/sources/tempo/operations/serverless_aws.md b/docs/sources/tempo/operations/serverless_aws.md deleted file mode 100644 index 0167473b8bd..00000000000 --- a/docs/sources/tempo/operations/serverless_aws.md +++ /dev/null @@ -1,90 +0,0 @@ ---- -title: Search with AWS Lambda -description: Learn how to set up AWS Lambda for serverless backend search. -alias: -- /docs/tempo/latest/operations/backend_search/serverless_aws/ -- /docs/tempo/latest/operations/serverless_aws/ -weight: 95 ---- - -# Search with AWS Lambda - -{{< admonition type="caution" >}} -The Tempo serverless feature is now deprecated and will be removed in an upcoming release. -{{< /admonition >}} - -This document explains how to set up AWS Lambda for serverless backend search. -Read [improve search performance]({{< relref "./backend_search" >}}) for more guidance on configuration options for backend search. - -1. Build the code package: - - ```bash - cd ./cmd/tempo-serverless && make build-lambda-zip - ``` - - This will create a ZIP file containing the binary required for - the function. The file name will be of the form: `./lambda/tempo--.zip`. - Here is an example of that name: - - ```bash - ls lambda/*.zip - lambda/tempo-serverless-backend-search-297172a.zip - ``` - -1. Provision an S3 bucket. - -1. Copy the ZIP file into your bucket. - - ``` - aws s3 cp lambda/tempo-serverless-backend-search-297172a.zip gs:// - ``` - -1. Provision the Lambda. For a Lambda function to be invoked via HTTP, we also need to create an - ALB and some other resources. This example uses Terraform and only includes the function definition. - Additionally, you will need a VPC, security groups, IAM roles, an ALB, target groups, etc., but that is - beyond the scope of this guide. - - ``` - locals { - // this can be increased if you would like to use multiple functions - count = 1 - } - - resource "aws_lambda_function" "lambda" { - count = local.count - - function_name = "${local.name}-${count.index}" - description = "${local.name}-${count.index}" - role = - handler = "main" - runtime = "go1.x" - timeout = "60" - s3_key = "tempo-serverless-backend-search-297172a.zip" - s3_bucket = - memory_size = 1769 # 1 vcpu - - vpc_config { - subnet_ids = - security_group_ids = [] - } - - environment { - variables = { - "TEMPO_S3_BUCKET" = "" - "TEMPO_BACKEND" = "s3" - "TEMPO_S3_HEDGE_REQUESTS_AT" = "400ms" - "TEMPO_S3_HEDGE_REQUESTS_UP_TO" = "2" - "TEMPO_S3_ENDPOINT" = "s3.dualstack.us-east-2.amazonaws.com" - } - } - } - ``` - -1. 
Add the hostname of the newly-created ALB to the querier config: - - ``` - querier: - search: - external_endpoints: - - http:// - ``` diff --git a/docs/sources/tempo/operations/serverless_gcp.md b/docs/sources/tempo/operations/serverless_gcp.md deleted file mode 100644 index 6f503b47112..00000000000 --- a/docs/sources/tempo/operations/serverless_gcp.md +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: Search with Google Cloud Run -description: Learn how to set up Google Cloud Run for serverless backend search. -weight: 93 -alias: -- /docs/tempo/latest/operations/backend_search/serverless_gcp/ -- /docs/tempo/latest/operations/serverless_gcp/ ---- - -# Search with Google Cloud Run - -{{< admonition type="caution" >}} -The Tempo serverless feature is now deprecated and will be removed in an upcoming release. -{{< /admonition >}} - -This document walks you through setting up a Google Cloud Run for serverless backend search. -For more guidance on configuration options for full backend search [check here]({{< relref "./backend_search" >}}). - -1. Build the docker image: - - ```bash - cd ./cmd/tempo-serverless && make build-docker-gcr - ``` - - This will create the Docker container image to be deployed to Google Cloud Run. - The docker image will be named: `tempo-serverless:latest` and `tempo-serverless:-`. - Here is an example of that name: - - ```bash - $ docker images | grep tempo-serverless - tempo-serverless cloud-run-3be4efa 146c9d9fa63c 58 seconds ago 47.9MB - tempo-serverless latest 146c9d9fa63c 58 seconds ago 47.9MB - ``` - -1. Push the image to a Google Container Registry repo. - -1. Provision the Google Cloud Run service. This example uses Terraform. Configuration values - should be adjusted to meet the needs of your installation. - - ``` - locals { - // this can be increased if you would like to use multiple functions - count = 1 - } - - resource "google_cloud_run_service" "run" { - count = local.count - - name = "" - location = "" - - metadata { - annotations = { - "run.googleapis.com/ingress" = "internal", # this annotation can be used to limit connectivity to the service - } - } - - template { - metadata { - annotations = { - "autoscaling.knative.dev/minScale" = "1", - "autoscaling.knative.dev/maxScale" = "1000", - "autoscaling.knative.dev/panic-threshold-percentage" = "110.0", # default 200.0. how aggressively to go into panic mode and start scaling heavily - "autoscaling.knative.dev/window" = "10s", # default 60s. window over which to average metrics to make scaling decisions - } - } - spec { - container_concurrency = 4 - containers { - image = "" - resources { - limits = { - cpu = "2" - memory = "1Gi" - } - } - env { - name = "TEMPO_GCS_BUCKET_NAME" - value = "" - } - env { - name = "TEMPO_BACKEND" - value = "gcs" - } - env { - name = "TEMPO_GCS_HEDGE_REQUESTS_AT" - value = "400ms" - } - env { - name = "TEMPO_GCS_HEDGE_REQUESTS_UP_TO" - value = "2" - } - env { - name = "GOGC" - value = "400" - } - } - } - } - - traffic { - percent = 100 - latest_revision = true - } - } - ``` - -1. Add the newly-created cloud run service as external endpoints in your querier -configuration. 
The endpoint can be retrieved from the **Details** tab in Google Cloud Run: - - ``` - querier: - search: - external_endpoints: - - - ``` diff --git a/docs/sources/tempo/operations/tempo_cli.md b/docs/sources/tempo/operations/tempo_cli.md index 0c3374a6b2f..6ecce4c97d7 100644 --- a/docs/sources/tempo/operations/tempo_cli.md +++ b/docs/sources/tempo/operations/tempo_cli.md @@ -2,7 +2,7 @@ title: Tempo CLI description: Guide to using tempo-cli keywords: ["tempo", "cli", "tempo-cli", "command line interface"] -weight: 70 +weight: 800 --- # Tempo CLI @@ -99,7 +99,7 @@ Options: {{< admonition type="note" >}} Streaming over HTTP requires the `stream_over_http_enabled` flag to be set. For more information, refer to [Tempo GRPC API documentation]({{< relref "../api_docs" >}}). -{{% /admonition %}} +{{< /admonition >}} ### Search tags Call the Tempo API and search attribute names. @@ -119,7 +119,7 @@ Options: {{< admonition type="note" >}} Streaming over HTTP requires the `stream_over_http_enabled` flag to be set. For more information, refer to [Tempo GRPC API documentation]({{< relref "../api_docs" >}}). -{{% /admonition %}} +{{< /admonition >}} ### Search tag values Call the Tempo API and search attribute values. @@ -140,7 +140,7 @@ Options: {{< admonition type="note" >}} Streaming over HTTP requires the `stream_over_http_enabled` flag to be set. For more information, refer to [Tempo GRPC API documentation]({{< relref "../api_docs" >}}). -{{% /admonition %}} +{{< /admonition >}} ### Metrics Call the Tempo API and generate metrics from traces using TraceQL. @@ -159,9 +159,9 @@ Options: - `--use-grpc` Use GRPC streaming - `--path-prefix ` String to prefix search paths with -{{% admonition type="note" %}} +{{< admonition type="note" >}} Streaming over HTTP requires the `stream_over_http_enabled` flag to be set. For more information, refer to [Tempo GRPC API documentation]({{< relref "../api_docs" >}}). -{{% /admonition %}} +{{< /admonition >}} ## Query blocks command @@ -172,7 +172,7 @@ tempo-cli query blocks ``` {{< admonition type="note" >}} This can be intense as it downloads every bloom filter and some percentage of indexes/trace data. - {{% /admonition %}} + {{< /admonition >}} Arguments: - `trace-id` Trace ID as a hexadecimal string. diff --git a/docs/sources/tempo/operations/traceql-metrics.md b/docs/sources/tempo/operations/traceql-metrics.md index 40c036a0557..8fb090ae994 100644 --- a/docs/sources/tempo/operations/traceql-metrics.md +++ b/docs/sources/tempo/operations/traceql-metrics.md @@ -3,7 +3,7 @@ aliases: [] title: Configure TraceQL metrics menuTitle: Configure TraceQL metrics description: Learn about configuring TraceQL metrics. 
-weight: 550 +weight: 400 keywords: - Prometheus - TraceQL diff --git a/docs/sources/tempo/operations/user-configurable-overrides-architecture.png b/docs/sources/tempo/operations/user-configurable-overrides-architecture.png deleted file mode 100644 index 65b46a83070..00000000000 Binary files a/docs/sources/tempo/operations/user-configurable-overrides-architecture.png and /dev/null differ diff --git a/docs/sources/tempo/operations/user-configurable-overrides.md b/docs/sources/tempo/operations/user-configurable-overrides.md deleted file mode 100644 index 949ca707cad..00000000000 --- a/docs/sources/tempo/operations/user-configurable-overrides.md +++ /dev/null @@ -1,190 +0,0 @@ ---- -title: User-configurable overrides -menuTitle: Configure Tempo overrides through the user-configurable overrides API -description: Configure Tempo overrides through the user-configurable overrides API -weight: 90 ---- - -# User-configurable overrides - -User-configurable overrides in Tempo let you change overrides for your tenant using an API. -Instead of modifying a file or Kubernetes configmap, you (and other services relying on Tempo) can use this API to modify the overrides directly. - -### Architecture - -![user-configurable-overrides-architecture.png](/media/docs/tempo/user-configurable-overrides-architecture.png) - -User-configurable overrides are stored in an object store bucket managed by Tempo. - -{{< admonition type="note" >}} -We recommend using a different bucket for overrides and traces storage, but they can share a bucket if needed. -When sharing a bucket, make sure any lifecycle rules are scoped correctly to not remove data of user-configurable overrides module. -{{% /admonition %}} - -Overrides of every tenant are stored at `/{tenant name}/overrides.json`: - -``` -overrides/ -├── 1/ -│ └── overrides.json -└── 2/ - └── overrides.json -``` - -Tempo regularly polls this bucket and keeps a copy of the limits in-memory. When requesting the overrides for a tenant, the overrides module: - -1. Checks this override is set in the user-configurable overrides, if so return that value -2. Checks if this override is set in the runtime config (configmap), if so return that value -3. Returns the default value - -### Supported fields - -User-configurable overrides are designed to be a subset of the runtime overrides. Refer to [Overrides]({{< relref "../configuration#overrides" >}}) for information about all overrides. -When a field is set in both the user-configurable overrides and the runtime overrides, the value from the user-configurable overrides will take priority. - -{{< admonition type="note" >}} -`processors` is an exception: Tempo will merge values from both user-configurable overrides and runtime overrides into a single list. -{{% /admonition %}} - -```yaml -[forwarders: ] - -metrics_generator: - - [processors: ] - [collection_interval: ] - [disable_collection: | default = false] - - processor: - - service_graphs: - [histogram_buckets: ] - [dimensions: ] - [peer_attributes: ] - [enable_client_server_prefix: ] - [enable_messaging_system_latency_histogram: ] - - span_metrics: - [histogram_buckets: ] - [dimensions: ] - [intrinsic_dimensions: ] - [filter_policies: [ - [ - include/exclude: - match_type: # options: strict, regexp - attributes: - - key: - value: - ] - ] - [enable_target_info: ] - [target_info_excluded_dimensions: ] -``` - -### API - -All API requests are handled on the `/api/overrides` endpoint. The module supports `GET`, `POST`, `PATCH`, and `DELETE` requests. 
- -This endpoint is tenant-specific. If Tempo is run in multitenant mode, all requests should have an appropriate `X-Scope-OrgID` header. - -If the tenant is run in distributed mode, only the query-frontend will accept API requests. - -#### Operations - -##### GET /api/overrides - -Returns the current overrides and it's version. - -Query-parameters: -- `scope`: whether to return overrides from the API only `api` or merge it with the runtime overrides `merged`. Defaults to `api`. - -Example: - -``` -$ curl -X GET -v -H "X-Scope-OrgID: 3" http://localhost:3100/tempo/api/overrides\?scope=merged` -``` - -##### POST /api/overrides - -Update the overrides with the given payload. Note this will overwrite any existing overrides. - -Example: - -``` -curl -X POST -v -H "X-Scope-OrgID: 3" -H "If-Match: 1697726795401423" http://localhost:3100/api/overrides --data "{}" -``` - -##### PATCH /api/overrides - -Update the existing overrides by patching it with the payload. It follows the JSON merge patch protocol ([RFC 7386](https://datatracker.ietf.org/doc/html/rfc7386)). - -Example: - -``` -curl -X PUT -v -H "X-Scope-OrgID: 3" -H "If-Match: 1697726795401423" http://localhost:3100/api/overrides --data "{\"forwarders\":null}" -``` - -##### DELETE /api/overrides - -Delete the existing overrides. - -Example: - -``` -curl -X DELETE -H "X-Scope-OrgID: 3" -H "If-Match: 1697726795401423" http://localhost:3100/api/overrides -``` - -#### Versioning - -To handle concurrent read and write operations, overrides are stored with a version in the backend. -Whenever the overrides are returned, the response will have an Etag header with the current version. - -``` -$ curl -v http://localhost:3100/api/overrides -... -< HTTP/1.1 200 OK -< Content-Type: application/json -< Etag: 1697726795401423 -< Date: Wed, 07 Feb 2024 17:49:04 GMT -< Content-Length: 118 -... -``` - -Requests that modify or delete overrides need to pass the current version using the `If-Match` header: - -``` -$ curl -X POST -H "If-Match: 1697726795401423" http://localhost:3100/api/overrides --data "..." -``` -This example uses uses overrides in the `overrides.json` file with the location in `pwd`: - -``` -$ curl -X POST -H "X-Scope-OrgID: 3" -H "If-Match: 1697726795401423" http://localhost:3100/api/overrides --data @overrides.json -``` - -If the version does not match the version in the backend, the request is rejected with HTTP error 412. - -#### Conflicting runtime overrides check - -Overrides set through the user-configurable overrides take priority over runtime overrides. -This can lead to misleading scenarios because a value set in the runtime overrides is not actively being used. - -To warn users about preexisting runtime overrides, there is an optional check for conflicting runtime overrides. -If enabled requests are rejected if: - -1. There are no user-configurable overrides yet for this tenant. -2. There are runtime overrides set that contain overrides present in the user-configurable overrides. - -The check can be enabled in the configuration: - -```yaml -overrides: - user_configurable_overrides: - api: - check_for_conflicting_runtime_overrides: true -``` - -You can bypass this check by setting the query parameter `skip-conflicting-overrides-check=true`: - -``` -$ curl -X POST -H "If-Match: 1697726795401423" http://localhost:3100/api/overrides?skip-conflicting-overrides-check=true --data "..." 
-``` diff --git a/docs/sources/tempo/release-notes/v2-2.md b/docs/sources/tempo/release-notes/v2-2.md index fde619ac479..858089e1a08 100644 --- a/docs/sources/tempo/release-notes/v2-2.md +++ b/docs/sources/tempo/release-notes/v2-2.md @@ -7,6 +7,11 @@ weight: 50 # Version 2.2 release notes + + + + + The Tempo team is pleased to announce the release of Tempo 2.2. This release gives you: @@ -20,8 +25,8 @@ Tempo 2.2 makes vParquet2, a Parquet version designed to be more compatible with Read the [Tempo 2.2 blog post](/blog/2023/08/02/grafana-tempo-2.2-release-traceql-structural-operators-are-here/) for more examples and details about these improvements. {{< admonition type ="note" >}} -For a complete list of changes, enhancements, and bug fixes refer to the [Tempo 2.2 changelog](https://github.com/grafana/tempo/releases/tag/v2.2.0). -{{% /admonition %}} +For a complete list of changes, enhancements, and bug fixes refer to the [Tempo 2.2 CHANGELOG](https://github.com/grafana/tempo/releases/tag/v2.2.0). +{{< /admonition >}} ## Features and enhancements @@ -29,34 +34,37 @@ Some of the most important features and enhancements in Tempo 2.2 are highlighte ### Expanding the TraceQL language -With this release, we’ve added to the [TraceQL language]({{< relref "../traceql" >}}). +With this release, we've added to the [TraceQL language]({{< relref "../traceql" >}}). TraceQL now offers: -* Structural operators: descendant (>>), child (>), and sibling (~) ([documentation]({{< relref "../traceql#structural" >}})). Find relevant traces based on their structure and relationships among spans. [PR [#2625](https://github.com/grafana/tempo/pull/2625) [#2660](https://github.com/grafana/tempo/pull/2660)] -* A `select()` operation that allows you to specify arbitrary span attributes that you want included in the TraceQL response ([documentation]({{< relref "../traceql#selection" >}})) [PR [2494](https://github.com/grafana/tempo/pull/2494)] -* A `by()` operation that groups span sets within a trace by an attribute of your choosing. This operation is not supported in the Grafana UI yet; you can only use `by()` when querying Tempo’s search API directly. ([documentation]({{< relref "../traceql#grouping" >}}) [PR [2490](https://github.com/grafana/tempo/pull/2490)] -* New intrinsic attributes for use in TraceQL queries: `traceDuration`, `rootName`, and `rootServiceName` ([documentation]({{< relref "../traceql" >}})) [PR [#2503](https://github.com/grafana/tempo/pull/2503)] +- Structural operators: descendant (>>), child (>), and sibling (~) ([documentation]({{< relref "../traceql#structural" >}})). Find relevant traces based on their structure and relationships among spans. [PR [#2625](https://github.com/grafana/tempo/pull/2625) [#2660](https://github.com/grafana/tempo/pull/2660)] +- A `select()` operation that allows you to specify arbitrary span attributes that you want included in the TraceQL response ([documentation]({{< relref "../traceql#selection" >}})) [PR [2494](https://github.com/grafana/tempo/pull/2494)] +- A `by()` operation that groups span sets within a trace by an attribute of your choosing. This operation is not supported in the Grafana UI yet; you can only use `by()` when querying the search API directly. 
([documentation]({{< relref "../traceql#grouping" >}})) [PR [2490](https://github.com/grafana/tempo/pull/2490)]
+- New intrinsic attributes for use in TraceQL queries: `traceDuration`, `rootName`, and `rootServiceName` ([documentation]({{< relref "../traceql" >}})) [PR [#2503](https://github.com/grafana/tempo/pull/2503)]

Read the [Tempo 2.2 blog post](/blog/2023/08/02/grafana-tempo-2.2-release-traceql-structural-operators-are-here/) for examples of how to use these new language additions.

To learn more about the TraceQL syntax, see the [TraceQL documentation]({{< relref "../traceql" >}}).
-For information on planned future extensions to the TraceQL language, see [future work]({{< relref "../traceql/architecture" >}}).
+For information on planned future extensions to the TraceQL language, refer to [future work]({{< relref "../traceql/architecture" >}}).

### Get TraceQL results faster

-We’re always trying to reduce the time you spend waiting to get results to your TraceQL queries, and we’ve made some nice progress on this front with this release.
+We're always trying to reduce the time you spend waiting to get results to your TraceQL queries, and we've made some nice progress on this front with this release.

-We’ve added a [GRPC streaming](https://grpc.io/docs/what-is-grpc/core-concepts/#bidirectional-streaming-rpc) endpoint to Tempo’s query frontend that allows a client to stream search results from Tempo. The Tempo CLI has been updated to use this new streaming endpoint [PR [#2366](https://github.com/grafana/tempo/pull/2366)] . As of version 10.1, Grafana supports it as well, though you must first enable the `traceQLStreaming` feature toggle [PR [#72288](https://github.com/grafana/grafana/pull/72288)].
+We've added a [gRPC streaming](https://grpc.io/docs/what-is-grpc/core-concepts/#bidirectional-streaming-rpc) endpoint to the query frontend that allows a client to stream search results from Tempo. The Tempo CLI has been updated to use this new streaming endpoint [PR [#2366](https://github.com/grafana/tempo/pull/2366)].
+As of version 10.1, Grafana supports it as well, though you must first enable the `traceQLStreaming` feature toggle [PR [#72288](https://github.com/grafana/grafana/pull/72288)].

By streaming results to the client, you can start to look at traces matching your query before the entire query completes. This is particularly helpful for long-running queries; while the total time to complete the query is the same, you can start looking at your first matches before the full set of matched traces is returned.

-In addition to streaming partial results, we’ve merged a number of improvements to speed up TraceQL queries. Here are just a few:
+In addition to streaming partial results, we've merged a number of improvements to speed up TraceQL queries. Here are just a few:

-* Add support for query batching between frontend and queriers to improve throughput [PR [2677](https://github.com/grafana/tempo/pull/2677)]
-* Improve performance of TraceQL regex [PR [2484](https://github.com/grafana/tempo/pull/2484)]
-* Fully skip over Parquet row groups with no matches in the column dictionaries [PR [2676](https://github.com/grafana/tempo/pull/2676)]
-* New synchronous read mode for vParquet and vParquet2 [PRs [2165](https://github.com/grafana/tempo/pull/2165), [2535](https://github.com/grafana/tempo/pull/2535)]
-* Improved TraceQL throughput by asynchronously creating jobs.
[PR [2530](https://github.com/grafana/tempo/pull/2530)] +- Add support for query batching between frontend and queriers to improve throughput [PR [2677](https://github.com/grafana/tempo/pull/2677)] +- Improve performance of TraceQL regular expressions [PR [2484](https://github.com/grafana/tempo/pull/2484)] +- Fully skip over Parquet row groups with no matches in the column dictionaries [PR [2676](https://github.com/grafana/tempo/pull/2676)] + +- New synchronous read mode for vParquet and vParquet2 [PRs [2165](https://github.com/grafana/tempo/pull/2165), [2535](https://github.com/grafana/tempo/pull/2535)] + +- Improved TraceQL throughput by asynchronously creating jobs. [PR [2530](https://github.com/grafana/tempo/pull/2530)] ### Metrics summary API (experimental) @@ -66,36 +74,33 @@ From here, you might see that spans from `namespace=A` have a significantly high As another example, you could use this API to compare latencies of your spans broken down by the `region` attribute. From here, you might notice that spans from `region=North-America` have higher latencies than those from `region=Asia-Pacific`. -This API is meant to enable ad-hoc analysis of your incoming spans; by segmenting your spans by attribute and looking for differences in RED metrics, you can more quickly isolate where problems like elevated error rates or higher latencies are coming from. +This API is meant to enable as-needed analysis of your incoming spans; by segmenting your spans by attribute and looking for differences in RED metrics, you can more quickly isolate where problems like elevated error rates or higher latencies are coming from. -Unlike RED metrics computed by Tempo’s [metrics-generator](({{< relref "../metrics-generator" >}}), the values returned by this API are not persisted as time series. This has the advantage that you do not need to provide your own time series databases for storing and querying these metrics. It also allows you to compute RED metrics broken down by high cardinality attributes that would be too expensive to store in a time series database. Use the metrics generator if you want to store and visualize RED metrics over multi-hour or multi-day time ranges, or you want to alert on these metrics. +Unlike RED metrics computed by the [metrics-generator]({{< relref "../metrics-generator" >}}), the values returned by this API aren't persisted as time series. This has the advantage that you don't need to provide your own time series databases for storing and querying these metrics. It also allows you to compute RED metrics broken down by high cardinality attributes that would be too expensive to store in a time series database. Use the metrics generator if you want to store and visualize RED metrics over multi-hour or multi-day time ranges, or you want to alert on these metrics. To learn more about this API, refer to the [metrics summary API documentation.]({{< relref "../api_docs/metrics-summary" >}}) This work is represented in multiple PRs: [2368](https://github.com/grafana/tempo/pull/2368), [2418](https://github.com/grafana/tempo/pull/2418), [2424](https://github.com/grafana/tempo/pull/2424), [2442](https://github.com/grafana/tempo/pull/2442), [2480](https://github.com/grafana/tempo/pull/2480), [2481](https://github.com/grafana/tempo/pull/2481), [2501](https://github.com/grafana/tempo/pull/2501), [2579](https://github.com/grafana/tempo/pull/2579), and [2582](https://github.com/grafana/tempo/pull/2582). 
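+
+To make the idea concrete, here is a hedged sketch of a request. The `/api/metrics/summary` endpoint and its `q` and `groupBy` parameters follow the metrics summary API documentation linked above; the port, tenant ID, and attribute names are assumptions for this example:
+
+```shell
+# Hypothetical example: RED metrics for spans of service "foo", segmented by HTTP status code.
+curl -G http://localhost:3200/api/metrics/summary \
+  -H "X-Scope-OrgID: 3" \
+  --data-urlencode 'q={resource.service.name="foo"}' \
+  --data-urlencode 'groupBy=span.http.status_code'
+```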
-
### Other enhancements

-* Tempo’s [tag values]({{< relref "../api_docs#search-tag-values" >}}) and [tag names]({{< relref "../api_docs#search-tags" >}}) APIs now support filtering [PR [2253](https://github.com/grafana/tempo/pull/2253)]. This lets you retrieve all valid attribute values and names given certain criteria. For example, you can get a list of values for the attribute `namespace` seen on spans with attribute `resource=A.` This feature is off by default; to enable, configure `autocomplete_filtering_enabled`. ([documentation]({{< relref "../api_docs" >}})). Grafana’s autocomplete can make use of this filtering capability to provide better suggestions starting in v10.2 [PR [67845]](https://github.com/grafana/grafana/pull/67845).
-
-* Tempo’s metrics-generator now supports span filtering. Setting up filters allows you to compute metrics over the specific spans you care about, excluding others. It also can reduce the cardinality of generated metrics, and therefore the cost of storing those metrics in a Prometheus-compatible TSDB. ([documentation]({{< relref "../metrics-generator/span_metrics#filtering" >}})) [PR [2274](https://github.com/grafana/tempo/pull/2274)]
-* Tempo’s metrics-generator can now detect virtual nodes ([documentation]({{< relref "../metrics-generator/service_graphs#virtual-nodes" >}})) [PR [2365](https://github.com/grafana/tempo/pull/2365)]. As a result, you’ll now see these virtual nodes represented in your service graph. For more information, refer to the [virtual nodes documentation]({{< relref "../metrics-generator/service_graphs#virtual-nodes" >}}).
+- The [tag values]({{< relref "../api_docs#search-tag-values" >}}) and [tag names]({{< relref "../api_docs#search-tags" >}}) APIs now support filtering [PR [2253](https://github.com/grafana/tempo/pull/2253)]. This lets you retrieve all valid attribute values and names given certain criteria. For example, you can get a list of values for the attribute `namespace` seen on spans with attribute `resource=A`. This feature is off by default; to enable it, configure `autocomplete_filtering_enabled`. ([documentation]({{< relref "../api_docs" >}})). The autocomplete in Grafana can make use of this filtering capability to provide better suggestions starting in v10.2 [PR [67845](https://github.com/grafana/grafana/pull/67845)].
+- The metrics-generator now supports span filtering. Setting up filters allows you to compute metrics over the specific spans you care about, excluding others. It also can reduce the cardinality of generated metrics, and therefore the cost of storing those metrics in a Prometheus-compatible TSDB. ([documentation]({{< relref "../metrics-generator/span_metrics#filtering" >}})) [PR [2274](https://github.com/grafana/tempo/pull/2274)]
+- The metrics-generator can now detect virtual nodes ([documentation]({{< relref "../metrics-generator/service_graphs#virtual-nodes" >}})) [PR [2365](https://github.com/grafana/tempo/pull/2365)]. As a result, you'll now see these virtual nodes represented in your service graph. For more information, refer to the [virtual nodes documentation]({{< relref "../metrics-generator/service_graphs#virtual-nodes" >}}).

## Upgrade considerations

When [upgrading]({{< relref "../setup/upgrade" >}}) to Tempo 2.2, be aware of these breaking changes:

-* JSonnet users only: We've converted the metrics-generator component from a k8s deployment to a k8s statefulset. Refer to the PR for seamless migration instructions.
[PRs [#2533](https://github.com/grafana/tempo/pull/2533), [#2467](https://github.com/grafana/tempo/pull/2647)]
-* Removed or renamed configuration parameters (see section below)
+- JSonnet users only: We've converted the metrics-generator component from a k8s deployment to a k8s statefulset. Refer to the PR for seamless migration instructions. [PRs [#2533](https://github.com/grafana/tempo/pull/2533), [#2647](https://github.com/grafana/tempo/pull/2647)]
+- Removed or renamed configuration parameters (see section below)

-While not a breaking change, upgrading to Tempo 2.2 will by default change Tempo’s block format to vParquet2.
+While not a breaking change, upgrading to Tempo 2.2 will by default change the block format to vParquet2.
To stay on a previous block format, read the [Parquet configuration documentation]({{< relref "../configuration/parquet#choose-a-different-block-format" >}}).
We strongly encourage upgrading to vParquet2 as soon as possible as this is required for using structural operators in your TraceQL queries and provides query performance improvements, in particular on queries using the `duration` intrinsic.

-
### Removed or renamed configuration parameters

The following fields were removed or renamed.

@@ -136,31 +141,36 @@ The following fields were removed or renamed.

## Bug fixes

-For a complete list, refer to the [Tempo changelog](https://github.com/grafana/tempo/releases).
+For a complete list, refer to the [Tempo CHANGELOG](https://github.com/grafana/tempo/releases).

### 2.2.4

-* Updated Alpine image version to 3.18 to patch [CVE-2022-48174](https://nvd.nist.gov/vuln/detail/CVE-2022-48174) [PR 3046](https://github.com/grafana/tempo/pull/3046)
-* Bumped Jaeger query docker image to 1.50.0 [PR 2998](https://github.com/grafana/tempo/pull/2998)
+- Updated Alpine image version to 3.18 to patch [CVE-2022-48174](https://nvd.nist.gov/vuln/detail/CVE-2022-48174) [PR 3046](https://github.com/grafana/tempo/pull/3046)
+- Bumped Jaeger query docker image to 1.50.0 [PR 2998](https://github.com/grafana/tempo/pull/2998)

### 2.2.3

-* Fixed S3 credentials providers configuration [PR 2889](https://github.com/grafana/tempo/pull/2889)
+- Fixed S3 credentials providers configuration [PR 2889](https://github.com/grafana/tempo/pull/2889)

### 2.2.2

-* Fixed node role auth IDMSv1 [PR 2760](https://github.com/grafana/tempo/pull/2760)
+- Fixed node role auth IDMSv1 [PR 2760](https://github.com/grafana/tempo/pull/2760)

### 2.2.1

-* Fixed incorrect metrics for index failures [PR 2781](https://github.com/grafana/tempo/pull/2781)
-* Fixed a panic in the metrics-generator when using multiple tenants with default overrides [PR 2786](https://github.com/grafana/tempo/pull/2786)
-* Restored `tenant_header_key` removed in [PR 2414](https://github.com/grafana/tempo/pull/2414) [PR 2786](https://github.com/grafana/tempo/pull/2795)
-* Disabled streaming over HTTP by default [PR 2803](https://github.com/grafana/tempo/pull/2803)
+- Fixed incorrect metrics for index failures [PR 2781](https://github.com/grafana/tempo/pull/2781)
+- Fixed a panic in the metrics-generator when using multiple tenants with default overrides [PR 2786](https://github.com/grafana/tempo/pull/2786)
+- Restored `tenant_header_key` removed in [PR 2414](https://github.com/grafana/tempo/pull/2414) [PR 2795](https://github.com/grafana/tempo/pull/2795)
+- Disabled streaming over HTTP by default [PR 2803](https://github.com/grafana/tempo/pull/2803)

### 2.2

-* Fixed an issue in the metrics-generator that prevented scaling up parallelism when
remote writing of metrics was lagging behind [PR [2463](https://github.com/grafana/tempo/issues/2463)] -* Fixed an issue where metrics-generator was setting wrong labels for `traces_target_info` [PR [2546](https://github.com/grafana/tempo/pull/2546)] -* Fixed an issue where matches and other spanset level attributes were not persisted to the TraceQL results. [PR [2490](https://github.com/grafana/tempo/pull/2490)] -* Fixed an issue where ingester search could occasionally fail with `file does not exist` error [PR [2534](https://github.com/grafana/tempo/issues/2534)] +- Fixed an issue in the metrics-generator that prevented scaling up parallelism when remote writing of metrics was lagging behind [PR [2463](https://github.com/grafana/tempo/issues/2463)] +- Fixed an issue where metrics-generator was setting wrong labels for `traces_target_info` [PR [2546](https://github.com/grafana/tempo/pull/2546)] +- Fixed an issue where matches and other spanset level attributes were not persisted to the TraceQL results. [PR [2490](https://github.com/grafana/tempo/pull/2490)] +- Fixed an issue where ingester search could occasionally fail with `file does not exist` error [PR [2534](https://github.com/grafana/tempo/issues/2534)] + + + + + \ No newline at end of file diff --git a/docs/sources/tempo/release-notes/v2-3.md b/docs/sources/tempo/release-notes/v2-3.md index ca4236a28c9..6bf352b4779 100644 --- a/docs/sources/tempo/release-notes/v2-3.md +++ b/docs/sources/tempo/release-notes/v2-3.md @@ -7,6 +7,11 @@ weight: 45 # Version 2.3 release notes + + + + + The Tempo team is pleased to announce the release of Tempo 2.3. This release gives you: @@ -28,7 +33,7 @@ This block format improves query performance relative to previous formats. Read the [**Tempo 2.3 blog post**](/blog/2023/11/01/grafana-tempo-2.3-release-faster-trace-queries-traceql-upgrades/) for more examples and details about these improvements. -These release notes highlight the most important features and bugfixes. For a complete list, refer to the [Tempo changelog](https://github.com/grafana/tempo/releases). +These release notes highlight the most important features and bugfixes. For a complete list, refer to the [Tempo CHANGELOG](https://github.com/grafana/tempo/releases). {{< youtube id="2FWi9_dSBdM?rel=0" >}} @@ -58,7 +63,7 @@ Unique to Tempo, TraceQL is a query language that lets you perform custom querie To learn more about the TraceQL syntax, see the [TraceQL documentation]({{< relref "../traceql" >}}). For information on planned future extensions to the TraceQL language, refer to [future work]({{< relref "../traceql/architecture" >}}). -We’ve made the following improvements to TraceQL: +We've made the following improvements to TraceQL: * Added two structural operators, ancestor (`<<`) and parent (`<`) ([documentation]({{< relref "../traceql#experimental-structural" >}})) [[PR 2877](https://github.com/grafana/tempo/pull/2877)] @@ -68,13 +73,13 @@ We’ve made the following improvements to TraceQL: * Improved the performance of TraceQL [`select()` queries]({{< relref "../traceql#selection" >}}). Metrics-summary now also correctly handles missing attributes. 
[[PR 2765](https://github.com/grafana/tempo/pull/2765)]

-* Added support for searching by OpenTelemetry’s [span status message ](https://github.com/open-telemetry/opentelemetry-proto/blob/afcd2aa7f728216d5891ffc0d83f09f0278a6611/opentelemetry/proto/trace/v1/trace.proto#L260)using `statusMessage` intrinsic attribute ([documentation]({{< relref "../traceql#intrinsic-fields" >}})) [[PR 2848](https://github.com/grafana/tempo/pull/2848)]
+* Added support for searching by OpenTelemetry's [span status message](https://github.com/open-telemetry/opentelemetry-proto/blob/afcd2aa7f728216d5891ffc0d83f09f0278a6611/opentelemetry/proto/trace/v1/trace.proto#L260) using the `statusMessage` intrinsic attribute ([documentation]({{< relref "../traceql#intrinsic-fields" >}})) [[PR 2848](https://github.com/grafana/tempo/pull/2848)]

* Fixed cases where an empty filter (`{}`) didn't return expected results [[PR 2498](https://github.com/grafana/tempo/issues/2498)]

### Metrics-generator

-We’ve made the following improvements to metrics-generator:
+We've made the following improvements to metrics-generator:

* Added a scope query parameter to `/api/overrides` so users can choose between fetching the overrides stored by the API and the merged overrides (those actually used by Tempo) [[PR 2915](https://github.com/grafana/tempo/pull/2915), [#3018](https://github.com/grafana/tempo/pull/3018)]
* Added `TempoUserConfigurableOverridesReloadFailing` alert [[PR 2784](https://github.com/grafana/tempo/pull/2784)]

@@ -90,11 +95,11 @@ When [upgrading]({{< relref "../setup/upgrade" >}}) to Tempo 2.3, be aware of th

Although the vParquet3 format isn't yet the default, it's production ready and we highly recommend switching to it for improved query performance and [dedicated attribute columns]({{< relref "../operations/dedicated_columns" >}}).

-Upgrading to Tempo 2.3 doesn’t modify the Parquet block format. You can use Tempo 2.3 with vParquet2 or vParquet3. vParquet2 remains the default backend for Tempo 2.3; vParquet3 is available as a stable option.
+Upgrading to Tempo 2.3 doesn't modify the Parquet block format. You can use Tempo 2.3 with vParquet2 or vParquet3. vParquet2 remains the default backend for Tempo 2.3; vParquet3 is available as a stable option.

{{< admonition type="note" >}}
-Tempo 2.2 can’t read data stored in vParquet3.
-{{% /admonition %}}
+Tempo 2.2 can't read data stored in vParquet3.
+{{< /admonition >}}

For information on upgrading, refer to [Change the block format to vParquet3]({{< relref "../setup/upgrade" >}}) upgrade documentation.

@@ -116,7 +121,7 @@ distributor:

### Changes to the Overrides module configuration

-We’ve added a new `defaults` block to the overrides module for configuring global or per-tenant settings. The Overrides change to indented syntax. For more information, read the [Overrides configuration documentation]({{< relref "../configuration#overrides" >}}).
+We've added a new `defaults` block to the overrides module for configuring global or per-tenant settings. The overrides now use an indented syntax. For more information, read the [Overrides configuration documentation]({{< relref "../configuration#overrides" >}}).

You can also use the Tempo CLI to migrate configurations. Refer to the [documentation]({{< relref "../operations/tempo_cli#migrate-overrides-config-command" >}}).
[[PR 2688](https://github.com/grafana/tempo/pull/2688)]

@@ -181,13 +186,13 @@ The following vulnerabilities have been addressed:

## Bugfixes

-For a complete list, refer to the [Tempo changelog](https://github.com/grafana/tempo/releases).
+For a complete list, refer to the [Tempo CHANGELOG](https://github.com/grafana/tempo/releases/tag/v2.3.1).

### 2.3.1

* Include statusMessage intrinsic attribute in tag search. [PR 3084](https://github.com/grafana/tempo/pull/3084)
* Fix compactor ignore configured S3 headers. [PR 3149](https://github.com/grafana/tempo/pull/3154)
-* Readd session token to s3 credentials. [PR 3144](https://github.com/grafana/tempo/pull/3144)
+* Re-add session token to S3 credentials. [PR 3144](https://github.com/grafana/tempo/pull/3144)

### 2.3

@@ -199,7 +204,7 @@ For a complete list, refer to the [Tempo changelog](https://github.com/grafana/t
* Aligned `tempo_query_frontend_queries_total` and `tempo_query_frontend_queries_within_slo_total`. [PR 2840](https://github.com/grafana/tempo/pull/2840)
  This query now correctly tells you the percentage of requests that are within SLO:

-  ```
+  ```promql
  sum(rate(tempo_query_frontend_queries_within_slo_total{}[1m])) by (op)
  /
  sum(rate(tempo_query_frontend_queries_total{}[1m])) by (op)
@@ -209,5 +214,9 @@ For a complete list, refer to the [Tempo changelog](https://github.com/grafana/t
* Respected spss on GRPC streaming. [PR 2971](https://github.com/grafana/tempo/pull/2840)
* Moved empty root span substitution from `querier` to `query-frontend`. [PR 2671](https://github.com/grafana/tempo/issues/2671)
* Ingester errors correctly propagate on the query path [PR 2935](https://github.com/grafana/tempo/issues/2935)
-* Fixed an issue where the ingester didn’t stop a query after timeout [PR 3031](https://github.com/grafana/tempo/pull/3031)
-* Reordered the S3 credential chain and upgraded `minio-go`. `native_aws_auth_enabled` is deprecated [PR 3006](https://github.com/grafana/tempo/pull/3006)
\ No newline at end of file
+* Fixed an issue where the ingester didn't stop a query after timeout [PR 3031](https://github.com/grafana/tempo/pull/3031)
+* Reordered the S3 credential chain and upgraded `minio-go`. `native_aws_auth_enabled` is deprecated [PR 3006](https://github.com/grafana/tempo/pull/3006)
+
+
+
+
\ No newline at end of file
diff --git a/docs/sources/tempo/release-notes/v2-4.md b/docs/sources/tempo/release-notes/v2-4.md
index 287701bb1f2..7ea0406a5f0 100644
--- a/docs/sources/tempo/release-notes/v2-4.md
+++ b/docs/sources/tempo/release-notes/v2-4.md
@@ -7,6 +7,11 @@ weight: 40

# Version 2.4 release notes

+
+
+
+
+
The Tempo team is pleased to announce the release of Tempo 2.4.

This release gives you:

@@ -15,11 +20,11 @@ This release gives you:
* Performance enhancements, thanks to the addition of new caching tiers
* Cost savings, thanks to polling improvements that reduce calls to object storage

-As part of this release, vParquet3 has also been promoted to the new default storage format for traces. For more about why we’re so excited about vParquet3, refer to [Accelerate TraceQL queries at scale with dedicated attribute columns in Grafana Tempo](/blog/2024/01/22/accelerate-traceql-queries-at-scale-with-dedicated-attribute-columns-in-grafana-tempo/).
+As part of this release, vParquet3 has also been promoted to the new default storage format for traces.
For more about why we're so excited about vParquet3, refer to [Accelerate TraceQL queries at scale with dedicated attribute columns in Grafana Tempo](/blog/2024/01/22/accelerate-traceql-queries-at-scale-with-dedicated-attribute-columns-in-grafana-tempo/). Read the [Tempo 2.4 blog post](/blog/2024/02/29/grafana-tempo-2.4-release-traceql-metrics-tiered-caching-and-tco-improvements/) for more examples and details about these improvements. -These release notes highlight the most important features and bugfixes. For a complete list, refer to the [Tempo changelog](https://github.com/grafana/tempo/releases). +These release notes highlight the most important features and bug fixes. For a complete list, refer to the [Tempo CHANGELOG](https://github.com/grafana/tempo/releases/tag/v2.4). {{< youtube id="EYUx2DkNRas" >}} @@ -29,20 +34,21 @@ The most important features and enhancements in Tempo 2.4 are highlighted below. ### Multi-tenant queries -Tempo now allows you to query multiple tenants at once. We’ve made multi-tenant queries compatible with streaming ([first released in v2.2]({{< relref "./v2-2#get-traceql-results-faster" >}})) so you can get query results as fast as possible. To learn more, refer to [Cross-tenant federation]({{< relref "../operations/cross_tenant_query" >}}) and [Enable multi-tenancy]({{< relref "../operations/multitenancy" >}}). [PRs [3262](https://github.com/grafana/tempo/pull/3262), [3087](https://github.com/grafana/tempo/pull/3087)] +Tempo now allows you to query multiple tenants at once. We've made multi-tenant queries compatible with streaming ([first released in v2.2]({{< relref "./v2-2#get-traceql-results-faster" >}})) so you can get query results as fast as possible. +To learn more, refer to [Cross-tenant federation](https://grafana.com/docs/tempo//operations/manage-advanced-systems/cross_tenant_query) and [Enable multi-tenancy](https://grafana.com/docs/tempo/operations/manage-advanced-systems/multitenancy/). [PRs [3262](https://github.com/grafana/tempo/pull/3262), [3087](https://github.com/grafana/tempo/pull/3087)] ### TraceQL metrics (experimental) -We’re excited to announce the addition of metrics queries to the TraceQL language. Metric queries extend trace queries by applying a function to trace query results. +We're excited to announce the addition of metrics queries to the TraceQL language. Metric queries extend trace queries by applying a function to trace query results. This powerful feature creates metrics from traces, much in the same way that LogQL metric queries create metrics from logs. -In this case, we are calculating the rate of the erroring spans coming from the service `foo`. Rate is a `spans/sec` quantity. +In this case, we're calculating the rate of the erroring spans coming from the service `foo`. Rate is a `spans/sec` quantity. -``` +```traceql { resource.service.name = "foo" && status = error } | rate() ``` -In addition, you can use Grafana Explore to [query and visualize the metrics]({{< relref "../operations/traceql-metrics" >}}) with the Tempo data soruce in Grafana or Grafana Cloud. +In addition, you can use Grafana Explore to [query and visualize the metrics]({{< relref "../operations/traceql-metrics" >}}) with the Tempo data source in Grafana or Grafana Cloud. 
![Metrics visualization in Grafana](/media/docs/tempo/metrics-explore-sample-2.4.png) @@ -54,15 +60,18 @@ To learn more about the TraceQL syntax, see the [TraceQL documentation]({{< relr We continue to make query performance improvements so you spend less time waiting on results to your TraceQL queries. Below are some notable PRs that made it into this release: -* Improve TraceQL regex performance in certain queries. [PR [3139](https://github.com/grafana/tempo/pull/3139)] +* Improve TraceQL regular expression performance in certain queries. [PR [3139](https://github.com/grafana/tempo/pull/3139)] * Improve TraceQL performance in complex queries. [[PR 3113](https://github.com/grafana/tempo/pull/3113)] * TraceQL/Structural operators performance improvement. [[PR 3088](https://github.com/grafana/tempo/pull/3088)] + + ### vParquet3 is now the default block format Tempo 2.4 makes [vParquet3]({{< relref "../configuration/parquet" >}}) the default storage format. -We’re excited about [vParquet3]({{< relref "../configuration/parquet" >}}) relative to prior formats because of its support for [dedicated attribute columns]({{< relref "../operations/dedicated_columns" >}}), which help speed up queries on your largest and most queried attributes. We've seen excellent performance improvements when running it ourselves, and by promoting it to the default, we're signaling that it is ready for broad adoption. +We're excited about [vParquet3]({{< relref "../configuration/parquet" >}}) relative to prior formats because of its support for [dedicated attribute columns]({{< relref "../operations/dedicated_columns" >}}), which help speed up queries on your largest and most queried attributes. +We've seen excellent performance improvements when running it ourselves, and by promoting it to the default, we're signaling that it's ready for broad adoption. Dedicated attribute columns, available using vParquet3, improve query performance by storing the largest and most frequently used attributes in their own columns, rather than in the generic attribute key-value list. For more information, refer to @@ -72,15 +81,21 @@ If you had manually configured vParquet3, we recommend removing it to move forwa To read more about the design of vParquet3, refer to [the design proposal](https://github.com/grafana/tempo/blob/main/docs/design-proposals/2023-05%20vParquet3.md). For general information, refer to [the Apache Parquet schema]({{< relref "../operations/schema" >}}). + ### Additional caching layers -Tempo has added two new caches to improve TraceQL query performance. The frontend-search cache handles job search caching. The parquet-page cache handles page level caching. Refer to the [Cache section]({{< relref "../configuration#cache" >}}) of the Configuration documentation for how to configure these new caching layers. As part of adding these new caching layers, we’ve refactored our caching interface. This includes breaking changes described in “Breaking Changes”. [PRs [3166](https://github.com/grafana/tempo/pull/3166), [3225](https://github.com/grafana/tempo/pull/3225), [3196](https://github.com/grafana/tempo/pull/3196)] +Tempo has added two new caches to improve TraceQL query performance. The frontend-search cache handles job search caching. +The parquet-page cache handles page level caching. +Refer to the [Cache section]({{< relref "../configuration#cache" >}}) of the Configuration documentation for how to configure these new caching layers. 
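As a sketch of how the two roles might be wired to backends (the memcached host and service names here are illustrative assumptions; the Cache section linked above is the authoritative reference for the available options):

```yaml
cache:
  caches:
    # Hypothetical memcached backend serving the new job-search cache role.
    - roles:
        - frontend-search
      memcached:
        host: memcached-frontend
        service: memcached-client
    # A second backend for page-level caching of Parquet data.
    - roles:
        - parquet-page
      memcached:
        host: memcached-pages
        service: memcached-client
```

Each entry pairs one or more cache roles with a single backend, so the two tiers can be sized and scaled independently.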
-### Polling improvements for cost reduction +As part of adding these new caching layers, we've refactored our caching interface. +This includes breaking changes described in Breaking Changes. [PRs [3166](https://github.com/grafana/tempo/pull/3166), [3225](https://github.com/grafana/tempo/pull/3225), [3196](https://github.com/grafana/tempo/pull/3196)] -We’ve improved how Tempo polls object storage, ensuring that we reuse previous results. This has dramatically reduced the number of requests Tempo makes to the object store. Not only does this reduce the load on your object store, for many, it will save you money (since most hosted object storage solutions charge per request). +### Improved polling for cost reduction -We’ve also added the `list_blocks_concurrency` parameter to allow you to tune the number of list calls Tempo makes in parallel to object storage so you can select the value that works best for your environment. We’ve set the default value to `3`, which should work well for the average Tempo cluster. [[PR 2652](https://github.com/grafana/tempo/pull/2652)] +We've improved how Tempo polls object storage, ensuring that we reuse previous results. This has dramatically reduced the number of requests Tempo makes to the object store. Not only does this reduce the load on your object store, for many, it will save you money (since most hosted object storage solutions charge per request). + +We've also added the `list_blocks_concurrency` parameter to allow you to tune the number of list calls Tempo makes in parallel to object storage so you can select the value that works best for your environment. We've set the default value to `3`, which should work well for the average Tempo cluster. [[PR 2652](https://github.com/grafana/tempo/pull/2652)] ### Other enhancements and improvements @@ -88,26 +103,26 @@ In addition, the following improvements have been made in Tempo 2.4: * Improved Tempo error handling on writes, so that one erroring trace doesn't result in an entire batch of traces being dropped. [PR 2571](https://github.com/grafana/tempo/pull/2571) * Added per-tenant compaction window. [PR 3129](https://github.com/grafana/tempo/pull/3129) -* Added `--max-start-time` and `--min-start-time` flag to tempo-cli command `analyse blocks`. [PR 3250](https://github.com/grafana/tempo/pull/3250) +* Added `--max-start-time` and `--min-start-time` flag to `tempo-cli` command `analyse blocks`. [PR 3250](https://github.com/grafana/tempo/pull/3250) * Added per-tenant configurable `remote_write` headers to metrics-generator. [#3175](https://github.com/grafana/tempo/pull/3175) * Added variable expansion support to overrides configuration. [PR 3175](https://github.com/grafana/tempo/pull/3175) * Added HTML pages `/status/overrides` and `/status/overrides/{tenant}`. [PR 3244](https://github.com/grafana/tempo/pull/3244) [#3332](https://github.com/grafana/tempo/pull/3332) * Precalculate and reuse the vParquet3 schema before opening blocks. [PR 3367](https://github.com/grafana/tempo/pull/3367) * Made the trace ID label name configurable for remote written exemplars. [PR 3074](https://github.com/grafana/tempo/pull/3074) * Performance improvements in span filtering. [PR 3025](https://github.com/grafana/tempo/pull/3025) -* Introduced localblocks process configuration option to select only server spans. [PR 3303](https://github.com/grafana/tempo/pull/3303) +* Introduced `localblocks` process configuration option to select only server spans. 
[PR 3303](https://github.com/grafana/tempo/pull/3303)

## Upgrade considerations

-When [upgrading]({{< relref "../setup/upgrade" >}}) to Tempo 2.4, be aware of these considerations and breaking changes.
+When [upgrading](https://grafana.com/docs/tempo//setup/upgrade/) to Tempo 2.4, be aware of these considerations and breaking changes.

### Transition to vParquet 3

-vParquet3 format is now the default block format. It is production ready and we highly recommend switching to it for improved query performance and [dedicated attribute columns]({{< relref "../operations/dedicated_columns" >}}).
+vParquet3 format is now the default block format. It's production ready and we highly recommend switching to it for improved query performance and [dedicated attribute columns]({{< relref "../operations/dedicated_columns" >}}).

Upgrading to Tempo 2.4 modifies the Parquet block format. Although you can use Tempo 2.3 with vParquet2 or vParquet3, you can only use Tempo 2.4 with vParquet3.

-With this release, the first version of our Parquet backend, vParquet, is being deprecated. Tempo 2.4 still reads vParquet1 blocks. However, Tempo will exit with error if they are manually configured. [[PR 3377](https://github.com/grafana/tempo/pull/3377/files#top)]
+With this release, the first version of our Parquet backend, vParquet, is being deprecated. Tempo 2.4 still reads vParquet1 blocks. However, Tempo will exit with an error if they're manually configured. [[PR 3377](https://github.com/grafana/tempo/pull/3377/files#top)]

For information on upgrading, refer to [Upgrade to Tempo 2.4]({{< relref "../setup/upgrade" >}}) and [Choose a different block format]({{< relref "../configuration/parquet#choose-a-different-block-format" >}}).

@@ -214,7 +229,7 @@ This release addresses the following vulnerabilities:

## Bugfixes

-For a complete list, refer to the [Tempo changelog](https://github.com/grafana/tempo/releases).
+For a complete list, refer to the [Tempo CHANGELOG](https://github.com/grafana/tempo/releases).

### 2.4.2

@@ -236,8 +251,13 @@ For a complete list, refer to the [Tempo changelog](https://github.com/grafana/t
* Fixed autocomplete filters sometimes returning erroneous results. [PR 3339](https://github.com/grafana/tempo/pull/3339)
* Fixed trace context propagation between query-frontend and querier. [PR 3387](https://github.com/grafana/tempo/pull/3387)
* Fixed parsing of span.resource.xyz attributes in TraceQL. [PR 3284](https://github.com/grafana/tempo/pull/3284)
-* Changed exit code if config is successfully verified. [PR 3174](https://github.com/grafana/tempo/pull/3174)
-* The tempo-cli analyze blocks command no longer fails on compacted blocks. [PR 3183](https://github.com/grafana/tempo/pull/3183)
+* Changed exit code if configuration is successfully verified. [PR 3174](https://github.com/grafana/tempo/pull/3174)
+* The `tempo-cli analyze blocks` command no longer fails on compacted blocks. [PR 3183](https://github.com/grafana/tempo/pull/3183)
* Moved waitgroup handling for poller error condition. [PR 3224](https://github.com/grafana/tempo/pull/3224)
* Fixed head block excessive locking in ingester search. [PR 3328](https://github.com/grafana/tempo/pull/3328)
* Fixed an issue where the ingester failed to write traces to disk after a crash or unclean restart.
[PR 3346](https://github.com/grafana/tempo/issues/3346)
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/sources/tempo/release-notes/v2-5.md b/docs/sources/tempo/release-notes/v2-5.md
index c8e5c8a0b9f..34def386029 100644
--- a/docs/sources/tempo/release-notes/v2-5.md
+++ b/docs/sources/tempo/release-notes/v2-5.md
@@ -7,6 +7,11 @@ weight: 35

# Version 2.5 release notes

+
+
+
+
+
The Tempo team is pleased to announce the release of Tempo 2.5.

This release gives you:

@@ -15,20 +20,21 @@ This release gives you:
* TraceQL enhancements and performance improvements
* Performance and stability enhancements

-As part of this release, we’ve updated ownership for `/var/tempo` from `root:root` to a new `tempo:tempo` user with a UUID of `10001`. Learn about this breaking change in Upgrade considerations.
+As part of this release, we've updated ownership for `/var/tempo` from `root:root` to a new `tempo:tempo` user with a UID of `10001`. Learn about this breaking change in Upgrade considerations.

Read the [Tempo 2.5 blog post](https://grafana.com/blog/2024/06/03/grafana-tempo-2.5-release-vparquet4-streaming-endpoints-and-more-metrics/) for more examples and details about these improvements.

-These release notes highlight the most important features and bugfixes. For a complete list, refer to the [Tempo changelog](https://github.com/grafana/tempo/releases).
+These release notes highlight the most important features and bug fixes. For a complete list, refer to the [Tempo CHANGELOG](https://github.com/grafana/tempo/releases).

{{< youtube id="c4gW9fwkLhc" >}}

## Features and enhancements

The most important features and enhancements in Tempo 2.5 are highlighted below.
+
### Additional TraceQL metrics (experimental)

-In this release, we’ve added several [TraceQL metrics](https://grafana.com/docs/tempo/latest/operations/traceql-metrics/). Tempo 2.4 introduced the `rate()` function to view rates of spans. For this release, we’ve added `quantile_over_time` and `histogram_over_time`. [PR 3605](https://github.com/grafana/tempo/pull/3605), [PR 3633](https://github.com/grafana/tempo/pull/3633), [PR 3644](https://github.com/grafana/tempo/pull/3644)]
+In this release, we've added several [TraceQL metrics](https://grafana.com/docs/tempo/latest/operations/traceql-metrics/). Tempo 2.4 introduced the `rate()` function to view rates of spans. For this release, we've added `quantile_over_time` and `histogram_over_time`. [PR 3605](https://github.com/grafana/tempo/pull/3605), [PR 3633](https://github.com/grafana/tempo/pull/3633), [PR 3644](https://github.com/grafana/tempo/pull/3644)]

You can use `quantile_over_time` to aggregate numerical values, such as the all-important span duration. Notice that you can specify multiple quantiles in the same query.

@@ -48,7 +54,7 @@ With this feature, you can now see partial query results as they come in, so you
This is perfect for big queries that take a long time to return a response.

The Tempo API endpoints now support gRPC streaming for tag queries and metrics.
-We’ve added new streaming endpoints for:
+We've added new streaming endpoints for:

* `SearchTags`
* `SearchTagsV2`
@@ -63,17 +69,17 @@ In the Tempo CLI, you can use the `--use-grpc` option to enable GRPC streaming.

To learn more, refer to the [Tempo gRPC API](https://grafana.com/docs/tempo/latest/api_docs/#tempo-grpc-api) and [Tempo CLI](https://grafana.com/docs/tempo/latest/operations/tempo_cli/#search) documentation.
[PR 3460](https://github.com/grafana/tempo/pull/3460) [[PR #3584](https://github.com/grafana/tempo/pull/3584)]

{{< admonition type="note" >}}
-NOTE: Streaming over HTTP requires the `stream_over_http_enabled` flag to be set. For more information, refer to [Tempo GRPC API documentation](https://grafana.com/docs/tempo/latest/api_docs/#tempo-grpc-api).
+Streaming over HTTP requires the `stream_over_http_enabled` flag to be set. For more information, refer to [Tempo GRPC API documentation](https://grafana.com/docs/tempo/latest/api_docs/#tempo-grpc-api).
{{< /admonition >}}

-In addition, we’ve reduced memory consumption in the frontend for large traces. [[PR 3522](https://github.com/grafana/tempo/pull/3522)]
+In addition, we've reduced memory consumption in the frontend for large traces. [[PR 3522](https://github.com/grafana/tempo/pull/3522)]

### New vParquet4 block format (experimental)

New in Tempo 2.5, the vParquet4 block format is required for querying links, events, and arrays and improves query performance relative to previous formats. [[PR 3368](https://github.com/grafana/tempo/pull/3368)]

-In addition, we’ve updated the OTLP schema to add attributes to instrumentation scope in vParquet4.[[PR 3649](https://github.com/grafana/tempo/pull/3649)]
+In addition, we've updated the OTLP schema to add attributes to instrumentation scope in vParquet4. [[PR 3649](https://github.com/grafana/tempo/pull/3649)]

While you can use vParquet4, keep in mind that it's experimental. If you choose to use vParquet4 and then opt to revert to vParquet3, any vParquet4 blocks would not be readable by vParquet3.

@@ -86,7 +92,7 @@ To learn more about the TraceQL syntax, see the [TraceQL documentation](https://

For information on planned future extensions to the TraceQL language, refer to [future work](https://github.com/grafana/tempo/blob/main/docs/design-proposals/2023-11%20TraceQL%20Extensions.md).

-We’ve made the following improvements to TraceQL:
+We've made the following improvements to TraceQL:

* Add support for scoped intrinsics using a colon (`:`). The available scoped intrinsics are `trace:duration`, `trace:rootName`, `trace:rootService`, `span:duration`, `span:kind`, `span:name`, `span:status`, and `span:statusMessage`. [[PR 3629](https://github.com/grafana/tempo/pull/3629)]
* Performance improvements on TraceQL and tag value search. [[PR 3650](https://github.com/grafana/tempo/pull/3650), [PR 3667](https://github.com/grafana/tempo/pull/3667)]
@@ -104,7 +110,7 @@ We’ve made the following improvements to TraceQL:
* Better compaction throughput and memory usage [[PR 3579](https://github.com/grafana/tempo/pull/3579)]
* Return a less confusing error message to the client when refusing spans due to ingestion rates. [[PR 3485](https://github.com/grafana/tempo/pull/3485)]
* Clean Metrics Generator's Prometheus WAL before creating instance [[PR 3548](https://github.com/grafana/tempo/pull/3548)]
-* Delete any remaining objects for empty tenants after a configurable duration, requires config enable [PR 3611](https://github.com/grafana/tempo/pull/3611)]
+* Delete any remaining objects for empty tenants after a configurable duration; this must be enabled in the configuration. [[PR 3611](https://github.com/grafana/tempo/pull/3611)]

## Upgrade considerations

@@ -119,23 +125,25 @@ The new user `10001` won't have access to the old files created by `root`.

The ownership of `/var/tempo` changed from `root:root` to `tempo:tempo` with the UID/GID of `10001`.
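A minimal Kubernetes sketch of that ownership fix might look like the following `initContainers` fragment (the image, container, and volume names are assumptions to adapt to your deployment; the Jsonnet example referenced below is the canonical version):

```yaml
initContainers:
  # Hypothetical init container that hands /var/tempo over to the
  # new tempo user (UID/GID 10001) before the ingester starts.
  - name: chown-tempo-data
    image: busybox:1.36
    command: ["chown", "-R", "10001:10001", "/var/tempo"]
    volumeMounts:
      - name: tempo-data
        mountPath: /var/tempo
```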
-The `ingester` and `metrics-generator` statefulsets may need to [run chown](https://opensource.com/article/19/8/linux-chown-command) to change ownership to start properly.
+The `ingester` and `metrics-generator` statefulsets may need to [run `chown`](https://opensource.com/article/19/8/linux-chown-command) to change ownership to start properly.

Refer to [PR 2265](https://github.com/grafana/tempo/pull/2265) to see a Jsonnet example of an `init` container.

-This change doesn’t impact you if you used the Helm chart with the default security context set in the chart.
+This change doesn't impact you if you used the Helm chart with the default security context set in the chart.
All data should be owned by the `tempo` user already.
-The UID won’t impact Helm chart users.
+The UID won't impact Helm chart users.
+

### Support for vParquet format removed

The original vParquet format [has been removed](https://github.com/grafana/tempo/pull/3663) from Tempo 2.5.
-Direct upgrades from Tempo 2.1 to Tempo 2.5 are not possible.
+Direct upgrades from Tempo 2.1 to Tempo 2.5 aren't possible.
You will need to upgrade to an intermediate version and wait for the old vParquet blocks to fall out of retention before upgrading to 2.5. [[PR 3663](https://github.com/grafana/tempo/pull/3663)]

vParquet(1) won't be recognized as a valid encoding and any remaining vParquet(1) blocks won't be readable.
-Installations running with historical defaults should not require any changes as the default has been migrated for several releases.
+Installations running with historical defaults shouldn't require any changes as the default has been migrated for several releases.
Installations with storage settings pinned to vParquet must run a previous release configured for vParquet2 or higher until all existing vParquet(1) blocks have expired and been deleted from the backend, or else they'll encounter read errors after upgrading to this release.
+
+

### **Updated, removed, or renamed configuration parameters**
@@ -27,7 +31,7 @@ The most important features and enhancements in Tempo 2.6 are highlighted below.

### Additional TraceQL metrics (experimental)

-In this release, we’ve added several [TraceQL metrics](https://grafana.com/docs/tempo/latest/operations/traceql-metrics/). In Tempo 2.6, TraceQL metrics adds:
+In this release, we've added several [TraceQL metrics](https://grafana.com/docs/tempo/latest/operations/traceql-metrics/). In Tempo 2.6, TraceQL metrics adds:

* Exemplars [[PR 3824](https://github.com/grafana/tempo/pull/3824), [documentation](https://grafana.com/docs/tempo/next/traceql/metrics-queries/#exemplars)]
* Instant metrics queries using `/api/metrics/query` [[PR 3859](https://github.com/grafana/tempo/pull/3859), [documentation](https://grafana.com/docs/tempo/next/api_docs/#traceql-metrics)]
@@ -44,9 +48,9 @@ For more information, refer to the [TraceQL metrics queries](https://grafana.com

Unique to Tempo, TraceQL is a query language that lets you perform custom queries into your tracing data.
To learn more about the TraceQL syntax, refer to the [TraceQL documentation](https://grafana.com/docs/tempo/latest/traceql/).

-We’ve added event attributes and link scopes. Like spans, they both have instrinsics and attributes.
+We've added event attributes and link scopes. Like spans, they both have intrinsics and attributes.

-The `event` scope lets you query events that happen within a span. A span event is a unique point in time during the span’s duration. While spans help build the structural hierarchy of your services, span events can provide a deeper level of granularity to help debug your application faster and maintain optimal performance. To learn more about how you can use span events, read the [What are span events?](https://grafana.com/blog/2024/08/15/all-about-span-events-what-they-are-and-how-to-query-them/) blog post. [PRs [3708](https://github.com/grafana/tempo/pull/3708), [3708](https://github.com/grafana/tempo/pull/3748), [3908](https://github.com/grafana/tempo/pull/3908)]
+The `event` scope lets you query events that happen within a span. A span event is a unique point in time during the span's duration. While spans help build the structural hierarchy of your services, span events can provide a deeper level of granularity to help debug your application faster and maintain optimal performance. To learn more about how you can use span events, read the [What are span events?](https://grafana.com/blog/2024/08/15/all-about-span-events-what-they-are-and-how-to-query-them/) blog post. [PRs [3708](https://github.com/grafana/tempo/pull/3708), [3748](https://github.com/grafana/tempo/pull/3748), [3908](https://github.com/grafana/tempo/pull/3908)]

If you've instrumented your traces for span links, you can use the `link` scope to search for an attribute within a span link. A span link associates one span with one or more other spans. [PRs [3814](https://github.com/grafana/tempo/pull/3814), [3741](https://github.com/grafana/tempo/pull/3741)]

@@ -60,7 +64,7 @@ You can search for an attribute in your link:

![A TraceQL example showing `link` scope](/media/docs/grafana/data-sources/tempo/query-editor/traceql-link-example.png)

-We’ve also added autocomplete support for `events` and `links`. [[PR 3846](https://github.com/grafana/tempo/pull/3846)]
+We've also added autocomplete support for `events` and `links`. [[PR 3846](https://github.com/grafana/tempo/pull/3846)]

Tempo 2.6 improves TraceQL performance with these updates:

@@ -243,11 +247,11 @@ Storage:
### Other breaking changes

-* **BREAKING CHANGE** tempo-query is no longer a Jaeger instance with grpcPlugin. It's now a standalone server. Serving a gRPC API for Jaeger on `0.0.0.0:7777` by default. [[PR 3840]](https://github.com/grafana/tempo/issues/3840)
+* **BREAKING CHANGE** `tempo-query` is no longer a Jaeger instance with grpcPlugin. It's now a standalone server, serving a gRPC API for Jaeger on `0.0.0.0:7777` by default. [[PR 3840]](https://github.com/grafana/tempo/issues/3840)

## Bugfixes

-For a complete list, refer to the [Tempo changelog](https://github.com/grafana/tempo/releases).
+For a complete list, refer to the [Tempo CHANGELOG](https://github.com/grafana/tempo/releases).

### 2.6.1

@@ -260,7 +264,7 @@ For a complete list, refer to the [Tempo changelog](https://github.com/grafana/t
* Fix metrics query histograms and quantiles on `traceDuration`. [[PR 3879](https://github.com/grafana/tempo/pull/3879)]
* Fix divide by 0 bug in query frontend exemplar calculations. [[PR 3936](https://github.com/grafana/tempo/pull/3936)]
* Fix autocomplete of a query using scoped intrinsics. [[PR 3865](https://github.com/grafana/tempo/pull/3865)]
-* Improved handling of complete blocks in localblocks processor after enabling flushing. [[PR 3805](https://github.com/grafana/tempo/pull/3805)]
+* Improved handling of complete blocks in the `localblocks` processor after enabling flushing. [[PR 3805](https://github.com/grafana/tempo/pull/3805)]
* Fix double appending the primary iterator on second pass with event iterator. [[PR 3903](https://github.com/grafana/tempo/pull/3903)]
* Fix frontend parsing error on cached responses [[PR 3759](https://github.com/grafana/tempo/pull/3759)]
* `max_global_traces_per_user`: take into account `ingestion.tenant_shard_size` when converting to local limit. [[PR 3618](https://github.com/grafana/tempo/pull/3618)]
@@ -271,3 +275,8 @@ For a complete list, refer to the [Tempo changelog](https://github.com/grafana/t
* Correct block end time when the ingested traces are outside the ingestion slack. [[PR 3954](https://github.com/grafana/tempo/pull/3954)]
* Fix race condition where a streaming response could be marshaled while being modified in the combiner resulting in a panic. [[PR 3961](https://github.com/grafana/tempo/pull/3961)]
* Pass search options to the backend for `SearchTagValuesBlocksV2` requests. [[PR 3971](https://github.com/grafana/tempo/pull/3971)]
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/sources/tempo/release-notes/v2-7.md b/docs/sources/tempo/release-notes/v2-7.md
new file mode 100644
index 00000000000..123bd861fce
--- /dev/null
+++ b/docs/sources/tempo/release-notes/v2-7.md
@@ -0,0 +1,247 @@
+---
+title: Version 2.7 release notes
+menuTitle: V2.7
+description: Release notes for Grafana Tempo 2.7
+weight: 25
+---
+
+# Version 2.7 release notes
+
+
+
+
+
+
+The Tempo team is pleased to announce the release of Tempo 2.7.
+
+This release gives you:
+
+* Ability to precisely track ingested traffic and attribute costs based on custom labels
+* A series of enhancements that significantly boost Tempo performance and reduce its overall resource footprint
+* New TraceQL capabilities
+* Improvements to TraceQL metrics
+
+Read the [Tempo 2.7 blog post](https://grafana.com/blog/2025/01/16/grafana-tempo-2.7-release-new-traceql-metrics-functions-operational-improvements-and-more/) for more examples and details about these improvements.
+
+These release notes highlight the most important features and bugfixes.
+For a complete list, refer to the [Tempo changelog](https://github.com/grafana/tempo/releases).
+
+{{< youtube id="0jUEvY-pCdw" >}}
+
+## Features and enhancements
+
+The most important features and enhancements in Tempo 2.7 are highlighted below.
+
+### Track ingested traffic and attribute costs
+
+This new feature lets tenants precisely measure and attribute costs for their ingested trace data by leveraging custom labels.
+This functionality provides a more accurate alternative to existing size-based metrics and meets the growing need for detailed cost attribution and billing transparency.
+
+Modern organizations are increasingly reliant on distributed traces for observability, yet reconciling the costs associated with different teams, services, or departments can be challenging.
+The existing size metric isn't accurate enough because it misses non-span data, and it can lead to under- or over-counting.
+The new usage tracking feature overcomes these issues by splitting resource-level data fairly and providing up to 99% accuracy—perfect for cost reconciliation.
+
+Unlike the previous method, this new feature precisely accounts for every byte of trace data in the distributor—the only Tempo component with the original payload.
+A new API endpoint, `/usage_metrics`, exposes the per-tenant metrics on ingested data and cost attribution, and can be controlled with per-tenant configuration.
+
+This feature is designed as a foundation for broader usage tracking capabilities, where additional trackers will allow organizations to measure and report on a range of usage metrics. ([#4162](https://github.com/grafana/tempo/pull/4162))
+
+For additional information, refer to the [Usage metrics documentation](https://grafana.com/docs/tempo//api_docs/#usage-metrics).
+
+### Major performance and memory usage improvements
+
+We're excited to announce a series of enhancements that significantly boost Tempo performance and reduce its overall resource footprint.
+
+**Better rejection of large traces:** The ingester now reuses generator code to better detect and reject oversized traces.
+This change makes trace ingestion more reliable and prevents capacity overloads.
+Plus, two new metrics `tempo_metrics_generator_live_trace_bytes` and `tempo_ingester_live_trace_bytes` provide deeper visibility into per-tenant byte usage. ([#4365](https://github.com/grafana/tempo/pull/4365))
+
+**Reduced allocations:** We've refined how the query-frontend handles incoming traffic to eliminate unnecessary allocations when query demand is low.
+As part of these improvements, the `querier_forget_delay` configuration option has been removed because it no longer served a practical purpose. ([#3996](https://github.com/grafana/tempo/pull/3996))
+This release also reduces the ingester working set by improving prealloc behavior.
+It also adds tunable prealloc environment variables `PREALLOC_BKT_SIZE`, `PREALLOC_NUM_BUCKETS`, `PREALLOC_MIN_BUCKET`, and metric `tempo_ingester_prealloc_miss_bytes_total` to observe and tune prealloc behavior. ([#4344](https://github.com/grafana/tempo/pull/4344), [#4369](https://github.com/grafana/tempo/pull/4369))
+
+**Faster tag lookups and collector operations:** Multiple optimizations ([#4100](https://github.com/grafana/tempo/pull/4100), [#4104](https://github.com/grafana/tempo/pull/4104), [#4109](https://github.com/grafana/tempo/pull/4109)) make tag lookups and collector tasks more responsive, particularly for distinct value searches.
Additionally, enabling disk caching for completed blocks in ingesters significantly cuts down on query latency and lowers overall I/O overhead. ([#4069](https://github.com/grafana/tempo/pull/4069))
+
+**Reduce goroutines in non-querier components:** A simplified design across non-querier components lowers the total number of goroutines, making Tempo more efficient and easier to scale—even with high trace volumes. ([#4484](https://github.com/grafana/tempo/pull/4484))
+
+### New TraceQL capabilities
+
+New in Tempo 2.7, TraceQL now allows you to query the [instrumentation scope](https://opentelemetry.io/docs/concepts/instrumentation-scope/) fields ([#3967](https://github.com/grafana/tempo/pull/3967)), letting you filter and explore your traces based on where and how they were instrumented.
+
+We've extended TraceQL to automatically collect matches from array values ([#3867](https://github.com/grafana/tempo/pull/3867)), making it easier to parse spans containing arrays of attributes.
+
+Query times are notably faster, thanks to multiple optimizations ([#4114](https://github.com/grafana/tempo/pull/4114), [#4163](https://github.com/grafana/tempo/pull/4163), [#4438](https://github.com/grafana/tempo/pull/4438)).
+Whether you're running standard queries or advanced filters, you should see a significant speed boost.
+
+Tempo now uses the Prometheus "fast regex" engine to accelerate regular expression-based filtering ([#4329](https://github.com/grafana/tempo/pull/4329)).
+As part of this update, all regular expression matches are now fully anchored.
+This breaking change means `span.foo =~ "bar"` is evaluated as `span.foo =~ "^bar$"`.
+Update any affected queries accordingly.
+
+### Query improvements
+
+In API v2 queries, Tempo can now return partial traces even when they exceed the max bytes limit ([#3941](https://github.com/grafana/tempo/pull/3941)).
+This ensures you can still retrieve and inspect useful segments of large traces to aid in debugging.
+Refer to the [query v2 API endpoint](https://grafana.com/docs/tempo//api_docs/#query-v2) documentation for more information.
+
+### TraceQL metrics improvements (experimental)
+
+In TraceQL metrics, we've added a new `avg_over_time` function ([#4073](https://github.com/grafana/tempo/pull/4073)) to help you compute average values for trace-based metrics over specified time ranges, making it simpler to spot trends and anomalies.
+
+Tempo now supports `min_over_time` ([#3975](https://github.com/grafana/tempo/pull/3975)) and `max_over_time` ([#4065](https://github.com/grafana/tempo/pull/4065)) queries, giving you more flexibility in analyzing the smallest or largest values across your trace data.
+
+For more information about TraceQL metrics, refer to [TraceQL metrics functions](https://grafana.com/docs/tempo//traceql/metrics-queries/functions/#traceql-metrics-functions).
+
+### Other enhancements and improvements
+
+This release also has these notable updates:
+
+* The [metrics-generator](https://grafana.com/docs/tempo/latest/metrics-generator/) introduces a generous limit of 100 for failed flush attempts ([#4254](https://github.com/grafana/tempo/pull/4254)) to prevent constant retries on corrupted blocks. A new metric also tracks these flush failures, offering better visibility into potential issues during ingestion.
+* For [span metrics](https://grafana.com/docs/tempo/latest/metrics-generator/span_metrics/), the span multiplier now also sources its value from the resource attributes ([#4210](https://github.com/grafana/tempo/pull/4210)).
This makes it possible to adjust and correct metrics using service or environment configuration, ensuring more accurate data reporting.
+* The `tempo-cli` now supports dropping multiple trace IDs in a single command, speeding up administrative tasks and simplifying cleanup operations. ([#4266](https://github.com/grafana/tempo/pull/4266))
+* An optional log, `log_discarded_spans`, tracks spans discarded by Tempo. This improves visibility into data ingestion workflows and helps you quickly diagnose any dropped spans. ([#3957](https://github.com/grafana/tempo/issues/3957))
+* You can now impose limits on tag and tag-value lookups, preventing runaway queries in large-scale deployments and ensuring a smoother user experience. ([#4320](https://github.com/grafana/tempo/pull/4320))
+* The tags and tag-values endpoints have gained new throughput and SLO-related metrics. Get deeper insights into query performance and reliability straight from Tempo's built-in monitoring. ([#4148](https://github.com/grafana/tempo/pull/4148))
+* Tempo now exposes a SemVer version in its `/api/status/buildinfo` endpoint, providing clear visibility into versioning, particularly useful for cloud deployments and automated pipelines. ([#4110](https://github.com/grafana/tempo/pull/4110))
+
+## Upgrade considerations
+
+When [upgrading](https://grafana.com/docs/tempo/latest/setup/upgrade/) to Tempo 2.7, be aware of these considerations and breaking changes.
+
+### OpenTelemetry Collector receiver listens on `localhost` by default
+
+After this change, the OpenTelemetry Collector receiver defaults to binding on `localhost` rather than `0.0.0.0`. Tempo installations running in Docker or other container environments must update their listener address to continue receiving data. ([#4465](https://github.com/grafana/tempo/pull/4465))
+
+Most Tempo installations use the receivers with the default configuration:
+
+```yaml
+distributor:
+  receivers:
+    otlp:
+      protocols:
+        grpc:
+        http:
+```
+
+This used to work fine since the receivers defaulted to `0.0.0.0:4317` and `0.0.0.0:4318` respectively. With the changes to replace unspecified addresses, the receivers now default to `localhost:4317` and `localhost:4318`.
+
+As a result, connections to Tempo running in a Docker container won't work anymore.
+
+To work around this, explicitly specify the address you want to bind to. For instance, if Tempo is running in a container with hostname `tempo`, this should work:
+
+```yaml
+# ...
+      http:
+        endpoint: "tempo:4318"
+```
+
+You can also still bind to `0.0.0.0` explicitly, but this has potential security risks:
+
+```yaml
+# ...
+      http:
+        endpoint: "0.0.0.0:4318"
+```
+
+### Tempo serverless deprecation
+
+Tempo serverless is now officially deprecated and will be removed in an upcoming release.
+Prepare to migrate any serverless workflows to alternative deployments. ([#4017](https://github.com/grafana/tempo/pull/4017), [documentation](https://grafana.com/docs/tempo/latest/operations/backend_search/#serverless-environment))
+
+There are no serverless changes in this release. However, you'll need to remove these configurations before the next release.
+
+### Anchored regular expression matchers in TraceQL
+
+TraceQL now uses the Prometheus "fast regex" engine to accelerate regular expression-based filtering ([#4329](https://github.com/grafana/tempo/pull/4329)).
+As part of this update, all regular expression matches are now fully anchored.
+This breaking change means `span.foo =~ "bar"` is evaluated as `span.foo =~ "^bar$"`.
+Update any affected queries accordingly.
+
+For more information, refer to the TraceQL [Comparison operators](https://grafana.com/docs/tempo//traceql/#comparison-operators) documentation.
+
+### Migration from OpenTracing to OpenTelemetry
+
+The `use_otel_tracer` option is removed.
+Configure your spans via standard OpenTelemetry environment variables.
+For Jaeger exporting, set `OTEL_TRACES_EXPORTER=jaeger`. For more information, refer to the [OpenTelemetry documentation](https://opentelemetry.io/docs/languages/sdk-configuration/). ([#3646](https://github.com/grafana/tempo/pull/3646))
+
+### Added, updated, removed, or renamed configuration parameters
+
+| Parameter | Comments |
+| --- | --- |
+| `querier_forget_delay` | Removed. The `querier_forget_delay` setting provided no effective functionality and has been dropped. (#3996) |
+| `use_otel_tracer` | Removed. Configure your spans via standard OpenTelemetry environment variables. For Jaeger exporting, set `OTEL_TRACES_EXPORTER=jaeger`. (#3646) |
+| `max_spans_per_span_set` | Added to query-frontend configuration. The limit is enabled by default and set to 100. Set it to `0` to restore the old behavior (unlimited). Otherwise, spans beyond the configured max are dropped. (#4275) |
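For example, to restore the previous unlimited behavior, set the limit to `0` under the query-frontend search block (the same snippet appears in the upgrade documentation):

```yaml
query_frontend:
  search:
    # 0 disables the cap; the default of 100 drops spans beyond the limit.
    max_spans_per_span_set: 0
```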
+
+### gRPC compression disabled
+
+gRPC compression is now disabled in the querier and distributor for performance reasons. ([#4429](https://github.com/grafana/tempo/pull/4429)) Check the gRPC compression settings if you see network issues.
+If you would like to re-enable it, we recommend `snappy`.
+Use the following settings:
+
+  ```yaml
+  ingester_client:
+    grpc_client_config:
+      grpc_compression: "snappy"
+  metrics_generator_client:
+    grpc_client_config:
+      grpc_compression: "snappy"
+  querier:
+    frontend_worker:
+      grpc_client_config:
+        grpc_compression: "snappy"
+  ```
+
+### Other upgrade considerations
+
+* The Tempo CLI now targets the `/api/v2/traces` endpoint by default. Use the `--v1` flag if you still rely on the older `/api/traces` endpoint. ([#4127](https://github.com/grafana/tempo/pull/4127))
+* If you already set the `X-Scope-OrgID` header in per-tenant overrides or global Tempo configuration, it's now honored and not overwritten by Tempo. This may change behavior if you previously depended on automatic injection. ([#4021](https://github.com/grafana/tempo/pull/4021))
+* The AWS Lambda build output changes from `main` to `bootstrap`. Follow the [AWS migration steps](https://aws.amazon.com/blogs/compute/migrating-aws-lambda-functions-from-the-go1-x-runtime-to-the-custom-runtime-on-amazon-linux-2/) to ensure your Lambda functions continue to work. ([#3852](https://github.com/grafana/tempo/pull/3852))
+
+## Bugfixes
+
+For a complete list, refer to the [Tempo CHANGELOG](https://github.com/grafana/tempo/releases).
+
+* Add `invalid_utf8` to reasons span metrics will discard spans. ([#4293](https://github.com/grafana/tempo/pull/4293)) We now catch values in your tracing data that can't be used as valid metrics labels. If you generate span metrics by an attribute `foo`, but `foo` contains characters that aren't legal in Prometheus labels, those metrics won't be written.
+* Metrics-generators: Correctly drop from the ring before stopping ingestion to reduce drops during a rollout. ([#4101](https://github.com/grafana/tempo/pull/4101))
+* Correctly handle 400 Bad Request and 404 Not Found in gRPC streaming. ([#4144](https://github.com/grafana/tempo/pull/4144))
+* Correctly handle Authorization header in gRPC streaming. ([#4419](https://github.com/grafana/tempo/pull/4419))
+* Fix TraceQL metrics time range handling at the cutoff between recent and backend data. ([#4257](https://github.com/grafana/tempo/issues/4257))
+* Fix several issues with exemplar values for TraceQL metrics. ([#4366](https://github.com/grafana/tempo/pull/4366), [#4404](https://github.com/grafana/tempo/pull/4404))
+* Utilize S3Pass and S3User parameters in `tempo-cli` options, which were previously unused in the code. ([#4259](https://github.com/grafana/tempo/pull/4259))
+
+
+
+
\ No newline at end of file
diff --git a/docs/sources/tempo/setup/deployment.md b/docs/sources/tempo/setup/deployment.md
index 7d7df00a2e5..b0f7275c07c 100644
--- a/docs/sources/tempo/setup/deployment.md
+++ b/docs/sources/tempo/setup/deployment.md
@@ -19,7 +19,7 @@ which is the monolithic deployment mode.
{{< admonition type="note" >}}
_Monolithic mode_ was previously called _single binary mode_. Similarly _scalable monolithic mode_ was previously called _scalable single binary mode_.
While the documentation has been updated to reflect this change, some URL names and deployment tooling (for example, Helm charts) do not yet reflect this change.
-{{% /admonition %}}
+{{< /admonition >}}

## Monolithic mode

@@ -75,7 +75,7 @@ Tempo can be easily deployed through a number of tools, including Helm, Tanka, K

{{< admonition type="note" >}}
The Tanka and Helm examples are equivalent. They are both provided for people who prefer different configuration mechanisms.
-{{% /admonition %}}
+{{< /admonition >}}

### Helm

diff --git a/docs/sources/tempo/setup/operator/multitenancy.md b/docs/sources/tempo/setup/operator/multitenancy.md
index 57239a27b8f..f412650e1fb 100644
--- a/docs/sources/tempo/setup/operator/multitenancy.md
+++ b/docs/sources/tempo/setup/operator/multitenancy.md
@@ -4,13 +4,13 @@ description: Enable multi-tenancy
menuTitle: Enable multi-tenancy
weight: 250
aliases:
-- /docs/tempo/operator/multitenancy
+- ../../operator/multitenancy # https://grafana.com/docs/tempo/operator/multitenancy/
---

# Enable multi-tenancy

Tempo is a multi-tenant distributed tracing backend. It supports multi-tenancy through the use of a header: `X-Scope-OrgID`.
-Refer to [multi-tenancy docs]({{< relref "../../operations/multitenancy" >}}) for more details.
+Refer to [multi-tenancy docs](https://grafana.com/docs/tempo//operations/manage-advanced-systems/multitenancy/) for more details.
This document outlines how to deploy and use multi-tenant Tempo with the Operator.

## Multi-tenancy without authentication

@@ -18,8 +18,8 @@ This document outlines how to deploy and use multi-tenant Tempo with the Operato
The following Kubernetes Custom Resource (CR) deploys a multi-tenant Tempo instance.

{{< admonition type="note" >}}
-Jaeger query is not tenant aware and therefore is not supported in this configuration.
-{{% /admonition %}}
+Jaeger query isn't tenant-aware and, therefore, isn't supported in this configuration.
+{{< /admonition >}}

```yaml
apiVersion: tempo.grafana.com/v1alpha1
diff --git a/docs/sources/tempo/setup/tanka.md b/docs/sources/tempo/setup/tanka.md
index 826e8306790..80ecf970f56 100644
--- a/docs/sources/tempo/setup/tanka.md
+++ b/docs/sources/tempo/setup/tanka.md
@@ -21,8 +21,8 @@ To set up Tempo using Kubernetes with Tanka, you need to:
1. Deploy Tempo with the Tanka command

{{< admonition type="note" >}}
-This configuration is not suitable for a production environment but can provide a useful way to learn about Tempo.
-{{% /admonition %}}
+This configuration isn't suitable for a production environment but can provide a useful way to learn about Tempo.
+{{< /admonition >}}

## Before you begin

@@ -62,7 +62,7 @@ Tanka requires the current context for your Kubernetes environment.

   kubectl config current-context
   ```

-1. Initialize Tanka. This will use the current Kubernetes context:
+1. Initialize Tanka. This uses the current Kubernetes context:

   ```bash
   tk init --k8s=false
   ```

@@ -376,7 +376,7 @@ To change the resources requirements, follow these steps:

{{< admonition type="note" >}}
Lowering these requirements can impact overall performance.
-{{% /admonition %}}
+{{< /admonition >}}

## Deploy Tempo using Tanka

@@ -393,7 +393,7 @@ If the ingesters don’t start after deploying Tempo with the Tanka command, thi
     pvc_storage_class: 'standard',
   },
 ```
-{{% /admonition %}}
+{{< /admonition >}}

## Next steps

diff --git a/docs/sources/tempo/setup/upgrade.md b/docs/sources/tempo/setup/upgrade.md
index 3046a3f1f0d..c3e87000cf3 100644
--- a/docs/sources/tempo/setup/upgrade.md
+++ b/docs/sources/tempo/setup/upgrade.md
@@ -7,8 +7,13 @@ weight: 310

# Upgrade your Tempo installation

-You can upgrade an existing Tempo installation to the next version.
-However, any new release has the potential to have breaking changes that should be tested in a non-production environment prior to rolling these changes to production.
+
+
+
+
+You can upgrade a Tempo installation to the next version.
+However, any release has the potential to have breaking changes.
+We recommend testing in a non-production environment prior to rolling these changes to production.

The upgrade process changes for each version, depending upon the changes made for the subsequent release.

@@ -18,7 +23,138 @@ For detailed information about any release, refer to the [Release notes](../rele

{{< admonition type="tip" >}}
You can check your configuration options using the [`status` API endpoint]({{< relref "../api_docs#status" >}}) in your Tempo installation.
-{{% /admonition %}}
+{{< /admonition >}}
+
+## Upgrade to Tempo 2.7
+
+When [upgrading](https://grafana.com/docs/tempo//setup/upgrade/) to Tempo 2.7, be aware of these considerations and breaking changes.
+
+### OpenTelemetry Collector receiver listens on `localhost` by default
+
+After this change, the OpenTelemetry Collector receiver defaults to binding on `localhost` rather than `0.0.0.0`. Tempo installations running in Docker or other container environments must update their listener address to continue receiving data. ([#4465](https://github.com/grafana/tempo/pull/4465))
+
+Most Tempo installations use the receivers with the default configuration:
+
+```yaml
+distributor:
+  receivers:
+    otlp:
+      protocols:
+        grpc:
+        http:
+```
+
+This used to work fine since the receivers defaulted to `0.0.0.0:4317` and `0.0.0.0:4318` respectively. With the changes to replace unspecified addresses, the receivers now default to `localhost:4317` and `localhost:4318`.
+
+As a result, connections to Tempo running in a Docker container won't work anymore.
+
+To work around this, explicitly specify the address you want to bind to. For instance, if Tempo is running in a container with hostname `tempo`, this should work:
+
+```yaml
+# ...
+      http:
+        endpoint: "tempo:4318"
+```
+
+You can also still bind to `0.0.0.0` explicitly, but this has potential security risks:
+
+```yaml
+# ...
+      http:
+        endpoint: "0.0.0.0:4318"
+```
+
+### Maximum spans per span set
+
+A new `max_spans_per_span_set` limit is enabled by default and set to 100.
+Set it to `0` to restore the old behavior (unlimited).
+Otherwise, spans beyond the configured max are dropped. ([#4275](https://github.com/grafana/tempo/pull/4383))
+
+```yaml
+query_frontend:
+  search:
+    max_spans_per_span_set: 0
+```
+
+### Tempo serverless deprecation
+
+Tempo serverless is officially deprecated and will be removed in an upcoming release.
+Prepare to migrate any serverless workflows to alternative deployments. ([#4017](https://github.com/grafana/tempo/pull/4017), [documentation](https://grafana.com/docs/tempo/latest/operations/backend_search/#serverless-environment))
+
+There are no serverless changes in this release. However, you'll need to remove these configurations before the next release.
+
+### Anchored regular expression matchers in TraceQL
+
+Regex matchers in TraceQL are now fully anchored using Prometheus's fast regexp.
+For instance, `span.foo =~ "bar"` is interpreted as `span.foo =~ "^bar$"`. Adjust existing queries accordingly. ([#4329](https://github.com/grafana/tempo/pull/4329))
+
+For more information, refer to the TraceQL [Comparison operators](https://grafana.com/docs/tempo//traceql/#comparison-operators) documentation.
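As an illustration, using a hypothetical `span.http.url` attribute, a query that previously matched any value containing `api` now needs explicit wildcards:

```traceql
{ span.http.url =~ ".*api.*" }
```

With the anchored engine, the unadorned form `{ span.http.url =~ "api" }` only matches values that are exactly `api`.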
+
+### Migration from OpenTracing to OpenTelemetry
+
+The `use_otel_tracer` option is removed.
+Configure your spans via standard OpenTelemetry environment variables.
+For Jaeger exporting, set `OTEL_TRACES_EXPORTER=jaeger`. For more information, refer to the [OpenTelemetry documentation](https://opentelemetry.io/docs/languages/sdk-configuration/). ([#3646](https://github.com/grafana/tempo/pull/3646))
+
+### gRPC compression disabled
+
+gRPC compression is now disabled in the querier and distributor for performance reasons. ([#4429](https://github.com/grafana/tempo/pull/4429)) Check the gRPC compression settings if you see network issues.
+If you would like to re-enable it, we recommend `snappy`.
+Use the following settings:
+
+  ```yaml
+  ingester_client:
+    grpc_client_config:
+      grpc_compression: "snappy"
+  metrics_generator_client:
+    grpc_client_config:
+      grpc_compression: "snappy"
+  querier:
+    frontend_worker:
+      grpc_client_config:
+        grpc_compression: "snappy"
+  ```
+
+### Added, updated, removed, or renamed configuration parameters
+
+| Parameter | Comments |
+| --------- | -------- |
+| `querier_forget_delay` | Removed. The `querier_forget_delay` setting provided no effective functionality and has been dropped. (#3996) |
+| `use_otel_tracer` | Removed. Configure your spans via standard OpenTelemetry environment variables. For Jaeger exporting, set `OTEL_TRACES_EXPORTER=jaeger`. (#3646) |
+| `max_spans_per_span_set` | Added to query-frontend configuration. (#4275) |
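+
+With `use_otel_tracer` gone, Tempo's internal tracer is configured through the standard OpenTelemetry SDK environment variables. The fragment below is a minimal sketch assuming a Kubernetes-style container spec; adapt the mechanism (container `env`, systemd unit, shell profile) to however you run Tempo:
+
+```yaml
+# Hypothetical container spec fragment. OTEL_TRACES_EXPORTER selects the
+# exporter for Tempo's own spans; "jaeger" matches the migration note above.
+env:
+  - name: OTEL_TRACES_EXPORTER
+    value: jaeger
+```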
+
+### Other upgrade considerations
+
+* The Tempo CLI now targets the `/api/v2/traces` endpoint by default. Use the `--v1` flag if you still rely on the older `/api/traces` endpoint. ([#4127](https://github.com/grafana/tempo/pull/4127))
+* If you already set the `X-Scope-OrgID` header in per-tenant overrides or global Tempo configuration, it is now honored and not overwritten by Tempo. This may change behavior if you previously depended on automatic injection. ([#4021](https://github.com/grafana/tempo/pull/4021))
+* The AWS Lambda build output changes from `main` to `bootstrap`. Follow the [AWS migration steps](https://aws.amazon.com/blogs/compute/migrating-aws-lambda-functions-from-the-go1-x-runtime-to-the-custom-runtime-on-amazon-linux-2/) to ensure your Lambda functions continue to work. ([#3852](https://github.com/grafana/tempo/pull/3852))

## Upgrade to Tempo 2.6

@@ -28,11 +164,11 @@ Tempo 2.6 has several considerations for any upgrade:

* vParquet4 is now the default block format
* Updated, removed, or renamed parameters

-For a complete list of changes, refer to the [Temopo 2.6 changelog](https://github.com/grafana/tempo/releases/tag/v2.6.0).
+For a complete list of changes, refer to the [Tempo 2.6 CHANGELOG](https://github.com/grafana/tempo/releases/tag/v2.6.0).

### Operational change for TraceQL metrics

-We've changed to an RF1 (Replication Factor 1) pattern for TraceQL metrics as we were unable to hit performance goals for RF3 de-duplication. This requires some operational changes to query TraceQL metrics.
+We've changed to an RF1 (Replication Factor 1) pattern for TraceQL metrics as we were unable to hit performance goals for RF3 deduplication. This requires some operational changes to query TraceQL metrics.

TraceQL metrics are still considered experimental, but we hope to mark them GA soon when we productionize a complete RF1 write-read path. [PRs [3628](https://github.com/grafana/tempo/pull/3628), [3691](https://github.com/grafana/tempo/pull/3691), [3723](https://github.com/grafana/tempo/pull/3723), [3995](https://github.com/grafana/tempo/pull/3995)]

@@ -49,7 +185,7 @@ The local-blocks processor must be enabled to start using metrics queries like `
      - local-blocks
```

-* By default, for all tenants in the main config:
+* By default, for all tenants in the main configuration:

```yaml
overrides:
@@ -145,22 +281,22 @@ For information on upgrading, refer to [Upgrade to Tempo 2.6](https://grafana.co

-### tempo-query is a standalone server
+### `tempo-query` is a standalone server

-With Tempo 2.6.1, tempo-query is no longer a Jaeger instance with grpcPlugin.
-It’s now a standalone server.
+With Tempo 2.6.1, `tempo-query` is no longer a Jaeger instance with `grpcPlugin`.
+It's now a standalone server, serving a gRPC API for Jaeger on 0.0.0.0:7777 by default. [PR 3840]

## Upgrade to Tempo 2.5

Tempo 2.5 has several considerations for any upgrade:

-* Docker image runs as new UID
+* Docker image runs as a new UID
* Support for vParquet format removed
* Experimental vParquet4 block format
* Removed configuration parameters

-For a complete list of changes, enhancements, and bug fixes, refer to the [Tempo 2.5 changelog](https://github.com/grafana/tempo/releases/tag/v2.5.0).
+For a complete list of changes, enhancements, and bug fixes, refer to the [Tempo 2.5 CHANGELOG](https://github.com/grafana/tempo/releases/tag/v2.5.0).
### Docker image runs as new UID @@ -171,24 +307,25 @@ The new user `10001` won't have access to the old files created by `root`. The ownership of `/var/tempo` changed from `root:root` to `tempo:tempo` with the UID/GID of `10001`. -The `ingester` and `metrics-generator` statefulsets may need to [run chown](https://opensource.com/article/19/8/linux-chown-command) to change ownership to start properly. +The `ingester` and `metrics-generator` statefulsets may need to [run `chown`](https://opensource.com/article/19/8/linux-chown-command) to change ownership to start properly. Refer to [PR 2265](https://github.com/grafana/tempo/pull/2265) to see a Jsonnet example of an `init` container. -This change doesn’t impact you if you used the Helm chart with the default security context set in the chart. +This change doesn't impact you if you used the Helm chart with the default security context set in the chart. All data should be owned by the `tempo` user already. -The UID won’t impact Helm chart users. +The UID won't impact Helm chart users. + ### Support for vParquet format removed The original vParquet format [has been removed](https://github.com/grafana/tempo/pull/3663) from Tempo 2.5. -Direct upgrades from Tempo 2.1 to Tempo 2.5 are not possible. +Direct upgrades from Tempo 2.1 to Tempo 2.5 aren't possible. You will need to upgrade to an intermediate version and wait for the old vParquet blocks to fall out of retention before upgrading to 2.5. [PR 3663](https://github.com/grafana/tempo/pull/3663)] -vParquet(1) won't be recognized as a valid encoding and any remaining vParquet(1) blocks will not be readable. +vParquet(1) won't be recognized as a valid encoding and any remaining vParquet(1) blocks won't be readable. Installations running with historical defaults should not require any changes as the default has been migrated for several releases. -Installations with storage settings pinned to vParquet must run a previous release configured for vParquet2 or higher until all existing vParquet(1) blocks have expired and been deleted from the backend, or else will encounter read errors after upgrading to this release. +Installations with storage settings pinned to vParquet must run a previous release configured for vParquet2 or higher until all existing vParquet(1) blocks have expired and been deleted from the backend, or else you'll encounter read errors after upgrading to this release. ### Experimental vParquet4 block format @@ -199,6 +336,7 @@ If you choose to use vParquet4 and then opt to revert to vParquet3, any vParquet To try vParquet4, refer to [Choose a block format](https://grafana.com/docs/tempo/latest/configuration/parquet/#choose-a-different-block-format). + ### Removed configuration parameters @@ -221,17 +359,17 @@ To try vParquet4, refer to [Choose a block format](https://grafana.com/docs/temp ### Additional considerations * Updating to OTLP 1.3.0 removes the deprecated `InstrumentationLibrary` and `InstrumentationLibrarySpan` from the OTLP receivers. [PR 3649](https://github.com/grafana/tempo/pull/3649)] -* Removes the addition of a tenant in multitenant trace id lookup. [PR 3522](https://github.com/grafana/tempo/pull/3522)] +* Removes the addition of a tenant in multi-tenant trace id lookup. 
[PR 3522](https://github.com/grafana/tempo/pull/3522)] ## Upgrade to Tempo 2.4 Tempo 2.4 has several considerations for any upgrade: - + * vParquet3 is now the default backend * Caches configuration was refactored * Updated, removed, and renamed configuration parameters -For a complete list of changes, enhancements, and bug fixes, refer to the [Tempo 2.4 changelog](https://github.com/grafana/tempo/releases). +For a complete list of changes, enhancements, and bug fixes, refer to the [Tempo 2.4 CHANGELOG](https://github.com/grafana/tempo/releases). ### Transition to vParquet3 as default block format @@ -240,14 +378,15 @@ vParquet3 format is now the default block format. It is production ready and we Upgrading to Tempo 2.4 modifies the Parquet block format. Although you can use Tempo 2.3 with vParquet2 or vParquet3, you can only use Tempo 2.4 with vParquet3. With this release, the first version of our Parquet backend, vParquet, is being deprecated. -Tempo 2.4 will still read vParquet1 blocks. +Tempo 2.4 still reads vParquet1 blocks. However, Tempo will exit with error if they are manually configured. [[PR 3377](https://github.com/grafana/tempo/pull/3377/files#top)] For information on changing the vParquet version, refer to [Choose a different block format](https://grafana.com/docs/tempo/next/configuration/parquet#choose-a-different-block-format). + ### Cache configuration refactored -The major cache refactor to allow multiple role-based caches to be configured. [[PR 3166](https://github.com/grafana/tempo/pull/3166)] +The major cache refactor lets you configure multiple role-based caches. [[PR 3166](https://github.com/grafana/tempo/pull/3166)] This change resulted in several fields being deprecated (refer to the old configuration). These fields have all been migrated to a top level `cache:` field. @@ -319,7 +458,7 @@ Tempo 2.3 has several considerations for any upgrade: * New `defaults` block in Overrides module configuration * Several configuration parameters have been renamed or removed. -For a complete list of changes, enhancements, and bug fixes, refer to the [Tempo 2.3 changelog](https://github.com/grafana/tempo/releases). +For a complete list of changes, enhancements, and bug fixes, refer to the [Tempo 2.3 CHANGELOG](https://github.com/grafana/tempo/releases). ### Production-ready vParquet3 block format @@ -330,11 +469,11 @@ This block format is required for using dedicated attribute columns. While vParquet2 remains the default backend for Tempo 2.3, vParquet3 is available as a stable option. Both work with Tempo 2.3. -Upgrading to Tempo 2.3 doesn’t modify the Parquet block format. +Upgrading to Tempo 2.3 doesn't modify the Parquet block format. {{< admonition type="note" >}} -Tempo 2.2 can’t read data stored in vParquet3. -{{% /admonition %}} +Tempo 2.2 can't read data stored in vParquet3. +{{< /admonition >}} Recommended update process: @@ -421,11 +560,11 @@ Tempo 2.2 has several considerations for any upgrade: * vParquet2 is now the default block format * Several configuration parameters have been renamed or removed. -For a complete list of changes, enhancements, and bug fixes, refer to the [Tempo 2.2 changelog](https://github.com/grafana/tempo/releases). +For a complete list of changes, enhancements, and bug fixes, refer to the [Tempo 2.2 CHANGELOG](https://github.com/grafana/tempo/releases). ### Default block format changed to vParquet2 -While not a breaking change, upgrading to Tempo 2.2 by default changes Tempo’s block format to vParquet2. 
+While not a breaking change, upgrading to Tempo 2.2 by default changes Tempo's block format to vParquet2.
To stay on a previous block format, read the [Parquet configuration documentation]({{< relref "../configuration/parquet#choose-a-different-block-format" >}}).
We strongly encourage upgrading to vParquet2 as soon as possible as this is required for using structural operators in your TraceQL queries and provides query performance improvements, in particular on queries using the `duration` intrinsic.

@@ -436,11 +575,11 @@ Tempo 2.2 updates the `microservices` JSonnet to support a `statefulset` for the

{{< admonition type="note" >}}
This update is important if you use the experimental `local-blocks` processor.
-{{% /admonition %}}
+{{< /admonition >}}

To support a new `processor`, the metrics-generator has been converted from a `deployment` into a `statefulset` with a PVC.
This requires manual intervention to migrate successfully and avoid downtime.
-Note that currently both a `deployment` and a `statefulset` will be managed by the JSonnet for a period of time, after which we will delete the deployment from this repo and you will need to delete user-side references to the `tempo_metrics_generator_deployment`, as well as delete the deployment itself.
+Note that, for a period of time, both a `deployment` and a `statefulset` will be managed by the Jsonnet. After that, we will delete the deployment from this repository, and you will need to delete user-side references to the `tempo_metrics_generator_deployment`, as well as delete the deployment itself.

Refer to the PR for seamless migration instructions. [PRs [2533](https://github.com/grafana/tempo/pull/2533), [2467](https://github.com/grafana/tempo/pull/2467)]

@@ -481,9 +620,9 @@ tempo_ingester_trace_search_bytes_discarded_total
```

### Upgrade path to maintain search from Tempo 1.x to 2.1
-
+
Removing support for search on v2 blocks means that if you upgrade directly from 1.9 to 2.1, you won't be able to search your v2 blocks. To avoid this, upgrade to 2.0 first, since 2.0 supports searching both v2 and vParquet blocks. You can let your old v2 blocks gradually age out while Tempo creates new vParquet blocks from incoming traces. Once all of your v2 blocks have been deleted and you only have vParquet format-blocks, you can upgrade to Tempo 2.1. All of your blocks will be searchable.
-
+
Parquet files are no longer cached when carrying out searches.

### Breaking changes to metric names exposed by Tempo

@@ -496,27 +635,28 @@ The `query_frontend_result_metrics_inspected_bytes` metric was removed in favor

## Upgrade from Tempo 1.5 to 2.0

-Tempo 2.0 marks a major milestone in Tempo’s development. When planning your upgrade, consider these factors:
+Tempo 2.0 marks a major milestone in Tempo's development. When planning your upgrade, consider these factors:

- Breaking changes:
  - Renamed, removed, and moved configurations are described in the section below.
  - The `TempoRequestErrors` alert was removed from the mixin. Any Jsonnet users relying on this alert should copy this into their own environment.
- Advisory:
  - Changed defaults. Are these updates relevant for your installation?
  - TraceQL editor needs to be enabled in Grafana to use the query editor.
  - Resource requirements have changed for Tempo 2.0 with the default configuration.

Once you upgrade to Tempo 2.0, there is no path to downgrade.
{{< admonition type="note" >}} -There is a potential issue loading Tempo 1.5's experimental Parquet storage blocks. You may see errors or even panics in the compactors. We have only been able to reproduce this with interim commits between 1.5 and 2.0, but if you experience any issues please [report them](https://github.com/grafana/tempo/issues/new?assignees=&labels=&template=bug_report.md&title=) so we can isolate and fix this issue. -{{% /admonition %}} +There is a potential issue loading experimental Parquet storage blocks. You may see errors or even panics in the compactors. We have only been able to reproduce this with interim commits between 1.5 and 2.0, but if you experience any issues, [report them](https://github.com/grafana/tempo/issues/new?assignees=&labels=&template=bug_report.md&title=). +{{< /admonition >}} ### Check Tempo installation resource allocation -Parquet provides faster search and is required to enable TraceQL. However, the Tempo installation will require additional CPU and memory resources to use Parquet efficiently. Parquet is more costly due to the extra work of building the columnar blocks, and operators should expect at least 1.5x increase in required resources to run a Tempo 2.0 cluster. Most users will find these extra resources are negligible compared to the benefits that come from the additional features of TraceQL and from storing traces in an open format. +Parquet provides faster search and is required to enable TraceQL. However, the Tempo installation requires additional CPU and memory resources to use Parquet efficiently. Parquet is more costly due to the extra work of building the columnar blocks, and operators should expect at least 1.5x increase in required resources to run a Tempo 2.0 cluster. Most users find these extra resources are negligible compared to the benefits that come from the additional features of TraceQL and from storing traces in an open format. -You can can continue using the previous `v2` block format using the instructions provided in the [Parquet configuration documentation]({{< relref "../configuration/parquet" >}}). Tempo will continue to support trace by id lookup on the `v2` format for the foreseeable future. +You can continue using the previous `v2` block format using the instructions provided in the [Parquet configuration documentation]({{< relref "../configuration/parquet" >}}). +Tempo continues to support trace by id lookup on the `v2` format for the foreseeable future. ### Enable TraceQL in Grafana @@ -569,3 +709,6 @@ storage: storage_account_key: container_name: ``` + + + diff --git a/docs/sources/tempo/shared/best-practices-traces.md b/docs/sources/tempo/shared/best-practices-traces.md index 9a2a790c59f..801602d2e8f 100644 --- a/docs/sources/tempo/shared/best-practices-traces.md +++ b/docs/sources/tempo/shared/best-practices-traces.md @@ -9,9 +9,9 @@ labels: [//]: # 'This file documents the best practices for tracing for Tempo.' 
[//]: # 'This shared file is included in these locations:' -[//]: # '/grafana/docs/sources/datasources/tempo/best-practices.md' -[//]: # '/website/docs/grafana-cloud/data-configuration/traces/best-practices.md' -[//]: # '/tempo/docs/sources/tempo/operations/best-practices.md' +[//]: # '/grafana/docs/sources/datasources/tempo/tracing-best-practices.md' +[//]: # '/website/docs/grafana-cloud/send-data/traces/tracing-best-practices.md' +[//]: # '/tempo/docs/sources/tempo/getting-started/best-practices.md' [//]: # [//]: # 'If you make changes to this file, verify that the meaning and content are not changed in any place where the file is included.' [//]: # 'Any links should be fully qualified and not relative: /docs/grafana/ instead of ../grafana/.' @@ -94,6 +94,6 @@ You can consider breaking up the spans in several ways: - For long-running operations, you could create a new span for every predetermined interval of execution time. {{< admonition type="note" >}} This requires time-based tracking in your application's code and is more complex to implement. - {{% /admonition %}} + {{< /admonition >}} - Use span linking - Should data flow hit bottlenecks where further operations on that data might be batched at a later time, the use of [span links](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/overview.md#links-between-spans) can help keep traces constrained to an acceptable time range, while sharing context with other traces that work on the same data. This can also improve the readability of traces. diff --git a/docs/sources/tempo/shared/tempo-in-grafana.md b/docs/sources/tempo/shared/tempo-in-grafana.md new file mode 100644 index 00000000000..965c2cf15f6 --- /dev/null +++ b/docs/sources/tempo/shared/tempo-in-grafana.md @@ -0,0 +1,164 @@ + +--- +headless: true +description: Shared file for Tempo on Grafana. +labels: + products: + - enterprise + - oss +--- + +[//]: # 'This file describes how you can use tracing data in Grafana.' +[//]: # 'This shared file is included in these locations:' +[//]: # '/grafana/docs/sources/datasources/tempo/getting-started/tempo-in-grafana.md' +[//]: # '/website/docs/grafana-cloud/send-data/traces/use-traces-with-grafana.md' +[//]: # +[//]: # 'If you make changes to this file, verify that the meaning and content are not changed in any place where the file is included.' +[//]: # 'Any links should be fully qualified and not relative.' + + +## Query your data + +Using tracing data in Grafana and Grafana Cloud Traces, you can search for traces, generate metrics from spans, and link your tracing data with logs, metrics, and profiles. + +### Use Explore Traces to investigate tracing data + +{{< docs/public-preview product="Explore Traces" >}} + +[Explore Traces](https://grafana.com/docs/grafana-cloud/visualizations/simplified-exploration/traces/) helps you visualize insights from your Tempo traces data. +Using the app, you can: + +- Use *Rate*, *Errors*, and *Duration* (RED) metrics derived from traces to investigate issues +- Uncover related issues and monitor changes over time +- Browse automatic visualizations of your data based on its characteristics +- Do all of this without writing TraceQL queries + +Expand your observability journey and learn about [the Explore apps suite](https://grafana.com/docs/grafana-cloud/visualizations/simplified-exploration/). + +{{< youtube id="a3uB1C2oHA4" >}} + +### Search for traces + +Search for traces using common dimensions such as time range, duration, span tags, service names, etc. 
+Use the Explore trace view to quickly diagnose errors and high latency events in your system.
+
+![Sample search visualization](/static/img/docs/grafana-cloud/trace_search.png)
+
+#### Use trace search results as panels in dashboards
+
+You can embed tracing panels and visualizations in dashboards.
+You can also save queries as panels.
+For more information, refer to the [Traces Visualization](https://grafana.com/docs/grafana-cloud/visualizations/panels-visualizations/visualizations/traces/#add-traceql-with-table-visualizations) documentation.
+
+For example dashboards, visit [`play.grafana.org`](https://play.grafana.org).
+
+- [Traces and basic operations](https://play.grafana.org/d/fab5705a-e213-4527-8c23-92cb7452e746/traces-and-basic-operations-on-them?orgId=1)
+- [Grafana Explore with a Tempo data source](https://play.grafana.org/explore?schemaVersion=1&panes=%7B%22cf2%22:%7B%22datasource%22:%22grafanacloud-traces%22,%22queries%22:%5B%7B%22refId%22:%22A%22,%22datasource%22:%7B%22type%22:%22tempo%22,%22uid%22:%22grafanacloud-traces%22%7D,%22queryType%22:%22traceqlSearch%22,%22limit%22:20,%22tableType%22:%22traces%22,%22filters%22:%5B%7B%22id%22:%22ab3bc4be%22,%22operator%22:%22%3D%22,%22scope%22:%22span%22%7D%5D%7D%5D,%22range%22:%7B%22from%22:%22now-1h%22,%22to%22:%22now%22%7D%7D%7D&orgId=1)
+
+### Use TraceQL to query data and generate metrics
+
+Inspired by PromQL and LogQL, TraceQL is a query language designed for selecting traces.
+
+Using Grafana **Explore**, you can search traces.
+The default traces search reviews the whole trace.
+TraceQL provides a method for formulating precise queries so you can zoom in to the data you need.
+Query results return faster because the queries limit what is searched.
+
+You can construct queries using the [TraceQL query editor](https://grafana.com/docs/grafana//datasources/tempo/query-editor/traceql-editor/) or use the [Search query type](https://grafana.com/docs/grafana//datasources/tempo/query-editor/traceql-search/).
+
+For details about constructing queries, refer to the [TraceQL documentation](https://grafana.com/docs/tempo//traceql/).
+
+#### TraceQL metrics queries
+
+{{< docs/experimental product="TraceQL metrics" >}}
+
+TraceQL language provides metrics queries as an experimental feature.
+Metric queries extend trace queries by applying an aggregate function to trace query results. For example: `{ span:name = "foo" } | rate() by (span:status)`.
+This powerful feature creates metrics from traces, much in the same way that LogQL metric queries create metrics from logs.
+
+[Explore Traces](https://grafana.com/docs/grafana-cloud/visualizations/simplified-exploration/traces/) is powered by metrics queries.
+
+For more information about available queries, refer to [TraceQL metrics queries](https://grafana.com/docs/tempo//traceql/metrics-queries).
+
+### Generate metrics from spans
+
+RED metrics can drive service graphs and other ready-to-go visualizations of your span data.
+RED metrics represent:
+
+- _Rate_, the number of requests per second
+- _Errors_, the number of those requests that are failing
+- _Duration_, the amount of time those requests take
+
+For more information about the RED method, refer to [The RED Method: how to instrument your services](/blog/2018/08/02/the-red-method-how-to-instrument-your-services/).
+
+To enable metrics-generator in Grafana Cloud, refer to [Enable metrics-generator](https://grafana.com/docs/grafana-cloud/send-data/traces/metrics-generator/).
+
+To enable metrics-generator for Tempo, refer to [Configure metrics-generator](https://grafana.com/docs/tempo//configuration/#metrics-generator).
+
+![Service graph view](/static/img/docs/grafana-cloud/trace_service_graph.png)
+
+These metrics exist in your Hosted Metrics instance and can also be used to build powerful custom dashboards.
+
+![Custom Metrics Dashboard](/static/img/docs/grafana-cloud/trace_custom_metrics_dash.png)
+
+Metrics also automatically generate exemplars, which make it easy to link from metrics to traces.
+
+![Trace Exemplars](/static/img/docs/grafana-cloud/trace_exemplars.png)
+
+#### Service graph view
+
+Service graph view displays a table of request rate, error rate, and duration (RED) metrics calculated from your incoming spans.
+It also includes a node graph view built from your spans.
+
+To use the service graph view, you need to enable [service graphs](https://grafana.com/docs/tempo//metrics-generator/service_graphs/) and [span metrics](https://grafana.com/docs/tempo//metrics-generator/span_metrics/).
+After it's enabled, this pre-configured view is immediately available in **Explore > Service Graphs**.
+
+Refer to the [service graph view documentation](https://grafana.com/docs/tempo//metrics-generator/service-graph-view/) for further information.
+
+![Service graph view overview](/static/img/docs/grafana-cloud/apm-overview.png)
+
+## Integrate other telemetry signals
+
+### Link traces and logs
+
+If you're already doing request/response logging with trace IDs, you can extract the IDs from your logs to jump directly to your traces.
+
+![Logs to Traces visualization](/static/img/docs/grafana-cloud/trace_sample.png)
+
+In the other direction, you can configure Grafana and Grafana Cloud to create a link from an individual span to your Loki logs.
+If you see a long-running span or a span with errors, you can immediately jump to the logs of the process causing the error.
+
+![Traces to Logs visualization](/static/img/docs/grafana-cloud/trace_to_logs.png)
+
+{{< admonition type="note" >}}
+Cloud Traces supports these default tags: `cluster`, `hostname`, `namespace`, and `pod`.
+Custom tags can only be added by Grafana Support; contact Support to add one.
+{{< /admonition >}}
+
+### Link traces and metrics
+
+Grafana can correlate different signals with links between traces and metrics. [Trace to metrics](/blog/2022/08/18/new-in-grafana-9.1-trace-to-metrics-allows-users-to-navigate-from-a-trace-span-to-a-selected-data-source/) lets you navigate from a trace span to a selected data source.
+Using trace to metrics, you can quickly see trends or aggregated data related to each span.
+
+For example, you can use the `$__tags` keyword to convert span attributes to metric labels.
+
+To set up Trace to metrics for your data source, refer to [Trace to metric configuration](/docs/grafana-cloud/connect-externally-hosted/data-sources/tempo/configure-tempo-data-source/#trace-to-metrics).
+
+{{< youtube id="xOolCpm2F8c" >}}
+
+### Link traces and profiles
+
+Using Trace to profiles, you can link between traces and profiles to correlate different signals.
+
+Trace to profiles lets you link your [Grafana Pyroscope](https://grafana.com/docs/pyroscope//) data source to tracing data in Grafana or Grafana Cloud.
+When configured, this connection lets you run queries from a trace span into the profile data.
+
+![Selecting a link in the span queries the profile data source](/static/img/docs/tempo/profiles/tempo-profiles-Span-link-profile-data-source.png)
+
+For more information, refer to the [Traces to profiles documentation](https://grafana.com/docs/grafana//datasources/tempo/configure-tempo-data-source#trace-to-profiles) and the [Grafana Pyroscope data source documentation](https://grafana.com/docs/grafana//datasources/grafana-pyroscope/).
+
+For Cloud Traces, refer to the [Traces to profiles documentation](https://grafana.com/docs/grafana-cloud/connect-externally-hosted/data-sources/tempo/configure-tempo-data-source#trace-to-profiles) for configuration instructions.
+
+{{< youtube id="AG8VzfFMLxo" >}}
\ No newline at end of file
diff --git a/docs/sources/tempo/traceql/_index.md b/docs/sources/tempo/traceql/_index.md
index 52a07a7c66d..390c28e2baf 100644
--- a/docs/sources/tempo/traceql/_index.md
+++ b/docs/sources/tempo/traceql/_index.md
@@ -63,12 +63,127 @@ In this example, the search reduces traces to those spans where:
* `http.status_code` is in the range of `200` to `299` and
* the number of matching spans within a trace is greater than two.

-Queries select sets of spans and filter them through a pipeline of aggregators and conditions. If, for a given trace, this pipeline produces a spanset then it is included in the results of the query.
+Queries select sets of spans and filter them through a pipeline of aggregators and conditions.
+If, for a given trace, this pipeline produces a spanset, it's included in the results of the query.
+
+Refer to [TraceQL metrics queries](https://grafana.com/docs/tempo//traceql/metrics-queries/) for examples of TraceQL metrics queries.
+
+### Find traces of a specific operation
+
+To find traces of a specific operation, specify both the operation name (the span attribute `name`) and the name of the service that holds the operation (the resource attribute `service.name`) for proper filtering.
+In the example below, traces are filtered on the `resource.service.name` value `frontend` and the span `name` value `POST /api/orders`:
+
+```
+{resource.service.name = "frontend" && name = "POST /api/orders"}
+```
+
+When using the same Grafana stack for multiple environments (for example, `production` and `staging`) or having services that share the same name but are differentiated through their namespace, the query looks like:
+
+```
+{
+  resource.service.namespace = "ecommerce" &&
+  resource.service.name = "frontend" &&
+  resource.deployment.environment = "production" &&
+  name = "POST /api/orders"
+}
+```
+
+### Find traces having a particular outcome
+
+This example finds all traces on the operation `POST /api/orders` that contain a span that has errored:
+
+```
+{
+  resource.service.name="frontend" &&
+  name = "POST /api/orders" &&
+  status = error
+}
+```
+
+This example finds all traces on the operation `POST /api/orders` that return with an HTTP 5xx error:
+
+```
+{
+  resource.service.name="frontend" &&
+  name = "POST /api/orders" &&
+  span.http.status_code >= 500
+}
+```
+
+### Find traces that have a particular behavior
+
+You can use query filtering on multiple spans of the traces.
+This example locates all the traces of the `GET /api/products/{id}` operation that access a database. It's a convenient request to identify abnormal access ratios to the database caused by caching problems.
+
+```
+{span.service.name="frontend" && name = "GET /api/products/{id}"} && {.db.system="postgresql"}
+```
+
+### Find traces going through `production` and `staging` instances
+
+This example finds traces that go through `production` and `staging` instances.
+It's a convenient request to identify misconfigurations and leaks across production and non-production environments.
+
+```
+{ resource.deployment.environment = "production" } && { resource.deployment.environment = "staging" }
+```
+
+### Use structural operators
+
+Find traces that include the `frontend` service, where either that service or a downstream service includes a span where an error is set.
+
+```
+{ resource.service.name="frontend" } >> { status = error }
+```
+
+Find all leaf spans that end in the `productcatalogservice`.
+
+```
+{ } !< { resource.service.name = "productcatalogservice" }
+```
+
+Find whether `productcatalogservice` and `frontend` are siblings.
+
+```
+{ resource.service.name = "productcatalogservice" } ~ { resource.service.name="frontend" }
+```
+
+### Other examples
+
+Find spans where the HTTP status code is 200, and list the name of the service each span belongs to along with the returned traces.
+
+```
+{ span.http.status_code = 200 } | select(resource.service.name)
+```
+
+Find any trace with an unscoped `deployment.environment` attribute set to `production` and `http.status_code` attribute set to `200`:
+
+```
+{ .deployment.environment = "production" && span.http.status_code = 200 }
+```
+
+Find any trace where spans within it have a `deployment.environment` resource attribute set to `production` and a span `http.status_code` attribute set to `200`. In previous examples, all conditions had to be true on one span. These conditions can be true on either different spans or the same spans.
+
+```
+{ resource.deployment.environment = "production" } && { span.http.status_code = 200 }
+```
+
+Find any trace where any span has an `http.method` attribute set to `GET` as well as a `status` attribute set to `ok`, and where any other span has an `http.method` attribute set to `DELETE`, but doesn't have a `status` attribute set to `ok`:
+
+```
+{ span.http.method = "GET" && status = ok } && { span.http.method = "DELETE" && status != ok }
+```
+
+Find any trace with a `deployment.environment` attribute that matches the regex `prod-.*` and `http.status_code` attribute set to `200`:
+
+```
+{ resource.deployment.environment =~ "prod-.*" && span.http.status_code = 200 }
+```

## Selecting spans

-In TraceQL, curly brackets `{}` always select a set of spans from the current trace.
-They are commonly paired with a condition to reduce the spans being passed in.
+In TraceQL, curly brackets `{}` always select a set of spans from available traces.
+Curly brackets are commonly paired with a condition to reduce the spans fetched.

TraceQL differentiates between two types of span data: intrinsics, which are fundamental to spans, and attributes, which are customizable key-value pairs. You can use intrinsics and attributes to build filters and select spans.

@@ -92,6 +207,8 @@ Intrinsics example:

Custom attributes are prefixed with `.`, such as `span.`, `resource.`, `link.`, or `event.`.
Resource has no intrinsic values. It only has custom attributes.
+
+Attributes are separated by a period (`.`), and intrinsic fields use a colon (`:`).
The `trace` scope is only an intrinsic and doesn't have any custom attributes at the trace level.
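+
+As an illustrative contrast, the first query below uses the colon form to match on the intrinsic span status, and the second uses the period form to match on a custom span attribute. Both are sketches reusing fields that appear elsewhere on this page:
+
+```
+{ span:status = error }
+```
+
+```
+{ span.http.status_code = 200 }
+```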
Attributes example:

@@ -117,16 +234,13 @@ The following table shows the current available scoped intrinsic fields:

| `trace:duration` | duration | max(end) - min(start) time of the spans in the trace | `{ trace:duration > 100ms }` |
| `trace:rootName` | string | if it exists, the name of the root span in the trace | `{ trace:rootName = "HTTP GET" }` |
| `trace:rootService` | string | if it exists, the service name of the root span in the trace | `{ trace:rootService = "gateway" }` |
-| `trace:id` | string | trace id using hex string | `{ trace:id = "1234567890abcde" }` |
+| `trace:id` | string | trace ID using hex string | `{ trace:id = "1234567890abcde" }` |
| `event:name` | string | name of event | `{ event:name = "exception" }` |
| `event:timeSinceStart` | duration | time of event in relation to the span start time | `{ event:timeSinceStart > 2ms}` |
-| `link:spanID` | string | link span id using hex string | `{ link:spanID = "0000000000000001" }` |
-| `link:traceID` | string | link trace id using hex string | `{ link:traceID = "1234567890abcde" }` |
-
-
The trace-level intrinsics, `trace:duration`, `trace:rootName`, and `trace:rootService`, are the same for all spans in the same trace.
Additionally, these intrinsics are significantly more performant because they have to inspect much less data than a span-level intrinsic.

@@ -144,9 +258,7 @@ This example searches all Kubernetes clusters called `service-name` that have a

### Attribute fields

-TraceQL has four different attribute scopes: span attributes, resource attributes, event attributes, and link attributes.
-
+TraceQL supports these different attribute scopes: span attributes, resource attributes, event attributes, link attributes, and instrumentation scope attributes.

By expanding a span in the Grafana UI, you can see both its span attributes (1 in the screenshot) and resource attributes (2 in the screenshot).

@@ -197,17 +309,27 @@ You can search for an attribute in your link:

```
{ link.opentracing.ref_type = "child_of" }
```
-
+
+Find the libraries producing instrumentation for a given service:
+
+```
+{ resource.service.name = "foo" } | rate() by (instrumentation:name)
+```
+
+The [Tempo 2.7 release video](https://www.youtube.com/watch?v=0jUEvY-pCdw) demos and explains the `instrumentation` scope, starting at 30 seconds.

### Unscoped attribute fields

Attributes can be unscoped if you are unsure whether the requested attribute exists on the span or resource.
-When possible, use scoped instead of unscoped attributes. Scoped attributes provide faster query results.
+When possible, use scoped instead of unscoped attributes.
+Scoped attributes provide faster query results.

For example, to find traces with an attribute of `sla` set to `critical`:

```
@@ -237,7 +359,7 @@ You can use quoted attributes syntax with non-quoted attribute syntax, the follo

{{< admonition type="note" >}}
Currently, only the `\"` and `\\` escape sequences are supported.
-{{% /admonition %}}
+{{< /admonition >}}

### Comparison operators

@@ -254,8 +376,14 @@ The implemented comparison operators are:

- `=~` (regular expression)
- `!~` (negated regular expression)

-TraceQL uses Golang regular expressions. Online regular expression testing sites like https://regex101.com/ are convenient to validate regular expressions used in TraceQL queries. All regular expressions
-are treated as fully anchored. For example, `span.foo =~ "bar"` is evaluated as `span.foo =~ "^bar$"`.
+Online regular expression testing sites like https://regex101.com/ are convenient to validate regular expressions used in TraceQL queries. +All regular expressions are treated as fully anchored. +Regular expressions are anchored at both ends. This anchoring makes the queries faster and matches the behavior of PromQL, where regular expressions are also fully anchored. + +An unanchored query, such as: { span.foo =~ "bar" } is now treated as: { span.foo =~ "^bar$" }. + +If you use TraceQL with regular expressions in your Grafana dashboards and you want the unanchored behavior, update the queries to use the unanchored version, such as { span.foo =~ ".*bar.*"}. For example, to find all traces where an `http.status_code` attribute in a span are greater than `400` but less than equal to `500`: @@ -282,27 +410,29 @@ Find all traces where `any_attribute` is not `nil` or where `any_attribute` exis ### Field expressions -Fields can also be combined in various ways to allow more flexible search criteria. A field expression is a composite of multiple fields that define all of the criteria that must be matched to return results. +Fields can also be combined in various ways to allow more flexible search criteria. +A field expression is a composite of multiple fields that define all of the criteria that must be matched to return results. #### Examples -Find traces with "success" `http.status_code` codes: +Find traces with `success` `http.status_code` codes: ``` { span.http.status_code >= 200 && span.http.status_code < 300 } ``` -Find traces where a `DELETE` HTTP method was used and the intrinsic span status was not OK: +Find traces where a `DELETE` HTTP method was used and the intrinsic span status wasn't OK: ``` { span.http.method = "DELETE" && status != ok } ``` -Both expressions require all conditions to be true on the same span. The entire expression inside of a pair of `{}` must be evaluated as true on a single span for it to be included in the result set. +Both expressions require all conditions to be true on the same span. +The entire expression inside of a pair of `{}` must be evaluated as true on a single span for it to be included in the result set. In the above example, if a span includes an `.http.method` attribute set to `DELETE` where the span also includes a `status` attribute set to `ok`, the trace would not be included in the returned results. -## Combining spansets +## Combine spansets Spanset operators let you select different sets of spans from a trace and then make a determination between them. @@ -325,7 +455,7 @@ Note the difference between the previous example and this one: { resource.cloud.region = "us-east-1" && resource.cloud.region = "us-west-1" } ``` -The second expression returns no traces because it's impossible for a single span to have a `resource.cloud.region` attribute that is set to both region values at the same time. +The second expression returns no traces because it's impossible for a single span to have a `resource.cloud.region` attribute that's set to both region values at the same time. ### Structural @@ -366,7 +496,6 @@ For example, to get a failing endpoint AND all descendant failing spans in one q These spanset operators look at the structure of a trace and the relationship between the spans. These operators are marked experimental because sometimes return false positives. However, the operators can be very useful (see examples below). -We encourage users to try them and give feedback. 
- `{condA} !>> {condB}` - The not-descendant operator (`!>>`) looks for spans matching `{condB}` that are not descendant spans of a parent matching `{condA}` - `{condA} !<< {condB}` - The not-ancestor operator (`!<<`) looks for spans matching `{condB}` that are not ancestor spans of a child matching `{condA}` @@ -426,7 +555,8 @@ To find spans where the total of a made-up attribute `bytesProcessed` was more t ## Grouping -TraceQL supports a grouping pipeline operator that can be used to group by arbitrary attributes. This can be useful to +TraceQL supports a grouping pipeline operator that can be used to group by arbitrary attributes. +This can be useful to find something like a single service with more than 1 error: ``` @@ -435,7 +565,6 @@ find something like a single service with more than 1 error: {{< youtube id="fraepWra00Y" >}} - ## Arithmetic TraceQL supports arbitrary arithmetic in your queries. This can be useful to make queries more human readable: @@ -455,91 +584,3 @@ TraceQL can select arbitrary fields from spans. This is particularly performant ## Experimental TraceQL metrics TraceQL metrics are experimental, but easy to get started with. Refer to [the TraceQL metrics]({{< relref "../operations/traceql-metrics.md" >}}) documentation for more information. - -## Examples - -### Find traces of a specific operation - -Let's say that you want to find traces of a specific operation, then both the operation name (the span attribute `name`) and the name of the service that holds this operation (the resource attribute `service.name`) should be specified for proper filtering. -In the example below, traces are filtered on the `resource.service.name` value `frontend` and the span `name` value `POST /api/order`: - -``` -{resource.service.name = "frontend" && name = "POST /api/orders"} -``` - -When using the same Grafana stack for multiple environments (for example, `production` and `staging`) or having services that share the same name but are differentiated though their namespace, the query looks like: - -``` -{ - resource.service.namespace = "ecommerce" && - resource.service.name = "frontend" && - resource.deployment.environment = "production" && - name = "POST /api/orders" -} -``` - -### Find traces having a particular outcome - -This example finds all traces on the operation `POST /api/orders` that have an erroneous root span: - -``` -{ - resource.service.name="frontend" && - name = "POST /api/orders" && - status = error -} -``` - -This example finds all traces on the operation `POST /api/orders` that return with an HTTP 5xx error: - -``` -{ - resource.service.name="frontend" && - name = "POST /api/orders" && - span.http.status_code >= 500 -} -``` - -### Find traces that have a particuliar behavior - -You can use query filtering on multiple spans of the traces. -This example locates all the traces of the `GET /api/products/{id}` operation that access a database. It's a convenient request to identify abnormal access ratios to the database caused by caching problems. - -``` -{span.service.name="frontend" && name = "GET /api/products/{id}"} && {.db.system="postgresql"} -``` - -### Find traces going through `production` and `staging` instances - -This example finds traces that go through `production` and `staging` instances. -It's a convenient request to identify misconfigurations and leaks across production and non-production environments. 
- -``` -{ resource.deployment.environment = "production" } && { resource.deployment.environment = "staging" } -``` - -### Other examples - -Find any trace with a `deployment.environment` attribute set to `production` and `http.status_code` attribute set to `200`: - -``` -{ .deployment.environment = "production" && .http.status_code = 200 } -``` - -Find any trace where spans within it have a `deployment.environment` resource attribute set to `production` and a span `http.status_code` attribute set to `200`. In previous examples, all conditions had to be true on one span. These conditions can be true on either different spans or the same spans. - -``` -{ resource.deployment.environment = "production" } && { span.http.status_code = 200 } -``` - -Find any trace where any span has an `http.method` attribute set to `GET` as well as a `status` attribute set to `ok`, where any other span also exists that has an `http.method` attribute set to `DELETE`, but does not have a `status` attribute set to `ok`: - -``` -{ span.http.method = "GET" && status = ok } && { span.http.method = "DELETE" && status != ok } -``` - -Find any trace with a `deployment.environment` attribute that matches the regex `prod-.*` and `http.status_code` attribute set to `200`: - -``` -{ resource.deployment.environment =~ "prod-.*" && span.http.status_code = 200 } -``` diff --git a/docs/sources/tempo/traceql/metrics-queries/_index.md b/docs/sources/tempo/traceql/metrics-queries/_index.md index 817fd5dd005..890634a0480 100644 --- a/docs/sources/tempo/traceql/metrics-queries/_index.md +++ b/docs/sources/tempo/traceql/metrics-queries/_index.md @@ -6,6 +6,17 @@ weight: 600 keywords: - metrics query - TraceQL metrics +refs: + solve-problems-mq: + - pattern: /docs/tempo/ + destination: https://grafana.com/docs/tempo//traceql/metrics-queries/solve-problems-metrics-queries/ + - pattern: /docs/enterprise-traces/ + destination: https://grafana.com/docs/enterprise-traces//traceql/metrics-queries/solve-problems-metrics-queries/ + mq-functions: + - pattern: /docs/tempo/ + destination: https://grafana.com/docs/tempo//traceql/metrics-queries/functions/ + - pattern: /docs/enterprise-traces/ + destination: https://grafana.com/docs/enterprise-traces//traceql/metrics-queries/functions/ --- # TraceQL metrics queries @@ -43,7 +54,7 @@ For more information about the RED method, refer to [The RED Method: how to inst You can write TraceQL metrics queries to compute rate, errors, and durations over different groups of spans. -For more information on how to use TraceQL metrics to investigate issues, refer to [Solve problems with metrics queries](./solve-problems-metrics-queries). +For more information on how to use TraceQL metrics to investigate issues, refer to [Solve problems with metrics queries](ref:solve-problems-mq). ## Enable and use TraceQL metrics @@ -56,14 +67,14 @@ To run TraceQL metrics queries in Grafana, you need Grafana Cloud or Grafana 10. No extra configuration is needed. Use a Tempo data source that points to a Tempo database with TraceQL metrics enabled. -Refer to [Solve problems using metrics queries](./solve-problems-metrics-queries/) for some real-world examples. +Refer to [Solve problems using metrics queries](ref:solve-problems-mq) for some real-world examples. ### Functions TraceQL metrics queries currently include the following functions for aggregating over groups of spans: `rate`, `count_over_time`, `max_over_time`, `min_over_time`, `avg_over_time`, `quantile_over_time`, `histogram_over_time`, and `compare`. 
These functions can be added as an operator at the end of any TraceQL query. -For detailed information and example queries for each function, refer to [TraceQL metrics functions](./functions). +For detailed information and example queries for each function, refer to [TraceQL metrics functions](ref:mq-functions). ### Exemplars diff --git a/docs/sources/tempo/traceql/metrics-queries/functions.md b/docs/sources/tempo/traceql/metrics-queries/functions.md index 3116946b22a..429e0d572c5 100644 --- a/docs/sources/tempo/traceql/metrics-queries/functions.md +++ b/docs/sources/tempo/traceql/metrics-queries/functions.md @@ -19,10 +19,10 @@ TraceQL supports `rate`, `count_over_time`, `min_over_time`, `avg_over_time`, `q These functions can be added as an operator at the end of any TraceQL query. `rate` -: Calculates the number of matching spans per second +: Calculates the number of matching spans per second. `count_over_time` -: Counts the number of matching spans per time interval (refer to the [`step` API parameter](https://grafana.com/docs/tempo//api_docs/#traceql-metrics)). +: Counts the number of matching spans per time interval (refer to the [`step` API parameter](https://grafana.com/docs/tempo//api_docs)). `min_over_time` : Returns the minimum value for the specified attribute across all matching spans per time interval (refer to the [`step` API parameter](https://grafana.com/docs/tempo//api_docs/#traceql-metrics)). @@ -34,10 +34,10 @@ These functions can be added as an operator at the end of any TraceQL query. : Returns the average value for the specified attribute across all matching spans per time interval (refer to the [`step` API parameter](https://grafana.com/docs/tempo//api_docs/#traceql-metrics)). `quantile_over_time` -: The quantile of the values in the specified interval +: The quantile of the values in the specified interval. `histogram_over_time` -: Evaluate frequency distribution over time. Example: `histogram_over_time(duration) by (span.foo)` +: Evaluate frequency distribution over time. Example: `histogram_over_time(duration) by (span.foo)`. `compare` : Used to split the stream of spans into two groups: a selection and a baseline. The function returns time-series for all attributes found on the spans to highlight the differences between the two groups. @@ -87,7 +87,6 @@ The `count_over_time()` function counts the number of matching spans per time in The time interval that the count will be computed over is set by the `step` parameter. For more information, refer to the [`step` API parameter](https://grafana.com/docs/tempo//api_docs/#traceql-metrics). - ### Example This example counts the number of spans with name `"GET /:endpoint"` broken down by status code. You might see that there are 10 `"GET /:endpoint"` spans with status code 200 and 15 `"GET /:endpoint"` spans with status code 400. @@ -186,7 +185,7 @@ You could use a similar query to know what the 50th percentile and 95th percenti The `compare` function is used to split a set of spans into two groups: a selection and a baseline. It returns time-series for all attributes found on the spans to highlight the differences between the two groups. -This is a powerful function that's best understood by using the [**Comparison** tab in Explore Traces](https://grafana.com/docs/grafana//explore/simplified-exploration/traces/investigate/#comparison). 
+This is a powerful function that's best understood by using the [**Comparison** tab in Explore Traces](https://grafana.com/docs/grafana//explore/simplified-exploration/traces/investigate/#comparison).
+You can also understand this function by looking at the example outputs below.

The function is used like other metrics functions: when it's placed after any trace query, it converts the query into a metrics query:
diff --git a/docs/sources/tempo/traceql/metrics-queries/solve-problems-metrics-queries.md b/docs/sources/tempo/traceql/metrics-queries/solve-problems-metrics-queries.md
index 7f2c65730c8..aa2bfb55464 100644
--- a/docs/sources/tempo/traceql/metrics-queries/solve-problems-metrics-queries.md
+++ b/docs/sources/tempo/traceql/metrics-queries/solve-problems-metrics-queries.md
@@ -6,6 +6,12 @@ weight: 600
keywords:
  - metrics query
  - TraceQL metrics
+refs:
+  metrics-generator:
+    - pattern: /docs/tempo/
+      destination: https://grafana.com/docs/tempo//metrics-generator/
+    - pattern: /docs/enterprise-traces/
+      destination: https://grafana.com/docs/enterprise-traces//metrics-generator/
---

# Solve problems with trace metrics queries

@@ -18,8 +24,9 @@ This page provides an example of how you can investigate the rate of incoming re

## RED metrics and queries

The Tempo metrics-generator emits metrics with pre-configured labels for Rate, Error, and Duration (RED) metrics and service graph edges.
-Generated metric labels vary, but always include the service name (in service graph metrics, as a client and/or a server type).
-For more information, refer to the [metrics-generator documentation](../../metrics-generator/).
+Generated metric labels vary, but always include the service name.
+In service graph metrics, for example, the service name appears as a client, a server, or both.
+For more information, refer to the [metrics-generator documentation](ref:metrics-generator).

You can use these metrics to get an overview of application performance.
The metrics can be directly correlated to the trace spans that are available for querying.

@@ -27,7 +34,8 @@ The metrics can be directly correlated to the trace spans that are available for

TraceQL metrics allow a user to query metrics from traces directly from Tempo instead of requiring the metrics-generator component and an accompanying TSDB.

{{< admonition type="note" >}}
-TraceQL metrics are constrained to a 24-hour range window, and aren't available as a Grafana Managed Alerts source. For any metrics that you want to query over longer time ranges, use for alerting, or retain for more than 30 days, use the metrics-generator to store these metrics in Prometheus, Mimir, or other Prometheus-compatible TSDB and continue to use PromQL for querying.
+TraceQL metrics are constrained to a 24-hour range window, and aren't available as a Grafana Managed Alerts source.
+For any metrics that you want to query over longer time ranges, use for alerting, or retain for more than 30 days, use the metrics-generator to store these metrics in Prometheus, Mimir, or another Prometheus-compatible TSDB, and continue to use PromQL for querying.
{{< /admonition >}} ## Investigate the rate of incoming requests diff --git a/docs/sources/tempo/traceql/query-editor/_index.md b/docs/sources/tempo/traceql/query-editor/_index.md index 858afaa7d3a..c22163d9647 100644 --- a/docs/sources/tempo/traceql/query-editor/_index.md +++ b/docs/sources/tempo/traceql/query-editor/_index.md @@ -11,14 +11,25 @@ keywords: - Tempo query language - query editor - TraceQL +refs: + traceql: + - pattern: /docs/tempo/ + destination: https://grafana.com/docs/tempo//traceql/ + - pattern: /docs/enterprise-traces/ + destination: https://grafana.com/docs/enterprise-traces//traceql/ + service-graph-view: + - pattern: /docs/tempo/ + destination: https://grafana.com/docs/tempo//metrics-generator/service-graph-view/ + - pattern: /docs/enterprise-traces/ + destination: https://grafana.com/docs/enterprise-traces//metrics-generator/service-graph-view/ --- # Write TraceQL queries in Grafana The Tempo data source's query editor helps you query and display traces from Tempo in [Explore](https://grafana.com/docs/grafana//explore/). -The queries use [TraceQL](/docs/tempo/latest/traceql), the query language designed specifically for tracing. +The queries use [TraceQL](ref:traceql), the query language designed specifically for tracing. -For general documentation on querying data sources in Grafana, refer to [Query and transform data](/docs/grafana//panels-visualizations/query-transform-data/). +For general documentation on querying data sources in Grafana, refer to [Query and transform data](https://grafana.com/docs/grafana//panels-visualizations/query-transform-data/). ## Before you begin @@ -35,7 +46,7 @@ The three **Query types** are: - The **Search** query builder provides a user interface for building a TraceQL query. - The **TraceQL** query editor lets you write your own TraceQL query with assistance from autocomplete. -- The **Service Graph** view displays a visual relationship between services. Refer to the [Service graph view](https://grafana.com/docs/tempo//metrics-generator/service-graph-view/) documentation for more information. +- The **Service Graph** view displays a visual relationship between services. Refer to the [Service graph view](ref:service-graph-view) documentation for more information. ### Search query builder @@ -68,7 +79,7 @@ Using the service graph view, you can: - Determine how long the slowest queries in your service take to complete - Examine all traces that contain spans of particular interest based on rate, error, and duration values (RED signals) -For more information about the service graph, refer to [Service graph view](https://grafana.com/docs/tempo//metrics-generator/service-graph-view/). +For more information about the service graph, refer to [Service graph view](ref:service-graph-view). ![Screenshot of the Service Graph view](/media/docs/grafana/data-sources/tempo/query-editor/tempo-ds-query-service-graph.png) diff --git a/docs/sources/tempo/troubleshooting/_index.md b/docs/sources/tempo/troubleshooting/_index.md index ba1ba9a2750..98227f402bd 100644 --- a/docs/sources/tempo/troubleshooting/_index.md +++ b/docs/sources/tempo/troubleshooting/_index.md @@ -4,7 +4,7 @@ menuTitle: Troubleshoot description: Learn how to troubleshoot operational issues for Grafana Tempo. 
weight: 700 aliases: - - ../operations/troubleshooting/ + - ../operations/troubleshooting/ # https://grafana.com/docs/tempo//operations/troubleshooting/ --- # Troubleshoot Tempo @@ -16,18 +16,22 @@ In addition, the [Tempo runbook](https://github.com/grafana/tempo/blob/main/oper ## Sending traces -- [Spans are being refused with "pusher failed to consume trace data"](https://grafana.com/docs/tempo//troubleshooting/max-trace-limit-reached/) -- [Is Grafana Alloy sending to the backend?](https://grafana.com/docs/tempo//troubleshooting/alloy/) +- [Spans are being refused with "pusher failed to consume trace data"](https://grafana.com/docs/tempo//troubleshooting/send-traces/max-trace-limit-reached/) +- [Is Grafana Alloy sending to the backend?](https://grafana.com/docs/tempo//troubleshooting/send-traces/alloy/) ## Querying -- [Unable to find my traces in Tempo](https://grafana.com/docs/tempo//troubleshooting/unable-to-see-trace/) -- [Error message "Too many jobs in the queue"](https://grafana.com/docs/tempo//troubleshooting/too-many-jobs-in-queue/) -- [Queries fail with 500 and "error using pageFinder"](https://grafana.com/docs/tempo//troubleshooting/bad-blocks/) -- [I can search traces, but there are no service name or span name values available](https://grafana.com/docs/tempo//troubleshooting/search-tag) -- [Error message `response larger than the max ( vs )`](https://grafana.com/docs/tempo//troubleshooting/response-too-large/) -- [Search results don't match trace lookup results with long-running traces](https://grafana.com/docs/tempo//troubleshooting/long-running-traces/) +- [Unable to find my traces in Tempo](https://grafana.com/docs/tempo//troubleshooting/querying/unable-to-see-trace/) +- [Error message "Too many jobs in the queue"](https://grafana.com/docs/tempo//troubleshooting/querying/too-many-jobs-in-queue/) +- [Queries fail with 500 and "error using pageFinder"](https://grafana.com/docs/tempo//troubleshooting/querying/bad-blocks/) +- [I can search traces, but there are no service name or span name values available](https://grafana.com/docs/tempo//troubleshooting/querying/search-tag) +- [Error message `response larger than the max ( vs )`](https://grafana.com/docs/tempo//troubleshooting/querying/response-too-large/) +- [Search results don't match trace lookup results with long-running traces](https://grafana.com/docs/tempo//troubleshooting/querying/long-running-traces/) ## Metrics-generator - [Metrics or service graphs seem incomplete](https://grafana.com/docs/tempo//troubleshooting/metrics-generator/) + +## Out-of-memory errors + +- [Set the max attribute size to help control out of memory errors](https://grafana.com/docs/tempo//troubleshooting/out-of-memory-errors/) diff --git a/docs/sources/tempo/troubleshooting/max-trace-limit-reached.md b/docs/sources/tempo/troubleshooting/max-trace-limit-reached.md deleted file mode 100644 index 813a1fec01e..00000000000 --- a/docs/sources/tempo/troubleshooting/max-trace-limit-reached.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -title: Distributor refusing spans -description: Troubleshoot distributor refusing spans -weight: 471 -aliases: -- ../operations/troubleshooting/max-trace-limit-reached/ ---- - -# Distributor refusing spans - -The two most likely causes of refused spans are unhealthy ingesters or trace limits being exceeded. 
- -To log spans that are discarded, add the `--distributor.log_discarded_spans.enabled` flag to the distributor or -adjust the [distributor config]({{< relref "../configuration#distributor" >}}): - -```yaml -distributor: - log_discarded_spans: - enabled: true - include_all_attributes: false # set to true for more verbose logs -``` - -Adding the flag logs all discarded spans, as shown below: - -``` -level=info ts=2024-08-19T16:06:25.880684385Z caller=distributor.go:767 msg=discarded spanid=c2ebe710d2e2ce7a traceid=bd63605778e3dbe935b05e6afd291006 -level=info ts=2024-08-19T16:06:25.881169385Z caller=distributor.go:767 msg=discarded spanid=5352b0cb176679c8 traceid=ba41cae5089c9284e18bca08fbf10ca2 -``` - -## Unhealthy ingesters - -Unhealthy ingesters can be caused by failing OOMs or scale down events. -If you have unhealthy ingesters, your log line will look something like this: - -``` -msg="pusher failed to consume trace data" err="at least 2 live replicas required, could only find 1" -``` - -In this case, you may need to visit the ingester [ring page]({{< relref "../operations/consistent_hash_ring" >}}) at `/ingester/ring` on the Distributors -and "Forget" the unhealthy ingesters. This will work in the short term, but the long term fix is to stabilize your ingesters. - -## Trace limits reached - -In high volume tracing environments, the default trace limits are sometimes not sufficient. -These limits exist to protect Tempo and prevent it from OOMing, crashing or otherwise allow tenants to not DOS each other. -If you are refusing spans due to limits, you will see logs like this at the distributor: - -``` -msg="pusher failed to consume trace data" err="rpc error: code = FailedPrecondition desc = TRACE_TOO_LARGE: max size of trace (52428800) exceeded while adding 15632 bytes to trace a0fbd6f9ac5e2077d90a19551dd67b6f for tenant single-tenant" -msg="pusher failed to consume trace data" err="rpc error: code = FailedPrecondition desc = LIVE_TRACES_EXCEEDED: max live traces per tenant exceeded: per-user traces limit (local: 60000 global: 0 actual local: 60000) exceeded" -msg="pusher failed to consume trace data" err="rpc error: code = ResourceExhausted desc = RATE_LIMITED: ingestion rate limit (15000000 bytes) exceeded while adding 10 bytes" -``` - -You will also see the following metric incremented. The `reason` label on this metric will contain information about the refused reason. - -``` -tempo_discarded_spans_total -``` - -In this case, use available configuration options to [increase limits]({{< relref "../configuration#ingestion-limits" >}}). - -## Client resets connection - -When the client resets the connection before the distributor can consume the trace data, you see logs like this: - -``` -msg="pusher failed to consume trace data" err="context canceled" -``` - -This issue needs to be fixed on the client side. To inspect which clients are causing the issue, logging discarded spans -with `include_all_attributes: true` may help. - -Note that there may be other reasons for a closed context as well. Identifying the reason for a closed context is -not straightforward and may require additional debugging. 
\ No newline at end of file
diff --git a/docs/sources/tempo/troubleshooting/metrics-generator.md b/docs/sources/tempo/troubleshooting/metrics-generator.md
index f6d36f0ca67..aa3b51de07d 100644
--- a/docs/sources/tempo/troubleshooting/metrics-generator.md
+++ b/docs/sources/tempo/troubleshooting/metrics-generator.md
@@ -4,24 +4,24 @@ menuTitle: Metrics-generator
 description: Gain an understanding of how to debug metrics quality issues.
 weight: 500
 aliases:
-- ../operations/troubleshooting/metrics-generator/
+- ../operations/troubleshooting/metrics-generator/ # https://grafana.com/docs/tempo//operations/troubleshooting/metrics-generator/
 ---

 # Troubleshoot metrics-generator

 If you are concerned with data quality issues in the metrics-generator, we'd first recommend:

-- Reviewing your telemetry pipeline to determine the number of dropped spans. We are only looking for major issues here.
-- Reviewing the [service graph documentation]({{< relref "../metrics-generator/service_graphs" >}}) to understand how they are built.
+- Reviewing your telemetry pipeline to determine the number of dropped spans. You are only looking for major issues here.
+- Reviewing the [service graph documentation](https://grafana.com/docs/tempo//metrics-generator/service_graphs/) to understand how they are built.

-If everything seems ok from these two perspectives, consider the following topics to help resolve general issues with all metrics and span metrics specifically.
+If everything seems acceptable from these two perspectives, consider the following topics to help resolve general issues with all metrics and span metrics specifically.

 ## All metrics

 ### Dropped spans in the distributor

-The distributor has a queue of outgoing spans to the metrics-generators. If that queue is full then the distributor
-will drop spans before they reach the generator. Use the following metric to determine if that is happening:
+The distributor has a queue of outgoing spans to the metrics-generators.
+If the queue is full, then the distributor drops spans before they reach the generator. Use the following metric to determine if that's happening:

 ```
 sum(rate(tempo_distributor_queue_pushes_failures_total{}[1m]))
diff --git a/docs/sources/tempo/troubleshooting/out-of-memory-errors.md b/docs/sources/tempo/troubleshooting/out-of-memory-errors.md
new file mode 100644
index 00000000000..32ed71dfb40
--- /dev/null
+++ b/docs/sources/tempo/troubleshooting/out-of-memory-errors.md
@@ -0,0 +1,102 @@
+---
+title: Troubleshoot out-of-memory errors
+menuTitle: Out-of-memory errors
+description: Gain an understanding of how to debug out-of-memory (OOM) errors.
+weight: 600
+---
+
+# Troubleshoot out-of-memory errors
+
+Learn about out-of-memory (OOM) issues and how to troubleshoot them.
+
+## Set the max attribute size to help control out of memory errors
+
+Tempo queriers can run out of memory when fetching traces that have spans with very large attributes.
+This issue has been observed when trying to fetch a single trace using the [`tracebyID` endpoint](https://grafana.com/docs/tempo/latest/api_docs/#query).
+
+To avoid these out-of-memory crashes, use `max_span_attr_byte` to limit the maximum allowable size of any individual attribute.
+Any keys or values that exceed the configured limit are truncated before storing.
+
+Use the `tempo_distributor_attributes_truncated_total` metric to track how many attributes are truncated.
+
+```yaml
+  # Optional
+  # Configures the max size an attribute can be. Any key or value that exceeds this limit will be truncated before storing
+  # Setting this parameter to '0' would disable this check against attribute size
+  [max_span_attr_byte: <int> | default = 2048]
+```
+
+Refer to the [configuration for distributors](https://grafana.com/docs/tempo//configuration/#set-max-attribute-size-to-help-control-out-of-memory-errors) documentation for more information.
+
+## Max trace size
+
+Traces that are long-running (minutes or hours) or large (100K to 1M spans) spike the memory usage of each component that encounters them.
+This is because Tempo treats traces as single units, and keeps all data for a trace together to enable features like structural queries and analysis.
+
+When reading a large trace, it can spike the memory usage of the read components:
+
+* query-frontend
+* querier
+* ingester
+* metrics-generator
+
+When writing a large trace, it can spike the memory usage of the write components:
+
+* ingester
+* compactor
+* metrics-generator
+
+Start with a smaller trace size limit of 15 MB, and increase it as needed.
+With an average span size of 300 bytes, this allows for 50K spans per trace.
+
+Always ensure that the limit is configured, and the largest recommended limit is 60 MB.
+
+Configure the limit in the per-tenant overrides:
+
+```yaml
+overrides:
+  'tenant123':
+    max_bytes_per_trace: 1.5e+07
+```
+
+Refer to the [Overrides](https://grafana.com/docs/tempo//configuration/#standard-overrides) documentation for more information.
+
+## Large attributes
+
+Very large attributes, 10 KB or larger, can spike the memory usage of each component when they are encountered.
+Tempo's Parquet format uses dictionary-encoded columns, which works well for repeated values.
+However, for very large and high-cardinality attributes, this can require a large amount of memory.
+
+A common source of large attributes is auto-instrumentation in these areas:
+
+* HTTP
+  * Request or response bodies
+  * Large headers
+    * [http.request.header.<key>](https://opentelemetry.io/docs/specs/semconv/attributes-registry/http/)
+  * Large URLs
+    * http.url
+    * [url.full](https://opentelemetry.io/docs/specs/semconv/attributes-registry/url/)
+* Databases
+  * Full query statements
+    * db.statement
+    * [db.query.text](https://opentelemetry.io/docs/specs/semconv/attributes-registry/db/)
+* Queues
+  * Message bodies
+
+When reading these attributes, they can spike the memory usage of the read components:
+
+* query-frontend
+* querier
+* ingester
+* metrics-generator
+
+When writing these attributes, they can spike the memory usage of the write components:
+
+* ingester
+* compactor
+* metrics-generator
+
+You can [automatically limit attribute sizes](https://github.com/grafana/tempo/pull/4335) using [`max_span_attr_byte`](https://grafana.com/docs/tempo//configuration/#set-max-attribute-size-to-help-control-out-of-memory-errors).
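+
+As a minimal sketch, this setting lives in the distributor block per the linked configuration documentation (the value shown is the documented default; tune it to your workload):
+
+```yaml
+distributor:
+  # Truncate any single attribute key or value larger than 2048 bytes.
+  max_span_attr_byte: 2048
+```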
+You can also use these options:
+
+* Manually update application instrumentation to remove or limit these attributes
+* Drop the attributes in the tracing pipeline using the [attributes processor](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/attributesprocessor)
diff --git a/docs/sources/tempo/troubleshooting/querying/_index.md b/docs/sources/tempo/troubleshooting/querying/_index.md
new file mode 100644
index 00000000000..cc54fe56333
--- /dev/null
+++ b/docs/sources/tempo/troubleshooting/querying/_index.md
@@ -0,0 +1,12 @@
+---
+title: Issues with querying
+menuTitle: Querying
+description: Troubleshoot issues related to querying.
+weight: 300
+---
+
+# Issues with querying
+
+Learn about issues related to querying.
+
+{{< section withDescriptions="true" >}}
\ No newline at end of file
diff --git a/docs/sources/tempo/troubleshooting/bad-blocks.md b/docs/sources/tempo/troubleshooting/querying/bad-blocks.md
similarity index 79%
rename from docs/sources/tempo/troubleshooting/bad-blocks.md
rename to docs/sources/tempo/troubleshooting/querying/bad-blocks.md
index c397170e382..50732eb3891 100644
--- a/docs/sources/tempo/troubleshooting/bad-blocks.md
+++ b/docs/sources/tempo/troubleshooting/querying/bad-blocks.md
@@ -3,7 +3,8 @@ title: Bad blocks
 description: Troubleshoot queries failing with an error message indicating bad blocks.
 weight: 475
 aliases:
-- ../operations/troubleshooting/bad-blocks/
+- ../../operations/troubleshooting/bad-blocks/ # https://grafana.com/docs/tempo//operations/troubleshooting/bad-blocks/
+- ../bad-blocks/ # https://grafana.com/docs/tempo//troubleshooting/bad-blocks/
 ---

 # Bad blocks
@@ -26,7 +27,7 @@ To fix such a block, first download it onto a machine where you can run the `tem
 Next run the `tempo-cli`'s `gen index` / `gen bloom` commands depending on which file is corrupt/deleted.
 The command will create a fresh index/bloom-filter from the data file at the required location (in the block folder).

-To view all of the options for this command, see the [cli docs]({{< relref "../operations/tempo_cli" >}}).
+To view all of the options for this command, see the [CLI docs](https://grafana.com/docs/tempo//operations/tempo_cli/).

 Finally, upload the generated index or bloom-filter onto the object store backend under the folder for the block.
diff --git a/docs/sources/tempo/troubleshooting/long-running-traces.md b/docs/sources/tempo/troubleshooting/querying/long-running-traces.md
similarity index 88%
rename from docs/sources/tempo/troubleshooting/long-running-traces.md
rename to docs/sources/tempo/troubleshooting/querying/long-running-traces.md
index 2c58f694cfc..d1080e74667 100644
--- a/docs/sources/tempo/troubleshooting/long-running-traces.md
+++ b/docs/sources/tempo/troubleshooting/querying/long-running-traces.md
@@ -3,7 +3,8 @@ title: Long-running traces
 description: Troubleshoot search results when using long-running traces
 weight: 479
 aliases:
-  - ../operations/troubleshooting/long-running-traces/
+  - ../../operations/troubleshooting/long-running-traces/ # https://grafana.com/docs/tempo//operations/troubleshooting/long-running-traces/
+  - ../long-running-traces/ # https://grafana.com/docs/tempo//troubleshooting/long-running-traces/
 ---

 # Long-running traces
@@ -20,7 +21,7 @@ different blocks, which can lead to inconsistency in a few ways:
    matching blocks, which yields greater accuracy when combined.
 1. When using [`spanset`
-   operators](https://grafana.com/docs/tempo/latest/traceql/#combining-spansets),
+   operators](https://grafana.com/docs/tempo//traceql/#combine-spansets),
    Tempo only evaluates the contiguous trace of the current block. This means
    that for a single block the conditions may evaluate to false, but to
    consider all parts of the trace from all blocks would evaluate true.
diff --git a/docs/sources/tempo/troubleshooting/response-too-large.md b/docs/sources/tempo/troubleshooting/querying/response-too-large.md
similarity index 81%
rename from docs/sources/tempo/troubleshooting/response-too-large.md
rename to docs/sources/tempo/troubleshooting/querying/response-too-large.md
index 53fd232cd32..a38934bf49d 100644
--- a/docs/sources/tempo/troubleshooting/response-too-large.md
+++ b/docs/sources/tempo/troubleshooting/querying/response-too-large.md
@@ -3,12 +3,13 @@ title: Response larger than the max
 description: Troubleshoot response larger than the max error message
 weight: 477
 aliases:
-- ../operations/troubleshooting/response-too-large/
+- ../../operations/troubleshooting/response-too-large/ # https://grafana.com/docs/tempo//operations/troubleshooting/response-too-large/
+- ../response-too-large/ # https://grafana.com/docs/tempo//troubleshooting/response-too-large/
 ---

 # Response larger than the max

-The error message will take a similar form to the following:
+The error message is similar to the following:

 ```
 500 Internal Server Error Body: response larger than the max ( vs )
diff --git a/docs/sources/tempo/troubleshooting/querying/search-tag.md b/docs/sources/tempo/troubleshooting/querying/search-tag.md
new file mode 100644
index 00000000000..8ad6189bc1f
--- /dev/null
+++ b/docs/sources/tempo/troubleshooting/querying/search-tag.md
@@ -0,0 +1,29 @@
+---
+title: Tag search
+description: Troubleshoot No options found in Grafana tag search
+weight: 476
+aliases:
+- ../../operations/troubleshooting/search-tag/ # https://grafana.com/docs/tempo//operations/troubleshooting/search-tag/
+- ../search-tag/ # https://grafana.com/docs/tempo//troubleshooting/search-tag/
+---
+
+# Tag search
+
+An issue occurs while searching for traces in Grafana Explore. The **Service Name** and **Span Name** drop-down lists are empty, and there is a `No options found` message.
+
+HTTP requests to the Tempo query frontend endpoint at `/api/search/tag/service.name/values` respond with an empty set.
+
+## Root cause
+
+A cap on the size of tags causes this issue.
+
+Tempo returns an empty result when a tag values query exceeds the size configured by the
+`max_bytes_per_tag_values_query` parameter.
+
+## Solutions
+
+There are two main solutions to this issue:
+
+* Reduce the cardinality of tags pushed to Tempo. Reducing the number of unique tag values reduces the size returned by a tag search query.
+* Increase the `max_bytes_per_tag_values_query` parameter in the [overrides](https://grafana.com/docs/tempo//configuration/#overrides) block of your Tempo configuration to a value as high as 50 MB, as shown in the sketch below.
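+
+For example, a minimal sketch of raising this limit for a single tenant in the per-tenant overrides (the tenant name and value are placeholders; tune them to your environment):
+
+```yaml
+overrides:
+  'tenant123':
+    max_bytes_per_tag_values_query: 5000000 # ~5 MB
+```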
diff --git a/docs/sources/tempo/troubleshooting/querying/too-many-jobs-in-queue.md b/docs/sources/tempo/troubleshooting/querying/too-many-jobs-in-queue.md
new file mode 100644
index 00000000000..f537bebf74f
--- /dev/null
+++ b/docs/sources/tempo/troubleshooting/querying/too-many-jobs-in-queue.md
@@ -0,0 +1,50 @@
+---
+title: Too many jobs in the queue
+description: Troubleshoot too many jobs in the queue
+weight: 474
+aliases:
+- ../../operations/troubleshooting/too-many-jobs-in-queue/ # https://grafana.com/docs/tempo//operations/troubleshooting/too-many-jobs-in-queue/
+- ../too-many-jobs-in-queue/ # https://grafana.com/docs/tempo//troubleshooting/too-many-jobs-in-queue/
+---
+
+# Too many jobs in the queue
+
+The error message might also be:
+
+- `queue doesn't have room for 100 jobs`
+- `failed to add a job to work queue`
+
+You may see this error if the compactor isn't running and the blocklist size has exploded.
+Possible reasons why the compactor may not be running are:
+
+- Insufficient permissions.
+- Compactor sitting idle because no block is hashing to it.
+- Incorrect configuration settings.
+
+## Diagnose the issue
+
+- Check the metric `tempodb_compaction_bytes_written_total`.
+  If this is greater than zero (0), the compactor is running and writing to the backend.
+- Check the metric `tempodb_compaction_errors_total`.
+  If this metric is greater than zero (0), check the logs of the compactor for an error message.
+
+## Solutions
+
+- Verify that the compactor has the LIST, GET, PUT, and DELETE permissions on the bucket objects.
+  - If these permissions are missing, assign them to the compactor container.
+  - For detailed information, refer to the [Amazon S3 permissions](https://grafana.com/docs/tempo//configuration/hosted-storage/s3/).
+- If there's a compactor sitting idle while others are running, port-forward to the compactor's HTTP endpoint. Then go to `/compactor/ring` and click **Forget** on the inactive compactor.
+- Check the following configuration parameters to ensure the settings are correct:
+  - `max_block_bytes` to determine when the ingester cuts blocks. A good number is anywhere from 100 MB to 2 GB depending on the workload.
+  - `max_compaction_objects` to determine the max number of objects in a compacted block. This should be relatively high, generally in the millions.
+  - `retention_duration` for how long traces should be retained in the backend.
+- Check the storage section of the configuration and increase `queue_depth`. Bear in mind that a deeper queue could mean longer
+  waiting times for query responses. Adjust `max_workers` accordingly, which configures the number of parallel workers
+  that query backend blocks.
+
+```yaml
+storage:
+  trace:
+    pool:
+      max_workers: 100 # worker pool determines the number of parallel requests to the object store backend
+      queue_depth: 10000
+```
diff --git a/docs/sources/tempo/troubleshooting/unable-to-see-trace.md b/docs/sources/tempo/troubleshooting/querying/unable-to-see-trace.md
similarity index 92%
rename from docs/sources/tempo/troubleshooting/unable-to-see-trace.md
rename to docs/sources/tempo/troubleshooting/querying/unable-to-see-trace.md
index 20fe2567ea7..1009c099d32 100644
--- a/docs/sources/tempo/troubleshooting/unable-to-see-trace.md
+++ b/docs/sources/tempo/troubleshooting/querying/unable-to-see-trace.md
@@ -3,15 +3,16 @@ title: Unable to find traces
 description: Troubleshoot missing traces
 weight: 473
 aliases:
-- ../operations/troubleshooting/missing-trace/
-- ../operations/troubleshooting/unable-to-see-trace/
+- ../../operations/troubleshooting/missing-trace/ # https://grafana.com/docs/tempo//operations/troubleshooting/missing-trace/
+- ../../operations/troubleshooting/unable-to-see-trace/ # https://grafana.com/docs/tempo//operations/troubleshooting/unable-to-see-trace/
+- ../unable-to-see-trace/ # https://grafana.com/docs/tempo//troubleshooting/unable-to-see-trace/
 ---

 # Unable to find traces

 The two main causes of missing traces are:

-- Issues in ingestion of the data into Tempo. Spans are either not being sent correctly to Tempo or they are not getting sampled.
+- Issues in ingestion of the data into Tempo. Spans are either not sent correctly to Tempo or they aren't getting sampled.
 - Issues querying for traces that have been received by Tempo.

 ## Section 1: Diagnose and fix ingestion issues
@@ -106,8 +107,8 @@ If the pipeline isn't reporting any dropped spans, check whether application spa
 - If you require a higher ingest volume, increase the configuration for the rate limiting by adjusting the `max_traces_per_user` property in the [configured override limits](https://grafana.com/docs/tempo//configuration/#standard-overrides).

 {{< admonition type="note" >}}
-Check the [ingestion limits page]({{< relref "../configuration#ingestion-limits" >}}) for further information on limits.
-{{% /admonition %}}
+Check the [ingestion limits page](https://grafana.com/docs/tempo//configuration/#overrides) for further information on limits.
+{{< /admonition >}}

 ## Section 3: Diagnose and fix issues with querying traces
diff --git a/docs/sources/tempo/troubleshooting/search-tag.md b/docs/sources/tempo/troubleshooting/search-tag.md
deleted file mode 100644
index 442f025c5c7..00000000000
--- a/docs/sources/tempo/troubleshooting/search-tag.md
+++ /dev/null
@@ -1,28 +0,0 @@
----
-title: Tag search
-description: Troubleshoot No options found in Grafana tag search
-weight: 476
-aliases:
-- ../operations/troubleshooting/search-tag/
----
-
-# Tag search
-
-An issue occurs while searching for traces in Grafana Explore. The **Service Name** and **Span Name** drop down lists are empty, and there is a `No options found` message.
-
-HTTP requests to Tempo query frontend endpoint at `/api/search/tag/service.name/values` would respond with an empty set.
-
-
-## Root cause
-
-The introduction of a cap on the size of tags causes this issue.
-
-Configuration parameter `max_bytes_per_tag_values_query` causes the return of an empty result
-when a query exceeds the configured value.
-
-## Solutions
-
-There are two main solutions to this issue:
-
-* Reduce the cardinality of tags pushed to Tempo. Reducing the number of unique tag values will reduce the size returned by a tag search query.
-* Increase the `max_bytes_per_tag_values_query` parameter in the [overrides]({{< relref "../configuration#overrides" >}}) block of your Tempo configuration to a value as high as 50MB.
diff --git a/docs/sources/tempo/troubleshooting/send-traces/_index.md b/docs/sources/tempo/troubleshooting/send-traces/_index.md
new file mode 100644
index 00000000000..3c67a5146c4
--- /dev/null
+++ b/docs/sources/tempo/troubleshooting/send-traces/_index.md
@@ -0,0 +1,12 @@
+---
+title: Issues with sending traces
+menuTitle: Sending traces
+description: Troubleshoot issues related to sending traces.
+weight: 200
+---
+
+# Issues with sending traces
+
+Learn about issues related to sending traces.
+
+{{< section withDescriptions="true" >}}
\ No newline at end of file
diff --git a/docs/sources/tempo/troubleshooting/alloy.md b/docs/sources/tempo/troubleshooting/send-traces/alloy.md
similarity index 80%
rename from docs/sources/tempo/troubleshooting/alloy.md
rename to docs/sources/tempo/troubleshooting/send-traces/alloy.md
index 1fc938a0c95..993919ca4a2 100644
--- a/docs/sources/tempo/troubleshooting/alloy.md
+++ b/docs/sources/tempo/troubleshooting/send-traces/alloy.md
@@ -4,14 +4,15 @@ menuTitle: Grafana Alloy
 description: Gain visibility on how many traces are being pushed to Grafana Alloy and if they are making it to the Tempo backend.
 weight: 472
 aliases:
-- ../operations/troubleshooting/agent/
-- ./agent.md # /docs/tempo//troubleshooting/agent.md
+- ../../operations/troubleshooting/agent/ # https://grafana.com/docs/tempo//operations/troubleshooting/agent/
+- ../agent/ # /docs/tempo//troubleshooting/agent/
+- ../alloy/ # https://grafana.com/docs/tempo//troubleshooting/alloy/
 ---

 # Troubleshoot Grafana Alloy

 Sometimes it can be difficult to tell what, if anything, Grafana Alloy is sending along to the backend.
-This document focuses on a few techniques to gain visibility on how many trace spans are pushed to Alloy and if they're making it to the backend.
+This document focuses on a few techniques to gain visibility into how many trace spans are pushed to Alloy and whether they're making it to the backend.

 [OpenTelemetry Collector](https://github.com/open-telemetry/opentelemetry-collector) forms the basis of the tracing pipeline,
 which does a fantastic job of logging network and other issues.

 If your logs are showing no obvious errors, one of the following suggestions may help.

 Alloy publishes a few Prometheus metrics that are useful to determine how much trace traffic it receives and successfully forwards.
 These metrics are a good place to start when diagnosing tracing Alloy issues.

-From the [`otelcol.receiver.otlp`](https://grafana.com/docs/alloy//reference/components/otelcol/otelcol.receiver.otlp/) component:
+From the [`otelcol.receiver.otlp`](https://grafana.com/docs/alloy//reference/components/otelcol/otelcol.receiver.otlp/) component:
 ```
 receiver_accepted_spans_ratio_total
 receiver_refused_spans_ratio_total
 ```

-From the [`otelcol.exporter.otlp`](https://grafana.com/docs/alloy//reference/components/otelcol/otelcol.exporter.otlp/) component:
+From the [`otelcol.exporter.otlp`](https://grafana.com/docs/alloy//reference/components/otelcol/otelcol.exporter.otlp/) component:
 ```
 exporter_sent_spans_ratio_total
 exporter_send_failed_spans_ratio_total
@@ -36,7 +37,7 @@ exporter_send_failed_spans_ratio_total

 Alloy has a Prometheus scrape endpoint, `/metrics`, that you can use to check metrics locally by opening a browser to `http://localhost:12345/metrics`.
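+
+For example, a quick sanity check (a sketch, assuming these metrics are scraped into Prometheus) is to compare the rate of spans accepted by the receiver against the rate successfully sent by the exporter:
+
+```
+sum(rate(receiver_accepted_spans_ratio_total[5m]))
+sum(rate(exporter_sent_spans_ratio_total[5m]))
+```
+
+If the two rates diverge, spans are being refused on ingest or failing to export.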
 The `/metrics` HTTP endpoint of the Alloy HTTP server exposes the Alloy component and controller metrics.
-Refer to the [Monitor the Grafana Alloy component controller](https://grafana.com/docs/alloy/latest/troubleshoot/controller_metrics/) documentation for more information.
+Refer to the [Monitor the Grafana Alloy component controller](https://grafana.com/docs/alloy//troubleshoot/controller_metrics/) documentation for more information.

 ### Check metrics in Grafana Cloud
diff --git a/docs/sources/tempo/troubleshooting/send-traces/max-trace-limit-reached.md b/docs/sources/tempo/troubleshooting/send-traces/max-trace-limit-reached.md
new file mode 100644
index 00000000000..85f54abf99a
--- /dev/null
+++ b/docs/sources/tempo/troubleshooting/send-traces/max-trace-limit-reached.md
@@ -0,0 +1,76 @@
+---
+title: Distributor refusing spans
+description: Troubleshoot distributor refusing spans
+weight: 471
+aliases:
+- ../../operations/troubleshooting/max-trace-limit-reached/ # https://grafana.com/docs/tempo//operations/troubleshooting/max-trace-limit-reached/
+- ../max-trace-limit-reached/ # https://grafana.com/docs/tempo//troubleshooting/max-trace-limit-reached/
+---
+
+# Distributor refusing spans
+
+The two most likely causes of refused spans are unhealthy ingesters or trace limits being exceeded.
+
+To log spans that are discarded, add the `--distributor.log_discarded_spans.enabled` flag to the distributor or
+adjust the [distributor configuration](https://grafana.com/docs/tempo//configuration/#distributor):
+
+```yaml
+distributor:
+  log_discarded_spans:
+    enabled: true
+    include_all_attributes: false # set to true for more verbose logs
+```
+
+Adding the flag logs all discarded spans, as shown below:
+
+```
+level=info ts=2024-08-19T16:06:25.880684385Z caller=distributor.go:767 msg=discarded spanid=c2ebe710d2e2ce7a traceid=bd63605778e3dbe935b05e6afd291006
+level=info ts=2024-08-19T16:06:25.881169385Z caller=distributor.go:767 msg=discarded spanid=5352b0cb176679c8 traceid=ba41cae5089c9284e18bca08fbf10ca2
+```
+
+## Unhealthy ingesters
+
+Unhealthy ingesters can be caused by OOM crashes or scale-down events.
+If you have unhealthy ingesters, your log lines look something like this:
+
+```
+msg="pusher failed to consume trace data" err="at least 2 live replicas required, could only find 1"
+```
+
+In this case, you may need to visit the ingester [ring page](https://grafana.com/docs/tempo//operations/manage-advanced-systems/consistent_hash_ring/) at `/ingester/ring` on the distributors
+and "Forget" the unhealthy ingesters.
+This works in the short term, but the long-term fix is to stabilize your ingesters.
+
+## Trace limits reached
+
+In high-volume tracing environments, the default trace limits are sometimes not sufficient.
+These limits exist to protect Tempo from OOMing or crashing, and to keep tenants from DOSing each other.
+If spans are being refused due to limits, you'll see logs like this at the distributor:
+
+```
+msg="pusher failed to consume trace data" err="rpc error: code = FailedPrecondition desc = TRACE_TOO_LARGE: max size of trace (52428800) exceeded while adding 15632 bytes to trace a0fbd6f9ac5e2077d90a19551dd67b6f for tenant single-tenant"
+msg="pusher failed to consume trace data" err="rpc error: code = FailedPrecondition desc = LIVE_TRACES_EXCEEDED: max live traces per tenant exceeded: per-user traces limit (local: 60000 global: 0 actual local: 60000) exceeded"
+msg="pusher failed to consume trace data" err="rpc error: code = ResourceExhausted desc = RATE_LIMITED: ingestion rate limit (15000000 bytes) exceeded while adding 10 bytes"
+```
+
+You'll also see the following metric incremented. The `reason` label on this metric indicates why the spans were refused.
+
+```
+tempo_discarded_spans_total
+```
+
+In this case, use available configuration options to [increase limits](https://grafana.com/docs/tempo//configuration/#ingestion-limits).
+
+## Client resets connection
+
+When the client resets the connection before the distributor can consume the trace data, you see logs like this:
+
+```
+msg="pusher failed to consume trace data" err="context canceled"
+```
+
+This issue needs to be fixed on the client side. To inspect which clients are causing the issue, logging discarded spans
+with `include_all_attributes: true` may help.
+
+Note that there may be other reasons for a closed context as well. Identifying the reason for a closed context is
+not straightforward and may require additional debugging.
\ No newline at end of file
diff --git a/docs/sources/tempo/troubleshooting/too-many-jobs-in-queue.md b/docs/sources/tempo/troubleshooting/too-many-jobs-in-queue.md
deleted file mode 100644
index a7d4a01fbcb..00000000000
--- a/docs/sources/tempo/troubleshooting/too-many-jobs-in-queue.md
+++ /dev/null
@@ -1,45 +0,0 @@
----
-title: Too many jobs in the queue
-description: Troubleshoot too many jobs in the queue
-weight: 474
-aliases:
-- ../operations/troubleshooting/too-many-jobs-in-queue/
----
-
-# Too many jobs in the queue
-
-The error message might also be
-- `queue doesn't have room for 100 jobs`
-- `failed to add a job to work queue`
-
-You may see this error if the compactor isn’t running and the blocklist size has exploded.
-Possible reasons why the compactor may not be running are:
-
-- Insufficient permissions.
-- Compactor sitting idle because no block is hashing to it.
-- Incorrect configuration settings.
-## Diagnosing the issue
-- Check metric `tempodb_compaction_bytes_written_total`
-If this is greater than zero (0), it means the compactor is running and writing to the backend.
-- Check metric `tempodb_compaction_errors_total`
-If this metric is greater than zero (0), check the logs of the compactor for an error message.
-
-## Solutions
-- Verify that the Compactor has the LIST, GET, PUT, and DELETE permissions on the bucket objects.
-  - If these permissions are missing, assign them to the compactor container.
-  - For detailed information, check
-    https://grafana.com/docs/tempo/latest/configuration/s3/#permissions
-- If there’s a compactor sitting idle while others are running, port-forward to the compactor’s http endpoint. Then go to `/compactor/ring` and click **Forget** on the inactive compactor.
-- Check the following configuration parameters to ensure that there are correct settings:
-  - `max_block_bytes` to determine when the ingester cuts blocks.
A good number is anywhere from 100MB to 2GB depending on the workload. - - `max_compaction_objects` to determine the max number of objects in a compacted block. This should relatively high, generally in the millions. - - `retention_duration` for how long traces should be retained in the backend. -- Check the storage section of the config and increase `queue_depth`. Do bear in mind that a deeper queue could mean longer - waiting times for query responses. Adjust `max_workers` accordingly, which configures the number of parallel workers - that query backend blocks. -``` -storage: - trace: - pool: - max_workers: 100 # worker pool determines the number of parallel requests to the object store backend - queue_depth: 10000 -``` diff --git a/docs/variables.mk b/docs/variables.mk index 01b43593ea7..6dc5598446f 100644 --- a/docs/variables.mk +++ b/docs/variables.mk @@ -1,5 +1,2 @@ # List of projects to provide to the make-docs script. PROJECTS := tempo helm-charts/tempo-distributed - -# Set the DOC_VALIDATOR_IMAGE to match the one defined in CI. -export DOC_VALIDATOR_IMAGE := $(shell sed -En 's, *image: "(grafana/doc-validator.*)",\1,p' "$(shell git rev-parse --show-toplevel)/.github/workflows/doc-validator.yml") diff --git a/example/docker-compose/alloy/docker-compose.yaml b/example/docker-compose/alloy/docker-compose.yaml index e732cb3287e..3a8f1b7777a 100644 --- a/example/docker-compose/alloy/docker-compose.yaml +++ b/example/docker-compose/alloy/docker-compose.yaml @@ -19,11 +19,8 @@ services: - ../shared/tempo.yaml:/etc/tempo.yaml - ./tempo-data:/var/tempo ports: - - "14268" # jaeger ingest - - "3200" # tempo - - "4317" # otlp grpc - - "4318" # otlp http - - "9411" # zipkin2024-04-23T16:16:57+0000 + - "3200" # tempo + - "4317" # otlp grpc depends_on: - init diff --git a/example/docker-compose/cross-cluster/tempo-distributed-a.yaml b/example/docker-compose/cross-cluster/tempo-distributed-a.yaml index 9175545b8dc..83d11f4feeb 100644 --- a/example/docker-compose/cross-cluster/tempo-distributed-a.yaml +++ b/example/docker-compose/cross-cluster/tempo-distributed-a.yaml @@ -2,19 +2,11 @@ server: http_listen_port: 3200 distributor: - receivers: # this configuration will listen on all ports and protocols that tempo is capable of. - jaeger: # the receives all come from the OpenTelemetry collector. more configuration information can - protocols: # be found there: https://github.com/open-telemetry/opentelemetry-collector/tree/main/receiver - thrift_http: # - grpc: # for a production deployment you should only enable the receivers you need! - thrift_binary: - thrift_compact: - zipkin: + receivers: otlp: protocols: - http: grpc: - opencensus: + endpoint: "0.0.0.0:4317" ingester: max_block_duration: 5m # cut the headblock when this much time passes. this is being set for demo purposes and should probably be left alone normally diff --git a/example/docker-compose/cross-cluster/tempo-distributed-b.yaml b/example/docker-compose/cross-cluster/tempo-distributed-b.yaml index c90ed977346..35a2cfed8c0 100644 --- a/example/docker-compose/cross-cluster/tempo-distributed-b.yaml +++ b/example/docker-compose/cross-cluster/tempo-distributed-b.yaml @@ -2,19 +2,11 @@ server: http_listen_port: 3200 distributor: - receivers: # this configuration will listen on all ports and protocols that tempo is capable of. - jaeger: # the receives all come from the OpenTelemetry collector. 
more configuration information can - protocols: # be found there: https://github.com/open-telemetry/opentelemetry-collector/tree/main/receiver - thrift_http: # - grpc: # for a production deployment you should only enable the receivers you need! - thrift_binary: - thrift_compact: - zipkin: + receivers: otlp: protocols: - http: grpc: - opencensus: + endpoint: "0.0.0.0:4317" ingester: max_block_duration: 5m # cut the headblock when this much time passes. this is being set for demo purposes and should probably be left alone normally diff --git a/example/docker-compose/debug/docker-compose.yaml b/example/docker-compose/debug/docker-compose.yaml index b6d7a60397a..39fd958be38 100644 --- a/example/docker-compose/debug/docker-compose.yaml +++ b/example/docker-compose/debug/docker-compose.yaml @@ -21,12 +21,9 @@ services: environment: - DEBUG_BLOCK=0 ports: - - "14268:14268" # jaeger ingest - - "3200:3200" # tempo - - "4317:4317" # otlp grpc - - "4318:4318" # otlp http - - "9411:9411" # zipkin - - "2345:2345" # delve debug server + - "3200:3200" # tempo + - "4317:4317" # otlp grpc + - "2345:2345" # delve debug server depends_on: - init diff --git a/example/docker-compose/distributed/tempo-distributed.yaml b/example/docker-compose/distributed/tempo-distributed.yaml index b653c473560..767d4f38870 100644 --- a/example/docker-compose/distributed/tempo-distributed.yaml +++ b/example/docker-compose/distributed/tempo-distributed.yaml @@ -2,19 +2,11 @@ server: http_listen_port: 3200 distributor: - receivers: # this configuration will listen on all ports and protocols that tempo is capable of. - jaeger: # the receives all come from the OpenTelemetry collector. more configuration information can - protocols: # be found there: https://github.com/open-telemetry/opentelemetry-collector/tree/main/receiver - thrift_http: # - grpc: # for a production deployment you should only enable the receivers you need! - thrift_binary: - thrift_compact: - zipkin: + receivers: otlp: protocols: - http: grpc: - opencensus: + endpoint: "0.0.0.0:4317" ingester: max_block_duration: 5m # cut the headblock when this much time passes. this is being set for demo purposes and should probably be left alone normally @@ -66,4 +58,4 @@ overrides: defaults: metrics_generator: processors: ['service-graphs', 'span-metrics', 'local-blocks'] - generate_native_histograms: both \ No newline at end of file + generate_native_histograms: both diff --git a/example/docker-compose/ingest-storage/docker-compose.yaml b/example/docker-compose/ingest-storage/docker-compose.yaml new file mode 100644 index 00000000000..a8406be1f96 --- /dev/null +++ b/example/docker-compose/ingest-storage/docker-compose.yaml @@ -0,0 +1,237 @@ +services: + # Tempo runs as user 10001, and docker compose creates the volume as root. + # As such, we need to chown the volume in order for Tempo to start correctly. 
+ init: + image: &tempoImage grafana/tempo:latest + user: root + entrypoint: + - "chown" + - "10001:10001" + - "/var/tempo" + volumes: + - ./tempo-data:/var/tempo + + distributor: + image: *tempoImage + depends_on: + - kafka + command: "-target=distributor -config.file=/etc/tempo.yaml" + restart: always + volumes: + - ./tempo.yaml:/etc/tempo.yaml + ports: + - "3200" # tempo + # Uncomment the following lines to enable tracing + # environment: + # - OTEL_EXPORTER_OTLP_ENDPOINT=http://alloy:4318 + + ingester-0: + image: *tempoImage + depends_on: + - kafka + command: "-target=ingester -config.file=/etc/tempo.yaml" + restart: always + volumes: + - ./tempo.yaml:/etc/tempo.yaml + ports: + - "3200" # tempo + hostname: ingester-0 + # Uncomment the following lines to enable tracing + # environment: + # - OTEL_EXPORTER_OTLP_ENDPOINT=http://alloy:4318 + + ingester-1: + image: *tempoImage + depends_on: + - kafka + command: "-target=ingester -config.file=/etc/tempo.yaml" + restart: always + volumes: + - ./tempo.yaml:/etc/tempo.yaml + ports: + - "3200" # tempo + hostname: ingester-1 + # Uncomment the following lines to enable tracing + # environment: + # - OTEL_EXPORTER_OTLP_ENDPOINT=http://alloy:4318 + + ingester-2: + image: *tempoImage + depends_on: + - kafka + command: "-target=ingester -config.file=/etc/tempo.yaml" + restart: always + volumes: + - ./tempo.yaml:/etc/tempo.yaml + ports: + - "3200" # tempo + hostname: ingester-2 + # Uncomment the following lines to enable tracing + # environment: + # - OTEL_EXPORTER_OTLP_ENDPOINT=http://alloy:4318 + + query-frontend: + image: *tempoImage + command: "-target=query-frontend -config.file=/etc/tempo.yaml -log.level=debug" + restart: always + volumes: + - ./tempo.yaml:/etc/tempo.yaml + ports: + - "3200:3200" # tempo + # Uncomment the following lines to enable tracing + # environment: + # - OTEL_EXPORTER_OTLP_ENDPOINT=http://alloy:4318 + + querier: + image: *tempoImage + command: "-target=querier -config.file=/etc/tempo.yaml -log.level=debug" + restart: always + volumes: + - ./tempo.yaml:/etc/tempo.yaml + ports: + - "3200" # tempo + # Uncomment the following lines to enable tracing + # environment: + # - OTEL_EXPORTER_OTLP_ENDPOINT=http://alloy:4318 + + compactor: + image: *tempoImage + depends_on: + - kafka + command: "-target=compactor -config.file=/etc/tempo.yaml" + restart: always + volumes: + - ./tempo.yaml:/etc/tempo.yaml + ports: + - "3200" # tempo + # Uncomment the following lines to enable tracing + # environment: + # - OTEL_EXPORTER_OTLP_ENDPOINT=http://alloy:4318 + + metrics-generator-0: + image: *tempoImage + depends_on: + - kafka + command: "-target=metrics-generator -config.file=/etc/tempo.yaml" + hostname: metrics-generator-0 + restart: always + volumes: + - ./tempo.yaml:/etc/tempo.yaml + ports: + - "3200" # tempo + # Uncomment the following lines to enable tracing + # environment: + # - OTEL_EXPORTER_OTLP_ENDPOINT=http://alloy:4318 + + metrics-generator-1: + image: *tempoImage + depends_on: + - kafka + command: "-target=metrics-generator -config.file=/etc/tempo.yaml" + hostname: metrics-generator-1 + restart: always + volumes: + - ./tempo.yaml:/etc/tempo.yaml + ports: + - "3200" # tempo + # Uncomment the following lines to enable tracing + # environment: + # - OTEL_EXPORTER_OTLP_ENDPOINT=http://alloy:4318 + + block-builder-0: + image: *tempoImage + depends_on: + - kafka + command: "-target=block-builder -config.file=/etc/tempo.yaml" + hostname: block-builder-0 + restart: always + volumes: + - ./tempo.yaml:/etc/tempo.yaml + ports: + - "3200" # 
tempo + # Uncomment the following lines to enable tracing + # environment: + # - OTEL_EXPORTER_OTLP_ENDPOINT=http://alloy:4318 + + block-builder-1: + image: *tempoImage + depends_on: + - kafka + command: "-target=block-builder -config.file=/etc/tempo.yaml" + hostname: block-builder-1 + restart: always + volumes: + - ./tempo.yaml:/etc/tempo.yaml + ports: + - "3200" # tempo + # Uncomment the following lines to enable tracing + # environment: + # - OTEL_EXPORTER_OTLP_ENDPOINT=http://alloy:4318 + + minio: + image: minio/minio:latest + environment: + - MINIO_ACCESS_KEY=tempo + - MINIO_SECRET_KEY=supersecret + ports: + - "9001:9001" + entrypoint: + - sh + - -euc + - mkdir -p /data/tempo && minio server /data --console-address ':9001' + + k6-tracing: + image: ghcr.io/grafana/xk6-client-tracing:v0.0.5 + environment: + - ENDPOINT=distributor:4317 + restart: always + depends_on: + - distributor + + prometheus: + image: prom/prometheus:latest + command: + - --config.file=/etc/prometheus.yaml + - --web.enable-remote-write-receiver + - --enable-feature=exemplar-storage + - --enable-feature=native-histograms + volumes: + - ../shared/prometheus.yaml:/etc/prometheus.yaml + ports: + - "9090:9090" + + grafana: + image: grafana/grafana:11.0.0 + volumes: + - ../shared/grafana-datasources.yaml:/etc/grafana/provisioning/datasources/datasources.yaml + environment: + - GF_AUTH_ANONYMOUS_ENABLED=true + - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin + - GF_AUTH_DISABLE_LOGIN_FORM=true + - GF_FEATURE_TOGGLES_ENABLE=traceqlEditor traceQLStreaming metricsSummary + ports: + - "3000:3000" + + "kafka": + "image": "confluentinc/cp-kafka:latest" + "environment": + - "CLUSTER_ID=zH1GDqcNTzGMDCXm5VZQdg" + - "KAFKA_BROKER_ID=1" + - "KAFKA_NUM_PARTITIONS=100" + - "KAFKA_PROCESS_ROLES=broker,controller" + - "KAFKA_LISTENERS=PLAINTEXT://:9092,CONTROLLER://:9093,PLAINTEXT_HOST://:29092" + - "KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://kafka:9092,PLAINTEXT_HOST://localhost:29092" + - "KAFKA_LISTENER_SECURITY_PROTOCOL_MAP=PLAINTEXT:PLAINTEXT,CONTROLLER:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT" + - "KAFKA_INTER_BROKER_LISTENER_NAME=PLAINTEXT" + - "KAFKA_CONTROLLER_LISTENER_NAMES=CONTROLLER" + - "KAFKA_CONTROLLER_QUORUM_VOTERS=1@kafka:9093" + - "KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1" + - "KAFKA_LOG_RETENTION_CHECK_INTERVAL_MS=10000" + "healthcheck": + "interval": "1s" + "retries": 30 + "start_period": "1s" + "test": "nc -z localhost 9092 || exit -1" + "timeout": "1s" + "ports": + - "29092:29092" \ No newline at end of file diff --git a/example/docker-compose/ingest-storage/tempo.yaml b/example/docker-compose/ingest-storage/tempo.yaml new file mode 100644 index 00000000000..e478ef4ec53 --- /dev/null +++ b/example/docker-compose/ingest-storage/tempo.yaml @@ -0,0 +1,85 @@ +server: + http_listen_port: 3200 + log_level: info + +distributor: + kafka_write_path_enabled: true + receivers: + otlp: + protocols: + grpc: + log_received_spans: + enabled: true + log_discarded_spans: + enabled: true + +ingester: + max_block_duration: 5m # cut the headblock when this much time passes. this is being set for demo purposes and should probably be left alone normally + partition_ring: + kvstore: + store: memberlist + +memberlist: + abort_if_cluster_join_fails: false + bind_port: 7946 + join_members: + - ingester-0:7946 + - ingester-1:7946 + - ingester-2:7946 + +compactor: + compaction: + block_retention: 1h # overall Tempo trace retention. 
set for demo purposes + +querier: + frontend_worker: + frontend_address: query-frontend:9095 + +metrics_generator: + registry: + external_labels: + source: tempo + cluster: docker-compose + storage: + path: /var/tempo/generator/wal + remote_write: + - url: http://prometheus:9090/api/v1/write + send_exemplars: true + traces_storage: + path: /var/tempo/generator/traces + assigned_partitions: + metrics-generator-0: [0,2] + metrics-generator-1: [1] + +storage: + trace: + backend: s3 + s3: + bucket: tempo + endpoint: minio:9000 + access_key: tempo + secret_key: supersecret + insecure: true + wal: + path: /var/tempo/wal # where to store the wal locally + local: + path: /var/tempo/blocks + +overrides: + defaults: + metrics_generator: + processors: ['local-blocks'] + generate_native_histograms: both + +ingest: + enabled: true + kafka: + address: kafka:9092 + topic: tempo-ingest + +block_builder: + lookback_on_no_commit: 2m + consume_cycle_duration: 30s + assigned_partitions: + block-builder-0: [0,2] + block-builder-1: [1] \ No newline at end of file diff --git a/example/docker-compose/local/docker-compose.yaml b/example/docker-compose/local/docker-compose.yaml index f8172d2417d..226c4f5d8d0 100644 --- a/example/docker-compose/local/docker-compose.yaml +++ b/example/docker-compose/local/docker-compose.yaml @@ -67,5 +67,6 @@ services: - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin - GF_AUTH_DISABLE_LOGIN_FORM=true - GF_FEATURE_TOGGLES_ENABLE=traceqlEditor metricsSummary + - GF_INSTALL_PLUGINS=https://storage.googleapis.com/integration-artifacts/grafana-exploretraces-app/grafana-exploretraces-app-latest.zip;grafana-traces-app ports: - "3000:3000" diff --git a/example/docker-compose/local/tempo.yaml b/example/docker-compose/local/tempo.yaml index 3472f731aac..9cacb4c45d1 100644 --- a/example/docker-compose/local/tempo.yaml +++ b/example/docker-compose/local/tempo.yaml @@ -33,15 +33,23 @@ distributor: jaeger: # the receives all come from the OpenTelemetry collector. more configuration information can protocols: # be found there: https://github.com/open-telemetry/opentelemetry-collector/tree/main/receiver thrift_http: # - grpc: # for a production deployment you should only enable the receivers you need! + endpoint: "tempo:14268" # for a production deployment you should only enable the receivers you need! + grpc: + endpoint: "tempo:14250" thrift_binary: + endpoint: "tempo:6832" thrift_compact: + endpoint: "tempo:6831" zipkin: + endpoint: "tempo:9411" otlp: protocols: - http: grpc: + endpoint: "tempo:4317" + http: + endpoint: "tempo:4318" opencensus: + endpoint: "tempo:55678" ingester: max_block_duration: 5m # cut the headblock when this much time passes. this is being set for demo purposes and should probably be left alone normally @@ -62,6 +70,10 @@ metrics_generator: send_exemplars: true traces_storage: path: /var/tempo/generator/traces + processor: + local_blocks: + filter_server_spans: false + flush_to_storage: true storage: trace: @@ -76,3 +88,4 @@ overrides: metrics_generator: processors: [service-graphs, span-metrics, local-blocks] # enables metrics generator generate_native_histograms: both + diff --git a/example/docker-compose/multi-tenant/tempo.yaml b/example/docker-compose/multi-tenant/tempo.yaml index 655f56b04b8..bbece3ad60a 100644 --- a/example/docker-compose/multi-tenant/tempo.yaml +++ b/example/docker-compose/multi-tenant/tempo.yaml @@ -17,19 +17,11 @@ query_frontend: duration_slo: 5s distributor: - receivers: # this configuration will listen on all ports and protocols that tempo is capable of. 
-    jaeger: # the receives all come from the OpenTelemetry collector. more configuration information can
-      protocols: # be found there: https://github.com/open-telemetry/opentelemetry-collector/tree/main/receiver
-        thrift_http: #
-        grpc: # for a production deployment you should only enable the receivers you need!
-        thrift_binary:
-        thrift_compact:
-    zipkin:
+  receivers:
     otlp:
       protocols:
-        http:
         grpc:
-    opencensus:
+          endpoint: "tempo:4317"

 ingester:
   max_block_duration: 5m # cut the headblock when this much time passes. this is being set for demo purposes and should probably be left alone normally
diff --git a/example/docker-compose/otel-collector-multitenant/docker-compose.yaml b/example/docker-compose/otel-collector-multitenant/docker-compose.yaml
index b84c6bc2dd1..14b835f7c95 100644
--- a/example/docker-compose/otel-collector-multitenant/docker-compose.yaml
+++ b/example/docker-compose/otel-collector-multitenant/docker-compose.yaml
@@ -19,11 +19,8 @@ services:
       - ../shared/tempo.yaml:/etc/tempo.yaml
       - ./tempo-data:/var/tempo
     ports:
-      - "14268"  # jaeger ingest
-      - "3200"   # tempo
-      - "4317"   # otlp grpc
-      - "4318"   # otlp http
-      - "9411"   # zipkin
+      - "3200"   # tempo
+      - "4317"   # otlp grpc
     depends_on:
       - init
diff --git a/example/docker-compose/otel-collector/docker-compose.yaml b/example/docker-compose/otel-collector/docker-compose.yaml
index c9c3fbedac5..687b63bbcd6 100644
--- a/example/docker-compose/otel-collector/docker-compose.yaml
+++ b/example/docker-compose/otel-collector/docker-compose.yaml
@@ -19,11 +19,8 @@ services:
       - ../shared/tempo.yaml:/etc/tempo.yaml
       - ./tempo-data:/var/tempo
     ports:
-      - "14268"  # jaeger ingest
-      - "3200"   # tempo
-      - "4317"   # otlp grpc
-      - "4318"   # otlp http
-      - "9411"   # zipkin
+      - "3200"   # tempo
+      - "4317"   # otlp grpc
     depends_on:
       - init
diff --git a/example/docker-compose/shared/tempo.yaml b/example/docker-compose/shared/tempo.yaml
index da019fdec19..d8a53271d99 100644
--- a/example/docker-compose/shared/tempo.yaml
+++ b/example/docker-compose/shared/tempo.yaml
@@ -14,19 +14,11 @@ query_frontend:
     duration_slo: 5s

 distributor:
-  receivers: # this configuration will listen on all ports and protocols that tempo is capable of.
-    jaeger: # the receives all come from the OpenTelemetry collector. more configuration information can
-      protocols: # be found there: https://github.com/open-telemetry/opentelemetry-collector/tree/main/receiver
-        thrift_http: #
-        grpc: # for a production deployment you should only enable the receivers you need!
-        thrift_binary:
-        thrift_compact:
-    zipkin:
+  receivers:
     otlp:
       protocols:
-        http:
         grpc:
-    opencensus:
+          endpoint: "tempo:4317"

 ingester:
   max_block_duration: 5m # cut the headblock when this much time passes. this is being set for demo purposes and should probably be left alone normally
diff --git a/example/nomad/tempo-distributed/README.md b/example/nomad/tempo-distributed/README.md
new file mode 100644
index 00000000000..adb5fc77ab3
--- /dev/null
+++ b/example/nomad/tempo-distributed/README.md
@@ -0,0 +1,44 @@
+# Microservices mode
+
+This Nomad job deploys Tempo in
+[microservices mode](https://grafana.com/docs/tempo/latest/setup/deployment/#microservices-mode) using an S3 backend.
+
+## Usage
+
+### Prerequisites
+
+- S3-compatible storage
+- [Nomad memory oversubscription](https://developer.hashicorp.com/nomad/tutorials/advanced-scheduling/memory-oversubscription).
+  If memory oversubscription is not enabled, remove `memory_max` from `tempo.hcl`.
+
+Have a look at the job file and the Tempo configuration file and change them to suit your environment (for example, in `config.yml`, change the S3 endpoint to your S3-compatible storage, the Prometheus endpoint, and so on).
+
+### Variables
+
+| Name | Value | Description |
+|---|---|---|
+| version | Default = "2.3.1" | Tempo version |
+| s3_access_key_id | Default = "any" | S3 Access Key ID |
+| s3_secret_access_key | Default = "any" | S3 Secret Access Key |
+
+### Run job
+
+Inside the directory with the job, run:
+
+```shell
+nomad job run tempo.hcl
+```
+
+To deploy a different version, change the `variable.version` default value or
+specify it from the command line:
+
+```shell
+nomad job run -var="version=2.6.1" tempo.hcl
+```
+
+### Scale Tempo
+
+Using the Nomad CLI:
+
+```shell
+nomad job scale tempo distributor <count>
+```
diff --git a/example/nomad/tempo-distributed/config.yml b/example/nomad/tempo-distributed/config.yml
new file mode 100644
index 00000000000..5f2c643134e
--- /dev/null
+++ b/example/nomad/tempo-distributed/config.yml
@@ -0,0 +1,81 @@
+server:
+  log_level: info
+  http_listen_port: {{ env "NOMAD_PORT_http" }}
+  grpc_listen_port: {{ env "NOMAD_PORT_grpc" }}
+
+distributor:
+  ring:
+    kvstore:
+      store: consul
+      prefix: tempo/
+      consul:
+        host: {{ env "attr.unique.network.ip-address" }}:8500
+
+  receivers: # receive traces over OTLP HTTP and gRPC; enable only the receivers you need
+    otlp:
+      protocols:
+        http:
+        grpc:
+
+ingester:
+  max_block_duration: 5m
+  lifecycler:
+    ring:
+      kvstore:
+        store: consul
+        prefix: tempo/
+        consul:
+          host: {{ env "attr.unique.network.ip-address" }}:8500
+      replication_factor: 3
+
+compactor:
+  ring:
+    kvstore:
+      store: consul
+      prefix: tempo/
+      consul:
+        host: {{ env "attr.unique.network.ip-address" }}:8500
+
+  compaction:
+    block_retention: 336h # Duration to keep blocks. Default is 14 days (336h).
+querier: + frontend_worker: + frontend_address: tempo-query-frontend-grpc.service.consul:9095 + +metrics_generator: + processor: + service_graphs: + max_items: 10000 + ring: + kvstore: + store: consul + prefix: tempo/ + consul: + host: {{ env "attr.unique.network.ip-address" }}:8500 + storage: + path: {{ env "NOMAD_ALLOC_DIR" }}/tempo/wal + remote_write: + - url: http://prometheus.service.consul/api/v1/write + send_exemplars: true + +storage: + trace: + backend: s3 + wal: + path: {{ env "NOMAD_ALLOC_DIR" }}/tempo/wal + local: + path: {{ env "NOMAD_ALLOC_DIR" }}/tempo/blocks + s3: + bucket: tempo # name of the S3 bucket in which blocks are stored + endpoint: seaweedfs-s3.service.consul + insecure: true + access_key: ${S3_ACCESS_KEY_ID} + secret_key: ${S3_SECRET_ACCESS_KEY} + +overrides: + defaults: + metrics_generator: + processors: + - service-graphs + - span-metrics diff --git a/example/nomad/tempo-distributed/tempo.hcl b/example/nomad/tempo-distributed/tempo.hcl new file mode 100644 index 00000000000..13ca420758b --- /dev/null +++ b/example/nomad/tempo-distributed/tempo.hcl @@ -0,0 +1,493 @@ +variable "version" { + type = string + description = "Tempo version" + default = "2.3.1" +} + +variable "s3_access_key_id" { + type = string + description = "S3 Access Key ID" + default = "any" +} + +variable "s3_secret_access_key" { + type = string + description = "S3 Secret Access Key" + default = "any" +} + +job "tempo" { + datacenters = ["*"] + + group "metrics-generator" { + count = 1 + + network { + port "http" {} + port "grpc" {} + } + + service { + name = "tempo-metrics-generator" + port = "http" + tags = [] + check { + name = "metrics-generator" + port = "http" + type = "http" + path = "/ready" + interval = "20s" + timeout = "1s" + } + } + + service { + name = "tempo-metrics-generator-grpc" + port = "grpc" + tags = [] + check { + port = "grpc" + type = "grpc" + interval = "20s" + timeout = "1s" + grpc_use_tls = false + tls_skip_verify = true + } + } + + task "metrics-generator" { + driver = "docker" + user = "nobody" + kill_timeout = "90s" + + config { + image = "grafana/tempo:${var.version}" + ports = [ + "http", + "grpc", + ] + + args = [ + "-target=metrics-generator", + "-config.file=/local/config.yml", + "-config.expand-env=true", + ] + } + + template { + data = file("config.yml") + destination = "local/config.yml" + } + + template { + data = <<-EOH + S3_ACCESS_KEY_ID=${var.s3_access_key_id} + S3_SECRET_ACCESS_KEY=${var.s3_secret_access_key} + EOH + + destination = "secrets/s3.env" + env = true + } + + resources { + cpu = 200 + memory = 128 + memory_max = 1024 + } + } + } + + group "query-frontend" { + count = 1 + + network { + port "http" {} + port "grpc" { static = 9095 } + } + + service { + name = "tempo-query-frontend" + port = "http" + tags = [] + } + + service { + name = "tempo-query-frontend-grpc" + port = "grpc" + tags = [] + check { + port = "grpc" + type = "grpc" + interval = "20s" + timeout = "1s" + grpc_use_tls = false + tls_skip_verify = true + } + } + + task "query-frontend" { + driver = "docker" + user = "nobody" + kill_timeout = "90s" + + config { + image = "grafana/tempo:${var.version}" + ports = [ + "http", + "grpc", + ] + + args = [ + "-target=query-frontend", + "-config.file=/local/config.yml", + "-config.expand-env=true", + ] + } + + template { + data = file("config.yml") + destination = "local/config.yml" + } + + template { + data = <<-EOH + S3_ACCESS_KEY_ID=${var.s3_access_key_id} + S3_SECRET_ACCESS_KEY=${var.s3_secret_access_key} + EOH + + destination = "secrets/s3.env" + env = true
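+      # `env = true` exposes the rendered key=value pairs to the task as environment
+      # variables; combined with the `-config.expand-env=true` flag passed in args,
+      # Tempo expands ${S3_ACCESS_KEY_ID} and ${S3_SECRET_ACCESS_KEY} in config.yml.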
+ } + + resources { + cpu = 200 + memory = 128 + memory_max = 1024 + } + } + } + + group "ingester" { + count = 3 + + network { + port "http" {} + port "grpc" {} + } + + service { + name = "tempo-ingester" + port = "http" + tags = [] + check { + name = "Tempo ingester" + port = "http" + type = "http" + path = "/ready" + interval = "20s" + timeout = "1s" + } + } + + service { + name = "tempo-ingester-grpc" + port = "grpc" + tags = [] + check { + port = "grpc" + type = "grpc" + interval = "20s" + timeout = "1s" + grpc_use_tls = false + tls_skip_verify = true + } + } + + task "ingester" { + driver = "docker" + user = "nobody" + kill_timeout = "90s" + + config { + image = "grafana/tempo:${var.version}" + ports = [ + "http", + "grpc", + ] + + args = [ + "-target=ingester", + "-config.file=/local/config.yml", + "-config.expand-env=true", + ] + network_mode = "host" + } + + template { + data = file("config.yml") + destination = "local/config.yml" + } + + template { + data = <<-EOH + S3_ACCESS_KEY_ID=${var.s3_access_key_id} + S3_SECRET_ACCESS_KEY=${var.s3_secret_access_key} + EOH + + destination = "secrets/s3.env" + env = true + } + + resources { + cpu = 300 + memory = 128 + memory_max = 2048 + } + } + } + + group "compactor" { + count = 1 + + ephemeral_disk { + size = 1000 + sticky = true + } + + network { + port "http" {} + port "grpc" {} + } + + service { + name = "tempo-compactor" + port = "http" + tags = [] + check { + name = "Tempo compactor" + port = "http" + type = "http" + path = "/ready" + interval = "20s" + timeout = "1s" + } + } + + service { + name = "tempo-compactor-grpc" + port = "grpc" + tags = [] + check { + port = "grpc" + type = "grpc" + interval = "20s" + timeout = "1s" + grpc_use_tls = false + tls_skip_verify = true + } + } + + task "compactor" { + driver = "docker" + user = "nobody" + kill_timeout = "90s" + + config { + image = "grafana/tempo:${var.version}" + ports = [ + "http", + "grpc", + ] + + args = [ + "-target=compactor", + "-config.file=/local/config.yml", + "-config.expand-env=true", + ] + } + + template { + data = file("config.yml") + destination = "local/config.yml" + } + + template { + data = <<-EOH + S3_ACCESS_KEY_ID=${var.s3_access_key_id} + S3_SECRET_ACCESS_KEY=${var.s3_secret_access_key} + EOH + + destination = "secrets/s3.env" + env = true + } + + resources { + cpu = 3000 + memory = 256 + memory_max = 1024 + } + } + } + group "distributor" { + count = 1 + + network { + port "http" {} + port "grpc" {} + port "otlp" { to = 4317 } + } + + service { + name = "tempo-distributor" + port = "http" + tags = [] + check { + name = "Tempo distributor" + port = "http" + type = "http" + path = "/ready" + interval = "20s" + timeout = "1s" + } + } + + service { + name = "tempo-distributor-otlp" + port = "otlp" + tags = [] + } + + service { + name = "tempo-distributor-grpc" + port = "grpc" + tags = [] + check { + port = "grpc" + type = "grpc" + interval = "20s" + timeout = "1s" + grpc_use_tls = false + tls_skip_verify = true + } + } + + task "distributor" { + driver = "docker" + user = "nobody" + kill_timeout = "90s" + + config { + image = "grafana/tempo:${var.version}" + ports = [ + "http", + "grpc", + "otlp", + ] + + args = [ + "-target=distributor", + "-config.file=/local/config.yml", + "-config.expand-env=true", + ] + } + + template { + data = file("config.yml") + destination = "local/config.yml" + } + + template { + data = <<-EOH + S3_ACCESS_KEY_ID=${var.s3_access_key_id} + S3_SECRET_ACCESS_KEY=${var.s3_secret_access_key} + EOH + + destination = "secrets/s3.env" + env =
true + } + + resources { + cpu = 200 + memory = 128 + memory_max = 1024 + } + } + } + group "querier" { + count = 1 + + network { + port "http" {} + port "grpc" {} + } + + service { + name = "tempo-querier" + port = "http" + tags = [] + check { + name = "Tempo querier" + port = "http" + type = "http" + path = "/ready" + interval = "50s" + timeout = "1s" + } + } + + service { + name = "tempo-querier-grpc" + port = "grpc" + tags = [] + check { + port = "grpc" + type = "grpc" + interval = "20s" + timeout = "1s" + grpc_use_tls = false + tls_skip_verify = true + } + } + + task "querier" { + driver = "docker" + user = "nobody" + kill_timeout = "90s" + + config { + image = "grafana/tempo:${var.version}" + ports = [ + "http", + "grpc", + ] + + args = [ + "-target=querier", + "-config.file=/local/config.yml", + "-config.expand-env=true", + ] + } + + template { + data = file("config.yml") + destination = "local/config.yml" + } + + template { + data = <<-EOH + S3_ACCESS_KEY_ID=${var.s3_access_key_id} + S3_SECRET_ACCESS_KEY=${var.s3_secret_access_key} + EOH + + destination = "secrets/s3.env" + env = true + } + + resources { + cpu = 200 + memory = 128 + memory_max = 2048 + } + } + } +} diff --git a/example/nomad/tempo-monolith/README.md b/example/nomad/tempo-monolith/README.md new file mode 100644 index 00000000000..a13aafcee89 --- /dev/null +++ b/example/nomad/tempo-monolith/README.md @@ -0,0 +1,35 @@ +# Monolithic mode + +This Nomad job will deploy Tempo in +[monolithic mode](https://grafana.com/docs/tempo/latest/setup/deployment/#monolithic-mode) using an S3 backend. + +## Usage + +### Prerequisites +- S3-compatible storage + +Variables +-------------- + +| Name | Value | Description | +|---|---|---| +| version | Default = "2.3.1" | Tempo version | +| s3_url | Default = "s3.dummy.url" | S3 storage URL | +| s3_access_key_id | Default = "any" | S3 Access Key ID | +| s3_secret_access_key | Default = "any" | S3 Secret Access Key | +| prometheus_remote_write_url | Default = "http://prometheus.service.consul/api/v1/write" | Prometheus Remote Write URL | + +### Run job + +From the directory containing the job file, run: + +```shell +nomad job run tempo.hcl +``` + +To deploy a different version, change the default value of `variable.version` or +specify it on the command line: + +```shell +nomad job run -var="version=2.6.1" tempo.hcl +``` diff --git a/example/nomad/tempo-monolith/tempo.hcl b/example/nomad/tempo-monolith/tempo.hcl new file mode 100644 index 00000000000..1c839a94c12 --- /dev/null +++ b/example/nomad/tempo-monolith/tempo.hcl @@ -0,0 +1,146 @@ +variable "version" { + type = string + description = "Tempo version" + default = "2.3.1" +} + +variable "prometheus_remote_write_url" { + type = string + description = "Prometheus Remote Write URL" + default = "http://prometheus.service.consul/api/v1/write" +} + +variable "s3_url" { + type = string + description = "S3 URL" + default = "s3.dummy.url" +} + +variable "s3_access_key_id" { + type = string + description = "S3 Access Key ID" + default = "any" +} + +variable "s3_secret_access_key" { + type = string + description = "S3 Secret Access Key" + default = "any" +} + +job "tempo" { + datacenters = ["*"] + + group "tempo" { + count = 1 + + network { + port "http" { + to = 3200 + } + port "grpc" {} + } + + service { + name = "tempo-http" + port = "http" + tags = [] + check { + name = "tempo-http" + port = "http" + type = "http" + path = "/ready" + interval = "20s" + timeout = "1s" + } + } + + service { + name = "tempo-grpc" + port = "grpc" + tags = [] + check { + port = "grpc" + type
= "grpc" + interval = "20s" + timeout = "1s" + grpc_use_tls = false + tls_skip_verify = true + } + } + + task "tempo" { + driver = "docker" + user = "nobody" + kill_timeout = "90s" + + config { + image = "grafana/tempo:${var.version}" + ports = [ + "http", + "grpc", + ] + + args = [ + "-target=all", + "-config.file=/local/config.yml", + "-config.expand-env=true", + ] + } + template { + data = <<-EOC + server: + log_level: info + http_listen_port: {{ env "NOMAD_PORT_http" }} + grpc_listen_port: {{ env "NOMAD_PORT_grpc" }} + + distributor: + receivers: # this configuration will listen on all ports and protocols that tempo is capable of. + otlp: + protocols: + http: + grpc: + + metrics_generator: + processor: + service_graphs: + max_items: 10000 + + storage: + path: {{ env "NOMAD_ALLOC_DIR" }}/tempo/wal + remote_write: + - url: ${var.prometheus_remote_write_url} + send_exemplars: true + + storage: + trace: + backend: s3 + wal: + path: {{ env "NOMAD_ALLOC_DIR" }}/tempo/wal + local: + path: {{ env "NOMAD_ALLOC_DIR" }}/tempo/blocks + s3: + bucket: tempo # how to store data in s3 + endpoint: ${var.s3_url} + insecure: true + access_key: ${var.s3_access_key_id} + secret_key: ${var.s3_secret_access_key} + + overrides: + defaults: + metrics_generator: + processors: + - service-graphs + - span-metrics + EOC + destination = "local/config.yml" + } + + + resources { + cpu = 300 + memory = 1024 + } + } + } +} \ No newline at end of file diff --git a/example/tk/jsonnetfile.lock.json b/example/tk/jsonnetfile.lock.json index ef53d72468e..0a67309ded3 100644 --- a/example/tk/jsonnetfile.lock.json +++ b/example/tk/jsonnetfile.lock.json @@ -8,7 +8,7 @@ "subdir": "grafana" } }, - "version": "84e49c8549fa472c963862f233422c8b368afabe", + "version": "57b0b85dc1d7ed5e30c2e41f3bd26744b8aef519", "sum": "Y5nheroSOIwmE+djEVPq4OvvTxKenzdHhpEwaR3Ebjs=" }, { @@ -18,8 +18,8 @@ "subdir": "grafana-builder" } }, - "version": "84e49c8549fa472c963862f233422c8b368afabe", - "sum": "B49EzIY2WZsFxNMJcgRxE/gcZ9ltnS8pkOOV6Q5qioc=" + "version": "57b0b85dc1d7ed5e30c2e41f3bd26744b8aef519", + "sum": "yxqWcq/N3E/a/XreeU6EuE6X7kYPnG0AspAQFKOjASo=" }, { "source": { @@ -28,7 +28,7 @@ "subdir": "ksonnet-util" } }, - "version": "84e49c8549fa472c963862f233422c8b368afabe", + "version": "57b0b85dc1d7ed5e30c2e41f3bd26744b8aef519", "sum": "0y3AFX9LQSpfWTxWKSwoLgbt0Wc9nnCwhMH2szKzHv0=" }, { @@ -38,7 +38,7 @@ "subdir": "kube-state-metrics/" } }, - "version": "84e49c8549fa472c963862f233422c8b368afabe", + "version": "57b0b85dc1d7ed5e30c2e41f3bd26744b8aef519", "sum": "q1YzD+I4InDdfQP6k93W9Lw5U1jpll3gVaS9rUzLR7U=" }, { @@ -48,7 +48,7 @@ "subdir": "memcached" } }, - "version": "84e49c8549fa472c963862f233422c8b368afabe", + "version": "57b0b85dc1d7ed5e30c2e41f3bd26744b8aef519", "sum": "Cc715Y3rgTuimgDFIw+FaKzXSJGRYwt1pFTMbdrNBD8=" }, { @@ -58,7 +58,7 @@ "subdir": "prometheus" } }, - "version": "84e49c8549fa472c963862f233422c8b368afabe", + "version": "57b0b85dc1d7ed5e30c2e41f3bd26744b8aef519", "sum": "b4scQI+UtFxkVgzzD3dfbPMwI7EVtyjv1uETEmizM8M=" }, { @@ -68,7 +68,7 @@ "subdir": "tanka-util" } }, - "version": "84e49c8549fa472c963862f233422c8b368afabe", + "version": "57b0b85dc1d7ed5e30c2e41f3bd26744b8aef519", "sum": "ShSIissXdvCy1izTCDZX6tY7qxCoepE5L+WJ52Hw7ZQ=" }, { @@ -88,7 +88,7 @@ "subdir": "1.29" } }, - "version": "bf9a62cfd32a58c071b8410bfcdec058475dd25e", + "version": "6ecbb7709baf27f44b2e48f3529741ae6754ae6a", "sum": "i2w3hGbgQmaB73t5LJHSioPOVdYv8ZBvivHiDwZJVyI=" } ], diff --git a/example/tk/lib/k.libsonnet b/example/tk/lib/k.libsonnet index 
8f0a057f9e6..a5f12df6c95 100644 --- a/example/tk/lib/k.libsonnet +++ b/example/tk/lib/k.libsonnet @@ -1 +1 @@ -import '1.21/main.libsonnet' +import '1.29/main.libsonnet' diff --git a/example/tk/lib/synthetic-load-generator/load-generator.json b/example/tk/lib/synthetic-load-generator/load-generator.json deleted file mode 100644 index 58a2dac3e08..00000000000 --- a/example/tk/lib/synthetic-load-generator/load-generator.json +++ /dev/null @@ -1,630 +0,0 @@ -{ - "topology": { - "services": [ - { - "serviceName": "frontend", - "tagSets": [ - { - "weight": 1, - "tags": { - "version": "v127", - "region": "us-east-1" - }, - "tagGenerators": [], - "inherit": [], - "maxLatency": 100 - }, - { - "weight": 1, - "tags": { - "version": "v125", - "region": "us-east-1" - }, - "tagGenerators": [], - "inherit": [], - "maxLatency": 100 - }, - { - "weight": 2, - "tags": { - "version": "v125", - "region": "us-west-1" - }, - "tagGenerators": [], - "inherit": [], - "maxLatency": 100 - } - ], - "routes": [ - { - "route": "/product", - "downstreamCalls": { - "productcatalogservice": "/GetProducts", - "recommendationservice": "/GetRecommendations", - "adservice": "/AdRequest" - }, - "tagSets": [ - { - "weight": 1, - "tags": { - "starter": "charmander" - }, - "tagGenerators": [ - { - "rand": { - "seed": 179867746078676, - "nextNextGaussian": 0, - "haveNextNextGaussian": false - }, - "tagGen": {}, - "valLength": 16, - "numTags": 50, - "numVals": 3000 - } - ], - "inherit": [] - }, - { - "weight": 1, - "tags": { - "starter": "squirtle" - }, - "tagGenerators": [], - "inherit": [] - }, - { - "weight": 1, - "tags": { - "starter": "bulbasaur" - }, - "tagGenerators": [], - "inherit": [] - } - ] - }, - { - "route": "/cart", - "downstreamCalls": { - "cartservice": "/GetCart", - "recommendationservice": "/GetRecommendations" - }, - "tagSets": [] - }, - { - "route": "/checkout", - "downstreamCalls": { - "checkoutservice": "/PlaceOrder" - }, - "tagSets": [ - { - "tags": {}, - "tagGenerators": [], - "inherit": [], - "maxLatency": 800 - } - ] - }, - { - "route": "/shipping", - "downstreamCalls": { - "shippingservice": "/GetQuote" - }, - "tagSets": [ - { - "tags": {}, - "tagGenerators": [], - "inherit": [], - "maxLatency": 50 - } - ] - }, - { - "route": "/currency", - "downstreamCalls": { - "currencyservice": "/GetConversion" - }, - "tagSets": [ - { - "tags": {}, - "tagGenerators": [], - "inherit": [], - "maxLatency": 50 - } - ] - } - ], - "instances": [ - "frontend-6b654dbf57-zq8dt", - "frontend-d847fdcf5-j6s2f", - "frontend-79d8c8d6c8-9sbff" - ], - "mergedTagSets": {}, - "random": { - "seed": 187004238864083, - "nextNextGaussian": 0, - "haveNextNextGaussian": false - } - }, - { - "serviceName": "productcatalogservice", - "tagSets": [ - { - "tags": { - "version": "v52" - }, - "tagGenerators": [], - "inherit": [ - "region" - ] - } - ], - "routes": [ - { - "route": "/GetProducts", - "downstreamCalls": {}, - "tagSets": [ - { - "tags": {}, - "tagGenerators": [], - "inherit": [ - "starter" - ], - "maxLatency": 100 - } - ] - }, - { - "route": "/SearchProducts", - "downstreamCalls": {}, - "tagSets": [ - { - "weight": 15, - "tags": { - "error": true, - "http.status_code": 503 - }, - "tagGenerators": [], - "inherit": [], - "maxLatency": 400 - }, - { - "weight": 85, - "tags": {}, - "tagGenerators": [], - "inherit": [], - "maxLatency": 400 - } - ] - } - ], - "instances": [ - "productcatalogservice-6b654dbf57-zq8dt", - "productcatalogservice-d847fdcf5-j6s2f" - ], - "mergedTagSets": {}, - "random": { - "seed": 238238032670139, - 
"nextNextGaussian": 0, - "haveNextNextGaussian": false - } - }, - { - "serviceName": "recommendationservice", - "tagSets": [ - { - "tags": { - "version": "v234", - "region": "us-east-1" - }, - "tagGenerators": [], - "inherit": [] - } - ], - "routes": [ - { - "route": "/GetRecommendations", - "downstreamCalls": { - "productcatalogservice": "/GetProducts" - }, - "tagSets": [ - { - "tags": {}, - "tagGenerators": [], - "inherit": [], - "maxLatency": 200 - } - ] - } - ], - "instances": [ - "recommendationservice-6b654dbf57-zq8dt", - "recommendationservice-d847fdcf5-j6s2f" - ], - "mergedTagSets": {}, - "random": { - "seed": 66295214032801, - "nextNextGaussian": 0, - "haveNextNextGaussian": false - } - }, - { - "serviceName": "cartservice", - "tagSets": [ - { - "tags": { - "version": "v5", - "region": "us-east-1" - }, - "tagGenerators": [], - "inherit": [] - } - ], - "routes": [ - { - "route": "/GetCart", - "downstreamCalls": {}, - "tagSets": [ - { - "tags": {}, - "tagGenerators": [], - "inherit": [], - "maxLatency": 200 - } - ] - } - ], - "instances": [ - "cartservice-6b654dbf57-zq8dt", - "cartservice-d847fdcf5-j6s2f" - ], - "mergedTagSets": {}, - "random": { - "seed": 234194353561392, - "nextNextGaussian": 0, - "haveNextNextGaussian": false - } - }, - { - "serviceName": "checkoutservice", - "tagSets": [ - { - "tags": { - "version": "v37", - "region": "us-east-1" - }, - "tagGenerators": [], - "inherit": [], - "maxLatency": 500 - } - ], - "routes": [ - { - "route": "/PlaceOrder", - "downstreamCalls": { - "paymentservice": "/CreditCardInfo", - "shippingservice": "/Address", - "currencyservice": "/GetConversion", - "cartservice": "/GetCart", - "emailservice": "/SendOrderConfirmation" - }, - "tagSets": [ - { - "weight": 25, - "tags": { - "error": true, - "http.status_code": 503 - }, - "tagGenerators": [], - "inherit": [] - }, - { - "weight": 85, - "tags": {}, - "tagGenerators": [], - "inherit": [] - } - ] - } - ], - "instances": [ - "checkoutservice-6b654dbf57-zq8dt", - "checkoutservice-d847fdcf5-j6s2f" - ], - "mergedTagSets": {}, - "random": { - "seed": 60782549660568, - "nextNextGaussian": 0, - "haveNextNextGaussian": false - } - }, - { - "serviceName": "paymentservice", - "tagSets": [ - { - "tags": { - "version": "v177", - "region": "us-east-1" - }, - "tagGenerators": [], - "inherit": [] - } - ], - "routes": [ - { - "route": "/ChargeRequest", - "downstreamCalls": { - "paymentservice": "/CreditCardInfo" - }, - "tagSets": [ - { - "tags": {}, - "tagGenerators": [], - "inherit": [], - "maxLatency": 700 - } - ] - }, - { - "route": "/CreditCardInfo", - "downstreamCalls": {}, - "tagSets": [ - { - "tags": {}, - "tagGenerators": [], - "inherit": [], - "maxLatency": 50 - } - ] - } - ], - "instances": [ - "paymentservice-6b654dbf57-zq8dt", - "paymentservice-d847fdcf5-j6s2f" - ], - "mergedTagSets": {}, - "random": { - "seed": 174850031049111, - "nextNextGaussian": 0, - "haveNextNextGaussian": false - } - }, - { - "serviceName": "shippingservice", - "tagSets": [ - { - "tags": { - "version": "v127", - "region": "us-east-1" - }, - "tagGenerators": [], - "inherit": [] - } - ], - "routes": [ - { - "route": "/GetQuote", - "downstreamCalls": { - "shippingservice": "/Address" - }, - "tagSets": [ - { - "tags": {}, - "tagGenerators": [], - "inherit": [], - "maxLatency": 250 - } - ] - }, - { - "route": "/ShipOrder", - "downstreamCalls": { - "shippingservice": "/Address" - }, - "tagSets": [ - { - "tags": {}, - "tagGenerators": [], - "inherit": [], - "maxLatency": 500 - } - ] - }, - { - "route": "/Address", - 
"downstreamCalls": {}, - "tagSets": [ - { - "tags": {}, - "tagGenerators": [], - "inherit": [], - "maxLatency": 100 - } - ] - } - ], - "instances": [ - "shippingservice-6b654dbf57-zq8dt", - "shippingservice-d847fdcf5-j6s2f" - ], - "mergedTagSets": {}, - "random": { - "seed": 107892261530518, - "nextNextGaussian": 0, - "haveNextNextGaussian": false - } - }, - { - "serviceName": "emailservice", - "tagSets": [ - { - "tags": { - "version": "v27", - "region": "us-east-1" - }, - "tagGenerators": [], - "inherit": [], - "maxLatency": 500 - } - ], - "routes": [ - { - "route": "/SendOrderConfirmation", - "downstreamCalls": { - "emailservice": "/OrderResult" - }, - "tagSets": [ - { - "weight": 15, - "tags": { - "error": true, - "http.status_code": 503 - }, - "tagGenerators": [], - "inherit": [] - }, - { - "weight": 85, - "tags": {}, - "tagGenerators": [], - "inherit": [] - } - ] - }, - { - "route": "/OrderResult", - "downstreamCalls": {}, - "tagSets": [ - { - "tags": {}, - "tagGenerators": [], - "inherit": [], - "maxLatency": 100 - } - ] - } - ], - "instances": [ - "emailservice-6b654dbf57-zq8dt", - "emailservice-d847fdcf5-j6s2f" - ], - "mergedTagSets": {}, - "random": { - "seed": 61175057559946, - "nextNextGaussian": 0, - "haveNextNextGaussian": false - } - }, - { - "serviceName": "currencyservice", - "tagSets": [ - { - "tags": { - "version": "v27", - "region": "us-east-1" - }, - "tagGenerators": [], - "inherit": [] - } - ], - "routes": [ - { - "route": "/GetConversion", - "downstreamCalls": { - "currencyservice": "/Money" - }, - "tagSets": [ - { - "tags": {}, - "tagGenerators": [], - "inherit": [], - "maxLatency": 100 - } - ] - }, - { - "route": "/Money", - "downstreamCalls": {}, - "tagSets": [ - { - "tags": {}, - "tagGenerators": [], - "inherit": [], - "maxLatency": 100 - } - ] - } - ], - "instances": [ - "currencyservice-6b654dbf57-zq8dt", - "currencyservice-d847fdcf5-j6s2f" - ], - "mergedTagSets": {}, - "random": { - "seed": 66219471499700, - "nextNextGaussian": 0, - "haveNextNextGaussian": false - } - }, - { - "serviceName": "adservice", - "tagSets": [ - { - "tags": {}, - "tagGenerators": [], - "inherit": [], - "maxLatency": 500 - } - ], - "routes": [ - { - "route": "/AdRequest", - "downstreamCalls": {}, - "tagSets": [] - }, - { - "route": "/Ad", - "downstreamCalls": {}, - "tagSets": [] - } - ], - "instances": [ - "adservice-6b654dbf57-zq8dt", - "adservice-d847fdcf5-j6s2f" - ], - "mergedTagSets": {}, - "random": { - "seed": 22694143111805, - "nextNextGaussian": 0, - "haveNextNextGaussian": false - } - } - ] - }, - "rootRoutes": [ - { - "service": "frontend", - "route": "/product", - "tracesPerHour": 2880 - }, - { - "service": "frontend", - "route": "/cart", - "tracesPerHour": 14400 - }, - { - "service": "frontend", - "route": "/shipping", - "tracesPerHour": 480 - }, - { - "service": "frontend", - "route": "/currency", - "tracesPerHour": 200 - }, - { - "service": "frontend", - "route": "/checkout", - "tracesPerHour": 480 - } - ] -} \ No newline at end of file diff --git a/example/tk/lib/synthetic-load-generator/main.libsonnet b/example/tk/lib/synthetic-load-generator/main.libsonnet index cb902fa1be2..63294a42048 100644 --- a/example/tk/lib/synthetic-load-generator/main.libsonnet +++ b/example/tk/lib/synthetic-load-generator/main.libsonnet @@ -1,33 +1,17 @@ { local k = import 'ksonnet-util/kausal.libsonnet', - local configMap = k.core.v1.configMap, local container = k.core.v1.container, - local volumeMount = k.core.v1.volumeMount, local deployment = k.apps.v1.deployment, - local volume = 
k.core.v1.volume, - - synthetic_load_generator_configmap: - configMap.new('synthetic-load-generator') + - configMap.withData({ - 'load-generator.json': importstr './load-generator.json', - }), synthetic_load_generator_container:: - container.new('synthetic-load-gen', 'omnition/synthetic-load-generator:1.0.25') + - container.withVolumeMounts([ - volumeMount.new('conf', '/conf'), - ]) + + container.new('synthetic-load-gen', 'ghcr.io/grafana/xk6-client-tracing:v0.0.5') + container.withEnvMap({ - TOPOLOGY_FILE: '/conf/load-generator.json', - JAEGER_COLLECTOR_URL: 'http://tempo:14268', + ENDPOINT: 'http://tempo:4317', }), synthetic_load_generator_deployment: deployment.new('synthetic-load-generator', 1, [ $.synthetic_load_generator_container ], - { app: 'synthetic_load_generator' }) + - deployment.mixin.spec.template.spec.withVolumes([ - volume.fromConfigMap('conf', $.synthetic_load_generator_configmap.metadata.name), - ]), + { app: 'synthetic_load_generator' }), } diff --git a/example/tk/tempo-microservices/main.jsonnet b/example/tk/tempo-microservices/main.jsonnet index 5cc02f9cc95..5f2f6820145 100644 --- a/example/tk/tempo-microservices/main.jsonnet +++ b/example/tk/tempo-microservices/main.jsonnet @@ -26,17 +26,22 @@ minio + metrics + load + tempo { }, distributor+: { receivers: { - opencensus: null, - jaeger: { + otlp: { protocols: { - thrift_http: null, + grpc: null, }, }, }, }, metrics_generator+: { + replicas: 1, ephemeral_storage_limit_size: '2Gi', ephemeral_storage_request_size: '1Gi', + pvc_size: '1Gi', + pvc_storage_class: 'local-path', + }, + block_builder+:{ + replicas: 1, }, memcached+: { replicas: 1, @@ -79,8 +84,7 @@ minio + metrics + load + tempo { tempo_distributor_container+:: k.util.resourcesRequests('500m', '500Mi') + container.withPortsMixin([ - containerPort.new('opencensus', 55678), - containerPort.new('jaeger-http', 14268), + containerPort.new('otlp-grpc', 4317), ]), tempo_ingester_container+:: diff --git a/example/tk/vendor/github.com/grafana/jsonnet-libs/grafana-builder/grafana.libsonnet b/example/tk/vendor/github.com/grafana/jsonnet-libs/grafana-builder/grafana.libsonnet index 0bd0b339493..be43616c8d3 100644 --- a/example/tk/vendor/github.com/grafana/jsonnet-libs/grafana-builder/grafana.libsonnet +++ b/example/tk/vendor/github.com/grafana/jsonnet-libs/grafana-builder/grafana.libsonnet @@ -1,3 +1,5 @@ +local utils = import 'mixin-utils/utils.libsonnet'; + { dashboard(title, uid='', datasource='default', datasource_regex=''):: { // Stuff that isn't materialised. 
@@ -41,7 +43,7 @@ }, }, - addMultiTemplate(name, metric_name, label_name, hide=0, allValue='.+', sort=2):: self { + addMultiTemplate(name, metric_name, label_name, hide=0, allValue='.+', sort=2, includeAll=true):: self { templating+: { list+: [{ allValue: allValue, @@ -52,7 +54,7 @@ }, datasource: '$datasource', hide: hide, - includeAll: true, + includeAll: includeAll, label: name, multi: true, name: name, @@ -70,6 +72,40 @@ }, }, + addShowNativeLatencyVariable():: self { + templating+: { + list+: [{ + current: { + selected: true, + text: 'classic', + value: '1', + }, + description: 'Choose between showing latencies based on low precision classic or high precision native histogram metrics.', + hide: 0, + includeAll: false, + label: 'Latency metrics', + multi: false, + name: 'latency_metrics', + query: 'native : -1,classic : 1', + options: [ + { + selected: false, + text: 'native', + value: '-1', + }, + { + selected: true, + text: 'classic', + value: '1', + }, + ], + skipUrlSync: false, + type: 'custom', + useTags: false, + }], + }, + }, + dashboardLinkUrl(title, url):: self { links+: [ { @@ -296,17 +332,25 @@ }, statPanel(query, format='percentunit'):: { + local isNativeClassic = utils.isNativeClassicQuery(query), type: 'singlestat', thresholds: '70,80', format: format, targets: [ { - expr: query, + expr: if isNativeClassic then utils.showClassicHistogramQuery(query) else query, + format: 'time_series', + instant: true, + refId: if isNativeClassic then 'A_classic' else 'A', + }, + ] + if isNativeClassic then [ + { + expr: utils.showNativeHistogramQuery(query), format: 'time_series', instant: true, refId: 'A', }, - ], + ] else [], }, tablePanel(queries, labelStyles):: { @@ -420,18 +464,20 @@ }, ], + httpStatusColors:: { + '1xx': '#EAB839', + '2xx': '#7EB26D', + '3xx': '#6ED0E0', + '4xx': '#EF843C', + '5xx': '#E24D42', + OK: '#7EB26D', + success: '#7EB26D', + 'error': '#E24D42', + cancel: '#A9A9A9', + }, + qpsPanel(selector, statusLabelName='status_code'):: { - aliasColors: { - '1xx': '#EAB839', - '2xx': '#7EB26D', - '3xx': '#6ED0E0', - '4xx': '#EF843C', - '5xx': '#E24D42', - OK: '#7EB26D', - success: '#7EB26D', - 'error': '#E24D42', - cancel: '#A9A9A9', - }, + aliasColors: $.httpStatusColors, targets: [ { expr: @@ -448,6 +494,65 @@ ], } + $.stack, + // Assumes that the metricName is for a histogram (as opposed to qpsPanel above) + // Assumes that there is a dashboard variable named latency_metrics, values are -1 (native) or 1 (classic) + qpsPanelNativeHistogram(metricName, selector, statusLabelName='status_code'):: { + local sumByStatus(nativeClassicQuery) = { + local template = + ||| + sum by (status) ( + label_replace(label_replace(%(metricQuery)s, + "status", "${1}xx", "%(label)s", "([0-9]).."), + "status", "${1}", "%(label)s", "([a-zA-Z]+)")) + |||, + native: template % { metricQuery: nativeClassicQuery.native, label: statusLabelName }, + classic: template % { metricQuery: nativeClassicQuery.classic, label: statusLabelName }, + }, + fieldConfig+: { + defaults+: { + custom+: { + lineWidth: 0, + fillOpacity: 100, // Get solid fill. 
+ stacking: { + mode: 'normal', + group: 'A', + }, + }, + unit: 'reqps', + min: 0, + }, + overrides+: [{ + matcher: { + id: 'byName', + options: status, + }, + properties: [ + { + id: 'color', + value: { + mode: 'fixed', + fixedColor: $.httpStatusColors[status], + }, + }, + ], + } for status in std.objectFieldsAll($.httpStatusColors)], + }, + targets: [ + { + expr: utils.showClassicHistogramQuery(sumByStatus(utils.ncHistogramCountRate(metricName, selector))), + format: 'time_series', + legendFormat: '{{status}}', + refId: 'A_classic', + }, + { + expr: utils.showNativeHistogramQuery(sumByStatus(utils.ncHistogramCountRate(metricName, selector))), + format: 'time_series', + legendFormat: '{{status}}', + refId: 'A', + }, + ], + } + $.stack, + latencyPanel(metricName, selector, multiplier='1e3'):: { nullPointMode: 'null as zero', targets: [ @@ -473,6 +578,58 @@ yaxes: $.yaxes('ms'), }, + // Assumes that there is a dashboard variable named latency_metrics, values are -1 (native) or 1 (classic) + latencyPanelNativeHistogram(metricName, selector, multiplier='1e3'):: { + nullPointMode: 'null as zero', + fieldConfig+: { + defaults+: { + custom+: { + fillOpacity: 10, + }, + unit: 'ms', + }, + }, + targets: [ + { + expr: utils.showNativeHistogramQuery(utils.ncHistogramQuantile('0.99', metricName, selector, multiplier=multiplier)), + format: 'time_series', + legendFormat: '99th percentile', + refId: 'A', + }, + { + expr: utils.showClassicHistogramQuery(utils.ncHistogramQuantile('0.99', metricName, selector, multiplier=multiplier)), + format: 'time_series', + legendFormat: '99th percentile', + refId: 'A_classic', + }, + { + expr: utils.showNativeHistogramQuery(utils.ncHistogramQuantile('0.50', metricName, selector, multiplier=multiplier)), + format: 'time_series', + legendFormat: '50th percentile', + refId: 'B', + }, + { + expr: utils.showClassicHistogramQuery(utils.ncHistogramQuantile('0.50', metricName, selector, multiplier=multiplier)), + format: 'time_series', + legendFormat: '50th percentile', + refId: 'B_classic', + }, + { + expr: utils.showNativeHistogramQuery(utils.ncHistogramAverageRate(metricName, selector, multiplier=multiplier)), + format: 'time_series', + legendFormat: 'Average', + refId: 'C', + }, + { + expr: utils.showClassicHistogramQuery(utils.ncHistogramAverageRate(metricName, selector, multiplier=multiplier)), + format: 'time_series', + legendFormat: 'Average', + refId: 'C_classic', + }, + ], + yaxes: $.yaxes('ms'), + }, + selector:: { eq(label, value):: { label: label, op: '=', value: value }, neq(label, value):: { label: label, op: '!=', value: value }, diff --git a/go.mod b/go.mod index 2c5cf4c6357..76a669f717f 100644 --- a/go.mod +++ b/go.mod @@ -4,10 +4,9 @@ go 1.23.3 require ( cloud.google.com/go/storage v1.41.0 - github.com/alecthomas/kong v0.8.0 - github.com/alicebob/miniredis/v2 v2.21.0 - github.com/aws/aws-sdk-go v1.55.5 - github.com/cespare/xxhash v1.1.0 + github.com/alecthomas/kong v1.6.1 + github.com/alicebob/miniredis/v2 v2.34.0 + github.com/aws/aws-sdk-go v1.55.6 github.com/cespare/xxhash/v2 v2.3.0 github.com/cristalhq/hedgedhttp v0.9.1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc @@ -28,116 +27,137 @@ require ( github.com/grafana/dskit v0.0.0-20241115082728-f2a7eb3aa0e9 github.com/grafana/e2e v0.1.1 github.com/hashicorp/go-hclog v1.6.3 // indirect - github.com/hashicorp/go-plugin v1.6.0 // indirect - github.com/jaegertracing/jaeger v1.57.0 - github.com/jedib0t/go-pretty/v6 v6.2.4 + github.com/jaegertracing/jaeger v1.65.0 + 
github.com/jedib0t/go-pretty/v6 v6.6.5 github.com/json-iterator/go v1.1.12 github.com/jsternberg/zap-logfmt v1.2.0 github.com/klauspost/compress v1.17.11 - github.com/minio/minio-go/v7 v7.0.80 - github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c + github.com/minio/minio-go/v7 v7.0.84 + github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/olekukonko/tablewriter v0.0.5 - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.102.0 - github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.118.0 + github.com/opentracing-contrib/go-grpc v0.1.0 github.com/opentracing/opentracing-go v1.2.0 - github.com/pierrec/lz4/v4 v4.1.21 + github.com/pierrec/lz4/v4 v4.1.22 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.19.1 + github.com/prometheus/client_golang v1.20.5 github.com/prometheus/client_model v0.6.1 - github.com/prometheus/common v0.55.0 - github.com/prometheus/prometheus v0.54.0 - github.com/prometheus/statsd_exporter v0.26.0 + github.com/prometheus/common v0.62.0 + github.com/prometheus/prometheus v0.54.1 + github.com/prometheus/statsd_exporter v0.26.1 github.com/segmentio/fasthash v1.0.3 github.com/sony/gobreaker v0.4.1 - github.com/spf13/viper v1.18.2 - github.com/stretchr/testify v1.9.0 + github.com/spf13/viper v1.19.0 + github.com/stretchr/testify v1.10.0 github.com/uber-go/atomic v1.4.0 github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect github.com/willf/bloom v2.0.3+incompatible go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/collector v0.102.1 - go.opentelemetry.io/collector/component v0.102.1 - go.opentelemetry.io/collector/confmap v0.102.1 - go.opentelemetry.io/collector/consumer v0.102.1 - go.opentelemetry.io/collector/pdata v1.12.0 - go.opentelemetry.io/collector/semconv v0.105.0 // indirect - go.opentelemetry.io/otel v1.31.0 - go.opentelemetry.io/otel/bridge/opencensus v1.27.0 - go.opentelemetry.io/otel/bridge/opentracing v1.26.0 + go.opentelemetry.io/collector v0.118.0 // indirect + go.opentelemetry.io/collector/component v0.118.0 + go.opentelemetry.io/collector/confmap v1.24.0 + go.opentelemetry.io/collector/consumer v1.24.0 + go.opentelemetry.io/collector/pdata v1.24.0 + go.opentelemetry.io/collector/semconv v0.118.0 // indirect + go.opentelemetry.io/otel v1.34.0 + go.opentelemetry.io/otel/bridge/opencensus v1.34.0 + go.opentelemetry.io/otel/bridge/opentracing v1.34.0 go.opentelemetry.io/otel/exporters/jaeger v1.17.0 - go.opentelemetry.io/otel/metric v1.31.0 - go.opentelemetry.io/otel/sdk v1.31.0 - go.opentelemetry.io/otel/trace v1.31.0 + go.opentelemetry.io/otel/metric v1.34.0 + go.opentelemetry.io/otel/sdk v1.34.0 + go.opentelemetry.io/otel/trace v1.34.0 go.uber.org/atomic v1.11.0 go.uber.org/goleak v1.3.0 go.uber.org/multierr v1.11.0 go.uber.org/zap v1.27.0 - golang.org/x/sync v0.9.0 - golang.org/x/time v0.5.0 - google.golang.org/api v0.188.0 - google.golang.org/grpc v1.65.0 - google.golang.org/protobuf v1.34.2 + golang.org/x/sync v0.10.0 // indirect + golang.org/x/time v0.9.0 + google.golang.org/api v0.218.0 + google.golang.org/grpc v1.69.4 + google.golang.org/protobuf v1.36.3 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 ) require ( - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 - github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 + 
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.1 + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0 github.com/evanphx/json-patch v5.9.0+incompatible - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da - github.com/googleapis/gax-go/v2 v2.13.0 - github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56 + github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 + github.com/googleapis/gax-go/v2 v2.14.1 + github.com/grafana/gomemcache v0.0.0-20241016125027-0a5bcc5aef40 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter v0.102.0 - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.97.0 - github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.97.0 - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.102.0 - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.102.0 - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver v0.102.0 - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.102.0 - github.com/parquet-go/parquet-go v0.23.1-0.20241011155651-6446d1d0d2fe + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter v0.118.0 + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.118.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.118.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.118.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.118.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver v0.118.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.118.0 + github.com/parquet-go/parquet-go v0.24.0 github.com/stoewer/parquet-cli v0.0.9 - go.opentelemetry.io/collector/config/configgrpc v0.102.1 - go.opentelemetry.io/collector/config/confighttp v0.102.1 - go.opentelemetry.io/collector/config/configtls v1.18.0 - go.opentelemetry.io/collector/exporter v0.102.1 - go.opentelemetry.io/collector/exporter/otlpexporter v0.102.1 - go.opentelemetry.io/collector/extension v0.102.1 - go.opentelemetry.io/collector/otelcol v0.102.1 - go.opentelemetry.io/collector/processor v0.102.1 - go.opentelemetry.io/collector/receiver v0.102.1 - go.opentelemetry.io/collector/receiver/otlpreceiver v0.102.1 - go.opentelemetry.io/contrib/exporters/autoexport v0.53.0 - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 - go.opentelemetry.io/proto/otlp v1.3.1 - golang.org/x/net v0.31.0 - golang.org/x/oauth2 v0.21.0 - google.golang.org/genproto/googleapis/rpc v0.0.0-20240711142825-46eb208f015d + github.com/twmb/franz-go v1.18.1 + github.com/twmb/franz-go/pkg/kadm v1.14.0 + github.com/twmb/franz-go/pkg/kfake v0.0.0-20241202133023-293b7c4c56bb + github.com/twmb/franz-go/pkg/kmsg v1.9.0 + github.com/twmb/franz-go/plugin/kotel v1.5.0 + github.com/twmb/franz-go/plugin/kprom v1.1.0 + go.opentelemetry.io/collector/client v1.24.0 + go.opentelemetry.io/collector/component/componenttest v0.118.0 + go.opentelemetry.io/collector/config/configgrpc v0.118.0 + 
go.opentelemetry.io/collector/config/confighttp v0.118.0 + go.opentelemetry.io/collector/config/configtls v1.24.0 + go.opentelemetry.io/collector/exporter v0.118.0 + go.opentelemetry.io/collector/exporter/exportertest v0.118.0 + go.opentelemetry.io/collector/exporter/otlpexporter v0.118.0 + go.opentelemetry.io/collector/exporter/otlphttpexporter v0.118.0 + go.opentelemetry.io/collector/extension v0.118.0 + go.opentelemetry.io/collector/otelcol v0.118.0 + go.opentelemetry.io/collector/pdata/testdata v0.118.0 + go.opentelemetry.io/collector/processor v0.118.0 + go.opentelemetry.io/collector/receiver v0.118.0 + go.opentelemetry.io/collector/receiver/otlpreceiver v0.118.0 + go.opentelemetry.io/contrib/exporters/autoexport v0.59.0 + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.34.0 + go.opentelemetry.io/proto/otlp v1.5.0 + golang.org/x/net v0.34.0 + google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f ) require ( cloud.google.com/go v0.115.0 // indirect - cloud.google.com/go/auth v0.7.0 // indirect - cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect - cloud.google.com/go/compute/metadata v0.5.0 // indirect - cloud.google.com/go/iam v1.1.10 // indirect + cloud.google.com/go/auth v0.14.0 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.7 // indirect + cloud.google.com/go/compute/metadata v0.6.0 // indirect + cloud.google.com/go/iam v1.1.11 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect - github.com/IBM/sarama v1.43.2 // indirect - github.com/VividCortex/gohistogram v1.0.0 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2 // indirect + github.com/IBM/sarama v1.45.0 // indirect github.com/alecthomas/participle/v2 v2.1.1 // indirect - github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 // indirect - github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect + github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect + github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302 // indirect github.com/andybalholm/brotli v1.1.1 // indirect - github.com/apache/thrift v0.20.0 // indirect + github.com/antchfx/xmlquery v1.4.3 // indirect + github.com/antchfx/xpath v1.3.3 // indirect + github.com/apache/thrift v0.21.0 // indirect github.com/armon/go-metrics v0.4.1 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect + github.com/aws/aws-msk-iam-sasl-signer-go v1.0.0 // indirect github.com/aws/aws-sdk-go-v2 v1.22.2 // indirect github.com/aws/aws-sdk-go-v2/config v1.24.0 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.15.2 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.3 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.2 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.2 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.7.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.2 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.17.1 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.19.1 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.25.1 // indirect + github.com/aws/smithy-go v1.16.0 // indirect github.com/bboreham/go-loser 
v0.0.0-20230920113527-fcc2c21820a3 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect @@ -146,16 +166,18 @@ require ( github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/dennwc/varint v1.0.0 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect - github.com/eapache/go-resiliency v1.6.0 // indirect + github.com/eapache/go-resiliency v1.7.0 // indirect github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect github.com/eapache/queue v1.1.0 // indirect + github.com/ebitengine/purego v0.8.1 // indirect github.com/edsrzf/mmap-go v1.1.0 // indirect - github.com/expr-lang/expr v1.16.2 // indirect + github.com/elastic/go-grok v0.3.1 // indirect + github.com/elastic/lunes v0.1.0 // indirect + github.com/expr-lang/expr v1.16.9 // indirect github.com/fatih/color v1.16.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/fsnotify/fsnotify v1.8.0 // indirect github.com/go-ini/ini v1.67.0 // indirect - github.com/go-kit/kit v0.13.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect @@ -168,18 +190,19 @@ require ( github.com/go-openapi/strfmt v0.23.0 // indirect github.com/go-openapi/swag v0.22.9 // indirect github.com/go-openapi/validate v0.23.0 // indirect - github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect + github.com/go-viper/mapstructure/v2 v2.2.1 // indirect github.com/gobwas/glob v0.2.3 // indirect - github.com/goccy/go-json v0.10.3 // indirect + github.com/goccy/go-json v0.10.4 // indirect github.com/gogo/googleapis v1.4.1 // indirect github.com/golang-jwt/jwt/v5 v5.2.1 // indirect github.com/google/btree v1.1.2 // indirect - github.com/google/s2a-go v0.1.7 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/gorilla/handlers v1.5.1 // indirect + github.com/google/s2a-go v0.1.9 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect + github.com/gorilla/handlers v1.5.2 // indirect github.com/grafana/pyroscope-go/godeltaprof v0.1.8 // indirect github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect + github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.0 // indirect github.com/hashicorp/consul/api v1.29.2 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect @@ -195,7 +218,6 @@ require ( github.com/hashicorp/hcl v1.0.0 // indirect github.com/hashicorp/memberlist v0.5.0 // indirect github.com/hashicorp/serf v0.10.1 // indirect - github.com/hashicorp/yamux v0.1.1 // indirect github.com/iancoleman/strcase v0.3.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jcmturner/aescts/v2 v2.0.0 // indirect @@ -207,11 +229,12 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/julienschmidt/httprouter v1.3.0 // indirect - github.com/klauspost/cpuid/v2 v2.2.8 // indirect + github.com/klauspost/cpuid/v2 v2.2.9 // indirect github.com/knadh/koanf v1.5.0 // indirect - github.com/knadh/koanf/v2 v2.1.1 // indirect + github.com/knadh/koanf/v2 v2.1.2 // indirect github.com/kylelemons/godebug v1.1.0 // indirect - github.com/lufia/plan9stats 
v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 // indirect + github.com/magefile/mage v1.15.0 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect @@ -221,111 +244,127 @@ require ( github.com/minio/md5-simd v1.1.2 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/mitchellh/go-testing-interface v1.0.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mostynb/go-grpc-compression v1.2.3 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect - github.com/oklog/run v1.1.0 // indirect github.com/oklog/ulid v1.3.1 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.102.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.102.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.102.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.97.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.102.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.102.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.102.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.102.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.102.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.102.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.102.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.118.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.118.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.118.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.118.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.118.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.118.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic v0.118.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.118.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.118.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.118.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.118.0 // indirect github.com/opentracing-contrib/go-stdlib v1.0.0 // indirect github.com/openzipkin/zipkin-go v0.4.3 // indirect - github.com/pelletier/go-toml/v2 v2.1.0 // indirect + github.com/pelletier/go-toml/v2 v2.2.3 // indirect github.com/pires/go-proxyproto v0.7.0 // indirect github.com/pkg/browser 
v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect github.com/prometheus/alertmanager v0.27.0 // indirect github.com/prometheus/common/sigv4 v0.1.0 // indirect github.com/prometheus/exporter-toolkit v0.11.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect - github.com/relvacode/iso8601 v1.4.0 // indirect + github.com/relvacode/iso8601 v1.6.0 // indirect github.com/rivo/uniseg v0.4.7 // indirect - github.com/rs/cors v1.11.0 // indirect + github.com/rs/cors v1.11.1 // indirect github.com/rs/xid v1.6.0 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect github.com/sercand/kuberesolver/v5 v5.1.1 // indirect - github.com/shirou/gopsutil/v3 v3.24.4 // indirect - github.com/shoenig/go-m1cpu v0.1.6 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect github.com/soheilhy/cmux v0.1.5 // indirect github.com/sourcegraph/conc v0.3.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.6.0 // indirect - github.com/spf13/cobra v1.8.0 // indirect + github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/subosito/gotenv v1.6.0 // indirect - github.com/tklauser/go-sysconf v0.3.12 // indirect - github.com/tklauser/numcpus v0.6.1 // indirect + github.com/tklauser/go-sysconf v0.3.14 // indirect + github.com/tklauser/numcpus v0.8.0 // indirect + github.com/ua-parser/uap-go v0.0.0-20241012191800-bbb40edc15aa // indirect github.com/uber/jaeger-lib v2.4.1+incompatible // indirect github.com/willf/bitset v1.1.11 // indirect github.com/xdg-go/pbkdf2 v1.0.0 // indirect github.com/xdg-go/scram v1.1.2 // indirect github.com/xdg-go/stringprep v1.0.4 // indirect - github.com/yuin/gopher-lua v0.0.0-20220504180219-658193537a64 // indirect + github.com/yuin/gopher-lua v1.1.1 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect - go.etcd.io/etcd/api/v3 v3.5.10 // indirect - go.etcd.io/etcd/client/pkg/v3 v3.5.10 // indirect - go.etcd.io/etcd/client/v3 v3.5.10 // indirect + go.etcd.io/etcd/api/v3 v3.5.12 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.5.12 // indirect + go.etcd.io/etcd/client/v3 v3.5.12 // indirect go.mongodb.org/mongo-driver v1.15.0 // indirect - go.opentelemetry.io/collector/config/configauth v0.102.1 // indirect - go.opentelemetry.io/collector/config/configcompression v1.9.0 // indirect - go.opentelemetry.io/collector/config/confignet v0.102.1 // indirect - go.opentelemetry.io/collector/config/configopaque v1.18.0 // indirect - go.opentelemetry.io/collector/config/configretry v0.102.1 // indirect - go.opentelemetry.io/collector/config/configtelemetry v0.102.1 // indirect - go.opentelemetry.io/collector/config/internal v0.102.1 // indirect - go.opentelemetry.io/collector/confmap/converter/expandconverter v0.102.1 // indirect - go.opentelemetry.io/collector/confmap/provider/envprovider v0.102.1 // indirect - go.opentelemetry.io/collector/confmap/provider/fileprovider v0.102.1 // indirect - go.opentelemetry.io/collector/confmap/provider/httpprovider v0.102.1 // indirect - 
go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.102.1 // indirect - go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.102.1 // indirect - go.opentelemetry.io/collector/connector v0.102.1 // indirect - go.opentelemetry.io/collector/extension/auth v0.102.1 // indirect - go.opentelemetry.io/collector/featuregate v1.9.0 // indirect - go.opentelemetry.io/collector/service v0.102.1 // indirect - go.opentelemetry.io/contrib/bridges/prometheus v0.53.0 // indirect - go.opentelemetry.io/contrib/config v0.7.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 // indirect - go.opentelemetry.io/contrib/propagators/b3 v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.4.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/prometheus v0.50.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.4.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 // indirect - go.opentelemetry.io/otel/log v0.4.0 // indirect - go.opentelemetry.io/otel/sdk/log v0.4.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.28.0 // indirect - golang.org/x/crypto v0.29.0 // indirect - golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect - golang.org/x/mod v0.19.0 // indirect - golang.org/x/sys v0.27.0 // indirect - golang.org/x/text v0.20.0 // indirect - golang.org/x/tools v0.23.0 // indirect - gonum.org/v1/gonum v0.15.0 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/collector/component/componentstatus v0.118.0 // indirect + go.opentelemetry.io/collector/config/configauth v0.118.0 // indirect + go.opentelemetry.io/collector/config/configcompression v1.24.0 // indirect + go.opentelemetry.io/collector/config/confignet v1.24.0 // indirect + go.opentelemetry.io/collector/config/configopaque v1.24.0 // indirect + go.opentelemetry.io/collector/config/configretry v1.24.0 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.118.0 // indirect + go.opentelemetry.io/collector/connector v0.118.0 // indirect + go.opentelemetry.io/collector/connector/connectortest v0.118.0 // indirect + go.opentelemetry.io/collector/connector/xconnector v0.118.0 // indirect + go.opentelemetry.io/collector/consumer/consumererror v0.118.0 // indirect + go.opentelemetry.io/collector/consumer/consumererror/xconsumererror v0.118.0 // indirect + go.opentelemetry.io/collector/consumer/consumertest v0.118.0 // indirect + go.opentelemetry.io/collector/consumer/xconsumer v0.118.0 // indirect + go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper v0.118.0 // indirect + go.opentelemetry.io/collector/exporter/xexporter v0.118.0 // indirect + go.opentelemetry.io/collector/extension/auth v0.118.0 // indirect + go.opentelemetry.io/collector/extension/extensioncapabilities v0.118.0 // indirect + go.opentelemetry.io/collector/extension/extensiontest v0.118.0 // indirect + go.opentelemetry.io/collector/extension/xextension v0.118.0 // indirect + go.opentelemetry.io/collector/featuregate v1.24.0 // indirect + go.opentelemetry.io/collector/internal/fanoutconsumer v0.118.0 // indirect + go.opentelemetry.io/collector/internal/sharedcomponent 
v0.118.0 // indirect + go.opentelemetry.io/collector/pdata/pprofile v0.118.0 // indirect + go.opentelemetry.io/collector/pipeline v0.118.0 // indirect + go.opentelemetry.io/collector/pipeline/xpipeline v0.118.0 // indirect + go.opentelemetry.io/collector/processor/processortest v0.118.0 // indirect + go.opentelemetry.io/collector/processor/xprocessor v0.118.0 // indirect + go.opentelemetry.io/collector/receiver/receivertest v0.118.0 // indirect + go.opentelemetry.io/collector/receiver/xreceiver v0.118.0 // indirect + go.opentelemetry.io/collector/service v0.118.0 // indirect + go.opentelemetry.io/contrib/bridges/otelzap v0.8.0 // indirect + go.opentelemetry.io/contrib/bridges/prometheus v0.59.0 // indirect + go.opentelemetry.io/contrib/config v0.10.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect + go.opentelemetry.io/contrib/propagators/b3 v1.33.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.10.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.10.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.34.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.34.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 // indirect + go.opentelemetry.io/otel/exporters/prometheus v0.56.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.10.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.34.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.34.0 // indirect + go.opentelemetry.io/otel/log v0.10.0 // indirect + go.opentelemetry.io/otel/sdk/log v0.10.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.34.0 // indirect + golang.org/x/crypto v0.32.0 // indirect + golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f // indirect + golang.org/x/mod v0.22.0 // indirect + golang.org/x/oauth2 v0.25.0 // indirect + golang.org/x/sys v0.29.0 // indirect + golang.org/x/text v0.21.0 // indirect + golang.org/x/tools v0.27.0 // indirect + gonum.org/v1/gonum v0.15.1 // indirect google.golang.org/genproto v0.0.0-20240708141625-4ad9e859172b // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f // indirect gopkg.in/ini.v1 v1.67.0 // indirect k8s.io/apimachinery v0.29.3 // indirect k8s.io/client-go v0.29.3 // indirect diff --git a/go.sum b/go.sum index aa1e1948d3a..0285fb2f8b7 100644 --- a/go.sum +++ b/go.sum @@ -15,22 +15,22 @@ cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOY cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14= cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU= -cloud.google.com/go/auth v0.7.0 h1:kf/x9B3WTbBUHkC+1VS8wwwli9TzhSt0vSTVBmMR8Ts= -cloud.google.com/go/auth v0.7.0/go.mod h1:D+WqdrpcjmiCgWrXmLLxOVq1GACoE36chW6KXoEvuIw= -cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= -cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= +cloud.google.com/go/auth v0.14.0 h1:A5C4dKV/Spdvxcl0ggWwWEzzP7AZMJSEIgrkngwhGYM= +cloud.google.com/go/auth v0.14.0/go.mod h1:CYsoRL1PdiDuqeQpZE0bP2pnPrGqFcOkI0nldEQis+A= +cloud.google.com/go/auth/oauth2adapt v0.2.7 
h1:/Lc7xODdqcEw8IrZ9SvwnlLX6j9FHQM74z6cBk9Rw6M= +cloud.google.com/go/auth/oauth2adapt v0.2.7/go.mod h1:NTbTTzfvPl1Y3V1nPpOgl2w6d/FjO7NNUQaWSox6ZMc= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= -cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= +cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= +cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/iam v1.1.10 h1:ZSAr64oEhQSClwBL670MsJAW5/RLiC6kfw3Bqmd5ZDI= -cloud.google.com/go/iam v1.1.10/go.mod h1:iEgMq62sg8zx446GCaijmA2Miwg5o3UbO+nI47WHJps= +cloud.google.com/go/iam v1.1.11 h1:0mQ8UKSfdHLut6pH9FM3bI55KWR46ketn0PuXleDyxw= +cloud.google.com/go/iam v1.1.11/go.mod h1:biXoiLWYIKntto2joP+62sd9uW5EpkZmKIvfNcTWlnQ= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -43,26 +43,26 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 cloud.google.com/go/storage v1.41.0 h1:RusiwatSu6lHeEXe3kglxakAmAbfV+rhtPqA6i8RBx0= cloud.google.com/go/storage v1.41.0/go.mod h1:J1WCa/Z2FcgdEDuPUY8DxT5I+d9mFKsCepp5vR6Sq80= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 h1:JZg6HRh6W6U4OLl6lk7BZ7BLisIzM9dG1R50zUk9C/M= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0/go.mod h1:YL1xnZ6QejvQHWJrX/AvhFl4WW4rqHVoKspWNVwFk0M= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 h1:B/dfvscEQtew9dVuoxqxrUKKv8Ih2f55PydknDamU+g= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0/go.mod h1:fiPSssYvltE08HJchL04dOy+RD4hgrjph0cwGGMntdI= -github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0 h1:+m0M/LFxN43KvULkDNfdXOgrjtg6UYJPFBJyuEcRCAw= -github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0/go.mod h1:PwOyop78lveYMRs6oCxjiVyBdyCgIYH6XHIVZO9/SFQ= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 h1:g0EZJwz7xkXQiZAI5xi9f3WWFYBlX1CPTrR+NDToRkQ= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0/go.mod h1:XCW7KnZet0Opnr7HccfUw1PLc4CjHqpcaxW8DHklNkQ= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.1 h1:1mvYtZfWQAnwNah/C+Z+Jb9rQH95LPE2vlmMuWAHJk8= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.1/go.mod h1:75I/mXtme1JyWFtz8GocPHVFyH421IBoZErnO16dd0k= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.1 h1:Bk5uOhSAenHyR5P61D/NzeQCv+4fEVV8mOkJ82NqpWw= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.1/go.mod 
h1:QZ4pw3or1WPmRBxf0cHd1tknzrT54WPBOQoGutCPvSU= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0/go.mod h1:Y/HgrePTmGy9HjdSGTqZNa+apUpTVIEVKXJyARP2lrk= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.2.0 h1:Ma67P/GGprNwsslzEH6+Kb8nybI8jpDTm4Wmzu2ReK8= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.2.0/go.mod h1:c+Lifp3EDEamAkPVzMooRNOK6CZjNSdEnf1A7jsI9u4= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 h1:gggzg0SUMs6SQbEw+3LoSsYf9YMjkupeAnHMX8O9mmY= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0/go.mod h1:+6KLcKIVgxoBDMqMO/Nvy7bZ9a0nbU3I1DtFQK3YvB4= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0 h1:PiSrjRPpkQNjrM8H0WwKMnZUdu1RGMtd/LdGKUrOo+c= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0/go.mod h1:oDrbWx4ewMylP7xHivfgixbfGBT6APAwsSoHRKotnIc= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0 h1:mlmW46Q0B79I+Aj4azKC6xDMFN9a9SyZWESlGWYXbFs= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0/go.mod h1:PXe2h+LKcWTX9afWdZoHyODqR4fBa5boUM/8uJfZ0Jo= github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM= github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2 h1:kYRSnvJju5gYVyhkij+RTJ/VR6QIUaCfWeaFm2ycsjQ= +github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U+0KMqAA0KcU= @@ -70,38 +70,38 @@ github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1v github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= -github.com/IBM/sarama v1.43.2 h1:HABeEqRUh32z8yzY2hGB/j8mHSzC/HA9zlEjqFNCzSw= -github.com/IBM/sarama v1.43.2/go.mod h1:Kyo4WkF24Z+1nz7xeVUFWIuKVV8RS3wM8mkvPKMdXFQ= -github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= -github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= 
-github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= -github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/alecthomas/assert/v2 v2.3.0 h1:mAsH2wmvjsuvyBvAmCtm7zFsBlb8mIHx5ySLVdDZXL0= -github.com/alecthomas/assert/v2 v2.3.0/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhktn7S0bBDLxvQ= -github.com/alecthomas/kong v0.8.0 h1:ryDCzutfIqJPnNn0omnrgHLbAggDQM2VWHikE1xqK7s= -github.com/alecthomas/kong v0.8.0/go.mod h1:n1iCIO2xS46oE8ZfYCNDqdR0b0wZNrXAIAqro/2132U= +github.com/IBM/sarama v1.45.0 h1:IzeBevTn809IJ/dhNKhP5mpxEXTmELuezO2tgHD9G5E= +github.com/IBM/sarama v1.45.0/go.mod h1:EEay63m8EZkeumco9TDXf2JT3uDnZsZqFgV46n4yZdY= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/alecthomas/assert/v2 v2.11.0 h1:2Q9r3ki8+JYXvGsDyBXwH3LcJ+WK5D0gc5E8vS6K3D0= +github.com/alecthomas/assert/v2 v2.11.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k= +github.com/alecthomas/kong v1.6.1 h1:/7bVimARU3uxPD0hbryPE8qWrS3Oz3kPQoxA/H2NKG8= +github.com/alecthomas/kong v1.6.1/go.mod h1:p2vqieVMeTAnaC83txKtXe8FLke2X07aruPWXyMPQrU= github.com/alecthomas/participle/v2 v2.1.1 h1:hrjKESvSqGHzRb4yW1ciisFJ4p3MGYih6icjJvbsmV8= github.com/alecthomas/participle/v2 v2.1.1/go.mod h1:Y1+hAs8DHPmc3YUFzqllV+eSQ9ljPTk0ZkPMtEdAx2c= -github.com/alecthomas/repr v0.2.0 h1:HAzS41CIzNW5syS8Mf9UwXhNH1J9aix/BvDRf1Ml2Yk= -github.com/alecthomas/repr v0.2.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= +github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc= +github.com/alecthomas/repr v0.4.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 h1:t3eaIm0rUkzbrIewtiFmMK5RXHej2XnoXNhxVsAYUfg= -github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= -github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk= -github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= -github.com/alicebob/miniredis/v2 v2.21.0 h1:CdmwIlKUWFBDS+4464GtQiQ0R1vpzOgu4Vnd74rBL7M= -github.com/alicebob/miniredis/v2 v2.21.0/go.mod h1:XNqvJdQJv5mSuVMc0ynneafpnL/zv52acZ6kqeS0t88= +github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b h1:mimo19zliBX/vSQ6PWWSL9lK8qwHozUj03+zLoEB8O0= +github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= +github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302 h1:uvdUDbHQHO85qeSydJtItA4T55Pw6BtAejd0APRJOCE= 
+github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= +github.com/alicebob/miniredis/v2 v2.34.0 h1:mBFWMaJSNL9RwdGRyEDoAAv8OQc5UlEhLDQggTglU/0= +github.com/alicebob/miniredis/v2 v2.34.0/go.mod h1:kWShP4b58T1CW0Y5dViCd5ztzrDqRWqM3nksiyXk5s8= github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA= github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA= +github.com/antchfx/xmlquery v1.4.3 h1:f6jhxCzANrWfa93O+NmRWvieVyLs+R2Szfpy+YrZaww= +github.com/antchfx/xmlquery v1.4.3/go.mod h1:AEPEEPYE9GnA2mj5Ur2L5Q5/2PycJ0N9Fusrx9b12fc= +github.com/antchfx/xpath v1.3.3 h1:tmuPQa1Uye0Ym1Zn65vxPgfltWb/Lxu2jeqIGteJSRs= +github.com/antchfx/xpath v1.3.3/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/apache/thrift v0.20.0 h1:631+KvYbsBZxmuJjYwhezVsrfc/TbqtZV4QcxOX1fOI= -github.com/apache/thrift v0.20.0/go.mod h1:hOk1BQqcp2OLzGsyVXdfMk7YFlMxK3aoEVhjD06QhB8= +github.com/apache/thrift v0.21.0 h1:tdPmh/ptjE1IJnhbhrcl2++TauVjy242rkV/UzJChnE= +github.com/apache/thrift v0.21.0/go.mod h1:W1H8aR/QRtYNvrPeFXBtobyRkd0/YVhTc6i07XIAgDw= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= @@ -110,9 +110,11 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/aws/aws-msk-iam-sasl-signer-go v1.0.0 h1:UyjtGmO0Uwl/K+zpzPwLoXzMhcN9xmnR2nrqJoBrg3c= +github.com/aws/aws-msk-iam-sasl-signer-go v1.0.0/go.mod h1:TJAXuFs2HcMib3sN5L0gUC+Q01Qvy3DemvA55WuC+iA= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= -github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go v1.55.6 h1:cSg4pvZ3m8dgYcgqB97MrcdjUmZ1BeMYKUxMMB89IPk= +github.com/aws/aws-sdk-go v1.55.6/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= github.com/aws/aws-sdk-go-v2 v1.22.2 h1:lV0U8fnhAnPz8YcdmZVV60+tr6CakHzqA6P8T46ExJI= github.com/aws/aws-sdk-go-v2 v1.22.2/go.mod h1:Kd0OJtkW3Q0M0lUWGszapWjEvrXDzRW+D21JNsroB+c= @@ -155,15 +157,11 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA= -github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= 
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -175,14 +173,14 @@ github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 h1:QVw89YDxXxEe+l8gU8ETbOasdwEV+avkR75ZzsVV9WI= +github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cristalhq/hedgedhttp v0.9.1 h1:g68L9cf8uUyQKQJwciD0A1Vgbsz+QgCjuB1I8FAsCDs= github.com/cristalhq/hedgedhttp v0.9.1/go.mod h1:XkqWU6qVMutbhW68NnzjWrGtH8NUx1UfYqGYtHVKIsI= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -195,10 +193,10 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/r github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/digitalocean/godo v1.118.0 h1:lkzGFQmACrVCp7UqH1sAi4JK/PWwlc5aaxubgorKmC4= github.com/digitalocean/godo v1.118.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo= -github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= -github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v27.0.3+incompatible h1:aBGI9TeQ4MPlhquTQKq9XbK79rKFVwXNUAYz9aXyEBE= -github.com/docker/docker v27.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference 
v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI= +github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -208,31 +206,37 @@ github.com/drone/envsubst v1.0.3/go.mod h1:N2jZmlMufstn1KEqvbHjw40h1KyTmnVzHcSc9 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/eapache/go-resiliency v1.6.0 h1:CqGDTLtpwuWKn6Nj3uNUdflaq+/kIPsg0gfNzHton30= -github.com/eapache/go-resiliency v1.6.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= +github.com/eapache/go-resiliency v1.7.0 h1:n3NRTnBn5N0Cbi/IeOHuQn9s2UwVUH7Ga0ZWcP+9JTA= +github.com/eapache/go-resiliency v1.7.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4ALJ04o5Qqpdz8XLIpNA3WM/iSIXqxtqo7UGVws= github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/ebitengine/purego v0.8.1 h1:sdRKd6plj7KYW33EH5As6YKfe8m9zbN9JMrOjNVF/BE= +github.com/ebitengine/purego v0.8.1/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= github.com/efficientgo/tools/core v0.0.0-20220225185207-fe763185946b h1:ZHiD4/yE4idlbqvAO6iYCOYRzOMRpxkW+FKasRA3tsQ= github.com/efficientgo/tools/core v0.0.0-20220225185207-fe763185946b/go.mod h1:OmVcnJopJL8d3X3sSXTiypGoUSgFq1aDGmlrdi9dn/M= +github.com/elastic/go-grok v0.3.1 h1:WEhUxe2KrwycMnlvMimJXvzRa7DoByJB4PVUIE1ZD/U= +github.com/elastic/go-grok v0.3.1/go.mod h1:n38ls8ZgOboZRgKcjMY8eFeZFMmcL9n2lP0iHhIDk64= +github.com/elastic/lunes v0.1.0 h1:amRtLPjwkWtzDF/RKzcEPMvSsSseLDLW+bnhfNSLRe4= +github.com/elastic/lunes v0.1.0/go.mod h1:xGphYIt3XdZRtyWosHQTErsQTd4OP1p9wsbVoHelrd4= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.12.0 h1:4X+VP1GHd1Mhj6IB5mMeGbLCleqxjletLK6K0rbxyZI= -github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= +github.com/envoyproxy/go-control-plane v0.13.1 h1:vPfJZCkob6yTMEgS+0TwfTUfbHjfy/6vOJ8hUWX/uXE= 
+github.com/envoyproxy/go-control-plane v0.13.1/go.mod h1:X45hY0mufo6Fd0KW3rqsGvQMw58jvjymeCzBU3mWyHw= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= -github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls= github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/expr-lang/expr v1.16.2 h1:JvMnzUs3LeVHBvGFcXYmXo+Q6DPDmzrlcSBO6Wy3w4s= -github.com/expr-lang/expr v1.16.2/go.mod h1:uCkhfG+x7fcZ5A5sXHKuQ07jGZRl6J0FCAaf2k4PtVQ= +github.com/expr-lang/expr v1.16.9 h1:WUAzmR0JNI9JCiF0/ewwHB1gmcGw5wW7nWt8gc6PpCI= +github.com/expr-lang/expr v1.16.9/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40VO/1IT4= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -241,7 +245,6 @@ github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYF github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= -github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= @@ -249,9 +252,8 @@ github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHqu github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/fzipp/gocyclo v0.3.1/go.mod h1:DJHO6AUmbdqj2ET4Z9iArSuwWgYDRryYt2wASxc7x3E= +github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= +github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -260,8 +262,6 @@ github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/kit v0.8.0/go.mod 
h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.13.0 h1:OoneCcHKHQ03LfBpoQCUfCluwd2Vt3ohz+kvbJneZAU= -github.com/go-kit/kit v0.13.0/go.mod h1:phqEHMMUbyrCFCTgH48JueqrM3md2HcAZ8N3XE4FKDg= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= @@ -304,14 +304,14 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U= github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= -github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c= -github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss= +github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= -github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= +github.com/goccy/go-json v0.10.4 h1:JSwxQzIqKfmFX1swYPpUThQZp/Ka4wzJdK0LWVytLPM= +github.com/goccy/go-json v0.10.4/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0= @@ -327,8 +327,9 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -377,7 +378,6 @@ github.com/google/go-cmp v0.5.4/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= @@ -398,21 +398,21 @@ github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= -github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= +github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= +github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= -github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= +github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s= -github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= +github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q= +github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA= github.com/gophercloud/gophercloud v1.13.0 h1:8iY9d1DAbzMW6Vok1AxbbK5ZaUjzMp0tdyt4fX9IeJ0= github.com/gophercloud/gophercloud v1.13.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= -github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= -github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= +github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= +github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= @@ -423,8 +423,8 @@ github.com/grafana/dskit v0.0.0-20241115082728-f2a7eb3aa0e9 h1:Dx7+6aU/fhwD2vkMr 
github.com/grafana/dskit v0.0.0-20241115082728-f2a7eb3aa0e9/go.mod h1:SPLNCARd4xdjCkue0O6hvuoveuS1dGJjDnfxYe405YQ= github.com/grafana/e2e v0.1.1 h1:/b6xcv5BtoBnx8cZnCiey9DbjEc8z7gXHO5edoeRYxc= github.com/grafana/e2e v0.1.1/go.mod h1:RpNLgae5VT+BUHvPE+/zSypmOXKwEu4t+tnEMS1ATaE= -github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56 h1:X8IKQ0wu40wpvYcKfBcc5T4QnhdQjUhtUtB/1CY89lE= -github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56/go.mod h1:PGk3RjYHpxMM8HFPhKKo+vve3DdlPUELZLSDEFehPuU= +github.com/grafana/gomemcache v0.0.0-20241016125027-0a5bcc5aef40 h1:1TeKhyS+pvzOeyLV1XPZsiqebnKky/AKS3pJNNbHVPo= +github.com/grafana/gomemcache v0.0.0-20241016125027-0a5bcc5aef40/go.mod h1:IGRj8oOoxwJbHBYl1+OhS9UjQR0dv6SQOep7HqmtyFU= github.com/grafana/memberlist v0.3.1-0.20220708130638-bd88e10a3d91 h1:/NipyHnOmvRsVzj81j2qE0VxsvsqhOB0f4vJIhk2qCQ= github.com/grafana/memberlist v0.3.1-0.20220708130638-bd88e10a3d91/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/grafana/pyroscope-go/godeltaprof v0.1.8 h1:iwOtYXeeVSAeYefJNaxDytgjKtUuKQbJqgAIjlnicKg= @@ -433,11 +433,12 @@ github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrR github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0 h1:kQ0NI7W1B3HwiN5gAYtY+XFItDPbLBwYRxAqbFTyDes= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0/go.mod h1:zrT2dxOAjNFPRGjTUe2Xmb4q4YdUwVvQFV6xiCSf+z0= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= -github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.0 h1:VD1gqscl4nYs1YxVuSdemTrSgTKrwOWDK0FVFMqm+Cg= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.0/go.mod h1:4EgsQoS4TOhJizV+JTFg40qx1Ofh3XmXEQNBpgvNT40= github.com/hashicorp/consul/api v1.13.0/go.mod h1:ZlVrynguJKcYr54zGaDbaL3fOvKC9m72FhPvA8T35KQ= github.com/hashicorp/consul/api v1.29.2 h1:aYyRn8EdE2mSfG14S1+L9Qkjtz8RzmaWh6AcNGRNwPw= github.com/hashicorp/consul/api v1.29.2/go.mod h1:0YObcaLNDSbtlgzIRtmRXI1ZkeuK0trCBxwZQ4MYnIk= @@ -471,8 +472,6 @@ github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= -github.com/hashicorp/go-plugin v1.6.0 h1:wgd4KxHJTVGGqWBq4QPB1i5BZNEx9BR8+OFmHDmTk8A= -github.com/hashicorp/go-plugin v1.6.0/go.mod h1:lBS5MtSSBZk0SHc66KACcjjlU6WzEVP/8pwz68aMkCI= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= 
github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= @@ -495,6 +494,7 @@ github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKe github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= @@ -512,8 +512,6 @@ github.com/hashicorp/vault/api v1.0.4/go.mod h1:gDcqh3WGcR1cpF5AJz/B1UFheUEneMoI github.com/hashicorp/vault/sdk v0.1.13/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= -github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= github.com/hetznercloud/hcloud-go/v2 v2.10.2 h1:9gyTUPhfNbfbS40Spgij5mV5k37bOZgt8iHKCbfGs5I= github.com/hetznercloud/hcloud-go/v2 v2.10.2/go.mod h1:xQ+8KhIS62W0D78Dpi57jsufWh844gUw1az5OUvaeq8= github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= @@ -528,8 +526,8 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/ionos-cloud/sdk-go/v6 v6.1.11 h1:J/uRN4UWO3wCyGOeDdMKv8LWRzKu6UIkLEaes38Kzh8= github.com/ionos-cloud/sdk-go/v6 v6.1.11/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= -github.com/jaegertracing/jaeger v1.57.0 h1:3wDtUUPs6NRYH7+d+y8MilDkLHdpPrVlQ2wbcsA62bs= -github.com/jaegertracing/jaeger v1.57.0/go.mod h1:p/1fxIU9hKHl7qEhKC72p2ZYVhvvZvNB73y6V7YyuTs= +github.com/jaegertracing/jaeger v1.65.0 h1:phDrZzaPUbomlN8VfxGWuPwkipYh7cU6V9q6Obf+7Fc= +github.com/jaegertracing/jaeger v1.65.0/go.mod h1:EkEqyIzI0xCjexVHURWJmZZxjswTUKSriW57eVG44yo= github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= @@ -542,10 +540,8 @@ github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh6 github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs= github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= -github.com/jedib0t/go-pretty/v6 v6.2.4 h1:wdaj2KHD2W+mz8JgJ/Q6L/T5dB7kyqEFI16eLq7GEmk= -github.com/jedib0t/go-pretty/v6 v6.2.4/go.mod h1:+nE9fyyHGil+PuISTCrp7avEdo6bqoMwqZnuiK2r2a0= -github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c= -github.com/jhump/protoreflect v1.15.1/go.mod h1:jD/2GMKKE6OqX8qTjhADU1e6DShO+gavG9e0Q693nKo= +github.com/jedib0t/go-pretty/v6 v6.6.5 
h1:9PgMJOVBedpgYLI56jQRJYqngxYAAzfEUua+3NgSqAo= +github.com/jedib0t/go-pretty/v6 v6.6.5/go.mod h1:Uq/HrbhuFty5WSVNfjpQQe47x16RwVGXIveNGEyGtHs= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= @@ -575,12 +571,12 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= -github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY= +github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8= github.com/knadh/koanf v1.5.0 h1:q2TSd/3Pyc/5yP9ldIrSdIz26MCcyNQzW0pEAugLPNs= github.com/knadh/koanf v1.5.0/go.mod h1:Hgyjp4y8v44hpZtPzs7JZfRAW5AhN7KfZcwv1RYggDs= -github.com/knadh/koanf/v2 v2.1.1 h1:/R8eXqasSTsmDCsAyYj+81Wteg8AqrV9CP6gvsTsOmM= -github.com/knadh/koanf/v2 v2.1.1/go.mod h1:4mnTRbZCK+ALuBXHZMjDfG9y714L7TykVnZkXbMU3Es= +github.com/knadh/koanf/v2 v2.1.2 h1:I2rtLRqXRy1p01m/utEtpZSSA6dcJbgGVuE27kW2PzQ= +github.com/knadh/koanf/v2 v2.1.2/go.mod h1:Gphfaen0q1Fc1HTgJgSTC4oRX9R2R5ErYMZJy8fLJBo= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -598,9 +594,10 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/linode/linodego v1.37.0 h1:B/2Spzv9jYXzKA+p+GD8fVCNJ7Wuw6P91ZDD9eCkkso= github.com/linode/linodego v1.37.0/go.mod h1:L7GXKFD3PoN2xSEtFc04wIXP5WK65O10jYQx0PQISWQ= -github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 h1:7UMa6KCCMjZEMDtTVdcGu0B1GmmC7QJKiCCjyTAWQy0= +github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= +github.com/magefile/mage v1.15.0 h1:BvGheCMAsG3bWUDbZ8AyXXpCNwU9u5CB6sM+HNb9HYg= +github.com/magefile/mage v1.15.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= @@ -631,8 +628,8 @@ github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs= github.com/miekg/dns v1.1.61/go.mod 
h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.80 h1:2mdUHXEykRdY/BigLt3Iuu1otL0JTogT0Nmltg0wujk= -github.com/minio/minio-go/v7 v7.0.80/go.mod h1:84gmIilaX4zcvAWWzJ5Z1WI5axN+hAbM5w25xf8xvC0= +github.com/minio/minio-go/v7 v7.0.84 h1:D1HVmAF8JF8Bpi6IU4V9vIEj+8pc+xU88EWMs2yed0E= +github.com/minio/minio-go/v7 v7.0.84/go.mod h1:57YXpvc5l3rjPdhqNrDsvVlY0qPI6UTk1bflAe+9doY= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= @@ -641,7 +638,6 @@ github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HK github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= @@ -672,8 +668,6 @@ github.com/npillmayer/nestext v0.1.3/go.mod h1:h2lrijH8jpicr25dFY+oAJLyzlya6jhnu github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= -github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= @@ -682,52 +676,54 @@ github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.102.0 h1:R70PpK14trQfL/Vj5oAiGRqX09s2gOWuf6t1Ae5fevQ= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.102.0/go.mod h1:xmy/yFFmB1Epy+czrYMbA+4xeOKvhFqNqYWU6qINeis= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter v0.102.0 h1:N3vWsp3xealy4AX8TovfHG5EKi/k7z+F/8LFP4SVAgo= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter v0.102.0/go.mod h1:/Ijok2yF1qYoHuRHvyLS04ZuW91Pue2VkqZ/nZxpkvk= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.102.0 h1:PNLVcz8kJLE9V5kGnbBh277Bvl4WwiVZ+NbFbOB80WY= 
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.102.0/go.mod h1:cBbjwd8m4rBVgCQksUbAVQX1EoM5IuCyNQw2mzvibEM= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.102.0 h1:qsM5HhWpAfIMg8LdO4u+CHofu4UuCuJwg/M+ySO9uZA= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.102.0/go.mod h1:wBJlGy9Wx6s7AxIMcSne2sGw73e5ZUy1AQ/duYwpFf8= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.97.0 h1:f3HVDcjUVUbOpKWiebD9v8+9YdDdNvzPyKh3IVb0ORY= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.97.0/go.mod h1:110wLws4lB2Jpv58rK7YoaMIhIEmLlzw5/viC0XJhbM= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.102.0 h1:xBd9EXG9qvWwa2d7qDRVv/D/2gAQqn1zGbPqdjkd+O8= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.102.0/go.mod h1:e4pc6nkNyzBi5g2RgIRjJ1slRsOY5qHIbPu0E4oM3cE= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.102.0 h1:/J1Q2tylp8ID+AIpCmfaArUyCPoSjY3nyZXdkpTw9J8= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.102.0/go.mod h1:lbNQBpvs40lInohZrqAbRZ+8r29GzfMfkbLV4fBPrzE= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.102.0 h1:pVJ792+Nzcv8nLlg18XOLOWEZ/dCK+Wo3Iak5TU8rz8= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.102.0/go.mod h1:DmkGhNL9nuSTg8fMhYNopMuF1Y3LFqu/FQHrvhBzME0= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.97.0 h1:bVeo7BahYY4rWdaEuzJX2Tn20MbvYcEHXbFjV2IwnPQ= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.97.0/go.mod h1:lj29zRdEZdvbQvZ6g7Pb+tSPw4dQE/jahe39fRgO+08= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.102.0 h1:vJL6lDaeI3pVA7ADnWKD3HMpI80BSrZ2UnGc+qkwqoY= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.102.0/go.mod h1:xtE7tds5j8PtI/wMuGb+Em5K9rJH8hm6t28Qe4QrpoU= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.102.0 h1:TvJYcU/DLRFCgHr7nT98k5D+qkZ4syKVxc8OJjv+K4c= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.102.0/go.mod h1:WzD3Ox7tywAQHknxAFpAC1oZJGItMp5mbvgUGjvzNY8= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.102.0 h1:IgLMHSuraJzxLqVeM7xU7aZPcXS5/eoVnX+HBuFGQ6E= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.102.0/go.mod h1:hG8EmxUvgXIiKTG6+UVcMhFeIN6UD/bswP7WYpQ2lCc= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.102.0 h1:4VQidhCgkJiBvBDMOukr5ixrf5uP66iW5Hb+CFsb+4E= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.102.0/go.mod h1:nMto9zkv0vD8YI3oGZFZS2Uu7k2oHt1d+xUHN/ofUYo= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.102.0 h1:Mh5MHf0PrUQMTM2S8HwEuPt3Fyz0Xnt0IG7GUc6Fmbs= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.102.0/go.mod h1:6fc8qnIayeGwAF41LyLR+/FRbyJf4+FikbmaO0GGq/Y= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.102.0 h1:5M7I78lyGsH+Xyy4NoXKM/UUCa52aZQiPcSX6so6x94= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.102.0/go.mod h1:BEQy0zEel5uIOTEFBBmvQJ4A32R6nKLtSMtC6ylLI8k= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.97.0 
h1:IfJ9EkykXHBYdwuvZd0qfFcCHAAOuTEaiIlTRw6R9g4=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.97.0/go.mod h1:uTs2ukYAG9tCkoUhW39J9bNQAqwpqHhE85eeRVm6zCM=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.102.0 h1:HTGSfx2HzfudY1Uczw9yTBJnGBmTVFYzpGH1z+oD0nU=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.102.0/go.mod h1:Hlz24+Ah6Ojk0FUKNb1watRmTbLEru35+feroKA7dvQ=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.102.0 h1:2D3niNAKkr+NRVmAJW0bquSjzHUL6Pf1qQRLRPwA13M=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.102.0/go.mod h1:h0uqwH7b+NGDfFFWTjoGErMdYRdCqP1Az1/G+tfG024=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver v0.102.0 h1:dBhFe/29ODIbxg4+JRaHwYAHMFFeh6/+izVtjceXwew=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver v0.102.0/go.mod h1:WNFjuquVqyi+WEoa6L0J3DzPLRsP24ZlbZYwKv49VwY=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.102.0 h1:Pemo9pZa3VMYdrM/bss3f0qqVyBzPSulOBQL8VQcgN8=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.102.0/go.mod h1:fvjAM+jOQdiXCmAENKH/eWxBBqTaImbq3lpoBI4X5Ek=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.118.0 h1:GKIdx0Ug1IpcBiOA28BvCxZo4GIGsQR1dW6Id3aFYVE=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.118.0/go.mod h1:Ya3cB0ep9+pSkPD6W6lB1xaUVBicQt3xVyITKBjd3sw=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter v0.118.0 h1:2nY0HZ2mvPhTIcC3qNcZdAvjgurGA+h09As42iQdgW8=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter v0.118.0/go.mod h1:nEVT4t0anoejKqdFIznHJ/nOWwxnu2HCi4c+OeVE6T4=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.118.0 h1:W6maz9dZiAYO3WWFMy41GoX2tzx7EPiLGiykNkiAOMI=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.118.0/go.mod h1:WmS8hAnghKAR5UGYC/sww46mKkBO4RxAqkn8K0i+ZU4=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.118.0 h1:Xnwd0QEyBg6iNPUbc3CnHIb0hVjfTc+jHdFbA9VSa7k=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.118.0/go.mod h1:rmqCuNFMNBUxgyufeU8rpVYOWau8ubr0gmSO1u+R5Hk=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.118.0 h1:cRDOmJfEOm7G369Lw47k03NIg1qY6HtO9XTwfYRLBw4=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.118.0/go.mod h1:KPphlnKqOx44kbEks3VjqQstD/892osXDVN1kn53wWE=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.118.0 h1:ydifxzu9jPr0tW80f9QIOLgjmR7qCFQD33pLWqXO6ss=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.118.0/go.mod h1:/ijU34n3G4qMoiZIOUS6nYo6XrretVkcHXx9LKyUm+o=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.118.0 h1:dPYcq0NyUpXeJGejLvNAMZ+iaQGx0UCmNwnnn60D/Oc=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.118.0/go.mod h1:RYz6Pcxqia18V98XqWXWqXB/Qejn7vgK5PoWgMv7DwM=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.118.0 h1:qWFugbYl7CYQ1nTRmr+AWK9UsALLCYkno7XK8g0jPpg=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.118.0/go.mod h1:gTwTRGOfFmIZhj75yqp8WVXJeTJoF5mypr19LlGRewg=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic v0.118.0 h1:uAJjZmS275VjGDZvg4LfLk+vrsd08iMwre048+B4eFw=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic v0.118.0/go.mod h1:1q/L2R/28emNCz0EHfxEw853I6lPxTcHTqS+UrMea0k=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.118.0 h1:8pBuMvrFhU7YLJn1uhuuv5uLz0cJUyzusFtNA//fvFI=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.118.0/go.mod h1:pPjJ7ITPSA68VT7cGK9UIJkGsvfjIJV8cjB8cnnsKr8=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.118.0 h1:DSoYrOjLv23HXpx72hl61br4ZZTj6dqtwZSGoypKWIA=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.118.0/go.mod h1:nR+r7aAbsktscJk4fGmzljblbZBMaiZcIWeKbXV+HmY=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.118.0 h1:aUTSkzJExtrlHN32g8hX/cRNEo2ZmucPg+vwPqOYvhg=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.118.0/go.mod h1:a3sewj4nEozMwcNwZTHPzddS+1BnA6BaAkO/CRIGHVU=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.118.0 h1:jCw+26caB9dWAB1uLFG004qBEGwkSsLioohRmp11TMw=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.118.0/go.mod h1:XDUkUhtY32T4Km+Oma2a/psBYnCdqOkr7V8/7XHUo3k=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.118.0 h1:cNxDWIo5FNwVCEJ0OkYZG7L2FSiIoH7ASUnhjw5+yaA=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.118.0/go.mod h1:wGuwhjwdA3Sqw0gLBebku6vJ8NHqWhv8mDEOaxFsKTQ=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.118.0 h1:iCkInN6po+WATcDQ+fSqBRCMALgLshgIwNhwO3xioBA=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.118.0/go.mod h1:MTO5isJUZBQ+h1WRaHnLJA2UZmAVjwG5U8CqohWPdyI=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.118.0 h1:Ef0H9eY8EtZ6yqZvbyEFiE5ElQNLiADYo2KVdR7a3jU=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.118.0/go.mod h1:VqUc4LGE97Qh8RddrA7+fkd4OAzhhkQ59/oE0q3TfqI=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.118.0 h1:D/67TEByWyRExhiV0Ihr5DZCh6WsCpaFMUaaPeyP6c8=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.118.0/go.mod h1:qYuRkOOo0OXWAFb2YGyL+UQkyrypds9cMW+q7+dTUJM=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.118.0 h1:ABsdtuXGh1YjOkiVr19ZsaHAAfM+c7QiccF0yinhb4s=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.118.0/go.mod h1:bpfe1oPTuiP6ot4tkPvSVYPMkYshLGjNPrJvoDk1ZCg=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.118.0 h1:11bdqxZzIhH514hXX5R6pAJqWAfOtgQzjOgdfMpX/8Q=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.118.0/go.mod h1:OiCZiLKeRmkxrQDA9g9A7yB9BNtxDLixmXvhIsWPYh8=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver v0.118.0 h1:xBhG84x+FIlIuaeFFRa4GB6tEBpUsyXCdj00EFobjlc=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver v0.118.0/go.mod h1:QuTB8IztxTZt2YZUPB6q9yZk2H0Wm5bQP1ap6D2LNdU=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.118.0 h1:hdq0EDq6gCjOWl0RfXhAcSepB52QHx7us+UcUYTbWpg=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.118.0/go.mod h1:9kAczl5meDgn9zlLJJre8Q/4U43cqh9aAy3kCm/rJlk=
 github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
 github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
 github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
-github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e h1:4cPxUYdgaGzZIT5/j0IfqOrrXmq6bG8AwvwisMXpdrg=
-github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo=
+github.com/opentracing-contrib/go-grpc v0.1.0 h1:9JHDtQXv6UL0tFF8KJB/4ApJgeOcaHp1h07d0PJjESc=
+github.com/opentracing-contrib/go-grpc v0.1.0/go.mod h1:i3/jx/TvJZ/HKidtT4XGIi/NosUEpzS9xjVJctbKZzI=
 github.com/opentracing-contrib/go-stdlib v1.0.0 h1:TBS7YuVotp8myLon4Pv7BtCBzOTo1DeZCld0Z63mW2w=
 github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU=
 github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
@@ -737,17 +733,17 @@ github.com/openzipkin/zipkin-go v0.4.3 h1:9EGwpqkgnwdEIJ+Od7QVSEIH+ocmm5nPat0G7s
 github.com/openzipkin/zipkin-go v0.4.3/go.mod h1:M9wCJZFWCo2RiY+o1eBCEMe0Dp2S5LDHcMZmk3RmK7c=
 github.com/ovh/go-ovh v1.6.0 h1:ixLOwxQdzYDx296sXcgS35TOPEahJkpjMGtzPadCjQI=
 github.com/ovh/go-ovh v1.6.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c=
-github.com/parquet-go/parquet-go v0.23.1-0.20241011155651-6446d1d0d2fe h1:oUJ5TPnrEK/z+/PeoLL+jCgfngAZIDMyhZASetRcYYg=
-github.com/parquet-go/parquet-go v0.23.1-0.20241011155651-6446d1d0d2fe/go.mod h1:OqBBRGBl7+llplCvDMql8dEKaDqjaFA/VAPw+OJiNiw=
+github.com/parquet-go/parquet-go v0.24.0 h1:VrsifmLPDnas8zpoHmYiWDZ1YHzLmc7NmNwPGkI2JM4=
+github.com/parquet-go/parquet-go v0.24.0/go.mod h1:OqBBRGBl7+llplCvDMql8dEKaDqjaFA/VAPw+OJiNiw=
 github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
 github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
 github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
 github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
-github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
-github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
+github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M=
+github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc=
 github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
-github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
-github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
+github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU=
+github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
 github.com/pires/go-proxyproto v0.7.0 h1:IukmRewDQFWC7kfnb66CSomk2q/seBuilHBYFwyq0Hs=
 github.com/pires/go-proxyproto v0.7.0/go.mod h1:Vz/1JPY/OACxWGQNIRY2BeyDmpoaWmEP40O9LbuiFR4=
 github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
@@ -756,15 +752,15 @@ github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
+github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=
+github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
 github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
-github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
-github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI=
-github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
+github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU=
+github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
 github.com/prometheus/alertmanager v0.27.0 h1:V6nTa2J5V4s8TG4C4HtrBP/WNSebCCTYGGv4qecA/+I=
 github.com/prometheus/alertmanager v0.27.0/go.mod h1:8Ia/R3urPmbzJ8OsdvmZvIprDwvwmYCmUbwBL+jlPOE=
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
@@ -773,8 +769,8 @@ github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3O
 github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
 github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
 github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
-github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
+github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
+github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -786,8 +782,8 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b
 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
 github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
-github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
+github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
+github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
 github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
 github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
 github.com/prometheus/exporter-toolkit v0.11.0 h1:yNTsuZ0aNCNFQ3aFTD2uhPOvr4iD7fdBvKPAEGkNf+g=
@@ -799,26 +795,26 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
 github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
 github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
 github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
-github.com/prometheus/prometheus v0.54.0 h1:6+VmEkohHcofl3W5LyRlhw1Lfm575w/aX6ZFyVAmzM0=
-github.com/prometheus/prometheus v0.54.0/go.mod h1:xlLByHhk2g3ycakQGrMaU8K7OySZx98BzeCR99991NY=
-github.com/prometheus/statsd_exporter v0.26.0 h1:SQl3M6suC6NWQYEzOvIv+EF6dAMYEqIuZy+o4H9F5Ig=
-github.com/prometheus/statsd_exporter v0.26.0/go.mod h1:GXFLADOmBTVDrHc7b04nX8ooq3azG61pnECNqT7O5DM=
+github.com/prometheus/prometheus v0.54.1 h1:vKuwQNjnYN2/mDoWfHXDhAsz/68q/dQDb+YbcEqU7MQ=
+github.com/prometheus/prometheus v0.54.1/go.mod h1:xlLByHhk2g3ycakQGrMaU8K7OySZx98BzeCR99991NY=
+github.com/prometheus/statsd_exporter v0.26.1 h1:ucbIAdPmwAUcA+dU+Opok8Qt81Aw8HanlO+2N/Wjv7w=
+github.com/prometheus/statsd_exporter v0.26.1/go.mod h1:XlDdjAmRmx3JVvPPYuFNUg+Ynyb5kR69iPPkQjxXFMk=
 github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
 github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
-github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4=
-github.com/redis/go-redis/v9 v9.6.1/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA=
-github.com/relvacode/iso8601 v1.4.0 h1:GsInVSEJfkYuirYFxa80nMLbH2aydgZpIf52gYZXUJs=
-github.com/relvacode/iso8601 v1.4.0/go.mod h1:FlNp+jz+TXpyRqgmM7tnzHHzBnz776kmAH2h3sZCn0I=
+github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E=
+github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw=
+github.com/relvacode/iso8601 v1.6.0 h1:eFXUhMJN3Gz8Rcq82f9DTMW0svjtAVuIEULglM7QHTU=
+github.com/relvacode/iso8601 v1.6.0/go.mod h1:FlNp+jz+TXpyRqgmM7tnzHHzBnz776kmAH2h3sZCn0I=
 github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA=
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
 github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
 github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
 github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
-github.com/rs/cors v1.11.0 h1:0B9GE/r9Bc2UxRMMtymBkHTenPkHDv0CW4Y98GBY+po=
-github.com/rs/cors v1.11.0/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
+github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA=
+github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
 github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU=
 github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
@@ -837,12 +833,8 @@ github.com/segmentio/fasthash v1.0.3 h1:EI9+KE1EwvMLBWwjpRDc+fEM+prwxDYbslddQGtr
 github.com/segmentio/fasthash v1.0.3/go.mod h1:waKX8l2N8yckOgmSsXJi7x1ZfdKZ4x7KRMzBtS3oedY=
 github.com/sercand/kuberesolver/v5 v5.1.1 h1:CYH+d67G0sGBj7q5wLK61yzqJJ8gLLC8aeprPTHb6yY=
 github.com/sercand/kuberesolver/v5 v5.1.1/go.mod h1:Fs1KbKhVRnB2aDWN12NjKCB+RgYMWZJ294T3BtmVCpQ=
-github.com/shirou/gopsutil/v3 v3.24.4 h1:dEHgzZXt4LMNm+oYELpzl9YCqV65Yr/6SfrvgRBtXeU=
-github.com/shirou/gopsutil/v3 v3.24.4/go.mod h1:lTd2mdiOspcqLgAnr9/nGi71NkeMpWKdmhuxm9GusH8=
-github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM=
-github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
-github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU=
-github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k=
+github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4=
+github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
@@ -852,19 +844,18 @@ github.com/sony/gobreaker v0.4.1 h1:oMnRNZXX5j85zso6xCPRNPtmAycat+WcoKbklScLDgQ=
 github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
 github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
 github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
 github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
 github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
 github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
 github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
 github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
 github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
-github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
-github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
+github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
+github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
 github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
 github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ=
-github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk=
+github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI=
+github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg=
 github.com/stoewer/parquet-cli v0.0.9 h1:qFjncPnEnzwPJxnADcwvdiUzWwMch7PRWloaBNeBDE0=
 github.com/stoewer/parquet-cli v0.0.9/go.mod h1:bskxHdj8q3H1EmfuCqjViFoeO3NEvs5lzZAQvI8Nfjk=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -883,17 +874,32 @@ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1F
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
-github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
 github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
 github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
 github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
 github.com/thanos-io/objstore v0.0.0-20220809103346-8ef1f215e2bf h1:onQsPyHlq2yIWU+Nfl6yStuqnZuVQQN8FZ8sBb2wqtw=
 github.com/thanos-io/objstore v0.0.0-20220809103346-8ef1f215e2bf/go.mod h1:v0NhuxxxUFUPatQcVNSCUkBEVezXzl7LSdaBOZygq98=
-github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
-github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
-github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
-github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
+github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU=
+github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY=
+github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY=
+github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE=
 github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
+github.com/twmb/franz-go v1.18.1 h1:D75xxCDyvTqBSiImFx2lkPduE39jz1vaD7+FNc+vMkc=
+github.com/twmb/franz-go v1.18.1/go.mod h1:Uzo77TarcLTUZeLuGq+9lNpSkfZI+JErv7YJhlDjs9M=
+github.com/twmb/franz-go/pkg/kadm v1.14.0 h1:nAn1co1lXzJQocpzyIyOFOjUBf4WHWs5/fTprXy2IZs=
+github.com/twmb/franz-go/pkg/kadm v1.14.0/go.mod h1:XjOPz6ZaXXjrW2jVCfLuucP8H1w2TvD6y3PT2M+aAM4=
+github.com/twmb/franz-go/pkg/kfake v0.0.0-20241202133023-293b7c4c56bb h1:cNa7PaAvkAhYIOootH/5gRO9ljbI3MIObf5qU/PKFKY=
+github.com/twmb/franz-go/pkg/kfake v0.0.0-20241202133023-293b7c4c56bb/go.mod h1:nkBI/wGFp7t1NJnnCeJdS4sX5atPAqwCPpDXKuI7SC8=
+github.com/twmb/franz-go/pkg/kmsg v1.9.0 h1:JojYUph2TKAau6SBtErXpXGC7E3gg4vGZMv9xFU/B6M=
+github.com/twmb/franz-go/pkg/kmsg v1.9.0/go.mod h1:CMbfazviCyY6HM0SXuG5t9vOwYDHRCSrJJyBAe5paqg=
+github.com/twmb/franz-go/plugin/kotel v1.5.0 h1:TiPfGUbQK384OO7ZYGdo7JuPCbJn+/8njQ/D9Je9CDE=
+github.com/twmb/franz-go/plugin/kotel v1.5.0/go.mod h1:wRXzRo76x1myOUMaVHAyraXoGBdEcvlLChGTVv5+DWU=
+github.com/twmb/franz-go/plugin/kprom v1.1.0 h1:grGeIJbm4llUBF8jkDjTb/b8rKllWSXjMwIqeCCcNYQ=
+github.com/twmb/franz-go/plugin/kprom v1.1.0/go.mod h1:cTDrPMSkyrO99LyGx3AtiwF9W6+THHjZrkDE2+TEBIU=
+github.com/ua-parser/uap-go v0.0.0-20241012191800-bbb40edc15aa h1:VzPR4xFM7HARqNocjdHg75ZL9SAgFtaF3P57ZdDcG6I=
+github.com/ua-parser/uap-go v0.0.0-20241012191800-bbb40edc15aa/go.mod h1:BUbeWZiieNxAuuADTBNb3/aeje6on3DhU3rpWsQSB1E=
 github.com/uber-go/atomic v1.4.0 h1:yOuPqEq4ovnhEjpHmfFwsqBXDYbQeT6Nb0bwD6XnD5o=
 github.com/uber-go/atomic v1.4.0/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g=
 github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o=
@@ -920,20 +926,19 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9/go.mod h1:E1AXubJBdNmFERAOucpDIxNzeGfLzg0mYh+UfMWdChA=
-github.com/yuin/gopher-lua v0.0.0-20220504180219-658193537a64 h1:5mLPGnFdSsevFRFc9q3yYbBkB6tsm4aCwwQV/j1JQAQ=
-github.com/yuin/gopher-lua v0.0.0-20220504180219-658193537a64/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw=
+github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M=
+github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw=
 github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
 github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
 go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A=
-go.etcd.io/etcd/api/v3 v3.5.10 h1:szRajuUUbLyppkhs9K6BRtjY37l66XQQmw7oZRANE4k=
-go.etcd.io/etcd/api/v3 v3.5.10/go.mod h1:TidfmT4Uycad3NM/o25fG3J07odo4GBB9hoxaodFCtI=
+go.etcd.io/etcd/api/v3 v3.5.12 h1:W4sw5ZoU2Juc9gBWuLk5U6fHfNVyY1WC5g9uiXZio/c=
+go.etcd.io/etcd/api/v3 v3.5.12/go.mod h1:Ot+o0SWSyT6uHhA56al1oCED0JImsRiU9Dc26+C2a+4=
 go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
-go.etcd.io/etcd/client/pkg/v3 v3.5.10 h1:kfYIdQftBnbAq8pUWFXfpuuxFSKzlmM5cSn76JByiT0=
-go.etcd.io/etcd/client/pkg/v3 v3.5.10/go.mod h1:DYivfIviIuQ8+/lCq4vcxuseg2P2XbHygkKwFo9fc8U=
+go.etcd.io/etcd/client/pkg/v3 v3.5.12 h1:EYDL6pWwyOsylrQyLp2w+HkQ46ATiOvoEdMarindU2A=
+go.etcd.io/etcd/client/pkg/v3 v3.5.12/go.mod h1:seTzl2d9APP8R5Y2hFL3NVlD6qC/dOT+3kvrqPyTas4=
 go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY=
-go.etcd.io/etcd/client/v3 v3.5.10 h1:W9TXNZ+oB3MCd/8UjxHTWK5J9Nquw9fQBLJd5ne5/Ao=
-go.etcd.io/etcd/client/v3 v3.5.10/go.mod h1:RVeBnDz2PUEZqTpgqwAtUd8nAPf5kjyFyND7P1VkOKc=
+go.etcd.io/etcd/client/v3 v3.5.12 h1:v5lCPXn1pf1Uu3M4laUE2hp/geOTc5uPcYYsNe1lDxg=
+go.etcd.io/etcd/client/v3 v3.5.12/go.mod h1:tSbBCakoWmmddL+BKVAJHa9km+O/E+bumDe9mSbPiqw=
 go.mongodb.org/mongo-driver v1.15.0 h1:rJCKC8eEliewXjZGf0ddURtl7tTVy1TK3bfl0gkUSLc=
 go.mongodb.org/mongo-driver v1.15.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
 go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
@@ -943,132 +948,176 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
 go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/collector v0.102.1 h1:M/ciCcReQsSDYG9bJ2Qwqk7pQILDJ2bM/l0MdeCAvJE=
-go.opentelemetry.io/collector v0.102.1/go.mod h1:yF1lDRgL/Eksb4/LUnkMjvLvHHpi6wqBVlzp+dACnPM=
-go.opentelemetry.io/collector/component v0.102.1 h1:66z+LN5dVCXhvuVKD1b56/3cYLK+mtYSLIwlskYA9IQ=
-go.opentelemetry.io/collector/component v0.102.1/go.mod h1:XfkiSeImKYaewT2DavA80l0VZ3JjvGndZ8ayPXfp8d0=
-go.opentelemetry.io/collector/config/configauth v0.102.1 h1:LuzijaZulMu4xmAUG8WA00ZKDlampH+ERjxclb40Q9g=
-go.opentelemetry.io/collector/config/configauth v0.102.1/go.mod h1:kTzfI5fnbMJpm2wycVtQeWxFAtb7ns4HksSb66NIhX8=
-go.opentelemetry.io/collector/config/configcompression v1.9.0 h1:B2q6XMO6xiF2s+14XjqAQHGY5UefR+PtkZ0WAlmSqpU=
-go.opentelemetry.io/collector/config/configcompression v1.9.0/go.mod h1:6+m0GKCv7JKzaumn7u80A2dLNCuYf5wdR87HWreoBO0=
-go.opentelemetry.io/collector/config/configgrpc v0.102.1 h1:6Plnfx+xw/JH8k11MkljGoysPfn1u7hHbO2evteOTeE=
-go.opentelemetry.io/collector/config/configgrpc v0.102.1/go.mod h1:Kk3XOSar3QTzGDS8N8M38DVlOzUD7STS2obczO9q43I=
-go.opentelemetry.io/collector/config/confighttp v0.102.1 h1:tPw1Xf2PfDdrXoBKLY5Sd4Dh8FNm5i+6DKuky9XraIM=
-go.opentelemetry.io/collector/config/confighttp v0.102.1/go.mod h1:k4qscfjxuaDQmcAzioxmPujui9VSgW6oal3WLxp9CzI=
-go.opentelemetry.io/collector/config/confignet v0.102.1 h1:nSiAFQMzNCO4sDBztUxY73qFw4Vh0hVePq8+3wXUHtU=
-go.opentelemetry.io/collector/config/confignet v0.102.1/go.mod h1:pfOrCTfSZEB6H2rKtx41/3RN4dKs+X2EKQbw3MGRh0E=
-go.opentelemetry.io/collector/config/configopaque v1.18.0 h1:aoEecgd5m8iZCX+S+iH6SK/lG6ULqCqtrtz7PeHw7vE=
-go.opentelemetry.io/collector/config/configopaque v1.18.0/go.mod h1:6zlLIyOoRpJJ+0bEKrlZOZon3rOp5Jrz9fMdR4twOS4=
-go.opentelemetry.io/collector/config/configretry v0.102.1 h1:J5/tXBL8P7d7HT5dxsp2H+//SkwDXR66Z9UTgRgtAzk=
-go.opentelemetry.io/collector/config/configretry v0.102.1/go.mod h1:P+RA0IA+QoxnDn4072uyeAk1RIoYiCbxYsjpKX5eFC4=
-go.opentelemetry.io/collector/config/configtelemetry v0.102.1 h1:f/CYcrOkaHd+COIJ2lWnEgBCHfhEycpbow4ZhrGwAlA=
-go.opentelemetry.io/collector/config/configtelemetry v0.102.1/go.mod h1:WxWKNVAQJg/Io1nA3xLgn/DWLE/W1QOB2+/Js3ACi40=
-go.opentelemetry.io/collector/config/configtls v1.18.0 h1:IQemIIuryeHgrpBJMbLl+LgTxvFBbv7Hhi+0WwlxpCU=
-go.opentelemetry.io/collector/config/configtls v1.18.0/go.mod h1:lD2dlDqeTKq7OecFwIZMufDaa8erSlEoHMJrFPHrZNw=
-go.opentelemetry.io/collector/config/internal v0.102.1 h1:HFsFD3xpHUuNHb8/UTz5crJw1cMHzsJQf/86sgD44hw=
-go.opentelemetry.io/collector/config/internal v0.102.1/go.mod h1:Vig3dfeJJnuRe1kBNpszBzPoj5eYnR51wXbeq36Zfpg=
-go.opentelemetry.io/collector/confmap v0.102.1 h1:wZuH+d/P11Suz8wbp+xQCJ0BPE9m5pybtUe74c+rU7E=
-go.opentelemetry.io/collector/confmap v0.102.1/go.mod h1:KgpS7UxH5rkd69CzAzlY2I1heH8Z7eNCZlHmwQBMxNg=
-go.opentelemetry.io/collector/confmap/converter/expandconverter v0.102.1 h1:s0RxnaABoRxtfvUeimZ0OOsF83wD/EK1tR2N5GZyst0=
-go.opentelemetry.io/collector/confmap/converter/expandconverter v0.102.1/go.mod h1:ZwSMlOSIzmrrSSVNoMPDr21SQx7E52bZFMQJSOZ+EhY=
-go.opentelemetry.io/collector/confmap/provider/envprovider v0.102.1 h1:4KLw0pTChIqDfw0ckZ411aQDw98pu2dDOqgBHXfJm8M=
-go.opentelemetry.io/collector/confmap/provider/envprovider v0.102.1/go.mod h1:f+IJBW0Sc96T79qj3GQtE1wQ0uWEwpslD785efKBl+c=
-go.opentelemetry.io/collector/confmap/provider/fileprovider v0.102.1 h1:nPhOtUbJHfTDqZqtvU76HmEz9iV4O/4/DSCZdnm0mpY=
-go.opentelemetry.io/collector/confmap/provider/fileprovider v0.102.1/go.mod h1:eJnr6YDQiocmoRBvsKj33bIc4wysq5hy/jmOApv1dSM=
-go.opentelemetry.io/collector/confmap/provider/httpprovider v0.102.1 h1:VsaGXqEUFost0mf2svhds6loYzPavkyY37nMQcqoTkc=
-go.opentelemetry.io/collector/confmap/provider/httpprovider v0.102.1/go.mod h1:lQocxKI32Zj1F3PR9UZfzykq50/mOI1mbyZ0729dphI=
-go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.102.1 h1:rEhPTqkGAezaFxJ8y/BL5m4vKTK3ZSpn+VcVLKnZo7Q=
-go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.102.1/go.mod h1:GxUZM23m3u4vURw/At2zEKW+5GwcuCNsHJNT/Wq/cFI=
-go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.102.1 h1:qmdaBIz0UnUKVitZzq+4HtO9zvRTwgNc/Q3b7kyf1NQ=
-go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.102.1/go.mod h1:nAckG/FkzAaPuwtEN2Na2+ij+2hdTjtXUtFBnlUqpFk=
-go.opentelemetry.io/collector/connector v0.102.1 h1:7lEwXmhzqtyZwz2bBUHzwV/CZqA8bhPPVJOi0cm9+Fk=
-go.opentelemetry.io/collector/connector v0.102.1/go.mod h1:DRlDYJXsFx1FKKxkdM2Ja52/xe+0bgmy0hA+wgKRUVI=
-go.opentelemetry.io/collector/consumer v0.102.1 h1:0CkgHhxwx4lI/m+hWjh607xyjooW5CObZ8hFQy5vvo0=
-go.opentelemetry.io/collector/consumer v0.102.1/go.mod h1:HoXqmrRV13jLnP3/Gg3fYNdRkDPoO7UW58hKiLyFF60=
-go.opentelemetry.io/collector/exporter v0.102.1 h1:4VURYgBNJscxfMhZWitzcwA1cig5a6pH0xZSpdECDnM=
-go.opentelemetry.io/collector/exporter v0.102.1/go.mod h1:1pmNxvrvvbWDW6PiGObICdj0eOSGV4Fzwpm5QA1GU54=
-go.opentelemetry.io/collector/exporter/otlpexporter v0.102.1 h1:bOXE7u1iy0SKwH2mnVyIMKkvFIR9bn9iIm1Cf/CJlZU=
-go.opentelemetry.io/collector/exporter/otlpexporter v0.102.1/go.mod h1:4ya6xaUYvcXq9MQW0TbsR4QWkOJI02d/2Vt8plwdozA=
-go.opentelemetry.io/collector/extension v0.102.1 h1:gAvE3w15q+Vv0Tj100jzcDpeMTyc8dAiemHRtJbspLg=
-go.opentelemetry.io/collector/extension v0.102.1/go.mod h1:XBxUOXjZpwYLZYOK5u3GWlbBTOKmzStY5eU1R/aXkIo=
-go.opentelemetry.io/collector/extension/auth v0.102.1 h1:GP6oBmpFJjxuVruPb9X40bdf6PNu9779i8anxa+wW6U=
-go.opentelemetry.io/collector/extension/auth v0.102.1/go.mod h1:U2JWz8AW1QXX2Ap3ofzo5Dn2fZU/Lglld97Vbh8BZS0=
-go.opentelemetry.io/collector/extension/zpagesextension v0.102.1 h1:YV+ejCgOBJjACOi/l3ULeivOhh85FPE8T4UcFdWviyg=
-go.opentelemetry.io/collector/extension/zpagesextension v0.102.1/go.mod h1:/CZXg9/C64k85/k4bc7NFbCNP/MiPUZucbxPUN04ny4=
-go.opentelemetry.io/collector/featuregate v1.9.0 h1:mC4/HnR5cx/kkG1RKOQAvHxxg5Ktmd9gpFdttPEXQtA=
-go.opentelemetry.io/collector/featuregate v1.9.0/go.mod h1:PsOINaGgTiFc+Tzu2K/X2jP+Ngmlp7YKGV1XrnBkH7U=
-go.opentelemetry.io/collector/otelcol v0.102.1 h1:JdRG3ven+c5k703QpZG5bxJi4JJOnWaNP/EJvN+oYnI=
-go.opentelemetry.io/collector/otelcol v0.102.1/go.mod h1:kHf9KBXOLZXajR1On8XJbBBGcgh2I2+/mVVroPzOLJU=
-go.opentelemetry.io/collector/pdata v1.12.0 h1:Xx5VK1p4VO0md8MWm2icwC1MnJ7f8EimKItMWw46BmA=
-go.opentelemetry.io/collector/pdata v1.12.0/go.mod h1:MYeB0MmMAxeM0hstCFrCqWLzdyeYySim2dG6pDT6nYI=
-go.opentelemetry.io/collector/pdata/testdata v0.102.1 h1:S3idZaJxy8M7mCC4PG4EegmtiSaOuh6wXWatKIui8xU=
-go.opentelemetry.io/collector/pdata/testdata v0.102.1/go.mod h1:JEoSJTMgeTKyGxoMRy48RMYyhkA5vCCq/abJq9B6vXs=
-go.opentelemetry.io/collector/processor v0.102.1 h1:79NWs7kTgmgxOIQacuZyDf+mYWuoJZS07SHwZT7sZ4Y=
-go.opentelemetry.io/collector/processor v0.102.1/go.mod h1:sNM41tEHgv3YA/Dz9/6F8oCeObrqnKCGOMs7wS6Ldus=
-go.opentelemetry.io/collector/receiver v0.102.1 h1:353t4U3o0RdU007JcQ4sRRzl72GHCJZwXDr8cCOcEbI=
-go.opentelemetry.io/collector/receiver v0.102.1/go.mod h1:pYjMzUkvUlxJ8xt+VbI1to8HMtVlv8AW/K/2GQQOTB0=
-go.opentelemetry.io/collector/receiver/otlpreceiver v0.102.1 h1:65/8lkVmOu6gwBw99W+QUQBeDC2qVTwlaiqy7/SpauY=
-go.opentelemetry.io/collector/receiver/otlpreceiver v0.102.1/go.mod h1:0hmxfFSSqKJjRGvgYjp/XvptbAgLhLguwNgJqMp7zd0=
-go.opentelemetry.io/collector/semconv v0.105.0 h1:8p6dZ3JfxFTjbY38d8xlQGB1TQ3nPUvs+D0RERniZ1g=
-go.opentelemetry.io/collector/semconv v0.105.0/go.mod h1:yMVUCNoQPZVq/IPfrHrnntZTWsLf5YGZ7qwKulIl5hw=
-go.opentelemetry.io/collector/service v0.102.1 h1:Lg7qrC4Zctd/OAlkpdsaZaUY+jLEGLLnOigfBLP2GW8=
-go.opentelemetry.io/collector/service v0.102.1/go.mod h1:L5Sh3461B1Zij7vpMMbi6M/SZicgrLB3UgbG0oUK0pA=
-go.opentelemetry.io/contrib/bridges/prometheus v0.53.0 h1:BdkKDtcrHThgjcEia1737OUuFdP6xzBKAMx2sNZCkvE=
-go.opentelemetry.io/contrib/bridges/prometheus v0.53.0/go.mod h1:ZkhVxcJgeXlL/lVyT/vxNHVFiSG5qOaDwYaSgD8IfZo=
-go.opentelemetry.io/contrib/config v0.7.0 h1:b1rK5tGTuhhPirJiMxOcyQfZs76j2VapY6ODn3b2Dbs=
-go.opentelemetry.io/contrib/config v0.7.0/go.mod h1:8tdiFd8N5etOi3XzBmAoMxplEzI3TcL8dU5rM5/xcOQ=
-go.opentelemetry.io/contrib/exporters/autoexport v0.53.0 h1:13K+tY7E8GJInkrvRiPAhC0gi/7vKjzDNhtmCf+QXG8=
-go.opentelemetry.io/contrib/exporters/autoexport v0.53.0/go.mod h1:lyQF6xQ4iDnMg4sccNdFs1zf62xd79YI8vZqKjOTwMs=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 h1:vS1Ao/R55RNV4O7TA2Qopok8yN+X0LIP6RVWLFkprck=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0/go.mod h1:BMsdeOxN04K0L5FNUBfjFdvwWGNe/rkmSwH4Aelu/X0=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 h1:ZIg3ZT/aQ7AfKqdwp7ECpOK6vHqquXXuyTjIO8ZdmPs=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0/go.mod h1:DQAwmETtZV00skUwgD6+0U89g80NKsJE3DCKeLLPQMI=
-go.opentelemetry.io/contrib/propagators/b3 v1.27.0 h1:IjgxbomVrV9za6bRi8fWCNXENs0co37SZedQilP2hm0=
-go.opentelemetry.io/contrib/propagators/b3 v1.27.0/go.mod h1:Dv9obQz25lCisDvvs4dy28UPh974CxkahRDUPsY7y9E=
-go.opentelemetry.io/contrib/zpages v0.52.0 h1:MPgkMy0Cp3O5EdfVXP0ss3ujhEibysTM4eszx7E7d+E=
-go.opentelemetry.io/contrib/zpages v0.52.0/go.mod h1:fqG5AFdoYru3A3DnhibVuaaEfQV2WKxE7fYE1jgDRwk=
-go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
-go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
-go.opentelemetry.io/otel/bridge/opencensus v1.27.0 h1:ao9aGGHd+G4YfjBpGs6vbkvt5hoC67STlJA9fCnOAcs=
-go.opentelemetry.io/otel/bridge/opencensus v1.27.0/go.mod h1:uRvWtAAXzyVOST0WMPX5JHGBaAvBws+2F8PcC5gMnTk=
-go.opentelemetry.io/otel/bridge/opentracing v1.26.0 h1:Q/dHj0DOhfLMAs5u5ucAbC7gy66x9xxsZRLpHCJ4XhI=
-go.opentelemetry.io/otel/bridge/opentracing v1.26.0/go.mod h1:HfypvOw/8rqu4lXDhwaxVK1ibBAi1lTMXBHV9rywOCw=
+go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
+go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
+go.opentelemetry.io/collector v0.118.0 h1:OBqxppK9Ul6bzEabcHsx11pXwgp05sBpqYxIxiOkyFo=
+go.opentelemetry.io/collector v0.118.0/go.mod h1:yxfijW5k9dwd9sifTBAEoItE+ahFEtOlyvex1B99uno=
+go.opentelemetry.io/collector/client v1.24.0 h1:eH7ctqDnRWNH5QVVbAvdYYdkvr8QWLkEm8FUPaaYbWE=
+go.opentelemetry.io/collector/client v1.24.0/go.mod h1:C/38SYPa0tTL6ikPz/glYz6f3GVzEuT4nlEml6IBDMw=
+go.opentelemetry.io/collector/component v0.118.0 h1:sSO/ObxJ+yH77Z4DmT1mlSuxhbgUmY1ztt7xCA1F/8w=
+go.opentelemetry.io/collector/component v0.118.0/go.mod h1:LUJ3AL2b+tmFr3hZol3hzKzCMvNdqNq0M5CF3SWdv4M=
+go.opentelemetry.io/collector/component/componentstatus v0.118.0 h1:1aCIdUjqz0noKNQr1v04P+lwF89Lkua5U7BhH9IAxkE=
+go.opentelemetry.io/collector/component/componentstatus v0.118.0/go.mod h1:ynO1Nyj0t1h6x/djIMJy35bhnnWEc2mlQaFgDNUO504=
+go.opentelemetry.io/collector/component/componenttest v0.118.0 h1:knEHckoiL2fEWSIc0iehg39zP4IXzi9sHa45O+oxKo8=
+go.opentelemetry.io/collector/component/componenttest v0.118.0/go.mod h1:aHc7t7zVwCpbhrWIWY+GMuaMxMCUP8C8P7pJOt8r/vU=
+go.opentelemetry.io/collector/config/configauth v0.118.0 h1:uBH/s9kRw/m7VWuibrkCzbXSCVLf9ElKq9NuKb0wAwk=
+go.opentelemetry.io/collector/config/configauth v0.118.0/go.mod h1:uAmSGkihIENoIah6mEQ8S/HX4oiFOHZu3EoZLZwi9OI=
+go.opentelemetry.io/collector/config/configcompression v1.24.0 h1:jyM6BX7wYcrh+eVSC0FMbWgy/zb9iP58SerOrvisccE=
+go.opentelemetry.io/collector/config/configcompression v1.24.0/go.mod h1:LvYG00tbPTv0NOLoZN0wXq1F5thcxvukO8INq7xyfWU=
+go.opentelemetry.io/collector/config/configgrpc v0.118.0 h1:if8VfsnnHwVX/E+GgehVXKh85YtAtVci+c4A/M5gPh0=
+go.opentelemetry.io/collector/config/configgrpc v0.118.0/go.mod h1:TZqpu5s/iEW5XmhSnzrhXCUQ3W5qaICNvlllBf3GGcw=
+go.opentelemetry.io/collector/config/confighttp v0.118.0 h1:ey50dfySOCPgUPJ1x8Kq6CmNcv/TpZHt6cYmPhZItj0=
+go.opentelemetry.io/collector/config/confighttp v0.118.0/go.mod h1:4frheVFiIfKUHuD/KAPn+u+d+EUx5GlQTNmoI1ftReA=
+go.opentelemetry.io/collector/config/confignet v1.24.0 h1:Je1oO3qCUI4etX9ZVyav/NkeD+sfzZQRmwMGy51Oei4=
+go.opentelemetry.io/collector/config/confignet v1.24.0/go.mod h1:ZppUH1hgUJOubawEsxsQ9MzEYFytqo2GnVSS7d4CVxc=
+go.opentelemetry.io/collector/config/configopaque v1.24.0 h1:EPOprMDreZPKyIgT0/eVBvEGQVvq7ncvBCBVnWerj54=
+go.opentelemetry.io/collector/config/configopaque v1.24.0/go.mod h1:sW0t0iI/VfRL9VYX7Ik6XzVgPcR+Y5kejTLsYcMyDWs=
+go.opentelemetry.io/collector/config/configretry v1.24.0 h1:sIPHhNNY2YlHMIJ//63iMxIqlgDeGczId0uUb1njsPM=
+go.opentelemetry.io/collector/config/configretry v1.24.0/go.mod h1:cleBc9I0DIWpTiiHfu9v83FUaCTqcPXmebpLxjEIqro=
+go.opentelemetry.io/collector/config/configtelemetry v0.118.0 h1:UlN46EViG2X42odWtXgWaqY7Y01ZKpsnswSwXTWx5mM=
+go.opentelemetry.io/collector/config/configtelemetry v0.118.0/go.mod h1:SlBEwQg0qly75rXZ6W1Ig8jN25KBVBkFIIAUI1GiAAE=
+go.opentelemetry.io/collector/config/configtls v1.24.0 h1:rOhl8qjIlUVVRHnwQj6/vZe6cuCYImyx7aVDBR35bqI=
+go.opentelemetry.io/collector/config/configtls v1.24.0/go.mod h1:d0OdfkbuYEMYDBJLSbpH0wPI29lmSiFT3geqh/ygF2k=
+go.opentelemetry.io/collector/confmap v1.24.0 h1:UUHVhkDCsVw14jPOarug9PDQE2vaB2ELPWMr7ARFBCA=
+go.opentelemetry.io/collector/confmap v1.24.0/go.mod h1:Rrhs+MWoaP6AswZp+ReQ2VO9dfOfcUjdjiSHBsG+nec=
+go.opentelemetry.io/collector/connector v0.118.0 h1:amay4UriWrtydaAxjQ8/MTTaVYERlZcYLCAGcjoBejw=
+go.opentelemetry.io/collector/connector v0.118.0/go.mod h1:R6jbMrHZYg21pZ0nsoo4cSHIn7Lrdpi5R3OWcDEQwhE=
+go.opentelemetry.io/collector/connector/connectortest v0.118.0 h1:hLMSTqtFWveXa3b1qJMEaWuaX3PHx7dfl8G/bsac2fE=
+go.opentelemetry.io/collector/connector/connectortest v0.118.0/go.mod h1:hm6TNLiQLe65NpENCFsFoiO8fOf3BbN4UF1heUsT73Q=
+go.opentelemetry.io/collector/connector/xconnector v0.118.0 h1:0s6rwZmt8va6xd3BEZs7s2QBNFNjLv0kzYi6l44dKqc=
+go.opentelemetry.io/collector/connector/xconnector v0.118.0/go.mod h1:12mJPGWo90iZrrpgOkmSd5TkejweL34V/R6AqwqJnMA=
+go.opentelemetry.io/collector/consumer v1.24.0 h1:7DeyBm9qdr1EPuCfPjWyChPK16DbVc0wZeSa9LZprFU=
+go.opentelemetry.io/collector/consumer v1.24.0/go.mod h1:0G6jvZprIp4dpKMD1ZxCjriiP9GdFvFMObsQEtTk71s=
+go.opentelemetry.io/collector/consumer/consumererror v0.118.0 h1:Cx//ZFDa6wUEoRDRYRZ/Rkb52dWNoHj2e9FdlcM9jCA=
+go.opentelemetry.io/collector/consumer/consumererror v0.118.0/go.mod h1:2mhnzzLYR5zS2Zz4h9ZnRM8Uogu9qatcfQwGNenhing=
+go.opentelemetry.io/collector/consumer/consumererror/xconsumererror v0.118.0 h1:/kkWdw1PQtPb1noZMTt6tbgP1ntWdJ835u1o45nYhTg=
+go.opentelemetry.io/collector/consumer/consumererror/xconsumererror v0.118.0/go.mod h1:2mdXnTT0nPd/KTG9w29cc1OGKBLzL2HW+x/o7QVpCpI=
+go.opentelemetry.io/collector/consumer/consumertest v0.118.0 h1:8AAS9ejQapP1zqt0+cI6u+AUBheT3X0171N9WtXWsVY=
+go.opentelemetry.io/collector/consumer/consumertest v0.118.0/go.mod h1:spRM2wyGr4QZzqMHlLmZnqRCxqXN4Wd0piogC4Qb5PQ=
+go.opentelemetry.io/collector/consumer/xconsumer v0.118.0 h1:guWnzzRqgCInjnYlOQ1BPrimppNGIVvnknAjlIbWXuY=
+go.opentelemetry.io/collector/consumer/xconsumer v0.118.0/go.mod h1:C5V2d6Ys/Fi6k3tzjBmbdZ9v3J/rZSAMlhx4KVcMIIg=
+go.opentelemetry.io/collector/exporter v0.118.0 h1:PE0vF2U+znOB8OVLPWNw40bGCoT/5QquQ8Xbz4i9Rb0=
+go.opentelemetry.io/collector/exporter v0.118.0/go.mod h1:5ST3gxT/RzE/vg2bcGDtWJxlQF1ypwk50UpmdK1kUqY=
+go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper v0.118.0 h1:wC4IyE98DR4eXVyT7EnA4iJ6s+sbUTZVq/5KoVWSKDw=
+go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper v0.118.0/go.mod h1:spjZv9QX+pCcx/ECSqlo/UKCYJzp2rR5NsvIgpfdUxQ=
+go.opentelemetry.io/collector/exporter/exportertest v0.118.0 h1:8gWky42BcJsxoaqWbnqCDUjP3Y84hjC6RD/UWHwR7sI=
+go.opentelemetry.io/collector/exporter/exportertest v0.118.0/go.mod h1:UbpQBZvznA8YPqqcKlafVIhB6Qa4fPf2+I67MUGyNqo=
+go.opentelemetry.io/collector/exporter/otlpexporter v0.118.0 h1:kfVfskZEroh3zs8HmdCLeo9weAJT5oedd+04McXEBSU=
+go.opentelemetry.io/collector/exporter/otlpexporter v0.118.0/go.mod h1:iyvbf05lZdh+KObvNF0uEpaaV9YoQNofm1RRamWbq78=
+go.opentelemetry.io/collector/exporter/otlphttpexporter v0.118.0 h1:8ShK60uf6nY6TlSYBZ2Y7eh3sv0WwNkUKgmh3P1U/2U=
+go.opentelemetry.io/collector/exporter/otlphttpexporter v0.118.0/go.mod h1:UJXry//sSRs04eg35nZkT1wxP43tPxz/3wbf26eLRkc=
+go.opentelemetry.io/collector/exporter/xexporter v0.118.0 h1:PZAo1CFhZHfQwtzUNj+Fwcv/21pWHJHTsrIddD096fw=
+go.opentelemetry.io/collector/exporter/xexporter v0.118.0/go.mod h1:x4J+qyrRcp4DfWKqK3DLZomFTIUhedsqCQWqq6Gqps4=
+go.opentelemetry.io/collector/extension v0.118.0 h1:9o5jLCTRvs0+rtFDx04zTBuB4WFrE0RvtVCPovYV0sA=
+go.opentelemetry.io/collector/extension v0.118.0/go.mod h1:BFwB0WOlse6JnrStO44+k9kwUVjjtseFEHhJLHD7lBg=
+go.opentelemetry.io/collector/extension/auth v0.118.0 h1:+eMNUBUK1JK9A3mr95BasbWE90Lxu+WlR9sqS36sJms=
+go.opentelemetry.io/collector/extension/auth v0.118.0/go.mod h1:MJpYcRGSERkgOhczqTKoAhkHmcugr+YTlRhc/SpYYYI=
+go.opentelemetry.io/collector/extension/auth/authtest v0.118.0 h1:KIORXNc71vfpQrrZOntiZesRCZtQ8alrASWVT/zZkyo=
+go.opentelemetry.io/collector/extension/auth/authtest v0.118.0/go.mod h1:0ZlSP9NPAfTRQd6Tx4mOH0IWrp6ufHaVN//L9Mb87gM=
+go.opentelemetry.io/collector/extension/extensioncapabilities v0.118.0 h1:I/SjuacUXdBOxa6ZnVMuMKkZX+m40tUm+5YKqWnNv/c=
+go.opentelemetry.io/collector/extension/extensioncapabilities v0.118.0/go.mod h1:IxDALY0rMvsENrVui7Y5tvvL/xHNgMKuhfiQiSHMiTQ=
+go.opentelemetry.io/collector/extension/extensiontest v0.118.0 h1:rKBUaFS9elGfENG45wANmrwx7mHsmt1+YWCzxjftElg=
+go.opentelemetry.io/collector/extension/extensiontest v0.118.0/go.mod h1:CqNXzkIOR32D8EUpptpOXhpFkibs3kFlRyNMEgIW8l4=
+go.opentelemetry.io/collector/extension/xextension v0.118.0 h1:P6gvJzqnH9ma2QfnWde/E6Xu9bAzuefzIwm5iupiVPE=
+go.opentelemetry.io/collector/extension/xextension v0.118.0/go.mod h1:ne4Q8ZtRlbC0Etr2hTcVkjOpVM2bE2xy1u+R80LUkDw=
+go.opentelemetry.io/collector/extension/zpagesextension v0.118.0 h1:XkaLvST4p1/i/dsk5yCwFG4HJUUr6joCbegJc2MEOrE=
+go.opentelemetry.io/collector/extension/zpagesextension v0.118.0/go.mod h1:alaAK7I7UeM1Hcs/eNqIjTLIZpqrk3mD1Ua42mJ7JnU=
+go.opentelemetry.io/collector/featuregate v1.24.0 h1:DEqDsuJgxjZ3E5JNC9hXCd4sWGFiF7h9kaziODuqwFY=
+go.opentelemetry.io/collector/featuregate v1.24.0/go.mod h1:3GaXqflNDVwWndNGBJ1+XJFy3Fv/XrFgjMN60N3z7yg=
+go.opentelemetry.io/collector/internal/fanoutconsumer v0.118.0 h1:affTj1Qxjbg9dZ1x2tbV9Rs9/otZQ1lHA++L8qB5KiQ=
+go.opentelemetry.io/collector/internal/fanoutconsumer v0.118.0/go.mod h1:9mbE68mYdtTyozr3jTtNMB1RA5F8/dt2aWVYSu6bsQ4=
+go.opentelemetry.io/collector/internal/sharedcomponent v0.118.0 h1:aCiwkzBL4VyPEUBmEcTnoPyld5EClJGbwyUNJhHNgEo=
+go.opentelemetry.io/collector/internal/sharedcomponent v0.118.0/go.mod h1:drV6vD4acelEUOjM9cgxV5ILs8q2AYUh3EV+Pljdorg=
+go.opentelemetry.io/collector/otelcol v0.118.0 h1:uSD3wU0sO4vsw5VvWI2yUFLggLdq1BWN/nC1LJXIhMg=
+go.opentelemetry.io/collector/otelcol v0.118.0/go.mod h1:OdKz/AXj+ewCwXp/acZCBIoMIYiIxeNRNkbqUXvWi+o=
+go.opentelemetry.io/collector/pdata v1.24.0 h1:D6j92eAzmAbQgivNBUnt8r9juOl8ugb+ihYynoFZIEg=
+go.opentelemetry.io/collector/pdata v1.24.0/go.mod h1:cf3/W9E/uIvPS4MR26SnMFJhraUCattzzM6qusuONuc=
+go.opentelemetry.io/collector/pdata/pprofile v0.118.0 h1:VK/fr65VFOwEhsSGRPj5c3lCv0yIK1Kt0sZxv9WZBb8=
+go.opentelemetry.io/collector/pdata/pprofile v0.118.0/go.mod h1:eJyP/vBm179EghV3dPSnamGAWQwLyd+4z/3yG54YFoQ=
+go.opentelemetry.io/collector/pdata/testdata v0.118.0 h1:5N0w1SX9KIRkwvtkrpzQgXy9eGk3vfNG0ds6mhEPMIM=
+go.opentelemetry.io/collector/pdata/testdata v0.118.0/go.mod h1:UY+GHV5bOC1BnFburOZ0wiHReJj1XbW12mi2Ogbc5Lw=
+go.opentelemetry.io/collector/pipeline v0.118.0 h1:RI1DMe7L0+5hGkx0EDGxG00TaJoh96MEQppgOlGx1Oc=
+go.opentelemetry.io/collector/pipeline v0.118.0/go.mod h1:qE3DmoB05AW0C3lmPvdxZqd/H4po84NPzd5MrqgtL74=
+go.opentelemetry.io/collector/pipeline/xpipeline v0.118.0 h1:ZUVF1MYNQYZvmuL30KfP+QbVGSbFZvldBM9hgCe4J4k=
+go.opentelemetry.io/collector/pipeline/xpipeline v0.118.0/go.mod h1:XgG1ktGO9J1f6fasMYPWSXL9Raan/VYB9vddKKWp5hQ=
+go.opentelemetry.io/collector/processor v0.118.0 h1:NlqWiTTpPP+EPbrqTcNP9nh/4O4/9U9RGWVB49xo4ws=
+go.opentelemetry.io/collector/processor v0.118.0/go.mod h1:Y8OD7wk51oPuBqrbn1qXIK91AbprRHP76hlvEzC24U4=
+go.opentelemetry.io/collector/processor/processortest v0.118.0 h1:VfTLHuIaJWGyUmrvAOvf63gPMf1vAW68/jtJClEsKtU=
+go.opentelemetry.io/collector/processor/processortest v0.118.0/go.mod h1:ZFWxsSoafGNOEk83FtGz43M5ypUzAOvGnfT0aQTDHdU=
+go.opentelemetry.io/collector/processor/xprocessor v0.118.0 h1:M/EMhPRbadHLpv7g99fBjfgyuYexBZmgQqb2vjTXjvM=
+go.opentelemetry.io/collector/processor/xprocessor v0.118.0/go.mod h1:lkoQoCv2Cz+C0kf2VHgBUDYWDecZLLeaHEvHDXbBCXU=
+go.opentelemetry.io/collector/receiver v0.118.0 h1:X4mspHmbbtwdCQZ7o370kNmdWfxRnK1FrsvEShCCKEc=
+go.opentelemetry.io/collector/receiver v0.118.0/go.mod h1:wFyfu6sgrkDPLQoGOGMuChGZzkZnYcI/tPJWV4CRTzs=
+go.opentelemetry.io/collector/receiver/otlpreceiver v0.118.0 h1:Nud8aaRDb86K2kBeqMTjqAKDUV00JDn+G4wUZ3hDlAk=
+go.opentelemetry.io/collector/receiver/otlpreceiver v0.118.0/go.mod h1:MJvDEzWJnm1FMoIoTKmhlT3pPmwJP+65GKWy0lAzd30=
+go.opentelemetry.io/collector/receiver/receivertest v0.118.0 h1:XlMr2mPsyXJsMUOqCpEoY3uCPsLZQbNA5fmVNDGB7Bw=
+go.opentelemetry.io/collector/receiver/receivertest v0.118.0/go.mod h1:dtu/H1RNjhy11hTVf/XUfc02uGufMhYYdhhYBbglcUg=
+go.opentelemetry.io/collector/receiver/xreceiver v0.118.0 h1:dzECve9e0H3ot0JWnWPuQr9Y84RhOYSd0+CjvJskx7Y=
+go.opentelemetry.io/collector/receiver/xreceiver v0.118.0/go.mod h1:Lv1nD/mSYSP64iV8k+C+mWWZZOMLRubv9d1SUory3/E=
+go.opentelemetry.io/collector/semconv v0.118.0 h1:V4vlMIK7TIaemrrn2VawvQPwruIKpj7Xgw9P5+BL56w=
+go.opentelemetry.io/collector/semconv v0.118.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI=
+go.opentelemetry.io/collector/service v0.118.0 h1:acZ9LzUbEF5M3G7o5FgenPJVuuM2y8c4HW5JVm648L4=
+go.opentelemetry.io/collector/service v0.118.0/go.mod h1:uw3cl3UtkAOrEr8UQV2lXKjyTIbhWxURaQec8kE+Pic=
+go.opentelemetry.io/contrib/bridges/otelzap v0.8.0 h1:4jqXEd0FGULFBy1bF1ledBePc0Ssu8YVddTgr8BXDTc=
+go.opentelemetry.io/contrib/bridges/otelzap v0.8.0/go.mod h1:nrDogEQCtEOQ4jAiN4uHIE0BqicDF9bMyepgK1pIbP4=
+go.opentelemetry.io/contrib/bridges/prometheus v0.59.0 h1:HY2hJ7yn3KuEBBBsKxvF3ViSmzLwsgeNvD+0utRMgzc=
+go.opentelemetry.io/contrib/bridges/prometheus v0.59.0/go.mod h1:H4H7vs8766kwFnOZVEGMJFVF+phpBSmTckvvNRdJeDI=
+go.opentelemetry.io/contrib/config v0.10.0 h1:2JknAzMaYjxrHkTnZh3eOme/Y2P5eHE2SWfhfV6Xd6c=
+go.opentelemetry.io/contrib/config v0.10.0/go.mod h1:aND2M6/KfNkntI5cyvHriR/zvZgPf8j9yETdSmvpfmc=
+go.opentelemetry.io/contrib/exporters/autoexport v0.59.0 h1:dKhAFwh7SSoOw+gwMtSv+XLkUGTFAwAGMT3X3XSE4FA=
+go.opentelemetry.io/contrib/exporters/autoexport v0.59.0/go.mod h1:fPl+qlrhRdRntIpPs9JoQ0iBKAsnH5VkgppU1f9kyF4=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 h1:rgMkmiGfix9vFJDcDi1PK8WEQP4FLQwLDfhp5ZLpFeE=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0/go.mod h1:ijPqXp5P6IRRByFVVg9DY8P5HkxkHE5ARIa+86aXPf4=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=
+go.opentelemetry.io/contrib/propagators/b3 v1.33.0 h1:ig/IsHyyoQ1F1d6FUDIIW5oYpsuTVtN16AyGOgdjAHQ=
+go.opentelemetry.io/contrib/propagators/b3 v1.33.0/go.mod h1:EsVYoNy+Eol5znb6wwN3XQTILyjl040gUpEnUSNZfsk=
+go.opentelemetry.io/contrib/zpages v0.56.0 h1:W7vP6s3juzL5KiHpr41zLNmsJ0QAZudYu8ay0zGAoko=
+go.opentelemetry.io/contrib/zpages v0.56.0/go.mod h1:IxPRP4TYHw9jLeaEOSDIiA9zmyJNZNO6sbW55iMvSXs=
+go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY=
+go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI=
+go.opentelemetry.io/otel/bridge/opencensus v1.34.0 h1:2Uxf3WAnOkGFTMlMShbiHNF2qN1iGdnt5m6hUnUp07k=
+go.opentelemetry.io/otel/bridge/opencensus v1.34.0/go.mod h1:ALZT48QF8vj9XiFlBFuBGBQsj9Wk8Sk1zdyu6/1MCVs=
+go.opentelemetry.io/otel/bridge/opentracing v1.34.0 h1:QwIV7Mh/1MNFS56EgWI8p+nAAx/40DIQqgO0qXkX5NY=
+go.opentelemetry.io/otel/bridge/opentracing v1.34.0/go.mod h1:4GYWm9bVsi+4+qJFpmBjFuuuMTVgxxIMfzekizbZ+o0=
 go.opentelemetry.io/otel/exporters/jaeger v1.17.0 h1:D7UpUy2Xc2wsi1Ras6V40q806WM07rqoCWzXu7Sqy+4=
 go.opentelemetry.io/otel/exporters/jaeger v1.17.0/go.mod h1:nPCqOnEH9rNLKqH/+rrUjiMzHJdV1BlpKcTwRTyKkKI=
-go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.4.0 h1:zBPZAISA9NOc5cE8zydqDiS0itvg/P/0Hn9m72a5gvM=
-go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.4.0/go.mod h1:gcj2fFjEsqpV3fXuzAA+0Ze1p2/4MJ4T7d77AmkvueQ=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0 h1:U2guen0GhqH8o/G2un8f/aG/y++OuW6MyCo6hT9prXk=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0/go.mod h1:yeGZANgEcpdx/WK0IvvRFC+2oLiMS2u4L/0Rj2M2Qr0=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.28.0 h1:aLmmtjRke7LPDQ3lvpFz+kNEH43faFhzW7v8BFIEydg=
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.28.0/go.mod h1:TC1pyCt6G9Sjb4bQpShH+P5R53pO6ZuGnHuuln9xMeE=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 h1:j9+03ymgYhPKmeXGk5Zu+cIZOlVzd9Zv7QIiyItjFBU=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0/go.mod h1:Y5+XiUG4Emn1hTfciPzGPJaSI+RpDts6BnCIir0SLqk=
-go.opentelemetry.io/otel/exporters/prometheus v0.50.0 h1:2Ewsda6hejmbhGFyUvWZjUThC98Cf8Zy6g0zkIimOng=
-go.opentelemetry.io/otel/exporters/prometheus v0.50.0/go.mod h1:pMm5PkUo5YwbLiuEf7t2xg4wbP0/eSJrMxIMxKosynY=
-go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.4.0 h1:0MH3f8lZrflbUWXVxyBg/zviDFdGE062uKh5+fu8Vv0=
-go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.4.0/go.mod h1:Vh68vYiHY5mPdekTr0ox0sALsqjoVy0w3Os278yX5SQ=
-go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.28.0 h1:BJee2iLkfRfl9lc7aFmBwkWxY/RI1RDdXepSF6y8TPE=
-go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.28.0/go.mod h1:DIzlHs3DRscCIBU3Y9YSzPfScwnYnzfnCd4g8zA7bZc=
-go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 h1:EVSnY9JbEEW92bEkIYOVMw4q1WJxIAGoFTrtYOzWuRQ=
-go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0/go.mod h1:Ea1N1QQryNXpCD0I1fdLibBAIpQuBkznMmkdKrapk1Y=
-go.opentelemetry.io/otel/log v0.4.0 h1:/vZ+3Utqh18e8TPjuc3ecg284078KWrR8BRz+PQAj3o=
-go.opentelemetry.io/otel/log v0.4.0/go.mod h1:DhGnQvky7pHy82MIRV43iXh3FlKN8UUKftn0KbLOq6I=
-go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
-go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
-go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk=
-go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0=
-go.opentelemetry.io/otel/sdk/log v0.4.0 h1:1mMI22L82zLqf6KtkjrRy5BbagOTWdJsqMY/HSqILAA=
-go.opentelemetry.io/otel/sdk/log v0.4.0/go.mod h1:AYJ9FVF0hNOgAVzUG/ybg/QttnXhUePWAupmCqtdESo=
-go.opentelemetry.io/otel/sdk/metric v1.28.0 h1:OkuaKgKrgAbYrrY0t92c+cC+2F6hsFNnCQArXCKlg08=
-go.opentelemetry.io/otel/sdk/metric v1.28.0/go.mod h1:cWPjykihLAPvXKi4iZc1dpER3Jdq2Z0YLse3moQUCpg=
-go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
-go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=
-go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
-go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
+go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.10.0 h1:5dTKu4I5Dn4P2hxyW3l3jTaZx9ACgg0ECos1eAVrheY=
+go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.10.0/go.mod h1:P5HcUI8obLrCCmM3sbVBohZFH34iszk/+CPWuakZWL8=
+go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.10.0 h1:q/heq5Zh8xV1+7GoMGJpTxM2Lhq5+bFxB29tshuRuw0=
+go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.10.0/go.mod h1:leO2CSTg0Y+LyvmR7Wm4pUxE8KAmaM2GCVx7O+RATLA=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.34.0 h1:ajl4QczuJVA2TU9W9AGw++86Xga/RKt//16z/yxPgdk=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.34.0/go.mod h1:Vn3/rlOJ3ntf/Q3zAI0V5lDnTbHGaUsNUeF6nZmm7pA=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.34.0 h1:opwv08VbCZ8iecIWs+McMdHRcAXzjAeda3uG2kI/hcA=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.34.0/go.mod h1:oOP3ABpW7vFHulLpE8aYtNBodrHhMTrvfxUXGvqm7Ac=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 h1:OeNbIYk/2C15ckl7glBlOBp5+WlYsOElzTNmiPW/x60=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 h1:tgJ0uaNS4c98WRNUEx5U3aDlrDOI5Rs+1Vifcw4DJ8U=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0/go.mod h1:U7HYyW0zt/a9x5J1Kjs+r1f/d4ZHnYFclhYY2+YbeoE=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.34.0 h1:BEj3SPM81McUZHYjRS5pEgNgnmzGJ5tRpU5krWnV8Bs=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.34.0/go.mod h1:9cKLGBDzI/F3NoHLQGm4ZrYdIHsvGt6ej6hUowxY0J4=
+go.opentelemetry.io/otel/exporters/prometheus v0.56.0 h1:GnCIi0QyG0yy2MrJLzVrIM7laaJstj//flf1zEJCG+E=
+go.opentelemetry.io/otel/exporters/prometheus v0.56.0/go.mod h1:JQcVZtbIIPM+7SWBB+T6FK+xunlyidwLp++fN0sUaOk=
+go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.10.0 h1:GKCEAZLEpEf78cUvudQdTg0aET2ObOZRB2HtXA0qPAI=
+go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.10.0/go.mod h1:9/zqSWLCmHT/9Jo6fYeUDRRogOLL60ABLsHWS99lF8s=
+go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.34.0 h1:czJDQwFrMbOr9Kk+BPo1y8WZIIFIK58SA1kykuVeiOU=
+go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.34.0/go.mod h1:lT7bmsxOe58Tq+JIOkTQMCGXdu47oA+VJKLZHbaBKbs=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.34.0 h1:jBpDk4HAUsrnVO1FsfCfCOTEc/MkInJmvfCHYLFiT80=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.34.0/go.mod h1:H9LUIM1daaeZaz91vZcfeM0fejXPmgCYE8ZhzqfJuiU=
+go.opentelemetry.io/otel/log v0.10.0 h1:1CXmspaRITvFcjA4kyVszuG4HjA61fPDxMb7q3BuyF0=
+go.opentelemetry.io/otel/log v0.10.0/go.mod h1:PbVdm9bXKku/gL0oFfUF4wwsQsOPlpo4VEqjvxih+FM=
+go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ=
+go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE=
+go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A=
+go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU=
+go.opentelemetry.io/otel/sdk/log v0.10.0 h1:lR4teQGWfeDVGoute6l0Ou+RpFqQ9vaPdrNJlST0bvw=
+go.opentelemetry.io/otel/sdk/log v0.10.0/go.mod h1:A+V1UTWREhWAittaQEG4bYm4gAZa6xnvVu+xKrIRkzo=
+go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk=
+go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w=
+go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k=
+go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE=
+go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4=
+go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4=
 go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
 go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
 go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
@@ -1094,8 +1143,12 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
-golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ=
-golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg=
+golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
+golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
+golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
+golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
+golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
+golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -1106,8 +1159,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
 golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM=
-golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
+golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f h1:XdNn9LlyWAhLVp6P/i8QYBW+hlyhrhei9uErw2B5GJo=
+golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f/go.mod h1:D5SMRVC3C2/4+F/DB1wZsLRnSNimn2Sp/NPsCrsv8ak=
 golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
 golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -1131,8 +1184,12 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8=
-golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
+golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1147,7 +1204,6 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -1173,16 +1229,21 @@ golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qx
 golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
 golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
 golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
-golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo=
-golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
+golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
+golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
+golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
+golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0=
+golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
-golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70=
+golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1195,15 +1256,17 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ=
-golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
-golang.org/x/sys v0.0.0-20180816055513-1c9583448a9c/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
+golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
+golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1264,15 +1327,23 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s=
-golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
+golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
-golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU=
-golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
+golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
+golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
+golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
+golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg=
+golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1284,13 +1355,17 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
 golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
 golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug=
-golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
-golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
+golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -1337,16 +1412,19 @@ golang.org/x/tools
v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= -golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.27.0 h1:qEKojBykQkQ4EynWy4S8Weg69NumxKdn40Fce3uc/8o= +golang.org/x/tools v0.27.0/go.mod h1:sUi0ZgbwW9ZPAq26Ekut+weQPR5eIM6GQLQ1Yjm1H0Q= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= -gonum.org/v1/gonum v0.15.0 h1:2lYxjRbTYyxkJxlhC+LvJIx3SsANPdRybu1tGj9/OrQ= -gonum.org/v1/gonum v0.15.0/go.mod h1:xzZVBJBtS+Mz4q0Yl2LJTk+OxOg4jiXZ7qBoM0uISGo= +gonum.org/v1/gonum v0.15.1 h1:FNy7N6OUZVUaWG9pTiD+jlhdQ3lMP+/LcTpJ6+a8sQ0= +gonum.org/v1/gonum v0.15.1/go.mod h1:eZTZuRFrzu5pcyjN5wJhcIhnUdNijYxX1T2IcrOGY0o= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -1363,8 +1441,8 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.188.0 h1:51y8fJ/b1AaaBRJr4yWm96fPcuxSo0JcegXE3DaHQHw= -google.golang.org/api v0.188.0/go.mod h1:VR0d+2SIiWOYG3r/jdm7adPW9hI2aRv9ETOSCQ9Beag= +google.golang.org/api v0.218.0 h1:x6JCjEWeZ9PFCRe9z0FBrNwj7pB7DOAqT35N+IPnAUA= +google.golang.org/api v0.218.0/go.mod h1:5VGHBAkxrA/8EFjLVEYmMUJ8/8+gWWQ3s4cFH0FxG2M= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1407,10 +1485,10 @@ google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20240708141625-4ad9e859172b h1:dSTjko30weBaMj3eERKc0ZVXW4GudCswM3m+P++ukU0= google.golang.org/genproto v0.0.0-20240708141625-4ad9e859172b/go.mod 
h1:FfBgJBJg9GcpPvKIuHSZ/aE1g2ecGL74upMzGZjiGEY= -google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d h1:kHjw/5UfflP/L5EbledDrcG4C2597RtymmGRZvHiCuY= -google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d/go.mod h1:mw8MG/Qz5wfgYr6VqVCiZcHe/GJEfI+oGGDCohaVgB0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240711142825-46eb208f015d h1:JU0iKnSg02Gmb5ZdV8nYsKEKsP6o/FGVWTrw4i1DA9A= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240711142825-46eb208f015d/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f h1:gap6+3Gk41EItBuyi4XX/bp4oqJ3UwuIMl25yGinuAA= +google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:Ic02D47M+zbarjYYUlK57y316f2MoN0gjAwI3f2S95o= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -1418,7 +1496,6 @@ google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiq google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= @@ -1430,8 +1507,8 @@ google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A= +google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1444,8 +1521,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.34.2 
h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/integration/bench/config.yaml b/integration/bench/config.yaml index 56f7d085c21..6a0244744a0 100644 --- a/integration/bench/config.yaml +++ b/integration/bench/config.yaml @@ -7,6 +7,7 @@ server: distributor: receivers: zipkin: + endpoint: "tempo:9411" ingester: lifecycler: @@ -34,4 +35,4 @@ storage: queue_depth: 100 overrides: - max_traces_per_user: 50000 \ No newline at end of file + max_traces_per_user: 50000 diff --git a/integration/e2e/backend/backend.go b/integration/e2e/backend/backend.go index 30b233b8311..b5ffee55d0a 100644 --- a/integration/e2e/backend/backend.go +++ b/integration/e2e/backend/backend.go @@ -16,7 +16,7 @@ import ( ) const ( - azuriteImage = "mcr.microsoft.com/azure-storage/azurite:3.31.0" + azuriteImage = "mcr.microsoft.com/azure-storage/azurite:3.33.0" gcsImage = "fsouza/fake-gcs-server:1.47.6" ) diff --git a/integration/e2e/config-cross-cluster-a.yaml b/integration/e2e/config-cross-cluster-a.yaml index bfc8b023dd1..141a40af0df 100644 --- a/integration/e2e/config-cross-cluster-a.yaml +++ b/integration/e2e/config-cross-cluster-a.yaml @@ -6,6 +6,7 @@ distributor: jaeger: protocols: grpc: + endpoint: "distributor-a:14250" ingester: lifecycler: @@ -42,4 +43,4 @@ memberlist: querier: secondary_ingester_ring: "ring-b" frontend_worker: - frontend_address: tempo_active_active-query-frontend-a:9095 \ No newline at end of file + frontend_address: tempo_active_active-query-frontend-a:9095 diff --git a/integration/e2e/config-cross-cluster-b.yaml b/integration/e2e/config-cross-cluster-b.yaml index 3e363b6e5ce..0401722873f 100644 --- a/integration/e2e/config-cross-cluster-b.yaml +++ b/integration/e2e/config-cross-cluster-b.yaml @@ -6,6 +6,7 @@ distributor: jaeger: protocols: grpc: + endpoint: "distributor-b:14250" ingester: lifecycler: @@ -42,4 +43,4 @@ memberlist: querier: secondary_ingester_ring: "ring-a" frontend_worker: - frontend_address: tempo_active_active-query-frontend-b:9095 \ No newline at end of file + frontend_address: tempo_active_active-query-frontend-b:9095 diff --git a/integration/e2e/config-encodings.tmpl.yaml b/integration/e2e/config-encodings.tmpl.yaml index eb5f54e53a5..6b54f16abd3 100644 --- a/integration/e2e/config-encodings.tmpl.yaml +++ b/integration/e2e/config-encodings.tmpl.yaml @@ -14,10 +14,13 @@ distributor: jaeger: protocols: grpc: + endpoint: "tempo:14250" otlp: protocols: grpc: + endpoint: "tempo:4317" zipkin: + endpoint: "tempo:9411" log_received_spans: enabled: true diff --git a/integration/e2e/config-https.yaml b/integration/e2e/config-https.yaml index 70504884488..4c050ab4fad 100644 --- a/integration/e2e/config-https.yaml +++ b/integration/e2e/config-https.yaml @@ -25,6 +25,7 @@ distributor: jaeger: protocols: grpc: + endpoint: "tempo:14250" ingester: lifecycler: @@ -76,4 +77,4 @@ querier: tls_enabled: true tls_insecure_skip_verify: true tls_cipher_suites: 
TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - tls_min_version: VersionTLS12 \ No newline at end of file + tls_min_version: VersionTLS12 diff --git a/integration/e2e/config-limits-429.yaml b/integration/e2e/config-limits-429.yaml index 4b3f2765483..32825ac4d24 100644 --- a/integration/e2e/config-limits-429.yaml +++ b/integration/e2e/config-limits-429.yaml @@ -9,6 +9,7 @@ distributor: jaeger: protocols: grpc: + endpoint: "tempo:14250" overrides: defaults: diff --git a/integration/e2e/config-limits-partial-success.yaml b/integration/e2e/config-limits-partial-success.yaml index fa4081a9364..cadd1faf844 100644 --- a/integration/e2e/config-limits-partial-success.yaml +++ b/integration/e2e/config-limits-partial-success.yaml @@ -8,6 +8,7 @@ distributor: otlp: protocols: grpc: + endpoint: "tempo:4317" overrides: max_bytes_per_trace: 600 diff --git a/integration/e2e/config-limits-query.yaml b/integration/e2e/config-limits-query.yaml index 8f313b35f18..1574d11dd7d 100644 --- a/integration/e2e/config-limits-query.yaml +++ b/integration/e2e/config-limits-query.yaml @@ -8,6 +8,7 @@ distributor: jaeger: protocols: grpc: + endpoint: "tempo:14250" compactor: compaction: @@ -46,4 +47,4 @@ storage: path: /var/tempo/wal pool: max_workers: 10 - queue_depth: 100 \ No newline at end of file + queue_depth: 100 diff --git a/integration/e2e/config-limits.yaml b/integration/e2e/config-limits.yaml index e46e57866a7..d5d97904d0e 100644 --- a/integration/e2e/config-limits.yaml +++ b/integration/e2e/config-limits.yaml @@ -9,10 +9,13 @@ distributor: jaeger: protocols: grpc: + endpoint: "tempo:14250" otlp: protocols: grpc: + endpoint: "tempo:4317" http: + endpoint: "tempo:4318" overrides: defaults: diff --git a/integration/e2e/config-metrics-generator-targetinfo.yaml b/integration/e2e/config-metrics-generator-targetinfo.yaml index cf54bd1dbcf..c496251a800 100644 --- a/integration/e2e/config-metrics-generator-targetinfo.yaml +++ b/integration/e2e/config-metrics-generator-targetinfo.yaml @@ -6,6 +6,7 @@ distributor: jaeger: protocols: grpc: + endpoint: "distributor:14250" log_received_spans: enabled: true @@ -48,4 +49,4 @@ overrides: max_active_series: 1000 processor: span_metrics: - enable_target_info: true # seconds \ No newline at end of file + enable_target_info: true # seconds diff --git a/integration/e2e/config-metrics-generator.yaml b/integration/e2e/config-metrics-generator.yaml index 06dfc5ff39d..ede1402c61f 100644 --- a/integration/e2e/config-metrics-generator.yaml +++ b/integration/e2e/config-metrics-generator.yaml @@ -6,6 +6,7 @@ distributor: jaeger: protocols: grpc: + endpoint: "distributor:14250" log_received_spans: enabled: true @@ -45,4 +46,4 @@ overrides: defaults: metrics_generator: processors: [service-graphs, span-metrics] - max_active_series: 1000 \ No newline at end of file + max_active_series: 1000 diff --git a/integration/e2e/config-multi-tenant-local.yaml b/integration/e2e/config-multi-tenant-local.yaml index 4d9736b301b..4ded5c61939 100644 --- a/integration/e2e/config-multi-tenant-local.yaml +++ b/integration/e2e/config-multi-tenant-local.yaml @@ -16,10 +16,13 @@ distributor: jaeger: protocols: grpc: + endpoint: "tempo:14250" otlp: protocols: grpc: + endpoint: "tempo:4317" zipkin: + endpoint: "tempo:9411" log_received_spans: enabled: true diff --git a/integration/e2e/config-plugin-test.yaml b/integration/e2e/config-plugin-test.yaml index 4d69e3cf42f..6054dc05a21 100644 --- 
a/integration/e2e/config-plugin-test.yaml +++ b/integration/e2e/config-plugin-test.yaml @@ -13,10 +13,13 @@ distributor: jaeger: protocols: grpc: + endpoint: "tempo:14250" otlp: protocols: grpc: + endpoint: "tempo:4317" zipkin: + endpoint: "tempo:9411" log_received_spans: enabled: true diff --git a/integration/e2e/config-query-range.yaml b/integration/e2e/config-query-range.yaml index faa5e5be6e0..286cf3a3627 100644 --- a/integration/e2e/config-query-range.yaml +++ b/integration/e2e/config-query-range.yaml @@ -15,10 +15,13 @@ distributor: jaeger: protocols: grpc: + endpoint: "tempo:14250" otlp: protocols: grpc: + endpoint: "tempo:4317" zipkin: + endpoint: "tempo:9411" log_received_spans: enabled: true diff --git a/integration/e2e/deployments/config-all-in-one-azurite.yaml b/integration/e2e/deployments/config-all-in-one-azurite.yaml index 3def0ff6f1b..c284699bd6c 100644 --- a/integration/e2e/deployments/config-all-in-one-azurite.yaml +++ b/integration/e2e/deployments/config-all-in-one-azurite.yaml @@ -14,7 +14,7 @@ distributor: jaeger: protocols: grpc: - + endpoint: "tempo:14250" ingester: lifecycler: address: 127.0.0.1 diff --git a/integration/e2e/deployments/config-all-in-one-gcs.yaml b/integration/e2e/deployments/config-all-in-one-gcs.yaml index efffae085c9..70028bc20a7 100644 --- a/integration/e2e/deployments/config-all-in-one-gcs.yaml +++ b/integration/e2e/deployments/config-all-in-one-gcs.yaml @@ -14,6 +14,7 @@ distributor: jaeger: protocols: grpc: + endpoint: "tempo:14250" ingester: lifecycler: diff --git a/integration/e2e/deployments/config-all-in-one-local.yaml b/integration/e2e/deployments/config-all-in-one-local.yaml index c0133165d7b..2f7e5910216 100644 --- a/integration/e2e/deployments/config-all-in-one-local.yaml +++ b/integration/e2e/deployments/config-all-in-one-local.yaml @@ -15,10 +15,13 @@ distributor: jaeger: protocols: grpc: + endpoint: "tempo:14250" otlp: protocols: grpc: + endpoint: "tempo:4317" zipkin: + endpoint: "tempo:9411" log_received_spans: enabled: true log_discarded_spans: diff --git a/integration/e2e/deployments/config-all-in-one-s3.yaml b/integration/e2e/deployments/config-all-in-one-s3.yaml index a429b183628..2f7d19035b9 100644 --- a/integration/e2e/deployments/config-all-in-one-s3.yaml +++ b/integration/e2e/deployments/config-all-in-one-s3.yaml @@ -14,6 +14,7 @@ distributor: jaeger: protocols: grpc: + endpoint: "tempo:14250" ingester: lifecycler: diff --git a/integration/e2e/deployments/config-microservices.tmpl.yaml b/integration/e2e/deployments/config-microservices.tmpl.yaml index b6860dbb29e..1b9e2eef330 100644 --- a/integration/e2e/deployments/config-microservices.tmpl.yaml +++ b/integration/e2e/deployments/config-microservices.tmpl.yaml @@ -6,6 +6,7 @@ distributor: jaeger: protocols: grpc: + endpoint: "distributor:14250" ingester: lifecycler: @@ -45,4 +46,4 @@ memberlist: querier: frontend_worker: - frontend_address: tempo_e2e-query-frontend:9095 \ No newline at end of file + frontend_address: tempo_e2e-query-frontend:9095 diff --git a/integration/e2e/deployments/config-scalable-single-binary.yaml b/integration/e2e/deployments/config-scalable-single-binary.yaml index f50568fda7e..5ccdd15ca80 100644 --- a/integration/e2e/deployments/config-scalable-single-binary.yaml +++ b/integration/e2e/deployments/config-scalable-single-binary.yaml @@ -6,6 +6,7 @@ distributor: jaeger: protocols: grpc: + endpoint: "0.0.0.0:14250" ingester: lifecycler: @@ -45,4 +46,4 @@ memberlist: querier: frontend_worker: - frontend_address: tempo-1:9095 \ No newline at end of file 
+ frontend_address: tempo-1:9095 diff --git a/integration/e2e/ingest/config-kafka.yaml b/integration/e2e/ingest/config-kafka.yaml new file mode 100644 index 00000000000..1d9fe35b170 --- /dev/null +++ b/integration/e2e/ingest/config-kafka.yaml @@ -0,0 +1,73 @@ +target: all +stream_over_http_enabled: true + +server: + http_listen_port: 3200 + log_level: info + +query_frontend: + search: + query_backend_after: 0 # setting these both to 0 will force all range searches to hit the backend + query_ingesters_until: 0 + +distributor: + receivers: + jaeger: + protocols: + grpc: + otlp: + protocols: + grpc: + zipkin: + log_received_spans: + enabled: true + log_discarded_spans: + enabled: true + +ingester: + lifecycler: + address: 127.0.0.1 + ring: + kvstore: + store: inmemory + replication_factor: 1 + final_sleep: 0s + trace_idle_period: 1s + max_block_bytes: 1 + max_block_duration: 2s + complete_block_timeout: 20s + flush_check_period: 1s + partition_ring: + kvstore: + store: inmemory + +storage: + trace: + backend: local + local: + path: /var/tempo + pool: + max_workers: 10 + queue_depth: 100 + block: + version: vParquet3 + blocklist_poll: 1s + + +overrides: + user_configurable_overrides: + enabled: true + poll_interval: 10s + client: + backend: local + local: + path: /var/tempo/overrides + +block_builder: + consume_cycle_duration: 1m + +ingest: + enabled: true + kafka: + address: kafka:9092 + topic: tempo-ingest diff --git a/integration/e2e/ingest/ingest_storage_test.go b/integration/e2e/ingest/ingest_storage_test.go new file mode 100644 index 00000000000..8cbf5f4ada0 --- /dev/null +++ b/integration/e2e/ingest/ingest_storage_test.go @@ -0,0 +1,107 @@ +package ingest + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/grafana/e2e" + "github.com/grafana/tempo/integration/util" + "github.com/grafana/tempo/pkg/httpclient" + tempoUtil "github.com/grafana/tempo/pkg/util" + "github.com/prometheus/prometheus/model/labels" + "github.com/stretchr/testify/require" +) + +func TestIngest(t *testing.T) { + s, err := e2e.NewScenario("tempo_e2e") + require.NoError(t, err) + defer s.Close() + + // copy config template to shared directory and expand template variables + require.NoError(t, util.CopyFileToSharedDir(s, "config-kafka.yaml", "config.yaml")) + + kafka := NewKafka() + require.NoError(t, s.StartAndWaitReady(kafka)) + + tempo := util.NewTempoAllInOne() + require.NoError(t, s.StartAndWaitReady(tempo)) + + // Get port for the Jaeger gRPC receiver endpoint + c, err := util.NewJaegerGRPCClient(tempo.Endpoint(14250)) + require.NoError(t, err) + require.NotNil(t, c) + + info := tempoUtil.NewTraceInfo(time.Now(), "") + require.NoError(t, info.EmitAllBatches(c)) + + time.Sleep(5 * time.Minute) + + expected, err := info.ConstructTraceFromEpoch() + require.NoError(t, err) + + // test metrics + require.NoError(t, tempo.WaitSumMetrics(e2e.Equals(util.SpanCount(expected)), "tempo_distributor_spans_received_total")) + + // test echo + util.AssertEcho(t, "http://"+tempo.Endpoint(3200)+"/api/echo") + + apiClient := httpclient.New("http://"+tempo.Endpoint(3200), "") + + // query an in-memory trace + util.QueryAndAssertTrace(t, apiClient, info) + + // wait trace_idle_time and ensure trace is created in ingester + require.NoError(t, tempo.WaitSumMetricsWithOptions(e2e.Less(3), []string{"tempo_ingester_traces_created_total"}, e2e.WaitMissingMetrics)) + + // flush trace to backend + util.CallFlush(t, tempo) + + // search for trace in backend + util.SearchAndAssertTrace(t, apiClient, info) + 
util.SearchTraceQLAndAssertTrace(t, apiClient, info) + + // sleep + time.Sleep(10 * time.Second) + + // force clear completed block + util.CallFlush(t, tempo) + + fmt.Println(tempo.Endpoint(3200)) + // test metrics + require.NoError(t, tempo.WaitSumMetrics(e2e.Equals(1), "tempo_ingester_blocks_flushed_total")) + require.NoError(t, tempo.WaitSumMetricsWithOptions(e2e.Equals(1), []string{"tempodb_blocklist_length"}, e2e.WaitMissingMetrics)) + require.NoError(t, tempo.WaitSumMetrics(e2e.Equals(3), "tempo_query_frontend_queries_total")) + + matchers := []*labels.Matcher{ + { + Type: labels.MatchEqual, + Name: "receiver", + Value: "tempo/jaeger_receiver", + }, + { + Type: labels.MatchEqual, + Name: "transport", + Value: "grpc", + }, + } + + require.NoError(t, tempo.WaitSumMetricsWithOptions(e2e.Greater(1), []string{"tempo_receiver_accepted_spans"}, e2e.WithLabelMatchers(matchers...))) + require.NoError(t, tempo.WaitSumMetricsWithOptions(e2e.Equals(0), []string{"tempo_receiver_refused_spans"}, e2e.WithLabelMatchers(matchers...))) + + // query trace - should fetch from backend + util.QueryAndAssertTrace(t, apiClient, info) + + // search the backend. this works b/c we're passing a start/end AND setting query ingesters within min/max to 0 + now := time.Now() + util.SearchAndAssertTraceBackend(t, apiClient, info, now.Add(-20*time.Minute).Unix(), now.Unix()) + + util.SearchAndAsserTagsBackend(t, apiClient, now.Add(-20*time.Minute).Unix(), now.Unix()) + + // find the trace with streaming. using the http server b/c that's what Grafana will do + grpcClient, err := util.NewSearchGRPCClient(context.Background(), tempo.Endpoint(3200)) + require.NoError(t, err) + + util.SearchStreamAndAssertTrace(t, context.Background(), grpcClient, info, now.Add(-20*time.Minute).Unix(), now.Unix()) +} diff --git a/integration/e2e/ingest/kafka.go b/integration/e2e/ingest/kafka.go new file mode 100644 index 00000000000..0bd07c41bb6 --- /dev/null +++ b/integration/e2e/ingest/kafka.go @@ -0,0 +1,42 @@ +package ingest + +import ( + "github.com/grafana/e2e" +) + +const kafkaImage = "confluentinc/cp-kafka:7.7.1" + +func NewKafka() *e2e.HTTPService { + envVars := map[string]string{ + "CLUSTER_ID": "zH1GDqcNTzGMDCXm5VZQdg", + "KAFKA_BROKER_ID": "1", + "KAFKA_NUM_PARTITIONS": "1000", + "KAFKA_PROCESS_ROLES": "broker,controller", + "KAFKA_LISTENERS": "PLAINTEXT://:9092,CONTROLLER://:9093,PLAINTEXT_HOST://:29092", + "KAFKA_ADVERTISED_LISTENERS": "PLAINTEXT://kafka:9092,PLAINTEXT_HOST://localhost:29092", + "KAFKA_LISTENER_SECURITY_PROTOCOL_MAP": "PLAINTEXT:PLAINTEXT,CONTROLLER:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT", + "KAFKA_INTER_BROKER_LISTENER_NAME": "PLAINTEXT", + "KAFKA_CONTROLLER_LISTENER_NAMES": "CONTROLLER", + "KAFKA_CONTROLLER_QUORUM_VOTERS": "1@kafka:9093", + "KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR": "1", + "KAFKA_LOG_RETENTION_CHECK_INTERVAL_MS": "10000", + } + + service := e2e.NewConcreteService( + "kafka", + kafkaImage, + e2e.NewCommand("/etc/confluent/docker/run"), + // e2e.NewCmdReadinessProbe(e2e.NewCommand("kafka-topics", "--bootstrap-server", "broker:29092", "--list")), + e2e.NewCmdReadinessProbe(e2e.NewCommand("sh", "-c", "nc -z localhost 9092 || exit 1")), // TODO: A bit unstable, sometimes it fails + 9092, + 29092, + ) + + service.SetEnvVars(envVars) + + httpService := &e2e.HTTPService{ + ConcreteService: service, + } + + return httpService +} diff --git a/integration/e2e/multi_tenant_test.go b/integration/e2e/multi_tenant_test.go index 9b91c3b7c13..f63ca403526 100644 --- a/integration/e2e/multi_tenant_test.go +++ 
b/integration/e2e/multi_tenant_test.go @@ -207,9 +207,9 @@ func assertRequestCountMetric(t *testing.T, s *e2e.HTTPService, route string, re // getAttrsAndSpanNames returns trace attrs and span names func getAttrsAndSpanNames(trace *tempopb.Trace) traceStringsMap { - rAttrsKeys := collector.NewDistinctString(0) - rAttrsValues := collector.NewDistinctString(0) - spanNames := collector.NewDistinctString(0) + rAttrsKeys := collector.NewDistinctString(0, 0, 0) + rAttrsValues := collector.NewDistinctString(0, 0, 0) + spanNames := collector.NewDistinctString(0, 0, 0) for _, b := range trace.ResourceSpans { for _, ss := range b.ScopeSpans { diff --git a/integration/e2e/receivers_test.go b/integration/e2e/receivers_test.go index 5822dc39190..552509eb966 100644 --- a/integration/e2e/receivers_test.go +++ b/integration/e2e/receivers_test.go @@ -7,7 +7,6 @@ import ( "github.com/grafana/dskit/user" "github.com/grafana/e2e" - "github.com/grafana/tempo/integration/util" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -23,6 +22,7 @@ import ( tracenoop "go.opentelemetry.io/otel/trace/noop" "go.uber.org/zap" + "github.com/grafana/tempo/integration/util" "github.com/grafana/tempo/pkg/httpclient" tempoUtil "github.com/grafana/tempo/pkg/util" "github.com/grafana/tempo/pkg/util/test" @@ -86,9 +86,9 @@ func TestReceivers(t *testing.T) { t.Run(tc.name, func(t *testing.T) { // create exporter logger, _ := zap.NewDevelopment() - exporter, err := tc.factory.CreateTracesExporter( + exporter, err := tc.factory.CreateTraces( context.Background(), - exporter.CreateSettings{ + exporter.Settings{ TelemetrySettings: component.TelemetrySettings{ Logger: logger, TracerProvider: tracenoop.NewTracerProvider(), diff --git a/integration/e2e/serverless/config-serverless-gcr.yaml b/integration/e2e/serverless/config-serverless-gcr.yaml deleted file mode 100644 index 76e4fa7d103..00000000000 --- a/integration/e2e/serverless/config-serverless-gcr.yaml +++ /dev/null @@ -1,56 +0,0 @@ -server: - http_listen_port: 3200 - -query_frontend: - search: - query_backend_after: 0 # setting these both to 0 will force all range searches to hit the backend - query_ingesters_until: 0 - -distributor: - receivers: - jaeger: - protocols: - grpc: - -ingester: - lifecycler: - ring: - kvstore: - store: memberlist - replication_factor: 3 - heartbeat_period: 100ms - trace_idle_period: 1s - max_block_duration: 2s - complete_block_timeout: 5s - flush_check_period: 1s - -storage: - trace: - blocklist_poll: 2s - backend: s3 - s3: - bucket: tempo - endpoint: tempo_e2e-minio-9000:9000 # TODO: this is brittle, fix this eventually - access_key: Cheescake # TODO: use cortex_e2e.MinioAccessKey - secret_key: supersecret # TODO: use cortex_e2e.MinioSecretKey - insecure: true - pool: - max_workers: 10 - queue_depth: 100 - -memberlist: - abort_if_cluster_join_fails: false - bind_port: 7946 - join_members: - - tempo_e2e-distributor:7946 - - tempo_e2e-ingester-1:7946 - - tempo_e2e-ingester-2:7946 - - tempo_e2e-ingester-3:7946 - - tempo_e2e-querier:7946 - -querier: - search: - external_endpoints: - - http://tempo_e2e-serverless:8080/ - frontend_worker: - frontend_address: tempo_e2e-query-frontend:9095 diff --git a/integration/e2e/serverless/config-serverless-lambda.yaml b/integration/e2e/serverless/config-serverless-lambda.yaml deleted file mode 100644 index cbd4435909d..00000000000 --- a/integration/e2e/serverless/config-serverless-lambda.yaml +++ /dev/null @@ 
-1,56 +0,0 @@ -server: - http_listen_port: 3200 - -query_frontend: - search: - query_backend_after: 0 # setting these both to 0 will force all range searches to hit the backend - query_ingesters_until: 0 - -distributor: - receivers: - jaeger: - protocols: - grpc: - -ingester: - lifecycler: - ring: - kvstore: - store: memberlist - replication_factor: 3 - heartbeat_period: 100ms - trace_idle_period: 1s - max_block_duration: 2s - complete_block_timeout: 5s - flush_check_period: 1s - -storage: - trace: - blocklist_poll: 2s - backend: s3 - s3: - bucket: tempo - endpoint: tempo_e2e-minio-9000:9000 # TODO: this is brittle, fix this eventually - access_key: Cheescake # TODO: use cortex_e2e.MinioAccessKey - secret_key: supersecret # TODO: use cortex_e2e.MinioSecretKey - insecure: true - pool: - max_workers: 10 - queue_depth: 100 - -memberlist: - abort_if_cluster_join_fails: false - bind_port: 7946 - join_members: - - tempo_e2e-distributor:7946 - - tempo_e2e-ingester-1:7946 - - tempo_e2e-ingester-2:7946 - - tempo_e2e-ingester-3:7946 - - tempo_e2e-querier:7946 - -querier: - search: - external_endpoints: - - http://tempo_e2e-serverless:9000/ - frontend_worker: - frontend_address: tempo_e2e-query-frontend:9095 diff --git a/integration/e2e/serverless/serverless_test.go b/integration/e2e/serverless/serverless_test.go deleted file mode 100644 index c9c8e880d47..00000000000 --- a/integration/e2e/serverless/serverless_test.go +++ /dev/null @@ -1,169 +0,0 @@ -package serverless - -import ( - "testing" - "time" - - "github.com/grafana/tempo/integration/util" - "github.com/prometheus/prometheus/model/labels" - "github.com/stretchr/testify/require" - - "github.com/grafana/e2e" - e2e_db "github.com/grafana/e2e/db" - - "github.com/grafana/tempo/pkg/httpclient" - tempoUtil "github.com/grafana/tempo/pkg/util" - "github.com/grafana/tempo/tempodb/backend" -) - -const ( - configServerlessGCR = "config-serverless-gcr.yaml" - configServerlessLambda = "config-serverless-lambda.yaml" -) - -func TestServerless(t *testing.T) { - testClouds := []struct { - name string - serverless *e2e.HTTPService - config string - }{ - { - name: "gcr", - serverless: newTempoServerlessGCR(), - config: configServerlessGCR, - }, - { - name: "lambda", - serverless: newTempoServerlessLambda(), - config: configServerlessLambda, - }, - } - - for _, tc := range testClouds { - t.Run(tc.name, func(t *testing.T) { - s, err := e2e.NewScenario("tempo_e2e") - require.NoError(t, err) - defer s.Close() - - minio := e2e_db.NewMinio(9000, "tempo") - require.NotNil(t, minio) - require.NoError(t, s.StartAndWaitReady(minio)) - - require.NoError(t, util.CopyFileToSharedDir(s, tc.config, "config.yaml")) - tempoIngester1 := util.NewTempoIngester(1) - tempoIngester2 := util.NewTempoIngester(2) - tempoIngester3 := util.NewTempoIngester(3) - tempoDistributor := util.NewTempoDistributor() - tempoQueryFrontend := util.NewTempoQueryFrontend() - tempoQuerier := util.NewTempoQuerier() - tempoServerless := tc.serverless - require.NoError(t, s.StartAndWaitReady(tempoIngester1, tempoIngester2, tempoIngester3, tempoDistributor, tempoQueryFrontend, tempoQuerier, tempoServerless)) - - // wait for 2 active ingesters - time.Sleep(1 * time.Second) - matchers := []*labels.Matcher{ - { - Type: labels.MatchEqual, - Name: "name", - Value: "ingester", - }, - { - Type: labels.MatchEqual, - Name: "state", - Value: "ACTIVE", - }, - } - require.NoError(t, tempoDistributor.WaitSumMetricsWithOptions(e2e.Equals(3), []string{`tempo_ring_members`}, e2e.WithLabelMatchers(matchers...), 
e2e.WaitMissingMetrics)) - - // Get port for the Jaeger gRPC receiver endpoint - c, err := util.NewJaegerGRPCClient(tempoDistributor.Endpoint(14250)) - require.NoError(t, err) - require.NotNil(t, c) - - info := tempoUtil.NewTraceInfo(time.Now(), "") - require.NoError(t, info.EmitAllBatches(c)) - - // wait trace_idle_time and ensure trace is created in ingester - require.NoError(t, tempoIngester1.WaitSumMetricsWithOptions(e2e.Less(3), []string{"tempo_ingester_traces_created_total"}, e2e.WaitMissingMetrics)) - require.NoError(t, tempoIngester2.WaitSumMetricsWithOptions(e2e.Less(3), []string{"tempo_ingester_traces_created_total"}, e2e.WaitMissingMetrics)) - require.NoError(t, tempoIngester3.WaitSumMetricsWithOptions(e2e.Less(3), []string{"tempo_ingester_traces_created_total"}, e2e.WaitMissingMetrics)) - - features := []*labels.Matcher{ - { - Type: labels.MatchEqual, - Name: "feature", - Value: "search_external_endpoints", - }, - } - require.NoError(t, tempoDistributor.WaitSumMetricsWithOptions(e2e.Equals(1), []string{`tempo_feature_enabled`}, e2e.WithLabelMatchers(features...), e2e.WaitMissingMetrics)) - - apiClient := httpclient.New("http://"+tempoQueryFrontend.Endpoint(3200), "") - - // flush trace to backend - res, err := e2e.DoGet("http://" + tempoIngester1.Endpoint(3200) + "/flush") - require.NoError(t, err) - require.Equal(t, 204, res.StatusCode) - - res, err = e2e.DoGet("http://" + tempoIngester2.Endpoint(3200) + "/flush") - require.NoError(t, err) - require.Equal(t, 204, res.StatusCode) - - res, err = e2e.DoGet("http://" + tempoIngester3.Endpoint(3200) + "/flush") - require.NoError(t, err) - require.Equal(t, 204, res.StatusCode) - - // zzz - time.Sleep(10 * time.Second) - - // search the backend. this works b/c we're passing a start/end AND setting query ingesters within min/max to 0 - now := time.Now() - util.SearchAndAssertTraceBackend(t, apiClient, info, now.Add(-20*time.Minute).Unix(), now.Unix()) - }) - } -} - -func newTempoServerlessGCR() *e2e.HTTPService { - s := e2e.NewHTTPService( - "serverless", - "tempo-serverless", // created by Makefile in /cmd/tempo-serverless - nil, - nil, - 8080, - ) - - s.SetEnvVars(map[string]string{ - "TEMPO_S3_BUCKET": "tempo", - "TEMPO_S3_ENDPOINT": "tempo_e2e-minio-9000:9000", - "TEMPO_S3_ACCESS_KEY": e2e_db.MinioAccessKey, - "TEMPO_S3_SECRET_KEY": e2e_db.MinioSecretKey, - "TEMPO_S3_INSECURE": "true", - "TEMPO_BACKEND": backend.S3, - }) - - s.SetBackoff(util.TempoBackoff()) - - return s -} - -func newTempoServerlessLambda() *e2e.HTTPService { - s := e2e.NewHTTPService( - "serverless", - "tempo-serverless-lambda", // created by build-docker-lambda-test make target - nil, - nil, - 9000, - ) - - s.SetEnvVars(map[string]string{ - "TEMPO_S3_BUCKET": "tempo", - "TEMPO_S3_ENDPOINT": "tempo_e2e-minio-9000:9000", - "TEMPO_S3_ACCESS_KEY": e2e_db.MinioAccessKey, - "TEMPO_S3_SECRET_KEY": e2e_db.MinioSecretKey, - "TEMPO_S3_INSECURE": "true", - "TEMPO_BACKEND": backend.S3, - }) - - s.SetBackoff(util.TempoBackoff()) - - return s -} diff --git a/integration/microservices/tempo.yaml b/integration/microservices/tempo.yaml index c3ce244ba27..d4bcf6e607c 100644 --- a/integration/microservices/tempo.yaml +++ b/integration/microservices/tempo.yaml @@ -2,10 +2,13 @@ compactor: null distributor: receivers: zipkin: + endpoint: "distributor:9411" jaeger: protocols: - thrift_http: null - opencensus: null + thrift_http: + endpoint: "distributor:14268" + opencensus: + endpoint: "distributor:55678" ingester: trace_idle_period: 1s lifecycler: diff --git 
a/integration/util/util.go b/integration/util/util.go index cd3248b21ec..fd917f8b3ff 100644 --- a/integration/util/util.go +++ b/integration/util/util.go @@ -21,7 +21,6 @@ import ( "github.com/grafana/dskit/backoff" "github.com/grafana/e2e" - "github.com/grafana/tempo/pkg/model/trace" jaeger_grpc "github.com/jaegertracing/jaeger/cmd/agent/app/reporter/grpc" thrift "github.com/jaegertracing/jaeger/thrift-gen/jaeger" "github.com/stretchr/testify/assert" @@ -39,6 +38,7 @@ import ( "google.golang.org/grpc/credentials/insecure" "github.com/grafana/tempo/pkg/httpclient" + "github.com/grafana/tempo/pkg/model/trace" "github.com/grafana/tempo/pkg/tempopb" tempoUtil "github.com/grafana/tempo/pkg/util" ) @@ -47,7 +47,7 @@ const ( image = "tempo:latest" debugImage = "tempo-debug:latest" queryImage = "tempo-query:latest" - jaegerImage = "jaegertracing/jaeger-query:1.60" + jaegerImage = "jaegertracing/jaeger-query:1.64.0" ) // GetExtraArgs returns the extra args to pass to the Docker command used to run Tempo. @@ -348,9 +348,9 @@ func NewOtelGRPCExporter(endpoint string) (exporter.Traces, error) { }, } logger, _ := zap.NewDevelopment() - return factory.CreateTracesExporter( + return factory.CreateTraces( context.Background(), - exporter.CreateSettings{ + exporter.Settings{ TelemetrySettings: component.TelemetrySettings{ Logger: logger, TracerProvider: tnoop.NewTracerProvider(), @@ -609,7 +609,7 @@ func AssertEcho(t *testing.T, url string) { res, err := e2e.DoGet(url) require.NoError(t, err) require.Equal(t, http.StatusOK, res.StatusCode) - defer res.Body.Close() + defer func() { require.NoError(t, res.Body.Close()) }() } func QueryAndAssertTrace(t *testing.T, client *httpclient.Client, info *tempoUtil.TraceInfo) { diff --git a/modules/blockbuilder/blockbuilder.go b/modules/blockbuilder/blockbuilder.go new file mode 100644 index 00000000000..30b3bbfc371 --- /dev/null +++ b/modules/blockbuilder/blockbuilder.go @@ -0,0 +1,429 @@ +package blockbuilder + +import ( + "context" + "errors" + "fmt" + "strconv" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/grafana/dskit/backoff" + "github.com/grafana/dskit/ring" + "github.com/grafana/dskit/services" + "github.com/grafana/tempo/modules/storage" + "github.com/grafana/tempo/pkg/ingest" + "github.com/grafana/tempo/tempodb" + "github.com/grafana/tempo/tempodb/encoding" + "github.com/grafana/tempo/tempodb/wal" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/twmb/franz-go/pkg/kadm" + "github.com/twmb/franz-go/pkg/kgo" +) + +const ( + blockBuilderServiceName = "block-builder" + ConsumerGroup = "block-builder" + pollTimeout = 2 * time.Second + cutTime = 10 * time.Second +) + +var ( + metricFetchDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: "tempo", + Subsystem: "block_builder", + Name: "fetch_duration_seconds", + Help: "Time spent fetching from Kafka.", + NativeHistogramBucketFactor: 1.1, + }, []string{"partition"}) + metricFetchBytesTotal = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "tempo", + Subsystem: "block_builder", + Name: "fetch_bytes_total", + Help: "Total number of bytes fetched from Kafka", + }, []string{"partition"}) + metricFetchRecordsTotal = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "tempo", + Subsystem: "block_builder", + Name: "fetch_records_total", + Help: "Total number of records fetched from Kafka", + }, []string{"partition"}) + metricPartitionLagSeconds = 
promauto.NewGaugeVec(prometheus.GaugeOpts{
+    Namespace: "tempo",
+    Subsystem: "block_builder",
+    Name: "partition_lag_seconds",
+    Help: "Lag of a partition in seconds.",
+  }, []string{"partition"})
+  metricConsumeCycleDuration = promauto.NewHistogram(prometheus.HistogramOpts{
+    Namespace: "tempo",
+    Subsystem: "block_builder",
+    Name: "consume_cycle_duration_seconds",
+    Help: "Time spent consuming a full cycle.",
+    NativeHistogramBucketFactor: 1.1,
+  })
+  metricProcessPartitionSectionDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{
+    Namespace: "tempo",
+    Subsystem: "block_builder",
+    Name: "process_partition_section_duration_seconds",
+    Help: "Time spent processing one partition section.",
+    NativeHistogramBucketFactor: 1.1,
+  }, []string{"partition"})
+  metricFetchErrors = promauto.NewCounterVec(prometheus.CounterOpts{
+    Namespace: "tempo",
+    Subsystem: "block_builder",
+    Name: "fetch_errors_total",
+    Help: "Total number of errors while fetching by the consumer.",
+  }, []string{"partition"})
+)
+
+type BlockBuilder struct {
+  services.Service
+
+  logger log.Logger
+  cfg Config
+
+  kafkaClient *kgo.Client
+  kadm *kadm.Client
+  decoder *ingest.Decoder
+  partitionRing ring.PartitionRingReader
+
+  overrides Overrides
+  enc encoding.VersionedEncoding
+  wal *wal.WAL // TODO - Shared between tenants, should be per tenant?
+  writer tempodb.Writer
+}
+
+func New(
+  cfg Config,
+  logger log.Logger,
+  partitionRing ring.PartitionRingReader,
+  overrides Overrides,
+  store storage.Store,
+) *BlockBuilder {
+  b := &BlockBuilder{
+    logger: logger,
+    cfg: cfg,
+    partitionRing: partitionRing,
+    decoder: ingest.NewDecoder(),
+    overrides: overrides,
+    writer: store,
+  }
+
+  b.Service = services.NewBasicService(b.starting, b.running, b.stopping)
+  return b
+}
+
+func (b *BlockBuilder) starting(ctx context.Context) (err error) {
+  level.Info(b.logger).Log("msg", "block builder starting")
+
+  b.enc = encoding.DefaultEncoding()
+  if version := b.cfg.BlockConfig.BlockCfg.Version; version != "" {
+    b.enc, err = encoding.FromVersion(version)
+    if err != nil {
+      return fmt.Errorf("failed to create encoding: %w", err)
+    }
+  }
+
+  b.wal, err = wal.New(&b.cfg.WAL)
+  if err != nil {
+    return fmt.Errorf("failed to create WAL: %w", err)
+  }
+
+  b.kafkaClient, err = ingest.NewReaderClient(
+    b.cfg.IngestStorageConfig.Kafka,
+    ingest.NewReaderClientMetrics(blockBuilderServiceName, prometheus.DefaultRegisterer),
+    b.logger,
+  )
+  if err != nil {
+    return fmt.Errorf("failed to create kafka reader client: %w", err)
+  }
+
+  boff := backoff.New(ctx, backoff.Config{
+    MinBackoff: 100 * time.Millisecond,
+    MaxBackoff: time.Minute, // If there is a network hiccup, we prefer to keep retrying with longer waits rather than fail the service.
+    MaxRetries: 10,
+  })
+
+  for boff.Ongoing() {
+    err := b.kafkaClient.Ping(ctx)
+    if err == nil {
+      break
+    }
+    level.Warn(b.logger).Log("msg", "ping kafka; will retry", "err", err)
+    boff.Wait()
+  }
+  if err := boff.ErrCause(); err != nil {
+    return fmt.Errorf("failed to ping kafka: %w", err)
+  }
+
+  b.kadm = kadm.NewClient(b.kafkaClient)
+
+  ingest.ExportPartitionLagMetrics(
+    ctx,
+    b.kadm,
+    b.logger,
+    b.cfg.IngestStorageConfig,
+    b.getAssignedActivePartitions)
+
+  return nil
+}
+
+func (b *BlockBuilder) running(ctx context.Context) error {
+  // Initial delay
+  waitTime := 0 * time.Second
+  for {
+    select {
+    case <-time.After(waitTime):
+      err := b.consume(ctx)
+      if err != nil {
+        level.Error(b.logger).Log("msg", "consumeCycle failed", "err", err)
+      }
+
+      // Real delay on subsequent cycles
+      waitTime = b.cfg.ConsumeCycleDuration
+    case <-ctx.Done():
+      return nil
+    }
+  }
+}
+
+func (b *BlockBuilder) consume(ctx context.Context) error {
+  var (
+    end = time.Now()
+    partitions = b.getAssignedActivePartitions()
+  )
+
+  level.Info(b.logger).Log("msg", "starting consume cycle", "cycle_end", end, "active_partitions", partitions)
+  defer func(t time.Time) { metricConsumeCycleDuration.Observe(time.Since(t).Seconds()) }(time.Now())
+
+  // Clear all previous remnants
+  err := b.wal.Clear()
+  if err != nil {
+    return err
+  }
+
+  for _, partition := range partitions {
+    // Consume partition while data remains.
+    // TODO - round-robin one consumption per partition instead to equalize catch-up time.
+    for {
+      more, err := b.consumePartition(ctx, partition, end)
+      if err != nil {
+        return err
+      }
+
+      if !more {
+        break
+      }
+    }
+  }
+
+  return nil
+}
+
+func (b *BlockBuilder) consumePartition(ctx context.Context, partition int32, overallEnd time.Time) (more bool, err error) {
+  defer func(t time.Time) {
+    metricProcessPartitionSectionDuration.WithLabelValues(strconv.Itoa(int(partition))).Observe(time.Since(t).Seconds())
+  }(time.Now())
+
+  var (
+    dur = b.cfg.ConsumeCycleDuration
+    topic = b.cfg.IngestStorageConfig.Kafka.Topic
+    group = b.cfg.IngestStorageConfig.Kafka.ConsumerGroup
+    partLabel = strconv.Itoa(int(partition))
+    startOffset kgo.Offset
+    init bool
+    writer *writer
+    lastRec *kgo.Record
+    nextCut time.Time
+    end time.Time
+  )
+
+  commits, err := b.kadm.FetchOffsetsForTopics(ctx, group, topic)
+  if err != nil {
+    return false, err
+  }
+  if err := commits.Error(); err != nil {
+    return false, err
+  }
+
+  lastCommit, ok := commits.Lookup(topic, partition)
+  if ok && lastCommit.At >= 0 {
+    startOffset = kgo.NewOffset().At(lastCommit.At)
+  } else {
+    startOffset = kgo.NewOffset().AtStart()
+  }
+
+  ends, err := b.kadm.ListEndOffsets(ctx, topic)
+  if err != nil {
+    return false, err
+  }
+  if err := ends.Error(); err != nil {
+    return false, err
+  }
+  lastPossibleMessage, lastPossibleMessageFound := ends.Lookup(topic, partition)
+
+  level.Info(b.logger).Log(
+    "msg", "consuming partition",
+    "partition", partition,
+    "commit_offset", lastCommit.At,
+    "start_offset", startOffset,
+  )
+
+  // We always rewind the partition's offset to the commit offset by reassigning the partition to the client (this triggers partition assignment).
+  // This is so the cycle starts exactly at the commit offset, and not at what was (potentially over-) consumed previously.
+  // In the end, we remove the partition from the client (refer to the defer below) to guarantee the client always consumes
+  // from one partition at a time. I.e. when this partition is consumed, we start consuming the next one.
+ b.kafkaClient.AddConsumePartitions(map[string]map[int32]kgo.Offset{ + topic: { + partition: startOffset, + }, + }) + defer b.kafkaClient.RemoveConsumePartitions(map[string][]int32{topic: {partition}}) + +outer: + for { + fetches := func() kgo.Fetches { + defer func(t time.Time) { metricFetchDuration.WithLabelValues(partLabel).Observe(time.Since(t).Seconds()) }(time.Now()) + ctx2, cancel := context.WithTimeout(ctx, pollTimeout) + defer cancel() + return b.kafkaClient.PollFetches(ctx2) + }() + err = fetches.Err() + if err != nil { + if errors.Is(err, context.DeadlineExceeded) { + // No more data + break + } + metricFetchErrors.WithLabelValues(partLabel).Inc() + return false, err + } + + if fetches.Empty() { + break + } + + for iter := fetches.RecordIter(); !iter.Done(); { + rec := iter.Next() + metricFetchBytesTotal.WithLabelValues(partLabel).Add(float64(len(rec.Value))) + metricFetchRecordsTotal.WithLabelValues(partLabel).Inc() + + level.Debug(b.logger).Log( + "msg", "processing record", + "partition", rec.Partition, + "offset", rec.Offset, + "timestamp", rec.Timestamp, + "len", len(rec.Value), + ) + + // Initialize on first record + if !init { + end = rec.Timestamp.Add(dur) // When block will be cut + metricPartitionLagSeconds.WithLabelValues(partLabel).Set(time.Since(rec.Timestamp).Seconds()) + writer = newPartitionSectionWriter(b.logger, uint64(partition), uint64(rec.Offset), b.cfg.BlockConfig, b.overrides, b.wal, b.enc) + nextCut = rec.Timestamp.Add(cutTime) + init = true + } + + if rec.Timestamp.After(end) { + // Cut this block but continue only if we have at least another full cycle + if overallEnd.Sub(rec.Timestamp) >= dur { + more = true + } + break outer + } + + if rec.Timestamp.After(overallEnd) { + break outer + } + + if rec.Timestamp.After(nextCut) { + // Cut before appending this trace + err = writer.cutidle(rec.Timestamp.Add(-cutTime), false) + if err != nil { + return false, err + } + nextCut = rec.Timestamp.Add(cutTime) + } + + err := b.pushTraces(rec.Timestamp, rec.Key, rec.Value, writer) + if err != nil { + return false, err + } + + lastRec = rec + + if lastPossibleMessageFound && lastRec.Offset >= lastPossibleMessage.Offset-1 { + // We reached the end so break now and avoid another poll which is expected to be empty. 
+ break outer + } + } + } + + if lastRec == nil { + // Received no data + level.Info(b.logger).Log( + "msg", "no data", + "partition", partition, + ) + return false, nil + } + + // Cut any remaining + err = writer.cutidle(time.Time{}, true) + if err != nil { + return false, err + } + + err = writer.flush(ctx, b.writer) + if err != nil { + return false, err + } + + // TODO - Retry commit + resp, err := b.kadm.CommitOffsets(ctx, group, kadm.OffsetsFromRecords(*lastRec)) + if err != nil { + return false, err + } + if err := resp.Error(); err != nil { + return false, err + } + + level.Info(b.logger).Log( + "msg", "successfully committed offset to kafka", + "partition", partition, + "last_record", lastRec.Offset, + ) + + return more, nil +} + +func (b *BlockBuilder) stopping(err error) error { + if b.kafkaClient != nil { + b.kafkaClient.Close() + } + return err +} + +func (b *BlockBuilder) pushTraces(ts time.Time, tenantBytes, reqBytes []byte, p partitionSectionWriter) error { + req, err := b.decoder.Decode(reqBytes) + if err != nil { + return fmt.Errorf("failed to decode trace: %w", err) + } + defer b.decoder.Reset() + + return p.pushBytes(ts, string(tenantBytes), req) +} + +func (b *BlockBuilder) getAssignedActivePartitions() []int32 { + activePartitionsCount := b.partitionRing.PartitionRing().ActivePartitionsCount() + assignedActivePartitions := make([]int32, 0, activePartitionsCount) + for _, partition := range b.cfg.AssignedPartitions[b.cfg.InstanceID] { + if partition > int32(activePartitionsCount) { + break + } + assignedActivePartitions = append(assignedActivePartitions, partition) + } + return assignedActivePartitions +} diff --git a/modules/blockbuilder/blockbuilder_test.go b/modules/blockbuilder/blockbuilder_test.go new file mode 100644 index 00000000000..f09cfca100a --- /dev/null +++ b/modules/blockbuilder/blockbuilder_test.go @@ -0,0 +1,551 @@ +package blockbuilder + +import ( + "context" + "crypto/rand" + "errors" + "testing" + "time" + + "github.com/go-kit/log" + "github.com/grafana/dskit/flagext" + "github.com/grafana/dskit/ring" + "github.com/grafana/dskit/services" + "github.com/grafana/tempo/modules/storage" + "github.com/grafana/tempo/pkg/ingest" + "github.com/grafana/tempo/pkg/ingest/testkafka" + "github.com/grafana/tempo/pkg/util" + "github.com/grafana/tempo/pkg/util/test" + "github.com/grafana/tempo/tempodb" + "github.com/grafana/tempo/tempodb/backend" + "github.com/grafana/tempo/tempodb/backend/local" + "github.com/grafana/tempo/tempodb/blocklist" + "github.com/grafana/tempo/tempodb/encoding" + "github.com/grafana/tempo/tempodb/encoding/common" + "github.com/grafana/tempo/tempodb/wal" + "github.com/stretchr/testify/require" + "github.com/twmb/franz-go/pkg/kadm" + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kgo" + "github.com/twmb/franz-go/pkg/kmsg" + "go.uber.org/atomic" +) + +const ( + testTopic = "test-topic" + testConsumerGroup = "test-consumer-group" + testPartition = int32(0) +) + +// When the partition starts with no existing commit, +// the block-builder looks back to consume all available records from the start and ensures they are committed and flushed into a block. 
+ +// When the partition starts with no existing commit, +// the block-builder looks back to consume all available records from the start and ensures they are committed and flushed into a block. +func TestBlockbuilder_lookbackOnNoCommit(t *testing.T) { + ctx, cancel := context.WithCancelCause(context.Background()) + t.Cleanup(func() { cancel(errors.New("test done")) }) + + k, address := testkafka.CreateCluster(t, 1, testTopic) + + kafkaCommits := atomic.NewInt32(0) + k.ControlKey(kmsg.OffsetCommit, func(kmsg.Request) (kmsg.Response, error, bool) { + kafkaCommits.Inc() + return nil, nil, false + }) + + store := newStore(ctx, t) + cfg := blockbuilderConfig(t, address) + + b := New(cfg, test.NewTestingLogger(t), newPartitionRingReader(), &mockOverrides{}, store) + require.NoError(t, services.StartAndAwaitRunning(ctx, b)) + t.Cleanup(func() { + require.NoError(t, services.StopAndAwaitTerminated(ctx, b)) + }) + + client := newKafkaClient(t, cfg.IngestStorageConfig.Kafka) + producedRecords := sendReq(t, ctx, client) + + // Wait for record to be consumed and committed. + require.Eventually(t, func() bool { + return kafkaCommits.Load() > 0 + }, time.Minute, time.Second) + + // Wait for the block to be flushed. + require.Eventually(t, func() bool { + return len(store.BlockMetas(util.FakeTenantID)) == 1 && countFlushedTraces(store) == 1 + }, time.Minute, time.Second) + + // Check committed offset + requireLastCommitEquals(t, ctx, client, producedRecords[len(producedRecords)-1].Offset+1) +} + +// Starting with a pre-existing commit, +// the block-builder resumes from the last known position, consuming new records, +// and ensures all of them are properly committed and flushed into blocks. +func TestBlockbuilder_startWithCommit(t *testing.T) { + ctx, cancel := context.WithCancelCause(context.Background()) + t.Cleanup(func() { cancel(errors.New("test done")) }) + + k, address := testkafka.CreateCluster(t, 1, testTopic) + + kafkaCommits := atomic.NewInt32(0) + k.ControlKey(kmsg.OffsetCommit, func(kmsg.Request) (kmsg.Response, error, bool) { + kafkaCommits.Inc() + return nil, nil, false + }) + + store := newStore(ctx, t) + cfg := blockbuilderConfig(t, address) + + client := newKafkaClient(t, cfg.IngestStorageConfig.Kafka) + producedRecords := sendTracesFor(t, ctx, client, 5*time.Second, 100*time.Millisecond) // Send for 5 seconds + + committedAt := len(producedRecords) / 2 + // Commit half of the records + offsets := make(kadm.Offsets) + offsets.Add(kadm.Offset{ + Topic: testTopic, + Partition: 0, + At: producedRecords[committedAt].Offset, + }) + admClient := kadm.NewClient(client) + require.NoError(t, admClient.CommitAllOffsets(ctx, cfg.IngestStorageConfig.Kafka.ConsumerGroup, offsets)) + + b := New(cfg, test.NewTestingLogger(t), newPartitionRingReader(), &mockOverrides{}, store) + require.NoError(t, services.StartAndAwaitRunning(ctx, b)) + t.Cleanup(func() { + require.NoError(t, services.StopAndAwaitTerminated(ctx, b)) + }) + + records := sendTracesFor(t, ctx, client, 5*time.Second, 100*time.Millisecond) // Send for 5 seconds + producedRecords = append(producedRecords, records...) + + // Wait for record to be consumed and committed. + require.Eventually(t, func() bool { + return kafkaCommits.Load() > 0 + }, time.Minute, time.Second) + + // Wait for the block to be flushed. + require.Eventually(t, func() bool { + return countFlushedTraces(store) == len(producedRecords)-committedAt + }, time.Minute, time.Second) + + // Check committed offset + requireLastCommitEquals(t, ctx, client, producedRecords[len(producedRecords)-1].Offset+1) +}
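The test above seeds the consumer group by hand before starting the service. Distilled into a helper, that pattern looks roughly like this; `seedCommit` is a name invented here, and it leans on the test file's own constants and imports:

```go
// seedCommit records a starting offset for the test consumer group, so a
// block-builder started afterwards resumes from it instead of the log start.
func seedCommit(ctx context.Context, t *testing.T, client *kgo.Client, offset int64) {
	offsets := make(kadm.Offsets)
	offsets.Add(kadm.Offset{Topic: testTopic, Partition: testPartition, At: offset})
	require.NoError(t, kadm.NewClient(client).CommitAllOffsets(ctx, testConsumerGroup, offsets))
}
```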
+ +// In case a block flush initially fails, the system retries until it succeeds. +func TestBlockbuilder_flushingFails(t *testing.T) { + ctx, cancel := context.WithCancelCause(context.Background()) + t.Cleanup(func() { cancel(errors.New("test done")) }) + + k, address := testkafka.CreateCluster(t, 1, "test-topic") + + kafkaCommits := atomic.NewInt32(0) + k.ControlKey(kmsg.OffsetCommit, func(kmsg.Request) (kmsg.Response, error, bool) { + kafkaCommits.Inc() + return nil, nil, false + }) + + storageWrites := atomic.NewInt32(0) + store := newStoreWrapper(newStore(ctx, t), func(ctx context.Context, block tempodb.WriteableBlock, store storage.Store) error { + // Fail the first block write + if storageWrites.Inc() == 1 { + return errors.New("failed to write block") + } + return store.WriteBlock(ctx, block) + }) + cfg := blockbuilderConfig(t, address) + logger := test.NewTestingLogger(t) + + client := newKafkaClient(t, cfg.IngestStorageConfig.Kafka) + producedRecords := sendTracesFor(t, ctx, client, time.Second, 100*time.Millisecond) // Send for 1 second, less than one consumption cycle + + b := New(cfg, logger, newPartitionRingReader(), &mockOverrides{}, store) + require.NoError(t, services.StartAndAwaitRunning(ctx, b)) + t.Cleanup(func() { + require.NoError(t, services.StopAndAwaitTerminated(ctx, b)) + }) + + // Wait for record to be consumed and committed. + require.Eventually(t, func() bool { return kafkaCommits.Load() >= 1 }, time.Minute, time.Second) + + // Wait for the block to be flushed. + require.Eventually(t, func() bool { + return len(store.BlockMetas(util.FakeTenantID)) >= 1 + }, time.Minute, time.Second) + + // Check committed offset + requireLastCommitEquals(t, ctx, client, producedRecords[len(producedRecords)-1].Offset+1) +} + +// When it receives records with older timestamps, the block-builder processes them in the current cycle, +// ensuring they're written into a new block despite "belonging" to another cycle. +func TestBlockbuilder_receivesOldRecords(t *testing.T) { + ctx, cancel := context.WithCancelCause(context.Background()) + t.Cleanup(func() { cancel(errors.New("test done")) }) + + k, address := testkafka.CreateCluster(t, 1, "test-topic") + + kafkaCommits := atomic.NewInt32(0) + k.ControlKey(kmsg.OffsetCommit, func(kmsg.Request) (kmsg.Response, error, bool) { + kafkaCommits.Inc() + return nil, nil, false + }) + + store := newStore(ctx, t) + cfg := blockbuilderConfig(t, address) + + b := New(cfg, test.NewTestingLogger(t), newPartitionRingReader(), &mockOverrides{}, store) + require.NoError(t, services.StartAndAwaitRunning(ctx, b)) + t.Cleanup(func() { + require.NoError(t, services.StopAndAwaitTerminated(ctx, b)) + }) + + client := newKafkaClient(t, cfg.IngestStorageConfig.Kafka) + producedRecords := sendReq(t, ctx, client) + + // Wait for record to be consumed and committed. + require.Eventually(t, func() bool { + return kafkaCommits.Load() == 1 + }, time.Minute, time.Second) + + // Wait for the block to be flushed. + require.Eventually(t, func() bool { + return len(store.BlockMetas(util.FakeTenantID)) == 1 + }, time.Minute, time.Second) + + // Re-send the same records with an older timestamp + // They should be processed in the next cycle and written to a new block regardless of the timestamp + for _, record := range producedRecords { + record.Timestamp = record.Timestamp.Add(-time.Hour) + } + res := client.ProduceSync(ctx, producedRecords...) + require.NoError(t, res.FirstErr()) + + // Wait for record to be consumed and committed.
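+ // Two commits are expected in total: the re-sent records are picked up in a second consumption cycle.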
+ require.Eventually(t, func() bool { + return kafkaCommits.Load() == 2 + }, time.Minute, time.Second) + + // Wait for the block to be flushed. + require.Eventually(t, func() bool { + return len(store.BlockMetas(util.FakeTenantID)) == 2 + }, time.Minute, time.Second) + + // Check committed offset + requireLastCommitEquals(t, ctx, client, producedRecords[len(producedRecords)-1].Offset+1) +} + +// FIXME - Test is unstable and will fail if records cross two consumption cycles, +// +// because it asserts that there are exactly two commits, one of which fails. +// There can be 3 commits if the records cross two consumption cycles. +// +// On encountering a commit failure, the block-builder retries the operation and eventually succeeds. +// +// This would cause two blocks to be written, one for each cycle (one cycle fails at commit, the other succeeds). +// The block-builder deterministically generates the block ID based on the cycle end timestamp, +// so the block ID for the failed cycle is the same as the block ID for the successful cycle, +// and the failed block is overwritten by the successful one. +func TestBlockbuilder_committingFails(t *testing.T) { + ctx, cancel := context.WithCancelCause(context.Background()) + t.Cleanup(func() { cancel(errors.New("test done")) }) + + k, address := testkafka.CreateCluster(t, 1, "test-topic") + + kafkaCommits := atomic.NewInt32(0) + k.ControlKey(kmsg.OffsetCommit, func(req kmsg.Request) (kmsg.Response, error, bool) { + kafkaCommits.Inc() + + if kafkaCommits.Load() == 1 { // First commit fails + res := kmsg.NewOffsetCommitResponse() + res.Version = req.GetVersion() + res.Topics = []kmsg.OffsetCommitResponseTopic{ + { + Topic: testTopic, + Partitions: []kmsg.OffsetCommitResponseTopicPartition{ + { + Partition: 0, + ErrorCode: kerr.RebalanceInProgress.Code, + }, + }, + }, + } + return &res, nil, true + } + + return nil, nil, false + }) + + store := newStore(ctx, t) + cfg := blockbuilderConfig(t, address) + logger := test.NewTestingLogger(t) + + client := newKafkaClient(t, cfg.IngestStorageConfig.Kafka) + producedRecords := sendTracesFor(t, ctx, client, time.Second, 100*time.Millisecond) // Send for 1 second, less than one consumption cycle + + b := New(cfg, logger, newPartitionRingReader(), &mockOverrides{}, store) + require.NoError(t, services.StartAndAwaitRunning(ctx, b)) + t.Cleanup(func() { + require.NoError(t, services.StopAndAwaitTerminated(ctx, b)) + }) + + // Wait for record to be consumed and committed. + require.Eventually(t, func() bool { + return kafkaCommits.Load() == 2 // First commit fails, second commit succeeds + }, time.Minute, time.Second) + + // Wait for the block to be flushed.
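+ // Both attempts share the same deterministic block ID, so the retried cycle overwrites the first block.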
+ require.Eventually(t, func() bool { + return len(store.BlockMetas(util.FakeTenantID)) == 1 // Only one block should have been written + }, time.Minute, time.Second) + + // Check committed offset + requireLastCommitEquals(t, ctx, client, producedRecords[len(producedRecords)-1].Offset+1) +} + +func blockbuilderConfig(t testing.TB, address string) Config { + cfg := Config{} + flagext.DefaultValues(&cfg) + + flagext.DefaultValues(&cfg.BlockConfig) + + flagext.DefaultValues(&cfg.IngestStorageConfig.Kafka) + cfg.IngestStorageConfig.Kafka.Address = address + cfg.IngestStorageConfig.Kafka.Topic = testTopic + cfg.IngestStorageConfig.Kafka.ConsumerGroup = testConsumerGroup + + cfg.AssignedPartitions = map[string][]int32{cfg.InstanceID: {0}} + cfg.ConsumeCycleDuration = 5 * time.Second + + cfg.WAL.Filepath = t.TempDir() + + return cfg +} + +var _ blocklist.JobSharder = (*ownEverythingSharder)(nil) + +type ownEverythingSharder struct{} + +func (o *ownEverythingSharder) Owns(string) bool { return true } + +func newStore(ctx context.Context, t testing.TB) storage.Store { + return newStoreWithLogger(ctx, t, test.NewTestingLogger(t)) +} + +func newStoreWithLogger(ctx context.Context, t testing.TB, log log.Logger) storage.Store { + tmpDir := t.TempDir() + + s, err := storage.NewStore(storage.Config{ + Trace: tempodb.Config{ + Backend: backend.Local, + Local: &local.Config{ + Path: tmpDir, + }, + Block: &common.BlockConfig{ + IndexDownsampleBytes: 2, + BloomFP: 0.01, + BloomShardSizeBytes: 100_000, + Version: encoding.LatestEncoding().Version(), + Encoding: backend.EncLZ4_1M, + IndexPageSizeBytes: 1000, + }, + WAL: &wal.Config{ + Filepath: tmpDir, + }, + BlocklistPoll: 5 * time.Second, + }, + }, nil, log) + require.NoError(t, err) + + s.EnablePolling(ctx, &ownEverythingSharder{}) + return s +} + +var _ storage.Store = (*storeWrapper)(nil) + +type storeWrapper struct { + storage.Store + writeBlock func(ctx context.Context, block tempodb.WriteableBlock, store storage.Store) error +} + +func newStoreWrapper(s storage.Store, writeBlock func(ctx context.Context, block tempodb.WriteableBlock, store storage.Store) error) *storeWrapper { + return &storeWrapper{ + Store: s, + writeBlock: writeBlock, + } +} + +func (m *storeWrapper) WriteBlock(ctx context.Context, block tempodb.WriteableBlock) error { + if m.writeBlock != nil { + return m.writeBlock(ctx, block, m.Store) + } + return m.Store.WriteBlock(ctx, block) +} + +var _ ring.PartitionRingReader = (*mockPartitionRingReader)(nil) + +func newPartitionRingReader() *mockPartitionRingReader { + return &mockPartitionRingReader{ + r: ring.NewPartitionRing(ring.PartitionRingDesc{ + Partitions: map[int32]ring.PartitionDesc{ + 0: {State: ring.PartitionActive}, + }, + }), + } +} + +type mockPartitionRingReader struct { + r *ring.PartitionRing +} + +func (m *mockPartitionRingReader) PartitionRing() *ring.PartitionRing { + return m.r +} + +var _ Overrides = (*mockOverrides)(nil) + +type mockOverrides struct { + dc backend.DedicatedColumns +} + +func (m *mockOverrides) MaxBytesPerTrace(_ string) int { return 0 } +func (m *mockOverrides) DedicatedColumns(_ string) backend.DedicatedColumns { return m.dc } + +func newKafkaClient(t testing.TB, config ingest.KafkaConfig) *kgo.Client { + writeClient, err := kgo.NewClient( + kgo.SeedBrokers(config.Address), + kgo.AllowAutoTopicCreation(), + kgo.DefaultProduceTopic(config.Topic), + // We will choose the partition of each record. 
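+ // ManualPartitioner routes each record to the partition set on its Partition field.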
+ kgo.RecordPartitioner(kgo.ManualPartitioner()), + ) + require.NoError(t, err) + t.Cleanup(writeClient.Close) + + return writeClient +} + +func countFlushedTraces(store storage.Store) int { + count := 0 + for _, meta := range store.BlockMetas(util.FakeTenantID) { + count += int(meta.TotalObjects) + } + return count +} + +// nolint: revive +func sendReq(t testing.TB, ctx context.Context, client *kgo.Client) []*kgo.Record { + traceID := generateTraceID(t) + + req := test.MakePushBytesRequest(t, 10, traceID) + records, err := ingest.Encode(0, util.FakeTenantID, req, 1_000_000) + require.NoError(t, err) + + res := client.ProduceSync(ctx, records...) + require.NoError(t, res.FirstErr()) + + return records +} + +// nolint: revive,unparam +func sendTracesFor(t *testing.T, ctx context.Context, client *kgo.Client, dur, interval time.Duration) []*kgo.Record { + ticker := time.NewTicker(interval) + defer ticker.Stop() + + timer := time.NewTimer(dur) + defer timer.Stop() + + producedRecords := make([]*kgo.Record, 0) + + for { + select { + case <-ctx.Done(): // Exit the function if the context is done + return producedRecords + case <-timer.C: // Exit the function when the timer is done + return producedRecords + case <-ticker.C: + records := sendReq(t, ctx, client) + producedRecords = append(producedRecords, records...) + } + } +} + +func generateTraceID(t testing.TB) []byte { + traceID := make([]byte, 16) + _, err := rand.Read(traceID) + require.NoError(t, err) + return traceID +} + +// nolint: revive +func requireLastCommitEquals(t testing.TB, ctx context.Context, client *kgo.Client, expectedOffset int64) { + offsets, err := kadm.NewClient(client).FetchOffsetsForTopics(ctx, testConsumerGroup, testTopic) + require.NoError(t, err) + offset, ok := offsets.Lookup(testTopic, testPartition) + require.True(t, ok) + require.Equal(t, expectedOffset, offset.At) +} + +func BenchmarkBlockBuilder(b *testing.B) { + var ( + ctx = context.Background() + logger = log.NewNopLogger() + _, address = testkafka.CreateCluster(b, 1, testTopic) + store = newStoreWithLogger(ctx, b, logger) + cfg = blockbuilderConfig(b, address) + client = newKafkaClient(b, cfg.IngestStorageConfig.Kafka) + o = &mockOverrides{ + dc: backend.DedicatedColumns{ + backend.DedicatedColumn{Scope: backend.DedicatedColumnScopeResource, Name: "res0", Type: backend.DedicatedColumnTypeString}, + backend.DedicatedColumn{Scope: backend.DedicatedColumnScopeResource, Name: "res1", Type: backend.DedicatedColumnTypeString}, + backend.DedicatedColumn{Scope: backend.DedicatedColumnScopeResource, Name: "res2", Type: backend.DedicatedColumnTypeString}, + backend.DedicatedColumn{Scope: backend.DedicatedColumnScopeResource, Name: "res3", Type: backend.DedicatedColumnTypeString}, + backend.DedicatedColumn{Scope: backend.DedicatedColumnScopeResource, Name: "res4", Type: backend.DedicatedColumnTypeString}, + backend.DedicatedColumn{Scope: backend.DedicatedColumnScopeResource, Name: "res5", Type: backend.DedicatedColumnTypeString}, + backend.DedicatedColumn{Scope: backend.DedicatedColumnScopeResource, Name: "res6", Type: backend.DedicatedColumnTypeString}, + backend.DedicatedColumn{Scope: backend.DedicatedColumnScopeResource, Name: "res7", Type: backend.DedicatedColumnTypeString}, + backend.DedicatedColumn{Scope: backend.DedicatedColumnScopeResource, Name: "res8", Type: backend.DedicatedColumnTypeString}, + backend.DedicatedColumn{Scope: backend.DedicatedColumnScopeResource, Name: "res9", Type: backend.DedicatedColumnTypeString}, + backend.DedicatedColumn{Scope: 
backend.DedicatedColumnScopeSpan, Name: "span0", Type: backend.DedicatedColumnTypeString}, + backend.DedicatedColumn{Scope: backend.DedicatedColumnScopeSpan, Name: "span1", Type: backend.DedicatedColumnTypeString}, + backend.DedicatedColumn{Scope: backend.DedicatedColumnScopeSpan, Name: "span2", Type: backend.DedicatedColumnTypeString}, + backend.DedicatedColumn{Scope: backend.DedicatedColumnScopeSpan, Name: "span3", Type: backend.DedicatedColumnTypeString}, + backend.DedicatedColumn{Scope: backend.DedicatedColumnScopeSpan, Name: "span4", Type: backend.DedicatedColumnTypeString}, + backend.DedicatedColumn{Scope: backend.DedicatedColumnScopeSpan, Name: "span5", Type: backend.DedicatedColumnTypeString}, + backend.DedicatedColumn{Scope: backend.DedicatedColumnScopeSpan, Name: "span6", Type: backend.DedicatedColumnTypeString}, + backend.DedicatedColumn{Scope: backend.DedicatedColumnScopeSpan, Name: "span7", Type: backend.DedicatedColumnTypeString}, + backend.DedicatedColumn{Scope: backend.DedicatedColumnScopeSpan, Name: "span8", Type: backend.DedicatedColumnTypeString}, + backend.DedicatedColumn{Scope: backend.DedicatedColumnScopeSpan, Name: "span9", Type: backend.DedicatedColumnTypeString}, + }, + } + ) + + cfg.ConsumeCycleDuration = 1 * time.Hour + + bb := New(cfg, logger, newPartitionRingReader(), o, store) + defer func() { require.NoError(b, bb.stopping(nil)) }() + + // Startup (without starting the background consume cycle) + err := bb.starting(ctx) + require.NoError(b, err) + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + + var records []*kgo.Record + for i := 0; i < 1000; i++ { + records = append(records, sendReq(b, ctx, client)...) + } + + var size int + for _, r := range records { + size += len(r.Value) + } + + err = bb.consume(ctx) + require.NoError(b, err) + + b.SetBytes(int64(size)) + } +} diff --git a/modules/blockbuilder/config.go b/modules/blockbuilder/config.go new file mode 100644 index 00000000000..0fa9d6209d4 --- /dev/null +++ b/modules/blockbuilder/config.go @@ -0,0 +1,108 @@ +package blockbuilder + +import ( + "encoding/json" + "flag" + "fmt" + "os" + "time" + + "github.com/go-kit/log/level" + "github.com/grafana/tempo/pkg/ingest" + "github.com/grafana/tempo/pkg/util/log" + "github.com/grafana/tempo/tempodb/encoding" + "github.com/grafana/tempo/tempodb/encoding/common" + "github.com/grafana/tempo/tempodb/wal" +) + +type BlockConfig struct { + MaxBlockBytes uint64 `yaml:"max_block_bytes" doc:"Maximum size of a block."` + + BlockCfg common.BlockConfig `yaml:"-,inline"` +} + +func (c *BlockConfig) RegisterFlags(f *flag.FlagSet) { + c.RegisterFlagsAndApplyDefaults("", f) +} + +func (c *BlockConfig) RegisterFlagsAndApplyDefaults(prefix string, f *flag.FlagSet) { + f.Uint64Var(&c.MaxBlockBytes, prefix+".max-block-bytes", 20*1024*1024, "Maximum size of a block.") // TODO - Review default + + c.BlockCfg.Version = encoding.DefaultEncoding().Version() + c.BlockCfg.RegisterFlagsAndApplyDefaults(prefix, f) +} + +type Config struct { + InstanceID string `yaml:"instance_id" doc:"Instance id."` + AssignedPartitions map[string][]int32 `yaml:"assigned_partitions" doc:"List of partitions assigned to this block builder."` + ConsumeCycleDuration time.Duration `yaml:"consume_cycle_duration" doc:"Interval between consumption cycles."` + + BlockConfig BlockConfig `yaml:"block" doc:"Configuration for the block builder."` + WAL wal.Config `yaml:"wal" doc:"Configuration for the write ahead log."` + + // This config is dynamically injected because defined outside the ingester config. 
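+ // The yaml:"-" tag below keeps it from being unmarshalled with this section; the value is filled in at runtime.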
+ IngestStorageConfig ingest.Config `yaml:"-"` +} + +func (c *Config) Validate() error { + if c.BlockConfig.BlockCfg.Version != c.WAL.Version { + c.WAL.Version = c.BlockConfig.BlockCfg.Version + } + + if err := common.ValidateConfig(&c.BlockConfig.BlockCfg); err != nil { + return fmt.Errorf("block config validation failed: %w", err) + } + + if err := c.WAL.Validate(); err != nil { + return fmt.Errorf("wal config validation failed: %w", err) + } + + return nil +} + +func (c *Config) RegisterFlags(f *flag.FlagSet) { + c.RegisterFlagsAndApplyDefaults("", f) +} + +func (c *Config) RegisterFlagsAndApplyDefaults(prefix string, f *flag.FlagSet) { + hostname, err := os.Hostname() + if err != nil { + level.Error(log.Logger).Log("msg", "failed to get hostname", "err", err) + os.Exit(1) + } + + f.StringVar(&c.InstanceID, "block-builder.instance-id", hostname, "Instance id.") + f.Var(newPartitionAssignmentVar(&c.AssignedPartitions), prefix+".assigned-partitions", "List of partitions assigned to this block builder.") + f.DurationVar(&c.ConsumeCycleDuration, prefix+".consume-cycle-duration", 5*time.Minute, "Interval between consumption cycles.") + + c.BlockConfig.RegisterFlagsAndApplyDefaults(prefix+".block", f) + c.WAL.RegisterFlags(f) + c.WAL.Version = c.BlockConfig.BlockCfg.Version + f.StringVar(&c.WAL.Filepath, prefix+".wal.path", "/var/tempo/block-builder/traces", "Path at which to store WAL blocks.") +} + +type partitionAssignmentVar struct { + p *map[string][]int32 +} + +func newPartitionAssignmentVar(p *map[string][]int32) *partitionAssignmentVar { + return &partitionAssignmentVar{p} +} + +// Set expects a JSON map of instance ID to partition IDs, e.g. {"block-builder-0":[0,1]}. +func (p *partitionAssignmentVar) Set(s string) error { + if s == "" { + return nil + } + + val := make(map[string][]int32) + if err := json.Unmarshal([]byte(s), &val); err != nil { + return err + } + *p.p = val + + return nil +} + +func (p *partitionAssignmentVar) String() string { + return fmt.Sprintf("%v", *p.p) +} diff --git a/modules/blockbuilder/config_test.go b/modules/blockbuilder/config_test.go new file mode 100644 index 00000000000..c37a88b5c5e --- /dev/null +++ b/modules/blockbuilder/config_test.go @@ -0,0 +1,64 @@ +package blockbuilder + +import ( + "testing" + + "github.com/grafana/tempo/tempodb/backend" + "github.com/grafana/tempo/tempodb/encoding" + "github.com/grafana/tempo/tempodb/encoding/common" + v2 "github.com/grafana/tempo/tempodb/encoding/v2" + "github.com/grafana/tempo/tempodb/encoding/vparquet4" + "github.com/grafana/tempo/tempodb/wal" + "github.com/stretchr/testify/assert" +) + +func TestConfig_validate(t *testing.T) { + tests := []struct { + name string + cfg Config + expectedErr bool + }{ + { + name: "ValidConfig", + cfg: Config{ + BlockConfig: BlockConfig{ + BlockCfg: common.BlockConfig{ + Version: encoding.LatestEncoding().Version(), + IndexDownsampleBytes: 1, + IndexPageSizeBytes: 1, + BloomFP: 0.1, + BloomShardSizeBytes: 1, + DedicatedColumns: backend.DedicatedColumns{ + {Scope: backend.DedicatedColumnScopeResource, Name: "foo", Type: backend.DedicatedColumnTypeString}, + }, + }, + }, + WAL: wal.Config{ + Version: encoding.LatestEncoding().Version(), + }, + }, + expectedErr: false, + }, + { + name: "InvalidBlockConfig", + cfg: Config{ + BlockConfig: BlockConfig{ + BlockCfg: common.BlockConfig{ + Version: vparquet4.VersionString, + IndexDownsampleBytes: 0, + }, + }, + WAL: wal.Config{ + Version: v2.VersionString, + }, + }, + expectedErr: true, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + err := tc.cfg.Validate() + assert.Equal(t, tc.expectedErr, err
!= nil, "unexpected error: %v", err) + }) + } +} diff --git a/modules/blockbuilder/partition_writer.go b/modules/blockbuilder/partition_writer.go new file mode 100644 index 00000000000..e846394d6fe --- /dev/null +++ b/modules/blockbuilder/partition_writer.go @@ -0,0 +1,123 @@ +package blockbuilder + +import ( + "context" + "strings" + "sync" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/grafana/tempo/pkg/tempopb" + "github.com/grafana/tempo/pkg/util" + "github.com/grafana/tempo/tempodb" + "github.com/grafana/tempo/tempodb/encoding" + "github.com/grafana/tempo/tempodb/wal" +) + +type partitionSectionWriter interface { + pushBytes(ts time.Time, tenant string, req *tempopb.PushBytesRequest) error + flush(ctx context.Context, store tempodb.Writer) error +} + +type writer struct { + logger log.Logger + + blockCfg BlockConfig + partition, cycleEndTs uint64 + + overrides Overrides + wal *wal.WAL + enc encoding.VersionedEncoding + + mtx sync.Mutex + m map[string]*tenantStore +} + +func newPartitionSectionWriter(logger log.Logger, partition, cycleEndTs uint64, blockCfg BlockConfig, overrides Overrides, wal *wal.WAL, enc encoding.VersionedEncoding) *writer { + return &writer{ + logger: logger, + partition: partition, + cycleEndTs: cycleEndTs, + blockCfg: blockCfg, + overrides: overrides, + wal: wal, + enc: enc, + mtx: sync.Mutex{}, + m: make(map[string]*tenantStore), + } +} + +func (p *writer) pushBytes(ts time.Time, tenant string, req *tempopb.PushBytesRequest) error { + level.Debug(p.logger).Log( + "msg", "pushing bytes", + "tenant", tenant, + "num_traces", len(req.Traces), + "id", idsToString(req.Ids), + ) + + i, err := p.instanceForTenant(tenant) + if err != nil { + return err + } + + for j, trace := range req.Traces { + if err := i.AppendTrace(req.Ids[j], trace.Slice, ts); err != nil { + return err + } + } + + return nil +} + +func (p *writer) cutidle(since time.Time, immediate bool) error { + for _, i := range p.m { + if err := i.CutIdle(since, immediate); err != nil { + return err + } + } + return nil +} + +func (p *writer) flush(ctx context.Context, store tempodb.Writer) error { + // TODO - Retry with backoff? 
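Picking up the TODO above: if per-tenant flushes grow a retry, dskit's `backoff` helper (already a Tempo dependency) is one plausible shape for it. A hedged sketch, not the PR's code; the limits below are illustrative.

```go
package example

import (
	"context"
	"time"

	"github.com/grafana/dskit/backoff"
)

// flushWithBackoff retries a single flush with capped exponential backoff.
// flush stands in for a per-tenant Flush call.
func flushWithBackoff(ctx context.Context, flush func(context.Context) error) error {
	bo := backoff.New(ctx, backoff.Config{
		MinBackoff: 500 * time.Millisecond,
		MaxBackoff: 10 * time.Second,
		MaxRetries: 5,
	})
	var err error
	for bo.Ongoing() {
		if err = flush(ctx); err == nil {
			return nil
		}
		bo.Wait()
	}
	if err != nil {
		return err // surface the last flush error
	}
	return bo.Err() // context cancellation or retries exhausted
}
```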
+ for _, i := range p.m { + level.Info(p.logger).Log("msg", "flushing tenant", "tenant", i.tenantID) + if err := i.Flush(ctx, store); err != nil { + return err + } + } + return nil +} + +func (p *writer) instanceForTenant(tenant string) (*tenantStore, error) { + p.mtx.Lock() + defer p.mtx.Unlock() + + if i, ok := p.m[tenant]; ok { + return i, nil + } + + i, err := newTenantStore(tenant, p.partition, p.cycleEndTs, p.blockCfg, p.logger, p.wal, p.enc, p.overrides) + if err != nil { + return nil, err + } + + p.m[tenant] = i + + return i, nil +} + +func idsToString(ids [][]byte) string { + b := strings.Builder{} + b.WriteString("[") + for i, id := range ids { + if i > 0 { + b.WriteString(", ") + } + b.WriteString(util.TraceIDToHexString(id)) + } + b.WriteString("]") + + return b.String() +} diff --git a/modules/blockbuilder/tenant_store.go b/modules/blockbuilder/tenant_store.go new file mode 100644 index 00000000000..258d386ea83 --- /dev/null +++ b/modules/blockbuilder/tenant_store.go @@ -0,0 +1,330 @@ +package blockbuilder + +import ( + "bytes" + "context" + "slices" + "sync" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/google/uuid" + "github.com/grafana/tempo/modules/blockbuilder/util" + "github.com/grafana/tempo/modules/overrides" + "github.com/grafana/tempo/pkg/boundedwaitgroup" + "github.com/grafana/tempo/pkg/livetraces" + "github.com/grafana/tempo/pkg/model" + "github.com/grafana/tempo/pkg/tempopb" + "github.com/grafana/tempo/pkg/tracesizes" + "github.com/grafana/tempo/tempodb" + "github.com/grafana/tempo/tempodb/backend" + "github.com/grafana/tempo/tempodb/encoding" + "github.com/grafana/tempo/tempodb/encoding/common" + "github.com/grafana/tempo/tempodb/wal" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "go.uber.org/atomic" +) + +var metricBlockBuilderFlushedBlocks = promauto.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "tempo", + Subsystem: "block_builder", + Name: "flushed_blocks", + }, []string{"tenant"}, +) + +const ( + reasonTraceTooLarge = "trace_too_large" + flushConcurrency = 4 +) + +// TODO - This needs locking +type tenantStore struct { + tenantID string + idGenerator util.IDGenerator + + cfg BlockConfig + logger log.Logger + overrides Overrides + enc encoding.VersionedEncoding + + wal *wal.WAL + + headBlockMtx sync.Mutex + headBlock common.WALBlock + + blocksMtx sync.Mutex + walBlocks []common.WALBlock + + liveTraces *livetraces.LiveTraces[[]byte] + traceSizes *tracesizes.Tracker +} + +func newTenantStore(tenantID string, partitionID, endTimestamp uint64, cfg BlockConfig, logger log.Logger, wal *wal.WAL, enc encoding.VersionedEncoding, o Overrides) (*tenantStore, error) { + s := &tenantStore{ + tenantID: tenantID, + idGenerator: util.NewDeterministicIDGenerator(tenantID, partitionID, endTimestamp), + cfg: cfg, + logger: logger, + overrides: o, + wal: wal, + headBlockMtx: sync.Mutex{}, + blocksMtx: sync.Mutex{}, + enc: enc, + liveTraces: livetraces.New[[]byte](func(b []byte) uint64 { return uint64(len(b)) }), + traceSizes: tracesizes.New(), + } + + return s, s.resetHeadBlock() +} + +// TODO - periodically flush +func (s *tenantStore) cutHeadBlock(immediate bool) error { + s.headBlockMtx.Lock() + defer s.headBlockMtx.Unlock() + + // Check for nil before touching the block; DataLength must not be called on a nil head block. + if s.headBlock == nil { + return nil + } + + dataLen := s.headBlock.DataLength() + if dataLen == 0 { + return nil + } + + if !immediate && dataLen < s.cfg.MaxBlockBytes { + return nil + } + + s.blocksMtx.Lock() + defer s.blocksMtx.Unlock() + + if err := 
s.headBlock.Flush(); err != nil { + return err + } + s.walBlocks = append(s.walBlocks, s.headBlock) + s.headBlock = nil + + return s.resetHeadBlock() +} + +func (s *tenantStore) resetHeadBlock() error { + meta := &backend.BlockMeta{ + BlockID: s.idGenerator.NewID(), + TenantID: s.tenantID, + DedicatedColumns: s.overrides.DedicatedColumns(s.tenantID), + ReplicationFactor: backend.MetricsGeneratorReplicationFactor, + } + block, err := s.wal.NewBlock(meta, model.CurrentEncoding) + if err != nil { + return err + } + s.headBlock = block + return nil +} + +func (s *tenantStore) AppendTrace(traceID []byte, tr []byte, ts time.Time) error { + maxSz := s.overrides.MaxBytesPerTrace(s.tenantID) + + if maxSz > 0 && !s.traceSizes.Allow(traceID, len(tr), maxSz) { + // Record dropped spans due to trace too large + // We have to unmarshal to count the number of spans. + // TODO - There might be a better way + t := &tempopb.Trace{} + if err := t.Unmarshal(tr); err != nil { + return err + } + count := 0 + for _, b := range t.ResourceSpans { + for _, ss := range b.ScopeSpans { + count += len(ss.Spans) + } + } + overrides.RecordDiscardedSpans(count, reasonTraceTooLarge, s.tenantID) + return nil + } + + s.liveTraces.PushWithTimestamp(ts, traceID, tr, 0) + + return nil +} + +func (s *tenantStore) CutIdle(since time.Time, immediate bool) error { + idle := s.liveTraces.CutIdle(since, immediate) + + slices.SortFunc(idle, func(a, b *livetraces.LiveTrace[[]byte]) int { + return bytes.Compare(a.ID, b.ID) + }) + + var ( + unmarshalWg = sync.WaitGroup{} + unmarshalErr = atomic.NewError(nil) + unmarshaled = make([]*tempopb.Trace, len(idle)) + starts = make([]uint32, len(idle)) + ends = make([]uint32, len(idle)) + ) + + // Unmarshal and process in parallel, each goroutine handles 1/Nth + for i := 0; i < len(idle) && i < flushConcurrency; i++ { + unmarshalWg.Add(1) + go func(i int) { + defer unmarshalWg.Done() + + for j := i; j < len(idle); j += flushConcurrency { + tr := new(tempopb.Trace) + + for _, b := range idle[j].Batches { + // This unmarshal appends the batches onto the existing tempopb.Trace + // so we don't need to allocate another container temporarily + err := tr.Unmarshal(b) + if err != nil { + unmarshalErr.Store(err) + return + } + } + + // Get trace timestamp bounds + var start, end uint64 + for _, b := range tr.ResourceSpans { + for _, ss := range b.ScopeSpans { + for _, s := range ss.Spans { + if start == 0 || s.StartTimeUnixNano < start { + start = s.StartTimeUnixNano + } + if s.EndTimeUnixNano > end { + end = s.EndTimeUnixNano + } + } + } + } + + // Convert from unix nanos to unix seconds + starts[j] = uint32(start / uint64(time.Second)) + ends[j] = uint32(end / uint64(time.Second)) + unmarshaled[j] = tr + } + }(i) + } + + unmarshalWg.Wait() + if err := unmarshalErr.Load(); err != nil { + return err + } + + for i, tr := range unmarshaled { + if err := s.headBlock.AppendTrace(idle[i].ID, tr, starts[i], ends[i]); err != nil { + return err + } + } + + // Return prealloc slices to the pool + for _, i := range idle { + tempopb.ReuseByteSlices(i.Batches) + } + + err := s.headBlock.Flush() + if err != nil { + return err + } + + // Cut head block if needed + return s.cutHeadBlock(false) +} + +func (s *tenantStore) Flush(ctx context.Context, store tempodb.Writer) error { + // TODO - Advance some of this work if possible + + // Cut head block + if err := s.cutHeadBlock(true); err != nil { + return err + } + + s.blocksMtx.Lock() + defer s.blocksMtx.Unlock() + + var ( + completeBlocks = make([]tempodb.WriteableBlock, 
len(s.walBlocks)) + jobErr = atomic.NewError(nil) + wg = boundedwaitgroup.New(flushConcurrency) + ) + + // Convert WALs to backend blocks + for i, block := range s.walBlocks { + wg.Add(1) + go func(i int, block common.WALBlock) { + defer wg.Done() + + completeBlock, err := s.buildWriteableBlock(ctx, block) + if err != nil { + jobErr.Store(err) + return + } + + err = block.Clear() + if err != nil { + jobErr.Store(err) + return + } + + completeBlocks[i] = completeBlock + }(i, block) + } + + wg.Wait() + if err := jobErr.Load(); err != nil { + return err + } + + // Write all blocks to the store + level.Info(s.logger).Log("msg", "writing blocks to storage", "num_blocks", len(completeBlocks)) + for _, block := range completeBlocks { + wg.Add(1) + go func(block tempodb.WriteableBlock) { + defer wg.Done() + level.Info(s.logger).Log("msg", "writing block to storage", "block_id", block.BlockMeta().BlockID.String()) + if err := store.WriteBlock(ctx, block); err != nil { + jobErr.Store(err) + return + } + + metricBlockBuilderFlushedBlocks.WithLabelValues(s.tenantID).Inc() + + if err := s.wal.LocalBackend().ClearBlock((uuid.UUID)(block.BlockMeta().BlockID), s.tenantID); err != nil { + jobErr.Store(err) + } + }(block) + } + + wg.Wait() + if err := jobErr.Load(); err != nil { + return err + } + + // Clear the blocks + s.walBlocks = s.walBlocks[:0] + + return nil +} + +func (s *tenantStore) buildWriteableBlock(ctx context.Context, b common.WALBlock) (tempodb.WriteableBlock, error) { + level.Debug(s.logger).Log("msg", "building writeable block", "block_id", b.BlockMeta().BlockID.String()) + + iter, err := b.Iterator() + if err != nil { + return nil, err + } + defer iter.Close() + + reader, writer := backend.NewReader(s.wal.LocalBackend()), backend.NewWriter(s.wal.LocalBackend()) + + newMeta, err := s.enc.CreateBlock(ctx, &s.cfg.BlockCfg, b.BlockMeta(), iter, reader, writer) + if err != nil { + return nil, err + } + + newBlock, err := s.enc.OpenBlock(newMeta, reader) + if err != nil { + return nil, err + } + + return NewWriteableBlock(newBlock, reader, writer), nil +} diff --git a/modules/blockbuilder/util/id.go b/modules/blockbuilder/util/id.go new file mode 100644 index 00000000000..f9a556a1292 --- /dev/null +++ b/modules/blockbuilder/util/id.go @@ -0,0 +1,60 @@ +package util + +import ( + "crypto/sha1" + "encoding/binary" + "hash" + + "github.com/google/uuid" + "github.com/grafana/tempo/tempodb/backend" + "go.uber.org/atomic" +) + +const ( + sha1Version5 = 5 +) + +var ns = uuid.MustParse("28840903-6eb5-4ffb-8880-93a4fa98dbcb") // Random UUID + +type IDGenerator interface { + NewID() backend.UUID +} + +var _ IDGenerator = (*DeterministicIDGenerator)(nil) + +type DeterministicIDGenerator struct { + buf []byte + seq *atomic.Uint64 + hash hash.Hash +} + +func NewDeterministicIDGenerator(tenantID string, seeds ...uint64) *DeterministicIDGenerator { + return &DeterministicIDGenerator{ + buf: newBuf([]byte(tenantID), seeds), + seq: atomic.NewUint64(0), + hash: sha1.New(), + } +} + +func newBuf(tenantID []byte, seeds []uint64) []byte { + dl, sl := len(tenantID), len(seeds) + data := make([]byte, dl+sl*8+8) // tenantID bytes + 8 bytes per uint64 + 8 bytes for seq + copy(data, tenantID) + + for i, seed := range seeds { + binary.LittleEndian.PutUint64(data[dl+i*8:], seed) + } + + return data +} + +func (d *DeterministicIDGenerator) NewID() backend.UUID { + return backend.UUID(newDeterministicID(d.hash, d.buf, d.seq.Inc())) +} + +func newDeterministicID(hash hash.Hash, data []byte, seq uint64) uuid.UUID { + // 
update last 8 bytes of data with seq + binary.LittleEndian.PutUint64(data[len(data)-8:], seq) + + return uuid.NewHash(hash, ns, data, sha1Version5) +} diff --git a/modules/blockbuilder/util/id_test.go b/modules/blockbuilder/util/id_test.go new file mode 100644 index 00000000000..ad59872af71 --- /dev/null +++ b/modules/blockbuilder/util/id_test.go @@ -0,0 +1,82 @@ +package util + +import ( + "testing" + "time" + + "github.com/google/uuid" + "github.com/grafana/tempo/pkg/util" + "github.com/grafana/tempo/tempodb/backend" + "github.com/stretchr/testify/assert" +) + +func TestDeterministicIDGenerator(t *testing.T) { + ts := time.Now().UnixMilli() + + gen := NewDeterministicIDGenerator(util.FakeTenantID, 0, uint64(ts)) + + firstPassIDs := make(map[backend.UUID]struct{}) + for seq := int64(0); seq < 10; seq++ { + id := gen.NewID() + firstPassIDs[id] = struct{}{} + } + + // Verify that the generated UUIDs are valid + for id := range firstPassIDs { + _, err := uuid.Parse(id.String()) + assert.NoError(t, err) + } + + gen = NewDeterministicIDGenerator(util.FakeTenantID, 0, uint64(ts)) + for seq := int64(0); seq < 10; seq++ { + id := gen.NewID() + if _, ok := firstPassIDs[id]; !ok { + t.Errorf("ID %s not found in first pass IDs", id) + } + } +} + +func TestDeterministicIDGeneratorWithDifferentTenants(t *testing.T) { + ts := time.Now().UnixMilli() + seed := uint64(42) + + gen1 := NewDeterministicIDGenerator("tenant-1", seed, uint64(ts)) + gen2 := NewDeterministicIDGenerator("tenant-2", seed, uint64(ts)) + + for i := 0; i < 10; i++ { + assert.NotEqualf(t, gen1.NewID(), gen2.NewID(), "IDs should be different") + } +} + +func FuzzDeterministicIDGenerator(f *testing.F) { + f.Skip() + + f.Add(util.FakeTenantID, uint64(42), uint64(100)) + f.Fuzz(func(t *testing.T, tenantID string, seed1, seed2 uint64) { + gen := NewDeterministicIDGenerator(tenantID, seed1, seed2) + + for i := 0; i < 3; i++ { + id := gen.NewID() + _, err := uuid.Parse(id.String()) + if err != nil { + t.Fatalf("failed to parse UUID: %v", err) + } + } + }) +} + +func BenchmarkDeterministicID(b *testing.B) { + tenant := util.FakeTenantID + ts := time.Now().UnixMilli() + partitionID := uint64(0) + gen := NewDeterministicIDGenerator(tenant, partitionID, uint64(ts)) + for i := 0; i < b.N; i++ { + _ = gen.NewID() + } +} + +func BenchmarkNewID(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = uuid.New() + } +} diff --git a/modules/blockbuilder/writeable_block.go b/modules/blockbuilder/writeable_block.go new file mode 100644 index 00000000000..361d163fcb2 --- /dev/null +++ b/modules/blockbuilder/writeable_block.go @@ -0,0 +1,71 @@ +package blockbuilder + +import ( + "context" + "fmt" + "time" + + "github.com/google/uuid" + "github.com/grafana/tempo/tempodb" + "github.com/grafana/tempo/tempodb/backend" + "github.com/grafana/tempo/tempodb/encoding" + "github.com/grafana/tempo/tempodb/encoding/common" + "go.uber.org/atomic" +) + +// Overrides is just the set of overrides needed here.
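To make the determinism of the generator above concrete: it produces name-based (version-5) UUIDs, so equal inputs always yield equal IDs. A standalone sketch with google/uuid, reusing the namespace from id.go; the name bytes are illustrative:

```go
package main

import (
	"crypto/sha1"
	"encoding/binary"
	"fmt"

	"github.com/google/uuid"
)

func main() {
	ns := uuid.MustParse("28840903-6eb5-4ffb-8880-93a4fa98dbcb") // fixed namespace

	name := make([]byte, 8)
	binary.LittleEndian.PutUint64(name, 42) // stand-in for tenant/partition/seq bytes

	a := uuid.NewSHA1(ns, name)
	b := uuid.NewHash(sha1.New(), ns, name, 5) // equivalent to what the generator does
	fmt.Println(a == b, a)                     // true, and stable across runs
}
```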
+type Overrides interface { + MaxBytesPerTrace(string) int + DedicatedColumns(string) backend.DedicatedColumns +} + +const nameFlushed = "flushed" + +var _ tempodb.WriteableBlock = (*WriteableBlock)(nil) + +// WriteableBlock is a trimmed down version of ingester.LocalBlock +type WriteableBlock struct { + backendBlock common.BackendBlock + + reader backend.Reader + writer backend.Writer + + flushedTime atomic.Int64 // protecting flushedTime b/c it's accessed from the store on flush and from the ingester instance checking flush time +} + +func NewWriteableBlock(backendBlock common.BackendBlock, r backend.Reader, w backend.Writer) tempodb.WriteableBlock { + return &WriteableBlock{ + backendBlock: backendBlock, + reader: r, + writer: w, + } +} + +func (b *WriteableBlock) BlockMeta() *backend.BlockMeta { return b.backendBlock.BlockMeta() } + +func (b *WriteableBlock) Write(ctx context.Context, w backend.Writer) error { + err := encoding.CopyBlock(ctx, b.BlockMeta(), b.reader, w) + if err != nil { + return fmt.Errorf("error copying block from local to remote backend: %w", err) + } + + err = b.setFlushed(ctx) + return err +} + +// TODO - Necessary? +func (b *WriteableBlock) setFlushed(ctx context.Context) error { + flushedTime := time.Now() + flushedBytes, err := flushedTime.MarshalText() + if err != nil { + return fmt.Errorf("error marshalling flush time to text: %w", err) + } + + err = b.writer.Write(ctx, nameFlushed, (uuid.UUID)(b.BlockMeta().BlockID), b.BlockMeta().TenantID, flushedBytes, nil) + if err != nil { + return fmt.Errorf("error writing ingester block flushed file: %w", err) + } + + b.flushedTime.Store(flushedTime.Unix()) + return nil +} diff --git a/modules/distributor/config.go b/modules/distributor/config.go index 14bb02fa41b..2a2cb988d1b 100644 --- a/modules/distributor/config.go +++ b/modules/distributor/config.go @@ -6,6 +6,7 @@ import ( "github.com/grafana/dskit/flagext" ring_client "github.com/grafana/dskit/ring/client" + "github.com/grafana/tempo/pkg/ingest" "github.com/grafana/tempo/modules/distributor/forwarder" "github.com/grafana/tempo/modules/distributor/usage" @@ -41,6 +42,10 @@ type Config struct { Forwarders forwarder.ConfigList `yaml:"forwarders"` Usage usage.Config `yaml:"usage,omitempty"` + // Kafka + KafkaWritePathEnabled bool `yaml:"kafka_write_path_enabled"` + KafkaConfig ingest.KafkaConfig `yaml:"kafka_config"` + // disables write extension with inactive ingesters. Use this along with ingester.lifecycler.unregister_on_shutdown = true // note that setting these two config values reduces tolerance to failures on rollout b/c there is always one guaranteed to be failing replica ExtendWrites bool `yaml:"extend_writes"` @@ -51,6 +56,8 @@ type Config struct { // For testing. 
factory ring_client.PoolAddrFunc `yaml:"-"` + + MaxSpanAttrByte int `yaml:"max_span_attr_byte"` } type LogSpansConfig struct { @@ -74,6 +81,8 @@ func (cfg *Config) RegisterFlagsAndApplyDefaults(prefix string, f *flag.FlagSet) cfg.OverrideRingKey = distributorRingKey cfg.ExtendWrites = true + cfg.MaxSpanAttrByte = 2048 // 2KB + f.BoolVar(&cfg.LogReceivedSpans.Enabled, util.PrefixConfig(prefix, "log-received-spans.enabled"), false, "Enable to log every received span to help debug ingestion or calculate span error distributions using the logs.") f.BoolVar(&cfg.LogReceivedSpans.IncludeAllAttributes, util.PrefixConfig(prefix, "log-received-spans.include-attributes"), false, "Enable to include span attributes in the logs.") f.BoolVar(&cfg.LogReceivedSpans.FilterByStatusError, util.PrefixConfig(prefix, "log-received-spans.filter-by-status-error"), false, "Enable to filter out spans without status error.") @@ -84,3 +93,13 @@ func (cfg *Config) RegisterFlagsAndApplyDefaults(prefix string, f *flag.FlagSet) cfg.Usage.RegisterFlagsAndApplyDefaults(prefix, f) } + +func (cfg *Config) Validate() error { + if cfg.KafkaWritePathEnabled { + if err := cfg.KafkaConfig.Validate(); err != nil { + return err + } + } + + return nil +} diff --git a/modules/distributor/distributor.go b/modules/distributor/distributor.go index d093ad0ae47..6935bd54b7f 100644 --- a/modules/distributor/distributor.go +++ b/modules/distributor/distributor.go @@ -6,11 +6,13 @@ import ( "fmt" "math" "net/http" + "strconv" "sync" "time" "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/gogo/protobuf/proto" "github.com/gogo/status" "github.com/grafana/dskit/limiter" dslog "github.com/grafana/dskit/log" @@ -18,10 +20,12 @@ import ( ring_client "github.com/grafana/dskit/ring/client" "github.com/grafana/dskit/services" "github.com/grafana/dskit/user" + "github.com/grafana/tempo/pkg/ingest" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/prometheus/util/strutil" "github.com/segmentio/fasthash/fnv1a" + "github.com/twmb/franz-go/pkg/kgo" "go.opentelemetry.io/collector/pdata/ptrace" "go.opentelemetry.io/otel" "google.golang.org/grpc/codes" @@ -35,6 +39,7 @@ import ( "github.com/grafana/tempo/modules/overrides" "github.com/grafana/tempo/pkg/model" "github.com/grafana/tempo/pkg/tempopb" + v1_common "github.com/grafana/tempo/pkg/tempopb/common/v1" v1 "github.com/grafana/tempo/pkg/tempopb/trace/v1" "github.com/grafana/tempo/pkg/usagestats" tempo_util "github.com/grafana/tempo/pkg/util" @@ -49,8 +54,6 @@ const ( reasonTraceTooLarge = "trace_too_large" // reasonLiveTracesExceeded indicates that tempo is already tracking too many live traces in the ingesters for this user reasonLiveTracesExceeded = "live_traces_exceeded" - // reasonInternalError indicates an unexpected error occurred processing these spans. 
analogous to a 500 - reasonInternalError = "internal_error" // reasonUnknown indicates a pushByte error at the ingester level not related to GRPC reasonUnknown = "unknown_error" @@ -112,6 +115,35 @@ var ( Name: "distributor_metrics_generator_clients", Help: "The current number of metrics-generator clients.", }) + metricAttributesTruncated = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: "tempo", + Name: "distributor_attributes_truncated_total", + Help: "The total number of attribute keys or values truncated per tenant", + }, []string{"tenant"}) + metricKafkaRecordsPerRequest = promauto.NewHistogram(prometheus.HistogramOpts{ + Namespace: "tempo", + Subsystem: "distributor", + Name: "kafka_records_per_request", + Help: "The number of records in each kafka request", + }) + metricKafkaWriteLatency = promauto.NewHistogram(prometheus.HistogramOpts{ + Namespace: "tempo", + Subsystem: "distributor", + Name: "kafka_write_latency_seconds", + Help: "The latency of writing to kafka", + }) + metricKafkaWriteBytesTotal = promauto.NewCounter(prometheus.CounterOpts{ + Namespace: "tempo", + Subsystem: "distributor", + Name: "kafka_write_bytes_total", + Help: "The total number of bytes written to kafka", + }) + metricKafkaAppends = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: "tempo", + Subsystem: "distributor", + Name: "kafka_appends_total", + Help: "The total number of appends sent to kafka", + }, []string{"partition", "status"}) statBytesReceived = usagestats.NewCounter("distributor_bytes_received") statSpansReceived = usagestats.NewCounter("distributor_spans_received") @@ -149,6 +181,10 @@ type Distributor struct { // Generic Forwarder forwardersManager *forwarder.Manager + // Kafka + kafkaProducer *ingest.Producer + partitionRing ring.PartitionRingReader + // Per-user rate limiter. ingestionRateLimiter *limiter.RateLimiter @@ -162,7 +198,23 @@ type Distributor struct { } // New a distributor creates. 
-func New(cfg Config, clientCfg ingester_client.Config, ingestersRing ring.ReadRing, generatorClientCfg generator_client.Config, generatorsRing ring.ReadRing, o overrides.Interface, middleware receiver.Middleware, logger log.Logger, loggingLevel dslog.Level, reg prometheus.Registerer) (*Distributor, error) { +func New( + cfg Config, + clientCfg ingester_client.Config, + ingestersRing ring.ReadRing, + generatorClientCfg generator_client.Config, + generatorsRing ring.ReadRing, + partitionRing ring.PartitionRingReader, + o overrides.Interface, + middleware receiver.Middleware, + logger log.Logger, + loggingLevel dslog.Level, + reg prometheus.Registerer, +) (*Distributor, error) { + if err := cfg.Validate(); err != nil { + return nil, err + } + factory := cfg.factory if factory == nil { factory = func(addr string) (ring_client.PoolClient, error) { @@ -213,6 +265,7 @@ func New(cfg Config, clientCfg ingester_client.Config, ingestersRing ring.ReadRi ingestionRateLimiter: limiter.NewRateLimiter(ingestionRateStrategy, 10*time.Second), generatorClientCfg: generatorClientCfg, generatorsRing: generatorsRing, + partitionRing: partitionRing, overrides: o, traceEncoder: model.MustNewSegmentDecoder(model.CurrentEncoding), logger: logger, @@ -256,12 +309,20 @@ func New(cfg Config, clientCfg ingester_client.Config, ingestersRing ring.ReadRi cfgReceivers = defaultReceivers } - receivers, err := receiver.New(cfgReceivers, d, middleware, cfg.RetryAfterOnResourceExhausted, loggingLevel) + receivers, err := receiver.New(cfgReceivers, d, middleware, cfg.RetryAfterOnResourceExhausted, loggingLevel, reg) if err != nil { return nil, err } subservices = append(subservices, receivers) + if cfg.KafkaWritePathEnabled { + client, err := ingest.NewWriterClient(cfg.KafkaConfig, 10, logger, reg) + if err != nil { + return nil, fmt.Errorf("failed to create kafka writer client: %w", err) + } + d.kafkaProducer = ingest.NewProducer(client, d.cfg.KafkaConfig.ProducerMaxBufferedBytes, reg) + } + d.subservices, err = services.NewManager(subservices...) 
if err != nil { return nil, fmt.Errorf("failed to create subservices: %w", err) @@ -328,7 +389,7 @@ func (d *Distributor) extractBasicInfo(ctx context.Context, traces ptrace.Traces // PushTraces pushes a batch of traces func (d *Distributor) PushTraces(ctx context.Context, traces ptrace.Traces) (*tempopb.PushResponse, error) { - ctx, span := tracer.Start(ctx, "distributor.PushTraces") + ctx, span := tracer.Start(ctx, "distributor.PushBytes") defer span.End() userID, spanCount, size, err := d.extractBasicInfo(ctx, traces) @@ -378,26 +439,38 @@ func (d *Distributor) PushTraces(ctx context.Context, traces ptrace.Traces) (*te d.usage.Observe(userID, batches) } - keys, rebatchedTraces, err := requestsByTraceID(batches, userID, spanCount) + keys, rebatchedTraces, truncatedAttributeCount, err := requestsByTraceID(batches, userID, spanCount, d.cfg.MaxSpanAttrByte) if err != nil { - overrides.RecordDiscardedSpans(spanCount, reasonInternalError, userID) logDiscardedResourceSpans(batches, userID, &d.cfg.LogDiscardedSpans, d.logger) return nil, err } + if truncatedAttributeCount > 0 { + metricAttributesTruncated.WithLabelValues(userID).Add(float64(truncatedAttributeCount)) + } + err = d.sendToIngestersViaBytes(ctx, userID, spanCount, rebatchedTraces, keys) if err != nil { return nil, err } - if len(d.overrides.MetricsGeneratorProcessors(userID)) > 0 { - d.generatorForwarder.SendTraces(ctx, userID, keys, rebatchedTraces) - } - if err := d.forwardersManager.ForTenant(userID).ForwardTraces(ctx, traces); err != nil { _ = level.Warn(d.logger).Log("msg", "failed to forward batches for tenant=%s: %w", userID, err) } + if d.kafkaProducer != nil { + err := d.sendToKafka(ctx, userID, keys, rebatchedTraces) + if err != nil { + // TODO: Handle error + level.Error(d.logger).Log("msg", "failed to write to kafka", "err", err) + } + } else { + // See if we need to send to the generators + if len(d.overrides.MetricsGeneratorProcessors(userID)) > 0 { + d.generatorForwarder.SendTraces(ctx, userID, keys, rebatchedTraces) + } + } + return nil, nil // PushRequest is ignored, so no reason to create one } @@ -461,7 +534,6 @@ func (d *Distributor) sendToIngestersViaBytes(ctx context.Context, userID string }, ring.DoBatchOptions{}) // if err != nil, we discarded everything because of an internal error (like "context cancelled") if err != nil { - overrides.RecordDiscardedSpans(totalSpanCount, reasonInternalError, userID) logDiscardedRebatchedSpans(traces, userID, &d.cfg.LogDiscardedSpans, d.logger) return err } @@ -523,20 +595,106 @@ func (d *Distributor) UsageTrackerHandler() http.Handler { return nil } +func (d *Distributor) sendToKafka(ctx context.Context, userID string, keys []uint32, traces []*rebatchedTrace) error { + marshalledTraces := make([][]byte, len(traces)) + for i, t := range traces { + b, err := proto.Marshal(t.trace) + if err != nil { + return fmt.Errorf("failed to marshal trace: %w", err) + } + marshalledTraces[i] = b + } + + partitionRing := d.partitionRing.PartitionRing() // TODO - Use shuffle sharding + return ring.DoBatchWithOptions(ctx, ring.Write, ring.NewActivePartitionBatchRing(partitionRing), keys, func(partition ring.InstanceDesc, indexes []int) error { + localCtx, cancel := context.WithTimeout(ctx, d.clientCfg.RemoteTimeout) + defer cancel() + localCtx = user.InjectOrgID(localCtx, userID) + + req := &tempopb.PushBytesRequest{ + Traces: make([]tempopb.PreallocBytes, len(indexes)), + Ids: make([][]byte, len(indexes)), + } + + for i, j := range indexes { + req.Traces[i].Slice = marshalledTraces[j][0:] 
+ req.Ids[i] = traces[j].id + } + + // The partition ID is stored in the ring.InstanceDesc ID. + partitionID, err := strconv.ParseInt(partition.Id, 10, 31) + if err != nil { + return err + } + + startTime := time.Now() + + records, err := ingest.Encode(int32(partitionID), userID, req, d.cfg.KafkaConfig.ProducerMaxRecordSizeBytes) + if err != nil { + return fmt.Errorf("failed to encode PushSpansRequest: %w", err) + } + + metricKafkaRecordsPerRequest.Observe(float64(len(records))) + + produceResults := d.kafkaProducer.ProduceSync(localCtx, records) + + if count, sizeBytes := successfulProduceRecordsStats(produceResults); count > 0 { + metricKafkaWriteLatency.Observe(time.Since(startTime).Seconds()) + metricKafkaWriteBytesTotal.Add(float64(sizeBytes)) + _ = level.Debug(d.logger).Log("msg", "kafka write success stats", "count", count, "size_bytes", sizeBytes) + } + + var finalErr error + for _, result := range produceResults { + if result.Err != nil { + _ = level.Error(d.logger).Log("msg", "failed to write to kafka", "err", result.Err) + metricKafkaAppends.WithLabelValues(fmt.Sprintf("partition_%d", partitionID), "fail").Inc() + finalErr = result.Err + } else { + metricKafkaAppends.WithLabelValues(fmt.Sprintf("partition_%d", partitionID), "success").Inc() + } + } + + return finalErr + }, ring.DoBatchOptions{}) +} + +func successfulProduceRecordsStats(results kgo.ProduceResults) (count, sizeBytes int) { + for _, res := range results { + if res.Err == nil && res.Record != nil { + count++ + sizeBytes += len(res.Record.Value) + } + } + + return count, sizeBytes +} + // requestsByTraceID takes an incoming tempodb.PushRequest and creates a set of keys for the hash ring // and traces to pass onto the ingesters. -func requestsByTraceID(batches []*v1.ResourceSpans, userID string, spanCount int) ([]uint32, []*rebatchedTrace, error) { +func requestsByTraceID(batches []*v1.ResourceSpans, userID string, spanCount, maxSpanAttrSize int) ([]uint32, []*rebatchedTrace, int, error) { const tracesPerBatch = 20 // p50 of internal env tracesByID := make(map[uint32]*rebatchedTrace, tracesPerBatch) + truncatedAttributeCount := 0 for _, b := range batches { spansByILS := make(map[uint32]*v1.ScopeSpans) + // check for large resources for large attributes + if maxSpanAttrSize > 0 && b.Resource != nil { + resourceAttrTruncatedCount := processAttributes(b.Resource.Attributes, maxSpanAttrSize) + truncatedAttributeCount += resourceAttrTruncatedCount + } for _, ils := range b.ScopeSpans { for _, span := range ils.Spans { + // check large spans for large attributes + if maxSpanAttrSize > 0 { + spanAttrTruncatedCount := processAttributes(span.Attributes, maxSpanAttrSize) + truncatedAttributeCount += spanAttrTruncatedCount + } traceID := span.TraceId if !validation.ValidTraceID(traceID) { - return nil, nil, status.Errorf(codes.InvalidArgument, "trace ids must be 128 bit") + return nil, nil, 0, status.Errorf(codes.InvalidArgument, "trace ids must be 128 bit, received %d bits", len(traceID)*8) } traceKey := tempo_util.TokenFor(userID, traceID) @@ -602,7 +760,30 @@ func requestsByTraceID(batches []*v1.ResourceSpans, userID string, spanCount int traces = append(traces, r) } - return keys, traces, nil + return keys, traces, truncatedAttributeCount, nil +} + +// find and truncate the span attributes that are too large +func processAttributes(attributes []*v1_common.KeyValue, maxAttrSize int) int { + count := 0 + for _, attr := range attributes { + if len(attr.Key) > maxAttrSize { + attr.Key = attr.Key[:maxAttrSize] + count++ + } + + 
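+ // Only string values are truncated below; other value kinds (ints, bools, arrays, kvlists) pass through unchanged.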
switch value := attr.GetValue().Value.(type) { + case *v1_common.AnyValue_StringValue: + if len(value.StringValue) > maxAttrSize { + value.StringValue = value.StringValue[:maxAttrSize] + count++ + } + default: + continue + } + } + + return count } // discardedPredicate determines if a trace is discarded based on the number of successful replications. diff --git a/modules/distributor/distributor_test.go b/modules/distributor/distributor_test.go index 87fe761e339..b4cdb839a11 100644 --- a/modules/distributor/distributor_test.go +++ b/modules/distributor/distributor_test.go @@ -107,7 +107,24 @@ func TestRequestsByTraceID(t *testing.T) { }, }, }, - expectedErr: status.Errorf(codes.InvalidArgument, "trace ids must be 128 bit"), + expectedErr: status.Errorf(codes.InvalidArgument, "trace ids must be 128 bit, received 8 bits"), + }, + { + name: "empty trace id", + batches: []*v1.ResourceSpans{ + { + ScopeSpans: []*v1.ScopeSpans{ + { + Spans: []*v1.Span{ + { + TraceId: []byte{}, + }, + }, + }, + }, + }, + }, + expectedErr: status.Errorf(codes.InvalidArgument, "trace ids must be 128 bit, received 0 bits"), }, { name: "one span", @@ -696,7 +713,7 @@ func TestRequestsByTraceID(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - keys, rebatchedTraces, err := requestsByTraceID(tt.batches, util.FakeTenantID, 1) + keys, rebatchedTraces, _, err := requestsByTraceID(tt.batches, util.FakeTenantID, 1, 1000) require.Equal(t, len(keys), len(rebatchedTraces)) for i, expectedKey := range tt.expectedKeys { @@ -722,9 +739,64 @@ func TestRequestsByTraceID(t *testing.T) { } } +func TestProcessAttributes(t *testing.T) { + spanCount := 10 + batchCount := 3 + trace := test.MakeTraceWithSpanCount(batchCount, spanCount, []byte{0x0A, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}) + + maxAttrByte := 1000 + longString := strings.Repeat("t", 1100) + + // add long attributes to the resource level + trace.ResourceSpans[0].Resource.Attributes = append(trace.ResourceSpans[0].Resource.Attributes, + test.MakeAttribute("long value", longString), + ) + trace.ResourceSpans[0].Resource.Attributes = append(trace.ResourceSpans[0].Resource.Attributes, + test.MakeAttribute(longString, "long key"), + ) + + // add long attributes to the span level + trace.ResourceSpans[0].ScopeSpans[0].Spans[0].Attributes = append(trace.ResourceSpans[0].ScopeSpans[0].Spans[0].Attributes, + test.MakeAttribute("long value", longString), + ) + trace.ResourceSpans[0].ScopeSpans[0].Spans[0].Attributes = append(trace.ResourceSpans[0].ScopeSpans[0].Spans[0].Attributes, + test.MakeAttribute(longString, "long key"), + ) + + _, rebatchedTrace, truncatedCount, _ := requestsByTraceID(trace.ResourceSpans, "test", spanCount*batchCount, maxAttrByte) + assert.Equal(t, 4, truncatedCount) + for _, rT := range rebatchedTrace { + for _, resource := range rT.trace.ResourceSpans { + // find large resource attributes + for _, attr := range resource.Resource.Attributes { + if attr.Key == "long value" { + assert.Equal(t, longString[:maxAttrByte], attr.Value.GetStringValue()) + } + if attr.Value.GetStringValue() == "long key" { + assert.Equal(t, longString[:maxAttrByte], attr.Key) + } + } + // find large span attributes + for _, scope := range resource.ScopeSpans { + for _, span := range scope.Spans { + for _, attr := range span.Attributes { + if attr.Key == "long value" { + assert.Equal(t, longString[:maxAttrByte], attr.Value.GetStringValue()) + } + if attr.Value.GetStringValue() == "long key" { + 
assert.Equal(t, longString[:maxAttrByte], attr.Key) + } + } + } + } + + } + } +} + func BenchmarkTestsByRequestID(b *testing.B) { - spansPer := 100 - batches := 10 + spansPer := 5000 + batches := 100 traces := []*tempopb.Trace{ test.MakeTraceWithSpanCount(batches, spansPer, []byte{0x0A, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}), test.MakeTraceWithSpanCount(batches, spansPer, []byte{0x0B, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}), @@ -740,14 +812,15 @@ func BenchmarkTestsByRequestID(b *testing.B) { } b.ResetTimer() + b.ReportAllocs() for i := 0; i < b.N; i++ { for _, blerg := range ils { - _, _, err := requestsByTraceID([]*v1.ResourceSpans{ + _, _, _, err := requestsByTraceID([]*v1.ResourceSpans{ { ScopeSpans: blerg, }, - }, "test", spansPer*len(traces)) + }, "test", spansPer*len(traces), 5) require.NoError(b, err) } } @@ -1614,6 +1687,7 @@ func prepare(t *testing.T, limits overrides.Config, logger kitlog.Logger) (*Dist }) } + distributorConfig.MaxSpanAttrByte = 1000 distributorConfig.DistributorRing.HeartbeatPeriod = 100 * time.Millisecond distributorConfig.DistributorRing.InstanceID = strconv.Itoa(rand.Int()) distributorConfig.DistributorRing.KVStore.Mock = nil @@ -1625,7 +1699,7 @@ func prepare(t *testing.T, limits overrides.Config, logger kitlog.Logger) (*Dist l := dslog.Level{} _ = l.Set("error") mw := receiver.MultiTenancyMiddleware() - d, err := New(distributorConfig, clientConfig, ingestersRing, generator_client.Config{}, nil, overrides, mw, logger, l, prometheus.NewPedanticRegistry()) + d, err := New(distributorConfig, clientConfig, ingestersRing, generator_client.Config{}, nil, nil, overrides, mw, logger, l, prometheus.NewPedanticRegistry()) require.NoError(t, err) return d, ingesters diff --git a/modules/distributor/forwarder/forwarder.go b/modules/distributor/forwarder/forwarder.go index bf2dd21d739..87c3a5b1a42 100644 --- a/modules/distributor/forwarder/forwarder.go +++ b/modules/distributor/forwarder/forwarder.go @@ -86,7 +86,7 @@ type FilterForwarder struct { func NewFilterForwarder(cfg FilterConfig, next Forwarder, logLevel dslog.Level) (*FilterForwarder, error) { factory := filterprocessor.NewFactory() - set := processor.CreateSettings{ + set := processor.Settings{ ID: component.ID{}, TelemetrySettings: component.TelemetrySettings{ Logger: newLogger(logLevel), @@ -102,7 +102,7 @@ func NewFilterForwarder(cfg FilterConfig, next Forwarder, logLevel dslog.Level) SpanEventConditions: cfg.Traces.SpanEventConditions, }, } - fp, err := factory.CreateTracesProcessor(context.Background(), set, fpCfg, consumerToForwarderAdapter{forwarder: next}) + fp, err := factory.CreateTraces(context.Background(), set, fpCfg, consumerToForwarderAdapter{forwarder: next}) if err != nil { return nil, fmt.Errorf("failed to create filter processor: %w", err) } @@ -173,11 +173,6 @@ func (f *FilterForwarder) GetExtensions() map[component.ID]extension.Extension { return nil } -// GetExporters implements component.Host -func (f *FilterForwarder) GetExporters() map[component.DataType]map[component.ID]component.Component { - return nil -} - type consumerToForwarderAdapter struct { forwarder Forwarder } diff --git a/modules/distributor/forwarder_test.go b/modules/distributor/forwarder_test.go index 61807712a5f..d5dfbff868d 100644 --- a/modules/distributor/forwarder_test.go +++ b/modules/distributor/forwarder_test.go @@ -28,7 +28,7 @@ func TestForwarder(t *testing.T) { require.NoError(t, err) b := 
test.MakeBatch(10, id) - keys, rebatchedTraces, err := requestsByTraceID([]*v1.ResourceSpans{b}, tenantID, 10) + keys, rebatchedTraces, _, err := requestsByTraceID([]*v1.ResourceSpans{b}, tenantID, 10, 1000) require.NoError(t, err) o, err := overrides.NewOverrides(oCfg, nil, prometheus.DefaultRegisterer) @@ -69,7 +69,7 @@ func TestForwarder_shutdown(t *testing.T) { require.NoError(t, err) b := test.MakeBatch(10, id) - keys, rebatchedTraces, err := requestsByTraceID([]*v1.ResourceSpans{b}, tenantID, 10) + keys, rebatchedTraces, _, err := requestsByTraceID([]*v1.ResourceSpans{b}, tenantID, 10, 1000) require.NoError(t, err) o, err := overrides.NewOverrides(oCfg, nil, prometheus.DefaultRegisterer) diff --git a/modules/distributor/receiver/metrics_provider.go b/modules/distributor/receiver/metrics_provider.go index 9abd360fffe..f15af449664 100644 --- a/modules/distributor/receiver/metrics_provider.go +++ b/modules/distributor/receiver/metrics_provider.go @@ -22,181 +22,85 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/embedded" + "go.opentelemetry.io/otel/metric/noop" +) + +const ( + // These metrics are defined here: https://github.com/open-telemetry/opentelemetry-collector/blob/release/v0.116.x/receiver/receiverhelper/internal/metadata/generated_telemetry.go + otelcolAcceptedSpansMetricName = "otelcol_receiver_accepted_spans" + otelcolRefusedSpansMetricName = "otelcol_receiver_refused_spans" ) var ( // Compile-time check this implements the OpenTelemetry API. - _ metric.MeterProvider = MeterProvider{} - _ metric.Meter = Meter{} - _ metric.Observer = Observer{} - _ metric.Registration = Registration{} - _ metric.Int64Counter = Int64Counter{} - _ metric.Float64Counter = Float64Counter{} - _ metric.Int64UpDownCounter = Int64UpDownCounter{} - _ metric.Float64UpDownCounter = Float64UpDownCounter{} - _ metric.Int64Histogram = Int64Histogram{} - _ metric.Float64Histogram = Float64Histogram{} - _ metric.Int64Gauge = Int64Gauge{} - _ metric.Float64Gauge = Float64Gauge{} - _ metric.Int64ObservableCounter = Int64ObservableCounter{} - _ metric.Float64ObservableCounter = Float64ObservableCounter{} - _ metric.Int64ObservableGauge = Int64ObservableGauge{} - _ metric.Float64ObservableGauge = Float64ObservableGauge{} - _ metric.Int64ObservableUpDownCounter = Int64ObservableUpDownCounter{} - _ metric.Float64ObservableUpDownCounter = Float64ObservableUpDownCounter{} - _ metric.Int64Observer = Int64Observer{} - _ metric.Float64Observer = Float64Observer{} + _ metric.MeterProvider = MeterProvider{} + _ metric.Meter = Meter{} + _ metric.Int64Counter = Int64Counter{} ) -var ( - receiverAcceptedSpans = promauto.NewCounterVec(prometheus.CounterOpts{ - Namespace: "tempo", - Name: "receiver_accepted_spans", - Help: "Number of spans successfully pushed into the pipeline.", - }, []string{"receiver", "transport"}) - receiverRefusedSpans = promauto.NewCounterVec(prometheus.CounterOpts{ - Namespace: "tempo", - Name: "receiver_refused_spans", - Help: "Number of spans that could not be pushed into the pipeline.", - }, []string{"receiver", "transport"}) -) +type metrics struct { + receiverAcceptedSpans *prometheus.CounterVec + receiverRefusedSpans *prometheus.CounterVec +} + +func newMetrics(reg prometheus.Registerer) *metrics { + return &metrics{ + receiverAcceptedSpans: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ + Namespace: "tempo", + Name: "receiver_accepted_spans", + Help: "Number of spans successfully pushed into the 
pipeline.", + }, []string{"receiver", "transport"}), + receiverRefusedSpans: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ + Namespace: "tempo", + Name: "receiver_refused_spans", + Help: "Number of spans that could not be pushed into the pipeline.", + }, []string{"receiver", "transport"}), + } +} // MeterProvider is an OpenTelemetry No-Op MeterProvider. -type MeterProvider struct{ embedded.MeterProvider } +type MeterProvider struct { + embedded.MeterProvider + metrics *metrics +} // NewMeterProvider returns a MeterProvider that does not record any telemetry. -func NewMeterProvider() MeterProvider { - return MeterProvider{} +func NewMeterProvider(reg prometheus.Registerer) MeterProvider { + return MeterProvider{ + metrics: newMetrics(reg), + } } // Meter returns an OpenTelemetry Meter that does not record any telemetry. -func (MeterProvider) Meter(string, ...metric.MeterOption) metric.Meter { - return Meter{} +func (mp MeterProvider) Meter(string, ...metric.MeterOption) metric.Meter { + return &Meter{ + metrics: mp.metrics, + } } // Meter is an OpenTelemetry No-Op Meter. -type Meter struct{ embedded.Meter } +type Meter struct { + // embed the noop Meter, this provides noop implementations for all methods we don't implement ourselves + noop.Meter + metrics *metrics +} -// Int64Counter returns a Counter used to record int64 measurements that -// produces no telemetry. -func (Meter) Int64Counter(name string, _ ...metric.Int64CounterOption) (metric.Int64Counter, error) { +// Int64Counter returns a Counter used to record int64 measurements +func (m Meter) Int64Counter(name string, _ ...metric.Int64CounterOption) (metric.Int64Counter, error) { switch name { - case "receiver_accepted_spans", "receiver_refused_spans": - return Int64Counter{Name: name}, nil + case otelcolAcceptedSpansMetricName, otelcolRefusedSpansMetricName: + return Int64Counter{Name: name, metrics: m.metrics}, nil default: - return Int64Counter{}, nil + return noop.Int64Counter{}, nil } } -// Int64UpDownCounter returns an UpDownCounter used to record int64 -// measurements that produces no telemetry. -func (Meter) Int64UpDownCounter(string, ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { - return Int64UpDownCounter{}, nil -} - -// Int64Histogram returns a Histogram used to record int64 measurements that -// produces no telemetry. -func (Meter) Int64Histogram(string, ...metric.Int64HistogramOption) (metric.Int64Histogram, error) { - return Int64Histogram{}, nil -} - -// Int64Gauge returns a Gauge used to record int64 measurements that -// produces no telemetry. -func (Meter) Int64Gauge(string, ...metric.Int64GaugeOption) (metric.Int64Gauge, error) { - return Int64Gauge{}, nil -} - -// Int64ObservableCounter returns an ObservableCounter used to record int64 -// measurements that produces no telemetry. -func (Meter) Int64ObservableCounter(string, ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { - return Int64ObservableCounter{}, nil -} - -// Int64ObservableUpDownCounter returns an ObservableUpDownCounter used to -// record int64 measurements that produces no telemetry. -func (Meter) Int64ObservableUpDownCounter(string, ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { - return Int64ObservableUpDownCounter{}, nil -} - -// Int64ObservableGauge returns an ObservableGauge used to record int64 -// measurements that produces no telemetry. 
-func (Meter) Int64ObservableGauge(string, ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { - return Int64ObservableGauge{}, nil -} - -// Float64Counter returns a Counter used to record int64 measurements that -// produces no telemetry. -func (Meter) Float64Counter(string, ...metric.Float64CounterOption) (metric.Float64Counter, error) { - return Float64Counter{}, nil -} - -// Float64UpDownCounter returns an UpDownCounter used to record int64 -// measurements that produces no telemetry. -func (Meter) Float64UpDownCounter(string, ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) { - return Float64UpDownCounter{}, nil -} - -// Float64Histogram returns a Histogram used to record int64 measurements that -// produces no telemetry. -func (Meter) Float64Histogram(string, ...metric.Float64HistogramOption) (metric.Float64Histogram, error) { - return Float64Histogram{}, nil -} - -// Float64Gauge returns a Gauge used to record float64 measurements that -// produces no telemetry. -func (Meter) Float64Gauge(string, ...metric.Float64GaugeOption) (metric.Float64Gauge, error) { - return Float64Gauge{}, nil -} - -// Float64ObservableCounter returns an ObservableCounter used to record int64 -// measurements that produces no telemetry. -func (Meter) Float64ObservableCounter(string, ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { - return Float64ObservableCounter{}, nil -} - -// Float64ObservableUpDownCounter returns an ObservableUpDownCounter used to -// record int64 measurements that produces no telemetry. -func (Meter) Float64ObservableUpDownCounter(string, ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { - return Float64ObservableUpDownCounter{}, nil -} - -// Float64ObservableGauge returns an ObservableGauge used to record int64 -// measurements that produces no telemetry. -func (Meter) Float64ObservableGauge(string, ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { - return Float64ObservableGauge{}, nil -} - -// RegisterCallback performs no operation. -func (Meter) RegisterCallback(metric.Callback, ...metric.Observable) (metric.Registration, error) { - return Registration{}, nil -} - -// Observer acts as a recorder of measurements for multiple instruments in a -// Callback, it performing no operation. -type Observer struct{ embedded.Observer } - -// ObserveFloat64 performs no operation. -func (Observer) ObserveFloat64(metric.Float64Observable, float64, ...metric.ObserveOption) { -} - -// ObserveInt64 performs no operation. -func (Observer) ObserveInt64(metric.Int64Observable, int64, ...metric.ObserveOption) { -} - -// Registration is the registration of a Callback with a No-Op Meter. -type Registration struct{ embedded.Registration } - -// Unregister unregisters the Callback the Registration represents with the -// No-Op Meter. This will always return nil because the No-Op Meter performs no -// operation, including hold any record of registrations. -func (Registration) Unregister() error { return nil } - // Int64Counter is an OpenTelemetry Counter used to record int64 measurements. -// It produces no telemetry. 
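+// Counters for the recognized otelcol receiver metrics are mirrored into the
+// injected Prometheus registry; all other instrument names fall back to the
+// embedded noop implementations. A minimal sketch of the flow, assuming a
+// fresh registry (all other names as defined in this file):
+//
+//	mp := NewMeterProvider(prometheus.NewRegistry())
+//	c, _ := mp.Meter("shim").Int64Counter(otelcolAcceptedSpansMetricName)
+//	c.Add(context.Background(), 5) // adds 5 to tempo_receiver_accepted_spans
+//
+// Without receiver/transport attributes on the Add call, the counter's
+// labels are empty strings.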
type Int64Counter struct { embedded.Int64Counter - Name string + Name string + metrics *metrics } func (r Int64Counter) Add(_ context.Context, value int64, options ...metric.AddOption) { @@ -220,113 +124,9 @@ func (r Int64Counter) Add(_ context.Context, value int64, options ...metric.AddO } switch r.Name { - case "receiver_accepted_spans": - receiverAcceptedSpans.WithLabelValues(receiver, transport).Add(float64(value)) - case "receiver_refused_spans": - receiverRefusedSpans.WithLabelValues(receiver, transport).Add(float64(value)) + case otelcolAcceptedSpansMetricName: + r.metrics.receiverAcceptedSpans.WithLabelValues(receiver, transport).Add(float64(value)) + case otelcolRefusedSpansMetricName: + r.metrics.receiverRefusedSpans.WithLabelValues(receiver, transport).Add(float64(value)) } } - -// Float64Counter is an OpenTelemetry Counter used to record float64 -// measurements. It produces no telemetry. -type Float64Counter struct{ embedded.Float64Counter } - -// Add performs no operation. -func (Float64Counter) Add(context.Context, float64, ...metric.AddOption) {} - -// Int64UpDownCounter is an OpenTelemetry UpDownCounter used to record int64 -// measurements. It produces no telemetry. -type Int64UpDownCounter struct{ embedded.Int64UpDownCounter } - -// Add performs no operation. -func (Int64UpDownCounter) Add(context.Context, int64, ...metric.AddOption) {} - -// Float64UpDownCounter is an OpenTelemetry UpDownCounter used to record -// float64 measurements. It produces no telemetry. -type Float64UpDownCounter struct{ embedded.Float64UpDownCounter } - -// Add performs no operation. -func (Float64UpDownCounter) Add(context.Context, float64, ...metric.AddOption) {} - -// Int64Histogram is an OpenTelemetry Histogram used to record int64 -// measurements. It produces no telemetry. -type Int64Histogram struct{ embedded.Int64Histogram } - -// Record performs no operation. -func (Int64Histogram) Record(context.Context, int64, ...metric.RecordOption) {} - -// Float64Histogram is an OpenTelemetry Histogram used to record float64 -// measurements. It produces no telemetry. -type Float64Histogram struct{ embedded.Float64Histogram } - -// Record performs no operation. -func (Float64Histogram) Record(context.Context, float64, ...metric.RecordOption) {} - -// Int64Gauge is an OpenTelemetry Gauge used to record instantaneous int64 -// measurements. It produces no telemetry. -type Int64Gauge struct{ embedded.Int64Gauge } - -// Record performs no operation. -func (Int64Gauge) Record(context.Context, int64, ...metric.RecordOption) {} - -// Float64Gauge is an OpenTelemetry Gauge used to record instantaneous float64 -// measurements. It produces no telemetry. -type Float64Gauge struct{ embedded.Float64Gauge } - -// Record performs no operation. -func (Float64Gauge) Record(context.Context, float64, ...metric.RecordOption) {} - -// Int64ObservableCounter is an OpenTelemetry ObservableCounter used to record -// int64 measurements. It produces no telemetry. -type Int64ObservableCounter struct { - metric.Int64Observable - embedded.Int64ObservableCounter -} - -// Float64ObservableCounter is an OpenTelemetry ObservableCounter used to record -// float64 measurements. It produces no telemetry. -type Float64ObservableCounter struct { - metric.Float64Observable - embedded.Float64ObservableCounter -} - -// Int64ObservableGauge is an OpenTelemetry ObservableGauge used to record -// int64 measurements. It produces no telemetry. 
-type Int64ObservableGauge struct { - metric.Int64Observable - embedded.Int64ObservableGauge -} - -// Float64ObservableGauge is an OpenTelemetry ObservableGauge used to record -// float64 measurements. It produces no telemetry. -type Float64ObservableGauge struct { - metric.Float64Observable - embedded.Float64ObservableGauge -} - -// Int64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter -// used to record int64 measurements. It produces no telemetry. -type Int64ObservableUpDownCounter struct { - metric.Int64Observable - embedded.Int64ObservableUpDownCounter -} - -// Float64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter -// used to record float64 measurements. It produces no telemetry. -type Float64ObservableUpDownCounter struct { - metric.Float64Observable - embedded.Float64ObservableUpDownCounter -} - -// Int64Observer is a recorder of int64 measurements that performs no operation. -type Int64Observer struct{ embedded.Int64Observer } - -// Observe performs no operation. -func (Int64Observer) Observe(int64, ...metric.ObserveOption) {} - -// Float64Observer is a recorder of float64 measurements that performs no -// operation. -type Float64Observer struct{ embedded.Float64Observer } - -// Observe performs no operation. -func (Float64Observer) Observe(float64, ...metric.ObserveOption) {} diff --git a/modules/distributor/receiver/metrics_provider_test.go b/modules/distributor/receiver/metrics_provider_test.go index 0c7ce7a2fa2..95da5882323 100644 --- a/modules/distributor/receiver/metrics_provider_test.go +++ b/modules/distributor/receiver/metrics_provider_test.go @@ -4,12 +4,13 @@ import ( "context" "testing" + "github.com/prometheus/client_golang/prometheus" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" ) func BenchmarkMetricsProvider(b *testing.B) { - meterProvider := NewMeterProvider() + meterProvider := NewMeterProvider(prometheus.NewPedanticRegistry()) meter := meterProvider.Meter("test") acceptedSpans, _ := meter.Int64Counter("receiver_accepted_spans") c := context.Background() diff --git a/modules/distributor/receiver/shim.go b/modules/distributor/receiver/shim.go index 563f9b7af60..b11f3f0455f 100644 --- a/modules/distributor/receiver/shim.go +++ b/modules/distributor/receiver/shim.go @@ -14,6 +14,7 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver" + "github.com/prometheus/client_golang/prometheus" prom_client "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "go.opentelemetry.io/collector/component" @@ -142,7 +143,7 @@ func (m *mapProvider) Scheme() string { return "mock" } func (m *mapProvider) Shutdown(context.Context) error { return nil } -func New(receiverCfg map[string]interface{}, pusher TracesPusher, middleware Middleware, retryAfterDuration time.Duration, logLevel dslog.Level) (services.Service, error) { +func New(receiverCfg map[string]interface{}, pusher TracesPusher, middleware Middleware, retryAfterDuration time.Duration, logLevel dslog.Level, reg prometheus.Registerer) (services.Service, error) { shim := &receiversShim{ pusher: pusher, logger: log.NewRateLimitedLogger(logsPerSecond, level.Error(log.Logger)), @@ -235,7 +236,7 @@ func New(receiverCfg map[string]interface{}, pusher TracesPusher, middleware Mid nopType := 
component.MustNewType("tempo") traceProvider := tracenoop.NewTracerProvider() - meterProvider := NewMeterProvider() + meterProvider := NewMeterProvider(reg) // todo: propagate a real context? translate our log configuration into zap? ctx := context.Background() for componentID, cfg := range conf.Receivers { @@ -271,17 +272,15 @@ func New(receiverCfg map[string]interface{}, pusher TracesPusher, middleware Mid cfg = jaegerRecvCfg } - params := receiver.CreateSettings{ + params := receiver.Settings{ ID: component.NewIDWithName(nopType, fmt.Sprintf("%s_receiver", componentID.Type().String())), TelemetrySettings: component.TelemetrySettings{ Logger: zapLogger, TracerProvider: traceProvider, MeterProvider: meterProvider, - ReportStatus: func(*component.StatusEvent) { - }, }, } - receiver, err := factoryBase.CreateTracesReceiver(ctx, params, cfg, middleware.Wrap(shim)) + receiver, err := factoryBase.CreateTraces(ctx, params, cfg, middleware.Wrap(shim)) if err != nil { return nil, err } @@ -367,10 +366,6 @@ func (r *receiversShim) GetFactory(component.Kind, component.Type) component.Fac // GetExtensions implements component.Host func (r *receiversShim) GetExtensions() map[component.ID]extension.Extension { return nil } -func (r *receiversShim) GetExporters() map[component.DataType]map[component.ID]component.Component { - return nil -} - // observability shims func newLogger(level dslog.Level) *zap.Logger { zapLevel := zapcore.InfoLevel diff --git a/modules/distributor/receiver/shim_test.go b/modules/distributor/receiver/shim_test.go index 22076743a50..970a09dcb46 100644 --- a/modules/distributor/receiver/shim_test.go +++ b/modules/distributor/receiver/shim_test.go @@ -1,17 +1,216 @@ package receiver import ( + "context" "errors" + "strings" "testing" "time" + dslog "github.com/grafana/dskit/log" + "github.com/grafana/dskit/services" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configgrpc" + "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/config/configtls" + "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/otlpexporter" + "go.opentelemetry.io/collector/exporter/otlphttpexporter" + "go.opentelemetry.io/collector/pdata/ptrace" + "go.opentelemetry.io/collector/pdata/testdata" + metricnoop "go.opentelemetry.io/otel/metric/noop" + tracenoop "go.opentelemetry.io/otel/trace/noop" + "go.uber.org/zap" "google.golang.org/genproto/googleapis/rpc/errdetails" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "google.golang.org/protobuf/types/known/durationpb" + + "github.com/grafana/tempo/pkg/tempopb" ) +// These tests use the OpenTelemetry Collector Exporters to validate the different protocols +func TestShim_integration(t *testing.T) { + randomTraces := testdata.GenerateTraces(5) + + testCases := []struct { + name string + receiverCfg map[string]interface{} + factory exporter.Factory + exporterCfg component.Config + expectedTransport string + }{ + { + name: "otlpexporter", + receiverCfg: map[string]interface{}{ + "otlp": map[string]interface{}{ + "protocols": map[string]interface{}{ + "grpc": nil, + }, + }, + }, + factory: otlpexporter.NewFactory(), + exporterCfg: &otlpexporter.Config{ + ClientConfig: configgrpc.ClientConfig{ + Endpoint: "127.0.0.1:4317", + TLSSetting: configtls.ClientConfig{ + Insecure: 
true, + }, + }, + }, + expectedTransport: "grpc", + }, + { + name: "otlphttpexporter - JSON encoding", + receiverCfg: map[string]interface{}{ + "otlp": map[string]interface{}{ + "protocols": map[string]interface{}{ + "http": nil, + }, + }, + }, + factory: otlphttpexporter.NewFactory(), + exporterCfg: &otlphttpexporter.Config{ + ClientConfig: confighttp.ClientConfig{ + Endpoint: "http://127.0.0.1:4318", + }, + Encoding: otlphttpexporter.EncodingJSON, + }, + expectedTransport: "http", + }, + { + name: "otlphttpexporter - proto encoding", + receiverCfg: map[string]interface{}{ + "otlp": map[string]interface{}{ + "protocols": map[string]interface{}{ + "http": nil, + }, + }, + }, + factory: otlphttpexporter.NewFactory(), + exporterCfg: &otlphttpexporter.Config{ + ClientConfig: confighttp.ClientConfig{ + Endpoint: "http://127.0.0.1:4318", + }, + Encoding: otlphttpexporter.EncodingProto, + }, + expectedTransport: "http", + }, + } + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + pusher := &capturingPusher{} + reg := prometheus.NewPedanticRegistry() + + stopShim := runReceiverShim(t, testCase.receiverCfg, pusher, reg) + defer stopShim() + + exporter, stopExporter := runOTelExporter(t, testCase.factory, testCase.exporterCfg) + defer stopExporter() + + err := exporter.ConsumeTraces(context.Background(), randomTraces) + assert.NoError(t, err) + + receivedTraces := pusher.GetAndClearTraces() + // We should only have received one push request + require.Len(t, receivedTraces, 1) + + assert.Equal(t, randomTraces, receivedTraces[0]) + + count, err := testutil.GatherAndCount(reg, "tempo_receiver_accepted_spans", "tempo_receiver_refused_spans") + assert.NoError(t, err) + assert.Equal(t, 2, count) + + expected := ` +# HELP tempo_receiver_accepted_spans Number of spans successfully pushed into the pipeline. +# TYPE tempo_receiver_accepted_spans counter +tempo_receiver_accepted_spans{receiver="tempo/otlp_receiver", transport=""} 5 +# HELP tempo_receiver_refused_spans Number of spans that could not be pushed into the pipeline. 
+# TYPE tempo_receiver_refused_spans counter
+tempo_receiver_refused_spans{receiver="tempo/otlp_receiver", transport=""} 0
+`
+			expectedWithTransport := strings.ReplaceAll(expected, `transport=""`, `transport="`+testCase.expectedTransport+`"`)
+
+			err = testutil.GatherAndCompare(reg, strings.NewReader(expectedWithTransport), "tempo_receiver_accepted_spans", "tempo_receiver_refused_spans")
+			assert.NoError(t, err)
+		})
+	}
+}
+
+func runReceiverShim(t *testing.T, receiverCfg map[string]interface{}, pusher TracesPusher, reg prometheus.Registerer) func() {
+	level := dslog.Level{}
+	_ = level.Set("info")
+
+	shim, err := New(receiverCfg, pusher, FakeTenantMiddleware(), 0, level, reg)
+	require.NoError(t, err)
+
+	err = services.StartAndAwaitRunning(context.Background(), shim)
+	require.NoError(t, err)
+
+	return func() {
+		err := services.StopAndAwaitTerminated(context.Background(), shim)
+		if errors.Is(err, context.Canceled) {
+			return
+		}
+		assert.NoError(t, err)
+	}
+}
+
+func runOTelExporter(t *testing.T, factory exporter.Factory, cfg component.Config) (exporter.Traces, func()) {
+	exporter, err := factory.CreateTraces(
+		context.Background(),
+		exporter.Settings{
+			ID: component.MustNewID("test"),
+			TelemetrySettings: component.TelemetrySettings{
+				Logger:         zap.NewNop(),
+				TracerProvider: tracenoop.NewTracerProvider(),
+				MeterProvider:  metricnoop.NewMeterProvider(),
+			},
+		},
+		cfg,
+	)
+	require.NoError(t, err)
+
+	err = exporter.Start(context.Background(), &mockHost{})
+	require.NoError(t, err)
+
+	return exporter, func() {
+		err = exporter.Shutdown(context.Background())
+		assert.NoError(t, err, "traces exporter failed to shut down")
+	}
+}
+
+type mockHost struct{}
+
+var _ component.Host = (*mockHost)(nil)
+
+func (m *mockHost) GetFactory(component.Kind, component.Type) component.Factory {
+	panic("implement me")
+}
+
+func (m *mockHost) GetExtensions() map[component.ID]component.Component {
+	panic("implement me")
+}
+
+type capturingPusher struct {
+	traces []ptrace.Traces
+}
+
+func (p *capturingPusher) GetAndClearTraces() []ptrace.Traces {
+	traces := p.traces
+	p.traces = nil
+	return traces
+}
+
+func (p *capturingPusher) PushTraces(_ context.Context, t ptrace.Traces) (*tempopb.PushResponse, error) {
+	p.traces = append(p.traces, t)
+	return &tempopb.PushResponse{}, nil
+}
+
 // TestWrapRetryableError confirms that errors are wrapped as expected
 func TestWrapRetryableError(t *testing.T) {
 	// no wrapping b/c not a grpc error
diff --git a/modules/frontend/combiner/metrics_query_range.go b/modules/frontend/combiner/metrics_query_range.go
index d542ece7a75..c17b4a84d98 100644
--- a/modules/frontend/combiner/metrics_query_range.go
+++ b/modules/frontend/combiner/metrics_query_range.go
@@ -1,6 +1,8 @@
 package combiner
 
 import (
+	"math"
+	"slices"
 	"sort"
 	"strings"
@@ -42,6 +44,8 @@ func NewQueryRange(req *tempopb.QueryRangeRequest, trackDiffs bool) (Combiner, e
 			resp = &tempopb.QueryRangeResponse{}
 		}
 		sortResponse(resp)
+		attachExemplars(req, resp)
+
 		return resp, nil
 	},
 	diff: func(_ *tempopb.QueryRangeResponse) (*tempopb.QueryRangeResponse, error) {
@@ -50,6 +54,8 @@ func NewQueryRange(req *tempopb.QueryRangeRequest, trackDiffs bool) (Combiner, e
 			resp = &tempopb.QueryRangeResponse{}
 		}
 		sortResponse(resp)
+		attachExemplars(req, resp)
+
 		return resp, nil
 	},
 	}
@@ -81,3 +87,32 @@ func sortResponse(res *tempopb.QueryRangeResponse) {
 	})
 }
 }
+
+// attachExemplars to the final series outputs. Placeholder exemplars for things like rate()
+// have NaNs, and we can't attach them until the very end.
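+// Samples are already in timestamp order by the time this runs, which is
+// what makes the per-exemplar binary search below valid; a linear scan would
+// cost O(samples) per exemplar.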
+func attachExemplars(req *tempopb.QueryRangeRequest, res *tempopb.QueryRangeResponse) { + for _, ss := range res.Series { + for i, e := range ss.Exemplars { + + // Only needed for NaNs + if !math.IsNaN(e.Value) { + continue + } + + exemplarInterval := traceql.IntervalOfMs(e.TimestampMs, req.Start, req.End, req.Step) + + // Look for sample in the same slot. + // BinarySearch is possible because all samples were sorted previously. + j, ok := slices.BinarySearchFunc(ss.Samples, exemplarInterval, func(s tempopb.Sample, _ int) int { + // NOTE - Look for sample in same interval, not same value. + si := traceql.IntervalOfMs(s.TimestampMs, req.Start, req.End, req.Step) + + // This returns negative, zero, or positive + return si - exemplarInterval + }) + if ok { + ss.Exemplars[i].Value = ss.Samples[j].Value + } + } + } +} diff --git a/modules/frontend/combiner/metrics_query_range_test.go b/modules/frontend/combiner/metrics_query_range_test.go new file mode 100644 index 00000000000..b90c1054739 --- /dev/null +++ b/modules/frontend/combiner/metrics_query_range_test.go @@ -0,0 +1,123 @@ +package combiner + +import ( + "math" + "math/rand/v2" + "testing" + "time" + + "github.com/grafana/tempo/pkg/tempopb" + "github.com/grafana/tempo/pkg/traceql" + "github.com/stretchr/testify/require" +) + +func TestAttachExemplars(t *testing.T) { + start := uint64(10 * time.Second) + end := uint64(20 * time.Second) + step := traceql.DefaultQueryRangeStep(start, end) + + req := &tempopb.QueryRangeRequest{ + Start: start, + End: end, + Step: step, + } + + tcs := []struct { + name string + include func(i int) bool + }{ + { + name: "include all", + include: func(_ int) bool { return true }, + }, + { + name: "include none", + include: func(_ int) bool { return false }, + }, + { + name: "include every other", + include: func(i int) bool { return i%2 == 0 }, + }, + { + name: "include rando", + include: func(_ int) bool { return rand.Int()%2 == 0 }, + }, + } + + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + resp, expectedSeries := buildSeriesForExemplarTest(start, end, step, tc.include) + + attachExemplars(req, resp) + require.Equal(t, expectedSeries, resp.Series) + }) + } +} + +func BenchmarkAttachExemplars(b *testing.B) { + start := uint64(1 * time.Second) + end := uint64(10000 * time.Second) + step := uint64(time.Second) + + req := &tempopb.QueryRangeRequest{ + Start: start, + End: end, + Step: step, + } + + resp, _ := buildSeriesForExemplarTest(start, end, step, func(_ int) bool { return true }) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + attachExemplars(req, resp) + } +} + +func buildSeriesForExemplarTest(start, end, step uint64, include func(i int) bool) (*tempopb.QueryRangeResponse, []*tempopb.TimeSeries) { + resp := &tempopb.QueryRangeResponse{ + Series: []*tempopb.TimeSeries{ + {}, + }, + } + + expectedSeries := []*tempopb.TimeSeries{ + {}, + } + + // populate series and expected series based on step + idx := 0 + for i := start; i < end; i += step { + idx++ + tsMS := int64(i / uint64(time.Millisecond)) + val := float64(idx) + + sample := tempopb.Sample{ + TimestampMs: tsMS, + Value: val, + } + nanExemplar := tempopb.Exemplar{ + TimestampMs: tsMS, + Value: math.NaN(), + } + valExamplar := tempopb.Exemplar{ + TimestampMs: tsMS, + Value: val, + } + + includeExemplar := include(idx) + + // copy the sample and nan exemplar into the response. 
the nan exemplar should be overwritten + resp.Series[0].Samples = append(resp.Series[0].Samples, sample) + if includeExemplar { + resp.Series[0].Exemplars = append(resp.Series[0].Exemplars, nanExemplar) + } + + // copy the sample and val exemplar into the expected response + expectedSeries[0].Samples = append(expectedSeries[0].Samples, sample) + if includeExemplar { + expectedSeries[0].Exemplars = append(expectedSeries[0].Exemplars, valExamplar) + } + } + + return resp, expectedSeries +} diff --git a/modules/frontend/combiner/search_tag_values.go b/modules/frontend/combiner/search_tag_values.go index 253033dec33..23b68e8351b 100644 --- a/modules/frontend/combiner/search_tag_values.go +++ b/modules/frontend/combiner/search_tag_values.go @@ -12,9 +12,9 @@ var ( _ GRPCCombiner[*tempopb.SearchTagValuesV2Response] = (*genericCombiner[*tempopb.SearchTagValuesV2Response])(nil) ) -func NewSearchTagValues(limitBytes int) Combiner { +func NewSearchTagValues(maxDataBytes int, maxTagsValues uint32, staleValueThreshold uint32) Combiner { // Distinct collector with no limit - d := collector.NewDistinctStringWithDiff(limitBytes) + d := collector.NewDistinctStringWithDiff(maxDataBytes, maxTagsValues, staleValueThreshold) inspectedBytes := atomic.NewUint64(0) c := &genericCombiner[*tempopb.SearchTagValuesResponse]{ @@ -56,13 +56,13 @@ func NewSearchTagValues(limitBytes int) Combiner { return c } -func NewTypedSearchTagValues(limitBytes int) GRPCCombiner[*tempopb.SearchTagValuesResponse] { - return NewSearchTagValues(limitBytes).(GRPCCombiner[*tempopb.SearchTagValuesResponse]) +func NewTypedSearchTagValues(maxDataBytes int, maxTagsValues uint32, staleValueThreshold uint32) GRPCCombiner[*tempopb.SearchTagValuesResponse] { + return NewSearchTagValues(maxDataBytes, maxTagsValues, staleValueThreshold).(GRPCCombiner[*tempopb.SearchTagValuesResponse]) } -func NewSearchTagValuesV2(limitBytes int) Combiner { +func NewSearchTagValuesV2(maxDataBytes int, maxTagsValues uint32, staleValueThreshold uint32) Combiner { // Distinct collector with no limit and diff enabled - d := collector.NewDistinctValueWithDiff(limitBytes, func(tv tempopb.TagValue) int { return len(tv.Type) + len(tv.Value) }) + d := collector.NewDistinctValueWithDiff(maxDataBytes, maxTagsValues, staleValueThreshold, func(tv tempopb.TagValue) int { return len(tv.Type) + len(tv.Value) }) inspectedBytes := atomic.NewUint64(0) c := &genericCombiner[*tempopb.SearchTagValuesV2Response]{ @@ -113,6 +113,6 @@ func NewSearchTagValuesV2(limitBytes int) Combiner { return c } -func NewTypedSearchTagValuesV2(limitBytes int) GRPCCombiner[*tempopb.SearchTagValuesV2Response] { - return NewSearchTagValuesV2(limitBytes).(GRPCCombiner[*tempopb.SearchTagValuesV2Response]) +func NewTypedSearchTagValuesV2(maxDataBytes int, maxTagsValues uint32, staleValueThreshold uint32) GRPCCombiner[*tempopb.SearchTagValuesV2Response] { + return NewSearchTagValuesV2(maxDataBytes, maxTagsValues, staleValueThreshold).(GRPCCombiner[*tempopb.SearchTagValuesV2Response]) } diff --git a/modules/frontend/combiner/search_tags.go b/modules/frontend/combiner/search_tags.go index 224a5706b1a..8653536dc2d 100644 --- a/modules/frontend/combiner/search_tags.go +++ b/modules/frontend/combiner/search_tags.go @@ -12,8 +12,8 @@ var ( _ GRPCCombiner[*tempopb.SearchTagsV2Response] = (*genericCombiner[*tempopb.SearchTagsV2Response])(nil) ) -func NewSearchTags(limitBytes int) Combiner { - d := collector.NewDistinctStringWithDiff(limitBytes) +func NewSearchTags(maxDataBytes int, maxTagsPerScope uint32, 
staleValueThreshold uint32) Combiner { + d := collector.NewDistinctStringWithDiff(maxDataBytes, maxTagsPerScope, staleValueThreshold) inspectedBytes := atomic.NewUint64(0) c := &genericCombiner[*tempopb.SearchTagsResponse]{ @@ -56,13 +56,13 @@ func NewSearchTags(limitBytes int) Combiner { return c } -func NewTypedSearchTags(limitBytes int) GRPCCombiner[*tempopb.SearchTagsResponse] { - return NewSearchTags(limitBytes).(GRPCCombiner[*tempopb.SearchTagsResponse]) +func NewTypedSearchTags(maxDataBytes int, maxTagsPerScope uint32, staleValueThreshold uint32) GRPCCombiner[*tempopb.SearchTagsResponse] { + return NewSearchTags(maxDataBytes, maxTagsPerScope, staleValueThreshold).(GRPCCombiner[*tempopb.SearchTagsResponse]) } -func NewSearchTagsV2(limitBytes int) Combiner { +func NewSearchTagsV2(maxDataBytes int, maxTagsPerScope uint32, staleValueThreshold uint32) Combiner { // Distinct collector map to collect scopes and scope values - distinctValues := collector.NewScopedDistinctStringWithDiff(limitBytes) + distinctValues := collector.NewScopedDistinctStringWithDiff(maxDataBytes, maxTagsPerScope, staleValueThreshold) inspectedBytes := atomic.NewUint64(0) c := &genericCombiner[*tempopb.SearchTagsV2Response]{ @@ -121,6 +121,6 @@ func NewSearchTagsV2(limitBytes int) Combiner { return c } -func NewTypedSearchTagsV2(limitBytes int) GRPCCombiner[*tempopb.SearchTagsV2Response] { - return NewSearchTagsV2(limitBytes).(GRPCCombiner[*tempopb.SearchTagsV2Response]) +func NewTypedSearchTagsV2(maxDataBytes int, maxTagsPerScope uint32, staleValueThreshold uint32) GRPCCombiner[*tempopb.SearchTagsV2Response] { + return NewSearchTagsV2(maxDataBytes, maxTagsPerScope, staleValueThreshold).(GRPCCombiner[*tempopb.SearchTagsV2Response]) } diff --git a/modules/frontend/combiner/search_tags_test.go b/modules/frontend/combiner/search_tags_test.go index 3fed17bc8f3..cbac1dd118c 100644 --- a/modules/frontend/combiner/search_tags_test.go +++ b/modules/frontend/combiner/search_tags_test.go @@ -13,8 +13,10 @@ import ( func TestTagsCombiner(t *testing.T) { tests := []struct { name string - factory func(int) Combiner - limit int + factory func(int, uint32, uint32) Combiner + limitBytes int + maxTagsValues uint32 + maxCacheHits uint32 result1 proto.Message result2 proto.Message expectedResult proto.Message @@ -31,7 +33,7 @@ func TestTagsCombiner(t *testing.T) { expectedResult: &tempopb.SearchTagsResponse{TagNames: []string{"tag1", "tag2", "tag3"}, Metrics: &tempopb.MetadataMetrics{}}, actualResult: &tempopb.SearchTagsResponse{}, sort: func(m proto.Message) { sort.Strings(m.(*tempopb.SearchTagsResponse).TagNames) }, - limit: 100, + limitBytes: 100, }, { name: "SearchTagsV2", @@ -49,7 +51,7 @@ func TestTagsCombiner(t *testing.T) { return scopes[i].Name < scopes[j].Name }) }, - limit: 100, + limitBytes: 100, }, { name: "SearchTagValues", @@ -59,7 +61,7 @@ func TestTagsCombiner(t *testing.T) { expectedResult: &tempopb.SearchTagValuesResponse{TagValues: []string{"tag1", "tag2", "tag3"}, Metrics: &tempopb.MetadataMetrics{}}, actualResult: &tempopb.SearchTagValuesResponse{}, sort: func(m proto.Message) { sort.Strings(m.(*tempopb.SearchTagValuesResponse).TagValues) }, - limit: 100, + limitBytes: 100, }, { name: "SearchTagValuesV2", @@ -73,7 +75,7 @@ func TestTagsCombiner(t *testing.T) { return m.(*tempopb.SearchTagValuesV2Response).TagValues[i].Value < m.(*tempopb.SearchTagValuesV2Response).TagValues[j].Value }) }, - limit: 100, + limitBytes: 100, }, // limits { @@ -85,7 +87,7 @@ func TestTagsCombiner(t *testing.T) { actualResult: 
&tempopb.SearchTagsResponse{}, sort: func(m proto.Message) { sort.Strings(m.(*tempopb.SearchTagsResponse).TagNames) }, expectedShouldQuit: true, - limit: 5, + limitBytes: 5, }, { name: "SearchTagsV2 - limited", @@ -104,7 +106,7 @@ func TestTagsCombiner(t *testing.T) { }) }, expectedShouldQuit: true, - limit: 2, + limitBytes: 2, }, { name: "SearchTagValues - limited", @@ -115,7 +117,7 @@ func TestTagsCombiner(t *testing.T) { actualResult: &tempopb.SearchTagValuesResponse{}, sort: func(m proto.Message) { sort.Strings(m.(*tempopb.SearchTagValuesResponse).TagValues) }, expectedShouldQuit: true, - limit: 5, + limitBytes: 5, }, { name: "SearchTagValuesV2 - limited", @@ -130,7 +132,22 @@ func TestTagsCombiner(t *testing.T) { }) }, expectedShouldQuit: true, - limit: 10, + limitBytes: 10, + }, + { + name: "SearchTagValuesV2 - max values limited", + factory: NewSearchTagValuesV2, + result1: &tempopb.SearchTagValuesV2Response{TagValues: []*tempopb.TagValue{{Value: "v1", Type: "string"}}}, + result2: &tempopb.SearchTagValuesV2Response{TagValues: []*tempopb.TagValue{{Value: "v2", Type: "string"}, {Value: "v3", Type: "string"}}}, + expectedResult: &tempopb.SearchTagValuesV2Response{TagValues: []*tempopb.TagValue{{Value: "v1", Type: "string"}}, Metrics: &tempopb.MetadataMetrics{}}, + actualResult: &tempopb.SearchTagValuesV2Response{}, + sort: func(m proto.Message) { + sort.Slice(m.(*tempopb.SearchTagValuesV2Response).TagValues, func(i, j int) bool { + return m.(*tempopb.SearchTagValuesV2Response).TagValues[i].Value < m.(*tempopb.SearchTagValuesV2Response).TagValues[j].Value + }) + }, + expectedShouldQuit: true, + maxTagsValues: 1, }, // with metrics { @@ -141,7 +158,7 @@ func TestTagsCombiner(t *testing.T) { expectedResult: &tempopb.SearchTagsResponse{TagNames: []string{"tag1", "tag2", "tag3"}, Metrics: &tempopb.MetadataMetrics{InspectedBytes: 2}}, actualResult: &tempopb.SearchTagsResponse{}, sort: func(m proto.Message) { sort.Strings(m.(*tempopb.SearchTagsResponse).TagNames) }, - limit: 100, + limitBytes: 100, }, { name: "SearchTagsV2 - metrics", @@ -159,7 +176,7 @@ func TestTagsCombiner(t *testing.T) { return scopes[i].Name < scopes[j].Name }) }, - limit: 100, + limitBytes: 100, }, { name: "SearchTagValues - metrics", @@ -169,7 +186,7 @@ func TestTagsCombiner(t *testing.T) { expectedResult: &tempopb.SearchTagValuesResponse{TagValues: []string{"tag1", "tag2", "tag3"}, Metrics: &tempopb.MetadataMetrics{InspectedBytes: 2}}, actualResult: &tempopb.SearchTagValuesResponse{}, sort: func(m proto.Message) { sort.Strings(m.(*tempopb.SearchTagValuesResponse).TagValues) }, - limit: 100, + limitBytes: 100, }, { name: "SearchTagValuesV2 - metrics", @@ -183,12 +200,12 @@ func TestTagsCombiner(t *testing.T) { return m.(*tempopb.SearchTagValuesV2Response).TagValues[i].Value < m.(*tempopb.SearchTagValuesV2Response).TagValues[j].Value }) }, - limit: 100, + limitBytes: 100, }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - combiner := tc.factory(tc.limit) + combiner := tc.factory(tc.limitBytes, tc.maxTagsValues, tc.maxCacheHits) err := combiner.AddResponse(toHTTPResponse(t, tc.result1, 200)) assert.NoError(t, err) @@ -228,7 +245,7 @@ func metrics(message proto.Message) *tempopb.MetadataMetrics { } func TestTagsGRPCCombiner(t *testing.T) { - c := NewTypedSearchTags(0) + c := NewTypedSearchTags(0, 0, 0) res1 := &tempopb.SearchTagsResponse{TagNames: []string{"tag1"}, Metrics: &tempopb.MetadataMetrics{InspectedBytes: 1}} res2 := &tempopb.SearchTagsResponse{TagNames: []string{"tag1", "tag2"}, Metrics: 
&tempopb.MetadataMetrics{InspectedBytes: 1}} diff1 := &tempopb.SearchTagsResponse{TagNames: []string{"tag1"}, Metrics: &tempopb.MetadataMetrics{InspectedBytes: 1}} @@ -238,7 +255,7 @@ func TestTagsGRPCCombiner(t *testing.T) { } func TestTagsV2GRPCCombiner(t *testing.T) { - c := NewTypedSearchTagsV2(0) + c := NewTypedSearchTagsV2(0, 0, 0) res1 := &tempopb.SearchTagsV2Response{Scopes: []*tempopb.SearchTagsV2Scope{{Name: "scope1", Tags: []string{"tag1"}}}, Metrics: &tempopb.MetadataMetrics{InspectedBytes: 1}} res2 := &tempopb.SearchTagsV2Response{Scopes: []*tempopb.SearchTagsV2Scope{{Name: "scope1", Tags: []string{"tag1", "tag2"}}, {Name: "scope2", Tags: []string{"tag3"}}}, Metrics: &tempopb.MetadataMetrics{InspectedBytes: 1}} diff1 := &tempopb.SearchTagsV2Response{Scopes: []*tempopb.SearchTagsV2Scope{{Name: "scope1", Tags: []string{"tag1"}}}, Metrics: &tempopb.MetadataMetrics{InspectedBytes: 1}} @@ -255,7 +272,7 @@ func TestTagsV2GRPCCombiner(t *testing.T) { } func TestTagValuesGRPCCombiner(t *testing.T) { - c := NewTypedSearchTagValues(0) + c := NewTypedSearchTagValues(0, 0, 0) res1 := &tempopb.SearchTagValuesResponse{TagValues: []string{"tag1"}, Metrics: &tempopb.MetadataMetrics{InspectedBytes: 1}} res2 := &tempopb.SearchTagValuesResponse{TagValues: []string{"tag1", "tag2"}, Metrics: &tempopb.MetadataMetrics{InspectedBytes: 1}} diff1 := &tempopb.SearchTagValuesResponse{TagValues: []string{"tag1"}, Metrics: &tempopb.MetadataMetrics{InspectedBytes: 1}} @@ -265,7 +282,7 @@ func TestTagValuesGRPCCombiner(t *testing.T) { } func TestTagValuesV2GRPCCombiner(t *testing.T) { - c := NewTypedSearchTagValuesV2(0) + c := NewTypedSearchTagValuesV2(0, 0, 0) res1 := &tempopb.SearchTagValuesV2Response{TagValues: []*tempopb.TagValue{{Value: "v1", Type: "string"}}, Metrics: &tempopb.MetadataMetrics{InspectedBytes: 1}} res2 := &tempopb.SearchTagValuesV2Response{TagValues: []*tempopb.TagValue{{Value: "v1", Type: "string"}, {Value: "v2", Type: "string"}}, Metrics: &tempopb.MetadataMetrics{InspectedBytes: 1}} diff1 := &tempopb.SearchTagValuesV2Response{TagValues: []*tempopb.TagValue{{Value: "v1", Type: "string"}}, Metrics: &tempopb.MetadataMetrics{InspectedBytes: 1}} diff --git a/modules/frontend/config.go b/modules/frontend/config.go index f49534042c2..a6ac3d9f074 100644 --- a/modules/frontend/config.go +++ b/modules/frontend/config.go @@ -32,6 +32,9 @@ type Config struct { // A list of regexes for black listing requests, these will apply for every request regardless the endpoint URLDenyList []string `yaml:"url_deny_list,omitempty"` + // Maximum allowed size of the raw TraceQL Query expression in bytes + MaxQueryExpressionSizeBytes int `yaml:"max_query_expression_size_bytes,omitempty"` + // A list of headers allowed through the HTTP pipeline. Everything else will be stripped. 
AllowedHeaders []string `yaml:"-"` } @@ -79,6 +82,7 @@ func (cfg *Config) RegisterFlagsAndApplyDefaults(string, *flag.FlagSet) { ConcurrentRequests: defaultConcurrentRequests, TargetBytesPerRequest: defaultTargetBytesPerRequest, IngesterShards: 3, + MaxSpansPerSpanSet: 100, }, SLO: slo, } @@ -104,6 +108,8 @@ func (cfg *Config) RegisterFlagsAndApplyDefaults(string, *flag.FlagSet) { MaxTraceQLConditions: 4, } + // set default max query size to 128 KiB, queries larger than this will be rejected + cfg.MaxQueryExpressionSizeBytes = 128 * 1024 // enable multi tenant queries by default cfg.MultiTenantQueriesEnabled = true } diff --git a/modules/frontend/frontend.go b/modules/frontend/frontend.go index edd1ce9b540..f4c6f27f5ab 100644 --- a/modules/frontend/frontend.go +++ b/modules/frontend/frontend.go @@ -95,7 +95,7 @@ func New(cfg Config, next pipeline.RoundTripper, o overrides.Interface, reader t statusCodeWare := pipeline.NewStatusCodeAdjustWare() traceIDStatusCodeWare := pipeline.NewStatusCodeAdjustWareWithAllowedCode(http.StatusNotFound) urlDenyListWare := pipeline.NewURLDenyListWare(cfg.URLDenyList) - queryValidatorWare := pipeline.NewQueryValidatorWare() + queryValidatorWare := pipeline.NewQueryValidatorWare(cfg.MaxQueryExpressionSizeBytes) headerStripWare := pipeline.NewStripHeadersWare(cfg.AllowedHeaders) tracePipeline := pipeline.Build( diff --git a/modules/frontend/grpc_util.go b/modules/frontend/grpc_util.go new file mode 100644 index 00000000000..87db54a40de --- /dev/null +++ b/modules/frontend/grpc_util.go @@ -0,0 +1,31 @@ +package frontend + +import ( + "context" + "net/http" + + "google.golang.org/grpc/metadata" +) + +var copyHeaders = []string{ + "Authorization", + "X-Scope-OrgID", +} + +func headersFromGrpcContext(ctx context.Context) (hs http.Header) { + hs = http.Header{} + + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + // Tests may not have metadata so skip + return + } + + for _, h := range copyHeaders { + if v := md.Get(h); len(v) > 0 { + hs[h] = v + } + } + + return +} diff --git a/modules/frontend/metrics_query_handler.go b/modules/frontend/metrics_query_handler.go index 46a83816039..5794c7fdbe9 100644 --- a/modules/frontend/metrics_query_handler.go +++ b/modules/frontend/metrics_query_handler.go @@ -32,6 +32,8 @@ func newQueryInstantStreamingGRPCHandler(cfg Config, next pipeline.AsyncRoundTri return err } + headers := headersFromGrpcContext(ctx) + // -------------------------------------------------- // Rewrite into a query_range request. 
// -------------------------------------------------- @@ -43,7 +45,7 @@ func newQueryInstantStreamingGRPCHandler(cfg Config, next pipeline.AsyncRoundTri } httpReq := api.BuildQueryRangeRequest(&http.Request{ URL: &url.URL{Path: downstreamPath}, - Header: http.Header{}, + Header: headers, Body: io.NopCloser(bytes.NewReader([]byte{})), }, qr, "") // dedicated cols are never passed from the caller httpReq = httpReq.Clone(ctx) diff --git a/modules/frontend/metrics_query_range_handler.go b/modules/frontend/metrics_query_range_handler.go index 54ffc29ff67..34100e4bd1e 100644 --- a/modules/frontend/metrics_query_range_handler.go +++ b/modules/frontend/metrics_query_range_handler.go @@ -17,6 +17,7 @@ import ( "github.com/grafana/tempo/pkg/api" "github.com/grafana/tempo/pkg/tempopb" + "github.com/grafana/tempo/pkg/traceql" ) // newQueryRangeStreamingGRPCHandler returns a handler that streams results from the HTTP handler @@ -25,13 +26,21 @@ func newQueryRangeStreamingGRPCHandler(cfg Config, next pipeline.AsyncRoundTripp downstreamPath := path.Join(apiPrefix, api.PathMetricsQueryRange) return func(req *tempopb.QueryRangeRequest, srv tempopb.StreamingQuerier_MetricsQueryRangeServer) error { + ctx := srv.Context() + + headers := headersFromGrpcContext(ctx) + + // default step if not set + if req.Step == 0 { + req.Step = traceql.DefaultQueryRangeStep(req.Start, req.End) + } + httpReq := api.BuildQueryRangeRequest(&http.Request{ URL: &url.URL{Path: downstreamPath}, - Header: http.Header{}, + Header: headers, Body: io.NopCloser(bytes.NewReader([]byte{})), }, req, "") // dedicated cols are never passed from the caller - ctx := srv.Context() httpReq = httpReq.WithContext(ctx) tenant, _ := user.ExtractOrgID(ctx) start := time.Now() diff --git a/modules/frontend/metrics_query_range_sharder.go b/modules/frontend/metrics_query_range_sharder.go index 570764f2f32..5463b3bd48f 100644 --- a/modules/frontend/metrics_query_range_sharder.go +++ b/modules/frontend/metrics_query_range_sharder.go @@ -79,6 +79,13 @@ func (s queryRangeSharder) RoundTrip(pipelineRequest pipeline.Request) (pipeline return pipeline.NewBadRequest(err), nil } + if expr.IsNoop() { + // Empty response + ch := make(chan pipeline.Request, 2) + close(ch) + return pipeline.NewAsyncSharderChan(ctx, s.cfg.ConcurrentRequests, ch, nil, s.next), nil + } + tenantID, err := user.ExtractOrgID(ctx) if err != nil { return pipeline.NewBadRequest(err), nil diff --git a/modules/frontend/pipeline/async_query_validator_middleware.go b/modules/frontend/pipeline/async_query_validator_middleware.go index 6aa2fd3f0fa..c0a3f7d0054 100644 --- a/modules/frontend/pipeline/async_query_validator_middleware.go +++ b/modules/frontend/pipeline/async_query_validator_middleware.go @@ -9,13 +9,15 @@ import ( ) type queryValidatorWare struct { - next AsyncRoundTripper[combiner.PipelineResponse] + next AsyncRoundTripper[combiner.PipelineResponse] + maxQuerySizeBytes int } -func NewQueryValidatorWare() AsyncMiddleware[combiner.PipelineResponse] { +func NewQueryValidatorWare(maxQuerySizeBytes int) AsyncMiddleware[combiner.PipelineResponse] { return AsyncMiddlewareFunc[combiner.PipelineResponse](func(next AsyncRoundTripper[combiner.PipelineResponse]) AsyncRoundTripper[combiner.PipelineResponse] { return &queryValidatorWare{ - next: next, + next: next, + maxQuerySizeBytes: maxQuerySizeBytes, } }) } @@ -38,6 +40,12 @@ func (c queryValidatorWare) validateTraceQLQuery(queryParams url.Values) error { traceQLQuery = queryParams.Get("query") } if traceQLQuery != "" { + // reject query if the 
query expression size exceeds the maximum allowed size.
+		// rejecting before parsing also avoids spending cycles on enormous expressions.
+		if len(traceQLQuery) > c.maxQuerySizeBytes {
+			return fmt.Errorf("TraceQL expression exceeds the configured maximum size of %d bytes, reduce the query expression size or contact your system administrator", c.maxQuerySizeBytes)
+		}
+
 		expr, err := traceql.Parse(traceQLQuery)
 		if err == nil {
 			err = traceql.Validate(expr)
diff --git a/modules/frontend/pipeline/async_query_validator_middleware_test.go b/modules/frontend/pipeline/async_query_validator_middleware_test.go
index be711e8f4ec..626fdf89524 100644
--- a/modules/frontend/pipeline/async_query_validator_middleware_test.go
+++ b/modules/frontend/pipeline/async_query_validator_middleware_test.go
@@ -8,45 +8,80 @@ import (
 	"testing"
 
 	"github.com/grafana/tempo/modules/frontend/combiner"
-	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
 
 var nextFunc = AsyncRoundTripperFunc[combiner.PipelineResponse](func(_ Request) (Responses[combiner.PipelineResponse], error) {
 	return NewHTTPToAsyncResponse(&http.Response{
 		StatusCode: 200,
-		Body:       io.NopCloser(bytes.NewReader([]byte{})),
+		Body:       io.NopCloser(bytes.NewReader([]byte("foo"))),
 	}), nil
 })
 
 func TestQueryValidator(t *testing.T) {
-	roundTrip := NewQueryValidatorWare().Wrap(nextFunc)
-	statusCode := doRequest(t, "http://localhost:8080/api/search", roundTrip)
-	assert.Equal(t, 200, statusCode)
-}
+	tests := []struct {
+		name              string
+		url               string
+		statusCode        int
+		maxQuerySizeBytes int
+	}{
+		{
+			name:              "No Query",
+			url:               "http://localhost:8080/api/search",
+			statusCode:        200,
+			maxQuerySizeBytes: 1000,
+		},
+		{
+			name:              "Empty query value",
+			url:               "http://localhost:8080/api/search&q=",
+			statusCode:        200,
+			maxQuerySizeBytes: 1000,
+		},
+		{
+			name:              "Valid query",
+			url:               "http://localhost:8080/api/search&q={}",
+			statusCode:        200,
+			maxQuerySizeBytes: 1000,
+		},
+		{
+			name:              "Invalid TraceQL query",
+			url:               "http://localhost:8080/api/search?q={. hi}",
+			statusCode:        400,
+			maxQuerySizeBytes: 1000,
+		},
+		{
+			name:              "Invalid TraceQL query regex",
+			url:               "http://localhost:8080/api/search?query={span.a =~ \"[\"}",
+			statusCode:        400,
+			maxQuerySizeBytes: 1000,
+		},
+		{
+			name:              "TraceQL query smaller than max size",
+			url:               "http://localhost:8080/api/search?q={ resource.service.name=\"service\" }",
+			statusCode:        200,
+			maxQuerySizeBytes: 1000,
+		},
+		{
+			name:              "TraceQL query bigger than max size",
+			url:               "http://localhost:8080/api/search?q={ resource.service.name=\"service\" }",
+			statusCode:        400,
+			maxQuerySizeBytes: 10,
+		},
+	}
 
-func TestQueryValidatorForAValidQuery(t *testing.T) {
-	roundTrip := NewQueryValidatorWare().Wrap(nextFunc)
-	statusCode := doRequest(t, "http://localhost:8080/api/search&q={}", roundTrip)
-	assert.Equal(t, 200, statusCode)
-}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			rt := NewQueryValidatorWare(tt.maxQuerySizeBytes).Wrap(nextFunc)
+			req, _ := http.NewRequest(http.MethodGet, tt.url, nil)
+			resp, _ := rt.RoundTrip(NewHTTPRequest(req))
 
-func TestQueryValidatorForAnInvalidTraceQLQuery(t *testing.T) {
-	roundTrip := NewQueryValidatorWare().Wrap(nextFunc)
-	statusCode := doRequest(t, "http://localhost:8080/api/search?q={. 
hi}", roundTrip) - assert.Equal(t, 400, statusCode) -} - -func TestQueryValidatorForAnInvalidTraceQlQueryRegex(t *testing.T) { - roundTrip := NewQueryValidatorWare().Wrap(nextFunc) - statusCode := doRequest(t, "http://localhost:8080/api/search?query={span.a =~ \"[\"}", roundTrip) - assert.Equal(t, 400, statusCode) -} + httpResponse, _, err := resp.Next(context.Background()) + require.NoError(t, err) + body, err := io.ReadAll(httpResponse.HTTPResponse().Body) + require.NoError(t, err) + require.NotEmpty(t, body) -func doRequest(t *testing.T, url string, rt AsyncRoundTripper[combiner.PipelineResponse]) int { - req, _ := http.NewRequest(http.MethodGet, url, nil) - resp, _ := rt.RoundTrip(NewHTTPRequest(req)) - httpResponse, _, err := resp.Next(context.Background()) - require.NoError(t, err) - return httpResponse.HTTPResponse().StatusCode + require.Equal(t, tt.statusCode, httpResponse.HTTPResponse().StatusCode) + }) + } } diff --git a/modules/frontend/search_handlers.go b/modules/frontend/search_handlers.go index 1675271a27f..c6da54d6608 100644 --- a/modules/frontend/search_handlers.go +++ b/modules/frontend/search_handlers.go @@ -28,9 +28,13 @@ func newSearchStreamingGRPCHandler(cfg Config, next pipeline.AsyncRoundTripper[c downstreamPath := path.Join(apiPrefix, api.PathSearch) return func(req *tempopb.SearchRequest, srv tempopb.StreamingQuerier_SearchServer) error { + ctx := srv.Context() + + headers := headersFromGrpcContext(ctx) + httpReq, err := api.BuildSearchRequest(&http.Request{ URL: &url.URL{Path: downstreamPath}, - Header: http.Header{}, + Header: headers, Body: io.NopCloser(bytes.NewReader([]byte{})), }, req) if err != nil { @@ -38,7 +42,6 @@ func newSearchStreamingGRPCHandler(cfg Config, next pipeline.AsyncRoundTripper[c return status.Errorf(codes.InvalidArgument, "build search request failed: %s", err.Error()) } - ctx := srv.Context() httpReq = httpReq.WithContext(ctx) tenant, _ := user.ExtractOrgID(ctx) start := time.Now() diff --git a/modules/frontend/search_handlers_test.go b/modules/frontend/search_handlers_test.go index 56c719237d6..ad4ea908207 100644 --- a/modules/frontend/search_handlers_test.go +++ b/modules/frontend/search_handlers_test.go @@ -314,7 +314,8 @@ func TestSearchLimitHonored(t *testing.T) { } }, }, nil, &Config{ - MultiTenantQueriesEnabled: true, + MultiTenantQueriesEnabled: true, + MaxQueryExpressionSizeBytes: 10000, TraceByID: TraceByIDConfig{ QueryShards: minQueryShards, SLO: testSLOcfg, @@ -768,7 +769,8 @@ func frontendWithSettings(t require.TestingT, next pipeline.RoundTripper, rdr te } if cfg == nil { cfg = &Config{ - MultiTenantQueriesEnabled: true, + MultiTenantQueriesEnabled: true, + MaxQueryExpressionSizeBytes: 100000, TraceByID: TraceByIDConfig{ QueryShards: minQueryShards, SLO: testSLOcfg, diff --git a/modules/frontend/search_sharder.go b/modules/frontend/search_sharder.go index ddad508d30f..2500a5c9cf0 100644 --- a/modules/frontend/search_sharder.go +++ b/modules/frontend/search_sharder.go @@ -35,6 +35,7 @@ type SearchSharderConfig struct { QueryBackendAfter time.Duration `yaml:"query_backend_after,omitempty"` QueryIngestersUntil time.Duration `yaml:"query_ingesters_until,omitempty"` IngesterShards int `yaml:"ingester_shards,omitempty"` + MaxSpansPerSpanSet uint32 `yaml:"max_spans_per_span_set,omitempty"` } type asyncSearchSharder struct { @@ -91,6 +92,10 @@ func (s asyncSearchSharder) RoundTrip(pipelineRequest pipeline.Request) (pipelin return pipeline.NewBadRequest(fmt.Errorf("range specified by start and end exceeds %s. 
received start=%d end=%d", maxDuration, searchReq.Start, searchReq.End)), nil } + if s.cfg.MaxSpansPerSpanSet != 0 && searchReq.SpansPerSpanSet > s.cfg.MaxSpansPerSpanSet { + return pipeline.NewBadRequest(fmt.Errorf("spans per span set exceeds %d. received %d", s.cfg.MaxSpansPerSpanSet, searchReq.SpansPerSpanSet)), nil + } + // buffer of shards+1 allows us to insert ingestReq and metrics reqCh := make(chan pipeline.Request, s.cfg.IngesterShards+1) diff --git a/modules/frontend/search_sharder_test.go b/modules/frontend/search_sharder_test.go index 3a8730d083c..850f19b41a7 100644 --- a/modules/frontend/search_sharder_test.go +++ b/modules/frontend/search_sharder_test.go @@ -41,11 +41,11 @@ type mockReader struct { metas []*backend.BlockMeta } -func (m *mockReader) SearchTags(context.Context, *backend.BlockMeta, string, common.SearchOptions) (*tempopb.SearchTagsV2Response, error) { +func (m *mockReader) SearchTags(context.Context, *backend.BlockMeta, *tempopb.SearchTagsBlockRequest, common.SearchOptions) (*tempopb.SearchTagsV2Response, error) { return nil, nil } -func (m *mockReader) SearchTagValues(context.Context, *backend.BlockMeta, string, common.SearchOptions) (*tempopb.SearchTagValuesResponse, error) { +func (m *mockReader) SearchTagValues(context.Context, *backend.BlockMeta, *tempopb.SearchTagValuesBlockRequest, common.SearchOptions) (*tempopb.SearchTagValuesResponse, error) { return nil, nil } @@ -717,6 +717,7 @@ func TestSearchSharderRoundTripBadRequest(t *testing.T) { ConcurrentRequests: defaultConcurrentRequests, TargetBytesPerRequest: defaultTargetBytesPerRequest, MaxDuration: 5 * time.Minute, + MaxSpansPerSpanSet: 100, }, log.NewNopLogger()) testRT := sharder.Wrap(next) @@ -731,6 +732,12 @@ func TestSearchSharderRoundTripBadRequest(t *testing.T) { resp, err = testRT.RoundTrip(pipeline.NewHTTPRequest(req)) testBadRequestFromResponses(t, resp, err, "range specified by start and end exceeds 5m0s. received start=1000 end=1500") + // spans per span set greater than maximum + req = httptest.NewRequest("GET", "/?spss=200", nil) + req = req.WithContext(user.InjectOrgID(req.Context(), "blerg")) + resp, err = testRT.RoundTrip(pipeline.NewHTTPRequest(req)) + testBadRequestFromResponses(t, resp, err, "spans per span set exceeds 100. 
received 200") + // bad request req = httptest.NewRequest("GET", "/?start=asdf&end=1500", nil) resp, err = testRT.RoundTrip(pipeline.NewHTTPRequest(req)) diff --git a/modules/frontend/tag_handlers.go b/modules/frontend/tag_handlers.go index 404009a48ca..a7c59af3388 100644 --- a/modules/frontend/tag_handlers.go +++ b/modules/frontend/tag_handlers.go @@ -47,8 +47,8 @@ func newTagsStreamingGRPCHandler(cfg Config, next pipeline.AsyncRoundTripper[com } var finalResponse *tempopb.SearchTagsResponse - comb := combiner.NewTypedSearchTags(o.MaxBytesPerTagValuesQuery(tenant)) - collector := pipeline.NewGRPCCollector[*tempopb.SearchTagsResponse](next, cfg.ResponseConsumers, comb, func(res *tempopb.SearchTagsResponse) error { + comb := combiner.NewTypedSearchTags(o.MaxBytesPerTagValuesQuery(tenant), req.MaxTagsPerScope, req.StaleValuesThreshold) + collector := pipeline.NewGRPCCollector(next, cfg.ResponseConsumers, comb, func(res *tempopb.SearchTagsResponse) error { finalResponse = res // to get the bytes processed for SLO calculations return srv.Send(res) }) @@ -80,8 +80,8 @@ func newTagsV2StreamingGRPCHandler(cfg Config, next pipeline.AsyncRoundTripper[c } var finalResponse *tempopb.SearchTagsV2Response - comb := combiner.NewTypedSearchTagsV2(o.MaxBytesPerTagValuesQuery(tenant)) - collector := pipeline.NewGRPCCollector[*tempopb.SearchTagsV2Response](next, cfg.ResponseConsumers, comb, func(res *tempopb.SearchTagsV2Response) error { + comb := combiner.NewTypedSearchTagsV2(o.MaxBytesPerTagValuesQuery(tenant), req.MaxTagsPerScope, req.StaleValuesThreshold) + collector := pipeline.NewGRPCCollector(next, cfg.ResponseConsumers, comb, func(res *tempopb.SearchTagsV2Response) error { finalResponse = res // to get the bytes processed for SLO calculations return srv.Send(res) }) @@ -117,8 +117,8 @@ func newTagValuesStreamingGRPCHandler(cfg Config, next pipeline.AsyncRoundTrippe } var finalResponse *tempopb.SearchTagValuesResponse - comb := combiner.NewTypedSearchTagValues(o.MaxBytesPerTagValuesQuery(tenant)) - collector := pipeline.NewGRPCCollector[*tempopb.SearchTagValuesResponse](next, cfg.ResponseConsumers, comb, func(res *tempopb.SearchTagValuesResponse) error { + comb := combiner.NewTypedSearchTagValues(o.MaxBytesPerTagValuesQuery(tenant), req.MaxTagValues, req.StaleValueThreshold) + collector := pipeline.NewGRPCCollector(next, cfg.ResponseConsumers, comb, func(res *tempopb.SearchTagValuesResponse) error { finalResponse = res // to get the bytes processed for SLO calculations return srv.Send(res) }) @@ -154,8 +154,8 @@ func newTagValuesV2StreamingGRPCHandler(cfg Config, next pipeline.AsyncRoundTrip } var finalResponse *tempopb.SearchTagValuesV2Response - comb := combiner.NewTypedSearchTagValuesV2(o.MaxBytesPerTagValuesQuery(tenant)) - collector := pipeline.NewGRPCCollector[*tempopb.SearchTagValuesV2Response](next, cfg.ResponseConsumers, comb, func(res *tempopb.SearchTagValuesV2Response) error { + comb := combiner.NewTypedSearchTagValuesV2(o.MaxBytesPerTagValuesQuery(tenant), req.MaxTagValues, req.StaleValueThreshold) + collector := pipeline.NewGRPCCollector(next, cfg.ResponseConsumers, comb, func(res *tempopb.SearchTagValuesV2Response) error { finalResponse = res // to get the bytes processed for SLO calculations return srv.Send(res) }) @@ -187,9 +187,9 @@ func newTagsHTTPHandler(cfg Config, next pipeline.AsyncRoundTripper[combiner.Pip return errResp, nil } - scope, _, rangeDur := parseParams(req) + scope, _, rangeDur, maxTagsPerScope, staleValueThreshold := parseParams(req) // build and use round tripper - 
comb := combiner.NewTypedSearchTags(o.MaxBytesPerTagValuesQuery(tenant)) + comb := combiner.NewTypedSearchTags(o.MaxBytesPerTagValuesQuery(tenant), maxTagsPerScope, staleValueThreshold) rt := pipeline.NewHTTPCollector(next, cfg.ResponseConsumers, comb) start := time.Now() logTagsRequest(logger, tenant, "SearchTags", scope, rangeDur) @@ -221,9 +221,9 @@ func newTagsV2HTTPHandler(cfg Config, next pipeline.AsyncRoundTripper[combiner.P return errResp, nil } - scope, _, rangeDur := parseParams(req) + scope, _, rangeDur, maxTagsPerScope, staleValueThreshold := parseParams(req) // build and use round tripper - comb := combiner.NewTypedSearchTagsV2(o.MaxBytesPerTagValuesQuery(tenant)) + comb := combiner.NewTypedSearchTagsV2(o.MaxBytesPerTagValuesQuery(tenant), maxTagsPerScope, staleValueThreshold) rt := pipeline.NewHTTPCollector(next, cfg.ResponseConsumers, comb) start := time.Now() logTagsRequest(logger, tenant, "SearchTagsV2", scope, rangeDur) @@ -255,11 +255,11 @@ func newTagValuesHTTPHandler(cfg Config, next pipeline.AsyncRoundTripper[combine return errResp, nil } - _, query, rangeDur := parseParams(req) + _, query, rangeDur, maxTagsValues, staleValueThreshold := parseParams(req) tagName := extractTagName(req.URL.Path, tagNameRegexV1) // build and use round tripper - comb := combiner.NewTypedSearchTagValues(o.MaxBytesPerTagValuesQuery(tenant)) + comb := combiner.NewTypedSearchTagValues(o.MaxBytesPerTagValuesQuery(tenant), maxTagsValues, staleValueThreshold) rt := pipeline.NewHTTPCollector(next, cfg.ResponseConsumers, comb) start := time.Now() logTagValuesRequest(logger, tenant, "SearchTagValues", tagName, query, rangeDur) @@ -291,11 +291,11 @@ func newTagValuesV2HTTPHandler(cfg Config, next pipeline.AsyncRoundTripper[combi return errResp, nil } - _, query, rangeDur := parseParams(req) + _, query, rangeDur, maxTagsValues, staleValueThreshold := parseParams(req) tagName := extractTagName(req.URL.Path, tagNameRegexV2) // build and use round tripper - comb := combiner.NewTypedSearchTagValuesV2(o.MaxBytesPerTagValuesQuery(tenant)) + comb := combiner.NewTypedSearchTagValuesV2(o.MaxBytesPerTagValuesQuery(tenant), maxTagsValues, staleValueThreshold) rt := pipeline.NewHTTPCollector(next, cfg.ResponseConsumers, comb) start := time.Now() logTagValuesRequest(logger, tenant, "SearchTagValuesV2", tagName, query, rangeDur) @@ -332,9 +332,11 @@ func extractTenantWithErrorResp(req *http.Request, logger log.Logger) (string, * } func buildTagsRequestAndExtractTenant(ctx context.Context, req *tempopb.SearchTagsRequest, downstreamPath string, logger log.Logger) (*http.Request, string, error) { + headers := headersFromGrpcContext(ctx) + httpReq, err := api.BuildSearchTagsRequest(&http.Request{ URL: &url.URL{Path: downstreamPath}, - Header: http.Header{}, + Header: headers, Body: io.NopCloser(bytes.NewReader([]byte{})), }, req) if err != nil { @@ -353,9 +355,11 @@ func buildTagsRequestAndExtractTenant(ctx context.Context, req *tempopb.SearchTa } func buildTagValuesRequestAndExtractTenant(ctx context.Context, req *tempopb.SearchTagValuesRequest, downstreamPath string, logger log.Logger) (*http.Request, string, error) { + headers := headersFromGrpcContext(ctx) + httpReq, err := api.BuildSearchTagValuesRequest(&http.Request{ URL: &url.URL{Path: downstreamPath}, - Header: http.Header{}, + Header: headers, Body: io.NopCloser(bytes.NewReader([]byte{})), }, req) if err != nil { @@ -427,21 +431,26 @@ func logTagValuesResult(logger log.Logger, tenantID, handler, tagName, query str // parseParams parses optional 'start', 
'end', 'scope', 'q', 'limit', and 'maxStaleValues' params from an http.Request // returns scope, query, duration (end - start), limit, and staleValueThreshold. returns "" and 0 if these params are invalid or absent -func parseParams(req *http.Request) (string, string, uint32) { +func parseParams(req *http.Request) (scope string, q string, duration uint32, limit uint32, staleValueThreshold uint32) { query := req.URL.Query() - - scope := query.Get("scope") - q := query.Get("q") + scope = query.Get("scope") + q = query.Get("q") // ignore errors, we default to 0 as params are not always present. start, _ := strconv.ParseInt(query.Get("start"), 10, 64) end, _ := strconv.ParseInt(query.Get("end"), 10, 64) - - var duration int64 + maxItems, err := strconv.ParseUint(query.Get("limit"), 10, 32) + if err != nil { + maxItems = 0 + } + maxStaleValues, err := strconv.ParseUint(query.Get("maxStaleValues"), 10, 32) + if err != nil { + maxStaleValues = 0 + } // duration only makes sense if start and end are present and end is greater than start if start > 0 && end > 0 && end > start { - duration = end - start + duration = uint32(end - start) } - return scope, q, uint32(duration) + return scope, q, duration, uint32(maxItems), uint32(maxStaleValues) } // extractTagName extracts the tagName based on the provided regex pattern diff --git a/modules/frontend/tag_handlers_test.go b/modules/frontend/tag_handlers_test.go index 77915e55b72..658f5e91b2b 100644 --- a/modules/frontend/tag_handlers_test.go +++ b/modules/frontend/tag_handlers_test.go @@ -538,88 +538,148 @@ func TestSearchTagsV2AccessesCache(t *testing.T) { func TestParseParams(t *testing.T) { tests := []struct { - name string - queryParams map[string]string - expectedScope string - expectedQ string - expectedDuration uint32 + name string + queryParams map[string]string + expectedScope string + expectedQ string + expectedDuration uint32 + expectedLimit uint32 + expectedValueThreshold uint32 }{ { - name: "all params present", - queryParams: map[string]string{"start": "1723667082", "end": "1723839882", "scope": "resource", "q": "some_query"}, - expectedScope: "resource", - expectedQ: "some_query", - expectedDuration: 172800, + name: "all params present", + queryParams: map[string]string{"start": "1723667082", "end": "1723839882", "scope": "resource", "q": "some_query"}, + expectedScope: "resource", + expectedQ: "some_query", + expectedDuration: 172800, + expectedLimit: 0, + expectedValueThreshold: 0, }, { - name: "missing start", - queryParams: map[string]string{"end": "1723839882", "scope": "resource"}, - expectedScope: "resource", - expectedQ: "", - expectedDuration: 0, + name: "missing start", + queryParams: map[string]string{"end": "1723839882", "scope": "resource"}, + expectedScope: "resource", + expectedQ: "", + expectedDuration: 0, + expectedLimit: 0, + expectedValueThreshold: 0, }, { - name: "missing end", - queryParams: map[string]string{"start": "1723667082", "scope": "resource"}, - expectedScope: "resource", - expectedQ: "", - expectedDuration: 0, + name: "missing end", + queryParams: map[string]string{"start": "1723667082", "scope": "resource"}, + expectedScope: "resource", + expectedQ: "", + expectedDuration: 0, + expectedLimit: 0, + expectedValueThreshold: 0, }, { - name: "missing scope", - queryParams: map[string]string{"start": "1723667082", "end": "1723839882"}, - expectedScope: "", - expectedQ: "", - expectedDuration: 172800, + name: "missing scope", + queryParams: map[string]string{"start": "1723667082", "end": "1723839882"}, + expectedScope: "", + expectedQ: "", + expectedDuration: 172800, +
expectedLimit: 0, + expectedValueThreshold: 0, }, { - name: "missing q", - queryParams: map[string]string{"start": "1723667082", "end": "1723839882", "scope": "resource"}, - expectedScope: "resource", - expectedQ: "", - expectedDuration: 172800, + name: "missing q", + queryParams: map[string]string{"start": "1723667082", "end": "1723839882", "scope": "resource"}, + expectedScope: "resource", + expectedQ: "", + expectedDuration: 172800, + expectedLimit: 0, + expectedValueThreshold: 0, }, { - name: "invalid start", - queryParams: map[string]string{"start": "invalid", "end": "1723839882", "scope": "resource"}, - expectedScope: "resource", - expectedQ: "", - expectedDuration: 0, + name: "invalid start", + queryParams: map[string]string{"start": "invalid", "end": "1723839882", "scope": "resource"}, + expectedScope: "resource", + expectedQ: "", + expectedDuration: 0, + expectedLimit: 0, + expectedValueThreshold: 0, }, { - name: "invalid end", - queryParams: map[string]string{"start": "1723667082", "end": "invalid", "scope": "resource"}, - expectedScope: "resource", - expectedQ: "", - expectedDuration: 0, + name: "invalid end", + queryParams: map[string]string{"start": "1723667082", "end": "invalid", "scope": "resource"}, + expectedScope: "resource", + expectedQ: "", + expectedDuration: 0, + expectedLimit: 0, + expectedValueThreshold: 0, }, { - name: "no params", - queryParams: map[string]string{}, - expectedScope: "", - expectedQ: "", - expectedDuration: 0, + name: "no params", + queryParams: map[string]string{}, + expectedScope: "", + expectedQ: "", + expectedDuration: 0, + expectedLimit: 0, + expectedValueThreshold: 0, }, { - name: "negative start and end", - queryParams: map[string]string{"start": "-1000", "end": "-2000", "scope": "negative_case"}, - expectedScope: "negative_case", - expectedQ: "", - expectedDuration: 0, + name: "negative start and end", + queryParams: map[string]string{"start": "-1000", "end": "-2000", "scope": "negative_case"}, + expectedScope: "negative_case", + expectedQ: "", + expectedDuration: 0, + expectedLimit: 0, + expectedValueThreshold: 0, }, { - name: "end less than start", - queryParams: map[string]string{"start": "1723839882", "end": "1723667082", "scope": "resource"}, - expectedScope: "resource", - expectedQ: "", - expectedDuration: 0, + name: "end less than start", + queryParams: map[string]string{"start": "1723839882", "end": "1723667082", "scope": "resource"}, + expectedScope: "resource", + expectedQ: "", + expectedDuration: 0, + expectedLimit: 0, + expectedValueThreshold: 0, }, { - name: "start and end are the same", - queryParams: map[string]string{"start": "1723839882", "end": "1723839882", "scope": "zero_duration"}, - expectedScope: "zero_duration", - expectedQ: "", - expectedDuration: 0, + name: "start and end are the same", + queryParams: map[string]string{"start": "1723839882", "end": "1723839882", "scope": "zero_duration"}, + expectedScope: "zero_duration", + expectedQ: "", + expectedDuration: 0, + expectedLimit: 0, + expectedValueThreshold: 0, + }, + { + name: "invalid limit", + queryParams: map[string]string{"limit": "-1000"}, + expectedScope: "", + expectedQ: "", + expectedDuration: 0, + expectedLimit: 0, + expectedValueThreshold: 0, + }, + { + name: "invalid limit: too large", + queryParams: map[string]string{"limit": "1000000000000000000"}, + expectedScope: "", + expectedQ: "", + expectedDuration: 0, + expectedLimit: 0, + expectedValueThreshold: 0, + }, + { + name: "valid limit", + queryParams: map[string]string{"limit": "100"}, + expectedScope:
"", + expectedQ: "", + expectedDuration: 0, + expectedLimit: 100, + expectedValueThreshold: 0, + }, + { + name: "valid threshold", + queryParams: map[string]string{"maxStaleValues": "100"}, + expectedScope: "", + expectedQ: "", + expectedDuration: 0, + expectedLimit: 0, + expectedValueThreshold: 100, }, } @@ -633,11 +693,13 @@ func TestParseParams(t *testing.T) { u.RawQuery = query.Encode() req := &http.Request{URL: u} - scope, q, duration := parseParams(req) + scope, q, duration, limit, staleValueThreshold := parseParams(req) require.Equal(t, tt.expectedScope, scope) require.Equal(t, tt.expectedQ, q) require.Equal(t, tt.expectedDuration, duration) + require.Equal(t, tt.expectedLimit, limit) + require.Equal(t, tt.expectedValueThreshold, staleValueThreshold) }) } } diff --git a/modules/generator/config.go b/modules/generator/config.go index 03721391b63..4e1b6b41b68 100644 --- a/modules/generator/config.go +++ b/modules/generator/config.go @@ -1,8 +1,10 @@ package generator import ( + "encoding/json" "flag" "fmt" + "os" "time" "github.com/grafana/tempo/modules/generator/processor/localblocks" @@ -10,6 +12,7 @@ import ( "github.com/grafana/tempo/modules/generator/processor/spanmetrics" "github.com/grafana/tempo/modules/generator/registry" "github.com/grafana/tempo/modules/generator/storage" + "github.com/grafana/tempo/pkg/ingest" "github.com/grafana/tempo/tempodb/encoding" "github.com/grafana/tempo/tempodb/wal" ) @@ -20,20 +23,28 @@ const ( // ringNameForServer is the name of the ring used by the metrics-generator server. ringNameForServer = "metrics-generator" + + ConsumerGroup = "metrics-generator" ) // Config for a generator. type Config struct { - Ring RingConfig `yaml:"ring"` - Processor ProcessorConfig `yaml:"processor"` - Registry registry.Config `yaml:"registry"` - Storage storage.Config `yaml:"storage"` - TracesWAL wal.Config `yaml:"traces_storage"` + Ring RingConfig `yaml:"ring"` + Processor ProcessorConfig `yaml:"processor"` + Registry registry.Config `yaml:"registry"` + Storage storage.Config `yaml:"storage"` + TracesWAL wal.Config `yaml:"traces_storage"` + TracesQueryWAL wal.Config `yaml:"traces_query_storage"` // MetricsIngestionSlack is the max amount of time passed since a span's end time // for the span to be considered in metrics generation MetricsIngestionSlack time.Duration `yaml:"metrics_ingestion_time_range_slack"` QueryTimeout time.Duration `yaml:"query_timeout"` OverrideRingKey string `yaml:"override_ring_key"` + + // This config is dynamically injected because defined outside the generator config. + Ingest ingest.Config `yaml:"-"` + AssignedPartitions map[string][]int32 `yaml:"assigned_partitions" doc:"List of partitions assigned to this block builder."` + InstanceID string `yaml:"instance_id" doc:"default=" category:"advanced"` } // RegisterFlagsAndApplyDefaults registers the flags. 
@@ -42,13 +53,45 @@ func (cfg *Config) RegisterFlagsAndApplyDefaults(prefix string, f *flag.FlagSet) cfg.Processor.RegisterFlagsAndApplyDefaults(prefix, f) cfg.Registry.RegisterFlagsAndApplyDefaults(prefix, f) cfg.Storage.RegisterFlagsAndApplyDefaults(prefix, f) + cfg.TracesWAL.RegisterFlags(f) cfg.TracesWAL.Version = encoding.DefaultEncoding().Version() - cfg.TracesWAL.IngestionSlack = 2 * time.Minute + cfg.TracesQueryWAL.RegisterFlags(f) + cfg.TracesQueryWAL.Version = encoding.DefaultEncoding().Version() // setting default for max span age before discarding to 30s cfg.MetricsIngestionSlack = 30 * time.Second cfg.QueryTimeout = 30 * time.Second cfg.OverrideRingKey = generatorRingKey + + hostname, err := os.Hostname() + if err != nil { + fmt.Printf("failed to get hostname: %v", err) + os.Exit(1) + } + f.StringVar(&cfg.InstanceID, prefix+".instance-id", hostname, "Instance id.") + f.Var(newPartitionAssignmentVar(&cfg.AssignedPartitions), prefix+".assigned-partitions", "List of partitions assigned to this metrics generator.") +} + +func (cfg *Config) Validate() error { + if cfg.Ingest.Enabled { + if err := cfg.Ingest.Kafka.Validate(); err != nil { + return err + } + } + + // Only validate if being used + if cfg.TracesWAL.Filepath != "" { + if err := cfg.TracesWAL.Validate(); err != nil { + return err + } + } + if cfg.TracesQueryWAL.Filepath != "" { + if err := cfg.TracesQueryWAL.Validate(); err != nil { + return err + } + } + + return nil } type ProcessorConfig struct { @@ -141,3 +184,29 @@ func (cfg *ProcessorConfig) copyWithOverrides(o metricsGeneratorOverrides, userI return copyCfg, nil } + +type partitionAssignmentVar struct { + p *map[string][]int32 +} + +func newPartitionAssignmentVar(p *map[string][]int32) *partitionAssignmentVar { + return &partitionAssignmentVar{p} +} + +func (p *partitionAssignmentVar) Set(s string) error { + if s == "" { + return nil + } + + val := make(map[string][]int32) + if err := json.Unmarshal([]byte(s), &val); err != nil { + return err + } + *p.p = val + + return nil +} + +func (p *partitionAssignmentVar) String() string { + return fmt.Sprintf("%v", *p.p) +} diff --git a/modules/generator/generator.go b/modules/generator/generator.go index 2b0078c03f8..1a3216ea6db 100644 --- a/modules/generator/generator.go +++ b/modules/generator/generator.go @@ -11,17 +11,21 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/grafana/dskit/backoff" "github.com/grafana/dskit/kv" "github.com/grafana/dskit/ring" "github.com/grafana/dskit/services" "github.com/grafana/dskit/user" "github.com/prometheus/client_golang/prometheus" + "github.com/twmb/franz-go/pkg/kadm" + "github.com/twmb/franz-go/pkg/kgo" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.uber.org/atomic" "github.com/grafana/tempo/modules/generator/storage" objStorage "github.com/grafana/tempo/modules/storage" + "github.com/grafana/tempo/pkg/ingest" "github.com/grafana/tempo/pkg/tempopb" tempodb_wal "github.com/grafana/tempo/tempodb/wal" ) @@ -65,14 +69,24 @@ type Generator struct { reg prometheus.Registerer logger log.Logger + + kafkaWG sync.WaitGroup + kafkaStop func() + kafkaClient *kgo.Client + kafkaAdm *kadm.Client + partitionRing ring.PartitionRingReader } // New makes a new Generator. 
-func New(cfg *Config, overrides metricsGeneratorOverrides, reg prometheus.Registerer, store objStorage.Store, logger log.Logger) (*Generator, error) { +func New(cfg *Config, overrides metricsGeneratorOverrides, reg prometheus.Registerer, partitionRing ring.PartitionRingReader, store objStorage.Store, logger log.Logger) (*Generator, error) { if cfg.Storage.Path == "" { return nil, ErrUnconfigured } + if err := cfg.Validate(); err != nil { + return nil, err + } + err := os.MkdirAll(cfg.Storage.Path, 0o700) if err != nil { return nil, fmt.Errorf("failed to mkdir on %s: %w", cfg.Storage.Path, err) @@ -84,10 +98,10 @@ func New(cfg *Config, overrides metricsGeneratorOverrides, reg prometheus.Regist instances: map[string]*instance{}, - store: store, - - reg: reg, - logger: logger, + store: store, + partitionRing: partitionRing, + reg: reg, + logger: logger, } // Lifecycler and ring @@ -147,10 +161,45 @@ func (g *Generator) starting(ctx context.Context) (err error) { return fmt.Errorf("unable to start metrics-generator dependencies: %w", err) } + if g.cfg.Ingest.Enabled { + g.kafkaClient, err = ingest.NewReaderClient( + g.cfg.Ingest.Kafka, + ingest.NewReaderClientMetrics("generator", prometheus.DefaultRegisterer), + g.logger, + ) + if err != nil { + return fmt.Errorf("failed to create kafka reader client: %w", err) + } + + boff := backoff.New(ctx, backoff.Config{ + MinBackoff: 100 * time.Millisecond, + MaxBackoff: time.Minute, // If there is a network hiccup, we prefer to keep retrying longer rather than fail the service. + MaxRetries: 10, + }) + + for boff.Ongoing() { + err := g.kafkaClient.Ping(ctx) + if err == nil { + break + } + level.Warn(g.logger).Log("msg", "ping kafka; will retry", "err", err) + boff.Wait() + } + if err := boff.ErrCause(); err != nil { + return fmt.Errorf("failed to ping kafka: %w", err) + } + + g.kafkaAdm = kadm.NewClient(g.kafkaClient) + } + return nil } func (g *Generator) running(ctx context.Context) error { + if g.cfg.Ingest.Enabled { + g.startKafka() + } + for { select { case <-ctx.Done(): @@ -175,6 +224,11 @@ func (g *Generator) stopping(_ error) error { // Mark as read-only after we have removed ourselves from the ring g.stopIncomingRequests() + // Stop reading from queue and wait for outstanding data to be processed and committed + if g.cfg.Ingest.Enabled { + g.stopKafka() + } + var wg sync.WaitGroup wg.Add(len(g.instances)) @@ -265,22 +319,31 @@ func (g *Generator) createInstance(id string) (*instance, error) { } // Create traces wal if configured - var tracesWAL *tempodb_wal.WAL + var tracesWAL, tracesQueryWAL *tempodb_wal.WAL if g.cfg.TracesWAL.Filepath != "" { // Create separate wals per tenant by prefixing path with tenant ID + cfg := g.cfg.TracesWAL + cfg.Filepath = path.Join(cfg.Filepath, id) + tracesWAL, err = tempodb_wal.New(&cfg) + if err != nil { + _ = wal.Close() + return nil, err + } + } - tracesWALCfg := g.cfg.TracesWAL - tracesWALCfg.Filepath = path.Join(tracesWALCfg.Filepath, id) - - tracesWAL, err = tempodb_wal.New(&tracesWALCfg) + if g.cfg.TracesQueryWAL.Filepath != "" { + // Create separate wals per tenant by prefixing path with tenant ID + cfg := g.cfg.TracesQueryWAL + cfg.Filepath = path.Join(cfg.Filepath, id) + tracesQueryWAL, err = tempodb_wal.New(&cfg) if err != nil { _ = wal.Close() return nil, err } } - inst, err := newInstance(g.cfg, id, g.overrides, wal, reg, g.logger, tracesWAL, g.store) + inst, err := newInstance(g.cfg, id, g.overrides, wal, reg, g.logger, tracesWAL, tracesQueryWAL, g.store) if err != nil { _ = wal.Close() return nil, err
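The starting hook above gates generator readiness on a successful Kafka ping, retried with exponential backoff so a transient network hiccup delays startup rather than failing the service. A minimal standalone sketch of that retry pattern, assuming only dskit's backoff package; pingFn stands in for kafkaClient.Ping:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/grafana/dskit/backoff"
)

func waitForKafka(ctx context.Context, pingFn func(context.Context) error) error {
	boff := backoff.New(ctx, backoff.Config{
		MinBackoff: 100 * time.Millisecond,
		MaxBackoff: time.Minute,
		MaxRetries: 10,
	})
	for boff.Ongoing() {
		if err := pingFn(ctx); err == nil {
			return nil // broker reachable
		}
		boff.Wait()
	}
	// Ongoing() went false: the context was canceled or retries ran out.
	return fmt.Errorf("failed to ping kafka: %w", boff.ErrCause())
}

func main() {
	pings := 0
	err := waitForKafka(context.Background(), func(context.Context) error {
		pings++
		if pings < 3 {
			return fmt.Errorf("broker not ready")
		}
		return nil
	})
	fmt.Println(err) // <nil> once the stub ping succeeds
}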
diff --git a/modules/generator/generator_kafka.go b/modules/generator/generator_kafka.go new file mode 100644 index 00000000000..57d3f36f194 --- /dev/null +++ b/modules/generator/generator_kafka.go @@ -0,0 +1,223 @@ +package generator + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "github.com/go-kit/log/level" + "github.com/grafana/tempo/pkg/ingest" + "github.com/grafana/tempo/pkg/tempopb" + + "github.com/twmb/franz-go/pkg/kadm" + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kgo" +) + +func (g *Generator) startKafka() { + // Create context that will be used to stop the goroutines. + var ctx context.Context + ctx, g.kafkaStop = context.WithCancel(context.Background()) + + g.kafkaWG.Add(1) + go g.listenKafka(ctx) + ingest.ExportPartitionLagMetrics(ctx, g.kafkaAdm, g.logger, g.cfg.Ingest, g.getAssignedActivePartitions) +} + +func (g *Generator) stopKafka() { + g.kafkaStop() + g.kafkaWG.Wait() +} + +func (g *Generator) listenKafka(ctx context.Context) { + defer g.kafkaWG.Done() + + level.Info(g.logger).Log("msg", "generator now listening to kafka") + for { + select { + case <-time.After(2 * time.Second): + if g.readOnly.Load() { + // Starting up or shutting down + continue + } + err := g.readKafka(ctx) + if err != nil { + level.Error(g.logger).Log("msg", "readKafka failed", "err", err) + continue + } + case <-ctx.Done(): + return + } + } +} + +func (g *Generator) readKafka(ctx context.Context) error { + fallback := time.Now().Add(-time.Minute) + + groupLag, err := getGroupLag( + ctx, + kadm.NewClient(g.kafkaClient), + g.cfg.Ingest.Kafka.Topic, + g.cfg.Ingest.Kafka.ConsumerGroup, + fallback, + ) + if err != nil { + return fmt.Errorf("failed to get group lag: %w", err) + } + + assignedPartitions := g.getAssignedActivePartitions() + + for _, partition := range assignedPartitions { + if ctx.Err() != nil { + return ctx.Err() + } + + partitionLag, ok := groupLag.Lookup(g.cfg.Ingest.Kafka.Topic, partition) + if !ok { + return fmt.Errorf("lag for partition %d not found", partition) + } + + if partitionLag.Lag <= 0 { + // Nothing to consume + continue + } + + err := g.consumePartition(ctx, partition, partitionLag) + if err != nil { + return err + } + } + return nil +} + +func (g *Generator) consumePartition(ctx context.Context, partition int32, lag kadm.GroupMemberLag) error { + d := ingest.NewDecoder() + + // We always rewind the partition's offset to the commit offset by reassigning the partition to the client (this triggers partition assignment). + // This is so the cycle starts exactly at the commit offset, and not at what was (potentially over-) consumed previously. + // In the end, we remove the partition from the client (refer to the defer below) to guarantee the client always consumes + // from one partition at a time. I.e. when this partition is consumed, we start consuming the next one.
+ g.kafkaClient.AddConsumePartitions(map[string]map[int32]kgo.Offset{ + g.cfg.Ingest.Kafka.Topic: { + partition: kgo.NewOffset().At(lag.Commit.At), + }, + }) + defer g.kafkaClient.RemoveConsumePartitions(map[string][]int32{g.cfg.Ingest.Kafka.Topic: {partition}}) + + fetches := g.kafkaClient.PollFetches(ctx) + fetches.EachError(func(_ string, _ int32, err error) { + if !errors.Is(err, context.Canceled) { + level.Error(g.logger).Log("msg", "failed to fetch records", "err", err) + } + }) + + for iter := fetches.RecordIter(); !iter.Done(); { + r := iter.Next() + + tenant := string(r.Key) + + i, err := g.getOrCreateInstance(tenant) + if err != nil { + return err + } + + d.Reset() + req, err := d.Decode(r.Value) + if err != nil { + return err + } + + for _, tr := range req.Traces { + trace := &tempopb.Trace{} + err = trace.Unmarshal(tr.Slice) + if err != nil { + return err + } + + i.pushSpansFromQueue(ctx, &tempopb.PushSpansRequest{ + Batches: trace.ResourceSpans, + }) + } + } + + offsets := kadm.OffsetsFromFetches(fetches) + err := g.kafkaAdm.CommitAllOffsets(ctx, g.cfg.Ingest.Kafka.ConsumerGroup, offsets) + if err != nil { + return fmt.Errorf("generator failed to commit offsets: %w", err) + } + + return nil +} + +func getGroupLag(ctx context.Context, admClient *kadm.Client, topic, group string, fallback time.Time) (kadm.GroupLag, error) { + offsets, err := admClient.FetchOffsets(ctx, group) + if err != nil { + if !errors.Is(err, kerr.GroupIDNotFound) { + return nil, fmt.Errorf("fetch offsets: %w", err) + } + } + if err := offsets.Error(); err != nil { + return nil, fmt.Errorf("fetch offsets got error in response: %w", err) + } + + startOffsets, err := admClient.ListStartOffsets(ctx, topic) + if err != nil { + return nil, err + } + endOffsets, err := admClient.ListEndOffsets(ctx, topic) + if err != nil { + return nil, err + } + + resolveFallbackOffsets := sync.OnceValues(func() (kadm.ListedOffsets, error) { + return admClient.ListOffsetsAfterMilli(ctx, fallback.UnixMilli(), topic) + }) + // If the group-partition in offsets doesn't have a commit, fall back depending on where the fallback offset points. + for topic, pt := range startOffsets.Offsets() { + for partition, startOffset := range pt { + if _, ok := offsets.Lookup(topic, partition); ok { + continue + } + fallbackOffsets, err := resolveFallbackOffsets() + if err != nil { + return nil, fmt.Errorf("resolve fallback offsets: %w", err) + } + o, ok := fallbackOffsets.Lookup(topic, partition) + if !ok { + return nil, fmt.Errorf("partition %d not found in fallback offsets for topic %s", partition, topic) + } + if o.Offset < startOffset.At { + // Skip the resolved fallback offset if it's before the partition's start offset (i.e. before the earliest offset of the partition). + // This should not happen in Kafka, but can happen in Kafka-compatible systems, e.g. Warpstream. + continue + } + offsets.Add(kadm.OffsetResponse{Offset: kadm.Offset{ + Topic: o.Topic, + Partition: o.Partition, + At: o.Offset, + LeaderEpoch: o.LeaderEpoch, + }}) + } + } + + descrGroup := kadm.DescribedGroup{ + // "Empty" is the state that indicates that the group doesn't have active consumer members; this is always the case for the generator, + // because we don't use group consumption.
+ State: "Empty", + } + return kadm.CalculateGroupLagWithStartOffsets(descrGroup, offsets, startOffsets, endOffsets), nil +} + +func (g *Generator) getAssignedActivePartitions() []int32 { + activePartitionsCount := g.partitionRing.PartitionRing().ActivePartitionsCount() + assignedActivePartitions := make([]int32, 0, activePartitionsCount) + for _, partition := range g.cfg.AssignedPartitions[g.cfg.InstanceID] { + if partition > int32(activePartitionsCount) { + break + } + assignedActivePartitions = append(assignedActivePartitions, partition) + } + return assignedActivePartitions +} diff --git a/modules/generator/generator_test.go b/modules/generator/generator_test.go index a3ad47c1b6c..0838b51ccb3 100644 --- a/modules/generator/generator_test.go +++ b/modules/generator/generator_test.go @@ -63,7 +63,7 @@ overrides: generatorConfig.Storage.Path = t.TempDir() generatorConfig.Ring.KVStore.Store = "inmemory" generatorConfig.Processor.SpanMetrics.RegisterFlagsAndApplyDefaults("", nil) - g, err := New(generatorConfig, o, prometheus.NewRegistry(), nil, newTestLogger(t)) + g, err := New(generatorConfig, o, prometheus.NewRegistry(), nil, nil, newTestLogger(t)) require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(context.Background(), g)) @@ -155,7 +155,7 @@ func BenchmarkPushSpans(b *testing.B) { "span-metrics": {}, "service-graphs": {}, }, - spanMetricsEnableTargetInfo: true, + spanMetricsEnableTargetInfo: false, spanMetricsTargetInfoExcludedDimensions: []string{"excluded}"}, } ) @@ -165,7 +165,7 @@ func BenchmarkPushSpans(b *testing.B) { wal, err := storage.New(walcfg, o, tenant, reg, log) require.NoError(b, err) - inst, err := newInstance(cfg, tenant, o, wal, reg, log, nil, nil) + inst, err := newInstance(cfg, tenant, o, wal, reg, log, nil, nil, nil) require.NoError(b, err) defer inst.shutdown() @@ -175,6 +175,12 @@ func BenchmarkPushSpans(b *testing.B) { test.MakeBatch(100, nil), test.MakeBatch(100, nil), test.MakeBatch(100, nil), + test.MakeBatch(100, nil), + test.MakeBatch(100, nil), + test.MakeBatch(100, nil), + test.MakeBatch(100, nil), + test.MakeBatch(100, nil), + test.MakeBatch(100, nil), }, } @@ -189,11 +195,13 @@ func BenchmarkPushSpans(b *testing.B) { {Key: "k8s.node.name", Value: &common_v1.AnyValue{Value: &common_v1.AnyValue_StringValue{StringValue: "test" + strconv.Itoa(i)}}}, {Key: "k8s.pod.ip", Value: &common_v1.AnyValue{Value: &common_v1.AnyValue_StringValue{StringValue: "test" + strconv.Itoa(i)}}}, {Key: "k8s.pod.name", Value: &common_v1.AnyValue{Value: &common_v1.AnyValue_StringValue{StringValue: "test" + strconv.Itoa(i)}}}, + {Key: "service.instance.id", Value: &common_v1.AnyValue{Value: &common_v1.AnyValue_StringValue{StringValue: "test" + strconv.Itoa(i)}}}, {Key: "excluded", Value: &common_v1.AnyValue{Value: &common_v1.AnyValue_StringValue{StringValue: "test" + strconv.Itoa(i)}}}, }...) 
} b.ResetTimer() + b.ReportAllocs() for i := 0; i < b.N; i++ { inst.pushSpans(ctx, req) } @@ -234,7 +242,7 @@ func BenchmarkCollect(b *testing.B) { wal, err := storage.New(walcfg, o, tenant, reg, log) require.NoError(b, err) - inst, err := newInstance(cfg, tenant, o, wal, reg, log, nil, nil) + inst, err := newInstance(cfg, tenant, o, wal, reg, log, nil, nil, nil) require.NoError(b, err) defer inst.shutdown() diff --git a/modules/generator/instance.go b/modules/generator/instance.go index a3c8a0db6a3..a1fff02f3ef 100644 --- a/modules/generator/instance.go +++ b/modules/generator/instance.go @@ -23,6 +23,7 @@ import ( "github.com/grafana/tempo/modules/generator/storage" "github.com/grafana/tempo/pkg/tempopb" v1 "github.com/grafana/tempo/pkg/tempopb/trace/v1" + "github.com/grafana/tempo/pkg/traceql" "github.com/grafana/tempo/tempodb/wal" "go.uber.org/atomic" @@ -74,14 +75,16 @@ type instance struct { registry *registry.ManagedRegistry wal storage.Storage - traceWAL *wal.WAL - writer tempodb.Writer + traceWAL *wal.WAL + traceQueryWAL *wal.WAL + writer tempodb.Writer // processorsMtx protects the processors map, not the processors itself processorsMtx sync.RWMutex // processors is a map of processor name -> processor, only one instance of a processor can be // active at any time - processors map[string]processor.Processor + processors map[string]processor.Processor + queuebasedLocalBlocks *localblocks.Processor shutdownCh chan struct{} @@ -89,7 +92,7 @@ type instance struct { logger log.Logger } -func newInstance(cfg *Config, instanceID string, overrides metricsGeneratorOverrides, wal storage.Storage, reg prometheus.Registerer, logger log.Logger, traceWAL *wal.WAL, writer tempodb.Writer) (*instance, error) { +func newInstance(cfg *Config, instanceID string, overrides metricsGeneratorOverrides, wal storage.Storage, reg prometheus.Registerer, logger log.Logger, traceWAL, rf1TraceWAL *wal.WAL, writer tempodb.Writer) (*instance, error) { logger = log.With(logger, "tenant", instanceID) i := &instance{ @@ -97,10 +100,11 @@ func newInstance(cfg *Config, instanceID string, overrides metricsGeneratorOverr instanceID: instanceID, overrides: overrides, - registry: registry.New(&cfg.Registry, overrides, instanceID, wal, logger), - wal: wal, - traceWAL: traceWAL, - writer: writer, + registry: registry.New(&cfg.Registry, overrides, instanceID, wal, logger), + wal: wal, + traceWAL: traceWAL, + traceQueryWAL: rf1TraceWAL, + writer: writer, processors: make(map[string]processor.Processor), @@ -304,6 +308,16 @@ func (i *instance) addProcessor(processorName string, cfg ProcessorConfig) error return err } newProcessor = p + + // Add the non-flushing alternate if configured + if i.traceQueryWAL != nil { + nonFlushingConfig := cfg.LocalBlocks + nonFlushingConfig.FlushToStorage = false + i.queuebasedLocalBlocks, err = localblocks.New(nonFlushingConfig, i.instanceID, i.traceQueryWAL, i.writer, i.overrides) + if err != nil { + return err + } + } default: level.Error(i.logger).Log( "msg", fmt.Sprintf("processor does not exist, supported processors: [%s]", strings.Join(SupportedProcessors, ", ")), @@ -335,6 +349,11 @@ func (i *instance) removeProcessor(processorName string) { delete(i.processors, processorName) deletedProcessor.Shutdown(context.Background()) + + if processorName == localblocks.Name && i.queuebasedLocalBlocks != nil { + i.queuebasedLocalBlocks.Shutdown(context.Background()) + i.queuebasedLocalBlocks = nil + } } // updateProcessorMetrics updates the active processor metrics. 
Must be called under a read lock. @@ -358,6 +377,25 @@ func (i *instance) pushSpans(ctx context.Context, req *tempopb.PushSpansRequest) } } +func (i *instance) pushSpansFromQueue(ctx context.Context, req *tempopb.PushSpansRequest) { + i.preprocessSpans(req) + i.processorsMtx.RLock() + defer i.processorsMtx.RUnlock() + + for _, processor := range i.processors { + // Same as normal push except we skip the local blocks processor + if processor.Name() == localblocks.Name { + continue + } + processor.PushSpans(ctx, req) + } + + // Now we push to the non-flushing local blocks if present + if i.queuebasedLocalBlocks != nil { + i.queuebasedLocalBlocks.PushSpans(ctx, req) + } +} + func (i *instance) preprocessSpans(req *tempopb.PushSpansRequest) { // TODO - uniqify all strings? // Doesn't help allocs, but should greatly reduce inuse space @@ -404,23 +442,74 @@ func (i *instance) GetMetrics(ctx context.Context, req *tempopb.SpanMetricsReque } func (i *instance) QueryRange(ctx context.Context, req *tempopb.QueryRangeRequest) (resp *tempopb.QueryRangeResponse, err error) { + var processors []*localblocks.Processor + + i.processorsMtx.RLock() for _, processor := range i.processors { switch p := processor.(type) { case *localblocks.Processor: - r, err := p.QueryRange(ctx, req) - if err != nil { - return resp, err - } + processors = append(processors, p) + } + } - rr := r.ToProto(req) - return &tempopb.QueryRangeResponse{ - Series: rr, - }, nil - default: + if i.queuebasedLocalBlocks != nil { + processors = append(processors, i.queuebasedLocalBlocks) + } + + i.processorsMtx.RUnlock() + + if len(processors) == 0 { + return resp, fmt.Errorf("localblocks processor not found") + } + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + expr, err := traceql.Parse(req.Query) + if err != nil { + return nil, fmt.Errorf("compiling query: %w", err) + } + + unsafe := i.overrides.UnsafeQueryHints(i.instanceID) + + timeOverlapCutoff := i.cfg.Processor.LocalBlocks.Metrics.TimeOverlapCutoff + if v, ok := expr.Hints.GetFloat(traceql.HintTimeOverlapCutoff, unsafe); ok && v >= 0 && v <= 1.0 { + timeOverlapCutoff = v + } + + e := traceql.NewEngine() + + // Compile the raw version of the query for head and wal blocks + // These aren't cached and we put them all into the same evaluator + // for efficiency. + rawEval, err := e.CompileMetricsQueryRange(req, int(req.Exemplars), timeOverlapCutoff, unsafe) + if err != nil { + return nil, err + } + + // This is a summation version of the query for complete blocks + // which can be cached. They are timeseries, so they need the job-level evaluator. 
+ jobEval, err := traceql.NewEngine().CompileMetricsQueryRangeNonRaw(req, traceql.AggregateModeSum) + if err != nil { + return nil, err + } + + for _, p := range processors { + err = p.QueryRange(ctx, req, rawEval, jobEval) + if err != nil { + return nil, err } } - return resp, fmt.Errorf("localblocks processor not found") + // Combine the raw results into the job results + walResults := rawEval.Results().ToProto(req) + jobEval.ObserveSeries(walResults) + + r := jobEval.Results() + rr := r.ToProto(req) + return &tempopb.QueryRangeResponse{ + Series: rr, + }, nil } func (i *instance) updatePushMetrics(bytesIngested int, spanCount int, expiredSpanCount int) { diff --git a/modules/generator/instance_test.go b/modules/generator/instance_test.go index 6f1efc0f33d..112fa90098a 100644 --- a/modules/generator/instance_test.go +++ b/modules/generator/instance_test.go @@ -33,10 +33,10 @@ func Test_instance_concurrency(t *testing.T) { servicegraphs.Name: {}, } - instance1, err := newInstance(&Config{}, "test", overrides, &noopStorage{}, prometheus.DefaultRegisterer, log.NewNopLogger(), nil, nil) + instance1, err := newInstance(&Config{}, "test", overrides, &noopStorage{}, prometheus.DefaultRegisterer, log.NewNopLogger(), nil, nil, nil) assert.NoError(t, err) - instance2, err := newInstance(&Config{}, "test", overrides, &noopStorage{}, prometheus.DefaultRegisterer, log.NewNopLogger(), nil, nil) + instance2, err := newInstance(&Config{}, "test", overrides, &noopStorage{}, prometheus.DefaultRegisterer, log.NewNopLogger(), nil, nil, nil) assert.NoError(t, err) end := make(chan struct{}) @@ -87,7 +87,7 @@ func Test_instance_updateProcessors(t *testing.T) { logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout)) overrides := mockOverrides{} - instance, err := newInstance(&cfg, "test", &overrides, &noopStorage{}, prometheus.DefaultRegisterer, logger, nil, nil) + instance, err := newInstance(&cfg, "test", &overrides, &noopStorage{}, prometheus.DefaultRegisterer, logger, nil, nil, nil) assert.NoError(t, err) // stop the update goroutine diff --git a/modules/generator/processor/localblocks/livetraces.go b/modules/generator/processor/localblocks/livetraces.go deleted file mode 100644 index 9d0ec8231b1..00000000000 --- a/modules/generator/processor/localblocks/livetraces.go +++ /dev/null @@ -1,86 +0,0 @@ -package localblocks - -import ( - "hash" - "hash/fnv" - "time" - - v1 "github.com/grafana/tempo/pkg/tempopb/trace/v1" -) - -type liveTrace struct { - id []byte - timestamp time.Time - Batches []*v1.ResourceSpans - - sz uint64 -} - -type liveTraces struct { - hash hash.Hash64 - traces map[uint64]*liveTrace - - sz uint64 -} - -func newLiveTraces() *liveTraces { - return &liveTraces{ - hash: fnv.New64(), - traces: make(map[uint64]*liveTrace), - } -} - -func (l *liveTraces) token(traceID []byte) uint64 { - l.hash.Reset() - l.hash.Write(traceID) - return l.hash.Sum64() -} - -func (l *liveTraces) Len() uint64 { - return uint64(len(l.traces)) -} - -func (l *liveTraces) Size() uint64 { - return l.sz -} - -func (l *liveTraces) Push(traceID []byte, batch *v1.ResourceSpans, max uint64) bool { - token := l.token(traceID) - - tr := l.traces[token] - if tr == nil { - - // Before adding this check against max - // Zero means no limit - if max > 0 && uint64(len(l.traces)) >= max { - return false - } - - tr = &liveTrace{ - id: traceID, - } - l.traces[token] = tr - } - - sz := uint64(batch.Size()) - tr.sz += sz - l.sz += sz - - tr.Batches = append(tr.Batches, batch) - tr.timestamp = time.Now() - return true -} - -func (l 
*liveTraces) CutIdle(idleSince time.Time, immediate bool) []*liveTrace { - res := []*liveTrace{} - - for k, tr := range l.traces { - if tr.timestamp.Before(idleSince) || immediate { - res = append(res, tr) - l.sz -= tr.sz - delete(l.traces, k) - } - } - - return res -} diff --git a/modules/generator/processor/localblocks/processor.go b/modules/generator/processor/localblocks/processor.go index 78dca7bdc4a..d0e23d39a79 100644 --- a/modules/generator/processor/localblocks/processor.go +++ b/modules/generator/processor/localblocks/processor.go @@ -21,6 +21,7 @@ import ( "go.opentelemetry.io/otel" gen "github.com/grafana/tempo/modules/generator/processor" + "github.com/grafana/tempo/pkg/livetraces" "github.com/grafana/tempo/pkg/model" "github.com/grafana/tempo/pkg/tempopb" v1 "github.com/grafana/tempo/pkg/tempopb/trace/v1" @@ -70,7 +71,7 @@ type Processor struct { flushqueue *flushqueues.PriorityQueue liveTracesMtx sync.Mutex - liveTraces *liveTraces + liveTraces *livetraces.LiveTraces[*v1.ResourceSpans] traceSizes *tracesizes.Tracker writer tempodb.Writer @@ -103,7 +104,7 @@ func New(cfg Config, tenant string, wal *wal.WAL, writer tempodb.Writer, overrid walBlocks: map[uuid.UUID]common.WALBlock{}, completeBlocks: map[uuid.UUID]*ingester.LocalBlock{}, flushqueue: flushqueues.NewPriorityQueue(metricFlushQueueSize.WithLabelValues(tenant)), - liveTraces: newLiveTraces(), + liveTraces: livetraces.New[*v1.ResourceSpans](func(rs *v1.ResourceSpans) uint64 { return uint64(rs.Size()) }), traceSizes: tracesizes.New(), closeCh: make(chan struct{}), wg: sync.WaitGroup{}, @@ -131,7 +132,7 @@ func New(cfg Config, tenant string, wal *wal.WAL, writer tempodb.Writer, overrid } func (*Processor) Name() string { - return "LocalBlocksProcessor" + return Name } func (p *Processor) PushSpans(_ context.Context, req *tempopb.PushSpansRequest) { @@ -597,7 +598,7 @@ func (p *Processor) cutIdleTraces(immediate bool) error { p.liveTracesMtx.Lock() // Record live traces before flushing so we know the high water mark - metricLiveTraces.WithLabelValues(p.tenant).Set(float64(len(p.liveTraces.traces))) + metricLiveTraces.WithLabelValues(p.tenant).Set(float64(p.liveTraces.Len())) metricLiveTraceBytes.WithLabelValues(p.tenant).Set(float64(p.liveTraces.Size())) since := time.Now().Add(-p.Cfg.TraceIdlePeriod) @@ -611,7 +612,7 @@ func (p *Processor) cutIdleTraces(immediate bool) error { // Sort by ID sort.Slice(tracesToCut, func(i, j int) bool { - return bytes.Compare(tracesToCut[i].id, tracesToCut[j].id) == -1 + return bytes.Compare(tracesToCut[i].ID, tracesToCut[j].ID) == -1 }) for _, t := range tracesToCut { @@ -620,7 +621,7 @@ func (p *Processor) cutIdleTraces(immediate bool) error { ResourceSpans: t.Batches, } - err := p.writeHeadBlock(t.id, tr) + err := p.writeHeadBlock(t.ID, tr) if err != nil { return err } diff --git a/modules/generator/processor/localblocks/processor_test.go b/modules/generator/processor/localblocks/processor_test.go index c7be41cde46..7a21ab657e2 100644 --- a/modules/generator/processor/localblocks/processor_test.go +++ b/modules/generator/processor/localblocks/processor_test.go @@ -93,11 +93,24 @@ func TestProcessorDoesNotRace(t *testing.T) { }, } overrides = &mockOverrides{} + e = traceql.NewEngine() ) p, err := New(cfg, tenant, wal, &mockWriter{}, overrides) require.NoError(t, err) + qr := &tempopb.QueryRangeRequest{ + Query: "{} | rate() by (resource.service.name)", + Start: uint64(time.Now().Add(-5 * time.Minute).UnixNano()), + End: uint64(time.Now().UnixNano()), + Step: uint64(30 * time.Second), + 
} + me, err := e.CompileMetricsQueryRange(qr, 0, 0, false) + require.NoError(t, err) + + je, err := e.CompileMetricsQueryRangeNonRaw(qr, traceql.AggregateModeSum) + require.NoError(t, err) + var ( end = make(chan struct{}) wg = sync.WaitGroup{} @@ -176,12 +189,7 @@ func TestProcessorDoesNotRace(t *testing.T) { }) go concurrent(func() { - _, err := p.QueryRange(ctx, &tempopb.QueryRangeRequest{ - Query: "{} | rate() by (resource.service.name)", - Start: uint64(time.Now().Add(-5 * time.Minute).UnixNano()), - End: uint64(time.Now().UnixNano()), - Step: uint64(30 * time.Second), - }) + err := p.QueryRange(ctx, qr, me, je) require.NoError(t, err) }) diff --git a/modules/generator/processor/localblocks/query_range.go b/modules/generator/processor/localblocks/query_range.go index 848a78f9716..18c321147b0 100644 --- a/modules/generator/processor/localblocks/query_range.go +++ b/modules/generator/processor/localblocks/query_range.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "sync" "time" "github.com/google/uuid" @@ -23,7 +22,7 @@ import ( ) // QueryRange returns metrics. -func (p *Processor) QueryRange(ctx context.Context, req *tempopb.QueryRangeRequest) (traceql.SeriesSet, error) { +func (p *Processor) QueryRange(ctx context.Context, req *tempopb.QueryRangeRequest, rawEval *traceql.MetricsEvalulator, jobEval *traceql.MetricsFrontendEvaluator) error { ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -32,12 +31,12 @@ func (p *Processor) QueryRange(ctx context.Context, req *tempopb.QueryRangeReque cutoff := time.Now().Add(-p.Cfg.CompleteBlockTimeout).Add(-timeBuffer) if req.Start < uint64(cutoff.UnixNano()) { - return nil, fmt.Errorf("time range must be within last %v", p.Cfg.CompleteBlockTimeout) + return fmt.Errorf("time range must be within last %v", p.Cfg.CompleteBlockTimeout) } expr, err := traceql.Parse(req.Query) if err != nil { - return nil, fmt.Errorf("compiling query: %w", err) + return fmt.Errorf("compiling query: %w", err) } unsafe := p.overrides.UnsafeQueryHints(p.tenant) @@ -52,25 +51,6 @@ func (p *Processor) QueryRange(ctx context.Context, req *tempopb.QueryRangeReque concurrency = uint(v) } - e := traceql.NewEngine() - - // Compile the raw version of the query for wal blocks - // These aren't cached and we put them all into the same evaluator - // for efficiency. - eval, err := e.CompileMetricsQueryRange(req, int(req.Exemplars), timeOverlapCutoff, unsafe) - if err != nil { - return nil, err - } - - // This is a summation version of the query for complete blocks - // which can be cached. But we need their results separately so they are - // computed separately. 
- overallEvalMtx := sync.Mutex{} - overallEval, err := traceql.NewEngine().CompileMetricsQueryRangeNonRaw(req, traceql.AggregateModeSum) - if err != nil { - return nil, err - } - withinRange := func(m *backend.BlockMeta) bool { start := uint64(m.StartTime.UnixNano()) end := uint64(m.EndTime.UnixNano()) @@ -86,7 +66,7 @@ func (p *Processor) QueryRange(ctx context.Context, req *tempopb.QueryRangeReque wg.Add(1) go func(w common.WALBlock) { defer wg.Done() - err := p.queryRangeWALBlock(ctx, w, eval) + err := p.queryRangeWALBlock(ctx, w, rawEval) if err != nil { jobErr.Store(err) } @@ -105,7 +85,7 @@ func (p *Processor) QueryRange(ctx context.Context, req *tempopb.QueryRangeReque wg.Add(1) go func(w common.WALBlock) { defer wg.Done() - err := p.queryRangeWALBlock(ctx, w, eval) + err := p.queryRangeWALBlock(ctx, w, rawEval) if err != nil { jobErr.Store(err) } @@ -130,23 +110,17 @@ func (p *Processor) QueryRange(ctx context.Context, req *tempopb.QueryRangeReque return } - overallEvalMtx.Lock() - defer overallEvalMtx.Unlock() - overallEval.ObserveSeries(resp) + jobEval.ObserveSeries(resp) }(b) } wg.Wait() if err := jobErr.Load(); err != nil { - return nil, err + return err } - // Combine the uncacheable results into the overall results - walResults := eval.Results().ToProto(req) - overallEval.ObserveSeries(walResults) - - return overallEval.Results(), nil + return nil } func (p *Processor) queryRangeWALBlock(ctx context.Context, b common.WALBlock, eval *traceql.MetricsEvalulator) error { diff --git a/modules/generator/processor/spanmetrics/config.go b/modules/generator/processor/spanmetrics/config.go index 797f0dfa4b8..161dae0d3ef 100644 --- a/modules/generator/processor/spanmetrics/config.go +++ b/modules/generator/processor/spanmetrics/config.go @@ -22,6 +22,8 @@ const ( dimInstance = "instance" ) +var intrinsicLabels = []string{dimService, dimSpanName, dimSpanKind, dimStatusCode, dimStatusMessage} + type Config struct { // Buckets for latency histogram in seconds. 
HistogramBuckets []float64 `yaml:"histogram_buckets"` diff --git a/modules/generator/processor/spanmetrics/spanmetrics.go b/modules/generator/processor/spanmetrics/spanmetrics.go index 954c2ddf6e2..d6730ce897a 100644 --- a/modules/generator/processor/spanmetrics/spanmetrics.go +++ b/modules/generator/processor/spanmetrics/spanmetrics.go @@ -6,7 +6,6 @@ import ( "time" "unicode/utf8" - "github.com/prometheus/prometheus/util/strutil" "go.opentelemetry.io/otel" gen "github.com/grafana/tempo/modules/generator/processor" @@ -68,11 +67,11 @@ func New(cfg Config, reg registry.Registry, filteredSpansCounter, invalidUTF8Cou } for _, d := range cfg.Dimensions { - labels = append(labels, sanitizeLabelNameWithCollisions(d)) + labels = append(labels, processor_util.SanitizeLabelNameWithCollisions(d, intrinsicLabels)) } for _, m := range cfg.DimensionMappings { - labels = append(labels, sanitizeLabelNameWithCollisions(m.Name)) + labels = append(labels, processor_util.SanitizeLabelNameWithCollisions(m.Name, intrinsicLabels)) } err := validateLabelValues(labels) @@ -124,16 +123,15 @@ func (p *Processor) Shutdown(_ context.Context) { } func (p *Processor) aggregateMetrics(resourceSpans []*v1_trace.ResourceSpans) { + resourceLabels := make([]string, 0) + resourceValues := make([]string, 0) for _, rs := range resourceSpans { // already extract job name & instance id, so we only have to do it once per batch of spans svcName, _ := processor_util.FindServiceName(rs.Resource.Attributes) jobName := processor_util.GetJobValue(rs.Resource.Attributes) instanceID, _ := processor_util.FindInstanceID(rs.Resource.Attributes) - resourceLabels := make([]string, 0) // TODO move outside the loop and reuse? - resourceValues := make([]string, 0) // TODO don't allocate unless needed? - if p.Cfg.EnableTargetInfo { - resourceLabels, resourceValues = processor_util.GetTargetInfoAttributesValues(rs.Resource.Attributes, p.Cfg.TargetInfoExcludedDimensions) + processor_util.GetTargetInfoAttributesValues(&resourceLabels, &resourceValues, rs.Resource.Attributes, p.Cfg.TargetInfoExcludedDimensions, intrinsicLabels) } for _, ils := range rs.ScopeSpans { for _, span := range ils.Spans { @@ -237,10 +235,6 @@ func (p *Processor) aggregateMetricsForSpan(svcName string, jobName string, inst // TODO - attribute names are stable across applications // so let's cache the result of previous sanitizations resourceAttributesCount := len(targetInfoLabels) - for index, label := range targetInfoLabels { - // sanitize label name - targetInfoLabels[index] = sanitizeLabelNameWithCollisions(label) - } // add joblabel to target info only if job is not blank if jobName != "" { @@ -263,26 +257,11 @@ func (p *Processor) aggregateMetricsForSpan(svcName string, jobName string, inst } } -func sanitizeLabelNameWithCollisions(name string) string { - sanitized := strutil.SanitizeLabelName(name) - - if isIntrinsicDimension(sanitized) { - return "__" + sanitized - } - - return sanitized -} - func validateLabelValues(v []string) error { for _, value := range v { if !utf8.ValidString(value) { return fmt.Errorf("invalid utf8 string: %s", value) } } - return nil } - -func isIntrinsicDimension(name string) bool { - return processor_util.Contains(name, []string{dimJob, dimSpanName, dimSpanKind, dimStatusCode, dimStatusMessage, dimInstance}) -} diff --git a/modules/generator/processor/util/util.go b/modules/generator/processor/util/util.go index 796872b526a..8c62ae386e1 100644 --- a/modules/generator/processor/util/util.go +++ b/modules/generator/processor/util/util.go @@ 
-1,12 +1,16 @@ package util import ( + "slices" + semconv "go.opentelemetry.io/otel/semconv/v1.25.0" v1_common "github.com/grafana/tempo/pkg/tempopb/common/v1" v1_resource "github.com/grafana/tempo/pkg/tempopb/resource/v1" v1 "github.com/grafana/tempo/pkg/tempopb/trace/v1" tempo_util "github.com/grafana/tempo/pkg/util" + + "github.com/prometheus/prometheus/util/strutil" ) func FindServiceName(attributes []*v1_common.KeyValue) (string, bool) { @@ -68,21 +72,30 @@ func GetJobValue(attributes []*v1_common.KeyValue) string { return namespace + svName } -func GetTargetInfoAttributesValues(attributes []*v1_common.KeyValue, exclude []string) ([]string, []string) { +func GetTargetInfoAttributesValues(keys, values *[]string, attributes []*v1_common.KeyValue, exclude, intrinsicLabels []string) { // TODO allocate with known length - keys := make([]string, 0) - values := make([]string, 0) + *keys = (*keys)[:0] + *values = (*values)[:0] for _, attrs := range attributes { // ignoring job and instance key := attrs.Key - value := tempo_util.StringifyAnyValue(attrs.Value) if key != "service.name" && key != "service.namespace" && key != "service.instance.id" && !Contains(key, exclude) { - keys = append(keys, key) - values = append(values, value) + *keys = append(*keys, SanitizeLabelNameWithCollisions(key, intrinsicLabels)) + value := tempo_util.StringifyAnyValue(attrs.Value) + *values = append(*values, value) } } +} + +func SanitizeLabelNameWithCollisions(name string, dimensions []string) string { + sanitized := strutil.SanitizeLabelName(name) + + // check if same label as intrinsics + if slices.Contains(dimensions, sanitized) { + return "__" + sanitized + } - return keys, values + return sanitized } func Contains(key string, list []string) bool { diff --git a/modules/generator/registry/native_histogram.go b/modules/generator/registry/native_histogram.go index e14cf7e3bdd..b0800d6f6b7 100644 --- a/modules/generator/registry/native_histogram.go +++ b/modules/generator/registry/native_histogram.go @@ -141,6 +141,8 @@ func (h *nativeHistogram) newSeries(labelValueCombo *LabelValueCombo, value floa NativeHistogramBucketFactor: 1.1, NativeHistogramMaxBucketNumber: 100, NativeHistogramMinResetDuration: 15 * time.Minute, + // TODO enable exemplars on native histograms + NativeHistogramMaxExemplars: -1, }), lastUpdated: 0, firstSeries: atomic.NewBool(true), @@ -248,7 +250,6 @@ func (h *nativeHistogram) collectMetrics(appender storage.Appender, timeMs int64 } } } - } return diff --git a/modules/generator/storage/instance_test.go b/modules/generator/storage/instance_test.go index 5ad4d9de81e..f3aa115c99f 100644 --- a/modules/generator/storage/instance_test.go +++ b/modules/generator/storage/instance_test.go @@ -12,8 +12,8 @@ import ( "time" "github.com/go-kit/log" + "github.com/grafana/dskit/test" "github.com/grafana/dskit/user" - "github.com/grafana/tempo/modules/overrides" "github.com/prometheus/client_golang/prometheus" prometheus_common_config "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -25,6 +25,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.uber.org/atomic" + + "github.com/grafana/tempo/modules/overrides" ) // Verify basic functionality like sending metrics and exemplars, buffering and retrying failed @@ -73,13 +75,12 @@ func TestInstance(t *testing.T) { }) // Wait until remote.Storage has tried at least once to send data - err = waitUntil(20*time.Second, func() bool { + test.Poll(t,
30*time.Second, true, func() interface{} { mockServer.mtx.Lock() defer mockServer.mtx.Unlock() return mockServer.refusedRequests > 0 }) - require.NoError(t, err, "timed out while waiting for refused requests") // Allow requests mockServer.refuseRequests.Store(false) @@ -144,7 +145,7 @@ func TestInstance_multiTenancy(t *testing.T) { }) // Wait until every tenant received at least one request - err = waitUntil(20*time.Second, func() bool { + test.Poll(t, 45*time.Second, true, func() interface{} { mockServer.mtx.Lock() defer mockServer.mtx.Unlock() @@ -155,7 +156,6 @@ func TestInstance_multiTenancy(t *testing.T) { } return true }) - require.NoError(t, err, "timed out while waiting for accepted requests") cancel() for _, instance := range instances { @@ -238,13 +238,12 @@ func TestInstance_remoteWriteHeaders(t *testing.T) { }) // Wait until remote.Storage has tried at least once to send data - err = waitUntil(20*time.Second, func() bool { + test.Poll(t, 30*time.Second, true, func() interface{} { mockServer.mtx.Lock() defer mockServer.mtx.Unlock() return mockServer.refusedRequests > 0 }) - require.NoError(t, err, "timed out while waiting for refused requests") // Allow requests mockServer.refuseRequests.Store(false) @@ -348,22 +347,6 @@ func poll(ctx context.Context, interval time.Duration, f func()) { } } -// waitUntil executes f until it returns true or timeout is reached. -func waitUntil(timeout time.Duration, f func() bool) error { - start := time.Now() - - for { - if f() { - return nil - } - if time.Since(start) > timeout { - return fmt.Errorf("timed out while waiting for condition") - } - - time.Sleep(50 * time.Millisecond) - } -} - var _ Overrides = (*mockOverrides)(nil) type mockOverrides struct { diff --git a/modules/ingester/config.go b/modules/ingester/config.go index 1b5d34c8a87..f64d9bbd6cf 100644 --- a/modules/ingester/config.go +++ b/modules/ingester/config.go @@ -8,6 +8,7 @@ import ( "github.com/go-kit/log/level" "github.com/grafana/dskit/flagext" "github.com/grafana/dskit/ring" + "github.com/grafana/tempo/pkg/ingest" "github.com/grafana/tempo/pkg/util/log" "github.com/grafana/tempo/tempodb" @@ -16,7 +17,8 @@ import ( // Config for an ingester. type Config struct { - LifecyclerConfig ring.LifecyclerConfig `yaml:"lifecycler,omitempty"` + LifecyclerConfig ring.LifecyclerConfig `yaml:"lifecycler,omitempty"` + IngesterPartitionRing PartitionRingConfig `yaml:"partition_ring" category:"experimental"` ConcurrentFlushes int `yaml:"concurrent_flushes"` FlushCheckPeriod time.Duration `yaml:"flush_check_period"` @@ -28,7 +30,9 @@ type Config struct { OverrideRingKey string `yaml:"override_ring_key"` FlushAllOnShutdown bool `yaml:"flush_all_on_shutdown"` - DedicatedColumns backend.DedicatedColumns `yaml:"-"` + // This config is dynamically injected because it is defined outside the ingester config. + DedicatedColumns backend.DedicatedColumns `yaml:"-"` + IngestStorageConfig ingest.Config `yaml:"-"` } // RegisterFlagsAndApplyDefaults registers the flags. 
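An aside on the label handling changed earlier in this diff: `SanitizeLabelNameWithCollisions` first coerces an attribute name into a valid Prometheus label name, then prefixes the result with `__` when it would collide with one of the generator's intrinsic dimensions, so user attributes can never shadow intrinsic series labels. (The reworked `GetTargetInfoAttributesValues` also reuses caller-owned slices via `(*keys)[:0]` to avoid per-batch allocations.) A minimal sketch of the collision rule, using only the behavior visible in the diff:

```go
package main

import (
	"fmt"
	"slices"

	"github.com/prometheus/prometheus/util/strutil"
)

// sanitize mirrors SanitizeLabelNameWithCollisions as shown in the diff:
// sanitize first, then escape collisions with intrinsic dimensions.
func sanitize(name string, intrinsics []string) string {
	sanitized := strutil.SanitizeLabelName(name)
	if slices.Contains(intrinsics, sanitized) {
		return "__" + sanitized
	}
	return sanitized
}

func main() {
	intrinsics := []string{"span_name", "span_kind", "status_code", "job", "instance"}
	fmt.Println(sanitize("http.method", intrinsics)) // http_method: no collision
	fmt.Println(sanitize("span.name", intrinsics))   // __span_name: collides with the intrinsic span_name
}
```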
@@ -39,6 +43,8 @@ func (cfg *Config) RegisterFlagsAndApplyDefaults(prefix string, f *flag.FlagSet) cfg.LifecyclerConfig.RingConfig.ReplicationFactor = 1 cfg.LifecyclerConfig.RingConfig.HeartbeatTimeout = 5 * time.Minute + cfg.IngesterPartitionRing.RegisterFlags(f) + cfg.ConcurrentFlushes = 4 cfg.FlushCheckPeriod = 10 * time.Second cfg.FlushOpTimeout = 5 * time.Minute diff --git a/modules/ingester/flush.go b/modules/ingester/flush.go index 9bb3afd2291..3464ee1a620 100644 --- a/modules/ingester/flush.go +++ b/modules/ingester/flush.go @@ -72,20 +72,6 @@ const ( opKindFlush ) -// Flush triggers a flush of all in memory traces to disk. This is called -// by the lifecycler on shutdown and will put our traces in the WAL to be -// replayed. -func (i *Ingester) Flush() { - instances := i.getInstances() - - for _, instance := range instances { - err := instance.CutCompleteTraces(0, true) - if err != nil { - level.Error(log.WithUserID(instance.instanceID, log.Logger)).Log("msg", "failed to cut complete traces on shutdown", "err", err) - } - } -} - // ShutdownHandler handles a graceful shutdown for an ingester. It does the following things in order // * Stop incoming writes by exiting from the ring // * Flush all blocks to backend @@ -124,9 +110,9 @@ func (i *Ingester) FlushHandler(w http.ResponseWriter, r *http.Request) { return } level.Info(log.Logger).Log("msg", "flushing instance", "instance", instance.instanceID) - i.sweepInstance(instance, true) + i.cutOneInstanceToWal(instance, true) } else { - i.sweepAllInstances(true) + i.cutAllInstancesToWal() } w.WriteHeader(http.StatusNoContent) @@ -151,16 +137,47 @@ func (o *flushOp) Priority() int64 { return -o.at.Unix() } -// sweepAllInstances periodically schedules series for flushing and garbage collects instances with no series -func (i *Ingester) sweepAllInstances(immediate bool) { +// cutToWalLoop kicks off a goroutine for the passed instance that will periodically cut traces to WAL. +// it signals completion through cutToWalWg, waits for cutToWalStart and stops on cutToWalStop. +func (i *Ingester) cutToWalLoop(instance *instance) { + i.cutToWalWg.Add(1) + + go func() { + defer i.cutToWalWg.Done() + + // wait for the signal to start. 
we need the wal to be completely replayed + // before we start cutting to WAL + select { + case <-i.cutToWalStart: + case <-i.cutToWalStop: + return + } + + // cut to wal on every tick until stopped + ticker := time.NewTicker(i.cfg.FlushCheckPeriod) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + i.cutOneInstanceToWal(instance, false) + case <-i.cutToWalStop: + return + } + } + }() +} + +// cutAllInstancesToWal immediately cuts complete traces in all instances to the WAL +func (i *Ingester) cutAllInstancesToWal() { instances := i.getInstances() for _, instance := range instances { - i.sweepInstance(instance, immediate) + i.cutOneInstanceToWal(instance, true) } } -func (i *Ingester) sweepInstance(instance *instance, immediate bool) { +func (i *Ingester) cutOneInstanceToWal(instance *instance, immediate bool) { // cut traces internally err := instance.CutCompleteTraces(i.cfg.MaxTraceIdle, immediate) if err != nil { @@ -204,6 +221,7 @@ func (i *Ingester) flushLoop(j int) { if o == nil { return } + op := o.(*flushOp) op.attempts++ @@ -246,7 +264,7 @@ func handleAbandonedOp(op *flushOp) { func (i *Ingester) handleComplete(ctx context.Context, op *flushOp) (retry bool, err error) { ctx, sp := tracer.Start(ctx, "ingester.Complete", trace.WithAttributes(attribute.String("tenant", op.userID), attribute.String("blockID", op.blockID.String()))) defer sp.End() - withSpan(level.Info(log.Logger), sp).Log("msg", "flushing block", "tenant", op.userID, "block", op.blockID.String()) + withSpan(level.Info(log.Logger), sp).Log("msg", "completing block", "tenant", op.userID, "block", op.blockID.String()) // No point in proceeding if shutdown has been initiated since // we won't be able to queue up the next flush op @@ -256,7 +274,6 @@ func (i *Ingester) handleComplete(ctx context.Context, op *flushOp) (retry bool, } start := time.Now() - level.Info(log.Logger).Log("msg", "completing block", "tenant", op.userID, "blockID", op.blockID) instance, err := i.getOrCreateInstance(op.userID) if err != nil { return false, err } diff --git a/modules/ingester/ingester.go b/modules/ingester/ingester.go index 07c9eec0a04..fc6c722f727 100644 --- a/modules/ingester/ingester.go +++ b/modules/ingester/ingester.go @@ -11,9 +11,11 @@ import ( "github.com/go-kit/log/level" "github.com/gogo/status" "github.com/google/uuid" + "github.com/grafana/dskit/kv" "github.com/grafana/dskit/ring" "github.com/grafana/dskit/services" "github.com/grafana/dskit/user" + "github.com/grafana/tempo/pkg/ingest" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "go.opentelemetry.io/otel" @@ -51,6 +53,10 @@ var tracer = otel.Tracer("modules/ingester") const ( ingesterRingKey = "ring" + + // PartitionRingKey is the key under which we store the partitions ring used by the "ingest storage". + PartitionRingKey = "ingester-partitions" + PartitionRingName = "ingester-partitions" ) // Ingester builds blocks out of incoming traces @@ -71,7 +77,15 @@ type Ingester struct { flushQueues *flushqueues.ExclusiveQueues flushQueuesDone sync.WaitGroup - limiter *Limiter + // coordinates the cut-to-wal goroutines via cutToWalStart and cutToWalStop + cutToWalWg sync.WaitGroup + cutToWalStop chan struct{} + cutToWalStart chan struct{} + limiter Limiter + + // Used by ingest storage when enabled + ingestPartitionLifecycler *ring.PartitionInstanceLifecycler + ingestPartitionID int32 overrides ingesterOverrides @@ -79,7 +93,7 @@ type Ingester struct { } // New makes a new Ingester. 
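The cut-to-WAL loops introduced above follow a common gating pattern: every per-tenant goroutine blocks on a start channel (closed once WAL replay completes) and exits on a stop channel, while a WaitGroup lets shutdown wait for all loops to drain. A stripped-down sketch of the same pattern in isolation:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// runGated starts a worker that stays idle until start is closed (or aborts
// if stop is closed first), then runs work on every tick until stop is closed.
func runGated(start, stop <-chan struct{}, wg *sync.WaitGroup, period time.Duration, work func()) {
	wg.Add(1)
	go func() {
		defer wg.Done()
		select {
		case <-start: // e.g. WAL replay finished
		case <-stop: // shut down before ever starting
			return
		}
		ticker := time.NewTicker(period)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				work()
			case <-stop:
				return
			}
		}
	}()
}

func main() {
	start, stop := make(chan struct{}), make(chan struct{})
	var wg sync.WaitGroup
	runGated(start, stop, &wg, 10*time.Millisecond, func() { fmt.Println("tick") })
	close(start)                      // like close(i.cutToWalStart) after replay
	time.Sleep(35 * time.Millisecond) // let a few ticks happen
	close(stop)                       // like close(i.cutToWalStop) in stopping()
	wg.Wait()                         // like i.cutToWalWg.Wait()
}
```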
-func New(cfg Config, store storage.Store, overrides overrides.Interface, reg prometheus.Registerer) (*Ingester, error) { +func New(cfg Config, store storage.Store, overrides overrides.Interface, reg prometheus.Registerer, singlePartition bool) (*Ingester, error) { i := &Ingester{ cfg: cfg, instances: map[string]*instance{}, @@ -87,18 +101,50 @@ func New(cfg Config, store storage.Store, overrides overrides.Interface, reg pro flushQueues: flushqueues.New(cfg.ConcurrentFlushes, metricFlushQueueLength), replayJitter: true, overrides: overrides, + + cutToWalStart: make(chan struct{}), + cutToWalStop: make(chan struct{}), } i.pushErr.Store(ErrStarting) i.local = store.WAL().LocalBackend() - lc, err := ring.NewLifecycler(cfg.LifecyclerConfig, i, "ingester", cfg.OverrideRingKey, true, log.Logger, prometheus.WrapRegistererWithPrefix("tempo_", reg)) + lc, err := ring.NewLifecycler(cfg.LifecyclerConfig, nil, "ingester", cfg.OverrideRingKey, true, log.Logger, prometheus.WrapRegistererWithPrefix("tempo_", reg)) if err != nil { return nil, fmt.Errorf("NewLifecycler failed: %w", err) } i.lifecycler = lc + if ingestCfg := cfg.IngestStorageConfig; ingestCfg.Enabled { + if singlePartition { + // For single-binary don't require hostname to identify a partition. + // Assume partition 0. + i.ingestPartitionID = 0 + } else { + i.ingestPartitionID, err = ingest.IngesterPartitionID(cfg.LifecyclerConfig.ID) + if err != nil { + return nil, fmt.Errorf("calculating ingester partition ID: %w", err) + } + } + + partitionRingKV := cfg.IngesterPartitionRing.KVStore.Mock + if partitionRingKV == nil { + partitionRingKV, err = kv.NewClient(cfg.IngesterPartitionRing.KVStore, ring.GetPartitionRingCodec(), kv.RegistererWithKVName(reg, PartitionRingName+"-lifecycler"), log.Logger) + if err != nil { + return nil, fmt.Errorf("creating KV store for ingester partition ring: %w", err) + } + } + + i.ingestPartitionLifecycler = ring.NewPartitionInstanceLifecycler( + i.cfg.IngesterPartitionRing.ToLifecyclerConfig(i.ingestPartitionID, cfg.LifecyclerConfig.ID), + PartitionRingName, + PartitionRingKey, + partitionRingKV, + log.Logger, + prometheus.WrapRegistererWithPrefix("cortex_", reg)) + } + // Now that the lifecycler has been created, we can create the limiter // which depends on it. 
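The partition ID above is derived from the lifecycler's instance ID by `ingest.IngesterPartitionID`, which this diff does not show. Assuming it follows the convention used elsewhere in the Grafana ecosystem, where the trailing sequence number of the instance name selects the partition (so `ingester-zone-a-3` owns partition 3), a hypothetical sketch:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// partitionIDFromInstance is a hypothetical stand-in for
// ingest.IngesterPartitionID: it assumes the instance ID ends in
// "-<sequence>" and uses that sequence as the partition ID, so each
// StatefulSet replica maps to a stable partition across restarts.
func partitionIDFromInstance(instanceID string) (int32, error) {
	idx := strings.LastIndex(instanceID, "-")
	if idx < 0 {
		return 0, fmt.Errorf("instance ID %q has no sequence suffix", instanceID)
	}
	seq, err := strconv.Atoi(instanceID[idx+1:])
	if err != nil {
		return 0, fmt.Errorf("parsing sequence from %q: %w", instanceID, err)
	}
	return int32(seq), nil
}

func main() {
	id, _ := partitionIDFromInstance("ingester-zone-a-3")
	fmt.Println(id) // 3
}
```

This is also why the single-binary path above hardcodes partition 0: a monolith has no hostname convention to parse.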
i.limiter = NewLimiter(overrides, i.lifecycler, cfg.LifecyclerConfig.RingConfig.ReplicationFactor) @@ -106,7 +152,7 @@ func New(cfg Config, store storage.Store, overrides overrides.Interface, reg pro i.subservicesWatcher = services.NewFailureWatcher() i.subservicesWatcher.WatchService(i.lifecycler) - i.Service = services.NewBasicService(i.starting, i.loop, i.stopping) + i.Service = services.NewBasicService(i.starting, i.running, i.stopping) return i, nil } @@ -135,39 +181,30 @@ func (i *Ingester) starting(ctx context.Context) error { return fmt.Errorf("failed to start lifecycle: %w", err) } + if i.ingestPartitionLifecycler != nil { + if err := i.ingestPartitionLifecycler.StartAsync(context.Background()); err != nil { + return fmt.Errorf("failed to start ingest partition lifecycler: %w", err) + } + if err := i.ingestPartitionLifecycler.AwaitRunning(ctx); err != nil { + return fmt.Errorf("failed to start ingest partition lifecycle: %w", err) + } + } + + // accept traces i.pushErr.Store(nil) + // start flushing traces to wal + close(i.cutToWalStart) + return nil } -func (i *Ingester) loop(ctx context.Context) error { - flushTicker := time.NewTicker(i.cfg.FlushCheckPeriod) - defer flushTicker.Stop() - - for { - select { - case <-flushTicker.C: - i.sweepAllInstances(false) - - case <-ctx.Done(): - return nil - - case err := <-i.subservicesWatcher.Chan(): - return fmt.Errorf("ingester subservice failed: %w", err) - } - } -} - -// complete the flushing -// ExclusiveQueues.activekeys keeps track of flush operations due for processing -// ExclusiveQueues.IsEmpty check uses ExclusiveQueues.activeKeys to determine if flushQueues is empty or not -// sweepAllInstances prepares remaining traces to be flushed by flushLoop routine, also updating ExclusiveQueues.activekeys with keys for new flush operations -// ExclusiveQueues.activeKeys is cleared of a flush operation when a processing of flush operation is either successful or doesn't return retry signal -// This ensures that i.flushQueues is empty only when all traces are flushed -func (i *Ingester) flushRemaining() { - i.sweepAllInstances(true) - for !i.flushQueues.IsEmpty() { - time.Sleep(100 * time.Millisecond) +func (i *Ingester) running(ctx context.Context) error { + select { + case <-ctx.Done(): + return nil + case err := <-i.subservicesWatcher.Chan(): + return fmt.Errorf("ingester subservice failed: %w", err) } } @@ -175,9 +212,16 @@ func (i *Ingester) stopping(_ error) error { i.markUnavailable() - // flush any remaining traces + // signal all cutting to wal to stop and wait for all goroutines to finish + close(i.cutToWalStop) + i.cutToWalWg.Wait() + if i.cfg.FlushAllOnShutdown { + // force all in memory traces to be flushed to disk AND fully flush them to the backend i.flushRemaining() + } else { + // force all in memory traces to be flushed to disk + i.cutAllInstancesToWal() } if i.flushQueues != nil { @@ -190,6 +234,19 @@ func (i *Ingester) stopping(_ error) error { return nil } +// flushRemaining completes the flushing: +// ExclusiveQueues.activeKeys keeps track of flush operations due for processing +// ExclusiveQueues.IsEmpty uses ExclusiveQueues.activeKeys to determine whether flushQueues is empty +// cutAllInstancesToWal prepares remaining traces to be flushed by the flushLoop routine, also updating ExclusiveQueues.activeKeys with keys for new flush operations +// a flush operation is cleared from ExclusiveQueues.activeKeys when its processing either succeeds or doesn't signal a retry +// 
This ensures that i.flushQueues is empty only when all traces are flushed +func (i *Ingester) flushRemaining() { + i.cutAllInstancesToWal() + for !i.flushQueues.IsEmpty() { + time.Sleep(100 * time.Millisecond) + } +} + func (i *Ingester) markUnavailable() { // Lifecycler can be nil if the ingester is for a flusher. if i.lifecycler != nil { @@ -200,7 +257,7 @@ func (i *Ingester) markUnavailable() { } // This will prevent us accepting any more samples - i.stopIncomingRequests() + i.pushErr.Store(ErrShuttingDown) } // PushBytes implements tempopb.Pusher.PushBytes. Traces pushed to this endpoint are expected to be in the formats @@ -328,6 +385,8 @@ func (i *Ingester) getOrCreateInstance(instanceID string) (*instance, error) { return nil, err } i.instances[instanceID] = inst + + i.cutToWalLoop(inst) } return inst, nil } @@ -351,19 +410,6 @@ func (i *Ingester) getInstances() []*instance { return instances } -// stopIncomingRequests implements ring.Lifecycler. -func (i *Ingester) stopIncomingRequests() { - i.instancesMtx.Lock() - defer i.instancesMtx.Unlock() - - i.pushErr.Store(ErrShuttingDown) -} - -// TransferOut implements ring.Lifecycler. -func (i *Ingester) TransferOut(context.Context) error { - return ring.ErrTransferDisabled -} - func (i *Ingester) replayWal() error { level.Info(log.Logger).Log("msg", "beginning wal replay") diff --git a/modules/ingester/ingester_search.go b/modules/ingester/ingester_search.go index d87af8b429b..6398b93e48f 100644 --- a/modules/ingester/ingester_search.go +++ b/modules/ingester/ingester_search.go @@ -103,7 +103,7 @@ func (i *Ingester) SearchTagValues(ctx context.Context, req *tempopb.SearchTagVa return &tempopb.SearchTagValuesResponse{}, nil } - res, err = inst.SearchTagValues(ctx, req.TagName) + res, err = inst.SearchTagValues(ctx, req.TagName, req.MaxTagValues, req.StaleValueThreshold) if err != nil { return nil, err } diff --git a/modules/ingester/ingester_test.go b/modules/ingester/ingester_test.go index 625ff3f56d5..2297d5f171c 100644 --- a/modules/ingester/ingester_test.go +++ b/modules/ingester/ingester_test.go @@ -412,7 +412,8 @@ func TestIngesterStartingReadOnly(t *testing.T) { defaultIngesterTestConfig(), defaultIngesterStore(t, t.TempDir()), limits, - prometheus.NewPedanticRegistry()) + prometheus.NewPedanticRegistry(), + false) require.NoError(t, err) _, err = ingester.PushBytesV2(ctx, &tempopb.PushBytesRequest{}) @@ -547,7 +548,7 @@ func defaultIngesterWithOverrides(t testing.TB, tmpDir string, o overrides.Confi s := defaultIngesterStore(t, tmpDir) - ingester, err := New(ingesterConfig, s, limits, prometheus.NewPedanticRegistry()) + ingester, err := New(ingesterConfig, s, limits, prometheus.NewPedanticRegistry(), false) require.NoError(t, err, "unexpected error creating ingester") ingester.replayJitter = false diff --git a/modules/ingester/instance.go b/modules/ingester/instance.go index b1ce94d8b4f..4fb5991fc35 100644 --- a/modules/ingester/instance.go +++ b/modules/ingester/instance.go @@ -12,6 +12,7 @@ import ( "sync" "time" + kitlog "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/gogo/status" "github.com/google/uuid" @@ -38,19 +39,9 @@ var ( errMaxLiveTraces = errors.New(overrides.ErrorPrefixLiveTracesExceeded) ) -func newTraceTooLargeError(traceID common.ID, instanceID string, maxBytes, reqSize int) error { - level.Warn(log.Logger).Log("msg", fmt.Sprintf("%s: max size of trace (%d) exceeded while adding %d bytes to trace %s for tenant %s", - overrides.ErrorPrefixTraceTooLarge, maxBytes, reqSize, 
hex.EncodeToString(traceID), instanceID)) - return errTraceTooLarge -} - -func newMaxLiveTracesError(instanceID string, limit string) error { - level.Warn(log.Logger).Log("msg", fmt.Sprintf("%s: max live traces exceeded for tenant %s: %v", overrides.ErrorPrefixLiveTracesExceeded, instanceID, limit)) - return errMaxLiveTraces -} - const ( - traceDataType = "trace" + traceDataType = "trace" + maxTraceLogLinesPerSecond = 10 ) var ( @@ -104,7 +95,7 @@ type instance struct { instanceID string tracesCreatedTotal prometheus.Counter bytesReceivedTotal *prometheus.CounterVec - limiter *Limiter + limiter Limiter writer tempodb.Writer dedicatedColumns backend.DedicatedColumns @@ -115,9 +106,13 @@ type instance struct { localWriter backend.Writer hash hash.Hash32 + + logger kitlog.Logger + maxTraceLogger *log.RateLimitedLogger } -func newInstance(instanceID string, limiter *Limiter, overrides ingesterOverrides, writer tempodb.Writer, l *local.Backend, dedicatedColumns backend.DedicatedColumns) (*instance, error) { +func newInstance(instanceID string, limiter Limiter, overrides ingesterOverrides, writer tempodb.Writer, l *local.Backend, dedicatedColumns backend.DedicatedColumns) (*instance, error) { + logger := kitlog.With(log.Logger, "tenant", instanceID) i := &instance{ traces: map[uint32]*liveTrace{}, traceSizes: tracesizes.New(), @@ -136,6 +131,9 @@ func newInstance(instanceID string, limiter *Limiter, overrides ingesterOverride localWriter: backend.NewWriter(l), hash: fnv.New32(), + + logger: logger, + maxTraceLogger: log.NewRateLimitedLogger(maxTraceLogLinesPerSecond, level.Warn(logger)), } err := i.resetHeadBlock() if err != nil { @@ -176,7 +174,7 @@ func (i *instance) addTraceError(errorsByTrace []tempopb.PushErrorReason, pushEr } // error is not either MaxLiveTraces or TraceTooLarge - level.Error(log.Logger).Log("msg", "Unexpected error during PushBytes", "tenant", i.instanceID, "error", pushError) + level.Error(i.logger).Log("msg", "Unexpected error during PushBytes", "error", pushError) errorsByTrace = append(errorsByTrace, tempopb.PushErrorReason_UNKNOWN_ERROR) return errorsByTrace @@ -204,14 +202,15 @@ func (i *instance) push(ctx context.Context, id, traceBytes []byte) error { err := i.limiter.AssertMaxTracesPerUser(i.instanceID, len(i.traces)) if err != nil { - return newMaxLiveTracesError(i.instanceID, err.Error()) + return errMaxLiveTraces } - maxBytes := i.limiter.limits.MaxBytesPerTrace(i.instanceID) + maxBytes := i.limiter.Limits().MaxBytesPerTrace(i.instanceID) reqSize := len(traceBytes) if maxBytes > 0 && !i.traceSizes.Allow(id, reqSize, maxBytes) { - return newTraceTooLargeError(id, i.instanceID, maxBytes, reqSize) + i.maxTraceLogger.Log("msg", overrides.ErrorPrefixTraceTooLarge, "max", maxBytes, "size", reqSize, "trace", hex.EncodeToString(id)) + return errTraceTooLarge } tkn := i.tokenForTraceID(id) @@ -421,7 +420,7 @@ func (i *instance) FindTraceByID(ctx context.Context, id []byte, allowPartialTra } i.tracesMtx.Unlock() - maxBytes := i.limiter.limits.MaxBytesPerTrace(i.instanceID) + maxBytes := i.limiter.Limits().MaxBytesPerTrace(i.instanceID) searchOpts := common.DefaultSearchOptionsWithMaxBytes(maxBytes) combiner := trace.NewCombiner(maxBytes, allowPartialTrace) @@ -529,7 +528,7 @@ func (i *instance) getDedicatedColumns() backend.DedicatedColumns { if cols := i.overrides.DedicatedColumns(i.instanceID); cols != nil { err := cols.Validate() if err != nil { - level.Error(log.Logger).Log("msg", "Unable to apply overrides for dedicated attribute columns. 
Columns invalid.", "tenant", i.instanceID, "error", err) + level.Error(i.logger).Log("msg", "Unable to apply overrides for dedicated attribute columns. Columns invalid.", "error", err) return i.dedicatedColumns } @@ -608,14 +607,14 @@ func (i *instance) rediscoverLocalBlocks(ctx context.Context) ([]*LocalBlock, er if err != nil { if errors.Is(err, backend.ErrDoesNotExist) { // Partial/incomplete block found, remove, it will be recreated from data in the wal. - level.Warn(log.Logger).Log("msg", "Unable to reload meta for local block. This indicates an incomplete block and will be deleted", "tenant", i.instanceID, "block", id.String()) + level.Warn(i.logger).Log("msg", "Unable to reload meta for local block. This indicates an incomplete block and will be deleted", "block", id.String()) err = i.local.ClearBlock(id, i.instanceID) if err != nil { return nil, fmt.Errorf("deleting bad local block tenant %v block %v: %w", i.instanceID, id.String(), err) } } else { // Block with unknown error - level.Error(log.Logger).Log("msg", "Unexpected error reloading meta for local block. Ignoring and continuing. This block should be investigated.", "tenant", i.instanceID, "block", id.String(), "error", err) + level.Error(i.logger).Log("msg", "Unexpected error reloading meta for local block. Ignoring and continuing. This block should be investigated.", "block", id.String(), "error", err) metricReplayErrorsTotal.WithLabelValues(i.instanceID).Inc() } @@ -631,7 +630,7 @@ func (i *instance) rediscoverLocalBlocks(ctx context.Context) ([]*LocalBlock, er // level corruption err = b.Validate(ctx) if err != nil && !errors.Is(err, common.ErrUnsupported) { - level.Error(log.Logger).Log("msg", "local block failed validation, dropping", "tenantID", i.instanceID, "block", id.String(), "error", err) + level.Error(i.logger).Log("msg", "local block failed validation, dropping", "block", id.String(), "error", err) metricReplayErrorsTotal.WithLabelValues(i.instanceID).Inc() err = i.local.ClearBlock(id, i.instanceID) @@ -645,7 +644,7 @@ func (i *instance) rediscoverLocalBlocks(ctx context.Context) ([]*LocalBlock, er ib := NewLocalBlock(ctx, b, i.local) rediscoveredBlocks = append(rediscoveredBlocks, ib) - level.Info(log.Logger).Log("msg", "reloaded local block", "tenantID", i.instanceID, "block", id.String(), "flushed", ib.FlushedTime()) + level.Info(i.logger).Log("msg", "reloaded local block", "block", id.String(), "flushed", ib.FlushedTime()) } i.blocksMtx.Lock() diff --git a/modules/ingester/instance_search.go b/modules/ingester/instance_search.go index 2de7b25f4a6..23f486b82f5 100644 --- a/modules/ingester/instance_search.go +++ b/modules/ingester/instance_search.go @@ -182,7 +182,7 @@ func (i *instance) SearchTags(ctx context.Context, scope string) (*tempopb.Searc return nil, err } - distinctValues := collector.NewDistinctString(0) // search tags v2 enforces the limit + distinctValues := collector.NewDistinctString(0, 0, 0) // search tags v2 enforces the limit // flatten v2 response for _, s := range v2Response.Scopes { @@ -232,8 +232,8 @@ func (i *instance) SearchTagsV2(ctx context.Context, req *tempopb.SearchTagsRequ return nil, fmt.Errorf("unknown scope: %s", scope) } - limit := i.limiter.limits.MaxBytesPerTagValuesQuery(userID) - distinctValues := collector.NewScopedDistinctString(limit) + maxBytestPerTags := i.limiter.Limits().MaxBytesPerTagValuesQuery(userID) + distinctValues := collector.NewScopedDistinctString(maxBytestPerTags, req.MaxTagsPerScope, req.StaleValuesThreshold) mc := collector.NewMetricsCollector() 
engine := traceql.NewEngine() @@ -296,7 +296,7 @@ func (i *instance) SearchTagsV2(ctx context.Context, req *tempopb.SearchTagsRequ } if distinctValues.Exceeded() { - level.Warn(log.Logger).Log("msg", "size of tags in instance exceeded limit, reduce cardinality or size of tags", "tenant", userID, "limit", limit) + level.Warn(log.Logger).Log("msg", "Search of tags exceeded limit, reduce cardinality or size of tags", "orgID", userID, "stopReason", distinctValues.StopReason()) } collected := distinctValues.Strings() @@ -324,18 +324,18 @@ func (i *instance) SearchTagsV2(ctx context.Context, req *tempopb.SearchTagsRequ return resp, nil } -func (i *instance) SearchTagValues(ctx context.Context, tagName string) (*tempopb.SearchTagValuesResponse, error) { +func (i *instance) SearchTagValues(ctx context.Context, tagName string, limit uint32, staleValueThreshold uint32) (*tempopb.SearchTagValuesResponse, error) { userID, err := user.ExtractOrgID(ctx) if err != nil { return nil, err } - limit := i.limiter.limits.MaxBytesPerTagValuesQuery(userID) - distinctValues := collector.NewDistinctString(limit) + maxBytesPerTagValues := i.limiter.Limits().MaxBytesPerTagValuesQuery(userID) + distinctValues := collector.NewDistinctString(maxBytesPerTagValues, limit, staleValueThreshold) mc := collector.NewMetricsCollector() var inspectedBlocks, maxBlocks int - if limit := i.limiter.limits.MaxBlocksPerTagValuesQuery(userID); limit > 0 { + if limit := i.limiter.Limits().MaxBlocksPerTagValuesQuery(userID); limit > 0 { maxBlocks = limit } @@ -382,7 +382,7 @@ func (i *instance) SearchTagValues(ctx context.Context, tagName string) (*tempop } if distinctValues.Exceeded() { - level.Warn(log.Logger).Log("msg", "size of tag values in instance exceeded limit, reduce cardinality or size of tags", "tag", tagName, "tenant", userID, "limit", limit, "size", distinctValues.Size()) + level.Warn(log.Logger).Log("msg", "Search of tags exceeded limit, reduce cardinality or size of tags", "tag", tagName, "orgID", userID, "stopReason", distinctValues.StopReason()) } return &tempopb.SearchTagValuesResponse{ @@ -400,8 +400,8 @@ func (i *instance) SearchTagValuesV2(ctx context.Context, req *tempopb.SearchTag ctx, span := tracer.Start(ctx, "instance.SearchTagValuesV2") defer span.End() - limit := i.limiter.limits.MaxBytesPerTagValuesQuery(userID) - valueCollector := collector.NewDistinctValue[tempopb.TagValue](limit, func(v tempopb.TagValue) int { return len(v.Type) + len(v.Value) }) + limit := i.limiter.Limits().MaxBytesPerTagValuesQuery(userID) + valueCollector := collector.NewDistinctValue(limit, req.MaxTagValues, req.StaleValueThreshold, func(v tempopb.TagValue) int { return len(v.Type) + len(v.Value) }) mc := collector.NewMetricsCollector() // to collect bytesRead metric engine := traceql.NewEngine() @@ -412,7 +412,7 @@ func (i *instance) SearchTagValuesV2(ctx context.Context, req *tempopb.SearchTag var anyErr atomic.Error var inspectedBlocks atomic.Int32 var maxBlocks int32 - if limit := i.limiter.limits.MaxBlocksPerTagValuesQuery(userID); limit > 0 { + if limit := i.limiter.Limits().MaxBlocksPerTagValuesQuery(userID); limit > 0 { maxBlocks = int32(limit) } @@ -512,7 +512,7 @@ func (i *instance) SearchTagValuesV2(ctx context.Context, req *tempopb.SearchTag // cache miss, search the block. We will cache the results if we find any. span.SetAttributes(attribute.Bool("cached", false)) // using local collector to collect values from the block and cache them. 
- localCol := collector.NewDistinctValue[tempopb.TagValue](limit, func(v tempopb.TagValue) int { return len(v.Type) + len(v.Value) }) + localCol := collector.NewDistinctValue[tempopb.TagValue](limit, req.MaxTagValues, req.StaleValueThreshold, func(v tempopb.TagValue) int { return len(v.Type) + len(v.Value) }) localErr := performSearch(ctx, b, localCol) if localErr != nil { return localErr } @@ -616,15 +616,20 @@ func includeBlock(b *backend.BlockMeta, req *tempopb.SearchRequest) bool { return b.StartTime.Unix() <= end && b.EndTime.Unix() >= start } +// searchTagValuesV2CacheKey generates a cache key for the searchTagValuesV2 request. +// The cache key is used as the filename to store the protobuf data on disk. func searchTagValuesV2CacheKey(req *tempopb.SearchTagValuesRequest, limit int, prefix string) string { query := req.Query if req.Query != "" { ast, err := traceql.Parse(req.Query) - if err != nil { // this should never happen but in case it happens - return "" + if err == nil { + // forces the query into a canonical form + query = ast.String() + } else { + // If the query is invalid TraceQL, ignore it: compute the cache key as if the + // query were empty, which matches the unfiltered results returned in that case. + query = "" } - // forces the query into a canonical form - query = ast.String() } // NOTE: we are not adding req.Start and req.End to the cache key because we don't respect the start and end diff --git a/modules/ingester/instance_search_test.go b/modules/ingester/instance_search_test.go index a3be2669e1e..860ae26ebb3 100644 --- a/modules/ingester/instance_search_test.go +++ b/modules/ingester/instance_search_test.go @@ -292,7 +292,7 @@ func testSearchTagsAndValues(t *testing.T, ctx context.Context, i *instance, tag checkSearchTags("event", true) checkSearchTags("link", true) - srv, err := i.SearchTagValues(ctx, tagName) + srv, err := i.SearchTagValues(ctx, tagName, 0, 0) require.NoError(t, err) require.Greater(t, srv.Metrics.InspectedBytes, uint64(100)) // we scanned at-least 100 bytes @@ -312,10 +312,13 @@ func TestInstanceSearchTagAndValuesV2(t *testing.T) { otherTagValue = qux queryThatMatches = fmt.Sprintf(`{ name = "%s" }`, spanName) queryThatDoesNotMatch = `{ resource.service.name = "aaaaa" }` + emptyQuery = `{ }` + invalidQuery = `{ not_a_traceql = query }` + partInvalidQuery = fmt.Sprintf(`{ name = "%s" && not_a_traceql = query }`, spanName) ) _, expectedTagValues, expectedEventTagValues, expectedLinkTagValues := writeTracesForSearch(t, i, spanName, tagKey, tagValue, true, true) - _, _, _, _ = writeTracesForSearch(t, i, "other-"+spanName, tagKey, otherTagValue, true, false) + _, otherTagValues, otherEventTagValues, otherLinkTagValues := writeTracesForSearch(t, i, "other-"+spanName, tagKey, otherTagValue, true, true) userCtx := user.InjectOrgID(context.Background(), "fake") @@ -339,7 +342,7 @@ func TestInstanceSearchTagAndValuesV2(t *testing.T) { // test that we are creating cache files for search tag values v2 // check that we have cache files for all complete blocks for all the cache keys - limit := i.limiter.limits.MaxBytesPerTagValuesQuery("fake") + limit := i.limiter.Limits().MaxBytesPerTagValuesQuery("fake") cacheKeys := cacheKeysForTestSearchTagValuesV2(tagKey, queryThatMatches, limit) for _, cacheKey := range cacheKeys { for _, b := range i.completeBlocks { @@ -351,6 +354,16 @@ // test search is returning same results with cache testSearchTagsAndValuesV2(t, userCtx, i, tagKey, queryThatMatches, 
expectedTagValues, expectedEventTagValues, expectedLinkTagValues) + + // merge all tag values to test unfiltered query + expectedTagValues = append(expectedTagValues, otherTagValues...) + expectedEventTagValues = append(expectedEventTagValues, otherEventTagValues...) + expectedLinkTagValues = append(expectedLinkTagValues, otherLinkTagValues...) + + // test un-filtered query and check that bad/invalid TraceQL query returns all tag values and is same as unfiltered query + testSearchTagsAndValuesV2(t, userCtx, i, tagKey, emptyQuery, expectedTagValues, expectedEventTagValues, expectedLinkTagValues) + testSearchTagsAndValuesV2(t, userCtx, i, tagKey, invalidQuery, expectedTagValues, expectedEventTagValues, expectedLinkTagValues) + testSearchTagsAndValuesV2(t, userCtx, i, tagKey, partInvalidQuery, expectedTagValues, expectedEventTagValues, expectedLinkTagValues) } // nolint:revive,unparam @@ -441,7 +454,7 @@ func TestInstanceSearchMaxBytesPerTagValuesQueryReturnsPartial(t *testing.T) { limits, err := overrides.NewOverrides(overrides.Config{ Defaults: overrides.Overrides{ Read: overrides.ReadOverrides{ - MaxBytesPerTagValuesQuery: 10, + MaxBytesPerTagValuesQuery: 12, }, }, }, nil, prometheus.DefaultRegisterer) @@ -461,9 +474,9 @@ func TestInstanceSearchMaxBytesPerTagValuesQueryReturnsPartial(t *testing.T) { _, _, _, _ = writeTracesForSearch(t, i, "", tagKey, tagValue, true, false) userCtx := user.InjectOrgID(context.Background(), "fake") - resp, err := i.SearchTagValues(userCtx, tagKey) + resp, err := i.SearchTagValues(userCtx, tagKey, 0, 0) require.NoError(t, err) - require.Equal(t, 2, len(resp.TagValues)) // Only two values of the form "bar123" fit in the 10 byte limit above. + require.Equal(t, 2, len(resp.TagValues)) // Only two values of the form "bar123" fit in the 12 byte limit above. 
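`searchTagValuesV2CacheKey` (rewritten above, exercised by `Test_searchTagValuesV2CacheKey` further down) canonicalizes the query through the TraceQL AST so that equivalent query strings share one cache file, treats unparseable queries as empty, and mixes the limit into the key so a changed limit cannot serve stale files. The diff doesn't show the hashing itself; a hypothetical sketch using FNV-1a, matching only the `<prefix>_<hash>.buf` shape of the expected keys:

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// cacheKey sketches the scheme described above: hash a canonical form of the
// query together with the tag name and limit, and use "<prefix>_<hash>.buf"
// as the on-disk filename. FNV-1a is an assumption here; the real key
// derivation in Tempo is not shown in this diff and may differ.
func cacheKey(prefix, tagName, canonicalQuery string, limit int) string {
	h := fnv.New64a()
	fmt.Fprintf(h, "%s|%s|%d", tagName, canonicalQuery, limit)
	return fmt.Sprintf("%s_%d.buf", prefix, h.Sum64())
}

func main() {
	// Invalid queries are canonicalized to "", so they all share one key.
	fmt.Println(cacheKey("my_amazing_prefix", "span.foo", "", 500))
	fmt.Println(cacheKey("my_amazing_prefix", "span.foo", `{ name = "foo" }`, 500))
}
```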
} // TestInstanceSearchMaxBytesPerTagValuesQueryReturnsPartial confirms that SearchTagValues returns @@ -501,7 +514,7 @@ func TestInstanceSearchMaxBlocksPerTagValuesQueryReturnsPartial(t *testing.T) { userCtx := user.InjectOrgID(context.Background(), "fake") - respV1, err := i.SearchTagValues(userCtx, tagKey) + respV1, err := i.SearchTagValues(userCtx, tagKey, 0, 0) require.NoError(t, err) assert.Equal(t, 100, len(respV1.TagValues)) @@ -515,7 +528,7 @@ func TestInstanceSearchMaxBlocksPerTagValuesQueryReturnsPartial(t *testing.T) { i.limiter = NewLimiter(limits, &ringCountMock{count: 1}, 1) - respV1, err = i.SearchTagValues(userCtx, tagKey) + respV1, err = i.SearchTagValues(userCtx, tagKey, 0, 0) require.NoError(t, err) assert.Equal(t, 200, len(respV1.TagValues)) @@ -709,7 +722,7 @@ func TestInstanceSearchDoesNotRace(t *testing.T) { go concurrent(func() { // SearchTagValues queries now require userID in ctx ctx := user.InjectOrgID(context.Background(), "test") - _, err := i.SearchTagValues(ctx, tagKey) + _, err := i.SearchTagValues(ctx, tagKey, 0, 0) require.NoError(t, err, "error getting search tag values") }) @@ -1006,3 +1019,64 @@ func TestIncludeBlock(t *testing.T) { }) } } + +func Test_searchTagValuesV2CacheKey(t *testing.T) { + tests := []struct { + name string + req *tempopb.SearchTagValuesRequest + limit int + prefix string + expectedCacheKey string + }{ + { + name: "prefix empty", + req: &tempopb.SearchTagValuesRequest{TagName: "span.foo", Query: "{}"}, + limit: 100, + prefix: "", + expectedCacheKey: "_10963035328899851375.buf", + }, + { + name: "prefix not empty but same query", + req: &tempopb.SearchTagValuesRequest{TagName: "span.foo", Query: "{}"}, + limit: 100, + prefix: "my_amazing_prefix", + // hash should be same, only prefix should change + expectedCacheKey: "my_amazing_prefix_10963035328899851375.buf", + }, + { + name: "changing limit changes the cache key for same query", + req: &tempopb.SearchTagValuesRequest{TagName: "span.foo", Query: "{}"}, + limit: 500, + prefix: "my_amazing_prefix", + expectedCacheKey: "my_amazing_prefix_10962052365504419966.buf", + }, + { + name: "different query generates different cache key", + req: &tempopb.SearchTagValuesRequest{TagName: "span.foo", Query: "{ name = \"foo\" }"}, + limit: 500, + prefix: "my_amazing_prefix", + expectedCacheKey: "my_amazing_prefix_9241051696576633442.buf", + }, + { + name: "invalid query generates a valid cache key", + req: &tempopb.SearchTagValuesRequest{TagName: "span.foo", Query: "{span.env=dev}"}, + limit: 500, + prefix: "my_amazing_prefix", + expectedCacheKey: "my_amazing_prefix_7849238702443650194.buf", + }, + { + name: "different invalid query generates the same valid cache key", + req: &tempopb.SearchTagValuesRequest{TagName: "span.foo", Query: "{ && span.foo = \"bar\" }"}, + limit: 500, + prefix: "my_amazing_prefix", + expectedCacheKey: "my_amazing_prefix_7849238702443650194.buf", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cacheKey := searchTagValuesV2CacheKey(tt.req, tt.limit, tt.prefix) + require.Equal(t, tt.expectedCacheKey, cacheKey) + }) + } +} diff --git a/modules/ingester/limiter.go b/modules/ingester/limiter.go index 8674e6583ea..8a9a4e99777 100644 --- a/modules/ingester/limiter.go +++ b/modules/ingester/limiter.go @@ -4,6 +4,7 @@ import ( "fmt" "math" + "github.com/grafana/dskit/ring" "github.com/grafana/tempo/modules/overrides" ) @@ -17,26 +18,35 @@ type RingCount interface { HealthyInstancesCount() int } +type Limiter interface { + 
AssertMaxTracesPerUser(userID string, traces int) error + Limits() overrides.Interface +} + // Limiter implements primitives to get the maximum number of traces // an ingester can handle for a specific tenant -type Limiter struct { +type limiter struct { limits overrides.Interface ring RingCount replicationFactor int } // NewLimiter makes a new limiter -func NewLimiter(limits overrides.Interface, ring RingCount, replicationFactor int) *Limiter { - return &Limiter{ +func NewLimiter(limits overrides.Interface, ring RingCount, replicationFactor int) Limiter { + return &limiter{ limits: limits, ring: ring, replicationFactor: replicationFactor, } } +func (l *limiter) Limits() overrides.Interface { + return l.limits +} + // AssertMaxTracesPerUser ensures limit has not been reached compared to the current // number of streams in input and returns an error if so. -func (l *Limiter) AssertMaxTracesPerUser(userID string, traces int) error { +func (l *limiter) AssertMaxTracesPerUser(userID string, traces int) error { actualLimit := l.maxTracesPerUser(userID) if traces < actualLimit { return nil @@ -48,7 +58,7 @@ func (l *Limiter) AssertMaxTracesPerUser(userID string, traces int) error { return fmt.Errorf(errMaxTracesPerUserLimitExceeded, localLimit, globalLimit, actualLimit) } -func (l *Limiter) maxTracesPerUser(userID string) int { +func (l *limiter) maxTracesPerUser(userID string) int { localLimit := l.limits.MaxLocalTracesPerUser(userID) // We can assume that traces are evenly distributed across ingesters @@ -65,7 +75,7 @@ func (l *Limiter) maxTracesPerUser(userID string) int { return localLimit } -func (l *Limiter) convertGlobalToLocalLimit(userID string, globalLimit int) int { +func (l *limiter) convertGlobalToLocalLimit(userID string, globalLimit int) int { if globalLimit == 0 { return 0 } @@ -87,7 +97,82 @@ func (l *Limiter) convertGlobalToLocalLimit(userID string, globalLimit int) int return 0 } -func (l *Limiter) minNonZero(first, second int) int { +func (l *limiter) minNonZero(first, second int) int { + if first == 0 || (second != 0 && first > second) { + return second + } + + return first +} + +type partitionLimiter struct { + limits overrides.Interface + ring *ring.PartitionRingWatcher +} + +func NewPartitionLimiter(limits overrides.Interface, ring *ring.PartitionRingWatcher) Limiter { + return &partitionLimiter{ + limits: limits, + ring: ring, + } +} + +func (l *partitionLimiter) Limits() overrides.Interface { + return l.limits +} + +func (l *partitionLimiter) AssertMaxTracesPerUser(userID string, traces int) error { + actualLimit := l.maxTracesPerUser(userID) + if traces < actualLimit { + return nil + } + + localLimit := l.limits.MaxLocalTracesPerUser(userID) + globalLimit := l.limits.MaxGlobalTracesPerUser(userID) + + return fmt.Errorf(errMaxTracesPerUserLimitExceeded, localLimit, globalLimit, actualLimit) +} + +func (l *partitionLimiter) maxTracesPerUser(userID string) int { + localLimit := l.limits.MaxLocalTracesPerUser(userID) + + // We can assume that traces are evenly distributed across ingesters + // so we do convert the global limit into a local limit + globalLimit := l.limits.MaxGlobalTracesPerUser(userID) + localLimit = l.minNonZero(localLimit, l.convertGlobalToLocalLimit(userID, globalLimit)) + + // If both the local and global limits are disabled, we just + // use the largest int value + if localLimit == 0 { + localLimit = math.MaxInt32 + } + + return localLimit +} + +func (l *partitionLimiter) convertGlobalToLocalLimit(userID string, globalLimit int) int { + if globalLimit 
== 0 { + return 0 + } + + // Given we don't need a super accurate count (i.e. when the partition + // topology changes) and we prefer to always be in favor of the tenant, + // we can use a per-partition limit equal to: + // global limit / number of partitions the tenant writes to + ingestionShardSize := l.limits.IngestionTenantShardSize(userID) + totalIngesters := l.ring.PartitionRing().ShuffleShardSize(ingestionShardSize) + numIngesters := l.minNonZero(ingestionShardSize, totalIngesters) + + // numIngesters can be zero because the ring is updated asynchronously. + // If that happens, we just temporarily ignore the global limit. + if numIngesters > 0 { + return int(float64(globalLimit) / float64(numIngesters)) + } + + return 0 +} + +func (l *partitionLimiter) minNonZero(first, second int) int { if first == 0 || (second != 0 && first > second) { return second } return first } diff --git a/modules/ingester/partition_ring.go b/modules/ingester/partition_ring.go new file mode 100644 index 00000000000..22b69b4e78b --- /dev/null +++ b/modules/ingester/partition_ring.go @@ -0,0 +1,47 @@ +package ingester + +import ( + "flag" + "time" + + "github.com/grafana/dskit/kv" + "github.com/grafana/dskit/ring" +) + +type PartitionRingConfig struct { + KVStore kv.Config `yaml:"kvstore" doc:"description=The key-value store used to share the hash ring across multiple instances. This option needs to be set on ingesters, distributors, queriers, and rulers when running in microservices mode."` + + // MinOwnersCount maps to ring.PartitionInstanceLifecyclerConfig's WaitOwnersCountOnPending. + MinOwnersCount int `yaml:"min_partition_owners_count"` + + // MinOwnersDuration maps to ring.PartitionInstanceLifecyclerConfig's WaitOwnersDurationOnPending. + MinOwnersDuration time.Duration `yaml:"min_partition_owners_duration"` + + // DeleteInactivePartitionAfter maps to ring.PartitionInstanceLifecyclerConfig's DeleteInactivePartitionAfterDuration. + DeleteInactivePartitionAfter time.Duration `yaml:"delete_inactive_partition_after"` + + // lifecyclerPollingInterval is the lifecycler polling interval. This setting is used to lower it in tests. + lifecyclerPollingInterval time.Duration +} + +// RegisterFlags adds the flags required to configure this to the given FlagSet +func (cfg *PartitionRingConfig) RegisterFlags(f *flag.FlagSet) { + // Ring flags + cfg.KVStore.Store = "memberlist" // Override default value. + cfg.KVStore.RegisterFlagsWithPrefix("ingester.partition-ring.", "collectors/", f) + + f.IntVar(&cfg.MinOwnersCount, "ingester.partition-ring.min-partition-owners-count", 1, "Minimum number of owners to wait before a PENDING partition gets switched to ACTIVE.") + f.DurationVar(&cfg.MinOwnersDuration, "ingester.partition-ring.min-partition-owners-duration", 10*time.Second, "How long the minimum number of owners is enforced before a PENDING partition gets switched to ACTIVE.") + f.DurationVar(&cfg.DeleteInactivePartitionAfter, "ingester.partition-ring.delete-inactive-partition-after", 13*time.Hour, "How long to wait before an INACTIVE partition is eligible for deletion. The partition is deleted only if it has been in INACTIVE state for at least the configured duration and it has no owners registered. 
A value of 0 disables partitions deletion.") +} + +func (cfg *PartitionRingConfig) ToLifecyclerConfig(partitionID int32, instanceID string) ring.PartitionInstanceLifecyclerConfig { + return ring.PartitionInstanceLifecyclerConfig{ + PartitionID: partitionID, + InstanceID: instanceID, + WaitOwnersCountOnPending: cfg.MinOwnersCount, + WaitOwnersDurationOnPending: cfg.MinOwnersDuration, + DeleteInactivePartitionAfterDuration: cfg.DeleteInactivePartitionAfter, + PollingInterval: cfg.lifecyclerPollingInterval, + } +} diff --git a/modules/overrides/config.go b/modules/overrides/config.go index 2366023f4c1..c973eb1b628 100644 --- a/modules/overrides/config.go +++ b/modules/overrides/config.go @@ -170,7 +170,7 @@ type CompactionOverrides struct { } type GlobalOverrides struct { - // MaxBytesPerTrace is enforced in the Ingester, Compactor, Querier (Search) and Serverless (Search). It + // MaxBytesPerTrace is enforced in the Ingester, Compactor, Querier (Search). It // is not used when doing a trace by id lookup. MaxBytesPerTrace int `yaml:"max_bytes_per_trace,omitempty" json:"max_bytes_per_trace,omitempty"` } diff --git a/modules/overrides/config_legacy.go b/modules/overrides/config_legacy.go index 526011f486d..6516e2817e7 100644 --- a/modules/overrides/config_legacy.go +++ b/modules/overrides/config_legacy.go @@ -130,7 +130,7 @@ type LegacyOverrides struct { MaxMetricsDuration model.Duration `yaml:"max_metrics_duration" json:"max_metrics_duration"` UnsafeQueryHints bool `yaml:"unsafe_query_hints" json:"unsafe_query_hints"` - // MaxBytesPerTrace is enforced in the Ingester, Compactor, Querier (Search) and Serverless (Search). It + // MaxBytesPerTrace is enforced in the Ingester, Compactor, Querier (Search). It // is not used when doing a trace by id lookup. MaxBytesPerTrace int `yaml:"max_bytes_per_trace" json:"max_bytes_per_trace"` diff --git a/modules/querier/config.go b/modules/querier/config.go index 824622cc3c2..cb86d8df25a 100644 --- a/modules/querier/config.go +++ b/modules/querier/config.go @@ -7,7 +7,6 @@ import ( "github.com/grafana/dskit/backoff" "github.com/grafana/dskit/grpcclient" - "github.com/grafana/tempo/modules/querier/external" "github.com/grafana/tempo/modules/querier/worker" ) @@ -27,18 +26,7 @@ type Config struct { } type SearchConfig struct { - QueryTimeout time.Duration `yaml:"query_timeout"` - PreferSelf int `yaml:"prefer_self"` - HedgeRequestsAt time.Duration `yaml:"external_hedge_requests_at"` - HedgeRequestsUpTo int `yaml:"external_hedge_requests_up_to"` - - // backends - ExternalBackend string `yaml:"external_backend"` - CloudRun *external.CloudRunConfig `yaml:"google_cloud_run"` - - // Ideally this config should be under the external.HTTPConfig struct, but - // it will require a breaking change. 
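Back in `partitionLimiter.convertGlobalToLocalLimit` above, the global per-tenant trace limit is spread across the partitions the tenant writes to, and the global limit is skipped entirely while the ring reports no partitions. A worked sketch of just that arithmetic:

```go
package main

import "fmt"

// localFromGlobal mirrors the math in partitionLimiter above: divide the
// global per-tenant limit by the number of partitions the tenant writes to,
// and return 0 ("no global contribution") while the ring reports none.
func localFromGlobal(globalLimit, numPartitions int) int {
	if globalLimit == 0 || numPartitions <= 0 {
		return 0
	}
	return int(float64(globalLimit) / float64(numPartitions))
}

func main() {
	fmt.Println(localFromGlobal(10000, 4)) // 2500 traces enforced per partition
	fmt.Println(localFromGlobal(10000, 0)) // 0: ring not ready, global limit temporarily ignored
}
```

For example, a global limit of 10,000 traces over 4 partitions yields 2,500 per partition; during a brief window where the watcher reports zero partitions, only the local limit applies.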
- ExternalEndpoints []string `yaml:"external_endpoints"` + QueryTimeout time.Duration `yaml:"query_timeout"` } type TraceByIDConfig struct { @@ -63,9 +51,6 @@ func (cfg *Config) RegisterFlagsAndApplyDefaults(prefix string, f *flag.FlagSet) cfg.QueryRelevantIngesters = false cfg.ExtraQueryDelay = 0 cfg.MaxConcurrentQueries = 20 - cfg.Search.PreferSelf = 10 - cfg.Search.HedgeRequestsAt = 8 * time.Second - cfg.Search.HedgeRequestsUpTo = 2 cfg.Search.QueryTimeout = 30 * time.Second cfg.Metrics.ConcurrentBlocks = 2 cfg.Metrics.TimeOverlapCutoff = 0.2 @@ -76,7 +61,7 @@ func (cfg *Config) RegisterFlagsAndApplyDefaults(prefix string, f *flag.FlagSet) GRPCClientConfig: grpcclient.Config{ MaxRecvMsgSize: 100 << 20, MaxSendMsgSize: 16 << 20, - GRPCCompression: "gzip", + GRPCCompression: "", BackoffConfig: backoff.Config{ // the max possible backoff should be lesser than QueryTimeout, with room for actual query response time MinBackoff: 100 * time.Millisecond, MaxBackoff: 1 * time.Second, diff --git a/modules/querier/external/client.go b/modules/querier/external/client.go deleted file mode 100644 index 5d601ca01cc..00000000000 --- a/modules/querier/external/client.go +++ /dev/null @@ -1,195 +0,0 @@ -package external - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "math/rand" - "net/http" - "time" - - "github.com/cristalhq/hedgedhttp" - "github.com/gogo/protobuf/jsonpb" - "github.com/grafana/dskit/user" - "github.com/grafana/tempo/pkg/api" - "github.com/grafana/tempo/pkg/hedgedmetrics" - "github.com/grafana/tempo/pkg/tempopb" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" -) - -var ( - metricEndpointDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: "tempo", - Name: "querier_external_endpoint_duration_seconds", - Help: "The duration of the external endpoints.", - Buckets: prometheus.DefBuckets, - NativeHistogramBucketFactor: 1.1, - NativeHistogramMaxBucketNumber: 100, - NativeHistogramMinResetDuration: 1 * time.Hour, - }, []string{"endpoint"}) - metricExternalHedgedRequests = promauto.NewCounter( - prometheus.CounterOpts{ - Namespace: "tempo", - Name: "querier_external_endpoint_hedged_roundtrips_total", - Help: "Total number of hedged external requests.", - }, - ) -) - -type Config struct { - Backend string - - HTTPConfig *HTTPConfig - CloudRunConfig *CloudRunConfig - - HedgeRequestsAt time.Duration - HedgeRequestsUpTo int -} - -type Client struct { - httpClient *http.Client - endpoints []string - tokenProvider tokenProvider -} - -type CloudRunConfig struct { - Endpoints []string `yaml:"external_endpoints"` - NoAuth bool // For testing -} - -type HTTPConfig struct { - Endpoints []string -} - -type option func(client *Client) error - -func withTokenProvider(provider tokenProvider) option { - return func(client *Client) error { - client.tokenProvider = provider - return nil - } -} - -func NewClient(cfg *Config) (*Client, error) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - switch cfg.Backend { - case "": - // For backwards compatibility, use unauthenticated http as the default. 
- return newClientWithOpts(&commonConfig{ - endpoints: cfg.HTTPConfig.Endpoints, - hedgeRequestsAt: cfg.HedgeRequestsAt, - hedgeRequestsUpTo: cfg.HedgeRequestsUpTo, - }) - case "google_cloud_run": - provider, err := newGoogleProvider(ctx, cfg.CloudRunConfig.Endpoints, cfg.CloudRunConfig.NoAuth) - if err != nil { - return nil, err - } - return newClientWithOpts(&commonConfig{ - endpoints: cfg.CloudRunConfig.Endpoints, - hedgeRequestsAt: cfg.HedgeRequestsAt, - hedgeRequestsUpTo: cfg.HedgeRequestsUpTo, - }, withTokenProvider(provider)) - - case "aws_lambda": - // TODO: implement RBAC for lambda. Here's one approach using API - // gateway: - // https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-use-lambda-authorizer.html#api-gateway-lambda-authorizer-flow - return nil, fmt.Errorf("lambda external backend does not yet have RBAC support") - default: - return nil, fmt.Errorf("unknown external backend configured: %s", cfg.Backend) - } -} - -type commonConfig struct { - hedgeRequestsAt time.Duration - hedgeRequestsUpTo int - endpoints []string -} - -func newClientWithOpts(cfg *commonConfig, opts ...option) (*Client, error) { - httpClient := http.DefaultClient - if cfg.hedgeRequestsAt != 0 { - var err error - var stats *hedgedhttp.Stats - httpClient, stats, err = hedgedhttp.NewClientAndStats( - cfg.hedgeRequestsAt, - cfg.hedgeRequestsUpTo, - http.DefaultClient, - ) - if err != nil { - return nil, err - } - hedgedmetrics.Publish(stats, metricExternalHedgedRequests) - } - - c := &Client{ - httpClient: httpClient, - endpoints: cfg.endpoints, - tokenProvider: &nilTokenProvider{}, - } - - for _, opt := range opts { - err := opt(c) - if err != nil { - return nil, err - } - } - - return c, nil -} - -func (s *Client) Search(ctx context.Context, maxBytes int, searchReq *tempopb.SearchBlockRequest) (*tempopb.SearchResponse, error) { - endpoint := s.endpoints[rand.Intn(len(s.endpoints))] - req, err := http.NewRequest(http.MethodGet, endpoint, nil) - if err != nil { - return nil, fmt.Errorf("external endpoint failed to make new request: %w", err) - } - columnsJSON, err := json.Marshal(searchReq.DedicatedColumns) - if err != nil { - return nil, err - } - req, err = api.BuildSearchBlockRequest(req, searchReq, string(columnsJSON)) - if err != nil { - return nil, fmt.Errorf("external endpoint failed to build search block request: %w", err) - } - req = api.AddServerlessParams(req, maxBytes) - err = user.InjectOrgIDIntoHTTPRequest(ctx, req) - if err != nil { - return nil, fmt.Errorf("external endpoint failed to inject tenant id: %w", err) - } - token, err := s.tokenProvider.getToken(ctx, endpoint) - if err != nil { - return nil, fmt.Errorf("external endpoint failed to set auth header: %w", err) - } - if token != nil { - token.SetAuthHeader(req) - } - - start := time.Now() - resp, err := s.httpClient.Do(req) - metricEndpointDuration.WithLabelValues(endpoint).Observe(time.Since(start).Seconds()) - if err != nil { - return nil, fmt.Errorf("external endpoint failed to call http: %s, %w", endpoint, err) - } - defer resp.Body.Close() - body, err := io.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("external endpoint failed to read body: %w", err) - } - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("external endpoint returned %d, %s", resp.StatusCode, string(body)) - } - var searchResp tempopb.SearchResponse - err = jsonpb.Unmarshal(bytes.NewReader(body), &searchResp) - if err != nil { - return nil, fmt.Errorf("external endpoint failed to unmarshal body: %s: %w", 
string(body), err) - } - - return &searchResp, nil -} diff --git a/modules/querier/external/client_test.go b/modules/querier/external/client_test.go deleted file mode 100644 index 8d49111d345..00000000000 --- a/modules/querier/external/client_test.go +++ /dev/null @@ -1,100 +0,0 @@ -package external - -import ( - "context" - "encoding/json" - "net/http" - "net/http/httptest" - "testing" - "time" - - "github.com/grafana/dskit/user" - "github.com/stretchr/testify/require" - "go.uber.org/atomic" - "golang.org/x/oauth2" - - "github.com/grafana/tempo/pkg/tempopb" -) - -func TestAuthHeader(t *testing.T) { - authorizationHeader := atomic.NewString("") - - srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - time.Sleep(100 * time.Millisecond) - curr := authorizationHeader.Load() - if curr != "" { - http.Error(w, "authorization has already been set", http.StatusBadRequest) - return - } - authHeader := r.Header.Get("authorization") - authorizationHeader.Store(authHeader) - - // Create an instance of SearchResponse and populate its fields - response := tempopb.SearchResponse{} - - // Marshal the SearchResponse struct into a JSON byte slice - jsonData, err := json.Marshal(response) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - _, err = w.Write(jsonData) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - })) - defer srv.Close() - - tests := []struct { - cfg *commonConfig - authHeaderExpected string - options []option - }{ - { - cfg: &commonConfig{ - endpoints: []string{srv.URL}, - }, - authHeaderExpected: "", - }, - { - cfg: &commonConfig{ - endpoints: []string{srv.URL}, - }, - options: []option{ - withTokenProvider(getStubbedTokenProvider("dummytoken")), - }, - - authHeaderExpected: "Bearer dummytoken", - }, - } - - ctx := user.InjectOrgID(context.Background(), "blerg") - - for _, tc := range tests { - authorizationHeader.Store("") - - c, err := newClientWithOpts(tc.cfg, tc.options...) - require.NoError(t, err) - - _, err = c.Search(ctx, 0, &tempopb.SearchBlockRequest{}) - require.NoError(t, err) - - require.Equal(t, tc.authHeaderExpected, authorizationHeader.Load()) - } -} - -type stubbedProvider struct { - dummyToken string -} - -func (t *stubbedProvider) getToken(_ context.Context, _ string) (*oauth2.Token, error) { - return &oauth2.Token{ - AccessToken: t.dummyToken, - }, nil -} - -func getStubbedTokenProvider(dummyToken string) tokenProvider { - return &stubbedProvider{dummyToken: dummyToken} -} diff --git a/modules/querier/external/token_provider.go b/modules/querier/external/token_provider.go deleted file mode 100644 index 7f92374ddb2..00000000000 --- a/modules/querier/external/token_provider.go +++ /dev/null @@ -1,85 +0,0 @@ -package external - -import ( - "context" - "fmt" - "sync" - - "go.uber.org/multierr" - "golang.org/x/oauth2" - "google.golang.org/api/idtoken" -) - -type tokenProvider interface { - // Returns an oauth2 token, leveraging a cache unless the token is expired. - // If expired, the token is renewed and added to the cache. - // - // If this returns nil, the request will be unauthenticated. - getToken(ctx context.Context, endpoint string) (*oauth2.Token, error) -} - -// Caches an oauth2.TokenSource to enable efficient auth on each of our external -// endpoints. 
-type cachedTokenProvider struct { - tokenSources map[string]oauth2.TokenSource -} - -func newTokenProvider( - ctx context.Context, - endpoints []string, - getTokenSource func(ctx context.Context, endpoint string) (oauth2.TokenSource, error), -) (*cachedTokenProvider, error) { - sources := make(map[string]oauth2.TokenSource, len(endpoints)) - - var mtx sync.Mutex - var wg sync.WaitGroup - - var tsErr error - for _, endpoint := range endpoints { - wg.Add(1) - go func(ep string) { - defer wg.Done() - ts, err := getTokenSource(ctx, ep) - - mtx.Lock() - defer mtx.Unlock() - - if err != nil { - tsErr = multierr.Combine(tsErr, err) - } - sources[ep] = oauth2.ReuseTokenSource(nil, ts) - }(endpoint) - } - wg.Wait() - if tsErr != nil { - return nil, fmt.Errorf("failed to create one or more token sources: %w", tsErr) - } - - return &cachedTokenProvider{ - tokenSources: sources, - }, nil -} - -func (t *cachedTokenProvider) getToken(_ context.Context, endpoint string) (*oauth2.Token, error) { - if src, containsKey := t.tokenSources[endpoint]; containsKey { - return src.Token() - } - return nil, fmt.Errorf("endpoint is not configured: %s", endpoint) -} - -func newGoogleProvider(ctx context.Context, endpoints []string, noAuth bool) (tokenProvider, error) { - if noAuth { - return &nilTokenProvider{}, nil - } - - return newTokenProvider(ctx, endpoints, func(ctx context.Context, endpoint string) (oauth2.TokenSource, error) { - return idtoken.NewTokenSource(ctx, endpoint) - }) -} - -type nilTokenProvider struct{} - -func (t *nilTokenProvider) getToken(_ context.Context, _ string) (*oauth2.Token, error) { - // no-op - return nil, nil -} diff --git a/modules/querier/external/token_provider_test.go b/modules/querier/external/token_provider_test.go deleted file mode 100644 index 1ea21435e75..00000000000 --- a/modules/querier/external/token_provider_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package external - -import ( - "context" - "fmt" - "testing" - - "github.com/stretchr/testify/require" - "golang.org/x/oauth2" -) - -func TestCachedTokenProvider(t *testing.T) { - tests := []struct { - endpoints []string - expectedToken *oauth2.Token - testEndpoint string - }{ - { - endpoints: []string{"foo"}, - testEndpoint: "foo", - expectedToken: &oauth2.Token{AccessToken: "foo-token"}, - }, - { - endpoints: []string{"foo", "bar"}, - testEndpoint: "foo", - expectedToken: &oauth2.Token{AccessToken: "foo-token"}, - }, - { - endpoints: []string{"foo", "bar"}, - testEndpoint: "bar", - expectedToken: &oauth2.Token{AccessToken: "bar-token"}, - }, - } - - for _, tc := range tests { - getTokenSource := func(ctx context.Context, endpoint string) (oauth2.TokenSource, error) { - return getStubbedTokenSource(fmt.Sprintf("%s-token", endpoint)), nil - } - - tp, err := newTokenProvider(context.Background(), tc.endpoints, getTokenSource) - require.NoError(t, err) - - actual, err := tp.getToken(context.Background(), tc.testEndpoint) - require.NoError(t, err) - require.Equal(t, tc.expectedToken, actual) - } -} - -type stubbedTokenSource struct { - dummyToken string -} - -func (t *stubbedTokenSource) Token() (*oauth2.Token, error) { - return &oauth2.Token{ - AccessToken: t.dummyToken, - }, nil -} - -func getStubbedTokenSource(dummyToken string) oauth2.TokenSource { - return &stubbedTokenSource{dummyToken: dummyToken} -} diff --git a/modules/querier/querier.go b/modules/querier/querier.go index 513f1fd0973..fc53838b5cb 100644 --- a/modules/querier/querier.go +++ b/modules/querier/querier.go @@ -22,12 +22,10 @@ import ( oteltrace 
"go.opentelemetry.io/otel/trace" "go.uber.org/atomic" "go.uber.org/multierr" - "golang.org/x/sync/semaphore" generator_client "github.com/grafana/tempo/modules/generator/client" ingester_client "github.com/grafana/tempo/modules/ingester/client" "github.com/grafana/tempo/modules/overrides" - "github.com/grafana/tempo/modules/querier/external" "github.com/grafana/tempo/modules/querier/worker" "github.com/grafana/tempo/modules/storage" "github.com/grafana/tempo/pkg/api" @@ -81,10 +79,6 @@ type Querier struct { store storage.Store limits overrides.Interface - externalClient *external.Client - - searchPreferSelf *semaphore.Weighted - subservices *services.Manager subservicesWatcher *services.FailureWatcher } @@ -107,21 +101,6 @@ func New( return generator_client.New(addr, generatorClientConfig) } - externalClient, err := external.NewClient(&external.Config{ - Backend: cfg.Search.ExternalBackend, - CloudRunConfig: cfg.Search.CloudRun, - - HedgeRequestsAt: cfg.Search.HedgeRequestsAt, - HedgeRequestsUpTo: cfg.Search.HedgeRequestsUpTo, - - HTTPConfig: &external.HTTPConfig{ - Endpoints: cfg.Search.ExternalEndpoints, - }, - }) - if err != nil { - return nil, err - } - ingesterPools := make([]*ring_client.Pool, 0, len(ingesterRings)) for i, ring := range ingesterRings { pool := ring_client.NewPool(fmt.Sprintf("querier_pool_%d", i), @@ -144,11 +123,9 @@ func New( generatorClientFactory, metricMetricsGeneratorClients, log.Logger), - engine: traceql.NewEngine(), - store: store, - limits: limits, - searchPreferSelf: semaphore.NewWeighted(int64(cfg.Search.PreferSelf)), - externalClient: externalClient, + engine: traceql.NewEngine(), + store: store, + limits: limits, } q.Service = services.NewBasicService(q.starting, q.running, q.stopping) @@ -494,7 +471,7 @@ func (q *Querier) SearchTagsBlocks(ctx context.Context, req *tempopb.SearchTagsB return nil, err } - distinctValues := collector.NewDistinctString(0) + distinctValues := collector.NewDistinctString(0, 0, 0) // flatten v2 response for _, s := range v2Response.Scopes { @@ -535,8 +512,8 @@ func (q *Querier) SearchTags(ctx context.Context, req *tempopb.SearchTagsRequest return nil, fmt.Errorf("error extracting org id in Querier.SearchTags: %w", err) } - limit := q.limits.MaxBytesPerTagValuesQuery(userID) - distinctValues := collector.NewDistinctString(limit) + maxDataSize := q.limits.MaxBytesPerTagValuesQuery(userID) + distinctValues := collector.NewDistinctString(maxDataSize, req.MaxTagsPerScope, req.StaleValuesThreshold) mc := collector.NewMetricsCollector() forEach := func(ctx context.Context, client tempopb.QuerierClient) error { @@ -563,7 +540,7 @@ func (q *Querier) SearchTags(ctx context.Context, req *tempopb.SearchTagsRequest } if distinctValues.Exceeded() { - level.Warn(log.Logger).Log("msg", "size of tags in instance exceeded limit, reduce cardinality or size of tags", "userID", userID, "limit", limit, "size", distinctValues.Size()) + level.Warn(log.Logger).Log("msg", "size of tags in instance exceeded limit, reduce cardinality or size of tags", "userID", userID, "maxDataSize", maxDataSize, "size", distinctValues.Size()) } return &tempopb.SearchTagsResponse{ @@ -573,13 +550,13 @@ func (q *Querier) SearchTags(ctx context.Context, req *tempopb.SearchTagsRequest } func (q *Querier) SearchTagsV2(ctx context.Context, req *tempopb.SearchTagsRequest) (*tempopb.SearchTagsV2Response, error) { - userID, err := user.ExtractOrgID(ctx) + orgID, err := user.ExtractOrgID(ctx) if err != nil { return nil, fmt.Errorf("error extracting org id in Querier.SearchTags: 
%w", err) } - limit := q.limits.MaxBytesPerTagValuesQuery(userID) - distinctValues := collector.NewScopedDistinctString(limit) + maxBytesPerTag := q.limits.MaxBytesPerTagValuesQuery(orgID) + distinctValues := collector.NewScopedDistinctString(maxBytesPerTag, req.MaxTagsPerScope, req.StaleValuesThreshold) mc := collector.NewMetricsCollector() // Get results from all ingesters @@ -603,13 +580,13 @@ func (q *Querier) SearchTagsV2(ctx context.Context, req *tempopb.SearchTagsReque return nil } - err = q.forIngesterRings(ctx, userID, nil, forEach) + err = q.forIngesterRings(ctx, orgID, nil, forEach) if err != nil { return nil, fmt.Errorf("error querying ingesters in Querier.SearchTags: %w", err) } if distinctValues.Exceeded() { - level.Warn(log.Logger).Log("msg", "size of tags in instance exceeded limit, reduce cardinality or size of tags", "userID", userID, "limit", limit) + level.Warn(log.Logger).Log("msg", "Search tags exceeded limit, reduce cardinality or size of tags", "orgID", orgID, "stopReason", distinctValues.StopReason()) } collected := distinctValues.Strings() @@ -633,8 +610,8 @@ func (q *Querier) SearchTagValues(ctx context.Context, req *tempopb.SearchTagVal return nil, fmt.Errorf("error extracting org id in Querier.SearchTagValues: %w", err) } - limit := q.limits.MaxBytesPerTagValuesQuery(userID) - distinctValues := collector.NewDistinctString(limit) + maxDataSize := q.limits.MaxBytesPerTagValuesQuery(userID) + distinctValues := collector.NewDistinctString(maxDataSize, req.MaxTagValues, req.StaleValueThreshold) mc := collector.NewMetricsCollector() // Virtual tags values. Get these first. @@ -668,7 +645,7 @@ func (q *Querier) SearchTagValues(ctx context.Context, req *tempopb.SearchTagVal } if distinctValues.Exceeded() { - level.Warn(log.Logger).Log("msg", "size of tag values in instance exceeded limit, reduce cardinality or size of tags", "tag", req.TagName, "userID", userID, "limit", limit, "size", distinctValues.Size()) + level.Warn(log.Logger).Log("msg", "Search of tag values exceeded limit, reduce cardinality or size of tags", "tag", req.TagName, "orgID", userID, "stopReason", distinctValues.StopReason()) } return &tempopb.SearchTagValuesResponse{ @@ -683,8 +660,8 @@ func (q *Querier) SearchTagValuesV2(ctx context.Context, req *tempopb.SearchTagV return nil, fmt.Errorf("error extracting org id in Querier.SearchTagValues: %w", err) } - limit := q.limits.MaxBytesPerTagValuesQuery(userID) - distinctValues := collector.NewDistinctValue(limit, func(v tempopb.TagValue) int { return len(v.Type) + len(v.Value) }) + maxDataSize := q.limits.MaxBytesPerTagValuesQuery(userID) + distinctValues := collector.NewDistinctValue(maxDataSize, req.MaxTagValues, req.StaleValueThreshold, func(v tempopb.TagValue) int { return len(v.Type) + len(v.Value) }) mc := collector.NewMetricsCollector() // Virtual tags values. Get these first. 
@@ -726,7 +703,7 @@ func (q *Querier) SearchTagValuesV2(ctx context.Context, req *tempopb.SearchTagV } if distinctValues.Exceeded() { - _ = level.Warn(log.Logger).Log("msg", "size of tag values exceeded limit, reduce cardinality or size of tags", "tag", req.TagName, "userID", userID, "limit", limit, "size", distinctValues.Size()) + _ = level.Warn(log.Logger).Log("msg", "Search of tag values exceeded limit, reduce cardinality or size of tags", "tag", req.TagName, "orgID", userID, "stopReason", distinctValues.StopReason()) } return valuesToV2Response(distinctValues, mc.TotalValue()), nil @@ -839,28 +816,6 @@ func valuesToV2Response(distinctValues *collector.DistinctValue[tempopb.TagValue // SearchBlock searches the specified subset of the block for the passed tags. func (q *Querier) SearchBlock(ctx context.Context, req *tempopb.SearchBlockRequest) (*tempopb.SearchResponse, error) { - // if we have no external configuration always search in the querier - if q.cfg.Search.ExternalBackend == "" && len(q.cfg.Search.ExternalEndpoints) == 0 { - return q.internalSearchBlock(ctx, req) - } - - // if we have external configuration but there's an open slot locally then search in the querier - if q.searchPreferSelf.TryAcquire(1) { - defer q.searchPreferSelf.Release(1) - return q.internalSearchBlock(ctx, req) - } - - // proxy externally! - tenantID, err := user.ExtractOrgID(ctx) - if err != nil { - return nil, fmt.Errorf("error extracting org id for externalEndpoint: %w", err) - } - maxBytes := q.limits.MaxBytesPerTrace(tenantID) - - return q.externalClient.Search(ctx, maxBytes, req) -} - -func (q *Querier) internalSearchBlock(ctx context.Context, req *tempopb.SearchBlockRequest) (*tempopb.SearchResponse, error) { tenantID, err := user.ExtractOrgID(ctx) if err != nil { return nil, fmt.Errorf("error extracting org id in Querier.BackendSearch: %w", err) @@ -965,7 +920,7 @@ func (q *Querier) internalTagsSearchBlockV2(ctx context.Context, req *tempopb.Se query := traceql.ExtractMatchers(req.SearchReq.Query) if traceql.IsEmptyQuery(query) { - resp, err := q.store.SearchTags(ctx, meta, req.SearchReq.Scope, opts) + resp, err := q.store.SearchTags(ctx, meta, req, opts) if err != nil { return nil, err } @@ -981,7 +936,7 @@ func (q *Querier) internalTagsSearchBlockV2(ctx context.Context, req *tempopb.Se return resp, nil } - valueCollector := collector.NewScopedDistinctString(q.limits.MaxBytesPerTagValuesQuery(tenantID)) + valueCollector := collector.NewScopedDistinctString(q.limits.MaxBytesPerTagValuesQuery(tenantID), req.MaxTagsPerScope, req.StaleValueThreshold) mc := collector.NewMetricsCollector() fetcher := traceql.NewTagNamesFetcherWrapper(func(ctx context.Context, req traceql.FetchTagsRequest, cb traceql.FetchTagsCallback) error { @@ -1000,6 +955,10 @@ func (q *Querier) internalTagsSearchBlockV2(ctx context.Context, req *tempopb.Se return nil, err } + if valueCollector.Exceeded() { + level.Warn(log.Logger).Log("msg", "Search tags exceeded limit, reduce cardinality or size of tags", "orgID", tenantID, "stopReason", valueCollector.StopReason()) + } + scopedVals := valueCollector.Strings() resp := &tempopb.SearchTagsV2Response{ Scopes: make([]*tempopb.SearchTagsV2Scope, 0, len(scopedVals)), @@ -1053,7 +1012,7 @@ func (q *Querier) internalTagValuesSearchBlock(ctx context.Context, req *tempopb opts.StartPage = int(req.StartPage) opts.TotalPages = int(req.PagesToSearch) - resp, err := q.store.SearchTagValues(ctx, meta, req.SearchReq.TagName, opts) + resp, err := q.store.SearchTagValues(ctx, meta, req, opts) if err 
!= nil { return &tempopb.SearchTagValuesResponse{}, err } @@ -1109,7 +1068,10 @@ func (q *Querier) internalTagValuesSearchBlockV2(ctx context.Context, req *tempo return nil, err } - valueCollector := collector.NewDistinctValue(q.limits.MaxBytesPerTagValuesQuery(tenantID), func(v tempopb.TagValue) int { return len(v.Type) + len(v.Value) }) + valueCollector := collector.NewDistinctValue(q.limits.MaxBytesPerTagValuesQuery(tenantID), + req.SearchReq.MaxTagValues, req.SearchReq.StaleValueThreshold, + func(v tempopb.TagValue) int { return len(v.Type) + len(v.Value) }) + mc := collector.NewMetricsCollector() fetcher := traceql.NewTagValuesFetcherWrapper(func(ctx context.Context, req traceql.FetchTagValuesRequest, cb traceql.FetchTagValuesCallback) error { @@ -1121,6 +1083,10 @@ func (q *Querier) internalTagValuesSearchBlockV2(ctx context.Context, req *tempo return nil, err } + if valueCollector.Exceeded() { + level.Warn(log.Logger).Log("msg", "Search tags exceeded limit, reduce cardinality or size of tags", "orgID", tenantID, "stopReason", valueCollector.StopReason()) + } + return valuesToV2Response(valueCollector, mc.TotalValue()), nil } diff --git a/modules/querier/querier_test.go b/modules/querier/querier_test.go index 2548be01a49..445434f06dc 100644 --- a/modules/querier/querier_test.go +++ b/modules/querier/querier_test.go @@ -2,101 +2,18 @@ package querier import ( "context" - "net/http" - "net/http/httptest" "sort" "testing" - "time" "github.com/grafana/dskit/user" - "github.com/prometheus/client_golang/prometheus" - "github.com/stretchr/testify/require" - "github.com/uber-go/atomic" - generator_client "github.com/grafana/tempo/modules/generator/client" ingester_client "github.com/grafana/tempo/modules/ingester/client" "github.com/grafana/tempo/modules/overrides" - "github.com/grafana/tempo/modules/querier/external" "github.com/grafana/tempo/pkg/tempopb" + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" ) -func TestQuerierUsesSearchExternalEndpoint(t *testing.T) { - numExternalRequests := atomic.NewInt32(0) - - srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - time.Sleep(100 * time.Millisecond) - numExternalRequests.Inc() - })) - defer srv.Close() - - tests := []struct { - cfg Config - queriesToExecute int - externalExpected int32 - }{ - // SearchExternalEndpoints is respected - { - cfg: Config{ - Search: SearchConfig{ - ExternalEndpoints: []string{srv.URL}, - }, - }, - queriesToExecute: 3, - externalExpected: 3, - }, - // No SearchExternalEndpoints causes the querier to service everything internally - { - cfg: Config{}, - queriesToExecute: 3, - externalExpected: 0, - }, - { - cfg: Config{ - Search: SearchConfig{ - ExternalBackend: "google_cloud_run", - CloudRun: &external.CloudRunConfig{ - Endpoints: []string{srv.URL}, - NoAuth: true, - }, - }, - }, - queriesToExecute: 1, - externalExpected: 1, - }, - // SearchPreferSelf is respected. this test won't pass b/c SearchBlock fails instantly and so - // all 3 queries are executed locally and nothing is proxied to the external endpoint. - // we'd have to mock the storage.Store interface to get this to pass. it's a big interface. 
- // { - // cfg: Config{ - // SearchExternalEndpoints: []string{srv.URL}, - // SearchPreferSelf: 2, - // }, - // queriesToExecute: 3, - // externalExpected: 1, - // }, - } - - ctx := user.InjectOrgID(context.Background(), "blerg") - - for _, tc := range tests { - numExternalRequests.Store(0) - - o, err := overrides.NewOverrides(overrides.Config{}, nil, prometheus.DefaultRegisterer) - require.NoError(t, err) - - q, err := New(tc.cfg, ingester_client.Config{}, nil, generator_client.Config{}, nil, nil, o) - require.NoError(t, err) - - for i := 0; i < tc.queriesToExecute; i++ { - // ignore error purposefully here. all queries will error, but we don't care - // numExternalRequests will tell us what we need to know - _, _ = q.SearchBlock(ctx, &tempopb.SearchBlockRequest{}) - } - - require.Equal(t, tc.externalExpected, numExternalRequests.Load()) - } -} - func TestVirtualTagsDoesntHitBackend(t *testing.T) { o, err := overrides.NewOverrides(overrides.Config{}, nil, prometheus.DefaultRegisterer) require.NoError(t, err) diff --git a/modules/storage/config.go b/modules/storage/config.go index 62766ec83a5..c7766a57c60 100644 --- a/modules/storage/config.go +++ b/modules/storage/config.go @@ -2,7 +2,6 @@ package storage import ( "flag" - "time" "github.com/grafana/tempo/pkg/cache" "github.com/grafana/tempo/pkg/util" @@ -35,10 +34,10 @@ func (cfg *Config) RegisterFlagsAndApplyDefaults(prefix string, f *flag.FlagSet) f.DurationVar(&cfg.Trace.BlocklistPoll, util.PrefixConfig(prefix, "trace.blocklist_poll"), tempodb.DefaultBlocklistPoll, "Period at which to run the maintenance cycle.") cfg.Trace.WAL = &wal.Config{} + cfg.Trace.WAL.RegisterFlags(f) f.StringVar(&cfg.Trace.WAL.Filepath, util.PrefixConfig(prefix, "trace.wal.path"), "/var/tempo/wal", "Path at which store WAL blocks.") cfg.Trace.WAL.Encoding = backend.EncSnappy cfg.Trace.WAL.SearchEncoding = backend.EncNone - cfg.Trace.WAL.IngestionSlack = 2 * time.Minute cfg.Trace.Search = &tempodb.SearchConfig{} cfg.Trace.Search.RegisterFlagsAndApplyDefaults(prefix, f) diff --git a/operations/jsonnet-compiled/ConfigMap-tempo-block-builder.yaml b/operations/jsonnet-compiled/ConfigMap-tempo-block-builder.yaml new file mode 100644 index 00000000000..1230d1780be --- /dev/null +++ b/operations/jsonnet-compiled/ConfigMap-tempo-block-builder.yaml @@ -0,0 +1,44 @@ +apiVersion: v1 +data: + tempo.yaml: | + compactor: {} + distributor: {} + http_api_prefix: "" + ingester: + lifecycler: + ring: + replication_factor: 3 + memberlist: + abort_if_cluster_join_fails: false + bind_port: 7946 + join_members: + - dns+gossip-ring.tracing.svc.cluster.local.:7946 + overrides: + per_tenant_override_config: /overrides/overrides.yaml + server: + http_listen_port: 3200 + storage: + trace: + azure: + container_name: tempo + backend: gcs + blocklist_poll: "0" + cache: memcached + gcs: + bucket_name: tempo + chunk_buffer_size: 1.048576e+07 + memcached: + consistent_hash: true + host: memcached + service: memcached-client + timeout: 200ms + pool: + queue_depth: 2000 + s3: + bucket: tempo + wal: + path: /var/tempo/wal +kind: ConfigMap +metadata: + name: tempo-block-builder + namespace: tracing diff --git a/operations/jsonnet-compiled/Service-block-builder.yaml b/operations/jsonnet-compiled/Service-block-builder.yaml new file mode 100644 index 00000000000..607c0e60725 --- /dev/null +++ b/operations/jsonnet-compiled/Service-block-builder.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + name: block-builder + name: block-builder + namespace: tracing +spec: + ports: + - 
name: block-builder-prom-metrics + port: 3200 + targetPort: 3200 + selector: + app: block-builder + name: block-builder diff --git a/operations/jsonnet-compiled/StatefulSet-block-builder.yaml b/operations/jsonnet-compiled/StatefulSet-block-builder.yaml new file mode 100644 index 00000000000..a718577689f --- /dev/null +++ b/operations/jsonnet-compiled/StatefulSet-block-builder.yaml @@ -0,0 +1,61 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: block-builder + namespace: tracing +spec: + podManagementPolicy: Parallel + replicas: 0 + selector: + matchLabels: + app: block-builder + name: block-builder + serviceName: block-builder + template: + metadata: + annotations: + config_hash: 46188d18f0d8adfa8586e9dbeb744db2 + labels: + app: block-builder + name: block-builder + spec: + containers: + - args: + - -config.file=/conf/tempo.yaml + - -mem-ballast-size-mbs=1024 + - -target=block-builder + image: grafana/tempo:latest + imagePullPolicy: IfNotPresent + name: block-builder + ports: + - containerPort: 3200 + name: prom-metrics + readinessProbe: + httpGet: + path: /ready + port: 3200 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + cpu: "1" + memory: 2Gi + requests: + cpu: 500m + memory: 1Gi + volumeMounts: + - mountPath: /conf + name: tempo-conf + - mountPath: /overrides + name: overrides + securityContext: + fsGroup: 10001 + volumes: + - configMap: + name: tempo-block-builder + name: tempo-conf + - configMap: + name: tempo-overrides + name: overrides + updateStrategy: + type: RollingUpdate diff --git a/operations/jsonnet/microservices/block-builder.libsonnet b/operations/jsonnet/microservices/block-builder.libsonnet new file mode 100644 index 00000000000..926180fd2ab --- /dev/null +++ b/operations/jsonnet/microservices/block-builder.libsonnet @@ -0,0 +1,65 @@ +{ + local k = import 'ksonnet-util/kausal.libsonnet', + + local container = k.core.v1.container, + local containerPort = k.core.v1.containerPort, + local volumeMount = k.core.v1.volumeMount, + local statefulset = k.apps.v1.statefulSet, + local volume = k.core.v1.volume, + local envVar = k.core.v1.envVar, + local configMap = k.core.v1.configMap, + + local target_name = 'block-builder', + local tempo_config_volume = 'tempo-conf', + local tempo_overrides_config_volume = 'overrides', + + // Statefulset + + tempo_block_builder_ports:: [containerPort.new('prom-metrics', $._config.port)], + + tempo_block_builder_args:: { + target: target_name, + 'config.file': '/conf/tempo.yaml', + 'mem-ballast-size-mbs': $._config.ballast_size_mbs, + }, + + tempo_block_builder_container:: + container.new(target_name, $._images.tempo) + + container.withPorts($.tempo_block_builder_ports) + + container.withArgs($.util.mapToFlags($.tempo_block_builder_args)) + + container.withVolumeMounts([ + volumeMount.new(tempo_config_volume, '/conf'), + volumeMount.new(tempo_overrides_config_volume, '/overrides'), + ]) + + $.util.withResources($._config.block_builder.resources) + + (if $._config.variables_expansion then container.withEnvMixin($._config.variables_expansion_env_mixin) else {}) + + $.util.readinessProbe + + (if $._config.variables_expansion then container.withArgsMixin(['-config.expand-env=true']) else {}), + + tempo_block_builder_statefulset: + statefulset.new(target_name, $._config.block_builder.replicas, $.tempo_block_builder_container, [], { app: target_name }) + + statefulset.mixin.spec.withServiceName(target_name) + + statefulset.spec.template.spec.securityContext.withFsGroup(10001) + // 10001 is the UID of the tempo user + 
statefulset.mixin.spec.template.metadata.withAnnotations({ + config_hash: std.md5(std.toString($.tempo_block_builder_configmap.data['tempo.yaml'])), + }) + + statefulset.mixin.spec.template.spec.withVolumes([ + volume.fromConfigMap(tempo_config_volume, $.tempo_block_builder_configmap.metadata.name), + volume.fromConfigMap(tempo_overrides_config_volume, $._config.overrides_configmap_name), + ]) + + statefulset.mixin.spec.withPodManagementPolicy('Parallel'), + + // Configmap + + tempo_block_builder_configmap: + configMap.new('tempo-block-builder') + + configMap.withData({ + 'tempo.yaml': k.util.manifestYaml($.tempo_block_builder_config), + }), + + // Service + + tempo_block_builder_service: + k.util.serviceFor($.tempo_block_builder_statefulset), + +} diff --git a/operations/jsonnet/microservices/config.libsonnet b/operations/jsonnet/microservices/config.libsonnet index f54214555ed..7d0a018ce73 100644 --- a/operations/jsonnet/microservices/config.libsonnet +++ b/operations/jsonnet/microservices/config.libsonnet @@ -107,6 +107,19 @@ }, }, }, + block_builder: { + replicas: 0, + resources: { + requests: { + cpu: '500m', + memory: '1Gi', + }, + limits: { + cpu: '1', + memory: '2Gi', + }, + }, + }, memcached: { replicas: 3, connection_limit: 4096, diff --git a/operations/jsonnet/microservices/configmap.libsonnet b/operations/jsonnet/microservices/configmap.libsonnet index c2f33c0ccba..8379a851e4a 100644 --- a/operations/jsonnet/microservices/configmap.libsonnet +++ b/operations/jsonnet/microservices/configmap.libsonnet @@ -111,6 +111,7 @@ }, tempo_query_frontend_config:: $.tempo_config {}, + tempo_block_builder_config:: $.tempo_config {}, // This will be the single configmap that stores `overrides.yaml`. overrides_config: @@ -151,6 +152,12 @@ 'tempo.yaml': $.util.manifestYaml($.tempo_querier_config), }), + tempo_block_builder_configmap: + configMap.new('tempo-block-builder') + + configMap.withData({ + 'tempo.yaml': $.util.manifestYaml($.tempo_block_builder_config), + }), + tempo_query_frontend_configmap: configMap.new('tempo-query-frontend') + configMap.withData({ diff --git a/operations/jsonnet/microservices/tempo.libsonnet b/operations/jsonnet/microservices/tempo.libsonnet index af421491493..aa7c0782a61 100644 --- a/operations/jsonnet/microservices/tempo.libsonnet +++ b/operations/jsonnet/microservices/tempo.libsonnet @@ -7,6 +7,7 @@ (import 'generator.libsonnet') + (import 'frontend.libsonnet') + (import 'querier.libsonnet') + +(import 'block-builder.libsonnet') + (import 'vulture.libsonnet') + (import 'memcached.libsonnet') + (import 'multi-zone.libsonnet') + diff --git a/operations/tempo-mixin-compiled/dashboards/tempo-operational.json b/operations/tempo-mixin-compiled/dashboards/tempo-operational.json index 5e811d10150..79cff1277df 100644 --- a/operations/tempo-mixin-compiled/dashboards/tempo-operational.json +++ b/operations/tempo-mixin-compiled/dashboards/tempo-operational.json @@ -1809,7 +1809,7 @@ "pluginVersion": "9.0.0-d373beebpre", "targets": [ { - "expr": "sum(rate(tempo_request_duration_seconds_count{route=~\".*api_traces_traceid\", cluster=\"$cluster\", namespace=\"$namespace\", job=~\"$namespace/cortex-gw(-internal)?\"}[$__rate_interval])) by (status_code)", + "expr": "sum(rate(tempo_request_duration_seconds_count{route=~\".*traces_traceid\", cluster=\"$cluster\", namespace=\"$namespace\", job=~\"$namespace/cortex-gw(-internal)?\"}[$__rate_interval])) by (status_code)", "hide": false, "interval": "", "legendFormat": "{{status_code}}", @@ -1907,19 +1907,19 @@ "pluginVersion": 
"9.0.0-d373beebpre", "targets": [ { - "expr": "histogram_quantile(.99, sum(rate(tempo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", job=~\"$namespace/cortex-gw(-internal)?\", route=~\".*api_traces_traceid\"}[$__rate_interval])) by (le))", + "expr": "histogram_quantile(.99, sum(rate(tempo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", job=~\"$namespace/cortex-gw(-internal)?\", route=~\".*traces_traceid\"}[$__rate_interval])) by (le))", "interval": "", "legendFormat": ".99", "refId": "A" }, { - "expr": "histogram_quantile(.9, sum(rate(tempo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", job=~\"$namespace/cortex-gw(-internal)?\", route=~\".*api_traces_traceid\"}[$__rate_interval])) by (le))", + "expr": "histogram_quantile(.9, sum(rate(tempo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", job=~\"$namespace/cortex-gw(-internal)?\", route=~\".*traces_traceid\"}[$__rate_interval])) by (le))", "interval": "", "legendFormat": ".9", "refId": "B" }, { - "expr": "histogram_quantile(.5, sum(rate(tempo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", job=~\"$namespace/cortex-gw(-internal)?\", route=~\".*api_traces_traceid\"}[$__rate_interval])) by (le))", + "expr": "histogram_quantile(.5, sum(rate(tempo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", job=~\"$namespace/cortex-gw(-internal)?\", route=~\".*traces_traceid\"}[$__rate_interval])) by (le))", "interval": "", "legendFormat": ".5", "refId": "C" @@ -2233,7 +2233,7 @@ "pluginVersion": "9.0.0-d373beebpre", "targets": [ { - "expr": "sum(rate(tempo_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", route=~\".*api_traces_traceid\", job=\"$namespace/query-frontend\"}[$__rate_interval])) by (status_code)", + "expr": "sum(rate(tempo_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", route=~\".*traces_traceid\", job=\"$namespace/query-frontend\"}[$__rate_interval])) by (status_code)", "hide": false, "interval": "", "legendFormat": "{{status_code}}", @@ -2331,19 +2331,19 @@ "pluginVersion": "9.0.0-d373beebpre", "targets": [ { - "expr": "histogram_quantile(.99, sum(rate(tempo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/query-frontend\", route=~\".*api_traces_traceid\"}[$__rate_interval])) by (le))", + "expr": "histogram_quantile(.99, sum(rate(tempo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/query-frontend\", route=~\".*traces_traceid\"}[$__rate_interval])) by (le))", "interval": "", "legendFormat": ".99", "refId": "A" }, { - "expr": "histogram_quantile(.9, sum(rate(tempo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/query-frontend\", route=~\".*api_traces_traceid\"}[$__rate_interval])) by (le))", + "expr": "histogram_quantile(.9, sum(rate(tempo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/query-frontend\", route=~\".*traces_traceid\"}[$__rate_interval])) by (le))", "interval": "", "legendFormat": ".9", "refId": "B" }, { - "expr": "histogram_quantile(.5, sum(rate(tempo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/query-frontend\", route=~\".*api_traces_traceid\"}[$__rate_interval])) by (le))", + "expr": "histogram_quantile(.5, 
sum(rate(tempo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/query-frontend\", route=~\".*traces_traceid\"}[$__rate_interval])) by (le))", "interval": "", "legendFormat": ".5", "refId": "C" @@ -2655,7 +2655,7 @@ "pluginVersion": "9.0.0-d373beebpre", "targets": [ { - "expr": "sum(rate(tempo_request_duration_seconds_count{route=~\"querier_.*api_traces_traceid\", cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/querier\"}[$__rate_interval])) by (status_code)", + "expr": "sum(rate(tempo_request_duration_seconds_count{route=~\"querier_.*traces_traceid\", cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/querier\"}[$__rate_interval])) by (status_code)", "hide": false, "interval": "", "legendFormat": "{{status_code}}", @@ -2753,19 +2753,19 @@ "pluginVersion": "9.0.0-d373beebpre", "targets": [ { - "expr": "histogram_quantile(.99, sum(rate(tempo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/querier\", route=~\"querier_.*api_traces_traceid\"}[$__rate_interval])) by (le))", + "expr": "histogram_quantile(.99, sum(rate(tempo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/querier\", route=~\"querier_.*traces_traceid\"}[$__rate_interval])) by (le))", "interval": "", "legendFormat": ".99", "refId": "A" }, { - "expr": "histogram_quantile(.9, sum(rate(tempo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/querier\", route=~\"querier_.*api_traces_traceid\"}[$__rate_interval])) by (le))", + "expr": "histogram_quantile(.9, sum(rate(tempo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/querier\", route=~\"querier_.*traces_traceid\"}[$__rate_interval])) by (le))", "interval": "", "legendFormat": ".9", "refId": "B" }, { - "expr": "histogram_quantile(.5, sum(rate(tempo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/querier\", route=~\"querier_.*api_traces_traceid\"}[$__rate_interval])) by (le))", + "expr": "histogram_quantile(.5, sum(rate(tempo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/querier\", route=~\"querier_.*traces_traceid\"}[$__rate_interval])) by (le))", "interval": "", "legendFormat": ".5", "refId": "C" @@ -7364,7 +7364,7 @@ "type": "row" } ], - "refresh": "30s", + "refresh": "", "schemaVersion": 39, "tags": [ "tempo" @@ -7477,8 +7477,8 @@ "allValue": ".*", "current": { "selected": false, - "text": "All", - "value": "$__all" + "text": "ingester", + "value": "ingester" }, "hide": 0, "includeAll": true, @@ -7486,10 +7486,15 @@ "name": "component", "options": [ { - "selected": true, + "selected": false, "text": "All", "value": "$__all" }, + { + "selected": false, + "text": "block-builder", + "value": "block-builder" + }, { "selected": false, "text": "compactor", @@ -7501,7 +7506,7 @@ "value": "distributor" }, { - "selected": false, + "selected": true, "text": "ingester", "value": "ingester" }, @@ -7531,7 +7536,7 @@ "value": "cortex-gw-internal" } ], - "query": "compactor,distributor,ingester,metrics-generator,query-frontend,querier,cortex-gw,cortex-gw-internal", + "query": "block-builder,compactor,distributor,ingester,metrics-generator,query-frontend,querier,cortex-gw,cortex-gw-internal", "queryValue": "", "skipUrlSync": false, "type": "custom" diff --git a/operations/tempo-mixin-compiled/dashboards/tempo-reads.json 
b/operations/tempo-mixin-compiled/dashboards/tempo-reads.json index 2564b7ec408..e402049dde7 100644 --- a/operations/tempo-mixin-compiled/dashboards/tempo-reads.json +++ b/operations/tempo-mixin-compiled/dashboards/tempo-reads.json @@ -697,216 +697,6 @@ "renderer": "flot", "seriesOverrides": [ - ], - "spaceLength": 10, - "span": 6, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "sum by (status) (\n label_replace(label_replace(rate(tempo_querier_external_endpoint_duration_seconds_count{cluster=~\"$cluster\", job=~\"($namespace)/querier\"}[$__rate_interval]),\n \"status\", \"${1}xx\", \"status_code\", \"([0-9])..\"),\n \"status\", \"${1}\", \"status_code\", \"([a-zA-Z]+)\"))\n", - "format": "time_series", - "interval": "1m", - "legendFormat": "{{status}}", - "refId": "A" - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "QPS", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - }, - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "id": 8, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 6, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "histogram_quantile(0.99, sum(rate(tempo_querier_external_endpoint_duration_seconds_bucket{cluster=~\"$cluster\", job=~\"($namespace)/querier\"}[$__rate_interval])) by (le,endpoint)) * 1e3", - "format": "time_series", - "interval": "1m", - "intervalFactor": 2, - "legendFormat": "{{route}} 99th", - "refId": "A", - "step": 10 - }, - { - "expr": "histogram_quantile(0.50, sum(rate(tempo_querier_external_endpoint_duration_seconds_bucket{cluster=~\"$cluster\", job=~\"($namespace)/querier\"}[$__rate_interval])) by (le,endpoint)) * 1e3", - "format": "time_series", - "interval": "1m", - "intervalFactor": 2, - "legendFormat": "{{route}} 50th", - "refId": "B", - "step": 10 - }, - { - "expr": "sum(rate(tempo_querier_external_endpoint_duration_seconds_sum{cluster=~\"$cluster\", job=~\"($namespace)/querier\"}[$__rate_interval])) by (endpoint) * 1e3 / sum(rate(tempo_querier_external_endpoint_duration_seconds_count{cluster=~\"$cluster\", job=~\"($namespace)/querier\"}[$__rate_interval])) by (endpoint)", - "format": "time_series", - "interval": "1m", - "intervalFactor": 2, - "legendFormat": "{{route}} Average", - "refId": "C", - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Latency", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "ms", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": 
null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Querier External Endpoint", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { - "1xx": "#EAB839", - "2xx": "#7EB26D", - "3xx": "#6ED0E0", - "4xx": "#EF843C", - "5xx": "#E24D42", - "OK": "#7EB26D", - "cancel": "#A9A9A9", - "error": "#E24D42", - "success": "#7EB26D" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 10, - "id": 9, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 0, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - ], "spaceLength": 10, "span": 6, @@ -970,7 +760,7 @@ "dashes": false, "datasource": "$datasource", "fill": 1, - "id": 10, + "id": 8, "legend": { "avg": false, "current": false, @@ -1095,7 +885,7 @@ "dashes": false, "datasource": "$datasource", "fill": 10, - "id": 11, + "id": 9, "legend": { "avg": false, "current": false, @@ -1180,7 +970,7 @@ "dashes": false, "datasource": "$datasource", "fill": 1, - "id": 12, + "id": 10, "legend": { "avg": false, "current": false, @@ -1305,7 +1095,7 @@ "dashes": false, "datasource": "$datasource", "fill": 10, - "id": 13, + "id": 11, "legend": { "avg": false, "current": false, @@ -1390,7 +1180,7 @@ "dashes": false, "datasource": "$datasource", "fill": 1, - "id": 14, + "id": 12, "legend": { "avg": false, "current": false, diff --git a/operations/tempo-mixin-compiled/dashboards/tempo-writes.json b/operations/tempo-mixin-compiled/dashboards/tempo-writes.json index 5d9b29bbd4a..d81221d66f2 100644 --- a/operations/tempo-mixin-compiled/dashboards/tempo-writes.json +++ b/operations/tempo-mixin-compiled/dashboards/tempo-writes.json @@ -276,7 +276,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (grpc_status) (\n rate(\n label_replace(\n {cluster=~\"$cluster\", job=~\"($namespace)/cortex-gw(-internal)?\", __name__=~\"envoy_cluster_grpc_proto_collector_trace_v1_TraceService_[0-9]+\"},\n \"grpc_status\", \"$1\", \"__name__\", \"envoy_cluster_grpc_proto_collector_trace_v1_TraceService_(.+)\"\n )\n [$__interval:$__interval]\n )\n)\n", + "expr": "sum by (grpc_status) (\n rate(\n label_replace(\n {cluster=~\"$cluster\", job=~\"($namespace)/cortex-gw(-internal)?\", __name__=~\"envoy_cluster_grpc_proto_collector_trace_v1_TraceService_[0-9]+\"},\n \"grpc_status\", \"$1\", \"__name__\", \"envoy_cluster_grpc_proto_collector_trace_v1_TraceService_(.+)\"\n )\n [$__rate_interval:30s]\n )\n)\n", "format": "time_series", "interval": "1m", "legendFormat": "{{grpc_status}}", diff --git a/operations/tempo-mixin/dashboards/tempo-operational.json b/operations/tempo-mixin/dashboards/tempo-operational.json index 3bf55f63e05..ea87aa6a446 100644 --- a/operations/tempo-mixin/dashboards/tempo-operational.json +++ b/operations/tempo-mixin/dashboards/tempo-operational.json @@ -1707,7 +1707,7 @@ "pluginVersion": "9.0.0-d373beebpre", "targets": [ { - "expr": "sum(rate(tempo_request_duration_seconds_count{route=~\".*api_traces_traceid\", cluster=\"$cluster\", namespace=\"$namespace\", job=~\"$namespace/cortex-gw(-internal)?\"}[$__rate_interval])) by (status_code)", + "expr": "sum(rate(tempo_request_duration_seconds_count{route=~\".*traces_traceid\", cluster=\"$cluster\", 
namespace=\"$namespace\", job=~\"$namespace/cortex-gw(-internal)?\"}[$__rate_interval])) by (status_code)", "hide": false, "interval": "", "legendFormat": "{{status_code}}", @@ -1799,19 +1799,19 @@ "pluginVersion": "9.0.0-d373beebpre", "targets": [ { - "expr": "histogram_quantile(.99, sum(rate(tempo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", job=~\"$namespace/cortex-gw(-internal)?\", route=~\".*api_traces_traceid\"}[$__rate_interval])) by (le))", + "expr": "histogram_quantile(.99, sum(rate(tempo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", job=~\"$namespace/cortex-gw(-internal)?\", route=~\".*traces_traceid\"}[$__rate_interval])) by (le))", "interval": "", "legendFormat": ".99", "refId": "A" }, { - "expr": "histogram_quantile(.9, sum(rate(tempo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", job=~\"$namespace/cortex-gw(-internal)?\", route=~\".*api_traces_traceid\"}[$__rate_interval])) by (le))", + "expr": "histogram_quantile(.9, sum(rate(tempo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", job=~\"$namespace/cortex-gw(-internal)?\", route=~\".*traces_traceid\"}[$__rate_interval])) by (le))", "interval": "", "legendFormat": ".9", "refId": "B" }, { - "expr": "histogram_quantile(.5, sum(rate(tempo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", job=~\"$namespace/cortex-gw(-internal)?\", route=~\".*api_traces_traceid\"}[$__rate_interval])) by (le))", + "expr": "histogram_quantile(.5, sum(rate(tempo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", job=~\"$namespace/cortex-gw(-internal)?\", route=~\".*traces_traceid\"}[$__rate_interval])) by (le))", "interval": "", "legendFormat": ".5", "refId": "C" @@ -2107,7 +2107,7 @@ "pluginVersion": "9.0.0-d373beebpre", "targets": [ { - "expr": "sum(rate(tempo_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", route=~\".*api_traces_traceid\", job=\"$namespace/query-frontend\"}[$__rate_interval])) by (status_code)", + "expr": "sum(rate(tempo_request_duration_seconds_count{cluster=\"$cluster\", namespace=\"$namespace\", route=~\".*traces_traceid\", job=\"$namespace/query-frontend\"}[$__rate_interval])) by (status_code)", "hide": false, "interval": "", "legendFormat": "{{status_code}}", @@ -2199,19 +2199,19 @@ "pluginVersion": "9.0.0-d373beebpre", "targets": [ { - "expr": "histogram_quantile(.99, sum(rate(tempo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/query-frontend\", route=~\".*api_traces_traceid\"}[$__rate_interval])) by (le))", + "expr": "histogram_quantile(.99, sum(rate(tempo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/query-frontend\", route=~\".*traces_traceid\"}[$__rate_interval])) by (le))", "interval": "", "legendFormat": ".99", "refId": "A" }, { - "expr": "histogram_quantile(.9, sum(rate(tempo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/query-frontend\", route=~\".*api_traces_traceid\"}[$__rate_interval])) by (le))", + "expr": "histogram_quantile(.9, sum(rate(tempo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/query-frontend\", route=~\".*traces_traceid\"}[$__rate_interval])) by (le))", "interval": "", "legendFormat": ".9", "refId": "B" }, { - "expr": "histogram_quantile(.5, 
sum(rate(tempo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/query-frontend\", route=~\".*api_traces_traceid\"}[$__rate_interval])) by (le))", + "expr": "histogram_quantile(.5, sum(rate(tempo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/query-frontend\", route=~\".*traces_traceid\"}[$__rate_interval])) by (le))", "interval": "", "legendFormat": ".5", "refId": "C" @@ -2505,7 +2505,7 @@ "pluginVersion": "9.0.0-d373beebpre", "targets": [ { - "expr": "sum(rate(tempo_request_duration_seconds_count{route=~\"querier_.*api_traces_traceid\", cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/querier\"}[$__rate_interval])) by (status_code)", + "expr": "sum(rate(tempo_request_duration_seconds_count{route=~\"querier_.*traces_traceid\", cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/querier\"}[$__rate_interval])) by (status_code)", "hide": false, "interval": "", "legendFormat": "{{status_code}}", @@ -2597,19 +2597,19 @@ "pluginVersion": "9.0.0-d373beebpre", "targets": [ { - "expr": "histogram_quantile(.99, sum(rate(tempo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/querier\", route=~\"querier_.*api_traces_traceid\"}[$__rate_interval])) by (le))", + "expr": "histogram_quantile(.99, sum(rate(tempo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/querier\", route=~\"querier_.*traces_traceid\"}[$__rate_interval])) by (le))", "interval": "", "legendFormat": ".99", "refId": "A" }, { - "expr": "histogram_quantile(.9, sum(rate(tempo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/querier\", route=~\"querier_.*api_traces_traceid\"}[$__rate_interval])) by (le))", + "expr": "histogram_quantile(.9, sum(rate(tempo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/querier\", route=~\"querier_.*traces_traceid\"}[$__rate_interval])) by (le))", "interval": "", "legendFormat": ".9", "refId": "B" }, { - "expr": "histogram_quantile(.5, sum(rate(tempo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/querier\", route=~\"querier_.*api_traces_traceid\"}[$__rate_interval])) by (le))", + "expr": "histogram_quantile(.5, sum(rate(tempo_request_duration_seconds_bucket{cluster=\"$cluster\", namespace=\"$namespace\", job=\"$namespace/querier\", route=~\"querier_.*traces_traceid\"}[$__rate_interval])) by (le))", "interval": "", "legendFormat": ".5", "refId": "C" @@ -6936,7 +6936,7 @@ "type": "row" } ], - "refresh": "30s", + "refresh": "", "schemaVersion": 39, "tags": [ "tempo" @@ -7041,8 +7041,8 @@ "allValue": ".*", "current": { "selected": false, - "text": "All", - "value": "$__all" + "text": "ingester", + "value": "ingester" }, "hide": 0, "includeAll": true, @@ -7050,10 +7050,15 @@ "name": "component", "options": [ { - "selected": true, + "selected": false, "text": "All", "value": "$__all" }, + { + "selected": false, + "text": "block-builder", + "value": "block-builder" + }, { "selected": false, "text": "compactor", @@ -7065,7 +7070,7 @@ "value": "distributor" }, { - "selected": false, + "selected": true, "text": "ingester", "value": "ingester" }, @@ -7095,7 +7100,7 @@ "value": "cortex-gw-internal" } ], - "query": "compactor,distributor,ingester,metrics-generator,query-frontend,querier,cortex-gw,cortex-gw-internal", + "query": 
"block-builder,compactor,distributor,ingester,metrics-generator,query-frontend,querier,cortex-gw,cortex-gw-internal", "queryValue": "", "skipUrlSync": false, "type": "custom" diff --git a/operations/tempo-mixin/dashboards/tempo-reads.libsonnet b/operations/tempo-mixin/dashboards/tempo-reads.libsonnet index d5fb2869e52..506e7a48ea6 100644 --- a/operations/tempo-mixin/dashboards/tempo-reads.libsonnet +++ b/operations/tempo-mixin/dashboards/tempo-reads.libsonnet @@ -39,17 +39,6 @@ dashboard_utils { $.latencyPanel('tempo_request_duration_seconds', '{%s,route=~"querier_%sapi_.*"}' % [$.jobMatcher($._config.jobs.querier), $._config.http_api_prefix], additional_grouping='route') ) ) - .addRow( - g.row('Querier External Endpoint') - .addPanel( - $.panel('QPS') + - $.qpsPanel('tempo_querier_external_endpoint_duration_seconds_count{%s}' % [$.jobMatcher($._config.jobs.querier)]) - ) - .addPanel( - $.panel('Latency') + - $.latencyPanel('tempo_querier_external_endpoint_duration_seconds', '{%s}' % [$.jobMatcher($._config.jobs.querier)], additional_grouping='endpoint') - ) - ) .addRow( g.row('Ingester') .addPanel( diff --git a/operations/tempo-mixin/dashboards/tempo-writes.libsonnet b/operations/tempo-mixin/dashboards/tempo-writes.libsonnet index ce6fc29bf9b..d7bff639be4 100644 --- a/operations/tempo-mixin/dashboards/tempo-writes.libsonnet +++ b/operations/tempo-mixin/dashboards/tempo-writes.libsonnet @@ -29,7 +29,7 @@ dashboard_utils { {%s, __name__=~"envoy_cluster_grpc_proto_collector_trace_v1_TraceService_[0-9]+"}, "grpc_status", "$1", "__name__", "envoy_cluster_grpc_proto_collector_trace_v1_TraceService_(.+)" ) - [$__interval:$__interval] + [$__rate_interval:30s] ) ) ||| % $.jobMatcher($._config.jobs.gateway), diff --git a/operations/tempo-mixin/runbook.md b/operations/tempo-mixin/runbook.md index a0da36dbb21..060dd875742 100644 --- a/operations/tempo-mixin/runbook.md +++ b/operations/tempo-mixin/runbook.md @@ -45,6 +45,9 @@ Another way to increase parallelism is by increasing the size of the worker pool A theoretically ideal value for this config to avoid _any_ queueing would be (Size of blocklist / Max Concurrent Queries). But also factor in the resources provided to the querier. +Our [documentation](https://grafana.com/docs/tempo/latest/operations/backend_search/#query-frontend) +includes [a solid guide](https://grafana.com/docs/tempo/latest/operations/backend_search/#guidelines-on-key-configuration-parameters) on the various parameters with suggestions. + ### Trace Lookup Failures If trace lookups are fail with the error: `error querying store in Querier.FindTraceByID: queue doesn't have room for jobs`, this @@ -70,24 +73,6 @@ Consider the following resolutions: - Increase the queue_depth size to do more work per querier - Adjust compaction settings to reduce the number of blocks -### Serverless/External Endpoints - -If the request latency issues are due to backend searches with serverless/external endpoints there may be additional configuration -options that will help decrease latency. Before you get started know that serverless functionality only impacts the `/api/search` endpoint -if a `start` and `end` parameter are passed and `external_endpoints` are configured on the querier. One way to determine if external -endpoints are getting hit is to check the Reads dashboard and look for the "Querier External Endpoint" row. - -Tuning serverless search can be difficult. 
Our [public documentation](https://grafana.com/docs/tempo/latest/operations/backend_search/#query-frontend) -includes a solid guide on the various parameters with suggestions. The linked documentation is augmented with some suggestions here: - -- Consider provisioning more serverless functions and adding them to the `querier.search.external_endpoints` array. This will increase your - baseline latency and your total throughput. -- Decreasing `querier.search.hedge_requests_at` and increasing `querier.search.hedge_requests_up_to` will put more pressure on the serverless endpoints but will - result in lower latency. -- Increasing `querier.search.prefer_self` and scaling up the queriers will cause more work to be performed by the queriers which will lower latencies. -- Increasing `query_frontend.max_oustanding_per_tenant` and `query_frontend.search.concurrent_jobs` will increase the rate at which the - query_frontend tries to feed jobs to the queriers and can decrease latency. - ## TempoCompactorUnhealthy If this occurs access the [ring page](https://grafana.com/docs/tempo/latest/operations/consistent_hash_ring/) at `/compactor/ring`. diff --git a/pkg/api/http.go b/pkg/api/http.go index 2a4ed06c0f2..36adb909107 100644 --- a/pkg/api/http.go +++ b/pkg/api/http.go @@ -40,7 +40,7 @@ const ( urlParamSince = "since" urlParamExemplars = "exemplars" - // backend search (querier/serverless) + // backend search querier urlParamStartPage = "startPage" urlParamPagesToSearch = "pagesToSearch" urlParamBlockID = "blockID" @@ -53,9 +53,6 @@ const ( urlParamFooterSize = "footerSize" urlParamDedicatedColumns = "dc" - // maxBytes (serverless only) - urlParamMaxBytes = "maxBytes" - // search tags urlParamScope = "scope" @@ -459,7 +456,9 @@ func BuildQueryRangeRequest(req *http.Request, searchReq *tempopb.QueryRangeRequ qb := newQueryBuilder("") qb.addParam(urlParamStart, strconv.FormatUint(searchReq.Start, 10)) qb.addParam(urlParamEnd, strconv.FormatUint(searchReq.End, 10)) - qb.addParam(urlParamStep, time.Duration(searchReq.Step).String()) + if searchReq.Step != 0 { // if step == 0 leave the param out and Tempo will calculate it + qb.addParam(urlParamStep, time.Duration(searchReq.Step).String()) + } qb.addParam(QueryModeKey, searchReq.QueryMode) // New RF1 params qb.addParam(urlParamBlockID, searchReq.BlockID) @@ -679,37 +678,6 @@ func BuildSearchBlockRequest(req *http.Request, searchReq *tempopb.SearchBlockRe return req, nil } -// AddServerlessParams takes an already existing http.Request and adds maxBytes -// to it -func AddServerlessParams(req *http.Request, maxBytes int) *http.Request { - if req == nil { - req = &http.Request{ - URL: &url.URL{}, - } - } - - qb := newQueryBuilder(req.URL.RawQuery) - qb.addParam(urlParamMaxBytes, strconv.FormatInt(int64(maxBytes), 10)) - req.URL.RawQuery = qb.query() - - return req -} - -// ExtractServerlessParams extracts params for the serverless functions from -// an http.Request -func ExtractServerlessParams(req *http.Request) (int, error) { - s, exists := extractQueryParam(req.URL.Query(), urlParamMaxBytes) - if !exists { - return 0, nil - } - maxBytes, err := strconv.ParseInt(s, 10, 32) - if err != nil { - return 0, fmt.Errorf("invalid maxBytes: %w", err) - } - - return int(maxBytes), nil -} - func extractQueryParam(v url.Values, param string) (string, bool) { value := v.Get(param) return value, value != "" diff --git a/pkg/api/http_test.go b/pkg/api/http_test.go index 6f340ebd38e..08cf5a31d3b 100644 --- a/pkg/api/http_test.go +++ b/pkg/api/http_test.go @@ -638,33 +638,6
@@ func TestBuildSearchRequest(t *testing.T) { } } -func TestAddServerlessParams(t *testing.T) { - actualURL := AddServerlessParams(nil, 10) - assert.Equal(t, "?maxBytes=10", actualURL.URL.String()) - - req, err := http.NewRequest("GET", "http://example.com", nil) - require.NoError(t, err) - - actualURL = AddServerlessParams(req, 10) - assert.Equal(t, "http://example.com?maxBytes=10", actualURL.URL.String()) -} - -func TestExtractServerlessParam(t *testing.T) { - r := httptest.NewRequest("GET", "http://example.com", nil) - maxBytes, err := ExtractServerlessParams(r) - require.NoError(t, err) - assert.Equal(t, 0, maxBytes) - - r = httptest.NewRequest("GET", "http://example.com?maxBytes=13", nil) - maxBytes, err = ExtractServerlessParams(r) - require.NoError(t, err) - assert.Equal(t, 13, maxBytes) - - r = httptest.NewRequest("GET", "http://example.com?maxBytes=blerg", nil) - _, err = ExtractServerlessParams(r) - assert.Error(t, err) -} - func Test_parseTimestamp(t *testing.T) { now := time.Now() @@ -704,7 +677,9 @@ func TestQueryRangeRoundtrip(t *testing.T) { }{ { name: "empty", - req: &tempopb.QueryRangeRequest{}, + req: &tempopb.QueryRangeRequest{ + Step: uint64(time.Second), // you can't actually roundtrip an empty query b/c Build/Parse will force a default step + }, }, { name: "not empty!", diff --git a/pkg/cache/memcached_client_selector.go b/pkg/cache/memcached_client_selector.go index 46b1d0d283d..45729afd350 100644 --- a/pkg/cache/memcached_client_selector.go +++ b/pkg/cache/memcached_client_selector.go @@ -5,7 +5,7 @@ import ( "strings" "sync" - "github.com/cespare/xxhash" + "github.com/cespare/xxhash/v2" "github.com/facette/natsort" "github.com/grafana/gomemcache/memcache" ) diff --git a/pkg/collector/distinct_string_collector.go b/pkg/collector/distinct_string_collector.go index ca881703e26..120ad4f6fe6 100644 --- a/pkg/collector/distinct_string_collector.go +++ b/pkg/collector/distinct_string_collector.go @@ -1,39 +1,51 @@ package collector import ( + "fmt" "sort" "strings" "sync" ) type DistinctString struct { - values map[string]struct{} - new map[string]struct{} - maxLen int - currLen int - diffEnabled bool - limExceeded bool - mtx sync.Mutex + values map[string]struct{} + new map[string]struct{} + maxDataSize int + currDataSize int + currentValuesLen uint32 + maxValues uint32 + maxCacheHits uint32 + currentCacheHits uint32 + diffEnabled bool + limExceeded bool + mtx sync.Mutex + stopReason string } -// NewDistinctString with the given maximum data size. This is calculated -// as the total length of the recorded strings. For ease of use, maximum=0 -// is interpreted as unlimited. -func NewDistinctString(maxDataSize int) *DistinctString { +// NewDistinctString with the given maximum data size and max values. +// MaxDataSize is calculated as the total length of the recorded strings. +// For ease of use, maxDataSize=0, maxValues=0, and staleValueThreshold=0 are interpreted as unlimited. +func NewDistinctString(maxDataSize int, maxValues uint32, staleValueThreshold uint32) *DistinctString { return &DistinctString{ - values: make(map[string]struct{}), - maxLen: maxDataSize, - diffEnabled: false, // disable diff to make it faster + values: make(map[string]struct{}), + maxDataSize: maxDataSize, + diffEnabled: false, // disable diff to make it faster + maxValues: maxValues, + maxCacheHits: staleValueThreshold, } } // NewDistinctStringWithDiff is like NewDistinctString but with diff support enabled.
-func NewDistinctStringWithDiff(maxDataSize int) *DistinctString { +// MaxDataSize is calculated as the total length of the recorded strings. +// For ease of use, maxDataSize=0, maxValues=0, and staleValueThreshold=0 are interpreted as unlimited. +func NewDistinctStringWithDiff(maxDataSize int, maxValues uint32, staleValueThreshold uint32) *DistinctString { return &DistinctString{ - values: make(map[string]struct{}), - new: make(map[string]struct{}), - maxLen: maxDataSize, - diffEnabled: true, + values: make(map[string]struct{}), + new: make(map[string]struct{}), + maxDataSize: maxDataSize, + diffEnabled: true, + maxValues: maxValues, + maxCacheHits: staleValueThreshold, } } @@ -47,20 +59,33 @@ func (d *DistinctString) Collect(s string) (added bool) { if d.limExceeded { return false } + valueLen := len(s) - if _, ok := d.values[s]; ok { - // Already present + if d.maxDataSize > 0 && d.currDataSize+valueLen >= d.maxDataSize { + d.stopReason = fmt.Sprintf("Max data exceeded: dataSize %d, maxDataSize %d", d.currDataSize, d.maxDataSize) + d.limExceeded = true return false } - valueLen := len(s) - // Can it fit? - if d.maxLen > 0 && d.currLen+valueLen > d.maxLen { - // No, it can't fit + if d.maxValues > 0 && d.currentValuesLen >= d.maxValues { + d.stopReason = fmt.Sprintf("Max values exceeded: values %d, maxValues %d", d.currentValuesLen, d.maxValues) + d.limExceeded = true + return false + } + + if d.maxCacheHits > 0 && d.currentCacheHits >= d.maxCacheHits { + d.stopReason = fmt.Sprintf("Max stale values exceeded: cacheHits %d, maxCacheHits %d", d.currentCacheHits, d.maxCacheHits) d.limExceeded = true return false } + if _, ok := d.values[s]; ok { + // Already present + d.currentCacheHits++ + return false + } + d.currentCacheHits = 0 // CacheHits reset to 0 when a new value is found + // Clone instead of referencing original s = strings.Clone(s) @@ -68,7 +93,8 @@ func (d *DistinctString) Collect(s string) (added bool) { d.new[s] = struct{}{} } d.values[s] = struct{}{} - d.currLen += valueLen + d.currDataSize += valueLen + d.currentValuesLen++ return true } @@ -96,12 +122,16 @@ func (d *DistinctString) Exceeded() bool { return d.limExceeded } +// StopReason returns the reason the collector stopped accepting values, if any. +func (d *DistinctString) StopReason() string { + d.mtx.Lock() + defer d.mtx.Unlock() + return d.stopReason +} + // Size is the total size of all distinct strings encountered.
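// An end-to-end sketch of the limit behaviour added above (illustrative, not part of this change; values are hypothetical):
//
//	d := NewDistinctString(0, 2, 0) // unlimited bytes, at most 2 distinct values
//	d.Collect("a")  // true
//	d.Collect("b")  // true
//	d.Collect("c")  // false: value limit reached
//	d.Exceeded()    // true
//	d.StopReason()  // "Max values exceeded: values 2, maxValues 2"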
func (d *DistinctString) Size() int { d.mtx.Lock() defer d.mtx.Unlock() - return d.currLen + return d.currDataSize } // Diff returns all new strings collected since the last time diff was called diff --git a/pkg/collector/distinct_string_collector_test.go b/pkg/collector/distinct_string_collector_test.go index ec778e3012c..3e119d3a74c 100644 --- a/pkg/collector/distinct_string_collector_test.go +++ b/pkg/collector/distinct_string_collector_test.go @@ -10,12 +10,12 @@ import ( ) func TestDistinctStringCollector(t *testing.T) { - d := NewDistinctString(10) + d := NewDistinctString(12, 0, 0) - d.Collect("123") - d.Collect("4567") - d.Collect("890") - d.Collect("11") + require.True(t, d.Collect("123")) + require.True(t, d.Collect("4567")) + require.True(t, d.Collect("890")) + require.False(t, d.Collect("11")) require.True(t, d.Exceeded()) stringsSlicesEqual(t, []string{"123", "4567", "890"}, d.Strings()) @@ -26,17 +26,56 @@ func TestDistinctStringCollector(t *testing.T) { require.Error(t, err, errDiffNotEnabled) } +func TestDistinctStringCollectorWithMaxItemsLimit(t *testing.T) { + d := NewDistinctString(0, 3, 0) + + require.True(t, d.Collect("123")) + require.True(t, d.Collect("4567")) + require.True(t, d.Collect("890")) + require.False(t, d.Collect("11")) + + require.True(t, d.Exceeded()) + stringsSlicesEqual(t, []string{"123", "4567", "890"}, d.Strings()) + + // diff fails when diff is not enabled + res, err := d.Diff() + require.Nil(t, res) + require.Error(t, err, errDiffNotEnabled) +} + +func TestDistinctStringCollectorWithCacheHitsLimit(t *testing.T) { + d := NewDistinctString(0, 0, 3) + + require.True(t, d.Collect("123")) + require.True(t, d.Collect("4567")) + require.True(t, d.Collect("890")) + require.False(t, d.Collect("890")) + require.True(t, d.Collect("11")) // The counter resets with every new value + require.False(t, d.Collect("890")) + require.False(t, d.Collect("890")) + require.False(t, d.Collect("890")) + require.False(t, d.Collect("12")) + + require.True(t, d.Exceeded()) + stringsSlicesEqual(t, []string{"123", "4567", "890", "11"}, d.Strings()) + + // diff fails when diff is not enabled + res, err := d.Diff() + require.Nil(t, res) + require.Error(t, err, errDiffNotEnabled) +} + func TestDistinctStringCollectorDiff(t *testing.T) { - d := NewDistinctStringWithDiff(0) + d := NewDistinctStringWithDiff(0, 0, 0) - d.Collect("123") - d.Collect("4567") + require.True(t, d.Collect("123")) + require.True(t, d.Collect("4567")) stringsSlicesEqual(t, []string{"123", "4567"}, readDistinctStringDiff(t, d)) stringsSlicesEqual(t, []string{}, readDistinctStringDiff(t, d)) - d.Collect("123") - d.Collect("890") + require.False(t, d.Collect("123")) + require.True(t, d.Collect("890")) stringsSlicesEqual(t, []string{"890"}, readDistinctStringDiff(t, d)) stringsSlicesEqual(t, []string{}, readDistinctStringDiff(t, d)) @@ -49,7 +88,7 @@ func readDistinctStringDiff(t *testing.T, d *DistinctString) []string { } func TestDistinctStringCollectorIsSafe(t *testing.T) { - d := NewDistinctString(0) // no limit + d := NewDistinctString(0, 0, 0) // no limit var wg sync.WaitGroup for i := 0; i < 10; i++ { wg.Add(1) @@ -90,7 +129,7 @@ func BenchmarkDistinctStringCollect(b *testing.B) { for _, lim := range limits { b.Run("uniques_limit:"+strconv.Itoa(lim), func(b *testing.B) { for n := 0; n < b.N; n++ { - distinctStrings := NewDistinctString(lim) + distinctStrings := NewDistinctString(lim, 0, 0) for _, values := range ingesterStrings { for _, v := range values { if distinctStrings.Collect(v) { @@ -103,7 +142,7 @@
func BenchmarkDistinctStringCollect(b *testing.B) { b.Run("duplicates_limit:"+strconv.Itoa(lim), func(b *testing.B) { for n := 0; n < b.N; n++ { - distinctStrings := NewDistinctString(lim) + distinctStrings := NewDistinctString(lim, 0, 0) for i := 0; i < numIngesters; i++ { for j := 0; j < numTagValuesPerIngester; j++ { // collect first item to simulate duplicates diff --git a/pkg/collector/distinct_value_collector.go b/pkg/collector/distinct_value_collector.go index a8359667639..2df7561f58d 100644 --- a/pkg/collector/distinct_value_collector.go +++ b/pkg/collector/distinct_value_collector.go @@ -2,43 +2,54 @@ package collector import ( "errors" + "fmt" "sync" ) var errDiffNotEnabled = errors.New("diff not enabled") type DistinctValue[T comparable] struct { - values map[T]struct{} - new map[T]struct{} - len func(T) int - maxLen int - currLen int - limExceeded bool - diffEnabled bool - mtx sync.Mutex + values map[T]struct{} + new map[T]struct{} + len func(T) int + maxDataSize int + currDataSize int + currentValuesLen uint32 + maxValues uint32 + maxCacheHits uint32 + currentCacheHits uint32 + limExceeded bool + diffEnabled bool + stopReason string + mtx sync.Mutex } -// NewDistinctValue with the given maximum data size. This is calculated -// as the total length of the recorded strings. For ease of use, maximum=0 -// is interpreted as unlimited. +// NewDistinctValue with the given maximum data size and maximum number of values. +// maxDataSize is calculated as the total length of the recorded strings. +// staleValueThreshold introduces a stop condition that triggers when the number of consecutive cache hits (duplicate values) reaches the threshold. +// For ease of use, maxDataSize=0, maxValues=0, and staleValueThreshold=0 are interpreted as unlimited. // Use NewDistinctValueWithDiff to enable diff support, but that one is slightly slower. -func NewDistinctValue[T comparable](maxDataSize int, len func(T) int) *DistinctValue[T] { +func NewDistinctValue[T comparable](maxDataSize int, maxValues uint32, staleValueThreshold uint32, len func(T) int) *DistinctValue[T] { return &DistinctValue[T]{ - values: make(map[T]struct{}), - maxLen: maxDataSize, - diffEnabled: false, // disable diff to make it faster - len: len, + values: make(map[T]struct{}), + maxDataSize: maxDataSize, + diffEnabled: false, // disable diff to make it faster + len: len, + maxValues: maxValues, + maxCacheHits: staleValueThreshold, } } // NewDistinctValueWithDiff is like NewDistinctValue but with diff support enabled. -func NewDistinctValueWithDiff[T comparable](maxDataSize int, len func(T) int) *DistinctValue[T] { +func NewDistinctValueWithDiff[T comparable](maxDataSize int, maxValues uint32, staleValueThreshold uint32, len func(T) int) *DistinctValue[T] { return &DistinctValue[T]{ - values: make(map[T]struct{}), - new: make(map[T]struct{}), - maxLen: maxDataSize, - diffEnabled: true, - len: len, + values: make(map[T]struct{}), + new: make(map[T]struct{}), + maxDataSize: maxDataSize, + diffEnabled: true, + len: len, + maxValues: maxValues, + maxCacheHits: staleValueThreshold, } } @@ -52,28 +63,39 @@ func (d *DistinctValue[T]) Collect(v T) (exceeded bool) { if d.limExceeded { return true } - // Calculate length valueLen := d.len(v) - // Can it fit?
- // note: we will stop adding values slightly before the limit is reached - if d.maxLen > 0 && d.currLen+valueLen >= d.maxLen { - // No, it can't fit + if d.maxDataSize > 0 && d.currDataSize+valueLen >= d.maxDataSize { + d.stopReason = fmt.Sprintf("Max data exceeded: dataSize %d, maxDataSize %d", d.currDataSize, d.maxDataSize) + d.limExceeded = true + return true + } + + if d.maxValues > 0 && d.currentValuesLen >= d.maxValues { + d.stopReason = fmt.Sprintf("Max values exceeded: values %d, maxValues %d", d.currentValuesLen, d.maxValues) d.limExceeded = true return true } + if d.maxCacheHits > 0 && d.currentCacheHits >= d.maxCacheHits { + d.stopReason = fmt.Sprintf("Max stale values exceeded: cacheHits %d, maxCacheHits %d", d.currentCacheHits, d.maxCacheHits) + d.limExceeded = true + return true + } if _, ok := d.values[v]; ok { - return // Already present + d.currentCacheHits++ + return false // Already present } + d.currentCacheHits = 0 // CacheHits reset to 0 when a new value is found if d.diffEnabled { d.new[v] = struct{}{} } d.values[v] = struct{}{} - d.currLen += valueLen + d.currDataSize += valueLen + d.currentValuesLen++ return false } @@ -100,12 +122,16 @@ func (d *DistinctValue[T]) Exceeded() bool { return d.limExceeded } +// StopReason returns the reason the collector stopped accepting values, if any. +func (d *DistinctValue[T]) StopReason() string { + d.mtx.Lock() + defer d.mtx.Unlock() + return d.stopReason +} + // Size is the total size of all distinct items collected func (d *DistinctValue[T]) Size() int { d.mtx.Lock() defer d.mtx.Unlock() - return d.currLen + return d.currDataSize } // Diff returns all new strings collected since the last time diff was called diff --git a/pkg/collector/distinct_value_collector_test.go b/pkg/collector/distinct_value_collector_test.go index d0bd470db97..4856173836f 100644 --- a/pkg/collector/distinct_value_collector_test.go +++ b/pkg/collector/distinct_value_collector_test.go @@ -11,18 +11,28 @@ import ( ) func TestDistinctValueCollector(t *testing.T) { - d := NewDistinctValue[string](10, func(s string) int { return len(s) }) + d := NewDistinctValue(10, 0, 0, func(s string) int { return len(s) }) - var stop bool - stop = d.Collect("123") - require.False(t, stop) - stop = d.Collect("4567") - require.False(t, stop) - stop = d.Collect("890") - require.True(t, stop) + require.False(t, d.Collect("123")) + require.False(t, d.Collect("4567")) + require.True(t, d.Collect("890")) + require.True(t, d.Exceeded()) + stringsSlicesEqual(t, []string{"123", "4567"}, d.Values()) + + // diff fails when diff is not enabled + res, err := d.Diff() + require.Nil(t, res) + require.Error(t, err, errDiffNotEnabled) +} + +func TestDistinctValueCollectorWithMaxValuesLimited(t *testing.T) { + d := NewDistinctValue(0, 2, 0, func(s string) int { return len(s) }) + require.False(t, d.Collect("123")) + require.False(t, d.Collect("4567")) + require.True(t, d.Collect("890")) require.True(t, d.Exceeded()) - require.Equal(t, stop, d.Exceeded()) // final stop should be same as Exceeded + stringsSlicesEqual(t, []string{"123", "4567"}, d.Values()) // diff fails when diff is not enabled @@ -31,17 +41,34 @@ func TestDistinctValueCollector(t *testing.T) { require.Error(t, err, errDiffNotEnabled) } +func TestDistinctValueCollectorWithCacheHitsLimits(t *testing.T) { + d := NewDistinctValue(0, 0, 2, func(s string) int { return len(s) }) + require.False(t, d.Collect("123")) + require.False(t, d.Collect("4567")) + require.False(t, d.Collect("890")) + require.False(t, d.Collect("890")) + require.False(t, d.Collect("890")) + require.True(t, d.Collect("890")) + require.True(t, d.Exceeded()) +
stringsSlicesEqual(t, []string{"123", "4567", "890"}, d.Values()) + + // diff fails when diff is not enabled + res, err := d.Diff() + require.Nil(t, res) + require.Error(t, err, errDiffNotEnabled) +} + func TestDistinctValueCollectorDiff(t *testing.T) { - d := NewDistinctValueWithDiff[string](0, func(s string) int { return len(s) }) + d := NewDistinctValueWithDiff(0, 0, 0, func(s string) int { return len(s) }) - d.Collect("123") - d.Collect("4567") + require.False(t, d.Collect("123")) + require.False(t, d.Collect("4567")) stringsSlicesEqual(t, []string{"123", "4567"}, readDistinctValueDiff(t, d)) stringsSlicesEqual(t, []string{}, readDistinctValueDiff(t, d)) - d.Collect("123") - d.Collect("890") + require.False(t, d.Collect("123")) + require.False(t, d.Collect("890")) stringsSlicesEqual(t, []string{"890"}, readDistinctValueDiff(t, d)) stringsSlicesEqual(t, []string{}, readDistinctValueDiff(t, d)) @@ -72,6 +99,7 @@ func BenchmarkDistinctValueCollect(b *testing.B) { Value: fmt.Sprintf("value_%d_%d", i, j), } } + ingesterTagValues[i] = tagValues } limits := []int{ @@ -85,7 +113,7 @@ func BenchmarkDistinctValueCollect(b *testing.B) { for _, lim := range limits { b.Run("uniques_limit:"+strconv.Itoa(lim), func(b *testing.B) { for n := 0; n < b.N; n++ { - distinctValues := NewDistinctValue(lim, func(v tempopb.TagValue) int { return len(v.Type) + len(v.Value) }) + distinctValues := NewDistinctValue(lim, 0, 0, func(v tempopb.TagValue) int { return len(v.Type) + len(v.Value) }) for _, tagValues := range ingesterTagValues { for _, v := range tagValues { if distinctValues.Collect(v) { @@ -98,7 +126,7 @@ func BenchmarkDistinctValueCollect(b *testing.B) { b.Run("duplicates_limit:"+strconv.Itoa(lim), func(b *testing.B) { for n := 0; n < b.N; n++ { - distinctValues := NewDistinctValue(lim, func(v tempopb.TagValue) int { return len(v.Type) + len(v.Value) }) + distinctValues := NewDistinctValue(lim, 0, 0, func(v tempopb.TagValue) int { return len(v.Type) + len(v.Value) }) for i := 0; i < numIngesters; i++ { for j := 0; j < numTagValuesPerIngester; j++ { // collect first item to simulate duplicates diff --git a/pkg/collector/scoped_distinct_string.go b/pkg/collector/scoped_distinct_string.go index abe902122c7..b5c2b487f4f 100644 --- a/pkg/collector/scoped_distinct_string.go +++ b/pkg/collector/scoped_distinct_string.go @@ -4,31 +4,48 @@ import ( "sync" ) +const IntrinsicScope = "intrinsic" + type ScopedDistinctString struct { - cols map[string]*DistinctString - newCol func(int) *DistinctString - maxLen int - curLen int - limExceeded bool - diffEnabled bool - mtx sync.Mutex + cols map[string]*DistinctString + newCol func(int, uint32, uint32) *DistinctString + maxDataSize int + currDataSize int + limExceeded bool + maxCacheHits uint32 + diffEnabled bool + maxTagsPerScope uint32 + stopReason string + mtx sync.Mutex } -func NewScopedDistinctString(maxDataSize int) *ScopedDistinctString { +// NewScopedDistinctString collects the tags per scope +// MaxDataSize is calculated as the total length of the recorded strings. +// MaxTagsPerScope controls how many tags can be added per scope. The intrinsic scope is unbounded. +// For ease of use, maxDataSize=0 and maxTagsPerScope=0 are interpreted as unlimited. 
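+// A minimal usage sketch (illustrative, not part of this change; values are hypothetical):
+//
+//	c := NewScopedDistinctString(0, 2, 0) // unlimited bytes, max 2 tags per scope
+//	c.Collect(IntrinsicScope, "name") // intrinsics are never limited
+//	c.Collect("span", "http.method")
+//	c.Collect("span", "http.url")
+//	c.Collect("span", "http.target") // third tag in scope "span": limit hit, returns true
+//	c.Exceeded() // true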
+func NewScopedDistinctString(maxDataSize int, maxTagsPerScope uint32, staleValueThreshold uint32) *ScopedDistinctString { return &ScopedDistinctString{ - cols: map[string]*DistinctString{}, - newCol: NewDistinctString, - maxLen: maxDataSize, - diffEnabled: false, + cols: map[string]*DistinctString{}, + newCol: NewDistinctString, + maxDataSize: maxDataSize, + diffEnabled: false, + maxTagsPerScope: maxTagsPerScope, + maxCacheHits: staleValueThreshold, } } -func NewScopedDistinctStringWithDiff(maxDataSize int) *ScopedDistinctString { +// NewScopedDistinctStringWithDiff collects the tags per scope with diff support enabled. +// MaxDataSize is calculated as the total length of the recorded strings. +// MaxTagsPerScope controls how many tags can be added per scope. The intrinsic scope is unbounded. +// For ease of use, maxDataSize=0 and maxTagsPerScope=0 are interpreted as unlimited. +func NewScopedDistinctStringWithDiff(maxDataSize int, maxTagsPerScope uint32, staleValueThreshold uint32) *ScopedDistinctString { return &ScopedDistinctString{ - cols: map[string]*DistinctString{}, - newCol: NewDistinctStringWithDiff, - maxLen: maxDataSize, - diffEnabled: true, + cols: map[string]*DistinctString{}, + newCol: NewDistinctStringWithDiff, + maxDataSize: maxDataSize, + diffEnabled: true, + maxTagsPerScope: maxTagsPerScope, + maxCacheHits: staleValueThreshold, } } @@ -45,7 +62,7 @@ func (d *ScopedDistinctString) Collect(scope string, val string) (exceeded bool) valueLen := len(val) // can it fit? - if d.maxLen > 0 && d.curLen+valueLen > d.maxLen { + if d.maxDataSize > 0 && d.currDataSize+valueLen > d.maxDataSize { // No d.limExceeded = true return true @@ -54,13 +71,23 @@ func (d *ScopedDistinctString) Collect(scope string, val string) (exceeded bool) // get or create collector col, ok := d.cols[scope] if !ok { - col = d.newCol(0) + if scope == IntrinsicScope { + col = d.newCol(0, 0, 0) + } else { + col = d.newCol(0, d.maxTagsPerScope, d.maxCacheHits) + } d.cols[scope] = col } // add valueLen if we successfully added the value if col.Collect(val) { - d.curLen += valueLen + d.currDataSize += valueLen + } + if col.Exceeded() { + // we stop if one of the scopes exceeds the limit + d.limExceeded = true + d.stopReason = col.stopReason + return true } return false } @@ -80,11 +107,25 @@ func (d *ScopedDistinctString) Strings() map[string][]string { } // Exceeded indicates if some values were lost because the maximum size limit was met. +// It is also set when one of the scopes reaches its maximum number of tags.
func (d *ScopedDistinctString) Exceeded() bool { d.mtx.Lock() defer d.mtx.Unlock() - return d.limExceeded + if d.limExceeded { + return true + } + + for _, v := range d.cols { + if v.Exceeded() { + return true + } + } + return false +} + +// StopReason returns the reason the collector stopped accepting values, if any. +func (d *ScopedDistinctString) StopReason() string { + d.mtx.Lock() + defer d.mtx.Unlock() + return d.stopReason } // Diff returns all new strings collected since the last time Diff was called diff --git a/pkg/collector/scoped_distinct_string_test.go b/pkg/collector/scoped_distinct_string_test.go index 9b090cef442..7ae38c5ffb1 100644 --- a/pkg/collector/scoped_distinct_string_test.go +++ b/pkg/collector/scoped_distinct_string_test.go @@ -12,10 +12,12 @@ import ( func TestScopedDistinct(t *testing.T) { tcs := []struct { - in map[string][]string - expected map[string][]string - limit int - exceeded bool + in map[string][]string + expected map[string][]string + maxBytes int + maxItemsPerScope uint32 + staleValueThreshold uint32 + exceeded bool }{ { in: map[string][]string{ @@ -46,13 +48,27 @@ func TestScopedDistinct(t *testing.T) { "scope1": {"val1", "val2"}, "scope2": {"val1"}, }, - limit: 13, + maxBytes: 13, exceeded: true, }, + { + in: map[string][]string{ + "intrinsic": {"val1", "val2"}, + "scope2": {"val1", "val2"}, + "scope3": {"val1", "val2", "val3"}, + }, + expected: map[string][]string{ + "intrinsic": {"val1", "val2"}, + "scope2": {"val1"}, + }, + maxBytes: 0, + maxItemsPerScope: 1, + exceeded: true, + }, } for _, tc := range tcs { - c := NewScopedDistinctString(tc.limit) + c := NewScopedDistinctString(tc.maxBytes, tc.maxItemsPerScope, tc.staleValueThreshold) // get and sort keys so we can deterministically add values keys := []string{} @@ -61,18 +77,15 @@ func TestScopedDistinct(t *testing.T) { } slices.Sort(keys) - var stop bool for _, k := range keys { v := tc.in[k] for _, val := range v { - stop = c.Collect(k, val) + c.Collect(k, val) } } // check if we exceeded the limit require.Equal(t, tc.exceeded, c.Exceeded()) - require.Equal(t, tc.exceeded, stop) - require.Equal(t, stop, c.Exceeded()) actual := c.Strings() assertMaps(t, tc.expected, actual) @@ -80,7 +93,7 @@ } func TestScopedDistinctDiff(t *testing.T) { - c := NewScopedDistinctStringWithDiff(0) + c := NewScopedDistinctStringWithDiff(0, 0, 0) c.Collect("scope1", "val1") expected := map[string][]string{ @@ -122,7 +135,7 @@ func TestScopedDistinctDiff(t *testing.T) { assertMaps(t, map[string][]string{}, readScopedDistinctStringDiff(t, c)) // diff should error when diff is not enabled - col := NewScopedDistinctString(0) + col := NewScopedDistinctString(0, 0, 0) col.Collect("scope1", "val1") res, err := col.Diff() require.Nil(t, res) @@ -144,7 +157,7 @@ func assertMaps(t *testing.T, expected, actual map[string][]string) { } func TestScopedDistinctStringCollectorIsSafe(t *testing.T) { - d := NewScopedDistinctString(0) // no limit + d := NewScopedDistinctString(0, 0, 0) // no limit var wg sync.WaitGroup for i := 0; i < 10; i++ { @@ -195,7 +208,7 @@ func BenchmarkScopedDistinctStringCollect(b *testing.B) { for _, lim := range limits { b.Run("uniques_limit:"+strconv.Itoa(lim), func(b *testing.B) { for n := 0; n < b.N; n++ { - scopedDistinctStrings := NewScopedDistinctString(lim) + scopedDistinctStrings := NewScopedDistinctString(lim, 0, 0) for _, tags := range ingesterTags { for scope, values := range tags { for _, v := range values { @@ -210,7 +223,7 @@
b.Run("duplicates_limit:"+strconv.Itoa(lim), func(b *testing.B) { for n := 0; n < b.N; n++ { - scopedDistinctStrings := NewScopedDistinctString(lim) + scopedDistinctStrings := NewScopedDistinctString(lim, 0, 0) for i := 0; i < numIngesters; i++ { for scope := range ingesterTags[i] { // collect first item to simulate duplicates diff --git a/pkg/docsgen/generate_manifest.go b/pkg/docsgen/generate_manifest.go new file mode 100644 index 00000000000..94c1d4a14d7 --- /dev/null +++ b/pkg/docsgen/generate_manifest.go @@ -0,0 +1,65 @@ +package main + +import ( + "fmt" + "log" + "os" + "os/exec" + + "github.com/grafana/tempo/cmd/tempo/app" + "gopkg.in/yaml.v3" +) + +const ManifestPath = "docs/sources/tempo/configuration/manifest.md" + +const Cmd = "go run pkg/docsgen/generate_manifest.go" + +var Manifest = fmt.Sprintf(`--- +title: Manifest +description: This manifest lists of all Tempo options and their defaults. +weight: 110 +--- +[//]: # 'THIS FILE IS GENERATED AUTOMATICALLY BY %s' +[//]: # 'DO NOT EDIT THIS FILE DIRECTLY' + +# Manifest + +This document is a reference for all Tempo options and their defaults. If you are just getting +started with Tempo, refer to [Tempo examples](https://github.com/grafana/tempo/tree/main/example/docker-compose) +and other [configuration documentation]({{< relref "../configuration" >}}). Most installations will require only setting 10 to 20 of these options. + +## Complete configuration + +`, Cmd) + +func main() { + newConfig := app.NewDefaultConfig() + // Override values that depend on the host specifics + const hostname = "hostname" + newConfig.Distributor.DistributorRing.InstanceID = hostname + newConfig.Compactor.ShardingRing.InstanceID = hostname + newConfig.Ingester.LifecyclerConfig.ID = hostname + newConfig.Ingester.LifecyclerConfig.InfNames = []string{"eth0"} + newConfig.Generator.Ring.InstanceID = hostname + newConfig.Generator.InstanceID = hostname + newConfig.BlockBuilder.InstanceID = hostname + + newConfigBytes, err := yaml.Marshal(newConfig) + if err != nil { + panic(err) + } + newManifest := Manifest + "```yaml\n" + string(newConfigBytes) + "```\n" + + err = os.WriteFile(ManifestPath, []byte(newManifest), 0o644) + if err != nil { + panic(err) + } + + cmd := exec.Command("git", "diff", "--exit-code", ManifestPath) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + err = cmd.Run() + if err != nil { + log.Fatalf("The manifest with the default Tempo configuration has changed. Please run '%s' and commit the changes.", Cmd) + } +} diff --git a/pkg/ingest/blockbuilder/consumer.go b/pkg/ingest/blockbuilder/consumer.go new file mode 100644 index 00000000000..29b334c3698 --- /dev/null +++ b/pkg/ingest/blockbuilder/consumer.go @@ -0,0 +1,33 @@ +package blockbuilder + +import ( + "context" + "fmt" + + "github.com/go-kit/log" + "github.com/grafana/tempo/pkg/ingest" + "github.com/twmb/franz-go/pkg/kgo" + "github.com/twmb/franz-go/plugin/kprom" +) + +type Consumer struct{} + +type PartitionsFn func(context.Context, *kgo.Client, map[string][]int32) + +// NewConsumer creates a new Kafka consumer for the block-builder +func NewConsumer(kafkaCfg ingest.KafkaConfig, metrics *kprom.Metrics, logger log.Logger, onRevoked, onAssigned PartitionsFn, opts ...kgo.Opt) (*kgo.Client, error) { + opts = append(opts, + kgo.DisableAutoCommit(), + kgo.OnPartitionsRevoked(onRevoked), + kgo.OnPartitionsAssigned(onAssigned), + + // Block-builder group consumes + kgo.ConsumerGroup(kafkaCfg.ConsumerGroup), + ) + + client, err := ingest.NewReaderClient(kafkaCfg, metrics, logger, opts...) 
+ if err != nil { + return nil, fmt.Errorf("failed to create kafka client: %w", err) + } + return client, nil +} diff --git a/pkg/ingest/config.go b/pkg/ingest/config.go new file mode 100644 index 00000000000..66c34485771 --- /dev/null +++ b/pkg/ingest/config.go @@ -0,0 +1,182 @@ +package ingest + +import ( + "context" + "errors" + "flag" + "fmt" + "strconv" + "strings" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/grafana/dskit/flagext" + "github.com/twmb/franz-go/pkg/kadm" + "github.com/twmb/franz-go/pkg/kgo" +) + +const ( + // writerRequestTimeoutOverhead is the overhead applied by the Writer to every Kafka timeout. + // You can think about this overhead as extra time for requests sitting in the client's buffer + // before being sent on the wire and the actual time it takes to send it over the network and + // start being processed by Kafka. + writerRequestTimeoutOverhead = 2 * time.Second + + // producerBatchMaxBytes is the max allowed size of a batch of Kafka records. + producerBatchMaxBytes = 16_000_000 + + // maxProducerRecordDataBytesLimit is the max allowed size of a single record's data. Given we have a limit + // on the max batch size (producerBatchMaxBytes), a Kafka record's data can't be bigger than the batch size + // minus some overhead required to serialise the batch and the record itself. We use 16KB as such overhead + // in the worst case scenario, which is expected to be way above the actual one. + maxProducerRecordDataBytesLimit = producerBatchMaxBytes - 16384 + minProducerRecordDataBytesLimit = 1024 * 1024 +) + +var ( + ErrMissingKafkaAddress = errors.New("the Kafka address has not been configured") + ErrMissingKafkaTopic = errors.New("the Kafka topic has not been configured") + ErrInconsistentConsumerLagAtStartup = errors.New("the target and max consumer lag at startup must be either both set to 0 or to a value greater than 0") + ErrInvalidMaxConsumerLagAtStartup = errors.New("the configured max consumer lag at startup must be greater than or equal to the configured target consumer lag") + ErrInvalidProducerMaxRecordSizeBytes = fmt.Errorf("the configured producer max record size bytes must be a value between %d and %d", minProducerRecordDataBytesLimit, maxProducerRecordDataBytesLimit) + ErrInconsistentSASLCredentials = errors.New("the SASL username and password must be both configured to enable SASL authentication") +) + +type Config struct { + Enabled bool `yaml:"enabled"` + Kafka KafkaConfig `yaml:"kafka"` +} + +func (cfg *Config) RegisterFlagsAndApplyDefaults(_ string, f *flag.FlagSet) { + cfg.Kafka.RegisterFlags(f) +} + +func (cfg *Config) Validate() error { + if !cfg.Enabled { + return nil + } + + return cfg.Kafka.Validate() +} + +// KafkaConfig holds the generic config for the Kafka backend.
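+// A minimal wiring sketch (illustrative, not part of this change):
+//
+//	var cfg KafkaConfig
+//	fs := flag.NewFlagSet("tempo", flag.ContinueOnError)
+//	cfg.RegisterFlags(fs) // registers flags under the "kafka." prefix
+//	_ = fs.Parse([]string{"-kafka.topic=traces"})
+//	err := cfg.Validate() // the address already defaults to localhost:9092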
+type KafkaConfig struct { + Address string `yaml:"address"` + Topic string `yaml:"topic"` + ClientID string `yaml:"client_id"` + DialTimeout time.Duration `yaml:"dial_timeout"` + WriteTimeout time.Duration `yaml:"write_timeout"` + + SASLUsername string `yaml:"sasl_username"` + SASLPassword flagext.Secret `yaml:"sasl_password"` + + ConsumerGroup string `yaml:"consumer_group"` + ConsumerGroupOffsetCommitInterval time.Duration `yaml:"consumer_group_offset_commit_interval"` + + LastProducedOffsetRetryTimeout time.Duration `yaml:"last_produced_offset_retry_timeout"` + + AutoCreateTopicEnabled bool `yaml:"auto_create_topic_enabled"` + AutoCreateTopicDefaultPartitions int `yaml:"auto_create_topic_default_partitions"` + + ProducerMaxRecordSizeBytes int `yaml:"producer_max_record_size_bytes"` + ProducerMaxBufferedBytes int64 `yaml:"producer_max_buffered_bytes"` + + TargetConsumerLagAtStartup time.Duration `yaml:"target_consumer_lag_at_startup"` + MaxConsumerLagAtStartup time.Duration `yaml:"max_consumer_lag_at_startup"` +} + +func (cfg *KafkaConfig) RegisterFlags(f *flag.FlagSet) { + cfg.RegisterFlagsWithPrefix("kafka", f) +} + +func (cfg *KafkaConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { + f.StringVar(&cfg.Address, prefix+".address", "localhost:9092", "The Kafka backend address.") + f.StringVar(&cfg.Topic, prefix+".topic", "", "The Kafka topic name.") + f.StringVar(&cfg.ClientID, prefix+".client-id", "", "The Kafka client ID.") + f.DurationVar(&cfg.DialTimeout, prefix+".dial-timeout", 2*time.Second, "The maximum time allowed to open a connection to a Kafka broker.") + f.DurationVar(&cfg.WriteTimeout, prefix+".write-timeout", 10*time.Second, "How long to wait for an incoming write request to be successfully committed to the Kafka backend.") + + f.StringVar(&cfg.SASLUsername, prefix+".sasl-username", "", "The SASL username for authentication.") + f.Var(&cfg.SASLPassword, prefix+".sasl-password", "The SASL password for authentication.") + + f.StringVar(&cfg.ConsumerGroup, prefix+".consumer-group", "", "The consumer group used by the consumer to track the last consumed offset. The consumer group must be different for each ingester. If the configured consumer group contains the '<partition>' placeholder, it is replaced with the actual partition ID owned by the ingester. When empty (recommended), Tempo uses the ingester instance ID to guarantee uniqueness.") + f.DurationVar(&cfg.ConsumerGroupOffsetCommitInterval, prefix+".consumer-group-offset-commit-interval", time.Second, "How frequently a consumer should commit the consumed offset to Kafka. The last committed offset is used at startup to continue the consumption from where it was left.") + + f.DurationVar(&cfg.LastProducedOffsetRetryTimeout, prefix+".last-produced-offset-retry-timeout", 10*time.Second, "How long to retry a failed request to get the last produced offset.") + + f.BoolVar(&cfg.AutoCreateTopicEnabled, prefix+".auto-create-topic-enabled", true, "Enable auto-creation of Kafka topic if it doesn't exist.") + f.IntVar(&cfg.AutoCreateTopicDefaultPartitions, prefix+".auto-create-topic-default-partitions", 1000, "When auto-creation of Kafka topic is enabled and this value is positive, Kafka's num.partitions configuration option is set on Kafka brokers with this value when the Tempo component that uses Kafka starts. This configuration option specifies the default number of partitions that the Kafka broker uses for auto-created topics. Note that this is a Kafka-cluster wide setting, and applies to any auto-created topic.
If the setting of num.partitions fails, Tempo proceeds anyway, but auto-created topics could have an incorrect number of partitions.") + + f.IntVar(&cfg.ProducerMaxRecordSizeBytes, prefix+".producer-max-record-size-bytes", maxProducerRecordDataBytesLimit, "The maximum size of a Kafka record's data that should be generated by the producer. An incoming write request larger than this size is split into multiple Kafka records. We strongly recommend not changing this setting except for testing purposes.") + f.Int64Var(&cfg.ProducerMaxBufferedBytes, prefix+".producer-max-buffered-bytes", 1024*1024*1024, "The maximum size of (uncompressed) buffered and unacknowledged produced records sent to Kafka. The produce request fails once this limit is reached. This limit is per Kafka client. 0 to disable the limit.") + + consumerLagUsage := fmt.Sprintf("Set both -%s and -%s to 0 to disable waiting for maximum consumer lag being honored at startup.", prefix+".target-consumer-lag-at-startup", prefix+".max-consumer-lag-at-startup") + f.DurationVar(&cfg.TargetConsumerLagAtStartup, prefix+".target-consumer-lag-at-startup", 2*time.Second, "The best-effort maximum lag a consumer tries to achieve at startup. "+consumerLagUsage) + f.DurationVar(&cfg.MaxConsumerLagAtStartup, prefix+".max-consumer-lag-at-startup", 15*time.Second, "The guaranteed maximum lag before a consumer is considered to have caught up reading from a partition at startup, becomes ACTIVE in the hash ring and passes the readiness check. "+consumerLagUsage) +} + +func (cfg *KafkaConfig) Validate() error { + if cfg.Address == "" { + return ErrMissingKafkaAddress + } + if cfg.Topic == "" { + return ErrMissingKafkaTopic + } + if cfg.ProducerMaxRecordSizeBytes < minProducerRecordDataBytesLimit || cfg.ProducerMaxRecordSizeBytes > maxProducerRecordDataBytesLimit { + return ErrInvalidProducerMaxRecordSizeBytes + } + if (cfg.TargetConsumerLagAtStartup == 0 && cfg.MaxConsumerLagAtStartup != 0) || (cfg.TargetConsumerLagAtStartup != 0 && cfg.MaxConsumerLagAtStartup == 0) { + return ErrInconsistentConsumerLagAtStartup + } + if cfg.MaxConsumerLagAtStartup < cfg.TargetConsumerLagAtStartup { + return ErrInvalidMaxConsumerLagAtStartup + } + + if (cfg.SASLUsername == "") != (cfg.SASLPassword.String() == "") { + return ErrInconsistentSASLCredentials + } + + return nil +} + +// GetConsumerGroup returns the consumer group to use for the given instanceID and partitionID. +func (cfg *KafkaConfig) GetConsumerGroup(instanceID string, partitionID int32) string { + if cfg.ConsumerGroup == "" { + return instanceID + } + + return strings.ReplaceAll(cfg.ConsumerGroup, "<partition>", strconv.Itoa(int(partitionID))) +} + +// SetDefaultNumberOfPartitionsForAutocreatedTopics tries to set the num.partitions config option on brokers. +// This is best-effort: if setting the option fails, the error is logged but not returned. +func (cfg KafkaConfig) SetDefaultNumberOfPartitionsForAutocreatedTopics(logger log.Logger) { + if cfg.AutoCreateTopicDefaultPartitions <= 0 { + return + } + + cl, err := kgo.NewClient(commonKafkaClientOptions(cfg, nil, logger)...)
+ if err != nil { + level.Error(logger).Log("msg", "failed to create kafka client", "err", err) + return + } + + adm := kadm.NewClient(cl) + defer adm.Close() + + defaultNumberOfPartitions := fmt.Sprintf("%d", cfg.AutoCreateTopicDefaultPartitions) + _, err = adm.AlterBrokerConfigsState(context.Background(), []kadm.AlterConfig{ + { + Op: kadm.SetConfig, + Name: "num.partitions", + Value: &defaultNumberOfPartitions, + }, + }) + if err != nil { + level.Error(logger).Log("msg", "failed to alter default number of partitions", "err", err) + return + } + + level.Info(logger).Log("msg", "configured Kafka-wide default number of partitions for auto-created topics (num.partitions)", "value", cfg.AutoCreateTopicDefaultPartitions) +} diff --git a/pkg/ingest/config_test.go b/pkg/ingest/config_test.go new file mode 100644 index 00000000000..63da0579a16 --- /dev/null +++ b/pkg/ingest/config_test.go @@ -0,0 +1,41 @@ +package ingest + +import ( + "testing" + + "github.com/go-kit/log" + "github.com/stretchr/testify/require" + "github.com/twmb/franz-go/pkg/kfake" + "github.com/twmb/franz-go/pkg/kmsg" +) + +func TestSetDefaultNumberOfPartitionsForAutocreatedTopics(t *testing.T) { + cluster, err := kfake.NewCluster(kfake.NumBrokers(1)) + require.NoError(t, err) + t.Cleanup(cluster.Close) + + addrs := cluster.ListenAddrs() + require.Len(t, addrs, 1) + + cfg := KafkaConfig{ + Address: addrs[0], + AutoCreateTopicDefaultPartitions: 100, + } + + cluster.ControlKey(kmsg.AlterConfigs.Int16(), func(request kmsg.Request) (kmsg.Response, error, bool) { + r := request.(*kmsg.AlterConfigsRequest) + + require.Len(t, r.Resources, 1) + res := r.Resources[0] + require.Equal(t, kmsg.ConfigResourceTypeBroker, res.ResourceType) + require.Len(t, res.Configs, 1) + cfg := res.Configs[0] + require.Equal(t, "num.partitions", cfg.Name) + require.NotNil(t, *cfg.Value) + require.Equal(t, "100", *cfg.Value) + + return &kmsg.AlterConfigsResponse{}, nil, true + }) + + cfg.SetDefaultNumberOfPartitionsForAutocreatedTopics(log.NewNopLogger()) +} diff --git a/pkg/ingest/encoding.go b/pkg/ingest/encoding.go new file mode 100644 index 00000000000..5afb3b1a382 --- /dev/null +++ b/pkg/ingest/encoding.go @@ -0,0 +1,147 @@ +// Package ingest provides encoding and decoding functionality for Tempo's Kafka integration.
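+//
+// A round-trip sketch (illustrative, not part of this change; the tenant ID is hypothetical):
+//
+//	records, err := Encode(0, "tenant-1", req, producerBatchMaxBytes)
+//	// ... produce the records to Kafka ...
+//	dec := NewDecoder()
+//	out, err := dec.Decode(records[0].Value) // back to a *tempopb.PushBytesRequest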
+package ingest + +import ( + "errors" + "fmt" + math_bits "math/bits" + "sync" + + "github.com/grafana/tempo/pkg/tempopb" + "github.com/twmb/franz-go/pkg/kgo" +) + +var encoderPool = sync.Pool{ + New: func() any { + return &tempopb.PushBytesRequest{} + }, +} + +func encoderPoolGet() *tempopb.PushBytesRequest { + x := encoderPool.Get() + if x != nil { + return x.(*tempopb.PushBytesRequest) + } + + return &tempopb.PushBytesRequest{ + Traces: make([]tempopb.PreallocBytes, 0, 10), + Ids: make([][]byte, 0, 10), + } +} + +func encoderPoolPut(req *tempopb.PushBytesRequest) { + req.Traces = req.Traces[:0] + req.Ids = req.Ids[:0] + encoderPool.Put(req) +} + +func Encode(partitionID int32, tenantID string, req *tempopb.PushBytesRequest, maxSize int) ([]*kgo.Record, error) { + reqSize := req.Size() + + // Fast path for small requests + if reqSize <= maxSize { + rec, err := marshalWriteRequestToRecord(partitionID, tenantID, req) + if err != nil { + return nil, err + } + return []*kgo.Record{rec}, nil + } + + var records []*kgo.Record + batch := encoderPoolGet() + defer encoderPoolPut(batch) + + currentSize := 0 + + for i, entry := range req.Traces { + l := entry.Size() + len(req.Ids[i]) + // Size of the entry in the req + entrySize := 1 + l + sovPush(uint64(l)) + + // Check if a single entry is too big + if entrySize > maxSize || (i == 0 && currentSize+entrySize > maxSize) { + return nil, fmt.Errorf("single entry size (%d) exceeds maximum allowed size (%d)", entrySize, maxSize) + } + + if currentSize+entrySize > maxSize { + // Current req is full, create a record and start a new req + if len(batch.Traces) > 0 { + rec, err := marshalWriteRequestToRecord(partitionID, tenantID, batch) + if err != nil { + return nil, err + } + records = append(records, rec) + } + // Reset the current batch + batch.Traces = batch.Traces[:0] + batch.Ids = batch.Ids[:0] + currentSize = 0 + } + batch.Traces = append(batch.Traces, entry) + batch.Ids = append(batch.Ids, req.Ids[i]) + currentSize += entrySize + } + + // Handle any remaining entries + if len(batch.Traces) > 0 { + rec, err := marshalWriteRequestToRecord(partitionID, tenantID, batch) + if err != nil { + return nil, err + } + records = append(records, rec) + } + + if len(records) == 0 { + return nil, errors.New("no valid records created") + } + + return records, nil +} + +// marshalWriteRequestToRecord converts a PushBytesRequest to a Kafka record. +func marshalWriteRequestToRecord(partitionID int32, tenantID string, req *tempopb.PushBytesRequest) (*kgo.Record, error) { + data, err := req.Marshal() + if err != nil { + return nil, fmt.Errorf("failed to marshal record: %w", err) + } + + return &kgo.Record{ + Key: []byte(tenantID), + Value: data, + Partition: partitionID, + }, nil +} + +// Decoder is responsible for decoding Kafka record data back into a tempopb.PushBytesRequest. +// It reuses a single request object across calls to avoid allocations. +type Decoder struct { + req *tempopb.PushBytesRequest +} + +func NewDecoder() *Decoder { + return &Decoder{ + req: &tempopb.PushBytesRequest{}, // TODO - Pool? + } +} + +// Decode converts a Kafka record's byte data back into a tempopb.PushBytesRequest. +func (d *Decoder) Decode(data []byte) (*tempopb.PushBytesRequest, error) { + err := d.req.Unmarshal(data) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal record: %w", err) + } + return d.req, nil +} + +func (d *Decoder) Reset() { + // Retain slice capacity + d.req.Ids = d.req.Ids[:0] + d.req.Traces = d.req.Traces[:0] +} + +// sovPush calculates the size of a varint-encoded uint64.
+// It is used to determine the number of bytes needed to encode a uint64 value +// in Protocol Buffers' variable-length integer format. +func sovPush(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} diff --git a/pkg/ingest/encoding_test.go b/pkg/ingest/encoding_test.go new file mode 100644 index 00000000000..9933a464acb --- /dev/null +++ b/pkg/ingest/encoding_test.go @@ -0,0 +1,137 @@ +package ingest + +import ( + "math/rand" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/grafana/tempo/pkg/tempopb" +) + +func TestEncoderDecoder(t *testing.T) { + tests := []struct { + name string + req *tempopb.PushBytesRequest + maxSize int + expectSplit bool + }{ + { + name: "Small trace, no split", + req: generateRequest(10, 100), + maxSize: 1024 * 1024, + expectSplit: false, + }, + { + name: "Large trace, expect split", + req: generateRequest(1000, 1000), + maxSize: 1024 * 10, + expectSplit: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + decoder := NewDecoder() + + records, err := Encode(0, "test-tenant", tt.req, tt.maxSize) + require.NoError(t, err) + + if tt.expectSplit { + require.Greater(t, len(records), 1) + } else { + require.Equal(t, 1, len(records)) + } + + var decodedEntries []tempopb.PreallocBytes + var decodedIDs [][]byte + + for _, record := range records { + decoder.Reset() + req, err := decoder.Decode(record.Value) + require.NoError(t, err) + decodedEntries = append(decodedEntries, req.Traces...) + decodedIDs = append(decodedIDs, req.Ids...) + } + + require.Equal(t, len(tt.req.Traces), len(decodedEntries)) + for i := range tt.req.Traces { + require.Equal(t, tt.req.Traces[i], decodedEntries[i]) + require.Equal(t, tt.req.Ids[i], decodedIDs[i]) + } + }) + } +} + +func TestEncoderSingleEntryTooLarge(t *testing.T) { + stream := generateRequest(1, 1000) + + _, err := Encode(0, "test-tenant", stream, 100) + require.Error(t, err) + require.Contains(t, err.Error(), "single entry size") +} + +func TestDecoderInvalidData(t *testing.T) { + decoder := NewDecoder() + + _, err := decoder.Decode([]byte("invalid data")) + require.Error(t, err) +} + +func TestEncoderDecoderEmptyStream(t *testing.T) { + decoder := NewDecoder() + + req := &tempopb.PushBytesRequest{} + + records, err := Encode(0, "test-tenant", req, 10<<20) + require.NoError(t, err) + require.Len(t, records, 1) + + decodedReq, err := decoder.Decode(records[0].Value) + require.NoError(t, err) + require.Equal(t, req.Traces, decodedReq.Traces) +} + +func BenchmarkEncodeDecode(b *testing.B) { + decoder := NewDecoder() + stream := generateRequest(1000, 200) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + records, err := Encode(0, "test-tenant", stream, 10<<20) + if err != nil { + b.Fatal(err) + } + for _, record := range records { + _, err := decoder.Decode(record.Value) + if err != nil { + b.Fatal(err) + } + } + } +} + +// Helper function to generate a test PushBytesRequest +func generateRequest(entries, lineLength int) *tempopb.PushBytesRequest { + stream := &tempopb.PushBytesRequest{ + Traces: make([]tempopb.PreallocBytes, entries), + Ids: make([][]byte, entries), + } + + for i := 0; i < entries; i++ { + stream.Traces[i].Slice = generateRandomString(lineLength) + stream.Ids[i] = generateRandomString(lineLength) + } + + return stream +} + +// Helper function to generate a random alphanumeric byte slice +func generateRandomString(length int) []byte { + const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" + b := make([]byte, length) + for i := range b { + b[i] =
charset[rand.Intn(len(charset))] } return b } diff --git a/pkg/ingest/logger.go b/pkg/ingest/logger.go new file mode 100644 index 00000000000..61fced6e338 --- /dev/null +++ b/pkg/ingest/logger.go @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package ingest + +import ( + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/twmb/franz-go/pkg/kgo" +) + +type logger struct { + logger log.Logger +} + +func newLogger(l log.Logger) *logger { + return &logger{ + logger: log.With(l, "component", "kafka_client"), + } +} + +func (l *logger) Level() kgo.LogLevel { + // The Kafka client calls Level() to check whether debug level is enabled or not. + // To keep it simple, we always return Info, so the Kafka client will never try + // to log expensive debug messages. + return kgo.LogLevelInfo +} + +func (l *logger) Log(lev kgo.LogLevel, msg string, keyvals ...any) { + keyvals = append([]any{"msg", msg}, keyvals...) + switch lev { + case kgo.LogLevelDebug: + level.Debug(l.logger).Log(keyvals...) + case kgo.LogLevelInfo: + level.Info(l.logger).Log(keyvals...) + case kgo.LogLevelWarn: + level.Warn(l.logger).Log(keyvals...) + case kgo.LogLevelError: + level.Error(l.logger).Log(keyvals...) + case kgo.LogLevelNone: + // Do nothing. + } +} diff --git a/pkg/ingest/metrics.go b/pkg/ingest/metrics.go new file mode 100644 index 00000000000..72ba578cd1e --- /dev/null +++ b/pkg/ingest/metrics.go @@ -0,0 +1,89 @@ +package ingest + +import ( + "context" + "errors" + "fmt" + "strconv" + "time" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/twmb/franz-go/pkg/kadm" + "github.com/twmb/franz-go/pkg/kerr" +) + +var metricPartitionLag = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "tempo", + Subsystem: "ingest", + Name: "group_partition_lag", + Help: "Lag of a partition.", +}, []string{"group", "partition"}) + +// TODO - Simplify signature to create client instead? +func ExportPartitionLagMetrics(ctx context.Context, admClient *kadm.Client, log log.Logger, cfg Config, getAssignedActivePartitions func() []int32) { + go func() { + var ( + waitTime = time.Second * 15 + topic = cfg.Kafka.Topic + group = cfg.Kafka.ConsumerGroup + ) + + for { + select { + case <-time.After(waitTime): + lag, err := getGroupLag(ctx, admClient, topic, group) + if err != nil { + level.Error(log).Log("msg", "failed to get consumer group lag", "err", err) + continue + } + for _, p := range getAssignedActivePartitions() { + l, ok := lag.Lookup(topic, p) + if ok { + metricPartitionLag.WithLabelValues(group, strconv.Itoa(int(p))).Set(float64(l.Lag)) + } + } + case <-ctx.Done(): + return + } + } + }() +} + +// getGroupLag is similar to `kadm.Client.Lag` but works when the group doesn't have live participants. +// Similar to `kadm.CalculateGroupLagWithStartOffsets`, it takes into account that the group may not have any commits. +// +// The lag is the difference between the last produced offset (high watermark) and an offset in the "past". +// If the block builder committed an offset for a given partition to the consumer group at least once, then +// the lag is the difference between the last produced offset and the offset committed in the consumer group. +// Otherwise, if the block builder didn't commit an offset for a given partition yet (e.g. block builder is +// running for the first time), then the lag is the difference between the last produced offset and the partition's start offset.
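+//
+// A usage sketch (illustrative, not part of this change; the topic and group names are hypothetical):
+//
+//	lag, err := getGroupLag(ctx, admClient, "traces", "block-builder")
+//	if err == nil {
+//	    if l, ok := lag.Lookup("traces", 0); ok {
+//	        _ = l.Lag // e.g. 100 when the end offset is 1000 and the committed offset is 900
+//	    }
+//	}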
+func getGroupLag(ctx context.Context, admClient *kadm.Client, topic, group string) (kadm.GroupLag, error) { + offsets, err := admClient.FetchOffsets(ctx, group) + if err != nil { + if !errors.Is(err, kerr.GroupIDNotFound) { + return nil, fmt.Errorf("fetch offsets: %w", err) + } + } + if err := offsets.Error(); err != nil { + return nil, fmt.Errorf("fetch offsets got error in response: %w", err) + } + + startOffsets, err := admClient.ListStartOffsets(ctx, topic) + if err != nil { + return nil, err + } + endOffsets, err := admClient.ListEndOffsets(ctx, topic) + if err != nil { + return nil, err + } + + descrGroup := kadm.DescribedGroup{ + // "Empty" is the state that indicates that the group doesn't have active consumer members; this is always the case for block-builder, + // because we don't use group consumption. + State: "Empty", + } + return kadm.CalculateGroupLagWithStartOffsets(descrGroup, offsets, startOffsets, endOffsets), nil +} diff --git a/pkg/ingest/reader_client.go b/pkg/ingest/reader_client.go new file mode 100644 index 00000000000..7467a9542ca --- /dev/null +++ b/pkg/ingest/reader_client.go @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package ingest + +import ( + "time" + + "github.com/go-kit/log" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/twmb/franz-go/pkg/kgo" + "github.com/twmb/franz-go/plugin/kprom" +) + +// NewReaderClient returns the kgo.Client that should be used by the Reader. +func NewReaderClient(kafkaCfg KafkaConfig, metrics *kprom.Metrics, logger log.Logger, opts ...kgo.Opt) (*kgo.Client, error) { + const fetchMaxBytes = 100_000_000 + + opts = append(opts, commonKafkaClientOptions(kafkaCfg, metrics, logger)...) + opts = append(opts, + kgo.FetchMinBytes(1), + kgo.FetchMaxBytes(fetchMaxBytes), + kgo.FetchMaxWait(5*time.Second), + kgo.FetchMaxPartitionBytes(50_000_000), + + // BrokerMaxReadBytes sets the maximum response size that can be read from + // Kafka. This is a safety measure to avoid OOMing on invalid responses. + // franz-go recommendation is to set it 2x FetchMaxBytes. + kgo.BrokerMaxReadBytes(2*fetchMaxBytes), + ) + client, err := kgo.NewClient(opts...) + if err != nil { + return nil, errors.Wrap(err, "creating kafka client") + } + if kafkaCfg.AutoCreateTopicEnabled { + kafkaCfg.SetDefaultNumberOfPartitionsForAutocreatedTopics(logger) + } + return client, nil +} + +func NewReaderClientMetrics(component string, reg prometheus.Registerer) *kprom.Metrics { + return kprom.NewMetrics("tempo_ingest_storage_reader", + kprom.Registerer(prometheus.WrapRegistererWith(prometheus.Labels{"component": component}, reg)), + // Do not export the client ID, because we use it to specify options to the backend. 
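+		// (Illustrative note, not from this change: FetchAndProduceDetail below limits the
+		// per-request detail kprom exports to batch/record counts and byte sizes.)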
+ kprom.FetchAndProduceDetail(kprom.Batches, kprom.Records, kprom.CompressedBytes, kprom.UncompressedBytes)) +} diff --git a/pkg/ingest/testkafka/cluster.go b/pkg/ingest/testkafka/cluster.go new file mode 100644 index 00000000000..b13a1ed1d2d --- /dev/null +++ b/pkg/ingest/testkafka/cluster.go @@ -0,0 +1,169 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package testkafka + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/twmb/franz-go/pkg/kfake" + "github.com/twmb/franz-go/pkg/kmsg" +) + +type controlFn func(kmsg.Request) (kmsg.Response, error, bool) + +type Cluster struct { + t testing.TB + fake *kfake.Cluster + topic string + numPartitions int + committedOffsets map[string][]int64 + controlFuncs map[kmsg.Key]controlFn +} + +// CreateCluster returns a fake Kafka cluster for unit testing. +func CreateCluster(t testing.TB, numPartitions int32, topicName string) (*Cluster, string) { + fake, err := kfake.NewCluster(kfake.NumBrokers(1), kfake.SeedTopics(numPartitions, topicName)) + require.NoError(t, err) + t.Cleanup(fake.Close) + + addrs := fake.ListenAddrs() + require.Len(t, addrs, 1) + + c := &Cluster{ + t: t, + fake: fake, + topic: topicName, + numPartitions: int(numPartitions), + committedOffsets: map[string][]int64{}, + controlFuncs: map[kmsg.Key]controlFn{}, + } + + // Add support for consumer groups + c.fake.ControlKey(kmsg.OffsetCommit.Int16(), c.offsetCommit) + c.fake.ControlKey(kmsg.OffsetFetch.Int16(), c.offsetFetch) + + return c, addrs[0] +} + +func (c *Cluster) ControlKey(key kmsg.Key, fn controlFn) { + switch key { + case kmsg.OffsetCommit: + // These are called by us for deterministic order + c.controlFuncs[key] = fn + default: + // These are passed through + c.fake.ControlKey(int16(key), fn) + } +} + +func (c *Cluster) ensureConsumerGroupExists(consumerGroup string) { + if _, ok := c.committedOffsets[consumerGroup]; ok { + return + } + c.committedOffsets[consumerGroup] = make([]int64, c.numPartitions+1) + + // Initialise the partition offsets with the special value -1 which means "no offset committed". 
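+	// (Illustrative note, not from this change: offsetFetch below only reports partitions
+	// whose committed offset is >= 0, so -1 behaves like a broker that has no stored
+	// offset for the group.)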
+ for i := 0; i < len(c.committedOffsets[consumerGroup]); i++ { + c.committedOffsets[consumerGroup][i] = -1 + } +} + +// nolint: revive +func (c *Cluster) offsetCommit(request kmsg.Request) (kmsg.Response, error, bool) { + c.fake.KeepControl() + + if fn := c.controlFuncs[kmsg.OffsetCommit]; fn != nil { + res, err, handled := fn(request) + if handled { + return res, err, handled + } + } + + commitR := request.(*kmsg.OffsetCommitRequest) + consumerGroup := commitR.Group + c.ensureConsumerGroupExists(consumerGroup) + require.Len(c.t, commitR.Topics, 1, "test only has support for one topic per request") + topic := commitR.Topics[0] + require.Equal(c.t, c.topic, topic.Topic) + require.Len(c.t, topic.Partitions, 1, "test only has support for one partition per request") + + partitionID := topic.Partitions[0].Partition + c.committedOffsets[consumerGroup][partitionID] = topic.Partitions[0].Offset + + resp := request.ResponseKind().(*kmsg.OffsetCommitResponse) + resp.Default() + resp.Topics = []kmsg.OffsetCommitResponseTopic{ + { + Topic: c.topic, + Partitions: []kmsg.OffsetCommitResponseTopicPartition{{Partition: partitionID}}, + }, + } + + return resp, nil, true +} + +// nolint: revive +func (c *Cluster) offsetFetch(kreq kmsg.Request) (kmsg.Response, error, bool) { + c.fake.KeepControl() + req := kreq.(*kmsg.OffsetFetchRequest) + require.Len(c.t, req.Groups, 1, "test only has support for one consumer group per request") + consumerGroup := req.Groups[0].Group + c.ensureConsumerGroupExists(consumerGroup) + + const allPartitions = -1 + var partitionID int32 + + if len(req.Groups[0].Topics) == 0 { + // An empty request means fetch all topic-partitions for this group. + partitionID = allPartitions + } else { + assert.Len(c.t, req.Groups[0].Topics, 1, "test only has support for one partition per request") + assert.Len(c.t, req.Groups[0].Topics[0].Partitions, 1, "test only has support for one partition per request") + partitionID = req.Groups[0].Topics[0].Partitions[0] + } + + // Prepare the list of partitions for which the offset has been committed. + // This mimics the real Kafka behaviour. + var partitionsResp []kmsg.OffsetFetchResponseGroupTopicPartition + if partitionID == allPartitions { + for i := 0; i < c.numPartitions; i++ { + if c.committedOffsets[consumerGroup][i] >= 0 { + partitionsResp = append(partitionsResp, kmsg.OffsetFetchResponseGroupTopicPartition{ + Partition: int32(i), + Offset: c.committedOffsets[consumerGroup][i], + }) + } + } + } else { + if c.committedOffsets[consumerGroup][partitionID] >= 0 { + partitionsResp = append(partitionsResp, kmsg.OffsetFetchResponseGroupTopicPartition{ + Partition: partitionID, + Offset: c.committedOffsets[consumerGroup][partitionID], + }) + } + } + + // Prepare the list of topics for which there are some committed offsets. + // This mimics the real Kafka behaviour.
var topicsResp []kmsg.OffsetFetchResponseGroupTopic + if len(partitionsResp) > 0 { + topicsResp = []kmsg.OffsetFetchResponseGroupTopic{ + { + Topic: c.topic, + Partitions: partitionsResp, + }, + } + } + + resp := kreq.ResponseKind().(*kmsg.OffsetFetchResponse) + resp.Default() + resp.Groups = []kmsg.OffsetFetchResponseGroup{ + { + Group: consumerGroup, + Topics: topicsResp, + }, + } + return resp, nil, true +} diff --git a/pkg/ingest/testkafka/message.go b/pkg/ingest/testkafka/message.go new file mode 100644 index 00000000000..cb7fd50a3f5 --- /dev/null +++ b/pkg/ingest/testkafka/message.go @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package testkafka + +import ( + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" +) + +// CreateProduceResponseError returns a kmsg.ProduceResponse containing err for the input topic and partition. +func CreateProduceResponseError(version int16, topic string, partition int32, err *kerr.Error) *kmsg.ProduceResponse { + return &kmsg.ProduceResponse{ + Version: version, + Topics: []kmsg.ProduceResponseTopic{ + { + Topic: topic, + Partitions: []kmsg.ProduceResponseTopicPartition{ + { + Partition: partition, + ErrorCode: err.Code, + }, + }, + }, + }, + } +} diff --git a/pkg/ingest/util.go b/pkg/ingest/util.go new file mode 100644 index 00000000000..d2bbac77ada --- /dev/null +++ b/pkg/ingest/util.go @@ -0,0 +1,26 @@ +package ingest + +import ( + "fmt" + "regexp" + "strconv" +) + +// Regular expression used to parse the ingester numeric ID. +var ingesterIDRegexp = regexp.MustCompile("-([0-9]+)$") + +// IngesterPartitionID returns the partition ID owned by the given ingester. +func IngesterPartitionID(ingesterID string) (int32, error) { + match := ingesterIDRegexp.FindStringSubmatch(ingesterID) + if len(match) == 0 { + return 0, fmt.Errorf("ingester ID %s doesn't match regular expression %q", ingesterID, ingesterIDRegexp.String()) + } + + // Parse the ingester sequence number. + ingesterSeq, err := strconv.Atoi(match[1]) + if err != nil { + return 0, fmt.Errorf("no ingester sequence number in ingester ID %s", ingesterID) + } + + return int32(ingesterSeq), nil +} diff --git a/pkg/ingest/writer_client.go b/pkg/ingest/writer_client.go new file mode 100644 index 00000000000..4f4769e0041 --- /dev/null +++ b/pkg/ingest/writer_client.go @@ -0,0 +1,343 @@ +package ingest + +import ( + "context" + "errors" + "math" + "sync" + "time" + + "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kgo" + "github.com/twmb/franz-go/pkg/kmsg" + "github.com/twmb/franz-go/pkg/sasl/plain" + "github.com/twmb/franz-go/plugin/kotel" + "github.com/twmb/franz-go/plugin/kprom" + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/trace" + "go.uber.org/atomic" +) + +// NewWriterClient returns the kgo.Client that should be used by the Writer. +// +// The input prometheus.Registerer must be wrapped with a prefix (the names of metrics +// registered don't have a prefix). +func NewWriterClient(kafkaCfg KafkaConfig, maxInflightProduceRequests int, logger log.Logger, reg prometheus.Registerer) (*kgo.Client, error) { + // Do not export the client ID, because we use it to specify options to the backend. + metrics := kprom.NewMetrics( + "", // No prefix. We expect the input prometheus.Registerer to be wrapped with a prefix.
+		kprom.Registerer(reg),
+		kprom.FetchAndProduceDetail(kprom.Batches, kprom.Records, kprom.CompressedBytes, kprom.UncompressedBytes))
+
+	opts := append(
+		commonKafkaClientOptions(kafkaCfg, metrics, logger),
+		kgo.RequiredAcks(kgo.AllISRAcks()),
+		kgo.DefaultProduceTopic(kafkaCfg.Topic),
+
+		// We set the partition field in each record.
+		kgo.RecordPartitioner(kgo.ManualPartitioner()),
+
+		// Set the upper bound on the size of a record batch.
+		kgo.ProducerBatchMaxBytes(producerBatchMaxBytes),
+
+		// By default, the Kafka client allows 1 Produce in-flight request per broker. By disabling write
+		// idempotency (which we don't need), we can increase the max number of in-flight Produce requests per
+		// broker. A higher number of in-flight requests, in addition to short buffering ("linger") on the client
+		// side before firing the next Produce request, allows us to reduce the end-to-end latency.
+		//
+		// The result of the multiplication of producer linger and max in-flight requests should match the maximum
+		// Produce latency expected by the Kafka backend in a steady state. For example, 50ms * 20 requests = 1s,
+		// which means the Kafka client will keep issuing a Produce request every 50ms as long as the Kafka backend
+		// doesn't take longer than 1s to process them (if it takes longer, the client will buffer data and stop
+		// issuing new Produce requests until some previous ones complete).
+		kgo.DisableIdempotentWrite(),
+		kgo.ProducerLinger(50*time.Millisecond),
+		kgo.MaxProduceRequestsInflightPerBroker(maxInflightProduceRequests),
+
+		// Unlimited number of Produce retries but a deadline on the max time a record can take to be delivered.
+		// With the default config it would retry infinitely.
+		//
+		// Details of the involved timeouts:
+		// - RecordDeliveryTimeout: how long a Kafka client Produce() call can take for a given record. The overhead
+		//   timeout is NOT applied.
+		// - ProduceRequestTimeout: how long to wait for the response to the Produce request (the Kafka protocol message)
+		//   after being sent on the network. The actual timeout is increased by the configured overhead.
+		//
+		// When a Produce request to Kafka fails, the client will retry up until the RecordDeliveryTimeout is reached.
+		// Once the timeout is reached, the Produce request will fail and all other buffered requests in the client
+		// (for the same partition) will fail too. See kgo.RecordDeliveryTimeout() documentation for more info.
+		kgo.RecordRetries(math.MaxInt),
+		kgo.RecordDeliveryTimeout(kafkaCfg.WriteTimeout),
+		kgo.ProduceRequestTimeout(kafkaCfg.WriteTimeout),
+		kgo.RequestTimeoutOverhead(writerRequestTimeoutOverhead),
+
+		// Unlimited number of buffered records because we limit on bytes in the Writer. The reason why we don't use
+		// kgo.MaxBufferedBytes() is because it suffers from a deadlock issue:
+		// https://github.com/twmb/franz-go/issues/777
+		kgo.MaxBufferedRecords(math.MaxInt), // Use a high value to set it as unlimited, because the client doesn't support "0 as unlimited".
+		kgo.MaxBufferedBytes(0),
+	)
+	if kafkaCfg.AutoCreateTopicEnabled {
+		kafkaCfg.SetDefaultNumberOfPartitionsForAutocreatedTopics(logger)
+	}
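+
+	// A sketch of a typical call site (values are illustrative only):
+	//
+	//	client, err := NewWriterClient(kafkaCfg, 20, logger, reg)
+	//	if err != nil {
+	//		return err
+	//	}
+	//	producer := NewProducer(client, 1024*1024*1024, reg)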
+	return kgo.NewClient(opts...)
+}
+
+type onlySampledTraces struct {
+	propagation.TextMapPropagator
+}
+
+func (o onlySampledTraces) Inject(ctx context.Context, carrier propagation.TextMapCarrier) {
+	sc := trace.SpanContextFromContext(ctx)
+	if !sc.IsSampled() {
+		return
+	}
+	o.TextMapPropagator.Inject(ctx, carrier)
+}
+
+func commonKafkaClientOptions(cfg KafkaConfig, metrics *kprom.Metrics, logger log.Logger) []kgo.Opt {
+	opts := []kgo.Opt{
+		kgo.ClientID(cfg.ClientID),
+		kgo.SeedBrokers(cfg.Address),
+		kgo.DialTimeout(cfg.DialTimeout),
+
+		// A cluster metadata update is a request sent to a broker and getting back the map of partitions and
+		// the leader broker for each partition. The cluster metadata can be updated (a) periodically or
+		// (b) when some events occur (e.g. backoff due to errors).
+		//
+		// MetadataMinAge() sets the minimum time between two cluster metadata updates due to events.
+		// MetadataMaxAge() sets how frequently the periodic update should occur.
+		//
+		// It's important to note that the periodic update is also used to discover new brokers (e.g. during a
+		// rolling update or after a scale up). For this reason, it's important to run the update frequently.
+		//
+		// The other two side effects of frequently updating the cluster metadata:
+		// 1. The "metadata" request may be expensive to run on the Kafka backend.
+		// 2. If the backend returns a different authoritative owner for a partition each time, then on every
+		//    cluster metadata update the Kafka client will create a new connection for each partition,
+		//    leading to a high connection churn rate.
+		//
+		// We currently set min and max age to the same value to have a constant load on the Kafka backend:
+		// regardless of whether there are errors or not, the metadata request frequency doesn't change.
+		kgo.MetadataMinAge(10 * time.Second),
+		kgo.MetadataMaxAge(10 * time.Second),
+
+		kgo.WithLogger(newLogger(logger)),
+
+		kgo.RetryTimeoutFn(func(key int16) time.Duration {
+			switch key {
+			case ((*kmsg.ListOffsetsRequest)(nil)).Key():
+				return cfg.LastProducedOffsetRetryTimeout
+			}
+
+			// 30s is the default timeout in the Kafka client.
+			return 30 * time.Second
+		}),
+	}
+
+	if cfg.AutoCreateTopicEnabled {
+		opts = append(opts, kgo.AllowAutoTopicCreation())
+	}
+
+	// SASL plain auth.
+	if cfg.SASLUsername != "" && cfg.SASLPassword.String() != "" {
+		opts = append(opts, kgo.SASL(plain.Plain(func(_ context.Context) (plain.Auth, error) {
+			return plain.Auth{
+				User: cfg.SASLUsername,
+				Pass: cfg.SASLPassword.String(),
+			}, nil
+		})))
+	}
+
+	tracer := kotel.NewTracer(
+		kotel.TracerPropagator(propagation.NewCompositeTextMapPropagator(onlySampledTraces{propagation.TraceContext{}})),
+	)
+	opts = append(opts, kgo.WithHooks(kotel.NewKotel(kotel.WithTracer(tracer)).Hooks()...))
+
+	if metrics != nil {
+		opts = append(opts, kgo.WithHooks(metrics))
+	}
+
+	return opts
+}
+
+// Producer is a kgo.Client wrapper exposing some higher level features and metrics useful for producers.
+type Producer struct {
+	*kgo.Client
+
+	closeOnce *sync.Once
+	closed    chan struct{}
+
+	// Keep track of the Kafka records size (bytes) currently in-flight in the Kafka client.
+	// This counter is used to implement a limit on the max buffered bytes.
+	bufferedBytes *atomic.Int64
+
+	// The max buffered bytes allowed. Once this limit is reached, produce requests fail.
+	maxBufferedBytes int64
+
+	// Custom metrics.
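+	// With the "tempo" namespace and "distributor" subsystem used in NewProducer below,
+	// these surface as e.g. tempo_distributor_produce_requests_total; an illustrative
+	// PromQL query such as rate(tempo_distributor_produce_failures_total[5m]) breaks
+	// the produce failure rate down by the reason label.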
+	bufferedProduceBytes      prometheus.Summary
+	bufferedProduceBytesLimit prometheus.Gauge
+	produceRequestsTotal      prometheus.Counter
+	produceFailuresTotal      *prometheus.CounterVec
+}
+
+// NewProducer returns a new Producer.
+//
+// The input prometheus.Registerer must be wrapped with a prefix (the names of metrics
+// registered don't have a prefix).
+func NewProducer(client *kgo.Client, maxBufferedBytes int64, reg prometheus.Registerer) *Producer {
+	producer := &Producer{
+		Client:           client,
+		closeOnce:        &sync.Once{},
+		closed:           make(chan struct{}),
+		bufferedBytes:    atomic.NewInt64(0),
+		maxBufferedBytes: maxBufferedBytes,
+
+		// Metrics.
+		bufferedProduceBytes: promauto.With(reg).NewSummary(
+			prometheus.SummaryOpts{
+				Namespace:  "tempo",
+				Subsystem:  "distributor",
+				Name:       "buffered_produce_bytes",
+				Help:       "The buffered produce records in bytes. Quantile buckets keep track of buffered records size over the last 60s.",
+				Objectives: map[float64]float64{0.5: 0.05, 0.99: 0.001, 1: 0.001},
+				MaxAge:     time.Minute,
+				AgeBuckets: 6,
+			}),
+		bufferedProduceBytesLimit: promauto.With(reg).NewGauge(
+			prometheus.GaugeOpts{
+				Namespace: "tempo",
+				Subsystem: "distributor",
+				Name:      "buffered_produce_bytes_limit",
+				Help:      "The bytes limit on buffered produce records. Produce requests fail once this limit is reached.",
+			}),
+		produceRequestsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+			Namespace: "tempo",
+			Subsystem: "distributor",
+			Name:      "produce_requests_total",
+			Help:      "Total number of produce requests issued to Kafka.",
+		}),
+		produceFailuresTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
+			Namespace: "tempo",
+			Subsystem: "distributor",
+			Name:      "produce_failures_total",
+			Help:      "Total number of failed produce requests issued to Kafka.",
+		}, []string{"reason"}),
+	}
+
+	producer.bufferedProduceBytesLimit.Set(float64(maxBufferedBytes))
+
+	go producer.updateMetricsLoop()
+
+	return producer
+}
+
+func (c *Producer) Close() {
+	c.closeOnce.Do(func() {
+		close(c.closed)
+	})
+
+	c.Client.Close()
+}
+
+func (c *Producer) updateMetricsLoop() {
+	// We observe buffered produce bytes at regular intervals, to have a good
+	// approximation of the peak value reached over the observation period.
+	ticker := time.NewTicker(250 * time.Millisecond)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-ticker.C:
+			c.bufferedProduceBytes.Observe(float64(c.Client.BufferedProduceBytes()))
+
+		case <-c.closed:
+			return
+		}
+	}
+}
+
+// ProduceSync produces records to Kafka and returns once all records have been successfully committed,
+// or an error occurred.
+//
+// This function honors the configured max buffered bytes and refuses to produce a record, returning
+// kgo.ErrMaxBuffered, if the configured limit is reached.
+func (c *Producer) ProduceSync(ctx context.Context, records []*kgo.Record) kgo.ProduceResults {
+	var (
+		remaining = atomic.NewInt64(int64(len(records)))
+		done      = make(chan struct{})
+		resMx     sync.Mutex
+		res       = make(kgo.ProduceResults, 0, len(records))
+	)
+
+	c.produceRequestsTotal.Add(float64(len(records)))
+
+	onProduceDone := func(r *kgo.Record, err error) {
+		if c.maxBufferedBytes > 0 {
+			c.bufferedBytes.Add(-int64(len(r.Value)))
+		}
+
+		resMx.Lock()
+		res = append(res, kgo.ProduceResult{Record: r, Err: err})
+		resMx.Unlock()
+
+		if err != nil {
+			c.produceFailuresTotal.WithLabelValues(produceErrReason(err)).Inc()
+		}
+
+		// In case of error we'll wait for all responses anyway before returning from ProduceSync().
+		// It keeps the code simpler, given we don't expect this function to be frequently
+		// called with multiple records.
+		if remaining.Dec() == 0 {
+			close(done)
+		}
+	}
+
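+	// To illustrate the bookkeeping above: for a call with 3 records, remaining
+	// starts at 3; every callback appends one result and decrements the counter,
+	// and the third decrement closes done, unblocking the select below.
+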
+	for _, record := range records {
+		// Fast fail if the Kafka client buffer is full. The buffered bytes counter is decreased in onProduceDone().
+		if c.maxBufferedBytes > 0 && c.bufferedBytes.Add(int64(len(record.Value))) > c.maxBufferedBytes {
+			onProduceDone(record, kgo.ErrMaxBuffered)
+			continue
+		}
+
+		// We use a new context so that other Produce() calls are not canceled when this call's context is
+		// canceled. It's important to note that cancelling the context passed to Produce() doesn't actually
+		// prevent the data from being sent over the wire (because it's never removed from the buffer) but in some
+		// cases may cause all requests to fail with context cancelled.
+		//
+		// Produce() may theoretically block if the buffer is full, but we configure the Kafka client with an
+		// unlimited buffer because we implement the buffer limit ourselves (see maxBufferedBytes). This means
+		// Produce() should never block for us in practice.
+		c.Client.Produce(context.WithoutCancel(ctx), record, onProduceDone)
+	}
+
+	// Wait for a response or until the context is done.
+	select {
+	case <-ctx.Done():
+		return kgo.ProduceResults{{Err: context.Cause(ctx)}}
+	case <-done:
+		// Once we're done, it's guaranteed that no more results will be appended, so we can safely return it.
+		return res
+	}
+}
+
+func produceErrReason(err error) string {
+	if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, kgo.ErrRecordTimeout) {
+		return "timeout"
+	}
+	if errors.Is(err, kgo.ErrMaxBuffered) {
+		return "buffer-full"
+	}
+	if errors.Is(err, kerr.MessageTooLarge) {
+		return "record-too-large"
+	}
+	if errors.Is(err, context.Canceled) {
+		// This should never happen because we don't cancel produce requests, however we
+		// check this error anyway to detect if something unexpected happened.
+		return "canceled"
+	}
+	return "other"
+}
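+
+// A minimal usage sketch for Producer.ProduceSync (illustrative only, assuming a
+// payload produced elsewhere):
+//
+//	res := producer.ProduceSync(ctx, []*kgo.Record{{Partition: 3, Value: payload}})
+//	if err := res.FirstErr(); err != nil {
+//		// e.g. kgo.ErrMaxBuffered when the maxBufferedBytes limit is hit.
+//	}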
+ return "canceled" + } + return "other" +} diff --git a/pkg/livetraces/livetraces.go b/pkg/livetraces/livetraces.go new file mode 100644 index 00000000000..425f70d6285 --- /dev/null +++ b/pkg/livetraces/livetraces.go @@ -0,0 +1,96 @@ +package livetraces + +import ( + "hash" + "hash/fnv" + "time" + + v1 "github.com/grafana/tempo/pkg/tempopb/trace/v1" +) + +type LiveTraceBatchT interface { + *v1.ResourceSpans | []byte +} + +type LiveTrace[T LiveTraceBatchT] struct { + ID []byte + timestamp time.Time + Batches []T + + sz uint64 +} + +type LiveTraces[T LiveTraceBatchT] struct { + hash hash.Hash64 + Traces map[uint64]*LiveTrace[T] + + sz uint64 + szFunc func(T) uint64 +} + +func New[T LiveTraceBatchT](sizeFunc func(T) uint64) *LiveTraces[T] { + return &LiveTraces[T]{ + hash: fnv.New64(), + Traces: make(map[uint64]*LiveTrace[T]), + szFunc: sizeFunc, + } +} + +func (l *LiveTraces[T]) token(traceID []byte) uint64 { + l.hash.Reset() + l.hash.Write(traceID) + return l.hash.Sum64() +} + +func (l *LiveTraces[T]) Len() uint64 { + return uint64(len(l.Traces)) +} + +func (l *LiveTraces[T]) Size() uint64 { + return l.sz +} + +func (l *LiveTraces[T]) Push(traceID []byte, batch T, max uint64) bool { + return l.PushWithTimestamp(time.Now(), traceID, batch, max) +} + +func (l *LiveTraces[T]) PushWithTimestamp(ts time.Time, traceID []byte, batch T, max uint64) bool { + token := l.token(traceID) + + tr := l.Traces[token] + if tr == nil { + + // Before adding this check against max + // Zero means no limit + if max > 0 && uint64(len(l.Traces)) >= max { + return false + } + + tr = &LiveTrace[T]{ + ID: traceID, + } + l.Traces[token] = tr + } + + sz := l.szFunc(batch) + tr.sz += sz + l.sz += sz + + tr.Batches = append(tr.Batches, batch) + tr.timestamp = ts + return true +} + +func (l *LiveTraces[T]) CutIdle(idleSince time.Time, immediate bool) []*LiveTrace[T] { + res := []*LiveTrace[T]{} + + for k, tr := range l.Traces { + if tr.timestamp.Before(idleSince) || immediate { + res = append(res, tr) + l.sz -= tr.sz + delete(l.Traces, k) + } + } + + return res +} diff --git a/modules/generator/processor/localblocks/livetraces_test.go b/pkg/livetraces/livetraces_test.go similarity index 84% rename from modules/generator/processor/localblocks/livetraces_test.go rename to pkg/livetraces/livetraces_test.go index 60a1f3be12a..8a4f3d77664 100644 --- a/modules/generator/processor/localblocks/livetraces_test.go +++ b/pkg/livetraces/livetraces_test.go @@ -1,16 +1,17 @@ -package localblocks +package livetraces import ( "math/rand/v2" "testing" "time" + v1 "github.com/grafana/tempo/pkg/tempopb/trace/v1" "github.com/grafana/tempo/pkg/util/test" "github.com/stretchr/testify/require" ) func TestLiveTracesSizesAndLen(t *testing.T) { - lt := newLiveTraces() + lt := New[*v1.ResourceSpans](func(rs *v1.ResourceSpans) uint64 { return uint64(rs.Size()) }) expectedSz := uint64(0) expectedLen := uint64(0) diff --git a/pkg/tempopb/tempo.pb.go b/pkg/tempopb/tempo.pb.go index 427add0ea36..de45413874f 100644 --- a/pkg/tempopb/tempo.pb.go +++ b/pkg/tempopb/tempo.pb.go @@ -1045,10 +1045,12 @@ func (m *SearchMetrics) GetInspectedSpans() uint64 { } type SearchTagsRequest struct { - Scope string `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope,omitempty"` - Query string `protobuf:"bytes,2,opt,name=query,proto3" json:"query,omitempty"` - Start uint32 `protobuf:"varint,3,opt,name=start,proto3" json:"start,omitempty"` - End uint32 `protobuf:"varint,4,opt,name=end,proto3" json:"end,omitempty"` + Scope string `protobuf:"bytes,1,opt,name=scope,proto3" 
json:"scope,omitempty"` + Query string `protobuf:"bytes,2,opt,name=query,proto3" json:"query,omitempty"` + Start uint32 `protobuf:"varint,3,opt,name=start,proto3" json:"start,omitempty"` + End uint32 `protobuf:"varint,4,opt,name=end,proto3" json:"end,omitempty"` + MaxTagsPerScope uint32 `protobuf:"varint,5,opt,name=maxTagsPerScope,proto3" json:"maxTagsPerScope,omitempty"` + StaleValuesThreshold uint32 `protobuf:"varint,6,opt,name=staleValuesThreshold,proto3" json:"staleValuesThreshold,omitempty"` } func (m *SearchTagsRequest) Reset() { *m = SearchTagsRequest{} } @@ -1112,21 +1114,37 @@ func (m *SearchTagsRequest) GetEnd() uint32 { return 0 } +func (m *SearchTagsRequest) GetMaxTagsPerScope() uint32 { + if m != nil { + return m.MaxTagsPerScope + } + return 0 +} + +func (m *SearchTagsRequest) GetStaleValuesThreshold() uint32 { + if m != nil { + return m.StaleValuesThreshold + } + return 0 +} + // SearchTagsBlockRequest takes SearchTagsRequest parameters as well as all information necessary // to search a block in the backend. type SearchTagsBlockRequest struct { - SearchReq *SearchTagsRequest `protobuf:"bytes,1,opt,name=searchReq,proto3" json:"searchReq,omitempty"` - BlockID string `protobuf:"bytes,2,opt,name=blockID,proto3" json:"blockID,omitempty"` - StartPage uint32 `protobuf:"varint,3,opt,name=startPage,proto3" json:"startPage,omitempty"` - PagesToSearch uint32 `protobuf:"varint,4,opt,name=pagesToSearch,proto3" json:"pagesToSearch,omitempty"` - Encoding string `protobuf:"bytes,5,opt,name=encoding,proto3" json:"encoding,omitempty"` - IndexPageSize uint32 `protobuf:"varint,6,opt,name=indexPageSize,proto3" json:"indexPageSize,omitempty"` - TotalRecords uint32 `protobuf:"varint,7,opt,name=totalRecords,proto3" json:"totalRecords,omitempty"` - DataEncoding string `protobuf:"bytes,8,opt,name=dataEncoding,proto3" json:"dataEncoding,omitempty"` - Version string `protobuf:"bytes,9,opt,name=version,proto3" json:"version,omitempty"` - Size_ uint64 `protobuf:"varint,10,opt,name=size,proto3" json:"size,omitempty"` - FooterSize uint32 `protobuf:"varint,11,opt,name=footerSize,proto3" json:"footerSize,omitempty"` - DedicatedColumns []*DedicatedColumn `protobuf:"bytes,12,rep,name=dedicatedColumns,proto3" json:"dedicatedColumns,omitempty"` + SearchReq *SearchTagsRequest `protobuf:"bytes,1,opt,name=searchReq,proto3" json:"searchReq,omitempty"` + BlockID string `protobuf:"bytes,2,opt,name=blockID,proto3" json:"blockID,omitempty"` + StartPage uint32 `protobuf:"varint,3,opt,name=startPage,proto3" json:"startPage,omitempty"` + PagesToSearch uint32 `protobuf:"varint,4,opt,name=pagesToSearch,proto3" json:"pagesToSearch,omitempty"` + Encoding string `protobuf:"bytes,5,opt,name=encoding,proto3" json:"encoding,omitempty"` + IndexPageSize uint32 `protobuf:"varint,6,opt,name=indexPageSize,proto3" json:"indexPageSize,omitempty"` + TotalRecords uint32 `protobuf:"varint,7,opt,name=totalRecords,proto3" json:"totalRecords,omitempty"` + DataEncoding string `protobuf:"bytes,8,opt,name=dataEncoding,proto3" json:"dataEncoding,omitempty"` + Version string `protobuf:"bytes,9,opt,name=version,proto3" json:"version,omitempty"` + Size_ uint64 `protobuf:"varint,10,opt,name=size,proto3" json:"size,omitempty"` + FooterSize uint32 `protobuf:"varint,11,opt,name=footerSize,proto3" json:"footerSize,omitempty"` + DedicatedColumns []*DedicatedColumn `protobuf:"bytes,12,rep,name=dedicatedColumns,proto3" json:"dedicatedColumns,omitempty"` + MaxTagsPerScope uint32 `protobuf:"varint,13,opt,name=maxTagsPerScope,proto3" 
json:"maxTagsPerScope,omitempty"` + StaleValueThreshold uint32 `protobuf:"varint,14,opt,name=staleValueThreshold,proto3" json:"staleValueThreshold,omitempty"` } func (m *SearchTagsBlockRequest) Reset() { *m = SearchTagsBlockRequest{} } @@ -1246,6 +1264,20 @@ func (m *SearchTagsBlockRequest) GetDedicatedColumns() []*DedicatedColumn { return nil } +func (m *SearchTagsBlockRequest) GetMaxTagsPerScope() uint32 { + if m != nil { + return m.MaxTagsPerScope + } + return 0 +} + +func (m *SearchTagsBlockRequest) GetStaleValueThreshold() uint32 { + if m != nil { + return m.StaleValueThreshold + } + return 0 +} + type SearchTagValuesBlockRequest struct { SearchReq *SearchTagValuesRequest `protobuf:"bytes,1,opt,name=searchReq,proto3" json:"searchReq,omitempty"` BlockID string `protobuf:"bytes,2,opt,name=blockID,proto3" json:"blockID,omitempty"` @@ -1535,10 +1567,12 @@ func (m *SearchTagsV2Scope) GetTags() []string { } type SearchTagValuesRequest struct { - TagName string `protobuf:"bytes,1,opt,name=tagName,proto3" json:"tagName,omitempty"` - Query string `protobuf:"bytes,2,opt,name=query,proto3" json:"query,omitempty"` - Start uint32 `protobuf:"varint,4,opt,name=start,proto3" json:"start,omitempty"` - End uint32 `protobuf:"varint,5,opt,name=end,proto3" json:"end,omitempty"` + TagName string `protobuf:"bytes,1,opt,name=tagName,proto3" json:"tagName,omitempty"` + Query string `protobuf:"bytes,2,opt,name=query,proto3" json:"query,omitempty"` + Start uint32 `protobuf:"varint,4,opt,name=start,proto3" json:"start,omitempty"` + End uint32 `protobuf:"varint,5,opt,name=end,proto3" json:"end,omitempty"` + MaxTagValues uint32 `protobuf:"varint,6,opt,name=maxTagValues,proto3" json:"maxTagValues,omitempty"` + StaleValueThreshold uint32 `protobuf:"varint,7,opt,name=staleValueThreshold,proto3" json:"staleValueThreshold,omitempty"` } func (m *SearchTagValuesRequest) Reset() { *m = SearchTagValuesRequest{} } @@ -1602,6 +1636,20 @@ func (m *SearchTagValuesRequest) GetEnd() uint32 { return 0 } +func (m *SearchTagValuesRequest) GetMaxTagValues() uint32 { + if m != nil { + return m.MaxTagValues + } + return 0 +} + +func (m *SearchTagValuesRequest) GetStaleValueThreshold() uint32 { + if m != nil { + return m.StaleValueThreshold + } + return 0 +} + type SearchTagValuesResponse struct { TagValues []string `protobuf:"bytes,1,rep,name=tagValues,proto3" json:"tagValues,omitempty"` Metrics *MetadataMetrics `protobuf:"bytes,2,opt,name=metrics,proto3" json:"metrics,omitempty"` @@ -3539,186 +3587,191 @@ func init() { func init() { proto.RegisterFile("pkg/tempopb/tempo.proto", fileDescriptor_f22805646f4f62b6) } var fileDescriptor_f22805646f4f62b6 = []byte{ - // 2855 bytes of a gzipped FileDescriptorProto + // 2935 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x3a, 0x4b, 0x6f, 0x23, 0xc7, - 0xd1, 0x1a, 0xf1, 0x5d, 0x24, 0x25, 0xaa, 0x57, 0x96, 0xb9, 0xdc, 0xb5, 0x24, 0x8f, 0x17, 0x86, - 0x3e, 0x3f, 0x28, 0x2d, 0xbd, 0xc6, 0xe7, 0xb5, 0x13, 0x07, 0xd2, 0x8a, 0x59, 0xcb, 0xd6, 0xcb, - 0x4d, 0x5a, 0x36, 0x02, 0x03, 0xc2, 0x88, 0xec, 0xe5, 0x0e, 0x44, 0xce, 0xd0, 0x33, 0x43, 0x79, - 0x99, 0x83, 0x91, 0x04, 0xc8, 0x21, 0x40, 0x0e, 0x39, 0x24, 0x87, 0xfc, 0x82, 0x20, 0xa7, 0x1c, - 0x92, 0x7f, 0x10, 0x20, 0x70, 0x10, 0xc4, 0x30, 0x90, 0x8b, 0x91, 0x83, 0x11, 0xd8, 0x87, 0xe4, - 0x67, 0x04, 0x5d, 0xdd, 0x3d, 0x6f, 0x49, 0x5e, 0xef, 0x1a, 0xf1, 0xc1, 0x27, 0x75, 0xd5, 0x54, - 0x57, 0x57, 0xd7, 0xbb, 0x9a, 0x82, 0x27, 0xc7, 0xa7, 0x83, 0x75, 0x8f, 0x8d, 0xc6, 0xf6, 0xf8, - 0x44, 0xfc, 
0x6d, 0x8e, 0x1d, 0xdb, 0xb3, 0x49, 0x41, 0x22, 0x1b, 0x4b, 0x3d, 0x7b, 0x34, 0xb2, - 0xad, 0xf5, 0xb3, 0x9b, 0xeb, 0x62, 0x25, 0x08, 0x1a, 0x2f, 0x0e, 0x4c, 0xef, 0xfe, 0xe4, 0xa4, - 0xd9, 0xb3, 0x47, 0xeb, 0x03, 0x7b, 0x60, 0xaf, 0x23, 0xfa, 0x64, 0x72, 0x0f, 0x21, 0x04, 0x70, - 0x25, 0xc9, 0x17, 0x3d, 0xc7, 0xe8, 0x31, 0xce, 0x05, 0x17, 0x02, 0xab, 0xff, 0x49, 0x83, 0x5a, - 0x97, 0xc3, 0x5b, 0xd3, 0x9d, 0x6d, 0xca, 0x3e, 0x98, 0x30, 0xd7, 0x23, 0x75, 0x28, 0x20, 0xcd, - 0xce, 0x76, 0x5d, 0x5b, 0xd5, 0xd6, 0x2a, 0x54, 0x81, 0x64, 0x19, 0xe0, 0x64, 0x68, 0xf7, 0x4e, - 0x3b, 0x9e, 0xe1, 0x78, 0xf5, 0xd9, 0x55, 0x6d, 0xad, 0x44, 0x43, 0x18, 0xd2, 0x80, 0x22, 0x42, - 0x6d, 0xab, 0x5f, 0xcf, 0xe0, 0x57, 0x1f, 0x26, 0xd7, 0xa1, 0xf4, 0xc1, 0x84, 0x39, 0xd3, 0x3d, - 0xbb, 0xcf, 0xea, 0x39, 0xfc, 0x18, 0x20, 0xc8, 0x0b, 0xb0, 0x60, 0x0c, 0x87, 0xf6, 0x87, 0x87, - 0x86, 0xe3, 0x99, 0xc6, 0x10, 0x65, 0xaa, 0xe7, 0x57, 0xb5, 0xb5, 0x22, 0x4d, 0x7e, 0xd0, 0xff, - 0xa3, 0xc1, 0x42, 0x48, 0x6c, 0x77, 0x6c, 0x5b, 0x2e, 0x23, 0x37, 0x20, 0x87, 0x82, 0xa2, 0xd4, - 0xe5, 0xd6, 0x5c, 0x53, 0xaa, 0xb0, 0x89, 0xa4, 0x54, 0x7c, 0x24, 0x2f, 0x41, 0x61, 0xc4, 0x3c, - 0xc7, 0xec, 0xb9, 0x78, 0x81, 0x72, 0xeb, 0x6a, 0x94, 0x8e, 0xb3, 0xdc, 0x13, 0x04, 0x54, 0x51, - 0x92, 0xdb, 0x90, 0x77, 0x3d, 0xc3, 0x9b, 0xb8, 0x78, 0xad, 0xb9, 0xd6, 0xd3, 0xc9, 0x3d, 0x4a, - 0x8c, 0x66, 0x07, 0x09, 0xa9, 0xdc, 0xc0, 0xb5, 0x39, 0x62, 0xae, 0x6b, 0x0c, 0x58, 0x3d, 0x8b, - 0xb7, 0x56, 0xa0, 0xfe, 0x0c, 0xe4, 0x05, 0x2d, 0xa9, 0x40, 0xf1, 0xce, 0xc1, 0xde, 0xe1, 0x6e, - 0xbb, 0xdb, 0xae, 0xcd, 0x90, 0x32, 0x14, 0x0e, 0x37, 0x69, 0x77, 0x67, 0x73, 0xb7, 0xa6, 0xe9, - 0x24, 0x64, 0x20, 0x29, 0x96, 0xfe, 0xc9, 0x2c, 0x54, 0x3b, 0xcc, 0x70, 0x7a, 0xf7, 0x95, 0xc9, - 0x5e, 0x85, 0x6c, 0xd7, 0x18, 0xb8, 0x75, 0x6d, 0x35, 0xb3, 0x56, 0x6e, 0xad, 0xfa, 0xd2, 0x45, - 0xa8, 0x9a, 0x9c, 0xa4, 0x6d, 0x79, 0xce, 0x74, 0x2b, 0xfb, 0xf1, 0xe7, 0x2b, 0x33, 0x14, 0xf7, - 0x90, 0x1b, 0x50, 0xdd, 0x33, 0xad, 0xed, 0x89, 0x63, 0x78, 0xa6, 0x6d, 0xed, 0x09, 0xb5, 0x54, - 0x69, 0x14, 0x89, 0x54, 0xc6, 0x83, 0x10, 0x55, 0x46, 0x52, 0x85, 0x91, 0x64, 0x11, 0x72, 0xbb, - 0xe6, 0xc8, 0xf4, 0xf0, 0xaa, 0x55, 0x2a, 0x00, 0x8e, 0x75, 0xd1, 0x63, 0x72, 0x02, 0x8b, 0x00, - 0xa9, 0x41, 0x86, 0x59, 0x7d, 0x34, 0x72, 0x95, 0xf2, 0x25, 0xa7, 0x7b, 0x9b, 0x7b, 0x44, 0xbd, - 0x88, 0x8a, 0x12, 0x00, 0x59, 0x83, 0xf9, 0xce, 0xd8, 0xb0, 0xdc, 0x43, 0xe6, 0xf0, 0xbf, 0x1d, - 0xe6, 0xd5, 0x4b, 0xb8, 0x27, 0x8e, 0x6e, 0xfc, 0x3f, 0x94, 0xfc, 0x2b, 0x72, 0xf6, 0xa7, 0x6c, - 0x8a, 0xbe, 0x50, 0xa2, 0x7c, 0xc9, 0xd9, 0x9f, 0x19, 0xc3, 0x09, 0x93, 0x8e, 0x2b, 0x80, 0x57, - 0x67, 0x5f, 0xd1, 0xf4, 0xbf, 0x64, 0x80, 0x08, 0x55, 0x6d, 0x71, 0x77, 0x55, 0x5a, 0xbd, 0x05, - 0x25, 0x57, 0x29, 0x50, 0x3a, 0xd5, 0x52, 0xba, 0x6a, 0x69, 0x40, 0xc8, 0x0d, 0x8e, 0x4e, 0xbf, - 0xb3, 0x2d, 0x0f, 0x52, 0x20, 0x0f, 0x01, 0xbc, 0xfa, 0x21, 0x77, 0x06, 0xa1, 0xbf, 0x00, 0xc1, - 0x35, 0x3c, 0x36, 0x06, 0xcc, 0xed, 0xda, 0x82, 0xb5, 0xd4, 0x61, 0x14, 0xc9, 0x43, 0x8c, 0x59, - 0x3d, 0xbb, 0x6f, 0x5a, 0x03, 0x19, 0x45, 0x3e, 0xcc, 0x39, 0x98, 0x56, 0x9f, 0x3d, 0xe0, 0xec, - 0x3a, 0xe6, 0x8f, 0x99, 0xd4, 0x6d, 0x14, 0x49, 0x74, 0xa8, 0x78, 0xb6, 0x67, 0x0c, 0x29, 0xeb, - 0xd9, 0x4e, 0xdf, 0xad, 0x17, 0x90, 0x28, 0x82, 0xe3, 0x34, 0x7d, 0xc3, 0x33, 0xda, 0xea, 0x24, - 0x61, 0x90, 0x08, 0x8e, 0xdf, 0xf3, 0x8c, 0x39, 0xae, 0x69, 0x5b, 0x68, 0x8f, 0x12, 0x55, 0x20, - 0x21, 0x90, 0x75, 0xf9, 0xf1, 0xb0, 0xaa, 0xad, 0x65, 0x29, 0xae, 0x79, 0xea, 0xb8, 0x67, 0xdb, - 0x1e, 0x73, 0x50, 0xb0, 0x32, 0x9e, 
0x19, 0xc2, 0x90, 0x6d, 0xa8, 0xf5, 0x59, 0xdf, 0xec, 0x19, - 0x1e, 0xeb, 0xdf, 0xb1, 0x87, 0x93, 0x91, 0xe5, 0xd6, 0x2b, 0xe8, 0xcd, 0x75, 0x5f, 0xe5, 0xdb, - 0x51, 0x02, 0x9a, 0xd8, 0xa1, 0xff, 0x59, 0x83, 0xf9, 0x18, 0x15, 0xb9, 0x05, 0x39, 0xb7, 0x67, - 0x8f, 0x99, 0x0c, 0xdd, 0xe5, 0xf3, 0xd8, 0x35, 0x3b, 0x9c, 0x8a, 0x0a, 0x62, 0x7e, 0x07, 0xcb, - 0x18, 0x29, 0x5f, 0xc1, 0x35, 0xb9, 0x09, 0x59, 0x6f, 0x3a, 0x16, 0xf9, 0x65, 0xae, 0xf5, 0xd4, - 0xb9, 0x8c, 0xba, 0xd3, 0x31, 0xa3, 0x48, 0xaa, 0xaf, 0x40, 0x0e, 0xd9, 0x92, 0x22, 0x64, 0x3b, - 0x87, 0x9b, 0xfb, 0xb5, 0x19, 0x1e, 0xec, 0xb4, 0xdd, 0x39, 0x78, 0x87, 0xde, 0x69, 0x63, 0x7c, - 0x67, 0x39, 0x39, 0x01, 0xc8, 0x77, 0xba, 0x74, 0x67, 0xff, 0x6e, 0x6d, 0x46, 0x7f, 0x00, 0x73, - 0xca, 0xbb, 0x64, 0x6a, 0xbb, 0x05, 0x79, 0xcc, 0x5e, 0x2a, 0xc2, 0xaf, 0x47, 0xf3, 0x8f, 0xa0, - 0xde, 0x63, 0x9e, 0xc1, 0x2d, 0x44, 0x25, 0x2d, 0xd9, 0x88, 0xa7, 0xba, 0xb8, 0xf7, 0xc6, 0xf3, - 0x9c, 0xfe, 0x8f, 0x0c, 0x5c, 0x49, 0xe1, 0x18, 0x2f, 0x09, 0xa5, 0xa0, 0x24, 0xac, 0xc1, 0xbc, - 0x63, 0xdb, 0x5e, 0x87, 0x39, 0x67, 0x66, 0x8f, 0xed, 0x07, 0x2a, 0x8b, 0xa3, 0xb9, 0x77, 0x72, - 0x14, 0xb2, 0x47, 0x3a, 0x51, 0x21, 0xa2, 0x48, 0x5e, 0x08, 0x30, 0x24, 0xba, 0xe6, 0x88, 0xbd, - 0x63, 0x99, 0x0f, 0xf6, 0x0d, 0xcb, 0xc6, 0x48, 0xc8, 0xd2, 0xe4, 0x07, 0xee, 0x55, 0xfd, 0x20, - 0x25, 0x89, 0xf4, 0x12, 0xc2, 0x90, 0xe7, 0xa0, 0xe0, 0xca, 0x9c, 0x91, 0x47, 0x0d, 0xd4, 0x02, - 0x0d, 0x08, 0x3c, 0x55, 0x04, 0xe4, 0x05, 0x28, 0xca, 0x25, 0x8f, 0x89, 0x4c, 0x2a, 0xb1, 0x4f, - 0x41, 0x28, 0x54, 0x5c, 0x71, 0x39, 0x9e, 0xc3, 0xdd, 0x7a, 0x11, 0x77, 0x34, 0x2f, 0xb2, 0x4b, - 0xb3, 0x13, 0xda, 0x80, 0x49, 0x8a, 0x46, 0x78, 0x34, 0x8e, 0x60, 0x21, 0x41, 0x92, 0x92, 0xc7, - 0x9e, 0x0f, 0xe7, 0xb1, 0x72, 0xeb, 0x89, 0x90, 0x51, 0x83, 0xcd, 0xe1, 0xf4, 0xb6, 0x0b, 0x95, - 0xf0, 0x27, 0xcc, 0x43, 0x63, 0xc3, 0xba, 0x63, 0x4f, 0x2c, 0x0f, 0x19, 0xf3, 0x3c, 0xa4, 0x10, - 0x5c, 0xa7, 0xcc, 0x71, 0x6c, 0x47, 0x7c, 0x16, 0xc5, 0x20, 0x84, 0xd1, 0x7f, 0xae, 0x41, 0x41, - 0xea, 0x83, 0x3c, 0x03, 0x39, 0xbe, 0x51, 0xb9, 0x65, 0x35, 0xa2, 0x30, 0x2a, 0xbe, 0x61, 0x05, - 0x34, 0xbc, 0xde, 0x7d, 0xd6, 0x97, 0xdc, 0x14, 0x48, 0x5e, 0x03, 0x30, 0x3c, 0xcf, 0x31, 0x4f, - 0x26, 0x1e, 0xe3, 0x15, 0x85, 0xf3, 0xb8, 0xe6, 0xf3, 0x90, 0xed, 0xce, 0xd9, 0xcd, 0xe6, 0x5b, - 0x6c, 0x7a, 0xc4, 0x6f, 0x43, 0x43, 0xe4, 0x3c, 0xd6, 0xb3, 0xfc, 0x18, 0xb2, 0x04, 0x79, 0x7e, - 0x90, 0xef, 0x9b, 0x12, 0x4a, 0x0d, 0xe1, 0x54, 0xf7, 0xca, 0x9c, 0xe7, 0x5e, 0x37, 0xa0, 0xaa, - 0x9c, 0x89, 0xc3, 0xae, 0x74, 0xc4, 0x28, 0x32, 0x76, 0x8b, 0xdc, 0xc3, 0xdd, 0xe2, 0xb7, 0x7e, - 0x2d, 0x97, 0xc1, 0xc8, 0x23, 0xca, 0xb4, 0xdc, 0x31, 0xeb, 0x79, 0xac, 0xdf, 0x55, 0x41, 0x8f, - 0xf5, 0x2e, 0x86, 0x26, 0xcf, 0xc2, 0x9c, 0x8f, 0xda, 0x9a, 0xf2, 0xc3, 0x67, 0x51, 0xbe, 0x18, - 0x96, 0xac, 0x42, 0x19, 0xb3, 0x3b, 0x16, 0x37, 0x55, 0xb9, 0xc3, 0x28, 0x7e, 0xd1, 0x9e, 0x3d, - 0x1a, 0x0f, 0x99, 0xc7, 0xfa, 0x6f, 0xda, 0x27, 0xae, 0xaa, 0x3d, 0x11, 0x24, 0xf7, 0x1b, 0xdc, - 0x84, 0x14, 0x22, 0xd8, 0x02, 0x04, 0x97, 0x3b, 0x60, 0x29, 0xc4, 0xc9, 0xa3, 0x38, 0x71, 0x74, - 0x44, 0x6e, 0xac, 0xe1, 0x58, 0x83, 0xc2, 0x72, 0x23, 0x56, 0x1f, 0xf0, 0x78, 0xe0, 0xaa, 0xe1, - 0x55, 0x5d, 0x15, 0xe5, 0x45, 0x95, 0xce, 0x85, 0xb1, 0x65, 0xba, 0x5e, 0x84, 0x1c, 0x36, 0x93, - 0xaa, 0xb6, 0x23, 0x10, 0x34, 0x1e, 0x99, 0x94, 0xc6, 0x23, 0xeb, 0x37, 0x1e, 0xfa, 0x27, 0x19, - 0x58, 0x0a, 0x4e, 0x8a, 0xf4, 0x00, 0xaf, 0x24, 0x7b, 0x80, 0x46, 0x2c, 0x8b, 0x86, 0xa4, 0xfb, - 0xae, 0x0f, 0xf8, 0x76, 0xf4, 0x01, 0x9f, 0x65, 0xe0, 0x9a, 
0x6f, 0x1c, 0x0c, 0xba, 0xa8, 0x55, - 0xbf, 0x9f, 0xb4, 0xea, 0x4a, 0xd2, 0xaa, 0x62, 0xe3, 0x77, 0xa6, 0xfd, 0x56, 0x99, 0xb6, 0xaf, - 0x5a, 0x75, 0x11, 0x76, 0xb2, 0x41, 0x6a, 0x40, 0xd1, 0x33, 0x06, 0xbc, 0x83, 0x10, 0xb5, 0xa8, - 0x44, 0x7d, 0x98, 0xb4, 0xe2, 0x6d, 0x50, 0x70, 0x9c, 0x2a, 0xcd, 0x89, 0x46, 0xe8, 0x23, 0x58, - 0x0c, 0x4e, 0x39, 0x6a, 0xf9, 0xe7, 0xb4, 0x20, 0x8f, 0x09, 0x47, 0x55, 0xbc, 0xb4, 0x5c, 0x70, - 0xd4, 0x12, 0x9d, 0xa4, 0xa4, 0xfc, 0x5a, 0xe7, 0xbf, 0x16, 0x4e, 0x7d, 0x92, 0xa1, 0x5f, 0xd0, - 0xb4, 0x50, 0x41, 0x23, 0x90, 0xf5, 0xf8, 0xe4, 0x37, 0x8b, 0x97, 0xc6, 0xb5, 0x3e, 0x0e, 0x65, - 0xb3, 0x88, 0x0f, 0x63, 0x1f, 0x27, 0xd4, 0xe2, 0xf7, 0x71, 0x02, 0xbc, 0x2c, 0x81, 0x66, 0x53, - 0x12, 0x68, 0x2e, 0x48, 0xa0, 0xa7, 0xf0, 0x64, 0xe2, 0x44, 0xa9, 0x31, 0x5e, 0x34, 0x14, 0x52, - 0x9a, 0x26, 0x40, 0x7c, 0x2d, 0xdd, 0xdc, 0x82, 0xa2, 0x3a, 0x06, 0xaf, 0x3f, 0xf5, 0x8b, 0x01, - 0xae, 0xd3, 0xe7, 0x3c, 0xfd, 0x27, 0x1a, 0x5c, 0x8d, 0xc9, 0x18, 0xb2, 0xeb, 0x7a, 0x5c, 0xca, - 0x72, 0x6b, 0x21, 0xe8, 0xe5, 0xe4, 0x97, 0x47, 0x15, 0xfc, 0xaf, 0x1a, 0xcc, 0xc7, 0x3e, 0xa6, - 0xd4, 0x70, 0x2d, 0xb5, 0x86, 0x47, 0x6a, 0xef, 0x6c, 0xbc, 0xf6, 0x26, 0xea, 0x77, 0x26, 0xad, - 0x7e, 0xc7, 0xfa, 0x80, 0x6c, 0xb2, 0x0f, 0x48, 0xa9, 0xe1, 0xb9, 0xd4, 0x1a, 0xae, 0xef, 0x43, - 0x0e, 0xbb, 0x10, 0xd2, 0x86, 0xaa, 0xc3, 0x5c, 0x7b, 0xe2, 0xf4, 0x58, 0x27, 0xd4, 0x0a, 0x06, - 0xe9, 0x54, 0xbc, 0x37, 0x9d, 0xdd, 0x6c, 0xd2, 0x30, 0x19, 0x8d, 0xee, 0xd2, 0xf7, 0xa1, 0x72, - 0x38, 0x71, 0x83, 0x89, 0xe7, 0x75, 0xa8, 0x62, 0xcf, 0xe9, 0x6e, 0x4d, 0xbb, 0xf2, 0x51, 0x27, - 0xb3, 0x36, 0x17, 0xd2, 0x32, 0xa7, 0x6e, 0x73, 0x0a, 0xca, 0x0c, 0xd7, 0xb6, 0x68, 0x94, 0x5c, - 0xef, 0x40, 0x8d, 0x53, 0xa0, 0xb0, 0xca, 0xfb, 0x5f, 0xf4, 0xa7, 0x28, 0x1e, 0x2d, 0x95, 0xad, - 0x27, 0x3e, 0xfe, 0x7c, 0x65, 0xe6, 0x9f, 0x9f, 0xaf, 0x54, 0x0f, 0x1d, 0x66, 0x0c, 0x87, 0x76, - 0x4f, 0x50, 0xab, 0xf1, 0xa9, 0x06, 0x19, 0xb3, 0x2f, 0xda, 0xd2, 0x0a, 0xe5, 0x4b, 0x7d, 0x4f, - 0x30, 0x15, 0x17, 0x90, 0x4c, 0x6f, 0x43, 0xe1, 0x04, 0xdb, 0xd9, 0xaf, 0x7c, 0x73, 0x45, 0xaf, - 0xdf, 0x00, 0x90, 0x6f, 0x3b, 0xdc, 0xc2, 0x4b, 0x91, 0x19, 0xaf, 0xa2, 0xc4, 0xd0, 0x5f, 0x87, - 0xd2, 0xae, 0x69, 0x9d, 0x76, 0x86, 0x66, 0x8f, 0x8f, 0xa0, 0xb9, 0xa1, 0x69, 0x9d, 0xaa, 0xb3, - 0xae, 0x25, 0xcf, 0xe2, 0x67, 0x34, 0xf9, 0x06, 0x2a, 0x28, 0xf5, 0x9f, 0x69, 0x40, 0x38, 0x52, - 0xb9, 0x63, 0xd0, 0x47, 0x89, 0x80, 0xd7, 0xc2, 0x01, 0x5f, 0x87, 0xc2, 0xc0, 0xb1, 0x27, 0xe3, - 0x2d, 0x95, 0x08, 0x14, 0xc8, 0xe9, 0x87, 0xf8, 0xb4, 0x23, 0xba, 0x65, 0x01, 0x7c, 0xe5, 0x04, - 0xf1, 0x0b, 0x1e, 0x7d, 0x81, 0x10, 0x9d, 0xc9, 0x68, 0x64, 0x38, 0xd3, 0xff, 0x8d, 0x2c, 0xbf, - 0xd7, 0xe0, 0x4a, 0x44, 0x21, 0x41, 0xa6, 0x62, 0xae, 0x67, 0x8e, 0x78, 0xb5, 0x41, 0x49, 0x8a, - 0x34, 0x40, 0x44, 0x87, 0x26, 0xd1, 0x67, 0x87, 0x86, 0xa6, 0x67, 0x61, 0x0e, 0xfd, 0xaf, 0xe3, - 0x93, 0x08, 0xd1, 0x62, 0x58, 0xd2, 0x0c, 0xd2, 0x46, 0x16, 0x2d, 0xb8, 0x18, 0x19, 0x99, 0x12, - 0x29, 0xe3, 0x7b, 0x50, 0xa1, 0xc6, 0x87, 0x6f, 0x98, 0xae, 0x67, 0x0f, 0x1c, 0x63, 0xc4, 0x9d, - 0xe4, 0x64, 0xd2, 0x3b, 0x65, 0x9e, 0x4c, 0x13, 0x12, 0xe2, 0x77, 0xef, 0x85, 0x24, 0x13, 0x80, - 0xfe, 0x26, 0x14, 0xd5, 0xd0, 0x91, 0x32, 0x47, 0xbe, 0x10, 0x9d, 0x23, 0x97, 0xa2, 0xb3, 0xeb, - 0xdb, 0xbb, 0x7c, 0x58, 0x34, 0x7b, 0x2a, 0x7f, 0xfe, 0x5a, 0x83, 0x72, 0x48, 0x44, 0xb2, 0x05, - 0x0b, 0x43, 0xc3, 0x63, 0x56, 0x6f, 0x7a, 0x7c, 0x5f, 0x89, 0x27, 0xbd, 0x32, 0x98, 0x48, 0xc3, - 0xb2, 0xd3, 0x9a, 0xa4, 0x0f, 0x6e, 0xf3, 0x7f, 0x90, 0x77, 0x99, 0x63, 0xca, 0x80, 
0x0c, 0xa7, - 0x5c, 0x7f, 0x56, 0x92, 0x04, 0xfc, 0xe2, 0x22, 0xc0, 0xa5, 0x62, 0x25, 0xa4, 0xff, 0x3d, 0xea, - 0xdd, 0xd2, 0xb1, 0x92, 0x23, 0xee, 0x25, 0xd6, 0x9a, 0x4d, 0xb5, 0x56, 0x20, 0x5f, 0xe6, 0x32, - 0xf9, 0x6a, 0x90, 0x19, 0xdf, 0xbe, 0x2d, 0x07, 0x44, 0xbe, 0x14, 0x98, 0x97, 0x65, 0xfe, 0xe4, - 0x4b, 0x81, 0xd9, 0x90, 0x53, 0x11, 0x5f, 0x22, 0xe6, 0xe5, 0x0d, 0x39, 0xfe, 0xf0, 0xa5, 0xfe, - 0x2e, 0x34, 0xd2, 0xe2, 0x44, 0xba, 0xe8, 0x6d, 0x28, 0xb9, 0x88, 0x32, 0x59, 0x32, 0x05, 0xa4, - 0xec, 0x0b, 0xa8, 0xf5, 0xdf, 0x68, 0x50, 0x8d, 0x18, 0x36, 0x52, 0x3b, 0x73, 0xb2, 0x76, 0x56, - 0x40, 0xb3, 0x50, 0x19, 0x19, 0xaa, 0x59, 0x1c, 0xba, 0x87, 0xfa, 0xd6, 0xa8, 0x76, 0x8f, 0x43, - 0xae, 0x7c, 0xc3, 0xd6, 0x5c, 0x0e, 0x9d, 0xe0, 0xe5, 0x8a, 0x54, 0x3b, 0xe1, 0x50, 0x5f, 0x5e, - 0x4c, 0xeb, 0xe3, 0x44, 0x2e, 0x9e, 0xcb, 0x0b, 0xc8, 0x5b, 0xbd, 0x85, 0x13, 0xc8, 0x9e, 0x9a, - 0x56, 0x1f, 0x7b, 0xcd, 0x1c, 0xc5, 0xb5, 0xce, 0xc4, 0xf3, 0xae, 0x14, 0x7c, 0xdb, 0xf0, 0x0c, - 0xde, 0x48, 0x3a, 0xcc, 0x9d, 0x0c, 0xbd, 0x6e, 0x50, 0xda, 0x43, 0x18, 0xde, 0x84, 0x09, 0x48, - 0xba, 0x4d, 0x23, 0x35, 0x86, 0x90, 0x82, 0x4a, 0x4a, 0x9e, 0x05, 0x17, 0x12, 0x5f, 0xb9, 0x9b, - 0x0c, 0x8d, 0x13, 0x36, 0x0c, 0x75, 0x44, 0x01, 0x82, 0xcb, 0x81, 0xc0, 0x51, 0xa8, 0x9b, 0x08, - 0x61, 0xc8, 0x3a, 0xcc, 0x7a, 0xca, 0x35, 0x56, 0xce, 0x97, 0xe1, 0xd0, 0x36, 0x2d, 0x8f, 0xce, - 0x7a, 0x2e, 0x8f, 0xa1, 0xa5, 0xf4, 0xcf, 0x68, 0x0c, 0x53, 0x0a, 0x51, 0xa5, 0xb8, 0xe6, 0xde, - 0x71, 0x66, 0x0c, 0xf1, 0x60, 0x8d, 0xf2, 0x25, 0xaf, 0xcf, 0xec, 0x01, 0x1b, 0x8d, 0x87, 0x86, - 0xd3, 0x95, 0xef, 0x71, 0x19, 0xfc, 0x89, 0x26, 0x8e, 0x26, 0xcf, 0x41, 0x4d, 0xa1, 0xd4, 0xfb, - 0xbc, 0x74, 0xce, 0x04, 0x5e, 0xef, 0xc0, 0x15, 0x7c, 0x6a, 0xdf, 0xb1, 0x5c, 0xcf, 0xb0, 0xbc, - 0x8b, 0xb3, 0xb2, 0x9f, 0x65, 0x65, 0xa6, 0x89, 0x64, 0x59, 0x11, 0x9b, 0x98, 0x65, 0x1f, 0xc0, - 0x62, 0x94, 0xa9, 0x74, 0xe1, 0xa6, 0x1f, 0x53, 0xc2, 0x7f, 0x83, 0xb4, 0x23, 0x29, 0x3b, 0xf8, - 0xd5, 0x0f, 0xac, 0x87, 0x7f, 0xc4, 0xfc, 0xa9, 0x06, 0xd5, 0x08, 0x2f, 0x72, 0x1b, 0xf2, 0x68, - 0xb6, 0x64, 0xcc, 0x24, 0x5f, 0x67, 0xe4, 0x6f, 0x23, 0x72, 0x43, 0xb4, 0x99, 0xd4, 0x64, 0x32, - 0x24, 0x2b, 0x50, 0x1e, 0x3b, 0xf6, 0xe8, 0x58, 0x72, 0x15, 0x2f, 0x99, 0xc0, 0x51, 0xbb, 0x88, - 0xd1, 0xff, 0x90, 0x81, 0x05, 0xbc, 0x3e, 0x35, 0xac, 0x01, 0x7b, 0x2c, 0x1a, 0xc5, 0x99, 0xcb, - 0x63, 0x63, 0x69, 0x46, 0x5c, 0x47, 0x7f, 0x55, 0x2b, 0xc4, 0x7f, 0x55, 0x0b, 0xcd, 0xa9, 0xc5, - 0x0b, 0xe6, 0xd4, 0xd2, 0xa5, 0x73, 0x2a, 0xa4, 0xcd, 0xa9, 0xa1, 0xe9, 0xb0, 0x1c, 0x9d, 0x0e, - 0xc3, 0x13, 0x6c, 0x25, 0x36, 0xc1, 0xaa, 0xc9, 0xb1, 0x7a, 0xee, 0xe4, 0x38, 0xf7, 0x95, 0x26, - 0xc7, 0xf9, 0x87, 0x9d, 0x1c, 0xb1, 0xbe, 0x4b, 0xd7, 0x77, 0xeb, 0x35, 0x71, 0x67, 0x1f, 0xa1, - 0xbb, 0x40, 0xc2, 0x06, 0x93, 0xde, 0xfa, 0x7c, 0xcc, 0x5b, 0xaf, 0x04, 0x45, 0xd2, 0x1c, 0xb1, - 0x47, 0x76, 0xd5, 0x8f, 0xa0, 0xd8, 0x96, 0x12, 0x3c, 0x7e, 0x27, 0x7d, 0x1a, 0x2a, 0x3c, 0x8d, - 0xb8, 0x9e, 0x31, 0x1a, 0x1f, 0x8f, 0x84, 0x97, 0x66, 0x68, 0xd9, 0xc7, 0xed, 0xb9, 0xfa, 0x26, - 0xe4, 0x3b, 0x06, 0x1f, 0x11, 0x12, 0xc4, 0xb3, 0x09, 0xe2, 0xe0, 0x14, 0x2d, 0x74, 0x8a, 0xfe, - 0xa9, 0x06, 0x10, 0xe8, 0xe2, 0x51, 0x6e, 0xb1, 0x0e, 0x05, 0x17, 0x85, 0x51, 0xed, 0xc0, 0x7c, - 0xa0, 0x3e, 0xc4, 0x4b, 0x7a, 0x45, 0x75, 0x69, 0x14, 0x92, 0x97, 0xc3, 0x16, 0xcf, 0xc6, 0x4a, - 0xb8, 0x52, 0xbc, 0xe4, 0x1a, 0x50, 0x3e, 0xf7, 0x3e, 0xcc, 0xc7, 0xa6, 0x0b, 0x52, 0x81, 0xe2, - 0xfe, 0xc1, 0x71, 0x9b, 0xd2, 0x03, 0x5a, 0x9b, 0x21, 0x57, 0x60, 0x7e, 0x6f, 0xf3, 0xbd, 0xe3, - 0xdd, 0x9d, 
0xa3, 0xf6, 0x71, 0x97, 0x6e, 0xde, 0x69, 0x77, 0x6a, 0x1a, 0x47, 0xe2, 0xfa, 0xb8, - 0x7b, 0x70, 0x70, 0xbc, 0xbb, 0x49, 0xef, 0xb6, 0x6b, 0xb3, 0x64, 0x01, 0xaa, 0xef, 0xec, 0xbf, - 0xb5, 0x7f, 0xf0, 0xee, 0xbe, 0xdc, 0x9c, 0x69, 0xfd, 0x52, 0x83, 0x3c, 0x67, 0xcf, 0x1c, 0xf2, - 0x03, 0x28, 0xf9, 0x43, 0x0a, 0xb9, 0x1a, 0x19, 0x6d, 0xc2, 0x83, 0x4b, 0xe3, 0x89, 0xc8, 0x27, - 0xe5, 0x9c, 0xfa, 0x0c, 0xd9, 0x84, 0xb2, 0x4f, 0x7c, 0xd4, 0xfa, 0x3a, 0x2c, 0x5a, 0xff, 0xd6, - 0xa0, 0x26, 0xfd, 0xf2, 0x2e, 0xb3, 0x98, 0x63, 0x78, 0xb6, 0x2f, 0x18, 0xce, 0x2b, 0x31, 0xae, - 0xe1, 0xe1, 0xe7, 0x7c, 0xc1, 0x76, 0x00, 0xee, 0x32, 0x4f, 0xf5, 0x8a, 0xd7, 0xd2, 0x8b, 0xa3, - 0xe0, 0x71, 0xfd, 0x9c, 0xca, 0xa9, 0x58, 0xdd, 0x05, 0x08, 0x02, 0x93, 0x04, 0xb5, 0x3e, 0x91, - 0x5e, 0x1b, 0xd7, 0x52, 0xbf, 0xf9, 0x37, 0xfd, 0x5d, 0x16, 0x0a, 0xfc, 0x83, 0xc9, 0x1c, 0xf2, - 0x06, 0x54, 0x7f, 0x68, 0x5a, 0x7d, 0xff, 0xa7, 0x75, 0x72, 0x35, 0xed, 0x17, 0x7d, 0xc1, 0xb6, - 0x71, 0xfe, 0x8f, 0xfd, 0x68, 0x82, 0x8a, 0xfa, 0xb1, 0xae, 0xc7, 0x2c, 0x8f, 0x9c, 0xf3, 0x0b, - 0x71, 0xe3, 0xc9, 0x04, 0xde, 0x67, 0xd1, 0x86, 0x72, 0xe8, 0xd7, 0xe7, 0xb0, 0xb6, 0x12, 0xbf, - 0x49, 0x5f, 0xc4, 0xe6, 0x2e, 0x40, 0xf0, 0x66, 0x44, 0x2e, 0x78, 0xa5, 0x6e, 0x5c, 0x4b, 0xfd, - 0xe6, 0x33, 0x7a, 0x4b, 0x5d, 0x49, 0x3c, 0x3e, 0x5d, 0xc8, 0xea, 0xa9, 0xd4, 0x07, 0xb0, 0x10, - 0xb3, 0x23, 0x98, 0x8f, 0x3d, 0xbb, 0x90, 0xcb, 0x9e, 0x5a, 0x1b, 0xab, 0xe7, 0x13, 0xf8, 0x7c, - 0x7f, 0x14, 0x7a, 0x21, 0x53, 0xcf, 0x39, 0x97, 0x73, 0xd6, 0xcf, 0x23, 0x08, 0xcb, 0xdc, 0xfa, - 0x5b, 0x16, 0x6a, 0x1d, 0xcf, 0x61, 0xc6, 0xc8, 0xb4, 0x06, 0xca, 0x65, 0x5e, 0x83, 0xbc, 0x2c, - 0x7c, 0x0f, 0x6b, 0xe2, 0x0d, 0x8d, 0xc7, 0xc3, 0x63, 0xb1, 0xcd, 0x86, 0x46, 0xf6, 0x1e, 0xa3, - 0x75, 0x36, 0x34, 0xf2, 0xde, 0x37, 0x63, 0x9f, 0x0d, 0x8d, 0xbc, 0xff, 0xcd, 0x59, 0x68, 0x43, - 0x23, 0x87, 0xb0, 0x20, 0x73, 0xc5, 0x63, 0xc9, 0x0e, 0x1b, 0x1a, 0x39, 0x82, 0x2b, 0x61, 0x8e, - 0xb2, 0x85, 0x24, 0xd7, 0xa3, 0xfb, 0xa2, 0x4d, 0x72, 0x48, 0xc3, 0x69, 0xdd, 0x2e, 0xe7, 0xdb, - 0xfa, 0xa3, 0x06, 0x05, 0x95, 0x09, 0x8f, 0x53, 0xa7, 0x55, 0xfd, 0xa2, 0x19, 0x4e, 0x1e, 0xf4, - 0xcc, 0x85, 0x34, 0x8f, 0x3d, 0x5b, 0x6e, 0xd5, 0x3f, 0xfe, 0x62, 0x59, 0xfb, 0xf4, 0x8b, 0x65, - 0xed, 0x5f, 0x5f, 0x2c, 0x6b, 0xbf, 0xfa, 0x72, 0x79, 0xe6, 0xd3, 0x2f, 0x97, 0x67, 0x3e, 0xfb, - 0x72, 0x79, 0xe6, 0x24, 0x8f, 0xff, 0x3b, 0xf6, 0xd2, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x05, - 0xbf, 0xa3, 0xdb, 0xbc, 0x26, 0x00, 0x00, + 0xd1, 0x1a, 0xf1, 0xa9, 0x22, 0x29, 0x51, 0x2d, 0x59, 0xe6, 0x72, 0xd7, 0x92, 0x3c, 0x5e, 0x18, + 0xfa, 0xfc, 0xa0, 0xb4, 0xf4, 0x1a, 0x9f, 0xd7, 0x4e, 0x1c, 0x48, 0x2b, 0x66, 0x2d, 0x5b, 0x2f, + 0x37, 0x69, 0xd9, 0x08, 0x0c, 0x08, 0x23, 0xb2, 0x97, 0x3b, 0x10, 0x39, 0x43, 0xcf, 0x0c, 0x65, + 0x29, 0x07, 0x23, 0x09, 0x90, 0x43, 0x80, 0x1c, 0x72, 0x48, 0x0e, 0xf9, 0x05, 0x41, 0x72, 0xc9, + 0x21, 0xf9, 0x07, 0x41, 0x0c, 0x07, 0x41, 0x02, 0x03, 0xb9, 0x18, 0x39, 0x18, 0x81, 0x7d, 0x48, + 0x7e, 0x46, 0xd0, 0xd5, 0xdd, 0xf3, 0xa6, 0xe4, 0xb5, 0xd7, 0x88, 0x0f, 0x3e, 0xb1, 0xab, 0xba, + 0xba, 0xba, 0xba, 0xba, 0x9e, 0x3d, 0x84, 0xc7, 0x47, 0xa7, 0xfd, 0x75, 0x8f, 0x0d, 0x47, 0xf6, + 0xe8, 0x44, 0xfc, 0x36, 0x46, 0x8e, 0xed, 0xd9, 0xa4, 0x20, 0x91, 0xf5, 0xa5, 0xae, 0x3d, 0x1c, + 0xda, 0xd6, 0xfa, 0xd9, 0xad, 0x75, 0x31, 0x12, 0x04, 0xf5, 0xe7, 0xfb, 0xa6, 0xf7, 0x60, 0x7c, + 0xd2, 0xe8, 0xda, 0xc3, 0xf5, 0xbe, 0xdd, 0xb7, 0xd7, 0x11, 0x7d, 0x32, 0xbe, 0x8f, 0x10, 0x02, + 0x38, 0x92, 0xe4, 0x8b, 0x9e, 0x63, 0x74, 0x19, 0xe7, 0x82, 0x03, 0x81, 0xd5, 0xff, 0xa8, 
0x41, + 0xb5, 0xc3, 0xe1, 0xad, 0x8b, 0x9d, 0x6d, 0xca, 0xde, 0x1b, 0x33, 0xd7, 0x23, 0x35, 0x28, 0x20, + 0xcd, 0xce, 0x76, 0x4d, 0x5b, 0xd5, 0xd6, 0xca, 0x54, 0x81, 0x64, 0x19, 0xe0, 0x64, 0x60, 0x77, + 0x4f, 0xdb, 0x9e, 0xe1, 0x78, 0xb5, 0xe9, 0x55, 0x6d, 0x6d, 0x86, 0x86, 0x30, 0xa4, 0x0e, 0x45, + 0x84, 0x5a, 0x56, 0xaf, 0x96, 0xc1, 0x59, 0x1f, 0x26, 0x37, 0x60, 0xe6, 0xbd, 0x31, 0x73, 0x2e, + 0xf6, 0xec, 0x1e, 0xab, 0xe5, 0x70, 0x32, 0x40, 0x90, 0xe7, 0x60, 0xde, 0x18, 0x0c, 0xec, 0xf7, + 0x0f, 0x0d, 0xc7, 0x33, 0x8d, 0x01, 0xca, 0x54, 0xcb, 0xaf, 0x6a, 0x6b, 0x45, 0x9a, 0x9c, 0xd0, + 0xff, 0xa3, 0xc1, 0x7c, 0x48, 0x6c, 0x77, 0x64, 0x5b, 0x2e, 0x23, 0x37, 0x21, 0x87, 0x82, 0xa2, + 0xd4, 0xa5, 0xe6, 0x6c, 0x43, 0xaa, 0xb0, 0x81, 0xa4, 0x54, 0x4c, 0x92, 0x17, 0xa0, 0x30, 0x64, + 0x9e, 0x63, 0x76, 0x5d, 0x3c, 0x40, 0xa9, 0x79, 0x2d, 0x4a, 0xc7, 0x59, 0xee, 0x09, 0x02, 0xaa, + 0x28, 0xc9, 0x1d, 0xc8, 0xbb, 0x9e, 0xe1, 0x8d, 0x5d, 0x3c, 0xd6, 0x6c, 0xf3, 0xc9, 0xe4, 0x1a, + 0x25, 0x46, 0xa3, 0x8d, 0x84, 0x54, 0x2e, 0xe0, 0xda, 0x1c, 0x32, 0xd7, 0x35, 0xfa, 0xac, 0x96, + 0xc5, 0x53, 0x2b, 0x50, 0x7f, 0x0a, 0xf2, 0x82, 0x96, 0x94, 0xa1, 0x78, 0xf7, 0x60, 0xef, 0x70, + 0xb7, 0xd5, 0x69, 0x55, 0xa7, 0x48, 0x09, 0x0a, 0x87, 0x9b, 0xb4, 0xb3, 0xb3, 0xb9, 0x5b, 0xd5, + 0x74, 0x12, 0xba, 0x20, 0x29, 0x96, 0xfe, 0xf7, 0x69, 0xa8, 0xb4, 0x99, 0xe1, 0x74, 0x1f, 0xa8, + 0x2b, 0x7b, 0x19, 0xb2, 0x1d, 0xa3, 0xef, 0xd6, 0xb4, 0xd5, 0xcc, 0x5a, 0xa9, 0xb9, 0xea, 0x4b, + 0x17, 0xa1, 0x6a, 0x70, 0x92, 0x96, 0xe5, 0x39, 0x17, 0x5b, 0xd9, 0x8f, 0x3e, 0x5d, 0x99, 0xa2, + 0xb8, 0x86, 0xdc, 0x84, 0xca, 0x9e, 0x69, 0x6d, 0x8f, 0x1d, 0xc3, 0x33, 0x6d, 0x6b, 0x4f, 0xa8, + 0xa5, 0x42, 0xa3, 0x48, 0xa4, 0x32, 0xce, 0x43, 0x54, 0x19, 0x49, 0x15, 0x46, 0x92, 0x45, 0xc8, + 0xed, 0x9a, 0x43, 0xd3, 0xc3, 0xa3, 0x56, 0xa8, 0x00, 0x38, 0xd6, 0x45, 0x8b, 0xc9, 0x09, 0x2c, + 0x02, 0xa4, 0x0a, 0x19, 0x66, 0xf5, 0xf0, 0x92, 0x2b, 0x94, 0x0f, 0x39, 0xdd, 0x9b, 0xdc, 0x22, + 0x6a, 0x45, 0x54, 0x94, 0x00, 0xc8, 0x1a, 0xcc, 0xb5, 0x47, 0x86, 0xe5, 0x1e, 0x32, 0x87, 0xff, + 0xb6, 0x99, 0x57, 0x9b, 0xc1, 0x35, 0x71, 0x74, 0xfd, 0xff, 0x61, 0xc6, 0x3f, 0x22, 0x67, 0x7f, + 0xca, 0x2e, 0xd0, 0x16, 0x66, 0x28, 0x1f, 0x72, 0xf6, 0x67, 0xc6, 0x60, 0xcc, 0xa4, 0xe1, 0x0a, + 0xe0, 0xe5, 0xe9, 0x97, 0x34, 0xfd, 0xc3, 0x0c, 0x10, 0xa1, 0xaa, 0x2d, 0x6e, 0xae, 0x4a, 0xab, + 0xb7, 0x61, 0xc6, 0x55, 0x0a, 0x94, 0x46, 0xb5, 0x94, 0xae, 0x5a, 0x1a, 0x10, 0xf2, 0x0b, 0x47, + 0xa3, 0xdf, 0xd9, 0x96, 0x1b, 0x29, 0x90, 0xbb, 0x00, 0x1e, 0xfd, 0x90, 0x1b, 0x83, 0xd0, 0x5f, + 0x80, 0xe0, 0x1a, 0x1e, 0x19, 0x7d, 0xe6, 0x76, 0x6c, 0xc1, 0x5a, 0xea, 0x30, 0x8a, 0xe4, 0x2e, + 0xc6, 0xac, 0xae, 0xdd, 0x33, 0xad, 0xbe, 0xf4, 0x22, 0x1f, 0xe6, 0x1c, 0x4c, 0xab, 0xc7, 0xce, + 0x39, 0xbb, 0xb6, 0xf9, 0x43, 0x26, 0x75, 0x1b, 0x45, 0x12, 0x1d, 0xca, 0x9e, 0xed, 0x19, 0x03, + 0xca, 0xba, 0xb6, 0xd3, 0x73, 0x6b, 0x05, 0x24, 0x8a, 0xe0, 0x38, 0x4d, 0xcf, 0xf0, 0x8c, 0x96, + 0xda, 0x49, 0x5c, 0x48, 0x04, 0xc7, 0xcf, 0x79, 0xc6, 0x1c, 0xd7, 0xb4, 0x2d, 0xbc, 0x8f, 0x19, + 0xaa, 0x40, 0x42, 0x20, 0xeb, 0xf2, 0xed, 0x61, 0x55, 0x5b, 0xcb, 0x52, 0x1c, 0xf3, 0xd0, 0x71, + 0xdf, 0xb6, 0x3d, 0xe6, 0xa0, 0x60, 0x25, 0xdc, 0x33, 0x84, 0x21, 0xdb, 0x50, 0xed, 0xb1, 0x9e, + 0xd9, 0x35, 0x3c, 0xd6, 0xbb, 0x6b, 0x0f, 0xc6, 0x43, 0xcb, 0xad, 0x95, 0xd1, 0x9a, 0x6b, 0xbe, + 0xca, 0xb7, 0xa3, 0x04, 0x34, 0xb1, 0x42, 0xff, 0x93, 0x06, 0x73, 0x31, 0x2a, 0x72, 0x1b, 0x72, + 0x6e, 0xd7, 0x1e, 0x31, 0xe9, 0xba, 0xcb, 0x93, 0xd8, 0x35, 0xda, 0x9c, 0x8a, 0x0a, 0x62, 0x7e, + 0x06, 0xcb, 0x18, 
0x2a, 0x5b, 0xc1, 0x31, 0xb9, 0x05, 0x59, 0xef, 0x62, 0x24, 0xe2, 0xcb, 0x6c, + 0xf3, 0x89, 0x89, 0x8c, 0x3a, 0x17, 0x23, 0x46, 0x91, 0x54, 0x5f, 0x81, 0x1c, 0xb2, 0x25, 0x45, + 0xc8, 0xb6, 0x0f, 0x37, 0xf7, 0xab, 0x53, 0xdc, 0xd9, 0x69, 0xab, 0x7d, 0xf0, 0x16, 0xbd, 0xdb, + 0x42, 0xff, 0xce, 0x72, 0x72, 0x02, 0x90, 0x6f, 0x77, 0xe8, 0xce, 0xfe, 0xbd, 0xea, 0x94, 0x7e, + 0x0e, 0xb3, 0xca, 0xba, 0x64, 0x68, 0xbb, 0x0d, 0x79, 0x8c, 0x5e, 0xca, 0xc3, 0x6f, 0x44, 0xe3, + 0x8f, 0xa0, 0xde, 0x63, 0x9e, 0xc1, 0x6f, 0x88, 0x4a, 0x5a, 0xb2, 0x11, 0x0f, 0x75, 0x71, 0xeb, + 0x8d, 0xc7, 0x39, 0xfd, 0x1f, 0x19, 0x58, 0x48, 0xe1, 0x18, 0x4f, 0x09, 0x33, 0x41, 0x4a, 0x58, + 0x83, 0x39, 0xc7, 0xb6, 0xbd, 0x36, 0x73, 0xce, 0xcc, 0x2e, 0xdb, 0x0f, 0x54, 0x16, 0x47, 0x73, + 0xeb, 0xe4, 0x28, 0x64, 0x8f, 0x74, 0x22, 0x43, 0x44, 0x91, 0x3c, 0x11, 0xa0, 0x4b, 0x74, 0xcc, + 0x21, 0x7b, 0xcb, 0x32, 0xcf, 0xf7, 0x0d, 0xcb, 0x46, 0x4f, 0xc8, 0xd2, 0xe4, 0x04, 0xb7, 0xaa, + 0x5e, 0x10, 0x92, 0x44, 0x78, 0x09, 0x61, 0xc8, 0x33, 0x50, 0x70, 0x65, 0xcc, 0xc8, 0xa3, 0x06, + 0xaa, 0x81, 0x06, 0x04, 0x9e, 0x2a, 0x02, 0xf2, 0x1c, 0x14, 0xe5, 0x90, 0xfb, 0x44, 0x26, 0x95, + 0xd8, 0xa7, 0x20, 0x14, 0xca, 0xae, 0x38, 0x1c, 0x8f, 0xe1, 0x6e, 0xad, 0x88, 0x2b, 0x1a, 0x97, + 0xdd, 0x4b, 0xa3, 0x1d, 0x5a, 0x80, 0x41, 0x8a, 0x46, 0x78, 0xd4, 0x8f, 0x60, 0x3e, 0x41, 0x92, + 0x12, 0xc7, 0x9e, 0x0d, 0xc7, 0xb1, 0x52, 0xf3, 0xb1, 0xd0, 0xa5, 0x06, 0x8b, 0xc3, 0xe1, 0x6d, + 0x17, 0xca, 0xe1, 0x29, 0x8c, 0x43, 0x23, 0xc3, 0xba, 0x6b, 0x8f, 0x2d, 0x0f, 0x19, 0xf3, 0x38, + 0xa4, 0x10, 0x5c, 0xa7, 0xcc, 0x71, 0x6c, 0x47, 0x4c, 0x8b, 0x64, 0x10, 0xc2, 0xe8, 0x3f, 0xd5, + 0xa0, 0x20, 0xf5, 0x41, 0x9e, 0x82, 0x1c, 0x5f, 0xa8, 0xcc, 0xb2, 0x12, 0x51, 0x18, 0x15, 0x73, + 0x98, 0x01, 0x0d, 0xaf, 0xfb, 0x80, 0xf5, 0x24, 0x37, 0x05, 0x92, 0x57, 0x00, 0x0c, 0xcf, 0x73, + 0xcc, 0x93, 0xb1, 0xc7, 0x78, 0x46, 0xe1, 0x3c, 0xae, 0xfb, 0x3c, 0x64, 0xb9, 0x73, 0x76, 0xab, + 0xf1, 0x06, 0xbb, 0x38, 0xe2, 0xa7, 0xa1, 0x21, 0x72, 0xee, 0xeb, 0x59, 0xbe, 0x0d, 0x59, 0x82, + 0x3c, 0xdf, 0xc8, 0xb7, 0x4d, 0x09, 0xa5, 0xba, 0x70, 0xaa, 0x79, 0x65, 0x26, 0x99, 0xd7, 0x4d, + 0xa8, 0x28, 0x63, 0xe2, 0xb0, 0x2b, 0x0d, 0x31, 0x8a, 0x8c, 0x9d, 0x22, 0xf7, 0x70, 0xa7, 0xf8, + 0xb5, 0x9f, 0xcb, 0xa5, 0x33, 0x72, 0x8f, 0x32, 0x2d, 0x77, 0xc4, 0xba, 0x1e, 0xeb, 0x75, 0x94, + 0xd3, 0x63, 0xbe, 0x8b, 0xa1, 0xc9, 0xd3, 0x30, 0xeb, 0xa3, 0xb6, 0x2e, 0xf8, 0xe6, 0xd3, 0x28, + 0x5f, 0x0c, 0x4b, 0x56, 0xa1, 0x84, 0xd1, 0x1d, 0x93, 0x9b, 0xca, 0xdc, 0x61, 0x14, 0x3f, 0x68, + 0xd7, 0x1e, 0x8e, 0x06, 0xcc, 0x63, 0xbd, 0xd7, 0xed, 0x13, 0x57, 0xe5, 0x9e, 0x08, 0x92, 0xdb, + 0x0d, 0x2e, 0x42, 0x0a, 0xe1, 0x6c, 0x01, 0x82, 0xcb, 0x1d, 0xb0, 0x14, 0xe2, 0xe4, 0x51, 0x9c, + 0x38, 0x3a, 0x22, 0x37, 0xe6, 0x70, 0xcc, 0x41, 0x61, 0xb9, 0x11, 0xab, 0xff, 0x59, 0xe3, 0x0e, + 0xc1, 0x75, 0xc3, 0xd3, 0xba, 0xca, 0xca, 0x8b, 0x2a, 0x9e, 0x8b, 0xdb, 0x96, 0xf1, 0x7a, 0x11, + 0x72, 0x58, 0x4d, 0xaa, 0xe4, 0x8e, 0x40, 0x50, 0x79, 0x64, 0x52, 0x2a, 0x8f, 0x6c, 0x50, 0x79, + 0xac, 0xc1, 0xdc, 0xd0, 0x38, 0xe7, 0xbb, 0xf0, 0x72, 0x02, 0xb9, 0x8b, 0xf3, 0xc5, 0xd1, 0xa4, + 0x09, 0x8b, 0xae, 0x67, 0x0c, 0x18, 0xde, 0xa4, 0xdb, 0x79, 0xe0, 0x30, 0xf7, 0x81, 0x3d, 0x50, + 0x65, 0x4c, 0xea, 0x9c, 0xfe, 0xbb, 0x2c, 0x2c, 0x05, 0xe7, 0x88, 0x94, 0x18, 0x2f, 0x25, 0x4b, + 0x8c, 0x7a, 0x2c, 0x48, 0x87, 0xce, 0xfe, 0x6d, 0x99, 0xf1, 0x8d, 0x28, 0x33, 0xd2, 0xcc, 0xa5, + 0x92, 0x6e, 0x2e, 0x1b, 0xb0, 0x10, 0x98, 0x44, 0x60, 0x2d, 0xb3, 0x48, 0x9d, 0x36, 0xa5, 0x7f, + 0x92, 0x81, 0xeb, 0xfe, 0xc5, 0x0b, 0x4b, 
0x8a, 0x58, 0xcc, 0x77, 0x93, 0x16, 0xb3, 0x92, 0xb4, + 0x18, 0xb1, 0xf0, 0x5b, 0xb3, 0xf9, 0x46, 0x55, 0xa7, 0x3d, 0xd5, 0x65, 0x08, 0x97, 0x96, 0xb5, + 0x5d, 0x1d, 0x8a, 0x9e, 0xd1, 0xe7, 0xc5, 0x8f, 0x48, 0xa3, 0x33, 0xd4, 0x87, 0x49, 0x33, 0x5e, + 0xc1, 0x05, 0xdb, 0xa9, 0xaa, 0x22, 0x51, 0xc3, 0x7d, 0x00, 0x8b, 0xc1, 0x2e, 0x47, 0x4d, 0x7f, + 0x9f, 0x26, 0xe4, 0x31, 0x54, 0xaa, 0x64, 0x9d, 0x16, 0x67, 0x8e, 0x9a, 0xa2, 0x08, 0x96, 0x94, + 0x5f, 0x6a, 0xff, 0x57, 0xc2, 0x41, 0x5b, 0x32, 0xf4, 0x73, 0xb1, 0x16, 0xca, 0xc5, 0x04, 0xb2, + 0x1e, 0x6f, 0x5a, 0xa7, 0xf1, 0xd0, 0x38, 0xd6, 0x3f, 0xd4, 0x42, 0xa1, 0x32, 0x62, 0xc4, 0x58, + 0x83, 0x0a, 0xbd, 0xf8, 0x35, 0xa8, 0x00, 0xaf, 0x8a, 0xfd, 0xd9, 0x94, 0xd8, 0x9f, 0x0b, 0x62, + 0xbf, 0x0e, 0x65, 0xe1, 0xb5, 0x62, 0x3b, 0x69, 0x96, 0x11, 0xdc, 0x24, 0x37, 0x2e, 0x4c, 0x76, + 0xe3, 0x53, 0x78, 0x3c, 0x71, 0x0e, 0x79, 0x11, 0x3c, 0x8d, 0xfa, 0xbb, 0x89, 0x1b, 0x0f, 0x10, + 0x5f, 0x4a, 0xe5, 0xb7, 0xa1, 0xa8, 0xb6, 0x41, 0xad, 0x5e, 0xf8, 0xd9, 0x11, 0xc7, 0xe9, 0x9d, + 0xaf, 0xfe, 0x23, 0x0d, 0xae, 0xc5, 0x64, 0x0c, 0x99, 0xcb, 0x7a, 0x5c, 0xca, 0x52, 0x73, 0x3e, + 0xa8, 0x6e, 0xe5, 0xcc, 0x57, 0x15, 0xfc, 0x2f, 0x1a, 0xcc, 0xc5, 0x26, 0x53, 0xaa, 0x1a, 0x2d, + 0xb5, 0xaa, 0x89, 0x54, 0x23, 0xd3, 0xf1, 0x6a, 0x24, 0x51, 0xd1, 0x64, 0xd2, 0x2a, 0x9a, 0x58, + 0x65, 0x94, 0x4d, 0x56, 0x46, 0x29, 0x55, 0x4d, 0x2e, 0xb5, 0xaa, 0xd1, 0xf7, 0x21, 0x87, 0x75, + 0x19, 0x69, 0x41, 0xc5, 0x61, 0xae, 0x3d, 0x76, 0xba, 0xac, 0x1d, 0x2a, 0x8e, 0x83, 0x28, 0x2d, + 0x5e, 0xe0, 0xce, 0x6e, 0x35, 0x68, 0x98, 0x8c, 0x46, 0x57, 0xe9, 0xfb, 0x50, 0x3e, 0x1c, 0xbb, + 0x41, 0x0f, 0xf8, 0x2a, 0x54, 0xb0, 0x0a, 0x77, 0xb7, 0x2e, 0x3a, 0xf2, 0x99, 0x2b, 0xb3, 0x36, + 0x1b, 0xd2, 0x32, 0xa7, 0x6e, 0x71, 0x0a, 0xca, 0x0c, 0xd7, 0xb6, 0x68, 0x94, 0x5c, 0x6f, 0x43, + 0x95, 0x53, 0xa0, 0xb0, 0xca, 0xa7, 0x9e, 0xf7, 0xfb, 0x4a, 0xee, 0x84, 0xe5, 0xad, 0xc7, 0x3e, + 0xfa, 0x74, 0x65, 0xea, 0x9f, 0x9f, 0xae, 0x54, 0x0e, 0x1d, 0x66, 0x0c, 0x06, 0x76, 0x57, 0x50, + 0xab, 0x86, 0xb2, 0x0a, 0x19, 0xb3, 0x27, 0x0a, 0xf5, 0x32, 0xe5, 0x43, 0x7d, 0x4f, 0x30, 0x15, + 0x07, 0x90, 0x4c, 0xef, 0x40, 0xe1, 0x04, 0x0b, 0xfc, 0x2f, 0x7c, 0x72, 0x45, 0xaf, 0xdf, 0x04, + 0x90, 0xaf, 0x5d, 0xfc, 0x86, 0x97, 0x22, 0x5d, 0x6f, 0x59, 0x89, 0xa1, 0xbf, 0x0a, 0x33, 0xbb, + 0xa6, 0x75, 0xda, 0x1e, 0x98, 0x5d, 0xde, 0x94, 0xe7, 0x06, 0xa6, 0x75, 0xaa, 0xf6, 0xba, 0x9e, + 0xdc, 0x8b, 0xef, 0xd1, 0xe0, 0x0b, 0xa8, 0xa0, 0xd4, 0x7f, 0xa2, 0x01, 0xe1, 0x48, 0x65, 0x8e, + 0x41, 0x61, 0x29, 0xc2, 0x88, 0x16, 0x0e, 0x23, 0x35, 0x28, 0xf4, 0x1d, 0x7b, 0x3c, 0xda, 0x52, + 0xe1, 0x45, 0x81, 0x9c, 0x7e, 0x80, 0x8f, 0x5d, 0xa2, 0x7f, 0x10, 0xc0, 0x17, 0x0d, 0x3b, 0xfa, + 0xcf, 0xb8, 0xf7, 0x05, 0x42, 0xb4, 0xc7, 0xc3, 0xa1, 0xe1, 0x5c, 0xfc, 0x6f, 0x64, 0xf9, 0xad, + 0x06, 0x0b, 0x11, 0x85, 0x04, 0x91, 0x8a, 0xb9, 0x9e, 0x39, 0xe4, 0x49, 0x0c, 0x25, 0x29, 0xd2, + 0x00, 0x11, 0x6d, 0x23, 0x45, 0xe7, 0x11, 0x6a, 0x23, 0x9f, 0x86, 0x59, 0xb4, 0xbf, 0xb6, 0x4f, + 0x22, 0x44, 0x8b, 0x61, 0x49, 0x23, 0x08, 0x1b, 0x59, 0xbc, 0xc1, 0xc5, 0x48, 0x13, 0x99, 0x08, + 0x19, 0xdf, 0x81, 0x32, 0x35, 0xde, 0x7f, 0xcd, 0x74, 0x3d, 0xbb, 0xef, 0x18, 0x43, 0x6e, 0x24, + 0x27, 0xe3, 0xee, 0x29, 0xf3, 0x64, 0x98, 0x90, 0x10, 0x3f, 0x7b, 0x37, 0x24, 0x99, 0x00, 0xf4, + 0xd7, 0xa1, 0xa8, 0xda, 0xb0, 0x94, 0xce, 0xfa, 0xb9, 0x68, 0x67, 0xbd, 0x14, 0xed, 0xe6, 0xdf, + 0xdc, 0xe5, 0xed, 0xb3, 0xd9, 0x55, 0xf1, 0xf3, 0x97, 0x1a, 0x94, 0x42, 0x22, 0x92, 0x2d, 0x98, + 0x1f, 0x18, 0x1e, 0xb3, 0xba, 0x17, 0xc7, 0x0f, 0x94, 0x78, 0xd2, 
0x2a, 0x83, 0x1e, 0x3d, 0x2c, + 0x3b, 0xad, 0x4a, 0xfa, 0xe0, 0x34, 0xff, 0x07, 0x79, 0x97, 0x39, 0xa6, 0x74, 0xc8, 0x70, 0xc8, + 0xf5, 0xbb, 0x47, 0x49, 0xc0, 0x0f, 0x2e, 0x1c, 0x5c, 0x2a, 0x56, 0x42, 0xfa, 0xdf, 0xa2, 0xd6, + 0x2d, 0x0d, 0x2b, 0xd9, 0xf4, 0x5f, 0x71, 0x5b, 0xd3, 0xa9, 0xb7, 0x15, 0xc8, 0x97, 0xb9, 0x4a, + 0xbe, 0x2a, 0x64, 0x46, 0x77, 0xee, 0xc8, 0x96, 0x99, 0x0f, 0x05, 0xe6, 0x45, 0x19, 0x3f, 0xf9, + 0x50, 0x60, 0x36, 0x64, 0x9f, 0xc8, 0x87, 0x88, 0x79, 0x71, 0x43, 0x36, 0x84, 0x7c, 0xa8, 0xbf, + 0x0d, 0xf5, 0x34, 0x3f, 0x91, 0x26, 0x7a, 0x07, 0x66, 0x5c, 0x44, 0x99, 0x2c, 0x19, 0x02, 0x52, + 0xd6, 0x05, 0xd4, 0xfa, 0xaf, 0x34, 0xa8, 0x44, 0x2e, 0x36, 0x92, 0x3b, 0x73, 0x32, 0x77, 0x96, + 0x41, 0xb3, 0x50, 0x19, 0x19, 0xaa, 0x59, 0x1c, 0xba, 0x8f, 0xfa, 0xd6, 0xa8, 0x76, 0x9f, 0x43, + 0xae, 0x7c, 0xd5, 0xd7, 0x5c, 0x0e, 0x9d, 0xe0, 0xe1, 0x8a, 0x54, 0x3b, 0xe1, 0x50, 0x4f, 0x1e, + 0x4c, 0xeb, 0xe1, 0x1b, 0x85, 0xf8, 0x80, 0x50, 0x40, 0xde, 0xea, 0xeb, 0x00, 0x81, 0xec, 0xa9, + 0x69, 0xf5, 0xb0, 0x84, 0xcd, 0x51, 0x1c, 0xeb, 0x4c, 0x3c, 0x78, 0x4b, 0xc1, 0xb7, 0x0d, 0xcf, + 0xe0, 0xf5, 0xa9, 0xc3, 0xdc, 0xf1, 0xc0, 0xeb, 0x04, 0xa9, 0x3d, 0x84, 0xe1, 0xb5, 0x9d, 0x80, + 0xa4, 0xd9, 0xd4, 0x53, 0x7d, 0x08, 0x29, 0xa8, 0xa4, 0xe4, 0x51, 0x70, 0x3e, 0x31, 0xcb, 0xcd, + 0x64, 0x60, 0x9c, 0xb0, 0x41, 0xa8, 0xce, 0x0a, 0x10, 0x5c, 0x0e, 0x04, 0x8e, 0x42, 0xd5, 0x44, + 0x08, 0x43, 0xd6, 0x61, 0xda, 0x53, 0xa6, 0xb1, 0x32, 0x59, 0x86, 0x43, 0xdb, 0xb4, 0x3c, 0x3a, + 0xed, 0xb9, 0xdc, 0x87, 0x96, 0xd2, 0xa7, 0xf1, 0x32, 0x4c, 0x29, 0x44, 0x85, 0xe2, 0x98, 0x5b, + 0xc7, 0x99, 0x31, 0xc0, 0x8d, 0x35, 0xca, 0x87, 0x3c, 0x3f, 0xb3, 0x73, 0x36, 0x1c, 0x0d, 0x0c, + 0xa7, 0x23, 0x5f, 0x28, 0x33, 0xf8, 0xd1, 0x2a, 0x8e, 0x26, 0xcf, 0x40, 0x55, 0xa1, 0xd4, 0x17, + 0x0b, 0x69, 0x9c, 0x09, 0xbc, 0xde, 0x86, 0x05, 0xfc, 0xf8, 0xb0, 0x63, 0xb9, 0x9e, 0x61, 0x79, + 0x97, 0x47, 0x65, 0x3f, 0xca, 0xca, 0x48, 0x13, 0x89, 0xb2, 0xc2, 0x37, 0x31, 0xca, 0x9e, 0xc3, + 0x62, 0x94, 0xa9, 0x34, 0xe1, 0x86, 0xef, 0x53, 0xc2, 0x7e, 0x83, 0xb0, 0x23, 0x29, 0xdb, 0x38, + 0xeb, 0x3b, 0xd6, 0xc3, 0x3f, 0xeb, 0xfe, 0x58, 0x83, 0x4a, 0x84, 0x17, 0xb9, 0x03, 0x79, 0xbc, + 0xb6, 0xa4, 0xcf, 0x24, 0xdf, 0xab, 0xe4, 0xd7, 0x22, 0xb9, 0x20, 0x5a, 0x4c, 0x6a, 0x32, 0x18, + 0x92, 0x15, 0x28, 0x8d, 0x1c, 0x7b, 0x78, 0x2c, 0xb9, 0x8a, 0xb7, 0x5d, 0xe0, 0xa8, 0x5d, 0xc4, + 0xe8, 0xbf, 0xcf, 0xc0, 0x3c, 0x1e, 0x9f, 0x1a, 0x56, 0x9f, 0x3d, 0x12, 0x8d, 0x62, 0x2b, 0xe7, + 0xb1, 0x91, 0xbc, 0x46, 0x1c, 0x47, 0xbf, 0x33, 0x16, 0xe2, 0xdf, 0x19, 0x43, 0xed, 0x6f, 0xf1, + 0x92, 0xf6, 0x77, 0xe6, 0xca, 0xf6, 0x17, 0xd2, 0xda, 0xdf, 0x50, 0xd3, 0x59, 0x8a, 0x36, 0x9d, + 0xe1, 0xc6, 0xb8, 0x1c, 0x6b, 0x8c, 0x55, 0x43, 0x5a, 0x99, 0xd8, 0x90, 0xce, 0x7e, 0xa1, 0x86, + 0x74, 0xee, 0xa1, 0xdf, 0x31, 0x78, 0x7e, 0x97, 0xa6, 0xef, 0xd6, 0xaa, 0xe2, 0xcc, 0x3e, 0x42, + 0x77, 0x81, 0x84, 0x2f, 0x4c, 0x5a, 0xeb, 0xb3, 0x31, 0x6b, 0x5d, 0x08, 0x92, 0xa4, 0x39, 0x64, + 0x5f, 0xd9, 0x54, 0x3f, 0x80, 0x62, 0x4b, 0x4a, 0xf0, 0xe8, 0x8d, 0xf4, 0x49, 0x28, 0xf3, 0x30, + 0xe2, 0x7a, 0xc6, 0x70, 0x74, 0x3c, 0x14, 0x56, 0x9a, 0xa1, 0x25, 0x1f, 0xb7, 0xe7, 0xea, 0x9b, + 0x90, 0x6f, 0x1b, 0xbc, 0x45, 0x48, 0x10, 0x4f, 0x27, 0x88, 0x83, 0x5d, 0xb4, 0xd0, 0x2e, 0xfa, + 0xc7, 0x1a, 0x40, 0xa0, 0x8b, 0xaf, 0x72, 0x8a, 0x75, 0x28, 0xb8, 0x28, 0x8c, 0x2a, 0x07, 0xe6, + 0x02, 0xf5, 0x21, 0x5e, 0xd2, 0x2b, 0xaa, 0x2b, 0xbd, 0x90, 0xbc, 0x18, 0xbe, 0xf1, 0x6c, 0x2c, + 0x85, 0x2b, 0xc5, 0x4b, 0xae, 0x01, 0xe5, 0x33, 0xef, 0xc2, 0x5c, 0xac, 0xbb, 0x20, 0x65, 
0x28, + 0xee, 0x1f, 0x1c, 0xb7, 0x28, 0x3d, 0xa0, 0xd5, 0x29, 0xb2, 0x00, 0x73, 0x7b, 0x9b, 0xef, 0x1c, + 0xef, 0xee, 0x1c, 0xb5, 0x8e, 0x3b, 0x74, 0xf3, 0x6e, 0xab, 0x5d, 0xd5, 0x38, 0x12, 0xc7, 0xc7, + 0x9d, 0x83, 0x83, 0xe3, 0xdd, 0x4d, 0x7a, 0xaf, 0x55, 0x9d, 0x26, 0xf3, 0x50, 0x79, 0x6b, 0xff, + 0x8d, 0xfd, 0x83, 0xb7, 0xf7, 0xe5, 0xe2, 0x4c, 0xf3, 0xe7, 0x1a, 0xe4, 0x39, 0x7b, 0xe6, 0x90, + 0xef, 0xc1, 0x8c, 0xdf, 0xa4, 0x90, 0x6b, 0x91, 0xd6, 0x26, 0xdc, 0xb8, 0xd4, 0x1f, 0x8b, 0x4c, + 0x29, 0xe3, 0xd4, 0xa7, 0xc8, 0x26, 0x94, 0x7c, 0xe2, 0xa3, 0xe6, 0x97, 0x61, 0xd1, 0xfc, 0xb7, + 0x06, 0x55, 0x69, 0x97, 0xf7, 0x98, 0xc5, 0x1c, 0xc3, 0xb3, 0x7d, 0xc1, 0xb0, 0x5f, 0x89, 0x71, + 0x0d, 0x37, 0x3f, 0x93, 0x05, 0xdb, 0x01, 0xb8, 0xc7, 0x3c, 0x55, 0x2b, 0x5e, 0x4f, 0x4f, 0x8e, + 0x82, 0xc7, 0x8d, 0x09, 0x99, 0x53, 0xb1, 0xba, 0x07, 0x10, 0x38, 0x26, 0x09, 0x72, 0x7d, 0x22, + 0xbc, 0xd6, 0xaf, 0xa7, 0xce, 0xf9, 0x27, 0xfd, 0x4d, 0x16, 0x0a, 0x7c, 0xc2, 0x64, 0x0e, 0x79, + 0x0d, 0x2a, 0xdf, 0x37, 0xad, 0x9e, 0xff, 0x67, 0x03, 0x72, 0x2d, 0xed, 0x3f, 0x0e, 0x82, 0x6d, + 0x7d, 0xf2, 0xdf, 0x1f, 0xf0, 0x0a, 0xca, 0xea, 0xf3, 0x65, 0x97, 0x59, 0x1e, 0x99, 0xf0, 0xcd, + 0xbc, 0xfe, 0x78, 0x02, 0xef, 0xb3, 0x68, 0x41, 0x29, 0xf4, 0x3d, 0x3e, 0xac, 0xad, 0xc4, 0x57, + 0xfa, 0xcb, 0xd8, 0xdc, 0x03, 0x08, 0x9e, 0xa2, 0xc8, 0x25, 0x0f, 0xeb, 0xf5, 0xeb, 0xa9, 0x73, + 0x3e, 0xa3, 0x37, 0xd4, 0x91, 0xc4, 0x9b, 0xd6, 0xa5, 0xac, 0x9e, 0x48, 0x7d, 0x57, 0x0b, 0x31, + 0x3b, 0x82, 0xb9, 0xd8, 0xb3, 0x0b, 0xb9, 0xea, 0x05, 0xb7, 0xbe, 0x3a, 0x99, 0xc0, 0xe7, 0xfb, + 0x83, 0xd0, 0xc3, 0x9b, 0x7a, 0xce, 0xb9, 0x9a, 0xb3, 0x3e, 0x89, 0x20, 0x2c, 0x73, 0xf3, 0xaf, + 0x59, 0xa8, 0xb6, 0x3d, 0x87, 0x19, 0x43, 0xd3, 0xea, 0x2b, 0x93, 0x79, 0x05, 0xf2, 0x32, 0xf1, + 0x3d, 0xec, 0x15, 0x6f, 0x68, 0xdc, 0x1f, 0x1e, 0xc9, 0xdd, 0x6c, 0x68, 0x64, 0xef, 0x11, 0xde, + 0xce, 0x86, 0x46, 0xde, 0xf9, 0x7a, 0xee, 0x67, 0x43, 0x23, 0xef, 0x7e, 0x7d, 0x37, 0xb4, 0xa1, + 0x91, 0x43, 0x98, 0x97, 0xb1, 0xe2, 0x91, 0x44, 0x87, 0x0d, 0x8d, 0x1c, 0xc1, 0x42, 0x98, 0xa3, + 0x2c, 0x21, 0xc9, 0x8d, 0xe8, 0xba, 0x68, 0x91, 0x1c, 0xd2, 0x70, 0x5a, 0xb5, 0xcb, 0xf9, 0x36, + 0xff, 0xa0, 0x41, 0x41, 0x45, 0xc2, 0xe3, 0xd4, 0x6e, 0x55, 0xbf, 0xac, 0x87, 0x93, 0x1b, 0x3d, + 0x75, 0x29, 0xcd, 0x23, 0x8f, 0x96, 0x5b, 0xb5, 0x8f, 0x3e, 0x5b, 0xd6, 0x3e, 0xfe, 0x6c, 0x59, + 0xfb, 0xd7, 0x67, 0xcb, 0xda, 0x2f, 0x3e, 0x5f, 0x9e, 0xfa, 0xf8, 0xf3, 0xe5, 0xa9, 0x4f, 0x3e, + 0x5f, 0x9e, 0x3a, 0xc9, 0xe3, 0xbf, 0xe9, 0x5e, 0xf8, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x3c, + 0x55, 0xe8, 0xef, 0xce, 0x27, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
@@ -5610,6 +5663,16 @@ func (m *SearchTagsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.StaleValuesThreshold != 0 { + i = encodeVarintTempo(dAtA, i, uint64(m.StaleValuesThreshold)) + i-- + dAtA[i] = 0x30 + } + if m.MaxTagsPerScope != 0 { + i = encodeVarintTempo(dAtA, i, uint64(m.MaxTagsPerScope)) + i-- + dAtA[i] = 0x28 + } if m.End != 0 { i = encodeVarintTempo(dAtA, i, uint64(m.End)) i-- @@ -5657,6 +5720,16 @@ func (m *SearchTagsBlockRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) _ = i var l int _ = l + if m.StaleValueThreshold != 0 { + i = encodeVarintTempo(dAtA, i, uint64(m.StaleValueThreshold)) + i-- + dAtA[i] = 0x70 + } + if m.MaxTagsPerScope != 0 { + i = encodeVarintTempo(dAtA, i, uint64(m.MaxTagsPerScope)) + i-- + dAtA[i] = 0x68 + } if len(m.DedicatedColumns) > 0 { for iNdEx := len(m.DedicatedColumns) - 1; iNdEx >= 0; iNdEx-- { { @@ -6003,6 +6076,16 @@ func (m *SearchTagValuesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) _ = i var l int _ = l + if m.StaleValueThreshold != 0 { + i = encodeVarintTempo(dAtA, i, uint64(m.StaleValueThreshold)) + i-- + dAtA[i] = 0x38 + } + if m.MaxTagValues != 0 { + i = encodeVarintTempo(dAtA, i, uint64(m.MaxTagValues)) + i-- + dAtA[i] = 0x30 + } if m.End != 0 { i = encodeVarintTempo(dAtA, i, uint64(m.End)) i-- @@ -7872,6 +7955,12 @@ func (m *SearchTagsRequest) Size() (n int) { if m.End != 0 { n += 1 + sovTempo(uint64(m.End)) } + if m.MaxTagsPerScope != 0 { + n += 1 + sovTempo(uint64(m.MaxTagsPerScope)) + } + if m.StaleValuesThreshold != 0 { + n += 1 + sovTempo(uint64(m.StaleValuesThreshold)) + } return n } @@ -7925,6 +8014,12 @@ func (m *SearchTagsBlockRequest) Size() (n int) { n += 1 + l + sovTempo(uint64(l)) } } + if m.MaxTagsPerScope != 0 { + n += 1 + sovTempo(uint64(m.MaxTagsPerScope)) + } + if m.StaleValueThreshold != 0 { + n += 1 + sovTempo(uint64(m.StaleValueThreshold)) + } return n } @@ -8058,6 +8153,12 @@ func (m *SearchTagValuesRequest) Size() (n int) { if m.End != 0 { n += 1 + sovTempo(uint64(m.End)) } + if m.MaxTagValues != 0 { + n += 1 + sovTempo(uint64(m.MaxTagValues)) + } + if m.StaleValueThreshold != 0 { + n += 1 + sovTempo(uint64(m.StaleValueThreshold)) + } return n } @@ -11182,6 +11283,44 @@ func (m *SearchTagsRequest) Unmarshal(dAtA []byte) error { break } } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxTagsPerScope", wireType) + } + m.MaxTagsPerScope = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTempo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxTagsPerScope |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StaleValuesThreshold", wireType) + } + m.StaleValuesThreshold = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTempo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StaleValuesThreshold |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipTempo(dAtA[iNdEx:]) @@ -11544,6 +11683,44 @@ func (m *SearchTagsBlockRequest) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxTagsPerScope", wireType) + } + m.MaxTagsPerScope = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTempo + } + if iNdEx >= l 
{ + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxTagsPerScope |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StaleValueThreshold", wireType) + } + m.StaleValueThreshold = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTempo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StaleValueThreshold |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipTempo(dAtA[iNdEx:]) @@ -12410,6 +12587,44 @@ func (m *SearchTagValuesRequest) Unmarshal(dAtA []byte) error { break } } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxTagValues", wireType) + } + m.MaxTagValues = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTempo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxTagValues |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StaleValueThreshold", wireType) + } + m.StaleValueThreshold = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTempo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StaleValueThreshold |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipTempo(dAtA[iNdEx:]) diff --git a/pkg/tempopb/tempo.proto b/pkg/tempopb/tempo.proto index 53dba1d68e4..6b0939f64b1 100644 --- a/pkg/tempopb/tempo.proto +++ b/pkg/tempopb/tempo.proto @@ -163,6 +163,8 @@ message SearchTagsRequest { string query = 2; uint32 start = 3; uint32 end = 4; + uint32 maxTagsPerScope = 5; + uint32 staleValuesThreshold =6; } // SearchTagsBlockRequest takes SearchTagsRequest parameters as well as all information necessary @@ -180,6 +182,8 @@ message SearchTagsBlockRequest { uint64 size = 10; // total size of data file uint32 footerSize = 11; // size of file footer (parquet) repeated DedicatedColumn dedicatedColumns = 12; + uint32 maxTagsPerScope = 13; // Limit of tags per scope + uint32 staleValueThreshold = 14; // Limit of stale values } message SearchTagValuesBlockRequest { @@ -218,6 +222,8 @@ message SearchTagValuesRequest { string query = 2; // TraceQL query uint32 start = 4; uint32 end = 5; + uint32 maxTagValues = 6; + uint32 staleValueThreshold = 7; // Limit of stale values } message SearchTagValuesResponse { diff --git a/pkg/tempopb/utils.go b/pkg/tempopb/utils.go new file mode 100644 index 00000000000..2133f97540d --- /dev/null +++ b/pkg/tempopb/utils.go @@ -0,0 +1,19 @@ +package tempopb + +import v1 "github.com/grafana/tempo/pkg/tempopb/common/v1" + +func MakeKeyValueString(key, value string) v1.KeyValue { + return v1.KeyValue{ + Key: key, + Value: &v1.AnyValue{ + Value: &v1.AnyValue_StringValue{ + StringValue: value, + }, + }, + } +} + +func MakeKeyValueStringPtr(key, value string) *v1.KeyValue { + kv := MakeKeyValueString(key, value) + return &kv +} diff --git a/pkg/traceql/ast.go b/pkg/traceql/ast.go index b743260920c..8a824ba3302 100644 --- a/pkg/traceql/ast.go +++ b/pkg/traceql/ast.go @@ -73,6 +73,49 @@ func (r *RootExpr) withHints(h *Hints) *RootExpr { return r } +// IsNoop detects trival noop queries like {false} which never return +// results and can be used to exit early. 
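// A hedged usage sketch (doc-comment annotation; ExecuteSearch in engine.go
// below does the equivalent): parse first, then bail before fetching anything.
//
//	expr, err := Parse(`{ false }`)
//	if err == nil && expr.IsNoop() {
//		return &tempopb.SearchResponse{Metrics: &tempopb.SearchMetrics{}}, nil
//	}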
+func (r *RootExpr) IsNoop() bool { + isNoopFilter := func(x any) bool { + f, ok := x.(*SpansetFilter) + if !ok { + return false + } + + if f.Expression.referencesSpan() { + return false + } + + // Else check for static evaluation to false + v, _ := f.Expression.execute(nil) + return v.Equals(&StaticFalse) + } + + // Any spanset filter that references the span or something other + // than static false means the expression isn't noop. + // This checks one layer deep which covers most expressions. + for _, e := range r.Pipeline.Elements { + switch x := e.(type) { + case SpansetOperation: + if !isNoopFilter(x.LHS) { + return false + } + if !isNoopFilter(x.RHS) { + return false + } + case *SpansetFilter: + if !isNoopFilter(x) { + return false + } + default: + // Lots of other expressions here which aren't checked + // for noops yet. + return false + } + } + return true +} + // ********************** // Pipeline // ********************** @@ -113,12 +156,37 @@ func (p Pipeline) impliedType() StaticType { } func (p Pipeline) extractConditions(req *FetchSpansRequest) { + forceSecondPass := false + for _, element := range p.Elements { - element.extractConditions(req) + if forceSecondPass { + extractToSecondPass(req, element) + } else { + element.extractConditions(req) + } + + // If we just processed a select operation, + // then switch all remaining elements to the second pass. + if _, ok := element.(SelectOperation); ok { + forceSecondPass = true + } } - // TODO this needs to be fine-tuned a bit, e.g. { .foo = "bar" } | by(.namespace), AllConditions can still be true - if len(p.Elements) > 1 { - req.AllConditions = false +} + +func extractToSecondPass(req *FetchSpansRequest, element pipelineElement) { + req2 := &FetchSpansRequest{} + element.extractConditions(req2) + + // Copy all to second pass, except if there is already an OpNone then it suffices for all cases. 
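// Annotation, grounded in ast_conditions_test.go below: for
// `{ } | select(span.foo) | { span.foo = "a" && span.bar = "b" }` the select
// already emitted span.foo with OpNone into the second pass, so the later
// span.foo = "a" condition is skipped here as redundant, while
// span.bar = "b" is still forwarded.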
+ for _, c := range req2.Conditions { + if !req.HasAttributeWithOp(c.Attribute, OpNone) { + req.SecondPassConditions = append(req.SecondPassConditions, c) + } + } + for _, c := range req2.SecondPassConditions { + if !req.HasAttributeWithOp(c.Attribute, OpNone) { + req.SecondPassConditions = append(req.SecondPassConditions, c) + } } } @@ -154,6 +222,7 @@ func newGroupOperation(e FieldExpression) GroupOperation { func (o GroupOperation) extractConditions(request *FetchSpansRequest) { o.Expression.extractConditions(request) + request.AllConditions = false } type CoalesceOperation struct{} @@ -222,7 +291,6 @@ func (o ScalarOperation) impliedType() StaticType { func (o ScalarOperation) extractConditions(request *FetchSpansRequest) { o.LHS.extractConditions(request) o.RHS.extractConditions(request) - request.AllConditions = false } type Aggregate struct { @@ -249,6 +317,7 @@ func (a Aggregate) impliedType() StaticType { } func (a Aggregate) extractConditions(request *FetchSpansRequest) { + request.AllConditions = false if a.e != nil { a.e.extractConditions(request) } @@ -378,7 +447,6 @@ func (ScalarFilter) __spansetExpression() {} func (f ScalarFilter) extractConditions(request *FetchSpansRequest) { f.lhs.extractConditions(request) f.rhs.extractConditions(request) - request.AllConditions = false } // ********************** @@ -1111,33 +1179,36 @@ func (a *MetricsAggregate) init(q *tempopb.QueryRangeRequest, mode AggregateMode case metricsAggregateCountOverTime: innerAgg = func() VectorAggregator { return NewCountOverTimeAggregator() } a.simpleAggregationOp = sumAggregation + a.exemplarFn = exemplarNaN case metricsAggregateMinOverTime: innerAgg = func() VectorAggregator { return NewOverTimeAggregator(a.attr, minAggregation) } a.simpleAggregationOp = minAggregation + a.exemplarFn = exemplarFnFor(a.attr) case metricsAggregateMaxOverTime: innerAgg = func() VectorAggregator { return NewOverTimeAggregator(a.attr, maxAggregation) } a.simpleAggregationOp = maxAggregation + a.exemplarFn = exemplarFnFor(a.attr) case metricsAggregateRate: innerAgg = func() VectorAggregator { return NewRateAggregator(1.0 / time.Duration(q.Step).Seconds()) } a.simpleAggregationOp = sumAggregation + a.exemplarFn = exemplarNaN - case metricsAggregateHistogramOverTime, metricsAggregateQuantileOverTime: - // Histograms and quantiles are implemented as count_over_time() by(2^log2(attr)) for now - // I.e. 
a duration of 500ms will be in __bucket==0.512s + case metricsAggregateHistogramOverTime: innerAgg = func() VectorAggregator { return NewCountOverTimeAggregator() } + byFunc = bucketizeFnFor(a.attr) byFuncLabel = internalLabelBucket a.simpleAggregationOp = sumAggregation - switch a.attr { - case IntrinsicDurationAttribute: - // Optimal implementation for duration attribute - byFunc = a.bucketizeSpanDuration - default: - // Basic implementation for all other attributes - byFunc = a.bucketizeAttribute - } + a.exemplarFn = exemplarNaN // Histogram final series are counts so exemplars are placeholders + + case metricsAggregateQuantileOverTime: + innerAgg = func() VectorAggregator { return NewCountOverTimeAggregator() } + byFunc = bucketizeFnFor(a.attr) + byFuncLabel = internalLabelBucket + a.simpleAggregationOp = sumAggregation + a.exemplarFn = exemplarFnFor(a.attr) } switch mode { @@ -1153,10 +1224,20 @@ func (a *MetricsAggregate) init(q *tempopb.QueryRangeRequest, mode AggregateMode a.agg = NewGroupingAggregator(a.op.String(), func() RangeAggregator { return NewStepAggregator(q.Start, q.End, q.Step, innerAgg) }, a.by, byFunc, byFuncLabel) - a.exemplarFn = exemplarFnFor(a.attr) } -func (a *MetricsAggregate) bucketizeSpanDuration(s Span) (Static, bool) { +func bucketizeFnFor(attr Attribute) func(Span) (Static, bool) { + switch attr { + case IntrinsicDurationAttribute: + // Optimal implementation for duration attribute + return bucketizeDuration + default: + // Basic implementation for all other attributes + return bucketizeAttribute(attr) + } +} + +func bucketizeDuration(s Span) (Static, bool) { d := s.DurationNanos() if d < 2 { return NewStaticNil(), false @@ -1165,26 +1246,30 @@ func (a *MetricsAggregate) bucketizeSpanDuration(s Span) (Static, bool) { return NewStaticFloat(Log2Bucketize(d) / float64(time.Second)), true } -func (a *MetricsAggregate) bucketizeAttribute(s Span) (Static, bool) { - f, t := FloatizeAttribute(s, a.attr) - - switch t { - case TypeInt: - if f < 2 { - return NewStaticNil(), false - } - // Bucket is the value rounded up to the nearest power of 2 - return NewStaticFloat(Log2Bucketize(uint64(f))), true - case TypeDuration: - if f < 2 { +// exemplarAttribute captures a closure around the attribute so it doesn't have to be passed along with every span. +// should be more efficient. +func bucketizeAttribute(a Attribute) func(Span) (Static, bool) { + return func(s Span) (Static, bool) { + f, t := FloatizeAttribute(s, a) + + switch t { + case TypeInt: + if f < 2 { + return NewStaticNil(), false + } + // Bucket is the value rounded up to the nearest power of 2 + return NewStaticFloat(Log2Bucketize(uint64(f))), true + case TypeDuration: + if f < 2 { + return NewStaticNil(), false + } + // Bucket is log2(nanos) converted to float seconds + return NewStaticFloat(Log2Bucketize(uint64(f)) / float64(time.Second)), true + default: + // TODO(mdisibio) - Add support for floats, we need to map them into buckets. + // Because of the range of floats, we need a native histogram approach. return NewStaticNil(), false } - // Bucket is log2(nanos) converted to float seconds - return NewStaticFloat(Log2Bucketize(uint64(f)) / float64(time.Second)), true - default: - // TODO(mdisibio) - Add support for floats, we need to map them into buckets. - // Because of the range of floats, we need a native histogram approach. 
- return NewStaticNil(), false } } diff --git a/pkg/traceql/ast_conditions_test.go b/pkg/traceql/ast_conditions_test.go index 1cd7ddf29bb..35f35bb330a 100644 --- a/pkg/traceql/ast_conditions_test.go +++ b/pkg/traceql/ast_conditions_test.go @@ -147,6 +147,21 @@ func TestScalarFilter_extractConditions(t *testing.T) { }, allConditions: false, }, + { + query: `({ span.http.status_code = 200 } | count()) > ({ span.http.status_code = 500 } | count())`, + conditions: []Condition{ + newCondition(NewScopedAttribute(AttributeScopeSpan, false, "http.status_code"), OpEqual, NewStaticInt(200)), + newCondition(NewScopedAttribute(AttributeScopeSpan, false, "http.status_code"), OpEqual, NewStaticInt(500)), + }, + allConditions: false, + }, + { + query: `{ .foo = "a" } | 3 > 2`, + conditions: []Condition{ + newCondition(NewAttribute("foo"), OpEqual, NewStaticString("a")), + }, + allConditions: true, + }, } for _, tt := range tests { t.Run(tt.query, func(t *testing.T) { @@ -230,7 +245,7 @@ func TestSelect_extractConditions(t *testing.T) { secondPassConditions: []Condition{ newCondition(NewScopedAttribute(AttributeScopeResource, false, "service.name"), OpNone), }, - allConditions: false, + allConditions: true, }, { query: `{ } | select(.name,name)`, @@ -241,7 +256,20 @@ func TestSelect_extractConditions(t *testing.T) { newCondition(NewAttribute("name"), OpNone), newCondition(NewIntrinsic(IntrinsicName), OpNone), }, - allConditions: false, + allConditions: true, + }, + { + // Pipleline elements after a select are always directed to the second pass + query: `{ } | select(span.foo) | { span.foo = "a" && span.bar = "b"}`, + conditions: []Condition{ + newCondition(NewIntrinsic(IntrinsicSpanStartTime), OpNone), + }, + secondPassConditions: []Condition{ + // span.foo=a has no effect because it's already covered by the select statement + newCondition(NewScopedAttribute(AttributeScopeSpan, false, "foo"), OpNone), + newCondition(NewScopedAttribute(AttributeScopeSpan, false, "bar"), OpEqual, NewStaticString("b")), + }, + allConditions: true, }, } for _, tt := range tests { diff --git a/pkg/traceql/ast_stringer.go b/pkg/traceql/ast_stringer.go index 38e39d670be..b2bbc66d0e6 100644 --- a/pkg/traceql/ast_stringer.go +++ b/pkg/traceql/ast_stringer.go @@ -89,7 +89,12 @@ func (s Static) EncodeToString(quotes bool) string { i, _ := s.Int() return strconv.Itoa(i) case TypeFloat: - return strconv.FormatFloat(s.Float(), 'g', -1, 64) + f := strconv.FormatFloat(s.Float(), 'g', -1, 64) + // if the float string doesn't contain e or ., then append .0 to distinguish it from an int + if !strings.ContainsAny(f, "e.") { + f = f + ".0" + } + return f case TypeString: var str string if len(s.valBytes) > 0 { diff --git a/pkg/traceql/ast_test.go b/pkg/traceql/ast_test.go index 6e383a794cb..42962661f0d 100644 --- a/pkg/traceql/ast_test.go +++ b/pkg/traceql/ast_test.go @@ -10,6 +10,38 @@ import ( "github.com/stretchr/testify/require" ) +func TestRootExprIsNoop(t *testing.T) { + noops := []string{ + "{false}", + "{1=0}", + "({false})", + "{false} && {false}", + "{false} >> {false}", + } + + for _, q := range noops { + expr, err := Parse(q) + require.NoError(t, err) + require.True(t, expr.IsNoop(), "Query should be noop: %v", q) + } + + ops := []string{ + "{true}", + "{1=1}", + "{.foo = 1}", + "{false} || {true}", + + // This is a noop but not yet detected + "{false} && {true}", + } + + for _, q := range ops { + expr, err := Parse(q) + require.NoError(t, err) + require.False(t, expr.IsNoop(), "Query should not be noop: %v", q) + } +} + func 
TestNewStaticNil(t *testing.T) { s := NewStaticNil() assert.Equal(t, TypeNil, s.Type) @@ -99,9 +131,12 @@ func TestStatic_String(t *testing.T) { {arg: nil, want: "nil"}, {arg: 101, want: "101"}, {arg: -10, want: "-10"}, - {arg: -1.0, want: "-1"}, - {arg: 0.0, want: "0"}, - {arg: 10.0, want: "10"}, + {arg: -1.0, want: "-1.0"}, + {arg: 0.0, want: "0.0"}, + {arg: 10.0, want: "10.0"}, + {arg: 12.34, want: "12.34"}, + {arg: 1.23e+23, want: "1.23e+23"}, + {arg: 12.34e+2, want: "1234.0"}, {arg: "test", want: "`test`"}, {arg: true, want: "true"}, {arg: StatusOk, want: "ok"}, @@ -578,7 +613,7 @@ func TestPipelineExtractConditions(t *testing.T) { newCondition(NewAttribute("foo1"), OpEqual, NewStaticString("a")), newCondition(NewAttribute("foo2"), OpEqual, NewStaticString("b")), }, - AllConditions: false, + AllConditions: true, }, }, { diff --git a/pkg/traceql/engine.go b/pkg/traceql/engine.go index e3288d7e64b..4120bf3c607 100644 --- a/pkg/traceql/engine.go +++ b/pkg/traceql/engine.go @@ -55,6 +55,13 @@ func (e *Engine) ExecuteSearch(ctx context.Context, searchReq *tempopb.SearchReq return nil, err } + if rootExpr.IsNoop() { + return &tempopb.SearchResponse{ + Traces: nil, + Metrics: &tempopb.SearchMetrics{}, + }, nil + } + fetchSpansRequest.StartTimeUnixNanos = unixSecToNano(searchReq.Start) fetchSpansRequest.EndTimeUnixNanos = unixSecToNano(searchReq.End) @@ -170,6 +177,14 @@ func (e *Engine) ExecuteTagValues( span.SetAttributes(attribute.String("pipeline", rootExpr.Pipeline.String())) span.SetAttributes(attribute.String("autocompleteReq", fmt.Sprint(autocompleteReq))) + // If the tag we are fetching is already filtered in the query, then this is a noop. + // I.e. we are autocompleting resource.service.name and the query was {resource.service.name="foo"} + for _, c := range autocompleteReq.Conditions { + if c.Attribute == tag && c.Op == OpEqual { + return nil + } + } + return fetcher.Fetch(ctx, autocompleteReq, cb) } @@ -223,12 +238,6 @@ func (e *Engine) createAutocompleteRequest(tag Attribute, pipeline Pipeline) Fet pipeline.extractConditions(&req) - // TODO: remove other conditions for the wantAttr we're searching for - // for _, cond := range fetchSpansRequest.Conditions { - // if cond.Attribute == wantAttr { - // return fmt.Errorf("cannot search for tag values for tag that is already used in query") - // } - // } req.Conditions = append(req.Conditions, Condition{ Attribute: tag, Op: OpNone, diff --git a/pkg/traceql/engine_metrics.go b/pkg/traceql/engine_metrics.go index 94148cd3726..5e1e1cec67a 100644 --- a/pkg/traceql/engine_metrics.go +++ b/pkg/traceql/engine_metrics.go @@ -296,6 +296,7 @@ func (set SeriesSet) ToProtoDiff(req *tempopb.QueryRangeRequest, rangeForLabels }, ) } + exemplars = append(exemplars, tempopb.Exemplar{ Labels: labels, Value: e.Value, @@ -907,6 +908,14 @@ func optimize(req *FetchSpansRequest) { return } + // Unscoped attributes like .foo require the second pass to evaluate + for _, c := range req.Conditions { + if c.Attribute.Scope == AttributeScopeNone && c.Attribute.Intrinsic == IntrinsicNone { + // Unscoped (non-intrinsic) attribute + return + } + } + // There is an issue where multiple conditions &&'ed on the same // attribute can look like AllConditions==true, but are implemented // in the storage layer like ||'ed and require the second pass callback (engine). @@ -1101,14 +1110,21 @@ func (e *MetricsEvalulator) sampleExemplar(id []byte) bool { // MetricsFrontendEvaluator pipes the sharded job results back into the engine for the rest // of the pipeline. i.e. 
This evaluator is for the query-frontend. type MetricsFrontendEvaluator struct { + mtx sync.Mutex metricsPipeline metricsFirstStageElement } func (m *MetricsFrontendEvaluator) ObserveSeries(in []*tempopb.TimeSeries) { + m.mtx.Lock() + defer m.mtx.Unlock() + m.metricsPipeline.observeSeries(in) } func (m *MetricsFrontendEvaluator) Results() SeriesSet { + m.mtx.Lock() + defer m.mtx.Unlock() + return m.metricsPipeline.result() } @@ -1242,16 +1258,6 @@ func (b *SimpleAggregator) aggregateExemplars(ts *tempopb.TimeSeries, existing * } func (b *SimpleAggregator) Results() SeriesSet { - // Attach placeholder exemplars to the output - for _, ts := range b.ss { - for i, e := range ts.Exemplars { - if math.IsNaN(e.Value) { - interval := IntervalOfMs(int64(e.TimestampMs), b.start, b.end, b.step) - ts.Exemplars[i].Value = ts.Values[interval] - } - } - } - return b.ss } diff --git a/pkg/traceql/engine_metrics_test.go b/pkg/traceql/engine_metrics_test.go index c5a64148196..515109c1ec2 100644 --- a/pkg/traceql/engine_metrics_test.go +++ b/pkg/traceql/engine_metrics_test.go @@ -352,7 +352,7 @@ func TestQuantileOverTime(t *testing.T) { // Output series with quantiles per foo // Prom labels are sorted alphabetically, traceql labels maintain original order. out := SeriesSet{ - `{p="0", span.foo="bar"}`: TimeSeries{ + `{p="0.0", span.foo="bar"}`: TimeSeries{ Labels: []Label{ {Name: "span.foo", Value: NewStaticString("bar")}, {Name: "p", Value: NewStaticFloat(0)}, @@ -374,14 +374,14 @@ func TestQuantileOverTime(t *testing.T) { 0, }, }, - `{p="1", span.foo="bar"}`: TimeSeries{ + `{p="1.0", span.foo="bar"}`: TimeSeries{ Labels: []Label{ {Name: "span.foo", Value: NewStaticString("bar")}, {Name: "p", Value: NewStaticFloat(1)}, }, Values: []float64{_512ns, _256ns, 0}, }, - `{p="0", span.foo="baz"}`: TimeSeries{ + `{p="0.0", span.foo="baz"}`: TimeSeries{ Labels: []Label{ {Name: "span.foo", Value: NewStaticString("baz")}, {Name: "p", Value: NewStaticFloat(0)}, @@ -401,7 +401,7 @@ func TestQuantileOverTime(t *testing.T) { percentileHelper(0.5, _512ns, _512ns, _512ns), }, }, - `{p="1", span.foo="baz"}`: TimeSeries{ + `{p="1.0", span.foo="baz"}`: TimeSeries{ Labels: []Label{ {Name: "span.foo", Value: NewStaticString("baz")}, {Name: "p", Value: NewStaticFloat(1)}, diff --git a/pkg/traceql/engine_test.go b/pkg/traceql/engine_test.go index f7f0599b4e4..11f0e8d4573 100644 --- a/pkg/traceql/engine_test.go +++ b/pkg/traceql/engine_test.go @@ -666,9 +666,15 @@ func TestExecuteTagValues(t *testing.T) { {Type: "string", Value: "redis call"}, }, }, + { + name: "noop", // autocompleting an attribute already filtered by the query + attribute: "name", + query: `{ name = "foo" }`, + expectedValues: []tempopb.TagValue{}, + }, } { t.Run(tc.name, func(t *testing.T) { - distinctValues := collector.NewDistinctValue[tempopb.TagValue](100_000, func(v tempopb.TagValue) int { return len(v.Type) + len(v.Value) }) + distinctValues := collector.NewDistinctValue[tempopb.TagValue](100_000, 0, 0, func(v tempopb.TagValue) int { return len(v.Type) + len(v.Value) }) // Ugly hack to make the mock fetcher work with a bad query fetcherQuery := tc.query diff --git a/pkg/traceql/storage.go b/pkg/traceql/storage.go index c7f18416c6c..8c40a282794 100644 --- a/pkg/traceql/storage.go +++ b/pkg/traceql/storage.go @@ -124,6 +124,22 @@ func (f *FetchSpansRequest) HasAttribute(a Attribute) bool { return false } +func (f *FetchSpansRequest) HasAttributeWithOp(a Attribute, o Operator) bool { + for _, cc := range f.Conditions { + if cc.Attribute == a && cc.Op == o { + 
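// matched in the first-pass conditions; the second-pass list is scanned below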
return true + } + } + + for _, cc := range f.SecondPassConditions { + if cc.Attribute == a && cc.Op == o { + return true + } + } + + return false +} + type Span interface { // AttributeFor returns the attribute for the given key. If the attribute is not found then // the second return value will be false. diff --git a/pkg/traceql/test_examples.yaml b/pkg/traceql/test_examples.yaml index 836fbaa3dbe..acf7722753d 100644 --- a/pkg/traceql/test_examples.yaml +++ b/pkg/traceql/test_examples.yaml @@ -54,6 +54,7 @@ valid: - '{ event.foo = "bar" }' - '{ link.foo = "bar" }' - '{ instrumentation.foo = "bar" }' + - '{ span.foo != 3.0 }' # scoped intrinsics - '{ trace:duration > 2s }' - '{ trace:rootName = "a" }' @@ -151,7 +152,7 @@ valid: # undocumented - nested set - '{ nestedSetLeft > 3 }' - '{ } >> { kind = server } | select(nestedSetLeft, nestedSetRight, nestedSetParent)' - + # parse_fails throw an error when parsing parse_fails: - 'true' diff --git a/pkg/util/log/rate_limited_logger.go b/pkg/util/log/rate_limited_logger.go index c4646aaffeb..5ba831749e1 100644 --- a/pkg/util/log/rate_limited_logger.go +++ b/pkg/util/log/rate_limited_logger.go @@ -4,6 +4,8 @@ import ( "time" gkLog "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" "golang.org/x/time/rate" ) @@ -12,6 +14,14 @@ type RateLimitedLogger struct { logger gkLog.Logger } +var metricDropedLines = promauto.NewCounter(prometheus.CounterOpts{ + Namespace: "tempo", + Name: "dropped_log_lines_total", + Help: "The total number of log lines dropped by the rate limited logger.", +}) + +// NewRateLimitedLogger returns a new RateLimitedLogger that logs at most logsPerSecond messages per second. +// TODO: migrate to the dskit rate limited logger func NewRateLimitedLogger(logsPerSecond int, logger gkLog.Logger) *RateLimitedLogger { return &RateLimitedLogger{ limiter: rate.NewLimiter(rate.Limit(logsPerSecond), 1), @@ -21,6 +31,7 @@ func NewRateLimitedLogger(logsPerSecond int, logger gkLog.Logger) *RateLimitedLo func (l *RateLimitedLogger) Log(keyvals ...interface{}) { if !l.limiter.AllowN(time.Now(), 1) { + metricDropedLines.Inc() return } diff --git a/pkg/util/test/logger.go b/pkg/util/test/logger.go new file mode 100644 index 00000000000..efbeef90fa7 --- /dev/null +++ b/pkg/util/test/logger.go @@ -0,0 +1,42 @@ +package test + +import ( + "sync" + "testing" + "time" + + "github.com/go-kit/log" +) + +var _ log.Logger = (*TestingLogger)(nil) + +type TestingLogger struct { + t testing.TB + mtx *sync.Mutex +} + +func NewTestingLogger(t testing.TB) *TestingLogger { + return &TestingLogger{ + t: t, + mtx: &sync.Mutex{}, + } +} + +// WithT returns a new logger that logs to t. Writes between the new logger and the original logger are synchronized. +func (l *TestingLogger) WithT(t testing.TB) log.Logger { + return &TestingLogger{ + t: t, + mtx: l.mtx, + } +} + +func (l *TestingLogger) Log(keyvals ...interface{}) error { + // Prepend log with timestamp. + keyvals = append([]interface{}{time.Now().String()}, keyvals...) + + l.mtx.Lock() + l.t.Log(keyvals...) 
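// the Log call above ran under the shared mutex, so a logger created via
// WithT cannot interleave its output with this one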
+ l.mtx.Unlock() + + return nil +} diff --git a/pkg/util/test/req.go b/pkg/util/test/req.go index b33370581d1..b3b2a45d153 100644 --- a/pkg/util/test/req.go +++ b/pkg/util/test/req.go @@ -17,6 +17,17 @@ import ( "github.com/stretchr/testify/require" ) +func MakeAttribute(key, value string) *v1_common.KeyValue { + return &v1_common.KeyValue{ + Key: key, + Value: &v1_common.AnyValue{ + Value: &v1_common.AnyValue_StringValue{ + StringValue: value, + }, + }, + } +} + func MakeSpan(traceID []byte) *v1_trace.Span { return MakeSpanWithAttributeCount(traceID, rand.Int()%10+1) } @@ -354,3 +365,18 @@ func MakeTraceWithTags(traceID []byte, service string, intValue int64) *tempopb. }) return trace } + +func MakePushBytesRequest(t testing.TB, requests int, traceID []byte) *tempopb.PushBytesRequest { + trace := MakeTrace(requests, traceID) + b, err := proto.Marshal(trace) + require.NoError(t, err) + + req := &tempopb.PushBytesRequest{ + Traces: make([]tempopb.PreallocBytes, 0), + Ids: make([][]byte, 0), + } + req.Traces = append(req.Traces, tempopb.PreallocBytes{Slice: b}) + req.Ids = append(req.Ids, traceID) + + return req +} diff --git a/tempodb/backend/s3/s3.go b/tempodb/backend/s3/s3.go index c434de1339f..bd19f0ce89b 100644 --- a/tempodb/backend/s3/s3.go +++ b/tempodb/backend/s3/s3.go @@ -63,8 +63,21 @@ type overrideSignatureVersion struct { useV2 bool } +func (s *overrideSignatureVersion) RetrieveWithCredContext(cc *credentials.CredContext) (credentials.Value, error) { + v, err := s.upstream.RetrieveWithCredContext(cc) + if err != nil { + return v, err + } + + if s.useV2 && !v.SignerType.IsAnonymous() { + v.SignerType = credentials.SignatureV2 + } + + return v, nil +} + func (s *overrideSignatureVersion) Retrieve() (credentials.Value, error) { - v, err := s.upstream.Retrieve() + v, err := s.upstream.RetrieveWithCredContext(&credentials.CredContext{Client: http.DefaultClient}) if err != nil { return v, err } @@ -610,7 +623,7 @@ func fetchCreds(cfg *Config) (*credentials.Credentials, error) { creds := credentials.NewChainCredentials(chain) // error early if we cannot obtain credentials - if _, err := creds.Get(); err != nil { + if _, err := creds.GetWithContext(&credentials.CredContext{Client: http.DefaultClient}); err != nil { return nil, fmt.Errorf("failed to get credentials: %w", err) } diff --git a/tempodb/blocklist/list.go b/tempodb/blocklist/list.go index 9514ba5bfa0..55d9c2d8087 100644 --- a/tempodb/blocklist/list.go +++ b/tempodb/blocklist/list.go @@ -1,9 +1,9 @@ package blocklist import ( + "slices" "sync" - "github.com/google/uuid" "github.com/grafana/tempo/tempodb/backend" ) @@ -125,64 +125,70 @@ func (l *List) Update(tenantID string, add []*backend.BlockMeta, remove []*backe // updateInternal exists to do the work of applying updates to held PerTenant and PerTenantCompacted maps // it must be called under lock func (l *List) updateInternal(tenantID string, add []*backend.BlockMeta, remove []*backend.BlockMeta, compactedAdd []*backend.CompactedBlockMeta, compactedRemove []*backend.CompactedBlockMeta) { - // ******** Regular blocks ******** - blocklist := l.metas[tenantID] - - matchedRemovals := make(map[uuid.UUID]struct{}) - for _, b := range blocklist { - for _, rem := range remove { - if b.BlockID == rem.BlockID { - matchedRemovals[(uuid.UUID)(rem.BlockID)] = struct{}{} - break - } + hasID := func(id backend.UUID) func(*backend.BlockMeta) bool { + return func(b *backend.BlockMeta) bool { + return b.BlockID == id } } - existingMetas := make(map[uuid.UUID]struct{}) - newblocklist := 
make([]*backend.BlockMeta, 0, len(blocklist)-len(matchedRemovals)+len(add)) - // rebuild the blocklist dropping all removals - for _, b := range blocklist { - existingMetas[(uuid.UUID)(b.BlockID)] = struct{}{} - if _, ok := matchedRemovals[(uuid.UUID)(b.BlockID)]; !ok { - newblocklist = append(newblocklist, b) + hasIDC := func(id backend.UUID) func(*backend.CompactedBlockMeta) bool { + return func(b *backend.CompactedBlockMeta) bool { + return b.BlockID == id } } - // add new blocks (only if they don't already exist) - for _, b := range add { - if _, ok := existingMetas[(uuid.UUID)(b.BlockID)]; !ok { - newblocklist = append(newblocklist, b) - } - } - - l.metas[tenantID] = newblocklist - // ******** Compacted blocks ******** - compactedBlocklist := l.compactedMetas[tenantID] - - compactedRemovals := map[uuid.UUID]struct{}{} - for _, c := range compactedBlocklist { - for _, rem := range compactedRemove { - if c.BlockID == rem.BlockID { - compactedRemovals[(uuid.UUID)(rem.BlockID)] = struct{}{} - break + // ******** Regular blocks ******** + if len(add) > 0 || len(remove) > 0 || len(compactedAdd) > 0 || len(compactedRemove) > 0 { + var ( + existing = l.metas[tenantID] + final = make([]*backend.BlockMeta, 0, max(0, len(existing)+len(add)-len(remove))) + ) + + // rebuild dropping all removals + for _, b := range existing { + if slices.ContainsFunc(remove, hasID(b.BlockID)) { + continue } + final = append(final, b) } - } + // add new if they don't already exist and weren't also removed + for _, b := range add { + if slices.ContainsFunc(final, hasID(b.BlockID)) || + slices.ContainsFunc(remove, hasID(b.BlockID)) || + slices.ContainsFunc(compactedAdd, hasIDC(b.BlockID)) || + slices.ContainsFunc(compactedRemove, hasIDC(b.BlockID)) { + continue + } - existingMetas = make(map[uuid.UUID]struct{}) - newCompactedBlocklist := make([]*backend.CompactedBlockMeta, 0, len(compactedBlocklist)-len(compactedRemovals)+len(compactedAdd)) - // rebuild the blocklist dropping all removals - for _, b := range compactedBlocklist { - existingMetas[(uuid.UUID)(b.BlockID)] = struct{}{} - if _, ok := compactedRemovals[(uuid.UUID)(b.BlockID)]; !ok { - newCompactedBlocklist = append(newCompactedBlocklist, b) + final = append(final, b) } + + l.metas[tenantID] = final } - for _, b := range compactedAdd { - if _, ok := existingMetas[(uuid.UUID)(b.BlockID)]; !ok { - newCompactedBlocklist = append(newCompactedBlocklist, b) + + // ******** Compacted blocks ******** + if len(compactedAdd) > 0 || len(compactedRemove) > 0 { + var ( + existing = l.compactedMetas[tenantID] + final = make([]*backend.CompactedBlockMeta, 0, max(0, len(existing)+len(compactedAdd)-len(compactedRemove))) + ) + + // rebuild dropping all removals + for _, b := range existing { + if slices.ContainsFunc(compactedRemove, hasIDC(b.BlockID)) { + continue + } + final = append(final, b) + } + // add new if they don't already exist and weren't also removed + for _, b := range compactedAdd { + if slices.ContainsFunc(existing, hasIDC(b.BlockID)) || + slices.ContainsFunc(compactedRemove, hasIDC(b.BlockID)) { + continue + } + final = append(final, b) } - } - l.compactedMetas[tenantID] = newCompactedBlocklist + l.compactedMetas[tenantID] = final + } } diff --git a/tempodb/blocklist/list_test.go b/tempodb/blocklist/list_test.go index 0bfa3250ff9..7d870368f28 100644 --- a/tempodb/blocklist/list_test.go +++ b/tempodb/blocklist/list_test.go @@ -125,11 +125,21 @@ func TestApplyPollResults(t *testing.T) { } func TestUpdate(t *testing.T) { + var ( + _1 = 
meta("00000000-0000-0000-0000-000000000001") + _2 = meta("00000000-0000-0000-0000-000000000002") + _3 = meta("00000000-0000-0000-0000-000000000003") + _2c = compactedMeta("00000000-0000-0000-0000-000000000002") + _3c = compactedMeta("00000000-0000-0000-0000-000000000003") + ) + tests := []struct { name string existing []*backend.BlockMeta add []*backend.BlockMeta remove []*backend.BlockMeta + addC []*backend.CompactedBlockMeta + removeC []*backend.CompactedBlockMeta expected []*backend.BlockMeta }{ { @@ -142,160 +152,73 @@ func TestUpdate(t *testing.T) { { name: "add to nil", existing: nil, - add: []*backend.BlockMeta{ - { - BlockID: backend.MustParse("00000000-0000-0000-0000-000000000001"), - }, - }, - remove: nil, - expected: []*backend.BlockMeta{ - { - BlockID: backend.MustParse("00000000-0000-0000-0000-000000000001"), - }, - }, + add: []*backend.BlockMeta{_1}, + remove: nil, + expected: []*backend.BlockMeta{_1}, }, { - name: "add to existing", - existing: []*backend.BlockMeta{ - { - BlockID: backend.MustParse("00000000-0000-0000-0000-000000000001"), - }, - }, - add: []*backend.BlockMeta{ - { - BlockID: backend.MustParse("00000000-0000-0000-0000-000000000002"), - }, - }, - remove: nil, - expected: []*backend.BlockMeta{ - { - BlockID: backend.MustParse("00000000-0000-0000-0000-000000000001"), - }, - { - BlockID: backend.MustParse("00000000-0000-0000-0000-000000000002"), - }, - }, + name: "add to existing", + existing: []*backend.BlockMeta{_1}, + add: []*backend.BlockMeta{_2}, + remove: nil, + expected: []*backend.BlockMeta{_1, _2}, }, { name: "remove from nil", existing: nil, add: nil, - remove: []*backend.BlockMeta{ - { - BlockID: backend.MustParse("00000000-0000-0000-0000-000000000002"), - }, - }, + remove: []*backend.BlockMeta{_2}, expected: nil, }, { - name: "remove nil", - existing: []*backend.BlockMeta{ - { - BlockID: backend.MustParse("00000000-0000-0000-0000-000000000002"), - }, - }, - add: nil, - remove: nil, - expected: []*backend.BlockMeta{ - { - BlockID: backend.MustParse("00000000-0000-0000-0000-000000000002"), - }, - }, + name: "remove nil", + existing: []*backend.BlockMeta{_2}, + add: nil, + remove: nil, + expected: []*backend.BlockMeta{_2}, }, { - name: "remove existing", - existing: []*backend.BlockMeta{ - { - BlockID: backend.MustParse("00000000-0000-0000-0000-000000000001"), - }, - { - BlockID: backend.MustParse("00000000-0000-0000-0000-000000000002"), - }, - }, - add: nil, - remove: []*backend.BlockMeta{ - { - BlockID: backend.MustParse("00000000-0000-0000-0000-000000000001"), - }, - }, - expected: []*backend.BlockMeta{ - { - BlockID: backend.MustParse("00000000-0000-0000-0000-000000000002"), - }, - }, + name: "remove existing", + existing: []*backend.BlockMeta{_1, _2}, + add: nil, + remove: []*backend.BlockMeta{_1}, + expected: []*backend.BlockMeta{_2}, }, { - name: "remove no match", - existing: []*backend.BlockMeta{ - { - BlockID: backend.MustParse("00000000-0000-0000-0000-000000000001"), - }, - }, - add: nil, - remove: []*backend.BlockMeta{ - { - BlockID: backend.MustParse("00000000-0000-0000-0000-000000000002"), - }, - }, - expected: []*backend.BlockMeta{ - { - BlockID: backend.MustParse("00000000-0000-0000-0000-000000000001"), - }, - }, + name: "remove no match", + existing: []*backend.BlockMeta{_1}, + add: nil, + remove: []*backend.BlockMeta{_2}, + expected: []*backend.BlockMeta{_1}, }, { - name: "add and remove", - existing: []*backend.BlockMeta{ - { - BlockID: backend.MustParse("00000000-0000-0000-0000-000000000001"), - }, - { - BlockID: 
backend.MustParse("00000000-0000-0000-0000-000000000002"), - }, - }, - add: []*backend.BlockMeta{ - { - BlockID: backend.MustParse("00000000-0000-0000-0000-000000000003"), - }, - }, - remove: []*backend.BlockMeta{ - { - BlockID: backend.MustParse("00000000-0000-0000-0000-000000000002"), - }, - }, - expected: []*backend.BlockMeta{ - { - BlockID: backend.MustParse("00000000-0000-0000-0000-000000000001"), - }, - { - BlockID: backend.MustParse("00000000-0000-0000-0000-000000000003"), - }, - }, + name: "add and remove", + existing: []*backend.BlockMeta{_1, _2}, + add: []*backend.BlockMeta{_3}, + remove: []*backend.BlockMeta{_2}, + expected: []*backend.BlockMeta{_1, _3}, }, { - name: "add already exists", - existing: []*backend.BlockMeta{ - { - BlockID: backend.MustParse("00000000-0000-0000-0000-000000000001"), - }, - }, - add: []*backend.BlockMeta{ - { - BlockID: backend.MustParse("00000000-0000-0000-0000-000000000001"), - }, - { - BlockID: backend.MustParse("00000000-0000-0000-0000-000000000002"), - }, - }, - remove: nil, - expected: []*backend.BlockMeta{ - { - BlockID: backend.MustParse("00000000-0000-0000-0000-000000000001"), - }, - { - BlockID: backend.MustParse("00000000-0000-0000-0000-000000000002"), - }, - }, + name: "add already exists", + existing: []*backend.BlockMeta{_1}, + add: []*backend.BlockMeta{_1, _2}, + remove: nil, + expected: []*backend.BlockMeta{_1, _2}, + }, + { + name: "not added if also removed", + existing: []*backend.BlockMeta{_1}, + add: []*backend.BlockMeta{_2}, + remove: []*backend.BlockMeta{_2}, + expected: []*backend.BlockMeta{_1}, + }, + { + name: "not added if also compacted", + existing: []*backend.BlockMeta{_1}, + add: []*backend.BlockMeta{_2, _3}, + addC: []*backend.CompactedBlockMeta{_2c}, + removeC: []*backend.CompactedBlockMeta{_3c}, + expected: []*backend.BlockMeta{_1}, }, } @@ -304,18 +227,21 @@ func TestUpdate(t *testing.T) { l := New() l.metas[testTenantID] = tt.existing - l.Update(testTenantID, tt.add, tt.remove, nil, nil) - - assert.Equal(t, len(tt.expected), len(l.metas[testTenantID])) + l.Update(testTenantID, tt.add, tt.remove, tt.addC, tt.removeC) - for i := range tt.expected { - assert.Equal(t, tt.expected[i].BlockID, l.metas[testTenantID][i].BlockID) - } + require.Equal(t, len(tt.expected), len(l.metas[testTenantID])) + require.ElementsMatch(t, tt.expected, l.metas[testTenantID]) }) } } func TestUpdateCompacted(t *testing.T) { + var ( + _1 = compactedMeta("00000000-0000-0000-0000-000000000001") + _2 = compactedMeta("00000000-0000-0000-0000-000000000002") + _3 = compactedMeta("00000000-0000-0000-0000-000000000003") + ) + tests := []struct { name string existing []*backend.CompactedBlockMeta @@ -332,124 +258,34 @@ func TestUpdateCompacted(t *testing.T) { { name: "add to nil", existing: nil, - add: []*backend.CompactedBlockMeta{ - { - BlockMeta: backend.BlockMeta{ - BlockID: backend.MustParse("00000000-0000-0000-0000-000000000001"), - }, - }, - }, - expected: []*backend.CompactedBlockMeta{ - { - BlockMeta: backend.BlockMeta{ - BlockID: backend.MustParse("00000000-0000-0000-0000-000000000001"), - }, - }, - }, + add: []*backend.CompactedBlockMeta{_1}, + expected: []*backend.CompactedBlockMeta{_1}, }, { - name: "add to existing", - existing: []*backend.CompactedBlockMeta{ - { - BlockMeta: backend.BlockMeta{ - BlockID: backend.MustParse("00000000-0000-0000-0000-000000000001"), - }, - }, - }, - add: []*backend.CompactedBlockMeta{ - { - BlockMeta: backend.BlockMeta{ - BlockID: backend.MustParse("00000000-0000-0000-0000-000000000002"), - }, - }, - }, - 
expected: []*backend.CompactedBlockMeta{ - { - BlockMeta: backend.BlockMeta{ - BlockID: backend.MustParse("00000000-0000-0000-0000-000000000001"), - }, - }, - { - BlockMeta: backend.BlockMeta{ - BlockID: backend.MustParse("00000000-0000-0000-0000-000000000002"), - }, - }, - }, + name: "add to existing", + existing: []*backend.CompactedBlockMeta{_1}, + add: []*backend.CompactedBlockMeta{_2}, + expected: []*backend.CompactedBlockMeta{_1, _2}, }, { - name: "add already exists", - existing: []*backend.CompactedBlockMeta{ - { - BlockMeta: backend.BlockMeta{ - BlockID: backend.MustParse("00000000-0000-0000-0000-000000000001"), - }, - }, - }, - add: []*backend.CompactedBlockMeta{ - { - BlockMeta: backend.BlockMeta{ - BlockID: backend.MustParse("00000000-0000-0000-0000-000000000001"), - }, - }, - { - BlockMeta: backend.BlockMeta{ - BlockID: backend.MustParse("00000000-0000-0000-0000-000000000002"), - }, - }, - }, - expected: []*backend.CompactedBlockMeta{ - { - BlockMeta: backend.BlockMeta{ - BlockID: backend.MustParse("00000000-0000-0000-0000-000000000001"), - }, - }, - { - BlockMeta: backend.BlockMeta{ - BlockID: backend.MustParse("00000000-0000-0000-0000-000000000002"), - }, - }, - }, + name: "add already exists", + existing: []*backend.CompactedBlockMeta{_1}, + add: []*backend.CompactedBlockMeta{_1, _2}, + expected: []*backend.CompactedBlockMeta{_1, _2}, }, { - name: "add and remove", - existing: []*backend.CompactedBlockMeta{ - { - BlockMeta: backend.BlockMeta{ - BlockID: backend.MustParse("00000000-0000-0000-0000-000000000001"), - }, - }, - { - BlockMeta: backend.BlockMeta{ - BlockID: backend.MustParse("00000000-0000-0000-0000-000000000002"), - }, - }, - }, - add: []*backend.CompactedBlockMeta{ - { - BlockMeta: backend.BlockMeta{ - BlockID: backend.MustParse("00000000-0000-0000-0000-000000000003"), - }, - }, - }, - remove: []*backend.CompactedBlockMeta{ - { - BlockMeta: backend.BlockMeta{ - BlockID: backend.MustParse("00000000-0000-0000-0000-000000000002"), - }, - }, - }, - expected: []*backend.CompactedBlockMeta{ - { - BlockMeta: backend.BlockMeta{ - BlockID: backend.MustParse("00000000-0000-0000-0000-000000000001"), - }, - }, - { - BlockMeta: backend.BlockMeta{ - BlockID: backend.MustParse("00000000-0000-0000-0000-000000000003"), - }, - }, - }, + name: "add and remove", + existing: []*backend.CompactedBlockMeta{_1, _2}, + add: []*backend.CompactedBlockMeta{_3}, + remove: []*backend.CompactedBlockMeta{_2}, + expected: []*backend.CompactedBlockMeta{_1, _3}, + }, + { + name: "not added if also removed", + existing: []*backend.CompactedBlockMeta{_1}, + add: []*backend.CompactedBlockMeta{_2}, + remove: []*backend.CompactedBlockMeta{_2}, + expected: []*backend.CompactedBlockMeta{_1}, }, } @@ -656,3 +492,58 @@ func TestUpdatesSaved(t *testing.T) { assert.Equal(t, tc.expectedCompacted, actualCompacted) } } + +func BenchmarkUpdate(b *testing.B) { + var ( + l = New() + numBlocks = 100000 // Realistic number + existing = make([]*backend.BlockMeta, 0, numBlocks) + add = []*backend.BlockMeta{ + meta("00000000-0000-0000-0000-000000000001"), + meta("00000000-0000-0000-0000-000000000002"), + } + remove = []*backend.BlockMeta{ + meta("00000000-0000-0000-0000-000000000003"), + meta("00000000-0000-0000-0000-000000000004"), + } + numCompacted = 1000 // Realistic number + compacted = make([]*backend.CompactedBlockMeta, 0, numCompacted) + compactedAdd = []*backend.CompactedBlockMeta{ + compactedMeta("00000000-0000-0000-0000-000000000005"), + compactedMeta("00000000-0000-0000-0000-000000000006"), + } + 
compactedRemove = []*backend.CompactedBlockMeta{ + compactedMeta("00000000-0000-0000-0000-000000000007"), + compactedMeta("00000000-0000-0000-0000-000000000008"), + } + ) + + for i := 0; i < numBlocks; i++ { + existing = append(existing, meta(uuid.NewString())) + } + for i := 0; i < numCompacted; i++ { + compacted = append(compacted, compactedMeta(uuid.NewString())) + } + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + l.metas[testTenantID] = existing + l.compactedMetas[testTenantID] = compacted + l.Update(testTenantID, add, remove, compactedAdd, compactedRemove) + } +} + +func meta(id string) *backend.BlockMeta { + return &backend.BlockMeta{ + BlockID: backend.MustParse(id), + } +} + +func compactedMeta(id string) *backend.CompactedBlockMeta { + return &backend.CompactedBlockMeta{ + BlockMeta: backend.BlockMeta{ + BlockID: backend.MustParse(id), + }, + } +} diff --git a/tempodb/compactor.go b/tempodb/compactor.go index 517844cf126..ef971eb7d9f 100644 --- a/tempodb/compactor.go +++ b/tempodb/compactor.go @@ -6,6 +6,7 @@ import ( "fmt" "sort" "strconv" + "strings" "time" "github.com/go-kit/log/level" @@ -70,6 +71,8 @@ var ( Name: "compaction_spans_combined_total", Help: "Number of spans that are deduped per replication factor.", }, []string{"replication_factor"}) + + errCompactionJobNoLongerOwned = fmt.Errorf("compaction job no longer owned") ) func (rw *readerWriter) compactionLoop(ctx context.Context) { @@ -78,26 +81,20 @@ func (rw *readerWriter) compactionLoop(ctx context.Context) { compactionCycle = rw.compactorCfg.CompactionCycle } - ticker := time.NewTicker(compactionCycle) - defer ticker.Stop() for { - select { - case <-ctx.Done(): - return - default: + // if the context is cancelled, we're shutting down and need to stop compacting + if ctx.Err() != nil { + break } - select { - case <-ticker.C: - rw.doCompaction(ctx) - case <-ctx.Done(): - return - } + doForAtLeast(ctx, compactionCycle, func() { + rw.compactOneTenant(ctx) + }) } } -// doCompaction runs a compaction cycle every 30s -func (rw *readerWriter) doCompaction(ctx context.Context) { +// compactOneTenant runs a compaction cycle every 30s +func (rw *readerWriter) compactOneTenant(ctx context.Context) { // List of all tenants in the block list // The block list is updated by constant polling the storage for tenant indexes and/or tenant blocks (and building the index) tenants := rw.blocklist.Tenants() @@ -142,51 +139,102 @@ func (rw *readerWriter) doCompaction(ctx context.Context) { start := time.Now() - level.Debug(rw.logger).Log("msg", "starting compaction cycle", "tenantID", tenantID, "offset", rw.compactorTenantOffset) + level.Info(rw.logger).Log("msg", "starting compaction cycle", "tenantID", tenantID, "offset", rw.compactorTenantOffset) for { - select { - case <-ctx.Done(): + // this context is controlled by the service manager. it being cancelled means that the process is shutting down + if ctx.Err() != nil { + level.Info(rw.logger).Log("msg", "caught context cancelled at the top of the compaction loop. 
bailing.", "err", ctx.Err(), "cause", context.Cause(ctx)) return - default: - // Pick up to defaultMaxInputBlocks (4) blocks to compact into a single one - toBeCompacted, hashString := blockSelector.BlocksToCompact() - if len(toBeCompacted) == 0 { - measureOutstandingBlocks(tenantID, blockSelector, rw.compactorSharder.Owns) + } + + // Pick up to defaultMaxInputBlocks (4) blocks to compact into a single one + toBeCompacted, hashString := blockSelector.BlocksToCompact() + if len(toBeCompacted) == 0 { + measureOutstandingBlocks(tenantID, blockSelector, rw.compactorSharder.Owns) - level.Debug(rw.logger).Log("msg", "compaction cycle complete. No more blocks to compact", "tenantID", tenantID) + level.Info(rw.logger).Log("msg", "compaction cycle complete. No more blocks to compact", "tenantID", tenantID) + return + } + + owns := func() bool { + return rw.compactorSharder.Owns(hashString) + } + if !owns() { + // continue on this tenant until we find something we own + continue + } + + level.Info(rw.logger).Log("msg", "Compacting hash", "hashString", hashString) + err := rw.compactWhileOwns(ctx, toBeCompacted, tenantID, owns) + + if errors.Is(err, backend.ErrDoesNotExist) { + level.Warn(rw.logger).Log("msg", "unable to find meta during compaction. trying again on this block list", "err", err) + } else if err != nil { + level.Error(rw.logger).Log("msg", "error during compaction cycle", "err", err) + metricCompactionErrors.Inc() + } + + // after a maintenance cycle bail out + if start.Add(rw.compactorCfg.MaxTimePerTenant).Before(time.Now()) { + measureOutstandingBlocks(tenantID, blockSelector, rw.compactorSharder.Owns) + + level.Info(rw.logger).Log("msg", "compacted blocks for a maintenance cycle, bailing out", "tenantID", tenantID) + return + } + } +} + +func (rw *readerWriter) compactWhileOwns(ctx context.Context, blockMetas []*backend.BlockMeta, tenantID string, owns func() bool) error { + ownsCtx, cancel := context.WithCancelCause(ctx) + + done := make(chan struct{}) + defer close(done) + + // every second test if we still own the job. if we don't then cancel the context with a cause + // that we can then test for + go func() { + ticker := time.NewTicker(1 * time.Second) + defer ticker.Stop() + + for { + if !owns() { + cancel(errCompactionJobNoLongerOwned) return } - if !rw.compactorSharder.Owns(hashString) { - // continue on this tenant until we find something we own - continue - } - level.Info(rw.logger).Log("msg", "Compacting hash", "hashString", hashString) - // Compact selected blocks into a larger one - err := rw.compact(ctx, toBeCompacted, tenantID) - - if errors.Is(err, backend.ErrDoesNotExist) { - level.Warn(rw.logger).Log("msg", "unable to find meta during compaction. 
trying again on this block list", "err", err) - } else if err != nil { - level.Error(rw.logger).Log("msg", "error during compaction cycle", "err", err) - metricCompactionErrors.Inc() - } - if !rw.compactorSharder.Owns(hashString) { - level.Warn(rw.logger).Log("msg", "compaction complete but we no longer own the hash", "hashString", hashString) + select { + case <-ticker.C: + case <-done: + return + case <-ownsCtx.Done(): + return } + } + }() - // after a maintenance cycle bail out - if start.Add(rw.compactorCfg.MaxTimePerTenant).Before(time.Now()) { - measureOutstandingBlocks(tenantID, blockSelector, rw.compactorSharder.Owns) + err := rw.compactOneJob(ownsCtx, blockMetas, tenantID) + if errors.Is(err, context.Canceled) && errors.Is(context.Cause(ownsCtx), errCompactionJobNoLongerOwned) { + level.Warn(rw.logger).Log("msg", "lost ownership of this job. abandoning job and trying again on this block list", "err", err) + return nil + } - level.Info(rw.logger).Log("msg", "compacted blocks for a maintenance cycle, bailing out", "tenantID", tenantID) - return - } + // test to see if we still own this job. it would be exceptional to log this message, but would be nice to know. a more likely bad case is that + // job ownership changes but that change has not yet propagated to this compactor, so it duplicated data w/o realizing it. + if !owns() { + // format a string with all input metas + sb := &strings.Builder{} + for _, meta := range blockMetas { + sb.WriteString(meta.BlockID.String()) + sb.WriteString(", ") } + + level.Error(rw.logger).Log("msg", "lost ownership of this job after compaction. possible data duplication", "tenant", tenantID, "input_blocks", sb.String()) } + + return err } -func (rw *readerWriter) compact(ctx context.Context, blockMetas []*backend.BlockMeta, tenantID string) error { +func (rw *readerWriter) compactOneJob(ctx context.Context, blockMetas []*backend.BlockMeta, tenantID string) error { level.Debug(rw.logger).Log("msg", "beginning compaction", "num blocks compacting", len(blockMetas)) // todo - add timeout? @@ -380,3 +428,22 @@ func (i instrumentedObjectCombiner) Combine(dataEncoding string, objs ...[]byte) } return b, wasCombined, err } + +// doForAtLeast executes the function f. It blocks for at least the passed duration but can go longer. if context is cancelled after +// the function is done we will bail immediately. 
in the current use case this means that the process is shutting down +// we don't force f() to cancel, we assume it also responds to the cancelled context +func doForAtLeast(ctx context.Context, dur time.Duration, f func()) { + startTime := time.Now() + f() + elapsed := time.Since(startTime) + + if elapsed < dur { + ticker := time.NewTicker(dur - elapsed) + defer ticker.Stop() + + select { + case <-ticker.C: + case <-ctx.Done(): + } + } +} diff --git a/tempodb/compactor_test.go b/tempodb/compactor_test.go index 3ef1f679eb1..13532c5f6f8 100644 --- a/tempodb/compactor_test.go +++ b/tempodb/compactor_test.go @@ -173,7 +173,7 @@ func testCompactionRoundtrip(t *testing.T, targetBlockVersion string) { require.Len(t, blocks, inputBlocks) compactions++ - err := rw.compact(context.Background(), blocks, testTenantID) + err := rw.compactOneJob(context.Background(), blocks, testTenantID) require.NoError(t, err) expectedBlockCount -= blocksPerCompaction @@ -336,7 +336,7 @@ func testSameIDCompaction(t *testing.T, targetBlockVersion string) { combinedStart, err := test.GetCounterVecValue(metricCompactionObjectsCombined, "0") require.NoError(t, err) - err = rw.compact(ctx, blocks, testTenantID) + err = rw.compactOneJob(ctx, blocks, testTenantID) require.NoError(t, err) checkBlocklists(t, uuid.Nil, 1, blockCount, rw) @@ -420,7 +420,7 @@ func TestCompactionUpdatesBlocklist(t *testing.T) { rw.pollBlocklist() // compact everything - err = rw.compact(ctx, rw.blocklist.Metas(testTenantID), testTenantID) + err = rw.compactOneJob(ctx, rw.blocklist.Metas(testTenantID), testTenantID) require.NoError(t, err) // New blocklist contains 1 compacted block with everything @@ -501,7 +501,7 @@ func TestCompactionMetrics(t *testing.T) { assert.NoError(t, err) // compact everything - err = rw.compact(ctx, rw.blocklist.Metas(testTenantID), testTenantID) + err = rw.compactOneJob(ctx, rw.blocklist.Metas(testTenantID), testTenantID) assert.NoError(t, err) // Check metric @@ -570,12 +570,12 @@ func TestCompactionIteratesThroughTenants(t *testing.T) { // Verify that tenant 2 compacted, tenant 1 is not // Compaction starts at index 1 for simplicity - rw.doCompaction(ctx) + rw.compactOneTenant(ctx) assert.Equal(t, 2, len(rw.blocklist.Metas(testTenantID))) assert.Equal(t, 1, len(rw.blocklist.Metas(testTenantID2))) // Verify both tenants compacted after second run - rw.doCompaction(ctx) + rw.compactOneTenant(ctx) assert.Equal(t, 1, len(rw.blocklist.Metas(testTenantID))) assert.Equal(t, 1, len(rw.blocklist.Metas(testTenantID2))) } @@ -630,11 +630,11 @@ func testCompactionHonorsBlockStartEndTimes(t *testing.T, targetBlockVersion str r.EnablePolling(ctx, &mockJobSharder{}) - cutTestBlockWithTraces(t, w, testTenantID, []testData{ + cutTestBlockWithTraces(t, w, []testData{ {test.ValidTraceID(nil), test.MakeTrace(10, nil), 100, 101}, {test.ValidTraceID(nil), test.MakeTrace(10, nil), 102, 103}, }) - cutTestBlockWithTraces(t, w, testTenantID, []testData{ + cutTestBlockWithTraces(t, w, []testData{ {test.ValidTraceID(nil), test.MakeTrace(10, nil), 104, 105}, {test.ValidTraceID(nil), test.MakeTrace(10, nil), 106, 107}, }) @@ -643,7 +643,7 @@ func testCompactionHonorsBlockStartEndTimes(t *testing.T, targetBlockVersion str rw.pollBlocklist() // compact everything - err = rw.compact(ctx, rw.blocklist.Metas(testTenantID), testTenantID) + err = rw.compactOneJob(ctx, rw.blocklist.Metas(testTenantID), testTenantID) require.NoError(t, err) time.Sleep(100 * time.Millisecond) @@ -778,13 +778,36 @@ func testCompactionDropsTraces(t *testing.T, 
targetBlockVersion string) {
 	}
 }

+func TestDoForAtLeast(t *testing.T) {
+	// test that it runs for at least the duration
+	start := time.Now()
+	doForAtLeast(context.Background(), time.Second, func() { time.Sleep(time.Millisecond) })
+	require.WithinDuration(t, time.Now(), start.Add(time.Second), 100*time.Millisecond)
+
+	// test that it allows func to overrun
+	start = time.Now()
+	doForAtLeast(context.Background(), time.Second, func() { time.Sleep(2 * time.Second) })
+	require.WithinDuration(t, time.Now(), start.Add(2*time.Second), 100*time.Millisecond)
+
+	// make sure cancelling the context stops the wait if the function is complete and we're
+	// just waiting. it is presumed, but not enforced, that the function responds to a cancelled context
+	ctx, cancel := context.WithCancel(context.Background())
+	start = time.Now()
+	go func() {
+		time.Sleep(time.Second)
+		cancel()
+	}()
+	doForAtLeast(ctx, 2*time.Second, func() {})
+	require.WithinDuration(t, time.Now(), start.Add(time.Second), 100*time.Millisecond)
+}
+
 type testData struct {
 	id         common.ID
 	t          *tempopb.Trace
 	start, end uint32
 }

-func cutTestBlockWithTraces(t testing.TB, w Writer, tenantID string, data []testData) common.BackendBlock {
+func cutTestBlockWithTraces(t testing.TB, w Writer, data []testData) common.BackendBlock {
 	dec := model.MustNewSegmentDecoder(model.CurrentEncoding)

 	wal := w.WAL()
@@ -894,6 +917,6 @@ func benchmarkCompaction(b *testing.B, targetBlockVersion string) {

 	b.ResetTimer()

-	err = rw.compact(ctx, metas, testTenantID)
+	err = rw.compactOneJob(ctx, metas, testTenantID)
 	require.NoError(b, err)
 }
diff --git a/tempodb/config.go b/tempodb/config.go
index 86f86c0e822..0ba66f602b3 100644
--- a/tempodb/config.go
+++ b/tempodb/config.go
@@ -163,7 +163,7 @@ func validateConfig(cfg *Config) error {
 		cfg.WAL.Version = cfg.Block.Version
 	}

-	err := wal.ValidateConfig(cfg.WAL)
+	err := cfg.WAL.Validate()
 	if err != nil {
 		return fmt.Errorf("wal config validation failed: %w", err)
 	}
diff --git a/tempodb/encoding/v2/appender.go b/tempodb/encoding/v2/appender.go
index 46000f2236d..56d9229615f 100644
--- a/tempodb/encoding/v2/appender.go
+++ b/tempodb/encoding/v2/appender.go
@@ -4,7 +4,7 @@ import (
 	"hash"
 	"sync"

-	"github.com/cespare/xxhash"
+	"github.com/cespare/xxhash/v2"

 	"github.com/grafana/tempo/tempodb/encoding/common"
 )
diff --git a/tempodb/encoding/v2/index_reader.go b/tempodb/encoding/v2/index_reader.go
index 6b939f7874d..d9f2773dc2b 100644
--- a/tempodb/encoding/v2/index_reader.go
+++ b/tempodb/encoding/v2/index_reader.go
@@ -8,7 +8,7 @@ import (
 	"github.com/grafana/tempo/pkg/sort"
 	"go.opentelemetry.io/otel"

-	"github.com/cespare/xxhash"
+	"github.com/cespare/xxhash/v2"

 	"github.com/grafana/tempo/tempodb/backend"
 	"github.com/grafana/tempo/tempodb/encoding/common"
 )
diff --git a/tempodb/encoding/v2/index_writer.go b/tempodb/encoding/v2/index_writer.go
index 653b2bf3b61..5f12579e4c4 100644
--- a/tempodb/encoding/v2/index_writer.go
+++ b/tempodb/encoding/v2/index_writer.go
@@ -3,7 +3,7 @@ package v2
 import (
 	"fmt"

-	"github.com/cespare/xxhash"
+	"github.com/cespare/xxhash/v2"
 )

 type indexWriter struct {
diff --git a/tempodb/encoding/vparquet2/block_search_tags_test.go b/tempodb/encoding/vparquet2/block_search_tags_test.go
index a32e1bcb0f5..58afd30e787 100644
--- a/tempodb/encoding/vparquet2/block_search_tags_test.go
+++ b/tempodb/encoding/vparquet2/block_search_tags_test.go
@@ -177,7 +177,7 @@ func BenchmarkBackendBlockSearchTags(b *testing.B) {
 	block := newBackendBlock(meta, rr)
 	opts := common.DefaultSearchOptions()

-
d := collector.NewDistinctString(1_000_000) + d := collector.NewDistinctString(1_000_000, 0, 0) mc := collector.NewMetricsCollector() b.ResetTimer() @@ -212,7 +212,7 @@ func BenchmarkBackendBlockSearchTagValues(b *testing.B) { for _, tc := range testCases { b.Run(tc, func(b *testing.B) { - d := collector.NewDistinctString(1_000_000) + d := collector.NewDistinctString(1_000_000, 0, 0) mc := collector.NewMetricsCollector() b.ResetTimer() for i := 0; i < b.N; i++ { diff --git a/tempodb/encoding/vparquet3/block_autocomplete_test.go b/tempodb/encoding/vparquet3/block_autocomplete_test.go index b4964c45f11..c95a8e1238a 100644 --- a/tempodb/encoding/vparquet3/block_autocomplete_test.go +++ b/tempodb/encoding/vparquet3/block_autocomplete_test.go @@ -211,7 +211,7 @@ func TestFetchTagNames(t *testing.T) { } t.Run(fmt.Sprintf("query: %s %s-%s", tc.name, tc.query, scope), func(t *testing.T) { - distinctAttrNames := collector.NewScopedDistinctString(0) + distinctAttrNames := collector.NewScopedDistinctString(0, 0, 0) req, err := traceql.ExtractFetchSpansRequest(tc.query) require.NoError(t, err) @@ -494,7 +494,7 @@ func TestFetchTagValues(t *testing.T) { for _, tc := range testCases { t.Run(fmt.Sprintf("tag: %s, query: %s", tc.tag, tc.query), func(t *testing.T) { - distinctValues := collector.NewDistinctValue[tempopb.TagValue](1_000_000, func(v tempopb.TagValue) int { return len(v.Type) + len(v.Value) }) + distinctValues := collector.NewDistinctValue[tempopb.TagValue](1_000_000, 0, 0, func(v tempopb.TagValue) int { return len(v.Type) + len(v.Value) }) req, err := traceql.ExtractFetchSpansRequest(tc.query) require.NoError(t, err) @@ -596,7 +596,7 @@ func BenchmarkFetchTagValues(b *testing.B) { for _, tc := range testCases { b.Run(fmt.Sprintf("tag: %s, query: %s", tc.tag, tc.query), func(b *testing.B) { - distinctValues := collector.NewDistinctValue[tempopb.TagValue](1_000_000, func(v tempopb.TagValue) int { return len(v.Type) + len(v.Value) }) + distinctValues := collector.NewDistinctValue[tempopb.TagValue](1_000_000, 0, 0, func(v tempopb.TagValue) int { return len(v.Type) + len(v.Value) }) req, err := traceql.ExtractFetchSpansRequest(tc.query) require.NoError(b, err) @@ -678,7 +678,7 @@ func BenchmarkFetchTags(b *testing.B) { for _, tc := range testCases { for _, scope := range []traceql.AttributeScope{traceql.AttributeScopeSpan, traceql.AttributeScopeResource, traceql.AttributeScopeNone} { b.Run(fmt.Sprintf("query: %s %s", tc.query, scope), func(b *testing.B) { - distinctStrings := collector.NewScopedDistinctString(1_000_000) + distinctStrings := collector.NewScopedDistinctString(1_000_000, 0, 0) req, err := traceql.ExtractFetchSpansRequest(tc.query) require.NoError(b, err) diff --git a/tempodb/encoding/vparquet3/block_search_tags_test.go b/tempodb/encoding/vparquet3/block_search_tags_test.go index 31e1ae2a704..c47a5c19bc8 100644 --- a/tempodb/encoding/vparquet3/block_search_tags_test.go +++ b/tempodb/encoding/vparquet3/block_search_tags_test.go @@ -203,7 +203,7 @@ func BenchmarkBackendBlockSearchTags(b *testing.B) { block := newBackendBlock(meta, rr) opts := common.DefaultSearchOptions() - d := collector.NewDistinctString(1_000_000) + d := collector.NewDistinctString(1_000_000, 0, 0) mc := collector.NewMetricsCollector() b.ResetTimer() @@ -238,7 +238,7 @@ func BenchmarkBackendBlockSearchTagValues(b *testing.B) { for _, tc := range testCases { b.Run(tc, func(b *testing.B) { - d := collector.NewDistinctString(1_000_000) + d := collector.NewDistinctString(1_000_000, 0, 0) mc := 
collector.NewMetricsCollector() b.ResetTimer() diff --git a/tempodb/encoding/vparquet3/block_traceql_test.go b/tempodb/encoding/vparquet3/block_traceql_test.go index 6b9f074ed5c..8ab94aff874 100644 --- a/tempodb/encoding/vparquet3/block_traceql_test.go +++ b/tempodb/encoding/vparquet3/block_traceql_test.go @@ -592,6 +592,7 @@ func BenchmarkBackendBlockTraceQL(b *testing.B) { {"||", "{ resource.service.name = `loki-querier` } || { resource.service.name = `loki-gateway` }"}, {"mixed", `{resource.namespace!="" && resource.service.name="cortex-gateway" && duration>50ms && resource.cluster=~"prod.*"}`}, {"complex", `{resource.cluster=~"prod.*" && resource.namespace = "tempo-prod" && resource.container="query-frontend" && name = "HTTP GET - tempo_api_v2_search_tags" && span.http.status_code = 200 && duration > 1s}`}, + {"select", `{resource.cluster=~"prod.*" && resource.namespace = "tempo-prod"} | select(resource.container)`}, } ctx := context.TODO() diff --git a/tempodb/encoding/vparquet4/block_autocomplete_test.go b/tempodb/encoding/vparquet4/block_autocomplete_test.go index d0f87aa82af..94166263222 100644 --- a/tempodb/encoding/vparquet4/block_autocomplete_test.go +++ b/tempodb/encoding/vparquet4/block_autocomplete_test.go @@ -318,7 +318,7 @@ func TestFetchTagNames(t *testing.T) { } t.Run(fmt.Sprintf("query: %s %s-%s", tc.name, tc.query, scope), func(t *testing.T) { - distinctAttrNames := collector.NewScopedDistinctString(0) + distinctAttrNames := collector.NewScopedDistinctString(0, 0, 0) req, err := traceql.ExtractFetchSpansRequest(tc.query) require.NoError(t, err) @@ -617,7 +617,7 @@ func TestFetchTagValues(t *testing.T) { for _, tc := range testCases { t.Run(fmt.Sprintf("tag: %s, query: %s", tc.tag, tc.query), func(t *testing.T) { - distinctValues := collector.NewDistinctValue[tempopb.TagValue](1_000_000, func(v tempopb.TagValue) int { return len(v.Type) + len(v.Value) }) + distinctValues := collector.NewDistinctValue[tempopb.TagValue](1_000_000, 0, 0, func(v tempopb.TagValue) int { return len(v.Type) + len(v.Value) }) req, err := traceql.ExtractFetchSpansRequest(tc.query) require.NoError(t, err) @@ -720,7 +720,7 @@ func BenchmarkFetchTagValues(b *testing.B) { for _, tc := range testCases { b.Run(fmt.Sprintf("tag: %s, query: %s", tc.tag, tc.query), func(b *testing.B) { - distinctValues := collector.NewDistinctValue[tempopb.TagValue](1_000_000, func(v tempopb.TagValue) int { return len(v.Type) + len(v.Value) }) + distinctValues := collector.NewDistinctValue[tempopb.TagValue](1_000_000, 0, 0, func(v tempopb.TagValue) int { return len(v.Type) + len(v.Value) }) req, err := traceql.ExtractFetchSpansRequest(tc.query) require.NoError(b, err) @@ -802,7 +802,7 @@ func BenchmarkFetchTags(b *testing.B) { for _, tc := range testCases { for _, scope := range []traceql.AttributeScope{traceql.AttributeScopeSpan, traceql.AttributeScopeResource, traceql.AttributeScopeNone} { b.Run(fmt.Sprintf("query: %s %s", tc.query, scope), func(b *testing.B) { - distinctStrings := collector.NewScopedDistinctString(1_000_000) + distinctStrings := collector.NewScopedDistinctString(1_000_000, 0, 0) req, err := traceql.ExtractFetchSpansRequest(tc.query) require.NoError(b, err) diff --git a/tempodb/encoding/vparquet4/block_search_tags_test.go b/tempodb/encoding/vparquet4/block_search_tags_test.go index 2c734da42b1..d502d7bf7ae 100644 --- a/tempodb/encoding/vparquet4/block_search_tags_test.go +++ b/tempodb/encoding/vparquet4/block_search_tags_test.go @@ -203,7 +203,7 @@ func BenchmarkBackendBlockSearchTags(b *testing.B) { 
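
The constructor changes running through these benchmarks (NewDistinctString(1_000_000, 0, 0), NewScopedDistinctString(0, 0, 0)) thread two new limits into the distinct collectors alongside the existing byte budget; judging by the call sites later in this diff, a maximum value count and a stale-value threshold. As a toy sketch, a byte- and count-capped distinct collector that records why it stopped; the names and exact semantics here are assumptions for illustration, not Tempo's implementation:

// boundedDistinct is a simplified, hypothetical stand-in for collector.DistinctString.
type boundedDistinct struct {
	maxBytes   int // 0 means unlimited
	maxValues  int // 0 means unlimited
	curBytes   int
	values     map[string]struct{}
	stopReason string
}

func newBoundedDistinct(maxBytes, maxValues int) *boundedDistinct {
	return &boundedDistinct{maxBytes: maxBytes, maxValues: maxValues, values: map[string]struct{}{}}
}

// Collect adds s and reports whether the collector has stopped accepting values.
func (d *boundedDistinct) Collect(s string) bool {
	if d.stopReason != "" {
		return true
	}
	if _, ok := d.values[s]; ok {
		return false // duplicates cost nothing
	}
	if d.maxValues > 0 && len(d.values) >= d.maxValues {
		d.stopReason = "max values reached"
		return true
	}
	if d.maxBytes > 0 && d.curBytes+len(s) > d.maxBytes {
		d.stopReason = "max data size reached"
		return true
	}
	d.values[s] = struct{}{}
	d.curBytes += len(s)
	return false
}

The Exceeded()/StopReason() accessors used later in tempodb.go would then simply expose stopReason.
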
block := newBackendBlock(meta, rr) opts := common.DefaultSearchOptions() - d := collector.NewDistinctString(1_000_000) + d := collector.NewDistinctString(1_000_000, 0, 0) mc := collector.NewMetricsCollector() b.ResetTimer() @@ -238,7 +238,7 @@ func BenchmarkBackendBlockSearchTagValues(b *testing.B) { for _, tc := range testCases { b.Run(tc, func(b *testing.B) { - d := collector.NewDistinctString(1_000_000) + d := collector.NewDistinctString(1_000_000, 0, 0) mc := collector.NewMetricsCollector() b.ResetTimer() for i := 0; i < b.N; i++ { diff --git a/tempodb/encoding/vparquet4/block_traceql_test.go b/tempodb/encoding/vparquet4/block_traceql_test.go index 8c81e0d217a..b5c0701ebe5 100644 --- a/tempodb/encoding/vparquet4/block_traceql_test.go +++ b/tempodb/encoding/vparquet4/block_traceql_test.go @@ -943,6 +943,7 @@ func BenchmarkBackendBlockTraceQL(b *testing.B) { {"||", "{ resource.service.name = `loki-querier` } || { resource.service.name = `loki-gateway` }"}, {"mixed", `{resource.namespace!="" && resource.service.name="cortex-gateway" && duration>50ms && resource.cluster=~"prod.*"}`}, {"complex", `{resource.cluster=~"prod.*" && resource.namespace = "tempo-prod" && resource.container="query-frontend" && name = "HTTP GET - tempo_api_v2_search_tags" && span.http.status_code = 200 && duration > 1s}`}, + {"select", `{resource.cluster=~"prod.*" && resource.namespace = "tempo-prod"} | select(resource.container)`}, } ctx := context.TODO() diff --git a/tempodb/encoding/vparquet4/compactor.go b/tempodb/encoding/vparquet4/compactor.go index 3b7d258eb19..b47817c9bb1 100644 --- a/tempodb/encoding/vparquet4/compactor.go +++ b/tempodb/encoding/vparquet4/compactor.go @@ -60,7 +60,7 @@ func (c *Compactor) Compact(ctx context.Context, l log.Logger, r backend.Reader, iter, err := block.rawIter(derivedCtx, pool) if err != nil { - return nil, err + return nil, fmt.Errorf("error creating iterator for block %s: %w", blockMeta.BlockID.String(), err) } bookmarks = append(bookmarks, newBookmark[parquet.Row](iter)) diff --git a/tempodb/encoding/vparquet4/schema.go b/tempodb/encoding/vparquet4/schema.go index 64cb6299866..af3f1c26471 100644 --- a/tempodb/encoding/vparquet4/schema.go +++ b/tempodb/encoding/vparquet4/schema.go @@ -340,6 +340,14 @@ func attrToParquetTypeUnsupported(a *v1.KeyValue, p *Attribute) { // traceToParquet converts a tempopb.Trace to this schema's object model. 
Returns the new object and // a bool indicating if it's a connected trace or not func traceToParquet(meta *backend.BlockMeta, id common.ID, tr *tempopb.Trace, ot *Trace) (*Trace, bool) { + // Dedicated attribute column assignments + dedicatedResourceAttributes := dedicatedColumnsToColumnMapping(meta.DedicatedColumns, backend.DedicatedColumnScopeResource) + dedicatedSpanAttributes := dedicatedColumnsToColumnMapping(meta.DedicatedColumns, backend.DedicatedColumnScopeSpan) + + return traceToParquetWithMapping(id, tr, ot, dedicatedResourceAttributes, dedicatedSpanAttributes) +} + +func traceToParquetWithMapping(id common.ID, tr *tempopb.Trace, ot *Trace, dedicatedResourceAttributes, dedicatedSpanAttributes dedicatedColumnMapping) (*Trace, bool) { if ot == nil { ot = &Trace{} } @@ -353,10 +361,6 @@ func traceToParquet(meta *backend.BlockMeta, id common.ID, tr *tempopb.Trace, ot var rootSpan *v1_trace.Span var rootBatch *v1_trace.ResourceSpans - // Dedicated attribute column assignments - dedicatedResourceAttributes := dedicatedColumnsToColumnMapping(meta.DedicatedColumns, backend.DedicatedColumnScopeResource) - dedicatedSpanAttributes := dedicatedColumnsToColumnMapping(meta.DedicatedColumns, backend.DedicatedColumnScopeSpan) - ot.ResourceSpans = extendReuseSlice(len(tr.ResourceSpans), ot.ResourceSpans) for ib, b := range tr.ResourceSpans { ob := &ot.ResourceSpans[ib] diff --git a/tempodb/encoding/vparquet4/wal_block.go b/tempodb/encoding/vparquet4/wal_block.go index 627d1180c2a..8ca9bc25686 100644 --- a/tempodb/encoding/vparquet4/wal_block.go +++ b/tempodb/encoding/vparquet4/wal_block.go @@ -82,6 +82,8 @@ func openWALBlock(filename, path string, ingestionSlack, _ time.Duration) (commo path: path, ids: common.NewIDMap[int64](), ingestionSlack: ingestionSlack, + dedcolsRes: dedicatedColumnsToColumnMapping(meta.DedicatedColumns, backend.DedicatedColumnScopeResource), + dedcolsSpan: dedicatedColumnsToColumnMapping(meta.DedicatedColumns, backend.DedicatedColumnScopeSpan), } // read all files in dir @@ -161,6 +163,8 @@ func createWALBlock(meta *backend.BlockMeta, filepath, dataEncoding string, inge path: filepath, ids: common.NewIDMap[int64](), ingestionSlack: ingestionSlack, + dedcolsRes: dedicatedColumnsToColumnMapping(meta.DedicatedColumns, backend.DedicatedColumnScopeResource), + dedcolsSpan: dedicatedColumnsToColumnMapping(meta.DedicatedColumns, backend.DedicatedColumnScopeSpan), } // build folder @@ -284,6 +288,8 @@ type walBlock struct { meta *backend.BlockMeta path string ingestionSlack time.Duration + dedcolsRes dedicatedColumnMapping + dedcolsSpan dedicatedColumnMapping // Unflushed data buffer *Trace @@ -331,7 +337,7 @@ func (b *walBlock) Append(id common.ID, buff []byte, start, end uint32) error { func (b *walBlock) AppendTrace(id common.ID, trace *tempopb.Trace, start, end uint32) error { var connected bool - b.buffer, connected = traceToParquet(b.meta, id, trace, b.buffer) + b.buffer, connected = traceToParquetWithMapping(id, trace, b.buffer, b.dedcolsRes, b.dedcolsSpan) if !connected { dataquality.WarnDisconnectedTrace(b.meta.TenantID, dataquality.PhaseTraceFlushedToWal) } diff --git a/tempodb/retention.go b/tempodb/retention.go index fd7fd667cfd..bffcafadefa 100644 --- a/tempodb/retention.go +++ b/tempodb/retention.go @@ -12,6 +12,8 @@ import ( ) // retentionLoop watches a timer to clean up blocks that are past retention. +// todo: correctly pass context all the way to the backend so a cancelled context can stop the retention loop. 
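
The todo above is asking for the standard select loop that watches both the ticker and the context. A minimal sketch of that shape (function and parameter names are illustrative, not the actual Tempo code):

// retentionLoopSketch runs doRetention on every tick and exits as soon as
// the context is cancelled, rather than looping forever.
func retentionLoopSketch(ctx context.Context, interval time.Duration, doRetention func(context.Context)) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			doRetention(ctx)
		case <-ctx.Done():
			return
		}
	}
}
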
+// see implementation of compactionLoop() func (rw *readerWriter) retentionLoop(ctx context.Context) { ticker := time.NewTicker(rw.cfg.BlocklistPoll) for { diff --git a/tempodb/retention_test.go b/tempodb/retention_test.go index af5862276d1..b0c7b4d8134 100644 --- a/tempodb/retention_test.go +++ b/tempodb/retention_test.go @@ -46,8 +46,8 @@ func TestRetention(t *testing.T) { err = c.EnableCompaction(ctx, &CompactorConfig{ ChunkSizeBytes: 10, MaxCompactionRange: time.Hour, - BlockRetention: 0, - CompactedBlockRetention: 0, + BlockRetention: time.Hour, + CompactedBlockRetention: time.Hour, }, &mockSharder{}, &mockOverrides{}) require.NoError(t, err) @@ -71,10 +71,12 @@ func TestRetention(t *testing.T) { checkBlocklists(t, (uuid.UUID)(blockID), 1, 0, rw) // retention should mark it compacted + rw.compactorCfg.BlockRetention = 0 r.(*readerWriter).doRetention(ctx) checkBlocklists(t, (uuid.UUID)(blockID), 0, 1, rw) // retention again should clear it + rw.compactorCfg.CompactedBlockRetention = 0 r.(*readerWriter).doRetention(ctx) checkBlocklists(t, (uuid.UUID)(blockID), 0, 0, rw) } diff --git a/tempodb/tempodb.go b/tempodb/tempodb.go index 86ceebed9bc..f8c46b16802 100644 --- a/tempodb/tempodb.go +++ b/tempodb/tempodb.go @@ -17,6 +17,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/grafana/dskit/user" "github.com/grafana/tempo/modules/cache/memcached" "github.com/grafana/tempo/modules/cache/redis" "github.com/grafana/tempo/pkg/cache" @@ -82,8 +83,8 @@ type IterateObjectCallback func(id common.ID, obj []byte) bool type Reader interface { Find(ctx context.Context, tenantID string, id common.ID, blockStart string, blockEnd string, timeStart int64, timeEnd int64, opts common.SearchOptions) ([]*tempopb.Trace, []error, error) Search(ctx context.Context, meta *backend.BlockMeta, req *tempopb.SearchRequest, opts common.SearchOptions) (*tempopb.SearchResponse, error) - SearchTags(ctx context.Context, meta *backend.BlockMeta, scope string, opts common.SearchOptions) (*tempopb.SearchTagsV2Response, error) - SearchTagValues(ctx context.Context, meta *backend.BlockMeta, tag string, opts common.SearchOptions) (*tempopb.SearchTagValuesResponse, error) + SearchTags(ctx context.Context, meta *backend.BlockMeta, req *tempopb.SearchTagsBlockRequest, opts common.SearchOptions) (*tempopb.SearchTagsV2Response, error) + SearchTagValues(ctx context.Context, meta *backend.BlockMeta, req *tempopb.SearchTagValuesBlockRequest, opts common.SearchOptions) (*tempopb.SearchTagValuesResponse, error) SearchTagValuesV2(ctx context.Context, meta *backend.BlockMeta, req *tempopb.SearchTagValuesRequest, opts common.SearchOptions) (*tempopb.SearchTagValuesV2Response, error) // TODO(suraj): use common.MetricsCallback in Fetch and remove the Bytes callback from traceql.FetchSpansResponse @@ -369,7 +370,8 @@ func (rw *readerWriter) Search(ctx context.Context, meta *backend.BlockMeta, req return block.Search(ctx, req, opts) } -func (rw *readerWriter) SearchTags(ctx context.Context, meta *backend.BlockMeta, scope string, opts common.SearchOptions) (*tempopb.SearchTagsV2Response, error) { +func (rw *readerWriter) SearchTags(ctx context.Context, meta *backend.BlockMeta, req *tempopb.SearchTagsBlockRequest, opts common.SearchOptions) (*tempopb.SearchTagsV2Response, error) { + scope := req.SearchReq.Scope attributeScope := traceql.AttributeScopeFromString(scope) if attributeScope == traceql.AttributeScopeUnknown { @@ -381,7 +383,7 @@ func (rw *readerWriter) 
SearchTags(ctx context.Context, meta *backend.BlockMeta, return nil, err } - distinctValues := collector.NewScopedDistinctString(0) // todo: propagate limit? + distinctValues := collector.NewScopedDistinctString(0, req.SearchReq.MaxTagsPerScope, req.SearchReq.StaleValuesThreshold) mc := collector.NewMetricsCollector() rw.cfg.Search.ApplyToOptions(&opts) @@ -392,6 +394,11 @@ func (rw *readerWriter) SearchTags(ctx context.Context, meta *backend.BlockMeta, return nil, err } + orgID, _ := user.ExtractOrgID(ctx) + if distinctValues.Exceeded() { + level.Warn(log.Logger).Log("msg", "Search tags exceeded limit, reduce cardinality or size of tags", "orgID", orgID, "stopReason", distinctValues.StopReason()) + } + // build response collected := distinctValues.Strings() resp := &tempopb.SearchTagsV2Response{ @@ -408,16 +415,21 @@ func (rw *readerWriter) SearchTags(ctx context.Context, meta *backend.BlockMeta, return resp, nil } -func (rw *readerWriter) SearchTagValues(ctx context.Context, meta *backend.BlockMeta, tag string, opts common.SearchOptions) (response *tempopb.SearchTagValuesResponse, err error) { +func (rw *readerWriter) SearchTagValues(ctx context.Context, meta *backend.BlockMeta, req *tempopb.SearchTagValuesBlockRequest, opts common.SearchOptions) (response *tempopb.SearchTagValuesResponse, err error) { block, err := encoding.OpenBlock(meta, rw.r) if err != nil { return &tempopb.SearchTagValuesResponse{}, err } - dv := collector.NewDistinctString(0) + dv := collector.NewDistinctString(0, req.SearchReq.MaxTagValues, req.SearchReq.StaleValueThreshold) mc := collector.NewMetricsCollector() rw.cfg.Search.ApplyToOptions(&opts) - err = block.SearchTagValues(ctx, tag, dv.Collect, mc.Add, opts) + err = block.SearchTagValues(ctx, req.SearchReq.TagName, dv.Collect, mc.Add, opts) + + orgID, _ := user.ExtractOrgID(ctx) + if dv.Exceeded() { + level.Warn(log.Logger).Log("msg", "Search tags exceeded limit, reduce cardinality or size of tags", "orgID", orgID, "stopReason", dv.StopReason()) + } return &tempopb.SearchTagValuesResponse{ TagValues: dv.Strings(), @@ -436,7 +448,7 @@ func (rw *readerWriter) SearchTagValuesV2(ctx context.Context, meta *backend.Blo return nil, err } - dv := collector.NewDistinctValue[tempopb.TagValue](0, func(v tempopb.TagValue) int { return len(v.Type) + len(v.Value) }) + dv := collector.NewDistinctValue(0, req.MaxTagValues, req.StaleValueThreshold, func(v tempopb.TagValue) int { return len(v.Type) + len(v.Value) }) mc := collector.NewMetricsCollector() rw.cfg.Search.ApplyToOptions(&opts) err = block.SearchTagValuesV2(ctx, tag, traceql.MakeCollectTagValueFunc(dv.Collect), mc.Add, opts) @@ -444,6 +456,11 @@ func (rw *readerWriter) SearchTagValuesV2(ctx context.Context, meta *backend.Blo return nil, err } + orgID, _ := user.ExtractOrgID(ctx) + if dv.Exceeded() { + level.Warn(log.Logger).Log("msg", "Search tags exceeded limit, reduce cardinality or size of tags", "orgID", orgID, "stopReason", dv.StopReason()) + } + resp := &tempopb.SearchTagValuesV2Response{ Metrics: &tempopb.MetadataMetrics{InspectedBytes: mc.TotalValue()}, } diff --git a/tempodb/tempodb_metrics_test.go b/tempodb/tempodb_metrics_test.go new file mode 100644 index 00000000000..c2d3c0b7ce5 --- /dev/null +++ b/tempodb/tempodb_metrics_test.go @@ -0,0 +1,196 @@ +package tempodb + +import ( + "context" + "path" + "testing" + "time" + + "github.com/go-kit/log" + "github.com/grafana/tempo/pkg/model" + "github.com/grafana/tempo/pkg/tempopb" + common_v1 "github.com/grafana/tempo/pkg/tempopb/common/v1" + resource_v1 
"github.com/grafana/tempo/pkg/tempopb/resource/v1" + v1 "github.com/grafana/tempo/pkg/tempopb/trace/v1" + "github.com/grafana/tempo/pkg/traceql" + "github.com/grafana/tempo/pkg/util/test" + "github.com/grafana/tempo/tempodb/backend" + "github.com/grafana/tempo/tempodb/backend/local" + "github.com/grafana/tempo/tempodb/encoding/common" + "github.com/grafana/tempo/tempodb/encoding/vparquet4" + "github.com/grafana/tempo/tempodb/wal" + "github.com/stretchr/testify/require" +) + +var queryRangeTestCases = []struct { + req *tempopb.QueryRangeRequest + expected []*tempopb.TimeSeries +}{ + { + req: &tempopb.QueryRangeRequest{ + Start: 1, + End: 50 * uint64(time.Second), + Step: 15 * uint64(time.Second), + Query: "{ } | rate()", + }, + expected: []*tempopb.TimeSeries{ + { + PromLabels: `{__name__="rate"}`, + Labels: []common_v1.KeyValue{tempopb.MakeKeyValueString("__name__", "rate")}, + Samples: []tempopb.Sample{ + {TimestampMs: 0, Value: 14.0 / 15.0}, // First interval starts at 1, so it only has 14 spans + {TimestampMs: 15_000, Value: 1.0}, // Spans every 1 second + {TimestampMs: 30_000, Value: 1.0}, // Spans every 1 second + {TimestampMs: 45_000, Value: 5.0 / 15.0}, // Interval [45,50) has 5 spans + {TimestampMs: 60_000, Value: 0}, // I think this is a bug that we extend out an extra interval + }, + }, + }, + }, + { + req: &tempopb.QueryRangeRequest{ + Start: 1, + End: 50 * uint64(time.Second), + Step: 15 * uint64(time.Second), + Query: `{ .service.name="even" } | rate()`, + }, + expected: []*tempopb.TimeSeries{ + { + PromLabels: `{__name__="rate"}`, + Labels: []common_v1.KeyValue{tempopb.MakeKeyValueString("__name__", "rate")}, + Samples: []tempopb.Sample{ + {TimestampMs: 0, Value: 7.0 / 15.0}, // Interval [ 1, 14], 7 spans at 2, 4, 6, 8, 10, 12, 14 + {TimestampMs: 15_000, Value: 7.0 / 15.0}, // Interval [15, 29], 7 spans at 16, 18, 20, 22, 24, 26, 28 + {TimestampMs: 30_000, Value: 8.0 / 15.0}, // Interval [30, 44], 8 spans at 30, 32, 34, 36, 38, 40, 42, 44 + {TimestampMs: 45_000, Value: 2.0 / 15.0}, // Interval [45, 50), 2 spans at 46, 48 + {TimestampMs: 60_000, Value: 0}, // I think this is a bug that we extend out an extra interval + }, + }, + }, + }, +} + +func TestTempoDBQueryRange(t *testing.T) { + var ( + tempDir = t.TempDir() + blockVersion = vparquet4.VersionString + ) + + dc := backend.DedicatedColumns{ + {Scope: "resource", Name: "res-dedicated.01", Type: "string"}, + {Scope: "resource", Name: "res-dedicated.02", Type: "string"}, + {Scope: "span", Name: "span-dedicated.01", Type: "string"}, + {Scope: "span", Name: "span-dedicated.02", Type: "string"}, + } + r, w, c, err := New(&Config{ + Backend: backend.Local, + Local: &local.Config{ + Path: path.Join(tempDir, "traces"), + }, + Block: &common.BlockConfig{ + IndexDownsampleBytes: 17, + BloomFP: .01, + BloomShardSizeBytes: 100_000, + Version: blockVersion, + IndexPageSizeBytes: 1000, + RowGroupSizeBytes: 10000, + DedicatedColumns: dc, + }, + WAL: &wal.Config{ + Filepath: path.Join(tempDir, "wal"), + IngestionSlack: time.Since(time.Time{}), + }, + Search: &SearchConfig{ + ChunkSizeBytes: 1_000_000, + ReadBufferCount: 8, ReadBufferSizeBytes: 4 * 1024 * 1024, + }, + BlocklistPoll: 0, + }, nil, log.NewNopLogger()) + require.NoError(t, err) + + err = c.EnableCompaction(context.Background(), &CompactorConfig{ + ChunkSizeBytes: 10, + MaxCompactionRange: time.Hour, + BlockRetention: 0, + CompactedBlockRetention: 0, + }, &mockSharder{}, &mockOverrides{}) + require.NoError(t, err) + + ctx := context.Background() + r.EnablePolling(ctx, 
&mockJobSharder{}) + + // Write to wal + wal := w.WAL() + + meta := &backend.BlockMeta{BlockID: backend.NewUUID(), TenantID: testTenantID, DedicatedColumns: dc} + head, err := wal.NewBlock(meta, model.CurrentEncoding) + require.NoError(t, err) + dec := model.MustNewSegmentDecoder(model.CurrentEncoding) + + totalSpans := 100 + for i := 1; i <= totalSpans; i++ { + tid := test.ValidTraceID(nil) + + sp := test.MakeSpan(tid) + + // Start time is i seconds + sp.StartTimeUnixNano = uint64(i * int(time.Second)) + + // Duration is i seconds + sp.EndTimeUnixNano = sp.StartTimeUnixNano + uint64(i*int(time.Second)) + + // Service name + var svcName string + if i%2 == 0 { + svcName = "even" + } else { + svcName = "odd" + } + + tr := &tempopb.Trace{ + ResourceSpans: []*v1.ResourceSpans{ + { + Resource: &resource_v1.Resource{ + Attributes: []*common_v1.KeyValue{tempopb.MakeKeyValueStringPtr("service.name", svcName)}, + }, + ScopeSpans: []*v1.ScopeSpans{ + { + Spans: []*v1.Span{ + sp, + }, + }, + }, + }, + }, + } + + b1, err := dec.PrepareForWrite(tr, 0, 0) + require.NoError(t, err) + + b2, err := dec.ToObject([][]byte{b1}) + require.NoError(t, err) + err = head.Append(tid, b2, 0, 0) + require.NoError(t, err) + } + + // Complete block + block, err := w.CompleteBlock(context.Background(), head) + require.NoError(t, err) + + f := traceql.NewSpansetFetcherWrapper(func(ctx context.Context, req traceql.FetchSpansRequest) (traceql.FetchSpansResponse, error) { + return block.Fetch(ctx, req, common.DefaultSearchOptions()) + }) + + for _, tc := range queryRangeTestCases { + + eval, err := traceql.NewEngine().CompileMetricsQueryRange(tc.req, 0, 0, false) + require.NoError(t, err) + + err = eval.Do(ctx, f, 0, 0) + require.NoError(t, err) + + actual := eval.Results().ToProto(tc.req) + + require.Equal(t, tc.expected, actual, "Query: %v", tc.req.Query) + } +} diff --git a/tempodb/tempodb_search_test.go b/tempodb/tempodb_search_test.go index 58d72e8ba08..7c136cb98d0 100644 --- a/tempodb/tempodb_search_test.go +++ b/tempodb/tempodb_search_test.go @@ -239,6 +239,10 @@ func advancedTraceQLRunner(t *testing.T, wantTr *tempopb.Trace, wantMeta *tempop // groupin' (.foo is a known attribute that is the same on both spans) {Query: "{} | by(span.foo) | count() = 2"}, {Query: "{} | by(resource.service.name) | count() = 1"}, + // Attribute == nil (current work-around version) + {Query: "{} | select(.missing) | { !(.missing != nil)}"}, + {Query: "{} | select(span.missing) | { !(span.missing != nil)}"}, + {Query: "{} | select(resource.missing) | { !(resource.missing != nil)}"}, } searchesThatDontMatch := []*tempopb.SearchRequest{ // conditions @@ -1383,7 +1387,7 @@ func tagValuesRunner(t *testing.T, _ *tempopb.Trace, _ *tempopb.TraceSearchMetad for _, tc := range tcs { t.Run(tc.name, func(t *testing.T) { - valueCollector := collector.NewDistinctValue[tempopb.TagValue](0, func(_ tempopb.TagValue) int { return 0 }) + valueCollector := collector.NewDistinctValue[tempopb.TagValue](0, 0, 0, func(_ tempopb.TagValue) int { return 0 }) mc := collector.NewMetricsCollector() fetcher := traceql.NewTagValuesFetcherWrapper(func(ctx context.Context, req traceql.FetchTagValuesRequest, cb traceql.FetchTagValuesCallback) error { return bb.FetchTagValues(ctx, req, cb, mc.Add, common.DefaultSearchOptions()) @@ -1459,7 +1463,7 @@ func tagNamesRunner(t *testing.T, _ *tempopb.Trace, _ *tempopb.TraceSearchMetada return bb.FetchTagNames(ctx, req, cb, mc.Add, common.DefaultSearchOptions()) }) - valueCollector := collector.NewScopedDistinctString(0) + 
valueCollector := collector.NewScopedDistinctString(0, 0, 0) err := e.ExecuteTagNames(ctx, traceql.AttributeScopeFromString(tc.scope), tc.query, func(tag string, scope traceql.AttributeScope) bool { return valueCollector.Collect(scope.String(), tag) }, fetcher) @@ -2293,7 +2297,13 @@ func TestSearchForTagsAndTagValues(t *testing.T) { block, err := w.CompleteBlock(context.Background(), head) require.NoError(t, err) - resp, err := r.SearchTags(context.Background(), block.BlockMeta(), "", common.DefaultSearchOptions()) + reqBlocks := &tempopb.SearchTagsBlockRequest{ + SearchReq: &tempopb.SearchTagsRequest{ + Scope: "", + }, + } + + resp, err := r.SearchTags(context.Background(), block.BlockMeta(), reqBlocks, common.DefaultSearchOptions()) require.NoError(t, err) expectedTags := []string{"stringTag", "intTag", "service.name", "other"} var actualTags []string @@ -2304,7 +2314,13 @@ func TestSearchForTagsAndTagValues(t *testing.T) { sort.Strings(actualTags) assert.Equal(t, expectedTags, actualTags) - respValues, err := r.SearchTagValues(context.Background(), block.BlockMeta(), "service.name", common.DefaultSearchOptions()) + req := &tempopb.SearchTagValuesBlockRequest{ + SearchReq: &tempopb.SearchTagValuesRequest{ + TagName: "service.name", + }, + } + + respValues, err := r.SearchTagValues(context.Background(), block.BlockMeta(), req, common.DefaultSearchOptions()) require.NotZero(t, respValues.Metrics.InspectedBytes) require.NoError(t, err) @@ -2313,7 +2329,13 @@ func TestSearchForTagsAndTagValues(t *testing.T) { sort.Strings(respValues.TagValues) assert.Equal(t, expectedTagsValues, respValues.TagValues) - respValues, err = r.SearchTagValues(context.Background(), block.BlockMeta(), "intTag", common.DefaultSearchOptions()) + req = &tempopb.SearchTagValuesBlockRequest{ + SearchReq: &tempopb.SearchTagValuesRequest{ + TagName: "intTag", + }, + } + + respValues, err = r.SearchTagValues(context.Background(), block.BlockMeta(), req, common.DefaultSearchOptions()) require.NotZero(t, respValues.Metrics.InspectedBytes) require.NoError(t, err) @@ -2372,7 +2394,7 @@ func TestSearchForTagsAndTagValues(t *testing.T) { require.Equal(t, expected, tagValues.TagValues) require.NotZero(t, tagValues.Metrics.InspectedBytes) - valueCollector := collector.NewDistinctValue[tempopb.TagValue](0, func(_ tempopb.TagValue) int { return 0 }) + valueCollector := collector.NewDistinctValue[tempopb.TagValue](0, 0, 0, func(_ tempopb.TagValue) int { return 0 }) mc := collector.NewMetricsCollector() f := traceql.NewTagValuesFetcherWrapper(func(ctx context.Context, req traceql.FetchTagValuesRequest, cb traceql.FetchTagValuesCallback) error { return r.FetchTagValues(ctx, block.BlockMeta(), req, cb, mc.Add, common.DefaultSearchOptions()) diff --git a/tempodb/tempodb_test.go b/tempodb/tempodb_test.go index d47374f9047..7e17616c7fb 100644 --- a/tempodb/tempodb_test.go +++ b/tempodb/tempodb_test.go @@ -237,25 +237,25 @@ func checkBlocklists(t *testing.T, expectedID uuid.UUID, expectedB int, expected blocklist := rw.blocklist.Metas(testTenantID) require.Len(t, blocklist, expectedB) if expectedB > 0 && expectedID != uuid.Nil { - assert.Equal(t, expectedID, (uuid.UUID)(blocklist[0].BlockID)) + require.Equal(t, expectedID, (uuid.UUID)(blocklist[0].BlockID)) } // confirm blocklists are in starttime ascending order lastTime := time.Time{} for _, b := range blocklist { - assert.True(t, lastTime.Before(b.StartTime) || lastTime.Equal(b.StartTime)) + require.True(t, lastTime.Before(b.StartTime) || lastTime.Equal(b.StartTime)) lastTime = 
b.StartTime } compactedBlocklist := rw.blocklist.CompactedMetas(testTenantID) - assert.Len(t, compactedBlocklist, expectedCB) + require.Len(t, compactedBlocklist, expectedCB) if expectedCB > 0 && expectedID != uuid.Nil { - assert.Equal(t, expectedID, (uuid.UUID)(compactedBlocklist[0].BlockID)) + require.Equal(t, expectedID, (uuid.UUID)(compactedBlocklist[0].BlockID)) } lastTime = time.Time{} for _, b := range compactedBlocklist { - assert.True(t, lastTime.Before(b.StartTime) || lastTime.Equal(b.StartTime)) + require.True(t, lastTime.Before(b.StartTime) || lastTime.Equal(b.StartTime)) lastTime = b.StartTime } } @@ -547,7 +547,7 @@ func TestSearchCompactedBlocks(t *testing.T) { // compact var blockMetas []*backend.BlockMeta blockMetas = append(blockMetas, complete.BlockMeta()) - require.NoError(t, rw.compact(ctx, blockMetas, testTenantID)) + require.NoError(t, rw.compactOneJob(ctx, blockMetas, testTenantID)) // poll rw.pollBlocklist() diff --git a/tempodb/wal/wal.go b/tempodb/wal/wal.go index 055bbb61f5d..880e9a20b9d 100644 --- a/tempodb/wal/wal.go +++ b/tempodb/wal/wal.go @@ -1,6 +1,7 @@ package wal import ( + "flag" "fmt" "os" "path/filepath" @@ -32,7 +33,11 @@ type Config struct { Version string `yaml:"version,omitempty"` } -func ValidateConfig(c *Config) error { +func (c *Config) RegisterFlags(*flag.FlagSet) { + c.IngestionSlack = 2 * time.Minute +} + +func (c *Config) Validate() error { if _, err := encoding.FromVersion(c.Version); err != nil { return fmt.Errorf("failed to validate block version %s: %w", c.Version, err) } @@ -148,9 +153,8 @@ func (w *WAL) GetFilepath() string { return w.c.Filepath } -func (w *WAL) ClearFolder(dir string) error { - p := filepath.Join(w.c.Filepath, dir) - return os.RemoveAll(p) +func (w *WAL) Clear() error { + return os.RemoveAll(w.c.Filepath) } func (w *WAL) LocalBackend() *local.Backend { diff --git a/tools/go.mod b/tools/go.mod index 0e5e28724cb..c6da016d4f5 100644 --- a/tools/go.mod +++ b/tools/go.mod @@ -8,7 +8,7 @@ require ( github.com/grafana/tanka v0.26.0 github.com/jsonnet-bundler/jsonnet-bundler v0.5.1 github.com/psampaz/go-mod-outdated v0.9.0 - golang.org/x/tools v0.24.0 + golang.org/x/tools v0.27.0 gotest.tools/gotestsum v1.10.0 mvdan.cc/gofumpt v0.7.0 ) @@ -203,14 +203,14 @@ require ( go.uber.org/automaxprocs v1.5.3 // indirect go.uber.org/multierr v1.6.0 // indirect go.uber.org/zap v1.24.0 // indirect - golang.org/x/crypto v0.27.0 // indirect + golang.org/x/crypto v0.31.0 // indirect golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e // indirect golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f // indirect - golang.org/x/mod v0.21.0 // indirect - golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.25.0 // indirect - golang.org/x/term v0.24.0 // indirect - golang.org/x/text v0.18.0 // indirect + golang.org/x/mod v0.22.0 // indirect + golang.org/x/sync v0.10.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/term v0.27.0 // indirect + golang.org/x/text v0.21.0 // indirect google.golang.org/protobuf v1.34.2 // indirect gopkg.in/alecthomas/kingpin.v2 v2.2.6 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/tools/go.sum b/tools/go.sum index 4c76f1f6c0b..82cd68da654 100644 --- a/tools/go.sum +++ b/tools/go.sum @@ -673,8 +673,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= 
golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= -golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -719,8 +719,8 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91 golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= -golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -759,8 +759,8 @@ golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo= +golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -781,8 +781,8 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -840,8 +840,8 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= -golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -849,8 +849,8 @@ golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= -golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -861,8 +861,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= -golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -927,8 +927,8 @@ golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod 
h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.27.0 h1:qEKojBykQkQ4EynWy4S8Weg69NumxKdn40Fce3uc/8o= +golang.org/x/tools v0.27.0/go.mod h1:sUi0ZgbwW9ZPAq26Ekut+weQPR5eIM6GQLQ1Yjm1H0Q= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/tools/packaging/tempo.yaml b/tools/packaging/tempo.yaml new file mode 100644 index 00000000000..97a0d671b1b --- /dev/null +++ b/tools/packaging/tempo.yaml @@ -0,0 +1,48 @@ +stream_over_http_enabled: true +server: + http_listen_port: 3200 + log_level: info + +query_frontend: + search: + duration_slo: 5s + throughput_bytes_slo: 1.073741824e+09 + metadata_slo: + duration_slo: 5s + throughput_bytes_slo: 1.073741824e+09 + trace_by_id: + duration_slo: 5s + +distributor: + receivers: + otlp: + protocols: + grpc: + endpoint: "0.0.0.0:4317" + +metrics_generator: + registry: + external_labels: + source: tempo + cluster: docker-compose + storage: + path: /var/tempo/generator/wal + remote_write: + - url: http://prometheus:9090/api/v1/write + send_exemplars: true + traces_storage: + path: /var/tempo/generator/traces + +storage: + trace: + backend: local # backend configuration to use + wal: + path: /var/tempo/wal # where to store the wal locally + local: + path: /var/tempo/blocks + +overrides: + defaults: + metrics_generator: + processors: [service-graphs, span-metrics, local-blocks] # enables metrics generator + generate_native_histograms: both diff --git a/tools/packaging/verify-deb-install.sh b/tools/packaging/verify-deb-install.sh index 90c0cc41e3e..374f4f131bc 100755 --- a/tools/packaging/verify-deb-install.sh +++ b/tools/packaging/verify-deb-install.sh @@ -1,25 +1,11 @@ -#!/usr/bin/env sh +#!/usr/bin/env bash set -euxo pipefail -docker ps -image="$(docker ps --filter ancestor=jrei/systemd-debian:12 --latest --format "{{.ID}}")" -echo "Running on container: ${image}" +# Install tempo and check it's running +dpkg -i ./tempo_*_linux_amd64.deb +[ "$(systemctl is-active tempo)" = "active" ] || (echo "tempo is inactive" && exit 1) -dir="." -if [ -n "${CI}" ]; then - dir="/drone/src" -fi -echo "Running on directory: ${dir}" - -cat <= http.StatusMultipleChoices { return nil, &Error{ Response: resp, diff --git a/vendor/cloud.google.com/go/auth/credentials/compute.go b/vendor/cloud.google.com/go/auth/credentials/compute.go index 6f70fa353b0..8afd0472eaa 100644 --- a/vendor/cloud.google.com/go/auth/credentials/compute.go +++ b/vendor/cloud.google.com/go/auth/credentials/compute.go @@ -37,8 +37,11 @@ var ( // computeTokenProvider creates a [cloud.google.com/go/auth.TokenProvider] that // uses the metadata service to retrieve tokens. 
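
The vendored change below threads an explicit *metadata.Client into the compute token provider instead of relying on package-level metadata calls, then hands it to a caching wrapper. Roughly what such a cached provider does, sketched with simplified stand-in types rather than the real auth package API:

// token and cachedProvider are simplified stand-ins for illustration only.
type token struct {
	value  string
	expiry time.Time
}

type cachedProvider struct {
	mu          sync.Mutex
	cur         *token
	expireEarly time.Duration // refresh this long before the real expiry
	fetch       func(ctx context.Context) (*token, error)
}

func (c *cachedProvider) Token(ctx context.Context) (*token, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.cur != nil && time.Now().Before(c.cur.expiry.Add(-c.expireEarly)) {
		return c.cur, nil // cached token is still comfortably valid
	}
	t, err := c.fetch(ctx) // e.g. a round trip to the metadata server
	if err != nil {
		return nil, err
	}
	c.cur = t
	return t, nil
}
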
-func computeTokenProvider(opts *DetectOptions) auth.TokenProvider { - return auth.NewCachedTokenProvider(computeProvider{scopes: opts.Scopes}, &auth.CachedTokenProviderOptions{ +func computeTokenProvider(opts *DetectOptions, client *metadata.Client) auth.TokenProvider { + return auth.NewCachedTokenProvider(&computeProvider{ + scopes: opts.Scopes, + client: client, + }, &auth.CachedTokenProviderOptions{ ExpireEarly: opts.EarlyTokenRefresh, DisableAsyncRefresh: opts.DisableAsyncRefresh, }) @@ -47,6 +50,7 @@ func computeTokenProvider(opts *DetectOptions) auth.TokenProvider { // computeProvider fetches tokens from the google cloud metadata service. type computeProvider struct { scopes []string + client *metadata.Client } type metadataTokenResp struct { @@ -55,7 +59,7 @@ type metadataTokenResp struct { TokenType string `json:"token_type"` } -func (cs computeProvider) Token(ctx context.Context) (*auth.Token, error) { +func (cs *computeProvider) Token(ctx context.Context) (*auth.Token, error) { tokenURI, err := url.Parse(computeTokenURI) if err != nil { return nil, err @@ -65,7 +69,7 @@ func (cs computeProvider) Token(ctx context.Context) (*auth.Token, error) { v.Set("scopes", strings.Join(cs.scopes, ",")) tokenURI.RawQuery = v.Encode() } - tokenJSON, err := metadata.GetWithContext(ctx, tokenURI.String()) + tokenJSON, err := cs.client.GetWithContext(ctx, tokenURI.String()) if err != nil { return nil, fmt.Errorf("credentials: cannot fetch token: %w", err) } diff --git a/vendor/cloud.google.com/go/auth/credentials/detect.go b/vendor/cloud.google.com/go/auth/credentials/detect.go index 2d9a73edf36..a1b5a931884 100644 --- a/vendor/cloud.google.com/go/auth/credentials/detect.go +++ b/vendor/cloud.google.com/go/auth/credentials/detect.go @@ -19,6 +19,7 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "net/http" "os" "time" @@ -27,6 +28,7 @@ import ( "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/credsfile" "cloud.google.com/go/compute/metadata" + "github.com/googleapis/gax-go/v2/internallog" ) const ( @@ -96,12 +98,17 @@ func DetectDefault(opts *DetectOptions) (*auth.Credentials, error) { } if OnGCE() { + metadataClient := metadata.NewWithOptions(&metadata.Options{ + Logger: opts.logger(), + }) return auth.NewCredentials(&auth.CredentialsOptions{ - TokenProvider: computeTokenProvider(opts), - ProjectIDProvider: auth.CredentialsPropertyFunc(func(context.Context) (string, error) { - return metadata.ProjectID() + TokenProvider: computeTokenProvider(opts, metadataClient), + ProjectIDProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) { + return metadataClient.ProjectIDWithContext(ctx) }), - UniverseDomainProvider: &internal.ComputeUniverseDomainProvider{}, + UniverseDomainProvider: &internal.ComputeUniverseDomainProvider{ + MetadataClient: metadataClient, + }, }), nil } @@ -158,6 +165,11 @@ type DetectOptions struct { // The default value is "googleapis.com". This option is ignored for // authentication flows that do not support universe domain. Optional. UniverseDomain string + // Logger is used for debug logging. If provided, logging will be enabled + // at the loggers configured level. By default logging is disabled unless + // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default + // logger will be used. Optional. 
+ Logger *slog.Logger } func (o *DetectOptions) validate() error { @@ -190,7 +202,11 @@ func (o *DetectOptions) client() *http.Client { if o.Client != nil { return o.Client } - return internal.CloneDefaultClient() + return internal.DefaultClient() +} + +func (o *DetectOptions) logger() *slog.Logger { + return internallog.New(o.Logger) } func readCredentialsFile(filename string, opts *DetectOptions) (*auth.Credentials, error) { @@ -253,6 +269,7 @@ func clientCredConfigFromJSON(b []byte, opts *DetectOptions) *auth.Options3LO { AuthURL: c.AuthURI, TokenURL: c.TokenURI, Client: opts.client(), + Logger: opts.logger(), EarlyTokenExpiry: opts.EarlyTokenRefresh, AuthHandlerOpts: handleOpts, // TODO(codyoss): refactor this out. We need to add in auto-detection diff --git a/vendor/cloud.google.com/go/auth/credentials/filetypes.go b/vendor/cloud.google.com/go/auth/credentials/filetypes.go index fe93557389d..e5243e6cfbe 100644 --- a/vendor/cloud.google.com/go/auth/credentials/filetypes.go +++ b/vendor/cloud.google.com/go/auth/credentials/filetypes.go @@ -33,7 +33,7 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) { return nil, err } - var projectID, quotaProjectID, universeDomain string + var projectID, universeDomain string var tp auth.TokenProvider switch fileType { case credsfile.ServiceAccountKey: @@ -56,7 +56,6 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) { if err != nil { return nil, err } - quotaProjectID = f.QuotaProjectID universeDomain = f.UniverseDomain case credsfile.ExternalAccountKey: f, err := credsfile.ParseExternalAccount(b) @@ -67,7 +66,6 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) { if err != nil { return nil, err } - quotaProjectID = f.QuotaProjectID universeDomain = resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain) case credsfile.ExternalAccountAuthorizedUserKey: f, err := credsfile.ParseExternalAccountAuthorizedUser(b) @@ -78,7 +76,6 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) { if err != nil { return nil, err } - quotaProjectID = f.QuotaProjectID universeDomain = f.UniverseDomain case credsfile.ImpersonatedServiceAccountKey: f, err := credsfile.ParseImpersonatedServiceAccount(b) @@ -108,9 +105,9 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) { TokenProvider: auth.NewCachedTokenProvider(tp, &auth.CachedTokenProviderOptions{ ExpireEarly: opts.EarlyTokenRefresh, }), - JSON: b, - ProjectIDProvider: internalauth.StaticCredentialsProperty(projectID), - QuotaProjectIDProvider: internalauth.StaticCredentialsProperty(quotaProjectID), + JSON: b, + ProjectIDProvider: internalauth.StaticCredentialsProperty(projectID), + // TODO(codyoss): only set quota project here if there was a user override UniverseDomainProvider: internalauth.StaticCredentialsProperty(universeDomain), }), nil } @@ -127,8 +124,14 @@ func resolveUniverseDomain(optsUniverseDomain, fileUniverseDomain string) string } func handleServiceAccount(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { + ud := resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain) if opts.UseSelfSignedJWT { return configureSelfSignedJWT(f, opts) + } else if ud != "" && ud != internalauth.DefaultUniverseDomain { + // For non-GDU universe domains, token exchange is impossible and services + // must support self-signed JWTs. 
+ opts.UseSelfSignedJWT = true + return configureSelfSignedJWT(f, opts) } opts2LO := &auth.Options2LO{ Email: f.ClientEmail, @@ -138,6 +141,7 @@ func handleServiceAccount(f *credsfile.ServiceAccountFile, opts *DetectOptions) TokenURL: f.TokenURL, Subject: opts.Subject, Client: opts.client(), + Logger: opts.logger(), } if opts2LO.TokenURL == "" { opts2LO.TokenURL = jwtTokenURL @@ -156,6 +160,7 @@ func handleUserCredential(f *credsfile.UserCredentialsFile, opts *DetectOptions) EarlyTokenExpiry: opts.EarlyTokenRefresh, RefreshToken: f.RefreshToken, Client: opts.client(), + Logger: opts.logger(), } return auth.New3LOTokenProvider(opts3LO) } @@ -174,6 +179,8 @@ func handleExternalAccount(f *credsfile.ExternalAccountFile, opts *DetectOptions Scopes: opts.scopes(), WorkforcePoolUserProject: f.WorkforcePoolUserProject, Client: opts.client(), + Logger: opts.logger(), + IsDefaultClient: opts.Client == nil, } if f.ServiceAccountImpersonation != nil { externalOpts.ServiceAccountImpersonationLifetimeSeconds = f.ServiceAccountImpersonation.TokenLifetimeSeconds @@ -191,6 +198,7 @@ func handleExternalAccountAuthorizedUser(f *credsfile.ExternalAccountAuthorizedU ClientSecret: f.ClientSecret, Scopes: opts.scopes(), Client: opts.client(), + Logger: opts.logger(), } return externalaccountuser.NewTokenProvider(externalOpts) } @@ -210,6 +218,7 @@ func handleImpersonatedServiceAccount(f *credsfile.ImpersonatedServiceAccountFil Tp: tp, Delegates: f.Delegates, Client: opts.client(), + Logger: opts.logger(), }) } @@ -217,5 +226,6 @@ func handleGDCHServiceAccount(f *credsfile.GDCHServiceAccountFile, opts *DetectO return gdch.NewTokenProvider(f, &gdch.Options{ STSAudience: opts.STSAudience, Client: opts.client(), + Logger: opts.logger(), }) } diff --git a/vendor/cloud.google.com/go/auth/credentials/idtoken/cache.go b/vendor/cloud.google.com/go/auth/credentials/idtoken/cache.go deleted file mode 100644 index e6f4ff81160..00000000000 --- a/vendor/cloud.google.com/go/auth/credentials/idtoken/cache.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package idtoken - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "strconv" - "strings" - "sync" - "time" - - "cloud.google.com/go/auth/internal" -) - -type cachingClient struct { - client *http.Client - - // clock optionally specifies a func to return the current time. - // If nil, time.Now is used. 
- clock func() time.Time - - mu sync.Mutex - certs map[string]*cachedResponse -} - -func newCachingClient(client *http.Client) *cachingClient { - return &cachingClient{ - client: client, - certs: make(map[string]*cachedResponse, 2), - } -} - -type cachedResponse struct { - resp *certResponse - exp time.Time -} - -func (c *cachingClient) getCert(ctx context.Context, url string) (*certResponse, error) { - if response, ok := c.get(url); ok { - return response, nil - } - req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) - if err != nil { - return nil, err - } - resp, body, err := internal.DoRequest(c.client, req) - if err != nil { - return nil, err - } - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("idtoken: unable to retrieve cert, got status code %d", resp.StatusCode) - } - - certResp := &certResponse{} - if err := json.Unmarshal(body, &certResp); err != nil { - return nil, err - - } - c.set(url, certResp, resp.Header) - return certResp, nil -} - -func (c *cachingClient) now() time.Time { - if c.clock != nil { - return c.clock() - } - return time.Now() -} - -func (c *cachingClient) get(url string) (*certResponse, bool) { - c.mu.Lock() - defer c.mu.Unlock() - cachedResp, ok := c.certs[url] - if !ok { - return nil, false - } - if c.now().After(cachedResp.exp) { - return nil, false - } - return cachedResp.resp, true -} - -func (c *cachingClient) set(url string, resp *certResponse, headers http.Header) { - exp := c.calculateExpireTime(headers) - c.mu.Lock() - c.certs[url] = &cachedResponse{resp: resp, exp: exp} - c.mu.Unlock() -} - -// calculateExpireTime will determine the expire time for the cache based on -// HTTP headers. If there is any difficulty reading the headers the fallback is -// to set the cache to expire now. -func (c *cachingClient) calculateExpireTime(headers http.Header) time.Time { - var maxAge int - cc := strings.Split(headers.Get("cache-control"), ",") - for _, v := range cc { - if strings.Contains(v, "max-age") { - ss := strings.Split(v, "=") - if len(ss) < 2 { - return c.now() - } - ma, err := strconv.Atoi(ss[1]) - if err != nil { - return c.now() - } - maxAge = ma - } - } - a := headers.Get("age") - if a == "" { - return c.now().Add(time.Duration(maxAge) * time.Second) - } - age, err := strconv.Atoi(a) - if err != nil { - return c.now() - } - return c.now().Add(time.Duration(maxAge-age) * time.Second) -} diff --git a/vendor/cloud.google.com/go/auth/credentials/idtoken/compute.go b/vendor/cloud.google.com/go/auth/credentials/idtoken/compute.go deleted file mode 100644 index fb9c62c610d..00000000000 --- a/vendor/cloud.google.com/go/auth/credentials/idtoken/compute.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
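// [Editor's note] The deleted calculateExpireTime above derives a cache
// deadline from standard HTTP caching headers: Cache-Control's max-age,
// reduced by the Age header, falling back to "expire now" when parsing fails.
// A compact standalone variant of that arithmetic (using strings.CutPrefix
// rather than the original Contains/Split):
package main

import (
	"fmt"
	"net/http"
	"strconv"
	"strings"
	"time"
)

// expiry returns now + max-age - age, or now itself whenever the headers
// cannot be parsed, which makes the cached entry immediately stale.
func expiry(now time.Time, h http.Header) time.Time {
	maxAge := 0
	for _, d := range strings.Split(h.Get("Cache-Control"), ",") {
		if v, ok := strings.CutPrefix(strings.TrimSpace(d), "max-age="); ok {
			n, err := strconv.Atoi(v)
			if err != nil {
				return now
			}
			maxAge = n
		}
	}
	age := 0
	if a := h.Get("Age"); a != "" {
		n, err := strconv.Atoi(a)
		if err != nil {
			return now
		}
		age = n
	}
	return now.Add(time.Duration(maxAge-age) * time.Second)
}

func main() {
	h := http.Header{"Cache-Control": {"public, max-age=3600"}, "Age": {"600"}}
	fmt.Println(expiry(time.Unix(0, 0).UTC(), h)) // 1970-01-01 00:50:00 +0000 UTC
}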
- -package idtoken - -import ( - "context" - "fmt" - "net/url" - "time" - - "cloud.google.com/go/auth" - "cloud.google.com/go/auth/internal" - "cloud.google.com/go/compute/metadata" -) - -const identitySuffix = "instance/service-accounts/default/identity" - -// computeCredentials checks if this code is being run on GCE. If it is, it -// will use the metadata service to build a Credentials that fetches ID -// tokens. -func computeCredentials(opts *Options) (*auth.Credentials, error) { - if opts.CustomClaims != nil { - return nil, fmt.Errorf("idtoken: Options.CustomClaims can't be used with the metadata service, please provide a service account if you would like to use this feature") - } - tp := computeIDTokenProvider{ - audience: opts.Audience, - format: opts.ComputeTokenFormat, - client: *metadata.NewClient(opts.client()), - } - return auth.NewCredentials(&auth.CredentialsOptions{ - TokenProvider: auth.NewCachedTokenProvider(tp, &auth.CachedTokenProviderOptions{ - ExpireEarly: 5 * time.Minute, - }), - ProjectIDProvider: auth.CredentialsPropertyFunc(func(context.Context) (string, error) { - return metadata.ProjectID() - }), - UniverseDomainProvider: &internal.ComputeUniverseDomainProvider{}, - }), nil -} - -type computeIDTokenProvider struct { - audience string - format ComputeTokenFormat - client metadata.Client -} - -func (c computeIDTokenProvider) Token(ctx context.Context) (*auth.Token, error) { - v := url.Values{} - v.Set("audience", c.audience) - if c.format != ComputeTokenFormatStandard { - v.Set("format", "full") - } - if c.format == ComputeTokenFormatFullWithLicense { - v.Set("licenses", "TRUE") - } - urlSuffix := identitySuffix + "?" + v.Encode() - res, err := c.client.GetWithContext(ctx, urlSuffix) - if err != nil { - return nil, err - } - if res == "" { - return nil, fmt.Errorf("idtoken: invalid empty response from metadata service") - } - return &auth.Token{ - Value: res, - Type: internal.TokenTypeBearer, - // Compute tokens are valid for one hour: - // https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#create-id - Expiry: time.Now().Add(1 * time.Hour), - }, nil -} diff --git a/vendor/cloud.google.com/go/auth/credentials/idtoken/file.go b/vendor/cloud.google.com/go/auth/credentials/idtoken/file.go deleted file mode 100644 index 333521c9194..00000000000 --- a/vendor/cloud.google.com/go/auth/credentials/idtoken/file.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
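// [Editor's note] The deleted compute provider above requests an ID token by
// URL-encoding its options onto the metadata identity endpoint. The query
// construction in isolation (the audience value is a placeholder):
package main

import (
	"fmt"
	"net/url"
)

func identityPath(audience string, full, withLicenses bool) string {
	v := url.Values{}
	v.Set("audience", audience)
	if full {
		v.Set("format", "full")
	}
	if withLicenses {
		v.Set("licenses", "TRUE")
	}
	return "instance/service-accounts/default/identity?" + v.Encode()
}

func main() {
	fmt.Println(identityPath("https://example.com/api", true, false))
	// instance/service-accounts/default/identity?audience=https%3A%2F%2Fexample.com%2Fapi&format=full
}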
- -package idtoken - -import ( - "encoding/json" - "fmt" - "path/filepath" - "strings" - - "cloud.google.com/go/auth" - "cloud.google.com/go/auth/credentials" - "cloud.google.com/go/auth/credentials/impersonate" - "cloud.google.com/go/auth/internal" - "cloud.google.com/go/auth/internal/credsfile" -) - -const ( - jwtTokenURL = "https://oauth2.googleapis.com/token" - iamCredAud = "https://iamcredentials.googleapis.com/" -) - -var ( - defaultScopes = []string{ - "https://iamcredentials.googleapis.com/", - "https://www.googleapis.com/auth/cloud-platform", - } -) - -func credsFromBytes(b []byte, opts *Options) (*auth.Credentials, error) { - t, err := credsfile.ParseFileType(b) - if err != nil { - return nil, err - } - switch t { - case credsfile.ServiceAccountKey: - f, err := credsfile.ParseServiceAccount(b) - if err != nil { - return nil, err - } - opts2LO := &auth.Options2LO{ - Email: f.ClientEmail, - PrivateKey: []byte(f.PrivateKey), - PrivateKeyID: f.PrivateKeyID, - TokenURL: f.TokenURL, - UseIDToken: true, - } - if opts2LO.TokenURL == "" { - opts2LO.TokenURL = jwtTokenURL - } - - var customClaims map[string]interface{} - if opts != nil { - customClaims = opts.CustomClaims - } - if customClaims == nil { - customClaims = make(map[string]interface{}) - } - customClaims["target_audience"] = opts.Audience - - opts2LO.PrivateClaims = customClaims - tp, err := auth.New2LOTokenProvider(opts2LO) - if err != nil { - return nil, err - } - tp = auth.NewCachedTokenProvider(tp, nil) - return auth.NewCredentials(&auth.CredentialsOptions{ - TokenProvider: tp, - JSON: b, - ProjectIDProvider: internal.StaticCredentialsProperty(f.ProjectID), - UniverseDomainProvider: internal.StaticCredentialsProperty(f.UniverseDomain), - }), nil - case credsfile.ImpersonatedServiceAccountKey, credsfile.ExternalAccountKey: - type url struct { - ServiceAccountImpersonationURL string `json:"service_account_impersonation_url"` - } - var accountURL url - if err := json.Unmarshal(b, &accountURL); err != nil { - return nil, err - } - account := filepath.Base(accountURL.ServiceAccountImpersonationURL) - account = strings.Split(account, ":")[0] - - baseCreds, err := credentials.DetectDefault(&credentials.DetectOptions{ - Scopes: defaultScopes, - CredentialsJSON: b, - Client: opts.client(), - UseSelfSignedJWT: true, - }) - if err != nil { - return nil, err - } - - config := impersonate.IDTokenOptions{ - Audience: opts.Audience, - TargetPrincipal: account, - IncludeEmail: true, - Client: opts.client(), - Credentials: baseCreds, - } - creds, err := impersonate.NewIDTokenCredentials(&config) - if err != nil { - return nil, err - } - return auth.NewCredentials(&auth.CredentialsOptions{ - TokenProvider: creds, - JSON: b, - ProjectIDProvider: auth.CredentialsPropertyFunc(baseCreds.ProjectID), - UniverseDomainProvider: auth.CredentialsPropertyFunc(baseCreds.UniverseDomain), - QuotaProjectIDProvider: auth.CredentialsPropertyFunc(baseCreds.QuotaProjectID), - }), nil - default: - return nil, fmt.Errorf("idtoken: unsupported credentials type: %v", t) - } -} diff --git a/vendor/cloud.google.com/go/auth/credentials/idtoken/idtoken.go b/vendor/cloud.google.com/go/auth/credentials/idtoken/idtoken.go deleted file mode 100644 index 2c1ad6004ae..00000000000 --- a/vendor/cloud.google.com/go/auth/credentials/idtoken/idtoken.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
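// [Editor's note] In the service-account branch above, the requested audience
// travels as a private "target_audience" claim merged into any caller-supplied
// custom claims before the 2LO provider signs the JWT. The merge in isolation:
package main

import "fmt"

func privateClaims(custom map[string]interface{}, audience string) map[string]interface{} {
	if custom == nil {
		custom = make(map[string]interface{})
	}
	custom["target_audience"] = audience
	return custom
}

func main() {
	claims := privateClaims(map[string]interface{}{"team": "storage"}, "https://example.com/api")
	fmt.Println(claims["target_audience"], claims["team"]) // https://example.com/api storage
}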
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package idtoken - -import ( - "errors" - "fmt" - "net/http" - "os" - - "cloud.google.com/go/auth" - "cloud.google.com/go/auth/internal" - "cloud.google.com/go/auth/internal/credsfile" - "cloud.google.com/go/compute/metadata" -) - -// ComputeTokenFormat dictates the token format when requesting an ID token -// from the compute metadata service. -type ComputeTokenFormat int - -const ( - // ComputeTokenFormatDefault means the same as [ComputeTokenFormatFull]. - ComputeTokenFormatDefault ComputeTokenFormat = iota - // ComputeTokenFormatStandard means only standard JWT fields will be included - // in the token. - ComputeTokenFormatStandard - // ComputeTokenFormatFull means the token will include claims about the - // virtual machine instance and its project. - ComputeTokenFormatFull - // ComputeTokenFormatFullWithLicense means the same as - // [ComputeTokenFormatFull] with the addition of claims about licenses - // associated with the instance. - ComputeTokenFormatFullWithLicense -) - -// Options for the creation of an ID token with -// [NewCredentials]. -type Options struct { - // Audience is the `aud` field for the token, such as an API endpoint the - // token will grant access to. Required. - Audience string - // ComputeTokenFormat dictates the token format when requesting an ID - // token from the compute metadata service. Optional. - ComputeTokenFormat ComputeTokenFormat - // CustomClaims specifies private non-standard claims for an ID token. - // Optional. - CustomClaims map[string]interface{} - - // CredentialsFile overrides detection logic and sources a credential file - // from the provided filepath. Optional. - CredentialsFile string - // CredentialsJSON overrides detection logic and uses the JSON bytes as the - // source for the credential. Optional. - CredentialsJSON []byte - // Client configures the underlying client used to make network requests - // when fetching tokens. If provided this should be a fully authenticated - // client. Optional. - Client *http.Client -} - -func (o *Options) client() *http.Client { - if o == nil || o.Client == nil { - return internal.CloneDefaultClient() - } - return o.Client -} - -func (o *Options) validate() error { - if o == nil { - return errors.New("idtoken: opts must be provided") - } - if o.Audience == "" { - return errors.New("idtoken: audience must be specified") - } - return nil -} - -// NewCredentials creates a [cloud.google.com/go/auth.Credentials] that -// returns ID tokens configured by the opts provided. The parameter -// opts.Audience may not be empty.
-func NewCredentials(opts *Options) (*auth.Credentials, error) { - if err := opts.validate(); err != nil { - return nil, err - } - if b := opts.jsonBytes(); b != nil { - return credsFromBytes(b, opts) - } - if metadata.OnGCE() { - return computeCredentials(opts) - } - return nil, fmt.Errorf("idtoken: couldn't find any credentials") -} - -func (o *Options) jsonBytes() []byte { - if len(o.CredentialsJSON) > 0 { - return o.CredentialsJSON - } - var fnOverride string - if o != nil { - fnOverride = o.CredentialsFile - } - filename := credsfile.GetFileNameFromEnv(fnOverride) - if filename != "" { - b, _ := os.ReadFile(filename) - return b - } - return nil -} - -// Payload represents a decoded payload of an ID token. -type Payload struct { - Issuer string `json:"iss"` - Audience string `json:"aud"` - Expires int64 `json:"exp"` - IssuedAt int64 `json:"iat"` - Subject string `json:"sub,omitempty"` - Claims map[string]interface{} `json:"-"` -} diff --git a/vendor/cloud.google.com/go/auth/credentials/idtoken/validate.go b/vendor/cloud.google.com/go/auth/credentials/idtoken/validate.go deleted file mode 100644 index d653bf2c189..00000000000 --- a/vendor/cloud.google.com/go/auth/credentials/idtoken/validate.go +++ /dev/null @@ -1,269 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package idtoken - -import ( - "context" - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rsa" - "crypto/sha256" - "encoding/base64" - "encoding/json" - "fmt" - "math/big" - "net/http" - "strings" - "time" - - "cloud.google.com/go/auth/internal" - "cloud.google.com/go/auth/internal/jwt" -) - -const ( - es256KeySize int = 32 - googleIAPCertsURL string = "https://www.gstatic.com/iap/verify/public_key-jwk" - googleSACertsURL string = "https://www.googleapis.com/oauth2/v3/certs" -) - -var ( - defaultValidator = &Validator{client: newCachingClient(internal.CloneDefaultClient())} - // now aliases time.Now for testing. - now = time.Now -) - -// certResponse represents a list of jwks. It is the format returned from known -// Google cert endpoints. -type certResponse struct { - Keys []jwk `json:"keys"` -} - -// jwk is a simplified representation of a standard jwk. It only includes the -// fields used by Google's cert endpoints. -type jwk struct { - Alg string `json:"alg"` - Crv string `json:"crv"` - Kid string `json:"kid"` - Kty string `json:"kty"` - Use string `json:"use"` - E string `json:"e"` - N string `json:"n"` - X string `json:"x"` - Y string `json:"y"` -} - -// Validator provides a way to validate Google ID Tokens. -type Validator struct { - client *cachingClient -} - -// ValidatorOptions provides a way to configure a [Validator]. -type ValidatorOptions struct { - // Client used to make requests to the certs URL. Optional. - Client *http.Client -} - -// NewValidator creates a Validator that uses the options provided to configure -// the internal http.Client that will be used to make requests to fetch JWKs.
-func NewValidator(opts *ValidatorOptions) (*Validator, error) { - var client *http.Client - if opts != nil && opts.Client != nil { - client = opts.Client - } else { - client = internal.CloneDefaultClient() - } - return &Validator{client: newCachingClient(client)}, nil -} - -// Validate is used to validate the provided idToken with a known Google cert -// URL. If audience is not empty the audience claim of the Token is validated. -// Upon successful validation a parsed token Payload is returned allowing the -// caller to validate any additional claims. -func (v *Validator) Validate(ctx context.Context, idToken string, audience string) (*Payload, error) { - return v.validate(ctx, idToken, audience) -} - -// Validate is used to validate the provided idToken with a known Google cert -// URL. If audience is not empty the audience claim of the Token is validated. -// Upon successful validation a parsed token Payload is returned allowing the -// caller to validate any additional claims. -func Validate(ctx context.Context, idToken string, audience string) (*Payload, error) { - return defaultValidator.validate(ctx, idToken, audience) -} - -// ParsePayload parses the given token and returns its payload. -// -// Warning: This function does not validate the token prior to parsing it. -// -// ParsePayload is primarily meant to be used to inspect a token's payload. This is -// useful when validation fails and the payload needs to be inspected. -// -// Note: A successful Validate() invocation with the same token will return an -// identical payload. -func ParsePayload(idToken string) (*Payload, error) { - _, payload, _, err := parseToken(idToken) - if err != nil { - return nil, err - } - return payload, nil -} - -func (v *Validator) validate(ctx context.Context, idToken string, audience string) (*Payload, error) { - header, payload, sig, err := parseToken(idToken) - if err != nil { - return nil, err - } - - if audience != "" && payload.Audience != audience { - return nil, fmt.Errorf("idtoken: audience provided does not match aud claim in the JWT") - } - - if now().Unix() > payload.Expires { - return nil, fmt.Errorf("idtoken: token expired: now=%v, expires=%v", now().Unix(), payload.Expires) - } - hashedContent := hashHeaderPayload(idToken) - switch header.Algorithm { - case jwt.HeaderAlgRSA256: - if err := v.validateRS256(ctx, header.KeyID, hashedContent, sig); err != nil { - return nil, err - } - case "ES256": - if err := v.validateES256(ctx, header.KeyID, hashedContent, sig); err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("idtoken: expected JWT signed with RS256 or ES256 but found %q", header.Algorithm) - } - - return payload, nil -} - -func (v *Validator) validateRS256(ctx context.Context, keyID string, hashedContent []byte, sig []byte) error { - certResp, err := v.client.getCert(ctx, googleSACertsURL) - if err != nil { - return err - } - j, err := findMatchingKey(certResp, keyID) - if err != nil { - return err - } - dn, err := decode(j.N) - if err != nil { - return err - } - de, err := decode(j.E) - if err != nil { - return err - } - - pk := &rsa.PublicKey{ - N: new(big.Int).SetBytes(dn), - E: int(new(big.Int).SetBytes(de).Int64()), - } - return rsa.VerifyPKCS1v15(pk, crypto.SHA256, hashedContent, sig) -} - -func (v *Validator) validateES256(ctx context.Context, keyID string, hashedContent []byte, sig []byte) error { - certResp, err := v.client.getCert(ctx, googleIAPCertsURL) - if err != nil { - return err - } - j, err := findMatchingKey(certResp, keyID) - if err != nil { - 
return err - } - dx, err := decode(j.X) - if err != nil { - return err - } - dy, err := decode(j.Y) - if err != nil { - return err - } - - pk := &ecdsa.PublicKey{ - Curve: elliptic.P256(), - X: new(big.Int).SetBytes(dx), - Y: new(big.Int).SetBytes(dy), - } - r := big.NewInt(0).SetBytes(sig[:es256KeySize]) - s := big.NewInt(0).SetBytes(sig[es256KeySize:]) - if valid := ecdsa.Verify(pk, hashedContent, r, s); !valid { - return fmt.Errorf("idtoken: ES256 signature not valid") - } - return nil -} - -func findMatchingKey(response *certResponse, keyID string) (*jwk, error) { - if response == nil { - return nil, fmt.Errorf("idtoken: cert response is nil") - } - for _, v := range response.Keys { - if v.Kid == keyID { - return &v, nil - } - } - return nil, fmt.Errorf("idtoken: could not find matching cert keyId for the token provided") -} - -func parseToken(idToken string) (*jwt.Header, *Payload, []byte, error) { - segments := strings.Split(idToken, ".") - if len(segments) != 3 { - return nil, nil, nil, fmt.Errorf("idtoken: invalid token, token must have three segments; found %d", len(segments)) - } - // Header - dh, err := decode(segments[0]) - if err != nil { - return nil, nil, nil, fmt.Errorf("idtoken: unable to decode JWT header: %v", err) - } - var header *jwt.Header - err = json.Unmarshal(dh, &header) - if err != nil { - return nil, nil, nil, fmt.Errorf("idtoken: unable to unmarshal JWT header: %v", err) - } - - // Payload - dp, err := decode(segments[1]) - if err != nil { - return nil, nil, nil, fmt.Errorf("idtoken: unable to decode JWT claims: %v", err) - } - var payload *Payload - if err := json.Unmarshal(dp, &payload); err != nil { - return nil, nil, nil, fmt.Errorf("idtoken: unable to unmarshal JWT payload: %v", err) - } - if err := json.Unmarshal(dp, &payload.Claims); err != nil { - return nil, nil, nil, fmt.Errorf("idtoken: unable to unmarshal JWT payload claims: %v", err) - } - - // Signature - signature, err := decode(segments[2]) - if err != nil { - return nil, nil, nil, fmt.Errorf("idtoken: unable to decode JWT signature: %v", err) - } - return header, payload, signature, nil -} - -// hashHeaderPayload gets the SHA256 checksum for verification of the JWT. -func hashHeaderPayload(idtoken string) []byte { - // remove the sig from the token - content := idtoken[:strings.LastIndex(idtoken, ".")] - hashed := sha256.Sum256([]byte(content)) - return hashed[:] -} - -func decode(s string) ([]byte, error) { - return base64.RawURLEncoding.DecodeString(s) -} diff --git a/vendor/cloud.google.com/go/auth/credentials/impersonate/doc.go b/vendor/cloud.google.com/go/auth/credentials/impersonate/doc.go deleted file mode 100644 index 0dc45f13fb7..00000000000 --- a/vendor/cloud.google.com/go/auth/credentials/impersonate/doc.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package impersonate is used to impersonate Google Credentials. 
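// [Editor's note] parseToken above leans on the JWT wire format: three
// unpadded base64url segments joined by dots. A self-contained sketch of
// payload inspection without signature verification, in the spirit of
// ParsePayload (the token here is synthetic):
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"strings"
)

func decodePayload(idToken string) (map[string]interface{}, error) {
	segs := strings.Split(idToken, ".")
	if len(segs) != 3 {
		return nil, fmt.Errorf("want 3 segments, got %d", len(segs))
	}
	raw, err := base64.RawURLEncoding.DecodeString(segs[1])
	if err != nil {
		return nil, err
	}
	claims := map[string]interface{}{}
	if err := json.Unmarshal(raw, &claims); err != nil {
		return nil, err
	}
	return claims, nil
}

func main() {
	// Header and signature are placeholders; only the payload is decoded.
	tok := "h." + base64.RawURLEncoding.EncodeToString([]byte(`{"aud":"a","exp":1}`)) + ".s"
	claims, err := decodePayload(tok)
	fmt.Println(claims, err) // map[aud:a exp:1] <nil>
}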
If you need -// to impersonate some credentials to use with a client library see -// [NewCredentials]. If instead you would like to create an OpenID -// Connect ID token using impersonation see [NewIDTokenCredentials]. -// -// # Required IAM roles -// -// In order to impersonate a service account the base service account must have -// the Service Account Token Creator role, roles/iam.serviceAccountTokenCreator, -// on the service account being impersonated. See -// https://cloud.google.com/iam/docs/understanding-service-accounts. -// -// Optionally, delegates can be used during impersonation if the base service -// account lacks the token creator role on the target. When using delegates, -// each service account must be granted roles/iam.serviceAccountTokenCreator -// on the next service account in the delegation chain. -// -// For example, if a base service account of SA1 is trying to impersonate target -// service account SA2 while using delegate service accounts DSA1 and DSA2, -// the following must be true: -// -// 1. Base service account SA1 has roles/iam.serviceAccountTokenCreator on -// DSA1. -// 2. DSA1 has roles/iam.serviceAccountTokenCreator on DSA2. -// 3. DSA2 has roles/iam.serviceAccountTokenCreator on target SA2. -// -// If the base credential is an authorized user and not a service account, or if -// the option WithQuotaProject is set, the target service account must have a -// role that grants the serviceusage.services.use permission such as -// roles/serviceusage.serviceUsageConsumer. -package impersonate diff --git a/vendor/cloud.google.com/go/auth/credentials/impersonate/idtoken.go b/vendor/cloud.google.com/go/auth/credentials/impersonate/idtoken.go deleted file mode 100644 index d4affc17336..00000000000 --- a/vendor/cloud.google.com/go/auth/credentials/impersonate/idtoken.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package impersonate - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "net/http" - "time" - - "cloud.google.com/go/auth" - "cloud.google.com/go/auth/credentials" - "cloud.google.com/go/auth/httptransport" - "cloud.google.com/go/auth/internal" -) - -// IDTokenOptions for generating an impersonated ID token. -type IDTokenOptions struct { - // Audience is the `aud` field for the token, such as an API endpoint the - // token will grant access to. Required. - Audience string - // TargetPrincipal is the email address of the service account to - // impersonate. Required. - TargetPrincipal string - // IncludeEmail includes the target service account's email in the token. - // The resulting token will include both an `email` and `email_verified` - // claim. Optional. - IncludeEmail bool - // Delegates are the ordered service account email addresses in a delegation - // chain. Each service account must be granted - // roles/iam.serviceAccountTokenCreator on the next service account in the - // chain. Optional.
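// [Editor's note] The delegation chain documented above is sent to the IAM
// Credentials API as fully qualified resource names; the formatting helper
// appears later in this diff as formatIAMServiceAccountName. Applied to a
// hypothetical chain:
package main

import "fmt"

func formatIAMServiceAccountName(email string) string {
	return fmt.Sprintf("projects/-/serviceAccounts/%s", email)
}

func main() {
	for _, d := range []string{"dsa1@p.iam.gserviceaccount.com", "dsa2@p.iam.gserviceaccount.com"} {
		fmt.Println(formatIAMServiceAccountName(d))
	}
	// projects/-/serviceAccounts/dsa1@p.iam.gserviceaccount.com
	// projects/-/serviceAccounts/dsa2@p.iam.gserviceaccount.com
}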
- Delegates []string - - // Credentials used to fetch the ID token. If not provided, and a Client is - // also not provided, base credentials will try to be detected from the - // environment. Optional. - Credentials *auth.Credentials - // Client configures the underlying client used to make network requests - // when fetching tokens. If provided the client should provide its own - // base credentials at call time. Optional. - Client *http.Client -} - -func (o *IDTokenOptions) validate() error { - if o == nil { - return errors.New("impersonate: options must be provided") - } - if o.Audience == "" { - return errors.New("impersonate: audience must be provided") - } - if o.TargetPrincipal == "" { - return errors.New("impersonate: target service account must be provided") - } - return nil -} - -var ( - defaultScope = "https://www.googleapis.com/auth/cloud-platform" -) - -// NewIDTokenCredentials creates an impersonated -// [cloud.google.com/go/auth/Credentials] that returns ID tokens configured -// with the provided config and using credentials loaded from Application -// Default Credentials as the base credentials if not provided with the opts. -// The tokens produced are valid for one hour and are automatically refreshed. -func NewIDTokenCredentials(opts *IDTokenOptions) (*auth.Credentials, error) { - if err := opts.validate(); err != nil { - return nil, err - } - var client *http.Client - var creds *auth.Credentials - if opts.Client == nil && opts.Credentials == nil { - var err error - // TODO: test not signed jwt more - creds, err = credentials.DetectDefault(&credentials.DetectOptions{ - Scopes: []string{defaultScope}, - UseSelfSignedJWT: true, - }) - if err != nil { - return nil, err - } - client, err = httptransport.NewClient(&httptransport.Options{ - Credentials: creds, - }) - if err != nil { - return nil, err - } - } else if opts.Client == nil { - creds = opts.Credentials - client = internal.CloneDefaultClient() - if err := httptransport.AddAuthorizationMiddleware(client, opts.Credentials); err != nil { - return nil, err - } - } else { - client = opts.Client - } - - itp := impersonatedIDTokenProvider{ - client: client, - targetPrincipal: opts.TargetPrincipal, - audience: opts.Audience, - includeEmail: opts.IncludeEmail, - } - for _, v := range opts.Delegates { - itp.delegates = append(itp.delegates, formatIAMServiceAccountName(v)) - } - - var udp auth.CredentialsPropertyProvider - if creds != nil { - udp = auth.CredentialsPropertyFunc(creds.UniverseDomain) - } - return auth.NewCredentials(&auth.CredentialsOptions{ - TokenProvider: auth.NewCachedTokenProvider(itp, nil), - UniverseDomainProvider: udp, - }), nil -} - -type generateIDTokenRequest struct { - Audience string `json:"audience"` - IncludeEmail bool `json:"includeEmail"` - Delegates []string `json:"delegates,omitempty"` -} - -type generateIDTokenResponse struct { - Token string `json:"token"` -} - -type impersonatedIDTokenProvider struct { - client *http.Client - - targetPrincipal string - audience string - includeEmail bool - delegates []string -} - -func (i impersonatedIDTokenProvider) Token(ctx context.Context) (*auth.Token, error) { - genIDTokenReq := generateIDTokenRequest{ - Audience: i.audience, - IncludeEmail: i.includeEmail, - Delegates: i.delegates, - } - bodyBytes, err := json.Marshal(genIDTokenReq) - if err != nil { - return nil, fmt.Errorf("impersonate: unable to marshal request: %w", err) - } - - url := fmt.Sprintf("%s/v1/%s:generateIdToken", iamCredentialsEndpoint, formatIAMServiceAccountName(i.targetPrincipal))
- req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(bodyBytes)) - if err != nil { - return nil, fmt.Errorf("impersonate: unable to create request: %w", err) - } - req.Header.Set("Content-Type", "application/json") - resp, body, err := internal.DoRequest(i.client, req) - if err != nil { - return nil, fmt.Errorf("impersonate: unable to generate ID token: %w", err) - } - if c := resp.StatusCode; c < 200 || c > 299 { - return nil, fmt.Errorf("impersonate: status code %d: %s", c, body) - } - - var generateIDTokenResp generateIDTokenResponse - if err := json.Unmarshal(body, &generateIDTokenResp); err != nil { - return nil, fmt.Errorf("impersonate: unable to parse response: %w", err) - } - return &auth.Token{ - Value: generateIDTokenResp.Token, - // Generated ID tokens are good for one hour. - Expiry: time.Now().Add(1 * time.Hour), - }, nil -} diff --git a/vendor/cloud.google.com/go/auth/credentials/impersonate/impersonate.go b/vendor/cloud.google.com/go/auth/credentials/impersonate/impersonate.go deleted file mode 100644 index 0be955acde8..00000000000 --- a/vendor/cloud.google.com/go/auth/credentials/impersonate/impersonate.go +++ /dev/null @@ -1,266 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package impersonate - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "net/http" - "time" - - "cloud.google.com/go/auth" - "cloud.google.com/go/auth/credentials" - "cloud.google.com/go/auth/httptransport" - "cloud.google.com/go/auth/internal" -) - -var ( - iamCredentialsEndpoint = "https://iamcredentials.googleapis.com" - oauth2Endpoint = "https://oauth2.googleapis.com" - errMissingTargetPrincipal = errors.New("impersonate: target service account must be provided") - errMissingScopes = errors.New("impersonate: scopes must be provided") - errLifetimeOverMax = errors.New("impersonate: max lifetime is 12 hours") - errUniverseNotSupportedDomainWideDelegation = errors.New("impersonate: service account user is configured for the credential. " + - "Domain-wide delegation is not supported in universes other than googleapis.com") -) - -// TODO(codyoss): plumb through base for this and idtoken - -// NewCredentials returns an impersonated -// [cloud.google.com/go/auth/NewCredentials] configured with the provided options -// and using credentials loaded from Application Default Credentials as the base -// credentials if not provided with the opts. -func NewCredentials(opts *CredentialsOptions) (*auth.Credentials, error) { - if err := opts.validate(); err != nil { - return nil, err - } - - var isStaticToken bool - // Default to the longest acceptable value of one hour as the token will - // be refreshed automatically if not set. - lifetime := 1 * time.Hour - if opts.Lifetime != 0 { - lifetime = opts.Lifetime - // Don't auto-refresh token if a lifetime is configured. 
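// [Editor's note] The lifetime handling being set up here reduces to one
// rule: no explicit Lifetime means a one-hour token that the cached provider
// keeps refreshing, while an explicit Lifetime yields a single static token.
// A sketch of just that decision:
package main

import (
	"fmt"
	"time"
)

func tokenPolicy(requested time.Duration) (lifetime time.Duration, static bool) {
	if requested == 0 {
		return time.Hour, false // default: auto-refreshing one-hour token
	}
	return requested, true // caller-fixed lifetime: no auto-refresh
}

func main() {
	fmt.Println(tokenPolicy(0))                // 1h0m0s false
	fmt.Println(tokenPolicy(30 * time.Minute)) // 30m0s true
}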
- isStaticToken = true - } - - var client *http.Client - var creds *auth.Credentials - if opts.Client == nil && opts.Credentials == nil { - var err error - creds, err = credentials.DetectDefault(&credentials.DetectOptions{ - Scopes: []string{defaultScope}, - UseSelfSignedJWT: true, - }) - if err != nil { - return nil, err - } - client, err = httptransport.NewClient(&httptransport.Options{ - Credentials: creds, - }) - if err != nil { - return nil, err - } - } else if opts.Credentials != nil { - creds = opts.Credentials - client = internal.CloneDefaultClient() - if err := httptransport.AddAuthorizationMiddleware(client, opts.Credentials); err != nil { - return nil, err - } - } else { - client = opts.Client - } - - // If a subject is specified a domain-wide delegation auth-flow is initiated - // to impersonate as the provided subject (user). - if opts.Subject != "" { - if !opts.isUniverseDomainGDU() { - return nil, errUniverseNotSupportedDomainWideDelegation - } - tp, err := user(opts, client, lifetime, isStaticToken) - if err != nil { - return nil, err - } - var udp auth.CredentialsPropertyProvider - if creds != nil { - udp = auth.CredentialsPropertyFunc(creds.UniverseDomain) - } - return auth.NewCredentials(&auth.CredentialsOptions{ - TokenProvider: tp, - UniverseDomainProvider: udp, - }), nil - } - - its := impersonatedTokenProvider{ - client: client, - targetPrincipal: opts.TargetPrincipal, - lifetime: fmt.Sprintf("%.fs", lifetime.Seconds()), - } - for _, v := range opts.Delegates { - its.delegates = append(its.delegates, formatIAMServiceAccountName(v)) - } - its.scopes = make([]string, len(opts.Scopes)) - copy(its.scopes, opts.Scopes) - - var tpo *auth.CachedTokenProviderOptions - if isStaticToken { - tpo = &auth.CachedTokenProviderOptions{ - DisableAutoRefresh: true, - } - } - - var udp auth.CredentialsPropertyProvider - if creds != nil { - udp = auth.CredentialsPropertyFunc(creds.UniverseDomain) - } - return auth.NewCredentials(&auth.CredentialsOptions{ - TokenProvider: auth.NewCachedTokenProvider(its, tpo), - UniverseDomainProvider: udp, - }), nil -} - -// CredentialsOptions for generating an impersonated credential token. -type CredentialsOptions struct { - // TargetPrincipal is the email address of the service account to - // impersonate. Required. - TargetPrincipal string - // Scopes that the impersonated credential should have. Required. - Scopes []string - // Delegates are the service account email addresses in a delegation chain. - // Each service account must be granted roles/iam.serviceAccountTokenCreator - // on the next service account in the chain. Optional. - Delegates []string - // Lifetime is the amount of time until the impersonated token expires. If - // unset the token's lifetime will be one hour and be automatically - // refreshed. If set the token may have a max lifetime of one hour and will - // not be refreshed. Service accounts that have been added to an org policy - // with constraints/iam.allowServiceAccountCredentialLifetimeExtension may - // request a token lifetime of up to 12 hours. Optional. - Lifetime time.Duration - // Subject is the sub field of a JWT. This field should only be set if you - // wish to impersonate as a user. This feature is useful when using domain - // wide delegation. Optional. - Subject string - - // Credentials is the provider of the credentials used to fetch the ID - // token. If not provided, and a Client is also not provided, credentials - // will try to be detected from the environment. Optional. 
- Credentials *auth.Credentials - // Client configures the underlying client used to make network requests - // when fetching tokens. If provided the client should provide its own - // credentials at call time. Optional. - Client *http.Client - // UniverseDomain is the default service domain for a given Cloud universe. - // The default value is "googleapis.com". Optional. - UniverseDomain string -} - -func (o *CredentialsOptions) validate() error { - if o == nil { - return errors.New("impersonate: options must be provided") - } - if o.TargetPrincipal == "" { - return errMissingTargetPrincipal - } - if len(o.Scopes) == 0 { - return errMissingScopes - } - if o.Lifetime.Hours() > 12 { - return errLifetimeOverMax - } - return nil -} - -// getUniverseDomain returns the default service domain for a given Cloud universe. -// The default value is "googleapis.com". -func (o *CredentialsOptions) getUniverseDomain() string { - if o.UniverseDomain == "" { - return internal.DefaultUniverseDomain - } - return o.UniverseDomain -} - -// isUniverseDomainGDU returns true if the universe domain is the default Google -// universe. -func (o *CredentialsOptions) isUniverseDomainGDU() bool { - return o.getUniverseDomain() == internal.DefaultUniverseDomain -} - -func formatIAMServiceAccountName(name string) string { - return fmt.Sprintf("projects/-/serviceAccounts/%s", name) -} - -type generateAccessTokenRequest struct { - Delegates []string `json:"delegates,omitempty"` - Lifetime string `json:"lifetime,omitempty"` - Scope []string `json:"scope,omitempty"` -} - -type generateAccessTokenResponse struct { - AccessToken string `json:"accessToken"` - ExpireTime string `json:"expireTime"` -} - -type impersonatedTokenProvider struct { - client *http.Client - - targetPrincipal string - lifetime string - scopes []string - delegates []string -} - -// Token returns an impersonated Token.
-func (i impersonatedTokenProvider) Token(ctx context.Context) (*auth.Token, error) { - reqBody := generateAccessTokenRequest{ - Delegates: i.delegates, - Lifetime: i.lifetime, - Scope: i.scopes, - } - b, err := json.Marshal(reqBody) - if err != nil { - return nil, fmt.Errorf("impersonate: unable to marshal request: %w", err) - } - url := fmt.Sprintf("%s/v1/%s:generateAccessToken", iamCredentialsEndpoint, formatIAMServiceAccountName(i.targetPrincipal)) - req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(b)) - if err != nil { - return nil, fmt.Errorf("impersonate: unable to create request: %w", err) - } - req.Header.Set("Content-Type", "application/json") - resp, body, err := internal.DoRequest(i.client, req) - if err != nil { - return nil, fmt.Errorf("impersonate: unable to generate access token: %w", err) - } - if c := resp.StatusCode; c < 200 || c > 299 { - return nil, fmt.Errorf("impersonate: status code %d: %s", c, body) - } - - var accessTokenResp generateAccessTokenResponse - if err := json.Unmarshal(body, &accessTokenResp); err != nil { - return nil, fmt.Errorf("impersonate: unable to parse response: %w", err) - } - expiry, err := time.Parse(time.RFC3339, accessTokenResp.ExpireTime) - if err != nil { - return nil, fmt.Errorf("impersonate: unable to parse expiry: %w", err) - } - return &auth.Token{ - Value: accessTokenResp.AccessToken, - Expiry: expiry, - }, nil -} diff --git a/vendor/cloud.google.com/go/auth/credentials/impersonate/user.go b/vendor/cloud.google.com/go/auth/credentials/impersonate/user.go deleted file mode 100644 index 1acaaa922d9..00000000000 --- a/vendor/cloud.google.com/go/auth/credentials/impersonate/user.go +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package impersonate - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - "time" - - "cloud.google.com/go/auth" - "cloud.google.com/go/auth/internal" -) - -// user provides an auth flow for domain-wide delegation, setting -// CredentialsConfig.Subject to be the impersonated user. 
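// [Editor's note] The Token method above posts a small JSON body to
// generateAccessToken; note the lifetime is serialized as a whole-seconds
// string, matching fmt.Sprintf("%.fs", lifetime.Seconds()) earlier in this
// file. The body, marshaled standalone:
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

type generateAccessTokenRequest struct {
	Delegates []string `json:"delegates,omitempty"`
	Lifetime  string   `json:"lifetime,omitempty"`
	Scope     []string `json:"scope,omitempty"`
}

func main() {
	req := generateAccessTokenRequest{
		Lifetime: fmt.Sprintf("%.fs", time.Hour.Seconds()),
		Scope:    []string{"https://www.googleapis.com/auth/cloud-platform"},
	}
	b, _ := json.Marshal(req)
	fmt.Println(string(b))
	// {"lifetime":"3600s","scope":["https://www.googleapis.com/auth/cloud-platform"]}
}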
-func user(opts *CredentialsOptions, client *http.Client, lifetime time.Duration, isStaticToken bool) (auth.TokenProvider, error) { - u := userTokenProvider{ - client: client, - targetPrincipal: opts.TargetPrincipal, - subject: opts.Subject, - lifetime: lifetime, - } - u.delegates = make([]string, len(opts.Delegates)) - for i, v := range opts.Delegates { - u.delegates[i] = formatIAMServiceAccountName(v) - } - u.scopes = make([]string, len(opts.Scopes)) - copy(u.scopes, opts.Scopes) - var tpo *auth.CachedTokenProviderOptions - if isStaticToken { - tpo = &auth.CachedTokenProviderOptions{ - DisableAutoRefresh: true, - } - } - return auth.NewCachedTokenProvider(u, tpo), nil -} - -type claimSet struct { - Iss string `json:"iss"` - Scope string `json:"scope,omitempty"` - Sub string `json:"sub,omitempty"` - Aud string `json:"aud"` - Iat int64 `json:"iat"` - Exp int64 `json:"exp"` -} - -type signJWTRequest struct { - Payload string `json:"payload"` - Delegates []string `json:"delegates,omitempty"` -} - -type signJWTResponse struct { - // KeyID is the key used to sign the JWT. - KeyID string `json:"keyId"` - // SignedJwt contains the automatically generated header; the - // client-supplied payload; and the signature, which is generated using - // the key referenced by the `kid` field in the header. - SignedJWT string `json:"signedJwt"` -} - -type exchangeTokenResponse struct { - AccessToken string `json:"access_token"` - TokenType string `json:"token_type"` - ExpiresIn int64 `json:"expires_in"` -} - -type userTokenProvider struct { - client *http.Client - - targetPrincipal string - subject string - scopes []string - lifetime time.Duration - delegates []string -} - -func (u userTokenProvider) Token(ctx context.Context) (*auth.Token, error) { - signedJWT, err := u.signJWT(ctx) - if err != nil { - return nil, err - } - return u.exchangeToken(ctx, signedJWT) -} - -func (u userTokenProvider) signJWT(ctx context.Context) (string, error) { - now := time.Now() - exp := now.Add(u.lifetime) - claims := claimSet{ - Iss: u.targetPrincipal, - Scope: strings.Join(u.scopes, " "), - Sub: u.subject, - Aud: fmt.Sprintf("%s/token", oauth2Endpoint), - Iat: now.Unix(), - Exp: exp.Unix(), - } - payloadBytes, err := json.Marshal(claims) - if err != nil { - return "", fmt.Errorf("impersonate: unable to marshal claims: %w", err) - } - signJWTReq := signJWTRequest{ - Payload: string(payloadBytes), - Delegates: u.delegates, - } - - bodyBytes, err := json.Marshal(signJWTReq) - if err != nil { - return "", fmt.Errorf("impersonate: unable to marshal request: %w", err) - } - reqURL := fmt.Sprintf("%s/v1/%s:signJwt", iamCredentialsEndpoint, formatIAMServiceAccountName(u.targetPrincipal)) - req, err := http.NewRequestWithContext(ctx, "POST", reqURL, bytes.NewReader(bodyBytes)) - if err != nil { - return "", fmt.Errorf("impersonate: unable to create request: %w", err) - } - req.Header.Set("Content-Type", "application/json") - resp, body, err := internal.DoRequest(u.client, req) - if err != nil { - return "", fmt.Errorf("impersonate: unable to sign JWT: %w", err) - } - if c := resp.StatusCode; c < 200 || c > 299 { - return "", fmt.Errorf("impersonate: status code %d: %s", c, body) - } - - var signJWTResp signJWTResponse - if err := json.Unmarshal(body, &signJWTResp); err != nil { - return "", fmt.Errorf("impersonate: unable to parse response: %w", err) - } - return signJWTResp.SignedJWT, nil -} - -func (u userTokenProvider) exchangeToken(ctx context.Context, signedJWT string) (*auth.Token, error) { - v := url.Values{} - 
v.Set("grant_type", "assertion") - v.Set("assertion_type", "http://oauth.net/grant_type/jwt/1.0/bearer") - v.Set("assertion", signedJWT) - req, err := http.NewRequestWithContext(ctx, "POST", fmt.Sprintf("%s/token", oauth2Endpoint), strings.NewReader(v.Encode())) - if err != nil { - return nil, err - } - resp, body, err := internal.DoRequest(u.client, req) - if err != nil { - return nil, fmt.Errorf("impersonate: unable to exchange token: %w", err) - } - if c := resp.StatusCode; c < 200 || c > 299 { - return nil, fmt.Errorf("impersonate: status code %d: %s", c, body) - } - - var tokenResp exchangeTokenResponse - if err := json.Unmarshal(body, &tokenResp); err != nil { - return nil, fmt.Errorf("impersonate: unable to parse response: %w", err) - } - - return &auth.Token{ - Value: tokenResp.AccessToken, - Type: tokenResp.TokenType, - Expiry: time.Now().Add(time.Second * time.Duration(tokenResp.ExpiresIn)), - }, nil -} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go index a34f6b06f84..9ecd1f64bd5 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go @@ -23,6 +23,7 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "net/http" "net/url" "os" @@ -32,6 +33,7 @@ import ( "time" "cloud.google.com/go/auth/internal" + "github.com/googleapis/gax-go/v2/internallog" ) var ( @@ -87,6 +89,7 @@ type awsSubjectProvider struct { reqOpts *RequestOptions Client *http.Client + logger *slog.Logger } func (sp *awsSubjectProvider) subjectToken(ctx context.Context) (string, error) { @@ -94,32 +97,30 @@ func (sp *awsSubjectProvider) subjectToken(ctx context.Context) (string, error) if sp.RegionalCredVerificationURL == "" { sp.RegionalCredVerificationURL = defaultRegionalCredentialVerificationURL } - if sp.requestSigner == nil { - headers := make(map[string]string) - if sp.shouldUseMetadataServer() { - awsSessionToken, err := sp.getAWSSessionToken(ctx) - if err != nil { - return "", err - } - - if awsSessionToken != "" { - headers[awsIMDSv2SessionTokenHeader] = awsSessionToken - } - } - - awsSecurityCredentials, err := sp.getSecurityCredentials(ctx, headers) + headers := make(map[string]string) + if sp.shouldUseMetadataServer() { + awsSessionToken, err := sp.getAWSSessionToken(ctx) if err != nil { return "", err } - if sp.region, err = sp.getRegion(ctx, headers); err != nil { - return "", err - } - sp.requestSigner = &awsRequestSigner{ - RegionName: sp.region, - AwsSecurityCredentials: awsSecurityCredentials, + + if awsSessionToken != "" { + headers[awsIMDSv2SessionTokenHeader] = awsSessionToken } } + awsSecurityCredentials, err := sp.getSecurityCredentials(ctx, headers) + if err != nil { + return "", err + } + if sp.region, err = sp.getRegion(ctx, headers); err != nil { + return "", err + } + sp.requestSigner = &awsRequestSigner{ + RegionName: sp.region, + AwsSecurityCredentials: awsSecurityCredentials, + } + // Generate the signed request to AWS STS GetCallerIdentity API. // Use the required regional endpoint. Otherwise, the request will fail. 
req, err := http.NewRequestWithContext(ctx, "POST", strings.Replace(sp.RegionalCredVerificationURL, "{region}", sp.region, 1), nil) @@ -194,10 +195,12 @@ func (sp *awsSubjectProvider) getAWSSessionToken(ctx context.Context) (string, e } req.Header.Set(awsIMDSv2SessionTTLHeader, awsIMDSv2SessionTTL) + sp.logger.DebugContext(ctx, "aws session token request", "request", internallog.HTTPRequest(req, nil)) resp, body, err := internal.DoRequest(sp.Client, req) if err != nil { return "", err } + sp.logger.DebugContext(ctx, "aws session token response", "response", internallog.HTTPResponse(resp, body)) if resp.StatusCode != http.StatusOK { return "", fmt.Errorf("credentials: unable to retrieve AWS session token: %s", body) } @@ -227,10 +230,12 @@ func (sp *awsSubjectProvider) getRegion(ctx context.Context, headers map[string] for name, value := range headers { req.Header.Add(name, value) } + sp.logger.DebugContext(ctx, "aws region request", "request", internallog.HTTPRequest(req, nil)) resp, body, err := internal.DoRequest(sp.Client, req) if err != nil { return "", err } + sp.logger.DebugContext(ctx, "aws region response", "response", internallog.HTTPResponse(resp, body)) if resp.StatusCode != http.StatusOK { return "", fmt.Errorf("credentials: unable to retrieve AWS region - %s", body) } @@ -285,10 +290,12 @@ func (sp *awsSubjectProvider) getMetadataSecurityCredentials(ctx context.Context for name, value := range headers { req.Header.Add(name, value) } + sp.logger.DebugContext(ctx, "aws security credential request", "request", internallog.HTTPRequest(req, nil)) resp, body, err := internal.DoRequest(sp.Client, req) if err != nil { return result, err } + sp.logger.DebugContext(ctx, "aws security credential response", "response", internallog.HTTPResponse(resp, body)) if resp.StatusCode != http.StatusOK { return result, fmt.Errorf("credentials: unable to retrieve AWS security credentials - %s", body) } @@ -310,10 +317,12 @@ func (sp *awsSubjectProvider) getMetadataRoleName(ctx context.Context, headers m req.Header.Add(name, value) } + sp.logger.DebugContext(ctx, "aws metadata role request", "request", internallog.HTTPRequest(req, nil)) resp, body, err := internal.DoRequest(sp.Client, req) if err != nil { return "", err } + sp.logger.DebugContext(ctx, "aws metadata role response", "response", internallog.HTTPResponse(resp, body)) if resp.StatusCode != http.StatusOK { return "", fmt.Errorf("credentials: unable to retrieve AWS role name - %s", body) } diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go index b19c6edeae5..a8220642348 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go @@ -18,6 +18,7 @@ import ( "context" "errors" "fmt" + "log/slog" "net/http" "regexp" "strconv" @@ -28,6 +29,7 @@ import ( "cloud.google.com/go/auth/credentials/internal/impersonate" "cloud.google.com/go/auth/credentials/internal/stsexchange" "cloud.google.com/go/auth/internal/credsfile" + "github.com/googleapis/gax-go/v2/internallog" ) const ( @@ -100,6 +102,15 @@ type Options struct { AwsSecurityCredentialsProvider AwsSecurityCredentialsProvider // Client for token request. Client *http.Client + // IsDefaultClient marks whether the client passed in is a default client that can be overriden. 
+ // This is important for X509 credentials which should create a new client if the default was used + // but should respect a client explicitly passed in by the user. + IsDefaultClient bool + // Logger is used for debug logging. If provided, logging will be enabled + // at the loggers configured level. By default logging is disabled unless + // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default + // logger will be used. Optional. + Logger *slog.Logger } // SubjectTokenProvider can be used to supply a subject token to exchange for a @@ -181,6 +192,26 @@ func (o *Options) validate() error { return nil } +// client returns the http client that should be used for the token exchange. If a non-default client +// is provided, then the client configured in the options will always be returned. If a default client +// is provided and the options are configured for X509 credentials, a new client will be created. +func (o *Options) client() (*http.Client, error) { + // If a client was provided and no override certificate config location was provided, use the provided client. + if o.CredentialSource == nil || o.CredentialSource.Certificate == nil || (!o.IsDefaultClient && o.CredentialSource.Certificate.CertificateConfigLocation == "") { + return o.Client, nil + } + + // If a new client should be created, validate and use the certificate source to create a new mTLS client. + cert := o.CredentialSource.Certificate + if !cert.UseDefaultCertificateConfig && cert.CertificateConfigLocation == "" { + return nil, errors.New("credentials: \"certificate\" object must either specify a certificate_config_location or use_default_certificate_config should be true") + } + if cert.UseDefaultCertificateConfig && cert.CertificateConfigLocation != "" { + return nil, errors.New("credentials: \"certificate\" object cannot specify both a certificate_config_location and use_default_certificate_config=true") + } + return createX509Client(cert.CertificateConfigLocation) +} + // resolveTokenURL sets the default STS token endpoint with the configured // universe domain. func (o *Options) resolveTokenURL() { @@ -200,15 +231,24 @@ func NewTokenProvider(opts *Options) (auth.TokenProvider, error) { return nil, err } opts.resolveTokenURL() + logger := internallog.New(opts.Logger) stp, err := newSubjectTokenProvider(opts) if err != nil { return nil, err } + + client, err := opts.client() + if err != nil { + return nil, err + } + tp := &tokenProvider{ - client: opts.Client, + client: client, opts: opts, stp: stp, + logger: logger, } + if opts.ServiceAccountImpersonationURL == "" { return auth.NewCachedTokenProvider(tp, nil), nil } @@ -218,11 +258,12 @@ func NewTokenProvider(opts *Options) (auth.TokenProvider, error) { // needed for impersonation tp.opts.Scopes = []string{"https://www.googleapis.com/auth/cloud-platform"} imp, err := impersonate.NewTokenProvider(&impersonate.Options{ - Client: opts.Client, + Client: client, URL: opts.ServiceAccountImpersonationURL, Scopes: scopes, Tp: auth.NewCachedTokenProvider(tp, nil), TokenLifetimeSeconds: opts.ServiceAccountImpersonationLifetimeSeconds, + Logger: logger, }) if err != nil { return nil, err @@ -238,6 +279,7 @@ type subjectTokenProvider interface { // tokenProvider is the provider that handles external credentials. It is used to retrieve Tokens. 
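// [Editor's note] The certificate checks in client() above (repeated in
// newSubjectTokenProvider below) reduce to "exactly one source": either name
// a certificate config file or opt into the default config, never both and
// never neither. As a small standalone predicate:
package main

import (
	"errors"
	"fmt"
)

func validateCertSource(useDefault bool, configLocation string) error {
	if !useDefault && configLocation == "" {
		return errors.New("must set certificate_config_location or use_default_certificate_config")
	}
	if useDefault && configLocation != "" {
		return errors.New("cannot set both certificate_config_location and use_default_certificate_config")
	}
	return nil
}

func main() {
	fmt.Println(validateCertSource(false, ""))              // neither: error
	fmt.Println(validateCertSource(true, "/etc/cert.json")) // both: error
	fmt.Println(validateCertSource(true, ""))               // ok: <nil>
}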
type tokenProvider struct { client *http.Client + logger *slog.Logger opts *Options stp subjectTokenProvider } @@ -279,6 +321,7 @@ func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) { Authentication: clientAuth, Headers: header, ExtraOpts: options, + Logger: tp.logger, }) if err != nil { return nil, err @@ -299,12 +342,14 @@ func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) { // newSubjectTokenProvider determines the type of credsfile.CredentialSource needed to create a // subjectTokenProvider func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) { + logger := internallog.New(o.Logger) reqOpts := &RequestOptions{Audience: o.Audience, SubjectTokenType: o.SubjectTokenType} if o.AwsSecurityCredentialsProvider != nil { return &awsSubjectProvider{ securityCredentialsProvider: o.AwsSecurityCredentialsProvider, TargetResource: o.Audience, reqOpts: reqOpts, + logger: logger, }, nil } else if o.SubjectTokenProvider != nil { return &programmaticProvider{stp: o.SubjectTokenProvider, opts: reqOpts}, nil @@ -321,6 +366,7 @@ func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) { CredVerificationURL: o.CredentialSource.URL, TargetResource: o.Audience, Client: o.Client, + logger: logger, } if o.CredentialSource.IMDSv2SessionTokenURL != "" { awsProvider.IMDSv2SessionTokenURL = o.CredentialSource.IMDSv2SessionTokenURL @@ -331,7 +377,13 @@ func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) { } else if o.CredentialSource.File != "" { return &fileSubjectProvider{File: o.CredentialSource.File, Format: o.CredentialSource.Format}, nil } else if o.CredentialSource.URL != "" { - return &urlSubjectProvider{URL: o.CredentialSource.URL, Headers: o.CredentialSource.Headers, Format: o.CredentialSource.Format, Client: o.Client}, nil + return &urlSubjectProvider{ + URL: o.CredentialSource.URL, + Headers: o.CredentialSource.Headers, + Format: o.CredentialSource.Format, + Client: o.Client, + Logger: logger, + }, nil } else if o.CredentialSource.Executable != nil { ec := o.CredentialSource.Executable if ec.Command == "" { @@ -353,6 +405,15 @@ func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) { execProvider.opts = o execProvider.env = runtimeEnvironment{} return execProvider, nil + } else if o.CredentialSource.Certificate != nil { + cert := o.CredentialSource.Certificate + if !cert.UseDefaultCertificateConfig && cert.CertificateConfigLocation == "" { + return nil, errors.New("credentials: \"certificate\" object must either specify a certificate_config_location or use_default_certificate_config should be true") + } + if cert.UseDefaultCertificateConfig && cert.CertificateConfigLocation != "" { + return nil, errors.New("credentials: \"certificate\" object cannot specify both a certificate_config_location and use_default_certificate_config=true") + } + return &x509Provider{}, nil } return nil, errors.New("credentials: unable to parse credential source") } diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go index e33d35a2687..754ecf4fef9 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go @@ -19,10 +19,12 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "net/http" "cloud.google.com/go/auth/internal" 
"cloud.google.com/go/auth/internal/credsfile" + "github.com/googleapis/gax-go/v2/internallog" ) const ( @@ -30,6 +32,7 @@ const ( fileTypeJSON = "json" urlProviderType = "url" programmaticProviderType = "programmatic" + x509ProviderType = "x509" ) type urlSubjectProvider struct { @@ -37,6 +40,7 @@ type urlSubjectProvider struct { Headers map[string]string Format *credsfile.Format Client *http.Client + Logger *slog.Logger } func (sp *urlSubjectProvider) subjectToken(ctx context.Context) (string, error) { @@ -48,10 +52,12 @@ func (sp *urlSubjectProvider) subjectToken(ctx context.Context) (string, error) for key, val := range sp.Headers { req.Header.Add(key, val) } + sp.Logger.DebugContext(ctx, "url subject token request", "request", internallog.HTTPRequest(req, nil)) resp, body, err := internal.DoRequest(sp.Client, req) if err != nil { return "", fmt.Errorf("credentials: invalid response when retrieving subject token: %w", err) } + sp.Logger.DebugContext(ctx, "url subject token response", "response", internallog.HTTPResponse(resp, body)) if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices { return "", fmt.Errorf("credentials: status code %d: %s", c, body) } diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go new file mode 100644 index 00000000000..115df5881f1 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go @@ -0,0 +1,63 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package externalaccount + +import ( + "context" + "crypto/tls" + "net/http" + "time" + + "cloud.google.com/go/auth/internal/transport/cert" +) + +// x509Provider implements the subjectTokenProvider type for +// x509 workload identity credentials. Because x509 credentials +// rely on an mTLS connection to represent the 3rd party identity +// rather than a subject token, this provider will always return +// an empty string when a subject token is requested by the external account +// token provider. +type x509Provider struct { +} + +func (xp *x509Provider) providerType() string { + return x509ProviderType +} + +func (xp *x509Provider) subjectToken(ctx context.Context) (string, error) { + return "", nil +} + +// createX509Client creates a new client that is configured with mTLS, using the +// certificate configuration specified in the credential source. +func createX509Client(certificateConfigLocation string) (*http.Client, error) { + certProvider, err := cert.NewWorkloadX509CertProvider(certificateConfigLocation) + if err != nil { + return nil, err + } + trans := http.DefaultTransport.(*http.Transport).Clone() + + trans.TLSClientConfig = &tls.Config{ + GetClientCertificate: certProvider, + } + + // Create a client with default settings plus the X509 workload cert and key. 
+ client := &http.Client{ + Transport: trans, + Timeout: 30 * time.Second, + } + + return client, nil +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go index 0d788547987..ae39206e5f3 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go @@ -17,12 +17,14 @@ package externalaccountuser import ( "context" "errors" + "log/slog" "net/http" "time" "cloud.google.com/go/auth" "cloud.google.com/go/auth/credentials/internal/stsexchange" "cloud.google.com/go/auth/internal" + "github.com/googleapis/gax-go/v2/internallog" ) // Options stores the configuration for fetching tokens with external authorized @@ -51,6 +53,8 @@ type Options struct { // Client for token request. Client *http.Client + // Logger for logging. + Logger *slog.Logger } func (c *Options) validate() bool { @@ -90,6 +94,7 @@ func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) { RefreshToken: opts.RefreshToken, Authentication: clientAuth, Headers: headers, + Logger: internallog.New(tp.o.Logger), }) if err != nil { return nil, err diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go b/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go index 720045d3b07..c2d320fdf4c 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go @@ -16,12 +16,13 @@ package gdch import ( "context" - "crypto/rsa" + "crypto" "crypto/tls" "crypto/x509" "encoding/json" "errors" "fmt" + "log/slog" "net/http" "net/url" "os" @@ -32,6 +33,7 @@ import ( "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/credsfile" "cloud.google.com/go/auth/internal/jwt" + "github.com/googleapis/gax-go/v2/internallog" ) const ( @@ -51,6 +53,7 @@ var ( type Options struct { STSAudience string Client *http.Client + Logger *slog.Logger } // NewTokenProvider returns a [cloud.google.com/go/auth.TokenProvider] from a @@ -62,7 +65,7 @@ func NewTokenProvider(f *credsfile.GDCHServiceAccountFile, o *Options) (auth.Tok if o.STSAudience == "" { return nil, errors.New("credentials: STSAudience must be set for the GDCH auth flows") } - pk, err := internal.ParseKey([]byte(f.PrivateKey)) + signer, err := internal.ParseKey([]byte(f.PrivateKey)) if err != nil { return nil, err } @@ -75,10 +78,11 @@ func NewTokenProvider(f *credsfile.GDCHServiceAccountFile, o *Options) (auth.Tok serviceIdentity: fmt.Sprintf("system:serviceaccount:%s:%s", f.Project, f.Name), tokenURL: f.TokenURL, aud: o.STSAudience, - pk: pk, + signer: signer, pkID: f.PrivateKeyID, certPool: certPool, client: o.Client, + logger: internallog.New(o.Logger), } return tp, nil } @@ -97,11 +101,12 @@ type gdchProvider struct { serviceIdentity string tokenURL string aud string - pk *rsa.PrivateKey + signer crypto.Signer pkID string certPool *x509.CertPool client *http.Client + logger *slog.Logger } func (g gdchProvider) Token(ctx context.Context) (*auth.Token, error) { @@ -120,7 +125,7 @@ func (g gdchProvider) Token(ctx context.Context) (*auth.Token, error) { Type: jwt.HeaderType, KeyID: string(g.pkID), } - payload, err := jwt.EncodeJWS(&h, &claims, g.pk) + payload, err := jwt.EncodeJWS(&h, &claims, g.signer) if err != nil { return nil, err } @@ -136,10 +141,12 @@ 
func (g gdchProvider) Token(ctx context.Context) (*auth.Token, error) { return nil, err } req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + g.logger.DebugContext(ctx, "gdch token request", "request", internallog.HTTPRequest(req, []byte(v.Encode()))) resp, body, err := internal.DoRequest(g.client, req) if err != nil { return nil, fmt.Errorf("credentials: cannot fetch token: %w", err) } + g.logger.DebugContext(ctx, "gdch token response", "response", internallog.HTTPResponse(resp, body)) if c := resp.StatusCode; c < http.StatusOK || c > http.StatusMultipleChoices { return nil, &auth.Error{ Response: resp, diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/idtoken.go b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/idtoken.go new file mode 100644 index 00000000000..705462c1615 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/idtoken.go @@ -0,0 +1,105 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package impersonate + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "log/slog" + "net/http" + "strings" + "time" + + "cloud.google.com/go/auth" + "cloud.google.com/go/auth/internal" + "github.com/googleapis/gax-go/v2/internallog" +) + +var ( + universeDomainPlaceholder = "UNIVERSE_DOMAIN" + iamCredentialsUniverseDomainEndpoint = "https://iamcredentials.UNIVERSE_DOMAIN" +) + +// IDTokenIAMOptions provides configuration for [IDTokenIAMOptions.Token]. +type IDTokenIAMOptions struct { + // Client is required. + Client *http.Client + // Logger is required. + Logger *slog.Logger + UniverseDomain auth.CredentialsPropertyProvider + ServiceAccountEmail string + GenerateIDTokenRequest +} + +// GenerateIDTokenRequest holds the request to the IAM generateIdToken RPC. +type GenerateIDTokenRequest struct { + Audience string `json:"audience"` + IncludeEmail bool `json:"includeEmail"` + // Delegates are the ordered, fully-qualified resource name for service + // accounts in a delegation chain. Each service account must be granted + // roles/iam.serviceAccountTokenCreator on the next service account in the + // chain. The delegates must have the following format: + // projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}. The - wildcard + // character is required; replacing it with a project ID is invalid. + // Optional. + Delegates []string `json:"delegates,omitempty"` +} + +// GenerateIDTokenResponse holds the response from the IAM generateIdToken RPC. +type GenerateIDTokenResponse struct { + Token string `json:"token"` +} + +// Token call IAM generateIdToken with the configuration provided in [IDTokenIAMOptions]. 
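For orientation before the implementation below: the marshaled GenerateIDTokenRequest is a small JSON document. A sketch with a made-up audience:

	req := GenerateIDTokenRequest{
		Audience:     "https://example.com", // hypothetical
		IncludeEmail: true,
	}
	b, _ := json.Marshal(req)
	// string(b) == `{"audience":"https://example.com","includeEmail":true}`
	// Delegates is dropped from the JSON when empty (omitempty).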
+func (o IDTokenIAMOptions) Token(ctx context.Context) (*auth.Token, error) { + universeDomain, err := o.UniverseDomain.GetProperty(ctx) + if err != nil { + return nil, err + } + endpoint := strings.Replace(iamCredentialsUniverseDomainEndpoint, universeDomainPlaceholder, universeDomain, 1) + url := fmt.Sprintf("%s/v1/%s:generateIdToken", endpoint, internal.FormatIAMServiceAccountResource(o.ServiceAccountEmail)) + + bodyBytes, err := json.Marshal(o.GenerateIDTokenRequest) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to marshal request: %w", err) + } + + req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(bodyBytes)) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to create request: %w", err) + } + req.Header.Set("Content-Type", "application/json") + o.Logger.DebugContext(ctx, "impersonated idtoken request", "request", internallog.HTTPRequest(req, bodyBytes)) + resp, body, err := internal.DoRequest(o.Client, req) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to generate ID token: %w", err) + } + o.Logger.DebugContext(ctx, "impersonated idtoken response", "response", internallog.HTTPResponse(resp, body)) + if c := resp.StatusCode; c < 200 || c > 299 { + return nil, fmt.Errorf("impersonate: status code %d: %s", c, body) + } + + var tokenResp GenerateIDTokenResponse + if err := json.Unmarshal(body, &tokenResp); err != nil { + return nil, fmt.Errorf("impersonate: unable to parse response: %w", err) + } + return &auth.Token{ + Value: tokenResp.Token, + // Generated ID tokens are good for one hour. + Expiry: time.Now().Add(1 * time.Hour), + }, nil +} diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go index ed53afa519e..b3a99261fa9 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go @@ -20,11 +20,13 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "net/http" "time" "cloud.google.com/go/auth" "cloud.google.com/go/auth/internal" + "github.com/googleapis/gax-go/v2/internallog" ) const ( @@ -74,6 +76,11 @@ type Options struct { // Client configures the underlying client used to make network requests // when fetching tokens. Required. Client *http.Client + // Logger is used for debug logging. If provided, logging will be enabled + // at the loggers configured level. By default logging is disabled unless + // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default + // logger will be used. Optional. + Logger *slog.Logger } func (o *Options) validate() error { @@ -88,6 +95,7 @@ func (o *Options) validate() error { // Token performs the exchange to get a temporary service account token to allow access to GCP. 
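One detail of the request built in the method below: the lifetime is wire-encoded as a seconds-suffixed string.

	// TokenLifetimeSeconds: 600 produces "lifetime": "600s" in the request
	// body; a zero value falls back to defaultTokenLifetime.
	o := &Options{TokenLifetimeSeconds: 600}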
func (o *Options) Token(ctx context.Context) (*auth.Token, error) { + logger := internallog.New(o.Logger) lifetime := defaultTokenLifetime if o.TokenLifetimeSeconds != 0 { lifetime = fmt.Sprintf("%ds", o.TokenLifetimeSeconds) @@ -109,10 +117,12 @@ func (o *Options) Token(ctx context.Context) (*auth.Token, error) { if err := setAuthHeader(ctx, o.Tp, req); err != nil { return nil, err } + logger.DebugContext(ctx, "impersonated token request", "request", internallog.HTTPRequest(req, b)) resp, body, err := internal.DoRequest(o.Client, req) if err != nil { return nil, fmt.Errorf("credentials: unable to generate access token: %w", err) } + logger.DebugContext(ctx, "impersonated token response", "response", internallog.HTTPResponse(resp, body)) if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices { return nil, fmt.Errorf("credentials: status code %d: %s", c, body) } diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go b/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go index 768a9dafc13..e1d2b15034d 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go @@ -19,6 +19,7 @@ import ( "encoding/base64" "encoding/json" "fmt" + "log/slog" "net/http" "net/url" "strconv" @@ -26,6 +27,7 @@ import ( "cloud.google.com/go/auth" "cloud.google.com/go/auth/internal" + "github.com/googleapis/gax-go/v2/internallog" ) const ( @@ -40,6 +42,7 @@ const ( // Options stores the configuration for making an sts exchange request. type Options struct { Client *http.Client + Logger *slog.Logger Endpoint string Request *TokenRequest Authentication ClientAuthentication @@ -80,6 +83,7 @@ func ExchangeToken(ctx context.Context, opts *Options) (*TokenResponse, error) { func doRequest(ctx context.Context, opts *Options, data url.Values) (*TokenResponse, error) { opts.Authentication.InjectAuthentication(data, opts.Headers) encodedData := data.Encode() + logger := internallog.New(opts.Logger) req, err := http.NewRequestWithContext(ctx, "POST", opts.Endpoint, strings.NewReader(encodedData)) if err != nil { @@ -93,10 +97,12 @@ func doRequest(ctx context.Context, opts *Options, data url.Values) (*TokenRespo } req.Header.Set("Content-Length", strconv.Itoa(len(encodedData))) + logger.DebugContext(ctx, "sts token request", "request", internallog.HTTPRequest(req, []byte(encodedData))) resp, body, err := internal.DoRequest(opts.Client, req) if err != nil { return nil, fmt.Errorf("credentials: invalid response from Secure Token Server: %w", err) } + logger.DebugContext(ctx, "sts token response", "response", internallog.HTTPResponse(resp, body)) if c := resp.StatusCode; c < http.StatusOK || c > http.StatusMultipleChoices { return nil, fmt.Errorf("credentials: status code %d: %s", c, body) } diff --git a/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go index b62a8ae4d5d..8d335ccecc9 100644 --- a/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go +++ b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go @@ -16,8 +16,10 @@ package credentials import ( "context" - "crypto/rsa" + "crypto" + "errors" "fmt" + "log/slog" "strings" "time" @@ -35,7 +37,10 @@ var ( // configureSelfSignedJWT uses the private key in the service account to create // a JWT without making a network call. 
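configureSelfSignedJWT (below) now fails fast on configurations that could never produce a usable token; a sketch, assuming f is a parsed service account file:

	// Neither scopes nor an audience: rejected up front.
	_, err := configureSelfSignedJWT(f, &DetectOptions{})
	// err: "credentials: both scopes and audience are empty"

	// An audience alone (or scopes alone) is sufficient.
	tp, err := configureSelfSignedJWT(f, &DetectOptions{Audience: "https://example.googleapis.com/"})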
func configureSelfSignedJWT(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { - pk, err := internal.ParseKey([]byte(f.PrivateKey)) + if len(opts.scopes()) == 0 && opts.Audience == "" { + return nil, errors.New("credentials: both scopes and audience are empty") + } + signer, err := internal.ParseKey([]byte(f.PrivateKey)) if err != nil { return nil, fmt.Errorf("credentials: could not parse key: %w", err) } @@ -43,8 +48,9 @@ func configureSelfSignedJWT(f *credsfile.ServiceAccountFile, opts *DetectOptions email: f.ClientEmail, audience: opts.Audience, scopes: opts.scopes(), - pk: pk, + signer: signer, pkID: f.PrivateKeyID, + logger: opts.logger(), }, nil } @@ -52,8 +58,9 @@ type selfSignedTokenProvider struct { email string audience string scopes []string - pk *rsa.PrivateKey + signer crypto.Signer pkID string + logger *slog.Logger } func (tp *selfSignedTokenProvider) Token(context.Context) (*auth.Token, error) { @@ -73,9 +80,10 @@ func (tp *selfSignedTokenProvider) Token(context.Context) (*auth.Token, error) { Type: jwt.HeaderType, KeyID: string(tp.pkID), } - msg, err := jwt.EncodeJWS(h, c, tp.pk) + tok, err := jwt.EncodeJWS(h, c, tp.signer) if err != nil { return nil, fmt.Errorf("credentials: could not encode JWT: %w", err) } - return &auth.Token{Value: msg, Type: internal.TokenTypeBearer, Expiry: exp}, nil + tp.logger.Debug("created self-signed JWT", "token", tok) + return &auth.Token{Value: tok, Type: internal.TokenTypeBearer, Expiry: exp}, nil } diff --git a/vendor/cloud.google.com/go/auth/grpctransport/directpath.go b/vendor/cloud.google.com/go/auth/grpctransport/directpath.go index 8dbfa7ef7e9..d781c3e49a9 100644 --- a/vendor/cloud.google.com/go/auth/grpctransport/directpath.go +++ b/vendor/cloud.google.com/go/auth/grpctransport/directpath.go @@ -22,7 +22,7 @@ import ( "strings" "cloud.google.com/go/auth" - "cloud.google.com/go/compute/metadata" + "cloud.google.com/go/auth/internal/compute" "google.golang.org/grpc" grpcgoogle "google.golang.org/grpc/credentials/google" ) @@ -55,7 +55,7 @@ func checkDirectPathEndPoint(endpoint string) bool { return true } -func isTokenProviderDirectPathCompatible(tp auth.TokenProvider, _ *Options) bool { +func isTokenProviderDirectPathCompatible(tp auth.TokenProvider, o *Options) bool { if tp == nil { return false } @@ -66,10 +66,13 @@ func isTokenProviderDirectPathCompatible(tp auth.TokenProvider, _ *Options) bool if tok == nil { return false } - if source, _ := tok.Metadata["auth.google.tokenSource"].(string); source != "compute-metadata" { + if tok.MetadataString("auth.google.tokenSource") != "compute-metadata" { return false } - if acct, _ := tok.Metadata["auth.google.serviceAccount"].(string); acct != "default" { + if o.InternalOptions != nil && o.InternalOptions.EnableNonDefaultSAForDirectPath { + return true + } + if tok.MetadataString("auth.google.serviceAccount") != "default" { return false } return true @@ -91,7 +94,7 @@ func isDirectPathXdsUsed(o *Options) bool { // configuration allows the use of direct path. If it does not the provided // grpcOpts and endpoint are returned. 
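The DirectPath gate applied in configureDirectPath (below) reduces to a short predicate, restated here as pseudocode from isTokenProviderDirectPathCompatible above:

	// on a GCE VM (manufacturer check, no metadata-server round trip)
	// && tok.MetadataString("auth.google.tokenSource") == "compute-metadata" // always required
	// && (o.InternalOptions.EnableNonDefaultSAForDirectPath                  // opt-in bypass, or
	//     || tok.MetadataString("auth.google.serviceAccount") == "default")  // default SA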
func configureDirectPath(grpcOpts []grpc.DialOption, opts *Options, endpoint string, creds *auth.Credentials) ([]grpc.DialOption, string) { - if isDirectPathEnabled(endpoint, opts) && metadata.OnGCE() && isTokenProviderDirectPathCompatible(creds, opts) { + if isDirectPathEnabled(endpoint, opts) && compute.OnComputeEngine() && isTokenProviderDirectPathCompatible(creds, opts) { // Overwrite all of the previously specific DialOptions, DirectPath uses its own set of credentials and certificates. grpcOpts = []grpc.DialOption{ grpc.WithCredentialsBundle(grpcgoogle.NewDefaultCredentialsWithOptions(grpcgoogle.DefaultCredentialsOptions{PerRPCCreds: &grpcCredentialsProvider{creds: creds}}))} diff --git a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go index 5c3bc66f998..95f259037f2 100644 --- a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go +++ b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Package grpctransport provides functionality for managing gRPC client +// connections to Google Cloud services. package grpctransport import ( @@ -19,16 +21,21 @@ import ( "crypto/tls" "errors" "fmt" + "log/slog" "net/http" + "os" + "sync" "cloud.google.com/go/auth" "cloud.google.com/go/auth/credentials" "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/transport" - "go.opencensus.io/plugin/ocgrpc" + "github.com/googleapis/gax-go/v2/internallog" + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" "google.golang.org/grpc" grpccreds "google.golang.org/grpc/credentials" grpcinsecure "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/stats" ) const ( @@ -38,7 +45,7 @@ const ( // Check env to decide if using google-c2p resolver for DirectPath traffic. enableDirectPathXdsEnvVar = "GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS" - quotaProjectHeaderKey = "X-Goog-User-Project" + quotaProjectHeaderKey = "X-goog-user-project" ) var ( @@ -46,6 +53,27 @@ var ( timeoutDialerOption grpc.DialOption ) +// otelStatsHandler is a singleton otelgrpc.clientHandler to be used across +// all dial connections to avoid the memory leak documented in +// https://github.com/open-telemetry/opentelemetry-go-contrib/issues/4226 +// +// TODO: When this module depends on a version of otelgrpc containing the fix, +// replace this singleton with inline usage for simplicity. +// The fix should be in https://github.com/open-telemetry/opentelemetry-go/pull/5797. +var ( + initOtelStatsHandlerOnce sync.Once + otelStatsHandler stats.Handler +) + +// otelGRPCStatsHandler returns singleton otelStatsHandler for reuse across all +// dial connections. +func otelGRPCStatsHandler() stats.Handler { + initOtelStatsHandlerOnce.Do(func() { + otelStatsHandler = otelgrpc.NewClientHandler() + }) + return otelStatsHandler +} + // ClientCertProvider is a function that returns a TLS client certificate to be // used when opening TLS connections. It follows the same semantics as // [crypto/tls.Config.GetClientCertificate]. @@ -90,6 +118,11 @@ type Options struct { // APIKey specifies an API key to be used as the basis for authentication. // If set DetectOpts are ignored. APIKey string + // Logger is used for debug logging. If provided, logging will be enabled + // at the loggers configured level. 
By default logging is disabled unless + // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default + // logger will be used. Optional. + Logger *slog.Logger // InternalOptions are NOT meant to be set directly by consumers of this // package, they should only be set by generated client code. @@ -105,6 +138,10 @@ func (o *Options) client() *http.Client { return nil } +func (o *Options) logger() *slog.Logger { + return internallog.New(o.Logger) +} + func (o *Options) validate() error { if o == nil { return errors.New("grpctransport: opts required to be non-nil") @@ -146,6 +183,9 @@ func (o *Options) resolveDetectOptions() *credentials.DetectOptions { do.Client = transport.DefaultHTTPClientWithTLS(tlsConfig) do.TokenURL = credentials.GoogleMTLSTokenURL } + if do.Logger == nil { + do.Logger = o.logger() + } return do } @@ -214,6 +254,7 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er ClientCertProvider: opts.ClientCertProvider, Client: opts.client(), UniverseDomain: opts.UniverseDomain, + Logger: opts.logger(), } if io := opts.InternalOptions; io != nil { tOpts.DefaultEndpointTemplate = io.DefaultEndpointTemplate @@ -271,7 +312,10 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er if metadata == nil { metadata = make(map[string]string, 1) } - metadata[quotaProjectHeaderKey] = qp + // Don't overwrite user specified quota + if _, ok := metadata[quotaProjectHeaderKey]; !ok { + metadata[quotaProjectHeaderKey] = qp + } } grpcOpts = append(grpcOpts, grpc.WithPerRPCCredentials(&grpcCredentialsProvider{ @@ -288,10 +332,10 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er // Add tracing, but before the other options, so that clients can override the // gRPC stats handler. // This assumes that gRPC options are processed in order, left to right. - grpcOpts = addOCStatsHandler(grpcOpts, opts) + grpcOpts = addOpenTelemetryStatsHandler(grpcOpts, opts) grpcOpts = append(grpcOpts, opts.GRPCDialOpts...) - return grpc.DialContext(ctx, endpoint, grpcOpts...) + return grpc.Dial(endpoint, grpcOpts...) } // grpcKeyProvider satisfies https://pkg.go.dev/google.golang.org/grpc/credentials#PerRPCCredentials. @@ -325,29 +369,39 @@ type grpcCredentialsProvider struct { clientUniverseDomain string } -// getClientUniverseDomain returns the default service domain for a given Cloud universe. -// The default value is "googleapis.com". This is the universe domain -// configured for the client, which will be compared to the universe domain -// that is separately configured for the credentials. +// getClientUniverseDomain returns the default service domain for a given Cloud +// universe, with the following precedence: +// +// 1. A non-empty option.WithUniverseDomain or similar client option. +// 2. A non-empty environment variable GOOGLE_CLOUD_UNIVERSE_DOMAIN. +// 3. The default value "googleapis.com". +// +// This is the universe domain configured for the client, which will be compared +// to the universe domain that is separately configured for the credentials. 
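A worked example of that precedence, with made-up universe domains:

	// 1. Explicit client configuration wins.
	p := &grpcCredentialsProvider{clientUniverseDomain: "example.dev"}
	p.getClientUniverseDomain() // "example.dev"

	// 2. Otherwise the environment variable is consulted.
	os.Setenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN", "example.test")
	(&grpcCredentialsProvider{}).getClientUniverseDomain() // "example.test"

	// 3. With neither set: "googleapis.com" (internal.DefaultUniverseDomain).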
func (c *grpcCredentialsProvider) getClientUniverseDomain() string { - if c.clientUniverseDomain == "" { - return internal.DefaultUniverseDomain + if c.clientUniverseDomain != "" { + return c.clientUniverseDomain } - return c.clientUniverseDomain + if envUD := os.Getenv(internal.UniverseDomainEnvVar); envUD != "" { + return envUD + } + return internal.DefaultUniverseDomain } func (c *grpcCredentialsProvider) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { - credentialsUniverseDomain, err := c.creds.UniverseDomain(ctx) - if err != nil { - return nil, err - } - if err := transport.ValidateUniverseDomain(c.getClientUniverseDomain(), credentialsUniverseDomain); err != nil { - return nil, err - } token, err := c.creds.Token(ctx) if err != nil { return nil, err } + if token.MetadataString("auth.google.tokenSource") != "compute-metadata" { + credentialsUniverseDomain, err := c.creds.UniverseDomain(ctx) + if err != nil { + return nil, err + } + if err := transport.ValidateUniverseDomain(c.getClientUniverseDomain(), credentialsUniverseDomain); err != nil { + return nil, err + } + } if c.secure { ri, _ := grpccreds.RequestInfoFromContext(ctx) if err = grpccreds.CheckSecurityLevel(ri.AuthInfo, grpccreds.PrivacyAndIntegrity); err != nil { @@ -376,9 +430,9 @@ func (c *grpcCredentialsProvider) RequireTransportSecurity() bool { return c.secure } -func addOCStatsHandler(dialOpts []grpc.DialOption, opts *Options) []grpc.DialOption { +func addOpenTelemetryStatsHandler(dialOpts []grpc.DialOption, opts *Options) []grpc.DialOption { if opts.DisableTelemetry { return dialOpts } - return append(dialOpts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{})) + return append(dialOpts, grpc.WithStatsHandler(otelGRPCStatsHandler())) } diff --git a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go index 969c8d4d200..5758e85b5db 100644 --- a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go +++ b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go @@ -12,18 +12,22 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Package httptransport provides functionality for managing HTTP client +// connections to Google Cloud services. package httptransport import ( "crypto/tls" "errors" "fmt" + "log/slog" "net/http" "cloud.google.com/go/auth" detect "cloud.google.com/go/auth/credentials" "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/transport" + "github.com/googleapis/gax-go/v2/internallog" ) // ClientCertProvider is a function that returns a TLS client certificate to be @@ -67,6 +71,11 @@ type Options struct { // configured for the client, which will be compared to the universe domain // that is separately configured for the credentials. UniverseDomain string + // Logger is used for debug logging. If provided, logging will be enabled + // at the loggers configured level. By default logging is disabled unless + // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default + // logger will be used. Optional. + Logger *slog.Logger // InternalOptions are NOT meant to be set directly by consumers of this // package, they should only be set by generated client code. 
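The Logger fields added across grpctransport and httptransport follow one pattern; a hypothetical opt-in sketch:

	// Explicit logger: debug records are emitted at the handler's level.
	h := slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug})
	opts := &httptransport.Options{Logger: slog.New(h)}

	// Logger nil: internallog.New returns a disabled logger unless
	// GOOGLE_SDK_GO_LOGGING_LEVEL is set in the environment.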
@@ -99,6 +108,10 @@ func (o *Options) client() *http.Client { return nil } +func (o *Options) logger() *slog.Logger { + return internallog.New(o.Logger) +} + func (o *Options) resolveDetectOptions() *detect.DetectOptions { io := o.InternalOptions // soft-clone these so we are not updating a ref the user holds and may reuse @@ -123,6 +136,9 @@ func (o *Options) resolveDetectOptions() *detect.DetectOptions { do.Client = transport.DefaultHTTPClientWithTLS(tlsConfig) do.TokenURL = detect.GoogleMTLSTokenURL } + if do.Logger == nil { + do.Logger = o.logger() + } return do } @@ -145,14 +161,21 @@ type InternalOptions struct { // service. DefaultScopes []string // SkipValidation bypasses validation on Options. It should only be used - // internally for clients that needs more control over their transport. + // internally for clients that need more control over their transport. SkipValidation bool + // SkipUniverseDomainValidation skips the verification that the universe + // domain configured for the client matches the universe domain configured + // for the credentials. It should only be used internally for clients that + // need more control over their transport. The default is false. + SkipUniverseDomainValidation bool } // AddAuthorizationMiddleware adds a middleware to the provided client's // transport that sets the Authorization header with the value produced by the // provided [cloud.google.com/go/auth.Credentials]. An error is returned only // if client or creds is nil. +// +// This function does not support setting a universe domain value on the client. func AddAuthorizationMiddleware(client *http.Client, creds *auth.Credentials) error { if client == nil || creds == nil { return fmt.Errorf("httptransport: client and tp must not be nil") @@ -171,7 +194,6 @@ func AddAuthorizationMiddleware(client *http.Client, creds *auth.Credentials) er client.Transport = &authTransport{ creds: creds, base: base, - // TODO(quartzmo): Somehow set clientUniverseDomain from impersonate calls. } return nil } @@ -189,6 +211,7 @@ func NewClient(opts *Options) (*http.Client, error) { ClientCertProvider: opts.ClientCertProvider, Client: opts.client(), UniverseDomain: opts.UniverseDomain, + Logger: opts.logger(), } if io := opts.InternalOptions; io != nil { tOpts.DefaultEndpointTemplate = io.DefaultEndpointTemplate diff --git a/vendor/cloud.google.com/go/auth/httptransport/trace.go b/vendor/cloud.google.com/go/auth/httptransport/trace.go deleted file mode 100644 index 467c477c04d..00000000000 --- a/vendor/cloud.google.com/go/auth/httptransport/trace.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package httptransport - -import ( - "encoding/binary" - "encoding/hex" - "fmt" - "net/http" - "strconv" - "strings" - - "go.opencensus.io/trace" - "go.opencensus.io/trace/propagation" -) - -const ( - httpHeaderMaxSize = 200 - cloudTraceHeader = `X-Cloud-Trace-Context` -) - -// asserts the httpFormat fulfills this foreign interface -var _ propagation.HTTPFormat = (*httpFormat)(nil) - -// httpFormat implements propagation.httpFormat to propagate -// traces in HTTP headers for Google Cloud Platform and Cloud Trace. -type httpFormat struct{} - -// SpanContextFromRequest extracts a Cloud Trace span context from incoming requests. -func (f *httpFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) { - h := req.Header.Get(cloudTraceHeader) - // See https://cloud.google.com/trace/docs/faq for the header HTTPFormat. - // Return if the header is empty or missing, or if the header is unreasonably - // large, to avoid making unnecessary copies of a large string. - if h == "" || len(h) > httpHeaderMaxSize { - return trace.SpanContext{}, false - } - - // Parse the trace id field. - slash := strings.Index(h, `/`) - if slash == -1 { - return trace.SpanContext{}, false - } - tid, h := h[:slash], h[slash+1:] - - buf, err := hex.DecodeString(tid) - if err != nil { - return trace.SpanContext{}, false - } - copy(sc.TraceID[:], buf) - - // Parse the span id field. - spanstr := h - semicolon := strings.Index(h, `;`) - if semicolon != -1 { - spanstr, h = h[:semicolon], h[semicolon+1:] - } - sid, err := strconv.ParseUint(spanstr, 10, 64) - if err != nil { - return trace.SpanContext{}, false - } - binary.BigEndian.PutUint64(sc.SpanID[:], sid) - - // Parse the options field, options field is optional. - if !strings.HasPrefix(h, "o=") { - return sc, true - } - o, err := strconv.ParseUint(h[2:], 10, 32) - if err != nil { - return trace.SpanContext{}, false - } - sc.TraceOptions = trace.TraceOptions(o) - return sc, true -} - -// SpanContextToRequest modifies the given request to include a Cloud Trace header. -func (f *httpFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { - sid := binary.BigEndian.Uint64(sc.SpanID[:]) - header := fmt.Sprintf("%s/%d;o=%d", hex.EncodeToString(sc.TraceID[:]), sid, int64(sc.TraceOptions)) - req.Header.Set(cloudTraceHeader, header) -} diff --git a/vendor/cloud.google.com/go/auth/httptransport/transport.go b/vendor/cloud.google.com/go/auth/httptransport/transport.go index 94caeb00f0a..ee215b6dc6c 100644 --- a/vendor/cloud.google.com/go/auth/httptransport/transport.go +++ b/vendor/cloud.google.com/go/auth/httptransport/transport.go @@ -19,6 +19,7 @@ import ( "crypto/tls" "net" "net/http" + "os" "time" "cloud.google.com/go/auth" @@ -26,12 +27,12 @@ import ( "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/transport" "cloud.google.com/go/auth/internal/transport/cert" - "go.opencensus.io/plugin/ochttp" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" "golang.org/x/net/http2" ) const ( - quotaProjectHeaderKey = "X-Goog-User-Project" + quotaProjectHeaderKey = "X-goog-user-project" ) func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, error) { @@ -41,7 +42,7 @@ func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, err headers: headers, } var trans http.RoundTripper = ht - trans = addOCTransport(trans, opts) + trans = addOpenTelemetryTransport(trans, opts) switch { case opts.DisableAuthentication: // Do nothing. 
@@ -76,13 +77,21 @@ func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, err if headers == nil { headers = make(map[string][]string, 1) } - headers.Set(quotaProjectHeaderKey, qp) + // Don't overwrite user specified quota + if v := headers.Get(quotaProjectHeaderKey); v == "" { + headers.Set(quotaProjectHeaderKey, qp) + } + } + var skipUD bool + if iOpts := opts.InternalOptions; iOpts != nil { + skipUD = iOpts.SkipUniverseDomainValidation } creds.TokenProvider = auth.NewCachedTokenProvider(creds.TokenProvider, nil) trans = &authTransport{ - base: trans, - creds: creds, - clientUniverseDomain: opts.UniverseDomain, + base: trans, + creds: creds, + clientUniverseDomain: opts.UniverseDomain, + skipUniverseDomainValidation: skipUD, } } return trans, nil @@ -94,7 +103,11 @@ func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, err // http.DefaultTransport. // If TLSCertificate is available, set TLSClientConfig as well. func defaultBaseTransport(clientCertSource cert.Provider, dialTLSContext func(context.Context, string, string) (net.Conn, error)) http.RoundTripper { - trans := http.DefaultTransport.(*http.Transport).Clone() + defaultTransport, ok := http.DefaultTransport.(*http.Transport) + if !ok { + defaultTransport = transport.BaseTransport() + } + trans := defaultTransport.Clone() trans.MaxIdleConnsPerHost = 100 if clientCertSource != nil { @@ -155,29 +168,37 @@ func (t *headerTransport) RoundTrip(req *http.Request) (*http.Response, error) { return rt.RoundTrip(&newReq) } -func addOCTransport(trans http.RoundTripper, opts *Options) http.RoundTripper { +func addOpenTelemetryTransport(trans http.RoundTripper, opts *Options) http.RoundTripper { if opts.DisableTelemetry { return trans } - return &ochttp.Transport{ - Base: trans, - Propagation: &httpFormat{}, - } + return otelhttp.NewTransport(trans) } type authTransport struct { - creds *auth.Credentials - base http.RoundTripper - clientUniverseDomain string + creds *auth.Credentials + base http.RoundTripper + clientUniverseDomain string + skipUniverseDomainValidation bool } -// getClientUniverseDomain returns the universe domain configured for the client. -// The default value is "googleapis.com". +// getClientUniverseDomain returns the default service domain for a given Cloud +// universe, with the following precedence: +// +// 1. A non-empty option.WithUniverseDomain or similar client option. +// 2. A non-empty environment variable GOOGLE_CLOUD_UNIVERSE_DOMAIN. +// 3. The default value "googleapis.com". +// +// This is the universe domain configured for the client, which will be compared +// to the universe domain that is separately configured for the credentials. 
func (t *authTransport) getClientUniverseDomain() string { - if t.clientUniverseDomain == "" { - return internal.DefaultUniverseDomain + if t.clientUniverseDomain != "" { + return t.clientUniverseDomain + } + if envUD := os.Getenv(internal.UniverseDomainEnvVar); envUD != "" { + return envUD } - return t.clientUniverseDomain + return internal.DefaultUniverseDomain } // RoundTrip authorizes and authenticates the request with an @@ -193,17 +214,19 @@ func (t *authTransport) RoundTrip(req *http.Request) (*http.Response, error) { } }() } - credentialsUniverseDomain, err := t.creds.UniverseDomain(req.Context()) - if err != nil { - return nil, err - } - if err := transport.ValidateUniverseDomain(t.getClientUniverseDomain(), credentialsUniverseDomain); err != nil { - return nil, err - } token, err := t.creds.Token(req.Context()) if err != nil { return nil, err } + if !t.skipUniverseDomainValidation && token.MetadataString("auth.google.tokenSource") != "compute-metadata" { + credentialsUniverseDomain, err := t.creds.UniverseDomain(req.Context()) + if err != nil { + return nil, err + } + if err := transport.ValidateUniverseDomain(t.getClientUniverseDomain(), credentialsUniverseDomain); err != nil { + return nil, err + } + } req2 := req.Clone(req.Context()) SetAuthHeader(token, req2) reqBodyClosed = true diff --git a/vendor/cloud.google.com/go/auth/internal/compute/compute.go b/vendor/cloud.google.com/go/auth/internal/compute/compute.go new file mode 100644 index 00000000000..05c7e8bdd49 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/compute/compute.go @@ -0,0 +1,65 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compute + +import ( + "log" + "runtime" + "strings" + "sync" +) + +var ( + vmOnGCEOnce sync.Once + vmOnGCE bool +) + +// OnComputeEngine returns whether the client is running on GCE. +// +// This is a copy of the gRPC internal googlecloud.OnGCE() func at: +// https://github.com/grpc/grpc-go/blob/master/internal/googlecloud/googlecloud.go +// The functionality is similar to the metadata.OnGCE() func at: +// https://github.com/googleapis/google-cloud-go/blob/main/compute/metadata/metadata.go +// The difference is that OnComputeEngine() does not perform HTTP or DNS check on the metadata server. +// In particular, OnComputeEngine() will return false on Serverless. +func OnComputeEngine() bool { + vmOnGCEOnce.Do(func() { + mf, err := manufacturer() + if err != nil { + log.Printf("Failed to read manufacturer, vmOnGCE=false: %v", err) + return + } + vmOnGCE = isRunningOnGCE(mf, runtime.GOOS) + }) + return vmOnGCE +} + +// isRunningOnGCE checks whether the local system, without doing a network request, is +// running on GCP. 
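Worked examples for the matcher that follows:

	isRunningOnGCE([]byte("Google Compute Engine\n"), "linux")  // true: trimmed, exact match
	isRunningOnGCE([]byte("Google\r\n"), "windows")             // true: spaces and CR/LF stripped first
	isRunningOnGCE([]byte("Google"), "darwin")                  // false: only linux and windows are recognized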
+func isRunningOnGCE(manufacturer []byte, goos string) bool { + name := string(manufacturer) + switch goos { + case "linux": + name = strings.TrimSpace(name) + return name == "Google" || name == "Google Compute Engine" + case "windows": + name = strings.Replace(name, " ", "", -1) + name = strings.Replace(name, "\n", "", -1) + name = strings.Replace(name, "\r", "", -1) + return name == "Google" + default: + return false + } +} diff --git a/vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go new file mode 100644 index 00000000000..af490bf4f49 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go @@ -0,0 +1,22 @@ +//go:build !(linux || windows) +// +build !linux,!windows + +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compute + +func manufacturer() ([]byte, error) { + return nil, nil +} diff --git a/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go new file mode 100644 index 00000000000..d92178df86c --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go @@ -0,0 +1,23 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compute + +import "os" + +const linuxProductNameFile = "/sys/class/dmi/id/product_name" + +func manufacturer() ([]byte, error) { + return os.ReadFile(linuxProductNameFile) +} diff --git a/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go new file mode 100644 index 00000000000..16be9df3064 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go @@ -0,0 +1,46 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package compute + +import ( + "errors" + "os/exec" + "regexp" + "strings" +) + +const ( + windowsCheckCommand = "powershell.exe" + windowsCheckCommandArgs = "Get-WmiObject -Class Win32_BIOS" + powershellOutputFilter = "Manufacturer" + windowsManufacturerRegex = ":(.*)" +) + +func manufacturer() ([]byte, error) { + cmd := exec.Command(windowsCheckCommand, windowsCheckCommandArgs) + out, err := cmd.Output() + if err != nil { + return nil, err + } + for _, line := range strings.Split(strings.TrimSuffix(string(out), "\n"), "\n") { + if strings.HasPrefix(line, powershellOutputFilter) { + re := regexp.MustCompile(windowsManufacturerRegex) + name := re.FindString(line) + name = strings.TrimLeft(name, ":") + return []byte(name), nil + } + } + return nil, errors.New("cannot determine the machine's manufacturer") +} diff --git a/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go b/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go index 69e30779f98..3be6e5bbb41 100644 --- a/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go +++ b/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go @@ -90,19 +90,20 @@ type ExternalAccountAuthorizedUserFile struct { // CredentialSource stores the information necessary to retrieve the credentials for the STS exchange. // -// One field amongst File, URL, and Executable should be filled, depending on the kind of credential in question. +// One field amongst File, URL, Certificate, and Executable should be filled, depending on the kind of credential in question. // The EnvironmentID should start with AWS if being used for an AWS credential. type CredentialSource struct { - File string `json:"file"` - URL string `json:"url"` - Headers map[string]string `json:"headers"` - Executable *ExecutableConfig `json:"executable,omitempty"` - EnvironmentID string `json:"environment_id"` - RegionURL string `json:"region_url"` - RegionalCredVerificationURL string `json:"regional_cred_verification_url"` - CredVerificationURL string `json:"cred_verification_url"` - IMDSv2SessionTokenURL string `json:"imdsv2_session_token_url"` - Format *Format `json:"format,omitempty"` + File string `json:"file"` + URL string `json:"url"` + Headers map[string]string `json:"headers"` + Executable *ExecutableConfig `json:"executable,omitempty"` + Certificate *CertificateConfig `json:"certificate"` + EnvironmentID string `json:"environment_id"` // TODO: Make type for this + RegionURL string `json:"region_url"` + RegionalCredVerificationURL string `json:"regional_cred_verification_url"` + CredVerificationURL string `json:"cred_verification_url"` + IMDSv2SessionTokenURL string `json:"imdsv2_session_token_url"` + Format *Format `json:"format,omitempty"` } // Format describes the format of a [CredentialSource]. @@ -121,6 +122,13 @@ type ExecutableConfig struct { OutputFile string `json:"output_file"` } +// CertificateConfig represents the options used to set up X509 based workload +// [CredentialSource] +type CertificateConfig struct { + UseDefaultCertificateConfig bool `json:"use_default_certificate_config"` + CertificateConfigLocation string `json:"certificate_config_location"` +} + // ServiceAccountImpersonationInfo has impersonation configuration. 
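In an external account credential file, the new Certificate source corresponds to a small JSON object whose keys mirror the json tags above; a sketch with a made-up path:

	// "credential_source": {"certificate":
	//     {"certificate_config_location": "/path/to/certificate_config.json"}}
	src := &credsfile.CredentialSource{
		Certificate: &credsfile.CertificateConfig{
			CertificateConfigLocation: "/path/to/certificate_config.json",
		},
	}
	// Exactly one of the two certificate fields may be in effect: supplying
	// both use_default_certificate_config=true and a location is rejected,
	// as is supplying neither (see the validation in externalaccount above).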
type ServiceAccountImpersonationInfo struct { TokenLifetimeSeconds int `json:"token_lifetime_seconds"` diff --git a/vendor/cloud.google.com/go/auth/internal/internal.go b/vendor/cloud.google.com/go/auth/internal/internal.go index 8c328e2fbd9..6a8eab6eb99 100644 --- a/vendor/cloud.google.com/go/auth/internal/internal.go +++ b/vendor/cloud.google.com/go/auth/internal/internal.go @@ -16,7 +16,7 @@ package internal import ( "context" - "crypto/rsa" + "crypto" "crypto/x509" "encoding/json" "encoding/pem" @@ -38,42 +38,61 @@ const ( // QuotaProjectEnvVar is the environment variable for setting the quota // project. QuotaProjectEnvVar = "GOOGLE_CLOUD_QUOTA_PROJECT" - projectEnvVar = "GOOGLE_CLOUD_PROJECT" - maxBodySize = 1 << 20 + // UniverseDomainEnvVar is the environment variable for setting the default + // service domain for a given Cloud universe. + UniverseDomainEnvVar = "GOOGLE_CLOUD_UNIVERSE_DOMAIN" + projectEnvVar = "GOOGLE_CLOUD_PROJECT" + maxBodySize = 1 << 20 // DefaultUniverseDomain is the default value for universe domain. // Universe domain is the default service domain for a given Cloud universe. DefaultUniverseDomain = "googleapis.com" ) -// CloneDefaultClient returns a [http.Client] with some good defaults. -func CloneDefaultClient() *http.Client { +type clonableTransport interface { + Clone() *http.Transport +} + +// DefaultClient returns an [http.Client] with some defaults set. If +// the current [http.DefaultTransport] is a [clonableTransport], as +// is the case for an [*http.Transport], the clone will be used. +// Otherwise the [http.DefaultTransport] is used directly. +func DefaultClient() *http.Client { + if transport, ok := http.DefaultTransport.(clonableTransport); ok { + return &http.Client{ + Transport: transport.Clone(), + Timeout: 30 * time.Second, + } + } + return &http.Client{ - Transport: http.DefaultTransport.(*http.Transport).Clone(), + Transport: http.DefaultTransport, Timeout: 30 * time.Second, } } // ParseKey converts the binary contents of a private key file -// to an *rsa.PrivateKey. It detects whether the private key is in a +// to an crypto.Signer. It detects whether the private key is in a // PEM container or not. If so, it extracts the the private key // from PEM container before conversion. It only supports PEM // containers with no passphrase. -func ParseKey(key []byte) (*rsa.PrivateKey, error) { +func ParseKey(key []byte) (crypto.Signer, error) { block, _ := pem.Decode(key) if block != nil { key = block.Bytes } - parsedKey, err := x509.ParsePKCS8PrivateKey(key) + var parsedKey crypto.PrivateKey + var err error + parsedKey, err = x509.ParsePKCS8PrivateKey(key) if err != nil { parsedKey, err = x509.ParsePKCS1PrivateKey(key) if err != nil { return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8: %w", err) } } - parsed, ok := parsedKey.(*rsa.PrivateKey) + parsed, ok := parsedKey.(crypto.Signer) if !ok { - return nil, errors.New("private key is invalid") + return nil, errors.New("private key is not a signer") } return parsed, nil } @@ -162,6 +181,7 @@ func (p StaticProperty) GetProperty(context.Context) (string, error) { // ComputeUniverseDomainProvider fetches the credentials universe domain from // the google cloud metadata service. type ComputeUniverseDomainProvider struct { + MetadataClient *metadata.Client universeDomainOnce sync.Once universeDomain string universeDomainErr error @@ -171,7 +191,7 @@ type ComputeUniverseDomainProvider struct { // metadata service. 
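The ParseKey change above, from *rsa.PrivateKey to crypto.Signer, widens the accepted key material; a sketch:

	signer, err := internal.ParseKey(pemBytes)
	// Any PKCS#8 key that implements crypto.Signer is now accepted, e.g.
	// ECDSA and Ed25519 keys as well as RSA; plain PKCS#1 RSA keys still work.
	if err != nil {
		// "private key should be a PEM or plain PKCS1 or PKCS8: ..." or
		// "private key is not a signer"
	}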
func (c *ComputeUniverseDomainProvider) GetProperty(ctx context.Context) (string, error) { c.universeDomainOnce.Do(func() { - c.universeDomain, c.universeDomainErr = getMetadataUniverseDomain(ctx) + c.universeDomain, c.universeDomainErr = getMetadataUniverseDomain(ctx, c.MetadataClient) }) if c.universeDomainErr != nil { return "", c.universeDomainErr @@ -180,13 +200,14 @@ func (c *ComputeUniverseDomainProvider) GetProperty(ctx context.Context) (string } // httpGetMetadataUniverseDomain is a package var for unit test substitution. -var httpGetMetadataUniverseDomain = func(ctx context.Context) (string, error) { - client := metadata.NewClient(&http.Client{Timeout: time.Second}) - return client.GetWithContext(ctx, "universe/universe_domain") +var httpGetMetadataUniverseDomain = func(ctx context.Context, client *metadata.Client) (string, error) { + ctx, cancel := context.WithTimeout(ctx, 1*time.Second) + defer cancel() + return client.GetWithContext(ctx, "universe/universe-domain") } -func getMetadataUniverseDomain(ctx context.Context) (string, error) { - universeDomain, err := httpGetMetadataUniverseDomain(ctx) +func getMetadataUniverseDomain(ctx context.Context, client *metadata.Client) (string, error) { + universeDomain, err := httpGetMetadataUniverseDomain(ctx, client) if err == nil { return universeDomain, nil } @@ -196,3 +217,9 @@ func getMetadataUniverseDomain(ctx context.Context) (string, error) { } return "", err } + +// FormatIAMServiceAccountResource sets a service account name in an IAM resource +// name. +func FormatIAMServiceAccountResource(name string) string { + return fmt.Sprintf("projects/-/serviceAccounts/%s", name) +} diff --git a/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go b/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go index dc28b3c3bb5..9bd55f510cc 100644 --- a/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go +++ b/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go @@ -111,7 +111,7 @@ func (c *Claims) encode() (string, error) { } // EncodeJWS encodes the data using the provided key as a JSON web signature. 
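Because EncodeJWS (below) now takes a crypto.Signer, signing no longer requires access to raw RSA key material; a hedged sketch:

	var signer crypto.Signer = rsaKey // could equally be an HSM- or KMS-backed Signer
	jws, err := jwt.EncodeJWS(&header, &claims, signer)
	// The SHA-256 digest of "<header>.<claims>" is handed to signer.Sign
	// with crypto.SHA256 as the signer options.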
-func EncodeJWS(header *Header, c *Claims, key *rsa.PrivateKey) (string, error) { +func EncodeJWS(header *Header, c *Claims, signer crypto.Signer) (string, error) { head, err := header.encode() if err != nil { return "", err @@ -123,7 +123,7 @@ func EncodeJWS(header *Header, c *Claims, key *rsa.PrivateKey) (string, error) { ss := fmt.Sprintf("%s.%s", head, claims) h := sha256.New() h.Write([]byte(ss)) - sig, err := rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil)) + sig, err := signer.Sign(rand.Reader, h.Sum(nil), crypto.SHA256) if err != nil { return "", err } diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cba.go b/vendor/cloud.google.com/go/auth/internal/transport/cba.go index d94e0af08a3..2f922f7dfef 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/cba.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/cba.go @@ -17,7 +17,10 @@ package transport import ( "context" "crypto/tls" + "crypto/x509" "errors" + "log" + "log/slog" "net" "net/http" "net/url" @@ -44,11 +47,9 @@ const ( googleAPIUseMTLSOld = "GOOGLE_API_USE_MTLS" universeDomainPlaceholder = "UNIVERSE_DOMAIN" -) -var ( - mdsMTLSAutoConfigSource mtlsConfigSource - errUniverseNotSupportedMTLS = errors.New("mTLS is not supported in any universe other than googleapis.com") + mtlsMDSRoot = "/run/google-mds-mtls/root.crt" + mtlsMDSKey = "/run/google-mds-mtls/client.key" ) // Options is a struct that is duplicated information from the individual @@ -56,13 +57,14 @@ var ( // fields on httptransport.Options and grpctransport.Options. type Options struct { Endpoint string - DefaultMTLSEndpoint string DefaultEndpointTemplate string + DefaultMTLSEndpoint string ClientCertProvider cert.Provider Client *http.Client UniverseDomain string EnableDirectPath bool EnableDirectPathXds bool + Logger *slog.Logger } // getUniverseDomain returns the default service domain for a given Cloud @@ -90,6 +92,16 @@ func (o *Options) defaultEndpoint() string { return strings.Replace(o.DefaultEndpointTemplate, universeDomainPlaceholder, o.getUniverseDomain(), 1) } +// defaultMTLSEndpoint returns the DefaultMTLSEndpointTemplate merged with the +// universe domain if the DefaultMTLSEndpointTemplate is set, otherwise returns an +// empty string. +func (o *Options) defaultMTLSEndpoint() string { + if o.DefaultMTLSEndpoint == "" { + return "" + } + return strings.Replace(o.DefaultMTLSEndpoint, universeDomainPlaceholder, o.getUniverseDomain(), 1) +} + // mergedEndpoint merges a user-provided Endpoint of format host[:port] with the // default endpoint. 
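A worked example of the merge performed by mergedEndpoint (below); values are hypothetical:

	// Endpoint override:  "myhost:8000"  (host:port, not a full URL)
	// Default endpoint:   "https://foo.googleapis.com/bar/baz"
	// Merged result:      "https://myhost:8000/bar/baz"
	// A full-URL override such as "https://myhost:8000/" is used as-is.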
func (o *Options) mergedEndpoint() (string, error) { @@ -120,7 +132,24 @@ func GetGRPCTransportCredsAndEndpoint(opts *Options) (credentials.TransportCrede defaultTransportCreds := credentials.NewTLS(&tls.Config{ GetClientCertificate: config.clientCertSource, }) - if config.s2aAddress == "" { + + var s2aAddr string + var transportCredsForS2A credentials.TransportCredentials + + if config.mtlsS2AAddress != "" { + s2aAddr = config.mtlsS2AAddress + transportCredsForS2A, err = loadMTLSMDSTransportCreds(mtlsMDSRoot, mtlsMDSKey) + if err != nil { + log.Printf("Loading MTLS MDS credentials failed: %v", err) + if config.s2aAddress != "" { + s2aAddr = config.s2aAddress + } else { + return defaultTransportCreds, config.endpoint, nil + } + } + } else if config.s2aAddress != "" { + s2aAddr = config.s2aAddress + } else { return defaultTransportCreds, config.endpoint, nil } @@ -133,8 +162,9 @@ func GetGRPCTransportCredsAndEndpoint(opts *Options) (credentials.TransportCrede } s2aTransportCreds, err := s2a.NewClientCreds(&s2a.ClientOptions{ - S2AAddress: config.s2aAddress, - FallbackOpts: fallbackOpts, + S2AAddress: s2aAddr, + TransportCreds: transportCredsForS2A, + FallbackOpts: fallbackOpts, }) if err != nil { // Use default if we cannot initialize S2A client transport credentials. @@ -151,7 +181,23 @@ func GetHTTPTransportConfig(opts *Options) (cert.Provider, func(context.Context, return nil, nil, err } - if config.s2aAddress == "" { + var s2aAddr string + var transportCredsForS2A credentials.TransportCredentials + + if config.mtlsS2AAddress != "" { + s2aAddr = config.mtlsS2AAddress + transportCredsForS2A, err = loadMTLSMDSTransportCreds(mtlsMDSRoot, mtlsMDSKey) + if err != nil { + log.Printf("Loading MTLS MDS credentials failed: %v", err) + if config.s2aAddress != "" { + s2aAddr = config.s2aAddress + } else { + return config.clientCertSource, nil, nil + } + } + } else if config.s2aAddress != "" { + s2aAddr = config.s2aAddress + } else { return config.clientCertSource, nil, nil } @@ -169,12 +215,38 @@ func GetHTTPTransportConfig(opts *Options) (cert.Provider, func(context.Context, } dialTLSContextFunc := s2a.NewS2ADialTLSContextFunc(&s2a.ClientOptions{ - S2AAddress: config.s2aAddress, - FallbackOpts: fallbackOpts, + S2AAddress: s2aAddr, + TransportCreds: transportCredsForS2A, + FallbackOpts: fallbackOpts, }) return nil, dialTLSContextFunc, nil } +func loadMTLSMDSTransportCreds(mtlsMDSRootFile, mtlsMDSKeyFile string) (credentials.TransportCredentials, error) { + rootPEM, err := os.ReadFile(mtlsMDSRootFile) + if err != nil { + return nil, err + } + caCertPool := x509.NewCertPool() + ok := caCertPool.AppendCertsFromPEM(rootPEM) + if !ok { + return nil, errors.New("failed to load MTLS MDS root certificate") + } + // The mTLS MDS credentials are formatted as the concatenation of a PEM-encoded certificate chain + // followed by a PEM-encoded private key. For this reason, the concatenation is passed in to the + // tls.X509KeyPair function as both the certificate chain and private key arguments. 
+ cert, err := tls.LoadX509KeyPair(mtlsMDSKeyFile, mtlsMDSKeyFile) + if err != nil { + return nil, err + } + tlsConfig := tls.Config{ + RootCAs: caCertPool, + Certificates: []tls.Certificate{cert}, + MinVersion: tls.VersionTLS13, + } + return credentials.NewTLS(&tlsConfig), nil +} + func getTransportConfig(opts *Options) (*transportConfig, error) { clientCertSource, err := GetClientCertificateProvider(opts) if err != nil { @@ -192,21 +264,18 @@ func getTransportConfig(opts *Options) (*transportConfig, error) { if !shouldUseS2A(clientCertSource, opts) { return &defaultTransportConfig, nil } - if !opts.isUniverseDomainGDU() { - return nil, errUniverseNotSupportedMTLS - } - - s2aMTLSEndpoint := opts.DefaultMTLSEndpoint - s2aAddress := GetS2AAddress() - if s2aAddress == "" { + s2aAddress := GetS2AAddress(opts.Logger) + mtlsS2AAddress := GetMTLSS2AAddress(opts.Logger) + if s2aAddress == "" && mtlsS2AAddress == "" { return &defaultTransportConfig, nil } return &transportConfig{ clientCertSource: clientCertSource, endpoint: endpoint, s2aAddress: s2aAddress, - s2aMTLSEndpoint: s2aMTLSEndpoint, + mtlsS2AAddress: mtlsS2AAddress, + s2aMTLSEndpoint: opts.defaultMTLSEndpoint(), }, nil } @@ -241,8 +310,10 @@ type transportConfig struct { clientCertSource cert.Provider // The corresponding endpoint to use based on client certificate source. endpoint string - // The S2A address if it can be used, otherwise an empty string. + // The plaintext S2A address if it can be used, otherwise an empty string. s2aAddress string + // The MTLS S2A address if it can be used, otherwise an empty string. + mtlsS2AAddress string // The MTLS endpoint to use with S2A. s2aMTLSEndpoint string } @@ -250,24 +321,23 @@ type transportConfig struct { // getEndpoint returns the endpoint for the service, taking into account the // user-provided endpoint override "settings.Endpoint". // -// If no endpoint override is specified, we will either return the default endpoint or -// the default mTLS endpoint if a client certificate is available. +// If no endpoint override is specified, we will either return the default +// endpoint or the default mTLS endpoint if a client certificate is available. // -// You can override the default endpoint choice (mtls vs. regular) by setting the -// GOOGLE_API_USE_MTLS_ENDPOINT environment variable. +// You can override the default endpoint choice (mTLS vs. regular) by setting +// the GOOGLE_API_USE_MTLS_ENDPOINT environment variable. // // If the endpoint override is an address (host:port) rather than full base // URL (ex. https://...), then the user-provided address will be merged into // the default endpoint. For example, WithEndpoint("myhost:8000") and -// DefaultEndpointTemplate("https://UNIVERSE_DOMAIN/bar/baz") will return "https://myhost:8080/bar/baz" +// DefaultEndpointTemplate("https://UNIVERSE_DOMAIN/bar/baz") will return +// "https://myhost:8080/bar/baz". Note that this does not apply to the mTLS +// endpoint. 
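The comment above leans on a property of `tls.LoadX509KeyPair`/`tls.X509KeyPair`: each argument is scanned only for the PEM blocks it needs and unrelated blocks are skipped, so a single blob concatenating the certificate chain and the private key works as both arguments. A self-contained sketch with a throwaway self-signed certificate (in the real flow the blob comes from the mTLS MDS credentials file):

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/tls"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"fmt"
	"math/big"
	"time"
)

func main() {
	// Throwaway self-signed certificate so the example is self-contained.
	key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{CommonName: "example"},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(time.Hour),
	}
	der, _ := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	keyDER, _ := x509.MarshalECPrivateKey(key)

	// Concatenate certificate chain and private key into one PEM blob,
	// the same layout the mTLS MDS credentials use.
	pemBlob := append(
		pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: der}),
		pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: keyDER})...,
	)

	// X509KeyPair picks the CERTIFICATE block out of the first argument
	// and the key block out of the second, so one blob serves as both.
	cert, err := tls.X509KeyPair(pemBlob, pemBlob)
	fmt.Println(err == nil, len(cert.Certificate))
}
```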
func getEndpoint(opts *Options, clientCertSource cert.Provider) (string, error) { if opts.Endpoint == "" { mtlsMode := getMTLSMode() if mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) { - if !opts.isUniverseDomainGDU() { - return "", errUniverseNotSupportedMTLS - } - return opts.DefaultMTLSEndpoint, nil + return opts.defaultMTLSEndpoint(), nil } return opts.defaultEndpoint(), nil } diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go index 36651591612..6c954ae193c 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go @@ -16,7 +16,6 @@ package cert import ( "crypto/tls" - "errors" "github.com/googleapis/enterprise-certificate-proxy/client" ) @@ -37,10 +36,9 @@ type ecpSource struct { func NewEnterpriseCertificateProxyProvider(configFilePath string) (Provider, error) { key, err := client.Cred(configFilePath) if err != nil { - if errors.Is(err, client.ErrCredUnavailable) { - return nil, errSourceUnavailable - } - return nil, err + // TODO(codyoss): once this is fixed upstream can handle this error a + // little better here. But be safe for now and assume unavailable. + return nil, errSourceUnavailable } return (&ecpSource{ diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go index 3227aba280c..738cb21618e 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go @@ -62,11 +62,11 @@ func NewSecureConnectProvider(configFilePath string) (Provider, error) { file, err := os.ReadFile(configFilePath) if err != nil { - if errors.Is(err, os.ErrNotExist) { - // Config file missing means Secure Connect is not supported. - return nil, errSourceUnavailable - } - return nil, err + // Config file missing means Secure Connect is not supported. + // There are non-os.ErrNotExist errors that may be returned. + // (e.g. 
if the home directory is /dev/null, *nix systems will + // return ENOTDIR instead of ENOENT) + return nil, errSourceUnavailable } var metadata secureConnectMetadata diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go index e8675bf824b..347aaced721 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go @@ -82,10 +82,7 @@ func (s *workloadSource) getClientCertificate(info *tls.CertificateRequestInfo) func getCertAndKeyFiles(configFilePath string) (string, string, error) { jsonFile, err := os.Open(configFilePath) if err != nil { - if errors.Is(err, os.ErrNotExist) { - return "", "", errSourceUnavailable - } - return "", "", err + return "", "", errSourceUnavailable } byteValue, err := io.ReadAll(jsonFile) diff --git a/vendor/cloud.google.com/go/auth/internal/transport/s2a.go b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go index 2ed532deb7a..a6330995636 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/s2a.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go @@ -15,12 +15,14 @@ package transport import ( + "context" "encoding/json" + "fmt" "log" + "log/slog" "os" "strconv" "sync" - "time" "cloud.google.com/go/auth/internal/transport/cert" "cloud.google.com/go/compute/metadata" @@ -31,41 +33,38 @@ const ( ) var ( - // The period an MTLS config can be reused before needing refresh. - configExpiry = time.Hour + mtlsConfiguration *mtlsConfig - // mdsMTLSAutoConfigSource is an instance of reuseMTLSConfigSource, with metadataMTLSAutoConfig as its config source. mtlsOnce sync.Once ) // GetS2AAddress returns the S2A address to be reached via plaintext connection. // Returns empty string if not set or invalid. -func GetS2AAddress() string { - c, err := getMetadataMTLSAutoConfig().Config() - if err != nil { - return "" - } - if !c.Valid() { +func GetS2AAddress(logger *slog.Logger) string { + getMetadataMTLSAutoConfig(logger) + if !mtlsConfiguration.valid() { return "" } - return c.S2A.PlaintextAddress + return mtlsConfiguration.S2A.PlaintextAddress } -type mtlsConfigSource interface { - Config() (*mtlsConfig, error) +// GetMTLSS2AAddress returns the S2A address to be reached via MTLS connection. +// Returns empty string if not set or invalid. +func GetMTLSS2AAddress(logger *slog.Logger) string { + getMetadataMTLSAutoConfig(logger) + if !mtlsConfiguration.valid() { + return "" + } + return mtlsConfiguration.S2A.MTLSAddress } // mtlsConfig contains the configuration for establishing MTLS connections with Google APIs. type mtlsConfig struct { - S2A *s2aAddresses `json:"s2a"` - Expiry time.Time + S2A *s2aAddresses `json:"s2a"` } -func (c *mtlsConfig) Valid() bool { - return c != nil && c.S2A != nil && !c.expired() -} -func (c *mtlsConfig) expired() bool { - return c.Expiry.Before(time.Now()) +func (c *mtlsConfig) valid() bool { + return c != nil && c.S2A != nil } // s2aAddresses contains the plaintext and/or MTLS S2A addresses. @@ -76,80 +75,39 @@ type s2aAddresses struct { MTLSAddress string `json:"mtls_address"` } -// getMetadataMTLSAutoConfig returns mdsMTLSAutoConfigSource, which is backed by config from MDS with auto-refresh. 
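For context, the simplified `mtlsConfig` above is unmarshaled straight from the JSON served by the MDS auto-mTLS configuration endpoint. A standalone sketch of the expected shape; the `plaintext_address` tag is an assumption, since only the `mtls_address` tag is visible in this hunk:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors the structs in s2a.go above. The "plaintext_address" tag is an
// assumption; only the "mtls_address" tag appears in the diff.
type s2aAddresses struct {
	PlaintextAddress string `json:"plaintext_address"`
	MTLSAddress      string `json:"mtls_address"`
}

type mtlsConfig struct {
	S2A *s2aAddresses `json:"s2a"`
}

func (c *mtlsConfig) valid() bool { return c != nil && c.S2A != nil }

func main() {
	// Example payload shape; addresses are placeholders.
	resp := `{"s2a":{"plaintext_address":"127.0.0.1:8080","mtls_address":"127.0.0.1:8443"}}`
	var cfg mtlsConfig
	if err := json.Unmarshal([]byte(resp), &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.valid(), cfg.S2A.PlaintextAddress, cfg.S2A.MTLSAddress)
}
```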
-func getMetadataMTLSAutoConfig() mtlsConfigSource { +func getMetadataMTLSAutoConfig(logger *slog.Logger) { + var err error mtlsOnce.Do(func() { - mdsMTLSAutoConfigSource = &reuseMTLSConfigSource{ - src: &metadataMTLSAutoConfig{}, + mtlsConfiguration, err = queryConfig(logger) + if err != nil { + log.Printf("Getting MTLS config failed: %v", err) } }) - return mdsMTLSAutoConfigSource -} - -// reuseMTLSConfigSource caches a valid version of mtlsConfig, and uses `src` to refresh upon config expiry. -// It implements the mtlsConfigSource interface, so calling Config() on it returns an mtlsConfig. -type reuseMTLSConfigSource struct { - src mtlsConfigSource // src.Config() is called when config is expired - mu sync.Mutex // mutex guards config - config *mtlsConfig // cached config -} - -func (cs *reuseMTLSConfigSource) Config() (*mtlsConfig, error) { - cs.mu.Lock() - defer cs.mu.Unlock() - - if cs.config.Valid() { - return cs.config, nil - } - c, err := cs.src.Config() - if err != nil { - return nil, err - } - cs.config = c - return c, nil } -// metadataMTLSAutoConfig is an implementation of the interface mtlsConfigSource -// It has the logic to query MDS and return an mtlsConfig -type metadataMTLSAutoConfig struct{} - -var httpGetMetadataMTLSConfig = func() (string, error) { - return metadata.Get(configEndpointSuffix) +var httpGetMetadataMTLSConfig = func(logger *slog.Logger) (string, error) { + metadataClient := metadata.NewWithOptions(&metadata.Options{ + Logger: logger, + }) + return metadataClient.GetWithContext(context.Background(), configEndpointSuffix) } -func (cs *metadataMTLSAutoConfig) Config() (*mtlsConfig, error) { - resp, err := httpGetMetadataMTLSConfig() +func queryConfig(logger *slog.Logger) (*mtlsConfig, error) { + resp, err := httpGetMetadataMTLSConfig(logger) if err != nil { - log.Printf("querying MTLS config from MDS endpoint failed: %v", err) - return defaultMTLSConfig(), nil + return nil, fmt.Errorf("querying MTLS config from MDS endpoint failed: %w", err) } var config mtlsConfig err = json.Unmarshal([]byte(resp), &config) if err != nil { - log.Printf("unmarshalling MTLS config from MDS endpoint failed: %v", err) - return defaultMTLSConfig(), nil + return nil, fmt.Errorf("unmarshalling MTLS config from MDS endpoint failed: %w", err) } - if config.S2A == nil { - log.Printf("returned MTLS config from MDS endpoint is invalid: %v", config) - return defaultMTLSConfig(), nil + return nil, fmt.Errorf("returned MTLS config from MDS endpoint is invalid: %v", config) } - - // set new expiry - config.Expiry = time.Now().Add(configExpiry) return &config, nil } -func defaultMTLSConfig() *mtlsConfig { - return &mtlsConfig{ - S2A: &s2aAddresses{ - PlaintextAddress: "", - MTLSAddress: "", - }, - Expiry: time.Now().Add(configExpiry), - } -} - func shouldUseS2A(clientCertSource cert.Provider, opts *Options) bool { // If client cert is found, use that over S2A. 
if clientCertSource != nil { diff --git a/vendor/cloud.google.com/go/auth/internal/transport/transport.go b/vendor/cloud.google.com/go/auth/internal/transport/transport.go index 718a6b17145..992ac40df0b 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/transport.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/transport.go @@ -49,6 +49,7 @@ func CloneDetectOptions(oldDo *credentials.DetectOptions) *credentials.DetectOpt // These fields are are pointer types that we just want to use exactly // as the user set, copy the ref Client: oldDo.Client, + Logger: oldDo.Logger, AuthHandlerOptions: oldDo.AuthHandlerOptions, } @@ -81,12 +82,14 @@ func ValidateUniverseDomain(clientUniverseDomain, credentialsUniverseDomain stri // DefaultHTTPClientWithTLS constructs an HTTPClient using the provided tlsConfig, to support mTLS. func DefaultHTTPClientWithTLS(tlsConfig *tls.Config) *http.Client { - trans := baseTransport() + trans := BaseTransport() trans.TLSClientConfig = tlsConfig return &http.Client{Transport: trans} } -func baseTransport() *http.Transport { +// BaseTransport returns a default [http.Transport] which can be used if +// [http.DefaultTransport] has been overwritten. +func BaseTransport() *http.Transport { return &http.Transport{ Proxy: http.ProxyFromEnvironment, DialContext: (&net.Dialer{ diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md b/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md index ff9747beda0..d9044f1a94b 100644 --- a/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md +++ b/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md @@ -1,5 +1,40 @@ # Changelog +## [0.2.7](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.6...auth/oauth2adapt/v0.2.7) (2025-01-09) + + +### Bug Fixes + +* **auth/oauth2adapt:** Update golang.org/x/net to v0.33.0 ([e9b0b69](https://github.com/googleapis/google-cloud-go/commit/e9b0b69644ea5b276cacff0a707e8a5e87efafc9)) + +## [0.2.6](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.5...auth/oauth2adapt/v0.2.6) (2024-11-21) + + +### Bug Fixes + +* **auth/oauth2adapt:** Copy map in tokenSourceAdapter.Token ([#11164](https://github.com/googleapis/google-cloud-go/issues/11164)) ([8cb0cbc](https://github.com/googleapis/google-cloud-go/commit/8cb0cbccdc32886dfb3af49fee04012937d114d2)), refs [#11161](https://github.com/googleapis/google-cloud-go/issues/11161) + +## [0.2.5](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.4...auth/oauth2adapt/v0.2.5) (2024-10-30) + + +### Bug Fixes + +* **auth/oauth2adapt:** Convert token metadata where possible ([#11062](https://github.com/googleapis/google-cloud-go/issues/11062)) ([34bf1c1](https://github.com/googleapis/google-cloud-go/commit/34bf1c164465d66745c0cfdf7cd10a8e2da92e52)) + +## [0.2.4](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.3...auth/oauth2adapt/v0.2.4) (2024-08-08) + + +### Bug Fixes + +* **auth/oauth2adapt:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758)) + +## [0.2.3](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.2...auth/oauth2adapt/v0.2.3) (2024-07-10) + + +### Bug Fixes + +* **auth/oauth2adapt:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b)) + ## 
[0.2.2](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.1...auth/oauth2adapt/v0.2.2) (2024-04-23) diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go b/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go index 9835ac571cf..9cc33e5ee64 100644 --- a/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go +++ b/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go @@ -26,6 +26,13 @@ import ( "golang.org/x/oauth2/google" ) +const ( + oauth2TokenSourceKey = "oauth2.google.tokenSource" + oauth2ServiceAccountKey = "oauth2.google.serviceAccount" + authTokenSourceKey = "auth.google.tokenSource" + authServiceAccountKey = "auth.google.serviceAccount" +) + // TokenProviderFromTokenSource converts any [golang.org/x/oauth2.TokenSource] // into a [cloud.google.com/go/auth.TokenProvider]. func TokenProviderFromTokenSource(ts oauth2.TokenSource) auth.TokenProvider { @@ -47,10 +54,21 @@ func (tp *tokenProviderAdapter) Token(context.Context) (*auth.Token, error) { } return nil, err } + // Preserve compute token metadata, for both types of tokens. + metadata := map[string]interface{}{} + if val, ok := tok.Extra(oauth2TokenSourceKey).(string); ok { + metadata[authTokenSourceKey] = val + metadata[oauth2TokenSourceKey] = val + } + if val, ok := tok.Extra(oauth2ServiceAccountKey).(string); ok { + metadata[authServiceAccountKey] = val + metadata[oauth2ServiceAccountKey] = val + } return &auth.Token{ - Value: tok.AccessToken, - Type: tok.Type(), - Expiry: tok.Expiry, + Value: tok.AccessToken, + Type: tok.Type(), + Expiry: tok.Expiry, + Metadata: metadata, }, nil } @@ -76,11 +94,29 @@ func (ts *tokenSourceAdapter) Token() (*oauth2.Token, error) { } return nil, err } - return &oauth2.Token{ + tok2 := &oauth2.Token{ AccessToken: tok.Value, TokenType: tok.Type, Expiry: tok.Expiry, - }, nil + } + // Preserve token metadata. + m := tok.Metadata + if m != nil { + // Copy map to avoid concurrent map writes error (#11161). + metadata := make(map[string]interface{}, len(m)+2) + for k, v := range m { + metadata[k] = v + } + // Append compute token metadata in converted form. + if val, ok := metadata[authTokenSourceKey].(string); ok && val != "" { + metadata[oauth2TokenSourceKey] = val + } + if val, ok := metadata[authServiceAccountKey].(string); ok && val != "" { + metadata[oauth2ServiceAccountKey] = val + } + tok2 = tok2.WithExtra(metadata) + } + return tok2, nil } // AuthCredentialsFromOauth2Credentials converts a [golang.org/x/oauth2/google.Credentials] diff --git a/vendor/cloud.google.com/go/auth/threelegged.go b/vendor/cloud.google.com/go/auth/threelegged.go index a8ce6cd8a8d..07804dc162d 100644 --- a/vendor/cloud.google.com/go/auth/threelegged.go +++ b/vendor/cloud.google.com/go/auth/threelegged.go @@ -20,6 +20,7 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "mime" "net/http" "net/url" @@ -28,6 +29,7 @@ import ( "time" "cloud.google.com/go/auth/internal" + "github.com/googleapis/gax-go/v2/internallog" ) // AuthorizationHandler is a 3-legged-OAuth helper that prompts the user for @@ -69,6 +71,11 @@ type Options3LO struct { // AuthHandlerOpts provides a set of options for doing a // 3-legged OAuth2 flow with a custom [AuthorizationHandler]. Optional. AuthHandlerOpts *AuthorizationHandlerOptions + // Logger is used for debug logging. If provided, logging will be enabled + // at the loggers configured level. By default logging is disabled unless + // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default + // logger will be used. 
Optional. + Logger *slog.Logger } func (o *Options3LO) validate() error { @@ -96,6 +103,10 @@ func (o *Options3LO) validate() error { return nil } +func (o *Options3LO) logger() *slog.Logger { + return internallog.New(o.Logger) +} + // PKCEOptions holds parameters to support PKCE. type PKCEOptions struct { // Challenge is the un-padded, base64-url-encoded string of the encrypted code verifier. @@ -128,7 +139,7 @@ func (o *Options3LO) client() *http.Client { if o.Client != nil { return o.Client } - return internal.CloneDefaultClient() + return internal.DefaultClient() } // authCodeURL returns a URL that points to a OAuth2 consent page. @@ -293,12 +304,15 @@ func fetchToken(ctx context.Context, o *Options3LO, v url.Values) (*Token, strin if o.AuthStyle == StyleInHeader { req.SetBasicAuth(url.QueryEscape(o.ClientID), url.QueryEscape(o.ClientSecret)) } + logger := o.logger() + logger.DebugContext(ctx, "3LO token request", "request", internallog.HTTPRequest(req, []byte(v.Encode()))) // Make request resp, body, err := internal.DoRequest(o.client(), req) if err != nil { return nil, refreshToken, err } + logger.DebugContext(ctx, "3LO token response", "response", internallog.HTTPResponse(resp, body)) failureStatus := resp.StatusCode < 200 || resp.StatusCode > 299 tokError := &Error{ Response: resp, diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md index 9594e1e2793..bcfb5d81659 100644 --- a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md +++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md @@ -1,5 +1,26 @@ # Changes +## [0.6.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.2...compute/metadata/v0.6.0) (2024-12-13) + + +### Features + +* **compute/metadata:** Add debug logging ([#11078](https://github.com/googleapis/google-cloud-go/issues/11078)) ([a816814](https://github.com/googleapis/google-cloud-go/commit/a81681463906e4473570a2f426eb0dc2de64e53f)) + +## [0.5.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.1...compute/metadata/v0.5.2) (2024-09-20) + + +### Bug Fixes + +* **compute/metadata:** Close Response Body for failed request ([#10891](https://github.com/googleapis/google-cloud-go/issues/10891)) ([e91d45e](https://github.com/googleapis/google-cloud-go/commit/e91d45e4757a9e354114509ba9800085d9e0ff1f)) + +## [0.5.1](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.0...compute/metadata/v0.5.1) (2024-09-12) + + +### Bug Fixes + +* **compute/metadata:** Check error chain for retryable error ([#10840](https://github.com/googleapis/google-cloud-go/issues/10840)) ([2bdedef](https://github.com/googleapis/google-cloud-go/commit/2bdedeff621b223d63cebc4355fcf83bc68412cd)) + ## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.4.0...compute/metadata/v0.5.0) (2024-07-10) diff --git a/vendor/cloud.google.com/go/compute/metadata/log.go b/vendor/cloud.google.com/go/compute/metadata/log.go new file mode 100644 index 00000000000..8ec673b8823 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/log.go @@ -0,0 +1,149 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metadata + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "log/slog" + "net/http" + "strings" +) + +// Code below this point is copied from github.com/googleapis/gax-go/v2/internallog +// to avoid the dependency. The compute/metadata module is used by too many +// non-client library modules that can't justify the dependency. + +// The handler returned if logging is not enabled. +type noOpHandler struct{} + +func (h noOpHandler) Enabled(_ context.Context, _ slog.Level) bool { + return false +} + +func (h noOpHandler) Handle(_ context.Context, _ slog.Record) error { + return nil +} + +func (h noOpHandler) WithAttrs(_ []slog.Attr) slog.Handler { + return h +} + +func (h noOpHandler) WithGroup(_ string) slog.Handler { + return h +} + +// httpRequest returns a lazily evaluated [slog.LogValuer] for a +// [http.Request] and the associated body. +func httpRequest(req *http.Request, body []byte) slog.LogValuer { + return &request{ + req: req, + payload: body, + } +} + +type request struct { + req *http.Request + payload []byte +} + +func (r *request) LogValue() slog.Value { + if r == nil || r.req == nil { + return slog.Value{} + } + var groupValueAttrs []slog.Attr + groupValueAttrs = append(groupValueAttrs, slog.String("method", r.req.Method)) + groupValueAttrs = append(groupValueAttrs, slog.String("url", r.req.URL.String())) + + var headerAttr []slog.Attr + for k, val := range r.req.Header { + headerAttr = append(headerAttr, slog.String(k, strings.Join(val, ","))) + } + if len(headerAttr) > 0 { + groupValueAttrs = append(groupValueAttrs, slog.Any("headers", headerAttr)) + } + + if len(r.payload) > 0 { + if attr, ok := processPayload(r.payload); ok { + groupValueAttrs = append(groupValueAttrs, attr) + } + } + return slog.GroupValue(groupValueAttrs...) +} + +// httpResponse returns a lazily evaluated [slog.LogValuer] for a +// [http.Response] and the associated body. +func httpResponse(resp *http.Response, body []byte) slog.LogValuer { + return &response{ + resp: resp, + payload: body, + } +} + +type response struct { + resp *http.Response + payload []byte +} + +func (r *response) LogValue() slog.Value { + if r == nil { + return slog.Value{} + } + var groupValueAttrs []slog.Attr + groupValueAttrs = append(groupValueAttrs, slog.String("status", fmt.Sprint(r.resp.StatusCode))) + + var headerAttr []slog.Attr + for k, val := range r.resp.Header { + headerAttr = append(headerAttr, slog.String(k, strings.Join(val, ","))) + } + if len(headerAttr) > 0 { + groupValueAttrs = append(groupValueAttrs, slog.Any("headers", headerAttr)) + } + + if len(r.payload) > 0 { + if attr, ok := processPayload(r.payload); ok { + groupValueAttrs = append(groupValueAttrs, attr) + } + } + return slog.GroupValue(groupValueAttrs...) 
+} + +func processPayload(payload []byte) (slog.Attr, bool) { + peekChar := payload[0] + if peekChar == '{' { + // JSON object + var m map[string]any + if err := json.Unmarshal(payload, &m); err == nil { + return slog.Any("payload", m), true + } + } else if peekChar == '[' { + // JSON array + var m []any + if err := json.Unmarshal(payload, &m); err == nil { + return slog.Any("payload", m), true + } + } else { + // Everything else + buf := &bytes.Buffer{} + if err := json.Compact(buf, payload); err != nil { + // Write raw payload incase of error + buf.Write(payload) + } + return slog.String("payload", buf.String()), true + } + return slog.Attr{}, false +} diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index 345080b7297..4c18a383a43 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -24,6 +24,7 @@ import ( "encoding/json" "fmt" "io" + "log/slog" "net" "net/http" "net/url" @@ -60,7 +61,10 @@ var ( instID = &cachedValue{k: "instance/id", trim: true} ) -var defaultClient = &Client{hc: newDefaultHTTPClient()} +var defaultClient = &Client{ + hc: newDefaultHTTPClient(), + logger: slog.New(noOpHandler{}), +} func newDefaultHTTPClient() *http.Client { return &http.Client{ @@ -408,17 +412,42 @@ func strsContains(ss []string, s string) bool { // A Client provides metadata. type Client struct { - hc *http.Client + hc *http.Client + logger *slog.Logger +} + +// Options for configuring a [Client]. +type Options struct { + // Client is the HTTP client used to make requests. Optional. + Client *http.Client + // Logger is used to log information about HTTP request and responses. + // If not provided, nothing will be logged. Optional. + Logger *slog.Logger } // NewClient returns a Client that can be used to fetch metadata. // Returns the client that uses the specified http.Client for HTTP requests. // If nil is specified, returns the default client. func NewClient(c *http.Client) *Client { - if c == nil { + return NewWithOptions(&Options{ + Client: c, + }) +} + +// NewWithOptions returns a Client that is configured with the provided Options. +func NewWithOptions(opts *Options) *Client { + if opts == nil { return defaultClient } - return &Client{hc: c} + client := opts.Client + if client == nil { + client = newDefaultHTTPClient() + } + logger := opts.Logger + if logger == nil { + logger = slog.New(noOpHandler{}) + } + return &Client{hc: client, logger: logger} } // getETag returns a value from the metadata service as well as the associated ETag. 
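Putting the new `Options` type together: callers can now opt into debug logging of metadata traffic, while a nil `Logger` falls back to the no-op handler and keeps the client silent. A usage sketch against the v0.6.0 API shown above (it only returns data where a metadata server is reachable):

```go
package main

import (
	"context"
	"fmt"
	"log/slog"
	"os"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	// Debug-level handler so the "metadata request"/"metadata response"
	// entries emitted by getETag are actually visible.
	logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{
		Level: slog.LevelDebug,
	}))
	client := metadata.NewWithOptions(&metadata.Options{
		Logger: logger, // omit (nil) to disable logging entirely
	})

	// Only succeeds on GCE or anywhere an MDS is reachable.
	id, err := client.GetWithContext(context.Background(), "instance/id")
	fmt.Println(id, err)
}
```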
@@ -448,14 +477,26 @@ func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string req.Header.Set("User-Agent", userAgent) var res *http.Response var reqErr error + var body []byte retryer := newRetryer() for { + c.logger.DebugContext(ctx, "metadata request", "request", httpRequest(req, nil)) res, reqErr = c.hc.Do(req) var code int if res != nil { code = res.StatusCode + body, err = io.ReadAll(res.Body) + if err != nil { + res.Body.Close() + return "", "", err + } + c.logger.DebugContext(ctx, "metadata response", "response", httpResponse(res, body)) + res.Body.Close() } if delay, shouldRetry := retryer.Retry(code, reqErr); shouldRetry { + if res != nil && res.Body != nil { + res.Body.Close() + } if err := sleep(ctx, delay); err != nil { return "", "", err } @@ -466,18 +507,13 @@ func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string if reqErr != nil { return "", "", reqErr } - defer res.Body.Close() if res.StatusCode == http.StatusNotFound { return "", "", NotDefinedError(suffix) } - all, err := io.ReadAll(res.Body) - if err != nil { - return "", "", err - } if res.StatusCode != 200 { - return "", "", &Error{Code: res.StatusCode, Message: string(all)} + return "", "", &Error{Code: res.StatusCode, Message: string(body)} } - return string(all), res.Header.Get("Etag"), nil + return string(body), res.Header.Get("Etag"), nil } // Get returns a value from the metadata service. diff --git a/vendor/cloud.google.com/go/compute/metadata/retry_linux.go b/vendor/cloud.google.com/go/compute/metadata/retry_linux.go index bb412f8917e..2e53f012300 100644 --- a/vendor/cloud.google.com/go/compute/metadata/retry_linux.go +++ b/vendor/cloud.google.com/go/compute/metadata/retry_linux.go @@ -17,10 +17,15 @@ package metadata -import "syscall" +import ( + "errors" + "syscall" +) func init() { // Initialize syscallRetryable to return true on transient socket-level // errors. These errors are specific to Linux. - syscallRetryable = func(err error) bool { return err == syscall.ECONNRESET || err == syscall.ECONNREFUSED } + syscallRetryable = func(err error) bool { + return errors.Is(err, syscall.ECONNRESET) || errors.Is(err, syscall.ECONNREFUSED) + } } diff --git a/vendor/cloud.google.com/go/iam/CHANGES.md b/vendor/cloud.google.com/go/iam/CHANGES.md index 5aab66312bd..2331bfc1b1d 100644 --- a/vendor/cloud.google.com/go/iam/CHANGES.md +++ b/vendor/cloud.google.com/go/iam/CHANGES.md @@ -1,6 +1,13 @@ # Changes +## [1.1.11](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.10...iam/v1.1.11) (2024-07-10) + + +### Bug Fixes + +* **iam:** Bump google.golang.org/grpc@v1.64.1 ([8ecc4e9](https://github.com/googleapis/google-cloud-go/commit/8ecc4e9622e5bbe9b90384d5848ab816027226c5)) + ## [1.1.10](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.9...iam/v1.1.10) (2024-07-01) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md index f88b277ab63..cf422304e7b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md @@ -1,5 +1,12 @@ # Release History +## 1.17.0 (2025-01-07) + +### Features Added + +* Added field `OperationLocationResultPath` to `runtime.NewPollerOptions[T]` for LROs that use the `Operation-Location` pattern. +* Support `encoding.TextMarshaler` and `encoding.TextUnmarshaler` interfaces in `arm.ResourceID`. 
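The `retry_linux.go` fix matters because socket errors reach the retry check wrapped, typically inside `*url.Error`, `*net.OpError`, and `*os.SyscallError`, so the old `==` comparison never matched and transient resets were not retried; `errors.Is` walks the wrap chain. A Unix-only sketch of the difference:

```go
//go:build unix

package main

import (
	"errors"
	"fmt"
	"net"
	"os"
	"syscall"
)

func main() {
	// Simulate how a reset arrives from the HTTP stack: wrapped twice.
	err := error(&net.OpError{
		Op:  "read",
		Net: "tcp",
		Err: os.NewSyscallError("read", syscall.ECONNRESET),
	})

	fmt.Println(err == syscall.ECONNRESET)          // false: old check misses it
	fmt.Println(errors.Is(err, syscall.ECONNRESET)) // true: the chain is unwrapped
}
```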
+ ## 1.16.0 (2024-10-17) ### Features Added diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go index 00f2d5a0ab9..d9a4e36dccb 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go @@ -110,6 +110,21 @@ func (id *ResourceID) String() string { return id.stringValue } +// MarshalText returns a textual representation of the ResourceID +func (id *ResourceID) MarshalText() ([]byte, error) { + return []byte(id.String()), nil +} + +// UnmarshalText decodes the textual representation of a ResourceID +func (id *ResourceID) UnmarshalText(text []byte) error { + newId, err := ParseResourceID(string(text)) + if err != nil { + return err + } + *id = *newId + return nil +} + func newResourceID(parent *ResourceID, resourceTypeName string, resourceName string) *ResourceID { id := &ResourceID{} id.init(parent, chooseResourceType(resourceTypeName, parent), resourceName, true) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go index 03699fd7629..f4963318932 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go @@ -40,12 +40,13 @@ type Poller[T any] struct { OrigURL string `json:"origURL"` Method string `json:"method"` FinalState pollers.FinalStateVia `json:"finalState"` + ResultPath string `json:"resultPath"` CurState string `json:"state"` } // New creates a new Poller from the provided initial response. // Pass nil for response to create an empty Poller for rehydration. 
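With the new `encoding.TextMarshaler`/`encoding.TextUnmarshaler` implementations, `arm.ResourceID` values round-trip through `encoding/json` and other text-based codecs as plain resource-ID strings. A sketch assuming azcore v1.17.0, with a placeholder subscription GUID:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
)

func main() {
	const raw = "/subscriptions/00000000-0000-0000-0000-000000000000" +
		"/resourceGroups/myRG/providers/Microsoft.Storage/storageAccounts/myacct"

	id, err := arm.ParseResourceID(raw)
	if err != nil {
		panic(err)
	}

	// MarshalText kicks in, so the ID serializes as a JSON string.
	b, _ := json.Marshal(id)
	fmt.Println(string(b))

	// UnmarshalText re-parses it on the way back in.
	var back arm.ResourceID
	if err := json.Unmarshal(b, &back); err != nil {
		panic(err)
	}
	fmt.Println(back.Name, back.ResourceGroupName)
}
```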
-func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.FinalStateVia) (*Poller[T], error) { +func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.FinalStateVia, resultPath string) (*Poller[T], error) { if resp == nil { log.Write(log.EventLRO, "Resuming Operation-Location poller.") return &Poller[T]{pl: pl}, nil @@ -82,6 +83,7 @@ func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.Fi OrigURL: resp.Request.URL.String(), Method: resp.Request.Method, FinalState: finalState, + ResultPath: resultPath, CurState: curState, }, nil } @@ -116,10 +118,6 @@ func (p *Poller[T]) Result(ctx context.Context, out *T) error { var req *exported.Request var err error - // when the payload is included with the status monitor on - // terminal success it's in the "result" JSON property - payloadPath := "result" - if p.FinalState == pollers.FinalStateViaLocation && p.LocURL != "" { req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL) } else if rl, rlErr := poller.GetResourceLocation(p.resp); rlErr != nil && !errors.Is(rlErr, poller.ErrNoBody) { @@ -138,7 +136,7 @@ func (p *Poller[T]) Result(ctx context.Context, out *T) error { // if a final GET request has been created, execute it if req != nil { // no JSON path when making a final GET request - payloadPath = "" + p.ResultPath = "" resp, err := p.pl.Do(req) if err != nil { return err @@ -146,5 +144,5 @@ func (p *Poller[T]) Result(ctx context.Context, out *T) error { p.resp = resp } - return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), payloadPath, out) + return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), p.ResultPath, out) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go index 9f53770e5b6..44ab00d4008 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go @@ -40,5 +40,5 @@ const ( Module = "azcore" // Version is the semantic version (see http://semver.org) of this module. - Version = "v1.16.0" + Version = "v1.17.0" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go index b960cff0b2d..c66fc0a90a5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go @@ -32,6 +32,7 @@ type PagingHandler[T any] struct { } // Pager provides operations for iterating over paged responses. +// Methods on this type are not safe for concurrent use. type Pager[T any] struct { current *T handler PagingHandler[T] diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go index 03f76c9aa8e..4f90e447432 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go @@ -50,8 +50,14 @@ const ( // NewPollerOptions contains the optional parameters for NewPoller. type NewPollerOptions[T any] struct { // FinalStateVia contains the final-state-via value for the LRO. + // NOTE: used only for Azure-AsyncOperation and Operation-Location LROs. 
FinalStateVia FinalStateVia + // OperationLocationResultPath contains the JSON path to the result's + // payload when it's included with the terminal success response. + // NOTE: only used for Operation-Location LROs. + OperationLocationResultPath string + // Response contains a preconstructed response type. // The final payload will be unmarshaled into it and returned. Response *T @@ -98,7 +104,7 @@ func NewPoller[T any](resp *http.Response, pl exported.Pipeline, options *NewPol opr, err = async.New[T](pl, resp, options.FinalStateVia) } else if op.Applicable(resp) { // op poller must be checked before loc as it can also have a location header - opr, err = op.New[T](pl, resp, options.FinalStateVia) + opr, err = op.New[T](pl, resp, options.FinalStateVia, options.OperationLocationResultPath) } else if loc.Applicable(resp) { opr, err = loc.New[T](pl, resp) } else if body.Applicable(resp) { @@ -172,7 +178,7 @@ func NewPollerFromResumeToken[T any](token string, pl exported.Pipeline, options } else if loc.CanResume(asJSON) { opr, _ = loc.New[T](pl, nil) } else if op.CanResume(asJSON) { - opr, _ = op.New[T](pl, nil, "") + opr, _ = op.New[T](pl, nil, "", "") } else { return nil, fmt.Errorf("unhandled poller token %s", string(raw)) } @@ -200,6 +206,7 @@ type PollingHandler[T any] interface { } // Poller encapsulates a long-running operation, providing polling facilities until the operation reaches a terminal state. +// Methods on this type are not safe for concurrent use. type Poller[T any] struct { op PollingHandler[T] resp *http.Response diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/BREAKING_CHANGES.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/BREAKING_CHANGES.md index ea267e4f411..567e6975b18 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/BREAKING_CHANGES.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/BREAKING_CHANGES.md @@ -1,5 +1,15 @@ # Breaking Changes +## v1.8.0 + +### New errors from `NewManagedIdentityCredential` in some environments + +`NewManagedIdentityCredential` now returns an error when `ManagedIdentityCredentialOptions.ID` is set in a hosting environment whose managed identity API doesn't support user-assigned identities. `ManagedIdentityCredential.GetToken()` formerly logged a warning in these cases. Returning an error instead prevents the credential authenticating an unexpected identity. 
The affected hosting environments are: + * Azure Arc + * Azure ML (when a resource or object ID is specified; client IDs are supported) + * Cloud Shell + * Service Fabric + ## v1.6.0 ### Behavioral change to `DefaultAzureCredential` in IMDS managed identity scenarios diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md index e35f5ad935b..1ffc19a548f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md @@ -1,5 +1,19 @@ # Release History +## 1.8.1 (2025-01-15) + +### Bugs Fixed +* User credential types inconsistently log access token scopes +* `DefaultAzureCredential` skips managed identity in Azure Container Instances +* Credentials having optional tenant IDs such as `AzureCLICredential` and + `InteractiveBrowserCredential` require setting `AdditionallyAllowedTenants` + when used with some clients + +### Other Changes +* `ChainedTokenCredential` and `DefaultAzureCredential` continue to their next + credential after `ManagedIdentityCredential` receives an unexpected response + from IMDS, indicating the response is from something else such as a proxy + ## 1.8.0 (2024-10-08) ### Other Changes diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md index 96f30b25cc3..c99ce5b2b56 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md @@ -54,17 +54,7 @@ The `azidentity` module focuses on OAuth authentication with Microsoft Entra ID. ### DefaultAzureCredential -`DefaultAzureCredential` simplifies authentication while developing applications that deploy to Azure by combining credentials used in Azure hosting environments and credentials used in local development. In production, it's better to use a specific credential type so authentication is more predictable and easier to debug. `DefaultAzureCredential` attempts to authenticate via the following mechanisms in this order, stopping when one succeeds: - -![DefaultAzureCredential authentication flow](img/mermaidjs/DefaultAzureCredentialAuthFlow.svg) - -1. **Environment** - `DefaultAzureCredential` will read account information specified via [environment variables](#environment-variables) and use it to authenticate. -1. **Workload Identity** - If the app is deployed on Kubernetes with environment variables set by the workload identity webhook, `DefaultAzureCredential` will authenticate the configured identity. -1. **Managed Identity** - If the app is deployed to an Azure host with managed identity enabled, `DefaultAzureCredential` will authenticate with it. -1. **Azure CLI** - If a user or service principal has authenticated via the Azure CLI `az login` command, `DefaultAzureCredential` will authenticate that identity. -1. **Azure Developer CLI** - If the developer has authenticated via the Azure Developer CLI `azd auth login` command, the `DefaultAzureCredential` will authenticate with that account. - -> Note: `DefaultAzureCredential` is intended to simplify getting started with the SDK by handling common scenarios with reasonable default behaviors. Developers who want more control or whose scenario isn't served by the default settings should use other credential types. 
+`DefaultAzureCredential` simplifies authentication while developing apps that deploy to Azure by combining credentials used in Azure hosting environments with credentials used in local development. For more information, see [DefaultAzureCredential overview][dac_overview]. ## Managed Identity @@ -128,10 +118,10 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil) ### Credential chains -|Credential|Usage -|-|- -|[DefaultAzureCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DefaultAzureCredential)|Simplified authentication experience for getting started developing Azure apps -|[ChainedTokenCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ChainedTokenCredential)|Define custom authentication flows, composing multiple credentials +|Credential|Usage|Reference +|-|-|- +|[DefaultAzureCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DefaultAzureCredential)|Simplified authentication experience for getting started developing Azure apps|[DefaultAzureCredential overview][dac_overview]| +|[ChainedTokenCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ChainedTokenCredential)|Define custom authentication flows, composing multiple credentials|[ChainedTokenCredential overview][ctc_overview]| ### Authenticating Azure-Hosted Applications @@ -260,4 +250,8 @@ For more information, see the or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. + +[ctc_overview]: https://aka.ms/azsdk/go/identity/credential-chains#chainedtokencredential-overview +[dac_overview]: https://aka.ms/azsdk/go/identity/credential-chains#defaultazurecredential-overview + ![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-go%2Fsdk%2Fazidentity%2FREADME.png) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD index e0bd09c636e..8fc7c64aa34 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD @@ -22,13 +22,13 @@ Some credential types support opt-in persistent token caching (see [the below ta Persistent caches are encrypted at rest using a mechanism that depends on the operating system: -| Operating system | Encryption facility | -|------------------|---------------------------------------| -| Linux | kernel key retention service (keyctl) | -| macOS | Keychain | -| Windows | Data Protection API (DPAPI) | +| Operating system | Encryption facility | +| ---------------- | ---------------------------------------------- | +| Linux | kernel key retention service (keyctl) | +| macOS | Keychain (requires cgo and native build tools) | +| Windows | Data Protection API (DPAPI) | -Persistent caching requires encryption. When the required encryption facility is unuseable, or the application is running on an unsupported OS, the persistent cache constructor returns an error. This doesn't mean that authentication is impossible, only that credentials can't persist authentication data and the application will need to reauthenticate the next time it runs. See the [package documentation][example] for example code showing how to configure persistent caching and access cached data. +Persistent caching requires encryption. 
When the required encryption facility is unuseable, or the application is running on an unsupported OS, the persistent cache constructor returns an error. This doesn't mean that authentication is impossible, only that credentials can't persist authentication data and the application will need to reauthenticate the next time it runs. See the package documentation for examples showing how to configure persistent caching and access cached data for [users][user_example] and [service principals][sp_example]. ### Credentials supporting token caching @@ -37,7 +37,7 @@ The following table indicates the state of in-memory and persistent caching in e **Note:** in-memory caching is enabled by default for every type supporting it. Persistent token caching must be enabled explicitly. See the [package documentation][user_example] for an example showing how to do this for credential types authenticating users. For types that authenticate service principals, set the `Cache` field on the constructor's options as shown in [this example][sp_example]. | Credential | In-memory token caching | Persistent token caching | -|--------------------------------|---------------------------------------------------------------------|--------------------------| +| ------------------------------ | ------------------------------------------------------------------- | ------------------------ | | `AzureCLICredential` | Not Supported | Not Supported | | `AzureDeveloperCLICredential` | Not Supported | Not Supported | | `AzurePipelinesCredential` | Supported | Supported | diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md index c24f67e84a7..9c4b1cd71c8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md @@ -8,6 +8,7 @@ This troubleshooting guide covers failure investigation techniques, common error - [Permission issues](#permission-issues) - [Find relevant information in errors](#find-relevant-information-in-errors) - [Enable and configure logging](#enable-and-configure-logging) +- [Troubleshoot persistent token caching issues](#troubleshoot-persistent-token-caching-issues) - [Troubleshoot AzureCLICredential authentication issues](#troubleshoot-azureclicredential-authentication-issues) - [Troubleshoot AzureDeveloperCLICredential authentication issues](#troubleshoot-azuredeveloperclicredential-authentication-issues) - [Troubleshoot AzurePipelinesCredential authentication issues](#troubleshoot-azurepipelinescredential-authentication-issues) @@ -236,6 +237,29 @@ azd auth token --output json --scope https://management.core.windows.net/.defaul | No service connection found with identifier |The `serviceConnectionID` argument to `NewAzurePipelinesCredential` is incorrect| Verify the service connection ID. This parameter refers to the `resourceId` of the Azure Service Connection. It can also be found in the query string of the service connection's configuration in Azure DevOps. [Azure Pipelines documentation](https://learn.microsoft.com/azure/devops/pipelines/library/service-endpoints?view=azure-devops&tabs=yaml) has more information about service connections.| |401 (Unauthorized) response from OIDC endpoint|The `systemAccessToken` argument to `NewAzurePipelinesCredential` is incorrect|Check pipeline configuration. 
This value comes from the predefined variable `System.AccessToken` [as described in Azure Pipelines documentation](https://learn.microsoft.com/azure/devops/pipelines/build/variables?view=azure-devops&tabs=yaml#systemaccesstoken).| +## Troubleshoot persistent token caching issues + +### macOS + +[azidentity/cache](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache) encrypts persistent caches with the system Keychain on macOS. You may see build and runtime errors there because calling the Keychain API requires cgo and macOS prohibits Keychain access in some scenarios. + +#### Build errors + +Build errors about undefined `accessor` symbols indicate that cgo wasn't enabled. For example: +``` +$ GOOS=darwin go build +# github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache +../../go/pkg/mod/github.com/!azure/azure-sdk-for-go/sdk/azidentity/cache@v0.3.0/darwin.go:18:19: undefined: accessor.New +../../go/pkg/mod/github.com/!azure/azure-sdk-for-go/sdk/azidentity/cache@v0.3.0/darwin.go:18:38: undefined: accessor.WithAccount +``` + +Try `go build` again with `CGO_ENABLED=1`. You may need to install native build tools. + +#### Runtime errors + +macOS prohibits Keychain access from environments without a GUI such as SSH sessions. If your application calls the persistent cache constructor ([cache.New](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache#New)) from an SSH session on a macOS host, you'll see an error like +`persistent storage isn't available due to error "User interaction is not allowed. (-25308)"`. This doesn't mean authentication is impossible, only that credentials can't persist data and the application must reauthenticate the next time it runs. + ## Get additional help Additional information on ways to reach out for support can be found in [SUPPORT.md](https://github.com/Azure/azure-sdk-for-go/blob/main/SUPPORT.md). diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go index ce55dc658e3..40a94154c67 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go @@ -42,6 +42,8 @@ const ( developerSignOnClientID = "04b07795-8ddb-461a-bbee-02f9e1bf7b46" defaultSuffix = "/.default" + scopeLogFmt = "%s.GetToken() acquired a token for scope %q" + traceNamespace = "Microsoft.Entra" traceOpGetToken = "GetToken" traceOpAuthenticate = "Authenticate" @@ -103,7 +105,16 @@ func resolveAdditionalTenants(tenants []string) []string { return cp } -// resolveTenant returns the correct tenant for a token request +// resolveTenant returns the correct tenant for a token request, or "" when the calling credential doesn't +// have an explicitly configured tenant and the caller didn't specify a tenant for the token request. +// +// - defaultTenant: tenant set when constructing the credential, if any. "" is valid for credentials +// having an optional or implicit tenant such as dev tool and interactive user credentials. Those +// default to the tool's configured tenant or the user's home tenant, respectively. +// - specified: tenant specified for this token request i.e., TokenRequestOptions.TenantID. May be "". 
+// - credName: name of the calling credential type; for error messages +// - additionalTenants: optional allow list of tenants the credential may acquire tokens from in +// addition to defaultTenant i.e., the credential's AdditionallyAllowedTenants option func resolveTenant(defaultTenant, specified, credName string, additionalTenants []string) (string, error) { if specified == "" || specified == defaultTenant { return defaultTenant, nil @@ -119,6 +130,17 @@ func resolveTenant(defaultTenant, specified, credName string, additionalTenants return specified, nil } } + if len(additionalTenants) == 0 { + switch defaultTenant { + case "", organizationsTenantID: + // The application didn't specify a tenant or allow list when constructing the credential. Allow the + // tenant specified for this token request because we have nothing to compare it to (i.e., it vacuously + // satisfies the credential's configuration); don't know whether the application is multitenant; and + // don't want to return an error in the common case that the specified tenant matches the credential's + // default tenant determined elsewhere e.g., in some dev tool's configuration. + return specified, nil + } + } return "", fmt.Errorf(`%s isn't configured to acquire tokens for tenant %q. To enable acquiring tokens for this tenant add it to the AdditionallyAllowedTenants on the credential options, or add "*" to allow acquiring tokens for any tenant`, credName, specified) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go index b9976f5fede..e2f371cfd87 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go @@ -30,9 +30,9 @@ type azTokenProvider func(ctx context.Context, scopes []string, tenant, subscrip // AzureCLICredentialOptions contains optional parameters for AzureCLICredential. type AzureCLICredentialOptions struct { - // AdditionallyAllowedTenants specifies tenants for which the credential may acquire tokens, in addition - // to TenantID. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the - // logged in account can access. + // AdditionallyAllowedTenants specifies tenants to which the credential may authenticate, in addition to + // TenantID. When TenantID is empty, this option has no effect and the credential will authenticate to + // any requested tenant. Add the wildcard value "*" to allow the credential to authenticate to any tenant. AdditionallyAllowedTenants []string // Subscription is the name or ID of a subscription. Set this to acquire tokens for an account other diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go index cbe7c4c2db1..46d0b551922 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go @@ -30,9 +30,9 @@ type azdTokenProvider func(ctx context.Context, scopes []string, tenant string) // AzureDeveloperCLICredentialOptions contains optional parameters for AzureDeveloperCLICredential. 
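Taken together, the reworded option docs and the `resolveTenant` fallthrough describe one model: an explicit `TenantID` pins the credential, `AdditionallyAllowedTenants` widens the allow list, and an empty `TenantID` now lets per-request tenants through. A usage sketch with placeholder tenant IDs:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// Pinned to one tenant, but allowed to answer token requests for a
	// second one; "*" instead would allow any tenant. IDs are placeholders.
	cred, err := azidentity.NewAzureCLICredential(&azidentity.AzureCLICredentialOptions{
		TenantID:                   "11111111-1111-1111-1111-111111111111",
		AdditionallyAllowedTenants: []string{"22222222-2222-2222-2222-222222222222"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(cred != nil)
}
```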
type AzureDeveloperCLICredentialOptions struct { - // AdditionallyAllowedTenants specifies tenants for which the credential may acquire tokens, in addition - // to TenantID. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the - // logged in account can access. + // AdditionallyAllowedTenants specifies tenants to which the credential may authenticate, in addition to + // TenantID. When TenantID is empty, this option has no effect and the credential will authenticate to + // any requested tenant. Add the wildcard value "*" to allow the credential to authenticate to any tenant. AdditionallyAllowedTenants []string // TenantID identifies the tenant the credential should authenticate in. Defaults to the azd environment, diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go index 2460f66ec1e..82342a02545 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go @@ -27,7 +27,10 @@ type ChainedTokenCredentialOptions struct { } // ChainedTokenCredential links together multiple credentials and tries them sequentially when authenticating. By default, -// it tries all the credentials until one authenticates, after which it always uses that credential. +// it tries all the credentials until one authenticates, after which it always uses that credential. For more information, +// see [ChainedTokenCredential overview]. +// +// [ChainedTokenCredential overview]: https://aka.ms/azsdk/go/identity/credential-chains#chainedtokencredential-overview type ChainedTokenCredential struct { cond *sync.Cond iterating bool @@ -46,6 +49,9 @@ func NewChainedTokenCredential(sources []azcore.TokenCredential, options *Chaine if source == nil { // cannot have a nil credential in the chain or else the application will panic when GetToken() is called on nil return nil, errors.New("sources cannot contain nil") } + if mc, ok := source.(*ManagedIdentityCredential); ok { + mc.mic.chained = true + } } cp := make([]azcore.TokenCredential, len(sources)) copy(cp, sources) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml index 62c12b5465f..c3af0cdc2d6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml @@ -26,27 +26,16 @@ extends: parameters: CloudConfig: Public: - ServiceConnection: azure-sdk-tests - SubscriptionConfigurationFilePaths: - - eng/common/TestResources/sub-config/AzurePublicMsft.json SubscriptionConfigurations: - - $(sub-config-azure-cloud-test-resources) - $(sub-config-identity-test-resources) EnableRaceDetector: true + Location: westus2 RunLiveTests: true ServiceDirectory: azidentity UsePipelineProxy: false ${{ if endsWith(variables['Build.DefinitionName'], 'weekly') }}: - PreSteps: - - task: AzureCLI@2 - displayName: Set OIDC token - inputs: - addSpnToEnvironment: true - azureSubscription: azure-sdk-tests - inlineScript: Write-Host "##vso[task.setvariable variable=OIDC_TOKEN;]$($env:idToken)" - scriptLocation: inlineScript - scriptType: pscore + PersistOidcToken: true MatrixConfigs: - Name: managed_identity_matrix GenerateVMJobs: true diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go 
b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go index 7059a510c22..92f508094df 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go @@ -115,7 +115,7 @@ func (c *confidentialClient) GetToken(ctx context.Context, tro policy.TokenReque err = newAuthenticationFailedErrorFromMSAL(c.name, err) } } else { - msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", c.name, strings.Join(ar.GrantedScopes, ", ")) + msg := fmt.Sprintf(scopeLogFmt, c.name, strings.Join(ar.GrantedScopes, ", ")) log.Write(EventAuthentication, msg) } return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go index 3cfc0f7bf1d..14af271f6a1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go @@ -23,15 +23,19 @@ type DefaultAzureCredentialOptions struct { // to credential types that authenticate via external tools such as the Azure CLI. azcore.ClientOptions - // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. Add - // the wildcard value "*" to allow the credential to acquire tokens for any tenant. This value can also be - // set as a semicolon delimited list of tenants in the environment variable AZURE_ADDITIONALLY_ALLOWED_TENANTS. + // AdditionallyAllowedTenants specifies tenants to which the credential may authenticate, in addition to + // TenantID. When TenantID is empty, this option has no effect and the credential will authenticate to + // any requested tenant. Add the wildcard value "*" to allow the credential to authenticate to any tenant. + // This value can also be set as a semicolon delimited list of tenants in the environment variable + // AZURE_ADDITIONALLY_ALLOWED_TENANTS. AdditionallyAllowedTenants []string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool + // TenantID sets the default tenant for authentication via the Azure CLI and workload identity. TenantID string } @@ -39,7 +43,7 @@ type DefaultAzureCredentialOptions struct { // DefaultAzureCredential simplifies authentication while developing applications that deploy to Azure by // combining credentials used in Azure hosting environments and credentials used in local development. In // production, it's better to use a specific credential type so authentication is more predictable and easier -// to debug. +// to debug. For more information, see [DefaultAzureCredential overview]. // // DefaultAzureCredential attempts to authenticate with each of these credential types, in the following order, // stopping when one provides a token: @@ -55,6 +59,8 @@ type DefaultAzureCredentialOptions struct { // Consult the documentation for these credential types for more information on how they authenticate. 
// Once a credential has successfully authenticated, DefaultAzureCredential will use that credential for // every subsequent authentication. +// +// [DefaultAzureCredential overview]: https://aka.ms/azsdk/go/identity/credential-chains#defaultazurecredential-overview type DefaultAzureCredential struct { chain *ChainedTokenCredential } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go index 53c4c728735..53ae9767f42 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go @@ -21,8 +21,9 @@ const credNameDeviceCode = "DeviceCodeCredential" type DeviceCodeCredentialOptions struct { azcore.ClientOptions - // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire - // tokens. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant. + // AdditionallyAllowedTenants specifies tenants to which the credential may authenticate, in addition to + // TenantID. When TenantID is empty, this option has no effect and the credential will authenticate to + // any requested tenant. Add the wildcard value "*" to allow the credential to authenticate to any tenant. AdditionallyAllowedTenants []string // AuthenticationRecord returned by a call to a credential's Authenticate method. Set this option diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go index 848db16e435..ec89de9b5b2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go @@ -20,8 +20,9 @@ const credNameBrowser = "InteractiveBrowserCredential" type InteractiveBrowserCredentialOptions struct { azcore.ClientOptions - // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire - // tokens. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant. + // AdditionallyAllowedTenants specifies tenants to which the credential may authenticate, in addition to + // TenantID. When TenantID is empty, this option has no effect and the credential will authenticate to + // any requested tenant. Add the wildcard value "*" to allow the credential to authenticate to any tenant. AdditionallyAllowedTenants []string // AuthenticationRecord returned by a call to a credential's Authenticate method. Set this option diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go index 4c657a92ec5..cc07fd70153 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go @@ -65,6 +65,9 @@ type managedIdentityClient struct { id ManagedIDKind msiType msiType probeIMDS bool + // chained indicates whether the client is part of a credential chain. If true, the client will return + // a credentialUnavailableError instead of an AuthenticationFailedError for an unexpected IMDS response. 
+ chained bool } // arcKeyDirectory returns the directory expected to contain Azure Arc keys @@ -144,7 +147,7 @@ func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*manag if _, ok := os.LookupEnv(identityHeader); ok { if _, ok := os.LookupEnv(identityServerThumbprint); ok { if options.ID != nil { - return nil, errors.New("the Service Fabric API doesn't support specifying a user-assigned managed identity at runtime") + return nil, errors.New("the Service Fabric API doesn't support specifying a user-assigned identity at runtime. The identity is determined by cluster resource configuration. See https://aka.ms/servicefabricmi") } env = "Service Fabric" c.endpoint = endpoint @@ -215,6 +218,7 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi // no need to synchronize around this value because it's true only when DefaultAzureCredential constructed the client, // and in that case ChainedTokenCredential.GetToken synchronizes goroutines that would execute this block if c.probeIMDS { + // send a malformed request (no Metadata header) to IMDS to determine whether the endpoint is available cx, cancel := context.WithTimeout(ctx, imdsProbeTimeout) defer cancel() cx = policy.WithRetryOptions(cx, policy.RetryOptions{MaxRetries: -1}) @@ -222,24 +226,14 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi if err != nil { return azcore.AccessToken{}, fmt.Errorf("failed to create IMDS probe request: %s", err) } - res, err := c.azClient.Pipeline().Do(req) - if err != nil { + if _, err = c.azClient.Pipeline().Do(req); err != nil { msg := err.Error() if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { msg = "managed identity timed out. See https://aka.ms/azsdk/go/identity/troubleshoot#dac for more information" } return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, msg) } - // because IMDS always responds with JSON, assume a non-JSON response is from something else, such - // as a proxy, and return credentialUnavailableError so DefaultAzureCredential continues iterating - b, err := azruntime.Payload(res) - if err != nil { - return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, fmt.Sprintf("failed to read IMDS probe response: %s", err)) - } - if !json.Valid(b) { - return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, "unexpected response to IMDS probe") - } - // send normal token requests from now on because IMDS responded + // send normal token requests from now on because something responded c.probeIMDS = false } @@ -254,13 +248,21 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi } if azruntime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { - return c.createAccessToken(resp) + tk, err := c.createAccessToken(resp) + if err != nil && c.chained && c.msiType == msiTypeIMDS { + // failure to unmarshal a 2xx implies the response is from something other than IMDS such as a proxy listening at + // the same address. 
Return a credentialUnavailableError so credential chains continue to their next credential + err = newCredentialUnavailableError(credNameManagedIdentity, err.Error()) + } + return tk, err } if c.msiType == msiTypeIMDS { switch resp.StatusCode { case http.StatusBadRequest: if id != nil { + // return authenticationFailedError, halting any encompassing credential chain, + // because the explicit user-assigned identity implies the developer expected this to work return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "the requested identity isn't assigned to this resource", resp) } msg := "failed to authenticate a system assigned identity" @@ -276,6 +278,13 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, fmt.Sprintf("unexpected response %q", string(body))) } } + if c.chained { + // the response may be from something other than IMDS, for example a proxy returning + // 404. Return credentialUnavailableError so credential chains continue to their + // next credential, include the response in the error message to help debugging + err = newAuthenticationFailedError(credNameManagedIdentity, "", resp) + return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, err.Error()) + } } return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "", resp) @@ -290,7 +299,7 @@ func (c *managedIdentityClient) createAccessToken(res *http.Response) (azcore.Ac ExpiresOn interface{} `json:"expires_on,omitempty"` // the value returned in this field varies between a number and a date string }{} if err := azruntime.UnmarshalAsJSON(res, &value); err != nil { - return azcore.AccessToken{}, fmt.Errorf("internal AccessToken: %v", err) + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "Unexpected response content", res) } if value.ExpiresIn != "" { expiresIn, err := json.Number(value.ExpiresIn).Int64() diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go index 73363e1c9e7..ef5e4d72129 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go @@ -154,12 +154,7 @@ func (p *publicClient) GetToken(ctx context.Context, tro policy.TokenRequestOpti if p.opts.DisableAutomaticAuthentication { return azcore.AccessToken{}, newAuthenticationRequiredError(p.name, tro) } - at, err := p.reqToken(ctx, client, tro) - if err == nil { - msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", p.name, strings.Join(ar.GrantedScopes, ", ")) - log.Write(EventAuthentication, msg) - } - return at, err + return p.reqToken(ctx, client, tro) } // reqToken requests a token from the MSAL public client. It's separate from GetToken() to enable Authenticate() to bypass the cache. 
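The `chained` behavior added above is easiest to see from the calling side. Below is a minimal sketch, not part of the vendored diff, of a credential chain in which `NewChainedTokenCredential` marks the managed identity client as chained; on a host where IMDS is unreachable or answered by something else (for example a proxy), the probe surfaces a `credentialUnavailableError` and the chain falls through to the Azure CLI credential instead of halting.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	mi, err := azidentity.NewManagedIdentityCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	cli, err := azidentity.NewAzureCLICredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	// NewChainedTokenCredential sets the managed identity client's chained flag
	// (see the ChainedTokenCredential hunk above), so an unexpected IMDS response
	// yields a credentialUnavailableError and the chain tries the CLI credential.
	chain, err := azidentity.NewChainedTokenCredential([]azcore.TokenCredential{mi, cli}, nil)
	if err != nil {
		log.Fatal(err)
	}
	tk, err := chain.GetToken(context.TODO(), policy.TokenRequestOptions{
		Scopes: []string{"https://storage.azure.com/.default"},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("token expires", tk.ExpiresOn)
}
```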
@@ -242,6 +237,8 @@ func (p *publicClient) newMSALClient(enableCAE bool) (msalPublicClient, error) { func (p *publicClient) token(ar public.AuthResult, err error) (azcore.AccessToken, error) { if err == nil { + msg := fmt.Sprintf(scopeLogFmt, p.name, strings.Join(ar.GrantedScopes, ", ")) + log.Write(EventAuthentication, msg) p.record, err = newAuthenticationRecord(ar) } else { err = newAuthenticationFailedErrorFromMSAL(p.name, err) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 index 1a07fede637..efa8c6d3eb1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 @@ -7,6 +7,10 @@ param ( [hashtable] $AdditionalParameters = @{}, [hashtable] $DeploymentOutputs, + [Parameter(Mandatory = $true)] + [ValidateNotNullOrEmpty()] + [string] $SubscriptionId, + [Parameter(ParameterSetName = 'Provisioner', Mandatory = $true)] [ValidateNotNullOrEmpty()] [string] $TenantId, @@ -15,6 +19,10 @@ param ( [ValidatePattern('^[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}$')] [string] $TestApplicationId, + [Parameter(Mandatory = $true)] + [ValidateNotNullOrEmpty()] + [string] $Environment, + # Captures any arguments from eng/New-TestResources.ps1 not declared here (no parameter errors). [Parameter(ValueFromRemainingArguments = $true)] $RemainingArguments @@ -28,8 +36,9 @@ if ($CI) { Write-Host "Skipping post-provisioning script because resources weren't deployed" return } - az login --federated-token $env:OIDC_TOKEN --service-principal -t $TenantId -u $TestApplicationId - az account set --subscription $DeploymentOutputs['AZIDENTITY_SUBSCRIPTION_ID'] + az cloud set -n $Environment + az login --federated-token $env:ARM_OIDC_TOKEN --service-principal -t $TenantId -u $TestApplicationId + az account set --subscription $SubscriptionId } Write-Host "Building container" @@ -62,6 +71,9 @@ $aciName = "azidentity-test" az container create -g $rg -n $aciName --image $image ` --acr-identity $($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) ` --assign-identity [system] $($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) ` + --cpu 1 ` + --memory 1.0 ` + --os-type Linux ` --role "Storage Blob Data Reader" ` --scope $($DeploymentOutputs['AZIDENTITY_STORAGE_ID']) ` -e AZIDENTITY_STORAGE_NAME=$($DeploymentOutputs['AZIDENTITY_STORAGE_NAME']) ` diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go index 4fa22dcc121..88c1078a722 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go @@ -14,5 +14,5 @@ const ( module = "github.com/Azure/azure-sdk-for-go/sdk/" + component // Version is the semantic version (see http://semver.org) of this module. - version = "v1.8.0" + version = "v1.8.1" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md index 6e5e80087d6..07be3af2b83 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md @@ -1,5 +1,102 @@ # Release History +## 1.5.0 (2024-11-13) + +### Features Added +* Fix compareHeaders custom sorting algorithm for String To Sign. 
+* Added permissions & resourcetype parameters in listblob response. + +## 1.5.0-beta.1 (2024-10-22) + +### Other Changes +* Updated `azcore` version to `1.16.0` +* Updated `azidentity` version to `1.8.0` + +## 1.4.1 (2024-09-18) + +### Features Added +* Added crc64 response header to Put Blob. +* Upgraded service version to `2024-08-04`. + +## 1.4.1-beta.1 (2024-08-27) + +### Features Added +* Upgraded service version to `2024-08-04`. + +### Other Changes +* Updated `azcore` version to `1.14.0` + +## 1.4.0 (2024-07-18) + +### Other Changes +* GetProperties() was called twice in DownloadFile method. Enhanced to call it only once, reducing latency. +* Updated `azcore` version to `1.13.0` + +## 1.4.0-beta.1 (2024-06-14) + +### Features Added +* Updated service version to `2024-05-04`. + +### Other Changes +* Updated `azidentity` version to `1.6.0` +* Updated `azcore` version to `1.12.0` + +## 1.3.2 (2024-04-09) + +### Bugs Fixed +* Fixed an issue where GetSASURL() was providing HTTPS SAS, instead of the default http+https SAS. Fixes [#22448](https://github.com/Azure/azure-sdk-for-go/issues/22448) + +### Other Changes +* Integrate `InsecureAllowCredentialWithHTTP` client options. +* Update dependencies. + +## 1.3.1 (2024-02-28) + +### Bugs Fixed + +* Re-enabled `SharedKeyCredential` authentication mode for non TLS protected endpoints. +* Use random write in `DownloadFile` method. Fixes [#22426](https://github.com/Azure/azure-sdk-for-go/issues/22426). + +## 1.3.0 (2024-02-12) + +### Bugs Fixed +* Fix concurrency issue while Downloading File. Fixes [#22156](https://github.com/Azure/azure-sdk-for-go/issues/22156). +* Fix panic when nil options bag is passed to NewGetPageRangesPager. Fixes [22356](https://github.com/Azure/azure-sdk-for-go/issues/22356). +* Fix file offset update after Download file. Fixes [#22297](https://github.com/Azure/azure-sdk-for-go/issues/22297). + +### Other Changes +* Updated the version of `azcore` to `1.9.2` + +## 1.3.0-beta.1 (2024-01-09) + +### Features Added + +* Updated service version to `2023-11-03`. +* Added support for Audience when OAuth is used. + +### Bugs Fixed + +* Block `SharedKeyCredential` authentication mode for non TLS protected endpoints. Fixes [#21841](https://github.com/Azure/azure-sdk-for-go/issues/21841). + +## 1.2.1 (2023-12-13) + +### Features Added + +* Exposed GetSASURL from specialized clients + +### Bugs Fixed + +* Fixed case in Blob Batch API when blob path has / in it. Fixes [#21649](https://github.com/Azure/azure-sdk-for-go/issues/21649). +* Fixed SharedKeyMissingError when using client.BlobClient().GetSASURL() method +* Fixed an issue that would cause metadata keys with empty values to be omitted when enumerating blobs. +* Fixed an issue where passing empty map to set blob tags API was causing panic. Fixes [#21869](https://github.com/Azure/azure-sdk-for-go/issues/21869). +* Fixed an issue where downloaded file has incorrect size when not a multiple of block size. Fixes [#21995](https://github.com/Azure/azure-sdk-for-go/issues/21995). +* Fixed case where `io.ErrUnexpectedEOF` was treated as expected error in `UploadStream`. Fixes [#21837](https://github.com/Azure/azure-sdk-for-go/issues/21837). + +### Other Changes + +* Updated the version of `azcore` to `1.9.1` and `azidentity` to `1.4.0`. 
+ ## 1.2.0 (2023-10-11) ### Bugs Fixed diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md index 905fb2675c6..e71157a1979 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md @@ -1,6 +1,9 @@ # Azure Blob Storage module for Go +[![PkgGoDev](https://pkg.go.dev/badge/github.com/Azure/azure-sdk-for-go/sdk/azblob)](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob) +[![Build Status](https://dev.azure.com/azure-sdk/public/_apis/build/status/go/go%20-%20azdatalake%20-%20ci?branchName=main)](https://dev.azure.com/azure-sdk/public/_build/latest?definitionId=2854&branchName=main) +[![Code Coverage](https://img.shields.io/azure-devops/coverage/azure-sdk/public/2854/main)](https://img.shields.io/azure-devops/coverage/azure-sdk/public/2854/main) -> Service Version: 2023-08-03 +> Service Version: 2023-11-03 Azure Blob Storage is Microsoft's object storage solution for the cloud. Blob Storage is optimized for storing massive amounts of unstructured data - data that does not adhere to a particular data model or diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go index 69913e334d4..3bf058976a2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go @@ -9,18 +9,19 @@ package appendblob import ( "context" "errors" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "io" "os" "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" ) // ClientOptions contains the optional parameters when creating a Client. 
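The `InsecureAllowCredentialWithHTTP` option noted in the changelog above, and threaded into `shared.NewStorageChallengePolicy` in the constructor changes below, gates shared key authentication over plain HTTP. A minimal sketch, assuming a local Azurite emulator at its conventional endpoint and using Azurite's published development account key:

```go
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
)

func main() {
	// Azurite's well-known development credentials (assumes a local emulator).
	cred, err := azblob.NewSharedKeyCredential(
		"devstoreaccount1",
		"Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==",
	)
	if err != nil {
		log.Fatal(err)
	}
	// Shared key auth over plain HTTP is blocked by default; opt in explicitly.
	client, err := azblob.NewClientWithSharedKeyCredential(
		"http://127.0.0.1:10000/devstoreaccount1",
		cred,
		&azblob.ClientOptions{InsecureAllowCredentialWithHTTP: true},
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = client
}
```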
@@ -34,11 +35,12 @@ type Client base.CompositeClient[generated.BlobClient, generated.AppendBlobClien // - cred - an Azure AD credential, typically obtained via the azidentity module // - options - client options; pass nil to accept the default values func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - authPolicy := shared.NewStorageChallengePolicy(cred) + audience := base.GetAudience((*base.ClientOptions)(options)) conOptions := shared.GetClientOptions(options) + authPolicy := shared.NewStorageChallengePolicy(cred, audience, conOptions.InsecureAllowCredentialWithHTTP) plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - azClient, err := azcore.NewClient(shared.AppendBlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) if err != nil { return nil, err } @@ -53,7 +55,7 @@ func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptio func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) { conOptions := shared.GetClientOptions(options) - azClient, err := azcore.NewClient(shared.AppendBlobClient, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) if err != nil { return nil, err } @@ -70,7 +72,7 @@ func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCreden conOptions := shared.GetClientOptions(options) plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - azClient, err := azcore.NewClient(shared.AppendBlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) if err != nil { return nil, err } @@ -290,8 +292,8 @@ func (ab *Client) GetAccountInfo(ctx context.Context, o *blob.GetAccountInfoOpti // SetHTTPHeaders changes a blob's HTTP headers. // For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. -func (ab *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) { - return ab.BlobClient().SetHTTPHeaders(ctx, HTTPHeaders, o) +func (ab *Client) SetHTTPHeaders(ctx context.Context, httpHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) { + return ab.BlobClient().SetHTTPHeaders(ctx, httpHeaders, o) } // SetMetadata changes a blob's metadata. @@ -338,6 +340,12 @@ func (ab *Client) CopyFromURL(ctx context.Context, copySource string, o *blob.Co return blob.CopyFromURLResponse{}, errors.New("operation will not work on this blob type. CopyFromURL works only with block blob") } +// GetSASURL is a convenience method for generating a SAS token for the currently pointed at append blob. +// It can only be used if the credential supplied during creation was a SharedKeyCredential. +func (ab *Client) GetSASURL(permissions sas.BlobPermissions, expiry time.Time, o *blob.GetSASURLOptions) (string, error) { + return ab.BlobClient().GetSASURL(permissions, expiry, o) +} + // Concurrent Download Functions ----------------------------------------------------------------------------------------- // DownloadStream reads a range of bytes from a blob. 
The response also includes the blob's properties and metadata. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json index ee07ad45b1e..61d84c6f98f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "go", "TagPrefix": "go/storage/azblob", - "Tag": "go/storage/azblob_818d8addd0" + "Tag": "go/storage/azblob_e5b4fd09a3" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go index 55de9b34958..ec6907fbdc8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go @@ -36,15 +36,16 @@ type Client base.Client[generated.BlobClient] // - cred - an Azure AD credential, typically obtained via the azidentity module // - options - client options; pass nil to accept the default values func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - authPolicy := shared.NewStorageChallengePolicy(cred) + audience := base.GetAudience((*base.ClientOptions)(options)) conOptions := shared.GetClientOptions(options) + authPolicy := shared.NewStorageChallengePolicy(cred, audience, conOptions.InsecureAllowCredentialWithHTTP) plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - azClient, err := azcore.NewClient(shared.BlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) if err != nil { return nil, err } - return (*Client)(base.NewBlobClient(blobURL, azClient, &cred)), nil + return (*Client)(base.NewBlobClient(blobURL, azClient, &cred, (*base.ClientOptions)(conOptions))), nil } // NewClientWithNoCredential creates an instance of Client with the specified values. @@ -54,11 +55,11 @@ func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptio func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) { conOptions := shared.GetClientOptions(options) - azClient, err := azcore.NewClient(shared.BlobClient, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) if err != nil { return nil, err } - return (*Client)(base.NewBlobClient(blobURL, azClient, nil)), nil + return (*Client)(base.NewBlobClient(blobURL, azClient, nil, (*base.ClientOptions)(conOptions))), nil } // NewClientWithSharedKeyCredential creates an instance of Client with the specified values. 
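The `GetSASURL` convenience method added to the append blob client above (and to the block blob client later in this diff) delegates to the embedded blob client and works only when the client was built with a `SharedKeyCredential`. A rough usage sketch; the account, key, and blob URL are placeholders:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
)

func main() {
	cred, err := blob.NewSharedKeyCredential("<account>", "<key>")
	if err != nil {
		log.Fatal(err)
	}
	client, err := appendblob.NewClientWithSharedKeyCredential(
		"https://<account>.blob.core.windows.net/<container>/<blob>", cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	// Delegates to the embedded blob client; requires the shared key credential above.
	url, err := client.GetSASURL(
		sas.BlobPermissions{Read: true},
		time.Now().Add(time.Hour),
		nil,
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(url)
}
```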
@@ -70,11 +71,11 @@ func NewClientWithSharedKeyCredential(blobURL string, cred *SharedKeyCredential, conOptions := shared.GetClientOptions(options) plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - azClient, err := azcore.NewClient(shared.BlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) if err != nil { return nil, err } - return (*Client)(base.NewBlobClient(blobURL, azClient, cred)), nil + return (*Client)(base.NewBlobClient(blobURL, azClient, cred, (*base.ClientOptions)(conOptions))), nil } // NewClientFromConnectionString creates an instance of Client with the specified values. @@ -112,6 +113,10 @@ func (b *Client) credential() any { return base.Credential((*base.Client[generated.BlobClient])(b)) } +func (b *Client) getClientOptions() *base.ClientOptions { + return base.GetClientOptions((*base.Client[generated.BlobClient])(b)) +} + // URL returns the URL endpoint used by the Client object. func (b *Client) URL() string { return b.generated().Endpoint() @@ -126,7 +131,7 @@ func (b *Client) WithSnapshot(snapshot string) (*Client, error) { } p.Snapshot = snapshot - return (*Client)(base.NewBlobClient(p.String(), b.generated().InternalClient(), b.credential())), nil + return (*Client)(base.NewBlobClient(p.String(), b.generated().InternalClient(), b.credential(), b.getClientOptions())), nil } // WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id. @@ -138,7 +143,7 @@ func (b *Client) WithVersionID(versionID string) (*Client, error) { } p.VersionID = versionID - return (*Client)(base.NewBlobClient(p.String(), b.generated().InternalClient(), b.credential())), nil + return (*Client)(base.NewBlobClient(p.String(), b.generated().InternalClient(), b.credential(), b.getClientOptions())), nil } // Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection. @@ -180,9 +185,9 @@ func (b *Client) GetProperties(ctx context.Context, options *GetPropertiesOption // SetHTTPHeaders changes a blob's HTTP headers. // For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. -func (b *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders HTTPHeaders, o *SetHTTPHeadersOptions) (SetHTTPHeadersResponse, error) { +func (b *Client) SetHTTPHeaders(ctx context.Context, httpHeaders HTTPHeaders, o *SetHTTPHeadersOptions) (SetHTTPHeadersResponse, error) { opts, leaseAccessConditions, modifiedAccessConditions := o.format() - resp, err := b.generated().SetHTTPHeaders(ctx, opts, &HTTPHeaders, leaseAccessConditions, modifiedAccessConditions) + resp, err := b.generated().SetHTTPHeaders(ctx, opts, &httpHeaders, leaseAccessConditions, modifiedAccessConditions) return resp, err } @@ -353,7 +358,7 @@ func (b *Client) downloadBuffer(ctx context.Context, writer io.WriterAt, o downl OperationName: "downloadBlobToWriterAt", TransferSize: count, ChunkSize: o.BlockSize, - NumChunks: uint16(((count - 1) / o.BlockSize) + 1), + NumChunks: uint64(((count - 1) / o.BlockSize) + 1), Concurrency: o.Concurrency, Operation: func(ctx context.Context, chunkStart int64, count int64) error { downloadBlobOptions := o.getDownloadBlobOptions(HTTPRange{ @@ -392,168 +397,6 @@ func (b *Client) downloadBuffer(ctx context.Context, writer io.WriterAt, o downl return count, nil } -// downloadFile downloads an Azure blob to a Writer. 
The blocks are downloaded parallely, -// but written to file serially -func (b *Client) downloadFile(ctx context.Context, writer io.Writer, o downloadOptions) (int64, error) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if o.BlockSize == 0 { - o.BlockSize = DefaultDownloadBlockSize - } - - if o.Concurrency == 0 { - o.Concurrency = DefaultConcurrency - } - - count := o.Range.Count - if count == CountToEnd { //Calculate size if not specified - gr, err := b.GetProperties(ctx, o.getBlobPropertiesOptions()) - if err != nil { - return 0, err - } - count = *gr.ContentLength - o.Range.Offset - } - - if count <= 0 { - // The file is empty, there is nothing to download. - return 0, nil - } - - progress := int64(0) - progressLock := &sync.Mutex{} - - // helper routine to get body - getBodyForRange := func(ctx context.Context, chunkStart, size int64) (io.ReadCloser, error) { - downloadBlobOptions := o.getDownloadBlobOptions(HTTPRange{ - Offset: chunkStart + o.Range.Offset, - Count: size, - }, nil) - dr, err := b.DownloadStream(ctx, downloadBlobOptions) - if err != nil { - return nil, err - } - - var body io.ReadCloser = dr.NewRetryReader(ctx, &o.RetryReaderOptionsPerBlock) - if o.Progress != nil { - rangeProgress := int64(0) - body = streaming.NewResponseProgress( - body, - func(bytesTransferred int64) { - diff := bytesTransferred - rangeProgress - rangeProgress = bytesTransferred - progressLock.Lock() - progress += diff - o.Progress(progress) - progressLock.Unlock() - }) - } - - return body, nil - } - - // if file fits in a single buffer, we'll download here. - if count <= o.BlockSize { - body, err := getBodyForRange(ctx, int64(0), count) - if err != nil { - return 0, err - } - defer body.Close() - - return io.Copy(writer, body) - } - - buffers := shared.NewMMBPool(int(o.Concurrency), o.BlockSize) - defer buffers.Free() - aquireBuffer := func() ([]byte, error) { - select { - case b := <-buffers.Acquire(): - // got a buffer - return b, nil - default: - // no buffer available; allocate a new buffer if possible - if _, err := buffers.Grow(); err != nil { - return nil, err - } - - // either grab the newly allocated buffer or wait for one to become available - return <-buffers.Acquire(), nil - } - } - - numChunks := uint16((count-1)/o.BlockSize) + 1 - blocks := make([]chan []byte, numChunks) - for b := range blocks { - blocks[b] = make(chan []byte) - } - - /* - * We have created as many channels as the number of chunks we have. - * Each downloaded block will be sent to the channel matching its - * sequece number, i.e. 0th block is sent to 0th channel, 1st block - * to 1st channel and likewise. The blocks are then read and written - * to the file serially by below goroutine. Do note that the blocks - * blocks are still downloaded parallelly from n/w, only serailized - * and written to file here. - */ - writerError := make(chan error) - go func(ch chan error) { - for _, block := range blocks { - select { - case <-ctx.Done(): - return - case block := <-block: - _, err := writer.Write(block) - buffers.Release(block) - if err != nil { - ch <- err - return - } - } - } - ch <- nil - }(writerError) - - // Prepare and do parallel download. 
- err := shared.DoBatchTransfer(ctx, &shared.BatchTransferOptions{ - OperationName: "downloadBlobToWriterAt", - TransferSize: count, - ChunkSize: o.BlockSize, - NumChunks: numChunks, - Concurrency: o.Concurrency, - Operation: func(ctx context.Context, chunkStart int64, count int64) error { - buff, err := aquireBuffer() - if err != nil { - return err - } - - body, err := getBodyForRange(ctx, chunkStart, count) - if err != nil { - buffers.Release(buff) - return nil - } - - _, err = io.ReadFull(body, buff[:count]) - body.Close() - if err != nil { - return err - } - - blockIndex := (chunkStart / o.BlockSize) - blocks[blockIndex] <- buff - return nil - }, - }) - - if err != nil { - return 0, err - } - // error from writer thread. - if err = <-writerError; err != nil { - return 0, err - } - return count, nil -} - // DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata. // For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob. func (b *Client) DownloadStream(ctx context.Context, o *DownloadStreamOptions) (DownloadStreamResponse, error) { @@ -605,6 +448,7 @@ func (b *Client) DownloadFile(ctx context.Context, file *os.File, o *DownloadFil return 0, err } size = *props.ContentLength - do.Range.Offset + do.Range.Count = size } else { size = count } @@ -621,7 +465,7 @@ func (b *Client) DownloadFile(ctx context.Context, file *os.File, o *DownloadFil } if size > 0 { - return b.downloadFile(ctx, file, *do) + return b.downloadBuffer(ctx, file, *do) } else { // if the blob's size is 0, there is no need in downloading it return 0, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go index 5a79c12d435..d7334688946 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go @@ -51,7 +51,7 @@ type Tags = generated.BlobTag // HTTPRange defines a range of bytes within an HTTP resource, starting at offset and // ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange -// which has an offset but no zero value count indicates from the offset to the resource's end. +// which has an offset and zero value count indicates from the offset to the resource's end. type HTTPRange = exported.HTTPRange // Request Model Declaration ------------------------------------------------------------------------------------------- diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/retry_reader.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/retry_reader.go index 1deedb5902e..a625c995327 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/retry_reader.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/retry_reader.go @@ -101,7 +101,6 @@ func (s *RetryReader) setResponse(r io.ReadCloser) { // Read from retry reader func (s *RetryReader) Read(p []byte) (n int, err error) { for try := int32(0); ; try++ { - //fmt.Println(try) // Comment out for debugging. 
if s.countWasBounded && s.info.Range.Count == CountToEnd { // User specified an original count and the remaining bytes are 0, return 0, EOF return 0, io.EOF diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go index 8a1573c0ce2..07fad60611b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go @@ -69,6 +69,7 @@ const ( CopyIDMismatch Code = "CopyIdMismatch" EmptyMetadataKey Code = "EmptyMetadataKey" FeatureVersionMismatch Code = "FeatureVersionMismatch" + ImmutabilityPolicyDeleteOnLockedPolicy Code = "ImmutabilityPolicyDeleteOnLockedPolicy" IncrementalCopyBlobMismatch Code = "IncrementalCopyBlobMismatch" IncrementalCopyOfEralierVersionSnapshotNotAllowed Code = "IncrementalCopyOfEralierVersionSnapshotNotAllowed" IncrementalCopySourceMustBeSnapshot Code = "IncrementalCopySourceMustBeSnapshot" @@ -122,6 +123,7 @@ const ( NoAuthenticationInformation Code = "NoAuthenticationInformation" NoPendingCopyOperation Code = "NoPendingCopyOperation" OperationNotAllowedOnIncrementalCopyBlob Code = "OperationNotAllowedOnIncrementalCopyBlob" + OperationNotAllowedOnRootBlob Code = "OperationNotAllowedOnRootBlob" OperationTimedOut Code = "OperationTimedOut" OutOfRangeInput Code = "OutOfRangeInput" OutOfRangeQueryParameterValue Code = "OutOfRangeQueryParameterValue" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go index 212255d4c66..24df42c75ef 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go @@ -75,7 +75,7 @@ func copyFromReader[T ~[]byte](ctx context.Context, src io.Reader, dst blockWrit } var n int - n, err = io.ReadFull(src, buffer) + n, err = shared.ReadAtLeast(src, buffer, len(buffer)) if n > 0 { // some data was read, upload it @@ -108,7 +108,7 @@ func copyFromReader[T ~[]byte](ctx context.Context, src io.Reader, dst blockWrit } if err != nil { // The reader is done, no more outgoing buffers - if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) { + if errors.Is(err, io.EOF) { // these are expected errors, we don't surface those err = nil } else { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go index 8b542f85a8b..7a3ab3fe8dc 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go @@ -11,8 +11,6 @@ import ( "context" "encoding/base64" "errors" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" "io" "math" "os" @@ -21,16 +19,19 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/internal/log" "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" 
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" ) // ClientOptions contains the optional parameters when creating a Client. @@ -44,11 +45,12 @@ type Client base.CompositeClient[generated.BlobClient, generated.BlockBlobClient // - cred - an Azure AD credential, typically obtained via the azidentity module // - options - client options; pass nil to accept the default values func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - authPolicy := shared.NewStorageChallengePolicy(cred) + audience := base.GetAudience((*base.ClientOptions)(options)) conOptions := shared.GetClientOptions(options) + authPolicy := shared.NewStorageChallengePolicy(cred, audience, conOptions.InsecureAllowCredentialWithHTTP) plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - azClient, err := azcore.NewClient(shared.BlockBlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) if err != nil { return nil, err } @@ -62,7 +64,7 @@ func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptio func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) { conOptions := shared.GetClientOptions(options) - azClient, err := azcore.NewClient(shared.BlockBlobClient, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) if err != nil { return nil, err } @@ -79,7 +81,7 @@ func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCreden conOptions := shared.GetClientOptions(options) plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - azClient, err := azcore.NewClient(shared.BlockBlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) if err != nil { return nil, err } @@ -129,7 +131,7 @@ func (bb *Client) URL() string { return bb.generated().Endpoint() } -// BlobClient returns the embedded blob client for this AppendBlob client. +// BlobClient returns the embedded blob client for this BlockBlob client. func (bb *Client) BlobClient() *blob.Client { blobClient, _ := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.BlockBlobClient])(bb)) return (*blob.Client)(blobClient) @@ -362,8 +364,8 @@ func (bb *Client) GetAccountInfo(ctx context.Context, o *blob.GetAccountInfoOpti // SetHTTPHeaders changes a blob's HTTP headers. // For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. 
-func (bb *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) { - return bb.BlobClient().SetHTTPHeaders(ctx, HTTPHeaders, o) +func (bb *Client) SetHTTPHeaders(ctx context.Context, httpHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) { + return bb.BlobClient().SetHTTPHeaders(ctx, httpHeaders, o) } // SetMetadata changes a blob's metadata. @@ -410,6 +412,12 @@ func (bb *Client) CopyFromURL(ctx context.Context, copySource string, o *blob.Co return bb.BlobClient().CopyFromURL(ctx, copySource, o) } +// GetSASURL is a convenience method for generating a SAS token for the currently pointed at block blob. +// It can only be used if the credential supplied during creation was a SharedKeyCredential. +func (bb *Client) GetSASURL(permissions sas.BlobPermissions, expiry time.Time, o *blob.GetSASURLOptions) (string, error) { + return bb.BlobClient().GetSASURL(permissions, expiry, o) +} + // Concurrent Upload Functions ----------------------------------------------------------------------------------------- // uploadFromReader uploads a buffer in blocks to a block blob. @@ -466,7 +474,7 @@ func (bb *Client) uploadFromReader(ctx context.Context, reader io.ReaderAt, actu OperationName: "uploadFromReader", TransferSize: actualSize, ChunkSize: o.BlockSize, - NumChunks: uint16(((actualSize - 1) / o.BlockSize) + 1), + NumChunks: uint64(((actualSize - 1) / o.BlockSize) + 1), Concurrency: o.Concurrency, Operation: func(ctx context.Context, offset int64, chunkSize int64) error { // This function is called once per block. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml index f5100e13113..2259336b2bd 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml @@ -21,11 +21,12 @@ pr: - sdk/storage/azblob -stages: - - template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml +extends: + template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml parameters: ServiceDirectory: 'storage/azblob' RunLiveTests: true + UsePipelineProxy: false EnvVars: AZURE_CLIENT_ID: $(AZBLOB_CLIENT_ID) AZURE_TENANT_ID: $(AZBLOB_TENANT_ID) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go index 5c4b719c4ba..c511d8a79f2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go @@ -31,11 +31,7 @@ type Client struct { // - cred - an Azure AD credential, typically obtained via the azidentity module // - options - client options; pass nil to accept the default values func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - var clientOptions *service.ClientOptions - if options != nil { - clientOptions = &service.ClientOptions{ClientOptions: options.ClientOptions} - } - svcClient, err := service.NewClient(serviceURL, cred, clientOptions) + svcClient, err := service.NewClient(serviceURL, cred, (*service.ClientOptions)(options)) if err != nil { return nil, err } @@ -50,11 +46,7 @@ func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOp // - serviceURL - the URL of the storage account e.g. https://<account>.blob.core.windows.net/?<sas token>
// - options - client options; pass nil to accept the default values func NewClientWithNoCredential(serviceURL string, options *ClientOptions) (*Client, error) { - var clientOptions *service.ClientOptions - if options != nil { - clientOptions = &service.ClientOptions{ClientOptions: options.ClientOptions} - } - svcClient, err := service.NewClientWithNoCredential(serviceURL, clientOptions) + svcClient, err := service.NewClientWithNoCredential(serviceURL, (*service.ClientOptions)(options)) if err != nil { return nil, err } @@ -83,15 +75,12 @@ func NewClientWithSharedKeyCredential(serviceURL string, cred *SharedKeyCredenti // - connectionString - a connection string for the desired storage account // - options - client options; pass nil to accept the default values func NewClientFromConnectionString(connectionString string, options *ClientOptions) (*Client, error) { - if options == nil { - options = &ClientOptions{} - } - containerClient, err := service.NewClientFromConnectionString(connectionString, (*service.ClientOptions)(options)) + svcClient, err := service.NewClientFromConnectionString(connectionString, (*service.ClientOptions)(options)) if err != nil { return nil, err } return &Client{ - svc: containerClient, + svc: svcClient, }, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/common.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/common.go index 560e151d553..48771e8c9c2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/common.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/common.go @@ -32,5 +32,5 @@ func ParseURL(u string) (URLParts, error) { // HTTPRange defines a range of bytes within an HTTP resource, starting at offset and // ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange -// which has an offset but no zero value count indicates from the offset to the resource's end. +// which has an offset and zero value count indicates from the offset to the resource's end. 
type HTTPRange = exported.HTTPRange diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go index 3058b5d49c0..0e43ed015e8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go @@ -11,8 +11,6 @@ import ( "context" "errors" "fmt" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" "net/http" "net/url" "time" @@ -20,8 +18,10 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" @@ -42,15 +42,16 @@ type Client base.Client[generated.ContainerClient] // - cred - an Azure AD credential, typically obtained via the azidentity module // - options - client options; pass nil to accept the default values func NewClient(containerURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - authPolicy := shared.NewStorageChallengePolicy(cred) + audience := base.GetAudience((*base.ClientOptions)(options)) conOptions := shared.GetClientOptions(options) + authPolicy := shared.NewStorageChallengePolicy(cred, audience, conOptions.InsecureAllowCredentialWithHTTP) plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - azClient, err := azcore.NewClient(shared.ContainerClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) if err != nil { return nil, err } - return (*Client)(base.NewContainerClient(containerURL, azClient, &cred)), nil + return (*Client)(base.NewContainerClient(containerURL, azClient, &cred, (*base.ClientOptions)(conOptions))), nil } // NewClientWithNoCredential creates an instance of Client with the specified values. @@ -60,11 +61,11 @@ func NewClient(containerURL string, cred azcore.TokenCredential, options *Client func NewClientWithNoCredential(containerURL string, options *ClientOptions) (*Client, error) { conOptions := shared.GetClientOptions(options) - azClient, err := azcore.NewClient(shared.ContainerClient, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) if err != nil { return nil, err } - return (*Client)(base.NewContainerClient(containerURL, azClient, nil)), nil + return (*Client)(base.NewContainerClient(containerURL, azClient, nil, (*base.ClientOptions)(conOptions))), nil } // NewClientWithSharedKeyCredential creates an instance of Client with the specified values. 
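The corrected `HTTPRange` comment above means a range with a nonzero `Offset` and a zero `Count` reads from the offset to the end of the resource. A brief sketch of both forms, assuming `client` is an already-constructed `blob.Client`:

```go
package main

import (
	"context"
	"io"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

// readTail streams a blob from byte 512 to its end: Count == 0 means "to the end".
func readTail(ctx context.Context, client *blob.Client) ([]byte, error) {
	resp, err := client.DownloadStream(ctx, &blob.DownloadStreamOptions{
		Range: blob.HTTPRange{Offset: 512, Count: 0},
	})
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return io.ReadAll(resp.Body)
}

// readSlice streams exactly 1024 bytes starting at byte 512.
func readSlice(ctx context.Context, client *blob.Client) ([]byte, error) {
	resp, err := client.DownloadStream(ctx, &blob.DownloadStreamOptions{
		Range: blob.HTTPRange{Offset: 512, Count: 1024},
	})
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return io.ReadAll(resp.Body)
}

func main() {} // client construction elided
```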
@@ -76,11 +77,11 @@ func NewClientWithSharedKeyCredential(containerURL string, cred *SharedKeyCreden conOptions := shared.GetClientOptions(options) plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - azClient, err := azcore.NewClient(shared.ContainerClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) if err != nil { return nil, err } - return (*Client)(base.NewContainerClient(containerURL, azClient, cred)), nil + return (*Client)(base.NewContainerClient(containerURL, azClient, cred, (*base.ClientOptions)(conOptions))), nil } // NewClientFromConnectionString creates an instance of Client with the specified values. @@ -122,6 +123,10 @@ func getGeneratedBlobClient(b *blob.Client) *generated.BlobClient { return base.InnerClient((*base.Client[generated.BlobClient])(b)) } +func (c *Client) getClientOptions() *base.ClientOptions { + return base.GetClientOptions((*base.Client[generated.ContainerClient])(c)) +} + // URL returns the URL endpoint used by the Client object. func (c *Client) URL() string { return c.generated().Endpoint() @@ -133,7 +138,7 @@ func (c *Client) URL() string { func (c *Client) NewBlobClient(blobName string) *blob.Client { blobName = url.PathEscape(blobName) blobURL := runtime.JoinPaths(c.URL(), blobName) - return (*blob.Client)(base.NewBlobClient(blobURL, c.generated().InternalClient().WithClientName(shared.BlobClient), c.credential())) + return (*blob.Client)(base.NewBlobClient(blobURL, c.generated().InternalClient().WithClientName(exported.ModuleName), c.credential(), c.getClientOptions())) } // NewAppendBlobClient creates a new appendblob.Client object by concatenating blobName to the end of @@ -142,7 +147,7 @@ func (c *Client) NewBlobClient(blobName string) *blob.Client { func (c *Client) NewAppendBlobClient(blobName string) *appendblob.Client { blobName = url.PathEscape(blobName) blobURL := runtime.JoinPaths(c.URL(), blobName) - return (*appendblob.Client)(base.NewAppendBlobClient(blobURL, c.generated().InternalClient().WithClientName(shared.AppendBlobClient), c.sharedKey())) + return (*appendblob.Client)(base.NewAppendBlobClient(blobURL, c.generated().InternalClient().WithClientName(exported.ModuleName), c.sharedKey())) } // NewBlockBlobClient creates a new blockblob.Client object by concatenating blobName to the end of @@ -151,7 +156,7 @@ func (c *Client) NewAppendBlobClient(blobName string) *appendblob.Client { func (c *Client) NewBlockBlobClient(blobName string) *blockblob.Client { blobName = url.PathEscape(blobName) blobURL := runtime.JoinPaths(c.URL(), blobName) - return (*blockblob.Client)(base.NewBlockBlobClient(blobURL, c.generated().InternalClient().WithClientName(shared.BlockBlobClient), c.sharedKey())) + return (*blockblob.Client)(base.NewBlockBlobClient(blobURL, c.generated().InternalClient().WithClientName(exported.ModuleName), c.sharedKey())) } // NewPageBlobClient creates a new pageblob.Client object by concatenating blobName to the end of @@ -160,7 +165,7 @@ func (c *Client) NewBlockBlobClient(blobName string) *blockblob.Client { func (c *Client) NewPageBlobClient(blobName string) *pageblob.Client { blobName = url.PathEscape(blobName) blobURL := runtime.JoinPaths(c.URL(), blobName) - return (*pageblob.Client)(base.NewPageBlobClient(blobURL, c.generated().InternalClient().WithClientName(shared.PageBlobClient), c.sharedKey())) + return (*pageblob.Client)(base.NewPageBlobClient(blobURL, 
c.generated().InternalClient().WithClientName(exported.ModuleName), c.sharedKey())) } // Create creates a new container within a storage account. If a container with the same name already exists, the operation fails. @@ -343,7 +348,6 @@ func (c *Client) GetSASURL(permissions sas.ContainerPermissions, expiry time.Tim // Containers do not have snapshots, nor versions. qps, err := sas.BlobSignatureValues{ Version: sas.Version, - Protocol: sas.ProtocolHTTPS, ContainerName: urlParts.ContainerName, Permissions: permissions.String(), StartTime: st, @@ -366,7 +370,8 @@ func (c *Client) NewBatchBuilder() (*BatchBuilder, error) { switch cred := c.credential().(type) { case *azcore.TokenCredential: - authPolicy = shared.NewStorageChallengePolicy(*cred) + conOptions := c.getClientOptions() + authPolicy = shared.NewStorageChallengePolicy(*cred, base.GetAudience(conOptions), conOptions.InsecureAllowCredentialWithHTTP) case *SharedKeyCredential: authPolicy = exported.NewSharedKeyCredPolicy(cred) case nil: diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go index 61d936ab73d..ccee90dbc5c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go @@ -7,10 +7,11 @@ package container import ( - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "reflect" "time" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" ) @@ -126,7 +127,7 @@ func (o *GetPropertiesOptions) format() (*generated.ContainerClientGetProperties // ListBlobsInclude indicates what additional information the service should return with each blob. type ListBlobsInclude struct { - Copy, Metadata, Snapshots, UncommittedBlobs, Deleted, Tags, Versions, LegalHold, ImmutabilityPolicy, DeletedWithVersions bool + Copy, Metadata, Snapshots, UncommittedBlobs, Deleted, Tags, Versions, LegalHold, ImmutabilityPolicy, DeletedWithVersions, Permissions bool } func (l ListBlobsInclude) format() []generated.ListBlobsIncludeItem { @@ -166,7 +167,9 @@ func (l ListBlobsInclude) format() []generated.ListBlobsIncludeItem { if l.Versions { include = append(include, generated.ListBlobsIncludeItemVersions) } - + if l.Permissions { + include = append(include, generated.ListBlobsIncludeItemPermissions) + } return include } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go index 0bdbaefaf22..073de855b61 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go @@ -10,16 +10,24 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" + "strings" ) // ClientOptions contains the optional parameters when creating a Client. type ClientOptions struct { azcore.ClientOptions + + // Audience to use when requesting tokens for Azure Active Directory authentication. 
+ // Only has an effect when credential is of type TokenCredential. The value could be + // https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net. + Audience string } type Client[T any] struct { inner *T credential any + options *ClientOptions } func InnerClient[T any](client *Client[T]) *T { @@ -39,28 +47,43 @@ func Credential[T any](client *Client[T]) any { return client.credential } +func GetClientOptions[T any](client *Client[T]) *ClientOptions { + return client.options +} + +func GetAudience(clOpts *ClientOptions) string { + if clOpts == nil || len(strings.TrimSpace(clOpts.Audience)) == 0 { + return shared.TokenScope + } else { + return strings.TrimRight(clOpts.Audience, "/") + "/.default" + } +} + func NewClient[T any](inner *T) *Client[T] { return &Client[T]{inner: inner} } -func NewServiceClient(containerURL string, azClient *azcore.Client, credential any) *Client[generated.ServiceClient] { +func NewServiceClient(containerURL string, azClient *azcore.Client, credential any, options *ClientOptions) *Client[generated.ServiceClient] { return &Client[generated.ServiceClient]{ inner: generated.NewServiceClient(containerURL, azClient), credential: credential, + options: options, } } -func NewContainerClient(containerURL string, azClient *azcore.Client, credential any) *Client[generated.ContainerClient] { +func NewContainerClient(containerURL string, azClient *azcore.Client, credential any, options *ClientOptions) *Client[generated.ContainerClient] { return &Client[generated.ContainerClient]{ inner: generated.NewContainerClient(containerURL, azClient), credential: credential, + options: options, } } -func NewBlobClient(blobURL string, azClient *azcore.Client, credential any) *Client[generated.BlobClient] { +func NewBlobClient(blobURL string, azClient *azcore.Client, credential any, options *ClientOptions) *Client[generated.BlobClient] { return &Client[generated.BlobClient]{ inner: generated.NewBlobClient(blobURL, azClient), credential: credential, + options: options, } } @@ -71,7 +94,10 @@ type CompositeClient[T, U any] struct { } func InnerClients[T, U any](client *CompositeClient[T, U]) (*Client[T], *U) { - return &Client[T]{inner: client.innerT}, client.innerU + return &Client[T]{ + inner: client.innerT, + credential: client.sharedKey, + }, client.innerU } func NewAppendBlobClient(blobURL string, azClient *azcore.Client, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.AppendBlobClient] { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/blob_batch.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/blob_batch.go index 64a88688a49..c26c62aa883 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/blob_batch.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/blob_batch.go @@ -11,12 +11,6 @@ import ( "bytes" "errors" "fmt" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/internal/log" - "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" "io" "mime" "mime/multipart" @@ -24,6 +18,13 @@ import ( "net/textproto" "strconv" "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" ) const ( @@ -45,11 +46,11 @@ func createBatchID() (string, error) { // buildSubRequest is used for building the sub-request. Example: // DELETE /container0/blob0 HTTP/1.1 // x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT -// Authorization: SharedKey account:G4jjBXA7LI/RnWKIOQ8i9xH4p76pAQ+4Fs4R1VxasaE= +// Authorization: SharedKey account: // Content-Length: 0 func buildSubRequest(req *policy.Request) []byte { var batchSubRequest strings.Builder - blobPath := req.Raw().URL.Path + blobPath := req.Raw().URL.EscapedPath() if len(req.Raw().URL.RawQuery) > 0 { blobPath += "?" + req.Raw().URL.RawQuery } @@ -80,7 +81,7 @@ func buildSubRequest(req *policy.Request) []byte { // // DELETE /container0/blob0 HTTP/1.1 // x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT -// Authorization: SharedKey account:G4jjBXA7LI/RnWKIOQ8i9xH4p76pAQ+4Fs4R1VxasaE= +// Authorization: SharedKey account: // Content-Length: 0 func CreateBatchRequest(bb *BlobBatchBuilder) ([]byte, string, error) { batchID, err := createBatchID() diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go index 9bc1ca47df8..d0355727c90 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go @@ -13,7 +13,7 @@ import ( // HTTPRange defines a range of bytes within an HTTP resource, starting at offset and // ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange -// which has an offset but no zero value count indicates from the offset to the resource's end. +// which has an offset and zero value count indicates from the offset to the resource's end. 
type HTTPRange struct { Offset int64 Count int64 diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go index bd0bd5e260d..b0be323b7b8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go @@ -109,6 +109,91 @@ func getHeader(key string, headers map[string][]string) string { return "" } +func getWeightTables() [][]int { + tableLv0 := [...]int{ + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x71c, 0x0, 0x71f, 0x721, 0x723, 0x725, + 0x0, 0x0, 0x0, 0x72d, 0x803, 0x0, 0x0, 0x733, 0x0, 0xd03, 0xd1a, 0xd1c, 0xd1e, + 0xd20, 0xd22, 0xd24, 0xd26, 0xd28, 0xd2a, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0xe02, 0xe09, 0xe0a, 0xe1a, 0xe21, 0xe23, 0xe25, 0xe2c, 0xe32, 0xe35, 0xe36, 0xe48, 0xe51, + 0xe70, 0xe7c, 0xe7e, 0xe89, 0xe8a, 0xe91, 0xe99, 0xe9f, 0xea2, 0xea4, 0xea6, 0xea7, 0xea9, + 0x0, 0x0, 0x0, 0x743, 0x744, 0x748, 0xe02, 0xe09, 0xe0a, 0xe1a, 0xe21, 0xe23, 0xe25, + 0xe2c, 0xe32, 0xe35, 0xe36, 0xe48, 0xe51, 0xe70, 0xe7c, 0xe7e, 0xe89, 0xe8a, 0xe91, 0xe99, + 0xe9f, 0xea2, 0xea4, 0xea6, 0xea7, 0xea9, 0x0, 0x74c, 0x0, 0x750, 0x0, + } + tableLv2 := [...]int{ + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8012, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8212, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + } + tables := [][]int{tableLv0[:], tableLv2[:]} + return tables +} + +// NewHeaderStringComparer performs a multi-level, weight-based comparison of two strings +func compareHeaders(lhs, rhs string, tables [][]int) int { + currLevel, i, j := 0, 0, 0 + n := len(tables) + lhsLen := len(lhs) + rhsLen := len(rhs) + + for currLevel < n { + if currLevel == (n-1) && i != j { + if i > j { + return -1 + } + if i < j { + return 1 + } + return 0 + } + + var w1, w2 int + + // Check bounds before accessing lhs[i] + if i < lhsLen { + w1 = tables[currLevel][lhs[i]] + } else { + w1 = 0x1 + } + + // Check bounds before accessing rhs[j] + if j < rhsLen { + w2 = tables[currLevel][rhs[j]] + } else { + w2 = 0x1 + } + + if w1 == 0x1 && w2 == 0x1 { + i = 0 + j = 0 + currLevel++ + } else if w1 == w2 { + i++ + j++ + } else if w1 == 0 { + i++ + } else if w2 == 0 { + j++ + } else { + if w1 < w2 { + return -1 + } + if w1 > w2 { + return 1 + } + return 0 + } + } + return 0 +} + func (c *SharedKeyCredential) buildCanonicalizedHeader(headers http.Header) string { cm := map[string][]string{} for k, v := range headers { @@ -125,7 +210,11 @@ func (c *SharedKeyCredential) buildCanonicalizedHeader(headers http.Header) stri for key := range cm { keys = append(keys, key) } - sort.Strings(keys) + tables := getWeightTables() + // Sort the keys using the custom comparator + 
sort.Slice(keys, func(i, j int) bool { + return compareHeaders(keys[i], keys[j], tables) < 0 + }) ch := bytes.NewBufferString("") for i, key := range keys { if i > 0 { @@ -195,6 +284,13 @@ func NewSharedKeyCredPolicy(cred *SharedKeyCredential) *SharedKeyCredPolicy { } func (s *SharedKeyCredPolicy) Do(req *policy.Request) (*http.Response, error) { + // skip adding the authorization header if no SharedKeyCredential was provided. + // this prevents a panic that might be hard to diagnose and allows testing + // against http endpoints that don't require authentication. + if s.cred == nil { + return req.Next() + } + if d := getHeader(shared.HeaderXmsDate, req.Raw().Header); d == "" { req.Raw().Header.Set(shared.HeaderXmsDate, time.Now().UTC().Format(http.TimeFormat)) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go index 935debca3d5..3e95fff69a9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go @@ -7,6 +7,6 @@ package exported const ( - ModuleName = "azblob" - ModuleVersion = "v1.2.0" + ModuleName = "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" + ModuleVersion = "v1.5.0" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md index 367f020f4d5..afc96b4d74f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md @@ -7,7 +7,7 @@ go: true clear-output-folder: false version: "^3.0.0" license-header: MICROSOFT_MIT_NO_VERSION -input-file: "https://raw.githubusercontent.com/Azure/azure-rest-api-specs/a32d0b2423d19835246bb2ef92941503bfd5e734/specification/storage/data-plane/Microsoft.BlobStorage/preview/2021-12-02/blob.json" +input-file: "https://raw.githubusercontent.com/Azure/azure-rest-api-specs/f6f50c6388fd5836fa142384641b8353a99874ef/specification/storage/data-plane/Microsoft.BlobStorage/stable/2024-08-04/blob.json" credential-scope: "https://storage.azure.com/.default" output-folder: ../generated file-prefix: "zz_" @@ -19,10 +19,43 @@ modelerfour: seal-single-value-enum-by-default: true lenient-model-deduplication: true export-clients: true -use: "@autorest/go@4.0.0-preview.49" +use: "@autorest/go@4.0.0-preview.65" ``` -### Updating service version to 2023-08-03 +### Add Owner,Group,Permissions,Acl,ResourceType in ListBlob Response +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + $.BlobPropertiesInternal.properties["Owner"] = { + "type" : "string", + }; + $.BlobPropertiesInternal.properties["Group"] = { + "type" : "string", + }; + $.BlobPropertiesInternal.properties["Permissions"] = { + "type" : "string", + }; + $.BlobPropertiesInternal.properties["Acl"] = { + "type" : "string", + }; + $.BlobPropertiesInternal.properties["ResourceType"] = { + "type" : "string", + }; + +``` + +### Add permissions in ListBlobsInclude +``` yaml +directive: +- from: swagger-document + where: $.parameters.ListBlobsInclude + transform: > + $.items.enum.push("permissions"); +``` + +### Updating service version to 2024-11-04 ```yaml directive: - from: @@ -35,8 +68,21 @@ directive: where: $ transform: >- 
return $. - replaceAll(`[]string{"2021-12-02"}`, `[]string{ServiceVersion}`). - replaceAll(`2021-12-02`, `2023-08-03`); + replaceAll(`[]string{"2024-08-04"}`, `[]string{ServiceVersion}`); +``` + +### Fix CRC Response Header in PutBlob response +``` yaml +directive: +- from: swagger-document + where: $["x-ms-paths"]["/{containerName}/{blob}?BlockBlob"].put.responses["201"].headers + transform: > + $["x-ms-content-crc64"] = { + "x-ms-client-name": "ContentCRC64", + "type": "string", + "format": "byte", + "description": "Returned for a block blob so that the client can check the integrity of message content." + }; ``` ### Undo breaking change with BlobName @@ -280,7 +326,9 @@ directive: ``` yaml directive: -- from: zz_models.go +- from: + - zz_models.go + - zz_options.go where: $ transform: >- return $. @@ -291,7 +339,7 @@ directive: replace(/SourceIfMatch\s+\*string/g, `SourceIfMatch *azcore.ETag`). replace(/SourceIfNoneMatch\s+\*string/g, `SourceIfNoneMatch *azcore.ETag`); -- from: zz_response_types.go +- from: zz_responses.go where: $ transform: >- return $. @@ -443,8 +491,8 @@ directive: where: $ transform: >- return $. - replace(/if\s+!runtime\.HasStatusCode\(resp,\s+http\.StatusOK\)\s+\{\s*\n\t\treturn\s+ServiceClientSubmitBatchResponse\{\}\,\s+runtime\.NewResponseError\(resp\)\s*\n\t\}/g, - `if !runtime.HasStatusCode(resp, http.StatusAccepted) {\n\t\treturn ServiceClientSubmitBatchResponse{}, runtime.NewResponseError(resp)\n\t}`); + replace(/if\s+!runtime\.HasStatusCode\(httpResp,\s+http\.StatusOK\)\s+\{\s+err\s+=\s+runtime\.NewResponseError\(httpResp\)\s+return ServiceClientSubmitBatchResponse\{\}\,\s+err\s+}/g, + `if !runtime.HasStatusCode(httpResp, http.StatusAccepted) {\n\t\terr = runtime.NewResponseError(httpResp)\n\t\treturn ServiceClientSubmitBatchResponse{}, err\n\t}`); ``` ### Convert time to GMT for If-Modified-Since and If-Unmodified-Since request headers diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/constants.go index 8c13c44116b..114ab2d5767 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/constants.go @@ -6,4 +6,4 @@ package generated -const ServiceVersion = "2023-08-03" +const ServiceVersion = "2024-11-04" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go index 32be22221d7..80c5d99d392 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go @@ -1,11 +1,7 @@ -//go:build go1.18 -// +build go1.18 - // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -33,7 +29,7 @@ type AppendBlobClient struct { // AppendBlob. Append Block is supported only on version 2015-02-21 version or later. 
// If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - contentLength - The length of the request. // - body - Initial data // - options - AppendBlobClientAppendBlockOptions contains the optional parameters for the AppendBlobClient.AppendBlock method. @@ -44,18 +40,21 @@ type AppendBlobClient struct { // - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *AppendBlobClient) AppendBlock(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *AppendBlobClientAppendBlockOptions, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (AppendBlobClientAppendBlockResponse, error) { + var err error req, err := client.appendBlockCreateRequest(ctx, contentLength, body, options, leaseAccessConditions, appendPositionAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) if err != nil { return AppendBlobClientAppendBlockResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return AppendBlobClientAppendBlockResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return AppendBlobClientAppendBlockResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return AppendBlobClientAppendBlockResponse{}, err } - return client.appendBlockHandleResponse(resp) + resp, err := client.appendBlockHandleResponse(httpResp) + return resp, err } // appendBlockCreateRequest creates the AppendBlock request. 
@@ -70,54 +69,54 @@ func (client *AppendBlobClient) appendBlockCreateRequest(ctx context.Context, co reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} if options != nil && options.TransactionalContentMD5 != nil { req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} } - if options != nil && options.TransactionalContentCRC64 != nil { - req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } - if appendPositionAccessConditions != nil && appendPositionAccessConditions.MaxSize != nil { - req.Raw().Header["x-ms-blob-condition-maxsize"] = []string{strconv.FormatInt(*appendPositionAccessConditions.MaxSize, 10)} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil { req.Raw().Header["x-ms-blob-condition-appendpos"] = []string{strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)} } + if appendPositionAccessConditions != nil && appendPositionAccessConditions.MaxSize != nil { + req.Raw().Header["x-ms-blob-condition-maxsize"] = []string{strconv.FormatInt(*appendPositionAccessConditions.MaxSize, 10)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.TransactionalContentCRC64 != nil { + req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} } if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = 
[]string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if err := req.SetBody(body, "application/octet-stream"); err != nil { return nil, err } @@ -127,22 +126,19 @@ func (client *AppendBlobClient) appendBlockCreateRequest(ctx context.Context, co // appendBlockHandleResponse handles the AppendBlock response. func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) (AppendBlobClientAppendBlockResponse, error) { result := AppendBlobClientAppendBlockResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) + if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" { + result.BlobAppendOffset = &val } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) if err != nil { return AppendBlobClientAppendBlockResponse{}, err } - result.LastModified = &lastModified + result.BlobCommittedBlockCount = &blobCommittedBlockCount } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return AppendBlobClientAppendBlockResponse{}, err - } - result.ContentMD5 = contentMD5 + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } if val := resp.Header.Get("x-ms-content-crc64"); val != "" { contentCRC64, err := base64.StdEncoding.DecodeString(val) @@ -151,14 +147,12 @@ func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) ( } result.ContentCRC64 = contentCRC64 } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.ContentMD5 = contentMD5 } if val := resp.Header.Get("Date"); val != "" { date, err := 
time.Parse(time.RFC1123, val) @@ -167,16 +161,14 @@ func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) ( } result.Date = &date } - if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" { - result.BlobAppendOffset = &val + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) } - if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { - blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) - blobCommittedBlockCount := int32(blobCommittedBlockCount32) - if err != nil { - return AppendBlobClientAppendBlockResponse{}, err - } - result.BlobCommittedBlockCount = &blobCommittedBlockCount + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) @@ -185,11 +177,18 @@ func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) ( } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val } return result, nil } @@ -199,7 +198,7 @@ func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) ( // created with x-ms-blob-type set to AppendBlob. Append Block is supported only on version 2015-02-21 version or later. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - sourceURL - Specify a URL to the copy source. // - contentLength - The length of the request. // - options - AppendBlobClientAppendBlockFromURLOptions contains the optional parameters for the AppendBlobClient.AppendBlockFromURL @@ -213,18 +212,21 @@ func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) ( // - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL // method. 
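For callers, the regenerated method surfaces unchanged through the public appendblob package. A minimal sketch of driving AppendBlockFromURL; the URLs are placeholders (real calls need a credential or SAS tokens with the appropriate permissions):

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob"
)

func main() {
	ctx := context.TODO()
	dst, err := appendblob.NewClientWithNoCredential("https://myaccount.blob.core.windows.net/mycontainer/dst", nil)
	if err != nil {
		log.Fatal(err)
	}
	if _, err = dst.Create(ctx, nil); err != nil {
		log.Fatal(err)
	}
	// Appends the source blob's bytes to dst entirely server-side.
	if _, err = dst.AppendBlockFromURL(ctx, "https://myaccount.blob.core.windows.net/mycontainer/src", nil); err != nil {
		log.Fatal(err)
	}
}
```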
func (client *AppendBlobClient) AppendBlockFromURL(ctx context.Context, sourceURL string, contentLength int64, options *AppendBlobClientAppendBlockFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (AppendBlobClientAppendBlockFromURLResponse, error) { + var err error req, err := client.appendBlockFromURLCreateRequest(ctx, sourceURL, contentLength, options, cpkInfo, cpkScopeInfo, leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) if err != nil { return AppendBlobClientAppendBlockFromURLResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return AppendBlobClientAppendBlockFromURLResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return AppendBlobClientAppendBlockFromURLResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return AppendBlobClientAppendBlockFromURLResponse{}, err } - return client.appendBlockFromURLHandleResponse(resp) + resp, err := client.appendBlockFromURLHandleResponse(httpResp) + return resp, err } // appendBlockFromURLCreateRequest creates the AppendBlockFromURL request. @@ -239,98 +241,92 @@ func (client *AppendBlobClient) appendBlockFromURLCreateRequest(ctx context.Cont reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-copy-source"] = []string{sourceURL} - if options != nil && options.SourceRange != nil { - req.Raw().Header["x-ms-source-range"] = []string{*options.SourceRange} - } - if options != nil && options.SourceContentMD5 != nil { - req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} - } - if options != nil && options.SourceContentcrc64 != nil { - req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)} - } + req.Raw().Header["Accept"] = []string{"application/xml"} req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} if options != nil && options.TransactionalContentMD5 != nil { req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} } - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = 
[]string{string(*modifiedAccessConditions.IfNoneMatch)} } - if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil { + req.Raw().Header["x-ms-blob-condition-appendpos"] = []string{strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)} } if appendPositionAccessConditions != nil && appendPositionAccessConditions.MaxSize != nil { req.Raw().Header["x-ms-blob-condition-maxsize"] = []string{strconv.FormatInt(*appendPositionAccessConditions.MaxSize, 10)} } - if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil { - req.Raw().Header["x-ms-blob-condition-appendpos"] = []string{strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + req.Raw().Header["x-ms-copy-source"] = []string{sourceURL} + if options != nil && options.CopySourceAuthorization != nil { + req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = 
[]string{*leaseAccessConditions.LeaseID} } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + if options != nil && options.SourceContentcrc64 != nil { + req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)} + } + if options != nil && options.SourceContentMD5 != nil { + req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} + } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if options != nil && options.CopySourceAuthorization != nil { - req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} + if options != nil && options.SourceRange != nil { + req.Raw().Header["x-ms-source-range"] = []string{*options.SourceRange} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } // appendBlockFromURLHandleResponse handles the AppendBlockFromURL response. 
func (client *AppendBlobClient) appendBlockFromURLHandleResponse(resp *http.Response) (AppendBlobClientAppendBlockFromURLResponse, error) { result := AppendBlobClientAppendBlockFromURLResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return AppendBlobClientAppendBlockFromURLResponse{}, err - } - result.LastModified = &lastModified + if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" { + result.BlobAppendOffset = &val } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) if err != nil { return AppendBlobClientAppendBlockFromURLResponse{}, err } - result.ContentMD5 = contentMD5 + result.BlobCommittedBlockCount = &blobCommittedBlockCount } if val := resp.Header.Get("x-ms-content-crc64"); val != "" { contentCRC64, err := base64.StdEncoding.DecodeString(val) @@ -339,11 +335,12 @@ func (client *AppendBlobClient) appendBlockFromURLHandleResponse(resp *http.Resp } result.ContentCRC64 = contentCRC64 } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.ContentMD5 = contentMD5 } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) @@ -352,16 +349,8 @@ func (client *AppendBlobClient) appendBlockFromURLHandleResponse(resp *http.Resp } result.Date = &date } - if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" { - result.BlobAppendOffset = &val - } - if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { - blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) - blobCommittedBlockCount := int32(blobCommittedBlockCount32) - if err != nil { - return AppendBlobClientAppendBlockFromURLResponse{}, err - } - result.BlobCommittedBlockCount = &blobCommittedBlockCount + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { result.EncryptionKeySHA256 = &val @@ -376,13 +365,26 @@ func (client *AppendBlobClient) appendBlockFromURLHandleResponse(resp *http.Resp } result.IsServerEncrypted = &isServerEncrypted } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } // Create - The Create Append Blob operation creates a new append blob. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - contentLength - The length of the request. 
// - options - AppendBlobClientCreateOptions contains the optional parameters for the AppendBlobClient.Create method. // - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. @@ -391,18 +393,21 @@ func (client *AppendBlobClient) appendBlockFromURLHandleResponse(resp *http.Resp // - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *AppendBlobClient) Create(ctx context.Context, contentLength int64, options *AppendBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (AppendBlobClientCreateResponse, error) { + var err error req, err := client.createCreateRequest(ctx, contentLength, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) if err != nil { return AppendBlobClientCreateResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return AppendBlobClientCreateResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return AppendBlobClientCreateResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return AppendBlobClientCreateResponse{}, err } - return client.createHandleResponse(resp) + resp, err := client.createHandleResponse(httpResp) + return resp, err } // createCreateRequest creates the Create request. @@ -416,10 +421,25 @@ func (client *AppendBlobClient) createCreateRequest(ctx context.Context, content reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-blob-type"] = []string{"AppendBlob"} + req.Raw().Header["Accept"] = []string{"application/xml"} req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { - req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} } if blobHTTPHeaders != nil && 
blobHTTPHeaders.BlobContentEncoding != nil { req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} @@ -430,21 +450,15 @@ func (client *AppendBlobClient) createCreateRequest(ctx context.Context, content if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { - req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} - } - if options != nil && options.Metadata != nil { - for k, v := range options.Metadata { - if v != nil { - req.Raw().Header["x-ms-meta-"+k] = []string{*v} - } - } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + req.Raw().Header["x-ms-blob-type"] = []string{"AppendBlob"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { - req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} @@ -452,59 +466,43 @@ func (client *AppendBlobClient) createCreateRequest(ctx context.Context, content if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - if options != nil && options.BlobTagsString != nil { - req.Raw().Header["x-ms-tags"] = 
[]string{*options.BlobTagsString} + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} } if options != nil && options.ImmutabilityPolicyExpiry != nil { req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} } - if options != nil && options.ImmutabilityPolicyMode != nil { - req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if options != nil && options.LegalHold != nil { req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} } - req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } // createHandleResponse handles the Create response. func (client *AppendBlobClient) createHandleResponse(resp *http.Response) (AppendBlobClientCreateResponse, error) { result := AppendBlobClientCreateResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return AppendBlobClientCreateResponse{}, err - } - result.LastModified = &lastModified + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } if val := resp.Header.Get("Content-MD5"); val != "" { contentMD5, err := base64.StdEncoding.DecodeString(val) @@ -513,18 +511,6 @@ func (client *AppendBlobClient) createHandleResponse(resp *http.Response) (Appen } result.ContentMD5 = contentMD5 } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -532,6 +518,15 @@ func (client *AppendBlobClient) createHandleResponse(resp *http.Response) (Appen } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { @@ -539,11 +534,21 @@ func (client *AppendBlobClient) createHandleResponse(resp *http.Response) (Appen } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := 
time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientCreateResponse{}, err + } + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val } return result, nil } @@ -552,25 +557,28 @@ func (client *AppendBlobClient) createHandleResponse(resp *http.Response) (Appen // or later. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - options - AppendBlobClientSealOptions contains the optional parameters for the AppendBlobClient.Seal method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. // - AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock // method. func (client *AppendBlobClient) Seal(ctx context.Context, options *AppendBlobClientSealOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions) (AppendBlobClientSealResponse, error) { + var err error req, err := client.sealCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions, appendPositionAccessConditions) if err != nil { return AppendBlobClientSealResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return AppendBlobClientSealResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return AppendBlobClientSealResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return AppendBlobClientSealResponse{}, err } - return client.sealHandleResponse(resp) + resp, err := client.sealHandleResponse(httpResp) + return resp, err } // sealCreateRequest creates the Seal request. 
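The Seal hunk above shows the control-flow reshaping that repeats through the rest of this regenerated file: a predeclared `var err error`, the raw response held in `httpResp`, an explicit `err = runtime.NewResponseError(httpResp)` before the early return, and the parsed result produced by a dedicated *HandleResponse helper. A minimal standalone sketch of that shape, with plain net/http standing in for the SDK pipeline (the parse helper and URL here are invented for illustration):

package main

import (
	"errors"
	"fmt"
	"net/http"
)

// doOperation mirrors the reshaped control flow of the regenerated
// methods: build the request, send it, check the status code, then
// hand the raw response to a dedicated handler. In the real code the
// status-check error is runtime.NewResponseError(httpResp).
func doOperation(url string) (string, error) {
	var err error
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return "", err
	}
	httpResp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", err
	}
	defer httpResp.Body.Close()
	if httpResp.StatusCode != http.StatusOK {
		err = errors.New(httpResp.Status)
		return "", err
	}
	resp, err := parse(httpResp)
	return resp, err
}

// parse stands in for the generated *HandleResponse helpers.
func parse(resp *http.Response) (string, error) {
	return resp.Header.Get("ETag"), nil
}

func main() {
	fmt.Println(doOperation("https://example.com"))
}

Assigning the error to the up-front variable before returning gives the generated code a single, uniform exit shape across every operation.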
@@ -585,54 +593,38 @@ func (client *AppendBlobClient) sealCreateRequest(ctx context.Context, options * reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil { req.Raw().Header["x-ms-blob-condition-appendpos"] = []string{strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)} } - req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } // sealHandleResponse handles the Seal response. 
func (client *AppendBlobClient) sealHandleResponse(resp *http.Response) (AppendBlobClientSealResponse, error) { result := AppendBlobClientSealResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return AppendBlobClientSealResponse{}, err - } - result.LastModified = &lastModified - } if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -640,6 +632,9 @@ func (client *AppendBlobClient) sealHandleResponse(resp *http.Response) (AppendB } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } if val := resp.Header.Get("x-ms-blob-sealed"); val != "" { isSealed, err := strconv.ParseBool(val) if err != nil { @@ -647,5 +642,18 @@ func (client *AppendBlobClient) sealHandleResponse(resp *http.Response) (AppendB } result.IsSealed = &isSealed } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientSealResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go index 257a3656dd1..9e99a4efab1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go @@ -1,11 +1,7 @@ -//go:build go1.18 -// +build go1.18 - // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -33,23 +29,26 @@ type BlobClient struct { // blob with zero length and full metadata. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - copyID - The copy identifier provided in the x-ms-copy-id header of the original Copy Blob operation. // - options - BlobClientAbortCopyFromURLOptions contains the optional parameters for the BlobClient.AbortCopyFromURL method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. 
func (client *BlobClient) AbortCopyFromURL(ctx context.Context, copyID string, options *BlobClientAbortCopyFromURLOptions, leaseAccessConditions *LeaseAccessConditions) (BlobClientAbortCopyFromURLResponse, error) { + var err error req, err := client.abortCopyFromURLCreateRequest(ctx, copyID, options, leaseAccessConditions) if err != nil { return BlobClientAbortCopyFromURLResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientAbortCopyFromURLResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusNoContent) { - return BlobClientAbortCopyFromURLResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusNoContent) { + err = runtime.NewResponseError(httpResp) + return BlobClientAbortCopyFromURLResponse{}, err } - return client.abortCopyFromURLHandleResponse(resp) + resp, err := client.abortCopyFromURLHandleResponse(httpResp) + return resp, err } // abortCopyFromURLCreateRequest creates the AbortCopyFromURL request. @@ -65,15 +64,15 @@ func (client *BlobClient) abortCopyFromURLCreateRequest(ctx context.Context, cop reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } req.Raw().Header["x-ms-copy-action"] = []string{"abort"} if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } @@ -83,12 +82,6 @@ func (client *BlobClient) abortCopyFromURLHandleResponse(resp *http.Response) (B if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -96,31 +89,40 @@ func (client *BlobClient) abortCopyFromURLHandleResponse(resp *http.Response) (B } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } // AcquireLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - duration - Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite // lease can be between 15 and 60 seconds. A lease duration cannot be changed using // renew or change. // - options - BlobClientAcquireLeaseOptions contains the optional parameters for the BlobClient.AcquireLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *BlobClient) AcquireLease(ctx context.Context, duration int32, options *BlobClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientAcquireLeaseResponse, error) { + var err error req, err := client.acquireLeaseCreateRequest(ctx, duration, options, modifiedAccessConditions) if err != nil { return BlobClientAcquireLeaseResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientAcquireLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return BlobClientAcquireLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return BlobClientAcquireLeaseResponse{}, err } - return client.acquireLeaseHandleResponse(resp) + resp, err := client.acquireLeaseHandleResponse(httpResp) + return resp, err } // acquireLeaseCreateRequest creates the AcquireLease request. @@ -135,37 +137,47 @@ func (client *BlobClient) acquireLeaseCreateRequest(ctx context.Context, duratio reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-lease-action"] = []string{"acquire"} - req.Raw().Header["x-ms-lease-duration"] = []string{strconv.FormatInt(int64(duration), 10)} - if options != nil && options.ProposedLeaseID != nil { - req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID} + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + req.Raw().Header["x-ms-lease-action"] = []string{"acquire"} + req.Raw().Header["x-ms-lease-duration"] = []string{strconv.FormatInt(int64(duration), 10)} + if options != nil && options.ProposedLeaseID != nil { + req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID} } - req.Raw().Header["Accept"] = 
[]string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } // acquireLeaseHandleResponse handles the AcquireLease response. func (client *BlobClient) acquireLeaseHandleResponse(resp *http.Response) (BlobClientAcquireLeaseResponse, error) { result := BlobClientAcquireLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientAcquireLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -179,44 +191,37 @@ func (client *BlobClient) acquireLeaseHandleResponse(resp *http.Response) (BlobC if val := resp.Header.Get("x-ms-lease-id"); val != "" { result.LeaseID = &val } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientAcquireLeaseResponse{}, err - } - result.Date = &date - } return result, nil } // BreakLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - options - BlobClientBreakLeaseOptions contains the optional parameters for the BlobClient.BreakLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlobClient) BreakLease(ctx context.Context, options *BlobClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientBreakLeaseResponse, error) { + var err error req, err := client.breakLeaseCreateRequest(ctx, options, modifiedAccessConditions) if err != nil { return BlobClientBreakLeaseResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientBreakLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return BlobClientBreakLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return BlobClientBreakLeaseResponse{}, err } - return client.breakLeaseHandleResponse(resp) + resp, err := client.breakLeaseHandleResponse(httpResp) + return resp, err } // breakLeaseCreateRequest creates the BreakLease request. 
@@ -231,36 +236,46 @@ func (client *BlobClient) breakLeaseCreateRequest(ctx context.Context, options * reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-lease-action"] = []string{"break"} - if options != nil && options.BreakPeriod != nil { - req.Raw().Header["x-ms-lease-break-period"] = []string{strconv.FormatInt(int64(*options.BreakPeriod), 10)} + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + req.Raw().Header["x-ms-lease-action"] = []string{"break"} + if options != nil && options.BreakPeriod != nil { + req.Raw().Header["x-ms-lease-break-period"] = []string{strconv.FormatInt(int64(*options.BreakPeriod), 10)} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } // breakLeaseHandleResponse handles the BreakLease response. 
func (client *BlobClient) breakLeaseHandleResponse(resp *http.Response) (BlobClientBreakLeaseResponse, error) { result := BlobClientBreakLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientBreakLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -279,29 +294,19 @@ func (client *BlobClient) breakLeaseHandleResponse(resp *http.Response) (BlobCli } result.LeaseTime = &leaseTime } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientBreakLeaseResponse{}, err - } - result.Date = &date - } return result, nil } // ChangeLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - leaseID - Specifies the current lease ID on the resource. // - proposedLeaseID - Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed // lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID @@ -309,18 +314,21 @@ func (client *BlobClient) breakLeaseHandleResponse(resp *http.Response) (BlobCli // - options - BlobClientChangeLeaseOptions contains the optional parameters for the BlobClient.ChangeLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlobClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, options *BlobClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientChangeLeaseResponse, error) { + var err error req, err := client.changeLeaseCreateRequest(ctx, leaseID, proposedLeaseID, options, modifiedAccessConditions) if err != nil { return BlobClientChangeLeaseResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientChangeLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientChangeLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientChangeLeaseResponse{}, err } - return client.changeLeaseHandleResponse(resp) + resp, err := client.changeLeaseHandleResponse(httpResp) + return resp, err } // changeLeaseCreateRequest creates the ChangeLease request. 
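The conditional-request headers in these hunks (If-Match, If-Modified-Since, If-None-Match, If-Unmodified-Since) all use one date idiom: convert the time to the package's gmt location, then format with time.RFC1123. A standalone sketch, assuming a plain fixed-offset GMT zone in place of the package-level gmt value:

package main

import (
	"fmt"
	"time"
)

func main() {
	// The generated files format every HTTP date header as
	// (*t).In(gmt).Format(time.RFC1123); a zero-offset zone named
	// "GMT" reproduces the required trailing "GMT" token.
	gmt := time.FixedZone("GMT", 0)
	t := time.Date(2024, time.August, 4, 12, 30, 0, 0, time.UTC)
	fmt.Println(t.In(gmt).Format(time.RFC1123))
	// Output: Sun, 04 Aug 2024 12:30:00 GMT
}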
@@ -335,35 +343,45 @@ func (client *BlobClient) changeLeaseCreateRequest(ctx context.Context, leaseID reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-lease-action"] = []string{"change"} - req.Raw().Header["x-ms-lease-id"] = []string{leaseID} - req.Raw().Header["x-ms-proposed-lease-id"] = []string{proposedLeaseID} + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } + req.Raw().Header["x-ms-lease-action"] = []string{"change"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + req.Raw().Header["x-ms-proposed-lease-id"] = []string{proposedLeaseID} req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // changeLeaseHandleResponse handles the ChangeLease response. 
func (client *BlobClient) changeLeaseHandleResponse(resp *http.Response) (BlobClientChangeLeaseResponse, error) { result := BlobClientChangeLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientChangeLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -374,25 +392,15 @@ func (client *BlobClient) changeLeaseHandleResponse(resp *http.Response) (BlobCl } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } - if val := resp.Header.Get("x-ms-lease-id"); val != "" { - result.LeaseID = &val - } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientChangeLeaseResponse{}, err - } - result.Date = &date - } return result, nil } @@ -400,7 +408,7 @@ func (client *BlobClient) changeLeaseHandleResponse(resp *http.Response) (BlobCl // until the copy is complete. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies // a page blob snapshot. The value should be URL-encoded as it would appear in a request // URI. The source blob must either be public or must be authenticated via a shared access signature. @@ -411,18 +419,21 @@ func (client *BlobClient) changeLeaseHandleResponse(resp *http.Response) (BlobCl // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. func (client *BlobClient) CopyFromURL(ctx context.Context, copySource string, options *BlobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions, cpkScopeInfo *CPKScopeInfo) (BlobClientCopyFromURLResponse, error) { + var err error req, err := client.copyFromURLCreateRequest(ctx, copySource, options, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions, cpkScopeInfo) if err != nil { return BlobClientCopyFromURLResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientCopyFromURLResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return BlobClientCopyFromURLResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return BlobClientCopyFromURLResponse{}, err } - return client.copyFromURLHandleResponse(resp) + resp, err := client.copyFromURLHandleResponse(httpResp) + return resp, err } // copyFromURLCreateRequest creates the CopyFromURL request. 
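The CopyFromURL hunks that follow move their integrity checks as base64: x-ms-source-content-md5 is encoded on the request, and Content-MD5 / x-ms-content-crc64 are decoded back to bytes in the response handler. A round-trip sketch of that encoding:

package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

func main() {
	// Request side: the generated code base64-encodes the raw MD5
	// bytes into x-ms-source-content-md5.
	sum := md5.Sum([]byte("blob payload"))
	encoded := base64.StdEncoding.EncodeToString(sum[:])
	fmt.Println("x-ms-source-content-md5:", encoded)

	// Response side: the handlers decode the header back into
	// []byte with base64.StdEncoding.DecodeString.
	decoded, err := base64.StdEncoding.DecodeString(encoded)
	if err != nil {
		panic(err)
	}
	fmt.Println("decoded bytes:", len(decoded)) // 16, i.e. md5.Size
}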
@@ -436,111 +447,99 @@ func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySour reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-requires-sync"] = []string{"true"} - if options != nil && options.Metadata != nil { - for k, v := range options.Metadata { - if v != nil { - req.Raw().Header["x-ms-meta-"+k] = []string{*v} - } - } - } - if options != nil && options.Tier != nil { - req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { - req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { - req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-copy-source"] = []string{copySource} - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + if options != nil && options.Tier != nil { + req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - if options != nil && options.SourceContentMD5 != nil { - req.Raw().Header["x-ms-source-content-md5"] = 
[]string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} + req.Raw().Header["x-ms-copy-source"] = []string{copySource} + if options != nil && options.CopySourceAuthorization != nil { + req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} } - if options != nil && options.BlobTagsString != nil { - req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + if options != nil && options.CopySourceTags != nil { + req.Raw().Header["x-ms-copy-source-tag-option"] = []string{string(*options.CopySourceTags)} } - if options != nil && options.ImmutabilityPolicyExpiry != nil { - req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } if options != nil && options.ImmutabilityPolicyMode != nil { req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} } + if options != nil && options.ImmutabilityPolicyExpiry != nil { + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } if options != nil && options.LegalHold != nil { req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} } - if options != nil && options.CopySourceAuthorization != nil { - req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } } - if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { - req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + req.Raw().Header["x-ms-requires-sync"] = []string{"true"} + if options != nil && options.SourceContentMD5 != nil { + req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} } - if options != nil && options.CopySourceTags != nil { - req.Raw().Header["x-ms-copy-source-tag-option"] = []string{string(*options.CopySourceTags)} + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { + req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} } - req.Raw().Header["Accept"] = []string{"application/xml"} - return req, nil -} - -// copyFromURLHandleResponse handles the CopyFromURL response. 
-func (client *BlobClient) copyFromURLHandleResponse(resp *http.Response) (BlobClientCopyFromURLResponse, error) { - result := BlobClientCopyFromURLResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientCopyFromURLResponse{}, err - } - result.LastModified = &lastModified + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { + req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + return req, nil +} + +// copyFromURLHandleResponse handles the CopyFromURL response. +func (client *BlobClient) copyFromURLHandleResponse(resp *http.Response) (BlobClientCopyFromURLResponse, error) { + result := BlobClientCopyFromURLResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientCopyFromURLResponse{}, err + } + result.ContentCRC64 = contentCRC64 } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) if err != nil { return BlobClientCopyFromURLResponse{}, err } - result.Date = &date + result.ContentMD5 = contentMD5 } if val := resp.Header.Get("x-ms-copy-id"); val != "" { result.CopyID = &val @@ -548,22 +547,34 @@ func (client *BlobClient) copyFromURLHandleResponse(resp *http.Response) (BlobCl if val := resp.Header.Get("x-ms-copy-status"); val != "" { result.CopyStatus = &val } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientCopyFromURLResponse{}, err } - result.ContentMD5 = contentMD5 + result.Date = &date } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - contentCRC64, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = 
&val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientCopyFromURLResponse{}, err } - result.ContentCRC64 = contentCRC64 + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val } return result, nil } @@ -571,25 +582,28 @@ func (client *BlobClient) copyFromURLHandleResponse(resp *http.Response) (BlobCl // CreateSnapshot - The Create Snapshot operation creates a read-only snapshot of a blob // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - options - BlobClientCreateSnapshotOptions contains the optional parameters for the BlobClient.CreateSnapshot method. // - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. // - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. func (client *BlobClient) CreateSnapshot(ctx context.Context, options *BlobClientCreateSnapshotOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientCreateSnapshotResponse, error) { + var err error req, err := client.createSnapshotCreateRequest(ctx, options, cpkInfo, cpkScopeInfo, modifiedAccessConditions, leaseAccessConditions) if err != nil { return BlobClientCreateSnapshotResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientCreateSnapshotResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return BlobClientCreateSnapshotResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return BlobClientCreateSnapshotResponse{}, err } - return client.createSnapshotHandleResponse(resp) + resp, err := client.createSnapshotHandleResponse(httpResp) + return resp, err } // createSnapshotCreateRequest creates the CreateSnapshot request. 
@@ -604,12 +618,24 @@ func (client *BlobClient) createSnapshotCreateRequest(ctx context.Context, optio reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - if options != nil && options.Metadata != nil { - for k, v := range options.Metadata { - if v != nil { - req.Raw().Header["x-ms-meta-"+k] = []string{*v} - } - } + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} @@ -617,47 +643,49 @@ func (client *BlobClient) createSnapshotCreateRequest(ctx context.Context, optio if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if options != nil && options.Metadata != 
nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } // createSnapshotHandleResponse handles the CreateSnapshot response. func (client *BlobClient) createSnapshotHandleResponse(resp *http.Response) (BlobClientCreateSnapshotResponse, error) { result := BlobClientCreateSnapshotResponse{} - if val := resp.Header.Get("x-ms-snapshot"); val != "" { - result.Snapshot = &val + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientCreateSnapshotResponse{}, err + } + result.Date = &date } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlobClientCreateSnapshotResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { @@ -665,32 +693,18 @@ func (client *BlobClient) createSnapshotHandleResponse(resp *http.Response) (Blo } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } + if val := resp.Header.Get("x-ms-snapshot"); val != "" { + result.Snapshot = &val + } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } if val := resp.Header.Get("x-ms-version-id"); val != "" { result.VersionID = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientCreateSnapshotResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) - if err != nil { - return BlobClientCreateSnapshotResponse{}, err - } - result.IsServerEncrypted = &isServerEncrypted - } return result, nil } @@ -707,23 +721,26 @@ func (client *BlobClient) createSnapshotHandleResponse(resp *http.Response) (Blo // return an HTTP status code of 404 (ResourceNotFound). // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - options - BlobClientDeleteOptions contains the optional parameters for the BlobClient.Delete method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *BlobClient) Delete(ctx context.Context, options *BlobClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientDeleteResponse, error) { + var err error req, err := client.deleteCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { return BlobClientDeleteResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientDeleteResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return BlobClientDeleteResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return BlobClientDeleteResponse{}, err } - return client.deleteHandleResponse(resp) + resp, err := client.deleteHandleResponse(httpResp) + return resp, err } // deleteCreateRequest creates the Delete request. @@ -733,45 +750,45 @@ func (client *BlobClient) deleteCreateRequest(ctx context.Context, options *Blob return nil, err } reqQP := req.Raw().URL.Query() + if options != nil && options.DeleteType != nil { + reqQP.Set("deletetype", string(*options.DeleteType)) + } if options != nil && options.Snapshot != nil { reqQP.Set("snapshot", *options.Snapshot) } - if options != nil && options.VersionID != nil { - reqQP.Set("versionid", *options.VersionID) - } if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } - if options != nil && options.DeleteType != nil { - reqQP.Set("deletetype", string(*options.DeleteType)) + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) } req.Raw().URL.RawQuery = reqQP.Encode() - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if options != nil && options.DeleteSnapshots != nil { - req.Raw().Header["x-ms-delete-snapshots"] = []string{string(*options.DeleteSnapshots)} + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + if options != nil && options.DeleteSnapshots != nil { + req.Raw().Header["x-ms-delete-snapshots"] = 
[]string{string(*options.DeleteSnapshots)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -781,12 +798,6 @@ func (client *BlobClient) deleteHandleResponse(resp *http.Response) (BlobClientD if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -794,28 +805,37 @@ func (client *BlobClient) deleteHandleResponse(resp *http.Response) (BlobClientD } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } // DeleteImmutabilityPolicy - The Delete Immutability Policy operation deletes the immutability policy on the blob // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - options - BlobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the BlobClient.DeleteImmutabilityPolicy // method. func (client *BlobClient) DeleteImmutabilityPolicy(ctx context.Context, options *BlobClientDeleteImmutabilityPolicyOptions) (BlobClientDeleteImmutabilityPolicyResponse, error) { + var err error req, err := client.deleteImmutabilityPolicyCreateRequest(ctx, options) if err != nil { return BlobClientDeleteImmutabilityPolicyResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientDeleteImmutabilityPolicyResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientDeleteImmutabilityPolicyResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientDeleteImmutabilityPolicyResponse{}, err } - return client.deleteImmutabilityPolicyHandleResponse(resp) + resp, err := client.deleteImmutabilityPolicyHandleResponse(httpResp) + return resp, err } // deleteImmutabilityPolicyCreateRequest creates the DeleteImmutabilityPolicy request. 
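Every *HandleResponse helper in these files follows one recipe per header type: strings are copied as-is, dates go through time.Parse(time.RFC1123, ...), booleans through strconv.ParseBool, and binary fields through base64. A compact sketch of that dispatch; the result struct here is a stand-in for the generated *Response types:

package main

import (
	"fmt"
	"net/http"
	"strconv"
	"time"
)

// result mirrors the shape of the generated *Response structs:
// every field is a pointer so "header absent" stays distinguishable
// from a zero value.
type result struct {
	RequestID         *string
	Date              *time.Time
	IsServerEncrypted *bool
}

func handleResponse(resp *http.Response) (result, error) {
	r := result{}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		r.RequestID = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return result{}, err
		}
		r.Date = &date
	}
	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
		b, err := strconv.ParseBool(val)
		if err != nil {
			return result{}, err
		}
		r.IsServerEncrypted = &b
	}
	return r, nil
}

func main() {
	resp := &http.Response{Header: http.Header{}}
	resp.Header.Set("x-ms-request-id", "abc")
	resp.Header.Set("Date", "Sun, 04 Aug 2024 12:30:00 GMT")
	resp.Header.Set("x-ms-request-server-encrypted", "true")
	fmt.Println(handleResponse(resp))
}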
@@ -830,11 +850,11 @@ func (client *BlobClient) deleteImmutabilityPolicyCreateRequest(ctx context.Cont reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -844,12 +864,6 @@ func (client *BlobClient) deleteImmutabilityPolicyHandleResponse(resp *http.Resp if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -857,6 +871,12 @@ func (client *BlobClient) deleteImmutabilityPolicyHandleResponse(resp *http.Resp } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } @@ -864,24 +884,27 @@ func (client *BlobClient) deleteImmutabilityPolicyHandleResponse(resp *http.Resp // can also call Download to read a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - options - BlobClientDownloadOptions contains the optional parameters for the BlobClient.Download method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlobClient) Download(ctx context.Context, options *BlobClientDownloadOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientDownloadResponse, error) { + var err error req, err := client.downloadCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, modifiedAccessConditions) if err != nil { return BlobClientDownloadResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientDownloadResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusPartialContent, http.StatusNotModified) { - return BlobClientDownloadResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusPartialContent, http.StatusNotModified) { + err = runtime.NewResponseError(httpResp) + return BlobClientDownloadResponse{}, err } - return client.downloadHandleResponse(resp) + resp, err := client.downloadHandleResponse(httpResp) + return resp, err } // downloadCreateRequest creates the Download request. 
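The Delete and Download builders likewise reorder their query parameters (deletetype, snapshot, timeout, versionid) into alphabetical order. Since url.Values.Encode sorts keys before encoding, the reordering is cosmetic at the wire level, as this quick check shows:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Insertion order does not matter: Encode sorts keys, so the
	// query string matches the alphabetical order of the generated
	// builders regardless of the order Set is called in.
	q := url.Values{}
	q.Set("versionid", "2024-08-04T00:00:00Z")
	q.Set("timeout", "30")
	q.Set("snapshot", "snap1")
	fmt.Println(q.Encode())
	// snapshot=snap1&timeout=30&versionid=2024-08-04T00%3A00%3A00Z
}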
@@ -894,25 +917,32 @@ func (client *BlobClient) downloadCreateRequest(ctx context.Context, options *Bl if options != nil && options.Snapshot != nil { reqQP.Set("snapshot", *options.Snapshot) } - if options != nil && options.VersionID != nil { - reqQP.Set("versionid", *options.VersionID) - } if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) + } req.Raw().URL.RawQuery = reqQP.Encode() runtime.SkipBodyDownload(req) - if options != nil && options.Range != nil { - req.Raw().Header["x-ms-range"] = []string{*options.Range} + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } - if options != nil && options.RangeGetContentMD5 != nil { - req.Raw().Header["x-ms-range-get-content-md5"] = []string{strconv.FormatBool(*options.RangeGetContentMD5)} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} } - if options != nil && options.RangeGetContentCRC64 != nil { - req.Raw().Header["x-ms-range-get-content-crc64"] = []string{strconv.FormatBool(*options.RangeGetContentCRC64)} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} @@ -920,112 +950,97 @@ func (client *BlobClient) downloadCreateRequest(ctx context.Context, options *Bl if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + 
req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + if options != nil && options.Range != nil { + req.Raw().Header["x-ms-range"] = []string{*options.Range} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + if options != nil && options.RangeGetContentCRC64 != nil { + req.Raw().Header["x-ms-range-get-content-crc64"] = []string{strconv.FormatBool(*options.RangeGetContentCRC64)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + if options != nil && options.RangeGetContentMD5 != nil { + req.Raw().Header["x-ms-range-get-content-md5"] = []string{strconv.FormatBool(*options.RangeGetContentMD5)} } req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // downloadHandleResponse handles the Download response. func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClientDownloadResponse, error) { result := BlobClientDownloadResponse{Body: resp.Body} - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("Accept-Ranges"); val != "" { + result.AcceptRanges = &val + } + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) if err != nil { return BlobClientDownloadResponse{}, err } - result.LastModified = &lastModified + result.BlobCommittedBlockCount = &blobCommittedBlockCount } - if val := resp.Header.Get("x-ms-creation-time"); val != "" { - creationTime, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-blob-content-md5"); val != "" { + blobContentMD5, err := base64.StdEncoding.DecodeString(val) if err != nil { return BlobClientDownloadResponse{}, err } - result.CreationTime = &creationTime - } - for hh := range resp.Header { - if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { - if result.Metadata == nil { - result.Metadata = map[string]*string{} - } - result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) - } - } - if val := resp.Header.Get("x-ms-or-policy-id"); val != "" { - result.ObjectReplicationPolicyID = &val - } - for hh := range resp.Header { - if len(hh) > len("x-ms-or-") && strings.EqualFold(hh[:len("x-ms-or-")], "x-ms-or-") { - if result.Metadata == nil { - result.Metadata = map[string]*string{} - } - result.Metadata[hh[len("x-ms-or-"):]] = to.Ptr(resp.Header.Get(hh)) - } + result.BlobContentMD5 = blobContentMD5 } - if val := resp.Header.Get("Content-Length"); val != "" { - contentLength, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) if err != nil { return BlobClientDownloadResponse{}, err } - result.ContentLength = &contentLength + result.BlobSequenceNumber = &blobSequenceNumber } - if val := 
resp.Header.Get("Content-Type"); val != "" { - result.ContentType = &val + if val := resp.Header.Get("x-ms-blob-type"); val != "" { + result.BlobType = (*BlobType)(&val) } - if val := resp.Header.Get("Content-Range"); val != "" { - result.ContentRange = &val + if val := resp.Header.Get("Cache-Control"); val != "" { + result.CacheControl = &val } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) if err != nil { return BlobClientDownloadResponse{}, err } - result.ContentMD5 = contentMD5 - } - if val := resp.Header.Get("Content-Encoding"); val != "" { - result.ContentEncoding = &val - } - if val := resp.Header.Get("Cache-Control"); val != "" { - result.CacheControl = &val + result.ContentCRC64 = contentCRC64 } if val := resp.Header.Get("Content-Disposition"); val != "" { result.ContentDisposition = &val } + if val := resp.Header.Get("Content-Encoding"); val != "" { + result.ContentEncoding = &val + } if val := resp.Header.Get("Content-Language"); val != "" { result.ContentLanguage = &val } - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("Content-Length"); val != "" { + contentLength, err := strconv.ParseInt(val, 10, 64) if err != nil { return BlobClientDownloadResponse{}, err } - result.BlobSequenceNumber = &blobSequenceNumber + result.ContentLength = &contentLength } - if val := resp.Header.Get("x-ms-blob-type"); val != "" { - result.BlobType = (*BlobType)(&val) + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Content-Range"); val != "" { + result.ContentRange = &val + } + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val } if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" { copyCompletionTime, err := time.Parse(time.RFC1123, val) @@ -1034,9 +1049,6 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien } result.CopyCompletionTime = ©CompletionTime } - if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { - result.CopyStatusDescription = &val - } if val := resp.Header.Get("x-ms-copy-id"); val != "" { result.CopyID = &val } @@ -1049,36 +1061,15 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien if val := resp.Header.Get("x-ms-copy-status"); val != "" { result.CopyStatus = (*CopyStatusType)(&val) } - if val := resp.Header.Get("x-ms-lease-duration"); val != "" { - result.LeaseDuration = (*LeaseDurationType)(&val) - } - if val := resp.Header.Get("x-ms-lease-state"); val != "" { - result.LeaseState = (*LeaseStateType)(&val) - } - if val := resp.Header.Get("x-ms-lease-status"); val != "" { - result.LeaseStatus = (*LeaseStatusType)(&val) - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = 
&val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val + if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { + result.CopyStatusDescription = &val } - if val := resp.Header.Get("x-ms-is-current-version"); val != "" { - isCurrentVersion, err := strconv.ParseBool(val) + if val := resp.Header.Get("x-ms-creation-time"); val != "" { + creationTime, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientDownloadResponse{}, err } - result.IsCurrentVersion = &isCurrentVersion - } - if val := resp.Header.Get("Accept-Ranges"); val != "" { - result.AcceptRanges = &val + result.CreationTime = &creationTime } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) @@ -1087,20 +1078,8 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien } result.Date = &date } - if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { - blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) - blobCommittedBlockCount := int32(blobCommittedBlockCount32) - if err != nil { - return BlobClientDownloadResponse{}, err - } - result.BlobCommittedBlockCount = &blobCommittedBlockCount - } - if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) - if err != nil { - return BlobClientDownloadResponse{}, err - } - result.IsServerEncrypted = &isServerEncrypted + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { result.EncryptionKeySHA256 = &val @@ -1108,19 +1087,25 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { result.EncryptionScope = &val } - if val := resp.Header.Get("x-ms-blob-content-md5"); val != "" { - blobContentMD5, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("x-ms-error-code"); val != "" { + result.ErrorCode = &val + } + if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" { + immutabilityPolicyExpiresOn, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientDownloadResponse{}, err } - result.BlobContentMD5 = blobContentMD5 + result.ImmutabilityPolicyExpiresOn = &immutabilityPolicyExpiresOn } - if val := resp.Header.Get("x-ms-tag-count"); val != "" { - tagCount, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { + result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val) + } + if val := resp.Header.Get("x-ms-is-current-version"); val != "" { + isCurrentVersion, err := strconv.ParseBool(val) if err != nil { return BlobClientDownloadResponse{}, err } - result.TagCount = &tagCount + result.IsCurrentVersion = &isCurrentVersion } if val := resp.Header.Get("x-ms-blob-sealed"); val != "" { isSealed, err := strconv.ParseBool(val) @@ -1129,6 +1114,13 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien } result.IsSealed = &isSealed } + if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } if val := resp.Header.Get("x-ms-last-access-time"); val != "" { lastAccessed, err := time.Parse(time.RFC1123, val) if err != nil { @@ -1136,15 +1128,21 @@ func (client *BlobClient) 
downloadHandleResponse(resp *http.Response) (BlobClien } result.LastAccessed = &lastAccessed } - if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" { - immutabilityPolicyExpiresOn, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientDownloadResponse{}, err } - result.ImmutabilityPolicyExpiresOn = &immutabilityPolicyExpiresOn + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { - result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val) + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = (*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) } if val := resp.Header.Get("x-ms-legal-hold"); val != "" { legalHold, err := strconv.ParseBool(val) @@ -1153,15 +1151,40 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien } result.LegalHold = &legalHold } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - contentCRC64, err := base64.StdEncoding.DecodeString(val) + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("x-ms-or-policy-id"); val != "" { + result.ObjectReplicationPolicyID = &val + } + for hh := range resp.Header { + if len(hh) > len("x-ms-or-") && strings.EqualFold(hh[:len("x-ms-or-")], "x-ms-or-") { + if result.ObjectReplicationRules == nil { + result.ObjectReplicationRules = map[string]*string{} + } + result.ObjectReplicationRules[hh[len("x-ms-or-"):]] = to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-tag-count"); val != "" { + tagCount, err := strconv.ParseInt(val, 10, 64) if err != nil { return BlobClientDownloadResponse{}, err } - result.ContentCRC64 = contentCRC64 + result.TagCount = &tagCount } - if val := resp.Header.Get("x-ms-error-code"); val != "" { - result.ErrorCode = &val + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val } return result, nil } @@ -1169,21 +1192,24 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien // GetAccountInfo - Returns the sku name and account kind // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - options - BlobClientGetAccountInfoOptions contains the optional parameters for the BlobClient.GetAccountInfo method. 
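The getAccountInfoCreateRequest hunk below sets `comp` before `restype` and gains the optional `timeout` query parameter. The reordering is cosmetic on the wire, because `url.Values.Encode` sorts keys, as this sketch (function name hypothetical) shows:

```go
import (
	"net/url"
	"strconv"
)

func accountInfoQuery(timeout *int32) string {
	qp := url.Values{}
	qp.Set("restype", "account") // order of Set calls is irrelevant...
	qp.Set("comp", "properties")
	if timeout != nil {
		qp.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
	}
	// ...because Encode sorts keys: "comp=properties&restype=account&timeout=30"
	return qp.Encode()
}
```

The same reasoning applies to every reordered `reqQP` block in this file; only the source layout changes, not the encoded query string.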
func (client *BlobClient) GetAccountInfo(ctx context.Context, options *BlobClientGetAccountInfoOptions) (BlobClientGetAccountInfoResponse, error) { + var err error req, err := client.getAccountInfoCreateRequest(ctx, options) if err != nil { return BlobClientGetAccountInfoResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientGetAccountInfoResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientGetAccountInfoResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientGetAccountInfoResponse{}, err } - return client.getAccountInfoHandleResponse(resp) + resp, err := client.getAccountInfoHandleResponse(httpResp) + return resp, err } // getAccountInfoCreateRequest creates the GetAccountInfo request. @@ -1193,26 +1219,29 @@ func (client *BlobClient) getAccountInfoCreateRequest(ctx context.Context, optio return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "account") reqQP.Set("comp", "properties") + reqQP.Set("restype", "account") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } // getAccountInfoHandleResponse handles the GetAccountInfo response. func (client *BlobClient) getAccountInfoHandleResponse(resp *http.Response) (BlobClientGetAccountInfoResponse, error) { result := BlobClientGetAccountInfoResponse{} + if val := resp.Header.Get("x-ms-account-kind"); val != "" { + result.AccountKind = (*AccountKind)(&val) + } if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -1220,11 +1249,21 @@ func (client *BlobClient) getAccountInfoHandleResponse(resp *http.Response) (Blo } result.Date = &date } - if val := resp.Header.Get("x-ms-sku-name"); val != "" { - result.SKUName = (*SKUName)(&val) + if val := resp.Header.Get("x-ms-is-hns-enabled"); val != "" { + isHierarchicalNamespaceEnabled, err := strconv.ParseBool(val) + if err != nil { + return BlobClientGetAccountInfoResponse{}, err + } + result.IsHierarchicalNamespaceEnabled = &isHierarchicalNamespaceEnabled } - if val := resp.Header.Get("x-ms-account-kind"); val != "" { - result.AccountKind = (*AccountKind)(&val) + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-sku-name"); val != "" { + result.SKUName = (*SKUName)(&val) + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val } return result, nil } @@ -1233,24 +1272,27 @@ func (client *BlobClient) getAccountInfoHandleResponse(resp *http.Response) (Blo // for the blob. It does not return the content of the blob. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - options - BlobClientGetPropertiesOptions contains the optional parameters for the BlobClient.GetProperties method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlobClient) GetProperties(ctx context.Context, options *BlobClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientGetPropertiesResponse, error) { + var err error req, err := client.getPropertiesCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, modifiedAccessConditions) if err != nil { return BlobClientGetPropertiesResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientGetPropertiesResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientGetPropertiesResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientGetPropertiesResponse{}, err } - return client.getPropertiesHandleResponse(resp) + resp, err := client.getPropertiesHandleResponse(httpResp) + return resp, err } // getPropertiesCreateRequest creates the GetProperties request. @@ -1263,87 +1305,124 @@ func (client *BlobClient) getPropertiesCreateRequest(ctx context.Context, option if options != nil && options.Snapshot != nil { reqQP.Set("snapshot", *options.Snapshot) } - if options != nil && options.VersionID != nil { - reqQP.Set("versionid", *options.VersionID) - } if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } - req.Raw().URL.RawQuery = reqQP.Encode() - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} - } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != 
nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } // getPropertiesHandleResponse handles the GetProperties response. func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (BlobClientGetPropertiesResponse, error) { result := BlobClientGetPropertiesResponse{} - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("Accept-Ranges"); val != "" { + result.AcceptRanges = &val + } + if val := resp.Header.Get("x-ms-access-tier"); val != "" { + result.AccessTier = &val + } + if val := resp.Header.Get("x-ms-access-tier-change-time"); val != "" { + accessTierChangeTime, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.LastModified = &lastModified + result.AccessTierChangeTime = &accessTierChangeTime } - if val := resp.Header.Get("x-ms-creation-time"); val != "" { - creationTime, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-access-tier-inferred"); val != "" { + accessTierInferred, err := strconv.ParseBool(val) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.CreationTime = &creationTime + result.AccessTierInferred = &accessTierInferred } - for hh := range resp.Header { - if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { - if result.Metadata == nil { - result.Metadata = map[string]*string{} - } - result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) - } + if val := resp.Header.Get("x-ms-archive-status"); val != "" { + result.ArchiveStatus = &val } - if val := resp.Header.Get("x-ms-or-policy-id"); val != "" { - result.ObjectReplicationPolicyID = &val + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount 
:= int32(blobCommittedBlockCount32)
+		if err != nil {
+			return BlobClientGetPropertiesResponse{}, err
+		}
+		result.BlobCommittedBlockCount = &blobCommittedBlockCount
 	}
-	for hh := range resp.Header {
-		if len(hh) > len("x-ms-or-") && strings.EqualFold(hh[:len("x-ms-or-")], "x-ms-or-") {
-			if result.Metadata == nil {
-				result.Metadata = map[string]*string{}
-			}
-			result.Metadata[hh[len("x-ms-or-"):]] = to.Ptr(resp.Header.Get(hh))
+	if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" {
+		blobSequenceNumber, err := strconv.ParseInt(val, 10, 64)
+		if err != nil {
+			return BlobClientGetPropertiesResponse{}, err
 		}
+		result.BlobSequenceNumber = &blobSequenceNumber
 	}
 	if val := resp.Header.Get("x-ms-blob-type"); val != "" {
 		result.BlobType = (*BlobType)(&val)
 	}
+	if val := resp.Header.Get("Cache-Control"); val != "" {
+		result.CacheControl = &val
+	}
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
+	}
+	if val := resp.Header.Get("Content-Disposition"); val != "" {
+		result.ContentDisposition = &val
+	}
+	if val := resp.Header.Get("Content-Encoding"); val != "" {
+		result.ContentEncoding = &val
+	}
+	if val := resp.Header.Get("Content-Language"); val != "" {
+		result.ContentLanguage = &val
+	}
+	if val := resp.Header.Get("Content-Length"); val != "" {
+		contentLength, err := strconv.ParseInt(val, 10, 64)
+		if err != nil {
+			return BlobClientGetPropertiesResponse{}, err
+		}
+		result.ContentLength = &contentLength
+	}
+	if val := resp.Header.Get("Content-MD5"); val != "" {
+		contentMD5, err := base64.StdEncoding.DecodeString(val)
+		if err != nil {
+			return BlobClientGetPropertiesResponse{}, err
+		}
+		result.ContentMD5 = contentMD5
+	}
+	if val := resp.Header.Get("Content-Type"); val != "" {
+		result.ContentType = &val
+	}
 	if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" {
 		copyCompletionTime, err := time.Parse(time.RFC1123, val)
@@ -1351,9 +1430,6 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob
 		}
 		result.CopyCompletionTime = &copyCompletionTime
 	}
-	if val := resp.Header.Get("x-ms-copy-status-description"); val != "" {
-		result.CopyStatusDescription = &val
-	}
 	if val := resp.Header.Get("x-ms-copy-id"); val != "" {
 		result.CopyID = &val
 	}
@@ -1366,90 +1442,72 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob
 	if val := resp.Header.Get("x-ms-copy-status"); val != "" {
 		result.CopyStatus = (*CopyStatusType)(&val)
 	}
-	if val := resp.Header.Get("x-ms-incremental-copy"); val != "" {
-		isIncrementalCopy, err := strconv.ParseBool(val)
+	if val := resp.Header.Get("x-ms-copy-status-description"); val != "" {
+		result.CopyStatusDescription = &val
+	}
+	if val := resp.Header.Get("x-ms-creation-time"); val != "" {
+		creationTime, err := time.Parse(time.RFC1123, val)
 		if err != nil {
 			return BlobClientGetPropertiesResponse{}, err
 		}
-		result.IsIncrementalCopy = &isIncrementalCopy
+		result.CreationTime = &creationTime
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return BlobClientGetPropertiesResponse{}, err
+		}
+		result.Date = &date
 	}
 	if val := resp.Header.Get("x-ms-copy-destination-snapshot"); val != "" {
 		result.DestinationSnapshot = &val
 	}
-	if val := resp.Header.Get("x-ms-lease-duration"); val != "" {
-		result.LeaseDuration = (*LeaseDurationType)(&val)
+	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
 	}
-	if val := resp.Header.Get("x-ms-lease-state");
val != "" { - result.LeaseState = (*LeaseStateType)(&val) + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val } - if val := resp.Header.Get("x-ms-lease-status"); val != "" { - result.LeaseStatus = (*LeaseStatusType)(&val) + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val } - if val := resp.Header.Get("Content-Length"); val != "" { - contentLength, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("x-ms-expiry-time"); val != "" { + expiresOn, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.ContentLength = &contentLength - } - if val := resp.Header.Get("Content-Type"); val != "" { - result.ContentType = &val - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) + result.ExpiresOn = &expiresOn } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" { + immutabilityPolicyExpiresOn, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.ContentMD5 = contentMD5 - } - if val := resp.Header.Get("Content-Encoding"); val != "" { - result.ContentEncoding = &val - } - if val := resp.Header.Get("Content-Disposition"); val != "" { - result.ContentDisposition = &val - } - if val := resp.Header.Get("Content-Language"); val != "" { - result.ContentLanguage = &val + result.ImmutabilityPolicyExpiresOn = &immutabilityPolicyExpiresOn } - if val := resp.Header.Get("Cache-Control"); val != "" { - result.CacheControl = &val + if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { + result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val) } - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("x-ms-is-current-version"); val != "" { + isCurrentVersion, err := strconv.ParseBool(val) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.BlobSequenceNumber = &blobSequenceNumber - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val + result.IsCurrentVersion = &isCurrentVersion } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-incremental-copy"); val != "" { + isIncrementalCopy, err := strconv.ParseBool(val) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.Date = &date - } - if val := resp.Header.Get("Accept-Ranges"); val != "" { - result.AcceptRanges = &val + result.IsIncrementalCopy = &isIncrementalCopy } - if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { - blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) - blobCommittedBlockCount := int32(blobCommittedBlockCount32) + if val := resp.Header.Get("x-ms-blob-sealed"); val != "" { + isSealed, err := strconv.ParseBool(val) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.BlobCommittedBlockCount = &blobCommittedBlockCount + result.IsSealed = &isSealed } if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { isServerEncrypted, err := 
strconv.ParseBool(val) @@ -1458,89 +1516,73 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val - } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val - } - if val := resp.Header.Get("x-ms-access-tier"); val != "" { - result.AccessTier = &val - } - if val := resp.Header.Get("x-ms-access-tier-inferred"); val != "" { - accessTierInferred, err := strconv.ParseBool(val) + if val := resp.Header.Get("x-ms-last-access-time"); val != "" { + lastAccessed, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.AccessTierInferred = &accessTierInferred - } - if val := resp.Header.Get("x-ms-archive-status"); val != "" { - result.ArchiveStatus = &val + result.LastAccessed = &lastAccessed } - if val := resp.Header.Get("x-ms-access-tier-change-time"); val != "" { - accessTierChangeTime, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.AccessTierChangeTime = &accessTierChangeTime + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) } - if val := resp.Header.Get("x-ms-is-current-version"); val != "" { - isCurrentVersion, err := strconv.ParseBool(val) - if err != nil { - return BlobClientGetPropertiesResponse{}, err - } - result.IsCurrentVersion = &isCurrentVersion + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = (*LeaseStateType)(&val) } - if val := resp.Header.Get("x-ms-tag-count"); val != "" { - tagCount, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) + } + if val := resp.Header.Get("x-ms-legal-hold"); val != "" { + legalHold, err := strconv.ParseBool(val) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.TagCount = &tagCount + result.LegalHold = &legalHold } - if val := resp.Header.Get("x-ms-expiry-time"); val != "" { - expiresOn, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientGetPropertiesResponse{}, err + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) } - result.ExpiresOn = &expiresOn } - if val := resp.Header.Get("x-ms-blob-sealed"); val != "" { - isSealed, err := strconv.ParseBool(val) - if err != nil { - return BlobClientGetPropertiesResponse{}, err + if val := resp.Header.Get("x-ms-or-policy-id"); val != "" { + result.ObjectReplicationPolicyID = &val + } + for hh := range resp.Header { + if len(hh) > len("x-ms-or-") && strings.EqualFold(hh[:len("x-ms-or-")], "x-ms-or-") { + if result.ObjectReplicationRules == nil { + result.ObjectReplicationRules = map[string]*string{} + } + result.ObjectReplicationRules[hh[len("x-ms-or-"):]] = to.Ptr(resp.Header.Get(hh)) } - result.IsSealed = &isSealed } if val := resp.Header.Get("x-ms-rehydrate-priority"); val != "" { 
result.RehydratePriority = &val } - if val := resp.Header.Get("x-ms-last-access-time"); val != "" { - lastAccessed, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientGetPropertiesResponse{}, err - } - result.LastAccessed = &lastAccessed + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val } - if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" { - immutabilityPolicyExpiresOn, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-tag-count"); val != "" { + tagCount, err := strconv.ParseInt(val, 10, 64) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.ImmutabilityPolicyExpiresOn = &immutabilityPolicyExpiresOn + result.TagCount = &tagCount } - if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { - result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val) + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val } - if val := resp.Header.Get("x-ms-legal-hold"); val != "" { - legalHold, err := strconv.ParseBool(val) - if err != nil { - return BlobClientGetPropertiesResponse{}, err - } - result.LegalHold = &legalHold + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val } return result, nil } @@ -1548,23 +1590,26 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob // GetTags - The Get Tags operation enables users to get the tags associated with a blob. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - options - BlobClientGetTagsOptions contains the optional parameters for the BlobClient.GetTags method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. func (client *BlobClient) GetTags(ctx context.Context, options *BlobClientGetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientGetTagsResponse, error) { + var err error req, err := client.getTagsCreateRequest(ctx, options, modifiedAccessConditions, leaseAccessConditions) if err != nil { return BlobClientGetTagsResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientGetTagsResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientGetTagsResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientGetTagsResponse{}, err } - return client.getTagsHandleResponse(resp) + resp, err := client.getTagsHandleResponse(httpResp) + return resp, err } // getTagsCreateRequest creates the GetTags request. 
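Besides reordering, the getPropertiesHandleResponse hunk above carries a real behavioral fix: `x-ms-or-*` headers now populate `ObjectReplicationRules` instead of being folded into `Metadata` (the downloadHandleResponse hunk earlier makes the same change). A sketch of the shared prefix-scan idiom; the helper name is hypothetical, and the generated code uses `to.Ptr` where this uses a local variable:

```go
import (
	"net/http"
	"strings"
)

// collectPrefixed gathers headers whose names start with prefix,
// case-insensitively, keyed by the suffix ("x-ms-meta-foo" -> "foo").
func collectPrefixed(h http.Header, prefix string) map[string]*string {
	var out map[string]*string
	for hh := range h {
		if len(hh) > len(prefix) && strings.EqualFold(hh[:len(prefix)], prefix) {
			if out == nil {
				out = map[string]*string{}
			}
			v := h.Get(hh)
			out[hh[len(prefix):]] = &v
		}
	}
	return out
}
```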
@@ -1575,17 +1620,17 @@ func (client *BlobClient) getTagsCreateRequest(ctx context.Context, options *Blo } reqQP := req.Raw().URL.Query() reqQP.Set("comp", "tags") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } if options != nil && options.Snapshot != nil { reqQP.Set("snapshot", *options.Snapshot) } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } if options != nil && options.VersionID != nil { reqQP.Set("versionid", *options.VersionID) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1595,7 +1640,7 @@ func (client *BlobClient) getTagsCreateRequest(ctx context.Context, options *Blo if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -1605,12 +1650,6 @@ func (client *BlobClient) getTagsHandleResponse(resp *http.Response) (BlobClient if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -1618,6 +1657,12 @@ func (client *BlobClient) getTagsHandleResponse(resp *http.Response) (BlobClient } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } if err := runtime.UnmarshalAsXML(resp, &result.BlobTags); err != nil { return BlobClientGetTagsResponse{}, err } @@ -1627,24 +1672,27 @@ func (client *BlobClient) getTagsHandleResponse(resp *http.Response) (BlobClient // Query - The Query operation enables users to select/project on blob data by providing simple query expressions. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - options - BlobClientQueryOptions contains the optional parameters for the BlobClient.Query method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
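getTagsHandleResponse (above) fills `result.BlobTags` via `runtime.UnmarshalAsXML`. For orientation, a hedged sketch of the tag-set document that decode implies, using plain `encoding/xml` and illustrative local types rather than the generated `BlobTags` model:

```go
import (
	"encoding/xml"
	"io"
)

type tag struct {
	Key   string `xml:"Key"`
	Value string `xml:"Value"`
}

type tags struct {
	XMLName xml.Name `xml:"Tags"`
	TagSet  []tag    `xml:"TagSet>Tag"`
}

// decodeTags decodes <Tags><TagSet><Tag>... into the sketch types.
func decodeTags(r io.Reader) (tags, error) {
	var t tags
	err := xml.NewDecoder(r).Decode(&t)
	return t, err
}
```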
func (client *BlobClient) Query(ctx context.Context, options *BlobClientQueryOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientQueryResponse, error) { + var err error req, err := client.queryCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, modifiedAccessConditions) if err != nil { return BlobClientQueryResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientQueryResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusPartialContent) { - return BlobClientQueryResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusPartialContent) { + err = runtime.NewResponseError(httpResp) + return BlobClientQueryResponse{}, err } - return client.queryHandleResponse(resp) + resp, err := client.queryHandleResponse(httpResp) + return resp, err } // queryCreateRequest creates the Query request. @@ -1663,38 +1711,38 @@ func (client *BlobClient) queryCreateRequest(ctx context.Context, options *BlobC } req.Raw().URL.RawQuery = reqQP.Encode() runtime.SkipBodyDownload(req) - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if cpkInfo != nil && cpkInfo.EncryptionKey != nil { - req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} - } - if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { - req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} - } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + 
req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.QueryRequest != nil { if err := runtime.MarshalAsXML(req, *options.QueryRequest); err != nil { return nil, err @@ -1707,65 +1755,75 @@ func (client *BlobClient) queryCreateRequest(ctx context.Context, options *BlobC // queryHandleResponse handles the Query response. func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQueryResponse, error) { result := BlobClientQueryResponse{Body: resp.Body} - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("Accept-Ranges"); val != "" { + result.AcceptRanges = &val + } + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) if err != nil { return BlobClientQueryResponse{}, err } - result.LastModified = &lastModified + result.BlobCommittedBlockCount = &blobCommittedBlockCount } - for hh := range resp.Header { - if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { - if result.Metadata == nil { - result.Metadata = map[string]*string{} - } - result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) + if val := resp.Header.Get("x-ms-blob-content-md5"); val != "" { + blobContentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientQueryResponse{}, err } + result.BlobContentMD5 = blobContentMD5 } - if val := resp.Header.Get("Content-Length"); val != "" { - contentLength, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) if err != nil { return BlobClientQueryResponse{}, err } - result.ContentLength = &contentLength + result.BlobSequenceNumber = &blobSequenceNumber } - if val := resp.Header.Get("Content-Type"); val != "" { - result.ContentType = &val + if val := resp.Header.Get("x-ms-blob-type"); val != "" { + result.BlobType = (*BlobType)(&val) } - if val := resp.Header.Get("Content-Range"); val != "" { - result.ContentRange = &val + if val := resp.Header.Get("Cache-Control"); val != "" { + result.CacheControl = &val } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) if err != nil { return BlobClientQueryResponse{}, err } - result.ContentMD5 = contentMD5 - } - if val := resp.Header.Get("Content-Encoding"); val != 
"" { - result.ContentEncoding = &val - } - if val := resp.Header.Get("Cache-Control"); val != "" { - result.CacheControl = &val + result.ContentCRC64 = contentCRC64 } if val := resp.Header.Get("Content-Disposition"); val != "" { result.ContentDisposition = &val } + if val := resp.Header.Get("Content-Encoding"); val != "" { + result.ContentEncoding = &val + } if val := resp.Header.Get("Content-Language"); val != "" { result.ContentLanguage = &val } - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("Content-Length"); val != "" { + contentLength, err := strconv.ParseInt(val, 10, 64) if err != nil { return BlobClientQueryResponse{}, err } - result.BlobSequenceNumber = &blobSequenceNumber + result.ContentLength = &contentLength } - if val := resp.Header.Get("x-ms-blob-type"); val != "" { - result.BlobType = (*BlobType)(&val) + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientQueryResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Content-Range"); val != "" { + result.ContentRange = &val + } + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val } if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" { copyCompletionTime, err := time.Parse(time.RFC1123, val) @@ -1774,9 +1832,6 @@ func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQu } result.CopyCompletionTime = ©CompletionTime } - if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { - result.CopyStatusDescription = &val - } if val := resp.Header.Get("x-ms-copy-id"); val != "" { result.CopyID = &val } @@ -1789,26 +1844,8 @@ func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQu if val := resp.Header.Get("x-ms-copy-status"); val != "" { result.CopyStatus = (*CopyStatusType)(&val) } - if val := resp.Header.Get("x-ms-lease-duration"); val != "" { - result.LeaseDuration = (*LeaseDurationType)(&val) - } - if val := resp.Header.Get("x-ms-lease-state"); val != "" { - result.LeaseState = (*LeaseStateType)(&val) - } - if val := resp.Header.Get("x-ms-lease-status"); val != "" { - result.LeaseStatus = (*LeaseStatusType)(&val) - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("Accept-Ranges"); val != "" { - result.AcceptRanges = &val + if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { + result.CopyStatusDescription = &val } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) @@ -1817,20 +1854,8 @@ func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQu } result.Date = &date } - if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { - blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) - blobCommittedBlockCount := int32(blobCommittedBlockCount32) - if err != nil { - return BlobClientQueryResponse{}, err - } - result.BlobCommittedBlockCount = &blobCommittedBlockCount - } - if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) - if err != nil { - return BlobClientQueryResponse{}, err 
- } - result.IsServerEncrypted = &isServerEncrypted + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { result.EncryptionKeySHA256 = &val @@ -1838,19 +1863,42 @@ func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQu if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { result.EncryptionScope = &val } - if val := resp.Header.Get("x-ms-blob-content-md5"); val != "" { - blobContentMD5, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) if err != nil { return BlobClientQueryResponse{}, err } - result.BlobContentMD5 = blobContentMD5 + result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - contentCRC64, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientQueryResponse{}, err } - result.ContentCRC64 = contentCRC64 + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = (*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) + } + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val } return result, nil } @@ -1858,23 +1906,26 @@ func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQu // ReleaseLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - leaseID - Specifies the current lease ID on the resource. // - options - BlobClientReleaseLeaseOptions contains the optional parameters for the BlobClient.ReleaseLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
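The ReleaseLease hunks that follow apply the same reordering as the rest of the file: `Accept` moves to the front, the fixed `x-ms-lease-action: release` and `x-ms-lease-id` headers sit in alphabetical position, and `x-ms-version` closes the list. A sketch of the resulting fixed header set; the direct map assignment mirrors the generated style (which bypasses `Header.Set` canonicalization), and `ServiceVersion` is assumed to equal the new "2024-08-04":

```go
import "net/http"

func setReleaseLeaseHeaders(h http.Header, leaseID string) {
	h["Accept"] = []string{"application/xml"}
	h["x-ms-lease-action"] = []string{"release"}
	h["x-ms-lease-id"] = []string{leaseID}
	h["x-ms-version"] = []string{"2024-08-04"} // ServiceVersion after this bump (assumed)
}
```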
func (client *BlobClient) ReleaseLease(ctx context.Context, leaseID string, options *BlobClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientReleaseLeaseResponse, error) { + var err error req, err := client.releaseLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) if err != nil { return BlobClientReleaseLeaseResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientReleaseLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientReleaseLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientReleaseLeaseResponse{}, err } - return client.releaseLeaseHandleResponse(resp) + resp, err := client.releaseLeaseHandleResponse(httpResp) + return resp, err } // releaseLeaseCreateRequest creates the ReleaseLease request. @@ -1888,35 +1939,45 @@ func (client *BlobClient) releaseLeaseCreateRequest(ctx context.Context, leaseID if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } - req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-lease-action"] = []string{"release"} - req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } + req.Raw().Header["x-ms-lease-action"] = []string{"release"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // releaseLeaseHandleResponse handles the ReleaseLease response. 
func (client *BlobClient) releaseLeaseHandleResponse(resp *http.Response) (BlobClientReleaseLeaseResponse, error) { result := BlobClientReleaseLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientReleaseLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -1927,45 +1988,38 @@ func (client *BlobClient) releaseLeaseHandleResponse(resp *http.Response) (BlobC } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientReleaseLeaseResponse{}, err - } - result.Date = &date - } return result, nil } // RenewLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - leaseID - Specifies the current lease ID on the resource. // - options - BlobClientRenewLeaseOptions contains the optional parameters for the BlobClient.RenewLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlobClient) RenewLease(ctx context.Context, leaseID string, options *BlobClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientRenewLeaseResponse, error) { + var err error req, err := client.renewLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) if err != nil { return BlobClientRenewLeaseResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientRenewLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientRenewLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientRenewLeaseResponse{}, err } - return client.renewLeaseHandleResponse(resp) + resp, err := client.renewLeaseHandleResponse(httpResp) + return resp, err } // renewLeaseCreateRequest creates the RenewLease request. 
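The lease hunks here only resequence how the generated ReleaseLease and RenewLease requests are assembled (conditional headers first, then x-ms-* headers in lexical order) and bump the API version to 2024-08-04; the wire protocol is unchanged. For orientation, a minimal sketch of driving these operations through the SDK's public `lease` wrapper, which delegates to this generated client; the account URL, container, and blob names are placeholders:

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/lease"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	// Placeholder blob URL; the generated BlobClient in this diff sits under blob.Client.
	blobClient, err := blob.NewClient("https://myaccount.blob.core.windows.net/mycontainer/myblob", cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	// Nil options let the wrapper generate a lease ID on our behalf.
	leaseClient, err := lease.NewBlobClient(blobClient, nil)
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	// Acquire a 60-second lease, renew it, then release it; these calls map onto
	// the acquireLease/renewLease/releaseLease request builders in the generated code.
	if _, err = leaseClient.AcquireLease(ctx, 60, nil); err != nil {
		log.Fatal(err)
	}
	if _, err = leaseClient.RenewLease(ctx, nil); err != nil {
		log.Fatal(err)
	}
	if _, err = leaseClient.ReleaseLease(ctx, nil); err != nil {
		log.Fatal(err)
	}
}
```

Production code would usually supply its own lease ID via the options struct so the lease can still be released after a process restart.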
@@ -1980,34 +2034,44 @@ func (client *BlobClient) renewLeaseCreateRequest(ctx context.Context, leaseID s reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-lease-action"] = []string{"renew"} - req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } + req.Raw().Header["x-ms-lease-action"] = []string{"renew"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // renewLeaseHandleResponse handles the RenewLease response. func (client *BlobClient) renewLeaseHandleResponse(resp *http.Response) (BlobClientRenewLeaseResponse, error) { result := BlobClientRenewLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientRenewLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -2021,44 +2085,37 @@ func (client *BlobClient) renewLeaseHandleResponse(resp *http.Response) (BlobCli if val := resp.Header.Get("x-ms-lease-id"); val != "" { result.LeaseID = &val } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientRenewLeaseResponse{}, err - } - result.Date = &date - } return result, nil } // SetExpiry - Sets the time a blob will expire and be deleted. 
// If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - expiryOptions - Required. Indicates mode of the expiry time // - options - BlobClientSetExpiryOptions contains the optional parameters for the BlobClient.SetExpiry method. func (client *BlobClient) SetExpiry(ctx context.Context, expiryOptions ExpiryOptions, options *BlobClientSetExpiryOptions) (BlobClientSetExpiryResponse, error) { + var err error req, err := client.setExpiryCreateRequest(ctx, expiryOptions, options) if err != nil { return BlobClientSetExpiryResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientSetExpiryResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientSetExpiryResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetExpiryResponse{}, err } - return client.setExpiryHandleResponse(resp) + resp, err := client.setExpiryHandleResponse(httpResp) + return resp, err } // setExpiryCreateRequest creates the SetExpiry request. @@ -2073,7 +2130,7 @@ func (client *BlobClient) setExpiryCreateRequest(ctx context.Context, expiryOpti reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -2081,13 +2138,23 @@ func (client *BlobClient) setExpiryCreateRequest(ctx context.Context, expiryOpti if options != nil && options.ExpiresOn != nil { req.Raw().Header["x-ms-expiry-time"] = []string{*options.ExpiresOn} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } // setExpiryHandleResponse handles the SetExpiry response. func (client *BlobClient) setExpiryHandleResponse(resp *http.Response) (BlobClientSetExpiryResponse, error) { result := BlobClientSetExpiryResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientSetExpiryResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -2098,46 +2165,39 @@ func (client *BlobClient) setExpiryHandleResponse(resp *http.Response) (BlobClie } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientSetExpiryResponse{}, err - } - result.Date = &date - } return result, nil } // SetHTTPHeaders - The Set HTTP Headers operation sets system properties on the blob // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - options - BlobClientSetHTTPHeadersOptions contains the optional parameters for the BlobClient.SetHTTPHeaders method. // - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlobClient) SetHTTPHeaders(ctx context.Context, options *BlobClientSetHTTPHeadersOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetHTTPHeadersResponse, error) { + var err error req, err := client.setHTTPHeadersCreateRequest(ctx, options, blobHTTPHeaders, leaseAccessConditions, modifiedAccessConditions) if err != nil { return BlobClientSetHTTPHeadersResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientSetHTTPHeadersResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientSetHTTPHeadersResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetHTTPHeadersResponse{}, err } - return client.setHTTPHeadersHandleResponse(resp) + resp, err := client.setHTTPHeadersHandleResponse(httpResp) + return resp, err } // setHTTPHeadersCreateRequest creates the SetHTTPHeaders request. @@ -2152,14 +2212,24 @@ func (client *BlobClient) setHTTPHeadersCreateRequest(ctx context.Context, optio reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { - req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} - } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { - req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} } if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding 
!= nil { req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} @@ -2167,48 +2237,28 @@ func (client *BlobClient) setHTTPHeadersCreateRequest(ctx context.Context, optio if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage} } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { + req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { - req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // setHTTPHeadersHandleResponse handles the SetHTTPHeaders response. 
func (client *BlobClient) setHTTPHeadersHandleResponse(resp *http.Response) (BlobClientSetHTTPHeadersResponse, error) { result := BlobClientSetHTTPHeadersResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientSetHTTPHeadersResponse{}, err - } - result.LastModified = &lastModified - } if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) if err != nil { @@ -2219,12 +2269,6 @@ func (client *BlobClient) setHTTPHeadersHandleResponse(resp *http.Response) (Blo if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -2232,29 +2276,48 @@ func (client *BlobClient) setHTTPHeadersHandleResponse(resp *http.Response) (Blo } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientSetHTTPHeadersResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } // SetImmutabilityPolicy - The Set Immutability Policy operation sets the immutability policy on the blob // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - options - BlobClientSetImmutabilityPolicyOptions contains the optional parameters for the BlobClient.SetImmutabilityPolicy // method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlobClient) SetImmutabilityPolicy(ctx context.Context, options *BlobClientSetImmutabilityPolicyOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetImmutabilityPolicyResponse, error) { + var err error req, err := client.setImmutabilityPolicyCreateRequest(ctx, options, modifiedAccessConditions) if err != nil { return BlobClientSetImmutabilityPolicyResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientSetImmutabilityPolicyResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientSetImmutabilityPolicyResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetImmutabilityPolicyResponse{}, err } - return client.setImmutabilityPolicyHandleResponse(resp) + resp, err := client.setImmutabilityPolicyHandleResponse(httpResp) + return resp, err } // setImmutabilityPolicyCreateRequest creates the SetImmutabilityPolicy request. 
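SetExpiry and SetHTTPHeaders get the same treatment: identical headers, new assembly order, plus the 2024-08-04 version bump. A minimal sketch of the corresponding public call, reusing a *blob.Client such as the one constructed in the lease sketch above; the header values are illustrative:

```go
package blobexamples

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

// setStandardHeaders applies typical HTTP properties to an existing blob; each
// field is serialized into one of the x-ms-blob-content-* headers built above.
func setStandardHeaders(ctx context.Context, blobClient *blob.Client) error {
	_, err := blobClient.SetHTTPHeaders(ctx, blob.HTTPHeaders{
		BlobContentType:        to.Ptr("application/json"),
		BlobCacheControl:       to.Ptr("max-age=3600"),
		BlobContentDisposition: to.Ptr(`attachment; filename="data.json"`),
	}, nil)
	return err
}
```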
@@ -2269,20 +2332,20 @@ func (client *BlobClient) setImmutabilityPolicyCreateRequest(ctx context.Context reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } + req.Raw().Header["Accept"] = []string{"application/xml"} if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if options != nil && options.ImmutabilityPolicyExpiry != nil { - req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if options != nil && options.ImmutabilityPolicyMode != nil { req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} } - req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.ImmutabilityPolicyExpiry != nil { + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -2292,12 +2355,6 @@ func (client *BlobClient) setImmutabilityPolicyHandleResponse(resp *http.Respons if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -2315,28 +2372,37 @@ func (client *BlobClient) setImmutabilityPolicyHandleResponse(resp *http.Respons if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val) } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } // SetLegalHold - The Set Legal Hold operation sets a legal hold on the blob. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - legalHold - Specified if a legal hold should be set on the blob. // - options - BlobClientSetLegalHoldOptions contains the optional parameters for the BlobClient.SetLegalHold method. 
func (client *BlobClient) SetLegalHold(ctx context.Context, legalHold bool, options *BlobClientSetLegalHoldOptions) (BlobClientSetLegalHoldResponse, error) { + var err error req, err := client.setLegalHoldCreateRequest(ctx, legalHold, options) if err != nil { return BlobClientSetLegalHoldResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientSetLegalHoldResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientSetLegalHoldResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetLegalHoldResponse{}, err } - return client.setLegalHoldHandleResponse(resp) + resp, err := client.setLegalHoldHandleResponse(httpResp) + return resp, err } // setLegalHoldCreateRequest creates the SetLegalHold request. @@ -2351,12 +2417,12 @@ func (client *BlobClient) setLegalHoldCreateRequest(ctx context.Context, legalHo reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(legalHold)} - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -2366,12 +2432,6 @@ func (client *BlobClient) setLegalHoldHandleResponse(resp *http.Response) (BlobC if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -2386,6 +2446,12 @@ func (client *BlobClient) setLegalHoldHandleResponse(resp *http.Response) (BlobC } result.LegalHold = &legalHold } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } @@ -2393,25 +2459,28 @@ func (client *BlobClient) setLegalHoldHandleResponse(resp *http.Response) (BlobC // pairs // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - options - BlobClientSetMetadataOptions contains the optional parameters for the BlobClient.SetMetadata method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. // - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *BlobClient) SetMetadata(ctx context.Context, options *BlobClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetMetadataResponse, error) { + var err error req, err := client.setMetadataCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) if err != nil { return BlobClientSetMetadataResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientSetMetadataResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientSetMetadataResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetMetadataResponse{}, err } - return client.setMetadataHandleResponse(resp) + resp, err := client.setMetadataHandleResponse(httpResp) + return resp, err } // setMetadataCreateRequest creates the SetMetadata request. @@ -2426,15 +2495,24 @@ func (client *BlobClient) setMetadataCreateRequest(ctx context.Context, options reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - if options != nil && options.Metadata != nil { - for k, v := range options.Metadata { - if v != nil { - req.Raw().Header["x-ms-meta-"+k] = []string{*v} - } - } + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} @@ -2442,60 +2520,32 @@ func (client *BlobClient) setMetadataCreateRequest(ctx context.Context, options if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } - if modifiedAccessConditions != nil && 
modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } // setMetadataHandleResponse handles the SetMetadata response. func (client *BlobClient) setMetadataHandleResponse(resp *http.Response) (BlobClientSetMetadataResponse, error) { result := BlobClientSetMetadataResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientSetMetadataResponse{}, err - } - result.LastModified = &lastModified - } if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -2503,6 +2553,15 @@ func (client *BlobClient) setMetadataHandleResponse(resp *http.Response) (BlobCl } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { @@ -2510,11 +2569,21 @@ func (client *BlobClient) setMetadataHandleResponse(resp *http.Response) (BlobCl } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return 
BlobClientSetMetadataResponse{}, err + } + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val } return result, nil } @@ -2522,24 +2591,27 @@ func (client *BlobClient) setMetadataHandleResponse(resp *http.Response) (BlobCl // SetTags - The Set Tags operation enables users to set tags on a blob. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - tags - Blob tags // - options - BlobClientSetTagsOptions contains the optional parameters for the BlobClient.SetTags method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. func (client *BlobClient) SetTags(ctx context.Context, tags BlobTags, options *BlobClientSetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientSetTagsResponse, error) { + var err error req, err := client.setTagsCreateRequest(ctx, tags, options, modifiedAccessConditions, leaseAccessConditions) if err != nil { return BlobClientSetTagsResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientSetTagsResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusNoContent) { - return BlobClientSetTagsResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusNoContent) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetTagsResponse{}, err } - return client.setTagsHandleResponse(resp) + resp, err := client.setTagsHandleResponse(httpResp) + return resp, err } // setTagsCreateRequest creates the SetTags request. 
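The SetMetadata hunks above and the SetTags hunks below follow the same reordering pattern: metadata travels as x-ms-meta-* headers, while tags are marshaled into an XML body. A combined sketch of both public calls, with metadata keys and tag values invented for illustration:

```go
package blobexamples

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

// labelBlob sets user metadata (sent as x-ms-meta-* request headers) and then
// blob index tags (sent as XML, as marshaled in setTagsCreateRequest).
func labelBlob(ctx context.Context, blobClient *blob.Client) error {
	if _, err := blobClient.SetMetadata(ctx, map[string]*string{
		"team": to.Ptr("storage"),
		"env":  to.Ptr("dev"),
	}, nil); err != nil {
		return err
	}
	_, err := blobClient.SetTags(ctx, map[string]string{"project": "tempo"}, nil)
	return err
}
```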
@@ -2557,23 +2629,23 @@ func (client *BlobClient) setTagsCreateRequest(ctx context.Context, tags BlobTag reqQP.Set("versionid", *options.VersionID) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.TransactionalContentMD5 != nil { req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} } - if options != nil && options.TransactionalContentCRC64 != nil { - req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} - } if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } + if options != nil && options.TransactionalContentCRC64 != nil { + req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if err := runtime.MarshalAsXML(req, tags); err != nil { return nil, err } @@ -2586,12 +2658,6 @@ func (client *BlobClient) setTagsHandleResponse(resp *http.Response) (BlobClient if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -2599,6 +2665,12 @@ func (client *BlobClient) setTagsHandleResponse(resp *http.Response) (BlobClient } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } @@ -2608,24 +2680,27 @@ func (client *BlobClient) setTagsHandleResponse(resp *http.Response) (BlobClient // storage type. This operation does not update the blob's ETag. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - tier - Indicates the tier to be set on the blob. // - options - BlobClientSetTierOptions contains the optional parameters for the BlobClient.SetTier method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *BlobClient) SetTier(ctx context.Context, tier AccessTier, options *BlobClientSetTierOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetTierResponse, error) { + var err error req, err := client.setTierCreateRequest(ctx, tier, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { return BlobClientSetTierResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientSetTierResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { - return BlobClientSetTierResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetTierResponse{}, err } - return client.setTierHandleResponse(resp) + resp, err := client.setTierHandleResponse(httpResp) + return resp, err } // setTierCreateRequest creates the SetTier request. @@ -2639,28 +2714,28 @@ func (client *BlobClient) setTierCreateRequest(ctx context.Context, tier AccessT if options != nil && options.Snapshot != nil { reqQP.Set("snapshot", *options.Snapshot) } - if options != nil && options.VersionID != nil { - reqQP.Set("versionid", *options.VersionID) - } if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } + if options != nil && options.VersionID != nil { + reqQP.Set("versionid", *options.VersionID) + } req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} req.Raw().Header["x-ms-access-tier"] = []string{string(tier)} - if options != nil && options.RehydratePriority != nil { - req.Raw().Header["x-ms-rehydrate-priority"] = []string{string(*options.RehydratePriority)} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + if options != nil && options.RehydratePriority != nil { + req.Raw().Header["x-ms-rehydrate-priority"] = []string{string(*options.RehydratePriority)} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -2682,7 +2757,7 @@ func (client *BlobClient) setTierHandleResponse(resp *http.Response) (BlobClient // StartCopyFromURL - The Start Copy From URL operation copies a blob or an internet resource to a new blob. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies // a page blob snapshot. The value should be URL-encoded as it would appear in a request // URI. The source blob must either be public or must be authenticated via a shared access signature. 
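SetTier is reordered the same way; as its doc comment notes, the operation does not update the blob's ETag. A minimal sketch, assuming the same placeholder *blob.Client as above:

```go
package blobexamples

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

// archiveBlob moves a blob to the Archive tier; rehydrate priority and lease
// conditions can be threaded through blob.SetTierOptions when required.
func archiveBlob(ctx context.Context, blobClient *blob.Client) error {
	_, err := blobClient.SetTier(ctx, blob.AccessTierArchive, nil)
	return err
}
```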
@@ -2692,18 +2767,21 @@ func (client *BlobClient) setTierHandleResponse(resp *http.Response) (BlobClient // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. func (client *BlobClient) StartCopyFromURL(ctx context.Context, copySource string, options *BlobClientStartCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientStartCopyFromURLResponse, error) { + var err error req, err := client.startCopyFromURLCreateRequest(ctx, copySource, options, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions) if err != nil { return BlobClientStartCopyFromURLResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientStartCopyFromURLResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return BlobClientStartCopyFromURLResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return BlobClientStartCopyFromURLResponse{}, err } - return client.startCopyFromURLHandleResponse(resp) + resp, err := client.startCopyFromURLHandleResponse(httpResp) + return resp, err } // startCopyFromURLCreateRequest creates the StartCopyFromURL request. @@ -2717,6 +2795,41 @@ func (client *BlobClient) startCopyFromURLCreateRequest(ctx context.Context, cop reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.Tier != nil { + req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-copy-source"] = []string{copySource} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} + } + if options != nil && options.ImmutabilityPolicyExpiry != nil { + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} + } + if 
leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if options != nil && options.LegalHold != nil { + req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} + } if options != nil && options.Metadata != nil { for k, v := range options.Metadata { if v != nil { @@ -2724,72 +2837,53 @@ func (client *BlobClient) startCopyFromURLCreateRequest(ctx context.Context, cop } } } - if options != nil && options.Tier != nil { - req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} - } if options != nil && options.RehydratePriority != nil { req.Raw().Header["x-ms-rehydrate-priority"] = []string{string(*options.RehydratePriority)} } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + if options != nil && options.SealBlob != nil { + req.Raw().Header["x-ms-seal-blob"] = []string{strconv.FormatBool(*options.SealBlob)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} + } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfTags != nil { req.Raw().Header["x-ms-source-if-tags"] = []string{*sourceModifiedAccessConditions.SourceIfTags} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} - } - req.Raw().Header["x-ms-copy-source"] = []string{copySource} - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - req.Raw().Header["x-ms-version"] = 
[]string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if options != nil && options.BlobTagsString != nil { req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} } - if options != nil && options.SealBlob != nil { - req.Raw().Header["x-ms-seal-blob"] = []string{strconv.FormatBool(*options.SealBlob)} - } - if options != nil && options.ImmutabilityPolicyExpiry != nil { - req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} - } - if options != nil && options.ImmutabilityPolicyMode != nil { - req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} - } - if options != nil && options.LegalHold != nil { - req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} - } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } // startCopyFromURLHandleResponse handles the StartCopyFromURL response. func (client *BlobClient) startCopyFromURLHandleResponse(resp *http.Response) (BlobClientStartCopyFromURLResponse, error) { result := BlobClientStartCopyFromURLResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientStartCopyFromURLResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -2800,9 +2894,6 @@ func (client *BlobClient) startCopyFromURLHandleResponse(resp *http.Response) (B } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } @@ -2812,40 +2903,30 @@ func (client *BlobClient) startCopyFromURLHandleResponse(resp *http.Response) (B if val := resp.Header.Get("x-ms-version-id"); val != "" { result.VersionID = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientStartCopyFromURLResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-copy-id"); val != "" { - result.CopyID = &val - } - if val := resp.Header.Get("x-ms-copy-status"); val != "" { - result.CopyStatus = (*CopyStatusType)(&val) - } return result, nil } // Undelete - Undelete a blob that was previously soft deleted // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - options - BlobClientUndeleteOptions contains the optional parameters for the BlobClient.Undelete method. 
func (client *BlobClient) Undelete(ctx context.Context, options *BlobClientUndeleteOptions) (BlobClientUndeleteResponse, error) { + var err error req, err := client.undeleteCreateRequest(ctx, options) if err != nil { return BlobClientUndeleteResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientUndeleteResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientUndeleteResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientUndeleteResponse{}, err } - return client.undeleteHandleResponse(resp) + resp, err := client.undeleteHandleResponse(httpResp) + return resp, err } // undeleteCreateRequest creates the Undelete request. @@ -2860,11 +2941,11 @@ func (client *BlobClient) undeleteCreateRequest(ctx context.Context, options *Bl reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -2874,12 +2955,6 @@ func (client *BlobClient) undeleteHandleResponse(resp *http.Response) (BlobClien if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -2887,5 +2962,11 @@ func (client *BlobClient) undeleteHandleResponse(resp *http.Response) (BlobClien } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go index 6b2def53f8f..6768a1143f8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go @@ -1,11 +1,7 @@ -//go:build go1.18 -// +build go1.18 - // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -37,7 +33,7 @@ type BlockBlobClient struct { // belong to. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - blocks - Blob Blocks. 
 // - options - BlockBlobClientCommitBlockListOptions contains the optional parameters for the BlockBlobClient.CommitBlockList
 // method.
@@ -47,18 +43,21 @@ type BlockBlobClient struct {
 // - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method.
 // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
 func (client *BlockBlobClient) CommitBlockList(ctx context.Context, blocks BlockLookupList, options *BlockBlobClientCommitBlockListOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientCommitBlockListResponse, error) {
+	var err error
 	req, err := client.commitBlockListCreateRequest(ctx, blocks, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
 	if err != nil {
 		return BlockBlobClientCommitBlockListResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return BlockBlobClientCommitBlockListResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusCreated) {
-		return BlockBlobClientCommitBlockListResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusCreated) {
+		err = runtime.NewResponseError(httpResp)
+		return BlockBlobClientCommitBlockListResponse{}, err
 	}
-	return client.commitBlockListHandleResponse(resp)
+	resp, err := client.commitBlockListHandleResponse(httpResp)
+	return resp, err
 }

 // commitBlockListCreateRequest creates the CommitBlockList request.
@@ -73,11 +72,30 @@ func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context,
 		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
 	}
 	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["Accept"] = []string{"application/xml"}
+	if options != nil && options.TransactionalContentMD5 != nil {
+		req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+		req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
+		req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
+		req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
+		req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
+	}
+	if options != nil && options.Tier != nil {
+		req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)}
+	}
 	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil {
 		req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl}
 	}
-	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil {
-		req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType}
+	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil {
+		req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition}
 	}
 	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil {
 		req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding}
@@ -88,24 +106,17 @@ func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context,
 	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil {
 		req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)}
 	}
-	if options != nil && options.TransactionalContentMD5 != nil {
-		req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)}
+	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil {
+		req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType}
+	}
+	if options != nil && options.RequestID != nil {
+		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
 	}
 	if options != nil && options.TransactionalContentCRC64 != nil {
 		req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)}
 	}
-	if options != nil && options.Metadata != nil {
-		for k, v := range options.Metadata {
-			if v != nil {
-				req.Raw().Header["x-ms-meta-"+k] = []string{*v}
-			}
-		}
-	}
-	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
-		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
-	}
-	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil {
-		req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition}
+	if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
+		req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
 	}
 	if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
 		req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
@@ -113,47 +124,35 @@ func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context,
 	if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
 		req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
 	}
-	if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
-		req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
-	}
 	if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
 		req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
 	}
-	if options != nil && options.Tier != nil {
-		req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)}
-	}
-	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
-		req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
-	}
-	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
-		req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
-	}
-	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
-		req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
-	}
-	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
-		req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
-	}
 	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - if options != nil && options.BlobTagsString != nil { - req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} } if options != nil && options.ImmutabilityPolicyExpiry != nil { req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} } - if options != nil && options.ImmutabilityPolicyMode != nil { - req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if options != nil && options.LegalHold != nil { req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} } - req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if err := runtime.MarshalAsXML(req, blocks); err != nil { return nil, err } @@ -163,15 +162,15 @@ func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context, // commitBlockListHandleResponse handles the CommitBlockList response. 
 func (client *BlockBlobClient) commitBlockListHandleResponse(resp *http.Response) (BlockBlobClientCommitBlockListResponse, error) {
 	result := BlockBlobClientCommitBlockListResponse{}
-	if val := resp.Header.Get("ETag"); val != "" {
-		result.ETag = (*azcore.ETag)(&val)
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
 	}
-	if val := resp.Header.Get("Last-Modified"); val != "" {
-		lastModified, err := time.Parse(time.RFC1123, val)
+	if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
+		contentCRC64, err := base64.StdEncoding.DecodeString(val)
 		if err != nil {
 			return BlockBlobClientCommitBlockListResponse{}, err
 		}
-		result.LastModified = &lastModified
+		result.ContentCRC64 = contentCRC64
 	}
 	if val := resp.Header.Get("Content-MD5"); val != "" {
 		contentMD5, err := base64.StdEncoding.DecodeString(val)
@@ -180,44 +179,44 @@ func (client *BlockBlobClient) commitBlockListHandleResponse(resp *http.Response
 		}
 		result.ContentMD5 = contentMD5
 	}
-	if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
-		contentCRC64, err := base64.StdEncoding.DecodeString(val)
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
 		if err != nil {
 			return BlockBlobClientCommitBlockListResponse{}, err
 		}
-		result.ContentCRC64 = contentCRC64
-	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
+		result.Date = &date
 	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = (*azcore.ETag)(&val)
 	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
+	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
+		result.EncryptionKeySHA256 = &val
 	}
-	if val := resp.Header.Get("x-ms-version-id"); val != "" {
-		result.VersionID = &val
+	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
+		result.EncryptionScope = &val
 	}
-	if val := resp.Header.Get("Date"); val != "" {
-		date, err := time.Parse(time.RFC1123, val)
+	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
+		isServerEncrypted, err := strconv.ParseBool(val)
 		if err != nil {
 			return BlockBlobClientCommitBlockListResponse{}, err
 		}
-		result.Date = &date
+		result.IsServerEncrypted = &isServerEncrypted
 	}
-	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
-		isServerEncrypted, err := strconv.ParseBool(val)
+	if val := resp.Header.Get("Last-Modified"); val != "" {
+		lastModified, err := time.Parse(time.RFC1123, val)
 		if err != nil {
 			return BlockBlobClientCommitBlockListResponse{}, err
 		}
-		result.IsServerEncrypted = &isServerEncrypted
+		result.LastModified = &lastModified
 	}
-	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
-		result.EncryptionKeySHA256 = &val
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
 	}
-	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
-		result.EncryptionScope = &val
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
+	if val := resp.Header.Get("x-ms-version-id"); val != "" {
+		result.VersionID = &val
 	}
 	return result, nil
 }
@@ -225,24 +224,27 @@ func (client *BlockBlobClient) commitBlockListHandleResponse(resp *http.Response
 // GetBlockList - The Get Block List operation retrieves the list of blocks that have been uploaded as part of a block blob
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-08-03
+// Generated from API version 2024-08-04
 // - listType - Specifies whether to return the list of committed blocks, the list of uncommitted blocks, or both lists together.
 // - options - BlockBlobClientGetBlockListOptions contains the optional parameters for the BlockBlobClient.GetBlockList method.
 // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
 // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
 func (client *BlockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, options *BlockBlobClientGetBlockListOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientGetBlockListResponse, error) {
+	var err error
 	req, err := client.getBlockListCreateRequest(ctx, listType, options, leaseAccessConditions, modifiedAccessConditions)
 	if err != nil {
 		return BlockBlobClientGetBlockListResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return BlockBlobClientGetBlockListResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusOK) {
-		return BlockBlobClientGetBlockListResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusOK) {
+		err = runtime.NewResponseError(httpResp)
+		return BlockBlobClientGetBlockListResponse{}, err
 	}
-	return client.getBlockListHandleResponse(resp)
+	resp, err := client.getBlockListHandleResponse(httpResp)
+	return resp, err
 }

 // getBlockListCreateRequest creates the GetBlockList request.
@@ -252,54 +254,61 @@ func (client *BlockBlobClient) getBlockListCreateRequest(ctx context.Context, li
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
+	reqQP.Set("blocklisttype", string(listType))
 	reqQP.Set("comp", "blocklist")
 	if options != nil && options.Snapshot != nil {
 		reqQP.Set("snapshot", *options.Snapshot)
 	}
-	reqQP.Set("blocklisttype", string(listType))
 	if options != nil && options.Timeout != nil {
 		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
 	}
 	req.Raw().URL.RawQuery = reqQP.Encode()
-	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
-		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
+	req.Raw().Header["Accept"] = []string{"application/xml"}
+	if options != nil && options.RequestID != nil {
+		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
 	}
 	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
 		req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
 	}
-	req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
-	if options != nil && options.RequestID != nil {
-		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
 	}
-	req.Raw().Header["Accept"] = []string{"application/xml"}
+	req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
 	return req, nil
 }

 // getBlockListHandleResponse handles the GetBlockList response.
 func (client *BlockBlobClient) getBlockListHandleResponse(resp *http.Response) (BlockBlobClientGetBlockListResponse, error) {
 	result := BlockBlobClientGetBlockListResponse{}
-	if val := resp.Header.Get("Last-Modified"); val != "" {
-		lastModified, err := time.Parse(time.RFC1123, val)
+	if val := resp.Header.Get("x-ms-blob-content-length"); val != "" {
+		blobContentLength, err := strconv.ParseInt(val, 10, 64)
 		if err != nil {
 			return BlockBlobClientGetBlockListResponse{}, err
 		}
-		result.LastModified = &lastModified
+		result.BlobContentLength = &blobContentLength
 	}
-	if val := resp.Header.Get("ETag"); val != "" {
-		result.ETag = (*azcore.ETag)(&val)
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
 	}
 	if val := resp.Header.Get("Content-Type"); val != "" {
 		result.ContentType = &val
 	}
-	if val := resp.Header.Get("x-ms-blob-content-length"); val != "" {
-		blobContentLength, err := strconv.ParseInt(val, 10, 64)
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
 		if err != nil {
 			return BlockBlobClientGetBlockListResponse{}, err
 		}
-		result.BlobContentLength = &blobContentLength
+		result.Date = &date
 	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = (*azcore.ETag)(&val)
+	}
+	if val := resp.Header.Get("Last-Modified"); val != "" {
+		lastModified, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return BlockBlobClientGetBlockListResponse{}, err
+		}
+		result.LastModified = &lastModified
 	}
 	if val := resp.Header.Get("x-ms-request-id"); val != "" {
 		result.RequestID = &val
@@ -307,13 +316,6 @@ func (client *BlockBlobClient) getBlockListHandleResponse(resp *http.Response) (
 	if val := resp.Header.Get("x-ms-version"); val != "" {
 		result.Version = &val
 	}
-	if val := resp.Header.Get("Date"); val != "" {
-		date, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return BlockBlobClientGetBlockListResponse{}, err
-		}
-		result.Date = &date
-	}
 	if err := runtime.UnmarshalAsXML(resp, &result.BlockList); err != nil {
 		return BlockBlobClientGetBlockListResponse{}, err
 	}
@@ -327,7 +329,7 @@ func (client *BlockBlobClient) getBlockListHandleResponse(resp *http.Response) (
 // Block from URL API in conjunction with Put Block List.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-08-03
+// Generated from API version 2024-08-04
 // - contentLength - The length of the request.
 // - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies
 // a page blob snapshot. The value should be URL-encoded as it would appear in a request
@@ -342,18 +344,21 @@ func (client *BlockBlobClient) getBlockListHandleResponse(resp *http.Response) (
 // - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL
 // method.
 func (client *BlockBlobClient) PutBlobFromURL(ctx context.Context, contentLength int64, copySource string, options *BlockBlobClientPutBlobFromURLOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (BlockBlobClientPutBlobFromURLResponse, error) {
+	var err error
 	req, err := client.putBlobFromURLCreateRequest(ctx, contentLength, copySource, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions, sourceModifiedAccessConditions)
 	if err != nil {
 		return BlockBlobClientPutBlobFromURLResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return BlockBlobClientPutBlobFromURLResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusCreated) {
-		return BlockBlobClientPutBlobFromURLResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusCreated) {
+		err = runtime.NewResponseError(httpResp)
+		return BlockBlobClientPutBlobFromURLResponse{}, err
 	}
-	return client.putBlobFromURLHandleResponse(resp)
+	resp, err := client.putBlobFromURLHandleResponse(httpResp)
+	return resp, err
 }

 // putBlobFromURLCreateRequest creates the PutBlobFromURL request.
@@ -367,13 +372,31 @@ func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context,
 		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
 	}
 	req.Raw().URL.RawQuery = reqQP.Encode()
-	req.Raw().Header["x-ms-blob-type"] = []string{"BlockBlob"}
+	req.Raw().Header["Accept"] = []string{"application/xml"}
+	req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)}
 	if options != nil && options.TransactionalContentMD5 != nil {
 		req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)}
 	}
-	req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)}
-	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil {
-		req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+		req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
+		req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
+		req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+	}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
+		req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
+	}
+	if options != nil && options.Tier != nil {
+		req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)}
+	}
+	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil {
+		req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl}
+	}
+	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil {
+		req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition}
 	}
 	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil {
 		req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding}
@@ -384,21 +407,25 @@ func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context,
 	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil {
 		req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)}
 	}
-	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil {
-		req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl}
+	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil {
+		req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType}
 	}
-	if options != nil && options.Metadata != nil {
-		for k, v := range options.Metadata {
-			if v != nil {
-				req.Raw().Header["x-ms-meta-"+k] = []string{*v}
-			}
-		}
+	req.Raw().Header["x-ms-blob-type"] = []string{"BlockBlob"}
+	if options != nil && options.RequestID != nil {
+		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
 	}
-	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
-		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
+	req.Raw().Header["x-ms-copy-source"] = []string{copySource}
+	if options != nil && options.CopySourceAuthorization != nil {
+		req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization}
 	}
-	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil {
-		req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition}
+	if options != nil && options.CopySourceBlobProperties != nil {
+		req.Raw().Header["x-ms-copy-source-blob-properties"] = []string{strconv.FormatBool(*options.CopySourceBlobProperties)}
+	}
+	if options != nil && options.CopySourceTags != nil {
+		req.Raw().Header["x-ms-copy-source-tag-option"] = []string{string(*options.CopySourceTags)}
+	}
+	if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
+		req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
 	}
 	if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
 		req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
@@ -406,81 +433,52 @@ func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context,
 	if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
 		req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
 	}
-	if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
-		req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
-	}
 	if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
 		req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
 	}
-	if options != nil && options.Tier != nil {
-		req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)}
-	}
-	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
-		req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
+	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
+		req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
 	}
-	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
-		req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
+	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
 	}
-	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
-		req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
+	if options != nil && options.Metadata != nil {
+		for k, v := range options.Metadata {
+			if v != nil {
+				req.Raw().Header["x-ms-meta-"+k] = []string{*v}
+			}
+		}
 	}
-	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
-		req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+	if options != nil && options.SourceContentMD5 != nil {
+		req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)}
 	}
-	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
-		req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
+	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil {
+		req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)}
 	}
 	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil {
 		req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)}
 	}
-	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil {
-		req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
-	}
-	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil {
-		req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)}
-	}
 	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil {
 		req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)}
 	}
 	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfTags != nil {
 		req.Raw().Header["x-ms-source-if-tags"] = []string{*sourceModifiedAccessConditions.SourceIfTags}
 	}
-	req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
-	if options != nil && options.RequestID != nil {
-		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
-	}
-	if options != nil && options.SourceContentMD5 != nil {
-		req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)}
+	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil {
+		req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
 	}
 	if options != nil && options.BlobTagsString != nil {
 		req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString}
 	}
-	req.Raw().Header["x-ms-copy-source"] = []string{copySource}
-	if options != nil && options.CopySourceBlobProperties != nil {
-		req.Raw().Header["x-ms-copy-source-blob-properties"] = []string{strconv.FormatBool(*options.CopySourceBlobProperties)}
-	}
-	if options != nil && options.CopySourceAuthorization != nil {
-		req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization}
-	}
-	if options != nil && options.CopySourceTags != nil {
-		req.Raw().Header["x-ms-copy-source-tag-option"] = []string{string(*options.CopySourceTags)}
-	}
-	req.Raw().Header["Accept"] = []string{"application/xml"}
+	req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
 	return req, nil
 }

 // putBlobFromURLHandleResponse handles the PutBlobFromURL response.
 func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Response) (BlockBlobClientPutBlobFromURLResponse, error) {
 	result := BlockBlobClientPutBlobFromURLResponse{}
-	if val := resp.Header.Get("ETag"); val != "" {
-		result.ETag = (*azcore.ETag)(&val)
-	}
-	if val := resp.Header.Get("Last-Modified"); val != "" {
-		lastModified, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return BlockBlobClientPutBlobFromURLResponse{}, err
-		}
-		result.LastModified = &lastModified
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
 	}
 	if val := resp.Header.Get("Content-MD5"); val != "" {
 		contentMD5, err := base64.StdEncoding.DecodeString(val)
@@ -489,18 +487,6 @@ func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Response)
 		}
 		result.ContentMD5 = contentMD5
 	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
-	}
-	if val := resp.Header.Get("x-ms-version-id"); val != "" {
-		result.VersionID = &val
-	}
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
 		if err != nil {
@@ -508,6 +494,15 @@ func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Response)
 		}
 		result.Date = &date
 	}
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = (*azcore.ETag)(&val)
+	}
+	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
+		result.EncryptionKeySHA256 = &val
+	}
+	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
+		result.EncryptionScope = &val
+	}
 	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
 		isServerEncrypted, err := strconv.ParseBool(val)
 		if err != nil {
@@ -515,11 +510,21 @@ func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Response)
 		}
 		result.IsServerEncrypted = &isServerEncrypted
 	}
-	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
-		result.EncryptionKeySHA256 = &val
+	if val := resp.Header.Get("Last-Modified"); val != "" {
+		lastModified, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return BlockBlobClientPutBlobFromURLResponse{}, err
+		}
+		result.LastModified = &lastModified
 	}
-	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
-		result.EncryptionScope = &val
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
+	if val := resp.Header.Get("x-ms-version-id"); val != "" {
+		result.VersionID = &val
 	}
 	return result, nil
 }
@@ -527,7 +532,7 @@ func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Response)
 // StageBlock - The Stage Block operation creates a new block to be committed as part of a blob
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-08-03
+// Generated from API version 2024-08-04
 // - blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal
 // to 64 bytes in size. For a given blob, the length of the value specified for the blockid
 // parameter must be the same size for each block.
@@ -538,18 +543,21 @@ func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Respon
 // - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method.
 // - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method.
 func (client *BlockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientStageBlockOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo) (BlockBlobClientStageBlockResponse, error) {
+	var err error
 	req, err := client.stageBlockCreateRequest(ctx, blockID, contentLength, body, options, leaseAccessConditions, cpkInfo, cpkScopeInfo)
 	if err != nil {
 		return BlockBlobClientStageBlockResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return BlockBlobClientStageBlockResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusCreated) {
-		return BlockBlobClientStageBlockResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusCreated) {
+		err = runtime.NewResponseError(httpResp)
+		return BlockBlobClientStageBlockResponse{}, err
 	}
-	return client.stageBlockHandleResponse(resp)
+	resp, err := client.stageBlockHandleResponse(httpResp)
+	return resp, err
 }

 // stageBlockCreateRequest creates the StageBlock request.
@@ -559,21 +567,25 @@ func (client *BlockBlobClient) stageBlockCreateRequest(ctx context.Context, bloc
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
-	reqQP.Set("comp", "block")
 	reqQP.Set("blockid", blockID)
+	reqQP.Set("comp", "block")
 	if options != nil && options.Timeout != nil {
 		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
 	}
 	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["Accept"] = []string{"application/xml"}
 	req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)}
 	if options != nil && options.TransactionalContentMD5 != nil {
 		req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)}
 	}
+	if options != nil && options.RequestID != nil {
+		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+	}
 	if options != nil && options.TransactionalContentCRC64 != nil {
 		req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)}
 	}
-	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
-		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
+	if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
+		req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
 	}
 	if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
 		req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
@@ -581,17 +593,13 @@ func (client *BlockBlobClient) stageBlockCreateRequest(ctx context.Context, bloc
 	if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
 		req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
 	}
-	if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
-		req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
-	}
 	if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
 		req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
 	}
-	req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
-	if options != nil && options.RequestID != nil {
-		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
 	}
-	req.Raw().Header["Accept"] = []string{"application/xml"}
+	req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
 	if err := req.SetBody(body, "application/octet-stream"); err != nil {
 		return nil, err
 	}
@@ -601,6 +609,16 @@ func (client *BlockBlobClient) stageBlockCreateRequest(ctx context.Context, bloc
 // stageBlockHandleResponse handles the StageBlock response.
 func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (BlockBlobClientStageBlockResponse, error) {
 	result := BlockBlobClientStageBlockResponse{}
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
+		contentCRC64, err := base64.StdEncoding.DecodeString(val)
+		if err != nil {
+			return BlockBlobClientStageBlockResponse{}, err
+		}
+		result.ContentCRC64 = contentCRC64
+	}
 	if val := resp.Header.Get("Content-MD5"); val != "" {
 		contentMD5, err := base64.StdEncoding.DecodeString(val)
 		if err != nil {
@@ -608,15 +626,6 @@ func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (Bl
 		}
 		result.ContentMD5 = contentMD5
 	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
-	}
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
 		if err != nil {
@@ -624,12 +633,11 @@ func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (Bl
 		}
 		result.Date = &date
 	}
-	if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
-		contentCRC64, err := base64.StdEncoding.DecodeString(val)
-		if err != nil {
-			return BlockBlobClientStageBlockResponse{}, err
-		}
-		result.ContentCRC64 = contentCRC64
+	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
+		result.EncryptionKeySHA256 = &val
+	}
+	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
+		result.EncryptionScope = &val
 	}
 	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
 		isServerEncrypted, err := strconv.ParseBool(val)
@@ -638,11 +646,11 @@ func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (Bl
 		}
 		result.IsServerEncrypted = &isServerEncrypted
 	}
-	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
-		result.EncryptionKeySHA256 = &val
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
 	}
-	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
-		result.EncryptionScope = &val
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
 	}
 	return result, nil
 }
@@ -651,7 +659,7 @@ func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (Bl
 // are read from a URL.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-08-03
+// Generated from API version 2024-08-04
 // - blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal
 // to 64 bytes in size. For a given blob, the length of the value specified for the blockid
 // parameter must be the same size for each block.
@@ -665,18 +673,21 @@ func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (Bl
 // - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL
 // method.
 func (client *BlockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL string, options *BlockBlobClientStageBlockFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (BlockBlobClientStageBlockFromURLResponse, error) {
+	var err error
 	req, err := client.stageBlockFromURLCreateRequest(ctx, blockID, contentLength, sourceURL, options, cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions)
 	if err != nil {
 		return BlockBlobClientStageBlockFromURLResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return BlockBlobClientStageBlockFromURLResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusCreated) {
-		return BlockBlobClientStageBlockFromURLResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusCreated) {
+		err = runtime.NewResponseError(httpResp)
+		return BlockBlobClientStageBlockFromURLResponse{}, err
 	}
-	return client.stageBlockFromURLHandleResponse(resp)
+	resp, err := client.stageBlockFromURLHandleResponse(httpResp)
+	return resp, err
 }

 // stageBlockFromURLCreateRequest creates the StageBlockFromURL request.
@@ -686,22 +697,23 @@ func (client *BlockBlobClient) stageBlockFromURLCreateRequest(ctx context.Contex
 		return nil, err
 	}
 	reqQP := req.Raw().URL.Query()
-	reqQP.Set("comp", "block")
 	reqQP.Set("blockid", blockID)
+	reqQP.Set("comp", "block")
 	if options != nil && options.Timeout != nil {
 		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
 	}
 	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["Accept"] = []string{"application/xml"}
 	req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)}
-	req.Raw().Header["x-ms-copy-source"] = []string{sourceURL}
-	if options != nil && options.SourceRange != nil {
-		req.Raw().Header["x-ms-source-range"] = []string{*options.SourceRange}
+	if options != nil && options.RequestID != nil {
+		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
 	}
-	if options != nil && options.SourceContentMD5 != nil {
-		req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)}
+	req.Raw().Header["x-ms-copy-source"] = []string{sourceURL}
+	if options != nil && options.CopySourceAuthorization != nil {
+		req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization}
 	}
-	if options != nil && options.SourceContentcrc64 != nil {
-		req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)}
+	if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
+		req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
 	}
 	if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
 		req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
@@ -709,47 +721,42 @@ func (client *BlockBlobClient) stageBlockFromURLCreateRequest(ctx context.Contex
 	if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
 		req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
 	}
-	if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
-		req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
-	}
 	if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} + if options != nil && options.SourceContentcrc64 != nil { + req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)} } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + if options != nil && options.SourceContentMD5 != nil { + req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} + } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if options != nil && options.CopySourceAuthorization != nil { - req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} + if options != nil && options.SourceRange != nil { + req.Raw().Header["x-ms-source-range"] = []string{*options.SourceRange} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } // stageBlockFromURLHandleResponse handles the StageBlockFromURL response. 
 func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Response) (BlockBlobClientStageBlockFromURLResponse, error) {
 	result := BlockBlobClientStageBlockFromURLResponse{}
-	if val := resp.Header.Get("Content-MD5"); val != "" {
-		contentMD5, err := base64.StdEncoding.DecodeString(val)
-		if err != nil {
-			return BlockBlobClientStageBlockFromURLResponse{}, err
-		}
-		result.ContentMD5 = contentMD5
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
 	}
 	if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
 		contentCRC64, err := base64.StdEncoding.DecodeString(val)
@@ -758,14 +765,12 @@ func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Respon
 		}
 		result.ContentCRC64 = contentCRC64
 	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
+	if val := resp.Header.Get("Content-MD5"); val != "" {
+		contentMD5, err := base64.StdEncoding.DecodeString(val)
+		if err != nil {
+			return BlockBlobClientStageBlockFromURLResponse{}, err
+		}
+		result.ContentMD5 = contentMD5
 	}
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
@@ -774,6 +779,12 @@ func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Respon
 		}
 		result.Date = &date
 	}
+	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
+		result.EncryptionKeySHA256 = &val
+	}
+	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
+		result.EncryptionScope = &val
+	}
 	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
 		isServerEncrypted, err := strconv.ParseBool(val)
 		if err != nil {
@@ -781,11 +792,11 @@ func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Respon
 		}
 		result.IsServerEncrypted = &isServerEncrypted
 	}
-	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
-		result.EncryptionKeySHA256 = &val
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
 	}
-	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
-		result.EncryptionScope = &val
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
 	}
 	return result, nil
 }
@@ -796,7 +807,7 @@ func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Respon
 // the content of a block blob, use the Put Block List operation.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-08-03
+// Generated from API version 2024-08-04
 // - contentLength - The length of the request.
 // - body - Initial data
 // - options - BlockBlobClientUploadOptions contains the optional parameters for the BlockBlobClient.Upload method.
@@ -806,18 +817,21 @@ func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Respon
 // - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method.
 // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
func (client *BlockBlobClient) Upload(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientUploadOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientUploadResponse, error) { + var err error req, err := client.uploadCreateRequest(ctx, contentLength, body, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) if err != nil { return BlockBlobClientUploadResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlockBlobClientUploadResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return BlockBlobClientUploadResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return BlockBlobClientUploadResponse{}, err } - return client.uploadHandleResponse(resp) + resp, err := client.uploadHandleResponse(httpResp) + return resp, err } // uploadCreateRequest creates the Upload request. @@ -831,13 +845,31 @@ func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentL reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-blob-type"] = []string{"BlockBlob"} + req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} if options != nil && options.TransactionalContentMD5 != nil { req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} } - req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { - req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.Tier != nil { + req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} } if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} @@ -848,21 +880,18 @@ func (client 
*BlockBlobClient) uploadCreateRequest(ctx context.Context, contentL if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { - req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} } - if options != nil && options.Metadata != nil { - for k, v := range options.Metadata { - if v != nil { - req.Raw().Header["x-ms-meta-"+k] = []string{*v} - } - } + req.Raw().Header["x-ms-blob-type"] = []string{"BlockBlob"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + if options != nil && options.TransactionalContentCRC64 != nil { + req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { - req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} @@ -870,50 +899,35 @@ func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentL if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } - if options != nil && options.Tier != nil { - req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = 
[]string{*options.RequestID} - } - if options != nil && options.BlobTagsString != nil { - req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} } if options != nil && options.ImmutabilityPolicyExpiry != nil { req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} } - if options != nil && options.ImmutabilityPolicyMode != nil { - req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if options != nil && options.LegalHold != nil { req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} } - if options != nil && options.TransactionalContentCRC64 != nil { - req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } } - req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if err := req.SetBody(body, "application/octet-stream"); err != nil { return nil, err } @@ -923,15 +937,15 @@ func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentL // uploadHandleResponse handles the Upload response. 
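For orientation while reading the reordered uploadCreateRequest/uploadHandleResponse hunks: application code normally reaches this generated plumbing through the public azblob convenience client rather than calling it directly. A minimal sketch, assuming a connection string in AZURE_STORAGE_CONNECTION_STRING and illustrative container/blob names:

package main

import (
	"context"
	"log"
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
)

func main() {
	// NewClientFromConnectionString and UploadBuffer are the public azblob
	// surface; "mycontainer" and "hello.txt" are illustrative names.
	client, err := azblob.NewClientFromConnectionString(os.Getenv("AZURE_STORAGE_CONNECTION_STRING"), nil)
	if err != nil {
		log.Fatal(err)
	}
	// This call bottoms out in the generated BlockBlobClient upload path,
	// which sets headers such as x-ms-blob-type: BlockBlob shown above.
	if _, err := client.UploadBuffer(context.Background(), "mycontainer", "hello.txt", []byte("hello"), nil); err != nil {
		log.Fatal(err)
	}
}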
func (client *BlockBlobClient) uploadHandleResponse(resp *http.Response) (BlockBlobClientUploadResponse, error) { result := BlockBlobClientUploadResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) if err != nil { return BlockBlobClientUploadResponse{}, err } - result.LastModified = &lastModified + result.ContentCRC64 = contentCRC64 } if val := resp.Header.Get("Content-MD5"); val != "" { contentMD5, err := base64.StdEncoding.DecodeString(val) @@ -940,18 +954,6 @@ func (client *BlockBlobClient) uploadHandleResponse(resp *http.Response) (BlockB } result.ContentMD5 = contentMD5 } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -959,6 +961,15 @@ func (client *BlockBlobClient) uploadHandleResponse(resp *http.Response) (BlockB } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { @@ -966,11 +977,21 @@ func (client *BlockBlobClient) uploadHandleResponse(resp *http.Response) (BlockB } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientUploadResponse{}, err + } + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val } return result, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go index b9d306cacf4..3dd77291ff9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go @@ -1,11 +1,7 @@ -//go:build go1.18 -// +build go1.18 - // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. 
-// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -347,6 +343,7 @@ const ( ListBlobsIncludeItemImmutabilitypolicy ListBlobsIncludeItem = "immutabilitypolicy" ListBlobsIncludeItemLegalhold ListBlobsIncludeItem = "legalhold" ListBlobsIncludeItemMetadata ListBlobsIncludeItem = "metadata" + ListBlobsIncludeItemPermissions ListBlobsIncludeItem = "permissions" ListBlobsIncludeItemSnapshots ListBlobsIncludeItem = "snapshots" ListBlobsIncludeItemTags ListBlobsIncludeItem = "tags" ListBlobsIncludeItemUncommittedblobs ListBlobsIncludeItem = "uncommittedblobs" @@ -362,6 +359,7 @@ func PossibleListBlobsIncludeItemValues() []ListBlobsIncludeItem { ListBlobsIncludeItemImmutabilitypolicy, ListBlobsIncludeItemLegalhold, ListBlobsIncludeItemMetadata, + ListBlobsIncludeItemPermissions, ListBlobsIncludeItemSnapshots, ListBlobsIncludeItemTags, ListBlobsIncludeItemUncommittedblobs, diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go index 8d325a3a58c..683b40a2550 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go @@ -1,11 +1,7 @@ -//go:build go1.18 -// +build go1.18 - // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -35,25 +31,28 @@ type ContainerClient struct { // to 60 seconds, or can be infinite // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - duration - Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite // lease can be between 15 and 60 seconds. A lease duration cannot be changed using // renew or change. // - options - ContainerClientAcquireLeaseOptions contains the optional parameters for the ContainerClient.AcquireLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *ContainerClient) AcquireLease(ctx context.Context, duration int32, options *ContainerClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientAcquireLeaseResponse, error) { + var err error req, err := client.acquireLeaseCreateRequest(ctx, duration, options, modifiedAccessConditions) if err != nil { return ContainerClientAcquireLeaseResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientAcquireLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return ContainerClientAcquireLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return ContainerClientAcquireLeaseResponse{}, err } - return client.acquireLeaseHandleResponse(resp) + resp, err := client.acquireLeaseHandleResponse(httpResp) + return resp, err } // acquireLeaseCreateRequest creates the AcquireLease request. @@ -69,28 +68,38 @@ func (client *ContainerClient) acquireLeaseCreateRequest(ctx context.Context, du reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-lease-action"] = []string{"acquire"} - req.Raw().Header["x-ms-lease-duration"] = []string{strconv.FormatInt(int64(duration), 10)} - if options != nil && options.ProposedLeaseID != nil { - req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID} - } + req.Raw().Header["Accept"] = []string{"application/xml"} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-lease-action"] = []string{"acquire"} + req.Raw().Header["x-ms-lease-duration"] = []string{strconv.FormatInt(int64(duration), 10)} + if options != nil && options.ProposedLeaseID != nil { + req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } // acquireLeaseHandleResponse handles the AcquireLease response. 
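The acquire-lease plumbing above is normally exercised through the azblob lease subpackage; a minimal sketch, assuming the v1 lease.NewContainerClient layout (exact option types may differ by SDK version), with an illustrative container name:

package main

import (
	"context"
	"log"
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/lease"
)

func main() {
	client, err := azblob.NewClientFromConnectionString(os.Getenv("AZURE_STORAGE_CONNECTION_STRING"), nil)
	if err != nil {
		log.Fatal(err)
	}
	containerClient := client.ServiceClient().NewContainerClient("mycontainer") // illustrative name
	leaseClient, err := lease.NewContainerClient(containerClient, nil)
	if err != nil {
		log.Fatal(err)
	}
	// Duration must be between 15 and 60 seconds, or -1 for an infinite
	// lease, matching the x-ms-lease-duration header set above.
	resp, err := leaseClient.AcquireLease(context.Background(), 60, nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("acquired lease %s", *resp.LeaseID)
}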
func (client *ContainerClient) acquireLeaseHandleResponse(resp *http.Response) (ContainerClientAcquireLeaseResponse, error) { result := ContainerClientAcquireLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientAcquireLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -104,22 +113,12 @@ func (client *ContainerClient) acquireLeaseHandleResponse(resp *http.Response) ( if val := resp.Header.Get("x-ms-lease-id"); val != "" { result.LeaseID = &val } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientAcquireLeaseResponse{}, err - } - result.Date = &date - } return result, nil } @@ -127,22 +126,25 @@ func (client *ContainerClient) acquireLeaseHandleResponse(resp *http.Response) ( // to 60 seconds, or can be infinite // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - options - ContainerClientBreakLeaseOptions contains the optional parameters for the ContainerClient.BreakLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *ContainerClient) BreakLease(ctx context.Context, options *ContainerClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientBreakLeaseResponse, error) { + var err error req, err := client.breakLeaseCreateRequest(ctx, options, modifiedAccessConditions) if err != nil { return ContainerClientBreakLeaseResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientBreakLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return ContainerClientBreakLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return ContainerClientBreakLeaseResponse{}, err } - return client.breakLeaseHandleResponse(resp) + resp, err := client.breakLeaseHandleResponse(httpResp) + return resp, err } // breakLeaseCreateRequest creates the BreakLease request. 
@@ -158,27 +160,37 @@ func (client *ContainerClient) breakLeaseCreateRequest(ctx context.Context, opti reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-lease-action"] = []string{"break"} - if options != nil && options.BreakPeriod != nil { - req.Raw().Header["x-ms-lease-break-period"] = []string{strconv.FormatInt(int64(*options.BreakPeriod), 10)} - } + req.Raw().Header["Accept"] = []string{"application/xml"} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-lease-action"] = []string{"break"} + if options != nil && options.BreakPeriod != nil { + req.Raw().Header["x-ms-lease-break-period"] = []string{strconv.FormatInt(int64(*options.BreakPeriod), 10)} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } // breakLeaseHandleResponse handles the BreakLease response. func (client *ContainerClient) breakLeaseHandleResponse(resp *http.Response) (ContainerClientBreakLeaseResponse, error) { result := ContainerClientBreakLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientBreakLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -197,22 +209,12 @@ func (client *ContainerClient) breakLeaseHandleResponse(resp *http.Response) (Co } result.LeaseTime = &leaseTime } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientBreakLeaseResponse{}, err - } - result.Date = &date - } return result, nil } @@ -220,7 +222,7 @@ func (client *ContainerClient) breakLeaseHandleResponse(resp *http.Response) (Co // to 60 seconds, or can be infinite // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - leaseID - Specifies the current lease ID on the resource. // - proposedLeaseID - Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed // lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID @@ -228,18 +230,21 @@ func (client *ContainerClient) breakLeaseHandleResponse(resp *http.Response) (Co // - options - ContainerClientChangeLeaseOptions contains the optional parameters for the ContainerClient.ChangeLease method. 
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *ContainerClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, options *ContainerClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientChangeLeaseResponse, error) { + var err error req, err := client.changeLeaseCreateRequest(ctx, leaseID, proposedLeaseID, options, modifiedAccessConditions) if err != nil { return ContainerClientChangeLeaseResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientChangeLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientChangeLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientChangeLeaseResponse{}, err } - return client.changeLeaseHandleResponse(resp) + resp, err := client.changeLeaseHandleResponse(httpResp) + return resp, err } // changeLeaseCreateRequest creates the ChangeLease request. @@ -255,26 +260,36 @@ func (client *ContainerClient) changeLeaseCreateRequest(ctx context.Context, lea reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-lease-action"] = []string{"change"} - req.Raw().Header["x-ms-lease-id"] = []string{leaseID} - req.Raw().Header["x-ms-proposed-lease-id"] = []string{proposedLeaseID} + req.Raw().Header["Accept"] = []string{"application/xml"} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-lease-action"] = []string{"change"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + req.Raw().Header["x-ms-proposed-lease-id"] = []string{proposedLeaseID} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } // changeLeaseHandleResponse handles the ChangeLease response. 
func (client *ContainerClient) changeLeaseHandleResponse(resp *http.Response) (ContainerClientChangeLeaseResponse, error) { result := ContainerClientChangeLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientChangeLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -288,22 +303,12 @@ func (client *ContainerClient) changeLeaseHandleResponse(resp *http.Response) (C if val := resp.Header.Get("x-ms-lease-id"); val != "" { result.LeaseID = &val } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientChangeLeaseResponse{}, err - } - result.Date = &date - } return result, nil } @@ -311,22 +316,25 @@ func (client *ContainerClient) changeLeaseHandleResponse(resp *http.Response) (C // fails // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - options - ContainerClientCreateOptions contains the optional parameters for the ContainerClient.Create method. // - ContainerCPKScopeInfo - ContainerCPKScopeInfo contains a group of parameters for the ContainerClient.Create method. func (client *ContainerClient) Create(ctx context.Context, options *ContainerClientCreateOptions, containerCPKScopeInfo *ContainerCPKScopeInfo) (ContainerClientCreateResponse, error) { + var err error req, err := client.createCreateRequest(ctx, options, containerCPKScopeInfo) if err != nil { return ContainerClientCreateResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientCreateResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return ContainerClientCreateResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return ContainerClientCreateResponse{}, err } - return client.createHandleResponse(resp) + resp, err := client.createHandleResponse(httpResp) + return resp, err } // createCreateRequest creates the Create request. 
@@ -341,17 +349,10 @@ func (client *ContainerClient) createCreateRequest(ctx context.Context, options reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - if options != nil && options.Metadata != nil { - for k, v := range options.Metadata { - if v != nil { - req.Raw().Header["x-ms-meta-"+k] = []string{*v} - } - } - } + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.Access != nil { req.Raw().Header["x-ms-blob-public-access"] = []string{string(*options.Access)} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -361,13 +362,30 @@ func (client *ContainerClient) createCreateRequest(ctx context.Context, options if containerCPKScopeInfo != nil && containerCPKScopeInfo.PreventEncryptionScopeOverride != nil { req.Raw().Header["x-ms-deny-encryption-scope-override"] = []string{strconv.FormatBool(*containerCPKScopeInfo.PreventEncryptionScopeOverride)} } - req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } // createHandleResponse handles the Create response. func (client *ContainerClient) createHandleResponse(resp *http.Response) (ContainerClientCreateResponse, error) { result := ContainerClientCreateResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientCreateResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -378,22 +396,12 @@ func (client *ContainerClient) createHandleResponse(resp *http.Response) (Contai } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientCreateResponse{}, err - } - result.Date = &date - } return result, nil } @@ -401,23 +409,26 @@ func (client *ContainerClient) createHandleResponse(resp *http.Response) (Contai // deleted during garbage collection // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - options - ContainerClientDeleteOptions contains the optional parameters for the ContainerClient.Delete method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *ContainerClient) Delete(ctx context.Context, options *ContainerClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientDeleteResponse, error) { + var err error req, err := client.deleteCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { return ContainerClientDeleteResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientDeleteResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return ContainerClientDeleteResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return ContainerClientDeleteResponse{}, err } - return client.deleteHandleResponse(resp) + resp, err := client.deleteHandleResponse(httpResp) + return resp, err } // deleteCreateRequest creates the Delete request. @@ -432,20 +443,20 @@ func (client *ContainerClient) deleteCreateRequest(ctx context.Context, options reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } + req.Raw().Header["Accept"] = []string{"application/xml"} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -455,12 +466,6 @@ func (client *ContainerClient) deleteHandleResponse(resp *http.Response) (Contai if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -468,6 +473,12 @@ func (client *ContainerClient) deleteHandleResponse(resp *http.Response) (Contai } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } @@ -475,22 +486,25 @@ func (client *ContainerClient) deleteHandleResponse(resp *http.Response) (Contai // Filter blobs searches within the given container. // If the operation fails it returns an *azcore.ResponseError type. 
// -Generated from API version 2023-08-03 +Generated from API version 2024-08-04 // - where - Filters the results to return only blobs whose tags match the specified expression. // - options - ContainerClientFilterBlobsOptions contains the optional parameters for the ContainerClient.FilterBlobs method. func (client *ContainerClient) FilterBlobs(ctx context.Context, where string, options *ContainerClientFilterBlobsOptions) (ContainerClientFilterBlobsResponse, error) { + var err error req, err := client.filterBlobsCreateRequest(ctx, where, options) if err != nil { return ContainerClientFilterBlobsResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientFilterBlobsResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientFilterBlobsResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientFilterBlobsResponse{}, err } - return client.filterBlobsHandleResponse(resp) + resp, err := client.filterBlobsHandleResponse(httpResp) + return resp, err } // filterBlobsCreateRequest creates the FilterBlobs request. @@ -500,27 +514,27 @@ func (client *ContainerClient) filterBlobsCreateRequest(ctx context.Context, whe return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "container") reqQP.Set("comp", "blobs") - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + if options != nil && options.Include != nil { + reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) } - reqQP.Set("where", where) if options != nil && options.Marker != nil { reqQP.Set("marker", *options.Marker) } if options != nil && options.Maxresults != nil { reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) } - if options != nil && options.Include != nil { - reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + reqQP.Set("restype", "container") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } + reqQP.Set("where", where) req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -530,12 +544,6 @@ func (client *ContainerClient) filterBlobsHandleResponse(resp *http.Response) (C if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -543,6 +551,12 @@ func (client *ContainerClient) filterBlobsHandleResponse(resp *http.Response) (C } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + }
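FilterBlobs is the tag-search endpoint (comp=blobs plus a where expression). A minimal sketch of a tag query through the public container client, assuming FilterBlobs is exposed there in the SDK version in use; the tag key/value and container name are illustrative:

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
)

func main() {
	client, err := azblob.NewClientFromConnectionString(os.Getenv("AZURE_STORAGE_CONNECTION_STRING"), nil)
	if err != nil {
		log.Fatal(err)
	}
	containerClient := client.ServiceClient().NewContainerClient("mycontainer") // illustrative
	// The expression uses the service's tag-query grammar and is sent verbatim
	// as the "where" query parameter seen in filterBlobsCreateRequest.
	resp, err := containerClient.FilterBlobs(context.Background(), "\"env\"='prod'", nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range resp.Blobs {
		fmt.Println(*b.Name)
	}
}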
if err := runtime.UnmarshalAsXML(resp, &result.FilterBlobSegment); err != nil { return ContainerClientFilterBlobsResponse{}, err } @@ -553,23 +567,26 @@ func (client *ContainerClient) filterBlobsHandleResponse(resp *http.Response) (C // be accessed publicly. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - options - ContainerClientGetAccessPolicyOptions contains the optional parameters for the ContainerClient.GetAccessPolicy // method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. func (client *ContainerClient) GetAccessPolicy(ctx context.Context, options *ContainerClientGetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions) (ContainerClientGetAccessPolicyResponse, error) { + var err error req, err := client.getAccessPolicyCreateRequest(ctx, options, leaseAccessConditions) if err != nil { return ContainerClientGetAccessPolicyResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientGetAccessPolicyResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientGetAccessPolicyResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientGetAccessPolicyResponse{}, err } - return client.getAccessPolicyHandleResponse(resp) + resp, err := client.getAccessPolicyHandleResponse(httpResp) + return resp, err } // getAccessPolicyCreateRequest creates the GetAccessPolicy request. @@ -579,20 +596,20 @@ func (client *ContainerClient) getAccessPolicyCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "container") reqQP.Set("comp", "acl") + reqQP.Set("restype", "container") if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } @@ -602,6 +619,16 @@ func (client *ContainerClient) getAccessPolicyHandleResponse(resp *http.Response if val := resp.Header.Get("x-ms-blob-public-access"); val != "" { result.BlobPublicAccess = (*PublicAccessType)(&val) } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientGetAccessPolicyResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -612,22 +639,12 @@ func (client *ContainerClient) getAccessPolicyHandleResponse(resp *http.Response } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val 
!= "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientGetAccessPolicyResponse{}, err - } - result.Date = &date - } if err := runtime.UnmarshalAsXML(resp, &result); err != nil { return ContainerClientGetAccessPolicyResponse{}, err } @@ -637,22 +654,25 @@ func (client *ContainerClient) getAccessPolicyHandleResponse(resp *http.Response // GetAccountInfo - Returns the sku name and account kind // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - options - ContainerClientGetAccountInfoOptions contains the optional parameters for the ContainerClient.GetAccountInfo // method. func (client *ContainerClient) GetAccountInfo(ctx context.Context, options *ContainerClientGetAccountInfoOptions) (ContainerClientGetAccountInfoResponse, error) { + var err error req, err := client.getAccountInfoCreateRequest(ctx, options) if err != nil { return ContainerClientGetAccountInfoResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientGetAccountInfoResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientGetAccountInfoResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientGetAccountInfoResponse{}, err } - return client.getAccountInfoHandleResponse(resp) + resp, err := client.getAccountInfoHandleResponse(httpResp) + return resp, err } // getAccountInfoCreateRequest creates the GetAccountInfo request. @@ -662,26 +682,29 @@ func (client *ContainerClient) getAccountInfoCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "account") reqQP.Set("comp", "properties") + reqQP.Set("restype", "account") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } // getAccountInfoHandleResponse handles the GetAccountInfo response. 
func (client *ContainerClient) getAccountInfoHandleResponse(resp *http.Response) (ContainerClientGetAccountInfoResponse, error) { result := ContainerClientGetAccountInfoResponse{} + if val := resp.Header.Get("x-ms-account-kind"); val != "" { + result.AccountKind = (*AccountKind)(&val) + } if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -689,11 +712,21 @@ func (client *ContainerClient) getAccountInfoHandleResponse(resp *http.Response) } result.Date = &date } + if val := resp.Header.Get("x-ms-is-hns-enabled"); val != "" { + isHierarchicalNamespaceEnabled, err := strconv.ParseBool(val) + if err != nil { + return ContainerClientGetAccountInfoResponse{}, err + } + result.IsHierarchicalNamespaceEnabled = &isHierarchicalNamespaceEnabled + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } if val := resp.Header.Get("x-ms-sku-name"); val != "" { result.SKUName = (*SKUName)(&val) } - if val := resp.Header.Get("x-ms-account-kind"); val != "" { - result.AccountKind = (*AccountKind)(&val) + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val } return result, nil } @@ -702,22 +735,25 @@ func (client *ContainerClient) getAccountInfoHandleResponse(resp *http.Response) // does not include the container's list of blobs // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - options - ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. func (client *ContainerClient) GetProperties(ctx context.Context, options *ContainerClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (ContainerClientGetPropertiesResponse, error) { + var err error req, err := client.getPropertiesCreateRequest(ctx, options, leaseAccessConditions) if err != nil { return ContainerClientGetPropertiesResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientGetPropertiesResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientGetPropertiesResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientGetPropertiesResponse{}, err } - return client.getPropertiesHandleResponse(resp) + resp, err := client.getPropertiesHandleResponse(httpResp) + return resp, err } // getPropertiesCreateRequest creates the GetProperties request. 
@@ -732,56 +768,26 @@ func (client *ContainerClient) getPropertiesCreateRequest(ctx context.Context, o reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } // getPropertiesHandleResponse handles the GetProperties response. func (client *ContainerClient) getPropertiesHandleResponse(resp *http.Response) (ContainerClientGetPropertiesResponse, error) { result := ContainerClientGetPropertiesResponse{} - for hh := range resp.Header { - if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { - if result.Metadata == nil { - result.Metadata = map[string]*string{} - } - result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) - } - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientGetPropertiesResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-lease-duration"); val != "" { - result.LeaseDuration = (*LeaseDurationType)(&val) - } - if val := resp.Header.Get("x-ms-lease-state"); val != "" { - result.LeaseState = (*LeaseStateType)(&val) - } - if val := resp.Header.Get("x-ms-lease-status"); val != "" { - result.LeaseStatus = (*LeaseStatusType)(&val) + if val := resp.Header.Get("x-ms-blob-public-access"); val != "" { + result.BlobPublicAccess = (*PublicAccessType)(&val) } if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -789,8 +795,18 @@ func (client *ContainerClient) getPropertiesHandleResponse(resp *http.Response) } result.Date = &date } - if val := resp.Header.Get("x-ms-blob-public-access"); val != "" { - result.BlobPublicAccess = (*PublicAccessType)(&val) + if val := resp.Header.Get("x-ms-default-encryption-scope"); val != "" { + result.DefaultEncryptionScope = &val + } + if val := resp.Header.Get("x-ms-deny-encryption-scope-override"); val != "" { + denyEncryptionScopeOverride, err := strconv.ParseBool(val) + if err != nil { + return ContainerClientGetPropertiesResponse{}, err + } + result.DenyEncryptionScopeOverride = &denyEncryptionScopeOverride + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("x-ms-has-immutability-policy"); val != "" { hasImmutabilityPolicy, err := strconv.ParseBool(val) @@ -806,29 +822,49 @@ func (client *ContainerClient) getPropertiesHandleResponse(resp 
*http.Response) } result.HasLegalHold = &hasLegalHold } - if val := resp.Header.Get("x-ms-default-encryption-scope"); val != "" { - result.DefaultEncryptionScope = &val - } - if val := resp.Header.Get("x-ms-deny-encryption-scope-override"); val != "" { - denyEncryptionScopeOverride, err := strconv.ParseBool(val) + if val := resp.Header.Get("x-ms-immutable-storage-with-versioning-enabled"); val != "" { + isImmutableStorageWithVersioningEnabled, err := strconv.ParseBool(val) if err != nil { return ContainerClientGetPropertiesResponse{}, err } - result.DenyEncryptionScopeOverride = &denyEncryptionScopeOverride + result.IsImmutableStorageWithVersioningEnabled = &isImmutableStorageWithVersioningEnabled } - if val := resp.Header.Get("x-ms-immutable-storage-with-versioning-enabled"); val != "" { - isImmutableStorageWithVersioningEnabled, err := strconv.ParseBool(val) + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) if err != nil { return ContainerClientGetPropertiesResponse{}, err } - result.IsImmutableStorageWithVersioningEnabled = &isImmutableStorageWithVersioningEnabled + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = (*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) + } + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val } return result, nil } // NewListBlobFlatSegmentPager - [Update] The List Blobs operation returns a list of the blobs under the specified container // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - options - ContainerClientListBlobFlatSegmentOptions contains the optional parameters for the ContainerClient.NewListBlobFlatSegmentPager // method. 
// @@ -839,10 +875,9 @@ func (client *ContainerClient) ListBlobFlatSegmentCreateRequest(ctx context.Cont return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "container") reqQP.Set("comp", "list") - if options != nil && options.Prefix != nil { - reqQP.Set("prefix", *options.Prefix) + if options != nil && options.Include != nil { + reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) } if options != nil && options.Marker != nil { reqQP.Set("marker", *options.Marker) @@ -850,35 +885,30 @@ func (client *ContainerClient) ListBlobFlatSegmentCreateRequest(ctx context.Cont if options != nil && options.Maxresults != nil { reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) } - if options != nil && options.Include != nil { - reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + if options != nil && options.Prefix != nil { + reqQP.Set("prefix", *options.Prefix) } + reqQP.Set("restype", "container") if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } // listBlobFlatSegmentHandleResponse handles the ListBlobFlatSegment response. func (client *ContainerClient) ListBlobFlatSegmentHandleResponse(resp *http.Response) (ContainerClientListBlobFlatSegmentResponse, error) { result := ContainerClientListBlobFlatSegmentResponse{} - if val := resp.Header.Get("Content-Type"); val != "" { - result.ContentType = &val - } if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) @@ -887,6 +917,12 @@ func (client *ContainerClient) ListBlobFlatSegmentHandleResponse(resp *http.Resp } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } if err := runtime.UnmarshalAsXML(resp, &result.ListBlobsFlatSegmentResponse); err != nil { return ContainerClientListBlobFlatSegmentResponse{}, err } @@ -895,7 +931,7 @@ func (client *ContainerClient) ListBlobFlatSegmentHandleResponse(resp *http.Resp // NewListBlobHierarchySegmentPager - [Update] The List Blobs operation returns a list of the blobs under the specified container // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - delimiter - When the request includes this parameter, the operation returns a BlobPrefix element in the response body that // acts as a placeholder for all blobs whose names begin with the same substring up to the // appearance of the delimiter character. The delimiter may be a single character or a string. 
@@ -907,23 +943,16 @@ func (client *ContainerClient) NewListBlobHierarchySegmentPager(delimiter string return page.NextMarker != nil && len(*page.NextMarker) > 0 }, Fetcher: func(ctx context.Context, page *ContainerClientListBlobHierarchySegmentResponse) (ContainerClientListBlobHierarchySegmentResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.ListBlobHierarchySegmentCreateRequest(ctx, delimiter, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextMarker) + nextLink := "" + if page != nil { + nextLink = *page.NextMarker } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.ListBlobHierarchySegmentCreateRequest(ctx, delimiter, options) + }, nil) if err != nil { return ContainerClientListBlobHierarchySegmentResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ContainerClientListBlobHierarchySegmentResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientListBlobHierarchySegmentResponse{}, runtime.NewResponseError(resp) - } return client.ListBlobHierarchySegmentHandleResponse(resp) }, }) @@ -936,47 +965,41 @@ func (client *ContainerClient) ListBlobHierarchySegmentCreateRequest(ctx context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "container") reqQP.Set("comp", "list") - if options != nil && options.Prefix != nil { - reqQP.Set("prefix", *options.Prefix) - } reqQP.Set("delimiter", delimiter) + if options != nil && options.Include != nil { + reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + } if options != nil && options.Marker != nil { reqQP.Set("marker", *options.Marker) } if options != nil && options.Maxresults != nil { reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) } - if options != nil && options.Include != nil { - reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + if options != nil && options.Prefix != nil { + reqQP.Set("prefix", *options.Prefix) } + reqQP.Set("restype", "container") if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } // ListBlobHierarchySegmentHandleResponse handles the ListBlobHierarchySegment response. 
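The hierarchy pager above now delegates continuation handling to runtime.FetcherForNextLink, but the calling pattern from application code is unchanged. A minimal sketch listing one level of virtual directories with "/" as the delimiter; the container name is illustrative:

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
)

func main() {
	client, err := azblob.NewClientFromConnectionString(os.Getenv("AZURE_STORAGE_CONNECTION_STRING"), nil)
	if err != nil {
		log.Fatal(err)
	}
	containerClient := client.ServiceClient().NewContainerClient("mycontainer") // illustrative
	pager := containerClient.NewListBlobsHierarchyPager("/", nil)
	for pager.More() {
		page, err := pager.NextPage(context.Background())
		if err != nil {
			log.Fatal(err)
		}
		// BlobPrefixes are the "directories" collapsed by the delimiter;
		// BlobItems are the blobs at this level.
		for _, p := range page.Segment.BlobPrefixes {
			fmt.Println("dir: ", *p.Name)
		}
		for _, b := range page.Segment.BlobItems {
			fmt.Println("blob:", *b.Name)
		}
	}
}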
func (client *ContainerClient) ListBlobHierarchySegmentHandleResponse(resp *http.Response) (ContainerClientListBlobHierarchySegmentResponse, error) { result := ContainerClientListBlobHierarchySegmentResponse{} - if val := resp.Header.Get("Content-Type"); val != "" { - result.ContentType = &val - } if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) @@ -985,6 +1008,12 @@ func (client *ContainerClient) ListBlobHierarchySegmentHandleResponse(resp *http } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } if err := runtime.UnmarshalAsXML(resp, &result.ListBlobsHierarchySegmentResponse); err != nil { return ContainerClientListBlobHierarchySegmentResponse{}, err } @@ -995,23 +1024,26 @@ func (client *ContainerClient) ListBlobHierarchySegmentHandleResponse(resp *http // to 60 seconds, or can be infinite // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - leaseID - Specifies the current lease ID on the resource. // - options - ContainerClientReleaseLeaseOptions contains the optional parameters for the ContainerClient.ReleaseLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *ContainerClient) ReleaseLease(ctx context.Context, leaseID string, options *ContainerClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientReleaseLeaseResponse, error) { + var err error req, err := client.releaseLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) if err != nil { return ContainerClientReleaseLeaseResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientReleaseLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientReleaseLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientReleaseLeaseResponse{}, err } - return client.releaseLeaseHandleResponse(resp) + resp, err := client.releaseLeaseHandleResponse(httpResp) + return resp, err } // releaseLeaseCreateRequest creates the ReleaseLease request. 
@@ -1027,25 +1059,35 @@ func (client *ContainerClient) releaseLeaseCreateRequest(ctx context.Context, le reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-lease-action"] = []string{"release"} - req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + req.Raw().Header["Accept"] = []string{"application/xml"} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-lease-action"] = []string{"release"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } // releaseLeaseHandleResponse handles the ReleaseLease response. func (client *ContainerClient) releaseLeaseHandleResponse(resp *http.Response) (ContainerClientReleaseLeaseResponse, error) { result := ContainerClientReleaseLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientReleaseLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -1056,44 +1098,37 @@ func (client *ContainerClient) releaseLeaseHandleResponse(resp *http.Response) ( } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientReleaseLeaseResponse{}, err - } - result.Date = &date - } return result, nil } // Rename - Renames an existing container. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - sourceContainerName - Required. Specifies the name of the container to rename. // - options - ContainerClientRenameOptions contains the optional parameters for the ContainerClient.Rename method. 
func (client *ContainerClient) Rename(ctx context.Context, sourceContainerName string, options *ContainerClientRenameOptions) (ContainerClientRenameResponse, error) { + var err error req, err := client.renameCreateRequest(ctx, sourceContainerName, options) if err != nil { return ContainerClientRenameResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientRenameResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientRenameResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientRenameResponse{}, err } - return client.renameHandleResponse(resp) + resp, err := client.renameHandleResponse(httpResp) + return resp, err } // renameCreateRequest creates the Rename request. @@ -1103,13 +1138,13 @@ func (client *ContainerClient) renameCreateRequest(ctx context.Context, sourceCo return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "container") reqQP.Set("comp", "rename") + reqQP.Set("restype", "container") if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1117,7 +1152,7 @@ func (client *ContainerClient) renameCreateRequest(ctx context.Context, sourceCo if options != nil && options.SourceLeaseID != nil { req.Raw().Header["x-ms-source-lease-id"] = []string{*options.SourceLeaseID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -1127,12 +1162,6 @@ func (client *ContainerClient) renameHandleResponse(resp *http.Response) (Contai if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -1140,6 +1169,12 @@ func (client *ContainerClient) renameHandleResponse(resp *http.Response) (Contai } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } @@ -1147,23 +1182,26 @@ func (client *ContainerClient) renameHandleResponse(resp *http.Response) (Contai // to 60 seconds, or can be infinite // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - leaseID - Specifies the current lease ID on the resource. // - options - ContainerClientRenewLeaseOptions contains the optional parameters for the ContainerClient.RenewLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *ContainerClient) RenewLease(ctx context.Context, leaseID string, options *ContainerClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientRenewLeaseResponse, error) { + var err error req, err := client.renewLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) if err != nil { return ContainerClientRenewLeaseResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientRenewLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientRenewLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientRenewLeaseResponse{}, err } - return client.renewLeaseHandleResponse(resp) + resp, err := client.renewLeaseHandleResponse(httpResp) + return resp, err } // renewLeaseCreateRequest creates the RenewLease request. @@ -1179,25 +1217,35 @@ func (client *ContainerClient) renewLeaseCreateRequest(ctx context.Context, leas reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-lease-action"] = []string{"renew"} - req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + req.Raw().Header["Accept"] = []string{"application/xml"} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-lease-action"] = []string{"renew"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } // renewLeaseHandleResponse handles the RenewLease response. 
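Aside on the conditional headers in the lease hunks: both format If-Modified-Since / If-Unmodified-Since as (*t).In(gmt).Format(time.RFC1123). time.RFC1123 renders the zone's abbreviation, and HTTP date headers are required to read "GMT", so converting first keeps a UTC or local timestamp from serializing with "UTC" or a local abbreviation. A standalone sketch of that detail (time.FixedZone here is an assumption standing in for the package-level gmt the generated code references):

package main

import (
	"fmt"
	"time"
)

func main() {
	gmt := time.FixedZone("GMT", 0) // stand-in for the generated package's gmt zone
	t := time.Date(2024, time.August, 4, 12, 0, 0, 0, time.UTC)
	// Formatting in GMT yields the abbreviation HTTP expects.
	fmt.Println(t.In(gmt).Format(time.RFC1123)) // Sun, 04 Aug 2024 12:00:00 GMT
}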
func (client *ContainerClient) renewLeaseHandleResponse(resp *http.Response) (ContainerClientRenewLeaseResponse, error) { result := ContainerClientRenewLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientRenewLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -1211,43 +1259,36 @@ func (client *ContainerClient) renewLeaseHandleResponse(resp *http.Response) (Co if val := resp.Header.Get("x-ms-lease-id"); val != "" { result.LeaseID = &val } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientRenewLeaseResponse{}, err - } - result.Date = &date - } return result, nil } // Restore - Restores a previously-deleted container. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - options - ContainerClientRestoreOptions contains the optional parameters for the ContainerClient.Restore method. func (client *ContainerClient) Restore(ctx context.Context, options *ContainerClientRestoreOptions) (ContainerClientRestoreResponse, error) { + var err error req, err := client.restoreCreateRequest(ctx, options) if err != nil { return ContainerClientRestoreResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientRestoreResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return ContainerClientRestoreResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return ContainerClientRestoreResponse{}, err } - return client.restoreHandleResponse(resp) + resp, err := client.restoreHandleResponse(httpResp) + return resp, err } // restoreCreateRequest creates the Restore request. 
@@ -1257,13 +1298,13 @@ func (client *ContainerClient) restoreCreateRequest(ctx context.Context, options return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "container") reqQP.Set("comp", "undelete") + reqQP.Set("restype", "container") if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1273,7 +1314,7 @@ func (client *ContainerClient) restoreCreateRequest(ctx context.Context, options if options != nil && options.DeletedContainerVersion != nil { req.Raw().Header["x-ms-deleted-container-version"] = []string{*options.DeletedContainerVersion} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -1283,12 +1324,6 @@ func (client *ContainerClient) restoreHandleResponse(resp *http.Response) (Conta if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -1296,6 +1331,12 @@ func (client *ContainerClient) restoreHandleResponse(resp *http.Response) (Conta } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } @@ -1303,25 +1344,28 @@ func (client *ContainerClient) restoreHandleResponse(resp *http.Response) (Conta // may be accessed publicly. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - containerACL - the acls for the container // - options - ContainerClientSetAccessPolicyOptions contains the optional parameters for the ContainerClient.SetAccessPolicy // method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *ContainerClient) SetAccessPolicy(ctx context.Context, containerACL []*SignedIdentifier, options *ContainerClientSetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientSetAccessPolicyResponse, error) { + var err error req, err := client.setAccessPolicyCreateRequest(ctx, containerACL, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { return ContainerClientSetAccessPolicyResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientSetAccessPolicyResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientSetAccessPolicyResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientSetAccessPolicyResponse{}, err } - return client.setAccessPolicyHandleResponse(resp) + resp, err := client.setAccessPolicyHandleResponse(httpResp) + return resp, err } // setAccessPolicyCreateRequest creates the SetAccessPolicy request. @@ -1331,29 +1375,29 @@ func (client *ContainerClient) setAccessPolicyCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "container") reqQP.Set("comp", "acl") + reqQP.Set("restype", "container") if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} - } - if options != nil && options.Access != nil { - req.Raw().Header["x-ms-blob-public-access"] = []string{string(*options.Access)} - } + req.Raw().Header["Accept"] = []string{"application/xml"} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.Access != nil { + req.Raw().Header["x-ms-blob-public-access"] = []string{string(*options.Access)} + } if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} type wrapper struct { XMLName xml.Name `xml:"SignedIdentifiers"` ContainerACL *[]*SignedIdentifier `xml:"SignedIdentifier"` @@ -1367,6 +1411,16 @@ func (client *ContainerClient) setAccessPolicyCreateRequest(ctx context.Context, // setAccessPolicyHandleResponse handles the SetAccessPolicy response. 
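The setAccessPolicyCreateRequest hunk above keeps the throwaway wrapper struct so the []*SignedIdentifier body serializes under a single <SignedIdentifiers> root element. A self-contained sketch of that encoding/xml wrapper technique (the one-field SignedIdentifier stand-in is trimmed for illustration; the real model also carries the access policy):

package main

import (
	"encoding/xml"
	"fmt"
)

// Trimmed stand-in for the generated SignedIdentifier model.
type SignedIdentifier struct {
	ID *string `xml:"Id"`
}

// Same shape as the local wrapper in setAccessPolicyCreateRequest: the
// XMLName field supplies the <SignedIdentifiers> root around the repeated
// <SignedIdentifier> children.
type wrapper struct {
	XMLName      xml.Name             `xml:"SignedIdentifiers"`
	ContainerACL *[]*SignedIdentifier `xml:"SignedIdentifier"`
}

func main() {
	id := "policy-1"
	acl := []*SignedIdentifier{{ID: &id}}
	out, err := xml.Marshal(wrapper{ContainerACL: &acl})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
	// <SignedIdentifiers><SignedIdentifier><Id>policy-1</Id></SignedIdentifier></SignedIdentifiers>
}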
func (client *ContainerClient) setAccessPolicyHandleResponse(resp *http.Response) (ContainerClientSetAccessPolicyResponse, error) { result := ContainerClientSetAccessPolicyResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientSetAccessPolicyResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -1377,45 +1431,38 @@ func (client *ContainerClient) setAccessPolicyHandleResponse(resp *http.Response } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientSetAccessPolicyResponse{}, err - } - result.Date = &date - } return result, nil } // SetMetadata - operation sets one or more user-defined name-value pairs for the specified container. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - options - ContainerClientSetMetadataOptions contains the optional parameters for the ContainerClient.SetMetadata method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *ContainerClient) SetMetadata(ctx context.Context, options *ContainerClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientSetMetadataResponse, error) { + var err error req, err := client.setMetadataCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { return ContainerClientSetMetadataResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientSetMetadataResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientSetMetadataResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientSetMetadataResponse{}, err } - return client.setMetadataHandleResponse(resp) + resp, err := client.setMetadataHandleResponse(httpResp) + return resp, err } // setMetadataCreateRequest creates the SetMetadata request. 
@@ -1425,12 +1472,19 @@ func (client *ContainerClient) setMetadataCreateRequest(ctx context.Context, opt return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "container") reqQP.Set("comp", "metadata") + reqQP.Set("restype", "container") if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } @@ -1441,20 +1495,23 @@ func (client *ContainerClient) setMetadataCreateRequest(ctx context.Context, opt } } } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // setMetadataHandleResponse handles the SetMetadata response. func (client *ContainerClient) setMetadataHandleResponse(resp *http.Response) (ContainerClientSetMetadataResponse, error) { result := ContainerClientSetMetadataResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientSetMetadataResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -1465,47 +1522,40 @@ func (client *ContainerClient) setMetadataHandleResponse(resp *http.Response) (C } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientSetMetadataResponse{}, err - } - result.Date = &date - } return result, nil } // SubmitBatch - The Batch operation allows multiple API calls to be embedded into a single HTTP request. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - contentLength - The length of the request. // - multipartContentType - Required. The value of this header must be multipart/mixed with a batch boundary. Example header // value: multipart/mixed; boundary=batch_ // - body - Initial data // - options - ContainerClientSubmitBatchOptions contains the optional parameters for the ContainerClient.SubmitBatch method. 
func (client *ContainerClient) SubmitBatch(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ContainerClientSubmitBatchOptions) (ContainerClientSubmitBatchResponse, error) { + var err error req, err := client.submitBatchCreateRequest(ctx, contentLength, multipartContentType, body, options) if err != nil { return ContainerClientSubmitBatchResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientSubmitBatchResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return ContainerClientSubmitBatchResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return ContainerClientSubmitBatchResponse{}, err } - return client.submitBatchHandleResponse(resp) + resp, err := client.submitBatchHandleResponse(httpResp) + return resp, err } // submitBatchCreateRequest creates the SubmitBatch request. @@ -1515,20 +1565,20 @@ func (client *ContainerClient) submitBatchCreateRequest(ctx context.Context, con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "container") reqQP.Set("comp", "batch") + reqQP.Set("restype", "container") if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() runtime.SkipBodyDownload(req) + req.Raw().Header["Accept"] = []string{"application/xml"} req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} req.Raw().Header["Content-Type"] = []string{multipartContentType} - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if err := req.SetBody(body, multipartContentType); err != nil { return nil, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go index 1fed5f630bd..75307183896 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go @@ -1,11 +1,7 @@ -//go:build go1.18 -// +build go1.18 - // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -26,89 +22,6 @@ type AccessPolicy struct { Start *time.Time `xml:"Start"` } -// AppendBlobClientAppendBlockFromURLOptions contains the optional parameters for the AppendBlobClient.AppendBlockFromURL -// method. -type AppendBlobClientAppendBlockFromURLOptions struct { - // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. 
- CopySourceAuthorization *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // Specify the md5 calculated for the range of bytes that must be read from the copy source. - SourceContentMD5 []byte - // Specify the crc64 calculated for the range of bytes that must be read from the copy source. - SourceContentcrc64 []byte - // Bytes of source data in the specified range. - SourceRange *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte -} - -// AppendBlobClientAppendBlockOptions contains the optional parameters for the AppendBlobClient.AppendBlock method. -type AppendBlobClientAppendBlockOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // Specify the transactional crc64 for the body, to be validated by the service. - TransactionalContentCRC64 []byte - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte -} - -// AppendBlobClientCreateOptions contains the optional parameters for the AppendBlobClient.Create method. -type AppendBlobClientCreateOptions struct { - // Optional. Used to set blob tags in various blob operations. - BlobTagsString *string - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *ImmutabilityPolicySetting - // Specified if a legal hold should be set on the blob. - LegalHold *bool - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]*string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// AppendBlobClientSealOptions contains the optional parameters for the AppendBlobClient.Seal method. 
-type AppendBlobClientSealOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock method. -type AppendPositionAccessConditions struct { - // Optional conditional header, used only for the Append Block operation. A number indicating the byte offset to compare. - // Append Block will succeed only if the append position is equal to this number. If - // it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed). - AppendPosition *int64 - // Optional conditional header. The max length in bytes permitted for the append blob. If the Append Block operation would - // cause the blob to exceed that limit or if the blob size is already greater than - // the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code 412 - - // Precondition Failed). - MaxSize *int64 -} - // ArrowConfiguration - Groups the settings used for formatting the response if the response should be Arrow formatted. type ArrowConfiguration struct { // REQUIRED @@ -124,405 +37,11 @@ type ArrowField struct { Scale *int32 `xml:"Scale"` } -// BlobClientAbortCopyFromURLOptions contains the optional parameters for the BlobClient.AbortCopyFromURL method. -type BlobClientAbortCopyFromURLOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientAcquireLeaseOptions contains the optional parameters for the BlobClient.AcquireLease method. -type BlobClientAcquireLeaseOptions struct { - // Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is - // not in the correct format. See Guid Constructor (String) for a list of valid GUID - // string formats. - ProposedLeaseID *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientBreakLeaseOptions contains the optional parameters for the BlobClient.BreakLease method. -type BlobClientBreakLeaseOptions struct { - // For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This - // break period is only used if it is shorter than the time remaining on the - // lease. If longer, the time remaining on the lease is used. 
A new lease will not be available before the break period has - // expired, but the lease may be held for longer than the break period. If this - // header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, - // and an infinite lease breaks immediately. - BreakPeriod *int32 - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientChangeLeaseOptions contains the optional parameters for the BlobClient.ChangeLease method. -type BlobClientChangeLeaseOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientCopyFromURLOptions contains the optional parameters for the BlobClient.CopyFromURL method. -type BlobClientCopyFromURLOptions struct { - // Optional. Used to set blob tags in various blob operations. - BlobTagsString *string - // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. - CopySourceAuthorization *string - // Optional, default 'replace'. Indicates if source tags should be copied or replaced with the tags specified by x-ms-tags. - CopySourceTags *BlobCopySourceTags - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *ImmutabilityPolicySetting - // Specified if a legal hold should be set on the blob. - LegalHold *bool - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]*string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // Specify the md5 calculated for the range of bytes that must be read from the copy source. - SourceContentMD5 []byte - // Optional. Indicates the tier to be set on the blob. - Tier *AccessTier - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
- // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientCreateSnapshotOptions contains the optional parameters for the BlobClient.CreateSnapshot method. -type BlobClientCreateSnapshotOptions struct { - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]*string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the BlobClient.DeleteImmutabilityPolicy -// method. -type BlobClientDeleteImmutabilityPolicyOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientDeleteOptions contains the optional parameters for the BlobClient.Delete method. -type BlobClientDeleteOptions struct { - // Required if the blob has associated snapshots. Specify one of the following two options: include: Delete the base blob - // and all of its snapshots. only: Delete only the blob's snapshots and not the blob - // itself - DeleteSnapshots *DeleteSnapshotsOptionType - // Optional. Only possible value is 'permanent', which specifies to permanently delete a blob if blob soft delete is enabled. - DeleteType *DeleteType - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. 
- VersionID *string -} - -// BlobClientDownloadOptions contains the optional parameters for the BlobClient.Download method. -type BlobClientDownloadOptions struct { - // Return only the bytes of the blob in the specified range. - Range *string - // When set to true and specified together with the Range, the service returns the CRC64 hash for the range, as long as the - // range is less than or equal to 4 MB in size. - RangeGetContentCRC64 *bool - // When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the - // range is less than or equal to 4 MB in size. - RangeGetContentMD5 *bool - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. - VersionID *string -} - -// BlobClientGetAccountInfoOptions contains the optional parameters for the BlobClient.GetAccountInfo method. -type BlobClientGetAccountInfoOptions struct { - // placeholder for future optional parameters -} - -// BlobClientGetPropertiesOptions contains the optional parameters for the BlobClient.GetProperties method. -type BlobClientGetPropertiesOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. - VersionID *string -} - -// BlobClientGetTagsOptions contains the optional parameters for the BlobClient.GetTags method. -type BlobClientGetTagsOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. 
- // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. - VersionID *string -} - -// BlobClientQueryOptions contains the optional parameters for the BlobClient.Query method. -type BlobClientQueryOptions struct { - // the query request - QueryRequest *QueryRequest - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientReleaseLeaseOptions contains the optional parameters for the BlobClient.ReleaseLease method. -type BlobClientReleaseLeaseOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientRenewLeaseOptions contains the optional parameters for the BlobClient.RenewLease method. -type BlobClientRenewLeaseOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientSetExpiryOptions contains the optional parameters for the BlobClient.SetExpiry method. -type BlobClientSetExpiryOptions struct { - // The time to set the blob to expiry - ExpiresOn *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientSetHTTPHeadersOptions contains the optional parameters for the BlobClient.SetHTTPHeaders method. 
-type BlobClientSetHTTPHeadersOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientSetImmutabilityPolicyOptions contains the optional parameters for the BlobClient.SetImmutabilityPolicy method. -type BlobClientSetImmutabilityPolicyOptions struct { - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *ImmutabilityPolicySetting - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientSetLegalHoldOptions contains the optional parameters for the BlobClient.SetLegalHold method. -type BlobClientSetLegalHoldOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientSetMetadataOptions contains the optional parameters for the BlobClient.SetMetadata method. -type BlobClientSetMetadataOptions struct { - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]*string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientSetTagsOptions contains the optional parameters for the BlobClient.SetTags method. -type BlobClientSetTagsOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. 
- RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // Specify the transactional crc64 for the body, to be validated by the service. - TransactionalContentCRC64 []byte - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. - VersionID *string -} - -// BlobClientSetTierOptions contains the optional parameters for the BlobClient.SetTier method. -type BlobClientSetTierOptions struct { - // Optional: Indicates the priority with which to rehydrate an archived blob. - RehydratePriority *RehydratePriority - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. - VersionID *string -} - -// BlobClientStartCopyFromURLOptions contains the optional parameters for the BlobClient.StartCopyFromURL method. -type BlobClientStartCopyFromURLOptions struct { - // Optional. Used to set blob tags in various blob operations. - BlobTagsString *string - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *ImmutabilityPolicySetting - // Specified if a legal hold should be set on the blob. - LegalHold *bool - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]*string - // Optional: Indicates the priority with which to rehydrate an archived blob. - RehydratePriority *RehydratePriority - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // Overrides the sealed state of the destination blob. Service version 2019-12-12 and newer. 
- SealBlob *bool - // Optional. Indicates the tier to be set on the blob. - Tier *AccessTier - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientUndeleteOptions contains the optional parameters for the BlobClient.Undelete method. -type BlobClientUndeleteOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - type BlobFlatListSegment struct { // REQUIRED BlobItems []*BlobItem `xml:"Blob"` } -// BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. -type BlobHTTPHeaders struct { - // Optional. Sets the blob's cache control. If specified, this property is stored with the blob and returned with a read request. - BlobCacheControl *string - // Optional. Sets the blob's Content-Disposition header. - BlobContentDisposition *string - // Optional. Sets the blob's content encoding. If specified, this property is stored with the blob and returned with a read - // request. - BlobContentEncoding *string - // Optional. Set the blob's content language. If specified, this property is stored with the blob and returned with a read - // request. - BlobContentLanguage *string - // Optional. An MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual blocks - // were validated when each was uploaded. - BlobContentMD5 []byte - // Optional. Sets the blob's content type. If specified, this property is stored with the blob and returned with a read request. - BlobContentType *string -} - type BlobHierarchyListSegment struct { // REQUIRED BlobItems []*BlobItem `xml:"Blob"` @@ -576,6 +95,7 @@ type BlobProperties struct { // REQUIRED LastModified *time.Time `xml:"Last-Modified"` + ACL *string `xml:"Acl"` AccessTier *AccessTier `xml:"AccessTier"` AccessTierChangeTime *time.Time `xml:"AccessTierChangeTime"` AccessTierInferred *bool `xml:"AccessTierInferred"` @@ -605,6 +125,7 @@ type BlobProperties struct { // The name of the encryption scope under which the blob is encrypted. EncryptionScope *string `xml:"EncryptionScope"` ExpiresOn *time.Time `xml:"Expiry-Time"` + Group *string `xml:"Group"` ImmutabilityPolicyExpiresOn *time.Time `xml:"ImmutabilityPolicyUntilDate"` ImmutabilityPolicyMode *ImmutabilityPolicyMode `xml:"ImmutabilityPolicyMode"` IncrementalCopy *bool `xml:"IncrementalCopy"` @@ -614,11 +135,14 @@ type BlobProperties struct { LeaseState *LeaseStateType `xml:"LeaseState"` LeaseStatus *LeaseStatusType `xml:"LeaseStatus"` LegalHold *bool `xml:"LegalHold"` + Owner *string `xml:"Owner"` + Permissions *string `xml:"Permissions"` // If an object is in rehydrate pending state then this header is returned with priority of rehydrate. Valid values are High // and Standard. 
RehydratePriority *RehydratePriority `xml:"RehydratePriority"` RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"` + ResourceType *string `xml:"ResourceType"` ServerEncrypted *bool `xml:"ServerEncrypted"` TagCount *int32 `xml:"TagCount"` } @@ -646,145 +170,6 @@ type Block struct { Size *int64 `xml:"Size"` } -// BlockBlobClientCommitBlockListOptions contains the optional parameters for the BlockBlobClient.CommitBlockList method. -type BlockBlobClientCommitBlockListOptions struct { - // Optional. Used to set blob tags in various blob operations. - BlobTagsString *string - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *ImmutabilityPolicySetting - // Specified if a legal hold should be set on the blob. - LegalHold *bool - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]*string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // Optional. Indicates the tier to be set on the blob. - Tier *AccessTier - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // Specify the transactional crc64 for the body, to be validated by the service. - TransactionalContentCRC64 []byte - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte -} - -// BlockBlobClientGetBlockListOptions contains the optional parameters for the BlockBlobClient.GetBlockList method. -type BlockBlobClientGetBlockListOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlockBlobClientPutBlobFromURLOptions contains the optional parameters for the BlockBlobClient.PutBlobFromURL method. -type BlockBlobClientPutBlobFromURLOptions struct { - // Optional. Used to set blob tags in various blob operations. - BlobTagsString *string - // Only Bearer type is supported. 
Credentials should be a valid OAuth access token to copy source. - CopySourceAuthorization *string - // Optional, default is true. Indicates if properties from the source blob should be copied. - CopySourceBlobProperties *bool - // Optional, default 'replace'. Indicates if source tags should be copied or replaced with the tags specified by x-ms-tags. - CopySourceTags *BlobCopySourceTags - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]*string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // Specify the md5 calculated for the range of bytes that must be read from the copy source. - SourceContentMD5 []byte - // Optional. Indicates the tier to be set on the blob. - Tier *AccessTier - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte -} - -// BlockBlobClientStageBlockFromURLOptions contains the optional parameters for the BlockBlobClient.StageBlockFromURL method. -type BlockBlobClientStageBlockFromURLOptions struct { - // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. - CopySourceAuthorization *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // Specify the md5 calculated for the range of bytes that must be read from the copy source. - SourceContentMD5 []byte - // Specify the crc64 calculated for the range of bytes that must be read from the copy source. - SourceContentcrc64 []byte - // Bytes of source data in the specified range. - SourceRange *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlockBlobClientStageBlockOptions contains the optional parameters for the BlockBlobClient.StageBlock method. -type BlockBlobClientStageBlockOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // Specify the transactional crc64 for the body, to be validated by the service. 
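These BlockBlobClient option types parameterize the two-step block blob flow: stage blocks, optionally with a transactional hash so the service can verify each body in transit, then commit the block list. A sketch of the caller's side, same-package assumption as above; blockID is an illustrative helper, not part of the generated API:

    import (
        "crypto/md5"
        "encoding/base64"
        "fmt"
    )

    // Block IDs must be base64 strings of equal length, hence the
    // fixed-width counter before encoding.
    func blockID(i int) string {
        return base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("block-%06d", i)))
    }

    // Attach a transactional MD5 so the service validates the staged body.
    func exampleStageOptions(chunk []byte) BlockBlobClientStageBlockOptions {
        sum := md5.Sum(chunk)
        return BlockBlobClientStageBlockOptions{
            TransactionalContentMD5: sum[:],
        }
    }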
- TransactionalContentCRC64 []byte - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte -} - -// BlockBlobClientUploadOptions contains the optional parameters for the BlockBlobClient.Upload method. -type BlockBlobClientUploadOptions struct { - // Optional. Used to set blob tags in various blob operations. - BlobTagsString *string - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *ImmutabilityPolicySetting - // Specified if a legal hold should be set on the blob. - LegalHold *bool - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]*string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // Optional. Indicates the tier to be set on the blob. - Tier *AccessTier - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // Specify the transactional crc64 for the body, to be validated by the service. - TransactionalContentCRC64 []byte - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte -} - type BlockList struct { CommittedBlocks []*Block `xml:"CommittedBlocks>Block"` UncommittedBlocks []*Block `xml:"UncommittedBlocks>Block"` @@ -804,274 +189,6 @@ type ClearRange struct { Start *int64 `xml:"Start"` } -// ContainerClientAcquireLeaseOptions contains the optional parameters for the ContainerClient.AcquireLease method. -type ContainerClientAcquireLeaseOptions struct { - // Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is - // not in the correct format. See Guid Constructor (String) for a list of valid GUID - // string formats. - ProposedLeaseID *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientBreakLeaseOptions contains the optional parameters for the ContainerClient.BreakLease method. -type ContainerClientBreakLeaseOptions struct { - // For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. 
This - // break period is only used if it is shorter than the time remaining on the - // lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has - // expired, but the lease may be held for longer than the break period. If this - // header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, - // and an infinite lease breaks immediately. - BreakPeriod *int32 - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientChangeLeaseOptions contains the optional parameters for the ContainerClient.ChangeLease method. -type ContainerClientChangeLeaseOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientCreateOptions contains the optional parameters for the ContainerClient.Create method. -type ContainerClientCreateOptions struct { - // Specifies whether data in the container may be accessed publicly and the level of access - Access *PublicAccessType - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]*string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientDeleteOptions contains the optional parameters for the ContainerClient.Delete method. -type ContainerClientDeleteOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
- // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientFilterBlobsOptions contains the optional parameters for the ContainerClient.FilterBlobs method. -type ContainerClientFilterBlobsOptions struct { - // Include this parameter to specify one or more datasets to include in the response. - Include []FilterBlobsIncludeItem - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. - Maxresults *int32 - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientGetAccessPolicyOptions contains the optional parameters for the ContainerClient.GetAccessPolicy method. -type ContainerClientGetAccessPolicyOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientGetAccountInfoOptions contains the optional parameters for the ContainerClient.GetAccountInfo method. -type ContainerClientGetAccountInfoOptions struct { - // placeholder for future optional parameters -} - -// ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method. -type ContainerClientGetPropertiesOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientListBlobFlatSegmentOptions contains the optional parameters for the ContainerClient.NewListBlobFlatSegmentPager -// method. 
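The Marker/Maxresults comments here and in the listing options below all describe the same continuation protocol: send an empty marker first, then echo each NextMarker back until the service stops returning one, since a page may legitimately hold fewer items than maxresults. A sketch of that loop; page, listPage, and process are hypothetical stand-ins for a concrete segment response, segment call, and consumer:

    // page is a stand-in for any segment response carrying a NextMarker.
    type page struct {
        NextMarker *string
        // ...items elided
    }

    func drain(listPage func(marker string, max int32) (*page, error), process func(*page)) error {
        marker := ""
        for {
            resp, err := listPage(marker, 1000) // server caps maxresults at 5000
            if err != nil {
                return err
            }
            process(resp)
            if resp.NextMarker == nil || *resp.NextMarker == "" {
                break // no continuation token: listing is complete
            }
            marker = *resp.NextMarker
        }
        return nil
    }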
-type ContainerClientListBlobFlatSegmentOptions struct { - // Include this parameter to specify one or more datasets to include in the response. - Include []ListBlobsIncludeItem - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. - Maxresults *int32 - // Filters the results to return only containers whose name begins with the specified prefix. - Prefix *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientListBlobHierarchySegmentOptions contains the optional parameters for the ContainerClient.NewListBlobHierarchySegmentPager -// method. -type ContainerClientListBlobHierarchySegmentOptions struct { - // Include this parameter to specify one or more datasets to include in the response. - Include []ListBlobsIncludeItem - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. - Maxresults *int32 - // Filters the results to return only containers whose name begins with the specified prefix. - Prefix *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
- // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientReleaseLeaseOptions contains the optional parameters for the ContainerClient.ReleaseLease method. -type ContainerClientReleaseLeaseOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientRenameOptions contains the optional parameters for the ContainerClient.Rename method. -type ContainerClientRenameOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // A lease ID for the source path. If specified, the source path must have an active lease and the lease ID must match. - SourceLeaseID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientRenewLeaseOptions contains the optional parameters for the ContainerClient.RenewLease method. -type ContainerClientRenewLeaseOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientRestoreOptions contains the optional parameters for the ContainerClient.Restore method. -type ContainerClientRestoreOptions struct { - // Optional. Version 2019-12-12 and later. Specifies the name of the deleted container to restore. - DeletedContainerName *string - // Optional. Version 2019-12-12 and later. Specifies the version of the deleted container to restore. - DeletedContainerVersion *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientSetAccessPolicyOptions contains the optional parameters for the ContainerClient.SetAccessPolicy method. -type ContainerClientSetAccessPolicyOptions struct { - // Specifies whether data in the container may be accessed publicly and the level of access - Access *PublicAccessType - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. 
For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientSetMetadataOptions contains the optional parameters for the ContainerClient.SetMetadata method. -type ContainerClientSetMetadataOptions struct { - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]*string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientSubmitBatchOptions contains the optional parameters for the ContainerClient.SubmitBatch method. -type ContainerClientSubmitBatchOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerCPKScopeInfo contains a group of parameters for the ContainerClient.Create method. -type ContainerCPKScopeInfo struct { - // Optional. Version 2019-07-07 and later. Specifies the default encryption scope to set on the container and use for all - // future writes. - DefaultEncryptionScope *string - // Optional. Version 2019-07-07 and newer. If true, prevents any request from specifying a different encryption scope than - // the scope set on the container. - PreventEncryptionScopeOverride *bool -} - // ContainerItem - An Azure Storage container type ContainerItem struct { // REQUIRED @@ -1133,27 +250,6 @@ type CORSRule struct { MaxAgeInSeconds *int32 `xml:"MaxAgeInSeconds"` } -// CPKInfo contains a group of parameters for the BlobClient.Download method. -type CPKInfo struct { - // The algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided - // if the x-ms-encryption-key header is provided. - EncryptionAlgorithm *EncryptionAlgorithmType - // Optional. Specifies the encryption key to use to encrypt the data provided in the request. If not specified, encryption - // is performed with the root account encryption key. For more information, see - // Encryption at Rest for Azure Storage Services. - EncryptionKey *string - // The SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. 
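For this CPKInfo group, the customer-provided-key contract is that the AES-256 key travels base64-encoded and its SHA-256 hash travels base64-encoded alongside it. A sketch of deriving both wire values, same-package assumption as above; EncryptionAlgorithmTypeAES256 is an assumed constant name:

    import (
        "crypto/sha256"
        "encoding/base64"

        "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
    )

    // Derive the wire values for a customer-provided AES-256 key.
    func exampleCPK(key [32]byte) CPKInfo {
        hash := sha256.Sum256(key[:])
        return CPKInfo{
            EncryptionAlgorithm: to.Ptr(EncryptionAlgorithmTypeAES256), // assumed constant name
            EncryptionKey:       to.Ptr(base64.StdEncoding.EncodeToString(key[:])),
            EncryptionKeySHA256: to.Ptr(base64.StdEncoding.EncodeToString(hash[:])),
        }
    }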
- EncryptionKeySHA256 *string -} - -// CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -type CPKScopeInfo struct { - // Optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided - // in the request. If not specified, encryption is performed with the default - // account encryption scope. For more information, see Encryption at Rest for Azure Storage Services. - EncryptionScope *string -} - // DelimitedTextConfiguration - Groups the settings used for interpreting the blob data if the blob is delimited text formatted. type DelimitedTextConfiguration struct { // The string used to separate columns. @@ -1225,12 +321,6 @@ type KeyInfo struct { Start *string `xml:"Start"` } -// LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -type LeaseAccessConditions struct { - // If specified, the operation only succeeds if the resource's lease is active and matches this ID. - LeaseID *string -} - // ListBlobsFlatSegmentResponse - An enumeration of blobs type ListBlobsFlatSegmentResponse struct { // REQUIRED @@ -1310,195 +400,6 @@ type Metrics struct { Version *string `xml:"Version"` } -// ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. -type ModifiedAccessConditions struct { - // Specify an ETag value to operate only on blobs with a matching value. - IfMatch *azcore.ETag - // Specify this header value to operate only on a blob if it has been modified since the specified date/time. - IfModifiedSince *time.Time - // Specify an ETag value to operate only on blobs without a matching value. - IfNoneMatch *azcore.ETag - // Specify a SQL where clause on blob tags to operate only on blobs with a matching value. - IfTags *string - // Specify this header value to operate only on a blob if it has not been modified since the specified date/time. - IfUnmodifiedSince *time.Time -} - -// PageBlobClientClearPagesOptions contains the optional parameters for the PageBlobClient.ClearPages method. -type PageBlobClientClearPagesOptions struct { - // Return only the bytes of the blob in the specified range. - Range *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// PageBlobClientCopyIncrementalOptions contains the optional parameters for the PageBlobClient.CopyIncremental method. -type PageBlobClientCopyIncrementalOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// PageBlobClientCreateOptions contains the optional parameters for the PageBlobClient.Create method. -type PageBlobClientCreateOptions struct { - // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. 
The value of - // the sequence number must be between 0 and 2^63 - 1. - BlobSequenceNumber *int64 - // Optional. Used to set blob tags in various blob operations. - BlobTagsString *string - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *ImmutabilityPolicySetting - // Specified if a legal hold should be set on the blob. - LegalHold *bool - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]*string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // Optional. Indicates the tier to be set on the page blob. - Tier *PremiumPageBlobAccessTier - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// PageBlobClientGetPageRangesDiffOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesDiffPager -// method. -type PageBlobClientGetPageRangesDiffOptions struct { - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. - Maxresults *int32 - // Optional. This header is only supported in service versions 2019-04-19 and after and specifies the URL of a previous snapshot - // of the target blob. The response will only contain pages that were changed - // between the target blob and its previous snapshot. - PrevSnapshotURL *string - // Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a DateTime value that specifies that the response - // will contain only pages that were changed between target blob and previous - // snapshot. Changed pages include both updated and cleared pages. 
The target blob may be a snapshot, as long as the snapshot - // specified by prevsnapshot is the older of the two. Note that incremental - // snapshots are currently supported only for blobs created on or after January 1, 2016. - Prevsnapshot *string - // Return only the bytes of the blob in the specified range. - Range *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// PageBlobClientGetPageRangesOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesPager method. -type PageBlobClientGetPageRangesOptions struct { - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. - Maxresults *int32 - // Return only the bytes of the blob in the specified range. - Range *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// PageBlobClientResizeOptions contains the optional parameters for the PageBlobClient.Resize method. -type PageBlobClientResizeOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. 
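Putting the Prevsnapshot semantics above together: a page-ranges-diff request names an older snapshot of the same blob and gets back only the pages changed or cleared since it. A sketch of the options a caller might pass, same-package assumption; the snapshot value and range are illustrative:

    import "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"

    // Ask for pages changed since an earlier snapshot, over the first 1 MiB.
    func examplePageDiffOptions() PageBlobClientGetPageRangesDiffOptions {
        return PageBlobClientGetPageRangesDiffOptions{
            Prevsnapshot: to.Ptr("2016-01-01T00:00:00.0000000Z"), // older snapshot of this blob
            Range:        to.Ptr("bytes=0-1048575"),
        }
    }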
- RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// PageBlobClientUpdateSequenceNumberOptions contains the optional parameters for the PageBlobClient.UpdateSequenceNumber -// method. -type PageBlobClientUpdateSequenceNumberOptions struct { - // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of - // the sequence number must be between 0 and 2^63 - 1. - BlobSequenceNumber *int64 - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// PageBlobClientUploadPagesFromURLOptions contains the optional parameters for the PageBlobClient.UploadPagesFromURL method. -type PageBlobClientUploadPagesFromURLOptions struct { - // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. - CopySourceAuthorization *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // Specify the md5 calculated for the range of bytes that must be read from the copy source. - SourceContentMD5 []byte - // Specify the crc64 calculated for the range of bytes that must be read from the copy source. - SourceContentcrc64 []byte - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// PageBlobClientUploadPagesOptions contains the optional parameters for the PageBlobClient.UploadPages method. -type PageBlobClientUploadPagesOptions struct { - // Return only the bytes of the blob in the specified range. - Range *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // Specify the transactional crc64 for the body, to be validated by the service. - TransactionalContentCRC64 []byte - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte -} - // PageList - the list of pages type PageList struct { ClearRange []*ClearRange `xml:"ClearRange"` @@ -1561,122 +462,6 @@ type RetentionPolicy struct { Days *int32 `xml:"Days"` } -// SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages method. -type SequenceNumberAccessConditions struct { - // Specify this header value to operate only on a blob if it has the specified sequence number. 
- IfSequenceNumberEqualTo *int64 - // Specify this header value to operate only on a blob if it has a sequence number less than the specified. - IfSequenceNumberLessThan *int64 - // Specify this header value to operate only on a blob if it has a sequence number less than or equal to the specified. - IfSequenceNumberLessThanOrEqualTo *int64 -} - -// ServiceClientFilterBlobsOptions contains the optional parameters for the ServiceClient.FilterBlobs method. -type ServiceClientFilterBlobsOptions struct { - // Include this parameter to specify one or more datasets to include in the response. - Include []FilterBlobsIncludeItem - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. - Maxresults *int32 - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ServiceClientGetAccountInfoOptions contains the optional parameters for the ServiceClient.GetAccountInfo method. -type ServiceClientGetAccountInfoOptions struct { - // placeholder for future optional parameters -} - -// ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method. -type ServiceClientGetPropertiesOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ServiceClientGetStatisticsOptions contains the optional parameters for the ServiceClient.GetStatistics method. -type ServiceClientGetStatisticsOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
- // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ServiceClientGetUserDelegationKeyOptions contains the optional parameters for the ServiceClient.GetUserDelegationKey method. -type ServiceClientGetUserDelegationKeyOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ServiceClientListContainersSegmentOptions contains the optional parameters for the ServiceClient.NewListContainersSegmentPager -// method. -type ServiceClientListContainersSegmentOptions struct { - // Include this parameter to specify that the container's metadata be returned as part of the response body. - Include []ListContainersIncludeType - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. - Maxresults *int32 - // Filters the results to return only containers whose name begins with the specified prefix. - Prefix *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method. -type ServiceClientSetPropertiesOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ServiceClientSubmitBatchOptions contains the optional parameters for the ServiceClient.SubmitBatch method. 
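The GetUserDelegationKey options above carry no key parameters themselves; the validity window travels in the KeyInfo value retained earlier in this file, whose fields are RFC3339 strings. A sketch of building a one-hour window, same-package assumption; only Start is visible in the hunk context above, so the Expiry field name is assumed:

    import (
        "time"

        "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
    )

    // Build a KeyInfo whose delegation key is valid for the next hour.
    func exampleKeyInfo() KeyInfo {
        now := time.Now().UTC()
        return KeyInfo{
            Start:  to.Ptr(now.Format(time.RFC3339)),
            Expiry: to.Ptr(now.Add(time.Hour).Format(time.RFC3339)), // assumed field name
        }
    }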
-type ServiceClientSubmitBatchOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - // SignedIdentifier - signed identifier type SignedIdentifier struct { // REQUIRED; An Access policy @@ -1686,20 +471,6 @@ type SignedIdentifier struct { ID *string `xml:"Id"` } -// SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL method. -type SourceModifiedAccessConditions struct { - // Specify an ETag value to operate only on blobs with a matching value. - SourceIfMatch *azcore.ETag - // Specify this header value to operate only on a blob if it has been modified since the specified date/time. - SourceIfModifiedSince *time.Time - // Specify an ETag value to operate only on blobs without a matching value. - SourceIfNoneMatch *azcore.ETag - // Specify a SQL where clause on blob tags to operate only on blobs with a matching value. - SourceIfTags *string - // Specify this header value to operate only on a blob if it has not been modified since the specified date/time. - SourceIfUnmodifiedSince *time.Time -} - // StaticWebsite - The properties that enable an account to host a static website type StaticWebsite struct { // REQUIRED; Indicates whether this account is hosting a static website diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go index dc5dba1037a..e2e64d6fffe 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go @@ -1,11 +1,7 @@ -//go:build go1.18 -// +build go1.18 - // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. 
package generated @@ -24,12 +20,12 @@ func (a AccessPolicy) MarshalXML(enc *xml.Encoder, start xml.StartElement) error type alias AccessPolicy aux := &struct { *alias - Expiry *timeRFC3339 `xml:"Expiry"` - Start *timeRFC3339 `xml:"Start"` + Expiry *dateTimeRFC3339 `xml:"Expiry"` + Start *dateTimeRFC3339 `xml:"Start"` }{ alias: (*alias)(&a), - Expiry: (*timeRFC3339)(a.Expiry), - Start: (*timeRFC3339)(a.Start), + Expiry: (*dateTimeRFC3339)(a.Expiry), + Start: (*dateTimeRFC3339)(a.Start), } return enc.EncodeElement(aux, start) } @@ -39,16 +35,20 @@ func (a *AccessPolicy) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) er type alias AccessPolicy aux := &struct { *alias - Expiry *timeRFC3339 `xml:"Expiry"` - Start *timeRFC3339 `xml:"Start"` + Expiry *dateTimeRFC3339 `xml:"Expiry"` + Start *dateTimeRFC3339 `xml:"Start"` }{ alias: (*alias)(a), } if err := dec.DecodeElement(aux, &start); err != nil { return err } - a.Expiry = (*time.Time)(aux.Expiry) - a.Start = (*time.Time)(aux.Start) + if aux.Expiry != nil && !(*time.Time)(aux.Expiry).IsZero() { + a.Expiry = (*time.Time)(aux.Expiry) + } + if aux.Start != nil && !(*time.Time)(aux.Start).IsZero() { + a.Start = (*time.Time)(aux.Start) + } return nil } @@ -106,25 +106,25 @@ func (b BlobProperties) MarshalXML(enc *xml.Encoder, start xml.StartElement) err type alias BlobProperties aux := &struct { *alias - AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"` - ContentMD5 *string `xml:"Content-MD5"` - CopyCompletionTime *timeRFC1123 `xml:"CopyCompletionTime"` - CreationTime *timeRFC1123 `xml:"Creation-Time"` - DeletedTime *timeRFC1123 `xml:"DeletedTime"` - ExpiresOn *timeRFC1123 `xml:"Expiry-Time"` - ImmutabilityPolicyExpiresOn *timeRFC1123 `xml:"ImmutabilityPolicyUntilDate"` - LastAccessedOn *timeRFC1123 `xml:"LastAccessTime"` - LastModified *timeRFC1123 `xml:"Last-Modified"` + AccessTierChangeTime *dateTimeRFC1123 `xml:"AccessTierChangeTime"` + ContentMD5 *string `xml:"Content-MD5"` + CopyCompletionTime *dateTimeRFC1123 `xml:"CopyCompletionTime"` + CreationTime *dateTimeRFC1123 `xml:"Creation-Time"` + DeletedTime *dateTimeRFC1123 `xml:"DeletedTime"` + ExpiresOn *dateTimeRFC1123 `xml:"Expiry-Time"` + ImmutabilityPolicyExpiresOn *dateTimeRFC1123 `xml:"ImmutabilityPolicyUntilDate"` + LastAccessedOn *dateTimeRFC1123 `xml:"LastAccessTime"` + LastModified *dateTimeRFC1123 `xml:"Last-Modified"` }{ alias: (*alias)(&b), - AccessTierChangeTime: (*timeRFC1123)(b.AccessTierChangeTime), - CopyCompletionTime: (*timeRFC1123)(b.CopyCompletionTime), - CreationTime: (*timeRFC1123)(b.CreationTime), - DeletedTime: (*timeRFC1123)(b.DeletedTime), - ExpiresOn: (*timeRFC1123)(b.ExpiresOn), - ImmutabilityPolicyExpiresOn: (*timeRFC1123)(b.ImmutabilityPolicyExpiresOn), - LastAccessedOn: (*timeRFC1123)(b.LastAccessedOn), - LastModified: (*timeRFC1123)(b.LastModified), + AccessTierChangeTime: (*dateTimeRFC1123)(b.AccessTierChangeTime), + CopyCompletionTime: (*dateTimeRFC1123)(b.CopyCompletionTime), + CreationTime: (*dateTimeRFC1123)(b.CreationTime), + DeletedTime: (*dateTimeRFC1123)(b.DeletedTime), + ExpiresOn: (*dateTimeRFC1123)(b.ExpiresOn), + ImmutabilityPolicyExpiresOn: (*dateTimeRFC1123)(b.ImmutabilityPolicyExpiresOn), + LastAccessedOn: (*dateTimeRFC1123)(b.LastAccessedOn), + LastModified: (*dateTimeRFC1123)(b.LastModified), } if b.ContentMD5 != nil { encodedContentMD5 := runtime.EncodeByteArray(b.ContentMD5, runtime.Base64StdFormat) @@ -138,34 +138,50 @@ func (b *BlobProperties) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) type alias 
BlobProperties aux := &struct { *alias - AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"` - ContentMD5 *string `xml:"Content-MD5"` - CopyCompletionTime *timeRFC1123 `xml:"CopyCompletionTime"` - CreationTime *timeRFC1123 `xml:"Creation-Time"` - DeletedTime *timeRFC1123 `xml:"DeletedTime"` - ExpiresOn *timeRFC1123 `xml:"Expiry-Time"` - ImmutabilityPolicyExpiresOn *timeRFC1123 `xml:"ImmutabilityPolicyUntilDate"` - LastAccessedOn *timeRFC1123 `xml:"LastAccessTime"` - LastModified *timeRFC1123 `xml:"Last-Modified"` + AccessTierChangeTime *dateTimeRFC1123 `xml:"AccessTierChangeTime"` + ContentMD5 *string `xml:"Content-MD5"` + CopyCompletionTime *dateTimeRFC1123 `xml:"CopyCompletionTime"` + CreationTime *dateTimeRFC1123 `xml:"Creation-Time"` + DeletedTime *dateTimeRFC1123 `xml:"DeletedTime"` + ExpiresOn *dateTimeRFC1123 `xml:"Expiry-Time"` + ImmutabilityPolicyExpiresOn *dateTimeRFC1123 `xml:"ImmutabilityPolicyUntilDate"` + LastAccessedOn *dateTimeRFC1123 `xml:"LastAccessTime"` + LastModified *dateTimeRFC1123 `xml:"Last-Modified"` }{ alias: (*alias)(b), } if err := dec.DecodeElement(aux, &start); err != nil { return err } - b.AccessTierChangeTime = (*time.Time)(aux.AccessTierChangeTime) + if aux.AccessTierChangeTime != nil && !(*time.Time)(aux.AccessTierChangeTime).IsZero() { + b.AccessTierChangeTime = (*time.Time)(aux.AccessTierChangeTime) + } if aux.ContentMD5 != nil { if err := runtime.DecodeByteArray(*aux.ContentMD5, &b.ContentMD5, runtime.Base64StdFormat); err != nil { return err } } - b.CopyCompletionTime = (*time.Time)(aux.CopyCompletionTime) - b.CreationTime = (*time.Time)(aux.CreationTime) - b.DeletedTime = (*time.Time)(aux.DeletedTime) - b.ExpiresOn = (*time.Time)(aux.ExpiresOn) - b.ImmutabilityPolicyExpiresOn = (*time.Time)(aux.ImmutabilityPolicyExpiresOn) - b.LastAccessedOn = (*time.Time)(aux.LastAccessedOn) - b.LastModified = (*time.Time)(aux.LastModified) + if aux.CopyCompletionTime != nil && !(*time.Time)(aux.CopyCompletionTime).IsZero() { + b.CopyCompletionTime = (*time.Time)(aux.CopyCompletionTime) + } + if aux.CreationTime != nil && !(*time.Time)(aux.CreationTime).IsZero() { + b.CreationTime = (*time.Time)(aux.CreationTime) + } + if aux.DeletedTime != nil && !(*time.Time)(aux.DeletedTime).IsZero() { + b.DeletedTime = (*time.Time)(aux.DeletedTime) + } + if aux.ExpiresOn != nil && !(*time.Time)(aux.ExpiresOn).IsZero() { + b.ExpiresOn = (*time.Time)(aux.ExpiresOn) + } + if aux.ImmutabilityPolicyExpiresOn != nil && !(*time.Time)(aux.ImmutabilityPolicyExpiresOn).IsZero() { + b.ImmutabilityPolicyExpiresOn = (*time.Time)(aux.ImmutabilityPolicyExpiresOn) + } + if aux.LastAccessedOn != nil && !(*time.Time)(aux.LastAccessedOn).IsZero() { + b.LastAccessedOn = (*time.Time)(aux.LastAccessedOn) + } + if aux.LastModified != nil && !(*time.Time)(aux.LastModified).IsZero() { + b.LastModified = (*time.Time)(aux.LastModified) + } return nil } @@ -249,12 +265,12 @@ func (c ContainerProperties) MarshalXML(enc *xml.Encoder, start xml.StartElement type alias ContainerProperties aux := &struct { *alias - DeletedTime *timeRFC1123 `xml:"DeletedTime"` - LastModified *timeRFC1123 `xml:"Last-Modified"` + DeletedTime *dateTimeRFC1123 `xml:"DeletedTime"` + LastModified *dateTimeRFC1123 `xml:"Last-Modified"` }{ alias: (*alias)(&c), - DeletedTime: (*timeRFC1123)(c.DeletedTime), - LastModified: (*timeRFC1123)(c.LastModified), + DeletedTime: (*dateTimeRFC1123)(c.DeletedTime), + LastModified: (*dateTimeRFC1123)(c.LastModified), } return enc.EncodeElement(aux, start) } @@ -264,16 +280,20 @@ func (c 
*ContainerProperties) UnmarshalXML(dec *xml.Decoder, start xml.StartElem type alias ContainerProperties aux := &struct { *alias - DeletedTime *timeRFC1123 `xml:"DeletedTime"` - LastModified *timeRFC1123 `xml:"Last-Modified"` + DeletedTime *dateTimeRFC1123 `xml:"DeletedTime"` + LastModified *dateTimeRFC1123 `xml:"Last-Modified"` }{ alias: (*alias)(c), } if err := dec.DecodeElement(aux, &start); err != nil { return err } - c.DeletedTime = (*time.Time)(aux.DeletedTime) - c.LastModified = (*time.Time)(aux.LastModified) + if aux.DeletedTime != nil && !(*time.Time)(aux.DeletedTime).IsZero() { + c.DeletedTime = (*time.Time)(aux.DeletedTime) + } + if aux.LastModified != nil && !(*time.Time)(aux.LastModified).IsZero() { + c.LastModified = (*time.Time)(aux.LastModified) + } return nil } @@ -297,10 +317,10 @@ func (g GeoReplication) MarshalXML(enc *xml.Encoder, start xml.StartElement) err type alias GeoReplication aux := &struct { *alias - LastSyncTime *timeRFC1123 `xml:"LastSyncTime"` + LastSyncTime *dateTimeRFC1123 `xml:"LastSyncTime"` }{ alias: (*alias)(&g), - LastSyncTime: (*timeRFC1123)(g.LastSyncTime), + LastSyncTime: (*dateTimeRFC1123)(g.LastSyncTime), } return enc.EncodeElement(aux, start) } @@ -310,14 +330,16 @@ func (g *GeoReplication) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) type alias GeoReplication aux := &struct { *alias - LastSyncTime *timeRFC1123 `xml:"LastSyncTime"` + LastSyncTime *dateTimeRFC1123 `xml:"LastSyncTime"` }{ alias: (*alias)(g), } if err := dec.DecodeElement(aux, &start); err != nil { return err } - g.LastSyncTime = (*time.Time)(aux.LastSyncTime) + if aux.LastSyncTime != nil && !(*time.Time)(aux.LastSyncTime).IsZero() { + g.LastSyncTime = (*time.Time)(aux.LastSyncTime) + } return nil } @@ -414,12 +436,12 @@ func (u UserDelegationKey) MarshalXML(enc *xml.Encoder, start xml.StartElement) type alias UserDelegationKey aux := &struct { *alias - SignedExpiry *timeRFC3339 `xml:"SignedExpiry"` - SignedStart *timeRFC3339 `xml:"SignedStart"` + SignedExpiry *dateTimeRFC3339 `xml:"SignedExpiry"` + SignedStart *dateTimeRFC3339 `xml:"SignedStart"` }{ alias: (*alias)(&u), - SignedExpiry: (*timeRFC3339)(u.SignedExpiry), - SignedStart: (*timeRFC3339)(u.SignedStart), + SignedExpiry: (*dateTimeRFC3339)(u.SignedExpiry), + SignedStart: (*dateTimeRFC3339)(u.SignedStart), } return enc.EncodeElement(aux, start) } @@ -429,16 +451,20 @@ func (u *UserDelegationKey) UnmarshalXML(dec *xml.Decoder, start xml.StartElemen type alias UserDelegationKey aux := &struct { *alias - SignedExpiry *timeRFC3339 `xml:"SignedExpiry"` - SignedStart *timeRFC3339 `xml:"SignedStart"` + SignedExpiry *dateTimeRFC3339 `xml:"SignedExpiry"` + SignedStart *dateTimeRFC3339 `xml:"SignedStart"` }{ alias: (*alias)(u), } if err := dec.DecodeElement(aux, &start); err != nil { return err } - u.SignedExpiry = (*time.Time)(aux.SignedExpiry) - u.SignedStart = (*time.Time)(aux.SignedStart) + if aux.SignedExpiry != nil && !(*time.Time)(aux.SignedExpiry).IsZero() { + u.SignedExpiry = (*time.Time)(aux.SignedExpiry) + } + if aux.SignedStart != nil && !(*time.Time)(aux.SignedStart).IsZero() { + u.SignedStart = (*time.Time)(aux.SignedStart) + } return nil } @@ -452,18 +478,8 @@ func populate(m map[string]any, k string, v any) { } } -func populateAny(m map[string]any, k string, v any) { - if v == nil { - return - } else if azcore.IsNullValue(v) { - m[k] = nil - } else { - m[k] = v - } -} - func unpopulate(data json.RawMessage, fn string, v any) error { - if data == nil { + if data == nil || string(data) == "null" { return nil } 
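The regenerated serde code above makes three behavioral changes: the time wrappers are renamed from timeRFC3339/timeRFC1123 to dateTimeRFC3339/dateTimeRFC1123, decoded timestamps are only copied back into the model when non-zero, and the JSON-side unpopulate now treats a literal "null" like a missing field. A minimal, self-contained sketch of that alias-marshalling pattern; the dateTimeRFC1123 wrapper, Properties type, and unpopulate below are stand-ins, not the vendored identifiers:

package sketch

import (
	"encoding/json"
	"encoding/xml"
	"time"
)

// dateTimeRFC1123 mirrors the generated wrapper: a named time.Time whose
// XML round-trip uses the RFC 1123 layout.
type dateTimeRFC1123 time.Time

func (t dateTimeRFC1123) MarshalXML(enc *xml.Encoder, start xml.StartElement) error {
	return enc.EncodeElement(time.Time(t).Format(time.RFC1123), start)
}

func (t *dateTimeRFC1123) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error {
	var s string
	if err := dec.DecodeElement(&s, &start); err != nil {
		return err
	}
	parsed, err := time.Parse(time.RFC1123, s)
	if err != nil {
		return err
	}
	*t = dateTimeRFC1123(parsed)
	return nil
}

// Properties stands in for a generated model with a single timestamp field.
type Properties struct {
	LastModified *time.Time
}

func (p *Properties) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error {
	type alias Properties // defined type: sheds methods, so no recursion
	aux := &struct {
		*alias
		LastModified *dateTimeRFC1123 `xml:"Last-Modified"`
	}{alias: (*alias)(p)}
	if err := dec.DecodeElement(aux, &start); err != nil {
		return err
	}
	// The new guard: a missing or zero-valued element leaves the field nil
	// instead of materializing a pointer to the zero time.
	if aux.LastModified != nil && !(*time.Time)(aux.LastModified).IsZero() {
		p.LastModified = (*time.Time)(aux.LastModified)
	}
	return nil
}

// unpopulate mirrors the JSON-side change: a literal "null" now
// short-circuits the same way a missing field does.
func unpopulate(data json.RawMessage, v any) error {
	if data == nil || string(data) == "null" {
		return nil
	}
	return json.Unmarshal(data, v)
}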
if err := json.Unmarshal(data, v); err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go new file mode 100644 index 00000000000..89d05310a8e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go @@ -0,0 +1,1484 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package generated + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "time" +) + +// AppendBlobClientAppendBlockFromURLOptions contains the optional parameters for the AppendBlobClient.AppendBlockFromURL +// method. +type AppendBlobClientAppendBlockFromURLOptions struct { + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + + // Specify the crc64 calculated for the range of bytes that must be read from the copy source. + SourceContentcrc64 []byte + + // Bytes of source data in the specified range. + SourceRange *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// AppendBlobClientAppendBlockOptions contains the optional parameters for the AppendBlobClient.AppendBlock method. +type AppendBlobClientAppendBlockOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// AppendBlobClientCreateOptions contains the optional parameters for the AppendBlobClient.Create method. +type AppendBlobClientCreateOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + + // Specified if a legal hold should be set on the blob. + LegalHold *bool + + // Optional. 
Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// AppendBlobClientSealOptions contains the optional parameters for the AppendBlobClient.Seal method. +type AppendBlobClientSealOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock method. +type AppendPositionAccessConditions struct { + // Optional conditional header, used only for the Append Block operation. A number indicating the byte offset to compare. + // Append Block will succeed only if the append position is equal to this number. If + // it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed). + AppendPosition *int64 + + // Optional conditional header. The max length in bytes permitted for the append blob. If the Append Block operation would + // cause the blob to exceed that limit or if the blob size is already greater than + // the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code 412 - + // Precondition Failed). + MaxSize *int64 +} + +// BlobClientAbortCopyFromURLOptions contains the optional parameters for the BlobClient.AbortCopyFromURL method. +type BlobClientAbortCopyFromURLOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientAcquireLeaseOptions contains the optional parameters for the BlobClient.AcquireLease method. +type BlobClientAcquireLeaseOptions struct { + // Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is + // not in the correct format. 
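AppendPositionAccessConditions above is the optimistic-concurrency control for AppendBlock. A local model of the service-side rule its doc comments describe (hypothetical names, not the SDK call itself):

package sketch

import "errors"

// errPreconditionFailed stands in for the HTTP 412 responses named above.
var errPreconditionFailed = errors.New("412 Precondition Failed")

type appendConditions struct {
	AppendPosition *int64 // must equal the current length, if set
	MaxSize        *int64 // resulting length must not exceed this, if set
}

// checkAppend models how the service evaluates an Append Block request of
// blockLen bytes against a blob that is currently currentLen bytes long.
func checkAppend(currentLen, blockLen int64, c appendConditions) error {
	if c.AppendPosition != nil && *c.AppendPosition != currentLen {
		return errPreconditionFailed // AppendPositionConditionNotMet
	}
	if c.MaxSize != nil && currentLen+blockLen > *c.MaxSize {
		return errPreconditionFailed // MaxBlobSizeConditionNotMet
	}
	return nil
}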
See Guid Constructor (String) for a list of valid GUID + // string formats. + ProposedLeaseID *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientBreakLeaseOptions contains the optional parameters for the BlobClient.BreakLease method. +type BlobClientBreakLeaseOptions struct { + // For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This + // break period is only used if it is shorter than the time remaining on the + // lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has + // expired, but the lease may be held for longer than the break period. If this + // header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, + // and an infinite lease breaks immediately. + BreakPeriod *int32 + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientChangeLeaseOptions contains the optional parameters for the BlobClient.ChangeLease method. +type BlobClientChangeLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientCopyFromURLOptions contains the optional parameters for the BlobClient.CopyFromURL method. +type BlobClientCopyFromURLOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + + // Optional, default 'replace'. Indicates if source tags should be copied or replaced with the tags specified by x-ms-tags. + CopySourceTags *BlobCopySourceTags + + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + + // Specified if a legal hold should be set on the blob. + LegalHold *bool + + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. 
If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientCreateSnapshotOptions contains the optional parameters for the BlobClient.CreateSnapshot method. +type BlobClientCreateSnapshotOptions struct { + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the BlobClient.DeleteImmutabilityPolicy +// method. +type BlobClientDeleteImmutabilityPolicyOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientDeleteOptions contains the optional parameters for the BlobClient.Delete method. +type BlobClientDeleteOptions struct { + // Required if the blob has associated snapshots. Specify one of the following two options: include: Delete the base blob + // and all of its snapshots. only: Delete only the blob's snapshots and not the blob + // itself + DeleteSnapshots *DeleteSnapshotsOptionType + + // Optional. Only possible value is 'permanent', which specifies to permanently delete a blob if blob soft delete is enabled. 
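Every optional field in these generated option structs is a pointer, which keeps "unset" distinguishable from a zero value. A sketch of the usual population pattern with a generic pointer helper, similar in spirit to azcore's to.Ptr; the option struct and constant below are stand-ins:

package sketch

// Ptr returns a pointer to v; handy because all optional fields are pointers.
func Ptr[T any](v T) *T { return &v }

type deleteSnapshotsOptionType string

const deleteSnapshotsInclude deleteSnapshotsOptionType = "include"

// deleteOptions stands in for BlobClientDeleteOptions above.
type deleteOptions struct {
	DeleteSnapshots *deleteSnapshotsOptionType
	Timeout         *int32
}

var opts = deleteOptions{
	DeleteSnapshots: Ptr(deleteSnapshotsInclude),
	Timeout:         Ptr[int32](30), // seconds, per the Timeout doc comment
}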
+ DeleteType *DeleteType + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientDownloadOptions contains the optional parameters for the BlobClient.Download method. +type BlobClientDownloadOptions struct { + // Return only the bytes of the blob in the specified range. + Range *string + + // When set to true and specified together with the Range, the service returns the CRC64 hash for the range, as long as the + // range is less than or equal to 4 MB in size. + RangeGetContentCRC64 *bool + + // When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the + // range is less than or equal to 4 MB in size. + RangeGetContentMD5 *bool + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientGetAccountInfoOptions contains the optional parameters for the BlobClient.GetAccountInfo method. +type BlobClientGetAccountInfoOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientGetPropertiesOptions contains the optional parameters for the BlobClient.GetProperties method. 
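Per the Download options above, the service honors RangeGetContentMD5 and RangeGetContentCRC64 only alongside an explicit Range of at most 4 MB. A sketch of building a conforming byte-range request, with hypothetical helper names and the standard HTTP byte-range layout assumed:

package sketch

import "fmt"

const maxHashedRange = 4 * 1024 * 1024 // the 4 MB cap quoted in the field docs

// rangeHeader renders the HTTP byte-range string for the Range field.
func rangeHeader(offset, count int64) string {
	return fmt.Sprintf("bytes=%d-%d", offset, offset+count-1)
}

// wantRangeMD5 reports whether the service would honor RangeGetContentMD5
// for a range of count bytes.
func wantRangeMD5(count int64) bool {
	return count > 0 && count <= maxHashedRange
}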
+type BlobClientGetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientGetTagsOptions contains the optional parameters for the BlobClient.GetTags method. +type BlobClientGetTagsOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientQueryOptions contains the optional parameters for the BlobClient.Query method. +type BlobClientQueryOptions struct { + // the query request + QueryRequest *QueryRequest + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientReleaseLeaseOptions contains the optional parameters for the BlobClient.ReleaseLease method. +type BlobClientReleaseLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. 
For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientRenewLeaseOptions contains the optional parameters for the BlobClient.RenewLease method. +type BlobClientRenewLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetExpiryOptions contains the optional parameters for the BlobClient.SetExpiry method. +type BlobClientSetExpiryOptions struct { + // The time to set the blob to expiry + ExpiresOn *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetHTTPHeadersOptions contains the optional parameters for the BlobClient.SetHTTPHeaders method. +type BlobClientSetHTTPHeadersOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetImmutabilityPolicyOptions contains the optional parameters for the BlobClient.SetImmutabilityPolicy method. +type BlobClientSetImmutabilityPolicyOptions struct { + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetLegalHoldOptions contains the optional parameters for the BlobClient.SetLegalHold method. +type BlobClientSetLegalHoldOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
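BlobClientSetExpiryOptions above carries ExpiresOn as a plain *string. Assuming the REST API's RFC 1123 GMT layout for absolute expiry values (an assumption; the generated code does not enforce a format), a formatting helper could look like this:

package sketch

import (
	"net/http"
	"time"
)

// expiryAbsolute renders t in the RFC 1123 / GMT layout (http.TimeFormat),
// which is assumed here to be what absolute x-ms-expiry-time values use.
func expiryAbsolute(t time.Time) *string {
	s := t.UTC().Format(http.TimeFormat)
	return &s
}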
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetMetadataOptions contains the optional parameters for the BlobClient.SetMetadata method. +type BlobClientSetMetadataOptions struct { + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetTagsOptions contains the optional parameters for the BlobClient.SetTags method. +type BlobClientSetTagsOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte + + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientSetTierOptions contains the optional parameters for the BlobClient.SetTier method. +type BlobClientSetTierOptions struct { + // Optional: Indicates the priority with which to rehydrate an archived blob. + RehydratePriority *RehydratePriority + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
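The Metadata fields throughout these options are map[string]*string rather than map[string]string. A small adapter keeps call sites readable; toMetadata is a hypothetical helper, not a vendored one:

package sketch

// toMetadata converts a plain string map into the map[string]*string shape
// the generated Metadata fields use.
func toMetadata(m map[string]string) map[string]*string {
	out := make(map[string]*string, len(m))
	for k, v := range m {
		v := v // copy so each entry gets its own pointer
		out[k] = &v
	}
	return out
}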
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientStartCopyFromURLOptions contains the optional parameters for the BlobClient.StartCopyFromURL method. +type BlobClientStartCopyFromURLOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + + // Specified if a legal hold should be set on the blob. + LegalHold *bool + + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Optional: Indicates the priority with which to rehydrate an archived blob. + RehydratePriority *RehydratePriority + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Overrides the sealed state of the destination blob. Service version 2019-12-12 and newer. + SealBlob *bool + + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientUndeleteOptions contains the optional parameters for the BlobClient.Undelete method. +type BlobClientUndeleteOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. +type BlobHTTPHeaders struct { + // Optional. Sets the blob's cache control. If specified, this property is stored with the blob and returned with a read request. + BlobCacheControl *string + + // Optional. Sets the blob's Content-Disposition header. + BlobContentDisposition *string + + // Optional. Sets the blob's content encoding. If specified, this property is stored with the blob and returned with a read + // request. + BlobContentEncoding *string + + // Optional. Set the blob's content language. 
If specified, this property is stored with the blob and returned with a read + // request. + BlobContentLanguage *string + + // Optional. An MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual blocks + // were validated when each was uploaded. + BlobContentMD5 []byte + + // Optional. Sets the blob's content type. If specified, this property is stored with the blob and returned with a read request. + BlobContentType *string +} + +// BlockBlobClientCommitBlockListOptions contains the optional parameters for the BlockBlobClient.CommitBlockList method. +type BlockBlobClientCommitBlockListOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + + // Specified if a legal hold should be set on the blob. + LegalHold *bool + + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// BlockBlobClientGetBlockListOptions contains the optional parameters for the BlockBlobClient.GetBlockList method. +type BlockBlobClientGetBlockListOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlockBlobClientPutBlobFromURLOptions contains the optional parameters for the BlockBlobClient.PutBlobFromURL method. 
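BlobHTTPHeaders above notes that BlobContentMD5 is stored but not validated, since the individual block hashes were already checked at upload time. A sketch of populating a stand-in for those two fields:

package sketch

import "crypto/md5"

// httpHeaders mirrors the two BlobHTTPHeaders fields used here.
type httpHeaders struct {
	BlobContentType *string
	BlobContentMD5  []byte
}

// headersFor computes the whole-blob MD5 the service will store verbatim
// and pairs it with a content type.
func headersFor(body []byte, contentType string) httpHeaders {
	sum := md5.Sum(body)
	return httpHeaders{
		BlobContentType: &contentType,
		BlobContentMD5:  sum[:],
	}
}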
+type BlockBlobClientPutBlobFromURLOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + + // Optional, default is true. Indicates if properties from the source blob should be copied. + CopySourceBlobProperties *bool + + // Optional, default 'replace'. Indicates if source tags should be copied or replaced with the tags specified by x-ms-tags. + CopySourceTags *BlobCopySourceTags + + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// BlockBlobClientStageBlockFromURLOptions contains the optional parameters for the BlockBlobClient.StageBlockFromURL method. +type BlockBlobClientStageBlockFromURLOptions struct { + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + + // Specify the crc64 calculated for the range of bytes that must be read from the copy source. + SourceContentcrc64 []byte + + // Bytes of source data in the specified range. + SourceRange *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlockBlobClientStageBlockOptions contains the optional parameters for the BlockBlobClient.StageBlock method. +type BlockBlobClientStageBlockOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
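Several of the FromURL option sets here take CopySourceAuthorization, documented as Bearer-only. Assuming the field carries a full Authorization-style value (scheme plus OAuth access token; the doc comment names only the scheme), a helper would be:

package sketch

// copySourceAuth formats an OAuth access token as the Bearer value that
// CopySourceAuthorization is documented to accept.
func copySourceAuth(accessToken string) *string {
	v := "Bearer " + accessToken
	return &v
}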
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// BlockBlobClientUploadOptions contains the optional parameters for the BlockBlobClient.Upload method. +type BlockBlobClientUploadOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + + // Specified if a legal hold should be set on the blob. + LegalHold *bool + + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// ContainerClientAcquireLeaseOptions contains the optional parameters for the ContainerClient.AcquireLease method. +type ContainerClientAcquireLeaseOptions struct { + // Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is + // not in the correct format. See Guid Constructor (String) for a list of valid GUID + // string formats. + ProposedLeaseID *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientBreakLeaseOptions contains the optional parameters for the ContainerClient.BreakLease method. +type ContainerClientBreakLeaseOptions struct { + // For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. 
This + // break period is only used if it is shorter than the time remaining on the + // lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has + // expired, but the lease may be held for longer than the break period. If this + // header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, + // and an infinite lease breaks immediately. + BreakPeriod *int32 + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientChangeLeaseOptions contains the optional parameters for the ContainerClient.ChangeLease method. +type ContainerClientChangeLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientCreateOptions contains the optional parameters for the ContainerClient.Create method. +type ContainerClientCreateOptions struct { + // Specifies whether data in the container may be accessed publicly and the level of access + Access *PublicAccessType + + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientDeleteOptions contains the optional parameters for the ContainerClient.Delete method. +type ContainerClientDeleteOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
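The BreakPeriod doc comment above, repeated for blob and container leases, reduces to: use the shorter of the proposed period and the time remaining, and break an infinite lease immediately when no period is given. A local model of that rule:

package sketch

import "time"

// effectiveBreakWait models how long a Break Lease call leaves the lease
// held: the proposed break period is used only when it is shorter than the
// time remaining; with no proposed period, a fixed lease runs out its
// remainder and an infinite lease breaks immediately.
func effectiveBreakWait(remaining time.Duration, infinite bool, breakPeriodSecs *int32) time.Duration {
	if breakPeriodSecs == nil {
		if infinite {
			return 0
		}
		return remaining
	}
	proposed := time.Duration(*breakPeriodSecs) * time.Second
	if !infinite && remaining < proposed {
		return remaining
	}
	return proposed
}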
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientFilterBlobsOptions contains the optional parameters for the ContainerClient.FilterBlobs method. +type ContainerClientFilterBlobsOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include []FilterBlobsIncludeItem + + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientGetAccessPolicyOptions contains the optional parameters for the ContainerClient.GetAccessPolicy method. +type ContainerClientGetAccessPolicyOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientGetAccountInfoOptions contains the optional parameters for the ContainerClient.GetAccountInfo method. +type ContainerClientGetAccountInfoOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method. +type ContainerClientGetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. 
+ RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientListBlobFlatSegmentOptions contains the optional parameters for the ContainerClient.NewListBlobFlatSegmentPager +// method. +type ContainerClientListBlobFlatSegmentOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include []ListBlobsIncludeItem + + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + + // Filters the results to return only containers whose name begins with the specified prefix. + Prefix *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientListBlobHierarchySegmentOptions contains the optional parameters for the ContainerClient.NewListBlobHierarchySegmentPager +// method. +type ContainerClientListBlobHierarchySegmentOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include []ListBlobsIncludeItem + + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. 
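The Marker/Maxresults pair on these listing and filtering options is plain cursor pagination: hand NextMarker back until the service stops returning one, and expect short pages near partition boundaries. A generic drain loop under hypothetical names; listPage stands in for one service round-trip:

package sketch

type page struct {
	Items      []string
	NextMarker *string
}

// listAll drains a marker-paginated listing. maxResults caps each page at
// the documented server-side limit of 5000; pages may still come back
// shorter than that.
func listAll(listPage func(marker *string, maxResults int32) (page, error)) ([]string, error) {
	var (
		all    []string
		marker *string
	)
	for {
		p, err := listPage(marker, 5000)
		if err != nil {
			return nil, err
		}
		all = append(all, p.Items...)
		if p.NextMarker == nil || *p.NextMarker == "" {
			return all, nil
		}
		marker = p.NextMarker
	}
}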
+ Maxresults *int32 + + // Filters the results to return only containers whose name begins with the specified prefix. + Prefix *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientReleaseLeaseOptions contains the optional parameters for the ContainerClient.ReleaseLease method. +type ContainerClientReleaseLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientRenameOptions contains the optional parameters for the ContainerClient.Rename method. +type ContainerClientRenameOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // A lease ID for the source path. If specified, the source path must have an active lease and the lease ID must match. + SourceLeaseID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientRenewLeaseOptions contains the optional parameters for the ContainerClient.RenewLease method. +type ContainerClientRenewLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientRestoreOptions contains the optional parameters for the ContainerClient.Restore method. +type ContainerClientRestoreOptions struct { + // Optional. Version 2019-12-12 and later. Specifies the name of the deleted container to restore. + DeletedContainerName *string + + // Optional. Version 2019-12-12 and later. Specifies the version of the deleted container to restore. + DeletedContainerVersion *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientSetAccessPolicyOptions contains the optional parameters for the ContainerClient.SetAccessPolicy method. +type ContainerClientSetAccessPolicyOptions struct { + // Specifies whether data in the container may be accessed publicly and the level of access + Access *PublicAccessType + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientSetMetadataOptions contains the optional parameters for the ContainerClient.SetMetadata method. +type ContainerClientSetMetadataOptions struct { + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientSubmitBatchOptions contains the optional parameters for the ContainerClient.SubmitBatch method. +type ContainerClientSubmitBatchOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerCPKScopeInfo contains a group of parameters for the ContainerClient.Create method. +type ContainerCPKScopeInfo struct { + // Optional. Version 2019-07-07 and later. Specifies the default encryption scope to set on the container and use for all + // future writes. + DefaultEncryptionScope *string + + // Optional. Version 2019-07-07 and newer. If true, prevents any request from specifying a different encryption scope than + // the scope set on the container. + PreventEncryptionScopeOverride *bool +} + +// CPKInfo contains a group of parameters for the BlobClient.Download method. +type CPKInfo struct { + // The algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided + // if the x-ms-encryption-key header is provided. 
+ EncryptionAlgorithm *EncryptionAlgorithmType + + // Optional. Specifies the encryption key to use to encrypt the data provided in the request. If not specified, encryption + // is performed with the root account encryption key. For more information, see + // Encryption at Rest for Azure Storage Services. + EncryptionKey *string + + // The SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. + EncryptionKeySHA256 *string +} + +// CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +type CPKScopeInfo struct { + // Optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided + // in the request. If not specified, encryption is performed with the default + // account encryption scope. For more information, see Encryption at Rest for Azure Storage Services. + EncryptionScope *string +} + +// LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +type LeaseAccessConditions struct { + // If specified, the operation only succeeds if the resource's lease is active and matches this ID. + LeaseID *string +} + +// ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +type ModifiedAccessConditions struct { + // Specify an ETag value to operate only on blobs with a matching value. + IfMatch *azcore.ETag + + // Specify this header value to operate only on a blob if it has been modified since the specified date/time. + IfModifiedSince *time.Time + + // Specify an ETag value to operate only on blobs without a matching value. + IfNoneMatch *azcore.ETag + + // Specify a SQL where clause on blob tags to operate only on blobs with a matching value. + IfTags *string + + // Specify this header value to operate only on a blob if it has not been modified since the specified date/time. + IfUnmodifiedSince *time.Time +} + +// PageBlobClientClearPagesOptions contains the optional parameters for the PageBlobClient.ClearPages method. +type PageBlobClientClearPagesOptions struct { + // Return only the bytes of the blob in the specified range. + Range *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientCopyIncrementalOptions contains the optional parameters for the PageBlobClient.CopyIncremental method. +type PageBlobClientCopyIncrementalOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientCreateOptions contains the optional parameters for the PageBlobClient.Create method. +type PageBlobClientCreateOptions struct { + // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. 
The value of
+ // the sequence number must be between 0 and 2^63 - 1.
+ BlobSequenceNumber *int64
+
+ // Optional. Used to set blob tags in various blob operations.
+ BlobTagsString *string
+
+ // Specifies the date time when the blob's immutability policy is set to expire.
+ ImmutabilityPolicyExpiry *time.Time
+
+ // Specifies the immutability policy mode to set on the blob.
+ ImmutabilityPolicyMode *ImmutabilityPolicySetting
+
+ // Specifies whether a legal hold should be set on the blob.
+ LegalHold *bool
+
+ // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+ // operation will copy the metadata from the source blob or file to the destination
+ // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
+ // is not copied from the source blob or file. Note that beginning with
+ // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
+ // Blobs, and Metadata for more information.
+ Metadata map[string]*string
+
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+
+ // Optional. Indicates the tier to be set on the page blob.
+ Tier *PremiumPageBlobAccessTier
+
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// PageBlobClientGetPageRangesDiffOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesDiffPager
+// method.
+type PageBlobClientGetPageRangesDiffOptions struct {
+ // A string value that identifies the portion of the list of page ranges to be returned with the next listing operation. The
+ // operation returns the NextMarker value within the response body if the listing
+ // operation did not return all page ranges remaining to be listed with the current page. The NextMarker value can be used
+ // as the value for the marker parameter in a subsequent call to request the next
+ // page of list items. The marker value is opaque to the client.
+ Marker *string
+
+ // Specifies the maximum number of page ranges to return. If the request does not specify maxresults, or specifies a value
+ // greater than 5000, the server will return up to 5000 items. Note that if the
+ // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
+ // of the results. For this reason, it is possible that the service will
+ // return fewer results than specified by maxresults, or than the default of 5000.
+ Maxresults *int32
+
+ // Optional. This header is only supported in service versions 2019-04-19 and after and specifies the URL of a previous snapshot
+ // of the target blob. The response will only contain pages that were changed
+ // between the target blob and its previous snapshot.
+ PrevSnapshotURL *string
+
+ // Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a DateTime value that specifies that the response
+ // will contain only pages that were changed between target blob and previous
+ // snapshot. Changed pages include both updated and cleared pages. The target blob may be a snapshot, as long as the snapshot
+ // specified by prevsnapshot is the older of the two. Note that incremental
+ // snapshots are currently supported only for blobs created on or after January 1, 2016.
+ Prevsnapshot *string
+
+ // Return only the bytes of the blob in the specified range.
+ Range *string
+
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+
+ // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
+ // information on working with blob snapshots, see Creating a Snapshot of a Blob.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
+ Snapshot *string
+
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// PageBlobClientGetPageRangesOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesPager method.
+type PageBlobClientGetPageRangesOptions struct {
+ // A string value that identifies the portion of the list of page ranges to be returned with the next listing operation. The
+ // operation returns the NextMarker value within the response body if the listing
+ // operation did not return all page ranges remaining to be listed with the current page. The NextMarker value can be used
+ // as the value for the marker parameter in a subsequent call to request the next
+ // page of list items. The marker value is opaque to the client.
+ Marker *string
+
+ // Specifies the maximum number of page ranges to return. If the request does not specify maxresults, or specifies a value
+ // greater than 5000, the server will return up to 5000 items. Note that if the
+ // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
+ // of the results. For this reason, it is possible that the service will
+ // return fewer results than specified by maxresults, or than the default of 5000.
+ Maxresults *int32
+
+ // Return only the bytes of the blob in the specified range.
+ Range *string
+
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+
+ // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
+ // information on working with blob snapshots, see Creating a Snapshot of a Blob.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
+ Snapshot *string
+
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// PageBlobClientResizeOptions contains the optional parameters for the PageBlobClient.Resize method.
+type PageBlobClientResizeOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// PageBlobClientUpdateSequenceNumberOptions contains the optional parameters for the PageBlobClient.UpdateSequenceNumber
+// method.
+type PageBlobClientUpdateSequenceNumberOptions struct {
+ // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of
+ // the sequence number must be between 0 and 2^63 - 1.
+ BlobSequenceNumber *int64
+
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// PageBlobClientUploadPagesFromURLOptions contains the optional parameters for the PageBlobClient.UploadPagesFromURL method.
+type PageBlobClientUploadPagesFromURLOptions struct {
+ // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
+ CopySourceAuthorization *string
+
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+
+ // Specify the md5 calculated for the range of bytes that must be read from the copy source.
+ SourceContentMD5 []byte
+
+ // Specify the crc64 calculated for the range of bytes that must be read from the copy source.
+ SourceContentcrc64 []byte
+
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// PageBlobClientUploadPagesOptions contains the optional parameters for the PageBlobClient.UploadPages method.
+type PageBlobClientUploadPagesOptions struct {
+ // Return only the bytes of the blob in the specified range.
+ Range *string
+
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+
+ // Specify the transactional crc64 for the body, to be validated by the service.
+ TransactionalContentCRC64 []byte
+
+ // Specify the transactional md5 for the body, to be validated by the service.
+ TransactionalContentMD5 []byte
+}
+
+// SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages method.
+type SequenceNumberAccessConditions struct {
+ // Specify this header value to operate only on a blob if it has the specified sequence number.
+ IfSequenceNumberEqualTo *int64
+
+ // Specify this header value to operate only on a blob if it has a sequence number less than the specified value.
+ IfSequenceNumberLessThan *int64
+
+ // Specify this header value to operate only on a blob if it has a sequence number less than or equal to the specified value.
+ IfSequenceNumberLessThanOrEqualTo *int64
+}
+
+// ServiceClientFilterBlobsOptions contains the optional parameters for the ServiceClient.FilterBlobs method.
+type ServiceClientFilterBlobsOptions struct {
+ // Include this parameter to specify one or more datasets to include in the response.
+ Include []FilterBlobsIncludeItem
+
+ // A string value that identifies the portion of the list of blobs to be returned with the next listing operation. The
+ // operation returns the NextMarker value within the response body if the listing
+ // operation did not return all blobs remaining to be listed with the current page. The NextMarker value can be used
+ // as the value for the marker parameter in a subsequent call to request the next
+ // page of list items. The marker value is opaque to the client.
+ Marker *string
+
+ // Specifies the maximum number of blobs to return. If the request does not specify maxresults, or specifies a value
+ // greater than 5000, the server will return up to 5000 items. Note that if the
+ // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
+ // of the results. For this reason, it is possible that the service will
+ // return fewer results than specified by maxresults, or than the default of 5000.
+ Maxresults *int32
+
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// ServiceClientGetAccountInfoOptions contains the optional parameters for the ServiceClient.GetAccountInfo method.
+type ServiceClientGetAccountInfoOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method.
+type ServiceClientGetPropertiesOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// ServiceClientGetStatisticsOptions contains the optional parameters for the ServiceClient.GetStatistics method.
+type ServiceClientGetStatisticsOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientGetUserDelegationKeyOptions contains the optional parameters for the ServiceClient.GetUserDelegationKey method. +type ServiceClientGetUserDelegationKeyOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientListContainersSegmentOptions contains the optional parameters for the ServiceClient.NewListContainersSegmentPager +// method. +type ServiceClientListContainersSegmentOptions struct { + // Include this parameter to specify that the container's metadata be returned as part of the response body. + Include []ListContainersIncludeType + + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + + // Filters the results to return only containers whose name begins with the specified prefix. + Prefix *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method. +type ServiceClientSetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientSubmitBatchOptions contains the optional parameters for the ServiceClient.SubmitBatch method. +type ServiceClientSubmitBatchOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL method. +type SourceModifiedAccessConditions struct { + // Specify an ETag value to operate only on blobs with a matching value. + SourceIfMatch *azcore.ETag + + // Specify this header value to operate only on a blob if it has been modified since the specified date/time. + SourceIfModifiedSince *time.Time + + // Specify an ETag value to operate only on blobs without a matching value. + SourceIfNoneMatch *azcore.ETag + + // Specify a SQL where clause on blob tags to operate only on blobs with a matching value. + SourceIfTags *string + + // Specify this header value to operate only on a blob if it has not been modified since the specified date/time. + SourceIfUnmodifiedSince *time.Time +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go index b41644c99f1..ee46ef9ca07 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go @@ -1,11 +1,7 @@ -//go:build go1.18 -// +build go1.18 - // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -31,7 +27,7 @@ type PageBlobClient struct { // ClearPages - The Clear Pages operation clears a set of pages from a page blob // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - contentLength - The length of the request. // - options - PageBlobClientClearPagesOptions contains the optional parameters for the PageBlobClient.ClearPages method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. @@ -41,18 +37,21 @@ type PageBlobClient struct { // method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
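The generated ClearPages below is not normally called directly; consumers reach it through the public pageblob package, which wraps this client. A minimal consumer-side sketch, assuming azidentity for credentials and placeholder account, container, and blob names:

package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	client, err := pageblob.NewClient("https://<account>.blob.core.windows.net/<container>/<blob>", cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	// Clear the first page; page ranges must stay aligned to 512-byte boundaries.
	if _, err := client.ClearPages(context.TODO(), blob.HTTPRange{Offset: 0, Count: pageblob.PageBytes}, nil); err != nil {
		log.Fatal(err)
	}
}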
func (client *PageBlobClient) ClearPages(ctx context.Context, contentLength int64, options *PageBlobClientClearPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientClearPagesResponse, error) { + var err error req, err := client.clearPagesCreateRequest(ctx, contentLength, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) if err != nil { return PageBlobClientClearPagesResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return PageBlobClientClearPagesResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return PageBlobClientClearPagesResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientClearPagesResponse{}, err } - return client.clearPagesHandleResponse(resp) + resp, err := client.clearPagesHandleResponse(httpResp) + return resp, err } // clearPagesCreateRequest creates the ClearPages request. @@ -67,13 +66,25 @@ func (client *PageBlobClient) clearPagesCreateRequest(ctx context.Context, conte reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-page-write"] = []string{"clear"} + req.Raw().Header["Accept"] = []string{"application/xml"} req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} - if options != nil && options.Range != nil { - req.Raw().Header["x-ms-range"] = []string{*options.Range} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} @@ -81,56 +92,51 @@ func (client *PageBlobClient) clearPagesCreateRequest(ctx context.Context, conte if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = 
[]string{string(*cpkInfo.EncryptionAlgorithm)} - } if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { + req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} + } if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil { req.Raw().Header["x-ms-if-sequence-number-le"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10)} } if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil { req.Raw().Header["x-ms-if-sequence-number-lt"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10)} } - if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { - req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-page-write"] = []string{"clear"} + if options != nil && options.Range != nil { + req.Raw().Header["x-ms-range"] = []string{*options.Range} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } // clearPagesHandleResponse handles the ClearPages response. 
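The request builders above render every conditional date header with .In(gmt).Format(time.RFC1123). The reason is worth spelling out: time.RFC1123 prints the time's own zone abbreviation, while HTTP date headers such as If-Modified-Since must read GMT, so the conversion has to happen before formatting. A standalone sketch; the local gmt variable is an assumption standing in for the package-level location this file relies on:

package main

import (
	"fmt"
	"time"
)

// Assumption: mirrors the package-level gmt location used by the generated code.
var gmt = time.FixedZone("GMT", 0)

func main() {
	t := time.Date(2024, time.August, 4, 15, 4, 5, 0, time.FixedZone("PST", -8*3600))
	fmt.Println(t.Format(time.RFC1123))         // "Sun, 04 Aug 2024 15:04:05 PST" (not a valid HTTP date)
	fmt.Println(t.In(gmt).Format(time.RFC1123)) // "Sun, 04 Aug 2024 23:04:05 GMT" (what If-Modified-Since expects)
}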
func (client *PageBlobClient) clearPagesHandleResponse(resp *http.Response) (PageBlobClientClearPagesResponse, error) { result := PageBlobClientClearPagesResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return PageBlobClientClearPagesResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) if err != nil { return PageBlobClientClearPagesResponse{}, err } - result.LastModified = &lastModified + result.ContentCRC64 = contentCRC64 } if val := resp.Header.Get("Content-MD5"); val != "" { contentMD5, err := base64.StdEncoding.DecodeString(val) @@ -139,22 +145,22 @@ func (client *PageBlobClient) clearPagesHandleResponse(resp *http.Response) (Pag } result.ContentMD5 = contentMD5 } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - contentCRC64, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) if err != nil { return PageBlobClientClearPagesResponse{}, err } - result.ContentCRC64 = contentCRC64 + result.Date = &date } - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) if err != nil { return PageBlobClientClearPagesResponse{}, err } - result.BlobSequenceNumber = &blobSequenceNumber - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val + result.LastModified = &lastModified } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val @@ -162,13 +168,6 @@ func (client *PageBlobClient) clearPagesHandleResponse(resp *http.Response) (Pag if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientClearPagesResponse{}, err - } - result.Date = &date - } return result, nil } @@ -179,7 +178,7 @@ func (client *PageBlobClient) clearPagesHandleResponse(resp *http.Response) (Pag // 2016-05-31. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies // a page blob snapshot. The value should be URL-encoded as it would appear in a request // URI. The source blob must either be public or must be authenticated via a shared access signature. @@ -187,18 +186,21 @@ func (client *PageBlobClient) clearPagesHandleResponse(resp *http.Response) (Pag // method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
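clearPagesHandleResponse above lifts ETag, Last-Modified, and x-ms-blob-sequence-number out of the raw response into typed fields. A hedged sketch of what that enables at the call site: reusing the parsed ETag as an If-Match precondition on a follow-up call. The field names follow the public azblob wrappers (blob.AccessConditions, pageblob.ResizeOptions) as best understood, and should be treated as assumptions:

package main

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob"
)

// clearThenResize clears the first page, then resizes the blob only if nothing
// else modified it in between: the parsed ETag becomes an If-Match header, and
// the service answers 412 Precondition Failed on a mismatch.
func clearThenResize(ctx context.Context, client *pageblob.Client) error {
	resp, err := client.ClearPages(ctx, blob.HTTPRange{Offset: 0, Count: pageblob.PageBytes}, nil)
	if err != nil {
		return err
	}
	_, err = client.Resize(ctx, 4*pageblob.PageBytes, &pageblob.ResizeOptions{
		AccessConditions: &blob.AccessConditions{
			ModifiedAccessConditions: &blob.ModifiedAccessConditions{IfMatch: resp.ETag},
		},
	})
	return err
}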
func (client *PageBlobClient) CopyIncremental(ctx context.Context, copySource string, options *PageBlobClientCopyIncrementalOptions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientCopyIncrementalResponse, error) { + var err error req, err := client.copyIncrementalCreateRequest(ctx, copySource, options, modifiedAccessConditions) if err != nil { return PageBlobClientCopyIncrementalResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return PageBlobClientCopyIncrementalResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return PageBlobClientCopyIncrementalResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientCopyIncrementalResponse{}, err } - return client.copyIncrementalHandleResponse(resp) + resp, err := client.copyIncrementalHandleResponse(httpResp) + return resp, err } // copyIncrementalCreateRequest creates the CopyIncremental request. @@ -213,33 +215,49 @@ func (client *PageBlobClient) copyIncrementalCreateRequest(ctx context.Context, reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } + req.Raw().Header["x-ms-copy-source"] = []string{copySource} if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-copy-source"] = []string{copySource} req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } // copyIncrementalHandleResponse handles the CopyIncremental response. 
func (client *PageBlobClient) copyIncrementalHandleResponse(resp *http.Response) (PageBlobClientCopyIncrementalResponse, error) { result := PageBlobClientCopyIncrementalResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientCopyIncrementalResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -250,35 +268,19 @@ func (client *PageBlobClient) copyIncrementalHandleResponse(resp *http.Response) } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientCopyIncrementalResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-copy-id"); val != "" { - result.CopyID = &val - } - if val := resp.Header.Get("x-ms-copy-status"); val != "" { - result.CopyStatus = (*CopyStatusType)(&val) - } return result, nil } // Create - The Create operation creates a new page blob. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - contentLength - The length of the request. // - blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned // to a 512-byte boundary. @@ -289,18 +291,21 @@ func (client *PageBlobClient) copyIncrementalHandleResponse(resp *http.Response) // - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *PageBlobClient) Create(ctx context.Context, contentLength int64, blobContentLength int64, options *PageBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientCreateResponse, error) { + var err error req, err := client.createCreateRequest(ctx, contentLength, blobContentLength, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) if err != nil { return PageBlobClientCreateResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return PageBlobClientCreateResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return PageBlobClientCreateResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientCreateResponse{}, err } - return client.createHandleResponse(resp) + resp, err := client.createHandleResponse(httpResp) + return resp, err } // createCreateRequest creates the Create request. 
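Create above maps PageBlobClientCreateOptions onto the x-ms-* headers assembled in createCreateRequest (next hunk). A consumer-side sketch via the public pageblob package; the option field names reflect the public wrapper, and the size and metadata values are placeholders:

package main

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob"
)

// createPageBlob provisions a 1 MiB page blob. The size must be 512-byte
// aligned; SequenceNumber and Metadata travel as the x-ms-blob-sequence-number
// and x-ms-meta-* headers built in createCreateRequest.
func createPageBlob(ctx context.Context, client *pageblob.Client) error {
	_, err := client.Create(ctx, 2048*pageblob.PageBytes, &pageblob.CreateOptions{
		SequenceNumber: to.Ptr(int64(0)),
		Metadata:       map[string]*string{"env": to.Ptr("dev")},
	})
	return err
}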
@@ -314,13 +319,28 @@ func (client *PageBlobClient) createCreateRequest(ctx context.Context, contentLe reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-blob-type"] = []string{"PageBlob"} + req.Raw().Header["Accept"] = []string{"application/xml"} req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } if options != nil && options.Tier != nil { req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { - req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} } if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding} @@ -328,24 +348,22 @@ func (client *PageBlobClient) createCreateRequest(ctx context.Context, contentLe if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage} } + req.Raw().Header["x-ms-blob-content-length"] = []string{strconv.FormatInt(blobContentLength, 10)} if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)} } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { - req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl} + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType} } - if options != nil && options.Metadata != nil { - for k, v := range options.Metadata { - if v != nil { - req.Raw().Header["x-ms-meta-"+k] = []string{*v} - } - } + if options != nil && options.BlobSequenceNumber != nil { + req.Raw().Header["x-ms-blob-sequence-number"] = []string{strconv.FormatInt(*options.BlobSequenceNumber, 10)} } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + req.Raw().Header["x-ms-blob-type"] = []string{"PageBlob"} + if options != nil && options.RequestID != nil { + 
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { - req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} @@ -353,102 +371,89 @@ func (client *PageBlobClient) createCreateRequest(ctx context.Context, contentLe if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-blob-content-length"] = []string{strconv.FormatInt(blobContentLength, 10)} - if options != nil && options.BlobSequenceNumber != nil { - req.Raw().Header["x-ms-blob-sequence-number"] = []string{strconv.FormatInt(*options.BlobSequenceNumber, 10)} - } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - if options != nil && options.BlobTagsString != nil { - req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + if options != nil && options.ImmutabilityPolicyMode != nil { + req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} } if options != nil && options.ImmutabilityPolicyExpiry != nil { req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} } - if options != nil && options.ImmutabilityPolicyMode != nil { - req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if options != nil && options.LegalHold != nil { req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} } - req.Raw().Header["Accept"] = []string{"application/xml"} + if options 
!= nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + if options != nil && options.BlobTagsString != nil { + req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } // createHandleResponse handles the Create response. func (client *PageBlobClient) createHandleResponse(resp *http.Response) (PageBlobClientCreateResponse, error) { result := PageBlobClientCreateResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) if err != nil { return PageBlobClientCreateResponse{}, err } - result.LastModified = &lastModified + result.ContentMD5 = contentMD5 } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) if err != nil { return PageBlobClientCreateResponse{}, err } - result.ContentMD5 = contentMD5 + result.Date = &date } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) if err != nil { return PageBlobClientCreateResponse{}, err } - result.Date = &date + result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) if err != nil { return PageBlobClientCreateResponse{}, err } - result.IsServerEncrypted = &isServerEncrypted + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val } return result, nil } @@ -456,7 +461,7 @@ func (client *PageBlobClient) createHandleResponse(resp *http.Response) (PageBlo // NewGetPageRangesPager - The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot // of a page 
blob // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - options - PageBlobClientGetPageRangesOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesPager // method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. @@ -467,23 +472,16 @@ func (client *PageBlobClient) NewGetPageRangesPager(options *PageBlobClientGetPa return page.NextMarker != nil && len(*page.NextMarker) > 0 }, Fetcher: func(ctx context.Context, page *PageBlobClientGetPageRangesResponse) (PageBlobClientGetPageRangesResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.GetPageRangesCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextMarker) + nextLink := "" + if page != nil { + nextLink = *page.NextMarker } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.GetPageRangesCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) + }, nil) if err != nil { return PageBlobClientGetPageRangesResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return PageBlobClientGetPageRangesResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return PageBlobClientGetPageRangesResponse{}, runtime.NewResponseError(resp) - } return client.GetPageRangesHandleResponse(resp) }, }) @@ -497,61 +495,51 @@ func (client *PageBlobClient) GetPageRangesCreateRequest(ctx context.Context, op } reqQP := req.Raw().URL.Query() reqQP.Set("comp", "pagelist") - if options != nil && options.Snapshot != nil { - reqQP.Set("snapshot", *options.Snapshot) - } - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } if options != nil && options.Marker != nil { reqQP.Set("marker", *options.Marker) } if options != nil && options.Maxresults != nil { reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) } - req.Raw().URL.RawQuery = reqQP.Encode() - if options != nil && options.Range != nil { - req.Raw().Header["x-ms-range"] = []string{*options.Range} + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = 
[]string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.Range != nil { + req.Raw().Header["x-ms-range"] = []string{*options.Range} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } // GetPageRangesHandleResponse handles the GetPageRanges response. func (client *PageBlobClient) GetPageRangesHandleResponse(resp *http.Response) (PageBlobClientGetPageRangesResponse, error) { result := PageBlobClientGetPageRangesResponse{} - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientGetPageRangesResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { blobContentLength, err := strconv.ParseInt(val, 10, 64) if err != nil { @@ -562,12 +550,6 @@ func (client *PageBlobClient) GetPageRangesHandleResponse(resp *http.Response) ( if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -575,6 +557,22 @@ func (client *PageBlobClient) GetPageRangesHandleResponse(resp *http.Response) ( } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientGetPageRangesResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } if err := runtime.UnmarshalAsXML(resp, &result.PageList); err != nil { return PageBlobClientGetPageRangesResponse{}, err } @@ -584,7 +582,7 @@ func (client *PageBlobClient) GetPageRangesHandleResponse(resp *http.Response) ( // NewGetPageRangesDiffPager - The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that // were changed between target blob and previous snapshot. 
// -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - options - PageBlobClientGetPageRangesDiffOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesDiffPager // method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. @@ -595,23 +593,16 @@ func (client *PageBlobClient) NewGetPageRangesDiffPager(options *PageBlobClientG return page.NextMarker != nil && len(*page.NextMarker) > 0 }, Fetcher: func(ctx context.Context, page *PageBlobClientGetPageRangesDiffResponse) (PageBlobClientGetPageRangesDiffResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.GetPageRangesDiffCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextMarker) + nextLink := "" + if page != nil { + nextLink = *page.NextMarker } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.GetPageRangesDiffCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) + }, nil) if err != nil { return PageBlobClientGetPageRangesDiffResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return PageBlobClientGetPageRangesDiffResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return PageBlobClientGetPageRangesDiffResponse{}, runtime.NewResponseError(resp) - } return client.GetPageRangesDiffHandleResponse(resp) }, }) @@ -625,67 +616,57 @@ func (client *PageBlobClient) GetPageRangesDiffCreateRequest(ctx context.Context } reqQP := req.Raw().URL.Query() reqQP.Set("comp", "pagelist") - if options != nil && options.Snapshot != nil { - reqQP.Set("snapshot", *options.Snapshot) - } - if options != nil && options.Timeout != nil { - reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) - } - if options != nil && options.Prevsnapshot != nil { - reqQP.Set("prevsnapshot", *options.Prevsnapshot) - } if options != nil && options.Marker != nil { reqQP.Set("marker", *options.Marker) } if options != nil && options.Maxresults != nil { reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) } - req.Raw().URL.RawQuery = reqQP.Encode() - if options != nil && options.PrevSnapshotURL != nil { - req.Raw().Header["x-ms-previous-snapshot-url"] = []string{*options.PrevSnapshotURL} + if options != nil && options.Prevsnapshot != nil { + reqQP.Set("prevsnapshot", *options.Prevsnapshot) } - if options != nil && options.Range != nil { - req.Raw().Header["x-ms-range"] = []string{*options.Range} + if options != nil && options.Snapshot != nil { + reqQP.Set("snapshot", *options.Snapshot) } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = 
[]string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.PrevSnapshotURL != nil { + req.Raw().Header["x-ms-previous-snapshot-url"] = []string{*options.PrevSnapshotURL} + } + if options != nil && options.Range != nil { + req.Raw().Header["x-ms-range"] = []string{*options.Range} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } // GetPageRangesDiffHandleResponse handles the GetPageRangesDiff response. 
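// [Editor's note] The two pager hunks above replace the hand-rolled fetch loop
// with runtime.FetcherForNextLink, which now performs the pipeline.Do call and
// the http.StatusOK check that were previously inlined in each Fetcher. A
// minimal usage sketch of the resulting pager follows; it assumes a
// *PageBlobClient named client constructed elsewhere, imports of context and
// fmt, and that the generated PageList model exposes PageRange entries with
// Start/End offsets.
//
//	pager := client.NewGetPageRangesPager(nil, nil, nil)
//	for pager.More() {
//		page, err := pager.NextPage(context.TODO())
//		if err != nil {
//			panic(err) // hypothetical error handling for the sketch
//		}
//		for _, r := range page.PageList.PageRange {
//			fmt.Printf("valid range: %d-%d\n", *r.Start, *r.End)
//		}
//	}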
func (client *PageBlobClient) GetPageRangesDiffHandleResponse(resp *http.Response) (PageBlobClientGetPageRangesDiffResponse, error) { result := PageBlobClientGetPageRangesDiffResponse{} - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientGetPageRangesDiffResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { blobContentLength, err := strconv.ParseInt(val, 10, 64) if err != nil { @@ -696,12 +677,6 @@ func (client *PageBlobClient) GetPageRangesDiffHandleResponse(resp *http.Respons if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -709,6 +684,22 @@ func (client *PageBlobClient) GetPageRangesDiffHandleResponse(resp *http.Respons } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientGetPageRangesDiffResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } if err := runtime.UnmarshalAsXML(resp, &result.PageList); err != nil { return PageBlobClientGetPageRangesDiffResponse{}, err } @@ -718,7 +709,7 @@ func (client *PageBlobClient) GetPageRangesDiffHandleResponse(resp *http.Respons // Resize - Resize the Blob // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned // to a 512-byte boundary. // - options - PageBlobClientResizeOptions contains the optional parameters for the PageBlobClient.Resize method. @@ -727,18 +718,21 @@ func (client *PageBlobClient) GetPageRangesDiffHandleResponse(resp *http.Respons // - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
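// [Editor's note] A hedged sketch of calling the Resize wrapper defined just
// below; the 2 MiB target size is arbitrary but honors the documented 512-byte
// alignment, and every optional parameter group is left nil. Assumes a
// *PageBlobClient named client and a context import.
//
//	_, err := client.Resize(context.TODO(), 2*1024*1024, nil, nil, nil, nil, nil)
//	if err != nil {
//		// on failure this is an *azcore.ResponseError, per the doc comment above
//	}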
func (client *PageBlobClient) Resize(ctx context.Context, blobContentLength int64, options *PageBlobClientResizeOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientResizeResponse, error) { + var err error req, err := client.resizeCreateRequest(ctx, blobContentLength, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) if err != nil { return PageBlobClientResizeResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return PageBlobClientResizeResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return PageBlobClientResizeResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientResizeResponse{}, err } - return client.resizeHandleResponse(resp) + resp, err := client.resizeHandleResponse(httpResp) + return resp, err } // resizeCreateRequest creates the Resize request. @@ -753,8 +747,25 @@ func (client *PageBlobClient) resizeCreateRequest(ctx context.Context, blobConte reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + req.Raw().Header["x-ms-blob-content-length"] = []string{strconv.FormatInt(blobContentLength, 10)} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} @@ -762,49 +773,22 @@ func (client *PageBlobClient) resizeCreateRequest(ctx context.Context, blobConte if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - 
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-blob-content-length"] = []string{strconv.FormatInt(blobContentLength, 10)} - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } // resizeHandleResponse handles the Resize response. func (client *PageBlobClient) resizeHandleResponse(resp *http.Response) (PageBlobClientResizeResponse, error) { result := PageBlobClientResizeResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientResizeResponse{}, err - } - result.LastModified = &lastModified - } if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) if err != nil { @@ -815,12 +799,6 @@ func (client *PageBlobClient) resizeHandleResponse(resp *http.Response) (PageBlo if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -828,13 +806,29 @@ func (client *PageBlobClient) resizeHandleResponse(resp *http.Response) (PageBlo } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientResizeResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } // UpdateSequenceNumber - Update the sequence number of the blob // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - sequenceNumberAction - Required if the x-ms-blob-sequence-number header is set for the request. 
This property applies to // page blobs only. This property indicates how the service should modify the blob's sequence number // - options - PageBlobClientUpdateSequenceNumberOptions contains the optional parameters for the PageBlobClient.UpdateSequenceNumber @@ -842,18 +836,21 @@ func (client *PageBlobClient) resizeHandleResponse(resp *http.Response) (PageBlo // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *PageBlobClient) UpdateSequenceNumber(ctx context.Context, sequenceNumberAction SequenceNumberActionType, options *PageBlobClientUpdateSequenceNumberOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientUpdateSequenceNumberResponse, error) { + var err error req, err := client.updateSequenceNumberCreateRequest(ctx, sequenceNumberAction, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { return PageBlobClientUpdateSequenceNumberResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return PageBlobClientUpdateSequenceNumberResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return PageBlobClientUpdateSequenceNumberResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientUpdateSequenceNumberResponse{}, err } - return client.updateSequenceNumberHandleResponse(resp) + resp, err := client.updateSequenceNumberHandleResponse(httpResp) + return resp, err } // updateSequenceNumberCreateRequest creates the UpdateSequenceNumber request. 
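// [Editor's note] Sketch of the UpdateSequenceNumber wrapper above, setting an
// explicit value. SequenceNumberActionTypeUpdate and the BlobSequenceNumber
// option field come from the generated constants and options types; the
// surrounding setup (client, context) is hypothetical.
//
//	seq := int64(7)
//	opts := &PageBlobClientUpdateSequenceNumberOptions{BlobSequenceNumber: &seq}
//	_, err := client.UpdateSequenceNumber(context.TODO(), SequenceNumberActionTypeUpdate, opts, nil, nil)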
@@ -868,49 +865,39 @@ func (client *PageBlobClient) updateSequenceNumberCreateRequest(ctx context.Cont reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { - req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-sequence-number-action"] = []string{string(sequenceNumberAction)} if options != nil && options.BlobSequenceNumber != nil { req.Raw().Header["x-ms-blob-sequence-number"] = []string{strconv.FormatInt(*options.BlobSequenceNumber, 10)} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-sequence-number-action"] = []string{string(sequenceNumberAction)} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } // updateSequenceNumberHandleResponse handles the UpdateSequenceNumber response. 
func (client *PageBlobClient) updateSequenceNumberHandleResponse(resp *http.Response) (PageBlobClientUpdateSequenceNumberResponse, error) { result := PageBlobClientUpdateSequenceNumberResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientUpdateSequenceNumberResponse{}, err - } - result.LastModified = &lastModified - } if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) if err != nil { @@ -921,12 +908,6 @@ func (client *PageBlobClient) updateSequenceNumberHandleResponse(resp *http.Resp if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -934,13 +915,29 @@ func (client *PageBlobClient) updateSequenceNumberHandleResponse(resp *http.Resp } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUpdateSequenceNumberResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } // UploadPages - The Upload Pages operation writes a range of pages to a page blob // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - contentLength - The length of the request. // - body - Initial data // - options - PageBlobClientUploadPagesOptions contains the optional parameters for the PageBlobClient.UploadPages method. @@ -951,18 +948,21 @@ func (client *PageBlobClient) updateSequenceNumberHandleResponse(resp *http.Resp // method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
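// [Editor's note] A minimal sketch of UploadPages with the signature shown
// below. The zero-filled 512-byte buffer and the "bytes=0-511" range are
// illustrative; streaming.NopCloser (from sdk/azcore/streaming) adapts the
// reader to the io.ReadSeekCloser the method requires. Imports of bytes and
// context are assumed.
//
//	buf := make([]byte, 512)
//	rng := "bytes=0-511"
//	opts := &PageBlobClientUploadPagesOptions{Range: &rng}
//	_, err := client.UploadPages(context.TODO(), int64(len(buf)),
//		streaming.NopCloser(bytes.NewReader(buf)), opts, nil, nil, nil, nil, nil)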
func (client *PageBlobClient) UploadPages(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *PageBlobClientUploadPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientUploadPagesResponse, error) { + var err error req, err := client.uploadPagesCreateRequest(ctx, contentLength, body, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) if err != nil { return PageBlobClientUploadPagesResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return PageBlobClientUploadPagesResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return PageBlobClientUploadPagesResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientUploadPagesResponse{}, err } - return client.uploadPagesHandleResponse(resp) + resp, err := client.uploadPagesHandleResponse(httpResp) + return resp, err } // uploadPagesCreateRequest creates the UploadPages request. @@ -977,19 +977,31 @@ func (client *PageBlobClient) uploadPagesCreateRequest(ctx context.Context, cont reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-page-write"] = []string{"update"} + req.Raw().Header["Accept"] = []string{"application/xml"} req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} if options != nil && options.TransactionalContentMD5 != nil { req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } if options != nil && options.TransactionalContentCRC64 != nil { req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} } - if options != nil && options.Range != nil { - req.Raw().Header["x-ms-range"] = []string{*options.Range} - } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if cpkInfo != nil && cpkInfo.EncryptionKey != nil { req.Raw().Header["x-ms-encryption-key"] = 
[]string{*cpkInfo.EncryptionKey} @@ -997,41 +1009,29 @@ func (client *PageBlobClient) uploadPagesCreateRequest(ctx context.Context, cont if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { + req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} + } if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil { req.Raw().Header["x-ms-if-sequence-number-le"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10)} } if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil { req.Raw().Header["x-ms-if-sequence-number-lt"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10)} } - if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { - req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-page-write"] = []string{"update"} + if options != nil && options.Range != nil { + req.Raw().Header["x-ms-range"] = []string{*options.Range} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if err := req.SetBody(body, "application/octet-stream"); err != nil { return nil, err } @@ -1041,22 +1041,15 @@ func (client *PageBlobClient) uploadPagesCreateRequest(ctx context.Context, cont // uploadPagesHandleResponse handles the UploadPages response. 
func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (PageBlobClientUploadPagesResponse, error) { result := PageBlobClientUploadPagesResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) if err != nil { return PageBlobClientUploadPagesResponse{}, err } - result.LastModified = &lastModified + result.BlobSequenceNumber = &blobSequenceNumber } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return PageBlobClientUploadPagesResponse{}, err - } - result.ContentMD5 = contentMD5 + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } if val := resp.Header.Get("x-ms-content-crc64"); val != "" { contentCRC64, err := base64.StdEncoding.DecodeString(val) @@ -1065,21 +1058,12 @@ func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (Pa } result.ContentCRC64 = contentCRC64 } - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) if err != nil { return PageBlobClientUploadPagesResponse{}, err } - result.BlobSequenceNumber = &blobSequenceNumber - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val + result.ContentMD5 = contentMD5 } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) @@ -1088,6 +1072,15 @@ func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (Pa } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { @@ -1095,11 +1088,18 @@ func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (Pa } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val } return result, nil } @@ -1108,7 +1108,7 @@ func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (Pa // a URL // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - sourceURL - Specify a URL to the copy source. // - sourceRange - Bytes of source data in the specified range. The length of this range should match the ContentLength header // and x-ms-range/Range destination range header. @@ -1126,18 +1126,21 @@ func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (Pa // - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL // method. func (client *PageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParam string, options *PageBlobClientUploadPagesFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (PageBlobClientUploadPagesFromURLResponse, error) { + var err error req, err := client.uploadPagesFromURLCreateRequest(ctx, sourceURL, sourceRange, contentLength, rangeParam, options, cpkInfo, cpkScopeInfo, leaseAccessConditions, sequenceNumberAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) if err != nil { return PageBlobClientUploadPagesFromURLResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return PageBlobClientUploadPagesFromURLResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return PageBlobClientUploadPagesFromURLResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientUploadPagesFromURLResponse{}, err } - return client.uploadPagesFromURLHandleResponse(resp) + resp, err := client.uploadPagesFromURLHandleResponse(httpResp) + return resp, err } // uploadPagesFromURLCreateRequest creates the UploadPagesFromURL request. 
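// [Editor's note] Sketch of the server-to-server variant documented above:
// the source and destination ranges cover the same 512 bytes, since the doc
// comment requires the two lengths to match. The SAS-authenticated source URL
// is a placeholder, and all condition groups are left nil.
//
//	srcURL := "https://myaccount.blob.core.windows.net/cont/src?<sas>"
//	_, err := client.UploadPagesFromURL(context.TODO(), srcURL, "bytes=0-511",
//		512, "bytes=0-511", nil, nil, nil, nil, nil, nil, nil)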
@@ -1152,31 +1155,41 @@ func (client *PageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Contex reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-page-write"] = []string{"update"} + req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + } + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } req.Raw().Header["x-ms-copy-source"] = []string{sourceURL} - req.Raw().Header["x-ms-source-range"] = []string{sourceRange} - if options != nil && options.SourceContentMD5 != nil { - req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} + if options != nil && options.CopySourceAuthorization != nil { + req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} } - if options != nil && options.SourceContentcrc64 != nil { - req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)} + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } - req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} - req.Raw().Header["x-ms-range"] = []string{rangeParam} if cpkInfo != nil && cpkInfo.EncryptionKey != nil { req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey} } if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256} } - if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { - req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} - } if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } - if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { - req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { + req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} } if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil { req.Raw().Header["x-ms-if-sequence-number-le"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10)} 
@@ -1184,66 +1197,46 @@ func (client *PageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Contex if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil { req.Raw().Header["x-ms-if-sequence-number-lt"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10)} } - if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { - req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { - req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} - } - if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { - req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)} - } if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} + req.Raw().Header["x-ms-page-write"] = []string{"update"} + req.Raw().Header["x-ms-range"] = []string{rangeParam} + if options != nil && options.SourceContentcrc64 != nil { + req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)} + } + if options != nil && options.SourceContentMD5 != nil { + req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} + } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} } - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} - if options != nil && options.RequestID != nil { - 
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} - } - if options != nil && options.CopySourceAuthorization != nil { - req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-source-range"] = []string{sourceRange} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } // uploadPagesFromURLHandleResponse handles the UploadPagesFromURL response. func (client *PageBlobClient) uploadPagesFromURLHandleResponse(resp *http.Response) (PageBlobClientUploadPagesFromURLResponse, error) { result := PageBlobClientUploadPagesFromURLResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientUploadPagesFromURLResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) if err != nil { return PageBlobClientUploadPagesFromURLResponse{}, err } - result.ContentMD5 = contentMD5 + result.BlobSequenceNumber = &blobSequenceNumber } if val := resp.Header.Get("x-ms-content-crc64"); val != "" { contentCRC64, err := base64.StdEncoding.DecodeString(val) @@ -1252,18 +1245,12 @@ func (client *PageBlobClient) uploadPagesFromURLHandleResponse(resp *http.Respon } result.ContentCRC64 = contentCRC64 } - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) if err != nil { return PageBlobClientUploadPagesFromURLResponse{}, err } - result.BlobSequenceNumber = &blobSequenceNumber - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val + result.ContentMD5 = contentMD5 } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) @@ -1272,6 +1259,15 @@ func (client *PageBlobClient) uploadPagesFromURLHandleResponse(resp *http.Respon } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { @@ -1279,11 +1275,18 @@ func (client *PageBlobClient) uploadPagesFromURLHandleResponse(resp *http.Respon } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := 
time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val } return result, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go deleted file mode 100644 index b52664c938e..00000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go +++ /dev/null @@ -1,1994 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. -// Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. - -package generated - -import ( - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "io" - "time" -) - -// AppendBlobClientAppendBlockFromURLResponse contains the response from method AppendBlobClient.AppendBlockFromURL. -type AppendBlobClientAppendBlockFromURLResponse struct { - // BlobAppendOffset contains the information returned from the x-ms-blob-append-offset header response. - BlobAppendOffset *string - - // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. - BlobCommittedBlockCount *int32 - - // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. - ContentCRC64 []byte - - // ContentMD5 contains the information returned from the Content-MD5 header response. - ContentMD5 []byte - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. - EncryptionKeySHA256 *string - - // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. - EncryptionScope *string - - // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. - IsServerEncrypted *bool - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// AppendBlobClientAppendBlockResponse contains the response from method AppendBlobClient.AppendBlock. -type AppendBlobClientAppendBlockResponse struct { - // BlobAppendOffset contains the information returned from the x-ms-blob-append-offset header response. - BlobAppendOffset *string - - // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. - BlobCommittedBlockCount *int32 - - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. 
- ClientRequestID *string - - // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. - ContentCRC64 []byte - - // ContentMD5 contains the information returned from the Content-MD5 header response. - ContentMD5 []byte - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. - EncryptionKeySHA256 *string - - // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. - EncryptionScope *string - - // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. - IsServerEncrypted *bool - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// AppendBlobClientCreateResponse contains the response from method AppendBlobClient.Create. -type AppendBlobClientCreateResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // ContentMD5 contains the information returned from the Content-MD5 header response. - ContentMD5 []byte - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. - EncryptionKeySHA256 *string - - // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. - EncryptionScope *string - - // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. - IsServerEncrypted *bool - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string - - // VersionID contains the information returned from the x-ms-version-id header response. - VersionID *string -} - -// AppendBlobClientSealResponse contains the response from method AppendBlobClient.Seal. -type AppendBlobClientSealResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // IsSealed contains the information returned from the x-ms-blob-sealed header response. - IsSealed *bool - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. 
- Version *string -} - -// BlobClientAbortCopyFromURLResponse contains the response from method BlobClient.AbortCopyFromURL. -type BlobClientAbortCopyFromURLResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// BlobClientAcquireLeaseResponse contains the response from method BlobClient.AcquireLease. -type BlobClientAcquireLeaseResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // LeaseID contains the information returned from the x-ms-lease-id header response. - LeaseID *string - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// BlobClientBreakLeaseResponse contains the response from method BlobClient.BreakLease. -type BlobClientBreakLeaseResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // LeaseTime contains the information returned from the x-ms-lease-time header response. - LeaseTime *int32 - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// BlobClientChangeLeaseResponse contains the response from method BlobClient.ChangeLease. -type BlobClientChangeLeaseResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // LeaseID contains the information returned from the x-ms-lease-id header response. - LeaseID *string - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// BlobClientCopyFromURLResponse contains the response from method BlobClient.CopyFromURL. 
-type BlobClientCopyFromURLResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. - ContentCRC64 []byte - - // ContentMD5 contains the information returned from the Content-MD5 header response. - ContentMD5 []byte - - // CopyID contains the information returned from the x-ms-copy-id header response. - CopyID *string - - // CopyStatus contains the information returned from the x-ms-copy-status header response. - CopyStatus *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. - EncryptionScope *string - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string - - // VersionID contains the information returned from the x-ms-version-id header response. - VersionID *string -} - -// BlobClientCreateSnapshotResponse contains the response from method BlobClient.CreateSnapshot. -type BlobClientCreateSnapshotResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. - IsServerEncrypted *bool - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Snapshot contains the information returned from the x-ms-snapshot header response. - Snapshot *string - - // Version contains the information returned from the x-ms-version header response. - Version *string - - // VersionID contains the information returned from the x-ms-version-id header response. - VersionID *string -} - -// BlobClientDeleteImmutabilityPolicyResponse contains the response from method BlobClient.DeleteImmutabilityPolicy. -type BlobClientDeleteImmutabilityPolicyResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// BlobClientDeleteResponse contains the response from method BlobClient.Delete. -type BlobClientDeleteResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. 
- Date *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// BlobClientDownloadResponse contains the response from method BlobClient.Download. -type BlobClientDownloadResponse struct { - // AcceptRanges contains the information returned from the Accept-Ranges header response. - AcceptRanges *string - - // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. - BlobCommittedBlockCount *int32 - - // BlobContentMD5 contains the information returned from the x-ms-blob-content-md5 header response. - BlobContentMD5 []byte - - // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. - BlobSequenceNumber *int64 - - // BlobType contains the information returned from the x-ms-blob-type header response. - BlobType *BlobType - - // Body contains the streaming response. - Body io.ReadCloser - - // CacheControl contains the information returned from the Cache-Control header response. - CacheControl *string - - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. - ContentCRC64 []byte - - // ContentDisposition contains the information returned from the Content-Disposition header response. - ContentDisposition *string - - // ContentEncoding contains the information returned from the Content-Encoding header response. - ContentEncoding *string - - // ContentLanguage contains the information returned from the Content-Language header response. - ContentLanguage *string - - // ContentLength contains the information returned from the Content-Length header response. - ContentLength *int64 - - // ContentMD5 contains the information returned from the Content-MD5 header response. - ContentMD5 []byte - - // ContentRange contains the information returned from the Content-Range header response. - ContentRange *string - - // ContentType contains the information returned from the Content-Type header response. - ContentType *string - - // CopyCompletionTime contains the information returned from the x-ms-copy-completion-time header response. - CopyCompletionTime *time.Time - - // CopyID contains the information returned from the x-ms-copy-id header response. - CopyID *string - - // CopyProgress contains the information returned from the x-ms-copy-progress header response. - CopyProgress *string - - // CopySource contains the information returned from the x-ms-copy-source header response. - CopySource *string - - // CopyStatus contains the information returned from the x-ms-copy-status header response. - CopyStatus *CopyStatusType - - // CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response. - CopyStatusDescription *string - - // CreationTime contains the information returned from the x-ms-creation-time header response. - CreationTime *time.Time - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. 
- EncryptionKeySHA256 *string - - // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. - EncryptionScope *string - - // ErrorCode contains the information returned from the x-ms-error-code header response. - ErrorCode *string - - // ImmutabilityPolicyExpiresOn contains the information returned from the x-ms-immutability-policy-until-date header response. - ImmutabilityPolicyExpiresOn *time.Time - - // ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response. - ImmutabilityPolicyMode *ImmutabilityPolicyMode - - // IsCurrentVersion contains the information returned from the x-ms-is-current-version header response. - IsCurrentVersion *bool - - // IsSealed contains the information returned from the x-ms-blob-sealed header response. - IsSealed *bool - - // IsServerEncrypted contains the information returned from the x-ms-server-encrypted header response. - IsServerEncrypted *bool - - // LastAccessed contains the information returned from the x-ms-last-access-time header response. - LastAccessed *time.Time - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // LeaseDuration contains the information returned from the x-ms-lease-duration header response. - LeaseDuration *LeaseDurationType - - // LeaseState contains the information returned from the x-ms-lease-state header response. - LeaseState *LeaseStateType - - // LeaseStatus contains the information returned from the x-ms-lease-status header response. - LeaseStatus *LeaseStatusType - - // LegalHold contains the information returned from the x-ms-legal-hold header response. - LegalHold *bool - - // Metadata contains the information returned from the x-ms-meta header response. - Metadata map[string]*string - - // ObjectReplicationPolicyID contains the information returned from the x-ms-or-policy-id header response. - ObjectReplicationPolicyID *string - - // ObjectReplicationRules contains the information returned from the x-ms-or header response. - ObjectReplicationRules map[string]*string - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // TagCount contains the information returned from the x-ms-tag-count header response. - TagCount *int64 - - // Version contains the information returned from the x-ms-version header response. - Version *string - - // VersionID contains the information returned from the x-ms-version-id header response. - VersionID *string -} - -// BlobClientGetAccountInfoResponse contains the response from method BlobClient.GetAccountInfo. -type BlobClientGetAccountInfoResponse struct { - // AccountKind contains the information returned from the x-ms-account-kind header response. - AccountKind *AccountKind - - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // SKUName contains the information returned from the x-ms-sku-name header response. - SKUName *SKUName - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// BlobClientGetPropertiesResponse contains the response from method BlobClient.GetProperties. 
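BlobClientDownloadResponse pairs the streaming Body with the transactional Content-MD5 the service computed for the returned content, so the natural consumption pattern is read, close, and verify when the header is present. A hedged sketch under the assumption that it compiles alongside these types; the package name and helper are illustrative:

package generated

import (
	"bytes"
	"crypto/md5"
	"errors"
	"io"
)

// readAndVerify drains the download body and, when the service echoed a
// Content-MD5, checks the payload against it before returning the bytes.
func readAndVerify(resp BlobClientDownloadResponse) ([]byte, error) {
	defer resp.Body.Close()
	data, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	if resp.ContentMD5 != nil {
		sum := md5.Sum(data)
		if !bytes.Equal(sum[:], resp.ContentMD5) {
			return nil, errors.New("download: Content-MD5 mismatch")
		}
	}
	return data, nil
}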
-type BlobClientGetPropertiesResponse struct { - // AcceptRanges contains the information returned from the Accept-Ranges header response. - AcceptRanges *string - - // AccessTier contains the information returned from the x-ms-access-tier header response. - AccessTier *string - - // AccessTierChangeTime contains the information returned from the x-ms-access-tier-change-time header response. - AccessTierChangeTime *time.Time - - // AccessTierInferred contains the information returned from the x-ms-access-tier-inferred header response. - AccessTierInferred *bool - - // ArchiveStatus contains the information returned from the x-ms-archive-status header response. - ArchiveStatus *string - - // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. - BlobCommittedBlockCount *int32 - - // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. - BlobSequenceNumber *int64 - - // BlobType contains the information returned from the x-ms-blob-type header response. - BlobType *BlobType - - // CacheControl contains the information returned from the Cache-Control header response. - CacheControl *string - - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // ContentDisposition contains the information returned from the Content-Disposition header response. - ContentDisposition *string - - // ContentEncoding contains the information returned from the Content-Encoding header response. - ContentEncoding *string - - // ContentLanguage contains the information returned from the Content-Language header response. - ContentLanguage *string - - // ContentLength contains the information returned from the Content-Length header response. - ContentLength *int64 - - // ContentMD5 contains the information returned from the Content-MD5 header response. - ContentMD5 []byte - - // ContentType contains the information returned from the Content-Type header response. - ContentType *string - - // CopyCompletionTime contains the information returned from the x-ms-copy-completion-time header response. - CopyCompletionTime *time.Time - - // CopyID contains the information returned from the x-ms-copy-id header response. - CopyID *string - - // CopyProgress contains the information returned from the x-ms-copy-progress header response. - CopyProgress *string - - // CopySource contains the information returned from the x-ms-copy-source header response. - CopySource *string - - // CopyStatus contains the information returned from the x-ms-copy-status header response. - CopyStatus *CopyStatusType - - // CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response. - CopyStatusDescription *string - - // CreationTime contains the information returned from the x-ms-creation-time header response. - CreationTime *time.Time - - // Date contains the information returned from the Date header response. - Date *time.Time - - // DestinationSnapshot contains the information returned from the x-ms-copy-destination-snapshot header response. - DestinationSnapshot *string - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. - EncryptionKeySHA256 *string - - // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. 
- EncryptionScope *string - - // ExpiresOn contains the information returned from the x-ms-expiry-time header response. - ExpiresOn *time.Time - - // ImmutabilityPolicyExpiresOn contains the information returned from the x-ms-immutability-policy-until-date header response. - ImmutabilityPolicyExpiresOn *time.Time - - // ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response. - ImmutabilityPolicyMode *ImmutabilityPolicyMode - - // IsCurrentVersion contains the information returned from the x-ms-is-current-version header response. - IsCurrentVersion *bool - - // IsIncrementalCopy contains the information returned from the x-ms-incremental-copy header response. - IsIncrementalCopy *bool - - // IsSealed contains the information returned from the x-ms-blob-sealed header response. - IsSealed *bool - - // IsServerEncrypted contains the information returned from the x-ms-server-encrypted header response. - IsServerEncrypted *bool - - // LastAccessed contains the information returned from the x-ms-last-access-time header response. - LastAccessed *time.Time - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // LeaseDuration contains the information returned from the x-ms-lease-duration header response. - LeaseDuration *LeaseDurationType - - // LeaseState contains the information returned from the x-ms-lease-state header response. - LeaseState *LeaseStateType - - // LeaseStatus contains the information returned from the x-ms-lease-status header response. - LeaseStatus *LeaseStatusType - - // LegalHold contains the information returned from the x-ms-legal-hold header response. - LegalHold *bool - - // Metadata contains the information returned from the x-ms-meta header response. - Metadata map[string]*string - - // ObjectReplicationPolicyID contains the information returned from the x-ms-or-policy-id header response. - ObjectReplicationPolicyID *string - - // ObjectReplicationRules contains the information returned from the x-ms-or header response. - ObjectReplicationRules map[string]*string - - // RehydratePriority contains the information returned from the x-ms-rehydrate-priority header response. - RehydratePriority *string - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // TagCount contains the information returned from the x-ms-tag-count header response. - TagCount *int64 - - // Version contains the information returned from the x-ms-version header response. - Version *string - - // VersionID contains the information returned from the x-ms-version-id header response. - VersionID *string -} - -// BlobClientGetTagsResponse contains the response from method BlobClient.GetTags. -type BlobClientGetTagsResponse struct { - BlobTags - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` - - // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` - - // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` -} - -// BlobClientQueryResponse contains the response from method BlobClient.Query. 
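Unlike the header-only responses above, BlobClientGetTagsResponse embeds the deserialized XML payload (BlobTags) next to the response headers, so tag data is reached through promoted fields. A sketch flattening the tag set into a map; it assumes the BlobTags/BlobTag shapes (BlobTagSet, Key, Value) defined elsewhere in this generated package, which this hunk does not show:

// tagsToMap converts the embedded tag set into a plain string map,
// skipping entries with a missing key or value.
func tagsToMap(resp BlobClientGetTagsResponse) map[string]string {
	out := make(map[string]string, len(resp.BlobTagSet))
	for _, tag := range resp.BlobTagSet {
		if tag != nil && tag.Key != nil && tag.Value != nil {
			out[*tag.Key] = *tag.Value
		}
	}
	return out
}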
-type BlobClientQueryResponse struct { - // AcceptRanges contains the information returned from the Accept-Ranges header response. - AcceptRanges *string - - // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. - BlobCommittedBlockCount *int32 - - // BlobContentMD5 contains the information returned from the x-ms-blob-content-md5 header response. - BlobContentMD5 []byte - - // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. - BlobSequenceNumber *int64 - - // BlobType contains the information returned from the x-ms-blob-type header response. - BlobType *BlobType - - // Body contains the streaming response. - Body io.ReadCloser - - // CacheControl contains the information returned from the Cache-Control header response. - CacheControl *string - - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. - ContentCRC64 []byte - - // ContentDisposition contains the information returned from the Content-Disposition header response. - ContentDisposition *string - - // ContentEncoding contains the information returned from the Content-Encoding header response. - ContentEncoding *string - - // ContentLanguage contains the information returned from the Content-Language header response. - ContentLanguage *string - - // ContentLength contains the information returned from the Content-Length header response. - ContentLength *int64 - - // ContentMD5 contains the information returned from the Content-MD5 header response. - ContentMD5 []byte - - // ContentRange contains the information returned from the Content-Range header response. - ContentRange *string - - // ContentType contains the information returned from the Content-Type header response. - ContentType *string - - // CopyCompletionTime contains the information returned from the x-ms-copy-completion-time header response. - CopyCompletionTime *time.Time - - // CopyID contains the information returned from the x-ms-copy-id header response. - CopyID *string - - // CopyProgress contains the information returned from the x-ms-copy-progress header response. - CopyProgress *string - - // CopySource contains the information returned from the x-ms-copy-source header response. - CopySource *string - - // CopyStatus contains the information returned from the x-ms-copy-status header response. - CopyStatus *CopyStatusType - - // CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response. - CopyStatusDescription *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. - EncryptionKeySHA256 *string - - // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. - EncryptionScope *string - - // IsServerEncrypted contains the information returned from the x-ms-server-encrypted header response. - IsServerEncrypted *bool - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // LeaseDuration contains the information returned from the x-ms-lease-duration header response. 
- LeaseDuration *LeaseDurationType - - // LeaseState contains the information returned from the x-ms-lease-state header response. - LeaseState *LeaseStateType - - // LeaseStatus contains the information returned from the x-ms-lease-status header response. - LeaseStatus *LeaseStatusType - - // Metadata contains the information returned from the x-ms-meta header response. - Metadata map[string]*string - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// BlobClientReleaseLeaseResponse contains the response from method BlobClient.ReleaseLease. -type BlobClientReleaseLeaseResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// BlobClientRenewLeaseResponse contains the response from method BlobClient.RenewLease. -type BlobClientRenewLeaseResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // LeaseID contains the information returned from the x-ms-lease-id header response. - LeaseID *string - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// BlobClientSetExpiryResponse contains the response from method BlobClient.SetExpiry. -type BlobClientSetExpiryResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// BlobClientSetHTTPHeadersResponse contains the response from method BlobClient.SetHTTPHeaders. -type BlobClientSetHTTPHeadersResponse struct { - // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. - BlobSequenceNumber *int64 - - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. 
- ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// BlobClientSetImmutabilityPolicyResponse contains the response from method BlobClient.SetImmutabilityPolicy. -type BlobClientSetImmutabilityPolicyResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ImmutabilityPolicyExpiry contains the information returned from the x-ms-immutability-policy-until-date header response. - ImmutabilityPolicyExpiry *time.Time - - // ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response. - ImmutabilityPolicyMode *ImmutabilityPolicyMode - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// BlobClientSetLegalHoldResponse contains the response from method BlobClient.SetLegalHold. -type BlobClientSetLegalHoldResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // LegalHold contains the information returned from the x-ms-legal-hold header response. - LegalHold *bool - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// BlobClientSetMetadataResponse contains the response from method BlobClient.SetMetadata. -type BlobClientSetMetadataResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. - EncryptionKeySHA256 *string - - // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. - EncryptionScope *string - - // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. - IsServerEncrypted *bool - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string - - // VersionID contains the information returned from the x-ms-version-id header response. 
- VersionID *string -} - -// BlobClientSetTagsResponse contains the response from method BlobClient.SetTags. -type BlobClientSetTagsResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// BlobClientSetTierResponse contains the response from method BlobClient.SetTier. -type BlobClientSetTierResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// BlobClientStartCopyFromURLResponse contains the response from method BlobClient.StartCopyFromURL. -type BlobClientStartCopyFromURLResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // CopyID contains the information returned from the x-ms-copy-id header response. - CopyID *string - - // CopyStatus contains the information returned from the x-ms-copy-status header response. - CopyStatus *CopyStatusType - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string - - // VersionID contains the information returned from the x-ms-version-id header response. - VersionID *string -} - -// BlobClientUndeleteResponse contains the response from method BlobClient.Undelete. -type BlobClientUndeleteResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// BlockBlobClientCommitBlockListResponse contains the response from method BlockBlobClient.CommitBlockList. -type BlockBlobClientCommitBlockListResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. - ContentCRC64 []byte - - // ContentMD5 contains the information returned from the Content-MD5 header response. - ContentMD5 []byte - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. 
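StartCopyFromURL is asynchronous: its response only reports the x-ms-copy-id and an initial x-ms-copy-status, and completion is observed later, typically by polling GetProperties. A sketch of the status check, assuming the CopyStatusType constants (e.g. CopyStatusTypeSuccess) defined elsewhere in this generated package:

// copyFinished reports whether the service already marked the copy as
// complete in the initial StartCopyFromURL response.
func copyFinished(resp BlobClientStartCopyFromURLResponse) bool {
	return resp.CopyStatus != nil && *resp.CopyStatus == CopyStatusTypeSuccess
}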
- ETag *azcore.ETag - - // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. - EncryptionKeySHA256 *string - - // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. - EncryptionScope *string - - // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. - IsServerEncrypted *bool - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string - - // VersionID contains the information returned from the x-ms-version-id header response. - VersionID *string -} - -// BlockBlobClientGetBlockListResponse contains the response from method BlockBlobClient.GetBlockList. -type BlockBlobClientGetBlockListResponse struct { - BlockList - // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. - BlobContentLength *int64 `xml:"BlobContentLength"` - - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` - - // ContentType contains the information returned from the Content-Type header response. - ContentType *string `xml:"ContentType"` - - // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag `xml:"ETag"` - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time `xml:"LastModified"` - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` - - // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` -} - -// BlockBlobClientPutBlobFromURLResponse contains the response from method BlockBlobClient.PutBlobFromURL. -type BlockBlobClientPutBlobFromURLResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // ContentMD5 contains the information returned from the Content-MD5 header response. - ContentMD5 []byte - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. - EncryptionKeySHA256 *string - - // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. - EncryptionScope *string - - // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. - IsServerEncrypted *bool - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. 
- Version *string - - // VersionID contains the information returned from the x-ms-version-id header response. - VersionID *string -} - -// BlockBlobClientStageBlockFromURLResponse contains the response from method BlockBlobClient.StageBlockFromURL. -type BlockBlobClientStageBlockFromURLResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. - ContentCRC64 []byte - - // ContentMD5 contains the information returned from the Content-MD5 header response. - ContentMD5 []byte - - // Date contains the information returned from the Date header response. - Date *time.Time - - // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. - EncryptionKeySHA256 *string - - // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. - EncryptionScope *string - - // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. - IsServerEncrypted *bool - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// BlockBlobClientStageBlockResponse contains the response from method BlockBlobClient.StageBlock. -type BlockBlobClientStageBlockResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. - ContentCRC64 []byte - - // ContentMD5 contains the information returned from the Content-MD5 header response. - ContentMD5 []byte - - // Date contains the information returned from the Date header response. - Date *time.Time - - // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. - EncryptionKeySHA256 *string - - // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. - EncryptionScope *string - - // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. - IsServerEncrypted *bool - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// BlockBlobClientUploadResponse contains the response from method BlockBlobClient.Upload. -type BlockBlobClientUploadResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // ContentMD5 contains the information returned from the Content-MD5 header response. - ContentMD5 []byte - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. - EncryptionKeySHA256 *string - - // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. 
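The StageBlock responses echo per-block integrity headers (Content-MD5 and x-ms-content-crc64) for the block just uploaded, which lets a caller verify each part before CommitBlockList seals the blob. A sketch comparing the echoed MD5 against one computed locally; x-ms-content-crc64 uses a storage-specific polynomial not in the Go standard library, so it is deliberately left out. The helper and its placement alongside these types are illustrative:

package generated

import (
	"bytes"
	"crypto/md5"
)

// stagedBlockIntact compares the MD5 the service echoed for a staged
// block against one computed over the local payload.
func stagedBlockIntact(block []byte, resp BlockBlobClientStageBlockResponse) bool {
	if resp.ContentMD5 == nil {
		return true // no MD5 echoed; nothing to compare
	}
	sum := md5.Sum(block)
	return bytes.Equal(sum[:], resp.ContentMD5)
}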
- EncryptionScope *string - - // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. - IsServerEncrypted *bool - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string - - // VersionID contains the information returned from the x-ms-version-id header response. - VersionID *string -} - -// ContainerClientAcquireLeaseResponse contains the response from method ContainerClient.AcquireLease. -type ContainerClientAcquireLeaseResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // LeaseID contains the information returned from the x-ms-lease-id header response. - LeaseID *string - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// ContainerClientBreakLeaseResponse contains the response from method ContainerClient.BreakLease. -type ContainerClientBreakLeaseResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // LeaseTime contains the information returned from the x-ms-lease-time header response. - LeaseTime *int32 - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// ContainerClientChangeLeaseResponse contains the response from method ContainerClient.ChangeLease. -type ContainerClientChangeLeaseResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // LeaseID contains the information returned from the x-ms-lease-id header response. - LeaseID *string - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// ContainerClientCreateResponse contains the response from method ContainerClient.Create. 
-type ContainerClientCreateResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// ContainerClientDeleteResponse contains the response from method ContainerClient.Delete. -type ContainerClientDeleteResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// ContainerClientFilterBlobsResponse contains the response from method ContainerClient.FilterBlobs. -type ContainerClientFilterBlobsResponse struct { - FilterBlobSegment - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` - - // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` - - // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` -} - -// ContainerClientGetAccessPolicyResponse contains the response from method ContainerClient.GetAccessPolicy. -type ContainerClientGetAccessPolicyResponse struct { - // BlobPublicAccess contains the information returned from the x-ms-blob-public-access header response. - BlobPublicAccess *PublicAccessType `xml:"BlobPublicAccess"` - - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` - - // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag `xml:"ETag"` - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time `xml:"LastModified"` - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` - - // a collection of signed identifiers - SignedIdentifiers []*SignedIdentifier `xml:"SignedIdentifier"` - - // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` -} - -// ContainerClientGetAccountInfoResponse contains the response from method ContainerClient.GetAccountInfo. -type ContainerClientGetAccountInfoResponse struct { - // AccountKind contains the information returned from the x-ms-account-kind header response. 
- AccountKind *AccountKind - - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // SKUName contains the information returned from the x-ms-sku-name header response. - SKUName *SKUName - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// ContainerClientGetPropertiesResponse contains the response from method ContainerClient.GetProperties. -type ContainerClientGetPropertiesResponse struct { - // BlobPublicAccess contains the information returned from the x-ms-blob-public-access header response. - BlobPublicAccess *PublicAccessType - - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // DefaultEncryptionScope contains the information returned from the x-ms-default-encryption-scope header response. - DefaultEncryptionScope *string - - // DenyEncryptionScopeOverride contains the information returned from the x-ms-deny-encryption-scope-override header response. - DenyEncryptionScopeOverride *bool - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // HasImmutabilityPolicy contains the information returned from the x-ms-has-immutability-policy header response. - HasImmutabilityPolicy *bool - - // HasLegalHold contains the information returned from the x-ms-has-legal-hold header response. - HasLegalHold *bool - - // IsImmutableStorageWithVersioningEnabled contains the information returned from the x-ms-immutable-storage-with-versioning-enabled - // header response. - IsImmutableStorageWithVersioningEnabled *bool - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // LeaseDuration contains the information returned from the x-ms-lease-duration header response. - LeaseDuration *LeaseDurationType - - // LeaseState contains the information returned from the x-ms-lease-state header response. - LeaseState *LeaseStateType - - // LeaseStatus contains the information returned from the x-ms-lease-status header response. - LeaseStatus *LeaseStatusType - - // Metadata contains the information returned from the x-ms-meta header response. - Metadata map[string]*string - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// ContainerClientListBlobFlatSegmentResponse contains the response from method ContainerClient.NewListBlobFlatSegmentPager. -type ContainerClientListBlobFlatSegmentResponse struct { - ListBlobsFlatSegmentResponse - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` - - // ContentType contains the information returned from the Content-Type header response. - ContentType *string `xml:"ContentType"` - - // Date contains the information returned from the Date header response. 
- Date *time.Time `xml:"Date"` - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` - - // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` -} - -// ContainerClientListBlobHierarchySegmentResponse contains the response from method ContainerClient.NewListBlobHierarchySegmentPager. -type ContainerClientListBlobHierarchySegmentResponse struct { - ListBlobsHierarchySegmentResponse - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` - - // ContentType contains the information returned from the Content-Type header response. - ContentType *string `xml:"ContentType"` - - // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` - - // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` -} - -// ContainerClientReleaseLeaseResponse contains the response from method ContainerClient.ReleaseLease. -type ContainerClientReleaseLeaseResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// ContainerClientRenameResponse contains the response from method ContainerClient.Rename. -type ContainerClientRenameResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// ContainerClientRenewLeaseResponse contains the response from method ContainerClient.RenewLease. -type ContainerClientRenewLeaseResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // LeaseID contains the information returned from the x-ms-lease-id header response. - LeaseID *string - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. 
- Version *string -} - -// ContainerClientRestoreResponse contains the response from method ContainerClient.Restore. -type ContainerClientRestoreResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// ContainerClientSetAccessPolicyResponse contains the response from method ContainerClient.SetAccessPolicy. -type ContainerClientSetAccessPolicyResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// ContainerClientSetMetadataResponse contains the response from method ContainerClient.SetMetadata. -type ContainerClientSetMetadataResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// ContainerClientSubmitBatchResponse contains the response from method ContainerClient.SubmitBatch. -type ContainerClientSubmitBatchResponse struct { - // Body contains the streaming response. - Body io.ReadCloser - - // ContentType contains the information returned from the Content-Type header response. - ContentType *string - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// PageBlobClientClearPagesResponse contains the response from method PageBlobClient.ClearPages. -type PageBlobClientClearPagesResponse struct { - // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. - BlobSequenceNumber *int64 - - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. - ContentCRC64 []byte - - // ContentMD5 contains the information returned from the Content-MD5 header response. - ContentMD5 []byte - - // Date contains the information returned from the Date header response. 
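ContainerClientSubmitBatchResponse hands back the multipart batch reply as a raw io.ReadCloser, so the caller owns draining and closing it; leaking the body pins the underlying connection. A minimal sketch, assuming placement alongside these types (readBatchBody is an illustrative name):

package generated

import "io"

// readBatchBody drains and closes the raw multipart batch response.
func readBatchBody(resp ContainerClientSubmitBatchResponse) ([]byte, error) {
	defer resp.Body.Close()
	return io.ReadAll(resp.Body)
}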
- Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// PageBlobClientCopyIncrementalResponse contains the response from method PageBlobClient.CopyIncremental. -type PageBlobClientCopyIncrementalResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // CopyID contains the information returned from the x-ms-copy-id header response. - CopyID *string - - // CopyStatus contains the information returned from the x-ms-copy-status header response. - CopyStatus *CopyStatusType - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// PageBlobClientCreateResponse contains the response from method PageBlobClient.Create. -type PageBlobClientCreateResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // ContentMD5 contains the information returned from the Content-MD5 header response. - ContentMD5 []byte - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. - EncryptionKeySHA256 *string - - // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. - EncryptionScope *string - - // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. - IsServerEncrypted *bool - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string - - // VersionID contains the information returned from the x-ms-version-id header response. - VersionID *string -} - -// PageBlobClientGetPageRangesDiffResponse contains the response from method PageBlobClient.NewGetPageRangesDiffPager. -type PageBlobClientGetPageRangesDiffResponse struct { - PageList - // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. - BlobContentLength *int64 `xml:"BlobContentLength"` - - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` - - // Date contains the information returned from the Date header response. 
- Date *time.Time `xml:"Date"` - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag `xml:"ETag"` - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time `xml:"LastModified"` - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` - - // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` -} - -// PageBlobClientGetPageRangesResponse contains the response from method PageBlobClient.NewGetPageRangesPager. -type PageBlobClientGetPageRangesResponse struct { - PageList - // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. - BlobContentLength *int64 `xml:"BlobContentLength"` - - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` - - // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag `xml:"ETag"` - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time `xml:"LastModified"` - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` - - // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` -} - -// PageBlobClientResizeResponse contains the response from method PageBlobClient.Resize. -type PageBlobClientResizeResponse struct { - // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. - BlobSequenceNumber *int64 - - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// PageBlobClientUpdateSequenceNumberResponse contains the response from method PageBlobClient.UpdateSequenceNumber. -type PageBlobClientUpdateSequenceNumberResponse struct { - // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. - BlobSequenceNumber *int64 - - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. 
- Version *string -} - -// PageBlobClientUploadPagesFromURLResponse contains the response from method PageBlobClient.UploadPagesFromURL. -type PageBlobClientUploadPagesFromURLResponse struct { - // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. - BlobSequenceNumber *int64 - - // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. - ContentCRC64 []byte - - // ContentMD5 contains the information returned from the Content-MD5 header response. - ContentMD5 []byte - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. - EncryptionKeySHA256 *string - - // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. - EncryptionScope *string - - // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. - IsServerEncrypted *bool - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// PageBlobClientUploadPagesResponse contains the response from method PageBlobClient.UploadPages. -type PageBlobClientUploadPagesResponse struct { - // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. - BlobSequenceNumber *int64 - - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. - ContentCRC64 []byte - - // ContentMD5 contains the information returned from the Content-MD5 header response. - ContentMD5 []byte - - // Date contains the information returned from the Date header response. - Date *time.Time - - // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag - - // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. - EncryptionKeySHA256 *string - - // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. - EncryptionScope *string - - // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. - IsServerEncrypted *bool - - // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// ServiceClientFilterBlobsResponse contains the response from method ServiceClient.FilterBlobs. -type ServiceClientFilterBlobsResponse struct { - FilterBlobSegment - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` - - // Date contains the information returned from the Date header response. 
- Date *time.Time `xml:"Date"` - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` - - // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` -} - -// ServiceClientGetAccountInfoResponse contains the response from method ServiceClient.GetAccountInfo. -type ServiceClientGetAccountInfoResponse struct { - // AccountKind contains the information returned from the x-ms-account-kind header response. - AccountKind *AccountKind - - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // Date contains the information returned from the Date header response. - Date *time.Time - - // IsHierarchicalNamespaceEnabled contains the information returned from the x-ms-is-hns-enabled header response. - IsHierarchicalNamespaceEnabled *bool - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // SKUName contains the information returned from the x-ms-sku-name header response. - SKUName *SKUName - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// ServiceClientGetPropertiesResponse contains the response from method ServiceClient.GetProperties. -type ServiceClientGetPropertiesResponse struct { - StorageServiceProperties - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` - - // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` -} - -// ServiceClientGetStatisticsResponse contains the response from method ServiceClient.GetStatistics. -type ServiceClientGetStatisticsResponse struct { - StorageServiceStats - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` - - // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` - - // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` -} - -// ServiceClientGetUserDelegationKeyResponse contains the response from method ServiceClient.GetUserDelegationKey. -type ServiceClientGetUserDelegationKeyResponse struct { - UserDelegationKey - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` - - // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` - - // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` -} - -// ServiceClientListContainersSegmentResponse contains the response from method ServiceClient.NewListContainersSegmentPager. 
-type ServiceClientListContainersSegmentResponse struct { - ListContainersSegmentResponse - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` - - // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` -} - -// ServiceClientSetPropertiesResponse contains the response from method ServiceClient.SetProperties. -type ServiceClientSetPropertiesResponse struct { - // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} - -// ServiceClientSubmitBatchResponse contains the response from method ServiceClient.SubmitBatch. -type ServiceClientSubmitBatchResponse struct { - // Body contains the streaming response. - Body io.ReadCloser - - // ContentType contains the information returned from the Content-Type header response. - ContentType *string - - // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string - - // Version contains the information returned from the x-ms-version header response. - Version *string -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_responses.go new file mode 100644 index 00000000000..ff9f3d3de1b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_responses.go @@ -0,0 +1,2022 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package generated + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "io" + "time" +) + +// AppendBlobClientAppendBlockFromURLResponse contains the response from method AppendBlobClient.AppendBlockFromURL. +type AppendBlobClientAppendBlockFromURLResponse struct { + // BlobAppendOffset contains the information returned from the x-ms-blob-append-offset header response. + BlobAppendOffset *string + + // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. + BlobCommittedBlockCount *int32 + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. 
+ EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// AppendBlobClientAppendBlockResponse contains the response from method AppendBlobClient.AppendBlock. +type AppendBlobClientAppendBlockResponse struct { + // BlobAppendOffset contains the information returned from the x-ms-blob-append-offset header response. + BlobAppendOffset *string + + // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. + BlobCommittedBlockCount *int32 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// AppendBlobClientCreateResponse contains the response from method AppendBlobClient.Create. +type AppendBlobClientCreateResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. 
+ RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// AppendBlobClientSealResponse contains the response from method AppendBlobClient.Seal. +type AppendBlobClientSealResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // IsSealed contains the information returned from the x-ms-blob-sealed header response. + IsSealed *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientAbortCopyFromURLResponse contains the response from method BlobClient.AbortCopyFromURL. +type BlobClientAbortCopyFromURLResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientAcquireLeaseResponse contains the response from method BlobClient.AcquireLease. +type BlobClientAcquireLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientBreakLeaseResponse contains the response from method BlobClient.BreakLease. +type BlobClientBreakLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseTime contains the information returned from the x-ms-lease-time header response. + LeaseTime *int32 + + // RequestID contains the information returned from the x-ms-request-id header response. 
+ RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientChangeLeaseResponse contains the response from method BlobClient.ChangeLease. +type BlobClientChangeLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientCopyFromURLResponse contains the response from method BlobClient.CopyFromURL. +type BlobClientCopyFromURLResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// BlobClientCreateSnapshotResponse contains the response from method BlobClient.CreateSnapshot. +type BlobClientCreateSnapshotResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Snapshot contains the information returned from the x-ms-snapshot header response. 
+ Snapshot *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// BlobClientDeleteImmutabilityPolicyResponse contains the response from method BlobClient.DeleteImmutabilityPolicy. +type BlobClientDeleteImmutabilityPolicyResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientDeleteResponse contains the response from method BlobClient.Delete. +type BlobClientDeleteResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientDownloadResponse contains the response from method BlobClient.Download. +type BlobClientDownloadResponse struct { + // AcceptRanges contains the information returned from the Accept-Ranges header response. + AcceptRanges *string + + // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. + BlobCommittedBlockCount *int32 + + // BlobContentMD5 contains the information returned from the x-ms-blob-content-md5 header response. + BlobContentMD5 []byte + + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // BlobType contains the information returned from the x-ms-blob-type header response. + BlobType *BlobType + + // Body contains the streaming response. + Body io.ReadCloser + + // CacheControl contains the information returned from the Cache-Control header response. + CacheControl *string + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentDisposition contains the information returned from the Content-Disposition header response. + ContentDisposition *string + + // ContentEncoding contains the information returned from the Content-Encoding header response. + ContentEncoding *string + + // ContentLanguage contains the information returned from the Content-Language header response. + ContentLanguage *string + + // ContentLength contains the information returned from the Content-Length header response. + ContentLength *int64 + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // ContentRange contains the information returned from the Content-Range header response. + ContentRange *string + + // ContentType contains the information returned from the Content-Type header response. 
+ ContentType *string + + // CopyCompletionTime contains the information returned from the x-ms-copy-completion-time header response. + CopyCompletionTime *time.Time + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyProgress contains the information returned from the x-ms-copy-progress header response. + CopyProgress *string + + // CopySource contains the information returned from the x-ms-copy-source header response. + CopySource *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *CopyStatusType + + // CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response. + CopyStatusDescription *string + + // CreationTime contains the information returned from the x-ms-creation-time header response. + CreationTime *time.Time + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // ErrorCode contains the information returned from the x-ms-error-code header response. + ErrorCode *string + + // ImmutabilityPolicyExpiresOn contains the information returned from the x-ms-immutability-policy-until-date header response. + ImmutabilityPolicyExpiresOn *time.Time + + // ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response. + ImmutabilityPolicyMode *ImmutabilityPolicyMode + + // IsCurrentVersion contains the information returned from the x-ms-is-current-version header response. + IsCurrentVersion *bool + + // IsSealed contains the information returned from the x-ms-blob-sealed header response. + IsSealed *bool + + // IsServerEncrypted contains the information returned from the x-ms-server-encrypted header response. + IsServerEncrypted *bool + + // LastAccessed contains the information returned from the x-ms-last-access-time header response. + LastAccessed *time.Time + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseDuration contains the information returned from the x-ms-lease-duration header response. + LeaseDuration *LeaseDurationType + + // LeaseState contains the information returned from the x-ms-lease-state header response. + LeaseState *LeaseStateType + + // LeaseStatus contains the information returned from the x-ms-lease-status header response. + LeaseStatus *LeaseStatusType + + // LegalHold contains the information returned from the x-ms-legal-hold header response. + LegalHold *bool + + // Metadata contains the information returned from the x-ms-meta header response. + Metadata map[string]*string + + // ObjectReplicationPolicyID contains the information returned from the x-ms-or-policy-id header response. + ObjectReplicationPolicyID *string + + // ObjectReplicationRules contains the information returned from the x-ms-or header response. + ObjectReplicationRules map[string]*string + + // RequestID contains the information returned from the x-ms-request-id header response. 
+ RequestID *string + + // TagCount contains the information returned from the x-ms-tag-count header response. + TagCount *int64 + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// BlobClientGetAccountInfoResponse contains the response from method BlobClient.GetAccountInfo. +type BlobClientGetAccountInfoResponse struct { + // AccountKind contains the information returned from the x-ms-account-kind header response. + AccountKind *AccountKind + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // IsHierarchicalNamespaceEnabled contains the information returned from the x-ms-is-hns-enabled header response. + IsHierarchicalNamespaceEnabled *bool + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // SKUName contains the information returned from the x-ms-sku-name header response. + SKUName *SKUName + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientGetPropertiesResponse contains the response from method BlobClient.GetProperties. +type BlobClientGetPropertiesResponse struct { + // AcceptRanges contains the information returned from the Accept-Ranges header response. + AcceptRanges *string + + // AccessTier contains the information returned from the x-ms-access-tier header response. + AccessTier *string + + // AccessTierChangeTime contains the information returned from the x-ms-access-tier-change-time header response. + AccessTierChangeTime *time.Time + + // AccessTierInferred contains the information returned from the x-ms-access-tier-inferred header response. + AccessTierInferred *bool + + // ArchiveStatus contains the information returned from the x-ms-archive-status header response. + ArchiveStatus *string + + // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. + BlobCommittedBlockCount *int32 + + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // BlobType contains the information returned from the x-ms-blob-type header response. + BlobType *BlobType + + // CacheControl contains the information returned from the Cache-Control header response. + CacheControl *string + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentDisposition contains the information returned from the Content-Disposition header response. + ContentDisposition *string + + // ContentEncoding contains the information returned from the Content-Encoding header response. + ContentEncoding *string + + // ContentLanguage contains the information returned from the Content-Language header response. + ContentLanguage *string + + // ContentLength contains the information returned from the Content-Length header response. + ContentLength *int64 + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // ContentType contains the information returned from the Content-Type header response. 
+ ContentType *string + + // CopyCompletionTime contains the information returned from the x-ms-copy-completion-time header response. + CopyCompletionTime *time.Time + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyProgress contains the information returned from the x-ms-copy-progress header response. + CopyProgress *string + + // CopySource contains the information returned from the x-ms-copy-source header response. + CopySource *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *CopyStatusType + + // CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response. + CopyStatusDescription *string + + // CreationTime contains the information returned from the x-ms-creation-time header response. + CreationTime *time.Time + + // Date contains the information returned from the Date header response. + Date *time.Time + + // DestinationSnapshot contains the information returned from the x-ms-copy-destination-snapshot header response. + DestinationSnapshot *string + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // ExpiresOn contains the information returned from the x-ms-expiry-time header response. + ExpiresOn *time.Time + + // ImmutabilityPolicyExpiresOn contains the information returned from the x-ms-immutability-policy-until-date header response. + ImmutabilityPolicyExpiresOn *time.Time + + // ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response. + ImmutabilityPolicyMode *ImmutabilityPolicyMode + + // IsCurrentVersion contains the information returned from the x-ms-is-current-version header response. + IsCurrentVersion *bool + + // IsIncrementalCopy contains the information returned from the x-ms-incremental-copy header response. + IsIncrementalCopy *bool + + // IsSealed contains the information returned from the x-ms-blob-sealed header response. + IsSealed *bool + + // IsServerEncrypted contains the information returned from the x-ms-server-encrypted header response. + IsServerEncrypted *bool + + // LastAccessed contains the information returned from the x-ms-last-access-time header response. + LastAccessed *time.Time + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseDuration contains the information returned from the x-ms-lease-duration header response. + LeaseDuration *LeaseDurationType + + // LeaseState contains the information returned from the x-ms-lease-state header response. + LeaseState *LeaseStateType + + // LeaseStatus contains the information returned from the x-ms-lease-status header response. + LeaseStatus *LeaseStatusType + + // LegalHold contains the information returned from the x-ms-legal-hold header response. + LegalHold *bool + + // Metadata contains the information returned from the x-ms-meta header response. + Metadata map[string]*string + + // ObjectReplicationPolicyID contains the information returned from the x-ms-or-policy-id header response. 
+ ObjectReplicationPolicyID *string + + // ObjectReplicationRules contains the information returned from the x-ms-or header response. + ObjectReplicationRules map[string]*string + + // RehydratePriority contains the information returned from the x-ms-rehydrate-priority header response. + RehydratePriority *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // TagCount contains the information returned from the x-ms-tag-count header response. + TagCount *int64 + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// BlobClientGetTagsResponse contains the response from method BlobClient.GetTags. +type BlobClientGetTagsResponse struct { + // Blob tags + BlobTags + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientQueryResponse contains the response from method BlobClient.Query. +type BlobClientQueryResponse struct { + // AcceptRanges contains the information returned from the Accept-Ranges header response. + AcceptRanges *string + + // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. + BlobCommittedBlockCount *int32 + + // BlobContentMD5 contains the information returned from the x-ms-blob-content-md5 header response. + BlobContentMD5 []byte + + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // BlobType contains the information returned from the x-ms-blob-type header response. + BlobType *BlobType + + // Body contains the streaming response. + Body io.ReadCloser + + // CacheControl contains the information returned from the Cache-Control header response. + CacheControl *string + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentDisposition contains the information returned from the Content-Disposition header response. + ContentDisposition *string + + // ContentEncoding contains the information returned from the Content-Encoding header response. + ContentEncoding *string + + // ContentLanguage contains the information returned from the Content-Language header response. + ContentLanguage *string + + // ContentLength contains the information returned from the Content-Length header response. + ContentLength *int64 + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // ContentRange contains the information returned from the Content-Range header response. + ContentRange *string + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // CopyCompletionTime contains the information returned from the x-ms-copy-completion-time header response. 
+ CopyCompletionTime *time.Time + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyProgress contains the information returned from the x-ms-copy-progress header response. + CopyProgress *string + + // CopySource contains the information returned from the x-ms-copy-source header response. + CopySource *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *CopyStatusType + + // CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response. + CopyStatusDescription *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseDuration contains the information returned from the x-ms-lease-duration header response. + LeaseDuration *LeaseDurationType + + // LeaseState contains the information returned from the x-ms-lease-state header response. + LeaseState *LeaseStateType + + // LeaseStatus contains the information returned from the x-ms-lease-status header response. + LeaseStatus *LeaseStatusType + + // Metadata contains the information returned from the x-ms-meta header response. + Metadata map[string]*string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientReleaseLeaseResponse contains the response from method BlobClient.ReleaseLease. +type BlobClientReleaseLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientRenewLeaseResponse contains the response from method BlobClient.RenewLease. +type BlobClientRenewLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. 
+ LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientSetExpiryResponse contains the response from method BlobClient.SetExpiry. +type BlobClientSetExpiryResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientSetHTTPHeadersResponse contains the response from method BlobClient.SetHTTPHeaders. +type BlobClientSetHTTPHeadersResponse struct { + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientSetImmutabilityPolicyResponse contains the response from method BlobClient.SetImmutabilityPolicy. +type BlobClientSetImmutabilityPolicyResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ImmutabilityPolicyExpiry contains the information returned from the x-ms-immutability-policy-until-date header response. + ImmutabilityPolicyExpiry *time.Time + + // ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response. + ImmutabilityPolicyMode *ImmutabilityPolicyMode + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientSetLegalHoldResponse contains the response from method BlobClient.SetLegalHold. +type BlobClientSetLegalHoldResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // LegalHold contains the information returned from the x-ms-legal-hold header response. 
+ LegalHold *bool + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientSetMetadataResponse contains the response from method BlobClient.SetMetadata. +type BlobClientSetMetadataResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// BlobClientSetTagsResponse contains the response from method BlobClient.SetTags. +type BlobClientSetTagsResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientSetTierResponse contains the response from method BlobClient.SetTier. +type BlobClientSetTierResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlobClientStartCopyFromURLResponse contains the response from method BlobClient.StartCopyFromURL. +type BlobClientStartCopyFromURLResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *CopyStatusType + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. 
+ RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// BlobClientUndeleteResponse contains the response from method BlobClient.Undelete. +type BlobClientUndeleteResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlockBlobClientCommitBlockListResponse contains the response from method BlockBlobClient.CommitBlockList. +type BlockBlobClientCommitBlockListResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// BlockBlobClientGetBlockListResponse contains the response from method BlockBlobClient.GetBlockList. +type BlockBlobClientGetBlockListResponse struct { + BlockList + + // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. + BlobContentLength *int64 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. 
+ Version *string +} + +// BlockBlobClientPutBlobFromURLResponse contains the response from method BlockBlobClient.PutBlobFromURL. +type BlockBlobClientPutBlobFromURLResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// BlockBlobClientStageBlockFromURLResponse contains the response from method BlockBlobClient.StageBlockFromURL. +type BlockBlobClientStageBlockFromURLResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlockBlobClientStageBlockResponse contains the response from method BlockBlobClient.StageBlock. +type BlockBlobClientStageBlockResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. 
+ EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BlockBlobClientUploadResponse contains the response from method BlockBlobClient.Upload. +type BlockBlobClientUploadResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// ContainerClientAcquireLeaseResponse contains the response from method ContainerClient.AcquireLease. +type ContainerClientAcquireLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientBreakLeaseResponse contains the response from method ContainerClient.BreakLease. +type ContainerClientBreakLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. 
+ ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseTime contains the information returned from the x-ms-lease-time header response. + LeaseTime *int32 + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientChangeLeaseResponse contains the response from method ContainerClient.ChangeLease. +type ContainerClientChangeLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientCreateResponse contains the response from method ContainerClient.Create. +type ContainerClientCreateResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientDeleteResponse contains the response from method ContainerClient.Delete. +type ContainerClientDeleteResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientFilterBlobsResponse contains the response from method ContainerClient.FilterBlobs. +type ContainerClientFilterBlobsResponse struct { + // The result of a Filter Blobs API call + FilterBlobSegment + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. 
+ Version *string +} + +// ContainerClientGetAccessPolicyResponse contains the response from method ContainerClient.GetAccessPolicy. +type ContainerClientGetAccessPolicyResponse struct { + // BlobPublicAccess contains the information returned from the x-ms-blob-public-access header response. + BlobPublicAccess *PublicAccessType + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // a collection of signed identifiers + SignedIdentifiers []*SignedIdentifier `xml:"SignedIdentifier"` + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientGetAccountInfoResponse contains the response from method ContainerClient.GetAccountInfo. +type ContainerClientGetAccountInfoResponse struct { + // AccountKind contains the information returned from the x-ms-account-kind header response. + AccountKind *AccountKind + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // IsHierarchicalNamespaceEnabled contains the information returned from the x-ms-is-hns-enabled header response. + IsHierarchicalNamespaceEnabled *bool + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // SKUName contains the information returned from the x-ms-sku-name header response. + SKUName *SKUName + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientGetPropertiesResponse contains the response from method ContainerClient.GetProperties. +type ContainerClientGetPropertiesResponse struct { + // BlobPublicAccess contains the information returned from the x-ms-blob-public-access header response. + BlobPublicAccess *PublicAccessType + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // DefaultEncryptionScope contains the information returned from the x-ms-default-encryption-scope header response. + DefaultEncryptionScope *string + + // DenyEncryptionScopeOverride contains the information returned from the x-ms-deny-encryption-scope-override header response. + DenyEncryptionScopeOverride *bool + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // HasImmutabilityPolicy contains the information returned from the x-ms-has-immutability-policy header response. + HasImmutabilityPolicy *bool + + // HasLegalHold contains the information returned from the x-ms-has-legal-hold header response. + HasLegalHold *bool + + // IsImmutableStorageWithVersioningEnabled contains the information returned from the x-ms-immutable-storage-with-versioning-enabled + // header response. 
+ IsImmutableStorageWithVersioningEnabled *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseDuration contains the information returned from the x-ms-lease-duration header response. + LeaseDuration *LeaseDurationType + + // LeaseState contains the information returned from the x-ms-lease-state header response. + LeaseState *LeaseStateType + + // LeaseStatus contains the information returned from the x-ms-lease-status header response. + LeaseStatus *LeaseStatusType + + // Metadata contains the information returned from the x-ms-meta header response. + Metadata map[string]*string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientListBlobFlatSegmentResponse contains the response from method ContainerClient.NewListBlobFlatSegmentPager. +type ContainerClientListBlobFlatSegmentResponse struct { + // An enumeration of blobs + ListBlobsFlatSegmentResponse + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientListBlobHierarchySegmentResponse contains the response from method ContainerClient.NewListBlobHierarchySegmentPager. +type ContainerClientListBlobHierarchySegmentResponse struct { + // An enumeration of blobs + ListBlobsHierarchySegmentResponse + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientReleaseLeaseResponse contains the response from method ContainerClient.ReleaseLease. +type ContainerClientReleaseLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientRenameResponse contains the response from method ContainerClient.Rename. 
+type ContainerClientRenameResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientRenewLeaseResponse contains the response from method ContainerClient.RenewLease. +type ContainerClientRenewLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientRestoreResponse contains the response from method ContainerClient.Restore. +type ContainerClientRestoreResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientSetAccessPolicyResponse contains the response from method ContainerClient.SetAccessPolicy. +type ContainerClientSetAccessPolicyResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientSetMetadataResponse contains the response from method ContainerClient.SetMetadata. +type ContainerClientSetMetadataResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. 
+ RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ContainerClientSubmitBatchResponse contains the response from method ContainerClient.SubmitBatch. +type ContainerClientSubmitBatchResponse struct { + // Body contains the streaming response. + Body io.ReadCloser + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// PageBlobClientClearPagesResponse contains the response from method PageBlobClient.ClearPages. +type PageBlobClientClearPagesResponse struct { + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// PageBlobClientCopyIncrementalResponse contains the response from method PageBlobClient.CopyIncremental. +type PageBlobClientCopyIncrementalResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *CopyStatusType + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// PageBlobClientCreateResponse contains the response from method PageBlobClient.Create. +type PageBlobClientCreateResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. 
+ ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// PageBlobClientGetPageRangesDiffResponse contains the response from method PageBlobClient.NewGetPageRangesDiffPager. +type PageBlobClientGetPageRangesDiffResponse struct { + // the list of pages + PageList + + // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. + BlobContentLength *int64 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// PageBlobClientGetPageRangesResponse contains the response from method PageBlobClient.NewGetPageRangesPager. +type PageBlobClientGetPageRangesResponse struct { + // the list of pages + PageList + + // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. + BlobContentLength *int64 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// PageBlobClientResizeResponse contains the response from method PageBlobClient.Resize. +type PageBlobClientResizeResponse struct { + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. 
+ ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// PageBlobClientUpdateSequenceNumberResponse contains the response from method PageBlobClient.UpdateSequenceNumber. +type PageBlobClientUpdateSequenceNumberResponse struct { + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// PageBlobClientUploadPagesFromURLResponse contains the response from method PageBlobClient.UploadPagesFromURL. +type PageBlobClientUploadPagesFromURLResponse struct { + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// PageBlobClientUploadPagesResponse contains the response from method PageBlobClient.UploadPages. +type PageBlobClientUploadPagesResponse struct { + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentMD5 contains the information returned from the Content-MD5 header response. 
+ ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ServiceClientFilterBlobsResponse contains the response from method ServiceClient.FilterBlobs. +type ServiceClientFilterBlobsResponse struct { + // The result of a Filter Blobs API call + FilterBlobSegment + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ServiceClientGetAccountInfoResponse contains the response from method ServiceClient.GetAccountInfo. +type ServiceClientGetAccountInfoResponse struct { + // AccountKind contains the information returned from the x-ms-account-kind header response. + AccountKind *AccountKind + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // IsHierarchicalNamespaceEnabled contains the information returned from the x-ms-is-hns-enabled header response. + IsHierarchicalNamespaceEnabled *bool + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // SKUName contains the information returned from the x-ms-sku-name header response. + SKUName *SKUName + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ServiceClientGetPropertiesResponse contains the response from method ServiceClient.GetProperties. +type ServiceClientGetPropertiesResponse struct { + // Storage Service Properties. + StorageServiceProperties + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ServiceClientGetStatisticsResponse contains the response from method ServiceClient.GetStatistics. +type ServiceClientGetStatisticsResponse struct { + // Stats for the storage service. + StorageServiceStats + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. 
+	ClientRequestID *string
+
+	// Date contains the information returned from the Date header response.
+	Date *time.Time
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string
+}
+
+// ServiceClientGetUserDelegationKeyResponse contains the response from method ServiceClient.GetUserDelegationKey.
+type ServiceClientGetUserDelegationKeyResponse struct {
+	// A user delegation key
+	UserDelegationKey
+
+	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+	ClientRequestID *string
+
+	// Date contains the information returned from the Date header response.
+	Date *time.Time
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string
+}
+
+// ServiceClientListContainersSegmentResponse contains the response from method ServiceClient.NewListContainersSegmentPager.
+type ServiceClientListContainersSegmentResponse struct {
+	// An enumeration of containers
+	ListContainersSegmentResponse
+
+	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+	ClientRequestID *string
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string
+}
+
+// ServiceClientSetPropertiesResponse contains the response from method ServiceClient.SetProperties.
+type ServiceClientSetPropertiesResponse struct {
+	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+	ClientRequestID *string
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string
+}
+
+// ServiceClientSubmitBatchResponse contains the response from method ServiceClient.SubmitBatch.
+type ServiceClientSubmitBatchResponse struct {
+	// Body contains the streaming response.
+	Body io.ReadCloser
+
+	// ContentType contains the information returned from the Content-Type header response.
+	ContentType *string
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string
+}
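For orientation before the service-client changes that follow: these generated response structs are what the public azblob clients ultimately hand back to callers. Below is a minimal sketch, not part of this change, assuming the top-level azblob convenience client; the connection string, container, and blob names are placeholders, and it relies on UploadBuffer's response type aliasing the generated BlockBlobClientCommitBlockListResponse shown above.

```go
// Hypothetical sketch: reading generated response fields after an upload
// through the public azblob API. Names in angle brackets are placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
)

func main() {
	client, err := azblob.NewClientFromConnectionString("<connection-string>", nil)
	if err != nil {
		log.Fatal(err)
	}

	// UploadBuffer surfaces the generated commit-block-list response type,
	// so ETag, LastModified, VersionID, etc. come straight from the headers.
	resp, err := client.UploadBuffer(context.TODO(), "mycontainer", "myblob.txt", []byte("hello"), nil)
	if err != nil {
		log.Fatal(err)
	}

	// Header-derived fields are pointers; nil means the header was omitted.
	if resp.ETag != nil {
		fmt.Println("etag:", *resp.ETag)
	}
	if resp.VersionID != nil {
		fmt.Println("version id:", *resp.VersionID)
	}
}
```

Since every header-derived field is a pointer, a nil field simply means the service did not return that header in its response.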
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go
index faeefdc5322..dcf51a9d563 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go
@@ -1,11 +1,7 @@
-//go:build go1.18
-// +build go1.18
-
 // Copyright (c) Microsoft Corporation. All rights reserved.
 // Licensed under the MIT License. See License.txt in the project root for license information.
-// Code generated by Microsoft (R) AutoRest Code Generator.
+// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT.
 // Changes may cause incorrect behavior and will be lost if the code is regenerated.
-// DO NOT EDIT.
 
 package generated
@@ -34,22 +30,25 @@ type ServiceClient struct {
 // be scoped within the expression to a single container.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2023-08-03
+// Generated from API version 2024-08-04
 // - where - Filters the results to return only blobs whose tags match the specified expression.
 // - options - ServiceClientFilterBlobsOptions contains the optional parameters for the ServiceClient.FilterBlobs method.
 func (client *ServiceClient) FilterBlobs(ctx context.Context, where string, options *ServiceClientFilterBlobsOptions) (ServiceClientFilterBlobsResponse, error) {
+	var err error
 	req, err := client.filterBlobsCreateRequest(ctx, where, options)
 	if err != nil {
 		return ServiceClientFilterBlobsResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return ServiceClientFilterBlobsResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusOK) {
-		return ServiceClientFilterBlobsResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusOK) {
+		err = runtime.NewResponseError(httpResp)
+		return ServiceClientFilterBlobsResponse{}, err
 	}
-	return client.filterBlobsHandleResponse(resp)
+	resp, err := client.filterBlobsHandleResponse(httpResp)
+	return resp, err
 }
 
 // filterBlobsCreateRequest creates the FilterBlobs request.
@@ -60,25 +59,25 @@ func (client *ServiceClient) filterBlobsCreateRequest(ctx context.Context, where
 	}
 	reqQP := req.Raw().URL.Query()
 	reqQP.Set("comp", "blobs")
-	if options != nil && options.Timeout != nil {
-		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
+	if options != nil && options.Include != nil {
+		reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ","))
 	}
-	reqQP.Set("where", where)
 	if options != nil && options.Marker != nil {
 		reqQP.Set("marker", *options.Marker)
 	}
 	if options != nil && options.Maxresults != nil {
 		reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10))
 	}
-	if options != nil && options.Include != nil {
-		reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ","))
+	if options != nil && options.Timeout != nil {
+		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
 	}
+	reqQP.Set("where", where)
 	req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1)
-	req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
+	req.Raw().Header["Accept"] = []string{"application/xml"}
 	if options != nil && options.RequestID != nil {
 		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
 	}
-	req.Raw().Header["Accept"] = []string{"application/xml"}
+	req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
 	return req, nil
 }
 
@@ -88,12 +87,6 @@ func (client *ServiceClient) filterBlobsHandleResponse(resp *http.Response) (Ser
 	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
 		result.ClientRequestID = &val
 	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
-	}
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
 		if err != nil {
@@ -101,6 +94,12 @@ func (client *ServiceClient) filterBlobsHandleResponse(resp
*http.Response) (Ser } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } if err := runtime.UnmarshalAsXML(resp, &result.FilterBlobSegment); err != nil { return ServiceClientFilterBlobsResponse{}, err } @@ -110,21 +109,24 @@ func (client *ServiceClient) filterBlobsHandleResponse(resp *http.Response) (Ser // GetAccountInfo - Returns the sku name and account kind // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - options - ServiceClientGetAccountInfoOptions contains the optional parameters for the ServiceClient.GetAccountInfo method. func (client *ServiceClient) GetAccountInfo(ctx context.Context, options *ServiceClientGetAccountInfoOptions) (ServiceClientGetAccountInfoResponse, error) { + var err error req, err := client.getAccountInfoCreateRequest(ctx, options) if err != nil { return ServiceClientGetAccountInfoResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ServiceClientGetAccountInfoResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ServiceClientGetAccountInfoResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ServiceClientGetAccountInfoResponse{}, err } - return client.getAccountInfoHandleResponse(resp) + resp, err := client.getAccountInfoHandleResponse(httpResp) + return resp, err } // getAccountInfoCreateRequest creates the GetAccountInfo request. @@ -134,26 +136,29 @@ func (client *ServiceClient) getAccountInfoCreateRequest(ctx context.Context, op return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "account") reqQP.Set("comp", "properties") + reqQP.Set("restype", "account") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} req.Raw().Header["Accept"] = []string{"application/xml"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } // getAccountInfoHandleResponse handles the GetAccountInfo response. 
func (client *ServiceClient) getAccountInfoHandleResponse(resp *http.Response) (ServiceClientGetAccountInfoResponse, error) { result := ServiceClientGetAccountInfoResponse{} + if val := resp.Header.Get("x-ms-account-kind"); val != "" { + result.AccountKind = (*AccountKind)(&val) + } if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -161,12 +166,6 @@ func (client *ServiceClient) getAccountInfoHandleResponse(resp *http.Response) ( } result.Date = &date } - if val := resp.Header.Get("x-ms-sku-name"); val != "" { - result.SKUName = (*SKUName)(&val) - } - if val := resp.Header.Get("x-ms-account-kind"); val != "" { - result.AccountKind = (*AccountKind)(&val) - } if val := resp.Header.Get("x-ms-is-hns-enabled"); val != "" { isHierarchicalNamespaceEnabled, err := strconv.ParseBool(val) if err != nil { @@ -174,6 +173,15 @@ func (client *ServiceClient) getAccountInfoHandleResponse(resp *http.Response) ( } result.IsHierarchicalNamespaceEnabled = &isHierarchicalNamespaceEnabled } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-sku-name"); val != "" { + result.SKUName = (*SKUName)(&val) + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } @@ -181,21 +189,24 @@ func (client *ServiceClient) getAccountInfoHandleResponse(resp *http.Response) ( // CORS (Cross-Origin Resource Sharing) rules. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - options - ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method. func (client *ServiceClient) GetProperties(ctx context.Context, options *ServiceClientGetPropertiesOptions) (ServiceClientGetPropertiesResponse, error) { + var err error req, err := client.getPropertiesCreateRequest(ctx, options) if err != nil { return ServiceClientGetPropertiesResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ServiceClientGetPropertiesResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ServiceClientGetPropertiesResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ServiceClientGetPropertiesResponse{}, err } - return client.getPropertiesHandleResponse(resp) + resp, err := client.getPropertiesHandleResponse(httpResp) + return resp, err } // getPropertiesCreateRequest creates the GetProperties request. 
@@ -205,17 +216,17 @@ func (client *ServiceClient) getPropertiesCreateRequest(ctx context.Context, opt return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "service") reqQP.Set("comp", "properties") + reqQP.Set("restype", "service") if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -241,21 +252,24 @@ func (client *ServiceClient) getPropertiesHandleResponse(resp *http.Response) (S // location endpoint when read-access geo-redundant replication is enabled for the storage account. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - options - ServiceClientGetStatisticsOptions contains the optional parameters for the ServiceClient.GetStatistics method. func (client *ServiceClient) GetStatistics(ctx context.Context, options *ServiceClientGetStatisticsOptions) (ServiceClientGetStatisticsResponse, error) { + var err error req, err := client.getStatisticsCreateRequest(ctx, options) if err != nil { return ServiceClientGetStatisticsResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ServiceClientGetStatisticsResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ServiceClientGetStatisticsResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ServiceClientGetStatisticsResponse{}, err } - return client.getStatisticsHandleResponse(resp) + resp, err := client.getStatisticsHandleResponse(httpResp) + return resp, err } // getStatisticsCreateRequest creates the GetStatistics request. 
@@ -265,17 +279,17 @@ func (client *ServiceClient) getStatisticsCreateRequest(ctx context.Context, opt return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "service") reqQP.Set("comp", "stats") + reqQP.Set("restype", "service") if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -285,12 +299,6 @@ func (client *ServiceClient) getStatisticsHandleResponse(resp *http.Response) (S if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -298,6 +306,12 @@ func (client *ServiceClient) getStatisticsHandleResponse(resp *http.Response) (S } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } if err := runtime.UnmarshalAsXML(resp, &result.StorageServiceStats); err != nil { return ServiceClientGetStatisticsResponse{}, err } @@ -308,23 +322,26 @@ func (client *ServiceClient) getStatisticsHandleResponse(resp *http.Response) (S // bearer token authentication. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - keyInfo - Key information // - options - ServiceClientGetUserDelegationKeyOptions contains the optional parameters for the ServiceClient.GetUserDelegationKey // method. func (client *ServiceClient) GetUserDelegationKey(ctx context.Context, keyInfo KeyInfo, options *ServiceClientGetUserDelegationKeyOptions) (ServiceClientGetUserDelegationKeyResponse, error) { + var err error req, err := client.getUserDelegationKeyCreateRequest(ctx, keyInfo, options) if err != nil { return ServiceClientGetUserDelegationKeyResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ServiceClientGetUserDelegationKeyResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ServiceClientGetUserDelegationKeyResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ServiceClientGetUserDelegationKeyResponse{}, err } - return client.getUserDelegationKeyHandleResponse(resp) + resp, err := client.getUserDelegationKeyHandleResponse(httpResp) + return resp, err } // getUserDelegationKeyCreateRequest creates the GetUserDelegationKey request. 
@@ -334,17 +351,17 @@ func (client *ServiceClient) getUserDelegationKeyCreateRequest(ctx context.Conte return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "service") reqQP.Set("comp", "userdelegationkey") + reqQP.Set("restype", "service") if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if err := runtime.MarshalAsXML(req, keyInfo); err != nil { return nil, err } @@ -357,12 +374,6 @@ func (client *ServiceClient) getUserDelegationKeyHandleResponse(resp *http.Respo if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -370,6 +381,12 @@ func (client *ServiceClient) getUserDelegationKeyHandleResponse(resp *http.Respo } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } if err := runtime.UnmarshalAsXML(resp, &result.UserDelegationKey); err != nil { return ServiceClientGetUserDelegationKeyResponse{}, err } @@ -379,7 +396,7 @@ func (client *ServiceClient) getUserDelegationKeyHandleResponse(resp *http.Respo // NewListContainersSegmentPager - The List Containers Segment operation returns a list of the containers under the specified // account // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - options - ServiceClientListContainersSegmentOptions contains the optional parameters for the ServiceClient.NewListContainersSegmentPager // method. 
// @@ -391,8 +408,8 @@ func (client *ServiceClient) ListContainersSegmentCreateRequest(ctx context.Cont } reqQP := req.Raw().URL.Query() reqQP.Set("comp", "list") - if options != nil && options.Prefix != nil { - reqQP.Set("prefix", *options.Prefix) + if options != nil && options.Include != nil { + reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) } if options != nil && options.Marker != nil { reqQP.Set("marker", *options.Marker) @@ -400,18 +417,18 @@ func (client *ServiceClient) ListContainersSegmentCreateRequest(ctx context.Cont if options != nil && options.Maxresults != nil { reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) } - if options != nil && options.Include != nil { - reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + if options != nil && options.Prefix != nil { + reqQP.Set("prefix", *options.Prefix) } if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} return req, nil } @@ -437,22 +454,25 @@ func (client *ServiceClient) ListContainersSegmentHandleResponse(resp *http.Resp // and CORS (Cross-Origin Resource Sharing) rules // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - storageServiceProperties - The StorageService properties. // - options - ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method. func (client *ServiceClient) SetProperties(ctx context.Context, storageServiceProperties StorageServiceProperties, options *ServiceClientSetPropertiesOptions) (ServiceClientSetPropertiesResponse, error) { + var err error req, err := client.setPropertiesCreateRequest(ctx, storageServiceProperties, options) if err != nil { return ServiceClientSetPropertiesResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ServiceClientSetPropertiesResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return ServiceClientSetPropertiesResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return ServiceClientSetPropertiesResponse{}, err } - return client.setPropertiesHandleResponse(resp) + resp, err := client.setPropertiesHandleResponse(httpResp) + return resp, err } // setPropertiesCreateRequest creates the SetProperties request. 
@@ -462,17 +482,17 @@ func (client *ServiceClient) setPropertiesCreateRequest(ctx context.Context, sto return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("restype", "service") reqQP.Set("comp", "properties") + reqQP.Set("restype", "service") if options != nil && options.Timeout != nil { reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if err := runtime.MarshalAsXML(req, storageServiceProperties); err != nil { return nil, err } @@ -497,25 +517,28 @@ func (client *ServiceClient) setPropertiesHandleResponse(resp *http.Response) (S // SubmitBatch - The Batch operation allows multiple API calls to be embedded into a single HTTP request. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-08-03 +// Generated from API version 2024-08-04 // - contentLength - The length of the request. // - multipartContentType - Required. The value of this header must be multipart/mixed with a batch boundary. Example header // value: multipart/mixed; boundary=batch_ // - body - Initial data // - options - ServiceClientSubmitBatchOptions contains the optional parameters for the ServiceClient.SubmitBatch method. func (client *ServiceClient) SubmitBatch(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ServiceClientSubmitBatchOptions) (ServiceClientSubmitBatchResponse, error) { + var err error req, err := client.submitBatchCreateRequest(ctx, contentLength, multipartContentType, body, options) if err != nil { return ServiceClientSubmitBatchResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ServiceClientSubmitBatchResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return ServiceClientSubmitBatchResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return ServiceClientSubmitBatchResponse{}, err } - return client.submitBatchHandleResponse(resp) + resp, err := client.submitBatchHandleResponse(httpResp) + return resp, err } // submitBatchCreateRequest creates the SubmitBatch request. 
@@ -531,13 +554,13 @@ func (client *ServiceClient) submitBatchCreateRequest(ctx context.Context, conte } req.Raw().URL.RawQuery = reqQP.Encode() runtime.SkipBodyDownload(req) + req.Raw().Header["Accept"] = []string{"application/xml"} req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} req.Raw().Header["Content-Type"] = []string{multipartContentType} - req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } - req.Raw().Header["Accept"] = []string{"application/xml"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if err := req.SetBody(body, multipartContentType); err != nil { return nil, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go index 4b4d51aa399..ee3732ebcc1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go @@ -1,11 +1,7 @@ -//go:build go1.18 -// +build go1.18 - // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -15,29 +11,36 @@ import ( ) const ( - rfc1123JSON = `"` + time.RFC1123 + `"` + dateTimeRFC1123JSON = `"` + time.RFC1123 + `"` ) -type timeRFC1123 time.Time +type dateTimeRFC1123 time.Time -func (t timeRFC1123) MarshalJSON() ([]byte, error) { - b := []byte(time.Time(t).Format(rfc1123JSON)) +func (t dateTimeRFC1123) MarshalJSON() ([]byte, error) { + b := []byte(time.Time(t).Format(dateTimeRFC1123JSON)) return b, nil } -func (t timeRFC1123) MarshalText() ([]byte, error) { +func (t dateTimeRFC1123) MarshalText() ([]byte, error) { b := []byte(time.Time(t).Format(time.RFC1123)) return b, nil } -func (t *timeRFC1123) UnmarshalJSON(data []byte) error { - p, err := time.Parse(rfc1123JSON, strings.ToUpper(string(data))) - *t = timeRFC1123(p) +func (t *dateTimeRFC1123) UnmarshalJSON(data []byte) error { + p, err := time.Parse(dateTimeRFC1123JSON, strings.ToUpper(string(data))) + *t = dateTimeRFC1123(p) return err } -func (t *timeRFC1123) UnmarshalText(data []byte) error { +func (t *dateTimeRFC1123) UnmarshalText(data []byte) error { + if len(data) == 0 { + return nil + } p, err := time.Parse(time.RFC1123, string(data)) - *t = timeRFC1123(p) + *t = dateTimeRFC1123(p) return err } + +func (t dateTimeRFC1123) String() string { + return time.Time(t).Format(time.RFC1123) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go index 1ce9d621164..e9eac9bcb11 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go @@ -1,11 +1,7 @@ -//go:build go1.18 -// +build go1.18 - // Copyright (c) Microsoft Corporation. All rights reserved. 
// Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -15,45 +11,72 @@ import ( "time" ) +// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. +var tzOffsetRegex = regexp.MustCompile(`(?:Z|z|\+|-)(?:\d+:\d+)*"*$`) + const ( - utcLayoutJSON = `"2006-01-02T15:04:05.999999999"` - utcLayout = "2006-01-02T15:04:05.999999999" - rfc3339JSON = `"` + time.RFC3339Nano + `"` + utcDateTime = "2006-01-02T15:04:05.999999999" + utcDateTimeJSON = `"` + utcDateTime + `"` + utcDateTimeNoT = "2006-01-02 15:04:05.999999999" + utcDateTimeJSONNoT = `"` + utcDateTimeNoT + `"` + dateTimeNoT = `2006-01-02 15:04:05.999999999Z07:00` + dateTimeJSON = `"` + time.RFC3339Nano + `"` + dateTimeJSONNoT = `"` + dateTimeNoT + `"` ) -// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. -var tzOffsetRegex = regexp.MustCompile(`(Z|z|\+|-)(\d+:\d+)*"*$`) - -type timeRFC3339 time.Time +type dateTimeRFC3339 time.Time -func (t timeRFC3339) MarshalJSON() (json []byte, err error) { +func (t dateTimeRFC3339) MarshalJSON() ([]byte, error) { tt := time.Time(t) return tt.MarshalJSON() } -func (t timeRFC3339) MarshalText() (text []byte, err error) { +func (t dateTimeRFC3339) MarshalText() ([]byte, error) { tt := time.Time(t) return tt.MarshalText() } -func (t *timeRFC3339) UnmarshalJSON(data []byte) error { - layout := utcLayoutJSON - if tzOffsetRegex.Match(data) { - layout = rfc3339JSON +func (t *dateTimeRFC3339) UnmarshalJSON(data []byte) error { + tzOffset := tzOffsetRegex.Match(data) + hasT := strings.Contains(string(data), "T") || strings.Contains(string(data), "t") + var layout string + if tzOffset && hasT { + layout = dateTimeJSON + } else if tzOffset { + layout = dateTimeJSONNoT + } else if hasT { + layout = utcDateTimeJSON + } else { + layout = utcDateTimeJSONNoT } return t.Parse(layout, string(data)) } -func (t *timeRFC3339) UnmarshalText(data []byte) (err error) { - layout := utcLayout - if tzOffsetRegex.Match(data) { +func (t *dateTimeRFC3339) UnmarshalText(data []byte) error { + if len(data) == 0 { + return nil + } + tzOffset := tzOffsetRegex.Match(data) + hasT := strings.Contains(string(data), "T") || strings.Contains(string(data), "t") + var layout string + if tzOffset && hasT { layout = time.RFC3339Nano + } else if tzOffset { + layout = dateTimeNoT + } else if hasT { + layout = utcDateTime + } else { + layout = utcDateTimeNoT } return t.Parse(layout, string(data)) } -func (t *timeRFC3339) Parse(layout, value string) error { +func (t *dateTimeRFC3339) Parse(layout, value string) error { p, err := time.Parse(layout, strings.ToUpper(value)) - *t = timeRFC3339(p) + *t = dateTimeRFC3339(p) return err } + +func (t dateTimeRFC3339) String() string { + return time.Time(t).Format(time.RFC3339Nano) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go index 144ea18e1ab..355d0176b3d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go @@ -1,16 +1,15 @@ 
-//go:build go1.18 -// +build go1.18 - // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated import ( "encoding/xml" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "io" "strings" ) @@ -19,22 +18,32 @@ type additionalProperties map[string]*string // UnmarshalXML implements the xml.Unmarshaler interface for additionalProperties. func (ap *additionalProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { tokName := "" - for t, err := d.Token(); err == nil; t, err = d.Token() { + tokValue := "" + for { + t, err := d.Token() + if errors.Is(err, io.EOF) { + break + } else if err != nil { + return err + } switch tt := t.(type) { case xml.StartElement: tokName = strings.ToLower(tt.Name.Local) - break + tokValue = "" case xml.CharData: + if tokName == "" { + continue + } + tokValue = string(tt) + case xml.EndElement: if tokName == "" { continue } if *ap == nil { *ap = additionalProperties{} } - s := string(tt) - (*ap)[tokName] = &s + (*ap)[tokName] = to.Ptr(tokValue) tokName = "" - break } } return nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go index c1b3a3d2729..5c44af34ae9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go @@ -19,7 +19,7 @@ const ( type BatchTransferOptions struct { TransferSize int64 ChunkSize int64 - NumChunks uint16 + NumChunks uint64 Concurrency uint16 Operation func(ctx context.Context, offset int64, chunkSize int64) error OperationName string @@ -44,7 +44,6 @@ func DoBatchTransfer(ctx context.Context, o *BatchTransferOptions) error { // Create the goroutines that process each operation (in parallel). for g := uint16(0); g < o.Concurrency; g++ { - //grIndex := g go func() { for f := range operationChannel { err := f() @@ -54,7 +53,7 @@ func DoBatchTransfer(ctx context.Context, o *BatchTransferOptions) error { } // Add each chunk's operation to the channel. - for chunkNum := uint16(0); chunkNum < o.NumChunks; chunkNum++ { + for chunkNum := uint64(0); chunkNum < o.NumChunks; chunkNum++ { curChunkSize := o.ChunkSize if chunkNum == o.NumChunks-1 { // Last chunk @@ -69,7 +68,7 @@ func DoBatchTransfer(ctx context.Context, o *BatchTransferOptions) error { // Wait for the operations to complete. 
var firstErr error = nil - for chunkNum := uint16(0); chunkNum < o.NumChunks; chunkNum++ { + for chunkNum := uint64(0); chunkNum < o.NumChunks; chunkNum++ { responseError := <-operationResponseChannel // record the first error (the original error which should cause the other chunks to fail with canceled context) if responseError != nil && firstErr == nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/challenge_policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/challenge_policy.go index e7c8e9213d8..fff61016c85 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/challenge_policy.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/challenge_policy.go @@ -8,11 +8,12 @@ package shared import ( "errors" + "net/http" + "strings" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" - "net/http" - "strings" ) type storageAuthorizer struct { @@ -20,13 +21,14 @@ type storageAuthorizer struct { tenantID string } -func NewStorageChallengePolicy(cred azcore.TokenCredential) policy.Policy { - s := storageAuthorizer{scopes: []string{TokenScope}} - return runtime.NewBearerTokenPolicy(cred, []string{TokenScope}, &policy.BearerTokenOptions{ +func NewStorageChallengePolicy(cred azcore.TokenCredential, audience string, allowHTTP bool) policy.Policy { + s := storageAuthorizer{scopes: []string{audience}} + return runtime.NewBearerTokenPolicy(cred, []string{audience}, &policy.BearerTokenOptions{ AuthorizationHandler: policy.AuthorizationHandler{ OnRequest: s.onRequest, OnChallenge: s.onChallenge, }, + InsecureAllowCredentialWithHTTP: allowHTTP, }) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go index 1de60999ec5..c7922076f3c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go @@ -44,15 +44,6 @@ const ( const crc64Polynomial uint64 = 0x9A6C9329AC4BC9B5 -const ( - AppendBlobClient = "azblob/appendblob.Client" - BlobClient = "azblob/blob.Client" - BlockBlobClient = "azblob/blockblob.Client" - ContainerClient = "azblob/container.Client" - PageBlobClient = "azblob/pageblob.Client" - ServiceClient = "azblob/service.Client" -) - var CRC64Table = crc64.MakeTable(crc64Polynomial) // CopyOptions returns a zero-value T if opts is nil. @@ -144,9 +135,6 @@ func ParseConnectionString(connectionString string) (ParsedConnectionString, err // SerializeBlobTags converts tags to generated.BlobTags func SerializeBlobTags(tagsMap map[string]string) *generated.BlobTags { - if len(tagsMap) == 0 { - return nil - } blobTagSet := make([]*generated.BlobTag, 0) for key, val := range tagsMap { newKey, newVal := key, val @@ -257,3 +245,27 @@ func IsIPEndpointStyle(host string) bool { } return net.ParseIP(host) != nil } + +// ReadAtLeast reads from r into buf until it has read at least min bytes. +// It returns the number of bytes copied and an error. +// The EOF error is returned if no bytes were read or +// EOF happened after reading fewer than min bytes. +// If min is greater than the length of buf, ReadAtLeast returns ErrShortBuffer. +// On return, n >= min if and only if err == nil. 
+// If r returns an error having read at least min bytes, the error is dropped. +// This method is same as io.ReadAtLeast except that it does not +// return io.ErrUnexpectedEOF when fewer than min bytes are read. +func ReadAtLeast(r io.Reader, buf []byte, min int) (n int, err error) { + if len(buf) < min { + return 0, io.ErrShortBuffer + } + for n < min && err == nil { + var nn int + nn, err = r.Read(buf[n:]) + n += nn + } + if n >= min { + err = nil + } + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go index 7e534cee185..63ceac9792e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go @@ -22,6 +22,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" ) // ClientOptions contains the optional parameters when creating a Client. @@ -35,11 +36,12 @@ type Client base.CompositeClient[generated.BlobClient, generated.PageBlobClient] // - cred - an Azure AD credential, typically obtained via the azidentity module // - options - client options; pass nil to accept the default values func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - authPolicy := shared.NewStorageChallengePolicy(cred) + audience := base.GetAudience((*base.ClientOptions)(options)) conOptions := shared.GetClientOptions(options) + authPolicy := shared.NewStorageChallengePolicy(cred, audience, conOptions.InsecureAllowCredentialWithHTTP) plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - azClient, err := azcore.NewClient(shared.PageBlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) if err != nil { return nil, err } @@ -53,7 +55,7 @@ func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptio func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) { conOptions := shared.GetClientOptions(options) - azClient, err := azcore.NewClient(shared.PageBlobClient, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) if err != nil { return nil, err } @@ -69,7 +71,7 @@ func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCreden conOptions := shared.GetClientOptions(options) plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - azClient, err := azcore.NewClient(shared.PageBlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) if err != nil { return nil, err } @@ -378,8 +380,8 @@ func (pb *Client) GetAccountInfo(ctx context.Context, o *blob.GetAccountInfoOpti // SetHTTPHeaders changes a blob's HTTP headers. // For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. 
-func (pb *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) { - return pb.BlobClient().SetHTTPHeaders(ctx, HTTPHeaders, o) +func (pb *Client) SetHTTPHeaders(ctx context.Context, httpHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) { + return pb.BlobClient().SetHTTPHeaders(ctx, httpHeaders, o) } // SetMetadata changes a blob's metadata. @@ -426,6 +428,12 @@ func (pb *Client) CopyFromURL(ctx context.Context, copySource string, o *blob.Co return pb.BlobClient().CopyFromURL(ctx, copySource, o) } +// GetSASURL is a convenience method for generating a SAS token for the currently pointed at Page blob. +// It can only be used if the credential supplied during creation was a SharedKeyCredential. +func (pb *Client) GetSASURL(permissions sas.BlobPermissions, expiry time.Time, o *blob.GetSASURLOptions) (string, error) { + return pb.BlobClient().GetSASURL(permissions, expiry, o) +} + // Concurrent Download Functions ----------------------------------------------------------------------------------------- // DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/models.go index e6148f173c3..39aef20ff5d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/models.go @@ -198,7 +198,7 @@ type GetPageRangesOptions struct { func (o *GetPageRangesOptions) format() (*generated.PageBlobClientGetPageRangesOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { if o == nil { - return nil, nil, nil + return &generated.PageBlobClientGetPageRangesOptions{}, nil, nil } leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go index 4c23208e2e1..20f9875a965 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go @@ -8,6 +8,7 @@ package sas import ( "errors" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" "net" "net/url" "strings" @@ -23,7 +24,7 @@ const ( var ( // Version is the default version encoded in the SAS token. - Version = "2021-12-02" + Version = generated.ServiceVersion ) // TimeFormats ISO 8601 format. 
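One behavioral detail in the pageblob/models.go hunk above: formatting a nil *GetPageRangesOptions now yields a zero-value generated options struct rather than nil, so downstream pager code can populate fields without a nil guard. A toy illustration under an assumed stand-in struct (not the SDK's type):

package main

import "fmt"

// getPageRangesOptions is a hypothetical stand-in for the generated
// options struct referenced in the hunk above.
type getPageRangesOptions struct {
	Marker *string
}

// format mirrors the new behavior: a nil public options value maps to
// a zero-value struct instead of nil.
func format(o *getPageRangesOptions) *getPageRangesOptions {
	if o == nil {
		return &getPageRangesOptions{}
	}
	return o
}

func main() {
	opts := format(nil)
	m := "next-page"
	opts.Marker = &m // safe even though the caller passed nil
	fmt.Println(*opts.Marker)
}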
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go index 45f730847d2..813fa77a9e6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go @@ -255,7 +255,7 @@ func (v BlobSignatureValues) SignWithUserDelegation(userDelegationCredential *Us signature: signature, } - //User delegation SAS specific parameters + // User delegation SAS specific parameters p.signedOID = *udk.SignedOID p.signedTID = *udk.SignedTID p.signedStart = *udk.SignedStart @@ -272,7 +272,7 @@ func getCanonicalName(account string, containerName string, blobName string, dir // Blob: "/blob/account/containername/blobname" elements := []string{"/blob/", account, "/", containerName} if blobName != "" { - elements = append(elements, "/", strings.Replace(blobName, "\\", "/", -1)) + elements = append(elements, "/", strings.ReplaceAll(blobName, "\\", "/")) } else if directoryName != "" { elements = append(elements, "/", directoryName) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/url_parts.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/url_parts.go index 57fe053f07a..758739cb826 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/url_parts.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/url_parts.go @@ -117,7 +117,7 @@ func (up URLParts) String() string { rawQuery := up.UnparsedParams - //If no snapshot is initially provided, fill it in from the SAS query properties to help the user + // If no snapshot is initially provided, fill it in from the SAS query properties to help the user if up.Snapshot == "" && !up.SAS.SnapshotTime().IsZero() { up.Snapshot = up.SAS.SnapshotTime().Format(exported.SnapshotTimeFormat) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go index 461775347ee..cf39c3d579e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go @@ -11,9 +11,6 @@ import ( "context" "errors" "fmt" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" "net/http" "strings" "time" @@ -21,8 +18,11 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" @@ -40,15 +40,16 @@ type Client base.Client[generated.ServiceClient] // - cred - an Azure AD credential, typically obtained via the azidentity module // - options - client options; pass nil to accept the default values func NewClient(serviceURL 
string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - authPolicy := shared.NewStorageChallengePolicy(cred) + audience := base.GetAudience((*base.ClientOptions)(options)) conOptions := shared.GetClientOptions(options) + authPolicy := shared.NewStorageChallengePolicy(cred, audience, conOptions.InsecureAllowCredentialWithHTTP) plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - azClient, err := azcore.NewClient(shared.ServiceClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) if err != nil { return nil, err } - return (*Client)(base.NewServiceClient(serviceURL, azClient, &cred)), nil + return (*Client)(base.NewServiceClient(serviceURL, azClient, &cred, (*base.ClientOptions)(conOptions))), nil } // NewClientWithNoCredential creates an instance of Client with the specified values. @@ -58,11 +59,11 @@ func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOp func NewClientWithNoCredential(serviceURL string, options *ClientOptions) (*Client, error) { conOptions := shared.GetClientOptions(options) - azClient, err := azcore.NewClient(shared.ServiceClient, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) if err != nil { return nil, err } - return (*Client)(base.NewServiceClient(serviceURL, azClient, nil)), nil + return (*Client)(base.NewServiceClient(serviceURL, azClient, nil, (*base.ClientOptions)(conOptions))), nil } // NewClientWithSharedKeyCredential creates an instance of Client with the specified values. @@ -74,12 +75,12 @@ func NewClientWithSharedKeyCredential(serviceURL string, cred *SharedKeyCredenti conOptions := shared.GetClientOptions(options) plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - azClient, err := azcore.NewClient(shared.ServiceClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) if err != nil { return nil, err } - return (*Client)(base.NewServiceClient(serviceURL, azClient, cred)), nil + return (*Client)(base.NewServiceClient(serviceURL, azClient, cred, (*base.ClientOptions)(conOptions))), nil } // NewClientFromConnectionString creates an instance of Client with the specified values. @@ -136,6 +137,10 @@ func getGeneratedBlobClient(b *blob.Client) *generated.BlobClient { return base.InnerClient((*base.Client[generated.BlobClient])(b)) } +func (s *Client) getClientOptions() *base.ClientOptions { + return base.GetClientOptions((*base.Client[generated.ServiceClient])(s)) +} + // URL returns the URL endpoint used by the Client object. func (s *Client) URL() string { return s.generated().Endpoint() @@ -145,7 +150,7 @@ func (s *Client) URL() string { // this Client's URL. The new container.Client uses the same request policy pipeline as the Client. 
func (s *Client) NewContainerClient(containerName string) *container.Client { containerURL := runtime.JoinPaths(s.generated().Endpoint(), containerName) - return (*container.Client)(base.NewContainerClient(containerURL, s.generated().InternalClient().WithClientName(shared.ContainerClient), s.credential())) + return (*container.Client)(base.NewContainerClient(containerURL, s.generated().InternalClient().WithClientName(exported.ModuleName), s.credential(), s.getClientOptions())) } // CreateContainer is a lifecycle method to creates a new container under the specified account. @@ -275,7 +280,6 @@ func (s *Client) GetSASURL(resources sas.AccountResourceTypes, permissions sas.A st := o.format() qps, err := sas.AccountSignatureValues{ Version: sas.Version, - Protocol: sas.ProtocolHTTPS, Permissions: permissions.String(), ResourceTypes: resources.String(), StartTime: st, @@ -315,7 +319,8 @@ func (s *Client) NewBatchBuilder() (*BatchBuilder, error) { switch cred := s.credential().(type) { case *azcore.TokenCredential: - authPolicy = shared.NewStorageChallengePolicy(*cred) + conOptions := s.getClientOptions() + authPolicy = shared.NewStorageChallengePolicy(*cred, base.GetAudience(conOptions), conOptions.InsecureAllowCredentialWithHTTP) case *SharedKeyCredential: authPolicy = exported.NewSharedKeyCredPolicy(cred) case nil: diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go index f86286051de..57d0e2777e1 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go @@ -18,6 +18,8 @@ import ( "encoding/pem" "errors" "fmt" + "os" + "strings" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base" @@ -315,16 +317,21 @@ func New(authority, clientID string, cred Credential, options ...Option) (Client if err != nil { return Client{}, err } - + autoEnabledRegion := os.Getenv("MSAL_FORCE_REGION") opts := clientOptions{ authority: authority, // if the caller specified a token provider, it will handle all details of authentication, using Client only as a token cache disableInstanceDiscovery: cred.tokenProvider != nil, httpClient: shared.DefaultClient, + azureRegion: autoEnabledRegion, } for _, o := range options { o(&opts) } + if strings.EqualFold(opts.azureRegion, "DisableMsalForceRegion") { + opts.azureRegion = "" + } + baseOpts := []base.Option{ base.WithCacheAccessor(opts.accessor), base.WithClientCapabilities(opts.capabilities), diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go index 09a0d92f520..e473d1267da 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go @@ -89,8 +89,23 @@ type AuthResult struct { ExpiresOn time.Time GrantedScopes []string DeclinedScopes []string + Metadata AuthResultMetadata } +// AuthResultMetadata which contains meta data for the AuthResult +type AuthResultMetadata struct { + TokenSource TokenSource +} + +type TokenSource int + +// These are all the types of token flows. 
+const ( + SourceUnknown TokenSource = 0 + IdentityProvider TokenSource = 1 + Cache TokenSource = 2 +) + // AuthResultFromStorage creates an AuthResult from a storage token response (which is generated from the cache). func AuthResultFromStorage(storageTokenResponse storage.TokenResponse) (AuthResult, error) { if err := storageTokenResponse.AccessToken.Validate(); err != nil { @@ -109,7 +124,17 @@ func AuthResultFromStorage(storageTokenResponse storage.TokenResponse) (AuthResu return AuthResult{}, fmt.Errorf("problem decoding JWT token: %w", err) } } - return AuthResult{account, idToken, accessToken, storageTokenResponse.AccessToken.ExpiresOn.T, grantedScopes, nil}, nil + return AuthResult{ + Account: account, + IDToken: idToken, + AccessToken: accessToken, + ExpiresOn: storageTokenResponse.AccessToken.ExpiresOn.T, + GrantedScopes: grantedScopes, + DeclinedScopes: nil, + Metadata: AuthResultMetadata{ + TokenSource: Cache, + }, + }, nil } // NewAuthResult creates an AuthResult. @@ -123,6 +148,9 @@ func NewAuthResult(tokenResponse accesstokens.TokenResponse, account shared.Acco AccessToken: tokenResponse.AccessToken, ExpiresOn: tokenResponse.ExpiresOn.T, GrantedScopes: tokenResponse.GrantedScopes.Slice, + Metadata: AuthResultMetadata{ + TokenSource: IdentityProvider, + }, }, nil } diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go index 2238521f5f9..2134e57c9e4 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go @@ -18,10 +18,6 @@ import ( ) const addField = "AdditionalFields" -const ( - marshalJSON = "MarshalJSON" - unmarshalJSON = "UnmarshalJSON" -) var ( leftBrace = []byte("{")[0] @@ -106,48 +102,38 @@ func delimIs(got json.Token, want rune) bool { // hasMarshalJSON will determine if the value or a pointer to this value has // the MarshalJSON method. func hasMarshalJSON(v reflect.Value) bool { - if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid { - _, ok := v.Interface().(json.Marshaler) - return ok - } - - if v.Kind() == reflect.Ptr { - v = v.Elem() - } else { - if !v.CanAddr() { - return false + ok := false + if _, ok = v.Interface().(json.Marshaler); !ok { + var i any + if v.Kind() == reflect.Ptr { + i = v.Elem().Interface() + } else if v.CanAddr() { + i = v.Addr().Interface() } - v = v.Addr() - } - - if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid { - _, ok := v.Interface().(json.Marshaler) - return ok + _, ok = i.(json.Marshaler) } - return false + return ok } // callMarshalJSON will call MarshalJSON() method on the value or a pointer to this value. // This will panic if the method is not defined. 
func callMarshalJSON(v reflect.Value) ([]byte, error) { - if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid { - marsh := v.Interface().(json.Marshaler) + if marsh, ok := v.Interface().(json.Marshaler); ok { return marsh.MarshalJSON() } if v.Kind() == reflect.Ptr { - v = v.Elem() + if marsh, ok := v.Elem().Interface().(json.Marshaler); ok { + return marsh.MarshalJSON() + } } else { if v.CanAddr() { - v = v.Addr() + if marsh, ok := v.Addr().Interface().(json.Marshaler); ok { + return marsh.MarshalJSON() + } } } - if method := v.MethodByName(unmarshalJSON); method.Kind() != reflect.Invalid { - marsh := v.Interface().(json.Marshaler) - return marsh.MarshalJSON() - } - panic(fmt.Sprintf("callMarshalJSON called on type %T that does not have MarshalJSON defined", v.Interface())) } @@ -162,12 +148,8 @@ func hasUnmarshalJSON(v reflect.Value) bool { v = v.Addr() } - if method := v.MethodByName(unmarshalJSON); method.Kind() != reflect.Invalid { - _, ok := v.Interface().(json.Unmarshaler) - return ok - } - - return false + _, ok := v.Interface().(json.Unmarshaler) + return ok } // hasOmitEmpty indicates if the field has instructed us to not output diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go index 04236ff3127..fda5d7dd333 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go @@ -7,6 +7,7 @@ package local import ( "context" "fmt" + "html" "net" "net/http" "strconv" @@ -141,7 +142,7 @@ func (s *Server) handler(w http.ResponseWriter, r *http.Request) { headerErr := q.Get("error") if headerErr != "" { - desc := q.Get("error_description") + desc := html.EscapeString(q.Get("error_description")) // Note: It is a little weird we handle some errors by not going to the failPage. If they all should, // change this to s.error() and make s.error() write the failPage instead of an error code. _, _ = w.Write([]byte(fmt.Sprintf(failPage, headerErr, desc))) diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go index ef8d908a444..e0653134448 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go @@ -10,6 +10,8 @@ import ( "io" "time" + "github.com/google/uuid" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported" internalTime "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time" @@ -18,7 +20,6 @@ import ( "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs" - "github.com/google/uuid" ) // ResolveEndpointer contains the methods for resolving authority endpoints. 
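The MSAL json helper hunks above replace reflect MethodByName lookups with ordinary interface assertions on the value, its element, or its address. A small self-contained illustration of that technique (toy stamp type, not MSAL's):

package main

import (
	"encoding/json"
	"fmt"
	"reflect"
)

type stamp struct{}

// MarshalJSON is defined on the pointer receiver, so only *stamp
// satisfies json.Marshaler.
func (s *stamp) MarshalJSON() ([]byte, error) { return []byte(`"stamp"`), nil }

// hasMarshalJSON mirrors the rewritten helper: try the value itself,
// then its element or its address, using type assertions instead of
// reflect.Value.MethodByName.
func hasMarshalJSON(v reflect.Value) bool {
	if _, ok := v.Interface().(json.Marshaler); ok {
		return true
	}
	var i any
	if v.Kind() == reflect.Ptr {
		i = v.Elem().Interface()
	} else if v.CanAddr() {
		i = v.Addr().Interface()
	}
	_, ok := i.(json.Marshaler)
	return ok
}

func main() {
	s := stamp{}
	fmt.Println(hasMarshalJSON(reflect.ValueOf(&s))) // true: *stamp implements it
	fmt.Println(hasMarshalJSON(reflect.ValueOf(s)))  // false: the value copy is not addressable
}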
@@ -331,7 +332,7 @@ func (t *Client) DeviceCode(ctx context.Context, authParams authority.AuthParams func (t *Client) resolveEndpoint(ctx context.Context, authParams *authority.AuthParams, userPrincipalName string) error { endpoints, err := t.Resolver.ResolveEndpoints(ctx, authParams.AuthorityInfo, userPrincipalName) if err != nil { - return fmt.Errorf("unable to resolve an endpoint: %s", err) + return fmt.Errorf("unable to resolve an endpoint: %w", err) } authParams.Endpoints = endpoints return nil diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go index 9d60734f88e..c3c4a96fc30 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go @@ -23,7 +23,7 @@ import ( const ( authorizationEndpoint = "https://%v/%v/oauth2/v2.0/authorize" - instanceDiscoveryEndpoint = "https://%v/common/discovery/instance" + aadInstanceDiscoveryEndpoint = "https://%v/common/discovery/instance" tenantDiscoveryEndpointWithRegion = "https://%s.%s/%s/v2.0/.well-known/openid-configuration" regionName = "REGION_NAME" defaultAPIVersion = "2021-10-01" @@ -47,13 +47,12 @@ type jsonCaller interface { } var aadTrustedHostList = map[string]bool{ - "login.windows.net": true, // Microsoft Azure Worldwide - Used in validation scenarios where host is not this list - "login.chinacloudapi.cn": true, // Microsoft Azure China - "login.microsoftonline.de": true, // Microsoft Azure Blackforest - "login-us.microsoftonline.com": true, // Microsoft Azure US Government - Legacy - "login.microsoftonline.us": true, // Microsoft Azure US Government - "login.microsoftonline.com": true, // Microsoft Azure Worldwide - "login.cloudgovapi.us": true, // Microsoft Azure US Government + "login.windows.net": true, // Microsoft Azure Worldwide - Used in validation scenarios where host is not this list + "login.partner.microsoftonline.cn": true, // Microsoft Azure China + "login.microsoftonline.de": true, // Microsoft Azure Blackforest + "login-us.microsoftonline.com": true, // Microsoft Azure US Government - Legacy + "login.microsoftonline.us": true, // Microsoft Azure US Government + "login.microsoftonline.com": true, // Microsoft Azure Worldwide } // TrustedHost checks if an AAD host is trusted/valid. @@ -137,8 +136,12 @@ const ( const ( AAD = "MSSTS" ADFS = "ADFS" + DSTS = "DSTS" ) +// DSTSTenant is referenced throughout multiple files, let us use a const in case we ever need to change it. +const DSTSTenant = "7a433bfc-2514-4697-b467-e0933190487f" + // AuthenticationScheme is an extensibility mechanism designed to be used only by Azure Arc for proof of possession access tokens. type AuthenticationScheme interface { // Extra parameters that are added to the request to the /token endpoint. 
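The resolveEndpoint hunk above swaps %s for %w, which keeps the underlying error in the chain instead of flattening it to text. A short demonstration of the difference:

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("endpoint not found")

func main() {
	// %s flattens the cause into text; errors.Is can no longer match it.
	wrappedS := fmt.Errorf("unable to resolve an endpoint: %s", errNotFound)
	// %w keeps the cause in the error chain.
	wrappedW := fmt.Errorf("unable to resolve an endpoint: %w", errNotFound)

	fmt.Println(errors.Is(wrappedS, errNotFound)) // false
	fmt.Println(errors.Is(wrappedW, errNotFound)) // true
}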
@@ -236,23 +239,26 @@ func NewAuthParams(clientID string, authorityInfo Info) AuthParams { // - the client is configured to authenticate only Microsoft accounts via the "consumers" endpoint // - the resulting authority URL is invalid func (p AuthParams) WithTenant(ID string) (AuthParams, error) { - switch ID { - case "", p.AuthorityInfo.Tenant: - // keep the default tenant because the caller didn't override it + if ID == "" || ID == p.AuthorityInfo.Tenant { return p, nil - case "common", "consumers", "organizations": - if p.AuthorityInfo.AuthorityType == AAD { + } + + var authority string + switch p.AuthorityInfo.AuthorityType { + case AAD: + if ID == "common" || ID == "consumers" || ID == "organizations" { return p, fmt.Errorf(`tenant ID must be a specific tenant, not "%s"`, ID) } - // else we'll return a better error below - } - if p.AuthorityInfo.AuthorityType != AAD { - return p, errors.New("the authority doesn't support tenants") - } - if p.AuthorityInfo.Tenant == "consumers" { - return p, errors.New(`client is configured to authenticate only personal Microsoft accounts, via the "consumers" endpoint`) + if p.AuthorityInfo.Tenant == "consumers" { + return p, errors.New(`client is configured to authenticate only personal Microsoft accounts, via the "consumers" endpoint`) + } + authority = "https://" + path.Join(p.AuthorityInfo.Host, ID) + case ADFS: + return p, errors.New("ADFS authority doesn't support tenants") + case DSTS: + return p, errors.New("dSTS authority doesn't support tenants") } - authority := "https://" + path.Join(p.AuthorityInfo.Host, ID) + info, err := NewInfoFromAuthorityURI(authority, p.AuthorityInfo.ValidateAuthority, p.AuthorityInfo.InstanceDiscoveryDisabled) if err == nil { info.Region = p.AuthorityInfo.Region @@ -344,44 +350,57 @@ type Info struct { Host string CanonicalAuthorityURI string AuthorityType string - UserRealmURIPrefix string ValidateAuthority bool Tenant string Region string InstanceDiscoveryDisabled bool } -func firstPathSegment(u *url.URL) (string, error) { - pathParts := strings.Split(u.EscapedPath(), "/") - if len(pathParts) >= 2 { - return pathParts[1], nil - } - - return "", errors.New(`authority must be an https URL such as "https://login.microsoftonline.com/"`) -} - // NewInfoFromAuthorityURI creates an AuthorityInfo instance from the authority URL provided. 
func NewInfoFromAuthorityURI(authority string, validateAuthority bool, instanceDiscoveryDisabled bool) (Info, error) { - u, err := url.Parse(strings.ToLower(authority)) - if err != nil || u.Scheme != "https" { - return Info{}, errors.New(`authority must be an https URL such as "https://login.microsoftonline.com/"`) + + cannonicalAuthority := authority + + // suffix authority with / if it doesn't have one + if !strings.HasSuffix(cannonicalAuthority, "/") { + cannonicalAuthority += "/" } - tenant, err := firstPathSegment(u) + u, err := url.Parse(strings.ToLower(cannonicalAuthority)) + if err != nil { - return Info{}, err + return Info{}, fmt.Errorf("couldn't parse authority url: %w", err) + } + if u.Scheme != "https" { + return Info{}, errors.New("authority url scheme must be https") + } + + pathParts := strings.Split(u.EscapedPath(), "/") + if len(pathParts) < 3 { + return Info{}, errors.New(`authority must be an URL such as "https://login.microsoftonline.com/"`) } + authorityType := AAD - if tenant == "adfs" { + tenant := pathParts[1] + switch tenant { + case "adfs": authorityType = ADFS + case "dstsv2": + if len(pathParts) != 4 { + return Info{}, fmt.Errorf("dSTS authority must be an https URL such as https:///dstsv2/%s", DSTSTenant) + } + if pathParts[2] != DSTSTenant { + return Info{}, fmt.Errorf("dSTS authority only accepts a single tenant %q", DSTSTenant) + } + authorityType = DSTS + tenant = DSTSTenant } // u.Host includes the port, if any, which is required for private cloud deployments return Info{ Host: u.Host, - CanonicalAuthorityURI: fmt.Sprintf("https://%v/%v/", u.Host, tenant), + CanonicalAuthorityURI: cannonicalAuthority, AuthorityType: authorityType, - UserRealmURIPrefix: fmt.Sprintf("https://%v/common/userrealm/", u.Hostname()), ValidateAuthority: validateAuthority, Tenant: tenant, InstanceDiscoveryDisabled: instanceDiscoveryDisabled, @@ -525,7 +544,7 @@ func (c Client) AADInstanceDiscovery(ctx context.Context, authorityInfo Info) (I discoveryHost = authorityInfo.Host } - endpoint := fmt.Sprintf(instanceDiscoveryEndpoint, discoveryHost) + endpoint := fmt.Sprintf(aadInstanceDiscoveryEndpoint, discoveryHost) err = c.Comm.JSONCall(ctx, endpoint, http.Header{}, qv, nil, &resp) } return resp, err @@ -543,17 +562,19 @@ func detectRegion(ctx context.Context) string { client := http.Client{ Timeout: time.Duration(2 * time.Second), } - req, _ := http.NewRequest("GET", imdsEndpoint, nil) + req, _ := http.NewRequestWithContext(ctx, http.MethodGet, imdsEndpoint, nil) req.Header.Set("Metadata", "true") resp, err := client.Do(req) + if err == nil { + defer resp.Body.Close() + } // If the request times out or there is an error, it is retried once - if err != nil || resp.StatusCode != 200 { + if err != nil || resp.StatusCode != http.StatusOK { resp, err = client.Do(req) - if err != nil || resp.StatusCode != 200 { + if err != nil || resp.StatusCode != http.StatusOK { return "" } } - defer resp.Body.Close() response, err := io.ReadAll(resp.Body) if err != nil { return "" diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go index 7d9ec7cd374..d62aac74eb9 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go @@ -18,10 +18,11 
@@ import ( "strings" "time" + "github.com/google/uuid" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors" customJSON "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version" - "github.com/google/uuid" ) // HTTPClient represents an HTTP client. @@ -70,15 +71,13 @@ func (c *Client) JSONCall(ctx context.Context, endpoint string, headers http.Hea unmarshal = customJSON.Unmarshal } - u, err := url.Parse(endpoint) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("%s?%s", endpoint, qv.Encode()), nil) if err != nil { - return fmt.Errorf("could not parse path URL(%s): %w", endpoint, err) + return fmt.Errorf("could not create request: %w", err) } - u.RawQuery = qv.Encode() addStdHeaders(headers) - - req := &http.Request{Method: http.MethodGet, URL: u, Header: headers} + req.Header = headers if body != nil { // Note: In case your wondering why we are not gzip encoding.... diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go index 0ade411797a..4030ec8d8f1 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go @@ -18,9 +18,6 @@ import ( "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" ) -// ADFS is an active directory federation service authority type. -const ADFS = "ADFS" - type cacheEntry struct { Endpoints authority.Endpoints ValidForDomainsInList map[string]bool @@ -51,7 +48,7 @@ func (m *authorityEndpoint) ResolveEndpoints(ctx context.Context, authorityInfo return endpoints, nil } - endpoint, err := m.openIDConfigurationEndpoint(ctx, authorityInfo, userPrincipalName) + endpoint, err := m.openIDConfigurationEndpoint(ctx, authorityInfo) if err != nil { return authority.Endpoints{}, err } @@ -83,7 +80,7 @@ func (m *authorityEndpoint) cachedEndpoints(authorityInfo authority.Info, userPr defer m.mu.Unlock() if cacheEntry, ok := m.cache[authorityInfo.CanonicalAuthorityURI]; ok { - if authorityInfo.AuthorityType == ADFS { + if authorityInfo.AuthorityType == authority.ADFS { domain, err := adfsDomainFromUpn(userPrincipalName) if err == nil { if _, ok := cacheEntry.ValidForDomainsInList[domain]; ok { @@ -102,7 +99,7 @@ func (m *authorityEndpoint) addCachedEndpoints(authorityInfo authority.Info, use updatedCacheEntry := createcacheEntry(endpoints) - if authorityInfo.AuthorityType == ADFS { + if authorityInfo.AuthorityType == authority.ADFS { // Since we're here, we've made a call to the backend. We want to ensure we're caching // the latest values from the server. 
if cacheEntry, ok := m.cache[authorityInfo.CanonicalAuthorityURI]; ok { @@ -119,9 +116,12 @@ func (m *authorityEndpoint) addCachedEndpoints(authorityInfo authority.Info, use m.cache[authorityInfo.CanonicalAuthorityURI] = updatedCacheEntry } -func (m *authorityEndpoint) openIDConfigurationEndpoint(ctx context.Context, authorityInfo authority.Info, userPrincipalName string) (string, error) { - if authorityInfo.Tenant == "adfs" { +func (m *authorityEndpoint) openIDConfigurationEndpoint(ctx context.Context, authorityInfo authority.Info) (string, error) { + if authorityInfo.AuthorityType == authority.ADFS { return fmt.Sprintf("https://%s/adfs/.well-known/openid-configuration", authorityInfo.Host), nil + } else if authorityInfo.AuthorityType == authority.DSTS { + return fmt.Sprintf("https://%s/dstsv2/%s/v2.0/.well-known/openid-configuration", authorityInfo.Host, authority.DSTSTenant), nil + } else if authorityInfo.ValidateAuthority && !authority.TrustedHost(authorityInfo.Host) { resp, err := m.rest.Authority().AADInstanceDiscovery(ctx, authorityInfo) if err != nil { @@ -134,7 +134,6 @@ func (m *authorityEndpoint) openIDConfigurationEndpoint(ctx context.Context, aut return "", err } return resp.TenantDiscoveryEndpoint, nil - } return authorityInfo.CanonicalAuthorityURI + "v2.0/.well-known/openid-configuration", nil diff --git a/vendor/github.com/IBM/sarama/.golangci.yml b/vendor/github.com/IBM/sarama/.golangci.yml index 72e3e4c2448..2e029401d88 100644 --- a/vendor/github.com/IBM/sarama/.golangci.yml +++ b/vendor/github.com/IBM/sarama/.golangci.yml @@ -1,4 +1,5 @@ run: + go: "1.20" timeout: 5m deadline: 10m @@ -57,7 +58,7 @@ linters: enable: - bodyclose - depguard - - exportloopref + # - copyloopvar - dogsled - errcheck - errorlint @@ -79,6 +80,7 @@ linters: issues: exclude: + - "G115: integer overflow conversion" - "G404: Use of weak random number generator" exclude-rules: # exclude some linters from running on certains files. 
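The resolvers.go hunk above now dispatches on the authority type constant rather than the raw tenant string when building the OpenID configuration URL. A compact sketch of that dispatch (constants copied from the hunks above; the instance-discovery branch for untrusted hosts is elided and the helper shape is illustrative):

package main

import "fmt"

const (
	aad        = "MSSTS"
	adfs       = "ADFS"
	dsts       = "DSTS"
	dstsTenant = "7a433bfc-2514-4697-b467-e0933190487f"
)

// openIDConfigurationEndpoint mirrors the branch structure above:
// ADFS and dSTS get fixed well-known paths, everything else appends
// the well-known suffix to the canonical authority URI.
func openIDConfigurationEndpoint(authorityType, host, canonicalAuthorityURI string) string {
	switch authorityType {
	case adfs:
		return fmt.Sprintf("https://%s/adfs/.well-known/openid-configuration", host)
	case dsts:
		return fmt.Sprintf("https://%s/dstsv2/%s/v2.0/.well-known/openid-configuration", host, dstsTenant)
	default:
		return canonicalAuthorityURI + "v2.0/.well-known/openid-configuration"
	}
}

func main() {
	fmt.Println(openIDConfigurationEndpoint(dsts, "login.example.test", ""))
	fmt.Println(openIDConfigurationEndpoint(aad, "", "https://login.microsoftonline.com/common/"))
}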
diff --git a/vendor/github.com/IBM/sarama/.pre-commit-config.yaml b/vendor/github.com/IBM/sarama/.pre-commit-config.yaml index 1869b8160ed..1e64cc0d88a 100644 --- a/vendor/github.com/IBM/sarama/.pre-commit-config.yaml +++ b/vendor/github.com/IBM/sarama/.pre-commit-config.yaml @@ -2,7 +2,7 @@ fail_fast: false default_install_hook_types: [pre-commit, commit-msg] repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.4.0 + rev: v5.0.0 hooks: - id: check-merge-conflict - id: check-yaml @@ -32,10 +32,10 @@ repos: files: \.go$ args: [] - repo: https://github.com/gitleaks/gitleaks - rev: v8.16.3 + rev: v8.21.2 hooks: - id: gitleaks - repo: https://github.com/golangci/golangci-lint - rev: v1.52.2 + rev: v1.61.0 hooks: - id: golangci-lint diff --git a/vendor/github.com/IBM/sarama/Dockerfile.kafka b/vendor/github.com/IBM/sarama/Dockerfile.kafka index ac2d47a1644..d2234e3918f 100644 --- a/vendor/github.com/IBM/sarama/Dockerfile.kafka +++ b/vendor/github.com/IBM/sarama/Dockerfile.kafka @@ -1,42 +1,64 @@ -FROM registry.access.redhat.com/ubi8/ubi-minimal:8.9@sha256:f30dbf77b075215f6c827c269c073b5e0973e5cea8dacdf7ecb6a19c868f37f2 +FROM registry.access.redhat.com/ubi9/ubi-minimal:9.5@sha256:daa61d6103e98bccf40d7a69a0d4f8786ec390e2204fd94f7cc49053e9949360 USER root RUN microdnf update -y \ - && microdnf install -y curl gzip java-11-openjdk-headless tar tzdata-java \ + && microdnf install -y git gzip java-17-openjdk-headless tar tzdata-java \ && microdnf reinstall -y tzdata \ && microdnf clean all -ENV JAVA_HOME=/usr/lib/jvm/jre-11 +ENV JAVA_HOME=/usr/lib/jvm/jre-17 # https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html # Ensure Java doesn't cache any dns results -RUN cd /etc/java/java-11-openjdk/*/conf/security \ +RUN cd /etc/java/java-17-openjdk/*/conf/security \ && sed -e '/networkaddress.cache.ttl/d' -e '/networkaddress.cache.negative.ttl/d' -i java.security \ && echo 'networkaddress.cache.ttl=0' >> java.security \ && echo 'networkaddress.cache.negative.ttl=0' >> java.security ARG SCALA_VERSION="2.13" -ARG KAFKA_VERSION="3.6.0" +ARG KAFKA_VERSION="3.6.2" -# https://github.com/apache/kafka/blob/9989b68d0d38c8f1357f78bf9d53a58c1476188d/tests/docker/Dockerfile#L46-L72 +WORKDIR /tmp + +# https://github.com/apache/kafka/blob/2e2b0a58eda3e677763af974a44a6aaa3c280214/tests/docker/Dockerfile#L77-L105 ARG KAFKA_MIRROR="https://s3-us-west-2.amazonaws.com/kafka-packages" SHELL ["/bin/bash", "-o", "pipefail", "-c"] -RUN mkdir -p "/opt/kafka-${KAFKA_VERSION}" \ +RUN --mount=type=bind,target=.,rw=true \ + mkdir -p "/opt/kafka-${KAFKA_VERSION}" \ && chmod a+rw "/opt/kafka-${KAFKA_VERSION}" \ - && curl -s "$KAFKA_MIRROR/kafka_${SCALA_VERSION}-${KAFKA_VERSION}.tgz" | tar xz --strip-components=1 -C "/opt/kafka-${KAFKA_VERSION}" + && if [ "$KAFKA_VERSION" = "4.0.0" ]; then \ + microdnf install -y java-17-openjdk-devel \ + && git clone --depth=50 --single-branch -b 4.0 https://github.com/apache/kafka /usr/src/kafka \ + && cd /usr/src/kafka \ + && : PIN TO COMMIT BEFORE KAFKA-17616 ZOOKEEPER REMOVAL STARTED \ + && git reset --hard d1504649fb \ + && export JAVA_TOOL_OPTIONS=-XX:MaxRAMPercentage=80 \ + && sed -e '/version=/s/-SNAPSHOT//' -e '/org.gradle.jvmargs/d' -e '/org.gradle.parallel/s/true/false/' -i gradle.properties && ./gradlew -PmaxParallelForks=1 -PmaxScalacThreads=1 --no-daemon releaseTarGz -x siteDocsTar -x javadoc \ + && tar xzf core/build/distributions/kafka_${SCALA_VERSION}-${KAFKA_VERSION}.tgz --strip-components=1 -C "/opt/kafka-${KAFKA_VERSION}" \ + && cp 
/tmp/server.properties "/opt/kafka-${KAFKA_VERSION}/config/" \ + && microdnf remove -y java-17-openjdk-devel \ + && rm -rf /usr/src/kafka ; \ + else \ + curl -s "$KAFKA_MIRROR/kafka_${SCALA_VERSION}-${KAFKA_VERSION}.tgz" | tar xz --strip-components=1 -C "/opt/kafka-${KAFKA_VERSION}" ; \ + fi # older kafka versions depend upon jaxb-api being bundled with the JDK, but it # was removed from Java 11 so work around that by including it in the kafka # libs dir regardless -WORKDIR /tmp RUN curl -sLO "https://repo1.maven.org/maven2/javax/xml/bind/jaxb-api/2.3.0/jaxb-api-2.3.0.jar" \ && for DIR in /opt/kafka-*; do cp -v jaxb-api-2.3.0.jar $DIR/libs/ ; done \ && rm -f jaxb-api-2.3.0.jar +# older kafka versions with the zookeeper 3.4.13 client aren't compatible with Java 17 so quietly bump them to 3.5.9 +RUN [ -f "/opt/kafka-${KAFKA_VERSION}/libs/zookeeper-3.4.13.jar" ] || exit 0 ; \ + rm -f "/opt/kafka-${KAFKA_VERSION}/libs/zookeeper-3.4.13.jar" \ + && curl --fail -sSL -o "/opt/kafka-${KAFKA_VERSION}/libs/zookeeper-3.5.9.jar" "https://repo1.maven.org/maven2/org/apache/zookeeper/zookeeper/3.5.9/zookeeper-3.5.9.jar" \ + && curl --fail -sSL -o "/opt/kafka-${KAFKA_VERSION}/libs/zookeeper-jute-3.5.9.jar" "https://repo1.maven.org/maven2/org/apache/zookeeper/zookeeper-jute/3.5.9/zookeeper-jute-3.5.9.jar" + WORKDIR /opt/kafka-${KAFKA_VERSION} -ENV JAVA_MAJOR_VERSION=11 +ENV JAVA_MAJOR_VERSION=17 RUN sed -e "s/JAVA_MAJOR_VERSION=.*/JAVA_MAJOR_VERSION=${JAVA_MAJOR_VERSION}/" -i"" ./bin/kafka-run-class.sh diff --git a/vendor/github.com/IBM/sarama/Makefile b/vendor/github.com/IBM/sarama/Makefile index 7cefc2a2c36..ba6f46e41a5 100644 --- a/vendor/github.com/IBM/sarama/Makefile +++ b/vendor/github.com/IBM/sarama/Makefile @@ -9,7 +9,7 @@ FILES := $(shell find . -name '*.go' -type f -not -name '*.pb.go' -not -name TESTS := $(shell find . -name '*.go' -type f -not -name '*.pb.go' -not -name '*_generated.go' -name '*_test.go') $(GOBIN)/tparse: - GOBIN=$(GOBIN) go install github.com/mfridman/tparse@v0.11.1 + GOBIN=$(GOBIN) go install github.com/mfridman/tparse@v0.16.0 get: $(GO) get ./... $(GO) mod verify diff --git a/vendor/github.com/IBM/sarama/admin.go b/vendor/github.com/IBM/sarama/admin.go index dcf1d7659cc..8aa1f374e4d 100644 --- a/vendor/github.com/IBM/sarama/admin.go +++ b/vendor/github.com/IBM/sarama/admin.go @@ -3,6 +3,7 @@ package sarama import ( "errors" "fmt" + "io" "math/rand" "strconv" "sync" @@ -99,6 +100,9 @@ type ClusterAdmin interface { // This operation is supported by brokers with version 0.11.0.0 or higher. DeleteACL(filter AclFilter, validateOnly bool) ([]MatchingAcl, error) + // ElectLeaders allows to trigger the election of preferred leaders for a set of partitions. + ElectLeaders(ElectionType, map[string][]int32) (map[string]map[int32]*PartitionResult, error) + // List the consumer groups available in the cluster. ListConsumerGroups() (map[string]string, error) @@ -141,6 +145,10 @@ type ClusterAdmin interface { // locally cached value if it's available. Controller() (*Broker, error) + // Coordinator returns the coordinating broker for a consumer group. It will + // return a locally cached value if it's available. + Coordinator(group string) (*Broker, error) + // Remove members from the consumer group by given member identities. // This operation is supported by brokers with version 2.3 or higher // This is for static membership feature. 
KIP-345 @@ -192,14 +200,25 @@ func (ca *clusterAdmin) Controller() (*Broker, error) { return ca.client.Controller() } +func (ca *clusterAdmin) Coordinator(group string) (*Broker, error) { + return ca.client.Coordinator(group) +} + func (ca *clusterAdmin) refreshController() (*Broker, error) { return ca.client.RefreshController() } -// isErrNotController returns `true` if the given error type unwraps to an -// `ErrNotController` response from Kafka -func isErrNotController(err error) bool { - return errors.Is(err, ErrNotController) +// isRetriableControllerError returns `true` if the given error type unwraps to +// an `ErrNotController` or `EOF` response from Kafka +func isRetriableControllerError(err error) bool { + return errors.Is(err, ErrNotController) || errors.Is(err, io.EOF) +} + +// isRetriableGroupCoordinatorError returns `true` if the given error type +// unwraps to an `ErrNotCoordinatorForConsumer`, +// `ErrConsumerCoordinatorNotAvailable` or `EOF` response from Kafka +func isRetriableGroupCoordinatorError(err error) bool { + return errors.Is(err, ErrNotCoordinatorForConsumer) || errors.Is(err, ErrConsumerCoordinatorNotAvailable) || errors.Is(err, io.EOF) } // retryOnError will repeatedly call the given (error-returning) func in the @@ -249,7 +268,7 @@ func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateO request.Version = 1 } - return ca.retryOnError(isErrNotController, func() error { + return ca.retryOnError(isRetriableControllerError, func() error { b, err := ca.Controller() if err != nil { return err @@ -266,7 +285,7 @@ func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateO } if !errors.Is(topicErr.Err, ErrNoError) { - if errors.Is(topicErr.Err, ErrNotController) { + if isRetriableControllerError(topicErr.Err) { _, _ = ca.refreshController() } return topicErr @@ -278,14 +297,14 @@ func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateO func (ca *clusterAdmin) DescribeTopics(topics []string) (metadata []*TopicMetadata, err error) { var response *MetadataResponse - err = ca.retryOnError(isErrNotController, func() error { + err = ca.retryOnError(isRetriableControllerError, func() error { controller, err := ca.Controller() if err != nil { return err } request := NewMetadataRequest(ca.conf.Version, topics) response, err = controller.GetMetadata(request) - if isErrNotController(err) { + if isRetriableControllerError(err) { _, _ = ca.refreshController() } return err @@ -298,7 +317,7 @@ func (ca *clusterAdmin) DescribeTopics(topics []string) (metadata []*TopicMetada func (ca *clusterAdmin) DescribeCluster() (brokers []*Broker, controllerID int32, err error) { var response *MetadataResponse - err = ca.retryOnError(isErrNotController, func() error { + err = ca.retryOnError(isRetriableControllerError, func() error { controller, err := ca.Controller() if err != nil { return err @@ -306,7 +325,7 @@ func (ca *clusterAdmin) DescribeCluster() (brokers []*Broker, controllerID int32 request := NewMetadataRequest(ca.conf.Version, nil) response, err = controller.GetMetadata(request) - if isErrNotController(err) { + if isRetriableControllerError(err) { _, _ = ca.refreshController() } return err @@ -438,7 +457,7 @@ func (ca *clusterAdmin) DeleteTopic(topic string) error { request.Version = 1 } - return ca.retryOnError(isErrNotController, func() error { + return ca.retryOnError(isRetriableControllerError, func() error { b, err := ca.Controller() if err != nil { return err @@ -482,7 +501,7 @@ func (ca *clusterAdmin) 
CreatePartitions(topic string, count int32, assignment [ request.Version = 1 } - return ca.retryOnError(isErrNotController, func() error { + return ca.retryOnError(isRetriableControllerError, func() error { b, err := ca.Controller() if err != nil { return err @@ -523,7 +542,7 @@ func (ca *clusterAdmin) AlterPartitionReassignments(topic string, assignment [][ request.AddBlock(topic, int32(i), assignment[i]) } - return ca.retryOnError(isErrNotController, func() error { + return ca.retryOnError(isRetriableControllerError, func() error { b, err := ca.Controller() if err != nil { return err @@ -570,7 +589,7 @@ func (ca *clusterAdmin) ListPartitionReassignments(topic string, partitions []in request.AddBlock(topic, partitions) var rsp *ListPartitionReassignmentsResponse - err = ca.retryOnError(isErrNotController, func() error { + err = ca.retryOnError(isRetriableControllerError, func() error { b, err := ca.Controller() if err != nil { return err @@ -578,7 +597,7 @@ func (ca *clusterAdmin) ListPartitionReassignments(topic string, partitions []in _ = b.Open(ca.client.Config()) rsp, err = b.ListPartitionReassignments(request) - if isErrNotController(err) { + if isRetriableControllerError(err) { _, _ = ca.refreshController() } return err @@ -907,15 +926,53 @@ func (ca *clusterAdmin) DeleteACL(filter AclFilter, validateOnly bool) ([]Matchi return mAcls, nil } +func (ca *clusterAdmin) ElectLeaders(electionType ElectionType, partitions map[string][]int32) (map[string]map[int32]*PartitionResult, error) { + request := &ElectLeadersRequest{ + Type: electionType, + TopicPartitions: partitions, + TimeoutMs: int32(60000), + } + + if ca.conf.Version.IsAtLeast(V2_4_0_0) { + request.Version = 2 + } else if ca.conf.Version.IsAtLeast(V0_11_0_0) { + request.Version = 1 + } + + var res *ElectLeadersResponse + if err := ca.retryOnError(isRetriableControllerError, func() error { + b, err := ca.Controller() + if err != nil { + return err + } + _ = b.Open(ca.client.Config()) + + res, err = b.ElectLeaders(request) + if err != nil { + return err + } + if !errors.Is(res.ErrorCode, ErrNoError) { + if isRetriableControllerError(res.ErrorCode) { + _, _ = ca.refreshController() + } + return res.ErrorCode + } + return nil + }); err != nil { + return nil, err + } + return res.ReplicaElectionResults, nil +} + func (ca *clusterAdmin) DescribeConsumerGroups(groups []string) (result []*GroupDescription, err error) { groupsPerBroker := make(map[*Broker][]string) for _, group := range groups { - controller, err := ca.client.Coordinator(group) + coordinator, err := ca.client.Coordinator(group) if err != nil { return nil, err } - groupsPerBroker[controller] = append(groupsPerBroker[controller], group) + groupsPerBroker[coordinator] = append(groupsPerBroker[coordinator], group) } for broker, brokerGroups := range groupsPerBroker { @@ -1007,22 +1064,36 @@ func (ca *clusterAdmin) ListConsumerGroups() (allGroups map[string]string, err e } func (ca *clusterAdmin) ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error) { - coordinator, err := ca.client.Coordinator(group) - if err != nil { - return nil, err - } - + var response *OffsetFetchResponse request := NewOffsetFetchRequest(ca.conf.Version, group, topicPartitions) + err := ca.retryOnError(isRetriableGroupCoordinatorError, func() (err error) { + defer func() { + if err != nil && isRetriableGroupCoordinatorError(err) { + _ = ca.client.RefreshCoordinator(group) + } + }() + + coordinator, err := ca.client.Coordinator(group) + if err != nil { 
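+ // A retriable error returned below trips the deferred RefreshCoordinator
+ // above, so the next retryOnError attempt re-resolves the group coordinator.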
+ return err + } + + response, err = coordinator.FetchOffset(request) + if err != nil { + return err + } + if !errors.Is(response.Err, ErrNoError) { + return response.Err + } - return coordinator.FetchOffset(request) + return nil + }) + + return response, err } func (ca *clusterAdmin) DeleteConsumerGroupOffset(group string, topic string, partition int32) error { - coordinator, err := ca.client.Coordinator(group) - if err != nil { - return err - } - + var response *DeleteOffsetsResponse request := &DeleteOffsetsRequest{ Group: group, partitions: map[string][]int32{ @@ -1030,27 +1101,35 @@ func (ca *clusterAdmin) DeleteConsumerGroupOffset(group string, topic string, pa }, } - resp, err := coordinator.DeleteOffsets(request) - if err != nil { - return err - } + return ca.retryOnError(isRetriableGroupCoordinatorError, func() (err error) { + defer func() { + if err != nil && isRetriableGroupCoordinatorError(err) { + _ = ca.client.RefreshCoordinator(group) + } + }() - if !errors.Is(resp.ErrorCode, ErrNoError) { - return resp.ErrorCode - } + coordinator, err := ca.client.Coordinator(group) + if err != nil { + return err + } - if !errors.Is(resp.Errors[topic][partition], ErrNoError) { - return resp.Errors[topic][partition] - } - return nil + response, err = coordinator.DeleteOffsets(request) + if err != nil { + return err + } + if !errors.Is(response.ErrorCode, ErrNoError) { + return response.ErrorCode + } + if !errors.Is(response.Errors[topic][partition], ErrNoError) { + return response.Errors[topic][partition] + } + + return nil + }) } func (ca *clusterAdmin) DeleteConsumerGroup(group string) error { - coordinator, err := ca.client.Coordinator(group) - if err != nil { - return err - } - + var response *DeleteGroupsResponse request := &DeleteGroupsRequest{ Groups: []string{group}, } @@ -1058,21 +1137,34 @@ func (ca *clusterAdmin) DeleteConsumerGroup(group string) error { request.Version = 1 } - resp, err := coordinator.DeleteGroups(request) - if err != nil { - return err - } + return ca.retryOnError(isRetriableGroupCoordinatorError, func() (err error) { + defer func() { + if err != nil && isRetriableGroupCoordinatorError(err) { + _ = ca.client.RefreshCoordinator(group) + } + }() - groupErr, ok := resp.GroupErrorCodes[group] - if !ok { - return ErrIncompleteResponse - } + coordinator, err := ca.client.Coordinator(group) + if err != nil { + return err + } - if !errors.Is(groupErr, ErrNoError) { - return groupErr - } + response, err = coordinator.DeleteGroups(request) + if err != nil { + return err + } - return nil + groupErr, ok := response.GroupErrorCodes[group] + if !ok { + return ErrIncompleteResponse + } + + if !errors.Is(groupErr, ErrNoError) { + return groupErr + } + + return nil + }) } func (ca *clusterAdmin) DescribeLogDirs(brokerIds []int32) (allLogDirs map[int32][]DescribeLogDirsResponseDirMetadata, err error) { @@ -1170,7 +1262,7 @@ func (ca *clusterAdmin) AlterUserScramCredentials(u []AlterUserScramCredentialsU } var rsp *AlterUserScramCredentialsResponse - err := ca.retryOnError(isErrNotController, func() error { + err := ca.retryOnError(isRetriableControllerError, func() error { b, err := ca.Controller() if err != nil { return err @@ -1248,18 +1340,14 @@ func (ca *clusterAdmin) AlterClientQuotas(entity []QuotaEntityComponent, op Clie return nil } -func (ca *clusterAdmin) RemoveMemberFromConsumerGroup(groupId string, groupInstanceIds []string) (*LeaveGroupResponse, error) { +func (ca *clusterAdmin) RemoveMemberFromConsumerGroup(group string, groupInstanceIds []string) 
(*LeaveGroupResponse, error) { if !ca.conf.Version.IsAtLeast(V2_4_0_0) { return nil, ConfigurationError("Removing members from a consumer group requires Kafka version of at least v2.4.0") } - - controller, err := ca.client.Coordinator(groupId) - if err != nil { - return nil, err - } + var response *LeaveGroupResponse request := &LeaveGroupRequest{ Version: 3, - GroupId: groupId, + GroupId: group, } for _, instanceId := range groupInstanceIds { groupInstanceId := instanceId @@ -1267,5 +1355,28 @@ func (ca *clusterAdmin) RemoveMemberFromConsumerGroup(groupId string, groupInsta GroupInstanceId: &groupInstanceId, }) } - return controller.LeaveGroup(request) + err := ca.retryOnError(isRetriableGroupCoordinatorError, func() (err error) { + defer func() { + if err != nil && isRetriableGroupCoordinatorError(err) { + _ = ca.client.RefreshCoordinator(group) + } + }() + + coordinator, err := ca.client.Coordinator(group) + if err != nil { + return err + } + + response, err = coordinator.LeaveGroup(request) + if err != nil { + return err + } + if !errors.Is(response.Err, ErrNoError) { + return response.Err + } + + return nil + }) + + return response, err } diff --git a/vendor/github.com/IBM/sarama/async_producer.go b/vendor/github.com/IBM/sarama/async_producer.go index f629a6a2e7f..5f257524b64 100644 --- a/vendor/github.com/IBM/sarama/async_producer.go +++ b/vendor/github.com/IBM/sarama/async_producer.go @@ -13,6 +13,13 @@ import ( "github.com/rcrowley/go-metrics" ) +// ErrProducerRetryBufferOverflow is returned when the bridging retry buffer is full and OOM prevention needs to be applied. +var ErrProducerRetryBufferOverflow = errors.New("retry buffer full: message discarded to prevent buffer overflow") + +// minFunctionalRetryBufferLength is the lower limit of Producer.Retry.MaxBufferLength for it to function. +// Any non-zero maxBufferLength below this lower limit is raised to the lower limit. +const minFunctionalRetryBufferLength = 4 * 1024 + // AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages // to the correct broker for the provided topic-partition, refreshing metadata as appropriate, // and parses responses for errors.
You must read from the Errors() channel or the @@ -1101,7 +1108,7 @@ func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceRespo bp.parent.returnSuccesses(pSet.msgs) // Retriable errors case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition, - ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend: + ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend, ErrKafkaStorageError: if bp.parent.conf.Producer.Retry.Max <= 0 { bp.parent.abandonBrokerConnection(bp.broker) bp.parent.returnErrors(pSet.msgs, block.Err) @@ -1134,7 +1141,7 @@ func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceRespo switch block.Err { case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition, - ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend: + ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend, ErrKafkaStorageError: Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n", bp.broker.ID(), topic, partition, block.Err) if bp.currentRetries[topic] == nil { @@ -1207,6 +1214,11 @@ func (bp *brokerProducer) handleError(sent *produceSet, err error) { // effectively a "bridge" between the flushers and the dispatcher in order to avoid deadlock // based on https://godoc.org/github.com/eapache/channels#InfiniteChannel func (p *asyncProducer) retryHandler() { + maxBufferSize := p.conf.Producer.Retry.MaxBufferLength + if 0 < maxBufferSize && maxBufferSize < minFunctionalRetryBufferLength { + maxBufferSize = minFunctionalRetryBufferLength + } + var msg *ProducerMessage buf := queue.New() @@ -1227,6 +1239,19 @@ func (p *asyncProducer) retryHandler() { } buf.Add(msg) + + if maxBufferSize > 0 && buf.Length() >= maxBufferSize { + msgToHandle := buf.Peek().(*ProducerMessage) + if msgToHandle.flags == 0 { + select { + case p.input <- msgToHandle: + buf.Remove() + default: + buf.Remove() + p.returnError(msgToHandle, ErrProducerRetryBufferOverflow) + } + } + } } } diff --git a/vendor/github.com/IBM/sarama/balance_strategy.go b/vendor/github.com/IBM/sarama/balance_strategy.go index 30d41779c1e..b5bc30a13bd 100644 --- a/vendor/github.com/IBM/sarama/balance_strategy.go +++ b/vendor/github.com/IBM/sarama/balance_strategy.go @@ -989,6 +989,7 @@ func (p *partitionMovements) getTheActualPartitionToBeMoved(partition topicParti return reversePairPartition } +//nolint:unused // this is used but only in unittests as a helper (which are excluded by the integration build tag) func (p *partitionMovements) isLinked(src, dst string, pairs []consumerPair, currentPath []string) ([]string, bool) { if src == dst { return currentPath, false @@ -1023,6 +1024,7 @@ func (p *partitionMovements) isLinked(src, dst string, pairs []consumerPair, cur return currentPath, false } +//nolint:unused // this is used but only in unittests as a helper (which are excluded by the integration build tag) func (p *partitionMovements) in(cycle []string, cycles [][]string) bool { superCycle := make([]string, len(cycle)-1) for i := 0; i < len(cycle)-1; i++ { @@ -1037,6 +1039,7 @@ func (p *partitionMovements) in(cycle []string, cycles [][]string) bool { return false } +//nolint:unused // this is used but only in unittests as a helper (which are excluded by the integration build tag) func (p *partitionMovements) hasCycles(pairs []consumerPair) bool { cycles := make([][]string, 0) for _, pair := range pairs { @@ -1068,6 +1071,7 @@ func (p 
*partitionMovements) hasCycles(pairs []consumerPair) bool { return false } +//nolint:unused // this is used but only in unittests as a helper (which are excluded by the integration build tag) func (p *partitionMovements) isSticky() bool { for topic, movements := range p.PartitionMovementsByTopic { movementPairs := make([]consumerPair, len(movements)) @@ -1085,6 +1089,7 @@ func (p *partitionMovements) isSticky() bool { return true } +//nolint:unused // this is used but only in unittests as a helper (which are excluded by the integration build tag) func indexOfSubList(source []string, target []string) int { targetSize := len(target) maxCandidate := len(source) - targetSize diff --git a/vendor/github.com/IBM/sarama/broker.go b/vendor/github.com/IBM/sarama/broker.go index d0d5b87b8b9..c4f1005f56e 100644 --- a/vendor/github.com/IBM/sarama/broker.go +++ b/vendor/github.com/IBM/sarama/broker.go @@ -243,9 +243,9 @@ func (b *Broker) Open(conf *Config) error { if b.connErr != nil { err = b.conn.Close() if err == nil { - DebugLogger.Printf("Closed connection to broker %s\n", b.addr) + DebugLogger.Printf("Closed connection to broker %s due to SASL v0 auth error: %s\n", b.addr, b.connErr) } else { - Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err) + Logger.Printf("Error while closing connection to broker %s (due to SASL v0 auth error: %s): %s\n", b.addr, b.connErr, err) } b.conn = nil atomic.StoreInt32(&b.opened, 0) @@ -264,9 +264,9 @@ func (b *Broker) Open(conf *Config) error { <-b.done err = b.conn.Close() if err == nil { - DebugLogger.Printf("Closed connection to broker %s\n", b.addr) + DebugLogger.Printf("Closed connection to broker %s due to SASL v1 auth error: %s\n", b.addr, b.connErr) } else { - Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err) + Logger.Printf("Error while closing connection to broker %s (due to SASL v1 auth error: %s): %s\n", b.addr, b.connErr, err) } b.conn = nil atomic.StoreInt32(&b.opened, 0) @@ -689,6 +689,18 @@ func (b *Broker) ListPartitionReassignments(request *ListPartitionReassignmentsR return response, nil } +// ElectLeaders sends an ElectLeaders request and returns the partition election results +func (b *Broker) ElectLeaders(request *ElectLeadersRequest) (*ElectLeadersResponse, error) { + response := new(ElectLeadersResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + // DeleteRecords send a request to delete records and return delete record // response or error func (b *Broker) DeleteRecords(request *DeleteRecordsRequest) (*DeleteRecordsResponse, error) { @@ -1242,12 +1254,12 @@ func (b *Broker) authenticateViaSASLv1() error { handshakeErr := b.sendInternal(handshakeRequest, prom) if handshakeErr != nil { - Logger.Printf("Error while performing SASL handshake %s\n", b.addr) + Logger.Printf("Error while performing SASL handshake %s: %s\n", b.addr, handshakeErr) return handshakeErr } handshakeErr = handleResponsePromise(handshakeRequest, handshakeResponse, prom, metricRegistry) if handshakeErr != nil { - Logger.Printf("Error while performing SASL handshake %s\n", b.addr) + Logger.Printf("Error while handling SASL handshake response %s: %s\n", b.addr, handshakeErr) return handshakeErr } @@ -1267,7 +1279,7 @@ } authErr = handleResponsePromise(authenticateRequest, authenticateResponse, prom, metricRegistry) if authErr != nil { - Logger.Printf("Error while performing SASL Auth %s\n", b.addr)
+ Logger.Printf("Error while performing SASL Auth %s: %s\n", b.addr, authErr) return nil, authErr } @@ -1385,7 +1397,7 @@ func (b *Broker) sendAndReceiveSASLPlainAuthV0() error { if b.conf.Net.SASL.Handshake { handshakeErr := b.sendAndReceiveSASLHandshake(SASLTypePlaintext, b.conf.Net.SASL.Version) if handshakeErr != nil { - Logger.Printf("Error while performing SASL handshake %s\n", b.addr) + Logger.Printf("Error while performing SASL handshake %s: %s\n", b.addr, handshakeErr) return handshakeErr } } @@ -1426,9 +1438,6 @@ func (b *Broker) sendAndReceiveSASLPlainAuthV0() error { func (b *Broker) sendAndReceiveSASLPlainAuthV1(authSendReceiver func(authBytes []byte) (*SaslAuthenticateResponse, error)) error { authBytes := []byte(b.conf.Net.SASL.AuthIdentity + "\x00" + b.conf.Net.SASL.User + "\x00" + b.conf.Net.SASL.Password) _, err := authSendReceiver(authBytes) - if err != nil { - return err - } return err } diff --git a/vendor/github.com/IBM/sarama/client.go b/vendor/github.com/IBM/sarama/client.go index 2decba7c553..5c54b446130 100644 --- a/vendor/github.com/IBM/sarama/client.go +++ b/vendor/github.com/IBM/sarama/client.go @@ -363,34 +363,19 @@ func (client *client) MetadataTopics() ([]string, error) { } func (client *client) Partitions(topic string) ([]int32, error) { - if client.Closed() { - return nil, ErrClosedClient - } - - partitions := client.cachedPartitions(topic, allPartitions) - - if len(partitions) == 0 { - err := client.RefreshMetadata(topic) - if err != nil { - return nil, err - } - partitions = client.cachedPartitions(topic, allPartitions) - } - - // no partitions found after refresh metadata - if len(partitions) == 0 { - return nil, ErrUnknownTopicOrPartition - } - - return partitions, nil + return client.getPartitions(topic, allPartitions) } func (client *client) WritablePartitions(topic string) ([]int32, error) { + return client.getPartitions(topic, writablePartitions) +} + +func (client *client) getPartitions(topic string, pt partitionType) ([]int32, error) { if client.Closed() { return nil, ErrClosedClient } - partitions := client.cachedPartitions(topic, writablePartitions) + partitions := client.cachedPartitions(topic, pt) // len==0 catches when it's nil (no such topic) and the odd case when every single // partition is undergoing leader election simultaneously. 
Callers have to be able to handle @@ -403,7 +388,7 @@ func (client *client) WritablePartitions(topic string) ([]int32, error) { if err != nil { return nil, err } - partitions = client.cachedPartitions(topic, writablePartitions) + partitions = client.cachedPartitions(topic, pt) } if partitions == nil { @@ -414,56 +399,24 @@ func (client *client) WritablePartitions(topic string) ([]int32, error) { } func (client *client) Replicas(topic string, partitionID int32) ([]int32, error) { - if client.Closed() { - return nil, ErrClosedClient - } - - metadata := client.cachedMetadata(topic, partitionID) - - if metadata == nil { - err := client.RefreshMetadata(topic) - if err != nil { - return nil, err - } - metadata = client.cachedMetadata(topic, partitionID) - } - - if metadata == nil { - return nil, ErrUnknownTopicOrPartition - } - - if errors.Is(metadata.Err, ErrReplicaNotAvailable) { - return dupInt32Slice(metadata.Replicas), metadata.Err - } - return dupInt32Slice(metadata.Replicas), nil + return client.getReplicas(topic, partitionID, func(metadata *PartitionMetadata) []int32 { + return metadata.Replicas + }) } func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32, error) { - if client.Closed() { - return nil, ErrClosedClient - } - - metadata := client.cachedMetadata(topic, partitionID) - - if metadata == nil { - err := client.RefreshMetadata(topic) - if err != nil { - return nil, err - } - metadata = client.cachedMetadata(topic, partitionID) - } - - if metadata == nil { - return nil, ErrUnknownTopicOrPartition - } - - if errors.Is(metadata.Err, ErrReplicaNotAvailable) { - return dupInt32Slice(metadata.Isr), metadata.Err - } - return dupInt32Slice(metadata.Isr), nil + return client.getReplicas(topic, partitionID, func(metadata *PartitionMetadata) []int32 { + return metadata.Isr + }) } func (client *client) OfflineReplicas(topic string, partitionID int32) ([]int32, error) { + return client.getReplicas(topic, partitionID, func(metadata *PartitionMetadata) []int32 { + return metadata.OfflineReplicas + }) +} + +func (client *client) getReplicas(topic string, partitionID int32, extractor func(metadata *PartitionMetadata) []int32) ([]int32, error) { if client.Closed() { return nil, ErrClosedClient } @@ -482,10 +435,11 @@ func (client *client) OfflineReplicas(topic string, partitionID int32) ([]int32, return nil, ErrUnknownTopicOrPartition } + replicas := extractor(metadata) if errors.Is(metadata.Err, ErrReplicaNotAvailable) { - return dupInt32Slice(metadata.OfflineReplicas), metadata.Err + return dupInt32Slice(replicas), metadata.Err } - return dupInt32Slice(metadata.OfflineReplicas), nil + return dupInt32Slice(replicas), nil } func (client *client) Leader(topic string, partitionID int32) (*Broker, error) { diff --git a/vendor/github.com/IBM/sarama/config.go b/vendor/github.com/IBM/sarama/config.go index facf7664367..8c7c4c98531 100644 --- a/vendor/github.com/IBM/sarama/config.go +++ b/vendor/github.com/IBM/sarama/config.go @@ -269,6 +269,13 @@ type Config struct { // more sophisticated backoff strategies. This takes precedence over // `Backoff` if set. BackoffFunc func(retries, maxRetries int) time.Duration + // The maximum length of the bridging buffer between `input` and `retries` channels + // in AsyncProducer#retryHandler. + // The limit prevents this buffer from growing without bound and causing OOM. + // Defaults to 0, which means unlimited (as does any negative value). + // Any positive value below 4096 is raised to 4096.
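+ // For example (an illustrative sketch, not part of the upstream comment): + //   cfg := NewConfig() + //   cfg.Producer.Retry.MaxBufferLength = 8192 // bound the retry bridge at 8192 messages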
+ MaxBufferLength int } // Interceptors to be called when the producer dispatcher reads the @@ -387,7 +394,7 @@ type Config struct { // default is 250ms, since 0 causes the consumer to spin when no events are // available. 100-500ms is a reasonable range for most cases. Kafka only // supports precision up to milliseconds; nanoseconds will be truncated. - // Equivalent to the JVM's `fetch.wait.max.ms`. + // Equivalent to the JVM's `fetch.max.wait.ms`. MaxWaitTime time.Duration // The maximum amount of time the consumer expects a message takes to diff --git a/vendor/github.com/IBM/sarama/create_topics_request.go b/vendor/github.com/IBM/sarama/create_topics_request.go index 8382d17c20a..e8c0f01472c 100644 --- a/vendor/github.com/IBM/sarama/create_topics_request.go +++ b/vendor/github.com/IBM/sarama/create_topics_request.go @@ -16,6 +16,21 @@ type CreateTopicsRequest struct { ValidateOnly bool } +func NewCreateTopicsRequest(version KafkaVersion, topicDetails map[string]*TopicDetail, timeout time.Duration) *CreateTopicsRequest { + r := &CreateTopicsRequest{ + TopicDetails: topicDetails, + Timeout: timeout, + } + if version.IsAtLeast(V2_0_0_0) { + r.Version = 3 + } else if version.IsAtLeast(V0_11_0_0) { + r.Version = 2 + } else if version.IsAtLeast(V0_10_2_0) { + r.Version = 1 + } + return r +} + func (c *CreateTopicsRequest) encode(pe packetEncoder) error { if err := pe.putArrayLength(len(c.TopicDetails)); err != nil { return err diff --git a/vendor/github.com/IBM/sarama/delete_topics_request.go b/vendor/github.com/IBM/sarama/delete_topics_request.go index 252c0d02594..f38f32770be 100644 --- a/vendor/github.com/IBM/sarama/delete_topics_request.go +++ b/vendor/github.com/IBM/sarama/delete_topics_request.go @@ -8,6 +8,21 @@ type DeleteTopicsRequest struct { Timeout time.Duration } +func NewDeleteTopicsRequest(version KafkaVersion, topics []string, timeout time.Duration) *DeleteTopicsRequest { + d := &DeleteTopicsRequest{ + Topics: topics, + Timeout: timeout, + } + if version.IsAtLeast(V2_1_0_0) { + d.Version = 3 + } else if version.IsAtLeast(V2_0_0_0) { + d.Version = 2 + } else if version.IsAtLeast(V0_11_0_0) { + d.Version = 1 + } + return d +} + func (d *DeleteTopicsRequest) encode(pe packetEncoder) error { if err := pe.putStringArray(d.Topics); err != nil { return err diff --git a/vendor/github.com/IBM/sarama/docker-compose.yml b/vendor/github.com/IBM/sarama/docker-compose.yml index 55283cfe4f0..1e66cca0ce5 100644 --- a/vendor/github.com/IBM/sarama/docker-compose.yml +++ b/vendor/github.com/IBM/sarama/docker-compose.yml @@ -1,8 +1,7 @@ -version: '3.9' services: zookeeper-1: - hostname: 'zookeeper-1' - image: 'docker.io/library/zookeeper:3.6.3' + container_name: 'zookeeper-1' + image: 'docker.io/library/zookeeper:3.7.2' init: true restart: always environment: @@ -14,8 +13,8 @@ services: ZOO_MAX_CLIENT_CNXNS: '0' ZOO_4LW_COMMANDS_WHITELIST: 'mntr,conf,ruok' zookeeper-2: - hostname: 'zookeeper-2' - image: 'docker.io/library/zookeeper:3.6.3' + container_name: 'zookeeper-2' + image: 'docker.io/library/zookeeper:3.7.2' init: true restart: always environment: @@ -27,8 +26,8 @@ services: ZOO_MAX_CLIENT_CNXNS: '0' ZOO_4LW_COMMANDS_WHITELIST: 'mntr,conf,ruok' zookeeper-3: - hostname: 'zookeeper-3' - image: 'docker.io/library/zookeeper:3.6.3' + container_name: 'zookeeper-3' + image: 'docker.io/library/zookeeper:3.7.2' init: true restart: always environment: @@ -40,20 +39,20 @@ services: ZOO_MAX_CLIENT_CNXNS: '0' ZOO_4LW_COMMANDS_WHITELIST: 'mntr,conf,ruok' kafka-1: - hostname: 'kafka-1' - image: 
'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}' + container_name: 'kafka-1' + image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.2}' init: true build: context: . dockerfile: Dockerfile.kafka args: - KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} + KAFKA_VERSION: ${KAFKA_VERSION:-3.6.2} SCALA_VERSION: ${SCALA_VERSION:-2.13} healthcheck: test: [ 'CMD', - '/opt/kafka-${KAFKA_VERSION:-3.6.0}/bin/kafka-broker-api-versions.sh', + '/opt/kafka-${KAFKA_VERSION:-3.6.2}/bin/kafka-broker-api-versions.sh', '--bootstrap-server', 'kafka-1:9091', ] @@ -68,13 +67,14 @@ services: - toxiproxy restart: always environment: - KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} + KAFKA_VERSION: ${KAFKA_VERSION:-3.6.2} KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29091' KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-1:9091,LISTENER_LOCAL://localhost:29091' KAFKA_CFG_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' KAFKA_CFG_DEFAULT_REPLICATION_FACTOR: '2' + KAFKA_CFG_OFFSETS_TOPIC_REPLICATION_FACTOR: '2' KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: '2' KAFKA_CFG_BROKER_ID: '1' KAFKA_CFG_BROKER_RACK: '1' @@ -86,20 +86,20 @@ services: KAFKA_CFG_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_JVM_PERFORMANCE_OPTS: "-XX:+IgnoreUnrecognizedVMOptions" kafka-2: - hostname: 'kafka-2' - image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}' + container_name: 'kafka-2' + image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.2}' init: true build: context: . dockerfile: Dockerfile.kafka args: - KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} + KAFKA_VERSION: ${KAFKA_VERSION:-3.6.2} SCALA_VERSION: ${SCALA_VERSION:-2.13} healthcheck: test: [ 'CMD', - '/opt/kafka-${KAFKA_VERSION:-3.6.0}/bin/kafka-broker-api-versions.sh', + '/opt/kafka-${KAFKA_VERSION:-3.6.2}/bin/kafka-broker-api-versions.sh', '--bootstrap-server', 'kafka-2:9091', ] @@ -114,13 +114,14 @@ services: - toxiproxy restart: always environment: - KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} + KAFKA_VERSION: ${KAFKA_VERSION:-3.6.2} KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29092' KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-2:9091,LISTENER_LOCAL://localhost:29092' KAFKA_CFG_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' KAFKA_CFG_DEFAULT_REPLICATION_FACTOR: '2' + KAFKA_CFG_OFFSETS_TOPIC_REPLICATION_FACTOR: '2' KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: '2' KAFKA_CFG_BROKER_ID: '2' KAFKA_CFG_BROKER_RACK: '2' @@ -132,20 +133,20 @@ services: KAFKA_CFG_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_JVM_PERFORMANCE_OPTS: "-XX:+IgnoreUnrecognizedVMOptions" kafka-3: - hostname: 'kafka-3' - image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}' + container_name: 'kafka-3' + image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.2}' init: true build: context: . 
dockerfile: Dockerfile.kafka args: - KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} + KAFKA_VERSION: ${KAFKA_VERSION:-3.6.2} SCALA_VERSION: ${SCALA_VERSION:-2.13} healthcheck: test: [ 'CMD', - '/opt/kafka-${KAFKA_VERSION:-3.6.0}/bin/kafka-broker-api-versions.sh', + '/opt/kafka-${KAFKA_VERSION:-3.6.2}/bin/kafka-broker-api-versions.sh', '--bootstrap-server', 'kafka-3:9091', ] @@ -160,13 +161,14 @@ services: - toxiproxy restart: always environment: - KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} + KAFKA_VERSION: ${KAFKA_VERSION:-3.6.2} KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29093' KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-3:9091,LISTENER_LOCAL://localhost:29093' KAFKA_CFG_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' KAFKA_CFG_DEFAULT_REPLICATION_FACTOR: '2' + KAFKA_CFG_OFFSETS_TOPIC_REPLICATION_FACTOR: '2' KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: '2' KAFKA_CFG_BROKER_ID: '3' KAFKA_CFG_BROKER_RACK: '3' @@ -178,20 +180,20 @@ services: KAFKA_CFG_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_JVM_PERFORMANCE_OPTS: "-XX:+IgnoreUnrecognizedVMOptions" kafka-4: - hostname: 'kafka-4' - image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}' + container_name: 'kafka-4' + image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.2}' init: true build: context: . dockerfile: Dockerfile.kafka args: - KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} + KAFKA_VERSION: ${KAFKA_VERSION:-3.6.2} SCALA_VERSION: ${SCALA_VERSION:-2.13} healthcheck: test: [ 'CMD', - '/opt/kafka-${KAFKA_VERSION:-3.6.0}/bin/kafka-broker-api-versions.sh', + '/opt/kafka-${KAFKA_VERSION:-3.6.2}/bin/kafka-broker-api-versions.sh', '--bootstrap-server', 'kafka-4:9091', ] @@ -206,13 +208,14 @@ services: - toxiproxy restart: always environment: - KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} + KAFKA_VERSION: ${KAFKA_VERSION:-3.6.2} KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29094' KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-4:9091,LISTENER_LOCAL://localhost:29094' KAFKA_CFG_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' KAFKA_CFG_DEFAULT_REPLICATION_FACTOR: '2' + KAFKA_CFG_OFFSETS_TOPIC_REPLICATION_FACTOR: '2' KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: '2' KAFKA_CFG_BROKER_ID: '4' KAFKA_CFG_BROKER_RACK: '4' @@ -224,20 +227,20 @@ services: KAFKA_CFG_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_JVM_PERFORMANCE_OPTS: "-XX:+IgnoreUnrecognizedVMOptions" kafka-5: - hostname: 'kafka-5' - image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}' + container_name: 'kafka-5' + image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.2}' init: true build: context: . 
dockerfile: Dockerfile.kafka args: - KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} + KAFKA_VERSION: ${KAFKA_VERSION:-3.6.2} SCALA_VERSION: ${SCALA_VERSION:-2.13} healthcheck: test: [ 'CMD', - '/opt/kafka-${KAFKA_VERSION:-3.6.0}/bin/kafka-broker-api-versions.sh', + '/opt/kafka-${KAFKA_VERSION:-3.6.2}/bin/kafka-broker-api-versions.sh', '--bootstrap-server', 'kafka-5:9091', ] @@ -252,13 +255,14 @@ services: - toxiproxy restart: always environment: - KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} + KAFKA_VERSION: ${KAFKA_VERSION:-3.6.2} KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29095' KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-5:9091,LISTENER_LOCAL://localhost:29095' KAFKA_CFG_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' KAFKA_CFG_DEFAULT_REPLICATION_FACTOR: '2' + KAFKA_CFG_OFFSETS_TOPIC_REPLICATION_FACTOR: '2' KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: '2' KAFKA_CFG_BROKER_ID: '5' KAFKA_CFG_BROKER_RACK: '5' @@ -270,7 +274,7 @@ services: KAFKA_CFG_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_JVM_PERFORMANCE_OPTS: "-XX:+IgnoreUnrecognizedVMOptions" toxiproxy: - hostname: 'toxiproxy' + container_name: 'toxiproxy' image: 'ghcr.io/shopify/toxiproxy:2.4.0' init: true healthcheck: diff --git a/vendor/github.com/IBM/sarama/elect_leaders_request.go b/vendor/github.com/IBM/sarama/elect_leaders_request.go new file mode 100644 index 00000000000..cd8d6a7f0b8 --- /dev/null +++ b/vendor/github.com/IBM/sarama/elect_leaders_request.go @@ -0,0 +1,134 @@ +package sarama + +type ElectLeadersRequest struct { + Version int16 + Type ElectionType + TopicPartitions map[string][]int32 + TimeoutMs int32 +} + +func (r *ElectLeadersRequest) encode(pe packetEncoder) error { + if r.Version > 0 { + pe.putInt8(int8(r.Type)) + } + + pe.putCompactArrayLength(len(r.TopicPartitions)) + + for topic, partitions := range r.TopicPartitions { + if r.Version < 2 { + if err := pe.putString(topic); err != nil { + return err + } + } else { + if err := pe.putCompactString(topic); err != nil { + return err + } + } + + if err := pe.putCompactInt32Array(partitions); err != nil { + return err + } + + if r.Version >= 2 { + pe.putEmptyTaggedFieldArray() + } + } + + pe.putInt32(r.TimeoutMs) + + if r.Version >= 2 { + pe.putEmptyTaggedFieldArray() + } + + return nil +} + +func (r *ElectLeadersRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + if r.Version > 0 { + t, err := pd.getInt8() + if err != nil { + return err + } + r.Type = ElectionType(t) + } + + topicCount, err := pd.getCompactArrayLength() + if err != nil { + return err + } + if topicCount > 0 { + r.TopicPartitions = make(map[string][]int32) + for i := 0; i < topicCount; i++ { + var topic string + if r.Version < 2 { + topic, err = pd.getString() + } else { + topic, err = pd.getCompactString() + } + if err != nil { + return err + } + partitionCount, err := pd.getCompactArrayLength() + if err != nil { + return err + } + partitions := make([]int32, partitionCount) + for j := 0; j < partitionCount; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + partitions[j] = partition + } + r.TopicPartitions[topic] = partitions + if r.Version >= 2 { + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + } + } + + r.TimeoutMs, err = pd.getInt32() + if err != nil { + return err + } + + if r.Version >= 2 { + 
if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + + return nil +} + +func (r *ElectLeadersRequest) key() int16 { + return 43 +} + +func (r *ElectLeadersRequest) version() int16 { + return r.Version +} + +func (r *ElectLeadersRequest) headerVersion() int16 { + return 2 +} + +func (r *ElectLeadersRequest) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 2 +} + +func (r *ElectLeadersRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 2: + return V2_4_0_0 + case 1: + return V0_11_0_0 + case 0: + return V0_10_0_0 + default: + return V2_4_0_0 + } +} diff --git a/vendor/github.com/IBM/sarama/elect_leaders_response.go b/vendor/github.com/IBM/sarama/elect_leaders_response.go new file mode 100644 index 00000000000..8c85249acb2 --- /dev/null +++ b/vendor/github.com/IBM/sarama/elect_leaders_response.go @@ -0,0 +1,173 @@ +package sarama + +import "time" + +type PartitionResult struct { + ErrorCode KError + ErrorMessage *string +} + +func (b *PartitionResult) encode(pe packetEncoder, version int16) error { + pe.putInt16(int16(b.ErrorCode)) + if version < 2 { + if err := pe.putNullableString(b.ErrorMessage); err != nil { + return err + } + } else { + if err := pe.putNullableCompactString(b.ErrorMessage); err != nil { + return err + } + } + if version >= 2 { + pe.putEmptyTaggedFieldArray() + } + return nil +} + +func (b *PartitionResult) decode(pd packetDecoder, version int16) (err error) { + kerr, err := pd.getInt16() + if err != nil { + return err + } + b.ErrorCode = KError(kerr) + if version < 2 { + b.ErrorMessage, err = pd.getNullableString() + } else { + b.ErrorMessage, err = pd.getCompactNullableString() + } + if version >= 2 { + _, err = pd.getEmptyTaggedFieldArray() + } + return err +} + +type ElectLeadersResponse struct { + Version int16 + ThrottleTimeMs int32 + ErrorCode KError + ReplicaElectionResults map[string]map[int32]*PartitionResult +} + +func (r *ElectLeadersResponse) encode(pe packetEncoder) error { + pe.putInt32(r.ThrottleTimeMs) + + if r.Version > 0 { + pe.putInt16(int16(r.ErrorCode)) + } + + pe.putCompactArrayLength(len(r.ReplicaElectionResults)) + for topic, partitions := range r.ReplicaElectionResults { + if r.Version < 2 { + if err := pe.putString(topic); err != nil { + return err + } + } else { + if err := pe.putCompactString(topic); err != nil { + return err + } + } + pe.putCompactArrayLength(len(partitions)) + for partition, result := range partitions { + pe.putInt32(partition) + if err := result.encode(pe, r.Version); err != nil { + return err + } + } + pe.putEmptyTaggedFieldArray() + } + + pe.putEmptyTaggedFieldArray() + + return nil +} + +func (r *ElectLeadersResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + if r.ThrottleTimeMs, err = pd.getInt32(); err != nil { + return err + } + if r.Version > 0 { + kerr, err := pd.getInt16() + if err != nil { + return err + } + r.ErrorCode = KError(kerr) + } + + numTopics, err := pd.getCompactArrayLength() + if err != nil { + return err + } + + r.ReplicaElectionResults = make(map[string]map[int32]*PartitionResult, numTopics) + for i := 0; i < numTopics; i++ { + var topic string + if r.Version < 2 { + topic, err = pd.getString() + } else { + topic, err = pd.getCompactString() + } + if err != nil { + return err + } + + numPartitions, err := pd.getCompactArrayLength() + if err != nil { + return err + } + r.ReplicaElectionResults[topic] = make(map[int32]*PartitionResult, numPartitions) + for j := 0; j < numPartitions; j++ { + partition, err := 
pd.getInt32() + if err != nil { + return err + } + result := new(PartitionResult) + if err := result.decode(pd, r.Version); err != nil { + return err + } + r.ReplicaElectionResults[topic][partition] = result + } + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + + return nil +} + +func (r *ElectLeadersResponse) key() int16 { + return 43 +} + +func (r *ElectLeadersResponse) version() int16 { + return r.Version +} + +func (r *ElectLeadersResponse) headerVersion() int16 { + return 1 +} + +func (r *ElectLeadersResponse) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 2 +} + +func (r *ElectLeadersResponse) requiredVersion() KafkaVersion { + switch r.Version { + case 2: + return V2_4_0_0 + case 1: + return V0_11_0_0 + case 0: + return V0_10_0_0 + default: + return V2_4_0_0 + } +} + +func (r *ElectLeadersResponse) throttleTime() time.Duration { + return time.Duration(r.ThrottleTimeMs) * time.Millisecond +} diff --git a/vendor/github.com/IBM/sarama/election_type.go b/vendor/github.com/IBM/sarama/election_type.go new file mode 100644 index 00000000000..01f3b65b3c6 --- /dev/null +++ b/vendor/github.com/IBM/sarama/election_type.go @@ -0,0 +1,10 @@ +package sarama + +type ElectionType int8 + +const ( + // PreferredElection constant type + PreferredElection ElectionType = 0 + // UncleanElection constant type + UncleanElection ElectionType = 1 +) diff --git a/vendor/github.com/IBM/sarama/entrypoint.sh b/vendor/github.com/IBM/sarama/entrypoint.sh index 9fe9a44b1d9..516a8dc3822 100644 --- a/vendor/github.com/IBM/sarama/entrypoint.sh +++ b/vendor/github.com/IBM/sarama/entrypoint.sh @@ -3,7 +3,7 @@ set -eu set -o pipefail -KAFKA_VERSION="${KAFKA_VERSION:-3.6.0}" +KAFKA_VERSION="${KAFKA_VERSION:-3.6.2}" KAFKA_HOME="/opt/kafka-${KAFKA_VERSION}" if [ ! 
-d "${KAFKA_HOME}" ]; then diff --git a/vendor/github.com/IBM/sarama/errors.go b/vendor/github.com/IBM/sarama/errors.go index 2c431aecb05..842d302571a 100644 --- a/vendor/github.com/IBM/sarama/errors.go +++ b/vendor/github.com/IBM/sarama/errors.go @@ -304,7 +304,7 @@ func (err KError) Error() string { case ErrOffsetsLoadInProgress: return "kafka server: The coordinator is still loading offsets and cannot currently process requests" case ErrConsumerCoordinatorNotAvailable: - return "kafka server: Offset's topic has not yet been created" + return "kafka server: The coordinator is not available" case ErrNotCoordinatorForConsumer: return "kafka server: Request was for a consumer group that is not coordinated by this broker" case ErrInvalidTopic: diff --git a/vendor/github.com/IBM/sarama/mockresponses.go b/vendor/github.com/IBM/sarama/mockresponses.go index d09415b49a0..2c352797f73 100644 --- a/vendor/github.com/IBM/sarama/mockresponses.go +++ b/vendor/github.com/IBM/sarama/mockresponses.go @@ -778,6 +778,28 @@ func (mr *MockListPartitionReassignmentsResponse) For(reqBody versionedDecoder) return res } +type MockElectLeadersResponse struct { + t TestReporter +} + +func NewMockElectLeadersResponse(t TestReporter) *MockElectLeadersResponse { + return &MockElectLeadersResponse{t: t} +} + +func (mr *MockElectLeadersResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*ElectLeadersRequest) + res := &ElectLeadersResponse{Version: req.version(), ReplicaElectionResults: map[string]map[int32]*PartitionResult{}} + + for topic, partitions := range req.TopicPartitions { + for _, partition := range partitions { + res.ReplicaElectionResults[topic] = map[int32]*PartitionResult{ + partition: {ErrorCode: ErrNoError}, + } + } + } + return res +} + type MockDeleteRecordsResponse struct { t TestReporter } diff --git a/vendor/github.com/IBM/sarama/offset_manager.go b/vendor/github.com/IBM/sarama/offset_manager.go index 1bf54590897..2948651272b 100644 --- a/vendor/github.com/IBM/sarama/offset_manager.go +++ b/vendor/github.com/IBM/sarama/offset_manager.go @@ -251,18 +251,31 @@ func (om *offsetManager) Commit() { } func (om *offsetManager) flushToBroker() { + broker, err := om.coordinator() + if err != nil { + om.handleError(err) + return + } + + // Care needs to be taken to unlock this. Don't want to defer the unlock as this would + // cause the lock to be held while waiting for the broker to reply. 
+ broker.lock.Lock() req := om.constructRequest() if req == nil { + broker.lock.Unlock() return } + resp, rp, err := sendOffsetCommit(broker, req) + broker.lock.Unlock() - broker, err := om.coordinator() if err != nil { om.handleError(err) + om.releaseCoordinator(broker) + _ = broker.Close() return } - resp, err := broker.CommitOffset(req) + err = handleResponsePromise(req, resp, rp, nil) if err != nil { om.handleError(err) om.releaseCoordinator(broker) @@ -270,9 +283,20 @@ func (om *offsetManager) flushToBroker() { return } + broker.handleThrottledResponse(resp) om.handleResponse(broker, req, resp) } +func sendOffsetCommit(coordinator *Broker, req *OffsetCommitRequest) (*OffsetCommitResponse, *responsePromise, error) { + resp := new(OffsetCommitResponse) + responseHeaderVersion := resp.headerVersion() + promise, err := coordinator.send(req, true, responseHeaderVersion) + if err != nil { + return nil, nil, err + } + return resp, promise, nil +} + func (om *offsetManager) constructRequest() *OffsetCommitRequest { r := &OffsetCommitRequest{ Version: 1, diff --git a/vendor/github.com/IBM/sarama/request.go b/vendor/github.com/IBM/sarama/request.go index e8e74ca34a6..8f0c2b579a2 100644 --- a/vendor/github.com/IBM/sarama/request.go +++ b/vendor/github.com/IBM/sarama/request.go @@ -194,7 +194,8 @@ func allocateBody(key, version int16) protocolBody { // 41: DescribeDelegationTokenRequest case 42: return &DeleteGroupsRequest{Version: version} - // 43: ElectLeadersRequest + case 43: + return &ElectLeadersRequest{Version: version} case 44: return &IncrementalAlterConfigsRequest{Version: version} case 45: diff --git a/vendor/github.com/IBM/sarama/server.properties b/vendor/github.com/IBM/sarama/server.properties new file mode 100644 index 00000000000..21ba1c7d9c6 --- /dev/null +++ b/vendor/github.com/IBM/sarama/server.properties @@ -0,0 +1,138 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# This configuration file is intended for use in ZK-based mode, where Apache ZooKeeper is required. +# See kafka.server.KafkaConfig for additional details and defaults +# + +############################# Server Basics ############################# + +# The id of the broker. This must be set to a unique integer for each broker. +broker.id=0 + +############################# Socket Server Settings ############################# + +# The address the socket server listens on. If not configured, the host name will be equal to the value of +# java.net.InetAddress.getCanonicalHostName(), with PLAINTEXT listener name, and port 9092. +# FORMAT: +# listeners = listener_name://host_name:port +# EXAMPLE: +# listeners = PLAINTEXT://your.host.name:9092 +#listeners=PLAINTEXT://:9092 + +# Listener name, hostname and port the broker will advertise to clients. 
+# If not set, it uses the value for "listeners". +#advertised.listeners=PLAINTEXT://your.host.name:9092 + +# Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details +#listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL + +# The number of threads that the server uses for receiving requests from the network and sending responses to the network +num.network.threads=3 + +# The number of threads that the server uses for processing requests, which may include disk I/O +num.io.threads=8 + +# The send buffer (SO_SNDBUF) used by the socket server +socket.send.buffer.bytes=102400 + +# The receive buffer (SO_RCVBUF) used by the socket server +socket.receive.buffer.bytes=102400 + +# The maximum size of a request that the socket server will accept (protection against OOM) +socket.request.max.bytes=104857600 + + +############################# Log Basics ############################# + +# A comma separated list of directories under which to store log files +log.dirs=/tmp/kafka-logs + +# The default number of log partitions per topic. More partitions allow greater +# parallelism for consumption, but this will also result in more files across +# the brokers. +num.partitions=1 + +# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. +# This value is recommended to be increased for installations with data dirs located in RAID array. +num.recovery.threads.per.data.dir=1 + +############################# Internal Topic Settings ############################# +# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state" +# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3. +offsets.topic.replication.factor=1 +transaction.state.log.replication.factor=1 +transaction.state.log.min.isr=1 + +############################# Log Flush Policy ############################# + +# Messages are immediately written to the filesystem but by default we only fsync() to sync +# the OS cache lazily. The following configurations control the flush of data to disk. +# There are a few important trade-offs here: +# 1. Durability: Unflushed data may be lost if you are not using replication. +# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush. +# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks. +# The settings below allow one to configure the flush policy to flush data after a period of time or +# every N messages (or both). This can be done globally and overridden on a per-topic basis. + +# The number of messages to accept before forcing a flush of data to disk +#log.flush.interval.messages=10000 + +# The maximum amount of time a message can sit in a log before we force a flush +#log.flush.interval.ms=1000 + +############################# Log Retention Policy ############################# + +# The following configurations control the disposal of log segments. The policy can +# be set to delete segments after a period of time, or after a given size has accumulated. +# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens +# from the end of the log. 
+ +# The minimum age of a log file to be eligible for deletion due to age +log.retention.hours=168 + +# A size-based retention policy for logs. Segments are pruned from the log unless the remaining +# segments drop below log.retention.bytes. Functions independently of log.retention.hours. +#log.retention.bytes=1073741824 + +# The maximum size of a log segment file. When this size is reached a new log segment will be created. +#log.segment.bytes=1073741824 + +# The interval at which log segments are checked to see if they can be deleted according +# to the retention policies +log.retention.check.interval.ms=300000 + +############################# Zookeeper ############################# + +# Zookeeper connection string (see zookeeper docs for details). +# This is a comma-separated list of host:port pairs, each corresponding to a zk +# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002". +# You can also append an optional chroot string to the urls to specify the +# root directory for all kafka znodes. +zookeeper.connect=localhost:2181 + +# Timeout in ms for connecting to zookeeper +zookeeper.connection.timeout.ms=18000 + + +############################# Group Coordinator Settings ############################# + +# The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance. +# The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms. +# The default value for this is 3 seconds. +# We override this to 0 here as it makes for a better out-of-the-box experience for development and testing. +# However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
+group.initial.rebalance.delay.ms=0 diff --git a/vendor/github.com/IBM/sarama/transaction_manager.go b/vendor/github.com/IBM/sarama/transaction_manager.go index ca7e13dab0e..bf20b75e905 100644 --- a/vendor/github.com/IBM/sarama/transaction_manager.go +++ b/vendor/github.com/IBM/sarama/transaction_manager.go @@ -466,7 +466,7 @@ func (t *transactionManager) publishOffsetsToTxn(offsets topicPartitionOffsets, resultOffsets = failedTxn if len(resultOffsets) == 0 { - DebugLogger.Printf("txnmgr/txn-offset-commit [%s] successful txn-offset-commit with group %s %+v\n", + DebugLogger.Printf("txnmgr/txn-offset-commit [%s] successful txn-offset-commit with group %s\n", t.transactionalID, groupId) return resultOffsets, false, nil } diff --git a/vendor/github.com/IBM/sarama/utils.go b/vendor/github.com/IBM/sarama/utils.go index feadc0065bb..b0e1aceff14 100644 --- a/vendor/github.com/IBM/sarama/utils.go +++ b/vendor/github.com/IBM/sarama/utils.go @@ -44,11 +44,10 @@ func withRecover(fn func()) { } func safeAsyncClose(b *Broker) { - tmp := b // local var prevents clobbering in goroutine go withRecover(func() { - if connected, _ := tmp.Connected(); connected { - if err := tmp.Close(); err != nil { - Logger.Println("Error closing broker", tmp.ID(), ":", err) + if connected, _ := b.Connected(); connected { + if err := b.Close(); err != nil { + Logger.Println("Error closing broker", b.ID(), ":", err) } } }) @@ -198,7 +197,16 @@ var ( V3_4_1_0 = newKafkaVersion(3, 4, 1, 0) V3_5_0_0 = newKafkaVersion(3, 5, 0, 0) V3_5_1_0 = newKafkaVersion(3, 5, 1, 0) + V3_5_2_0 = newKafkaVersion(3, 5, 2, 0) V3_6_0_0 = newKafkaVersion(3, 6, 0, 0) + V3_6_1_0 = newKafkaVersion(3, 6, 1, 0) + V3_6_2_0 = newKafkaVersion(3, 6, 2, 0) + V3_7_0_0 = newKafkaVersion(3, 7, 0, 0) + V3_7_1_0 = newKafkaVersion(3, 7, 1, 0) + V3_8_0_0 = newKafkaVersion(3, 8, 0, 0) + V3_8_1_0 = newKafkaVersion(3, 8, 1, 0) + V3_9_0_0 = newKafkaVersion(3, 9, 0, 0) + V4_0_0_0 = newKafkaVersion(4, 0, 0, 0) SupportedVersions = []KafkaVersion{ V0_8_2_0, @@ -237,8 +245,10 @@ var ( V2_6_0_0, V2_6_1_0, V2_6_2_0, + V2_6_3_0, V2_7_0_0, V2_7_1_0, + V2_7_2_0, V2_8_0_0, V2_8_1_0, V2_8_2_0, @@ -259,10 +269,19 @@ var ( V3_4_1_0, V3_5_0_0, V3_5_1_0, + V3_5_2_0, V3_6_0_0, + V3_6_1_0, + V3_6_2_0, + V3_7_0_0, + V3_7_1_0, + V3_8_0_0, + V3_8_1_0, + V3_9_0_0, + V4_0_0_0, } MinVersion = V0_8_2_0 - MaxVersion = V3_6_0_0 + MaxVersion = V4_0_0_0 DefaultVersion = V2_1_0_0 // reduced set of protocol versions to matrix test @@ -274,11 +293,11 @@ var ( V2_0_1_0, V2_2_2_0, V2_4_1_0, - V2_6_2_0, + V2_6_3_0, V2_8_2_0, V3_1_2_0, V3_3_2_0, - V3_6_0_0, + V3_6_2_0, } ) diff --git a/vendor/github.com/VividCortex/gohistogram/.gitignore b/vendor/github.com/VividCortex/gohistogram/.gitignore deleted file mode 100644 index 4c51178c9a2..00000000000 --- a/vendor/github.com/VividCortex/gohistogram/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -\#* -.\#* \ No newline at end of file diff --git a/vendor/github.com/VividCortex/gohistogram/LICENSE b/vendor/github.com/VividCortex/gohistogram/LICENSE deleted file mode 100644 index d23fea36567..00000000000 --- a/vendor/github.com/VividCortex/gohistogram/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2013 VividCortex - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit 
persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/VividCortex/gohistogram/README.md b/vendor/github.com/VividCortex/gohistogram/README.md deleted file mode 100644 index eeb14d36664..00000000000 --- a/vendor/github.com/VividCortex/gohistogram/README.md +++ /dev/null @@ -1,80 +0,0 @@ -# gohistogram - Histograms in Go - -![build status](https://circleci.com/gh/VividCortex/gohistogram.png?circle-token=d37ec652ea117165cd1b342400a801438f575209) - -This package provides [Streaming Approximate Histograms](https://vividcortex.com/blog/2013/07/08/streaming-approximate-histograms/) -for efficient quantile approximations. - -The histograms in this package are based on the algorithms found in -Ben-Haim & Yom-Tov's *A Streaming Parallel Decision Tree Algorithm* -([PDF](http://jmlr.org/papers/volume11/ben-haim10a/ben-haim10a.pdf)). -Histogram bins do not have a preset size. As values stream into -the histogram, bins are dynamically added and merged. - -Another implementation can be found in the Apache Hive project (see -[NumericHistogram](http://hive.apache.org/docs/r0.11.0/api/org/apache/hadoop/hive/ql/udf/generic/NumericHistogram.html)). - -An example: - -![histogram](http://i.imgur.com/5OplaRs.png) - -The accurate method of calculating quantiles (like percentiles) requires -data to be sorted. Streaming histograms make it possible to approximate -quantiles without sorting (or even individually storing) values. - -NumericHistogram is the more basic implementation of a streaming -histogram. WeightedHistogram implements bin values as exponentially-weighted -moving averages. - -A maximum bin size is passed as an argument to the constructor methods. A -larger bin size yields more accurate approximations at the cost of increased -memory utilization and performance. - -A picture of kittens: - -![stack of kittens](http://i.imgur.com/QxRTWAE.jpg) - -## Getting started - -### Using in your own code - - $ go get github.com/VividCortex/gohistogram - -```go -import "github.com/VividCortex/gohistogram" -``` - -### Running tests and making modifications - -Get the code into your workspace: - - $ cd $GOPATH - $ git clone git@github.com:VividCortex/gohistogram.git ./src/github.com/VividCortex/gohistogram - -You can run the tests now: - - $ cd src/github.com/VividCortex/gohistogram - $ go test . - -## API Documentation - -Full source documentation can be found [here][godoc]. - -[godoc]: http://godoc.org/github.com/VividCortex/gohistogram - -## Contributing - -We only accept pull requests for minor fixes or improvements. This includes: - -* Small bug fixes -* Typos -* Documentation or comments - -Please open issues to discuss new features. Pull requests for new features will be rejected, -so we recommend forking the repository and making changes in your fork for your use case. - -## License - -Copyright (c) 2013 VividCortex - -Released under MIT License. 
Check `LICENSE` file for details. diff --git a/vendor/github.com/VividCortex/gohistogram/histogram.go b/vendor/github.com/VividCortex/gohistogram/histogram.go deleted file mode 100644 index ede21fd311b..00000000000 --- a/vendor/github.com/VividCortex/gohistogram/histogram.go +++ /dev/null @@ -1,23 +0,0 @@ -package gohistogram - -// Copyright (c) 2013 VividCortex, Inc. All rights reserved. -// Please see the LICENSE file for applicable license terms. - -// Histogram is the interface that wraps the Add and Quantile methods. -type Histogram interface { - // Add adds a new value, n, to the histogram. Trimming is done - // automatically. - Add(n float64) - - // Quantile returns an approximation. - Quantile(n float64) (q float64) - - // String returns a string reprentation of the histogram, - // which is useful for printing to a terminal. - String() (str string) -} - -type bin struct { - value float64 - count float64 -} diff --git a/vendor/github.com/VividCortex/gohistogram/numerichistogram.go b/vendor/github.com/VividCortex/gohistogram/numerichistogram.go deleted file mode 100644 index 20dea740d19..00000000000 --- a/vendor/github.com/VividCortex/gohistogram/numerichistogram.go +++ /dev/null @@ -1,160 +0,0 @@ -package gohistogram - -// Copyright (c) 2013 VividCortex, Inc. All rights reserved. -// Please see the LICENSE file for applicable license terms. - -import ( - "fmt" -) - -type NumericHistogram struct { - bins []bin - maxbins int - total uint64 -} - -// NewHistogram returns a new NumericHistogram with a maximum of n bins. -// -// There is no "optimal" bin count, but somewhere between 20 and 80 bins -// should be sufficient. -func NewHistogram(n int) *NumericHistogram { - return &NumericHistogram{ - bins: make([]bin, 0), - maxbins: n, - total: 0, - } -} - -func (h *NumericHistogram) Add(n float64) { - defer h.trim() - h.total++ - for i := range h.bins { - if h.bins[i].value == n { - h.bins[i].count++ - return - } - - if h.bins[i].value > n { - - newbin := bin{value: n, count: 1} - head := append(make([]bin, 0), h.bins[0:i]...) - - head = append(head, newbin) - tail := h.bins[i:] - h.bins = append(head, tail...) 
- return - } - } - - h.bins = append(h.bins, bin{count: 1, value: n}) -} - -func (h *NumericHistogram) Quantile(q float64) float64 { - count := q * float64(h.total) - for i := range h.bins { - count -= float64(h.bins[i].count) - - if count <= 0 { - return h.bins[i].value - } - } - - return -1 -} - -// CDF returns the value of the cumulative distribution function -// at x -func (h *NumericHistogram) CDF(x float64) float64 { - count := 0.0 - for i := range h.bins { - if h.bins[i].value <= x { - count += float64(h.bins[i].count) - } - } - - return count / float64(h.total) -} - -// Mean returns the sample mean of the distribution -func (h *NumericHistogram) Mean() float64 { - if h.total == 0 { - return 0 - } - - sum := 0.0 - - for i := range h.bins { - sum += h.bins[i].value * h.bins[i].count - } - - return sum / float64(h.total) -} - -// Variance returns the variance of the distribution -func (h *NumericHistogram) Variance() float64 { - if h.total == 0 { - return 0 - } - - sum := 0.0 - mean := h.Mean() - - for i := range h.bins { - sum += (h.bins[i].count * (h.bins[i].value - mean) * (h.bins[i].value - mean)) - } - - return sum / float64(h.total) -} - -func (h *NumericHistogram) Count() float64 { - return float64(h.total) -} - -// trim merges adjacent bins to decrease the bin count to the maximum value -func (h *NumericHistogram) trim() { - for len(h.bins) > h.maxbins { - // Find closest bins in terms of value - minDelta := 1e99 - minDeltaIndex := 0 - for i := range h.bins { - if i == 0 { - continue - } - - if delta := h.bins[i].value - h.bins[i-1].value; delta < minDelta { - minDelta = delta - minDeltaIndex = i - } - } - - // We need to merge bins minDeltaIndex-1 and minDeltaIndex - totalCount := h.bins[minDeltaIndex-1].count + h.bins[minDeltaIndex].count - mergedbin := bin{ - value: (h.bins[minDeltaIndex-1].value* - h.bins[minDeltaIndex-1].count + - h.bins[minDeltaIndex].value* - h.bins[minDeltaIndex].count) / - totalCount, // weighted average - count: totalCount, // summed heights - } - head := append(make([]bin, 0), h.bins[0:minDeltaIndex-1]...) - tail := append([]bin{mergedbin}, h.bins[minDeltaIndex+1:]...) - h.bins = append(head, tail...) - } -} - -// String returns a string reprentation of the histogram, -// which is useful for printing to a terminal. -func (h *NumericHistogram) String() (str string) { - str += fmt.Sprintln("Total:", h.total) - - for i := range h.bins { - var bar string - for j := 0; j < int(float64(h.bins[i].count)/float64(h.total)*200); j++ { - bar += "." - } - str += fmt.Sprintln(h.bins[i].value, "\t", bar) - } - - return -} diff --git a/vendor/github.com/VividCortex/gohistogram/weightedhistogram.go b/vendor/github.com/VividCortex/gohistogram/weightedhistogram.go deleted file mode 100644 index 16eed37193b..00000000000 --- a/vendor/github.com/VividCortex/gohistogram/weightedhistogram.go +++ /dev/null @@ -1,190 +0,0 @@ -// Package gohistogram contains implementations of weighted and exponential histograms. -package gohistogram - -// Copyright (c) 2013 VividCortex, Inc. All rights reserved. -// Please see the LICENSE file for applicable license terms. - -import "fmt" - -// A WeightedHistogram implements Histogram. A WeightedHistogram has bins that have values -// which are exponentially weighted moving averages. This allows you keep inserting large -// amounts of data into the histogram and approximate quantiles with recency factored in. 
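To make the weighting concrete, here is a minimal, self-contained sketch of the EWMA update described above, built only from the formula in the doc comments (`alpha = 2/(N+1)`); it is an editorial illustration, not the removed implementation:

```go
package main

import "fmt"

// ewma mirrors the WeightedHistogram bin update: the new observation is
// weighted by (1 - alpha) and the existing value decays by alpha.
func ewma(existing, observed, alpha float64) float64 {
	return observed*(1-alpha) + existing*alpha
}

func main() {
	// alpha = 2 / (N + 1); an average window age of N = 30 yields the
	// 0.064516129 mentioned in the constructor's doc comment below.
	alpha := 2.0 / (30 + 1)
	avg := 0.0
	for _, v := range []float64{10, 12, 11, 13} {
		avg = ewma(avg, v, alpha)
	}
	fmt.Printf("alpha = %.9f, weighted value = %.2f\n", alpha, avg)
}
```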
-type WeightedHistogram struct { - bins []bin - maxbins int - total float64 - alpha float64 -} - -// NewWeightedHistogram returns a new WeightedHistogram with a maximum of n bins with a decay factor -// of alpha. -// -// There is no "optimal" bin count, but somewhere between 20 and 80 bins should be -// sufficient. -// -// Alpha should be set to 2 / (N+1), where N represents the average age of the moving window. -// For example, a 60-second window with an average age of 30 seconds would yield an -// alpha of 0.064516129. -func NewWeightedHistogram(n int, alpha float64) *WeightedHistogram { - return &WeightedHistogram{ - bins: make([]bin, 0), - maxbins: n, - total: 0, - alpha: alpha, - } -} - -func ewma(existingVal float64, newVal float64, alpha float64) (result float64) { - result = newVal*(1-alpha) + existingVal*alpha - return -} - -func (h *WeightedHistogram) scaleDown(except int) { - for i := range h.bins { - if i != except { - h.bins[i].count = ewma(h.bins[i].count, 0, h.alpha) - } - } -} - -func (h *WeightedHistogram) Add(n float64) { - defer h.trim() - for i := range h.bins { - if h.bins[i].value == n { - h.bins[i].count++ - - defer h.scaleDown(i) - return - } - - if h.bins[i].value > n { - - newbin := bin{value: n, count: 1} - head := append(make([]bin, 0), h.bins[0:i]...) - - head = append(head, newbin) - tail := h.bins[i:] - h.bins = append(head, tail...) - - defer h.scaleDown(i) - return - } - } - - h.bins = append(h.bins, bin{count: 1, value: n}) -} - -func (h *WeightedHistogram) Quantile(q float64) float64 { - count := q * h.total - for i := range h.bins { - count -= float64(h.bins[i].count) - - if count <= 0 { - return h.bins[i].value - } - } - - return -1 -} - -// CDF returns the value of the cumulative distribution function -// at x -func (h *WeightedHistogram) CDF(x float64) float64 { - count := 0.0 - for i := range h.bins { - if h.bins[i].value <= x { - count += float64(h.bins[i].count) - } - } - - return count / h.total -} - -// Mean returns the sample mean of the distribution -func (h *WeightedHistogram) Mean() float64 { - if h.total == 0 { - return 0 - } - - sum := 0.0 - - for i := range h.bins { - sum += h.bins[i].value * h.bins[i].count - } - - return sum / h.total -} - -// Variance returns the variance of the distribution -func (h *WeightedHistogram) Variance() float64 { - if h.total == 0 { - return 0 - } - - sum := 0.0 - mean := h.Mean() - - for i := range h.bins { - sum += (h.bins[i].count * (h.bins[i].value - mean) * (h.bins[i].value - mean)) - } - - return sum / h.total -} - -func (h *WeightedHistogram) Count() float64 { - return h.total -} - -func (h *WeightedHistogram) trim() { - total := 0.0 - for i := range h.bins { - total += h.bins[i].count - } - h.total = total - for len(h.bins) > h.maxbins { - - // Find closest bins in terms of value - minDelta := 1e99 - minDeltaIndex := 0 - for i := range h.bins { - if i == 0 { - continue - } - - if delta := h.bins[i].value - h.bins[i-1].value; delta < minDelta { - minDelta = delta - minDeltaIndex = i - } - } - - // We need to merge bins minDeltaIndex-1 and minDeltaIndex - totalCount := h.bins[minDeltaIndex-1].count + h.bins[minDeltaIndex].count - mergedbin := bin{ - value: (h.bins[minDeltaIndex-1].value* - h.bins[minDeltaIndex-1].count + - h.bins[minDeltaIndex].value* - h.bins[minDeltaIndex].count) / - totalCount, // weighted average - count: totalCount, // summed heights - } - head := append(make([]bin, 0), h.bins[0:minDeltaIndex-1]...) - tail := append([]bin{mergedbin}, h.bins[minDeltaIndex+1:]...) 
- h.bins = append(head, tail...) - } -} - -// String returns a string reprentation of the histogram, -// which is useful for printing to a terminal. -func (h *WeightedHistogram) String() (str string) { - str += fmt.Sprintln("Total:", h.total) - - for i := range h.bins { - var bar string - for j := 0; j < int(float64(h.bins[i].count)/float64(h.total)*200); j++ { - bar += "." - } - str += fmt.Sprintln(h.bins[i].value, "\t", bar) - } - - return -} diff --git a/vendor/github.com/alecthomas/kong/.gitignore b/vendor/github.com/alecthomas/kong/.gitignore index ba077a4031a..e69de29bb2d 100644 --- a/vendor/github.com/alecthomas/kong/.gitignore +++ b/vendor/github.com/alecthomas/kong/.gitignore @@ -1 +0,0 @@ -bin diff --git a/vendor/github.com/alecthomas/kong/.golangci.yml b/vendor/github.com/alecthomas/kong/.golangci.yml index 1ee8fa6f542..844092f993b 100644 --- a/vendor/github.com/alecthomas/kong/.golangci.yml +++ b/vendor/github.com/alecthomas/kong/.golangci.yml @@ -7,7 +7,6 @@ output: linters: enable-all: true disable: - - maligned - lll - gochecknoglobals - wsl @@ -17,11 +16,8 @@ - goprintffuncname - paralleltest - nlreturn - - goerr113 - - ifshort - testpackage - wrapcheck - - exhaustivestruct - forbidigo - gci - godot @@ -29,9 +25,6 @@ - cyclop - errorlint - nestif - - golint - - scopelint - - interfacer - tagliatelle - thelper - godox @@ -41,10 +34,22 @@ - exhaustruct - nonamedreturns - nilnil + - depguard # nothing to guard against yet + - tagalign # hurts readability of kong tags + - mnd + - perfsprint + - err113 + - copyloopvar + - intrange + - execinquery linters-settings: govet: - check-shadowing: true + # These govet checks are disabled by default, but they're useful. + enable: + - nilness + - sortslice + - unusedwrite dupl: threshold: 100 gocyclo: @@ -65,4 +70,15 @@ issues: - 'bad syntax for struct tag key' - 'bad syntax for struct tag pair' - 'result .* \(error\) is always nil' - - 'package io/ioutil is deprecated' + - 'Error return value of `fmt.Fprintln` is not checked' + + exclude-rules: + # Don't warn on unused parameters. + # Parameter names are useful for documentation. + # Replacing them with '_' hides useful information. + - linters: [revive] + text: 'unused-parameter: parameter \S+ seems to be unused, consider removing or renaming it as _' + + # Duplicate words are okay in tests. + - linters: [dupword] + path: _test\.go diff --git a/vendor/github.com/alecthomas/kong/README.md b/vendor/github.com/alecthomas/kong/README.md index 35b07c61862..4a86251ca96 100644 --- a/vendor/github.com/alecthomas/kong/README.md +++ b/vendor/github.com/alecthomas/kong/README.md @@ -2,47 +2,48 @@

# Kong is a command-line parser for Go + [![](https://godoc.org/github.com/alecthomas/kong?status.svg)](http://godoc.org/github.com/alecthomas/kong) [![CircleCI](https://img.shields.io/circleci/project/github/alecthomas/kong.svg)](https://circleci.com/gh/alecthomas/kong) [![Go Report Card](https://goreportcard.com/badge/github.com/alecthomas/kong)](https://goreportcard.com/report/github.com/alecthomas/kong) [![Slack chat](https://img.shields.io/static/v1?logo=slack&style=flat&label=slack&color=green&message=gophers)](https://gophers.slack.com/messages/CN9DS8YF3) - - - - -1. [Introduction](#introduction) -1. [Help](#help) - 1. [Help as a user of a Kong application](#help-as-a-user-of-a-kong-application) - 1. [Defining help in Kong](#defining-help-in-kong) - 1. [Showing the _command_'s detailed help](#showing-the-_command_s-detailed-help) - 1. [Showing an _argument_'s detailed help](#showing-an-_argument_s-detailed-help) -1. [Command handling](#command-handling) - 1. [Switch on the command string](#switch-on-the-command-string) - 1. [Attach a `Run(...) error` method to each command](#attach-a-run-error-method-to-each-command) -1. [Hooks: BeforeReset\(\), BeforeResolve\(\), BeforeApply\(\), AfterApply\(\) and the Bind\(\) option](#hooks-beforereset-beforeresolve-beforeapply-afterapply-and-the-bind-option) -1. [Flags](#flags) -1. [Commands and sub-commands](#commands-and-sub-commands) -1. [Branching positional arguments](#branching-positional-arguments) -1. [Positional arguments](#positional-arguments) -1. [Slices](#slices) -1. [Maps](#maps) -1. [Nested data structure](#nested-data-structure) -1. [Custom named decoders](#custom-named-decoders) -1. [Supported field types](#supported-field-types) -1. [Custom decoders \(mappers\)](#custom-decoders-mappers) -1. [Supported tags](#supported-tags) -1. [Plugins](#plugins) -1. [Dynamic Commands](#dynamic-commands) -1. [Variable interpolation](#variable-interpolation) -1. [Validation](#validation) -1. [Modifying Kong's behaviour](#modifying-kongs-behaviour) - 1. [`Name(help)` and `Description(help)` - set the application name description](#namehelp-and-descriptionhelp---set-the-application-name-description) - 1. [`Configuration(loader, paths...)` - load defaults from configuration files](#configurationloader-paths---load-defaults-from-configuration-files) - 1. [`Resolver(...)` - support for default values from external sources](#resolver---support-for-default-values-from-external-sources) - 1. [`*Mapper(...)` - customising how the command-line is mapped to Go values](#mapper---customising-how-the-command-line-is-mapped-to-go-values) - 1. [`ConfigureHelp(HelpOptions)` and `Help(HelpFunc)` - customising help](#configurehelphelpoptions-and-helphelpfunc---customising-help) - 1. [`Bind(...)` - bind values for callback hooks and Run\(\) methods](#bind---bind-values-for-callback-hooks-and-run-methods) - 1. [Other options](#other-options) - - +- [Version 1.0.0 Release](#version-100-release) +- [Introduction](#introduction) +- [Help](#help) + - [Help as a user of a Kong application](#help-as-a-user-of-a-kong-application) + - [Defining help in Kong](#defining-help-in-kong) +- [Command handling](#command-handling) + - [Switch on the command string](#switch-on-the-command-string) + - [Attach a `Run(...) 
error` method to each command](#attach-a-run-error-method-to-each-command) +- [Hooks: BeforeReset(), BeforeResolve(), BeforeApply(), AfterApply() and the Bind() option](#hooks-beforereset-beforeresolve-beforeapply-afterapply-and-the-bind-option) +- [Flags](#flags) +- [Commands and sub-commands](#commands-and-sub-commands) +- [Branching positional arguments](#branching-positional-arguments) +- [Positional arguments](#positional-arguments) +- [Slices](#slices) +- [Maps](#maps) +- [Pointers](#pointers) +- [Nested data structure](#nested-data-structure) +- [Custom named decoders](#custom-named-decoders) +- [Supported field types](#supported-field-types) +- [Custom decoders (mappers)](#custom-decoders-mappers) +- [Supported tags](#supported-tags) +- [Plugins](#plugins) +- [Dynamic Commands](#dynamic-commands) +- [Variable interpolation](#variable-interpolation) +- [Validation](#validation) +- [Modifying Kong's behaviour](#modifying-kongs-behaviour) + - [`Name(help)` and `Description(help)` - set the application name description](#namehelp-and-descriptionhelp---set-the-application-name-description) + - [`Configuration(loader, paths...)` - load defaults from configuration files](#configurationloader-paths---load-defaults-from-configuration-files) + - [`Resolver(...)` - support for default values from external sources](#resolver---support-for-default-values-from-external-sources) + - [`*Mapper(...)` - customising how the command-line is mapped to Go values](#mapper---customising-how-the-command-line-is-mapped-to-go-values) + - [`ConfigureHelp(HelpOptions)` and `Help(HelpFunc)` - customising help](#configurehelphelpoptions-and-helphelpfunc---customising-help) + - [Injecting values into `Run()` methods](#injecting-values-into-run-methods) + - [Other options](#other-options) + +## Version 1.0.0 Release + +Kong has been stable for a long time, so it seemed appropriate to cut a 1.0 release. + +There is one breaking change, [#436](https://github.com/alecthomas/kong/pull/436), which should affect relatively few users. ## Introduction @@ -137,13 +138,14 @@ also be interpolated into the help string. Finally, any command, or argument type implementing the interface `Help() string` will have this function called to retrieve more detail to augment the help tag. This allows for much more descriptive text than can -fit in Go tags. [See _examples/shell/help](./_examples/shell/help) +fit in Go tags. [See \_examples/shell/help](./_examples/shell/help) #### Showing the _command_'s detailed help A command's additional help text is _not_ shown from top-level help, but can be displayed within contextual help: **Top level help** + ```bash $ go run ./_examples/shell/help --help Usage: help @@ -159,6 +161,7 @@ Commands: ``` **Contextual** + ```bash $ go run ./_examples/shell/help echo --help Usage: help echo @@ -180,6 +183,7 @@ Flags: ``` Custom help will only be shown for _positional arguments with named fields_ ([see the README section on positional arguments for more details on what that means](../../../README.md#branching-positional-arguments)) **Contextual argument help** + ```bash $ go run ./_examples/shell/help msg --help Usage: help echo @@ -303,8 +307,8 @@ func main() { ## Hooks: BeforeReset(), BeforeResolve(), BeforeApply(), AfterApply() and the Bind() option -If a node in the grammar has a `BeforeReset(...)`, `BeforeResolve -(...)`, `BeforeApply(...) error` and/or `AfterApply(...) error` method, those +If a node in the CLI, or any of its embedded fields, has a `BeforeReset(...) error`, `BeforeResolve +(...)
error`, `BeforeApply(...) error` and/or `AfterApply(...) error` method, those methods will be called before values are reset, before validation/assignment, and after validation/assignment, respectively. @@ -329,7 +333,7 @@ var cli struct { func main() { // Debug logger going to discard. - logger := log.New(ioutil.Discard, "", log.LstdFlags) + logger := log.New(io.Discard, "", log.LstdFlags) ctx := kong.Parse(&cli, kong.Bind(logger)) @@ -339,7 +343,7 @@ func main() { ## Flags -Any [mapped](#mapper---customising-how-the-command-line-is-mapped-to-go-values) field in the command structure *not* tagged with `cmd` or `arg` will be a flag. Flags are optional by default. +Any [mapped](#mapper---customising-how-the-command-line-is-mapped-to-go-values) field in the command structure _not_ tagged with `cmd` or `arg` will be a flag. Flags are optional by default. eg. The command-line `app [--flag="foo"]` can be represented by the following. @@ -479,25 +483,22 @@ Kong includes a number of builtin custom type mappers. These can be used by specifying the tag `type:""`. They are registered with the option function `NamedMapper(name, mapper)`. -| Name | Description | -| -------------- | -------------------------------------------------------------------------------------------------- | -| `path` | A path. ~ expansion is applied. `-` is accepted for stdout, and will be passed unaltered. | -| `existingfile` | An existing file. ~ expansion is applied. `-` is accepted for stdin, and will be passed unaltered. | -| `existingdir` | An existing directory. ~ expansion is applied. | -| `counter` | Increment a numeric field. Useful for `-vvv`. Can accept `-s`, `--long` or `--long=N`. | +| Name | Description | +| -------------- | ---------------------------------------------------------------------------------------------------------------------- | +| `path` | A path. ~ expansion is applied. `-` is accepted for stdout, and will be passed unaltered. | +| `existingfile` | An existing file. ~ expansion is applied. `-` is accepted for stdin, and will be passed unaltered. | +| `existingdir` | An existing directory. ~ expansion is applied. | +| `counter` | Increment a numeric field. Useful for `-vvv`. Can accept `-s`, `--long` or `--long=N`. | | `filecontent` | Read the file at path into the field. ~ expansion is applied. `-` is accepted for stdin, and will be passed unaltered. | - Slices and maps treat type tags specially. For slices, the `type:""` tag specifies the element type. For maps, the tag has the format `tag:"[]:[]"` where either may be omitted. ## Supported field types - ## Custom decoders (mappers) - Any field implementing `encoding.TextUnmarshaler` or `json.Unmarshaler` will use those interfaces for decoding values. Kong also includes builtin support for many common Go types: @@ -522,36 +523,43 @@ Tags can be in two forms: Both can coexist with standard Tag parsing. 
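For example, a struct mixing the two tag forms might look like this (an illustrative sketch, not an excerpt from the Kong docs); the individual tags are listed in the table that follows:

```go
package main

import (
	"fmt"

	"github.com/alecthomas/kong"
)

// Both tag forms coexist on one struct: Paths uses bare key:"value" tags,
// while Level uses the single kong:"..." form.
var cli struct {
	Paths []string `arg:"" optional:"" help:"Paths to list." type:"path"`
	Level int      `kong:"short='l',default='1',help='Verbosity level.'"`
}

func main() {
	kong.Parse(&cli)
	fmt.Println(cli.Level, cli.Paths)
}
```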
| Tag | Description | -|----------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | | `cmd:""` | If present, struct is a command. | | `arg:""` | If present, field is an argument. Required by default. | | `env:"X,Y,..."` | Specify envars to use for default value. The envs are resolved in the declared order. The first value found is used. | | `name:"X"` | Long name, for overriding field name. | | `help:"X"` | Help text. | | `type:"X"` | Specify [named types](#custom-named-decoders) to use. | -| `placeholder:"X"` | Placeholder text. | +| `placeholder:"X"` | Placeholder input, if flag. e.g. `` `placeholder:""` `` will show `--flag-name=` when displaying help. | | `default:"X"` | Default value. | | `default:"1"` | On a command, make it the default. | | `default:"withargs"` | On a command, make it the default and allow args/flags from that command | | `short:"X"` | Short name, if flag. | -| `aliases:"X,Y"` | One or more aliases (for cmd). | +| `aliases:"X,Y"` | One or more aliases (for cmd or flag). | | `required:""` | If present, flag/arg is required. | | `optional:""` | If present, flag/arg is optional. | | `hidden:""` | If present, command or flag is hidden. | | `negatable:""` | If present on a `bool` field, supports prefixing a flag with `--no-` to invert the default value | +| `negatable:"X"` | If present on a `bool` field, supports `--X` to invert the default value | | `format:"X"` | Format for parsing input, if supported. | | `sep:"X"` | Separator for sequences (defaults to ","). May be `none` to disable splitting. | | `mapsep:"X"` | Separator for maps (defaults to ";"). May be `none` to disable splitting. | | `enum:"X,Y,..."` | Set of valid values allowed for this flag. An enum field must be `required` or have a valid `default`. | | `group:"X"` | Logical group for a flag or command. | | `xor:"X,Y,..."` | Exclusive OR groups for flags. Only one flag in the group can be used which is restricted within the same command. When combined with `required`, at least one of the `xor` group will be required. | +| `and:"X,Y,..."` | AND groups for flags. All flags in the group must be used in the same command. When combined with `required`, all flags in the group will be required. | | `prefix:"X"` | Prefix for all sub-flags. | | `envprefix:"X"` | Envar prefix for all sub-flags. | +| `xorprefix:"X"` | Prefix for all sub-flags in XOR/AND groups. | | `set:"K=V"` | Set a variable for expansion by child elements. Multiples can occur. | | `embed:""` | If present, this field's children will be embedded in the parent. Useful for composition. | -| `passthrough:""` | If present on a positional argument, it stops flag parsing when encountered, as if `--` was processed before. Useful for external command wrappers, like `exec`. On a command it requires that the command contains only one argument of type `[]string` which is then filled with everything following the command, unparsed. 
| +| `passthrough:""`[^1] | If present on a positional argument, it stops flag parsing when encountered, as if `--` was processed before. Useful for external command wrappers, like `exec`. On a command it requires that the command contains only one argument of type `[]string` which is then filled with everything following the command, unparsed. | | `-` | Ignore the field. Useful for adding non-CLI fields to a configuration struct. e.g `` `kong:"-"` `` | +[^1]: The mode can be `partial` or `all` (the default). `all` will pass through all arguments, including +flags. `partial` will validate flags until the first positional argument is encountered, then pass through all remaining +positional arguments. + ## Plugins Kong CLIs can be extended by embedding the `kong.Plugin` type and populating it with pointers to Kong annotated structs. For example: @@ -622,21 +630,26 @@ func main() { ## Validation Kong does validation on the structure of a command-line, but also supports -extensible validation. Any node in the tree may implement the following -interface: +extensible validation. Any node in the tree may implement either of the following interfaces: ```go type Validatable interface { Validate() error } - ``` +``` + +```go +type Validatable interface { + Validate(kctx *kong.Context) error + } +``` If one of these nodes is in the active command-line it will be called during normal validation. ## Modifying Kong's behaviour -Each Kong parser can be configured via functional options passed to `New(cli interface{}, options...Option)`. +Each Kong parser can be configured via functional options passed to `New(cli any, options...Option)`. The full set of options can be found [here](https://godoc.org/github.com/alecthomas/kong#Option). @@ -660,6 +673,13 @@ kong.Parse(&cli, kong.Configuration(kong.JSON, "/etc/myapp.json", "~/.myapp.json [See the tests](https://github.com/alecthomas/kong/blob/master/resolver_test.go#L206) for an example of how the JSON file is structured. +#### List of Configuration Loaders + +- [YAML](https://github.com/alecthomas/kong-yaml) +- [HCL](https://github.com/alecthomas/kong-hcl) +- [TOML](https://github.com/alecthomas/kong-toml) +- [JSON](https://github.com/alecthomas/kong) + ### `Resolver(...)` - support for default values from external sources Resolvers are Kong's extension point for providing default values from external sources. As an example, support for environment variables via the `env` tag is provided by a resolver. There's also a builtin resolver for JSON configuration files. @@ -687,21 +707,26 @@ All builtin Go types (as well as a bunch of useful stdlib types like `time.Time` 1. `NamedMapper(string, Mapper)` and using the tag key `type:""`. 2. `KindMapper(reflect.Kind, Mapper)`. 3. `TypeMapper(reflect.Type, Mapper)`. -4. `ValueMapper(interface{}, Mapper)`, passing in a pointer to a field of the grammar. +4. `ValueMapper(any, Mapper)`, passing in a pointer to a field of the grammar. ### `ConfigureHelp(HelpOptions)` and `Help(HelpFunc)` - customising help The default help output is usually sufficient, but if not there are two solutions. 1. Use `ConfigureHelp(HelpOptions)` to configure how help is formatted (see [HelpOptions](https://godoc.org/github.com/alecthomas/kong#HelpOptions) for details). 2.
Custom help can be wired into Kong via the `Help(HelpFunc)` option. The `HelpFunc` is passed a `Context`, which contains the parsed context for the current command-line. See the implementation of `DefaultHelpPrinter` for an example. 3. Use `ValueFormatter(HelpValueFormatter)` if you want to just customize the help text that is accompanied by flags and arguments. 4. Use `Groups([]Group)` if you want to customize group titles or add a header. -### `Bind(...)` - bind values for callback hooks and Run() methods +### Injecting values into `Run()` methods -See the [section on hooks](#hooks-beforeresolve-beforeapply-afterapply-and-the-bind-option) for details. +There are several ways to inject values into `Run()` methods: + +1. Use `Bind()` to bind values directly. +2. Use `BindTo()` to bind values to an interface type. +3. Use `BindToProvider()` to bind values to a function that provides the value. +4. Implement `Provide() error` methods on the command structure. ### Other options -The full set of options can be found [here](https://godoc.org/github.com/alecthomas/kong#Option). \ No newline at end of file +The full set of options can be found [here](https://godoc.org/github.com/alecthomas/kong#Option). diff --git a/vendor/github.com/alecthomas/kong/build.go b/vendor/github.com/alecthomas/kong/build.go index e23c11518ec..5d17f53fc86 100644 --- a/vendor/github.com/alecthomas/kong/build.go +++ b/vendor/github.com/alecthomas/kong/build.go @@ -9,9 +9,9 @@ import ( // Plugins are dynamically embedded command-line structures. // // Each element in the Plugins list *must* be a pointer to a structure. -type Plugins []interface{} +type Plugins []any -func build(k *Kong, ast interface{}) (app *Application, err error) { +func build(k *Kong, ast any) (app *Application, err error) { v := reflect.ValueOf(ast) iv := reflect.Indirect(v) if v.Kind() != reflect.Ptr || iv.Kind() != reflect.Struct { @@ -51,6 +51,9 @@ type flattenedField struct { func flattenedFields(v reflect.Value, ptag *Tag) (out []flattenedField, err error) { v = reflect.Indirect(v) + if v.Kind() != reflect.Struct { + return out, nil + } for i := 0; i < v.NumField(); i++ { ft := v.Type().Field(i) fv := v.Field(i) @@ -68,6 +71,7 @@ func flattenedFields(v reflect.Value, ptag *Tag) (out []flattenedField, err erro // Accumulate prefixes. tag.Prefix = ptag.Prefix + tag.Prefix tag.EnvPrefix = ptag.EnvPrefix + tag.EnvPrefix + tag.XorPrefix = ptag.XorPrefix + tag.XorPrefix // Combine parent vars. tag.Vars = ptag.Vars.CloneWith(tag.Vars) // Command and embedded structs can be pointers, so we hydrate them now. @@ -108,7 +112,7 @@ func flattenedFields(v reflect.Value, ptag *Tag) (out []flattenedField, err erro // Build a Node in the Kong data model. // // "v" is the value to create the node from, "typ" is the output Node type. -func buildNode(k *Kong, v reflect.Value, typ NodeType, tag *Tag, seenFlags map[string]bool) (*Node, error) { +func buildNode(k *Kong, v reflect.Value, typ NodeType, tag *Tag, seenFlags map[string]bool) (*Node, error) { //nolint:gocyclo node := &Node{ Type: typ, Target: v, @@ -144,6 +148,18 @@ MAIN: } } + if len(tag.Xor) != 0 { + for i := range tag.Xor { + tag.Xor[i] = tag.XorPrefix + tag.Xor[i] + } + } + + if len(tag.And) != 0 { + for i := range tag.And { + tag.And[i] = tag.XorPrefix + tag.And[i] + } + } + // Nested structs are either commands or args, unless they implement the Mapper interface. 
if field.value.Kind() == reflect.Struct && (tag.Cmd || tag.Arg) && k.registry.ForValue(fv) == nil { typ := CommandNode @@ -170,6 +186,12 @@ MAIN: if flag.Short != 0 { delete(seenFlags, "-"+string(flag.Short)) } + if negFlag := negatableFlagName(flag.Name, flag.Tag.Negatable); negFlag != "" { + delete(seenFlags, negFlag) + } + for _, aflag := range flag.Aliases { + delete(seenFlags, "--"+aflag) + } } if err := validatePositionalArguments(node); err != nil { @@ -272,17 +294,18 @@ func buildField(k *Kong, node *Node, v reflect.Value, ft reflect.StructField, fv } value := &Value{ - Name: name, - Help: tag.Help, - OrigHelp: tag.Help, - HasDefault: tag.HasDefault, - Default: tag.Default, - DefaultValue: reflect.New(fv.Type()).Elem(), - Mapper: mapper, - Tag: tag, - Target: fv, - Enum: tag.Enum, - Passthrough: tag.Passthrough, + Name: name, + Help: tag.Help, + OrigHelp: tag.Help, + HasDefault: tag.HasDefault, + Default: tag.Default, + DefaultValue: reflect.New(fv.Type()).Elem(), + Mapper: mapper, + Tag: tag, + Target: fv, + Enum: tag.Enum, + Passthrough: tag.Passthrough, + PassthroughMode: tag.PassthroughMode, // Flags are optional by default, and args are required by default. Required: (!tag.Arg && tag.Required) || (tag.Arg && !tag.Optional), @@ -296,19 +319,35 @@ func buildField(k *Kong, node *Node, v reflect.Value, ft reflect.StructField, fv return failField(v, ft, "duplicate flag --%s", value.Name) } seenFlags["--"+value.Name] = true + for _, alias := range tag.Aliases { + aliasFlag := "--" + alias + if seenFlags[aliasFlag] { + return failField(v, ft, "duplicate flag %s", aliasFlag) + } + seenFlags[aliasFlag] = true + } if tag.Short != 0 { if seenFlags["-"+string(tag.Short)] { return failField(v, ft, "duplicate short flag -%c", tag.Short) } seenFlags["-"+string(tag.Short)] = true } + if tag.Negatable != "" { + negFlag := negatableFlagName(value.Name, tag.Negatable) + if seenFlags[negFlag] { + return failField(v, ft, "duplicate negation flag %s", negFlag) + } + seenFlags[negFlag] = true + } flag := &Flag{ Value: value, + Aliases: tag.Aliases, Short: tag.Short, PlaceHolder: tag.PlaceHolder, Envs: tag.Envs, Group: buildGroupForKey(k, tag.Group), Xor: tag.Xor, + And: tag.And, Hidden: tag.Hidden, } value.Flag = flag diff --git a/vendor/github.com/alecthomas/kong/callbacks.go b/vendor/github.com/alecthomas/kong/callbacks.go index 3a8a45f2f65..c1fac817814 100644 --- a/vendor/github.com/alecthomas/kong/callbacks.go +++ b/vendor/github.com/alecthomas/kong/callbacks.go @@ -6,7 +6,10 @@ import ( "strings" ) -type bindings map[reflect.Type]func() (reflect.Value, error) +// A map of type to function that returns a value of that type. +// +// The function should have the signature func(...) (T, error). Arguments are recursively resolved. 
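A sketch of what the recursive resolution enables from the caller's side (the `Config` and `Client` types are hypothetical; `BindToProvider` is the real entry point):

```go
package main

import "github.com/alecthomas/kong"

// Hypothetical types used only for illustration.
type Config struct{ Endpoint string }
type Client struct{ cfg *Config }

var cli struct{}

func main() {
	ctx := kong.Parse(&cli)
	// Provider functions may now take parameters; each parameter is resolved
	// from the other bindings, so building *Client first invokes the *Config
	// provider.
	ctx.FatalIfErrorf(ctx.BindToProvider(func() (*Config, error) {
		return &Config{Endpoint: "localhost:9095"}, nil
	}))
	ctx.FatalIfErrorf(ctx.BindToProvider(func(cfg *Config) (*Client, error) {
		return &Client{cfg: cfg}, nil
	}))
	// A command's Run(client *Client) error method would now receive the built client.
}
```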
+type bindings map[reflect.Type]any func (b bindings) String() string { out := []string{} @@ -16,35 +19,26 @@ func (b bindings) String() string { return "bindings{" + strings.Join(out, ", ") + "}" } -func (b bindings) add(values ...interface{}) bindings { +func (b bindings) add(values ...any) bindings { for _, v := range values { v := v - b[reflect.TypeOf(v)] = func() (reflect.Value, error) { return reflect.ValueOf(v), nil } + b[reflect.TypeOf(v)] = func() (any, error) { return v, nil } } return b } -func (b bindings) addTo(impl, iface interface{}) { - valueOf := reflect.ValueOf(impl) - b[reflect.TypeOf(iface).Elem()] = func() (reflect.Value, error) { return valueOf, nil } +func (b bindings) addTo(impl, iface any) { + b[reflect.TypeOf(iface).Elem()] = func() (any, error) { return impl, nil } } -func (b bindings) addProvider(provider interface{}) error { +func (b bindings) addProvider(provider any) error { pv := reflect.ValueOf(provider) t := pv.Type() - if t.Kind() != reflect.Func || t.NumIn() != 0 || t.NumOut() != 2 || t.Out(1) != reflect.TypeOf((*error)(nil)).Elem() { - return fmt.Errorf("%T must be a function with the signature func()(T, error)", provider) + if t.Kind() != reflect.Func || t.NumOut() != 2 || t.Out(1) != reflect.TypeOf((*error)(nil)).Elem() { + return fmt.Errorf("%T must be a function with the signature func(...)(T, error)", provider) } rt := pv.Type().Out(0) - b[rt] = func() (reflect.Value, error) { - out := pv.Call(nil) - errv := out[1] - var err error - if !errv.IsNil() { - err = errv.Interface().(error) // nolint - } - return out[0], err - } + b[rt] = provider return nil } @@ -74,32 +68,50 @@ func getMethod(value reflect.Value, name string) reflect.Value { return method } +// Get methods from the given value and any embedded fields. 
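In practice this is what allows a hook declared on an embedded field to be discovered; a brief sketch with an illustrative `Common` type:

```go
package main

import (
	"fmt"

	"github.com/alecthomas/kong"
)

// Common is embedded below; with getMethods, its AfterApply hook is found
// even though the method is declared on the embedded type.
type Common struct {
	Verbose bool `help:"Enable verbose output."`
}

func (c *Common) AfterApply() error {
	fmt.Println("verbose:", c.Verbose)
	return nil
}

var cli struct {
	Common
}

func main() {
	kong.Parse(&cli)
}
```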
+func getMethods(value reflect.Value, name string) []reflect.Value { + // Collect all possible receivers + receivers := []reflect.Value{value} + if value.Kind() == reflect.Ptr { + value = value.Elem() + } + if value.Kind() == reflect.Struct { + t := value.Type() + for i := 0; i < value.NumField(); i++ { + field := value.Field(i) + fieldType := t.Field(i) + if fieldType.IsExported() && fieldType.Anonymous { + receivers = append(receivers, field) + } + } + } + // Search all receivers for methods + var methods []reflect.Value + for _, receiver := range receivers { + if method := getMethod(receiver, name); method.IsValid() { + methods = append(methods, method) + } + } + return methods +} + func callFunction(f reflect.Value, bindings bindings) error { if f.Kind() != reflect.Func { return fmt.Errorf("expected function, got %s", f.Type()) } - in := []reflect.Value{} t := f.Type() if t.NumOut() != 1 || !t.Out(0).Implements(callbackReturnSignature) { return fmt.Errorf("return value of %s must implement \"error\"", t) } - for i := 0; i < t.NumIn(); i++ { - pt := t.In(i) - if argf, ok := bindings[pt]; ok { - argv, err := argf() - if err != nil { - return err - } - in = append(in, argv) - } else { - return fmt.Errorf("couldn't find binding of type %s for parameter %d of %s(), use kong.Bind(%s)", pt, i, t, pt) - } + out, err := callAnyFunction(f, bindings) + if err != nil { + return err } - out := f.Call(in) - if out[0].IsNil() { + ferr := out[0] + if ferrv := reflect.ValueOf(ferr); !ferrv.IsValid() || ((ferrv.Kind() == reflect.Interface || ferrv.Kind() == reflect.Pointer) && ferrv.IsNil()) { return nil } - return out[0].Interface().(error) // nolint + return ferr.(error) //nolint:forcetypeassert } func callAnyFunction(f reflect.Value, bindings bindings) (out []any, err error) { @@ -110,15 +122,19 @@ func callAnyFunction(f reflect.Value, bindings bindings) (out []any, err error) t := f.Type() for i := 0; i < t.NumIn(); i++ { pt := t.In(i) - if argf, ok := bindings[pt]; ok { - argv, err := argf() - if err != nil { - return nil, err - } - in = append(in, argv) - } else { + argf, ok := bindings[pt] + if !ok { return nil, fmt.Errorf("couldn't find binding of type %s for parameter %d of %s(), use kong.Bind(%s)", pt, i, t, pt) } + // Recursively resolve binding functions. 
+ argv, err := callAnyFunction(reflect.ValueOf(argf), bindings) + if err != nil { + return nil, fmt.Errorf("%s: %w", pt, err) + } + if ferrv := reflect.ValueOf(argv[len(argv)-1]); ferrv.IsValid() && !ferrv.IsNil() { + return nil, ferrv.Interface().(error) //nolint:forcetypeassert + } + in = append(in, reflect.ValueOf(argv[0])) } outv := f.Call(in) out = make([]any, len(outv)) @@ -127,11 +143,3 @@ func callAnyFunction(f reflect.Value, bindings bindings) (out []any, err error) } return out, nil } - -func callMethod(name string, v, f reflect.Value, bindings bindings) error { - err := callFunction(f, bindings) - if err != nil { - return fmt.Errorf("%s.%s(): %w", v.Type(), name, err) - } - return nil -} diff --git a/vendor/github.com/alecthomas/kong/camelcase.go b/vendor/github.com/alecthomas/kong/camelcase.go index acf29f75770..a955b16ba21 100644 --- a/vendor/github.com/alecthomas/kong/camelcase.go +++ b/vendor/github.com/alecthomas/kong/camelcase.go @@ -13,37 +13,37 @@ import ( // // Examples // -// "" => [""] -// "lowercase" => ["lowercase"] -// "Class" => ["Class"] -// "MyClass" => ["My", "Class"] -// "MyC" => ["My", "C"] -// "HTML" => ["HTML"] -// "PDFLoader" => ["PDF", "Loader"] -// "AString" => ["A", "String"] -// "SimpleXMLParser" => ["Simple", "XML", "Parser"] -// "vimRPCPlugin" => ["vim", "RPC", "Plugin"] -// "GL11Version" => ["GL", "11", "Version"] -// "99Bottles" => ["99", "Bottles"] -// "May5" => ["May", "5"] -// "BFG9000" => ["BFG", "9000"] -// "BöseÜberraschung" => ["Böse", "Überraschung"] -// "Two spaces" => ["Two", " ", "spaces"] -// "BadUTF8\xe2\xe2\xa1" => ["BadUTF8\xe2\xe2\xa1"] +// "" => [""] +// "lowercase" => ["lowercase"] +// "Class" => ["Class"] +// "MyClass" => ["My", "Class"] +// "MyC" => ["My", "C"] +// "HTML" => ["HTML"] +// "PDFLoader" => ["PDF", "Loader"] +// "AString" => ["A", "String"] +// "SimpleXMLParser" => ["Simple", "XML", "Parser"] +// "vimRPCPlugin" => ["vim", "RPC", "Plugin"] +// "GL11Version" => ["GL", "11", "Version"] +// "99Bottles" => ["99", "Bottles"] +// "May5" => ["May", "5"] +// "BFG9000" => ["BFG", "9000"] +// "BöseÜberraschung" => ["Böse", "Überraschung"] +// "Two spaces" => ["Two", " ", "spaces"] +// "BadUTF8\xe2\xe2\xa1" => ["BadUTF8\xe2\xe2\xa1"] // // Splitting rules // -// 1) If string is not valid UTF-8, return it without splitting as +// 1. If string is not valid UTF-8, return it without splitting as // single item array. -// 2) Assign all unicode characters into one of 4 sets: lower case +// 2. Assign all unicode characters into one of 4 sets: lower case // letters, upper case letters, numbers, and all other characters. -// 3) Iterate through characters of string, introducing splits +// 3. Iterate through characters of string, introducing splits // between adjacent characters that belong to different sets. -// 4) Iterate through array of split strings, and if a given string +// 4. 
Iterate through array of split strings, and if a given string // is upper case: -// if subsequent string is lower case: -// move last character of upper case string to beginning of -// lower case string +// if subsequent string is lower case: +// move last character of upper case string to beginning of +// lower case string func camelCase(src string) (entries []string) { // don't split invalid utf8 if !utf8.ValidString(src) { diff --git a/vendor/github.com/alecthomas/kong/context.go b/vendor/github.com/alecthomas/kong/context.go index 0ec10f47d17..b6a56e33cc5 100644 --- a/vendor/github.com/alecthomas/kong/context.go +++ b/vendor/github.com/alecthomas/kong/context.go @@ -102,7 +102,7 @@ func Trace(k *Kong, args []string) (*Context, error) { } // Bind adds bindings to the Context. -func (c *Context) Bind(args ...interface{}) { +func (c *Context) Bind(args ...any) { c.bindings.add(args...) } @@ -111,7 +111,7 @@ func (c *Context) Bind(args ...interface{}) { // This will typically have to be called like so: // // BindTo(impl, (*MyInterface)(nil)) -func (c *Context) BindTo(impl, iface interface{}) { +func (c *Context) BindTo(impl, iface any) { c.bindings.addTo(impl, iface) } @@ -119,7 +119,7 @@ func (c *Context) BindTo(impl, iface interface{}) { // // This is useful when the Run() function of different commands require different values that may // not all be initialisable from the main() function. -func (c *Context) BindToProvider(provider interface{}) error { +func (c *Context) BindToProvider(provider any) error { return c.bindings.addProvider(provider) } @@ -161,7 +161,7 @@ func (c *Context) Empty() bool { } // Validate the current context. -func (c *Context) Validate() error { // nolint: gocyclo +func (c *Context) Validate() error { //nolint: gocyclo err := Visit(c.Model, func(node Visitable, next Next) error { switch node := node.(type) { case *Value: @@ -201,15 +201,18 @@ func (c *Context) Validate() error { // nolint: gocyclo case *Application: value = node.Target - desc = node.Name + desc = "" case *Node: value = node.Target desc = node.Path() } if validate := isValidatable(value); validate != nil { - if err := validate.Validate(); err != nil { - return fmt.Errorf("%s: %w", desc, err) + if err := validate.Validate(c); err != nil { + if desc != "" { + return fmt.Errorf("%s: %w", desc, err) + } + return err } } } @@ -256,7 +259,7 @@ func (c *Context) Validate() error { // nolint: gocyclo if err := checkMissingPositionals(positionals, node.Positional); err != nil { return err } - if err := checkXorDuplicates(c.Path); err != nil { + if err := checkXorDuplicatedAndAndMissing(c.Path); err != nil { return err } @@ -303,7 +306,7 @@ func (c *Context) AddResolver(resolver Resolver) { } // FlagValue returns the set value of a flag if it was encountered and exists, or its default value. -func (c *Context) FlagValue(flag *Flag) interface{} { +func (c *Context) FlagValue(flag *Flag) any { for _, trace := range c.Path { if trace.Flag == flag { v, ok := c.values[trace.Flag.Value] @@ -344,7 +347,8 @@ func (c *Context) endParsing() { } } -func (c *Context) trace(node *Node) (err error) { // nolint: gocyclo +//nolint:maintidx +func (c *Context) trace(node *Node) (err error) { //nolint: gocyclo positional := 0 node.Active = true @@ -374,15 +378,19 @@ func (c *Context) trace(node *Node) (err error) { // nolint: gocyclo switch { case v == "-": fallthrough - default: // nolint + default: //nolint c.scan.Pop() c.scan.PushTyped(token.Value, PositionalArgumentToken) // Indicates end of parsing. 
All remaining arguments are treated as positional arguments only. case v == "--": - c.scan.Pop() c.endParsing() + // Pop the -- token unless the next positional argument accepts passthrough arguments. + if !(positional < len(node.Positional) && node.Positional[positional].Passthrough) { + c.scan.Pop() + } + // Long flag. case strings.HasPrefix(v, "--"): c.scan.Pop() @@ -417,12 +425,22 @@ func (c *Context) trace(node *Node) (err error) { // nolint: gocyclo case FlagToken: if err := c.parseFlag(flags, token.String()); err != nil { - return err + if isUnknownFlagError(err) && positional < len(node.Positional) && node.Positional[positional].PassthroughMode == PassThroughModeAll { + c.scan.Pop() + c.scan.PushTyped(token.String(), PositionalArgumentToken) + } else { + return err + } } case ShortFlagToken: if err := c.parseFlag(flags, token.String()); err != nil { - return err + if isUnknownFlagError(err) && positional < len(node.Positional) && node.Positional[positional].PassthroughMode == PassThroughModeAll { + c.scan.Pop() + c.scan.PushTyped(token.String(), PositionalArgumentToken) + } else { + return err + } } case FlagValueToken: @@ -554,7 +572,7 @@ func (c *Context) Resolve() error { } // Pick the last resolved value. - var selected interface{} + var selected any for _, resolver := range resolvers { s, err := resolver.Resolve(c, path, flag) if err != nil { @@ -681,20 +699,29 @@ func flipBoolValue(value reflect.Value) error { func (c *Context) parseFlag(flags []*Flag, match string) (err error) { candidates := []string{} + for _, flag := range flags { long := "--" + flag.Name - short := "-" + string(flag.Short) - neg := "--no-" + flag.Name + matched := long == match candidates = append(candidates, long) if flag.Short != 0 { + short := "-" + string(flag.Short) + matched = matched || (short == match) candidates = append(candidates, short) } - if short != match && long != match && !(match == neg && flag.Tag.Negatable) { + for _, alias := range flag.Aliases { + alias = "--" + alias + matched = matched || (alias == match) + candidates = append(candidates, alias) + } + + neg := negatableFlagName(flag.Name, flag.Tag.Negatable) + if !matched && match != neg { continue } // Found a matching flag. c.scan.Pop() - if match == neg && flag.Tag.Negatable { + if match == neg && flag.Tag.Negatable != "" { flag.Negated = true } err := flag.Parse(c.scan, c.getValue(flag.Value)) @@ -716,13 +743,23 @@ func (c *Context) parseFlag(flags []*Flag, match string) (err error) { c.Path = append(c.Path, &Path{Flag: flag}) return nil } - return findPotentialCandidates(match, candidates, "unknown flag %s", match) + return &unknownFlagError{Cause: findPotentialCandidates(match, candidates, "unknown flag %s", match)} } +func isUnknownFlagError(err error) bool { + var unknown *unknownFlagError + return errors.As(err, &unknown) +} + +type unknownFlagError struct{ Cause error } + +func (e *unknownFlagError) Unwrap() error { return e.Cause } +func (e *unknownFlagError) Error() string { return e.Cause.Error() } + // Call an arbitrary function filling arguments with bound values. 
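Usage looks roughly like this (the `greeting` bind is illustrative):

```go
package main

import (
	"fmt"

	"github.com/alecthomas/kong"
)

var cli struct {
	Name string `help:"Name to greet." default:"world"`
}

func main() {
	ctx := kong.Parse(&cli)
	// Parameters are filled from the bindings: *kong.Context is built in, and
	// the trailing binds supply the string argument.
	out, err := ctx.Call(func(k *kong.Context, greeting string) string {
		return fmt.Sprintf("%s, %s (app %s)", greeting, cli.Name, k.Model.Name)
	}, "hello")
	ctx.FatalIfErrorf(err)
	fmt.Println(out[0])
}
```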
-func (c *Context) Call(fn any, binds ...interface{}) (out []interface{}, err error) { +func (c *Context) Call(fn any, binds ...any) (out []any, err error) { fv := reflect.ValueOf(fn) - bindings := c.Kong.bindings.clone().add(binds...).add(c).merge(c.bindings) //nolint:govet + bindings := c.Kong.bindings.clone().add(binds...).add(c).merge(c.bindings) return callAnyFunction(fv, bindings) } @@ -732,7 +769,7 @@ func (c *Context) Call(fn any, binds ...interface{}) (out []interface{}, err err // // Any passed values will be bindable to arguments of the target Run() method. Additionally, // all parent nodes in the command structure will be bound. -func (c *Context) RunNode(node *Node, binds ...interface{}) (err error) { +func (c *Context) RunNode(node *Node, binds ...any) (err error) { type targetMethod struct { node *Node method reflect.Value @@ -745,6 +782,19 @@ func (c *Context) RunNode(node *Node, binds ...interface{}) (err error) { methodBinds = methodBinds.clone() for p := node; p != nil; p = p.Parent { methodBinds = methodBinds.add(p.Target.Addr().Interface()) + // Try value and pointer to value. + for _, p := range []reflect.Value{p.Target, p.Target.Addr()} { + t := p.Type() + for i := 0; i < p.NumMethod(); i++ { + methodt := t.Method(i) + if strings.HasPrefix(methodt.Name, "Provide") { + method := p.Method(i) + if err := methodBinds.addProvider(method.Interface()); err != nil { + return fmt.Errorf("%s.%s: %w", t.Name(), methodt.Name, err) + } + } + } + } } if method.IsValid() { methods = append(methods, targetMethod{node, method, methodBinds}) @@ -753,13 +803,8 @@ func (c *Context) RunNode(node *Node, binds ...interface{}) (err error) { if len(methods) == 0 { return fmt.Errorf("no Run() method found in hierarchy of %s", c.Selected().Summary()) } - _, err = c.Apply() - if err != nil { - return err - } - for _, method := range methods { - if err = callMethod("Run", method.node.Target, method.method, method.binds); err != nil { + if err = callFunction(method.method, method.binds); err != nil { return err } } @@ -770,21 +815,27 @@ func (c *Context) RunNode(node *Node, binds ...interface{}) (err error) { // // Any passed values will be bindable to arguments of the target Run() method. Additionally, // all parent nodes in the command structure will be bound. -func (c *Context) Run(binds ...interface{}) (err error) { +func (c *Context) Run(binds ...any) (err error) { node := c.Selected() if node == nil { - if len(c.Path) > 0 { - selected := c.Path[0].Node() - if selected.Type == ApplicationNode { - method := getMethod(selected.Target, "Run") - if method.IsValid() { - return c.RunNode(selected, binds...) - } + if len(c.Path) == 0 { + return fmt.Errorf("no command selected") + } + selected := c.Path[0].Node() + if selected.Type == ApplicationNode { + method := getMethod(selected.Target, "Run") + if method.IsValid() { + node = selected } } - return fmt.Errorf("no command selected") + + if node == nil { + return fmt.Errorf("no command selected") + } } - return c.RunNode(node, binds...) + runErr := c.RunNode(node, binds...) + err = c.Kong.applyHook(c, "AfterRun") + return errors.Join(runErr, err) } // PrintUsage to Kong's stdout. 
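Together with the `Provide`-prefix discovery added to `RunNode` above, a command can supply its own Run dependencies; a sketch with a hypothetical `Greeter` type:

```go
package main

import (
	"fmt"

	"github.com/alecthomas/kong"
)

// Greeter is a hypothetical dependency constructed by the command itself.
type Greeter struct{ Prefix string }

type HelloCmd struct {
	Name string `arg:"" optional:"" default:"world" help:"Who to greet."`
}

// ProvideGreeter is discovered because its name starts with "Provide"; its
// result becomes bindable to Run parameters in the same node hierarchy.
func (h *HelloCmd) ProvideGreeter() (*Greeter, error) {
	return &Greeter{Prefix: "hello"}, nil
}

func (h *HelloCmd) Run(g *Greeter) error {
	fmt.Printf("%s, %s\n", g.Prefix, h.Name)
	return nil
}

var cli struct {
	Hello HelloCmd `cmd:"" help:"Greet someone."`
}

func main() {
	ctx := kong.Parse(&cli)
	ctx.FatalIfErrorf(ctx.Run())
}
```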
@@ -799,23 +850,35 @@ func (c *Context) PrintUsage(summary bool) error { func checkMissingFlags(flags []*Flag) error { xorGroupSet := map[string]bool{} xorGroup := map[string][]string{} + andGroupSet := map[string]bool{} + andGroup := map[string][]string{} missing := []string{} + andGroupRequired := getRequiredAndGroupMap(flags) for _, flag := range flags { + for _, and := range flag.And { + flag.Required = andGroupRequired[and] + } if flag.Set { for _, xor := range flag.Xor { xorGroupSet[xor] = true } + for _, and := range flag.And { + andGroupSet[and] = true + } } if !flag.Required || flag.Set { continue } - if len(flag.Xor) > 0 { + if len(flag.Xor) > 0 || len(flag.And) > 0 { for _, xor := range flag.Xor { if xorGroupSet[xor] { continue } xorGroup[xor] = append(xorGroup[xor], flag.Summary()) } + for _, and := range flag.And { + andGroup[and] = append(andGroup[and], flag.Summary()) + } } else { missing = append(missing, flag.Summary()) } @@ -825,6 +888,11 @@ func checkMissingFlags(flags []*Flag) error { missing = append(missing, strings.Join(flags, " or ")) } } + for _, flags := range andGroup { + if len(flags) > 1 { + missing = append(missing, strings.Join(flags, " and ")) + } + } if len(missing) == 0 { return nil @@ -835,6 +903,18 @@ func checkMissingFlags(flags []*Flag) error { return fmt.Errorf("missing flags: %s", strings.Join(missing, ", ")) } +func getRequiredAndGroupMap(flags []*Flag) map[string]bool { + andGroupRequired := map[string]bool{} + for _, flag := range flags { + for _, and := range flag.And { + if flag.Required { + andGroupRequired[and] = true + } + } + } + return andGroupRequired +} + func checkMissingChildren(node *Node) error { missing := []string{} @@ -871,7 +951,7 @@ func checkMissingChildren(node *Node) error { if len(missing) == 1 { return fmt.Errorf("expected %s", missing[0]) } - return fmt.Errorf("expected one of %s", strings.Join(missing, ", ")) + return fmt.Errorf("expected one of %s", strings.Join(missing, ", ")) } // If we're missing any positionals and they're required, return an error. 
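To make the `and` group semantics concrete (illustrative flag names):

```go
package main

import "github.com/alecthomas/kong"

// --user and --password share an `and` group, so setting only one of them
// fails with "--user and --password must be used together".
var cli struct {
	User     string `and:"auth" help:"User name."`
	Password string `and:"auth" help:"Password."`
}

func main() {
	kong.Parse(&cli)
}
```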
@@ -931,7 +1011,7 @@ func checkEnum(value *Value, target reflect.Value) error { } enums = append(enums, fmt.Sprintf("%q", enum)) } - return fmt.Errorf("%s must be one of %s but got %q", value.ShortSummary(), strings.Join(enums, ","), target.Interface()) + return fmt.Errorf("%s must be one of %s but got %q", value.ShortSummary(), strings.Join(enums, ","), fmt.Sprintf("%v", target.Interface())) } } @@ -945,6 +1025,20 @@ func checkPassthroughArg(target reflect.Value) bool { } } +func checkXorDuplicatedAndAndMissing(paths []*Path) error { + errs := []string{} + if err := checkXorDuplicates(paths); err != nil { + errs = append(errs, err.Error()) + } + if err := checkAndMissing(paths); err != nil { + errs = append(errs, err.Error()) + } + if len(errs) > 0 { + return errors.New(strings.Join(errs, ", ")) + } + return nil +} + func checkXorDuplicates(paths []*Path) error { for _, path := range paths { seen := map[string]*Flag{} @@ -963,7 +1057,39 @@ func checkXorDuplicates(paths []*Path) error { return nil } -func findPotentialCandidates(needle string, haystack []string, format string, args ...interface{}) error { +func checkAndMissing(paths []*Path) error { + for _, path := range paths { + missingMsgs := []string{} + andGroups := map[string][]*Flag{} + for _, flag := range path.Flags { + for _, and := range flag.And { + andGroups[and] = append(andGroups[and], flag) + } + } + for _, flags := range andGroups { + oneSet := false + notSet := []*Flag{} + flagNames := []string{} + for _, flag := range flags { + flagNames = append(flagNames, flag.Name) + if flag.Set { + oneSet = true + } else { + notSet = append(notSet, flag) + } + } + if len(notSet) > 0 && oneSet { + missingMsgs = append(missingMsgs, fmt.Sprintf("--%s must be used together", strings.Join(flagNames, " and --"))) + } + } + if len(missingMsgs) > 0 { + return fmt.Errorf("%s", strings.Join(missingMsgs, ", ")) + } + } + return nil +} + +func findPotentialCandidates(needle string, haystack []string, format string, args ...any) error { if len(haystack) == 0 { return fmt.Errorf(format, args...) } @@ -983,12 +1109,23 @@ func findPotentialCandidates(needle string, haystack []string, format string, ar } type validatable interface{ Validate() error } +type extendedValidatable interface { + Validate(kctx *Context) error +} + +// Proxy a validatable function to the extendedValidatable interface +type validatableFunc func() error -func isValidatable(v reflect.Value) validatable { +func (f validatableFunc) Validate(kctx *Context) error { return f() } + +func isValidatable(v reflect.Value) extendedValidatable { if !v.IsValid() || (v.Kind() == reflect.Ptr || v.Kind() == reflect.Slice || v.Kind() == reflect.Map) && v.IsNil() { return nil } if validate, ok := v.Interface().(validatable); ok { + return validatableFunc(validate.Validate) + } + if validate, ok := v.Interface().(extendedValidatable); ok { return validate } if v.CanAddr() { diff --git a/vendor/github.com/alecthomas/kong/defaults.go b/vendor/github.com/alecthomas/kong/defaults.go index f6728d79fa8..9489fb3bf27 100644 --- a/vendor/github.com/alecthomas/kong/defaults.go +++ b/vendor/github.com/alecthomas/kong/defaults.go @@ -1,7 +1,7 @@ package kong // ApplyDefaults if they are not already set. -func ApplyDefaults(target interface{}, options ...Option) error { +func ApplyDefaults(target any, options ...Option) error { app, err := New(target, options...) 
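The validatable changes at the end of this hunk let a Validate method optionally receive the parse context: the old `Validate() error` form is adapted via validatableFunc, while the new `Validate(kctx *Context) error` form is used as-is. An illustrative command-side sketch (the ServeCmd name and port check are invented for the example):

	import (
		"fmt"

		"github.com/alecthomas/kong"
	)

	type ServeCmd struct {
		Port int `default:"8080" help:"Port to listen on."`
	}

	// The context-aware form; the zero-argument Validate() error still works.
	func (s *ServeCmd) Validate(kctx *kong.Context) error {
		if s.Port <= 0 {
			return fmt.Errorf("%s: --port must be positive", kctx.Command())
		}
		return nil
	}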
if err != nil { return err diff --git a/vendor/github.com/alecthomas/kong/doc.go b/vendor/github.com/alecthomas/kong/doc.go index 78c4d11037b..7e53da7f362 100644 --- a/vendor/github.com/alecthomas/kong/doc.go +++ b/vendor/github.com/alecthomas/kong/doc.go @@ -2,31 +2,31 @@ // // Here's an example: // -// shell rm [-f] [-r] <paths> ... -// shell ls [<paths> ...] +// shell rm [-f] [-r] <paths> ... +// shell ls [<paths> ...] // // This can be represented by the following command-line structure: // -// package main +// package main // -// import "github.com/alecthomas/kong" +// import "github.com/alecthomas/kong" // -// var CLI struct { -// Rm struct { -// Force bool `short:"f" help:"Force removal."` -// Recursive bool `short:"r" help:"Recursively remove files."` +// var CLI struct { +// Rm struct { +// Force bool `short:"f" help:"Force removal."` +// Recursive bool `short:"r" help:"Recursively remove files."` // -// Paths []string `arg help:"Paths to remove." type:"path"` -// } `cmd help:"Remove files."` +// Paths []string `arg help:"Paths to remove." type:"path"` +// } `cmd help:"Remove files."` // -// Ls struct { -// Paths []string `arg optional help:"Paths to list." type:"path"` -// } `cmd help:"List paths."` -// } +// Ls struct { +// Paths []string `arg optional help:"Paths to list." type:"path"` +// } `cmd help:"List paths."` +// } // -// func main() { -// kong.Parse(&CLI) -// } +// func main() { +// kong.Parse(&CLI) +// } // // See https://github.com/alecthomas/kong for details. package kong diff --git a/vendor/github.com/alecthomas/kong/global.go b/vendor/github.com/alecthomas/kong/global.go index d4b3cb56aa5..babe1e197dc 100644 --- a/vendor/github.com/alecthomas/kong/global.go +++ b/vendor/github.com/alecthomas/kong/global.go @@ -5,7 +5,7 @@ import ( ) // Parse constructs a new parser and parses the default command-line. -func Parse(cli interface{}, options ...Option) *Context { +func Parse(cli any, options ...Option) *Context { parser, err := New(cli, options...)
if err != nil { panic(err) diff --git a/vendor/github.com/alecthomas/kong/guesswidth.go b/vendor/github.com/alecthomas/kong/guesswidth.go index 46768e68301..dfdc3f5134b 100644 --- a/vendor/github.com/alecthomas/kong/guesswidth.go +++ b/vendor/github.com/alecthomas/kong/guesswidth.go @@ -1,3 +1,4 @@ +//go:build appengine || (!linux && !freebsd && !darwin && !dragonfly && !netbsd && !openbsd) // +build appengine !linux,!freebsd,!darwin,!dragonfly,!netbsd,!openbsd package kong diff --git a/vendor/github.com/alecthomas/kong/guesswidth_unix.go b/vendor/github.com/alecthomas/kong/guesswidth_unix.go index db52595e248..0170055a79c 100644 --- a/vendor/github.com/alecthomas/kong/guesswidth_unix.go +++ b/vendor/github.com/alecthomas/kong/guesswidth_unix.go @@ -27,9 +27,9 @@ func guessWidth(w io.Writer) int { if _, _, err := syscall.Syscall6( syscall.SYS_IOCTL, - uintptr(fd), // nolint: unconvert + uintptr(fd), //nolint: unconvert uintptr(syscall.TIOCGWINSZ), - uintptr(unsafe.Pointer(&dimensions)), // nolint: gas + uintptr(unsafe.Pointer(&dimensions)), //nolint: gas 0, 0, 0, ); err == 0 { if dimensions[1] == 0 { diff --git a/vendor/github.com/alecthomas/kong/help.go b/vendor/github.com/alecthomas/kong/help.go index 4fb77001d4c..6363ea212dc 100644 --- a/vendor/github.com/alecthomas/kong/help.go +++ b/vendor/github.com/alecthomas/kong/help.go @@ -386,7 +386,7 @@ func newHelpWriter(ctx *Context, options HelpOptions) *helpWriter { return w } -func (h *helpWriter) Printf(format string, args ...interface{}) { +func (h *helpWriter) Printf(format string, args ...any) { h.Print(fmt.Sprintf(format, args...)) } @@ -490,28 +490,24 @@ func formatFlag(haveShort bool, flag *Flag) string { flagString := "" name := flag.Name isBool := flag.IsBool() + isCounter := flag.IsCounter() + + short := "" if flag.Short != 0 { - if isBool && flag.Tag.Negatable { - flagString += fmt.Sprintf("-%c, --[no-]%s", flag.Short, name) - } else { - flagString += fmt.Sprintf("-%c, --%s", flag.Short, name) - } - } else { - if isBool && flag.Tag.Negatable { - if haveShort { - flagString = fmt.Sprintf(" --[no-]%s", name) - } else { - flagString = fmt.Sprintf("--[no-]%s", name) - } - } else { - if haveShort { - flagString += fmt.Sprintf(" --%s", name) - } else { - flagString += fmt.Sprintf("--%s", name) - } - } + short = "-" + string(flag.Short) + ", " + } else if haveShort { + short = " " + } + + if isBool && flag.Tag.Negatable == negatableDefault { + name = "[no-]" + name + } else if isBool && flag.Tag.Negatable != "" { + name += "/" + flag.Tag.Negatable } - if !isBool { + + flagString += fmt.Sprintf("%s--%s", short, name) + + if !isBool && !isCounter { flagString += fmt.Sprintf("=%s", flag.FormatPlaceHolder()) } return flagString diff --git a/vendor/github.com/alecthomas/kong/hooks.go b/vendor/github.com/alecthomas/kong/hooks.go index d166b08883a..9fdf24c1395 100644 --- a/vendor/github.com/alecthomas/kong/hooks.go +++ b/vendor/github.com/alecthomas/kong/hooks.go @@ -3,17 +3,24 @@ package kong // BeforeResolve is a documentation-only interface describing hooks that run before resolvers are applied. type BeforeResolve interface { // This is not the correct signature - see README for details. - BeforeResolve(args ...interface{}) error + BeforeResolve(args ...any) error } // BeforeApply is a documentation-only interface describing hooks that run before values are set. type BeforeApply interface { // This is not the correct signature - see README for details. 
- BeforeApply(args ...interface{}) error + BeforeApply(args ...any) error } // AfterApply is a documentation-only interface describing hooks that run after values are set. type AfterApply interface { // This is not the correct signature - see README for details. - AfterApply(args ...interface{}) error + AfterApply(args ...any) error +} + +// AfterRun is a documentation-only interface describing hooks that run after Run() returns. +type AfterRun interface { + // This is not the correct signature - see README for details. + // AfterRun is called after Run() returns. + AfterRun(args ...any) error } diff --git a/vendor/github.com/alecthomas/kong/kong.go b/vendor/github.com/alecthomas/kong/kong.go index 255633fbba7..b85e145296c 100644 --- a/vendor/github.com/alecthomas/kong/kong.go +++ b/vendor/github.com/alecthomas/kong/kong.go @@ -15,7 +15,7 @@ var ( callbackReturnSignature = reflect.TypeOf((*error)(nil)).Elem() ) -func failField(parent reflect.Value, field reflect.StructField, format string, args ...interface{}) error { +func failField(parent reflect.Value, field reflect.StructField, format string, args ...any) error { name := parent.Type().Name() if name == "" { name = "<anonymous struct>" @@ -24,7 +24,7 @@ func failField(parent reflect.Value, field reflect.StructField, format string, a } // Must creates a new Parser or panics if there is an error. -func Must(ast interface{}, options ...Option) *Kong { +func Must(ast any, options ...Option) *Kong { k, err := New(ast, options...) if err != nil { panic(err) @@ -76,7 +76,7 @@ type Kong struct { // New creates a new Kong parser on grammar. // // See the README (https://github.com/alecthomas/kong) for usage instructions. -func New(grammar interface{}, options ...Option) (*Kong, error) { +func New(grammar any, options ...Option) (*Kong, error) { k := &Kong{ Exit: os.Exit, Stdout: os.Stdout, @@ -117,7 +117,7 @@ func New(grammar interface{}, options ...Option) (*Kong, error) { // Embed any embedded structs.
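AfterRun is the new hook declared above; per the Run() change earlier in context.go, it fires once Run returns, and its error is merged with Run's via errors.Join, so cleanup failures surface instead of being dropped. A sketch of a command using it (the tracing detail is invented for illustration):

	type App struct {
		Trace bool `help:"Enable tracing."`
	}

	func (a *App) Run() error {
		// ... do the actual work ...
		return nil
	}

	// Called after Run() returns; a non-nil error here is joined with Run's
	// error rather than replacing it.
	func (a *App) AfterRun() error {
		// ... flush traces, close resources ...
		return nil
	}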
for _, embed := range k.embedded { - tag, err := parseTagString(strings.Join(embed.tags, " ")) //nolint:govet + tag, err := parseTagString(strings.Join(embed.tags, " ")) if err != nil { return nil, err } @@ -167,9 +167,42 @@ func New(grammar interface{}, options ...Option) (*Kong, error) { k.bindings.add(k.vars) + if err = checkOverlappingXorAnd(k); err != nil { + return nil, err + } + return k, nil } +func checkOverlappingXorAnd(k *Kong) error { + xorGroups := map[string][]string{} + andGroups := map[string][]string{} + for _, flag := range k.Model.Node.Flags { + for _, xor := range flag.Xor { + xorGroups[xor] = append(xorGroups[xor], flag.Name) + } + for _, and := range flag.And { + andGroups[and] = append(andGroups[and], flag.Name) + } + } + for xor, xorSet := range xorGroups { + for and, andSet := range andGroups { + overlappingEntries := []string{} + for _, xorTag := range xorSet { + for _, andTag := range andSet { + if xorTag == andTag { + overlappingEntries = append(overlappingEntries, xorTag) + } + } + } + if len(overlappingEntries) > 1 { + return fmt.Errorf("invalid xor and combination, %s and %s overlap with more than one: %s", xor, and, overlappingEntries) + } + } + } + return nil +} + type varStack []Vars func (v *varStack) head() Vars { return (*v)[len(*v)-1] } @@ -216,19 +249,19 @@ func (k *Kong) interpolateValue(value *Value, vars Vars) (err error) { return fmt.Errorf("enum for %s: %s", value.Summary(), err) } - updatedVars := map[string]string{ - "default": value.Default, - "enum": value.Enum, - } if value.Default, err = interpolate(value.Default, vars, nil); err != nil { return fmt.Errorf("default value for %s: %s", value.Summary(), err) } if value.Enum, err = interpolate(value.Enum, vars, nil); err != nil { return fmt.Errorf("enum value for %s: %s", value.Summary(), err) } + updatedVars := map[string]string{ + "default": value.Default, + "enum": value.Enum, + } if value.Flag != nil { for i, env := range value.Flag.Envs { - if value.Flag.Envs[i], err = interpolate(env, vars, nil); err != nil { + if value.Flag.Envs[i], err = interpolate(env, vars, updatedVars); err != nil { return fmt.Errorf("env value for %s: %s", value.Summary(), err) } } @@ -328,16 +361,14 @@ func (k *Kong) applyHook(ctx *Context, name string) error { default: panic("unsupported Path") } - method := getMethod(value, name) - if !method.IsValid() { - continue - } - binds := k.bindings.clone() - binds.add(ctx, trace) - binds.add(trace.Node().Vars().CloneWith(k.vars)) - binds.merge(ctx.bindings) - if err := callMethod(name, value, method, binds); err != nil { - return err + for _, method := range getMethods(value, name) { + binds := k.bindings.clone() + binds.add(ctx, trace) + binds.add(trace.Node().Vars().CloneWith(k.vars)) + binds.merge(ctx.bindings) + if err := callFunction(method, binds); err != nil { + return err + } } } // Path[0] will always be the app root. 
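checkOverlappingXorAnd, added above, rejects grammars where an xor group and an and group share more than one flag, since the two constraints would then contradict each other: the xor group forbids setting both flags, while the and group demands it. Sharing a single flag remains legal, roughly:

	var cli struct {
		A bool `xor:"x" and:"g"` // one shared member: accepted
		B bool `xor:"x"`
		C bool `and:"g"`
		// Tagging B with `and:"g"` as well would make kong.New fail with
		// "invalid xor and combination, x and g overlap with more than one: ...".
	}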
@@ -359,21 +390,19 @@ func (k *Kong) applyHookToDefaultFlags(ctx *Context, node *Node, name string) er if !flag.HasDefault || ctx.values[flag.Value].IsValid() || !flag.Target.IsValid() { continue } - method := getMethod(flag.Target, name) - if !method.IsValid() { - continue - } - path := &Path{Flag: flag} - if err := callMethod(name, flag.Target, method, binds.clone().add(path)); err != nil { - return next(err) + for _, method := range getMethods(flag.Target, name) { + path := &Path{Flag: flag} + if err := callFunction(method, binds.clone().add(path)); err != nil { + return next(err) + } } } return next(nil) }) } -func formatMultilineMessage(w io.Writer, leaders []string, format string, args ...interface{}) { - lines := strings.Split(fmt.Sprintf(format, args...), "\n") +func formatMultilineMessage(w io.Writer, leaders []string, format string, args ...any) { + lines := strings.Split(strings.TrimRight(fmt.Sprintf(format, args...), "\n"), "\n") leader := "" for _, l := range leaders { if l == "" { @@ -388,31 +417,31 @@ func formatMultilineMessage(w io.Writer, leaders []string, format string, args . } // Printf writes a message to Kong.Stdout with the application name prefixed. -func (k *Kong) Printf(format string, args ...interface{}) *Kong { +func (k *Kong) Printf(format string, args ...any) *Kong { formatMultilineMessage(k.Stdout, []string{k.Model.Name}, format, args...) return k } // Errorf writes a message to Kong.Stderr with the application name prefixed. -func (k *Kong) Errorf(format string, args ...interface{}) *Kong { +func (k *Kong) Errorf(format string, args ...any) *Kong { formatMultilineMessage(k.Stderr, []string{k.Model.Name, "error"}, format, args...) return k } // Fatalf writes a message to Kong.Stderr with the application name prefixed then exits with a non-zero status. -func (k *Kong) Fatalf(format string, args ...interface{}) { +func (k *Kong) Fatalf(format string, args ...any) { k.Errorf(format, args...) k.Exit(1) } // FatalIfErrorf terminates with an error message if err != nil. -func (k *Kong) FatalIfErrorf(err error, args ...interface{}) { +func (k *Kong) FatalIfErrorf(err error, args ...any) { if err == nil { return } msg := err.Error() if len(args) > 0 { - msg = fmt.Sprintf(args[0].(string), args[1:]...) + ": " + err.Error() // nolint + msg = fmt.Sprintf(args[0].(string), args[1:]...) + ": " + err.Error() //nolint } // Maybe display usage information. 
var parseErr *ParseError @@ -439,11 +468,11 @@ func (k *Kong) LoadConfig(path string) (Resolver, error) { if err != nil { return nil, err } - r, err := os.Open(path) // nolint: gas + r, err := os.Open(path) //nolint: gas if err != nil { return nil, err } - defer r.Close() // nolint: gosec + defer r.Close() return k.loader(r) } diff --git a/vendor/github.com/alecthomas/kong/levenshtein.go b/vendor/github.com/alecthomas/kong/levenshtein.go index 1816f301153..6837d6c3097 100644 --- a/vendor/github.com/alecthomas/kong/levenshtein.go +++ b/vendor/github.com/alecthomas/kong/levenshtein.go @@ -31,7 +31,7 @@ func levenshtein(a, b string) int { return f[len(f)-1] } -func min(a, b int) int { +func min(a, b int) int { //nolint:predeclared if a <= b { return a } diff --git a/vendor/github.com/alecthomas/kong/mapper.go b/vendor/github.com/alecthomas/kong/mapper.go index a510e745344..db3f24ec8be 100644 --- a/vendor/github.com/alecthomas/kong/mapper.go +++ b/vendor/github.com/alecthomas/kong/mapper.go @@ -5,7 +5,7 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" + "io" "math/bits" "net/url" "os" @@ -17,7 +17,7 @@ import ( var ( mapperValueType = reflect.TypeOf((*MapperValue)(nil)).Elem() - boolMapperType = reflect.TypeOf((*BoolMapper)(nil)).Elem() + boolMapperValueType = reflect.TypeOf((*BoolMapperValue)(nil)).Elem() jsonUnmarshalerType = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() binaryUnmarshalerType = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem() @@ -47,15 +47,21 @@ type MapperValue interface { Decode(ctx *DecodeContext) error } +// BoolMapperValue may be implemented by fields in order to provide custom mappings for boolean values. +type BoolMapperValue interface { + MapperValue + IsBool() bool +} + type mapperValueAdapter struct { isBool bool } func (m *mapperValueAdapter) Decode(ctx *DecodeContext, target reflect.Value) error { if target.Type().Implements(mapperValueType) { - return target.Interface().(MapperValue).Decode(ctx) // nolint + return target.Interface().(MapperValue).Decode(ctx) //nolint } - return target.Addr().Interface().(MapperValue).Decode(ctx) // nolint + return target.Addr().Interface().(MapperValue).Decode(ctx) //nolint } func (m *mapperValueAdapter) IsBool() bool { @@ -71,9 +77,9 @@ func (m *textUnmarshalerAdapter) Decode(ctx *DecodeContext, target reflect.Value return err } if target.Type().Implements(textUnmarshalerType) { - return target.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(value)) // nolint + return target.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(value)) //nolint } - return target.Addr().Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(value)) // nolint + return target.Addr().Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(value)) //nolint } type binaryUnmarshalerAdapter struct{} @@ -85,9 +91,9 @@ func (m *binaryUnmarshalerAdapter) Decode(ctx *DecodeContext, target reflect.Val return err } if target.Type().Implements(binaryUnmarshalerType) { - return target.Interface().(encoding.BinaryUnmarshaler).UnmarshalBinary([]byte(value)) // nolint + return target.Interface().(encoding.BinaryUnmarshaler).UnmarshalBinary([]byte(value)) //nolint } - return target.Addr().Interface().(encoding.BinaryUnmarshaler).UnmarshalBinary([]byte(value)) // nolint + return target.Addr().Interface().(encoding.BinaryUnmarshaler).UnmarshalBinary([]byte(value)) //nolint } type jsonUnmarshalerAdapter struct{} @@ -99,9 +105,9 @@ func (j 
*jsonUnmarshalerAdapter) Decode(ctx *DecodeContext, target reflect.Value return err } if target.Type().Implements(jsonUnmarshalerType) { - return target.Interface().(json.Unmarshaler).UnmarshalJSON([]byte(value)) // nolint + return target.Interface().(json.Unmarshaler).UnmarshalJSON([]byte(value)) //nolint } - return target.Addr().Interface().(json.Unmarshaler).UnmarshalJSON([]byte(value)) // nolint + return target.Addr().Interface().(json.Unmarshaler).UnmarshalJSON([]byte(value)) //nolint } // A Mapper represents how a field is mapped from command-line values to Go. @@ -136,7 +142,7 @@ type BoolMapperExt interface { // A MapperFunc is a single function that complies with the Mapper interface. type MapperFunc func(ctx *DecodeContext, target reflect.Value) error -func (m MapperFunc) Decode(ctx *DecodeContext, target reflect.Value) error { // nolint: revive +func (m MapperFunc) Decode(ctx *DecodeContext, target reflect.Value) error { //nolint: revive return m(ctx, target) } @@ -194,7 +200,7 @@ func (r *Registry) ForType(typ reflect.Type) Mapper { for _, impl := range []reflect.Type{typ, reflect.PtrTo(typ)} { if impl.Implements(mapperValueType) { // FIXME: This should pass in the bool mapper. - return &mapperValueAdapter{impl.Implements(boolMapperType)} + return &mapperValueAdapter{impl.Implements(boolMapperValueType)} } } // Next, try explicitly registered types. @@ -245,7 +251,7 @@ func (r *Registry) RegisterType(typ reflect.Type, mapper Mapper) *Registry { } // RegisterValue registers a Mapper by pointer to the field value. -func (r *Registry) RegisterValue(ptr interface{}, mapper Mapper) *Registry { +func (r *Registry) RegisterValue(ptr any, mapper Mapper) *Registry { key := reflect.ValueOf(ptr) if key.Kind() != reflect.Ptr { panic("expected a pointer") @@ -333,7 +339,7 @@ func durationDecoder() MapperFunc { return fmt.Errorf("expected duration but got %q: %v", v, err) } case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64: - d = reflect.ValueOf(v).Convert(reflect.TypeOf(time.Duration(0))).Interface().(time.Duration) // nolint: forcetypeassert + d = reflect.ValueOf(v).Convert(reflect.TypeOf(time.Duration(0))).Interface().(time.Duration) //nolint: forcetypeassert default: return fmt.Errorf("expected duration but got %q", v) } @@ -361,7 +367,7 @@ func timeDecoder() MapperFunc { } } -func intDecoder(bits int) MapperFunc { // nolint: dupl +func intDecoder(bits int) MapperFunc { //nolint: dupl return func(ctx *DecodeContext, target reflect.Value) error { t, err := ctx.Scan.PopValue("int") if err != nil { @@ -390,7 +396,7 @@ func intDecoder(bits int) MapperFunc { // nolint: dupl } } -func uintDecoder(bits int) MapperFunc { // nolint: dupl +func uintDecoder(bits int) MapperFunc { //nolint: dupl return func(ctx *DecodeContext, target reflect.Value) error { t, err := ctx.Scan.PopValue("uint") if err != nil { @@ -467,7 +473,7 @@ func mapDecoder(r *Registry) MapperFunc { case string: childScanner = ScanAsType(t.Type, SplitEscaped(v, mapsep)...) - case []map[string]interface{}: + case []map[string]any: for _, m := range v { err := jsonTranscode(m, target.Addr().Interface()) if err != nil { @@ -476,7 +482,7 @@ func mapDecoder(r *Registry) MapperFunc { } return nil - case map[string]interface{}: + case map[string]any: return jsonTranscode(v, target.Addr().Interface()) default: @@ -534,7 +540,7 @@ func sliceDecoder(r *Registry) MapperFunc { var childScanner *Scanner if ctx.Value.Flag != nil { t := ctx.Scan.Pop() - // If decoding a flag, we need an value. 
+ // If decoding a flag, we need a value. if t.IsEOL() { return fmt.Errorf("missing value, expecting \"%c...\"", sep) } @@ -542,11 +548,11 @@ func sliceDecoder(r *Registry) MapperFunc { case string: childScanner = ScanAsType(t.Type, SplitEscaped(v, sep)...) - case []interface{}: + case []any: return jsonTranscode(v, target.Addr().Interface()) default: - v = []interface{}{v} + v = []any{v} return jsonTranscode(v, target.Addr().Interface()) } } else { @@ -574,6 +580,12 @@ func pathMapper(r *Registry) MapperFunc { if target.Kind() == reflect.Slice { return sliceDecoder(r)(ctx, target) } + if target.Kind() == reflect.Ptr && target.Elem().Kind() == reflect.String { + if target.IsNil() { + return nil + } + target = target.Elem() + } if target.Kind() != reflect.String { return fmt.Errorf("\"path\" type must be applied to a string not %s", target.Type()) } @@ -605,7 +617,7 @@ func fileMapper(r *Registry) MapperFunc { file = os.Stdin } else { path = ExpandPath(path) - file, err = os.Open(path) // nolint: gosec + file, err = os.Open(path) //nolint: gosec if err != nil { return err } @@ -629,7 +641,7 @@ func existingFileMapper(r *Registry) MapperFunc { return err } - if !ctx.Value.Active || ctx.Value.Set { + if !ctx.Value.Active || (ctx.Value.Set && ctx.Value.Target.Type() == target.Type()) { // early return to avoid checking extra files that may not exist; // this hack only works because the value provided on the cli is // checked before the default value is checked (if default is set). @@ -665,7 +677,7 @@ func existingDirMapper(r *Registry) MapperFunc { return err } - if !ctx.Value.Active || ctx.Value.Set { + if !ctx.Value.Active || (ctx.Value.Set && ctx.Value.Target.Type() == target.Type()) { // early return to avoid checking extra dirs that may not exist; // this hack only works because the value provided on the cli is // checked before the default value is checked (if default is set). @@ -706,11 +718,14 @@ func fileContentMapper(r *Registry) MapperFunc { var data []byte if path != "-" { path = ExpandPath(path) - data, err = ioutil.ReadFile(path) //nolint:gosec + data, err = os.ReadFile(path) //nolint:gosec } else { - data, err = ioutil.ReadAll(os.Stdin) + data, err = io.ReadAll(os.Stdin) } if err != nil { + if info, statErr := os.Stat(path); statErr == nil && info.IsDir() { + return fmt.Errorf("%q exists but is a directory: %w", path, err) + } return err } target.SetBytes(data) @@ -863,7 +878,7 @@ type NamedFileContentFlag struct { Contents []byte } -func (f *NamedFileContentFlag) Decode(ctx *DecodeContext) error { // nolint: revive +func (f *NamedFileContentFlag) Decode(ctx *DecodeContext) error { //nolint: revive var filename string err := ctx.Scan.PopValueInto("filename", &filename) if err != nil { @@ -875,7 +890,7 @@ func (f *NamedFileContentFlag) Decode(ctx *DecodeContext) error { // nolint: rev return nil } filename = ExpandPath(filename) - data, err := ioutil.ReadFile(filename) // nolint: gosec + data, err := os.ReadFile(filename) //nolint: gosec if err != nil { return fmt.Errorf("failed to open %q: %v", filename, err) } @@ -887,7 +902,7 @@ func (f *NamedFileContentFlag) Decode(ctx *DecodeContext) error { // nolint: rev // FileContentFlag is a flag value that loads a file's contents into its value. 
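The BoolMapperValue interface introduced in this mapper.go diff (and checked by ForType in place of the old BoolMapper type) lets a custom MapperValue opt into boolean semantics, so the flag can be passed bare on the command line. A hedged sketch, with the OnOff type invented for illustration:

	import (
		"fmt"

		"github.com/alecthomas/kong"
	)

	// OnOff decodes on/off values but behaves like a bool flag.
	type OnOff bool

	func (o *OnOff) Decode(ctx *kong.DecodeContext) error {
		var v string
		if err := ctx.Scan.PopValueInto("on/off", &v); err != nil {
			return err
		}
		switch v {
		case "on", "true": // a bare --flag arrives as "true"
			*o = true
		case "off", "false":
			*o = false
		default:
			return fmt.Errorf("expected on/off but got %q", v)
		}
		return nil
	}

	// IsBool satisfies BoolMapperValue, so kong treats the flag as boolean.
	func (o *OnOff) IsBool() bool { return true }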
type FileContentFlag []byte -func (f *FileContentFlag) Decode(ctx *DecodeContext) error { // nolint: revive +func (f *FileContentFlag) Decode(ctx *DecodeContext) error { //nolint: revive var filename string err := ctx.Scan.PopValueInto("filename", &filename) if err != nil { @@ -899,7 +914,7 @@ func (f *FileContentFlag) Decode(ctx *DecodeContext) error { // nolint: revive return nil } filename = ExpandPath(filename) - data, err := ioutil.ReadFile(filename) // nolint: gosec + data, err := os.ReadFile(filename) //nolint: gosec if err != nil { return fmt.Errorf("failed to open %q: %v", filename, err) } @@ -907,7 +922,7 @@ func (f *FileContentFlag) Decode(ctx *DecodeContext) error { // nolint: revive return nil } -func jsonTranscode(in, out interface{}) error { +func jsonTranscode(in, out any) error { data, err := json.Marshal(in) if err != nil { return err diff --git a/vendor/github.com/alecthomas/kong/model.go b/vendor/github.com/alecthomas/kong/model.go index 793cf97bcce..065fcdd0848 100644 --- a/vendor/github.com/alecthomas/kong/model.go +++ b/vendor/github.com/alecthomas/kong/model.go @@ -69,7 +69,7 @@ func (n *Node) Leaf() bool { // Find a command/argument/flag by pointer to its field. // // Returns nil if not found. Panics if ptr is not a pointer. -func (n *Node) Find(ptr interface{}) *Node { +func (n *Node) Find(ptr any) *Node { key := reflect.ValueOf(ptr) if key.Kind() != reflect.Ptr { panic("expected a pointer") @@ -162,6 +162,16 @@ func (n *Node) Summary() string { } else if len(n.Children) > 0 { summary += " " } + allFlags := n.Flags + if n.Parent != nil { + allFlags = append(allFlags, n.Parent.Flags...) + } + for _, flag := range allFlags { + if !flag.Required { + summary += " [flags]" + break + } + } return summary } @@ -229,23 +239,24 @@ func (n *Node) ClosestGroup() *Group { // A Value is either a flag or a variable positional argument. type Value struct { - Flag *Flag // Nil if positional argument. - Name string - Help string - OrigHelp string // Original help string, without interpolated variables. - HasDefault bool - Default string - DefaultValue reflect.Value - Enum string - Mapper Mapper - Tag *Tag - Target reflect.Value - Required bool - Set bool // Set to true when this value is set through some mechanism. - Format string // Formatting directive, if applicable. - Position int // Position (for positional arguments). - Passthrough bool // Set to true to stop flag parsing when encountered. - Active bool // Denotes the value is part of an active branch in the CLI. + Flag *Flag // Nil if positional argument. + Name string + Help string + OrigHelp string // Original help string, without interpolated variables. + HasDefault bool + Default string + DefaultValue reflect.Value + Enum string + Mapper Mapper + Tag *Tag + Target reflect.Value + Required bool + Set bool // Set to true when this value is set through some mechanism. + Format string // Formatting directive, if applicable. + Position int // Position (for positional arguments). + Passthrough bool // Deprecated: Use PassthroughMode instead. Set to true to stop flag parsing when encountered. + PassthroughMode PassthroughMode // + Active bool // Denotes the value is part of an active branch in the CLI. } // EnumMap returns a map of the enums in this value. 
@@ -368,9 +379,9 @@ func (v *Value) Reset() error { v.Target.Set(reflect.Zero(v.Target.Type())) if len(v.Tag.Envs) != 0 { for _, env := range v.Tag.Envs { - envar := os.Getenv(env) + envar, ok := os.LookupEnv(env) // Parse the first non-empty ENV in the list - if envar != "" { + if ok { err := v.Parse(ScanFromTokens(Token{Type: FlagValueToken, Value: envar}), v.Target) if err != nil { return fmt.Errorf("%s (from envar %s=%q)", err, env, envar) } @@ -395,8 +406,10 @@ type Flag struct { *Value Group *Group // Logical grouping when displaying. May also be used by configuration loaders to group options logically. Xor []string + And []string PlaceHolder string Envs []string + Aliases []string Short rune Hidden bool Negated bool @@ -420,7 +433,7 @@ func (f *Flag) FormatPlaceHolder() string { return placeholderHelper.PlaceHolder(f) } tail := "" - if f.Value.IsSlice() && f.Value.Tag.Sep != -1 { + if f.Value.IsSlice() && f.Value.Tag.Sep != -1 && f.Tag.Type == "" { tail += string(f.Value.Tag.Sep) + "..." } if f.PlaceHolder != "" { @@ -433,7 +446,7 @@ func (f *Flag) FormatPlaceHolder() string { return f.Default + tail } if f.Value.IsMap() { - if f.Value.Tag.MapSep != -1 { + if f.Value.Tag.MapSep != -1 && f.Tag.Type == "" { tail = string(f.Value.Tag.MapSep) + "..." } return "KEY=VALUE" + tail @@ -490,6 +503,9 @@ func reflectValueIsZero(v reflect.Value) bool { default: // This should never happens, but will act as a safeguard for // later, as a default value doesn't makes sense here. - panic(&reflect.ValueError{"reflect.Value.IsZero", v.Kind()}) + panic(&reflect.ValueError{ + Method: "reflect.Value.IsZero", + Kind: v.Kind(), + }) } } diff --git a/vendor/github.com/alecthomas/kong/negatable.go b/vendor/github.com/alecthomas/kong/negatable.go new file mode 100644 index 00000000000..7d8902a53d4 --- /dev/null +++ b/vendor/github.com/alecthomas/kong/negatable.go @@ -0,0 +1,19 @@ +package kong + +// negatableDefault is a placeholder value for the Negatable tag to indicate +// the negated flag is --no-<flag-name>. This is needed as at the time of +// parsing a tag, the field's flag name is not yet known. +const negatableDefault = "_" + +// negatableFlagName returns the name of the flag for a negatable field, or +// an empty string if the field is not negatable. +func negatableFlagName(name, negation string) string { + switch negation { + case "": + return "" + case negatableDefault: + return "--no-" + name + default: + return "--" + negation + } +} diff --git a/vendor/github.com/alecthomas/kong/options.go b/vendor/github.com/alecthomas/kong/options.go index 8d2893cf143..6263202b65d 100644 --- a/vendor/github.com/alecthomas/kong/options.go +++ b/vendor/github.com/alecthomas/kong/options.go @@ -20,7 +20,7 @@ type Option interface { // OptionFunc is function that adheres to the Option interface. type OptionFunc func(k *Kong) error -func (o OptionFunc) Apply(k *Kong) error { return o(k) } // nolint: revive +func (o OptionFunc) Apply(k *Kong) error { return o(k) } //nolint: revive // Vars sets the variables to use for interpolation into help strings and default values. // @@ -79,7 +79,7 @@ type dynamicCommand struct { help string group string tags []string - cmd interface{} + cmd any } // DynamicCommand registers a dynamically constructed command with the root of the CLI. @@ -87,8 +87,12 @@ type dynamicCommand struct { // This is useful for command-line structures that are extensible via user-provided plugins. // // "tags" is a list of extra tag strings to parse, in the form <key>:"<value>".
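negatable.go above, together with the Tag change from `Negatable bool` to `Negatable string` later in tag.go, allows a custom negation name in addition to the classic --no- prefix. Roughly, in user code (flag names are illustrative):

	var cli struct {
		// Default negation: --logging / --no-logging.
		Logging bool `negatable:"" default:"true" help:"Enable logging."`
		// Custom negation: --color is switched off with --monochrome.
		Color bool `negatable:"monochrome" default:"true" help:"Colorise output."`
	}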
-func DynamicCommand(name, help, group string, cmd interface{}, tags ...string) Option { +func DynamicCommand(name, help, group string, cmd any, tags ...string) Option { return OptionFunc(func(k *Kong) error { + if run := getMethod(reflect.Indirect(reflect.ValueOf(cmd)), "Run"); !run.IsValid() { + return fmt.Errorf("kong: DynamicCommand %q must be a type with a 'Run' method; got %T", name, cmd) + } + k.dynamicCommands = append(k.dynamicCommands, &dynamicCommand{ name: name, help: help, @@ -152,7 +156,7 @@ func KindMapper(kind reflect.Kind, mapper Mapper) Option { } // ValueMapper registers a mapper to a field value. -func ValueMapper(ptr interface{}, mapper Mapper) Option { +func ValueMapper(ptr any, mapper Mapper) Option { return OptionFunc(func(k *Kong) error { k.registry.RegisterValue(ptr, mapper) return nil @@ -187,7 +191,7 @@ func Writers(stdout, stderr io.Writer) Option { // AfterApply(...) error // // Called before validation/assignment, and immediately after validation/assignment, respectively. -func Bind(args ...interface{}) Option { +func Bind(args ...any) Option { return OptionFunc(func(k *Kong) error { k.bindings.add(args...) return nil @@ -197,18 +201,22 @@ func Bind(args ...interface{}) Option { // BindTo allows binding of implementations to interfaces. // // BindTo(impl, (*iface)(nil)) -func BindTo(impl, iface interface{}) Option { +func BindTo(impl, iface any) Option { return OptionFunc(func(k *Kong) error { k.bindings.addTo(impl, iface) return nil }) } -// BindToProvider allows binding of provider functions. +// BindToProvider binds an injected value to a provider function. +// +// The provider function must have the signature: +// +// func() (any, error) // // This is useful when the Run() function of different commands require different values that may // not all be initialisable from the main() function. -func BindToProvider(provider interface{}) Option { +func BindToProvider(provider any) Option { return OptionFunc(func(k *Kong) error { return k.bindings.addProvider(provider) }) @@ -287,7 +295,7 @@ func AutoGroup(format func(parent Visitable, flag *Flag) *Group) Option { // See also ExplicitGroups for a more structured alternative. type Groups map[string]string -func (g Groups) Apply(k *Kong) error { // nolint: revive +func (g Groups) Apply(k *Kong) error { //nolint: revive for key, info := range g { lines := strings.Split(info, "\n") title := strings.TrimSpace(lines[0]) diff --git a/vendor/github.com/alecthomas/kong/renovate.json5 b/vendor/github.com/alecthomas/kong/renovate.json5 new file mode 100644 index 00000000000..561d59fd876 --- /dev/null +++ b/vendor/github.com/alecthomas/kong/renovate.json5 @@ -0,0 +1,18 @@ +{ + $schema: "https://docs.renovatebot.com/renovate-schema.json", + extends: [ + "config:recommended", + ":semanticCommits", + ":semanticCommitTypeAll(chore)", + ":semanticCommitScope(deps)", + "group:allNonMajor", + "schedule:earlyMondays", // Run once a week. + ], + packageRules: [ + { + "matchPackageNames": ["golangci-lint"], + "matchManagers": ["hermit"], + "enabled": false + }, + ] +} diff --git a/vendor/github.com/alecthomas/kong/resolver.go b/vendor/github.com/alecthomas/kong/resolver.go index ac1de1fccd1..29be1b9189f 100644 --- a/vendor/github.com/alecthomas/kong/resolver.go +++ b/vendor/github.com/alecthomas/kong/resolver.go @@ -14,29 +14,29 @@ type Resolver interface { Validate(app *Application) error // Resolve the value for a Flag. 
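Two behavioural tweaks appear in this options.go hunk: DynamicCommand now fails fast when the supplied command type has no Run method, and BindToProvider documents the required provider signature func() (T, error). A sketch of the provider option; the migration command and the sqlite driver choice are assumptions for illustration:

	package main

	import (
		"database/sql"

		"github.com/alecthomas/kong"
		_ "modernc.org/sqlite" // hypothetical driver, for illustration only
	)

	type MigrateCmd struct{}

	// Run receives *sql.DB from the provider below; per the doc comment, this
	// lets different commands request different values without main() having
	// to initialise them all up front.
	func (m *MigrateCmd) Run(db *sql.DB) error {
		_, err := db.Exec(`CREATE TABLE IF NOT EXISTS t (id INTEGER)`)
		return err
	}

	var cli struct {
		Migrate MigrateCmd `cmd:"" help:"Run migrations."`
	}

	func main() {
		ctx := kong.Parse(&cli, kong.BindToProvider(func() (*sql.DB, error) {
			return sql.Open("sqlite", "app.db")
		}))
		ctx.FatalIfErrorf(ctx.Run())
	}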
- Resolve(context *Context, parent *Path, flag *Flag) (interface{}, error) + Resolve(context *Context, parent *Path, flag *Flag) (any, error) } // ResolverFunc is a convenience type for non-validating Resolvers. -type ResolverFunc func(context *Context, parent *Path, flag *Flag) (interface{}, error) +type ResolverFunc func(context *Context, parent *Path, flag *Flag) (any, error) var _ Resolver = ResolverFunc(nil) -func (r ResolverFunc) Resolve(context *Context, parent *Path, flag *Flag) (interface{}, error) { // nolint: revive +func (r ResolverFunc) Resolve(context *Context, parent *Path, flag *Flag) (any, error) { //nolint: revive return r(context, parent, flag) } -func (r ResolverFunc) Validate(app *Application) error { return nil } // nolint: revive +func (r ResolverFunc) Validate(app *Application) error { return nil } //nolint: revive // JSON returns a Resolver that retrieves values from a JSON source. // // Flag names are used as JSON keys indirectly, by tring snake_case and camelCase variants. func JSON(r io.Reader) (Resolver, error) { - values := map[string]interface{}{} + values := map[string]any{} err := json.NewDecoder(r).Decode(&values) if err != nil { return nil, err } - var f ResolverFunc = func(context *Context, parent *Path, flag *Flag) (interface{}, error) { + var f ResolverFunc = func(context *Context, parent *Path, flag *Flag) (any, error) { name := strings.ReplaceAll(flag.Name, "-", "_") snakeCaseName := snakeCase(flag.Name) raw, ok := values[name] @@ -47,7 +47,7 @@ func JSON(r io.Reader) (Resolver, error) { } raw = values for _, part := range strings.Split(name, ".") { - if values, ok := raw.(map[string]interface{}); ok { + if values, ok := raw.(map[string]any); ok { raw, ok = values[part] if !ok { return nil, nil @@ -63,6 +63,6 @@ func JSON(r io.Reader) (Resolver, error) { } func snakeCase(name string) string { - name = strings.Join(strings.Split(strings.Title(name), "-"), "") //nolint: staticcheck + name = strings.Join(strings.Split(strings.Title(name), "-"), "") return strings.ToLower(name[:1]) + name[1:] } diff --git a/vendor/github.com/alecthomas/kong/scanner.go b/vendor/github.com/alecthomas/kong/scanner.go index 1766c4b20d2..262d16f1cfe 100644 --- a/vendor/github.com/alecthomas/kong/scanner.go +++ b/vendor/github.com/alecthomas/kong/scanner.go @@ -41,7 +41,7 @@ func (t TokenType) String() string { // Token created by Scanner. type Token struct { - Value interface{} + Value any Type TokenType } @@ -82,7 +82,7 @@ func (t Token) InferredType() TokenType { return t.Type } if v, ok := t.Value.(string); ok { - if strings.HasPrefix(v, "--") { // nolint: gocritic + if strings.HasPrefix(v, "--") { //nolint: gocritic return FlagToken } else if v == "-" { return PositionalArgumentToken @@ -109,7 +109,7 @@ func (t Token) IsValue() bool { // // For example, the token "--foo=bar" will be split into the following by the parser: // -// [{FlagToken, "foo"}, {FlagValueToken, "bar"}] +// [{FlagToken, "foo"}, {FlagValueToken, "bar"}] type Scanner struct { args []Token } @@ -171,7 +171,7 @@ func (s *Scanner) PopValue(context string) (Token, error) { // PopValueInto pops a value token into target or returns an error. // // "context" is used to assist the user if the value can not be popped, eg. 
"expected value but got " -func (s *Scanner) PopValueInto(context string, target interface{}) error { +func (s *Scanner) PopValueInto(context string, target any) error { t, err := s.PopValue(context) if err != nil { return err @@ -204,13 +204,13 @@ func (s *Scanner) Peek() Token { } // Push an untyped Token onto the front of the Scanner. -func (s *Scanner) Push(arg interface{}) *Scanner { +func (s *Scanner) Push(arg any) *Scanner { s.PushToken(Token{Value: arg}) return s } // PushTyped pushes a typed token onto the front of the Scanner. -func (s *Scanner) PushTyped(arg interface{}, typ TokenType) *Scanner { +func (s *Scanner) PushTyped(arg any, typ TokenType) *Scanner { s.PushToken(Token{Value: arg, Type: typ}) return s } diff --git a/vendor/github.com/alecthomas/kong/tag.go b/vendor/github.com/alecthomas/kong/tag.go index f99059beab9..a2bc4a978ff 100644 --- a/vendor/github.com/alecthomas/kong/tag.go +++ b/vendor/github.com/alecthomas/kong/tag.go @@ -9,36 +9,51 @@ import ( "unicode/utf8" ) +// PassthroughMode indicates how parameters are passed through when "passthrough" is set. +type PassthroughMode int + +const ( + // PassThroughModeNone indicates passthrough mode is disabled. + PassThroughModeNone PassthroughMode = iota + // PassThroughModeAll indicates that all parameters, including flags, are passed through. It is the default. + PassThroughModeAll + // PassThroughModePartial will validate flags until the first positional argument is encountered, then pass through all remaining positional arguments. + PassThroughModePartial +) + // Tag represents the parsed state of Kong tags in a struct field tag. type Tag struct { - Ignored bool // Field is ignored by Kong. ie. kong:"-" - Cmd bool - Arg bool - Required bool - Optional bool - Name string - Help string - Type string - TypeName string - HasDefault bool - Default string - Format string - PlaceHolder string - Envs []string - Short rune - Hidden bool - Sep rune - MapSep rune - Enum string - Group string - Xor []string - Vars Vars - Prefix string // Optional prefix on anonymous structs. All sub-flags will have this prefix. - EnvPrefix string - Embed bool - Aliases []string - Negatable bool - Passthrough bool + Ignored bool // Field is ignored by Kong. ie. kong:"-" + Cmd bool + Arg bool + Required bool + Optional bool + Name string + Help string + Type string + TypeName string + HasDefault bool + Default string + Format string + PlaceHolder string + Envs []string + Short rune + Hidden bool + Sep rune + MapSep rune + Enum string + Group string + Xor []string + And []string + Vars Vars + Prefix string // Optional prefix on anonymous structs. All sub-flags will have this prefix. + EnvPrefix string + XorPrefix string // Optional prefix on XOR/AND groups. + Embed bool + Aliases []string + Negatable string + Passthrough bool // Deprecated: use PassthroughMode instead. + PassthroughMode PassthroughMode // Storage for all tag keys for arbitrary lookups. 
items map[string][]string @@ -62,7 +77,7 @@ type tagChars struct { var kongChars = tagChars{sep: ',', quote: '\'', assign: '=', needsUnquote: false} var bareChars = tagChars{sep: ' ', quote: '"', assign: ':', needsUnquote: true} -// nolint:gocyclo +//nolint:gocyclo func parseTagItems(tagString string, chr tagChars) (map[string][]string, error) { d := map[string][]string{} key := []rune{} @@ -203,7 +218,7 @@ func parseTag(parent reflect.Value, ft reflect.StructField) (*Tag, error) { return t, nil } -func hydrateTag(t *Tag, typ reflect.Type) error { // nolint: gocyclo +func hydrateTag(t *Tag, typ reflect.Type) error { //nolint: gocyclo var typeName string var isBool bool var isBoolPtr bool @@ -249,14 +264,23 @@ func hydrateTag(t *Tag, typ reflect.Type) error { // nolint: gocyclo for _, xor := range t.GetAll("xor") { t.Xor = append(t.Xor, strings.FieldsFunc(xor, tagSplitFn)...) } + for _, and := range t.GetAll("and") { + t.And = append(t.And, strings.FieldsFunc(and, tagSplitFn)...) + } t.Prefix = t.Get("prefix") t.EnvPrefix = t.Get("envprefix") + t.XorPrefix = t.Get("xorprefix") t.Embed = t.Has("embed") - negatable := t.Has("negatable") - if negatable && !isBool && !isBoolPtr { - return fmt.Errorf("negatable can only be set on booleans") + if t.Has("negatable") { + if !isBool && !isBoolPtr { + return fmt.Errorf("negatable can only be set on booleans") + } + negatable := t.Get("negatable") + if negatable == "" { + negatable = negatableDefault // placeholder for default negation of --no- + } + t.Negatable = negatable } - t.Negatable = negatable aliases := t.Get("aliases") if len(aliases) > 0 { t.Aliases = append(t.Aliases, strings.FieldsFunc(aliases, tagSplitFn)...) @@ -280,6 +304,17 @@ func hydrateTag(t *Tag, typ reflect.Type) error { // nolint: gocyclo return fmt.Errorf("passthrough only makes sense for positional arguments or commands") } t.Passthrough = passthrough + if t.Passthrough { + passthroughMode := t.Get("passthrough") + switch passthroughMode { + case "partial": + t.PassthroughMode = PassThroughModePartial + case "all", "": + t.PassthroughMode = PassThroughModeAll + default: + return fmt.Errorf("invalid passthrough mode %q, must be one of 'partial' or 'all'", passthroughMode) + } + } return nil } diff --git a/vendor/github.com/alecthomas/kong/util.go b/vendor/github.com/alecthomas/kong/util.go index 50b1dfef786..8b7066429b8 100644 --- a/vendor/github.com/alecthomas/kong/util.go +++ b/vendor/github.com/alecthomas/kong/util.go @@ -17,7 +17,7 @@ func (c ConfigFlag) BeforeResolve(kong *Kong, ctx *Context, trace *Path) error { if kong.loader == nil { return fmt.Errorf("kong must be configured with kong.Configuration(...)") } - path := string(ctx.FlagValue(trace.Flag).(ConfigFlag)) // nolint + path := string(ctx.FlagValue(trace.Flag).(ConfigFlag)) //nolint resolver, err := kong.LoadConfig(path) if err != nil { return err diff --git a/vendor/github.com/alecthomas/units/renovate.json5 b/vendor/github.com/alecthomas/units/renovate.json5 index 897864b852f..6bb4acde94a 100644 --- a/vendor/github.com/alecthomas/units/renovate.json5 +++ b/vendor/github.com/alecthomas/units/renovate.json5 @@ -8,4 +8,8 @@ "group:allNonMajor", "schedule:earlyMondays", // Run once a week. 
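hydrateTag above wires the new passthrough modes up to the tag value: `passthrough:"partial"` selects PassThroughModePartial, bare `passthrough` (or "all") keeps the old stop-everything behaviour, and anything else is rejected. Illustratively:

	var cli struct {
		Verbose bool `short:"v" help:"Verbose output."`
		// "partial" keeps validating flags (so --verbose still works) until
		// the first positional argument, then passes the remaining
		// positionals through verbatim; the default "all" stops flag
		// parsing entirely once this positional is reached.
		Args []string `arg:"" optional:"" passthrough:"partial" help:"Command to run."`
	}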
], + postUpdateOptions: [ + "gomodTidy", + "gomodUpdateImportPaths" + ] } diff --git a/vendor/github.com/alicebob/gopher-json/doc.go b/vendor/github.com/alicebob/gopher-json/doc.go index b73aeafd530..8cf73d45a91 100644 --- a/vendor/github.com/alicebob/gopher-json/doc.go +++ b/vendor/github.com/alicebob/gopher-json/doc.go @@ -1,33 +1,35 @@ // Package json is a simple JSON encoder/decoder for gopher-lua. // -// Documentation +// # Documentation // // The following functions are exposed by the library: -// decode(string): Decodes a JSON string. Returns nil and an error string if -// the string could not be decoded. -// encode(value): Encodes a value into a JSON string. Returns nil and an error -// string if the value could not be encoded. +// +// decode(string): Decodes a JSON string. Returns nil and an error string if +// the string could not be decoded. +// encode(value): Encodes a value into a JSON string. Returns nil and an error +// string if the value could not be encoded. // // The following types are supported: // -// Lua | JSON -// ---------+----- -// nil | null -// number | number -// string | string -// table | object: when table is non-empty and has only string keys -// | array: when table is empty, or has only sequential numeric keys -// | starting from 1 +// Lua | JSON +// ---------+----- +// nil | null +// number | number +// string | string +// table | object: when table is non-empty and has only string keys +// | array: when table is empty, or has only sequential numeric keys +// | starting from 1 // // Attempting to encode any other Lua type will result in an error. // -// Example +// # Example // // Below is an example usage of the library: -// import ( -// luajson "layeh.com/gopher-json" -// ) // -// L := lua.NewState() -// luajson.Preload(s) +// import ( +// luajson "layeh.com/gopher-json" +// ) +// +// L := lua.NewState() +// luajson.Preload(L) package json diff --git a/vendor/github.com/alicebob/miniredis/v2/.gitignore b/vendor/github.com/alicebob/miniredis/v2/.gitignore index 7ba06b06ce6..8016b4be34e 100644 --- a/vendor/github.com/alicebob/miniredis/v2/.gitignore +++ b/vendor/github.com/alicebob/miniredis/v2/.gitignore @@ -2,3 +2,5 @@ /integration/dump.rdb *.swp /integration/nodes.conf +.idea/ +miniredis.iml diff --git a/vendor/github.com/alicebob/miniredis/v2/CHANGELOG.md b/vendor/github.com/alicebob/miniredis/v2/CHANGELOG.md index ce28f55acfa..79c1c34752d 100644 --- a/vendor/github.com/alicebob/miniredis/v2/CHANGELOG.md +++ b/vendor/github.com/alicebob/miniredis/v2/CHANGELOG.md @@ -1,6 +1,116 @@ ## Changelog +### v2.34.0 + +- fix ZINTERSTORE where target is one of the source sets +- added support for ZRank and ZRevRank with score (thanks Jeff Howell) +- fix MEMORY subcommand casing (thanks @joshaber) +- use streamCmp in Xtrim (thanks @daniel-cohere) + + +### v2.33.0 + +- minimum Go version is now 1.17 +- fix integer overflow (thanks @wszaranski) +- test against the last BSD redis (7.2.4) +- ignore 'redis.set_repl()' call (thanks @TingluoHuang) +- various build fixes (thanks @wszaranski) +- add StartAddrTLS function (thanks @agriffaut) +- support for the NOMKSTREAM option for XADD (thanks @Jahaja) +- return empty array for SRANDMEMBER on nonexistent key (thanks @WKBae) + + +### v2.32.1 + +- support for SINTERCARD (thanks @s-barr-fetch) +- support for EXPIRETIME and PEXPIRETIME (thanks @wszaranski) +- fix GEO* units to be case insensitive + + +### v2.31.1 + +- support COUNT in SCAN and ZSCAN (thanks @BarakSilverfort) +- support for OBJECT IDLETIME (thanks @nerd2) +- 
support for HRANDFIELD (thanks @sejin-P) + + +### v2.31.0 + +- support for MEMORY USAGE (thanks @davidroman0O) +- test against Redis 7.2.0 +- support for CLIENT SETNAME/GETNAME (thanks @mr-karan) +- fix very small numbers (thanks @zsh1995) +- use the same float-to-string logic real Redis uses + + +### v2.30.5 + +- support SMISMEMBER (thanks @sandyharvie) + + +### v2.30.4 + +- fix ZADD LT/LG (thanks @sejin-P) +- fix COPY (thanks @jerargus) +- quicker SPOP + + +### v2.30.3 + +- fix lua error_reply (thanks @pkierski) +- fix use of blocking functions in lua +- support for ZMSCORE (thanks @lsgndln) +- lua cache (thanks @tonyhb) + + +### v2.30.2 + +- support MINID in XADD (thanks @nathan-cormier) +- support BLMOVE (thanks @sevein) +- fix COMMAND (thanks @pje) +- fix 'XREAD ... $' on a non-existing stream + + +### v2.30.1 + +- support SET NX GET special case + + +### v2.30.0 + +- implement redis 7.0.x (from 6.X). Main changes: + - test against 7.0.7 + - update error messages + - support nx|xx|gt|lt options in [P]EXPIRE[AT] + - update how deleted items are processed in pending queues in streams + + +### v2.23.1 + +- resolve $ to latest ID in XREAD (thanks @josh-hook) +- handle disconnect in blocking functions (thanks @jgirtakovskis) +- fix type conversion bug in redisToLua (thanks Sandy Harvie) +- BRPOP{LPUSH} timeout can be float since 6.0 + + +### v2.23.0 + +- basic INFO support (thanks @kirill-a-belov) +- support COUNT in SSCAN (thanks @Abdi-dd) +- test and support Go 1.19 +- support LPOS (thanks @ianstarz) +- support XPENDING, XGROUP {CREATECONSUMER,DESTROY,DELCONSUMER}, XINFO {CONSUMERS,GROUPS}, XCLAIM (thanks @sandyharvie) + + +### v2.22.0 + +- set miniredis.DumpMaxLineLen to get more Dump() info (thanks @afjoseph) +- fix invalid resposne of COMMAND (thanks @zsh1995) +- fix possibility to generate duplicate IDs in XADD (thanks @readams) +- adds support for XAUTOCLAIM min-idle parameter (thanks @readams) + + ### v2.21.0 - support for GETEX (thanks @dntj) @@ -35,7 +145,7 @@ ### v2.16.1 -- fix ZINTERSTORE with wets (thanks @lingjl2010 and @okhowang) +- fix ZINTERSTORE with sets (thanks @lingjl2010 and @okhowang) - fix exclusive ranges in XRANGE (thanks @joseotoro) diff --git a/vendor/github.com/alicebob/miniredis/v2/Makefile b/vendor/github.com/alicebob/miniredis/v2/Makefile index 2aa4cd2c573..2b5ec3eca9a 100644 --- a/vendor/github.com/alicebob/miniredis/v2/Makefile +++ b/vendor/github.com/alicebob/miniredis/v2/Makefile @@ -1,12 +1,33 @@ -.PHONY: all test testrace int - -all: test - -test: +.PHONY: test +test: ### Run unit tests go test ./... -testrace: +.PHONY: testrace +testrace: ### Run unit tests with race detector go test -race ./... 
-int: - ${MAKE} -C integration all +.PHONY: int +int: ### Run integration tests (doesn't download redis server) + ${MAKE} -C integration int + +.PHONY: ci +ci: ### Run full tests suite (including download and compilation of proper redis server) + ${MAKE} test + ${MAKE} -C integration redis_src/redis-server int + ${MAKE} testrace + +.PHONY: clean +clean: ### Clean integration test files and remove compiled redis from integration/redis_src + ${MAKE} -C integration clean + +.PHONY: help +help: +ifeq ($(UNAME), Linux) + @grep -P '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | \ + awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' +else + @# this is not tested, but prepared in advance for you, Mac drivers + @awk -F ':.*###' '$$0 ~ FS {printf "%15s%s\n", $$1 ":", $$2}' \ + $(MAKEFILE_LIST) | grep -v '@awk' | sort +endif + diff --git a/vendor/github.com/alicebob/miniredis/v2/README.md b/vendor/github.com/alicebob/miniredis/v2/README.md index 60072f4e0d8..5d4e4d8ba76 100644 --- a/vendor/github.com/alicebob/miniredis/v2/README.md +++ b/vendor/github.com/alicebob/miniredis/v2/README.md @@ -39,15 +39,17 @@ Implemented commands: - EXISTS - EXPIRE - EXPIREAT + - EXPIRETIME - KEYS - MOVE - PERSIST - PEXPIRE - PEXPIREAT + - PEXPIRETIME - PTTL + - RANDOMKEY -- see m.Seed(...) - RENAME - RENAMENX - - RANDOMKEY -- see m.Seed(...) - SCAN - TOUCH - TTL @@ -64,6 +66,8 @@ Implemented commands: - FLUSHALL - FLUSHDB - TIME -- returns time.Now() or value set by SetTime() + - COMMAND -- partly + - INFO -- partly, returns only "clients" section with one field "connected_clients" - String keys (complete) - APPEND - BITCOUNT @@ -101,6 +105,7 @@ Implemented commands: - HLEN - HMGET - HMSET + - HRANDFIELD - HSET - HSETNX - HSTRLEN @@ -125,6 +130,7 @@ Implemented commands: - RPUSH - RPUSHX - LMOVE + - BLMOVE - Pub/Sub (complete) - PSUBSCRIBE - PUBLISH @@ -139,20 +145,23 @@ Implemented commands: - SDIFFSTORE - SINTER - SINTERSTORE + - SINTERCARD - SISMEMBER - SMEMBERS + - SMISMEMBER - SMOVE - SPOP -- see m.Seed(...) - SRANDMEMBER -- see m.Seed(...) - SREM + - SSCAN - SUNION - SUNIONSTORE - - SSCAN - Sorted Set keys (complete) - ZADD - ZCARD - ZCOUNT - ZINCRBY + - ZINTER - ZINTERSTORE - ZLEXCOUNT - ZPOPMIN @@ -178,9 +187,15 @@ Implemented commands: - XACK - XADD - XAUTOCLAIM + - XCLAIM - XDEL - XGROUP CREATE + - XGROUP CREATECONSUMER + - XGROUP DESTROY + - XGROUP DELCONSUMER - XINFO STREAM -- partly + - XINFO GROUPS + - XINFO CONSUMERS -- partly - XLEN - XRANGE - XREAD @@ -203,8 +218,6 @@ Implemented commands: - GEORADIUS_RO - GEORADIUSBYMEMBER - GEORADIUSBYMEMBER_RO - - Server - - COMMAND -- partly - Cluster - CLUSTER SLOTS - CLUSTER KEYSLOT @@ -302,7 +315,6 @@ Commands which will probably not be implemented: - ~~CLIENT *~~ - ~~CONFIG *~~ - ~~DEBUG *~~ - - ~~INFO~~ - ~~LASTSAVE~~ - ~~MONITOR~~ - ~~ROLE~~ @@ -315,7 +327,7 @@ Commands which will probably not be implemented: ## &c. -Integration tests are run against Redis 6.2.6. The [./integration](./integration/) subdir +Integration tests are run against Redis 7.2.4. The [./integration](./integration/) subdir compares miniredis against a real redis instance. The Redis 6 RESP3 protocol is supported. If there are problems, please open @@ -325,5 +337,4 @@ If you want to test Redis Sentinel have a look at [minisentinel](https://github. A changelog is kept at [CHANGELOG.md](https://github.com/alicebob/miniredis/blob/master/CHANGELOG.md). 
-[![Build Status](https://travis-ci.com/alicebob/miniredis.svg?branch=master)](https://travis-ci.com/alicebob/miniredis) [![Go Reference](https://pkg.go.dev/badge/github.com/alicebob/miniredis/v2.svg)](https://pkg.go.dev/github.com/alicebob/miniredis/v2) diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_client.go b/vendor/github.com/alicebob/miniredis/v2/cmd_client.go new file mode 100644 index 00000000000..ca9fcd9a4d4 --- /dev/null +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_client.go @@ -0,0 +1,68 @@ +package miniredis + +import ( + "fmt" + "strings" + + "github.com/alicebob/miniredis/v2/server" +) + +// commandsClient handles client operations. +func commandsClient(m *Miniredis) { + m.srv.Register("CLIENT", m.cmdClient) +} + +// CLIENT +func (m *Miniredis) cmdClient(c *server.Peer, cmd string, args []string) { + if len(args) == 0 { + setDirty(c) + c.WriteError("ERR wrong number of arguments for 'client' command") + return + } + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + switch cmd := strings.ToUpper(args[0]); cmd { + case "SETNAME": + m.cmdClientSetName(c, args[1:]) + case "GETNAME": + m.cmdClientGetName(c, args[1:]) + default: + setDirty(c) + c.WriteError(fmt.Sprintf("ERR unknown subcommand '%s'. Try CLIENT HELP.", cmd)) + } + }) +} + +// CLIENT SETNAME +func (m *Miniredis) cmdClientSetName(c *server.Peer, args []string) { + if len(args) != 1 { + setDirty(c) + c.WriteError("ERR wrong number of arguments for 'client setname' command") + return + } + + name := args[0] + if strings.ContainsAny(name, " \n") { + setDirty(c) + c.WriteError("ERR Client names cannot contain spaces, newlines or special characters.") + return + + } + c.ClientName = name + c.WriteOK() +} + +// CLIENT GETNAME +func (m *Miniredis) cmdClientGetName(c *server.Peer, args []string) { + if len(args) > 0 { + setDirty(c) + c.WriteError("ERR wrong number of arguments for 'client getname' command") + return + } + + if c.ClientName == "" { + c.WriteNull() + } else { + c.WriteBulk(c.ClientName) + } +} diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_cluster.go b/vendor/github.com/alicebob/miniredis/v2/cmd_cluster.go index 083c4ecf7d7..9951f3dd3b7 100644 --- a/vendor/github.com/alicebob/miniredis/v2/cmd_cluster.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_cluster.go @@ -4,13 +4,14 @@ package miniredis import ( "fmt" - "github.com/alicebob/miniredis/v2/server" "strings" + + "github.com/alicebob/miniredis/v2/server" ) // commandsCluster handles some cluster operations. 
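cmd_client.go above implements CLIENT SETNAME/GETNAME per connection, rejecting names containing spaces or newlines. A hedged test sketch of the new behaviour, assuming the go-redis v9 client (any RESP client works):

	import (
		"context"
		"testing"

		"github.com/alicebob/miniredis/v2"
		"github.com/redis/go-redis/v9"
	)

	func TestClientName(t *testing.T) {
		m := miniredis.RunT(t) // starts a miniredis server, cleaned up with the test

		// PoolSize 1 keeps both commands on the same connection, since
		// client names are per-connection state.
		rdb := redis.NewClient(&redis.Options{Addr: m.Addr(), PoolSize: 1})
		defer rdb.Close()

		ctx := context.Background()
		if err := rdb.Do(ctx, "CLIENT", "SETNAME", "worker-1").Err(); err != nil {
			t.Fatal(err)
		}
		name, err := rdb.Do(ctx, "CLIENT", "GETNAME").Text()
		if err != nil || name != "worker-1" {
			t.Fatalf("got %q, %v", name, err)
		}
	}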
func commandsCluster(m *Miniredis) { - _ = m.srv.Register("CLUSTER", m.cmdCluster) + m.srv.Register("CLUSTER", m.cmdCluster) } func (m *Miniredis) cmdCluster(c *server.Peer, cmd string, args []string) { @@ -51,14 +52,14 @@ func (m *Miniredis) cmdClusterSlots(c *server.Peer, cmd string, args []string) { }) } -//CLUSTER KEYSLOT +// CLUSTER KEYSLOT func (m *Miniredis) cmdClusterKeySlot(c *server.Peer, cmd string, args []string) { withTx(m, c, func(c *server.Peer, ctx *connCtx) { c.WriteInt(163) }) } -//CLUSTER NODES +// CLUSTER NODES func (m *Miniredis) cmdClusterNodes(c *server.Peer, cmd string, args []string) { withTx(m, c, func(c *server.Peer, ctx *connCtx) { c.WriteBulk("e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 127.0.0.1:7000@7000 myself,master - 0 0 1 connected 0-16383") diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_command.go b/vendor/github.com/alicebob/miniredis/v2/cmd_command.go index 33c691c193e..8f73b2bac42 100644 --- a/vendor/github.com/alicebob/miniredis/v2/cmd_command.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_command.go @@ -4,2046 +4,11 @@ package miniredis import "github.com/alicebob/miniredis/v2/server" -func commandsCommand(m *Miniredis) { - _ = m.srv.Register("COMMAND", m.cmdCommand) -} - func (m *Miniredis) cmdCommand(c *server.Peer, cmd string, args []string) { // Got from redis 5.0.7 with // echo 'COMMAND' | nc redis_addr redis_port - // - res := ` -*200 -*6 -$12 -hincrbyfloat -:4 -*3 -+write -+denyoom -+fast -:1 -:1 -:1 -*6 -$10 -xreadgroup -:-7 -*3 -+write -+noscript -+movablekeys -:1 -:1 -:1 -*6 -$10 -sdiffstore -:-3 -*2 -+write -+denyoom -:1 -:-1 -:1 -*6 -$8 -lastsave -:1 -*2 -+random -+fast -:0 -:0 -:0 -*6 -$5 -setnx -:3 -*3 -+write -+denyoom -+fast -:1 -:1 -:1 -*6 -$8 -bzpopmax -:-3 -*3 -+write -+noscript -+fast -:1 -:-2 -:1 -*6 -$12 -punsubscribe -:-1 -*4 -+pubsub -+noscript -+loading -+stale -:0 -:0 -:0 -*6 -$4 -xack -:-4 -*2 -+write -+fast -:1 -:1 -:1 -*6 -$10 -pfselftest -:1 -*1 -+admin -:0 -:0 -:0 -*6 -$6 -substr -:4 -*1 -+readonly -:1 -:1 -:1 -*6 -$8 -smembers -:2 -*2 -+readonly -+sort_for_script -:1 -:1 -:1 -*6 -$11 -unsubscribe -:-1 -*4 -+pubsub -+noscript -+loading -+stale -:0 -:0 -:0 -*6 -$11 -zinterstore -:-4 -*3 -+write -+denyoom -+movablekeys -:0 -:0 -:0 -*6 -$6 -strlen -:2 -*2 -+readonly -+fast -:1 -:1 -:1 -*6 -$7 -pfmerge -:-2 -*2 -+write -+denyoom -:1 -:-1 -:1 -*6 -$9 -randomkey -:1 -*2 -+readonly -+random -:0 -:0 -:0 -*6 -$6 -lolwut -:-1 -*1 -+readonly -:0 -:0 -:0 -*6 -$4 -rpop -:2 -*2 -+write -+fast -:1 -:1 -:1 -*6 -$5 -hkeys -:2 -*2 -+readonly -+sort_for_script -:1 -:1 -:1 -*6 -$6 -client -:-2 -*2 -+admin -+noscript -:0 -:0 -:0 -*6 -$6 -module -:-2 -*2 -+admin -+noscript -:0 -:0 -:0 -*6 -$7 -slowlog -:-2 -*2 -+admin -+random -:0 -:0 -:0 -*6 -$7 -geohash -:-2 -*1 -+readonly -:1 -:1 -:1 -*6 -$6 -lrange -:4 -*1 -+readonly -:1 -:1 -:1 -*6 -$4 -ping -:-1 -*2 -+stale -+fast -:0 -:0 -:0 -*6 -$8 -bitcount -:-2 -*1 -+readonly -:1 -:1 -:1 -*6 -$6 -pubsub -:-2 -*4 -+pubsub -+random -+loading -+stale -:0 -:0 -:0 -*6 -$4 -role -:1 -*3 -+noscript -+loading -+stale -:0 -:0 -:0 -*6 -$4 -hget -:3 -*2 -+readonly -+fast -:1 -:1 -:1 -*6 -$6 -object -:-2 -*2 -+readonly -+random -:2 -:2 -:1 -*6 -$9 -zrevrange -:-4 -*1 -+readonly -:1 -:1 -:1 -*6 -$7 -hincrby -:4 -*3 -+write -+denyoom -+fast -:1 -:1 -:1 -*6 -$9 -zlexcount -:4 -*2 -+readonly -+fast -:1 -:1 -:1 -*6 -$5 -scard -:2 -*2 -+readonly -+fast -:1 -:1 -:1 -*6 -$6 -append -:3 -*2 -+write -+denyoom -:1 -:1 -:1 -*6 -$7 -hstrlen -:3 -*2 -+readonly -+fast -:1 -:1 -:1 -*6 -$6 -config -:-2 -*4 
-+admin -+noscript -+loading -+stale -:0 -:0 -:0 -*6 -$4 -hset -:-4 -*3 -+write -+denyoom -+fast -:1 -:1 -:1 -*6 -$16 -zrevrangebyscore -:-4 -*1 -+readonly -:1 -:1 -:1 -*6 -$4 -incr -:2 -*3 -+write -+denyoom -+fast -:1 -:1 -:1 -*6 -$6 -setbit -:4 -*2 -+write -+denyoom -:1 -:1 -:1 -*6 -$9 -rpoplpush -:3 -*2 -+write -+denyoom -:1 -:2 -:1 -*6 -$6 -xclaim -:-6 -*3 -+write -+random -+fast -:1 -:1 -:1 -*6 -$11 -sinterstore -:-3 -*2 -+write -+denyoom -:1 -:-1 -:1 -*6 -$7 -publish -:3 -*4 -+pubsub -+loading -+stale -+fast -:0 -:0 -:0 -*6 -$5 -hscan -:-3 -*2 -+readonly -+random -:1 -:1 -:1 -*6 -$5 -multi -:1 -*2 -+noscript -+fast -:0 -:0 -:0 -*6 -$3 -set -:-3 -*2 -+write -+denyoom -:1 -:1 -:1 -*6 -$6 -lpushx -:-3 -*3 -+write -+denyoom -+fast -:1 -:1 -:1 -*6 -$16 -zremrangebyscore -:4 -*1 -+write -:1 -:1 -:1 -*6 -$9 -pexpireat -:3 -*2 -+write -+fast -:1 -:1 -:1 -*6 -$4 -hdel -:-3 -*2 -+write -+fast -:1 -:1 -:1 -*6 -$12 -bgrewriteaof -:1 -*2 -+admin -+noscript -:0 -:0 -:0 -*6 -$7 -migrate -:-6 -*3 -+write -+random -+movablekeys -:0 -:0 -:0 -*6 -$9 -replicaof -:3 -*3 -+admin -+noscript -+stale -:0 -:0 -:0 -*6 -$5 -touch -:-2 -*2 -+readonly -+fast -:1 -:1 -:1 -*6 -$6 -xsetid -:3 -*3 -+write -+denyoom -+fast -:1 -:1 -:1 -*6 -$5 -bitop -:-4 -*2 -+write -+denyoom -:2 -:-1 -:1 -*6 -$6 -swapdb -:3 -*2 -+write -+fast -:0 -:0 -:0 -*6 -$5 -sdiff -:-2 -*2 -+readonly -+sort_for_script -:1 -:-1 -:1 -*6 -$6 -lindex -:3 -*1 -+readonly -:1 -:1 -:1 -*6 -$4 -wait -:3 -*1 -+noscript -:0 -:0 -:0 -*6 -$4 -lrem -:4 -*1 -+write -:1 -:1 -:1 -*6 -$6 -hsetnx -:4 -*3 -+write -+denyoom -+fast -:1 -:1 -:1 -*6 -$8 -getrange -:4 -*1 -+readonly -:1 -:1 -:1 -*6 -$4 -hlen -:2 -*2 -+readonly -+fast -:1 -:1 -:1 -*6 -$4 -post -:-1 -*2 -+loading -+stale -:0 -:0 -:0 -*6 -$9 -sismember -:3 -*2 -+readonly -+fast -:1 -:1 -:1 -*6 -$7 -unwatch -:1 -*2 -+noscript -+fast -:0 -:0 -:0 -*6 -$5 -lpush -:-3 -*3 -+write -+denyoom -+fast -:1 -:1 -:1 -*6 -$4 -scan -:-2 -*2 -+readonly -+random -:0 -:0 -:0 -*6 -$5 -smove -:4 -*2 -+write -+fast -:1 -:2 -:1 -*6 -$7 -cluster -:-2 -*1 -+admin -:0 -:0 -:0 -*6 -$6 -bgsave -:-1 -*2 -+admin -+noscript -:0 -:0 -:0 -*6 -$4 -dump -:2 -*2 -+readonly -+random -:1 -:1 -:1 -*6 -$7 -latency -:-2 -*4 -+admin -+noscript -+loading -+stale -:0 -:0 -:0 -*6 -$8 -bzpopmin -:-3 -*3 -+write -+noscript -+fast -:1 -:-2 -:1 -*6 -$6 -getbit -:3 -*2 -+readonly -+fast -:1 -:1 -:1 -*6 -$7 -hgetall -:2 -*2 -+readonly -+random -:1 -:1 -:1 -*6 -$6 -rename -:3 -*1 -+write -:1 -:2 -:1 -*6 -$9 -subscribe -:-2 -*4 -+pubsub -+noscript -+loading -+stale -:0 -:0 -:0 -*6 -$4 -xdel -:-3 -*2 -+write -+fast -:1 -:1 -:1 -*6 -$15 -zremrangebyrank -:4 -*1 -+write -:1 -:1 -:1 -*6 -$4 -type -:2 -*2 -+readonly -+fast -:1 -:1 -:1 -*6 -$6 -script -:-2 -*1 -+noscript -:0 -:0 -:0 -*6 -$5 -hmset -:-4 -*3 -+write -+denyoom -+fast -:1 -:1 -:1 -*6 -$6 -sunion -:-2 -*2 -+readonly -+sort_for_script -:1 -:-1 -:1 -*6 -$4 -mget -:-2 -*2 -+readonly -+fast -:1 -:-1 -:1 -*6 -$10 -brpoplpush -:4 -*3 -+write -+denyoom -+noscript -:1 -:2 -:1 -*6 -$6 -geoadd -:-5 -*2 -+write -+denyoom -:1 -:1 -:1 -*6 -$6 -decrby -:3 -*3 -+write -+denyoom -+fast -:1 -:1 -:1 -*6 -$4 -echo -:2 -*1 -+fast -:0 -:0 -:0 -*6 -$6 -dbsize -:1 -*2 -+readonly -+fast -:0 -:0 -:0 -*6 -$5 -zcard -:2 -*2 -+readonly -+fast -:1 -:1 -:1 -*6 -$6 -select -:2 -*2 -+loading -+fast -:0 -:0 -:0 -*6 -$4 -sadd -:-3 -*3 -+write -+denyoom -+fast -:1 -:1 -:1 -*6 -$5 -host: -:-1 -*2 -+loading -+stale -:0 -:0 -:0 -*6 -$5 -sscan -:-3 -*2 -+readonly -+random -:1 -:1 -:1 -*6 -$12 -georadius_ro -:-6 -*2 -+readonly -+movablekeys 
-:1 -:1 -:1 -*6 -$7 -monitor -:1 -*2 -+admin -+noscript -:0 -:0 -:0 -*6 -$14 -zremrangebylex -:4 -*1 -+write -:1 -:1 -:1 -*6 -$11 -sunionstore -:-3 -*2 -+write -+denyoom -:1 -:-1 -:1 -*6 -$5 -zscan -:-3 -*2 -+readonly -+random -:1 -:1 -:1 -*6 -$9 -readwrite -:1 -*1 -+fast -:0 -:0 -:0 -*6 -$6 -xgroup -:-2 -*2 -+write -+denyoom -:2 -:2 -:1 -*6 -$5 -setex -:4 -*2 -+write -+denyoom -:1 -:1 -:1 -*6 -$4 -save -:1 -*2 -+admin -+noscript -:0 -:0 -:0 -*6 -$5 -hvals -:2 -*2 -+readonly -+sort_for_script -:1 -:1 -:1 -*6 -$5 -watch -:-2 -*2 -+noscript -+fast -:1 -:-1 -:1 -*6 -$7 -hexists -:3 -*2 -+readonly -+fast -:1 -:1 -:1 -*6 -$4 -info -:-1 -*3 -+random -+loading -+stale -:0 -:0 -:0 -*6 -$5 -psync -:3 -*3 -+readonly -+admin -+noscript -:0 -:0 -:0 -*6 -$11 -zrangebylex -:-4 -*1 -+readonly -:1 -:1 -:1 -*6 -$4 -zadd -:-4 -*3 -+write -+denyoom -+fast -:1 -:1 -:1 -*6 -$4 -xlen -:2 -*2 -+readonly -+fast -:1 -:1 -:1 -*6 -$4 -auth -:2 -*4 -+noscript -+loading -+stale -+fast -:0 -:0 -:0 -*6 -$4 -srem -:-3 -*2 -+write -+fast -:1 -:1 -:1 -*6 -$9 -georadius -:-6 -*2 -+write -+movablekeys -:1 -:1 -:1 -*6 -$4 -exec -:1 -*2 -+noscript -+skip_monitor -:0 -:0 -:0 -*6 -$7 -pfcount -:-2 -*1 -+readonly -:1 -:-1 -:1 -*6 -$7 -zpopmin -:-2 -*2 -+write -+fast -:1 -:1 -:1 -*6 -$4 -move -:3 -*2 -+write -+fast -:1 -:1 -:1 -*6 -$5 -xtrim -:-2 -*3 -+write -+random -+fast -:1 -:1 -:1 -*6 -$6 -asking -:1 -*1 -+fast -:0 -:0 -:0 -*6 -$4 -pttl -:2 -*3 -+readonly -+random -+fast -:1 -:1 -:1 -*6 -$11 -srandmember -:-2 -*2 -+readonly -+random -:1 -:1 -:1 -*6 -$8 -flushall -:-1 -*1 -+write -:0 -:0 -:0 -*6 -$4 -sort -:-2 -*3 -+write -+denyoom -+movablekeys -:1 -:1 -:1 -*6 -$3 -del -:-2 -*1 -+write -:1 -:-1 -:1 -*6 -$14 -restore-asking -:-4 -*3 -+write -+denyoom -+asking -:1 -:1 -:1 -*6 -$10 -psubscribe -:-2 -*4 -+pubsub -+noscript -+loading -+stale -:0 -:0 -:0 -*6 -$4 -decr -:2 -*3 -+write -+denyoom -+fast -:1 -:1 -:1 -*6 -$6 -incrby -:3 -*3 -+write -+denyoom -+fast -:1 -:1 -:1 -*6 -$14 -zrevrangebylex -:-4 -*1 -+readonly -:1 -:1 -:1 -*6 -$8 -bitfield -:-2 -*2 -+write -+denyoom -:1 -:1 -:1 -*6 -$6 -exists -:-2 -*2 -+readonly -+fast -:1 -:-1 -:1 -*6 -$8 -replconf -:-1 -*4 -+admin -+noscript -+loading -+stale -:0 -:0 -:0 -*6 -$7 -zincrby -:4 -*3 -+write -+denyoom -+fast -:1 -:1 -:1 -*6 -$5 -blpop -:-3 -*2 -+write -+noscript -:1 -:-2 -:1 -*6 -$4 -lpop -:2 -*2 -+write -+fast -:1 -:1 -:1 -*6 -$3 -ttl -:2 -*3 -+readonly -+random -+fast -:1 -:1 -:1 -*6 -$5 -xread -:-4 -*3 -+readonly -+noscript -+movablekeys -:1 -:1 -:1 -*6 -$5 -rpush -:-3 -*3 -+write -+denyoom -+fast -:1 -:1 -:1 -*6 -$8 -zrevrank -:3 -*2 -+readonly -+fast -:1 -:1 -:1 -*6 -$11 -incrbyfloat -:3 -*3 -+write -+denyoom -+fast -:1 -:1 -:1 -*6 -$5 -brpop -:-3 -*2 -+write -+noscript -:1 -:-2 -:1 -*6 -$4 -xadd -:-5 -*4 -+write -+denyoom -+random -+fast -:1 -:1 -:1 -*6 -$8 -setrange -:4 -*2 -+write -+denyoom -:1 -:1 -:1 -*6 -$17 -georadiusbymember -:-5 -*2 -+write -+movablekeys -:1 -:1 -:1 -*6 -$6 -unlink -:-2 -*2 -+write -+fast -:1 -:-1 -:1 -*6 -$8 -expireat -:3 -*2 -+write -+fast -:1 -:1 -:1 -*6 -$5 -debug -:-2 -*2 -+admin -+noscript -:0 -:0 -:0 -*6 -$20 -georadiusbymember_ro -:-5 -*2 -+readonly -+movablekeys -:1 -:1 -:1 -*6 -$4 -lset -:4 -*2 -+write -+denyoom -:1 -:1 -:1 -*6 -$6 -zscore -:3 -*2 -+readonly -+fast -:1 -:1 -:1 -*6 -$4 -llen -:2 -*2 -+readonly -+fast -:1 -:1 -:1 -*6 -$4 -time -:1 -*2 -+random -+fast -:0 -:0 -:0 -*6 -$8 -shutdown -:-1 -*4 -+admin -+noscript -+loading -+stale -:0 -:0 -:0 -*6 -$7 -evalsha -:-3 -*2 -+noscript -+movablekeys -:0 -:0 -:0 -*6 -$6 -zcount -:4 -*2 
-+readonly -+fast -:1 -:1 -:1 -*6 -$6 -memory -:-2 -*2 -+readonly -+random -:0 -:0 -:0 -*6 -$5 -xinfo -:-2 -*2 -+readonly -+random -:2 -:2 -:1 -*6 -$8 -xpending -:-3 -*2 -+readonly -+random -:1 -:1 -:1 -*6 -$4 -eval -:-3 -*2 -+noscript -+movablekeys -:0 -:0 -:0 -*6 -$6 -xrange -:-4 -*1 -+readonly -:1 -:1 -:1 -*6 -$7 -restore -:-4 -*2 -+write -+denyoom -:1 -:1 -:1 -*6 -$7 -zpopmax -:-2 -*2 -+write -+fast -:1 -:1 -:1 -*6 -$4 -mset -:-3 -*2 -+write -+denyoom -:1 -:-1 -:2 -*6 -$4 -spop -:-2 -*3 -+write -+random -+fast -:1 -:1 -:1 -*6 -$5 -ltrim -:4 -*1 -+write -:1 -:1 -:1 -*6 -$5 -zrank -:3 -*2 -+readonly -+fast -:1 -:1 -:1 -*6 -$9 -xrevrange -:-4 -*1 -+readonly -:1 -:1 -:1 -*6 -$3 -get -:2 -*2 -+readonly -+fast -:1 -:1 -:1 -*6 -$7 -flushdb -:-1 -*1 -+write -:0 -:0 -:0 -*6 -$5 -hmget -:-3 -*2 -+readonly -+fast -:1 -:1 -:1 -*6 -$6 -msetnx -:-3 -*2 -+write -+denyoom -:1 -:-1 -:2 -*6 -$7 -persist -:2 -*2 -+write -+fast -:1 -:1 -:1 -*6 -$11 -zunionstore -:-4 -*3 -+write -+denyoom -+movablekeys -:0 -:0 -:0 -*6 -$7 -command -:0 -*3 -+random -+loading -+stale -:0 -:0 -:0 -*6 -$8 -renamenx -:3 -*2 -+write -+fast -:1 -:2 -:1 -*6 -$6 -zrange -:-4 -*1 -+readonly -:1 -:1 -:1 -*6 -$7 -pexpire -:3 -*2 -+write -+fast -:1 -:1 -:1 -*6 -$4 -keys -:2 -*2 -+readonly -+sort_for_script -:0 -:0 -:0 -*6 -$4 -zrem -:-3 -*2 -+write -+fast -:1 -:1 -:1 -*6 -$5 -pfadd -:-2 -*3 -+write -+denyoom -+fast -:1 -:1 -:1 -*6 -$6 -psetex -:4 -*2 -+write -+denyoom -:1 -:1 -:1 -*6 -$13 -zrangebyscore -:-4 -*1 -+readonly -:1 -:1 -:1 -*6 -$4 -sync -:1 -*3 -+readonly -+admin -+noscript -:0 -:0 -:0 -*6 -$7 -pfdebug -:-3 -*1 -+write -:0 -:0 -:0 -*6 -$7 -discard -:1 -*2 -+noscript -+fast -:0 -:0 -:0 -*6 -$8 -readonly -:1 -*1 -+fast -:0 -:0 -:0 -*6 -$7 -geodist -:-4 -*1 -+readonly -:1 -:1 -:1 -*6 -$6 -geopos -:-2 -*1 -+readonly -:1 -:1 -:1 -*6 -$6 -bitpos -:-3 -*1 -+readonly -:1 -:1 -:1 -*6 -$6 -sinter -:-2 -*2 -+readonly -+sort_for_script -:1 -:-1 -:1 -*6 -$6 -getset -:3 -*2 -+write -+denyoom -:1 -:1 -:1 -*6 -$7 -slaveof -:3 -*3 -+admin -+noscript -+stale -:0 -:0 -:0 -*6 -$6 -rpushx -:-3 -*3 -+write -+denyoom -+fast -:1 -:1 -:1 -*6 -$7 -linsert -:5 -*2 -+write -+denyoom -:1 -:1 -:1 -*6 -$6 -expire -:3 -*2 -+write -+fast -:1 -:1 -:1 - ` - c.WriteBulk(res) + res := 
"*200\r\n*6\r\n$12\r\nhincrbyfloat\r\n:4\r\n*3\r\n+write\r\n+denyoom\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$10\r\nxreadgroup\r\n:-7\r\n*3\r\n+write\r\n+noscript\r\n+movablekeys\r\n:1\r\n:1\r\n:1\r\n*6\r\n$10\r\nsdiffstore\r\n:-3\r\n*2\r\n+write\r\n+denyoom\r\n:1\r\n:-1\r\n:1\r\n*6\r\n$8\r\nlastsave\r\n:1\r\n*2\r\n+random\r\n+fast\r\n:0\r\n:0\r\n:0\r\n*6\r\n$5\r\nsetnx\r\n:3\r\n*3\r\n+write\r\n+denyoom\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$8\r\nbzpopmax\r\n:-3\r\n*3\r\n+write\r\n+noscript\r\n+fast\r\n:1\r\n:-2\r\n:1\r\n*6\r\n$12\r\npunsubscribe\r\n:-1\r\n*4\r\n+pubsub\r\n+noscript\r\n+loading\r\n+stale\r\n:0\r\n:0\r\n:0\r\n*6\r\n$4\r\nxack\r\n:-4\r\n*2\r\n+write\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$10\r\npfselftest\r\n:1\r\n*1\r\n+admin\r\n:0\r\n:0\r\n:0\r\n*6\r\n$6\r\nsubstr\r\n:4\r\n*1\r\n+readonly\r\n:1\r\n:1\r\n:1\r\n*6\r\n$8\r\nsmembers\r\n:2\r\n*2\r\n+readonly\r\n+sort_for_script\r\n:1\r\n:1\r\n:1\r\n*6\r\n$11\r\nunsubscribe\r\n:-1\r\n*4\r\n+pubsub\r\n+noscript\r\n+loading\r\n+stale\r\n:0\r\n:0\r\n:0\r\n*6\r\n$11\r\nzinterstore\r\n:-4\r\n*3\r\n+write\r\n+denyoom\r\n+movablekeys\r\n:0\r\n:0\r\n:0\r\n*6\r\n$6\r\nstrlen\r\n:2\r\n*2\r\n+readonly\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$7\r\npfmerge\r\n:-2\r\n*2\r\n+write\r\n+denyoom\r\n:1\r\n:-1\r\n:1\r\n*6\r\n$9\r\nrandomkey\r\n:1\r\n*2\r\n+readonly\r\n+random\r\n:0\r\n:0\r\n:0\r\n*6\r\n$6\r\nlolwut\r\n:-1\r\n*1\r\n+readonly\r\n:0\r\n:0\r\n:0\r\n*6\r\n$4\r\nrpop\r\n:2\r\n*2\r\n+write\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$5\r\nhkeys\r\n:2\r\n*2\r\n+readonly\r\n+sort_for_script\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nclient\r\n:-2\r\n*2\r\n+admin\r\n+noscript\r\n:0\r\n:0\r\n:0\r\n*6\r\n$6\r\nmodule\r\n:-2\r\n*2\r\n+admin\r\n+noscript\r\n:0\r\n:0\r\n:0\r\n*6\r\n$7\r\nslowlog\r\n:-2\r\n*2\r\n+admin\r\n+random\r\n:0\r\n:0\r\n:0\r\n*6\r\n$7\r\ngeohash\r\n:-2\r\n*1\r\n+readonly\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nlrange\r\n:4\r\n*1\r\n+readonly\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\nping\r\n:-1\r\n*2\r\n+stale\r\n+fast\r\n:0\r\n:0\r\n:0\r\n*6\r\n$8\r\nbitcount\r\n:-2\r\n*1\r\n+readonly\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\npubsub\r\n:-2\r\n*4\r\n+pubsub\r\n+random\r\n+loading\r\n+stale\r\n:0\r\n:0\r\n:0\r\n*6\r\n$4\r\nrole\r\n:1\r\n*3\r\n+noscript\r\n+loading\r\n+stale\r\n:0\r\n:0\r\n:0\r\n*6\r\n$4\r\nhget\r\n:3\r\n*2\r\n+readonly\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nobject\r\n:-2\r\n*2\r\n+readonly\r\n+random\r\n:2\r\n:2\r\n:1\r\n*6\r\n$9\r\nzrevrange\r\n:-4\r\n*1\r\n+readonly\r\n:1\r\n:1\r\n:1\r\n*6\r\n$7\r\nhincrby\r\n:4\r\n*3\r\n+write\r\n+denyoom\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$9\r\nzlexcount\r\n:4\r\n*2\r\n+readonly\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$5\r\nscard\r\n:2\r\n*2\r\n+readonly\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nappend\r\n:3\r\n*2\r\n+write\r\n+denyoom\r\n:1\r\n:1\r\n:1\r\n*6\r\n$7\r\nhstrlen\r\n:3\r\n*2\r\n+readonly\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nconfig\r\n:-2\r\n*4\r\n+admin\r\n+noscript\r\n+loading\r\n+stale\r\n:0\r\n:0\r\n:0\r\n*6\r\n$4\r\nhset\r\n:-4\r\n*3\r\n+write\r\n+denyoom\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$16\r\nzrevrangebyscore\r\n:-4\r\n*1\r\n+readonly\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\nincr\r\n:2\r\n*3\r\n+write\r\n+denyoom\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nsetbit\r\n:4\r\n*2\r\n+write\r\n+denyoom\r\n:1\r\n:1\r\n:1\r\n*6\r\n$9\r\nrpoplpush\r\n:3\r\n*2\r\n+write\r\n+denyoom\r\n:1\r\n:2\r\n:1\r\n*6\r\n$6\r\nxclaim\r\n:-6\r\n*3\r\n+write\r\n+random\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$11\r\nsinterstore\r\n:-3\r\n*2\r\n+write\r\n+denyoom\r\n:1\r\n:-1\r\n:1\r\n*6\r\n$7\r\npublish\r\n:3\r\n*4\r\n+pubsub\r\n+loading
\r\n+stale\r\n+fast\r\n:0\r\n:0\r\n:0\r\n*6\r\n$5\r\nhscan\r\n:-3\r\n*2\r\n+readonly\r\n+random\r\n:1\r\n:1\r\n:1\r\n*6\r\n$5\r\nmulti\r\n:1\r\n*2\r\n+noscript\r\n+fast\r\n:0\r\n:0\r\n:0\r\n*6\r\n$3\r\nset\r\n:-3\r\n*2\r\n+write\r\n+denyoom\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nlpushx\r\n:-3\r\n*3\r\n+write\r\n+denyoom\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$16\r\nzremrangebyscore\r\n:4\r\n*1\r\n+write\r\n:1\r\n:1\r\n:1\r\n*6\r\n$9\r\npexpireat\r\n:3\r\n*2\r\n+write\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\nhdel\r\n:-3\r\n*2\r\n+write\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$12\r\nbgrewriteaof\r\n:1\r\n*2\r\n+admin\r\n+noscript\r\n:0\r\n:0\r\n:0\r\n*6\r\n$7\r\nmigrate\r\n:-6\r\n*3\r\n+write\r\n+random\r\n+movablekeys\r\n:0\r\n:0\r\n:0\r\n*6\r\n$9\r\nreplicaof\r\n:3\r\n*3\r\n+admin\r\n+noscript\r\n+stale\r\n:0\r\n:0\r\n:0\r\n*6\r\n$5\r\ntouch\r\n:-2\r\n*2\r\n+readonly\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nxsetid\r\n:3\r\n*3\r\n+write\r\n+denyoom\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$5\r\nbitop\r\n:-4\r\n*2\r\n+write\r\n+denyoom\r\n:2\r\n:-1\r\n:1\r\n*6\r\n$6\r\nswapdb\r\n:3\r\n*2\r\n+write\r\n+fast\r\n:0\r\n:0\r\n:0\r\n*6\r\n$5\r\nsdiff\r\n:-2\r\n*2\r\n+readonly\r\n+sort_for_script\r\n:1\r\n:-1\r\n:1\r\n*6\r\n$6\r\nlindex\r\n:3\r\n*1\r\n+readonly\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\nwait\r\n:3\r\n*1\r\n+noscript\r\n:0\r\n:0\r\n:0\r\n*6\r\n$4\r\nlrem\r\n:4\r\n*1\r\n+write\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nhsetnx\r\n:4\r\n*3\r\n+write\r\n+denyoom\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$8\r\ngetrange\r\n:4\r\n*1\r\n+readonly\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\nhlen\r\n:2\r\n*2\r\n+readonly\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\npost\r\n:-1\r\n*2\r\n+loading\r\n+stale\r\n:0\r\n:0\r\n:0\r\n*6\r\n$9\r\nsismember\r\n:3\r\n*2\r\n+readonly\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$7\r\nunwatch\r\n:1\r\n*2\r\n+noscript\r\n+fast\r\n:0\r\n:0\r\n:0\r\n*6\r\n$5\r\nlpush\r\n:-3\r\n*3\r\n+write\r\n+denyoom\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\nscan\r\n:-2\r\n*2\r\n+readonly\r\n+random\r\n:0\r\n:0\r\n:0\r\n*6\r\n$5\r\nsmove\r\n:4\r\n*2\r\n+write\r\n+fast\r\n:1\r\n:2\r\n:1\r\n*6\r\n$7\r\ncluster\r\n:-2\r\n*1\r\n+admin\r\n:0\r\n:0\r\n:0\r\n*6\r\n$6\r\nbgsave\r\n:-1\r\n*2\r\n+admin\r\n+noscript\r\n:0\r\n:0\r\n:0\r\n*6\r\n$4\r\ndump\r\n:2\r\n*2\r\n+readonly\r\n+random\r\n:1\r\n:1\r\n:1\r\n*6\r\n$7\r\nlatency\r\n:-2\r\n*4\r\n+admin\r\n+noscript\r\n+loading\r\n+stale\r\n:0\r\n:0\r\n:0\r\n*6\r\n$8\r\nbzpopmin\r\n:-3\r\n*3\r\n+write\r\n+noscript\r\n+fast\r\n:1\r\n:-2\r\n:1\r\n*6\r\n$6\r\ngetbit\r\n:3\r\n*2\r\n+readonly\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$7\r\nhgetall\r\n:2\r\n*2\r\n+readonly\r\n+random\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nrename\r\n:3\r\n*1\r\n+write\r\n:1\r\n:2\r\n:1\r\n*6\r\n$9\r\nsubscribe\r\n:-2\r\n*4\r\n+pubsub\r\n+noscript\r\n+loading\r\n+stale\r\n:0\r\n:0\r\n:0\r\n*6\r\n$4\r\nxdel\r\n:-3\r\n*2\r\n+write\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$15\r\nzremrangebyrank\r\n:4\r\n*1\r\n+write\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\ntype\r\n:2\r\n*2\r\n+readonly\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nscript\r\n:-2\r\n*1\r\n+noscript\r\n:0\r\n:0\r\n:0\r\n*6\r\n$5\r\nhmset\r\n:-4\r\n*3\r\n+write\r\n+denyoom\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nsunion\r\n:-2\r\n*2\r\n+readonly\r\n+sort_for_script\r\n:1\r\n:-1\r\n:1\r\n*6\r\n$4\r\nmget\r\n:-2\r\n*2\r\n+readonly\r\n+fast\r\n:1\r\n:-1\r\n:1\r\n*6\r\n$10\r\nbrpoplpush\r\n:4\r\n*3\r\n+write\r\n+denyoom\r\n+noscript\r\n:1\r\n:2\r\n:1\r\n*6\r\n$6\r\ngeoadd\r\n:-5\r\n*2\r\n+write\r\n+denyoom\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\ndecrby\r\n:3\r\n*3\r\n+write\r\n+denyoom\r\n+fast\r\n:1\r\n:
1\r\n:1\r\n*6\r\n$4\r\necho\r\n:2\r\n*1\r\n+fast\r\n:0\r\n:0\r\n:0\r\n*6\r\n$6\r\ndbsize\r\n:1\r\n*2\r\n+readonly\r\n+fast\r\n:0\r\n:0\r\n:0\r\n*6\r\n$5\r\nzcard\r\n:2\r\n*2\r\n+readonly\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nselect\r\n:2\r\n*2\r\n+loading\r\n+fast\r\n:0\r\n:0\r\n:0\r\n*6\r\n$4\r\nsadd\r\n:-3\r\n*3\r\n+write\r\n+denyoom\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$5\r\nhost:\r\n:-1\r\n*2\r\n+loading\r\n+stale\r\n:0\r\n:0\r\n:0\r\n*6\r\n$5\r\nsscan\r\n:-3\r\n*2\r\n+readonly\r\n+random\r\n:1\r\n:1\r\n:1\r\n*6\r\n$12\r\ngeoradius_ro\r\n:-6\r\n*2\r\n+readonly\r\n+movablekeys\r\n:1\r\n:1\r\n:1\r\n*6\r\n$7\r\nmonitor\r\n:1\r\n*2\r\n+admin\r\n+noscript\r\n:0\r\n:0\r\n:0\r\n*6\r\n$14\r\nzremrangebylex\r\n:4\r\n*1\r\n+write\r\n:1\r\n:1\r\n:1\r\n*6\r\n$11\r\nsunionstore\r\n:-3\r\n*2\r\n+write\r\n+denyoom\r\n:1\r\n:-1\r\n:1\r\n*6\r\n$5\r\nzscan\r\n:-3\r\n*2\r\n+readonly\r\n+random\r\n:1\r\n:1\r\n:1\r\n*6\r\n$9\r\nreadwrite\r\n:1\r\n*1\r\n+fast\r\n:0\r\n:0\r\n:0\r\n*6\r\n$6\r\nxgroup\r\n:-2\r\n*2\r\n+write\r\n+denyoom\r\n:2\r\n:2\r\n:1\r\n*6\r\n$5\r\nsetex\r\n:4\r\n*2\r\n+write\r\n+denyoom\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\nsave\r\n:1\r\n*2\r\n+admin\r\n+noscript\r\n:0\r\n:0\r\n:0\r\n*6\r\n$5\r\nhvals\r\n:2\r\n*2\r\n+readonly\r\n+sort_for_script\r\n:1\r\n:1\r\n:1\r\n*6\r\n$5\r\nwatch\r\n:-2\r\n*2\r\n+noscript\r\n+fast\r\n:1\r\n:-1\r\n:1\r\n*6\r\n$7\r\nhexists\r\n:3\r\n*2\r\n+readonly\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\ninfo\r\n:-1\r\n*3\r\n+random\r\n+loading\r\n+stale\r\n:0\r\n:0\r\n:0\r\n*6\r\n$5\r\npsync\r\n:3\r\n*3\r\n+readonly\r\n+admin\r\n+noscript\r\n:0\r\n:0\r\n:0\r\n*6\r\n$11\r\nzrangebylex\r\n:-4\r\n*1\r\n+readonly\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\nzadd\r\n:-4\r\n*3\r\n+write\r\n+denyoom\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\nxlen\r\n:2\r\n*2\r\n+readonly\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\nauth\r\n:2\r\n*4\r\n+noscript\r\n+loading\r\n+stale\r\n+fast\r\n:0\r\n:0\r\n:0\r\n*6\r\n$4\r\nsrem\r\n:-3\r\n*2\r\n+write\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$9\r\ngeoradius\r\n:-6\r\n*2\r\n+write\r\n+movablekeys\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\nexec\r\n:1\r\n*2\r\n+noscript\r\n+skip_monitor\r\n:0\r\n:0\r\n:0\r\n*6\r\n$7\r\npfcount\r\n:-2\r\n*1\r\n+readonly\r\n:1\r\n:-1\r\n:1\r\n*6\r\n$7\r\nzpopmin\r\n:-2\r\n*2\r\n+write\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\nmove\r\n:3\r\n*2\r\n+write\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$5\r\nxtrim\r\n:-2\r\n*3\r\n+write\r\n+random\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nasking\r\n:1\r\n*1\r\n+fast\r\n:0\r\n:0\r\n:0\r\n*6\r\n$4\r\npttl\r\n:2\r\n*3\r\n+readonly\r\n+random\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$11\r\nsrandmember\r\n:-2\r\n*2\r\n+readonly\r\n+random\r\n:1\r\n:1\r\n:1\r\n*6\r\n$8\r\nflushall\r\n:-1\r\n*1\r\n+write\r\n:0\r\n:0\r\n:0\r\n*6\r\n$4\r\nsort\r\n:-2\r\n*3\r\n+write\r\n+denyoom\r\n+movablekeys\r\n:1\r\n:1\r\n:1\r\n*6\r\n$3\r\ndel\r\n:-2\r\n*1\r\n+write\r\n:1\r\n:-1\r\n:1\r\n*6\r\n$14\r\nrestore-asking\r\n:-4\r\n*3\r\n+write\r\n+denyoom\r\n+asking\r\n:1\r\n:1\r\n:1\r\n*6\r\n$10\r\npsubscribe\r\n:-2\r\n*4\r\n+pubsub\r\n+noscript\r\n+loading\r\n+stale\r\n:0\r\n:0\r\n:0\r\n*6\r\n$4\r\ndecr\r\n:2\r\n*3\r\n+write\r\n+denyoom\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nincrby\r\n:3\r\n*3\r\n+write\r\n+denyoom\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$14\r\nzrevrangebylex\r\n:-4\r\n*1\r\n+readonly\r\n:1\r\n:1\r\n:1\r\n*6\r\n$8\r\nbitfield\r\n:-2\r\n*2\r\n+write\r\n+denyoom\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nexists\r\n:-2\r\n*2\r\n+readonly\r\n+fast\r\n:1\r\n:-1\r\n:1\r\n*6\r\n$8\r\nreplconf\r\n:-1\r\n*4\r\n+admin\r\n+noscript\r\n+loading\r\n
+stale\r\n:0\r\n:0\r\n:0\r\n*6\r\n$7\r\nzincrby\r\n:4\r\n*3\r\n+write\r\n+denyoom\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$5\r\nblpop\r\n:-3\r\n*2\r\n+write\r\n+noscript\r\n:1\r\n:-2\r\n:1\r\n*6\r\n$4\r\nlpop\r\n:2\r\n*2\r\n+write\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$3\r\nttl\r\n:2\r\n*3\r\n+readonly\r\n+random\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$5\r\nxread\r\n:-4\r\n*3\r\n+readonly\r\n+noscript\r\n+movablekeys\r\n:1\r\n:1\r\n:1\r\n*6\r\n$5\r\nrpush\r\n:-3\r\n*3\r\n+write\r\n+denyoom\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$8\r\nzrevrank\r\n:3\r\n*2\r\n+readonly\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$11\r\nincrbyfloat\r\n:3\r\n*3\r\n+write\r\n+denyoom\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$5\r\nbrpop\r\n:-3\r\n*2\r\n+write\r\n+noscript\r\n:1\r\n:-2\r\n:1\r\n*6\r\n$4\r\nxadd\r\n:-5\r\n*4\r\n+write\r\n+denyoom\r\n+random\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$8\r\nsetrange\r\n:4\r\n*2\r\n+write\r\n+denyoom\r\n:1\r\n:1\r\n:1\r\n*6\r\n$17\r\ngeoradiusbymember\r\n:-5\r\n*2\r\n+write\r\n+movablekeys\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nunlink\r\n:-2\r\n*2\r\n+write\r\n+fast\r\n:1\r\n:-1\r\n:1\r\n*6\r\n$8\r\nexpireat\r\n:3\r\n*2\r\n+write\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$5\r\ndebug\r\n:-2\r\n*2\r\n+admin\r\n+noscript\r\n:0\r\n:0\r\n:0\r\n*6\r\n$20\r\ngeoradiusbymember_ro\r\n:-5\r\n*2\r\n+readonly\r\n+movablekeys\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\nlset\r\n:4\r\n*2\r\n+write\r\n+denyoom\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nzscore\r\n:3\r\n*2\r\n+readonly\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\nllen\r\n:2\r\n*2\r\n+readonly\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\ntime\r\n:1\r\n*2\r\n+random\r\n+fast\r\n:0\r\n:0\r\n:0\r\n*6\r\n$8\r\nshutdown\r\n:-1\r\n*4\r\n+admin\r\n+noscript\r\n+loading\r\n+stale\r\n:0\r\n:0\r\n:0\r\n*6\r\n$7\r\nevalsha\r\n:-3\r\n*2\r\n+noscript\r\n+movablekeys\r\n:0\r\n:0\r\n:0\r\n*6\r\n$6\r\nzcount\r\n:4\r\n*2\r\n+readonly\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nmemory\r\n:-2\r\n*2\r\n+readonly\r\n+random\r\n:0\r\n:0\r\n:0\r\n*6\r\n$5\r\nxinfo\r\n:-2\r\n*2\r\n+readonly\r\n+random\r\n:2\r\n:2\r\n:1\r\n*6\r\n$8\r\nxpending\r\n:-3\r\n*2\r\n+readonly\r\n+random\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\neval\r\n:-3\r\n*2\r\n+noscript\r\n+movablekeys\r\n:0\r\n:0\r\n:0\r\n*6\r\n$6\r\nxrange\r\n:-4\r\n*1\r\n+readonly\r\n:1\r\n:1\r\n:1\r\n*6\r\n$7\r\nrestore\r\n:-4\r\n*2\r\n+write\r\n+denyoom\r\n:1\r\n:1\r\n:1\r\n*6\r\n$7\r\nzpopmax\r\n:-2\r\n*2\r\n+write\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\nmset\r\n:-3\r\n*2\r\n+write\r\n+denyoom\r\n:1\r\n:-1\r\n:2\r\n*6\r\n$4\r\nspop\r\n:-2\r\n*3\r\n+write\r\n+random\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$5\r\nltrim\r\n:4\r\n*1\r\n+write\r\n:1\r\n:1\r\n:1\r\n*6\r\n$5\r\nzrank\r\n:3\r\n*2\r\n+readonly\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$9\r\nxrevrange\r\n:-4\r\n*1\r\n+readonly\r\n:1\r\n:1\r\n:1\r\n*6\r\n$3\r\nget\r\n:2\r\n*2\r\n+readonly\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$7\r\nflushdb\r\n:-1\r\n*1\r\n+write\r\n:0\r\n:0\r\n:0\r\n*6\r\n$5\r\nhmget\r\n:-3\r\n*2\r\n+readonly\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nmsetnx\r\n:-3\r\n*2\r\n+write\r\n+denyoom\r\n:1\r\n:-1\r\n:2\r\n*6\r\n$7\r\npersist\r\n:2\r\n*2\r\n+write\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$11\r\nzunionstore\r\n:-4\r\n*3\r\n+write\r\n+denyoom\r\n+movablekeys\r\n:0\r\n:0\r\n:0\r\n*6\r\n$7\r\ncommand\r\n:0\r\n*3\r\n+random\r\n+loading\r\n+stale\r\n:0\r\n:0\r\n:0\r\n*6\r\n$8\r\nrenamenx\r\n:3\r\n*2\r\n+write\r\n+fast\r\n:1\r\n:2\r\n:1\r\n*6\r\n$6\r\nzrange\r\n:-4\r\n*1\r\n+readonly\r\n:1\r\n:1\r\n:1\r\n*6\r\n$7\r\npexpire\r\n:3\r\n*2\r\n+write\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\nkeys\r\n:2\r\n*2\r\n+readon
ly\r\n+sort_for_script\r\n:0\r\n:0\r\n:0\r\n*6\r\n$4\r\nzrem\r\n:-3\r\n*2\r\n+write\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$5\r\npfadd\r\n:-2\r\n*3\r\n+write\r\n+denyoom\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\npsetex\r\n:4\r\n*2\r\n+write\r\n+denyoom\r\n:1\r\n:1\r\n:1\r\n*6\r\n$13\r\nzrangebyscore\r\n:-4\r\n*1\r\n+readonly\r\n:1\r\n:1\r\n:1\r\n*6\r\n$4\r\nsync\r\n:1\r\n*3\r\n+readonly\r\n+admin\r\n+noscript\r\n:0\r\n:0\r\n:0\r\n*6\r\n$7\r\npfdebug\r\n:-3\r\n*1\r\n+write\r\n:0\r\n:0\r\n:0\r\n*6\r\n$7\r\ndiscard\r\n:1\r\n*2\r\n+noscript\r\n+fast\r\n:0\r\n:0\r\n:0\r\n*6\r\n$8\r\nreadonly\r\n:1\r\n*1\r\n+fast\r\n:0\r\n:0\r\n:0\r\n*6\r\n$7\r\ngeodist\r\n:-4\r\n*1\r\n+readonly\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\ngeopos\r\n:-2\r\n*1\r\n+readonly\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nbitpos\r\n:-3\r\n*1\r\n+readonly\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nsinter\r\n:-2\r\n*2\r\n+readonly\r\n+sort_for_script\r\n:1\r\n:-1\r\n:1\r\n*6\r\n$6\r\ngetset\r\n:3\r\n*2\r\n+write\r\n+denyoom\r\n:1\r\n:1\r\n:1\r\n*6\r\n$7\r\nslaveof\r\n:3\r\n*3\r\n+admin\r\n+noscript\r\n+stale\r\n:0\r\n:0\r\n:0\r\n*6\r\n$6\r\nrpushx\r\n:-3\r\n*3\r\n+write\r\n+denyoom\r\n+fast\r\n:1\r\n:1\r\n:1\r\n*6\r\n$7\r\nlinsert\r\n:5\r\n*2\r\n+write\r\n+denyoom\r\n:1\r\n:1\r\n:1\r\n*6\r\n$6\r\nexpire\r\n:3\r\n*2\r\n+write\r\n+fast\r\n:1\r\n:1\r\n:1\r\n" + + c.WriteRaw(res) } diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_connection.go b/vendor/github.com/alicebob/miniredis/v2/cmd_connection.go index defbbccab9f..1afb5cea180 100644 --- a/vendor/github.com/alicebob/miniredis/v2/cmd_connection.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_connection.go @@ -4,7 +4,6 @@ package miniredis import ( "fmt" - "strconv" "strings" "github.com/alicebob/miniredis/v2/server" @@ -71,27 +70,34 @@ func (m *Miniredis) cmdAuth(c *server.Peer, cmd string, args []string) { if m.checkPubsub(c, cmd) { return } - if getCtx(c).nested { - c.WriteError(msgNotFromScripts) + ctx := getCtx(c) + if ctx.nested { + c.WriteError(msgNotFromScripts(ctx.nestedSHA)) return } - username := "default" - pw := args[0] + + var opts = struct { + username string + password string + }{ + username: "default", + password: args[0], + } if len(args) == 2 { - username, pw = args[0], args[1] + opts.username, opts.password = args[0], args[1] } withTx(m, c, func(c *server.Peer, ctx *connCtx) { - if len(m.passwords) == 0 && username == "default" { + if len(m.passwords) == 0 && opts.username == "default" { c.WriteError("ERR AUTH called without any password configured for the default user. 
Are you sure your configuration is correct?") return } - setPW, ok := m.passwords[username] + setPW, ok := m.passwords[opts.username] if !ok { c.WriteError("WRONGPASS invalid username-password pair") return } - if setPW != pw { + if setPW != opts.password { c.WriteError("WRONGPASS invalid username-password pair") return } @@ -109,17 +115,16 @@ func (m *Miniredis) cmdHello(c *server.Peer, cmd string, args []string) { } var opts struct { - version int - username, password string + version int + username string + password string } - versionArg, args := args[0], args[1:] - var err error - opts.version, err = strconv.Atoi(versionArg) - if err != nil { - c.WriteError("ERR Protocol version is not an integer or out of range") + if ok := optIntErr(c, args[0], &opts.version, "ERR Protocol version is not an integer or out of range"); !ok { return } + args = args[1:] + switch opts.version { case 2, 3: default: @@ -199,8 +204,9 @@ func (m *Miniredis) cmdEcho(c *server.Peer, cmd string, args []string) { return } + msg := args[0] + withTx(m, c, func(c *server.Peer, ctx *connCtx) { - msg := args[0] c.WriteBulk(msg) }) } @@ -212,27 +218,25 @@ func (m *Miniredis) cmdSelect(c *server.Peer, cmd string, args []string) { c.WriteError(errWrongNumber(cmd)) return } - if !m.handleAuth(c) { + if !m.isValidCMD(c, cmd) { return } - if m.checkPubsub(c, cmd) { + + var opts struct { + id int + } + if ok := optInt(c, args[0], &opts.id); !ok { return } withTx(m, c, func(c *server.Peer, ctx *connCtx) { - id, err := strconv.Atoi(args[0]) - if err != nil { - c.WriteError(msgInvalidInt) - setDirty(c) - return - } - if id < 0 { + if opts.id < 0 { c.WriteError(msgDBIndexOutOfRange) setDirty(c) return } - ctx.selectedDB = id + ctx.selectedDB = opts.id c.WriteOK() }) } @@ -248,26 +252,26 @@ func (m *Miniredis) cmdSwapdb(c *server.Peer, cmd string, args []string) { return } + var opts struct { + id1 int + id2 int + } + + if ok := optIntErr(c, args[0], &opts.id1, "ERR invalid first DB index"); !ok { + return + } + if ok := optIntErr(c, args[1], &opts.id2, "ERR invalid second DB index"); !ok { + return + } + withTx(m, c, func(c *server.Peer, ctx *connCtx) { - id1, err := strconv.Atoi(args[0]) - if err != nil { - c.WriteError("ERR invalid first DB index") - setDirty(c) - return - } - id2, err := strconv.Atoi(args[1]) - if err != nil { - c.WriteError("ERR invalid second DB index") - setDirty(c) - return - } - if id1 < 0 || id2 < 0 { + if opts.id1 < 0 || opts.id2 < 0 { c.WriteError(msgDBIndexOutOfRange) setDirty(c) return } - m.swapDB(id1, id2) + m.swapDB(opts.id1, opts.id2) c.WriteOK() }) diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_generic.go b/vendor/github.com/alicebob/miniredis/v2/cmd_generic.go index f9f06bfb96e..721ad2fabe9 100644 --- a/vendor/github.com/alicebob/miniredis/v2/cmd_generic.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_generic.go @@ -3,6 +3,8 @@ package miniredis import ( + "errors" + "fmt" "sort" "strconv" "strings" @@ -11,14 +13,31 @@ import ( "github.com/alicebob/miniredis/v2/server" ) +const ( + // expiretimeReplyNoExpiration is return value for EXPIRETIME and PEXPIRETIME if the key exists but has no associated expiration time + expiretimeReplyNoExpiration = -1 + // expiretimeReplyMissingKey is return value for EXPIRETIME and PEXPIRETIME if the key does not exist + expiretimeReplyMissingKey = -2 +) + +func inSeconds(t time.Time) int { + return int(t.Unix()) +} + +func inMilliSeconds(t time.Time) int { + return int(t.UnixMilli()) +} + // commandsGeneric handles EXPIRE, TTL, PERSIST, &c. 
func commandsGeneric(m *Miniredis) { + m.srv.Register("COPY", m.cmdCopy) m.srv.Register("DEL", m.cmdDel) - m.srv.Register("UNLINK", m.cmdDel) // DUMP m.srv.Register("EXISTS", m.cmdExists) m.srv.Register("EXPIRE", makeCmdExpire(m, false, time.Second)) m.srv.Register("EXPIREAT", makeCmdExpire(m, true, time.Second)) + m.srv.Register("EXPIRETIME", m.makeCmdExpireTime(inSeconds)) + m.srv.Register("PEXPIRETIME", m.makeCmdExpireTime(inMilliSeconds)) m.srv.Register("KEYS", m.cmdKeys) // MIGRATE m.srv.Register("MOVE", m.cmdMove) @@ -31,12 +50,53 @@ func commandsGeneric(m *Miniredis) { m.srv.Register("RENAME", m.cmdRename) m.srv.Register("RENAMENX", m.cmdRenamenx) // RESTORE - // SORT m.srv.Register("TOUCH", m.cmdTouch) m.srv.Register("TTL", m.cmdTTL) m.srv.Register("TYPE", m.cmdType) m.srv.Register("SCAN", m.cmdScan) - m.srv.Register("COPY", m.cmdCopy) + // SORT + m.srv.Register("UNLINK", m.cmdDel) +} + +type expireOpts struct { + key string + value int + nx bool + xx bool + gt bool + lt bool +} + +func expireParse(cmd string, args []string) (*expireOpts, error) { + var opts expireOpts + + opts.key = args[0] + if err := optIntSimple(args[1], &opts.value); err != nil { + return nil, err + } + args = args[2:] + for len(args) > 0 { + switch strings.ToLower(args[0]) { + case "nx": + opts.nx = true + case "xx": + opts.xx = true + case "gt": + opts.gt = true + case "lt": + opts.lt = true + default: + return nil, fmt.Errorf("ERR Unsupported option %s", args[0]) + } + args = args[1:] + } + if opts.gt && opts.lt { + return nil, errors.New("ERR GT and LT options at the same time are not compatible") + } + if opts.nx && (opts.xx || opts.gt || opts.lt) { + return nil, errors.New("ERR NX and XX, GT or LT options at the same time are not compatible") + } + return &opts, nil } // generic expire command for EXPIRE, PEXPIRE, EXPIREAT, PEXPIREAT @@ -44,7 +104,7 @@ func commandsGeneric(m *Miniredis) { // converted to a duration. func makeCmdExpire(m *Miniredis, unix bool, d time.Duration) func(*server.Peer, string, []string) { return func(c *server.Peer, cmd string, args []string) { - if len(args) != 2 { + if len(args) < 2 { setDirty(c) c.WriteError(errWrongNumber(cmd)) return @@ -56,12 +116,10 @@ func makeCmdExpire(m *Miniredis, unix bool, d time.Duration) func(*server.Peer, return } - key := args[0] - value := args[1] - i, err := strconv.Atoi(value) + opts, err := expireParse(cmd, args) if err != nil { setDirty(c) - c.WriteError(msgInvalidInt) + c.WriteError(err.Error()) return } @@ -69,22 +127,90 @@ func makeCmdExpire(m *Miniredis, unix bool, d time.Duration) func(*server.Peer, db := m.db(ctx.selectedDB) // Key must be present. 
- if _, ok := db.keys[key]; !ok { + if _, ok := db.keys[opts.key]; !ok { c.WriteInt(0) return } + + oldTTL, ok := db.ttl[opts.key] + + var newTTL time.Duration if unix { - db.ttl[key] = m.at(i, d) + newTTL = m.at(opts.value, d) } else { - db.ttl[key] = time.Duration(i) * d + newTTL = time.Duration(opts.value) * d + } + + // > NX -- Set expiry only when the key has no expiry + if opts.nx && ok { + c.WriteInt(0) + return } - db.keyVersion[key]++ - db.checkTTL(key) + // > XX -- Set expiry only when the key has an existing expiry + if opts.xx && !ok { + c.WriteInt(0) + return + } + // > GT -- Set expiry only when the new expiry is greater than current one + // (no exp == infinity) + if opts.gt && (!ok || newTTL <= oldTTL) { + c.WriteInt(0) + return + } + // > LT -- Set expiry only when the new expiry is less than current one + if opts.lt && ok && newTTL > oldTTL { + c.WriteInt(0) + return + } + db.ttl[opts.key] = newTTL + db.incr(opts.key) + db.checkTTL(opts.key) c.WriteInt(1) }) } } +// makeCmdExpireTime creates server command function that returns the absolute Unix timestamp (since January 1, 1970) +// at which the given key will expire, in unit selected by time result strategy (e.g. seconds, milliseconds). +// For more information see redis documentation for [expiretime] and [pexpiretime]. +// +// [expiretime]: https://redis.io/commands/expiretime/ +// [pexpiretime]: https://redis.io/commands/pexpiretime/ +func (m *Miniredis) makeCmdExpireTime(timeResultStrategy func(time.Time) int) server.Cmd { + return func(c *server.Peer, cmd string, args []string) { + if len(args) != 1 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + + if !m.handleAuth(c) { + return + } + if m.checkPubsub(c, cmd) { + return + } + + key := args[0] + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + db := m.db(ctx.selectedDB) + + if _, ok := db.keys[key]; !ok { + c.WriteInt(expiretimeReplyMissingKey) + return + } + + ttl, ok := db.ttl[key] + if !ok { + c.WriteInt(expiretimeReplyNoExpiration) + return + } + + c.WriteInt(timeResultStrategy(m.effectiveNow().Add(ttl))) + }) + } +} + // TOUCH func (m *Miniredis) cmdTouch(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { @@ -214,7 +340,7 @@ func (m *Miniredis) cmdPersist(c *server.Peer, cmd string, args []string) { return } delete(db.ttl, key) - db.keyVersion[key]++ + db.incr(key) c.WriteInt(1) }) } @@ -318,21 +444,23 @@ func (m *Miniredis) cmdMove(c *server.Peer, cmd string, args []string) { return } - key := args[0] - targetDB, err := strconv.Atoi(args[1]) - if err != nil { - targetDB = 0 + var opts struct { + key string + targetDB int } + opts.key = args[0] + opts.targetDB, _ = strconv.Atoi(args[1]) + withTx(m, c, func(c *server.Peer, ctx *connCtx) { - if ctx.selectedDB == targetDB { + if ctx.selectedDB == opts.targetDB { c.WriteError("ERR source and destination objects are the same") return } db := m.db(ctx.selectedDB) - targetDB := m.db(targetDB) + targetDB := m.db(opts.targetDB) - if !db.move(key, targetDB) { + if !db.move(opts.key, targetDB) { c.WriteInt(0) return } @@ -413,17 +541,23 @@ func (m *Miniredis) cmdRename(c *server.Peer, cmd string, args []string) { return } - from, to := args[0], args[1] + opts := struct { + from string + to string + }{ + from: args[0], + to: args[1], + } withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - if !db.exists(from) { + if !db.exists(opts.from) { c.WriteError(msgKeyNotFound) return } - db.rename(from, to) + db.rename(opts.from, opts.to) c.WriteOK() }) } @@ -442,130 
+576,151 @@ func (m *Miniredis) cmdRenamenx(c *server.Peer, cmd string, args []string) { return } - from, to := args[0], args[1] + opts := struct { + from string + to string + }{ + from: args[0], + to: args[1], + } withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - if !db.exists(from) { + if !db.exists(opts.from) { c.WriteError(msgKeyNotFound) return } - if db.exists(to) { + if db.exists(opts.to) { c.WriteInt(0) return } - db.rename(from, to) + db.rename(opts.from, opts.to) c.WriteInt(1) }) } -// SCAN -func (m *Miniredis) cmdScan(c *server.Peer, cmd string, args []string) { - if len(args) < 1 { - setDirty(c) - c.WriteError(errWrongNumber(cmd)) - return - } - if !m.handleAuth(c) { - return - } - if m.checkPubsub(c, cmd) { - return - } +type scanOpts struct { + cursor int + count int + withMatch bool + match string + withType bool + _type string +} - cursor, err := strconv.Atoi(args[0]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidCursor) - return +func scanParse(cmd string, args []string) (*scanOpts, error) { + var opts scanOpts + if err := optIntSimple(args[0], &opts.cursor); err != nil { + return nil, errors.New(msgInvalidCursor) } args = args[1:] // MATCH, COUNT and TYPE options - var ( - withMatch bool - match string - withType bool - _type string - ) - for len(args) > 0 { if strings.ToLower(args[0]) == "count" { - // we do nothing with count if len(args) < 2 { - setDirty(c) - c.WriteError(msgSyntaxError) - return + return nil, errors.New(msgSyntaxError) } - if _, err := strconv.Atoi(args[1]); err != nil { - setDirty(c) - c.WriteError(msgInvalidInt) - return + count, err := strconv.Atoi(args[1]) + if err != nil || count < 0 { + return nil, errors.New(msgInvalidInt) + } + if count == 0 { + return nil, errors.New(msgSyntaxError) } + opts.count = count args = args[2:] continue } if strings.ToLower(args[0]) == "match" { if len(args) < 2 { - setDirty(c) - c.WriteError(msgSyntaxError) - return + return nil, errors.New(msgSyntaxError) } - withMatch = true - match, args = args[1], args[2:] + opts.withMatch = true + opts.match, args = args[1], args[2:] continue } if strings.ToLower(args[0]) == "type" { if len(args) < 2 { - setDirty(c) - c.WriteError(msgSyntaxError) - return + return nil, errors.New(msgSyntaxError) } - withType = true - _type, args = strings.ToLower(args[1]), args[2:] + opts.withType = true + opts._type, args = strings.ToLower(args[1]), args[2:] continue } + return nil, errors.New(msgSyntaxError) + } + return &opts, nil +} + +// SCAN +func (m *Miniredis) cmdScan(c *server.Peer, cmd string, args []string) { + if len(args) < 1 { setDirty(c) - c.WriteError(msgSyntaxError) + c.WriteError(errWrongNumber(cmd)) + return + } + if !m.handleAuth(c) { + return + } + if m.checkPubsub(c, cmd) { + return + } + + opts, err := scanParse(cmd, args) + if err != nil { + setDirty(c) + c.WriteError(err.Error()) return } withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) // We return _all_ (matched) keys every time. - - if cursor != 0 { - // Invalid cursor. - c.WriteLen(2) - c.WriteBulk("0") // no next cursor - c.WriteLen(0) // no elements - return - } - var keys []string - if withType { + if opts.withType { keys = make([]string, 0) for k, t := range db.keys { // type must be given exactly; no pattern matching is performed - if t == _type { + if t == opts._type { keys = append(keys, k) } } - sort.Strings(keys) // To make things deterministic. 
} else { keys = db.allKeys() } - if withMatch { - keys, _ = matchKeys(keys, match) + sort.Strings(keys) // To make things deterministic. + + if opts.withMatch { + keys, _ = matchKeys(keys, opts.match) + } + + low := opts.cursor + high := low + opts.count + // validate high is correct + if high > len(keys) || high == 0 { + high = len(keys) + } + if opts.cursor > high { + // invalid cursor + c.WriteLen(2) + c.WriteBulk("0") // no next cursor + c.WriteLen(0) // no elements + return } + cursorValue := low + opts.count + if cursorValue >= len(keys) { + cursorValue = 0 // no next cursor + } + keys = keys[low:high] c.WriteLen(2) - c.WriteBulk("0") // no next cursor + c.WriteBulk(fmt.Sprintf("%d", cursorValue)) c.WriteLen(len(keys)) for _, k := range keys { c.WriteBulk(k) @@ -573,26 +728,15 @@ func (m *Miniredis) cmdScan(c *server.Peer, cmd string, args []string) { }) } -// COPY -func (m *Miniredis) cmdCopy(c *server.Peer, cmd string, args []string) { - if len(args) < 2 { - setDirty(c) - c.WriteError(errWrongNumber(cmd)) - return - } - if !m.handleAuth(c) { - return - } - if m.checkPubsub(c, cmd) { - return - } +type copyOpts struct { + from string + to string + destinationDB int + replace bool +} - var opts = struct { - from string - to string - destinationDB int - replace bool - }{ +func copyParse(cmd string, args []string) (*copyOpts, error) { + opts := copyOpts{ destinationDB: -1, } @@ -601,33 +745,45 @@ func (m *Miniredis) cmdCopy(c *server.Peer, cmd string, args []string) { switch strings.ToLower(args[0]) { case "db": if len(args) < 2 { - setDirty(c) - c.WriteError(msgSyntaxError) - return + return nil, errors.New(msgSyntaxError) } - db, err := strconv.Atoi(args[1]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidInt) - return + if err := optIntSimple(args[1], &opts.destinationDB); err != nil { + return nil, err } - if db < 0 { - setDirty(c) - c.WriteError(msgDBIndexOutOfRange) - return + if opts.destinationDB < 0 { + return nil, errors.New(msgDBIndexOutOfRange) } - opts.destinationDB = db args = args[2:] case "replace": opts.replace = true args = args[1:] default: - setDirty(c) - c.WriteError(msgSyntaxError) - return + return nil, errors.New(msgSyntaxError) } } + return &opts, nil +} +// COPY +func (m *Miniredis) cmdCopy(c *server.Peer, cmd string, args []string) { + if len(args) < 2 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + if !m.handleAuth(c) { + return + } + if m.checkPubsub(c, cmd) { + return + } + + opts, err := copyParse(cmd, args) + if err != nil { + setDirty(c) + c.WriteError(err.Error()) + return + } withTx(m, c, func(c *server.Peer, ctx *connCtx) { fromDB, toDB := ctx.selectedDB, opts.destinationDB if toDB == -1 { diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_geo.go b/vendor/github.com/alicebob/miniredis/v2/cmd_geo.go index a6c1901d684..6702e96ba0a 100644 --- a/vendor/github.com/alicebob/miniredis/v2/cmd_geo.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_geo.go @@ -226,28 +226,28 @@ func (m *Miniredis) cmdGeoradius(c *server.Peer, cmd string, args []string) { } args = args[5:] - var ( - withDist = false - withCoord = false - direction = unsorted - count = 0 - withStore = false - storeKey = "" - withStoredist = false - storedistKey = "" - ) + var opts struct { + withDist bool + withCoord bool + direction direction // unsorted + count int + withStore bool + storeKey string + withStoredist bool + storedistKey string + } for len(args) > 0 { arg := args[0] args = args[1:] switch strings.ToUpper(arg) { case "WITHCOORD": - withCoord = 
true + opts.withCoord = true case "WITHDIST": - withDist = true + opts.withDist = true case "ASC": - direction = asc + opts.direction = asc case "DESC": - direction = desc + opts.direction = desc case "COUNT": if len(args) == 0 { setDirty(c) @@ -266,15 +266,15 @@ func (m *Miniredis) cmdGeoradius(c *server.Peer, cmd string, args []string) { return } args = args[1:] - count = n + opts.count = n case "STORE": if len(args) == 0 { setDirty(c) c.WriteError("ERR syntax error") return } - withStore = true - storeKey = args[0] + opts.withStore = true + opts.storeKey = args[0] args = args[1:] case "STOREDIST": if len(args) == 0 { @@ -282,8 +282,8 @@ func (m *Miniredis) cmdGeoradius(c *server.Peer, cmd string, args []string) { c.WriteError("ERR syntax error") return } - withStoredist = true - storedistKey = args[0] + opts.withStoredist = true + opts.storedistKey = args[0] args = args[1:] default: setDirty(c) @@ -292,14 +292,14 @@ func (m *Miniredis) cmdGeoradius(c *server.Peer, cmd string, args []string) { } } - if strings.ToUpper(cmd) == "GEORADIUS_RO" && (withStore || withStoredist) { + if strings.ToUpper(cmd) == "GEORADIUS_RO" && (opts.withStore || opts.withStoredist) { setDirty(c) c.WriteError("ERR syntax error") return } withTx(m, c, func(c *server.Peer, ctx *connCtx) { - if (withStore || withStoredist) && (withDist || withCoord) { + if (opts.withStore || opts.withStoredist) && (opts.withDist || opts.withCoord) { c.WriteError("ERR STORE option in GEORADIUS is not compatible with WITHDIST, WITHHASH and WITHCOORDS options") return } @@ -310,9 +310,9 @@ func (m *Miniredis) cmdGeoradius(c *server.Peer, cmd string, args []string) { matches := withinRadius(members, longitude, latitude, radius*toMeter) // deal with ASC/DESC - if direction != unsorted { + if opts.direction != unsorted { sort.Slice(matches, func(i, j int) bool { - if direction == desc { + if opts.direction == desc { return matches[i].Distance > matches[j].Distance } return matches[i].Distance < matches[j].Distance @@ -320,25 +320,25 @@ func (m *Miniredis) cmdGeoradius(c *server.Peer, cmd string, args []string) { } // deal with COUNT - if count > 0 && len(matches) > count { - matches = matches[:count] + if opts.count > 0 && len(matches) > opts.count { + matches = matches[:opts.count] } // deal with "STORE x" - if withStore { - db.del(storeKey, true) + if opts.withStore { + db.del(opts.storeKey, true) for _, member := range matches { - db.ssetAdd(storeKey, member.Score, member.Name) + db.ssetAdd(opts.storeKey, member.Score, member.Name) } c.WriteInt(len(matches)) return } // deal with "STOREDIST x" - if withStoredist { - db.del(storedistKey, true) + if opts.withStoredist { + db.del(opts.storedistKey, true) for _, member := range matches { - db.ssetAdd(storedistKey, member.Distance/toMeter, member.Name) + db.ssetAdd(opts.storedistKey, member.Distance/toMeter, member.Name) } c.WriteInt(len(matches)) return @@ -346,24 +346,24 @@ func (m *Miniredis) cmdGeoradius(c *server.Peer, cmd string, args []string) { c.WriteLen(len(matches)) for _, member := range matches { - if !withDist && !withCoord { + if !opts.withDist && !opts.withCoord { c.WriteBulk(member.Name) continue } len := 1 - if withDist { + if opts.withDist { len++ } - if withCoord { + if opts.withCoord { len++ } c.WriteLen(len) c.WriteBulk(member.Name) - if withDist { + if opts.withDist { c.WriteBulk(fmt.Sprintf("%.4f", member.Distance/toMeter)) } - if withCoord { + if opts.withCoord { c.WriteLen(2) c.WriteBulk(fmt.Sprintf("%f", member.Longitude)) c.WriteBulk(fmt.Sprintf("%f", 
member.Latitude)) @@ -386,45 +386,53 @@ func (m *Miniredis) cmdGeoradiusbymember(c *server.Peer, cmd string, args []stri return } - key := args[0] - member := args[1] + opts := struct { + key string + member string + radius float64 + toMeter float64 + + withDist bool + withCoord bool + direction direction // unsorted + count int + withStore bool + storeKey string + withStoredist bool + storedistKey string + }{ + key: args[0], + member: args[1], + } - radius, err := strconv.ParseFloat(args[2], 64) - if err != nil || radius < 0 { + r, err := strconv.ParseFloat(args[2], 64) + if err != nil || r < 0 { setDirty(c) c.WriteError(errWrongNumber(cmd)) return } - toMeter := parseUnit(args[3]) - if toMeter == 0 { + opts.radius = r + + opts.toMeter = parseUnit(args[3]) + if opts.toMeter == 0 { setDirty(c) c.WriteError(errWrongNumber(cmd)) return } args = args[4:] - var ( - withDist = false - withCoord = false - direction = unsorted - count = 0 - withStore = false - storeKey = "" - withStoredist = false - storedistKey = "" - ) for len(args) > 0 { arg := args[0] args = args[1:] switch strings.ToUpper(arg) { case "WITHCOORD": - withCoord = true + opts.withCoord = true case "WITHDIST": - withDist = true + opts.withDist = true case "ASC": - direction = asc + opts.direction = asc case "DESC": - direction = desc + opts.direction = desc case "COUNT": if len(args) == 0 { setDirty(c) @@ -443,15 +451,15 @@ func (m *Miniredis) cmdGeoradiusbymember(c *server.Peer, cmd string, args []stri return } args = args[1:] - count = n + opts.count = n case "STORE": if len(args) == 0 { setDirty(c) c.WriteError("ERR syntax error") return } - withStore = true - storeKey = args[0] + opts.withStore = true + opts.storeKey = args[0] args = args[1:] case "STOREDIST": if len(args) == 0 { @@ -459,8 +467,8 @@ func (m *Miniredis) cmdGeoradiusbymember(c *server.Peer, cmd string, args []stri c.WriteError("ERR syntax error") return } - withStoredist = true - storedistKey = args[0] + opts.withStoredist = true + opts.storedistKey = args[0] args = args[1:] default: setDirty(c) @@ -469,44 +477,44 @@ func (m *Miniredis) cmdGeoradiusbymember(c *server.Peer, cmd string, args []stri } } - if strings.ToUpper(cmd) == "GEORADIUSBYMEMBER_RO" && (withStore || withStoredist) { + if strings.ToUpper(cmd) == "GEORADIUSBYMEMBER_RO" && (opts.withStore || opts.withStoredist) { setDirty(c) c.WriteError("ERR syntax error") return } withTx(m, c, func(c *server.Peer, ctx *connCtx) { - if (withStore || withStoredist) && (withDist || withCoord) { + if (opts.withStore || opts.withStoredist) && (opts.withDist || opts.withCoord) { c.WriteError("ERR STORE option in GEORADIUS is not compatible with WITHDIST, WITHHASH and WITHCOORDS options") return } db := m.db(ctx.selectedDB) - if !db.exists(key) { + if !db.exists(opts.key) { c.WriteNull() return } - if db.t(key) != "zset" { + if db.t(opts.key) != "zset" { c.WriteError(ErrWrongType.Error()) return } // get position of member - if !db.ssetExists(key, member) { + if !db.ssetExists(opts.key, opts.member) { c.WriteError("ERR could not decode requested zset member") return } - score := db.ssetScore(key, member) + score := db.ssetScore(opts.key, opts.member) longitude, latitude := fromGeohash(uint64(score)) - members := db.ssetElements(key) - matches := withinRadius(members, longitude, latitude, radius*toMeter) + members := db.ssetElements(opts.key) + matches := withinRadius(members, longitude, latitude, opts.radius*opts.toMeter) // deal with ASC/DESC - if direction != unsorted { + if opts.direction != unsorted { 
sort.Slice(matches, func(i, j int) bool { - if direction == desc { + if opts.direction == desc { return matches[i].Distance > matches[j].Distance } return matches[i].Distance < matches[j].Distance @@ -514,25 +522,25 @@ func (m *Miniredis) cmdGeoradiusbymember(c *server.Peer, cmd string, args []stri } // deal with COUNT - if count > 0 && len(matches) > count { - matches = matches[:count] + if opts.count > 0 && len(matches) > opts.count { + matches = matches[:opts.count] } // deal with "STORE x" - if withStore { - db.del(storeKey, true) + if opts.withStore { + db.del(opts.storeKey, true) for _, member := range matches { - db.ssetAdd(storeKey, member.Score, member.Name) + db.ssetAdd(opts.storeKey, member.Score, member.Name) } c.WriteInt(len(matches)) return } // deal with "STOREDIST x" - if withStoredist { - db.del(storedistKey, true) + if opts.withStoredist { + db.del(opts.storedistKey, true) for _, member := range matches { - db.ssetAdd(storedistKey, member.Distance/toMeter, member.Name) + db.ssetAdd(opts.storedistKey, member.Distance/opts.toMeter, member.Name) } c.WriteInt(len(matches)) return @@ -540,24 +548,24 @@ func (m *Miniredis) cmdGeoradiusbymember(c *server.Peer, cmd string, args []stri c.WriteLen(len(matches)) for _, member := range matches { - if !withDist && !withCoord { + if !opts.withDist && !opts.withCoord { c.WriteBulk(member.Name) continue } len := 1 - if withDist { + if opts.withDist { len++ } - if withCoord { + if opts.withCoord { len++ } c.WriteLen(len) c.WriteBulk(member.Name) - if withDist { - c.WriteBulk(fmt.Sprintf("%.4f", member.Distance/toMeter)) + if opts.withDist { + c.WriteBulk(fmt.Sprintf("%.4f", member.Distance/opts.toMeter)) } - if withCoord { + if opts.withCoord { c.WriteLen(2) c.WriteBulk(fmt.Sprintf("%f", member.Longitude)) c.WriteBulk(fmt.Sprintf("%f", member.Latitude)) @@ -586,7 +594,7 @@ func withinRadius(members []ssElem, longitude, latitude, radius float64) []geoDi } func parseUnit(u string) float64 { - switch u { + switch strings.ToLower(u) { case "m": return 1 case "km": diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_hash.go b/vendor/github.com/alicebob/miniredis/v2/cmd_hash.go index 142ba63e161..2c45630a1a4 100644 --- a/vendor/github.com/alicebob/miniredis/v2/cmd_hash.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_hash.go @@ -27,6 +27,7 @@ func commandsHash(m *Miniredis) { m.srv.Register("HSTRLEN", m.cmdHstrlen) m.srv.Register("HVALS", m.cmdHvals) m.srv.Register("HSCAN", m.cmdHscan) + m.srv.Register("HRANDFIELD", m.cmdHrandfield) } // HSET @@ -77,27 +78,35 @@ func (m *Miniredis) cmdHsetnx(c *server.Peer, cmd string, args []string) { return } - key, field, value := args[0], args[1], args[2] + opts := struct { + key string + field string + value string + }{ + key: args[0], + field: args[1], + value: args[2], + } withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - if t, ok := db.keys[key]; ok && t != "hash" { + if t, ok := db.keys[opts.key]; ok && t != "hash" { c.WriteError(msgWrongType) return } - if _, ok := db.hashKeys[key]; !ok { - db.hashKeys[key] = map[string]string{} - db.keys[key] = "hash" + if _, ok := db.hashKeys[opts.key]; !ok { + db.hashKeys[opts.key] = map[string]string{} + db.keys[opts.key] = "hash" } - _, ok := db.hashKeys[key][field] + _, ok := db.hashKeys[opts.key][opts.field] if ok { c.WriteInt(0) return } - db.hashKeys[key][field] = value - db.keyVersion[key]++ + db.hashKeys[opts.key][opts.field] = opts.value + db.incr(opts.key) c.WriteInt(1) }) } @@ -191,12 +200,18 @@ func (m *Miniredis) 
cmdHdel(c *server.Peer, cmd string, args []string) { return } - key, fields := args[0], args[1:] + opts := struct { + key string + fields []string + }{ + key: args[0], + fields: args[1:], + } withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - t, ok := db.keys[key] + t, ok := db.keys[opts.key] if !ok { // No key is zero deleted c.WriteInt(0) @@ -208,19 +223,19 @@ func (m *Miniredis) cmdHdel(c *server.Peer, cmd string, args []string) { } deleted := 0 - for _, f := range fields { - _, ok := db.hashKeys[key][f] + for _, f := range opts.fields { + _, ok := db.hashKeys[opts.key][f] if !ok { continue } - delete(db.hashKeys[key], f) + delete(db.hashKeys[opts.key], f) deleted++ } c.WriteInt(deleted) // Nothing left. Remove the whole key. - if len(db.hashKeys[key]) == 0 { - db.del(key, true) + if len(db.hashKeys[opts.key]) == 0 { + db.del(opts.key, true) } }) } @@ -239,12 +254,18 @@ func (m *Miniredis) cmdHexists(c *server.Peer, cmd string, args []string) { return } - key, field := args[0], args[1] + opts := struct { + key string + field string + }{ + key: args[0], + field: args[1], + } withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - t, ok := db.keys[key] + t, ok := db.keys[opts.key] if !ok { c.WriteInt(0) return @@ -254,7 +275,7 @@ func (m *Miniredis) cmdHexists(c *server.Peer, cmd string, args []string) { return } - if _, ok := db.hashKeys[key][field]; !ok { + if _, ok := db.hashKeys[opts.key][opts.field]; !ok { c.WriteInt(0) return } @@ -494,24 +515,27 @@ func (m *Miniredis) cmdHincrby(c *server.Peer, cmd string, args []string) { return } - key, field, deltas := args[0], args[1], args[2] - - delta, err := strconv.Atoi(deltas) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidInt) + opts := struct { + key string + field string + delta int + }{ + key: args[0], + field: args[1], + } + if ok := optInt(c, args[2], &opts.delta); !ok { return } withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - if t, ok := db.keys[key]; ok && t != "hash" { + if t, ok := db.keys[opts.key]; ok && t != "hash" { c.WriteError(msgWrongType) return } - v, err := db.hashIncr(key, field, delta) + v, err := db.hashIncr(opts.key, opts.field, opts.delta) if err != nil { c.WriteError(err.Error()) return @@ -534,24 +558,31 @@ func (m *Miniredis) cmdHincrbyfloat(c *server.Peer, cmd string, args []string) { return } - key, field, deltas := args[0], args[1], args[2] - - delta, _, err := big.ParseFloat(deltas, 10, 128, 0) + opts := struct { + key string + field string + delta *big.Float + }{ + key: args[0], + field: args[1], + } + delta, _, err := big.ParseFloat(args[2], 10, 128, 0) if err != nil { setDirty(c) c.WriteError(msgInvalidFloat) return } + opts.delta = delta withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - if t, ok := db.keys[key]; ok && t != "hash" { + if t, ok := db.keys[opts.key]; ok && t != "hash" { c.WriteError(msgWrongType) return } - v, err := db.hashIncrfloat(key, field, delta) + v, err := db.hashIncrfloat(opts.key, opts.field, opts.delta) if err != nil { c.WriteError(err.Error()) return @@ -574,18 +605,20 @@ func (m *Miniredis) cmdHscan(c *server.Peer, cmd string, args []string) { return } - key := args[0] - cursor, err := strconv.Atoi(args[1]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidCursor) + opts := struct { + key string + cursor int + withMatch bool + match string + }{ + key: args[0], + } + if ok := optIntErr(c, args[1], &opts.cursor, msgInvalidCursor); !ok { return } args = 
args[2:] // MATCH and COUNT options - var withMatch bool - var match string for len(args) > 0 { if strings.ToLower(args[0]) == "count" { // we do nothing with count @@ -609,8 +642,8 @@ func (m *Miniredis) cmdHscan(c *server.Peer, cmd string, args []string) { c.WriteError(msgSyntaxError) return } - withMatch = true - match, args = args[1], args[2:] + opts.withMatch = true + opts.match, args = args[1], args[2:] continue } setDirty(c) @@ -622,21 +655,21 @@ func (m *Miniredis) cmdHscan(c *server.Peer, cmd string, args []string) { db := m.db(ctx.selectedDB) // return _all_ (matched) keys every time - if cursor != 0 { + if opts.cursor != 0 { // Invalid cursor. c.WriteLen(2) c.WriteBulk("0") // no next cursor c.WriteLen(0) // no elements return } - if db.exists(key) && db.t(key) != "hash" { + if db.exists(opts.key) && db.t(opts.key) != "hash" { c.WriteError(ErrWrongType.Error()) return } - members := db.hashFields(key) - if withMatch { - members, _ = matchKeys(members, match) + members := db.hashFields(opts.key) + if opts.withMatch { + members, _ = matchKeys(members, opts.match) } c.WriteLen(2) @@ -645,7 +678,100 @@ func (m *Miniredis) cmdHscan(c *server.Peer, cmd string, args []string) { c.WriteLen(len(members) * 2) for _, k := range members { c.WriteBulk(k) - c.WriteBulk(db.hashGet(key, k)) + c.WriteBulk(db.hashGet(opts.key, k)) + } + }) +} + +// HRANDFIELD +func (m *Miniredis) cmdHrandfield(c *server.Peer, cmd string, args []string) { + if len(args) > 3 || len(args) < 1 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + if !m.handleAuth(c) { + return + } + if m.checkPubsub(c, cmd) { + return + } + + opts := struct { + key string + count int + countSet bool + withValues bool + }{ + key: args[0], + } + + if len(args) > 1 { + if ok := optIntErr(c, args[1], &opts.count, msgInvalidInt); !ok { + return + } + opts.countSet = true + } + + if len(args) == 3 { + if strings.ToLower(args[2]) == "withvalues" { + opts.withValues = true + } else { + setDirty(c) + c.WriteError(msgSyntaxError) + return + } + } + + withTx(m, c, func(peer *server.Peer, ctx *connCtx) { + db := m.db(ctx.selectedDB) + members := db.hashFields(opts.key) + m.shuffle(members) + + if !opts.countSet { + // > When called with just the key argument, return a random field from the + // hash value stored at key. 
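The no-COUNT branch below returns a single random field or nil; the branches after it implement the Redis rule that a positive COUNT yields at most count distinct fields while a negative COUNT yields exactly -count fields with repeats allowed. A self-contained model of just the COUNT rule (randFields is an illustrative helper, not the vendored code):

package main

import (
	"fmt"
	"math/rand"
)

// randFields models the HRANDFIELD count rules: a positive count returns
// at most count distinct fields; a negative count returns exactly -count
// fields and may repeat them.
func randFields(fields []string, count int) []string {
	rand.Shuffle(len(fields), func(i, j int) { fields[i], fields[j] = fields[j], fields[i] })
	if count >= 0 {
		if count > len(fields) {
			count = len(fields)
		}
		return fields[:count]
	}
	if len(fields) == 0 {
		return nil
	}
	out := make([]string, 0, -count)
	for len(out) < -count {
		out = append(out, fields[rand.Intn(len(fields))])
	}
	return out
}

func main() {
	f := []string{"a", "b", "c"}
	fmt.Println(randFields(f, 5))  // at most 3 distinct fields
	fmt.Println(randFields(f, -5)) // exactly 5 fields, repeats allowed
}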
+ if len(members) == 0 { + peer.WriteNull() + return + } + peer.WriteBulk(members[0]) + return + } + + if len(members) > abs(opts.count) { + members = members[:abs(opts.count)] + } + switch { + case opts.count >= 0: + // if count is positive there can't be duplicates, and the length is restricted + case opts.count < 0: + // if count is negative there can be duplicates, but length will match + if len(members) > 0 { + for len(members) < -opts.count { + members = append(members, members[m.randIntn(len(members))]) + } + } + } + + if opts.withValues { + peer.WriteMapLen(len(members)) + for _, m := range members { + peer.WriteBulk(m) + peer.WriteBulk(db.hashGet(opts.key, m)) + } + return + } + peer.WriteLen(len(members)) + for _, m := range members { + peer.WriteBulk(m) } }) } + +func abs(n int) int { + if n < 0 { + return -n + } + return n +} diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_hll.go b/vendor/github.com/alicebob/miniredis/v2/cmd_hll.go index bd2f90c8382..a7b483673e3 100644 --- a/vendor/github.com/alicebob/miniredis/v2/cmd_hll.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_hll.go @@ -52,7 +52,7 @@ func (m *Miniredis) cmdPfcount(c *server.Peer, cmd string, args []string) { return } - keys := args[:] + keys := args withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_info.go b/vendor/github.com/alicebob/miniredis/v2/cmd_info.go new file mode 100644 index 00000000000..e5984a9b2c0 --- /dev/null +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_info.go @@ -0,0 +1,40 @@ +package miniredis + +import ( + "fmt" + + "github.com/alicebob/miniredis/v2/server" +) + +// Command 'INFO' from https://redis.io/commands/info/ +func (m *Miniredis) cmdInfo(c *server.Peer, cmd string, args []string) { + if !m.isValidCMD(c, cmd) { + return + } + + if len(args) > 1 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + const ( + clientsSectionName = "clients" + clientsSectionContent = "# Clients\nconnected_clients:%d\r\n" + ) + + var result string + + for _, key := range args { + if key != clientsSectionName { + setDirty(c) + c.WriteError(fmt.Sprintf("section (%s) is not supported", key)) + return + } + } + result = fmt.Sprintf(clientsSectionContent, m.Server().ClientsLen()) + + c.WriteBulk(result) + }) +} diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_list.go b/vendor/github.com/alicebob/miniredis/v2/cmd_list.go index 62f9691887b..0683cccc9c9 100644 --- a/vendor/github.com/alicebob/miniredis/v2/cmd_list.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_list.go @@ -23,6 +23,7 @@ func commandsList(m *Miniredis) { m.srv.Register("BRPOP", m.cmdBrpop) m.srv.Register("BRPOPLPUSH", m.cmdBrpoplpush) m.srv.Register("LINDEX", m.cmdLindex) + m.srv.Register("LPOS", m.cmdLpos) m.srv.Register("LINSERT", m.cmdLinsert) m.srv.Register("LLEN", m.cmdLlen) m.srv.Register("LPOP", m.cmdLpop) @@ -37,6 +38,7 @@ func commandsList(m *Miniredis) { m.srv.Register("RPUSH", m.cmdRpush) m.srv.Register("RPUSHX", m.cmdRpushx) m.srv.Register("LMOVE", m.cmdLmove) + m.srv.Register("BLMOVE", m.cmdBlmove) } // BLPOP @@ -62,28 +64,23 @@ func (m *Miniredis) cmdBXpop(c *server.Peer, cmd string, args []string, lr leftr return } - timeoutS := args[len(args)-1] - keys := args[:len(args)-1] - - timeout, err := strconv.Atoi(timeoutS) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidTimeout) - return + var opts struct { + keys []string + timeout time.Duration } - if 
timeout < 0 { - setDirty(c) - c.WriteError(msgNegTimeout) + + if ok := optDuration(c, args[len(args)-1], &opts.timeout); !ok { return } + opts.keys = args[:len(args)-1] blocking( m, c, - time.Duration(timeout)*time.Second, + opts.timeout, func(c *server.Peer, ctx *connCtx) bool { db := m.db(ctx.selectedDB) - for _, key := range keys { + for _, key := range opts.keys { if !db.exists(key) { continue } @@ -165,6 +162,153 @@ func (m *Miniredis) cmdLindex(c *server.Peer, cmd string, args []string) { }) } +// LPOS key element [RANK rank] [COUNT num-matches] [MAXLEN len] +func (m *Miniredis) cmdLpos(c *server.Peer, cmd string, args []string) { + if !m.handleAuth(c) { + return + } + if m.checkPubsub(c, cmd) { + return + } + + if len(args) == 1 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + + // Extract options from arguments if present. + // + // Redis allows duplicate options and uses the last specified. + // `LPOS key term RANK 1 RANK 2` is effectively the same as + // `LPOS key term RANK 2` + if len(args)%2 == 1 { + setDirty(c) + c.WriteError(msgSyntaxError) + return + } + rank, count := 1, 1 // Default values + var maxlen int // Default value is the list length (see below) + var countSpecified, maxlenSpecified bool + if len(args) > 2 { + for i := 2; i < len(args); i++ { + if i%2 == 0 { + val := args[i+1] + var err error + switch strings.ToLower(args[i]) { + case "rank": + if rank, err = strconv.Atoi(val); err != nil { + setDirty(c) + c.WriteError(msgInvalidInt) + return + } + if rank == 0 { + setDirty(c) + c.WriteError(msgRankIsZero) + return + } + case "count": + countSpecified = true + if count, err = strconv.Atoi(val); err != nil || count < 0 { + setDirty(c) + c.WriteError(msgCountIsNegative) + return + } + case "maxlen": + maxlenSpecified = true + if maxlen, err = strconv.Atoi(val); err != nil || maxlen < 0 { + setDirty(c) + c.WriteError(msgMaxLengthIsNegative) + return + } + default: + setDirty(c) + c.WriteError(msgSyntaxError) + return + } + } + } + } + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + db := m.db(ctx.selectedDB) + key, element := args[0], args[1] + t, ok := db.keys[key] + if !ok { + // No such key + c.WriteNull() + return + } + if t != "list" { + c.WriteError(msgWrongType) + return + } + l := db.listKeys[key] + + // RANK cannot be zero (see above). + // If RANK is positive search forward (left to right). + // If RANK is negative search backward (right to left). + // Iterator returns true to continue iterating. + iterate := func(iterator func(i int, e string) bool) { + comparisons := len(l) + // Only use max length if specified, not zero, and less than total length. + // When max length is specified, but is zero, this means "unlimited". + if maxlenSpecified && maxlen != 0 && maxlen < len(l) { + comparisons = maxlen + } + if rank > 0 { + for i := 0; i < comparisons; i++ { + if resume := iterator(i, l[i]); !resume { + return + } + } + } else if rank < 0 { + start := len(l) - 1 + end := len(l) - comparisons + for i := start; i >= end; i-- { + if resume := iterator(i, l[i]); !resume { + return + } + } + } + } + + var currentRank, currentCount int + vals := make([]int, 0, count) + iterate(func(i int, e string) bool { + if e == element { + currentRank++ + // Only collect values after surpassing the absolute value of rank.
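The iterate/collect logic here can be hard to follow inside the diff; the standalone model below reproduces the same RANK/COUNT/MAXLEN semantics (lpos is an illustrative re-implementation, with count 0 meaning "all matches" and maxlen 0 meaning "unlimited"; rank must be non-zero, as the handler enforces):

package main

import "fmt"

// lpos models the search rules of cmdLpos: rank selects the first match
// to report (negative rank searches from the tail), count limits how many
// indexes are returned, and maxlen caps how many entries are compared.
func lpos(l []string, element string, rank, count, maxlen int) []int {
	comparisons := len(l)
	if maxlen != 0 && maxlen < len(l) {
		comparisons = maxlen
	}
	var idx []int
	if rank > 0 {
		for i := 0; i < comparisons; i++ {
			idx = append(idx, i)
		}
	} else {
		for i := len(l) - 1; i >= len(l)-comparisons; i-- {
			idx = append(idx, i)
		}
	}
	absRank := rank
	if absRank < 0 {
		absRank = -absRank
	}
	var seen int
	var out []int
	for _, i := range idx {
		if l[i] != element {
			continue
		}
		seen++
		if seen < absRank {
			continue // skip matches until rank is reached
		}
		out = append(out, i)
		if count != 0 && len(out) == count {
			break
		}
	}
	return out
}

func main() {
	l := []string{"a", "b", "c", "b", "b"}
	fmt.Println(lpos(l, "b", 1, 0, 0))  // [1 3 4]
	fmt.Println(lpos(l, "b", -1, 2, 0)) // [4 3]
}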
+ if rank > 0 && currentRank < rank { + return true + } + if rank < 0 && currentRank < -rank { + return true + } + vals = append(vals, i) + currentCount++ + if currentCount == count { + return false + } + } + return true + }) + + if !countSpecified && len(vals) == 0 { + c.WriteNull() + return + } + if !countSpecified && len(vals) == 1 { + c.WriteInt(vals[0]) + return + } + c.WriteLen(len(vals)) + for _, val := range vals { + c.WriteInt(val) + } + }) +} + // LINSERT func (m *Miniredis) cmdLinsert(c *server.Peer, cmd string, args []string) { if len(args) != 4 { @@ -224,7 +368,7 @@ func (m *Miniredis) cmdLinsert(c *server.Peer, cmd string, args []string) { } } db.listKeys[key] = l - db.keyVersion[key]++ + db.incr(key) c.WriteInt(len(l)) return } @@ -297,18 +441,14 @@ func (m *Miniredis) cmdXpop(c *server.Peer, cmd string, args []string, lr leftri opts.key, args = args[0], args[1:] if len(args) > 0 { - v, err := strconv.Atoi(args[0]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidInt) + if ok := optInt(c, args[0], &opts.count); !ok { return } - if v < 0 { + if opts.count < 0 { setDirty(c) c.WriteError(msgOutOfRange) return } - opts.count = v opts.withCount = true args = args[1:] } @@ -323,6 +463,11 @@ func (m *Miniredis) cmdXpop(c *server.Peer, cmd string, args []string, lr leftri if !db.exists(opts.key) { // non-existing key is fine + if opts.withCount && !c.Resp3 { + // zero-length list in this specific case. Looks like a redis bug to me. + c.WriteLen(-1) + return + } c.WriteNull() return } @@ -342,11 +487,7 @@ func (m *Miniredis) cmdXpop(c *server.Peer, cmd string, args []string, lr leftri } opts.count -= 1 } - if len(popped) == 0 { - c.WriteLen(-1) - } else { - c.WriteStrings(popped) - } + c.WriteStrings(popped) return } @@ -471,35 +612,35 @@ func (m *Miniredis) cmdLrange(c *server.Peer, cmd string, args []string) { return } - key := args[0] - start, err := strconv.Atoi(args[1]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidInt) + opts := struct { + key string + start int + end int + }{ + key: args[0], + } + if ok := optInt(c, args[1], &opts.start); !ok { return } - end, err := strconv.Atoi(args[2]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidInt) + if ok := optInt(c, args[2], &opts.end); !ok { return } withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - if t, ok := db.keys[key]; ok && t != "list" { + if t, ok := db.keys[opts.key]; ok && t != "list" { c.WriteError(msgWrongType) return } - l := db.listKeys[key] + l := db.listKeys[opts.key] if len(l) == 0 { c.WriteLen(0) return } - rs, re := redisRange(len(l), start, end, false) + rs, re := redisRange(len(l), opts.start, opts.end, false) c.WriteLen(re - rs) for _, el := range l[rs:re] { c.WriteBulk(el) @@ -521,42 +662,44 @@ func (m *Miniredis) cmdLrem(c *server.Peer, cmd string, args []string) { return } - key := args[0] - count, err := strconv.Atoi(args[1]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidInt) + var opts struct { + key string + count int + value string + } + opts.key = args[0] + if ok := optInt(c, args[1], &opts.count); !ok { return } - value := args[2] + opts.value = args[2] withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - if !db.exists(key) { + if !db.exists(opts.key) { c.WriteInt(0) return } - if db.t(key) != "list" { + if db.t(opts.key) != "list" { c.WriteError(msgWrongType) return } - l := db.listKeys[key] - if count < 0 { + l := db.listKeys[opts.key] + if opts.count < 0 { reverseSlice(l) } deleted := 0 newL := []string{} 
toDelete := len(l) - if count < 0 { - toDelete = -count + if opts.count < 0 { + toDelete = -opts.count } - if count > 0 { - toDelete = count + if opts.count > 0 { + toDelete = opts.count } for _, el := range l { - if el == value { + if el == opts.value { if toDelete > 0 { deleted++ toDelete-- @@ -565,14 +708,14 @@ func (m *Miniredis) cmdLrem(c *server.Peer, cmd string, args []string) { } newL = append(newL, el) } - if count < 0 { + if opts.count < 0 { reverseSlice(newL) } if len(newL) == 0 { - db.del(key, true) + db.del(opts.key, true) } else { - db.listKeys[key] = newL - db.keyVersion[key]++ + db.listKeys[opts.key] = newL + db.incr(opts.key) } c.WriteInt(deleted) @@ -593,28 +736,31 @@ func (m *Miniredis) cmdLset(c *server.Peer, cmd string, args []string) { return } - key := args[0] - index, err := strconv.Atoi(args[1]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidInt) + var opts struct { + key string + index int + value string + } + opts.key = args[0] + if ok := optInt(c, args[1], &opts.index); !ok { return } - value := args[2] + opts.value = args[2] withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - if !db.exists(key) { + if !db.exists(opts.key) { c.WriteError(msgKeyNotFound) return } - if db.t(key) != "list" { + if db.t(opts.key) != "list" { c.WriteError(msgWrongType) return } - l := db.listKeys[key] + l := db.listKeys[opts.key] + index := opts.index if index < 0 { index = len(l) + index } @@ -622,8 +768,8 @@ func (m *Miniredis) cmdLset(c *server.Peer, cmd string, args []string) { c.WriteError(msgOutOfRange) return } - l[index] = value - db.keyVersion[key]++ + l[index] = opts.value + db.incr(opts.key) c.WriteOK() }) @@ -643,24 +789,24 @@ func (m *Miniredis) cmdLtrim(c *server.Peer, cmd string, args []string) { return } - key := args[0] - start, err := strconv.Atoi(args[1]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidInt) + var opts struct { + key string + start int + end int + } + + opts.key = args[0] + if ok := optInt(c, args[1], &opts.start); !ok { return } - end, err := strconv.Atoi(args[2]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidInt) + if ok := optInt(c, args[2], &opts.end); !ok { return } withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - t, ok := db.keys[key] + t, ok := db.keys[opts.key] if !ok { c.WriteOK() return @@ -670,14 +816,14 @@ func (m *Miniredis) cmdLtrim(c *server.Peer, cmd string, args []string) { return } - l := db.listKeys[key] - rs, re := redisRange(len(l), start, end, false) + l := db.listKeys[opts.key] + rs, re := redisRange(len(l), opts.start, opts.end, false) l = l[rs:re] if len(l) == 0 { - db.del(key, true) + db.del(opts.key, true) } else { - db.listKeys[key] = l - db.keyVersion[key]++ + db.listKeys[opts.key] = l + db.incr(opts.key) } c.WriteOK() }) @@ -730,39 +876,36 @@ func (m *Miniredis) cmdBrpoplpush(c *server.Peer, cmd string, args []string) { return } - src := args[0] - dst := args[1] - timeout, err := strconv.Atoi(args[2]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidTimeout) - return + var opts struct { + src string + dst string + timeout time.Duration } - if timeout < 0 { - setDirty(c) - c.WriteError(msgNegTimeout) + opts.src = args[0] + opts.dst = args[1] + if ok := optDuration(c, args[2], &opts.timeout); !ok { return } blocking( m, c, - time.Duration(timeout)*time.Second, + opts.timeout, func(c *server.Peer, ctx *connCtx) bool { db := m.db(ctx.selectedDB) - if !db.exists(src) { + if !db.exists(opts.src) { return false } - if db.t(src) != 
"list" || (db.exists(dst) && db.t(dst) != "list") { + if db.t(opts.src) != "list" || (db.exists(opts.dst) && db.t(opts.dst) != "list") { c.WriteError(msgWrongType) return true } - if len(db.listKeys[src]) == 0 { + if len(db.listKeys[opts.src]) == 0 { return false } - elem := db.listPop(src) - db.listLpush(dst, elem) + elem := db.listPop(opts.src) + db.listLpush(opts.dst, elem) c.WriteBulk(elem) return true }, @@ -787,37 +930,125 @@ func (m *Miniredis) cmdLmove(c *server.Peer, cmd string, args []string) { return } - src, dst, srcDir, dstDir := args[0], args[1], strings.ToLower(args[2]), strings.ToLower(args[3]) + opts := struct { + src string + dst string + srcDir string + dstDir string + }{ + src: args[0], + dst: args[1], + srcDir: strings.ToLower(args[2]), + dstDir: strings.ToLower(args[3]), + } withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - if !db.exists(src) { + if !db.exists(opts.src) { c.WriteNull() return } - if db.t(src) != "list" || (db.exists(dst) && db.t(dst) != "list") { + if db.t(opts.src) != "list" || (db.exists(opts.dst) && db.t(opts.dst) != "list") { c.WriteError(msgWrongType) return } var elem string - if srcDir == "left" { - elem = db.listLpop(src) - } else if srcDir == "right" { - elem = db.listPop(src) - } else { + switch opts.srcDir { + case "left": + elem = db.listLpop(opts.src) + case "right": + elem = db.listPop(opts.src) + default: c.WriteError(msgSyntaxError) return } - if dstDir == "left" { - db.listLpush(dst, elem) - } else if dstDir == "right" { - db.listPush(dst, elem) - } else { + switch opts.dstDir { + case "left": + db.listLpush(opts.dst, elem) + case "right": + db.listPush(opts.dst, elem) + default: c.WriteError(msgSyntaxError) return } c.WriteBulk(elem) }) } + +// BLMOVE +func (m *Miniredis) cmdBlmove(c *server.Peer, cmd string, args []string) { + if len(args) != 5 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + if !m.handleAuth(c) { + return + } + if m.checkPubsub(c, cmd) { + return + } + + opts := struct { + src string + dst string + srcDir string + dstDir string + timeout time.Duration + }{ + src: args[0], + dst: args[1], + srcDir: strings.ToLower(args[2]), + dstDir: strings.ToLower(args[3]), + } + if ok := optDuration(c, args[len(args)-1], &opts.timeout); !ok { + return + } + + blocking( + m, + c, + opts.timeout, + func(c *server.Peer, ctx *connCtx) bool { + db := m.db(ctx.selectedDB) + + if !db.exists(opts.src) { + return false + } + if db.t(opts.src) != "list" || (db.exists(opts.dst) && db.t(opts.dst) != "list") { + c.WriteError(msgWrongType) + return true + } + + var elem string + switch opts.srcDir { + case "left": + elem = db.listLpop(opts.src) + case "right": + elem = db.listPop(opts.src) + default: + c.WriteError(msgSyntaxError) + return true + } + + switch opts.dstDir { + case "left": + db.listLpush(opts.dst, elem) + case "right": + db.listPush(opts.dst, elem) + default: + c.WriteError(msgSyntaxError) + return true + } + + c.WriteBulk(elem) + return true + }, + func(c *server.Peer) { + // timeout + c.WriteLen(-1) + }, + ) +} diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_object.go b/vendor/github.com/alicebob/miniredis/v2/cmd_object.go new file mode 100644 index 00000000000..b958a95cfe1 --- /dev/null +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_object.go @@ -0,0 +1,58 @@ +package miniredis + +import ( + "fmt" + "strings" + + "github.com/alicebob/miniredis/v2/server" +) + +// commandsObject handles all object operations. 
+func commandsObject(m *Miniredis) { + m.srv.Register("OBJECT", m.cmdObject) +} + +// OBJECT +func (m *Miniredis) cmdObject(c *server.Peer, cmd string, args []string) { + if len(args) == 0 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + if !m.handleAuth(c) { + return + } + if m.checkPubsub(c, cmd) { + return + } + + switch sub := strings.ToLower(args[0]); sub { + case "idletime": + m.cmdObjectIdletime(c, args[1:]) + default: + setDirty(c) + c.WriteError(fmt.Sprintf(msgFObjectUsage, sub)) + } +} + +// OBJECT IDLETIME +func (m *Miniredis) cmdObjectIdletime(c *server.Peer, args []string) { + if len(args) != 1 { + setDirty(c) + c.WriteError(errWrongNumber("object|idletime")) + return + } + key := args[0] + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + db := m.db(ctx.selectedDB) + + t, ok := db.lru[key] + if !ok { + c.WriteNull() + return + } + + c.WriteInt(int(db.master.effectiveNow().Sub(t).Seconds())) + }) +} diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_pubsub.go b/vendor/github.com/alicebob/miniredis/v2/cmd_pubsub.go index 70997be5ab1..0fc9f0de355 100644 --- a/vendor/github.com/alicebob/miniredis/v2/cmd_pubsub.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_pubsub.go @@ -29,8 +29,9 @@ func (m *Miniredis) cmdSubscribe(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } - if getCtx(c).nested { - c.WriteError(msgNotFromScripts) + ctx := getCtx(c) + if ctx.nested { + c.WriteError(msgNotFromScripts(ctx.nestedSHA)) return } @@ -53,8 +54,9 @@ func (m *Miniredis) cmdUnsubscribe(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } - if getCtx(c).nested { - c.WriteError(msgNotFromScripts) + ctx := getCtx(c) + if ctx.nested { + c.WriteError(msgNotFromScripts(ctx.nestedSHA)) return } @@ -103,8 +105,9 @@ func (m *Miniredis) cmdPsubscribe(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } - if getCtx(c).nested { - c.WriteError(msgNotFromScripts) + ctx := getCtx(c) + if ctx.nested { + c.WriteError(msgNotFromScripts(ctx.nestedSHA)) return } @@ -127,8 +130,9 @@ func (m *Miniredis) cmdPunsubscribe(c *server.Peer, cmd string, args []string) { if !m.handleAuth(c) { return } - if getCtx(c).nested { - c.WriteError(msgNotFromScripts) + ctx := getCtx(c) + if ctx.nested { + c.WriteError(msgNotFromScripts(ctx.nestedSHA)) return } @@ -212,7 +216,9 @@ func (m *Miniredis) cmdPubSub(c *server.Peer, cmd string, args []string) { case "NUMPAT": argsOk = len(subargs) == 0 default: - argsOk = false + setDirty(c) + c.WriteError(fmt.Sprintf(msgFPubsubUsageSimple, subcommand)) + return } if !argsOk { diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_scripting.go b/vendor/github.com/alicebob/miniredis/v2/cmd_scripting.go index ef10aaef0fe..5a48f8b7f47 100644 --- a/vendor/github.com/alicebob/miniredis/v2/cmd_scripting.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_scripting.go @@ -7,6 +7,7 @@ import ( "io" "strconv" "strings" + "sync" luajson "github.com/alicebob/gopher-json" lua "github.com/yuin/gopher-lua" @@ -21,9 +22,13 @@ func commandsScripting(m *Miniredis) { m.srv.Register("SCRIPT", m.cmdScript) } +var ( + parsedScripts = sync.Map{} +) + // Execute lua. Needs to run m.Lock()ed, from within withTx(). // Returns true if the lua was OK (and hence should be cached). 
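The scripting hunks below swap l.DoString for doScript, which caches compiled *lua.FunctionProto values in the package-level parsedScripts sync.Map so each distinct script is parsed and compiled once per process. The memoization pattern in isolation (compileFn stands in for the gopher-lua parse-and-compile step; it is an assumption for this sketch, not the library API):

package main

import (
	"fmt"
	"sync"
)

// protoCache sketches the sync.Map memoization added in doScript/compile:
// the first caller pays the compile cost, later callers reuse the result.
var protoCache sync.Map

func compileCached(script string, compileFn func(string) (string, error)) (string, error) {
	if v, ok := protoCache.Load(script); ok {
		return v.(string), nil
	}
	proto, err := compileFn(script)
	if err != nil {
		return "", err
	}
	// Two goroutines may compile the same script concurrently; both
	// results are equivalent, so the duplicate Store is harmless.
	protoCache.Store(script, proto)
	return proto, nil
}

func main() {
	compiles := 0
	fn := func(s string) (string, error) { compiles++; return "proto(" + s + ")", nil }
	compileCached("return 1", fn)
	compileCached("return 1", fn)
	fmt.Println(compiles) // 1: the second call hits the cache
}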
-func (m *Miniredis) runLuaScript(c *server.Peer, script string, args []string) bool { +func (m *Miniredis) runLuaScript(c *server.Peer, sha, script string, args []string) bool { l := lua.NewState(lua.Options{SkipOpenLibs: true}) defer l.Close() @@ -80,7 +85,7 @@ func (m *Miniredis) runLuaScript(c *server.Peer, script string, args []string) b } l.SetGlobal("ARGV", argvTable) - redisFuncs, redisConstants := mkLua(m.srv, c) + redisFuncs, redisConstants := mkLua(m.srv, c, sha) // Register command handlers l.Push(l.NewFunction(func(l *lua.LState) int { mod := l.RegisterModule("redis", redisFuncs).(*lua.LTable) @@ -91,13 +96,13 @@ func (m *Miniredis) runLuaScript(c *server.Peer, script string, args []string) b return 1 })) - l.DoString(protectGlobals) + _ = doScript(l, protectGlobals) l.Push(lua.LString("redis")) l.Call(1, 0) - if err := l.DoString(script); err != nil { - c.WriteError(errLuaParseError(err)) + if err := doScript(l, script); err != nil { + c.WriteError(err.Error()) return false } @@ -105,6 +110,42 @@ func (m *Miniredis) runLuaScript(c *server.Peer, script string, args []string) b return true } +// doScript pre-compiles the given script into a Lua prototype, +// then executes the pre-compiled function against the given lua state. +// +// This is thread-safe. +func doScript(l *lua.LState, script string) error { + proto, err := compile(script) + if err != nil { + return fmt.Errorf(errLuaParseError(err)) + } + + lfunc := l.NewFunctionFromProto(proto) + l.Push(lfunc) + if err := l.PCall(0, lua.MultRet, nil); err != nil { + // ensure we wrap with the correct format. + return fmt.Errorf(errLuaParseError(err)) + } + + return nil +} + +func compile(script string) (*lua.FunctionProto, error) { + if val, ok := parsedScripts.Load(script); ok { + return val.(*lua.FunctionProto), nil + } + chunk, err := parse.Parse(strings.NewReader(script), "") + if err != nil { + return nil, err + } + proto, err := lua.Compile(chunk, "") + if err != nil { + return nil, err + } + parsedScripts.Store(script, proto) + return proto, nil +} + func (m *Miniredis) cmdEval(c *server.Peer, cmd string, args []string) { if len(args) < 2 { setDirty(c) @@ -117,18 +158,18 @@ func (m *Miniredis) cmdEval(c *server.Peer, cmd string, args []string) { if m.checkPubsub(c, cmd) { return } - - if getCtx(c).nested { - c.WriteError(msgNotFromScripts) + ctx := getCtx(c) + if ctx.nested { + c.WriteError(msgNotFromScripts(ctx.nestedSHA)) return } script, args := args[0], args[1:] withTx(m, c, func(c *server.Peer, ctx *connCtx) { - ok := m.runLuaScript(c, script, args) + sha := sha1Hex(script) + ok := m.runLuaScript(c, sha, script, args) if ok { - sha := sha1Hex(script) m.scripts[sha] = script } }) @@ -146,8 +187,9 @@ func (m *Miniredis) cmdEvalsha(c *server.Peer, cmd string, args []string) { if m.checkPubsub(c, cmd) { return } - if getCtx(c).nested { - c.WriteError(msgNotFromScripts) + ctx := getCtx(c) + if ctx.nested { + c.WriteError(msgNotFromScripts(ctx.nestedSHA)) return } @@ -160,7 +202,7 @@ func (m *Miniredis) cmdEvalsha(c *server.Peer, cmd string, args []string) { return } - m.runLuaScript(c, script, args) + m.runLuaScript(c, sha, script, args) }) } @@ -177,28 +219,62 @@ func (m *Miniredis) cmdScript(c *server.Peer, cmd string, args []string) { return } - if getCtx(c).nested { - c.WriteError(msgNotFromScripts) + ctx := getCtx(c) + if ctx.nested { + c.WriteError(msgNotFromScripts(ctx.nestedSHA)) return } - subcmd, args := args[0], args[1:] + var opts struct { + subcmd string + script string + } - withTx(m, c, func(c
*server.Peer, ctx *connCtx) { - switch strings.ToLower(subcmd) { - case "load": - if len(args) != 1 { - c.WriteError(fmt.Sprintf(msgFScriptUsage, "LOAD")) - return + opts.subcmd, args = args[0], args[1:] + + switch strings.ToLower(opts.subcmd) { + case "load": + if len(args) != 1 { + setDirty(c) + c.WriteError(fmt.Sprintf(msgFScriptUsage, "LOAD")) + return + } + opts.script = args[0] + case "exists": + if len(args) == 0 { + setDirty(c) + c.WriteError(errWrongNumber("script|exists")) + return + } + case "flush": + if len(args) == 1 { + switch strings.ToUpper(args[0]) { + case "SYNC", "ASYNC": + args = args[1:] + default: } - script := args[0] + } + if len(args) != 0 { + setDirty(c) + c.WriteError(msgScriptFlush) + return + } + + default: + setDirty(c) + c.WriteError(fmt.Sprintf(msgFScriptUsageSimple, strings.ToUpper(opts.subcmd))) + return + } - if _, err := parse.Parse(strings.NewReader(script), "user_script"); err != nil { + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + switch strings.ToLower(opts.subcmd) { + case "load": + if _, err := parse.Parse(strings.NewReader(opts.script), "user_script"); err != nil { c.WriteError(errLuaParseError(err)) return } - sha := sha1Hex(script) - m.scripts[sha] = script + sha := sha1Hex(opts.script) + m.scripts[sha] = opts.script c.WriteBulk(sha) case "exists": @@ -212,23 +288,9 @@ func (m *Miniredis) cmdScript(c *server.Peer, cmd string, args []string) { } case "flush": - if len(args) == 1 { - switch strings.ToUpper(args[0]) { - case "SYNC", "ASYNC": - args = args[1:] - default: - } - } - if len(args) != 0 { - c.WriteError(msgScriptFlush) - return - } - m.scripts = map[string]string{} c.WriteOK() - default: - c.WriteError(fmt.Sprintf(msgFScriptUsage, strings.ToUpper(subcmd))) } }) } diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_server.go b/vendor/github.com/alicebob/miniredis/v2/cmd_server.go index 223651d39eb..6a41a433943 100644 --- a/vendor/github.com/alicebob/miniredis/v2/cmd_server.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_server.go @@ -3,17 +3,84 @@ package miniredis import ( + "fmt" "strconv" "strings" "github.com/alicebob/miniredis/v2/server" + "github.com/alicebob/miniredis/v2/size" ) func commandsServer(m *Miniredis) { + m.srv.Register("COMMAND", m.cmdCommand) m.srv.Register("DBSIZE", m.cmdDbsize) m.srv.Register("FLUSHALL", m.cmdFlushall) m.srv.Register("FLUSHDB", m.cmdFlushdb) + m.srv.Register("INFO", m.cmdInfo) m.srv.Register("TIME", m.cmdTime) + m.srv.Register("MEMORY", m.cmdMemory) +} + +// MEMORY +func (m *Miniredis) cmdMemory(c *server.Peer, cmd string, args []string) { + if len(args) == 0 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + if !m.handleAuth(c) { + return + } + if m.checkPubsub(c, cmd) { + return + } + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + db := m.db(ctx.selectedDB) + + cmd, args := strings.ToLower(args[0]), args[1:] + switch cmd { + case "usage": + if len(args) < 1 { + setDirty(c) + c.WriteError(errWrongNumber("memory|usage")) + return + } + if len(args) > 1 { + setDirty(c) + c.WriteError(msgSyntaxError) + return + } + + var ( + value interface{} + ok bool + ) + switch db.keys[args[0]] { + case "string": + value, ok = db.stringKeys[args[0]] + case "set": + value, ok = db.setKeys[args[0]] + case "hash": + value, ok = db.hashKeys[args[0]] + case "list": + value, ok = db.listKeys[args[0]] + case "hll": + value, ok = db.hllKeys[args[0]] + case "zset": + value, ok = db.sortedsetKeys[args[0]] + case "stream": + value, ok = db.streamKeys[args[0]] + } + if !ok { + 
c.WriteNull() + return + } + c.WriteInt(size.Of(value)) + default: + c.WriteError(fmt.Sprintf(msgMemorySubcommand, strings.ToUpper(cmd))) + } + }) } // DBSIZE @@ -100,8 +167,8 @@ func (m *Miniredis) cmdTime(c *server.Peer, cmd string, args []string) { withTx(m, c, func(c *server.Peer, ctx *connCtx) { now := m.effectiveNow() nanos := now.UnixNano() - seconds := nanos / 1000000000 - microseconds := (nanos / 1000) % 1000000 + seconds := nanos / 1_000_000_000 + microseconds := (nanos / 1_000) % 1_000_000 c.WriteLen(2) c.WriteBulk(strconv.FormatInt(seconds, 10)) diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_set.go b/vendor/github.com/alicebob/miniredis/v2/cmd_set.go index a9cdf411a0e..abcd1baa111 100644 --- a/vendor/github.com/alicebob/miniredis/v2/cmd_set.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_set.go @@ -3,6 +3,7 @@ package miniredis import ( + "fmt" "strconv" "strings" @@ -15,10 +16,12 @@ func commandsSet(m *Miniredis) { m.srv.Register("SCARD", m.cmdScard) m.srv.Register("SDIFF", m.cmdSdiff) m.srv.Register("SDIFFSTORE", m.cmdSdiffstore) + m.srv.Register("SINTERCARD", m.cmdSintercard) m.srv.Register("SINTER", m.cmdSinter) m.srv.Register("SINTERSTORE", m.cmdSinterstore) m.srv.Register("SISMEMBER", m.cmdSismember) m.srv.Register("SMEMBERS", m.cmdSmembers) + m.srv.Register("SMISMEMBER", m.cmdSmismember) m.srv.Register("SMOVE", m.cmdSmove) m.srv.Register("SPOP", m.cmdSpop) m.srv.Register("SRANDMEMBER", m.cmdSrandmember) @@ -217,6 +220,77 @@ func (m *Miniredis) cmdSinterstore(c *server.Peer, cmd string, args []string) { }) } +// SINTERCARD +func (m *Miniredis) cmdSintercard(c *server.Peer, cmd string, args []string) { + if len(args) < 2 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + if !m.handleAuth(c) { + return + } + if m.checkPubsub(c, cmd) { + return + } + + opts := struct { + keys []string + limit int + }{} + + numKeys, err := strconv.Atoi(args[0]) + if err != nil { + setDirty(c) + c.WriteError("ERR numkeys should be greater than 0") + return + } + if numKeys < 1 { + setDirty(c) + c.WriteError("ERR numkeys should be greater than 0") + return + } + + args = args[1:] + if len(args) < numKeys { + setDirty(c) + c.WriteError("ERR Number of keys can't be greater than number of args") + return + } + opts.keys = args[:numKeys] + + args = args[numKeys:] + if len(args) == 2 && strings.ToLower(args[0]) == "limit" { + l, err := strconv.Atoi(args[1]) + if err != nil { + setDirty(c) + c.WriteError(msgInvalidInt) + return + } + if l < 0 { + setDirty(c) + c.WriteError(msgLimitIsNegative) + return + } + opts.limit = l + } else if len(args) > 0 { + setDirty(c) + c.WriteError(msgSyntaxError) + return + } + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + db := m.db(ctx.selectedDB) + + count, err := db.setIntercard(opts.keys, opts.limit) + if err != nil { + c.WriteError(err.Error()) + return + } + c.WriteInt(count) + }) +} + // SISMEMBER func (m *Miniredis) cmdSismember(c *server.Peer, cmd string, args []string) { if len(args) != 2 { @@ -292,6 +366,50 @@ func (m *Miniredis) cmdSmembers(c *server.Peer, cmd string, args []string) { }) } +// SMISMEMBER +func (m *Miniredis) cmdSmismember(c *server.Peer, cmd string, args []string) { + if len(args) < 2 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + if !m.handleAuth(c) { + return + } + if m.checkPubsub(c, cmd) { + return + } + + key, values := args[0], args[1:] + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + db := m.db(ctx.selectedDB) + + if !db.exists(key) { + c.WriteLen(len(values)) + for 
range values { + c.WriteInt(0) + } + return + } + + if db.t(key) != "set" { + c.WriteError(ErrWrongType.Error()) + return + } + + c.WriteLen(len(values)) + for _, value := range values { + if db.setIsMember(key, value) { + c.WriteInt(1) + } else { + c.WriteInt(0) + } + } + return + }) +} + // SMOVE func (m *Miniredis) cmdSmove(c *server.Peer, cmd string, args []string) { if len(args) != 3 { @@ -399,12 +517,14 @@ func (m *Miniredis) cmdSpop(c *server.Peer, cmd string, args []string) { } var deleted []string + members := db.setMembers(opts.key) for i := 0; i < opts.count; i++ { - members := db.setMembers(opts.key) if len(members) == 0 { break } - member := members[m.randIntn(len(members))] + i := m.randIntn(len(members)) + member := members[i] + members = delElem(members, i) db.setRem(opts.key, member) deleted = append(deleted, member) } @@ -462,6 +582,10 @@ func (m *Miniredis) cmdSrandmember(c *server.Peer, cmd string, args []string) { db := m.db(ctx.selectedDB) if !db.exists(key) { + if withCount { + c.WriteLen(0) + return + } c.WriteNull() return } @@ -609,17 +733,22 @@ func (m *Miniredis) cmdSscan(c *server.Peer, cmd string, args []string) { return } - key := args[0] - cursor, err := strconv.Atoi(args[1]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidCursor) + var opts struct { + key string + value int + cursor int + count int + withMatch bool + match string + } + + opts.key = args[0] + if ok := optIntErr(c, args[1], &opts.cursor, msgInvalidCursor); !ok { return } args = args[2:] + // MATCH and COUNT options - var withMatch bool - var match string for len(args) > 0 { if strings.ToLower(args[0]) == "count" { if len(args) < 2 { @@ -627,13 +756,18 @@ func (m *Miniredis) cmdSscan(c *server.Peer, cmd string, args []string) { c.WriteError(msgSyntaxError) return } - _, err := strconv.Atoi(args[1]) - if err != nil { + count, err := strconv.Atoi(args[1]) + if err != nil || count < 0 { setDirty(c) c.WriteError(msgInvalidInt) return } - // We do nothing with count. 
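COUNT now matters: the rewritten SSCAN below pages through the matched members by treating the cursor as a plain offset and COUNT as the page size, returning cursor 0 when the scan is complete. A standalone model of that arithmetic (scanPage is illustrative, not miniredis API):

package main

import "fmt"

// scanPage models the cursor arithmetic of the rewritten SSCAN: the
// cursor is an offset into the (matched) member list, count is the page
// size (0 = everything), and a returned cursor of 0 ends the scan.
func scanPage(members []string, cursor, count int) (page []string, next int) {
	low := cursor
	high := low + count
	if high > len(members) || high == 0 {
		high = len(members)
	}
	if cursor > high {
		return nil, 0 // invalid cursor
	}
	next = low + count
	if next > len(members) {
		next = 0 // no next cursor
	}
	return members[low:high], next
}

func main() {
	members := []string{"a", "b", "c", "d", "e"}
	cursor := 0
	for {
		page, next := scanPage(members, cursor, 2)
		fmt.Println(page) // [a b], then [c d], then [e]
		if next == 0 {
			break
		}
		cursor = next
	}
}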
+ if count == 0 { + setDirty(c) + c.WriteError(msgSyntaxError) + return + } + opts.count = count args = args[2:] continue } @@ -643,8 +777,8 @@ func (m *Miniredis) cmdSscan(c *server.Peer, cmd string, args []string) { c.WriteError(msgSyntaxError) return } - withMatch = true - match = args[1] + opts.withMatch = true + opts.match = args[1] args = args[2:] continue } @@ -656,29 +790,47 @@ func (m *Miniredis) cmdSscan(c *server.Peer, cmd string, args []string) { withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) // return _all_ (matched) keys every time - - if cursor != 0 { + if db.exists(opts.key) && db.t(opts.key) != "set" { + c.WriteError(ErrWrongType.Error()) + return + } + members := db.setMembers(opts.key) + if opts.withMatch { + members, _ = matchKeys(members, opts.match) + } + low := opts.cursor + high := low + opts.count + // validate high is correct + if high > len(members) || high == 0 { + high = len(members) + } + if opts.cursor > high { // invalid cursor c.WriteLen(2) c.WriteBulk("0") // no next cursor c.WriteLen(0) // no elements return } - if db.exists(key) && db.t(key) != "set" { - c.WriteError(ErrWrongType.Error()) - return + cursorValue := low + opts.count + if cursorValue > len(members) { + cursorValue = 0 // no next cursor } - - members := db.setMembers(key) - if withMatch { - members, _ = matchKeys(members, match) - } - + members = members[low:high] c.WriteLen(2) - c.WriteBulk("0") // no next cursor + c.WriteBulk(fmt.Sprintf("%d", cursorValue)) c.WriteLen(len(members)) for _, k := range members { c.WriteBulk(k) } + }) } + +func delElem(ls []string, i int) []string { + // this swap+truncate is faster but changes behaviour: + // ls[i] = ls[len(ls)-1] + // ls = ls[:len(ls)-1] + // so we do the dumb thing: + ls = append(ls[:i], ls[i+1:]...) 
+ return ls +} diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_sorted_set.go b/vendor/github.com/alicebob/miniredis/v2/cmd_sorted_set.go index 75b540bafbe..bc1afc049c7 100644 --- a/vendor/github.com/alicebob/miniredis/v2/cmd_sorted_set.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_sorted_set.go @@ -4,6 +4,7 @@ package miniredis import ( "errors" + "fmt" "math" "sort" "strconv" @@ -18,7 +19,8 @@ func commandsSortedSet(m *Miniredis) { m.srv.Register("ZCARD", m.cmdZcard) m.srv.Register("ZCOUNT", m.cmdZcount) m.srv.Register("ZINCRBY", m.cmdZincrby) - m.srv.Register("ZINTERSTORE", m.cmdZinterstore) + m.srv.Register("ZINTER", m.makeCmdZinter(false)) + m.srv.Register("ZINTERSTORE", m.makeCmdZinter(true)) m.srv.Register("ZLEXCOUNT", m.cmdZlexcount) m.srv.Register("ZRANGE", m.cmdZrange) m.srv.Register("ZRANGEBYLEX", m.makeCmdZrangebylex(false)) @@ -33,6 +35,7 @@ func commandsSortedSet(m *Miniredis) { m.srv.Register("ZREVRANGEBYSCORE", m.makeCmdZrangebyscore(true)) m.srv.Register("ZREVRANK", m.makeCmdZrank(true)) m.srv.Register("ZSCORE", m.cmdZscore) + m.srv.Register("ZMSCORE", m.cmdZMscore) m.srv.Register("ZUNION", m.cmdZunion) m.srv.Register("ZUNIONSTORE", m.cmdZunionstore) m.srv.Register("ZSCAN", m.cmdZscan) @@ -55,42 +58,44 @@ func (m *Miniredis) cmdZadd(c *server.Peer, cmd string, args []string) { return } - key, args := args[0], args[1:] - var ( - nx = false - xx = false - gt = false - lt = false - ch = false - incr = false - elems = map[string]float64{} - ) + var opts struct { + key string + nx bool + xx bool + gt bool + lt bool + ch bool + incr bool + } + elems := map[string]float64{} + opts.key = args[0] + args = args[1:] outer: for len(args) > 0 { switch strings.ToUpper(args[0]) { case "NX": - nx = true + opts.nx = true args = args[1:] continue case "XX": - xx = true + opts.xx = true args = args[1:] continue case "GT": - gt = true + opts.gt = true args = args[1:] continue case "LT": - lt = true + opts.lt = true args = args[1:] continue case "CH": - ch = true + opts.ch = true args = args[1:] continue case "INCR": - incr = true + opts.incr = true args = args[1:] continue default: @@ -114,21 +119,21 @@ outer: args = args[2:] } - if xx && nx { + if opts.xx && opts.nx { setDirty(c) c.WriteError(msgXXandNX) return } - if gt && lt || - gt && nx || - lt && nx { + if opts.gt && opts.lt || + opts.gt && opts.nx || + opts.lt && opts.nx { setDirty(c) c.WriteError(msgGTLTandNX) return } - if incr && len(elems) > 1 { + if opts.incr && len(elems) > 1 { setDirty(c) c.WriteError(msgSingleElementPair) return @@ -137,22 +142,22 @@ outer: withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - if db.exists(key) && db.t(key) != "zset" { + if db.exists(opts.key) && db.t(opts.key) != "zset" { c.WriteError(ErrWrongType.Error()) return } - if incr { + if opts.incr { for member, delta := range elems { - if nx && db.ssetExists(key, member) { + if opts.nx && db.ssetExists(opts.key, member) { c.WriteNull() return } - if xx && !db.ssetExists(key, member) { + if opts.xx && !db.ssetExists(opts.key, member) { c.WriteNull() return } - newScore := db.ssetIncrby(key, member, delta) + newScore := db.ssetIncrby(opts.key, member, delta) c.WriteFloat(newScore) } return @@ -160,23 +165,24 @@ outer: res := 0 for member, score := range elems { - if nx && db.ssetExists(key, member) { + exists := db.ssetExists(opts.key, member) + if opts.nx && exists { continue } - if xx && !db.ssetExists(key, member) { + if opts.xx && !exists { continue } - old := db.ssetScore(key, member) - if gt && score <= 
old { + old := db.ssetScore(opts.key, member) + if opts.gt && exists && score <= old { continue } - if lt && score >= old { + if opts.lt && exists && score >= old { continue } - if db.ssetAdd(key, score, member) { + if db.ssetAdd(opts.key, score, member) { res++ } else { - if ch && old != score { + if opts.ch && old != score { // if 'CH' is specified, only count changed keys res++ } @@ -233,14 +239,25 @@ func (m *Miniredis) cmdZcount(c *server.Peer, cmd string, args []string) { return } - key := args[0] - min, minIncl, err := parseFloatRange(args[1]) + var ( + opts struct { + key string + min float64 + minIncl bool + max float64 + maxIncl bool + } + err error + ) + + opts.key = args[0] + opts.min, opts.minIncl, err = parseFloatRange(args[1]) if err != nil { setDirty(c) c.WriteError(msgInvalidMinMax) return } - max, maxIncl, err := parseFloatRange(args[2]) + opts.max, opts.maxIncl, err = parseFloatRange(args[2]) if err != nil { setDirty(c) c.WriteError(msgInvalidMinMax) @@ -250,18 +267,18 @@ func (m *Miniredis) cmdZcount(c *server.Peer, cmd string, args []string) { withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - if !db.exists(key) { + if !db.exists(opts.key) { c.WriteInt(0) return } - if db.t(key) != "zset" { + if db.t(opts.key) != "zset" { c.WriteError(ErrWrongType.Error()) return } - members := db.ssetElements(key) - members = withSSRange(members, min, minIncl, max, maxIncl) + members := db.ssetElements(opts.key) + members = withSSRange(members, opts.min, opts.minIncl, opts.max, opts.maxIncl) c.WriteInt(len(members)) }) } @@ -280,166 +297,218 @@ func (m *Miniredis) cmdZincrby(c *server.Peer, cmd string, args []string) { return } - key := args[0] - delta, err := strconv.ParseFloat(args[1], 64) + var opts struct { + key string + delta float64 + member string + } + + opts.key = args[0] + d, err := strconv.ParseFloat(args[1], 64) if err != nil { setDirty(c) c.WriteError(msgInvalidFloat) return } - member := args[2] + opts.delta = d + opts.member = args[2] withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - if db.exists(key) && db.t(key) != "zset" { + if db.exists(opts.key) && db.t(opts.key) != "zset" { c.WriteError(msgWrongType) return } - newScore := db.ssetIncrby(key, member, delta) + newScore := db.ssetIncrby(opts.key, opts.member, opts.delta) c.WriteFloat(newScore) }) } -// ZINTERSTORE -func (m *Miniredis) cmdZinterstore(c *server.Peer, cmd string, args []string) { - if len(args) < 3 { - setDirty(c) - c.WriteError(errWrongNumber(cmd)) - return - } - if !m.handleAuth(c) { - return - } - if m.checkPubsub(c, cmd) { - return - } +// ZINTERSTORE and ZINTER +func (m *Miniredis) makeCmdZinter(store bool) func(c *server.Peer, cmd string, args []string) { + return func(c *server.Peer, cmd string, args []string) { + minArgs := 2 + if store { + minArgs++ + } + if len(args) < minArgs { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + if !m.handleAuth(c) { + return + } + if m.checkPubsub(c, cmd) { + return + } - destination := args[0] - numKeys, err := strconv.Atoi(args[1]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidInt) - return - } - args = args[2:] - if len(args) < numKeys { - setDirty(c) - c.WriteError(msgSyntaxError) - return - } - if numKeys <= 0 { - setDirty(c) - c.WriteError("ERR at least 1 input key is needed for ZUNIONSTORE/ZINTERSTORE") - return - } - keys := args[:numKeys] - args = args[numKeys:] + var opts = struct { + Store bool // if true this is ZINTERSTORE + Destination string // only relevant if 
$store is true + Keys []string + Aggregate string + WithWeights bool + Weights []float64 + WithScores bool // only for ZINTER + }{ + Store: store, + Aggregate: "sum", + } - withWeights := false - weights := []float64{} - aggregate := "sum" - for len(args) > 0 { - switch strings.ToLower(args[0]) { - case "weights": - if len(args) < numKeys+1 { - setDirty(c) - c.WriteError(msgSyntaxError) - return - } - for i := 0; i < numKeys; i++ { - f, err := strconv.ParseFloat(args[i+1], 64) - if err != nil { + if store { + opts.Destination = args[0] + args = args[1:] + } + numKeys, err := strconv.Atoi(args[0]) + if err != nil { + setDirty(c) + c.WriteError(msgInvalidInt) + return + } + args = args[1:] + if len(args) < numKeys { + setDirty(c) + c.WriteError(msgSyntaxError) + return + } + if numKeys <= 0 { + setDirty(c) + c.WriteError("ERR at least 1 input key is needed for ZUNIONSTORE/ZINTERSTORE") + return + } + opts.Keys = args[:numKeys] + args = args[numKeys:] + + for len(args) > 0 { + switch strings.ToLower(args[0]) { + case "weights": + if len(args) < numKeys+1 { setDirty(c) - c.WriteError("ERR weight value is not a float") + c.WriteError(msgSyntaxError) return } - weights = append(weights, f) - } - withWeights = true - args = args[numKeys+1:] - case "aggregate": - if len(args) < 2 { - setDirty(c) - c.WriteError(msgSyntaxError) - return - } - aggregate = strings.ToLower(args[1]) - switch aggregate { - case "sum", "min", "max": + for i := 0; i < numKeys; i++ { + f, err := strconv.ParseFloat(args[i+1], 64) + if err != nil { + setDirty(c) + c.WriteError("ERR weight value is not a float") + return + } + opts.Weights = append(opts.Weights, f) + } + opts.WithWeights = true + args = args[numKeys+1:] + case "aggregate": + if len(args) < 2 { + setDirty(c) + c.WriteError(msgSyntaxError) + return + } + aggregate := strings.ToLower(args[1]) + switch aggregate { + case "sum", "min", "max": + opts.Aggregate = aggregate + default: + setDirty(c) + c.WriteError(msgSyntaxError) + return + } + args = args[2:] + case "withscores": + if store { + setDirty(c) + c.WriteError(msgSyntaxError) + return + } + opts.WithScores = true + args = args[1:] default: setDirty(c) c.WriteError(msgSyntaxError) return } - args = args[2:] - default: - setDirty(c) - c.WriteError(msgSyntaxError) - return } - } - withTx(m, c, func(c *server.Peer, ctx *connCtx) { - db := m.db(ctx.selectedDB) - db.del(destination, true) - - // We collect everything and remove all keys which turned out not to be - // present in every set. - sset := map[string]float64{} - counts := map[string]int{} - for i, key := range keys { - if !db.exists(key) { - continue - } + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + db := m.db(ctx.selectedDB) - var set map[string]float64 - switch db.t(key) { - case "set": - set = map[string]float64{} - for elem := range db.setKeys[key] { - set[elem] = 1.0 - } - case "zset": - set = db.sortedSet(key) - default: - c.WriteError(msgWrongType) - return - } - for member, score := range set { - if withWeights { - score *= weights[i] - } - counts[member]++ - old, ok := sset[member] - if !ok { - sset[member] = score + // We collect everything and remove all keys which turned out not to be + // present in every set. 
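That collection step, isolated as a standalone function for clarity (intersectScores is an illustrative name; a nil weights slice stands for the no-WEIGHTS case):

package main

import "fmt"

// intersectScores models the ZINTER/ZINTERSTORE core: collect weighted
// scores per member, aggregate duplicates with sum/min/max, then keep
// only members that appeared in every input set.
func intersectScores(sets []map[string]float64, weights []float64, aggregate string) map[string]float64 {
	sset := map[string]float64{}
	counts := map[string]int{}
	for i, set := range sets {
		for member, score := range set {
			if weights != nil {
				score *= weights[i]
			}
			counts[member]++
			old, ok := sset[member]
			if !ok {
				sset[member] = score
				continue
			}
			switch aggregate {
			case "sum":
				sset[member] += score
			case "min":
				if score < old {
					sset[member] = score
				}
			case "max":
				if score > old {
					sset[member] = score
				}
			}
		}
	}
	for member, n := range counts {
		if n != len(sets) { // drop members missing from any input set
			delete(sset, member)
		}
	}
	return sset
}

func main() {
	a := map[string]float64{"x": 1, "y": 2}
	b := map[string]float64{"y": 10, "z": 3}
	fmt.Println(intersectScores([]map[string]float64{a, b}, nil, "sum")) // map[y:12]
}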
+ sset := map[string]float64{} + counts := map[string]int{} + for i, key := range opts.Keys { + if !db.exists(key) { continue } - switch aggregate { + + var set map[string]float64 + switch db.t(key) { + case "set": + set = map[string]float64{} + for elem := range db.setKeys[key] { + set[elem] = 1.0 + } + case "zset": + set = db.sortedSet(key) default: - panic("Invalid aggregate") - case "sum": - sset[member] += score - case "min": - if score < old { - sset[member] = score + c.WriteError(msgWrongType) + return + } + for member, score := range set { + if opts.WithWeights { + score *= opts.Weights[i] } - case "max": - if score > old { + counts[member]++ + old, ok := sset[member] + if !ok { sset[member] = score + continue + } + switch opts.Aggregate { + default: + panic("Invalid aggregate") + case "sum": + sset[member] += score + case "min": + if score < old { + sset[member] = score + } + case "max": + if score > old { + sset[member] = score + } } } } - } - for key, count := range counts { - if count != numKeys { - delete(sset, key) + for key, count := range counts { + if count != numKeys { + delete(sset, key) + } } - } - db.ssetSet(destination, sset) - c.WriteInt(len(sset)) - }) + + if opts.Store { + // ZINTERSTORE mode + db.del(opts.Destination, true) + db.ssetSet(opts.Destination, sset) + c.WriteInt(len(sset)) + return + } + // ZINTER mode + size := len(sset) + if opts.WithScores { + size *= 2 + } + c.WriteLen(size) + for _, l := range sortedKeys(sset) { + c.WriteBulk(l) + if opts.WithScores { + c.WriteFloat(sset[l]) + } + } + }) + } } // ZLEXCOUNT @@ -739,7 +808,7 @@ func (m *Miniredis) makeCmdZrangebyscore(reverse bool) server.Cmd { // ZRANK and ZREVRANK func (m *Miniredis) makeCmdZrank(reverse bool) server.Cmd { return func(c *server.Peer, cmd string, args []string) { - if len(args) != 2 { + if len(args) < 2 { setDirty(c) c.WriteError(errWrongNumber(cmd)) return @@ -756,8 +825,24 @@ func (m *Miniredis) makeCmdZrank(reverse bool) server.Cmd { withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) + withScore := false + if len(args) > 0 && strings.ToUpper(args[len(args)-1]) == "WITHSCORE" { + withScore = true + args = args[:len(args)-1] + } + + if len(args) > 2 { + setDirty(c) + c.WriteError(msgSyntaxError) + return + } + if !db.exists(key) { - c.WriteNull() + if withScore { + c.WriteLen(-1) + } else { + c.WriteNull() + } return } @@ -772,10 +857,21 @@ func (m *Miniredis) makeCmdZrank(reverse bool) server.Cmd { } rank, ok := db.ssetRank(key, member, direction) if !ok { - c.WriteNull() + if withScore { + c.WriteLen(-1) + } else { + c.WriteNull() + } return } - c.WriteInt(rank) + + if withScore { + c.WriteLen(2) + c.WriteInt(rank) + c.WriteFloat(db.ssetScore(key, member)) + } else { + c.WriteInt(rank) + } }) } } @@ -889,37 +985,37 @@ func (m *Miniredis) cmdZremrangebyrank(c *server.Peer, cmd string, args []string return } - key := args[0] - start, err := strconv.Atoi(args[1]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidInt) + var opts struct { + key string + start int + end int + } + + opts.key = args[0] + if ok := optInt(c, args[1], &opts.start); !ok { return } - end, err := strconv.Atoi(args[2]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidInt) + if ok := optInt(c, args[2], &opts.end); !ok { return } withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - if !db.exists(key) { + if !db.exists(opts.key) { c.WriteInt(0) return } - if db.t(key) != "zset" { + if db.t(opts.key) != "zset" { c.WriteError(ErrWrongType.Error()) 
return } - members := db.ssetMembers(key) - rs, re := redisRange(len(members), start, end, false) + members := db.ssetMembers(opts.key) + rs, re := redisRange(len(members), opts.start, opts.end, false) for _, el := range members[rs:re] { - db.ssetRem(key, el) + db.ssetRem(opts.key, el) } c.WriteInt(re - rs) }) @@ -939,14 +1035,24 @@ func (m *Miniredis) cmdZremrangebyscore(c *server.Peer, cmd string, args []strin return } - key := args[0] - min, minIncl, err := parseFloatRange(args[1]) + var ( + opts struct { + key string + min float64 + minIncl bool + max float64 + maxIncl bool + } + err error + ) + opts.key = args[0] + opts.min, opts.minIncl, err = parseFloatRange(args[1]) if err != nil { setDirty(c) c.WriteError(msgInvalidMinMax) return } - max, maxIncl, err := parseFloatRange(args[2]) + opts.max, opts.maxIncl, err = parseFloatRange(args[2]) if err != nil { setDirty(c) c.WriteError(msgInvalidMinMax) @@ -956,21 +1062,21 @@ func (m *Miniredis) cmdZremrangebyscore(c *server.Peer, cmd string, args []strin withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - if !db.exists(key) { + if !db.exists(opts.key) { c.WriteInt(0) return } - if db.t(key) != "zset" { + if db.t(opts.key) != "zset" { c.WriteError(ErrWrongType.Error()) return } - members := db.ssetElements(key) - members = withSSRange(members, min, minIncl, max, maxIncl) + members := db.ssetElements(opts.key) + members = withSSRange(members, opts.min, opts.minIncl, opts.max, opts.maxIncl) for _, el := range members { - db.ssetRem(key, el.member) + db.ssetRem(opts.key, el.member) } c.WriteInt(len(members)) }) @@ -1014,6 +1120,49 @@ func (m *Miniredis) cmdZscore(c *server.Peer, cmd string, args []string) { }) } +// ZMSCORE +func (m *Miniredis) cmdZMscore(c *server.Peer, cmd string, args []string) { + if len(args) < 2 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + if !m.handleAuth(c) { + return + } + if m.checkPubsub(c, cmd) { + return + } + + key, members := args[0], args[1:] + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + db := m.db(ctx.selectedDB) + + if !db.exists(key) { + c.WriteLen(len(members)) + for range members { + c.WriteNull() + } + return + } + + if db.t(key) != "zset" { + c.WriteError(ErrWrongType.Error()) + return + } + + c.WriteLen(len(members)) + for _, member := range members { + if !db.ssetExists(key, member) { + c.WriteNull() + continue + } + c.WriteFloat(db.ssetScore(key, member)) + } + }) +} + // parseFloatRange handles ZRANGEBYSCORE floats. They are inclusive unless the // string starts with '(' func parseFloatRange(s string) (float64, bool, error) { @@ -1371,17 +1520,20 @@ func (m *Miniredis) cmdZscan(c *server.Peer, cmd string, args []string) { return } - key := args[0] - cursor, err := strconv.Atoi(args[1]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidCursor) + var opts struct { + key string + cursor int + count int + withMatch bool + match string + } + + opts.key = args[0] + if ok := optIntErr(c, args[1], &opts.cursor, msgInvalidCursor); !ok { return } args = args[2:] // MATCH and COUNT options - var withMatch bool - var match string for len(args) > 0 { if strings.ToLower(args[0]) == "count" { if len(args) < 2 { @@ -1389,13 +1541,18 @@ func (m *Miniredis) cmdZscan(c *server.Peer, cmd string, args []string) { c.WriteError(msgSyntaxError) return } - _, err := strconv.Atoi(args[1]) + count, err := strconv.Atoi(args[1]) if err != nil { setDirty(c) c.WriteError(msgInvalidInt) return } - // We do nothing with count. 
+ if count <= 0 { + setDirty(c) + c.WriteError(msgSyntaxError) + return + } + opts.count = count args = args[2:] continue } @@ -1405,8 +1562,8 @@ func (m *Miniredis) cmdZscan(c *server.Peer, cmd string, args []string) { c.WriteError(msgSyntaxError) return } - withMatch = true - match = args[1] + opts.withMatch = true + opts.match = args[1] args = args[2:] continue } @@ -1417,31 +1574,42 @@ func (m *Miniredis) cmdZscan(c *server.Peer, cmd string, args []string) { withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - // Paging is not implementend, all results are returned for cursor 0. - if cursor != 0 { - // Invalid cursor. + if db.exists(opts.key) && db.t(opts.key) != "zset" { + c.WriteError(ErrWrongType.Error()) + return + } + + members := db.ssetMembers(opts.key) + if opts.withMatch { + members, _ = matchKeys(members, opts.match) + } + + low := opts.cursor + high := low + opts.count + // validate high is correct + if high > len(members) || high == 0 { + high = len(members) + } + if opts.cursor > high { + // invalid cursor c.WriteLen(2) c.WriteBulk("0") // no next cursor c.WriteLen(0) // no elements return } - if db.exists(key) && db.t(key) != "zset" { - c.WriteError(ErrWrongType.Error()) - return - } - - members := db.ssetMembers(key) - if withMatch { - members, _ = matchKeys(members, match) + cursorValue := low + opts.count + if cursorValue >= len(members) { + cursorValue = 0 // no next cursor } + members = members[low:high] c.WriteLen(2) - c.WriteBulk("0") // no next cursor + c.WriteBulk(fmt.Sprintf("%d", cursorValue)) // HSCAN gives key, values. c.WriteLen(len(members) * 2) for _, k := range members { c.WriteBulk(k) - c.WriteFloat(db.ssetScore(key, k)) + c.WriteFloat(db.ssetScore(opts.key, k)) } }) } @@ -1463,10 +1631,9 @@ func (m *Miniredis) cmdZpopmax(reverse bool) server.Cmd { var err error if len(args) > 1 { count, err = strconv.Atoi(args[1]) - - if err != nil { + if err != nil || count < 0 { setDirty(c) - c.WriteError(msgInvalidInt) + c.WriteError(msgInvalidRange) return } } @@ -1536,17 +1703,12 @@ func (m *Miniredis) cmdZrandmember(c *server.Peer, cmd string, args []string) { args = args[1:] if len(args) > 0 { - count := args[0] - args = args[1:] - - n, err := strconv.Atoi(count) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidInt) + // can be negative + if ok := optInt(c, args[0], &opts.count); !ok { return } opts.withCount = true - opts.count = n // can be negative + args = args[1:] } if len(args) > 0 && strings.ToUpper(args[0]) == "WITHSCORES" { @@ -1852,3 +2014,12 @@ func parseLexrange(s string) (string, bool, error) { return "", false, errors.New(msgInvalidRangeItem) } } + +func sortedKeys(m map[string]float64) []string { + var keys []string + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_stream.go b/vendor/github.com/alicebob/miniredis/v2/cmd_stream.go index 3ba0af0204c..b6d00343bcc 100644 --- a/vendor/github.com/alicebob/miniredis/v2/cmd_stream.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_stream.go @@ -28,6 +28,7 @@ func commandsStream(m *Miniredis) { m.srv.Register("XPENDING", m.cmdXpending) m.srv.Register("XTRIM", m.cmdXtrim) m.srv.Register("XAUTOCLAIM", m.cmdXautoclaim) + m.srv.Register("XCLAIM", m.cmdXclaim) } // XADD @@ -47,8 +48,13 @@ func (m *Miniredis) cmdXadd(c *server.Peer, cmd string, args []string) { key, args := args[0], args[1:] withTx(m, c, func(c *server.Peer, ctx *connCtx) { - maxlen := -1 + minID := "" + makeStream := 
true + if strings.ToLower(args[0]) == "nomkstream" { + args = args[1:] + makeStream = false + } if strings.ToLower(args[0]) == "maxlen" { args = args[1:] // we don't treat "~" special @@ -66,6 +72,14 @@ func (m *Miniredis) cmdXadd(c *server.Peer, cmd string, args []string) { } maxlen = n args = args[1:] + } else if strings.ToLower(args[0]) == "minid" { + args = args[1:] + // we don't treat "~" special + if args[0] == "~" { + args = args[1:] + } + minID = args[0] + args = args[1:] } if len(args) < 1 { c.WriteError(errWrongNumber(cmd)) @@ -92,7 +106,10 @@ func (m *Miniredis) cmdXadd(c *server.Peer, cmd string, args []string) { return } if s == nil { - // TODO: NOMKSTREAM + if !makeStream { + c.WriteNull() + return + } s, _ = db.newStream(key) } @@ -109,7 +126,10 @@ func (m *Miniredis) cmdXadd(c *server.Peer, cmd string, args []string) { if maxlen >= 0 { s.trim(maxlen) } - db.keyVersion[key]++ + if minID != "" { + s.trimBefore(minID) + } + db.incr(key) c.WriteBulk(newID) }) @@ -168,26 +188,30 @@ func (m *Miniredis) makeCmdXrange(reverse bool) server.Cmd { return } - var ( - key = args[0] - startKey = args[1] - endKey = args[2] + opts := struct { + key string + startKey string startExclusive bool + endKey string endExclusive bool - ) - if strings.HasPrefix(startKey, "(") { - startExclusive = true - startKey = startKey[1:] - if startKey == "-" || startKey == "+" { + }{ + key: args[0], + startKey: args[1], + endKey: args[2], + } + if strings.HasPrefix(opts.startKey, "(") { + opts.startExclusive = true + opts.startKey = opts.startKey[1:] + if opts.startKey == "-" || opts.startKey == "+" { setDirty(c) c.WriteError(msgInvalidStreamID) return } } - if strings.HasPrefix(endKey, "(") { - endExclusive = true - endKey = endKey[1:] - if endKey == "-" || endKey == "+" { + if strings.HasPrefix(opts.endKey, "(") { + opts.endExclusive = true + opts.endKey = opts.endKey[1:] + if opts.endKey == "-" || opts.endKey == "+" { setDirty(c) c.WriteError(msgInvalidStreamID) return @@ -205,12 +229,12 @@ func (m *Miniredis) makeCmdXrange(reverse bool) server.Cmd { } withTx(m, c, func(c *server.Peer, ctx *connCtx) { - start, err := formatStreamRangeBound(startKey, true, reverse) + start, err := formatStreamRangeBound(opts.startKey, true, reverse) if err != nil { c.WriteError(msgInvalidStreamID) return } - end, err := formatStreamRangeBound(endKey, false, reverse) + end, err := formatStreamRangeBound(opts.endKey, false, reverse) if err != nil { c.WriteError(msgInvalidStreamID) return @@ -223,17 +247,17 @@ func (m *Miniredis) makeCmdXrange(reverse bool) server.Cmd { db := m.db(ctx.selectedDB) - if !db.exists(key) { + if !db.exists(opts.key) { c.WriteLen(0) return } - if db.t(key) != "stream" { + if db.t(opts.key) != "stream" { c.WriteError(ErrWrongType.Error()) return } - var entries = db.streamKeys[key].entries + var entries = db.streamKeys[opts.key].entries if reverse { entries = reversedStreamEntries(entries) } @@ -270,11 +294,11 @@ func (m *Miniredis) makeCmdXrange(reverse bool) server.Cmd { } // Continue if start exclusive and entry ID == start - if startExclusive && streamCmp(entry.ID, start) == 0 { + if opts.startExclusive && streamCmp(entry.ID, start) == 0 { continue } // Continue if end exclusive and entry ID == end - if endExclusive && streamCmp(entry.ID, end) == 0 { + if opts.endExclusive && streamCmp(entry.ID, end) == 0 { continue } @@ -296,19 +320,50 @@ func (m *Miniredis) makeCmdXrange(reverse bool) server.Cmd { // XGROUP func (m *Miniredis) cmdXgroup(c *server.Peer, cmd string, args []string) { - if 
(len(args) == 4 || len(args) == 5) && strings.ToUpper(args[0]) == "CREATE" { + if len(args) == 0 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + if !m.handleAuth(c) { + return + } + if m.checkPubsub(c, cmd) { + return + } + + subCmd, args := strings.ToLower(args[0]), args[1:] + switch subCmd { + case "create": m.cmdXgroupCreate(c, cmd, args) - } else { - j := strings.Join(args, " ") - err := fmt.Sprintf("ERR 'XGROUP %s' not supported", j) + case "destroy": + m.cmdXgroupDestroy(c, cmd, args) + case "createconsumer": + m.cmdXgroupCreateconsumer(c, cmd, args) + case "delconsumer": + m.cmdXgroupDelconsumer(c, cmd, args) + case "help", + "setid": + err := fmt.Sprintf("ERR 'XGROUP %s' not supported", subCmd) setDirty(c) c.WriteError(err) + default: + setDirty(c) + c.WriteError(fmt.Sprintf( + "ERR unknown subcommand '%s'. Try XGROUP HELP.", + subCmd, + )) } } // XGROUP CREATE func (m *Miniredis) cmdXgroupCreate(c *server.Peer, cmd string, args []string) { - stream, group, id := args[1], args[2], args[3] + if len(args) != 3 && len(args) != 4 { + setDirty(c) + c.WriteError(errWrongNumber("CREATE")) + return + } + stream, group, id := args[0], args[1], args[2] withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) @@ -318,7 +373,7 @@ func (m *Miniredis) cmdXgroupCreate(c *server.Peer, cmd string, args []string) { c.WriteError(err.Error()) return } - if s == nil && len(args) == 5 && strings.ToUpper(args[4]) == "MKSTREAM" { + if s == nil && len(args) == 4 && strings.ToUpper(args[3]) == "MKSTREAM" { if s, err = db.newStream(stream); err != nil { c.WriteError(err.Error()) return @@ -338,6 +393,124 @@ func (m *Miniredis) cmdXgroupCreate(c *server.Peer, cmd string, args []string) { }) } +// XGROUP DESTROY +func (m *Miniredis) cmdXgroupDestroy(c *server.Peer, cmd string, args []string) { + if len(args) != 2 { + setDirty(c) + c.WriteError(errWrongNumber("DESTROY")) + return + } + stream, groupName := args[0], args[1] + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + db := m.db(ctx.selectedDB) + + s, err := db.stream(stream) + if err != nil { + c.WriteError(err.Error()) + return + } + if s == nil { + c.WriteError(msgXgroupKeyNotFound) + return + } + + if _, ok := s.groups[groupName]; !ok { + c.WriteInt(0) + return + } + delete(s.groups, groupName) + c.WriteInt(1) + }) +} + +// XGROUP CREATECONSUMER +func (m *Miniredis) cmdXgroupCreateconsumer(c *server.Peer, cmd string, args []string) { + if len(args) != 3 { + setDirty(c) + c.WriteError(errWrongNumber("CREATECONSUMER")) + return + } + key, groupName, consumerName := args[0], args[1], args[2] + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + db := m.db(ctx.selectedDB) + + s, err := db.stream(key) + if err != nil { + c.WriteError(err.Error()) + return + } + if s == nil { + c.WriteError(msgXgroupKeyNotFound) + return + } + + g, ok := s.groups[groupName] + if !ok { + err := fmt.Sprintf("NOGROUP No such consumer group '%s' for key name '%s'", groupName, key) + c.WriteError(err) + return + } + + if _, ok = g.consumers[consumerName]; ok { + c.WriteInt(0) + return + } + g.consumers[consumerName] = &consumer{} + c.WriteInt(1) + }) +} + +// XGROUP DELCONSUMER +func (m *Miniredis) cmdXgroupDelconsumer(c *server.Peer, cmd string, args []string) { + if len(args) != 3 { + setDirty(c) + c.WriteError(errWrongNumber("DELCONSUMER")) + return + } + key, groupName, consumerName := args[0], args[1], args[2] + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + db := m.db(ctx.selectedDB) + + s, err := db.stream(key) + if err 
!= nil {
+		c.WriteError(err.Error())
+		return
+	}
+	if s == nil {
+		c.WriteError(msgXgroupKeyNotFound)
+		return
+	}
+
+	g, ok := s.groups[groupName]
+	if !ok {
+		err := fmt.Sprintf("NOGROUP No such consumer group '%s' for key name '%s'", groupName, key)
+		c.WriteError(err)
+		return
+	}
+
+	consumer, ok := g.consumers[consumerName]
+	if !ok {
+		c.WriteInt(0)
+		return
+	}
+	defer delete(g.consumers, consumerName)
+
+	if consumer.numPendingEntries > 0 {
+		newPending := make([]pendingEntry, 0)
+		for _, entry := range g.pending {
+			if entry.consumer != consumerName {
+				newPending = append(newPending, entry)
+			}
+		}
+		g.pending = newPending
+	}
+	c.WriteInt(consumer.numPendingEntries)
+	})
+}
+
 // XINFO
 func (m *Miniredis) cmdXinfo(c *server.Peer, cmd string, args []string) {
 	if len(args) < 1 {
@@ -345,22 +518,32 @@ func (m *Miniredis) cmdXinfo(c *server.Peer, cmd string, args []string) {
 		c.WriteError(errWrongNumber(cmd))
 		return
 	}
+	if !m.handleAuth(c) {
+		return
+	}
+	if m.checkPubsub(c, cmd) {
+		return
+	}
+
 	subCmd, args := strings.ToUpper(args[0]), args[1:]
 	switch subCmd {
 	case "STREAM":
 		m.cmdXinfoStream(c, args)
-	case "CONSUMERS", "GROUPS", "HELP":
+	case "CONSUMERS":
+		m.cmdXinfoConsumers(c, args)
+	case "GROUPS":
+		m.cmdXinfoGroups(c, args)
+	case "HELP":
 		err := fmt.Sprintf("'XINFO %s' not supported", strings.Join(args, " "))
 		setDirty(c)
 		c.WriteError(err)
 	default:
 		setDirty(c)
 		c.WriteError(fmt.Sprintf(
-			"ERR Unknown subcommand or wrong number of arguments for '%s'. Try XINFO HELP.",
+			"ERR unknown subcommand or wrong number of arguments for '%s'. Try XINFO HELP.",
 			subCmd,
 		))
 	}
-
 }
 
 // XINFO STREAM
@@ -368,10 +551,11 @@ func (m *Miniredis) cmdXinfo(c *server.Peer, cmd string, args []string) {
 func (m *Miniredis) cmdXinfoStream(c *server.Peer, args []string) {
 	if len(args) < 1 {
 		setDirty(c)
-		c.WriteError(errWrongNumber("XINFO"))
+		c.WriteError(errWrongNumber("STREAM"))
 		return
 	}
 	key := args[0]
+
 	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
 		db := m.db(ctx.selectedDB)
@@ -391,6 +575,112 @@ func (m *Miniredis) cmdXinfoStream(c *server.Peer, args []string) {
 	})
 }
 
+// XINFO GROUPS
+func (m *Miniredis) cmdXinfoGroups(c *server.Peer, args []string) {
+	if len(args) != 1 {
+		setDirty(c)
+		c.WriteError(errWrongNumber("GROUPS"))
+		return
+	}
+	key := args[0]
+
+	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
+		db := m.db(ctx.selectedDB)
+
+		s, err := db.stream(key)
+		if err != nil {
+			c.WriteError(err.Error())
+			return
+		}
+		if s == nil {
+			c.WriteError(msgKeyNotFound)
+			return
+		}
+
+		c.WriteLen(len(s.groups))
+		for name, g := range s.groups {
+			c.WriteMapLen(6)
+
+			c.WriteBulk("name")
+			c.WriteBulk(name)
+			c.WriteBulk("consumers")
+			c.WriteInt(len(g.consumers))
+			c.WriteBulk("pending")
+			c.WriteInt(len(g.activePending()))
+			c.WriteBulk("last-delivered-id")
+			c.WriteBulk(g.lastID)
+			c.WriteBulk("entries-read")
+			c.WriteNull()
+			c.WriteBulk("lag")
+			c.WriteInt(len(g.stream.entries))
+		}
+	})
+}
+
+// XINFO CONSUMERS
+// Please note that this is only a partial implementation: the "idle" and
+// "inactive" values it reports are derived from timestamps which not every
+// command updates, so they are only approximations.
+func (m *Miniredis) cmdXinfoConsumers(c *server.Peer, args []string) { + if len(args) != 2 { + setDirty(c) + c.WriteError(errWrongNumber("CONSUMERS")) + return + } + key, groupName := args[0], args[1] + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + db := m.db(ctx.selectedDB) + + s, err := db.stream(key) + if err != nil { + c.WriteError(err.Error()) + return + } + if s == nil { + c.WriteError(msgKeyNotFound) + return + } + + g, ok := s.groups[groupName] + if !ok { + err := fmt.Sprintf("NOGROUP No such consumer group '%s' for key name '%s'", groupName, key) + c.WriteError(err) + return + } + + var consumerNames []string + for name := range g.consumers { + consumerNames = append(consumerNames, name) + } + sort.Strings(consumerNames) + + c.WriteLen(len(consumerNames)) + for _, name := range consumerNames { + cons := g.consumers[name] + + c.WriteMapLen(4) + c.WriteBulk("name") + c.WriteBulk(name) + c.WriteBulk("pending") + c.WriteInt(cons.numPendingEntries) + // TODO: these times aren't set for all commands + c.WriteBulk("idle") + c.WriteInt(m.sinceMilli(cons.lastSeen)) + c.WriteBulk("inactive") + c.WriteInt(m.sinceMilli(cons.lastSuccess)) + } + }) +} + +func (m *Miniredis) sinceMilli(t time.Time) int { + if t.IsZero() { + return -1 + } + return int(m.effectiveNow().Sub(t).Milliseconds()) +} + // XREADGROUP func (m *Miniredis) cmdXreadgroup(c *server.Peer, cmd string, args []string) { // XREADGROUP GROUP group consumer STREAMS key ID @@ -399,6 +689,12 @@ func (m *Miniredis) cmdXreadgroup(c *server.Peer, cmd string, args []string) { c.WriteError(errWrongNumber(cmd)) return } + if !m.handleAuth(c) { + return + } + if m.checkPubsub(c, cmd) { + return + } var opts struct { group string @@ -505,6 +801,12 @@ parsing: c, opts.blockTimeout, func(c *server.Peer, ctx *connCtx) bool { + if ctx.nested { + setDirty(c) + c.WriteError("ERR XREADGROUP command is not allowed with BLOCK option from scripts") + return false + } + db := m.db(ctx.selectedDB) res, err := xreadgroup( db, @@ -574,6 +876,12 @@ func (m *Miniredis) cmdXack(c *server.Peer, cmd string, args []string) { c.WriteError(errWrongNumber(cmd)) return } + if !m.handleAuth(c) { + return + } + if m.checkPubsub(c, cmd) { + return + } key, group, ids := args[0], args[1], args[2:] @@ -605,6 +913,12 @@ func (m *Miniredis) cmdXdel(c *server.Peer, cmd string, args []string) { c.WriteError(errWrongNumber(cmd)) return } + if !m.handleAuth(c) { + return + } + if m.checkPubsub(c, cmd) { + return + } stream, ids := args[0], args[1:] @@ -625,7 +939,7 @@ func (m *Miniredis) cmdXdel(c *server.Peer, cmd string, args []string) { c.WriteError(err.Error()) return } - db.keyVersion[stream]++ + db.incr(stream) c.WriteInt(n) }) } @@ -637,15 +951,23 @@ func (m *Miniredis) cmdXread(c *server.Peer, cmd string, args []string) { c.WriteError(errWrongNumber(cmd)) return } - - var opts struct { - count int - streams []string - ids []string - block bool - blockTimeout time.Duration + if !m.handleAuth(c) { + return } - var err error + if m.checkPubsub(c, cmd) { + return + } + + var ( + opts struct { + count int + streams []string + ids []string + block bool + blockTimeout time.Duration + } + err error + ) parsing: for len(args) > 0 { @@ -676,11 +998,21 @@ parsing: } opts.streams, opts.ids = args[0:len(args)/2], args[len(args)/2:] - for _, id := range opts.ids { + for i, id := range opts.ids { if _, err := parseStreamID(id); id != `$` && err != nil { setDirty(c) c.WriteError(msgInvalidStreamID) return + } else if id == "$" { + withTx(m, c, func(c *server.Peer, ctx 
*connCtx) { + db := m.db(getCtx(c).selectedDB) + stream, ok := db.streamKeys[opts.streams[i]] + if ok { + opts.ids[i] = stream.lastID() + } else { + opts.ids[i] = "0-0" + } + }) } } args = nil @@ -690,7 +1022,6 @@ parsing: break parsing } } - if err != nil { setDirty(c) c.WriteError(err.Error()) @@ -710,6 +1041,12 @@ parsing: c, opts.blockTimeout, func(c *server.Peer, ctx *connCtx) bool { + if ctx.nested { + setDirty(c) + c.WriteError("ERR XREAD command is not allowed with BLOCK option from scripts") + return false + } + db := m.db(ctx.selectedDB) res := xread(db, opts.streams, opts.ids, opts.count) if len(res) == 0 { @@ -796,46 +1133,68 @@ func (m *Miniredis) cmdXpending(c *server.Peer, cmd string, args []string) { c.WriteError(errWrongNumber(cmd)) return } - - key, group, args := args[0], args[1], args[2:] - summary := true - if len(args) > 0 && strings.ToUpper(args[0]) == "IDLE" { - setDirty(c) - c.WriteError("ERR IDLE is unsupported") + if !m.handleAuth(c) { return } - var ( + if m.checkPubsub(c, cmd) { + return + } + + var opts struct { + key string + group string + summary bool + idle time.Duration start, end string count int consumer *string - ) + } + + opts.key, opts.group, args = args[0], args[1], args[2:] + opts.summary = true if len(args) >= 3 { - summary = false + opts.summary = false + + if strings.ToUpper(args[0]) == "IDLE" { + idleMs, err := strconv.ParseInt(args[1], 10, 64) + if err != nil { + setDirty(c) + c.WriteError(msgInvalidInt) + return + } + opts.idle = time.Duration(idleMs) * time.Millisecond + + args = args[2:] + if len(args) < 3 { + setDirty(c) + c.WriteError(msgSyntaxError) + return + } + } - start_, err := formatStreamRangeBound(args[0], true, false) + var err error + opts.start, err = formatStreamRangeBound(args[0], true, false) if err != nil { + setDirty(c) c.WriteError(msgInvalidStreamID) return } - start = start_ - end_, err := formatStreamRangeBound(args[1], false, false) + opts.end, err = formatStreamRangeBound(args[1], false, false) if err != nil { + setDirty(c) c.WriteError(msgInvalidStreamID) return } - end = end_ - n, err := strconv.Atoi(args[2]) // negative is allowed + opts.count, err = strconv.Atoi(args[2]) // negative is allowed if err != nil { + setDirty(c) c.WriteError(msgInvalidInt) return } - count = n args = args[3:] if len(args) == 1 { - var c string - c, args = args[0], args[1:] - consumer = &c + opts.consumer, args = &args[0], args[1:] } } if len(args) != 0 { @@ -846,26 +1205,27 @@ func (m *Miniredis) cmdXpending(c *server.Peer, cmd string, args []string) { withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - g, err := db.streamGroup(key, group) + g, err := db.streamGroup(opts.key, opts.group) if err != nil { c.WriteError(err.Error()) return } if g == nil { - c.WriteError(errReadgroup(key, group).Error()) + c.WriteError(errReadgroup(opts.key, opts.group).Error()) return } - if summary { + if opts.summary { writeXpendingSummary(c, *g) return } - writeXpending(m.effectiveNow(), c, *g, start, end, count, consumer) + writeXpending(m.effectiveNow(), c, *g, opts.idle, opts.start, opts.end, opts.count, opts.consumer) }) } func writeXpendingSummary(c *server.Peer, g streamGroup) { - if len(g.pending) == 0 { + pend := g.activePending() + if len(pend) == 0 { c.WriteLen(4) c.WriteInt(0) c.WriteNull() @@ -880,9 +1240,9 @@ func writeXpendingSummary(c *server.Peer, g streamGroup) { // - highest ID // - all consumers with > 0 pending items c.WriteLen(4) - c.WriteInt(len(g.pending)) - c.WriteBulk(g.pending[0].id) - 
c.WriteBulk(g.pending[len(g.pending)-1].id) + c.WriteInt(len(pend)) + c.WriteBulk(pend[0].id) + c.WriteBulk(pend[len(pend)-1].id) cons := map[string]int{} for id := range g.consumers { cnt := g.pendingCount(id) @@ -907,6 +1267,7 @@ func writeXpending( now time.Time, c *server.Peer, g streamGroup, + idle time.Duration, start, end string, count int, @@ -942,12 +1303,19 @@ func writeXpending( if streamCmp(p.id, end) > 0 { continue } - res = append(res, entry{ - id: p.id, - consumer: p.consumer, - millis: int(now.Sub(p.lastDelivery).Milliseconds()), - count: p.deliveryCount, - }) + timeSinceLastDelivery := now.Sub(p.lastDelivery) + if timeSinceLastDelivery >= idle { + res = append(res, entry{ + id: p.id, + consumer: p.consumer, + millis: int(timeSinceLastDelivery.Milliseconds()), + count: p.deliveryCount, + }) + } + } + if len(res) == 0 { + c.WriteLen(-1) + return } c.WriteLen(len(res)) for _, e := range res { @@ -966,6 +1334,12 @@ func (m *Miniredis) cmdXtrim(c *server.Peer, cmd string, args []string) { c.WriteError(errWrongNumber(cmd)) return } + if !m.handleAuth(c) { + return + } + if m.checkPubsub(c, cmd) { + return + } var opts struct { stream string @@ -1052,16 +1426,8 @@ func (m *Miniredis) cmdXtrim(c *server.Peer, cmd string, args []string) { s.trim(opts.maxLen) c.WriteInt(entriesBefore - len(s.entries)) case "MINID": - var delete []string - for _, entry := range s.entries { - if entry.ID < opts.threshold { - delete = append(delete, entry.ID) - } else { - break - } - } - s.delete(delete) - c.WriteInt(len(delete)) + n := s.trimBefore(opts.threshold) + c.WriteInt(n) } }) } @@ -1074,32 +1440,42 @@ func (m *Miniredis) cmdXautoclaim(c *server.Peer, cmd string, args []string) { c.WriteError(errWrongNumber(cmd)) return } + if !m.handleAuth(c) { + return + } + if m.checkPubsub(c, cmd) { + return + } - key, group, consumer := args[0], args[1], args[2] + var opts struct { + key string + group string + consumer string + minIdleTime time.Duration + start string + justId bool + count int + } - minIdleTime, err := strconv.Atoi(args[3]) + opts.key, opts.group, opts.consumer = args[0], args[1], args[2] + n, err := strconv.Atoi(args[3]) if err != nil { setDirty(c) c.WriteError("ERR Invalid min-idle-time argument for XAUTOCLAIM") return } - if minIdleTime != 0 { - setDirty(c) - c.WriteError("ERR IDLE is unsupported") - return - } + opts.minIdleTime = time.Millisecond * time.Duration(n) start_, err := formatStreamRangeBound(args[4], true, false) if err != nil { c.WriteError(msgInvalidStreamID) return } - start := start_ + opts.start = start_ args = args[5:] - count := 100 - var justId bool + opts.count = 100 parsing: for len(args) > 0 { switch strings.ToUpper(args[0]) { @@ -1109,7 +1485,7 @@ parsing: break parsing } - count, err = strconv.Atoi(args[1]) + opts.count, err = strconv.Atoi(args[1]) if err != nil { break parsing } @@ -1117,7 +1493,7 @@ parsing: args = args[2:] case "JUSTID": args = args[1:] - justId = true + opts.justId = true default: err = errors.New(msgSyntaxError) break parsing @@ -1132,24 +1508,25 @@ parsing: withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - g, err := db.streamGroup(key, group) + g, err := db.streamGroup(opts.key, opts.group) if err != nil { c.WriteError(err.Error()) return } if g == nil { - c.WriteError(errReadgroup(key, group).Error()) + c.WriteError(errReadgroup(opts.key, opts.group).Error()) return } - nextCallId, entries := xautoclaim(m.effectiveNow(), *g, start, count, consumer) - writeXautoclaim(c, nextCallId, entries, justId) + 
nextCallId, entries := xautoclaim(m.effectiveNow(), *g, opts.minIdleTime, opts.start, opts.count, opts.consumer) + writeXautoclaim(c, nextCallId, entries, opts.justId) }) } func xautoclaim( now time.Time, g streamGroup, + minIdleTime time.Duration, start string, count int, consumerID string, @@ -1162,15 +1539,31 @@ func xautoclaim( msgs := g.pendingAfter(start) var res []StreamEntry for i, p := range msgs { - g.consumers[consumerID] = consumer{} + if minIdleTime > 0 && now.Before(p.lastDelivery.Add(minIdleTime)) { + continue + } + + prevConsumerID := p.consumer + if _, ok := g.consumers[consumerID]; !ok { + g.consumers[consumerID] = &consumer{} + } p.consumer = consumerID + _, entry := g.stream.get(p.id) // not found. Weird? if entry == nil { + // TODO: support third element of return from XAUTOCLAIM, which + // should delete entries not found in the PEL during XAUTOCLAIM. + // (Introduced in Redis 7.0) continue } + p.deliveryCount += 1 p.lastDelivery = now + + g.consumers[prevConsumerID].numPendingEntries-- + g.consumers[consumerID].numPendingEntries++ + msgs[i] = p res = append(res, *entry) @@ -1185,7 +1578,7 @@ func xautoclaim( } func writeXautoclaim(c *server.Peer, nextCallId string, res []StreamEntry, justId bool) { - c.WriteLen(2) + c.WriteLen(3) c.WriteBulk(nextCallId) c.WriteLen(len(res)) for _, entry := range res { @@ -1201,6 +1594,209 @@ func writeXautoclaim(c *server.Peer, nextCallId string, res []StreamEntry, justI c.WriteBulk(v) } } + // TODO: see "Redis 7" note + c.WriteLen(0) +} + +// XCLAIM +func (m *Miniredis) cmdXclaim(c *server.Peer, cmd string, args []string) { + if len(args) < 5 { + setDirty(c) + c.WriteError(errWrongNumber(cmd)) + return + } + if !m.handleAuth(c) { + return + } + if m.checkPubsub(c, cmd) { + return + } + + var opts struct { + key string + groupName string + consumerName string + minIdleTime time.Duration + newLastDelivery time.Time + ids []string + retryCount *int + force bool + justId bool + } + + opts.key, opts.groupName, opts.consumerName = args[0], args[1], args[2] + + minIdleTimeMillis, err := strconv.Atoi(args[3]) + if err != nil { + setDirty(c) + c.WriteError("ERR Invalid min-idle-time argument for XCLAIM") + return + } + opts.minIdleTime = time.Millisecond * time.Duration(minIdleTimeMillis) + + opts.newLastDelivery = m.effectiveNow() + opts.ids = append(opts.ids, args[4]) + + args = args[5:] + for len(args) > 0 { + arg := strings.ToUpper(args[0]) + if arg == "IDLE" || + arg == "TIME" || + arg == "RETRYCOUNT" || + arg == "FORCE" || + arg == "JUSTID" { + break + } + opts.ids = append(opts.ids, arg) + args = args[1:] + } + + for len(args) > 0 { + arg := strings.ToUpper(args[0]) + switch arg { + case "IDLE": + idleMs, err := strconv.ParseInt(args[1], 10, 64) + if err != nil { + setDirty(c) + c.WriteError("ERR Invalid IDLE option argument for XCLAIM") + return + } + if idleMs < 0 { + idleMs = 0 + } + opts.newLastDelivery = m.effectiveNow().Add(time.Millisecond * time.Duration(-idleMs)) + args = args[2:] + case "TIME": + timeMs, err := strconv.ParseInt(args[1], 10, 64) + if err != nil { + setDirty(c) + c.WriteError("ERR Invalid TIME option argument for XCLAIM") + return + } + opts.newLastDelivery = time.UnixMilli(timeMs) + args = args[2:] + case "RETRYCOUNT": + retryCount, err := strconv.Atoi(args[1]) + if err != nil { + setDirty(c) + c.WriteError("ERR Invalid RETRYCOUNT option argument for XCLAIM") + return + } + opts.retryCount = &retryCount + args = args[2:] + case "FORCE": + opts.force = true + args = args[1:] + case "JUSTID": + opts.justId = 
true + args = args[1:] + default: + setDirty(c) + c.WriteError(fmt.Sprintf("ERR Unrecognized XCLAIM option '%s'", args[0])) + return + } + } + + withTx(m, c, func(c *server.Peer, ctx *connCtx) { + db := m.db(ctx.selectedDB) + + g, err := db.streamGroup(opts.key, opts.groupName) + if err != nil { + c.WriteError(err.Error()) + return + } + if g == nil { + c.WriteError(errReadgroup(opts.key, opts.groupName).Error()) + return + } + + claimedEntryIDs := m.xclaim(g, opts.consumerName, opts.minIdleTime, opts.newLastDelivery, opts.ids, opts.retryCount, opts.force) + writeXclaim(c, g.stream, claimedEntryIDs, opts.justId) + }) +} + +func (m *Miniredis) xclaim( + group *streamGroup, + consumerName string, + minIdleTime time.Duration, + newLastDelivery time.Time, + ids []string, + retryCount *int, + force bool, +) (claimedEntryIDs []string) { + for _, id := range ids { + pelPos, pelEntry := group.searchPending(id) + if pelEntry == nil { + group.setLastSeen(consumerName, m.effectiveNow()) + if !force { + continue + } + + if pelPos < len(group.pending) { + group.pending = append(group.pending[:pelPos+1], group.pending[pelPos:]...) + } else { + group.pending = append(group.pending, pendingEntry{}) + } + pelEntry = &group.pending[pelPos] + + *pelEntry = pendingEntry{ + id: id, + consumer: consumerName, + deliveryCount: 1, + } + group.setLastSuccess(consumerName, m.effectiveNow()) + } else { + group.consumers[pelEntry.consumer].numPendingEntries-- + pelEntry.consumer = consumerName + } + + if retryCount != nil { + pelEntry.deliveryCount = *retryCount + } else { + pelEntry.deliveryCount++ + } + pelEntry.lastDelivery = newLastDelivery + + // redis7: don't report entries which are deleted by now + if _, e := group.stream.get(id); e == nil { + continue + } + + claimedEntryIDs = append(claimedEntryIDs, id) + } + if len(claimedEntryIDs) == 0 { + group.setLastSeen(consumerName, m.effectiveNow()) + return + } + + if _, ok := group.consumers[consumerName]; !ok { + group.consumers[consumerName] = &consumer{} + } + consumer := group.consumers[consumerName] + consumer.numPendingEntries += len(claimedEntryIDs) + + group.setLastSuccess(consumerName, m.effectiveNow()) + return +} + +func writeXclaim(c *server.Peer, stream *streamKey, claimedEntryIDs []string, justId bool) { + c.WriteLen(len(claimedEntryIDs)) + for _, id := range claimedEntryIDs { + if justId { + c.WriteBulk(id) + continue + } + + _, entry := stream.get(id) + if entry == nil { + c.WriteNull() + continue + } + + c.WriteLen(2) + c.WriteBulk(entry.ID) + c.WriteStrings(entry.Values) + } } func parseBlock(cmd string, args []string, block *bool, timeout *time.Duration) error { diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_string.go b/vendor/github.com/alicebob/miniredis/v2/cmd_string.go index 29c2c22734b..b8013f2811c 100644 --- a/vendor/github.com/alicebob/miniredis/v2/cmd_string.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_string.go @@ -131,16 +131,27 @@ func (m *Miniredis) cmdSet(c *server.Peer, cmd string, args []string) { withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) + readonly := false if opts.nx { if db.exists(opts.key) { - c.WriteNull() - return + if opts.get { + // special case for SET NX GET + readonly = true + } else { + c.WriteNull() + return + } } } if opts.xx { if !db.exists(opts.key) { - c.WriteNull() - return + if opts.get { + // special case for SET XX GET + readonly = true + } else { + c.WriteNull() + return + } } } if opts.keepttl { @@ -154,14 +165,17 @@ func (m *Miniredis) cmdSet(c 
*server.Peer, cmd string, args []string) { return } } + old, existed := db.stringKeys[opts.key] - db.del(opts.key, true) // be sure to remove existing values of other type keys. - // a vanilla SET clears the expire - if opts.ttl >= 0 { // EXAT/PXAT can expire right away - db.stringSet(opts.key, opts.value) - } - if opts.ttl != 0 { - db.ttl[opts.key] = opts.ttl + if !readonly { + db.del(opts.key, true) // be sure to remove existing values of other type keys. + // a vanilla SET clears the expire + if opts.ttl >= 0 { // EXAT/PXAT can expire right away + db.stringSet(opts.key, opts.value) + } + if opts.ttl != 0 { + db.ttl[opts.key] = opts.ttl + } } if opts.get { if !existed { @@ -227,26 +241,29 @@ func (m *Miniredis) cmdPsetex(c *server.Peer, cmd string, args []string) { return } - key := args[0] - ttl, err := strconv.Atoi(args[1]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidInt) + var opts struct { + key string + ttl int + value string + } + + opts.key = args[0] + if ok := optInt(c, args[1], &opts.ttl); !ok { return } - if ttl <= 0 { + if opts.ttl <= 0 { setDirty(c) c.WriteError(msgInvalidPSETEXTime) return } - value := args[2] + opts.value = args[2] withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - db.del(key, true) // Clear any existing keys. - db.stringSet(key, value) - db.ttl[key] = time.Duration(ttl) * time.Millisecond + db.del(opts.key, true) // Clear any existing keys. + db.stringSet(opts.key, opts.value) + db.ttl[opts.key] = time.Duration(opts.ttl) * time.Millisecond c.WriteOK() }) } @@ -634,23 +651,24 @@ func (m *Miniredis) cmdIncrby(c *server.Peer, cmd string, args []string) { return } - key := args[0] - delta, err := strconv.Atoi(args[1]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidInt) + var opts struct { + key string + delta int + } + opts.key = args[0] + if ok := optInt(c, args[1], &opts.delta); !ok { return } withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - if t, ok := db.keys[key]; ok && t != "string" { + if t, ok := db.keys[opts.key]; ok && t != "string" { c.WriteError(msgWrongType) return } - v, err := db.stringIncr(key, delta) + v, err := db.stringIncr(opts.key, opts.delta) if err != nil { c.WriteError(err.Error()) return @@ -746,23 +764,24 @@ func (m *Miniredis) cmdDecrby(c *server.Peer, cmd string, args []string) { return } - key := args[0] - delta, err := strconv.Atoi(args[1]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidInt) + var opts struct { + key string + delta int + } + opts.key = args[0] + if ok := optInt(c, args[1], &opts.delta); !ok { return } withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - if t, ok := db.keys[key]; ok && t != "string" { + if t, ok := db.keys[opts.key]; ok && t != "string" { c.WriteError(msgWrongType) return } - v, err := db.stringIncr(key, -delta) + v, err := db.stringIncr(opts.key, -opts.delta) if err != nil { c.WriteError(err.Error()) return @@ -845,30 +864,29 @@ func (m *Miniredis) cmdGetrange(c *server.Peer, cmd string, args []string) { return } - key := args[0] - start, err := strconv.Atoi(args[1]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidInt) + var opts struct { + key string + start int + end int + } + opts.key = args[0] + if ok := optInt(c, args[1], &opts.start); !ok { return } - end, err := strconv.Atoi(args[2]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidInt) + if ok := optInt(c, args[2], &opts.end); !ok { return } withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := 
m.db(ctx.selectedDB) - if t, ok := db.keys[key]; ok && t != "string" { + if t, ok := db.keys[opts.key]; ok && t != "string" { c.WriteError(msgWrongType) return } - v := db.stringKeys[key] - c.WriteBulk(withRange(v, start, end)) + v := db.stringKeys[opts.key] + c.WriteBulk(withRange(v, opts.start, opts.end)) }) } @@ -886,36 +904,39 @@ func (m *Miniredis) cmdSetrange(c *server.Peer, cmd string, args []string) { return } - key := args[0] - pos, err := strconv.Atoi(args[1]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidInt) + var opts struct { + key string + pos int + subst string + } + opts.key = args[0] + if ok := optInt(c, args[1], &opts.pos); !ok { return } - if pos < 0 { + if opts.pos < 0 { setDirty(c) c.WriteError("ERR offset is out of range") return } - subst := args[2] + opts.subst = args[2] withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - if t, ok := db.keys[key]; ok && t != "string" { + if t, ok := db.keys[opts.key]; ok && t != "string" { c.WriteError(msgWrongType) return } - v := []byte(db.stringKeys[key]) - if len(v) < pos+len(subst) { - newV := make([]byte, pos+len(subst)) + v := []byte(db.stringKeys[opts.key]) + end := opts.pos + len(opts.subst) + if len(v) < end { + newV := make([]byte, end) copy(newV, v) v = newV } - copy(v[pos:pos+len(subst)], subst) - db.stringSet(key, string(v)) + copy(v[opts.pos:end], opts.subst) + db.stringSet(opts.key, string(v)) c.WriteInt(len(v)) }) } @@ -935,28 +956,20 @@ func (m *Miniredis) cmdBitcount(c *server.Peer, cmd string, args []string) { } var opts struct { - useRange bool - start, end int - key string + useRange bool + start int + end int + key string } opts.key, args = args[0], args[1:] if len(args) >= 2 { opts.useRange = true - var err error - n, err := strconv.Atoi(args[0]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidInt) + if ok := optInt(c, args[0], &opts.start); !ok { return } - opts.start = n - n, err = strconv.Atoi(args[1]) - if err != nil { - setDirty(c) - c.WriteError(msgInvalidInt) + if ok := optInt(c, args[1], &opts.end); !ok { return } - opts.end = n args = args[2:] } @@ -1001,25 +1014,28 @@ func (m *Miniredis) cmdBitop(c *server.Peer, cmd string, args []string) { return } - var ( - op = strings.ToUpper(args[0]) - target = args[1] - input = args[2:] - ) + var opts struct { + op string + target string + input []string + } + opts.op = strings.ToUpper(args[0]) + opts.target = args[1] + opts.input = args[2:] // 'op' is tested when the transaction is executed. 
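The sliceBinOp helper called inside this transaction is defined elsewhere in the package and is not shown in this diff; assuming it follows the BITOP rule Redis documents (the shorter operand is treated as zero-padded), its behaviour can be sketched as:

```go
package main

import "fmt"

// binOp sketches the semantics assumed of miniredis' sliceBinOp helper:
// apply f byte-by-byte over both operands, zero-padding the shorter one,
// as Redis documents for BITOP AND/OR/XOR.
func binOp(f func(a, b byte) byte, a, b []byte) []byte {
	n := len(a)
	if len(b) > n {
		n = len(b)
	}
	res := make([]byte, n)
	for i := range res {
		var x, y byte
		if i < len(a) {
			x = a[i]
		}
		if i < len(b) {
			y = b[i]
		}
		res[i] = f(x, y)
	}
	return res
}

func main() {
	and := func(a, b byte) byte { return a & b }
	fmt.Printf("%q\n", binOp(and, []byte("abc"), []byte("ab"))) // "ab\x00"
}
```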
withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - switch op { + switch opts.op { case "AND", "OR", "XOR": - first := input[0] + first := opts.input[0] if t, ok := db.keys[first]; ok && t != "string" { c.WriteError(msgWrongType) return } res := []byte(db.stringKeys[first]) - for _, vk := range input[1:] { + for _, vk := range opts.input[1:] { if t, ok := db.keys[vk]; ok && t != "string" { c.WriteError(msgWrongType) return @@ -1029,23 +1045,23 @@ func (m *Miniredis) cmdBitop(c *server.Peer, cmd string, args []string) { "AND": func(a, b byte) byte { return a & b }, "OR": func(a, b byte) byte { return a | b }, "XOR": func(a, b byte) byte { return a ^ b }, - }[op] + }[opts.op] res = sliceBinOp(cb, res, []byte(v)) } - db.del(target, false) // Keep TTL + db.del(opts.target, false) // Keep TTL if len(res) == 0 { - db.del(target, true) + db.del(opts.target, true) } else { - db.stringSet(target, string(res)) + db.stringSet(opts.target, string(res)) } c.WriteInt(len(res)) case "NOT": // NOT only takes a single argument. - if len(input) != 1 { + if len(opts.input) != 1 { c.WriteError("ERR BITOP NOT must be called with a single source key.") return } - key := input[0] + key := opts.input[0] if t, ok := db.keys[key]; ok && t != "string" { c.WriteError(msgWrongType) return @@ -1054,11 +1070,11 @@ func (m *Miniredis) cmdBitop(c *server.Peer, cmd string, args []string) { for i := range value { value[i] = ^value[i] } - db.del(target, false) // Keep TTL + db.del(opts.target, false) // Keep TTL if len(value) == 0 { - db.del(target, true) + db.del(opts.target, true) } else { - db.stringSet(target, string(value)) + db.stringSet(opts.target, string(value)) } c.WriteInt(len(value)) default: @@ -1182,9 +1198,15 @@ func (m *Miniredis) cmdGetbit(c *server.Peer, cmd string, args []string) { return } - key := args[0] - bit, err := strconv.Atoi(args[1]) - if err != nil { + var opts struct { + key string + bit int + } + opts.key = args[0] + if ok := optIntErr(c, args[1], &opts.bit, "ERR bit offset is not an integer or out of range"); !ok { + return + } + if opts.bit < 0 { setDirty(c) c.WriteError("ERR bit offset is not an integer or out of range") return @@ -1193,13 +1215,13 @@ func (m *Miniredis) cmdGetbit(c *server.Peer, cmd string, args []string) { withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - if t, ok := db.keys[key]; ok && t != "string" { + if t, ok := db.keys[opts.key]; ok && t != "string" { c.WriteError(msgWrongType) return } - value := db.stringKeys[key] + value := db.stringKeys[opts.key] - ourByteNr := bit / 8 + ourByteNr := opts.bit / 8 var ourByte byte if ourByteNr > len(value)-1 { ourByte = '\x00' @@ -1207,7 +1229,7 @@ func (m *Miniredis) cmdGetbit(c *server.Peer, cmd string, args []string) { ourByte = value[ourByteNr] } res := 0 - if toBits(ourByte)[bit%8] { + if toBits(ourByte)[opts.bit%8] { res = 1 } c.WriteInt(res) @@ -1228,15 +1250,24 @@ func (m *Miniredis) cmdSetbit(c *server.Peer, cmd string, args []string) { return } - key := args[0] - bit, err := strconv.Atoi(args[1]) - if err != nil || bit < 0 { + var opts struct { + key string + bit int + newBit int + } + opts.key = args[0] + if ok := optIntErr(c, args[1], &opts.bit, "ERR bit offset is not an integer or out of range"); !ok { + return + } + if opts.bit < 0 { setDirty(c) c.WriteError("ERR bit offset is not an integer or out of range") return } - newBit, err := strconv.Atoi(args[2]) - if err != nil || (newBit != 0 && newBit != 1) { + if ok := optIntErr(c, args[2], &opts.newBit, "ERR bit is 
not an integer or out of range"); !ok { + return + } + if opts.newBit != 0 && opts.newBit != 1 { setDirty(c) c.WriteError("ERR bit is not an integer or out of range") return @@ -1245,14 +1276,14 @@ func (m *Miniredis) cmdSetbit(c *server.Peer, cmd string, args []string) { withTx(m, c, func(c *server.Peer, ctx *connCtx) { db := m.db(ctx.selectedDB) - if t, ok := db.keys[key]; ok && t != "string" { + if t, ok := db.keys[opts.key]; ok && t != "string" { c.WriteError(msgWrongType) return } - value := []byte(db.stringKeys[key]) + value := []byte(db.stringKeys[opts.key]) - ourByteNr := bit / 8 - ourBitNr := bit % 8 + ourByteNr := opts.bit / 8 + ourBitNr := opts.bit % 8 if ourByteNr > len(value)-1 { // Too short. Expand. newValue := make([]byte, ourByteNr+1) @@ -1263,12 +1294,12 @@ func (m *Miniredis) cmdSetbit(c *server.Peer, cmd string, args []string) { if toBits(value[ourByteNr])[ourBitNr] { old = 1 } - if newBit == 0 { + if opts.newBit == 0 { value[ourByteNr] &^= 1 << uint8(7-ourBitNr) } else { value[ourByteNr] |= 1 << uint8(7-ourBitNr) } - db.stringSet(key, string(value)) + db.stringSet(opts.key, string(value)) c.WriteInt(old) }) diff --git a/vendor/github.com/alicebob/miniredis/v2/cmd_transactions.go b/vendor/github.com/alicebob/miniredis/v2/cmd_transactions.go index 9cbcaf3b3c8..94729e00411 100644 --- a/vendor/github.com/alicebob/miniredis/v2/cmd_transactions.go +++ b/vendor/github.com/alicebob/miniredis/v2/cmd_transactions.go @@ -30,7 +30,7 @@ func (m *Miniredis) cmdMulti(c *server.Peer, cmd string, args []string) { ctx := getCtx(c) if ctx.nested { - c.WriteError(msgNotFromScripts) + c.WriteError(msgNotFromScripts(ctx.nestedSHA)) return } if inTx(ctx) { @@ -59,7 +59,7 @@ func (m *Miniredis) cmdExec(c *server.Peer, cmd string, args []string) { ctx := getCtx(c) if ctx.nested { - c.WriteError(msgNotFromScripts) + c.WriteError(msgNotFromScripts(ctx.nestedSHA)) return } if !inTx(ctx) { @@ -137,7 +137,7 @@ func (m *Miniredis) cmdWatch(c *server.Peer, cmd string, args []string) { ctx := getCtx(c) if ctx.nested { - c.WriteError(msgNotFromScripts) + c.WriteError(msgNotFromScripts(ctx.nestedSHA)) return } if inTx(ctx) { diff --git a/vendor/github.com/alicebob/miniredis/v2/db.go b/vendor/github.com/alicebob/miniredis/v2/db.go index d6df96a2a62..af56839fab1 100644 --- a/vendor/github.com/alicebob/miniredis/v2/db.go +++ b/vendor/github.com/alicebob/miniredis/v2/db.go @@ -3,6 +3,7 @@ package miniredis import ( "errors" "fmt" + "math" "math/big" "sort" "strconv" @@ -13,8 +14,12 @@ var ( errInvalidEntryID = errors.New("stream ID is invalid") ) +// exists also updates the lru func (db *RedisDB) exists(k string) bool { _, ok := db.keys[k] + if ok { + db.lru[k] = db.master.effectiveNow() + } return ok } @@ -23,6 +28,12 @@ func (db *RedisDB) t(k string) string { return db.keys[k] } +// incr increases the version and the lru timestamp +func (db *RedisDB) incr(k string) { + db.lru[k] = db.master.effectiveNow() + db.keyVersion[k]++ +} + // allKeys returns all keys. Sorted. func (db *RedisDB) allKeys() []string { res := make([]string, 0, len(db.keys)) @@ -36,6 +47,7 @@ func (db *RedisDB) allKeys() []string { // flush removes all keys and values. 
func (db *RedisDB) flush() { db.keys = map[string]string{} + db.lru = map[string]time.Time{} db.stringKeys = map[string]string{} db.hashKeys = map[string]hashKey{} db.listKeys = map[string]listKey{} @@ -75,10 +87,10 @@ func (db *RedisDB) move(key string, to *RedisDB) bool { default: panic("unhandled key type") } - to.keyVersion[key]++ if v, ok := db.ttl[key]; ok { to.ttl[key] = v } + to.incr(key) db.del(key, true) return true } @@ -104,10 +116,10 @@ func (db *RedisDB) rename(from, to string) { panic("missing case") } db.keys[to] = db.keys[from] - db.keyVersion[to]++ if v, ok := db.ttl[from]; ok { db.ttl[to] = v } + db.incr(to) db.del(from, true) } @@ -118,6 +130,7 @@ func (db *RedisDB) del(k string, delTTL bool) { } t := db.t(k) delete(db.keys, k) + delete(db.lru, k) db.keyVersion[k]++ if delTTL { delete(db.ttl, k) @@ -155,7 +168,7 @@ func (db *RedisDB) stringSet(k, v string) { db.del(k, false) db.keys[k] = "string" db.stringKeys[k] = v - db.keyVersion[k]++ + db.incr(k) } // change int key value @@ -168,6 +181,17 @@ func (db *RedisDB) stringIncr(k string, delta int) (int, error) { return 0, ErrIntValueError } } + + if delta > 0 { + if math.MaxInt-delta < v { + return 0, ErrIntValueOverflowError + } + } else { + if math.MinInt-delta > v { + return 0, ErrIntValueOverflowError + } + } + v += delta db.stringSet(k, strconv.Itoa(v)) return v, nil @@ -197,7 +221,7 @@ func (db *RedisDB) listLpush(k, v string) int { } l = append([]string{v}, l...) db.listKeys[k] = l - db.keyVersion[k]++ + db.incr(k) return len(l) } @@ -211,7 +235,7 @@ func (db *RedisDB) listLpop(k string) string { } else { db.listKeys[k] = l } - db.keyVersion[k]++ + db.incr(k) return el } @@ -222,7 +246,7 @@ func (db *RedisDB) listPush(k string, v ...string) int { } l = append(l, v...) db.listKeys[k] = l - db.keyVersion[k]++ + db.incr(k) return len(l) } @@ -234,7 +258,7 @@ func (db *RedisDB) listPop(k string) string { db.del(k, true) } else { db.listKeys[k] = l - db.keyVersion[k]++ + db.incr(k) } return el } @@ -243,7 +267,7 @@ func (db *RedisDB) listPop(k string) string { func (db *RedisDB) setSet(k string, set setKey) { db.keys[k] = "set" db.setKeys[k] = set - db.keyVersion[k]++ + db.incr(k) } // setadd adds members to a set. Returns nr of new keys. @@ -261,7 +285,7 @@ func (db *RedisDB) setAdd(k string, elems ...string) int { s[e] = struct{}{} } db.setKeys[k] = s - db.keyVersion[k]++ + db.incr(k) return added } @@ -283,7 +307,7 @@ func (db *RedisDB) setRem(k string, fields ...string) int { } else { db.setKeys[k] = s } - db.keyVersion[k]++ + db.incr(k) return removed } @@ -349,7 +373,7 @@ func (db *RedisDB) hashSet(k string, fv ...string) int { f, v := fv[idx], fv[idx+1] _, ok := db.hashKeys[k][f] db.hashKeys[k][f] = v - db.keyVersion[k]++ + db.incr(k) if !ok { new++ } @@ -401,7 +425,7 @@ func (db *RedisDB) sortedSet(key string) map[string]float64 { // ssetSet sets a complete sorted set. func (db *RedisDB) ssetSet(key string, sset sortedSet) { db.keys[key] = "zset" - db.keyVersion[key]++ + db.incr(key) db.sortedsetKeys[key] = sset } @@ -415,7 +439,7 @@ func (db *RedisDB) ssetAdd(key string, score float64, member string) bool { _, ok = ss[member] ss[member] = score db.sortedsetKeys[key] = ss - db.keyVersion[key]++ + db.incr(key) return !ok } @@ -468,6 +492,16 @@ func (db *RedisDB) ssetScore(key, member string) float64 { return ss[member] } +// ssetMScore returns multiple scores of a list of members in a sorted set. 
+func (db *RedisDB) ssetMScore(key string, members []string) []float64 {
+	scores := make([]float64, 0, len(members))
+	ss := db.sortedsetKeys[key]
+	for _, member := range members {
+		scores = append(scores, ss[member])
+	}
+	return scores
+}
+
 // ssetRem is sorted set key delete.
 func (db *RedisDB) ssetRem(key, member string) bool {
 	ss := db.sortedsetKeys[key]
@@ -499,7 +533,7 @@ func (db *RedisDB) ssetIncrby(k, m string, delta float64) float64 {
 	v, _ := ss.get(m)
 	v += delta
 	ss.set(v, m)
-	db.keyVersion[k]++
+	db.incr(k)
 	return v
 }
 
@@ -568,6 +602,54 @@ func (db *RedisDB) setInter(keys []string) (setKey, error) {
 	return s, nil
 }
 
+// setIntercard implements the logic behind SINTER*
+// len keys needs to be > 0
+func (db *RedisDB) setIntercard(keys []string, limit int) (int, error) {
+	// all keys must either not exist, or be of type "set".
+	allExist := true
+	for _, key := range keys {
+		exists := db.exists(key)
+		allExist = allExist && exists
+		if exists && db.t(key) != "set" {
+			return 0, ErrWrongType
+		}
+	}
+
+	if !allExist {
+		return 0, nil
+	}
+
+	smallestKey := keys[0]
+	smallestIdx := 0
+	for i, key := range keys {
+		if len(db.setKeys[key]) < len(db.setKeys[smallestKey]) {
+			smallestKey = key
+			smallestIdx = i
+		}
+	}
+	keys[smallestIdx] = keys[len(keys)-1]
+	keys = keys[:len(keys)-1]
+
+	count := 0
+	for item := range db.setKeys[smallestKey] {
+		inIntersection := true
+		for _, key := range keys {
+			if _, ok := db.setKeys[key][item]; !ok {
+				inIntersection = false
+				break
+			}
+		}
+		if inIntersection {
+			count++
+			if count == limit {
+				break
+			}
+		}
+	}
+
+	return count, nil
+}
+
 // setUnion implements the logic behind SUNION*
 func (db *RedisDB) setUnion(keys []string) (setKey, error) {
 	key := keys[0]
@@ -603,7 +685,7 @@ func (db *RedisDB) newStream(key string) (*streamKey, error) {
 	db.keys[key] = "stream"
 	s := newStreamKey()
 	db.streamKeys[key] = s
-	db.keyVersion[key]++
+	db.incr(key)
 	return s, nil
 }
 
@@ -655,7 +737,7 @@ func (db *RedisDB) hllAdd(k string, elems ...string) int {
 		}
 	}
 	db.hllKeys[k] = s
-	db.keyVersion[k]++
+	db.incr(k)
 	return hllAltered
 }
 
@@ -702,7 +784,7 @@ func (db *RedisDB) hllMerge(keys []string) error {
 	db.hllKeys[destKey] = destHll
 
 	db.keys[destKey] = "hll"
-	db.keyVersion[destKey]++
+	db.incr(destKey)
 
 	return nil
 }
diff --git a/vendor/github.com/alicebob/miniredis/v2/direct.go b/vendor/github.com/alicebob/miniredis/v2/direct.go
index cd2323c63f4..31505eb4622 100644
--- a/vendor/github.com/alicebob/miniredis/v2/direct.go
+++ b/vendor/github.com/alicebob/miniredis/v2/direct.go
@@ -21,6 +21,9 @@ var (
 	// ErrIntValueError can returned by INCRBY
 	ErrIntValueError = errors.New(msgInvalidInt)
 
+	// ErrIntValueOverflowError can be returned by INCR, DECR, INCRBY, DECRBY
+	ErrIntValueOverflowError = errors.New(msgIntOverflow)
+
 	// ErrFloatValueError can returned by INCRBYFLOAT
 	ErrFloatValueError = errors.New(msgInvalidFloat)
 )
@@ -421,7 +424,7 @@ func (db *RedisDB) SetTTL(k string, ttl time.Duration) {
 	defer db.master.signal.Broadcast()
 
 	db.ttl[k] = ttl
-	db.keyVersion[k]++
+	db.incr(k)
 }
 
 // Type gives the type of a key, or ""
@@ -506,7 +509,7 @@ func (db *RedisDB) hdel(k, f string) {
 		return
 	}
 	delete(db.hashKeys[k], f)
-	db.keyVersion[k]++
+	db.incr(k)
 }
 
 // HIncrBy increases the integer value of a hash field by delta (int).
@@ -666,6 +669,24 @@ func (db *RedisDB) ZScore(k, member string) (float64, error) {
 	return db.ssetScore(k, member), nil
 }
 
+// ZMScore gives scores of a list of members in a sorted set.
+func (m *Miniredis) ZMScore(k string, members ...string) ([]float64, error) { + return m.DB(m.selectedDB).ZMScore(k, members) +} + +func (db *RedisDB) ZMScore(k string, members []string) ([]float64, error) { + db.master.Lock() + defer db.master.Unlock() + + if !db.exists(k) { + return nil, ErrKeyNotFound + } + if db.t(k) != "zset" { + return nil, ErrWrongType + } + return db.ssetMScore(k, members), nil +} + // XAdd adds an entry to a stream. `id` can be left empty or be '*'. // If a value is given normal XADD rules apply. Values should be an even // length. diff --git a/vendor/github.com/alicebob/miniredis/v2/fpconv/LICENSE.txt b/vendor/github.com/alicebob/miniredis/v2/fpconv/LICENSE.txt new file mode 100644 index 00000000000..0a0af2e8fc5 --- /dev/null +++ b/vendor/github.com/alicebob/miniredis/v2/fpconv/LICENSE.txt @@ -0,0 +1,26 @@ +This code is derived from the C code in redis-7.2.0/deps/fpconv/*, which has +this license: + +Boost Software License - Version 1.0 - August 17th, 2003 + +Permission is hereby granted, free of charge, to any person or organization +obtaining a copy of the software and accompanying documentation covered by +this license (the "Software") to use, reproduce, display, distribute, +execute, and transmit the Software, and to prepare derivative works of the +Software, and to permit third-parties to whom the Software is furnished to +do so, all subject to the following: + +The copyright notices in the Software and this entire statement, including +the above license grant, this restriction and the following disclaimer, +must be included in all copies of the Software, in whole or in part, and +all derivative works of the Software, unless such copies or derivative +works are solely in the form of machine-executable object code generated by +a source language processor. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT +SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/alicebob/miniredis/v2/fpconv/Makefile b/vendor/github.com/alicebob/miniredis/v2/fpconv/Makefile new file mode 100644 index 00000000000..d32d4bdcd18 --- /dev/null +++ b/vendor/github.com/alicebob/miniredis/v2/fpconv/Makefile @@ -0,0 +1,6 @@ +.PHONY: test fuzz +test: + go test + +fuzz: + go test -fuzz=Fuzz diff --git a/vendor/github.com/alicebob/miniredis/v2/fpconv/README.md b/vendor/github.com/alicebob/miniredis/v2/fpconv/README.md new file mode 100644 index 00000000000..c210e6033a6 --- /dev/null +++ b/vendor/github.com/alicebob/miniredis/v2/fpconv/README.md @@ -0,0 +1,3 @@ +This is a translation of the actual C code in Redis (7.2) which does the float +-> string conversion. +Strconv does a close enough job, but we can use the exact same logic, so why not. 
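For a sense of what the vendored Dtoa below produces, the closest standard-library analogue is strconv.FormatFloat with precision -1, which also yields the shortest decimal string that round-trips to the same float64. A quick comparison sketch; note that the vendored code emits lowercase "inf"/"nan" (see filter_special below), where strconv prints "+Inf"/"NaN":

```go
package main

import (
	"fmt"
	"math"
	"strconv"
)

func main() {
	for _, f := range []float64{0.1, 3, 1.0 / 3.0, 1e21, math.Inf(1)} {
		// Precision -1 requests the shortest decimal string that parses
		// back to exactly f, which is the same goal Dtoa pursues.
		fmt.Println(strconv.FormatFloat(f, 'g', -1, 64))
	}
}
```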
diff --git a/vendor/github.com/alicebob/miniredis/v2/fpconv/dtoa.go b/vendor/github.com/alicebob/miniredis/v2/fpconv/dtoa.go new file mode 100644 index 00000000000..251fc4f3b03 --- /dev/null +++ b/vendor/github.com/alicebob/miniredis/v2/fpconv/dtoa.go @@ -0,0 +1,286 @@ +package fpconv + +import ( + "math" +) + +var ( + fracmask uint64 = 0x000FFFFFFFFFFFFF + expmask uint64 = 0x7FF0000000000000 + hiddenbit uint64 = 0x0010000000000000 + signmask uint64 = 0x8000000000000000 + expbias int64 = 1023 + 52 + zeros = []rune("0000000000000000000000") + + tens = []uint64{ + 10000000000000000000, + 1000000000000000000, + 100000000000000000, + 10000000000000000, + 1000000000000000, + 100000000000000, + 10000000000000, + 1000000000000, + 100000000000, + 10000000000, + 1000000000, + 100000000, + 10000000, + 1000000, + 100000, + 10000, + 1000, + 100, + 10, + 1} +) + +func absv(n int) int { + if n < 0 { + return -n + } + return n +} + +func minv(a, b int) int { + if a < b { + return a + } + return b +} + +func Dtoa(d float64) string { + var ( + dest [25]rune // Note C has 24, which is broken + digits [18]rune + + str_len int = 0 + neg = false + ) + + if get_dbits(d)&signmask != 0 { + dest[0] = '-' + str_len++ + neg = true + } + + if spec := filter_special(d, dest[str_len:]); spec != 0 { + return string(dest[:str_len+spec]) + } + + var ( + k int = 0 + ndigits int = grisu2(d, &digits, &k) + ) + + str_len += emit_digits(&digits, ndigits, dest[str_len:], k, neg) + return string(dest[:str_len]) +} + +func filter_special(fp float64, dest []rune) int { + if fp == 0.0 { + dest[0] = '0' + return 1 + } + + if math.IsNaN(fp) { + dest[0] = 'n' + dest[1] = 'a' + dest[2] = 'n' + return 3 + } + if math.IsInf(fp, 0) { + dest[0] = 'i' + dest[1] = 'n' + dest[2] = 'f' + return 3 + } + return 0 +} + +func grisu2(d float64, digits *[18]rune, K *int) int { + w := build_fp(d) + + lower, upper := get_normalized_boundaries(w) + + w = normalize(w) + + var k int64 + cp := find_cachedpow10(upper.exp, &k) + + w = multiply(w, cp) + upper = multiply(upper, cp) + lower = multiply(lower, cp) + + lower.frac++ + upper.frac-- + + *K = int(-k) + + return generate_digits(w, upper, lower, digits[:], K) +} + +func emit_digits(digits *[18]rune, ndigits int, dest []rune, K int, neg bool) int { + exp := int(absv(K + ndigits - 1)) + + /* write plain integer */ + if K >= 0 && (exp < (ndigits + 7)) { + copy(dest, digits[:ndigits]) + copy(dest[ndigits:], zeros[:K]) + + return ndigits + K + } + + /* write decimal w/o scientific notation */ + if K < 0 && (K > -7 || exp < 4) { + offset := int(ndigits - absv(K)) + /* fp < 1.0 -> write leading zero */ + if offset <= 0 { + offset = -offset + dest[0] = '0' + dest[1] = '.' + copy(dest[2:], zeros[:offset]) + copy(dest[offset+2:], digits[:ndigits]) + + return ndigits + 2 + offset + + /* fp > 1.0 */ + } else { + copy(dest, digits[:offset]) + dest[offset] = '.' + copy(dest[offset+1:], digits[offset:offset+ndigits-offset]) + + return ndigits + 1 + } + } + /* write decimal w/ scientific notation */ + l := 18 // was: 18-neg + if neg { + l-- + } + ndigits = minv(ndigits, l) + + var idx int = 0 + dest[idx] = digits[0] + idx++ + + if ndigits > 1 { + dest[idx] = '.' 
+ idx++ + copy(dest[idx:], digits[+1:ndigits-1+1]) + idx += ndigits - 1 + } + + dest[idx] = 'e' + idx++ + + sign := '+' + if K+ndigits-1 < 0 { + sign = '-' + } + dest[idx] = sign + idx++ + + var cent rune = 0 + + if exp > 99 { + cent = rune(exp / 100) + dest[idx] = cent + '0' + idx++ + exp -= int(cent) * 100 + } + if exp > 9 { + dec := rune(exp / 10) + dest[idx] = dec + '0' + idx++ + exp -= int(dec) * 10 + } else if cent != 0 { + dest[idx] = '0' + idx++ + } + + dest[idx] = rune(exp%10) + '0' + idx++ + + return idx +} + +func generate_digits(fp, upper, lower Fp, digits []rune, K *int) int { + var ( + wfrac = uint64(upper.frac - fp.frac) + delta = uint64(upper.frac - lower.frac) + ) + + one := Fp{ + frac: 1 << -upper.exp, + exp: upper.exp, + } + + part1 := uint64(upper.frac >> -one.exp) + part2 := uint64(upper.frac & (one.frac - 1)) + + var ( + idx = 0 + kappa = 10 + index = 10 + ) + /* 1000000000 */ + for ; kappa > 0; index++ { + div := tens[index] + digit := part1 / div + + if digit != 0 || idx != 0 { + digits[idx] = rune(digit) + '0' + idx++ + } + + part1 -= digit * div + kappa-- + + tmp := (part1 << -one.exp) + part2 + if tmp <= delta { + *K += kappa + round_digit(digits, idx, delta, tmp, div<<-one.exp, wfrac) + + return idx + } + } + + /* 10 */ + index = 18 + for { + var unit uint64 = tens[index] + part2 *= 10 + delta *= 10 + kappa-- + + digit := part2 >> -one.exp + if digit != 0 || idx != 0 { + digits[idx] = rune(digit) + '0' + idx++ + } + + part2 &= uint64(one.frac) - 1 + if part2 < delta { + *K += kappa + round_digit(digits, idx, delta, part2, uint64(one.frac), wfrac*unit) + + return idx + } + + index-- + } +} + +func round_digit(digits []rune, + ndigits int, + delta uint64, + rem uint64, + kappa uint64, + frac uint64) { + for rem < frac && delta-rem >= kappa && + (rem+kappa < frac || frac-rem > rem+kappa-frac) { + digits[ndigits-1]-- + rem += kappa + } +} diff --git a/vendor/github.com/alicebob/miniredis/v2/fpconv/fp.go b/vendor/github.com/alicebob/miniredis/v2/fpconv/fp.go new file mode 100644 index 00000000000..4906463638f --- /dev/null +++ b/vendor/github.com/alicebob/miniredis/v2/fpconv/fp.go @@ -0,0 +1,96 @@ +package fpconv + +import ( + "math" +) + +type ( + Fp struct { + frac uint64 + exp int64 + } +) + +func build_fp(d float64) Fp { + bits := get_dbits(d) + + fp := Fp{ + frac: bits & fracmask, + exp: int64((bits & expmask) >> 52), + } + + if fp.exp != 0 { + fp.frac += hiddenbit + fp.exp -= expbias + } else { + fp.exp = -expbias + 1 + } + + return fp +} + +func normalize(fp Fp) Fp { + for (fp.frac & hiddenbit) == 0 { + fp.frac <<= 1 + fp.exp-- + } + + var shift int64 = 64 - 52 - 1 + fp.frac <<= shift + fp.exp -= shift + return fp +} + +func multiply(a, b Fp) Fp { + lomask := uint64(0x00000000FFFFFFFF) + + var ( + ah_bl = uint64((a.frac >> 32) * (b.frac & lomask)) + al_bh = uint64((a.frac & lomask) * (b.frac >> 32)) + al_bl = uint64((a.frac & lomask) * (b.frac & lomask)) + ah_bh = uint64((a.frac >> 32) * (b.frac >> 32)) + ) + + tmp := uint64((ah_bl & lomask) + (al_bh & lomask) + (al_bl >> 32)) + /* round up */ + tmp += uint64(1) << 31 + + return Fp{ + ah_bh + (ah_bl >> 32) + (al_bh >> 32) + (tmp >> 32), + a.exp + b.exp + 64, + } +} + +func get_dbits(d float64) uint64 { + return math.Float64bits(d) +} + +func get_normalized_boundaries(fp Fp) (Fp, Fp) { + upper := Fp{ + frac: (fp.frac << 1) + 1, + exp: fp.exp - 1, + } + for (upper.frac & (hiddenbit << 1)) == 0 { + upper.frac <<= 1 + upper.exp-- + } + + var u_shift int64 = 64 - 52 - 2 + + upper.frac <<= u_shift + upper.exp = 
upper.exp - u_shift + + l_shift := int64(1) + if fp.frac == hiddenbit { + l_shift = 2 + } + + lower := Fp{ + frac: (fp.frac << l_shift) - 1, + exp: fp.exp - l_shift, + } + + lower.frac <<= lower.exp - upper.exp + lower.exp = upper.exp + return lower, upper +} diff --git a/vendor/github.com/alicebob/miniredis/v2/fpconv/powers.go b/vendor/github.com/alicebob/miniredis/v2/fpconv/powers.go new file mode 100644 index 00000000000..24725f91418 --- /dev/null +++ b/vendor/github.com/alicebob/miniredis/v2/fpconv/powers.go @@ -0,0 +1,82 @@ +package fpconv + +var ( + npowers int64 = 87 + steppowers int64 = 8 + firstpower int64 = -348 /* 10 ^ -348 */ + + expmax = -32 + expmin = -60 + + powers_ten = []Fp{ + {18054884314459144840, -1220}, {13451937075301367670, -1193}, + {10022474136428063862, -1166}, {14934650266808366570, -1140}, + {11127181549972568877, -1113}, {16580792590934885855, -1087}, + {12353653155963782858, -1060}, {18408377700990114895, -1034}, + {13715310171984221708, -1007}, {10218702384817765436, -980}, + {15227053142812498563, -954}, {11345038669416679861, -927}, + {16905424996341287883, -901}, {12595523146049147757, -874}, + {9384396036005875287, -847}, {13983839803942852151, -821}, + {10418772551374772303, -794}, {15525180923007089351, -768}, + {11567161174868858868, -741}, {17236413322193710309, -715}, + {12842128665889583758, -688}, {9568131466127621947, -661}, + {14257626930069360058, -635}, {10622759856335341974, -608}, + {15829145694278690180, -582}, {11793632577567316726, -555}, + {17573882009934360870, -529}, {13093562431584567480, -502}, + {9755464219737475723, -475}, {14536774485912137811, -449}, + {10830740992659433045, -422}, {16139061738043178685, -396}, + {12024538023802026127, -369}, {17917957937422433684, -343}, + {13349918974505688015, -316}, {9946464728195732843, -289}, + {14821387422376473014, -263}, {11042794154864902060, -236}, + {16455045573212060422, -210}, {12259964326927110867, -183}, + {18268770466636286478, -157}, {13611294676837538539, -130}, + {10141204801825835212, -103}, {15111572745182864684, -77}, + {11258999068426240000, -50}, {16777216000000000000, -24}, + {12500000000000000000, 3}, {9313225746154785156, 30}, + {13877787807814456755, 56}, {10339757656912845936, 83}, + {15407439555097886824, 109}, {11479437019748901445, 136}, + {17105694144590052135, 162}, {12744735289059618216, 189}, + {9495567745759798747, 216}, {14149498560666738074, 242}, + {10542197943230523224, 269}, {15709099088952724970, 295}, + {11704190886730495818, 322}, {17440603504673385349, 348}, + {12994262207056124023, 375}, {9681479787123295682, 402}, + {14426529090290212157, 428}, {10748601772107342003, 455}, + {16016664761464807395, 481}, {11933345169920330789, 508}, + {17782069995880619868, 534}, {13248674568444952270, 561}, + {9871031767461413346, 588}, {14708983551653345445, 614}, + {10959046745042015199, 641}, {16330252207878254650, 667}, + {12166986024289022870, 694}, {18130221999122236476, 720}, + {13508068024458167312, 747}, {10064294952495520794, 774}, + {14996968138956309548, 800}, {11173611982879273257, 827}, + {16649979327439178909, 853}, {12405201291620119593, 880}, + {9242595204427927429, 907}, {13772540099066387757, 933}, + {10261342003245940623, 960}, {15290591125556738113, 986}, + {11392378155556871081, 1013}, {16975966327722178521, 1039}, + {12648080533535911531, 1066}, + } +) + +func find_cachedpow10(exp int64, k *int64) Fp { + one_log_ten := 0.30102999566398114 + + approx := int64(float64(-(exp + npowers)) * one_log_ten) + idx := int((approx - firstpower) / 
steppowers) + + for { + current := int(exp + powers_ten[idx].exp + 64) + + if current < expmin { + idx++ + continue + } + + if current > expmax { + idx-- + continue + } + + *k = (firstpower + int64(idx)*steppowers) + + return powers_ten[idx] + } +} diff --git a/vendor/github.com/alicebob/miniredis/v2/geo.go b/vendor/github.com/alicebob/miniredis/v2/geo.go index bc8e929270b..3028a167010 100644 --- a/vendor/github.com/alicebob/miniredis/v2/geo.go +++ b/vendor/github.com/alicebob/miniredis/v2/geo.go @@ -21,12 +21,10 @@ func hsin(theta float64) float64 { } // distance function returns the distance (in meters) between two points of -// a given longitude and latitude relatively accurately (using a spherical -// approximation of the Earth) through the Haversin Distance Formula for -// great arc distance on a sphere with accuracy for small distances -// +// a given longitude and latitude relatively accurately (using a spherical +// approximation of the Earth) through the Haversin Distance Formula for +// great arc distance on a sphere with accuracy for small distances // point coordinates are supplied in degrees and converted into rad. in the func -// // distance returned is meters // http://en.wikipedia.org/wiki/Haversine_formula // Source: https://gist.github.com/cdipaolo/d3f8db3848278b49db68 diff --git a/vendor/github.com/alicebob/miniredis/v2/lua.go b/vendor/github.com/alicebob/miniredis/v2/lua.go index 42222dce8e3..ff777a45a09 100644 --- a/vendor/github.com/alicebob/miniredis/v2/lua.go +++ b/vendor/github.com/alicebob/miniredis/v2/lua.go @@ -18,7 +18,7 @@ var luaRedisConstants = map[string]lua.LValue{ "LOG_WARNING": lua.LNumber(3), } -func mkLua(srv *server.Server, c *server.Peer) (map[string]lua.LGFunction, map[string]lua.LValue) { +func mkLua(srv *server.Server, c *server.Peer, sha string) (map[string]lua.LGFunction, map[string]lua.LValue) { mkCall := func(failFast bool) func(l *lua.LState) int { // one server.Ctx for a single Lua run pCtx := &connCtx{} @@ -26,12 +26,13 @@ func mkLua(srv *server.Server, c *server.Peer) (map[string]lua.LGFunction, map[s pCtx.authenticated = true } pCtx.nested = true + pCtx.nestedSHA = sha pCtx.selectedDB = getCtx(c).selectedDB return func(l *lua.LState) int { top := l.GetTop() if top == 0 { - l.Error(lua.LString("Please specify at least one argument for redis.call()"), 1) + l.Error(lua.LString(fmt.Sprintf("Please specify at least one argument for this redis lib call script: %s, &c.", sha)), 1) return 0 } var args []string @@ -42,12 +43,12 @@ func mkLua(srv *server.Server, c *server.Peer) (map[string]lua.LGFunction, map[s case lua.LString: args = append(args, string(a)) default: - l.Error(lua.LString("Lua redis() command arguments must be strings or integers"), 1) + l.Error(lua.LString(fmt.Sprintf("Lua redis lib command arguments must be strings or integers script: %s, &c.", sha)), 1) return 0 } } if len(args) == 0 { - l.Error(lua.LString(msgNotFromScripts), 1) + l.Error(lua.LString(msgNotFromScripts(sha)), 1) return 0 } @@ -63,7 +64,7 @@ func mkLua(srv *server.Server, c *server.Peer) (map[string]lua.LGFunction, map[s if failFast { // call() mode if strings.Contains(err.Error(), "ERR unknown command") { - l.Error(lua.LString("Unknown Redis command called from Lua script"), 1) + l.Error(lua.LString(fmt.Sprintf("Unknown Redis command called from script script: %s, &c.", sha)), 1) } else { l.Error(lua.LString(err.Error()), 1) } @@ -112,7 +113,18 @@ func mkLua(srv *server.Server, c *server.Peer) (map[string]lua.LGFunction, map[s return 0 } res := &lua.LTable{} - 
res.RawSetString("err", lua.LString(msg))
+			parts := strings.SplitN(msg.String(), " ", 2)
+			// a '-' at the beginning will be added as part of the error response
+			if parts[0] != "" && parts[0][0] == '-' {
+				parts[0] = parts[0][1:]
+			}
+			var final_msg string
+			if len(parts) == 2 {
+				final_msg = fmt.Sprintf("%s %s", parts[0], parts[1])
+			} else {
+				final_msg = fmt.Sprintf("ERR %s", parts[0])
+			}
+			res.RawSetString("err", lua.LString(final_msg))
 			l.Push(res)
 			return 1
 		},
@@ -149,6 +161,15 @@ func mkLua(srv *server.Server, c *server.Peer, sha string) (map[string]lua.LGFunction, map[s
 			// ignored
 			return 1
 		},
+		"set_repl": func(l *lua.LState) int {
+			top := l.GetTop()
+			if top != 1 {
+				l.Error(lua.LString("wrong number of arguments"), 1)
+				return 0
+			}
+			// ignored
+			return 1
+		},
 	}, luaRedisConstants
 }
@@ -217,6 +238,8 @@ func redisToLua(l *lua.LState, res []interface{}) *lua.LTable {
 			v = lua.LFalse
 		} else {
 			switch et := e.(type) {
+			case int:
+				v = lua.LNumber(et)
 			case int64:
 				v = lua.LNumber(et)
 			case []uint8:
diff --git a/vendor/github.com/alicebob/miniredis/v2/miniredis.go b/vendor/github.com/alicebob/miniredis/v2/miniredis.go
index 171678d9432..a9600ef621e 100644
--- a/vendor/github.com/alicebob/miniredis/v2/miniredis.go
+++ b/vendor/github.com/alicebob/miniredis/v2/miniredis.go
@@ -13,7 +13,6 @@
 //
 // For direct use you can select a Redis database with either `s.Select(12);
 // s.Get("foo")` or `s.DB(12).Get("foo")`.
-//
 package miniredis

 import (
@@ -26,9 +25,12 @@ import (
 	"sync"
 	"time"

+	"github.com/alicebob/miniredis/v2/proto"
 	"github.com/alicebob/miniredis/v2/server"
 )

+var DumpMaxLineLen = 60
+
 type hashKey map[string]string
 type listKey []string
 type setKey map[string]struct{}
@@ -46,6 +48,7 @@ type RedisDB struct {
 	sortedsetKeys map[string]sortedSet     // ZADD &c. keys
 	streamKeys    map[string]*streamKey    // XADD &c. keys
 	ttl           map[string]time.Duration // effective TTL values
+	lru           map[string]time.Time     // time last used (read or written to)
 	keyVersion    map[string]uint          // used to watch values
 }
@@ -75,6 +78,7 @@ type dbKey struct {
 }

 // connCtx has all state for a single connection.
+// (this struct was named before context.Context existed)
 type connCtx struct {
 	selectedDB    int  // selected DB
 	authenticated bool // auth enabled and a valid AUTH seen
@@ -83,6 +87,7 @@
 	watch      map[dbKey]uint // WATCHed keys
 	subscriber *Subscriber    // client is in PUBSUB mode if not nil
 	nested     bool           // this is called via Lua
+	nestedSHA  string         // set to the SHA of the nesting function
 }

 // NewMiniRedis makes a new, non-started, Miniredis object.
@@ -102,6 +107,7 @@ func newRedisDB(id int, m *Miniredis) RedisDB {
 		id:         id,
 		master:     m,
 		keys:       map[string]string{},
+		lru:        map[string]time.Time{},
 		stringKeys: map[string]string{},
 		hashKeys:   map[string]hashKey{},
 		listKeys:   map[string]listKey{},
@@ -130,6 +136,7 @@ func RunTLS(cfg *tls.Config) (*Miniredis, error) {
 type Tester interface {
 	Fatalf(string, ...interface{})
 	Cleanup(func())
+	Logf(format string, args ...interface{})
 }

 // RunT starts a new miniredis; pass it a testing.T. It also registers the cleanup to run after your test is done.
@@ -143,6 +150,22 @@ func RunT(t Tester) *Miniredis {
 	return m
 }

+func runWithClient(t Tester) (*Miniredis, *proto.Client) {
+	m := RunT(t)
+
+	c, err := proto.Dial(m.Addr())
+	if err != nil {
+		t.Fatalf("could not connect to miniredis: %s", err)
+	}
+	t.Cleanup(func() {
+		if err = c.Close(); err != nil {
+			t.Logf("error closing connection to miniredis: %s", err)
+		}
+	})
+
+	return m, c
+}
+
 // Start starts a server.
It listens on a random port on localhost. See also // Addr(). func (m *Miniredis) Start() error { @@ -172,6 +195,15 @@ func (m *Miniredis) StartAddr(addr string) error { return m.start(s) } +// StartAddrTLS runs miniredis with a given addr, TLS version. +func (m *Miniredis) StartAddrTLS(addr string, cfg *tls.Config) error { + s, err := server.NewServerTLS(addr, cfg) + if err != nil { + return err + } + return m.start(s) +} + func (m *Miniredis) start(s *server.Server) error { m.Lock() defer m.Unlock() @@ -192,8 +224,9 @@ func (m *Miniredis) start(s *server.Server) error { commandsScripting(m) commandsGeo(m) commandsCluster(m) - commandsCommand(m) commandsHll(m) + commandsClient(m) + commandsObject(m) return nil } @@ -341,12 +374,19 @@ func (m *Miniredis) Server() *server.Server { } // Dump returns a text version of the selected DB, usable for debugging. +// +// Dump limits the maximum length of each key:value to "DumpMaxLineLen" characters. +// To increase that, call something like: +// +// miniredis.DumpMaxLineLen = 1024 +// mr, _ = miniredis.Run() +// mr.Dump() func (m *Miniredis) Dump() string { m.Lock() defer m.Unlock() var ( - maxLen = 60 + maxLen = DumpMaxLineLen indent = " " db = m.db(m.selectedDB) r = "" @@ -359,6 +399,7 @@ func (m *Miniredis) Dump() string { return fmt.Sprintf("%q%s", s, suffix) } ) + for _, k := range db.allKeys() { r += fmt.Sprintf("- %s\n", k) t := db.t(k) @@ -409,8 +450,10 @@ func (m *Miniredis) SetTime(t time.Time) { } // make every command return this message. For example: -// LOADING Redis is loading the dataset in memory -// MASTERDOWN Link with MASTER is down and replica-serve-stale-data is set to 'no'. +// +// LOADING Redis is loading the dataset in memory +// MASTERDOWN Link with MASTER is down and replica-serve-stale-data is set to 'no'. +// // Clear it with an empty string. Don't add newlines. func (m *Miniredis) SetError(msg string) { cb := server.Hook(nil) @@ -423,6 +466,18 @@ func (m *Miniredis) SetError(msg string) { m.srv.SetPreHook(cb) } +// isValidCMD returns true if command is valid and can be executed. +func (m *Miniredis) isValidCMD(c *server.Peer, cmd string) bool { + if !m.handleAuth(c) { + return false + } + if m.checkPubsub(c, cmd) { + return false + } + + return true +} + // handleAuth returns false if connection has no access. It sends the reply. 
func (m *Miniredis) handleAuth(c *server.Peer) bool { if getCtx(c).nested { @@ -653,7 +708,7 @@ func (m *Miniredis) copy( case "hash": destDB.hashKeys[dst] = copyHashKey(srcDB.hashKeys[src]) case "list": - destDB.listKeys[dst] = srcDB.listKeys[src] + destDB.listKeys[dst] = copyListKey(srcDB.listKeys[src]) case "set": destDB.setKeys[dst] = copySetKey(srcDB.setKeys[src]) case "zset": @@ -666,7 +721,7 @@ func (m *Miniredis) copy( panic("missing case") } destDB.keys[dst] = srcDB.keys[src] - destDB.keyVersion[dst]++ + destDB.incr(dst) if v, ok := srcDB.ttl[src]; ok { destDB.ttl[dst] = v } @@ -681,6 +736,12 @@ func copyHashKey(orig hashKey) hashKey { return cpy } +func copyListKey(orig listKey) listKey { + cpy := make(listKey, len(orig)) + copy(cpy, orig) + return cpy +} + func copySetKey(orig setKey) setKey { cpy := setKey{} for k, v := range orig { diff --git a/vendor/github.com/alicebob/miniredis/v2/opts.go b/vendor/github.com/alicebob/miniredis/v2/opts.go index 016d2682000..5b29c78c23f 100644 --- a/vendor/github.com/alicebob/miniredis/v2/opts.go +++ b/vendor/github.com/alicebob/miniredis/v2/opts.go @@ -1,7 +1,10 @@ package miniredis import ( + "errors" + "math" "strconv" + "time" "github.com/alicebob/miniredis/v2/server" ) @@ -10,12 +13,48 @@ import ( // Writes "invalid integer" error to c if it's not a valid integer. Returns // whether or not things were okay. func optInt(c *server.Peer, src string, dest *int) bool { + return optIntErr(c, src, dest, msgInvalidInt) +} + +func optIntErr(c *server.Peer, src string, dest *int, errMsg string) bool { n, err := strconv.Atoi(src) if err != nil { setDirty(c) - c.WriteError(msgInvalidInt) + c.WriteError(errMsg) return false } *dest = n return true } + +// optIntSimple sets dest or returns an error +func optIntSimple(src string, dest *int) error { + n, err := strconv.Atoi(src) + if err != nil { + return errors.New(msgInvalidInt) + } + *dest = n + return nil +} + +func optDuration(c *server.Peer, src string, dest *time.Duration) bool { + n, err := strconv.ParseFloat(src, 64) + if err != nil { + setDirty(c) + c.WriteError(msgInvalidTimeout) + return false + } + if n < 0 { + setDirty(c) + c.WriteError(msgTimeoutNegative) + return false + } + if math.IsInf(n, 0) { + setDirty(c) + c.WriteError(msgTimeoutIsOutOfRange) + return false + } + + *dest = time.Duration(n*1_000_000) * time.Microsecond + return true +} diff --git a/vendor/github.com/alicebob/miniredis/v2/proto/Makefile b/vendor/github.com/alicebob/miniredis/v2/proto/Makefile new file mode 100644 index 00000000000..b9ef39496bf --- /dev/null +++ b/vendor/github.com/alicebob/miniredis/v2/proto/Makefile @@ -0,0 +1,2 @@ +test: + go test diff --git a/vendor/github.com/alicebob/miniredis/v2/proto/client.go b/vendor/github.com/alicebob/miniredis/v2/proto/client.go new file mode 100644 index 00000000000..92f57baf1ea --- /dev/null +++ b/vendor/github.com/alicebob/miniredis/v2/proto/client.go @@ -0,0 +1,60 @@ +package proto + +import ( + "bufio" + "crypto/tls" + "net" +) + +type Client struct { + c net.Conn + r *bufio.Reader +} + +func Dial(addr string) (*Client, error) { + c, err := net.Dial("tcp", addr) + if err != nil { + return nil, err + } + + return &Client{ + c: c, + r: bufio.NewReader(c), + }, nil +} + +func DialTLS(addr string, cfg *tls.Config) (*Client, error) { + c, err := tls.Dial("tcp", addr, cfg) + if err != nil { + return nil, err + } + + return &Client{ + c: c, + r: bufio.NewReader(c), + }, nil +} + +func (c *Client) Close() error { + return c.c.Close() +} + +func (c *Client) Do(cmd ...string) 
(string, error) { + if err := Write(c.c, cmd); err != nil { + return "", err + } + return Read(c.r) +} + +func (c *Client) Read() (string, error) { + return Read(c.r) +} + +// Do() + ReadStrings() +func (c *Client) DoStrings(cmd ...string) ([]string, error) { + res, err := c.Do(cmd...) + if err != nil { + return nil, err + } + return ReadStrings(res) +} diff --git a/vendor/github.com/alicebob/miniredis/v2/proto/proto.go b/vendor/github.com/alicebob/miniredis/v2/proto/proto.go new file mode 100644 index 00000000000..e378faf187c --- /dev/null +++ b/vendor/github.com/alicebob/miniredis/v2/proto/proto.go @@ -0,0 +1,288 @@ +package proto + +import ( + "bufio" + "errors" + "fmt" + "io" + "strconv" + "strings" +) + +var ( + ErrProtocol = errors.New("unsupported protocol") + ErrUnexpected = errors.New("not what you asked for") +) + +func readLine(r *bufio.Reader) (string, error) { + line, err := r.ReadString('\n') + if err != nil { + return "", err + } + if len(line) < 3 { + return "", ErrProtocol + } + return line, nil +} + +// Read an array, with all elements are the raw redis commands +// Also reads sets and maps. +func ReadArray(b string) ([]string, error) { + r := bufio.NewReader(strings.NewReader(b)) + line, err := readLine(r) + if err != nil { + return nil, err + } + + elems := 0 + switch line[0] { + default: + return nil, ErrUnexpected + case '*', '>', '~': + // *: array + // >: push data + // ~: set + length, err := strconv.Atoi(line[1 : len(line)-2]) + if err != nil { + return nil, err + } + elems = length + case '%': + // we also read maps. + length, err := strconv.Atoi(line[1 : len(line)-2]) + if err != nil { + return nil, err + } + elems = length * 2 + } + + var res []string + for i := 0; i < elems; i++ { + next, err := Read(r) + if err != nil { + return nil, err + } + res = append(res, next) + } + return res, nil +} + +func ReadString(b string) (string, error) { + r := bufio.NewReader(strings.NewReader(b)) + line, err := readLine(r) + if err != nil { + return "", err + } + + switch line[0] { + default: + return "", ErrUnexpected + case '$': + // bulk strings are: `$5\r\nhello\r\n` + length, err := strconv.Atoi(line[1 : len(line)-2]) + if err != nil { + return "", err + } + if length < 0 { + // -1 is a nil response + return line, nil + } + var ( + buf = make([]byte, length+2) + pos = 0 + ) + for pos < length+2 { + n, err := r.Read(buf[pos:]) + if err != nil { + return "", err + } + pos += n + } + return string(buf[:len(buf)-2]), nil + } +} + +func readInline(b string) (string, error) { + if len(b) < 3 { + return "", ErrUnexpected + } + return b[1 : len(b)-2], nil +} + +func ReadError(b string) (string, error) { + if len(b) < 1 { + return "", ErrUnexpected + } + + switch b[0] { + default: + return "", ErrUnexpected + case '-': + return readInline(b) + } +} + +func ReadStrings(b string) ([]string, error) { + elems, err := ReadArray(b) + if err != nil { + return nil, err + } + var res []string + for _, e := range elems { + s, err := ReadString(e) + if err != nil { + return nil, err + } + res = append(res, s) + } + return res, nil +} + +// Read a single command, returning it raw. Used to read replies from redis. +// Understands RESP3 proto. +func Read(r *bufio.Reader) (string, error) { + line, err := readLine(r) + if err != nil { + return "", err + } + + switch line[0] { + default: + return "", ErrProtocol + case '+', '-', ':', ',', '_': + // +: inline string + // -: errors + // :: integer + // ,: float + // _: null + // Simple line based replies. 
+ return line, nil + case '$': + // bulk strings are: `$5\r\nhello\r\n` + length, err := strconv.Atoi(line[1 : len(line)-2]) + if err != nil { + return "", err + } + if length < 0 { + // -1 is a nil response + return line, nil + } + var ( + buf = make([]byte, length+2) + pos = 0 + ) + for pos < length+2 { + n, err := r.Read(buf[pos:]) + if err != nil { + return "", err + } + pos += n + } + return line + string(buf), nil + case '*', '>', '~': + // arrays are: `*6\r\n...` + // pushdata is: `>6\r\n...` + // sets are: `~6\r\n...` + length, err := strconv.Atoi(line[1 : len(line)-2]) + if err != nil { + return "", err + } + for i := 0; i < length; i++ { + next, err := Read(r) + if err != nil { + return "", err + } + line += next + } + return line, nil + case '%': + // maps are: `%3\r\n...` + length, err := strconv.Atoi(line[1 : len(line)-2]) + if err != nil { + return "", err + } + for i := 0; i < length*2; i++ { + next, err := Read(r) + if err != nil { + return "", err + } + line += next + } + return line, nil + } +} + +// Write a command in RESP3 proto. Used to write commands to redis. +// Currently only supports string arrays. +func Write(w io.Writer, cmd []string) error { + if _, err := fmt.Fprintf(w, "*%d\r\n", len(cmd)); err != nil { + return err + } + for _, c := range cmd { + if _, err := fmt.Fprintf(w, "$%d\r\n%s\r\n", len(c), c); err != nil { + return err + } + } + return nil +} + +// Parse into interfaces. `b` must contain exactly a single command (which can be nested). +func Parse(b string) (interface{}, error) { + if len(b) < 1 { + return nil, ErrUnexpected + } + + switch b[0] { + default: + return "", ErrProtocol + case '+': + return readInline(b) + case '-': + e, err := readInline(b) + if err != nil { + return nil, err + } + return errors.New(e), nil + case ':': + e, err := readInline(b) + if err != nil { + return nil, err + } + return strconv.Atoi(e) + case '$': + return ReadString(b) + case '*': + elems, err := ReadArray(b) + if err != nil { + return nil, err + } + var res []interface{} + for _, elem := range elems { + e, err := Parse(elem) + if err != nil { + return nil, err + } + res = append(res, e) + } + return res, nil + case '%': + elems, err := ReadArray(b) + if err != nil { + return nil, err + } + var res = map[interface{}]interface{}{} + for len(elems) > 1 { + key, err := Parse(elems[0]) + if err != nil { + return nil, err + } + value, err := Parse(elems[1]) + if err != nil { + return nil, err + } + res[key] = value + elems = elems[2:] + } + return res, nil + } +} diff --git a/vendor/github.com/alicebob/miniredis/v2/proto/types.go b/vendor/github.com/alicebob/miniredis/v2/proto/types.go new file mode 100644 index 00000000000..0b3b7c9af22 --- /dev/null +++ b/vendor/github.com/alicebob/miniredis/v2/proto/types.go @@ -0,0 +1,102 @@ +package proto + +import ( + "fmt" + "strings" +) + +// Byte-safe string +func String(s string) string { + return fmt.Sprintf("$%d\r\n%s\r\n", len(s), s) +} + +// Inline string +func Inline(s string) string { + return inline('+', s) +} + +// Error +func Error(s string) string { + return inline('-', s) +} + +func inline(r rune, s string) string { + return fmt.Sprintf("%s%s\r\n", string(r), s) +} + +// Int +func Int(n int) string { + return fmt.Sprintf(":%d\r\n", n) +} + +// Float +func Float(n float64) string { + return fmt.Sprintf(",%g\r\n", n) +} + +const ( + Nil = "$-1\r\n" + NilResp3 = "_\r\n" + NilList = "*-1\r\n" +) + +// Array assembles the args in a list. Args should be raw redis commands. 
+// Example: Array(String("foo"), String("bar"))
+func Array(args ...string) string {
+	return fmt.Sprintf("*%d\r\n", len(args)) + strings.Join(args, "")
+}
+
+// Push assembles the args for push-data. Args should be raw redis commands.
+// Example: Push(String("foo"), String("bar"))
+func Push(args ...string) string {
+	return fmt.Sprintf(">%d\r\n", len(args)) + strings.Join(args, "")
+}
+
+// Strings is a helper to build 1 dimensional string arrays.
+func Strings(args ...string) string {
+	var strings []string
+	for _, a := range args {
+		strings = append(strings, String(a))
+	}
+	return Array(strings...)
+}
+
+// Ints is a helper to build 1 dimensional int arrays.
+func Ints(args ...int) string {
+	var ints []string
+	for _, a := range args {
+		ints = append(ints, Int(a))
+	}
+	return Array(ints...)
+}
+
+// Map assembles the args in a map. Args should be raw redis commands.
+// Must be an even number of arguments.
+// Example: Map(String("foo"), String("bar"))
+func Map(args ...string) string {
+	return fmt.Sprintf("%%%d\r\n", len(args)/2) + strings.Join(args, "")
+}
+
+// StringMap is a wrapper to get a map of (bulk)strings.
+func StringMap(args ...string) string {
+	var strings []string
+	for _, a := range args {
+		strings = append(strings, String(a))
+	}
+	return Map(strings...)
+}
+
+// Set assembles the args in a set. Args should be raw redis commands.
+// Example: Set(String("foo"), String("bar"))
+func Set(args ...string) string {
+	return fmt.Sprintf("~%d\r\n", len(args)) + strings.Join(args, "")
+}
+
+// StringSet is a wrapper to get a set of (bulk)strings.
+func StringSet(args ...string) string {
+	var strings []string
+	for _, a := range args {
+		strings = append(strings, String(a))
+	}
+	return Set(strings...)
+}
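Taken together, `proto.Client` and these helpers form a small hand-rolled RESP test client. A rough usage sketch, assuming a miniredis (or any Redis-compatible) server is listening on the hypothetical address `localhost:6379`:

```go
package main

import (
	"fmt"

	"github.com/alicebob/miniredis/v2/proto"
)

func main() {
	// Dial the server and make sure the connection is closed again.
	c, err := proto.Dial("localhost:6379") // hypothetical address
	if err != nil {
		panic(err)
	}
	defer c.Close()

	// Do writes a single command and returns the raw RESP reply.
	res, err := c.Do("SET", "greeting", "hello")
	if err != nil {
		panic(err)
	}
	fmt.Printf("raw reply: %q\n", res) // "+OK\r\n"

	// DoStrings additionally decodes an array-of-bulk-strings reply.
	keys, err := c.DoStrings("KEYS", "*")
	if err != nil {
		panic(err)
	}
	fmt.Println("keys:", keys)
}
```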
diff --git a/vendor/github.com/alicebob/miniredis/v2/redis.go b/vendor/github.com/alicebob/miniredis/v2/redis.go
index d4a0cd8d441..f70728e7c73 100644
--- a/vendor/github.com/alicebob/miniredis/v2/redis.go
+++ b/vendor/github.com/alicebob/miniredis/v2/redis.go
@@ -16,23 +16,29 @@ const (
 	msgWrongType        = "WRONGTYPE Operation against a key holding the wrong kind of value"
 	msgNotValidHllValue = "WRONGTYPE Key is not a valid HyperLogLog string value."
 	msgInvalidInt       = "ERR value is not an integer or out of range"
+	msgIntOverflow      = "ERR increment or decrement would overflow"
 	msgInvalidFloat     = "ERR value is not a valid float"
 	msgInvalidMinMax    = "ERR min or max is not a float"
 	msgInvalidRangeItem = "ERR min or max not valid string range item"
 	msgInvalidTimeout   = "ERR timeout is not a float or out of range"
+	msgInvalidRange     = "ERR value is out of range, must be positive"
 	msgSyntaxError      = "ERR syntax error"
 	msgKeyNotFound      = "ERR no such key"
 	msgOutOfRange       = "ERR index out of range"
 	msgInvalidCursor    = "ERR invalid cursor"
 	msgXXandNX          = "ERR XX and NX options at the same time are not compatible"
-	msgNegTimeout       = "ERR timeout is negative"
+	msgTimeoutNegative     = "ERR timeout is negative"
+	msgTimeoutIsOutOfRange = "ERR timeout is out of range"
 	msgInvalidSETime     = "ERR invalid expire time in set"
 	msgInvalidSETEXTime  = "ERR invalid expire time in setex"
 	msgInvalidPSETEXTime = "ERR invalid expire time in psetex"
 	msgInvalidKeysNumber  = "ERR Number of keys can't be greater than number of args"
 	msgNegativeKeysNumber = "ERR Number of keys can't be negative"
-	msgFScriptUsage = "ERR Unknown subcommand or wrong number of arguments for '%s'. Try SCRIPT HELP."
-	msgFPubsubUsage = "ERR Unknown subcommand or wrong number of arguments for '%s'. Try PUBSUB HELP."
+	msgFScriptUsage       = "ERR unknown subcommand or wrong number of arguments for '%s'. Try SCRIPT HELP."
+	msgFScriptUsageSimple = "ERR unknown subcommand '%s'. Try SCRIPT HELP."
+	msgFPubsubUsage       = "ERR unknown subcommand or wrong number of arguments for '%s'. Try PUBSUB HELP."
+	msgFPubsubUsageSimple = "ERR unknown subcommand '%s'. Try PUBSUB HELP."
+	msgFObjectUsage       = "ERR unknown subcommand '%s'. Try OBJECT HELP."
 	msgScriptFlush       = "ERR SCRIPT FLUSH only support SYNC|ASYNC option"
 	msgSingleElementPair = "ERR INCR option supports a single increment-element pair"
 	msgGTLTandNX         = "ERR GT, LT, and/or NX options at the same time are not compatible"
@@ -40,15 +46,19 @@ (
 	msgStreamIDTooSmall = "ERR The ID specified in XADD is equal or smaller than the target stream top item"
 	msgStreamIDZero     = "ERR The ID specified in XADD must be greater than 0-0"
 	msgNoScriptFound    = "NOSCRIPT No matching script. Please use EVAL."
-	msgUnsupportedUnit  = "ERR unsupported unit provided. please use m, km, ft, mi"
-	msgNotFromScripts   = "This Redis command is not allowed from scripts"
-	msgXreadUnbalanced  = "ERR Unbalanced XREAD list of streams: for each stream key an ID or '$' must be specified."
+	msgUnsupportedUnit  = "ERR unsupported unit provided. please use M, KM, FT, MI"
+	msgXreadUnbalanced  = "ERR Unbalanced 'xread' list of streams: for each stream key an ID or '$' must be specified."
 	msgXgroupKeyNotFound    = "ERR The XGROUP subcommand requires the key to exist. Note that for CREATE you may want to use the MKSTREAM option to create an empty stream automatically."
 	msgXtrimInvalidStrategy = "ERR unsupported XTRIM strategy. Please use MAXLEN, MINID"
 	msgXtrimInvalidMaxLen   = "ERR value is not an integer or out of range"
 	msgXtrimInvalidLimit    = "ERR syntax error, LIMIT cannot be used without the special ~ option"
 	msgDBIndexOutOfRange    = "ERR DB index is out of range"
 	msgLimitCombination     = "ERR syntax error, LIMIT is only supported in combination with either BYSCORE or BYLEX"
+	msgRankIsZero          = "ERR RANK can't be zero: use 1 to start from the first match, 2 from the second ... or use negative to start from the end of the list"
+	msgCountIsNegative     = "ERR COUNT can't be negative"
+	msgMaxLengthIsNegative = "ERR MAXLEN can't be negative"
+	msgLimitIsNegative     = "ERR LIMIT can't be negative"
+	msgMemorySubcommand    = "ERR unknown subcommand '%s'. Try MEMORY HELP."
 )

 func errWrongNumber(cmd string) string {
@@ -67,6 +77,10 @@ func errXreadgroup(key, group string) error {
 	return fmt.Errorf("NOGROUP No such key '%s' or consumer group '%s' in XREADGROUP with GROUP option", key, group)
 }

+func msgNotFromScripts(sha string) string {
+	return fmt.Sprintf("This Redis command is not allowed from script script: %s, &c", sha)
+}
+
 // withTx wraps the non-argument-checking part of command handling code in
 // transaction logic.
 func withTx(
@@ -131,17 +145,25 @@ func blocking(
 		m.signal.Broadcast() // main loop might miss this signal
 	}()

-	m.Lock()
-	defer m.Unlock()
+	if !ctx.nested {
+		// nested calls (via Lua's .call()) already hold the lock, so only lock at the top level
+ m.Lock() + defer m.Unlock() + } for { - done := cb(c, ctx) - if done { + if c.Closed() { return } if m.Ctx.Err() != nil { return } + + done := cb(c, ctx) + if done { + return + } + if timedOut { onTimeout(c) return diff --git a/vendor/github.com/alicebob/miniredis/v2/server/server.go b/vendor/github.com/alicebob/miniredis/v2/server/server.go index 60e391f22bd..f425675f97d 100644 --- a/vendor/github.com/alicebob/miniredis/v2/server/server.go +++ b/vendor/github.com/alicebob/miniredis/v2/server/server.go @@ -4,11 +4,12 @@ import ( "bufio" "crypto/tls" "fmt" - "math" "net" "strings" "sync" "unicode" + + "github.com/alicebob/miniredis/v2/fpconv" ) func errUnknownCommand(cmd string, args []string) string { @@ -158,24 +159,34 @@ func (s *Server) servePeer(c net.Conn) { peer := &Peer{ w: bufio.NewWriter(c), } + defer func() { for _, f := range peer.onDisconnect { f() } }() - for { - args, err := readArray(r) - if err != nil { - return + readCh := make(chan []string) + + go func() { + defer close(readCh) + + for { + args, err := readArray(r) + if err != nil { + peer.Close() + return + } + + readCh <- args } + }() + + for args := range readCh { s.Dispatch(peer, args) peer.Flush() - s.mu.Lock() - closed := peer.closed - s.mu.Unlock() - if closed { + if peer.Closed() { c.Close() } } @@ -237,6 +248,7 @@ type Peer struct { Ctx interface{} // anything goes, server won't touch this onDisconnect []func() // list of callbacks mu sync.Mutex // for Block() + ClientName string // client name set by CLIENT SETNAME } func NewPeer(w *bufio.Writer) *Peer { @@ -259,6 +271,13 @@ func (c *Peer) Close() { c.closed = true } +// Return true if the peer connection closed. +func (c *Peer) Closed() bool { + c.mu.Lock() + defer c.mu.Unlock() + return c.closed +} + // Register a function to execute on disconnect. There can be multiple // functions registered. func (c *Peer) OnDisconnect(f func()) { @@ -459,29 +478,8 @@ func (w *Writer) Flush() { w.w.Flush() } -// formatFloat formats a float the way redis does (sort-of) +// formatFloat formats a float the way redis does. +// Redis uses a method called "grisu2", which we ported from C. func formatFloat(v float64) string { - if math.IsInf(v, 1) { - return "inf" - } - if math.IsInf(v, -1) { - return "-inf" - } - return stripZeros(fmt.Sprintf("%.12f", v)) -} - -func stripZeros(sv string) string { - for strings.Contains(sv, ".") { - if sv[len(sv)-1] != '0' { - break - } - // Remove trailing 0s. - sv = sv[:len(sv)-1] - // Ends with a '.'. - if sv[len(sv)-1] == '.' { - sv = sv[:len(sv)-1] - break - } - } - return sv + return fpconv.Dtoa(v) } diff --git a/vendor/github.com/alicebob/miniredis/v2/size/readme.md b/vendor/github.com/alicebob/miniredis/v2/size/readme.md new file mode 100644 index 00000000000..89220e459ad --- /dev/null +++ b/vendor/github.com/alicebob/miniredis/v2/size/readme.md @@ -0,0 +1,2 @@ + +Credits to DmitriyVTitov on his package https://github.com/DmitriyVTitov/size diff --git a/vendor/github.com/alicebob/miniredis/v2/size/size.go b/vendor/github.com/alicebob/miniredis/v2/size/size.go new file mode 100644 index 00000000000..43fee6e216a --- /dev/null +++ b/vendor/github.com/alicebob/miniredis/v2/size/size.go @@ -0,0 +1,138 @@ +package size + +import ( + "reflect" + "unsafe" +) + +// Of returns the size of 'v' in bytes. +// If there is an error during calculation, Of returns -1. +func Of(v interface{}) int { + // Cache with every visited pointer so we don't count two pointers + // to the same memory twice. 
+ cache := make(map[uintptr]bool) + return sizeOf(reflect.Indirect(reflect.ValueOf(v)), cache) +} + +// sizeOf returns the number of bytes the actual data represented by v occupies in memory. +// If there is an error, sizeOf returns -1. +func sizeOf(v reflect.Value, cache map[uintptr]bool) int { + switch v.Kind() { + + case reflect.Array: + sum := 0 + for i := 0; i < v.Len(); i++ { + s := sizeOf(v.Index(i), cache) + if s < 0 { + return -1 + } + sum += s + } + + return sum + (v.Cap()-v.Len())*int(v.Type().Elem().Size()) + + case reflect.Slice: + // return 0 if this node has been visited already + if cache[v.Pointer()] { + return 0 + } + cache[v.Pointer()] = true + + sum := 0 + for i := 0; i < v.Len(); i++ { + s := sizeOf(v.Index(i), cache) + if s < 0 { + return -1 + } + sum += s + } + + sum += (v.Cap() - v.Len()) * int(v.Type().Elem().Size()) + + return sum + int(v.Type().Size()) + + case reflect.Struct: + sum := 0 + for i, n := 0, v.NumField(); i < n; i++ { + s := sizeOf(v.Field(i), cache) + if s < 0 { + return -1 + } + sum += s + } + + // Look for struct padding. + padding := int(v.Type().Size()) + for i, n := 0, v.NumField(); i < n; i++ { + padding -= int(v.Field(i).Type().Size()) + } + + return sum + padding + + case reflect.String: + s := v.String() + hdr := (*reflect.StringHeader)(unsafe.Pointer(&s)) + if cache[hdr.Data] { + return int(v.Type().Size()) + } + cache[hdr.Data] = true + return len(s) + int(v.Type().Size()) + + case reflect.Ptr: + // return Ptr size if this node has been visited already (infinite recursion) + if cache[v.Pointer()] { + return int(v.Type().Size()) + } + cache[v.Pointer()] = true + if v.IsNil() { + return int(reflect.New(v.Type()).Type().Size()) + } + s := sizeOf(reflect.Indirect(v), cache) + if s < 0 { + return -1 + } + return s + int(v.Type().Size()) + + case reflect.Bool, + reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Int, reflect.Uint, + reflect.Chan, + reflect.Uintptr, + reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128, + reflect.Func: + return int(v.Type().Size()) + + case reflect.Map: + // return 0 if this node has been visited already (infinite recursion) + if cache[v.Pointer()] { + return 0 + } + cache[v.Pointer()] = true + sum := 0 + keys := v.MapKeys() + for i := range keys { + val := v.MapIndex(keys[i]) + // calculate size of key and value separately + sv := sizeOf(val, cache) + if sv < 0 { + return -1 + } + sum += sv + sk := sizeOf(keys[i], cache) + if sk < 0 { + return -1 + } + sum += sk + } + // Include overhead due to unused map buckets. 10.79 comes + // from https://golang.org/src/runtime/map.go. + return sum + int(v.Type().Size()) + int(float64(len(keys))*10.79) + + case reflect.Interface: + return sizeOf(v.Elem(), cache) + int(v.Type().Size()) + + } + + return -1 +} diff --git a/vendor/github.com/alicebob/miniredis/v2/stream.go b/vendor/github.com/alicebob/miniredis/v2/stream.go index d748cfa6d7f..5d3ec677a7e 100644 --- a/vendor/github.com/alicebob/miniredis/v2/stream.go +++ b/vendor/github.com/alicebob/miniredis/v2/stream.go @@ -9,13 +9,16 @@ import ( "sort" "strconv" "strings" + "sync" "time" ) // a Stream is a list of entries, lowest ID (oldest) first, and all "groups". type streamKey struct { - entries []StreamEntry - groups map[string]*streamGroup + entries []StreamEntry + groups map[string]*streamGroup + lastAllocatedID string + mu sync.Mutex } // a StreamEntry is an entry in a stream. 
The ID is always of the form @@ -30,11 +33,14 @@ type streamGroup struct { stream *streamKey lastID string pending []pendingEntry - consumers map[string]consumer + consumers map[string]*consumer } type consumer struct { - // TODO: "last seen" timestamp + numPendingEntries int + // these timestamps aren't tracked perfectly + lastSeen time.Time // "idle" XINFO key + lastSuccess time.Time // "inactive" XINFO key } type pendingEntry struct { @@ -50,20 +56,36 @@ func newStreamKey() *streamKey { } } +// generateID doesn't lock the mutex func (s *streamKey) generateID(now time.Time) string { ts := uint64(now.UnixNano()) / 1_000_000 - lastID := s.lastID() - next := fmt.Sprintf("%d-%d", ts, 0) - if streamCmp(lastID, next) == -1 { - return next + if s.lastAllocatedID != "" && streamCmp(s.lastAllocatedID, next) >= 0 { + last, _ := parseStreamID(s.lastAllocatedID) + next = fmt.Sprintf("%d-%d", last[0], last[1]+1) + } + + lastID := s.lastIDUnlocked() + if streamCmp(lastID, next) >= 0 { + last, _ := parseStreamID(lastID) + next = fmt.Sprintf("%d-%d", last[0], last[1]+1) } - last, _ := parseStreamID(lastID) - return fmt.Sprintf("%d-%d", last[0], last[1]+1) + + s.lastAllocatedID = next + return next } +// lastID locks the mutex func (s *streamKey) lastID() string { + s.mu.Lock() + defer s.mu.Unlock() + + return s.lastIDUnlocked() +} + +// lastID doesn't lock the mutex +func (s *streamKey) lastIDUnlocked() string { if len(s.entries) == 0 { return "0-0" } @@ -72,6 +94,9 @@ func (s *streamKey) lastID() string { } func (s *streamKey) copy() *streamKey { + s.mu.Lock() + defer s.mu.Unlock() + cpy := &streamKey{ entries: s.entries, } @@ -186,17 +211,20 @@ func reversedStreamEntries(o []StreamEntry) []StreamEntry { } func (s *streamKey) createGroup(group, id string) error { + s.mu.Lock() + defer s.mu.Unlock() + if _, ok := s.groups[group]; ok { return errors.New("BUSYGROUP Consumer Group name already exists") } if id == "$" { - id = s.lastID() + id = s.lastIDUnlocked() } s.groups[group] = &streamGroup{ stream: s, lastID: id, - consumers: map[string]consumer{}, + consumers: map[string]*consumer{}, } return nil } @@ -205,6 +233,9 @@ func (s *streamKey) createGroup(group, id string) error { // If id is empty or "*" the ID will be generated automatically. // `values` should have an even length. 
func (s *streamKey) add(entryID string, values []string, now time.Time) (string, error) { + s.mu.Lock() + defer s.mu.Unlock() + if entryID == "" || entryID == "*" { entryID = s.generateID(now) } @@ -216,7 +247,7 @@ func (s *streamKey) add(entryID string, values []string, now time.Time) (string, if entryID == "0-0" { return "", errors.New(msgStreamIDZero) } - if streamCmp(s.lastID(), entryID) != -1 { + if streamCmp(s.lastIDUnlocked(), entryID) != -1 { return "", errors.New(msgStreamIDTooSmall) } @@ -228,13 +259,36 @@ func (s *streamKey) add(entryID string, values []string, now time.Time) (string, } func (s *streamKey) trim(n int) { + s.mu.Lock() + defer s.mu.Unlock() + if len(s.entries) > n { s.entries = s.entries[len(s.entries)-n:] } } +// trimBefore deletes entries with an id less than the provided id +// and returns the number of entries deleted +func (s *streamKey) trimBefore(id string) int { + s.mu.Lock() + var delete []string + for _, entry := range s.entries { + if streamCmp(entry.ID, id) < 0 { + delete = append(delete, entry.ID) + } else { + break + } + } + s.mu.Unlock() + s.delete(delete) + return len(delete) +} + // all entries after "id" func (s *streamKey) after(id string) []StreamEntry { + s.mu.Lock() + defer s.mu.Unlock() + pos := sort.Search(len(s.entries), func(i int) bool { return streamCmp(id, s.entries[i].ID) < 0 }) @@ -244,6 +298,9 @@ func (s *streamKey) after(id string) []StreamEntry { // get a stream entry by ID // Also returns the position in the entries slice, if found. func (s *streamKey) get(id string) (int, *StreamEntry) { + s.mu.Lock() + defer s.mu.Unlock() + pos := sort.Search(len(s.entries), func(i int) bool { return streamCmp(id, s.entries[i].ID) <= 0 }) @@ -272,16 +329,39 @@ func (g *streamGroup) readGroup( } if !noack { + shouldAppend := len(g.pending) == 0 for _, msg := range msgs { - g.pending = append(g.pending, pendingEntry{ + if !shouldAppend { + shouldAppend = streamCmp(msg.ID, g.pending[len(g.pending)-1].id) == 1 + } + + var entry *pendingEntry + if shouldAppend { + g.pending = append(g.pending, pendingEntry{}) + entry = &g.pending[len(g.pending)-1] + } else { + var pos int + pos, entry = g.searchPending(msg.ID) + if entry == nil { + g.pending = append(g.pending[:pos+1], g.pending[pos:]...) 
+ entry = &g.pending[pos] + } else { + g.consumers[entry.consumer].numPendingEntries-- + } + } + + *entry = pendingEntry{ id: msg.ID, consumer: consumerID, deliveryCount: 1, lastDelivery: now, - }) + } } } - g.consumers[consumerID] = consumer{} + if _, ok := g.consumers[consumerID]; !ok { + g.consumers[consumerID] = &consumer{} + } + g.consumers[consumerID].numPendingEntries += len(msgs) g.lastID = msgs[len(msgs)-1].ID return msgs } @@ -307,6 +387,16 @@ func (g *streamGroup) readGroup( return res } +func (g *streamGroup) searchPending(id string) (int, *pendingEntry) { + pos := sort.Search(len(g.pending), func(i int) bool { + return streamCmp(id, g.pending[i].id) <= 0 + }) + if pos >= len(g.pending) || g.pending[pos].id != id { + return pos, nil + } + return pos, &g.pending[pos] +} + func (g *streamGroup) ack(ids []string) (int, error) { count := 0 for _, id := range ids { @@ -314,14 +404,19 @@ func (g *streamGroup) ack(ids []string) (int, error) { return 0, errors.New(msgInvalidStreamID) } - pos := sort.Search(len(g.pending), func(i int) bool { - return streamCmp(id, g.pending[i].id) <= 0 - }) - if len(g.pending) <= pos || g.pending[pos].id != id { + pos, entry := g.searchPending(id) + if entry == nil { continue } + consumer := g.consumers[entry.consumer] + consumer.numPendingEntries-- + g.pending = append(g.pending[:pos], g.pending[pos+1:]...) + // don't count deleted entries + if _, e := g.stream.get(id); e == nil { + continue + } count++ } return count, nil @@ -354,7 +449,7 @@ func (g *streamGroup) pendingAfter(id string) []pendingEntry { func (g *streamGroup) pendingCount(consumer string) int { n := 0 - for _, p := range g.pending { + for _, p := range g.activePending() { if p.consumer == consumer { n++ } @@ -362,10 +457,25 @@ func (g *streamGroup) pendingCount(consumer string) int { return n } +// pending entries without the entries deleted from the group +func (g *streamGroup) activePending() []pendingEntry { + var pe []pendingEntry + for _, p := range g.pending { + // drop deleted ones + if _, e := g.stream.get(p.id); e == nil { + continue + } + p := p + pe = append(pe, p) + } + return pe +} + func (g *streamGroup) copy() *streamGroup { - cns := map[string]consumer{} + cns := map[string]*consumer{} for k, v := range g.consumers { - cns[k] = v + c := *v + cns[k] = &c } return &streamGroup{ // don't copy stream @@ -374,3 +484,17 @@ func (g *streamGroup) copy() *streamGroup { consumers: cns, } } + +func (g *streamGroup) setLastSeen(c string, t time.Time) { + cons, ok := g.consumers[c] + if !ok { + cons = &consumer{} + } + cons.lastSeen = t + g.consumers[c] = cons +} + +func (g *streamGroup) setLastSuccess(c string, t time.Time) { + g.setLastSeen(c, t) + g.consumers[c].lastSuccess = t +} diff --git a/vendor/github.com/antchfx/xmlquery/.gitignore b/vendor/github.com/antchfx/xmlquery/.gitignore new file mode 100644 index 00000000000..4d5d27b1d3a --- /dev/null +++ b/vendor/github.com/antchfx/xmlquery/.gitignore @@ -0,0 +1,32 @@ +# vscode +.vscode +debug +*.test + +./build + +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof \ No newline at end of file diff --git a/vendor/github.com/antchfx/xmlquery/LICENSE b/vendor/github.com/antchfx/xmlquery/LICENSE new file mode 100644 index 00000000000..e14c37141c5 --- /dev/null +++ 
b/vendor/github.com/antchfx/xmlquery/LICENSE
@@ -0,0 +1,17 @@
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/antchfx/xmlquery/README.md b/vendor/github.com/antchfx/xmlquery/README.md
new file mode 100644
index 00000000000..4d26bfef35d
--- /dev/null
+++ b/vendor/github.com/antchfx/xmlquery/README.md
@@ -0,0 +1,302 @@
+# xmlquery
+
+[![Build Status](https://github.com/antchfx/xmlquery/actions/workflows/testing.yml/badge.svg)](https://github.com/antchfx/xmlquery/actions/workflows/testing.yml)
+[![GoDoc](https://godoc.org/github.com/antchfx/xmlquery?status.svg)](https://godoc.org/github.com/antchfx/xmlquery)
+[![Go Report Card](https://goreportcard.com/badge/github.com/antchfx/xmlquery)](https://goreportcard.com/report/github.com/antchfx/xmlquery)
+
+# Overview
+
+`xmlquery` is an XPath query package for XML documents; it lets you extract
+data from, or evaluate, XML documents with an XPath expression.
+
+`xmlquery` has a built-in query object caching feature that caches recently used
+XPath query strings. Caching avoids recompiling the XPath expression for
+each query.
+
+You can visit https://github.com/antchfx/xpath to learn about the supported XPath (1.0/2.0) syntax.
+
+[htmlquery](https://github.com/antchfx/htmlquery) - Package for the HTML document query.
+
+[xmlquery](https://github.com/antchfx/xmlquery) - Package for the XML document query.
+
+[jsonquery](https://github.com/antchfx/jsonquery) - Package for the JSON document query.
+
+# Installation
+
+```
+ $ go get github.com/antchfx/xmlquery
+```
+
+# Quick Starts
+
+```go
+import (
+	"fmt"
+	"strings"
+
+	"github.com/antchfx/xmlquery"
+)
+
+func main() {
+	s := `<?xml version="1.0" encoding="UTF-8" ?>
+<rss version="2.0">
+<channel>
+	<title>W3Schools Home Page</title>
+	<link>https://www.w3schools.com</link>
+	<description>Free web building tutorials</description>
+	<item>
+		<title>RSS Tutorial</title>
+		<link>https://www.w3schools.com/xml/xml_rss.asp</link>
+		<description>New RSS tutorial on W3Schools</description>
+	</item>
+	<item>
+		<title>XML Tutorial</title>
+		<link>https://www.w3schools.com/xml</link>
+		<description>New XML tutorial on W3Schools</description>
+	</item>
+</channel>
+</rss>`
+
+	doc, err := xmlquery.Parse(strings.NewReader(s))
+	if err != nil {
+		panic(err)
+	}
+	channel := xmlquery.FindOne(doc, "//channel")
+	if n := channel.SelectElement("title"); n != nil {
+		fmt.Printf("title: %s\n", n.InnerText())
+	}
+	if n := channel.SelectElement("link"); n != nil {
+		fmt.Printf("link: %s\n", n.InnerText())
+	}
+	for i, n := range xmlquery.Find(doc, "//item/title") {
+		fmt.Printf("#%d %s\n", i, n.InnerText())
+	}
+}
+```
+
+# Getting Started
+
+#### Find all nodes matching a specified XPath query.
+
+```go
+list, err := xmlquery.QueryAll(doc, "a")
+if err != nil {
+	panic(err)
+}
+```
+
+#### Parse an XML from URL.
+
+```go
+doc, err := xmlquery.LoadURL("http://www.example.com/sitemap.xml")
+```
+
+#### Parse an XML from string.
+
+```go
+s := `<?xml version="1.0" encoding="utf-8"?><rss version="2.0"></rss>`
+doc, err := xmlquery.Parse(strings.NewReader(s))
+```
+
+#### Parse an XML from io.Reader.
+
+```go
+f, err := os.Open("../books.xml")
+doc, err := xmlquery.Parse(f)
+```
+
+#### Parse an XML in a stream fashion (simple case, without element filtering).
+
+```go
+f, _ := os.Open("../books.xml")
+p, err := xmlquery.CreateStreamParser(f, "/bookstore/book")
+for {
+	n, err := p.Read()
+	if err == io.EOF {
+		break
+	}
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(n)
+}
+```
+
+Note: `CreateStreamParser()` saves memory when you have a large XML file to parse.
+
+#### Parse an XML in a stream fashion (with advanced element filtering).
+
+```go
+f, _ := os.Open("../books.xml")
+p, err := xmlquery.CreateStreamParser(f, "/bookstore/book", "/bookstore/book[price>=10]")
+for {
+	n, err := p.Read()
+	if err == io.EOF {
+		break
+	}
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(n)
+}
+```
+
+#### Find authors of all books in the bookstore.
+
+```go
+list := xmlquery.Find(doc, "//book//author")
+// or
+list := xmlquery.Find(doc, "//author")
+```
+
+#### Find the second book.
+
+```go
+book := xmlquery.FindOne(doc, "//book[2]")
+```
+
+#### Find the last book.
+
+```go
+book := xmlquery.FindOne(doc, "//book[last()]")
+```
+
+#### Find all book elements and only get the `id` attribute.
+
+```go
+list := xmlquery.Find(doc, "//book/@id")
+fmt.Println(list[0].InnerText()) // output the @id value
+```
+
+#### Find all books with id `bk104`.
+
+```go
+list := xmlquery.Find(doc, "//book[@id='bk104']")
+```
+
+#### Find all books with price less than 5.
+
+```go
+list := xmlquery.Find(doc, "//book[price<5]")
+```
+
+#### Evaluate the total price of all books.
+
+```go
+expr, err := xpath.Compile("sum(//book/price)")
+price := expr.Evaluate(xmlquery.CreateXPathNavigator(doc)).(float64)
+fmt.Printf("total price: %f\n", price)
+```
+
+#### Count the number of books.
+
+```go
+expr, err := xpath.Compile("count(//book)")
+count := expr.Evaluate(xmlquery.CreateXPathNavigator(doc)).(float64)
+```
+
+# Advanced Features
+
+### Parse `UTF-16` XML file with `ParseWithOptions()`.
+
+```go
+f, _ := os.Open(`UTF-16.XML`)
+// Convert UTF-16 XML to UTF-8
+utf16ToUtf8Transformer := unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM).NewDecoder()
+utf8Reader := transform.NewReader(f, utf16ToUtf8Transformer)
+// Sets `CharsetReader`
+options := xmlquery.ParserOptions{
+	Decoder: &xmlquery.DecoderOptions{
+		CharsetReader: func(charset string, input io.Reader) (io.Reader, error) {
+			return input, nil
+		},
+	},
+}
+doc, err := xmlquery.ParseWithOptions(utf8Reader, options)
+```
+
+### Query with custom namespace prefix.
+
+```go
+s := `<?xml version="1.0" encoding="UTF-8"?>
+<pd:ProcessDefinition xmlns:pd="http://xmlns.xyz.com/process/2003"
+	xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+	xmlns:xsd="http://www.w3.org/2001/XMLSchema">
+	<pd:activity name="First">
+		<pd:type>RequestReplyActivity</pd:type>
+		<pd:resourceType>OpClientReqActivity</pd:resourceType>
+		<pd:x>300</pd:x>
+		<pd:y>80</pd:y>
+	</pd:activity>
+</pd:ProcessDefinition>`
+doc, _ := xmlquery.Parse(strings.NewReader(s))
+nsMap := map[string]string{
+	"q": "http://xmlns.xyz.com/process/2003",
+	"r": "http://www.w3.org/1999/XSL/Transform",
+	"s": "http://www.w3.org/2001/XMLSchema",
+}
+expr, _ := xpath.CompileWithNS("//q:activity", nsMap)
+node := xmlquery.QuerySelector(doc, expr)
+```
+
+#### Create an XML document without calling `xml.Marshal`.
+
+```go
+doc := &xmlquery.Node{
+	Type: xmlquery.DeclarationNode,
+	Data: "xml",
+	Attr: []xml.Attr{
+		xml.Attr{Name: xml.Name{Local: "version"}, Value: "1.0"},
+	},
+}
+root := &xmlquery.Node{
+	Data: "rss",
+	Type: xmlquery.ElementNode,
+}
+doc.FirstChild = root
+channel := &xmlquery.Node{
+	Data: "channel",
+	Type: xmlquery.ElementNode,
+}
+root.FirstChild = channel
+title := &xmlquery.Node{
+	Data: "title",
+	Type: xmlquery.ElementNode,
+}
+title_text := &xmlquery.Node{
+	Data: "W3Schools Home Page",
+	Type: xmlquery.TextNode,
+}
+title.FirstChild = title_text
+channel.FirstChild = title
+
+fmt.Println(doc.OutputXML(true))
+fmt.Println(doc.OutputXMLWithOptions(xmlquery.WithOutputSelf()))
+```
+
+Output:
+
+```xml
+<?xml version="1.0"?><rss><channel><title>W3Schools Home Page</title></channel></rss>
+```
+
+# FAQ
+
+#### `Find()` vs `QueryAll()`, which is better?
+
+`Find` and `QueryAll` both do the same thing: they search for all matching XML nodes.
+`Find` panics if provided with an invalid XPath query, while `QueryAll` returns
+an error.
+
+#### Can I save my query expression object for the next query?
+
+Yes, you can. We provide `QuerySelector` and `QuerySelectorAll` methods; they
+accept your query expression object.
+
+Caching a query expression object avoids recompiling the XPath query
+expression, improving query performance.
+
+# Questions
+
+Please let me know if you have any questions.
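The `cache.go` file added below backs the caching behaviour the README describes: compiled XPath selectors go through an LRU cache keyed by the expression string. A small sketch of how its two package-level knobs can be tuned before issuing queries (the values chosen here are arbitrary):

```go
package main

import (
	"strings"

	"github.com/antchfx/xmlquery"
)

func main() {
	// Grow the LRU cache of compiled selectors (default 50), or set
	// DisableSelectorCache = true to recompile the XPath on every query.
	xmlquery.SelectorCacheMaxEntries = 200
	xmlquery.DisableSelectorCache = false

	doc, err := xmlquery.Parse(strings.NewReader(`<root><a>1</a><a>2</a></root>`))
	if err != nil {
		panic(err)
	}
	// Repeated queries with the same expression reuse the cached selector.
	for i := 0; i < 3; i++ {
		_ = xmlquery.Find(doc, "//a")
	}
}
```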
+
+# Questions
+
+Please let me know if you have any questions.
diff --git a/vendor/github.com/antchfx/xmlquery/cache.go b/vendor/github.com/antchfx/xmlquery/cache.go
new file mode 100644
index 00000000000..3abffcdc57c
--- /dev/null
+++ b/vendor/github.com/antchfx/xmlquery/cache.go
@@ -0,0 +1,43 @@
+package xmlquery
+
+import (
+    "sync"
+
+    "github.com/golang/groupcache/lru"
+
+    "github.com/antchfx/xpath"
+)
+
+// DisableSelectorCache will disable caching for the query selector if the value is true.
+var DisableSelectorCache = false
+
+// SelectorCacheMaxEntries sets how many selector objects can be cached. Default is 50.
+// Caching is disabled if SelectorCacheMaxEntries <= 0.
+var SelectorCacheMaxEntries = 50
+
+var (
+    cacheOnce  sync.Once
+    cache      *lru.Cache
+    cacheMutex sync.Mutex
+)
+
+// getQuery returns a compiled XPath expression for expr, consulting the
+// process-wide LRU cache unless caching is disabled.
+func getQuery(expr string) (*xpath.Expr, error) {
+    if DisableSelectorCache || SelectorCacheMaxEntries <= 0 {
+        return xpath.Compile(expr)
+    }
+    cacheOnce.Do(func() {
+        cache = lru.New(SelectorCacheMaxEntries)
+    })
+    cacheMutex.Lock()
+    defer cacheMutex.Unlock()
+    if v, ok := cache.Get(expr); ok {
+        return v.(*xpath.Expr), nil
+    }
+    v, err := xpath.Compile(expr)
+    if err != nil {
+        return nil, err
+    }
+    cache.Add(expr, v)
+    return v, nil
+}
diff --git a/vendor/github.com/antchfx/xmlquery/cached_reader.go b/vendor/github.com/antchfx/xmlquery/cached_reader.go
new file mode 100644
index 00000000000..fe389c5d5c7
--- /dev/null
+++ b/vendor/github.com/antchfx/xmlquery/cached_reader.go
@@ -0,0 +1,69 @@
+package xmlquery
+
+import (
+    "bufio"
+)
+
+// cachedReader wraps a bufio.Reader and can record the bytes it hands out,
+// so the parser can later inspect the raw markup (e.g. to detect CDATA).
+type cachedReader struct {
+    buffer   *bufio.Reader
+    cache    []byte
+    cacheCap int
+    cacheLen int
+    caching  bool
+}
+
+func newCachedReader(r *bufio.Reader) *cachedReader {
+    return &cachedReader{
+        buffer:   r,
+        cache:    make([]byte, 4096),
+        cacheCap: 4096,
+        cacheLen: 0,
+        caching:  false,
+    }
+}
+
+func (c *cachedReader) StartCaching() {
+    c.cacheLen = 0
+    c.caching = true
+}
+
+func (c *cachedReader) ReadByte() (byte, error) {
+    if !c.caching {
+        return c.buffer.ReadByte()
+    }
+    b, err := c.buffer.ReadByte()
+    if err != nil {
+        return b, err
+    }
+    if c.cacheLen < c.cacheCap {
+        c.cache[c.cacheLen] = b
+        c.cacheLen++
+    }
+    return b, err
+}
+
+func (c *cachedReader) Cache() []byte {
+    return c.cache[:c.cacheLen]
+}
+
+func (c *cachedReader) StopCaching() {
+    c.caching = false
+}
+
+func (c *cachedReader) Read(p []byte) (int, error) {
+    n, err := c.buffer.Read(p)
+    if err != nil {
+        return n, err
+    }
+    if c.caching && c.cacheLen < c.cacheCap {
+        for i := 0; i < n; i++ {
+            c.cache[c.cacheLen] = p[i]
+            c.cacheLen++
+            if c.cacheLen >= c.cacheCap {
+                break
+            }
+        }
+    }
+    return n, err
+}
diff --git a/vendor/github.com/antchfx/xmlquery/node.go b/vendor/github.com/antchfx/xmlquery/node.go
new file mode 100644
index 00000000000..03e0ce912e5
--- /dev/null
+++ b/vendor/github.com/antchfx/xmlquery/node.go
@@ -0,0 +1,404 @@
+package xmlquery
+
+import (
+    "bufio"
+    "encoding/xml"
+    "fmt"
+    "html"
+    "io"
+    "strings"
+)
+
+// A NodeType is the type of a Node.
+type NodeType uint
+
+const (
+    // DocumentNode is a document object that, as the root of the document tree,
+    // provides access to the entire XML document.
+    DocumentNode NodeType = iota
+    // DeclarationNode is the document type declaration, indicated by a
+    // tag such as <!DOCTYPE...>.
+    DeclarationNode
+    // ElementNode is an element (for example, <element>).
+    ElementNode
+    // TextNode is the text content of a node.
+    TextNode
+    // CharDataNode is a CDATA section (for example, <![CDATA[...]]>).
+    CharDataNode
+    // CommentNode is a comment (for example, <!-- comment -->).
+    CommentNode
+    // AttributeNode is an attribute of an element.
+    AttributeNode
+    // NotationNode is a directive in the document (for example, <!ENTITY...>).
+    NotationNode
+)
+
+type Attr struct {
+    Name         xml.Name
+    Value        string
+    NamespaceURI string
+}
+
+// A Node consists of a NodeType and some Data (tag name for
+// element nodes, content for text) and is part of a tree of Nodes.
+type Node struct {
+    Parent, FirstChild, LastChild, PrevSibling, NextSibling *Node
+
+    Type         NodeType
+    Data         string
+    Prefix       string
+    NamespaceURI string
+    Attr         []Attr
+
+    level int // node level in the tree
+}
+
+type outputConfiguration struct {
+    printSelf              bool
+    preserveSpaces         bool
+    emptyElementTagSupport bool
+    skipComments           bool
+    useIndentation         string
+}
+
+type OutputOption func(*outputConfiguration)
+
+// WithOutputSelf configures the Node to print the root node itself.
+func WithOutputSelf() OutputOption {
+    return func(oc *outputConfiguration) {
+        oc.printSelf = true
+    }
+}
+
+// WithEmptyTagSupport configures empty tags to be written as <empty/>
+// rather than <empty></empty>.
+func WithEmptyTagSupport() OutputOption {
+    return func(oc *outputConfiguration) {
+        oc.emptyElementTagSupport = true
+    }
+}
+
+// WithoutComments will skip comments in the output.
+func WithoutComments() OutputOption {
+    return func(oc *outputConfiguration) {
+        oc.skipComments = true
+    }
+}
+
+// WithPreserveSpace will preserve spaces in the output.
+func WithPreserveSpace() OutputOption {
+    return func(oc *outputConfiguration) {
+        oc.preserveSpaces = true
+    }
+}
+
+// WithIndentation sets the indentation string used for formatting the output.
+func WithIndentation(indentation string) OutputOption {
+    return func(oc *outputConfiguration) {
+        oc.useIndentation = indentation
+    }
+}
+
+func newXMLName(name string) xml.Name {
+    if i := strings.IndexByte(name, ':'); i > 0 {
+        return xml.Name{
+            Space: name[:i],
+            Local: name[i+1:],
+        }
+    }
+    return xml.Name{
+        Local: name,
+    }
+}
+
+func (n *Node) Level() int {
+    return n.level
+}
+
+// InnerText returns the text between the start and end tags of the object.
+func (n *Node) InnerText() string {
+    var output func(*strings.Builder, *Node)
+    output = func(b *strings.Builder, n *Node) {
+        switch n.Type {
+        case TextNode, CharDataNode:
+            b.WriteString(n.Data)
+        case CommentNode:
+        default:
+            for child := n.FirstChild; child != nil; child = child.NextSibling {
+                output(b, child)
+            }
+        }
+    }
+
+    var b strings.Builder
+    output(&b, n)
+    return b.String()
+}
+
+func (n *Node) sanitizedData(preserveSpaces bool) string {
+    if preserveSpaces {
+        return n.Data
+    }
+    return strings.TrimSpace(n.Data)
+}
+
+func calculatePreserveSpaces(n *Node, pastValue bool) bool {
+    if attr := n.SelectAttr("xml:space"); attr == "preserve" {
+        return true
+    } else if attr == "default" {
+        return false
+    }
+    return pastValue
+}
+
+type indentation struct {
+    level    int
+    hasChild bool
+    indent   string
+    w        io.Writer
+}
+
+func newIndentation(indent string, w io.Writer) *indentation {
+    if indent == "" {
+        return nil
+    }
+    return &indentation{
+        indent: indent,
+        w:      w,
+    }
+}
+
+func (i *indentation) NewLine() {
+    if i == nil {
+        return
+    }
+    io.WriteString(i.w, "\n")
+}
+
+func (i *indentation) Open() {
+    if i == nil {
+        return
+    }
+
+    io.WriteString(i.w, "\n")
+    io.WriteString(i.w, strings.Repeat(i.indent, i.level))
+
+    i.level++
+    i.hasChild = false
+}
+
+func (i *indentation) Close() {
+    if i == nil {
+        return
+    }
+    i.level--
+    if i.hasChild {
+        io.WriteString(i.w, "\n")
+        io.WriteString(i.w, strings.Repeat(i.indent, i.level))
+    }
+    i.hasChild = true
+}
+
+func outputXML(w io.Writer, n *Node, preserveSpaces bool, config *outputConfiguration, indent *indentation) {
+    preserveSpaces = calculatePreserveSpaces(n, preserveSpaces)
+    switch n.Type {
+    case TextNode:
+        io.WriteString(w, html.EscapeString(n.sanitizedData(preserveSpaces)))
+        return
+    case CharDataNode:
+        io.WriteString(w, "<![CDATA[")
+        io.WriteString(w, n.Data)
+        io.WriteString(w, "]]>")
+        return
+    case CommentNode:
+        if !config.skipComments {
+            io.WriteString(w, "<!--")
+            io.WriteString(w, n.Data)
+            io.WriteString(w, "-->")
+        }
+        return
+    case NotationNode:
+        indent.NewLine()
+        fmt.Fprintf(w, "<!%s>", n.Data)
+        return
+    case DeclarationNode:
+        indent.NewLine()
+        io.WriteString(w, "<?"+n.Data)
+    default:
+        indent.Open()
+        if n.Prefix == "" {
+            io.WriteString(w, "<"+n.Data)
+        } else {
+            fmt.Fprintf(w, "<%s:%s", n.Prefix, n.Data)
+        }
+    }
+
+    for _, attr := range n.Attr {
+        if attr.Name.Space != "" {
+            fmt.Fprintf(w, ` %s:%s=`, attr.Name.Space, attr.Name.Local)
+        } else {
+            fmt.Fprintf(w, ` %s=`, attr.Name.Local)
+        }
+        fmt.Fprintf(w, `"%v"`, html.EscapeString(attr.Value))
+    }
+    if n.Type == DeclarationNode {
+        io.WriteString(w, "?>")
+    } else {
+        if n.FirstChild != nil || !config.emptyElementTagSupport {
+            io.WriteString(w, ">")
+        } else {
+            io.WriteString(w, "/>")
+            indent.Close()
+            return
+        }
+    }
+    for child := n.FirstChild; child != nil; child = child.NextSibling {
+        outputXML(w, child, preserveSpaces, config, indent)
+    }
+    if n.Type != DeclarationNode {
+        indent.Close()
+        if n.Prefix == "" {
+            fmt.Fprintf(w, "</%s>", n.Data)
+        } else {
+            fmt.Fprintf(w, "</%s:%s>", n.Prefix, n.Data)
+        }
+    }
+}
+
+// OutputXML returns the XML text, including the element tags.
+func (n *Node) OutputXML(self bool) string {
+    if self {
+        return n.OutputXMLWithOptions(WithOutputSelf())
+    }
+    return n.OutputXMLWithOptions()
+}
+
+// OutputXMLWithOptions returns the XML text, including the element tags,
+// honoring the given output options.
+func (n *Node) OutputXMLWithOptions(opts ...OutputOption) string {
+    var b strings.Builder
+    n.WriteWithOptions(&b, opts...)
+    return b.String()
+}
+
+// Write writes the XML to the given writer.
+func (n *Node) Write(writer io.Writer, self bool) {
+    if self {
+        n.WriteWithOptions(writer, WithOutputSelf())
+    } else {
+        n.WriteWithOptions(writer)
+    }
+}
+
+// WriteWithOptions writes the XML with the given options to the given writer.
+func (n *Node) WriteWithOptions(writer io.Writer, opts ...OutputOption) {
+    config := &outputConfiguration{}
+    // Set the options
+    for _, opt := range opts {
+        opt(config)
+    }
+    pastPreserveSpaces := config.preserveSpaces
+    preserveSpaces := calculatePreserveSpaces(n, pastPreserveSpaces)
+    b := bufio.NewWriter(writer)
+    defer b.Flush()
+
+    if config.printSelf && n.Type != DocumentNode {
+        outputXML(b, n, preserveSpaces, config, newIndentation(config.useIndentation, b))
+    } else {
+        for n := n.FirstChild; n != nil; n = n.NextSibling {
+            outputXML(b, n, preserveSpaces, config, newIndentation(config.useIndentation, b))
+        }
+    }
+}
+
+// AddAttr adds a new attribute specified by 'key' and 'val' to a node 'n'.
+func AddAttr(n *Node, key, val string) {
+    attr := Attr{
+        Name:  newXMLName(key),
+        Value: val,
+    }
+    n.Attr = append(n.Attr, attr)
+}
+
+// SetAttr allows an attribute value with the specified name to be changed.
+// If the attribute did not previously exist, it will be created.
+func (n *Node) SetAttr(key, value string) {
+    name := newXMLName(key)
+    for i, attr := range n.Attr {
+        if attr.Name == name {
+            n.Attr[i].Value = value
+            return
+        }
+    }
+    AddAttr(n, key, value)
+}
+
+// RemoveAttr removes the attribute with the specified name.
+func (n *Node) RemoveAttr(key string) {
+    name := newXMLName(key)
+    for i, attr := range n.Attr {
+        if attr.Name == name {
+            n.Attr = append(n.Attr[:i], n.Attr[i+1:]...)
+            return
+        }
+    }
+}
+
+// AddChild adds a new node 'n' to a node 'parent' as its last child.
+func AddChild(parent, n *Node) {
+    n.Parent = parent
+    n.NextSibling = nil
+    if parent.FirstChild == nil {
+        parent.FirstChild = n
+        n.PrevSibling = nil
+    } else {
+        parent.LastChild.NextSibling = n
+        n.PrevSibling = parent.LastChild
+    }
+
+    parent.LastChild = n
+}
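These helpers make it possible to assemble a document tree by hand; `AddSibling`, defined just below, appends at the end of the parent's child list. A minimal sketch (element names are illustrative):

```go
package main

import (
    "fmt"

    "github.com/antchfx/xmlquery"
)

func main() {
    // Build <root><a id="1"></a><b></b></root> by hand.
    root := &xmlquery.Node{Type: xmlquery.ElementNode, Data: "root"}
    a := &xmlquery.Node{Type: xmlquery.ElementNode, Data: "a"}
    xmlquery.AddChild(root, a)
    a.SetAttr("id", "1")

    b := &xmlquery.Node{Type: xmlquery.ElementNode, Data: "b"}
    xmlquery.AddSibling(a, b) // appended at the end of root's children

    fmt.Println(root.OutputXML(true)) // <root><a id="1"></a><b></b></root>
}
```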
+
+// AddSibling adds a new node 'n' as a sibling of a given node 'sibling'.
+// Note it is not necessarily true that the new node 'n' will be added
+// immediately after 'sibling'. If 'sibling' isn't the last child of its
+// parent, then the new node 'n' will be added at the end of the sibling
+// chain of their parent.
+func AddSibling(sibling, n *Node) {
+    for t := sibling.NextSibling; t != nil; t = t.NextSibling {
+        sibling = t
+    }
+    n.Parent = sibling.Parent
+    sibling.NextSibling = n
+    n.PrevSibling = sibling
+    n.NextSibling = nil
+    if sibling.Parent != nil {
+        sibling.Parent.LastChild = n
+    }
+}
+
+// RemoveFromTree removes a node and its subtree from the document
+// tree it is in. If the node is the root of the tree, it is a no-op.
+func RemoveFromTree(n *Node) {
+    if n.Parent == nil {
+        return
+    }
+    if n.Parent.FirstChild == n {
+        if n.Parent.LastChild == n {
+            n.Parent.FirstChild = nil
+            n.Parent.LastChild = nil
+        } else {
+            n.Parent.FirstChild = n.NextSibling
+            n.NextSibling.PrevSibling = nil
+        }
+    } else {
+        if n.Parent.LastChild == n {
+            n.Parent.LastChild = n.PrevSibling
+            n.PrevSibling.NextSibling = nil
+        } else {
+            n.PrevSibling.NextSibling = n.NextSibling
+            n.NextSibling.PrevSibling = n.PrevSibling
+        }
+    }
+    n.Parent = nil
+    n.PrevSibling = nil
+    n.NextSibling = nil
+}
diff --git a/vendor/github.com/antchfx/xmlquery/options.go b/vendor/github.com/antchfx/xmlquery/options.go
new file mode 100644
index 00000000000..6b902d21480
--- /dev/null
+++ b/vendor/github.com/antchfx/xmlquery/options.go
@@ -0,0 +1,33 @@
+package xmlquery
+
+import (
+    "encoding/xml"
+    "io"
+)
+
+type ParserOptions struct {
+    Decoder *DecoderOptions
+}
+
+func (options ParserOptions) apply(parser *parser) {
+    if options.Decoder != nil {
+        (*options.Decoder).apply(parser.decoder)
+    }
+}
+
+// DecoderOptions implements the same options as the standard
+// encoding/xml package's Decoder. Please refer to this documentation:
+// https://golang.org/pkg/encoding/xml/#Decoder
+type DecoderOptions struct {
+    Strict        bool
+    AutoClose     []string
+    Entity        map[string]string
+    CharsetReader func(charset string, input io.Reader) (io.Reader, error)
+}
+
+func (options DecoderOptions) apply(decoder *xml.Decoder) {
+    decoder.Strict = options.Strict
+    decoder.AutoClose = options.AutoClose
+    decoder.Entity = options.Entity
+    decoder.CharsetReader = options.CharsetReader
+}
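A short sketch of how these options are typically passed to `ParseWithOptions`; the lenient settings shown (HTML-style auto-close tags and entities) are illustrative assumptions, not defaults:

```go
package main

import (
    "encoding/xml"
    "fmt"
    "strings"

    "github.com/antchfx/xmlquery"
)

func main() {
    // Accept slightly malformed markup by relaxing the decoder.
    opts := xmlquery.ParserOptions{
        Decoder: &xmlquery.DecoderOptions{
            Strict:    false,
            AutoClose: xml.HTMLAutoClose,
            Entity:    xml.HTMLEntity,
        },
    }
    doc, err := xmlquery.ParseWithOptions(strings.NewReader("<root><br>hello</root>"), opts)
    if err != nil {
        panic(err)
    }
    fmt.Println(doc.OutputXML(true))
}
```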
diff --git a/vendor/github.com/antchfx/xmlquery/parse.go b/vendor/github.com/antchfx/xmlquery/parse.go
new file mode 100644
index 00000000000..7627f4650aa
--- /dev/null
+++ b/vendor/github.com/antchfx/xmlquery/parse.go
@@ -0,0 +1,414 @@
+package xmlquery
+
+import (
+    "bufio"
+    "encoding/xml"
+    "fmt"
+    "io"
+    "net/http"
+    "regexp"
+    "strings"
+    "sync"
+
+    "github.com/antchfx/xpath"
+    "golang.org/x/net/html/charset"
+)
+
+var xmlMIMERegex = regexp.MustCompile(`(?i)((application|image|message|model)/((\w|\.|-)+\+?)?|text/)(wb)?xml`)
+
+// LoadURL loads the XML document from the specified URL.
+func LoadURL(url string) (*Node, error) {
+    resp, err := http.Get(url)
+    if err != nil {
+        return nil, err
+    }
+    defer resp.Body.Close()
+    // Make sure the Content-Type has a valid XML MIME type
+    if xmlMIMERegex.MatchString(resp.Header.Get("Content-Type")) {
+        return Parse(resp.Body)
+    }
+    return nil, fmt.Errorf("invalid XML document(%s)", resp.Header.Get("Content-Type"))
+}
+
+// Parse returns the parse tree for the XML from the given Reader.
+func Parse(r io.Reader) (*Node, error) {
+    return ParseWithOptions(r, ParserOptions{})
+}
+
+// ParseWithOptions is like Parse, but with custom options.
+func ParseWithOptions(r io.Reader, options ParserOptions) (*Node, error) {
+    p := createParser(r)
+    options.apply(p)
+    for {
+        _, err := p.parse()
+        if err == io.EOF {
+            return p.doc, nil
+        }
+        if err != nil {
+            return nil, err
+        }
+    }
+}
+
+type parser struct {
+    decoder             *xml.Decoder
+    doc                 *Node
+    level               int
+    prev                *Node
+    streamElementXPath  *xpath.Expr  // Under streaming mode, this specifies the xpath to the target element node(s).
+    streamElementFilter *xpath.Expr  // If specified, it provides further filtering on the target element.
+    streamNode          *Node        // Need to remember the last target node so we can clean it up upon the next Read() call.
+    streamNodePrev      *Node        // Need to remember the target node's prev so upon target node removal, we can restore the correct prev.
+    reader              *cachedReader // Need to maintain a reference to the reader, so we can determine whether a node contains CDATA.
+    once                sync.Once
+    space2prefix        map[string]*xmlnsPrefix
+}
+
+type xmlnsPrefix struct {
+    name  string
+    level int
+}
+
+func createParser(r io.Reader) *parser {
+    reader := newCachedReader(bufio.NewReader(r))
+    p := &parser{
+        decoder: xml.NewDecoder(reader),
+        doc:     &Node{Type: DocumentNode},
+        level:   0,
+        reader:  reader,
+    }
+    if p.decoder.CharsetReader == nil {
+        p.decoder.CharsetReader = charset.NewReaderLabel
+    }
+    p.prev = p.doc
+    return p
+}
+
+func (p *parser) parse() (*Node, error) {
+    p.once.Do(func() {
+        p.space2prefix = map[string]*xmlnsPrefix{"http://www.w3.org/XML/1998/namespace": {name: "xml", level: 0}}
+    })
+
+    var streamElementNodeCounter int
+    for {
+        p.reader.StartCaching()
+        tok, err := p.decoder.Token()
+        p.reader.StopCaching()
+        if err != nil {
+            return nil, err
+        }
+
+        switch tok := tok.(type) {
+        case xml.StartElement:
+            if p.level == 0 {
+                // missing XML declaration
+                attributes := make([]Attr, 1)
+                attributes[0].Name = xml.Name{Local: "version"}
+                attributes[0].Value = "1.0"
+                node := &Node{
+                    Type:  DeclarationNode,
+                    Data:  "xml",
+                    Attr:  attributes,
+                    level: 1,
+                }
+                AddChild(p.prev, node)
+                p.level = 1
+                p.prev = node
+            }
+
+            for _, att := range tok.Attr {
+                if att.Name.Local == "xmlns" {
+                    // https://github.com/antchfx/xmlquery/issues/67
+                    if prefix, ok := p.space2prefix[att.Value]; !ok || (ok && prefix.level >= p.level) {
+                        p.space2prefix[att.Value] = &xmlnsPrefix{name: "", level: p.level} // reset to the empty prefix if a default namespace already exists
+                    }
+                } else if att.Name.Space == "xmlns" {
+                    // the same namespace URL may be bound to more than one prefix
+                    p.space2prefix[att.Value] = &xmlnsPrefix{name: att.Name.Local, level: p.level}
+                }
+            }
+
+            if space := tok.Name.Space; space != "" {
+                if _, found := p.space2prefix[space]; !found && p.decoder.Strict {
+                    return nil, fmt.Errorf("xmlquery: invalid XML document, namespace %s is missing", space)
+                }
+            }
+
+            attributes := make([]Attr, len(tok.Attr))
+            for i, att := range tok.Attr {
+                name := att.Name
+                if prefix, ok := p.space2prefix[name.Space]; ok {
+                    name.Space = prefix.name
+                }
+                attributes[i] = Attr{
+                    Name:         name,
+                    Value:        att.Value,
+                    NamespaceURI: att.Name.Space,
+                }
+            }
+
+            node := &Node{
+                Type:         ElementNode,
+                Data:         tok.Name.Local,
+                NamespaceURI: tok.Name.Space,
+                Attr:         attributes,
+                level:        p.level,
+            }
+
+            if p.level == p.prev.level {
+                AddSibling(p.prev, node)
+            } else if p.level > p.prev.level {
+                AddChild(p.prev, node)
+            } else if p.level < p.prev.level {
+                for i := p.prev.level - p.level; i > 1; i-- {
+                    p.prev = p.prev.Parent
+                }
+                AddSibling(p.prev.Parent, node)
+            }
+
+            if node.NamespaceURI != "" {
+                if v, ok := p.space2prefix[node.NamespaceURI]; ok {
+                    cached := string(p.reader.Cache())
+                    if strings.HasPrefix(cached, fmt.Sprintf("%s:%s", v.name, node.Data)) || strings.HasPrefix(cached, fmt.Sprintf("<%s:%s", v.name, node.Data)) {
+                        node.Prefix = v.name
+                    }
+                }
+            }
+            // If we're in the streaming mode, we need to remember the node if it is the target node
+            // so that when we finish processing the node's EndElement, we know how/what to return to
+            // the caller. Also we need to remove the target node from the tree upon the next Read() call so
+            // memory doesn't grow unbounded.
+            if p.streamElementXPath != nil {
+                if p.streamNode == nil {
+                    if QuerySelector(p.doc, p.streamElementXPath) != nil {
+                        p.streamNode = node
+                        p.streamNodePrev = p.prev
+                        streamElementNodeCounter = 1
+                    }
+                } else {
+                    streamElementNodeCounter++
+                }
+            }
+            p.prev = node
+            p.level++
+        case xml.EndElement:
+            p.level--
+            // If we're in streaming mode, and we already have a potential streaming
+            // target node identified (p.streamNode != nil), then we need to check if
+            // this is the real one we want to return to the caller.
+            if p.streamNode != nil {
+                streamElementNodeCounter--
+                if streamElementNodeCounter == 0 {
+                    // Now we know this element node is at least passing the initial
+                    // p.streamElementXPath check and is a potential target node candidate.
+                    // We need to have 1 more check with p.streamElementFilter (if given) to
+                    // ensure it is really the element node we want.
+                    // The reason we need a two-step check process is because of the following
+                    // situation:
+                    //   <AAA><BBB>b1</BBB><BBB>b2</BBB></AAA>
+                    // And say the p.streamElementXPath = "/AAA/BBB[. != 'b1']". Now during
+                    // xml.StartElement time, the node is still empty, so it will pass
+                    // the p.streamElementXPath check. However, eventually we know this node
+                    // shouldn't be returned to the caller. Having a second more fine-grained
+                    // filter check ensures that. So in this case, the caller should really
+                    // set up the stream parser with:
+                    //   streamElementXPath = "/AAA/BBB"
+                    //   streamElementFilter = "/AAA/BBB[. != 'b1']"
+                    if p.streamElementFilter == nil || QuerySelector(p.doc, p.streamElementFilter) != nil {
+                        return p.streamNode, nil
+                    }
+                    // otherwise, this isn't our target node, clean things up.
+                    // note we also remove the underlying *Node from the node tree, to prevent
+                    // future stream node candidate selection error.
+                    RemoveFromTree(p.streamNode)
+                    p.prev = p.streamNodePrev
+                    p.streamNode = nil
+                    p.streamNodePrev = nil
+                }
+            }
+        case xml.CharData:
+            // First, normalize the cache...
+            cached := strings.ToUpper(string(p.reader.Cache()))
+            nodeType := TextNode
+            if strings.HasPrefix(cached, "<![CDATA[") {
+                nodeType = CharDataNode
+            }
+
+            node := &Node{Type: nodeType, Data: string(tok), level: p.level}
+            if p.level == p.prev.level {
+                AddSibling(p.prev, node)
+            } else if p.level > p.prev.level {
+                AddChild(p.prev, node)
+            } else if p.level < p.prev.level {
+                for i := p.prev.level - p.level; i > 1; i-- {
+                    p.prev = p.prev.Parent
+                }
+                AddSibling(p.prev.Parent, node)
+            }
+        case xml.Comment:
+            node := &Node{Type: CommentNode, Data: string(tok), level: p.level}
+            if p.level == p.prev.level {
+                AddSibling(p.prev, node)
+            } else if p.level > p.prev.level {
+                AddChild(p.prev, node)
+            } else if p.level < p.prev.level {
+                for i := p.prev.level - p.level; i > 1; i-- {
+                    p.prev = p.prev.Parent
+                }
+                AddSibling(p.prev.Parent, node)
+            }
+        case xml.ProcInst: // Processing Instruction
+            if p.prev.Type != DeclarationNode {
+                p.level++
+            }
+            node := &Node{Type: DeclarationNode, Data: tok.Target, level: p.level}
+            pairs := strings.Split(string(tok.Inst), " ")
+            for _, pair := range pairs {
+                pair = strings.TrimSpace(pair)
+                if i := strings.Index(pair, "="); i > 0 {
+                    AddAttr(node, pair[:i], strings.Trim(pair[i+1:], `"'`))
+                }
+            }
+            if p.level == p.prev.level {
+                AddSibling(p.prev, node)
+            } else if p.level > p.prev.level {
+                AddChild(p.prev, node)
+            } else if p.level < p.prev.level {
+                for i := p.prev.level - p.level; i > 1; i-- {
+                    p.prev = p.prev.Parent
+                }
+                AddSibling(p.prev.Parent, node)
+            }
+            p.prev = node
+        case xml.Directive:
+            node := &Node{Type: NotationNode, Data: string(tok), level: p.level}
+            if p.level == p.prev.level {
+                AddSibling(p.prev, node)
+            } else if p.level > p.prev.level {
+                AddChild(p.prev, node)
+            } else if p.level < p.prev.level {
+                for i := p.prev.level - p.level; i > 1; i-- {
+                    p.prev = p.prev.Parent
+                }
+                AddSibling(p.prev.Parent, node)
+            }
+        }
+    }
+}
+
+// StreamParser enables loading and parsing an XML document in a streaming
+// fashion.
+type StreamParser struct {
+    p *parser
+}
+
+// CreateStreamParser creates a StreamParser. Argument streamElementXPath is
+// required.
+// Argument streamElementFilter is optional and should only be used in advanced
+// scenarios.
+//
+// Scenario 1: simple case:
+//
+//	xml := `<AAA><BBB>b1</BBB><BBB>b2</BBB></AAA>`
+//	sp, err := CreateStreamParser(strings.NewReader(xml), "/AAA/BBB")
+//	if err != nil {
+//		panic(err)
+//	}
+//	for {
+//		n, err := sp.Read()
+//		if err != nil {
+//			break
+//		}
+//		fmt.Println(n.OutputXML(true))
+//	}
+//
+// Output will be:
+//
+//	<BBB>b1</BBB>
+//	<BBB>b2</BBB>
+//
+// Scenario 2: advanced case:
+//
+//	xml := `<AAA><BBB>b1</BBB><BBB>b2</BBB></AAA>`
+//	sp, err := CreateStreamParser(strings.NewReader(xml), "/AAA/BBB", "/AAA/BBB[. != 'b1']")
+//	if err != nil {
+//		panic(err)
+//	}
+//	for {
+//		n, err := sp.Read()
+//		if err != nil {
+//			break
+//		}
+//		fmt.Println(n.OutputXML(true))
+//	}
+//
+// Output will be:
+//
+//	<BBB>b2</BBB>
+//
+// As the argument names indicate, streamElementXPath should be used for
+// providing an xpath query pointing to the target element node only, with no extra
+// filtering on the element itself or its children; while streamElementFilter,
+// if needed, can provide additional filtering on the target element and its
+// children.
+//
+// CreateStreamParser returns an error if either streamElementXPath or
+// streamElementFilter, if provided, cannot be successfully parsed and compiled
+// into a valid xpath query.
+func CreateStreamParser(r io.Reader, streamElementXPath string, streamElementFilter ...string) (*StreamParser, error) {
+    return CreateStreamParserWithOptions(r, ParserOptions{}, streamElementXPath, streamElementFilter...)
+}
+
+// CreateStreamParserWithOptions is like CreateStreamParser, but with custom options.
+func CreateStreamParserWithOptions(
+    r io.Reader,
+    options ParserOptions,
+    streamElementXPath string,
+    streamElementFilter ...string,
+) (*StreamParser, error) {
+    elemXPath, err := getQuery(streamElementXPath)
+    if err != nil {
+        return nil, fmt.Errorf("invalid streamElementXPath '%s', err: %s", streamElementXPath, err.Error())
+    }
+    elemFilter := (*xpath.Expr)(nil)
+    if len(streamElementFilter) > 0 {
+        elemFilter, err = getQuery(streamElementFilter[0])
+        if err != nil {
+            return nil, fmt.Errorf("invalid streamElementFilter '%s', err: %s", streamElementFilter[0], err.Error())
+        }
+    }
+    parser := createParser(r)
+    options.apply(parser)
+    sp := &StreamParser{
+        p: parser,
+    }
+    sp.p.streamElementXPath = elemXPath
+    sp.p.streamElementFilter = elemFilter
+    return sp, nil
+}
+
+// Read returns a target node that satisfies the XPath specified by the caller at
+// StreamParser creation time. If there are no more satisfying target nodes after
+// reading the rest of the XML document, io.EOF will be returned. At any time,
+// any XML parsing error encountered will be returned, and the stream parsing
+// stopped. Calling Read() after an error is returned (including io.EOF) results
+// in undefined behavior. Also note, due to the streaming nature, calling Read()
+// will automatically remove any previous target node(s) from the document tree.
+func (sp *StreamParser) Read() (*Node, error) {
+    // Because this is a streaming read, we need to release/remove the last
+    // target node from the node tree to free up memory.
+    if sp.p.streamNode != nil {
+        // We need to remove all siblings before the current stream node,
+        // because the document may contain unwanted nodes between the target
+        // ones (for example newline text nodes), which would otherwise
+        // accumulate as first children, and slow down the stream over time.
+        for sp.p.streamNode.PrevSibling != nil {
+            RemoveFromTree(sp.p.streamNode.PrevSibling)
+        }
+        sp.p.prev = sp.p.streamNode.Parent
+        RemoveFromTree(sp.p.streamNode)
+        sp.p.streamNode = nil
+        sp.p.streamNodePrev = nil
+    }
+    return sp.p.parse()
+}
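For completeness, a sketch of combining parser options with streaming; the file name, XPath, and lenient decoder setting are illustrative assumptions, not defaults:

```go
package main

import (
    "fmt"
    "io"
    "os"

    "github.com/antchfx/xmlquery"
)

func main() {
    f, err := os.Open("feed.xml") // placeholder path
    if err != nil {
        panic(err)
    }
    defer f.Close()

    // Relax the decoder, then stream one /feed/entry element at a time.
    opts := xmlquery.ParserOptions{Decoder: &xmlquery.DecoderOptions{Strict: false}}
    sp, err := xmlquery.CreateStreamParserWithOptions(f, opts, "/feed/entry")
    if err != nil {
        panic(err)
    }
    for {
        n, err := sp.Read()
        if err == io.EOF {
            break
        }
        if err != nil {
            panic(err)
        }
        fmt.Println(n.OutputXML(true))
    }
}
```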
diff --git a/vendor/github.com/antchfx/xmlquery/query.go b/vendor/github.com/antchfx/xmlquery/query.go
new file mode 100644
index 00000000000..d1353aac485
--- /dev/null
+++ b/vendor/github.com/antchfx/xmlquery/query.go
@@ -0,0 +1,304 @@
+/*
+Package xmlquery provides functions for extracting data from XML documents
+using XPath expressions.
+*/
+package xmlquery
+
+import (
+    "fmt"
+    "strings"
+
+    "github.com/antchfx/xpath"
+)
+
+// SelectElements finds the child elements with the specified name.
+func (n *Node) SelectElements(name string) []*Node {
+    return Find(n, name)
+}
+
+// SelectElement finds the first child element with the specified name.
+func (n *Node) SelectElement(name string) *Node {
+    return FindOne(n, name)
+}
+
+// SelectAttr returns the attribute value with the specified name.
+func (n *Node) SelectAttr(name string) string {
+    if n.Type == AttributeNode {
+        if n.Data == name {
+            return n.InnerText()
+        }
+        return ""
+    }
+    xmlName := newXMLName(name)
+    for _, attr := range n.Attr {
+        if attr.Name == xmlName {
+            return attr.Value
+        }
+    }
+    return ""
+}
+
+var _ xpath.NodeNavigator = &NodeNavigator{}
+
+// CreateXPathNavigator creates a new xpath.NodeNavigator for the specified
+// XML Node.
+func CreateXPathNavigator(top *Node) *NodeNavigator {
+    return &NodeNavigator{curr: top, root: top, attr: -1}
+}
+
+func getCurrentNode(it *xpath.NodeIterator) *Node {
+    n := it.Current().(*NodeNavigator)
+    if n.NodeType() == xpath.AttributeNode {
+        childNode := &Node{
+            Type: TextNode,
+            Data: n.Value(),
+        }
+        return &Node{
+            Parent:     n.curr,
+            Type:       AttributeNode,
+            Data:       n.LocalName(),
+            FirstChild: childNode,
+            LastChild:  childNode,
+        }
+    }
+    return n.curr
+}
+
+// Find is like QueryAll but panics if `expr` is not a valid XPath expression.
+// See the `QueryAll()` function.
+func Find(top *Node, expr string) []*Node {
+    nodes, err := QueryAll(top, expr)
+    if err != nil {
+        panic(err)
+    }
+    return nodes
+}
+
+// FindOne is like Query but panics if `expr` is not a valid XPath expression.
+// See the `Query()` function.
+func FindOne(top *Node, expr string) *Node {
+    node, err := Query(top, expr)
+    if err != nil {
+        panic(err)
+    }
+    return node
+}
+
+// QueryAll returns all XML nodes that match the specified XPath expr.
+// It returns an error if the expression `expr` cannot be parsed.
+func QueryAll(top *Node, expr string) ([]*Node, error) {
+    exp, err := getQuery(expr)
+    if err != nil {
+        return nil, err
+    }
+    return QuerySelectorAll(top, exp), nil
+}
+
+// Query returns the first XML node that matches the specified XPath expr.
+func Query(top *Node, expr string) (*Node, error) {
+    exp, err := getQuery(expr)
+    if err != nil {
+        return nil, err
+    }
+    return QuerySelector(top, exp), nil
+}
+
+// QuerySelectorAll returns all of the XML nodes that match the specified
+// compiled XPath selector.
+func QuerySelectorAll(top *Node, selector *xpath.Expr) []*Node {
+    t := selector.Select(CreateXPathNavigator(top))
+    var elems []*Node
+    for t.MoveNext() {
+        elems = append(elems, getCurrentNode(t))
+    }
+    return elems
+}
+
+// QuerySelector returns the first matched XML Node by the specified compiled
+// XPath selector.
+func QuerySelector(top *Node, selector *xpath.Expr) *Node {
+    t := selector.Select(CreateXPathNavigator(top))
+    if t.MoveNext() {
+        return getCurrentNode(t)
+    }
+    return nil
+}
+
+// FindEach searches the XML Node and calls the function cb for each match.
+// Deprecated: use `for _, n := range Find(...) {}` instead.
+func FindEach(top *Node, expr string, cb func(int, *Node)) {
+    for i, n := range Find(top, expr) {
+        cb(i, n)
+    }
+}
+
+// FindEachWithBreak functions the same as FindEach but allows breaking the
+// loop by returning false from the callback function `cb`.
+// Deprecated: use `for _, n := range Find(...) {}` instead.
+func FindEachWithBreak(top *Node, expr string, cb func(int, *Node) bool) { + for i, n := range Find(top, expr) { + if !cb(i, n) { + break + } + } +} + +type NodeNavigator struct { + root, curr *Node + attr int +} + +func (x *NodeNavigator) Current() *Node { + return x.curr +} + +func (x *NodeNavigator) NodeType() xpath.NodeType { + switch x.curr.Type { + case CommentNode: + return xpath.CommentNode + case TextNode, CharDataNode, NotationNode: + return xpath.TextNode + case DeclarationNode, DocumentNode: + return xpath.RootNode + case ElementNode: + if x.attr != -1 { + return xpath.AttributeNode + } + return xpath.ElementNode + } + panic(fmt.Sprintf("unknown XML node type: %v", x.curr.Type)) +} + +func (x *NodeNavigator) LocalName() string { + if x.attr != -1 { + return x.curr.Attr[x.attr].Name.Local + } + return x.curr.Data + +} + +func (x *NodeNavigator) Prefix() string { + if x.NodeType() == xpath.AttributeNode { + if x.attr != -1 { + return x.curr.Attr[x.attr].Name.Space + } + return "" + } + return x.curr.Prefix +} + +func (x *NodeNavigator) NamespaceURL() string { + if x.attr != -1 { + return x.curr.Attr[x.attr].NamespaceURI + } + return x.curr.NamespaceURI +} + +func (x *NodeNavigator) Value() string { + switch x.curr.Type { + case CommentNode: + return x.curr.Data + case ElementNode: + if x.attr != -1 { + return x.curr.Attr[x.attr].Value + } + return x.curr.InnerText() + case TextNode: + return x.curr.Data + } + return "" +} + +func (x *NodeNavigator) Copy() xpath.NodeNavigator { + n := *x + return &n +} + +func (x *NodeNavigator) MoveToRoot() { + x.curr = x.root +} + +func (x *NodeNavigator) MoveToParent() bool { + if x.attr != -1 { + x.attr = -1 + return true + } else if node := x.curr.Parent; node != nil { + x.curr = node + return true + } + return false +} + +func (x *NodeNavigator) MoveToNextAttribute() bool { + if x.attr >= len(x.curr.Attr)-1 { + return false + } + x.attr++ + return true +} + +func (x *NodeNavigator) MoveToChild() bool { + if x.attr != -1 { + return false + } + if node := x.curr.FirstChild; node != nil { + x.curr = node + return true + } + return false +} + +func (x *NodeNavigator) MoveToFirst() bool { + if x.attr != -1 || x.curr.PrevSibling == nil { + return false + } + for { + node := x.curr.PrevSibling + if node == nil { + break + } + x.curr = node + } + return true +} + +func (x *NodeNavigator) String() string { + return x.Value() +} + +func (x *NodeNavigator) MoveToNext() bool { + if x.attr != -1 { + return false + } + for node := x.curr.NextSibling; node != nil; node = x.curr.NextSibling { + x.curr = node + if x.curr.Type != TextNode || strings.TrimSpace(x.curr.Data) != "" { + return true + } + } + return false +} + +func (x *NodeNavigator) MoveToPrevious() bool { + if x.attr != -1 { + return false + } + for node := x.curr.PrevSibling; node != nil; node = x.curr.PrevSibling { + x.curr = node + if x.curr.Type != TextNode || strings.TrimSpace(x.curr.Data) != "" { + return true + } + } + return false +} + +func (x *NodeNavigator) MoveTo(other xpath.NodeNavigator) bool { + node, ok := other.(*NodeNavigator) + if !ok || node.root != x.root { + return false + } + + x.curr = node.curr + x.attr = node.attr + return true +} diff --git a/vendor/github.com/antchfx/xpath/.gitignore b/vendor/github.com/antchfx/xpath/.gitignore new file mode 100644 index 00000000000..4d5d27b1d3a --- /dev/null +++ b/vendor/github.com/antchfx/xpath/.gitignore @@ -0,0 +1,32 @@ +# vscode +.vscode +debug +*.test + +./build + +# Compiled Object files, Static and Dynamic libs (Shared 
Objects)
+*.o
+*.a
+*.so
+
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
\ No newline at end of file
diff --git a/vendor/github.com/antchfx/xpath/LICENSE b/vendor/github.com/antchfx/xpath/LICENSE
new file mode 100644
index 00000000000..e14c37141c5
--- /dev/null
+++ b/vendor/github.com/antchfx/xpath/LICENSE
@@ -0,0 +1,17 @@
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/antchfx/xpath/README.md b/vendor/github.com/antchfx/xpath/README.md
new file mode 100644
index 00000000000..733c4c8727e
--- /dev/null
+++ b/vendor/github.com/antchfx/xpath/README.md
@@ -0,0 +1,167 @@
+# XPath
+
+[![GoDoc](https://godoc.org/github.com/antchfx/xpath?status.svg)](https://godoc.org/github.com/antchfx/xpath)
+[![Coverage Status](https://coveralls.io/repos/github/antchfx/xpath/badge.svg?branch=master)](https://coveralls.io/github/antchfx/xpath?branch=master)
+[![Build Status](https://github.com/antchfx/xpath/actions/workflows/testing.yml/badge.svg)](https://github.com/antchfx/xpath/actions/workflows/testing.yml)
+[![Go Report Card](https://goreportcard.com/badge/github.com/antchfx/xpath)](https://goreportcard.com/report/github.com/antchfx/xpath)
+
+XPath is a Go package for selecting nodes from XML, HTML, and other documents using XPath expressions.
+
+# Implementation
+
+- [htmlquery](https://github.com/antchfx/htmlquery) - an XPath query package for HTML documents.
+
+- [xmlquery](https://github.com/antchfx/xmlquery) - an XPath query package for XML documents.
+
+- [jsonquery](https://github.com/antchfx/jsonquery) - an XPath query package for JSON documents.
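A minimal sketch of using this package directly; it reuses `xmlquery`'s navigator from the sibling package above, and the inline document is a placeholder:

```go
package main

import (
    "fmt"
    "strings"

    "github.com/antchfx/xmlquery"
    "github.com/antchfx/xpath"
)

func main() {
    doc, err := xmlquery.Parse(strings.NewReader("<bookstore><book><price>7</price></book></bookstore>"))
    if err != nil {
        panic(err)
    }
    expr, err := xpath.Compile("count(//book)")
    if err != nil {
        panic(err)
    }
    // Evaluate runs the compiled expression against a NodeNavigator.
    count := expr.Evaluate(xmlquery.CreateXPathNavigator(doc)).(float64)
    fmt.Println(count) // 1
}
```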
+
+# Supported Features
+
+#### The basic XPath patterns.
+
+> The basic XPath patterns cover 90% of the cases that most stylesheets will need.
+
+- `node` : Selects all child elements with nodeName of node.
+
+- `*` : Selects all child elements.
+
+- `@attr` : Selects the attribute attr.
+
+- `@*` : Selects all attributes.
+
+- `node()` : Matches a node of any type.
+
+- `text()` : Matches a text node.
+
+- `comment()` : Matches a comment.
+
+- `.` : Selects the current node.
+
+- `..` : Selects the parent of the current node.
+
+- `/` : Selects the document node.
+
+- `a[expr]` : Selects only those nodes matching a which also satisfy the expression expr.
+
+- `a[n]` : Selects the nth node matching a. When a filter's expression is a number, XPath selects based on position.
+
+- `a/b` : For each node matching a, add the nodes matching b to the result.
+
+- `a//b` : For each node matching a, add the descendant nodes matching b to the result.
+
+- `//b` : Returns elements in the entire document matching b.
+
+- `a|b` : All nodes matching a or b, a union operation (not boolean or).
+
+- `(a, b, c)` : Evaluates each of its operands and concatenates the resulting sequences, in order, into a single result sequence.
+
+- `(a/b)` : Selects all matched nodes as a grouping set.
+
+#### Node Axes
+
+- `child::*` : The child axis selects children of the current node.
+
+  - `child::node()`: Selects all the children of the context node.
+  - `child::text()`: Selects all text node children of the context node.
+
+- `descendant::*` : The descendant axis selects descendants of the current node. It is equivalent to `//`.
+
+- `descendant-or-self::*` : Selects descendants including the current node.
+
+- `attribute::*` : Selects attributes of the current element. It is equivalent to `@*`.
+
+- `following-sibling::*` : Selects nodes after the current node.
+
+- `preceding-sibling::*` : Selects nodes before the current node.
+
+- `following::*` : Selects the first matching node following in document order, excluding descendants.
+
+- `preceding::*` : Selects the first matching node preceding in document order, excluding ancestors.
+
+- `parent::*` : Selects the parent if it matches. The `..` pattern from the core is equivalent to `parent::node()`.
+
+- `ancestor::*` : Selects matching ancestors.
+
+- `ancestor-or-self::*` : Selects ancestors including the current node.
+
+- `self::*` : Selects the current node. `.` is equivalent to `self::node()`.
+
+#### Expressions
+
+The package supports three data types: number, boolean, and string.
+
+- `path` : Selects nodes based on the path.
+
+- `a = b` : Standard comparisons.
+
+  - `a = b` : True if a equals b.
+  - `a != b` : True if a is not equal to b.
+  - `a < b` : True if a is less than b.
+  - `a <= b` : True if a is less than or equal to b.
+  - `a > b` : True if a is greater than b.
+  - `a >= b` : True if a is greater than or equal to b.
+
+- `a + b` : Arithmetic expressions.
+
+  - `- a` : Unary minus.
+  - `a + b` : Addition.
+  - `a - b` : Subtraction.
+  - `a * b` : Multiplication.
+  - `a div b` : Division.
+  - `a mod b` : Modulus (division remainder).
+
+- `a or b` : Boolean `or` operation.
+
+- `a and b` : Boolean `and` operation.
+
+- `(expr)` : Parenthesized expressions.
+ +- `fun(arg1, ..., argn)` : Function calls: + +| Function | Supported | +| ----------------------- | --------- | +| `boolean()` | ✓ | +| `ceiling()` | ✓ | +| `choose()` | ✗ | +| `concat()` | ✓ | +| `contains()` | ✓ | +| `count()` | ✓ | +| `current()` | ✗ | +| `document()` | ✗ | +| `element-available()` | ✗ | +| `ends-with()` | ✓ | +| `false()` | ✓ | +| `floor()` | ✓ | +| `format-number()` | ✗ | +| `function-available()` | ✗ | +| `generate-id()` | ✗ | +| `id()` | ✗ | +| `key()` | ✗ | +| `lang()` | ✗ | +| `last()` | ✓ | +| `local-name()` | ✓ | +| `lower-case()`[^1] | ✓ | +| `matches()` | ✓ | +| `name()` | ✓ | +| `namespace-uri()` | ✓ | +| `normalize-space()` | ✓ | +| `not()` | ✓ | +| `number()` | ✓ | +| `position()` | ✓ | +| `replace()` | ✓ | +| `reverse()` | ✓ | +| `round()` | ✓ | +| `starts-with()` | ✓ | +| `string()` | ✓ | +| `string-join()`[^1] | ✓ | +| `string-length()` | ✓ | +| `substring()` | ✓ | +| `substring-after()` | ✓ | +| `substring-before()` | ✓ | +| `sum()` | ✓ | +| `system-property()` | ✗ | +| `translate()` | ✓ | +| `true()` | ✓ | +| `unparsed-entity-url()` | ✗ | + +[^1]: XPath-2.0 expression diff --git a/vendor/github.com/antchfx/xpath/build.go b/vendor/github.com/antchfx/xpath/build.go new file mode 100644 index 00000000000..a93c8eb5f78 --- /dev/null +++ b/vendor/github.com/antchfx/xpath/build.go @@ -0,0 +1,718 @@ +package xpath + +import ( + "errors" + "fmt" +) + +type flag int + +var flagsEnum = struct { + None flag + SmartDesc flag + PosFilter flag + Filter flag + Condition flag +}{ + None: 0, + SmartDesc: 1, + PosFilter: 2, + Filter: 4, + Condition: 8, +} + +type builderProp int + +var builderProps = struct { + None builderProp + PosFilter builderProp + HasPosition builderProp + HasLast builderProp + NonFlat builderProp +}{ + None: 0, + PosFilter: 1, + HasPosition: 2, + HasLast: 4, + NonFlat: 8, +} + +// builder provides building an XPath expressions. +type builder struct { + parseDepth int + firstInput query +} + +// axisPredicate creates a predicate to predicating for this axis node. +func axisPredicate(root *axisNode) func(NodeNavigator) bool { + nametest := root.LocalName != "" || root.Prefix != "" + predicate := func(n NodeNavigator) bool { + if root.typeTest == n.NodeType() || root.typeTest == allNode { + if nametest { + type namespaceURL interface { + NamespaceURL() string + } + if ns, ok := n.(namespaceURL); ok && root.hasNamespaceURI { + return root.LocalName == n.LocalName() && root.namespaceURI == ns.NamespaceURL() + } + if root.LocalName == n.LocalName() && root.Prefix == n.Prefix() { + return true + } + } else { + return true + } + } + return false + } + + return predicate +} + +// processAxis processes a query for the XPath axis node. 
+func (b *builder) processAxis(root *axisNode, flags flag, props *builderProp) (query, error) { + var ( + err error + qyInput query + qyOutput query + ) + b.firstInput = nil + predicate := axisPredicate(root) + + if root.Input == nil { + qyInput = &contextQuery{} + *props = builderProps.None + } else { + inputFlags := flagsEnum.None + if (flags & flagsEnum.Filter) == 0 { + if root.AxisType == "child" && (root.Input.Type() == nodeAxis) { + if input := root.Input.(*axisNode); input.AxisType == "descendant-or-self" { + var qyGrandInput query + if input.Input != nil { + qyGrandInput, err = b.processNode(input.Input, flagsEnum.SmartDesc, props) + if err != nil { + return nil, err + } + } else { + qyGrandInput = &contextQuery{} + } + qyOutput = &descendantQuery{name: root.LocalName, Input: qyGrandInput, Predicate: predicate, Self: false} + *props |= builderProps.NonFlat + return qyOutput, nil + } + } + if root.AxisType == "descendant" || root.AxisType == "descendant-or-self" { + inputFlags |= flagsEnum.SmartDesc + } + } + + qyInput, err = b.processNode(root.Input, inputFlags, props) + if err != nil { + return nil, err + } + } + + switch root.AxisType { + case "ancestor": + qyOutput = &ancestorQuery{name: root.LocalName, Input: qyInput, Predicate: predicate} + *props |= builderProps.NonFlat + case "ancestor-or-self": + qyOutput = &ancestorQuery{name: root.LocalName, Input: qyInput, Predicate: predicate, Self: true} + *props |= builderProps.NonFlat + case "attribute": + qyOutput = &attributeQuery{name: root.LocalName, Input: qyInput, Predicate: predicate} + case "child": + if (*props & builderProps.NonFlat) == 0 { + qyOutput = &childQuery{name: root.LocalName, Input: qyInput, Predicate: predicate} + } else { + qyOutput = &cachedChildQuery{name: root.LocalName, Input: qyInput, Predicate: predicate} + } + case "descendant": + if (flags & flagsEnum.SmartDesc) != flagsEnum.None { + qyOutput = &descendantOverDescendantQuery{name: root.LocalName, Input: qyInput, MatchSelf: false, Predicate: predicate} + } else { + qyOutput = &descendantQuery{name: root.LocalName, Input: qyInput, Predicate: predicate} + } + *props |= builderProps.NonFlat + case "descendant-or-self": + if (flags & flagsEnum.SmartDesc) != flagsEnum.None { + qyOutput = &descendantOverDescendantQuery{name: root.LocalName, Input: qyInput, MatchSelf: true, Predicate: predicate} + } else { + qyOutput = &descendantQuery{name: root.LocalName, Input: qyInput, Predicate: predicate, Self: true} + } + *props |= builderProps.NonFlat + case "following": + qyOutput = &followingQuery{Input: qyInput, Predicate: predicate} + *props |= builderProps.NonFlat + case "following-sibling": + qyOutput = &followingQuery{Input: qyInput, Predicate: predicate, Sibling: true} + case "parent": + qyOutput = &parentQuery{Input: qyInput, Predicate: predicate} + case "preceding": + qyOutput = &precedingQuery{Input: qyInput, Predicate: predicate} + *props |= builderProps.NonFlat + case "preceding-sibling": + qyOutput = &precedingQuery{Input: qyInput, Predicate: predicate, Sibling: true} + case "self": + qyOutput = &selfQuery{Input: qyInput, Predicate: predicate} + case "namespace": + // haha,what will you do someting?? + default: + err = fmt.Errorf("unknown axe type: %s", root.AxisType) + return nil, err + } + return qyOutput, nil +} + +func canBeNumber(q query) bool { + if q.ValueType() != xpathResultType.Any { + return q.ValueType() == xpathResultType.Number + } + return true +} + +// processFilterNode builds query for the XPath filter predicate. 
+func (b *builder) processFilter(root *filterNode, flags flag, props *builderProp) (query, error) { + first := (flags & flagsEnum.Filter) == 0 + + qyInput, err := b.processNode(root.Input, (flags | flagsEnum.Filter), props) + if err != nil { + return nil, err + } + firstInput := b.firstInput + + var propsCond builderProp + cond, err := b.processNode(root.Condition, flags, &propsCond) + if err != nil { + return nil, err + } + + // Checking whether is number + if canBeNumber(cond) || ((propsCond & (builderProps.HasPosition | builderProps.HasLast)) != 0) { + propsCond |= builderProps.HasPosition + flags |= flagsEnum.PosFilter + } + + if root.Input.Type() != nodeFilter { + *props &= ^builderProps.PosFilter + } + + if (propsCond & builderProps.HasPosition) != 0 { + *props |= builderProps.PosFilter + } + + if (propsCond & builderProps.HasPosition) != builderProps.None { + if (propsCond & builderProps.HasLast) != 0 { + // https://github.com/antchfx/xpath/issues/76 + // https://github.com/antchfx/xpath/issues/78 + if qyFunc, ok := cond.(*functionQuery); ok { + switch qyFunc.Input.(type) { + case *filterQuery: + cond = &lastFuncQuery{Input: qyFunc.Input} + } + } + } + } + + merge := (qyInput.Properties() & queryProps.Merge) != 0 + if first && firstInput != nil { + if merge && ((*props & builderProps.PosFilter) != 0) { + var ( + rootQuery = &contextQuery{} + parent query + ) + switch axisQuery := firstInput.(type) { + case *ancestorQuery: + if _, ok := axisQuery.Input.(*contextQuery); !ok { + parent = axisQuery.Input + axisQuery.Input = rootQuery + } + case *attributeQuery: + if _, ok := axisQuery.Input.(*contextQuery); !ok { + parent = axisQuery.Input + axisQuery.Input = rootQuery + } + case *childQuery: + if _, ok := axisQuery.Input.(*contextQuery); !ok { + parent = axisQuery.Input + axisQuery.Input = rootQuery + } + case *cachedChildQuery: + if _, ok := axisQuery.Input.(*contextQuery); !ok { + parent = axisQuery.Input + axisQuery.Input = rootQuery + } + case *descendantQuery: + if _, ok := axisQuery.Input.(*contextQuery); !ok { + parent = axisQuery.Input + axisQuery.Input = rootQuery + } + case *followingQuery: + if _, ok := axisQuery.Input.(*contextQuery); !ok { + parent = axisQuery.Input + axisQuery.Input = rootQuery + } + case *precedingQuery: + if _, ok := axisQuery.Input.(*contextQuery); !ok { + parent = axisQuery.Input + axisQuery.Input = rootQuery + } + case *parentQuery: + if _, ok := axisQuery.Input.(*contextQuery); !ok { + parent = axisQuery.Input + axisQuery.Input = rootQuery + } + case *selfQuery: + if _, ok := axisQuery.Input.(*contextQuery); !ok { + parent = axisQuery.Input + axisQuery.Input = rootQuery + } + case *groupQuery: + if _, ok := axisQuery.Input.(*contextQuery); !ok { + parent = axisQuery.Input + axisQuery.Input = rootQuery + } + case *descendantOverDescendantQuery: + if _, ok := axisQuery.Input.(*contextQuery); !ok { + parent = axisQuery.Input + axisQuery.Input = rootQuery + } + } + b.firstInput = nil + child := &filterQuery{Input: qyInput, Predicate: cond, NoPosition: false} + if parent != nil { + return &mergeQuery{Input: parent, Child: child}, nil + } + return child, nil + } + b.firstInput = nil + } + + resultQuery := &filterQuery{ + Input: qyInput, + Predicate: cond, + NoPosition: (propsCond & builderProps.HasPosition) == 0, + } + return resultQuery, nil +} + +// processFunctionNode processes query for the XPath function node. 
+func (b *builder) processFunction(root *functionNode, props *builderProp) (query, error) { + // Reset builder props + *props = builderProps.None + + var qyOutput query + switch root.FuncName { + case "lower-case": + arg, err := b.processNode(root.Args[0], flagsEnum.None, props) + if err != nil { + return nil, err + } + qyOutput = &functionQuery{Func: lowerCaseFunc(arg)} + case "starts-with": + arg1, err := b.processNode(root.Args[0], flagsEnum.None, props) + if err != nil { + return nil, err + } + arg2, err := b.processNode(root.Args[1], flagsEnum.None, props) + if err != nil { + return nil, err + } + qyOutput = &functionQuery{Func: startwithFunc(arg1, arg2)} + case "ends-with": + arg1, err := b.processNode(root.Args[0], flagsEnum.None, props) + if err != nil { + return nil, err + } + arg2, err := b.processNode(root.Args[1], flagsEnum.None, props) + if err != nil { + return nil, err + } + qyOutput = &functionQuery{Func: endwithFunc(arg1, arg2)} + case "contains": + arg1, err := b.processNode(root.Args[0], flagsEnum.None, props) + if err != nil { + return nil, err + } + arg2, err := b.processNode(root.Args[1], flagsEnum.None, props) + if err != nil { + return nil, err + } + qyOutput = &functionQuery{Func: containsFunc(arg1, arg2)} + case "matches": + //matches(string , pattern) + if len(root.Args) != 2 { + return nil, errors.New("xpath: matches function must have two parameters") + } + var ( + arg1, arg2 query + err error + ) + if arg1, err = b.processNode(root.Args[0], flagsEnum.None, props); err != nil { + return nil, err + } + if arg2, err = b.processNode(root.Args[1], flagsEnum.None, props); err != nil { + return nil, err + } + // Issue #92, testing the regular expression before. + if q, ok := arg2.(*constantQuery); ok { + if _, err = getRegexp(q.Val.(string)); err != nil { + return nil, fmt.Errorf("matches() got error. 
%v", err) + } + } + qyOutput = &functionQuery{Func: matchesFunc(arg1, arg2)} + case "substring": + //substring( string , start [, length] ) + if len(root.Args) < 2 { + return nil, errors.New("xpath: substring function must have at least two parameter") + } + var ( + arg1, arg2, arg3 query + err error + ) + if arg1, err = b.processNode(root.Args[0], flagsEnum.None, props); err != nil { + return nil, err + } + if arg2, err = b.processNode(root.Args[1], flagsEnum.None, props); err != nil { + return nil, err + } + if len(root.Args) == 3 { + if arg3, err = b.processNode(root.Args[2], flagsEnum.None, props); err != nil { + return nil, err + } + } + qyOutput = &functionQuery{Func: substringFunc(arg1, arg2, arg3)} + case "substring-before", "substring-after": + //substring-xxxx( haystack, needle ) + if len(root.Args) != 2 { + return nil, errors.New("xpath: substring-before function must have two parameters") + } + var ( + arg1, arg2 query + err error + ) + if arg1, err = b.processNode(root.Args[0], flagsEnum.None, props); err != nil { + return nil, err + } + if arg2, err = b.processNode(root.Args[1], flagsEnum.None, props); err != nil { + return nil, err + } + qyOutput = &functionQuery{ + Func: substringIndFunc(arg1, arg2, root.FuncName == "substring-after"), + } + case "string-length": + // string-length( [string] ) + if len(root.Args) < 1 { + return nil, errors.New("xpath: string-length function must have at least one parameter") + } + arg1, err := b.processNode(root.Args[0], flagsEnum.None, props) + if err != nil { + return nil, err + } + qyOutput = &functionQuery{Func: stringLengthFunc(arg1)} + case "normalize-space": + var arg node + if len(root.Args) > 0 { + arg = root.Args[0] + } else { + arg = newAxisNode("self", allNode, "", "", "", nil) + } + arg1, err := b.processNode(arg, flagsEnum.None, props) + if err != nil { + return nil, err + } + qyOutput = &functionQuery{Func: normalizespaceFunc(arg1)} + case "replace": + //replace( string , string, string ) + if len(root.Args) != 3 { + return nil, errors.New("xpath: replace function must have three parameters") + } + var ( + arg1, arg2, arg3 query + err error + ) + if arg1, err = b.processNode(root.Args[0], flagsEnum.None, props); err != nil { + return nil, err + } + if arg2, err = b.processNode(root.Args[1], flagsEnum.None, props); err != nil { + return nil, err + } + if arg3, err = b.processNode(root.Args[2], flagsEnum.None, props); err != nil { + return nil, err + } + qyOutput = &functionQuery{Func: replaceFunc(arg1, arg2, arg3)} + case "translate": + //translate( string , string, string ) + if len(root.Args) != 3 { + return nil, errors.New("xpath: translate function must have three parameters") + } + var ( + arg1, arg2, arg3 query + err error + ) + if arg1, err = b.processNode(root.Args[0], flagsEnum.None, props); err != nil { + return nil, err + } + if arg2, err = b.processNode(root.Args[1], flagsEnum.None, props); err != nil { + return nil, err + } + if arg3, err = b.processNode(root.Args[2], flagsEnum.None, props); err != nil { + return nil, err + } + qyOutput = &functionQuery{Func: translateFunc(arg1, arg2, arg3)} + case "not": + if len(root.Args) == 0 { + return nil, errors.New("xpath: not function must have at least one parameter") + } + argQuery, err := b.processNode(root.Args[0], flagsEnum.None, props) + if err != nil { + return nil, err + } + qyOutput = &functionQuery{Func: notFunc(argQuery)} + case "name", "local-name", "namespace-uri": + if len(root.Args) > 1 { + return nil, fmt.Errorf("xpath: %s function must have at most one 
parameter", root.FuncName) + } + var ( + arg query + err error + ) + if len(root.Args) == 1 { + arg, err = b.processNode(root.Args[0], flagsEnum.None, props) + if err != nil { + return nil, err + } + } + switch root.FuncName { + case "name": + qyOutput = &functionQuery{Func: nameFunc(arg)} + case "local-name": + qyOutput = &functionQuery{Func: localNameFunc(arg)} + case "namespace-uri": + qyOutput = &functionQuery{Func: namespaceFunc(arg)} + } + case "true", "false": + val := root.FuncName == "true" + qyOutput = &functionQuery{ + Func: func(_ query, _ iterator) interface{} { + return val + }, + } + case "last": + qyOutput = &functionQuery{Input: b.firstInput, Func: lastFunc()} + *props |= builderProps.HasLast + case "position": + qyOutput = &functionQuery{Input: b.firstInput, Func: positionFunc()} + *props |= builderProps.HasPosition + case "boolean", "number", "string": + var inp query + if len(root.Args) > 1 { + return nil, fmt.Errorf("xpath: %s function must have at most one parameter", root.FuncName) + } + if len(root.Args) == 1 { + argQuery, err := b.processNode(root.Args[0], flagsEnum.None, props) + if err != nil { + return nil, err + } + inp = argQuery + } + switch root.FuncName { + case "boolean": + qyOutput = &functionQuery{Func: booleanFunc(inp)} + case "string": + qyOutput = &functionQuery{Func: stringFunc(inp)} + case "number": + qyOutput = &functionQuery{Func: numberFunc(inp)} + } + case "count": + if len(root.Args) == 0 { + return nil, fmt.Errorf("xpath: count(node-sets) function must with have parameters node-sets") + } + argQuery, err := b.processNode(root.Args[0], flagsEnum.None, props) + if err != nil { + return nil, err + } + qyOutput = &functionQuery{Func: countFunc(argQuery)} + case "sum": + if len(root.Args) == 0 { + return nil, fmt.Errorf("xpath: sum(node-sets) function must with have parameters node-sets") + } + argQuery, err := b.processNode(root.Args[0], flagsEnum.None, props) + if err != nil { + return nil, err + } + qyOutput = &functionQuery{Func: sumFunc(argQuery)} + case "ceiling", "floor", "round": + if len(root.Args) == 0 { + return nil, fmt.Errorf("xpath: ceiling(node-sets) function must with have parameters node-sets") + } + argQuery, err := b.processNode(root.Args[0], flagsEnum.None, props) + if err != nil { + return nil, err + } + switch root.FuncName { + case "ceiling": + qyOutput = &functionQuery{Func: ceilingFunc(argQuery)} + case "floor": + qyOutput = &functionQuery{Func: floorFunc(argQuery)} + case "round": + qyOutput = &functionQuery{Func: roundFunc(argQuery)} + } + case "concat": + if len(root.Args) < 2 { + return nil, fmt.Errorf("xpath: concat() must have at least two arguments") + } + var args []query + for _, v := range root.Args { + q, err := b.processNode(v, flagsEnum.None, props) + if err != nil { + return nil, err + } + args = append(args, q) + } + qyOutput = &functionQuery{Func: concatFunc(args...)} + case "reverse": + if len(root.Args) == 0 { + return nil, fmt.Errorf("xpath: reverse(node-sets) function must with have parameters node-sets") + } + argQuery, err := b.processNode(root.Args[0], flagsEnum.None, props) + if err != nil { + return nil, err + } + qyOutput = &transformFunctionQuery{Input: argQuery, Func: reverseFunc} + case "string-join": + if len(root.Args) != 2 { + return nil, fmt.Errorf("xpath: string-join(node-sets, separator) function requires node-set and argument") + } + input, err := b.processNode(root.Args[0], flagsEnum.None, props) + if err != nil { + return nil, err + } + arg1, err := b.processNode(root.Args[1], 
flagsEnum.None, props) + if err != nil { + return nil, err + } + qyOutput = &functionQuery{Func: stringJoinFunc(input, arg1)} + default: + return nil, fmt.Errorf("not yet support this function %s()", root.FuncName) + } + return qyOutput, nil +} + +func (b *builder) processOperator(root *operatorNode, props *builderProp) (query, error) { + var ( + leftProp builderProp + rightProp builderProp + ) + + left, err := b.processNode(root.Left, flagsEnum.None, &leftProp) + if err != nil { + return nil, err + } + right, err := b.processNode(root.Right, flagsEnum.None, &rightProp) + if err != nil { + return nil, err + } + *props = leftProp | rightProp + + var qyOutput query + switch root.Op { + case "+", "-", "*", "div", "mod": // Numeric operator + var exprFunc func(iterator, interface{}, interface{}) interface{} + switch root.Op { + case "+": + exprFunc = plusFunc + case "-": + exprFunc = minusFunc + case "*": + exprFunc = mulFunc + case "div": + exprFunc = divFunc + case "mod": + exprFunc = modFunc + } + qyOutput = &numericQuery{Left: left, Right: right, Do: exprFunc} + case "=", ">", ">=", "<", "<=", "!=": + var exprFunc func(iterator, interface{}, interface{}) interface{} + switch root.Op { + case "=": + exprFunc = eqFunc + case ">": + exprFunc = gtFunc + case ">=": + exprFunc = geFunc + case "<": + exprFunc = ltFunc + case "<=": + exprFunc = leFunc + case "!=": + exprFunc = neFunc + } + qyOutput = &logicalQuery{Left: left, Right: right, Do: exprFunc} + case "or", "and": + isOr := false + if root.Op == "or" { + isOr = true + } + qyOutput = &booleanQuery{Left: left, Right: right, IsOr: isOr} + case "|": + *props |= builderProps.NonFlat + qyOutput = &unionQuery{Left: left, Right: right} + } + return qyOutput, nil +} + +func (b *builder) processNode(root node, flags flag, props *builderProp) (q query, err error) { + if b.parseDepth = b.parseDepth + 1; b.parseDepth > 1024 { + err = errors.New("the xpath expressions is too complex") + return + } + *props = builderProps.None + switch root.Type() { + case nodeConstantOperand: + n := root.(*operandNode) + q = &constantQuery{Val: n.Val} + case nodeRoot: + q = &absoluteQuery{} + case nodeAxis: + q, err = b.processAxis(root.(*axisNode), flags, props) + b.firstInput = q + case nodeFilter: + q, err = b.processFilter(root.(*filterNode), flags, props) + b.firstInput = q + case nodeFunction: + q, err = b.processFunction(root.(*functionNode), props) + case nodeOperator: + q, err = b.processOperator(root.(*operatorNode), props) + case nodeGroup: + q, err = b.processNode(root.(*groupNode).Input, flagsEnum.None, props) + if err != nil { + return + } + q = &groupQuery{Input: q} + if b.firstInput == nil { + b.firstInput = q + } + } + b.parseDepth-- + return +} + +// build builds a specified XPath expressions expr. 
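+// It compiles expr into a query tree; parse errors surface as panics inside
+// parse/processNode and are converted to ordinary errors by the deferred
+// recover below. A hedged sketch (the expression is illustrative, not taken
+// from this package's tests):
+//
+//	q, err := build("//book[@lang='en']/title", nil)
+//	// on a malformed expression such as "//book[" q is nil and err is set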
+func build(expr string, namespaces map[string]string) (q query, err error) { + defer func() { + if e := recover(); e != nil { + switch x := e.(type) { + case string: + err = errors.New(x) + case error: + err = x + default: + err = errors.New("unknown panic") + } + } + }() + root := parse(expr, namespaces) + b := &builder{} + props := builderProps.None + return b.processNode(root, flagsEnum.None, &props) +} diff --git a/vendor/github.com/antchfx/xpath/cache.go b/vendor/github.com/antchfx/xpath/cache.go new file mode 100644 index 00000000000..31a2b335656 --- /dev/null +++ b/vendor/github.com/antchfx/xpath/cache.go @@ -0,0 +1,80 @@ +package xpath + +import ( + "regexp" + "sync" +) + +type loadFunc func(key interface{}) (interface{}, error) + +const ( + defaultCap = 65536 +) + +// The reason we're building a simple capacity-resetting loading cache (when capacity reached) instead of using +// something like github.com/hashicorp/golang-lru is primarily due to (not wanting to create) external dependency. +// Currently this library has 0 external dep (other than go sdk), and supports go 1.6, 1.9, and 1.10 (and later). +// Creating external lib dependencies (plus their transitive dependencies) would make things hard if not impossible. +// We expect under most circumstances, the defaultCap is big enough for any long running services that use this +// library if their xpath regexp cardinality is low. However, in extreme cases when the capacity is reached, we +// simply reset the cache, taking a small subsequent perf hit (next to nothing considering amortization) in trade +// of more complex and less performant LRU type of construct. +type loadingCache struct { + sync.RWMutex + cap int + load loadFunc + m map[interface{}]interface{} + reset int +} + +// NewLoadingCache creates a new instance of a loading cache with capacity. Capacity must be >= 0, or +// it will panic. Capacity == 0 means the cache growth is unbounded. +func NewLoadingCache(load loadFunc, capacity int) *loadingCache { + if capacity < 0 { + panic("capacity must be >= 0") + } + return &loadingCache{cap: capacity, load: load, m: make(map[interface{}]interface{})} +} + +func (c *loadingCache) get(key interface{}) (interface{}, error) { + c.RLock() + v, found := c.m[key] + c.RUnlock() + if found { + return v, nil + } + v, err := c.load(key) + if err != nil { + return nil, err + } + c.Lock() + if c.cap > 0 && len(c.m) >= c.cap { + c.m = map[interface{}]interface{}{key: v} + c.reset++ + } else { + c.m[key] = v + } + c.Unlock() + return v, nil +} + +var ( + // RegexpCache is a loading cache for string -> *regexp.Regexp mapping. It is exported so that in rare cases + // client can customize load func and/or capacity. 
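+	// Overriding is a hedged, package-internal affair (the 1<<20 capacity is
+	// illustrative only) and should happen before expressions are evaluated
+	// concurrently, since getRegexp reads this variable:
+	//
+	//	RegexpCache = NewLoadingCache(
+	//		func(key interface{}) (interface{}, error) {
+	//			return regexp.Compile(key.(string))
+	//		}, 1<<20)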
+ RegexpCache = defaultRegexpCache() +) + +func defaultRegexpCache() *loadingCache { + return NewLoadingCache( + func(key interface{}) (interface{}, error) { + return regexp.Compile(key.(string)) + }, defaultCap) +} + +func getRegexp(pattern string) (*regexp.Regexp, error) { + exp, err := RegexpCache.get(pattern) + if err != nil { + return nil, err + } + return exp.(*regexp.Regexp), nil +} diff --git a/vendor/github.com/antchfx/xpath/func.go b/vendor/github.com/antchfx/xpath/func.go new file mode 100644 index 00000000000..4079a194f58 --- /dev/null +++ b/vendor/github.com/antchfx/xpath/func.go @@ -0,0 +1,679 @@ +package xpath + +import ( + "errors" + "fmt" + "math" + "strconv" + "strings" + "sync" + "unicode" +) + +// Defined an interface of stringBuilder that compatible with +// strings.Builder(go 1.10) and bytes.Buffer(< go 1.10) +type stringBuilder interface { + WriteRune(r rune) (n int, err error) + WriteString(s string) (int, error) + Reset() + Grow(n int) + String() string +} + +var builderPool = sync.Pool{New: func() interface{} { + return newStringBuilder() +}} + +// The XPath function list. + +func predicate(q query) func(NodeNavigator) bool { + type Predicater interface { + Test(NodeNavigator) bool + } + if p, ok := q.(Predicater); ok { + return p.Test + } + return func(NodeNavigator) bool { return true } +} + +// positionFunc is a XPath Node Set functions position(). +func positionFunc() func(query, iterator) interface{} { + return func(q query, t iterator) interface{} { + var ( + count = 1 + node = t.Current().Copy() + ) + test := predicate(q) + for node.MoveToPrevious() { + if test(node) { + count++ + } + } + return float64(count) + } +} + +// lastFunc is a XPath Node Set functions last(). +func lastFunc() func(query, iterator) interface{} { + return func(q query, t iterator) interface{} { + var ( + count = 0 + node = t.Current().Copy() + ) + test := predicate(q) + node.MoveToFirst() + for { + if test(node) { + count++ + } + if !node.MoveToNext() { + break + } + } + return float64(count) + } +} + +// countFunc is a XPath Node Set functions count(node-set). +func countFunc(arg query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + var count = 0 + q := functionArgs(arg) + test := predicate(q) + switch typ := q.Evaluate(t).(type) { + case query: + for node := typ.Select(t); node != nil; node = typ.Select(t) { + if test(node) { + count++ + } + } + } + return float64(count) + } +} + +// sumFunc is a XPath Node Set functions sum(node-set). +func sumFunc(arg query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + var sum float64 + switch typ := functionArgs(arg).Evaluate(t).(type) { + case query: + for node := typ.Select(t); node != nil; node = typ.Select(t) { + if v, err := strconv.ParseFloat(node.Value(), 64); err == nil { + sum += v + } + } + case float64: + sum = typ + case string: + v, err := strconv.ParseFloat(typ, 64) + if err != nil { + panic(errors.New("sum() function argument type must be a node-set or number")) + } + sum = v + } + return sum + } +} + +func asNumber(t iterator, o interface{}) float64 { + switch typ := o.(type) { + case query: + node := typ.Select(t) + if node == nil { + return math.NaN() + } + if v, err := strconv.ParseFloat(node.Value(), 64); err == nil { + return v + } + case float64: + return typ + case string: + v, err := strconv.ParseFloat(typ, 64) + if err == nil { + return v + } + } + return math.NaN() +} + +// ceilingFunc is a XPath Node Set functions ceiling(node-set). 
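+// Per XPath 1.0, ceiling() returns the smallest integer not less than the
+// argument: ceiling(2.1) = 3 and ceiling(-2.1) = -2. A non-numeric argument
+// yields NaN here rather than an error (note the commented-out panic below).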
+func ceilingFunc(arg query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + val := asNumber(t, functionArgs(arg).Evaluate(t)) + // if math.IsNaN(val) { + // panic(errors.New("ceiling() function argument type must be a valid number")) + // } + return math.Ceil(val) + } +} + +// floorFunc is a XPath Node Set functions floor(node-set). +func floorFunc(arg query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + val := asNumber(t, functionArgs(arg).Evaluate(t)) + return math.Floor(val) + } +} + +// roundFunc is a XPath Node Set functions round(node-set). +func roundFunc(arg query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + val := asNumber(t, functionArgs(arg).Evaluate(t)) + //return math.Round(val) + return round(val) + } +} + +// nameFunc is a XPath functions name([node-set]). +func nameFunc(arg query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + var v NodeNavigator + if arg == nil { + v = t.Current() + } else { + v = arg.Clone().Select(t) + if v == nil { + return "" + } + } + ns := v.Prefix() + if ns == "" { + return v.LocalName() + } + return ns + ":" + v.LocalName() + } +} + +// localNameFunc is a XPath functions local-name([node-set]). +func localNameFunc(arg query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + var v NodeNavigator + if arg == nil { + v = t.Current() + } else { + v = arg.Clone().Select(t) + if v == nil { + return "" + } + } + return v.LocalName() + } +} + +// namespaceFunc is a XPath functions namespace-uri([node-set]). +func namespaceFunc(arg query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + var v NodeNavigator + if arg == nil { + v = t.Current() + } else { + // Get the first node in the node-set if specified. + v = arg.Clone().Select(t) + if v == nil { + return "" + } + } + // fix about namespace-uri() bug: https://github.com/antchfx/xmlquery/issues/22 + // TODO: In the next version, add NamespaceURL() to the NodeNavigator interface. + type namespaceURL interface { + NamespaceURL() string + } + if f, ok := v.(namespaceURL); ok { + return f.NamespaceURL() + } + return v.Prefix() + } +} + +func asBool(t iterator, v interface{}) bool { + switch v := v.(type) { + case nil: + return false + case *NodeIterator: + return v.MoveNext() + case bool: + return v + case float64: + return v != 0 + case string: + return v != "" + case query: + return v.Select(t) != nil + default: + panic(fmt.Errorf("unexpected type: %T", v)) + } +} + +func asString(t iterator, v interface{}) string { + switch v := v.(type) { + case nil: + return "" + case bool: + if v { + return "true" + } + return "false" + case float64: + return strconv.FormatFloat(v, 'g', -1, 64) + case string: + return v + case query: + node := v.Select(t) + if node == nil { + return "" + } + return node.Value() + default: + panic(fmt.Errorf("unexpected type: %T", v)) + } +} + +// booleanFunc is a XPath functions boolean([node-set]). +func booleanFunc(arg1 query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + v := functionArgs(arg1).Evaluate(t) + return asBool(t, v) + } +} + +// numberFunc is a XPath functions number([node-set]). 
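+// Conversion follows asNumber above: number('12.5') = 12.5,
+// number('abc') = NaN, and a node-set argument is converted through the
+// string-value of its first selected node.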
+func numberFunc(arg1 query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + v := functionArgs(arg1).Evaluate(t) + return asNumber(t, v) + } +} + +// stringFunc is a XPath functions string([node-set]). +func stringFunc(arg1 query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + v := functionArgs(arg1).Evaluate(t) + return asString(t, v) + } +} + +// startwithFunc is a XPath functions starts-with(string, string). +func startwithFunc(arg1, arg2 query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + var ( + m, n string + ok bool + ) + switch typ := functionArgs(arg1).Evaluate(t).(type) { + case string: + m = typ + case query: + node := typ.Select(t) + if node == nil { + return false + } + m = node.Value() + default: + panic(errors.New("starts-with() function argument type must be string")) + } + n, ok = functionArgs(arg2).Evaluate(t).(string) + if !ok { + panic(errors.New("starts-with() function argument type must be string")) + } + return strings.HasPrefix(m, n) + } +} + +// endwithFunc is a XPath functions ends-with(string, string). +func endwithFunc(arg1, arg2 query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + var ( + m, n string + ok bool + ) + switch typ := functionArgs(arg1).Evaluate(t).(type) { + case string: + m = typ + case query: + node := typ.Select(t) + if node == nil { + return false + } + m = node.Value() + default: + panic(errors.New("ends-with() function argument type must be string")) + } + n, ok = functionArgs(arg2).Evaluate(t).(string) + if !ok { + panic(errors.New("ends-with() function argument type must be string")) + } + return strings.HasSuffix(m, n) + } +} + +// containsFunc is a XPath functions contains(string or @attr, string). +func containsFunc(arg1, arg2 query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + var ( + m, n string + ok bool + ) + switch typ := functionArgs(arg1).Evaluate(t).(type) { + case string: + m = typ + case query: + node := typ.Select(t) + if node == nil { + return false + } + m = node.Value() + default: + panic(errors.New("contains() function argument type must be string")) + } + + n, ok = functionArgs(arg2).Evaluate(t).(string) + if !ok { + panic(errors.New("contains() function argument type must be string")) + } + + return strings.Contains(m, n) + } +} + +// matchesFunc is an XPath function that tests a given string against a regexp pattern. +// Note: does not support https://www.w3.org/TR/xpath-functions-31/#func-matches 3rd optional `flags` argument; if +// needed, directly put flags in the regexp pattern, such as `(?i)^pattern$` for `i` flag. +func matchesFunc(arg1, arg2 query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + var s string + switch typ := functionArgs(arg1).Evaluate(t).(type) { + case string: + s = typ + case query: + node := typ.Select(t) + if node == nil { + return "" + } + s = node.Value() + } + var pattern string + var ok bool + if pattern, ok = functionArgs(arg2).Evaluate(t).(string); !ok { + panic(errors.New("matches() function second argument type must be string")) + } + re, err := getRegexp(pattern) + if err != nil { + panic(fmt.Errorf("matches() function second argument is not a valid regexp pattern, err: %s", err.Error())) + } + return re.MatchString(s) + } +} + +// normalizespaceFunc is XPath functions normalize-space(string?) 
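+// It returns the argument with leading and trailing whitespace stripped and
+// internal whitespace runs collapsed to single spaces, e.g.
+// normalize-space('  a  b ') = 'a b'; when called with no argument the
+// builder substitutes the context node (self::node()).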
+func normalizespaceFunc(arg1 query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + var m string + switch typ := functionArgs(arg1).Evaluate(t).(type) { + case string: + m = typ + case query: + node := typ.Select(t) + if node == nil { + return "" + } + m = node.Value() + } + var b = builderPool.Get().(stringBuilder) + b.Grow(len(m)) + + runeStr := []rune(strings.TrimSpace(m)) + l := len(runeStr) + for i := range runeStr { + r := runeStr[i] + isSpace := unicode.IsSpace(r) + if !(isSpace && (i+1 < l && unicode.IsSpace(runeStr[i+1]))) { + if isSpace { + r = ' ' + } + b.WriteRune(r) + } + } + result := b.String() + b.Reset() + builderPool.Put(b) + + return result + } +} + +// substringFunc is XPath functions substring function returns a part of a given string. +func substringFunc(arg1, arg2, arg3 query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + var m string + switch typ := functionArgs(arg1).Evaluate(t).(type) { + case string: + m = typ + case query: + node := typ.Select(t) + if node == nil { + return "" + } + m = node.Value() + } + + var start, length float64 + var ok bool + + if start, ok = functionArgs(arg2).Evaluate(t).(float64); !ok { + panic(errors.New("substring() function first argument type must be int")) + } else if start < 1 { + panic(errors.New("substring() function first argument type must be >= 1")) + } + start-- + if arg3 != nil { + if length, ok = functionArgs(arg3).Evaluate(t).(float64); !ok { + panic(errors.New("substring() function second argument type must be int")) + } + } + if (len(m) - int(start)) < int(length) { + panic(errors.New("substring() function start and length argument out of range")) + } + if length > 0 { + return m[int(start):int(length+start)] + } + return m[int(start):] + } +} + +// substringIndFunc is XPath functions substring-before/substring-after function returns a part of a given string. +func substringIndFunc(arg1, arg2 query, after bool) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + var str string + switch v := functionArgs(arg1).Evaluate(t).(type) { + case string: + str = v + case query: + node := v.Select(t) + if node == nil { + return "" + } + str = node.Value() + } + var word string + switch v := functionArgs(arg2).Evaluate(t).(type) { + case string: + word = v + case query: + node := v.Select(t) + if node == nil { + return "" + } + word = node.Value() + } + if word == "" { + return "" + } + + i := strings.Index(str, word) + if i < 0 { + return "" + } + if after { + return str[i+len(word):] + } + return str[:i] + } +} + +// stringLengthFunc is XPATH string-length( [string] ) function that returns a number +// equal to the number of characters in a given string. +func stringLengthFunc(arg1 query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + switch v := functionArgs(arg1).Evaluate(t).(type) { + case string: + return float64(len(v)) + case query: + node := v.Select(t) + if node == nil { + break + } + return float64(len(node.Value())) + } + return float64(0) + } +} + +// translateFunc is XPath functions translate() function returns a replaced string. 
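+// Characters of the first argument that occur in the second are replaced by
+// the character at the same position in the third, or dropped when the third
+// is shorter; the XPath 1.0 spec examples hold here:
+// translate('bar','abc','ABC') = 'BAr' and
+// translate('--aaa--','abc-','ABC') = 'AAA'.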
+func translateFunc(arg1, arg2, arg3 query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + str := asString(t, functionArgs(arg1).Evaluate(t)) + src := asString(t, functionArgs(arg2).Evaluate(t)) + dst := asString(t, functionArgs(arg3).Evaluate(t)) + + replace := make([]string, 0, len(src)) + for i, s := range src { + d := "" + if i < len(dst) { + d = string(dst[i]) + } + replace = append(replace, string(s), d) + } + return strings.NewReplacer(replace...).Replace(str) + } +} + +// replaceFunc is XPath functions replace() function returns a replaced string. +func replaceFunc(arg1, arg2, arg3 query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + str := asString(t, functionArgs(arg1).Evaluate(t)) + src := asString(t, functionArgs(arg2).Evaluate(t)) + dst := asString(t, functionArgs(arg3).Evaluate(t)) + + return strings.Replace(str, src, dst, -1) + } +} + +// notFunc is XPATH functions not(expression) function operation. +func notFunc(arg1 query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + switch v := functionArgs(arg1).Evaluate(t).(type) { + case bool: + return !v + case query: + node := v.Select(t) + return node == nil + default: + return false + } + } +} + +// concatFunc is the concat function concatenates two or more +// strings and returns the resulting string. +// concat( string1 , string2 [, stringn]* ) +func concatFunc(args ...query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + b := builderPool.Get().(stringBuilder) + for _, v := range args { + v = functionArgs(v) + + switch v := v.Evaluate(t).(type) { + case string: + b.WriteString(v) + case query: + node := v.Select(t) + if node != nil { + b.WriteString(node.Value()) + } + } + } + result := b.String() + b.Reset() + builderPool.Put(b) + + return result + } +} + +// https://github.com/antchfx/xpath/issues/43 +func functionArgs(q query) query { + if _, ok := q.(*functionQuery); ok { + return q + } + return q.Clone() +} + +func reverseFunc(q query, t iterator) func() NodeNavigator { + var list []NodeNavigator + for { + node := q.Select(t) + if node == nil { + break + } + list = append(list, node.Copy()) + } + i := len(list) + return func() NodeNavigator { + if i <= 0 { + return nil + } + i-- + node := list[i] + return node + } +} + +// string-join is a XPath Node Set functions string-join(node-set, separator). +func stringJoinFunc(q, arg1 query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + var separator string + switch v := functionArgs(arg1).Evaluate(t).(type) { + case string: + separator = v + case query: + node := v.Select(t) + if node != nil { + separator = node.Value() + } + } + + q = functionArgs(q) + test := predicate(q) + var parts []string + switch v := q.Evaluate(t).(type) { + case string: + return v + case query: + for node := v.Select(t); node != nil; node = v.Select(t) { + if test(node) { + parts = append(parts, node.Value()) + } + } + } + return strings.Join(parts, separator) + } +} + +// lower-case is XPATH function that converts a string to lower case. 
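+// lower-case() is an XPath 2.0 function exposed here as an extension:
+// lower-case('ABc!D') = 'abc!d'; non-letters pass through unchanged, per
+// strings.ToLower.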
+func lowerCaseFunc(arg1 query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + v := functionArgs(arg1).Evaluate(t) + return strings.ToLower(asString(t, v)) + } +} diff --git a/vendor/github.com/antchfx/xpath/func_go110.go b/vendor/github.com/antchfx/xpath/func_go110.go new file mode 100644 index 00000000000..d6ca4513931 --- /dev/null +++ b/vendor/github.com/antchfx/xpath/func_go110.go @@ -0,0 +1,16 @@ +// +build go1.10 + +package xpath + +import ( + "math" + "strings" +) + +func round(f float64) int { + return int(math.Round(f)) +} + +func newStringBuilder() stringBuilder { + return &strings.Builder{} +} diff --git a/vendor/github.com/antchfx/xpath/func_pre_go110.go b/vendor/github.com/antchfx/xpath/func_pre_go110.go new file mode 100644 index 00000000000..335141f79f5 --- /dev/null +++ b/vendor/github.com/antchfx/xpath/func_pre_go110.go @@ -0,0 +1,22 @@ +// +build !go1.10 + +package xpath + +import ( + "bytes" + "math" +) + +// math.Round() is supported by Go 1.10+, +// This method just compatible for version <1.10. +// https://github.com/golang/go/issues/20100 +func round(f float64) int { + if math.Abs(f) < 0.5 { + return 0 + } + return int(f + math.Copysign(0.5, f)) +} + +func newStringBuilder() stringBuilder { + return &bytes.Buffer{} +} diff --git a/vendor/github.com/antchfx/xpath/operator.go b/vendor/github.com/antchfx/xpath/operator.go new file mode 100644 index 00000000000..2820152b369 --- /dev/null +++ b/vendor/github.com/antchfx/xpath/operator.go @@ -0,0 +1,288 @@ +package xpath + +import ( + "strconv" +) + +// The XPath number operator function list. + +type logical func(iterator, string, interface{}, interface{}) bool + +var logicalFuncs = [][]logical{ + {cmpBooleanBoolean, nil, nil, nil}, + {nil, cmpNumericNumeric, cmpNumericString, cmpNumericNodeSet}, + {nil, cmpStringNumeric, cmpStringString, cmpStringNodeSet}, + {nil, cmpNodeSetNumeric, cmpNodeSetString, cmpNodeSetNodeSet}, +} + +// number vs number +func cmpNumberNumberF(op string, a, b float64) bool { + switch op { + case "=": + return a == b + case ">": + return a > b + case "<": + return a < b + case ">=": + return a >= b + case "<=": + return a <= b + case "!=": + return a != b + } + return false +} + +// string vs string +func cmpStringStringF(op string, a, b string) bool { + switch op { + case "=": + return a == b + case ">": + return a > b + case "<": + return a < b + case ">=": + return a >= b + case "<=": + return a <= b + case "!=": + return a != b + } + return false +} + +func cmpBooleanBooleanF(op string, a, b bool) bool { + switch op { + case "or": + return a || b + case "and": + return a && b + } + return false +} + +func cmpNumericNumeric(t iterator, op string, m, n interface{}) bool { + a := m.(float64) + b := n.(float64) + return cmpNumberNumberF(op, a, b) +} + +func cmpNumericString(t iterator, op string, m, n interface{}) bool { + a := m.(float64) + b := n.(string) + num, err := strconv.ParseFloat(b, 64) + if err != nil { + panic(err) + } + return cmpNumberNumberF(op, a, num) +} + +func cmpNumericNodeSet(t iterator, op string, m, n interface{}) bool { + a := m.(float64) + b := n.(query) + + for { + node := b.Select(t) + if node == nil { + break + } + num, err := strconv.ParseFloat(node.Value(), 64) + if err != nil { + panic(err) + } + if cmpNumberNumberF(op, a, num) { + return true + } + } + return false +} + +func cmpNodeSetNumeric(t iterator, op string, m, n interface{}) bool { + a := m.(query) + b := n.(float64) + for { + node := a.Select(t) + if node == nil { + 
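+			// Node-set exhausted: comparisons against a node-set are
+			// existential in XPath, so false is reported only after every
+			// node has been tested without a match.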
break + } + num, err := strconv.ParseFloat(node.Value(), 64) + if err != nil { + panic(err) + } + if cmpNumberNumberF(op, num, b) { + return true + } + } + return false +} + +func cmpNodeSetString(t iterator, op string, m, n interface{}) bool { + a := m.(query) + b := n.(string) + for { + node := a.Select(t) + if node == nil { + break + } + if cmpStringStringF(op, b, node.Value()) { + return true + } + } + return false +} + +func cmpNodeSetNodeSet(t iterator, op string, m, n interface{}) bool { + a := m.(query) + b := n.(query) + for { + x := a.Select(t) + if x == nil { + return false + } + + y := b.Select(t) + if y == nil { + return false + } + + for { + if cmpStringStringF(op, x.Value(), y.Value()) { + return true + } + if y = b.Select(t); y == nil { + break + } + } + // reset + b.Evaluate(t) + } +} + +func cmpStringNumeric(t iterator, op string, m, n interface{}) bool { + a := m.(string) + b := n.(float64) + num, err := strconv.ParseFloat(a, 64) + if err != nil { + panic(err) + } + return cmpNumberNumberF(op, b, num) +} + +func cmpStringString(t iterator, op string, m, n interface{}) bool { + a := m.(string) + b := n.(string) + return cmpStringStringF(op, a, b) +} + +func cmpStringNodeSet(t iterator, op string, m, n interface{}) bool { + a := m.(string) + b := n.(query) + for { + node := b.Select(t) + if node == nil { + break + } + if cmpStringStringF(op, a, node.Value()) { + return true + } + } + return false +} + +func cmpBooleanBoolean(t iterator, op string, m, n interface{}) bool { + a := m.(bool) + b := n.(bool) + return cmpBooleanBooleanF(op, a, b) +} + +// eqFunc is an `=` operator. +func eqFunc(t iterator, m, n interface{}) interface{} { + t1 := getXPathType(m) + t2 := getXPathType(n) + return logicalFuncs[t1][t2](t, "=", m, n) +} + +// gtFunc is an `>` operator. +func gtFunc(t iterator, m, n interface{}) interface{} { + t1 := getXPathType(m) + t2 := getXPathType(n) + return logicalFuncs[t1][t2](t, ">", m, n) +} + +// geFunc is an `>=` operator. +func geFunc(t iterator, m, n interface{}) interface{} { + t1 := getXPathType(m) + t2 := getXPathType(n) + return logicalFuncs[t1][t2](t, ">=", m, n) +} + +// ltFunc is an `<` operator. +func ltFunc(t iterator, m, n interface{}) interface{} { + t1 := getXPathType(m) + t2 := getXPathType(n) + return logicalFuncs[t1][t2](t, "<", m, n) +} + +// leFunc is an `<=` operator. +func leFunc(t iterator, m, n interface{}) interface{} { + t1 := getXPathType(m) + t2 := getXPathType(n) + return logicalFuncs[t1][t2](t, "<=", m, n) +} + +// neFunc is an `!=` operator. +func neFunc(t iterator, m, n interface{}) interface{} { + t1 := getXPathType(m) + t2 := getXPathType(n) + return logicalFuncs[t1][t2](t, "!=", m, n) +} + +// orFunc is an `or` operator. +var orFunc = func(t iterator, m, n interface{}) interface{} { + t1 := getXPathType(m) + t2 := getXPathType(n) + return logicalFuncs[t1][t2](t, "or", m, n) +} + +func numericExpr(t iterator, m, n interface{}, cb func(float64, float64) float64) float64 { + a := asNumber(t, m) + b := asNumber(t, n) + return cb(a, b) +} + +// plusFunc is an `+` operator. +var plusFunc = func(t iterator, m, n interface{}) interface{} { + return numericExpr(t, m, n, func(a, b float64) float64 { + return a + b + }) +} + +// minusFunc is an `-` operator. +var minusFunc = func(t iterator, m, n interface{}) interface{} { + return numericExpr(t, m, n, func(a, b float64) float64 { + return a - b + }) +} + +// mulFunc is an `*` operator. 
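+// Like the other arithmetic operators it coerces both operands through
+// asNumber, so '3' * 4 evaluates to 12 and a non-numeric operand propagates
+// NaN.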
+var mulFunc = func(t iterator, m, n interface{}) interface{} { + return numericExpr(t, m, n, func(a, b float64) float64 { + return a * b + }) +} + +// divFunc is an `DIV` operator. +var divFunc = func(t iterator, m, n interface{}) interface{} { + return numericExpr(t, m, n, func(a, b float64) float64 { + return a / b + }) +} + +// modFunc is an 'MOD' operator. +var modFunc = func(t iterator, m, n interface{}) interface{} { + return numericExpr(t, m, n, func(a, b float64) float64 { + return float64(int(a) % int(b)) + }) +} diff --git a/vendor/github.com/antchfx/xpath/parse.go b/vendor/github.com/antchfx/xpath/parse.go new file mode 100644 index 00000000000..53931259115 --- /dev/null +++ b/vendor/github.com/antchfx/xpath/parse.go @@ -0,0 +1,1254 @@ +package xpath + +import ( + "bytes" + "errors" + "fmt" + "strconv" + "unicode" + "unicode/utf8" +) + +// A XPath expression token type. +type itemType int + +const ( + itemComma itemType = iota // ',' + itemSlash // '/' + itemAt // '@' + itemDot // '.' + itemLParens // '(' + itemRParens // ')' + itemLBracket // '[' + itemRBracket // ']' + itemStar // '*' + itemPlus // '+' + itemMinus // '-' + itemEq // '=' + itemLt // '<' + itemGt // '>' + itemBang // '!' + itemDollar // '$' + itemApos // '\'' + itemQuote // '"' + itemUnion // '|' + itemNe // '!=' + itemLe // '<=' + itemGe // '>=' + itemAnd // '&&' + itemOr // '||' + itemDotDot // '..' + itemSlashSlash // '//' + itemName // XML Name + itemString // Quoted string constant + itemNumber // Number constant + itemAxe // Axe (like child::) + itemEOF // END +) + +// A node is an XPath node in the parse tree. +type node interface { + Type() nodeType +} + +// nodeType identifies the type of a parse tree node. +type nodeType int + +func (t nodeType) Type() nodeType { + return t +} + +const ( + nodeRoot nodeType = iota + nodeAxis + nodeFilter + nodeFunction + nodeOperator + nodeVariable + nodeConstantOperand + nodeGroup +) + +type parser struct { + r *scanner + d int + namespaces map[string]string +} + +// newOperatorNode returns new operator node OperatorNode. +func newOperatorNode(op string, left, right node) node { + return &operatorNode{nodeType: nodeOperator, Op: op, Left: left, Right: right} +} + +// newOperand returns new constant operand node OperandNode. +func newOperandNode(v interface{}) node { + return &operandNode{nodeType: nodeConstantOperand, Val: v} +} + +// newAxisNode returns new axis node AxisNode. +func newAxisNode(axisType string, typeTest NodeType, localName, prefix, prop string, n node, opts ...func(p *axisNode)) node { + a := axisNode{ + nodeType: nodeAxis, + typeTest: typeTest, + LocalName: localName, + Prefix: prefix, + AxisType: axisType, + Prop: prop, + Input: n, + } + for _, o := range opts { + o(&a) + } + return &a +} + +// newVariableNode returns new variable node VariableNode. +func newVariableNode(prefix, name string) node { + return &variableNode{nodeType: nodeVariable, Name: name, Prefix: prefix} +} + +// newFilterNode returns a new filter node FilterNode. +func newFilterNode(n, m node) node { + return &filterNode{nodeType: nodeFilter, Input: n, Condition: m} +} + +func newGroupNode(n node) node { + return &groupNode{nodeType: nodeGroup, Input: n} +} + +// newRootNode returns a root node. +func newRootNode(s string) node { + return &rootNode{nodeType: nodeRoot, slash: s} +} + +// newFunctionNode returns function call node. 
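+// For example (illustrative, not from this package's tests), the expression
+// concat('a', 'b') parses to a functionNode with FuncName "concat" and two
+// constant operand arguments.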
+func newFunctionNode(name, prefix string, args []node) node { + return &functionNode{nodeType: nodeFunction, Prefix: prefix, FuncName: name, Args: args} +} + +// testOp reports whether current item name is an operand op. +func testOp(r *scanner, op string) bool { + return r.typ == itemName && r.prefix == "" && r.name == op +} + +func isPrimaryExpr(r *scanner) bool { + switch r.typ { + case itemString, itemNumber, itemDollar, itemLParens: + return true + case itemName: + return r.canBeFunc && !isNodeType(r) + } + return false +} + +func isNodeType(r *scanner) bool { + switch r.name { + case "node", "text", "processing-instruction", "comment": + return r.prefix == "" + } + return false +} + +func isStep(item itemType) bool { + switch item { + case itemDot, itemDotDot, itemAt, itemAxe, itemStar, itemName: + return true + } + return false +} + +func checkItem(r *scanner, typ itemType) { + if r.typ != typ { + panic(fmt.Sprintf("%s has an invalid token", r.text)) + } +} + +// parseExpression parsing the expression with input node n. +func (p *parser) parseExpression(n node) node { + if p.d = p.d + 1; p.d > 200 { + panic("the xpath query is too complex(depth > 200)") + } + n = p.parseOrExpr(n) + p.d-- + return n +} + +// next scanning next item on forward. +func (p *parser) next() bool { + return p.r.nextItem() +} + +func (p *parser) skipItem(typ itemType) { + checkItem(p.r, typ) + p.next() +} + +// OrExpr ::= AndExpr | OrExpr 'or' AndExpr +func (p *parser) parseOrExpr(n node) node { + opnd := p.parseAndExpr(n) + for { + if !testOp(p.r, "or") { + break + } + p.next() + opnd = newOperatorNode("or", opnd, p.parseAndExpr(n)) + } + return opnd +} + +// AndExpr ::= EqualityExpr | AndExpr 'and' EqualityExpr +func (p *parser) parseAndExpr(n node) node { + opnd := p.parseEqualityExpr(n) + for { + if !testOp(p.r, "and") { + break + } + p.next() + opnd = newOperatorNode("and", opnd, p.parseEqualityExpr(n)) + } + return opnd +} + +// EqualityExpr ::= RelationalExpr | EqualityExpr '=' RelationalExpr | EqualityExpr '!=' RelationalExpr +func (p *parser) parseEqualityExpr(n node) node { + opnd := p.parseRelationalExpr(n) +Loop: + for { + var op string + switch p.r.typ { + case itemEq: + op = "=" + case itemNe: + op = "!=" + default: + break Loop + } + p.next() + opnd = newOperatorNode(op, opnd, p.parseRelationalExpr(n)) + } + return opnd +} + +// RelationalExpr ::= AdditiveExpr | RelationalExpr '<' AdditiveExpr | RelationalExpr '>' AdditiveExpr +// +// | RelationalExpr '<=' AdditiveExpr +// | RelationalExpr '>=' AdditiveExpr +func (p *parser) parseRelationalExpr(n node) node { + opnd := p.parseAdditiveExpr(n) +Loop: + for { + var op string + switch p.r.typ { + case itemLt: + op = "<" + case itemGt: + op = ">" + case itemLe: + op = "<=" + case itemGe: + op = ">=" + default: + break Loop + } + p.next() + opnd = newOperatorNode(op, opnd, p.parseAdditiveExpr(n)) + } + return opnd +} + +// AdditiveExpr ::= MultiplicativeExpr | AdditiveExpr '+' MultiplicativeExpr | AdditiveExpr '-' MultiplicativeExpr +func (p *parser) parseAdditiveExpr(n node) node { + opnd := p.parseMultiplicativeExpr(n) +Loop: + for { + var op string + switch p.r.typ { + case itemPlus: + op = "+" + case itemMinus: + op = "-" + default: + break Loop + } + p.next() + opnd = newOperatorNode(op, opnd, p.parseMultiplicativeExpr(n)) + } + return opnd +} + +// MultiplicativeExpr ::= UnaryExpr | MultiplicativeExpr MultiplyOperator(*) UnaryExpr +// +// | MultiplicativeExpr 'div' UnaryExpr | MultiplicativeExpr 'mod' UnaryExpr +func (p *parser) 
parseMultiplicativeExpr(n node) node { + opnd := p.parseUnaryExpr(n) +Loop: + for { + var op string + if p.r.typ == itemStar { + op = "*" + } else if testOp(p.r, "div") || testOp(p.r, "mod") { + op = p.r.name + } else { + break Loop + } + p.next() + opnd = newOperatorNode(op, opnd, p.parseUnaryExpr(n)) + } + return opnd +} + +// UnaryExpr ::= UnionExpr | '-' UnaryExpr +func (p *parser) parseUnaryExpr(n node) node { + minus := false + // ignore '-' sequence + for p.r.typ == itemMinus { + p.next() + minus = !minus + } + opnd := p.parseUnionExpr(n) + if minus { + opnd = newOperatorNode("*", opnd, newOperandNode(float64(-1))) + } + return opnd +} + +// UnionExpr ::= PathExpr | UnionExpr '|' PathExpr +func (p *parser) parseUnionExpr(n node) node { + opnd := p.parsePathExpr(n) +Loop: + for { + if p.r.typ != itemUnion { + break Loop + } + p.next() + opnd2 := p.parsePathExpr(n) + // Checking the node type that must be is node set type? + opnd = newOperatorNode("|", opnd, opnd2) + } + return opnd +} + +// PathExpr ::= LocationPath | FilterExpr | FilterExpr '/' RelativeLocationPath | FilterExpr '//' RelativeLocationPath +func (p *parser) parsePathExpr(n node) node { + var opnd node + if isPrimaryExpr(p.r) { + opnd = p.parseFilterExpr(n) + switch p.r.typ { + case itemSlash: + p.next() + opnd = p.parseRelativeLocationPath(opnd) + case itemSlashSlash: + p.next() + opnd = p.parseRelativeLocationPath(newAxisNode("descendant-or-self", allNode, "", "", "", opnd)) + } + } else { + opnd = p.parseLocationPath(nil) + } + return opnd +} + +// FilterExpr ::= PrimaryExpr | FilterExpr Predicate +func (p *parser) parseFilterExpr(n node) node { + opnd := p.parsePrimaryExpr(n) + if p.r.typ == itemLBracket { + opnd = newFilterNode(opnd, p.parsePredicate(opnd)) + } + return opnd +} + +// Predicate ::= '[' PredicateExpr ']' +func (p *parser) parsePredicate(n node) node { + p.skipItem(itemLBracket) + opnd := p.parseExpression(n) + p.skipItem(itemRBracket) + return opnd +} + +// LocationPath ::= RelativeLocationPath | AbsoluteLocationPath +func (p *parser) parseLocationPath(n node) (opnd node) { + switch p.r.typ { + case itemSlash: + p.next() + opnd = newRootNode("/") + if isStep(p.r.typ) { + opnd = p.parseRelativeLocationPath(opnd) // ?? child:: or self ?? + } + case itemSlashSlash: + p.next() + opnd = newRootNode("//") + opnd = p.parseRelativeLocationPath(newAxisNode("descendant-or-self", allNode, "", "", "", opnd)) + default: + opnd = p.parseRelativeLocationPath(n) + } + return opnd +} + +// RelativeLocationPath ::= Step | RelativeLocationPath '/' Step | AbbreviatedRelativeLocationPath +func (p *parser) parseRelativeLocationPath(n node) node { + opnd := n +Loop: + for { + opnd = p.parseStep(opnd) + switch p.r.typ { + case itemSlashSlash: + p.next() + opnd = newAxisNode("descendant-or-self", allNode, "", "", "", opnd) + case itemSlash: + p.next() + default: + break Loop + } + } + return opnd +} + +// Step ::= AxisSpecifier NodeTest Predicate* | AbbreviatedStep +func (p *parser) parseStep(n node) (opnd node) { + if p.r.typ == itemDot || p.r.typ == itemDotDot { + if p.r.typ == itemDot { + opnd = newAxisNode("self", allNode, "", "", "", n) + } else { + opnd = newAxisNode("parent", allNode, "", "", "", n) + } + p.next() + if p.r.typ != itemLBracket { + return opnd + } + } else { + axisType := "child" // default axes value. 
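+		// Per the XPath abbreviated syntax, a bare name test defaults to the
+		// child axis, so "book/title" parses exactly like
+		// "child::book/child::title".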
+ switch p.r.typ { + case itemAt: + axisType = "attribute" + p.next() + case itemAxe: + axisType = p.r.name + p.next() + case itemLParens: + return p.parseSequence(n) + } + matchType := ElementNode + if axisType == "attribute" { + matchType = AttributeNode + } + opnd = p.parseNodeTest(n, axisType, matchType) + } + for p.r.typ == itemLBracket { + opnd = newFilterNode(opnd, p.parsePredicate(opnd)) + } + return opnd +} + +// Expr ::= '(' Step ("," Step)* ')' +func (p *parser) parseSequence(n node) (opnd node) { + p.skipItem(itemLParens) + opnd = p.parseStep(n) + for { + if p.r.typ != itemComma { + break + } + p.next() + opnd2 := p.parseStep(n) + opnd = newOperatorNode("|", opnd, opnd2) + } + p.skipItem(itemRParens) + return opnd +} + +// NodeTest ::= NameTest | nodeType '(' ')' | 'processing-instruction' '(' Literal ')' +func (p *parser) parseNodeTest(n node, axeTyp string, matchType NodeType) (opnd node) { + switch p.r.typ { + case itemName: + if p.r.canBeFunc && isNodeType(p.r) { + var prop string + switch p.r.name { + case "comment", "text", "processing-instruction", "node": + prop = p.r.name + } + var name string + p.next() + p.skipItem(itemLParens) + if prop == "processing-instruction" && p.r.typ != itemRParens { + checkItem(p.r, itemString) + name = p.r.strval + p.next() + } + p.skipItem(itemRParens) + switch prop { + case "comment": + matchType = CommentNode + case "text": + matchType = TextNode + case "processing-instruction": + case "node": + matchType = allNode + default: + matchType = RootNode + } + + opnd = newAxisNode(axeTyp, matchType, name, "", prop, n) + } else { + prefix := p.r.prefix + name := p.r.name + p.next() + if p.r.name == "*" { + name = "" + } + opnd = newAxisNode(axeTyp, matchType, name, prefix, "", n, func(a *axisNode) { + if prefix != "" && p.namespaces != nil { + if ns, ok := p.namespaces[prefix]; ok { + a.hasNamespaceURI = true + a.namespaceURI = ns + } else { + panic(fmt.Sprintf("prefix %s not defined.", prefix)) + } + } + }) + } + case itemStar: + opnd = newAxisNode(axeTyp, matchType, "", "", "", n) + p.next() + default: + panic("expression must evaluate to a node-set") + } + return opnd +} + +// PrimaryExpr ::= VariableReference | '(' Expr ')' | Literal | Number | FunctionCall +func (p *parser) parsePrimaryExpr(n node) (opnd node) { + switch p.r.typ { + case itemString: + opnd = newOperandNode(p.r.strval) + p.next() + case itemNumber: + opnd = newOperandNode(p.r.numval) + p.next() + case itemDollar: + p.next() + checkItem(p.r, itemName) + opnd = newVariableNode(p.r.prefix, p.r.name) + p.next() + case itemLParens: + p.next() + opnd = p.parseExpression(n) + if opnd.Type() != nodeConstantOperand { + opnd = newGroupNode(opnd) + } + p.skipItem(itemRParens) + case itemName: + if p.r.canBeFunc && !isNodeType(p.r) { + opnd = p.parseMethod(nil) + } + } + return opnd +} + +// FunctionCall ::= FunctionName '(' ( Argument ( ',' Argument )* )? ')' +func (p *parser) parseMethod(n node) node { + var args []node + name := p.r.name + prefix := p.r.prefix + + p.skipItem(itemName) + p.skipItem(itemLParens) + if p.r.typ != itemRParens { + for { + args = append(args, p.parseExpression(n)) + if p.r.typ == itemRParens { + break + } + p.skipItem(itemComma) + } + } + p.skipItem(itemRParens) + return newFunctionNode(name, prefix, args) +} + +// Parse parsing the XPath express string expr and returns a tree node. 
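+// Scanner and parser errors panic; callers such as build convert them to
+// ordinary errors via recover.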
+func parse(expr string, namespaces map[string]string) node { + r := &scanner{text: expr} + r.nextChar() + r.nextItem() + p := &parser{r: r, namespaces: namespaces} + return p.parseExpression(nil) +} + +// rootNode holds a top-level node of tree. +type rootNode struct { + nodeType + slash string +} + +func (r *rootNode) String() string { + return r.slash +} + +// operatorNode holds two Nodes operator. +type operatorNode struct { + nodeType + Op string + Left, Right node +} + +func (o *operatorNode) String() string { + return fmt.Sprintf("%v%s%v", o.Left, o.Op, o.Right) +} + +// axisNode holds a location step. +type axisNode struct { + nodeType + Input node + Prop string // node-test name.[comment|text|processing-instruction|node] + AxisType string // name of the axis.[attribute|ancestor|child|....] + LocalName string // local part name of node. + Prefix string // prefix name of node. + namespaceURI string // namespace URI of node + hasNamespaceURI bool // if namespace URI is set (can be "") + typeTest NodeType +} + +func (a *axisNode) String() string { + var b bytes.Buffer + if a.AxisType != "" { + b.Write([]byte(a.AxisType + "::")) + } + if a.Prefix != "" { + b.Write([]byte(a.Prefix + ":")) + } + b.Write([]byte(a.LocalName)) + if a.Prop != "" { + b.Write([]byte("/" + a.Prop + "()")) + } + return b.String() +} + +// operandNode holds a constant operand. +type operandNode struct { + nodeType + Val interface{} +} + +func (o *operandNode) String() string { + return fmt.Sprintf("%v", o.Val) +} + +// groupNode holds a set of node expression +type groupNode struct { + nodeType + Input node +} + +func (g *groupNode) String() string { + return fmt.Sprintf("%s", g.Input) +} + +// filterNode holds a condition filter. +type filterNode struct { + nodeType + Input, Condition node +} + +func (f *filterNode) String() string { + return fmt.Sprintf("%s[%s]", f.Input, f.Condition) +} + +// variableNode holds a variable. +type variableNode struct { + nodeType + Name, Prefix string +} + +func (v *variableNode) String() string { + if v.Prefix == "" { + return v.Name + } + return fmt.Sprintf("%s:%s", v.Prefix, v.Name) +} + +// functionNode holds a function call. 
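+// Its String form is "fun(arg1,...,argn)"; note that operands print via %v,
+// so the string literal in substring('12345', 2) renders unquoted as
+// substring(12345,2).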
+type functionNode struct { + nodeType + Args []node + Prefix string + FuncName string // function name +} + +func (f *functionNode) String() string { + var b bytes.Buffer + // fun(arg1, ..., argn) + b.Write([]byte(f.FuncName)) + b.Write([]byte("(")) + for i, arg := range f.Args { + if i > 0 { + b.Write([]byte(",")) + } + b.Write([]byte(fmt.Sprintf("%s", arg))) + } + b.Write([]byte(")")) + return b.String() +} + +type scanner struct { + text, name, prefix string + + pos int + curr rune + currSize int + typ itemType + strval string // text value at current pos + numval float64 // number value at current pos + canBeFunc bool +} + +func (s *scanner) nextChar() bool { + if s.pos >= len(s.text) { + s.curr = rune(0) + s.currSize = 1 + return false + } + + r, size := rune(s.text[s.pos]), 1 + if r >= 0x80 { // handle multi-byte runes + r, size = utf8.DecodeRuneInString(s.text[s.pos:]) + } + + s.curr = r + s.currSize = size + s.pos += size + return true +} + +func (s *scanner) nextItem() bool { + s.skipSpace() + switch s.curr { + case 0: + s.typ = itemEOF + return false + case ',', '@', '(', ')', '|', '*', '[', ']', '+', '-', '=', '#', '$': + s.typ = asItemType(s.curr) + s.nextChar() + case '<': + s.typ = itemLt + s.nextChar() + if s.curr == '=' { + s.typ = itemLe + s.nextChar() + } + case '>': + s.typ = itemGt + s.nextChar() + if s.curr == '=' { + s.typ = itemGe + s.nextChar() + } + case '!': + s.typ = itemBang + s.nextChar() + if s.curr == '=' { + s.typ = itemNe + s.nextChar() + } + case '.': + s.typ = itemDot + s.nextChar() + if s.curr == '.' { + s.typ = itemDotDot + s.nextChar() + } else if isDigit(s.curr) { + s.typ = itemNumber + s.numval = s.scanFraction() + } + case '/': + s.typ = itemSlash + s.nextChar() + if s.curr == '/' { + s.typ = itemSlashSlash + s.nextChar() + } + case '"', '\'': + s.typ = itemString + s.strval = s.scanString() + default: + if isDigit(s.curr) { + s.typ = itemNumber + s.numval = s.scanNumber() + } else if isName(s.curr) { + s.typ = itemName + s.name = s.scanName() + s.prefix = "" + // "foo:bar" is one itemem not three because it doesn't allow spaces in between + // We should distinct it from "foo::" and need process "foo ::" as well + if s.curr == ':' { + s.nextChar() + // can be "foo:bar" or "foo::" + if s.curr == ':' { + // "foo::" + s.nextChar() + s.typ = itemAxe + } else { // "foo:*", "foo:bar" or "foo: " + s.prefix = s.name + if s.curr == '*' { + s.nextChar() + s.name = "*" + } else if isName(s.curr) { + s.name = s.scanName() + } else { + panic(fmt.Sprintf("%s has an invalid qualified name.", s.text)) + } + } + } else { + s.skipSpace() + if s.curr == ':' { + s.nextChar() + // it can be "foo ::" or just "foo :" + if s.curr == ':' { + s.nextChar() + s.typ = itemAxe + } else { + panic(fmt.Sprintf("%s has an invalid qualified name.", s.text)) + } + } + } + s.skipSpace() + s.canBeFunc = s.curr == '(' + } else { + panic(fmt.Sprintf("%s has an invalid token.", s.text)) + } + } + return true +} + +func (s *scanner) skipSpace() { +Loop: + for { + if !unicode.IsSpace(s.curr) || !s.nextChar() { + break Loop + } + } +} + +func (s *scanner) scanFraction() float64 { + var ( + i = s.pos - 2 + c = 1 // '.' + ) + for isDigit(s.curr) { + s.nextChar() + c++ + } + v, err := strconv.ParseFloat(s.text[i:i+c], 64) + if err != nil { + panic(fmt.Errorf("xpath: scanFraction parse float got error: %v", err)) + } + return v +} + +func (s *scanner) scanNumber() float64 { + var ( + c int + i = s.pos - 1 + ) + for isDigit(s.curr) { + s.nextChar() + c++ + } + if s.curr == '.' 
{ + s.nextChar() + c++ + for isDigit(s.curr) { + s.nextChar() + c++ + } + } + v, err := strconv.ParseFloat(s.text[i:i+c], 64) + if err != nil { + panic(fmt.Errorf("xpath: scanNumber parse float got error: %v", err)) + } + return v +} + +func (s *scanner) scanString() string { + var ( + end = s.curr + ) + s.nextChar() + i := s.pos - s.currSize + c := s.currSize + for s.curr != end { + if !s.nextChar() { + panic(errors.New("xpath: scanString got unclosed string")) + } + c += s.currSize + } + c -= 1 + s.nextChar() + return s.text[i : i+c] +} + +func (s *scanner) scanName() string { + var ( + c = s.currSize - 1 + i = s.pos - s.currSize + ) + + // Detect current rune size + + for isName(s.curr) { + if !s.nextChar() { + c += s.currSize + break + } + c += s.currSize + } + return s.text[i : i+c] +} + +func isName(r rune) bool { + return string(r) != ":" && string(r) != "/" && + (unicode.Is(first, r) || unicode.Is(second, r) || string(r) == "*") +} + +func isDigit(r rune) bool { + return unicode.IsDigit(r) +} + +func asItemType(r rune) itemType { + switch r { + case ',': + return itemComma + case '@': + return itemAt + case '(': + return itemLParens + case ')': + return itemRParens + case '|': + return itemUnion + case '*': + return itemStar + case '[': + return itemLBracket + case ']': + return itemRBracket + case '+': + return itemPlus + case '-': + return itemMinus + case '=': + return itemEq + case '$': + return itemDollar + } + panic(fmt.Errorf("unknown item: %v", r)) +} + +var first = &unicode.RangeTable{ + R16: []unicode.Range16{ + {0x003A, 0x003A, 1}, + {0x0041, 0x005A, 1}, + {0x005F, 0x005F, 1}, + {0x0061, 0x007A, 1}, + {0x00C0, 0x00D6, 1}, + {0x00D8, 0x00F6, 1}, + {0x00F8, 0x00FF, 1}, + {0x0100, 0x0131, 1}, + {0x0134, 0x013E, 1}, + {0x0141, 0x0148, 1}, + {0x014A, 0x017E, 1}, + {0x0180, 0x01C3, 1}, + {0x01CD, 0x01F0, 1}, + {0x01F4, 0x01F5, 1}, + {0x01FA, 0x0217, 1}, + {0x0250, 0x02A8, 1}, + {0x02BB, 0x02C1, 1}, + {0x0386, 0x0386, 1}, + {0x0388, 0x038A, 1}, + {0x038C, 0x038C, 1}, + {0x038E, 0x03A1, 1}, + {0x03A3, 0x03CE, 1}, + {0x03D0, 0x03D6, 1}, + {0x03DA, 0x03E0, 2}, + {0x03E2, 0x03F3, 1}, + {0x0401, 0x040C, 1}, + {0x040E, 0x044F, 1}, + {0x0451, 0x045C, 1}, + {0x045E, 0x0481, 1}, + {0x0490, 0x04C4, 1}, + {0x04C7, 0x04C8, 1}, + {0x04CB, 0x04CC, 1}, + {0x04D0, 0x04EB, 1}, + {0x04EE, 0x04F5, 1}, + {0x04F8, 0x04F9, 1}, + {0x0531, 0x0556, 1}, + {0x0559, 0x0559, 1}, + {0x0561, 0x0586, 1}, + {0x05D0, 0x05EA, 1}, + {0x05F0, 0x05F2, 1}, + {0x0621, 0x063A, 1}, + {0x0641, 0x064A, 1}, + {0x0671, 0x06B7, 1}, + {0x06BA, 0x06BE, 1}, + {0x06C0, 0x06CE, 1}, + {0x06D0, 0x06D3, 1}, + {0x06D5, 0x06D5, 1}, + {0x06E5, 0x06E6, 1}, + {0x0905, 0x0939, 1}, + {0x093D, 0x093D, 1}, + {0x0958, 0x0961, 1}, + {0x0985, 0x098C, 1}, + {0x098F, 0x0990, 1}, + {0x0993, 0x09A8, 1}, + {0x09AA, 0x09B0, 1}, + {0x09B2, 0x09B2, 1}, + {0x09B6, 0x09B9, 1}, + {0x09DC, 0x09DD, 1}, + {0x09DF, 0x09E1, 1}, + {0x09F0, 0x09F1, 1}, + {0x0A05, 0x0A0A, 1}, + {0x0A0F, 0x0A10, 1}, + {0x0A13, 0x0A28, 1}, + {0x0A2A, 0x0A30, 1}, + {0x0A32, 0x0A33, 1}, + {0x0A35, 0x0A36, 1}, + {0x0A38, 0x0A39, 1}, + {0x0A59, 0x0A5C, 1}, + {0x0A5E, 0x0A5E, 1}, + {0x0A72, 0x0A74, 1}, + {0x0A85, 0x0A8B, 1}, + {0x0A8D, 0x0A8D, 1}, + {0x0A8F, 0x0A91, 1}, + {0x0A93, 0x0AA8, 1}, + {0x0AAA, 0x0AB0, 1}, + {0x0AB2, 0x0AB3, 1}, + {0x0AB5, 0x0AB9, 1}, + {0x0ABD, 0x0AE0, 0x23}, + {0x0B05, 0x0B0C, 1}, + {0x0B0F, 0x0B10, 1}, + {0x0B13, 0x0B28, 1}, + {0x0B2A, 0x0B30, 1}, + {0x0B32, 0x0B33, 1}, + {0x0B36, 0x0B39, 1}, + {0x0B3D, 0x0B3D, 1}, + {0x0B5C, 0x0B5D, 1}, + {0x0B5F, 0x0B61, 
1}, + {0x0B85, 0x0B8A, 1}, + {0x0B8E, 0x0B90, 1}, + {0x0B92, 0x0B95, 1}, + {0x0B99, 0x0B9A, 1}, + {0x0B9C, 0x0B9C, 1}, + {0x0B9E, 0x0B9F, 1}, + {0x0BA3, 0x0BA4, 1}, + {0x0BA8, 0x0BAA, 1}, + {0x0BAE, 0x0BB5, 1}, + {0x0BB7, 0x0BB9, 1}, + {0x0C05, 0x0C0C, 1}, + {0x0C0E, 0x0C10, 1}, + {0x0C12, 0x0C28, 1}, + {0x0C2A, 0x0C33, 1}, + {0x0C35, 0x0C39, 1}, + {0x0C60, 0x0C61, 1}, + {0x0C85, 0x0C8C, 1}, + {0x0C8E, 0x0C90, 1}, + {0x0C92, 0x0CA8, 1}, + {0x0CAA, 0x0CB3, 1}, + {0x0CB5, 0x0CB9, 1}, + {0x0CDE, 0x0CDE, 1}, + {0x0CE0, 0x0CE1, 1}, + {0x0D05, 0x0D0C, 1}, + {0x0D0E, 0x0D10, 1}, + {0x0D12, 0x0D28, 1}, + {0x0D2A, 0x0D39, 1}, + {0x0D60, 0x0D61, 1}, + {0x0E01, 0x0E2E, 1}, + {0x0E30, 0x0E30, 1}, + {0x0E32, 0x0E33, 1}, + {0x0E40, 0x0E45, 1}, + {0x0E81, 0x0E82, 1}, + {0x0E84, 0x0E84, 1}, + {0x0E87, 0x0E88, 1}, + {0x0E8A, 0x0E8D, 3}, + {0x0E94, 0x0E97, 1}, + {0x0E99, 0x0E9F, 1}, + {0x0EA1, 0x0EA3, 1}, + {0x0EA5, 0x0EA7, 2}, + {0x0EAA, 0x0EAB, 1}, + {0x0EAD, 0x0EAE, 1}, + {0x0EB0, 0x0EB0, 1}, + {0x0EB2, 0x0EB3, 1}, + {0x0EBD, 0x0EBD, 1}, + {0x0EC0, 0x0EC4, 1}, + {0x0F40, 0x0F47, 1}, + {0x0F49, 0x0F69, 1}, + {0x10A0, 0x10C5, 1}, + {0x10D0, 0x10F6, 1}, + {0x1100, 0x1100, 1}, + {0x1102, 0x1103, 1}, + {0x1105, 0x1107, 1}, + {0x1109, 0x1109, 1}, + {0x110B, 0x110C, 1}, + {0x110E, 0x1112, 1}, + {0x113C, 0x1140, 2}, + {0x114C, 0x1150, 2}, + {0x1154, 0x1155, 1}, + {0x1159, 0x1159, 1}, + {0x115F, 0x1161, 1}, + {0x1163, 0x1169, 2}, + {0x116D, 0x116E, 1}, + {0x1172, 0x1173, 1}, + {0x1175, 0x119E, 0x119E - 0x1175}, + {0x11A8, 0x11AB, 0x11AB - 0x11A8}, + {0x11AE, 0x11AF, 1}, + {0x11B7, 0x11B8, 1}, + {0x11BA, 0x11BA, 1}, + {0x11BC, 0x11C2, 1}, + {0x11EB, 0x11F0, 0x11F0 - 0x11EB}, + {0x11F9, 0x11F9, 1}, + {0x1E00, 0x1E9B, 1}, + {0x1EA0, 0x1EF9, 1}, + {0x1F00, 0x1F15, 1}, + {0x1F18, 0x1F1D, 1}, + {0x1F20, 0x1F45, 1}, + {0x1F48, 0x1F4D, 1}, + {0x1F50, 0x1F57, 1}, + {0x1F59, 0x1F5B, 0x1F5B - 0x1F59}, + {0x1F5D, 0x1F5D, 1}, + {0x1F5F, 0x1F7D, 1}, + {0x1F80, 0x1FB4, 1}, + {0x1FB6, 0x1FBC, 1}, + {0x1FBE, 0x1FBE, 1}, + {0x1FC2, 0x1FC4, 1}, + {0x1FC6, 0x1FCC, 1}, + {0x1FD0, 0x1FD3, 1}, + {0x1FD6, 0x1FDB, 1}, + {0x1FE0, 0x1FEC, 1}, + {0x1FF2, 0x1FF4, 1}, + {0x1FF6, 0x1FFC, 1}, + {0x2126, 0x2126, 1}, + {0x212A, 0x212B, 1}, + {0x212E, 0x212E, 1}, + {0x2180, 0x2182, 1}, + {0x3007, 0x3007, 1}, + {0x3021, 0x3029, 1}, + {0x3041, 0x3094, 1}, + {0x30A1, 0x30FA, 1}, + {0x3105, 0x312C, 1}, + {0x4E00, 0x9FA5, 1}, + {0xAC00, 0xD7A3, 1}, + }, +} + +var second = &unicode.RangeTable{ + R16: []unicode.Range16{ + {0x002D, 0x002E, 1}, + {0x0030, 0x0039, 1}, + {0x00B7, 0x00B7, 1}, + {0x02D0, 0x02D1, 1}, + {0x0300, 0x0345, 1}, + {0x0360, 0x0361, 1}, + {0x0387, 0x0387, 1}, + {0x0483, 0x0486, 1}, + {0x0591, 0x05A1, 1}, + {0x05A3, 0x05B9, 1}, + {0x05BB, 0x05BD, 1}, + {0x05BF, 0x05BF, 1}, + {0x05C1, 0x05C2, 1}, + {0x05C4, 0x0640, 0x0640 - 0x05C4}, + {0x064B, 0x0652, 1}, + {0x0660, 0x0669, 1}, + {0x0670, 0x0670, 1}, + {0x06D6, 0x06DC, 1}, + {0x06DD, 0x06DF, 1}, + {0x06E0, 0x06E4, 1}, + {0x06E7, 0x06E8, 1}, + {0x06EA, 0x06ED, 1}, + {0x06F0, 0x06F9, 1}, + {0x0901, 0x0903, 1}, + {0x093C, 0x093C, 1}, + {0x093E, 0x094C, 1}, + {0x094D, 0x094D, 1}, + {0x0951, 0x0954, 1}, + {0x0962, 0x0963, 1}, + {0x0966, 0x096F, 1}, + {0x0981, 0x0983, 1}, + {0x09BC, 0x09BC, 1}, + {0x09BE, 0x09BF, 1}, + {0x09C0, 0x09C4, 1}, + {0x09C7, 0x09C8, 1}, + {0x09CB, 0x09CD, 1}, + {0x09D7, 0x09D7, 1}, + {0x09E2, 0x09E3, 1}, + {0x09E6, 0x09EF, 1}, + {0x0A02, 0x0A3C, 0x3A}, + {0x0A3E, 0x0A3F, 1}, + {0x0A40, 0x0A42, 1}, + {0x0A47, 0x0A48, 1}, + {0x0A4B, 0x0A4D, 1}, + {0x0A66, 0x0A6F, 1}, + 
{0x0A70, 0x0A71, 1}, + {0x0A81, 0x0A83, 1}, + {0x0ABC, 0x0ABC, 1}, + {0x0ABE, 0x0AC5, 1}, + {0x0AC7, 0x0AC9, 1}, + {0x0ACB, 0x0ACD, 1}, + {0x0AE6, 0x0AEF, 1}, + {0x0B01, 0x0B03, 1}, + {0x0B3C, 0x0B3C, 1}, + {0x0B3E, 0x0B43, 1}, + {0x0B47, 0x0B48, 1}, + {0x0B4B, 0x0B4D, 1}, + {0x0B56, 0x0B57, 1}, + {0x0B66, 0x0B6F, 1}, + {0x0B82, 0x0B83, 1}, + {0x0BBE, 0x0BC2, 1}, + {0x0BC6, 0x0BC8, 1}, + {0x0BCA, 0x0BCD, 1}, + {0x0BD7, 0x0BD7, 1}, + {0x0BE7, 0x0BEF, 1}, + {0x0C01, 0x0C03, 1}, + {0x0C3E, 0x0C44, 1}, + {0x0C46, 0x0C48, 1}, + {0x0C4A, 0x0C4D, 1}, + {0x0C55, 0x0C56, 1}, + {0x0C66, 0x0C6F, 1}, + {0x0C82, 0x0C83, 1}, + {0x0CBE, 0x0CC4, 1}, + {0x0CC6, 0x0CC8, 1}, + {0x0CCA, 0x0CCD, 1}, + {0x0CD5, 0x0CD6, 1}, + {0x0CE6, 0x0CEF, 1}, + {0x0D02, 0x0D03, 1}, + {0x0D3E, 0x0D43, 1}, + {0x0D46, 0x0D48, 1}, + {0x0D4A, 0x0D4D, 1}, + {0x0D57, 0x0D57, 1}, + {0x0D66, 0x0D6F, 1}, + {0x0E31, 0x0E31, 1}, + {0x0E34, 0x0E3A, 1}, + {0x0E46, 0x0E46, 1}, + {0x0E47, 0x0E4E, 1}, + {0x0E50, 0x0E59, 1}, + {0x0EB1, 0x0EB1, 1}, + {0x0EB4, 0x0EB9, 1}, + {0x0EBB, 0x0EBC, 1}, + {0x0EC6, 0x0EC6, 1}, + {0x0EC8, 0x0ECD, 1}, + {0x0ED0, 0x0ED9, 1}, + {0x0F18, 0x0F19, 1}, + {0x0F20, 0x0F29, 1}, + {0x0F35, 0x0F39, 2}, + {0x0F3E, 0x0F3F, 1}, + {0x0F71, 0x0F84, 1}, + {0x0F86, 0x0F8B, 1}, + {0x0F90, 0x0F95, 1}, + {0x0F97, 0x0F97, 1}, + {0x0F99, 0x0FAD, 1}, + {0x0FB1, 0x0FB7, 1}, + {0x0FB9, 0x0FB9, 1}, + {0x20D0, 0x20DC, 1}, + {0x20E1, 0x3005, 0x3005 - 0x20E1}, + {0x302A, 0x302F, 1}, + {0x3031, 0x3035, 1}, + {0x3099, 0x309A, 1}, + {0x309D, 0x309E, 1}, + {0x30FC, 0x30FE, 1}, + }, +} diff --git a/vendor/github.com/antchfx/xpath/query.go b/vendor/github.com/antchfx/xpath/query.go new file mode 100644 index 00000000000..75c43361393 --- /dev/null +++ b/vendor/github.com/antchfx/xpath/query.go @@ -0,0 +1,1437 @@ +package xpath + +import ( + "bytes" + "fmt" + "hash/fnv" + "reflect" +) + +// The return type of the XPath expression. +type resultType int + +var xpathResultType = struct { + Boolean resultType + // A numeric value + Number resultType + String resultType + // A node collection. + NodeSet resultType + // Any of the XPath node types. + Any resultType +}{ + Boolean: 0, + Number: 1, + String: 2, + NodeSet: 3, + Any: 4, +} + +type queryProp int + +var queryProps = struct { + None queryProp + Position queryProp + Count queryProp + Cached queryProp + Reverse queryProp + Merge queryProp +}{ + None: 0, + Position: 1, + Count: 2, + Cached: 4, + Reverse: 8, + Merge: 16, +} + +type iterator interface { + Current() NodeNavigator +} + +// An XPath query interface. +type query interface { + // Select traversing iterator returns a query matched node NodeNavigator. + Select(iterator) NodeNavigator + + // Evaluate evaluates query and returns values of the current query. + Evaluate(iterator) interface{} + + Clone() query + + // ValueType returns the value type of the current query. + ValueType() resultType + + Properties() queryProp +} + +// nopQuery is an empty query that always return nil for any query. +type nopQuery struct{} + +func (nopQuery) Select(iterator) NodeNavigator { return nil } + +func (nopQuery) Evaluate(iterator) interface{} { return nil } + +func (nopQuery) Clone() query { return nopQuery{} } + +func (nopQuery) ValueType() resultType { return xpathResultType.NodeSet } + +func (nopQuery) Properties() queryProp { + return queryProps.Merge | queryProps.Position | queryProps.Count | queryProps.Cached +} + +// contextQuery is returns current node on the iterator object query. 
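+// It yields the iterator's current node exactly once per evaluation and
+// serves as the implicit input of relative location paths.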
+type contextQuery struct { + count int +} + +func (c *contextQuery) Select(t iterator) NodeNavigator { + if c.count > 0 { + return nil + } + c.count++ + return t.Current().Copy() +} + +func (c *contextQuery) Evaluate(iterator) interface{} { + c.count = 0 + return c +} + +func (c *contextQuery) Clone() query { + return &contextQuery{} +} + +func (c *contextQuery) ValueType() resultType { + return xpathResultType.NodeSet +} + +func (c *contextQuery) Properties() queryProp { + return queryProps.Merge | queryProps.Position | queryProps.Count | queryProps.Cached +} + +type absoluteQuery struct { + count int +} + +func (a *absoluteQuery) Select(t iterator) (n NodeNavigator) { + if a.count > 0 { + return + } + a.count++ + n = t.Current().Copy() + n.MoveToRoot() + return +} + +func (a *absoluteQuery) Evaluate(t iterator) interface{} { + a.count = 0 + return a +} + +func (a *absoluteQuery) Clone() query { + return &absoluteQuery{} +} + +func (a *absoluteQuery) ValueType() resultType { + return xpathResultType.NodeSet +} + +func (a *absoluteQuery) Properties() queryProp { + return queryProps.Merge | queryProps.Position | queryProps.Count | queryProps.Cached +} + +// ancestorQuery is an XPath ancestor node query.(ancestor::*|ancestor-self::*) +type ancestorQuery struct { + name string + iterator func() NodeNavigator + table map[uint64]bool + + Self bool + Input query + Predicate func(NodeNavigator) bool +} + +func (a *ancestorQuery) Select(t iterator) NodeNavigator { + if a.table == nil { + a.table = make(map[uint64]bool) + } + + for { + if a.iterator == nil { + node := a.Input.Select(t) + if node == nil { + return nil + } + first := true + node = node.Copy() + a.iterator = func() NodeNavigator { + if first { + first = false + if a.Self && a.Predicate(node) { + return node + } + } + for node.MoveToParent() { + if a.Predicate(node) { + return node + } + } + return nil + } + } + + for node := a.iterator(); node != nil; node = a.iterator() { + node_id := getHashCode(node.Copy()) + if _, ok := a.table[node_id]; !ok { + a.table[node_id] = true + return node + } + } + a.iterator = nil + } +} + +func (a *ancestorQuery) Evaluate(t iterator) interface{} { + a.Input.Evaluate(t) + a.iterator = nil + return a +} + +func (a *ancestorQuery) Test(n NodeNavigator) bool { + return a.Predicate(n) +} + +func (a *ancestorQuery) Clone() query { + return &ancestorQuery{name: a.name, Self: a.Self, Input: a.Input.Clone(), Predicate: a.Predicate} +} + +func (a *ancestorQuery) ValueType() resultType { + return xpathResultType.NodeSet +} + +func (a *ancestorQuery) Properties() queryProp { + return queryProps.Position | queryProps.Count | queryProps.Cached | queryProps.Merge | queryProps.Reverse +} + +// attributeQuery is an XPath attribute node query.(@*) +type attributeQuery struct { + name string + iterator func() NodeNavigator + + Input query + Predicate func(NodeNavigator) bool +} + +func (a *attributeQuery) Select(t iterator) NodeNavigator { + for { + if a.iterator == nil { + node := a.Input.Select(t) + if node == nil { + return nil + } + node = node.Copy() + a.iterator = func() NodeNavigator { + for { + onAttr := node.MoveToNextAttribute() + if !onAttr { + return nil + } + if a.Predicate(node) { + return node + } + } + } + } + + if node := a.iterator(); node != nil { + return node + } + a.iterator = nil + } +} + +func (a *attributeQuery) Evaluate(t iterator) interface{} { + a.Input.Evaluate(t) + a.iterator = nil + return a +} + +func (a *attributeQuery) Test(n NodeNavigator) bool { + return a.Predicate(n) +} + +func (a 
*attributeQuery) Clone() query { + return &attributeQuery{name: a.name, Input: a.Input.Clone(), Predicate: a.Predicate} +} + +func (a *attributeQuery) ValueType() resultType { + return xpathResultType.NodeSet +} + +func (a *attributeQuery) Properties() queryProp { + return queryProps.Merge +} + +// childQuery is an XPath child node query.(child::*) +type childQuery struct { + name string + posit int + iterator func() NodeNavigator + + Input query + Predicate func(NodeNavigator) bool +} + +func (c *childQuery) Select(t iterator) NodeNavigator { + for { + if c.iterator == nil { + c.posit = 0 + node := c.Input.Select(t) + if node == nil { + return nil + } + node = node.Copy() + first := true + c.iterator = func() NodeNavigator { + for { + if (first && !node.MoveToChild()) || (!first && !node.MoveToNext()) { + return nil + } + first = false + if c.Predicate(node) { + return node + } + } + } + } + + if node := c.iterator(); node != nil { + c.posit++ + return node + } + c.iterator = nil + } +} + +func (c *childQuery) Evaluate(t iterator) interface{} { + c.Input.Evaluate(t) + c.iterator = nil + return c +} + +func (c *childQuery) Test(n NodeNavigator) bool { + return c.Predicate(n) +} + +func (c *childQuery) Clone() query { + return &childQuery{name: c.name, Input: c.Input.Clone(), Predicate: c.Predicate} +} + +func (c *childQuery) ValueType() resultType { + return xpathResultType.NodeSet +} + +func (c *childQuery) Properties() queryProp { + return queryProps.Merge +} + +// position returns a position of current NodeNavigator. +func (c *childQuery) position() int { + return c.posit +} + +type cachedChildQuery struct { + name string + posit int + iterator func() NodeNavigator + + Input query + Predicate func(NodeNavigator) bool +} + +func (c *cachedChildQuery) Select(t iterator) NodeNavigator { + for { + if c.iterator == nil { + c.posit = 0 + node := c.Input.Select(t) + if node == nil { + return nil + } + node = node.Copy() + first := true + c.iterator = func() NodeNavigator { + for { + if (first && !node.MoveToChild()) || (!first && !node.MoveToNext()) { + return nil + } + first = false + if c.Predicate(node) { + return node + } + } + } + } + + if node := c.iterator(); node != nil { + c.posit++ + return node + } + c.iterator = nil + } +} + +func (c *cachedChildQuery) Evaluate(t iterator) interface{} { + c.Input.Evaluate(t) + c.iterator = nil + return c +} + +func (c *cachedChildQuery) position() int { + return c.posit +} + +func (c *cachedChildQuery) Test(n NodeNavigator) bool { + return c.Predicate(n) +} + +func (c *cachedChildQuery) Clone() query { + return &childQuery{name: c.name, Input: c.Input.Clone(), Predicate: c.Predicate} +} + +func (c *cachedChildQuery) ValueType() resultType { + return xpathResultType.NodeSet +} + +func (c *cachedChildQuery) Properties() queryProp { + return queryProps.Merge +} + +// descendantQuery is an XPath descendant node query.(descendant::* | descendant-or-self::*) +type descendantQuery struct { + name string + iterator func() NodeNavigator + posit int + level int + + Self bool + Input query + Predicate func(NodeNavigator) bool +} + +func (d *descendantQuery) Select(t iterator) NodeNavigator { + for { + if d.iterator == nil { + d.posit = 0 + node := d.Input.Select(t) + if node == nil { + return nil + } + node = node.Copy() + d.level = 0 + first := true + d.iterator = func() NodeNavigator { + if first { + first = false + if d.Self && d.Predicate(node) { + return node + } + } + + for { + if node.MoveToChild() { + d.level = d.level + 1 + } else { + for { + if 
d.level == 0 { + return nil + } + if node.MoveToNext() { + break + } + node.MoveToParent() + d.level = d.level - 1 + } + } + if d.Predicate(node) { + return node + } + } + } + } + + if node := d.iterator(); node != nil { + d.posit++ + return node + } + d.iterator = nil + } +} + +func (d *descendantQuery) Evaluate(t iterator) interface{} { + d.Input.Evaluate(t) + d.iterator = nil + return d +} + +func (d *descendantQuery) Test(n NodeNavigator) bool { + return d.Predicate(n) +} + +// position returns a position of current NodeNavigator. +func (d *descendantQuery) position() int { + return d.posit +} + +func (d *descendantQuery) depth() int { + return d.level +} + +func (d *descendantQuery) Clone() query { + return &descendantQuery{name: d.name, Self: d.Self, Input: d.Input.Clone(), Predicate: d.Predicate} +} + +func (d *descendantQuery) ValueType() resultType { + return xpathResultType.NodeSet +} + +func (d *descendantQuery) Properties() queryProp { + return queryProps.Merge +} + +// followingQuery is an XPath following node query.(following::*|following-sibling::*) +type followingQuery struct { + posit int + iterator func() NodeNavigator + + Input query + Sibling bool // The matching sibling node of current node. + Predicate func(NodeNavigator) bool +} + +func (f *followingQuery) Select(t iterator) NodeNavigator { + for { + if f.iterator == nil { + f.posit = 0 + node := f.Input.Select(t) + if node == nil { + return nil + } + node = node.Copy() + if f.Sibling { + f.iterator = func() NodeNavigator { + for { + if !node.MoveToNext() { + return nil + } + if f.Predicate(node) { + f.posit++ + return node + } + } + } + } else { + var q *descendantQuery // descendant query + f.iterator = func() NodeNavigator { + for { + if q == nil { + for !node.MoveToNext() { + if !node.MoveToParent() { + return nil + } + } + q = &descendantQuery{ + Self: true, + Input: &contextQuery{}, + Predicate: f.Predicate, + } + t.Current().MoveTo(node) + } + if node := q.Select(t); node != nil { + f.posit = q.posit + return node + } + q = nil + } + } + } + } + + if node := f.iterator(); node != nil { + return node + } + f.iterator = nil + } +} + +func (f *followingQuery) Evaluate(t iterator) interface{} { + f.Input.Evaluate(t) + return f +} + +func (f *followingQuery) Test(n NodeNavigator) bool { + return f.Predicate(n) +} + +func (f *followingQuery) Clone() query { + return &followingQuery{Input: f.Input.Clone(), Sibling: f.Sibling, Predicate: f.Predicate} +} + +func (f *followingQuery) ValueType() resultType { + return xpathResultType.NodeSet +} + +func (f *followingQuery) Properties() queryProp { + return queryProps.Merge +} + +func (f *followingQuery) position() int { + return f.posit +} + +// precedingQuery is an XPath preceding node query.(preceding::*) +type precedingQuery struct { + iterator func() NodeNavigator + posit int + Input query + Sibling bool // The matching sibling node of current node. 
+ Predicate func(NodeNavigator) bool +} + +func (p *precedingQuery) Select(t iterator) NodeNavigator { + for { + if p.iterator == nil { + p.posit = 0 + node := p.Input.Select(t) + if node == nil { + return nil + } + node = node.Copy() + if p.Sibling { + p.iterator = func() NodeNavigator { + for { + for !node.MoveToPrevious() { + return nil + } + if p.Predicate(node) { + p.posit++ + return node + } + } + } + } else { + var q query + p.iterator = func() NodeNavigator { + for { + if q == nil { + for !node.MoveToPrevious() { + if !node.MoveToParent() { + return nil + } + p.posit = 0 + } + q = &descendantQuery{ + Self: true, + Input: &contextQuery{}, + Predicate: p.Predicate, + } + t.Current().MoveTo(node) + } + if node := q.Select(t); node != nil { + p.posit++ + return node + } + q = nil + } + } + } + } + if node := p.iterator(); node != nil { + return node + } + p.iterator = nil + } +} + +func (p *precedingQuery) Evaluate(t iterator) interface{} { + p.Input.Evaluate(t) + return p +} + +func (p *precedingQuery) Test(n NodeNavigator) bool { + return p.Predicate(n) +} + +func (p *precedingQuery) Clone() query { + return &precedingQuery{Input: p.Input.Clone(), Sibling: p.Sibling, Predicate: p.Predicate} +} + +func (p *precedingQuery) ValueType() resultType { + return xpathResultType.NodeSet +} + +func (p *precedingQuery) Properties() queryProp { + return queryProps.Merge | queryProps.Reverse +} + +func (p *precedingQuery) position() int { + return p.posit +} + +// parentQuery is an XPath parent node query.(parent::*) +type parentQuery struct { + Input query + Predicate func(NodeNavigator) bool +} + +func (p *parentQuery) Select(t iterator) NodeNavigator { + for { + node := p.Input.Select(t) + if node == nil { + return nil + } + node = node.Copy() + if node.MoveToParent() && p.Predicate(node) { + return node + } + } +} + +func (p *parentQuery) Evaluate(t iterator) interface{} { + p.Input.Evaluate(t) + return p +} + +func (p *parentQuery) Clone() query { + return &parentQuery{Input: p.Input.Clone(), Predicate: p.Predicate} +} + +func (p *parentQuery) ValueType() resultType { + return xpathResultType.NodeSet +} + +func (p *parentQuery) Properties() queryProp { + return queryProps.Position | queryProps.Count | queryProps.Cached | queryProps.Merge +} + +func (p *parentQuery) Test(n NodeNavigator) bool { + return p.Predicate(n) +} + +// selfQuery is an Self node query.(self::*) +type selfQuery struct { + Input query + Predicate func(NodeNavigator) bool +} + +func (s *selfQuery) Select(t iterator) NodeNavigator { + for { + node := s.Input.Select(t) + if node == nil { + return nil + } + + if s.Predicate(node) { + return node + } + } +} + +func (s *selfQuery) Evaluate(t iterator) interface{} { + s.Input.Evaluate(t) + return s +} + +func (s *selfQuery) Test(n NodeNavigator) bool { + return s.Predicate(n) +} + +func (s *selfQuery) Clone() query { + return &selfQuery{Input: s.Input.Clone(), Predicate: s.Predicate} +} + +func (s *selfQuery) ValueType() resultType { + return xpathResultType.NodeSet +} + +func (s *selfQuery) Properties() queryProp { + return queryProps.Merge +} + +// filterQuery is an XPath query for predicate filter. 
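// To make the predicate semantics below concrete: filterQuery.do treats a
// boolean predicate as-is, a string predicate as true when non-empty, and a
// numeric predicate N as the positional test position() = N. A distilled
// restatement of that rule (predicateHolds is a hypothetical helper, not the
// vendored API):

func predicateHolds(val interface{}, position int) bool {
	switch v := val.(type) {
	case bool:
		return v // e.g. //book[@price > 10]
	case string:
		return len(v) > 0 // e.g. //book[normalize-space(@lang)]
	case float64:
		return int(v) == position // e.g. //book[3]: keep only position 3
	}
	// Node-set predicates have no scalar value; do() below instead probes the
	// predicate query for at least one selected node.
	return false
}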
+type filterQuery struct { + Input query + Predicate query + NoPosition bool + + posit int + positmap map[int]int +} + +func (f *filterQuery) do(t iterator) bool { + val := reflect.ValueOf(f.Predicate.Evaluate(t)) + switch val.Kind() { + case reflect.Bool: + return val.Bool() + case reflect.String: + return len(val.String()) > 0 + case reflect.Float64: + pt := getNodePosition(f.Input) + return int(val.Float()) == pt + default: + if f.Predicate != nil { + return f.Predicate.Select(t) != nil + } + } + return false +} + +func (f *filterQuery) position() int { + return f.posit +} + +func (f *filterQuery) Select(t iterator) NodeNavigator { + if f.positmap == nil { + f.positmap = make(map[int]int) + } + for { + + node := f.Input.Select(t) + if node == nil { + return nil + } + node = node.Copy() + + t.Current().MoveTo(node) + if f.do(t) { + // fix https://github.com/antchfx/htmlquery/issues/26 + // Calculate and keep the each of matching node's position in the same depth. + level := getNodeDepth(f.Input) + f.positmap[level]++ + f.posit = f.positmap[level] + return node + } + } +} + +func (f *filterQuery) Evaluate(t iterator) interface{} { + f.Input.Evaluate(t) + return f +} + +func (f *filterQuery) Clone() query { + return &filterQuery{Input: f.Input.Clone(), Predicate: f.Predicate.Clone()} +} + +func (f *filterQuery) ValueType() resultType { + return xpathResultType.NodeSet +} + +func (f *filterQuery) Properties() queryProp { + return (queryProps.Position | f.Input.Properties()) & (queryProps.Reverse | queryProps.Merge) +} + +// functionQuery is an XPath function that returns a computed value for +// the Evaluate call of the current NodeNavigator node. Select call isn't +// applicable for functionQuery. +type functionQuery struct { + Input query // Node Set + Func func(query, iterator) interface{} // The xpath function. +} + +func (f *functionQuery) Select(t iterator) NodeNavigator { + return nil +} + +// Evaluate call a specified function that will returns the +// following value type: number,string,boolean. +func (f *functionQuery) Evaluate(t iterator) interface{} { + return f.Func(f.Input, t) +} + +func (f *functionQuery) Clone() query { + if f.Input == nil { + return &functionQuery{Func: f.Func} + } + return &functionQuery{Input: f.Input.Clone(), Func: f.Func} +} + +func (f *functionQuery) ValueType() resultType { + return xpathResultType.Any +} + +func (f *functionQuery) Properties() queryProp { + return queryProps.Merge +} + +// transformFunctionQuery diffs from functionQuery where the latter computes a scalar +// value (number,string,boolean) for the current NodeNavigator node while the former +// (transformFunctionQuery) performs a mapping or transform of the current NodeNavigator +// and returns a new NodeNavigator. It is used for non-scalar XPath functions such as +// reverse(), remove(), subsequence(), unordered(), etc. 
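// As a concrete instance of the transform shape described above, a minimal
// sketch of a Func value implementing reverse(). It assumes only the query,
// iterator, and NodeNavigator types from this file; reverseTransform is
// illustrative, not the package's actual reverse() implementation:

func reverseTransform(q query, t iterator) func() NodeNavigator {
	// Materialize the input node-set in document order first.
	var list []NodeNavigator
	for n := q.Select(t); n != nil; n = q.Select(t) {
		list = append(list, n.Copy())
	}
	// Then yield the nodes back to front.
	i := len(list)
	return func() NodeNavigator {
		i--
		if i < 0 {
			return nil
		}
		return list[i]
	}
}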
+type transformFunctionQuery struct { + Input query + Func func(query, iterator) func() NodeNavigator + iterator func() NodeNavigator +} + +func (f *transformFunctionQuery) Select(t iterator) NodeNavigator { + if f.iterator == nil { + f.iterator = f.Func(f.Input, t) + } + return f.iterator() +} + +func (f *transformFunctionQuery) Evaluate(t iterator) interface{} { + f.Input.Evaluate(t) + f.iterator = nil + return f +} + +func (f *transformFunctionQuery) Clone() query { + return &transformFunctionQuery{Input: f.Input.Clone(), Func: f.Func} +} + +func (f *transformFunctionQuery) ValueType() resultType { + return xpathResultType.Any +} + +func (f *transformFunctionQuery) Properties() queryProp { + return queryProps.Merge +} + +// constantQuery is an XPath constant operand. +type constantQuery struct { + Val interface{} +} + +func (c *constantQuery) Select(t iterator) NodeNavigator { + return nil +} + +func (c *constantQuery) Evaluate(t iterator) interface{} { + return c.Val +} + +func (c *constantQuery) Clone() query { + return c +} + +func (c *constantQuery) ValueType() resultType { + return getXPathType(c.Val) +} + +func (c *constantQuery) Properties() queryProp { + return queryProps.Position | queryProps.Count | queryProps.Cached | queryProps.Merge +} + +type groupQuery struct { + posit int + + Input query +} + +func (g *groupQuery) Select(t iterator) NodeNavigator { + node := g.Input.Select(t) + if node == nil { + return nil + } + g.posit++ + return node +} + +func (g *groupQuery) Evaluate(t iterator) interface{} { + return g.Input.Evaluate(t) +} + +func (g *groupQuery) Clone() query { + return &groupQuery{Input: g.Input.Clone()} +} + +func (g *groupQuery) ValueType() resultType { + return g.Input.ValueType() +} + +func (g *groupQuery) Properties() queryProp { + return queryProps.Position +} + +func (g *groupQuery) position() int { + return g.posit +} + +// logicalQuery is an XPath logical expression. +type logicalQuery struct { + Left, Right query + + Do func(iterator, interface{}, interface{}) interface{} +} + +func (l *logicalQuery) Select(t iterator) NodeNavigator { + // When a XPath expr is logical expression. + node := t.Current().Copy() + val := l.Evaluate(t) + switch val.(type) { + case bool: + if val.(bool) == true { + return node + } + } + return nil +} + +func (l *logicalQuery) Evaluate(t iterator) interface{} { + m := l.Left.Evaluate(t) + n := l.Right.Evaluate(t) + return l.Do(t, m, n) +} + +func (l *logicalQuery) Clone() query { + return &logicalQuery{Left: l.Left.Clone(), Right: l.Right.Clone(), Do: l.Do} +} + +func (l *logicalQuery) ValueType() resultType { + return xpathResultType.Boolean +} + +func (l *logicalQuery) Properties() queryProp { + return queryProps.Merge +} + +// numericQuery is an XPath numeric operator expression. 
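// A minimal sketch of a Do callback for the numeric operator query declared
// next, wired for "+". The real package converts operands through its own
// numeric helpers; this illustrative plusDo assumes both sides already
// evaluate to float64, as constantQuery values (defined above) do:

func plusDo(_ iterator, m, n interface{}) interface{} {
	return m.(float64) + n.(float64)
}

// For example, (&numericQuery{Left: &constantQuery{Val: 1.0},
// Right: &constantQuery{Val: 2.0}, Do: plusDo}).Evaluate(nil) yields 3.0.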
+type numericQuery struct { + Left, Right query + + Do func(iterator, interface{}, interface{}) interface{} +} + +func (n *numericQuery) Select(t iterator) NodeNavigator { + return nil +} + +func (n *numericQuery) Evaluate(t iterator) interface{} { + m := n.Left.Evaluate(t) + k := n.Right.Evaluate(t) + return n.Do(t, m, k) +} + +func (n *numericQuery) Clone() query { + return &numericQuery{Left: n.Left.Clone(), Right: n.Right.Clone(), Do: n.Do} +} + +func (n *numericQuery) ValueType() resultType { + return xpathResultType.Number +} + +func (n *numericQuery) Properties() queryProp { + return queryProps.Merge +} + +type booleanQuery struct { + IsOr bool + Left, Right query + iterator func() NodeNavigator +} + +func (b *booleanQuery) Select(t iterator) NodeNavigator { + if b.iterator == nil { + var list []NodeNavigator + i := 0 + root := t.Current().Copy() + if b.IsOr { + for { + node := b.Left.Select(t) + if node == nil { + break + } + node = node.Copy() + list = append(list, node) + } + t.Current().MoveTo(root) + for { + node := b.Right.Select(t) + if node == nil { + break + } + node = node.Copy() + list = append(list, node) + } + } else { + // intersection: collect both sides, then keep the nodes present in both + var m []NodeNavigator + var n []NodeNavigator + for { + node := b.Left.Select(t) + if node == nil { + break + } + node = node.Copy() + m = append(m, node) + } + t.Current().MoveTo(root) + for { + node := b.Right.Select(t) + if node == nil { + break + } + node = node.Copy() + n = append(n, node) + } + for _, k := range m { + for _, j := range n { + if k == j { + list = append(list, k) + } + } + } + } + + b.iterator = func() NodeNavigator { + if i >= len(list) { + return nil + } + node := list[i] + i++ + return node + } + } + return b.iterator() +} + +func (b *booleanQuery) Evaluate(t iterator) interface{} { + n := t.Current().Copy() + + m := b.Left.Evaluate(t) + left := asBool(t, m) + if b.IsOr && left { + return true + } else if !b.IsOr && !left { + return false + } + + t.Current().MoveTo(n) + m = b.Right.Evaluate(t) + return asBool(t, m) +} + +func (b *booleanQuery) Clone() query { + return &booleanQuery{IsOr: b.IsOr, Left: b.Left.Clone(), Right: b.Right.Clone()} +} + +func (b *booleanQuery) ValueType() resultType { + return xpathResultType.Boolean +} + +func (b *booleanQuery) Properties() queryProp { + return queryProps.Merge +} + +type unionQuery struct { + Left, Right query + iterator func() NodeNavigator +} + +func (u *unionQuery) Select(t iterator) NodeNavigator { + if u.iterator == nil { + var list []NodeNavigator + var m = make(map[uint64]bool) + root := t.Current().Copy() + for { + node := u.Left.Select(t) + if node == nil { + break + } + code := getHashCode(node.Copy()) + if _, ok := m[code]; !ok { + m[code] = true + list = append(list, node.Copy()) + } + } + t.Current().MoveTo(root) + for { + node := u.Right.Select(t) + if node == nil { + break + } + code := getHashCode(node.Copy()) + if _, ok := m[code]; !ok { + m[code] = true + list = append(list, node.Copy()) + } + } + var i int + u.iterator = func() NodeNavigator { + if i >= len(list) { + return nil + } + node := list[i] + i++ + return node + } + } + return u.iterator() +} + +func (u *unionQuery) Evaluate(t iterator) interface{} { + u.iterator = nil + u.Left.Evaluate(t) + u.Right.Evaluate(t) + return u +} + +func (u *unionQuery) Clone() query { + return &unionQuery{Left: u.Left.Clone(), Right: u.Right.Clone()} +} + +func (u *unionQuery) ValueType() resultType { + return xpathResultType.NodeSet +} + +func (u *unionQuery) Properties() queryProp { + return queryProps.Merge +} + +type
lastFuncQuery struct { + buffer []NodeNavigator + counted bool + + Input query +} + +func (q *lastFuncQuery) Select(t iterator) NodeNavigator { + return nil +} + +func (q *lastFuncQuery) Evaluate(t iterator) interface{} { + if !q.counted { + for { + node := q.Input.Select(t) + if node == nil { + break + } + q.buffer = append(q.buffer, node.Copy()) + } + q.counted = true + } + return float64(len(q.buffer)) +} + +func (q *lastFuncQuery) Clone() query { + return &lastFuncQuery{Input: q.Input.Clone()} +} + +func (q *lastFuncQuery) ValueType() resultType { + return xpathResultType.Number +} + +func (q *lastFuncQuery) Properties() queryProp { + return queryProps.Merge +} + +type descendantOverDescendantQuery struct { + name string + level int + posit int + currentNode NodeNavigator + + Input query + MatchSelf bool + Predicate func(NodeNavigator) bool +} + +func (d *descendantOverDescendantQuery) moveToFirstChild() bool { + if d.currentNode.MoveToChild() { + d.level++ + return true + } + return false +} + +func (d *descendantOverDescendantQuery) moveUpUntilNext() bool { + for !d.currentNode.MoveToNext() { + d.level-- + if d.level == 0 { + return false + } + d.currentNode.MoveToParent() + } + return true +} + +func (d *descendantOverDescendantQuery) Select(t iterator) NodeNavigator { + for { + if d.level == 0 { + node := d.Input.Select(t) + if node == nil { + return nil + } + d.currentNode = node.Copy() + d.posit = 0 + if d.MatchSelf && d.Predicate(d.currentNode) { + d.posit = 1 + return d.currentNode + } + d.moveToFirstChild() + } else if !d.moveUpUntilNext() { + continue + } + for ok := true; ok; ok = d.moveToFirstChild() { + if d.Predicate(d.currentNode) { + d.posit++ + return d.currentNode + } + } + } +} + +func (d *descendantOverDescendantQuery) Evaluate(t iterator) interface{} { + d.Input.Evaluate(t) + return d +} + +func (d *descendantOverDescendantQuery) Clone() query { + return &descendantOverDescendantQuery{Input: d.Input.Clone(), Predicate: d.Predicate, MatchSelf: d.MatchSelf} +} + +func (d *descendantOverDescendantQuery) ValueType() resultType { + return xpathResultType.NodeSet +} + +func (d *descendantOverDescendantQuery) Properties() queryProp { + return queryProps.Merge +} + +func (d *descendantOverDescendantQuery) position() int { + return d.posit +} + +type mergeQuery struct { + Input query + Child query + + iterator func() NodeNavigator +} + +func (m *mergeQuery) Select(t iterator) NodeNavigator { + for { + if m.iterator == nil { + root := m.Input.Select(t) + if root == nil { + return nil + } + m.Child.Evaluate(t) + root = root.Copy() + t.Current().MoveTo(root) + var list []NodeNavigator + for node := m.Child.Select(t); node != nil; node = m.Child.Select(t) { + list = append(list, node.Copy()) + } + i := 0 + m.iterator = func() NodeNavigator { + if i >= len(list) { + return nil + } + result := list[i] + i++ + return result + } + } + + if node := m.iterator(); node != nil { + return node + } + m.iterator = nil + } +} + +func (m *mergeQuery) Evaluate(t iterator) interface{} { + m.Input.Evaluate(t) + return m +} + +func (m *mergeQuery) Clone() query { + return &mergeQuery{Input: m.Input.Clone(), Child: m.Child.Clone()} +} + +func (m *mergeQuery) ValueType() resultType { + return xpathResultType.NodeSet +} + +func (m *mergeQuery) Properties() queryProp { + return queryProps.Position | queryProps.Count | queryProps.Cached | queryProps.Merge +} + +func getHashCode(n NodeNavigator) uint64 { + var sb bytes.Buffer + switch n.NodeType() { + case AttributeNode, TextNode, CommentNode: + 
sb.WriteString(fmt.Sprintf("%s=%s", n.LocalName(), n.Value())) + // https://github.com/antchfx/htmlquery/issues/25 + d := 1 + for n.MoveToPrevious() { + d++ + } + sb.WriteString(fmt.Sprintf("-%d", d)) + for n.MoveToParent() { + d = 1 + for n.MoveToPrevious() { + d++ + } + sb.WriteString(fmt.Sprintf("-%d", d)) + } + case ElementNode: + sb.WriteString(n.Prefix() + n.LocalName()) + d := 1 + for n.MoveToPrevious() { + d++ + } + sb.WriteString(fmt.Sprintf("-%d", d)) + + for n.MoveToParent() { + d = 1 + for n.MoveToPrevious() { + d++ + } + sb.WriteString(fmt.Sprintf("-%d", d)) + } + } + h := fnv.New64a() + h.Write(sb.Bytes()) + return h.Sum64() +} + +func getNodePosition(q query) int { + type Position interface { + position() int + } + if count, ok := q.(Position); ok { + return count.position() + } + return 1 +} + +func getNodeDepth(q query) int { + type Depth interface { + depth() int + } + if count, ok := q.(Depth); ok { + return count.depth() + } + return 0 +} + +func getXPathType(i interface{}) resultType { + v := reflect.ValueOf(i) + switch v.Kind() { + case reflect.Float64: + return xpathResultType.Number + case reflect.String: + return xpathResultType.String + case reflect.Bool: + return xpathResultType.Boolean + default: + if _, ok := i.(query); ok { + return xpathResultType.NodeSet + } + } + panic(fmt.Errorf("xpath unknown value type: %v", v.Kind())) +} diff --git a/vendor/github.com/antchfx/xpath/xpath.go b/vendor/github.com/antchfx/xpath/xpath.go new file mode 100644 index 00000000000..04bbe8d4c23 --- /dev/null +++ b/vendor/github.com/antchfx/xpath/xpath.go @@ -0,0 +1,176 @@ +package xpath + +import ( + "errors" + "fmt" +) + +// NodeType represents a type of XPath node. +type NodeType int + +const ( + // RootNode is a root node of the XML document or node tree. + RootNode NodeType = iota + + // ElementNode is an element, such as . + ElementNode + + // AttributeNode is an attribute, such as id='123'. + AttributeNode + + // TextNode is the text content of a node. + TextNode + + // CommentNode is a comment node, such as + CommentNode + + // allNode is any types of node, used by xpath package only to predicate match. + allNode +) + +// NodeNavigator provides cursor model for navigating XML data. +type NodeNavigator interface { + // NodeType returns the XPathNodeType of the current node. + NodeType() NodeType + + // LocalName gets the Name of the current node. + LocalName() string + + // Prefix returns namespace prefix associated with the current node. + Prefix() string + + // Value gets the value of current node. + Value() string + + // Copy does a deep copy of the NodeNavigator and all its components. + Copy() NodeNavigator + + // MoveToRoot moves the NodeNavigator to the root node of the current node. + MoveToRoot() + + // MoveToParent moves the NodeNavigator to the parent node of the current node. + MoveToParent() bool + + // MoveToNextAttribute moves the NodeNavigator to the next attribute on current node. + MoveToNextAttribute() bool + + // MoveToChild moves the NodeNavigator to the first child node of the current node. + MoveToChild() bool + + // MoveToFirst moves the NodeNavigator to the first sibling node of the current node. + MoveToFirst() bool + + // MoveToNext moves the NodeNavigator to the next sibling node of the current node. + MoveToNext() bool + + // MoveToPrevious moves the NodeNavigator to the previous sibling node of the current node. + MoveToPrevious() bool + + // MoveTo moves the NodeNavigator to the same position as the specified NodeNavigator. 
+ MoveTo(NodeNavigator) bool +} + +// NodeIterator holds all matched Node objects. +type NodeIterator struct { + node NodeNavigator + query query +} + +// Current returns the current matched node. +func (t *NodeIterator) Current() NodeNavigator { + return t.node +} + +// MoveNext moves the Navigator to the next matched node. +func (t *NodeIterator) MoveNext() bool { + n := t.query.Select(t) + if n == nil { + return false + } + if !t.node.MoveTo(n) { + t.node = n.Copy() + } + return true +} + +// Select selects a node set using the specified XPath expression. +// +// Deprecated: use the Expr.Select() method instead; this function panics on a +// compile error. +func Select(root NodeNavigator, expr string) *NodeIterator { + exp, err := Compile(expr) + if err != nil { + panic(err) + } + return exp.Select(root) +} + +// Expr is a compiled XPath expression. +type Expr struct { + s string + q query +} + +type iteratorFunc func() NodeNavigator + +func (f iteratorFunc) Current() NodeNavigator { + return f() +} + +// Evaluate returns the result of the expression. +// The result type of the expression is one of the following: bool, float64, string, or *NodeIterator. +func (expr *Expr) Evaluate(root NodeNavigator) interface{} { + val := expr.q.Evaluate(iteratorFunc(func() NodeNavigator { return root })) + switch val.(type) { + case query: + return &NodeIterator{query: expr.q.Clone(), node: root} + } + return val +} + +// Select selects a node set using the specified XPath expression. +func (expr *Expr) Select(root NodeNavigator) *NodeIterator { + return &NodeIterator{query: expr.q.Clone(), node: root} +} + +// String returns the XPath expression string. +func (expr *Expr) String() string { + return expr.s +} + +// Compile compiles an XPath expression string. +func Compile(expr string) (*Expr, error) { + if expr == "" { + return nil, errors.New("expr expression is nil") + } + qy, err := build(expr, nil) + if err != nil { + return nil, err + } + if qy == nil { + return nil, fmt.Errorf("undeclared variable in XPath expression: %s", expr) + } + return &Expr{s: expr, q: qy}, nil +} + +// MustCompile compiles an XPath expression string; on error it returns an +// expression backed by nopQuery instead of failing. +func MustCompile(expr string) *Expr { + exp, err := Compile(expr) + if err != nil { + return &Expr{s: expr, q: nopQuery{}} + } + return exp +}
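// A minimal usage sketch of the Expr API above. Concrete NodeNavigator
// implementations come from companion packages (the assumption here is that
// nav was built by one of them, e.g. antchfx/xmlquery); this package only
// defines the interface:

func englishTitles(nav NodeNavigator) []string {
	expr := MustCompile("//book[@lang='en']/title")
	var titles []string
	iter := expr.Select(nav)
	for iter.MoveNext() {
		titles = append(titles, iter.Current().Value())
	}
	return titles
}

+// CompileWithNS compiles an XPath expression string, using the given namespaces map.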
+func CompileWithNS(expr string, namespaces map[string]string) (*Expr, error) { + if expr == "" { + return nil, errors.New("expr expression is nil") + } + qy, err := build(expr, namespaces) + if err != nil { + return nil, err + } + if qy == nil { + return nil, fmt.Errorf(fmt.Sprintf("undeclared variable in XPath expression: %s", expr)) + } + return &Expr{s: expr, q: qy}, nil +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/configuration.go b/vendor/github.com/apache/thrift/lib/go/thrift/configuration.go index de27edd674c..a9565d399df 100644 --- a/vendor/github.com/apache/thrift/lib/go/thrift/configuration.go +++ b/vendor/github.com/apache/thrift/lib/go/thrift/configuration.go @@ -56,47 +56,47 @@ const ( // // For example, say you want to migrate this old code into using TConfiguration: // -// sccket, err := thrift.NewTSocketTimeout("host:port", time.Second, time.Second) -// transFactory := thrift.NewTFramedTransportFactoryMaxLength( -// thrift.NewTTransportFactory(), -// 1024 * 1024 * 256, -// ) -// protoFactory := thrift.NewTBinaryProtocolFactory(true, true) +// socket, err := thrift.NewTSocketTimeout("host:port", time.Second, time.Second) +// transFactory := thrift.NewTFramedTransportFactoryMaxLength( +// thrift.NewTTransportFactory(), +// 1024 * 1024 * 256, +// ) +// protoFactory := thrift.NewTBinaryProtocolFactory(true, true) // // This is the wrong way to do it because in the end the TConfiguration used by // socket and transFactory will be overwritten by the one used by protoFactory // because of TConfiguration propagation: // -// // bad example, DO NOT USE -// sccket := thrift.NewTSocketConf("host:port", &thrift.TConfiguration{ -// ConnectTimeout: time.Second, -// SocketTimeout: time.Second, -// }) -// transFactory := thrift.NewTFramedTransportFactoryConf( -// thrift.NewTTransportFactory(), -// &thrift.TConfiguration{ -// MaxFrameSize: 1024 * 1024 * 256, -// }, -// ) -// protoFactory := thrift.NewTBinaryProtocolFactoryConf(&thrift.TConfiguration{ -// TBinaryStrictRead: thrift.BoolPtr(true), -// TBinaryStrictWrite: thrift.BoolPtr(true), -// }) +// // bad example, DO NOT USE +// socket := thrift.NewTSocketConf("host:port", &thrift.TConfiguration{ +// ConnectTimeout: time.Second, +// SocketTimeout: time.Second, +// }) +// transFactory := thrift.NewTFramedTransportFactoryConf( +// thrift.NewTTransportFactory(), +// &thrift.TConfiguration{ +// MaxFrameSize: 1024 * 1024 * 256, +// }, +// ) +// protoFactory := thrift.NewTBinaryProtocolFactoryConf(&thrift.TConfiguration{ +// TBinaryStrictRead: thrift.BoolPtr(true), +// TBinaryStrictWrite: thrift.BoolPtr(true), +// }) // // This is the correct way to do it: // -// conf := &thrift.TConfiguration{ -// ConnectTimeout: time.Second, -// SocketTimeout: time.Second, +// conf := &thrift.TConfiguration{ +// ConnectTimeout: time.Second, +// SocketTimeout: time.Second, // -// MaxFrameSize: 1024 * 1024 * 256, +// MaxFrameSize: 1024 * 1024 * 256, // -// TBinaryStrictRead: thrift.BoolPtr(true), -// TBinaryStrictWrite: thrift.BoolPtr(true), -// } -// sccket := thrift.NewTSocketConf("host:port", conf) -// transFactory := thrift.NewTFramedTransportFactoryConf(thrift.NewTTransportFactory(), conf) -// protoFactory := thrift.NewTBinaryProtocolFactoryConf(conf) +// TBinaryStrictRead: thrift.BoolPtr(true), +// TBinaryStrictWrite: thrift.BoolPtr(true), +// } +// socket := thrift.NewTSocketConf("host:port", conf) +// transFactory := thrift.NewTFramedTransportFactoryConf(thrift.NewTTransportFactory(), conf) +// protoFactory := 
thrift.NewTBinaryProtocolFactoryConf(conf) // // [1]: https://github.com/apache/thrift/blob/master/doc/specs/thrift-tconfiguration.md type TConfiguration struct { @@ -132,6 +132,8 @@ type TConfiguration struct { // THeaderProtocolIDPtr and THeaderProtocolIDPtrMust helper functions // are provided to help filling this value. THeaderProtocolID *THeaderProtocolID + // The write transforms to be applied to THeaderTransport. + THeaderTransforms []THeaderTransformID // Used internally by deprecated constructors, to avoid overriding // underlying TTransport/TProtocol's cfg by accidental propagations. @@ -245,6 +247,18 @@ func (tc *TConfiguration) GetTHeaderProtocolID() THeaderProtocolID { return protoID } +// GetTHeaderTransforms returns the THeaderTransformIDs to be applied on +// THeaderTransport writing. +// +// It's nil-safe. If tc is nil, empty slice will be returned (meaning no +// transforms to be applied). +func (tc *TConfiguration) GetTHeaderTransforms() []THeaderTransformID { + if tc == nil { + return nil + } + return tc.THeaderTransforms +} + // THeaderProtocolIDPtr validates and returns the pointer to id. // // If id is not a valid THeaderProtocolID, a pointer to THeaderProtocolDefault diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/exception.go b/vendor/github.com/apache/thrift/lib/go/thrift/exception.go index e2f1728eac5..5b4cad96faa 100644 --- a/vendor/github.com/apache/thrift/lib/go/thrift/exception.go +++ b/vendor/github.com/apache/thrift/lib/go/thrift/exception.go @@ -121,20 +121,20 @@ var _ TException = wrappedTException{} // // For a endpoint defined in thrift IDL like this: // -// service MyService { -// FooResponse foo(1: FooRequest request) throws ( -// 1: Exception1 error1, -// 2: Exception2 error2, -// ) -// } +// service MyService { +// FooResponse foo(1: FooRequest request) throws ( +// 1: Exception1 error1, +// 2: Exception2 error2, +// ) +// } // // The thrift compiler generated go code for the result TStruct would be like: // -// type MyServiceFooResult struct { -// Success *FooResponse `thrift:"success,0" db:"success" json:"success,omitempty"` -// Error1 *Exception1 `thrift:"error1,1" db:"error1" json:"error1,omitempty"` -// Error2 *Exception2 `thrift:"error2,2" db:"error2" json:"error2,omitempty"` -// } +// type MyServiceFooResult struct { +// Success *FooResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +// Error1 *Exception1 `thrift:"error1,1" db:"error1" json:"error1,omitempty"` +// Error2 *Exception2 `thrift:"error2,2" db:"error2" json:"error2,omitempty"` +// } // // And this function extracts the first non-nil exception out of // *MyServiceFooResult. @@ -144,7 +144,7 @@ func ExtractExceptionFromResult(result TStruct) error { return nil } typ := v.Type() - for i := 0; i < v.NumField(); i++ { + for i := range v.NumField() { if typ.Field(i).Name == "Success" { continue } diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/header_protocol.go b/vendor/github.com/apache/thrift/lib/go/thrift/header_protocol.go index 36777b4ca00..bec84b85c49 100644 --- a/vendor/github.com/apache/thrift/lib/go/thrift/header_protocol.go +++ b/vendor/github.com/apache/thrift/lib/go/thrift/header_protocol.go @@ -119,6 +119,11 @@ func (p *THeaderProtocol) ClearWriteHeaders() { } // AddTransform add a transform for writing. +// +// Deprecated: This only applies to the next message written, and the next read +// message will cause write transforms to be reset from what's configured in +// TConfiguration. 
For sticky transforms, use TConfiguration.THeaderTransforms +// instead. func (p *THeaderProtocol) AddTransform(transform THeaderTransformID) error { return p.transport.AddTransform(transform) } diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/header_transport.go b/vendor/github.com/apache/thrift/lib/go/thrift/header_transport.go index 3aea5a9888d..d6d64160af3 100644 --- a/vendor/github.com/apache/thrift/lib/go/thrift/header_transport.go +++ b/vendor/github.com/apache/thrift/lib/go/thrift/header_transport.go @@ -128,7 +128,7 @@ var _ io.ReadCloser = (*TransformReader)(nil) // // If you don't know the closers capacity beforehand, just use // -// &TransformReader{Reader: baseReader} +// &TransformReader{Reader: baseReader} // // instead would be sufficient. func NewTransformReaderWithCapacity(baseReader io.Reader, capacity int) *TransformReader { @@ -151,6 +151,11 @@ func (tr *TransformReader) Close() error { } // AddTransform adds a transform. +// +// Deprecated: This only applies to the next message written, and the next read +// message will cause write transforms to be reset from what's configured in +// TConfiguration. For sticky transforms, use TConfiguration.THeaderTransforms +// instead. func (tr *TransformReader) AddTransform(id THeaderTransformID) error { switch id { default: @@ -206,6 +211,25 @@ func (tw *TransformWriter) Close() error { return nil } +var zlibDefaultLevelWriterPool = newPool( + func() *zlib.Writer { + return zlib.NewWriter(nil) + }, + nil, +) + +type zlibPoolCloser struct { + writer *zlib.Writer +} + +func (z *zlibPoolCloser) Close() error { + defer func() { + z.writer.Reset(nil) + zlibDefaultLevelWriterPool.put(&z.writer) + }() + return z.writer.Close() +} + // AddTransform adds a transform. func (tw *TransformWriter) AddTransform(id THeaderTransformID) error { switch id { @@ -217,9 +241,12 @@ func (tw *TransformWriter) AddTransform(id THeaderTransformID) error { case TransformNone: // no-op case TransformZlib: - writeCloser := zlib.NewWriter(tw.Writer) + writeCloser := zlibDefaultLevelWriterPool.get() + writeCloser.Reset(tw.Writer) tw.Writer = writeCloser - tw.closers = append(tw.closers, writeCloser) + tw.closers = append(tw.closers, &zlibPoolCloser{ + writer: writeCloser, + }) } return nil } @@ -300,11 +327,12 @@ func NewTHeaderTransportConf(trans TTransport, conf *TConfiguration) *THeaderTra } PropagateTConfiguration(trans, conf) return &THeaderTransport{ - transport: trans, - reader: bufio.NewReader(trans), - writeHeaders: make(THeaderMap), - protocolID: conf.GetTHeaderProtocolID(), - cfg: conf, + transport: trans, + reader: bufio.NewReader(trans), + writeHeaders: make(THeaderMap), + writeTransforms: conf.GetTHeaderTransforms(), + protocolID: conf.GetTHeaderProtocolID(), + cfg: conf, } } @@ -449,6 +477,11 @@ func (t *THeaderTransport) parseHeaders(ctx context.Context, frameSize uint32) e } t.protocolID = THeaderProtocolID(protoID) + // Reset writeTransforms to the ones from cfg, as we are going to add + // compression transforms from what we read, we don't want to accumulate + // different transforms read from different requests + t.writeTransforms = t.cfg.GetTHeaderTransforms() + var transformCount int32 transformCount, err = hp.readVarint32() if err != nil { @@ -461,12 +494,21 @@ func (t *THeaderTransport) parseHeaders(ctx context.Context, frameSize uint32) e ) t.frameReader = reader transformIDs := make([]THeaderTransformID, transformCount) - for i := 0; i < int(transformCount); i++ { + for i := range int(transformCount) { id, err := 
hp.readVarint32() if err != nil { return err } - transformIDs[i] = THeaderTransformID(id) + tID := THeaderTransformID(id) + transformIDs[i] = tID + + // For compression transforms, we should also add them + // to writeTransforms so that the response (assuming we + // are reading a request) would do the same compression. + switch tID { + case TransformZlib: + t.addWriteTransformsDedupe(tID) + } } // The transform IDs on the wire was added based on the order of // writing, so on the reading side we need to reverse the order. @@ -494,7 +536,7 @@ func (t *THeaderTransport) parseHeaders(ctx context.Context, frameSize uint32) e if err != nil { return err } - for i := 0; i < int(count); i++ { + for range int(count) { key, err := hp.ReadString(ctx) if err != nil { return err @@ -544,7 +586,7 @@ func (t *THeaderTransport) Read(p []byte) (read int, err error) { // the last Read finished the frame, do endOfFrame // handling here. err = t.endOfFrame() - } else if err == io.EOF { + } else if errors.Is(err, io.EOF) { err = t.endOfFrame() if err != nil { return @@ -726,6 +768,9 @@ func (t *THeaderTransport) ClearWriteHeaders() { } // AddTransform add a transform for writing. +// +// NOTE: This is provided as a low-level API, but in general you should use +// TConfiguration.THeaderTransforms to set transforms for writing instead. func (t *THeaderTransport) AddTransform(transform THeaderTransformID) error { if !supportedTransformIDs[transform] { return NewTProtocolExceptionWithType( @@ -758,6 +803,17 @@ func (t *THeaderTransport) isFramed() bool { } } +// addWriteTransformsDedupe adds id to writeTransforms only if it's not already +// there. +func (t *THeaderTransport) addWriteTransformsDedupe(id THeaderTransformID) { + for _, existingID := range t.writeTransforms { + if existingID == id { + return + } + } + t.writeTransforms = append(t.writeTransforms, id) +} + // SetTConfiguration implements TConfigurationSetter. 
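// A minimal usage sketch of the sticky write transforms introduced above,
// assuming an existing TTransport value. With TConfiguration.THeaderTransforms
// set, every written message gets the zlib transform, without the deprecated
// per-message AddTransform calls:

func newZlibHeaderTransport(base TTransport) *THeaderTransport {
	conf := &TConfiguration{
		THeaderTransforms: []THeaderTransformID{TransformZlib},
	}
	return NewTHeaderTransportConf(base, conf)
}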
func (t *THeaderTransport) SetTConfiguration(cfg *TConfiguration) { PropagateTConfiguration(t.transport, cfg) diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/protocol.go b/vendor/github.com/apache/thrift/lib/go/thrift/protocol.go index 2ee14caaad1..68cfe4aaa25 100644 --- a/vendor/github.com/apache/thrift/lib/go/thrift/protocol.go +++ b/vendor/github.com/apache/thrift/lib/go/thrift/protocol.go @@ -146,7 +146,7 @@ func Skip(ctx context.Context, self TProtocol, fieldType TType, maxDepth int) (e if err != nil { return err } - for i := 0; i < size; i++ { + for range size { err := Skip(ctx, self, keyType, maxDepth-1) if err != nil { return err @@ -163,7 +163,7 @@ func Skip(ctx context.Context, self TProtocol, fieldType TType, maxDepth int) (e if err != nil { return err } - for i := 0; i < size; i++ { + for range size { err := Skip(ctx, self, elemType, maxDepth-1) if err != nil { return err @@ -175,7 +175,7 @@ func Skip(ctx context.Context, self TProtocol, fieldType TType, maxDepth int) (e if err != nil { return err } - for i := 0; i < size; i++ { + for range size { err := Skip(ctx, self, elemType, maxDepth-1) if err != nil { return err diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/simple_json_protocol.go b/vendor/github.com/apache/thrift/lib/go/thrift/simple_json_protocol.go index 8b1284fd1b1..ec12991a1d7 100644 --- a/vendor/github.com/apache/thrift/lib/go/thrift/simple_json_protocol.go +++ b/vendor/github.com/apache/thrift/lib/go/thrift/simple_json_protocol.go @@ -30,6 +30,7 @@ import ( "io" "math" "strconv" + "strings" ) type _ParseContext int @@ -922,15 +923,7 @@ func (p *TSimpleJSONProtocol) ParseStringBody() (string, error) { if err != nil { return "", NewTProtocolException(err) } - l := len(line) - // count number of escapes to see if we need to keep going - i := 1 - for ; i < l; i++ { - if line[l-i-1] != '\\' { - break - } - } - if i&0x01 == 1 { + if endsWithoutEscapedQuote(line) { v, ok := jsonUnquote(string(JSON_QUOTE) + line) if !ok { return "", NewTProtocolException(err) @@ -951,27 +944,29 @@ func (p *TSimpleJSONProtocol) ParseStringBody() (string, error) { } func (p *TSimpleJSONProtocol) ParseQuotedStringBody() (string, error) { - line, err := p.reader.ReadString(JSON_QUOTE) - if err != nil { - return "", NewTProtocolException(err) + var sb strings.Builder + + for { + line, err := p.reader.ReadString(JSON_QUOTE) + if err != nil { + return "", NewTProtocolException(err) + } + sb.WriteString(line) + if endsWithoutEscapedQuote(line) { + return sb.String(), nil + } } - l := len(line) - // count number of escapes to see if we need to keep going +} + +func endsWithoutEscapedQuote(s string) bool { + l := len(s) i := 1 for ; i < l; i++ { - if line[l-i-1] != '\\' { + if s[l-i-1] != '\\' { break } } - if i&0x01 == 1 { - return line, nil - } - s, err := p.ParseQuotedStringBody() - if err != nil { - return "", NewTProtocolException(err) - } - v := line + s - return v, nil + return i&0x01 == 1 } func (p *TSimpleJSONProtocol) ParseBase64EncodedBody() ([]byte, error) { @@ -1200,7 +1195,7 @@ func (p *TSimpleJSONProtocol) readNumeric() (Numeric, error) { for continueFor { c, err := p.reader.ReadByte() if err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { break } return NUMERIC_NULL, NewTProtocolException(err) @@ -1311,7 +1306,7 @@ func (p *TSimpleJSONProtocol) readNumeric() (Numeric, error) { // Safely peeks into the buffer, reading only what is necessary func (p *TSimpleJSONProtocol) safePeekContains(b []byte) bool { - for i := 0; i < len(b); i++ { + for i := 
range b { a, _ := p.reader.Peek(i + 1) if len(a) < (i+1) || a[i] != b[i] { return false diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/ssl_server_socket.go b/vendor/github.com/apache/thrift/lib/go/thrift/ssl_server_socket.go index 907afca326f..3f05ad93db6 100644 --- a/vendor/github.com/apache/thrift/lib/go/thrift/ssl_server_socket.go +++ b/vendor/github.com/apache/thrift/lib/go/thrift/ssl_server_socket.go @@ -93,6 +93,9 @@ func (p *TSSLServerSocket) Open() error { } func (p *TSSLServerSocket) Addr() net.Addr { + if p.listener != nil { + return p.listener.Addr() + } return p.addr } diff --git a/vendor/github.com/aws/aws-msk-iam-sasl-signer-go/LICENSE b/vendor/github.com/aws/aws-msk-iam-sasl-signer-go/LICENSE new file mode 100644 index 00000000000..67db8588217 --- /dev/null +++ b/vendor/github.com/aws/aws-msk-iam-sasl-signer-go/LICENSE @@ -0,0 +1,175 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
diff --git a/vendor/github.com/aws/aws-msk-iam-sasl-signer-go/NOTICE b/vendor/github.com/aws/aws-msk-iam-sasl-signer-go/NOTICE new file mode 100644 index 00000000000..616fc588945 --- /dev/null +++ b/vendor/github.com/aws/aws-msk-iam-sasl-signer-go/NOTICE @@ -0,0 +1 @@ +Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. diff --git a/vendor/github.com/aws/aws-msk-iam-sasl-signer-go/signer/msk_auth_token_provider.go b/vendor/github.com/aws/aws-msk-iam-sasl-signer-go/signer/msk_auth_token_provider.go new file mode 100644 index 00000000000..1c2670db2fa --- /dev/null +++ b/vendor/github.com/aws/aws-msk-iam-sasl-signer-go/signer/msk_auth_token_provider.go @@ -0,0 +1,305 @@ +package signer + +import ( + "context" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "fmt" + + "log" + "net/http" + "net/url" + "runtime" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/service/sts" +) + +const ( + ActionType = "Action" // ActionType represents the key for the action type in the request. + ActionName = "kafka-cluster:Connect" // ActionName represents the specific action name for connecting to a Kafka cluster. + SigningName = "kafka-cluster" // SigningName represents the signing name for the Kafka cluster. + UserAgentKey = "User-Agent" // UserAgentKey represents the key for the User-Agent parameter in the request. + LibName = "aws-msk-iam-sasl-signer-go" // LibName represents the name of the library. + ExpiresQueryKey = "X-Amz-Expires" // ExpiresQueryKey represents the key for the expiration time in the query parameters. + DefaultSessionName = "MSKSASLDefaultSession" // DefaultSessionName represents the default session name for assuming a role. + DefaultExpirySeconds = 900 // DefaultExpirySeconds represents the default expiration time in seconds. +) + +var ( + endpointURLTemplate = "kafka.%s.amazonaws.com" // endpointURLTemplate represents the template for the Kafka endpoint URL + AwsDebugCreds = false // AwsDebugCreds flag indicates whether credentials should be debugged +) + +// GenerateAuthToken generates base64 encoded signed url as auth token from default credentials. +// Loads the IAM credentials from default credentials provider chain. +func GenerateAuthToken(ctx context.Context, region string) (string, int64, error) { + credentials, err := loadDefaultCredentials(ctx, region) + + if err != nil { + return "", 0, fmt.Errorf("failed to load credentials: %w", err) + } + + return constructAuthToken(ctx, region, credentials) +} + +// GenerateAuthTokenFromProfile generates base64 encoded signed url as auth token by loading IAM credentials from an AWS named profile. 
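+
+// Example (editorial note, illustrative only — not part of the vendored
+// source): a minimal sketch of producing an MSK SASL/OAUTHBEARER token with
+// GenerateAuthToken above. The region value is a placeholder.
+//
+//	token, expiryMs, err := signer.GenerateAuthToken(context.TODO(), "us-east-1")
+//	if err != nil {
+//		log.Fatalf("failed to generate auth token: %v", err)
+//	}
+//	// token is the raw-URL base64 encoding of a presigned URL; expiryMs is
+//	// the token expiry in Unix milliseconds.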
+func GenerateAuthTokenFromProfile(ctx context.Context, region string, awsProfile string) (string, int64, error) { + credentials, err := loadCredentialsFromProfile(ctx, region, awsProfile) + + if err != nil { + return "", 0, fmt.Errorf("failed to load credentials: %w", err) + } + + return constructAuthToken(ctx, region, credentials) +} + +// GenerateAuthTokenFromRole generates base64 encoded signed url as auth token by loading IAM credentials from an aws role Arn +func GenerateAuthTokenFromRole( + ctx context.Context, region string, roleArn string, stsSessionName string, +) (string, int64, error) { + if stsSessionName == "" { + stsSessionName = DefaultSessionName + } + credentials, err := loadCredentialsFromRoleArn(ctx, region, roleArn, stsSessionName) + + if err != nil { + return "", 0, fmt.Errorf("failed to load credentials: %w", err) + } + + return constructAuthToken(ctx, region, credentials) +} + +// GenerateAuthTokenFromCredentialsProvider generates base64 encoded signed url as auth token by loading IAM credentials +// from an aws credentials provider +func GenerateAuthTokenFromCredentialsProvider( + ctx context.Context, region string, credentialsProvider aws.CredentialsProvider, +) (string, int64, error) { + credentials, err := loadCredentialsFromCredentialsProvider(ctx, credentialsProvider) + + if err != nil { + return "", 0, fmt.Errorf("failed to load credentials: %w", err) + } + + return constructAuthToken(ctx, region, credentials) +} + +// Loads credentials from the default credential chain. +func loadDefaultCredentials(ctx context.Context, region string) (*aws.Credentials, error) { + cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion(region)) + + if err != nil { + return nil, fmt.Errorf("unable to load SDK config: %w", err) + } + + return loadCredentialsFromCredentialsProvider(ctx, cfg.Credentials) +} + +// Loads credentials from a named aws profile. +func loadCredentialsFromProfile(ctx context.Context, region string, awsProfile string) (*aws.Credentials, error) { + cfg, err := config.LoadDefaultConfig(ctx, + config.WithRegion(region), + config.WithSharedConfigProfile(awsProfile), + ) + + if err != nil { + return nil, fmt.Errorf("unable to load SDK config: %w", err) + } + + return loadCredentialsFromCredentialsProvider(ctx, cfg.Credentials) +} + +// Loads credentials by assuming the passed role. +// This implementation creates a new sts client for every call to get or refresh the token. In order to avoid this, please +// use your own credentials provider. +// If you wish to use a regional endpoint, please pass your own credentials provider.
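+
+// Example (editorial, illustrative): callers that already hold credentials can
+// skip the default chain via GenerateAuthTokenFromCredentialsProvider.
+// NewStaticCredentialsProvider is from the aws-sdk-go-v2 credentials package
+// imported above; the key values are placeholders.
+//
+//	provider := credentials.NewStaticCredentialsProvider("AKID", "SECRET", "")
+//	token, _, err := signer.GenerateAuthTokenFromCredentialsProvider(
+//		context.TODO(), "us-east-1", provider)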
+func loadCredentialsFromRoleArn( + ctx context.Context, region string, roleArn string, stsSessionName string, +) (*aws.Credentials, error) { + cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion(region)) + + if err != nil { + return nil, fmt.Errorf("unable to load SDK config: %w", err) + } + + stsClient := sts.NewFromConfig(cfg) + + assumeRoleInput := &sts.AssumeRoleInput{ + RoleArn: aws.String(roleArn), + RoleSessionName: aws.String(stsSessionName), + } + assumeRoleOutput, err := stsClient.AssumeRole(ctx, assumeRoleInput) + if err != nil { + return nil, fmt.Errorf("unable to assume role, %s: %w", roleArn, err) + } + + //Create new aws.Credentials instance using the credentials from AssumeRoleOutput.Credentials + creds := aws.Credentials{ + AccessKeyID: *assumeRoleOutput.Credentials.AccessKeyId, + SecretAccessKey: *assumeRoleOutput.Credentials.SecretAccessKey, + SessionToken: *assumeRoleOutput.Credentials.SessionToken, + } + + return &creds, nil +} + +// Loads credentials from the credentials provider +func loadCredentialsFromCredentialsProvider( + ctx context.Context, credentialsProvider aws.CredentialsProvider, +) (*aws.Credentials, error) { + creds, err := credentialsProvider.Retrieve(ctx) + return &creds, err +} + +// Constructs Auth Token. +func constructAuthToken(ctx context.Context, region string, credentials *aws.Credentials) (string, int64, error) { + endpointURL := fmt.Sprintf(endpointURLTemplate, region) + + if credentials == nil || credentials.AccessKeyID == "" || credentials.SecretAccessKey == "" { + return "", 0, fmt.Errorf("aws credentials cannot be empty") + } + + if AwsDebugCreds { + logCallerIdentity(ctx, region, *credentials) + } + + req, err := buildRequest(DefaultExpirySeconds, endpointURL) + if err != nil { + return "", 0, fmt.Errorf("failed to build request for signing: %w", err) + } + + signedURL, err := signRequest(ctx, req, region, credentials) + if err != nil { + return "", 0, fmt.Errorf("failed to sign request with aws sig v4: %w", err) + } + + expirationTimeMs, err := getExpirationTimeMs(signedURL) + if err != nil { + return "", 0, fmt.Errorf("failed to extract expiration from signed url: %w", err) + } + + signedURLWithUserAgent, err := addUserAgent(signedURL) + if err != nil { + return "", 0, fmt.Errorf("failed to add user agent to the signed url: %w", err) + } + + return base64Encode(signedURLWithUserAgent), expirationTimeMs, nil +} + +// Build https request with query parameters in order to sign. +func buildRequest(expirySeconds int, endpointURL string) (*http.Request, error) { + query := url.Values{ + ActionType: {ActionName}, + ExpiresQueryKey: {strconv.FormatInt(int64(expirySeconds), 10)}, + } + + authURL := url.URL{ + Host: endpointURL, + Scheme: "https", + Path: "/", + RawQuery: query.Encode(), + } + + return http.NewRequest(http.MethodGet, authURL.String(), nil) +} + +// Sign request with aws sig v4. 
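+
+// Example (editorial, illustrative): because the token is just the presigned
+// URL encoded with base64.RawURLEncoding (see base64Encode below), it can be
+// decoded when debugging:
+//
+//	raw, err := base64.RawURLEncoding.DecodeString(token)
+//	// raw is an https URL against kafka.<region>.amazonaws.com carrying the
+//	// Action=kafka-cluster:Connect and X-Amz-* signature query parameters.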
+func signRequest(ctx context.Context, req *http.Request, region string, credentials *aws.Credentials) (string, error) { + signer := v4.NewSigner() + signedURL, _, err := signer.PresignHTTP(ctx, *credentials, req, + calculateSHA256Hash(""), + SigningName, + region, + time.Now().UTC(), + ) + + return signedURL, err +} + +// Parses the URL and gets the expiration time in millis associated with the signed url +func getExpirationTimeMs(signedURL string) (int64, error) { + parsedURL, err := url.Parse(signedURL) + + if err != nil { + return 0, fmt.Errorf("failed to parse the signed url: %w", err) + } + + params := parsedURL.Query() + date, err := time.Parse("20060102T150405Z", params.Get("X-Amz-Date")) + + if err != nil { + return 0, fmt.Errorf("failed to parse the 'X-Amz-Date' param from signed url: %w", err) + } + + signingTimeMs := date.UnixNano() / int64(time.Millisecond) + expiryDurationSeconds, err := strconv.ParseInt(params.Get("X-Amz-Expires"), 10, 64) + + if err != nil { + return 0, fmt.Errorf("failed to parse the 'X-Amz-Expires' param from signed url: %w", err) + } + + expiryDurationMs := expiryDurationSeconds * 1000 + expiryMs := signingTimeMs + expiryDurationMs + return expiryMs, nil +} + +// Calculate sha256Hash and hex encode it. +func calculateSHA256Hash(input string) string { + hash := sha256.Sum256([]byte(input)) + return hex.EncodeToString(hash[:]) +} + +// Base64 encode with raw url encoding. +func base64Encode(signedURL string) string { + signedURLBytes := []byte(signedURL) + return base64.RawURLEncoding.EncodeToString(signedURLBytes) +} + +// Add user agent to the signed url +func addUserAgent(signedURL string) (string, error) { + parsedSignedURL, err := url.Parse(signedURL) + + if err != nil { + return "", fmt.Errorf("failed to parse signed url: %w", err) + } + + query := parsedSignedURL.Query() + userAgent := strings.Join([]string{LibName, version, runtime.Version()}, "/") + query.Set(UserAgentKey, userAgent) + parsedSignedURL.RawQuery = query.Encode() + + return parsedSignedURL.String(), nil +} + +// Log caller identity to debug which credentials are being picked up +func logCallerIdentity(ctx context.Context, region string, awsCredentials aws.Credentials) { + cfg, err := config.LoadDefaultConfig(ctx, + config.WithRegion(region), + config.WithCredentialsProvider(credentials.StaticCredentialsProvider{ + Value: awsCredentials, + }), + ) + if err != nil { + log.Printf("failed to load AWS configuration: %v", err) + return // cannot build an STS client from an invalid config + } + + stsClient := sts.NewFromConfig(cfg) + + callerIdentity, err := stsClient.GetCallerIdentity(ctx, &sts.GetCallerIdentityInput{}) + + if err != nil { + log.Printf("failed to get caller identity: %v", err) + return // callerIdentity is nil on error; avoid a nil dereference below + } + + log.Printf("Credentials Identity: {UserId: %s, Account: %s, Arn: %s}\n", + *callerIdentity.UserId, + *callerIdentity.Account, + *callerIdentity.Arn) +} diff --git a/vendor/github.com/aws/aws-msk-iam-sasl-signer-go/signer/version.go b/vendor/github.com/aws/aws-msk-iam-sasl-signer-go/signer/version.go new file mode 100644 index 00000000000..d723e9085de --- /dev/null +++ b/vendor/github.com/aws/aws-msk-iam-sasl-signer-go/signer/version.go @@ -0,0 +1,3 @@ +package signer + +const version = "1.0.0" diff --git a/vendor/go.opentelemetry.io/collector/config/internal/LICENSE b/vendor/github.com/aws/aws-sdk-go-v2/LICENSE.txt similarity index 100% rename from vendor/go.opentelemetry.io/collector/config/internal/LICENSE rename to vendor/github.com/aws/aws-sdk-go-v2/LICENSE.txt diff --git a/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt
b/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt new file mode 100644 index 00000000000..899129ecc46 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt @@ -0,0 +1,3 @@ +AWS SDK for Go +Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Copyright 2014-2015 Stripe, Inc. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go new file mode 100644 index 00000000000..b361c13867c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go @@ -0,0 +1,197 @@ +package aws + +import ( + "net/http" + + smithybearer "github.com/aws/smithy-go/auth/bearer" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" +) + +// HTTPClient provides the interface to provide custom HTTPClients. Generally +// *http.Client is sufficient for most use cases. The HTTPClient should not +// follow 301 or 302 redirects. +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +// A Config provides service configuration for service clients. +type Config struct { + // The region to send requests to. This parameter is required and must + // be configured globally or on a per-client basis unless otherwise + // noted. A full list of regions is found in the "Regions and Endpoints" + // document. + // + // See http://docs.aws.amazon.com/general/latest/gr/rande.html for + // information on AWS regions. + Region string + + // The credentials object to use when signing requests. + // Use the LoadDefaultConfig to load configuration from all the SDK's supported + // sources, and resolve credentials using the SDK's default credential chain. + Credentials CredentialsProvider + + // The Bearer Authentication token provider to use for authenticating API + // operation calls with a Bearer Authentication token. The API clients and + // operations must support the Bearer Authentication scheme in order for the + // token provider to be used. API clients created with NewFromConfig will + // automatically be configured with this option, if the API client supports + // Bearer Authentication. + // + // The SDK's config.LoadDefaultConfig can automatically populate this + // option for external configuration options such as SSO session. + // https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html + BearerAuthTokenProvider smithybearer.TokenProvider + + // The HTTP Client the SDK's API clients will use to invoke HTTP requests. + // The SDK defaults to a BuildableClient allowing API clients to create + // copies of the HTTP Client for service specific customizations. + // + // Use a (*http.Client) for custom behavior. Using a custom http.Client + // will prevent the SDK from modifying the HTTP client. + HTTPClient HTTPClient + + // An endpoint resolver that can be used to provide or override an endpoint + // for the given service and region. + // + // See the `aws.EndpointResolver` documentation for additional usage + // information. + // + // Deprecated: See Config.EndpointResolverWithOptions + EndpointResolver EndpointResolver + + // An endpoint resolver that can be used to provide or override an endpoint + // for the given service and region. + // + // When EndpointResolverWithOptions is specified, it will be used by a + // service client rather than using EndpointResolver if also specified. + // + // See the `aws.EndpointResolverWithOptions` documentation for additional + // usage information.
+ // +// Deprecated: with the release of endpoint resolution v2 in API clients, +// EndpointResolver and EndpointResolverWithOptions are deprecated. +// Providing a value for this field will likely prevent you from using +// newer endpoint-related service features. See API client options +// EndpointResolverV2 and BaseEndpoint. +EndpointResolverWithOptions EndpointResolverWithOptions + + // RetryMaxAttempts specifies the maximum number of attempts an API client + // will make when calling an operation that fails with a retryable error. + // + // API Clients will only use this value to construct a retryer if the + // Config.Retryer member is nil. This value will be ignored if + // Retryer is not nil. + RetryMaxAttempts int + + // RetryMode specifies the retry model the API client will be created with. + // + // API Clients will only use this value to construct a retryer if the + // Config.Retryer member is nil. This value will be ignored if + // Retryer is not nil. + RetryMode RetryMode + + // Retryer is a function that provides a Retryer implementation. A Retryer + // guides how HTTP requests should be retried in case of recoverable + // failures. When nil the API client will use a default retryer. + // + // In general, the provider function should return a new instance of a + // Retryer if you are attempting to provide a consistent Retryer + // configuration across all clients. This will ensure that each client will + // be provided a new instance of the Retryer implementation, and will avoid + // issues such as sharing the same retry token bucket across services. + // + // If not nil, RetryMaxAttempts and RetryMode will be ignored by API + // clients. + Retryer func() Retryer + + // ConfigSources are the sources that were used to construct the Config. + // Allows for additional configuration to be loaded by clients. + ConfigSources []interface{} + + // APIOptions provides the set of middleware mutations that modify how the API + // client requests will be handled. This is useful for adding additional + // tracing data to a request, or changing behavior of the SDK's client. + APIOptions []func(*middleware.Stack) error + + // The logger writer interface to write logging messages to. Defaults to + // standard error. + Logger logging.Logger + + // Configures the events that will be sent to the configured logger. This + // can be used to configure the logging of signing, retries, request, and + // responses of the SDK clients. + // + // See the ClientLogMode type documentation for the complete set of logging + // modes and available configuration. + ClientLogMode ClientLogMode + + // The configured DefaultsMode. If not specified, service clients will + // default to legacy. + // + // Supported modes are: auto, cross-region, in-region, legacy, mobile, + // standard + DefaultsMode DefaultsMode + + // The RuntimeEnvironment configuration, only populated if the DefaultsMode + // is set to DefaultsModeAuto and is initialized by + // `config.LoadDefaultConfig`. You should not populate this structure + // programmatically, or rely on the values here within your applications. + RuntimeEnvironment RuntimeEnvironment + + // AppId is an optional application-specific identifier that can be set. + // When set, it will be appended to the User-Agent header of every request + // in the form of App/{AppId}. This variable is sourced from environment + // variable AWS_SDK_UA_APP_ID or the shared config profile attribute sdk_ua_app_id.
+ // See https://docs.aws.amazon.com/sdkref/latest/guide/settings-reference.html for + // more information on environment variables and shared config settings. + AppID string + + // BaseEndpoint is an intermediary transfer location to a service specific + // BaseEndpoint on a service's Options. + BaseEndpoint *string +} + +// NewConfig returns a new Config pointer that can be chained with builder +// methods to set multiple configuration values inline without using pointers. +func NewConfig() *Config { + return &Config{} } + +// Copy will return a shallow copy of the Config object. +func (c Config) Copy() Config { + cp := c + return cp +} + +// EndpointDiscoveryEnableState indicates if endpoint discovery is in an +// enabled, disabled, auto, or unset state. +// +// Default behavior (Auto or Unset) indicates operations that require endpoint +// discovery will use Endpoint Discovery by default. Operations that +// optionally use Endpoint Discovery will not use Endpoint Discovery +// unless EndpointDiscovery is explicitly enabled. +type EndpointDiscoveryEnableState uint + +// Enumeration values for EndpointDiscoveryEnableState +const ( + // EndpointDiscoveryUnset represents EndpointDiscoveryEnableState is unset. + // Users do not need to use this value explicitly. The behavior for unset + // is the same as for EndpointDiscoveryAuto. + EndpointDiscoveryUnset EndpointDiscoveryEnableState = iota + + // EndpointDiscoveryAuto represents an AUTO state that allows endpoint + // discovery only when required by the api. This is the default + // configuration resolved by the client if endpoint discovery is neither + // enabled nor disabled. + EndpointDiscoveryAuto // default state + + // EndpointDiscoveryDisabled indicates client MUST not perform endpoint + // discovery even when required. + EndpointDiscoveryDisabled + + // EndpointDiscoveryEnabled indicates client MUST always perform endpoint + // discovery if supported for the operation. + EndpointDiscoveryEnabled +) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/context.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/context.go new file mode 100644 index 00000000000..4d8e26ef321 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/context.go @@ -0,0 +1,22 @@ +package aws + +import ( + "context" + "time" +) + +type suppressedContext struct { + context.Context +} + +func (s *suppressedContext) Deadline() (deadline time.Time, ok bool) { + return time.Time{}, false +} + +func (s *suppressedContext) Done() <-chan struct{} { + return nil +} + +func (s *suppressedContext) Err() error { + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go new file mode 100644 index 00000000000..781ac0ae2c0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go @@ -0,0 +1,224 @@ +package aws + +import ( + "context" + "fmt" + "sync/atomic" + "time" + + sdkrand "github.com/aws/aws-sdk-go-v2/internal/rand" + "github.com/aws/aws-sdk-go-v2/internal/sync/singleflight" +) + +// CredentialsCacheOptions are the options for configuring a CredentialsCache. +type CredentialsCacheOptions struct { + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause requests to fail unexpectedly + // due to ExpiredTokenException exceptions.
+ // +// An ExpiryWindow of 10s would cause calls to IsExpired() to return true +// 10 seconds before the credentials are actually expired. This can cause an +// increased number of requests to refresh the credentials to occur. +// +// If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration + + // ExpiryWindowJitterFrac provides a mechanism for randomizing the + // expiration of credentials within the configured ExpiryWindow by a random + // percentage. Valid values are between 0.0 and 1.0. + // + // As an example, if ExpiryWindow is 60 seconds and ExpiryWindowJitterFrac + // is 0.5 then credentials will be set to expire between 30 and 60 seconds + // prior to their actual expiration time. + // + // If ExpiryWindow is 0 or less then ExpiryWindowJitterFrac is ignored. + // If ExpiryWindowJitterFrac is 0 then no randomization will be applied to the window. + // If ExpiryWindowJitterFrac < 0 the value will be treated as 0. + // If ExpiryWindowJitterFrac > 1 the value will be treated as 1. + ExpiryWindowJitterFrac float64 +} + +// CredentialsCache provides caching and concurrency safe credentials retrieval +// via the provider's retrieve method. +// +// CredentialsCache will look for optional interfaces on the Provider to adjust +// how the credential cache handles credentials caching. +// +// - HandleFailRefreshCredentialsCacheStrategy - Allows provider to handle +// credential refresh failures. This could return an updated Credentials +// value, or attempt another means of retrieving credentials. +// +// - AdjustExpiresByCredentialsCacheStrategy - Allows provider to adjust how +// the credentials Expires value is modified. This could modify how the Credentials +// Expires is adjusted based on the CredentialsCache ExpiryWindow option. +// For example, providing a floor below which the Expires value will not be reduced. +type CredentialsCache struct { + provider CredentialsProvider + + options CredentialsCacheOptions + creds atomic.Value + sf singleflight.Group +} + +// NewCredentialsCache returns a CredentialsCache that wraps provider. Provider +// is expected to not be nil. A variadic list of one or more functions can be +// provided to modify the CredentialsCache configuration. This allows for +// configuration of credential expiry window and jitter. +func NewCredentialsCache(provider CredentialsProvider, optFns ...func(options *CredentialsCacheOptions)) *CredentialsCache { + options := CredentialsCacheOptions{} + + for _, fn := range optFns { + fn(&options) + } + + if options.ExpiryWindow < 0 { + options.ExpiryWindow = 0 + } + + if options.ExpiryWindowJitterFrac < 0 { + options.ExpiryWindowJitterFrac = 0 + } else if options.ExpiryWindowJitterFrac > 1 { + options.ExpiryWindowJitterFrac = 1 + } + + return &CredentialsCache{ + provider: provider, + options: options, + } +} + +// Retrieve returns the credentials. If the credentials have already been +// retrieved, and not expired, the cached credentials will be returned. If the +// credentials have not been retrieved yet, or expired, the provider's Retrieve +// method will be called. +// +// Returns an error if the provider's retrieve method returns an error.
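+
+// Example (editorial, illustrative): wrapping a provider in a CredentialsCache
+// so Retrieve is only called when the cached value nears expiry. The provider
+// and option values below are arbitrary stand-ins.
+//
+//	provider := aws.CredentialsProviderFunc(func(ctx context.Context) (aws.Credentials, error) {
+//		return aws.Credentials{AccessKeyID: "AKID", SecretAccessKey: "SECRET",
+//			CanExpire: true, Expires: time.Now().Add(time.Hour)}, nil
+//	})
+//	cache := aws.NewCredentialsCache(provider, func(o *aws.CredentialsCacheOptions) {
+//		o.ExpiryWindow = 10 * time.Second // refresh 10s before expiry
+//		o.ExpiryWindowJitterFrac = 0.5    // randomize within the window
+//	})
+//	creds, err := cache.Retrieve(context.TODO())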
+func (p *CredentialsCache) Retrieve(ctx context.Context) (Credentials, error) { + if creds, ok := p.getCreds(); ok && !creds.Expired() { + return creds, nil + } + + resCh := p.sf.DoChan("", func() (interface{}, error) { + return p.singleRetrieve(&suppressedContext{ctx}) + }) + select { + case res := <-resCh: + return res.Val.(Credentials), res.Err + case <-ctx.Done(): + return Credentials{}, &RequestCanceledError{Err: ctx.Err()} + } +} + +func (p *CredentialsCache) singleRetrieve(ctx context.Context) (interface{}, error) { + currCreds, ok := p.getCreds() + if ok && !currCreds.Expired() { + return currCreds, nil + } + + newCreds, err := p.provider.Retrieve(ctx) + if err != nil { + handleFailToRefresh := defaultHandleFailToRefresh + if cs, ok := p.provider.(HandleFailRefreshCredentialsCacheStrategy); ok { + handleFailToRefresh = cs.HandleFailToRefresh + } + newCreds, err = handleFailToRefresh(ctx, currCreds, err) + if err != nil { + return Credentials{}, fmt.Errorf("failed to refresh cached credentials, %w", err) + } + } + + if newCreds.CanExpire && p.options.ExpiryWindow > 0 { + adjustExpiresBy := defaultAdjustExpiresBy + if cs, ok := p.provider.(AdjustExpiresByCredentialsCacheStrategy); ok { + adjustExpiresBy = cs.AdjustExpiresBy + } + + randFloat64, err := sdkrand.CryptoRandFloat64() + if err != nil { + return Credentials{}, fmt.Errorf("failed to get random provider, %w", err) + } + + var jitter time.Duration + if p.options.ExpiryWindowJitterFrac > 0 { + jitter = time.Duration(randFloat64 * + p.options.ExpiryWindowJitterFrac * float64(p.options.ExpiryWindow)) + } + + newCreds, err = adjustExpiresBy(newCreds, -(p.options.ExpiryWindow - jitter)) + if err != nil { + return Credentials{}, fmt.Errorf("failed to adjust credentials expires, %w", err) + } + } + + p.creds.Store(&newCreds) + return newCreds, nil +} + +// getCreds returns the currently stored credentials and true. It returns false +// if no credentials were stored. +func (p *CredentialsCache) getCreds() (Credentials, bool) { + v := p.creds.Load() + if v == nil { + return Credentials{}, false + } + + c := v.(*Credentials) + if c == nil || !c.HasKeys() { + return Credentials{}, false + } + + return *c, true +} + +// Invalidate will invalidate the cached credentials. The next call to Retrieve +// will cause the provider's Retrieve method to be called. +func (p *CredentialsCache) Invalidate() { + p.creds.Store((*Credentials)(nil)) +} + +// IsCredentialsProvider returns whether the credential provider wrapped by CredentialsCache +// matches the target provider type. +func (p *CredentialsCache) IsCredentialsProvider(target CredentialsProvider) bool { + return IsCredentialsProvider(p.provider, target) +} + +// HandleFailRefreshCredentialsCacheStrategy is an interface for +// CredentialsCache to allow a CredentialsProvider to control how a failed +// credentials refresh is handled. +type HandleFailRefreshCredentialsCacheStrategy interface { + // Given the previously cached Credentials, if any, and refresh error, may + // return a new or modified set of Credentials, or an error. + // + // Credential caches may use default implementation if nil. + HandleFailToRefresh(context.Context, Credentials, error) (Credentials, error) +} + +// defaultHandleFailToRefresh returns the passed in error.
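+
+// Example (editorial, illustrative): a hypothetical provider that opts in to
+// the strategy above by serving the last known-good credentials when a
+// refresh fails. Not part of the vendored source.
+//
+//	type staleTolerantProvider struct{ aws.CredentialsProvider }
+//
+//	func (p staleTolerantProvider) HandleFailToRefresh(
+//		ctx context.Context, prev aws.Credentials, err error,
+//	) (aws.Credentials, error) {
+//		if prev.HasKeys() {
+//			return prev, nil // fall back to the previously cached value
+//		}
+//		return aws.Credentials{}, err
+//	}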
+func defaultHandleFailToRefresh(ctx context.Context, _ Credentials, err error) (Credentials, error) { + return Credentials{}, err +} + +// AdjustExpiresByCredentialsCacheStrategy is an interface for CredentialsCache +// to allow CredentialsProvider to intercept adjustments to Credentials expiry +// based on expectations and use cases of CredentialsProvider. +// +// Credential caches may use default implementation if nil. +type AdjustExpiresByCredentialsCacheStrategy interface { + // Given a Credentials value as input, applies any mutations and + // returns the potentially updated Credentials, or an error. + AdjustExpiresBy(Credentials, time.Duration) (Credentials, error) +} + +// defaultAdjustExpiresBy adds the duration to the passed in credentials Expires, +// and returns the updated credentials value. If Credentials value's CanExpire +// is false, the passed in credentials are returned unchanged. +func defaultAdjustExpiresBy(creds Credentials, dur time.Duration) (Credentials, error) { + if !creds.CanExpire { + return creds, nil + } + + creds.Expires = creds.Expires.Add(dur) + return creds, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go new file mode 100644 index 00000000000..714d4ad85cb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go @@ -0,0 +1,170 @@ +package aws + +import ( + "context" + "fmt" + "reflect" + "time" + + "github.com/aws/aws-sdk-go-v2/internal/sdk" +) + +// AnonymousCredentials provides a sentinel CredentialsProvider that should be +// used to instruct the SDK's signing middleware to not sign the request. +// +// Using `nil` credentials when configuring an API client will achieve the same +// result. The AnonymousCredentials type allows you to configure the SDK's +// external config loading to not attempt to source credentials from the shared +// config or environment. +// +// For example, you can use this CredentialsProvider with an API client's +// Options to instruct the client not to sign a request for accessing public +// S3 bucket objects. +// +// The following example demonstrates using the AnonymousCredentials to prevent +// the SDK's external config loading from attempting to resolve credentials. +// +// cfg, err := config.LoadDefaultConfig(context.TODO(), +// config.WithCredentialsProvider(aws.AnonymousCredentials{}), +// ) +// if err != nil { +// log.Fatalf("failed to load config, %v", err) +// } +// +// client := s3.NewFromConfig(cfg) +// +// Alternatively, you can leave the API client Options' `Credentials` member as +// nil. If using the `NewFromConfig` constructor you'll need to explicitly set +// the `Credentials` member to nil, if the external config resolved a +// credential provider. +// +// client := s3.New(s3.Options{ +// // Credentials defaults to a nil value. +// }) +// +// This can also be configured for specific operation calls too. +// +// cfg, err := config.LoadDefaultConfig(context.TODO()) +// if err != nil { +// log.Fatalf("failed to load config, %v", err) +// } +// +// client := s3.NewFromConfig(cfg) +// +// result, err := client.GetObject(context.TODO(), &s3.GetObjectInput{ +// Bucket: aws.String("example-bucket"), +// Key: aws.String("example-key"), +// }, func(o *s3.Options) { +// o.Credentials = nil +// // Or +// o.Credentials = aws.AnonymousCredentials{} +// }) +type AnonymousCredentials struct{} + +// Retrieve implements the CredentialsProvider interface, but will always +// return an error, and cannot be used to sign a request.
The AnonymousCredentials +// type is used as a sentinel type instructing the AWS request signing +// middleware to not sign a request. +func (AnonymousCredentials) Retrieve(context.Context) (Credentials, error) { + return Credentials{Source: "AnonymousCredentials"}, + fmt.Errorf("the AnonymousCredentials is not a valid credential provider, and cannot be used to sign AWS requests with") +} + +// A Credentials is the AWS credentials value for individual credential fields. +type Credentials struct { + // AWS Access key ID + AccessKeyID string + + // AWS Secret Access Key + SecretAccessKey string + + // AWS Session Token + SessionToken string + + // Source of the credentials + Source string + + // States if the credentials can expire or not. + CanExpire bool + + // The time the credentials will expire at. Should be ignored if CanExpire + // is false. + Expires time.Time +} + +// Expired returns if the credentials have expired. +func (v Credentials) Expired() bool { + if v.CanExpire { + // Calling Round(0) on the current time will truncate the monotonic + // reading only. Ensures credential expiry time is always based on + // reported wall-clock time. + return !v.Expires.After(sdk.NowTime().Round(0)) + } + + return false +} + +// HasKeys returns if the credentials keys are set. +func (v Credentials) HasKeys() bool { + return len(v.AccessKeyID) > 0 && len(v.SecretAccessKey) > 0 +} + +// A CredentialsProvider is the interface for any component which will provide +// Credentials values. A CredentialsProvider is required to manage its own +// Expired state, and what it means to be expired. +// +// A credentials provider implementation can be wrapped with a CredentialsCache +// to cache the credential value retrieved. Without the cache the SDK will +// attempt to retrieve the credentials for every request. +type CredentialsProvider interface { + // Retrieve returns nil if it successfully retrieved the value. + // An error is returned if the value was not obtainable, or was empty. + Retrieve(ctx context.Context) (Credentials, error) +} + +// CredentialsProviderFunc provides a helper wrapping a function value to +// satisfy the CredentialsProvider interface. +type CredentialsProviderFunc func(context.Context) (Credentials, error) + +// Retrieve delegates to the function value the CredentialsProviderFunc wraps. +func (fn CredentialsProviderFunc) Retrieve(ctx context.Context) (Credentials, error) { + return fn(ctx) +} + +type isCredentialsProvider interface { + IsCredentialsProvider(CredentialsProvider) bool +} + +// IsCredentialsProvider returns whether the target CredentialsProvider is the same type as provider when comparing the +// implementation type. +// +// If provider has a method IsCredentialsProvider(CredentialsProvider) bool, it will be responsible for validating +// whether target matches the credential provider type.
+ // +// When comparing the CredentialsProvider implementations provider and target for equality, the following rules are used: +// +// If provider is of type T and target is of type V, true if type *T is the same as type *V, otherwise false +// If provider is of type *T and target is of type V, true if type *T is the same as type *V, otherwise false +// If provider is of type T and target is of type *V, true if type *T is the same as type *V, otherwise false +// If provider is of type *T and target is of type *V, true if type *T is the same as type *V, otherwise false +func IsCredentialsProvider(provider, target CredentialsProvider) bool { + if target == nil || provider == nil { + return provider == target + } + + if x, ok := provider.(isCredentialsProvider); ok { + return x.IsCredentialsProvider(target) + } + + targetType := reflect.TypeOf(target) + if targetType.Kind() != reflect.Ptr { + targetType = reflect.PtrTo(targetType) + } + + providerType := reflect.TypeOf(provider) + if providerType.Kind() != reflect.Ptr { + providerType = reflect.PtrTo(providerType) + } + + return targetType.AssignableTo(providerType) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/auto.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/auto.go new file mode 100644 index 00000000000..fd408e51860 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/auto.go @@ -0,0 +1,38 @@ +package defaults + +import ( + "github.com/aws/aws-sdk-go-v2/aws" + "runtime" + "strings" +) + +var getGOOS = func() string { + return runtime.GOOS +} + +// ResolveDefaultsModeAuto is used to determine the effective aws.DefaultsMode when the mode +// is set to aws.DefaultsModeAuto. +func ResolveDefaultsModeAuto(region string, environment aws.RuntimeEnvironment) aws.DefaultsMode { + goos := getGOOS() + if goos == "android" || goos == "ios" { + return aws.DefaultsModeMobile + } + + var currentRegion string + if len(environment.EnvironmentIdentifier) > 0 { + currentRegion = environment.Region + } + + if len(currentRegion) == 0 && len(environment.EC2InstanceMetadataRegion) > 0 { + currentRegion = environment.EC2InstanceMetadataRegion + } + + if len(region) > 0 && len(currentRegion) > 0 { + if strings.EqualFold(region, currentRegion) { + return aws.DefaultsModeInRegion + } + return aws.DefaultsModeCrossRegion + } + + return aws.DefaultsModeStandard +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/configuration.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/configuration.go new file mode 100644 index 00000000000..8b7e01fa29a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/configuration.go @@ -0,0 +1,43 @@ +package defaults + +import ( + "time" + + "github.com/aws/aws-sdk-go-v2/aws" +) + +// Configuration is the set of SDK configuration options that are determined based +// on the configured DefaultsMode. +type Configuration struct { + // RetryMode is the configuration's default retry mode API clients should + // use for constructing a Retryer. + RetryMode aws.RetryMode + + // ConnectTimeout is the maximum amount of time a dial will wait for + // a connect to complete. + // + // See https://pkg.go.dev/net#Dialer.Timeout + ConnectTimeout *time.Duration + + // TLSNegotiationTimeout specifies the maximum amount of time to wait + // for a TLS handshake. + // + // See https://pkg.go.dev/net/http#Transport.TLSHandshakeTimeout + TLSNegotiationTimeout *time.Duration +} + +// GetConnectTimeout returns the ConnectTimeout value, returns false if the value is not set.
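+
+// Example (editorial, illustrative): exercising ResolveDefaultsModeAuto above.
+// The region and identifier values are placeholders.
+//
+//	env := aws.RuntimeEnvironment{EnvironmentIdentifier: "env", Region: "us-east-1"}
+//	mode := defaults.ResolveDefaultsModeAuto("us-east-1", env) // aws.DefaultsModeInRegion
+//	mode = defaults.ResolveDefaultsModeAuto("eu-west-1", env)  // aws.DefaultsModeCrossRegion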
+func (c *Configuration) GetConnectTimeout() (time.Duration, bool) { + if c.ConnectTimeout == nil { + return 0, false + } + return *c.ConnectTimeout, true +} + +// GetTLSNegotiationTimeout returns the TLSNegotiationTimeout value, returns false if the value is not set. +func (c *Configuration) GetTLSNegotiationTimeout() (time.Duration, bool) { + if c.TLSNegotiationTimeout == nil { + return 0, false + } + return *c.TLSNegotiationTimeout, true +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/defaults.go new file mode 100644 index 00000000000..dbaa873dc89 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/defaults.go @@ -0,0 +1,50 @@ +// Code generated by github.com/aws/aws-sdk-go-v2/internal/codegen/cmd/defaultsconfig. DO NOT EDIT. + +package defaults + +import ( + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + "time" +) + +// GetModeConfiguration returns the default Configuration descriptor for the given mode. +// +// Supports the following modes: cross-region, in-region, mobile, standard +func GetModeConfiguration(mode aws.DefaultsMode) (Configuration, error) { + var mv aws.DefaultsMode + mv.SetFromString(string(mode)) + + switch mv { + case aws.DefaultsModeCrossRegion: + settings := Configuration{ + ConnectTimeout: aws.Duration(3100 * time.Millisecond), + RetryMode: aws.RetryMode("standard"), + TLSNegotiationTimeout: aws.Duration(3100 * time.Millisecond), + } + return settings, nil + case aws.DefaultsModeInRegion: + settings := Configuration{ + ConnectTimeout: aws.Duration(1100 * time.Millisecond), + RetryMode: aws.RetryMode("standard"), + TLSNegotiationTimeout: aws.Duration(1100 * time.Millisecond), + } + return settings, nil + case aws.DefaultsModeMobile: + settings := Configuration{ + ConnectTimeout: aws.Duration(30000 * time.Millisecond), + RetryMode: aws.RetryMode("standard"), + TLSNegotiationTimeout: aws.Duration(30000 * time.Millisecond), + } + return settings, nil + case aws.DefaultsModeStandard: + settings := Configuration{ + ConnectTimeout: aws.Duration(3100 * time.Millisecond), + RetryMode: aws.RetryMode("standard"), + TLSNegotiationTimeout: aws.Duration(3100 * time.Millisecond), + } + return settings, nil + default: + return Configuration{}, fmt.Errorf("unsupported defaults mode: %v", mode) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/doc.go new file mode 100644 index 00000000000..2d90011b426 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/doc.go @@ -0,0 +1,2 @@ +// Package defaults provides recommended configuration values for AWS SDKs and CLIs. +package defaults diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaultsmode.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaultsmode.go new file mode 100644 index 00000000000..fcf9387c281 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaultsmode.go @@ -0,0 +1,95 @@ +// Code generated by github.com/aws/aws-sdk-go-v2/internal/codegen/cmd/defaultsmode. DO NOT EDIT. + +package aws + +import ( + "strings" +) + +// DefaultsMode is the SDK defaults mode setting. +type DefaultsMode string + +// The DefaultsMode constants. +const ( + // DefaultsModeAuto is an experimental mode that builds on the standard mode. + // The SDK will attempt to discover the execution environment to determine the + // appropriate settings automatically. 
+ // +// Note that the auto detection is heuristics-based and does not guarantee 100% +// accuracy. STANDARD mode will be used if the execution environment cannot +// be determined. The auto detection might query EC2 Instance Metadata service +// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html), +// which might introduce latency. Therefore we recommend choosing an explicit +// defaults_mode instead if startup latency is critical to your application + DefaultsModeAuto DefaultsMode = "auto" + + // DefaultsModeCrossRegion builds on the standard mode and includes optimization + // tailored for applications which call AWS services in a different region + // + // Note that the default values vended from this mode might change as best practices + // may evolve. As a result, it is encouraged to perform tests when upgrading + // the SDK + DefaultsModeCrossRegion DefaultsMode = "cross-region" + + // DefaultsModeInRegion builds on the standard mode and includes optimization + // tailored for applications which call AWS services from within the same AWS + // region + // + // Note that the default values vended from this mode might change as best practices + // may evolve. As a result, it is encouraged to perform tests when upgrading + // the SDK + DefaultsModeInRegion DefaultsMode = "in-region" + + // DefaultsModeLegacy provides default settings that vary per SDK and were used + // prior to establishment of defaults_mode + DefaultsModeLegacy DefaultsMode = "legacy" + + // DefaultsModeMobile builds on the standard mode and includes optimization + // tailored for mobile applications + // + // Note that the default values vended from this mode might change as best practices + // may evolve. As a result, it is encouraged to perform tests when upgrading + // the SDK + DefaultsModeMobile DefaultsMode = "mobile" + + // DefaultsModeStandard provides the latest recommended default values that + // should be safe to run in most scenarios + // + // Note that the default values vended from this mode might change as best practices + // may evolve. As a result, it is encouraged to perform tests when upgrading + // the SDK + DefaultsModeStandard DefaultsMode = "standard" +) + +// SetFromString sets the DefaultsMode value to one of the pre-defined constants that matches +// the provided string when compared using EqualFold. If the value does not match a known +// constant it will be set as-is and the function will return false. As a special case, if the +// provided value is a zero-length string, the mode will be set to DefaultsModeLegacy.
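+
+// Example (editorial, illustrative): parsing a mode string and looking up its
+// defaults with the defaults package shown earlier.
+//
+//	var mode aws.DefaultsMode
+//	ok := mode.SetFromString("IN-REGION") // EqualFold match: DefaultsModeInRegion, ok == true
+//	cfg, err := defaults.GetModeConfiguration(mode)
+//	if timeout, set := cfg.GetConnectTimeout(); set {
+//		_ = timeout // 1100ms per the in-region entry earlier
+//	}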
+func (d *DefaultsMode) SetFromString(v string) (ok bool) { + switch { + case strings.EqualFold(v, string(DefaultsModeAuto)): + *d = DefaultsModeAuto + ok = true + case strings.EqualFold(v, string(DefaultsModeCrossRegion)): + *d = DefaultsModeCrossRegion + ok = true + case strings.EqualFold(v, string(DefaultsModeInRegion)): + *d = DefaultsModeInRegion + ok = true + case strings.EqualFold(v, string(DefaultsModeLegacy)): + *d = DefaultsModeLegacy + ok = true + case strings.EqualFold(v, string(DefaultsModeMobile)): + *d = DefaultsModeMobile + ok = true + case strings.EqualFold(v, string(DefaultsModeStandard)): + *d = DefaultsModeStandard + ok = true + case len(v) == 0: + *d = DefaultsModeLegacy + ok = true + default: + *d = DefaultsMode(v) + } + return ok +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go new file mode 100644 index 00000000000..d8b6e09e593 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go @@ -0,0 +1,62 @@ +// Package aws provides the core SDK's utilities and shared types. Use this package's +// utilities to simplify setting and reading API operation parameters. +// +// # Value and Pointer Conversion Utilities +// +// This package includes a helper conversion utility for each scalar type the SDK's +// API use. These utilities make getting a pointer to a scalar, and dereferencing +// a pointer, easier. +// +// Each conversion utility comes in two forms: Value to Pointer and Pointer to Value. +// The Pointer to Value form safely dereferences the pointer and returns its value. +// If the pointer was nil, the scalar's zero value will be returned. +// +// The value to pointer functions will be named after the scalar type. So, to get a +// *string from a string value, use the "String" function. This makes it easy +// to get a pointer to a literal string value, because getting the address of a +// literal requires assigning the value to a variable first. +// +// var strPtr *string +// +// // Without the SDK's conversion functions +// str := "my string" +// strPtr = &str +// +// // With the SDK's conversion functions +// strPtr = aws.String("my string") +// +// // Convert *string to string value +// str = aws.ToString(strPtr) +// +// In addition to scalars the aws package also includes conversion utilities for +// map and slice types commonly used in API parameters. The map and slice +// conversion functions use a similar naming pattern to the scalar conversion +// functions. +// +// var strPtrs []*string +// var strs []string = []string{"Go", "Gophers", "Go"} +// +// // Convert []string to []*string +// strPtrs = aws.StringSlice(strs) +// +// // Convert []*string to []string +// strs = aws.ToStringSlice(strPtrs) +// +// # SDK Default HTTP Client +// +// The SDK will use the http.DefaultClient if an HTTP client is not provided to +// the SDK's Session, or service client constructor. This means that if the +// http.DefaultClient is modified by other components of your application, the +// modifications will be picked up by the SDK as well. +// +// In some cases this might be intended, but it is a better practice to create +// a custom HTTP Client to share explicitly through your application. You can +// configure the SDK to use the custom HTTP Client by setting the HTTPClient +// value of the SDK's Config type when creating a Session or service client.
+package aws + +// generate.go uses a build tag of "ignore", go run doesn't need to specify +// this because go run ignores all build flags when running a go file directly. +//go:generate go run -tags codegen generate.go +//go:generate go run -tags codegen logging_generate.go +//go:generate gofmt -w -s . diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go new file mode 100644 index 00000000000..aa10a9b40f0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go @@ -0,0 +1,229 @@ +package aws + +import ( + "fmt" +) + +// DualStackEndpointState is a constant to describe the dual-stack endpoint resolution behavior. +type DualStackEndpointState uint + +const ( + // DualStackEndpointStateUnset is the default value behavior for dual-stack endpoint resolution. + DualStackEndpointStateUnset DualStackEndpointState = iota + + // DualStackEndpointStateEnabled enables dual-stack endpoint resolution for service endpoints. + DualStackEndpointStateEnabled + + // DualStackEndpointStateDisabled disables dual-stack endpoint resolution for endpoints. + DualStackEndpointStateDisabled +) + +// GetUseDualStackEndpoint takes a service's EndpointResolverOptions and returns the UseDualStackEndpoint value. +// Returns boolean false if the provided options do not have a method to retrieve the DualStackEndpointState. +func GetUseDualStackEndpoint(options ...interface{}) (value DualStackEndpointState, found bool) { + type iface interface { + GetUseDualStackEndpoint() DualStackEndpointState + } + for _, option := range options { + if i, ok := option.(iface); ok { + value = i.GetUseDualStackEndpoint() + found = true + break + } + } + return value, found +} + +// FIPSEndpointState is a constant to describe the FIPS endpoint resolution behavior. +type FIPSEndpointState uint + +const ( + // FIPSEndpointStateUnset is the default value behavior for FIPS endpoint resolution. + FIPSEndpointStateUnset FIPSEndpointState = iota + + // FIPSEndpointStateEnabled enables FIPS endpoint resolution for service endpoints. + FIPSEndpointStateEnabled + + // FIPSEndpointStateDisabled disables FIPS endpoint resolution for endpoints. + FIPSEndpointStateDisabled +) + +// GetUseFIPSEndpoint takes a service's EndpointResolverOptions and returns the UseFIPSEndpoint value. +// Returns boolean false if the provided options do not have a method to retrieve the FIPSEndpointState. +func GetUseFIPSEndpoint(options ...interface{}) (value FIPSEndpointState, found bool) { + type iface interface { + GetUseFIPSEndpoint() FIPSEndpointState + } + for _, option := range options { + if i, ok := option.(iface); ok { + value = i.GetUseFIPSEndpoint() + found = true + break + } + } + return value, found +} + +// Endpoint represents the endpoint a service client should make API operation +// calls to. +// +// The SDK will automatically resolve these endpoints per API client using its +// internal endpoint resolvers. If you'd like to provide custom endpoint +// resolving behavior you can implement the EndpointResolver interface. +type Endpoint struct { + // The base URL endpoint the SDK API clients will use to make API calls to. + // The SDK will suffix URI path and query elements to this endpoint. + URL string + + // Specifies if the endpoint's hostname can be modified by the SDK's API + // client. + // + // If the hostname is mutable the SDK API clients may modify any part of + // the hostname based on the requirements of the API, (e.g.
adding, or + // removing content in the hostname). For example, the Amazon S3 API client + // prefixing "bucketname" to the hostname, or changing the + // hostname service name component from "s3." to "s3-accesspoint.dualstack." + // for the dualstack endpoint of an S3 Accesspoint resource. + // + // Care should be taken when providing a custom endpoint for an API. If the + // endpoint hostname is mutable, and the client cannot modify the endpoint + // correctly, the operation call will most likely fail, or have undefined + // behavior. + // + // If hostname is immutable, the SDK API clients will not modify the + // hostname of the URL. This may cause the API client not to function + // correctly if the API requires the operation-specific hostname values + // to be used by the client. + // + // This flag does not modify the API client's behavior if this endpoint + // will be used instead of Endpoint Discovery, or if the endpoint will be + // used to perform Endpoint Discovery. That behavior is configured via the + // API Client's Options. + HostnameImmutable bool + + // The AWS partition the endpoint belongs to. + PartitionID string + + // The service name that should be used for signing the requests to the + // endpoint. + SigningName string + + // The region that should be used for signing the request to the endpoint. + SigningRegion string + + // The signing method that should be used for signing the requests to the + // endpoint. + SigningMethod string + + // The source of the Endpoint. By default, this will be EndpointSourceServiceMetadata. + // When providing a custom endpoint, you should set the source as EndpointSourceCustom. + // If source is not provided when providing a custom endpoint, the SDK may not + // perform required host mutations correctly. Source should be used along with the + // HostnameImmutable property as per the usage requirement. + Source EndpointSource +} + +// EndpointSource is the endpoint source type. +type EndpointSource int + +const ( + // EndpointSourceServiceMetadata denotes service modeled endpoint metadata is used as Endpoint Source. + EndpointSourceServiceMetadata EndpointSource = iota + + // EndpointSourceCustom denotes the endpoint is a custom endpoint. This source should be used when + // the user provides a custom endpoint to be used by the SDK. + EndpointSourceCustom +) + +// EndpointNotFoundError is a sentinel error to indicate that the +// EndpointResolver implementation was unable to resolve an endpoint for the +// given service and region. Resolvers should use this to indicate that an API +// client should fall back and attempt to use its internal default resolver to +// resolve the endpoint. +type EndpointNotFoundError struct { + Err error +} + +// Error is the error message. +func (e *EndpointNotFoundError) Error() string { + return fmt.Sprintf("endpoint not found, %v", e.Err) +} + +// Unwrap returns the underlying error. +func (e *EndpointNotFoundError) Unwrap() error { + return e.Err +} + +// EndpointResolver is an endpoint resolver that can be used to provide or +// override an endpoint for the given service and region. API clients will +// attempt to use the EndpointResolver first to resolve an endpoint if +// available. If the EndpointResolver returns an EndpointNotFoundError error, +// API clients will fall back to attempting to resolve the endpoint using its +// internal default endpoint resolver.
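+
+// Example (editorial, illustrative): a resolver built with the
+// EndpointResolverWithOptionsFunc adapter defined below that pins one service
+// to a local endpoint and returns the sentinel error above to fall back to
+// the SDK defaults for everything else. The service name and URL are
+// placeholders.
+//
+//	resolver := aws.EndpointResolverWithOptionsFunc(
+//		func(service, region string, options ...interface{}) (aws.Endpoint, error) {
+//			if service == "S3" {
+//				return aws.Endpoint{URL: "http://localhost:9000", Source: aws.EndpointSourceCustom}, nil
+//			}
+//			return aws.Endpoint{}, &aws.EndpointNotFoundError{}
+//		})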
+ // +// Deprecated: See EndpointResolverWithOptions +type EndpointResolver interface { + ResolveEndpoint(service, region string) (Endpoint, error) +} + +// EndpointResolverFunc wraps a function to satisfy the EndpointResolver interface. +// +// Deprecated: See EndpointResolverWithOptionsFunc +type EndpointResolverFunc func(service, region string) (Endpoint, error) + +// ResolveEndpoint calls the wrapped function and returns the results. +// +// Deprecated: See EndpointResolverWithOptions.ResolveEndpoint +func (e EndpointResolverFunc) ResolveEndpoint(service, region string) (Endpoint, error) { + return e(service, region) +} + +// EndpointResolverWithOptions is an endpoint resolver that can be used to provide or +// override an endpoint for the given service, region, and the service client's EndpointOptions. API clients will +// attempt to use the EndpointResolverWithOptions first to resolve an endpoint if +// available. If the EndpointResolverWithOptions returns an EndpointNotFoundError error, +// API clients will fall back to attempting to resolve the endpoint using its +// internal default endpoint resolver. +type EndpointResolverWithOptions interface { + ResolveEndpoint(service, region string, options ...interface{}) (Endpoint, error) +} + +// EndpointResolverWithOptionsFunc wraps a function to satisfy the EndpointResolverWithOptions interface. +type EndpointResolverWithOptionsFunc func(service, region string, options ...interface{}) (Endpoint, error) + +// ResolveEndpoint calls the wrapped function and returns the results. +func (e EndpointResolverWithOptionsFunc) ResolveEndpoint(service, region string, options ...interface{}) (Endpoint, error) { + return e(service, region, options...) +} + +// GetDisableHTTPS takes a service's EndpointResolverOptions and returns the DisableHTTPS value. +// Returns boolean false if the provided options do not have a method to retrieve the DisableHTTPS. +func GetDisableHTTPS(options ...interface{}) (value bool, found bool) { + type iface interface { + GetDisableHTTPS() bool + } + for _, option := range options { + if i, ok := option.(iface); ok { + value = i.GetDisableHTTPS() + found = true + break + } + } + return value, found +} + +// GetResolvedRegion takes a service's EndpointResolverOptions and returns the ResolvedRegion value. +// Returns boolean false if the provided options do not have a method to retrieve the ResolvedRegion. +func GetResolvedRegion(options ...interface{}) (value string, found bool) { + type iface interface { + GetResolvedRegion() string + } + for _, option := range options { + if i, ok := option.(iface); ok { + value = i.GetResolvedRegion() + found = true + break + } + } + return value, found +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/errors.go new file mode 100644 index 00000000000..f390a08f9ff --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/errors.go @@ -0,0 +1,9 @@ +package aws + +// MissingRegionError is an error that is returned if region configuration +// value was not found. +type MissingRegionError struct{} + +func (*MissingRegionError) Error() string { + return "an AWS region is required, but was not found" +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/from_ptr.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/from_ptr.go new file mode 100644 index 00000000000..2394418e9bd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/from_ptr.go @@ -0,0 +1,365 @@ +// Code generated by aws/generate.go DO NOT EDIT.
+ +package aws + +import ( + "github.com/aws/smithy-go/ptr" + "time" +) + +// ToBool returns bool value dereferenced if the passed +// in pointer was not nil. Returns a bool zero value if the +// pointer was nil. +func ToBool(p *bool) (v bool) { + return ptr.ToBool(p) +} + +// ToBoolSlice returns a slice of bool values, that are +// dereferenced if the passed in pointer was not nil. Returns a bool +// zero value if the pointer was nil. +func ToBoolSlice(vs []*bool) []bool { + return ptr.ToBoolSlice(vs) +} + +// ToBoolMap returns a map of bool values, that are +// dereferenced if the passed in pointer was not nil. The bool +// zero value is used if the pointer was nil. +func ToBoolMap(vs map[string]*bool) map[string]bool { + return ptr.ToBoolMap(vs) +} + +// ToByte returns byte value dereferenced if the passed +// in pointer was not nil. Returns a byte zero value if the +// pointer was nil. +func ToByte(p *byte) (v byte) { + return ptr.ToByte(p) +} + +// ToByteSlice returns a slice of byte values, that are +// dereferenced if the passed in pointer was not nil. Returns a byte +// zero value if the pointer was nil. +func ToByteSlice(vs []*byte) []byte { + return ptr.ToByteSlice(vs) +} + +// ToByteMap returns a map of byte values, that are +// dereferenced if the passed in pointer was not nil. The byte +// zero value is used if the pointer was nil. +func ToByteMap(vs map[string]*byte) map[string]byte { + return ptr.ToByteMap(vs) +} + +// ToString returns string value dereferenced if the passed +// in pointer was not nil. Returns a string zero value if the +// pointer was nil. +func ToString(p *string) (v string) { + return ptr.ToString(p) +} + +// ToStringSlice returns a slice of string values, that are +// dereferenced if the passed in pointer was not nil. Returns a string +// zero value if the pointer was nil. +func ToStringSlice(vs []*string) []string { + return ptr.ToStringSlice(vs) +} + +// ToStringMap returns a map of string values, that are +// dereferenced if the passed in pointer was not nil. The string +// zero value is used if the pointer was nil. +func ToStringMap(vs map[string]*string) map[string]string { + return ptr.ToStringMap(vs) +} + +// ToInt returns int value dereferenced if the passed +// in pointer was not nil. Returns a int zero value if the +// pointer was nil. +func ToInt(p *int) (v int) { + return ptr.ToInt(p) +} + +// ToIntSlice returns a slice of int values, that are +// dereferenced if the passed in pointer was not nil. Returns a int +// zero value if the pointer was nil. +func ToIntSlice(vs []*int) []int { + return ptr.ToIntSlice(vs) +} + +// ToIntMap returns a map of int values, that are +// dereferenced if the passed in pointer was not nil. The int +// zero value is used if the pointer was nil. +func ToIntMap(vs map[string]*int) map[string]int { + return ptr.ToIntMap(vs) +} + +// ToInt8 returns int8 value dereferenced if the passed +// in pointer was not nil. Returns a int8 zero value if the +// pointer was nil. +func ToInt8(p *int8) (v int8) { + return ptr.ToInt8(p) +} + +// ToInt8Slice returns a slice of int8 values, that are +// dereferenced if the passed in pointer was not nil. Returns a int8 +// zero value if the pointer was nil. +func ToInt8Slice(vs []*int8) []int8 { + return ptr.ToInt8Slice(vs) +} + +// ToInt8Map returns a map of int8 values, that are +// dereferenced if the passed in pointer was not nil. The int8 +// zero value is used if the pointer was nil. 
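// Editorial sketch (not part of the vendored source): how these generated
// helpers are typically used when reading pointer-valued SDK output fields.
// The out variable and its fields are hypothetical.
//
//	name := aws.ToString(out.BucketName) // "" when out.BucketName is nil
//	count := aws.ToInt(out.KeyCount)     // 0 when out.KeyCount is nil
//	tags := aws.ToStringMap(out.Tags)    // nil map entries become ""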
+func ToInt8Map(vs map[string]*int8) map[string]int8 { + return ptr.ToInt8Map(vs) +} + +// ToInt16 returns int16 value dereferenced if the passed +// in pointer was not nil. Returns a int16 zero value if the +// pointer was nil. +func ToInt16(p *int16) (v int16) { + return ptr.ToInt16(p) +} + +// ToInt16Slice returns a slice of int16 values, that are +// dereferenced if the passed in pointer was not nil. Returns a int16 +// zero value if the pointer was nil. +func ToInt16Slice(vs []*int16) []int16 { + return ptr.ToInt16Slice(vs) +} + +// ToInt16Map returns a map of int16 values, that are +// dereferenced if the passed in pointer was not nil. The int16 +// zero value is used if the pointer was nil. +func ToInt16Map(vs map[string]*int16) map[string]int16 { + return ptr.ToInt16Map(vs) +} + +// ToInt32 returns int32 value dereferenced if the passed +// in pointer was not nil. Returns a int32 zero value if the +// pointer was nil. +func ToInt32(p *int32) (v int32) { + return ptr.ToInt32(p) +} + +// ToInt32Slice returns a slice of int32 values, that are +// dereferenced if the passed in pointer was not nil. Returns a int32 +// zero value if the pointer was nil. +func ToInt32Slice(vs []*int32) []int32 { + return ptr.ToInt32Slice(vs) +} + +// ToInt32Map returns a map of int32 values, that are +// dereferenced if the passed in pointer was not nil. The int32 +// zero value is used if the pointer was nil. +func ToInt32Map(vs map[string]*int32) map[string]int32 { + return ptr.ToInt32Map(vs) +} + +// ToInt64 returns int64 value dereferenced if the passed +// in pointer was not nil. Returns a int64 zero value if the +// pointer was nil. +func ToInt64(p *int64) (v int64) { + return ptr.ToInt64(p) +} + +// ToInt64Slice returns a slice of int64 values, that are +// dereferenced if the passed in pointer was not nil. Returns a int64 +// zero value if the pointer was nil. +func ToInt64Slice(vs []*int64) []int64 { + return ptr.ToInt64Slice(vs) +} + +// ToInt64Map returns a map of int64 values, that are +// dereferenced if the passed in pointer was not nil. The int64 +// zero value is used if the pointer was nil. +func ToInt64Map(vs map[string]*int64) map[string]int64 { + return ptr.ToInt64Map(vs) +} + +// ToUint returns uint value dereferenced if the passed +// in pointer was not nil. Returns a uint zero value if the +// pointer was nil. +func ToUint(p *uint) (v uint) { + return ptr.ToUint(p) +} + +// ToUintSlice returns a slice of uint values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint +// zero value if the pointer was nil. +func ToUintSlice(vs []*uint) []uint { + return ptr.ToUintSlice(vs) +} + +// ToUintMap returns a map of uint values, that are +// dereferenced if the passed in pointer was not nil. The uint +// zero value is used if the pointer was nil. +func ToUintMap(vs map[string]*uint) map[string]uint { + return ptr.ToUintMap(vs) +} + +// ToUint8 returns uint8 value dereferenced if the passed +// in pointer was not nil. Returns a uint8 zero value if the +// pointer was nil. +func ToUint8(p *uint8) (v uint8) { + return ptr.ToUint8(p) +} + +// ToUint8Slice returns a slice of uint8 values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint8 +// zero value if the pointer was nil. +func ToUint8Slice(vs []*uint8) []uint8 { + return ptr.ToUint8Slice(vs) +} + +// ToUint8Map returns a map of uint8 values, that are +// dereferenced if the passed in pointer was not nil. The uint8 +// zero value is used if the pointer was nil. 
+func ToUint8Map(vs map[string]*uint8) map[string]uint8 { + return ptr.ToUint8Map(vs) +} + +// ToUint16 returns uint16 value dereferenced if the passed +// in pointer was not nil. Returns a uint16 zero value if the +// pointer was nil. +func ToUint16(p *uint16) (v uint16) { + return ptr.ToUint16(p) +} + +// ToUint16Slice returns a slice of uint16 values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint16 +// zero value if the pointer was nil. +func ToUint16Slice(vs []*uint16) []uint16 { + return ptr.ToUint16Slice(vs) +} + +// ToUint16Map returns a map of uint16 values, that are +// dereferenced if the passed in pointer was not nil. The uint16 +// zero value is used if the pointer was nil. +func ToUint16Map(vs map[string]*uint16) map[string]uint16 { + return ptr.ToUint16Map(vs) +} + +// ToUint32 returns uint32 value dereferenced if the passed +// in pointer was not nil. Returns a uint32 zero value if the +// pointer was nil. +func ToUint32(p *uint32) (v uint32) { + return ptr.ToUint32(p) +} + +// ToUint32Slice returns a slice of uint32 values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint32 +// zero value if the pointer was nil. +func ToUint32Slice(vs []*uint32) []uint32 { + return ptr.ToUint32Slice(vs) +} + +// ToUint32Map returns a map of uint32 values, that are +// dereferenced if the passed in pointer was not nil. The uint32 +// zero value is used if the pointer was nil. +func ToUint32Map(vs map[string]*uint32) map[string]uint32 { + return ptr.ToUint32Map(vs) +} + +// ToUint64 returns uint64 value dereferenced if the passed +// in pointer was not nil. Returns a uint64 zero value if the +// pointer was nil. +func ToUint64(p *uint64) (v uint64) { + return ptr.ToUint64(p) +} + +// ToUint64Slice returns a slice of uint64 values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint64 +// zero value if the pointer was nil. +func ToUint64Slice(vs []*uint64) []uint64 { + return ptr.ToUint64Slice(vs) +} + +// ToUint64Map returns a map of uint64 values, that are +// dereferenced if the passed in pointer was not nil. The uint64 +// zero value is used if the pointer was nil. +func ToUint64Map(vs map[string]*uint64) map[string]uint64 { + return ptr.ToUint64Map(vs) +} + +// ToFloat32 returns float32 value dereferenced if the passed +// in pointer was not nil. Returns a float32 zero value if the +// pointer was nil. +func ToFloat32(p *float32) (v float32) { + return ptr.ToFloat32(p) +} + +// ToFloat32Slice returns a slice of float32 values, that are +// dereferenced if the passed in pointer was not nil. Returns a float32 +// zero value if the pointer was nil. +func ToFloat32Slice(vs []*float32) []float32 { + return ptr.ToFloat32Slice(vs) +} + +// ToFloat32Map returns a map of float32 values, that are +// dereferenced if the passed in pointer was not nil. The float32 +// zero value is used if the pointer was nil. +func ToFloat32Map(vs map[string]*float32) map[string]float32 { + return ptr.ToFloat32Map(vs) +} + +// ToFloat64 returns float64 value dereferenced if the passed +// in pointer was not nil. Returns a float64 zero value if the +// pointer was nil. +func ToFloat64(p *float64) (v float64) { + return ptr.ToFloat64(p) +} + +// ToFloat64Slice returns a slice of float64 values, that are +// dereferenced if the passed in pointer was not nil. Returns a float64 +// zero value if the pointer was nil. 
+func ToFloat64Slice(vs []*float64) []float64 { + return ptr.ToFloat64Slice(vs) +} + +// ToFloat64Map returns a map of float64 values, that are +// dereferenced if the passed in pointer was not nil. The float64 +// zero value is used if the pointer was nil. +func ToFloat64Map(vs map[string]*float64) map[string]float64 { + return ptr.ToFloat64Map(vs) +} + +// ToTime returns time.Time value dereferenced if the passed +// in pointer was not nil. Returns a time.Time zero value if the +// pointer was nil. +func ToTime(p *time.Time) (v time.Time) { + return ptr.ToTime(p) +} + +// ToTimeSlice returns a slice of time.Time values, that are +// dereferenced if the passed in pointer was not nil. Returns a time.Time +// zero value if the pointer was nil. +func ToTimeSlice(vs []*time.Time) []time.Time { + return ptr.ToTimeSlice(vs) +} + +// ToTimeMap returns a map of time.Time values, that are +// dereferenced if the passed in pointer was not nil. The time.Time +// zero value is used if the pointer was nil. +func ToTimeMap(vs map[string]*time.Time) map[string]time.Time { + return ptr.ToTimeMap(vs) +} + +// ToDuration returns time.Duration value dereferenced if the passed +// in pointer was not nil. Returns a time.Duration zero value if the +// pointer was nil. +func ToDuration(p *time.Duration) (v time.Duration) { + return ptr.ToDuration(p) +} + +// ToDurationSlice returns a slice of time.Duration values, that are +// dereferenced if the passed in pointer was not nil. Returns a time.Duration +// zero value if the pointer was nil. +func ToDurationSlice(vs []*time.Duration) []time.Duration { + return ptr.ToDurationSlice(vs) +} + +// ToDurationMap returns a map of time.Duration values, that are +// dereferenced if the passed in pointer was not nil. The time.Duration +// zero value is used if the pointer was nil. +func ToDurationMap(vs map[string]*time.Duration) map[string]time.Duration { + return ptr.ToDurationMap(vs) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go new file mode 100644 index 00000000000..a88bb2f7588 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package aws + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.22.2" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/logging.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/logging.go new file mode 100644 index 00000000000..91c94d987b1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/logging.go @@ -0,0 +1,119 @@ +// Code generated by aws/logging_generate.go DO NOT EDIT. + +package aws + +// ClientLogMode represents the logging mode of SDK clients. The client logging mode is a bit-field where +// each bit is a flag that describes the logging behavior for one or more client components. +// The entire 64-bit group is reserved for later expansion by the SDK. +// +// Example: Setting ClientLogMode to enable logging of retries and requests +// +// clientLogMode := aws.LogRetries | aws.LogRequest +// +// Example: Adding an additional log mode to an existing ClientLogMode value +// +// clientLogMode |= aws.LogResponse +type ClientLogMode uint64 + +// Supported ClientLogMode bits that can be configured to toggle logging of specific SDK events. 
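// Editorial sketch (not part of the vendored source): combining, testing,
// and clearing the mode bits declared in the const block below.
//
//	mode := aws.LogRetries | aws.LogRequestWithBody
//	if mode.IsRetries() {
//	    // retry attempts will be logged
//	}
//	mode.ClearRequestWithBody() // stop logging request bodies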
+const ( + LogSigning ClientLogMode = 1 << (64 - 1 - iota) + LogRetries + LogRequest + LogRequestWithBody + LogResponse + LogResponseWithBody + LogDeprecatedUsage + LogRequestEventMessage + LogResponseEventMessage +) + +// IsSigning returns whether the Signing logging mode bit is set +func (m ClientLogMode) IsSigning() bool { + return m&LogSigning != 0 +} + +// IsRetries returns whether the Retries logging mode bit is set +func (m ClientLogMode) IsRetries() bool { + return m&LogRetries != 0 +} + +// IsRequest returns whether the Request logging mode bit is set +func (m ClientLogMode) IsRequest() bool { + return m&LogRequest != 0 +} + +// IsRequestWithBody returns whether the RequestWithBody logging mode bit is set +func (m ClientLogMode) IsRequestWithBody() bool { + return m&LogRequestWithBody != 0 +} + +// IsResponse returns whether the Response logging mode bit is set +func (m ClientLogMode) IsResponse() bool { + return m&LogResponse != 0 +} + +// IsResponseWithBody returns whether the ResponseWithBody logging mode bit is set +func (m ClientLogMode) IsResponseWithBody() bool { + return m&LogResponseWithBody != 0 +} + +// IsDeprecatedUsage returns whether the DeprecatedUsage logging mode bit is set +func (m ClientLogMode) IsDeprecatedUsage() bool { + return m&LogDeprecatedUsage != 0 +} + +// IsRequestEventMessage returns whether the RequestEventMessage logging mode bit is set +func (m ClientLogMode) IsRequestEventMessage() bool { + return m&LogRequestEventMessage != 0 +} + +// IsResponseEventMessage returns whether the ResponseEventMessage logging mode bit is set +func (m ClientLogMode) IsResponseEventMessage() bool { + return m&LogResponseEventMessage != 0 +} + +// ClearSigning clears the Signing logging mode bit +func (m *ClientLogMode) ClearSigning() { + *m &^= LogSigning +} + +// ClearRetries clears the Retries logging mode bit +func (m *ClientLogMode) ClearRetries() { + *m &^= LogRetries +} + +// ClearRequest clears the Request logging mode bit +func (m *ClientLogMode) ClearRequest() { + *m &^= LogRequest +} + +// ClearRequestWithBody clears the RequestWithBody logging mode bit +func (m *ClientLogMode) ClearRequestWithBody() { + *m &^= LogRequestWithBody +} + +// ClearResponse clears the Response logging mode bit +func (m *ClientLogMode) ClearResponse() { + *m &^= LogResponse +} + +// ClearResponseWithBody clears the ResponseWithBody logging mode bit +func (m *ClientLogMode) ClearResponseWithBody() { + *m &^= LogResponseWithBody +} + +// ClearDeprecatedUsage clears the DeprecatedUsage logging mode bit +func (m *ClientLogMode) ClearDeprecatedUsage() { + *m &^= LogDeprecatedUsage +} + +// ClearRequestEventMessage clears the RequestEventMessage logging mode bit +func (m *ClientLogMode) ClearRequestEventMessage() { + *m &^= LogRequestEventMessage +} + +// ClearResponseEventMessage clears the ResponseEventMessage logging mode bit +func (m *ClientLogMode) ClearResponseEventMessage() { + *m &^= LogResponseEventMessage +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/logging_generate.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/logging_generate.go new file mode 100644 index 00000000000..6ecc2231a12 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/logging_generate.go @@ -0,0 +1,95 @@ +//go:build clientlogmode +// +build clientlogmode + +package main + +import ( + "fmt" + "log" + "os" + "strings" + "text/template" +) + +var config = struct { + ModeBits []string +}{ + // Items should be appended only to keep bit-flag positions stable + ModeBits: []string{ + "Signing", + 
"Retries", + "Request", + "RequestWithBody", + "Response", + "ResponseWithBody", + "DeprecatedUsage", + "RequestEventMessage", + "ResponseEventMessage", + }, +} + +func bitName(name string) string { + return strings.ToUpper(name[:1]) + name[1:] +} + +var tmpl = template.Must(template.New("ClientLogMode").Funcs(map[string]interface{}{ + "symbolName": func(name string) string { + return "Log" + bitName(name) + }, + "bitName": bitName, +}).Parse(`// Code generated by aws/logging_generate.go DO NOT EDIT. + +package aws + +// ClientLogMode represents the logging mode of SDK clients. The client logging mode is a bit-field where +// each bit is a flag that describes the logging behavior for one or more client components. +// The entire 64-bit group is reserved for later expansion by the SDK. +// +// Example: Setting ClientLogMode to enable logging of retries and requests +// clientLogMode := aws.LogRetries | aws.LogRequest +// +// Example: Adding an additional log mode to an existing ClientLogMode value +// clientLogMode |= aws.LogResponse +type ClientLogMode uint64 + +// Supported ClientLogMode bits that can be configured to toggle logging of specific SDK events. +const ( +{{- range $index, $field := .ModeBits }} + {{ (symbolName $field) }}{{- if (eq 0 $index) }} ClientLogMode = 1 << (64 - 1 - iota){{- end }} +{{- end }} +) +{{ range $_, $field := .ModeBits }} +// Is{{- bitName $field }} returns whether the {{ bitName $field }} logging mode bit is set +func (m ClientLogMode) Is{{- bitName $field }}() bool { + return m&{{- (symbolName $field) }} != 0 +} +{{ end }} +{{- range $_, $field := .ModeBits }} +// Clear{{- bitName $field }} clears the {{ bitName $field }} logging mode bit +func (m *ClientLogMode) Clear{{- bitName $field }}() { + *m &^= {{ (symbolName $field) }} +} +{{ end -}} +`)) + +func main() { + uniqueBitFields := make(map[string]struct{}) + + for _, bitName := range config.ModeBits { + if _, ok := uniqueBitFields[strings.ToLower(bitName)]; ok { + panic(fmt.Sprintf("duplicate bit field: %s", bitName)) + } + uniqueBitFields[bitName] = struct{}{} + } + + file, err := os.Create("logging.go") + if err != nil { + log.Fatal(err) + } + defer file.Close() + + err = tmpl.Execute(file, config) + if err != nil { + log.Fatal(err) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go new file mode 100644 index 00000000000..2de15528c93 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go @@ -0,0 +1,201 @@ +package middleware + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + + "github.com/aws/smithy-go/middleware" +) + +// RegisterServiceMetadata registers metadata about the service and operation into the middleware context +// so that it is available at runtime for other middleware to introspect. +type RegisterServiceMetadata struct { + ServiceID string + SigningName string + Region string + OperationName string +} + +// ID returns the middleware identifier. +func (s *RegisterServiceMetadata) ID() string { + return "RegisterServiceMetadata" +} + +// HandleInitialize registers service metadata information into the middleware context, allowing for introspection. 
+func (s RegisterServiceMetadata) HandleInitialize(
+    ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
+) (out middleware.InitializeOutput, metadata middleware.Metadata, err error) {
+    if len(s.ServiceID) > 0 {
+        ctx = SetServiceID(ctx, s.ServiceID)
+    }
+    if len(s.SigningName) > 0 {
+        ctx = SetSigningName(ctx, s.SigningName)
+    }
+    if len(s.Region) > 0 {
+        ctx = setRegion(ctx, s.Region)
+    }
+    if len(s.OperationName) > 0 {
+        ctx = setOperationName(ctx, s.OperationName)
+    }
+    return next.HandleInitialize(ctx, in)
+}
+
+// service metadata keys for storing and looking up runtime stack information.
+type (
+    serviceIDKey               struct{}
+    signingNameKey             struct{}
+    signingRegionKey           struct{}
+    regionKey                  struct{}
+    operationNameKey           struct{}
+    partitionIDKey             struct{}
+    requiresLegacyEndpointsKey struct{}
+)
+
+// GetServiceID retrieves the service id from the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetServiceID(ctx context.Context) (v string) {
+    v, _ = middleware.GetStackValue(ctx, serviceIDKey{}).(string)
+    return v
+}
+
+// GetSigningName retrieves the service signing name from the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetSigningName(ctx context.Context) (v string) {
+    v, _ = middleware.GetStackValue(ctx, signingNameKey{}).(string)
+    return v
+}
+
+// GetSigningRegion retrieves the signing region from the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetSigningRegion(ctx context.Context) (v string) {
+    v, _ = middleware.GetStackValue(ctx, signingRegionKey{}).(string)
+    return v
+}
+
+// GetRegion retrieves the endpoint region from the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetRegion(ctx context.Context) (v string) {
+    v, _ = middleware.GetStackValue(ctx, regionKey{}).(string)
+    return v
+}
+
+// GetOperationName retrieves the service operation metadata from the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetOperationName(ctx context.Context) (v string) {
+    v, _ = middleware.GetStackValue(ctx, operationNameKey{}).(string)
+    return v
+}
+
+// GetPartitionID retrieves the endpoint partition id from the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetPartitionID(ctx context.Context) string {
+    v, _ := middleware.GetStackValue(ctx, partitionIDKey{}).(string)
+    return v
+}
+
+// GetRequiresLegacyEndpoints returns the flag used to indicate if legacy endpoint
+// customizations need to be executed.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetRequiresLegacyEndpoints(ctx context.Context) bool {
+    v, _ := middleware.GetStackValue(ctx, requiresLegacyEndpointsKey{}).(bool)
+    return v
+}
+
+// SetRequiresLegacyEndpoints sets or modifies the flag indicating that
+// legacy endpoint customizations are needed.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func SetRequiresLegacyEndpoints(ctx context.Context, value bool) context.Context {
+    return middleware.WithStackValue(ctx, requiresLegacyEndpointsKey{}, value)
+}
+
+// SetSigningName sets or modifies the signing name on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func SetSigningName(ctx context.Context, value string) context.Context {
+    return middleware.WithStackValue(ctx, signingNameKey{}, value)
+}
+
+// SetSigningRegion sets or modifies the region on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func SetSigningRegion(ctx context.Context, value string) context.Context {
+    return middleware.WithStackValue(ctx, signingRegionKey{}, value)
+}
+
+// SetServiceID sets the service id on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func SetServiceID(ctx context.Context, value string) context.Context {
+    return middleware.WithStackValue(ctx, serviceIDKey{}, value)
+}
+
+// setRegion sets the endpoint region on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func setRegion(ctx context.Context, value string) context.Context {
+    return middleware.WithStackValue(ctx, regionKey{}, value)
+}
+
+// setOperationName sets the service operation on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func setOperationName(ctx context.Context, value string) context.Context {
+    return middleware.WithStackValue(ctx, operationNameKey{}, value)
+}
+
+// SetPartitionID sets the partition id of a resolved region on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func SetPartitionID(ctx context.Context, value string) context.Context {
+    return middleware.WithStackValue(ctx, partitionIDKey{}, value)
+}
+
+// EndpointSource key
+type endpointSourceKey struct{}
+
+// GetEndpointSource returns an endpoint source if set on the context.
+func GetEndpointSource(ctx context.Context) (v aws.EndpointSource) {
+    v, _ = middleware.GetStackValue(ctx, endpointSourceKey{}).(aws.EndpointSource)
+    return v
+}
+
+// SetEndpointSource sets the endpoint source on the context.
+func SetEndpointSource(ctx context.Context, value aws.EndpointSource) context.Context {
+    return middleware.WithStackValue(ctx, endpointSourceKey{}, value)
+}
+
+type signingCredentialsKey struct{}
+
+// GetSigningCredentials returns the credentials that were used for signing if set on context.
+func GetSigningCredentials(ctx context.Context) (v aws.Credentials) {
+    v, _ = middleware.GetStackValue(ctx, signingCredentialsKey{}).(aws.Credentials)
+    return v
+}
+
+// SetSigningCredentials sets the credentials used for signing on the context.
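// Editorial sketch (not part of the vendored source): a later middleware
// reading values stored by the setters above. Logger acquisition follows the
// smithy-go pattern; the message format is an assumption.
//
//	logger := middleware.GetLogger(ctx)
//	logger.Logf(logging.Debug, "calling %s.%s in region %s",
//	    GetServiceID(ctx), GetOperationName(ctx), GetRegion(ctx))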
+func SetSigningCredentials(ctx context.Context, value aws.Credentials) context.Context { + return middleware.WithStackValue(ctx, signingCredentialsKey{}, value) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go new file mode 100644 index 00000000000..9bd0dfb1508 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go @@ -0,0 +1,168 @@ +package middleware + +import ( + "context" + "fmt" + "time" + + "github.com/aws/aws-sdk-go-v2/internal/rand" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" + smithyrand "github.com/aws/smithy-go/rand" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// ClientRequestID is a Smithy BuildMiddleware that will generate a unique ID for logical API operation +// invocation. +type ClientRequestID struct{} + +// ID the identifier for the ClientRequestID +func (r *ClientRequestID) ID() string { + return "ClientRequestID" +} + +// HandleBuild attaches a unique operation invocation id for the operation to the request +func (r ClientRequestID) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", req) + } + + invocationID, err := smithyrand.NewUUID(rand.Reader).GetUUID() + if err != nil { + return out, metadata, err + } + + const invocationIDHeader = "Amz-Sdk-Invocation-Id" + req.Header[invocationIDHeader] = append(req.Header[invocationIDHeader][:0], invocationID) + + return next.HandleBuild(ctx, in) +} + +// RecordResponseTiming records the response timing for the SDK client requests. +type RecordResponseTiming struct{} + +// ID is the middleware identifier +func (a *RecordResponseTiming) ID() string { + return "RecordResponseTiming" +} + +// HandleDeserialize calculates response metadata and clock skew +func (a RecordResponseTiming) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + responseAt := sdk.NowTime() + setResponseAt(&metadata, responseAt) + + var serverTime time.Time + + switch resp := out.RawResponse.(type) { + case *smithyhttp.Response: + respDateHeader := resp.Header.Get("Date") + if len(respDateHeader) == 0 { + break + } + var parseErr error + serverTime, parseErr = smithyhttp.ParseTime(respDateHeader) + if parseErr != nil { + logger := middleware.GetLogger(ctx) + logger.Logf(logging.Warn, "failed to parse response Date header value, got %v", + parseErr.Error()) + break + } + setServerTime(&metadata, serverTime) + } + + if !serverTime.IsZero() { + attemptSkew := serverTime.Sub(responseAt) + setAttemptSkew(&metadata, attemptSkew) + } + + return out, metadata, err +} + +type responseAtKey struct{} + +// GetResponseAt returns the time response was received at. +func GetResponseAt(metadata middleware.Metadata) (v time.Time, ok bool) { + v, ok = metadata.Get(responseAtKey{}).(time.Time) + return v, ok +} + +// setResponseAt sets the response time on the metadata. 
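// Editorial sketch (not part of the vendored source): reading the recorded
// timing values back out of an operation's result metadata.
//
//	if at, ok := GetResponseAt(metadata); ok {
//	    _ = at // wall-clock time the response was received
//	}
//	if skew, ok := GetAttemptSkew(metadata); ok {
//	    _ = skew // server Date header time minus local receive time
//	}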
+func setResponseAt(metadata *middleware.Metadata, v time.Time) {
+    metadata.Set(responseAtKey{}, v)
+}
+
+type serverTimeKey struct{}
+
+// GetServerTime returns the server time for the response.
+func GetServerTime(metadata middleware.Metadata) (v time.Time, ok bool) {
+    v, ok = metadata.Get(serverTimeKey{}).(time.Time)
+    return v, ok
+}
+
+// setServerTime sets the server time on the metadata.
+func setServerTime(metadata *middleware.Metadata, v time.Time) {
+    metadata.Set(serverTimeKey{}, v)
+}
+
+type attemptSkewKey struct{}
+
+// GetAttemptSkew returns the attempt clock skew for the response from metadata.
+func GetAttemptSkew(metadata middleware.Metadata) (v time.Duration, ok bool) {
+    v, ok = metadata.Get(attemptSkewKey{}).(time.Duration)
+    return v, ok
+}
+
+// setAttemptSkew sets the attempt clock skew on the metadata.
+func setAttemptSkew(metadata *middleware.Metadata, v time.Duration) {
+    metadata.Set(attemptSkewKey{}, v)
+}
+
+// AddClientRequestIDMiddleware adds ClientRequestID to the middleware stack.
+func AddClientRequestIDMiddleware(stack *middleware.Stack) error {
+    return stack.Build.Add(&ClientRequestID{}, middleware.After)
+}
+
+// AddRecordResponseTiming adds RecordResponseTiming middleware to the
+// middleware stack.
+func AddRecordResponseTiming(stack *middleware.Stack) error {
+    return stack.Deserialize.Add(&RecordResponseTiming{}, middleware.After)
+}
+
+// rawResponseKey is the accessor key used to store and access the
+// raw response within the response metadata.
+type rawResponseKey struct{}
+
+// addRawResponse middleware adds the raw response to the metadata.
+type addRawResponse struct{}
+
+// ID is the identifier for the addRawResponse middleware.
+func (m *addRawResponse) ID() string {
+    return "AddRawResponseToMetadata"
+}
+
+// HandleDeserialize adds the raw response to the middleware metadata.
+func (m addRawResponse) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+    out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+    out, metadata, err = next.HandleDeserialize(ctx, in)
+    metadata.Set(rawResponseKey{}, out.RawResponse)
+    return out, metadata, err
+}
+
+// AddRawResponseToMetadata adds middleware to the middleware stack that
+// stores the raw response on the metadata.
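// Editorial sketch (not part of the vendored source): recovering the raw
// response after an operation once the middleware above is installed.
//
//	if raw, ok := GetRawResponse(metadata).(*smithyhttp.Response); ok {
//	    _ = raw.StatusCode // smithyhttp.Response embeds *http.Response
//	}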
+func AddRawResponseToMetadata(stack *middleware.Stack) error { + return stack.Deserialize.Add(&addRawResponse{}, middleware.Before) +} + +// GetRawResponse returns raw response set on metadata +func GetRawResponse(metadata middleware.Metadata) interface{} { + return metadata.Get(rawResponseKey{}) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname.go new file mode 100644 index 00000000000..ba262dadcd0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname.go @@ -0,0 +1,24 @@ +//go:build go1.16 +// +build go1.16 + +package middleware + +import "runtime" + +func getNormalizedOSName() (os string) { + switch runtime.GOOS { + case "android": + os = "android" + case "linux": + os = "linux" + case "windows": + os = "windows" + case "darwin": + os = "macos" + case "ios": + os = "ios" + default: + os = "other" + } + return os +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname_go115.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname_go115.go new file mode 100644 index 00000000000..e14a1e4ecb9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname_go115.go @@ -0,0 +1,24 @@ +//go:build !go1.16 +// +build !go1.16 + +package middleware + +import "runtime" + +func getNormalizedOSName() (os string) { + switch runtime.GOOS { + case "android": + os = "android" + case "linux": + os = "linux" + case "windows": + os = "windows" + case "darwin": + // Due to Apple M1 we can't distinguish between macOS and iOS when GOOS/GOARCH is darwin/amd64 + // For now declare this as "other" until we have a better detection mechanism. + fallthrough + default: + os = "other" + } + return os +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/recursion_detection.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/recursion_detection.go new file mode 100644 index 00000000000..3f6aaf231e1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/recursion_detection.go @@ -0,0 +1,94 @@ +package middleware + +import ( + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "os" +) + +const envAwsLambdaFunctionName = "AWS_LAMBDA_FUNCTION_NAME" +const envAmznTraceID = "_X_AMZN_TRACE_ID" +const amznTraceIDHeader = "X-Amzn-Trace-Id" + +// AddRecursionDetection adds recursionDetection to the middleware stack +func AddRecursionDetection(stack *middleware.Stack) error { + return stack.Build.Add(&RecursionDetection{}, middleware.After) +} + +// RecursionDetection detects Lambda environment and sets its X-Ray trace ID to request header if absent +// to avoid recursion invocation in Lambda +type RecursionDetection struct{} + +// ID returns the middleware identifier +func (m *RecursionDetection) ID() string { + return "RecursionDetection" +} + +// HandleBuild detects Lambda environment and adds its trace ID to request header if absent +func (m *RecursionDetection) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown request type %T", req) + } + + _, hasLambdaEnv := os.LookupEnv(envAwsLambdaFunctionName) + xAmznTraceID, hasTraceID := os.LookupEnv(envAmznTraceID) + value := req.Header.Get(amznTraceIDHeader) + // only set the X-Amzn-Trace-Id header when it is not set 
initially, the + // current environment is Lambda and the _X_AMZN_TRACE_ID env variable exists + if value != "" || !hasLambdaEnv || !hasTraceID { + return next.HandleBuild(ctx, in) + } + + req.Header.Set(amznTraceIDHeader, percentEncode(xAmznTraceID)) + return next.HandleBuild(ctx, in) +} + +func percentEncode(s string) string { + upperhex := "0123456789ABCDEF" + hexCount := 0 + for i := 0; i < len(s); i++ { + c := s[i] + if shouldEncode(c) { + hexCount++ + } + } + + if hexCount == 0 { + return s + } + + required := len(s) + 2*hexCount + t := make([]byte, required) + j := 0 + for i := 0; i < len(s); i++ { + if c := s[i]; shouldEncode(c) { + t[j] = '%' + t[j+1] = upperhex[c>>4] + t[j+2] = upperhex[c&15] + j += 3 + } else { + t[j] = c + j++ + } + } + return string(t) +} + +func shouldEncode(c byte) bool { + if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { + return false + } + switch c { + case '-', '=', ';', ':', '+', '&', '[', ']', '{', '}', '"', '\'', ',': + return false + default: + return true + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id.go new file mode 100644 index 00000000000..dd3391fe41e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id.go @@ -0,0 +1,27 @@ +package middleware + +import ( + "github.com/aws/smithy-go/middleware" +) + +// requestIDKey is used to retrieve request id from response metadata +type requestIDKey struct{} + +// SetRequestIDMetadata sets the provided request id over middleware metadata +func SetRequestIDMetadata(metadata *middleware.Metadata, id string) { + metadata.Set(requestIDKey{}, id) +} + +// GetRequestIDMetadata retrieves the request id from middleware metadata +// returns string and bool indicating value of request id, whether request id was set. +func GetRequestIDMetadata(metadata middleware.Metadata) (string, bool) { + if !metadata.Has(requestIDKey{}) { + return "", false + } + + v, ok := metadata.Get(requestIDKey{}).(string) + if !ok { + return "", true + } + return v, true +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go new file mode 100644 index 00000000000..7ce48c611cd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go @@ -0,0 +1,49 @@ +package middleware + +import ( + "context" + + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// AddRequestIDRetrieverMiddleware adds request id retriever middleware +func AddRequestIDRetrieverMiddleware(stack *middleware.Stack) error { + // add error wrapper middleware before operation deserializers so that it can wrap the error response + // returned by operation deserializers + return stack.Deserialize.Insert(&requestIDRetriever{}, "OperationDeserializer", middleware.Before) +} + +type requestIDRetriever struct { +} + +// ID returns the middleware identifier +func (m *requestIDRetriever) ID() string { + return "RequestIDRetriever" +} + +func (m *requestIDRetriever) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + + resp, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + // No raw response to wrap with. 
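// Editorial sketch (not part of the vendored source): behavior of the
// percentEncode helper in recursion_detection.go above, per its allow-list.
//
//	percentEncode("Root=1-abc;Parent=def;Sampled=1") // unchanged: '=', '-', and ';' are allowed
//	percentEncode("a b/c")                           // "a%20b%2Fc": space and '/' are escaped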
+ return out, metadata, err + } + + // Different header which can map to request id + requestIDHeaderList := []string{"X-Amzn-Requestid", "X-Amz-RequestId"} + + for _, h := range requestIDHeaderList { + // check for headers known to contain Request id + if v := resp.Header.Get(h); len(v) != 0 { + // set reqID on metadata for successful responses. + SetRequestIDMetadata(&metadata, v) + break + } + } + + return out, metadata, err +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go new file mode 100644 index 00000000000..af3447ddc98 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go @@ -0,0 +1,261 @@ +package middleware + +import ( + "context" + "fmt" + "os" + "runtime" + "strings" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +var languageVersion = strings.TrimPrefix(runtime.Version(), "go") + +// SDKAgentKeyType is the metadata type to add to the SDK agent string +type SDKAgentKeyType int + +// The set of valid SDKAgentKeyType constants. If an unknown value is assigned for SDKAgentKeyType it will +// be mapped to AdditionalMetadata. +const ( + _ SDKAgentKeyType = iota + APIMetadata + OperatingSystemMetadata + LanguageMetadata + EnvironmentMetadata + FeatureMetadata + ConfigMetadata + FrameworkMetadata + AdditionalMetadata + ApplicationIdentifier +) + +func (k SDKAgentKeyType) string() string { + switch k { + case APIMetadata: + return "api" + case OperatingSystemMetadata: + return "os" + case LanguageMetadata: + return "lang" + case EnvironmentMetadata: + return "exec-env" + case FeatureMetadata: + return "ft" + case ConfigMetadata: + return "cfg" + case FrameworkMetadata: + return "lib" + case ApplicationIdentifier: + return "app" + case AdditionalMetadata: + fallthrough + default: + return "md" + } +} + +const execEnvVar = `AWS_EXECUTION_ENV` + +var validChars = map[rune]bool{ + '!': true, '#': true, '$': true, '%': true, '&': true, '\'': true, '*': true, '+': true, + '-': true, '.': true, '^': true, '_': true, '`': true, '|': true, '~': true, +} + +// requestUserAgent is a build middleware that set the User-Agent for the request. +type requestUserAgent struct { + sdkAgent, userAgent *smithyhttp.UserAgentBuilder +} + +// newRequestUserAgent returns a new requestUserAgent which will set the User-Agent and X-Amz-User-Agent for the +// request. 
+// +// User-Agent example: +// +// aws-sdk-go-v2/1.2.3 +// +// X-Amz-User-Agent example: +// +// aws-sdk-go-v2/1.2.3 md/GOOS/linux md/GOARCH/amd64 lang/go/1.15 +func newRequestUserAgent() *requestUserAgent { + userAgent, sdkAgent := smithyhttp.NewUserAgentBuilder(), smithyhttp.NewUserAgentBuilder() + addProductName(userAgent) + addProductName(sdkAgent) + + r := &requestUserAgent{ + sdkAgent: sdkAgent, + userAgent: userAgent, + } + + addSDKMetadata(r) + + return r +} + +func addSDKMetadata(r *requestUserAgent) { + r.AddSDKAgentKey(OperatingSystemMetadata, getNormalizedOSName()) + r.AddSDKAgentKeyValue(LanguageMetadata, "go", languageVersion) + r.AddSDKAgentKeyValue(AdditionalMetadata, "GOOS", runtime.GOOS) + r.AddSDKAgentKeyValue(AdditionalMetadata, "GOARCH", runtime.GOARCH) + if ev := os.Getenv(execEnvVar); len(ev) > 0 { + r.AddSDKAgentKey(EnvironmentMetadata, ev) + } +} + +func addProductName(builder *smithyhttp.UserAgentBuilder) { + builder.AddKeyValue(aws.SDKName, aws.SDKVersion) +} + +// AddUserAgentKey retrieves a requestUserAgent from the provided stack, or initializes one. +func AddUserAgentKey(key string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + requestUserAgent, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + requestUserAgent.AddUserAgentKey(key) + return nil + } +} + +// AddUserAgentKeyValue retrieves a requestUserAgent from the provided stack, or initializes one. +func AddUserAgentKeyValue(key, value string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + requestUserAgent, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + requestUserAgent.AddUserAgentKeyValue(key, value) + return nil + } +} + +// AddSDKAgentKey retrieves a requestUserAgent from the provided stack, or initializes one. +func AddSDKAgentKey(keyType SDKAgentKeyType, key string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + requestUserAgent, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + requestUserAgent.AddSDKAgentKey(keyType, key) + return nil + } +} + +// AddSDKAgentKeyValue retrieves a requestUserAgent from the provided stack, or initializes one. +func AddSDKAgentKeyValue(keyType SDKAgentKeyType, key, value string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + requestUserAgent, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + requestUserAgent.AddSDKAgentKeyValue(keyType, key, value) + return nil + } +} + +// AddRequestUserAgentMiddleware registers a requestUserAgent middleware on the stack if not present. +func AddRequestUserAgentMiddleware(stack *middleware.Stack) error { + _, err := getOrAddRequestUserAgent(stack) + return err +} + +func getOrAddRequestUserAgent(stack *middleware.Stack) (*requestUserAgent, error) { + id := (*requestUserAgent)(nil).ID() + bm, ok := stack.Build.Get(id) + if !ok { + bm = newRequestUserAgent() + err := stack.Build.Add(bm, middleware.After) + if err != nil { + return nil, err + } + } + + requestUserAgent, ok := bm.(*requestUserAgent) + if !ok { + return nil, fmt.Errorf("%T for %s middleware did not match expected type", bm, id) + } + + return requestUserAgent, nil +} + +// AddUserAgentKey adds the component identified by name to the User-Agent string. 
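// Editorial sketch (not part of the vendored source): appending custom
// User-Agent metadata via a client config's APIOptions; cfg is an assumed
// aws.Config value, and the app name/version are hypothetical.
//
//	cfg.APIOptions = append(cfg.APIOptions,
//	    AddUserAgentKeyValue("myapp", "1.0.0"),
//	)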
+func (u *requestUserAgent) AddUserAgentKey(key string) {
+    u.userAgent.AddKey(strings.Map(rules, key))
+}
+
+// AddUserAgentKeyValue adds the key identified by the given name and value to the User-Agent string.
+func (u *requestUserAgent) AddUserAgentKeyValue(key, value string) {
+    u.userAgent.AddKeyValue(strings.Map(rules, key), strings.Map(rules, value))
+}
+
+// AddSDKAgentKey adds the component identified by name and key type to the SDK agent string.
+func (u *requestUserAgent) AddSDKAgentKey(keyType SDKAgentKeyType, key string) {
+    // TODO: should target sdkAgent
+    u.userAgent.AddKey(keyType.string() + "/" + strings.Map(rules, key))
+}
+
+// AddSDKAgentKeyValue adds the key/value pair identified by the given key type to the SDK agent string.
+func (u *requestUserAgent) AddSDKAgentKeyValue(keyType SDKAgentKeyType, key, value string) {
+    // TODO: should target sdkAgent
+    u.userAgent.AddKeyValue(keyType.string(), strings.Map(rules, key)+"#"+strings.Map(rules, value))
+}
+
+// ID is the name of the middleware.
+func (u *requestUserAgent) ID() string {
+    return "UserAgent"
+}
+
+// HandleBuild adds or appends the constructed user agent to the request.
+func (u *requestUserAgent) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
+    out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+    switch req := in.Request.(type) {
+    case *smithyhttp.Request:
+        u.addHTTPUserAgent(req)
+        // TODO: To be re-enabled
+        // u.addHTTPSDKAgent(req)
+    default:
+        return out, metadata, fmt.Errorf("unknown transport type %T", in)
+    }
+
+    return next.HandleBuild(ctx, in)
+}
+
+func (u *requestUserAgent) addHTTPUserAgent(request *smithyhttp.Request) {
+    const userAgent = "User-Agent"
+    updateHTTPHeader(request, userAgent, u.userAgent.Build())
+}
+
+func (u *requestUserAgent) addHTTPSDKAgent(request *smithyhttp.Request) {
+    const sdkAgent = "X-Amz-User-Agent"
+    updateHTTPHeader(request, sdkAgent, u.sdkAgent.Build())
+}
+
+func updateHTTPHeader(request *smithyhttp.Request, header string, value string) {
+    var current string
+    if v := request.Header[header]; len(v) > 0 {
+        current = v[0]
+    }
+    if len(current) > 0 {
+        current = value + " " + current
+    } else {
+        current = value
+    }
+    request.Header[header] = append(request.Header[header][:0], current)
+}
+
+func rules(r rune) rune {
+    switch {
+    case r >= '0' && r <= '9':
+        return r
+    case r >= 'A' && r <= 'Z' || r >= 'a' && r <= 'z':
+        return r
+    case validChars[r]:
+        return r
+    default:
+        return '-'
+    }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go
new file mode 100644
index 00000000000..47ebc0f5476
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go
@@ -0,0 +1,72 @@
+package query
+
+import (
+    "fmt"
+    "net/url"
+)
+
+// Array represents the encoding of Query lists and sets. A Query array is a
+// representation of a list of values of a fixed type. A serialized array might
+// look like the following:
+//
+// ListName.member.1=foo
+// &ListName.member.2=bar
+// &ListName.member.3=baz
+type Array struct {
+    // The query values to add the array to.
+    values url.Values
+    // The array's prefix, which includes the names of all parent structures
+    // and ends with the name of the list. For example, the prefix might be
+    // "ParentStructure.ListName". This prefix will be used to form the full
+    // keys for each element in the list.
For example, an entry might have the
+    // key "ParentStructure.ListName.member.MemberName.1".
+    //
+    // While this is currently represented as a string that gets added to, it
+    // could also be represented as a stack that only gets condensed into a
+    // string when a finalized key is created. This could potentially reduce
+    // allocations.
+    prefix string
+    // Whether the list is flat or not. A list that is not flat will produce the
+    // following entry to the url.Values for a given entry:
+    // ListName.MemberName.1=value
+    // A list that is flat will produce the following:
+    // ListName.1=value
+    flat bool
+    // The location name of the member. In most cases this should be "member".
+    memberName string
+    // Elements are stored in values, so we keep track of the list size here.
+    size int32
+    // Empty lists are encoded as "="; if we add a value later we will
+    // remove this encoding.
+    emptyValue Value
+}
+
+func newArray(values url.Values, prefix string, flat bool, memberName string) *Array {
+    emptyValue := newValue(values, prefix, flat)
+    emptyValue.String("")
+
+    return &Array{
+        values:     values,
+        prefix:     prefix,
+        flat:       flat,
+        memberName: memberName,
+        emptyValue: emptyValue,
+    }
+}
+
+// Value adds a new element to the Query Array. Returns a Value type used to
+// encode the array element.
+func (a *Array) Value() Value {
+    if a.size == 0 {
+        delete(a.values, a.emptyValue.key)
+    }
+
+    // Query lists start at 1, so adjust the size first
+    a.size++
+    prefix := a.prefix
+    if !a.flat {
+        prefix = fmt.Sprintf("%s.%s", prefix, a.memberName)
+    }
+    // Lists can't have flat members
+    return newValue(a.values, fmt.Sprintf("%s.%d", prefix, a.size), false)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/encoder.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/encoder.go
new file mode 100644
index 00000000000..2ecf9241cdd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/encoder.go
@@ -0,0 +1,80 @@
+package query
+
+import (
+    "io"
+    "net/url"
+    "sort"
+)
+
+// Encoder is a Query encoder that supports construction of Query body
+// values using methods.
+type Encoder struct {
+    // The query values that will be built up to manage encoding.
+    values url.Values
+    // The writer that the encoded body will be written to.
+    writer io.Writer
+    Value
+}
+
+// NewEncoder returns a new Query body encoder.
+func NewEncoder(writer io.Writer) *Encoder {
+    values := url.Values{}
+    return &Encoder{
+        values: values,
+        writer: writer,
+        Value:  newBaseValue(values),
+    }
+}
+
+// Encode writes the encoded representation of the Encoder's current state
+// to the underlying writer.
+func (e Encoder) Encode() error {
+    ws, ok := e.writer.(interface{ WriteString(string) (int, error) })
+    if !ok {
+        // Fall back to less optimal byte slice casting if WriteString isn't available.
+ ws = &wrapWriteString{writer: e.writer} + } + + // Get the keys and sort them to have a stable output + keys := make([]string, 0, len(e.values)) + for k := range e.values { + keys = append(keys, k) + } + sort.Strings(keys) + isFirstEntry := true + for _, key := range keys { + queryValues := e.values[key] + escapedKey := url.QueryEscape(key) + for _, value := range queryValues { + if !isFirstEntry { + if _, err := ws.WriteString(`&`); err != nil { + return err + } + } else { + isFirstEntry = false + } + if _, err := ws.WriteString(escapedKey); err != nil { + return err + } + if _, err := ws.WriteString(`=`); err != nil { + return err + } + if _, err := ws.WriteString(url.QueryEscape(value)); err != nil { + return err + } + } + } + return nil +} + +// wrapWriteString wraps an io.Writer to provide a WriteString method +// where one is not available. +type wrapWriteString struct { + writer io.Writer +} + +// WriteString writes a string to the wrapped writer by casting it to +// a byte array first. +func (w wrapWriteString) WriteString(v string) (int, error) { + return w.writer.Write([]byte(v)) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/map.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/map.go new file mode 100644 index 00000000000..dea242b8b6d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/map.go @@ -0,0 +1,78 @@ +package query + +import ( + "fmt" + "net/url" +) + +// Map represents the encoding of Query maps. A Query map is a representation +// of a mapping of arbitrary string keys to arbitrary values of a fixed type. +// A Map differs from an Object in that the set of keys is not fixed, in that +// the values must all be of the same type, and that map entries are ordered. +// A serialized map might look like the following: +// +// MapName.entry.1.key=Foo +// &MapName.entry.1.value=spam +// &MapName.entry.2.key=Bar +// &MapName.entry.2.value=eggs +type Map struct { + // The query values to add the map to. + values url.Values + // The map's prefix, which includes the names of all parent structures + // and ends with the name of the object. For example, the prefix might be + // "ParentStructure.MapName". This prefix will be used to form the full + // keys for each key-value pair of the map. For example, a value might have + // the key "ParentStructure.MapName.1.value". + // + // While this is currently represented as a string that gets added to, it + // could also be represented as a stack that only gets condensed into a + // string when a finalized key is created. This could potentially reduce + // allocations. + prefix string + // Whether the map is flat or not. A map that is not flat will produce the + // following entries to the url.Values for a given key-value pair: + // MapName.entry.1.KeyLocationName=mykey + // MapName.entry.1.ValueLocationName=myvalue + // A map that is flat will produce the following: + // MapName.1.KeyLocationName=mykey + // MapName.1.ValueLocationName=myvalue + flat bool + // The location name of the key. In most cases this should be "key". + keyLocationName string + // The location name of the value. In most cases this should be "value". + valueLocationName string + // Elements are stored in values, so we keep track of the list size here. 
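// Editorial sketch (not part of the vendored source): using the Encoder from
// encoder.go above; the parameter names are hypothetical.
//
//	var buf bytes.Buffer
//	enc := NewEncoder(&buf)
//	obj := enc.Object()
//	obj.Key("Action").String("ListQueues")
//	obj.Key("Version").String("2012-11-05")
//	if err := enc.Encode(); err != nil {
//	    // handle error
//	}
//	// buf.String() == "Action=ListQueues&Version=2012-11-05" (keys are sorted)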
+ size int32 +} + +func newMap(values url.Values, prefix string, flat bool, keyLocationName string, valueLocationName string) *Map { + return &Map{ + values: values, + prefix: prefix, + flat: flat, + keyLocationName: keyLocationName, + valueLocationName: valueLocationName, + } +} + +// Key adds the given named key to the Query map. +// Returns a Value encoder that should be used to encode a Query value type. +func (m *Map) Key(name string) Value { + // Query lists start a 1, so adjust the size first + m.size++ + var key string + var value string + if m.flat { + key = fmt.Sprintf("%s.%d.%s", m.prefix, m.size, m.keyLocationName) + value = fmt.Sprintf("%s.%d.%s", m.prefix, m.size, m.valueLocationName) + } else { + key = fmt.Sprintf("%s.entry.%d.%s", m.prefix, m.size, m.keyLocationName) + value = fmt.Sprintf("%s.entry.%d.%s", m.prefix, m.size, m.valueLocationName) + } + + // The key can only be a string, so we just go ahead and set it here + newValue(m.values, key, false).String(name) + + // Maps can't have flat members + return newValue(m.values, value, false) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/middleware.go new file mode 100644 index 00000000000..36034479113 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/middleware.go @@ -0,0 +1,62 @@ +package query + +import ( + "context" + "fmt" + "io/ioutil" + + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// AddAsGetRequestMiddleware adds a middleware to the Serialize stack after the +// operation serializer that will convert the query request body to a GET +// operation with the query message in the HTTP request querystring. +func AddAsGetRequestMiddleware(stack *middleware.Stack) error { + return stack.Serialize.Insert(&asGetRequest{}, "OperationSerializer", middleware.After) +} + +type asGetRequest struct{} + +func (*asGetRequest) ID() string { return "Query:AsGetRequest" } + +func (m *asGetRequest) HandleSerialize( + ctx context.Context, input middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + req, ok := input.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("expect smithy HTTP Request, got %T", input.Request) + } + + req.Method = "GET" + + // If the stream is not set, nothing else to do. + stream := req.GetStream() + if stream == nil { + return next.HandleSerialize(ctx, input) + } + + // Clear the stream since there will not be any body. + req.Header.Del("Content-Type") + req, err = req.SetStream(nil) + if err != nil { + return out, metadata, fmt.Errorf("unable update request body %w", err) + } + input.Request = req + + // Update request query with the body's query string value. 
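// Editorial sketch (not part of the vendored source): the entry keys produced
// by the Map encoder in map.go above for a non-flat map.
//
//	m := newMap(url.Values{}, "MapName", false, "key", "value")
//	m.Key("Foo").String("spam")
//	// adds MapName.entry.1.key=Foo and MapName.entry.1.value=spam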
+ delim := "" + if len(req.URL.RawQuery) != 0 { + delim = "&" + } + + b, err := ioutil.ReadAll(stream) + if err != nil { + return out, metadata, fmt.Errorf("unable to get request body %w", err) + } + req.URL.RawQuery += delim + string(b) + + return next.HandleSerialize(ctx, input) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go new file mode 100644 index 00000000000..455b92515ca --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go @@ -0,0 +1,69 @@ +package query + +import ( + "fmt" + "net/url" +) + +// Object represents the encoding of Query structures and unions. A Query +// object is a representation of a mapping of string keys to arbitrary +// values where there is a fixed set of keys whose values each have their +// own known type. A serialized object might look like the following: +// +// ObjectName.Foo=value +// &ObjectName.Bar=5 +type Object struct { + // The query values to add the object to. + values url.Values + // The object's prefix, which includes the names of all parent structures + // and ends with the name of the object. For example, the prefix might be + // "ParentStructure.ObjectName". This prefix will be used to form the full + // keys for each member of the object. For example, a member might have the + // key "ParentStructure.ObjectName.MemberName". + // + // While this is currently represented as a string that gets added to, it + // could also be represented as a stack that only gets condensed into a + // string when a finalized key is created. This could potentially reduce + // allocations. + prefix string +} + +func newObject(values url.Values, prefix string) *Object { + return &Object{ + values: values, + prefix: prefix, + } +} + +// Key adds the given named key to the Query object. +// Returns a Value encoder that should be used to encode a Query value type. +func (o *Object) Key(name string) Value { + return o.key(name, false) +} + +// KeyWithValues adds the given named key to the Query object. +// Returns a Value encoder that should be used to encode a Query list of values. +func (o *Object) KeyWithValues(name string) Value { + return o.keyWithValues(name, false) +} + +// FlatKey adds the given named key to the Query object. +// Returns a Value encoder that should be used to encode a Query value type. The +// value will be flattened if it is a map or array. +func (o *Object) FlatKey(name string) Value { + return o.key(name, true) +} + +func (o *Object) key(name string, flatValue bool) Value { + if o.prefix != "" { + return newValue(o.values, fmt.Sprintf("%s.%s", o.prefix, name), flatValue) + } + return newValue(o.values, name, flatValue) +} + +func (o *Object) keyWithValues(name string, flatValue bool) Value { + if o.prefix != "" { + return newAppendValue(o.values, fmt.Sprintf("%s.%s", o.prefix, name), flatValue) + } + return newAppendValue(o.values, name, flatValue) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go new file mode 100644 index 00000000000..a9251521f12 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go @@ -0,0 +1,115 @@ +package query + +import ( + "math/big" + "net/url" + + "github.com/aws/smithy-go/encoding/httpbinding" +) + +// Value represents a Query Value type. +type Value struct { + // The query values to add the value to. 
+ values url.Values + // The value's key, which will form the prefix for complex types. + key string + // Whether the value should be flattened or not if it's a flattenable type. + flat bool + queryValue httpbinding.QueryValue +} + +func newValue(values url.Values, key string, flat bool) Value { + return Value{ + values: values, + key: key, + flat: flat, + queryValue: httpbinding.NewQueryValue(values, key, false), + } +} + +func newAppendValue(values url.Values, key string, flat bool) Value { + return Value{ + values: values, + key: key, + flat: flat, + queryValue: httpbinding.NewQueryValue(values, key, true), + } +} + +func newBaseValue(values url.Values) Value { + return Value{ + values: values, + queryValue: httpbinding.NewQueryValue(nil, "", false), + } +} + +// Array returns a new Array encoder. +func (qv Value) Array(locationName string) *Array { + return newArray(qv.values, qv.key, qv.flat, locationName) +} + +// Object returns a new Object encoder. +func (qv Value) Object() *Object { + return newObject(qv.values, qv.key) +} + +// Map returns a new Map encoder. +func (qv Value) Map(keyLocationName string, valueLocationName string) *Map { + return newMap(qv.values, qv.key, qv.flat, keyLocationName, valueLocationName) +} + +// Base64EncodeBytes encodes v as a base64 query string value. +// This is intended to enable compatibility with the JSON encoder. +func (qv Value) Base64EncodeBytes(v []byte) { + qv.queryValue.Blob(v) +} + +// Boolean encodes v as a query string value +func (qv Value) Boolean(v bool) { + qv.queryValue.Boolean(v) +} + +// String encodes v as a query string value +func (qv Value) String(v string) { + qv.queryValue.String(v) +} + +// Byte encodes v as a query string value +func (qv Value) Byte(v int8) { + qv.queryValue.Byte(v) +} + +// Short encodes v as a query string value +func (qv Value) Short(v int16) { + qv.queryValue.Short(v) +} + +// Integer encodes v as a query string value +func (qv Value) Integer(v int32) { + qv.queryValue.Integer(v) +} + +// Long encodes v as a query string value +func (qv Value) Long(v int64) { + qv.queryValue.Long(v) +} + +// Float encodes v as a query string value +func (qv Value) Float(v float32) { + qv.queryValue.Float(v) +} + +// Double encodes v as a query string value +func (qv Value) Double(v float64) { + qv.queryValue.Double(v) +} + +// BigInteger encodes v as a query string value +func (qv Value) BigInteger(v *big.Int) { + qv.queryValue.BigInteger(v) +} + +// BigDecimal encodes v as a query string value +func (qv Value) BigDecimal(v *big.Float) { + qv.queryValue.BigDecimal(v) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/restjson/decoder_util.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/restjson/decoder_util.go new file mode 100644 index 00000000000..1bce78a4d45 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/restjson/decoder_util.go @@ -0,0 +1,85 @@ +package restjson + +import ( + "encoding/json" + "io" + "strings" + + "github.com/aws/smithy-go" +) + +// GetErrorInfo util looks for code, __type, and message members in the +// json body. These members are optionally available, and the function +// returns the value of member if it is available. This function is useful to +// identify the error code, msg in a REST JSON error response. 
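+//
+// Editorial illustration (not from the SDK source): given a response body of
+//
+//	{"__type":"com.example#ThrottlingException","message":"slow down"}
+//
+// GetErrorInfo returns ("ThrottlingException", "slow down", nil), with
+// SanitizeErrorCode stripping everything up to and including the "#".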
+func GetErrorInfo(decoder *json.Decoder) (errorType string, message string, err error) {
+	var errInfo struct {
+		Code    string
+		Type    string `json:"__type"`
+		Message string
+	}
+
+	err = decoder.Decode(&errInfo)
+	if err != nil {
+		if err == io.EOF {
+			return errorType, message, nil
+		}
+		return errorType, message, err
+	}
+
+	// assign error type
+	if len(errInfo.Code) != 0 {
+		errorType = errInfo.Code
+	} else if len(errInfo.Type) != 0 {
+		errorType = errInfo.Type
+	}
+
+	// assign error message
+	if len(errInfo.Message) != 0 {
+		message = errInfo.Message
+	}
+
+	// sanitize error
+	if len(errorType) != 0 {
+		errorType = SanitizeErrorCode(errorType)
+	}
+
+	return errorType, message, nil
+}
+
+// SanitizeErrorCode sanitizes the errorCode string.
+// The rule for sanitizing is if a `:` character is present, then take only the
+// contents before the first : character in the value.
+// If a # character is present, then take only the contents after the
+// first # character in the value.
+func SanitizeErrorCode(errorCode string) string {
+	if strings.ContainsAny(errorCode, ":") {
+		errorCode = strings.SplitN(errorCode, ":", 2)[0]
+	}
+
+	if strings.ContainsAny(errorCode, "#") {
+		errorCode = strings.SplitN(errorCode, "#", 2)[1]
+	}
+
+	return errorCode
+}
+
+// GetSmithyGenericAPIError returns a smithy GenericAPIError and an error interface.
+// Takes in a json decoder and an error code string as args. The function retrieves
+// the error message and error code from the decoder body. If an errorCode of length
+// greater than 0 is passed in as an argument, it is used instead.
+func GetSmithyGenericAPIError(decoder *json.Decoder, errorCode string) (*smithy.GenericAPIError, error) {
+	errorType, message, err := GetErrorInfo(decoder)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(errorCode) == 0 {
+		errorCode = errorType
+	}
+
+	return &smithy.GenericAPIError{
+		Code:    errorCode,
+		Message: message,
+	}, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go
new file mode 100644
index 00000000000..6975ce6524d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go
@@ -0,0 +1,48 @@
+package xml
+
+import (
+	"encoding/xml"
+	"fmt"
+	"io"
+)
+
+// ErrorComponents represents the error response fields
+// that will be deserialized from an xml error response body
+type ErrorComponents struct {
+	Code      string
+	Message   string
+	RequestID string
+}
+
+// GetErrorResponseComponents returns the error fields from an xml error response body
+func GetErrorResponseComponents(r io.Reader, noErrorWrapping bool) (ErrorComponents, error) {
+	if noErrorWrapping {
+		var errResponse noWrappedErrorResponse
+		if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF {
+			return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err)
+		}
+		return ErrorComponents(errResponse), nil
+	}
+
+	var errResponse wrappedErrorResponse
+	if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF {
+		return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err)
+	}
+	return ErrorComponents(errResponse), nil
+}
+
+// noWrappedErrorResponse represents the error response body with
+// no internal Error wrapping
+type noWrappedErrorResponse struct {
+	Code      string `xml:"Code"`
+	Message   string `xml:"Message"`
+	RequestID string `xml:"RequestId"`
+}
+
+// wrappedErrorResponse represents the error response body
+// wrapped within Error
+type wrappedErrorResponse struct {
+	Code      string `xml:"Error>Code"`
+	Message   string `xml:"Error>Message"`
+	RequestID string `xml:"RequestId"`
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_bucket.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_bucket.go
new file mode 100644
index 00000000000..974ef594f07
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_bucket.go
@@ -0,0 +1,96 @@
+package ratelimit
+
+import (
+	"sync"
+)
+
+// TokenBucket provides a concurrency safe utility for adding and removing
+// tokens from the available token bucket.
+type TokenBucket struct {
+	remainingTokens uint
+	maxCapacity     uint
+	minCapacity     uint
+	mu              sync.Mutex
+}
+
+// NewTokenBucket returns an initialized TokenBucket with the capacity
+// specified.
+func NewTokenBucket(i uint) *TokenBucket {
+	return &TokenBucket{
+		remainingTokens: i,
+		maxCapacity:     i,
+		minCapacity:     1,
+	}
+}
+
+// Retrieve attempts to reduce the available tokens by the amount requested. If
+// there are tokens available true will be returned along with the number of
+// available tokens remaining. If the amount requested is larger than the available
+// capacity, false will be returned along with the available capacity. If the
+// amount is less than the available capacity, the capacity will be reduced by
+// that amount, and the remaining capacity and true will be returned.
+func (t *TokenBucket) Retrieve(amount uint) (available uint, retrieved bool) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	if amount > t.remainingTokens {
+		return t.remainingTokens, false
+	}
+
+	t.remainingTokens -= amount
+	return t.remainingTokens, true
+}
+
+// Refund returns the amount of tokens back to the available token bucket, up
+// to the initial capacity.
+func (t *TokenBucket) Refund(amount uint) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	// Capacity cannot exceed max capacity.
+	t.remainingTokens = uintMin(t.remainingTokens+amount, t.maxCapacity)
+}
+
+// Capacity returns the maximum capacity of tokens that the bucket could
+// contain.
+func (t *TokenBucket) Capacity() uint {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	return t.maxCapacity
+}
+
+// Remaining returns the number of tokens remaining in the bucket.
+func (t *TokenBucket) Remaining() uint {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	return t.remainingTokens
+}
+
+// Resize adjusts the size of the token bucket. Returns the capacity remaining.
+func (t *TokenBucket) Resize(size uint) uint {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	t.maxCapacity = uintMax(size, t.minCapacity)
+
+	// Capacity needs to be capped at max capacity, if max size reduced.
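+	// Editorial illustration (not from the SDK source): Resize(2) on a bucket
+	// holding 5 tokens caps remainingTokens at 2, while Resize(10) leaves the
+	// 5 remaining tokens untouched until Refund adds more.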
+	t.remainingTokens = uintMin(t.remainingTokens, t.maxCapacity)
+
+	return t.remainingTokens
+}
+
+func uintMin(a, b uint) uint {
+	if a < b {
+		return a
+	}
+	return b
+}
+
+func uintMax(a, b uint) uint {
+	if a > b {
+		return a
+	}
+	return b
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go
new file mode 100644
index 00000000000..d89090ad38e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go
@@ -0,0 +1,83 @@
+package ratelimit
+
+import (
+	"context"
+	"fmt"
+)
+
+type rateToken struct {
+	tokenCost uint
+	bucket    *TokenBucket
+}
+
+func (t rateToken) release() error {
+	t.bucket.Refund(t.tokenCost)
+	return nil
+}
+
+// TokenRateLimit provides a Token Bucket RateLimiter implementation
+// that limits the overall number of retry attempts that can be made across
+// operation invocations.
+type TokenRateLimit struct {
+	bucket *TokenBucket
+}
+
+// NewTokenRateLimit returns a TokenRateLimit with default values.
+// Functional options can configure the retry rate limiter.
+func NewTokenRateLimit(tokens uint) *TokenRateLimit {
+	return &TokenRateLimit{
+		bucket: NewTokenBucket(tokens),
+	}
+}
+
+type canceledError struct {
+	Err error
+}
+
+func (c canceledError) CanceledError() bool { return true }
+func (c canceledError) Unwrap() error       { return c.Err }
+func (c canceledError) Error() string {
+	return fmt.Sprintf("canceled, %v", c.Err)
+}
+
+// GetToken may cause an available pool of retry quota to be
+// decremented. Will return an error if the requested amount cannot be
+// deducted from the retry quota.
+func (l *TokenRateLimit) GetToken(ctx context.Context, cost uint) (func() error, error) {
+	select {
+	case <-ctx.Done():
+		return nil, canceledError{Err: ctx.Err()}
+	default:
+	}
+	if avail, ok := l.bucket.Retrieve(cost); !ok {
+		return nil, QuotaExceededError{Available: avail, Requested: cost}
+	}
+
+	return rateToken{
+		tokenCost: cost,
+		bucket:    l.bucket,
+	}.release, nil
+}
+
+// AddTokens increments the token bucket by a fixed amount.
+func (l *TokenRateLimit) AddTokens(v uint) error {
+	l.bucket.Refund(v)
+	return nil
+}
+
+// Remaining returns the number of remaining tokens in the bucket.
+func (l *TokenRateLimit) Remaining() uint {
+	return l.bucket.Remaining()
+}
+
+// QuotaExceededError provides the SDK error when the retries for a given
+// token bucket have been exhausted.
+type QuotaExceededError struct {
+	Available uint
+	Requested uint
+}
+
+func (e QuotaExceededError) Error() string {
+	return fmt.Sprintf("retry quota exceeded, %d available, %d requested",
+		e.Available, e.Requested)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/request.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/request.go
new file mode 100644
index 00000000000..d8d00e61582
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/request.go
@@ -0,0 +1,25 @@
+package aws
+
+import (
+	"fmt"
+)
+
+// TODO: remove, replace with smithy.CanceledError
+
+// RequestCanceledError is the error that will be returned by an API request
+// that was canceled. Requests given a Context may return this error when
+// canceled.
+type RequestCanceledError struct {
+	Err error
+}
+
+// CanceledError returns true to satisfy interfaces checking for canceled errors.
+func (*RequestCanceledError) CanceledError() bool { return true }
+
+// Unwrap returns the underlying error, if there was one.
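+//
+// Editorial illustration (not from the SDK source): callers can detect
+// cancellation without naming this type, e.g.
+//
+//	var ce interface{ CanceledError() bool }
+//	if errors.As(err, &ce) && ce.CanceledError() {
+//		// request was canceled
+//	}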
+func (e *RequestCanceledError) Unwrap() error {
+	return e.Err
+}
+func (e *RequestCanceledError) Error() string {
+	return fmt.Sprintf("request canceled, %v", e.Err)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive.go
new file mode 100644
index 00000000000..4dfde857373
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive.go
@@ -0,0 +1,156 @@
+package retry
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+)
+
+const (
+	// DefaultRequestCost is the cost of a single request from the adaptive
+	// rate limited token bucket.
+	DefaultRequestCost uint = 1
+)
+
+// DefaultThrottles provides the set of errors considered throttle errors that
+// are checked by default.
+var DefaultThrottles = []IsErrorThrottle{
+	ThrottleErrorCode{
+		Codes: DefaultThrottleErrorCodes,
+	},
+}
+
+// AdaptiveModeOptions provides the functional options for configuring the
+// adaptive retry mode, and delay behavior.
+type AdaptiveModeOptions struct {
+	// If the adaptive token bucket is empty when an attempt is to be made,
+	// AdaptiveMode will sleep until a token is available. This can occur when
+	// attempts fail with throttle errors. Use this option to disable sleeping
+	// until a token is available, and return an error immediately.
+	FailOnNoAttemptTokens bool
+
+	// The cost of an attempt from the AdaptiveMode's adaptive token bucket.
+	RequestCost uint
+
+	// Set of strategies to determine if the attempt failed due to a throttle
+	// error.
+	//
+	// It is safe to append to this list in NewAdaptiveMode's functional options.
+	Throttles []IsErrorThrottle
+
+	// Set of options for standard retry mode that AdaptiveMode is built on top
+	// of. AdaptiveMode may apply its own defaults to Standard retry mode that
+	// are different than the defaults of NewStandard. Use these options to
+	// override the default options.
+	StandardOptions []func(*StandardOptions)
+}
+
+// AdaptiveMode provides an experimental retry strategy that expands on the
+// Standard retry strategy, adding client attempt rate limits. The attempt rate
+// limit is initially unrestricted, but becomes restricted when an attempt
+// fails with a throttle error. When restricted, AdaptiveMode may need to
+// sleep before an attempt is made if too many throttles have been received.
+// AdaptiveMode's sleep can be canceled with context cancel. Set
+// AdaptiveModeOptions FailOnNoAttemptTokens to change the behavior from sleep
+// to fail fast.
+//
+// Eventually the unrestricted attempt rate limit will be restored once
+// attempts are no longer failing due to throttle errors.
+type AdaptiveMode struct {
+	options   AdaptiveModeOptions
+	throttles IsErrorThrottles
+
+	retryer   aws.RetryerV2
+	rateLimit *adaptiveRateLimit
+}
+
+// NewAdaptiveMode returns an initialized AdaptiveMode retry strategy.
+func NewAdaptiveMode(optFns ...func(*AdaptiveModeOptions)) *AdaptiveMode {
+	o := AdaptiveModeOptions{
+		RequestCost: DefaultRequestCost,
+		Throttles:   append([]IsErrorThrottle{}, DefaultThrottles...),
+	}
+	for _, fn := range optFns {
+		fn(&o)
+	}
+
+	return &AdaptiveMode{
+		options:   o,
+		throttles: IsErrorThrottles(o.Throttles),
+		retryer:   NewStandard(o.StandardOptions...),
+		rateLimit: newAdaptiveRateLimit(),
+	}
+}
+
+// IsErrorRetryable returns if the failed attempt is retryable. This check
+// should determine if the error can be retried, or if the error is
+// terminal.
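+//
+// Editorial note (not from the SDK source): AdaptiveMode delegates this check
+// to its underlying Standard retryer; the Throttles classifiers only feed the
+// adaptive rate limit via handleResponse, never retryability.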
+func (a *AdaptiveMode) IsErrorRetryable(err error) bool {
+	return a.retryer.IsErrorRetryable(err)
+}
+
+// MaxAttempts returns the maximum number of attempts that can be made for
+// a request before failing. A value of 0 implies that the attempt should
+// be retried until it succeeds if the errors are retryable.
+func (a *AdaptiveMode) MaxAttempts() int {
+	return a.retryer.MaxAttempts()
+}
+
+// RetryDelay returns the delay that should be used before retrying the
+// attempt. Will return an error if the delay could not be determined.
+func (a *AdaptiveMode) RetryDelay(attempt int, opErr error) (
+	time.Duration, error,
+) {
+	return a.retryer.RetryDelay(attempt, opErr)
+}
+
+// GetRetryToken attempts to deduct the retry cost from the retry token pool,
+// returning the token release function, or an error.
+func (a *AdaptiveMode) GetRetryToken(ctx context.Context, opErr error) (
+	releaseToken func(error) error, err error,
+) {
+	return a.retryer.GetRetryToken(ctx, opErr)
+}
+
+// GetInitialToken returns the initial attempt token that can increment the
+// retry token pool if the attempt is successful.
+//
+// Deprecated: This method does not provide a way to block using Context,
+// nor can it return an error. Use RetryerV2, and GetAttemptToken instead. Only
+// present to implement the Retryer interface.
+func (a *AdaptiveMode) GetInitialToken() (releaseToken func(error) error) {
+	return nopRelease
+}
+
+// GetAttemptToken returns the attempt token that can be used to rate limit
+// attempt calls. Will be used by the SDK's retry package's Attempt
+// middleware to get an attempt token prior to making the attempt, and to
+// release the attempt token after the attempt has been made.
+func (a *AdaptiveMode) GetAttemptToken(ctx context.Context) (func(error) error, error) {
+	for {
+		acquiredToken, waitTryAgain := a.rateLimit.AcquireToken(a.options.RequestCost)
+		if acquiredToken {
+			break
+		}
+		if a.options.FailOnNoAttemptTokens {
+			return nil, fmt.Errorf(
+				"unable to get attempt token, and FailOnNoAttemptTokens enabled")
+		}
+
+		if err := sdk.SleepWithContext(ctx, waitTryAgain); err != nil {
+			return nil, fmt.Errorf("failed to wait for token to be available, %w", err)
+		}
+	}
+
+	return a.handleResponse, nil
+}
+
+func (a *AdaptiveMode) handleResponse(opErr error) error {
+	throttled := a.throttles.IsErrorThrottle(opErr).Bool()
+
+	a.rateLimit.Update(throttled)
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_ratelimit.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_ratelimit.go
new file mode 100644
index 00000000000..ad96d9b8c5d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_ratelimit.go
@@ -0,0 +1,158 @@
+package retry
+
+import (
+	"math"
+	"sync"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+)
+
+type adaptiveRateLimit struct {
+	tokenBucketEnabled bool
+
+	smooth        float64
+	beta          float64
+	scaleConstant float64
+	minFillRate   float64
+
+	fillRate         float64
+	calculatedRate   float64
+	lastRefilled     time.Time
+	measuredTxRate   float64
+	lastTxRateBucket float64
+	requestCount     int64
+	lastMaxRate      float64
+	lastThrottleTime time.Time
+	timeWindow       float64
+
+	tokenBucket *adaptiveTokenBucket
+
+	mu sync.Mutex
+}
+
+func newAdaptiveRateLimit() *adaptiveRateLimit {
+	now := sdk.NowTime()
+	return &adaptiveRateLimit{
+		smooth:        0.8,
+		beta:          0.7,
+		scaleConstant: 0.4,
+
+		minFillRate: 0.5,
+
+		lastTxRateBucket: math.Floor(timeFloat64Seconds(now)),
+		lastThrottleTime: now,
+
+		tokenBucket: newAdaptiveTokenBucket(0),
} +} + +func (a *adaptiveRateLimit) Enable(v bool) { + a.mu.Lock() + defer a.mu.Unlock() + + a.tokenBucketEnabled = v +} + +func (a *adaptiveRateLimit) AcquireToken(amount uint) ( + tokenAcquired bool, waitTryAgain time.Duration, +) { + a.mu.Lock() + defer a.mu.Unlock() + + if !a.tokenBucketEnabled { + return true, 0 + } + + a.tokenBucketRefill() + + available, ok := a.tokenBucket.Retrieve(float64(amount)) + if !ok { + waitDur := float64Seconds((float64(amount) - available) / a.fillRate) + return false, waitDur + } + + return true, 0 +} + +func (a *adaptiveRateLimit) Update(throttled bool) { + a.mu.Lock() + defer a.mu.Unlock() + + a.updateMeasuredRate() + + if throttled { + rateToUse := a.measuredTxRate + if a.tokenBucketEnabled { + rateToUse = math.Min(a.measuredTxRate, a.fillRate) + } + + a.lastMaxRate = rateToUse + a.calculateTimeWindow() + a.lastThrottleTime = sdk.NowTime() + a.calculatedRate = a.cubicThrottle(rateToUse) + a.tokenBucketEnabled = true + } else { + a.calculateTimeWindow() + a.calculatedRate = a.cubicSuccess(sdk.NowTime()) + } + + newRate := math.Min(a.calculatedRate, 2*a.measuredTxRate) + a.tokenBucketUpdateRate(newRate) +} + +func (a *adaptiveRateLimit) cubicSuccess(t time.Time) float64 { + dt := secondsFloat64(t.Sub(a.lastThrottleTime)) + return (a.scaleConstant * math.Pow(dt-a.timeWindow, 3)) + a.lastMaxRate +} + +func (a *adaptiveRateLimit) cubicThrottle(rateToUse float64) float64 { + return rateToUse * a.beta +} + +func (a *adaptiveRateLimit) calculateTimeWindow() { + a.timeWindow = math.Pow((a.lastMaxRate*(1.-a.beta))/a.scaleConstant, 1./3.) +} + +func (a *adaptiveRateLimit) tokenBucketUpdateRate(newRPS float64) { + a.tokenBucketRefill() + a.fillRate = math.Max(newRPS, a.minFillRate) + a.tokenBucket.Resize(newRPS) +} + +func (a *adaptiveRateLimit) updateMeasuredRate() { + now := sdk.NowTime() + timeBucket := math.Floor(timeFloat64Seconds(now)*2.) / 2. + a.requestCount++ + + if timeBucket > a.lastTxRateBucket { + currentRate := float64(a.requestCount) / (timeBucket - a.lastTxRateBucket) + a.measuredTxRate = (currentRate * a.smooth) + (a.measuredTxRate * (1. - a.smooth)) + a.requestCount = 0 + a.lastTxRateBucket = timeBucket + } +} + +func (a *adaptiveRateLimit) tokenBucketRefill() { + now := sdk.NowTime() + if a.lastRefilled.IsZero() { + a.lastRefilled = now + return + } + + fillAmount := secondsFloat64(now.Sub(a.lastRefilled)) * a.fillRate + a.tokenBucket.Refund(fillAmount) + a.lastRefilled = now +} + +func float64Seconds(v float64) time.Duration { + return time.Duration(v * float64(time.Second)) +} + +func secondsFloat64(v time.Duration) float64 { + return float64(v) / float64(time.Second) +} + +func timeFloat64Seconds(v time.Time) float64 { + return float64(v.UnixNano()) / float64(time.Second) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_token_bucket.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_token_bucket.go new file mode 100644 index 00000000000..052723e8ed1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_token_bucket.go @@ -0,0 +1,83 @@ +package retry + +import ( + "math" + "sync" +) + +// adaptiveTokenBucket provides a concurrency safe utility for adding and +// removing tokens from the available token bucket. +type adaptiveTokenBucket struct { + remainingTokens float64 + maxCapacity float64 + minCapacity float64 + mu sync.Mutex +} + +// newAdaptiveTokenBucket returns an initialized adaptiveTokenBucket with the +// capacity specified. 
+func newAdaptiveTokenBucket(i float64) *adaptiveTokenBucket {
+	return &adaptiveTokenBucket{
+		remainingTokens: i,
+		maxCapacity:     i,
+		minCapacity:     1,
+	}
+}
+
+// Retrieve attempts to reduce the available tokens by the amount requested. If
+// there are tokens available true will be returned along with the number of
+// available tokens remaining. If the amount requested is larger than the available
+// capacity, false will be returned along with the available capacity. If the
+// amount is less than the available capacity, the capacity will be reduced by
+// that amount, and the remaining capacity and true will be returned.
+func (t *adaptiveTokenBucket) Retrieve(amount float64) (available float64, retrieved bool) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	if amount > t.remainingTokens {
+		return t.remainingTokens, false
+	}
+
+	t.remainingTokens -= amount
+	return t.remainingTokens, true
+}
+
+// Refund returns the amount of tokens back to the available token bucket, up
+// to the initial capacity.
+func (t *adaptiveTokenBucket) Refund(amount float64) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	// Capacity cannot exceed max capacity.
+	t.remainingTokens = math.Min(t.remainingTokens+amount, t.maxCapacity)
+}
+
+// Capacity returns the maximum capacity of tokens that the bucket could
+// contain.
+func (t *adaptiveTokenBucket) Capacity() float64 {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	return t.maxCapacity
+}
+
+// Remaining returns the number of tokens remaining in the bucket.
+func (t *adaptiveTokenBucket) Remaining() float64 {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	return t.remainingTokens
+}
+
+// Resize adjusts the size of the token bucket. Returns the capacity remaining.
+func (t *adaptiveTokenBucket) Resize(size float64) float64 {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	t.maxCapacity = math.Max(size, t.minCapacity)
+
+	// Capacity needs to be capped at max capacity, if max size reduced.
+	t.remainingTokens = math.Min(t.remainingTokens, t.maxCapacity)
+
+	return t.remainingTokens
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/doc.go
new file mode 100644
index 00000000000..3a08ebe0a72
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/doc.go
@@ -0,0 +1,80 @@
+// Package retry provides interfaces and implementations for SDK request retry behavior.
+//
+// # Retryer Interface and Implementations
+//
+// This package defines the Retryer interface that is used to either implement custom retry behavior
+// or to extend the existing retry implementations provided by the SDK. This package provides a single
+// retry implementation: Standard.
+//
+// # Standard
+//
+// Standard is the default retryer implementation used by service clients. The standard retryer is a rate-limited
+// retryer that has a configurable max attempts to limit the number of retry attempts when a retryable error occurs.
+// In addition, the retryer uses a configurable token bucket to rate limit the retry attempts across the client,
+// and uses an additional delay policy to limit the time between a request's subsequent attempts.
+//
+// By default the standard retryer uses the DefaultRetryables slice of IsErrorRetryable types to determine whether
+// a given error is retryable. By default this list of retryables includes the following:
+//   - Retrying errors that implement the RetryableError method, and return true.
+//   - Connection Errors
+//   - Errors that implement a ConnectionError, Temporary, or Timeout method that return true.
+//   - Connection Reset Errors.
+//   - net.OpErr types that are dialing errors or are temporary.
+//   - HTTP Status Codes: 500, 502, 503, and 504.
+//   - API Error Codes
+//   - RequestTimeout, RequestTimeoutException
+//   - Throttling, ThrottlingException, ThrottledException, RequestThrottledException, TooManyRequestsException,
+//     RequestThrottled, SlowDown, EC2ThrottledException
+//   - ProvisionedThroughputExceededException, RequestLimitExceeded, BandwidthLimitExceeded, LimitExceededException
+//   - TransactionInProgressException, PriorRequestNotComplete
+//
+// The standard retryer will not retry a request in the event the context associated with the request
+// has been canceled. Applications must handle this case explicitly if they wish to retry with a different context
+// value.
+//
+// You can configure the standard retryer implementation to fit your applications by constructing a standard retryer
+// using the NewStandard function, and providing one or more functional arguments that mutate the StandardOptions
+// structure. StandardOptions provides the ability to modify the token bucket rate limiter, retryable error conditions,
+// and the retry delay policy.
+//
+// For example, to modify the default retry attempts for the standard retryer:
+//
+//	// configure the custom retryer
+//	customRetry := retry.NewStandard(func(o *retry.StandardOptions) {
+//		o.MaxAttempts = 5
+//	})
+//
+//	// create a service client with the retryer
+//	s3.NewFromConfig(cfg, func(o *s3.Options) {
+//		o.Retryer = customRetry
+//	})
+//
+// # Utilities
+//
+// A number of package functions have been provided to easily wrap retryer implementations in an
+// implementation-agnostic way. These are:
+//
+// AddWithErrorCodes - Provides the ability to add additional API error codes that should be considered retryable
+// in addition to those considered retryable by the provided retryer.
+//
+// AddWithMaxAttempts - Provides the ability to set the max number of attempts for retrying a request by wrapping
+// a retryer implementation.
+//
+// AddWithMaxBackoffDelay - Provides the ability to set the max backoff delay that can occur before retrying a
+// request by wrapping a retryer implementation.
+//
+// The following package functions have been provided to easily satisfy different retry interfaces to further customize
+// a given retryer's behavior:
+//
+// BackoffDelayerFunc - Can be used to wrap a function to satisfy the BackoffDelayer interface. For example,
+// you can use this method to easily create custom backoff policies to be used with the
+// standard retryer.
+//
+// IsErrorRetryableFunc - Can be used to wrap a function to satisfy the IsErrorRetryable interface. For example,
+// this can be used to extend the standard retryer to add additional logic to determine if an
+// error should be retried.
+//
+// IsErrorTimeoutFunc - Can be used to wrap a function to satisfy the IsErrorTimeout interface. For example,
+// this can be used to extend the standard retryer to add additional logic to determine if an
+// error should be considered a timeout.
+package retry
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/errors.go
new file mode 100644
index 00000000000..3e432eefe77
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/errors.go
@@ -0,0 +1,20 @@
+package retry
+
+import "fmt"
+
+// MaxAttemptsError provides the error when the maximum number of attempts has
+// been exceeded.
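+//
+// Editorial illustration (not from the SDK source): callers can detect it
+// with errors.As, e.g.
+//
+//	var mae *retry.MaxAttemptsError
+//	if errors.As(err, &mae) {
+//		log.Printf("gave up after %d attempts: %v", mae.Attempt, mae.Err)
+//	}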
+type MaxAttemptsError struct {
+	Attempt int
+	Err     error
+}
+
+func (e *MaxAttemptsError) Error() string {
+	return fmt.Sprintf("exceeded maximum number of attempts, %d, %v", e.Attempt, e.Err)
+}
+
+// Unwrap returns the nested error causing the max attempts error. Provides the
+// implementation for errors.Is and errors.As to unwrap nested errors.
+func (e *MaxAttemptsError) Unwrap() error {
+	return e.Err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/jitter_backoff.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/jitter_backoff.go
new file mode 100644
index 00000000000..c266996dea2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/jitter_backoff.go
@@ -0,0 +1,49 @@
+package retry
+
+import (
+	"math"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/internal/rand"
+	"github.com/aws/aws-sdk-go-v2/internal/timeconv"
+)
+
+// ExponentialJitterBackoff provides backoff delays with jitter based on the
+// number of attempts.
+type ExponentialJitterBackoff struct {
+	maxBackoff time.Duration
+	// precomputed number of attempts needed to reach max backoff.
+	maxBackoffAttempts float64
+
+	randFloat64 func() (float64, error)
+}
+
+// NewExponentialJitterBackoff returns an ExponentialJitterBackoff configured
+// for the max backoff.
+func NewExponentialJitterBackoff(maxBackoff time.Duration) *ExponentialJitterBackoff {
+	return &ExponentialJitterBackoff{
+		maxBackoff: maxBackoff,
+		maxBackoffAttempts: math.Log2(
+			float64(maxBackoff) / float64(time.Second)),
+		randFloat64: rand.CryptoRandFloat64,
+	}
+}
+
+// BackoffDelay returns the duration to wait before the next attempt should be
+// made. Returns an error if unable to get a duration.
+func (j *ExponentialJitterBackoff) BackoffDelay(attempt int, err error) (time.Duration, error) {
+	if attempt > int(j.maxBackoffAttempts) {
+		return j.maxBackoff, nil
+	}
+
+	b, err := j.randFloat64()
+	if err != nil {
+		return 0, err
+	}
+
+	// [0.0, 1.0) * 2 ^ attempts
+	ri := int64(1 << uint64(attempt))
+	delaySeconds := b * float64(ri)
+
+	return timeconv.FloatSecondsDur(delaySeconds), nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/metadata.go
new file mode 100644
index 00000000000..7a3f1830186
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/metadata.go
@@ -0,0 +1,52 @@
+package retry
+
+import (
+	awsmiddle "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/smithy-go/middleware"
+)
+
+// attemptResultsKey is a metadata accessor key to retrieve metadata
+// for all request attempts.
+type attemptResultsKey struct {
+}
+
+// GetAttemptResults retrieves attempt results from middleware metadata.
+func GetAttemptResults(metadata middleware.Metadata) (AttemptResults, bool) {
+	m, ok := metadata.Get(attemptResultsKey{}).(AttemptResults)
+	return m, ok
+}
+
+// AttemptResults represents a struct containing metadata returned by all request attempts.
+type AttemptResults struct {
+
+	// Results is a slice containing the attempt results from all request attempts.
+	// Results are stored in the order the request attempts were made.
+	Results []AttemptResult
+}
+
+// AttemptResult represents the attempt result returned by a single request attempt.
+type AttemptResult struct {
+
+	// Err is the error, if any, received for the request attempt.
+	Err error
+
+	// Retryable denotes if the request may be retried. This states if an
+	// error is considered retryable.
+	Retryable bool
+
+	// Retried indicates if this request was retried.
+	Retried bool
+
+	// ResponseMetadata is any existing metadata passed via the response middlewares.
+	ResponseMetadata middleware.Metadata
+}
+
+// addAttemptResults adds attempt results to middleware metadata
+func addAttemptResults(metadata *middleware.Metadata, v AttemptResults) {
+	metadata.Set(attemptResultsKey{}, v)
+}
+
+// GetRawResponse returns the raw response recorded for the attempt result
+func (a AttemptResult) GetRawResponse() interface{} {
+	return awsmiddle.GetRawResponse(a.ResponseMetadata)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go
new file mode 100644
index 00000000000..822fc920a75
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go
@@ -0,0 +1,330 @@
+package retry
+
+import (
+	"context"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	awsmiddle "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+	"github.com/aws/smithy-go/logging"
+	smithymiddle "github.com/aws/smithy-go/middleware"
+	"github.com/aws/smithy-go/transport/http"
+)
+
+// RequestCloner is a function that can take an input request type and clone
+// the request for use in a subsequent retry attempt.
+type RequestCloner func(interface{}) interface{}
+
+type retryMetadata struct {
+	AttemptNum       int
+	AttemptTime      time.Time
+	MaxAttempts      int
+	AttemptClockSkew time.Duration
+}
+
+// Attempt is a Smithy Finalize middleware that handles retry attempts using
+// the provided Retryer implementation.
+type Attempt struct {
+	// Enable the logging of retry attempts performed by the SDK. This will
+	// include logging retry attempts, unretryable errors, and when max
+	// attempts are reached.
+	LogAttempts bool
+
+	retryer       aws.RetryerV2
+	requestCloner RequestCloner
+}
+
+// NewAttemptMiddleware returns a new Attempt retry middleware.
+func NewAttemptMiddleware(retryer aws.Retryer, requestCloner RequestCloner, optFns ...func(*Attempt)) *Attempt {
+	m := &Attempt{
+		retryer:       wrapAsRetryerV2(retryer),
+		requestCloner: requestCloner,
+	}
+	for _, fn := range optFns {
+		fn(m)
+	}
+	return m
+}
+
+// ID returns the middleware identifier
+func (r *Attempt) ID() string { return "Retry" }
+
+func (r Attempt) logf(logger logging.Logger, classification logging.Classification, format string, v ...interface{}) {
+	if !r.LogAttempts {
+		return
+	}
+	logger.Logf(classification, format, v...)
+}
+
+// HandleFinalize utilizes the provided Retryer implementation to attempt
+// retries over the next handler
+func (r *Attempt) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeInput, next smithymiddle.FinalizeHandler) (
+	out smithymiddle.FinalizeOutput, metadata smithymiddle.Metadata, err error,
+) {
+	var attemptNum int
+	var attemptClockSkew time.Duration
+	var attemptResults AttemptResults
+
+	maxAttempts := r.retryer.MaxAttempts()
+	releaseRetryToken := nopRelease
+
+	for {
+		attemptNum++
+		attemptInput := in
+		attemptInput.Request = r.requestCloner(attemptInput.Request)
+
+		// Record the metadata for the attempt being started.
+		attemptCtx := setRetryMetadata(ctx, retryMetadata{
+			AttemptNum:       attemptNum,
+			AttemptTime:      sdk.NowTime().UTC(),
+			MaxAttempts:      maxAttempts,
+			AttemptClockSkew: attemptClockSkew,
+		})
+
+		var attemptResult AttemptResult
+		out, attemptResult, releaseRetryToken, err = r.handleAttempt(attemptCtx, attemptInput, releaseRetryToken, next)
+		attemptClockSkew, _ = awsmiddle.GetAttemptSkew(attemptResult.ResponseMetadata)
+
+		// AttemptResult Retried states that the attempt was not successful, and
+		// should be retried.
+		shouldRetry := attemptResult.Retried
+
+		// Add attempt metadata to list of all attempt metadata
+		attemptResults.Results = append(attemptResults.Results, attemptResult)
+
+		if !shouldRetry {
+			// Ensure the last response's metadata is used as the basis for result
+			// metadata returned by the stack. The slice of attempt results
+			// will be added to this cloned metadata.
+			metadata = attemptResult.ResponseMetadata.Clone()
+
+			break
+		}
+	}
+
+	addAttemptResults(&metadata, attemptResults)
+	return out, metadata, err
+}
+
+// handleAttempt handles an individual request attempt.
+func (r *Attempt) handleAttempt(
+	ctx context.Context, in smithymiddle.FinalizeInput, releaseRetryToken func(error) error, next smithymiddle.FinalizeHandler,
+) (
+	out smithymiddle.FinalizeOutput, attemptResult AttemptResult, _ func(error) error, err error,
+) {
+	defer func() {
+		attemptResult.Err = err
+	}()
+
+	// Short circuit if this attempt can never succeed because the context is
+	// canceled. This reduces the chance of token pools being modified for
+	// attempts that will not be made
+	select {
+	case <-ctx.Done():
+		return out, attemptResult, nopRelease, ctx.Err()
+	default:
+	}
+
+	//------------------------------
+	// Get Attempt Token
+	//------------------------------
+	releaseAttemptToken, err := r.retryer.GetAttemptToken(ctx)
+	if err != nil {
+		return out, attemptResult, nopRelease, fmt.Errorf(
+			"failed to get retry Send token, %w", err)
+	}
+
+	//------------------------------
+	// Send Attempt
+	//------------------------------
+	logger := smithymiddle.GetLogger(ctx)
+	service, operation := awsmiddle.GetServiceID(ctx), awsmiddle.GetOperationName(ctx)
+	retryMetadata, _ := getRetryMetadata(ctx)
+	attemptNum := retryMetadata.AttemptNum
+	maxAttempts := retryMetadata.MaxAttempts
+
+	// Following attempts must ensure the request payload stream starts in a
+	// rewound state.
+	if attemptNum > 1 {
+		if rewindable, ok := in.Request.(interface{ RewindStream() error }); ok {
+			if rewindErr := rewindable.RewindStream(); rewindErr != nil {
+				return out, attemptResult, nopRelease, fmt.Errorf(
+					"failed to rewind transport stream for retry, %w", rewindErr)
+			}
+		}
+
+		r.logf(logger, logging.Debug, "retrying request %s/%s, attempt %d",
+			service, operation, attemptNum)
+	}
+
+	var metadata smithymiddle.Metadata
+	out, metadata, err = next.HandleFinalize(ctx, in)
+	attemptResult.ResponseMetadata = metadata
+
+	//------------------------------
+	// Bookkeeping
+	//------------------------------
+	// Release the retry token based on the state of the attempt's error (if any).
+	if releaseError := releaseRetryToken(err); releaseError != nil && err != nil {
+		return out, attemptResult, nopRelease, fmt.Errorf(
+			"failed to release retry token after request error, %w", err)
+	}
+	// Release the attempt token based on the state of the attempt's error (if any).
+	if releaseError := releaseAttemptToken(err); releaseError != nil && err != nil {
+		return out, attemptResult, nopRelease, fmt.Errorf(
+			"failed to release initial token after request error, %w", err)
+	}
+	// If there was no error making the attempt, nothing further to do. There
+	// will be nothing to retry.
+	if err == nil {
+		return out, attemptResult, nopRelease, err
+	}
+
+	//------------------------------
+	// Is Retryable and Should Retry
+	//------------------------------
+	// If the attempt failed with an unretryable error, nothing further to do
+	// but return, and inform the caller about the terminal failure.
+	retryable := r.retryer.IsErrorRetryable(err)
+	if !retryable {
+		r.logf(logger, logging.Debug, "request failed with unretryable error %v", err)
+		return out, attemptResult, nopRelease, err
+	}
+
+	// set retryable to true
+	attemptResult.Retryable = true
+
+	// Once the maximum number of attempts have been exhausted there is nothing
+	// further to do other than inform the caller about the terminal failure.
+	if maxAttempts > 0 && attemptNum >= maxAttempts {
+		r.logf(logger, logging.Debug, "max retry attempts exhausted, max %d", maxAttempts)
+		err = &MaxAttemptsError{
+			Attempt: attemptNum,
+			Err:     err,
+		}
+		return out, attemptResult, nopRelease, err
+	}
+
+	//------------------------------
+	// Get Retry (aka Retry Quota) Token
+	//------------------------------
+	// Get a retry token that will be released after the next attempt completes.
+	releaseRetryToken, retryTokenErr := r.retryer.GetRetryToken(ctx, err)
+	if retryTokenErr != nil {
+		return out, attemptResult, nopRelease, retryTokenErr
+	}
+
+	//------------------------------
+	// Retry Delay and Sleep
+	//------------------------------
+	// Get the retry delay before another attempt can be made, and sleep for
+	// that time. Potentially exit early if the sleep is canceled via the
+	// context.
+	retryDelay, reqErr := r.retryer.RetryDelay(attemptNum, err)
+	if reqErr != nil {
+		return out, attemptResult, releaseRetryToken, reqErr
+	}
+	if reqErr = sdk.SleepWithContext(ctx, retryDelay); reqErr != nil {
+		err = &aws.RequestCanceledError{Err: reqErr}
+		return out, attemptResult, releaseRetryToken, err
+	}
+
+	// The request should be re-attempted.
+	attemptResult.Retried = true
+
+	return out, attemptResult, releaseRetryToken, err
+}
+
+// MetricsHeader attaches the SDK request metric header for retries to the transport
+type MetricsHeader struct{}
+
+// ID returns the middleware identifier
+func (r *MetricsHeader) ID() string {
+	return "RetryMetricsHeader"
+}
+
+// HandleFinalize attaches the SDK request metric header to the transport layer
+func (r MetricsHeader) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeInput, next smithymiddle.FinalizeHandler) (
+	out smithymiddle.FinalizeOutput, metadata smithymiddle.Metadata, err error,
+) {
+	retryMetadata, _ := getRetryMetadata(ctx)
+
+	const retryMetricHeader = "Amz-Sdk-Request"
+	var parts []string
+
+	parts = append(parts, "attempt="+strconv.Itoa(retryMetadata.AttemptNum))
+	if retryMetadata.MaxAttempts != 0 {
+		parts = append(parts, "max="+strconv.Itoa(retryMetadata.MaxAttempts))
+	}
+
+	var ttl time.Time
+	if deadline, ok := ctx.Deadline(); ok {
+		ttl = deadline
+	}
+
+	// Only append the TTL if it can be determined.
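+	// Editorial illustration (not from the SDK source): when a deadline and
+	// a positive clock skew are known, the resulting header resembles
+	//
+	//	Amz-Sdk-Request: attempt=2; max=3; ttl=20230501T120000Z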
+ if !ttl.IsZero() && retryMetadata.AttemptClockSkew > 0 { + const unixTimeFormat = "20060102T150405Z" + ttl = ttl.Add(retryMetadata.AttemptClockSkew) + parts = append(parts, "ttl="+ttl.Format(unixTimeFormat)) + } + + switch req := in.Request.(type) { + case *http.Request: + req.Header[retryMetricHeader] = append(req.Header[retryMetricHeader][:0], strings.Join(parts, "; ")) + default: + return out, metadata, fmt.Errorf("unknown transport type %T", req) + } + + return next.HandleFinalize(ctx, in) +} + +type retryMetadataKey struct{} + +// getRetryMetadata retrieves retryMetadata from the context and a bool +// indicating if it was set. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func getRetryMetadata(ctx context.Context) (metadata retryMetadata, ok bool) { + metadata, ok = smithymiddle.GetStackValue(ctx, retryMetadataKey{}).(retryMetadata) + return metadata, ok +} + +// setRetryMetadata sets the retryMetadata on the context. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func setRetryMetadata(ctx context.Context, metadata retryMetadata) context.Context { + return smithymiddle.WithStackValue(ctx, retryMetadataKey{}, metadata) +} + +// AddRetryMiddlewaresOptions is the set of options that can be passed to +// AddRetryMiddlewares for configuring retry associated middleware. +type AddRetryMiddlewaresOptions struct { + Retryer aws.Retryer + + // Enable the logging of retry attempts performed by the SDK. This will + // include logging retry attempts, unretryable errors, and when max + // attempts are reached. + LogRetryAttempts bool +} + +// AddRetryMiddlewares adds retry middleware to operation middleware stack +func AddRetryMiddlewares(stack *smithymiddle.Stack, options AddRetryMiddlewaresOptions) error { + attempt := NewAttemptMiddleware(options.Retryer, http.RequestCloner, func(middleware *Attempt) { + middleware.LogAttempts = options.LogRetryAttempts + }) + + if err := stack.Finalize.Add(attempt, smithymiddle.After); err != nil { + return err + } + if err := stack.Finalize.Add(&MetricsHeader{}, smithymiddle.After); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retry.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retry.go new file mode 100644 index 00000000000..af81635b3fd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retry.go @@ -0,0 +1,90 @@ +package retry + +import ( + "context" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" +) + +// AddWithErrorCodes returns a Retryer with additional error codes considered +// for determining if the error should be retried. +func AddWithErrorCodes(r aws.Retryer, codes ...string) aws.Retryer { + retryable := &RetryableErrorCode{ + Codes: map[string]struct{}{}, + } + for _, c := range codes { + retryable.Codes[c] = struct{}{} + } + + return &withIsErrorRetryable{ + RetryerV2: wrapAsRetryerV2(r), + Retryable: retryable, + } +} + +type withIsErrorRetryable struct { + aws.RetryerV2 + Retryable IsErrorRetryable +} + +func (r *withIsErrorRetryable) IsErrorRetryable(err error) bool { + if v := r.Retryable.IsErrorRetryable(err); v != aws.UnknownTernary { + return v.Bool() + } + return r.RetryerV2.IsErrorRetryable(err) +} + +// AddWithMaxAttempts returns a Retryer with MaxAttempts set to the value +// specified. 
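+//
+// Editorial illustration (not from the SDK source):
+//
+//	r := retry.AddWithMaxAttempts(retry.NewStandard(), 5)
+//	_ = r.MaxAttempts() // 5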
+func AddWithMaxAttempts(r aws.Retryer, max int) aws.Retryer {
+	return &withMaxAttempts{
+		RetryerV2: wrapAsRetryerV2(r),
+		Max:       max,
+	}
+}
+
+type withMaxAttempts struct {
+	aws.RetryerV2
+	Max int
+}
+
+func (w *withMaxAttempts) MaxAttempts() int {
+	return w.Max
+}
+
+// AddWithMaxBackoffDelay returns a retryer wrapping the passed-in retryer,
+// overriding the RetryDelay behavior with an alternate max backoff delay.
+func AddWithMaxBackoffDelay(r aws.Retryer, delay time.Duration) aws.Retryer {
+	return &withMaxBackoffDelay{
+		RetryerV2: wrapAsRetryerV2(r),
+		backoff:   NewExponentialJitterBackoff(delay),
+	}
+}
+
+type withMaxBackoffDelay struct {
+	aws.RetryerV2
+	backoff *ExponentialJitterBackoff
+}
+
+func (r *withMaxBackoffDelay) RetryDelay(attempt int, err error) (time.Duration, error) {
+	return r.backoff.BackoffDelay(attempt, err)
+}
+
+type wrappedAsRetryerV2 struct {
+	aws.Retryer
+}
+
+func wrapAsRetryerV2(r aws.Retryer) aws.RetryerV2 {
+	v, ok := r.(aws.RetryerV2)
+	if !ok {
+		v = wrappedAsRetryerV2{Retryer: r}
+	}
+
+	return v
+}
+
+func (w wrappedAsRetryerV2) GetAttemptToken(context.Context) (func(error) error, error) {
+	return w.Retryer.GetInitialToken(), nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go
new file mode 100644
index 00000000000..987affdde6f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go
@@ -0,0 +1,201 @@
+package retry
+
+import (
+	"errors"
+	"net"
+	"net/url"
+	"strings"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// IsErrorRetryable provides the interface of an implementation to determine if
+// an error resulting from an operation is retryable.
+type IsErrorRetryable interface {
+	IsErrorRetryable(error) aws.Ternary
+}
+
+// IsErrorRetryables is a collection of checks to determine if the error is
+// retryable. Iterates through the checks and returns the state of retryable
+// if any check returns something other than unknown.
+type IsErrorRetryables []IsErrorRetryable
+
+// IsErrorRetryable returns if the error is retryable if any of the checks in
+// the list return a value other than unknown.
+func (r IsErrorRetryables) IsErrorRetryable(err error) aws.Ternary {
+	for _, re := range r {
+		if v := re.IsErrorRetryable(err); v != aws.UnknownTernary {
+			return v
+		}
+	}
+	return aws.UnknownTernary
+}
+
+// IsErrorRetryableFunc wraps a function with the IsErrorRetryable interface.
+type IsErrorRetryableFunc func(error) aws.Ternary
+
+// IsErrorRetryable returns if the error is retryable.
+func (fn IsErrorRetryableFunc) IsErrorRetryable(err error) aws.Ternary {
+	return fn(err)
+}
+
+// RetryableError is an IsErrorRetryable implementation which uses the
+// optional interface Retryable on the error value to determine if the error is
+// retryable.
+type RetryableError struct{}
+
+// IsErrorRetryable returns whether the error is retryable if it satisfies the
+// Retryable interface, indicating whether the attempt should be retried.
+func (RetryableError) IsErrorRetryable(err error) aws.Ternary {
+	var v interface{ RetryableError() bool }
+
+	if !errors.As(err, &v) {
+		return aws.UnknownTernary
+	}
+
+	return aws.BoolTernary(v.RetryableError())
+}
+
+// NoRetryCanceledError detects if the error was a request canceled error and
+// returns if so.
+type NoRetryCanceledError struct{}
+
+// IsErrorRetryable returns that the error is not retryable if the request was
+// canceled.
+func (NoRetryCanceledError) IsErrorRetryable(err error) aws.Ternary {
+	var v interface{ CanceledError() bool }
+
+	if !errors.As(err, &v) {
+		return aws.UnknownTernary
+	}
+
+	if v.CanceledError() {
+		return aws.FalseTernary
+	}
+	return aws.UnknownTernary
+}
+
+// RetryableConnectionError determines if the underlying error is an HTTP
+// connection error and returns if it should be retried.
+//
+// Includes errors such as connection reset, connection refused, net dial,
+// temporary, and timeout errors.
+type RetryableConnectionError struct{}
+
+// IsErrorRetryable returns if the error is caused by an HTTP connection
+// error, and should be retried.
+func (r RetryableConnectionError) IsErrorRetryable(err error) aws.Ternary {
+	if err == nil {
+		return aws.UnknownTernary
+	}
+	var retryable bool
+
+	var conErr interface{ ConnectionError() bool }
+	var tempErr interface{ Temporary() bool }
+	var timeoutErr interface{ Timeout() bool }
+	var urlErr *url.Error
+	var netOpErr *net.OpError
+	var dnsError *net.DNSError
+
+	if errors.As(err, &dnsError) {
+		// NXDOMAIN errors should not be retried
+		if dnsError.IsNotFound {
+			return aws.BoolTernary(false)
+		}
+
+		// if !dnsError.Temporary(), error may or may not be temporary,
+		// (i.e. !Temporary() =/=> !retryable) so we should fall through to
+		// remaining checks
+		if dnsError.Temporary() {
+			return aws.BoolTernary(true)
+		}
+	}
+
+	switch {
+	case errors.As(err, &conErr) && conErr.ConnectionError():
+		retryable = true
+
+	case strings.Contains(err.Error(), "connection reset"):
+		retryable = true
+
+	case errors.As(err, &urlErr):
+		// Refused connections should be retried as the service may not yet be
+		// running on the port. Go TCP dial considers refused connections as
+		// not temporary.
+		if strings.Contains(urlErr.Error(), "connection refused") {
+			retryable = true
+		} else {
+			return r.IsErrorRetryable(errors.Unwrap(urlErr))
+		}
+
+	case errors.As(err, &netOpErr):
+		// Network dial, or temporary network errors are always retryable.
+		if strings.EqualFold(netOpErr.Op, "dial") || netOpErr.Temporary() {
+			retryable = true
+		} else {
+			return r.IsErrorRetryable(errors.Unwrap(netOpErr))
+		}
+
+	case errors.As(err, &tempErr) && tempErr.Temporary():
+		// Fallback to the generic temporary check, with temporary errors
+		// retryable.
+		retryable = true
+
+	case errors.As(err, &timeoutErr) && timeoutErr.Timeout():
+		// Fallback to the generic timeout check, with timeout errors
+		// retryable.
+		retryable = true
+
+	default:
+		return aws.UnknownTernary
+	}
+
+	return aws.BoolTernary(retryable)
+}
+
+// RetryableHTTPStatusCode provides an IsErrorRetryable check based on HTTP
+// status codes.
+type RetryableHTTPStatusCode struct {
+	Codes map[int]struct{}
+}
+
+// IsErrorRetryable returns if the passed-in error is retryable based on the
+// HTTP status code.
+func (r RetryableHTTPStatusCode) IsErrorRetryable(err error) aws.Ternary {
+	var v interface{ HTTPStatusCode() int }
+
+	if !errors.As(err, &v) {
+		return aws.UnknownTernary
+	}
+
+	_, ok := r.Codes[v.HTTPStatusCode()]
+	if !ok {
+		return aws.UnknownTernary
+	}
+
+	return aws.TrueTernary
+}
+
+// RetryableErrorCode determines if an attempt should be retried based on the
+// API error code.
+type RetryableErrorCode struct {
+	Codes map[string]struct{}
+}
+
+// IsErrorRetryable returns if the error is retryable based on the error codes.
+// Returns unknown if the error doesn't have a code or it is unknown.
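+//
+// Editorial illustration (not from the SDK source):
+//
+//	check := retry.RetryableErrorCode{Codes: map[string]struct{}{
+//		"SlowDown": {},
+//	}}
+//	// check.IsErrorRetryable returns aws.TrueTernary for an API error whose
+//	// ErrorCode() is "SlowDown", and aws.UnknownTernary otherwise.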
+func (r RetryableErrorCode) IsErrorRetryable(err error) aws.Ternary { + var v interface{ ErrorCode() string } + + if !errors.As(err, &v) { + return aws.UnknownTernary + } + + _, ok := r.Codes[v.ErrorCode()] + if !ok { + return aws.UnknownTernary + } + + return aws.TrueTernary +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go new file mode 100644 index 00000000000..25abffc8128 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go @@ -0,0 +1,258 @@ +package retry + +import ( + "context" + "fmt" + "time" + + "github.com/aws/aws-sdk-go-v2/aws/ratelimit" +) + +// BackoffDelayer provides the interface for determining the delay to before +// another request attempt, that previously failed. +type BackoffDelayer interface { + BackoffDelay(attempt int, err error) (time.Duration, error) +} + +// BackoffDelayerFunc provides a wrapper around a function to determine the +// backoff delay of an attempt retry. +type BackoffDelayerFunc func(int, error) (time.Duration, error) + +// BackoffDelay returns the delay before attempt to retry a request. +func (fn BackoffDelayerFunc) BackoffDelay(attempt int, err error) (time.Duration, error) { + return fn(attempt, err) +} + +const ( + // DefaultMaxAttempts is the maximum of attempts for an API request + DefaultMaxAttempts int = 3 + + // DefaultMaxBackoff is the maximum back off delay between attempts + DefaultMaxBackoff time.Duration = 20 * time.Second +) + +// Default retry token quota values. +const ( + DefaultRetryRateTokens uint = 500 + DefaultRetryCost uint = 5 + DefaultRetryTimeoutCost uint = 10 + DefaultNoRetryIncrement uint = 1 +) + +// DefaultRetryableHTTPStatusCodes is the default set of HTTP status codes the SDK +// should consider as retryable errors. +var DefaultRetryableHTTPStatusCodes = map[int]struct{}{ + 500: {}, + 502: {}, + 503: {}, + 504: {}, +} + +// DefaultRetryableErrorCodes provides the set of API error codes that should +// be retried. +var DefaultRetryableErrorCodes = map[string]struct{}{ + "RequestTimeout": {}, + "RequestTimeoutException": {}, +} + +// DefaultThrottleErrorCodes provides the set of API error codes that are +// considered throttle errors. +var DefaultThrottleErrorCodes = map[string]struct{}{ + "Throttling": {}, + "ThrottlingException": {}, + "ThrottledException": {}, + "RequestThrottledException": {}, + "TooManyRequestsException": {}, + "ProvisionedThroughputExceededException": {}, + "TransactionInProgressException": {}, + "RequestLimitExceeded": {}, + "BandwidthLimitExceeded": {}, + "LimitExceededException": {}, + "RequestThrottled": {}, + "SlowDown": {}, + "PriorRequestNotComplete": {}, + "EC2ThrottledException": {}, +} + +// DefaultRetryables provides the set of retryable checks that are used by +// default. +var DefaultRetryables = []IsErrorRetryable{ + NoRetryCanceledError{}, + RetryableError{}, + RetryableConnectionError{}, + RetryableHTTPStatusCode{ + Codes: DefaultRetryableHTTPStatusCodes, + }, + RetryableErrorCode{ + Codes: DefaultRetryableErrorCodes, + }, + RetryableErrorCode{ + Codes: DefaultThrottleErrorCodes, + }, +} + +// DefaultTimeouts provides the set of timeout checks that are used by default. +var DefaultTimeouts = []IsErrorTimeout{ + TimeouterError{}, +} + +// StandardOptions provides the functional options for configuring the standard +// retryable, and delay behavior. +type StandardOptions struct { + // Maximum number of attempts that should be made. 
+ MaxAttempts int + + // MaxBackoff duration between retried attempts. + MaxBackoff time.Duration + + // Provides the backoff strategy the retryer will use to determine the + // delay between retry attempts. + Backoff BackoffDelayer + + // Set of strategies to determine if the attempt should be retried based on + // the error response received. + // + // It is safe to append to this list in NewStandard's functional options. + Retryables []IsErrorRetryable + + // Set of strategies to determine if the attempt failed due to a timeout + // error. + // + // It is safe to append to this list in NewStandard's functional options. + Timeouts []IsErrorTimeout + + // Provides the rate limiting strategy for rate limiting attempt retries + // across all attempts the retryer is being used with. + RateLimiter RateLimiter + + // The cost to deduct from the RateLimiter's token bucket per retry. + RetryCost uint + + // The cost to deduct from the RateLimiter's token bucket per retry caused + // by timeout error. + RetryTimeoutCost uint + + // The cost to payback to the RateLimiter's token bucket for successful + // attempts. + NoRetryIncrement uint +} + +// RateLimiter provides the interface for limiting the rate of attempt retries +// allowed by the retryer. +type RateLimiter interface { + GetToken(ctx context.Context, cost uint) (releaseToken func() error, err error) + AddTokens(uint) error +} + +// Standard is the standard retry pattern for the SDK. It uses a set of +// retryable checks to determine of the failed attempt should be retried, and +// what retry delay should be used. +type Standard struct { + options StandardOptions + + timeout IsErrorTimeout + retryable IsErrorRetryable + backoff BackoffDelayer +} + +// NewStandard initializes a standard retry behavior with defaults that can be +// overridden via functional options. +func NewStandard(fnOpts ...func(*StandardOptions)) *Standard { + o := StandardOptions{ + MaxAttempts: DefaultMaxAttempts, + MaxBackoff: DefaultMaxBackoff, + Retryables: append([]IsErrorRetryable{}, DefaultRetryables...), + Timeouts: append([]IsErrorTimeout{}, DefaultTimeouts...), + + RateLimiter: ratelimit.NewTokenRateLimit(DefaultRetryRateTokens), + RetryCost: DefaultRetryCost, + RetryTimeoutCost: DefaultRetryTimeoutCost, + NoRetryIncrement: DefaultNoRetryIncrement, + } + for _, fn := range fnOpts { + fn(&o) + } + if o.MaxAttempts <= 0 { + o.MaxAttempts = DefaultMaxAttempts + } + + backoff := o.Backoff + if backoff == nil { + backoff = NewExponentialJitterBackoff(o.MaxBackoff) + } + + return &Standard{ + options: o, + backoff: backoff, + retryable: IsErrorRetryables(o.Retryables), + timeout: IsErrorTimeouts(o.Timeouts), + } +} + +// MaxAttempts returns the maximum number of attempts that can be made for a +// request before failing. +func (s *Standard) MaxAttempts() int { + return s.options.MaxAttempts +} + +// IsErrorRetryable returns if the error is can be retried or not. Should not +// consider the number of attempts made. +func (s *Standard) IsErrorRetryable(err error) bool { + return s.retryable.IsErrorRetryable(err).Bool() +} + +// RetryDelay returns the delay to use before another request attempt is made. +func (s *Standard) RetryDelay(attempt int, err error) (time.Duration, error) { + return s.backoff.BackoffDelay(attempt, err) +} + +// GetAttemptToken returns the token to be released after then attempt completes. +// The release token will add NoRetryIncrement to the RateLimiter token pool if +// the attempt was successful. If the attempt failed, nothing will be done. 
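// Illustrative sketch, not part of the vendored file: overriding
// StandardOptions through NewStandard's functional options.
// "MyRetryableError" is a hypothetical error code.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws/retry"
)

func main() {
	r := retry.NewStandard(func(o *retry.StandardOptions) {
		o.MaxAttempts = 5
		// Append rather than replace, so the default checks stay active.
		o.Retryables = append(o.Retryables, retry.RetryableErrorCode{
			Codes: map[string]struct{}{"MyRetryableError": {}},
		})
	})
	fmt.Println(r.MaxAttempts()) // 5
}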
+func (s *Standard) GetAttemptToken(context.Context) (func(error) error, error) { + return s.GetInitialToken(), nil +} + +// GetInitialToken returns a token for adding the NoRetryIncrement to the +// RateLimiter token if the attempt completed successfully without error. +// +// InitialToken applies to result of the each attempt, including the first. +// Whereas the RetryToken applies to the result of subsequent attempts. +// +// Deprecated: use GetAttemptToken instead. +func (s *Standard) GetInitialToken() func(error) error { + return releaseToken(s.noRetryIncrement).release +} + +func (s *Standard) noRetryIncrement() error { + return s.options.RateLimiter.AddTokens(s.options.NoRetryIncrement) +} + +// GetRetryToken attempts to deduct the retry cost from the retry token pool. +// Returning the token release function, or error. +func (s *Standard) GetRetryToken(ctx context.Context, opErr error) (func(error) error, error) { + cost := s.options.RetryCost + + if s.timeout.IsErrorTimeout(opErr).Bool() { + cost = s.options.RetryTimeoutCost + } + + fn, err := s.options.RateLimiter.GetToken(ctx, cost) + if err != nil { + return nil, fmt.Errorf("failed to get rate limit token, %w", err) + } + + return releaseToken(fn).release, nil +} + +func nopRelease(error) error { return nil } + +type releaseToken func() error + +func (f releaseToken) release(err error) error { + if err != nil { + return nil + } + + return f() +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/throttle_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/throttle_error.go new file mode 100644 index 00000000000..c4b844d15f1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/throttle_error.go @@ -0,0 +1,60 @@ +package retry + +import ( + "errors" + + "github.com/aws/aws-sdk-go-v2/aws" +) + +// IsErrorThrottle provides the interface of an implementation to determine if +// a error response from an operation is a throttling error. +type IsErrorThrottle interface { + IsErrorThrottle(error) aws.Ternary +} + +// IsErrorThrottles is a collection of checks to determine of the error a +// throttle error. Iterates through the checks and returns the state of +// throttle if any check returns something other than unknown. +type IsErrorThrottles []IsErrorThrottle + +// IsErrorThrottle returns if the error is a throttle error if any of the +// checks in the list return a value other than unknown. +func (r IsErrorThrottles) IsErrorThrottle(err error) aws.Ternary { + for _, re := range r { + if v := re.IsErrorThrottle(err); v != aws.UnknownTernary { + return v + } + } + return aws.UnknownTernary +} + +// IsErrorThrottleFunc wraps a function with the IsErrorThrottle interface. +type IsErrorThrottleFunc func(error) aws.Ternary + +// IsErrorThrottle returns if the error is a throttle error. +func (fn IsErrorThrottleFunc) IsErrorThrottle(err error) aws.Ternary { + return fn(err) +} + +// ThrottleErrorCode determines if an attempt should be retried based on the +// API error code. +type ThrottleErrorCode struct { + Codes map[string]struct{} +} + +// IsErrorThrottle return if the error is a throttle error based on the error +// codes. Returns unknown if the error doesn't have a code or it is unknown. 
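// Illustrative sketch, not part of the vendored file: exercising the
// code-based throttle check below with smithy-go's generic API error type.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/retry"
	smithy "github.com/aws/smithy-go"
)

func main() {
	check := retry.ThrottleErrorCode{Codes: retry.DefaultThrottleErrorCodes}

	err := &smithy.GenericAPIError{Code: "SlowDown", Message: "reduce request rate"}
	fmt.Println(check.IsErrorThrottle(err) == aws.TrueTernary) // true
}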
+func (r ThrottleErrorCode) IsErrorThrottle(err error) aws.Ternary {
+	var v interface{ ErrorCode() string }
+
+	if !errors.As(err, &v) {
+		return aws.UnknownTernary
+	}
+
+	_, ok := r.Codes[v.ErrorCode()]
+	if !ok {
+		return aws.UnknownTernary
+	}
+
+	return aws.TrueTernary
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/timeout_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/timeout_error.go
new file mode 100644
index 00000000000..3d47870d2dc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/timeout_error.go
@@ -0,0 +1,52 @@
+package retry
+
+import (
+	"errors"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// IsErrorTimeout provides the interface for an implementation to determine if
+// an error is a timeout error.
+type IsErrorTimeout interface {
+	IsErrorTimeout(err error) aws.Ternary
+}
+
+// IsErrorTimeouts is a collection of checks to determine if the error is a
+// timeout. Iterates through the checks and returns the timeout state if any
+// check returns something other than unknown.
+type IsErrorTimeouts []IsErrorTimeout
+
+// IsErrorTimeout returns whether the error is a timeout if any of the checks
+// in the list return a value other than unknown.
+func (ts IsErrorTimeouts) IsErrorTimeout(err error) aws.Ternary {
+	for _, t := range ts {
+		if v := t.IsErrorTimeout(err); v != aws.UnknownTernary {
+			return v
+		}
+	}
+	return aws.UnknownTernary
+}
+
+// IsErrorTimeoutFunc wraps a function with the IsErrorTimeout interface.
+type IsErrorTimeoutFunc func(error) aws.Ternary
+
+// IsErrorTimeout returns whether the error is a timeout.
+func (fn IsErrorTimeoutFunc) IsErrorTimeout(err error) aws.Ternary {
+	return fn(err)
+}
+
+// TimeouterError provides the IsErrorTimeout implementation for determining if
+// an error is a timeout based on whether its type has a Timeout method.
+type TimeouterError struct{}
+
+// IsErrorTimeout returns whether the error is a timeout error.
+func (t TimeouterError) IsErrorTimeout(err error) aws.Ternary {
+	var v interface{ Timeout() bool }
+
+	if !errors.As(err, &v) {
+		return aws.UnknownTernary
+	}
+
+	return aws.BoolTernary(v.Timeout())
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go
new file mode 100644
index 00000000000..b0ba4cb2f08
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go
@@ -0,0 +1,127 @@
+package aws
+
+import (
+	"context"
+	"fmt"
+	"time"
+)
+
+// RetryMode provides the mode the API client will use when creating a
+// retryer.
+type RetryMode string
+
+const (
+	// RetryModeStandard mode provides rate limited retry attempts with
+	// exponential backoff delay.
+	RetryModeStandard RetryMode = "standard"
+
+	// RetryModeAdaptive mode provides attempt send rate limiting on throttle
+	// responses in addition to standard mode's retry rate limiting.
+	//
+	// Adaptive retry mode is experimental and is subject to change in the
+	// future.
+	RetryModeAdaptive RetryMode = "adaptive"
+)
+
+// ParseRetryMode attempts to parse a RetryMode from the given string.
+// Returns an error if the value is not a known RetryMode.
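// Illustrative sketch, not part of the vendored file: parsing a retry mode
// string, e.g. one read from the AWS_RETRY_MODE environment variable.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
)

func main() {
	mode, err := aws.ParseRetryMode("adaptive")
	if err != nil {
		log.Fatal(err) // unknown values are rejected, e.g. "turbo"
	}
	fmt.Println(mode == aws.RetryModeAdaptive) // true
}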
+func ParseRetryMode(v string) (mode RetryMode, err error) {
+	switch v {
+	case "standard":
+		return RetryModeStandard, nil
+	case "adaptive":
+		return RetryModeAdaptive, nil
+	default:
+		return mode, fmt.Errorf("unknown RetryMode, %v", v)
+	}
+}
+
+func (m RetryMode) String() string { return string(m) }
+
+// Retryer is an interface to determine if a given error from an
+// attempt should be retried, and if so what backoff delay to apply. The
+// default implementation used by most services is the retry package's Standard
+// type, which contains basic retry logic using exponential backoff.
+type Retryer interface {
+	// IsErrorRetryable returns if the failed attempt is retryable. This check
+	// should determine if the error can be retried, or if the error is
+	// terminal.
+	IsErrorRetryable(error) bool
+
+	// MaxAttempts returns the maximum number of attempts that can be made for
+	// an operation before failing. A value of 0 implies that the attempt
+	// should be retried until it succeeds if the errors are retryable.
+	MaxAttempts() int
+
+	// RetryDelay returns the delay that should be used before retrying the
+	// attempt. Will return an error if the delay could not be determined.
+	RetryDelay(attempt int, opErr error) (time.Duration, error)
+
+	// GetRetryToken attempts to deduct the retry cost from the retry token
+	// pool. Returns the token release function, or an error.
+	GetRetryToken(ctx context.Context, opErr error) (releaseToken func(error) error, err error)
+
+	// GetInitialToken returns the initial attempt token that can increment the
+	// retry token pool if the attempt is successful.
+	GetInitialToken() (releaseToken func(error) error)
+}
+
+// RetryerV2 is an interface to determine if a given error from an attempt
+// should be retried, and if so what backoff delay to apply. The default
+// implementation used by most services is the retry package's Standard type,
+// which contains basic retry logic using exponential backoff.
+//
+// RetryerV2 replaces the Retryer interface, deprecating the GetInitialToken
+// method in favor of GetAttemptToken, which takes a context and can return an
+// error.
+//
+// The SDK's retry package's Attempt middleware and utilities will always wrap
+// a Retryer as a RetryerV2, delegating to GetInitialToken only if
+// GetAttemptToken is not implemented.
+type RetryerV2 interface {
+	Retryer
+
+	// GetInitialToken returns the initial attempt token that can increment the
+	// retry token pool if the attempt is successful.
+	//
+	// Deprecated: This method does not provide a way to block using Context,
+	// nor can it return an error. Use RetryerV2, and GetAttemptToken instead.
+	GetInitialToken() (releaseToken func(error) error)
+
+	// GetAttemptToken returns the send token that can be used to rate limit
+	// attempt calls. Will be used by the SDK's retry package's Attempt
+	// middleware to get a send token prior to calling the attempt, and to
+	// release the send token after the attempt has been made.
+	GetAttemptToken(context.Context) (func(error) error, error)
+}
+
+// NopRetryer provides a Retryer implementation that will flag
+// all attempt errors as not retryable, with a max attempts of 1.
+type NopRetryer struct{}
+
+// IsErrorRetryable returns false for all error values.
+func (NopRetryer) IsErrorRetryable(error) bool { return false }
+
+// MaxAttempts always returns 1 for the original attempt.
+func (NopRetryer) MaxAttempts() int { return 1 }
+
+// RetryDelay is not valid for the NopRetryer. Will always return an error.
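// Illustrative sketch, not part of the vendored file: the token-bucket flow
// behind GetRetryToken and GetAttemptToken, shown directly against the SDK's
// ratelimit package. The bucket size and cost are arbitrary example values.

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws/ratelimit"
)

func main() {
	bucket := ratelimit.NewTokenRateLimit(10)

	// A retry withdraws its cost up front; an exhausted bucket fails fast.
	release, err := bucket.GetToken(context.Background(), 5)
	if err != nil {
		fmt.Println("retry quota exhausted:", err)
		return
	}
	// The release function refunds the cost; the standard retryer only
	// invokes it after a successful attempt.
	fmt.Println(release() == nil) // true
}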
+func (NopRetryer) RetryDelay(int, error) (time.Duration, error) { + return 0, fmt.Errorf("not retrying any attempt errors") +} + +// GetRetryToken returns a stub function that does nothing. +func (NopRetryer) GetRetryToken(context.Context, error) (func(error) error, error) { + return nopReleaseToken, nil +} + +// GetInitialToken returns a stub function that does nothing. +func (NopRetryer) GetInitialToken() func(error) error { + return nopReleaseToken +} + +// GetAttemptToken returns a stub function that does nothing. +func (NopRetryer) GetAttemptToken(context.Context) (func(error) error, error) { + return nopReleaseToken, nil +} + +func nopReleaseToken(error) error { return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/runtime.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/runtime.go new file mode 100644 index 00000000000..3af9b2b3361 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/runtime.go @@ -0,0 +1,14 @@ +package aws + +// ExecutionEnvironmentID is the AWS execution environment runtime identifier. +type ExecutionEnvironmentID string + +// RuntimeEnvironment is a collection of values that are determined at runtime +// based on the environment that the SDK is executing in. Some of these values +// may or may not be present based on the executing environment and certain SDK +// configuration properties that drive whether these values are populated.. +type RuntimeEnvironment struct { + EnvironmentIdentifier ExecutionEnvironmentID + Region string + EC2InstanceMetadataRegion string +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/cache.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/cache.go new file mode 100644 index 00000000000..cbf22f1d0b0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/cache.go @@ -0,0 +1,115 @@ +package v4 + +import ( + "strings" + "sync" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" +) + +func lookupKey(service, region string) string { + var s strings.Builder + s.Grow(len(region) + len(service) + 3) + s.WriteString(region) + s.WriteRune('/') + s.WriteString(service) + return s.String() +} + +type derivedKey struct { + AccessKey string + Date time.Time + Credential []byte +} + +type derivedKeyCache struct { + values map[string]derivedKey + mutex sync.RWMutex +} + +func newDerivedKeyCache() derivedKeyCache { + return derivedKeyCache{ + values: make(map[string]derivedKey), + } +} + +func (s *derivedKeyCache) Get(credentials aws.Credentials, service, region string, signingTime SigningTime) []byte { + key := lookupKey(service, region) + s.mutex.RLock() + if cred, ok := s.get(key, credentials, signingTime.Time); ok { + s.mutex.RUnlock() + return cred + } + s.mutex.RUnlock() + + s.mutex.Lock() + if cred, ok := s.get(key, credentials, signingTime.Time); ok { + s.mutex.Unlock() + return cred + } + cred := deriveKey(credentials.SecretAccessKey, service, region, signingTime) + entry := derivedKey{ + AccessKey: credentials.AccessKeyID, + Date: signingTime.Time, + Credential: cred, + } + s.values[key] = entry + s.mutex.Unlock() + + return cred +} + +func (s *derivedKeyCache) get(key string, credentials aws.Credentials, signingTime time.Time) ([]byte, bool) { + cacheEntry, ok := s.retrieveFromCache(key) + if ok && cacheEntry.AccessKey == credentials.AccessKeyID && isSameDay(signingTime, cacheEntry.Date) { + return cacheEntry.Credential, true + } + return nil, false +} + +func (s *derivedKeyCache) retrieveFromCache(key string) (derivedKey, bool) { + if v, ok := s.values[key]; ok { + 
return v, true + } + return derivedKey{}, false +} + +// SigningKeyDeriver derives a signing key from a set of credentials +type SigningKeyDeriver struct { + cache derivedKeyCache +} + +// NewSigningKeyDeriver returns a new SigningKeyDeriver +func NewSigningKeyDeriver() *SigningKeyDeriver { + return &SigningKeyDeriver{ + cache: newDerivedKeyCache(), + } +} + +// DeriveKey returns a derived signing key from the given credentials to be used with SigV4 signing. +func (k *SigningKeyDeriver) DeriveKey(credential aws.Credentials, service, region string, signingTime SigningTime) []byte { + return k.cache.Get(credential, service, region, signingTime) +} + +func deriveKey(secret, service, region string, t SigningTime) []byte { + hmacDate := HMACSHA256([]byte("AWS4"+secret), []byte(t.ShortTimeFormat())) + hmacRegion := HMACSHA256(hmacDate, []byte(region)) + hmacService := HMACSHA256(hmacRegion, []byte(service)) + return HMACSHA256(hmacService, []byte("aws4_request")) +} + +func isSameDay(x, y time.Time) bool { + xYear, xMonth, xDay := x.Date() + yYear, yMonth, yDay := y.Date() + + if xYear != yYear { + return false + } + + if xMonth != yMonth { + return false + } + + return xDay == yDay +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/const.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/const.go new file mode 100644 index 00000000000..a23cb003bf7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/const.go @@ -0,0 +1,40 @@ +package v4 + +// Signature Version 4 (SigV4) Constants +const ( + // EmptyStringSHA256 is the hex encoded sha256 value of an empty string + EmptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855` + + // UnsignedPayload indicates that the request payload body is unsigned + UnsignedPayload = "UNSIGNED-PAYLOAD" + + // AmzAlgorithmKey indicates the signing algorithm + AmzAlgorithmKey = "X-Amz-Algorithm" + + // AmzSecurityTokenKey indicates the security token to be used with temporary credentials + AmzSecurityTokenKey = "X-Amz-Security-Token" + + // AmzDateKey is the UTC timestamp for the request in the format YYYYMMDD'T'HHMMSS'Z' + AmzDateKey = "X-Amz-Date" + + // AmzCredentialKey is the access key ID and credential scope + AmzCredentialKey = "X-Amz-Credential" + + // AmzSignedHeadersKey is the set of headers signed for the request + AmzSignedHeadersKey = "X-Amz-SignedHeaders" + + // AmzSignatureKey is the query parameter to store the SigV4 signature + AmzSignatureKey = "X-Amz-Signature" + + // TimeFormat is the time format to be used in the X-Amz-Date header or query parameter + TimeFormat = "20060102T150405Z" + + // ShortTimeFormat is the shorten time format used in the credential scope + ShortTimeFormat = "20060102" + + // ContentSHAKey is the SHA256 of request body + ContentSHAKey = "X-Amz-Content-Sha256" + + // StreamingEventsPayload indicates that the request payload body is a signed event stream. 
+ StreamingEventsPayload = "STREAMING-AWS4-HMAC-SHA256-EVENTS" +) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/header_rules.go new file mode 100644 index 00000000000..c61955ad5b9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/header_rules.go @@ -0,0 +1,82 @@ +package v4 + +import ( + sdkstrings "github.com/aws/aws-sdk-go-v2/internal/strings" +) + +// Rules houses a set of Rule needed for validation of a +// string value +type Rules []Rule + +// Rule interface allows for more flexible rules and just simply +// checks whether or not a value adheres to that Rule +type Rule interface { + IsValid(value string) bool +} + +// IsValid will iterate through all rules and see if any rules +// apply to the value and supports nested rules +func (r Rules) IsValid(value string) bool { + for _, rule := range r { + if rule.IsValid(value) { + return true + } + } + return false +} + +// MapRule generic Rule for maps +type MapRule map[string]struct{} + +// IsValid for the map Rule satisfies whether it exists in the map +func (m MapRule) IsValid(value string) bool { + _, ok := m[value] + return ok +} + +// AllowList is a generic Rule for include listing +type AllowList struct { + Rule +} + +// IsValid for AllowList checks if the value is within the AllowList +func (w AllowList) IsValid(value string) bool { + return w.Rule.IsValid(value) +} + +// ExcludeList is a generic Rule for exclude listing +type ExcludeList struct { + Rule +} + +// IsValid for AllowList checks if the value is within the AllowList +func (b ExcludeList) IsValid(value string) bool { + return !b.Rule.IsValid(value) +} + +// Patterns is a list of strings to match against +type Patterns []string + +// IsValid for Patterns checks each pattern and returns if a match has +// been found +func (p Patterns) IsValid(value string) bool { + for _, pattern := range p { + if sdkstrings.HasPrefixFold(value, pattern) { + return true + } + } + return false +} + +// InclusiveRules rules allow for rules to depend on one another +type InclusiveRules []Rule + +// IsValid will return true if all rules are true +func (r InclusiveRules) IsValid(value string) bool { + for _, rule := range r { + if !rule.IsValid(value) { + return false + } + } + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go new file mode 100644 index 00000000000..ca738f234b3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go @@ -0,0 +1,71 @@ +package v4 + +// IgnoredHeaders is a list of headers that are ignored during signing +var IgnoredHeaders = Rules{ + ExcludeList{ + MapRule{ + "Authorization": struct{}{}, + "User-Agent": struct{}{}, + "X-Amzn-Trace-Id": struct{}{}, + "Expect": struct{}{}, + }, + }, +} + +// RequiredSignedHeaders is a allow list for Build canonical headers. 
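// Illustrative sketch, not part of the vendored file. The package is internal,
// so this example lives in-package: ExcludeList inverts its inner rule, and
// Patterns matches case-insensitive prefixes.

package v4

import "fmt"

func exampleRules() {
	ignored := Rules{
		ExcludeList{MapRule{"Authorization": {}, "User-Agent": {}}},
	}
	fmt.Println(ignored.IsValid("Authorization")) // false: excluded from signing
	fmt.Println(ignored.IsValid("Content-Type"))  // true

	fmt.Println(Patterns{"X-Amz-Meta-"}.IsValid("x-amz-meta-owner")) // true
}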
+var RequiredSignedHeaders = Rules{ + AllowList{ + MapRule{ + "Cache-Control": struct{}{}, + "Content-Disposition": struct{}{}, + "Content-Encoding": struct{}{}, + "Content-Language": struct{}{}, + "Content-Md5": struct{}{}, + "Content-Type": struct{}{}, + "Expires": struct{}{}, + "If-Match": struct{}{}, + "If-Modified-Since": struct{}{}, + "If-None-Match": struct{}{}, + "If-Unmodified-Since": struct{}{}, + "Range": struct{}{}, + "X-Amz-Acl": struct{}{}, + "X-Amz-Copy-Source": struct{}{}, + "X-Amz-Copy-Source-If-Match": struct{}{}, + "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, + "X-Amz-Copy-Source-If-None-Match": struct{}{}, + "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, + "X-Amz-Copy-Source-Range": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Expected-Bucket-Owner": struct{}{}, + "X-Amz-Grant-Full-control": struct{}{}, + "X-Amz-Grant-Read": struct{}{}, + "X-Amz-Grant-Read-Acp": struct{}{}, + "X-Amz-Grant-Write": struct{}{}, + "X-Amz-Grant-Write-Acp": struct{}{}, + "X-Amz-Metadata-Directive": struct{}{}, + "X-Amz-Mfa": struct{}{}, + "X-Amz-Request-Payer": struct{}{}, + "X-Amz-Server-Side-Encryption": struct{}{}, + "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, + "X-Amz-Server-Side-Encryption-Context": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Storage-Class": struct{}{}, + "X-Amz-Website-Redirect-Location": struct{}{}, + "X-Amz-Content-Sha256": struct{}{}, + "X-Amz-Tagging": struct{}{}, + }, + }, + Patterns{"X-Amz-Object-Lock-"}, + Patterns{"X-Amz-Meta-"}, +} + +// AllowedQueryHoisting is a allowed list for Build query headers. The boolean value +// represents whether or not it is a pattern. +var AllowedQueryHoisting = InclusiveRules{ + ExcludeList{RequiredSignedHeaders}, + Patterns{"X-Amz-"}, +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/hmac.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/hmac.go new file mode 100644 index 00000000000..e7fa7a1b1e6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/hmac.go @@ -0,0 +1,13 @@ +package v4 + +import ( + "crypto/hmac" + "crypto/sha256" +) + +// HMACSHA256 computes a HMAC-SHA256 of data given the provided key. +func HMACSHA256(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/host.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/host.go new file mode 100644 index 00000000000..bf93659a43f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/host.go @@ -0,0 +1,75 @@ +package v4 + +import ( + "net/http" + "strings" +) + +// SanitizeHostForHeader removes default port from host and updates request.Host +func SanitizeHostForHeader(r *http.Request) { + host := getHost(r) + port := portOnly(host) + if port != "" && isDefaultPort(r.URL.Scheme, port) { + r.Host = stripPort(host) + } +} + +// Returns host from request +func getHost(r *http.Request) string { + if r.Host != "" { + return r.Host + } + + return r.URL.Host +} + +// Hostname returns u.Host, without any port number. 
+// +// If Host is an IPv6 literal with a port number, Hostname returns the +// IPv6 literal without the square brackets. IPv6 literals may include +// a zone identifier. +// +// Copied from the Go 1.8 standard library (net/url) +func stripPort(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return hostport + } + if i := strings.IndexByte(hostport, ']'); i != -1 { + return strings.TrimPrefix(hostport[:i], "[") + } + return hostport[:colon] +} + +// Port returns the port part of u.Host, without the leading colon. +// If u.Host doesn't contain a port, Port returns an empty string. +// +// Copied from the Go 1.8 standard library (net/url) +func portOnly(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return "" + } + if i := strings.Index(hostport, "]:"); i != -1 { + return hostport[i+len("]:"):] + } + if strings.Contains(hostport, "]") { + return "" + } + return hostport[colon+len(":"):] +} + +// Returns true if the specified URI is using the standard port +// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs) +func isDefaultPort(scheme, port string) bool { + if port == "" { + return true + } + + lowerCaseScheme := strings.ToLower(scheme) + if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") { + return true + } + + return false +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/scope.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/scope.go new file mode 100644 index 00000000000..fc7887909e2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/scope.go @@ -0,0 +1,13 @@ +package v4 + +import "strings" + +// BuildCredentialScope builds the Signature Version 4 (SigV4) signing scope +func BuildCredentialScope(signingTime SigningTime, region, service string) string { + return strings.Join([]string{ + signingTime.ShortTimeFormat(), + region, + service, + "aws4_request", + }, "/") +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/time.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/time.go new file mode 100644 index 00000000000..1de06a765d1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/time.go @@ -0,0 +1,36 @@ +package v4 + +import "time" + +// SigningTime provides a wrapper around a time.Time which provides cached values for SigV4 signing. +type SigningTime struct { + time.Time + timeFormat string + shortTimeFormat string +} + +// NewSigningTime creates a new SigningTime given a time.Time +func NewSigningTime(t time.Time) SigningTime { + return SigningTime{ + Time: t, + } +} + +// TimeFormat provides a time formatted in the X-Amz-Date format. +func (m *SigningTime) TimeFormat() string { + return m.format(&m.timeFormat, TimeFormat) +} + +// ShortTimeFormat provides a time formatted of 20060102. 
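// Illustrative sketch, not part of the vendored file (in-package, since the
// package is internal). The date is arbitrary; both formats are computed once
// and then cached.

package v4

import (
	"fmt"
	"time"
)

func exampleSigningTime() {
	st := NewSigningTime(time.Date(2024, 3, 1, 12, 30, 0, 0, time.UTC))
	fmt.Println(st.TimeFormat())      // 20240301T123000Z (X-Amz-Date form)
	fmt.Println(st.ShortTimeFormat()) // 20240301 (credential scope form)
}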
+func (m *SigningTime) ShortTimeFormat() string {
+	return m.format(&m.shortTimeFormat, ShortTimeFormat)
+}
+
+func (m *SigningTime) format(target *string, format string) string {
+	if len(*target) > 0 {
+		return *target
+	}
+	v := m.Time.Format(format)
+	*target = v
+	return v
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/util.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/util.go
new file mode 100644
index 00000000000..d025dbaa060
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/util.go
@@ -0,0 +1,80 @@
+package v4
+
+import (
+	"net/url"
+	"strings"
+)
+
+const doubleSpace = "  "
+
+// StripExcessSpaces returns the passed-in string with leading and trailing
+// spaces trimmed, and runs of multiple side-by-side spaces collapsed to a
+// single space.
+func StripExcessSpaces(str string) string {
+	var j, k, l, m, spaces int
+	// Trim trailing spaces
+	for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- {
+	}
+
+	// Trim leading spaces
+	for k = 0; k < j && str[k] == ' '; k++ {
+	}
+	str = str[k : j+1]
+
+	// Strip multiple spaces.
+	j = strings.Index(str, doubleSpace)
+	if j < 0 {
+		return str
+	}
+
+	buf := []byte(str)
+	for k, m, l = j, j, len(buf); k < l; k++ {
+		if buf[k] == ' ' {
+			if spaces == 0 {
+				// First space.
+				buf[m] = buf[k]
+				m++
+			}
+			spaces++
+		} else {
+			// End of multiple spaces.
+			spaces = 0
+			buf[m] = buf[k]
+			m++
+		}
+	}
+
+	return string(buf[:m])
+}
+
+// GetURIPath returns the escaped URI component from the provided URL.
+func GetURIPath(u *url.URL) string {
+	var uriPath string
+
+	if len(u.Opaque) > 0 {
+		const schemeSep, pathSep, queryStart = "//", "/", "?"
+
+		opaque := u.Opaque
+		// Cut off the query string if present.
+		if idx := strings.Index(opaque, queryStart); idx >= 0 {
+			opaque = opaque[:idx]
+		}
+
+		// Cut out the scheme separator if present.
+		if strings.Index(opaque, schemeSep) == 0 {
+			opaque = opaque[len(schemeSep):]
+		}
+
+		// Capture the URI path starting with the first path separator.
+ if idx := strings.Index(opaque, pathSep); idx >= 0 { + uriPath = opaque[idx:] + } + } else { + uriPath = u.EscapedPath() + } + + if len(uriPath) == 0 { + uriPath = "/" + } + + return uriPath +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go new file mode 100644 index 00000000000..0fb9b24e4ac --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go @@ -0,0 +1,408 @@ +package v4 + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "net/http" + "strings" + + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + v4Internal "github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +const computePayloadHashMiddlewareID = "ComputePayloadHash" + +// HashComputationError indicates an error occurred while computing the signing hash +type HashComputationError struct { + Err error +} + +// Error is the error message +func (e *HashComputationError) Error() string { + return fmt.Sprintf("failed to compute payload hash: %v", e.Err) +} + +// Unwrap returns the underlying error if one is set +func (e *HashComputationError) Unwrap() error { + return e.Err +} + +// SigningError indicates an error condition occurred while performing SigV4 signing +type SigningError struct { + Err error +} + +func (e *SigningError) Error() string { + return fmt.Sprintf("failed to sign request: %v", e.Err) +} + +// Unwrap returns the underlying error cause +func (e *SigningError) Unwrap() error { + return e.Err +} + +// UseDynamicPayloadSigningMiddleware swaps the compute payload sha256 middleware with a resolver middleware that +// switches between unsigned and signed payload based on TLS state for request. +// This middleware should not be used for AWS APIs that do not support unsigned payload signing auth. +// By default, SDK uses this middleware for known AWS APIs that support such TLS based auth selection . +// +// Usage example - +// S3 PutObject API allows unsigned payload signing auth usage when TLS is enabled, and uses this middleware to +// dynamically switch between unsigned and signed payload based on TLS state for request. +func UseDynamicPayloadSigningMiddleware(stack *middleware.Stack) error { + _, err := stack.Build.Swap(computePayloadHashMiddlewareID, &dynamicPayloadSigningMiddleware{}) + return err +} + +// dynamicPayloadSigningMiddleware dynamically resolves the middleware that computes and set payload sha256 middleware. +type dynamicPayloadSigningMiddleware struct { +} + +// ID returns the resolver identifier +func (m *dynamicPayloadSigningMiddleware) ID() string { + return computePayloadHashMiddlewareID +} + +// HandleBuild sets a resolver that directs to the payload sha256 compute handler. 
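// Illustrative sketch, not part of the vendored file: the payload-hash
// middlewares in this file communicate through stack-scoped context values.
// GetPayloadHash and SetPayloadHash are defined near the end of this file; a
// pre-computed hash short-circuits the compute-payload-hash step.

package main

import (
	"context"
	"crypto/sha256"
	"encoding/hex"
	"fmt"

	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
)

func main() {
	body := []byte(`{"hello":"world"}`)
	sum := sha256.Sum256(body)

	ctx := v4.SetPayloadHash(context.Background(), hex.EncodeToString(sum[:]))
	fmt.Println(v4.GetPayloadHash(ctx)) // the hex digest set above
}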
+func (m *dynamicPayloadSigningMiddleware) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + // if TLS is enabled, use unsigned payload when supported + if req.IsHTTPS() { + return (&unsignedPayload{}).HandleBuild(ctx, in, next) + } + + // else fall back to signed payload + return (&computePayloadSHA256{}).HandleBuild(ctx, in, next) +} + +// unsignedPayload sets the SigV4 request payload hash to unsigned. +// +// Will not set the Unsigned Payload magic SHA value, if a SHA has already been +// stored in the context. (e.g. application pre-computed SHA256 before making +// API call). +// +// This middleware does not check the X-Amz-Content-Sha256 header, if that +// header is serialized a middleware must translate it into the context. +type unsignedPayload struct{} + +// AddUnsignedPayloadMiddleware adds unsignedPayload to the operation +// middleware stack +func AddUnsignedPayloadMiddleware(stack *middleware.Stack) error { + return stack.Build.Add(&unsignedPayload{}, middleware.After) +} + +// ID returns the unsignedPayload identifier +func (m *unsignedPayload) ID() string { + return computePayloadHashMiddlewareID +} + +// HandleBuild sets the payload hash to be an unsigned payload +func (m *unsignedPayload) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + // This should not compute the content SHA256 if the value is already + // known. (e.g. application pre-computed SHA256 before making API call). + // Does not have any tight coupling to the X-Amz-Content-Sha256 header, if + // that header is provided a middleware must translate it into the context. + contentSHA := GetPayloadHash(ctx) + if len(contentSHA) == 0 { + contentSHA = v4Internal.UnsignedPayload + } + + ctx = SetPayloadHash(ctx, contentSHA) + return next.HandleBuild(ctx, in) +} + +// computePayloadSHA256 computes SHA256 payload hash to sign. +// +// Will not set the Unsigned Payload magic SHA value, if a SHA has already been +// stored in the context. (e.g. application pre-computed SHA256 before making +// API call). +// +// This middleware does not check the X-Amz-Content-Sha256 header, if that +// header is serialized a middleware must translate it into the context. 
+type computePayloadSHA256 struct{} + +// AddComputePayloadSHA256Middleware adds computePayloadSHA256 to the +// operation middleware stack +func AddComputePayloadSHA256Middleware(stack *middleware.Stack) error { + return stack.Build.Add(&computePayloadSHA256{}, middleware.After) +} + +// RemoveComputePayloadSHA256Middleware removes computePayloadSHA256 from the +// operation middleware stack +func RemoveComputePayloadSHA256Middleware(stack *middleware.Stack) error { + _, err := stack.Build.Remove(computePayloadHashMiddlewareID) + return err +} + +// ID is the middleware name +func (m *computePayloadSHA256) ID() string { + return computePayloadHashMiddlewareID +} + +// HandleBuild compute the payload hash for the request payload +func (m *computePayloadSHA256) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &HashComputationError{ + Err: fmt.Errorf("unexpected request middleware type %T", in.Request), + } + } + + // This should not compute the content SHA256 if the value is already + // known. (e.g. application pre-computed SHA256 before making API call) + // Does not have any tight coupling to the X-Amz-Content-Sha256 header, if + // that header is provided a middleware must translate it into the context. + if contentSHA := GetPayloadHash(ctx); len(contentSHA) != 0 { + return next.HandleBuild(ctx, in) + } + + hash := sha256.New() + if stream := req.GetStream(); stream != nil { + _, err = io.Copy(hash, stream) + if err != nil { + return out, metadata, &HashComputationError{ + Err: fmt.Errorf("failed to compute payload hash, %w", err), + } + } + + if err := req.RewindStream(); err != nil { + return out, metadata, &HashComputationError{ + Err: fmt.Errorf("failed to seek body to start, %w", err), + } + } + } + + ctx = SetPayloadHash(ctx, hex.EncodeToString(hash.Sum(nil))) + + return next.HandleBuild(ctx, in) +} + +// SwapComputePayloadSHA256ForUnsignedPayloadMiddleware replaces the +// ComputePayloadSHA256 middleware with the UnsignedPayload middleware. +// +// Use this to disable computing the Payload SHA256 checksum and instead use +// UNSIGNED-PAYLOAD for the SHA256 value. +func SwapComputePayloadSHA256ForUnsignedPayloadMiddleware(stack *middleware.Stack) error { + _, err := stack.Build.Swap(computePayloadHashMiddlewareID, &unsignedPayload{}) + return err +} + +// contentSHA256Header sets the X-Amz-Content-Sha256 header value to +// the Payload hash stored in the context. +type contentSHA256Header struct{} + +// AddContentSHA256HeaderMiddleware adds ContentSHA256Header to the +// operation middleware stack +func AddContentSHA256HeaderMiddleware(stack *middleware.Stack) error { + return stack.Build.Insert(&contentSHA256Header{}, computePayloadHashMiddlewareID, middleware.After) +} + +// RemoveContentSHA256HeaderMiddleware removes contentSHA256Header middleware +// from the operation middleware stack +func RemoveContentSHA256HeaderMiddleware(stack *middleware.Stack) error { + _, err := stack.Build.Remove((*contentSHA256Header)(nil).ID()) + return err +} + +// ID returns the ContentSHA256HeaderMiddleware identifier +func (m *contentSHA256Header) ID() string { + return "SigV4ContentSHA256Header" +} + +// HandleBuild sets the X-Amz-Content-Sha256 header value to the Payload hash +// stored in the context. 
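// Illustrative sketch, not part of the vendored file: applying the swap helper
// above through aws.Config.APIOptions, which generated clients run against
// their middleware stacks.

package main

import (
	"context"
	"log"

	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	// Sign every request with UNSIGNED-PAYLOAD instead of a body SHA256.
	cfg.APIOptions = append(cfg.APIOptions,
		v4.SwapComputePayloadSHA256ForUnsignedPayloadMiddleware)
	_ = cfg // pass cfg to a service client constructor
}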
+func (m *contentSHA256Header) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &HashComputationError{Err: fmt.Errorf("unexpected request middleware type %T", in.Request)} + } + + req.Header.Set(v4Internal.ContentSHAKey, GetPayloadHash(ctx)) + + return next.HandleBuild(ctx, in) +} + +// SignHTTPRequestMiddlewareOptions is the configuration options for the SignHTTPRequestMiddleware middleware. +type SignHTTPRequestMiddlewareOptions struct { + CredentialsProvider aws.CredentialsProvider + Signer HTTPSigner + LogSigning bool +} + +// SignHTTPRequestMiddleware is a `FinalizeMiddleware` implementation for SigV4 HTTP Signing +type SignHTTPRequestMiddleware struct { + credentialsProvider aws.CredentialsProvider + signer HTTPSigner + logSigning bool +} + +// NewSignHTTPRequestMiddleware constructs a SignHTTPRequestMiddleware using the given Signer for signing requests +func NewSignHTTPRequestMiddleware(options SignHTTPRequestMiddlewareOptions) *SignHTTPRequestMiddleware { + return &SignHTTPRequestMiddleware{ + credentialsProvider: options.CredentialsProvider, + signer: options.Signer, + logSigning: options.LogSigning, + } +} + +// ID is the SignHTTPRequestMiddleware identifier +func (s *SignHTTPRequestMiddleware) ID() string { + return "Signing" +} + +// HandleFinalize will take the provided input and sign the request using the SigV4 authentication scheme +func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + if !haveCredentialProvider(s.credentialsProvider) { + return next.HandleFinalize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &SigningError{Err: fmt.Errorf("unexpected request middleware type %T", in.Request)} + } + + signingName, signingRegion := awsmiddleware.GetSigningName(ctx), awsmiddleware.GetSigningRegion(ctx) + payloadHash := GetPayloadHash(ctx) + if len(payloadHash) == 0 { + return out, metadata, &SigningError{Err: fmt.Errorf("computed payload hash missing from context")} + } + + credentials, err := s.credentialsProvider.Retrieve(ctx) + if err != nil { + return out, metadata, &SigningError{Err: fmt.Errorf("failed to retrieve credentials: %w", err)} + } + + signerOptions := []func(o *SignerOptions){ + func(o *SignerOptions) { + o.Logger = middleware.GetLogger(ctx) + o.LogSigning = s.logSigning + }, + } + + // existing DisableURIPathEscaping is equivalent in purpose + // to authentication scheme property DisableDoubleEncoding + disableDoubleEncoding, overridden := internalauth.GetDisableDoubleEncoding(ctx) + if overridden { + signerOptions = append(signerOptions, func(o *SignerOptions) { + o.DisableURIPathEscaping = disableDoubleEncoding + }) + } + + err = s.signer.SignHTTP(ctx, credentials, req.Request, payloadHash, signingName, signingRegion, sdk.NowTime(), signerOptions...) + if err != nil { + return out, metadata, &SigningError{Err: fmt.Errorf("failed to sign http request, %w", err)} + } + + ctx = awsmiddleware.SetSigningCredentials(ctx, credentials) + + return next.HandleFinalize(ctx, in) +} + +type streamingEventsPayload struct{} + +// AddStreamingEventsPayload adds the streamingEventsPayload middleware to the stack. 
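// Illustrative sketch, not part of the vendored file: wiring the signing
// middleware above into a smithy middleware stack by hand (generated clients
// do this automatically). The static credentials are placeholders.

package main

import (
	"log"

	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
	"github.com/aws/aws-sdk-go-v2/credentials"
	"github.com/aws/smithy-go/middleware"
	smithyhttp "github.com/aws/smithy-go/transport/http"
)

func main() {
	stack := middleware.NewStack("example", smithyhttp.NewStackRequest)

	mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{
		CredentialsProvider: credentials.NewStaticCredentialsProvider("AKID", "SECRET", ""),
		Signer:              v4.NewSigner(),
	})
	if err := stack.Finalize.Add(mw, middleware.After); err != nil {
		log.Fatal(err)
	}
}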
+func AddStreamingEventsPayload(stack *middleware.Stack) error { + return stack.Build.Add(&streamingEventsPayload{}, middleware.After) +} + +func (s *streamingEventsPayload) ID() string { + return computePayloadHashMiddlewareID +} + +func (s *streamingEventsPayload) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + contentSHA := GetPayloadHash(ctx) + if len(contentSHA) == 0 { + contentSHA = v4Internal.StreamingEventsPayload + } + + ctx = SetPayloadHash(ctx, contentSHA) + + return next.HandleBuild(ctx, in) +} + +// GetSignedRequestSignature attempts to extract the signature of the request. +// Returning an error if the request is unsigned, or unable to extract the +// signature. +func GetSignedRequestSignature(r *http.Request) ([]byte, error) { + const authHeaderSignatureElem = "Signature=" + + if auth := r.Header.Get(authorizationHeader); len(auth) != 0 { + ps := strings.Split(auth, ", ") + for _, p := range ps { + if idx := strings.Index(p, authHeaderSignatureElem); idx >= 0 { + sig := p[len(authHeaderSignatureElem):] + if len(sig) == 0 { + return nil, fmt.Errorf("invalid request signature authorization header") + } + return hex.DecodeString(sig) + } + } + } + + if sig := r.URL.Query().Get("X-Amz-Signature"); len(sig) != 0 { + return hex.DecodeString(sig) + } + + return nil, fmt.Errorf("request not signed") +} + +func haveCredentialProvider(p aws.CredentialsProvider) bool { + if p == nil { + return false + } + + return !aws.IsCredentialsProvider(p, (*aws.AnonymousCredentials)(nil)) +} + +type payloadHashKey struct{} + +// GetPayloadHash retrieves the payload hash to use for signing +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func GetPayloadHash(ctx context.Context) (v string) { + v, _ = middleware.GetStackValue(ctx, payloadHashKey{}).(string) + return v +} + +// SetPayloadHash sets the payload hash to be used for signing the request +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func SetPayloadHash(ctx context.Context, hash string) context.Context { + return middleware.WithStackValue(ctx, payloadHashKey{}, hash) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/presign_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/presign_middleware.go new file mode 100644 index 00000000000..e1a06651243 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/presign_middleware.go @@ -0,0 +1,127 @@ +package v4 + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/smithy-go/middleware" + smithyHTTP "github.com/aws/smithy-go/transport/http" +) + +// HTTPPresigner is an interface to a SigV4 signer that can sign create a +// presigned URL for a HTTP requests. +type HTTPPresigner interface { + PresignHTTP( + ctx context.Context, credentials aws.Credentials, r *http.Request, + payloadHash string, service string, region string, signingTime time.Time, + optFns ...func(*SignerOptions), + ) (url string, signedHeader http.Header, err error) +} + +// PresignedHTTPRequest provides the URL and signed headers that are included +// in the presigned URL. 
+type PresignedHTTPRequest struct {
+	URL          string
+	Method       string
+	SignedHeader http.Header
+}
+
+// PresignHTTPRequestMiddlewareOptions is the set of options for the
+// PresignHTTPRequestMiddleware middleware.
+type PresignHTTPRequestMiddlewareOptions struct {
+	CredentialsProvider aws.CredentialsProvider
+	Presigner           HTTPPresigner
+	LogSigning          bool
+}
+
+// PresignHTTPRequestMiddleware provides the Finalize middleware for creating a
+// presigned URL for an HTTP request.
+//
+// Will short-circuit the middleware stack and not forward on to the next
+// Finalize handler.
+type PresignHTTPRequestMiddleware struct {
+	credentialsProvider aws.CredentialsProvider
+	presigner           HTTPPresigner
+	logSigning          bool
+}
+
+// NewPresignHTTPRequestMiddleware returns a new PresignHTTPRequestMiddleware
+// initialized with the presigner.
+func NewPresignHTTPRequestMiddleware(options PresignHTTPRequestMiddlewareOptions) *PresignHTTPRequestMiddleware {
+	return &PresignHTTPRequestMiddleware{
+		credentialsProvider: options.CredentialsProvider,
+		presigner:           options.Presigner,
+		logSigning:          options.LogSigning,
+	}
+}
+
+// ID provides the middleware ID.
+func (*PresignHTTPRequestMiddleware) ID() string { return "PresignHTTPRequest" }
+
+// HandleFinalize will take the provided input and create a presigned URL for
+// the HTTP request using the SigV4 presign authentication scheme.
+//
+// Since the signed request is not a valid HTTP request, the presigned URL and
+// signed headers are returned as the result instead of forwarding the request
+// to the next Finalize handler.
+func (s *PresignHTTPRequestMiddleware) HandleFinalize(
+	ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := in.Request.(*smithyHTTP.Request)
+	if !ok {
+		return out, metadata, &SigningError{
+			Err: fmt.Errorf("unexpected request middleware type %T", in.Request),
+		}
+	}
+
+	httpReq := req.Build(ctx)
+	if !haveCredentialProvider(s.credentialsProvider) {
+		out.Result = &PresignedHTTPRequest{
+			URL:          httpReq.URL.String(),
+			Method:       httpReq.Method,
+			SignedHeader: http.Header{},
+		}
+
+		return out, metadata, nil
+	}
+
+	signingName := awsmiddleware.GetSigningName(ctx)
+	signingRegion := awsmiddleware.GetSigningRegion(ctx)
+	payloadHash := GetPayloadHash(ctx)
+	if len(payloadHash) == 0 {
+		return out, metadata, &SigningError{
+			Err: fmt.Errorf("computed payload hash missing from context"),
+		}
+	}
+
+	credentials, err := s.credentialsProvider.Retrieve(ctx)
+	if err != nil {
+		return out, metadata, &SigningError{
+			Err: fmt.Errorf("failed to retrieve credentials: %w", err),
+		}
+	}
+
+	u, h, err := s.presigner.PresignHTTP(ctx, credentials,
+		httpReq, payloadHash, signingName, signingRegion, sdk.NowTime(),
+		func(o *SignerOptions) {
+			o.Logger = middleware.GetLogger(ctx)
+			o.LogSigning = s.logSigning
+		})
+	if err != nil {
+		return out, metadata, &SigningError{
+			Err: fmt.Errorf("failed to sign http request, %w", err),
+		}
+	}
+
+	out.Result = &PresignedHTTPRequest{
+		URL:          u,
+		Method:       httpReq.Method,
+		SignedHeader: h,
+	}
+
+	return out, metadata, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go
new file mode 100644
index 00000000000..66aa2bd6ab0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go
@@ -0,0 +1,86 @@
+package v4
+
+import (
+	"context"
+	"crypto/sha256"
+	"encoding/hex"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	v4Internal "github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4"
+	"strings"
+	"time"
+)
+
+// EventStreamSigner is an AWS EventStream protocol signer.
+type EventStreamSigner interface {
+	GetSignature(ctx context.Context, headers, payload []byte, signingTime time.Time, optFns ...func(*StreamSignerOptions)) ([]byte, error)
+}
+
+// StreamSignerOptions is the set of configuration options for StreamSigner.
+type StreamSignerOptions struct{}
+
+// StreamSigner implements Signature Version 4 (SigV4) signing of event stream encoded payloads.
+type StreamSigner struct {
+	options StreamSignerOptions
+
+	credentials aws.Credentials
+	service     string
+	region      string
+
+	prevSignature []byte
+
+	signingKeyDeriver *v4Internal.SigningKeyDeriver
+}
+
+// NewStreamSigner returns a new AWS EventStream protocol signer.
+func NewStreamSigner(credentials aws.Credentials, service, region string, seedSignature []byte, optFns ...func(*StreamSignerOptions)) *StreamSigner {
+	o := StreamSignerOptions{}
+
+	for _, fn := range optFns {
+		fn(&o)
+	}
+
+	return &StreamSigner{
+		options:           o,
+		credentials:       credentials,
+		service:           service,
+		region:            region,
+		signingKeyDeriver: v4Internal.NewSigningKeyDeriver(),
+		prevSignature:     seedSignature,
+	}
+}
+
+// GetSignature signs the provided header and payload bytes.
+func (s *StreamSigner) GetSignature(ctx context.Context, headers, payload []byte, signingTime time.Time, optFns ...func(*StreamSignerOptions)) ([]byte, error) {
+	options := s.options
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	prevSignature := s.prevSignature
+
+	st := v4Internal.NewSigningTime(signingTime)
+
+	sigKey := s.signingKeyDeriver.DeriveKey(s.credentials, s.service, s.region, st)
+
+	scope := v4Internal.BuildCredentialScope(st, s.region, s.service)
+
+	stringToSign := s.buildEventStreamStringToSign(headers, payload, prevSignature, scope, &st)
+
+	signature := v4Internal.HMACSHA256(sigKey, []byte(stringToSign))
+	s.prevSignature = signature
+
+	return signature, nil
+}
+
+func (s *StreamSigner) buildEventStreamStringToSign(headers, payload, previousSignature []byte, credentialScope string, signingTime *v4Internal.SigningTime) string {
+	hash := sha256.New()
+	return strings.Join([]string{
+		"AWS4-HMAC-SHA256-PAYLOAD",
+		signingTime.TimeFormat(),
+		credentialScope,
+		hex.EncodeToString(previousSignature),
+		hex.EncodeToString(makeHash(hash, headers)),
+		hex.EncodeToString(makeHash(hash, payload)),
+	}, "\n")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go
new file mode 100644
index 00000000000..4d162556bbf
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go
@@ -0,0 +1,548 @@
+// Package v4 implements AWS Signature Version 4 (SigV4) request signing.
+//
+// Provides request signing for requests that need to be signed with
+// AWS V4 Signatures.
+//
+// # Standalone Signer
+//
+// Generally, using the signer outside of the SDK should not require any
+// additional logic. The signer works by taking advantage of the
+// URL.EscapedPath method. If your request URI requires additional escaping
+// you may need to use URL.Opaque to define what the raw URI should be sent
+// to the service as.
+//
+// The signer will first check the URL.Opaque field, and use its value if set.
+// The signer does require the URL.Opaque field to be set in the form of:
+//
+//	"//<hostname>/<path>"
+//
+//	// e.g.
+//	"//example.com/some/path"
+//
+// The leading "//" and hostname are required or the URL.Opaque escaping will
+// not work correctly.
+//
+// If URL.Opaque is not set the signer will fall back to the URL.EscapedPath()
+// method and use the returned value.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go new file mode 100644 index 00000000000..4d162556bbf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go @@ -0,0 +1,548 @@
+// Package v4 implements signing for the AWS V4 signer.
+//
+// Provides request signing for requests that need to be signed with
+// AWS V4 Signatures.
+//
+// # Standalone Signer
+//
+// Generally using the signer outside of the SDK should not require any
+// additional escaping logic. The signer escapes the request URI by taking
+// advantage of the URL.EscapedPath method. If your request URI requires
+// additional escaping you may need to use URL.Opaque to define what the raw
+// URI should be sent to the service as.
+//
+// The signer will first check the URL.Opaque field, and use its value if set.
+// The signer does require the URL.Opaque field to be set in the form of:
+//
+// "//<hostname>/<path>"
+//
+// // e.g.
+// "//example.com/some/path"
+//
+// The leading "//" and hostname are required or the URL.Opaque escaping will
+// not work correctly.
+//
+// If URL.Opaque is not set the signer will fall back to the URL.EscapedPath()
+// method and use the returned value.
+//
+// AWS v4 signature validation requires that the canonical string's URI path
+// element must be the URI escaped form of the HTTP request's path.
+// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+//
+// The Go HTTP client will perform escaping automatically on the request. Some
+// of this escaping may cause signature validation errors because the HTTP
+// request differs from the URI path or query from which the signature was
+// generated.
+// https://golang.org/pkg/net/url/#URL.EscapedPath
+//
+// Because of this, it is recommended that when using the signer outside of the
+// SDK you explicitly escape the request prior to signing; this will help
+// prevent signature validation errors. This can be done by setting
+// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then
+// call URL.EscapedPath() if Opaque is not set.
+//
+// Test `TestStandaloneSign` provides a complete example of using the signer
+// outside of the SDK and pre-escaping the URI path.
+package v4
+
+import (
+ "context"
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "hash"
+ "net/http"
+ "net/textproto"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ v4Internal "github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4"
+ "github.com/aws/smithy-go/encoding/httpbinding"
+ "github.com/aws/smithy-go/logging"
+)
+
+const (
+ signingAlgorithm = "AWS4-HMAC-SHA256"
+ authorizationHeader = "Authorization"
+)
+
+// HTTPSigner is an interface to a SigV4 signer that can sign HTTP requests
+type HTTPSigner interface {
+ SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*SignerOptions)) error
+}
+
+type keyDerivator interface {
+ DeriveKey(credential aws.Credentials, service, region string, signingTime v4Internal.SigningTime) []byte
+}
+
+// SignerOptions is the SigV4 Signer options.
+type SignerOptions struct {
+ // Disables the Signer's moving HTTP header key/value pairs from the HTTP
+ // request header to the request's query string. This is most commonly used
+ // with pre-signed requests preventing headers from being added to the
+ // request's query string.
+ DisableHeaderHoisting bool
+
+ // Disables the automatic escaping of the URI path of the request for the
+ // signature's canonical string's path. For services that do not need additional
+ // escaping, use this to disable the signer escaping the path.
+ //
+ // S3 is an example of a service that does not need additional escaping.
+ //
+ // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+ DisableURIPathEscaping bool
+
+ // The logger to send log messages to.
+ Logger logging.Logger
+
+ // Enable logging of signed requests.
+ // This will enable logging of the canonical request, the string to sign, and for presigning the subsequent
+ // presigned URL.
+ LogSigning bool
+}
+
+// Signer applies AWS v4 signing to a given request. Use this to sign requests
+// that need to be signed with AWS V4 Signatures. 
+type Signer struct { + options SignerOptions + keyDerivator keyDerivator +} + +// NewSigner returns a new SigV4 Signer +func NewSigner(optFns ...func(signer *SignerOptions)) *Signer { + options := SignerOptions{} + + for _, fn := range optFns { + fn(&options) + } + + return &Signer{options: options, keyDerivator: v4Internal.NewSigningKeyDeriver()} +} + +type httpSigner struct { + Request *http.Request + ServiceName string + Region string + Time v4Internal.SigningTime + Credentials aws.Credentials + KeyDerivator keyDerivator + IsPreSign bool + + PayloadHash string + + DisableHeaderHoisting bool + DisableURIPathEscaping bool +} + +func (s *httpSigner) Build() (signedRequest, error) { + req := s.Request + + query := req.URL.Query() + headers := req.Header + + s.setRequiredSigningFields(headers, query) + + // Sort Each Query Key's Values + for key := range query { + sort.Strings(query[key]) + } + + v4Internal.SanitizeHostForHeader(req) + + credentialScope := s.buildCredentialScope() + credentialStr := s.Credentials.AccessKeyID + "/" + credentialScope + if s.IsPreSign { + query.Set(v4Internal.AmzCredentialKey, credentialStr) + } + + unsignedHeaders := headers + if s.IsPreSign && !s.DisableHeaderHoisting { + var urlValues url.Values + urlValues, unsignedHeaders = buildQuery(v4Internal.AllowedQueryHoisting, headers) + for k := range urlValues { + query[k] = urlValues[k] + } + } + + host := req.URL.Host + if len(req.Host) > 0 { + host = req.Host + } + + signedHeaders, signedHeadersStr, canonicalHeaderStr := s.buildCanonicalHeaders(host, v4Internal.IgnoredHeaders, unsignedHeaders, s.Request.ContentLength) + + if s.IsPreSign { + query.Set(v4Internal.AmzSignedHeadersKey, signedHeadersStr) + } + + var rawQuery strings.Builder + rawQuery.WriteString(strings.Replace(query.Encode(), "+", "%20", -1)) + + canonicalURI := v4Internal.GetURIPath(req.URL) + if !s.DisableURIPathEscaping { + canonicalURI = httpbinding.EscapePath(canonicalURI, false) + } + + canonicalString := s.buildCanonicalString( + req.Method, + canonicalURI, + rawQuery.String(), + signedHeadersStr, + canonicalHeaderStr, + ) + + strToSign := s.buildStringToSign(credentialScope, canonicalString) + signingSignature, err := s.buildSignature(strToSign) + if err != nil { + return signedRequest{}, err + } + + if s.IsPreSign { + rawQuery.WriteString("&X-Amz-Signature=") + rawQuery.WriteString(signingSignature) + } else { + headers[authorizationHeader] = append(headers[authorizationHeader][:0], buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature)) + } + + req.URL.RawQuery = rawQuery.String() + + return signedRequest{ + Request: req, + SignedHeaders: signedHeaders, + CanonicalString: canonicalString, + StringToSign: strToSign, + PreSigned: s.IsPreSign, + }, nil +} + +func buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature string) string { + const credential = "Credential=" + const signedHeaders = "SignedHeaders=" + const signature = "Signature=" + const commaSpace = ", " + + var parts strings.Builder + parts.Grow(len(signingAlgorithm) + 1 + + len(credential) + len(credentialStr) + 2 + + len(signedHeaders) + len(signedHeadersStr) + 2 + + len(signature) + len(signingSignature), + ) + parts.WriteString(signingAlgorithm) + parts.WriteRune(' ') + parts.WriteString(credential) + parts.WriteString(credentialStr) + parts.WriteString(commaSpace) + parts.WriteString(signedHeaders) + parts.WriteString(signedHeadersStr) + parts.WriteString(commaSpace) + parts.WriteString(signature) + 
parts.WriteString(signingSignature)
+ return parts.String()
+}
+
+// SignHTTP signs AWS v4 requests with the provided payload hash, service name, region the
+// request is made to, and time the request is signed at. The signTime allows
+// you to specify that a request is signed for the future, and cannot be
+// used until then.
+//
+// The payloadHash is the hex encoded SHA-256 hash of the request payload, and
+// must be provided, even if the request has no payload (aka body). If the
+// request has no payload you should use the hex encoded SHA-256 of an empty
+// string as the payloadHash value.
+//
+// "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+//
+// Some services such as Amazon S3 accept alternative values for the payload
+// hash, such as "UNSIGNED-PAYLOAD" for requests where the body will not be
+// included in the request signature.
+//
+// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html
+//
+// SignHTTP differs from PresignHTTP in that it will sign the request using HTTP
+// header values. This type of signing is intended for http.Request values that
+// will not be shared, or are shared in a way where the header values on the
+// request will not be lost.
+//
+// The passed in request will be modified in place.
+func (s Signer) SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(options *SignerOptions)) error {
+ options := s.options
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ signer := &httpSigner{
+ Request: r,
+ PayloadHash: payloadHash,
+ ServiceName: service,
+ Region: region,
+ Credentials: credentials,
+ Time: v4Internal.NewSigningTime(signingTime.UTC()),
+ DisableHeaderHoisting: options.DisableHeaderHoisting,
+ DisableURIPathEscaping: options.DisableURIPathEscaping,
+ KeyDerivator: s.keyDerivator,
+ }
+
+ signedRequest, err := signer.Build()
+ if err != nil {
+ return err
+ }
+
+ logSigningInfo(ctx, options, &signedRequest, false)
+
+ return nil
+}
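To make the contract above concrete, here is a hedged sketch of standalone signing with NewSigner and SignHTTP, using the empty-payload hash called out in the doc comment; the endpoint, service name, region, and credentials are illustrative assumptions:

```go
package main

import (
	"context"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
)

func main() {
	creds := aws.Credentials{AccessKeyID: "AKID", SecretAccessKey: "SECRET"}

	req, _ := http.NewRequest(http.MethodGet, "https://examplebucket.s3.amazonaws.com/test.txt", nil)

	// Hex-encoded SHA-256 of an empty payload, since the request has no body.
	emptyHash := sha256.Sum256([]byte{})
	payloadHash := hex.EncodeToString(emptyHash[:])

	signer := v4.NewSigner()
	if err := signer.SignHTTP(context.TODO(), creds, req, payloadHash, "s3", "us-east-1", time.Now().UTC()); err != nil {
		panic(err)
	}

	// The request was modified in place; the Authorization header now carries the signature.
	fmt.Println(req.Header.Get("Authorization"))
}
```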
+
+// PresignHTTP signs AWS v4 requests with the payload hash, service name, region
+// the request is made to, and time the request is signed at. The signTime
+// allows you to specify that a request is signed for the future, and cannot
+// be used until then.
+//
+// Returns the signed URL and the map of HTTP headers that were included in the
+// signature or an error if signing the request failed. For presigned requests
+// these headers and their values must be included on the HTTP request when it
+// is made. This is helpful to know what header values need to be shared with
+// the party the presigned request will be distributed to.
+//
+// The payloadHash is the hex encoded SHA-256 hash of the request payload, and
+// must be provided, even if the request has no payload (aka body). If the
+// request has no payload you should use the hex encoded SHA-256 of an empty
+// string as the payloadHash value.
+//
+// "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+//
+// Some services such as Amazon S3 accept alternative values for the payload
+// hash, such as "UNSIGNED-PAYLOAD" for requests where the body will not be
+// included in the request signature.
+//
+// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html
+//
+// PresignHTTP differs from SignHTTP in that it will sign the request using
+// query string instead of header values. This allows you to share the
+// Presigned Request's URL with third parties, or distribute it throughout your
+// system with minimal dependencies.
+//
+// PresignHTTP will not set the expires time of the presigned request
+// automatically. To specify the expire duration for a request add the
+// "X-Amz-Expires" query parameter on the request with the value as the
+// duration in seconds the presigned URL should be considered valid for. This
+// parameter is not used by all AWS services, and is most notably used by
+// Amazon S3 APIs.
+//
+// expires := 20 * time.Minute
+// query := req.URL.Query()
+// query.Set("X-Amz-Expires", strconv.FormatInt(int64(expires/time.Second), 10))
+// req.URL.RawQuery = query.Encode()
+//
+// This method does not modify the provided request.
+func (s *Signer) PresignHTTP(
+ ctx context.Context, credentials aws.Credentials, r *http.Request,
+ payloadHash string, service string, region string, signingTime time.Time,
+ optFns ...func(*SignerOptions),
+) (signedURI string, signedHeaders http.Header, err error) {
+ options := s.options
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ signer := &httpSigner{
+ Request: r.Clone(r.Context()),
+ PayloadHash: payloadHash,
+ ServiceName: service,
+ Region: region,
+ Credentials: credentials,
+ Time: v4Internal.NewSigningTime(signingTime.UTC()),
+ IsPreSign: true,
+ DisableHeaderHoisting: options.DisableHeaderHoisting,
+ DisableURIPathEscaping: options.DisableURIPathEscaping,
+ KeyDerivator: s.keyDerivator,
+ }
+
+ signedRequest, err := signer.Build()
+ if err != nil {
+ return "", nil, err
+ }
+
+ logSigningInfo(ctx, options, &signedRequest, true)
+
+ signedHeaders = make(http.Header)
+
+ // For the signed headers we canonicalize the header keys in the returned map.
+ // This avoids situations where the standard library can double headers, like the
+ // Host header. For example the standard library will set the Host header, even if
+ // it is present in lower-case form.
+ for k, v := range signedRequest.SignedHeaders {
+ key := textproto.CanonicalMIMEHeaderKey(k)
+ signedHeaders[key] = append(signedHeaders[key], v...)
+ }
+
+ return signedRequest.Request.URL.String(), signedHeaders, nil
+}
+
+func (s *httpSigner) buildCredentialScope() string {
+ return v4Internal.BuildCredentialScope(s.Time, s.Region, s.ServiceName)
+}
+
+func buildQuery(r v4Internal.Rule, header http.Header) (url.Values, http.Header) {
+ query := url.Values{}
+ unsignedHeaders := http.Header{}
+ for k, h := range header {
+ if r.IsValid(k) {
+ query[k] = h
+ } else {
+ unsignedHeaders[k] = h
+ }
+ }
+
+ return query, unsignedHeaders
+}
+
+func (s *httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, header http.Header, length int64) (signed http.Header, signedHeaders, canonicalHeadersStr string) {
+ signed = make(http.Header)
+
+ var headers []string
+ const hostHeader = "host"
+ headers = append(headers, hostHeader)
+ signed[hostHeader] = append(signed[hostHeader], host)
+
+ const contentLengthHeader = "content-length"
+ if length > 0 {
+ headers = append(headers, contentLengthHeader)
+ signed[contentLengthHeader] = append(signed[contentLengthHeader], strconv.FormatInt(length, 10))
+ }
+
+ for k, v := range header {
+ if !rule.IsValid(k) {
+ continue // ignored header
+ }
+ if strings.EqualFold(k, contentLengthHeader) {
+ // prevent signing already handled content-length header. 
+ continue + } + + lowerCaseKey := strings.ToLower(k) + if _, ok := signed[lowerCaseKey]; ok { + // include additional values + signed[lowerCaseKey] = append(signed[lowerCaseKey], v...) + continue + } + + headers = append(headers, lowerCaseKey) + signed[lowerCaseKey] = v + } + sort.Strings(headers) + + signedHeaders = strings.Join(headers, ";") + + var canonicalHeaders strings.Builder + n := len(headers) + const colon = ':' + for i := 0; i < n; i++ { + if headers[i] == hostHeader { + canonicalHeaders.WriteString(hostHeader) + canonicalHeaders.WriteRune(colon) + canonicalHeaders.WriteString(v4Internal.StripExcessSpaces(host)) + } else { + canonicalHeaders.WriteString(headers[i]) + canonicalHeaders.WriteRune(colon) + // Trim out leading, trailing, and dedup inner spaces from signed header values. + values := signed[headers[i]] + for j, v := range values { + cleanedValue := strings.TrimSpace(v4Internal.StripExcessSpaces(v)) + canonicalHeaders.WriteString(cleanedValue) + if j < len(values)-1 { + canonicalHeaders.WriteRune(',') + } + } + } + canonicalHeaders.WriteRune('\n') + } + canonicalHeadersStr = canonicalHeaders.String() + + return signed, signedHeaders, canonicalHeadersStr +} + +func (s *httpSigner) buildCanonicalString(method, uri, query, signedHeaders, canonicalHeaders string) string { + return strings.Join([]string{ + method, + uri, + query, + canonicalHeaders, + signedHeaders, + s.PayloadHash, + }, "\n") +} + +func (s *httpSigner) buildStringToSign(credentialScope, canonicalRequestString string) string { + return strings.Join([]string{ + signingAlgorithm, + s.Time.TimeFormat(), + credentialScope, + hex.EncodeToString(makeHash(sha256.New(), []byte(canonicalRequestString))), + }, "\n") +} + +func makeHash(hash hash.Hash, b []byte) []byte { + hash.Reset() + hash.Write(b) + return hash.Sum(nil) +} + +func (s *httpSigner) buildSignature(strToSign string) (string, error) { + key := s.KeyDerivator.DeriveKey(s.Credentials, s.ServiceName, s.Region, s.Time) + return hex.EncodeToString(v4Internal.HMACSHA256(key, []byte(strToSign))), nil +} + +func (s *httpSigner) setRequiredSigningFields(headers http.Header, query url.Values) { + amzDate := s.Time.TimeFormat() + + if s.IsPreSign { + query.Set(v4Internal.AmzAlgorithmKey, signingAlgorithm) + if sessionToken := s.Credentials.SessionToken; len(sessionToken) > 0 { + query.Set("X-Amz-Security-Token", sessionToken) + } + + query.Set(v4Internal.AmzDateKey, amzDate) + return + } + + headers[v4Internal.AmzDateKey] = append(headers[v4Internal.AmzDateKey][:0], amzDate) + + if len(s.Credentials.SessionToken) > 0 { + headers[v4Internal.AmzSecurityTokenKey] = append(headers[v4Internal.AmzSecurityTokenKey][:0], s.Credentials.SessionToken) + } +} + +func logSigningInfo(ctx context.Context, options SignerOptions, request *signedRequest, isPresign bool) { + if !options.LogSigning { + return + } + signedURLMsg := "" + if isPresign { + signedURLMsg = fmt.Sprintf(logSignedURLMsg, request.Request.URL.String()) + } + logger := logging.WithContext(ctx, options.Logger) + logger.Logf(logging.Debug, logSignInfoMsg, request.CanonicalString, request.StringToSign, signedURLMsg) +} + +type signedRequest struct { + Request *http.Request + SignedHeaders http.Header + CanonicalString string + StringToSign string + PreSigned bool +} + +const logSignInfoMsg = `Request Signature: +---[ CANONICAL STRING ]----------------------------- +%s +---[ STRING TO SIGN ]-------------------------------- +%s%s +-----------------------------------------------------` +const logSignedURLMsg = ` 
+---[ SIGNED URL ]------------------------------------ +%s` diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/to_ptr.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/to_ptr.go new file mode 100644 index 00000000000..f3fc4d610dc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/to_ptr.go @@ -0,0 +1,297 @@ +// Code generated by aws/generate.go DO NOT EDIT. + +package aws + +import ( + "github.com/aws/smithy-go/ptr" + "time" +) + +// Bool returns a pointer value for the bool value passed in. +func Bool(v bool) *bool { + return ptr.Bool(v) +} + +// BoolSlice returns a slice of bool pointers from the values +// passed in. +func BoolSlice(vs []bool) []*bool { + return ptr.BoolSlice(vs) +} + +// BoolMap returns a map of bool pointers from the values +// passed in. +func BoolMap(vs map[string]bool) map[string]*bool { + return ptr.BoolMap(vs) +} + +// Byte returns a pointer value for the byte value passed in. +func Byte(v byte) *byte { + return ptr.Byte(v) +} + +// ByteSlice returns a slice of byte pointers from the values +// passed in. +func ByteSlice(vs []byte) []*byte { + return ptr.ByteSlice(vs) +} + +// ByteMap returns a map of byte pointers from the values +// passed in. +func ByteMap(vs map[string]byte) map[string]*byte { + return ptr.ByteMap(vs) +} + +// String returns a pointer value for the string value passed in. +func String(v string) *string { + return ptr.String(v) +} + +// StringSlice returns a slice of string pointers from the values +// passed in. +func StringSlice(vs []string) []*string { + return ptr.StringSlice(vs) +} + +// StringMap returns a map of string pointers from the values +// passed in. +func StringMap(vs map[string]string) map[string]*string { + return ptr.StringMap(vs) +} + +// Int returns a pointer value for the int value passed in. +func Int(v int) *int { + return ptr.Int(v) +} + +// IntSlice returns a slice of int pointers from the values +// passed in. +func IntSlice(vs []int) []*int { + return ptr.IntSlice(vs) +} + +// IntMap returns a map of int pointers from the values +// passed in. +func IntMap(vs map[string]int) map[string]*int { + return ptr.IntMap(vs) +} + +// Int8 returns a pointer value for the int8 value passed in. +func Int8(v int8) *int8 { + return ptr.Int8(v) +} + +// Int8Slice returns a slice of int8 pointers from the values +// passed in. +func Int8Slice(vs []int8) []*int8 { + return ptr.Int8Slice(vs) +} + +// Int8Map returns a map of int8 pointers from the values +// passed in. +func Int8Map(vs map[string]int8) map[string]*int8 { + return ptr.Int8Map(vs) +} + +// Int16 returns a pointer value for the int16 value passed in. +func Int16(v int16) *int16 { + return ptr.Int16(v) +} + +// Int16Slice returns a slice of int16 pointers from the values +// passed in. +func Int16Slice(vs []int16) []*int16 { + return ptr.Int16Slice(vs) +} + +// Int16Map returns a map of int16 pointers from the values +// passed in. +func Int16Map(vs map[string]int16) map[string]*int16 { + return ptr.Int16Map(vs) +} + +// Int32 returns a pointer value for the int32 value passed in. +func Int32(v int32) *int32 { + return ptr.Int32(v) +} + +// Int32Slice returns a slice of int32 pointers from the values +// passed in. +func Int32Slice(vs []int32) []*int32 { + return ptr.Int32Slice(vs) +} + +// Int32Map returns a map of int32 pointers from the values +// passed in. +func Int32Map(vs map[string]int32) map[string]*int32 { + return ptr.Int32Map(vs) +} + +// Int64 returns a pointer value for the int64 value passed in. 
+func Int64(v int64) *int64 { + return ptr.Int64(v) +} + +// Int64Slice returns a slice of int64 pointers from the values +// passed in. +func Int64Slice(vs []int64) []*int64 { + return ptr.Int64Slice(vs) +} + +// Int64Map returns a map of int64 pointers from the values +// passed in. +func Int64Map(vs map[string]int64) map[string]*int64 { + return ptr.Int64Map(vs) +} + +// Uint returns a pointer value for the uint value passed in. +func Uint(v uint) *uint { + return ptr.Uint(v) +} + +// UintSlice returns a slice of uint pointers from the values +// passed in. +func UintSlice(vs []uint) []*uint { + return ptr.UintSlice(vs) +} + +// UintMap returns a map of uint pointers from the values +// passed in. +func UintMap(vs map[string]uint) map[string]*uint { + return ptr.UintMap(vs) +} + +// Uint8 returns a pointer value for the uint8 value passed in. +func Uint8(v uint8) *uint8 { + return ptr.Uint8(v) +} + +// Uint8Slice returns a slice of uint8 pointers from the values +// passed in. +func Uint8Slice(vs []uint8) []*uint8 { + return ptr.Uint8Slice(vs) +} + +// Uint8Map returns a map of uint8 pointers from the values +// passed in. +func Uint8Map(vs map[string]uint8) map[string]*uint8 { + return ptr.Uint8Map(vs) +} + +// Uint16 returns a pointer value for the uint16 value passed in. +func Uint16(v uint16) *uint16 { + return ptr.Uint16(v) +} + +// Uint16Slice returns a slice of uint16 pointers from the values +// passed in. +func Uint16Slice(vs []uint16) []*uint16 { + return ptr.Uint16Slice(vs) +} + +// Uint16Map returns a map of uint16 pointers from the values +// passed in. +func Uint16Map(vs map[string]uint16) map[string]*uint16 { + return ptr.Uint16Map(vs) +} + +// Uint32 returns a pointer value for the uint32 value passed in. +func Uint32(v uint32) *uint32 { + return ptr.Uint32(v) +} + +// Uint32Slice returns a slice of uint32 pointers from the values +// passed in. +func Uint32Slice(vs []uint32) []*uint32 { + return ptr.Uint32Slice(vs) +} + +// Uint32Map returns a map of uint32 pointers from the values +// passed in. +func Uint32Map(vs map[string]uint32) map[string]*uint32 { + return ptr.Uint32Map(vs) +} + +// Uint64 returns a pointer value for the uint64 value passed in. +func Uint64(v uint64) *uint64 { + return ptr.Uint64(v) +} + +// Uint64Slice returns a slice of uint64 pointers from the values +// passed in. +func Uint64Slice(vs []uint64) []*uint64 { + return ptr.Uint64Slice(vs) +} + +// Uint64Map returns a map of uint64 pointers from the values +// passed in. +func Uint64Map(vs map[string]uint64) map[string]*uint64 { + return ptr.Uint64Map(vs) +} + +// Float32 returns a pointer value for the float32 value passed in. +func Float32(v float32) *float32 { + return ptr.Float32(v) +} + +// Float32Slice returns a slice of float32 pointers from the values +// passed in. +func Float32Slice(vs []float32) []*float32 { + return ptr.Float32Slice(vs) +} + +// Float32Map returns a map of float32 pointers from the values +// passed in. +func Float32Map(vs map[string]float32) map[string]*float32 { + return ptr.Float32Map(vs) +} + +// Float64 returns a pointer value for the float64 value passed in. +func Float64(v float64) *float64 { + return ptr.Float64(v) +} + +// Float64Slice returns a slice of float64 pointers from the values +// passed in. +func Float64Slice(vs []float64) []*float64 { + return ptr.Float64Slice(vs) +} + +// Float64Map returns a map of float64 pointers from the values +// passed in. 
+func Float64Map(vs map[string]float64) map[string]*float64 {
+ return ptr.Float64Map(vs)
+}
+
+// Time returns a pointer value for the time.Time value passed in.
+func Time(v time.Time) *time.Time {
+ return ptr.Time(v)
+}
+
+// TimeSlice returns a slice of time.Time pointers from the values
+// passed in.
+func TimeSlice(vs []time.Time) []*time.Time {
+ return ptr.TimeSlice(vs)
+}
+
+// TimeMap returns a map of time.Time pointers from the values
+// passed in.
+func TimeMap(vs map[string]time.Time) map[string]*time.Time {
+ return ptr.TimeMap(vs)
+}
+
+// Duration returns a pointer value for the time.Duration value passed in.
+func Duration(v time.Duration) *time.Duration {
+ return ptr.Duration(v)
+}
+
+// DurationSlice returns a slice of time.Duration pointers from the values
+// passed in.
+func DurationSlice(vs []time.Duration) []*time.Duration {
+ return ptr.DurationSlice(vs)
+}
+
+// DurationMap returns a map of time.Duration pointers from the values
+// passed in.
+func DurationMap(vs map[string]time.Duration) map[string]*time.Duration {
+ return ptr.DurationMap(vs)
+}
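For illustration, a small sketch of how these generated helpers are typically used to populate pointer-typed optional fields; the input struct here is hypothetical, not a real service shape:

```go
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
)

// putItemInput is a hypothetical API input shape with optional fields.
type putItemInput struct {
	TableName *string
	TTL       *time.Duration
	ReadOnly  *bool
}

func main() {
	// The helpers convert literal values into the pointers the shape expects.
	in := putItemInput{
		TableName: aws.String("orders"),
		TTL:       aws.Duration(15 * time.Minute),
		ReadOnly:  aws.Bool(false),
	}
	fmt.Println(*in.TableName, *in.TTL, *in.ReadOnly)
}
```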
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go new file mode 100644 index 00000000000..26d90719b2d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go @@ -0,0 +1,310 @@
+package http
+
+import (
+ "crypto/tls"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "net"
+ "net/http"
+ "reflect"
+ "sync"
+ "time"
+)
+
+// Defaults for the HTTPTransportBuilder.
+var (
+ // Default connection pool options
+ DefaultHTTPTransportMaxIdleConns = 100
+ DefaultHTTPTransportMaxIdleConnsPerHost = 10
+
+ // Default connection timeouts
+ DefaultHTTPTransportIdleConnTimeout = 90 * time.Second
+ DefaultHTTPTransportTLSHandleshakeTimeout = 10 * time.Second
+ DefaultHTTPTransportExpectContinueTimeout = 1 * time.Second
+
+ // Default to TLS 1.2 for all HTTPS requests.
+ DefaultHTTPTransportTLSMinVersion uint16 = tls.VersionTLS12
+)
+
+// Timeouts for net.Dialer's network connection.
+var (
+ DefaultDialConnectTimeout = 30 * time.Second
+ DefaultDialKeepAliveTimeout = 30 * time.Second
+)
+
+// BuildableClient provides an HTTPClient implementation with options to
+// create copies of the HTTPClient when additional configuration is provided.
+//
+// The client's methods will not share the http.Transport value between copies
+// of the BuildableClient. Only exported member values of the Transport and
+// optional Dialer will be copied between copies of BuildableClient.
+type BuildableClient struct {
+ transport *http.Transport
+ dialer *net.Dialer
+
+ initOnce sync.Once
+
+ clientTimeout time.Duration
+ client *http.Client
+}
+
+// NewBuildableClient returns an initialized client for invoking HTTP
+// requests.
+func NewBuildableClient() *BuildableClient {
+ return &BuildableClient{}
+}
+
+// Do implements the HTTPClient interface's Do method to invoke an HTTP request,
+// and receive the response. Uses the BuildableClient's current
+// configuration to invoke the http.Request.
+//
+// If connection pooling is enabled (aka HTTP KeepAlive) the client will only
+// share pooled connections with its own instance. Copies of the
+// BuildableClient will have their own connection pools.
+//
+// Redirect (3xx) responses will not be followed; the HTTP response received
+// will be returned instead.
+func (b *BuildableClient) Do(req *http.Request) (*http.Response, error) {
+ b.initOnce.Do(b.build)
+
+ return b.client.Do(req)
+}
+
+// Freeze returns a frozen aws.HTTPClient implementation that is no longer a BuildableClient.
+// Use this to prevent the SDK from applying DefaultMode configuration values to a buildable client.
+func (b *BuildableClient) Freeze() aws.HTTPClient {
+ cpy := b.clone()
+ cpy.build()
+ return cpy.client
+}
+
+func (b *BuildableClient) build() {
+ b.client = wrapWithLimitedRedirect(&http.Client{
+ Timeout: b.clientTimeout,
+ Transport: b.GetTransport(),
+ })
+}
+
+func (b *BuildableClient) clone() *BuildableClient {
+ cpy := NewBuildableClient()
+ cpy.transport = b.GetTransport()
+ cpy.dialer = b.GetDialer()
+ cpy.clientTimeout = b.clientTimeout
+
+ return cpy
+}
+
+// WithTransportOptions copies the BuildableClient and returns it with the
+// http.Transport options applied.
+//
+// If a non-(*http.Transport) value was set as the round tripper, the round tripper
+// will be replaced with a default Transport value before invoking the option
+// functions.
+func (b *BuildableClient) WithTransportOptions(opts ...func(*http.Transport)) *BuildableClient {
+ cpy := b.clone()
+
+ tr := cpy.GetTransport()
+ for _, opt := range opts {
+ opt(tr)
+ }
+ cpy.transport = tr
+
+ return cpy
+}
+
+// WithDialerOptions copies the BuildableClient and returns it with the
+// net.Dialer options applied. Will set the client's http.Transport DialContext
+// member.
+func (b *BuildableClient) WithDialerOptions(opts ...func(*net.Dialer)) *BuildableClient {
+ cpy := b.clone()
+
+ dialer := cpy.GetDialer()
+ for _, opt := range opts {
+ opt(dialer)
+ }
+ cpy.dialer = dialer
+
+ tr := cpy.GetTransport()
+ tr.DialContext = cpy.dialer.DialContext
+ cpy.transport = tr
+
+ return cpy
+}
+
+// WithTimeout sets the timeout used by the client for all requests.
+func (b *BuildableClient) WithTimeout(timeout time.Duration) *BuildableClient {
+ cpy := b.clone()
+ cpy.clientTimeout = timeout
+ return cpy
+}
+
+// GetTransport returns a copy of the client's HTTP Transport.
+func (b *BuildableClient) GetTransport() *http.Transport {
+ var tr *http.Transport
+ if b.transport != nil {
+ tr = b.transport.Clone()
+ } else {
+ tr = defaultHTTPTransport()
+ }
+
+ return tr
+}
+
+// GetDialer returns a copy of the client's network dialer.
+func (b *BuildableClient) GetDialer() *net.Dialer {
+ var dialer *net.Dialer
+ if b.dialer != nil {
+ dialer = shallowCopyStruct(b.dialer).(*net.Dialer)
+ } else {
+ dialer = defaultDialer()
+ }
+
+ return dialer
+}
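A brief, hedged sketch of composing a client with these options; the timeout and transport settings are arbitrary examples, and each With* call returns a copy, leaving the receiver untouched:

```go
package main

import (
	"fmt"
	"net/http"
	"time"

	awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
)

func main() {
	// Chain option methods; each returns an independent copy of the client.
	client := awshttp.NewBuildableClient().
		WithTimeout(15 * time.Second).
		WithTransportOptions(func(tr *http.Transport) {
			tr.MaxIdleConnsPerHost = 20 // arbitrary example value
		})

	fmt.Println("client timeout:", client.GetTimeout())

	// Optionally freeze to stop the SDK applying DefaultsMode values later.
	_ = client.Freeze()
}
```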
+
+// GetTimeout returns a copy of the client's timeout to cancel requests with.
+func (b *BuildableClient) GetTimeout() time.Duration {
+ return b.clientTimeout
+}
+
+func defaultDialer() *net.Dialer {
+ return &net.Dialer{
+ Timeout: DefaultDialConnectTimeout,
+ KeepAlive: DefaultDialKeepAliveTimeout,
+ DualStack: true,
+ }
+}
+
+func defaultHTTPTransport() *http.Transport {
+ dialer := defaultDialer()
+
+ tr := &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: dialer.DialContext,
+ TLSHandshakeTimeout: DefaultHTTPTransportTLSHandleshakeTimeout,
+ MaxIdleConns: DefaultHTTPTransportMaxIdleConns,
+ MaxIdleConnsPerHost: DefaultHTTPTransportMaxIdleConnsPerHost,
+ IdleConnTimeout: DefaultHTTPTransportIdleConnTimeout,
+ ExpectContinueTimeout: DefaultHTTPTransportExpectContinueTimeout,
+ ForceAttemptHTTP2: true,
+ TLSClientConfig: &tls.Config{
+ MinVersion: DefaultHTTPTransportTLSMinVersion,
+ },
+ }
+
+ return tr
+}
+
+// shallowCopyStruct creates a shallow copy of the passed in source struct, and
+// returns that copy of the same struct type.
+func shallowCopyStruct(src interface{}) interface{} {
+ srcVal := reflect.ValueOf(src)
+ srcValType := srcVal.Type()
+
+ var returnAsPtr bool
+ if srcValType.Kind() == reflect.Ptr {
+ srcVal = srcVal.Elem()
+ srcValType = srcValType.Elem()
+ returnAsPtr = true
+ }
+ dstVal := reflect.New(srcValType).Elem()
+
+ for i := 0; i < srcValType.NumField(); i++ {
+ ft := srcValType.Field(i)
+ if len(ft.PkgPath) != 0 {
+ // unexported fields have a PkgPath
+ continue
+ }
+
+ dstVal.Field(i).Set(srcVal.Field(i))
+ }
+
+ if returnAsPtr {
+ dstVal = dstVal.Addr()
+ }
+
+ return dstVal.Interface()
+}
+
+// wrapWithLimitedRedirect updates the Client's Transport and CheckRedirect to
+// not follow any redirect other than 307 and 308. No other redirect will be
+// followed.
+//
+// If the client does not have a Transport defined will use a new SDK default
+// http.Transport configuration.
+func wrapWithLimitedRedirect(c *http.Client) *http.Client {
+ tr := c.Transport
+ if tr == nil {
+ tr = defaultHTTPTransport()
+ }
+
+ cc := *c
+ cc.CheckRedirect = limitedRedirect
+ cc.Transport = suppressBadHTTPRedirectTransport{
+ tr: tr,
+ }
+
+ return &cc
+}
+
+// limitedRedirect is a CheckRedirect that prevents the client from following
+// any non 307/308 HTTP status code redirects.
+//
+// The 307 and 308 redirects are allowed because the client must use the
+// original HTTP method for the redirected to location. Whereas 301 and 302
+// allow the client to switch to GET for the redirect.
+//
+// Suppresses all redirect requests with a URL of badHTTPRedirectLocation.
+func limitedRedirect(r *http.Request, via []*http.Request) error {
+ // Request.Response, in CheckRedirect is the response that is triggering
+ // the redirect.
+ resp := r.Response
+ if r.URL.String() == badHTTPRedirectLocation {
+ resp.Header.Del(badHTTPRedirectLocation)
+ return http.ErrUseLastResponse
+ }
+
+ switch resp.StatusCode {
+ case 307, 308:
+ // Only allow 307 and 308 redirects as they preserve the method.
+ return nil
+ }
+
+ return http.ErrUseLastResponse
+}
+
+// suppressBadHTTPRedirectTransport provides an http.RoundTripper
+// implementation that wraps another http.RoundTripper to prevent the HTTP
+// client from failing when receiving 301 and 302 redirect responses without
+// the required Location header.
+//
+// Clients using this utility must have a CheckRedirect, e.g. limitedRedirect,
+// that checks for responses having a URL of badHTTPRedirectLocation, and
+// suppresses the redirect. 
+type suppressBadHTTPRedirectTransport struct {
+ tr http.RoundTripper
+}
+
+const badHTTPRedirectLocation = `https://amazonaws.com/badhttpredirectlocation`
+
+// RoundTrip backfills a stub location when a 301/302 response is received
+// without a location. This stub location is used by limitedRedirect to prevent
+// the HTTP client from failing when attempting to follow a redirect without a
+// location value.
+func (t suppressBadHTTPRedirectTransport) RoundTrip(r *http.Request) (*http.Response, error) {
+ resp, err := t.tr.RoundTrip(r)
+ if err != nil {
+ return resp, err
+ }
+
+ // S3 is the only known service to return 301 without location header.
+ // The Go standard library HTTP client will return an opaque error if it
+ // tries to follow a 301/302 response missing the location header.
+ switch resp.StatusCode {
+ case 301, 302:
+ if v := resp.Header.Get("Location"); len(v) == 0 {
+ resp.Header.Set("Location", badHTTPRedirectLocation)
+ }
+ }
+
+ return resp, err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/content_type.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/content_type.go new file mode 100644 index 00000000000..556f54a7f77 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/content_type.go @@ -0,0 +1,42 @@
+package http
+
+import (
+ "context"
+ "fmt"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// removeContentTypeHeader is a build middleware that removes the
+// Content-Type header if the Content-Length header is unset or
+// set to zero.
+type removeContentTypeHeader struct {
+}
+
+// ID the name of the middleware.
+func (m *removeContentTypeHeader) ID() string {
+ return "RemoveContentTypeHeader"
+}
+
+// HandleBuild removes the Content-Type header from the request when the
+// request's content length is zero.
+func (m *removeContentTypeHeader) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
+ out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", in)
+ }
+
+ // remove contentTypeHeader when content-length is zero
+ if req.ContentLength == 0 {
+ req.Header.Del("content-type")
+ }
+
+ return next.HandleBuild(ctx, in)
+}
+
+// RemoveContentTypeHeader removes content-type header if
+// content length is unset or equal to zero.
+func RemoveContentTypeHeader(stack *middleware.Stack) error {
+ return stack.Build.Add(&removeContentTypeHeader{}, middleware.After)
+}
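As a usage sketch, the exported helper above can be attached to a smithy middleware stack like so; the stack here is constructed by hand purely for illustration, since in practice the SDK builds it per operation:

```go
package main

import (
	"fmt"

	awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
	"github.com/aws/smithy-go/middleware"
	smithyhttp "github.com/aws/smithy-go/transport/http"
)

func main() {
	// Build an operation stack manually (the SDK normally does this internally).
	stack := middleware.NewStack("example", smithyhttp.NewStackRequest)

	// Register the build-step middleware defined above.
	if err := awshttp.RemoveContentTypeHeader(stack); err != nil {
		panic(err)
	}

	fmt.Println("middleware registered on stack:", stack.ID())
}
```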
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error.go new file mode 100644 index 00000000000..44651c9902d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error.go @@ -0,0 +1,33 @@
+package http
+
+import (
+ "errors"
+ "fmt"
+
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// ResponseError provides the HTTP centric error type wrapping the underlying error
+// with the HTTP response value and the deserialized RequestID.
+type ResponseError struct {
+ *smithyhttp.ResponseError
+
+ // RequestID associated with response error
+ RequestID string
+}
+
+// ServiceRequestID returns the request id associated with Response Error
+func (e *ResponseError) ServiceRequestID() string { return e.RequestID }
+
+// Error returns the formatted error
+func (e *ResponseError) Error() string {
+ return fmt.Sprintf(
+ "https response error StatusCode: %d, RequestID: %s, %v",
+ e.Response.StatusCode, e.RequestID, e.Err)
+}
+
+// As populates target and returns true if the type of target is an error type
+// that the ResponseError embeds (e.g. AWS HTTP ResponseError)
+func (e *ResponseError) As(target interface{}) bool {
+ return errors.As(e.ResponseError, target)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go new file mode 100644 index 00000000000..8fd14cecd23 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go @@ -0,0 +1,54 @@
+package http
+
+import (
+ "context"
+
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// AddResponseErrorMiddleware adds response error wrapper middleware
+func AddResponseErrorMiddleware(stack *middleware.Stack) error {
+ // add error wrapper middleware before request id retriever middleware so that it can wrap the error response
+ // returned by operation deserializers
+ return stack.Deserialize.Insert(&responseErrorWrapper{}, "RequestIDRetriever", middleware.Before)
+}
+
+type responseErrorWrapper struct {
+}
+
+// ID returns the middleware identifier
+func (m *responseErrorWrapper) ID() string {
+ return "ResponseErrorWrapper"
+}
+
+func (m *responseErrorWrapper) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err == nil {
+ // Nothing to do when there is no error.
+ return out, metadata, err
+ }
+
+ resp, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ // No raw response to wrap with.
+ return out, metadata, err
+ }
+
+ // look for request id in metadata
+ reqID, _ := awsmiddleware.GetRequestIDMetadata(metadata)
+
+ // Wrap the returned smithy error with the request id retrieved from the metadata
+ err = &ResponseError{
+ ResponseError: &smithyhttp.ResponseError{
+ Response: resp,
+ Err: err,
+ },
+ RequestID: reqID,
+ }
+
+ return out, metadata, err
+}
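A short sketch of how a caller might unwrap this type with errors.As to recover the status code and request ID; the inspect helper and its trigger error are hypothetical:

```go
package main

import (
	"errors"
	"fmt"

	awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
)

// inspect reports transport-level details if err wraps an awshttp.ResponseError.
func inspect(err error) {
	var re *awshttp.ResponseError
	if errors.As(err, &re) {
		fmt.Println("status:", re.Response.StatusCode, "request id:", re.RequestID)
		return
	}
	fmt.Println("not an AWS HTTP response error:", err)
}

func main() {
	// Hypothetical: a plain error, so inspect falls through to the default case.
	inspect(errors.New("operation failed"))
}
```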
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/timeout_read_closer.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/timeout_read_closer.go new file mode 100644 index 00000000000..993929bd9b7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/timeout_read_closer.go @@ -0,0 +1,104 @@
+package http
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+type readResult struct {
+ n int
+ err error
+}
+
+// ResponseTimeoutError is an error when the reads from the response are
+// delayed longer than the timeout the read was configured for.
+type ResponseTimeoutError struct {
+ TimeoutDur time.Duration
+}
+
+// Timeout returns that the error was caused by a timeout, and can be
+// retried.
+func (*ResponseTimeoutError) Timeout() bool { return true }
+
+func (e *ResponseTimeoutError) Error() string {
+ return fmt.Sprintf("read on body reach timeout limit, %v", e.TimeoutDur)
+}
+
+// timeoutReadCloser will handle body reads that take too long.
+// We will return an ErrReadTimeout error if a timeout occurs.
+type timeoutReadCloser struct {
+ reader io.ReadCloser
+ duration time.Duration
+}
+
+// Read will spin off a goroutine to call the reader's Read method. We will
+// select on the timer's channel or the read's channel. Whoever completes first
+// will be returned.
+func (r *timeoutReadCloser) Read(b []byte) (int, error) {
+ timer := time.NewTimer(r.duration)
+ c := make(chan readResult, 1)
+
+ go func() {
+ n, err := r.reader.Read(b)
+ timer.Stop()
+ c <- readResult{n: n, err: err}
+ }()
+
+ select {
+ case data := <-c:
+ return data.n, data.err
+ case <-timer.C:
+ return 0, &ResponseTimeoutError{TimeoutDur: r.duration}
+ }
+}
+
+func (r *timeoutReadCloser) Close() error {
+ return r.reader.Close()
+}
+
+// AddResponseReadTimeoutMiddleware adds a middleware to the stack that wraps the
+// response body so that a read that takes too long will return an error.
+func AddResponseReadTimeoutMiddleware(stack *middleware.Stack, duration time.Duration) error {
+ return stack.Deserialize.Add(&readTimeout{duration: duration}, middleware.After)
+}
+
+// readTimeout wraps the response body with a timeoutReadCloser
+type readTimeout struct {
+ duration time.Duration
+}
+
+// ID returns the id of the middleware
+func (*readTimeout) ID() string {
+ return "ReadResponseTimeout"
+}
+
+// HandleDeserialize implements the DeserializeMiddleware interface
+func (m *readTimeout) HandleDeserialize(
+ ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ response.Body = &timeoutReadCloser{
+ reader: response.Body,
+ duration: m.duration,
+ }
+ out.RawResponse = response
+
+ return out, metadata, err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/types.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/types.go new file mode 100644 index 00000000000..cc3ae811402 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/types.go @@ -0,0 +1,42 @@
+package aws
+
+import (
+ "fmt"
+)
+
+// Ternary is an enum allowing an unknown or none state in addition to a bool's
+// true and false.
+type Ternary int
+
+func (t Ternary) String() string {
+ switch t {
+ case UnknownTernary:
+ return "unknown"
+ case FalseTernary:
+ return "false"
+ case TrueTernary:
+ return "true"
+ default:
+ return fmt.Sprintf("unknown value, %d", int(t))
+ }
+}
+
+// Bool returns true if the value is TrueTernary, false otherwise.
+func (t Ternary) Bool() bool {
+ return t == TrueTernary
+}
+
+// Enumerations for the values of the Ternary type.
+const (
+ UnknownTernary Ternary = iota
+ FalseTernary
+ TrueTernary
+)
+
+// BoolTernary returns a true or false Ternary value for the bool provided. 
+func BoolTernary(v bool) Ternary { + if v { + return TrueTernary + } + return FalseTernary +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/version.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/version.go new file mode 100644 index 00000000000..5f729d45e1c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/version.go @@ -0,0 +1,8 @@ +// Package aws provides core functionality for making requests to AWS services. +package aws + +// SDKName is the name of this AWS SDK +const SDKName = "aws-sdk-go-v2" + +// SDKVersion is the version of this SDK +const SDKVersion = goModuleVersion diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md new file mode 100644 index 00000000000..cbe4ec7e50b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md @@ -0,0 +1,479 @@ +# v1.24.0 (2023-11-13) + +* **Feature**: Replace the legacy config parser with a modern, less-strict implementation. Parsing failures within a section will now simply ignore the invalid line rather than silently drop the entire section. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.23.0 (2023-11-09.2) + +* **Feature**: BREAKFIX: In order to support subproperty parsing, invalid property definitions must not be ignored +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.3 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.2 (2023-11-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.1 (2023-11-06) + +* No change notes available for this release. + +# v1.22.0 (2023-11-02) + +* **Feature**: Add env and shared config settings for disabling IMDSv1 fallback. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.21.0 (2023-11-01) + +* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.19.1 (2023-10-24) + +* No change notes available for this release. + +# v1.19.0 (2023-10-16) + +* **Feature**: Modify logic of retrieving user agent appID from env config + +# v1.18.45 (2023-10-12) + +* **Bug Fix**: Fail to load config if an explicitly provided profile doesn't exist. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.44 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.43 (2023-10-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.42 (2023-09-22) + +* **Bug Fix**: Fixed a bug where merging `max_attempts` or `duration_seconds` fields across shared config files with invalid values would silently default them to 0. +* **Bug Fix**: Move type assertion of config values out of the parsing stage, which resolves an issue where the contents of a profile would silently be dropped with certain numeric formats. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.41 (2023-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.40 (2023-09-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.39 (2023-09-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.38 (2023-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.37 (2023-08-23) + +* No change notes available for this release. + +# v1.18.36 (2023-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.35 (2023-08-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.34 (2023-08-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.33 (2023-08-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.32 (2023-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.31 (2023-07-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.30 (2023-07-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.29 (2023-07-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.28 (2023-07-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.27 (2023-06-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.26 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.25 (2023-05-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.24 (2023-05-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.23 (2023-05-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.22 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.21 (2023-04-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.20 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.19 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.18 (2023-03-16) + +* **Bug Fix**: Allow RoleARN to be set as functional option on STS WebIdentityRoleOptions. Fixes aws/aws-sdk-go-v2#2015. 
+
+# v1.18.17 (2023-03-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.16 (2023-03-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.15 (2023-02-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.14 (2023-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.13 (2023-02-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.12 (2023-02-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.11 (2023-02-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.10 (2023-01-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.9 (2023-01-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.8 (2023-01-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.7 (2022-12-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.6 (2022-12-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.5 (2022-12-15)
+
+* **Bug Fix**: Unify logic between shared config and credentials files in finding the home directory
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.4 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.3 (2022-11-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.2 (2022-11-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.1 (2022-11-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.0 (2022-11-11)
+
+* **Announcement**: When using the SSOTokenProvider, a previous implementation incorrectly compensated for invalid SSOTokenProvider configurations in the shared profile. This has been fixed via PR #1903 and tracked in issue #1846
+* **Feature**: Adds token refresh support (via SSOTokenProvider) when using the SSOCredentialProvider
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.11 (2022-11-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.10 (2022-10-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.9 (2022-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.8 (2022-09-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.7 (2022-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.6 (2022-09-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.5 (2022-09-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.4 (2022-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.3 (2022-08-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.2 (2022-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.1 (2022-08-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.0 (2022-08-14)
+
+* **Feature**: Add alternative mechanism for determining the user's `$HOME` or `%USERPROFILE%` location when the environment variables are not present. 
+
+# v1.16.1 (2022-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.0 (2022-08-10)
+
+* **Feature**: Adds support for the following settings in the `~/.aws/credentials` file: `sso_account_id`, `sso_region`, `sso_role_name`, `sso_start_url`, and `ca_bundle`.
+
+# v1.15.17 (2022-08-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.16 (2022-08-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.15 (2022-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.14 (2022-07-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.13 (2022-07-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.12 (2022-06-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.11 (2022-06-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.10 (2022-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.9 (2022-05-26)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.8 (2022-05-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.7 (2022-05-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.6 (2022-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.5 (2022-05-09)
+
+* **Bug Fix**: Fixes a bug in LoadDefaultConfig to correctly assign ConfigSources so all config resolvers have access to the config sources. This fixes the feature/ec2/imds client not having configuration applied via config.LoadOptions such as EC2IMDSClientEnableState. PR [#1682](https://github.com/aws/aws-sdk-go-v2/pull/1682)
+
+# v1.15.4 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.3 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.2 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.1 (2022-03-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.0 (2022-03-08)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.0 (2022-02-24)
+
+* **Feature**: Adds support for loading RetryMaxAttempts and RetryMode from the environment and shared configuration files. These parameters drive how the SDK's API client will initialize its default retryer, if a custom retryer has not been specified. See the [config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) module and [aws.Config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws#Config) for more information about these new options and how to use them.
+* **Feature**: Adds support for the `ca_bundle` parameter in shared config and credentials files. The usage is the same as the environment variable, `AWS_CA_BUNDLE`, but sourced from shared config. Fixes [#1589](https://github.com/aws/aws-sdk-go-v2/issues/1589)
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.1 (2022-01-28)
+
+* **Bug Fix**: Fixes LoadDefaultConfig handling of errors returned by passed in functional options. Previously errors returned from the LoadOptions passed into LoadDefaultConfig were incorrectly ignored. 
[#1562](https://github.com/aws/aws-sdk-go-v2/pull/1562). Thanks to [Pinglei Guo](https://github.com/pingleig) for submitting this PR.
+* **Bug Fix**: Fixes the SDK's handling of `duration_seconds` in the shared credentials file or specified in multiple shared config and shared credentials files under the same profile. [#1568](https://github.com/aws/aws-sdk-go-v2/pull/1568). Thanks to [Amir Szekely](https://github.com/kichik) for helping reproduce this bug.
+* **Bug Fix**: Updates `config` module to use os.UserHomeDir instead of hard coded environment variable for OS. [#1563](https://github.com/aws/aws-sdk-go-v2/pull/1563)
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.0 (2022-01-14)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.0 (2022-01-07)
+
+* **Feature**: Add load option for CredentialCache. Adds a new member to the LoadOptions struct, CredentialsCacheOptions. This member allows specifying a function that will be used to configure the CredentialsCache. The CredentialsCacheOptions will only be used if the configuration loader will wrap the underlying credential provider in the CredentialsCache.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.1 (2021-12-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.0 (2021-12-02)
+
+* **Feature**: Add support for specifying `EndpointResolverWithOptions` on `LoadOptions`, and associated `WithEndpointResolverWithOptions`.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.3 (2021-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.2 (2021-11-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.1 (2021-11-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.0 (2021-11-06)
+
+* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.0 (2021-10-21)
+
+* **Feature**: Updated to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.3 (2021-10-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.2 (2021-09-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.1 (2021-09-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.0 (2021-09-02)
+
+* **Feature**: Add support for S3 Multi-Region Access Point ARNs.
+
+# v1.7.0 (2021-08-27)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.1 (2021-08-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.0 (2021-08-04)
+
+* **Feature**: Adds error handling for deferred close calls
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.0 (2021-07-15)
+
+* **Feature**: Support has been added for EC2 IPv6-enabled Instance Metadata Service Endpoints. 
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.1 (2021-07-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.0 (2021-06-25)
+
+* **Feature**: Adds configuration setting for enabling endpoint discovery.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.0 (2021-05-20)
+
+* **Feature**: SSO credentials can now be defined alongside other credential providers within the same configuration profile.
+* **Bug Fix**: Profile names were incorrectly normalized to lower-case, which could result in unexpected profile configurations.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.0 (2021-05-14)
+
+* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting.
+* **Dependency Update**: Updated to the latest SDK module versions
+
diff --git a/vendor/go.opentelemetry.io/collector/confmap/converter/expandconverter/LICENSE b/vendor/github.com/aws/aws-sdk-go-v2/config/LICENSE.txt
similarity index 100%
rename from vendor/go.opentelemetry.io/collector/confmap/converter/expandconverter/LICENSE
rename to vendor/github.com/aws/aws-sdk-go-v2/config/LICENSE.txt
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/config.go
new file mode 100644
index 00000000000..dfe62973221
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/config.go
@@ -0,0 +1,213 @@
+package config
+
+import (
+	"context"
+	"os"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// defaultAWSConfigResolvers are a slice of functions that will resolve external
+// configuration values into AWS configuration values.
+//
+// This will set up the AWS configuration's Region and other values from the
+// resolved external sources.
+var defaultAWSConfigResolvers = []awsConfigResolver{
+	// Resolves the default configuration the SDK's aws.Config will be
+	// initialized with.
+	resolveDefaultAWSConfig,
+
+	// Sets the logger to be used. Could be a user-provided logger, and the
+	// client logging mode.
+	resolveLogger,
+	resolveClientLogMode,
+
+	// Sets the HTTP client and configuration to use for making requests using
+	// the HTTP transport.
+	resolveHTTPClient,
+	resolveCustomCABundle,
+
+	// Sets the endpoint resolving behavior the API clients will use for making
+	// requests. Clients default to their own endpoint resolution; this allows
+	// overrides to be specified. The resolveEndpointResolver option is
+	// deprecated, but we still need to set it for backwards compatibility on
+	// config construction.
+	resolveEndpointResolver,
+	resolveEndpointResolverWithOptions,
+
+	// Sets the retry behavior API clients will use within their retry attempt
+	// middleware. Defaults to unset, allowing API clients to define their own
+	// retry behavior.
+	resolveRetryer,
+
+	// Sets the region the API clients should use for making requests.
+	resolveRegion,
+	resolveEC2IMDSRegion,
+	resolveDefaultRegion,
+
+	// Sets the additional set of middleware stack mutators that will customize
+	// the API client request pipeline middleware.
+	resolveAPIOptions,
+
+	// Resolves the DefaultsMode that should be used by SDK clients. If this
+	// mode is set to DefaultsModeAuto, the runtime environment is introspected
+	// to determine the effective mode.
+	//
+	// Comes after HTTPClient and CustomCABundle to ensure the HTTP client is
+	// configured, if provided, before invoking IMDS when the mode is auto.
+	// Comes before resolving credentials so that those subsequent clients use
+	// the configured auto mode.
+	resolveDefaultsModeOptions,
+
+	// Sets the resolved credentials the API clients will use for
+	// authentication. Provides the SDK's default credential chain.
+	//
+	// Should probably be the last step in the resolve chain to ensure that all
+	// other configurations are resolved first in case downstream credentials
+	// implementations depend on or can be configured with earlier resolved
+	// configuration options.
+	resolveCredentials,
+
+	// Sets the resolved bearer authentication token API clients will use for
+	// the httpBearerAuth authentication scheme.
+	resolveBearerAuthToken,
+
+	// Sets the SDK app ID if present in the shared config profile.
+	resolveAppID,
+
+	resolveBaseEndpoint,
+}
+
+// A Config represents a generic configuration value or set of values. This type
+// will be used by the AWSConfigResolvers to extract configuration values.
+//
+// Generally the Config type will use type assertion against the Provider interfaces
+// to extract specific data from the Config.
+type Config interface{}
+
+// A loader is used to load external configuration data and returns it as
+// a generic Config type.
+//
+// The loader should return an error if it fails to load the external configuration,
+// the configuration data is malformed, or required components are missing.
+type loader func(context.Context, configs) (Config, error)
+
+// An awsConfigResolver will extract configuration data from the configs slice
+// using the provider interfaces to extract specific functionality. The extracted
+// configuration values will be written to the AWS Config value.
+//
+// The resolver should return an error if it fails to extract the data, or the
+// data is malformed or incomplete.
+type awsConfigResolver func(ctx context.Context, cfg *aws.Config, configs configs) error
+
+// configs is a slice of Config values. These values will be used by the
+// AWSConfigResolvers to extract external configuration values to populate the
+// AWS Config type.
+//
+// Use AppendFromLoaders to add additional external Config values that are
+// loaded from external sources.
+//
+// Use ResolveAWSConfig after external Config values have been added or loaded
+// to extract the loaded configuration values into the AWS Config.
+type configs []Config
+
+// AppendFromLoaders iterates over the slice of loaders passed in, calling each
+// loader function in order. The external config value returned by the loader
+// will be added to the returned configs slice.
+//
+// If a loader returns an error this method will stop iterating and return
+// that error.
+func (cs configs) AppendFromLoaders(ctx context.Context, loaders []loader) (configs, error) {
+	for _, fn := range loaders {
+		cfg, err := fn(ctx, cs)
+		if err != nil {
+			return nil, err
+		}
+
+		cs = append(cs, cfg)
+	}
+
+	return cs, nil
+}
+
+// ResolveAWSConfig returns an AWS configuration populated with values by calling
+// the resolvers slice passed in. Each resolver is called in order. Any resolver
+// may overwrite the AWS Configuration value of a previous resolver.
+//
+// If a resolver returns an error this method will return that error, and stop
+// iterating over the resolvers.
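+//
+// As an illustrative sketch only (envCfg and sharedCfg stand in for previously
+// loaded Config values; they are not defined here):
+//
+//	cfgs := configs{envCfg, sharedCfg}
+//	awsCfg, err := cfgs.ResolveAWSConfig(ctx, defaultAWSConfigResolvers)
+//	if err != nil {
+//		// a resolver failed; awsCfg is the zero aws.Config
+//	}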
+func (cs configs) ResolveAWSConfig(ctx context.Context, resolvers []awsConfigResolver) (aws.Config, error) {
+	var cfg aws.Config
+
+	for _, fn := range resolvers {
+		if err := fn(ctx, &cfg, cs); err != nil {
+			return aws.Config{}, err
+		}
+	}
+
+	return cfg, nil
+}
+
+// ResolveConfig calls the provided function, passing the slice of configuration
+// sources. This implements the aws.ConfigResolver interface.
+func (cs configs) ResolveConfig(f func(configs []interface{}) error) error {
+	var cfgs []interface{}
+	for i := range cs {
+		cfgs = append(cfgs, cs[i])
+	}
+	return f(cfgs)
+}
+
+// LoadDefaultConfig reads the SDK's default external configurations, and
+// populates an AWS Config with the values from the external configurations.
+//
+// An optional variadic set of additional Config values can be provided as input
+// that will be prepended to the configs slice. Use this to add custom configuration.
+// The custom configurations must satisfy the respective providers for their data
+// or the custom data will be ignored by the resolvers and config loaders.
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO(),
+//		config.WithSharedConfigProfile("test-profile"),
+//	)
+//	if err != nil {
+//		panic(fmt.Sprintf("failed loading config, %v", err))
+//	}
+//
+// The default configuration sources are:
+// * Environment Variables
+// * Shared Configuration and Shared Credentials files.
+func LoadDefaultConfig(ctx context.Context, optFns ...func(*LoadOptions) error) (cfg aws.Config, err error) {
+	var options LoadOptions
+	for _, optFn := range optFns {
+		if err := optFn(&options); err != nil {
+			return aws.Config{}, err
+		}
+	}
+
+	// assign Load Options to configs
+	var cfgCpy = configs{options}
+
+	cfgCpy, err = cfgCpy.AppendFromLoaders(ctx, resolveConfigLoaders(&options))
+	if err != nil {
+		return aws.Config{}, err
+	}
+
+	cfg, err = cfgCpy.ResolveAWSConfig(ctx, defaultAWSConfigResolvers)
+	if err != nil {
+		return aws.Config{}, err
+	}
+
+	return cfg, nil
+}
+
+func resolveConfigLoaders(options *LoadOptions) []loader {
+	loaders := make([]loader, 2)
+	loaders[0] = loadEnvConfig
+
+	// specification of a profile should cause a load failure if it doesn't exist
+	if os.Getenv(awsProfileEnvVar) != "" || options.SharedConfigProfile != "" {
+		loaders[1] = loadSharedConfig
+	} else {
+		loaders[1] = loadSharedConfigIgnoreNotExist
+	}
+
+	return loaders
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/defaultsmode.go b/vendor/github.com/aws/aws-sdk-go-v2/config/defaultsmode.go
new file mode 100644
index 00000000000..20b66367ffd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/defaultsmode.go
@@ -0,0 +1,47 @@
+package config
+
+import (
+	"context"
+	"os"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+)
+
+const execEnvVar = "AWS_EXECUTION_ENV"
+
+// DefaultsModeOptions is the set of options that are used to configure the
+// SDK's defaults mode resolution.
+type DefaultsModeOptions struct {
+	// The SDK configuration defaults mode. Defaults to legacy if not specified.
+	//
+	// Supported modes are: auto, cross-region, in-region, legacy, mobile, standard
+	Mode aws.DefaultsMode
+
+	// The EC2 Instance Metadata Client that should be used when performing environment
+	// discovery when aws.DefaultsModeAuto is set.
+	//
+	// If not specified the SDK will construct a client if the instance metadata service has not been disabled by
+	// the AWS_EC2_METADATA_DISABLED environment variable.
+	IMDSClient *imds.Client
+}
+
+func resolveDefaultsModeRuntimeEnvironment(ctx context.Context, envConfig *EnvConfig, client *imds.Client) (aws.RuntimeEnvironment, error) {
+	getRegionOutput, err := client.GetRegion(ctx, &imds.GetRegionInput{})
+	// honor context timeouts, but if we couldn't talk to IMDS don't fail runtime environment introspection.
+	select {
+	case <-ctx.Done():
+		return aws.RuntimeEnvironment{}, err
+	default:
+	}
+
+	var imdsRegion string
+	if err == nil {
+		imdsRegion = getRegionOutput.Region
+	}
+
+	return aws.RuntimeEnvironment{
+		EnvironmentIdentifier:     aws.ExecutionEnvironmentID(os.Getenv(execEnvVar)),
+		Region:                    envConfig.Region,
+		EC2InstanceMetadataRegion: imdsRegion,
+	}, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/config/doc.go
new file mode 100644
index 00000000000..aab7164e283
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/doc.go
@@ -0,0 +1,20 @@
+// Package config provides utilities for loading configuration from multiple
+// sources that can be used to configure the SDK's API clients and utilities.
+//
+// The config package will load configuration from environment variables, the AWS
+// shared configuration file (~/.aws/config), and the AWS shared credentials file
+// (~/.aws/credentials).
+//
+// Use LoadDefaultConfig to load configuration from all the SDK's supported
+// sources, and resolve credentials using the SDK's default credential chain.
+//
+// LoadDefaultConfig allows for a variadic list of additional Config sources that can
+// provide one or more configuration values which can be used to programmatically control the resolution
+// of a specific value, or allow for a broader range of additional configuration sources not supported by the SDK.
+// A Config source implements one or more provider interfaces defined in this package. Config sources passed in will
+// take precedence over the default environment and shared config sources used by the SDK. If one or more Config sources
+// implement the same provider interface, priority will be handled by the order in which the sources were passed in.
+//
+// A number of helpers (prefixed by "With") are provided in this package that implement their respective provider
+// interface. These helpers should be used for overriding configuration programmatically at runtime.
+package config
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go
new file mode 100644
index 00000000000..78bc1493372
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go
@@ -0,0 +1,738 @@
+package config
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"strconv"
+	"strings"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+)
+
+// CredentialsSourceName provides a name of the provider when config is
+// loaded from the environment.
+const CredentialsSourceName = "EnvConfigCredentials"
+
+// Environment variables that will be read for configuration values.
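+//
+// For illustration only, values like the following would be read from the
+// process environment (example values, not defaults):
+//
+//	AWS_REGION=us-west-2
+//	AWS_PROFILE=my_profile
+//	AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials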
+const (
+	awsAccessKeyIDEnvVar = "AWS_ACCESS_KEY_ID"
+	awsAccessKeyEnvVar   = "AWS_ACCESS_KEY"
+
+	awsSecretAccessKeyEnvVar = "AWS_SECRET_ACCESS_KEY"
+	awsSecretKeyEnvVar       = "AWS_SECRET_KEY"
+
+	awsSessionTokenEnvVar = "AWS_SESSION_TOKEN"
+
+	awsContainerCredentialsEndpointEnvVar     = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
+	awsContainerCredentialsRelativePathEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
+	awsContainerPProviderAuthorizationEnvVar  = "AWS_CONTAINER_AUTHORIZATION_TOKEN"
+
+	awsRegionEnvVar        = "AWS_REGION"
+	awsDefaultRegionEnvVar = "AWS_DEFAULT_REGION"
+
+	awsProfileEnvVar        = "AWS_PROFILE"
+	awsDefaultProfileEnvVar = "AWS_DEFAULT_PROFILE"
+
+	awsSharedCredentialsFileEnvVar = "AWS_SHARED_CREDENTIALS_FILE"
+
+	awsConfigFileEnvVar = "AWS_CONFIG_FILE"
+
+	awsCustomCABundleEnvVar = "AWS_CA_BUNDLE"
+
+	awsWebIdentityTokenFilePathEnvVar = "AWS_WEB_IDENTITY_TOKEN_FILE"
+
+	awsRoleARNEnvVar         = "AWS_ROLE_ARN"
+	awsRoleSessionNameEnvVar = "AWS_ROLE_SESSION_NAME"
+
+	awsEnableEndpointDiscoveryEnvVar = "AWS_ENABLE_ENDPOINT_DISCOVERY"
+
+	awsS3UseARNRegionEnvVar = "AWS_S3_USE_ARN_REGION"
+
+	awsEc2MetadataServiceEndpointModeEnvVar = "AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE"
+
+	awsEc2MetadataServiceEndpointEnvVar = "AWS_EC2_METADATA_SERVICE_ENDPOINT"
+
+	awsEc2MetadataDisabled         = "AWS_EC2_METADATA_DISABLED"
+	awsEc2MetadataV1DisabledEnvVar = "AWS_EC2_METADATA_V1_DISABLED"
+
+	awsS3DisableMultiRegionAccessPointEnvVar = "AWS_S3_DISABLE_MULTIREGION_ACCESS_POINTS"
+
+	awsUseDualStackEndpoint = "AWS_USE_DUALSTACK_ENDPOINT"
+
+	awsUseFIPSEndpoint = "AWS_USE_FIPS_ENDPOINT"
+
+	awsDefaultMode = "AWS_DEFAULTS_MODE"
+
+	awsRetryMaxAttempts = "AWS_MAX_ATTEMPTS"
+	awsRetryMode        = "AWS_RETRY_MODE"
+	awsSdkAppID         = "AWS_SDK_UA_APP_ID"
+
+	awsIgnoreConfiguredEndpoints = "AWS_IGNORE_CONFIGURED_ENDPOINT_URLS"
+	awsEndpointURL               = "AWS_ENDPOINT_URL"
+)
+
+var (
+	credAccessEnvKeys = []string{
+		awsAccessKeyIDEnvVar,
+		awsAccessKeyEnvVar,
+	}
+	credSecretEnvKeys = []string{
+		awsSecretAccessKeyEnvVar,
+		awsSecretKeyEnvVar,
+	}
+	regionEnvKeys = []string{
+		awsRegionEnvVar,
+		awsDefaultRegionEnvVar,
+	}
+	profileEnvKeys = []string{
+		awsProfileEnvVar,
+		awsDefaultProfileEnvVar,
+	}
+)
+
+// EnvConfig is a collection of environment values the SDK will read
+// configuration from. All environment values are optional, but some values,
+// such as credentials, require multiple values to be complete or the values
+// will be ignored.
+type EnvConfig struct {
+	// Environment configuration values. If set, both Access Key ID and Secret Access
+	// Key must be provided. A Session Token can optionally also be provided, but it
+	// is not required.
+	//
+	// # Access Key ID
+	// AWS_ACCESS_KEY_ID=AKID
+	// AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
+	//
+	// # Secret Access Key
+	// AWS_SECRET_ACCESS_KEY=SECRET
+	// AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
+	//
+	// # Session Token
+	// AWS_SESSION_TOKEN=TOKEN
+	Credentials aws.Credentials
+
+	// ContainerCredentialsEndpoint value is the HTTP enabled endpoint to retrieve credentials
+	// using the endpointcreds.Provider.
+	ContainerCredentialsEndpoint string
+
+	// ContainerCredentialsRelativePath is the relative URI path that will be used when attempting to retrieve
+	// credentials from the container endpoint.
+	ContainerCredentialsRelativePath string
+
+	// ContainerAuthorizationToken is the authorization token that will be included in the HTTP Authorization
+	// header when attempting to retrieve credentials from the container credentials endpoint.
+	ContainerAuthorizationToken string
+
+	// Region value will instruct the SDK where to make service API requests to. If it
+	// is not provided in the environment, the region must be provided before a service
+	// client request is made.
+	//
+	// AWS_REGION=us-west-2
+	// AWS_DEFAULT_REGION=us-west-2
+	Region string
+
+	// Profile name the SDK should use when loading shared configuration from the
+	// shared configuration files. If not provided "default" will be used as the
+	// profile name.
+	//
+	// AWS_PROFILE=my_profile
+	// AWS_DEFAULT_PROFILE=my_profile
+	SharedConfigProfile string
+
+	// Shared credentials file path can be set to instruct the SDK to use an alternate
+	// file for the shared credentials. If not set the file will be loaded from
+	// $HOME/.aws/credentials on Linux/Unix based systems, and
+	// %USERPROFILE%\.aws\credentials on Windows.
+	//
+	// AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
+	SharedCredentialsFile string
+
+	// Shared config file path can be set to instruct the SDK to use an alternate
+	// file for the shared config. If not set the file will be loaded from
+	// $HOME/.aws/config on Linux/Unix based systems, and
+	// %USERPROFILE%\.aws\config on Windows.
+	//
+	// AWS_CONFIG_FILE=$HOME/my_shared_config
+	SharedConfigFile string
+
+	// Sets the path to a custom Certificate Authority (CA) Bundle PEM file
+	// that the SDK will use instead of the system's root CA bundle.
+	// Only use this if you want to configure the SDK to use a custom set
+	// of CAs.
+	//
+	// Enabling this option will attempt to merge the Transport
+	// into the SDK's HTTP client. If the client's Transport is
+	// not a http.Transport an error will be returned. If the
+	// Transport's TLS config is set this option will cause the
+	// SDK to overwrite the Transport's TLS config's RootCAs value.
+	//
+	// Setting a custom HTTPClient in the aws.Config options will override this setting.
+	// To use this option with a custom HTTP client, the HTTP client needs to be provided
+	// when creating the config, not the service client.
+	//
+	// AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
+	CustomCABundle string
+
+	// Enables endpoint discovery via environment variables.
+	//
+	// AWS_ENABLE_ENDPOINT_DISCOVERY=true
+	EnableEndpointDiscovery aws.EndpointDiscoveryEnableState
+
+	// Specifies the WebIdentity token the SDK should use to assume a role
+	// with.
+	//
+	// AWS_WEB_IDENTITY_TOKEN_FILE=file_path
+	WebIdentityTokenFilePath string
+
+	// Specifies the IAM role ARN to use when assuming a role.
+	//
+	// AWS_ROLE_ARN=role_arn
+	RoleARN string
+
+	// Specifies the IAM role session name to use when assuming a role.
+	//
+	// AWS_ROLE_SESSION_NAME=session_name
+	RoleSessionName string
+
+	// Specifies if the S3 service should allow ARNs to direct the region
+	// the client's requests are sent to.
+	//
+	// AWS_S3_USE_ARN_REGION=true
+	S3UseARNRegion *bool
+
+	// Specifies if the EC2 IMDS service client is enabled.
+	//
+	// AWS_EC2_METADATA_DISABLED=true
+	EC2IMDSClientEnableState imds.ClientEnableState
+
+	// Specifies if EC2 IMDSv1 fallback is disabled.
+	//
+	// AWS_EC2_METADATA_V1_DISABLED=true
+	EC2IMDSv1Disabled *bool
+
+	// Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6).
+	//
+	// AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE=IPv6
+	EC2IMDSEndpointMode imds.EndpointModeState
+
+	// Specifies the EC2 Instance Metadata Service endpoint to use. If specified it overrides EC2IMDSEndpointMode.
+	//
+	// AWS_EC2_METADATA_SERVICE_ENDPOINT=http://fd00:ec2::254
+	EC2IMDSEndpoint string
+
+	// Specifies if the S3 service should disable multi-region access points
+	// support.
+	//
+	// AWS_S3_DISABLE_MULTIREGION_ACCESS_POINTS=true
+	S3DisableMultiRegionAccessPoints *bool
+
+	// Specifies that SDK clients must resolve a dual-stack endpoint for
+	// services.
+	//
+	// AWS_USE_DUALSTACK_ENDPOINT=true
+	UseDualStackEndpoint aws.DualStackEndpointState
+
+	// Specifies that SDK clients must resolve a FIPS endpoint for
+	// services.
+	//
+	// AWS_USE_FIPS_ENDPOINT=true
+	UseFIPSEndpoint aws.FIPSEndpointState
+
+	// Specifies the SDK Defaults Mode used by services.
+	//
+	// AWS_DEFAULTS_MODE=standard
+	DefaultsMode aws.DefaultsMode
+
+	// Specifies the maximum number of attempts an API client will make calling
+	// an operation that fails with a retryable error.
+	//
+	// AWS_MAX_ATTEMPTS=3
+	RetryMaxAttempts int
+
+	// Specifies the retry mode the API client will be created with.
+	//
+	// AWS_RETRY_MODE=standard
+	RetryMode aws.RetryMode
+
+	// The AWS SDK app ID that can be added to the User-Agent header string.
+	AppID string
+
+	// Flag used to disable configured endpoints.
+	IgnoreConfiguredEndpoints *bool
+
+	// Value to contain configured endpoints to be propagated to
+	// corresponding endpoint resolution field.
+	BaseEndpoint string
+}
+
+// loadEnvConfig reads configuration values from the OS's environment variables.
+// Returns a Config-typed EnvConfig to satisfy the loader func type.
+func loadEnvConfig(ctx context.Context, cfgs configs) (Config, error) {
+	return NewEnvConfig()
+}
+
+// NewEnvConfig retrieves the SDK's environment configuration.
+// See `EnvConfig` for the values that will be retrieved.
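+//
+// A minimal illustrative sketch (assuming AWS_REGION is set in the environment):
+//
+//	envCfg, err := config.NewEnvConfig()
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = envCfg.Region // e.g. "us-west-2"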
+func NewEnvConfig() (EnvConfig, error) {
+	var cfg EnvConfig
+
+	creds := aws.Credentials{
+		Source: CredentialsSourceName,
+	}
+	setStringFromEnvVal(&creds.AccessKeyID, credAccessEnvKeys)
+	setStringFromEnvVal(&creds.SecretAccessKey, credSecretEnvKeys)
+	if creds.HasKeys() {
+		creds.SessionToken = os.Getenv(awsSessionTokenEnvVar)
+		cfg.Credentials = creds
+	}
+
+	cfg.ContainerCredentialsEndpoint = os.Getenv(awsContainerCredentialsEndpointEnvVar)
+	cfg.ContainerCredentialsRelativePath = os.Getenv(awsContainerCredentialsRelativePathEnvVar)
+	cfg.ContainerAuthorizationToken = os.Getenv(awsContainerPProviderAuthorizationEnvVar)
+
+	setStringFromEnvVal(&cfg.Region, regionEnvKeys)
+	setStringFromEnvVal(&cfg.SharedConfigProfile, profileEnvKeys)
+
+	cfg.SharedCredentialsFile = os.Getenv(awsSharedCredentialsFileEnvVar)
+	cfg.SharedConfigFile = os.Getenv(awsConfigFileEnvVar)
+
+	cfg.CustomCABundle = os.Getenv(awsCustomCABundleEnvVar)
+
+	cfg.WebIdentityTokenFilePath = os.Getenv(awsWebIdentityTokenFilePathEnvVar)
+
+	cfg.RoleARN = os.Getenv(awsRoleARNEnvVar)
+	cfg.RoleSessionName = os.Getenv(awsRoleSessionNameEnvVar)
+
+	cfg.AppID = os.Getenv(awsSdkAppID)
+
+	if err := setEndpointDiscoveryTypeFromEnvVal(&cfg.EnableEndpointDiscovery, []string{awsEnableEndpointDiscoveryEnvVar}); err != nil {
+		return cfg, err
+	}
+
+	if err := setBoolPtrFromEnvVal(&cfg.S3UseARNRegion, []string{awsS3UseARNRegionEnvVar}); err != nil {
+		return cfg, err
+	}
+
+	setEC2IMDSClientEnableState(&cfg.EC2IMDSClientEnableState, []string{awsEc2MetadataDisabled})
+	if err := setEC2IMDSEndpointMode(&cfg.EC2IMDSEndpointMode, []string{awsEc2MetadataServiceEndpointModeEnvVar}); err != nil {
+		return cfg, err
+	}
+	cfg.EC2IMDSEndpoint = os.Getenv(awsEc2MetadataServiceEndpointEnvVar)
+	if err := setBoolPtrFromEnvVal(&cfg.EC2IMDSv1Disabled, []string{awsEc2MetadataV1DisabledEnvVar}); err != nil {
+		return cfg, err
+	}
+
+	if err := setBoolPtrFromEnvVal(&cfg.S3DisableMultiRegionAccessPoints, []string{awsS3DisableMultiRegionAccessPointEnvVar}); err != nil {
+		return cfg, err
+	}
+
+	if err := setUseDualStackEndpointFromEnvVal(&cfg.UseDualStackEndpoint, []string{awsUseDualStackEndpoint}); err != nil {
+		return cfg, err
+	}
+
+	if err := setUseFIPSEndpointFromEnvVal(&cfg.UseFIPSEndpoint, []string{awsUseFIPSEndpoint}); err != nil {
+		return cfg, err
+	}
+
+	if err := setDefaultsModeFromEnvVal(&cfg.DefaultsMode, []string{awsDefaultMode}); err != nil {
+		return cfg, err
+	}
+
+	if err := setIntFromEnvVal(&cfg.RetryMaxAttempts, []string{awsRetryMaxAttempts}); err != nil {
+		return cfg, err
+	}
+	if err := setRetryModeFromEnvVal(&cfg.RetryMode, []string{awsRetryMode}); err != nil {
+		return cfg, err
+	}
+
+	setStringFromEnvVal(&cfg.BaseEndpoint, []string{awsEndpointURL})
+
+	if err := setBoolPtrFromEnvVal(&cfg.IgnoreConfiguredEndpoints, []string{awsIgnoreConfiguredEndpoints}); err != nil {
+		return cfg, err
+	}
+
+	return cfg, nil
+}
+
+func (c EnvConfig) getDefaultsMode(ctx context.Context) (aws.DefaultsMode, bool, error) {
+	if len(c.DefaultsMode) == 0 {
+		return "", false, nil
+	}
+	return c.DefaultsMode, true, nil
+}
+
+func (c EnvConfig) getAppID(context.Context) (string, bool, error) {
+	return c.AppID, len(c.AppID) > 0, nil
+}
+
+// GetRetryMaxAttempts returns the value of AWS_MAX_ATTEMPTS if it was specified,
+// and is not 0.
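+//
+//	// e.g. AWS_MAX_ATTEMPTS=3 -> (3, true, nil); unset or empty -> (0, false, nil)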
+func (c EnvConfig) GetRetryMaxAttempts(ctx context.Context) (int, bool, error) {
+	if c.RetryMaxAttempts == 0 {
+		return 0, false, nil
+	}
+	return c.RetryMaxAttempts, true, nil
+}
+
+// GetRetryMode returns the RetryMode from AWS_RETRY_MODE if it was specified,
+// and is a valid value.
+func (c EnvConfig) GetRetryMode(ctx context.Context) (aws.RetryMode, bool, error) {
+	if len(c.RetryMode) == 0 {
+		return "", false, nil
+	}
+	return c.RetryMode, true, nil
+}
+
+func setEC2IMDSClientEnableState(state *imds.ClientEnableState, keys []string) {
+	for _, k := range keys {
+		value := os.Getenv(k)
+		if len(value) == 0 {
+			continue
+		}
+		switch {
+		case strings.EqualFold(value, "true"):
+			*state = imds.ClientDisabled
+		case strings.EqualFold(value, "false"):
+			*state = imds.ClientEnabled
+		default:
+			continue
+		}
+		break
+	}
+}
+
+func setDefaultsModeFromEnvVal(mode *aws.DefaultsMode, keys []string) error {
+	for _, k := range keys {
+		if value := os.Getenv(k); len(value) > 0 {
+			if ok := mode.SetFromString(value); !ok {
+				return fmt.Errorf("invalid %s value: %s", k, value)
+			}
+			break
+		}
+	}
+	return nil
+}
+
+func setRetryModeFromEnvVal(mode *aws.RetryMode, keys []string) (err error) {
+	for _, k := range keys {
+		if value := os.Getenv(k); len(value) > 0 {
+			*mode, err = aws.ParseRetryMode(value)
+			if err != nil {
+				return fmt.Errorf("invalid %s value, %w", k, err)
+			}
+			break
+		}
+	}
+	return nil
+}
+
+func setEC2IMDSEndpointMode(mode *imds.EndpointModeState, keys []string) error {
+	for _, k := range keys {
+		value := os.Getenv(k)
+		if len(value) == 0 {
+			continue
+		}
+		if err := mode.SetFromString(value); err != nil {
+			return fmt.Errorf("invalid value for environment variable, %s=%s, %v", k, value, err)
+		}
+	}
+	return nil
+}
+
+// getRegion returns the AWS Region if set in the environment. Returns an empty
+// string if not set.
+func (c EnvConfig) getRegion(ctx context.Context) (string, bool, error) {
+	if len(c.Region) == 0 {
+		return "", false, nil
+	}
+	return c.Region, true, nil
+}
+
+// getSharedConfigProfile returns the shared config profile if set in the
+// environment. Returns an empty string if not set.
+func (c EnvConfig) getSharedConfigProfile(ctx context.Context) (string, bool, error) {
+	if len(c.SharedConfigProfile) == 0 {
+		return "", false, nil
+	}
+
+	return c.SharedConfigProfile, true, nil
+}
+
+// getSharedConfigFiles returns a slice of filenames set in the environment.
+//
+// Will return the filenames in the order of:
+// * Shared Config
+func (c EnvConfig) getSharedConfigFiles(context.Context) ([]string, bool, error) {
+	var files []string
+	if v := c.SharedConfigFile; len(v) > 0 {
+		files = append(files, v)
+	}
+
+	if len(files) == 0 {
+		return nil, false, nil
+	}
+	return files, true, nil
+}
+
+// getSharedCredentialsFiles returns a slice of filenames set in the environment.
+//
+// Will return the filenames in the order of:
+// * Shared Credentials
+func (c EnvConfig) getSharedCredentialsFiles(context.Context) ([]string, bool, error) {
+	var files []string
+	if v := c.SharedCredentialsFile; len(v) > 0 {
+		files = append(files, v)
+	}
+	if len(files) == 0 {
+		return nil, false, nil
+	}
+	return files, true, nil
+}
+
+// getCustomCABundle returns a reader for the custom CA bundle's PEM bytes if the
+// file path was set and the file could be read.
+func (c EnvConfig) getCustomCABundle(context.Context) (io.Reader, bool, error) {
+	if len(c.CustomCABundle) == 0 {
+		return nil, false, nil
+	}
+
+	b, err := ioutil.ReadFile(c.CustomCABundle)
+	if err != nil {
+		return nil, false, err
+	}
+	return bytes.NewReader(b), true, nil
+}
+
+// GetIgnoreConfiguredEndpoints is used to determine whether the configured
+// endpoints feature should be disabled.
+func (c EnvConfig) GetIgnoreConfiguredEndpoints(context.Context) (bool, bool, error) {
+	if c.IgnoreConfiguredEndpoints == nil {
+		return false, false, nil
+	}
+
+	return *c.IgnoreConfiguredEndpoints, true, nil
+}
+
+func (c EnvConfig) getBaseEndpoint(context.Context) (string, bool, error) {
+	return c.BaseEndpoint, len(c.BaseEndpoint) > 0, nil
+}
+
+// GetServiceBaseEndpoint is used to retrieve the configured endpoint, if any,
+// for the service identified by the normalized SDK ID.
+func (c EnvConfig) GetServiceBaseEndpoint(ctx context.Context, sdkID string) (string, bool, error) {
+	if endpt := os.Getenv(fmt.Sprintf("%s_%s", awsEndpointURL, normalizeEnv(sdkID))); endpt != "" {
+		return endpt, true, nil
+	}
+	return "", false, nil
+}
+
+func normalizeEnv(sdkID string) string {
+	upper := strings.ToUpper(sdkID)
+	return strings.ReplaceAll(upper, " ", "_")
+}
+
+// GetS3UseARNRegion returns whether to allow ARNs to direct the region
+// the S3 client's requests are sent to.
+func (c EnvConfig) GetS3UseARNRegion(ctx context.Context) (value, ok bool, err error) {
+	if c.S3UseARNRegion == nil {
+		return false, false, nil
+	}
+
+	return *c.S3UseARNRegion, true, nil
+}
+
+// GetS3DisableMultiRegionAccessPoints returns whether to disable multi-region access point
+// support for the S3 client.
+func (c EnvConfig) GetS3DisableMultiRegionAccessPoints(ctx context.Context) (value, ok bool, err error) {
+	if c.S3DisableMultiRegionAccessPoints == nil {
+		return false, false, nil
+	}
+
+	return *c.S3DisableMultiRegionAccessPoints, true, nil
+}
+
+// GetUseDualStackEndpoint returns whether the service's dual-stack endpoint should be
+// used for requests.
+func (c EnvConfig) GetUseDualStackEndpoint(ctx context.Context) (value aws.DualStackEndpointState, found bool, err error) {
+	if c.UseDualStackEndpoint == aws.DualStackEndpointStateUnset {
+		return aws.DualStackEndpointStateUnset, false, nil
+	}
+
+	return c.UseDualStackEndpoint, true, nil
+}
+
+// GetUseFIPSEndpoint returns whether the service's FIPS endpoint should be
+// used for requests.
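+//
+//	// e.g. AWS_USE_FIPS_ENDPOINT=true -> (aws.FIPSEndpointStateEnabled, true, nil)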
+func (c EnvConfig) GetUseFIPSEndpoint(ctx context.Context) (value aws.FIPSEndpointState, found bool, err error) {
+	if c.UseFIPSEndpoint == aws.FIPSEndpointStateUnset {
+		return aws.FIPSEndpointStateUnset, false, nil
+	}
+
+	return c.UseFIPSEndpoint, true, nil
+}
+
+func setStringFromEnvVal(dst *string, keys []string) {
+	for _, k := range keys {
+		if v := os.Getenv(k); len(v) > 0 {
+			*dst = v
+			break
+		}
+	}
+}
+
+func setIntFromEnvVal(dst *int, keys []string) error {
+	for _, k := range keys {
+		if v := os.Getenv(k); len(v) > 0 {
+			i, err := strconv.ParseInt(v, 10, 64)
+			if err != nil {
+				return fmt.Errorf("invalid value %s=%s, %w", k, v, err)
+			}
+			*dst = int(i)
+			break
+		}
+	}
+
+	return nil
+}
+
+func setBoolPtrFromEnvVal(dst **bool, keys []string) error {
+	for _, k := range keys {
+		value := os.Getenv(k)
+		if len(value) == 0 {
+			continue
+		}
+
+		if *dst == nil {
+			*dst = new(bool)
+		}
+
+		switch {
+		case strings.EqualFold(value, "false"):
+			**dst = false
+		case strings.EqualFold(value, "true"):
+			**dst = true
+		default:
+			return fmt.Errorf(
+				"invalid value for environment variable, %s=%s, need true or false",
+				k, value)
+		}
+		break
+	}
+
+	return nil
+}
+
+func setEndpointDiscoveryTypeFromEnvVal(dst *aws.EndpointDiscoveryEnableState, keys []string) error {
+	for _, k := range keys {
+		value := os.Getenv(k)
+		if len(value) == 0 {
+			continue // skip if empty
+		}
+
+		switch {
+		case strings.EqualFold(value, endpointDiscoveryDisabled):
+			*dst = aws.EndpointDiscoveryDisabled
+		case strings.EqualFold(value, endpointDiscoveryEnabled):
+			*dst = aws.EndpointDiscoveryEnabled
+		case strings.EqualFold(value, endpointDiscoveryAuto):
+			*dst = aws.EndpointDiscoveryAuto
+		default:
+			return fmt.Errorf(
+				"invalid value for environment variable, %s=%s, need true, false or auto",
+				k, value)
+		}
+	}
+	return nil
+}
+
+func setUseDualStackEndpointFromEnvVal(dst *aws.DualStackEndpointState, keys []string) error {
+	for _, k := range keys {
+		value := os.Getenv(k)
+		if len(value) == 0 {
+			continue // skip if empty
+		}
+
+		switch {
+		case strings.EqualFold(value, "true"):
+			*dst = aws.DualStackEndpointStateEnabled
+		case strings.EqualFold(value, "false"):
+			*dst = aws.DualStackEndpointStateDisabled
+		default:
+			return fmt.Errorf(
+				"invalid value for environment variable, %s=%s, need true or false",
+				k, value)
+		}
+	}
+	return nil
+}
+
+func setUseFIPSEndpointFromEnvVal(dst *aws.FIPSEndpointState, keys []string) error {
+	for _, k := range keys {
+		value := os.Getenv(k)
+		if len(value) == 0 {
+			continue // skip if empty
+		}
+
+		switch {
+		case strings.EqualFold(value, "true"):
+			*dst = aws.FIPSEndpointStateEnabled
+		case strings.EqualFold(value, "false"):
+			*dst = aws.FIPSEndpointStateDisabled
+		default:
+			return fmt.Errorf(
+				"invalid value for environment variable, %s=%s, need true or false",
+				k, value)
+		}
+	}
+	return nil
+}
+
+// GetEnableEndpointDiscovery returns the resolved value of the EnableEndpointDiscovery env variable setting.
+func (c EnvConfig) GetEnableEndpointDiscovery(ctx context.Context) (value aws.EndpointDiscoveryEnableState, found bool, err error) {
+	if c.EnableEndpointDiscovery == aws.EndpointDiscoveryUnset {
+		return aws.EndpointDiscoveryUnset, false, nil
+	}
+
+	return c.EnableEndpointDiscovery, true, nil
+}
+
+// GetEC2IMDSClientEnableState implements an EC2IMDSClientEnableState options resolver interface.
+func (c EnvConfig) GetEC2IMDSClientEnableState() (imds.ClientEnableState, bool, error) {
+	if c.EC2IMDSClientEnableState == imds.ClientDefaultEnableState {
+		return imds.ClientDefaultEnableState, false, nil
+	}
+
+	return c.EC2IMDSClientEnableState, true, nil
+}
+
+// GetEC2IMDSEndpointMode implements an EC2IMDSEndpointMode option resolver interface.
+func (c EnvConfig) GetEC2IMDSEndpointMode() (imds.EndpointModeState, bool, error) {
+	if c.EC2IMDSEndpointMode == imds.EndpointModeStateUnset {
+		return imds.EndpointModeStateUnset, false, nil
+	}
+
+	return c.EC2IMDSEndpointMode, true, nil
+}
+
+// GetEC2IMDSEndpoint implements an EC2IMDSEndpoint option resolver interface.
+func (c EnvConfig) GetEC2IMDSEndpoint() (string, bool, error) {
+	if len(c.EC2IMDSEndpoint) == 0 {
+		return "", false, nil
+	}
+
+	return c.EC2IMDSEndpoint, true, nil
+}
+
+// GetEC2IMDSV1FallbackDisabled implements an EC2IMDSV1FallbackDisabled option
+// resolver interface.
+func (c EnvConfig) GetEC2IMDSV1FallbackDisabled() (bool, bool) {
+	if c.EC2IMDSv1Disabled == nil {
+		return false, false
+	}
+
+	return *c.EC2IMDSv1Disabled, true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/generate.go b/vendor/github.com/aws/aws-sdk-go-v2/config/generate.go
new file mode 100644
index 00000000000..654a7a77fb7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/generate.go
@@ -0,0 +1,4 @@
+package config
+
+//go:generate go run -tags codegen ./codegen -output=provider_assert_test.go
+//go:generate gofmt -s -w ./
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go
new file mode 100644
index 00000000000..1bdac9754d8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package config
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.24.0"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go b/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go
new file mode 100644
index 00000000000..7480bb45ed1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go
@@ -0,0 +1,1046 @@
+package config
+
+import (
+	"context"
+	"io"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds"
+	"github.com/aws/aws-sdk-go-v2/credentials/endpointcreds"
+	"github.com/aws/aws-sdk-go-v2/credentials/processcreds"
+	"github.com/aws/aws-sdk-go-v2/credentials/ssocreds"
+	"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
+	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+	smithybearer "github.com/aws/smithy-go/auth/bearer"
+	"github.com/aws/smithy-go/logging"
+	"github.com/aws/smithy-go/middleware"
+)
+
+// LoadOptionsFunc is a type alias for a LoadOptions functional option.
+type LoadOptionsFunc func(*LoadOptions) error
+
+// LoadOptions is a discrete set of options that are valid for loading the
+// configuration.
+type LoadOptions struct {
+
+	// Region is the region to send requests to.
+	Region string
+
+	// Credentials object to use when signing requests.
+	Credentials aws.CredentialsProvider
+
+	// Token provider for authentication operations with bearer authentication.
+	BearerAuthTokenProvider smithybearer.TokenProvider
+
+	// HTTPClient the SDK's API clients will use to invoke HTTP requests.
+	HTTPClient HTTPClient
+
+	// EndpointResolver that can be used to provide or override an endpoint for
+	// the given service and region.
+	//
+	// See the `aws.EndpointResolver` documentation on usage.
+	//
+	// Deprecated: See EndpointResolverWithOptions
+	EndpointResolver aws.EndpointResolver
+
+	// EndpointResolverWithOptions that can be used to provide or override an
+	// endpoint for the given service and region.
+	//
+	// See the `aws.EndpointResolverWithOptions` documentation on usage.
+	EndpointResolverWithOptions aws.EndpointResolverWithOptions
+
+	// RetryMaxAttempts specifies the maximum number of attempts an API client
+	// will make calling an operation that fails with a retryable error.
+	//
+	// This value will only be used if the Retryer option is nil.
+	RetryMaxAttempts int
+
+	// RetryMode specifies the retry mode the API client will be created with.
+	//
+	// This value will only be used if the Retryer option is nil.
+	RetryMode aws.RetryMode
+
+	// Retryer is a function that provides a Retryer implementation. A Retryer
+	// guides how HTTP requests should be retried in case of recoverable
+	// failures.
+	//
+	// If not nil, RetryMaxAttempts and RetryMode will be ignored.
+	Retryer func() aws.Retryer
+
+	// APIOptions provides the set of middleware mutations that modify how the
+	// API client requests will be handled. This is useful for adding additional
+	// tracing data to a request, or changing behavior of the SDK's client.
+	APIOptions []func(*middleware.Stack) error
+
+	// Logger writer interface to write logging messages to.
+	Logger logging.Logger
+
+	// ClientLogMode is used to configure the events that will be sent to the
+	// configured logger. This can be used to configure the logging of signing,
+	// retries, requests, and responses of the SDK clients.
+	//
+	// See the ClientLogMode type documentation for the complete set of logging
+	// modes and available configuration.
+	ClientLogMode *aws.ClientLogMode
+
+	// SharedConfigProfile is the profile to be used when loading the SharedConfig.
+	SharedConfigProfile string
+
+	// SharedConfigFiles is the slice of custom shared config files to use when
+	// loading the SharedConfig. A non-default profile used within a config file
+	// must have its name defined with the prefix 'profile '. eg [profile xyz]
+	// indicates a profile with name 'xyz'. To read more on the format of the
+	// config file, please refer to the documentation at
+	// https://docs.aws.amazon.com/credref/latest/refdocs/file-format.html#file-format-config
+	//
+	// If duplicate profiles are provided within the same file, or across multiple
+	// shared config files, the next parsed profile will override only the
+	// properties that conflict with the previously defined profile. Note that
+	// if duplicate profiles are provided within the SharedCredentialsFiles and
+	// SharedConfigFiles, the properties defined in the shared credentials file
+	// take precedence.
+	SharedConfigFiles []string
+
+	// SharedCredentialsFiles is the slice of custom shared credentials files to
+	// use when loading the SharedConfig. The profile name used within a
+	// credentials file must not be prefixed with 'profile '. eg [xyz] indicates a
+	// profile with name 'xyz'. A profile declared as [profile xyz] will be
+	// ignored.
+	// To read more on the format of the credentials file, please refer to the
+	// documentation at
+	// https://docs.aws.amazon.com/credref/latest/refdocs/file-format.html#file-format-creds
+	//
+	// If duplicate profiles are provided within the same file, or across multiple
+	// shared credentials files, the next parsed profile will override only
+	// properties that conflict with the previously defined profile. Note that
+	// if duplicate profiles are provided within the SharedCredentialsFiles and
+	// SharedConfigFiles, the properties defined in the shared credentials file
+	// take precedence.
+	SharedCredentialsFiles []string
+
+	// CustomCABundle is a reader for custom CA bundle PEM bytes.
+	CustomCABundle io.Reader
+
+	// DefaultRegion is the fall back region, used if a region was not resolved
+	// from other sources.
+	DefaultRegion string
+
+	// UseEC2IMDSRegion indicates if the SDK should retrieve the region
+	// from the EC2 Metadata service.
+	UseEC2IMDSRegion *UseEC2IMDSRegion
+
+	// CredentialsCacheOptions is a function for setting the
+	// aws.CredentialsCacheOptions.
+	CredentialsCacheOptions func(*aws.CredentialsCacheOptions)
+
+	// BearerAuthTokenCacheOptions is a function for setting the smithy-go
+	// auth/bearer#TokenCacheOptions.
+	BearerAuthTokenCacheOptions func(*smithybearer.TokenCacheOptions)
+
+	// SSOTokenProviderOptions is a function for setting the
+	// credentials/ssocreds.SSOTokenProviderOptions.
+	SSOTokenProviderOptions func(*ssocreds.SSOTokenProviderOptions)
+
+	// ProcessCredentialOptions is a function for setting
+	// the processcreds.Options.
+	ProcessCredentialOptions func(*processcreds.Options)
+
+	// EC2RoleCredentialOptions is a function for setting
+	// the ec2rolecreds.Options.
+	EC2RoleCredentialOptions func(*ec2rolecreds.Options)
+
+	// EndpointCredentialOptions is a function for setting
+	// the endpointcreds.Options.
+	EndpointCredentialOptions func(*endpointcreds.Options)
+
+	// WebIdentityRoleCredentialOptions is a function for setting
+	// the stscreds.WebIdentityRoleOptions.
+	WebIdentityRoleCredentialOptions func(*stscreds.WebIdentityRoleOptions)
+
+	// AssumeRoleCredentialOptions is a function for setting the
+	// stscreds.AssumeRoleOptions.
+	AssumeRoleCredentialOptions func(*stscreds.AssumeRoleOptions)
+
+	// SSOProviderOptions is a function for setting
+	// the ssocreds.Options.
+	SSOProviderOptions func(options *ssocreds.Options)
+
+	// LogConfigurationWarnings when set to true, enables logging
+	// configuration warnings.
+	LogConfigurationWarnings *bool
+
+	// S3UseARNRegion specifies if the S3 service should allow ARNs to direct
+	// the region the client's requests are sent to.
+	S3UseARNRegion *bool
+
+	// S3DisableMultiRegionAccessPoints specifies if the S3 service should disable
+	// the S3 Multi-Region access points feature.
+	S3DisableMultiRegionAccessPoints *bool
+
+	// EnableEndpointDiscovery specifies if endpoint discovery is enabled for
+	// the client.
+	EnableEndpointDiscovery aws.EndpointDiscoveryEnableState
+
+	// Specifies if the EC2 IMDS service client is enabled.
+	//
+	// AWS_EC2_METADATA_DISABLED=true
+	EC2IMDSClientEnableState imds.ClientEnableState
+
+	// Specifies the EC2 Instance Metadata Service default endpoint selection
+	// mode (IPv4 or IPv6).
+	EC2IMDSEndpointMode imds.EndpointModeState
+
+	// Specifies the EC2 Instance Metadata Service endpoint to use. If
+	// specified it overrides EC2IMDSEndpointMode.
+	EC2IMDSEndpoint string
+
+	// Specifies that SDK clients must resolve a dual-stack endpoint for
+	// services.
+	UseDualStackEndpoint aws.DualStackEndpointState
+
+	// Specifies that SDK clients must resolve a FIPS endpoint for
+	// services.
+	UseFIPSEndpoint aws.FIPSEndpointState
+
+	// Specifies the SDK configuration mode for defaults.
+	DefaultsModeOptions DefaultsModeOptions
+
+	// The SDK app ID, retrieved from an env var or shared config, to be added
+	// to the request User-Agent header.
+	AppID string
+}
+
+func (o LoadOptions) getDefaultsMode(ctx context.Context) (aws.DefaultsMode, bool, error) {
+	if len(o.DefaultsModeOptions.Mode) == 0 {
+		return "", false, nil
+	}
+	return o.DefaultsModeOptions.Mode, true, nil
+}
+
+// GetRetryMaxAttempts returns the RetryMaxAttempts if specified in the
+// LoadOptions and not 0.
+func (o LoadOptions) GetRetryMaxAttempts(ctx context.Context) (int, bool, error) {
+	if o.RetryMaxAttempts == 0 {
+		return 0, false, nil
+	}
+	return o.RetryMaxAttempts, true, nil
+}
+
+// GetRetryMode returns the RetryMode specified in the LoadOptions.
+func (o LoadOptions) GetRetryMode(ctx context.Context) (aws.RetryMode, bool, error) {
+	if len(o.RetryMode) == 0 {
+		return "", false, nil
+	}
+	return o.RetryMode, true, nil
+}
+
+func (o LoadOptions) getDefaultsModeIMDSClient(ctx context.Context) (*imds.Client, bool, error) {
+	if o.DefaultsModeOptions.IMDSClient == nil {
+		return nil, false, nil
+	}
+	return o.DefaultsModeOptions.IMDSClient, true, nil
+}
+
+// getRegion returns Region from config's LoadOptions
+func (o LoadOptions) getRegion(ctx context.Context) (string, bool, error) {
+	if len(o.Region) == 0 {
+		return "", false, nil
+	}
+
+	return o.Region, true, nil
+}
+
+// getAppID returns AppID from config's LoadOptions
+func (o LoadOptions) getAppID(ctx context.Context) (string, bool, error) {
+	return o.AppID, len(o.AppID) > 0, nil
+}
+
+// WithRegion is a helper function to construct functional options
+// that sets Region on config's LoadOptions. Setting the region to
+// an empty string will result in the region value being ignored.
+// If multiple WithRegion calls are made, the last call overrides
+// the previous call values.
+func WithRegion(v string) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.Region = v
+		return nil
+	}
+}
+
+// WithAppID is a helper function to construct functional options
+// that sets AppID on config's LoadOptions.
+func WithAppID(ID string) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.AppID = ID
+		return nil
+	}
+}
+
+// getDefaultRegion returns DefaultRegion from config's LoadOptions
+func (o LoadOptions) getDefaultRegion(ctx context.Context) (string, bool, error) {
+	if len(o.DefaultRegion) == 0 {
+		return "", false, nil
+	}
+
+	return o.DefaultRegion, true, nil
+}
+
+// WithDefaultRegion is a helper function to construct functional options
+// that sets a DefaultRegion on config's LoadOptions. Setting the default
+// region to an empty string will result in the default region value
+// being ignored. If multiple WithDefaultRegion calls are made, the last
+// call overrides the previous call values. Note that both the WithRegion and
+// WithEC2IMDSRegion calls take precedence over WithDefaultRegion
+// when resolving the region.
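+//
+// An illustrative sketch:
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO(),
+//		config.WithDefaultRegion("us-east-1"), // used only if no other source resolves a region
+//	)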
+func WithDefaultRegion(v string) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.DefaultRegion = v
+		return nil
+	}
+}
+
+// getSharedConfigProfile returns SharedConfigProfile from config's LoadOptions
+func (o LoadOptions) getSharedConfigProfile(ctx context.Context) (string, bool, error) {
+	if len(o.SharedConfigProfile) == 0 {
+		return "", false, nil
+	}
+
+	return o.SharedConfigProfile, true, nil
+}
+
+// WithSharedConfigProfile is a helper function to construct functional options
+// that sets SharedConfigProfile on config's LoadOptions. Setting the shared
+// config profile to an empty string will result in the shared config profile
+// value being ignored.
+// If multiple WithSharedConfigProfile calls are made, the last call overrides
+// the previous call values.
+func WithSharedConfigProfile(v string) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.SharedConfigProfile = v
+		return nil
+	}
+}
+
+// getSharedConfigFiles returns SharedConfigFiles set on config's LoadOptions
+func (o LoadOptions) getSharedConfigFiles(ctx context.Context) ([]string, bool, error) {
+	if o.SharedConfigFiles == nil {
+		return nil, false, nil
+	}
+
+	return o.SharedConfigFiles, true, nil
+}
+
+// WithSharedConfigFiles is a helper function to construct functional options
+// that sets the slice of SharedConfigFiles on config's LoadOptions.
+// Setting the shared config files to a nil string slice will result in the
+// shared config files value being ignored.
+// If multiple WithSharedConfigFiles calls are made, the last call overrides
+// the previous call values.
+func WithSharedConfigFiles(v []string) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.SharedConfigFiles = v
+		return nil
+	}
+}
+
+// getSharedCredentialsFiles returns SharedCredentialsFiles set on config's LoadOptions
+func (o LoadOptions) getSharedCredentialsFiles(ctx context.Context) ([]string, bool, error) {
+	if o.SharedCredentialsFiles == nil {
+		return nil, false, nil
+	}
+
+	return o.SharedCredentialsFiles, true, nil
+}
+
+// WithSharedCredentialsFiles is a helper function to construct functional options
+// that sets the slice of SharedCredentialsFiles on config's LoadOptions.
+// Setting the shared credentials files to a nil string slice will result in the
+// shared credentials files value being ignored.
+// If multiple WithSharedCredentialsFiles calls are made, the last call overrides
+// the previous call values.
+func WithSharedCredentialsFiles(v []string) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.SharedCredentialsFiles = v
+		return nil
+	}
+}
+
+// getCustomCABundle returns CustomCABundle from LoadOptions
+func (o LoadOptions) getCustomCABundle(ctx context.Context) (io.Reader, bool, error) {
+	if o.CustomCABundle == nil {
+		return nil, false, nil
+	}
+
+	return o.CustomCABundle, true, nil
+}
+
+// WithCustomCABundle is a helper function to construct functional options
+// that sets CustomCABundle on config's LoadOptions. Setting the custom CA Bundle
+// to nil will result in the custom CA Bundle value being ignored.
+// If multiple WithCustomCABundle calls are made, the last call overrides the
+// previous call values.
+func WithCustomCABundle(v io.Reader) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.CustomCABundle = v
+		return nil
+	}
+}
+
+// UseEC2IMDSRegion provides a regionProvider that retrieves the region
+// from the EC2 Metadata service.
+type UseEC2IMDSRegion struct {
+	// If unset, will default to a generic EC2 IMDS client.
+	Client *imds.Client
+}
+
+// getRegion attempts to retrieve the region from the EC2 Metadata service.
+func (p *UseEC2IMDSRegion) getRegion(ctx context.Context) (string, bool, error) {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	client := p.Client
+	if client == nil {
+		client = imds.New(imds.Options{})
+	}
+
+	result, err := client.GetRegion(ctx, nil)
+	if err != nil {
+		return "", false, err
+	}
+	if len(result.Region) != 0 {
+		return result.Region, true, nil
+	}
+	return "", false, nil
+}
+
+// getEC2IMDSRegion returns the value of the EC2 IMDS region.
+func (o LoadOptions) getEC2IMDSRegion(ctx context.Context) (string, bool, error) {
+	if o.UseEC2IMDSRegion == nil {
+		return "", false, nil
+	}
+
+	return o.UseEC2IMDSRegion.getRegion(ctx)
+}
+
+// WithEC2IMDSRegion is a helper function to construct functional options
+// that enables resolving the EC2IMDS region. The function takes
+// in a UseEC2IMDSRegion functional option, and can be used to set the
+// EC2IMDS client which will be used to resolve the EC2IMDS region.
+// If no functional option is provided, an EC2IMDS client is built and used
+// by the resolver. If multiple WithEC2IMDSRegion calls are made, the last
+// call overrides the previous call values. Note that the WithRegion call takes
+// precedence over WithEC2IMDSRegion when resolving the region.
+func WithEC2IMDSRegion(fnOpts ...func(o *UseEC2IMDSRegion)) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.UseEC2IMDSRegion = &UseEC2IMDSRegion{}
+
+		for _, fn := range fnOpts {
+			fn(o.UseEC2IMDSRegion)
+		}
+		return nil
+	}
+}
+
+// getCredentialsProvider returns the credentials value
+func (o LoadOptions) getCredentialsProvider(ctx context.Context) (aws.CredentialsProvider, bool, error) {
+	if o.Credentials == nil {
+		return nil, false, nil
+	}
+
+	return o.Credentials, true, nil
+}
+
+// WithCredentialsProvider is a helper function to construct functional options
+// that sets the Credentials provider value on config's LoadOptions. If the
+// credentials provider is set to nil, the credentials provider value will be
+// ignored. If multiple WithCredentialsProvider calls are made, the last call
+// overrides the previous call values.
+func WithCredentialsProvider(v aws.CredentialsProvider) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.Credentials = v
+		return nil
+	}
+}
+
+// getCredentialsCacheOptions returns the wrapped function to set aws.CredentialsCacheOptions
+func (o LoadOptions) getCredentialsCacheOptions(ctx context.Context) (func(*aws.CredentialsCacheOptions), bool, error) {
+	if o.CredentialsCacheOptions == nil {
+		return nil, false, nil
+	}
+
+	return o.CredentialsCacheOptions, true, nil
+}
+
+// WithCredentialsCacheOptions is a helper function to construct functional
+// options that sets a function to modify the aws.CredentialsCacheOptions the
+// aws.CredentialsCache will be configured with, if the CredentialsCache is used
+// by the configuration loader.
+//
+// If multiple WithCredentialsCacheOptions calls are made, the last call
+// overrides the previous call values.
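+//
+// An illustrative sketch (ExpiryWindow is a field of aws.CredentialsCacheOptions):
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO(),
+//		config.WithCredentialsCacheOptions(func(o *aws.CredentialsCacheOptions) {
+//			o.ExpiryWindow = 10 * time.Minute // refresh credentials early
+//		}),
+//	)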
+func WithCredentialsCacheOptions(v func(*aws.CredentialsCacheOptions)) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.CredentialsCacheOptions = v
+		return nil
+	}
+}
+
+// getBearerAuthTokenProvider returns the bearer auth token provider value
+func (o LoadOptions) getBearerAuthTokenProvider(ctx context.Context) (smithybearer.TokenProvider, bool, error) {
+	if o.BearerAuthTokenProvider == nil {
+		return nil, false, nil
+	}
+
+	return o.BearerAuthTokenProvider, true, nil
+}
+
+// WithBearerAuthTokenProvider is a helper function to construct functional options
+// that sets the bearer auth token provider value on config's LoadOptions. If the
+// token provider is set to nil, the token provider value will be ignored.
+// If multiple WithBearerAuthTokenProvider calls are made, the last call overrides
+// the previous call values.
+func WithBearerAuthTokenProvider(v smithybearer.TokenProvider) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.BearerAuthTokenProvider = v
+		return nil
+	}
+}
+
+// getBearerAuthTokenCacheOptions returns the wrapped function to set smithybearer.TokenCacheOptions
+func (o LoadOptions) getBearerAuthTokenCacheOptions(ctx context.Context) (func(*smithybearer.TokenCacheOptions), bool, error) {
+	if o.BearerAuthTokenCacheOptions == nil {
+		return nil, false, nil
+	}
+
+	return o.BearerAuthTokenCacheOptions, true, nil
+}
+
+// WithBearerAuthTokenCacheOptions is a helper function to construct functional options
+// that sets a function to modify the TokenCacheOptions the smithy-go
+// auth/bearer#TokenCache will be configured with, if the TokenCache is used by
+// the configuration loader.
+//
+// If multiple WithBearerAuthTokenCacheOptions calls are made, the last call overrides
+// the previous call values.
+func WithBearerAuthTokenCacheOptions(v func(*smithybearer.TokenCacheOptions)) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.BearerAuthTokenCacheOptions = v
+		return nil
+	}
+}
+
+// getSSOTokenProviderOptions returns the wrapped function to set ssocreds.SSOTokenProviderOptions
+func (o LoadOptions) getSSOTokenProviderOptions(ctx context.Context) (func(*ssocreds.SSOTokenProviderOptions), bool, error) {
+	if o.SSOTokenProviderOptions == nil {
+		return nil, false, nil
+	}
+
+	return o.SSOTokenProviderOptions, true, nil
+}
+
+// WithSSOTokenProviderOptions is a helper function to construct functional
+// options that sets a function to modify the SSOTokenProviderOptions the SDK's
+// credentials/ssocreds#SSOProvider will be configured with, if the
+// SSOTokenProvider is used by the configuration loader.
+//
+// If multiple WithSSOTokenProviderOptions calls are made, the last call overrides
+// the previous call values.
+func WithSSOTokenProviderOptions(v func(*ssocreds.SSOTokenProviderOptions)) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.SSOTokenProviderOptions = v
+		return nil
+	}
+}
+
+// getProcessCredentialOptions returns the wrapped function to set processcreds.Options
+func (o LoadOptions) getProcessCredentialOptions(ctx context.Context) (func(*processcreds.Options), bool, error) {
+	if o.ProcessCredentialOptions == nil {
+		return nil, false, nil
+	}
+
+	return o.ProcessCredentialOptions, true, nil
+}
+
+// WithProcessCredentialOptions is a helper function to construct functional options
+// that sets a function to use processcreds.Options on config's LoadOptions.
+// If process credential options is set to nil, the process credential value will
+// be ignored.
If multiple WithProcessCredentialOptions calls are made, the last call +// overrides the previous call values. +func WithProcessCredentialOptions(v func(*processcreds.Options)) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.ProcessCredentialOptions = v + return nil + } +} + +// getEC2RoleCredentialOptions returns the wrapped function to set the ec2rolecreds.Options +func (o LoadOptions) getEC2RoleCredentialOptions(ctx context.Context) (func(*ec2rolecreds.Options), bool, error) { + if o.EC2RoleCredentialOptions == nil { + return nil, false, nil + } + + return o.EC2RoleCredentialOptions, true, nil +} + +// WithEC2RoleCredentialOptions is a helper function to construct functional options +// that sets a function to use ec2rolecreds.Options on config's LoadOptions. If +// EC2 role credential options is set to nil, the EC2 role credential options value +// will be ignored. If multiple WithEC2RoleCredentialOptions calls are made, +// the last call overrides the previous call values. +func WithEC2RoleCredentialOptions(v func(*ec2rolecreds.Options)) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.EC2RoleCredentialOptions = v + return nil + } +} + +// getEndpointCredentialOptions returns the wrapped function to set endpointcreds.Options +func (o LoadOptions) getEndpointCredentialOptions(context.Context) (func(*endpointcreds.Options), bool, error) { + if o.EndpointCredentialOptions == nil { + return nil, false, nil + } + + return o.EndpointCredentialOptions, true, nil +} + +// WithEndpointCredentialOptions is a helper function to construct functional options +// that sets a function to use endpointcreds.Options on config's LoadOptions. If +// endpoint credential options is set to nil, the endpoint credential options +// value will be ignored. If multiple WithEndpointCredentialOptions calls are made, +// the last call overrides the previous call values. +func WithEndpointCredentialOptions(v func(*endpointcreds.Options)) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.EndpointCredentialOptions = v + return nil + } +} + +// getWebIdentityRoleCredentialOptions returns the wrapped function +func (o LoadOptions) getWebIdentityRoleCredentialOptions(context.Context) (func(*stscreds.WebIdentityRoleOptions), bool, error) { + if o.WebIdentityRoleCredentialOptions == nil { + return nil, false, nil + } + + return o.WebIdentityRoleCredentialOptions, true, nil +} + +// WithWebIdentityRoleCredentialOptions is a helper function to construct +// functional options that sets a function to use stscreds.WebIdentityRoleOptions +// on config's LoadOptions. If web identity role credentials options is set to nil, +// the web identity role credentials value will be ignored. If multiple +// WithWebIdentityRoleCredentialOptions calls are made, the last call +// overrides the previous call values. 
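+//
+// Editor's note: illustrative sketch only, not upstream code; the session
+// name is an arbitrary placeholder.
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO(),
+//		config.WithWebIdentityRoleCredentialOptions(func(o *stscreds.WebIdentityRoleOptions) {
+//			o.RoleSessionName = "my-app-session"
+//		}),
+//	)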
+func WithWebIdentityRoleCredentialOptions(v func(*stscreds.WebIdentityRoleOptions)) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.WebIdentityRoleCredentialOptions = v
+		return nil
+	}
+}
+
+// getAssumeRoleCredentialOptions returns AssumeRoleCredentialOptions from LoadOptions
+func (o LoadOptions) getAssumeRoleCredentialOptions(context.Context) (func(options *stscreds.AssumeRoleOptions), bool, error) {
+	if o.AssumeRoleCredentialOptions == nil {
+		return nil, false, nil
+	}
+
+	return o.AssumeRoleCredentialOptions, true, nil
+}
+
+// WithAssumeRoleCredentialOptions is a helper function to construct
+// functional options that sets a function to use stscreds.AssumeRoleOptions
+// on config's LoadOptions. If the assume role credential options are set to nil,
+// the assume role credentials value will be ignored. If multiple
+// WithAssumeRoleCredentialOptions calls are made, the last call overrides
+// the previous call values.
+func WithAssumeRoleCredentialOptions(v func(*stscreds.AssumeRoleOptions)) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.AssumeRoleCredentialOptions = v
+		return nil
+	}
+}
+
+func (o LoadOptions) getHTTPClient(ctx context.Context) (HTTPClient, bool, error) {
+	if o.HTTPClient == nil {
+		return nil, false, nil
+	}
+
+	return o.HTTPClient, true, nil
+}
+
+// WithHTTPClient is a helper function to construct functional options
+// that sets HTTPClient on LoadOptions. If HTTPClient is set to nil,
+// the HTTPClient value will be ignored.
+// If multiple WithHTTPClient calls are made, the last call overrides
+// the previous call values.
+func WithHTTPClient(v HTTPClient) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.HTTPClient = v
+		return nil
+	}
+}
+
+func (o LoadOptions) getAPIOptions(ctx context.Context) ([]func(*middleware.Stack) error, bool, error) {
+	if o.APIOptions == nil {
+		return nil, false, nil
+	}
+
+	return o.APIOptions, true, nil
+}
+
+// WithAPIOptions is a helper function to construct functional options
+// that sets APIOptions on LoadOptions. If APIOptions is set to nil, the
+// APIOptions value is ignored. If multiple WithAPIOptions calls are
+// made, the last call overrides the previous call values.
+func WithAPIOptions(v []func(*middleware.Stack) error) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		if v == nil {
+			return nil
+		}
+
+		o.APIOptions = append(o.APIOptions, v...)
+		return nil
+	}
+}
+
+func (o LoadOptions) getRetryMaxAttempts(ctx context.Context) (int, bool, error) {
+	if o.RetryMaxAttempts == 0 {
+		return 0, false, nil
+	}
+
+	return o.RetryMaxAttempts, true, nil
+}
+
+// WithRetryMaxAttempts is a helper function to construct functional options that sets
+// RetryMaxAttempts on LoadOptions. If RetryMaxAttempts is unset, the RetryMaxAttempts value is
+// ignored. If multiple WithRetryMaxAttempts calls are made, the last call overrides
+// the previous call values.
+//
+// Will be ignored if LoadOptions.Retryer or WithRetryer are used.
+func WithRetryMaxAttempts(v int) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.RetryMaxAttempts = v
+		return nil
+	}
+}
+
+func (o LoadOptions) getRetryMode(ctx context.Context) (aws.RetryMode, bool, error) {
+	if o.RetryMode == "" {
+		return "", false, nil
+	}
+
+	return o.RetryMode, true, nil
+}
+
+// WithRetryMode is a helper function to construct functional options that sets
+// RetryMode on LoadOptions. If RetryMode is unset, the RetryMode value is
+// ignored.
+// If multiple WithRetryMode calls are made, the last call overrides
+// the previous call values.
+//
+// Will be ignored if LoadOptions.Retryer or WithRetryer are used.
+func WithRetryMode(v aws.RetryMode) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.RetryMode = v
+		return nil
+	}
+}
+
+func (o LoadOptions) getRetryer(ctx context.Context) (func() aws.Retryer, bool, error) {
+	if o.Retryer == nil {
+		return nil, false, nil
+	}
+
+	return o.Retryer, true, nil
+}
+
+// WithRetryer is a helper function to construct functional options
+// that sets Retryer on LoadOptions. If Retryer is set to nil, the
+// Retryer value is ignored. If multiple WithRetryer calls are
+// made, the last call overrides the previous call values.
+func WithRetryer(v func() aws.Retryer) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.Retryer = v
+		return nil
+	}
+}
+
+func (o LoadOptions) getEndpointResolver(ctx context.Context) (aws.EndpointResolver, bool, error) {
+	if o.EndpointResolver == nil {
+		return nil, false, nil
+	}
+
+	return o.EndpointResolver, true, nil
+}
+
+// WithEndpointResolver is a helper function to construct functional options
+// that sets the EndpointResolver on LoadOptions. If the EndpointResolver is set to nil,
+// the EndpointResolver value is ignored. If multiple WithEndpointResolver calls
+// are made, the last call overrides the previous call values.
+//
+// Deprecated: See WithEndpointResolverWithOptions
+func WithEndpointResolver(v aws.EndpointResolver) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.EndpointResolver = v
+		return nil
+	}
+}
+
+func (o LoadOptions) getEndpointResolverWithOptions(ctx context.Context) (aws.EndpointResolverWithOptions, bool, error) {
+	if o.EndpointResolverWithOptions == nil {
+		return nil, false, nil
+	}
+
+	return o.EndpointResolverWithOptions, true, nil
+}
+
+// WithEndpointResolverWithOptions is a helper function to construct functional options
+// that sets the EndpointResolverWithOptions on LoadOptions. If the
+// EndpointResolverWithOptions is set to nil, the EndpointResolverWithOptions value is
+// ignored. If multiple WithEndpointResolverWithOptions calls are made, the last call
+// overrides the previous call values.
+func WithEndpointResolverWithOptions(v aws.EndpointResolverWithOptions) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.EndpointResolverWithOptions = v
+		return nil
+	}
+}
+
+func (o LoadOptions) getLogger(ctx context.Context) (logging.Logger, bool, error) {
+	if o.Logger == nil {
+		return nil, false, nil
+	}
+
+	return o.Logger, true, nil
+}
+
+// WithLogger is a helper function to construct functional options
+// that sets Logger on LoadOptions. If Logger is set to nil, the
+// Logger value will be ignored. If multiple WithLogger calls are made,
+// the last call overrides the previous call values.
+func WithLogger(v logging.Logger) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.Logger = v
+		return nil
+	}
+}
+
+func (o LoadOptions) getClientLogMode(ctx context.Context) (aws.ClientLogMode, bool, error) {
+	if o.ClientLogMode == nil {
+		return 0, false, nil
+	}
+
+	return *o.ClientLogMode, true, nil
+}
+
+// WithClientLogMode is a helper function to construct functional options
+// that sets the client log mode on LoadOptions. If the client log mode is set to nil,
+// the client log mode value will be ignored. If multiple WithClientLogMode calls are made,
+// the last call overrides the previous call values.
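+//
+// Editor's note: illustrative sketch only, not upstream code.
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO(),
+//		config.WithClientLogMode(aws.LogRetries|aws.LogRequest),
+//	)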
+func WithClientLogMode(v aws.ClientLogMode) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.ClientLogMode = &v + return nil + } +} + +func (o LoadOptions) getLogConfigurationWarnings(ctx context.Context) (v bool, found bool, err error) { + if o.LogConfigurationWarnings == nil { + return false, false, nil + } + return *o.LogConfigurationWarnings, true, nil +} + +// WithLogConfigurationWarnings is a helper function to construct +// functional options that can be used to set LogConfigurationWarnings +// on LoadOptions. +// +// If multiple WithLogConfigurationWarnings calls are made, the last call +// overrides the previous call values. +func WithLogConfigurationWarnings(v bool) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.LogConfigurationWarnings = &v + return nil + } +} + +// GetS3UseARNRegion returns whether to allow ARNs to direct the region +// the S3 client's requests are sent to. +func (o LoadOptions) GetS3UseARNRegion(ctx context.Context) (v bool, found bool, err error) { + if o.S3UseARNRegion == nil { + return false, false, nil + } + return *o.S3UseARNRegion, true, nil +} + +// WithS3UseARNRegion is a helper function to construct functional options +// that can be used to set S3UseARNRegion on LoadOptions. +// If multiple WithS3UseARNRegion calls are made, the last call overrides +// the previous call values. +func WithS3UseARNRegion(v bool) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.S3UseARNRegion = &v + return nil + } +} + +// GetS3DisableMultiRegionAccessPoints returns whether to disable +// the S3 multi-region access points feature. +func (o LoadOptions) GetS3DisableMultiRegionAccessPoints(ctx context.Context) (v bool, found bool, err error) { + if o.S3DisableMultiRegionAccessPoints == nil { + return false, false, nil + } + return *o.S3DisableMultiRegionAccessPoints, true, nil +} + +// WithS3DisableMultiRegionAccessPoints is a helper function to construct functional options +// that can be used to set S3DisableMultiRegionAccessPoints on LoadOptions. +// If multiple WithS3DisableMultiRegionAccessPoints calls are made, the last call overrides +// the previous call values. +func WithS3DisableMultiRegionAccessPoints(v bool) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.S3DisableMultiRegionAccessPoints = &v + return nil + } +} + +// GetEnableEndpointDiscovery returns if the EnableEndpointDiscovery flag is set. +func (o LoadOptions) GetEnableEndpointDiscovery(ctx context.Context) (value aws.EndpointDiscoveryEnableState, ok bool, err error) { + if o.EnableEndpointDiscovery == aws.EndpointDiscoveryUnset { + return aws.EndpointDiscoveryUnset, false, nil + } + return o.EnableEndpointDiscovery, true, nil +} + +// WithEndpointDiscovery is a helper function to construct functional options +// that can be used to enable endpoint discovery on LoadOptions for supported clients. +// If multiple WithEndpointDiscovery calls are made, the last call overrides +// the previous call values. 
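+//
+// Editor's note: illustrative sketch only, not upstream code.
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO(),
+//		config.WithEndpointDiscovery(aws.EndpointDiscoveryEnabled),
+//	)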
+func WithEndpointDiscovery(v aws.EndpointDiscoveryEnableState) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.EnableEndpointDiscovery = v
+		return nil
+	}
+}
+
+// getSSOProviderOptions returns the SSOProviderOptions from LoadOptions
+func (o LoadOptions) getSSOProviderOptions(context.Context) (func(options *ssocreds.Options), bool, error) {
+	if o.SSOProviderOptions == nil {
+		return nil, false, nil
+	}
+
+	return o.SSOProviderOptions, true, nil
+}
+
+// WithSSOProviderOptions is a helper function to construct
+// functional options that sets a function to use ssocreds.Options
+// on config's LoadOptions. If the SSO credential provider options are set to nil,
+// the SSO provider options value will be ignored. If multiple
+// WithSSOProviderOptions calls are made, the last call overrides
+// the previous call values.
+func WithSSOProviderOptions(v func(*ssocreds.Options)) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.SSOProviderOptions = v
+		return nil
+	}
+}
+
+// GetEC2IMDSClientEnableState implements an EC2IMDSClientEnableState option resolver interface.
+func (o LoadOptions) GetEC2IMDSClientEnableState() (imds.ClientEnableState, bool, error) {
+	if o.EC2IMDSClientEnableState == imds.ClientDefaultEnableState {
+		return imds.ClientDefaultEnableState, false, nil
+	}
+
+	return o.EC2IMDSClientEnableState, true, nil
+}
+
+// GetEC2IMDSEndpointMode implements an EC2IMDSEndpointMode option resolver interface.
+func (o LoadOptions) GetEC2IMDSEndpointMode() (imds.EndpointModeState, bool, error) {
+	if o.EC2IMDSEndpointMode == imds.EndpointModeStateUnset {
+		return imds.EndpointModeStateUnset, false, nil
+	}
+
+	return o.EC2IMDSEndpointMode, true, nil
+}
+
+// GetEC2IMDSEndpoint implements an EC2IMDSEndpoint option resolver interface.
+func (o LoadOptions) GetEC2IMDSEndpoint() (string, bool, error) {
+	if len(o.EC2IMDSEndpoint) == 0 {
+		return "", false, nil
+	}
+
+	return o.EC2IMDSEndpoint, true, nil
+}
+
+// WithEC2IMDSClientEnableState is a helper function to construct functional options that sets the EC2IMDSClientEnableState.
+func WithEC2IMDSClientEnableState(v imds.ClientEnableState) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.EC2IMDSClientEnableState = v
+		return nil
+	}
+}
+
+// WithEC2IMDSEndpointMode is a helper function to construct functional options that sets the EC2IMDSEndpointMode.
+func WithEC2IMDSEndpointMode(v imds.EndpointModeState) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.EC2IMDSEndpointMode = v
+		return nil
+	}
+}
+
+// WithEC2IMDSEndpoint is a helper function to construct functional options that sets the EC2IMDSEndpoint.
+func WithEC2IMDSEndpoint(v string) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.EC2IMDSEndpoint = v
+		return nil
+	}
+}
+
+// WithUseDualStackEndpoint is a helper function to construct
+// functional options that can be used to set UseDualStackEndpoint on LoadOptions.
+func WithUseDualStackEndpoint(v aws.DualStackEndpointState) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.UseDualStackEndpoint = v
+		return nil
+	}
+}
+
+// GetUseDualStackEndpoint returns whether the service's dual-stack endpoint should be
+// used for requests.
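+//
+// Editor's note: illustrative sketch only, not upstream code; it shows the
+// WithUseDualStackEndpoint option defined above.
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO(),
+//		config.WithUseDualStackEndpoint(aws.DualStackEndpointStateEnabled),
+//	)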
+func (o LoadOptions) GetUseDualStackEndpoint(ctx context.Context) (value aws.DualStackEndpointState, found bool, err error) {
+	if o.UseDualStackEndpoint == aws.DualStackEndpointStateUnset {
+		return aws.DualStackEndpointStateUnset, false, nil
+	}
+	return o.UseDualStackEndpoint, true, nil
+}
+
+// WithUseFIPSEndpoint is a helper function to construct
+// functional options that can be used to set UseFIPSEndpoint on LoadOptions.
+func WithUseFIPSEndpoint(v aws.FIPSEndpointState) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.UseFIPSEndpoint = v
+		return nil
+	}
+}
+
+// GetUseFIPSEndpoint returns whether the service's FIPS endpoint should be
+// used for requests.
+func (o LoadOptions) GetUseFIPSEndpoint(ctx context.Context) (value aws.FIPSEndpointState, found bool, err error) {
+	if o.UseFIPSEndpoint == aws.FIPSEndpointStateUnset {
+		return aws.FIPSEndpointStateUnset, false, nil
+	}
+	return o.UseFIPSEndpoint, true, nil
+}
+
+// WithDefaultsMode sets the SDK defaults configuration mode to the value provided.
+//
+// Zero or more functional options can be provided to configure how environment
+// discovery is performed when using aws.DefaultsModeAuto.
+func WithDefaultsMode(mode aws.DefaultsMode, optFns ...func(options *DefaultsModeOptions)) LoadOptionsFunc {
+	do := DefaultsModeOptions{
+		Mode: mode,
+	}
+	for _, fn := range optFns {
+		fn(&do)
+	}
+	return func(options *LoadOptions) error {
+		options.DefaultsModeOptions = do
+		return nil
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/local.go b/vendor/github.com/aws/aws-sdk-go-v2/config/local.go
new file mode 100644
index 00000000000..b629137c821
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/local.go
@@ -0,0 +1,51 @@
+package config
+
+import (
+	"fmt"
+	"net"
+	"net/url"
+)
+
+var lookupHostFn = net.LookupHost
+
+func isLoopbackHost(host string) (bool, error) {
+	ip := net.ParseIP(host)
+	if ip != nil {
+		return ip.IsLoopback(), nil
+	}
+
+	// Host is not an ip, perform lookup
+	addrs, err := lookupHostFn(host)
+	if err != nil {
+		return false, err
+	}
+	if len(addrs) == 0 {
+		return false, fmt.Errorf("no addrs found for host, %s", host)
+	}
+
+	for _, addr := range addrs {
+		if !net.ParseIP(addr).IsLoopback() {
+			return false, nil
+		}
+	}
+
+	return true, nil
+}
+
+func validateLocalURL(v string) error {
+	u, err := url.Parse(v)
+	if err != nil {
+		return err
+	}
+
+	host := u.Hostname()
+	if len(host) == 0 {
+		return fmt.Errorf("unable to parse host from local HTTP cred provider URL")
+	} else if isLoopback, err := isLoopbackHost(host); err != nil {
+		return fmt.Errorf("failed to resolve host %q, %v", host, err)
+	} else if !isLoopback {
+		return fmt.Errorf("invalid endpoint host, %q, only hosts resolving to loopback addresses are allowed", host)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go
new file mode 100644
index 00000000000..d5235846011
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go
@@ -0,0 +1,670 @@
+package config
+
+import (
+	"context"
+	"io"
+	"net/http"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds"
+	"github.com/aws/aws-sdk-go-v2/credentials/endpointcreds"
+	"github.com/aws/aws-sdk-go-v2/credentials/processcreds"
+	"github.com/aws/aws-sdk-go-v2/credentials/ssocreds"
+	"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
+	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+	smithybearer "github.com/aws/smithy-go/auth/bearer"
+	"github.com/aws/smithy-go/logging"
+	"github.com/aws/smithy-go/middleware"
+)
+
+// sharedConfigProfileProvider provides access to the shared config profile
+// name external configuration value.
+type sharedConfigProfileProvider interface {
+	getSharedConfigProfile(ctx context.Context) (string, bool, error)
+}
+
+// getSharedConfigProfile searches the configs for a sharedConfigProfileProvider
+// and returns the value if found. Returns an error if a provider fails before a
+// value is found.
+func getSharedConfigProfile(ctx context.Context, configs configs) (value string, found bool, err error) {
+	for _, cfg := range configs {
+		if p, ok := cfg.(sharedConfigProfileProvider); ok {
+			value, found, err = p.getSharedConfigProfile(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// sharedConfigFilesProvider provides access to the shared config filenames
+// external configuration value.
+type sharedConfigFilesProvider interface {
+	getSharedConfigFiles(ctx context.Context) ([]string, bool, error)
+}
+
+// getSharedConfigFiles searches the configs for a sharedConfigFilesProvider
+// and returns the value if found. Returns an error if a provider fails before a
+// value is found.
+func getSharedConfigFiles(ctx context.Context, configs configs) (value []string, found bool, err error) {
+	for _, cfg := range configs {
+		if p, ok := cfg.(sharedConfigFilesProvider); ok {
+			value, found, err = p.getSharedConfigFiles(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+
+	return
+}
+
+// sharedCredentialsFilesProvider provides access to the shared credentials filenames
+// external configuration value.
+type sharedCredentialsFilesProvider interface {
+	getSharedCredentialsFiles(ctx context.Context) ([]string, bool, error)
+}
+
+// getSharedCredentialsFiles searches the configs for a sharedCredentialsFilesProvider
+// and returns the value if found. Returns an error if a provider fails before a
+// value is found.
+func getSharedCredentialsFiles(ctx context.Context, configs configs) (value []string, found bool, err error) {
+	for _, cfg := range configs {
+		if p, ok := cfg.(sharedCredentialsFilesProvider); ok {
+			value, found, err = p.getSharedCredentialsFiles(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+
+	return
+}
+
+// customCABundleProvider provides access to the custom CA bundle PEM bytes.
+type customCABundleProvider interface {
+	getCustomCABundle(ctx context.Context) (io.Reader, bool, error)
+}
+
+// getCustomCABundle searches the configs for a customCABundleProvider
+// and returns the value if found. Returns an error if a provider fails before a
+// value is found.
+func getCustomCABundle(ctx context.Context, configs configs) (value io.Reader, found bool, err error) {
+	for _, cfg := range configs {
+		if p, ok := cfg.(customCABundleProvider); ok {
+			value, found, err = p.getCustomCABundle(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+
+	return
+}
+
+// regionProvider provides access to the region external configuration value.
+type regionProvider interface {
+	getRegion(ctx context.Context) (string, bool, error)
+}
+
+// getRegion searches the configs for a regionProvider and returns the value
+// if found. Returns an error if a provider fails before a value is found.
+func getRegion(ctx context.Context, configs configs) (value string, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(regionProvider); ok { + value, found, err = p.getRegion(ctx) + if err != nil || found { + break + } + } + } + return +} + +// IgnoreConfiguredEndpointsProvider is needed to search for all providers +// that provide a flag to disable configured endpoints. +type IgnoreConfiguredEndpointsProvider interface { + GetIgnoreConfiguredEndpoints(ctx context.Context) (bool, bool, error) +} + +// GetIgnoreConfiguredEndpoints is used in knowing when to disable configured +// endpoints feature. +func GetIgnoreConfiguredEndpoints(ctx context.Context, configs []interface{}) (value bool, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(IgnoreConfiguredEndpointsProvider); ok { + value, found, err = p.GetIgnoreConfiguredEndpoints(ctx) + if err != nil || found { + break + } + } + } + return +} + +type baseEndpointProvider interface { + getBaseEndpoint(ctx context.Context) (string, bool, error) +} + +func getBaseEndpoint(ctx context.Context, configs configs) (value string, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(baseEndpointProvider); ok { + value, found, err = p.getBaseEndpoint(ctx) + if err != nil || found { + break + } + } + } + return +} + +type servicesObjectProvider interface { + getServicesObject(ctx context.Context) (map[string]map[string]string, bool, error) +} + +func getServicesObject(ctx context.Context, configs configs) (value map[string]map[string]string, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(servicesObjectProvider); ok { + value, found, err = p.getServicesObject(ctx) + if err != nil || found { + break + } + } + } + return +} + +// appIDProvider provides access to the sdk app ID value +type appIDProvider interface { + getAppID(ctx context.Context) (string, bool, error) +} + +func getAppID(ctx context.Context, configs configs) (value string, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(appIDProvider); ok { + value, found, err = p.getAppID(ctx) + if err != nil || found { + break + } + } + } + return +} + +// ec2IMDSRegionProvider provides access to the ec2 imds region +// configuration value +type ec2IMDSRegionProvider interface { + getEC2IMDSRegion(ctx context.Context) (string, bool, error) +} + +// getEC2IMDSRegion searches the configs for a ec2IMDSRegionProvider and +// returns the value if found. Returns an error if a provider fails before +// a value is found. +func getEC2IMDSRegion(ctx context.Context, configs configs) (region string, found bool, err error) { + for _, cfg := range configs { + if provider, ok := cfg.(ec2IMDSRegionProvider); ok { + region, found, err = provider.getEC2IMDSRegion(ctx) + if err != nil || found { + break + } + } + } + return +} + +// credentialsProviderProvider provides access to the credentials external +// configuration value. +type credentialsProviderProvider interface { + getCredentialsProvider(ctx context.Context) (aws.CredentialsProvider, bool, error) +} + +// getCredentialsProvider searches the configs for a credentialsProviderProvider +// and returns the value if found. Returns an error if a provider fails before a +// value is found. 
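+//
+// Editor's note: illustrative sketch only, not upstream code; the key values
+// are placeholders.
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO(),
+//		config.WithCredentialsProvider(
+//			credentials.NewStaticCredentialsProvider("AKID", "SECRET", ""),
+//		),
+//	)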
+func getCredentialsProvider(ctx context.Context, configs configs) (p aws.CredentialsProvider, found bool, err error) {
+	for _, cfg := range configs {
+		if provider, ok := cfg.(credentialsProviderProvider); ok {
+			p, found, err = provider.getCredentialsProvider(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// credentialsCacheOptionsProvider is an interface for retrieving a function for setting
+// the aws.CredentialsCacheOptions.
+type credentialsCacheOptionsProvider interface {
+	getCredentialsCacheOptions(ctx context.Context) (func(*aws.CredentialsCacheOptions), bool, error)
+}
+
+// getCredentialsCacheOptionsProvider searches the config sources for a
+// credentialsCacheOptionsProvider and returns the first function found for
+// setting the aws.CredentialsCacheOptions.
+func getCredentialsCacheOptionsProvider(ctx context.Context, configs configs) (
+	f func(*aws.CredentialsCacheOptions), found bool, err error,
+) {
+	for _, config := range configs {
+		if p, ok := config.(credentialsCacheOptionsProvider); ok {
+			f, found, err = p.getCredentialsCacheOptions(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// bearerAuthTokenProviderProvider provides access to the bearer authentication
+// token external configuration value.
+type bearerAuthTokenProviderProvider interface {
+	getBearerAuthTokenProvider(context.Context) (smithybearer.TokenProvider, bool, error)
+}
+
+// getBearerAuthTokenProvider searches the config sources for a
+// bearerAuthTokenProviderProvider and returns the value if found. Returns an
+// error if a provider fails before a value is found.
+func getBearerAuthTokenProvider(ctx context.Context, configs configs) (p smithybearer.TokenProvider, found bool, err error) {
+	for _, cfg := range configs {
+		if provider, ok := cfg.(bearerAuthTokenProviderProvider); ok {
+			p, found, err = provider.getBearerAuthTokenProvider(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// bearerAuthTokenCacheOptionsProvider is an interface for retrieving a function for
+// setting the smithy-go auth/bearer#TokenCacheOptions.
+type bearerAuthTokenCacheOptionsProvider interface {
+	getBearerAuthTokenCacheOptions(context.Context) (func(*smithybearer.TokenCacheOptions), bool, error)
+}
+
+// getBearerAuthTokenCacheOptions searches the config sources for a
+// bearerAuthTokenCacheOptionsProvider and returns the first function found for
+// setting the smithy-go auth/bearer#TokenCacheOptions.
+func getBearerAuthTokenCacheOptions(ctx context.Context, configs configs) (
+	f func(*smithybearer.TokenCacheOptions), found bool, err error,
+) {
+	for _, config := range configs {
+		if p, ok := config.(bearerAuthTokenCacheOptionsProvider); ok {
+			f, found, err = p.getBearerAuthTokenCacheOptions(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// ssoTokenProviderOptionsProvider is an interface for retrieving a function for
+// setting the SDK's credentials/ssocreds#SSOTokenProviderOptions.
+type ssoTokenProviderOptionsProvider interface {
+	getSSOTokenProviderOptions(context.Context) (func(*ssocreds.SSOTokenProviderOptions), bool, error)
+}
+
+// getSSOTokenProviderOptions searches the config sources for an
+// ssoTokenProviderOptionsProvider and returns the first function found for
+// setting the SDK's credentials/ssocreds#SSOTokenProviderOptions.
+func getSSOTokenProviderOptions(ctx context.Context, configs configs) (
+	f func(*ssocreds.SSOTokenProviderOptions), found bool, err error,
+) {
+	for _, config := range configs {
+		if p, ok := config.(ssoTokenProviderOptionsProvider); ok {
+			f, found, err = p.getSSOTokenProviderOptions(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// processCredentialOptions is an interface for retrieving a function for setting
+// the processcreds.Options.
+type processCredentialOptions interface {
+	getProcessCredentialOptions(ctx context.Context) (func(*processcreds.Options), bool, error)
+}
+
+// getProcessCredentialOptions searches the slice of configs and returns the first function found
+func getProcessCredentialOptions(ctx context.Context, configs configs) (f func(*processcreds.Options), found bool, err error) {
+	for _, config := range configs {
+		if p, ok := config.(processCredentialOptions); ok {
+			f, found, err = p.getProcessCredentialOptions(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// ec2RoleCredentialOptionsProvider is an interface for retrieving a function
+// for setting the ec2rolecreds.Provider options.
+type ec2RoleCredentialOptionsProvider interface {
+	getEC2RoleCredentialOptions(ctx context.Context) (func(*ec2rolecreds.Options), bool, error)
+}
+
+// getEC2RoleCredentialProviderOptions searches the slice of configs and returns the first function found
+func getEC2RoleCredentialProviderOptions(ctx context.Context, configs configs) (f func(*ec2rolecreds.Options), found bool, err error) {
+	for _, config := range configs {
+		if p, ok := config.(ec2RoleCredentialOptionsProvider); ok {
+			f, found, err = p.getEC2RoleCredentialOptions(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// defaultRegionProvider is an interface for retrieving a default region if a region was not resolved from other sources
+type defaultRegionProvider interface {
+	getDefaultRegion(ctx context.Context) (string, bool, error)
+}
+
+// getDefaultRegion searches the slice of configs and returns the first fallback region found
+func getDefaultRegion(ctx context.Context, configs configs) (value string, found bool, err error) {
+	for _, config := range configs {
+		if p, ok := config.(defaultRegionProvider); ok {
+			value, found, err = p.getDefaultRegion(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// endpointCredentialOptionsProvider is an interface for retrieving a function for setting
+// the endpointcreds.ProviderOptions.
+type endpointCredentialOptionsProvider interface {
+	getEndpointCredentialOptions(ctx context.Context) (func(*endpointcreds.Options), bool, error)
+}
+
+// getEndpointCredentialProviderOptions searches the slice of configs and returns the first function found
+func getEndpointCredentialProviderOptions(ctx context.Context, configs configs) (f func(*endpointcreds.Options), found bool, err error) {
+	for _, config := range configs {
+		if p, ok := config.(endpointCredentialOptionsProvider); ok {
+			f, found, err = p.getEndpointCredentialOptions(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// webIdentityRoleCredentialOptionsProvider is an interface for retrieving a function for setting
+// the stscreds.WebIdentityRoleProvider.
+type webIdentityRoleCredentialOptionsProvider interface { + getWebIdentityRoleCredentialOptions(ctx context.Context) (func(*stscreds.WebIdentityRoleOptions), bool, error) +} + +// getWebIdentityCredentialProviderOptions searches the slice of configs and returns the first function found +func getWebIdentityCredentialProviderOptions(ctx context.Context, configs configs) (f func(*stscreds.WebIdentityRoleOptions), found bool, err error) { + for _, config := range configs { + if p, ok := config.(webIdentityRoleCredentialOptionsProvider); ok { + f, found, err = p.getWebIdentityRoleCredentialOptions(ctx) + if err != nil || found { + break + } + } + } + return +} + +// assumeRoleCredentialOptionsProvider is an interface for retrieving a function for setting +// the stscreds.AssumeRoleOptions. +type assumeRoleCredentialOptionsProvider interface { + getAssumeRoleCredentialOptions(ctx context.Context) (func(*stscreds.AssumeRoleOptions), bool, error) +} + +// getAssumeRoleCredentialProviderOptions searches the slice of configs and returns the first function found +func getAssumeRoleCredentialProviderOptions(ctx context.Context, configs configs) (f func(*stscreds.AssumeRoleOptions), found bool, err error) { + for _, config := range configs { + if p, ok := config.(assumeRoleCredentialOptionsProvider); ok { + f, found, err = p.getAssumeRoleCredentialOptions(ctx) + if err != nil || found { + break + } + } + } + return +} + +// HTTPClient is an HTTP client implementation +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +// httpClientProvider is an interface for retrieving HTTPClient +type httpClientProvider interface { + getHTTPClient(ctx context.Context) (HTTPClient, bool, error) +} + +// getHTTPClient searches the slice of configs and returns the HTTPClient set on configs +func getHTTPClient(ctx context.Context, configs configs) (client HTTPClient, found bool, err error) { + for _, config := range configs { + if p, ok := config.(httpClientProvider); ok { + client, found, err = p.getHTTPClient(ctx) + if err != nil || found { + break + } + } + } + return +} + +// apiOptionsProvider is an interface for retrieving APIOptions +type apiOptionsProvider interface { + getAPIOptions(ctx context.Context) ([]func(*middleware.Stack) error, bool, error) +} + +// getAPIOptions searches the slice of configs and returns the APIOptions set on configs +func getAPIOptions(ctx context.Context, configs configs) (apiOptions []func(*middleware.Stack) error, found bool, err error) { + for _, config := range configs { + if p, ok := config.(apiOptionsProvider); ok { + // retrieve APIOptions from configs and set it on cfg + apiOptions, found, err = p.getAPIOptions(ctx) + if err != nil || found { + break + } + } + } + return +} + +// endpointResolverProvider is an interface for retrieving an aws.EndpointResolver from a configuration source +type endpointResolverProvider interface { + getEndpointResolver(ctx context.Context) (aws.EndpointResolver, bool, error) +} + +// getEndpointResolver searches the provided config sources for a EndpointResolverFunc that can be used +// to configure the aws.Config.EndpointResolver value. 
+func getEndpointResolver(ctx context.Context, configs configs) (f aws.EndpointResolver, found bool, err error) {
+	for _, c := range configs {
+		if p, ok := c.(endpointResolverProvider); ok {
+			f, found, err = p.getEndpointResolver(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// endpointResolverWithOptionsProvider is an interface for retrieving an aws.EndpointResolverWithOptions from a configuration source
+type endpointResolverWithOptionsProvider interface {
+	getEndpointResolverWithOptions(ctx context.Context) (aws.EndpointResolverWithOptions, bool, error)
+}
+
+// getEndpointResolverWithOptions searches the provided config sources for an aws.EndpointResolverWithOptions
+// that can be used to configure the aws.Config.EndpointResolverWithOptions value.
+func getEndpointResolverWithOptions(ctx context.Context, configs configs) (f aws.EndpointResolverWithOptions, found bool, err error) {
+	for _, c := range configs {
+		if p, ok := c.(endpointResolverWithOptionsProvider); ok {
+			f, found, err = p.getEndpointResolverWithOptions(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// loggerProvider is an interface for retrieving a logging.Logger from a configuration source.
+type loggerProvider interface {
+	getLogger(ctx context.Context) (logging.Logger, bool, error)
+}
+
+// getLogger searches the provided config sources for a logging.Logger that can be used
+// to configure the aws.Config.Logger value.
+func getLogger(ctx context.Context, configs configs) (l logging.Logger, found bool, err error) {
+	for _, c := range configs {
+		if p, ok := c.(loggerProvider); ok {
+			l, found, err = p.getLogger(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// clientLogModeProvider is an interface for retrieving the aws.ClientLogMode from a configuration source.
+type clientLogModeProvider interface {
+	getClientLogMode(ctx context.Context) (aws.ClientLogMode, bool, error)
+}
+
+func getClientLogMode(ctx context.Context, configs configs) (m aws.ClientLogMode, found bool, err error) {
+	for _, c := range configs {
+		if p, ok := c.(clientLogModeProvider); ok {
+			m, found, err = p.getClientLogMode(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// retryProvider is a configuration provider for a custom Retryer.
+type retryProvider interface {
+	getRetryer(ctx context.Context) (func() aws.Retryer, bool, error)
+}
+
+func getRetryer(ctx context.Context, configs configs) (v func() aws.Retryer, found bool, err error) {
+	for _, c := range configs {
+		if p, ok := c.(retryProvider); ok {
+			v, found, err = p.getRetryer(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// logConfigurationWarningsProvider is a configuration provider for
+// retrieving a boolean indicating whether configuration issues should
+// be logged when loading from config sources
+type logConfigurationWarningsProvider interface {
+	getLogConfigurationWarnings(ctx context.Context) (bool, bool, error)
+}
+
+func getLogConfigurationWarnings(ctx context.Context, configs configs) (v bool, found bool, err error) {
+	for _, c := range configs {
+		if p, ok := c.(logConfigurationWarningsProvider); ok {
+			v, found, err = p.getLogConfigurationWarnings(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// ssoCredentialOptionsProvider is an interface for retrieving a function for setting
+// the ssocreds.Options.
+type ssoCredentialOptionsProvider interface { + getSSOProviderOptions(context.Context) (func(*ssocreds.Options), bool, error) +} + +func getSSOProviderOptions(ctx context.Context, configs configs) (v func(options *ssocreds.Options), found bool, err error) { + for _, c := range configs { + if p, ok := c.(ssoCredentialOptionsProvider); ok { + v, found, err = p.getSSOProviderOptions(ctx) + if err != nil || found { + break + } + } + } + return v, found, err +} + +type defaultsModeIMDSClientProvider interface { + getDefaultsModeIMDSClient(context.Context) (*imds.Client, bool, error) +} + +func getDefaultsModeIMDSClient(ctx context.Context, configs configs) (v *imds.Client, found bool, err error) { + for _, c := range configs { + if p, ok := c.(defaultsModeIMDSClientProvider); ok { + v, found, err = p.getDefaultsModeIMDSClient(ctx) + if err != nil || found { + break + } + } + } + return v, found, err +} + +type defaultsModeProvider interface { + getDefaultsMode(context.Context) (aws.DefaultsMode, bool, error) +} + +func getDefaultsMode(ctx context.Context, configs configs) (v aws.DefaultsMode, found bool, err error) { + for _, c := range configs { + if p, ok := c.(defaultsModeProvider); ok { + v, found, err = p.getDefaultsMode(ctx) + if err != nil || found { + break + } + } + } + return v, found, err +} + +type retryMaxAttemptsProvider interface { + GetRetryMaxAttempts(context.Context) (int, bool, error) +} + +func getRetryMaxAttempts(ctx context.Context, configs configs) (v int, found bool, err error) { + for _, c := range configs { + if p, ok := c.(retryMaxAttemptsProvider); ok { + v, found, err = p.GetRetryMaxAttempts(ctx) + if err != nil || found { + break + } + } + } + return v, found, err +} + +type retryModeProvider interface { + GetRetryMode(context.Context) (aws.RetryMode, bool, error) +} + +func getRetryMode(ctx context.Context, configs configs) (v aws.RetryMode, found bool, err error) { + for _, c := range configs { + if p, ok := c.(retryModeProvider); ok { + v, found, err = p.GetRetryMode(ctx) + if err != nil || found { + break + } + } + } + return v, found, err +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go new file mode 100644 index 00000000000..b5a74b23197 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go @@ -0,0 +1,341 @@ +package config + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net/http" + "os" + + "github.com/aws/aws-sdk-go-v2/aws" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" + "github.com/aws/smithy-go/logging" +) + +// resolveDefaultAWSConfig will write default configuration values into the cfg +// value. It will write the default values, overwriting any previous value. +// +// This should be used as the first resolver in the slice of resolvers when +// resolving external configuration. +func resolveDefaultAWSConfig(ctx context.Context, cfg *aws.Config, cfgs configs) error { + var sources []interface{} + for _, s := range cfgs { + sources = append(sources, s) + } + + *cfg = aws.Config{ + Credentials: aws.AnonymousCredentials{}, + Logger: logging.NewStandardLogger(os.Stderr), + ConfigSources: sources, + } + return nil +} + +// resolveCustomCABundle extracts the first instance of a custom CA bundle filename +// from the external configurations. It will update the HTTP Client's builder +// to be configured with the custom CA bundle. 
+// +// Config provider used: +// * customCABundleProvider +func resolveCustomCABundle(ctx context.Context, cfg *aws.Config, cfgs configs) error { + pemCerts, found, err := getCustomCABundle(ctx, cfgs) + if err != nil { + // TODO error handling, What is the best way to handle this? + // capture previous errors continue. error out if all errors + return err + } + if !found { + return nil + } + + if cfg.HTTPClient == nil { + cfg.HTTPClient = awshttp.NewBuildableClient() + } + + trOpts, ok := cfg.HTTPClient.(*awshttp.BuildableClient) + if !ok { + return fmt.Errorf("unable to add custom RootCAs HTTPClient, "+ + "has no WithTransportOptions, %T", cfg.HTTPClient) + } + + var appendErr error + client := trOpts.WithTransportOptions(func(tr *http.Transport) { + if tr.TLSClientConfig == nil { + tr.TLSClientConfig = &tls.Config{} + } + if tr.TLSClientConfig.RootCAs == nil { + tr.TLSClientConfig.RootCAs = x509.NewCertPool() + } + + b, err := ioutil.ReadAll(pemCerts) + if err != nil { + appendErr = fmt.Errorf("failed to read custom CA bundle PEM file") + } + + if !tr.TLSClientConfig.RootCAs.AppendCertsFromPEM(b) { + appendErr = fmt.Errorf("failed to load custom CA bundle PEM file") + } + }) + if appendErr != nil { + return appendErr + } + + cfg.HTTPClient = client + return err +} + +// resolveRegion extracts the first instance of a Region from the configs slice. +// +// Config providers used: +// * regionProvider +func resolveRegion(ctx context.Context, cfg *aws.Config, configs configs) error { + v, found, err := getRegion(ctx, configs) + if err != nil { + // TODO error handling, What is the best way to handle this? + // capture previous errors continue. error out if all errors + return err + } + if !found { + return nil + } + + cfg.Region = v + return nil +} + +func resolveBaseEndpoint(ctx context.Context, cfg *aws.Config, configs configs) error { + var downcastCfgSources []interface{} + for _, cs := range configs { + downcastCfgSources = append(downcastCfgSources, interface{}(cs)) + } + + if val, found, err := GetIgnoreConfiguredEndpoints(ctx, downcastCfgSources); found && val && err == nil { + cfg.BaseEndpoint = nil + return nil + } + + v, found, err := getBaseEndpoint(ctx, configs) + if err != nil { + return err + } + + if !found { + return nil + } + cfg.BaseEndpoint = aws.String(v) + return nil +} + +// resolveAppID extracts the sdk app ID from the configs slice's SharedConfig or env var +func resolveAppID(ctx context.Context, cfg *aws.Config, configs configs) error { + ID, _, err := getAppID(ctx, configs) + if err != nil { + return err + } + + cfg.AppID = ID + return nil +} + +// resolveDefaultRegion extracts the first instance of a default region and sets `aws.Config.Region` to the default +// region if region had not been resolved from other sources. +func resolveDefaultRegion(ctx context.Context, cfg *aws.Config, configs configs) error { + if len(cfg.Region) > 0 { + return nil + } + + v, found, err := getDefaultRegion(ctx, configs) + if err != nil { + return err + } + if !found { + return nil + } + + cfg.Region = v + + return nil +} + +// resolveHTTPClient extracts the first instance of a HTTPClient and sets `aws.Config.HTTPClient` to the HTTPClient instance +// if one has not been resolved from other sources. 
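+//
+// Editor's note: illustrative sketch only, not upstream code; the 30-second
+// timeout is an arbitrary assumption.
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO(),
+//		config.WithHTTPClient(awshttp.NewBuildableClient().WithTimeout(30*time.Second)),
+//	)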
+func resolveHTTPClient(ctx context.Context, cfg *aws.Config, configs configs) error {
+	c, found, err := getHTTPClient(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if !found {
+		return nil
+	}
+
+	cfg.HTTPClient = c
+	return nil
+}
+
+// resolveAPIOptions extracts the first instance of APIOptions and sets `aws.Config.APIOptions` to the resolved API options
+// if one has not been resolved from other sources.
+func resolveAPIOptions(ctx context.Context, cfg *aws.Config, configs configs) error {
+	o, found, err := getAPIOptions(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if !found {
+		return nil
+	}
+
+	cfg.APIOptions = o
+
+	return nil
+}
+
+// resolveEndpointResolver extracts the first instance of an aws.EndpointResolver from the config slice
+// and sets it on aws.Config.EndpointResolver
+func resolveEndpointResolver(ctx context.Context, cfg *aws.Config, configs configs) error {
+	endpointResolver, found, err := getEndpointResolver(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if !found {
+		return nil
+	}
+
+	cfg.EndpointResolver = endpointResolver
+
+	return nil
+}
+
+// resolveEndpointResolverWithOptions extracts the first instance of an aws.EndpointResolverWithOptions
+// from the config slice and sets it on aws.Config.EndpointResolverWithOptions
+func resolveEndpointResolverWithOptions(ctx context.Context, cfg *aws.Config, configs configs) error {
+	endpointResolver, found, err := getEndpointResolverWithOptions(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if !found {
+		return nil
+	}
+
+	cfg.EndpointResolverWithOptions = endpointResolver
+
+	return nil
+}
+
+func resolveLogger(ctx context.Context, cfg *aws.Config, configs configs) error {
+	logger, found, err := getLogger(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if !found {
+		return nil
+	}
+
+	cfg.Logger = logger
+
+	return nil
+}
+
+func resolveClientLogMode(ctx context.Context, cfg *aws.Config, configs configs) error {
+	mode, found, err := getClientLogMode(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if !found {
+		return nil
+	}
+
+	cfg.ClientLogMode = mode
+
+	return nil
+}
+
+func resolveRetryer(ctx context.Context, cfg *aws.Config, configs configs) error {
+	retryer, found, err := getRetryer(ctx, configs)
+	if err != nil {
+		return err
+	}
+
+	if found {
+		cfg.Retryer = retryer
+		return nil
+	}
+
+	// Only load the retry options if a custom retryer has not been specified.
+ if err = resolveRetryMaxAttempts(ctx, cfg, configs); err != nil { + return err + } + return resolveRetryMode(ctx, cfg, configs) +} + +func resolveEC2IMDSRegion(ctx context.Context, cfg *aws.Config, configs configs) error { + if len(cfg.Region) > 0 { + return nil + } + + region, found, err := getEC2IMDSRegion(ctx, configs) + if err != nil { + return err + } + if !found { + return nil + } + + cfg.Region = region + + return nil +} + +func resolveDefaultsModeOptions(ctx context.Context, cfg *aws.Config, configs configs) error { + defaultsMode, found, err := getDefaultsMode(ctx, configs) + if err != nil { + return err + } + if !found { + defaultsMode = aws.DefaultsModeLegacy + } + + var environment aws.RuntimeEnvironment + if defaultsMode == aws.DefaultsModeAuto { + envConfig, _, _ := getAWSConfigSources(configs) + + client, found, err := getDefaultsModeIMDSClient(ctx, configs) + if err != nil { + return err + } + if !found { + client = imds.NewFromConfig(*cfg) + } + + environment, err = resolveDefaultsModeRuntimeEnvironment(ctx, envConfig, client) + if err != nil { + return err + } + } + + cfg.DefaultsMode = defaultsMode + cfg.RuntimeEnvironment = environment + + return nil +} + +func resolveRetryMaxAttempts(ctx context.Context, cfg *aws.Config, configs configs) error { + maxAttempts, found, err := getRetryMaxAttempts(ctx, configs) + if err != nil || !found { + return err + } + cfg.RetryMaxAttempts = maxAttempts + + return nil +} + +func resolveRetryMode(ctx context.Context, cfg *aws.Config, configs configs) error { + retryMode, found, err := getRetryMode(ctx, configs) + if err != nil || !found { + return err + } + cfg.RetryMode = retryMode + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_bearer_token.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_bearer_token.go new file mode 100644 index 00000000000..a8ebb3c0a39 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_bearer_token.go @@ -0,0 +1,122 @@ +package config + +import ( + "context" + "fmt" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/credentials/ssocreds" + "github.com/aws/aws-sdk-go-v2/service/ssooidc" + smithybearer "github.com/aws/smithy-go/auth/bearer" +) + +// resolveBearerAuthToken extracts a token provider from the config sources. +// +// If an explicit bearer authentication token provider is not found the +// resolver will fallback to resolving token provider via other config sources +// such as SharedConfig. +func resolveBearerAuthToken(ctx context.Context, cfg *aws.Config, configs configs) error { + found, err := resolveBearerAuthTokenProvider(ctx, cfg, configs) + if found || err != nil { + return err + } + + return resolveBearerAuthTokenProviderChain(ctx, cfg, configs) +} + +// resolveBearerAuthTokenProvider extracts the first instance of +// BearerAuthTokenProvider from the config sources. +// +// The resolved BearerAuthTokenProvider will be wrapped in a cache to ensure +// the Token is only refreshed when needed. This also protects the +// TokenProvider so it can be used concurrently. 
+// +// Config providers used: +// * bearerAuthTokenProviderProvider +func resolveBearerAuthTokenProvider(ctx context.Context, cfg *aws.Config, configs configs) (bool, error) { + tokenProvider, found, err := getBearerAuthTokenProvider(ctx, configs) + if !found || err != nil { + return false, err + } + + cfg.BearerAuthTokenProvider, err = wrapWithBearerAuthTokenCache( + ctx, configs, tokenProvider) + if err != nil { + return false, err + } + + return true, nil +} + +func resolveBearerAuthTokenProviderChain(ctx context.Context, cfg *aws.Config, configs configs) (err error) { + _, sharedConfig, _ := getAWSConfigSources(configs) + + var provider smithybearer.TokenProvider + + if sharedConfig.SSOSession != nil { + provider, err = resolveBearerAuthSSOTokenProvider( + ctx, cfg, sharedConfig.SSOSession, configs) + } + + if err == nil && provider != nil { + cfg.BearerAuthTokenProvider, err = wrapWithBearerAuthTokenCache( + ctx, configs, provider) + } + + return err +} + +func resolveBearerAuthSSOTokenProvider(ctx context.Context, cfg *aws.Config, session *SSOSession, configs configs) (*ssocreds.SSOTokenProvider, error) { + ssoTokenProviderOptionsFn, found, err := getSSOTokenProviderOptions(ctx, configs) + if err != nil { + return nil, fmt.Errorf("failed to get SSOTokenProviderOptions from config sources, %w", err) + } + + var optFns []func(*ssocreds.SSOTokenProviderOptions) + if found { + optFns = append(optFns, ssoTokenProviderOptionsFn) + } + + cachePath, err := ssocreds.StandardCachedTokenFilepath(session.Name) + if err != nil { + return nil, fmt.Errorf("failed to get SSOTokenProvider's cache path, %w", err) + } + + client := ssooidc.NewFromConfig(*cfg) + provider := ssocreds.NewSSOTokenProvider(client, cachePath, optFns...) + + return provider, nil +} + +// wrapWithBearerAuthTokenCache will wrap provider with an smithy-go +// bearer/auth#TokenCache with the provided options if the provider is not +// already a TokenCache. +func wrapWithBearerAuthTokenCache( + ctx context.Context, + cfgs configs, + provider smithybearer.TokenProvider, + optFns ...func(*smithybearer.TokenCacheOptions), +) (smithybearer.TokenProvider, error) { + _, ok := provider.(*smithybearer.TokenCache) + if ok { + return provider, nil + } + + tokenCacheConfigOptions, optionsFound, err := getBearerAuthTokenCacheOptions(ctx, cfgs) + if err != nil { + return nil, err + } + + opts := make([]func(*smithybearer.TokenCacheOptions), 0, 2+len(optFns)) + opts = append(opts, func(o *smithybearer.TokenCacheOptions) { + o.RefreshBeforeExpires = 5 * time.Minute + o.RetrieveBearerTokenTimeout = 30 * time.Second + }) + opts = append(opts, optFns...) 
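+	// Editor's note: options resolved from the config sources are appended
+	// last, so they override both the defaults above and any caller-supplied
+	// optFns.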
+	if optionsFound {
+		opts = append(opts, tokenCacheConfigOptions)
+	}
+
+	return smithybearer.NewTokenCache(provider, opts...), nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go
new file mode 100644
index 00000000000..b21cd30804d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go
@@ -0,0 +1,499 @@
+package config
+
+import (
+	"context"
+	"fmt"
+	"net/url"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/credentials"
+	"github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds"
+	"github.com/aws/aws-sdk-go-v2/credentials/endpointcreds"
+	"github.com/aws/aws-sdk-go-v2/credentials/processcreds"
+	"github.com/aws/aws-sdk-go-v2/credentials/ssocreds"
+	"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
+	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+	"github.com/aws/aws-sdk-go-v2/service/sso"
+	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
+	"github.com/aws/aws-sdk-go-v2/service/sts"
+)
+
+const (
+	// valid credential source values
+	credSourceEc2Metadata  = "Ec2InstanceMetadata"
+	credSourceEnvironment  = "Environment"
+	credSourceECSContainer = "EcsContainer"
+)
+
+var (
+	ecsContainerEndpoint = "http://169.254.170.2" // not constant to allow for swapping during unit-testing
+)
+
+// resolveCredentials extracts a credential provider from the slice of config
+// sources.
+//
+// If an explicit credential provider is not found the resolver will fall back
+// to resolving credentials by extracting a credential provider from EnvConfig
+// and SharedConfig.
+func resolveCredentials(ctx context.Context, cfg *aws.Config, configs configs) error {
+	found, err := resolveCredentialProvider(ctx, cfg, configs)
+	if found || err != nil {
+		return err
+	}
+
+	return resolveCredentialChain(ctx, cfg, configs)
+}
+
+// resolveCredentialProvider extracts the first instance of Credentials from the
+// config slices.
+//
+// The resolved CredentialProvider will be wrapped in a cache to ensure the
+// credentials are only refreshed when needed. This also protects the
+// credential provider so it can be used concurrently.
+//
+// Config providers used:
+// * credentialsProviderProvider
+func resolveCredentialProvider(ctx context.Context, cfg *aws.Config, configs configs) (bool, error) {
+	credProvider, found, err := getCredentialsProvider(ctx, configs)
+	if !found || err != nil {
+		return false, err
+	}
+
+	cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, credProvider)
+	if err != nil {
+		return false, err
+	}
+
+	return true, nil
+}
+
+// resolveCredentialChain resolves a credential provider chain using EnvConfig
+// and SharedConfig if present in the slice of provided configs.
+//
+// The resolved CredentialProvider will be wrapped in a cache to ensure the
+// credentials are only refreshed when needed. This also protects the
+// credential provider so it can be used concurrently.
+func resolveCredentialChain(ctx context.Context, cfg *aws.Config, configs configs) (err error) {
+	envConfig, sharedConfig, other := getAWSConfigSources(configs)
+
+	// When checking if a profile was specified programmatically we should only consider the "other"
+	// configuration sources that have been provided. This ensures we correctly honor the expected credential
+	// hierarchy.
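+	// Editor's note: a summary of the precedence produced by the switch
+	// below, added for clarity:
+	//   1. a profile set programmatically -> resolveCredsFromProfile
+	//   2. static credentials from the environment
+	//   3. a web identity token file from the environment -> assumeWebIdentity
+	//   4. otherwise fall back to resolveCredsFromProfile (default profile)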
+	_, sharedProfileSet, err := getSharedConfigProfile(ctx, other)
+	if err != nil {
+		return err
+	}
+
+	switch {
+	case sharedProfileSet:
+		err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig, other)
+	case envConfig.Credentials.HasKeys():
+		cfg.Credentials = credentials.StaticCredentialsProvider{Value: envConfig.Credentials}
+	case len(envConfig.WebIdentityTokenFilePath) > 0:
+		err = assumeWebIdentity(ctx, cfg, envConfig.WebIdentityTokenFilePath, envConfig.RoleARN, envConfig.RoleSessionName, configs)
+	default:
+		err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig, other)
+	}
+	if err != nil {
+		return err
+	}
+
+	// Wrap the resolved provider in a cache so the SDK will cache credentials.
+	cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, cfg.Credentials)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func resolveCredsFromProfile(ctx context.Context, cfg *aws.Config, envConfig *EnvConfig, sharedConfig *SharedConfig, configs configs) (err error) {
+
+	switch {
+	case sharedConfig.Source != nil:
+		// Assume IAM role with credentials source from a different profile.
+		err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig.Source, configs)
+
+	case sharedConfig.Credentials.HasKeys():
+		// Static Credentials from Shared Config/Credentials file.
+		cfg.Credentials = credentials.StaticCredentialsProvider{
+			Value: sharedConfig.Credentials,
+		}
+
+	case len(sharedConfig.CredentialSource) != 0:
+		err = resolveCredsFromSource(ctx, cfg, envConfig, sharedConfig, configs)
+
+	case len(sharedConfig.WebIdentityTokenFile) != 0:
+		// Credentials from Assume Web Identity token require an IAM Role, and
+		// that role will be assumed. May be wrapped with another assume role
+		// via SourceProfile.
+		return assumeWebIdentity(ctx, cfg, sharedConfig.WebIdentityTokenFile, sharedConfig.RoleARN, sharedConfig.RoleSessionName, configs)
+
+	case sharedConfig.hasSSOConfiguration():
+		err = resolveSSOCredentials(ctx, cfg, sharedConfig, configs)
+
+	case len(sharedConfig.CredentialProcess) != 0:
+		// Get credentials from CredentialProcess
+		err = processCredentials(ctx, cfg, sharedConfig, configs)
+
+	case len(envConfig.ContainerCredentialsEndpoint) != 0:
+		err = resolveLocalHTTPCredProvider(ctx, cfg, envConfig.ContainerCredentialsEndpoint, envConfig.ContainerAuthorizationToken, configs)
+
+	case len(envConfig.ContainerCredentialsRelativePath) != 0:
+		err = resolveHTTPCredProvider(ctx, cfg, ecsContainerURI(envConfig.ContainerCredentialsRelativePath), envConfig.ContainerAuthorizationToken, configs)
+
+	default:
+		err = resolveEC2RoleCredentials(ctx, cfg, configs)
+	}
+	if err != nil {
+		return err
+	}
+
+	if len(sharedConfig.RoleARN) > 0 {
+		return credsFromAssumeRole(ctx, cfg, sharedConfig, configs)
+	}
+
+	return nil
+}
+
+func resolveSSOCredentials(ctx context.Context, cfg *aws.Config, sharedConfig *SharedConfig, configs configs) error {
+	if err := sharedConfig.validateSSOConfiguration(); err != nil {
+		return err
+	}
+
+	var options []func(*ssocreds.Options)
+	v, found, err := getSSOProviderOptions(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if found {
+		options = append(options, v)
+	}
+
+	cfgCopy := cfg.Copy()
+
+	if sharedConfig.SSOSession != nil {
+		ssoTokenProviderOptionsFn, found, err := getSSOTokenProviderOptions(ctx, configs)
+		if err != nil {
+			return fmt.Errorf("failed to get SSOTokenProviderOptions from config sources, %w", err)
+		}
+		var optFns []func(*ssocreds.SSOTokenProviderOptions)
+		if found {
+			optFns = append(optFns,
ssoTokenProviderOptionsFn) + } + cfgCopy.Region = sharedConfig.SSOSession.SSORegion + cachedPath, err := ssocreds.StandardCachedTokenFilepath(sharedConfig.SSOSession.Name) + if err != nil { + return err + } + oidcClient := ssooidc.NewFromConfig(cfgCopy) + tokenProvider := ssocreds.NewSSOTokenProvider(oidcClient, cachedPath, optFns...) + options = append(options, func(o *ssocreds.Options) { + o.SSOTokenProvider = tokenProvider + o.CachedTokenFilepath = cachedPath + }) + } else { + cfgCopy.Region = sharedConfig.SSORegion + } + + cfg.Credentials = ssocreds.New(sso.NewFromConfig(cfgCopy), sharedConfig.SSOAccountID, sharedConfig.SSORoleName, sharedConfig.SSOStartURL, options...) + + return nil +} + +func ecsContainerURI(path string) string { + return fmt.Sprintf("%s%s", ecsContainerEndpoint, path) +} + +func processCredentials(ctx context.Context, cfg *aws.Config, sharedConfig *SharedConfig, configs configs) error { + var opts []func(*processcreds.Options) + + options, found, err := getProcessCredentialOptions(ctx, configs) + if err != nil { + return err + } + if found { + opts = append(opts, options) + } + + cfg.Credentials = processcreds.NewProvider(sharedConfig.CredentialProcess, opts...) + + return nil +} + +func resolveLocalHTTPCredProvider(ctx context.Context, cfg *aws.Config, endpointURL, authToken string, configs configs) error { + var resolveErr error + + parsed, err := url.Parse(endpointURL) + if err != nil { + resolveErr = fmt.Errorf("invalid URL, %w", err) + } else { + host := parsed.Hostname() + if len(host) == 0 { + resolveErr = fmt.Errorf("unable to parse host from local HTTP cred provider URL") + } else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil { + resolveErr = fmt.Errorf("failed to resolve host %q, %v", host, loopbackErr) + } else if !isLoopback { + resolveErr = fmt.Errorf("invalid endpoint host, %q, only loopback hosts are allowed", host) + } + } + + if resolveErr != nil { + return resolveErr + } + + return resolveHTTPCredProvider(ctx, cfg, endpointURL, authToken, configs) +} + +func resolveHTTPCredProvider(ctx context.Context, cfg *aws.Config, url, authToken string, configs configs) error { + optFns := []func(*endpointcreds.Options){ + func(options *endpointcreds.Options) { + if len(authToken) != 0 { + options.AuthorizationToken = authToken + } + options.APIOptions = cfg.APIOptions + if cfg.Retryer != nil { + options.Retryer = cfg.Retryer() + } + }, + } + + optFn, found, err := getEndpointCredentialProviderOptions(ctx, configs) + if err != nil { + return err + } + if found { + optFns = append(optFns, optFn) + } + + provider := endpointcreds.New(url, optFns...) 
+
+	cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, provider, func(options *aws.CredentialsCacheOptions) {
+		options.ExpiryWindow = 5 * time.Minute
+	})
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func resolveCredsFromSource(ctx context.Context, cfg *aws.Config, envConfig *EnvConfig, sharedCfg *SharedConfig, configs configs) (err error) {
+	switch sharedCfg.CredentialSource {
+	case credSourceEc2Metadata:
+		return resolveEC2RoleCredentials(ctx, cfg, configs)
+
+	case credSourceEnvironment:
+		cfg.Credentials = credentials.StaticCredentialsProvider{Value: envConfig.Credentials}
+
+	case credSourceECSContainer:
+		if len(envConfig.ContainerCredentialsRelativePath) == 0 {
+			return fmt.Errorf("EcsContainer was specified as the credential_source, but 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' was not set")
+		}
+		return resolveHTTPCredProvider(ctx, cfg, ecsContainerURI(envConfig.ContainerCredentialsRelativePath), envConfig.ContainerAuthorizationToken, configs)
+
+	default:
+		return fmt.Errorf("credential_source values must be EcsContainer, Ec2InstanceMetadata, or Environment")
+	}
+
+	return nil
+}
+
+func resolveEC2RoleCredentials(ctx context.Context, cfg *aws.Config, configs configs) error {
+	optFns := make([]func(*ec2rolecreds.Options), 0, 2)
+
+	optFn, found, err := getEC2RoleCredentialProviderOptions(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if found {
+		optFns = append(optFns, optFn)
+	}
+
+	optFns = append(optFns, func(o *ec2rolecreds.Options) {
+		// Only define a client from config if not already defined.
+		if o.Client == nil {
+			o.Client = imds.NewFromConfig(*cfg)
+		}
+	})
+
+	provider := ec2rolecreds.New(optFns...)
+
+	cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, provider)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func getAWSConfigSources(cfgs configs) (*EnvConfig, *SharedConfig, configs) {
+	var (
+		envConfig    *EnvConfig
+		sharedConfig *SharedConfig
+		other        configs
+	)
+
+	for i := range cfgs {
+		switch c := cfgs[i].(type) {
+		case EnvConfig:
+			if envConfig == nil {
+				envConfig = &c
+			}
+		case *EnvConfig:
+			if envConfig == nil {
+				envConfig = c
+			}
+		case SharedConfig:
+			if sharedConfig == nil {
+				sharedConfig = &c
+			}
+		case *SharedConfig:
+			if sharedConfig == nil {
+				sharedConfig = c
+			}
+		default:
+			other = append(other, c)
+		}
+	}
+
+	if envConfig == nil {
+		envConfig = &EnvConfig{}
+	}
+
+	if sharedConfig == nil {
+		sharedConfig = &SharedConfig{}
+	}
+
+	return envConfig, sharedConfig, other
+}
+
+// AssumeRoleTokenProviderNotSetError is an error returned when creating a
+// session when the MFAToken option is not set when shared config is
+// configured to assume a role with an MFA token.
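As an aside, a hedged sketch of how a caller supplies the missing token provider — via `config.WithAssumeRoleCredentialOptions`, which feeds the `optFns` consumed by `credsFromAssumeRole` further down — so that resolution does not fail with the `AssumeRoleTokenProviderNotSetError` defined next:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
)

func main() {
	// For a shared-config profile with role_arn and mfa_serial set, supply a
	// TokenProvider so the assume-role provider can prompt for the MFA code.
	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithAssumeRoleCredentialOptions(func(o *stscreds.AssumeRoleOptions) {
			o.TokenProvider = stscreds.StdinTokenProvider // reads the code from stdin
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = cfg // use cfg with a service client as usual
}
```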
+type AssumeRoleTokenProviderNotSetError struct{} + +// Error is the error message +func (e AssumeRoleTokenProviderNotSetError) Error() string { + return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.") +} + +func assumeWebIdentity(ctx context.Context, cfg *aws.Config, filepath string, roleARN, sessionName string, configs configs) error { + if len(filepath) == 0 { + return fmt.Errorf("token file path is not set") + } + + optFns := []func(*stscreds.WebIdentityRoleOptions){ + func(options *stscreds.WebIdentityRoleOptions) { + options.RoleSessionName = sessionName + }, + } + + optFn, found, err := getWebIdentityCredentialProviderOptions(ctx, configs) + if err != nil { + return err + } + + if found { + optFns = append(optFns, optFn) + } + + opts := stscreds.WebIdentityRoleOptions{ + RoleARN: roleARN, + } + + for _, fn := range optFns { + fn(&opts) + } + + if len(opts.RoleARN) == 0 { + return fmt.Errorf("role ARN is not set") + } + + client := opts.Client + if client == nil { + client = sts.NewFromConfig(*cfg) + } + + provider := stscreds.NewWebIdentityRoleProvider(client, roleARN, stscreds.IdentityTokenFile(filepath), optFns...) + + cfg.Credentials = provider + + return nil +} + +func credsFromAssumeRole(ctx context.Context, cfg *aws.Config, sharedCfg *SharedConfig, configs configs) (err error) { + optFns := []func(*stscreds.AssumeRoleOptions){ + func(options *stscreds.AssumeRoleOptions) { + options.RoleSessionName = sharedCfg.RoleSessionName + if sharedCfg.RoleDurationSeconds != nil { + if *sharedCfg.RoleDurationSeconds/time.Minute > 15 { + options.Duration = *sharedCfg.RoleDurationSeconds + } + } + // Assume role with external ID + if len(sharedCfg.ExternalID) > 0 { + options.ExternalID = aws.String(sharedCfg.ExternalID) + } + + // Assume role with MFA + if len(sharedCfg.MFASerial) != 0 { + options.SerialNumber = aws.String(sharedCfg.MFASerial) + } + }, + } + + optFn, found, err := getAssumeRoleCredentialProviderOptions(ctx, configs) + if err != nil { + return err + } + if found { + optFns = append(optFns, optFn) + } + + { + // Synthesize options early to validate configuration errors sooner to ensure a token provider + // is present if the SerialNumber was set. + var o stscreds.AssumeRoleOptions + for _, fn := range optFns { + fn(&o) + } + if o.TokenProvider == nil && o.SerialNumber != nil { + return AssumeRoleTokenProviderNotSetError{} + } + } + + cfg.Credentials = stscreds.NewAssumeRoleProvider(sts.NewFromConfig(*cfg), sharedCfg.RoleARN, optFns...) + + return nil +} + +// wrapWithCredentialsCache will wrap provider with an aws.CredentialsCache +// with the provided options if the provider is not already a +// aws.CredentialsCache. +func wrapWithCredentialsCache( + ctx context.Context, + cfgs configs, + provider aws.CredentialsProvider, + optFns ...func(options *aws.CredentialsCacheOptions), +) (aws.CredentialsProvider, error) { + _, ok := provider.(*aws.CredentialsCache) + if ok { + return provider, nil + } + + credCacheOptions, optionsFound, err := getCredentialsCacheOptionsProvider(ctx, cfgs) + if err != nil { + return nil, err + } + + // force allocation of a new slice if the additional options are + // needed, to prevent overwriting the passed in slice of options. 
+ optFns = optFns[:len(optFns):len(optFns)] + if optionsFound { + optFns = append(optFns, credCacheOptions) + } + + return aws.NewCredentialsCache(provider, optFns...), nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go new file mode 100644 index 00000000000..20683bf5f07 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go @@ -0,0 +1,1492 @@ +package config + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" + "github.com/aws/aws-sdk-go-v2/internal/ini" + "github.com/aws/aws-sdk-go-v2/internal/shareddefaults" + "github.com/aws/smithy-go/logging" +) + +const ( + // Prefix to use for filtering profiles. The profile prefix should only + // exist in the shared config file, not the credentials file. + profilePrefix = `profile ` + + // Prefix to be used for SSO sections. These are supposed to only exist in + // the shared config file, not the credentials file. + ssoSectionPrefix = `sso-session ` + + // Prefix for services section. It is referenced in profile via the services + // parameter to configure clients for service-specific parameters. + servicesPrefix = `services` + + // string equivalent for boolean + endpointDiscoveryDisabled = `false` + endpointDiscoveryEnabled = `true` + endpointDiscoveryAuto = `auto` + + // Static Credentials group + accessKeyIDKey = `aws_access_key_id` // group required + secretAccessKey = `aws_secret_access_key` // group required + sessionTokenKey = `aws_session_token` // optional + + // Assume Role Credentials group + roleArnKey = `role_arn` // group required + sourceProfileKey = `source_profile` // group required + credentialSourceKey = `credential_source` // group required (or source_profile) + externalIDKey = `external_id` // optional + mfaSerialKey = `mfa_serial` // optional + roleSessionNameKey = `role_session_name` // optional + roleDurationSecondsKey = "duration_seconds" // optional + + // AWS Single Sign-On (AWS SSO) group + ssoSessionNameKey = "sso_session" + + ssoRegionKey = "sso_region" + ssoStartURLKey = "sso_start_url" + + ssoAccountIDKey = "sso_account_id" + ssoRoleNameKey = "sso_role_name" + + // Additional Config fields + regionKey = `region` + + // endpoint discovery group + enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional + + // External Credential process + credentialProcessKey = `credential_process` // optional + + // Web Identity Token File + webIdentityTokenFileKey = `web_identity_token_file` // optional + + // S3 ARN Region Usage + s3UseARNRegionKey = "s3_use_arn_region" + + ec2MetadataServiceEndpointModeKey = "ec2_metadata_service_endpoint_mode" + + ec2MetadataServiceEndpointKey = "ec2_metadata_service_endpoint" + + ec2MetadataV1DisabledKey = "ec2_metadata_v1_disabled" + + // Use DualStack Endpoint Resolution + useDualStackEndpoint = "use_dualstack_endpoint" + + // DefaultSharedConfigProfile is the default profile to be used when + // loading configuration from the config files if another profile name + // is not provided. 
+	DefaultSharedConfigProfile = `default`
+
+	// S3 Disable Multi-Region AccessPoints
+	s3DisableMultiRegionAccessPointsKey = `s3_disable_multiregion_access_points`
+
+	useFIPSEndpointKey = "use_fips_endpoint"
+
+	defaultsModeKey = "defaults_mode"
+
+	// Retry options
+	retryMaxAttemptsKey = "max_attempts"
+	retryModeKey        = "retry_mode"
+
+	caBundleKey = "ca_bundle"
+
+	sdkAppID = "sdk_ua_app_id"
+
+	ignoreConfiguredEndpoints = "ignore_configured_endpoint_urls"
+
+	endpointURL = "endpoint_url"
+)
+
+// defaultSharedConfigProfile allows for swapping the default profile for testing
+var defaultSharedConfigProfile = DefaultSharedConfigProfile
+
+// DefaultSharedCredentialsFilename returns the SDK's default file path
+// for the shared credentials file.
+//
+// Builds the shared credentials file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/credentials
+// - Windows: %USERPROFILE%\.aws\credentials
+func DefaultSharedCredentialsFilename() string {
+	return filepath.Join(shareddefaults.UserHomeDir(), ".aws", "credentials")
+}
+
+// DefaultSharedConfigFilename returns the SDK's default file path for
+// the shared config file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/config
+// - Windows: %USERPROFILE%\.aws\config
+func DefaultSharedConfigFilename() string {
+	return filepath.Join(shareddefaults.UserHomeDir(), ".aws", "config")
+}
+
+// DefaultSharedConfigFiles is a slice of the default shared config files that
+// will be used in order to load the SharedConfig.
+var DefaultSharedConfigFiles = []string{
+	DefaultSharedConfigFilename(),
+}
+
+// DefaultSharedCredentialsFiles is a slice of the default shared credentials
+// files that will be used in order to load the SharedConfig.
+var DefaultSharedCredentialsFiles = []string{
+	DefaultSharedCredentialsFilename(),
+}
+
+// SSOSession provides the shared configuration parameters of the sso-session
+// section.
+type SSOSession struct {
+	Name        string
+	SSORegion   string
+	SSOStartURL string
+}
+
+func (s *SSOSession) setFromIniSection(section ini.Section) {
+	updateString(&s.Name, section, ssoSessionNameKey)
+	updateString(&s.SSORegion, section, ssoRegionKey)
+	updateString(&s.SSOStartURL, section, ssoStartURLKey)
+}
+
+// Services contains values configured in the services section
+// of the AWS configuration file.
+type Services struct {
+	// Services section values
+	// {"serviceId": {"key": "value"}}
+	// e.g. {"s3": {"endpoint_url": "example.com"}}
+	ServiceValues map[string]map[string]string
+}
+
+func (s *Services) setFromIniSection(section ini.Section) {
+	if s.ServiceValues == nil {
+		s.ServiceValues = make(map[string]map[string]string)
+	}
+	for _, service := range section.List() {
+		s.ServiceValues[service] = section.Map(service)
+	}
+}
+
+// SharedConfig represents the configuration fields of the SDK config files.
+type SharedConfig struct {
+	Profile string
+
+	// Credentials values from the config file. Both aws_access_key_id
+	// and aws_secret_access_key must be provided together in the same file
+	// to be considered valid. The values will be ignored if not a complete group.
+	// aws_session_token is an optional field that can be provided if both of the
+	// other two fields are also provided.
+ // + // aws_access_key_id + // aws_secret_access_key + // aws_session_token + Credentials aws.Credentials + + CredentialSource string + CredentialProcess string + WebIdentityTokenFile string + + // SSO session options + SSOSessionName string + SSOSession *SSOSession + + // Legacy SSO session options + SSORegion string + SSOStartURL string + + // SSO fields not used + SSOAccountID string + SSORoleName string + + RoleARN string + ExternalID string + MFASerial string + RoleSessionName string + RoleDurationSeconds *time.Duration + + SourceProfileName string + Source *SharedConfig + + // Region is the region the SDK should use for looking up AWS service endpoints + // and signing requests. + // + // region = us-west-2 + Region string + + // EnableEndpointDiscovery can be enabled or disabled in the shared config + // by setting endpoint_discovery_enabled to true, or false respectively. + // + // endpoint_discovery_enabled = true + EnableEndpointDiscovery aws.EndpointDiscoveryEnableState + + // Specifies if the S3 service should allow ARNs to direct the region + // the client's requests are sent to. + // + // s3_use_arn_region=true + S3UseARNRegion *bool + + // Specifies the EC2 Instance Metadata Service default endpoint selection + // mode (IPv4 or IPv6) + // + // ec2_metadata_service_endpoint_mode=IPv6 + EC2IMDSEndpointMode imds.EndpointModeState + + // Specifies the EC2 Instance Metadata Service endpoint to use. If + // specified it overrides EC2IMDSEndpointMode. + // + // ec2_metadata_service_endpoint=http://fd00:ec2::254 + EC2IMDSEndpoint string + + // Specifies that IMDS clients should not fallback to IMDSv1 if token + // requests fail. + // + // ec2_metadata_v1_disabled=true + EC2IMDSv1Disabled *bool + + // Specifies if the S3 service should disable support for Multi-Region + // access-points + // + // s3_disable_multiregion_access_points=true + S3DisableMultiRegionAccessPoints *bool + + // Specifies that SDK clients must resolve a dual-stack endpoint for + // services. + // + // use_dualstack_endpoint=true + UseDualStackEndpoint aws.DualStackEndpointState + + // Specifies that SDK clients must resolve a FIPS endpoint for + // services. + // + // use_fips_endpoint=true + UseFIPSEndpoint aws.FIPSEndpointState + + // Specifies which defaults mode should be used by services. + // + // defaults_mode=standard + DefaultsMode aws.DefaultsMode + + // Specifies the maximum number attempts an API client will call an + // operation that fails with a retryable error. + // + // max_attempts=3 + RetryMaxAttempts int + + // Specifies the retry model the API client will be created with. + // + // retry_mode=standard + RetryMode aws.RetryMode + + // Sets the path to a custom Credentials Authority (CA) Bundle PEM file + // that the SDK will use instead of the system's root CA bundle. Only use + // this if you want to configure the SDK to use a custom set of CAs. + // + // Enabling this option will attempt to merge the Transport into the SDK's + // HTTP client. If the client's Transport is not a http.Transport an error + // will be returned. If the Transport's TLS config is set this option will + // cause the SDK to overwrite the Transport's TLS config's RootCAs value. + // + // Setting a custom HTTPClient in the aws.Config options will override this + // setting. To use this option and custom HTTP client, the HTTP client + // needs to be provided when creating the config. Not the service client. 
+ // + // ca_bundle=$HOME/my_custom_ca_bundle + CustomCABundle string + + // aws sdk app ID that can be added to user agent header string + AppID string + + // Flag used to disable configured endpoints. + IgnoreConfiguredEndpoints *bool + + // Value to contain configured endpoints to be propagated to + // corresponding endpoint resolution field. + BaseEndpoint string + + // Value to contain services section content. + Services Services +} + +func (c SharedConfig) getDefaultsMode(ctx context.Context) (value aws.DefaultsMode, ok bool, err error) { + if len(c.DefaultsMode) == 0 { + return "", false, nil + } + + return c.DefaultsMode, true, nil +} + +// GetRetryMaxAttempts returns the maximum number of attempts an API client +// created Retryer should attempt an operation call before failing. +func (c SharedConfig) GetRetryMaxAttempts(ctx context.Context) (value int, ok bool, err error) { + if c.RetryMaxAttempts == 0 { + return 0, false, nil + } + + return c.RetryMaxAttempts, true, nil +} + +// GetRetryMode returns the model the API client should create its Retryer in. +func (c SharedConfig) GetRetryMode(ctx context.Context) (value aws.RetryMode, ok bool, err error) { + if len(c.RetryMode) == 0 { + return "", false, nil + } + + return c.RetryMode, true, nil +} + +// GetS3UseARNRegion returns if the S3 service should allow ARNs to direct the region +// the client's requests are sent to. +func (c SharedConfig) GetS3UseARNRegion(ctx context.Context) (value, ok bool, err error) { + if c.S3UseARNRegion == nil { + return false, false, nil + } + + return *c.S3UseARNRegion, true, nil +} + +// GetEnableEndpointDiscovery returns if the enable_endpoint_discovery is set. +func (c SharedConfig) GetEnableEndpointDiscovery(ctx context.Context) (value aws.EndpointDiscoveryEnableState, ok bool, err error) { + if c.EnableEndpointDiscovery == aws.EndpointDiscoveryUnset { + return aws.EndpointDiscoveryUnset, false, nil + } + + return c.EnableEndpointDiscovery, true, nil +} + +// GetS3DisableMultiRegionAccessPoints returns if the S3 service should disable support for Multi-Region +// access-points. +func (c SharedConfig) GetS3DisableMultiRegionAccessPoints(ctx context.Context) (value, ok bool, err error) { + if c.S3DisableMultiRegionAccessPoints == nil { + return false, false, nil + } + + return *c.S3DisableMultiRegionAccessPoints, true, nil +} + +// GetRegion returns the region for the profile if a region is set. +func (c SharedConfig) getRegion(ctx context.Context) (string, bool, error) { + if len(c.Region) == 0 { + return "", false, nil + } + return c.Region, true, nil +} + +// GetCredentialsProvider returns the credentials for a profile if they were set. +func (c SharedConfig) getCredentialsProvider() (aws.Credentials, bool, error) { + return c.Credentials, true, nil +} + +// GetEC2IMDSEndpointMode implements a EC2IMDSEndpointMode option resolver interface. +func (c SharedConfig) GetEC2IMDSEndpointMode() (imds.EndpointModeState, bool, error) { + if c.EC2IMDSEndpointMode == imds.EndpointModeStateUnset { + return imds.EndpointModeStateUnset, false, nil + } + + return c.EC2IMDSEndpointMode, true, nil +} + +// GetEC2IMDSEndpoint implements a EC2IMDSEndpoint option resolver interface. +func (c SharedConfig) GetEC2IMDSEndpoint() (string, bool, error) { + if len(c.EC2IMDSEndpoint) == 0 { + return "", false, nil + } + + return c.EC2IMDSEndpoint, true, nil +} + +// GetEC2IMDSV1FallbackDisabled implements an EC2IMDSV1FallbackDisabled option +// resolver interface. 
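Before the accessor implementations continue, a hedged, self-contained sketch of how several of the fields documented above round-trip through the loader. All file paths, profile names, and values are invented; `LoadSharedConfigProfile` and `LoadSharedConfigOptions` are the exported entry points defined later in this file:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"os"
	"path/filepath"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	dir, err := os.MkdirTemp("", "sharedcfg")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	// Write a throwaway config file using keys documented on SharedConfig.
	cfgPath := filepath.Join(dir, "config")
	ini := `[profile demo]
region = us-west-2
retry_mode = adaptive
max_attempts = 5
use_fips_endpoint = true
`
	if err := os.WriteFile(cfgPath, []byte(ini), 0o600); err != nil {
		log.Fatal(err)
	}

	sc, err := config.LoadSharedConfigProfile(context.TODO(), "demo",
		func(o *config.LoadSharedConfigOptions) {
			o.ConfigFiles = []string{cfgPath}
			o.CredentialsFiles = []string{} // empty slice disables the default credentials file
		})
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(sc.Region, sc.RetryMode, sc.RetryMaxAttempts,
		sc.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled)
	// Expected: us-west-2 adaptive 5 true
}
```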
+func (c SharedConfig) GetEC2IMDSV1FallbackDisabled() (bool, bool) {
+	if c.EC2IMDSv1Disabled == nil {
+		return false, false
+	}
+
+	return *c.EC2IMDSv1Disabled, true
+}
+
+// GetUseDualStackEndpoint returns whether the service's dual-stack endpoint should be
+// used for requests.
+func (c SharedConfig) GetUseDualStackEndpoint(ctx context.Context) (value aws.DualStackEndpointState, found bool, err error) {
+	if c.UseDualStackEndpoint == aws.DualStackEndpointStateUnset {
+		return aws.DualStackEndpointStateUnset, false, nil
+	}
+
+	return c.UseDualStackEndpoint, true, nil
+}
+
+// GetUseFIPSEndpoint returns whether the service's FIPS endpoint should be
+// used for requests.
+func (c SharedConfig) GetUseFIPSEndpoint(ctx context.Context) (value aws.FIPSEndpointState, found bool, err error) {
+	if c.UseFIPSEndpoint == aws.FIPSEndpointStateUnset {
+		return aws.FIPSEndpointStateUnset, false, nil
+	}
+
+	return c.UseFIPSEndpoint, true, nil
+}
+
+// getCustomCABundle returns the custom CA bundle's PEM bytes if the file
+// path was set.
+func (c SharedConfig) getCustomCABundle(context.Context) (io.Reader, bool, error) {
+	if len(c.CustomCABundle) == 0 {
+		return nil, false, nil
+	}
+
+	b, err := ioutil.ReadFile(c.CustomCABundle)
+	if err != nil {
+		return nil, false, err
+	}
+	return bytes.NewReader(b), true, nil
+}
+
+// getAppID returns the sdk app ID if set in shared config profile
+func (c SharedConfig) getAppID(context.Context) (string, bool, error) {
+	return c.AppID, len(c.AppID) > 0, nil
+}
+
+// GetIgnoreConfiguredEndpoints reports whether the configured endpoints
+// feature should be disabled.
+func (c SharedConfig) GetIgnoreConfiguredEndpoints(context.Context) (bool, bool, error) {
+	if c.IgnoreConfiguredEndpoints == nil {
+		return false, false, nil
+	}
+
+	return *c.IgnoreConfiguredEndpoints, true, nil
+}
+
+func (c SharedConfig) getBaseEndpoint(context.Context) (string, bool, error) {
+	return c.BaseEndpoint, len(c.BaseEndpoint) > 0, nil
+}
+
+// GetServiceBaseEndpoint is used to retrieve the endpoint configured for a
+// service, identified by its normalized SDK ID.
+func (c SharedConfig) GetServiceBaseEndpoint(ctx context.Context, sdkID string) (string, bool, error) {
+	if service, ok := c.Services.ServiceValues[normalizeShared(sdkID)]; ok {
+		if endpt, ok := service[endpointURL]; ok {
+			return endpt, true, nil
+		}
+	}
+	return "", false, nil
+}
+
+func normalizeShared(sdkID string) string {
+	lower := strings.ToLower(sdkID)
+	return strings.ReplaceAll(lower, " ", "_")
+}
+
+func (c SharedConfig) getServicesObject(context.Context) (map[string]map[string]string, bool, error) {
+	return c.Services.ServiceValues, c.Services.ServiceValues != nil, nil
+}
+
+// loadSharedConfigIgnoreNotExist is an alias for loadSharedConfig with the
+// addition of ignoring when none of the files exist or when the profile
+// is not found in any of the files.
+func loadSharedConfigIgnoreNotExist(ctx context.Context, configs configs) (Config, error) {
+	cfg, err := loadSharedConfig(ctx, configs)
+	if err != nil {
+		if _, ok := err.(SharedConfigProfileNotExistError); ok {
+			return SharedConfig{}, nil
+		}
+		return nil, err
+	}
+
+	return cfg, nil
+}
+
+// loadSharedConfig uses the configs passed in to load the SharedConfig from file
+// The file names and profile name are sourced from the configs.
+//
+// If profile name is not provided DefaultSharedConfigProfile (default) will
+// be used.
+//
+// If shared config filenames are not provided DefaultSharedConfigFiles will
+// be used.
+//
+// Config providers used:
+// * sharedConfigProfileProvider
+// * sharedConfigFilesProvider
+func loadSharedConfig(ctx context.Context, configs configs) (Config, error) {
+	var profile string
+	var configFiles []string
+	var credentialsFiles []string
+	var ok bool
+	var err error
+
+	profile, ok, err = getSharedConfigProfile(ctx, configs)
+	if err != nil {
+		return nil, err
+	}
+	if !ok {
+		profile = defaultSharedConfigProfile
+	}
+
+	configFiles, ok, err = getSharedConfigFiles(ctx, configs)
+	if err != nil {
+		return nil, err
+	}
+
+	credentialsFiles, ok, err = getSharedCredentialsFiles(ctx, configs)
+	if err != nil {
+		return nil, err
+	}
+
+	// setup logger if log configuration warning is set
+	var logger logging.Logger
+	logWarnings, found, err := getLogConfigurationWarnings(ctx, configs)
+	if err != nil {
+		return SharedConfig{}, err
+	}
+	if found && logWarnings {
+		logger, found, err = getLogger(ctx, configs)
+		if err != nil {
+			return SharedConfig{}, err
+		}
+		if !found {
+			logger = logging.NewStandardLogger(os.Stderr)
+		}
+	}
+
+	return LoadSharedConfigProfile(ctx, profile,
+		func(o *LoadSharedConfigOptions) {
+			o.Logger = logger
+			o.ConfigFiles = configFiles
+			o.CredentialsFiles = credentialsFiles
+		},
+	)
+}
+
+// LoadSharedConfigOptions struct contains optional values that can be used to load the config.
+type LoadSharedConfigOptions struct {
+
+	// CredentialsFiles are the shared credentials files
+	CredentialsFiles []string
+
+	// ConfigFiles are the shared config files
+	ConfigFiles []string
+
+	// Logger is the logger used to log shared config behavior
+	Logger logging.Logger
+}
+
+// LoadSharedConfigProfile retrieves the configuration from the list of files
+// using the profile provided. The order the files are listed will determine
+// precedence. Values in subsequent files will overwrite values defined in
+// earlier files.
+//
+// For example, given two files A and B. Both define credentials. If the order
+// of the files are A then B, B's credential values will be used instead of A's.
+//
+// If config files are not set, the SDK will default to using a file at the
+// location `.aws/config`, if present.
+// If credentials files are not set, the SDK will default to using a file at
+// the location `.aws/credentials`, if present.
+// No default files are used if the corresponding file list is set to an
+// empty slice.
+// +// You can read more about shared config and credentials file location at +// https://docs.aws.amazon.com/credref/latest/refdocs/file-location.html#file-location +func LoadSharedConfigProfile(ctx context.Context, profile string, optFns ...func(*LoadSharedConfigOptions)) (SharedConfig, error) { + var option LoadSharedConfigOptions + for _, fn := range optFns { + fn(&option) + } + + if option.ConfigFiles == nil { + option.ConfigFiles = DefaultSharedConfigFiles + } + + if option.CredentialsFiles == nil { + option.CredentialsFiles = DefaultSharedCredentialsFiles + } + + // load shared configuration sections from shared configuration INI options + configSections, err := loadIniFiles(option.ConfigFiles) + if err != nil { + return SharedConfig{}, err + } + + // check for profile prefix and drop duplicates or invalid profiles + err = processConfigSections(ctx, &configSections, option.Logger) + if err != nil { + return SharedConfig{}, err + } + + // load shared credentials sections from shared credentials INI options + credentialsSections, err := loadIniFiles(option.CredentialsFiles) + if err != nil { + return SharedConfig{}, err + } + + // check for profile prefix and drop duplicates or invalid profiles + err = processCredentialsSections(ctx, &credentialsSections, option.Logger) + if err != nil { + return SharedConfig{}, err + } + + err = mergeSections(&configSections, credentialsSections) + if err != nil { + return SharedConfig{}, err + } + + cfg := SharedConfig{} + profiles := map[string]struct{}{} + + if err = cfg.setFromIniSections(profiles, profile, configSections, option.Logger); err != nil { + return SharedConfig{}, err + } + + return cfg, nil +} + +func processConfigSections(ctx context.Context, sections *ini.Sections, logger logging.Logger) error { + skipSections := map[string]struct{}{} + + for _, section := range sections.List() { + if _, ok := skipSections[section]; ok { + continue + } + + // drop sections from config file that do not have expected prefixes. + switch { + case strings.HasPrefix(section, profilePrefix): + // Rename sections to remove "profile " prefixing to match with + // credentials file. If default is already present, it will be + // dropped. + newName, err := renameProfileSection(section, sections, logger) + if err != nil { + return fmt.Errorf("failed to rename profile section, %w", err) + } + skipSections[newName] = struct{}{} + + case strings.HasPrefix(section, ssoSectionPrefix): + case strings.HasPrefix(section, servicesPrefix): + case strings.EqualFold(section, "default"): + default: + // drop this section, as invalid profile name + sections.DeleteSection(section) + + if logger != nil { + logger.Logf(logging.Debug, "A profile defined with name `%v` is ignored. "+ + "For use within a shared configuration file, "+ + "a non-default profile must have `profile ` "+ + "prefixed to the profile name.", + section, + ) + } + } + } + return nil +} + +func renameProfileSection(section string, sections *ini.Sections, logger logging.Logger) (string, error) { + v, ok := sections.GetSection(section) + if !ok { + return "", fmt.Errorf("error processing profiles within the shared configuration files") + } + + // delete section with profile as prefix + sections.DeleteSection(section) + + // set the value to non-prefixed name in sections. 
+	section = strings.TrimPrefix(section, profilePrefix)
+	if sections.HasSection(section) {
+		oldSection, _ := sections.GetSection(section)
+		v.Logs = append(v.Logs,
+			fmt.Sprintf("A non-default profile not prefixed with `profile ` found in %s, "+
+				"overriding non-default profile from %s",
+				v.SourceFile, oldSection.SourceFile))
+		sections.DeleteSection(section)
+	}
+
+	// assign non-prefixed name to section
+	v.Name = section
+	sections.SetSection(section, v)
+
+	return section, nil
+}
+
+func processCredentialsSections(ctx context.Context, sections *ini.Sections, logger logging.Logger) error {
+	for _, section := range sections.List() {
+		// drop profiles with prefix for credential files
+		if strings.HasPrefix(section, profilePrefix) {
+			// drop this section, as invalid profile name
+			sections.DeleteSection(section)
+
+			if logger != nil {
+				logger.Logf(logging.Debug,
+					"The profile defined with name `%v` is ignored. A profile with the `profile ` prefix is invalid "+
+						"for the shared credentials file.\n",
+					section,
+				)
+			}
+		}
+	}
+	return nil
+}
+
+func loadIniFiles(filenames []string) (ini.Sections, error) {
+	mergedSections := ini.NewSections()
+
+	for _, filename := range filenames {
+		sections, err := ini.OpenFile(filename)
+		var v *ini.UnableToReadFile
+		if ok := errors.As(err, &v); ok {
+			// Skip files which can't be opened and read for whatever reason.
+			// We treat such files as empty, and do not fall back to other locations.
+			continue
+		} else if err != nil {
+			return ini.Sections{}, SharedConfigLoadError{Filename: filename, Err: err}
+		}
+
+		// mergeSections into mergedSections
+		err = mergeSections(&mergedSections, sections)
+		if err != nil {
+			return ini.Sections{}, SharedConfigLoadError{Filename: filename, Err: err}
+		}
+	}
+
+	return mergedSections, nil
+}
+
+// mergeSections merges source section properties into destination section properties
+func mergeSections(dst *ini.Sections, src ini.Sections) error {
+	for _, sectionName := range src.List() {
+		srcSection, _ := src.GetSection(sectionName)
+
+		if (!srcSection.Has(accessKeyIDKey) && srcSection.Has(secretAccessKey)) ||
+			(srcSection.Has(accessKeyIDKey) && !srcSection.Has(secretAccessKey)) {
+			srcSection.Errors = append(srcSection.Errors,
+				fmt.Errorf("partial credentials found for profile %v", sectionName))
+		}
+
+		if !dst.HasSection(sectionName) {
+			dst.SetSection(sectionName, srcSection)
+			continue
+		}
+
+		// merge with destination srcSection
+		dstSection, _ := dst.GetSection(sectionName)
+
+		// errors should be overridden, if any
+		dstSection.Errors = srcSection.Errors
+
+		// Access key id update
+		if srcSection.Has(accessKeyIDKey) && srcSection.Has(secretAccessKey) {
+			accessKey := srcSection.String(accessKeyIDKey)
+			secretKey := srcSection.String(secretAccessKey)
+
+			if dstSection.Has(accessKeyIDKey) {
+				dstSection.Logs = append(dstSection.Logs, newMergeKeyLogMessage(sectionName, accessKeyIDKey,
+					dstSection.SourceFile[accessKeyIDKey], srcSection.SourceFile[accessKeyIDKey]))
+			}
+
+			// update access key
+			v, err := ini.NewStringValue(accessKey)
+			if err != nil {
+				return fmt.Errorf("error merging access key, %w", err)
+			}
+			dstSection.UpdateValue(accessKeyIDKey, v)
+
+			// update secret key
+			v, err = ini.NewStringValue(secretKey)
+			if err != nil {
+				return fmt.Errorf("error merging secret key, %w", err)
+			}
+			dstSection.UpdateValue(secretAccessKey, v)
+
+			// update session token
+			if err = mergeStringKey(&srcSection, &dstSection, sectionName, sessionTokenKey); err != nil {
+				return err
+			}
+
+			// update source
file to reflect where the static creds came from + dstSection.UpdateSourceFile(accessKeyIDKey, srcSection.SourceFile[accessKeyIDKey]) + dstSection.UpdateSourceFile(secretAccessKey, srcSection.SourceFile[secretAccessKey]) + } + + stringKeys := []string{ + roleArnKey, + sourceProfileKey, + credentialSourceKey, + externalIDKey, + mfaSerialKey, + roleSessionNameKey, + regionKey, + enableEndpointDiscoveryKey, + credentialProcessKey, + webIdentityTokenFileKey, + s3UseARNRegionKey, + s3DisableMultiRegionAccessPointsKey, + ec2MetadataServiceEndpointModeKey, + ec2MetadataServiceEndpointKey, + ec2MetadataV1DisabledKey, + useDualStackEndpoint, + useFIPSEndpointKey, + defaultsModeKey, + retryModeKey, + caBundleKey, + roleDurationSecondsKey, + retryMaxAttemptsKey, + + ssoSessionNameKey, + ssoAccountIDKey, + ssoRegionKey, + ssoRoleNameKey, + ssoStartURLKey, + } + for i := range stringKeys { + if err := mergeStringKey(&srcSection, &dstSection, sectionName, stringKeys[i]); err != nil { + return err + } + } + + // set srcSection on dst srcSection + *dst = dst.SetSection(sectionName, dstSection) + } + + return nil +} + +func mergeStringKey(srcSection *ini.Section, dstSection *ini.Section, sectionName, key string) error { + if srcSection.Has(key) { + srcValue := srcSection.String(key) + val, err := ini.NewStringValue(srcValue) + if err != nil { + return fmt.Errorf("error merging %s, %w", key, err) + } + + if dstSection.Has(key) { + dstSection.Logs = append(dstSection.Logs, newMergeKeyLogMessage(sectionName, key, + dstSection.SourceFile[key], srcSection.SourceFile[key])) + } + + dstSection.UpdateValue(key, val) + dstSection.UpdateSourceFile(key, srcSection.SourceFile[key]) + } + return nil +} + +func newMergeKeyLogMessage(sectionName, key, dstSourceFile, srcSourceFile string) string { + return fmt.Sprintf("For profile: %v, overriding %v value, defined in %v "+ + "with a %v value found in a duplicate profile defined at file %v. \n", + sectionName, key, dstSourceFile, key, srcSourceFile) +} + +// Returns an error if all of the files fail to load. If at least one file is +// successfully loaded and contains the profile, no error will be returned. +func (c *SharedConfig) setFromIniSections(profiles map[string]struct{}, profile string, + sections ini.Sections, logger logging.Logger) error { + c.Profile = profile + + section, ok := sections.GetSection(profile) + if !ok { + return SharedConfigProfileNotExistError{ + Profile: profile, + } + } + + // if logs are appended to the section, log them + if section.Logs != nil && logger != nil { + for _, log := range section.Logs { + logger.Logf(logging.Debug, log) + } + } + + // set config from the provided INI section + err := c.setFromIniSection(profile, section) + if err != nil { + return fmt.Errorf("error fetching config from profile, %v, %w", profile, err) + } + + if _, ok := profiles[profile]; ok { + // if this is the second instance of the profile the Assume Role + // options must be cleared because they are only valid for the + // first reference of a profile. The self linked instance of the + // profile only have credential provider options. + c.clearAssumeRoleOptions() + } else { + // First time a profile has been seen. Assert if the credential type + // requires a role ARN, the ARN is also set + if err := c.validateCredentialsConfig(profile); err != nil { + return err + } + } + + // if not top level profile and has credentials, return with credentials. 
+ if len(profiles) != 0 && c.Credentials.HasKeys() { + return nil + } + + profiles[profile] = struct{}{} + + // validate no colliding credentials type are present + if err := c.validateCredentialType(); err != nil { + return err + } + + // Link source profiles for assume roles + if len(c.SourceProfileName) != 0 { + // Linked profile via source_profile ignore credential provider + // options, the source profile must provide the credentials. + c.clearCredentialOptions() + + srcCfg := &SharedConfig{} + err := srcCfg.setFromIniSections(profiles, c.SourceProfileName, sections, logger) + if err != nil { + // SourceProfileName that doesn't exist is an error in configuration. + if _, ok := err.(SharedConfigProfileNotExistError); ok { + err = SharedConfigAssumeRoleError{ + RoleARN: c.RoleARN, + Profile: c.SourceProfileName, + Err: err, + } + } + return err + } + + if !srcCfg.hasCredentials() { + return SharedConfigAssumeRoleError{ + RoleARN: c.RoleARN, + Profile: c.SourceProfileName, + } + } + + c.Source = srcCfg + } + + // If the profile contains an SSO session parameter, the session MUST exist + // as a section in the config file. Load the SSO session using the name + // provided. If the session section is not found or incomplete an error + // will be returned. + if c.hasSSOTokenProviderConfiguration() { + section, ok := sections.GetSection(ssoSectionPrefix + strings.TrimSpace(c.SSOSessionName)) + if !ok { + return fmt.Errorf("failed to find SSO session section, %v", c.SSOSessionName) + } + var ssoSession SSOSession + ssoSession.setFromIniSection(section) + ssoSession.Name = c.SSOSessionName + c.SSOSession = &ssoSession + } + + for _, sectionName := range sections.List() { + if strings.HasPrefix(sectionName, servicesPrefix) { + section, ok := sections.GetSection(sectionName) + if ok { + var svcs Services + svcs.setFromIniSection(section) + c.Services = svcs + } + } + } + + return nil +} + +// setFromIniSection loads the configuration from the profile section defined in +// the provided INI file. A SharedConfig pointer type value is used so that +// multiple config file loadings can be chained. +// +// Only loads complete logically grouped values, and will not set fields in cfg +// for incomplete grouped values in the config. Such as credentials. For example +// if a config file only includes aws_access_key_id but no aws_secret_access_key +// the aws_access_key_id will be ignored. 
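The source_profile linking described above can be exercised end to end. A hedged sketch with invented profiles, showing that the loaded profile carries its role ARN while the linked source profile supplies the static credentials:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"os"
	"path/filepath"

	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	dir, err := os.MkdirTemp("", "chained")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	// Hypothetical chained profiles: "app" assumes a role using the static
	// credentials owned by "base", linked via source_profile.
	cfgFile := filepath.Join(dir, "config")
	ini := `[profile base]
aws_access_key_id     = AKIDEXAMPLE
aws_secret_access_key = SECRETEXAMPLE

[profile app]
role_arn       = arn:aws:iam::123456789012:role/app
source_profile = base
`
	if err := os.WriteFile(cfgFile, []byte(ini), 0o600); err != nil {
		log.Fatal(err)
	}

	sc, err := config.LoadSharedConfigProfile(context.TODO(), "app",
		func(o *config.LoadSharedConfigOptions) {
			o.ConfigFiles = []string{cfgFile}
			o.CredentialsFiles = []string{}
		})
	if err != nil {
		log.Fatal(err)
	}

	// setFromIniSections linked the source profile: sc.Source carries base's
	// static credentials, and sc.RoleARN names the role to assume.
	fmt.Println(sc.RoleARN, sc.Source.Profile, sc.Source.Credentials.HasKeys())
	// Expected: arn:aws:iam::123456789012:role/app base true
}
```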
+func (c *SharedConfig) setFromIniSection(profile string, section ini.Section) error {
+	if len(section.Name) == 0 {
+		sources := make([]string, 0)
+		for _, v := range section.SourceFile {
+			sources = append(sources, v)
+		}
+
+		return fmt.Errorf("parsing error: could not find profile section name after processing files: %v", sources)
+	}
+
+	if len(section.Errors) != 0 {
+		var errStatement string
+		for i, e := range section.Errors {
+			errStatement += fmt.Sprintf("%d, %v\n", i+1, e.Error())
+		}
+		return fmt.Errorf("error using profile:\n%v", errStatement)
+	}
+
+	// Assume Role
+	updateString(&c.RoleARN, section, roleArnKey)
+	updateString(&c.ExternalID, section, externalIDKey)
+	updateString(&c.MFASerial, section, mfaSerialKey)
+	updateString(&c.RoleSessionName, section, roleSessionNameKey)
+	updateString(&c.SourceProfileName, section, sourceProfileKey)
+	updateString(&c.CredentialSource, section, credentialSourceKey)
+	updateString(&c.Region, section, regionKey)
+
+	// AWS Single Sign-On (AWS SSO)
+	// SSO session options
+	updateString(&c.SSOSessionName, section, ssoSessionNameKey)
+
+	// Legacy SSO session options
+	updateString(&c.SSORegion, section, ssoRegionKey)
+	updateString(&c.SSOStartURL, section, ssoStartURLKey)
+
+	// SSO fields not used
+	updateString(&c.SSOAccountID, section, ssoAccountIDKey)
+	updateString(&c.SSORoleName, section, ssoRoleNameKey)
+
+	// we're retaining a behavioral quirk with this field that existed before
+	// the removal of literal parsing for #2276:
+	// - if the key is missing, the config field will not be set
+	// - if the key is set to a non-numeric, the config field will be set to 0
+	if section.Has(roleDurationSecondsKey) {
+		if v, ok := section.Int(roleDurationSecondsKey); ok {
+			c.RoleDurationSeconds = aws.Duration(time.Duration(v) * time.Second)
+		} else {
+			c.RoleDurationSeconds = aws.Duration(time.Duration(0))
+		}
+	}
+
+	updateString(&c.CredentialProcess, section, credentialProcessKey)
+	updateString(&c.WebIdentityTokenFile, section, webIdentityTokenFileKey)
+
+	updateEndpointDiscoveryType(&c.EnableEndpointDiscovery, section, enableEndpointDiscoveryKey)
+	updateBoolPtr(&c.S3UseARNRegion, section, s3UseARNRegionKey)
+	updateBoolPtr(&c.S3DisableMultiRegionAccessPoints, section, s3DisableMultiRegionAccessPointsKey)
+
+	if err := updateEC2MetadataServiceEndpointMode(&c.EC2IMDSEndpointMode, section, ec2MetadataServiceEndpointModeKey); err != nil {
+		return fmt.Errorf("failed to load %s from shared config, %v", ec2MetadataServiceEndpointModeKey, err)
+	}
+	updateString(&c.EC2IMDSEndpoint, section, ec2MetadataServiceEndpointKey)
+	updateBoolPtr(&c.EC2IMDSv1Disabled, section, ec2MetadataV1DisabledKey)
+
+	updateUseDualStackEndpoint(&c.UseDualStackEndpoint, section, useDualStackEndpoint)
+	updateUseFIPSEndpoint(&c.UseFIPSEndpoint, section, useFIPSEndpointKey)
+
+	if err := updateDefaultsMode(&c.DefaultsMode, section, defaultsModeKey); err != nil {
+		return fmt.Errorf("failed to load %s from shared config, %w", defaultsModeKey, err)
+	}
+
+	if err := updateInt(&c.RetryMaxAttempts, section, retryMaxAttemptsKey); err != nil {
+		return fmt.Errorf("failed to load %s from shared config, %w", retryMaxAttemptsKey, err)
+	}
+	if err := updateRetryMode(&c.RetryMode, section, retryModeKey); err != nil {
+		return fmt.Errorf("failed to load %s from shared config, %w", retryModeKey, err)
+	}
+
+	updateString(&c.CustomCABundle, section, caBundleKey)
+
+	// user agent app ID added to request User-Agent header
+	updateString(&c.AppID, section, sdkAppID)
+
+
updateBoolPtr(&c.IgnoreConfiguredEndpoints, section, ignoreConfiguredEndpoints) + + updateString(&c.BaseEndpoint, section, endpointURL) + + // Shared Credentials + creds := aws.Credentials{ + AccessKeyID: section.String(accessKeyIDKey), + SecretAccessKey: section.String(secretAccessKey), + SessionToken: section.String(sessionTokenKey), + Source: fmt.Sprintf("SharedConfigCredentials: %s", section.SourceFile[accessKeyIDKey]), + } + + if creds.HasKeys() { + c.Credentials = creds + } + + return nil +} + +func updateDefaultsMode(mode *aws.DefaultsMode, section ini.Section, key string) error { + if !section.Has(key) { + return nil + } + value := section.String(key) + if ok := mode.SetFromString(value); !ok { + return fmt.Errorf("invalid value: %s", value) + } + return nil +} + +func updateRetryMode(mode *aws.RetryMode, section ini.Section, key string) (err error) { + if !section.Has(key) { + return nil + } + value := section.String(key) + if *mode, err = aws.ParseRetryMode(value); err != nil { + return err + } + return nil +} + +func updateEC2MetadataServiceEndpointMode(endpointMode *imds.EndpointModeState, section ini.Section, key string) error { + if !section.Has(key) { + return nil + } + value := section.String(key) + return endpointMode.SetFromString(value) +} + +func (c *SharedConfig) validateCredentialsConfig(profile string) error { + if err := c.validateCredentialsRequireARN(profile); err != nil { + return err + } + + return nil +} + +func (c *SharedConfig) validateCredentialsRequireARN(profile string) error { + var credSource string + + switch { + case len(c.SourceProfileName) != 0: + credSource = sourceProfileKey + case len(c.CredentialSource) != 0: + credSource = credentialSourceKey + case len(c.WebIdentityTokenFile) != 0: + credSource = webIdentityTokenFileKey + } + + if len(credSource) != 0 && len(c.RoleARN) == 0 { + return CredentialRequiresARNError{ + Type: credSource, + Profile: profile, + } + } + + return nil +} + +func (c *SharedConfig) validateCredentialType() error { + // Only one or no credential type can be defined. 
+ if !oneOrNone( + len(c.SourceProfileName) != 0, + len(c.CredentialSource) != 0, + len(c.CredentialProcess) != 0, + len(c.WebIdentityTokenFile) != 0, + ) { + return fmt.Errorf("only one credential type may be specified per profile: source profile, credential source, credential process, web identity token") + } + + return nil +} + +func (c *SharedConfig) validateSSOConfiguration() error { + if c.hasSSOTokenProviderConfiguration() { + err := c.validateSSOTokenProviderConfiguration() + if err != nil { + return err + } + return nil + } + + if c.hasLegacySSOConfiguration() { + err := c.validateLegacySSOConfiguration() + if err != nil { + return err + } + } + return nil +} + +func (c *SharedConfig) validateSSOTokenProviderConfiguration() error { + var missing []string + + if len(c.SSOSessionName) == 0 { + missing = append(missing, ssoSessionNameKey) + } + + if c.SSOSession == nil { + missing = append(missing, ssoSectionPrefix) + } else { + if len(c.SSOSession.SSORegion) == 0 { + missing = append(missing, ssoRegionKey) + } + + if len(c.SSOSession.SSOStartURL) == 0 { + missing = append(missing, ssoStartURLKey) + } + } + + if len(missing) > 0 { + return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s", + c.Profile, strings.Join(missing, ", ")) + } + + if len(c.SSORegion) > 0 && c.SSORegion != c.SSOSession.SSORegion { + return fmt.Errorf("%s in profile %q must match %s in %s", ssoRegionKey, c.Profile, ssoRegionKey, ssoSectionPrefix) + } + + if len(c.SSOStartURL) > 0 && c.SSOStartURL != c.SSOSession.SSOStartURL { + return fmt.Errorf("%s in profile %q must match %s in %s", ssoStartURLKey, c.Profile, ssoStartURLKey, ssoSectionPrefix) + } + + return nil +} + +func (c *SharedConfig) validateLegacySSOConfiguration() error { + var missing []string + + if len(c.SSORegion) == 0 { + missing = append(missing, ssoRegionKey) + } + + if len(c.SSOStartURL) == 0 { + missing = append(missing, ssoStartURLKey) + } + + if len(c.SSOAccountID) == 0 { + missing = append(missing, ssoAccountIDKey) + } + + if len(c.SSORoleName) == 0 { + missing = append(missing, ssoRoleNameKey) + } + + if len(missing) > 0 { + return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s", + c.Profile, strings.Join(missing, ", ")) + } + return nil +} + +func (c *SharedConfig) hasCredentials() bool { + switch { + case len(c.SourceProfileName) != 0: + case len(c.CredentialSource) != 0: + case len(c.CredentialProcess) != 0: + case len(c.WebIdentityTokenFile) != 0: + case c.hasSSOConfiguration(): + case c.Credentials.HasKeys(): + default: + return false + } + + return true +} + +func (c *SharedConfig) hasSSOConfiguration() bool { + return c.hasSSOTokenProviderConfiguration() || c.hasLegacySSOConfiguration() +} + +func (c *SharedConfig) hasSSOTokenProviderConfiguration() bool { + return len(c.SSOSessionName) > 0 +} + +func (c *SharedConfig) hasLegacySSOConfiguration() bool { + return len(c.SSORegion) > 0 || len(c.SSOAccountID) > 0 || len(c.SSOStartURL) > 0 || len(c.SSORoleName) > 0 +} + +func (c *SharedConfig) clearAssumeRoleOptions() { + c.RoleARN = "" + c.ExternalID = "" + c.MFASerial = "" + c.RoleSessionName = "" + c.SourceProfileName = "" +} + +func (c *SharedConfig) clearCredentialOptions() { + c.CredentialSource = "" + c.CredentialProcess = "" + c.WebIdentityTokenFile = "" + c.Credentials = aws.Credentials{} + c.SSOAccountID = "" + c.SSORegion = "" + c.SSORoleName = "" + c.SSOStartURL = "" +} + +// SharedConfigLoadError is an error for the shared config file failed 
to load.
+type SharedConfigLoadError struct {
+	Filename string
+	Err      error
+}
+
+// Unwrap returns the underlying error that caused the failure.
+func (e SharedConfigLoadError) Unwrap() error {
+	return e.Err
+}
+
+func (e SharedConfigLoadError) Error() string {
+	return fmt.Sprintf("failed to load shared config file, %s, %v", e.Filename, e.Err)
+}
+
+// SharedConfigProfileNotExistError is an error for the shared config when
+// the profile was not found in the config file.
+type SharedConfigProfileNotExistError struct {
+	Filename []string
+	Profile  string
+	Err      error
+}
+
+// Unwrap returns the underlying error that caused the failure.
+func (e SharedConfigProfileNotExistError) Unwrap() error {
+	return e.Err
+}
+
+func (e SharedConfigProfileNotExistError) Error() string {
+	return fmt.Sprintf("failed to get shared config profile, %s", e.Profile)
+}
+
+// SharedConfigAssumeRoleError is an error for the shared config when the
+// profile contains assume role information, but that information is invalid
+// or not complete.
+type SharedConfigAssumeRoleError struct {
+	Profile string
+	RoleARN string
+	Err     error
+}
+
+// Unwrap returns the underlying error that caused the failure.
+func (e SharedConfigAssumeRoleError) Unwrap() error {
+	return e.Err
+}
+
+func (e SharedConfigAssumeRoleError) Error() string {
+	return fmt.Sprintf("failed to load assume role %s, of profile %s, %v",
+		e.RoleARN, e.Profile, e.Err)
+}
+
+// CredentialRequiresARNError provides the error for shared config credentials
+// that are incorrectly configured in the shared config or credentials file.
+type CredentialRequiresARNError struct {
+	// type of credentials that were configured.
+	Type string
+
+	// Profile name the credentials were in.
+	Profile string
+}
+
+// Error satisfies the error interface.
+func (e CredentialRequiresARNError) Error() string {
+	return fmt.Sprintf(
+		"credential type %s requires role_arn, profile %s",
+		e.Type, e.Profile,
+	)
+}
+
+func oneOrNone(bs ...bool) bool {
+	var count int
+
+	for _, b := range bs {
+		if b {
+			count++
+			if count > 1 {
+				return false
+			}
+		}
+	}
+
+	return true
+}
+
+// updateString will only update the dst with the value in the section key, if
+// the key is present in the section.
+func updateString(dst *string, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+	*dst = section.String(key)
+}
+
+// updateInt will only update the dst with the value in the section key, if
+// the key is present in the section.
+//
+// Down casts the INI integer value from a int64 to an int, which could be
+// different bit size depending on platform.
+func updateInt(dst *int, section ini.Section, key string) error {
+	if !section.Has(key) {
+		return nil
+	}
+
+	v, ok := section.Int(key)
+	if !ok {
+		return fmt.Errorf("invalid value %s=%s, expect integer", key, section.String(key))
+	}
+
+	*dst = int(v)
+	return nil
+}
+
+// updateBool will only update the dst with the value in the section key, if
+// the key is present in the section.
+func updateBool(dst *bool, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+
+	// retains pre-#2276 behavior where non-bool value would resolve to false
+	v, _ := section.Bool(key)
+	*dst = v
+}
+
+// updateBoolPtr will only update the dst with the value in the section key, if
+// the key is present in the section.
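Building on the error types defined above, a hedged sketch of how a caller distinguishes a missing profile from other load failures; the profile name is invented:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	_, err := config.LoadSharedConfigProfile(context.TODO(), "missing-profile")

	// SharedConfigProfileNotExistError implements error by value, so errors.As
	// can unwrap it from whatever the loader returns.
	var notExist config.SharedConfigProfileNotExistError
	if errors.As(err, &notExist) {
		fmt.Printf("profile %q is not defined in the shared config files\n", notExist.Profile)
		return
	}
	if err != nil {
		log.Fatal(err)
	}
}
```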
+func updateBoolPtr(dst **bool, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+
+	// retains pre-#2276 behavior where non-bool value would resolve to false
+	v, _ := section.Bool(key)
+	*dst = new(bool)
+	**dst = v
+}
+
+// updateEndpointDiscoveryType will only update the dst with the value in the section, if
+// a valid key and corresponding EndpointDiscoveryType is found.
+func updateEndpointDiscoveryType(dst *aws.EndpointDiscoveryEnableState, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+
+	value := section.String(key)
+	if len(value) == 0 {
+		return
+	}
+
+	switch {
+	case strings.EqualFold(value, endpointDiscoveryDisabled):
+		*dst = aws.EndpointDiscoveryDisabled
+	case strings.EqualFold(value, endpointDiscoveryEnabled):
+		*dst = aws.EndpointDiscoveryEnabled
+	case strings.EqualFold(value, endpointDiscoveryAuto):
+		*dst = aws.EndpointDiscoveryAuto
+	}
+}
+
+// updateUseDualStackEndpoint will only update the dst with the value in the
+// section, if the key is present.
+func updateUseDualStackEndpoint(dst *aws.DualStackEndpointState, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+
+	// retains pre-#2276 behavior where non-bool value would resolve to false
+	if v, _ := section.Bool(key); v {
+		*dst = aws.DualStackEndpointStateEnabled
+	} else {
+		*dst = aws.DualStackEndpointStateDisabled
+	}
+
+	return
+}
+
+// updateUseFIPSEndpoint will only update the dst with the value in the
+// section, if the key is present.
+func updateUseFIPSEndpoint(dst *aws.FIPSEndpointState, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+
+	// retains pre-#2276 behavior where non-bool value would resolve to false
+	if v, _ := section.Bool(key); v {
+		*dst = aws.FIPSEndpointStateEnabled
+	} else {
+		*dst = aws.FIPSEndpointStateDisabled
+	}
+
+	return
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md
new file mode 100644
index 00000000000..65c8de85381
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md
@@ -0,0 +1,409 @@
+# v1.15.2 (2023-11-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.1 (2023-11-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.0 (2023-11-01)
+
+* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.43 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.42 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.41 (2023-10-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.40 (2023-09-22) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.39 (2023-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.38 (2023-09-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.37 (2023-09-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.36 (2023-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.35 (2023-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.34 (2023-08-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.33 (2023-08-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.32 (2023-08-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.31 (2023-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.30 (2023-07-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.29 (2023-07-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.28 (2023-07-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.27 (2023-07-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.26 (2023-06-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.25 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.24 (2023-05-09) + +* No change notes available for this release. + +# v1.13.23 (2023-05-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.22 (2023-05-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.21 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.20 (2023-04-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.19 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.18 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.17 (2023-03-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.16 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.15 (2023-02-22) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.14 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.13 (2023-02-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.12 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.11 (2023-02-01) + +* No change notes available for this release. 
+
+# v1.13.10 (2023-01-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.9 (2023-01-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.8 (2023-01-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.7 (2022-12-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.6 (2022-12-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.5 (2022-12-15)
+
+* **Bug Fix**: Unify logic between shared config and credentials in finding home directory
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.4 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.3 (2022-11-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.2 (2022-11-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.1 (2022-11-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.0 (2022-11-11)
+
+* **Announcement**: When using the SSOTokenProvider, a previous implementation incorrectly compensated for invalid SSOTokenProvider configurations in the shared profile. This has been fixed via PR #1903 and tracked in issue #1846
+* **Feature**: Adds token refresh support (via SSOTokenProvider) when using the SSOCredentialProvider
+
+# v1.12.24 (2022-11-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.23 (2022-10-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.22 (2022-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.21 (2022-09-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.20 (2022-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.19 (2022-09-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.18 (2022-09-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.17 (2022-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.16 (2022-08-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.15 (2022-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.14 (2022-08-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.13 (2022-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.12 (2022-08-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.11 (2022-08-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.10 (2022-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.9 (2022-07-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.8 (2022-07-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.7 (2022-06-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.6 (2022-06-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.5 (2022-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.4 (2022-05-26)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.3 (2022-05-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.2 (2022-05-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.1 (2022-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.0 (2022-04-25)
+
+* **Feature**: Adds Duration and Policy options that can be used when creating stscreds.WebIdentityRoleProvider credentials provider.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.2 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.1 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.0 (2022-03-23)
+
+* **Feature**: Update `ec2rolecreds` package's `Provider` to implement support for CredentialsCache new optional caching strategy interfaces, HandleFailRefreshCredentialsCacheStrategy and AdjustExpiresByCredentialsCacheStrategy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.0 (2022-03-08)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.0 (2022-02-24)
+
+* **Feature**: Adds support for `SourceIdentity` to `stscreds.AssumeRoleProvider` [#1588](https://github.com/aws/aws-sdk-go-v2/pull/1588). Fixes [#1575](https://github.com/aws/aws-sdk-go-v2/issues/1575)
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.0 (2022-01-14)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.0 (2022-01-07)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.5 (2021-12-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.4 (2021-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.3 (2021-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.2 (2021-11-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.1 (2021-11-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.0 (2021-11-06)
+
+* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.0 (2021-10-21)
+
+* **Feature**: Updated to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.3 (2021-10-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.2 (2021-09-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.1 (2021-09-10)
+
+* **Documentation**: Fixes the AssumeRoleProvider's documentation for using custom TokenProviders.
+
+# v1.4.0 (2021-08-27)
+
+* **Feature**: Adds support for Tags and TransitiveTagKeys to stscreds.AssumeRoleProvider. Closes https://github.com/aws/aws-sdk-go-v2/issues/723
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.3 (2021-08-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.2 (2021-08-04)
+
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.1 (2021-07-15)
+
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.0 (2021-06-25)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Bug Fix**: Fixed example usages of aws.CredentialsCache ([#1275](https://github.com/aws/aws-sdk-go-v2/pull/1275))
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.1 (2021-05-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.0 (2021-05-14)
+
+* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting.
+* **Dependency Update**: Updated to the latest SDK module versions
+
diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/envprovider/LICENSE b/vendor/github.com/aws/aws-sdk-go-v2/credentials/LICENSE.txt
similarity index 100%
rename from vendor/go.opentelemetry.io/collector/confmap/provider/envprovider/LICENSE
rename to vendor/github.com/aws/aws-sdk-go-v2/credentials/LICENSE.txt
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/doc.go
new file mode 100644
index 00000000000..f6e2873ab90
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/doc.go
@@ -0,0 +1,4 @@
+/*
+Package credentials provides types for retrieving credentials from credentials sources.
+*/
+package credentials
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go
new file mode 100644
index 00000000000..6ed71b42b28
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go
@@ -0,0 +1,58 @@
+// Package ec2rolecreds provides the credentials provider implementation for
+// retrieving AWS credentials from Amazon EC2 Instance Roles via Amazon EC2 IMDS.
+//
+// # Concurrency and caching
+//
+// The Provider is not safe to use concurrently, and does not provide any
+// caching of the credentials retrieved. You should wrap the Provider with an
+// `aws.CredentialsCache` to provide concurrency safety, and caching of
+// credentials.
+//
+// # Loading credentials with the SDK's AWS Config
+//
+// The EC2 Instance role credentials provider will automatically be the resolved
+// credential provider in the credential chain if no other credential provider is
+// resolved first.
+//
+// To explicitly instruct the SDK's credentials resolving to use the EC2 Instance
+// role for credentials, you specify a `credential_source` property in the config
+// profile the SDK will load.
+//
+//	[default]
+//	credential_source = Ec2InstanceMetadata
+//
+// # Loading credentials with the Provider directly
+//
+// Another way to use the EC2 Instance role credentials provider is to create it
+// directly and assign it as the credentials provider for an API client.
+//
+// The following example creates a credentials provider, and wraps it with the
+// CredentialsCache before assigning the provider to the Amazon S3 API
+// client's Credentials option.
+//
+//	provider := ec2rolecreds.New()
+//
+//	// Create the service client value configured for credentials.
+//	svc := s3.New(s3.Options{
+//		Credentials: aws.NewCredentialsCache(provider),
+//	})
+//
+// If you need more control, you can set configuration options on the
+// credentials provider, such as configuring the EC2 IMDS API client used to
+// retrieve the credentials.
+//
+//	provider := ec2rolecreds.New(func(o *ec2rolecreds.Options) {
+//		// See the imds.Options type's documentation for more options available.
+//		o.Client = imds.New(imds.Options{
+//			HTTPClient: customHTTPClient,
+//		})
+//	})
+//
+// To modify how soon the retrieved credentials expire prior to their original
+// expiry time, use the ExpiryWindow option of the wrapping aws.CredentialsCache.
+//
+// # EC2 IMDS API Client
+//
+// See the github.com/aws/aws-sdk-go-v2/feature/ec2/imds module for more details on
+// configuring the client, and options available.
+package ec2rolecreds
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go
new file mode 100644
index 00000000000..5c699f16650
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go
@@ -0,0 +1,229 @@
+package ec2rolecreds
+
+import (
+	"bufio"
+	"context"
+	"encoding/json"
+	"fmt"
+	"math"
+	"path"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+	sdkrand "github.com/aws/aws-sdk-go-v2/internal/rand"
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+	"github.com/aws/smithy-go"
+	"github.com/aws/smithy-go/logging"
+	"github.com/aws/smithy-go/middleware"
+)
+
+// ProviderName provides the name of the EC2 role provider.
+const ProviderName = "EC2RoleProvider"
+
+// GetMetadataAPIClient provides the interface for an EC2 IMDS API client for the
+// GetMetadata operation.
+type GetMetadataAPIClient interface {
+	GetMetadata(context.Context, *imds.GetMetadataInput, ...func(*imds.Options)) (*imds.GetMetadataOutput, error)
+}
+
+// A Provider retrieves credentials from the EC2 service, and keeps track of
+// whether those credentials are expired.
+//
+// The New function must be used to create the Provider, optionally with a
+// custom EC2 IMDS client.
+//
+//	p := ec2rolecreds.New(func(o *ec2rolecreds.Options) {
+//		o.Client = imds.New(imds.Options{ /* custom options */ })
+//	})
+type Provider struct {
+	options Options
+}
+
+// Options is a list of user settable options for setting the behavior of the Provider.
+type Options struct {
+	// The API client that will be used by the provider to make GetMetadata API
+	// calls to EC2 IMDS.
+	//
+	// If nil, the provider will default to the EC2 IMDS client.
+	Client GetMetadataAPIClient
+}
+
+// New returns an initialized Provider value configured to retrieve
+// credentials from the EC2 Instance Metadata service.
+func New(optFns ...func(*Options)) *Provider {
+	options := Options{}
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	if options.Client == nil {
+		options.Client = imds.New(imds.Options{})
+	}
+
+	return &Provider{
+		options: options,
+	}
+}
+
+// Retrieve retrieves credentials from the EC2 service. An error will be
+// returned if the request fails, or the desired credentials cannot be
+// extracted.
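+//
+// A minimal sketch of direct use (it assumes the code runs on an EC2 instance
+// with an instance role attached):
+//
+//	p := ec2rolecreds.New()
+//	creds, err := p.Retrieve(context.TODO())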
+func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) {
+	credsList, err := requestCredList(ctx, p.options.Client)
+	if err != nil {
+		return aws.Credentials{Source: ProviderName}, err
+	}
+
+	if len(credsList) == 0 {
+		return aws.Credentials{Source: ProviderName},
+			fmt.Errorf("unexpected empty EC2 IMDS role list")
+	}
+	credsName := credsList[0]
+
+	roleCreds, err := requestCred(ctx, p.options.Client, credsName)
+	if err != nil {
+		return aws.Credentials{Source: ProviderName}, err
+	}
+
+	creds := aws.Credentials{
+		AccessKeyID:     roleCreds.AccessKeyID,
+		SecretAccessKey: roleCreds.SecretAccessKey,
+		SessionToken:    roleCreds.Token,
+		Source:          ProviderName,
+
+		CanExpire: true,
+		Expires:   roleCreds.Expiration,
+	}
+
+	// Cap role credentials Expires to 1 hour so they can be refreshed more
+	// often. Jitter will be applied by the credentials cache, if one is used.
+	if anHour := sdk.NowTime().Add(1 * time.Hour); creds.Expires.After(anHour) {
+		creds.Expires = anHour
+	}
+
+	return creds, nil
+}
+
+// HandleFailToRefresh will extend the credentials Expires time if it is
+// expired. If the credentials will not expire within the minimum time, they
+// will be returned unmodified.
+//
+// If the credentials cannot expire, the original error will be returned.
+func (p *Provider) HandleFailToRefresh(ctx context.Context, prevCreds aws.Credentials, err error) (
+	aws.Credentials, error,
+) {
+	if !prevCreds.CanExpire {
+		return aws.Credentials{}, err
+	}
+
+	if prevCreds.Expires.After(sdk.NowTime().Add(5 * time.Minute)) {
+		return prevCreds, nil
+	}
+
+	newCreds := prevCreds
+	randFloat64, err := sdkrand.CryptoRandFloat64()
+	if err != nil {
+		return aws.Credentials{}, fmt.Errorf("failed to get random float, %w", err)
+	}
+
+	// Random distribution of [5,15) minutes.
+	expireOffset := time.Duration(randFloat64*float64(10*time.Minute)) + 5*time.Minute
+	newCreds.Expires = sdk.NowTime().Add(expireOffset)
+
+	logger := middleware.GetLogger(ctx)
+	logger.Logf(logging.Warn, "Attempting credential expiration extension due to a credential service availability issue. A refresh of these credentials will be attempted again in %v minutes.", math.Floor(expireOffset.Minutes()))
+
+	return newCreds, nil
+}
+
+// AdjustExpiresBy will add the passed in duration to the passed in
+// credential's Expires time, unless the time until Expires is less than 15
+// minutes. Returns the credentials, even if not updated.
+func (p *Provider) AdjustExpiresBy(creds aws.Credentials, dur time.Duration) (
+	aws.Credentials, error,
+) {
+	if !creds.CanExpire {
+		return creds, nil
+	}
+	if creds.Expires.Before(sdk.NowTime().Add(15 * time.Minute)) {
+		return creds, nil
+	}
+
+	creds.Expires = creds.Expires.Add(dur)
+	return creds, nil
+}
+
+// ec2RoleCredRespBody provides the shape for unmarshaling credential
+// request responses.
+type ec2RoleCredRespBody struct {
+	// Success State
+	Expiration      time.Time
+	AccessKeyID     string
+	SecretAccessKey string
+	Token           string
+
+	// Error state
+	Code    string
+	Message string
+}
+
+const iamSecurityCredsPath = "/iam/security-credentials/"
+
+// requestCredList requests a list of credentials from the EC2 service.
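+// The response body is a newline-separated list of role names, which this
+// function scans line by line.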
+// If there are no credentials, or there is an error making or receiving the
+// request, an error is returned.
+func requestCredList(ctx context.Context, client GetMetadataAPIClient) ([]string, error) {
+	resp, err := client.GetMetadata(ctx, &imds.GetMetadataInput{
+		Path: iamSecurityCredsPath,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("no EC2 IMDS role found, %w", err)
+	}
+	defer resp.Content.Close()
+
+	credsList := []string{}
+	s := bufio.NewScanner(resp.Content)
+	for s.Scan() {
+		credsList = append(credsList, s.Text())
+	}
+
+	if err := s.Err(); err != nil {
+		return nil, fmt.Errorf("failed to read EC2 IMDS role, %w", err)
+	}
+
+	return credsList, nil
+}
+
+// requestCred requests the credentials for a specific role from the EC2 service.
+//
+// If the credentials cannot be found, or there is an error reading the response,
+// an error will be returned.
+func requestCred(ctx context.Context, client GetMetadataAPIClient, credsName string) (ec2RoleCredRespBody, error) {
+	resp, err := client.GetMetadata(ctx, &imds.GetMetadataInput{
+		Path: path.Join(iamSecurityCredsPath, credsName),
+	})
+	if err != nil {
+		return ec2RoleCredRespBody{},
+			fmt.Errorf("failed to get %s EC2 IMDS role credentials, %w",
+				credsName, err)
+	}
+	defer resp.Content.Close()
+
+	var respCreds ec2RoleCredRespBody
+	if err := json.NewDecoder(resp.Content).Decode(&respCreds); err != nil {
+		return ec2RoleCredRespBody{},
+			fmt.Errorf("failed to decode %s EC2 IMDS role credentials, %w",
+				credsName, err)
+	}
+
+	if !strings.EqualFold(respCreds.Code, "Success") {
+		// If an error code was returned, something failed requesting the role.
+		return ec2RoleCredRespBody{},
+			fmt.Errorf("failed to get %s EC2 IMDS role credentials, %w",
+				credsName,
+				&smithy.GenericAPIError{Code: respCreds.Code, Message: respCreds.Message})
+	}
+
+	return respCreds, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go
new file mode 100644
index 00000000000..60b8298f86f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go
@@ -0,0 +1,148 @@
+package client
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/retry"
+	awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+	"github.com/aws/smithy-go"
+	smithymiddleware "github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// ServiceID is the client identifier.
+const ServiceID = "endpoint-credentials"
+
+// HTTPClient is a client for sending HTTP requests.
+type HTTPClient interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+// Options is the endpoint client configurable options.
+type Options struct {
+	// The endpoint to retrieve credentials from.
+	Endpoint string
+
+	// The HTTP client to invoke API calls with. Defaults to the client's default
+	// HTTP implementation if nil.
+	HTTPClient HTTPClient
+
+	// Retryer guides how HTTP requests should be retried in case of recoverable
+	// failures. When nil the API client will use a default retryer.
+	Retryer aws.Retryer
+
+	// Set of options to modify how the credentials operation is invoked.
+	APIOptions []func(*smithymiddleware.Stack) error
+}
+
+// Copy creates a copy of the API options.
+func (o Options) Copy() Options {
+	to := o
+	to.APIOptions = make([]func(*smithymiddleware.Stack) error, len(o.APIOptions))
+	copy(to.APIOptions, o.APIOptions)
+	return to
+}
+
+// Client is a client for retrieving AWS credentials from an endpoint.
+type Client struct {
+	options Options
+}
+
+// New constructs a new Client from the given options.
+func New(options Options, optFns ...func(*Options)) *Client {
+	options = options.Copy()
+
+	if options.HTTPClient == nil {
+		options.HTTPClient = awshttp.NewBuildableClient()
+	}
+
+	if options.Retryer == nil {
+		options.Retryer = retry.NewStandard()
+	}
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	client := &Client{
+		options: options,
+	}
+
+	return client
+}
+
+// GetCredentialsInput is the input to send to the endpoint service to receive credentials.
+type GetCredentialsInput struct {
+	AuthorizationToken string
+}
+
+// GetCredentials retrieves credentials from the credential endpoint.
+func (c *Client) GetCredentials(ctx context.Context, params *GetCredentialsInput, optFns ...func(*Options)) (*GetCredentialsOutput, error) {
+	stack := smithymiddleware.NewStack("GetCredentials", smithyhttp.NewStackRequest)
+	options := c.options.Copy()
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	stack.Serialize.Add(&serializeOpGetCredential{}, smithymiddleware.After)
+	stack.Build.Add(&buildEndpoint{Endpoint: options.Endpoint}, smithymiddleware.After)
+	stack.Deserialize.Add(&deserializeOpGetCredential{}, smithymiddleware.After)
+	retry.AddRetryMiddlewares(stack, retry.AddRetryMiddlewaresOptions{Retryer: options.Retryer})
+	middleware.AddSDKAgentKey(middleware.FeatureMetadata, ServiceID)
+	smithyhttp.AddErrorCloseResponseBodyMiddleware(stack)
+	smithyhttp.AddCloseResponseBodyMiddleware(stack)
+
+	for _, fn := range options.APIOptions {
+		if err := fn(stack); err != nil {
+			return nil, err
+		}
+	}
+
+	handler := smithymiddleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack)
+	result, _, err := handler.Handle(ctx, params)
+	if err != nil {
+		return nil, err
+	}
+
+	return result.(*GetCredentialsOutput), err
+}
+
+// GetCredentialsOutput is the response from the credential endpoint.
+type GetCredentialsOutput struct {
+	Expiration      *time.Time
+	AccessKeyID     string
+	SecretAccessKey string
+	Token           string
+}
+
+// EndpointError is an error returned from the endpoint service.
+type EndpointError struct {
+	Code    string            `json:"code"`
+	Message string            `json:"message"`
+	Fault   smithy.ErrorFault `json:"-"`
+}
+
+// Error is the error message string.
+func (e *EndpointError) Error() string {
+	return fmt.Sprintf("%s: %s", e.Code, e.Message)
+}
+
+// ErrorCode is the error code returned by the endpoint.
+func (e *EndpointError) ErrorCode() string {
+	return e.Code
+}
+
+// ErrorMessage is the error message returned by the endpoint.
+func (e *EndpointError) ErrorMessage() string {
+	return e.Message
+}
+
+// ErrorFault indicates error fault classification.
+func (e *EndpointError) ErrorFault() smithy.ErrorFault {
+	return e.Fault
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go
new file mode 100644
index 00000000000..40747a53c18
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go
@@ -0,0 +1,120 @@
+package client
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/url"
+
+	"github.com/aws/smithy-go"
+	smithymiddleware
"github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +type buildEndpoint struct { + Endpoint string +} + +func (b *buildEndpoint) ID() string { + return "BuildEndpoint" +} + +func (b *buildEndpoint) HandleBuild(ctx context.Context, in smithymiddleware.BuildInput, next smithymiddleware.BuildHandler) ( + out smithymiddleware.BuildOutput, metadata smithymiddleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport, %T", in.Request) + } + + if len(b.Endpoint) == 0 { + return out, metadata, fmt.Errorf("endpoint not provided") + } + + parsed, err := url.Parse(b.Endpoint) + if err != nil { + return out, metadata, fmt.Errorf("failed to parse endpoint, %w", err) + } + + request.URL = parsed + + return next.HandleBuild(ctx, in) +} + +type serializeOpGetCredential struct{} + +func (s *serializeOpGetCredential) ID() string { + return "OperationSerializer" +} + +func (s *serializeOpGetCredential) HandleSerialize(ctx context.Context, in smithymiddleware.SerializeInput, next smithymiddleware.SerializeHandler) ( + out smithymiddleware.SerializeOutput, metadata smithymiddleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type, %T", in.Request) + } + + params, ok := in.Parameters.(*GetCredentialsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters, %T", in.Parameters) + } + + const acceptHeader = "Accept" + request.Header[acceptHeader] = append(request.Header[acceptHeader][:0], "application/json") + + if len(params.AuthorizationToken) > 0 { + const authHeader = "Authorization" + request.Header[authHeader] = append(request.Header[authHeader][:0], params.AuthorizationToken) + } + + return next.HandleSerialize(ctx, in) +} + +type deserializeOpGetCredential struct{} + +func (d *deserializeOpGetCredential) ID() string { + return "OperationDeserializer" +} + +func (d *deserializeOpGetCredential) HandleDeserialize(ctx context.Context, in smithymiddleware.DeserializeInput, next smithymiddleware.DeserializeHandler) ( + out smithymiddleware.DeserializeOutput, metadata smithymiddleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, deserializeError(response) + } + + var shape *GetCredentialsOutput + if err = json.NewDecoder(response.Body).Decode(&shape); err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to deserialize json response, %w", err)} + } + + out.Result = shape + return out, metadata, err +} + +func deserializeError(response *smithyhttp.Response) error { + var errShape *EndpointError + err := json.NewDecoder(response.Body).Decode(&errShape) + if err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to decode error message, %w", err)} + } + + if response.StatusCode >= 500 { + errShape.Fault = smithy.FaultServer + } else { + errShape.Fault = smithy.FaultClient + } + + return errShape +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go 
b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go
new file mode 100644
index 00000000000..adc7fc6b000
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go
@@ -0,0 +1,136 @@
+// Package endpointcreds provides support for retrieving credentials from an
+// arbitrary HTTP endpoint.
+//
+// The credentials endpoint Provider can receive both static and refreshable
+// credentials that will expire. Credentials are static when an "Expiration"
+// value is not provided in the endpoint's response.
+//
+// Static credentials will never expire once they have been retrieved. The format
+// of the static credentials response:
+//
+//	{
+//	    "AccessKeyId" : "MUA...",
+//	    "SecretAccessKey" : "/7PC5om...."
+//	}
+//
+// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration
+// value in the response. The format of the refreshable credentials response:
+//
+//	{
+//	    "AccessKeyId" : "MUA...",
+//	    "SecretAccessKey" : "/7PC5om....",
+//	    "Token" : "AQoDY....=",
+//	    "Expiration" : "2016-02-25T06:03:31Z"
+//	}
+//
+// Errors should be returned in the following format and only returned with 400
+// or 500 HTTP status codes.
+//
+//	{
+//	    "code": "ErrorCode",
+//	    "message": "Helpful error message."
+//	}
+package endpointcreds
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client"
+	"github.com/aws/smithy-go/middleware"
+)
+
+// ProviderName is the name of the credentials provider.
+const ProviderName = `CredentialsEndpointProvider`
+
+type getCredentialsAPIClient interface {
+	GetCredentials(context.Context, *client.GetCredentialsInput, ...func(*client.Options)) (*client.GetCredentialsOutput, error)
+}
+
+// Provider satisfies the aws.CredentialsProvider interface, and is a client to
+// retrieve credentials from an arbitrary endpoint.
+type Provider struct {
+	// The AWS Client to make HTTP requests to the endpoint with. The endpoint
+	// the request will be made to is provided by the aws.Config's
+	// EndpointResolver.
+	client getCredentialsAPIClient
+
+	options Options
+}
+
+// HTTPClient is a client for sending HTTP requests.
+type HTTPClient interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+// Options is a structure of configurable options for the Provider.
+type Options struct {
+	// Endpoint to retrieve credentials from. Required.
+	Endpoint string
+
+	// HTTPClient to handle sending HTTP requests to the target endpoint.
+	HTTPClient HTTPClient
+
+	// Set of options to modify how the credentials operation is invoked.
+	APIOptions []func(*middleware.Stack) error
+
+	// The Retryer to be used for determining whether a failed request should be retried.
+	Retryer aws.Retryer
+
+	// Optional authorization token value, which if set will be used as the value
+	// of the Authorization header of the endpoint credential request.
+	AuthorizationToken string
+}
+
+// New returns a credentials Provider for retrieving AWS credentials
+// from an arbitrary endpoint.
+func New(endpoint string, optFns ...func(*Options)) *Provider {
+	o := Options{
+		Endpoint: endpoint,
+	}
+
+	for _, fn := range optFns {
+		fn(&o)
+	}
+
+	p := &Provider{
+		client: client.New(client.Options{
+			HTTPClient: o.HTTPClient,
+			Endpoint:   o.Endpoint,
+			APIOptions: o.APIOptions,
+			Retryer:    o.Retryer,
+		}),
+		options: o,
+	}
+
+	return p
+}
+
+// Retrieve will attempt to request the credentials from the endpoint the Provider
+// was configured for.
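+//
+// A minimal sketch (the endpoint URL is illustrative only):
+//
+//	p := endpointcreds.New("http://127.0.0.1:8080/credentials")
+//	creds, err := p.Retrieve(context.TODO())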
+// An error will be returned if the retrieval fails.
+func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) {
+	resp, err := p.getCredentials(ctx)
+	if err != nil {
+		return aws.Credentials{}, fmt.Errorf("failed to load credentials, %w", err)
+	}
+
+	creds := aws.Credentials{
+		AccessKeyID:     resp.AccessKeyID,
+		SecretAccessKey: resp.SecretAccessKey,
+		SessionToken:    resp.Token,
+		Source:          ProviderName,
+	}
+
+	if resp.Expiration != nil {
+		creds.CanExpire = true
+		creds.Expires = *resp.Expiration
+	}
+
+	return creds, nil
+}
+
+func (p *Provider) getCredentials(ctx context.Context) (*client.GetCredentialsOutput, error) {
+	return p.client.GetCredentials(ctx, &client.GetCredentialsInput{AuthorizationToken: p.options.AuthorizationToken})
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
new file mode 100644
index 00000000000..365a8554529
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package credentials
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.15.2"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/doc.go
new file mode 100644
index 00000000000..a3137b8fa9b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/doc.go
@@ -0,0 +1,92 @@
+// Package processcreds is a credentials provider to retrieve credentials from
+// an external, CLI-invoked process.
+//
+// WARNING: The following describes a method of sourcing credentials from an external
+// process. This can potentially be dangerous, so proceed with caution. Other
+// credential providers should be preferred if at all possible. If using this
+// option, you should make sure that the config file is as locked down as possible
+// using security best practices for your operating system.
+//
+// # Concurrency and caching
+//
+// The Provider is not safe to use concurrently, and does not provide any
+// caching of the credentials retrieved. You should wrap the Provider with an
+// `aws.CredentialsCache` to provide concurrency safety, and caching of
+// credentials.
+//
+// # Loading credentials with the SDK's AWS Config
+//
+// You can use credentials from an AWS shared config `credential_process` in a
+// variety of ways.
+//
+// One way is to set up your shared config file, located in the default
+// location, with the `credential_process` key and the command you want to be
+// called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable
+// (e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file.
+//
+//	[default]
+//	credential_process = /command/to/call
+//
+// Loading configuration using the external config will use the credential
+// process to retrieve credentials. NOTE: If there are credentials in the
+// profile you are using, the credential process will not be used.
+//
+//	// Load the default configuration to initialize credentials.
+//	cfg, _ := config.LoadDefaultConfig(context.TODO())
+//
+//	// Create an S3 service client to use the credentials.
+//	svc := s3.NewFromConfig(cfg)
+//
+// # Loading credentials with the Provider directly
+//
+// Another way to use the credentials process provider is by using the
+// `NewProvider` constructor to create the provider and providing it with a
+// command to be executed to retrieve credentials.
+//
+// The following example creates a credentials provider for a command, and wraps
+// it with the CredentialsCache before assigning the provider to the Amazon S3 API
+// client's Credentials option.
+//
+//	// Create credentials using the Provider.
+//	provider := processcreds.NewProvider("/path/to/command")
+//
+//	// Create the service client value configured for credentials.
+//	svc := s3.New(s3.Options{
+//		Credentials: aws.NewCredentialsCache(provider),
+//	})
+//
+// If you need more control, you can set any configurable options in the
+// credentials using one or more option functions.
+//
+//	provider := processcreds.NewProvider("/path/to/command",
+//		func(o *processcreds.Options) {
+//			// Override the provider's default timeout
+//			o.Timeout = 2 * time.Minute
+//		})
+//
+// You can also use your own `exec.Cmd` value by providing a value that satisfies
+// the `NewCommandBuilder` interface and using the `NewProviderCommand` constructor.
+//
+//	// Create a NewCommandBuilder that returns an exec.Cmd
+//	cmdBuilder := processcreds.NewCommandBuilderFunc(
+//		func(ctx context.Context) (*exec.Cmd, error) {
+//			cmd := exec.CommandContext(ctx,
+//				"customCLICommand",
+//				"-a", "argument",
+//			)
+//			cmd.Env = []string{
+//				"ENV_VAR_FOO=value",
+//				"ENV_VAR_BAR=other_value",
+//			}
+//
+//			return cmd, nil
+//		},
+//	)
+//
+//	// Create credentials using your exec.Cmd and custom timeout
+//	provider := processcreds.NewProviderCommand(cmdBuilder,
+//		func(o *processcreds.Options) {
+//			// Optionally override the provider's default timeout
+//			o.Timeout = 1 * time.Second
+//		})
+package processcreds
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go
new file mode 100644
index 00000000000..fe9345e287c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go
@@ -0,0 +1,281 @@
+package processcreds
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"os/exec"
+	"runtime"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/internal/sdkio"
+)
+
+const (
+	// ProviderName is the name this credentials provider will label any
+	// returned credentials Value with.
+	ProviderName = `ProcessProvider`
+
+	// DefaultTimeout is the default limit on the time a process can run.
+	DefaultTimeout = time.Duration(1) * time.Minute
+)
+
+// ProviderError is an error indicating failure initializing or executing the
+// process credentials provider.
+type ProviderError struct {
+	Err error
+}
+
+// Error returns the error message.
+func (e *ProviderError) Error() string {
+	return fmt.Sprintf("process provider error: %v", e.Err)
+}
+
+// Unwrap returns the underlying error the provider error wraps.
+func (e *ProviderError) Unwrap() error {
+	return e.Err
+}
+
+// Provider satisfies the aws.CredentialsProvider interface, and is a
+// client to retrieve credentials from a process.
+type Provider struct {
+	// Provides a constructor for the exec.Cmd values that are invoked by the
+	// provider for retrieving credentials. Use this to provide custom creation
+	// of exec.Cmd with things like environment variables, or other configuration.
+	//
+	// The provider defaults to the DefaultNewCommandBuilder.
+	commandBuilder NewCommandBuilder
+
+	options Options
+}
+
+// Options is the configuration options for configuring the Provider.
+type Options struct {
+	// Timeout limits the time a process can run.
+	Timeout time.Duration
+}
+
+// NewCommandBuilder provides the interface for specifying how the command that
+// the Provider will use to retrieve credentials is created.
+type NewCommandBuilder interface {
+	NewCommand(context.Context) (*exec.Cmd, error)
+}
+
+// NewCommandBuilderFunc provides a wrapper type around a function pointer to
+// satisfy the NewCommandBuilder interface.
+type NewCommandBuilderFunc func(context.Context) (*exec.Cmd, error)
+
+// NewCommand calls the underlying function pointer the builder was initialized with.
+func (fn NewCommandBuilderFunc) NewCommand(ctx context.Context) (*exec.Cmd, error) {
+	return fn(ctx)
+}
+
+// DefaultNewCommandBuilder provides the default NewCommandBuilder
+// implementation used by the provider. It takes a command and arguments to
+// invoke. The command will also be initialized with the current process
+// environment variables, stderr, and stdin pipes.
+type DefaultNewCommandBuilder struct {
+	Args []string
+}
+
+// NewCommand returns an initialized exec.Cmd with the builder's initialized
+// Args. The command is also initialized with the current process environment
+// variables, stderr, and stdin pipes.
+func (b DefaultNewCommandBuilder) NewCommand(ctx context.Context) (*exec.Cmd, error) {
+	var cmdArgs []string
+	if runtime.GOOS == "windows" {
+		cmdArgs = []string{"cmd.exe", "/C"}
+	} else {
+		cmdArgs = []string{"sh", "-c"}
+	}
+
+	if len(b.Args) == 0 {
+		return nil, &ProviderError{
+			Err: fmt.Errorf("failed to prepare command: command must not be empty"),
+		}
+	}
+
+	cmdArgs = append(cmdArgs, b.Args...)
+	cmd := exec.CommandContext(ctx, cmdArgs[0], cmdArgs[1:]...)
+	cmd.Env = os.Environ()
+
+	cmd.Stderr = os.Stderr // display stderr on console for MFA
+	cmd.Stdin = os.Stdin   // enable stdin for MFA
+
+	return cmd, nil
+}
+
+// NewProvider returns a pointer to a new Provider that retrieves credentials
+// by executing the given command.
+//
+// The provider defaults to the DefaultNewCommandBuilder for creating the
+// command the Provider will use to retrieve credentials.
+func NewProvider(command string, options ...func(*Options)) *Provider {
+	var args []string
+
+	// Ensure that the command arguments are not set if the provided command is
+	// empty. This will error out when the command is executed since no
+	// arguments are specified.
+	if len(command) > 0 {
+		args = []string{command}
+	}
+
+	commandBuilder := DefaultNewCommandBuilder{
+		Args: args,
+	}
+	return NewProviderCommand(commandBuilder, options...)
+}
+
+// NewProviderCommand returns a pointer to a new Provider with the specified
+// command builder, and default timeout duration. Use this to provide custom
+// creation of exec.Cmd for options like environment variables, or other
+// configuration.
+func NewProviderCommand(builder NewCommandBuilder, options ...func(*Options)) *Provider {
+	p := &Provider{
+		commandBuilder: builder,
+		options: Options{
+			Timeout: DefaultTimeout,
+		},
+	}
+
+	for _, option := range options {
+		option(&p.options)
+	}
+
+	return p
+}
+
+// A CredentialProcessResponse is the AWS credentials format that must be
+// returned when executing an external credential_process.
+type CredentialProcessResponse struct {
+	// As of this writing, the Version key must be set to 1.
+	// This might increment over time as the structure evolves.
+	Version int
+
+	// The access key ID that identifies the temporary security credentials.
+	AccessKeyID string `json:"AccessKeyId"`
+
+	// The secret access key that can be used to sign requests.
+	SecretAccessKey string
+
+	// The token that users must pass to the service API to use the temporary credentials.
+	SessionToken string
+
+	// The date on which the current credentials expire.
+	Expiration *time.Time
+}
+
+// Retrieve executes the credential process command and returns the
+// credentials, or an error if the command fails.
+func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) {
+	out, err := p.executeCredentialProcess(ctx)
+	if err != nil {
+		return aws.Credentials{Source: ProviderName}, err
+	}
+
+	// Deserialize and validate the response
+	resp := &CredentialProcessResponse{}
+	if err = json.Unmarshal(out, resp); err != nil {
+		return aws.Credentials{Source: ProviderName}, &ProviderError{
+			Err: fmt.Errorf("failed to parse process output: %s, error: %w", out, err),
+		}
+	}
+
+	if resp.Version != 1 {
+		return aws.Credentials{Source: ProviderName}, &ProviderError{
+			Err: fmt.Errorf("wrong version in process output (not 1)"),
+		}
+	}
+
+	if len(resp.AccessKeyID) == 0 {
+		return aws.Credentials{Source: ProviderName}, &ProviderError{
+			Err: fmt.Errorf("missing AccessKeyId in process output"),
+		}
+	}
+
+	if len(resp.SecretAccessKey) == 0 {
+		return aws.Credentials{Source: ProviderName}, &ProviderError{
+			Err: fmt.Errorf("missing SecretAccessKey in process output"),
+		}
+	}
+
+	creds := aws.Credentials{
+		Source:          ProviderName,
+		AccessKeyID:     resp.AccessKeyID,
+		SecretAccessKey: resp.SecretAccessKey,
+		SessionToken:    resp.SessionToken,
+	}
+
+	// Handle expiration
+	if resp.Expiration != nil {
+		creds.CanExpire = true
+		creds.Expires = *resp.Expiration
+	}
+
+	return creds, nil
+}
+
+// executeCredentialProcess starts the credential process on the OS and
+// returns the results or an error.
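+//
+// For reference, the JSON document the process is expected to print on stdout
+// has the following shape (values are illustrative; see the
+// CredentialProcessResponse type above):
+//
+//	{
+//	    "Version": 1,
+//	    "AccessKeyId": "AKID...",
+//	    "SecretAccessKey": "SECRET...",
+//	    "SessionToken": "TOKEN...",
+//	    "Expiration": "2016-02-25T06:03:31Z"
+//	}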
+func (p *Provider) executeCredentialProcess(ctx context.Context) ([]byte, error) {
+	if p.options.Timeout >= 0 {
+		var cancelFunc func()
+		ctx, cancelFunc = context.WithTimeout(ctx, p.options.Timeout)
+		defer cancelFunc()
+	}
+
+	cmd, err := p.commandBuilder.NewCommand(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	// get creds json on process's stdout
+	output := bytes.NewBuffer(make([]byte, 0, int(8*sdkio.KibiByte)))
+	if cmd.Stdout != nil {
+		cmd.Stdout = io.MultiWriter(cmd.Stdout, output)
+	} else {
+		cmd.Stdout = output
+	}
+
+	execCh := make(chan error, 1)
+	go executeCommand(cmd, execCh)
+
+	select {
+	case execError := <-execCh:
+		if execError == nil {
+			break
+		}
+		select {
+		case <-ctx.Done():
+			return output.Bytes(), &ProviderError{
+				Err: fmt.Errorf("credential process timed out: %w", execError),
+			}
+		default:
+			return output.Bytes(), &ProviderError{
+				Err: fmt.Errorf("error in credential_process: %w", execError),
+			}
+		}
+	}
+
+	out := output.Bytes()
+	if runtime.GOOS == "windows" {
+		// windows adds slashes to quotes
+		out = bytes.ReplaceAll(out, []byte(`\"`), []byte(`"`))
+	}
+
+	return out, nil
+}
+
+func executeCommand(cmd *exec.Cmd, execCh chan error) {
+	// Start the command and wait for it to finish.
+	err := cmd.Start()
+	if err == nil {
+		err = cmd.Wait()
+	}
+
+	execCh <- err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go
new file mode 100644
index 00000000000..ece1e65f73b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go
@@ -0,0 +1,81 @@
+// Package ssocreds provides a credential provider for retrieving temporary AWS
+// credentials using an SSO access token.
+//
+// IMPORTANT: The provider in this package does not initiate or perform the AWS
+// SSO login flow. The SDK provider expects that you have already performed the
+// SSO login flow using the AWS CLI's "aws sso login" command, or by some
+// other mechanism. The provider must find a valid non-expired access token for
+// the AWS SSO user portal URL in ~/.aws/sso/cache. If a cached token is not
+// found, it is expired, or the file is malformed, an error will be returned.
+//
+// # Loading AWS SSO credentials with the AWS shared configuration file
+//
+// You can configure AWS SSO credentials from the AWS shared configuration file by
+// specifying the required keys in the profile and referencing an sso-session:
+//
+//	sso_session
+//	sso_account_id
+//	sso_role_name
+//
+// For example, the following defines a profile "devsso" and specifies the AWS
+// SSO parameters that define the target account, role, sign-on portal, and
+// the region where the user portal is located. Note: all SSO arguments must be
+// provided, or an error will be returned.
+//
+//	[profile devsso]
+//	sso_session = dev-session
+//	sso_role_name = SSOReadOnlyRole
+//	sso_account_id = 123456789012
+//
+//	[sso-session dev-session]
+//	sso_start_url = https://my-sso-portal.awsapps.com/start
+//	sso_region = us-east-1
+//	sso_registration_scopes = sso:account:access
+//
+// Using the config module, you can load the AWS SDK shared configuration, and
+// specify that this profile be used to retrieve credentials.
+// For example:
+//
+//	config, err := config.LoadDefaultConfig(context.TODO(), config.WithSharedConfigProfile("devsso"))
+//	if err != nil {
+//		return err
+//	}
+//
+// # Programmatically loading AWS SSO credentials directly
+//
+// You can programmatically construct the AWS SSO Provider in your application,
+// and provide the necessary information to load and retrieve temporary
+// credentials using an access token from ~/.aws/sso/cache.
+//
+//	ssoClient := sso.NewFromConfig(cfg)
+//	ssoOidcClient := ssooidc.NewFromConfig(cfg)
+//	tokenPath, err := ssocreds.StandardCachedTokenFilepath("dev-session")
+//	if err != nil {
+//		return err
+//	}
+//
+//	var provider aws.CredentialsProvider
+//	provider = ssocreds.New(ssoClient, "123456789012", "SSOReadOnlyRole", "https://my-sso-portal.awsapps.com/start", func(options *ssocreds.Options) {
+//		options.SSOTokenProvider = ssocreds.NewSSOTokenProvider(ssoOidcClient, tokenPath)
+//	})
+//
+//	// Wrap the provider with aws.CredentialsCache to cache the credentials until their expiry time
+//	provider = aws.NewCredentialsCache(provider)
+//
+//	credentials, err := provider.Retrieve(context.TODO())
+//	if err != nil {
+//		return err
+//	}
+//
+// It is important that you wrap the Provider with aws.CredentialsCache if you
+// are programmatically constructing the provider directly. This prevents your
+// application from accessing the cached access token and requesting new
+// credentials each time the credentials are used.
+//
+// # Additional Resources
+//
+// Configuring the AWS CLI to use AWS Single Sign-On:
+// https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html
+//
+// AWS Single Sign-On User Guide:
+// https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html
+package ssocreds
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go
new file mode 100644
index 00000000000..3b97e6dd406
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go
@@ -0,0 +1,233 @@
+package ssocreds
+
+import (
+	"crypto/sha1"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+	"github.com/aws/aws-sdk-go-v2/internal/shareddefaults"
+)
+
+var osUserHomeDir = shareddefaults.UserHomeDir
+
+// StandardCachedTokenFilepath returns the filepath for the cached SSO token
+// file, or an error if the path cannot be derived. The key is used to compute
+// a SHA1 hash that is hex encoded.
+//
+// Derives the filepath using the key as:
+//
+//	~/.aws/sso/cache/<hex-encoded SHA1 of key>.json
+func StandardCachedTokenFilepath(key string) (string, error) {
+	homeDir := osUserHomeDir()
+	if len(homeDir) == 0 {
+		return "", fmt.Errorf("unable to get USER's home directory for cached token")
+	}
+	hash := sha1.New()
+	if _, err := hash.Write([]byte(key)); err != nil {
+		return "", fmt.Errorf("unable to compute cached token filepath key SHA1 hash, %w", err)
+	}
+
+	cacheFilename := strings.ToLower(hex.EncodeToString(hash.Sum(nil))) + ".json"
+
+	return filepath.Join(homeDir, ".aws", "sso", "cache", cacheFilename), nil
+}
+
+type tokenKnownFields struct {
+	AccessToken string   `json:"accessToken,omitempty"`
+	ExpiresAt   *rfc3339 `json:"expiresAt,omitempty"`
+
+	RefreshToken string `json:"refreshToken,omitempty"`
+	ClientID     string `json:"clientId,omitempty"`
+	ClientSecret string `json:"clientSecret,omitempty"`
+}
+
+type token struct {
+	tokenKnownFields
+	UnknownFields map[string]interface{} `json:"-"`
+}
+
+func (t token) MarshalJSON() ([]byte, error) {
+	fields := map[string]interface{}{}
+
+	setTokenFieldString(fields, "accessToken", t.AccessToken)
+	setTokenFieldRFC3339(fields, "expiresAt", t.ExpiresAt)
+
+	setTokenFieldString(fields, "refreshToken", t.RefreshToken)
+	setTokenFieldString(fields, "clientId", t.ClientID)
+	setTokenFieldString(fields, "clientSecret", t.ClientSecret)
+
+	for k, v := range t.UnknownFields {
+		if _, ok := fields[k]; ok {
+			return nil, fmt.Errorf("unknown token field %v, duplicates known field", k)
+		}
+		fields[k] = v
+	}
+
+	return json.Marshal(fields)
+}
+
+func setTokenFieldString(fields map[string]interface{}, key, value string) {
+	if value == "" {
+		return
+	}
+	fields[key] = value
+}
+func setTokenFieldRFC3339(fields map[string]interface{}, key string, value *rfc3339) {
+	if value == nil {
+		return
+	}
+	fields[key] = value
+}
+
+func (t *token) UnmarshalJSON(b []byte) error {
+	var fields map[string]interface{}
+	if err := json.Unmarshal(b, &fields); err != nil {
+		return err
+	}
+
+	t.UnknownFields = map[string]interface{}{}
+
+	for k, v := range fields {
+		var err error
+		switch k {
+		case "accessToken":
+			err = getTokenFieldString(v, &t.AccessToken)
+		case "expiresAt":
+			err = getTokenFieldRFC3339(v, &t.ExpiresAt)
+		case "refreshToken":
+			err = getTokenFieldString(v, &t.RefreshToken)
+		case "clientId":
+			err = getTokenFieldString(v, &t.ClientID)
+		case "clientSecret":
+			err = getTokenFieldString(v, &t.ClientSecret)
+		default:
+			t.UnknownFields[k] = v
+		}
+
+		if err != nil {
+			return fmt.Errorf("field %q, %w", k, err)
+		}
+	}
+
+	return nil
+}
+
+func getTokenFieldString(v interface{}, value *string) error {
+	var ok bool
+	*value, ok = v.(string)
+	if !ok {
+		return fmt.Errorf("expect value to be string, got %T", v)
+	}
+	return nil
+}
+
+func getTokenFieldRFC3339(v interface{}, value **rfc3339) error {
+	var stringValue string
+	if err := getTokenFieldString(v, &stringValue); err != nil {
+		return err
+	}
+
+	timeValue, err := parseRFC3339(stringValue)
+	if err != nil {
+		return err
+	}
+
+	*value = &timeValue
+	return nil
+}
+
+func loadCachedToken(filename string) (token, error) {
+	fileBytes, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return token{}, fmt.Errorf("failed to read cached SSO token file, %w", err)
+	}
+
+	var t token
+	if err := json.Unmarshal(fileBytes, &t); err != nil {
+		return token{}, fmt.Errorf("failed to parse cached SSO token file, %w", err)
+	}
+
+	if len(t.AccessToken) == 0 || t.ExpiresAt == nil || time.Time(*t.ExpiresAt).IsZero() {
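+		// A missing access token or expiry means the cache file was not
+		// written by a completed SSO login; reject it rather than treat it as
+		// a token that never expires.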
+		return token{}, fmt.Errorf(
+			"cached SSO token must contain accessToken and expiresAt fields")
+	}
+
+	return t, nil
+}
+
+func storeCachedToken(filename string, t token, fileMode os.FileMode) (err error) {
+	tmpFilename := filename + ".tmp-" + strconv.FormatInt(sdk.NowTime().UnixNano(), 10)
+	if err := writeCacheFile(tmpFilename, fileMode, t); err != nil {
+		return err
+	}
+
+	if err := os.Rename(tmpFilename, filename); err != nil {
+		return fmt.Errorf("failed to replace old cached SSO token file, %w", err)
+	}
+
+	return nil
+}
+
+func writeCacheFile(filename string, fileMode os.FileMode, t token) (err error) {
+	var f *os.File
+	f, err = os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_RDWR, fileMode)
+	if err != nil {
+		return fmt.Errorf("failed to create cached SSO token file, %w", err)
+	}
+
+	defer func() {
+		closeErr := f.Close()
+		if err == nil && closeErr != nil {
+			err = fmt.Errorf("failed to close cached SSO token file, %w", closeErr)
+		}
+	}()
+
+	encoder := json.NewEncoder(f)
+
+	if err = encoder.Encode(t); err != nil {
+		return fmt.Errorf("failed to serialize cached SSO token, %w", err)
+	}
+
+	return nil
+}
+
+type rfc3339 time.Time
+
+func parseRFC3339(v string) (rfc3339, error) {
+	parsed, err := time.Parse(time.RFC3339, v)
+	if err != nil {
+		return rfc3339{}, fmt.Errorf("expected RFC3339 timestamp: %w", err)
+	}
+
+	return rfc3339(parsed), nil
+}
+
+func (r *rfc3339) UnmarshalJSON(bytes []byte) (err error) {
+	var value string
+
+	// Use JSON unmarshal to unescape the quoted value, making use of JSON's
+	// unquoting rules.
+	if err = json.Unmarshal(bytes, &value); err != nil {
+		return err
+	}
+
+	*r, err = parseRFC3339(value)
+
+	return err
+}
+
+func (r *rfc3339) MarshalJSON() ([]byte, error) {
+	value := time.Time(*r).Format(time.RFC3339)
+
+	// Use JSON marshal to escape and quote the value, making use of JSON's
+	// quoting rules.
+	return json.Marshal(value)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go
new file mode 100644
index 00000000000..b3cf7853e76
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go
@@ -0,0 +1,152 @@
+package ssocreds
+
+import (
+	"context"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+	"github.com/aws/aws-sdk-go-v2/service/sso"
+)
+
+// ProviderName is the name of the provider used to specify the source of
+// credentials.
+const ProviderName = "SSOProvider"
+
+// GetRoleCredentialsAPIClient is an API client that implements the
+// GetRoleCredentials operation.
+type GetRoleCredentialsAPIClient interface {
+	GetRoleCredentials(context.Context, *sso.GetRoleCredentialsInput, ...func(*sso.Options)) (
+		*sso.GetRoleCredentialsOutput, error,
+	)
+}
+
+// Options is the Provider options structure.
+type Options struct {
+	// The Client which is configured for the AWS Region where the AWS SSO user
+	// portal is located.
+	Client GetRoleCredentialsAPIClient
+
+	// The AWS account that is assigned to the user.
+	AccountID string
+
+	// The role name that is assigned to the user.
+	RoleName string
+
+	// The URL that points to the organization's AWS Single Sign-On (AWS SSO)
+	// user portal.
+	StartURL string
+
+	// The filepath the cached token will be retrieved from. If unset, the
+	// Provider will use the StartURL to derive the filepath:
+	//
+	//	~/.aws/sso/cache/<hex-encoded-sha1-of-startURL>.json
+	//
+	// If a custom cached token filepath is used, the Provider's StartURL
+	// parameter will be ignored.
+	CachedTokenFilepath string
+
+	// Used by the SSOCredentialProvider if a token configuration
+	// profile is used in the shared config.
+	SSOTokenProvider *SSOTokenProvider
+}
+
+// Provider is an AWS credential provider that retrieves temporary AWS
+// credentials by exchanging an SSO login token.
+type Provider struct {
+	options Options
+
+	cachedTokenFilepath string
+}
+
+// New returns a new AWS Single Sign-On (AWS SSO) credential provider. The
+// provided client is expected to be configured for the AWS Region where the
+// AWS SSO user portal is located.
+func New(client GetRoleCredentialsAPIClient, accountID, roleName, startURL string, optFns ...func(options *Options)) *Provider {
+	options := Options{
+		Client:    client,
+		AccountID: accountID,
+		RoleName:  roleName,
+		StartURL:  startURL,
+	}
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	return &Provider{
+		options:             options,
+		cachedTokenFilepath: options.CachedTokenFilepath,
+	}
+}
+
+// Retrieve retrieves temporary AWS credentials from the configured Amazon
+// Single Sign-On (AWS SSO) user portal by exchanging the accessToken present
+// in ~/.aws/sso/cache. However, if a token provider configuration exists
+// in the shared config, the token provider is used rather than reading the
+// cached token directly.
+func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) {
+	var accessToken *string
+	if p.options.SSOTokenProvider != nil {
+		token, err := p.options.SSOTokenProvider.RetrieveBearerToken(ctx)
+		if err != nil {
+			return aws.Credentials{}, err
+		}
+		accessToken = &token.Value
+	} else {
+		if p.cachedTokenFilepath == "" {
+			cachedTokenFilepath, err := StandardCachedTokenFilepath(p.options.StartURL)
+			if err != nil {
+				return aws.Credentials{}, &InvalidTokenError{Err: err}
+			}
+			p.cachedTokenFilepath = cachedTokenFilepath
+		}
+
+		tokenFile, err := loadCachedToken(p.cachedTokenFilepath)
+		if err != nil {
+			return aws.Credentials{}, &InvalidTokenError{Err: err}
+		}
+
+		if tokenFile.ExpiresAt == nil || sdk.NowTime().After(time.Time(*tokenFile.ExpiresAt)) {
+			return aws.Credentials{}, &InvalidTokenError{}
+		}
+		accessToken = &tokenFile.AccessToken
+	}
+
+	output, err := p.options.Client.GetRoleCredentials(ctx, &sso.GetRoleCredentialsInput{
+		AccessToken: accessToken,
+		AccountId:   &p.options.AccountID,
+		RoleName:    &p.options.RoleName,
+	})
+	if err != nil {
+		return aws.Credentials{}, err
+	}
+
+	return aws.Credentials{
+		AccessKeyID:     aws.ToString(output.RoleCredentials.AccessKeyId),
+		SecretAccessKey: aws.ToString(output.RoleCredentials.SecretAccessKey),
+		SessionToken:    aws.ToString(output.RoleCredentials.SessionToken),
+		CanExpire:       true,
+		Expires:         time.Unix(0, output.RoleCredentials.Expiration*int64(time.Millisecond)).UTC(),
+		Source:          ProviderName,
+	}, nil
+}
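For reference, a minimal sketch of wiring this provider up by hand (config.LoadDefaultConfig normally does this automatically when a profile is SSO-configured). The account ID, role name, and start URL below are placeholder values, and the token cache is assumed to have already been populated by `aws sso login`:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/credentials/ssocreds"
	"github.com/aws/aws-sdk-go-v2/service/sso"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}

	// The SSO client must target the region hosting the SSO user portal.
	ssoClient := sso.NewFromConfig(cfg)

	// Account ID, role name, and start URL are placeholders; they normally
	// come from the shared config profile.
	provider := ssocreds.New(ssoClient, "123456789012", "MyRole",
		"https://my-sso-portal.awsapps.com/start")

	// Wrap in a CredentialsCache so retrievals are cached and synchronized.
	creds, err := aws.NewCredentialsCache(provider).Retrieve(context.TODO())
	if err != nil {
		// e.g. *ssocreds.InvalidTokenError if the cached token has expired.
		log.Fatal(err)
	}
	fmt.Println("retrieved credentials from:", creds.Source)
}
```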
+
+// InvalidTokenError is the error type that is returned if the loaded token has
+// expired or is otherwise invalid. To refresh the SSO session, run AWS SSO
+// login with the corresponding profile.
+type InvalidTokenError struct {
+	Err error
+}
+
+func (i *InvalidTokenError) Unwrap() error {
+	return i.Err
+}
+
+func (i *InvalidTokenError) Error() string {
+	const msg = "the SSO session has expired or is invalid"
+	if i.Err == nil {
+		return msg
+	}
+	return msg + ": " + i.Err.Error()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_token_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_token_provider.go
new file mode 100644
index 00000000000..7f4fc546772
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_token_provider.go
@@ -0,0 +1,147 @@
+package ssocreds
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
+	"github.com/aws/smithy-go/auth/bearer"
+)
+
+// CreateTokenAPIClient provides the interface for the SSOTokenProvider's API
+// client for calling the CreateToken operation to refresh the SSO token.
+type CreateTokenAPIClient interface {
+	CreateToken(context.Context, *ssooidc.CreateTokenInput, ...func(*ssooidc.Options)) (
+		*ssooidc.CreateTokenOutput, error,
+	)
+}
+
+// SSOTokenProviderOptions provides the options for configuring the
+// SSOTokenProvider.
+type SSOTokenProviderOptions struct {
+	// Client that can be overridden
+	Client CreateTokenAPIClient
+
+	// The set of API Client options to be applied when invoking the
+	// CreateToken operation.
+	ClientOptions []func(*ssooidc.Options)
+
+	// The path the file containing the cached SSO token will be read from.
+	// Initialized by the NewSSOTokenProvider's cachedTokenFilepath parameter.
+	CachedTokenFilepath string
+}
+
+// SSOTokenProvider provides a utility for refreshing SSO AccessTokens for
+// Bearer Authentication. The SSOTokenProvider can only be used to refresh
+// already cached SSO Tokens. This utility cannot perform the initial SSO
+// create token.
+//
+// The SSOTokenProvider is not safe to use concurrently. It must be wrapped in
+// a utility such as smithy-go's auth/bearer#TokenCache. The SDK's
+// config.LoadDefaultConfig will automatically wrap the SSOTokenProvider with
+// the smithy-go TokenCache if the loaded external configuration is configured
+// for an SSO session.
+//
+// The initial SSO create token should be performed with the AWS CLI before the
+// Go application using the SSOTokenProvider needs to retrieve the SSO token.
+// If the AWS CLI has not created the token cache file, this provider will
+// return an error when attempting to retrieve the cached token.
+//
+// This provider will attempt to refresh the cached SSO token periodically if
+// needed when RetrieveBearerToken is called.
+//
+// A utility such as the AWS CLI must be used to initially create the SSO
+// session and cached token file.
+// https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html
+type SSOTokenProvider struct {
+	options SSOTokenProviderOptions
+}
+
+var _ bearer.TokenProvider = (*SSOTokenProvider)(nil)
+
+// NewSSOTokenProvider returns an initialized SSOTokenProvider that will
+// periodically refresh the SSO token cache stored in the cachedTokenFilepath.
+// The cachedTokenFilepath file's content will be rewritten by the token
+// provider when the token is refreshed.
+//
+// The client must be configured for the AWS region the SSO token was created for.
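A minimal sketch of using the token provider directly, assuming `aws sso login` has already created the cache file. The session name is a placeholder, and the provider is wrapped in smithy-go's bearer token cache, since the provider on its own is not safe for concurrent use:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/credentials/ssocreds"
	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
	"github.com/aws/smithy-go/auth/bearer"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}

	// "my-sso-session" is a placeholder; the cache file is keyed by the
	// sso-session name (or the start URL for legacy SSO profiles).
	cachePath, err := ssocreds.StandardCachedTokenFilepath("my-sso-session")
	if err != nil {
		log.Fatal(err)
	}

	provider := ssocreds.NewSSOTokenProvider(ssooidc.NewFromConfig(cfg), cachePath)

	// Wrap in the smithy-go bearer TokenCache for synchronized access,
	// as the provider's documentation above requires.
	tokens := bearer.NewTokenCache(provider)

	tok, err := tokens.RetrieveBearerToken(context.TODO())
	if err != nil {
		// e.g. no cache file because `aws sso login` was never run.
		log.Fatal(err)
	}
	fmt.Println("SSO access token expires at:", tok.Expires)
}
```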
+func NewSSOTokenProvider(client CreateTokenAPIClient, cachedTokenFilepath string, optFns ...func(o *SSOTokenProviderOptions)) *SSOTokenProvider { + options := SSOTokenProviderOptions{ + Client: client, + CachedTokenFilepath: cachedTokenFilepath, + } + for _, fn := range optFns { + fn(&options) + } + + provider := &SSOTokenProvider{ + options: options, + } + + return provider +} + +// RetrieveBearerToken returns the SSO token stored in the cachedTokenFilepath +// the SSOTokenProvider was created with. If the token has expired +// RetrieveBearerToken will attempt to refresh it. If the token cannot be +// refreshed or is not present an error will be returned. +// +// A utility such as the AWS CLI must be used to initially create the SSO +// session and cached token file. https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html +func (p SSOTokenProvider) RetrieveBearerToken(ctx context.Context) (bearer.Token, error) { + cachedToken, err := loadCachedToken(p.options.CachedTokenFilepath) + if err != nil { + return bearer.Token{}, err + } + + if cachedToken.ExpiresAt != nil && sdk.NowTime().After(time.Time(*cachedToken.ExpiresAt)) { + cachedToken, err = p.refreshToken(ctx, cachedToken) + if err != nil { + return bearer.Token{}, fmt.Errorf("refresh cached SSO token failed, %w", err) + } + } + + expiresAt := aws.ToTime((*time.Time)(cachedToken.ExpiresAt)) + return bearer.Token{ + Value: cachedToken.AccessToken, + CanExpire: !expiresAt.IsZero(), + Expires: expiresAt, + }, nil +} + +func (p SSOTokenProvider) refreshToken(ctx context.Context, cachedToken token) (token, error) { + if cachedToken.ClientSecret == "" || cachedToken.ClientID == "" || cachedToken.RefreshToken == "" { + return token{}, fmt.Errorf("cached SSO token is expired, or not present, and cannot be refreshed") + } + + createResult, err := p.options.Client.CreateToken(ctx, &ssooidc.CreateTokenInput{ + ClientId: &cachedToken.ClientID, + ClientSecret: &cachedToken.ClientSecret, + RefreshToken: &cachedToken.RefreshToken, + GrantType: aws.String("refresh_token"), + }, p.options.ClientOptions...) + if err != nil { + return token{}, fmt.Errorf("unable to refresh SSO token, %w", err) + } + + expiresAt := sdk.NowTime().Add(time.Duration(createResult.ExpiresIn) * time.Second) + + cachedToken.AccessToken = aws.ToString(createResult.AccessToken) + cachedToken.ExpiresAt = (*rfc3339)(&expiresAt) + cachedToken.RefreshToken = aws.ToString(createResult.RefreshToken) + + fileInfo, err := os.Stat(p.options.CachedTokenFilepath) + if err != nil { + return token{}, fmt.Errorf("failed to stat cached SSO token file %w", err) + } + + if err = storeCachedToken(p.options.CachedTokenFilepath, cachedToken, fileInfo.Mode()); err != nil { + return token{}, fmt.Errorf("unable to cache refreshed SSO token, %w", err) + } + + return cachedToken, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go new file mode 100644 index 00000000000..d525cac0960 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go @@ -0,0 +1,53 @@ +package credentials + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" +) + +const ( + // StaticCredentialsName provides a name of Static provider + StaticCredentialsName = "StaticCredentials" +) + +// StaticCredentialsEmptyError is emitted when static credentials are empty. 
+type StaticCredentialsEmptyError struct{}

+func (*StaticCredentialsEmptyError) Error() string {
+	return "static credentials are empty"
+}
+
+// A StaticCredentialsProvider is a set of credentials which are set, and will
+// never expire.
+type StaticCredentialsProvider struct {
+	Value aws.Credentials
+}
+
+// NewStaticCredentialsProvider returns a StaticCredentialsProvider initialized with the AWS
+// credentials passed in.
+func NewStaticCredentialsProvider(key, secret, session string) StaticCredentialsProvider {
+	return StaticCredentialsProvider{
+		Value: aws.Credentials{
+			AccessKeyID:     key,
+			SecretAccessKey: secret,
+			SessionToken:    session,
+		},
+	}
+}
+
+// Retrieve returns the credentials or error if the credentials are invalid.
+func (s StaticCredentialsProvider) Retrieve(_ context.Context) (aws.Credentials, error) {
+	v := s.Value
+	if v.AccessKeyID == "" || v.SecretAccessKey == "" {
+		return aws.Credentials{
+			Source: StaticCredentialsName,
+		}, &StaticCredentialsEmptyError{}
+	}
+
+	if len(v.Source) == 0 {
+		v.Source = StaticCredentialsName
+	}
+
+	return v, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go
new file mode 100644
index 00000000000..289707b6de4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go
@@ -0,0 +1,320 @@
+// Package stscreds are credential Providers to retrieve STS AWS credentials.
+//
+// STS provides multiple ways to retrieve credentials which can be used when making
+// future AWS service API operation calls.
+//
+// The SDK will ensure that per instance of credentials.Credentials all requests
+// to refresh the credentials will be synchronized. But, the SDK is unable to
+// ensure synchronous usage of the AssumeRoleProvider if the value is shared
+// between multiple Credentials or service clients.
+//
+// # Assume Role
+//
+// To assume an IAM role using STS with the SDK you can create a new Credentials
+// with the SDK's stscreds package.
+//
+//	// Initial credentials loaded from SDK's default credential chain. Such as
+//	// the environment, shared credentials (~/.aws/credentials), or EC2 Instance
+//	// Role. These credentials will be used to make the STS Assume Role API call.
+//	cfg, err := config.LoadDefaultConfig(context.TODO())
+//	if err != nil {
+//		panic(err)
+//	}
+//
+//	// Create the credentials from AssumeRoleProvider to assume the role
+//	// referenced by the "myRoleARN" ARN.
+//	stsSvc := sts.NewFromConfig(cfg)
+//	creds := stscreds.NewAssumeRoleProvider(stsSvc, "myRoleArn")
+//
+//	cfg.Credentials = aws.NewCredentialsCache(creds)
+//
+//	// Create service client value configured for credentials
+//	// from assumed role.
+//	svc := s3.NewFromConfig(cfg)
+//
+// # Assume Role with custom MFA Token provider
+//
+// To assume an IAM role with a MFA token you can either specify a custom MFA
+// token provider or use the SDK's built in StdinTokenProvider that will prompt
+// the user for a token code each time the credentials need to be refreshed.
+// Specifying a custom token provider allows you to control where the token
+// code is retrieved from, and how it is refreshed.
+//
+// With a custom token provider, the provider is responsible for refreshing the
+// token code when called.
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO())
+//	if err != nil {
+//		panic(err)
+//	}
+//
+//	staticTokenProvider := func() (string, error) {
+//		return someTokenCode, nil
+//	}
+//
+//	// Create the credentials from AssumeRoleProvider to assume the role
+//	// referenced by the "myRoleARN" ARN using the MFA token code provided.
+//	creds := stscreds.NewAssumeRoleProvider(sts.NewFromConfig(cfg), "myRoleArn", func(o *stscreds.AssumeRoleOptions) {
+//		o.SerialNumber = aws.String("myTokenSerialNumber")
+//		o.TokenProvider = staticTokenProvider
+//	})
+//
+//	cfg.Credentials = aws.NewCredentialsCache(creds)
+//
+//	// Create service client value configured for credentials
+//	// from assumed role.
+//	svc := s3.NewFromConfig(cfg)
+//
+// # Assume Role with MFA Token Provider
+//
+// To assume an IAM role with MFA for longer running tasks where the credentials
+// may need to be refreshed, setting the TokenProvider field of AssumeRoleProvider
+// will allow the credential provider to prompt for a new MFA token code when the
+// role's credentials need to be refreshed.
+//
+// The StdinTokenProvider function is available to prompt on stdin to retrieve
+// the MFA token code from the user. You can also implement custom prompts by
+// satisfying the TokenProvider function signature.
+//
+// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+// have undesirable results as the StdinTokenProvider will not be synchronized. A
+// single Credentials with an AssumeRoleProvider can be shared safely.
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO())
+//	if err != nil {
+//		panic(err)
+//	}
+//
+//	// Create the credentials from AssumeRoleProvider to assume the role
+//	// referenced by the "myRoleARN" ARN using the MFA token code provided.
+//	creds := stscreds.NewAssumeRoleProvider(sts.NewFromConfig(cfg), "myRoleArn", func(o *stscreds.AssumeRoleOptions) {
+//		o.SerialNumber = aws.String("myTokenSerialNumber")
+//		o.TokenProvider = stscreds.StdinTokenProvider
+//	})
+//
+//	cfg.Credentials = aws.NewCredentialsCache(creds)
+//
+//	// Create service client value configured for credentials
+//	// from assumed role.
+//	svc := s3.NewFromConfig(cfg)
+package stscreds
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/sts"
+	"github.com/aws/aws-sdk-go-v2/service/sts/types"
+)
+
+// StdinTokenProvider will prompt on stdout and read from stdin for a string value.
+// An error is returned if reading from stdin fails.
+//
+// Use this function to read MFA tokens from stdin. The function makes no attempt
+// to make atomic prompts from stdin across multiple goroutines.
+//
+// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+// have undesirable results as the StdinTokenProvider will not be synchronized. A
+// single Credentials with an AssumeRoleProvider can be shared safely.
+//
+// Will wait forever until something is provided on the stdin.
+func StdinTokenProvider() (string, error) {
+	var v string
+	fmt.Printf("Assume Role MFA token code: ")
+	_, err := fmt.Scanln(&v)
+
+	return v, err
+}
+
+// ProviderName provides a name of AssumeRole provider
+const ProviderName = "AssumeRoleProvider"
+
+// AssumeRoleAPIClient is a client capable of the STS AssumeRole operation.
+type AssumeRoleAPIClient interface { + AssumeRole(ctx context.Context, params *sts.AssumeRoleInput, optFns ...func(*sts.Options)) (*sts.AssumeRoleOutput, error) +} + +// DefaultDuration is the default amount of time in minutes that the +// credentials will be valid for. This value is only used by AssumeRoleProvider +// for specifying the default expiry duration of an assume role. +// +// Other providers such as WebIdentityRoleProvider do not use this value, and +// instead rely on STS API's default parameter handing to assign a default +// value. +var DefaultDuration = time.Duration(15) * time.Minute + +// AssumeRoleProvider retrieves temporary credentials from the STS service, and +// keeps track of their expiration time. +// +// This credential provider will be used by the SDKs default credential change +// when shared configuration is enabled, and the shared config or shared credentials +// file configure assume role. See Session docs for how to do this. +// +// AssumeRoleProvider does not provide any synchronization and it is not safe +// to share this value across multiple Credentials, Sessions, or service clients +// without also sharing the same Credentials instance. +type AssumeRoleProvider struct { + options AssumeRoleOptions +} + +// AssumeRoleOptions is the configurable options for AssumeRoleProvider +type AssumeRoleOptions struct { + // Client implementation of the AssumeRole operation. Required + Client AssumeRoleAPIClient + + // IAM Role ARN to be assumed. Required + RoleARN string + + // Session name, if you wish to uniquely identify this session. + RoleSessionName string + + // Expiry duration of the STS credentials. Defaults to 15 minutes if not set. + Duration time.Duration + + // Optional ExternalID to pass along, defaults to nil if not set. + ExternalID *string + + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + Policy *string + + // The ARNs of IAM managed policies you want to use as managed session policies. + // The policies must exist in the same account as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plain text that you use for both inline and managed session + // policies can't exceed 2,048 characters. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent AWS API calls to access resources in the account that owns + // the role. You cannot use session policies to grant more permissions than + // those allowed by the identity-based policy of the role that is being assumed. 
+ // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyARNs []types.PolicyDescriptorType + + // The identification number of the MFA device that is associated with the user + // who is making the AssumeRole call. Specify this value if the trust policy + // of the role being assumed includes a condition that requires MFA authentication. + // The value is either the serial number for a hardware device (such as GAHT12345678) + // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + SerialNumber *string + + // The source identity specified by the principal that is calling the AssumeRole + // operation. You can require users to specify a source identity when they assume a + // role. You do this by using the sts:SourceIdentity condition key in a role trust + // policy. You can use source identity information in CloudTrail logs to determine + // who took actions with a role. You can use the aws:SourceIdentity condition key + // to further control access to Amazon Web Services resources based on the value of + // source identity. For more information about using source identity, see Monitor + // and control actions taken with assumed roles + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // in the IAM User Guide. + SourceIdentity *string + + // Async method of providing MFA token code for assuming an IAM role with MFA. + // The value returned by the function will be used as the TokenCode in the Retrieve + // call. See StdinTokenProvider for a provider that prompts and reads from stdin. + // + // This token provider will be called when ever the assumed role's + // credentials need to be refreshed when SerialNumber is set. + TokenProvider func() (string, error) + + // A list of session tags that you want to pass. Each session tag consists of a key + // name and an associated value. For more information about session tags, see + // Tagging STS Sessions + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the + // IAM User Guide. This parameter is optional. You can pass up to 50 session tags. + Tags []types.Tag + + // A list of keys for session tags that you want to set as transitive. If you set a + // tag key as transitive, the corresponding key and value passes to subsequent + // sessions in a role chain. For more information, see Chaining Roles with Session + // Tags + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) + // in the IAM User Guide. This parameter is optional. + TransitiveTagKeys []string +} + +// NewAssumeRoleProvider constructs and returns a credentials provider that +// will retrieve credentials by assuming a IAM role using STS. +func NewAssumeRoleProvider(client AssumeRoleAPIClient, roleARN string, optFns ...func(*AssumeRoleOptions)) *AssumeRoleProvider { + o := AssumeRoleOptions{ + Client: client, + RoleARN: roleARN, + } + + for _, fn := range optFns { + fn(&o) + } + + return &AssumeRoleProvider{ + options: o, + } +} + +// Retrieve generates a new set of temporary credentials using STS. +func (p *AssumeRoleProvider) Retrieve(ctx context.Context) (aws.Credentials, error) { + // Apply defaults where parameters are not set. + if len(p.options.RoleSessionName) == 0 { + // Try to work out a role name that will hopefully end up unique. 
+ p.options.RoleSessionName = fmt.Sprintf("aws-go-sdk-%d", time.Now().UTC().UnixNano()) + } + if p.options.Duration == 0 { + // Expire as often as AWS permits. + p.options.Duration = DefaultDuration + } + input := &sts.AssumeRoleInput{ + DurationSeconds: aws.Int32(int32(p.options.Duration / time.Second)), + PolicyArns: p.options.PolicyARNs, + RoleArn: aws.String(p.options.RoleARN), + RoleSessionName: aws.String(p.options.RoleSessionName), + ExternalId: p.options.ExternalID, + SourceIdentity: p.options.SourceIdentity, + Tags: p.options.Tags, + TransitiveTagKeys: p.options.TransitiveTagKeys, + } + if p.options.Policy != nil { + input.Policy = p.options.Policy + } + if p.options.SerialNumber != nil { + if p.options.TokenProvider != nil { + input.SerialNumber = p.options.SerialNumber + code, err := p.options.TokenProvider() + if err != nil { + return aws.Credentials{}, err + } + input.TokenCode = aws.String(code) + } else { + return aws.Credentials{}, fmt.Errorf("assume role with MFA enabled, but TokenProvider is not set") + } + } + + resp, err := p.options.Client.AssumeRole(ctx, input) + if err != nil { + return aws.Credentials{Source: ProviderName}, err + } + + return aws.Credentials{ + AccessKeyID: *resp.Credentials.AccessKeyId, + SecretAccessKey: *resp.Credentials.SecretAccessKey, + SessionToken: *resp.Credentials.SessionToken, + Source: ProviderName, + + CanExpire: true, + Expires: *resp.Credentials.Expiration, + }, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go new file mode 100644 index 00000000000..ddaf6df6ce1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go @@ -0,0 +1,150 @@ +package stscreds + +import ( + "context" + "fmt" + "io/ioutil" + "strconv" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/retry" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/aws-sdk-go-v2/service/sts" + "github.com/aws/aws-sdk-go-v2/service/sts/types" +) + +var invalidIdentityTokenExceptionCode = (&types.InvalidIdentityTokenException{}).ErrorCode() + +const ( + // WebIdentityProviderName is the web identity provider name + WebIdentityProviderName = "WebIdentityCredentials" +) + +// AssumeRoleWithWebIdentityAPIClient is a client capable of the STS AssumeRoleWithWebIdentity operation. +type AssumeRoleWithWebIdentityAPIClient interface { + AssumeRoleWithWebIdentity(ctx context.Context, params *sts.AssumeRoleWithWebIdentityInput, optFns ...func(*sts.Options)) (*sts.AssumeRoleWithWebIdentityOutput, error) +} + +// WebIdentityRoleProvider is used to retrieve credentials using +// an OIDC token. +type WebIdentityRoleProvider struct { + options WebIdentityRoleOptions +} + +// WebIdentityRoleOptions is a structure of configurable options for WebIdentityRoleProvider +type WebIdentityRoleOptions struct { + // Client implementation of the AssumeRoleWithWebIdentity operation. Required + Client AssumeRoleWithWebIdentityAPIClient + + // JWT Token Provider. Required + TokenRetriever IdentityTokenRetriever + + // IAM Role ARN to assume. Required + RoleARN string + + // Session name, if you wish to uniquely identify this session. + RoleSessionName string + + // Expiry duration of the STS credentials. STS will assign a default expiry + // duration if this value is unset. 
This is different from the Duration
+	// option of AssumeRoleProvider, which automatically assigns 15 minutes if
+	// Duration is unset.
+	//
+	// See the STS AssumeRoleWithWebIdentity API reference guide for more
+	// information on defaults.
+	// https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html
+	Duration time.Duration
+
+	// An IAM policy in JSON format that you want to use as an inline session policy.
+	Policy *string
+
+	// The Amazon Resource Names (ARNs) of the IAM managed policies that you
+	// want to use as managed session policies. The policies must exist in the
+	// same account as the role.
+	PolicyARNs []types.PolicyDescriptorType
+}
+
+// IdentityTokenRetriever is an interface for retrieving a JWT
+type IdentityTokenRetriever interface {
+	GetIdentityToken() ([]byte, error)
+}
+
+// IdentityTokenFile is for retrieving an identity token from the given file name
+type IdentityTokenFile string
+
+// GetIdentityToken retrieves the JWT token from the file and returns the contents as a []byte
+func (j IdentityTokenFile) GetIdentityToken() ([]byte, error) {
+	b, err := ioutil.ReadFile(string(j))
+	if err != nil {
+		return nil, fmt.Errorf("unable to read file at %s: %v", string(j), err)
+	}
+
+	return b, nil
+}
+
+// NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the
+// provided client and identity token retriever.
+func NewWebIdentityRoleProvider(client AssumeRoleWithWebIdentityAPIClient, roleARN string, tokenRetriever IdentityTokenRetriever, optFns ...func(*WebIdentityRoleOptions)) *WebIdentityRoleProvider {
+	o := WebIdentityRoleOptions{
+		Client:         client,
+		RoleARN:        roleARN,
+		TokenRetriever: tokenRetriever,
+	}
+
+	for _, fn := range optFns {
+		fn(&o)
+	}
+
+	return &WebIdentityRoleProvider{options: o}
+}
+
+// Retrieve will attempt to assume a role using the identity token obtained
+// from the configured IdentityTokenRetriever. An error is returned if the
+// token cannot be retrieved or the role cannot be assumed.
+func (p *WebIdentityRoleProvider) Retrieve(ctx context.Context) (aws.Credentials, error) {
+	b, err := p.options.TokenRetriever.GetIdentityToken()
+	if err != nil {
+		return aws.Credentials{}, fmt.Errorf("failed to retrieve jwt from provided source, %w", err)
+	}
+
+	sessionName := p.options.RoleSessionName
+	if len(sessionName) == 0 {
+		// session name is used to uniquely identify a session. This simply
+		// uses unix time in nanoseconds to uniquely identify sessions.
+		sessionName = strconv.FormatInt(sdk.NowTime().UnixNano(), 10)
+	}
+	input := &sts.AssumeRoleWithWebIdentityInput{
+		PolicyArns:       p.options.PolicyARNs,
+		RoleArn:          &p.options.RoleARN,
+		RoleSessionName:  &sessionName,
+		WebIdentityToken: aws.String(string(b)),
+	}
+	if p.options.Duration != 0 {
+		// If set use the value, otherwise STS will assign a default expiration duration.
+		input.DurationSeconds = aws.Int32(int32(p.options.Duration / time.Second))
+	}
+	if p.options.Policy != nil {
+		input.Policy = p.options.Policy
+	}
+
+	resp, err := p.options.Client.AssumeRoleWithWebIdentity(ctx, input, func(options *sts.Options) {
+		// InvalidIdentityToken is a temporary error that can occur when
+		// assuming a role with a JWT web identity token, so it is registered
+		// as retryable.
+		options.Retryer = retry.AddWithErrorCodes(options.Retryer, invalidIdentityTokenExceptionCode)
+	})
+	if err != nil {
+		return aws.Credentials{}, fmt.Errorf("failed to retrieve credentials, %w", err)
+	}
+ + value := aws.Credentials{ + AccessKeyID: aws.ToString(resp.Credentials.AccessKeyId), + SecretAccessKey: aws.ToString(resp.Credentials.SecretAccessKey), + SessionToken: aws.ToString(resp.Credentials.SessionToken), + Source: WebIdentityProviderName, + CanExpire: true, + Expires: *resp.Credentials.Expiration, + } + return value, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md new file mode 100644 index 00000000000..c566941f9e8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md @@ -0,0 +1,253 @@ +# v1.14.3 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.2 (2023-11-02) + +* No change notes available for this release. + +# v1.14.1 (2023-11-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.13 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.12 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.11 (2023-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.10 (2023-08-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.9 (2023-08-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.8 (2023-08-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.7 (2023-07-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.6 (2023-07-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.5 (2023-07-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.4 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.3 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.2 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.1 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.0 (2023-03-14) + +* **Feature**: Add flag to disable IMDSv1 fallback + +# v1.12.24 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.23 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.22 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.21 (2022-12-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.20 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.19 (2022-10-24) + +* **Bug Fix**: Fixes an issue that prevented logging of the API request or responses when the respective log modes were enabled. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.18 (2022-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.17 (2022-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.16 (2022-09-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.15 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.14 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.13 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.12 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.11 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.10 (2022-08-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.9 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.8 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.7 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.6 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.5 (2022-05-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.4 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.3 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.2 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.1 (2022-03-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.0 (2022-03-08) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.0 (2022-02-24) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.0 (2022-01-07) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.2 (2021-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.1 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.0 (2021-11-06) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.0 (2021-10-21) + +* **Feature**: Updated to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.0 (2021-10-11) + +* **Feature**: Respect passed in Context Deadline/Timeout. Updates the IMDS Client operations to not override the passed in Context's Deadline or Timeout options. If an Client operation is called with a Context with a Deadline or Timeout, the client will no longer override it with the client's default timeout. +* **Bug Fix**: Fix IMDS client's response handling and operation timeout race. 
Fixes #1253 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.1 (2021-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.0 (2021-08-27) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.1 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2021-08-04) + +* **Feature**: adds error handling for defered close calls +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2021-07-15) + +* **Feature**: Support has been added for EC2 IPv6-enabled Instance Metadata Service Endpoints. +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.0 (2021-06-25) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.1 (2021-05-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/fileprovider/LICENSE b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/LICENSE.txt similarity index 100% rename from vendor/go.opentelemetry.io/collector/confmap/provider/fileprovider/LICENSE rename to vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/LICENSE.txt diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go new file mode 100644 index 00000000000..46e144d9363 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go @@ -0,0 +1,348 @@ +package imds + +import ( + "context" + "fmt" + "net" + "net/http" + "os" + "strings" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/retry" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + internalconfig "github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config" + "github.com/aws/smithy-go" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// ServiceID provides the unique name of this API client +const ServiceID = "ec2imds" + +// Client provides the API client for interacting with the Amazon EC2 Instance +// Metadata Service API. +type Client struct { + options Options +} + +// ClientEnableState provides an enumeration if the client is enabled, +// disabled, or default behavior. +type ClientEnableState = internalconfig.ClientEnableState + +// Enumeration values for ClientEnableState +const ( + ClientDefaultEnableState ClientEnableState = internalconfig.ClientDefaultEnableState // default behavior + ClientDisabled ClientEnableState = internalconfig.ClientDisabled // client disabled + ClientEnabled ClientEnableState = internalconfig.ClientEnabled // client enabled +) + +// EndpointModeState is an enum configuration variable describing the client endpoint mode. +// Not configurable directly, but used when using the NewFromConfig. 
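As a usage sketch for the client defined in this file, the following assumes code running on (or near) an EC2 instance; forcing the IPv6 endpoint and disabling IMDSv1 fallback are arbitrary choices for illustration:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}

	// Build the client from the shared config, forcing the IPv6 endpoint
	// and disabling the insecure IMDSv1 fallback.
	client := imds.NewFromConfig(cfg, func(o *imds.Options) {
		o.EndpointMode = imds.EndpointModeStateIPv6
		o.EnableFallback = aws.FalseTernary
	})

	region, err := client.GetRegion(context.TODO(), &imds.GetRegionInput{})
	if err != nil {
		// Fails off-EC2, or when AWS_EC2_METADATA_DISABLED=true.
		log.Fatal(err)
	}
	fmt.Println("region:", region.Region)
}
```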
+type EndpointModeState = internalconfig.EndpointModeState + +// Enumeration values for EndpointModeState +const ( + EndpointModeStateUnset EndpointModeState = internalconfig.EndpointModeStateUnset + EndpointModeStateIPv4 EndpointModeState = internalconfig.EndpointModeStateIPv4 + EndpointModeStateIPv6 EndpointModeState = internalconfig.EndpointModeStateIPv6 +) + +const ( + disableClientEnvVar = "AWS_EC2_METADATA_DISABLED" + + // Client endpoint options + endpointEnvVar = "AWS_EC2_METADATA_SERVICE_ENDPOINT" + + defaultIPv4Endpoint = "http://169.254.169.254" + defaultIPv6Endpoint = "http://[fd00:ec2::254]" +) + +// New returns an initialized Client based on the functional options. Provide +// additional functional options to further configure the behavior of the client, +// such as changing the client's endpoint or adding custom middleware behavior. +func New(options Options, optFns ...func(*Options)) *Client { + options = options.Copy() + + for _, fn := range optFns { + fn(&options) + } + + options.HTTPClient = resolveHTTPClient(options.HTTPClient) + + if options.Retryer == nil { + options.Retryer = retry.NewStandard() + } + options.Retryer = retry.AddWithMaxBackoffDelay(options.Retryer, 1*time.Second) + + if options.ClientEnableState == ClientDefaultEnableState { + if v := os.Getenv(disableClientEnvVar); strings.EqualFold(v, "true") { + options.ClientEnableState = ClientDisabled + } + } + + if len(options.Endpoint) == 0 { + if v := os.Getenv(endpointEnvVar); len(v) != 0 { + options.Endpoint = v + } + } + + client := &Client{ + options: options, + } + + if client.options.tokenProvider == nil && !client.options.disableAPIToken { + client.options.tokenProvider = newTokenProvider(client, defaultTokenTTL) + } + + return client +} + +// NewFromConfig returns an initialized Client based the AWS SDK config, and +// functional options. Provide additional functional options to further +// configure the behavior of the client, such as changing the client's endpoint +// or adding custom middleware behavior. +func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { + opts := Options{ + APIOptions: append([]func(*middleware.Stack) error{}, cfg.APIOptions...), + HTTPClient: cfg.HTTPClient, + ClientLogMode: cfg.ClientLogMode, + Logger: cfg.Logger, + } + + if cfg.Retryer != nil { + opts.Retryer = cfg.Retryer() + } + + resolveClientEnableState(cfg, &opts) + resolveEndpointConfig(cfg, &opts) + resolveEndpointModeConfig(cfg, &opts) + resolveEnableFallback(cfg, &opts) + + return New(opts, optFns...) +} + +// Options provides the fields for configuring the API client's behavior. +type Options struct { + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation + // call to modify this list for per operation behavior. + APIOptions []func(*middleware.Stack) error + + // The endpoint the client will use to retrieve EC2 instance metadata. + // + // Specifies the EC2 Instance Metadata Service endpoint to use. If specified it overrides EndpointMode. + // + // If unset, and the environment variable AWS_EC2_METADATA_SERVICE_ENDPOINT + // has a value the client will use the value of the environment variable as + // the endpoint for operation calls. + // + // AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1] + Endpoint string + + // The endpoint selection mode the client will use if no explicit endpoint is provided using the Endpoint field. 
+	//
+	// Setting EndpointMode to EndpointModeStateIPv4 will configure the client to use the default EC2 IPv4 endpoint.
+	// Setting EndpointMode to EndpointModeStateIPv6 will configure the client to use the default EC2 IPv6 endpoint.
+	//
+	// If EndpointMode is not set (EndpointModeStateUnset), the client defaults
+	// to the EndpointModeStateIPv4 selection mode.
+	EndpointMode EndpointModeState
+
+	// The HTTP client to invoke API calls with. Defaults to client's default
+	// HTTP implementation if nil.
+	HTTPClient HTTPClient
+
+	// Retryer guides how HTTP requests should be retried in case of recoverable
+	// failures. When nil the API client will use a default retryer.
+	Retryer aws.Retryer
+
+	// Changes if the EC2 Instance Metadata client is enabled or not. Client
+	// will default to enabled if not set to ClientDisabled. When the client is
+	// disabled it will return an error for all operation calls.
+	//
+	// If ClientEnableState value is ClientDefaultEnableState (default value),
+	// and the environment variable "AWS_EC2_METADATA_DISABLED" is set to
+	// "true", the client will be disabled.
+	//
+	//	AWS_EC2_METADATA_DISABLED=true
+	ClientEnableState ClientEnableState
+
+	// Configures the events that will be sent to the configured logger.
+	ClientLogMode aws.ClientLogMode
+
+	// The logger writer interface to write logging messages to.
+	Logger logging.Logger
+
+	// Configure IMDSv1 fallback behavior. By default, the client will attempt
+	// to fall back to IMDSv1 as needed for backwards compatibility. When set to [aws.FalseTernary]
+	// the client will return any errors encountered from attempting to fetch a token
+	// instead of silently using the insecure data flow of IMDSv1.
+	//
+	// See [configuring IMDS] for more information.
+	//
+	// [configuring IMDS]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html
+	EnableFallback aws.Ternary
+
+	// provides the caching of API tokens used for operation calls. If unset,
+	// the API token will not be retrieved for the operation.
+	tokenProvider *tokenProvider
+
+	// option to disable the API token provider for testing.
+	disableAPIToken bool
+}
+
+// HTTPClient provides the interface for a client making HTTP requests with the
+// API.
+type HTTPClient interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+// Copy creates a copy of the API options.
+func (o Options) Copy() Options {
+	to := o
+	to.APIOptions = append([]func(*middleware.Stack) error{}, o.APIOptions...)
+	return to
+}
+
+// WithAPIOptions wraps the API middleware functions, as a functional option
+// for the API Client Options. Use this helper to add additional functional
+// options to the API client, or operation calls.
+func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) {
+	return func(o *Options) {
+		o.APIOptions = append(o.APIOptions, optFns...)
+ } +} + +func (c *Client) invokeOperation( + ctx context.Context, opID string, params interface{}, optFns []func(*Options), + stackFns ...func(*middleware.Stack, Options) error, +) ( + result interface{}, metadata middleware.Metadata, err error, +) { + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) + options := c.options.Copy() + for _, fn := range optFns { + fn(&options) + } + + if options.ClientEnableState == ClientDisabled { + return nil, metadata, &smithy.OperationError{ + ServiceID: ServiceID, + OperationName: opID, + Err: fmt.Errorf( + "access disabled to EC2 IMDS via client option, or %q environment variable", + disableClientEnvVar), + } + } + + for _, fn := range stackFns { + if err := fn(stack, options); err != nil { + return nil, metadata, err + } + } + + for _, fn := range options.APIOptions { + if err := fn(stack); err != nil { + return nil, metadata, err + } + } + + handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) + result, metadata, err = handler.Handle(ctx, params) + if err != nil { + return nil, metadata, &smithy.OperationError{ + ServiceID: ServiceID, + OperationName: opID, + Err: err, + } + } + + return result, metadata, err +} + +const ( + // HTTP client constants + defaultDialerTimeout = 250 * time.Millisecond + defaultResponseHeaderTimeout = 500 * time.Millisecond +) + +func resolveHTTPClient(client HTTPClient) HTTPClient { + if client == nil { + client = awshttp.NewBuildableClient() + } + + if c, ok := client.(*awshttp.BuildableClient); ok { + client = c. + WithDialerOptions(func(d *net.Dialer) { + // Use a custom Dial timeout for the EC2 Metadata service to account + // for the possibility the application might not be running in an + // environment with the service present. The client should fail fast in + // this case. + d.Timeout = defaultDialerTimeout + }). + WithTransportOptions(func(tr *http.Transport) { + // Use a custom Transport timeout for the EC2 Metadata service to + // account for the possibility that the application might be running in + // a container, and EC2Metadata service drops the connection after a + // single IP Hop. The client should fail fast in this case. 
+ tr.ResponseHeaderTimeout = defaultResponseHeaderTimeout + }) + } + + return client +} + +func resolveClientEnableState(cfg aws.Config, options *Options) error { + if options.ClientEnableState != ClientDefaultEnableState { + return nil + } + value, found, err := internalconfig.ResolveClientEnableState(cfg.ConfigSources) + if err != nil || !found { + return err + } + options.ClientEnableState = value + return nil +} + +func resolveEndpointModeConfig(cfg aws.Config, options *Options) error { + if options.EndpointMode != EndpointModeStateUnset { + return nil + } + value, found, err := internalconfig.ResolveEndpointModeConfig(cfg.ConfigSources) + if err != nil || !found { + return err + } + options.EndpointMode = value + return nil +} + +func resolveEndpointConfig(cfg aws.Config, options *Options) error { + if len(options.Endpoint) != 0 { + return nil + } + value, found, err := internalconfig.ResolveEndpointConfig(cfg.ConfigSources) + if err != nil || !found { + return err + } + options.Endpoint = value + return nil +} + +func resolveEnableFallback(cfg aws.Config, options *Options) { + if options.EnableFallback != aws.UnknownTernary { + return + } + + disabled, ok := internalconfig.ResolveV1FallbackDisabled(cfg.ConfigSources) + if !ok { + return + } + + if disabled { + options.EnableFallback = aws.FalseTernary + } else { + options.EnableFallback = aws.TrueTernary + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go new file mode 100644 index 00000000000..9e3bdb0e66e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go @@ -0,0 +1,76 @@ +package imds + +import ( + "context" + "fmt" + "io" + + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +const getDynamicDataPath = "/latest/dynamic" + +// GetDynamicData uses the path provided to request information from the EC2 +// instance metadata service for dynamic data. The content will be returned +// as a string, or error if the request failed. +func (c *Client) GetDynamicData(ctx context.Context, params *GetDynamicDataInput, optFns ...func(*Options)) (*GetDynamicDataOutput, error) { + if params == nil { + params = &GetDynamicDataInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetDynamicData", params, optFns, + addGetDynamicDataMiddleware, + ) + if err != nil { + return nil, err + } + + out := result.(*GetDynamicDataOutput) + out.ResultMetadata = metadata + return out, nil +} + +// GetDynamicDataInput provides the input parameters for the GetDynamicData +// operation. +type GetDynamicDataInput struct { + // The relative dynamic data path to retrieve. Can be empty string to + // retrieve a response containing a new line separated list of dynamic data + // resources available. + // + // Must not include the dynamic data base path. + // + // May include leading slash. If Path includes trailing slash the trailing + // slash will be included in the request for the resource. + Path string +} + +// GetDynamicDataOutput provides the output parameters for the GetDynamicData +// operation. 
+type GetDynamicDataOutput struct {
+	Content io.ReadCloser
+
+	ResultMetadata middleware.Metadata
+}
+
+func addGetDynamicDataMiddleware(stack *middleware.Stack, options Options) error {
+	return addAPIRequestMiddleware(stack,
+		options,
+		buildGetDynamicDataPath,
+		buildGetDynamicDataOutput)
+}
+
+func buildGetDynamicDataPath(params interface{}) (string, error) {
+	p, ok := params.(*GetDynamicDataInput)
+	if !ok {
+		return "", fmt.Errorf("unknown parameter type %T", params)
+	}
+
+	return appendURIPath(getDynamicDataPath, p.Path), nil
+}
+
+func buildGetDynamicDataOutput(resp *smithyhttp.Response) (interface{}, error) {
+	return &GetDynamicDataOutput{
+		Content: resp.Body,
+	}, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go
new file mode 100644
index 00000000000..24845dccd6d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go
@@ -0,0 +1,102 @@
+package imds

+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"strings"
+	"time"
+
+	"github.com/aws/smithy-go"
+	smithyio "github.com/aws/smithy-go/io"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const getIAMInfoPath = getMetadataPath + "/iam/info"
+
+// GetIAMInfo retrieves IAM information about the instance, such as the
+// instance profile associated with it. An error is returned if the request
+// fails or the response cannot be parsed.
+func (c *Client) GetIAMInfo(
+	ctx context.Context, params *GetIAMInfoInput, optFns ...func(*Options),
+) (
+	*GetIAMInfoOutput, error,
+) {
+	if params == nil {
+		params = &GetIAMInfoInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "GetIAMInfo", params, optFns,
+		addGetIAMInfoMiddleware,
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*GetIAMInfoOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+// GetIAMInfoInput provides the input parameters for the GetIAMInfo operation.
+type GetIAMInfoInput struct{}
+
+// GetIAMInfoOutput provides the output parameters for the GetIAMInfo operation.
+type GetIAMInfoOutput struct {
+	IAMInfo
+
+	ResultMetadata middleware.Metadata
+}
+
+func addGetIAMInfoMiddleware(stack *middleware.Stack, options Options) error {
+	return addAPIRequestMiddleware(stack,
+		options,
+		buildGetIAMInfoPath,
+		buildGetIAMInfoOutput,
+	)
+}
+
+func buildGetIAMInfoPath(params interface{}) (string, error) {
+	return getIAMInfoPath, nil
+}
+
+func buildGetIAMInfoOutput(resp *smithyhttp.Response) (v interface{}, err error) {
+	defer func() {
+		closeErr := resp.Body.Close()
+		if err == nil {
+			err = closeErr
+		} else if closeErr != nil {
+			err = fmt.Errorf("response body close error: %v, original error: %w", closeErr, err)
+		}
+	}()
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(resp.Body, ringBuffer)
+
+	imdsResult := &GetIAMInfoOutput{}
+	if err = json.NewDecoder(body).Decode(&imdsResult.IAMInfo); err != nil {
+		return nil, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode IAM info, %w", err),
+			Snapshot: ringBuffer.Bytes(),
+		}
+	}
+	// Any code other than success is an error
+	if !strings.EqualFold(imdsResult.Code, "success") {
+		return nil, fmt.Errorf("failed to get EC2 IMDS IAM info, %s",
+			imdsResult.Code)
+	}
+
+	return imdsResult, nil
+}
+
+// IAMInfo provides the shape for unmarshaling IAM info from the metadata
+// API.
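A minimal sketch of calling GetIAMInfo, assuming the code runs on an EC2 instance with an instance profile attached; a non-success code in the response is surfaced as an error:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := imds.NewFromConfig(cfg)

	info, err := client.GetIAMInfo(context.TODO(), &imds.GetIAMInfoInput{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("instance profile ARN:", info.InstanceProfileArn)
}
```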
+type IAMInfo struct { + Code string + LastUpdated time.Time + InstanceProfileArn string + InstanceProfileID string +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go new file mode 100644 index 00000000000..a87758ed302 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go @@ -0,0 +1,109 @@ +package imds + +import ( + "context" + "encoding/json" + "fmt" + "io" + "time" + + "github.com/aws/smithy-go" + smithyio "github.com/aws/smithy-go/io" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +const getInstanceIdentityDocumentPath = getDynamicDataPath + "/instance-identity/document" + +// GetInstanceIdentityDocument retrieves an identity document describing an +// instance. Error is returned if the request fails or is unable to parse +// the response. +func (c *Client) GetInstanceIdentityDocument( + ctx context.Context, params *GetInstanceIdentityDocumentInput, optFns ...func(*Options), +) ( + *GetInstanceIdentityDocumentOutput, error, +) { + if params == nil { + params = &GetInstanceIdentityDocumentInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetInstanceIdentityDocument", params, optFns, + addGetInstanceIdentityDocumentMiddleware, + ) + if err != nil { + return nil, err + } + + out := result.(*GetInstanceIdentityDocumentOutput) + out.ResultMetadata = metadata + return out, nil +} + +// GetInstanceIdentityDocumentInput provides the input parameters for +// GetInstanceIdentityDocument operation. +type GetInstanceIdentityDocumentInput struct{} + +// GetInstanceIdentityDocumentOutput provides the output parameters for +// GetInstanceIdentityDocument operation. 
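A minimal sketch of reading fields from the instance identity document, again assuming the code runs on an EC2 instance:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := imds.NewFromConfig(cfg)

	out, err := client.GetInstanceIdentityDocument(context.TODO(),
		&imds.GetInstanceIdentityDocumentInput{})
	if err != nil {
		log.Fatal(err)
	}

	// The output embeds InstanceIdentityDocument, so its fields are
	// accessible directly.
	fmt.Println("region:     ", out.Region)
	fmt.Println("account:    ", out.AccountID)
	fmt.Println("instance-id:", out.InstanceID)
}
```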
+type GetInstanceIdentityDocumentOutput struct {
+	InstanceIdentityDocument
+
+	ResultMetadata middleware.Metadata
+}
+
+func addGetInstanceIdentityDocumentMiddleware(stack *middleware.Stack, options Options) error {
+	return addAPIRequestMiddleware(stack,
+		options,
+		buildGetInstanceIdentityDocumentPath,
+		buildGetInstanceIdentityDocumentOutput,
+	)
+}
+
+func buildGetInstanceIdentityDocumentPath(params interface{}) (string, error) {
+	return getInstanceIdentityDocumentPath, nil
+}
+
+func buildGetInstanceIdentityDocumentOutput(resp *smithyhttp.Response) (v interface{}, err error) {
+	defer func() {
+		closeErr := resp.Body.Close()
+		if err == nil {
+			err = closeErr
+		} else if closeErr != nil {
+			err = fmt.Errorf("response body close error: %v, original error: %w", closeErr, err)
+		}
+	}()
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+	body := io.TeeReader(resp.Body, ringBuffer)
+
+	output := &GetInstanceIdentityDocumentOutput{}
+	if err = json.NewDecoder(body).Decode(&output.InstanceIdentityDocument); err != nil {
+		return nil, &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode instance identity document, %w", err),
+			Snapshot: ringBuffer.Bytes(),
+		}
+	}
+
+	return output, nil
+}
+
+// InstanceIdentityDocument provides the shape for unmarshaling
+// an instance identity document.
+type InstanceIdentityDocument struct {
+	DevpayProductCodes      []string  `json:"devpayProductCodes"`
+	MarketplaceProductCodes []string  `json:"marketplaceProductCodes"`
+	AvailabilityZone        string    `json:"availabilityZone"`
+	PrivateIP               string    `json:"privateIp"`
+	Version                 string    `json:"version"`
+	Region                  string    `json:"region"`
+	InstanceID              string    `json:"instanceId"`
+	BillingProducts         []string  `json:"billingProducts"`
+	InstanceType            string    `json:"instanceType"`
+	AccountID               string    `json:"accountId"`
+	PendingTime             time.Time `json:"pendingTime"`
+	ImageID                 string    `json:"imageId"`
+	KernelID                string    `json:"kernelId"`
+	RamdiskID               string    `json:"ramdiskId"`
+	Architecture            string    `json:"architecture"`
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go
new file mode 100644
index 00000000000..cb0ce4c0004
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go
@@ -0,0 +1,76 @@
+package imds
+
+import (
+	"context"
+	"fmt"
+	"io"
+
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const getMetadataPath = "/latest/meta-data"
+
+// GetMetadata uses the path provided to request information from the Amazon
+// EC2 Instance Metadata Service. The content is returned as an io.ReadCloser,
+// or an error if the request failed.
+func (c *Client) GetMetadata(ctx context.Context, params *GetMetadataInput, optFns ...func(*Options)) (*GetMetadataOutput, error) {
+	if params == nil {
+		params = &GetMetadataInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "GetMetadata", params, optFns,
+		addGetMetadataMiddleware,
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*GetMetadataOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+// GetMetadataInput provides the input parameters for the GetMetadata
+// operation.
+type GetMetadataInput struct {
+	// The relative metadata path to retrieve. May be an empty string to
+	// retrieve a newline-separated list of available metadata resources.
+	//
+	// Must not include the metadata base path.
+	//
+	// May include a leading slash. If Path includes a trailing slash, the
+	// trailing slash will be included in the request for the resource.
+	Path string
+}
+
+// GetMetadataOutput provides the output parameters for the GetMetadata
+// operation.
+type GetMetadataOutput struct {
+	Content io.ReadCloser
+
+	ResultMetadata middleware.Metadata
+}
+
+func addGetMetadataMiddleware(stack *middleware.Stack, options Options) error {
+	return addAPIRequestMiddleware(stack,
+		options,
+		buildGetMetadataPath,
+		buildGetMetadataOutput)
+}
+
+func buildGetMetadataPath(params interface{}) (string, error) {
+	p, ok := params.(*GetMetadataInput)
+	if !ok {
+		return "", fmt.Errorf("unknown parameter type %T", params)
+	}
+
+	return appendURIPath(getMetadataPath, p.Path), nil
+}
+
+func buildGetMetadataOutput(resp *smithyhttp.Response) (interface{}, error) {
+	return &GetMetadataOutput{
+		Content: resp.Body,
+	}, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go
new file mode 100644
index 00000000000..7b9b48912af
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go
@@ -0,0 +1,72 @@
+package imds
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// GetRegion retrieves the AWS Region the instance is running in, as reported
+// by the instance identity document. An error is returned if the request
+// fails or the response cannot be parsed.
+func (c *Client) GetRegion(
+	ctx context.Context, params *GetRegionInput, optFns ...func(*Options),
+) (
+	*GetRegionOutput, error,
+) {
+	if params == nil {
+		params = &GetRegionInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "GetRegion", params, optFns,
+		addGetRegionMiddleware,
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*GetRegionOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+// GetRegionInput provides the input parameters for GetRegion operation.
+type GetRegionInput struct{}
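
A region-resolution sketch (a complete, minimal program, not part of the vendored files; assumes execution on an EC2 instance):

	package main

	import (
		"context"
		"fmt"
		"log"

		"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
	)

	func main() {
		client := imds.New(imds.Options{})
		out, err := client.GetRegion(context.TODO(), nil)
		if err != nil {
			log.Fatalf("unable to resolve region from IMDS: %v", err)
		}
		fmt.Println("region:", out.Region)
	}

+
+// GetRegionOutput provides the output parameters for GetRegion operation.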
+type GetRegionOutput struct { + Region string + + ResultMetadata middleware.Metadata +} + +func addGetRegionMiddleware(stack *middleware.Stack, options Options) error { + return addAPIRequestMiddleware(stack, + options, + buildGetInstanceIdentityDocumentPath, + buildGetRegionOutput, + ) +} + +func buildGetRegionOutput(resp *smithyhttp.Response) (interface{}, error) { + out, err := buildGetInstanceIdentityDocumentOutput(resp) + if err != nil { + return nil, err + } + + result, ok := out.(*GetInstanceIdentityDocumentOutput) + if !ok { + return nil, fmt.Errorf("unexpected instance identity document type, %T", out) + } + + region := result.Region + if len(region) == 0 { + return "", fmt.Errorf("instance metadata did not return a region value") + } + + return &GetRegionOutput{ + Region: region, + }, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go new file mode 100644 index 00000000000..841f802c1a3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go @@ -0,0 +1,118 @@ +package imds + +import ( + "context" + "fmt" + "io" + "strconv" + "strings" + "time" + + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +const getTokenPath = "/latest/api/token" +const tokenTTLHeader = "X-Aws-Ec2-Metadata-Token-Ttl-Seconds" + +// getToken uses the duration to return a token for EC2 IMDS, or an error if +// the request failed. +func (c *Client) getToken(ctx context.Context, params *getTokenInput, optFns ...func(*Options)) (*getTokenOutput, error) { + if params == nil { + params = &getTokenInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "getToken", params, optFns, + addGetTokenMiddleware, + ) + if err != nil { + return nil, err + } + + out := result.(*getTokenOutput) + out.ResultMetadata = metadata + return out, nil +} + +type getTokenInput struct { + TokenTTL time.Duration +} + +type getTokenOutput struct { + Token string + TokenTTL time.Duration + + ResultMetadata middleware.Metadata +} + +func addGetTokenMiddleware(stack *middleware.Stack, options Options) error { + err := addRequestMiddleware(stack, + options, + "PUT", + buildGetTokenPath, + buildGetTokenOutput) + if err != nil { + return err + } + + err = stack.Serialize.Add(&tokenTTLRequestHeader{}, middleware.After) + if err != nil { + return err + } + + return nil +} + +func buildGetTokenPath(interface{}) (string, error) { + return getTokenPath, nil +} + +func buildGetTokenOutput(resp *smithyhttp.Response) (v interface{}, err error) { + defer func() { + closeErr := resp.Body.Close() + if err == nil { + err = closeErr + } else if closeErr != nil { + err = fmt.Errorf("response body close error: %v, original error: %w", closeErr, err) + } + }() + + ttlHeader := resp.Header.Get(tokenTTLHeader) + tokenTTL, err := strconv.ParseInt(ttlHeader, 10, 64) + if err != nil { + return nil, fmt.Errorf("unable to parse API token, %w", err) + } + + var token strings.Builder + if _, err = io.Copy(&token, resp.Body); err != nil { + return nil, fmt.Errorf("unable to read API token, %w", err) + } + + return &getTokenOutput{ + Token: token.String(), + TokenTTL: time.Duration(tokenTTL) * time.Second, + }, nil +} + +type tokenTTLRequestHeader struct{} + +func (*tokenTTLRequestHeader) ID() string { return "tokenTTLRequestHeader" } +func (*tokenTTLRequestHeader) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, 
+) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("expect HTTP transport, got %T", in.Request)
+	}
+
+	input, ok := in.Parameters.(*getTokenInput)
+	if !ok {
+		return out, metadata, fmt.Errorf("expect getTokenInput, got %T", in.Parameters)
+	}
+
+	req.Header.Set(tokenTTLHeader, strconv.Itoa(int(input.TokenTTL/time.Second)))
+
+	return next.HandleSerialize(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go
new file mode 100644
index 00000000000..88aa61e9ad9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go
@@ -0,0 +1,60 @@
+package imds
+
+import (
+	"context"
+	"io"
+
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const getUserDataPath = "/latest/user-data"
+
+// GetUserData retrieves the user data made available when the EC2 instance
+// was launched. The content is returned as an io.ReadCloser, or an error if
+// the request failed.
+func (c *Client) GetUserData(ctx context.Context, params *GetUserDataInput, optFns ...func(*Options)) (*GetUserDataOutput, error) {
+	if params == nil {
+		params = &GetUserDataInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "GetUserData", params, optFns,
+		addGetUserDataMiddleware,
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*GetUserDataOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+// GetUserDataInput provides the input parameters for the GetUserData
+// operation.
+type GetUserDataInput struct{}
+
+// GetUserDataOutput provides the output parameters for the GetUserData
+// operation.
+type GetUserDataOutput struct {
+	Content io.ReadCloser
+
+	ResultMetadata middleware.Metadata
+}
+
+func addGetUserDataMiddleware(stack *middleware.Stack, options Options) error {
+	return addAPIRequestMiddleware(stack,
+		options,
+		buildGetUserDataPath,
+		buildGetUserDataOutput)
+}
+
+func buildGetUserDataPath(params interface{}) (string, error) {
+	return getUserDataPath, nil
+}
+
+func buildGetUserDataOutput(resp *smithyhttp.Response) (interface{}, error) {
+	return &GetUserDataOutput{
+		Content: resp.Body,
+	}, nil
+}
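
Reading the user data stream (a fragment, imports as in the sketches above plus "io"; the Content field is an io.ReadCloser and the caller must close it):

	client := imds.New(imds.Options{})
	out, err := client.GetUserData(context.TODO(), nil)
	if err != nil {
		log.Fatal(err)
	}
	defer out.Content.Close()

	data, err := io.ReadAll(out.Content)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("user data:\n%s\n", data)

diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go
new file mode 100644
index 00000000000..bacdb5d21f2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go
@@ -0,0 +1,11 @@
+// Package imds provides the API client for interacting with the Amazon EC2
+// Instance Metadata Service.
+//
+// All Client operation calls have a default timeout. If the operation is not
+// completed before this timeout expires, the operation will be canceled. This
+// timeout can be overridden by providing a Context with a timeout or deadline
+// when calling the client's operations.
+//
+// See the EC2 IMDS user guide for more information on using the API.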
+// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html
+package imds
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go
new file mode 100644
index 00000000000..eea163103ef
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package imds
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.14.3"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config/resolvers.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config/resolvers.go
new file mode 100644
index 00000000000..ce774558932
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config/resolvers.go
@@ -0,0 +1,114 @@
+package config
+
+import (
+	"fmt"
+	"strings"
+)
+
+// ClientEnableState provides an enumeration of whether the client is enabled,
+// disabled, or left to its default behavior.
+type ClientEnableState uint
+
+// Enumeration values for ClientEnableState
+const (
+	ClientDefaultEnableState ClientEnableState = iota
+	ClientDisabled
+	ClientEnabled
+)
+
+// EndpointModeState is the EC2 IMDS Endpoint Configuration Mode
+type EndpointModeState uint
+
+// Enumeration values for EndpointModeState
+const (
+	EndpointModeStateUnset EndpointModeState = iota
+	EndpointModeStateIPv4
+	EndpointModeStateIPv6
+)
+
+// SetFromString sets the EndpointModeState based on the provided string
+// value. An empty value resolves to EndpointModeStateUnset; any other
+// unrecognized value returns an error.
+func (e *EndpointModeState) SetFromString(v string) error {
+	v = strings.TrimSpace(v)
+
+	switch {
+	case len(v) == 0:
+		*e = EndpointModeStateUnset
+	case strings.EqualFold(v, "IPv6"):
+		*e = EndpointModeStateIPv6
+	case strings.EqualFold(v, "IPv4"):
+		*e = EndpointModeStateIPv4
+	default:
+		return fmt.Errorf("unknown EC2 IMDS endpoint mode, must be either IPv6 or IPv4")
+	}
+	return nil
+}
+
+// ClientEnableStateResolver is a config resolver interface for retrieving whether the IMDS client is disabled.
+type ClientEnableStateResolver interface {
+	GetEC2IMDSClientEnableState() (ClientEnableState, bool, error)
+}
+
+// EndpointModeResolver is a config resolver interface for retrieving the EndpointModeState configuration.
+type EndpointModeResolver interface {
+	GetEC2IMDSEndpointMode() (EndpointModeState, bool, error)
+}
+
+// EndpointResolver is a config resolver interface for retrieving the endpoint.
+type EndpointResolver interface {
+	GetEC2IMDSEndpoint() (string, bool, error)
+}
+
+type v1FallbackDisabledResolver interface {
+	GetEC2IMDSV1FallbackDisabled() (bool, bool)
+}
+
+// ResolveClientEnableState resolves the ClientEnableState from a list of configuration sources.
+func ResolveClientEnableState(sources []interface{}) (value ClientEnableState, found bool, err error) {
+	for _, source := range sources {
+		if resolver, ok := source.(ClientEnableStateResolver); ok {
+			value, found, err = resolver.GetEC2IMDSClientEnableState()
+			if err != nil || found {
+				return value, found, err
+			}
+		}
+	}
+	return value, found, err
+}
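
The resolvers in this file all follow the same first-match pattern; a sketch with a hypothetical source type (envConfig is illustrative only, not part of the module):

	// envConfig is a hypothetical configuration source; any value
	// implementing EndpointModeResolver participates in resolution.
	type envConfig struct{ mode EndpointModeState }

	func (c envConfig) GetEC2IMDSEndpointMode() (EndpointModeState, bool, error) {
		return c.mode, c.mode != EndpointModeStateUnset, nil
	}

	// The first source that reports found=true (or an error) wins:
	//	mode, found, err := ResolveEndpointModeConfig([]interface{}{
	//		envConfig{mode: EndpointModeStateIPv6},
	//	})

+
+// ResolveEndpointModeConfig resolves the EndpointModeState from a list of configuration sources.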
+func ResolveEndpointModeConfig(sources []interface{}) (value EndpointModeState, found bool, err error) {
+	for _, source := range sources {
+		if resolver, ok := source.(EndpointModeResolver); ok {
+			value, found, err = resolver.GetEC2IMDSEndpointMode()
+			if err != nil || found {
+				return value, found, err
+			}
+		}
+	}
+	return value, found, err
+}
+
+// ResolveEndpointConfig resolves the endpoint from a list of configuration sources.
+func ResolveEndpointConfig(sources []interface{}) (value string, found bool, err error) {
+	for _, source := range sources {
+		if resolver, ok := source.(EndpointResolver); ok {
+			value, found, err = resolver.GetEC2IMDSEndpoint()
+			if err != nil || found {
+				return value, found, err
+			}
+		}
+	}
+	return value, found, err
+}
+
+// ResolveV1FallbackDisabled resolves whether IMDSv1 fallback has been
+// disabled from a list of configuration sources.
+func ResolveV1FallbackDisabled(sources []interface{}) (bool, bool) {
+	for _, source := range sources {
+		if resolver, ok := source.(v1FallbackDisabledResolver); ok {
+			if v, found := resolver.GetEC2IMDSV1FallbackDisabled(); found {
+				return v, true
+			}
+		}
+	}
+	return false, false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go
new file mode 100644
index 00000000000..c8abd64916c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go
@@ -0,0 +1,285 @@
+package imds
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io/ioutil"
+	"net/url"
+	"path"
+	"time"
+
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/retry"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+func addAPIRequestMiddleware(stack *middleware.Stack,
+	options Options,
+	getPath func(interface{}) (string, error),
+	getOutput func(*smithyhttp.Response) (interface{}, error),
+) (err error) {
+	err = addRequestMiddleware(stack, options, "GET", getPath, getOutput)
+	if err != nil {
+		return err
+	}
+
+	// Token Serializer build and state management.
+ if !options.disableAPIToken { + err = stack.Finalize.Insert(options.tokenProvider, (*retry.Attempt)(nil).ID(), middleware.After) + if err != nil { + return err + } + + err = stack.Deserialize.Insert(options.tokenProvider, "OperationDeserializer", middleware.Before) + if err != nil { + return err + } + } + + return nil +} + +func addRequestMiddleware(stack *middleware.Stack, + options Options, + method string, + getPath func(interface{}) (string, error), + getOutput func(*smithyhttp.Response) (interface{}, error), +) (err error) { + err = awsmiddleware.AddSDKAgentKey(awsmiddleware.FeatureMetadata, "ec2-imds")(stack) + if err != nil { + return err + } + + // Operation timeout + err = stack.Initialize.Add(&operationTimeout{ + DefaultTimeout: defaultOperationTimeout, + }, middleware.Before) + if err != nil { + return err + } + + // Operation Serializer + err = stack.Serialize.Add(&serializeRequest{ + GetPath: getPath, + Method: method, + }, middleware.After) + if err != nil { + return err + } + + // Operation endpoint resolver + err = stack.Serialize.Insert(&resolveEndpoint{ + Endpoint: options.Endpoint, + EndpointMode: options.EndpointMode, + }, "OperationSerializer", middleware.Before) + if err != nil { + return err + } + + // Operation Deserializer + err = stack.Deserialize.Add(&deserializeResponse{ + GetOutput: getOutput, + }, middleware.After) + if err != nil { + return err + } + + err = stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ + LogRequest: options.ClientLogMode.IsRequest(), + LogRequestWithBody: options.ClientLogMode.IsRequestWithBody(), + LogResponse: options.ClientLogMode.IsResponse(), + LogResponseWithBody: options.ClientLogMode.IsResponseWithBody(), + }, middleware.After) + if err != nil { + return err + } + + err = addSetLoggerMiddleware(stack, options) + if err != nil { + return err + } + + // Retry support + return retry.AddRetryMiddlewares(stack, retry.AddRetryMiddlewaresOptions{ + Retryer: options.Retryer, + LogRetryAttempts: options.ClientLogMode.IsRetries(), + }) +} + +func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { + return middleware.AddSetLoggerMiddleware(stack, o.Logger) +} + +type serializeRequest struct { + GetPath func(interface{}) (string, error) + Method string +} + +func (*serializeRequest) ID() string { + return "OperationSerializer" +} + +func (m *serializeRequest) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + reqPath, err := m.GetPath(in.Parameters) + if err != nil { + return out, metadata, fmt.Errorf("unable to get request URL path, %w", err) + } + + request.Request.URL.Path = reqPath + request.Request.Method = m.Method + + return next.HandleSerialize(ctx, in) +} + +type deserializeResponse struct { + GetOutput func(*smithyhttp.Response) (interface{}, error) +} + +func (*deserializeResponse) ID() string { + return "OperationDeserializer" +} + +func (m *deserializeResponse) HandleDeserialize( + ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, +) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + resp, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + 
return out, metadata, fmt.Errorf(
+			"unexpected transport response type, %T, want %T", out.RawResponse, resp)
+	}
+	defer resp.Body.Close()
+
+	// Read the full body up front so that the operation timeout cleanup does
+	// not race reads of the body.
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return out, metadata, fmt.Errorf("read response body failed, %w", err)
+	}
+	resp.Body = ioutil.NopCloser(bytes.NewReader(body))
+
+	// Any status code outside the [200, 300) range is an error.
+	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
+		return out, metadata, &smithyhttp.ResponseError{
+			Response: resp,
+			Err:      fmt.Errorf("request to EC2 IMDS failed"),
+		}
+	}
+
+	result, err := m.GetOutput(resp)
+	if err != nil {
+		return out, metadata, fmt.Errorf(
+			"unable to get deserialized result for response, %w", err,
+		)
+	}
+	out.Result = result
+
+	return out, metadata, err
+}
+
+type resolveEndpoint struct {
+	Endpoint     string
+	EndpointMode EndpointModeState
+}
+
+func (*resolveEndpoint) ID() string {
+	return "ResolveEndpoint"
+}
+
+func (m *resolveEndpoint) HandleSerialize(
+	ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+	}
+
+	var endpoint string
+	if len(m.Endpoint) > 0 {
+		endpoint = m.Endpoint
+	} else {
+		switch m.EndpointMode {
+		case EndpointModeStateIPv6:
+			endpoint = defaultIPv6Endpoint
+		case EndpointModeStateIPv4:
+			fallthrough
+		case EndpointModeStateUnset:
+			endpoint = defaultIPv4Endpoint
+		default:
+			return out, metadata, fmt.Errorf("unsupported IMDS endpoint mode")
+		}
+	}
+
+	req.URL, err = url.Parse(endpoint)
+	if err != nil {
+		return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err)
+	}
+
+	return next.HandleSerialize(ctx, in)
+}
+
+const (
+	defaultOperationTimeout = 5 * time.Second
+)
+
+// operationTimeout adds a timeout on the middleware stack if the Context the
+// stack was called with does not have a deadline. The next middleware must
+// complete before the timeout, or the context will be canceled.
+//
+// If DefaultTimeout is zero, no default timeout will be used if the Context
+// does not have a timeout.
+//
+// The next middleware must also ensure that any resources that are also
+// canceled by the stack's context are completely consumed before returning.
+// Otherwise the timeout cleanup will race the resource being consumed
+// upstream.
+type operationTimeout struct {
+	DefaultTimeout time.Duration
+}
+
+func (*operationTimeout) ID() string { return "OperationTimeout" }
+
+func (m *operationTimeout) HandleInitialize(
+	ctx context.Context, input middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+	output middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	if _, ok := ctx.Deadline(); !ok && m.DefaultTimeout != 0 {
+		var cancelFn func()
+		ctx, cancelFn = context.WithTimeout(ctx, m.DefaultTimeout)
+		defer cancelFn()
+	}
+
+	return next.HandleInitialize(ctx, input)
+}
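
The operationTimeout middleware above installs its default deadline only when the incoming Context has none; a sketch of overriding it from calling code (a fragment, imports as in the earlier sketches plus "time"):

	// A 30-second caller deadline takes precedence over the package's
	// 5-second defaultOperationTimeout.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	client := imds.New(imds.Options{})
	out, err := client.GetMetadata(ctx, &imds.GetMetadataInput{Path: "instance-id"})
	if err != nil {
		log.Fatal(err)
	}
	defer out.Content.Close()

+
+// appendURIPath joins a URI path component to the existing path with `/`
+// separators between the path components. If the path being added ends with a
+// trailing `/` that slash will be maintained.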
+func appendURIPath(base, add string) string {
+	reqPath := path.Join(base, add)
+	if len(add) != 0 && add[len(add)-1] == '/' {
+		reqPath += "/"
+	}
+	return reqPath
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go
new file mode 100644
index 00000000000..5703c6e16ad
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go
@@ -0,0 +1,261 @@
+package imds
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/smithy-go"
+	"github.com/aws/smithy-go/logging"
+	"net/http"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const (
+	// Headers for Token and TTL
+	tokenHeader     = "x-aws-ec2-metadata-token"
+	defaultTokenTTL = 5 * time.Minute
+)
+
+type tokenProvider struct {
+	client   *Client
+	tokenTTL time.Duration
+
+	token    *apiToken
+	tokenMux sync.RWMutex
+
+	disabled uint32 // Atomic updated
+}
+
+func newTokenProvider(client *Client, ttl time.Duration) *tokenProvider {
+	return &tokenProvider{
+		client:   client,
+		tokenTTL: ttl,
+	}
+}
+
+// apiToken provides the API token used by all operation calls for the EC2
+// Instance metadata service.
+type apiToken struct {
+	token   string
+	expires time.Time
+}
+
+var timeNow = time.Now
+
+// Expired returns if the token is expired.
+func (t *apiToken) Expired() bool {
+	// Calling Round(0) on the current time will truncate the monotonic reading only. Ensures credential expiry
+	// time is always based on reported wall-clock time.
+	return timeNow().Round(0).After(t.expires)
+}
+
+func (t *tokenProvider) ID() string { return "APITokenProvider" }
+
+// HandleFinalize is the finalize stack middleware, that if the token provider is
+// enabled, will attempt to add the cached API token to the request. If the API
+// token is not cached, it will be retrieved in a separate API call, getToken.
+//
+// For retry attempts, handler must be added after attempt retryer.
+//
+// If request for getToken fails the token provider may be disabled from future
+// requests, depending on the response status code.
+func (t *tokenProvider) HandleFinalize(
+	ctx context.Context, input middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	if t.fallbackEnabled() && !t.enabled() {
+		// short-circuits to insecure data flow if token provider is disabled.
+		return next.HandleFinalize(ctx, input)
+	}
+
+	req, ok := input.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unexpected transport request type %T", input.Request)
+	}
+
+	tok, err := t.getToken(ctx)
+	if err != nil {
+		// If the error allows the token to downgrade to insecure flow allow that.
+		var bypassErr *bypassTokenRetrievalError
+		if errors.As(err, &bypassErr) {
+			return next.HandleFinalize(ctx, input)
+		}
+
+		return out, metadata, fmt.Errorf("failed to get API token, %w", err)
+	}
+
+	req.Header.Set(tokenHeader, tok.token)
+
+	return next.HandleFinalize(ctx, input)
+}
+
+// HandleDeserialize is the deserialize stack middleware for determining if the
+// operation the token provider is decorating failed because of a 401
+// unauthorized status code. If the operation failed for that reason the token
+// provider needs to be re-enabled so that it can start adding the API token to
+// operation calls.
+func (t *tokenProvider) HandleDeserialize(
+	ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, input)
+	if err == nil {
+		return out, metadata, err
+	}
+
+	resp, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, fmt.Errorf("expect HTTP transport, got %T", out.RawResponse)
+	}
+
+	if resp.StatusCode == http.StatusUnauthorized { // unauthorized
+		t.enable()
+		err = &retryableError{Err: err, isRetryable: true}
+	}
+
+	return out, metadata, err
+}
+
+func (t *tokenProvider) getToken(ctx context.Context) (tok *apiToken, err error) {
+	if t.fallbackEnabled() && !t.enabled() {
+		return nil, &bypassTokenRetrievalError{
+			Err: fmt.Errorf("cannot get API token, provider disabled"),
+		}
+	}
+
+	t.tokenMux.RLock()
+	tok = t.token
+	t.tokenMux.RUnlock()
+
+	if tok != nil && !tok.Expired() {
+		return tok, nil
+	}
+
+	tok, err = t.updateToken(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	return tok, nil
+}
+
+func (t *tokenProvider) updateToken(ctx context.Context) (*apiToken, error) {
+	t.tokenMux.Lock()
+	defer t.tokenMux.Unlock()
+
+	// Prevent multiple concurrent requests from refreshing the token.
+	if t.token != nil && !t.token.Expired() {
+		tok := t.token
+		return tok, nil
+	}
+
+	result, err := t.client.getToken(ctx, &getTokenInput{
+		TokenTTL: t.tokenTTL,
+	})
+	if err != nil {
+		var statusErr interface{ HTTPStatusCode() int }
+		if errors.As(err, &statusErr) {
+			switch statusErr.HTTPStatusCode() {
+			// Disable future get token if failed because of 403, 404, or 405
+			case http.StatusForbidden,
+				http.StatusNotFound,
+				http.StatusMethodNotAllowed:
+
+				if t.fallbackEnabled() {
+					logger := middleware.GetLogger(ctx)
+					logger.Logf(logging.Warn, "falling back to IMDSv1: %v", err)
+					t.disable()
+				}
+
+			// 400 errors are terminal, and need to be upstreamed
+			case http.StatusBadRequest:
+				return nil, err
+			}
+		}
+
+		// Disable if request send failed or timed out getting response
+		var re *smithyhttp.RequestSendError
+		var ce *smithy.CanceledError
+		if errors.As(err, &re) || errors.As(err, &ce) {
+			atomic.StoreUint32(&t.disabled, 1)
+		}
+
+		if !t.fallbackEnabled() {
+			// NOTE: getToken() is an implementation detail of some outer operation
+			// (e.g. GetMetadata). It has its own retries that have already been exhausted.
+			// Mark the underlying error as a terminal error.
+			err = &retryableError{Err: err, isRetryable: false}
+			return nil, err
+		}
+
+		// Token couldn't be retrieved, fallback to IMDSv1 insecure flow for this request
+		// and allow the request to proceed. Future requests _may_ re-attempt fetching a
+		// token if not disabled.
+		return nil, &bypassTokenRetrievalError{Err: err}
+	}
+
+	tok := &apiToken{
+		token:   result.Token,
+		expires: timeNow().Add(result.TokenTTL),
+	}
+	t.token = tok
+
+	return tok, nil
+}
+
+// enabled returns if the token provider is currently enabled or not.
+func (t *tokenProvider) enabled() bool {
+	return atomic.LoadUint32(&t.disabled) == 0
+}
+
+// fallbackEnabled returns false if EnableFallback is [aws.FalseTernary], true otherwise
+func (t *tokenProvider) fallbackEnabled() bool {
+	switch t.client.options.EnableFallback {
+	case aws.FalseTernary:
+		return false
+	default:
+		return true
+	}
+}
+
+// disable disables the token provider and it will no longer attempt to inject
+// the token, nor request updates.
+func (t *tokenProvider) disable() {
+	atomic.StoreUint32(&t.disabled, 1)
+}
+
+// enable enables the token provider to start refreshing tokens, and adding them
+// to the pending request.
+func (t *tokenProvider) enable() {
+	t.tokenMux.Lock()
+	t.token = nil
+	t.tokenMux.Unlock()
+	atomic.StoreUint32(&t.disabled, 0)
+}
+
+type bypassTokenRetrievalError struct {
+	Err error
+}
+
+func (e *bypassTokenRetrievalError) Error() string {
+	return fmt.Sprintf("bypass token retrieval, %v", e.Err)
+}
+
+func (e *bypassTokenRetrievalError) Unwrap() error { return e.Err }
+
+type retryableError struct {
+	Err         error
+	isRetryable bool
+}
+
+func (e *retryableError) RetryableError() bool { return e.isRetryable }
+
+func (e *retryableError) Error() string { return e.Err.Error() }
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/scheme.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/scheme.go
new file mode 100644
index 00000000000..ff229c048ff
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/scheme.go
@@ -0,0 +1,186 @@
+package auth
+
+import (
+	"context"
+	"fmt"
+
+	smithy "github.com/aws/smithy-go"
+	"github.com/aws/smithy-go/middleware"
+)
+
+// SigV4 is a constant representing
+// Authentication Scheme Signature Version 4
+const SigV4 = "sigv4"
+
+// SigV4A is a constant representing
+// Authentication Scheme Signature Version 4A
+const SigV4A = "sigv4a"
+
+// None is a constant representing the
+// None Authentication Scheme
+const None = "none"
+
+// SupportedSchemes is a data structure
+// that indicates the list of supported AWS
+// authentication schemes
+var SupportedSchemes = map[string]bool{
+	SigV4:  true,
+	SigV4A: true,
+	None:   true,
+}
+
+// AuthenticationScheme is a representation of
+// AWS authentication schemes
+type AuthenticationScheme interface {
+	isAuthenticationScheme()
+}
+
+// AuthenticationSchemeV4 is an AWS SigV4 representation
+type AuthenticationSchemeV4 struct {
+	Name                  string
+	SigningName           *string
+	SigningRegion         *string
+	DisableDoubleEncoding *bool
+}
+
+func (a *AuthenticationSchemeV4) isAuthenticationScheme() {}
+
+// AuthenticationSchemeV4A is an AWS SigV4A representation
+type AuthenticationSchemeV4A struct {
+	Name                  string
+	SigningName           *string
+	SigningRegionSet      []string
+	DisableDoubleEncoding *bool
+}
+
+func (a *AuthenticationSchemeV4A) isAuthenticationScheme() {}
+
+// AuthenticationSchemeNone is a representation for the none auth scheme
+type AuthenticationSchemeNone struct{}
+
+func (a *AuthenticationSchemeNone) isAuthenticationScheme() {}
+
+// NoAuthenticationSchemesFoundError is used in signaling
+// that no authentication schemes have been specified.
+type NoAuthenticationSchemesFoundError struct{}
+
+func (e *NoAuthenticationSchemesFoundError) Error() string {
+	return fmt.Sprint("No authentication schemes specified.")
+}
+
+// UnSupportedAuthenticationSchemeSpecifiedError is used in
+// signaling that only unsupported authentication schemes
+// were specified.
+type UnSupportedAuthenticationSchemeSpecifiedError struct {
+	UnsupportedSchemes []string
+}
+
+func (e *UnSupportedAuthenticationSchemeSpecifiedError) Error() string {
+	return fmt.Sprint("Unsupported authentication scheme specified.")
+}
+
+// GetAuthenticationSchemes extracts the relevant authentication scheme data
+// into a custom strongly typed Go data structure.
+func GetAuthenticationSchemes(p *smithy.Properties) ([]AuthenticationScheme, error) { + var result []AuthenticationScheme + if !p.Has("authSchemes") { + return nil, &NoAuthenticationSchemesFoundError{} + } + + authSchemes, _ := p.Get("authSchemes").([]interface{}) + + var unsupportedSchemes []string + for _, scheme := range authSchemes { + authScheme, _ := scheme.(map[string]interface{}) + + switch authScheme["name"] { + case SigV4: + v4Scheme := AuthenticationSchemeV4{ + Name: SigV4, + SigningName: getSigningName(authScheme), + SigningRegion: getSigningRegion(authScheme), + DisableDoubleEncoding: getDisableDoubleEncoding(authScheme), + } + result = append(result, AuthenticationScheme(&v4Scheme)) + case SigV4A: + v4aScheme := AuthenticationSchemeV4A{ + Name: SigV4A, + SigningName: getSigningName(authScheme), + SigningRegionSet: getSigningRegionSet(authScheme), + DisableDoubleEncoding: getDisableDoubleEncoding(authScheme), + } + result = append(result, AuthenticationScheme(&v4aScheme)) + case None: + noneScheme := AuthenticationSchemeNone{} + result = append(result, AuthenticationScheme(&noneScheme)) + default: + unsupportedSchemes = append(unsupportedSchemes, authScheme["name"].(string)) + continue + } + } + + if len(result) == 0 { + return nil, &UnSupportedAuthenticationSchemeSpecifiedError{ + UnsupportedSchemes: unsupportedSchemes, + } + } + + return result, nil +} + +type disableDoubleEncoding struct{} + +// SetDisableDoubleEncoding sets or modifies the disable double encoding option +// on the context. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func SetDisableDoubleEncoding(ctx context.Context, value bool) context.Context { + return middleware.WithStackValue(ctx, disableDoubleEncoding{}, value) +} + +// GetDisableDoubleEncoding retrieves the disable double encoding option +// from the context. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. 
+func GetDisableDoubleEncoding(ctx context.Context) (value bool, ok bool) { + value, ok = middleware.GetStackValue(ctx, disableDoubleEncoding{}).(bool) + return value, ok +} + +func getSigningName(authScheme map[string]interface{}) *string { + signingName, ok := authScheme["signingName"].(string) + if !ok || signingName == "" { + return nil + } + return &signingName +} + +func getSigningRegionSet(authScheme map[string]interface{}) []string { + untypedSigningRegionSet, ok := authScheme["signingRegionSet"].([]interface{}) + if !ok { + return nil + } + signingRegionSet := []string{} + for _, item := range untypedSigningRegionSet { + signingRegionSet = append(signingRegionSet, item.(string)) + } + return signingRegionSet +} + +func getSigningRegion(authScheme map[string]interface{}) *string { + signingRegion, ok := authScheme["signingRegion"].(string) + if !ok || signingRegion == "" { + return nil + } + return &signingRegion +} + +func getDisableDoubleEncoding(authScheme map[string]interface{}) *bool { + disableDoubleEncoding, ok := authScheme["disableDoubleEncoding"].(bool) + if !ok { + return nil + } + return &disableDoubleEncoding +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md new file mode 100644 index 00000000000..d260b444dca --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md @@ -0,0 +1,223 @@ +# v1.2.2 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.1 (2023-11-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.43 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.42 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.41 (2023-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.40 (2023-08-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.39 (2023-08-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.38 (2023-08-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.37 (2023-07-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.36 (2023-07-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.35 (2023-07-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.34 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.33 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.32 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.31 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.30 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.29 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.28 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.27 (2022-12-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.26 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.25 (2022-10-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.24 (2022-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.23 (2022-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.22 (2022-09-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.21 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.20 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.19 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.18 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.17 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.16 (2022-08-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.15 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.14 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.13 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.12 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.11 (2022-05-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.10 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.9 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.8 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.7 (2022-03-23) + +* 
**Dependency Update**: Updated to the latest SDK module versions + +# v1.1.6 (2022-03-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.5 (2022-02-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.4 (2022-01-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.3 (2022-01-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.2 (2021-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.1 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.0 (2021-11-06) + +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.7 (2021-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.6 (2021-10-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.5 (2021-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.4 (2021-08-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.3 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.2 (2021-08-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.1 (2021-07-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.0 (2021-06-25) + +* **Release**: Release new modules +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/httpprovider/LICENSE b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/LICENSE.txt similarity index 100% rename from vendor/go.opentelemetry.io/collector/confmap/provider/httpprovider/LICENSE rename to vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/LICENSE.txt diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/config.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/config.go new file mode 100644 index 00000000000..cd4d19b8982 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/config.go @@ -0,0 +1,65 @@ +package configsources + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws" +) + +// EnableEndpointDiscoveryProvider is an interface for retrieving external configuration value +// for Enable Endpoint Discovery +type EnableEndpointDiscoveryProvider interface { + GetEnableEndpointDiscovery(ctx context.Context) (value aws.EndpointDiscoveryEnableState, found bool, err error) +} + +// ResolveEnableEndpointDiscovery extracts the first instance of a EnableEndpointDiscoveryProvider from the config slice. +// Additionally returns a aws.EndpointDiscoveryEnableState to indicate if the value was found in provided configs, +// and error if one is encountered. 
+func ResolveEnableEndpointDiscovery(ctx context.Context, configs []interface{}) (value aws.EndpointDiscoveryEnableState, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(EnableEndpointDiscoveryProvider); ok { + value, found, err = p.GetEnableEndpointDiscovery(ctx) + if err != nil || found { + break + } + } + } + return +} + +// UseDualStackEndpointProvider is an interface for retrieving external configuration values for UseDualStackEndpoint +type UseDualStackEndpointProvider interface { + GetUseDualStackEndpoint(context.Context) (value aws.DualStackEndpointState, found bool, err error) +} + +// ResolveUseDualStackEndpoint extracts the first instance of a UseDualStackEndpoint from the config slice. +// Additionally returns a boolean to indicate if the value was found in provided configs, and error if one is encountered. +func ResolveUseDualStackEndpoint(ctx context.Context, configs []interface{}) (value aws.DualStackEndpointState, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(UseDualStackEndpointProvider); ok { + value, found, err = p.GetUseDualStackEndpoint(ctx) + if err != nil || found { + break + } + } + } + return +} + +// UseFIPSEndpointProvider is an interface for retrieving external configuration values for UseFIPSEndpoint +type UseFIPSEndpointProvider interface { + GetUseFIPSEndpoint(context.Context) (value aws.FIPSEndpointState, found bool, err error) +} + +// ResolveUseFIPSEndpoint extracts the first instance of a UseFIPSEndpointProvider from the config slice. +// Additionally, returns a boolean to indicate if the value was found in provided configs, and error if one is encountered. +func ResolveUseFIPSEndpoint(ctx context.Context, configs []interface{}) (value aws.FIPSEndpointState, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(UseFIPSEndpointProvider); ok { + value, found, err = p.GetUseFIPSEndpoint(ctx) + if err != nil || found { + break + } + } + } + return +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/endpoints.go new file mode 100644 index 00000000000..e7835f85241 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/endpoints.go @@ -0,0 +1,57 @@ +package configsources + +import ( + "context" +) + +// ServiceBaseEndpointProvider is needed to search for all providers +// that provide a configured service endpoint +type ServiceBaseEndpointProvider interface { + GetServiceBaseEndpoint(ctx context.Context, sdkID string) (string, bool, error) +} + +// IgnoreConfiguredEndpointsProvider is needed to search for all providers +// that provide a flag to disable configured endpoints. +// +// Currently duplicated from github.com/aws/aws-sdk-go-v2/config because +// service packages cannot import github.com/aws/aws-sdk-go-v2/config +// due to result import cycle error. +type IgnoreConfiguredEndpointsProvider interface { + GetIgnoreConfiguredEndpoints(ctx context.Context) (bool, bool, error) +} + +// GetIgnoreConfiguredEndpoints is used in knowing when to disable configured +// endpoints feature. +// +// Currently duplicated from github.com/aws/aws-sdk-go-v2/config because +// service packages cannot import github.com/aws/aws-sdk-go-v2/config +// due to result import cycle error. 
+func GetIgnoreConfiguredEndpoints(ctx context.Context, configs []interface{}) (value bool, found bool, err error) {
+	for _, cfg := range configs {
+		if p, ok := cfg.(IgnoreConfiguredEndpointsProvider); ok {
+			value, found, err = p.GetIgnoreConfiguredEndpoints(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// ResolveServiceBaseEndpoint is used to retrieve service endpoints from configured sources
+// while allowing for configured endpoints to be disabled
+func ResolveServiceBaseEndpoint(ctx context.Context, sdkID string, configs []interface{}) (value string, found bool, err error) {
+	if val, found, _ := GetIgnoreConfiguredEndpoints(ctx, configs); found && val {
+		return "", false, nil
+	}
+
+	for _, cs := range configs {
+		if p, ok := cs.(ServiceBaseEndpointProvider); ok {
+			value, found, err = p.GetServiceBaseEndpoint(context.Background(), sdkID)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go
new file mode 100644
index 00000000000..991b7b84abe
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package configsources
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.2.2"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/arn.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/arn.go
new file mode 100644
index 00000000000..e6223dd3b3e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/arn.go
@@ -0,0 +1,94 @@
+package awsrulesfn
+
+import (
+	"strings"
+)
+
+// ARN provides AWS ARN components broken out into a data structure.
+type ARN struct {
+	Partition  string
+	Service    string
+	Region     string
+	AccountId  string
+	ResourceId OptionalStringSlice
+}
+
+const (
+	arnDelimiters      = ":"
+	resourceDelimiters = "/:"
+	arnSections        = 6
+	arnPrefix          = "arn:"
+
+	// zero-indexed
+	sectionPartition = 1
+	sectionService   = 2
+	sectionRegion    = 3
+	sectionAccountID = 4
+	sectionResource  = 5
+)
+
+// ParseARN returns an [ARN] value parsed from the input string provided. If
+// the ARN cannot be parsed, nil will be returned.
+func ParseARN(input string) *ARN {
+	if !strings.HasPrefix(input, arnPrefix) {
+		return nil
+	}
+
+	sections := strings.SplitN(input, arnDelimiters, arnSections)
+	if numSections := len(sections); numSections != arnSections {
+		return nil
+	}
+
+	if sections[sectionPartition] == "" {
+		return nil
+	}
+	if sections[sectionService] == "" {
+		return nil
+	}
+	if sections[sectionResource] == "" {
+		return nil
+	}
+
+	return &ARN{
+		Partition:  sections[sectionPartition],
+		Service:    sections[sectionService],
+		Region:     sections[sectionRegion],
+		AccountId:  sections[sectionAccountID],
+		ResourceId: splitResource(sections[sectionResource]),
+	}
+}
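
A worked example (illustrative values, not from the diff) of how ParseARN splits the six colon-delimited sections, with the resource further split by the delimiters handled in splitResource below:

	arn := ParseARN("arn:aws:s3:::my-bucket/keys/photo.png")
	// arn.Partition  == "aws"
	// arn.Service    == "s3"
	// arn.Region     == ""   (S3 ARNs carry no region)
	// arn.AccountId  == ""
	// arn.ResourceId == OptionalStringSlice{"my-bucket", "keys", "photo.png"}
	if bucket := arn.ResourceId.Get(0); bucket != nil {
		fmt.Println(*bucket) // "my-bucket"
	}

+
+// splitResource splits the resource components by the ARN resource delimiters.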
+func splitResource(v string) []string {
+	var parts []string
+	var offset int
+
+	for offset <= len(v) {
+		idx := strings.IndexAny(v[offset:], "/:")
+		if idx < 0 {
+			parts = append(parts, v[offset:])
+			break
+		}
+		parts = append(parts, v[offset:idx+offset])
+		offset += idx + 1
+	}
+
+	return parts
+}
+
+// OptionalStringSlice provides a helper to safely get the index of a string
+// slice that may be out of bounds. Returns pointer to string if index is
+// valid. Otherwise returns nil.
+type OptionalStringSlice []string
+
+// Get returns a string pointer of the string at index i if the index is valid.
+// Otherwise returns nil.
+func (s OptionalStringSlice) Get(i int) *string {
+	if i < 0 || i >= len(s) {
+		return nil
+	}
+
+	v := s[i]
+	return &v
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/doc.go
new file mode 100644
index 00000000000..d5a365853f8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/doc.go
@@ -0,0 +1,3 @@
+// Package awsrulesfn provides AWS focused endpoint rule functions for
+// evaluating endpoint resolution rules.
+package awsrulesfn
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/generate.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/generate.go
new file mode 100644
index 00000000000..df72da97ce3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/generate.go
@@ -0,0 +1,7 @@
+//go:build codegen
+// +build codegen
+
+package awsrulesfn
+
+//go:generate go run -tags codegen ./internal/partition/codegen.go -model partitions.json -output partitions.go
+//go:generate gofmt -w -s .
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/host.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/host.go
new file mode 100644
index 00000000000..637e5fc18e4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/host.go
@@ -0,0 +1,51 @@
+package awsrulesfn
+
+import (
+	"net"
+	"strings"
+
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
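
A few sample evaluations of IsVirtualHostableS3Bucket, defined below (illustrative values chosen to exercise each rule in the function body):

	IsVirtualHostableS3Bucket("my-bucket", false)   // true
	IsVirtualHostableS3Bucket("My_Bucket", false)   // false: uppercase letters (and '_' is not a valid host label)
	IsVirtualHostableS3Bucket("ab", false)          // false: labels must be at least 3 characters
	IsVirtualHostableS3Bucket("192.168.0.1", false) // false: parses as an IP address
	IsVirtualHostableS3Bucket("docs.example", true) // true: each dot-separated label is checked

+// IsVirtualHostableS3Bucket reports whether the input is a DNS-compatible
+// bucket name that can be used with Amazon S3 virtual hosted style
+// addressing. Similar to [rulesfn.IsValidHostLabel] with the added
+// restriction that each label must be 3 to 63 characters long, all
+// lowercase, and not formatted as an IP address.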
+func IsVirtualHostableS3Bucket(input string, allowSubDomains bool) bool { + // input should not be formatted as an IP address + // NOTE: this will technically trip up on IPv6 hosts with zone IDs, but + // validation further down will catch that anyway (it's guaranteed to have + // unfriendly characters % and : if that's the case) + if net.ParseIP(input) != nil { + return false + } + + var labels []string + if allowSubDomains { + labels = strings.Split(input, ".") + } else { + labels = []string{input} + } + + for _, label := range labels { + // validate special length constraints + if l := len(label); l < 3 || l > 63 { + return false + } + + // Validate no capital letters + for _, r := range label { + if r >= 'A' && r <= 'Z' { + return false + } + } + + // Validate valid host label + if !smithyhttp.ValidHostLabel(label) { + return false + } + } + + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go new file mode 100644 index 00000000000..ba6032758a5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go @@ -0,0 +1,75 @@ +package awsrulesfn + +import "regexp" + +// Partition provides the metadata describing an AWS partition. +type Partition struct { + ID string `json:"id"` + Regions map[string]RegionOverrides `json:"regions"` + RegionRegex string `json:"regionRegex"` + DefaultConfig PartitionConfig `json:"outputs"` +} + +// PartitionConfig provides the endpoint metadata for an AWS region or partition. +type PartitionConfig struct { + Name string `json:"name"` + DnsSuffix string `json:"dnsSuffix"` + DualStackDnsSuffix string `json:"dualStackDnsSuffix"` + SupportsFIPS bool `json:"supportsFIPS"` + SupportsDualStack bool `json:"supportsDualStack"` +} + +type RegionOverrides struct { + Name *string `json:"name"` + DnsSuffix *string `json:"dnsSuffix"` + DualStackDnsSuffix *string `json:"dualStackDnsSuffix"` + SupportsFIPS *bool `json:"supportsFIPS"` + SupportsDualStack *bool `json:"supportsDualStack"` +} + +const defaultPartition = "aws" + +func getPartition(partitions []Partition, region string) *PartitionConfig { + for _, partition := range partitions { + if v, ok := partition.Regions[region]; ok { + p := mergeOverrides(partition.DefaultConfig, v) + return &p + } + } + + for _, partition := range partitions { + regionRegex := regexp.MustCompile(partition.RegionRegex) + if regionRegex.MatchString(region) { + v := partition.DefaultConfig + return &v + } + } + + for _, partition := range partitions { + if partition.ID == defaultPartition { + v := partition.DefaultConfig + return &v + } + } + + return nil +} + +func mergeOverrides(into PartitionConfig, from RegionOverrides) PartitionConfig { + if from.Name != nil { + into.Name = *from.Name + } + if from.DnsSuffix != nil { + into.DnsSuffix = *from.DnsSuffix + } + if from.DualStackDnsSuffix != nil { + into.DualStackDnsSuffix = *from.DualStackDnsSuffix + } + if from.SupportsFIPS != nil { + into.SupportsFIPS = *from.SupportsFIPS + } + if from.SupportsDualStack != nil { + into.SupportsDualStack = *from.SupportsDualStack + } + return into +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go new file mode 100644 index 00000000000..7ea49d4ea40 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go @@ -0,0 +1,343 
@@ +// Code generated by endpoint/awsrulesfn/internal/partition. DO NOT EDIT. + +package awsrulesfn + +// GetPartition returns an AWS [Partition] for the region provided. If the +// partition cannot be determined nil will be returned. +func GetPartition(region string) *PartitionConfig { + return getPartition(partitions, region) +} + +var partitions = []Partition{ + { + ID: "aws", + RegionRegex: "^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$", + DefaultConfig: PartitionConfig{ + Name: "aws", + DnsSuffix: "amazonaws.com", + DualStackDnsSuffix: "api.aws", + SupportsFIPS: true, + SupportsDualStack: true, + }, + Regions: map[string]RegionOverrides{ + "af-south-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "ap-east-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "ap-northeast-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "ap-northeast-2": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "ap-northeast-3": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "ap-south-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "ap-south-2": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "ap-southeast-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "ap-southeast-2": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "ap-southeast-3": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "aws-global": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "ca-central-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "eu-central-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "eu-central-2": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "eu-north-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "eu-south-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "eu-south-2": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "eu-west-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "eu-west-2": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "eu-west-3": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "me-central-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "me-south-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "sa-east-1": { + Name: nil, + DnsSuffix: nil, + 
DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-east-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-east-2": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-west-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-west-2": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + }, + }, + { + ID: "aws-cn", + RegionRegex: "^cn\\-\\w+\\-\\d+$", + DefaultConfig: PartitionConfig{ + Name: "aws-cn", + DnsSuffix: "amazonaws.com.cn", + DualStackDnsSuffix: "api.amazonwebservices.com.cn", + SupportsFIPS: true, + SupportsDualStack: true, + }, + Regions: map[string]RegionOverrides{ + "aws-cn-global": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "cn-north-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "cn-northwest-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + }, + }, + { + ID: "aws-us-gov", + RegionRegex: "^us\\-gov\\-\\w+\\-\\d+$", + DefaultConfig: PartitionConfig{ + Name: "aws-us-gov", + DnsSuffix: "amazonaws.com", + DualStackDnsSuffix: "api.aws", + SupportsFIPS: true, + SupportsDualStack: true, + }, + Regions: map[string]RegionOverrides{ + "aws-us-gov-global": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-gov-east-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-gov-west-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + }, + }, + { + ID: "aws-iso", + RegionRegex: "^us\\-iso\\-\\w+\\-\\d+$", + DefaultConfig: PartitionConfig{ + Name: "aws-iso", + DnsSuffix: "c2s.ic.gov", + DualStackDnsSuffix: "c2s.ic.gov", + SupportsFIPS: true, + SupportsDualStack: false, + }, + Regions: map[string]RegionOverrides{ + "aws-iso-global": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-iso-east-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-iso-west-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + }, + }, + { + ID: "aws-iso-b", + RegionRegex: "^us\\-isob\\-\\w+\\-\\d+$", + DefaultConfig: PartitionConfig{ + Name: "aws-iso-b", + DnsSuffix: "sc2s.sgov.gov", + DualStackDnsSuffix: "sc2s.sgov.gov", + SupportsFIPS: true, + SupportsDualStack: false, + }, + Regions: map[string]RegionOverrides{ + "aws-iso-b-global": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-isob-east-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + }, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json new file mode 100644 index 00000000000..ab107ca5511 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json @@ -0,0 
+1,213 @@ +{ + "partitions" : [ { + "id" : "aws", + "outputs" : { + "dnsSuffix" : "amazonaws.com", + "dualStackDnsSuffix" : "api.aws", + "implicitGlobalRegion" : "us-east-1", + "name" : "aws", + "supportsDualStack" : true, + "supportsFIPS" : true + }, + "regionRegex" : "^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$", + "regions" : { + "af-south-1" : { + "description" : "Africa (Cape Town)" + }, + "ap-east-1" : { + "description" : "Asia Pacific (Hong Kong)" + }, + "ap-northeast-1" : { + "description" : "Asia Pacific (Tokyo)" + }, + "ap-northeast-2" : { + "description" : "Asia Pacific (Seoul)" + }, + "ap-northeast-3" : { + "description" : "Asia Pacific (Osaka)" + }, + "ap-south-1" : { + "description" : "Asia Pacific (Mumbai)" + }, + "ap-south-2" : { + "description" : "Asia Pacific (Hyderabad)" + }, + "ap-southeast-1" : { + "description" : "Asia Pacific (Singapore)" + }, + "ap-southeast-2" : { + "description" : "Asia Pacific (Sydney)" + }, + "ap-southeast-3" : { + "description" : "Asia Pacific (Jakarta)" + }, + "ap-southeast-4" : { + "description" : "Asia Pacific (Melbourne)" + }, + "aws-global" : { + "description" : "AWS Standard global region" + }, + "ca-central-1" : { + "description" : "Canada (Central)" + }, + "eu-central-1" : { + "description" : "Europe (Frankfurt)" + }, + "eu-central-2" : { + "description" : "Europe (Zurich)" + }, + "eu-north-1" : { + "description" : "Europe (Stockholm)" + }, + "eu-south-1" : { + "description" : "Europe (Milan)" + }, + "eu-south-2" : { + "description" : "Europe (Spain)" + }, + "eu-west-1" : { + "description" : "Europe (Ireland)" + }, + "eu-west-2" : { + "description" : "Europe (London)" + }, + "eu-west-3" : { + "description" : "Europe (Paris)" + }, + "il-central-1" : { + "description" : "Israel (Tel Aviv)" + }, + "me-central-1" : { + "description" : "Middle East (UAE)" + }, + "me-south-1" : { + "description" : "Middle East (Bahrain)" + }, + "sa-east-1" : { + "description" : "South America (Sao Paulo)" + }, + "us-east-1" : { + "description" : "US East (N. Virginia)" + }, + "us-east-2" : { + "description" : "US East (Ohio)" + }, + "us-west-1" : { + "description" : "US West (N. 
California)" + }, + "us-west-2" : { + "description" : "US West (Oregon)" + } + } + }, { + "id" : "aws-cn", + "outputs" : { + "dnsSuffix" : "amazonaws.com.cn", + "dualStackDnsSuffix" : "api.amazonwebservices.com.cn", + "implicitGlobalRegion" : "cn-northwest-1", + "name" : "aws-cn", + "supportsDualStack" : true, + "supportsFIPS" : true + }, + "regionRegex" : "^cn\\-\\w+\\-\\d+$", + "regions" : { + "aws-cn-global" : { + "description" : "AWS China global region" + }, + "cn-north-1" : { + "description" : "China (Beijing)" + }, + "cn-northwest-1" : { + "description" : "China (Ningxia)" + } + } + }, { + "id" : "aws-us-gov", + "outputs" : { + "dnsSuffix" : "amazonaws.com", + "dualStackDnsSuffix" : "api.aws", + "implicitGlobalRegion" : "us-gov-west-1", + "name" : "aws-us-gov", + "supportsDualStack" : true, + "supportsFIPS" : true + }, + "regionRegex" : "^us\\-gov\\-\\w+\\-\\d+$", + "regions" : { + "aws-us-gov-global" : { + "description" : "AWS GovCloud (US) global region" + }, + "us-gov-east-1" : { + "description" : "AWS GovCloud (US-East)" + }, + "us-gov-west-1" : { + "description" : "AWS GovCloud (US-West)" + } + } + }, { + "id" : "aws-iso", + "outputs" : { + "dnsSuffix" : "c2s.ic.gov", + "dualStackDnsSuffix" : "c2s.ic.gov", + "implicitGlobalRegion" : "us-iso-east-1", + "name" : "aws-iso", + "supportsDualStack" : false, + "supportsFIPS" : true + }, + "regionRegex" : "^us\\-iso\\-\\w+\\-\\d+$", + "regions" : { + "aws-iso-global" : { + "description" : "AWS ISO (US) global region" + }, + "us-iso-east-1" : { + "description" : "US ISO East" + }, + "us-iso-west-1" : { + "description" : "US ISO WEST" + } + } + }, { + "id" : "aws-iso-b", + "outputs" : { + "dnsSuffix" : "sc2s.sgov.gov", + "dualStackDnsSuffix" : "sc2s.sgov.gov", + "implicitGlobalRegion" : "us-isob-east-1", + "name" : "aws-iso-b", + "supportsDualStack" : false, + "supportsFIPS" : true + }, + "regionRegex" : "^us\\-isob\\-\\w+\\-\\d+$", + "regions" : { + "aws-iso-b-global" : { + "description" : "AWS ISOB (US) global region" + }, + "us-isob-east-1" : { + "description" : "US ISOB East (Ohio)" + } + } + }, { + "id" : "aws-iso-e", + "outputs" : { + "dnsSuffix" : "cloud.adc-e.uk", + "dualStackDnsSuffix" : "cloud.adc-e.uk", + "implicitGlobalRegion" : "eu-isoe-west-1", + "name" : "aws-iso-e", + "supportsDualStack" : false, + "supportsFIPS" : true + }, + "regionRegex" : "^eu\\-isoe\\-\\w+\\-\\d+$", + "regions" : { } + }, { + "id" : "aws-iso-f", + "outputs" : { + "dnsSuffix" : "csp.hci.ic.gov", + "dualStackDnsSuffix" : "csp.hci.ic.gov", + "implicitGlobalRegion" : "us-isof-south-1", + "name" : "aws-iso-f", + "supportsDualStack" : false, + "supportsFIPS" : true + }, + "regionRegex" : "^us\\-isof\\-\\w+\\-\\d+$", + "regions" : { } + } ], + "version" : "1.1" +} \ No newline at end of file diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md new file mode 100644 index 00000000000..1a188a5715b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md @@ -0,0 +1,196 @@ +# v2.5.2 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.5.1 (2023-11-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.5.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.37 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.36 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.35 (2023-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.34 (2023-08-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.33 (2023-08-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.32 (2023-08-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.31 (2023-07-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.30 (2023-07-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.29 (2023-07-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.28 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.27 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.26 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.25 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.24 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.23 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.22 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.21 (2022-12-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.20 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.19 (2022-10-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.18 (2022-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.17 (2022-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.16 (2022-09-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.15 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.14 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.13 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.12 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.11 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.10 (2022-08-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.9 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.8 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.7 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.6 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.5 (2022-05-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.4 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.3 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.2 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.1 (2022-03-23) + +* 
**Dependency Update**: Updated to the latest SDK module versions + +# v2.4.0 (2022-03-08) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.3.0 (2022-02-24) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.2.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.1.0 (2022-01-07) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.0.2 (2021-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.0.1 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.0.0 (2021-11-06) + +* **Release**: Endpoint Variant Model Support +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/httpsprovider/LICENSE b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/LICENSE.txt similarity index 100% rename from vendor/go.opentelemetry.io/collector/confmap/provider/httpsprovider/LICENSE rename to vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/LICENSE.txt diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/endpoints.go new file mode 100644 index 00000000000..32251a7e3cc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/endpoints.go @@ -0,0 +1,302 @@ +package endpoints + +import ( + "fmt" + "github.com/aws/smithy-go/logging" + "regexp" + "strings" + + "github.com/aws/aws-sdk-go-v2/aws" +) + +// DefaultKey is a compound map key of a variant and other values. +type DefaultKey struct { + Variant EndpointVariant + ServiceVariant ServiceVariant +} + +// EndpointKey is a compound map key of a region and associated variant value. +type EndpointKey struct { + Region string + Variant EndpointVariant + ServiceVariant ServiceVariant +} + +// EndpointVariant is a bit field to describe the endpoints attributes. +type EndpointVariant uint64 + +const ( + // FIPSVariant indicates that the endpoint is FIPS capable. + FIPSVariant EndpointVariant = 1 << (64 - 1 - iota) + + // DualStackVariant indicates that the endpoint is DualStack capable. + DualStackVariant +) + +// ServiceVariant is a bit field to describe the service endpoint attributes. +type ServiceVariant uint64 + +const ( + defaultProtocol = "https" + defaultSigner = "v4" +) + +var ( + protocolPriority = []string{"https", "http"} + signerPriority = []string{"v4", "s3v4"} +) + +// Options provide configuration needed to direct how endpoints are resolved. +type Options struct { + // Logger is a logging implementation that log events should be sent to. + Logger logging.Logger + + // LogDeprecated indicates that deprecated endpoints should be logged to the provided logger. + LogDeprecated bool + + // ResolvedRegion is the resolved region string. If provided (non-zero length) it takes priority + // over the region name passed to the ResolveEndpoint call. 
+ ResolvedRegion string + + // Disable usage of HTTPS (TLS / SSL) + DisableHTTPS bool + + // Instruct the resolver to use a service endpoint that supports dual-stack. + // If a service does not have a dual-stack endpoint an error will be returned by the resolver. + UseDualStackEndpoint aws.DualStackEndpointState + + // Instruct the resolver to use a service endpoint that supports FIPS. + // If a service does not have a FIPS endpoint an error will be returned by the resolver. + UseFIPSEndpoint aws.FIPSEndpointState + + // ServiceVariant is a bitfield of service specified endpoint variant data. + ServiceVariant ServiceVariant +} + +// GetEndpointVariant returns the EndpointVariant for the variant associated options. +func (o Options) GetEndpointVariant() (v EndpointVariant) { + if o.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled { + v |= DualStackVariant + } + if o.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled { + v |= FIPSVariant + } + return v +} + +// Partitions is a slice of Partition +type Partitions []Partition + +// ResolveEndpoint resolves a service endpoint for the given region and options. +func (ps Partitions) ResolveEndpoint(region string, opts Options) (aws.Endpoint, error) { + if len(ps) == 0 { + return aws.Endpoint{}, fmt.Errorf("no partitions found") + } + + if opts.Logger == nil { + opts.Logger = logging.Nop{} + } + + if len(opts.ResolvedRegion) > 0 { + region = opts.ResolvedRegion + } + + for i := 0; i < len(ps); i++ { + if !ps[i].canResolveEndpoint(region, opts) { + continue + } + + return ps[i].ResolveEndpoint(region, opts) + } + + // fall back to the first partition's format when resolving the endpoint. + return ps[0].ResolveEndpoint(region, opts) +} + +// Partition is an AWS partition description for a service and its region endpoints. +type Partition struct { + ID string + RegionRegex *regexp.Regexp + PartitionEndpoint string + IsRegionalized bool + Defaults map[DefaultKey]Endpoint + Endpoints Endpoints +} + +func (p Partition) canResolveEndpoint(region string, opts Options) bool { + _, ok := p.Endpoints[EndpointKey{ + Region: region, + Variant: opts.GetEndpointVariant(), + }] + return ok || p.RegionRegex.MatchString(region) +} + +// ResolveEndpoint resolves a service endpoint for the given region and options. +func (p Partition) ResolveEndpoint(region string, options Options) (resolved aws.Endpoint, err error) { + if len(region) == 0 && len(p.PartitionEndpoint) != 0 { + region = p.PartitionEndpoint + } + + endpoints := p.Endpoints + + variant := options.GetEndpointVariant() + serviceVariant := options.ServiceVariant + + defaults := p.Defaults[DefaultKey{ + Variant: variant, + ServiceVariant: serviceVariant, + }] + + return p.endpointForRegion(region, variant, serviceVariant, endpoints).resolve(p.ID, region, defaults, options) +} + +func (p Partition) endpointForRegion(region string, variant EndpointVariant, serviceVariant ServiceVariant, endpoints Endpoints) Endpoint { + key := EndpointKey{ + Region: region, + Variant: variant, + } + + if e, ok := endpoints[key]; ok { + return e + } + + if !p.IsRegionalized { + return endpoints[EndpointKey{ + Region: p.PartitionEndpoint, + Variant: variant, + ServiceVariant: serviceVariant, + }] + } + + // Unable to find any matching endpoint, return + // blank that will be used for generic endpoint creation. 
+ return Endpoint{} +} + +// Endpoints is a map of service config regions to endpoints +type Endpoints map[EndpointKey]Endpoint + +// CredentialScope is the credential scope of a region and service +type CredentialScope struct { + Region string + Service string +} + +// Endpoint is a service endpoint description +type Endpoint struct { + // True if the endpoint cannot be resolved for this partition/region/service + Unresolveable aws.Ternary + + Hostname string + Protocols []string + + CredentialScope CredentialScope + + SignatureVersions []string + + // Indicates that this endpoint is deprecated. + Deprecated aws.Ternary +} + +// IsZero returns whether the endpoint structure is an empty (zero) value. +func (e Endpoint) IsZero() bool { + switch { + case e.Unresolveable != aws.UnknownTernary: + return false + case len(e.Hostname) != 0: + return false + case len(e.Protocols) != 0: + return false + case e.CredentialScope != (CredentialScope{}): + return false + case len(e.SignatureVersions) != 0: + return false + } + return true +} + +func (e Endpoint) resolve(partition, region string, def Endpoint, options Options) (aws.Endpoint, error) { + var merged Endpoint + merged.mergeIn(def) + merged.mergeIn(e) + e = merged + + if e.IsZero() { + return aws.Endpoint{}, fmt.Errorf("unable to resolve endpoint for region: %v", region) + } + + var u string + if e.Unresolveable != aws.TrueTernary { + // Only attempt to resolve the endpoint if it can be resolved. + hostname := strings.Replace(e.Hostname, "{region}", region, 1) + + scheme := getEndpointScheme(e.Protocols, options.DisableHTTPS) + u = scheme + "://" + hostname + } + + signingRegion := e.CredentialScope.Region + if len(signingRegion) == 0 { + signingRegion = region + } + signingName := e.CredentialScope.Service + + if e.Deprecated == aws.TrueTernary && options.LogDeprecated { + options.Logger.Logf(logging.Warn, "endpoint identifier %q, url %q marked as deprecated", region, u) + } + + return aws.Endpoint{ + URL: u, + PartitionID: partition, + SigningRegion: signingRegion, + SigningName: signingName, + SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), + }, nil +} + +func (e *Endpoint) mergeIn(other Endpoint) { + if other.Unresolveable != aws.UnknownTernary { + e.Unresolveable = other.Unresolveable + } + if len(other.Hostname) > 0 { + e.Hostname = other.Hostname + } + if len(other.Protocols) > 0 { + e.Protocols = other.Protocols + } + if len(other.CredentialScope.Region) > 0 { + e.CredentialScope.Region = other.CredentialScope.Region + } + if len(other.CredentialScope.Service) > 0 { + e.CredentialScope.Service = other.CredentialScope.Service + } + if len(other.SignatureVersions) > 0 { + e.SignatureVersions = other.SignatureVersions + } + if other.Deprecated != aws.UnknownTernary { + e.Deprecated = other.Deprecated + } +} + +func getEndpointScheme(protocols []string, disableHTTPS bool) string { + if disableHTTPS { + return "http" + } + + return getByPriority(protocols, protocolPriority, defaultProtocol) +} + +func getByPriority(s []string, p []string, def string) string { + if len(s) == 0 { + return def + } + + for i := 0; i < len(p); i++ { + for j := 0; j < len(s); j++ { + if s[j] == p[i] { + return s[j] + } + } + } + + return s[0] +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go new file mode 100644 index 00000000000..adb6f69921b --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package endpoints + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "2.5.2" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md new file mode 100644 index 00000000000..a04ce9b6f77 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md @@ -0,0 +1,255 @@ +# v1.7.0 (2023-11-13) + +* **Feature**: Replace the legacy config parser with a modern, less-strict implementation. Parsing failures within a section will now simply ignore the invalid line rather than silently drop the entire section. + +# v1.6.0 (2023-11-09.2) + +* **Feature**: BREAKFIX: In order to support subproperty parsing, invalid property definitions must not be ignored + +# v1.5.2 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.1 (2023-11-07) + +* **Bug Fix**: Fix subproperty performance regression + +# v1.5.0 (2023-11-01) + +* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.45 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.44 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.43 (2023-09-22) + +* **Bug Fix**: Fixed a bug where merging `max_attempts` or `duration_seconds` fields across shared config files with invalid values would silently default them to 0. +* **Bug Fix**: Move type assertion of config values out of the parsing stage, which resolves an issue where the contents of a profile would silently be dropped with certain numeric formats. 
+ +# v1.3.42 (2023-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.41 (2023-08-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.40 (2023-08-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.39 (2023-08-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.38 (2023-07-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.37 (2023-07-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.36 (2023-07-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.35 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.34 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.33 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.32 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.31 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.30 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.29 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.28 (2022-12-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.27 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.26 (2022-10-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.25 (2022-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.24 (2022-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.23 (2022-09-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.22 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.21 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.20 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.19 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.18 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.17 (2022-08-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.16 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.15 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.14 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.13 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.12 (2022-05-17) + +* **Bug Fix**: Removes the fuzz testing files from the module, as they are invalid and not used. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.11 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.10 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.9 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.8 (2022-03-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.7 (2022-03-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.6 (2022-02-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.5 (2022-01-28) + +* **Bug Fix**: Fixes the SDK's handling of `duration_seconds` in the shared credentials file or specified in multiple shared config and shared credentials files under the same profile. [#1568](https://github.com/aws/aws-sdk-go-v2/pull/1568). Thanks to [Amir Szekely](https://github.com/kichik) for helping to reproduce this bug. + +# v1.3.4 (2022-01-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.3 (2022-01-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.2 (2021-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.1 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2021-11-06) + +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.5 (2021-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.4 (2021-10-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.3 (2021-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.2 (2021-08-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.1 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.0 (2021-08-04) + +* **Feature**: Adds error handling for deferred close calls +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.1 (2021-07-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.0 (2021-07-01) + +* **Feature**: Support for `:`, `=`, `[`, `]` being present in expression values. + +# v1.0.1 (2021-06-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.0 (2021-05-20) + +* **Release**: The `github.com/aws/aws-sdk-go-v2/internal/ini` package is now a Go Module. 
+* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/yamlprovider/LICENSE b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/LICENSE.txt similarity index 100% rename from vendor/go.opentelemetry.io/collector/confmap/provider/yamlprovider/LICENSE rename to vendor/github.com/aws/aws-sdk-go-v2/internal/ini/LICENSE.txt diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/errors.go new file mode 100644 index 00000000000..0f278d55e6c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/errors.go @@ -0,0 +1,22 @@ +package ini + +import "fmt" + +// UnableToReadFile is an error indicating that a ini file could not be read +type UnableToReadFile struct { + Err error +} + +// Error returns an error message and the underlying error message if present +func (e *UnableToReadFile) Error() string { + base := "unable to read file" + if e.Err == nil { + return base + } + return fmt.Sprintf("%s: %v", base, e.Err) +} + +// Unwrap returns the underlying error +func (e *UnableToReadFile) Unwrap() error { + return e.Err +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go new file mode 100644 index 00000000000..9ffee6eedaf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package ini + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.7.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go new file mode 100644 index 00000000000..cefcce91e76 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go @@ -0,0 +1,56 @@ +// Package ini implements parsing of the AWS shared config file. +// +// Example: +// sections, err := ini.OpenFile("/path/to/file") +// if err != nil { +// panic(err) +// } +// +// profile := "foo" +// section, ok := sections.GetSection(profile) +// if !ok { +// fmt.Printf("section %q could not be found", profile) +// } +package ini + +import ( + "fmt" + "io" + "os" + "strings" +) + +// OpenFile parses shared config from the given file path. +func OpenFile(path string) (sections Sections, err error) { + f, oerr := os.Open(path) + if oerr != nil { + return Sections{}, &UnableToReadFile{Err: oerr} + } + + defer func() { + closeErr := f.Close() + if err == nil { + err = closeErr + } else if closeErr != nil { + err = fmt.Errorf("close error: %v, original error: %w", closeErr, err) + } + }() + + return Parse(f, path) +} + +// Parse parses shared config from the given reader. 
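As a rough usage sketch of the Parse entry point and the Sections accessors being vendored here (illustrative only — this package is internal to the AWS SDK and is shown as if it were importable; the profile name and key are hypothetical):

package main

import (
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go-v2/internal/ini"
)

func main() {
	cfg := "[profile dev]\nregion = us-east-1\n"
	// Parse reads shared-config content from any io.Reader; the second
	// argument is only used to record the source path of each property.
	sections, err := ini.Parse(strings.NewReader(cfg), "inline")
	if err != nil {
		panic(err)
	}
	// Profile sections are keyed as "type name", e.g. "profile dev".
	if section, ok := sections.GetSection("profile dev"); ok {
		fmt.Println(section.String("region")) // prints "us-east-1"
	}
}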
+func Parse(r io.Reader, path string) (Sections, error) { + contents, err := io.ReadAll(r) + if err != nil { + return Sections{}, fmt.Errorf("read all: %v", err) + } + + lines := strings.Split(string(contents), "\n") + tokens, err := tokenize(lines) + if err != nil { + return Sections{}, fmt.Errorf("tokenize: %v", err) + } + + return parse(tokens, path), nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse.go new file mode 100644 index 00000000000..0fcb8ec0763 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse.go @@ -0,0 +1,109 @@ +package ini + +import ( + "fmt" + "strings" +) + +func parse(tokens []lineToken, path string) Sections { + parser := &parser{ + path: path, + sections: NewSections(), + } + parser.parse(tokens) + return parser.sections +} + +type parser struct { + csection, ckey string // current state + path string // source file path + sections Sections // parse result +} + +func (p *parser) parse(tokens []lineToken) { + for _, otok := range tokens { + switch tok := otok.(type) { + case *lineTokenProfile: + p.handleProfile(tok) + case *lineTokenProperty: + p.handleProperty(tok) + case *lineTokenSubProperty: + p.handleSubProperty(tok) + case *lineTokenContinuation: + p.handleContinuation(tok) + } + } +} + +func (p *parser) handleProfile(tok *lineTokenProfile) { + name := tok.Name + if tok.Type != "" { + name = fmt.Sprintf("%s %s", tok.Type, tok.Name) + } + p.ckey = "" + p.csection = name + if _, ok := p.sections.container[name]; !ok { + p.sections.container[name] = NewSection(name) + } +} + +func (p *parser) handleProperty(tok *lineTokenProperty) { + if p.csection == "" { + return // LEGACY: don't error on "global" properties + } + + p.ckey = tok.Key + if _, ok := p.sections.container[p.csection].values[tok.Key]; ok { + section := p.sections.container[p.csection] + section.Logs = append(p.sections.container[p.csection].Logs, + fmt.Sprintf( + "For profile: %v, overriding %v value, with a %v value found in a duplicate profile defined later in the same file %v. \n", + p.csection, tok.Key, tok.Key, p.path, + ), + ) + p.sections.container[p.csection] = section + } + + p.sections.container[p.csection].values[tok.Key] = Value{ + str: tok.Value, + } + p.sections.container[p.csection].SourceFile[tok.Key] = p.path +} + +func (p *parser) handleSubProperty(tok *lineTokenSubProperty) { + if p.csection == "" { + return // LEGACY: don't error on "global" properties + } + + if p.ckey == "" || p.sections.container[p.csection].values[p.ckey].str != "" { + // This is an "orphaned" subproperty, either because it's at + // the beginning of a section or because the last property's + // value isn't empty. Either way we're lenient here and + // "promote" this to a normal property. 
+ p.handleProperty(&lineTokenProperty{ + Key: tok.Key, + Value: strings.TrimSpace(trimComment(tok.Value)), + }) + return + } + + if p.sections.container[p.csection].values[p.ckey].mp == nil { + p.sections.container[p.csection].values[p.ckey] = Value{ + mp: map[string]string{}, + } + } + p.sections.container[p.csection].values[p.ckey].mp[tok.Key] = tok.Value +} + +func (p *parser) handleContinuation(tok *lineTokenContinuation) { + if p.ckey == "" { + return + } + + value, _ := p.sections.container[p.csection].values[p.ckey] + if value.str != "" && value.mp == nil { + value.str = fmt.Sprintf("%s\n%s", value.str, tok.Value) + } + + p.sections.container[p.csection].values[p.ckey] = value +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sections.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sections.go new file mode 100644 index 00000000000..dd89848e696 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sections.go @@ -0,0 +1,157 @@ +package ini + +import ( + "sort" +) + +// Sections is a map of Section structures that represent +// a configuration. +type Sections struct { + container map[string]Section +} + +// NewSections returns empty ini Sections +func NewSections() Sections { + return Sections{ + container: make(map[string]Section, 0), + } +} + +// GetSection will return section p. If section p does not exist, +// false will be returned in the second parameter. +func (t Sections) GetSection(p string) (Section, bool) { + v, ok := t.container[p] + return v, ok +} + +// HasSection denotes if Sections contains a section with the +// provided name. +func (t Sections) HasSection(p string) bool { + _, ok := t.container[p] + return ok +} + +// SetSection sets a section value for provided section name. +func (t Sections) SetSection(p string, v Section) Sections { + t.container[p] = v + return t +} + +// DeleteSection deletes a section entry/value for provided section name. +func (t Sections) DeleteSection(p string) { + delete(t.container, p) +} + +// values represents a map of union values. +type values map[string]Value + +// List will return a list of all sections that were successfully +// parsed. +func (t Sections) List() []string { + keys := make([]string, len(t.container)) + i := 0 + for k := range t.container { + keys[i] = k + i++ + } + + sort.Strings(keys) + return keys +} + +// Section contains a name and values. This represents +// a sectioned entry in a configuration file. +type Section struct { + // Name is the Section profile name + Name string + + // values are the values within parsed profile + values values + + // Errors is the list of errors + Errors []error + + // Logs is the list of logs + Logs []string + + // SourceFile is the INI Source file from where this section + // was retrieved. The key is the property; the value is the + // source file the property was retrieved from. + SourceFile map[string]string +} + +// NewSection returns an initialized section for the name +func NewSection(name string) Section { + return Section{ + Name: name, + values: values{}, + SourceFile: map[string]string{}, + } +} + +// List will return a list of all +// keys in values +func (t Section) List() []string { + keys := make([]string, len(t.values)) + i := 0 + for k := range t.values { + keys[i] = k + i++ + } + + sort.Strings(keys) + return keys +} + +// UpdateSourceFile updates the source file for a property to the provided filepath. 
+func (t Section) UpdateSourceFile(property string, filepath string) { + t.SourceFile[property] = filepath +} + +// UpdateValue updates value for a provided key with provided value +func (t Section) UpdateValue(k string, v Value) error { + t.values[k] = v + return nil +} + +// Has will return whether or not an entry exists in a given section +func (t Section) Has(k string) bool { + _, ok := t.values[k] + return ok +} + +// ValueType will return what type the union is set to. If +// k was not found, the NoneType will be returned. +func (t Section) ValueType(k string) (ValueType, bool) { + v, ok := t.values[k] + return v.Type, ok +} + +// Bool returns a bool value at k +func (t Section) Bool(k string) (bool, bool) { + return t.values[k].BoolValue() +} + +// Int returns an integer value at k +func (t Section) Int(k string) (int64, bool) { + return t.values[k].IntValue() +} + +// Map returns a map value at k +func (t Section) Map(k string) map[string]string { + return t.values[k].MapValue() +} + +// Float64 returns a float value at k +func (t Section) Float64(k string) (float64, bool) { + return t.values[k].FloatValue() +} + +// String returns the string value at k +func (t Section) String(k string) string { + _, ok := t.values[k] + if !ok { + return "" + } + return t.values[k].StringValue() +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/strings.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/strings.go new file mode 100644 index 00000000000..478239a2505 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/strings.go @@ -0,0 +1,83 @@ +package ini + +import "strings" + +func trimComment(v string) string { + rest, _, _ := strings.Cut(v, "#") + rest, _, _ = strings.Cut(rest, ";") + return rest +} + +// assumes no surrounding comment +func splitProperty(s string) (string, string, bool) { + equalsi := strings.Index(s, "=") + coloni := strings.Index(s, ":") // LEGACY: also supported for property assignment + sep := "=" + if equalsi == -1 || coloni != -1 && coloni < equalsi { + sep = ":" + } + + k, v, ok := strings.Cut(s, sep) + if !ok { + return "", "", false + } + return strings.TrimSpace(k), strings.TrimSpace(v), true +} + +// assumes no surrounding comment, whitespace, or profile brackets +func splitProfile(s string) (string, string) { + var first int + for i, r := range s { + if isLineSpace(r) { + if first == 0 { + first = i + } + } else { + if first != 0 { + return s[:first], s[i:] + } + } + } + if first == 0 { + return "", s // type component is effectively blank + } + return "", "" +} + +func isLineSpace(r rune) bool { + return r == ' ' || r == '\t' +} + +func unquote(s string) string { + if isSingleQuoted(s) || isDoubleQuoted(s) { + return s[1 : len(s)-1] + } + return s +} + +// applies various legacy conversions to property values: +// - remove wrapping single/double quotes +// - expand escaped quote and newline sequences +func legacyStrconv(s string) string { + s = unquote(s) + s = strings.ReplaceAll(s, `\"`, `"`) + s = strings.ReplaceAll(s, `\'`, `'`) + s = strings.ReplaceAll(s, `\n`, "\n") + return s +} + +func isSingleQuoted(s string) bool { + return hasAffixes(s, "'", "'") +} + +func isDoubleQuoted(s string) bool { + return hasAffixes(s, `"`, `"`) +} + +func isBracketed(s string) bool { + return hasAffixes(s, "[", "]") +} + +func hasAffixes(s, left, right string) bool { + return strings.HasPrefix(s, left) && strings.HasSuffix(s, right) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/token.go 
b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/token.go new file mode 100644 index 00000000000..6e9a03744e0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/token.go @@ -0,0 +1,32 @@ +package ini + +type lineToken interface { + isLineToken() +} + +type lineTokenProfile struct { + Type string + Name string +} + +func (*lineTokenProfile) isLineToken() {} + +type lineTokenProperty struct { + Key string + Value string +} + +func (*lineTokenProperty) isLineToken() {} + +type lineTokenContinuation struct { + Value string +} + +func (*lineTokenContinuation) isLineToken() {} + +type lineTokenSubProperty struct { + Key string + Value string +} + +func (*lineTokenSubProperty) isLineToken() {} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/tokenize.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/tokenize.go new file mode 100644 index 00000000000..9778a1738b3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/tokenize.go @@ -0,0 +1,91 @@ +package ini + +import ( + "strings" +) + +func tokenize(lines []string) ([]lineToken, error) { + tokens := make([]lineToken, 0, len(lines)) + for _, line := range lines { + if len(strings.TrimSpace(line)) == 0 || isLineComment(line) { + continue + } + + if tok := asProfile(line); tok != nil { + tokens = append(tokens, tok) + } else if tok := asProperty(line); tok != nil { + tokens = append(tokens, tok) + } else if tok := asSubProperty(line); tok != nil { + tokens = append(tokens, tok) + } else if tok := asContinuation(line); tok != nil { + tokens = append(tokens, tok) + } // unrecognized tokens are effectively ignored + } + return tokens, nil +} + +func isLineComment(line string) bool { + trimmed := strings.TrimLeft(line, " \t") + return strings.HasPrefix(trimmed, "#") || strings.HasPrefix(trimmed, ";") +} + +func asProfile(line string) *lineTokenProfile { // " [ type name ] ; comment" + trimmed := strings.TrimSpace(trimComment(line)) // "[ type name ]" + if !isBracketed(trimmed) { + return nil + } + trimmed = trimmed[1 : len(trimmed)-1] // " type name " (or just " name ") + trimmed = strings.TrimSpace(trimmed) // "type name" / "name" + typ, name := splitProfile(trimmed) + return &lineTokenProfile{ + Type: typ, + Name: name, + } +} + +func asProperty(line string) *lineTokenProperty { + if isLineSpace(rune(line[0])) { + return nil + } + + trimmed := strings.TrimRight(trimComment(line), " \t") + k, v, ok := splitProperty(trimmed) + if !ok { + return nil + } + + return &lineTokenProperty{ + Key: strings.ToLower(k), // LEGACY: normalize key case + Value: legacyStrconv(v), // LEGACY: see func docs + } +} + +func asSubProperty(line string) *lineTokenSubProperty { + if !isLineSpace(rune(line[0])) { + return nil + } + + // comments on sub-properties are included in the value + trimmed := strings.TrimLeft(line, " \t") + k, v, ok := splitProperty(trimmed) + if !ok { + return nil + } + + return &lineTokenSubProperty{ // same LEGACY constraints as in normal property + Key: strings.ToLower(k), + Value: legacyStrconv(v), + } +} + +func asContinuation(line string) *lineTokenContinuation { + if !isLineSpace(rune(line[0])) { + return nil + } + + // includes comments like sub-properties + trimmed := strings.TrimLeft(line, " \t") + return &lineTokenContinuation{ + Value: trimmed, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value.go new file mode 100644 index 00000000000..ade75bf34e4 --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value.go @@ -0,0 +1,104 @@ +package ini + +import ( + "fmt" + "strconv" + "strings" +) + +// ValueType is an enum that will signify what type +// the Value is +type ValueType int + +func (v ValueType) String() string { + switch v { + case NoneType: + return "NONE" + case StringType: + return "STRING" + } + + return "" +} + +// ValueType enums +const ( + NoneType = ValueType(iota) + StringType + QuotedStringType +) + +// Value is a union container +type Value struct { + Type ValueType + + str string + mp map[string]string +} + +// NewStringValue returns a Value type generated using a string input. +func NewStringValue(str string) (Value, error) { + return Value{str: str}, nil +} + +func (v Value) String() string { + switch v.Type { + case StringType: + return fmt.Sprintf("string: %s", string(v.str)) + case QuotedStringType: + return fmt.Sprintf("quoted string: %s", string(v.str)) + default: + return "union not set" + } +} + +// MapValue returns a map value for sub properties +func (v Value) MapValue() map[string]string { + newlineParts := strings.Split(string(v.str), "\n") + mp := make(map[string]string) + for _, part := range newlineParts { + operandParts := strings.Split(part, "=") + if len(operandParts) < 2 { + continue + } + key := strings.TrimSpace(operandParts[0]) + val := strings.TrimSpace(operandParts[1]) + mp[key] = val + } + return mp +} + +// IntValue returns an integer value +func (v Value) IntValue() (int64, bool) { + i, err := strconv.ParseInt(string(v.str), 0, 64) + if err != nil { + return 0, false + } + return i, true +} + +// FloatValue returns a float value +func (v Value) FloatValue() (float64, bool) { + f, err := strconv.ParseFloat(string(v.str), 64) + if err != nil { + return 0, false + } + return f, true +} + +// BoolValue returns a bool value +func (v Value) BoolValue() (bool, bool) { + // we don't use ParseBool as it recognizes more than what we've + // historically supported + if strings.EqualFold(v.str, "true") { + return true, true + } else if strings.EqualFold(v.str, "false") { + return false, true + } + return false, false +} + +// StringValue returns the string value +func (v Value) StringValue() string { + return v.str +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/rand/rand.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/rand/rand.go new file mode 100644 index 00000000000..c8484dcd759 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/rand/rand.go @@ -0,0 +1,33 @@ +package rand + +import ( + "crypto/rand" + "fmt" + "io" + "math/big" +) + +func init() { + Reader = rand.Reader +} + +// Reader provides a random reader that can reset during testing. +var Reader io.Reader + +var floatMaxBigInt = big.NewInt(1 << 53) + +// Float64 returns a float64 read from an io.Reader source. The returned float will be between [0.0, 1.0). +func Float64(reader io.Reader) (float64, error) { + bi, err := rand.Int(reader, floatMaxBigInt) + if err != nil { + return 0, fmt.Errorf("failed to read random value, %v", err) + } + + return float64(bi.Int64()) / (1 << 53), nil +} + +// CryptoRandFloat64 returns a random float64 obtained from the crypto rand +// source. 
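A minimal sketch of how Float64 above maps crypto-quality randomness onto [0.0, 1.0), assuming the internal package were importable (illustrative only):

package main

import (
	"fmt"

	internalrand "github.com/aws/aws-sdk-go-v2/internal/rand"
)

func main() {
	// Float64 draws an integer below 2^53 from the crypto source, so the
	// result fits a float64 mantissa exactly and is uniform over [0.0, 1.0).
	f, err := internalrand.Float64(internalrand.Reader)
	if err != nil {
		panic(err)
	}
	fmt.Println(f)
}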
+func CryptoRandFloat64() (float64, error) { + return Float64(Reader) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/interfaces.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/interfaces.go new file mode 100644 index 00000000000..2b42cbe6421 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/interfaces.go @@ -0,0 +1,9 @@ +package sdk + +// Invalidator provides access to a type's invalidate method to make it +// invalidate its cache. +// +// e.g. aws.SafeCredentialsProvider's Invalidate method. +type Invalidator interface { + Invalidate() +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/time.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/time.go new file mode 100644 index 00000000000..8e8dabad548 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/time.go @@ -0,0 +1,74 @@ +package sdk + +import ( + "context" + "time" +) + +func init() { + NowTime = time.Now + Sleep = time.Sleep + SleepWithContext = sleepWithContext +} + +// NowTime is a value for getting the current time. This value can be overridden +// for testing by mocking out the current time. +var NowTime func() time.Time + +// Sleep is a value for sleeping for a duration. This value can be overridden +// for testing by mocking out the sleep duration. +var Sleep func(time.Duration) + +// SleepWithContext will wait for the timer duration to expire, or for the context +// to be canceled, whichever happens first. If the context is canceled, the +// context's error will be returned. +// +// This value can be overridden for testing and mocking out sleep duration. +var SleepWithContext func(context.Context, time.Duration) error + +// sleepWithContext will wait for the timer duration to expire, or for the context +// to be canceled, whichever happens first. If the context is canceled, the +// context's error will be returned. +func sleepWithContext(ctx context.Context, dur time.Duration) error { + t := time.NewTimer(dur) + defer t.Stop() + + select { + case <-t.C: + break + case <-ctx.Done(): + return ctx.Err() + } + + return nil +} + +// noOpSleepWithContext does nothing, returns immediately. +func noOpSleepWithContext(context.Context, time.Duration) error { + return nil +} + +func noOpSleep(time.Duration) {} + +// TestingUseNopSleep is a utility for disabling sleep across the SDK for +// testing. +func TestingUseNopSleep() func() { + SleepWithContext = noOpSleepWithContext + Sleep = noOpSleep + + return func() { + SleepWithContext = sleepWithContext + Sleep = time.Sleep + } +} + +// TestingUseReferenceTime is a utility for swapping the time function across the SDK to return a specific reference time +// for testing purposes. 
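The swappable NowTime/Sleep/SleepWithContext hooks in this file exist so tests can control time. A rough sketch of both hooks, assuming the internal package were importable (illustrative only; the reference date is arbitrary):

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/internal/sdk"
)

func main() {
	// Pin NowTime to a fixed instant; restore() swaps time.Now back in.
	restore := sdk.TestingUseReferenceTime(time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC))
	defer restore()
	fmt.Println(sdk.NowTime()) // always 2024-01-01 00:00:00 +0000 UTC

	// SleepWithContext returns early with ctx.Err() when the context is
	// canceled before the full duration elapses.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()
	fmt.Println(sdk.SleepWithContext(ctx, time.Second)) // context deadline exceeded
}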
+func TestingUseReferenceTime(referenceTime time.Time) func() { + NowTime = func() time.Time { + return referenceTime + } + return func() { + NowTime = time.Now + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdkio/byte.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdkio/byte.go new file mode 100644 index 00000000000..6c443988bbc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdkio/byte.go @@ -0,0 +1,12 @@ +package sdkio + +const ( + // Byte is 8 bits + Byte int64 = 1 + // KibiByte (KiB) is 1024 Bytes + KibiByte = Byte * 1024 + // MebiByte (MiB) is 1024 KiB + MebiByte = KibiByte * 1024 + // GibiByte (GiB) is 1024 MiB + GibiByte = MebiByte * 1024 +) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/shareddefaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/shareddefaults/shared_config.go new file mode 100644 index 00000000000..c96b717e08a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/shareddefaults/shared_config.go @@ -0,0 +1,47 @@ +package shareddefaults + +import ( + "os" + "os/user" + "path/filepath" +) + +// SharedCredentialsFilename returns the SDK's default file path +// for the shared credentials file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/credentials +// - Windows: %USERPROFILE%\.aws\credentials +func SharedCredentialsFilename() string { + return filepath.Join(UserHomeDir(), ".aws", "credentials") +} + +// SharedConfigFilename returns the SDK's default file path for +// the shared config file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/config +// - Windows: %USERPROFILE%\.aws\config +func SharedConfigFilename() string { + return filepath.Join(UserHomeDir(), ".aws", "config") +} + +// UserHomeDir returns the home directory for the user the process is +// running under. +func UserHomeDir() string { + // Ignore errors since we only care about Windows and *nix. + home, _ := os.UserHomeDir() + + if len(home) > 0 { + return home + } + + currUser, _ := user.Current() + if currUser != nil { + home = currUser.HomeDir + } + + return home +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/strings/strings.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/strings/strings.go new file mode 100644 index 00000000000..d008ae27cb3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/strings/strings.go @@ -0,0 +1,11 @@ +package strings + +import ( + "strings" +) + +// HasPrefixFold tests whether the string s begins with prefix, interpreted as UTF-8 strings, +// under Unicode case-folding. +func HasPrefixFold(s, prefix string) bool { + return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/LICENSE b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/LICENSE new file mode 100644 index 00000000000..fe6a62006a5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/docs.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/docs.go new file mode 100644 index 00000000000..cb70616e802 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/docs.go @@ -0,0 +1,7 @@ +// Package singleflight provides a duplicate function call suppression +// mechanism. This package is a fork of the Go golang.org/x/sync/singleflight +// package. The package is forked because it is part of the unstable +// and unversioned golang.org/x/sync module. +// +// https://github.com/golang/sync/tree/67f06af15bc961c363a7260195bcd53487529a21/singleflight +package singleflight diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/singleflight.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/singleflight.go new file mode 100644 index 00000000000..e8a1b17d564 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/singleflight.go @@ -0,0 +1,210 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package singleflight + +import ( + "bytes" + "errors" + "fmt" + "runtime" + "runtime/debug" + "sync" +) + +// errGoexit indicates that runtime.Goexit was called in +// the user-given function. +var errGoexit = errors.New("runtime.Goexit was called") + +// A panicError is an arbitrary value recovered from a panic +// with the stack trace during the execution of the given function. +type panicError struct { + value interface{} + stack []byte +} + +// Error implements the error interface. +func (p *panicError) Error() string { + return fmt.Sprintf("%v\n\n%s", p.value, p.stack) +} + +func newPanicError(v interface{}) error { + stack := debug.Stack() + + // The first line of the stack trace is of the form "goroutine N [status]:" + // but by the time the panic reaches Do the goroutine may no longer exist + // and its status will have changed. Trim out the misleading line.
+ if line := bytes.IndexByte(stack[:], '\n'); line >= 0 { + stack = stack[line+1:] + } + return &panicError{value: v, stack: stack} +} + +// call is an in-flight or completed singleflight.Do call +type call struct { + wg sync.WaitGroup + + // These fields are written once before the WaitGroup is done + // and are only read after the WaitGroup is done. + val interface{} + err error + + // forgotten indicates whether Forget was called with this call's key + // while the call was still in flight. + forgotten bool + + // These fields are read and written with the singleflight + // mutex held before the WaitGroup is done, and are read but + // not written after the WaitGroup is done. + dups int + chans []chan<- Result +} + +// Group represents a class of work and forms a namespace in +// which units of work can be executed with duplicate suppression. +type Group struct { + mu sync.Mutex // protects m + m map[string]*call // lazily initialized +} + +// Result holds the results of Do, so they can be passed +// on a channel. +type Result struct { + Val interface{} + Err error + Shared bool +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +// The return value shared indicates whether v was given to multiple callers. +func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + g.mu.Unlock() + c.wg.Wait() + + if e, ok := c.err.(*panicError); ok { + panic(e) + } else if c.err == errGoexit { + runtime.Goexit() + } + return c.val, c.err, true + } + c := new(call) + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + g.doCall(c, key, fn) + return c.val, c.err, c.dups > 0 +} + +// DoChan is like Do but returns a channel that will receive the +// results when they are ready. +// +// The returned channel will not be closed. +func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { + ch := make(chan Result, 1) + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + c.chans = append(c.chans, ch) + g.mu.Unlock() + return ch + } + c := &call{chans: []chan<- Result{ch}} + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + go g.doCall(c, key, fn) + + return ch +} + +// doCall handles the single call for a key. +func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { + normalReturn := false + recovered := false + + // use double-defer to distinguish panic from runtime.Goexit, + // more details see https://golang.org/cl/134395 + defer func() { + // the given function invoked runtime.Goexit + if !normalReturn && !recovered { + c.err = errGoexit + } + + c.wg.Done() + g.mu.Lock() + defer g.mu.Unlock() + if !c.forgotten { + delete(g.m, key) + } + + if e, ok := c.err.(*panicError); ok { + // In order to prevent the waiting channels from being blocked forever, + // needs to ensure that this panic cannot be recovered. + if len(c.chans) > 0 { + go panic(e) + select {} // Keep this goroutine around so that it will appear in the crash dump. 
+ } else { + panic(e) + } + } else if c.err == errGoexit { + // Already in the process of goexit, no need to call again + } else { + // Normal return + for _, ch := range c.chans { + ch <- Result{c.val, c.err, c.dups > 0} + } + } + }() + + func() { + defer func() { + if !normalReturn { + // Ideally, we would wait to take a stack trace until we've determined + // whether this is a panic or a runtime.Goexit. + // + // Unfortunately, the only way we can distinguish the two is to see + // whether the recover stopped the goroutine from terminating, and by + // the time we know that, the part of the stack trace relevant to the + // panic has been discarded. + if r := recover(); r != nil { + c.err = newPanicError(r) + } + } + }() + + c.val, c.err = fn() + normalReturn = true + }() + + if !normalReturn { + recovered = true + } +} + +// Forget tells the singleflight to forget about a key. Future calls +// to Do for this key will call the function rather than waiting for +// an earlier call to complete. +func (g *Group) Forget(key string) { + g.mu.Lock() + if c, ok := g.m[key]; ok { + c.forgotten = true + } + delete(g.m, key) + g.mu.Unlock() +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/timeconv/duration.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/timeconv/duration.go new file mode 100644 index 00000000000..5d69db5f249 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/timeconv/duration.go @@ -0,0 +1,13 @@ +package timeconv + +import "time" + +// FloatSecondsDur converts a fractional seconds to duration. +func FloatSecondsDur(v float64) time.Duration { + return time.Duration(v * float64(time.Second)) +} + +// DurSecondsFloat converts a duration into fractional seconds. +func DurSecondsFloat(d time.Duration) float64 { + return float64(d) / float64(time.Second) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md new file mode 100644 index 00000000000..99a54769af8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md @@ -0,0 +1,240 @@ +# v1.10.2 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.1 (2023-11-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.37 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.36 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.35 (2023-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.34 (2023-08-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.33 (2023-08-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.32 (2023-08-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.31 (2023-07-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.30 (2023-07-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.29 (2023-07-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.28 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.27 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.26 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.25 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.24 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.23 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.22 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.21 (2022-12-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.20 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.19 (2022-10-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.18 (2022-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.17 (2022-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.16 (2022-09-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.15 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.14 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.13 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.12 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.11 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.10 (2022-08-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.9 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.8 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.7 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.6 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.5 (2022-05-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.4 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.3 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.2 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.1 (2022-03-23) + +* 
**Dependency Update**: Updated to the latest SDK module versions + +# v1.9.0 (2022-03-08) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.0 (2022-02-24) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.0 (2022-01-07) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.2 (2021-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.1 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.0 (2021-11-06) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2021-10-21) + +* **Feature**: Updated to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.2 (2021-10-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.1 (2021-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2021-08-27) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.3 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.2 (2021-08-04) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.1 (2021-07-15) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.0 (2021-06-25) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.1 (2021-05-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/LICENSE.txt new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
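The singleflight package vendored above exposes a small Group API for duplicate call suppression. A minimal usage sketch follows (illustrative only; it is written against golang.org/x/sync/singleflight, which has the same Do signature, since the SDK's internal fork cannot be imported from outside its module):

	package main

	import (
		"fmt"
		"sync"

		"golang.org/x/sync/singleflight"
	)

	func main() {
		var g singleflight.Group
		var wg sync.WaitGroup

		for i := 0; i < 3; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				// Concurrent callers using the same key share one execution of
				// the function; duplicates block until the original completes
				// and receive the same value. shared reports whether the result
				// was delivered to more than one caller.
				v, err, shared := g.Do("user:42", func() (interface{}, error) {
					return "alice", nil // stand-in for an expensive lookup
				})
				fmt.Println(v, err, shared)
			}()
		}
		wg.Wait()
	}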
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go new file mode 100644 index 00000000000..cc919701a06 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go @@ -0,0 +1,48 @@ +package presignedurl + +import ( + "context" + + "github.com/aws/smithy-go/middleware" +) + +// WithIsPresigning adds the isPresigning sentinel value to a context to signal +// that the middleware stack is using the presign flow. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func WithIsPresigning(ctx context.Context) context.Context { + return middleware.WithStackValue(ctx, isPresigningKey{}, true) +} + +// GetIsPresigning returns if the context contains the isPresigning sentinel +// value for presigning flows. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func GetIsPresigning(ctx context.Context) bool { + v, _ := middleware.GetStackValue(ctx, isPresigningKey{}).(bool) + return v +} + +type isPresigningKey struct{} + +// AddAsIsPresigingMiddleware adds a middleware to the head of the stack that +// will update the stack's context to be flagged as being invoked for the +// purpose of presigning. +func AddAsIsPresigingMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(asIsPresigningMiddleware{}, middleware.Before) +} + +type asIsPresigningMiddleware struct{} + +func (asIsPresigningMiddleware) ID() string { return "AsIsPresigningMiddleware" } + +func (asIsPresigningMiddleware) HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + ctx = WithIsPresigning(ctx) + return next.HandleInitialize(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/doc.go new file mode 100644 index 00000000000..1b85375cf80 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/doc.go @@ -0,0 +1,3 @@ +// Package presignedurl provides the customizations for API clients to fill in +// presigned URLs into input parameters. +package presignedurl diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go new file mode 100644 index 00000000000..66b8acd87c5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. 
+ +package presignedurl + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.10.2" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/middleware.go new file mode 100644 index 00000000000..1e2f5c8122a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/middleware.go @@ -0,0 +1,110 @@ +package presignedurl + +import ( + "context" + "fmt" + + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + + "github.com/aws/smithy-go/middleware" +) + +// URLPresigner provides the interface to presign the input parameters into a +// presigned URL. +type URLPresigner interface { + // PresignURL presigns a URL. + PresignURL(ctx context.Context, srcRegion string, params interface{}) (*v4.PresignedHTTPRequest, error) +} + +// ParameterAccessor provides a collection of accessors for retrieving and +// setting the values needed for presigned URL generation. +type ParameterAccessor struct { + // GetPresignedURL accessor points to a function that retrieves a presigned URL, if present. + GetPresignedURL func(interface{}) (string, bool, error) + + // GetSourceRegion accessor points to a function that retrieves the source region for a presigned URL. + GetSourceRegion func(interface{}) (string, bool, error) + + // CopyInput accessor points to a function that takes in an input and returns a copy. + CopyInput func(interface{}) (interface{}, error) + + // SetDestinationRegion accessor points to a function that sets the destination region on an API input struct. + SetDestinationRegion func(interface{}, string) error + + // SetPresignedURL accessor points to a function that sets the presigned URL on an API input struct. + SetPresignedURL func(interface{}, string) error +} + +// Options provides the set of options needed by the presigned URL middleware. +type Options struct { + // Accessor is the set of parameter accessors used by this middleware. + Accessor ParameterAccessor + + // Presigner is the URLPresigner used by the middleware. + Presigner URLPresigner +} + +// AddMiddleware adds the Presign URL middleware to the middleware stack. +func AddMiddleware(stack *middleware.Stack, opts Options) error { + return stack.Initialize.Add(&presign{options: opts}, middleware.Before) +} + +// RemoveMiddleware removes the Presign URL middleware from the stack. +func RemoveMiddleware(stack *middleware.Stack) error { + _, err := stack.Initialize.Remove((*presign)(nil).ID()) + return err +} + +type presign struct { + options Options +} + +func (m *presign) ID() string { return "Presign" } + +func (m *presign) HandleInitialize( + ctx context.Context, input middleware.InitializeInput, next middleware.InitializeHandler, +) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + // If PresignedURL is already set, ignore the middleware. + if _, ok, err := m.options.Accessor.GetPresignedURL(input.Parameters); err != nil { + return out, metadata, fmt.Errorf("presign middleware failed, %w", err) + } else if ok { + return next.HandleInitialize(ctx, input) + } + + // If the source region is not set, ignore the middleware.
+ srcRegion, ok, err := m.options.Accessor.GetSourceRegion(input.Parameters) + if err != nil { + return out, metadata, fmt.Errorf("presign middleware failed, %w", err) + } else if !ok || len(srcRegion) == 0 { + return next.HandleInitialize(ctx, input) + } + + // Create a copy of the original input so the destination region value can + // be added. This ensures that value does not leak into the original + // request parameters. + paramCpy, err := m.options.Accessor.CopyInput(input.Parameters) + if err != nil { + return out, metadata, fmt.Errorf("unable to create presigned URL, %w", err) + } + + // Destination region is the API client's configured region. + dstRegion := awsmiddleware.GetRegion(ctx) + if err = m.options.Accessor.SetDestinationRegion(paramCpy, dstRegion); err != nil { + return out, metadata, fmt.Errorf("presign middleware failed, %w", err) + } + + presignedReq, err := m.options.Presigner.PresignURL(ctx, srcRegion, paramCpy) + if err != nil { + return out, metadata, fmt.Errorf("unable to create presigned URL, %w", err) + } + + // Update the original input with the presigned URL value. + if err = m.options.Accessor.SetPresignedURL(input.Parameters, presignedReq.URL); err != nil { + return out, metadata, fmt.Errorf("presign middleware failed, %w", err) + } + + return next.HandleInitialize(ctx, input) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md new file mode 100644 index 00000000000..a4c1d645969 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md @@ -0,0 +1,327 @@ +# v1.17.1 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.0 (2023-11-01) + +* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.2 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.1 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.0 (2023-10-02) + +* **Feature**: Fix FIPS Endpoints in aws-us-gov. + +# v1.14.1 (2023-09-22) + +* No change notes available for this release. + +# v1.14.0 (2023-09-18) + +* **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* **Feature**: Adds several endpoint ruleset changes across all models: smaller rulesets, removed non-unique regional endpoints, fixes FIPS and DualStack endpoints, and make region not required in SDK::Endpoint. Additional breakfix to cognito-sync field. + +# v1.13.6 (2023-08-31) + +* No change notes available for this release. + +# v1.13.5 (2023-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.4 (2023-08-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.3 (2023-08-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.2 (2023-08-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.1 (2023-08-01) + +* No change notes available for this release. 
+ +# v1.13.0 (2023-07-31) + +* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supersede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.14 (2023-07-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.13 (2023-07-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.12 (2023-06-15) + +* No change notes available for this release. + +# v1.12.11 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.10 (2023-05-04) + +* No change notes available for this release. + +# v1.12.9 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.8 (2023-04-10) + +* No change notes available for this release. + +# v1.12.7 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.6 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.5 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.4 (2023-02-22) + +* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes. + +# v1.12.3 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.2 (2023-02-15) + +* **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. +* **Bug Fix**: Correct error type parsing for restJson services. + +# v1.12.1 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.0 (2023-01-05) + +* **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). + +# v1.11.28 (2022-12-20) + +* No change notes available for this release. + +# v1.11.27 (2022-12-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.26 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.25 (2022-10-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.24 (2022-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.23 (2022-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.22 (2022-09-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.21 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.20 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.19 (2022-08-30) + +* **Documentation**: Documentation updates for the AWS IAM Identity Center Portal CLI Reference.
+ +# v1.11.18 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.17 (2022-08-15) + +* **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On) + +# v1.11.16 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.15 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.14 (2022-08-08) + +* **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On) +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.13 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.12 (2022-07-11) + +* No change notes available for this release. + +# v1.11.11 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.10 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.9 (2022-06-16) + +* No change notes available for this release. + +# v1.11.8 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.7 (2022-05-26) + +* No change notes available for this release. + +# v1.11.6 (2022-05-25) + +* No change notes available for this release. + +# v1.11.5 (2022-05-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.4 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.3 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.2 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.1 (2022-03-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.0 (2022-03-08) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.0 (2022-02-24) + +* **Feature**: API client updated +* **Feature**: Adds RetryMaxAttempts and RetryMode to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. It also adds a new retry mode, `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttled responses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details and configuration options. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Documentation**: Updated API models +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.0 (2022-01-07) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.0 (2021-12-21) + +* **Feature**: API Paginators now support specifying the initial starting token, and support stopping on empty string tokens. + +# v1.6.2 (2021-12-02) + +* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client.
([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514)) +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.1 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.0 (2021-11-06) + +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Feature**: Updated service to latest API model. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.0 (2021-10-21) + +* **Feature**: Updated to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.2 (2021-10-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.1 (2021-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2021-08-27) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.3 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.2 (2021-08-04) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.1 (2021-07-15) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2021-06-25) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.1 (2021-05-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/LICENSE.txt new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go new file mode 100644 index 00000000000..a2579227393 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go @@ -0,0 +1,526 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package sso + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/defaults" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/retry" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + smithy "github.com/aws/smithy-go" + smithydocument "github.com/aws/smithy-go/document" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net" + "net/http" + "time" +) + +const ServiceID = "SSO" +const ServiceAPIVersion = "2019-06-10" + +// Client provides the API client to make operation calls for AWS Single Sign-On. +type Client struct { + options Options +} + +// New returns an initialized Client based on the functional options. Provide +// additional functional options to further configure the behavior of the client, +// such as changing the client's endpoint or adding custom middleware behavior. +func New(options Options, optFns ...func(*Options)) *Client { + options = options.Copy() + + resolveDefaultLogger(&options) + + setResolvedDefaultsMode(&options) + + resolveRetryer(&options) + + resolveHTTPClient(&options) + + resolveHTTPSignerV4(&options) + + for _, fn := range optFns { + fn(&options) + } + + client := &Client{ + options: options, + } + + return client +} + +type Options struct { + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on an operation call to + // modify this list for per-operation behavior. + APIOptions []func(*middleware.Stack) error + + // The optional application-specific identifier appended to the User-Agent header. + AppID string + + // This endpoint will be given as input to an EndpointResolverV2. It is used for + // providing a custom base endpoint that is subject to modifications by the + // processing EndpointResolverV2. + BaseEndpoint *string + + // Configures the events that will be sent to the configured logger. + ClientLogMode aws.ClientLogMode + + // The credentials object to use when signing requests. + Credentials aws.CredentialsProvider + + // The configuration DefaultsMode that the SDK should use when constructing the + // client's initial default settings. + DefaultsMode aws.DefaultsMode + + // The endpoint options to be used when attempting to resolve an endpoint. + EndpointOptions EndpointResolverOptions + + // The service endpoint resolver. + // + // Deprecated: EndpointResolver and WithEndpointResolver. Providing a + // value for this field will likely prevent you from using any endpoint-related + // service features released after the introduction of EndpointResolverV2 and + // BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom + // endpoint, set the client option BaseEndpoint instead. + EndpointResolver EndpointResolver + + // Resolves the endpoint used for a particular service. This should be used over + // the deprecated EndpointResolver. + EndpointResolverV2 EndpointResolverV2 + + // Signature Version 4 (SigV4) Signer + HTTPSignerV4 HTTPSignerV4 + + // The logger writer interface to write logging messages to. + Logger logging.Logger + + // The region to send requests to.
(Required) + Region string + + // RetryMaxAttempts specifies the maximum number attempts an API client will call + // an operation that fails with a retryable error. A value of 0 is ignored, and + // will not be used to configure the API client created default retryer, or modify + // per operation call's retry max attempts. When creating a new API Clients this + // member will only be used if the Retryer Options member is nil. This value will + // be ignored if Retryer is not nil. If specified in an operation call's functional + // options with a value that is different than the constructed client's Options, + // the Client's Retryer will be wrapped to use the operation's specific + // RetryMaxAttempts value. + RetryMaxAttempts int + + // RetryMode specifies the retry mode the API client will be created with, if + // Retryer option is not also specified. When creating a new API Clients this + // member will only be used if the Retryer Options member is nil. This value will + // be ignored if Retryer is not nil. Currently does not support per operation call + // overrides, may in the future. + RetryMode aws.RetryMode + + // Retryer guides how HTTP requests should be retried in case of recoverable + // failures. When nil the API client will use a default retryer. The kind of + // default retry created by the API client can be changed with the RetryMode + // option. + Retryer aws.Retryer + + // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set + // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You + // should not populate this structure programmatically, or rely on the values here + // within your applications. + RuntimeEnvironment aws.RuntimeEnvironment + + // The initial DefaultsMode used when the client options were constructed. If the + // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved + // value was at that point in time. Currently does not support per operation call + // overrides, may in the future. + resolvedDefaultsMode aws.DefaultsMode + + // The HTTP client to invoke API calls with. Defaults to client's default HTTP + // implementation if nil. + HTTPClient HTTPClient +} + +// WithAPIOptions returns a functional option for setting the Client's APIOptions +// option. +func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { + return func(o *Options) { + o.APIOptions = append(o.APIOptions, optFns...) + } +} + +// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for +// this field will likely prevent you from using any endpoint-related service +// features released after the introduction of EndpointResolverV2 and BaseEndpoint. +// To migrate an EndpointResolver implementation that uses a custom endpoint, set +// the client option BaseEndpoint instead. +func WithEndpointResolver(v EndpointResolver) func(*Options) { + return func(o *Options) { + o.EndpointResolver = v + } +} + +// WithEndpointResolverV2 returns a functional option for setting the Client's +// EndpointResolverV2 option. +func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) { + return func(o *Options) { + o.EndpointResolverV2 = v + } +} + +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +// Copy creates a clone where the APIOptions list is deep copied. 
+func (o Options) Copy() Options { + to := o + to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) + copy(to.APIOptions, o.APIOptions) + + return to +} +func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { + ctx = middleware.ClearStackValues(ctx) + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) + options := c.options.Copy() + resolveEndpointResolverV2(&options) + + for _, fn := range optFns { + fn(&options) + } + + finalizeRetryMaxAttemptOptions(&options, *c) + + finalizeClientEndpointResolverOptions(&options) + + for _, fn := range stackFns { + if err := fn(stack, options); err != nil { + return nil, metadata, err + } + } + + for _, fn := range options.APIOptions { + if err := fn(stack); err != nil { + return nil, metadata, err + } + } + + handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) + result, metadata, err = handler.Handle(ctx, params) + if err != nil { + err = &smithy.OperationError{ + ServiceID: ServiceID, + OperationName: opID, + Err: err, + } + } + return result, metadata, err +} + +type noSmithyDocumentSerde = smithydocument.NoSerde + +type legacyEndpointContextSetter struct { + LegacyResolver EndpointResolver +} + +func (*legacyEndpointContextSetter) ID() string { + return "legacyEndpointContextSetter" +} + +func (m *legacyEndpointContextSetter) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + if m.LegacyResolver != nil { + ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, true) + } + + return next.HandleInitialize(ctx, in) + +} +func addlegacyEndpointContextSetter(stack *middleware.Stack, o Options) error { + return stack.Initialize.Add(&legacyEndpointContextSetter{ + LegacyResolver: o.EndpointResolver, + }, middleware.Before) +} + +func resolveDefaultLogger(o *Options) { + if o.Logger != nil { + return + } + o.Logger = logging.Nop{} +} + +func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { + return middleware.AddSetLoggerMiddleware(stack, o.Logger) +} + +func setResolvedDefaultsMode(o *Options) { + if len(o.resolvedDefaultsMode) > 0 { + return + } + + var mode aws.DefaultsMode + mode.SetFromString(string(o.DefaultsMode)) + + if mode == aws.DefaultsModeAuto { + mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment) + } + + o.resolvedDefaultsMode = mode +} + +// NewFromConfig returns a new client from the provided config. +func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { + opts := Options{ + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, + } + resolveAWSRetryerProvider(cfg, &opts) + resolveAWSRetryMaxAttempts(cfg, &opts) + resolveAWSRetryMode(cfg, &opts) + resolveAWSEndpointResolver(cfg, &opts) + resolveUseDualStackEndpoint(cfg, &opts) + resolveUseFIPSEndpoint(cfg, &opts) + resolveBaseEndpoint(cfg, &opts) + return New(opts, optFns...) 
+} + +func resolveHTTPClient(o *Options) { + var buildable *awshttp.BuildableClient + + if o.HTTPClient != nil { + var ok bool + buildable, ok = o.HTTPClient.(*awshttp.BuildableClient) + if !ok { + return + } + } else { + buildable = awshttp.NewBuildableClient() + } + + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) { + if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok { + dialer.Timeout = dialerTimeout + } + }) + + buildable = buildable.WithTransportOptions(func(transport *http.Transport) { + if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok { + transport.TLSHandshakeTimeout = tlsHandshakeTimeout + } + }) + } + + o.HTTPClient = buildable +} + +func resolveRetryer(o *Options) { + if o.Retryer != nil { + return + } + + if len(o.RetryMode) == 0 { + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + o.RetryMode = modeConfig.RetryMode + } + } + if len(o.RetryMode) == 0 { + o.RetryMode = aws.RetryModeStandard + } + + var standardOptions []func(*retry.StandardOptions) + if v := o.RetryMaxAttempts; v != 0 { + standardOptions = append(standardOptions, func(so *retry.StandardOptions) { + so.MaxAttempts = v + }) + } + + switch o.RetryMode { + case aws.RetryModeAdaptive: + var adaptiveOptions []func(*retry.AdaptiveModeOptions) + if len(standardOptions) != 0 { + adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) { + ao.StandardOptions = append(ao.StandardOptions, standardOptions...) + }) + } + o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...) + + default: + o.Retryer = retry.NewStandard(standardOptions...) + } +} + +func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { + if cfg.Retryer == nil { + return + } + o.Retryer = cfg.Retryer() +} + +func resolveAWSRetryMode(cfg aws.Config, o *Options) { + if len(cfg.RetryMode) == 0 { + return + } + o.RetryMode = cfg.RetryMode +} +func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { + if cfg.RetryMaxAttempts == 0 { + return + } + o.RetryMaxAttempts = cfg.RetryMaxAttempts +} + +func finalizeRetryMaxAttemptOptions(o *Options, client Client) { + if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { + return + } + + o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) +} + +func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { + if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil { + return + } + o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions) +} + +func addClientUserAgent(stack *middleware.Stack, options Options) error { + if err := awsmiddleware.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "sso", goModuleVersion)(stack); err != nil { + return err + } + + if len(options.AppID) > 0 { + return awsmiddleware.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID)(stack) + } + + return nil +} + +func addHTTPSignerV4Middleware(stack *middleware.Stack, o Options) error { + mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{ + CredentialsProvider: o.Credentials, + Signer: o.HTTPSignerV4, + LogSigning: o.ClientLogMode.IsSigning(), + }) + return stack.Finalize.Add(mw, middleware.After) +} + +type HTTPSignerV4 interface { + SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns 
...func(*v4.SignerOptions)) error +} + +func resolveHTTPSignerV4(o *Options) { + if o.HTTPSignerV4 != nil { + return + } + o.HTTPSignerV4 = newDefaultV4Signer(*o) +} + +func newDefaultV4Signer(o Options) *v4.Signer { + return v4.NewSigner(func(so *v4.SignerOptions) { + so.Logger = o.Logger + so.LogSigning = o.ClientLogMode.IsSigning() + }) +} + +func addRetryMiddlewares(stack *middleware.Stack, o Options) error { + mo := retry.AddRetryMiddlewaresOptions{ + Retryer: o.Retryer, + LogRetryAttempts: o.ClientLogMode.IsRetries(), + } + return retry.AddRetryMiddlewares(stack, mo) +} + +// resolves dual-stack endpoint configuration +func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseDualStackEndpoint = value + } + return nil +} + +// resolves FIPS endpoint configuration +func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseFIPSEndpoint = value + } + return nil +} + +func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error { + return awsmiddleware.AddRequestIDRetrieverMiddleware(stack) +} + +func addResponseErrorMiddleware(stack *middleware.Stack) error { + return awshttp.AddResponseErrorMiddleware(stack) +} + +func addRequestResponseLogging(stack *middleware.Stack, o Options) error { + return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ + LogRequest: o.ClientLogMode.IsRequest(), + LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), + LogResponse: o.ClientLogMode.IsResponse(), + LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), + }, middleware.After) +} + +type endpointDisableHTTPSMiddleware struct { + EndpointDisableHTTPS bool +} + +func (*endpointDisableHTTPSMiddleware) ID() string { + return "endpointDisableHTTPSMiddleware" +} + +func (m *endpointDisableHTTPSMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.EndpointDisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) { + req.URL.Scheme = "http" + } + + return next.HandleSerialize(ctx, in) + +} +func addendpointDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { + return stack.Serialize.Insert(&endpointDisableHTTPSMiddleware{ + EndpointDisableHTTPS: o.EndpointOptions.DisableHTTPS, + }, "OperationSerializer", middleware.Before) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go new file mode 100644 index 00000000000..0383bb0bd05 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go @@ -0,0 +1,266 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
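The api_client.go file above is the standard smithy-go client shell: New copies the Options, the resolve* helpers fill in defaults (logger, defaults mode, retryer, HTTP client, SigV4 signer), and NewFromConfig lifts shared aws.Config values into those Options before applying per-client functional options. A minimal usage sketch, assuming the sibling aws-sdk-go-v2/config module (vendored separately, not part of this file) is available:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sso"
)

func main() {
	ctx := context.Background()

	// Load region, credentials, and retry settings from the shared config sources.
	cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion("us-east-1"))
	if err != nil {
		log.Fatal(err)
	}

	// NewFromConfig copies the relevant cfg fields into sso.Options; the
	// functional option then overrides retry behavior for this client only.
	client := sso.NewFromConfig(cfg, func(o *sso.Options) {
		o.RetryMaxAttempts = 5
		o.RetryMode = aws.RetryModeAdaptive
	})
	_ = client
}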
+ +package sso + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + "github.com/aws/aws-sdk-go-v2/service/sso/types" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the STS short-term credentials for a given role name that is assigned +// to the user. +func (c *Client) GetRoleCredentials(ctx context.Context, params *GetRoleCredentialsInput, optFns ...func(*Options)) (*GetRoleCredentialsOutput, error) { + if params == nil { + params = &GetRoleCredentialsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetRoleCredentials", params, optFns, c.addOperationGetRoleCredentialsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetRoleCredentialsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetRoleCredentialsInput struct { + + // The token issued by the CreateToken API call. For more information, see + // CreateToken (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // in the IAM Identity Center OIDC API Reference Guide. + // + // This member is required. + AccessToken *string + + // The identifier for the AWS account that is assigned to the user. + // + // This member is required. + AccountId *string + + // The friendly name of the role that is assigned to the user. + // + // This member is required. + RoleName *string + + noSmithyDocumentSerde +} + +type GetRoleCredentialsOutput struct { + + // The credentials for the role that is assigned to the user. + RoleCredentials *types.RoleCredentials + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetRoleCredentialsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpGetRoleCredentials{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpGetRoleCredentials{}, middleware.After) + if err != nil { + return err + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addGetRoleCredentialsResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetRoleCredentialsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetRoleCredentials(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetRoleCredentials(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetRoleCredentials", + } +} + +type opGetRoleCredentialsResolveEndpointMiddleware struct { + EndpointResolver EndpointResolverV2 + BuiltInResolver builtInParameterResolver +} + +func (*opGetRoleCredentialsResolveEndpointMiddleware) ID() string { + return "ResolveEndpointV2" +} + +func (m *opGetRoleCredentialsResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.EndpointResolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + params := EndpointParameters{} + + m.BuiltInResolver.ResolveBuiltIns(¶ms) + + var resolvedEndpoint smithyendpoints.Endpoint + resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve 
service endpoint, %w", err) + } + + req.URL = &resolvedEndpoint.URI + + for k := range resolvedEndpoint.Headers { + req.Header.Set( + k, + resolvedEndpoint.Headers.Get(k), + ) + } + + authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) + if err != nil { + var nfe *internalauth.NoAuthenticationSchemesFoundError + if errors.As(err, &nfe) { + // if no auth scheme is found, default to sigv4 + signingName := "awsssoportal" + signingRegion := m.BuiltInResolver.(*builtInResolver).Region + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + + } + var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError + if errors.As(err, &ue) { + return out, metadata, fmt.Errorf( + "This operation requests signer version(s) %v but the client only supports %v", + ue.UnsupportedSchemes, + internalauth.SupportedSchemes, + ) + } + } + + for _, authScheme := range authSchemes { + switch authScheme.(type) { + case *internalauth.AuthenticationSchemeV4: + v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) + var signingName, signingRegion string + if v4Scheme.SigningName == nil { + signingName = "awsssoportal" + } else { + signingName = *v4Scheme.SigningName + } + if v4Scheme.SigningRegion == nil { + signingRegion = m.BuiltInResolver.(*builtInResolver).Region + } else { + signingRegion = *v4Scheme.SigningRegion + } + if v4Scheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. + ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + break + case *internalauth.AuthenticationSchemeV4A: + v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) + if v4aScheme.SigningName == nil { + v4aScheme.SigningName = aws.String("awsssoportal") + } + if v4aScheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. 
+ ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) + ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) + break + case *internalauth.AuthenticationSchemeNone: + break + } + } + + return next.HandleSerialize(ctx, in) +} + +func addGetRoleCredentialsResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { + return stack.Serialize.Insert(&opGetRoleCredentialsResolveEndpointMiddleware{ + EndpointResolver: options.EndpointResolverV2, + BuiltInResolver: &builtInResolver{ + Region: options.Region, + UseDualStack: options.EndpointOptions.UseDualStackEndpoint, + UseFIPS: options.EndpointOptions.UseFIPSEndpoint, + Endpoint: options.BaseEndpoint, + }, + }, "ResolveEndpoint", middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go new file mode 100644 index 00000000000..cc28543f8c3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go @@ -0,0 +1,361 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sso + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + "github.com/aws/aws-sdk-go-v2/service/sso/types" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Lists all roles that are assigned to the user for a given AWS account. +func (c *Client) ListAccountRoles(ctx context.Context, params *ListAccountRolesInput, optFns ...func(*Options)) (*ListAccountRolesOutput, error) { + if params == nil { + params = &ListAccountRolesInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListAccountRoles", params, optFns, c.addOperationListAccountRolesMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListAccountRolesOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListAccountRolesInput struct { + + // The token issued by the CreateToken API call. For more information, see + // CreateToken (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // in the IAM Identity Center OIDC API Reference Guide. + // + // This member is required. + AccessToken *string + + // The identifier for the AWS account that is assigned to the user. + // + // This member is required. + AccountId *string + + // The number of items that clients can request per page. + MaxResults *int32 + + // The page token from the previous response output when you request subsequent + // pages. + NextToken *string + + noSmithyDocumentSerde +} + +type ListAccountRolesOutput struct { + + // The page token client that is used to retrieve the list of accounts. + NextToken *string + + // A paginated response with the list of roles and the next token if more results + // are available. + RoleList []types.RoleInfo + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListAccountRolesMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpListAccountRoles{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListAccountRoles{}, middleware.After) + if err != nil { + return err + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addListAccountRolesResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addOpListAccountRolesValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListAccountRoles(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +// ListAccountRolesAPIClient is a client that implements the ListAccountRoles +// operation. +type ListAccountRolesAPIClient interface { + ListAccountRoles(context.Context, *ListAccountRolesInput, ...func(*Options)) (*ListAccountRolesOutput, error) +} + +var _ ListAccountRolesAPIClient = (*Client)(nil) + +// ListAccountRolesPaginatorOptions is the paginator options for ListAccountRoles +type ListAccountRolesPaginatorOptions struct { + // The number of items that clients can request per page. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListAccountRolesPaginator is a paginator for ListAccountRoles +type ListAccountRolesPaginator struct { + options ListAccountRolesPaginatorOptions + client ListAccountRolesAPIClient + params *ListAccountRolesInput + nextToken *string + firstPage bool +} + +// NewListAccountRolesPaginator returns a new ListAccountRolesPaginator +func NewListAccountRolesPaginator(client ListAccountRolesAPIClient, params *ListAccountRolesInput, optFns ...func(*ListAccountRolesPaginatorOptions)) *ListAccountRolesPaginator { + if params == nil { + params = &ListAccountRolesInput{} + } + + options := ListAccountRolesPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListAccountRolesPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListAccountRolesPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListAccountRoles page. +func (p *ListAccountRolesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListAccountRolesOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + result, err := p.client.ListAccountRoles(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opListAccountRoles(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListAccountRoles", + } +} + +type opListAccountRolesResolveEndpointMiddleware struct { + EndpointResolver EndpointResolverV2 + BuiltInResolver builtInParameterResolver +} + +func (*opListAccountRolesResolveEndpointMiddleware) ID() string { + return "ResolveEndpointV2" +} + +func (m *opListAccountRolesResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.EndpointResolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + params := EndpointParameters{} + + m.BuiltInResolver.ResolveBuiltIns(¶ms) + + var resolvedEndpoint smithyendpoints.Endpoint + resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL = &resolvedEndpoint.URI + + for k := range resolvedEndpoint.Headers { + req.Header.Set( + k, + resolvedEndpoint.Headers.Get(k), + ) + } + + authSchemes, err := 
internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) + if err != nil { + var nfe *internalauth.NoAuthenticationSchemesFoundError + if errors.As(err, &nfe) { + // if no auth scheme is found, default to sigv4 + signingName := "awsssoportal" + signingRegion := m.BuiltInResolver.(*builtInResolver).Region + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + + } + var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError + if errors.As(err, &ue) { + return out, metadata, fmt.Errorf( + "This operation requests signer version(s) %v but the client only supports %v", + ue.UnsupportedSchemes, + internalauth.SupportedSchemes, + ) + } + } + + for _, authScheme := range authSchemes { + switch authScheme.(type) { + case *internalauth.AuthenticationSchemeV4: + v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) + var signingName, signingRegion string + if v4Scheme.SigningName == nil { + signingName = "awsssoportal" + } else { + signingName = *v4Scheme.SigningName + } + if v4Scheme.SigningRegion == nil { + signingRegion = m.BuiltInResolver.(*builtInResolver).Region + } else { + signingRegion = *v4Scheme.SigningRegion + } + if v4Scheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. + ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + break + case *internalauth.AuthenticationSchemeV4A: + v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) + if v4aScheme.SigningName == nil { + v4aScheme.SigningName = aws.String("awsssoportal") + } + if v4aScheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. + ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) + ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) + break + case *internalauth.AuthenticationSchemeNone: + break + } + } + + return next.HandleSerialize(ctx, in) +} + +func addListAccountRolesResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { + return stack.Serialize.Insert(&opListAccountRolesResolveEndpointMiddleware{ + EndpointResolver: options.EndpointResolverV2, + BuiltInResolver: &builtInResolver{ + Region: options.Region, + UseDualStack: options.EndpointOptions.UseDualStackEndpoint, + UseFIPS: options.EndpointOptions.UseFIPSEndpoint, + Endpoint: options.BaseEndpoint, + }, + }, "ResolveEndpoint", middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go new file mode 100644 index 00000000000..567f6c6691e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go @@ -0,0 +1,358 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
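The paginator above is plain NextToken bookkeeping: HasMorePages reports true for the first page or while the service returns a non-empty token, NextPage re-issues the call with the previous token, and StopOnDuplicateToken guards against a service echoing the same token back. A sketch of draining it, assuming types.RoleInfo carries a RoleName pointer field as in the vendored types package:

package ssoexample

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sso"
)

// listAllRoles drains the ListAccountRoles paginator for one account.
func listAllRoles(ctx context.Context, client *sso.Client, accessToken, accountID string) ([]string, error) {
	p := sso.NewListAccountRolesPaginator(client, &sso.ListAccountRolesInput{
		AccessToken: &accessToken,
		AccountId:   &accountID,
		MaxResults:  aws.Int32(50), // copied into the paginator's Limit
	})

	var names []string
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			return nil, err
		}
		for _, role := range page.RoleList {
			names = append(names, aws.ToString(role.RoleName))
		}
	}
	return names, nil
}

The same paginator shape repeats for ListAccounts below.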
+ +package sso + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + "github.com/aws/aws-sdk-go-v2/service/sso/types" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Lists all AWS accounts assigned to the user. These AWS accounts are assigned by +// the administrator of the account. For more information, see Assign User Access (https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers) +// in the IAM Identity Center User Guide. This operation returns a paginated +// response. +func (c *Client) ListAccounts(ctx context.Context, params *ListAccountsInput, optFns ...func(*Options)) (*ListAccountsOutput, error) { + if params == nil { + params = &ListAccountsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListAccounts", params, optFns, c.addOperationListAccountsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListAccountsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListAccountsInput struct { + + // The token issued by the CreateToken API call. For more information, see + // CreateToken (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // in the IAM Identity Center OIDC API Reference Guide. + // + // This member is required. + AccessToken *string + + // This is the number of items clients can request per page. + MaxResults *int32 + + // (Optional) When requesting subsequent pages, this is the page token from the + // previous response output. + NextToken *string + + noSmithyDocumentSerde +} + +type ListAccountsOutput struct { + + // A paginated response with the list of account information and the next token if + // more results are available. + AccountList []types.AccountInfo + + // The page token client that is used to retrieve the list of accounts. + NextToken *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListAccountsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpListAccounts{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListAccounts{}, middleware.After) + if err != nil { + return err + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addListAccountsResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addOpListAccountsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListAccounts(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +// ListAccountsAPIClient is a client that implements the ListAccounts operation. +type ListAccountsAPIClient interface { + ListAccounts(context.Context, *ListAccountsInput, ...func(*Options)) (*ListAccountsOutput, error) +} + +var _ ListAccountsAPIClient = (*Client)(nil) + +// ListAccountsPaginatorOptions is the paginator options for ListAccounts +type ListAccountsPaginatorOptions struct { + // This is the number of items clients can request per page. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListAccountsPaginator is a paginator for ListAccounts +type ListAccountsPaginator struct { + options ListAccountsPaginatorOptions + client ListAccountsAPIClient + params *ListAccountsInput + nextToken *string + firstPage bool +} + +// NewListAccountsPaginator returns a new ListAccountsPaginator +func NewListAccountsPaginator(client ListAccountsAPIClient, params *ListAccountsInput, optFns ...func(*ListAccountsPaginatorOptions)) *ListAccountsPaginator { + if params == nil { + params = &ListAccountsInput{} + } + + options := ListAccountsPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListAccountsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListAccountsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListAccounts page. +func (p *ListAccountsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListAccountsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + result, err := p.client.ListAccounts(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opListAccounts(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListAccounts", + } +} + +type opListAccountsResolveEndpointMiddleware struct { + EndpointResolver EndpointResolverV2 + BuiltInResolver builtInParameterResolver +} + +func (*opListAccountsResolveEndpointMiddleware) ID() string { + return "ResolveEndpointV2" +} + +func (m *opListAccountsResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.EndpointResolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + params := EndpointParameters{} + + m.BuiltInResolver.ResolveBuiltIns(¶ms) + + var resolvedEndpoint smithyendpoints.Endpoint + resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL = &resolvedEndpoint.URI + + for k := range resolvedEndpoint.Headers { + req.Header.Set( + k, + resolvedEndpoint.Headers.Get(k), + ) + } + + authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) + if err != nil { + var nfe *internalauth.NoAuthenticationSchemesFoundError + if 
errors.As(err, &nfe) { + // if no auth scheme is found, default to sigv4 + signingName := "awsssoportal" + signingRegion := m.BuiltInResolver.(*builtInResolver).Region + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + + } + var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError + if errors.As(err, &ue) { + return out, metadata, fmt.Errorf( + "This operation requests signer version(s) %v but the client only supports %v", + ue.UnsupportedSchemes, + internalauth.SupportedSchemes, + ) + } + } + + for _, authScheme := range authSchemes { + switch authScheme.(type) { + case *internalauth.AuthenticationSchemeV4: + v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) + var signingName, signingRegion string + if v4Scheme.SigningName == nil { + signingName = "awsssoportal" + } else { + signingName = *v4Scheme.SigningName + } + if v4Scheme.SigningRegion == nil { + signingRegion = m.BuiltInResolver.(*builtInResolver).Region + } else { + signingRegion = *v4Scheme.SigningRegion + } + if v4Scheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. + ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + break + case *internalauth.AuthenticationSchemeV4A: + v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) + if v4aScheme.SigningName == nil { + v4aScheme.SigningName = aws.String("awsssoportal") + } + if v4aScheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. + ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) + ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) + break + case *internalauth.AuthenticationSchemeNone: + break + } + } + + return next.HandleSerialize(ctx, in) +} + +func addListAccountsResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { + return stack.Serialize.Insert(&opListAccountsResolveEndpointMiddleware{ + EndpointResolver: options.EndpointResolverV2, + BuiltInResolver: &builtInResolver{ + Region: options.Region, + UseDualStack: options.EndpointOptions.UseDualStackEndpoint, + UseFIPS: options.EndpointOptions.UseFIPSEndpoint, + Endpoint: options.BaseEndpoint, + }, + }, "ResolveEndpoint", middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go new file mode 100644 index 00000000000..c30da0296f4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go @@ -0,0 +1,261 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
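Taken together, ListAccounts, ListAccountRoles, and GetRoleCredentials make up the usual SSO credential flow: enumerate the accounts visible to the signed-in user, pick a role, then trade the access token for short-term role credentials. A condensed first-match sketch, assuming types.AccountInfo and types.RoleInfo expose pointer fields as in the vendored types package, and that the access token comes from the separate ssooidc CreateToken flow; real code would page through both lists:

package ssoexample

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/service/sso"
	"github.com/aws/aws-sdk-go-v2/service/sso/types"
)

// firstRoleCredentials exchanges the SSO access token for the short-term
// credentials of the first visible account/role pair. It returns nil, nil
// when nothing is assigned to the user.
func firstRoleCredentials(ctx context.Context, client *sso.Client, accessToken string) (*types.RoleCredentials, error) {
	accounts, err := client.ListAccounts(ctx, &sso.ListAccountsInput{AccessToken: &accessToken})
	if err != nil || len(accounts.AccountList) == 0 {
		return nil, err
	}
	accountID := accounts.AccountList[0].AccountId

	roles, err := client.ListAccountRoles(ctx, &sso.ListAccountRolesInput{
		AccessToken: &accessToken,
		AccountId:   accountID,
	})
	if err != nil || len(roles.RoleList) == 0 {
		return nil, err
	}

	out, err := client.GetRoleCredentials(ctx, &sso.GetRoleCredentialsInput{
		AccessToken: &accessToken,
		AccountId:   accountID,
		RoleName:    roles.RoleList[0].RoleName,
	})
	if err != nil {
		return nil, err
	}
	return out.RoleCredentials, nil
}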
+ +package sso + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Removes the locally stored SSO tokens from the client-side cache and sends an +// API call to the IAM Identity Center service to invalidate the corresponding +// server-side IAM Identity Center sign in session. If a user uses IAM Identity +// Center to access the AWS CLI, the user’s IAM Identity Center sign in session is +// used to obtain an IAM session, as specified in the corresponding IAM Identity +// Center permission set. More specifically, IAM Identity Center assumes an IAM +// role in the target account on behalf of the user, and the corresponding +// temporary AWS credentials are returned to the client. After user logout, any +// existing IAM role sessions that were created by using IAM Identity Center +// permission sets continue based on the duration configured in the permission set. +// For more information, see User authentications (https://docs.aws.amazon.com/singlesignon/latest/userguide/authconcept.html) +// in the IAM Identity Center User Guide. +func (c *Client) Logout(ctx context.Context, params *LogoutInput, optFns ...func(*Options)) (*LogoutOutput, error) { + if params == nil { + params = &LogoutInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "Logout", params, optFns, c.addOperationLogoutMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*LogoutOutput) + out.ResultMetadata = metadata + return out, nil +} + +type LogoutInput struct { + + // The token issued by the CreateToken API call. For more information, see + // CreateToken (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // in the IAM Identity Center OIDC API Reference Guide. + // + // This member is required. + AccessToken *string + + noSmithyDocumentSerde +} + +type LogoutOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationLogoutMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpLogout{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpLogout{}, middleware.After) + if err != nil { + return err + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addLogoutResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addOpLogoutValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opLogout(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opLogout(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "Logout", + } +} + +type opLogoutResolveEndpointMiddleware struct { + EndpointResolver EndpointResolverV2 + BuiltInResolver builtInParameterResolver +} + +func (*opLogoutResolveEndpointMiddleware) ID() string { + return "ResolveEndpointV2" +} + +func (m *opLogoutResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.EndpointResolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + params := EndpointParameters{} + + m.BuiltInResolver.ResolveBuiltIns(¶ms) + + var resolvedEndpoint smithyendpoints.Endpoint + resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL = &resolvedEndpoint.URI + + for k := range resolvedEndpoint.Headers { + req.Header.Set( 
+ k, + resolvedEndpoint.Headers.Get(k), + ) + } + + authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) + if err != nil { + var nfe *internalauth.NoAuthenticationSchemesFoundError + if errors.As(err, &nfe) { + // if no auth scheme is found, default to sigv4 + signingName := "awsssoportal" + signingRegion := m.BuiltInResolver.(*builtInResolver).Region + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + + } + var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError + if errors.As(err, &ue) { + return out, metadata, fmt.Errorf( + "This operation requests signer version(s) %v but the client only supports %v", + ue.UnsupportedSchemes, + internalauth.SupportedSchemes, + ) + } + } + + for _, authScheme := range authSchemes { + switch authScheme.(type) { + case *internalauth.AuthenticationSchemeV4: + v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) + var signingName, signingRegion string + if v4Scheme.SigningName == nil { + signingName = "awsssoportal" + } else { + signingName = *v4Scheme.SigningName + } + if v4Scheme.SigningRegion == nil { + signingRegion = m.BuiltInResolver.(*builtInResolver).Region + } else { + signingRegion = *v4Scheme.SigningRegion + } + if v4Scheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. + ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + break + case *internalauth.AuthenticationSchemeV4A: + v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) + if v4aScheme.SigningName == nil { + v4aScheme.SigningName = aws.String("awsssoportal") + } + if v4aScheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. + ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) + ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) + break + case *internalauth.AuthenticationSchemeNone: + break + } + } + + return next.HandleSerialize(ctx, in) +} + +func addLogoutResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { + return stack.Serialize.Insert(&opLogoutResolveEndpointMiddleware{ + EndpointResolver: options.EndpointResolverV2, + BuiltInResolver: &builtInResolver{ + Region: options.Region, + UseDualStack: options.EndpointOptions.UseDualStackEndpoint, + UseFIPS: options.EndpointOptions.UseFIPSEndpoint, + Endpoint: options.BaseEndpoint, + }, + }, "ResolveEndpoint", middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go new file mode 100644 index 00000000000..8bba205f435 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go @@ -0,0 +1,1151 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
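Logout above invalidates the server-side sign-in session, so an expired or revoked token comes back as one of the modeled errors that the deserializers below reconstruct from the X-Amzn-ErrorType header or the JSON error code. A sketch that treats UnauthorizedException as an already-ended session, assuming the vendored types package models that error in the usual generated form:

package ssoexample

import (
	"context"
	"errors"
	"log"

	"github.com/aws/aws-sdk-go-v2/service/sso"
	"github.com/aws/aws-sdk-go-v2/service/sso/types"
)

// logout ends the sign-in session, tolerating tokens that are already invalid.
func logout(ctx context.Context, client *sso.Client, accessToken string) error {
	_, err := client.Logout(ctx, &sso.LogoutInput{AccessToken: &accessToken})

	var unauthorized *types.UnauthorizedException
	if errors.As(err, &unauthorized) {
		// The token was already expired or revoked; nothing left to invalidate.
		log.Println("token no longer valid:", unauthorized.ErrorMessage())
		return nil
	}
	return err
}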
+ +package sso + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws/protocol/restjson" + "github.com/aws/aws-sdk-go-v2/service/sso/types" + smithy "github.com/aws/smithy-go" + smithyio "github.com/aws/smithy-go/io" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" + "io/ioutil" + "strings" +) + +type awsRestjson1_deserializeOpGetRoleCredentials struct { +} + +func (*awsRestjson1_deserializeOpGetRoleCredentials) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpGetRoleCredentials) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorGetRoleCredentials(response, &metadata) + } + output := &GetRoleCredentialsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentGetRoleCredentialsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorGetRoleCredentials(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidRequestException", 
errorCode): + return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) + + case strings.EqualFold("UnauthorizedException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentGetRoleCredentialsOutput(v **GetRoleCredentialsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetRoleCredentialsOutput + if *v == nil { + sv = &GetRoleCredentialsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "roleCredentials": + if err := awsRestjson1_deserializeDocumentRoleCredentials(&sv.RoleCredentials, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpListAccountRoles struct { +} + +func (*awsRestjson1_deserializeOpListAccountRoles) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpListAccountRoles) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorListAccountRoles(response, &metadata) + } + output := &ListAccountRolesOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentListAccountRolesOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorListAccountRoles(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := 
bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidRequestException", errorCode): + return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) + + case strings.EqualFold("UnauthorizedException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentListAccountRolesOutput(v **ListAccountRolesOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListAccountRolesOutput + if *v == nil { + sv = &ListAccountRolesOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NextTokenType to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + case "roleList": + if err := awsRestjson1_deserializeDocumentRoleListType(&sv.RoleList, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpListAccounts struct { +} + +func (*awsRestjson1_deserializeOpListAccounts) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpListAccounts) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorListAccounts(response, &metadata) + } + output := &ListAccountsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body 
:= io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentListAccountsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorListAccounts(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidRequestException", errorCode): + return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) + + case strings.EqualFold("UnauthorizedException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentListAccountsOutput(v **ListAccountsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListAccountsOutput + if *v == nil { + sv = &ListAccountsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "accountList": + if err := awsRestjson1_deserializeDocumentAccountListType(&sv.AccountList, value); err != nil { + return err + } + + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NextTokenType to be of type string, got %T 
instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpLogout struct { +} + +func (*awsRestjson1_deserializeOpLogout) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpLogout) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorLogout(response, &metadata) + } + output := &LogoutOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorLogout(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidRequestException", errorCode): + return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) + + case strings.EqualFold("UnauthorizedException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeErrorInvalidRequestException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidRequestException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, 
ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentInvalidRequestException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorResourceNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.ResourceNotFoundException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentResourceNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorTooManyRequestsException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.TooManyRequestsException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentTooManyRequestsException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorUnauthorizedException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.UnauthorizedException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentUnauthorizedException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + 
errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeDocumentAccountInfo(v **types.AccountInfo, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AccountInfo + if *v == nil { + sv = &types.AccountInfo{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "accountId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AccountIdType to be of type string, got %T instead", value) + } + sv.AccountId = ptr.String(jtv) + } + + case "accountName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AccountNameType to be of type string, got %T instead", value) + } + sv.AccountName = ptr.String(jtv) + } + + case "emailAddress": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EmailAddressType to be of type string, got %T instead", value) + } + sv.EmailAddress = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentAccountListType(v *[]types.AccountInfo, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.AccountInfo + if *v == nil { + cv = []types.AccountInfo{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.AccountInfo + destAddr := &col + if err := awsRestjson1_deserializeDocumentAccountInfo(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocumentInvalidRequestException(v **types.InvalidRequestException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidRequestException + if *v == nil { + sv = &types.InvalidRequestException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentResourceNotFoundException(v **types.ResourceNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ResourceNotFoundException + if *v == nil { + sv = &types.ResourceNotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + 
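Every error deserializer in this generated file repeats one diagnostic pattern: the response body is read through an io.TeeReader into a fixed-size smithyio.RingBuffer, so that when JSON decoding fails the most recently consumed bytes can be attached as a Snapshot on the returned smithy.DeserializationError. A minimal standalone sketch of that pattern (the decodeWithSnapshot helper is hypothetical, not part of the generated code):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"strings"

	smithy "github.com/aws/smithy-go"
	smithyio "github.com/aws/smithy-go/io"
)

// decodeWithSnapshot decodes a JSON document while retaining the bytes it
// consumed, mirroring the generated deserializers above.
func decodeWithSnapshot(body io.Reader) (interface{}, error) {
	var buff [1024]byte
	ring := smithyio.NewRingBuffer(buff[:])
	tee := io.TeeReader(body, ring) // every byte read is also written to the ring

	dec := json.NewDecoder(tee)
	dec.UseNumber() // keep numbers as json.Number, as the generated code does

	var shape interface{}
	if err := dec.Decode(&shape); err != nil && err != io.EOF {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ring) // drain the ring into the error snapshot
		return nil, &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
	}
	return shape, nil
}

func main() {
	_, err := decodeWithSnapshot(strings.NewReader(`{"message": oops`))
	fmt.Println(err) // the returned error carries a snapshot of the malformed payload
}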
+func awsRestjson1_deserializeDocumentRoleCredentials(v **types.RoleCredentials, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.RoleCredentials + if *v == nil { + sv = &types.RoleCredentials{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "accessKeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AccessKeyType to be of type string, got %T instead", value) + } + sv.AccessKeyId = ptr.String(jtv) + } + + case "expiration": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected ExpirationTimestampType to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.Expiration = i64 + } + + case "secretAccessKey": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SecretAccessKeyType to be of type string, got %T instead", value) + } + sv.SecretAccessKey = ptr.String(jtv) + } + + case "sessionToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SessionTokenType to be of type string, got %T instead", value) + } + sv.SessionToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentRoleInfo(v **types.RoleInfo, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.RoleInfo + if *v == nil { + sv = &types.RoleInfo{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "accountId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AccountIdType to be of type string, got %T instead", value) + } + sv.AccountId = ptr.String(jtv) + } + + case "roleName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RoleNameType to be of type string, got %T instead", value) + } + sv.RoleName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentRoleListType(v *[]types.RoleInfo, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.RoleInfo + if *v == nil { + cv = []types.RoleInfo{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.RoleInfo + destAddr := &col + if err := awsRestjson1_deserializeDocumentRoleInfo(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocumentTooManyRequestsException(v **types.TooManyRequestsException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TooManyRequestsException + if *v == nil { + sv = 
&types.TooManyRequestsException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentUnauthorizedException(v **types.UnauthorizedException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.UnauthorizedException + if *v == nil { + sv = &types.UnauthorizedException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go new file mode 100644 index 00000000000..59456d5dc27 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go @@ -0,0 +1,21 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +// Package sso provides the API client, operations, and parameter types for AWS +// Single Sign-On. +// +// AWS IAM Identity Center (successor to AWS Single Sign-On) Portal is a web +// service that makes it easy for you to assign user access to IAM Identity Center +// resources such as the AWS access portal. Users can get AWS account applications +// and roles assigned to them and get federated into the application. Although AWS +// Single Sign-On was renamed, the sso and identitystore API namespaces will +// continue to retain their original name for backward compatibility purposes. For +// more information, see IAM Identity Center rename (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed) +// . This reference guide describes the IAM Identity Center Portal operations that +// you can call programmatically and includes detailed information on data types and +// errors. AWS provides SDKs that consist of libraries and sample code for various +// programming languages and platforms, such as Java, Ruby, .Net, iOS, or Android. +// The SDKs provide a convenient way to create programmatic access to IAM Identity +// Center and other AWS services. For more information about the AWS SDKs, +// including how to download and install them, see Tools for Amazon Web Services (http://aws.amazon.com/tools/) +// . +package sso diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go new file mode 100644 index 00000000000..11538705946 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go @@ -0,0 +1,519 @@ +// Code generated by smithy-go-codegen DO NOT EDIT.
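The package documentation above describes the client only in the abstract. For orientation, here is a minimal, hypothetical usage sketch (region and bearer token are placeholders, error handling is reduced to log.Fatal, and the token would normally come from the SSO OIDC device-authorization flow):

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sso"
)

func main() {
	ctx := context.Background()

	// Load shared AWS configuration (credentials, region, etc.).
	cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion("us-east-1"))
	if err != nil {
		log.Fatal(err)
	}

	client := sso.NewFromConfig(cfg)

	// List the AWS accounts assigned to the holder of the access token.
	out, err := client.ListAccounts(ctx, &sso.ListAccountsInput{
		AccessToken: aws.String("<sso-access-token>"),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, acct := range out.AccountList {
		log.Printf("account %s (%s)", aws.ToString(acct.AccountId), aws.ToString(acct.AccountName))
	}
}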
+ +package sso + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn" + internalendpoints "github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net/http" + "net/url" + "os" + "strings" +) + +// EndpointResolverOptions is the service endpoint resolver options +type EndpointResolverOptions = internalendpoints.Options + +// EndpointResolver interface for resolving service endpoints. +type EndpointResolver interface { + ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error) +} + +var _ EndpointResolver = &internalendpoints.Resolver{} + +// NewDefaultEndpointResolver constructs a new service endpoint resolver +func NewDefaultEndpointResolver() *internalendpoints.Resolver { + return internalendpoints.New() +} + +// EndpointResolverFunc is a helper utility that wraps a function so it satisfies +// the EndpointResolver interface. This is useful when you want to add additional +// endpoint resolving logic, or stub out specific endpoints with custom values. +type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error) + +func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + return fn(region, options) +} + +// EndpointResolverFromURL returns an EndpointResolver configured using the +// provided endpoint URL. By default, the resolved endpoint uses the client +// region as its signing region, and the endpoint source is set to +// EndpointSourceCustom. You can provide functional options to configure endpoint +// values for the resolved endpoint.
+func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver { + e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom} + for _, fn := range optFns { + fn(&e) + } + + return EndpointResolverFunc( + func(region string, options EndpointResolverOptions) (aws.Endpoint, error) { + if len(e.SigningRegion) == 0 { + e.SigningRegion = region + } + return e, nil + }, + ) +} + +type ResolveEndpoint struct { + Resolver EndpointResolver + Options EndpointResolverOptions +} + +func (*ResolveEndpoint) ID() string { + return "ResolveEndpoint" +} + +func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.Resolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + eo := m.Options + eo.Logger = middleware.GetLogger(ctx) + + var endpoint aws.Endpoint + endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo) + if err != nil { + nf := (&aws.EndpointNotFoundError{}) + if errors.As(err, &nf) { + ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, false) + return next.HandleSerialize(ctx, in) + } + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL, err = url.Parse(endpoint.URL) + if err != nil { + return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) + } + + if len(awsmiddleware.GetSigningName(ctx)) == 0 { + signingName := endpoint.SigningName + if len(signingName) == 0 { + signingName = "awsssoportal" + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + } + ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source) + ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable) + ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) + ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID) + return next.HandleSerialize(ctx, in) +} +func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error { + return stack.Serialize.Insert(&ResolveEndpoint{ + Resolver: o.EndpointResolver, + Options: o.EndpointOptions, + }, "OperationSerializer", middleware.Before) +} + +func removeResolveEndpointMiddleware(stack *middleware.Stack) error { + _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID()) + return err +} + +type wrappedEndpointResolver struct { + awsResolver aws.EndpointResolverWithOptions +} + +func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + return w.awsResolver.ResolveEndpoint(ServiceID, region, options) +} + +type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error) + +func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) { + return a(service, region) +} + +var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil) + +// withEndpointResolver returns an EndpointResolver that first delegates endpoint resolution to the awsResolver.
+// If awsResolver returns aws.EndpointNotFoundError error, the v1 resolver middleware will swallow the error, +// and set an appropriate context flag such that fallback will occur when EndpointResolverV2 is invoked +// via its middleware. +// +// If another error (besides aws.EndpointNotFoundError) is returned, then that error will be propagated. +func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions) EndpointResolver { + var resolver aws.EndpointResolverWithOptions + + if awsResolverWithOptions != nil { + resolver = awsResolverWithOptions + } else if awsResolver != nil { + resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint) + } + + return &wrappedEndpointResolver{ + awsResolver: resolver, + } +} + +func finalizeClientEndpointResolverOptions(options *Options) { + options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage() + + if len(options.EndpointOptions.ResolvedRegion) == 0 { + const fipsInfix = "-fips-" + const fipsPrefix = "fips-" + const fipsSuffix = "-fips" + + if strings.Contains(options.Region, fipsInfix) || + strings.Contains(options.Region, fipsPrefix) || + strings.Contains(options.Region, fipsSuffix) { + options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( + options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") + options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled + } + } + +} + +func resolveEndpointResolverV2(options *Options) { + if options.EndpointResolverV2 == nil { + options.EndpointResolverV2 = NewDefaultEndpointResolverV2() + } +} + +func resolveBaseEndpoint(cfg aws.Config, o *Options) { + if cfg.BaseEndpoint != nil { + o.BaseEndpoint = cfg.BaseEndpoint + } + + _, g := os.LookupEnv("AWS_ENDPOINT_URL") + _, s := os.LookupEnv("AWS_ENDPOINT_URL_SSO") + + if g && !s { + return + } + + value, found, err := internalConfig.ResolveServiceBaseEndpoint(context.Background(), "SSO", cfg.ConfigSources) + if found && err == nil { + o.BaseEndpoint = &value + } +} + +// Utility function to aid with translating pseudo-regions to classical regions +// with the appropriate setting indicated by the pseudo-region +func mapPseudoRegion(pr string) (region string, fips aws.FIPSEndpointState) { + const fipsInfix = "-fips-" + const fipsPrefix = "fips-" + const fipsSuffix = "-fips" + + if strings.Contains(pr, fipsInfix) || + strings.Contains(pr, fipsPrefix) || + strings.Contains(pr, fipsSuffix) { + region = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( + pr, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") + fips = aws.FIPSEndpointStateEnabled + } else { + region = pr + } + + return region, fips +} + +// builtInParameterResolver is the interface responsible for resolving BuiltIn +// values during the sourcing of EndpointParameters +type builtInParameterResolver interface { + ResolveBuiltIns(*EndpointParameters) error +} + +// builtInResolver resolves modeled BuiltIn values using only the members defined +// below. +type builtInResolver struct { + // The AWS region used to dispatch the request. + Region string + + // Sourced BuiltIn value in a historical enabled or disabled state. + UseDualStack aws.DualStackEndpointState + + // Sourced BuiltIn value in a historical enabled or disabled state. + UseFIPS aws.FIPSEndpointState + + // Base endpoint that can potentially be modified during Endpoint resolution. + Endpoint *string +} + +// Invoked at runtime to resolve BuiltIn Values. 
Only resolution code specific to +// each BuiltIn value is generated. +func (b *builtInResolver) ResolveBuiltIns(params *EndpointParameters) error { + + region, _ := mapPseudoRegion(b.Region) + if len(region) == 0 { + return fmt.Errorf("Could not resolve AWS::Region") + } else { + params.Region = aws.String(region) + } + if b.UseDualStack == aws.DualStackEndpointStateEnabled { + params.UseDualStack = aws.Bool(true) + } else { + params.UseDualStack = aws.Bool(false) + } + if b.UseFIPS == aws.FIPSEndpointStateEnabled { + params.UseFIPS = aws.Bool(true) + } else { + params.UseFIPS = aws.Bool(false) + } + params.Endpoint = b.Endpoint + return nil +} + +// EndpointParameters provides the parameters that influence how endpoints are +// resolved. +type EndpointParameters struct { + // The AWS region used to dispatch the request. + // + // Parameter is + // required. + // + // AWS::Region + Region *string + + // When true, use the dual-stack endpoint. If the configured endpoint does not + // support dual-stack, dispatching the request MAY return an error. + // + // Defaults to + // false if no value is provided. + // + // AWS::UseDualStack + UseDualStack *bool + + // When true, send this request to the FIPS-compliant regional endpoint. If the + // configured endpoint does not have a FIPS compliant endpoint, dispatching the + // request will return an error. + // + // Defaults to false if no value is + // provided. + // + // AWS::UseFIPS + UseFIPS *bool + + // Override the endpoint used to send this request + // + // Parameter is + // required. + // + // SDK::Endpoint + Endpoint *string +} + +// ValidateRequired validates required parameters are set. +func (p EndpointParameters) ValidateRequired() error { + if p.UseDualStack == nil { + return fmt.Errorf("parameter UseDualStack is required") + } + + if p.UseFIPS == nil { + return fmt.Errorf("parameter UseFIPS is required") + } + + return nil +} + +// WithDefaults returns a shallow copy of EndpointParameters with default values +// applied to members where applicable. +func (p EndpointParameters) WithDefaults() EndpointParameters { + if p.UseDualStack == nil { + p.UseDualStack = ptr.Bool(false) + } + + if p.UseFIPS == nil { + p.UseFIPS = ptr.Bool(false) + } + return p +} + +// EndpointResolverV2 provides the interface for resolving service endpoints. +type EndpointResolverV2 interface { + // ResolveEndpoint attempts to resolve the endpoint with the provided options, + // returning the endpoint if found. Otherwise an error is returned. + ResolveEndpoint(ctx context.Context, params EndpointParameters) ( + smithyendpoints.Endpoint, error, + ) +} + +// resolver provides the implementation for resolving endpoints. +type resolver struct{} + +func NewDefaultEndpointResolverV2() EndpointResolverV2 { + return &resolver{} +} + +// ResolveEndpoint attempts to resolve the endpoint with the provided options, +// returning the endpoint if found. Otherwise an error is returned.
+func (r *resolver) ResolveEndpoint( + ctx context.Context, params EndpointParameters, +) ( + endpoint smithyendpoints.Endpoint, err error, +) { + params = params.WithDefaults() + if err = params.ValidateRequired(); err != nil { + return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err) + } + _UseDualStack := *params.UseDualStack + _UseFIPS := *params.UseFIPS + + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if _UseFIPS == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: FIPS and custom endpoint are not supported") + } + if _UseDualStack == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Dualstack and custom endpoint are not supported") + } + uriString := _Endpoint + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + if exprVal := params.Region; exprVal != nil { + _Region := *exprVal + _ = _Region + if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { + _PartitionResult := *exprVal + _ = _PartitionResult + if _UseFIPS == true { + if _UseDualStack == true { + if true == _PartitionResult.SupportsFIPS { + if true == _PartitionResult.SupportsDualStack { + uriString := func() string { + var out strings.Builder + out.WriteString("https://portal.sso-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS and DualStack are enabled, but this partition does not support one or both") + } + } + if _UseFIPS == true { + if true == _PartitionResult.SupportsFIPS { + if "aws-us-gov" == _PartitionResult.Name { + uriString := func() string { + var out strings.Builder + out.WriteString("https://portal.sso.") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://portal.sso-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS is enabled but this partition does not support FIPS") + } + if _UseDualStack == true { + if true == _PartitionResult.SupportsDualStack { + uriString := func() string { + var out strings.Builder + out.WriteString("https://portal.sso.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: 
*uri, + Headers: http.Header{}, + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "DualStack is enabled but this partition does not support DualStack") + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://portal.sso.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Missing Region") +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json new file mode 100644 index 00000000000..8e618418710 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json @@ -0,0 +1,33 @@ +{ + "dependencies": { + "github.com/aws/aws-sdk-go-v2": "v1.4.0", + "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", + "github.com/aws/smithy-go": "v1.4.0", + "github.com/google/go-cmp": "v0.5.4" + }, + "files": [ + "api_client.go", + "api_client_test.go", + "api_op_GetRoleCredentials.go", + "api_op_ListAccountRoles.go", + "api_op_ListAccounts.go", + "api_op_Logout.go", + "deserializers.go", + "doc.go", + "endpoints.go", + "endpoints_config_test.go", + "endpoints_test.go", + "generated.json", + "internal/endpoints/endpoints.go", + "internal/endpoints/endpoints_test.go", + "protocol_test.go", + "serializers.go", + "types/errors.go", + "types/types.go", + "validators.go" + ], + "go": "1.15", + "module": "github.com/aws/aws-sdk-go-v2/service/sso", + "unstable": false +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go new file mode 100644 index 00000000000..cd496510f1c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package sso + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.17.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go new file mode 100644 index 00000000000..f044afde47c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go @@ -0,0 +1,526 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package endpoints + +import ( + "github.com/aws/aws-sdk-go-v2/aws" + endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2" + "github.com/aws/smithy-go/logging" + "regexp" +) + +// Options is the endpoint resolver configuration options +type Options struct { + // Logger is a logging implementation that log events should be sent to. + Logger logging.Logger + + // LogDeprecated indicates that deprecated endpoints should be logged to the + // provided logger. 
+ LogDeprecated bool + + // ResolvedRegion is used to override the region to be resolved, rather than + // using the value passed to the ResolveEndpoint method. This value is used by the + // SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative + // name. You must not set this value directly in your application. + ResolvedRegion string + + // DisableHTTPS informs the resolver to return an endpoint that does not use the + // HTTPS scheme. + DisableHTTPS bool + + // UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint. + UseDualStackEndpoint aws.DualStackEndpointState + + // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. + UseFIPSEndpoint aws.FIPSEndpointState +} + +func (o Options) GetResolvedRegion() string { + return o.ResolvedRegion +} + +func (o Options) GetDisableHTTPS() bool { + return o.DisableHTTPS +} + +func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState { + return o.UseDualStackEndpoint +} + +func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState { + return o.UseFIPSEndpoint +} + +func transformToSharedOptions(options Options) endpoints.Options { + return endpoints.Options{ + Logger: options.Logger, + LogDeprecated: options.LogDeprecated, + ResolvedRegion: options.ResolvedRegion, + DisableHTTPS: options.DisableHTTPS, + UseDualStackEndpoint: options.UseDualStackEndpoint, + UseFIPSEndpoint: options.UseFIPSEndpoint, + } +} + +// Resolver SSO endpoint resolver +type Resolver struct { + partitions endpoints.Partitions +} + +// ResolveEndpoint resolves the service endpoint for the given region and options +func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) { + if len(region) == 0 { + return endpoint, &aws.MissingRegionError{} + } + + opt := transformToSharedOptions(options) + return r.partitions.ResolveEndpoint(region, opt) +} + +// New returns a new Resolver +func New() *Resolver { + return &Resolver{ + partitions: defaultPartitions, + } +} + +var partitionRegexp = struct { + Aws *regexp.Regexp + AwsCn *regexp.Regexp + AwsIso *regexp.Regexp + AwsIsoB *regexp.Regexp + AwsIsoE *regexp.Regexp + AwsIsoF *regexp.Regexp + AwsUsGov *regexp.Regexp +}{ + + Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$"), + AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), + AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), + AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), + AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"), + AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"), + AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), +} + +var defaultPartitions = endpoints.Partitions{ + { + ID: "aws", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "portal.sso.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "portal.sso-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "portal.sso-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "portal.sso.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.Aws, + IsRegionalized: true, + Endpoints:
endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "af-south-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.af-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "af-south-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-east-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-northeast-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-northeast-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-northeast-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-northeast-2", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-northeast-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-northeast-2", + }, + }, + endpoints.EndpointKey{ + Region: "ap-northeast-3", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-northeast-3.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-northeast-3", + }, + }, + endpoints.EndpointKey{ + Region: "ap-south-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-south-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-southeast-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-2", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-southeast-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-2", + }, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-3", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-southeast-3.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-3", + }, + }, + endpoints.EndpointKey{ + Region: "ca-central-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ca-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ca-central-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-central-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.eu-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-central-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-central-2", + }: endpoints.Endpoint{ + Hostname: "portal.sso.eu-central-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-central-2", + }, + }, + endpoints.EndpointKey{ + Region: "eu-north-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.eu-north-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-north-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-south-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.eu-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-south-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-west-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.eu-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-west-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-west-2", + }: endpoints.Endpoint{ + Hostname: "portal.sso.eu-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-west-2", + }, + }, + endpoints.EndpointKey{ + Region: "eu-west-3", + }: endpoints.Endpoint{ + Hostname: "portal.sso.eu-west-3.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + 
Region: "eu-west-3", + }, + }, + endpoints.EndpointKey{ + Region: "il-central-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.il-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "il-central-1", + }, + }, + endpoints.EndpointKey{ + Region: "me-south-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.me-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "me-south-1", + }, + }, + endpoints.EndpointKey{ + Region: "sa-east-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.sa-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "sa-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-east-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.us-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-east-2", + }: endpoints.Endpoint{ + Hostname: "portal.sso.us-east-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-2", + }, + }, + endpoints.EndpointKey{ + Region: "us-west-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.us-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-west-2", + }: endpoints.Endpoint{ + Hostname: "portal.sso.us-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + { + ID: "aws-cn", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "portal.sso.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "portal.sso-fips.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "portal.sso-fips.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "portal.sso.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsCn, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "cn-north-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.cn-north-1.amazonaws.com.cn", + CredentialScope: endpoints.CredentialScope{ + Region: "cn-north-1", + }, + }, + endpoints.EndpointKey{ + Region: "cn-northwest-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.cn-northwest-1.amazonaws.com.cn", + CredentialScope: endpoints.CredentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + { + ID: "aws-iso", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "portal.sso-fips.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "portal.sso.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIso, + IsRegionalized: true, + }, + { + ID: "aws-iso-b", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "portal.sso-fips.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + 
}: { + Hostname: "portal.sso.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoB, + IsRegionalized: true, + }, + { + ID: "aws-iso-e", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "portal.sso-fips.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "portal.sso.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoE, + IsRegionalized: true, + }, + { + ID: "aws-iso-f", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "portal.sso-fips.{region}.csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "portal.sso.{region}.csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoF, + IsRegionalized: true, + }, + { + ID: "aws-us-gov", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "portal.sso.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "portal.sso-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "portal.sso-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "portal.sso.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsUsGov, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-gov-east-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.us-gov-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.us-gov-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go new file mode 100644 index 00000000000..02e31411566 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go @@ -0,0 +1,284 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package sso + +import ( + "context" + "fmt" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/encoding/httpbinding" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +type awsRestjson1_serializeOpGetRoleCredentials struct { +} + +func (*awsRestjson1_serializeOpGetRoleCredentials) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpGetRoleCredentials) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetRoleCredentialsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/federation/credentials") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsGetRoleCredentialsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsGetRoleCredentialsInput(v *GetRoleCredentialsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.AccessToken != nil && len(*v.AccessToken) > 0 { + locationName := "X-Amz-Sso_bearer_token" + encoder.SetHeader(locationName).String(*v.AccessToken) + } + + if v.AccountId != nil { + encoder.SetQuery("account_id").String(*v.AccountId) + } + + if v.RoleName != nil { + encoder.SetQuery("role_name").String(*v.RoleName) + } + + return nil +} + +type awsRestjson1_serializeOpListAccountRoles struct { +} + +func (*awsRestjson1_serializeOpListAccountRoles) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpListAccountRoles) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListAccountRolesInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/assignment/roles") + request.URL.Path = 
smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsListAccountRolesInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsListAccountRolesInput(v *ListAccountRolesInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.AccessToken != nil && len(*v.AccessToken) > 0 { + locationName := "X-Amz-Sso_bearer_token" + encoder.SetHeader(locationName).String(*v.AccessToken) + } + + if v.AccountId != nil { + encoder.SetQuery("account_id").String(*v.AccountId) + } + + if v.MaxResults != nil { + encoder.SetQuery("max_result").Integer(*v.MaxResults) + } + + if v.NextToken != nil { + encoder.SetQuery("next_token").String(*v.NextToken) + } + + return nil +} + +type awsRestjson1_serializeOpListAccounts struct { +} + +func (*awsRestjson1_serializeOpListAccounts) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpListAccounts) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListAccountsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/assignment/accounts") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsListAccountsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func 
awsRestjson1_serializeOpHttpBindingsListAccountsInput(v *ListAccountsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.AccessToken != nil && len(*v.AccessToken) > 0 { + locationName := "X-Amz-Sso_bearer_token" + encoder.SetHeader(locationName).String(*v.AccessToken) + } + + if v.MaxResults != nil { + encoder.SetQuery("max_result").Integer(*v.MaxResults) + } + + if v.NextToken != nil { + encoder.SetQuery("next_token").String(*v.NextToken) + } + + return nil +} + +type awsRestjson1_serializeOpLogout struct { +} + +func (*awsRestjson1_serializeOpLogout) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpLogout) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*LogoutInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/logout") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsLogoutInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsLogoutInput(v *LogoutInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.AccessToken != nil && len(*v.AccessToken) > 0 { + locationName := "X-Amz-Sso_bearer_token" + encoder.SetHeader(locationName).String(*v.AccessToken) + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go new file mode 100644 index 00000000000..e97a126e8bb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go @@ -0,0 +1,115 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + "fmt" + smithy "github.com/aws/smithy-go" +) + +// Indicates that a problem occurred with the input to the request. For example, a +// required parameter might be missing or out of range. 
+type InvalidRequestException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *InvalidRequestException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidRequestException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidRequestException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidRequestException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidRequestException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified resource doesn't exist. +type ResourceNotFoundException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ResourceNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ResourceNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ResourceNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ResourceNotFoundException" + } + return *e.ErrorCodeOverride +} +func (e *ResourceNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that the request is being made too frequently and is more than what +// the server can handle. +type TooManyRequestsException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *TooManyRequestsException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *TooManyRequestsException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *TooManyRequestsException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "TooManyRequestsException" + } + return *e.ErrorCodeOverride +} +func (e *TooManyRequestsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. +type UnauthorizedException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *UnauthorizedException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *UnauthorizedException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *UnauthorizedException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "UnauthorizedException" + } + return *e.ErrorCodeOverride +} +func (e *UnauthorizedException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go new file mode 100644 index 00000000000..8dc02296b11 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go @@ -0,0 +1,61 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + smithydocument "github.com/aws/smithy-go/document" +) + +// Provides information about your AWS account. +type AccountInfo struct { + + // The identifier of the AWS account that is assigned to the user. + AccountId *string + + // The display name of the AWS account that is assigned to the user. 
+ AccountName *string + + // The email address of the AWS account that is assigned to the user. + EmailAddress *string + + noSmithyDocumentSerde +} + +// Provides information about the role credentials that are assigned to the user. +type RoleCredentials struct { + + // The identifier used for the temporary security credentials. For more + // information, see Using Temporary Security Credentials to Request Access to AWS + // Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) + // in the AWS IAM User Guide. + AccessKeyId *string + + // The date on which temporary security credentials expire. + Expiration int64 + + // The key that is used to sign the request. For more information, see Using + // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) + // in the AWS IAM User Guide. + SecretAccessKey *string + + // The token used for temporary credentials. For more information, see Using + // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) + // in the AWS IAM User Guide. + SessionToken *string + + noSmithyDocumentSerde +} + +// Provides information about the role that is assigned to the user. +type RoleInfo struct { + + // The identifier of the AWS account assigned to the user. + AccountId *string + + // The friendly name of the role that is assigned to the user. + RoleName *string + + noSmithyDocumentSerde +} + +type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/validators.go new file mode 100644 index 00000000000..f6bf461f74b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/validators.go @@ -0,0 +1,175 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
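For orientation, the types and typed errors vendored above are consumed through the generated SSO client. A minimal sketch, not part of the vendored files: it assumes the standard aws-sdk-go-v2 config loader, and the access token, account ID, and role name are placeholders.

package main

import (
	"context"
	"errors"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sso"
	"github.com/aws/aws-sdk-go-v2/service/sso/types"
)

func main() {
	// Load region and credentials from the environment/shared config.
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := sso.NewFromConfig(cfg)

	// All three inputs are required by the generated validators; values are placeholders.
	out, err := client.GetRoleCredentials(context.TODO(), &sso.GetRoleCredentialsInput{
		AccessToken: aws.String("<token from an SSO login flow>"),
		AccountId:   aws.String("123456789012"),
		RoleName:    aws.String("ReadOnlyRole"),
	})

	// Branch on the typed errors defined in types/errors.go above.
	var unauthorized *types.UnauthorizedException
	if errors.As(err, &unauthorized) {
		log.Fatalf("token rejected: %s", unauthorized.ErrorMessage())
	}
	if err != nil {
		log.Fatal(err)
	}

	rc := out.RoleCredentials // *types.RoleCredentials, fields shown above
	fmt.Println(*rc.AccessKeyId, rc.Expiration)
}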
+ +package sso + +import ( + "context" + "fmt" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" +) + +type validateOpGetRoleCredentials struct { +} + +func (*validateOpGetRoleCredentials) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetRoleCredentials) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetRoleCredentialsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetRoleCredentialsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListAccountRoles struct { +} + +func (*validateOpListAccountRoles) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListAccountRoles) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListAccountRolesInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListAccountRolesInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListAccounts struct { +} + +func (*validateOpListAccounts) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListAccounts) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListAccountsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListAccountsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpLogout struct { +} + +func (*validateOpLogout) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpLogout) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*LogoutInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpLogoutInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +func addOpGetRoleCredentialsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetRoleCredentials{}, middleware.After) +} + +func addOpListAccountRolesValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListAccountRoles{}, middleware.After) +} + +func addOpListAccountsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListAccounts{}, middleware.After) +} + +func addOpLogoutValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpLogout{}, middleware.After) +} + +func validateOpGetRoleCredentialsInput(v *GetRoleCredentialsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetRoleCredentialsInput"} + if v.RoleName == 
nil { + invalidParams.Add(smithy.NewErrParamRequired("RoleName")) + } + if v.AccountId == nil { + invalidParams.Add(smithy.NewErrParamRequired("AccountId")) + } + if v.AccessToken == nil { + invalidParams.Add(smithy.NewErrParamRequired("AccessToken")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListAccountRolesInput(v *ListAccountRolesInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListAccountRolesInput"} + if v.AccessToken == nil { + invalidParams.Add(smithy.NewErrParamRequired("AccessToken")) + } + if v.AccountId == nil { + invalidParams.Add(smithy.NewErrParamRequired("AccountId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListAccountsInput(v *ListAccountsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListAccountsInput"} + if v.AccessToken == nil { + invalidParams.Add(smithy.NewErrParamRequired("AccessToken")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpLogoutInput(v *LogoutInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "LogoutInput"} + if v.AccessToken == nil { + invalidParams.Add(smithy.NewErrParamRequired("AccessToken")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md new file mode 100644 index 00000000000..b47827d7243 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md @@ -0,0 +1,317 @@ +# v1.19.1 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.19.0 (2023-11-01) + +* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.3 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.2 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.1 (2023-09-22) + +* No change notes available for this release. + +# v1.17.0 (2023-09-20) + +* **Feature**: Update FIPS endpoints in aws-us-gov. + +# v1.16.0 (2023-09-18) + +* **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* **Feature**: Adds several endpoint ruleset changes across all models: smaller rulesets, removed non-unique regional endpoints, fixes FIPS and DualStack endpoints, and make region not required in SDK::Endpoint. Additional breakfix to cognito-sync field. + +# v1.15.6 (2023-09-05) + +* No change notes available for this release. 
+ +# v1.15.5 (2023-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.4 (2023-08-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.3 (2023-08-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.2 (2023-08-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.1 (2023-08-01) + +* No change notes available for this release. + +# v1.15.0 (2023-07-31) + +* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supercede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.14 (2023-07-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.13 (2023-07-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.12 (2023-06-15) + +* No change notes available for this release. + +# v1.14.11 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.10 (2023-05-04) + +* No change notes available for this release. + +# v1.14.9 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.8 (2023-04-10) + +* No change notes available for this release. + +# v1.14.7 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.6 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.5 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.4 (2023-02-22) + +* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes. + +# v1.14.3 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.2 (2023-02-15) + +* **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. +* **Bug Fix**: Correct error type parsing for restJson services. + +# v1.14.1 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.0 (2023-01-05) + +* **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). + +# v1.13.11 (2022-12-19) + +* No change notes available for this release. + +# v1.13.10 (2022-12-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.9 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.8 (2022-10-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.7 (2022-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.6 (2022-09-30) + +* **Documentation**: Documentation updates for the IAM Identity Center OIDC CLI Reference. 
+ +# v1.13.5 (2022-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.4 (2022-09-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.3 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.2 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.1 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.0 (2022-08-25) + +* **Feature**: Updated required request parameters on IAM Identity Center's OIDC CreateToken action. + +# v1.12.14 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.13 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.12 (2022-08-08) + +* **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On) +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.11 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.10 (2022-07-11) + +* No change notes available for this release. + +# v1.12.9 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.8 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.7 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.6 (2022-05-27) + +* No change notes available for this release. + +# v1.12.5 (2022-05-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.4 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.3 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.2 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.1 (2022-03-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.0 (2022-03-08) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.0 (2022-02-24) + +* **Feature**: API client updated +* **Feature**: Adds RetryMaxAttempts and RetryMode to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adding a new Retry mode of `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttle responses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details, and configuration options. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.0 (2022-01-07) + +* **Feature**: API client updated +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.2 (2021-12-02) + +* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client.
([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514)) +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.1 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.0 (2021-11-06) + +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.0 (2021-10-21) + +* **Feature**: Updated to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.0 (2021-10-11) + +* **Feature**: API client updated +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.0 (2021-09-17) + +* **Feature**: Updated API client and endpoints to latest revision. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2021-08-27) + +* **Feature**: Updated API model to latest revision. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.3 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.2 (2021-08-04) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.1 (2021-07-15) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2021-06-25) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.1 (2021-05-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/LICENSE.txt new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go new file mode 100644 index 00000000000..6a56093d8ac --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go @@ -0,0 +1,526 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package ssooidc + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/defaults" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/retry" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + smithy "github.com/aws/smithy-go" + smithydocument "github.com/aws/smithy-go/document" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net" + "net/http" + "time" +) + +const ServiceID = "SSO OIDC" +const ServiceAPIVersion = "2019-06-10" + +// Client provides the API client to make operations call for AWS SSO OIDC. +type Client struct { + options Options +} + +// New returns an initialized Client based on the functional options. Provide +// additional functional options to further configure the behavior of the client, +// such as changing the client's endpoint or adding custom middleware behavior. +func New(options Options, optFns ...func(*Options)) *Client { + options = options.Copy() + + resolveDefaultLogger(&options) + + setResolvedDefaultsMode(&options) + + resolveRetryer(&options) + + resolveHTTPClient(&options) + + resolveHTTPSignerV4(&options) + + for _, fn := range optFns { + fn(&options) + } + + client := &Client{ + options: options, + } + + return client +} + +type Options struct { + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + APIOptions []func(*middleware.Stack) error + + // The optional application specific identifier appended to the User-Agent header. + AppID string + + // This endpoint will be given as input to an EndpointResolverV2. It is used for + // providing a custom base endpoint that is subject to modifications by the + // processing EndpointResolverV2. + BaseEndpoint *string + + // Configures the events that will be sent to the configured logger. + ClientLogMode aws.ClientLogMode + + // The credentials object to use when signing requests. + Credentials aws.CredentialsProvider + + // The configuration DefaultsMode that the SDK should use when constructing the + // clients initial default settings. + DefaultsMode aws.DefaultsMode + + // The endpoint options to be used when attempting to resolve an endpoint. + EndpointOptions EndpointResolverOptions + + // The service endpoint resolver. + // + // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a + // value for this field will likely prevent you from using any endpoint-related + // service features released after the introduction of EndpointResolverV2 and + // BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom + // endpoint, set the client option BaseEndpoint instead. + EndpointResolver EndpointResolver + + // Resolves the endpoint used for a particular service. This should be used over + // the deprecated EndpointResolver + EndpointResolverV2 EndpointResolverV2 + + // Signature Version 4 (SigV4) Signer + HTTPSignerV4 HTTPSignerV4 + + // The logger writer interface to write logging messages to. + Logger logging.Logger + + // The region to send requests to. 
(Required) + Region string + + // RetryMaxAttempts specifies the maximum number attempts an API client will call + // an operation that fails with a retryable error. A value of 0 is ignored, and + // will not be used to configure the API client created default retryer, or modify + // per operation call's retry max attempts. When creating a new API Clients this + // member will only be used if the Retryer Options member is nil. This value will + // be ignored if Retryer is not nil. If specified in an operation call's functional + // options with a value that is different than the constructed client's Options, + // the Client's Retryer will be wrapped to use the operation's specific + // RetryMaxAttempts value. + RetryMaxAttempts int + + // RetryMode specifies the retry mode the API client will be created with, if + // Retryer option is not also specified. When creating a new API Clients this + // member will only be used if the Retryer Options member is nil. This value will + // be ignored if Retryer is not nil. Currently does not support per operation call + // overrides, may in the future. + RetryMode aws.RetryMode + + // Retryer guides how HTTP requests should be retried in case of recoverable + // failures. When nil the API client will use a default retryer. The kind of + // default retry created by the API client can be changed with the RetryMode + // option. + Retryer aws.Retryer + + // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set + // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You + // should not populate this structure programmatically, or rely on the values here + // within your applications. + RuntimeEnvironment aws.RuntimeEnvironment + + // The initial DefaultsMode used when the client options were constructed. If the + // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved + // value was at that point in time. Currently does not support per operation call + // overrides, may in the future. + resolvedDefaultsMode aws.DefaultsMode + + // The HTTP client to invoke API calls with. Defaults to client's default HTTP + // implementation if nil. + HTTPClient HTTPClient +} + +// WithAPIOptions returns a functional option for setting the Client's APIOptions +// option. +func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { + return func(o *Options) { + o.APIOptions = append(o.APIOptions, optFns...) + } +} + +// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for +// this field will likely prevent you from using any endpoint-related service +// features released after the introduction of EndpointResolverV2 and BaseEndpoint. +// To migrate an EndpointResolver implementation that uses a custom endpoint, set +// the client option BaseEndpoint instead. +func WithEndpointResolver(v EndpointResolver) func(*Options) { + return func(o *Options) { + o.EndpointResolver = v + } +} + +// WithEndpointResolverV2 returns a functional option for setting the Client's +// EndpointResolverV2 option. +func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) { + return func(o *Options) { + o.EndpointResolverV2 = v + } +} + +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +// Copy creates a clone where the APIOptions list is deep copied. 
+func (o Options) Copy() Options { + to := o + to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) + copy(to.APIOptions, o.APIOptions) + + return to +} +func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { + ctx = middleware.ClearStackValues(ctx) + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) + options := c.options.Copy() + resolveEndpointResolverV2(&options) + + for _, fn := range optFns { + fn(&options) + } + + finalizeRetryMaxAttemptOptions(&options, *c) + + finalizeClientEndpointResolverOptions(&options) + + for _, fn := range stackFns { + if err := fn(stack, options); err != nil { + return nil, metadata, err + } + } + + for _, fn := range options.APIOptions { + if err := fn(stack); err != nil { + return nil, metadata, err + } + } + + handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) + result, metadata, err = handler.Handle(ctx, params) + if err != nil { + err = &smithy.OperationError{ + ServiceID: ServiceID, + OperationName: opID, + Err: err, + } + } + return result, metadata, err +} + +type noSmithyDocumentSerde = smithydocument.NoSerde + +type legacyEndpointContextSetter struct { + LegacyResolver EndpointResolver +} + +func (*legacyEndpointContextSetter) ID() string { + return "legacyEndpointContextSetter" +} + +func (m *legacyEndpointContextSetter) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + if m.LegacyResolver != nil { + ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, true) + } + + return next.HandleInitialize(ctx, in) + +} +func addlegacyEndpointContextSetter(stack *middleware.Stack, o Options) error { + return stack.Initialize.Add(&legacyEndpointContextSetter{ + LegacyResolver: o.EndpointResolver, + }, middleware.Before) +} + +func resolveDefaultLogger(o *Options) { + if o.Logger != nil { + return + } + o.Logger = logging.Nop{} +} + +func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { + return middleware.AddSetLoggerMiddleware(stack, o.Logger) +} + +func setResolvedDefaultsMode(o *Options) { + if len(o.resolvedDefaultsMode) > 0 { + return + } + + var mode aws.DefaultsMode + mode.SetFromString(string(o.DefaultsMode)) + + if mode == aws.DefaultsModeAuto { + mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment) + } + + o.resolvedDefaultsMode = mode +} + +// NewFromConfig returns a new client from the provided config. +func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { + opts := Options{ + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, + } + resolveAWSRetryerProvider(cfg, &opts) + resolveAWSRetryMaxAttempts(cfg, &opts) + resolveAWSRetryMode(cfg, &opts) + resolveAWSEndpointResolver(cfg, &opts) + resolveUseDualStackEndpoint(cfg, &opts) + resolveUseFIPSEndpoint(cfg, &opts) + resolveBaseEndpoint(cfg, &opts) + return New(opts, optFns...) 
+} + +func resolveHTTPClient(o *Options) { + var buildable *awshttp.BuildableClient + + if o.HTTPClient != nil { + var ok bool + buildable, ok = o.HTTPClient.(*awshttp.BuildableClient) + if !ok { + return + } + } else { + buildable = awshttp.NewBuildableClient() + } + + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) { + if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok { + dialer.Timeout = dialerTimeout + } + }) + + buildable = buildable.WithTransportOptions(func(transport *http.Transport) { + if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok { + transport.TLSHandshakeTimeout = tlsHandshakeTimeout + } + }) + } + + o.HTTPClient = buildable +} + +func resolveRetryer(o *Options) { + if o.Retryer != nil { + return + } + + if len(o.RetryMode) == 0 { + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + o.RetryMode = modeConfig.RetryMode + } + } + if len(o.RetryMode) == 0 { + o.RetryMode = aws.RetryModeStandard + } + + var standardOptions []func(*retry.StandardOptions) + if v := o.RetryMaxAttempts; v != 0 { + standardOptions = append(standardOptions, func(so *retry.StandardOptions) { + so.MaxAttempts = v + }) + } + + switch o.RetryMode { + case aws.RetryModeAdaptive: + var adaptiveOptions []func(*retry.AdaptiveModeOptions) + if len(standardOptions) != 0 { + adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) { + ao.StandardOptions = append(ao.StandardOptions, standardOptions...) + }) + } + o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...) + + default: + o.Retryer = retry.NewStandard(standardOptions...) + } +} + +func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { + if cfg.Retryer == nil { + return + } + o.Retryer = cfg.Retryer() +} + +func resolveAWSRetryMode(cfg aws.Config, o *Options) { + if len(cfg.RetryMode) == 0 { + return + } + o.RetryMode = cfg.RetryMode +} +func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { + if cfg.RetryMaxAttempts == 0 { + return + } + o.RetryMaxAttempts = cfg.RetryMaxAttempts +} + +func finalizeRetryMaxAttemptOptions(o *Options, client Client) { + if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { + return + } + + o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) +} + +func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { + if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil { + return + } + o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions) +} + +func addClientUserAgent(stack *middleware.Stack, options Options) error { + if err := awsmiddleware.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "ssooidc", goModuleVersion)(stack); err != nil { + return err + } + + if len(options.AppID) > 0 { + return awsmiddleware.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID)(stack) + } + + return nil +} + +func addHTTPSignerV4Middleware(stack *middleware.Stack, o Options) error { + mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{ + CredentialsProvider: o.Credentials, + Signer: o.HTTPSignerV4, + LogSigning: o.ClientLogMode.IsSigning(), + }) + return stack.Finalize.Add(mw, middleware.After) +} + +type HTTPSignerV4 interface { + SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns 
...func(*v4.SignerOptions)) error +} + +func resolveHTTPSignerV4(o *Options) { + if o.HTTPSignerV4 != nil { + return + } + o.HTTPSignerV4 = newDefaultV4Signer(*o) +} + +func newDefaultV4Signer(o Options) *v4.Signer { + return v4.NewSigner(func(so *v4.SignerOptions) { + so.Logger = o.Logger + so.LogSigning = o.ClientLogMode.IsSigning() + }) +} + +func addRetryMiddlewares(stack *middleware.Stack, o Options) error { + mo := retry.AddRetryMiddlewaresOptions{ + Retryer: o.Retryer, + LogRetryAttempts: o.ClientLogMode.IsRetries(), + } + return retry.AddRetryMiddlewares(stack, mo) +} + +// resolves dual-stack endpoint configuration +func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseDualStackEndpoint = value + } + return nil +} + +// resolves FIPS endpoint configuration +func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseFIPSEndpoint = value + } + return nil +} + +func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error { + return awsmiddleware.AddRequestIDRetrieverMiddleware(stack) +} + +func addResponseErrorMiddleware(stack *middleware.Stack) error { + return awshttp.AddResponseErrorMiddleware(stack) +} + +func addRequestResponseLogging(stack *middleware.Stack, o Options) error { + return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ + LogRequest: o.ClientLogMode.IsRequest(), + LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), + LogResponse: o.ClientLogMode.IsResponse(), + LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), + }, middleware.After) +} + +type endpointDisableHTTPSMiddleware struct { + EndpointDisableHTTPS bool +} + +func (*endpointDisableHTTPSMiddleware) ID() string { + return "endpointDisableHTTPSMiddleware" +} + +func (m *endpointDisableHTTPSMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.EndpointDisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) { + req.URL.Scheme = "http" + } + + return next.HandleSerialize(ctx, in) + +} +func addendpointDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { + return stack.Serialize.Insert(&endpointDisableHTTPSMiddleware{ + EndpointDisableHTTPS: o.EndpointOptions.DisableHTTPS, + }, "OperationSerializer", middleware.Before) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go new file mode 100644 index 00000000000..43df6256cf0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go @@ -0,0 +1,316 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
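[Editor's orientation sketch; not part of the vendored diff. It shows how the NewFromConfig constructor and the per-call option copying in invokeOperation/Options.Copy above are typically used; the config.LoadDefaultConfig loader is the standard one from github.com/aws/aws-sdk-go-v2/config.]

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
)

func main() {
	// Load region/credentials the usual way; any shared-config source works.
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}

	// Client-wide option override. Per-operation functional options work the
	// same way, because invokeOperation applies them to a fresh Options.Copy(),
	// so they never mutate the shared client options.
	client := ssooidc.NewFromConfig(cfg, func(o *ssooidc.Options) {
		o.RetryMaxAttempts = 5
	})
	_ = client
}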
+ +package ssooidc + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates and returns an access token for the authorized client. The access token +// issued will be used to fetch short-term credentials for the assigned roles in +// the AWS account. +func (c *Client) CreateToken(ctx context.Context, params *CreateTokenInput, optFns ...func(*Options)) (*CreateTokenOutput, error) { + if params == nil { + params = &CreateTokenInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateToken", params, optFns, c.addOperationCreateTokenMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateTokenOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateTokenInput struct { + + // The unique identifier string for each client. This value should come from the + // persisted result of the RegisterClient API. + // + // This member is required. + ClientId *string + + // A secret string generated for the client. This value should come from the + // persisted result of the RegisterClient API. + // + // This member is required. + ClientSecret *string + + // Supports grant types for the authorization code, refresh token, and device code + // request. For device code requests, specify the following value: + // urn:ietf:params:oauth:grant-type:device_code For information about how to + // obtain the device code, see the StartDeviceAuthorization topic. + // + // This member is required. + GrantType *string + + // The authorization code received from the authorization service. This parameter + // is required to perform an authorization grant request to get access to a token. + Code *string + + // Used only when calling this API for the device code grant type. This short-term + // code is used to identify this authentication attempt. This should come from an + // in-memory reference to the result of the StartDeviceAuthorization API. + DeviceCode *string + + // The location of the application that will receive the authorization code. Users + // authorize the service to send the request to this location. + RedirectUri *string + + // Currently, refreshToken is not yet implemented and is not supported. For more + // information about the features and limitations of the current IAM Identity + // Center OIDC implementation, see Considerations for Using this Guide in the IAM + // Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) + // . The token used to obtain an access token in the event that the access token is + // invalid or expired. + RefreshToken *string + + // The list of scopes that is defined by the client. Upon authorization, this list + // is used to restrict permissions when granting an access token. + Scope []string + + noSmithyDocumentSerde +} + +type CreateTokenOutput struct { + + // An opaque token to access IAM Identity Center resources assigned to a user. + AccessToken *string + + // Indicates the time in seconds when an access token will expire. + ExpiresIn int32 + + // Currently, idToken is not yet implemented and is not supported. 
For more + // information about the features and limitations of the current IAM Identity + // Center OIDC implementation, see Considerations for Using this Guide in the IAM + // Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) + // . The identifier of the user that associated with the access token, if present. + IdToken *string + + // Currently, refreshToken is not yet implemented and is not supported. For more + // information about the features and limitations of the current IAM Identity + // Center OIDC implementation, see Considerations for Using this Guide in the IAM + // Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) + // . A token that, if present, can be used to refresh a previously issued access + // token that might have expired. + RefreshToken *string + + // Used to notify the client that the returned token is an access token. The + // supported type is BearerToken . + TokenType *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateTokenMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpCreateToken{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpCreateToken{}, middleware.After) + if err != nil { + return err + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addCreateTokenResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addOpCreateTokenValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateToken(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateToken(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "CreateToken", + } +} + +type opCreateTokenResolveEndpointMiddleware struct { + EndpointResolver 
EndpointResolverV2 + BuiltInResolver builtInParameterResolver +} + +func (*opCreateTokenResolveEndpointMiddleware) ID() string { + return "ResolveEndpointV2" +} + +func (m *opCreateTokenResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.EndpointResolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + params := EndpointParameters{} + + m.BuiltInResolver.ResolveBuiltIns(¶ms) + + var resolvedEndpoint smithyendpoints.Endpoint + resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL = &resolvedEndpoint.URI + + for k := range resolvedEndpoint.Headers { + req.Header.Set( + k, + resolvedEndpoint.Headers.Get(k), + ) + } + + authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) + if err != nil { + var nfe *internalauth.NoAuthenticationSchemesFoundError + if errors.As(err, &nfe) { + // if no auth scheme is found, default to sigv4 + signingName := "awsssooidc" + signingRegion := m.BuiltInResolver.(*builtInResolver).Region + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + + } + var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError + if errors.As(err, &ue) { + return out, metadata, fmt.Errorf( + "This operation requests signer version(s) %v but the client only supports %v", + ue.UnsupportedSchemes, + internalauth.SupportedSchemes, + ) + } + } + + for _, authScheme := range authSchemes { + switch authScheme.(type) { + case *internalauth.AuthenticationSchemeV4: + v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) + var signingName, signingRegion string + if v4Scheme.SigningName == nil { + signingName = "awsssooidc" + } else { + signingName = *v4Scheme.SigningName + } + if v4Scheme.SigningRegion == nil { + signingRegion = m.BuiltInResolver.(*builtInResolver).Region + } else { + signingRegion = *v4Scheme.SigningRegion + } + if v4Scheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. + ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + break + case *internalauth.AuthenticationSchemeV4A: + v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) + if v4aScheme.SigningName == nil { + v4aScheme.SigningName = aws.String("awsssooidc") + } + if v4aScheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. 
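[Editor's sketch of the device-code grant that the CreateToken doc comments above describe; the helper name createDeviceToken is hypothetical, while the SDK calls and input fields are the vendored ones.]

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
)

// createDeviceToken exchanges a device code for an access token using the
// device-code grant type. clientID and clientSecret are assumed to come from
// a persisted RegisterClient result, and deviceCode from a prior
// StartDeviceAuthorization call, as the doc comments above require.
func createDeviceToken(ctx context.Context, client *ssooidc.Client, clientID, clientSecret, deviceCode string) (string, error) {
	out, err := client.CreateToken(ctx, &ssooidc.CreateTokenInput{
		ClientId:     aws.String(clientID),
		ClientSecret: aws.String(clientSecret),
		GrantType:    aws.String("urn:ietf:params:oauth:grant-type:device_code"),
		DeviceCode:   aws.String(deviceCode),
	})
	if err != nil {
		return "", fmt.Errorf("create token: %w", err)
	}
	return aws.ToString(out.AccessToken), nil
}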
+ ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) + ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) + break + case *internalauth.AuthenticationSchemeNone: + break + } + } + + return next.HandleSerialize(ctx, in) +} + +func addCreateTokenResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { + return stack.Serialize.Insert(&opCreateTokenResolveEndpointMiddleware{ + EndpointResolver: options.EndpointResolverV2, + BuiltInResolver: &builtInResolver{ + Region: options.Region, + UseDualStack: options.EndpointOptions.UseDualStackEndpoint, + UseFIPS: options.EndpointOptions.UseFIPSEndpoint, + Endpoint: options.BaseEndpoint, + }, + }, "ResolveEndpoint", middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go new file mode 100644 index 00000000000..b88ebb7067f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go @@ -0,0 +1,281 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ssooidc + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Registers a client with IAM Identity Center. This allows clients to initiate +// device authorization. The output should be persisted for reuse through many +// authentication requests. +func (c *Client) RegisterClient(ctx context.Context, params *RegisterClientInput, optFns ...func(*Options)) (*RegisterClientOutput, error) { + if params == nil { + params = &RegisterClientInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "RegisterClient", params, optFns, c.addOperationRegisterClientMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*RegisterClientOutput) + out.ResultMetadata = metadata + return out, nil +} + +type RegisterClientInput struct { + + // The friendly name of the client. + // + // This member is required. + ClientName *string + + // The type of client. The service supports only public as a client type. Anything + // other than public will be rejected by the service. + // + // This member is required. + ClientType *string + + // The list of scopes that are defined by the client. Upon authorization, this + // list is used to restrict permissions when granting an access token. + Scopes []string + + noSmithyDocumentSerde +} + +type RegisterClientOutput struct { + + // The endpoint where the client can request authorization. + AuthorizationEndpoint *string + + // The unique identifier string for each client. This client uses this identifier + // to get authenticated by the service in subsequent calls. + ClientId *string + + // Indicates the time at which the clientId and clientSecret were issued. + ClientIdIssuedAt int64 + + // A secret string generated for the client. The client will use this string to + // get authenticated by the service in subsequent calls. + ClientSecret *string + + // Indicates the time at which the clientId and clientSecret will become invalid. + ClientSecretExpiresAt int64 + + // The endpoint where the client can get an access token. 
+ TokenEndpoint *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationRegisterClientMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpRegisterClient{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpRegisterClient{}, middleware.After) + if err != nil { + return err + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addRegisterClientResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addOpRegisterClientValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRegisterClient(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opRegisterClient(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "RegisterClient", + } +} + +type opRegisterClientResolveEndpointMiddleware struct { + EndpointResolver EndpointResolverV2 + BuiltInResolver builtInParameterResolver +} + +func (*opRegisterClientResolveEndpointMiddleware) ID() string { + return "ResolveEndpointV2" +} + +func (m *opRegisterClientResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.EndpointResolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + params := EndpointParameters{} + + m.BuiltInResolver.ResolveBuiltIns(¶ms) + + var resolvedEndpoint smithyendpoints.Endpoint + resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) + if err != nil { + return out, 
metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL = &resolvedEndpoint.URI + + for k := range resolvedEndpoint.Headers { + req.Header.Set( + k, + resolvedEndpoint.Headers.Get(k), + ) + } + + authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) + if err != nil { + var nfe *internalauth.NoAuthenticationSchemesFoundError + if errors.As(err, &nfe) { + // if no auth scheme is found, default to sigv4 + signingName := "awsssooidc" + signingRegion := m.BuiltInResolver.(*builtInResolver).Region + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + + } + var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError + if errors.As(err, &ue) { + return out, metadata, fmt.Errorf( + "This operation requests signer version(s) %v but the client only supports %v", + ue.UnsupportedSchemes, + internalauth.SupportedSchemes, + ) + } + } + + for _, authScheme := range authSchemes { + switch authScheme.(type) { + case *internalauth.AuthenticationSchemeV4: + v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) + var signingName, signingRegion string + if v4Scheme.SigningName == nil { + signingName = "awsssooidc" + } else { + signingName = *v4Scheme.SigningName + } + if v4Scheme.SigningRegion == nil { + signingRegion = m.BuiltInResolver.(*builtInResolver).Region + } else { + signingRegion = *v4Scheme.SigningRegion + } + if v4Scheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. + ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + break + case *internalauth.AuthenticationSchemeV4A: + v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) + if v4aScheme.SigningName == nil { + v4aScheme.SigningName = aws.String("awsssooidc") + } + if v4aScheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. 
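[Editor's sketch for RegisterClient, whose output the doc comments above say should be persisted for reuse; registerPublicClient is a hypothetical helper.]

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
)

// registerPublicClient registers a client once and returns the identifiers
// that later StartDeviceAuthorization / CreateToken calls need. The result
// should be persisted and reused until ClientSecretExpiresAt.
func registerPublicClient(ctx context.Context, client *ssooidc.Client, name string) (clientID, clientSecret string, err error) {
	out, err := client.RegisterClient(ctx, &ssooidc.RegisterClientInput{
		ClientName: aws.String(name),
		ClientType: aws.String("public"), // the only client type the service accepts
	})
	if err != nil {
		return "", "", fmt.Errorf("register client: %w", err)
	}
	return aws.ToString(out.ClientId), aws.ToString(out.ClientSecret), nil
}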
+ ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) + ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) + break + case *internalauth.AuthenticationSchemeNone: + break + } + } + + return next.HandleSerialize(ctx, in) +} + +func addRegisterClientResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { + return stack.Serialize.Insert(&opRegisterClientResolveEndpointMiddleware{ + EndpointResolver: options.EndpointResolverV2, + BuiltInResolver: &builtInResolver{ + Region: options.Region, + UseDualStack: options.EndpointOptions.UseDualStackEndpoint, + UseFIPS: options.EndpointOptions.UseFIPSEndpoint, + Endpoint: options.BaseEndpoint, + }, + }, "ResolveEndpoint", middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go new file mode 100644 index 00000000000..327da5f7373 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go @@ -0,0 +1,289 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ssooidc + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Initiates device authorization by requesting a pair of verification codes from +// the authorization service. +func (c *Client) StartDeviceAuthorization(ctx context.Context, params *StartDeviceAuthorizationInput, optFns ...func(*Options)) (*StartDeviceAuthorizationOutput, error) { + if params == nil { + params = &StartDeviceAuthorizationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "StartDeviceAuthorization", params, optFns, c.addOperationStartDeviceAuthorizationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*StartDeviceAuthorizationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type StartDeviceAuthorizationInput struct { + + // The unique identifier string for the client that is registered with IAM + // Identity Center. This value should come from the persisted result of the + // RegisterClient API operation. + // + // This member is required. + ClientId *string + + // A secret string that is generated for the client. This value should come from + // the persisted result of the RegisterClient API operation. + // + // This member is required. + ClientSecret *string + + // The URL for the AWS access portal. For more information, see Using the AWS + // access portal (https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html) + // in the IAM Identity Center User Guide. + // + // This member is required. + StartUrl *string + + noSmithyDocumentSerde +} + +type StartDeviceAuthorizationOutput struct { + + // The short-lived code that is used by the device when polling for a session + // token. + DeviceCode *string + + // Indicates the number of seconds in which the verification code will become + // invalid. + ExpiresIn int32 + + // Indicates the number of seconds the client must wait between attempts when + // polling for a session. + Interval int32 + + // A one-time user verification code. 
This is needed to authorize an in-use device. + UserCode *string + + // The URI of the verification page that takes the userCode to authorize the + // device. + VerificationUri *string + + // An alternate URL that the client can use to automatically launch a browser. + // This process skips the manual step in which the user visits the verification + // page and enters their code. + VerificationUriComplete *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationStartDeviceAuthorizationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpStartDeviceAuthorization{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpStartDeviceAuthorization{}, middleware.After) + if err != nil { + return err + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addStartDeviceAuthorizationResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addOpStartDeviceAuthorizationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opStartDeviceAuthorization(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opStartDeviceAuthorization(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "StartDeviceAuthorization", + } +} + +type opStartDeviceAuthorizationResolveEndpointMiddleware struct { + EndpointResolver EndpointResolverV2 + BuiltInResolver builtInParameterResolver +} + +func (*opStartDeviceAuthorizationResolveEndpointMiddleware) ID() string { + return "ResolveEndpointV2" +} + +func (m *opStartDeviceAuthorizationResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, 
in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.EndpointResolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + params := EndpointParameters{} + + m.BuiltInResolver.ResolveBuiltIns(¶ms) + + var resolvedEndpoint smithyendpoints.Endpoint + resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL = &resolvedEndpoint.URI + + for k := range resolvedEndpoint.Headers { + req.Header.Set( + k, + resolvedEndpoint.Headers.Get(k), + ) + } + + authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) + if err != nil { + var nfe *internalauth.NoAuthenticationSchemesFoundError + if errors.As(err, &nfe) { + // if no auth scheme is found, default to sigv4 + signingName := "awsssooidc" + signingRegion := m.BuiltInResolver.(*builtInResolver).Region + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + + } + var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError + if errors.As(err, &ue) { + return out, metadata, fmt.Errorf( + "This operation requests signer version(s) %v but the client only supports %v", + ue.UnsupportedSchemes, + internalauth.SupportedSchemes, + ) + } + } + + for _, authScheme := range authSchemes { + switch authScheme.(type) { + case *internalauth.AuthenticationSchemeV4: + v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) + var signingName, signingRegion string + if v4Scheme.SigningName == nil { + signingName = "awsssooidc" + } else { + signingName = *v4Scheme.SigningName + } + if v4Scheme.SigningRegion == nil { + signingRegion = m.BuiltInResolver.(*builtInResolver).Region + } else { + signingRegion = *v4Scheme.SigningRegion + } + if v4Scheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. + ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + break + case *internalauth.AuthenticationSchemeV4A: + v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) + if v4aScheme.SigningName == nil { + v4aScheme.SigningName = aws.String("awsssooidc") + } + if v4aScheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. 
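[Editor's sketch for StartDeviceAuthorization, which begins the device flow that CreateToken later completes; startDeviceFlow is a hypothetical helper, and startURL is the AWS access portal URL noted in the input doc comments above.]

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
)

// startDeviceFlow kicks off device authorization for a registered client.
// The returned DeviceCode feeds the CreateToken polling loop, which should
// wait out.Interval seconds between attempts.
func startDeviceFlow(ctx context.Context, client *ssooidc.Client, clientID, clientSecret, startURL string) (*ssooidc.StartDeviceAuthorizationOutput, error) {
	out, err := client.StartDeviceAuthorization(ctx, &ssooidc.StartDeviceAuthorizationInput{
		ClientId:     aws.String(clientID),
		ClientSecret: aws.String(clientSecret),
		StartUrl:     aws.String(startURL),
	})
	if err != nil {
		return nil, fmt.Errorf("start device authorization: %w", err)
	}
	// VerificationUriComplete embeds the user code, so the user can skip
	// typing it in manually.
	fmt.Println("visit:", aws.ToString(out.VerificationUriComplete))
	return out, nil
}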
+ ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) + ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) + break + case *internalauth.AuthenticationSchemeNone: + break + } + } + + return next.HandleSerialize(ctx, in) +} + +func addStartDeviceAuthorizationResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { + return stack.Serialize.Insert(&opStartDeviceAuthorizationResolveEndpointMiddleware{ + EndpointResolver: options.EndpointResolverV2, + BuiltInResolver: &builtInResolver{ + Region: options.Region, + UseDualStack: options.EndpointOptions.UseDualStackEndpoint, + UseFIPS: options.EndpointOptions.UseFIPSEndpoint, + Endpoint: options.BaseEndpoint, + }, + }, "ResolveEndpoint", middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go new file mode 100644 index 00000000000..ca30d22f97b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go @@ -0,0 +1,1689 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ssooidc + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws/protocol/restjson" + "github.com/aws/aws-sdk-go-v2/service/ssooidc/types" + smithy "github.com/aws/smithy-go" + smithyio "github.com/aws/smithy-go/io" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" + "strings" +) + +type awsRestjson1_deserializeOpCreateToken struct { +} + +func (*awsRestjson1_deserializeOpCreateToken) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpCreateToken) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorCreateToken(response, &metadata) + } + output := &CreateTokenOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentCreateTokenOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorCreateToken(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + 
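[The error switch in the deserializer below maps service error codes onto typed exceptions in the types package. Editor's sketch of how a caller consumes them when polling for a device-flow token; pollCreateToken is a hypothetical helper, and the five-second back-off on SlowDownException follows the RFC 8628 convention the service uses.]

package example

import (
	"context"
	"errors"
	"time"

	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
	"github.com/aws/aws-sdk-go-v2/service/ssooidc/types"
)

// pollCreateToken retries CreateToken until the user approves the device.
// AuthorizationPendingException means "keep polling"; SlowDownException means
// "poll less often". interval is the seconds value StartDeviceAuthorization
// returned. errors.As sees the typed errors through the smithy.OperationError
// wrapper that invokeOperation adds.
func pollCreateToken(ctx context.Context, client *ssooidc.Client, in *ssooidc.CreateTokenInput, interval int32) (*ssooidc.CreateTokenOutput, error) {
	wait := time.Duration(interval) * time.Second
	for {
		out, err := client.CreateToken(ctx, in)
		if err == nil {
			return out, nil
		}
		var pending *types.AuthorizationPendingException
		var slow *types.SlowDownException
		switch {
		case errors.As(err, &pending): // user has not approved yet
		case errors.As(err, &slow): // back off as the service requests
			wait += 5 * time.Second
		default:
			return nil, err
		}
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-time.After(wait):
		}
	}
}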
if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("AuthorizationPendingException", errorCode): + return awsRestjson1_deserializeErrorAuthorizationPendingException(response, errorBody) + + case strings.EqualFold("ExpiredTokenException", errorCode): + return awsRestjson1_deserializeErrorExpiredTokenException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidClientException", errorCode): + return awsRestjson1_deserializeErrorInvalidClientException(response, errorBody) + + case strings.EqualFold("InvalidGrantException", errorCode): + return awsRestjson1_deserializeErrorInvalidGrantException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("InvalidScopeException", errorCode): + return awsRestjson1_deserializeErrorInvalidScopeException(response, errorBody) + + case strings.EqualFold("SlowDownException", errorCode): + return awsRestjson1_deserializeErrorSlowDownException(response, errorBody) + + case strings.EqualFold("UnauthorizedClientException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedClientException(response, errorBody) + + case strings.EqualFold("UnsupportedGrantTypeException", errorCode): + return awsRestjson1_deserializeErrorUnsupportedGrantTypeException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentCreateTokenOutput(v **CreateTokenOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CreateTokenOutput + if *v == nil { + sv = &CreateTokenOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "accessToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AccessToken to be of type string, got %T instead", value) + } + sv.AccessToken = 
ptr.String(jtv) + } + + case "expiresIn": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected ExpirationInSeconds to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ExpiresIn = int32(i64) + } + + case "idToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected IdToken to be of type string, got %T instead", value) + } + sv.IdToken = ptr.String(jtv) + } + + case "refreshToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RefreshToken to be of type string, got %T instead", value) + } + sv.RefreshToken = ptr.String(jtv) + } + + case "tokenType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TokenType to be of type string, got %T instead", value) + } + sv.TokenType = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpRegisterClient struct { +} + +func (*awsRestjson1_deserializeOpRegisterClient) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpRegisterClient) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorRegisterClient(response, &metadata) + } + output := &RegisterClientOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentRegisterClientOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorRegisterClient(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, 
message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidClientMetadataException", errorCode): + return awsRestjson1_deserializeErrorInvalidClientMetadataException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("InvalidScopeException", errorCode): + return awsRestjson1_deserializeErrorInvalidScopeException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentRegisterClientOutput(v **RegisterClientOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *RegisterClientOutput + if *v == nil { + sv = &RegisterClientOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "authorizationEndpoint": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected URI to be of type string, got %T instead", value) + } + sv.AuthorizationEndpoint = ptr.String(jtv) + } + + case "clientId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ClientId to be of type string, got %T instead", value) + } + sv.ClientId = ptr.String(jtv) + } + + case "clientIdIssuedAt": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected LongTimeStampType to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ClientIdIssuedAt = i64 + } + + case "clientSecret": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ClientSecret to be of type string, got %T instead", value) + } + sv.ClientSecret = ptr.String(jtv) + } + + case "clientSecretExpiresAt": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected LongTimeStampType to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ClientSecretExpiresAt = i64 + } + + case "tokenEndpoint": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected URI to be of type string, got %T instead", value) + } + sv.TokenEndpoint = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpStartDeviceAuthorization struct { +} + +func (*awsRestjson1_deserializeOpStartDeviceAuthorization) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpStartDeviceAuthorization) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next 
middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorStartDeviceAuthorization(response, &metadata) + } + output := &StartDeviceAuthorizationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentStartDeviceAuthorizationOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorStartDeviceAuthorization(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidClientException", errorCode): + return awsRestjson1_deserializeErrorInvalidClientException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("SlowDownException", errorCode): + return awsRestjson1_deserializeErrorSlowDownException(response, errorBody) + + case strings.EqualFold("UnauthorizedClientException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedClientException(response, errorBody) + + default: + genericError 
:= &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentStartDeviceAuthorizationOutput(v **StartDeviceAuthorizationOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *StartDeviceAuthorizationOutput + if *v == nil { + sv = &StartDeviceAuthorizationOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "deviceCode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DeviceCode to be of type string, got %T instead", value) + } + sv.DeviceCode = ptr.String(jtv) + } + + case "expiresIn": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected ExpirationInSeconds to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ExpiresIn = int32(i64) + } + + case "interval": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected IntervalInSeconds to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.Interval = int32(i64) + } + + case "userCode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected UserCode to be of type string, got %T instead", value) + } + sv.UserCode = ptr.String(jtv) + } + + case "verificationUri": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected URI to be of type string, got %T instead", value) + } + sv.VerificationUri = ptr.String(jtv) + } + + case "verificationUriComplete": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected URI to be of type string, got %T instead", value) + } + sv.VerificationUriComplete = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeErrorAccessDeniedException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.AccessDeniedException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentAccessDeniedException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorAuthorizationPendingException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.AuthorizationPendingException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err 
!= nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentAuthorizationPendingException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorExpiredTokenException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.ExpiredTokenException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentExpiredTokenException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorInternalServerException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InternalServerException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentInternalServerException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorInvalidClientException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidClientException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentInvalidClientException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", 
err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorInvalidClientMetadataException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidClientMetadataException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentInvalidClientMetadataException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorInvalidGrantException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidGrantException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentInvalidGrantException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorInvalidRequestException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidRequestException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentInvalidRequestException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorInvalidScopeException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidScopeException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + 
if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentInvalidScopeException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorSlowDownException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.SlowDownException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentSlowDownException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorUnauthorizedClientException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.UnauthorizedClientException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentUnauthorizedClientException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorUnsupportedGrantTypeException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.UnsupportedGrantTypeException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentUnsupportedGrantTypeException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: 
fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeDocumentAccessDeniedException(v **types.AccessDeniedException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AccessDeniedException + if *v == nil { + sv = &types.AccessDeniedException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentAuthorizationPendingException(v **types.AuthorizationPendingException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AuthorizationPendingException + if *v == nil { + sv = &types.AuthorizationPendingException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentExpiredTokenException(v **types.ExpiredTokenException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ExpiredTokenException + if *v == nil { + sv = &types.ExpiredTokenException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInternalServerException(v **types.InternalServerException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return 
fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InternalServerException + if *v == nil { + sv = &types.InternalServerException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInvalidClientException(v **types.InvalidClientException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidClientException + if *v == nil { + sv = &types.InvalidClientException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInvalidClientMetadataException(v **types.InvalidClientMetadataException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidClientMetadataException + if *v == nil { + sv = &types.InvalidClientMetadataException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInvalidGrantException(v **types.InvalidGrantException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidGrantException + if *v == nil { + sv = &types.InvalidGrantException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + 
jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInvalidRequestException(v **types.InvalidRequestException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidRequestException + if *v == nil { + sv = &types.InvalidRequestException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInvalidScopeException(v **types.InvalidScopeException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidScopeException + if *v == nil { + sv = &types.InvalidScopeException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentSlowDownException(v **types.SlowDownException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.SlowDownException + if *v == nil { + sv = &types.SlowDownException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentUnauthorizedClientException(v **types.UnauthorizedClientException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := 
value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.UnauthorizedClientException + if *v == nil { + sv = &types.UnauthorizedClientException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentUnsupportedGrantTypeException(v **types.UnsupportedGrantTypeException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.UnsupportedGrantTypeException + if *v == nil { + sv = &types.UnsupportedGrantTypeException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go new file mode 100644 index 00000000000..2239427d889 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go @@ -0,0 +1,36 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +// Package ssooidc provides the API client, operations, and parameter types for +// AWS SSO OIDC. +// +// AWS IAM Identity Center (successor to AWS Single Sign-On) OpenID Connect (OIDC) +// is a web service that enables a client (such as AWS CLI or a native application) +// to register with IAM Identity Center. The service also enables the client to +// fetch the user’s access token upon successful authentication and authorization +// with IAM Identity Center. Although AWS Single Sign-On was renamed, the sso and +// identitystore API namespaces will continue to retain their original name for +// backward compatibility purposes. For more information, see IAM Identity Center +// rename (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed) +// . Considerations for Using This Guide Before you begin using this guide, we +// recommend that you first review the following important information about how +// the IAM Identity Center OIDC service works. +// - The IAM Identity Center OIDC service currently implements only the portions +// of the OAuth 2.0 Device Authorization Grant standard ( +// https://tools.ietf.org/html/rfc8628 (https://tools.ietf.org/html/rfc8628) ) +// that are necessary to enable single sign-on authentication with the AWS CLI. 
+// Support for other OIDC flows frequently needed for native applications, such as +// Authorization Code Flow (+ PKCE), will be addressed in future releases. +// - The service emits only OIDC access tokens, such that obtaining a new token +// (for example, token refresh) requires explicit user re-authentication. +// - The access tokens provided by this service grant access to all AWS account +// entitlements assigned to an IAM Identity Center user, not just a particular +// application. +// - The documentation in this guide does not describe the mechanism to convert +// the access token into AWS Auth (“sigv4”) credentials for use with IAM-protected +// AWS service endpoints. For more information, see GetRoleCredentials (https://docs.aws.amazon.com/singlesignon/latest/PortalAPIReference/API_GetRoleCredentials.html) +// in the IAM Identity Center Portal API Reference Guide. +// +// For general information about IAM Identity Center, see What is IAM Identity +// Center? (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html) +// in the IAM Identity Center User Guide. +package ssooidc diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go new file mode 100644 index 00000000000..50b490cbdb9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go @@ -0,0 +1,519 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ssooidc + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn" + internalendpoints "github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net/http" + "net/url" + "os" + "strings" +) + +// EndpointResolverOptions is the service endpoint resolver options +type EndpointResolverOptions = internalendpoints.Options + +// EndpointResolver interface for resolving service endpoints. +type EndpointResolver interface { + ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error) +} + +var _ EndpointResolver = &internalendpoints.Resolver{} + +// NewDefaultEndpointResolver constructs a new service endpoint resolver +func NewDefaultEndpointResolver() *internalendpoints.Resolver { + return internalendpoints.New() +} + +// EndpointResolverFunc is a helper utility that wraps a function so it satisfies +// the EndpointResolver interface. This is useful when you want to add additional +// endpoint resolving logic, or stub out specific endpoints with custom values. +type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error) + +func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + return fn(region, options) +} + +// EndpointResolverFromURL returns an EndpointResolver configured using the +// provided endpoint URL. By default, the resolved endpoint resolver uses the +// client region as signing region, and the endpoint source is set to +// EndpointSourceCustom. You can provide functional options to configure endpoint +// values for the resolved endpoint.
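The package documentation above outlines the device authorization grant flow in prose. As a hedged illustration only (not part of the vendored code), the sketch below chains the three generated operations in their intended order: RegisterClient, StartDeviceAuthorization, then CreateToken polled with the device-code grant type. The client name and start URL are hypothetical placeholders, credentials and region are assumed to come from the default shared config, and a production client would also honor SlowDownException and expiry of the device code.

package main

import (
	"context"
	"errors"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
	"github.com/aws/aws-sdk-go-v2/service/ssooidc/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx) // assumes a configured region
	if err != nil {
		log.Fatal(err)
	}
	client := ssooidc.NewFromConfig(cfg)

	// 1) Register this tool as a public OIDC client.
	reg, err := client.RegisterClient(ctx, &ssooidc.RegisterClientInput{
		ClientName: aws.String("example-cli"), // hypothetical name
		ClientType: aws.String("public"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// 2) Begin device authorization; the user opens VerificationUriComplete.
	da, err := client.StartDeviceAuthorization(ctx, &ssooidc.StartDeviceAuthorizationInput{
		ClientId:     reg.ClientId,
		ClientSecret: reg.ClientSecret,
		StartUrl:     aws.String("https://example.awsapps.com/start"), // hypothetical start URL
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("authorize in a browser at:", aws.ToString(da.VerificationUriComplete))

	// 3) Poll CreateToken until the user approves, sleeping da.Interval seconds
	//    between attempts while AuthorizationPendingException persists.
	for {
		tok, err := client.CreateToken(ctx, &ssooidc.CreateTokenInput{
			ClientId:     reg.ClientId,
			ClientSecret: reg.ClientSecret,
			DeviceCode:   da.DeviceCode,
			GrantType:    aws.String("urn:ietf:params:oauth:grant-type:device_code"),
		})
		if err == nil {
			fmt.Println("got access token; expires in", tok.ExpiresIn, "seconds")
			return
		}
		var pending *types.AuthorizationPendingException
		if errors.As(err, &pending) {
			time.Sleep(time.Duration(da.Interval) * time.Second)
			continue
		}
		log.Fatal(err) // any other error is terminal for this sketch
	}
}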
+func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver { + e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom} + for _, fn := range optFns { + fn(&e) + } + + return EndpointResolverFunc( + func(region string, options EndpointResolverOptions) (aws.Endpoint, error) { + if len(e.SigningRegion) == 0 { + e.SigningRegion = region + } + return e, nil + }, + ) +} + +type ResolveEndpoint struct { + Resolver EndpointResolver + Options EndpointResolverOptions +} + +func (*ResolveEndpoint) ID() string { + return "ResolveEndpoint" +} + +func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.Resolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + eo := m.Options + eo.Logger = middleware.GetLogger(ctx) + + var endpoint aws.Endpoint + endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo) + if err != nil { + nf := (&aws.EndpointNotFoundError{}) + if errors.As(err, &nf) { + ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, false) + return next.HandleSerialize(ctx, in) + } + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL, err = url.Parse(endpoint.URL) + if err != nil { + return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) + } + + if len(awsmiddleware.GetSigningName(ctx)) == 0 { + signingName := endpoint.SigningName + if len(signingName) == 0 { + signingName = "awsssooidc" + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + } + ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source) + ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable) + ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) + ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID) + return next.HandleSerialize(ctx, in) +} +func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error { + return stack.Serialize.Insert(&ResolveEndpoint{ + Resolver: o.EndpointResolver, + Options: o.EndpointOptions, + }, "OperationSerializer", middleware.Before) +} + +func removeResolveEndpointMiddleware(stack *middleware.Stack) error { + _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID()) + return err +} + +type wrappedEndpointResolver struct { + awsResolver aws.EndpointResolverWithOptions +} + +func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + return w.awsResolver.ResolveEndpoint(ServiceID, region, options) +} + +type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error) + +func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) { + return a(service, region) +} + +var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil) + +// withEndpointResolver returns an aws.EndpointResolverWithOptions that first delegates endpoint resolution to the awsResolver. 
+// If awsResolver returns aws.EndpointNotFoundError error, the v1 resolver middleware will swallow the error, +// and set an appropriate context flag such that fallback will occur when EndpointResolverV2 is invoked +// via its middleware. +// +// If another error (besides aws.EndpointNotFoundError) is returned, then that error will be propagated. +func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions) EndpointResolver { + var resolver aws.EndpointResolverWithOptions + + if awsResolverWithOptions != nil { + resolver = awsResolverWithOptions + } else if awsResolver != nil { + resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint) + } + + return &wrappedEndpointResolver{ + awsResolver: resolver, + } +} + +func finalizeClientEndpointResolverOptions(options *Options) { + options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage() + + if len(options.EndpointOptions.ResolvedRegion) == 0 { + const fipsInfix = "-fips-" + const fipsPrefix = "fips-" + const fipsSuffix = "-fips" + + if strings.Contains(options.Region, fipsInfix) || + strings.Contains(options.Region, fipsPrefix) || + strings.Contains(options.Region, fipsSuffix) { + options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( + options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") + options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled + } + } + +} + +func resolveEndpointResolverV2(options *Options) { + if options.EndpointResolverV2 == nil { + options.EndpointResolverV2 = NewDefaultEndpointResolverV2() + } +} + +func resolveBaseEndpoint(cfg aws.Config, o *Options) { + if cfg.BaseEndpoint != nil { + o.BaseEndpoint = cfg.BaseEndpoint + } + + _, g := os.LookupEnv("AWS_ENDPOINT_URL") + _, s := os.LookupEnv("AWS_ENDPOINT_URL_SSO_OIDC") + + if g && !s { + return + } + + value, found, err := internalConfig.ResolveServiceBaseEndpoint(context.Background(), "SSO OIDC", cfg.ConfigSources) + if found && err == nil { + o.BaseEndpoint = &value + } +} + +// Utility function to aid with translating pseudo-regions to classical regions +// with the appropriate setting indicated by the pseudo-region +func mapPseudoRegion(pr string) (region string, fips aws.FIPSEndpointState) { + const fipsInfix = "-fips-" + const fipsPrefix = "fips-" + const fipsSuffix = "-fips" + + if strings.Contains(pr, fipsInfix) || + strings.Contains(pr, fipsPrefix) || + strings.Contains(pr, fipsSuffix) { + region = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( + pr, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") + fips = aws.FIPSEndpointStateEnabled + } else { + region = pr + } + + return region, fips +} + +// builtInParameterResolver is the interface responsible for resolving BuiltIn +// values during the sourcing of EndpointParameters +type builtInParameterResolver interface { + ResolveBuiltIns(*EndpointParameters) error +} + +// builtInResolver resolves modeled BuiltIn values using only the members defined +// below. +type builtInResolver struct { + // The AWS region used to dispatch the request. + Region string + + // Sourced BuiltIn value in a historical enabled or disabled state. + UseDualStack aws.DualStackEndpointState + + // Sourced BuiltIn value in a historical enabled or disabled state. + UseFIPS aws.FIPSEndpointState + + // Base endpoint that can potentially be modified during Endpoint resolution. + Endpoint *string +} + +// Invoked at runtime to resolve BuiltIn Values. 
Only resolution code specific to +// each BuiltIn value is generated. +func (b *builtInResolver) ResolveBuiltIns(params *EndpointParameters) error { + + region, _ := mapPseudoRegion(b.Region) + if len(region) == 0 { + return fmt.Errorf("Could not resolve AWS::Region") + } else { + params.Region = aws.String(region) + } + if b.UseDualStack == aws.DualStackEndpointStateEnabled { + params.UseDualStack = aws.Bool(true) + } else { + params.UseDualStack = aws.Bool(false) + } + if b.UseFIPS == aws.FIPSEndpointStateEnabled { + params.UseFIPS = aws.Bool(true) + } else { + params.UseFIPS = aws.Bool(false) + } + params.Endpoint = b.Endpoint + return nil +} + +// EndpointParameters provides the parameters that influence how endpoints are +// resolved. +type EndpointParameters struct { + // The AWS region used to dispatch the request. + // + // Parameter is + // required. + // + // AWS::Region + Region *string + + // When true, use the dual-stack endpoint. If the configured endpoint does not + // support dual-stack, dispatching the request MAY return an error. + // + // Defaults to + // false if no value is provided. + // + // AWS::UseDualStack + UseDualStack *bool + + // When true, send this request to the FIPS-compliant regional endpoint. If the + // configured endpoint does not have a FIPS-compliant endpoint, dispatching the + // request will return an error. + // + // Defaults to false if no value is + // provided. + // + // AWS::UseFIPS + UseFIPS *bool + + // Override the endpoint used to send this request + // + // Parameter is + // required. + // + // SDK::Endpoint + Endpoint *string +} + +// ValidateRequired validates required parameters are set. +func (p EndpointParameters) ValidateRequired() error { + if p.UseDualStack == nil { + return fmt.Errorf("parameter UseDualStack is required") + } + + if p.UseFIPS == nil { + return fmt.Errorf("parameter UseFIPS is required") + } + + return nil +} + +// WithDefaults returns a shallow copy of EndpointParameters with default values +// applied to members where applicable. +func (p EndpointParameters) WithDefaults() EndpointParameters { + if p.UseDualStack == nil { + p.UseDualStack = ptr.Bool(false) + } + + if p.UseFIPS == nil { + p.UseFIPS = ptr.Bool(false) + } + return p +} + +// EndpointResolverV2 provides the interface for resolving service endpoints. +type EndpointResolverV2 interface { + // ResolveEndpoint attempts to resolve the endpoint with the provided options, + // returning the endpoint if found. Otherwise an error is returned. + ResolveEndpoint(ctx context.Context, params EndpointParameters) ( + smithyendpoints.Endpoint, error, + ) +} + +// resolver provides the implementation for resolving endpoints. +type resolver struct{} + +func NewDefaultEndpointResolverV2() EndpointResolverV2 { + return &resolver{} +} + +// ResolveEndpoint attempts to resolve the endpoint with the provided options, +// returning the endpoint if found. Otherwise an error is returned.
+func (r *resolver) ResolveEndpoint( + ctx context.Context, params EndpointParameters, +) ( + endpoint smithyendpoints.Endpoint, err error, +) { + params = params.WithDefaults() + if err = params.ValidateRequired(); err != nil { + return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err) + } + _UseDualStack := *params.UseDualStack + _UseFIPS := *params.UseFIPS + + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if _UseFIPS == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: FIPS and custom endpoint are not supported") + } + if _UseDualStack == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Dualstack and custom endpoint are not supported") + } + uriString := _Endpoint + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + if exprVal := params.Region; exprVal != nil { + _Region := *exprVal + _ = _Region + if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { + _PartitionResult := *exprVal + _ = _PartitionResult + if _UseFIPS == true { + if _UseDualStack == true { + if true == _PartitionResult.SupportsFIPS { + if true == _PartitionResult.SupportsDualStack { + uriString := func() string { + var out strings.Builder + out.WriteString("https://oidc-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS and DualStack are enabled, but this partition does not support one or both") + } + } + if _UseFIPS == true { + if true == _PartitionResult.SupportsFIPS { + if "aws-us-gov" == _PartitionResult.Name { + uriString := func() string { + var out strings.Builder + out.WriteString("https://oidc.") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://oidc-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS is enabled but this partition does not support FIPS") + } + if _UseDualStack == true { + if true == _PartitionResult.SupportsDualStack { + uriString := func() string { + var out strings.Builder + out.WriteString("https://oidc.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: 
http.Header{}, + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "DualStack is enabled but this partition does not support DualStack") + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://oidc.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Missing Region") +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json new file mode 100644 index 00000000000..403fac7c5af --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json @@ -0,0 +1,32 @@ +{ + "dependencies": { + "github.com/aws/aws-sdk-go-v2": "v1.4.0", + "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", + "github.com/aws/smithy-go": "v1.4.0", + "github.com/google/go-cmp": "v0.5.4" + }, + "files": [ + "api_client.go", + "api_client_test.go", + "api_op_CreateToken.go", + "api_op_RegisterClient.go", + "api_op_StartDeviceAuthorization.go", + "deserializers.go", + "doc.go", + "endpoints.go", + "endpoints_config_test.go", + "endpoints_test.go", + "generated.json", + "internal/endpoints/endpoints.go", + "internal/endpoints/endpoints_test.go", + "protocol_test.go", + "serializers.go", + "types/errors.go", + "types/types.go", + "validators.go" + ], + "go": "1.15", + "module": "github.com/aws/aws-sdk-go-v2/service/ssooidc", + "unstable": false +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go new file mode 100644 index 00000000000..bcd16fdeac3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package ssooidc + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.19.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go new file mode 100644 index 00000000000..c48da8b88a6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go @@ -0,0 +1,526 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package endpoints + +import ( + "github.com/aws/aws-sdk-go-v2/aws" + endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2" + "github.com/aws/smithy-go/logging" + "regexp" +) + +// Options is the endpoint resolver configuration options +type Options struct { + // Logger is a logging implementation that log events should be sent to. + Logger logging.Logger + + // LogDeprecated indicates that deprecated endpoints should be logged to the + // provided logger. 
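The rules-based resolver defined in endpoints.go above can be exercised directly, which is a convenient way to see how the FIPS and dual-stack parameters interact with the partition data that follows. The snippet below is an illustrative sketch, not generated code; it uses only exported names visible in this diff (NewDefaultEndpointResolverV2, EndpointParameters), and the expected host follows from the oidc-fips.{region}.{dnsSuffix} rule. Note that pseudo-regions such as fips-us-east-1 are normalized by mapPseudoRegion before these parameters are built, so EndpointParameters expects the plain region name.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
)

func main() {
	resolver := ssooidc.NewDefaultEndpointResolverV2()

	// UseDualStack is left nil here and is defaulted to false by WithDefaults.
	ep, err := resolver.ResolveEndpoint(context.Background(), ssooidc.EndpointParameters{
		Region:  aws.String("us-east-1"),
		UseFIPS: aws.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
	// Per the FIPS rule for the aws partition: https://oidc-fips.us-east-1.amazonaws.com
	fmt.Println(ep.URI.String())
}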
+ LogDeprecated bool + + // ResolvedRegion is used to override the region to be resolved, rather than + // using the value passed to the ResolveEndpoint method. This value is used by the + // SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative + // name. You must not set this value directly in your application. + ResolvedRegion string + + // DisableHTTPS informs the resolver to return an endpoint that does not use the + // HTTPS scheme. + DisableHTTPS bool + + // UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint. + UseDualStackEndpoint aws.DualStackEndpointState + + // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. + UseFIPSEndpoint aws.FIPSEndpointState +} + +func (o Options) GetResolvedRegion() string { + return o.ResolvedRegion +} + +func (o Options) GetDisableHTTPS() bool { + return o.DisableHTTPS +} + +func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState { + return o.UseDualStackEndpoint +} + +func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState { + return o.UseFIPSEndpoint +} + +func transformToSharedOptions(options Options) endpoints.Options { + return endpoints.Options{ + Logger: options.Logger, + LogDeprecated: options.LogDeprecated, + ResolvedRegion: options.ResolvedRegion, + DisableHTTPS: options.DisableHTTPS, + UseDualStackEndpoint: options.UseDualStackEndpoint, + UseFIPSEndpoint: options.UseFIPSEndpoint, + } +} + +// Resolver SSO OIDC endpoint resolver +type Resolver struct { + partitions endpoints.Partitions +} + +// ResolveEndpoint resolves the service endpoint for the given region and options +func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) { + if len(region) == 0 { + return endpoint, &aws.MissingRegionError{} + } + + opt := transformToSharedOptions(options) + return r.partitions.ResolveEndpoint(region, opt) +} + +// New returns a new Resolver +func New() *Resolver { + return &Resolver{ + partitions: defaultPartitions, + } +} + +var partitionRegexp = struct { + Aws *regexp.Regexp + AwsCn *regexp.Regexp + AwsIso *regexp.Regexp + AwsIsoB *regexp.Regexp + AwsIsoE *regexp.Regexp + AwsIsoF *regexp.Regexp + AwsUsGov *regexp.Regexp +}{ + + Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$"), + AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), + AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), + AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), + AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"), + AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"), + AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), +} + +var defaultPartitions = endpoints.Partitions{ + { + ID: "aws", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "oidc.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "oidc-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "oidc-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "oidc.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.Aws, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ +
endpoints.EndpointKey{ + Region: "af-south-1", + }: endpoints.Endpoint{ + Hostname: "oidc.af-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "af-south-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-east-1", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-northeast-1", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-northeast-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-northeast-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-northeast-2", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-northeast-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-northeast-2", + }, + }, + endpoints.EndpointKey{ + Region: "ap-northeast-3", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-northeast-3.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-northeast-3", + }, + }, + endpoints.EndpointKey{ + Region: "ap-south-1", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-south-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-1", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-southeast-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-2", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-southeast-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-2", + }, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-3", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-southeast-3.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-3", + }, + }, + endpoints.EndpointKey{ + Region: "ca-central-1", + }: endpoints.Endpoint{ + Hostname: "oidc.ca-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ca-central-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-central-1", + }: endpoints.Endpoint{ + Hostname: "oidc.eu-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-central-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-central-2", + }: endpoints.Endpoint{ + Hostname: "oidc.eu-central-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-central-2", + }, + }, + endpoints.EndpointKey{ + Region: "eu-north-1", + }: endpoints.Endpoint{ + Hostname: "oidc.eu-north-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-north-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-south-1", + }: endpoints.Endpoint{ + Hostname: "oidc.eu-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-south-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-west-1", + }: endpoints.Endpoint{ + Hostname: "oidc.eu-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-west-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-west-2", + }: endpoints.Endpoint{ + Hostname: "oidc.eu-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-west-2", + }, + }, + endpoints.EndpointKey{ + Region: "eu-west-3", + }: endpoints.Endpoint{ + Hostname: "oidc.eu-west-3.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-west-3", + }, + }, + endpoints.EndpointKey{ + Region: "il-central-1", + }: endpoints.Endpoint{ + Hostname: 
"oidc.il-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "il-central-1", + }, + }, + endpoints.EndpointKey{ + Region: "me-south-1", + }: endpoints.Endpoint{ + Hostname: "oidc.me-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "me-south-1", + }, + }, + endpoints.EndpointKey{ + Region: "sa-east-1", + }: endpoints.Endpoint{ + Hostname: "oidc.sa-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "sa-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-east-1", + }: endpoints.Endpoint{ + Hostname: "oidc.us-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-east-2", + }: endpoints.Endpoint{ + Hostname: "oidc.us-east-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-2", + }, + }, + endpoints.EndpointKey{ + Region: "us-west-1", + }: endpoints.Endpoint{ + Hostname: "oidc.us-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-west-2", + }: endpoints.Endpoint{ + Hostname: "oidc.us-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + { + ID: "aws-cn", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "oidc.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "oidc-fips.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "oidc-fips.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "oidc.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsCn, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "cn-north-1", + }: endpoints.Endpoint{ + Hostname: "oidc.cn-north-1.amazonaws.com.cn", + CredentialScope: endpoints.CredentialScope{ + Region: "cn-north-1", + }, + }, + endpoints.EndpointKey{ + Region: "cn-northwest-1", + }: endpoints.Endpoint{ + Hostname: "oidc.cn-northwest-1.amazonaws.com.cn", + CredentialScope: endpoints.CredentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + { + ID: "aws-iso", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "oidc-fips.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "oidc.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIso, + IsRegionalized: true, + }, + { + ID: "aws-iso-b", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "oidc-fips.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "oidc.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoB, + IsRegionalized: true, + }, + { + ID: 
"aws-iso-e", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "oidc-fips.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "oidc.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoE, + IsRegionalized: true, + }, + { + ID: "aws-iso-f", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "oidc-fips.{region}.csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "oidc.{region}.csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoF, + IsRegionalized: true, + }, + { + ID: "aws-us-gov", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "oidc.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "oidc-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "oidc-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "oidc.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsUsGov, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-gov-east-1", + }: endpoints.Endpoint{ + Hostname: "oidc.us-gov-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + }: endpoints.Endpoint{ + Hostname: "oidc.us-gov-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go new file mode 100644 index 00000000000..efca8b25079 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go @@ -0,0 +1,309 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package ssooidc + +import ( + "bytes" + "context" + "fmt" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/encoding/httpbinding" + smithyjson "github.com/aws/smithy-go/encoding/json" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +type awsRestjson1_serializeOpCreateToken struct { +} + +func (*awsRestjson1_serializeOpCreateToken) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpCreateToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateTokenInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/token") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentCreateTokenInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsCreateTokenInput(v *CreateTokenInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + +func awsRestjson1_serializeOpDocumentCreateTokenInput(v *CreateTokenInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ClientId != nil { + ok := object.Key("clientId") + ok.String(*v.ClientId) + } + + if v.ClientSecret != nil { + ok := object.Key("clientSecret") + ok.String(*v.ClientSecret) + } + + if v.Code != nil { + ok := object.Key("code") + ok.String(*v.Code) + } + + if v.DeviceCode != nil { + ok := object.Key("deviceCode") + ok.String(*v.DeviceCode) + } + + if v.GrantType != nil { + ok := object.Key("grantType") + ok.String(*v.GrantType) + } + + if v.RedirectUri != nil { + ok := object.Key("redirectUri") + ok.String(*v.RedirectUri) + } + + if v.RefreshToken != nil { + ok := object.Key("refreshToken") + ok.String(*v.RefreshToken) + } + + if v.Scope != nil { + ok := object.Key("scope") + if err := awsRestjson1_serializeDocumentScopes(v.Scope, ok); err != nil { + 
return err + } + } + + return nil +} + +type awsRestjson1_serializeOpRegisterClient struct { +} + +func (*awsRestjson1_serializeOpRegisterClient) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpRegisterClient) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*RegisterClientInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/client/register") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentRegisterClientInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsRegisterClientInput(v *RegisterClientInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + +func awsRestjson1_serializeOpDocumentRegisterClientInput(v *RegisterClientInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ClientName != nil { + ok := object.Key("clientName") + ok.String(*v.ClientName) + } + + if v.ClientType != nil { + ok := object.Key("clientType") + ok.String(*v.ClientType) + } + + if v.Scopes != nil { + ok := object.Key("scopes") + if err := awsRestjson1_serializeDocumentScopes(v.Scopes, ok); err != nil { + return err + } + } + + return nil +} + +type awsRestjson1_serializeOpStartDeviceAuthorization struct { +} + +func (*awsRestjson1_serializeOpStartDeviceAuthorization) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpStartDeviceAuthorization) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := 
in.Parameters.(*StartDeviceAuthorizationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/device_authorization") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentStartDeviceAuthorizationInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsStartDeviceAuthorizationInput(v *StartDeviceAuthorizationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + +func awsRestjson1_serializeOpDocumentStartDeviceAuthorizationInput(v *StartDeviceAuthorizationInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ClientId != nil { + ok := object.Key("clientId") + ok.String(*v.ClientId) + } + + if v.ClientSecret != nil { + ok := object.Key("clientSecret") + ok.String(*v.ClientSecret) + } + + if v.StartUrl != nil { + ok := object.Key("startUrl") + ok.String(*v.StartUrl) + } + + return nil +} + +func awsRestjson1_serializeDocumentScopes(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go new file mode 100644 index 00000000000..115a51a9eb3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go @@ -0,0 +1,366 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + "fmt" + smithy "github.com/aws/smithy-go" +) + +// You do not have sufficient access to perform this action. 
+type AccessDeniedException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *AccessDeniedException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *AccessDeniedException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *AccessDeniedException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "AccessDeniedException" + } + return *e.ErrorCodeOverride +} +func (e *AccessDeniedException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that a request to authorize a client with an access user session +// token is pending. +type AuthorizationPendingException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *AuthorizationPendingException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *AuthorizationPendingException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *AuthorizationPendingException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "AuthorizationPendingException" + } + return *e.ErrorCodeOverride +} +func (e *AuthorizationPendingException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that the token issued by the service is expired and is no longer +// valid. +type ExpiredTokenException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *ExpiredTokenException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ExpiredTokenException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ExpiredTokenException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ExpiredTokenException" + } + return *e.ErrorCodeOverride +} +func (e *ExpiredTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that an error from the service occurred while trying to process a +// request. +type InternalServerException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *InternalServerException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InternalServerException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InternalServerException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InternalServerException" + } + return *e.ErrorCodeOverride +} +func (e *InternalServerException) ErrorFault() smithy.ErrorFault { return smithy.FaultServer } + +// Indicates that the clientId or clientSecret in the request is invalid. For +// example, this can occur when a client sends an incorrect clientId or an expired +// clientSecret . 
+type InvalidClientException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *InvalidClientException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidClientException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidClientException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidClientException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidClientException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that the client information sent in the request during registration +// is invalid. +type InvalidClientMetadataException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *InvalidClientMetadataException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidClientMetadataException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidClientMetadataException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidClientMetadataException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidClientMetadataException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that a request contains an invalid grant. This can occur if a client +// makes a CreateToken request with an invalid grant type. +type InvalidGrantException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *InvalidGrantException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidGrantException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidGrantException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidGrantException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidGrantException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that something is wrong with the input to the request. For example, a +// required parameter might be missing or out of range. +type InvalidRequestException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *InvalidRequestException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidRequestException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidRequestException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidRequestException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidRequestException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that the scope provided in the request is invalid. 
+type InvalidScopeException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *InvalidScopeException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidScopeException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidScopeException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidScopeException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidScopeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that the client is making requests more frequently than the service +// can handle. +type SlowDownException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *SlowDownException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *SlowDownException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *SlowDownException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "SlowDownException" + } + return *e.ErrorCodeOverride +} +func (e *SlowDownException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that the client is not currently authorized to make the request. This +// can happen when a clientId is not issued for a public client. +type UnauthorizedClientException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *UnauthorizedClientException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *UnauthorizedClientException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *UnauthorizedClientException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "UnauthorizedClientException" + } + return *e.ErrorCodeOverride +} +func (e *UnauthorizedClientException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that the grant type in the request is not supported by the service. +type UnsupportedGrantTypeException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *UnsupportedGrantTypeException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *UnsupportedGrantTypeException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *UnsupportedGrantTypeException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "UnsupportedGrantTypeException" + } + return *e.ErrorCodeOverride +} +func (e *UnsupportedGrantTypeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/types.go new file mode 100644 index 00000000000..0ec0789f8d9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/types.go @@ -0,0 +1,9 @@ +// Code generated by smithy-go-codegen DO NOT EDIT.
+ +package types + +import ( + smithydocument "github.com/aws/smithy-go/document" +) + +type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/validators.go new file mode 100644 index 00000000000..5a309484e01 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/validators.go @@ -0,0 +1,142 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ssooidc + +import ( + "context" + "fmt" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" +) + +type validateOpCreateToken struct { +} + +func (*validateOpCreateToken) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateToken) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateTokenInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateTokenInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpRegisterClient struct { +} + +func (*validateOpRegisterClient) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpRegisterClient) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*RegisterClientInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpRegisterClientInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpStartDeviceAuthorization struct { +} + +func (*validateOpStartDeviceAuthorization) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpStartDeviceAuthorization) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*StartDeviceAuthorizationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpStartDeviceAuthorizationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +func addOpCreateTokenValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateToken{}, middleware.After) +} + +func addOpRegisterClientValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpRegisterClient{}, middleware.After) +} + +func addOpStartDeviceAuthorizationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpStartDeviceAuthorization{}, middleware.After) +} + +func validateOpCreateTokenInput(v *CreateTokenInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateTokenInput"} + if v.ClientId == nil { + invalidParams.Add(smithy.NewErrParamRequired("ClientId")) + } + if v.ClientSecret == nil { + invalidParams.Add(smithy.NewErrParamRequired("ClientSecret")) + } + if v.GrantType == nil { + invalidParams.Add(smithy.NewErrParamRequired("GrantType")) + 
} + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpRegisterClientInput(v *RegisterClientInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "RegisterClientInput"} + if v.ClientName == nil { + invalidParams.Add(smithy.NewErrParamRequired("ClientName")) + } + if v.ClientType == nil { + invalidParams.Add(smithy.NewErrParamRequired("ClientType")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpStartDeviceAuthorizationInput(v *StartDeviceAuthorizationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "StartDeviceAuthorizationInput"} + if v.ClientId == nil { + invalidParams.Add(smithy.NewErrParamRequired("ClientId")) + } + if v.ClientSecret == nil { + invalidParams.Add(smithy.NewErrParamRequired("ClientSecret")) + } + if v.StartUrl == nil { + invalidParams.Add(smithy.NewErrParamRequired("StartUrl")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md new file mode 100644 index 00000000000..eefcd873062 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md @@ -0,0 +1,338 @@ +# v1.25.1 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.0 (2023-11-01) + +* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.23.2 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.23.1 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.23.0 (2023-10-02) + +* **Feature**: STS API updates for assumeRole + +# v1.22.0 (2023-09-18) + +* **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* **Feature**: Adds several endpoint ruleset changes across all models: smaller rulesets, removal of non-unique regional endpoints, fixes to FIPS and DualStack endpoints, and making region not required in SDK::Endpoint. Additional breakfix to cognito-sync field. + +# v1.21.5 (2023-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.21.4 (2023-08-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.21.3 (2023-08-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.21.2 (2023-08-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.21.1 (2023-08-01) + +* No change notes available for this release. + +# v1.21.0 (2023-07-31) + +* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supersede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place.
For more information, please see the Endpoints section in our Developer Guide. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.1 (2023-07-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.0 (2023-07-25) + +* **Feature**: API updates for the AWS Security Token Service + +# v1.19.3 (2023-07-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.19.2 (2023-06-15) + +* No change notes available for this release. + +# v1.19.1 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.19.0 (2023-05-08) + +* **Feature**: Documentation updates for AWS Security Token Service. + +# v1.18.11 (2023-05-04) + +* No change notes available for this release. + +# v1.18.10 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.9 (2023-04-10) + +* No change notes available for this release. + +# v1.18.8 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.7 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.6 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.5 (2023-02-22) + +* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes. + +# v1.18.4 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.3 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions +* **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. + +# v1.18.2 (2023-01-25) + +* **Documentation**: Doc only change to update wording in a key topic + +# v1.18.1 (2023-01-23) + +* No change notes available for this release. + +# v1.18.0 (2023-01-05) + +* **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). + +# v1.17.7 (2022-12-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.6 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.5 (2022-11-22) + +* No change notes available for this release. + +# v1.17.4 (2022-11-17) + +* **Documentation**: Documentation updates for AWS Security Token Service. + +# v1.17.3 (2022-11-16) + +* No change notes available for this release. + +# v1.17.2 (2022-11-10) + +* No change notes available for this release. + +# v1.17.1 (2022-10-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.0 (2022-10-21) + +* **Feature**: Add presign functionality for sts:AssumeRole operation +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.19 (2022-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.18 (2022-09-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.17 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.16 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.15 (2022-08-30) + +* No change notes available for this release. 
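The v1.17.0 entry above introduces presign support for the sts:AssumeRole operation. A minimal sketch of how a caller might use it, assuming the generated PresignAssumeRole method on this package's PresignClient (shown later in api_client.go); the role ARN and session name are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	ctx := context.Background()
	// Load credentials/region from the environment and shared config files.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// NewPresignClient wraps a regular STS client (see api_client.go below).
	presigner := sts.NewPresignClient(sts.NewFromConfig(cfg))
	req, err := presigner.PresignAssumeRole(ctx, &sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example"), // placeholder ARN
		RoleSessionName: aws.String("presign-demo"),                           // placeholder name
	})
	if err != nil {
		log.Fatal(err)
	}
	// A presigned request carries the signature in the URL, so it can be
	// handed to a caller that holds no credentials, until it expires.
	fmt.Println(req.Method, req.URL)
}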
+ +# v1.16.14 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.13 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.12 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.11 (2022-08-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.10 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.9 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.8 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.7 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.6 (2022-05-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.5 (2022-05-16) + +* **Documentation**: Documentation updates for AWS Security Token Service. + +# v1.16.4 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.3 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.2 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.1 (2022-03-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.0 (2022-03-08) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Documentation**: Updated service client model to latest release. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.0 (2022-02-24) + +* **Feature**: API client updated +* **Feature**: Adds RetryMaxAttempts and RetryMode to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. This adds a new retry mode, `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttle responses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details and configuration options. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.0 (2022-01-07) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.0 (2021-12-21) + +* **Feature**: Updated to latest service endpoints + +# v1.11.1 (2021-12-02) + +* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514)) +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.0 (2021-11-30) + +* **Feature**: API client updated + +# v1.10.1 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.0 (2021-11-12) + +* **Feature**: Service clients now support custom endpoints that have an initial URI path defined. + +# v1.9.0 (2021-11-06) + +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.0 (2021-10-21) + +* **Feature**: API client updated +* **Feature**: Updated to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.2 (2021-10-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.1 (2021-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.0 (2021-08-27) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.2 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.1 (2021-08-04) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.0 (2021-07-15) + +* **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model. +* **Documentation**: Updated service model to latest revision. +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.0 (2021-06-25) + +* **Feature**: API client updated +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.1 (2021-05-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/LICENSE.txt new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go new file mode 100644 index 00000000000..c29d8cad17a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go @@ -0,0 +1,630 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package sts + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/defaults" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/protocol/query" + "github.com/aws/aws-sdk-go-v2/aws/retry" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + presignedurlcust "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url" + smithy "github.com/aws/smithy-go" + smithydocument "github.com/aws/smithy-go/document" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net" + "net/http" + "time" +) + +const ServiceID = "STS" +const ServiceAPIVersion = "2011-06-15" + +// Client provides the API client to make operation calls for AWS Security Token +// Service. +type Client struct { + options Options +} + +// New returns an initialized Client based on the functional options. Provide +// additional functional options to further configure the behavior of the client, +// such as changing the client's endpoint or adding custom middleware behavior. +func New(options Options, optFns ...func(*Options)) *Client { + options = options.Copy() + + resolveDefaultLogger(&options) + + setResolvedDefaultsMode(&options) + + resolveRetryer(&options) + + resolveHTTPClient(&options) + + resolveHTTPSignerV4(&options) + + for _, fn := range optFns { + fn(&options) + } + + client := &Client{ + options: options, + } + + return client +} + +type Options struct { + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + APIOptions []func(*middleware.Stack) error + + // The optional application specific identifier appended to the User-Agent header. + AppID string + + // This endpoint will be given as input to an EndpointResolverV2. It is used for + // providing a custom base endpoint that is subject to modifications by the + // processing EndpointResolverV2. + BaseEndpoint *string + + // Configures the events that will be sent to the configured logger. + ClientLogMode aws.ClientLogMode + + // The credentials object to use when signing requests. + Credentials aws.CredentialsProvider + + // The configuration DefaultsMode that the SDK should use when constructing the + // client's initial default settings. + DefaultsMode aws.DefaultsMode + + // The endpoint options to be used when attempting to resolve an endpoint. + EndpointOptions EndpointResolverOptions + + // The service endpoint resolver. + // + // Deprecated: EndpointResolver and WithEndpointResolver. Providing a + // value for this field will likely prevent you from using any endpoint-related + // service features released after the introduction of EndpointResolverV2 and + // BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom + // endpoint, set the client option BaseEndpoint instead. + EndpointResolver EndpointResolver + + // Resolves the endpoint used for a particular service. This should be used over + // the deprecated EndpointResolver. + EndpointResolverV2 EndpointResolverV2 + + // Signature Version 4 (SigV4) Signer + HTTPSignerV4 HTTPSignerV4 + + // The logger writer interface to write logging messages to. + Logger logging.Logger + + // The region to send requests to.
(Required) + Region string + + // RetryMaxAttempts specifies the maximum number attempts an API client will call + // an operation that fails with a retryable error. A value of 0 is ignored, and + // will not be used to configure the API client created default retryer, or modify + // per operation call's retry max attempts. When creating a new API Clients this + // member will only be used if the Retryer Options member is nil. This value will + // be ignored if Retryer is not nil. If specified in an operation call's functional + // options with a value that is different than the constructed client's Options, + // the Client's Retryer will be wrapped to use the operation's specific + // RetryMaxAttempts value. + RetryMaxAttempts int + + // RetryMode specifies the retry mode the API client will be created with, if + // Retryer option is not also specified. When creating a new API Clients this + // member will only be used if the Retryer Options member is nil. This value will + // be ignored if Retryer is not nil. Currently does not support per operation call + // overrides, may in the future. + RetryMode aws.RetryMode + + // Retryer guides how HTTP requests should be retried in case of recoverable + // failures. When nil the API client will use a default retryer. The kind of + // default retry created by the API client can be changed with the RetryMode + // option. + Retryer aws.Retryer + + // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set + // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You + // should not populate this structure programmatically, or rely on the values here + // within your applications. + RuntimeEnvironment aws.RuntimeEnvironment + + // The initial DefaultsMode used when the client options were constructed. If the + // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved + // value was at that point in time. Currently does not support per operation call + // overrides, may in the future. + resolvedDefaultsMode aws.DefaultsMode + + // The HTTP client to invoke API calls with. Defaults to client's default HTTP + // implementation if nil. + HTTPClient HTTPClient +} + +// WithAPIOptions returns a functional option for setting the Client's APIOptions +// option. +func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { + return func(o *Options) { + o.APIOptions = append(o.APIOptions, optFns...) + } +} + +// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for +// this field will likely prevent you from using any endpoint-related service +// features released after the introduction of EndpointResolverV2 and BaseEndpoint. +// To migrate an EndpointResolver implementation that uses a custom endpoint, set +// the client option BaseEndpoint instead. +func WithEndpointResolver(v EndpointResolver) func(*Options) { + return func(o *Options) { + o.EndpointResolver = v + } +} + +// WithEndpointResolverV2 returns a functional option for setting the Client's +// EndpointResolverV2 option. +func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) { + return func(o *Options) { + o.EndpointResolverV2 = v + } +} + +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +// Copy creates a clone where the APIOptions list is deep copied. 
+func (o Options) Copy() Options { + to := o + to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) + copy(to.APIOptions, o.APIOptions) + + return to +} +func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { + ctx = middleware.ClearStackValues(ctx) + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) + options := c.options.Copy() + resolveEndpointResolverV2(&options) + + for _, fn := range optFns { + fn(&options) + } + + finalizeRetryMaxAttemptOptions(&options, *c) + + finalizeClientEndpointResolverOptions(&options) + + for _, fn := range stackFns { + if err := fn(stack, options); err != nil { + return nil, metadata, err + } + } + + for _, fn := range options.APIOptions { + if err := fn(stack); err != nil { + return nil, metadata, err + } + } + + handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) + result, metadata, err = handler.Handle(ctx, params) + if err != nil { + err = &smithy.OperationError{ + ServiceID: ServiceID, + OperationName: opID, + Err: err, + } + } + return result, metadata, err +} + +type noSmithyDocumentSerde = smithydocument.NoSerde + +type legacyEndpointContextSetter struct { + LegacyResolver EndpointResolver +} + +func (*legacyEndpointContextSetter) ID() string { + return "legacyEndpointContextSetter" +} + +func (m *legacyEndpointContextSetter) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + if m.LegacyResolver != nil { + ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, true) + } + + return next.HandleInitialize(ctx, in) + +} +func addlegacyEndpointContextSetter(stack *middleware.Stack, o Options) error { + return stack.Initialize.Add(&legacyEndpointContextSetter{ + LegacyResolver: o.EndpointResolver, + }, middleware.Before) +} + +func resolveDefaultLogger(o *Options) { + if o.Logger != nil { + return + } + o.Logger = logging.Nop{} +} + +func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { + return middleware.AddSetLoggerMiddleware(stack, o.Logger) +} + +func setResolvedDefaultsMode(o *Options) { + if len(o.resolvedDefaultsMode) > 0 { + return + } + + var mode aws.DefaultsMode + mode.SetFromString(string(o.DefaultsMode)) + + if mode == aws.DefaultsModeAuto { + mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment) + } + + o.resolvedDefaultsMode = mode +} + +// NewFromConfig returns a new client from the provided config. +func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { + opts := Options{ + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, + } + resolveAWSRetryerProvider(cfg, &opts) + resolveAWSRetryMaxAttempts(cfg, &opts) + resolveAWSRetryMode(cfg, &opts) + resolveAWSEndpointResolver(cfg, &opts) + resolveUseDualStackEndpoint(cfg, &opts) + resolveUseFIPSEndpoint(cfg, &opts) + resolveBaseEndpoint(cfg, &opts) + return New(opts, optFns...) 
+} + +func resolveHTTPClient(o *Options) { + var buildable *awshttp.BuildableClient + + if o.HTTPClient != nil { + var ok bool + buildable, ok = o.HTTPClient.(*awshttp.BuildableClient) + if !ok { + return + } + } else { + buildable = awshttp.NewBuildableClient() + } + + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) { + if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok { + dialer.Timeout = dialerTimeout + } + }) + + buildable = buildable.WithTransportOptions(func(transport *http.Transport) { + if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok { + transport.TLSHandshakeTimeout = tlsHandshakeTimeout + } + }) + } + + o.HTTPClient = buildable +} + +func resolveRetryer(o *Options) { + if o.Retryer != nil { + return + } + + if len(o.RetryMode) == 0 { + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + o.RetryMode = modeConfig.RetryMode + } + } + if len(o.RetryMode) == 0 { + o.RetryMode = aws.RetryModeStandard + } + + var standardOptions []func(*retry.StandardOptions) + if v := o.RetryMaxAttempts; v != 0 { + standardOptions = append(standardOptions, func(so *retry.StandardOptions) { + so.MaxAttempts = v + }) + } + + switch o.RetryMode { + case aws.RetryModeAdaptive: + var adaptiveOptions []func(*retry.AdaptiveModeOptions) + if len(standardOptions) != 0 { + adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) { + ao.StandardOptions = append(ao.StandardOptions, standardOptions...) + }) + } + o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...) + + default: + o.Retryer = retry.NewStandard(standardOptions...) + } +} + +func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { + if cfg.Retryer == nil { + return + } + o.Retryer = cfg.Retryer() +} + +func resolveAWSRetryMode(cfg aws.Config, o *Options) { + if len(cfg.RetryMode) == 0 { + return + } + o.RetryMode = cfg.RetryMode +} +func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { + if cfg.RetryMaxAttempts == 0 { + return + } + o.RetryMaxAttempts = cfg.RetryMaxAttempts +} + +func finalizeRetryMaxAttemptOptions(o *Options, client Client) { + if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { + return + } + + o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) +} + +func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { + if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil { + return + } + o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions) +} + +func addClientUserAgent(stack *middleware.Stack, options Options) error { + if err := awsmiddleware.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "sts", goModuleVersion)(stack); err != nil { + return err + } + + if len(options.AppID) > 0 { + return awsmiddleware.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID)(stack) + } + + return nil +} + +func addHTTPSignerV4Middleware(stack *middleware.Stack, o Options) error { + mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{ + CredentialsProvider: o.Credentials, + Signer: o.HTTPSignerV4, + LogSigning: o.ClientLogMode.IsSigning(), + }) + return stack.Finalize.Add(mw, middleware.After) +} + +type HTTPSignerV4 interface { + SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns 
...func(*v4.SignerOptions)) error +} + +func resolveHTTPSignerV4(o *Options) { + if o.HTTPSignerV4 != nil { + return + } + o.HTTPSignerV4 = newDefaultV4Signer(*o) +} + +func newDefaultV4Signer(o Options) *v4.Signer { + return v4.NewSigner(func(so *v4.SignerOptions) { + so.Logger = o.Logger + so.LogSigning = o.ClientLogMode.IsSigning() + }) +} + +func addRetryMiddlewares(stack *middleware.Stack, o Options) error { + mo := retry.AddRetryMiddlewaresOptions{ + Retryer: o.Retryer, + LogRetryAttempts: o.ClientLogMode.IsRetries(), + } + return retry.AddRetryMiddlewares(stack, mo) +} + +// resolves dual-stack endpoint configuration +func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseDualStackEndpoint = value + } + return nil +} + +// resolves FIPS endpoint configuration +func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseFIPSEndpoint = value + } + return nil +} + +func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error { + return awsmiddleware.AddRequestIDRetrieverMiddleware(stack) +} + +func addResponseErrorMiddleware(stack *middleware.Stack) error { + return awshttp.AddResponseErrorMiddleware(stack) +} + +// HTTPPresignerV4 represents presigner interface used by presign url client +type HTTPPresignerV4 interface { + PresignHTTP( + ctx context.Context, credentials aws.Credentials, r *http.Request, + payloadHash string, service string, region string, signingTime time.Time, + optFns ...func(*v4.SignerOptions), + ) (url string, signedHeader http.Header, err error) +} + +// PresignOptions represents the presign client options +type PresignOptions struct { + + // ClientOptions are list of functional options to mutate client options used by + // the presign client. + ClientOptions []func(*Options) + + // Presigner is the presigner used by the presign url client + Presigner HTTPPresignerV4 +} + +func (o PresignOptions) copy() PresignOptions { + clientOptions := make([]func(*Options), len(o.ClientOptions)) + copy(clientOptions, o.ClientOptions) + o.ClientOptions = clientOptions + return o +} + +// WithPresignClientFromClientOptions is a helper utility to retrieve a function +// that takes PresignOption as input +func WithPresignClientFromClientOptions(optFns ...func(*Options)) func(*PresignOptions) { + return withPresignClientFromClientOptions(optFns).options +} + +type withPresignClientFromClientOptions []func(*Options) + +func (w withPresignClientFromClientOptions) options(o *PresignOptions) { + o.ClientOptions = append(o.ClientOptions, w...) +} + +// PresignClient represents the presign url client +type PresignClient struct { + client *Client + options PresignOptions +} + +// NewPresignClient generates a presign client using provided API Client and +// presign options +func NewPresignClient(c *Client, optFns ...func(*PresignOptions)) *PresignClient { + var options PresignOptions + for _, fn := range optFns { + fn(&options) + } + if len(options.ClientOptions) != 0 { + c = New(c.options, options.ClientOptions...) 
+ } + + if options.Presigner == nil { + options.Presigner = newDefaultV4Signer(c.options) + } + + return &PresignClient{ + client: c, + options: options, + } +} + +func withNopHTTPClientAPIOption(o *Options) { + o.HTTPClient = smithyhttp.NopClient{} +} + +type presignConverter PresignOptions + +func (c presignConverter) convertToPresignMiddleware(stack *middleware.Stack, options Options) (err error) { + stack.Finalize.Clear() + stack.Deserialize.Clear() + stack.Build.Remove((*awsmiddleware.ClientRequestID)(nil).ID()) + stack.Build.Remove("UserAgent") + pmw := v4.NewPresignHTTPRequestMiddleware(v4.PresignHTTPRequestMiddlewareOptions{ + CredentialsProvider: options.Credentials, + Presigner: c.Presigner, + LogSigning: options.ClientLogMode.IsSigning(), + }) + err = stack.Finalize.Add(pmw, middleware.After) + if err != nil { + return err + } + if err = smithyhttp.AddNoPayloadDefaultContentTypeRemover(stack); err != nil { + return err + } + // convert request to a GET request + err = query.AddAsGetRequestMiddleware(stack) + if err != nil { + return err + } + err = presignedurlcust.AddAsIsPresigingMiddleware(stack) + if err != nil { + return err + } + return nil +} + +func addRequestResponseLogging(stack *middleware.Stack, o Options) error { + return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ + LogRequest: o.ClientLogMode.IsRequest(), + LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), + LogResponse: o.ClientLogMode.IsResponse(), + LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), + }, middleware.After) +} + +type endpointDisableHTTPSMiddleware struct { + EndpointDisableHTTPS bool +} + +func (*endpointDisableHTTPSMiddleware) ID() string { + return "endpointDisableHTTPSMiddleware" +} + +func (m *endpointDisableHTTPSMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.EndpointDisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) { + req.URL.Scheme = "http" + } + + return next.HandleSerialize(ctx, in) + +} +func addendpointDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { + return stack.Serialize.Insert(&endpointDisableHTTPSMiddleware{ + EndpointDisableHTTPS: o.EndpointOptions.DisableHTTPS, + }, "OperationSerializer", middleware.Before) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go new file mode 100644 index 00000000000..0ef7affc598 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go @@ -0,0 +1,558 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a set of temporary security credentials that you can use to access +// Amazon Web Services resources. 
These temporary credentials consist of an access +// key ID, a secret access key, and a security token. Typically, you use AssumeRole +// within your account or for cross-account access. For a comparison of AssumeRole +// with other API operations that produce temporary credentials, see Requesting +// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. Permissions The temporary security credentials created by +// AssumeRole can be used to make API calls to any Amazon Web Services service +// with the following exception: You cannot call the Amazon Web Services STS +// GetFederationToken or GetSessionToken API operations. (Optional) You can pass +// inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policy Amazon +// Resource Names (ARNs) to use as managed session policies. The plaintext that you +// use for both inline and managed session policies can't exceed 2,048 characters. +// Passing policies to this operation returns new temporary credentials. The +// resulting session's permissions are the intersection of the role's +// identity-based policy and the session policies. You can use the role's temporary +// credentials in subsequent Amazon Web Services API calls to access resources in +// the account that owns the role. You cannot use session policies to grant more +// permissions than those allowed by the identity-based policy of the role that is +// being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. When you create a role, you create two policies: a role +// trust policy that specifies who can assume the role, and a permissions policy +// that specifies what can be done with the role. You specify the trusted principal +// that is allowed to assume the role in the role trust policy. To assume a role +// from a different account, your Amazon Web Services account must be trusted by +// the role. The trust relationship is defined in the role's trust policy when the +// role is created. That trust policy states which accounts are allowed to delegate +// that access to users in the account. A user who wants to access a role in a +// different account must also have permissions that are delegated from the account +// administrator. The administrator must attach a policy that allows the user to +// call AssumeRole for the ARN of the role in the other account. To allow a user +// to assume a role in the same account, you can do either of the following: +// - Attach a policy to the user that allows the user to call AssumeRole (as long +// as the role's trust policy trusts the account). +// - Add the user as a principal directly in the role's trust policy. +// +// You can do either because the role’s trust policy acts as an IAM resource-based +// policy. When a resource-based policy grants access to a principal in the same +// account, no additional identity-based policy is required. 
For more information +// about trust policies and resource-based policies, see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) +// in the IAM User Guide. Tags (Optional) You can pass tag key-value pairs to your +// session. These tags are called session tags. For more information about session +// tags, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. An administrator must grant you the permissions necessary +// to pass session tags. The administrator can also create granular permissions to +// allow you to pass only specific session tags. For more information, see +// Tutorial: Using Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. You can set the session tags as transitive. Transitive +// tags persist during role chaining. For more information, see Chaining Roles +// with Session Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. Using MFA with AssumeRole (Optional) You can include +// multi-factor authentication (MFA) information when you call AssumeRole . This is +// useful for cross-account scenarios to ensure that the user that assumes the role +// has been authenticated with an Amazon Web Services MFA device. In that scenario, +// the trust policy of the role being assumed includes a condition that tests for +// MFA authentication. If the caller does not include valid MFA information, the +// request to assume the role is denied. The condition in a trust policy that tests +// for MFA authentication might look like the following example. "Condition": +// {"Bool": {"aws:MultiFactorAuthPresent": true}} For more information, see +// Configuring MFA-Protected API Access (https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html) +// in the IAM User Guide. To use MFA with AssumeRole , you pass values for +// the SerialNumber and TokenCode parameters. The SerialNumber value identifies +// the user's hardware or virtual MFA device. The TokenCode is the time-based +// one-time password (TOTP) that the MFA device produces. +func (c *Client) AssumeRole(ctx context.Context, params *AssumeRoleInput, optFns ...func(*Options)) (*AssumeRoleOutput, error) { + if params == nil { + params = &AssumeRoleInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "AssumeRole", params, optFns, c.addOperationAssumeRoleMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*AssumeRoleOutput) + out.ResultMetadata = metadata + return out, nil +} + +type AssumeRoleInput struct { + + // The Amazon Resource Name (ARN) of the role to assume. + // + // This member is required. + RoleArn *string + + // An identifier for the assumed role session. Use the role session name to + // uniquely identify a session when the same role is assumed by different + // principals or for different reasons. In cross-account scenarios, the role + // session name is visible to, and can be logged by, the account that owns the role. + // The role session name is also used in the ARN of the assumed role principal. + // This means that subsequent cross-account API requests that use the temporary + // security credentials will expose the role session name to the external account + // in their CloudTrail logs.
The regex used to validate this parameter is a string + // of characters consisting of upper- and lower-case alphanumeric characters with + // no spaces. You can also include underscores or any of the following characters: + // =,.@- + // + // This member is required. + RoleSessionName *string + + // The duration, in seconds, of the role session. The value specified can range + // from 900 seconds (15 minutes) up to the maximum session duration set for the + // role. The maximum session duration setting can have a value from 1 hour to 12 + // hours. If you specify a value higher than this setting or the administrator + // setting (whichever is lower), the operation fails. For example, if you specify a + // session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. Role chaining limits your Amazon Web + // Services CLI or Amazon Web Services API role session to a maximum of one hour. + // When you use the AssumeRole API operation to assume a role, you can specify the + // duration of your role session with the DurationSeconds parameter. You can + // specify a parameter value of up to 43200 seconds (12 hours), depending on the + // maximum session duration setting for your role. However, if you assume a role + // using role chaining and provide a DurationSeconds parameter value greater than + // one hour, the operation fails. To learn how to view the maximum value for your + // role, see View the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. By default, the value is set to 3600 seconds. The + // DurationSeconds parameter is separate from the duration of a console session + // that you might request using the returned credentials. The request to the + // federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int32 + + // A unique identifier that might be required when you assume a role in another + // account. If the administrator of the account to which the role belongs provided + // you with an external ID, then provide that value in the ExternalId parameter. + // This value can be any string, such as a passphrase or account number. A + // cross-account role is usually set up to trust everyone in an account. Therefore, + // the administrator of the trusting account might send an external ID to the + // administrator of the trusted account. That way, only someone with the ID can + // assume the role, rather than everyone in the account. For more information about + // the external ID, see How to Use an External ID When Granting Access to Your + // Amazon Web Services Resources to a Third Party (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) + // in the IAM User Guide. The regex used to validate this parameter is a string of + // characters consisting of upper- and lower-case alphanumeric characters with no + // spaces. 
You can also include underscores or any of the following characters: + // =,.@:/- + ExternalId *string + + // An IAM policy in JSON format that you want to use as an inline session policy. + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use the + // role's temporary credentials in subsequent Amazon Web Services API calls to + // access resources in the account that owns the role. You cannot use session + // policies to grant more permissions than those allowed by the identity-based + // policy of the role that is being assumed. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. The plaintext that you use for both inline and managed + // session policies can't exceed 2,048 characters. The JSON policy characters can + // be any ASCII character from the space character to the end of the valid + // character list (\u0020 through \u00FF). It can also include the tab (\u0009), + // linefeed (\u000A), and carriage return (\u000D) characters. An Amazon Web + // Services conversion compresses the passed inline session policy, managed policy + // ARNs, and session tags into a packed binary format that has a separate limit. + // Your request can fail for this limit even if your plaintext meets the other + // requirements. The PackedPolicySize response element indicates by percentage how + // close the policies and tags for your request are to the upper size limit. + Policy *string + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to + // use as managed session policies. The policies must exist in the same account as + // the role. This parameter is optional. You can provide up to 10 managed policy + // ARNs. However, the plaintext that you use for both inline and managed session + // policies can't exceed 2,048 characters. For more information about ARNs, see + // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. An Amazon Web Services conversion + // compresses the passed inline session policy, managed policy ARNs, and session + // tags into a packed binary format that has a separate limit. Your request can + // fail for this limit even if your plaintext meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. Passing policies to this + // operation returns new temporary credentials. The resulting session's permissions + // are the intersection of the role's identity-based policy and the session + // policies. You can use the role's temporary credentials in subsequent Amazon Web + // Services API calls to access resources in the account that owns the role. You + // cannot use session policies to grant more permissions than those allowed by the + // identity-based policy of the role that is being assumed. For more information, + // see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []types.PolicyDescriptorType + + // Reserved for future use. 
+ ProvidedContexts []types.ProvidedContext + + // The identification number of the MFA device that is associated with the user + // who is making the AssumeRole call. Specify this value if the trust policy of + // the role being assumed includes a condition that requires MFA authentication. + // The value is either the serial number for a hardware device (such as + // GAHT12345678 ) or an Amazon Resource Name (ARN) for a virtual device (such as + // arn:aws:iam::123456789012:mfa/user ). The regex used to validate this parameter + // is a string of characters consisting of upper- and lower-case alphanumeric + // characters with no spaces. You can also include underscores or any of the + // following characters: =,.@- + SerialNumber *string + + // The source identity specified by the principal that is calling the AssumeRole + // operation. You can require users to specify a source identity when they assume a + // role. You do this by using the sts:SourceIdentity condition key in a role trust + // policy. You can use source identity information in CloudTrail logs to determine + // who took actions with a role. You can use the aws:SourceIdentity condition key + // to further control access to Amazon Web Services resources based on the value of + // source identity. For more information about using source identity, see Monitor + // and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // in the IAM User Guide. The regex used to validate this parameter is a string of + // characters consisting of upper- and lower-case alphanumeric characters with no + // spaces. You can also include underscores or any of the following characters: + // =,.@-. You cannot use a value that begins with the text aws: . This prefix is + // reserved for Amazon Web Services internal use. + SourceIdentity *string + + // A list of session tags that you want to pass. Each session tag consists of a + // key name and an associated value. For more information about session tags, see + // Tagging Amazon Web Services STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // in the IAM User Guide. This parameter is optional. You can pass up to 50 session + // tags. The plaintext session tag keys can’t exceed 128 characters, and the values + // can’t exceed 256 characters. For these and additional limits, see IAM and STS + // Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. An Amazon Web Services conversion compresses the passed + // inline session policy, managed policy ARNs, and session tags into a packed + // binary format that has a separate limit. Your request can fail for this limit + // even if your plaintext meets the other requirements. The PackedPolicySize + // response element indicates by percentage how close the policies and tags for + // your request are to the upper size limit. You can pass a session tag with the + // same key as a tag that is already attached to the role. When you do, session + // tags override a role tag with the same key. Tag key–value pairs are not case + // sensitive, but case is preserved. This means that you cannot have separate + // Department and department tag keys. Assume that the role has the Department = + // Marketing tag and you pass the department = engineering session tag. 
Department + // and department are not saved as separate tags, and the session tag passed in + // the request takes precedence over the role tag. Additionally, if you used + // temporary credentials to perform this operation, the new session inherits any + // transitive session tags from the calling session. If you pass a session tag with + // the same key as an inherited tag, the operation fails. To view the inherited + // tags for a session, see the CloudTrail logs. For more information, see Viewing + // Session Tags in CloudTrail (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_ctlogs) + // in the IAM User Guide. + Tags []types.Tag + + // The value provided by the MFA device, if the trust policy of the role being + // assumed requires MFA. (In other words, if the policy includes a condition that + // tests for MFA). If the role being assumed requires MFA and if the TokenCode + // value is missing or expired, the AssumeRole call returns an "access denied" + // error. The format for this parameter, as described by its regex pattern, is a + // sequence of six numeric digits. + TokenCode *string + + // A list of keys for session tags that you want to set as transitive. If you set + // a tag key as transitive, the corresponding key and value passes to subsequent + // sessions in a role chain. For more information, see Chaining Roles with Session + // Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) + // in the IAM User Guide. This parameter is optional. When you set session tags as + // transitive, the session policy and session tags packed binary limit is not + // affected. If you choose not to specify a transitive tag key, then no tags are + // passed from this session to any subsequent sessions. + TransitiveTagKeys []string + + noSmithyDocumentSerde +} + +// Contains the response to a successful AssumeRole request, including temporary +// Amazon Web Services credentials that can be used to make Amazon Web Services +// requests. +type AssumeRoleOutput struct { + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. For + // example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the + // RoleSessionName that you specified when you called AssumeRole . + AssumedRoleUser *types.AssumedRoleUser + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. The size of the security token + // that STS API operations return is not fixed. We strongly recommend that you make + // no assumptions about the maximum size. + Credentials *types.Credentials + + // A percentage value that indicates the packed size of the session policies and + // session tags combined passed in the request. The request fails if the packed + // size is greater than 100 percent, which means the policies and tags exceeded the + // allowed space. + PackedPolicySize *int32 + + // The source identity specified by the principal that is calling the AssumeRole + // operation. You can require users to specify a source identity when they assume a + // role. You do this by using the sts:SourceIdentity condition key in a role trust + // policy. You can use source identity information in CloudTrail logs to determine + // who took actions with a role. 
You can use the aws:SourceIdentity condition key + // to further control access to Amazon Web Services resources based on the value of + // source identity. For more information about using source identity, see Monitor + // and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // in the IAM User Guide. The regex used to validate this parameter is a string of + // characters consisting of upper- and lower-case alphanumeric characters with no + // spaces. You can also include underscores or any of the following characters: + // =,.@- + SourceIdentity *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationAssumeRoleMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRole{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpAssumeRole{}, middleware.After) + if err != nil { + return err + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addAssumeRoleResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addOpAssumeRoleValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRole(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opAssumeRole(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "sts", + OperationName: "AssumeRole", + } +} + +// PresignAssumeRole is used to generate a presigned HTTP Request which contains +// presigned URL, signed headers and HTTP method used. 
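+//
+// A minimal usage sketch (illustrative only, not part of the generated API;
+// assumes ctx, a loaded aws.Config named cfg, and placeholder ARN and session
+// values):
+//
+//	client := sts.NewFromConfig(cfg)
+//	presigner := sts.NewPresignClient(client)
+//	req, err := presigner.PresignAssumeRole(ctx, &sts.AssumeRoleInput{
+//		RoleArn:         aws.String("arn:aws:iam::123456789012:role/demo-role"),
+//		RoleSessionName: aws.String("demo-session"),
+//	})
+//	if err != nil {
+//		// handle error
+//	}
+//	// req.URL, req.Method, and req.SignedHeader describe the presigned request.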
+func (c *PresignClient) PresignAssumeRole(ctx context.Context, params *AssumeRoleInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) { + if params == nil { + params = &AssumeRoleInput{} + } + options := c.options.copy() + for _, fn := range optFns { + fn(&options) + } + clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) + + result, _, err := c.client.invokeOperation(ctx, "AssumeRole", params, clientOptFns, + c.client.addOperationAssumeRoleMiddlewares, + presignConverter(options).convertToPresignMiddleware, + ) + if err != nil { + return nil, err + } + + out := result.(*v4.PresignedHTTPRequest) + return out, nil +} + +type opAssumeRoleResolveEndpointMiddleware struct { + EndpointResolver EndpointResolverV2 + BuiltInResolver builtInParameterResolver +} + +func (*opAssumeRoleResolveEndpointMiddleware) ID() string { + return "ResolveEndpointV2" +} + +func (m *opAssumeRoleResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.EndpointResolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + params := EndpointParameters{} + + m.BuiltInResolver.ResolveBuiltIns(¶ms) + + var resolvedEndpoint smithyendpoints.Endpoint + resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL = &resolvedEndpoint.URI + + for k := range resolvedEndpoint.Headers { + req.Header.Set( + k, + resolvedEndpoint.Headers.Get(k), + ) + } + + authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) + if err != nil { + var nfe *internalauth.NoAuthenticationSchemesFoundError + if errors.As(err, &nfe) { + // if no auth scheme is found, default to sigv4 + signingName := "sts" + signingRegion := m.BuiltInResolver.(*builtInResolver).Region + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + + } + var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError + if errors.As(err, &ue) { + return out, metadata, fmt.Errorf( + "This operation requests signer version(s) %v but the client only supports %v", + ue.UnsupportedSchemes, + internalauth.SupportedSchemes, + ) + } + } + + for _, authScheme := range authSchemes { + switch authScheme.(type) { + case *internalauth.AuthenticationSchemeV4: + v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) + var signingName, signingRegion string + if v4Scheme.SigningName == nil { + signingName = "sts" + } else { + signingName = *v4Scheme.SigningName + } + if v4Scheme.SigningRegion == nil { + signingRegion = m.BuiltInResolver.(*builtInResolver).Region + } else { + signingRegion = *v4Scheme.SigningRegion + } + if v4Scheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. 
+ ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + break + case *internalauth.AuthenticationSchemeV4A: + v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) + if v4aScheme.SigningName == nil { + v4aScheme.SigningName = aws.String("sts") + } + if v4aScheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. + ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) + ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) + break + case *internalauth.AuthenticationSchemeNone: + break + } + } + + return next.HandleSerialize(ctx, in) +} + +func addAssumeRoleResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { + return stack.Serialize.Insert(&opAssumeRoleResolveEndpointMiddleware{ + EndpointResolver: options.EndpointResolverV2, + BuiltInResolver: &builtInResolver{ + Region: options.Region, + UseDualStack: options.EndpointOptions.UseDualStackEndpoint, + UseFIPS: options.EndpointOptions.UseFIPSEndpoint, + Endpoint: options.BaseEndpoint, + }, + }, "ResolveEndpoint", middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go new file mode 100644 index 00000000000..9c33720d41a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go @@ -0,0 +1,482 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a set of temporary security credentials for users who have been +// authenticated via a SAML authentication response. This operation provides a +// mechanism for tying an enterprise identity store or directory to role-based +// Amazon Web Services access without user-specific credentials or configuration. +// For a comparison of AssumeRoleWithSAML with the other API operations that +// produce temporary credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. The temporary security credentials returned by this +// operation consist of an access key ID, a secret access key, and a security +// token. Applications can use these temporary security credentials to sign calls +// to Amazon Web Services services. Session Duration By default, the temporary +// security credentials created by AssumeRoleWithSAML last for one hour. However, +// you can use the optional DurationSeconds parameter to specify the duration of +// your session. 
Your role session lasts for the duration that you specify, or +// until the time specified in the SAML authentication response's +// SessionNotOnOrAfter value, whichever is shorter. You can provide a +// DurationSeconds value from 900 seconds (15 minutes) up to the maximum session +// duration setting for the role. This setting can have a value from 1 hour to 12 +// hours. To learn how to view the maximum value for your role, see View the +// Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you use +// the AssumeRole* API operations or the assume-role* CLI commands. However the +// limit does not apply when you use those operations to create a console URL. For +// more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. Role chaining (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-role-chaining) +// limits your CLI or Amazon Web Services API role session to a maximum of one +// hour. When you use the AssumeRole API operation to assume a role, you can +// specify the duration of your role session with the DurationSeconds parameter. +// You can specify a parameter value of up to 43200 seconds (12 hours), depending +// on the maximum session duration setting for your role. However, if you assume a +// role using role chaining and provide a DurationSeconds parameter value greater +// than one hour, the operation fails. Permissions The temporary security +// credentials created by AssumeRoleWithSAML can be used to make API calls to any +// Amazon Web Services service with the following exception: you cannot call the +// STS GetFederationToken or GetSessionToken API operations. (Optional) You can +// pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policy Amazon +// Resource Names (ARNs) to use as managed session policies. The plaintext that you +// use for both inline and managed session policies can't exceed 2,048 characters. +// Passing policies to this operation returns new temporary credentials. The +// resulting session's permissions are the intersection of the role's +// identity-based policy and the session policies. You can use the role's temporary +// credentials in subsequent Amazon Web Services API calls to access resources in +// the account that owns the role. You cannot use session policies to grant more +// permissions than those allowed by the identity-based policy of the role that is +// being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. Calling AssumeRoleWithSAML does not require the use of +// Amazon Web Services security credentials. The identity of the caller is +// validated by using keys in the metadata document that is uploaded for the SAML +// provider entity for your identity provider. Calling AssumeRoleWithSAML can +// result in an entry in your CloudTrail logs. The entry includes the value in the +// NameID element of the SAML assertion. 
We recommend that you use a NameIDType +// that is not associated with any personally identifiable information (PII). For +// example, you could instead use the persistent identifier ( +// urn:oasis:names:tc:SAML:2.0:nameid-format:persistent ). Tags (Optional) You can +// configure your IdP to pass attributes into your SAML assertion as session tags. +// Each session tag consists of a key name and an associated value. For more +// information about session tags, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. You can pass up to 50 session tags. The plaintext session +// tag keys can’t exceed 128 characters and the values can’t exceed 256 characters. +// For these and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) +// in the IAM User Guide. An Amazon Web Services conversion compresses the passed +// inline session policy, managed policy ARNs, and session tags into a packed +// binary format that has a separate limit. Your request can fail for this limit +// even if your plaintext meets the other requirements. The PackedPolicySize +// response element indicates by percentage how close the policies and tags for +// your request are to the upper size limit. You can pass a session tag with the +// same key as a tag that is attached to the role. When you do, session tags +// override the role's tags with the same key. An administrator must grant you the +// permissions necessary to pass session tags. The administrator can also create +// granular permissions to allow you to pass only specific session tags. For more +// information, see Tutorial: Using Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. You can set the session tags as transitive. Transitive +// tags persist during role chaining. For more information, see Chaining Roles +// with Session Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. SAML Configuration Before your application can call +// AssumeRoleWithSAML , you must configure your SAML identity provider (IdP) to +// issue the claims required by Amazon Web Services. Additionally, you must use +// Identity and Access Management (IAM) to create a SAML provider entity in your +// Amazon Web Services account that represents your identity provider. You must +// also create an IAM role that specifies this SAML provider in its trust policy. +// For more information, see the following resources: +// - About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) +// in the IAM User Guide. +// - Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) +// in the IAM User Guide. +// - Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) +// in the IAM User Guide. +// - Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) +// in the IAM User Guide. 
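+//
+// A minimal calling sketch (illustrative only, not part of the generated API;
+// assumes ctx, a loaded aws.Config named cfg, a samlAssertion string holding
+// the base64 encoded SAML response from the IdP, and placeholder ARNs):
+//
+//	client := sts.NewFromConfig(cfg)
+//	out, err := client.AssumeRoleWithSAML(ctx, &sts.AssumeRoleWithSAMLInput{
+//		PrincipalArn:  aws.String("arn:aws:iam::123456789012:saml-provider/MySAMLIdP"),
+//		RoleArn:       aws.String("arn:aws:iam::123456789012:role/demo-role"),
+//		SAMLAssertion: aws.String(samlAssertion),
+//	})
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = out.Credentials // temporary access key ID, secret access key, and session token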
+func (c *Client) AssumeRoleWithSAML(ctx context.Context, params *AssumeRoleWithSAMLInput, optFns ...func(*Options)) (*AssumeRoleWithSAMLOutput, error) { + if params == nil { + params = &AssumeRoleWithSAMLInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "AssumeRoleWithSAML", params, optFns, c.addOperationAssumeRoleWithSAMLMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*AssumeRoleWithSAMLOutput) + out.ResultMetadata = metadata + return out, nil +} + +type AssumeRoleWithSAMLInput struct { + + // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes the + // IdP. + // + // This member is required. + PrincipalArn *string + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. + // + // This member is required. + RoleArn *string + + // The base64 encoded SAML authentication response provided by the IdP. For more + // information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) + // in the IAM User Guide. + // + // This member is required. + SAMLAssertion *string + + // The duration, in seconds, of the role session. Your role session lasts for the + // duration that you specify for the DurationSeconds parameter, or until the time + // specified in the SAML authentication response's SessionNotOnOrAfter value, + // whichever is shorter. You can provide a DurationSeconds value from 900 seconds + // (15 minutes) up to the maximum session duration setting for the role. This + // setting can have a value from 1 hour to 12 hours. If you specify a value higher + // than this setting, the operation fails. For example, if you specify a session + // duration of 12 hours, but your administrator set the maximum session duration to + // 6 hours, your operation fails. To learn how to view the maximum value for your + // role, see View the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. By default, the value is set to 3600 seconds. The + // DurationSeconds parameter is separate from the duration of a console session + // that you might request using the returned credentials. The request to the + // federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int32 + + // An IAM policy in JSON format that you want to use as an inline session policy. + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use the + // role's temporary credentials in subsequent Amazon Web Services API calls to + // access resources in the account that owns the role. You cannot use session + // policies to grant more permissions than those allowed by the identity-based + // policy of the role that is being assumed. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. 
The plaintext that you use for both inline and managed + // session policies can't exceed 2,048 characters. The JSON policy characters can + // be any ASCII character from the space character to the end of the valid + // character list (\u0020 through \u00FF). It can also include the tab (\u0009), + // linefeed (\u000A), and carriage return (\u000D) characters. An Amazon Web + // Services conversion compresses the passed inline session policy, managed policy + // ARNs, and session tags into a packed binary format that has a separate limit. + // Your request can fail for this limit even if your plaintext meets the other + // requirements. The PackedPolicySize response element indicates by percentage how + // close the policies and tags for your request are to the upper size limit. + Policy *string + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to + // use as managed session policies. The policies must exist in the same account as + // the role. This parameter is optional. You can provide up to 10 managed policy + // ARNs. However, the plaintext that you use for both inline and managed session + // policies can't exceed 2,048 characters. For more information about ARNs, see + // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. An Amazon Web Services conversion + // compresses the passed inline session policy, managed policy ARNs, and session + // tags into a packed binary format that has a separate limit. Your request can + // fail for this limit even if your plaintext meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. Passing policies to this + // operation returns new temporary credentials. The resulting session's permissions + // are the intersection of the role's identity-based policy and the session + // policies. You can use the role's temporary credentials in subsequent Amazon Web + // Services API calls to access resources in the account that owns the role. You + // cannot use session policies to grant more permissions than those allowed by the + // identity-based policy of the role that is being assumed. For more information, + // see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []types.PolicyDescriptorType + + noSmithyDocumentSerde +} + +// Contains the response to a successful AssumeRoleWithSAML request, including +// temporary Amazon Web Services credentials that can be used to make Amazon Web +// Services requests. +type AssumeRoleWithSAMLOutput struct { + + // The identifiers for the temporary security credentials that the operation + // returns. + AssumedRoleUser *types.AssumedRoleUser + + // The value of the Recipient attribute of the SubjectConfirmationData element of + // the SAML assertion. + Audience *string + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. The size of the security token + // that STS API operations return is not fixed. We strongly recommend that you make + // no assumptions about the maximum size. + Credentials *types.Credentials + + // The value of the Issuer element of the SAML assertion. 
+ Issuer *string + + // A hash value based on the concatenation of the following: + // - The Issuer response value. + // - The Amazon Web Services account ID. + // - The friendly name (the last part of the ARN) of the SAML provider in IAM. + // The combination of NameQualifier and Subject can be used to uniquely identify a + // user. The following pseudocode shows how the hash value is calculated: BASE64 ( + // SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" ) ) + NameQualifier *string + + // A percentage value that indicates the packed size of the session policies and + // session tags combined passed in the request. The request fails if the packed + // size is greater than 100 percent, which means the policies and tags exceeded the + // allowed space. + PackedPolicySize *int32 + + // The value in the SourceIdentity attribute in the SAML assertion. You can + // require users to set a source identity value when they assume a role. You do + // this by using the sts:SourceIdentity condition key in a role trust policy. That + // way, actions that are taken with the role are associated with that user. After + // the source identity is set, the value cannot be changed. It is present in the + // request for all actions that are taken by the role and persists across chained + // role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining) + // sessions. You can configure your SAML identity provider to use an attribute + // associated with your users, like user name or email, as the source identity when + // calling AssumeRoleWithSAML . You do this by adding an attribute to the SAML + // assertion. For more information about using source identity, see Monitor and + // control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // in the IAM User Guide. The regex used to validate this parameter is a string of + // characters consisting of upper- and lower-case alphanumeric characters with no + // spaces. You can also include underscores or any of the following characters: + // =,.@- + SourceIdentity *string + + // The value of the NameID element in the Subject element of the SAML assertion. + Subject *string + + // The format of the name ID, as defined by the Format attribute in the NameID + // element of the SAML assertion. Typical examples of the format are transient or + // persistent . If the format includes the prefix + // urn:oasis:names:tc:SAML:2.0:nameid-format , that prefix is removed. For example, + // urn:oasis:names:tc:SAML:2.0:nameid-format:transient is returned as transient . + // If the format includes any other prefix, the format is returned with no + // modifications. + SubjectType *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationAssumeRoleWithSAMLMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRoleWithSAML{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpAssumeRoleWithSAML{}, middleware.After) + if err != nil { + return err + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addAssumeRoleWithSAMLResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addOpAssumeRoleWithSAMLValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRoleWithSAML(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opAssumeRoleWithSAML(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "sts", + OperationName: "AssumeRoleWithSAML", + } +} + +type opAssumeRoleWithSAMLResolveEndpointMiddleware struct { + EndpointResolver EndpointResolverV2 + BuiltInResolver builtInParameterResolver +} + +func (*opAssumeRoleWithSAMLResolveEndpointMiddleware) ID() string { + return "ResolveEndpointV2" +} + +func (m *opAssumeRoleWithSAMLResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.EndpointResolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + params := EndpointParameters{} + + m.BuiltInResolver.ResolveBuiltIns(¶ms) + + var resolvedEndpoint smithyendpoints.Endpoint + resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) + if err != nil { + return out, metadata, 
fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL = &resolvedEndpoint.URI + + for k := range resolvedEndpoint.Headers { + req.Header.Set( + k, + resolvedEndpoint.Headers.Get(k), + ) + } + + authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) + if err != nil { + var nfe *internalauth.NoAuthenticationSchemesFoundError + if errors.As(err, &nfe) { + // if no auth scheme is found, default to sigv4 + signingName := "sts" + signingRegion := m.BuiltInResolver.(*builtInResolver).Region + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + + } + var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError + if errors.As(err, &ue) { + return out, metadata, fmt.Errorf( + "This operation requests signer version(s) %v but the client only supports %v", + ue.UnsupportedSchemes, + internalauth.SupportedSchemes, + ) + } + } + + for _, authScheme := range authSchemes { + switch authScheme.(type) { + case *internalauth.AuthenticationSchemeV4: + v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) + var signingName, signingRegion string + if v4Scheme.SigningName == nil { + signingName = "sts" + } else { + signingName = *v4Scheme.SigningName + } + if v4Scheme.SigningRegion == nil { + signingRegion = m.BuiltInResolver.(*builtInResolver).Region + } else { + signingRegion = *v4Scheme.SigningRegion + } + if v4Scheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. + ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + break + case *internalauth.AuthenticationSchemeV4A: + v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) + if v4aScheme.SigningName == nil { + v4aScheme.SigningName = aws.String("sts") + } + if v4aScheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. 
+ ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) + ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) + break + case *internalauth.AuthenticationSchemeNone: + break + } + } + + return next.HandleSerialize(ctx, in) +} + +func addAssumeRoleWithSAMLResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { + return stack.Serialize.Insert(&opAssumeRoleWithSAMLResolveEndpointMiddleware{ + EndpointResolver: options.EndpointResolverV2, + BuiltInResolver: &builtInResolver{ + Region: options.Region, + UseDualStack: options.EndpointOptions.UseDualStackEndpoint, + UseFIPS: options.EndpointOptions.UseFIPSEndpoint, + Endpoint: options.BaseEndpoint, + }, + }, "ResolveEndpoint", middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go new file mode 100644 index 00000000000..fa4a6084591 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go @@ -0,0 +1,501 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a set of temporary security credentials for users who have been +// authenticated in a mobile or web application with a web identity provider. +// Example providers include the OAuth 2.0 providers Login with Amazon and +// Facebook, or any OpenID Connect-compatible identity provider such as Google or +// Amazon Cognito federated identities (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html) +// . For mobile applications, we recommend that you use Amazon Cognito. You can use +// Amazon Cognito with the Amazon Web Services SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) +// and the Amazon Web Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/) +// to uniquely identify a user. You can also supply the user with a consistent +// identity throughout the lifetime of an application. To learn more about Amazon +// Cognito, see Amazon Cognito identity pools (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html) +// in Amazon Cognito Developer Guide. Calling AssumeRoleWithWebIdentity does not +// require the use of Amazon Web Services security credentials. Therefore, you can +// distribute an application (for example, on mobile devices) that requests +// temporary security credentials without including long-term Amazon Web Services +// credentials in the application. You also don't need to deploy server-based proxy +// services that use long-term Amazon Web Services credentials. Instead, the +// identity of the caller is validated by using a token from the web identity +// provider. 
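A minimal caller-side sketch of that flow follows. It is illustrative only: the role ARN, session name, and token are placeholders, and the Region is assumed to be hard-coded.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	ctx := context.Background()

	// AssumeRoleWithWebIdentity is an unsigned operation, so the client
	// needs no AWS credentials of its own; only a Region to resolve the endpoint.
	client := sts.New(sts.Options{Region: "us-east-1"})

	out, err := client.AssumeRoleWithWebIdentity(ctx, &sts.AssumeRoleWithWebIdentityInput{
		RoleArn:          aws.String("arn:aws:iam::123456789012:role/demo-role"), // placeholder
		RoleSessionName:  aws.String("demo-session"),                             // placeholder
		WebIdentityToken: aws.String("<token from your identity provider>"),      // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}

	// The returned Credentials carry an access key ID, secret key, and session token.
	fmt.Println("assumed:", aws.ToString(out.AssumedRoleUser.Arn))
}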
For a comparison of AssumeRoleWithWebIdentity with the other API +// operations that produce temporary credentials, see Requesting Temporary +// Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. The temporary security credentials returned by this API +// consist of an access key ID, a secret access key, and a security token. +// Applications can use these temporary security credentials to sign calls to +// Amazon Web Services service API operations. Session Duration By default, the +// temporary security credentials created by AssumeRoleWithWebIdentity last for +// one hour. However, you can use the optional DurationSeconds parameter to +// specify the duration of your session. You can provide a value from 900 seconds +// (15 minutes) up to the maximum session duration setting for the role. This +// setting can have a value from 1 hour to 12 hours. To learn how to view the +// maximum value for your role, see View the Maximum Session Duration Setting for +// a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you use +// the AssumeRole* API operations or the assume-role* CLI commands. However the +// limit does not apply when you use those operations to create a console URL. For +// more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. Permissions The temporary security credentials created by +// AssumeRoleWithWebIdentity can be used to make API calls to any Amazon Web +// Services service with the following exception: you cannot call the STS +// GetFederationToken or GetSessionToken API operations. (Optional) You can pass +// inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policy Amazon +// Resource Names (ARNs) to use as managed session policies. The plaintext that you +// use for both inline and managed session policies can't exceed 2,048 characters. +// Passing policies to this operation returns new temporary credentials. The +// resulting session's permissions are the intersection of the role's +// identity-based policy and the session policies. You can use the role's temporary +// credentials in subsequent Amazon Web Services API calls to access resources in +// the account that owns the role. You cannot use session policies to grant more +// permissions than those allowed by the identity-based policy of the role that is +// being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. Tags (Optional) You can configure your IdP to pass +// attributes into your web identity token as session tags. Each session tag +// consists of a key name and an associated value. For more information about +// session tags, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. You can pass up to 50 session tags. 
The plaintext session +// tag keys can’t exceed 128 characters and the values can’t exceed 256 characters. +// For these and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) +// in the IAM User Guide. An Amazon Web Services conversion compresses the passed +// inline session policy, managed policy ARNs, and session tags into a packed +// binary format that has a separate limit. Your request can fail for this limit +// even if your plaintext meets the other requirements. The PackedPolicySize +// response element indicates by percentage how close the policies and tags for +// your request are to the upper size limit. You can pass a session tag with the +// same key as a tag that is attached to the role. When you do, the session tag +// overrides the role tag with the same key. An administrator must grant you the +// permissions necessary to pass session tags. The administrator can also create +// granular permissions to allow you to pass only specific session tags. For more +// information, see Tutorial: Using Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. You can set the session tags as transitive. Transitive +// tags persist during role chaining. For more information, see Chaining Roles +// with Session Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. Identities Before your application can call +// AssumeRoleWithWebIdentity , you must have an identity token from a supported +// identity provider and create a role that the application can assume. The role +// that your application assumes must trust the identity provider that is +// associated with the identity token. In other words, the identity provider must +// be specified in the role's trust policy. Calling AssumeRoleWithWebIdentity can +// result in an entry in your CloudTrail logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims) +// of the provided web identity token. We recommend that you avoid using any +// personally identifiable information (PII) in this field. For example, you could +// instead use a GUID or a pairwise identifier, as suggested in the OIDC +// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes) +// . For more information about how to use web identity federation and the +// AssumeRoleWithWebIdentity API, see the following resources: +// - Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) +// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) +// . +// - Web Identity Federation Playground (https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/) +// . Walk through the process of authenticating through Login with Amazon, +// Facebook, or Google, getting temporary security credentials, and then using +// those credentials to make a request to Amazon Web Services. +// - Amazon Web Services SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) +// and Amazon Web Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/) +// . 
These toolkits contain sample apps that show how to invoke the identity +// providers. The toolkits then show how to use the information from these +// providers to get and use temporary security credentials. +// - Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications) +// . This article discusses web identity federation and shows an example of how to +// use web identity federation to get access to content in Amazon S3. +func (c *Client) AssumeRoleWithWebIdentity(ctx context.Context, params *AssumeRoleWithWebIdentityInput, optFns ...func(*Options)) (*AssumeRoleWithWebIdentityOutput, error) { + if params == nil { + params = &AssumeRoleWithWebIdentityInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "AssumeRoleWithWebIdentity", params, optFns, c.addOperationAssumeRoleWithWebIdentityMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*AssumeRoleWithWebIdentityOutput) + out.ResultMetadata = metadata + return out, nil +} + +type AssumeRoleWithWebIdentityInput struct { + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. + // + // This member is required. + RoleArn *string + + // An identifier for the assumed role session. Typically, you pass the name or + // identifier that is associated with the user who is using your application. That + // way, the temporary security credentials that your application will use are + // associated with that user. This session name is included as part of the ARN and + // assumed role ID in the AssumedRoleUser response element. The regex used to + // validate this parameter is a string of characters consisting of upper- and + // lower-case alphanumeric characters with no spaces. You can also include + // underscores or any of the following characters: =,.@- + // + // This member is required. + RoleSessionName *string + + // The OAuth 2.0 access token or OpenID Connect ID token that is provided by the + // identity provider. Your application must get this token by authenticating the + // user who is using your application with a web identity provider before the + // application makes an AssumeRoleWithWebIdentity call. Only tokens with RSA + // algorithms (RS256) are supported. + // + // This member is required. + WebIdentityToken *string + + // The duration, in seconds, of the role session. The value can range from 900 + // seconds (15 minutes) up to the maximum session duration setting for the role. + // This setting can have a value from 1 hour to 12 hours. If you specify a value + // higher than this setting, the operation fails. For example, if you specify a + // session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. To learn how to view the maximum + // value for your role, see View the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. By default, the value is set to 3600 seconds. The + // DurationSeconds parameter is separate from the duration of a console session + // that you might request using the returned credentials. The request to the + // federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. 
For more + // information, see Creating a URL that Enables Federated Users to Access the + // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int32 + + // An IAM policy in JSON format that you want to use as an inline session policy. + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use the + // role's temporary credentials in subsequent Amazon Web Services API calls to + // access resources in the account that owns the role. You cannot use session + // policies to grant more permissions than those allowed by the identity-based + // policy of the role that is being assumed. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. The plaintext that you use for both inline and managed + // session policies can't exceed 2,048 characters. The JSON policy characters can + // be any ASCII character from the space character to the end of the valid + // character list (\u0020 through \u00FF). It can also include the tab (\u0009), + // linefeed (\u000A), and carriage return (\u000D) characters. An Amazon Web + // Services conversion compresses the passed inline session policy, managed policy + // ARNs, and session tags into a packed binary format that has a separate limit. + // Your request can fail for this limit even if your plaintext meets the other + // requirements. The PackedPolicySize response element indicates by percentage how + // close the policies and tags for your request are to the upper size limit. + Policy *string + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to + // use as managed session policies. The policies must exist in the same account as + // the role. This parameter is optional. You can provide up to 10 managed policy + // ARNs. However, the plaintext that you use for both inline and managed session + // policies can't exceed 2,048 characters. For more information about ARNs, see + // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. An Amazon Web Services conversion + // compresses the passed inline session policy, managed policy ARNs, and session + // tags into a packed binary format that has a separate limit. Your request can + // fail for this limit even if your plaintext meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. Passing policies to this + // operation returns new temporary credentials. The resulting session's permissions + // are the intersection of the role's identity-based policy and the session + // policies. You can use the role's temporary credentials in subsequent Amazon Web + // Services API calls to access resources in the account that owns the role. You + // cannot use session policies to grant more permissions than those allowed by the + // identity-based policy of the role that is being assumed. 
For more information, + // see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []types.PolicyDescriptorType + + // The fully qualified host component of the domain name of the OAuth 2.0 identity + // provider. Do not specify this value for an OpenID Connect identity provider. + // Currently www.amazon.com and graph.facebook.com are the only supported identity + // providers for OAuth 2.0 access tokens. Do not include URL schemes and port + // numbers. Do not specify this value for OpenID Connect ID tokens. + ProviderId *string + + noSmithyDocumentSerde +} + +// Contains the response to a successful AssumeRoleWithWebIdentity request, +// including temporary Amazon Web Services credentials that can be used to make +// Amazon Web Services requests. +type AssumeRoleWithWebIdentityOutput struct { + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. For + // example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the + // RoleSessionName that you specified when you called AssumeRole . + AssumedRoleUser *types.AssumedRoleUser + + // The intended audience (also known as client ID) of the web identity token. This + // is traditionally the client identifier issued to the application that requested + // the web identity token. + Audience *string + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security token. The size of the security token that STS API + // operations return is not fixed. We strongly recommend that you make no + // assumptions about the maximum size. + Credentials *types.Credentials + + // A percentage value that indicates the packed size of the session policies and + // session tags combined passed in the request. The request fails if the packed + // size is greater than 100 percent, which means the policies and tags exceeded the + // allowed space. + PackedPolicySize *int32 + + // The issuing authority of the web identity token presented. For OpenID Connect + // ID tokens, this contains the value of the iss field. For OAuth 2.0 access + // tokens, this contains the value of the ProviderId parameter that was passed in + // the AssumeRoleWithWebIdentity request. + Provider *string + + // The value of the source identity that is returned in the JSON web token (JWT) + // from the identity provider. You can require users to set a source identity value + // when they assume a role. You do this by using the sts:SourceIdentity condition + // key in a role trust policy. That way, actions that are taken with the role are + // associated with that user. After the source identity is set, the value cannot be + // changed. It is present in the request for all actions that are taken by the role + // and persists across chained role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining) + // sessions. You can configure your identity provider to use an attribute + // associated with your users, like user name or email, as the source identity when + // calling AssumeRoleWithWebIdentity . You do this by adding a claim to the JSON + // web token. 
To learn more about OIDC tokens and claims, see Using Tokens with + // User Pools (https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html) + // in the Amazon Cognito Developer Guide. For more information about using source + // identity, see Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // in the IAM User Guide. The regex used to validate this parameter is a string of + // characters consisting of upper- and lower-case alphanumeric characters with no + // spaces. You can also include underscores or any of the following characters: + // =,.@- + SourceIdentity *string + + // The unique user identifier that is returned by the identity provider. This + // identifier is associated with the WebIdentityToken that was submitted with the + // AssumeRoleWithWebIdentity call. The identifier is typically unique to the user + // and the application that acquired the WebIdentityToken (pairwise identifier). + // For OpenID Connect ID tokens, this field contains the value returned by the + // identity provider as the token's sub (Subject) claim. + SubjectFromWebIdentityToken *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationAssumeRoleWithWebIdentityMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRoleWithWebIdentity{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpAssumeRoleWithWebIdentity{}, middleware.After) + if err != nil { + return err + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addAssumeRoleWithWebIdentityResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addOpAssumeRoleWithWebIdentityValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRoleWithWebIdentity(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func 
newServiceMetadataMiddleware_opAssumeRoleWithWebIdentity(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		SigningName:   "sts",
+		OperationName: "AssumeRoleWithWebIdentity",
+	}
+}
+
+type opAssumeRoleWithWebIdentityResolveEndpointMiddleware struct {
+	EndpointResolver EndpointResolverV2
+	BuiltInResolver  builtInParameterResolver
+}
+
+func (*opAssumeRoleWithWebIdentityResolveEndpointMiddleware) ID() string {
+	return "ResolveEndpointV2"
+}
+
+func (m *opAssumeRoleWithWebIdentityResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
+		return next.HandleSerialize(ctx, in)
+	}
+
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+	}
+
+	if m.EndpointResolver == nil {
+		return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
+	}
+
+	params := EndpointParameters{}
+
+	m.BuiltInResolver.ResolveBuiltIns(&params)
+
+	var resolvedEndpoint smithyendpoints.Endpoint
+	resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
+	if err != nil {
+		return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
+	}
+
+	req.URL = &resolvedEndpoint.URI
+
+	for k := range resolvedEndpoint.Headers {
+		req.Header.Set(
+			k,
+			resolvedEndpoint.Headers.Get(k),
+		)
+	}
+
+	authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties)
+	if err != nil {
+		var nfe *internalauth.NoAuthenticationSchemesFoundError
+		if errors.As(err, &nfe) {
+			// if no auth scheme is found, default to sigv4
+			signingName := "sts"
+			signingRegion := m.BuiltInResolver.(*builtInResolver).Region
+			ctx = awsmiddleware.SetSigningName(ctx, signingName)
+			ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
+
+		}
+		var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError
+		if errors.As(err, &ue) {
+			return out, metadata, fmt.Errorf(
+				"This operation requests signer version(s) %v but the client only supports %v",
+				ue.UnsupportedSchemes,
+				internalauth.SupportedSchemes,
+			)
+		}
+	}
+
+	for _, authScheme := range authSchemes {
+		switch authScheme.(type) {
+		case *internalauth.AuthenticationSchemeV4:
+			v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4)
+			var signingName, signingRegion string
+			if v4Scheme.SigningName == nil {
+				signingName = "sts"
+			} else {
+				signingName = *v4Scheme.SigningName
+			}
+			if v4Scheme.SigningRegion == nil {
+				signingRegion = m.BuiltInResolver.(*builtInResolver).Region
+			} else {
+				signingRegion = *v4Scheme.SigningRegion
+			}
+			if v4Scheme.DisableDoubleEncoding != nil {
+				// The signer sets an equivalent value at client initialization time.
+				// Setting this context value will cause the signer to extract it
+				// and override the value set at client initialization time.
+ ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + break + case *internalauth.AuthenticationSchemeV4A: + v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) + if v4aScheme.SigningName == nil { + v4aScheme.SigningName = aws.String("sts") + } + if v4aScheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. + ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) + ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) + break + case *internalauth.AuthenticationSchemeNone: + break + } + } + + return next.HandleSerialize(ctx, in) +} + +func addAssumeRoleWithWebIdentityResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { + return stack.Serialize.Insert(&opAssumeRoleWithWebIdentityResolveEndpointMiddleware{ + EndpointResolver: options.EndpointResolverV2, + BuiltInResolver: &builtInResolver{ + Region: options.Region, + UseDualStack: options.EndpointOptions.UseDualStackEndpoint, + UseFIPS: options.EndpointOptions.UseFIPSEndpoint, + Endpoint: options.BaseEndpoint, + }, + }, "ResolveEndpoint", middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go new file mode 100644 index 00000000000..baf2f96866b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go @@ -0,0 +1,285 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Decodes additional information about the authorization status of a request from +// an encoded message returned in response to an Amazon Web Services request. For +// example, if a user is not authorized to perform an operation that he or she has +// requested, the request returns a Client.UnauthorizedOperation response (an HTTP +// 403 response). Some Amazon Web Services operations additionally return an +// encoded message that can provide details about this authorization failure. Only +// certain Amazon Web Services operations return an encoded authorization message. +// The documentation for an individual operation indicates whether that operation +// returns an encoded message in addition to returning an HTTP code. The message is +// encoded because the details of the authorization status can contain privileged +// information that the user who requested the operation should not see. To decode +// an authorization status message, a user must be granted permissions through an +// IAM policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) +// to request the DecodeAuthorizationMessage ( sts:DecodeAuthorizationMessage ) +// action. 
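A rough sketch of that decode step, assuming default-chain credentials that are allowed to call sts:DecodeAuthorizationMessage; the encoded message below is a placeholder captured from an earlier access-denied error.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx) // caller must hold sts:DecodeAuthorizationMessage
	if err != nil {
		log.Fatal(err)
	}

	out, err := sts.NewFromConfig(cfg).DecodeAuthorizationMessage(ctx, &sts.DecodeAuthorizationMessageInput{
		EncodedMessage: aws.String("<encoded message from a denied request>"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.ToString(out.DecodedMessage)) // JSON document describing the denial
}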
The decoded message includes the following type of information: +// - Whether the request was denied due to an explicit deny or due to the +// absence of an explicit allow. For more information, see Determining Whether a +// Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) +// in the IAM User Guide. +// - The principal who made the request. +// - The requested action. +// - The requested resource. +// - The values of condition keys in the context of the user's request. +func (c *Client) DecodeAuthorizationMessage(ctx context.Context, params *DecodeAuthorizationMessageInput, optFns ...func(*Options)) (*DecodeAuthorizationMessageOutput, error) { + if params == nil { + params = &DecodeAuthorizationMessageInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DecodeAuthorizationMessage", params, optFns, c.addOperationDecodeAuthorizationMessageMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DecodeAuthorizationMessageOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DecodeAuthorizationMessageInput struct { + + // The encoded message that was returned with the response. + // + // This member is required. + EncodedMessage *string + + noSmithyDocumentSerde +} + +// A document that contains additional information about the authorization status +// of a request from an encoded message that is returned in response to an Amazon +// Web Services request. +type DecodeAuthorizationMessageOutput struct { + + // The API returns a response with the decoded message. + DecodedMessage *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDecodeAuthorizationMessageMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsquery_serializeOpDecodeAuthorizationMessage{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpDecodeAuthorizationMessage{}, middleware.After) + if err != nil { + return err + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addDecodeAuthorizationMessageResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addOpDecodeAuthorizationMessageValidationMiddleware(stack); err != nil { + return err + } + if err = 
stack.Initialize.Add(newServiceMetadataMiddleware_opDecodeAuthorizationMessage(options.Region), middleware.Before); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+		return err
+	}
+	if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+func newServiceMetadataMiddleware_opDecodeAuthorizationMessage(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		SigningName:   "sts",
+		OperationName: "DecodeAuthorizationMessage",
+	}
+}
+
+type opDecodeAuthorizationMessageResolveEndpointMiddleware struct {
+	EndpointResolver EndpointResolverV2
+	BuiltInResolver  builtInParameterResolver
+}
+
+func (*opDecodeAuthorizationMessageResolveEndpointMiddleware) ID() string {
+	return "ResolveEndpointV2"
+}
+
+func (m *opDecodeAuthorizationMessageResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
+		return next.HandleSerialize(ctx, in)
+	}
+
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+	}
+
+	if m.EndpointResolver == nil {
+		return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
+	}
+
+	params := EndpointParameters{}
+
+	m.BuiltInResolver.ResolveBuiltIns(&params)
+
+	var resolvedEndpoint smithyendpoints.Endpoint
+	resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
+	if err != nil {
+		return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
+	}
+
+	req.URL = &resolvedEndpoint.URI
+
+	for k := range resolvedEndpoint.Headers {
+		req.Header.Set(
+			k,
+			resolvedEndpoint.Headers.Get(k),
+		)
+	}
+
+	authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties)
+	if err != nil {
+		var nfe *internalauth.NoAuthenticationSchemesFoundError
+		if errors.As(err, &nfe) {
+			// if no auth scheme is found, default to sigv4
+			signingName := "sts"
+			signingRegion := m.BuiltInResolver.(*builtInResolver).Region
+			ctx = awsmiddleware.SetSigningName(ctx, signingName)
+			ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
+
+		}
+		var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError
+		if errors.As(err, &ue) {
+			return out, metadata, fmt.Errorf(
+				"This operation requests signer version(s) %v but the client only supports %v",
+				ue.UnsupportedSchemes,
+				internalauth.SupportedSchemes,
+			)
+		}
+	}
+
+	for _, authScheme := range authSchemes {
+		switch authScheme.(type) {
+		case *internalauth.AuthenticationSchemeV4:
+			v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4)
+			var signingName, signingRegion string
+			if v4Scheme.SigningName == nil {
+				signingName = "sts"
+			} else {
+				signingName = *v4Scheme.SigningName
+			}
+			if v4Scheme.SigningRegion == nil {
+				signingRegion = m.BuiltInResolver.(*builtInResolver).Region
+			} else {
+				signingRegion = *v4Scheme.SigningRegion
+			}
+			if v4Scheme.DisableDoubleEncoding != nil {
+				// The signer sets an equivalent value at client initialization time.
+ // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. + ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + break + case *internalauth.AuthenticationSchemeV4A: + v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) + if v4aScheme.SigningName == nil { + v4aScheme.SigningName = aws.String("sts") + } + if v4aScheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. + ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) + ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) + break + case *internalauth.AuthenticationSchemeNone: + break + } + } + + return next.HandleSerialize(ctx, in) +} + +func addDecodeAuthorizationMessageResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { + return stack.Serialize.Insert(&opDecodeAuthorizationMessageResolveEndpointMiddleware{ + EndpointResolver: options.EndpointResolverV2, + BuiltInResolver: &builtInResolver{ + Region: options.Region, + UseDualStack: options.EndpointOptions.UseDualStackEndpoint, + UseFIPS: options.EndpointOptions.UseFIPSEndpoint, + Endpoint: options.BaseEndpoint, + }, + }, "ResolveEndpoint", middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go new file mode 100644 index 00000000000..f1dd167da93 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go @@ -0,0 +1,278 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the account identifier for the specified access key ID. Access keys +// consist of two parts: an access key ID (for example, AKIAIOSFODNN7EXAMPLE ) and +// a secret access key (for example, wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY ). +// For more information about access keys, see Managing Access Keys for IAM Users (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) +// in the IAM User Guide. When you pass an access key ID to this operation, it +// returns the ID of the Amazon Web Services account to which the keys belong. +// Access key IDs beginning with AKIA are long-term credentials for an IAM user or +// the Amazon Web Services account root user. Access key IDs beginning with ASIA +// are temporary credentials that are created using STS operations. If the account +// in the response belongs to you, you can sign in as the root user and review your +// root user access keys. Then, you can pull a credentials report (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html) +// to learn which IAM user owns the keys. 
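A small illustrative sketch of that lookup, reusing the example key ID from the documentation above and assuming default-chain credentials with permission to call sts:GetAccessKeyInfo.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}

	out, err := sts.NewFromConfig(cfg).GetAccessKeyInfo(ctx, &sts.GetAccessKeyInfoInput{
		AccessKeyId: aws.String("AKIAIOSFODNN7EXAMPLE"), // example key ID, not a real credential
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("owning account:", aws.ToString(out.Account))
}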
To learn who requested the temporary +// credentials for an ASIA access key, view the STS events in your CloudTrail logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html) +// in the IAM User Guide. This operation does not indicate the state of the access +// key. The key might be active, inactive, or deleted. Active keys might not have +// permissions to perform an operation. Providing a deleted access key might return +// an error that the key doesn't exist. +func (c *Client) GetAccessKeyInfo(ctx context.Context, params *GetAccessKeyInfoInput, optFns ...func(*Options)) (*GetAccessKeyInfoOutput, error) { + if params == nil { + params = &GetAccessKeyInfoInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetAccessKeyInfo", params, optFns, c.addOperationGetAccessKeyInfoMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetAccessKeyInfoOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetAccessKeyInfoInput struct { + + // The identifier of an access key. This parameter allows (through its regex + // pattern) a string of characters that can consist of any upper- or lowercase + // letter or digit. + // + // This member is required. + AccessKeyId *string + + noSmithyDocumentSerde +} + +type GetAccessKeyInfoOutput struct { + + // The number used to identify the Amazon Web Services account. + Account *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetAccessKeyInfoMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsquery_serializeOpGetAccessKeyInfo{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetAccessKeyInfo{}, middleware.After) + if err != nil { + return err + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addGetAccessKeyInfoResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetAccessKeyInfoValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetAccessKeyInfo(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = 
addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+func newServiceMetadataMiddleware_opGetAccessKeyInfo(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		SigningName:   "sts",
+		OperationName: "GetAccessKeyInfo",
+	}
+}
+
+type opGetAccessKeyInfoResolveEndpointMiddleware struct {
+	EndpointResolver EndpointResolverV2
+	BuiltInResolver  builtInParameterResolver
+}
+
+func (*opGetAccessKeyInfoResolveEndpointMiddleware) ID() string {
+	return "ResolveEndpointV2"
+}
+
+func (m *opGetAccessKeyInfoResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
+		return next.HandleSerialize(ctx, in)
+	}
+
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+	}
+
+	if m.EndpointResolver == nil {
+		return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
+	}
+
+	params := EndpointParameters{}
+
+	m.BuiltInResolver.ResolveBuiltIns(&params)
+
+	var resolvedEndpoint smithyendpoints.Endpoint
+	resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
+	if err != nil {
+		return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
+	}
+
+	req.URL = &resolvedEndpoint.URI
+
+	for k := range resolvedEndpoint.Headers {
+		req.Header.Set(
+			k,
+			resolvedEndpoint.Headers.Get(k),
+		)
+	}
+
+	authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties)
+	if err != nil {
+		var nfe *internalauth.NoAuthenticationSchemesFoundError
+		if errors.As(err, &nfe) {
+			// if no auth scheme is found, default to sigv4
+			signingName := "sts"
+			signingRegion := m.BuiltInResolver.(*builtInResolver).Region
+			ctx = awsmiddleware.SetSigningName(ctx, signingName)
+			ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
+
+		}
+		var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError
+		if errors.As(err, &ue) {
+			return out, metadata, fmt.Errorf(
+				"This operation requests signer version(s) %v but the client only supports %v",
+				ue.UnsupportedSchemes,
+				internalauth.SupportedSchemes,
+			)
+		}
+	}
+
+	for _, authScheme := range authSchemes {
+		switch authScheme.(type) {
+		case *internalauth.AuthenticationSchemeV4:
+			v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4)
+			var signingName, signingRegion string
+			if v4Scheme.SigningName == nil {
+				signingName = "sts"
+			} else {
+				signingName = *v4Scheme.SigningName
+			}
+			if v4Scheme.SigningRegion == nil {
+				signingRegion = m.BuiltInResolver.(*builtInResolver).Region
+			} else {
+				signingRegion = *v4Scheme.SigningRegion
+			}
+			if v4Scheme.DisableDoubleEncoding != nil {
+				// The signer sets an equivalent value at client initialization time.
+				// Setting this context value will cause the signer to extract it
+				// and override the value set at client initialization time.
+ ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + break + case *internalauth.AuthenticationSchemeV4A: + v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) + if v4aScheme.SigningName == nil { + v4aScheme.SigningName = aws.String("sts") + } + if v4aScheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. + ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) + ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) + break + case *internalauth.AuthenticationSchemeNone: + break + } + } + + return next.HandleSerialize(ctx, in) +} + +func addGetAccessKeyInfoResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { + return stack.Serialize.Insert(&opGetAccessKeyInfoResolveEndpointMiddleware{ + EndpointResolver: options.EndpointResolverV2, + BuiltInResolver: &builtInResolver{ + Region: options.Region, + UseDualStack: options.EndpointOptions.UseDualStackEndpoint, + UseFIPS: options.EndpointOptions.UseFIPSEndpoint, + Endpoint: options.BaseEndpoint, + }, + }, "ResolveEndpoint", middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go new file mode 100644 index 00000000000..66e5d99d491 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go @@ -0,0 +1,294 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns details about the IAM user or role whose credentials are used to call +// the operation. No permissions are required to perform this operation. If an +// administrator attaches a policy to your identity that explicitly denies access +// to the sts:GetCallerIdentity action, you can still perform this operation. +// Permissions are not required because the same information is returned when +// access is denied. To view an example response, see I Am Not Authorized to +// Perform: iam:DeleteVirtualMFADevice (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa) +// in the IAM User Guide. 
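Because no permissions are required, this operation is a convenient smoke test for a resolved credential chain. A minimal sketch, assuming the default configuration loader:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx) // any resolvable credentials will do
	if err != nil {
		log.Fatal(err)
	}

	out, err := sts.NewFromConfig(cfg).GetCallerIdentity(ctx, &sts.GetCallerIdentityInput{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("account=%s arn=%s userid=%s\n",
		aws.ToString(out.Account), aws.ToString(out.Arn), aws.ToString(out.UserId))
}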
+func (c *Client) GetCallerIdentity(ctx context.Context, params *GetCallerIdentityInput, optFns ...func(*Options)) (*GetCallerIdentityOutput, error) { + if params == nil { + params = &GetCallerIdentityInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetCallerIdentity", params, optFns, c.addOperationGetCallerIdentityMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetCallerIdentityOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetCallerIdentityInput struct { + noSmithyDocumentSerde +} + +// Contains the response to a successful GetCallerIdentity request, including +// information about the entity making the request. +type GetCallerIdentityOutput struct { + + // The Amazon Web Services account ID number of the account that owns or contains + // the calling entity. + Account *string + + // The Amazon Web Services ARN associated with the calling entity. + Arn *string + + // The unique identifier of the calling entity. The exact value depends on the + // type of entity that is making the call. The values returned are those listed in + // the aws:userid column in the Principal table (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) + // found on the Policy Variables reference page in the IAM User Guide. + UserId *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetCallerIdentityMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsquery_serializeOpGetCallerIdentity{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetCallerIdentity{}, middleware.After) + if err != nil { + return err + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addGetCallerIdentityResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetCallerIdentity(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if 
err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+func newServiceMetadataMiddleware_opGetCallerIdentity(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		SigningName:   "sts",
+		OperationName: "GetCallerIdentity",
+	}
+}
+
+// PresignGetCallerIdentity is used to generate a presigned HTTP Request which
+// contains presigned URL, signed headers and HTTP method used.
+func (c *PresignClient) PresignGetCallerIdentity(ctx context.Context, params *GetCallerIdentityInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) {
+	if params == nil {
+		params = &GetCallerIdentityInput{}
+	}
+	options := c.options.copy()
+	for _, fn := range optFns {
+		fn(&options)
+	}
+	clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption)
+
+	result, _, err := c.client.invokeOperation(ctx, "GetCallerIdentity", params, clientOptFns,
+		c.client.addOperationGetCallerIdentityMiddlewares,
+		presignConverter(options).convertToPresignMiddleware,
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*v4.PresignedHTTPRequest)
+	return out, nil
+}
+
+type opGetCallerIdentityResolveEndpointMiddleware struct {
+	EndpointResolver EndpointResolverV2
+	BuiltInResolver  builtInParameterResolver
+}
+
+func (*opGetCallerIdentityResolveEndpointMiddleware) ID() string {
+	return "ResolveEndpointV2"
+}
+
+func (m *opGetCallerIdentityResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
+		return next.HandleSerialize(ctx, in)
+	}
+
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+	}
+
+	if m.EndpointResolver == nil {
+		return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
+	}
+
+	params := EndpointParameters{}
+
+	m.BuiltInResolver.ResolveBuiltIns(&params)
+
+	var resolvedEndpoint smithyendpoints.Endpoint
+	resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
+	if err != nil {
+		return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
+	}
+
+	req.URL = &resolvedEndpoint.URI
+
+	for k := range resolvedEndpoint.Headers {
+		req.Header.Set(
+			k,
+			resolvedEndpoint.Headers.Get(k),
+		)
+	}
+
+	authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties)
+	if err != nil {
+		var nfe *internalauth.NoAuthenticationSchemesFoundError
+		if errors.As(err, &nfe) {
+			// if no auth scheme is found, default to sigv4
+			signingName := "sts"
+			signingRegion := m.BuiltInResolver.(*builtInResolver).Region
+			ctx = awsmiddleware.SetSigningName(ctx, signingName)
+			ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
+
+		}
+		var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError
+		if errors.As(err, &ue) {
+			return out, metadata, fmt.Errorf(
+				"This operation requests signer version(s) %v but the client only supports %v",
+				ue.UnsupportedSchemes,
+				internalauth.SupportedSchemes,
+			)
+		}
+	}
+
+	for _, authScheme := range authSchemes {
+		switch authScheme.(type) {
+		case *internalauth.AuthenticationSchemeV4:
+			v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4)
+			var signingName, signingRegion string
+			if v4Scheme.SigningName == nil {
+				signingName = "sts"
+			} else {
signingName = *v4Scheme.SigningName + } + if v4Scheme.SigningRegion == nil { + signingRegion = m.BuiltInResolver.(*builtInResolver).Region + } else { + signingRegion = *v4Scheme.SigningRegion + } + if v4Scheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. + ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + break + case *internalauth.AuthenticationSchemeV4A: + v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) + if v4aScheme.SigningName == nil { + v4aScheme.SigningName = aws.String("sts") + } + if v4aScheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. + ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) + ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) + break + case *internalauth.AuthenticationSchemeNone: + break + } + } + + return next.HandleSerialize(ctx, in) +} + +func addGetCallerIdentityResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { + return stack.Serialize.Insert(&opGetCallerIdentityResolveEndpointMiddleware{ + EndpointResolver: options.EndpointResolverV2, + BuiltInResolver: &builtInResolver{ + Region: options.Region, + UseDualStack: options.EndpointOptions.UseDualStackEndpoint, + UseFIPS: options.EndpointOptions.UseFIPSEndpoint, + Endpoint: options.BaseEndpoint, + }, + }, "ResolveEndpoint", middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go new file mode 100644 index 00000000000..d577ef686e9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go @@ -0,0 +1,445 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a set of temporary security credentials (consisting of an access key +// ID, a secret access key, and a security token) for a user. A typical use is in a +// proxy application that gets temporary security credentials on behalf of +// distributed applications inside a corporate network. You must call the +// GetFederationToken operation using the long-term security credentials of an IAM +// user. As a result, this call is appropriate in contexts where those credentials +// can be safeguarded, usually in a server-based application. 
For a comparison of +// GetFederationToken with the other API operations that produce temporary +// credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. Although it is possible to call GetFederationToken using +// the security credentials of an Amazon Web Services account root user rather than +// an IAM user that you create for the purpose of a proxy application, we do not +// recommend it. For more information, see Safeguard your root user credentials +// and don't use them for everyday tasks (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials) +// in the IAM User Guide. You can create a mobile-based or browser-based app that +// can authenticate users using a web identity provider like Login with Amazon, +// Facebook, Google, or an OpenID Connect-compatible identity provider. In this +// case, we recommend that you use Amazon Cognito (http://aws.amazon.com/cognito/) +// or AssumeRoleWithWebIdentity . For more information, see Federation Through a +// Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) +// in the IAM User Guide. Session duration The temporary credentials are valid for +// the specified duration, from 900 seconds (15 minutes) up to a maximum of 129,600 +// seconds (36 hours). The default session duration is 43,200 seconds (12 hours). +// Temporary credentials obtained by using the root user credentials have a maximum +// duration of 3,600 seconds (1 hour). Permissions You can use the temporary +// credentials created by GetFederationToken in any Amazon Web Services service +// with the following exceptions: +// - You cannot call any IAM operations using the CLI or the Amazon Web Services +// API. This limitation does not apply to console sessions. +// - You cannot call any STS operations except GetCallerIdentity . +// +// You can use temporary credentials for single sign-on (SSO) to the console. You +// must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policy Amazon +// Resource Names (ARNs) to use as managed session policies. The plaintext that you +// use for both inline and managed session policies can't exceed 2,048 characters. +// Though the session policy parameters are optional, if you do not pass a policy, +// then the resulting federated user session has no permissions. When you pass +// session policies, the session permissions are the intersection of the IAM user +// policies and the session policies that you pass. This gives you a way to further +// restrict the permissions for a federated user. You cannot use session policies +// to grant more permissions than those that are defined in the permissions policy +// of the IAM user. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. 
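+//
+// Editor's note: the sketch below is illustrative and is not part of the
+// generated file. It shows one plausible way to call GetFederationToken with
+// an inline session policy, using the input shape defined later in this file;
+// the configuration loading, the federated user name, and the policy document
+// are assumptions rather than values taken from this package.
+//
+//	package main
+//
+//	import (
+//		"context"
+//		"fmt"
+//		"log"
+//
+//		"github.com/aws/aws-sdk-go-v2/aws"
+//		"github.com/aws/aws-sdk-go-v2/config"
+//		"github.com/aws/aws-sdk-go-v2/service/sts"
+//	)
+//
+//	func main() {
+//		ctx := context.Background()
+//		// Load the long-term IAM user credentials that GetFederationToken requires.
+//		cfg, err := config.LoadDefaultConfig(ctx)
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		client := sts.NewFromConfig(cfg)
+//		out, err := client.GetFederationToken(ctx, &sts.GetFederationTokenInput{
+//			Name:            aws.String("app-user"), // required federated user name
+//			DurationSeconds: aws.Int32(3600),        // 1 hour
+//			// Without a session policy the federated session has no permissions.
+//			Policy: aws.String(`{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:GetObject","Resource":"*"}]}`),
+//		})
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		fmt.Println(*out.FederatedUser.Arn, *out.Credentials.AccessKeyId)
+//	}
+//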
For information about using GetFederationToken to create +// temporary security credentials, see GetFederationToken—Federation Through a +// Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken) +// . You can use the credentials to access a resource that has a resource-based +// policy. If that policy specifically references the federated user session in the +// Principal element of the policy, the session has the permissions allowed by the +// policy. These permissions are granted in addition to the permissions granted by +// the session policies. Tags (Optional) You can pass tag key-value pairs to your +// session. These are called session tags. For more information about session tags, +// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. You can create a mobile-based or browser-based app that +// can authenticate users using a web identity provider like Login with Amazon, +// Facebook, Google, or an OpenID Connect-compatible identity provider. In this +// case, we recommend that you use Amazon Cognito (http://aws.amazon.com/cognito/) +// or AssumeRoleWithWebIdentity . For more information, see Federation Through a +// Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) +// in the IAM User Guide. An administrator must grant you the permissions necessary +// to pass session tags. The administrator can also create granular permissions to +// allow you to pass only specific session tags. For more information, see +// Tutorial: Using Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. Tag key–value pairs are not case sensitive, but case is +// preserved. This means that you cannot have separate Department and department +// tag keys. Assume that the user that you are federating has the Department = +// Marketing tag and you pass the department = engineering session tag. Department +// and department are not saved as separate tags, and the session tag passed in +// the request takes precedence over the user tag. +func (c *Client) GetFederationToken(ctx context.Context, params *GetFederationTokenInput, optFns ...func(*Options)) (*GetFederationTokenOutput, error) { + if params == nil { + params = &GetFederationTokenInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetFederationToken", params, optFns, c.addOperationGetFederationTokenMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetFederationTokenOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetFederationTokenInput struct { + + // The name of the federated user. The name is used as an identifier for the + // temporary security credentials (such as Bob ). For example, you can reference + // the federated user name in a resource-based policy, such as in an Amazon S3 + // bucket policy. The regex used to validate this parameter is a string of + // characters consisting of upper- and lower-case alphanumeric characters with no + // spaces. You can also include underscores or any of the following characters: + // =,.@- + // + // This member is required. + Name *string + + // The duration, in seconds, that the session should last. 
Acceptable durations + // for federation sessions range from 900 seconds (15 minutes) to 129,600 seconds + // (36 hours), with 43,200 seconds (12 hours) as the default. Sessions obtained + // using root user credentials are restricted to a maximum of 3,600 seconds (one + // hour). If the specified duration is longer than one hour, the session obtained + // by using root user credentials defaults to one hour. + DurationSeconds *int32 + + // An IAM policy in JSON format that you want to use as an inline session policy. + // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // to this operation. You can pass a single JSON policy document to use as an + // inline session policy. You can also specify up to 10 managed policy Amazon + // Resource Names (ARNs) to use as managed session policies. This parameter is + // optional. However, if you do not pass any session policies, then the resulting + // federated user session has no permissions. When you pass session policies, the + // session permissions are the intersection of the IAM user policies and the + // session policies that you pass. This gives you a way to further restrict the + // permissions for a federated user. You cannot use session policies to grant more + // permissions than those that are defined in the permissions policy of the IAM + // user. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. The resulting credentials can be used to access a + // resource that has a resource-based policy. If that policy specifically + // references the federated user session in the Principal element of the policy, + // the session has the permissions allowed by the policy. These permissions are + // granted in addition to the permissions that are granted by the session policies. + // The plaintext that you use for both inline and managed session policies can't + // exceed 2,048 characters. The JSON policy characters can be any ASCII character + // from the space character to the end of the valid character list (\u0020 through + // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage + // return (\u000D) characters. An Amazon Web Services conversion compresses the + // passed inline session policy, managed policy ARNs, and session tags into a + // packed binary format that has a separate limit. Your request can fail for this + // limit even if your plaintext meets the other requirements. The PackedPolicySize + // response element indicates by percentage how close the policies and tags for + // your request are to the upper size limit. + Policy *string + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to + // use as a managed session policy. The policies must exist in the same account as + // the IAM user that is requesting federated access. You must pass an inline or + // managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // to this operation. You can pass a single JSON policy document to use as an + // inline session policy. You can also specify up to 10 managed policy Amazon + // Resource Names (ARNs) to use as managed session policies. The plaintext that you + // use for both inline and managed session policies can't exceed 2,048 characters. + // You can provide up to 10 managed policy ARNs. 
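+	//
+	// Editor's note (illustrative, not part of the generated documentation): a
+	// managed session policy is referenced by ARN, for example
+	//
+	//	PolicyArns: []types.PolicyDescriptorType{
+	//		{Arn: aws.String("arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess")},
+	//	}
+	//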
For more information about ARNs, + // see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. This parameter is optional. + // However, if you do not pass any session policies, then the resulting federated + // user session has no permissions. When you pass session policies, the session + // permissions are the intersection of the IAM user policies and the session + // policies that you pass. This gives you a way to further restrict the permissions + // for a federated user. You cannot use session policies to grant more permissions + // than those that are defined in the permissions policy of the IAM user. For more + // information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. The resulting credentials can be used to access a + // resource that has a resource-based policy. If that policy specifically + // references the federated user session in the Principal element of the policy, + // the session has the permissions allowed by the policy. These permissions are + // granted in addition to the permissions that are granted by the session policies. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + PolicyArns []types.PolicyDescriptorType + + // A list of session tags. Each session tag consists of a key name and an + // associated value. For more information about session tags, see Passing Session + // Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // in the IAM User Guide. This parameter is optional. You can pass up to 50 session + // tags. The plaintext session tag keys can’t exceed 128 characters and the values + // can’t exceed 256 characters. For these and additional limits, see IAM and STS + // Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. An Amazon Web Services conversion compresses the passed + // inline session policy, managed policy ARNs, and session tags into a packed + // binary format that has a separate limit. Your request can fail for this limit + // even if your plaintext meets the other requirements. The PackedPolicySize + // response element indicates by percentage how close the policies and tags for + // your request are to the upper size limit. You can pass a session tag with the + // same key as a tag that is already attached to the user you are federating. When + // you do, session tags override a user tag with the same key. Tag key–value pairs + // are not case sensitive, but case is preserved. This means that you cannot have + // separate Department and department tag keys. Assume that the role has the + // Department = Marketing tag and you pass the department = engineering session + // tag. Department and department are not saved as separate tags, and the session + // tag passed in the request takes precedence over the role tag. 
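+	//
+	// Editor's note (illustrative, not part of the generated documentation):
+	// session tags are built from the types package, for example
+	//
+	//	Tags: []types.Tag{{Key: aws.String("Department"), Value: aws.String("Engineering")}}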
+ Tags []types.Tag + + noSmithyDocumentSerde +} + +// Contains the response to a successful GetFederationToken request, including +// temporary Amazon Web Services credentials that can be used to make Amazon Web +// Services requests. +type GetFederationTokenOutput struct { + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. The size of the security token + // that STS API operations return is not fixed. We strongly recommend that you make + // no assumptions about the maximum size. + Credentials *types.Credentials + + // Identifiers for the federated user associated with the credentials (such as + // arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob ). You can use + // the federated user's ARN in your resource-based policies, such as an Amazon S3 + // bucket policy. + FederatedUser *types.FederatedUser + + // A percentage value that indicates the packed size of the session policies and + // session tags combined passed in the request. The request fails if the packed + // size is greater than 100 percent, which means the policies and tags exceeded the + // allowed space. + PackedPolicySize *int32 + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetFederationTokenMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsquery_serializeOpGetFederationToken{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetFederationToken{}, middleware.After) + if err != nil { + return err + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addGetFederationTokenResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetFederationTokenValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetFederationToken(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = 
addendpointDisableHTTPSMiddleware(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+func newServiceMetadataMiddleware_opGetFederationToken(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		SigningName:   "sts",
+		OperationName: "GetFederationToken",
+	}
+}
+
+type opGetFederationTokenResolveEndpointMiddleware struct {
+	EndpointResolver EndpointResolverV2
+	BuiltInResolver  builtInParameterResolver
+}
+
+func (*opGetFederationTokenResolveEndpointMiddleware) ID() string {
+	return "ResolveEndpointV2"
+}
+
+func (m *opGetFederationTokenResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
+		return next.HandleSerialize(ctx, in)
+	}
+
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+	}
+
+	if m.EndpointResolver == nil {
+		return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
+	}
+
+	params := EndpointParameters{}
+
+	m.BuiltInResolver.ResolveBuiltIns(&params)
+
+	var resolvedEndpoint smithyendpoints.Endpoint
+	resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
+	if err != nil {
+		return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
+	}
+
+	req.URL = &resolvedEndpoint.URI
+
+	for k := range resolvedEndpoint.Headers {
+		req.Header.Set(
+			k,
+			resolvedEndpoint.Headers.Get(k),
+		)
+	}
+
+	authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties)
+	if err != nil {
+		var nfe *internalauth.NoAuthenticationSchemesFoundError
+		if errors.As(err, &nfe) {
+			// if no auth scheme is found, default to sigv4
+			signingName := "sts"
+			signingRegion := m.BuiltInResolver.(*builtInResolver).Region
+			ctx = awsmiddleware.SetSigningName(ctx, signingName)
+			ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
+
+		}
+		var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError
+		if errors.As(err, &ue) {
+			return out, metadata, fmt.Errorf(
+				"This operation requests signer version(s) %v but the client only supports %v",
+				ue.UnsupportedSchemes,
+				internalauth.SupportedSchemes,
+			)
+		}
+	}
+
+	for _, authScheme := range authSchemes {
+		switch authScheme.(type) {
+		case *internalauth.AuthenticationSchemeV4:
+			v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4)
+			var signingName, signingRegion string
+			if v4Scheme.SigningName == nil {
+				signingName = "sts"
+			} else {
+				signingName = *v4Scheme.SigningName
+			}
+			if v4Scheme.SigningRegion == nil {
+				signingRegion = m.BuiltInResolver.(*builtInResolver).Region
+			} else {
+				signingRegion = *v4Scheme.SigningRegion
+			}
+			if v4Scheme.DisableDoubleEncoding != nil {
+				// The signer sets an equivalent value at client initialization time.
+				// Setting this context value will cause the signer to extract it
+				// and override the value set at client initialization time.
+ ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + break + case *internalauth.AuthenticationSchemeV4A: + v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) + if v4aScheme.SigningName == nil { + v4aScheme.SigningName = aws.String("sts") + } + if v4aScheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. + ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) + ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) + break + case *internalauth.AuthenticationSchemeNone: + break + } + } + + return next.HandleSerialize(ctx, in) +} + +func addGetFederationTokenResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { + return stack.Serialize.Insert(&opGetFederationTokenResolveEndpointMiddleware{ + EndpointResolver: options.EndpointResolverV2, + BuiltInResolver: &builtInResolver{ + Region: options.Region, + UseDualStack: options.EndpointOptions.UseDualStackEndpoint, + UseFIPS: options.EndpointOptions.UseFIPSEndpoint, + Endpoint: options.BaseEndpoint, + }, + }, "ResolveEndpoint", middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go new file mode 100644 index 00000000000..7a2345e8031 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go @@ -0,0 +1,328 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a set of temporary credentials for an Amazon Web Services account or +// IAM user. The credentials consist of an access key ID, a secret access key, and +// a security token. Typically, you use GetSessionToken if you want to use MFA to +// protect programmatic calls to specific Amazon Web Services API operations like +// Amazon EC2 StopInstances . MFA-enabled IAM users must call GetSessionToken and +// submit an MFA code that is associated with their MFA device. Using the temporary +// security credentials that the call returns, IAM users can then make programmatic +// calls to API operations that require MFA authentication. An incorrect MFA code +// causes the API to return an access denied error. For a comparison of +// GetSessionToken with the other API operations that produce temporary +// credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. 
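+//
+// Editor's note: an illustrative sketch, not part of the generated file. It
+// shows an MFA-protected call using the input shape defined below; client
+// construction mirrors the GetFederationToken example earlier in this diff,
+// and the device serial number and token code are placeholder assumptions:
+//
+//	client := sts.NewFromConfig(cfg) // cfg holds the IAM user's long-term credentials
+//	out, err := client.GetSessionToken(ctx, &sts.GetSessionTokenInput{
+//		SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/user"), // virtual MFA device
+//		TokenCode:       aws.String("123456"),                             // current 6-digit MFA code
+//		DurationSeconds: aws.Int32(43200),                                 // 12 hours (the default)
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(*out.Credentials.SessionToken)
+//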
No permissions are required for users to perform this +// operation. The purpose of the sts:GetSessionToken operation is to authenticate +// the user using MFA. You cannot use policies to control authentication +// operations. For more information, see Permissions for GetSessionToken (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getsessiontoken.html) +// in the IAM User Guide. Session Duration The GetSessionToken operation must be +// called by using the long-term Amazon Web Services security credentials of an IAM +// user. Credentials that are created by IAM users are valid for the duration that +// you specify. This duration can range from 900 seconds (15 minutes) up to a +// maximum of 129,600 seconds (36 hours), with a default of 43,200 seconds (12 +// hours). Credentials based on account credentials can range from 900 seconds (15 +// minutes) up to 3,600 seconds (1 hour), with a default of 1 hour. Permissions The +// temporary security credentials created by GetSessionToken can be used to make +// API calls to any Amazon Web Services service with the following exceptions: +// - You cannot call any IAM API operations unless MFA authentication +// information is included in the request. +// - You cannot call any STS API except AssumeRole or GetCallerIdentity . +// +// The credentials that GetSessionToken returns are based on permissions +// associated with the IAM user whose credentials were used to call the operation. +// The temporary credentials have the same permissions as the IAM user. Although it +// is possible to call GetSessionToken using the security credentials of an Amazon +// Web Services account root user rather than an IAM user, we do not recommend it. +// If GetSessionToken is called using root user credentials, the temporary +// credentials have root user permissions. For more information, see Safeguard +// your root user credentials and don't use them for everyday tasks (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials) +// in the IAM User Guide For more information about using GetSessionToken to +// create temporary credentials, see Temporary Credentials for Users in Untrusted +// Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) +// in the IAM User Guide. +func (c *Client) GetSessionToken(ctx context.Context, params *GetSessionTokenInput, optFns ...func(*Options)) (*GetSessionTokenOutput, error) { + if params == nil { + params = &GetSessionTokenInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetSessionToken", params, optFns, c.addOperationGetSessionTokenMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetSessionTokenOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetSessionTokenInput struct { + + // The duration, in seconds, that the credentials should remain valid. Acceptable + // durations for IAM user sessions range from 900 seconds (15 minutes) to 129,600 + // seconds (36 hours), with 43,200 seconds (12 hours) as the default. Sessions for + // Amazon Web Services account owners are restricted to a maximum of 3,600 seconds + // (one hour). If the duration is longer than one hour, the session for Amazon Web + // Services account owners defaults to one hour. + DurationSeconds *int32 + + // The identification number of the MFA device that is associated with the IAM + // user who is making the GetSessionToken call. 
Specify this value if the IAM user + // has a policy that requires MFA authentication. The value is either the serial + // number for a hardware device (such as GAHT12345678 ) or an Amazon Resource Name + // (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user ). You + // can find the device for an IAM user by going to the Amazon Web Services + // Management Console and viewing the user's security credentials. The regex used + // to validate this parameter is a string of characters consisting of upper- and + // lower-case alphanumeric characters with no spaces. You can also include + // underscores or any of the following characters: =,.@:/- + SerialNumber *string + + // The value provided by the MFA device, if MFA is required. If any policy + // requires the IAM user to submit an MFA code, specify this value. If MFA + // authentication is required, the user must provide a code when requesting a set + // of temporary security credentials. A user who fails to provide the code receives + // an "access denied" response when requesting resources that require MFA + // authentication. The format for this parameter, as described by its regex + // pattern, is a sequence of six numeric digits. + TokenCode *string + + noSmithyDocumentSerde +} + +// Contains the response to a successful GetSessionToken request, including +// temporary Amazon Web Services credentials that can be used to make Amazon Web +// Services requests. +type GetSessionTokenOutput struct { + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. The size of the security token + // that STS API operations return is not fixed. We strongly recommend that you make + // no assumptions about the maximum size. + Credentials *types.Credentials + + // Metadata pertaining to the operation's result. 
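+	// Editor's note (not part of the generated file): the request ID recorded
+	// by the deserializer middleware can typically be read back with
+	// awsmiddleware.GetRequestIDMetadata(out.ResultMetadata); that helper lives
+	// in the aws/middleware package and its use here is an assumption.
+	//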
+	ResultMetadata middleware.Metadata
+
+	noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetSessionTokenMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	err = stack.Serialize.Add(&awsAwsquery_serializeOpGetSessionToken{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetSessionToken{}, middleware.After)
+	if err != nil {
+		return err
+	}
+	if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+		return err
+	}
+	if err = addSetLoggerMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+		return err
+	}
+	if err = addRetryMiddlewares(stack, options); err != nil {
+		return err
+	}
+	if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+		return err
+	}
+	if err = addClientUserAgent(stack, options); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addGetSessionTokenResolveEndpointMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetSessionToken(options.Region), middleware.Before); err != nil {
+		return err
+	}
+	if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+		return err
+	}
+	if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addResponseErrorMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addRequestResponseLogging(stack, options); err != nil {
+		return err
+	}
+	if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
+		return err
+	}
+	return nil
+}
+
+func newServiceMetadataMiddleware_opGetSessionToken(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		SigningName:   "sts",
+		OperationName: "GetSessionToken",
+	}
+}
+
+type opGetSessionTokenResolveEndpointMiddleware struct {
+	EndpointResolver EndpointResolverV2
+	BuiltInResolver  builtInParameterResolver
+}
+
+func (*opGetSessionTokenResolveEndpointMiddleware) ID() string {
+	return "ResolveEndpointV2"
+}
+
+func (m *opGetSessionTokenResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
+		return next.HandleSerialize(ctx, in)
+	}
+
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+	}
+
+	if m.EndpointResolver == nil {
+		return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
+	}
+
+	params := EndpointParameters{}
+
+	m.BuiltInResolver.ResolveBuiltIns(&params)
+
+	var resolvedEndpoint smithyendpoints.Endpoint
+	resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
+	if 
err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL = &resolvedEndpoint.URI + + for k := range resolvedEndpoint.Headers { + req.Header.Set( + k, + resolvedEndpoint.Headers.Get(k), + ) + } + + authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) + if err != nil { + var nfe *internalauth.NoAuthenticationSchemesFoundError + if errors.As(err, &nfe) { + // if no auth scheme is found, default to sigv4 + signingName := "sts" + signingRegion := m.BuiltInResolver.(*builtInResolver).Region + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + + } + var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError + if errors.As(err, &ue) { + return out, metadata, fmt.Errorf( + "This operation requests signer version(s) %v but the client only supports %v", + ue.UnsupportedSchemes, + internalauth.SupportedSchemes, + ) + } + } + + for _, authScheme := range authSchemes { + switch authScheme.(type) { + case *internalauth.AuthenticationSchemeV4: + v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) + var signingName, signingRegion string + if v4Scheme.SigningName == nil { + signingName = "sts" + } else { + signingName = *v4Scheme.SigningName + } + if v4Scheme.SigningRegion == nil { + signingRegion = m.BuiltInResolver.(*builtInResolver).Region + } else { + signingRegion = *v4Scheme.SigningRegion + } + if v4Scheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. + ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + break + case *internalauth.AuthenticationSchemeV4A: + v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) + if v4aScheme.SigningName == nil { + v4aScheme.SigningName = aws.String("sts") + } + if v4aScheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. 
+ ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) + ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) + break + case *internalauth.AuthenticationSchemeNone: + break + } + } + + return next.HandleSerialize(ctx, in) +} + +func addGetSessionTokenResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { + return stack.Serialize.Insert(&opGetSessionTokenResolveEndpointMiddleware{ + EndpointResolver: options.EndpointResolverV2, + BuiltInResolver: &builtInResolver{ + Region: options.Region, + UseDualStack: options.EndpointOptions.UseDualStackEndpoint, + UseFIPS: options.EndpointOptions.UseFIPSEndpoint, + Endpoint: options.BaseEndpoint, + }, + }, "ResolveEndpoint", middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go new file mode 100644 index 00000000000..5d634ce35c8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go @@ -0,0 +1,2507 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "bytes" + "context" + "encoding/xml" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + awsxml "github.com/aws/aws-sdk-go-v2/aws/protocol/xml" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + smithy "github.com/aws/smithy-go" + smithyxml "github.com/aws/smithy-go/encoding/xml" + smithyio "github.com/aws/smithy-go/io" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithytime "github.com/aws/smithy-go/time" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" + "strconv" + "strings" +) + +type awsAwsquery_deserializeOpAssumeRole struct { +} + +func (*awsAwsquery_deserializeOpAssumeRole) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpAssumeRole) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorAssumeRole(response, &metadata) + } + output := &AssumeRoleOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("AssumeRoleResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = 
smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentAssumeRoleOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorAssumeRole(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("ExpiredTokenException", errorCode): + return awsAwsquery_deserializeErrorExpiredTokenException(response, errorBody) + + case strings.EqualFold("MalformedPolicyDocument", errorCode): + return awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response, errorBody) + + case strings.EqualFold("PackedPolicyTooLarge", errorCode): + return awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response, errorBody) + + case strings.EqualFold("RegionDisabledException", errorCode): + return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpAssumeRoleWithSAML struct { +} + +func (*awsAwsquery_deserializeOpAssumeRoleWithSAML) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpAssumeRoleWithSAML) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorAssumeRoleWithSAML(response, &metadata) + } + output := &AssumeRoleWithSAMLOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = 
decoder.GetElement("AssumeRoleWithSAMLResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentAssumeRoleWithSAMLOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorAssumeRoleWithSAML(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("ExpiredTokenException", errorCode): + return awsAwsquery_deserializeErrorExpiredTokenException(response, errorBody) + + case strings.EqualFold("IDPRejectedClaim", errorCode): + return awsAwsquery_deserializeErrorIDPRejectedClaimException(response, errorBody) + + case strings.EqualFold("InvalidIdentityToken", errorCode): + return awsAwsquery_deserializeErrorInvalidIdentityTokenException(response, errorBody) + + case strings.EqualFold("MalformedPolicyDocument", errorCode): + return awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response, errorBody) + + case strings.EqualFold("PackedPolicyTooLarge", errorCode): + return awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response, errorBody) + + case strings.EqualFold("RegionDisabledException", errorCode): + return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpAssumeRoleWithWebIdentity struct { +} + +func (*awsAwsquery_deserializeOpAssumeRoleWithWebIdentity) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpAssumeRoleWithWebIdentity) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorAssumeRoleWithWebIdentity(response, &metadata) + } + output := 
&AssumeRoleWithWebIdentityOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("AssumeRoleWithWebIdentityResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentAssumeRoleWithWebIdentityOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorAssumeRoleWithWebIdentity(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("ExpiredTokenException", errorCode): + return awsAwsquery_deserializeErrorExpiredTokenException(response, errorBody) + + case strings.EqualFold("IDPCommunicationError", errorCode): + return awsAwsquery_deserializeErrorIDPCommunicationErrorException(response, errorBody) + + case strings.EqualFold("IDPRejectedClaim", errorCode): + return awsAwsquery_deserializeErrorIDPRejectedClaimException(response, errorBody) + + case strings.EqualFold("InvalidIdentityToken", errorCode): + return awsAwsquery_deserializeErrorInvalidIdentityTokenException(response, errorBody) + + case strings.EqualFold("MalformedPolicyDocument", errorCode): + return awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response, errorBody) + + case strings.EqualFold("PackedPolicyTooLarge", errorCode): + return awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response, errorBody) + + case strings.EqualFold("RegionDisabledException", errorCode): + return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpDecodeAuthorizationMessage struct { +} + +func (*awsAwsquery_deserializeOpDecodeAuthorizationMessage) 
ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpDecodeAuthorizationMessage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorDecodeAuthorizationMessage(response, &metadata) + } + output := &DecodeAuthorizationMessageOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("DecodeAuthorizationMessageResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentDecodeAuthorizationMessageOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorDecodeAuthorizationMessage(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("InvalidAuthorizationMessageException", errorCode): + return awsAwsquery_deserializeErrorInvalidAuthorizationMessageException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpGetAccessKeyInfo struct { +} + +func (*awsAwsquery_deserializeOpGetAccessKeyInfo) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpGetAccessKeyInfo) 
HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorGetAccessKeyInfo(response, &metadata) + } + output := &GetAccessKeyInfoOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("GetAccessKeyInfoResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentGetAccessKeyInfoOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorGetAccessKeyInfo(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpGetCallerIdentity struct { +} + +func (*awsAwsquery_deserializeOpGetCallerIdentity) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpGetCallerIdentity) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := 
out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorGetCallerIdentity(response, &metadata) + } + output := &GetCallerIdentityOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("GetCallerIdentityResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentGetCallerIdentityOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorGetCallerIdentity(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpGetFederationToken struct { +} + +func (*awsAwsquery_deserializeOpGetFederationToken) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpGetFederationToken) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorGetFederationToken(response, 
&metadata) + } + output := &GetFederationTokenOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("GetFederationTokenResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentGetFederationTokenOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorGetFederationToken(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("MalformedPolicyDocument", errorCode): + return awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response, errorBody) + + case strings.EqualFold("PackedPolicyTooLarge", errorCode): + return awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response, errorBody) + + case strings.EqualFold("RegionDisabledException", errorCode): + return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpGetSessionToken struct { +} + +func (*awsAwsquery_deserializeOpGetSessionToken) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpGetSessionToken) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if 
response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorGetSessionToken(response, &metadata) + } + output := &GetSessionTokenOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("GetSessionTokenResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentGetSessionTokenOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorGetSessionToken(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("RegionDisabledException", errorCode): + return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsAwsquery_deserializeErrorExpiredTokenException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.ExpiredTokenException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + 
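+ // Snapshot preserves up to the first 1 KiB of the response body (the ring buffer's capacity) so the malformed payload can be inspected.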
Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentExpiredTokenException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorIDPCommunicationErrorException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.IDPCommunicationErrorException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentIDPCommunicationErrorException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorIDPRejectedClaimException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.IDPRejectedClaimException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentIDPRejectedClaimException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorInvalidAuthorizationMessageException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidAuthorizationMessageException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil 
{ + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentInvalidAuthorizationMessageException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorInvalidIdentityTokenException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidIdentityTokenException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentInvalidIdentityTokenException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.MalformedPolicyDocumentException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentMalformedPolicyDocumentException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + 
Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.PackedPolicyTooLargeException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentPackedPolicyTooLargeException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorRegionDisabledException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.RegionDisabledException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentRegionDisabledException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeDocumentAssumedRoleUser(v **types.AssumedRoleUser, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.AssumedRoleUser + if *v == nil { + sv = &types.AssumedRoleUser{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Arn", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Arn = ptr.String(xtv) + } + + case strings.EqualFold("AssumedRoleId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return 
err + } + if val == nil { + break + } + { + xtv := string(val) + sv.AssumedRoleId = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentCredentials(v **types.Credentials, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Credentials + if *v == nil { + sv = &types.Credentials{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AccessKeyId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.AccessKeyId = ptr.String(xtv) + } + + case strings.EqualFold("Expiration", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil { + return err + } + sv.Expiration = ptr.Time(t) + } + + case strings.EqualFold("SecretAccessKey", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SecretAccessKey = ptr.String(xtv) + } + + case strings.EqualFold("SessionToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SessionToken = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentExpiredTokenException(v **types.ExpiredTokenException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ExpiredTokenException + if *v == nil { + sv = &types.ExpiredTokenException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentFederatedUser(v **types.FederatedUser, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.FederatedUser + if *v == nil { + sv = &types.FederatedUser{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Arn", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Arn = 
ptr.String(xtv) + } + + case strings.EqualFold("FederatedUserId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.FederatedUserId = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentIDPCommunicationErrorException(v **types.IDPCommunicationErrorException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.IDPCommunicationErrorException + if *v == nil { + sv = &types.IDPCommunicationErrorException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentIDPRejectedClaimException(v **types.IDPRejectedClaimException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.IDPRejectedClaimException + if *v == nil { + sv = &types.IDPRejectedClaimException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentInvalidAuthorizationMessageException(v **types.InvalidAuthorizationMessageException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.InvalidAuthorizationMessageException + if *v == nil { + sv = &types.InvalidAuthorizationMessageException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentInvalidIdentityTokenException(v **types.InvalidIdentityTokenException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected 
nil of type %T", v) + } + var sv *types.InvalidIdentityTokenException + if *v == nil { + sv = &types.InvalidIdentityTokenException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentMalformedPolicyDocumentException(v **types.MalformedPolicyDocumentException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.MalformedPolicyDocumentException + if *v == nil { + sv = &types.MalformedPolicyDocumentException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentPackedPolicyTooLargeException(v **types.PackedPolicyTooLargeException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.PackedPolicyTooLargeException + if *v == nil { + sv = &types.PackedPolicyTooLargeException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentRegionDisabledException(v **types.RegionDisabledException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.RegionDisabledException + if *v == nil { + sv = &types.RegionDisabledException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = 
decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentAssumeRoleOutput(v **AssumeRoleOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *AssumeRoleOutput + if *v == nil { + sv = &AssumeRoleOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AssumedRoleUser", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentAssumedRoleUser(&sv.AssumedRoleUser, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Credentials", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("PackedPolicySize", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.PackedPolicySize = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("SourceIdentity", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SourceIdentity = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentAssumeRoleWithSAMLOutput(v **AssumeRoleWithSAMLOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *AssumeRoleWithSAMLOutput + if *v == nil { + sv = &AssumeRoleWithSAMLOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AssumedRoleUser", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentAssumedRoleUser(&sv.AssumedRoleUser, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Audience", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Audience = ptr.String(xtv) + } + + case strings.EqualFold("Credentials", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Issuer", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Issuer = ptr.String(xtv) + } + + case strings.EqualFold("NameQualifier", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.NameQualifier = ptr.String(xtv) + } + + case strings.EqualFold("PackedPolicySize", t.Name.Local): + val, err := 
decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.PackedPolicySize = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("SourceIdentity", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SourceIdentity = ptr.String(xtv) + } + + case strings.EqualFold("Subject", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Subject = ptr.String(xtv) + } + + case strings.EqualFold("SubjectType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SubjectType = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentAssumeRoleWithWebIdentityOutput(v **AssumeRoleWithWebIdentityOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *AssumeRoleWithWebIdentityOutput + if *v == nil { + sv = &AssumeRoleWithWebIdentityOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AssumedRoleUser", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentAssumedRoleUser(&sv.AssumedRoleUser, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Audience", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Audience = ptr.String(xtv) + } + + case strings.EqualFold("Credentials", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("PackedPolicySize", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.PackedPolicySize = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("Provider", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Provider = ptr.String(xtv) + } + + case strings.EqualFold("SourceIdentity", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SourceIdentity = ptr.String(xtv) + } + + case strings.EqualFold("SubjectFromWebIdentityToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SubjectFromWebIdentityToken = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func 
awsAwsquery_deserializeOpDocumentDecodeAuthorizationMessageOutput(v **DecodeAuthorizationMessageOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *DecodeAuthorizationMessageOutput + if *v == nil { + sv = &DecodeAuthorizationMessageOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("DecodedMessage", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.DecodedMessage = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentGetAccessKeyInfoOutput(v **GetAccessKeyInfoOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetAccessKeyInfoOutput + if *v == nil { + sv = &GetAccessKeyInfoOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Account", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Account = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentGetCallerIdentityOutput(v **GetCallerIdentityOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetCallerIdentityOutput + if *v == nil { + sv = &GetCallerIdentityOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Account", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Account = ptr.String(xtv) + } + + case strings.EqualFold("Arn", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Arn = ptr.String(xtv) + } + + case strings.EqualFold("UserId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.UserId = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentGetFederationTokenOutput(v **GetFederationTokenOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetFederationTokenOutput + if *v == nil { + sv = &GetFederationTokenOutput{} + } else { + sv = *v + } + + for { 
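+ // Each pass consumes one child element of GetFederationTokenResult:
+ // recognized tags (Credentials, FederatedUser, PackedPolicySize) decode
+ // into the matching output member, and unknown tags are skipped so
+ // unmodeled response fields are tolerated.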
+ t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Credentials", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("FederatedUser", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentFederatedUser(&sv.FederatedUser, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("PackedPolicySize", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.PackedPolicySize = ptr.Int32(int32(i64)) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentGetSessionTokenOutput(v **GetSessionTokenOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetSessionTokenOutput + if *v == nil { + sv = &GetSessionTokenOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Credentials", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go new file mode 100644 index 00000000000..d963fd8d19a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go @@ -0,0 +1,11 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +// Package sts provides the API client, operations, and parameter types for AWS +// Security Token Service. +// +// Security Token Service (STS) enables you to request +// temporary, limited-privilege credentials for users. This guide provides +// descriptions of the STS API. For more information about using this service, see +// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html) +// . +package sts diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go new file mode 100644 index 00000000000..cb5d56fd9c6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go @@ -0,0 +1,996 @@ +// Code generated by smithy-go-codegen DO NOT EDIT.
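+
+// endpoints.go carries both the legacy EndpointResolver plumbing and the
+// rules-based EndpointResolverV2; when the legacy resolver yields no
+// endpoint, resolution falls back to the V2 rules (see withEndpointResolver
+// below).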
+ +package sts + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn" + internalendpoints "github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints" + smithy "github.com/aws/smithy-go" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net/http" + "net/url" + "os" + "strings" +) + +// EndpointResolverOptions is the service endpoint resolver options +type EndpointResolverOptions = internalendpoints.Options + +// EndpointResolver interface for resolving service endpoints. +type EndpointResolver interface { + ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error) +} + +var _ EndpointResolver = &internalendpoints.Resolver{} + +// NewDefaultEndpointResolver constructs a new service endpoint resolver +func NewDefaultEndpointResolver() *internalendpoints.Resolver { + return internalendpoints.New() +} + +// EndpointResolverFunc is a helper utility that wraps a function so it satisfies +// the EndpointResolver interface. This is useful when you want to add additional +// endpoint resolving logic, or stub out specific endpoints with custom values. +type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error) + +func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + return fn(region, options) +} + +// EndpointResolverFromURL returns an EndpointResolver configured using the +// provided endpoint URL. By default, the returned resolver uses the +// client region as the signing region, and the endpoint source is set to +// EndpointSourceCustom. You can provide functional options to configure endpoint +// values for the resolved endpoint.
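+//
+// For example, to pin the client to a fixed endpoint (URL illustrative):
+//
+//	resolver := EndpointResolverFromURL("https://sts.example.internal")
+//	client := New(Options{Region: "us-east-1", EndpointResolver: resolver})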
+func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver { + e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom} + for _, fn := range optFns { + fn(&e) + } + + return EndpointResolverFunc( + func(region string, options EndpointResolverOptions) (aws.Endpoint, error) { + if len(e.SigningRegion) == 0 { + e.SigningRegion = region + } + return e, nil + }, + ) +} + +type ResolveEndpoint struct { + Resolver EndpointResolver + Options EndpointResolverOptions +} + +func (*ResolveEndpoint) ID() string { + return "ResolveEndpoint" +} + +func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.Resolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + eo := m.Options + eo.Logger = middleware.GetLogger(ctx) + + var endpoint aws.Endpoint + endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo) + if err != nil { + nf := (&aws.EndpointNotFoundError{}) + if errors.As(err, &nf) { + ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, false) + return next.HandleSerialize(ctx, in) + } + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL, err = url.Parse(endpoint.URL) + if err != nil { + return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) + } + + if len(awsmiddleware.GetSigningName(ctx)) == 0 { + signingName := endpoint.SigningName + if len(signingName) == 0 { + signingName = "sts" + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + } + ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source) + ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable) + ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) + ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID) + return next.HandleSerialize(ctx, in) +} +func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error { + return stack.Serialize.Insert(&ResolveEndpoint{ + Resolver: o.EndpointResolver, + Options: o.EndpointOptions, + }, "OperationSerializer", middleware.Before) +} + +func removeResolveEndpointMiddleware(stack *middleware.Stack) error { + _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID()) + return err +} + +type wrappedEndpointResolver struct { + awsResolver aws.EndpointResolverWithOptions +} + +func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + return w.awsResolver.ResolveEndpoint(ServiceID, region, options) +} + +type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error) + +func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) { + return a(service, region) +} + +var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil) + +// withEndpointResolver returns an EndpointResolver that first delegates endpoint resolution to the awsResolver.
+// If awsResolver returns an aws.EndpointNotFoundError, the v1 resolver middleware will swallow the error, +// and set an appropriate context flag such that fallback will occur when EndpointResolverV2 is invoked +// via its middleware. +// +// If another error (besides aws.EndpointNotFoundError) is returned, then that error will be propagated. +func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions) EndpointResolver { + var resolver aws.EndpointResolverWithOptions + + if awsResolverWithOptions != nil { + resolver = awsResolverWithOptions + } else if awsResolver != nil { + resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint) + } + + return &wrappedEndpointResolver{ + awsResolver: resolver, + } +} + +func finalizeClientEndpointResolverOptions(options *Options) { + options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage() + + if len(options.EndpointOptions.ResolvedRegion) == 0 { + const fipsInfix = "-fips-" + const fipsPrefix = "fips-" + const fipsSuffix = "-fips" + + if strings.Contains(options.Region, fipsInfix) || + strings.Contains(options.Region, fipsPrefix) || + strings.Contains(options.Region, fipsSuffix) { + options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( + options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") + options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled + } + } + +} + +func resolveEndpointResolverV2(options *Options) { + if options.EndpointResolverV2 == nil { + options.EndpointResolverV2 = NewDefaultEndpointResolverV2() + } +} + +func resolveBaseEndpoint(cfg aws.Config, o *Options) { + if cfg.BaseEndpoint != nil { + o.BaseEndpoint = cfg.BaseEndpoint + } + + _, g := os.LookupEnv("AWS_ENDPOINT_URL") + _, s := os.LookupEnv("AWS_ENDPOINT_URL_STS") + + if g && !s { + return + } + + value, found, err := internalConfig.ResolveServiceBaseEndpoint(context.Background(), "STS", cfg.ConfigSources) + if found && err == nil { + o.BaseEndpoint = &value + } +} + +// mapPseudoRegion translates a pseudo-region to its classical region, along +// with the FIPS setting the pseudo-region indicates +func mapPseudoRegion(pr string) (region string, fips aws.FIPSEndpointState) { + const fipsInfix = "-fips-" + const fipsPrefix = "fips-" + const fipsSuffix = "-fips" + + if strings.Contains(pr, fipsInfix) || + strings.Contains(pr, fipsPrefix) || + strings.Contains(pr, fipsSuffix) { + region = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( + pr, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") + fips = aws.FIPSEndpointStateEnabled + } else { + region = pr + } + + return region, fips +} + +// builtInParameterResolver is the interface responsible for resolving BuiltIn +// values during the sourcing of EndpointParameters +type builtInParameterResolver interface { + ResolveBuiltIns(*EndpointParameters) error +} + +// builtInResolver resolves modeled BuiltIn values using only the members defined +// below. +type builtInResolver struct { + // The AWS region used to dispatch the request. + Region string + + // Sourced BuiltIn value in a historical enabled or disabled state. + UseDualStack aws.DualStackEndpointState + + // Sourced BuiltIn value in a historical enabled or disabled state. + UseFIPS aws.FIPSEndpointState + + // Base endpoint that can potentially be modified during Endpoint resolution.
+ Endpoint *string + + // Whether the global endpoint should be used, rather than the regional endpoint + // for us-east-1. + UseGlobalEndpoint bool +} + +// Invoked at runtime to resolve BuiltIn Values. Only resolution code specific to +// each BuiltIn value is generated. +func (b *builtInResolver) ResolveBuiltIns(params *EndpointParameters) error { + + region, _ := mapPseudoRegion(b.Region) + if len(region) == 0 { + return fmt.Errorf("Could not resolve AWS::Region") + } else { + params.Region = aws.String(region) + } + if b.UseDualStack == aws.DualStackEndpointStateEnabled { + params.UseDualStack = aws.Bool(true) + } else { + params.UseDualStack = aws.Bool(false) + } + if b.UseFIPS == aws.FIPSEndpointStateEnabled { + params.UseFIPS = aws.Bool(true) + } else { + params.UseFIPS = aws.Bool(false) + } + params.Endpoint = b.Endpoint + params.UseGlobalEndpoint = aws.Bool(b.UseGlobalEndpoint) + return nil +} + +// EndpointParameters provides the parameters that influence how endpoints are +// resolved. +type EndpointParameters struct { + // The AWS region used to dispatch the request. + // + // Parameter is + // required. + // + // AWS::Region + Region *string + + // When true, use the dual-stack endpoint. If the configured endpoint does not + // support dual-stack, dispatching the request MAY return an error. + // + // Defaults to + // false if no value is provided. + // + // AWS::UseDualStack + UseDualStack *bool + + // When true, send this request to the FIPS-compliant regional endpoint. If the + // configured endpoint does not have a FIPS compliant endpoint, dispatching the + // request will return an error. + // + // Defaults to false if no value is + // provided. + // + // AWS::UseFIPS + UseFIPS *bool + + // Override the endpoint used to send this request + // + // Parameter is + // required. + // + // SDK::Endpoint + Endpoint *string + + // Whether the global endpoint should be used, rather than the regional endpoint + // for us-east-1. + // + // Defaults to false if no value is + // provided. + // + // AWS::STS::UseGlobalEndpoint + UseGlobalEndpoint *bool +} + +// ValidateRequired validates required parameters are set. +func (p EndpointParameters) ValidateRequired() error { + if p.UseDualStack == nil { + return fmt.Errorf("parameter UseDualStack is required") + } + + if p.UseFIPS == nil { + return fmt.Errorf("parameter UseFIPS is required") + } + + if p.UseGlobalEndpoint == nil { + return fmt.Errorf("parameter UseGlobalEndpoint is required") + } + + return nil +} + +// WithDefaults returns a shallow copy of EndpointParameters with default values +// applied to members where applicable. +func (p EndpointParameters) WithDefaults() EndpointParameters { + if p.UseDualStack == nil { + p.UseDualStack = ptr.Bool(false) + } + + if p.UseFIPS == nil { + p.UseFIPS = ptr.Bool(false) + } + + if p.UseGlobalEndpoint == nil { + p.UseGlobalEndpoint = ptr.Bool(false) + } + return p +} + +// EndpointResolverV2 provides the interface for resolving service endpoints. +type EndpointResolverV2 interface { + // ResolveEndpoint attempts to resolve the endpoint with the provided options, + // returning the endpoint if found. Otherwise an error is returned. + ResolveEndpoint(ctx context.Context, params EndpointParameters) ( + smithyendpoints.Endpoint, error, + ) +} + +// resolver provides the implementation for resolving endpoints.
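+// The rule evaluation in ResolveEndpoint below is generated from the STS
+// endpoint rule set: each branch maps a combination of region, FIPS, and
+// dual-stack settings to a concrete endpoint URI and its signing properties.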
+type resolver struct{} + +func NewDefaultEndpointResolverV2() EndpointResolverV2 { + return &resolver{} +} + +// ResolveEndpoint attempts to resolve the endpoint with the provided options, +// returning the endpoint if found. Otherwise an error is returned. +func (r *resolver) ResolveEndpoint( + ctx context.Context, params EndpointParameters, +) ( + endpoint smithyendpoints.Endpoint, err error, +) { + params = params.WithDefaults() + if err = params.ValidateRequired(); err != nil { + return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err) + } + _UseDualStack := *params.UseDualStack + _UseFIPS := *params.UseFIPS + _UseGlobalEndpoint := *params.UseGlobalEndpoint + + if _UseGlobalEndpoint == true { + if !(params.Endpoint != nil) { + if exprVal := params.Region; exprVal != nil { + _Region := *exprVal + _ = _Region + if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { + _PartitionResult := *exprVal + _ = _PartitionResult + if _UseFIPS == false { + if _UseDualStack == false { + if _Region == "ap-northeast-1" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("authSchemes", []interface{}{ + map[string]interface{}{ + "name": "sigv4", + "signingName": "sts", + "signingRegion": "us-east-1", + }, + }) + return out + }(), + }, nil + } + if _Region == "ap-south-1" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("authSchemes", []interface{}{ + map[string]interface{}{ + "name": "sigv4", + "signingName": "sts", + "signingRegion": "us-east-1", + }, + }) + return out + }(), + }, nil + } + if _Region == "ap-southeast-1" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("authSchemes", []interface{}{ + map[string]interface{}{ + "name": "sigv4", + "signingName": "sts", + "signingRegion": "us-east-1", + }, + }) + return out + }(), + }, nil + } + if _Region == "ap-southeast-2" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("authSchemes", []interface{}{ + map[string]interface{}{ + "name": "sigv4", + "signingName": "sts", + "signingRegion": "us-east-1", + }, + }) + return out + }(), + }, nil + } + if _Region == "aws-global" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("authSchemes", []interface{}{ + 
map[string]interface{}{ + "name": "sigv4", + "signingName": "sts", + "signingRegion": "us-east-1", + }, + }) + return out + }(), + }, nil + } + if _Region == "ca-central-1" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("authSchemes", []interface{}{ + map[string]interface{}{ + "name": "sigv4", + "signingName": "sts", + "signingRegion": "us-east-1", + }, + }) + return out + }(), + }, nil + } + if _Region == "eu-central-1" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("authSchemes", []interface{}{ + map[string]interface{}{ + "name": "sigv4", + "signingName": "sts", + "signingRegion": "us-east-1", + }, + }) + return out + }(), + }, nil + } + if _Region == "eu-north-1" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("authSchemes", []interface{}{ + map[string]interface{}{ + "name": "sigv4", + "signingName": "sts", + "signingRegion": "us-east-1", + }, + }) + return out + }(), + }, nil + } + if _Region == "eu-west-1" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("authSchemes", []interface{}{ + map[string]interface{}{ + "name": "sigv4", + "signingName": "sts", + "signingRegion": "us-east-1", + }, + }) + return out + }(), + }, nil + } + if _Region == "eu-west-2" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("authSchemes", []interface{}{ + map[string]interface{}{ + "name": "sigv4", + "signingName": "sts", + "signingRegion": "us-east-1", + }, + }) + return out + }(), + }, nil + } + if _Region == "eu-west-3" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("authSchemes", []interface{}{ + map[string]interface{}{ + "name": "sigv4", + "signingName": "sts", + "signingRegion": "us-east-1", + }, + }) + return out + }(), + }, nil + } + if _Region == "sa-east-1" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return 
smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("authSchemes", []interface{}{ + map[string]interface{}{ + "name": "sigv4", + "signingName": "sts", + "signingRegion": "us-east-1", + }, + }) + return out + }(), + }, nil + } + if _Region == "us-east-1" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("authSchemes", []interface{}{ + map[string]interface{}{ + "name": "sigv4", + "signingName": "sts", + "signingRegion": "us-east-1", + }, + }) + return out + }(), + }, nil + } + if _Region == "us-east-2" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("authSchemes", []interface{}{ + map[string]interface{}{ + "name": "sigv4", + "signingName": "sts", + "signingRegion": "us-east-1", + }, + }) + return out + }(), + }, nil + } + if _Region == "us-west-1" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("authSchemes", []interface{}{ + map[string]interface{}{ + "name": "sigv4", + "signingName": "sts", + "signingRegion": "us-east-1", + }, + }) + return out + }(), + }, nil + } + if _Region == "us-west-2" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("authSchemes", []interface{}{ + map[string]interface{}{ + "name": "sigv4", + "signingName": "sts", + "signingRegion": "us-east-1", + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://sts.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("authSchemes", []interface{}{ + map[string]interface{}{ + "name": "sigv4", + "signingName": "sts", + "signingRegion": _Region, + }, + }) + return out + }(), + }, nil + } + } + } + } + } + } + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if _UseFIPS == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: FIPS and custom endpoint are not supported") + } + if _UseDualStack == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Dualstack and custom endpoint are not supported") + } + uriString := _Endpoint 
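+
+			// A caller-supplied SDK::Endpoint is used verbatim; the checks above
+			// already rejected combining it with UseFIPS or UseDualStack.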
+ + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + if exprVal := params.Region; exprVal != nil { + _Region := *exprVal + _ = _Region + if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { + _PartitionResult := *exprVal + _ = _PartitionResult + if _UseFIPS == true { + if _UseDualStack == true { + if true == _PartitionResult.SupportsFIPS { + if true == _PartitionResult.SupportsDualStack { + uriString := func() string { + var out strings.Builder + out.WriteString("https://sts-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS and DualStack are enabled, but this partition does not support one or both") + } + } + if _UseFIPS == true { + if true == _PartitionResult.SupportsFIPS { + if "aws-us-gov" == _PartitionResult.Name { + uriString := func() string { + var out strings.Builder + out.WriteString("https://sts.") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://sts-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS is enabled but this partition does not support FIPS") + } + if _UseDualStack == true { + if true == _PartitionResult.SupportsDualStack { + uriString := func() string { + var out strings.Builder + out.WriteString("https://sts.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "DualStack is enabled but this partition does not support DualStack") + } + if _Region == "aws-global" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("authSchemes", []interface{}{ + map[string]interface{}{ + "name": "sigv4", + "signingName": "sts", + "signingRegion": "us-east-1", + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://sts.") + out.WriteString(_Region) + 
+					out.WriteString(".")
+					out.WriteString(_PartitionResult.DnsSuffix)
+					return out.String()
+				}()
+
+				uri, err := url.Parse(uriString)
+				if err != nil {
+					return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+				}
+
+				return smithyendpoints.Endpoint{
+					URI:     *uri,
+					Headers: http.Header{},
+				}, nil
+			}
+			return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.")
+		}
+	return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Missing Region")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json
new file mode 100644
index 00000000000..e44e7d149c5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json
@@ -0,0 +1,38 @@
+{
+  "dependencies": {
+    "github.com/aws/aws-sdk-go-v2": "v1.4.0",
+    "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000",
+    "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000",
+    "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url": "v1.0.7",
+    "github.com/aws/smithy-go": "v1.4.0",
+    "github.com/google/go-cmp": "v0.5.4"
+  },
+  "files": [
+    "api_client.go",
+    "api_client_test.go",
+    "api_op_AssumeRole.go",
+    "api_op_AssumeRoleWithSAML.go",
+    "api_op_AssumeRoleWithWebIdentity.go",
+    "api_op_DecodeAuthorizationMessage.go",
+    "api_op_GetAccessKeyInfo.go",
+    "api_op_GetCallerIdentity.go",
+    "api_op_GetFederationToken.go",
+    "api_op_GetSessionToken.go",
+    "deserializers.go",
+    "doc.go",
+    "endpoints.go",
+    "endpoints_config_test.go",
+    "endpoints_test.go",
+    "generated.json",
+    "internal/endpoints/endpoints.go",
+    "internal/endpoints/endpoints_test.go",
+    "protocol_test.go",
+    "serializers.go",
+    "types/errors.go",
+    "types/types.go",
+    "validators.go"
+  ],
+  "go": "1.15",
+  "module": "github.com/aws/aws-sdk-go-v2/service/sts",
+  "unstable": false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go
new file mode 100644
index 00000000000..f934c18f6a5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package sts
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.25.1"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go
new file mode 100644
index 00000000000..ca4c881909a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go
@@ -0,0 +1,509 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package endpoints
+
+import (
+	"github.com/aws/aws-sdk-go-v2/aws"
+	endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2"
+	"github.com/aws/smithy-go/logging"
+	"regexp"
+)
+
+// Options is the endpoint resolver configuration options
+type Options struct {
+	// Logger is a logging implementation that log events should be sent to.
+	Logger logging.Logger
+
+	// LogDeprecated indicates that deprecated endpoints should be logged to the
+	// provided logger.
+	LogDeprecated bool
+
+	// ResolvedRegion is used to override the region to be resolved, rather than
+	// using the value passed to the ResolveEndpoint method.
This value is used by the + // SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative + // name. You must not set this value directly in your application. + ResolvedRegion string + + // DisableHTTPS informs the resolver to return an endpoint that does not use the + // HTTPS scheme. + DisableHTTPS bool + + // UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint. + UseDualStackEndpoint aws.DualStackEndpointState + + // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. + UseFIPSEndpoint aws.FIPSEndpointState +} + +func (o Options) GetResolvedRegion() string { + return o.ResolvedRegion +} + +func (o Options) GetDisableHTTPS() bool { + return o.DisableHTTPS +} + +func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState { + return o.UseDualStackEndpoint +} + +func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState { + return o.UseFIPSEndpoint +} + +func transformToSharedOptions(options Options) endpoints.Options { + return endpoints.Options{ + Logger: options.Logger, + LogDeprecated: options.LogDeprecated, + ResolvedRegion: options.ResolvedRegion, + DisableHTTPS: options.DisableHTTPS, + UseDualStackEndpoint: options.UseDualStackEndpoint, + UseFIPSEndpoint: options.UseFIPSEndpoint, + } +} + +// Resolver STS endpoint resolver +type Resolver struct { + partitions endpoints.Partitions +} + +// ResolveEndpoint resolves the service endpoint for the given region and options +func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) { + if len(region) == 0 { + return endpoint, &aws.MissingRegionError{} + } + + opt := transformToSharedOptions(options) + return r.partitions.ResolveEndpoint(region, opt) +} + +// New returns a new Resolver +func New() *Resolver { + return &Resolver{ + partitions: defaultPartitions, + } +} + +var partitionRegexp = struct { + Aws *regexp.Regexp + AwsCn *regexp.Regexp + AwsIso *regexp.Regexp + AwsIsoB *regexp.Regexp + AwsIsoE *regexp.Regexp + AwsIsoF *regexp.Regexp + AwsUsGov *regexp.Regexp +}{ + + Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$"), + AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), + AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), + AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), + AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"), + AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"), + AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), +} + +var defaultPartitions = endpoints.Partitions{ + { + ID: "aws", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "sts.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "sts-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "sts.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.Aws, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "af-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + 
Region: "ap-northeast-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-northeast-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-northeast-3", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-south-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-3", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-4", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "aws-global", + }: endpoints.Endpoint{ + Hostname: "sts.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "ca-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-central-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-north-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-south-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-west-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-west-3", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "il-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "me-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "me-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "sa-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.us-east-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-east-1-fips", + }: endpoints.Endpoint{ + Hostname: "sts-fips.us-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-east-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-east-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.us-east-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-east-2-fips", + }: endpoints.Endpoint{ + Hostname: "sts-fips.us-east-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.us-west-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-west-1-fips", + }: endpoints.Endpoint{ + Hostname: "sts-fips.us-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-west-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-west-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.us-west-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-west-2-fips", + }: endpoints.Endpoint{ + Hostname: 
"sts-fips.us-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-2", + }, + Deprecated: aws.TrueTernary, + }, + }, + }, + { + ID: "aws-cn", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "sts.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "sts-fips.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "sts.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsCn, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "cn-north-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "cn-northwest-1", + }: endpoints.Endpoint{}, + }, + }, + { + ID: "aws-iso", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "sts.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIso, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-iso-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-iso-west-1", + }: endpoints.Endpoint{}, + }, + }, + { + ID: "aws-iso-b", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "sts.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoB, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-isob-east-1", + }: endpoints.Endpoint{}, + }, + }, + { + ID: "aws-iso-e", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "sts.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoE, + IsRegionalized: true, + }, + { + ID: "aws-iso-f", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.{region}.csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "sts.{region}.csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoF, + IsRegionalized: true, + }, + { + ID: "aws-us-gov", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "sts.{region}.api.aws", + Protocols: 
[]string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "sts-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "sts.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsUsGov, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-gov-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-gov-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts.us-gov-east-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-gov-east-1-fips", + }: endpoints.Endpoint{ + Hostname: "sts.us-gov-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts.us-gov-west-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1-fips", + }: endpoints.Endpoint{ + Hostname: "sts.us-gov-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: aws.TrueTernary, + }, + }, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go new file mode 100644 index 00000000000..4c08061c0c9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go @@ -0,0 +1,862 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
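+
+// Overview (illustrative note, not generated code): STS uses the awsquery
+// protocol, so each operation serializer in this file encodes its input as
+// form-encoded key/value pairs in the POST body. For example, an AssumeRole
+// call produces a body shaped roughly like:
+//
+//	Action=AssumeRole&Version=2011-06-15&RoleArn=arn%3A...&RoleSessionName=mysession
+//
+// where RoleArn and RoleSessionName come from the operation input (the session
+// name above is a hypothetical example value).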
+ +package sts + +import ( + "bytes" + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws/protocol/query" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/encoding/httpbinding" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "path" +) + +type awsAwsquery_serializeOpAssumeRole struct { +} + +func (*awsAwsquery_serializeOpAssumeRole) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpAssumeRole) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*AssumeRoleInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("AssumeRole") + body.Key("Version").String("2011-06-15") + + if err := awsAwsquery_serializeOpDocumentAssumeRoleInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsquery_serializeOpAssumeRoleWithSAML struct { +} + +func (*awsAwsquery_serializeOpAssumeRoleWithSAML) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpAssumeRoleWithSAML) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*AssumeRoleWithSAMLInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = 
path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("AssumeRoleWithSAML") + body.Key("Version").String("2011-06-15") + + if err := awsAwsquery_serializeOpDocumentAssumeRoleWithSAMLInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsquery_serializeOpAssumeRoleWithWebIdentity struct { +} + +func (*awsAwsquery_serializeOpAssumeRoleWithWebIdentity) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpAssumeRoleWithWebIdentity) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*AssumeRoleWithWebIdentityInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("AssumeRoleWithWebIdentity") + body.Key("Version").String("2011-06-15") + + if err := awsAwsquery_serializeOpDocumentAssumeRoleWithWebIdentityInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = 
httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsquery_serializeOpDecodeAuthorizationMessage struct { +} + +func (*awsAwsquery_serializeOpDecodeAuthorizationMessage) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpDecodeAuthorizationMessage) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DecodeAuthorizationMessageInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("DecodeAuthorizationMessage") + body.Key("Version").String("2011-06-15") + + if err := awsAwsquery_serializeOpDocumentDecodeAuthorizationMessageInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsquery_serializeOpGetAccessKeyInfo struct { +} + +func (*awsAwsquery_serializeOpGetAccessKeyInfo) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpGetAccessKeyInfo) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetAccessKeyInfoInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && 
operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("GetAccessKeyInfo") + body.Key("Version").String("2011-06-15") + + if err := awsAwsquery_serializeOpDocumentGetAccessKeyInfoInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsquery_serializeOpGetCallerIdentity struct { +} + +func (*awsAwsquery_serializeOpGetCallerIdentity) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpGetCallerIdentity) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetCallerIdentityInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("GetCallerIdentity") + body.Key("Version").String("2011-06-15") + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsquery_serializeOpGetFederationToken struct { +} + +func (*awsAwsquery_serializeOpGetFederationToken) ID() string { + return "OperationSerializer" +} + 
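+
+// Each HandleSerialize in this file follows the same template: normalize the URL
+// path, set the POST method and form-encoded Content-Type, emit Action/Version
+// into the query body, then serialize the operation's input members.
+// GetCallerIdentity (above) has no input members, so its body is effectively
+// just "Action=GetCallerIdentity&Version=2011-06-15".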
+func (m *awsAwsquery_serializeOpGetFederationToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetFederationTokenInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("GetFederationToken") + body.Key("Version").String("2011-06-15") + + if err := awsAwsquery_serializeOpDocumentGetFederationTokenInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsquery_serializeOpGetSessionToken struct { +} + +func (*awsAwsquery_serializeOpGetSessionToken) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpGetSessionToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetSessionTokenInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + 
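+	// Buffer the form-encoded query body before attaching it as the request stream.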
bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("GetSessionToken") + body.Key("Version").String("2011-06-15") + + if err := awsAwsquery_serializeOpDocumentGetSessionTokenInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsAwsquery_serializeDocumentPolicyDescriptorListType(v []types.PolicyDescriptorType, value query.Value) error { + array := value.Array("member") + + for i := range v { + av := array.Value() + if err := awsAwsquery_serializeDocumentPolicyDescriptorType(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsquery_serializeDocumentPolicyDescriptorType(v *types.PolicyDescriptorType, value query.Value) error { + object := value.Object() + _ = object + + if v.Arn != nil { + objectKey := object.Key("arn") + objectKey.String(*v.Arn) + } + + return nil +} + +func awsAwsquery_serializeDocumentProvidedContext(v *types.ProvidedContext, value query.Value) error { + object := value.Object() + _ = object + + if v.ContextAssertion != nil { + objectKey := object.Key("ContextAssertion") + objectKey.String(*v.ContextAssertion) + } + + if v.ProviderArn != nil { + objectKey := object.Key("ProviderArn") + objectKey.String(*v.ProviderArn) + } + + return nil +} + +func awsAwsquery_serializeDocumentProvidedContextsListType(v []types.ProvidedContext, value query.Value) error { + array := value.Array("member") + + for i := range v { + av := array.Value() + if err := awsAwsquery_serializeDocumentProvidedContext(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsquery_serializeDocumentTag(v *types.Tag, value query.Value) error { + object := value.Object() + _ = object + + if v.Key != nil { + objectKey := object.Key("Key") + objectKey.String(*v.Key) + } + + if v.Value != nil { + objectKey := object.Key("Value") + objectKey.String(*v.Value) + } + + return nil +} + +func awsAwsquery_serializeDocumentTagKeyListType(v []string, value query.Value) error { + array := value.Array("member") + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsquery_serializeDocumentTagListType(v []types.Tag, value query.Value) error { + array := value.Array("member") + + for i := range v { + av := array.Value() + if err := awsAwsquery_serializeDocumentTag(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsquery_serializeOpDocumentAssumeRoleInput(v *AssumeRoleInput, value query.Value) error { + object := value.Object() + _ = object + + if v.DurationSeconds != nil { + objectKey := object.Key("DurationSeconds") + objectKey.Integer(*v.DurationSeconds) + } + + if v.ExternalId != nil { + objectKey := object.Key("ExternalId") + objectKey.String(*v.ExternalId) + } + + if v.Policy != nil { + objectKey := object.Key("Policy") + objectKey.String(*v.Policy) + } + + if v.PolicyArns != nil { + objectKey := object.Key("PolicyArns") + if err := 
awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil { + return err + } + } + + if v.ProvidedContexts != nil { + objectKey := object.Key("ProvidedContexts") + if err := awsAwsquery_serializeDocumentProvidedContextsListType(v.ProvidedContexts, objectKey); err != nil { + return err + } + } + + if v.RoleArn != nil { + objectKey := object.Key("RoleArn") + objectKey.String(*v.RoleArn) + } + + if v.RoleSessionName != nil { + objectKey := object.Key("RoleSessionName") + objectKey.String(*v.RoleSessionName) + } + + if v.SerialNumber != nil { + objectKey := object.Key("SerialNumber") + objectKey.String(*v.SerialNumber) + } + + if v.SourceIdentity != nil { + objectKey := object.Key("SourceIdentity") + objectKey.String(*v.SourceIdentity) + } + + if v.Tags != nil { + objectKey := object.Key("Tags") + if err := awsAwsquery_serializeDocumentTagListType(v.Tags, objectKey); err != nil { + return err + } + } + + if v.TokenCode != nil { + objectKey := object.Key("TokenCode") + objectKey.String(*v.TokenCode) + } + + if v.TransitiveTagKeys != nil { + objectKey := object.Key("TransitiveTagKeys") + if err := awsAwsquery_serializeDocumentTagKeyListType(v.TransitiveTagKeys, objectKey); err != nil { + return err + } + } + + return nil +} + +func awsAwsquery_serializeOpDocumentAssumeRoleWithSAMLInput(v *AssumeRoleWithSAMLInput, value query.Value) error { + object := value.Object() + _ = object + + if v.DurationSeconds != nil { + objectKey := object.Key("DurationSeconds") + objectKey.Integer(*v.DurationSeconds) + } + + if v.Policy != nil { + objectKey := object.Key("Policy") + objectKey.String(*v.Policy) + } + + if v.PolicyArns != nil { + objectKey := object.Key("PolicyArns") + if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil { + return err + } + } + + if v.PrincipalArn != nil { + objectKey := object.Key("PrincipalArn") + objectKey.String(*v.PrincipalArn) + } + + if v.RoleArn != nil { + objectKey := object.Key("RoleArn") + objectKey.String(*v.RoleArn) + } + + if v.SAMLAssertion != nil { + objectKey := object.Key("SAMLAssertion") + objectKey.String(*v.SAMLAssertion) + } + + return nil +} + +func awsAwsquery_serializeOpDocumentAssumeRoleWithWebIdentityInput(v *AssumeRoleWithWebIdentityInput, value query.Value) error { + object := value.Object() + _ = object + + if v.DurationSeconds != nil { + objectKey := object.Key("DurationSeconds") + objectKey.Integer(*v.DurationSeconds) + } + + if v.Policy != nil { + objectKey := object.Key("Policy") + objectKey.String(*v.Policy) + } + + if v.PolicyArns != nil { + objectKey := object.Key("PolicyArns") + if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil { + return err + } + } + + if v.ProviderId != nil { + objectKey := object.Key("ProviderId") + objectKey.String(*v.ProviderId) + } + + if v.RoleArn != nil { + objectKey := object.Key("RoleArn") + objectKey.String(*v.RoleArn) + } + + if v.RoleSessionName != nil { + objectKey := object.Key("RoleSessionName") + objectKey.String(*v.RoleSessionName) + } + + if v.WebIdentityToken != nil { + objectKey := object.Key("WebIdentityToken") + objectKey.String(*v.WebIdentityToken) + } + + return nil +} + +func awsAwsquery_serializeOpDocumentDecodeAuthorizationMessageInput(v *DecodeAuthorizationMessageInput, value query.Value) error { + object := value.Object() + _ = object + + if v.EncodedMessage != nil { + objectKey := object.Key("EncodedMessage") + objectKey.String(*v.EncodedMessage) + } + + return nil +} + 
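+
+// In the awsquery encoding, list members flatten to indexed keys; e.g. a Tags
+// value of []types.Tag{{Key: aws.String("team"), Value: aws.String("core")}}
+// serializes to Tags.member.1.Key=team&Tags.member.1.Value=core (an illustrative
+// pair, derived from the "member" array name used by the list serializers above).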
+func awsAwsquery_serializeOpDocumentGetAccessKeyInfoInput(v *GetAccessKeyInfoInput, value query.Value) error { + object := value.Object() + _ = object + + if v.AccessKeyId != nil { + objectKey := object.Key("AccessKeyId") + objectKey.String(*v.AccessKeyId) + } + + return nil +} + +func awsAwsquery_serializeOpDocumentGetCallerIdentityInput(v *GetCallerIdentityInput, value query.Value) error { + object := value.Object() + _ = object + + return nil +} + +func awsAwsquery_serializeOpDocumentGetFederationTokenInput(v *GetFederationTokenInput, value query.Value) error { + object := value.Object() + _ = object + + if v.DurationSeconds != nil { + objectKey := object.Key("DurationSeconds") + objectKey.Integer(*v.DurationSeconds) + } + + if v.Name != nil { + objectKey := object.Key("Name") + objectKey.String(*v.Name) + } + + if v.Policy != nil { + objectKey := object.Key("Policy") + objectKey.String(*v.Policy) + } + + if v.PolicyArns != nil { + objectKey := object.Key("PolicyArns") + if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil { + return err + } + } + + if v.Tags != nil { + objectKey := object.Key("Tags") + if err := awsAwsquery_serializeDocumentTagListType(v.Tags, objectKey); err != nil { + return err + } + } + + return nil +} + +func awsAwsquery_serializeOpDocumentGetSessionTokenInput(v *GetSessionTokenInput, value query.Value) error { + object := value.Object() + _ = object + + if v.DurationSeconds != nil { + objectKey := object.Key("DurationSeconds") + objectKey.Integer(*v.DurationSeconds) + } + + if v.SerialNumber != nil { + objectKey := object.Key("SerialNumber") + objectKey.String(*v.SerialNumber) + } + + if v.TokenCode != nil { + objectKey := object.Key("TokenCode") + objectKey.String(*v.TokenCode) + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go new file mode 100644 index 00000000000..097875b279b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go @@ -0,0 +1,244 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + "fmt" + smithy "github.com/aws/smithy-go" +) + +// The web identity token that was passed is expired or is not valid. Get a new +// identity token from the identity provider and then retry the request. +type ExpiredTokenException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ExpiredTokenException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ExpiredTokenException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ExpiredTokenException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ExpiredTokenException" + } + return *e.ErrorCodeOverride +} +func (e *ExpiredTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request could not be fulfilled because the identity provider (IDP) that was +// asked to verify the incoming identity token could not be reached. This is often +// a transient error caused by network conditions. Retry the request a limited +// number of times so that you don't exceed the request rate. If the error +// persists, the identity provider might be down or not responding. 
+type IDPCommunicationErrorException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *IDPCommunicationErrorException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *IDPCommunicationErrorException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *IDPCommunicationErrorException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "IDPCommunicationError" + } + return *e.ErrorCodeOverride +} +func (e *IDPCommunicationErrorException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The identity provider (IdP) reported that authentication failed. This might be +// because the claim is invalid. If this error is returned for the +// AssumeRoleWithWebIdentity operation, it can also mean that the claim has expired +// or has been explicitly revoked. +type IDPRejectedClaimException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *IDPRejectedClaimException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *IDPRejectedClaimException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *IDPRejectedClaimException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "IDPRejectedClaim" + } + return *e.ErrorCodeOverride +} +func (e *IDPRejectedClaimException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The error returned if the message passed to DecodeAuthorizationMessage was +// invalid. This can happen if the token contains invalid characters, such as +// linebreaks. +type InvalidAuthorizationMessageException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *InvalidAuthorizationMessageException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidAuthorizationMessageException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidAuthorizationMessageException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidAuthorizationMessageException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidAuthorizationMessageException) ErrorFault() smithy.ErrorFault { + return smithy.FaultClient +} + +// The web identity token that was passed could not be validated by Amazon Web +// Services. Get a new identity token from the identity provider and then retry the +// request. +type InvalidIdentityTokenException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *InvalidIdentityTokenException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidIdentityTokenException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidIdentityTokenException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidIdentityToken" + } + return *e.ErrorCodeOverride +} +func (e *InvalidIdentityTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. 
+type MalformedPolicyDocumentException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *MalformedPolicyDocumentException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *MalformedPolicyDocumentException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *MalformedPolicyDocumentException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "MalformedPolicyDocument" + } + return *e.ErrorCodeOverride +} +func (e *MalformedPolicyDocumentException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An Amazon Web Services conversion +// compresses the session policy document, session policy ARNs, and session tags +// into a packed binary format that has a separate limit. The error message +// indicates by percentage how close the policies and tags are to the upper size +// limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. You could receive this error even though you meet other +// defined session policy and session tag limits. For more information, see IAM +// and STS Entity Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +type PackedPolicyTooLargeException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *PackedPolicyTooLargeException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *PackedPolicyTooLargeException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *PackedPolicyTooLargeException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "PackedPolicyTooLarge" + } + return *e.ErrorCodeOverride +} +func (e *PackedPolicyTooLargeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating Amazon Web Services STS in an Amazon Web Services Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. 
+type RegionDisabledException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *RegionDisabledException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *RegionDisabledException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *RegionDisabledException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "RegionDisabledException" + } + return *e.ErrorCodeOverride +} +func (e *RegionDisabledException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go new file mode 100644 index 00000000000..572a7051225 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go @@ -0,0 +1,130 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + smithydocument "github.com/aws/smithy-go/document" + "time" +) + +// The identifiers for the temporary security credentials that the operation +// returns. +type AssumedRoleUser struct { + + // The ARN of the temporary security credentials that are returned from the + // AssumeRole action. For more information about ARNs and how to use them in + // policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in the IAM User Guide. + // + // This member is required. + Arn *string + + // A unique identifier that contains the role ID and the role session name of the + // role that is being assumed. The role ID is generated by Amazon Web Services when + // the role is created. + // + // This member is required. + AssumedRoleId *string + + noSmithyDocumentSerde +} + +// Amazon Web Services credentials for API authentication. +type Credentials struct { + + // The access key ID that identifies the temporary security credentials. + // + // This member is required. + AccessKeyId *string + + // The date on which the current credentials expire. + // + // This member is required. + Expiration *time.Time + + // The secret access key that can be used to sign requests. + // + // This member is required. + SecretAccessKey *string + + // The token that users must pass to the service API to use the temporary + // credentials. + // + // This member is required. + SessionToken *string + + noSmithyDocumentSerde +} + +// Identifiers for the federated user that is associated with the credentials. +type FederatedUser struct { + + // The ARN that specifies the federated user that is associated with the + // credentials. For more information about ARNs and how to use them in policies, + // see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in the IAM User Guide. + // + // This member is required. + Arn *string + + // The string that identifies the federated user associated with the credentials, + // similar to the unique ID of an IAM user. + // + // This member is required. + FederatedUserId *string + + noSmithyDocumentSerde +} + +// A reference to the IAM managed policy that is passed as a session policy for a +// role session or a federated user session. +type PolicyDescriptorType struct { + + // The Amazon Resource Name (ARN) of the IAM managed policy to use as a session + // policy for the role. 
For more information about ARNs, see Amazon Resource Names + // (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. + Arn *string + + noSmithyDocumentSerde +} + +// Reserved for future use. +type ProvidedContext struct { + + // Reserved for future use. + ContextAssertion *string + + // Reserved for future use. + ProviderArn *string + + noSmithyDocumentSerde +} + +// You can pass custom key-value pair attributes when you assume a role or +// federate a user. These are called session tags. You can then use the session +// tags to control access to resources. For more information, see Tagging Amazon +// Web Services STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +type Tag struct { + + // The key for a session tag. You can pass up to 50 session tags. The plain text + // session tag keys can’t exceed 128 characters. For these and additional limits, + // see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // This member is required. + Key *string + + // The value for a session tag. You can pass up to 50 session tags. The plain text + // session tag values can’t exceed 256 characters. For these and additional limits, + // see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // This member is required. + Value *string + + noSmithyDocumentSerde +} + +type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go new file mode 100644 index 00000000000..3e4bad2a925 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go @@ -0,0 +1,305 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package sts + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" +) + +type validateOpAssumeRole struct { +} + +func (*validateOpAssumeRole) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpAssumeRole) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*AssumeRoleInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpAssumeRoleInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpAssumeRoleWithSAML struct { +} + +func (*validateOpAssumeRoleWithSAML) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpAssumeRoleWithSAML) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*AssumeRoleWithSAMLInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpAssumeRoleWithSAMLInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpAssumeRoleWithWebIdentity struct { +} + +func (*validateOpAssumeRoleWithWebIdentity) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpAssumeRoleWithWebIdentity) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*AssumeRoleWithWebIdentityInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpAssumeRoleWithWebIdentityInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDecodeAuthorizationMessage struct { +} + +func (*validateOpDecodeAuthorizationMessage) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDecodeAuthorizationMessage) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DecodeAuthorizationMessageInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDecodeAuthorizationMessageInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetAccessKeyInfo struct { +} + +func (*validateOpGetAccessKeyInfo) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetAccessKeyInfo) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetAccessKeyInfoInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetAccessKeyInfoInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + 
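+// For reference (illustrative, hand-written; not part of the generated
+// file): these validators run in the middleware Initialize step, so an
+// operation invoked with missing required members fails client-side before
+// any HTTP request is made. A minimal sketch, assuming a configured
+// aws.Config value named cfg:
+//
+//	client := sts.NewFromConfig(cfg)
+//	_, err := client.AssumeRole(ctx, &sts.AssumeRoleInput{})
+//	// err wraps a smithy.InvalidParamsError naming RoleArn and RoleSessionName.
+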
+type validateOpGetFederationToken struct { +} + +func (*validateOpGetFederationToken) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetFederationToken) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetFederationTokenInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetFederationTokenInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +func addOpAssumeRoleValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpAssumeRole{}, middleware.After) +} + +func addOpAssumeRoleWithSAMLValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpAssumeRoleWithSAML{}, middleware.After) +} + +func addOpAssumeRoleWithWebIdentityValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpAssumeRoleWithWebIdentity{}, middleware.After) +} + +func addOpDecodeAuthorizationMessageValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDecodeAuthorizationMessage{}, middleware.After) +} + +func addOpGetAccessKeyInfoValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetAccessKeyInfo{}, middleware.After) +} + +func addOpGetFederationTokenValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetFederationToken{}, middleware.After) +} + +func validateTag(v *types.Tag) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Tag"} + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if v.Value == nil { + invalidParams.Add(smithy.NewErrParamRequired("Value")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTagListType(v []types.Tag) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TagListType"} + for i := range v { + if err := validateTag(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpAssumeRoleInput(v *AssumeRoleInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AssumeRoleInput"} + if v.RoleArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("RoleArn")) + } + if v.RoleSessionName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RoleSessionName")) + } + if v.Tags != nil { + if err := validateTagListType(v.Tags); err != nil { + invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpAssumeRoleWithSAMLInput(v *AssumeRoleWithSAMLInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AssumeRoleWithSAMLInput"} + if v.RoleArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("RoleArn")) + } + if v.PrincipalArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("PrincipalArn")) + } + if v.SAMLAssertion == nil { + invalidParams.Add(smithy.NewErrParamRequired("SAMLAssertion")) + } + if 
invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpAssumeRoleWithWebIdentityInput(v *AssumeRoleWithWebIdentityInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AssumeRoleWithWebIdentityInput"} + if v.RoleArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("RoleArn")) + } + if v.RoleSessionName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RoleSessionName")) + } + if v.WebIdentityToken == nil { + invalidParams.Add(smithy.NewErrParamRequired("WebIdentityToken")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDecodeAuthorizationMessageInput(v *DecodeAuthorizationMessageInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DecodeAuthorizationMessageInput"} + if v.EncodedMessage == nil { + invalidParams.Add(smithy.NewErrParamRequired("EncodedMessage")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetAccessKeyInfoInput(v *GetAccessKeyInfoInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetAccessKeyInfoInput"} + if v.AccessKeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("AccessKeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetFederationTokenInput(v *GetFederationTokenInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetFederationTokenInput"} + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if v.Tags != nil { + if err := validateTagListType(v.Tags); err != nil { + invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go index f3ce8183dd9..2945185b0be 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -349,7 +349,7 @@ func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile s if cfg.hasSSOTokenProviderConfiguration() { skippedFiles = 0 for _, f := range files { - section, ok := f.IniData.GetSection(fmt.Sprintf(ssoSectionPrefix + strings.TrimSpace(cfg.SSOSessionName))) + section, ok := f.IniData.GetSection(ssoSectionPrefix + strings.TrimSpace(cfg.SSOSessionName)) if ok { var ssoSession ssoSession ssoSession.setFromIniSection(section) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index d15e3c84c01..7ab65bae79e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.55.5" +const SDKVersion = "1.55.6" diff --git a/vendor/github.com/aws/smithy-go/.gitignore b/vendor/github.com/aws/smithy-go/.gitignore new file mode 100644 index 00000000000..c92d6105eb3 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/.gitignore @@ -0,0 +1,26 @@ +# Eclipse +.classpath +.project +.settings/ + +# Intellij +.idea/ +*.iml +*.iws + +# Mac +.DS_Store + +# Maven +target/ +**/dependency-reduced-pom.xml + +# Gradle +/.gradle +build/ 
+*/out/ +*/*/out/ + +# VS Code +bin/ +.vscode/ diff --git a/vendor/github.com/aws/smithy-go/.travis.yml b/vendor/github.com/aws/smithy-go/.travis.yml new file mode 100644 index 00000000000..f8d1035cc33 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/.travis.yml @@ -0,0 +1,28 @@ +language: go +sudo: true +dist: bionic + +branches: + only: + - main + +os: + - linux + - osx + # Travis doesn't work with windows and Go tip + #- windows + +go: + - tip + +matrix: + allow_failures: + - go: tip + +before_install: + - if [ "$TRAVIS_OS_NAME" = "windows" ]; then choco install make; fi + - (cd /tmp/; go get golang.org/x/lint/golint) + +script: + - make go test -v ./...; + diff --git a/vendor/github.com/aws/smithy-go/CHANGELOG.md b/vendor/github.com/aws/smithy-go/CHANGELOG.md new file mode 100644 index 00000000000..9cca07b5563 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/CHANGELOG.md @@ -0,0 +1,194 @@ +# Release (2023-10-31) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.16.0 + * **Feature**: **LANG**: Bump minimum go version to 1.19. + +# Release (2023-10-06) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.15.0 + * **Feature**: Add `http.WithHeaderComment` middleware. + +# Release (2023-08-18) + +* No change notes available for this release. + +# Release (2023-08-07) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.14.1 + * **Bug Fix**: Prevent duplicated error returns in EndpointResolverV2 default implementation. + +# Release (2023-07-31) + +## General Highlights +* **Feature**: Adds support for smithy-modeled endpoint resolution. + +# Release (2022-12-02) + +* No change notes available for this release. + +# Release (2022-10-24) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.13.4 + * **Bug Fix**: fixed document type checking for encoding nested types + +# Release (2022-09-14) + +* No change notes available for this release. + +# Release (v1.13.2) + +* No change notes available for this release. + +# Release (v1.13.1) + +* No change notes available for this release. + +# Release (v1.13.0) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.13.0 + * **Feature**: Adds support for the Smithy httpBearerAuth authentication trait to smithy-go. This allows the SDK to support the bearer authentication flow for API operations decorated with httpBearerAuth. An API client will need to be provided with its own bearer.TokenProvider implementation or use the bearer.StaticTokenProvider implementation. + +# Release (v1.12.1) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.12.1 + * **Bug Fix**: Fixes a bug where JSON object keys were not escaped. + +# Release (v1.12.0) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.12.0 + * **Feature**: `transport/http`: Add utility for setting context metadata when operation serializer automatically assigns content-type default value. + +# Release (v1.11.3) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.11.3 + * **Dependency Update**: Updates smithy-go unit test dependency go-cmp to 0.5.8. + +# Release (v1.11.2) + +* No change notes available for this release. + +# Release (v1.11.1) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.11.1 + * **Bug Fix**: Updates the smithy-go HTTP Request to correctly handle building the request to an http.Request. 
Related to [aws/aws-sdk-go-v2#1583](https://github.com/aws/aws-sdk-go-v2/issues/1583) + +# Release (v1.11.0) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.11.0 + * **Feature**: Updates deserialization of header list to support quoted strings + +# Release (v1.10.0) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.10.0 + * **Feature**: Add `ptr.Duration`, `ptr.ToDuration`, `ptr.DurationSlice`, `ptr.ToDurationSlice`, `ptr.DurationMap`, and `ptr.ToDurationMap` functions for the `time.Duration` type. + +# Release (v1.9.1) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.9.1 + * **Documentation**: Fixes various typos in Go package documentation. + +# Release (v1.9.0) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.9.0 + * **Feature**: sync: OnceErr, can be used to concurrently record a signal when an error has occurred. + * **Bug Fix**: `transport/http`: CloseResponseBody and ErrorCloseResponseBody middleware have been updated to ensure that the body is fully drained before closing. + +# Release v1.8.1 + +### Smithy Go Module +* **Bug Fix**: Fixed an issue that would cause the HTTP Content-Length to be set to 0 if the stream body was not set. + * Fixes [aws/aws-sdk-go-v2#1418](https://github.com/aws/aws-sdk-go-v2/issues/1418) + +# Release v1.8.0 + +### Smithy Go Module + +* `time`: Add support for parsing additional DateTime timestamp format ([#324](https://github.com/aws/smithy-go/pull/324)) + * Adds support for parsing DateTime timestamp formatted time similar to RFC 3339, but without the `Z` character or UTC offset. + * Fixes [#1387](https://github.com/aws/aws-sdk-go-v2/issues/1387) + +# Release v1.7.0 + +### Smithy Go Module +* `ptr`: Handle error for deferred file close call ([#314](https://github.com/aws/smithy-go/pull/314)) + * Handle error for defer close call +* `middleware`: Add Clone to Metadata ([#318](https://github.com/aws/smithy-go/pull/318)) + * Adds a new Clone method to the middleware Metadata type. This provides a shallow clone of the entries in the Metadata. +* `document`: Add new package for document shape serialization support ([#310](https://github.com/aws/smithy-go/pull/310)) + +### Codegen +* Add Smithy Document Shape Support ([#310](https://github.com/aws/smithy-go/pull/310)) + * Adds support for Smithy Document shapes and supporting types for protocols to implement support + +# Release v1.6.0 (2021-07-15) + +### Smithy Go Module +* `encoding/httpbinding`: Support has been added for encoding `float32` and `float64` values that are `NaN`, `Infinity`, or `-Infinity`. ([#316](https://github.com/aws/smithy-go/pull/316)) + +### Codegen +* Adds support for handling `float32` and `float64` `NaN` values in HTTP Protocol Unit Tests. ([#316](https://github.com/aws/smithy-go/pull/316)) +* Adds support for protocol generator implementations to override the error code string returned by `ErrorCode` methods on generated error types. ([#315](https://github.com/aws/smithy-go/pull/315)) + +# Release v1.5.0 (2021-06-25) + +### Smithy Go module +* `time`: Update time parsing to not be as strict for HTTPDate and DateTime ([#307](https://github.com/aws/smithy-go/pull/307)) + * Fixes [#302](https://github.com/aws/smithy-go/issues/302) by changing time to UTC before formatting so no local offset time is lost.
+ +### Codegen +* Adds support for integrating client members via plugins ([#301](https://github.com/aws/smithy-go/pull/301)) +* Fix serialization of enum types marked with payload trait ([#296](https://github.com/aws/smithy-go/pull/296)) +* Update generation of API client modules to include a manifest of files generated ([#283](https://github.com/aws/smithy-go/pull/283)) +* Update Group Java group ID for smithy-go generator ([#298](https://github.com/aws/smithy-go/pull/298)) +* Support the delegation of determining the errors that can occur for an operation ([#304](https://github.com/aws/smithy-go/pull/304)) +* Support for marking and documenting deprecated client config fields. ([#303](https://github.com/aws/smithy-go/pull/303)) + +# Release v1.4.0 (2021-05-06) + +### Smithy Go module +* `encoding/xml`: Fix escaping of Next Line and Line Start in XML Encoder ([#267](https://github.com/aws/smithy-go/pull/267)) + +### Codegen +* Add support for Smithy 1.7 ([#289](https://github.com/aws/smithy-go/pull/289)) +* Add support for httpQueryParams location +* Add support for model renaming conflict resolution with service closure + +# Release v1.3.1 (2021-04-08) + +### Smithy Go module +* `transport/http`: Loosen endpoint hostname validation to allow specifying port numbers. ([#279](https://github.com/aws/smithy-go/pull/279)) +* `io`: Fix RingBuffer panics due to out of bounds index. ([#282](https://github.com/aws/smithy-go/pull/282)) + +# Release v1.3.0 (2021-04-01) + +### Smithy Go module +* `transport/http`: Add utility to safely join string to url path, and url raw query. + +### Codegen +* Update HttpBindingProtocolGenerator to use http/transport JoinPath and JoinQuery utility. + +# Release v1.2.0 (2021-03-12) + +### Smithy Go module +* Fix support for parsing shortened year format in HTTP Date header. +* Fix GitHub APIDiff action workflow to get gorelease tool correctly. +* Fix codegen artifact unit test for Go 1.16 + +### Codegen +* Fix generating paginator nil parameter handling before usage. +* Fix Serialize unboxed members decorated as required. +* Add ability to define resolvers at both client construction and operation invocation. +* Support for extending paginators with custom runtime trait diff --git a/vendor/github.com/aws/smithy-go/CODE_OF_CONDUCT.md b/vendor/github.com/aws/smithy-go/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000..5b627cfa60b --- /dev/null +++ b/vendor/github.com/aws/smithy-go/CODE_OF_CONDUCT.md @@ -0,0 +1,4 @@ +## Code of Conduct +This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). +For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact +opensource-codeofconduct@amazon.com with any additional questions or comments. diff --git a/vendor/github.com/aws/smithy-go/CONTRIBUTING.md b/vendor/github.com/aws/smithy-go/CONTRIBUTING.md new file mode 100644 index 00000000000..c4b6a1c5081 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/CONTRIBUTING.md @@ -0,0 +1,59 @@ +# Contributing Guidelines + +Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional +documentation, we greatly value feedback and contributions from our community. + +Please read through this document before submitting any issues or pull requests to ensure we have all the necessary +information to effectively respond to your bug report or contribution. 
+ + +## Reporting Bugs/Feature Requests + +We welcome you to use the GitHub issue tracker to report bugs or suggest features. + +When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already +reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: + +* A reproducible test case or series of steps +* The version of our code being used +* Any modifications you've made relevant to the bug +* Anything unusual about your environment or deployment + + +## Contributing via Pull Requests +Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: + +1. You are working against the latest source on the *main* branch. +2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. +3. You open an issue to discuss any significant work - we would hate for your time to be wasted. + +To send us a pull request, please: + +1. Fork the repository. +2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. +3. Ensure local tests pass. +4. Commit to your fork using clear commit messages. +5. Send us a pull request, answering any default questions in the pull request interface. +6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. + +GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and +[creating a pull request](https://help.github.com/articles/creating-a-pull-request/). + + +## Finding contributions to work on +Looking at the existing issues is a great way to find something to contribute to. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start. + + +## Code of Conduct +This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). +For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact +opensource-codeofconduct@amazon.com with any additional questions or comments. + + +## Security issue notifications +If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public GitHub issue. + + +## Licensing + +See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. diff --git a/vendor/github.com/aws/smithy-go/LICENSE b/vendor/github.com/aws/smithy-go/LICENSE new file mode 100644 index 00000000000..67db8588217 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/LICENSE @@ -0,0 +1,175 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License.
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
diff --git a/vendor/github.com/aws/smithy-go/Makefile b/vendor/github.com/aws/smithy-go/Makefile new file mode 100644 index 00000000000..4b3c209373c --- /dev/null +++ b/vendor/github.com/aws/smithy-go/Makefile @@ -0,0 +1,97 @@ +PRE_RELEASE_VERSION ?= + +RELEASE_MANIFEST_FILE ?= +RELEASE_CHGLOG_DESC_FILE ?= + +REPOTOOLS_VERSION ?= latest +REPOTOOLS_MODULE = github.com/awslabs/aws-go-multi-module-repository-tools +REPOTOOLS_CMD_CALCULATE_RELEASE = ${REPOTOOLS_MODULE}/cmd/calculaterelease@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS ?= +REPOTOOLS_CMD_UPDATE_REQUIRES = ${REPOTOOLS_MODULE}/cmd/updaterequires@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_UPDATE_MODULE_METADATA = ${REPOTOOLS_MODULE}/cmd/updatemodulemeta@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_GENERATE_CHANGELOG = ${REPOTOOLS_MODULE}/cmd/generatechangelog@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_CHANGELOG = ${REPOTOOLS_MODULE}/cmd/changelog@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_TAG_RELEASE = ${REPOTOOLS_MODULE}/cmd/tagrelease@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_MODULE_VERSION = ${REPOTOOLS_MODULE}/cmd/moduleversion@${REPOTOOLS_VERSION} + +UNIT_TEST_TAGS= +BUILD_TAGS= + +ifneq ($(PRE_RELEASE_VERSION),) + REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS += -preview=${PRE_RELEASE_VERSION} +endif + +smithy-publish-local: + cd codegen && ./gradlew publishToMavenLocal + +smithy-build: + cd codegen && ./gradlew build + +smithy-clean: + cd codegen && ./gradlew clean + +################## +# Linting/Verify # +################## +.PHONY: verify vet + +verify: vet + +vet: + go vet ${BUILD_TAGS} --all ./... + +################ +# Unit Testing # +################ +.PHONY: unit unit-race unit-test unit-race-test + +unit: verify + go vet ${BUILD_TAGS} --all ./... && \ + go test ${BUILD_TAGS} ${RUN_NONE} ./... && \ + go test -timeout=1m ${UNIT_TEST_TAGS} ./... + +unit-race: verify + go vet ${BUILD_TAGS} --all ./... && \ + go test ${BUILD_TAGS} ${RUN_NONE} ./... && \ + go test -timeout=1m ${UNIT_TEST_TAGS} -race -cpu=4 ./... + +unit-test: verify + go test -timeout=1m ${UNIT_TEST_TAGS} ./... + +unit-race-test: verify + go test -timeout=1m ${UNIT_TEST_TAGS} -race -cpu=4 ./... + +##################### +# Release Process # +##################### +.PHONY: preview-release pre-release-validation release + +preview-release: + go run ${REPOTOOLS_CMD_CALCULATE_RELEASE} ${REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS} + +pre-release-validation: + @if [[ -z "${RELEASE_MANIFEST_FILE}" ]]; then \ + echo "RELEASE_MANIFEST_FILE is required to specify the file to write the release manifest" && false; \ + fi + @if [[ -z "${RELEASE_CHGLOG_DESC_FILE}" ]]; then \ + echo "RELEASE_CHGLOG_DESC_FILE is required to specify the file to write the release notes" && false; \ + fi + +release: pre-release-validation + go run ${REPOTOOLS_CMD_CALCULATE_RELEASE} -o ${RELEASE_MANIFEST_FILE} ${REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS} + go run ${REPOTOOLS_CMD_UPDATE_REQUIRES} -release ${RELEASE_MANIFEST_FILE} + go run ${REPOTOOLS_CMD_UPDATE_MODULE_METADATA} -release ${RELEASE_MANIFEST_FILE} + go run ${REPOTOOLS_CMD_GENERATE_CHANGELOG} -release ${RELEASE_MANIFEST_FILE} -o ${RELEASE_CHGLOG_DESC_FILE} + go run ${REPOTOOLS_CMD_CHANGELOG} rm -all + go run ${REPOTOOLS_CMD_TAG_RELEASE} -release ${RELEASE_MANIFEST_FILE} + +module-version: + @go run ${REPOTOOLS_CMD_MODULE_VERSION} . 
+ +############## +# Repo Tools # +############## +.PHONY: install-changelog + +install-changelog: + go install ${REPOTOOLS_MODULE}/cmd/changelog@${REPOTOOLS_VERSION} diff --git a/vendor/github.com/aws/smithy-go/NOTICE b/vendor/github.com/aws/smithy-go/NOTICE new file mode 100644 index 00000000000..616fc588945 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/NOTICE @@ -0,0 +1 @@ +Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. diff --git a/vendor/github.com/aws/smithy-go/README.md b/vendor/github.com/aws/smithy-go/README.md new file mode 100644 index 00000000000..c374f69283b --- /dev/null +++ b/vendor/github.com/aws/smithy-go/README.md @@ -0,0 +1,27 @@ +## Smithy Go + +[![Go Build Status](https://github.com/aws/smithy-go/actions/workflows/go.yml/badge.svg?branch=main)](https://github.com/aws/smithy-go/actions/workflows/go.yml)[![Codegen Build Status](https://github.com/aws/smithy-go/actions/workflows/codegen.yml/badge.svg?branch=main)](https://github.com/aws/smithy-go/actions/workflows/codegen.yml) + +[Smithy](https://smithy.io/) code generators for Go. + +**WARNING: All interfaces are subject to change.** + +## Can I use this? + +To generate a usable smithy client you must provide a [protocol definition](https://github.com/aws/smithy-go/blob/main/codegen/smithy-go-codegen/src/main/java/software/amazon/smithy/go/codegen/integration/ProtocolGenerator.java), +such as [AWS restJson1](https://smithy.io/2.0/aws/protocols/aws-restjson1-protocol.html), +which is used to generate transport mechanisms and serialization/deserialization +code ("serde") accordingly. + +The code generator does not currently support any protocols out of the box, +therefore the usability of this project on its own is currently limited. +Support for all [AWS protocols](https://smithy.io/2.0/aws/protocols/index.html) +exists in [aws-sdk-go-v2](https://github.com/aws/aws-sdk-go-v2). We are +tracking the movement of those out of the SDK into smithy-go in +[#458](https://github.com/aws/smithy-go/issues/458), but there's currently no +timeline for doing so. + +## License + +This project is licensed under the Apache-2.0 License. + diff --git a/vendor/github.com/aws/smithy-go/auth/bearer/docs.go b/vendor/github.com/aws/smithy-go/auth/bearer/docs.go new file mode 100644 index 00000000000..1c9b9715cb0 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/auth/bearer/docs.go @@ -0,0 +1,3 @@ +// Package bearer provides middleware and utilities for authenticating API +// operation calls with a Bearer Token. +package bearer diff --git a/vendor/github.com/aws/smithy-go/auth/bearer/middleware.go b/vendor/github.com/aws/smithy-go/auth/bearer/middleware.go new file mode 100644 index 00000000000..8c7d7209959 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/auth/bearer/middleware.go @@ -0,0 +1,104 @@ +package bearer + +import ( + "context" + "fmt" + + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Message is the middleware stack's request transport message value. +type Message interface{} + +// Signer provides an interface for implementations to decorate a request +// message with a bearer token. The signer is responsible for validating the +// message type is compatible with the signer. +type Signer interface { + SignWithBearerToken(context.Context, Token, Message) (Message, error) +} + +// AuthenticationMiddleware provides the Finalize middleware step for signing +// a request message with a bearer token.
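+//
+// A minimal wiring sketch (illustrative; assumes access to an operation's
+// middleware.Stack named stack, with an example static token value):
+//
+//	provider := bearer.StaticTokenProvider{Token: bearer.Token{Value: "example-token"}}
+//	err := bearer.AddAuthenticationMiddleware(stack, bearer.NewSignHTTPSMessage(), provider)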
+type AuthenticationMiddleware struct { + signer Signer + tokenProvider TokenProvider +} + +// AddAuthenticationMiddleware helper adds the AuthenticationMiddleware to the +// middleware Stack in the Finalize step with the options provided. +func AddAuthenticationMiddleware(s *middleware.Stack, signer Signer, tokenProvider TokenProvider) error { + return s.Finalize.Add( + NewAuthenticationMiddleware(signer, tokenProvider), + middleware.After, + ) +} + +// NewAuthenticationMiddleware returns an initialized AuthenticationMiddleware. +func NewAuthenticationMiddleware(signer Signer, tokenProvider TokenProvider) *AuthenticationMiddleware { + return &AuthenticationMiddleware{ + signer: signer, + tokenProvider: tokenProvider, + } +} + +const authenticationMiddlewareID = "BearerTokenAuthentication" + +// ID returns the resolver identifier. +func (m *AuthenticationMiddleware) ID() string { + return authenticationMiddlewareID +} + +// HandleFinalize implements the FinalizeMiddleware interface in order to +// update the request with bearer token authentication. +func (m *AuthenticationMiddleware) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + token, err := m.tokenProvider.RetrieveBearerToken(ctx) + if err != nil { + return out, metadata, fmt.Errorf("failed AuthenticationMiddleware wrap message, %w", err) + } + + signedMessage, err := m.signer.SignWithBearerToken(ctx, token, in.Request) + if err != nil { + return out, metadata, fmt.Errorf("failed AuthenticationMiddleware sign message, %w", err) + } + + in.Request = signedMessage + return next.HandleFinalize(ctx, in) +} + +// SignHTTPSMessage provides a bearer token authentication implementation that +// will sign the message with the provided bearer token. +// +// Will fail if the message is not a smithy-go HTTP request or the request is +// not HTTPS. +type SignHTTPSMessage struct{} + +// NewSignHTTPSMessage returns an initialized signer for HTTP messages. +func NewSignHTTPSMessage() *SignHTTPSMessage { + return &SignHTTPSMessage{} +} + +// SignWithBearerToken returns a copy of the HTTP request with the bearer token +// added via the "Authorization" header, per RFC 6750, https://datatracker.ietf.org/doc/html/rfc6750. +// +// Returns an error if the request's URL scheme is not HTTPS, or the request +// message is not a smithy-go HTTP Request pointer type. +func (SignHTTPSMessage) SignWithBearerToken(ctx context.Context, token Token, message Message) (Message, error) { + req, ok := message.(*smithyhttp.Request) + if !ok { + return nil, fmt.Errorf("expect smithy-go HTTP Request, got %T", message) + } + + if !req.IsHTTPS() { + return nil, fmt.Errorf("bearer token with HTTP request requires HTTPS") + } + + reqClone := req.Clone() + reqClone.Header.Set("Authorization", "Bearer "+token.Value) + + return reqClone, nil +} diff --git a/vendor/github.com/aws/smithy-go/auth/bearer/token.go b/vendor/github.com/aws/smithy-go/auth/bearer/token.go new file mode 100644 index 00000000000..be260d4c764 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/auth/bearer/token.go @@ -0,0 +1,50 @@ +package bearer + +import ( + "context" + "time" +) + +// Token provides a type wrapping a bearer token and expiration metadata. +type Token struct { + Value string + + CanExpire bool + Expires time.Time +} + +// Expired returns whether the token's Expires time is before or equal to the time +// provided.
If CanExpire is false, Expired will always return false. +func (t Token) Expired(now time.Time) bool { + if !t.CanExpire { + return false + } + now = now.Round(0) + return now.Equal(t.Expires) || now.After(t.Expires) +} + +// TokenProvider provides an interface for retrieving bearer tokens. +type TokenProvider interface { + RetrieveBearerToken(context.Context) (Token, error) +} + +// TokenProviderFunc provides a helper utility to wrap a function as a type +// that implements the TokenProvider interface. +type TokenProviderFunc func(context.Context) (Token, error) + +// RetrieveBearerToken calls the wrapped function, returning the Token or +// error. +func (fn TokenProviderFunc) RetrieveBearerToken(ctx context.Context) (Token, error) { + return fn(ctx) +} + +// StaticTokenProvider provides a utility for wrapping a static bearer token +// value within an implementation of a token provider. +type StaticTokenProvider struct { + Token Token +} + +// RetrieveBearerToken returns the static token specified. +func (s StaticTokenProvider) RetrieveBearerToken(context.Context) (Token, error) { + return s.Token, nil +} diff --git a/vendor/github.com/aws/smithy-go/auth/bearer/token_cache.go b/vendor/github.com/aws/smithy-go/auth/bearer/token_cache.go new file mode 100644 index 00000000000..223ddf52bba --- /dev/null +++ b/vendor/github.com/aws/smithy-go/auth/bearer/token_cache.go @@ -0,0 +1,208 @@ +package bearer + +import ( + "context" + "fmt" + "sync/atomic" + "time" + + smithycontext "github.com/aws/smithy-go/context" + "github.com/aws/smithy-go/internal/sync/singleflight" +) + +// package variable that can be overridden in unit tests. +var timeNow = time.Now + +// TokenCacheOptions provides a set of optional configuration options for the +// TokenCache TokenProvider. +type TokenCacheOptions struct { + // The duration before the token expires at which the credentials will be + // refreshed. If DisableAsyncRefresh is true, the RetrieveBearerToken calls + // will be blocking. + // + // Asynchronous refreshes are deduplicated, and only one will be in-flight + // at a time. If the token expires while an asynchronous refresh is in + // flight, the next call to RetrieveBearerToken will block on that refresh + // to return. + RefreshBeforeExpires time.Duration + + // The timeout the underlying TokenProvider's RetrieveBearerToken call must + // return within, or it will be canceled. Defaults to 0, meaning no timeout. + // + // If the timeout is 0, it's possible for the underlying TokenProvider's + // RetrieveBearerToken call to block forever, preventing subsequent + // TokenCache attempts to refresh the token. + // + // If this timeout is reached, all pending deduplicated calls to + // TokenCache RetrieveBearerToken will fail with an error. + RetrieveBearerTokenTimeout time.Duration + + // The minimum duration between asynchronous refresh attempts. If the most + // recent asynchronous refresh attempt was within the minimum delay + // duration, the call to retrieve will return the current cached token, if + // not expired. + // + // The asynchronous retrieve is deduplicated across multiple calls when + // RetrieveBearerToken is called. The asynchronous retrieve is not a + // periodic task. It is only performed when the token has not yet expired, + // and the current item is within the RefreshBeforeExpires window, and the + // TokenCache's RetrieveBearerToken method is called. + // + // If 0 (the default), there will be no minimum delay between asynchronous + // refresh attempts.
+	//
+	// If DisableAsyncRefresh is true, this option is ignored.
+	AsyncRefreshMinimumDelay time.Duration
+
+	// Sets if the TokenCache will attempt to refresh the token in the
+	// background asynchronously instead of blocking for credentials to be
+	// refreshed. If disabled token refresh will be blocking.
+	//
+	// The first call to RetrieveBearerToken will always be blocking, because
+	// there is no cached token.
+	DisableAsyncRefresh bool
+}
+
+// TokenCache provides a utility to cache Bearer Authentication tokens from a
+// wrapped TokenProvider. The TokenCache has options to configure the cache's
+// early and asynchronous refresh of the token.
+type TokenCache struct {
+	options  TokenCacheOptions
+	provider TokenProvider
+
+	cachedToken            atomic.Value
+	lastRefreshAttemptTime atomic.Value
+	sfGroup                singleflight.Group
+}
+
+// NewTokenCache returns an initialized TokenCache that implements the
+// TokenProvider interface, wrapping the provider passed in. It also takes a
+// set of optional functional option parameters to configure the token cache.
+func NewTokenCache(provider TokenProvider, optFns ...func(*TokenCacheOptions)) *TokenCache {
+	var options TokenCacheOptions
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	return &TokenCache{
+		options:  options,
+		provider: provider,
+	}
+}
+
+// RetrieveBearerToken returns the token if it could be obtained, or error if a
+// valid token could not be retrieved.
+//
+// The passed in Context's cancel/deadline/timeout will impact only this
+// individual retrieve call and not any other already queued up calls. This
+// means underlying provider's RetrieveBearerToken calls could block forever,
+// and not be canceled with the Context. Set RetrieveBearerTokenTimeout to
+// provide a timeout, preventing the underlying TokenProvider blocking forever.
+//
+// By default, if the passed in Context is canceled, all of its values will be
+// considered expired. The wrapped TokenProvider will not be able to lookup the
+// values from the Context once it is expired. This is done to protect against
+// expired values no longer being valid. To disable this behavior, use
+// smithy-go's context.WithPreserveExpiredValues to add a value to the Context
+// before calling RetrieveBearerToken to enable support for expired values.
+//
+// Without RetrieveBearerTokenTimeout there is the potential for an underlying
+// Provider's RetrieveBearerToken call to block forever, stalling subsequent
+// attempts at refreshing the token.
+func (p *TokenCache) RetrieveBearerToken(ctx context.Context) (Token, error) {
+	cachedToken, ok := p.getCachedToken()
+	if !ok || cachedToken.Expired(timeNow()) {
+		return p.refreshBearerToken(ctx)
+	}
+
+	// Check if the token should be refreshed before it expires.
+	refreshToken := cachedToken.Expired(timeNow().Add(p.options.RefreshBeforeExpires))
+	if !refreshToken {
+		return cachedToken, nil
+	}
+
+	if p.options.DisableAsyncRefresh {
+		return p.refreshBearerToken(ctx)
+	}
+
+	p.tryAsyncRefresh(ctx)
+
+	return cachedToken, nil
+}
+
+// tryAsyncRefresh attempts to asynchronously refresh the token, returning the
+// already cached token. If the AsyncRefreshMinimumDelay option is not zero, and
+// the duration since the last refresh is less than that value, nothing will be
+// done.
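As a usage sketch of the cache described above (the provider, token value, and durations here are illustrative, not part of smithy-go):

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/smithy-go/auth/bearer"
)

func main() {
	// Hypothetical underlying provider; a real one would call a token service.
	provider := bearer.TokenProviderFunc(func(ctx context.Context) (bearer.Token, error) {
		return bearer.Token{
			Value:     "example-token",
			CanExpire: true,
			Expires:   time.Now().Add(15 * time.Minute),
		}, nil
	})

	cache := bearer.NewTokenCache(provider, func(o *bearer.TokenCacheOptions) {
		o.RefreshBeforeExpires = 5 * time.Minute        // refresh early, asynchronously
		o.RetrieveBearerTokenTimeout = 30 * time.Second // bound each provider call
	})

	token, err := cache.RetrieveBearerToken(context.Background())
	if err != nil {
		fmt.Println("retrieve failed:", err)
		return
	}
	fmt.Println("expired now?", token.Expired(time.Now()))
}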
+func (p *TokenCache) tryAsyncRefresh(ctx context.Context) { + if p.options.AsyncRefreshMinimumDelay != 0 { + var lastRefreshAttempt time.Time + if v := p.lastRefreshAttemptTime.Load(); v != nil { + lastRefreshAttempt = v.(time.Time) + } + + if timeNow().Before(lastRefreshAttempt.Add(p.options.AsyncRefreshMinimumDelay)) { + return + } + } + + // Ignore the returned channel so this won't be blocking, and limit the + // number of additional goroutines created. + p.sfGroup.DoChan("async-refresh", func() (interface{}, error) { + res, err := p.refreshBearerToken(ctx) + if p.options.AsyncRefreshMinimumDelay != 0 { + var refreshAttempt time.Time + if err != nil { + refreshAttempt = timeNow() + } + p.lastRefreshAttemptTime.Store(refreshAttempt) + } + + return res, err + }) +} + +func (p *TokenCache) refreshBearerToken(ctx context.Context) (Token, error) { + resCh := p.sfGroup.DoChan("refresh-token", func() (interface{}, error) { + ctx := smithycontext.WithSuppressCancel(ctx) + if v := p.options.RetrieveBearerTokenTimeout; v != 0 { + var cancel func() + ctx, cancel = context.WithTimeout(ctx, v) + defer cancel() + } + return p.singleRetrieve(ctx) + }) + + select { + case res := <-resCh: + return res.Val.(Token), res.Err + case <-ctx.Done(): + return Token{}, fmt.Errorf("retrieve bearer token canceled, %w", ctx.Err()) + } +} + +func (p *TokenCache) singleRetrieve(ctx context.Context) (interface{}, error) { + token, err := p.provider.RetrieveBearerToken(ctx) + if err != nil { + return Token{}, fmt.Errorf("failed to retrieve bearer token, %w", err) + } + + p.cachedToken.Store(&token) + return token, nil +} + +// getCachedToken returns the currently cached token and true if found. Returns +// false if no token is cached. +func (p *TokenCache) getCachedToken() (Token, bool) { + v := p.cachedToken.Load() + if v == nil { + return Token{}, false + } + + t := v.(*Token) + if t == nil || t.Value == "" { + return Token{}, false + } + + return *t, true +} diff --git a/vendor/github.com/aws/smithy-go/context/suppress_expired.go b/vendor/github.com/aws/smithy-go/context/suppress_expired.go new file mode 100644 index 00000000000..a39b84a2784 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/context/suppress_expired.go @@ -0,0 +1,81 @@ +package context + +import "context" + +// valueOnlyContext provides a utility to preserve only the values of a +// Context. Suppressing any cancellation or deadline on that context being +// propagated downstream of this value. +// +// If preserveExpiredValues is false (default), and the valueCtx is canceled, +// calls to lookup values with the Values method, will always return nil. Setting +// preserveExpiredValues to true, will allow the valueOnlyContext to lookup +// values in valueCtx even if valueCtx is canceled. +// +// Based on the Go standard libraries net/lookup.go onlyValuesCtx utility. +// https://github.com/golang/go/blob/da2773fe3e2f6106634673a38dc3a6eb875fe7d8/src/net/lookup.go +type valueOnlyContext struct { + context.Context + + preserveExpiredValues bool + valuesCtx context.Context +} + +var _ context.Context = (*valueOnlyContext)(nil) + +// Value looks up the key, returning its value. If configured to not preserve +// values of expired context, and the wrapping context is canceled, nil will be +// returned. 
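The valueOnlyContext described above is exercised through WithSuppressCancel and WithPreserveExpiredValues, defined next. A minimal sketch of the intended behavior (the key type is illustrative):

package main

import (
	"context"
	"fmt"

	smithycontext "github.com/aws/smithy-go/context"
)

type ctxKey struct{}

func main() {
	parent, cancel := context.WithCancel(context.Background())
	parent = context.WithValue(parent, ctxKey{}, "v")

	// Opt in to value lookups surviving cancellation of the parent; this must
	// happen before the context is wrapped by WithSuppressCancel.
	parent = smithycontext.WithPreserveExpiredValues(parent, true)

	detached := smithycontext.WithSuppressCancel(parent)
	cancel()

	fmt.Println(detached.Err())           // <nil>: cancellation is suppressed
	fmt.Println(detached.Value(ctxKey{})) // "v": preserved despite cancellation
}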
+func (v *valueOnlyContext) Value(key interface{}) interface{} {
+	if !v.preserveExpiredValues {
+		select {
+		case <-v.valuesCtx.Done():
+			return nil
+		default:
+		}
+	}
+
+	return v.valuesCtx.Value(key)
+}
+
+// WithSuppressCancel wraps the Context value, suppressing its deadline and
+// cancellation events being propagated downstream to consumers of the returned
+// context.
+//
+// By default the wrapped Context's Values are available downstream until the
+// wrapped Context is canceled. Once the wrapped Context is canceled, the Value
+// method called on the returned context will no longer look up any key, as the
+// values are now considered expired.
+//
+// To override this behavior, use WithPreserveExpiredValues on the Context
+// before it is wrapped by WithSuppressCancel. This will make the Context
+// returned by WithSuppressCancel allow lookup of expired values.
+func WithSuppressCancel(ctx context.Context) context.Context {
+	return &valueOnlyContext{
+		Context:   context.Background(),
+		valuesCtx: ctx,
+
+		preserveExpiredValues: GetPreserveExpiredValues(ctx),
+	}
+}
+
+type preserveExpiredValuesKey struct{}
+
+// WithPreserveExpiredValues adds a Value to the Context if expired values
+// should be preserved, and looked up by a Context wrapped by
+// WithSuppressCancel.
+//
+// WithPreserveExpiredValues must be added as a value to a Context, before that
+// Context is wrapped by WithSuppressCancel.
+func WithPreserveExpiredValues(ctx context.Context, enable bool) context.Context {
+	return context.WithValue(ctx, preserveExpiredValuesKey{}, enable)
+}
+
+// GetPreserveExpiredValues looks up and returns the PreserveExpiredValues
+// value in the context. Returning true if enabled, false otherwise.
+func GetPreserveExpiredValues(ctx context.Context) bool {
+	v := ctx.Value(preserveExpiredValuesKey{})
+	if v != nil {
+		return v.(bool)
+	}
+	return false
+}
diff --git a/vendor/github.com/aws/smithy-go/doc.go b/vendor/github.com/aws/smithy-go/doc.go
new file mode 100644
index 00000000000..87b0c74b75c
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/doc.go
@@ -0,0 +1,2 @@
+// Package smithy provides the core components for a Smithy SDK.
+package smithy
diff --git a/vendor/github.com/aws/smithy-go/document.go b/vendor/github.com/aws/smithy-go/document.go
new file mode 100644
index 00000000000..dec498c57bf
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/document.go
@@ -0,0 +1,10 @@
+package smithy
+
+// Document provides access to loosely structured data in a document-like
+// format.
+//
+// Deprecated: See the github.com/aws/smithy-go/document package.
+type Document interface {
+	UnmarshalDocument(interface{}) error
+	GetValue() (interface{}, error)
+}
diff --git a/vendor/github.com/aws/smithy-go/document/doc.go b/vendor/github.com/aws/smithy-go/document/doc.go
new file mode 100644
index 00000000000..03055b7a1c2
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/document/doc.go
@@ -0,0 +1,12 @@
+// Package document provides interface definitions and error types for document types.
+//
+// A document is a protocol-agnostic type which supports a JSON-like data-model. You can use this type to send
+// UTF-8 strings, arbitrary precision numbers, booleans, nulls, a list of these values, and a map of UTF-8
+// strings to these values.
+//
+// API Clients expose document constructors in their respective client document packages which must be used to
+// Marshal and Unmarshal Go types to and from their respective protocol representations.
+//
+// See the Marshaler and Unmarshaler type documentation for more details on how Go types can be converted to and from
+// document types.
+package document
diff --git a/vendor/github.com/aws/smithy-go/document/document.go b/vendor/github.com/aws/smithy-go/document/document.go
new file mode 100644
index 00000000000..8f852d95c69
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/document/document.go
@@ -0,0 +1,153 @@
+package document
+
+import (
+	"fmt"
+	"math/big"
+	"strconv"
+)
+
+// Marshaler is an interface for a type that marshals a document to its protocol-specific byte representation and
+// returns the resulting bytes. A non-nil error will be returned if an error is encountered during marshaling.
+//
+// Marshal supports basic scalars (int,uint,float,bool,string), big.Int, and big.Float, maps, slices, and structs.
+// Anonymous nested types are flattened based on Go anonymous type visibility.
+//
+// When defining struct types, the `document` struct tag can be used to control how the value will be
+// marshaled into the resulting protocol document.
+//
+//	// Field is ignored
+//	Field int `document:"-"`
+//
+//	// Field object of key "myName"
+//	Field int `document:"myName"`
+//
+//	// Field object key of "myName", and
+//	// Field is omitted if the field is a zero value for the type.
+//	Field int `document:"myName,omitempty"`
+//
+//	// Field object key of "Field", and
+//	// Field is omitted if the field is a zero value for the type.
+//	Field int `document:",omitempty"`
+//
+// All struct fields, including anonymous fields, are marshaled unless any
+// of the following conditions are met.
+//
+// - the field is not exported
+// - document field tag is "-"
+// - document field tag specifies "omitempty", and is a zero value.
+//
+// Pointer and interface values are encoded as the value pointed to or
+// contained in the interface. A nil value encodes as a null
+// value unless the `omitempty` struct tag is provided.
+//
+// Channel, complex, and function values are not encoded and will be skipped
+// when walking the value to be marshaled.
+//
+// time.Time is not supported and will cause the Marshaler to return an error. These values should be represented
+// by your application as a string or numerical representation.
+//
+// Errors that occur when marshaling will stop the marshaler, and return the error.
+//
+// Marshal cannot represent cyclic data structures and will not handle them.
+// Passing cyclic structures to Marshal will result in an infinite recursion.
+type Marshaler interface {
+	MarshalSmithyDocument() ([]byte, error)
+}
+
+// Unmarshaler is an interface for a type that unmarshals a document from its protocol-specific representation, and
+// stores the result into the value pointed by v. If v is nil or not a pointer then InvalidUnmarshalError will be
+// returned.
+//
+// Unmarshaler supports the same encodings produced by a document Marshaler. This includes support for the `document`
+// struct field tag for controlling how struct fields are unmarshaled.
+//
+// Both generic interface{} and concrete types are valid unmarshal destination types.
+// When unmarshaling a document into an empty interface, the Unmarshaler will store one of these values:
+//	bool, for boolean values
+//	document.Number, for arbitrary-precision numbers (int64, float64, big.Int, big.Float)
+//	string, for string values
+//	[]interface{}, for array values
+//	map[string]interface{}, for objects
+//	nil, for null values
+//
+// When unmarshaling, any error that occurs will halt the unmarshal and return the error.
+type Unmarshaler interface {
+	UnmarshalSmithyDocument(v interface{}) error
+}
+
+type noSerde interface {
+	noSmithyDocumentSerde()
+}
+
+// NoSerde is a sentinel value to indicate that a given type should not be marshaled or unmarshaled
+// into a protocol document.
+type NoSerde struct{}
+
+func (n NoSerde) noSmithyDocumentSerde() {}
+
+var _ noSerde = (*NoSerde)(nil)
+
+// IsNoSerde returns whether the given type implements the no smithy document serde interface.
+func IsNoSerde(x interface{}) bool {
+	_, ok := x.(noSerde)
+	return ok
+}
+
+// Number is an arbitrary precision numerical value
+type Number string
+
+// String returns the number as a string.
+func (n Number) String() string {
+	return string(n)
+}
+
+// Int64 returns the number as an int64.
+func (n Number) Int64() (int64, error) {
+	return n.intOfBitSize(64)
+}
+
+func (n Number) intOfBitSize(bitSize int) (int64, error) {
+	return strconv.ParseInt(string(n), 10, bitSize)
+}
+
+// Uint64 returns the number as a uint64.
+func (n Number) Uint64() (uint64, error) {
+	return n.uintOfBitSize(64)
+}
+
+func (n Number) uintOfBitSize(bitSize int) (uint64, error) {
+	return strconv.ParseUint(string(n), 10, bitSize)
+}
+
+// Float32 returns the number parsed as a 32-bit float, returns a float64.
+func (n Number) Float32() (float64, error) {
+	return n.floatOfBitSize(32)
+}
+
+// Float64 returns the number as a float64.
+func (n Number) Float64() (float64, error) {
+	return n.floatOfBitSize(64)
+}
+
+func (n Number) floatOfBitSize(bitSize int) (float64, error) {
+	return strconv.ParseFloat(string(n), bitSize)
+}
+
+// BigFloat attempts to convert the number to a big.Float, returns an error if the operation fails.
+func (n Number) BigFloat() (*big.Float, error) {
+	f, ok := (&big.Float{}).SetString(string(n))
+	if !ok {
+		return nil, fmt.Errorf("failed to convert to big.Float")
+	}
+	return f, nil
+}
+
+// BigInt attempts to convert the number to a big.Int, returns an error if the operation fails.
+func (n Number) BigInt() (*big.Int, error) {
+	f, ok := (&big.Int{}).SetString(string(n), 10)
+	if !ok {
+		return nil, fmt.Errorf("failed to convert to big.Int")
+	}
+	return f, nil
+}
diff --git a/vendor/github.com/aws/smithy-go/document/errors.go b/vendor/github.com/aws/smithy-go/document/errors.go
new file mode 100644
index 00000000000..046a7a76531
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/document/errors.go
@@ -0,0 +1,75 @@
+package document
+
+import (
+	"fmt"
+	"reflect"
+)
+
+// UnmarshalTypeError is an error type representing an error
+// unmarshaling a Smithy document to a Go value type. This is different
+// from UnmarshalError in that it does not wrap an underlying error type.
+type UnmarshalTypeError struct {
+	Value string
+	Type  reflect.Type
+}
+
+// Error returns the string representation of the error.
+// Satisfying the error interface.
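A short sketch of converting a document.Number with the accessors above (the values are illustrative):

package main

import (
	"fmt"

	"github.com/aws/smithy-go/document"
)

func main() {
	n := document.Number("42")
	i, _ := n.Int64()   // 42
	f, _ := n.Float64() // 42
	fmt.Println(i, f)

	// Values that overflow int64 can still be read as a big.Int.
	bi, err := document.Number("123456789012345678901234567890").BigInt()
	fmt.Println(bi, err)
}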
+func (e *UnmarshalTypeError) Error() string { + return fmt.Sprintf("unmarshal failed, cannot unmarshal %s into Go value type %s", + e.Value, e.Type.String()) +} + +// An InvalidUnmarshalError is an error type representing an invalid type +// encountered while unmarshaling a Smithy document to a Go value type. +type InvalidUnmarshalError struct { + Type reflect.Type +} + +// Error returns the string representation of the error. +// Satisfying the error interface. +func (e *InvalidUnmarshalError) Error() string { + var msg string + if e.Type == nil { + msg = "cannot unmarshal to nil value" + } else if e.Type.Kind() != reflect.Ptr { + msg = fmt.Sprintf("cannot unmarshal to non-pointer value, got %s", e.Type.String()) + } else { + msg = fmt.Sprintf("cannot unmarshal to nil value, %s", e.Type.String()) + } + + return fmt.Sprintf("unmarshal failed, %s", msg) +} + +// An UnmarshalError wraps an error that occurred while unmarshaling a +// Smithy document into a Go type. This is different from +// UnmarshalTypeError in that it wraps the underlying error that occurred. +type UnmarshalError struct { + Err error + Value string + Type reflect.Type +} + +// Unwrap returns the underlying unmarshaling error +func (e *UnmarshalError) Unwrap() error { + return e.Err +} + +// Error returns the string representation of the error. +// Satisfying the error interface. +func (e *UnmarshalError) Error() string { + return fmt.Sprintf("unmarshal failed, cannot unmarshal %q into %s, %v", + e.Value, e.Type.String(), e.Err) +} + +// An InvalidMarshalError is an error type representing an error +// occurring when marshaling a Go value type. +type InvalidMarshalError struct { + Message string +} + +// Error returns the string representation of the error. +// Satisfying the error interface. +func (e *InvalidMarshalError) Error() string { + return fmt.Sprintf("marshal failed, %s", e.Message) +} diff --git a/vendor/github.com/aws/smithy-go/encoding/doc.go b/vendor/github.com/aws/smithy-go/encoding/doc.go new file mode 100644 index 00000000000..792fdfa08b3 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/doc.go @@ -0,0 +1,4 @@ +// Package encoding provides utilities for encoding values for specific +// document encodings. 
+
+package encoding
diff --git a/vendor/github.com/aws/smithy-go/encoding/encoding.go b/vendor/github.com/aws/smithy-go/encoding/encoding.go
new file mode 100644
index 00000000000..2fdfb522502
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/encoding.go
@@ -0,0 +1,40 @@
+package encoding
+
+import (
+	"fmt"
+	"math"
+	"strconv"
+)
+
+// EncodeFloat encodes a float value as per the stdlib encoder for the json and xml protocols.
+// This encodes a float value into dst while attempting to conform to ES6 ToString for Numbers.
+//
+// Based on encoding/json floatEncoder from the Go Standard Library
+// https://golang.org/src/encoding/json/encode.go
+func EncodeFloat(dst []byte, v float64, bits int) []byte {
+	if math.IsInf(v, 0) || math.IsNaN(v) {
+		panic(fmt.Sprintf("invalid float value: %s", strconv.FormatFloat(v, 'g', -1, bits)))
+	}
+
+	abs := math.Abs(v)
+	fmt := byte('f')
+
+	if abs != 0 {
+		if bits == 64 && (abs < 1e-6 || abs >= 1e21) || bits == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) {
+			fmt = 'e'
+		}
+	}
+
+	dst = strconv.AppendFloat(dst, v, fmt, -1, bits)
+
+	if fmt == 'e' {
+		// clean up e-09 to e-9
+		n := len(dst)
+		if n >= 4 && dst[n-4] == 'e' && dst[n-3] == '-' && dst[n-2] == '0' {
+			dst[n-2] = dst[n-1]
+			dst = dst[:n-1]
+		}
+	}
+
+	return dst
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/encode.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/encode.go
new file mode 100644
index 00000000000..543e7cf0387
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/httpbinding/encode.go
@@ -0,0 +1,123 @@
+package httpbinding
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+)
+
+const (
+	contentLengthHeader = "Content-Length"
+	floatNaN            = "NaN"
+	floatInfinity       = "Infinity"
+	floatNegInfinity    = "-Infinity"
+)
+
+// An Encoder provides encoding of REST URI path, query, and header components
+// of an HTTP request. Can also encode a stream as the payload.
+//
+// Does not support SetFields.
+type Encoder struct {
+	path, rawPath, pathBuffer []byte
+
+	query  url.Values
+	header http.Header
+}
+
+// NewEncoder creates a new encoder from the passed in request. It assumes that
+// raw path contains no valuable information at this point, so it passes in path
+// as path and raw path for subsequent transformations.
+func NewEncoder(path, query string, headers http.Header) (*Encoder, error) {
+	return NewEncoderWithRawPath(path, path, query, headers)
+}
+
+// NewEncoderWithRawPath creates a new encoder from the passed in request. All query and
+// header values will be added on top of the request's existing values, overwriting
+// duplicate values.
+func NewEncoderWithRawPath(path, rawPath, query string, headers http.Header) (*Encoder, error) {
+	parseQuery, err := url.ParseQuery(query)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse query string: %w", err)
+	}
+
+	e := &Encoder{
+		path:    []byte(path),
+		rawPath: []byte(rawPath),
+		query:   parseQuery,
+		header:  headers.Clone(),
+	}
+
+	return e, nil
+}
+
+// Encode returns a REST protocol encoder for encoding HTTP bindings.
+//
+// Because net/http requires `Content-Length` to be specified on the
+// http.Request#ContentLength field directly, Encode will look for whether the
+// header is present, and if so will remove it and set the respective value on
+// the http.Request.
+//
+// Returns any error occurring during encoding.
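A small sketch of EncodeFloat's two output modes; note it panics on NaN and infinities, so callers handle those separately (the httpbinding encoders below map them to sentinel strings such as "NaN"):

package main

import (
	"fmt"

	"github.com/aws/smithy-go/encoding"
)

func main() {
	// Ordinary magnitudes use plain decimal notation.
	fmt.Println(string(encoding.EncodeFloat(nil, 3.14, 64))) // 3.14

	// Very large or very small magnitudes switch to exponent form.
	fmt.Println(string(encoding.EncodeFloat(nil, 1e21, 64))) // 1e+21
}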
+func (e *Encoder) Encode(req *http.Request) (*http.Request, error) { + req.URL.Path, req.URL.RawPath = string(e.path), string(e.rawPath) + req.URL.RawQuery = e.query.Encode() + + // net/http ignores Content-Length header and requires it to be set on http.Request + if v := e.header.Get(contentLengthHeader); len(v) > 0 { + iv, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return nil, err + } + req.ContentLength = iv + e.header.Del(contentLengthHeader) + } + + req.Header = e.header + + return req, nil +} + +// AddHeader returns a HeaderValue for appending to the given header name +func (e *Encoder) AddHeader(key string) HeaderValue { + return newHeaderValue(e.header, key, true) +} + +// SetHeader returns a HeaderValue for setting the given header name +func (e *Encoder) SetHeader(key string) HeaderValue { + return newHeaderValue(e.header, key, false) +} + +// Headers returns a Header used for encoding headers with the given prefix +func (e *Encoder) Headers(prefix string) Headers { + return Headers{ + header: e.header, + prefix: strings.TrimSpace(prefix), + } +} + +// HasHeader returns if a header with the key specified exists with one or +// more value. +func (e Encoder) HasHeader(key string) bool { + return len(e.header[key]) != 0 +} + +// SetURI returns a URIValue used for setting the given path key +func (e *Encoder) SetURI(key string) URIValue { + return newURIValue(&e.path, &e.rawPath, &e.pathBuffer, key) +} + +// SetQuery returns a QueryValue used for setting the given query key +func (e *Encoder) SetQuery(key string) QueryValue { + return NewQueryValue(e.query, key, false) +} + +// AddQuery returns a QueryValue used for appending the given query key +func (e *Encoder) AddQuery(key string) QueryValue { + return NewQueryValue(e.query, key, true) +} + +// HasQuery returns if a query with the key specified exists with one or +// more values. 
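Putting the pieces together, a sketch of binding a path label, a query member, and a header onto a request with this encoder (the URL and member names are illustrative):

package main

import (
	"fmt"
	"net/http"

	"github.com/aws/smithy-go/encoding/httpbinding"
)

func main() {
	req, _ := http.NewRequest("GET", "https://example.com/items/{id}", nil)

	enc, err := httpbinding.NewEncoder(req.URL.Path, req.URL.RawQuery, req.Header)
	if err != nil {
		panic(err)
	}

	// Replace the {id} label, add a query member, and set a header.
	if err := enc.SetURI("id").String("abc 123"); err != nil {
		panic(err)
	}
	enc.SetQuery("limit").Integer(10)
	enc.SetHeader("X-Custom").String("value")

	req, err = enc.Encode(req)
	fmt.Println(req.URL.String(), err) // https://example.com/items/abc%20123?limit=10 <nil>
}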
+func (e *Encoder) HasQuery(key string) bool { + return len(e.query.Get(key)) != 0 +} diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/header.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/header.go new file mode 100644 index 00000000000..f9256e175fc --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/httpbinding/header.go @@ -0,0 +1,122 @@ +package httpbinding + +import ( + "encoding/base64" + "math" + "math/big" + "net/http" + "strconv" + "strings" +) + +// Headers is used to encode header keys using a provided prefix +type Headers struct { + header http.Header + prefix string +} + +// AddHeader returns a HeaderValue used to append values to prefix+key +func (h Headers) AddHeader(key string) HeaderValue { + return h.newHeaderValue(key, true) +} + +// SetHeader returns a HeaderValue used to set the value of prefix+key +func (h Headers) SetHeader(key string) HeaderValue { + return h.newHeaderValue(key, false) +} + +func (h Headers) newHeaderValue(key string, append bool) HeaderValue { + return newHeaderValue(h.header, h.prefix+strings.TrimSpace(key), append) +} + +// HeaderValue is used to encode values to an HTTP header +type HeaderValue struct { + header http.Header + key string + append bool +} + +func newHeaderValue(header http.Header, key string, append bool) HeaderValue { + return HeaderValue{header: header, key: strings.TrimSpace(key), append: append} +} + +func (h HeaderValue) modifyHeader(value string) { + if h.append { + h.header[h.key] = append(h.header[h.key], value) + } else { + h.header[h.key] = append(h.header[h.key][:0], value) + } +} + +// String encodes the value v as the header string value +func (h HeaderValue) String(v string) { + h.modifyHeader(v) +} + +// Byte encodes the value v as a query string value +func (h HeaderValue) Byte(v int8) { + h.Long(int64(v)) +} + +// Short encodes the value v as a query string value +func (h HeaderValue) Short(v int16) { + h.Long(int64(v)) +} + +// Integer encodes the value v as the header string value +func (h HeaderValue) Integer(v int32) { + h.Long(int64(v)) +} + +// Long encodes the value v as the header string value +func (h HeaderValue) Long(v int64) { + h.modifyHeader(strconv.FormatInt(v, 10)) +} + +// Boolean encodes the value v as a query string value +func (h HeaderValue) Boolean(v bool) { + h.modifyHeader(strconv.FormatBool(v)) +} + +// Float encodes the value v as a query string value +func (h HeaderValue) Float(v float32) { + h.float(float64(v), 32) +} + +// Double encodes the value v as a query string value +func (h HeaderValue) Double(v float64) { + h.float(v, 64) +} + +func (h HeaderValue) float(v float64, bitSize int) { + switch { + case math.IsNaN(v): + h.String(floatNaN) + case math.IsInf(v, 1): + h.String(floatInfinity) + case math.IsInf(v, -1): + h.String(floatNegInfinity) + default: + h.modifyHeader(strconv.FormatFloat(v, 'f', -1, bitSize)) + } +} + +// BigInteger encodes the value v as a query string value +func (h HeaderValue) BigInteger(v *big.Int) { + h.modifyHeader(v.String()) +} + +// BigDecimal encodes the value v as a query string value +func (h HeaderValue) BigDecimal(v *big.Float) { + if i, accuracy := v.Int64(); accuracy == big.Exact { + h.Long(i) + return + } + h.modifyHeader(v.Text('e', -1)) +} + +// Blob encodes the value v as a base64 header string value +func (h HeaderValue) Blob(v []byte) { + encodeToString := base64.StdEncoding.EncodeToString(v) + h.modifyHeader(encodeToString) +} diff --git 
a/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go new file mode 100644 index 00000000000..e78926c9a56 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go @@ -0,0 +1,108 @@ +package httpbinding + +import ( + "bytes" + "fmt" +) + +const ( + uriTokenStart = '{' + uriTokenStop = '}' + uriTokenSkip = '+' +) + +func bufCap(b []byte, n int) []byte { + if cap(b) < n { + return make([]byte, 0, n) + } + + return b[0:0] +} + +// replacePathElement replaces a single element in the path []byte. +// Escape is used to control whether the value will be escaped using Amazon path escape style. +func replacePathElement(path, fieldBuf []byte, key, val string, escape bool) ([]byte, []byte, error) { + fieldBuf = bufCap(fieldBuf, len(key)+3) // { [+] } + fieldBuf = append(fieldBuf, uriTokenStart) + fieldBuf = append(fieldBuf, key...) + + start := bytes.Index(path, fieldBuf) + end := start + len(fieldBuf) + if start < 0 || len(path[end:]) == 0 { + // TODO what to do about error? + return path, fieldBuf, fmt.Errorf("invalid path index, start=%d,end=%d. %s", start, end, path) + } + + encodeSep := true + if path[end] == uriTokenSkip { + // '+' token means do not escape slashes + encodeSep = false + end++ + } + + if escape { + val = EscapePath(val, encodeSep) + } + + if path[end] != uriTokenStop { + return path, fieldBuf, fmt.Errorf("invalid path element, does not contain token stop, %s", path) + } + end++ + + fieldBuf = bufCap(fieldBuf, len(val)) + fieldBuf = append(fieldBuf, val...) + + keyLen := end - start + valLen := len(fieldBuf) + + if keyLen == valLen { + copy(path[start:], fieldBuf) + return path, fieldBuf, nil + } + + newLen := len(path) + (valLen - keyLen) + if len(path) < newLen { + path = path[:cap(path)] + } + if cap(path) < newLen { + newURI := make([]byte, newLen) + copy(newURI, path) + path = newURI + } + + // shift + copy(path[start+valLen:], path[end:]) + path = path[:newLen] + copy(path[start:], fieldBuf) + + return path, fieldBuf, nil +} + +// EscapePath escapes part of a URL path in Amazon style. +func EscapePath(path string, encodeSep bool) string { + var buf bytes.Buffer + for i := 0; i < len(path); i++ { + c := path[i] + if noEscape[c] || (c == '/' && !encodeSep) { + buf.WriteByte(c) + } else { + fmt.Fprintf(&buf, "%%%02X", c) + } + } + return buf.String() +} + +var noEscape [256]bool + +func init() { + for i := 0; i < len(noEscape); i++ { + // AWS expects every character except these to be escaped + noEscape[i] = (i >= 'A' && i <= 'Z') || + (i >= 'a' && i <= 'z') || + (i >= '0' && i <= '9') || + i == '-' || + i == '.' || + i == '_' || + i == '~' + } +} diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/query.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/query.go new file mode 100644 index 00000000000..c2e7d0a20f4 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/httpbinding/query.go @@ -0,0 +1,107 @@ +package httpbinding + +import ( + "encoding/base64" + "math" + "math/big" + "net/url" + "strconv" +) + +// QueryValue is used to encode query key values +type QueryValue struct { + query url.Values + key string + append bool +} + +// NewQueryValue creates a new QueryValue which enables encoding +// a query value into the given url.Values. 
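A quick sketch of how the encodeSep flag changes EscapePath's treatment of slashes (greedy `{label+}` bindings leave slashes intact, i.e. encodeSep=false; regular labels escape them):

package main

import (
	"fmt"

	"github.com/aws/smithy-go/encoding/httpbinding"
)

func main() {
	fmt.Println(httpbinding.EscapePath("a/b c", true))  // a%2Fb%20c
	fmt.Println(httpbinding.EscapePath("a/b c", false)) // a/b%20c
}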
+func NewQueryValue(query url.Values, key string, append bool) QueryValue { + return QueryValue{ + query: query, + key: key, + append: append, + } +} + +func (qv QueryValue) updateKey(value string) { + if qv.append { + qv.query.Add(qv.key, value) + } else { + qv.query.Set(qv.key, value) + } +} + +// Blob encodes v as a base64 query string value +func (qv QueryValue) Blob(v []byte) { + encodeToString := base64.StdEncoding.EncodeToString(v) + qv.updateKey(encodeToString) +} + +// Boolean encodes v as a query string value +func (qv QueryValue) Boolean(v bool) { + qv.updateKey(strconv.FormatBool(v)) +} + +// String encodes v as a query string value +func (qv QueryValue) String(v string) { + qv.updateKey(v) +} + +// Byte encodes v as a query string value +func (qv QueryValue) Byte(v int8) { + qv.Long(int64(v)) +} + +// Short encodes v as a query string value +func (qv QueryValue) Short(v int16) { + qv.Long(int64(v)) +} + +// Integer encodes v as a query string value +func (qv QueryValue) Integer(v int32) { + qv.Long(int64(v)) +} + +// Long encodes v as a query string value +func (qv QueryValue) Long(v int64) { + qv.updateKey(strconv.FormatInt(v, 10)) +} + +// Float encodes v as a query string value +func (qv QueryValue) Float(v float32) { + qv.float(float64(v), 32) +} + +// Double encodes v as a query string value +func (qv QueryValue) Double(v float64) { + qv.float(v, 64) +} + +func (qv QueryValue) float(v float64, bitSize int) { + switch { + case math.IsNaN(v): + qv.String(floatNaN) + case math.IsInf(v, 1): + qv.String(floatInfinity) + case math.IsInf(v, -1): + qv.String(floatNegInfinity) + default: + qv.updateKey(strconv.FormatFloat(v, 'f', -1, bitSize)) + } +} + +// BigInteger encodes v as a query string value +func (qv QueryValue) BigInteger(v *big.Int) { + qv.updateKey(v.String()) +} + +// BigDecimal encodes v as a query string value +func (qv QueryValue) BigDecimal(v *big.Float) { + if i, accuracy := v.Int64(); accuracy == big.Exact { + qv.Long(i) + return + } + qv.updateKey(v.Text('e', -1)) +} diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/uri.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/uri.go new file mode 100644 index 00000000000..f04e11984ac --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/httpbinding/uri.go @@ -0,0 +1,111 @@ +package httpbinding + +import ( + "math" + "math/big" + "strconv" + "strings" +) + +// URIValue is used to encode named URI parameters +type URIValue struct { + path, rawPath, buffer *[]byte + + key string +} + +func newURIValue(path *[]byte, rawPath *[]byte, buffer *[]byte, key string) URIValue { + return URIValue{path: path, rawPath: rawPath, buffer: buffer, key: key} +} + +func (u URIValue) modifyURI(value string) (err error) { + *u.path, *u.buffer, err = replacePathElement(*u.path, *u.buffer, u.key, value, false) + if err != nil { + return err + } + *u.rawPath, *u.buffer, err = replacePathElement(*u.rawPath, *u.buffer, u.key, value, true) + return err +} + +// Boolean encodes v as a URI string value +func (u URIValue) Boolean(v bool) error { + return u.modifyURI(strconv.FormatBool(v)) +} + +// String encodes v as a URI string value +func (u URIValue) String(v string) error { + return u.modifyURI(v) +} + +// Byte encodes v as a URI string value +func (u URIValue) Byte(v int8) error { + return u.Long(int64(v)) +} + +// Short encodes v as a URI string value +func (u URIValue) Short(v int16) error { + return u.Long(int64(v)) +} + +// Integer encodes v as a URI string value +func (u URIValue) Integer(v int32) error { 
+ return u.Long(int64(v)) +} + +// Long encodes v as a URI string value +func (u URIValue) Long(v int64) error { + return u.modifyURI(strconv.FormatInt(v, 10)) +} + +// Float encodes v as a query string value +func (u URIValue) Float(v float32) error { + return u.float(float64(v), 32) +} + +// Double encodes v as a query string value +func (u URIValue) Double(v float64) error { + return u.float(v, 64) +} + +func (u URIValue) float(v float64, bitSize int) error { + switch { + case math.IsNaN(v): + return u.String(floatNaN) + case math.IsInf(v, 1): + return u.String(floatInfinity) + case math.IsInf(v, -1): + return u.String(floatNegInfinity) + default: + return u.modifyURI(strconv.FormatFloat(v, 'f', -1, bitSize)) + } +} + +// BigInteger encodes v as a query string value +func (u URIValue) BigInteger(v *big.Int) error { + return u.modifyURI(v.String()) +} + +// BigDecimal encodes v as a query string value +func (u URIValue) BigDecimal(v *big.Float) error { + if i, accuracy := v.Int64(); accuracy == big.Exact { + return u.Long(i) + } + return u.modifyURI(v.Text('e', -1)) +} + +// SplitURI parses a Smithy HTTP binding trait URI +func SplitURI(uri string) (path, query string) { + queryStart := strings.IndexRune(uri, '?') + if queryStart == -1 { + path = uri + return path, query + } + + path = uri[:queryStart] + if queryStart+1 >= len(uri) { + return path, query + } + query = uri[queryStart+1:] + + return path, query +} diff --git a/vendor/github.com/aws/smithy-go/encoding/json/array.go b/vendor/github.com/aws/smithy-go/encoding/json/array.go new file mode 100644 index 00000000000..7a232f660f1 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/json/array.go @@ -0,0 +1,35 @@ +package json + +import ( + "bytes" +) + +// Array represents the encoding of a JSON Array +type Array struct { + w *bytes.Buffer + writeComma bool + scratch *[]byte +} + +func newArray(w *bytes.Buffer, scratch *[]byte) *Array { + w.WriteRune(leftBracket) + return &Array{w: w, scratch: scratch} +} + +// Value adds a new element to the JSON Array. +// Returns a Value type that is used to encode +// the array element. +func (a *Array) Value() Value { + if a.writeComma { + a.w.WriteRune(comma) + } else { + a.writeComma = true + } + + return newValue(a.w, a.scratch) +} + +// Close encodes the end of the JSON Array +func (a *Array) Close() { + a.w.WriteRune(rightBracket) +} diff --git a/vendor/github.com/aws/smithy-go/encoding/json/constants.go b/vendor/github.com/aws/smithy-go/encoding/json/constants.go new file mode 100644 index 00000000000..91044092aef --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/json/constants.go @@ -0,0 +1,15 @@ +package json + +const ( + leftBrace = '{' + rightBrace = '}' + + leftBracket = '[' + rightBracket = ']' + + comma = ',' + quote = '"' + colon = ':' + + null = "null" +) diff --git a/vendor/github.com/aws/smithy-go/encoding/json/decoder_util.go b/vendor/github.com/aws/smithy-go/encoding/json/decoder_util.go new file mode 100644 index 00000000000..7050c85b3c6 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/json/decoder_util.go @@ -0,0 +1,139 @@ +package json + +import ( + "bytes" + "encoding/json" + "fmt" + "io" +) + +// DiscardUnknownField discards unknown fields from a decoder body. +// This function is useful while deserializing a JSON body with additional +// unknown information that should be discarded. 
+func DiscardUnknownField(decoder *json.Decoder) error {
+	// This deliberately does not share logic with CollectUnknownField, even
+	// though it could, because if we were to delegate to that then we'd incur
+	// extra allocations and general memory usage.
+	v, err := decoder.Token()
+	if err == io.EOF {
+		return nil
+	}
+	if err != nil {
+		return err
+	}
+
+	if _, ok := v.(json.Delim); ok {
+		for decoder.More() {
+			if err = DiscardUnknownField(decoder); err != nil {
+				return err
+			}
+		}
+		endToken, err := decoder.Token()
+		if err != nil {
+			return err
+		}
+		if _, ok := endToken.(json.Delim); !ok {
+			return fmt.Errorf("invalid JSON : expected json delimiter, found %T %v",
+				endToken, endToken)
+		}
+	}
+
+	return nil
+}
+
+// CollectUnknownField grabs the contents of unknown fields from the decoder body
+// and returns them as a byte slice. This is useful for skipping unknown fields without
+// completely discarding them.
+func CollectUnknownField(decoder *json.Decoder) ([]byte, error) {
+	result, err := collectUnknownField(decoder)
+	if err != nil {
+		return nil, err
+	}
+
+	buff := bytes.NewBuffer(nil)
+	encoder := json.NewEncoder(buff)
+
+	if err := encoder.Encode(result); err != nil {
+		return nil, err
+	}
+
+	return buff.Bytes(), nil
+}
+
+func collectUnknownField(decoder *json.Decoder) (interface{}, error) {
+	// Grab the initial value. This could either be a concrete value like a string or a
+	// delimiter.
+	token, err := decoder.Token()
+	if err == io.EOF {
+		return nil, nil
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	// If it's an array or object, we'll need to recurse.
+	delim, ok := token.(json.Delim)
+	if ok {
+		var result interface{}
+		if delim == '{' {
+			result, err = collectUnknownObject(decoder)
+			if err != nil {
+				return nil, err
+			}
+		} else {
+			result, err = collectUnknownArray(decoder)
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		// Discard the closing token. decoder.Token handles checking for matching delimiters
+		if _, err := decoder.Token(); err != nil {
+			return nil, err
+		}
+		return result, nil
+	}
+
+	return token, nil
+}
+
+func collectUnknownArray(decoder *json.Decoder) ([]interface{}, error) {
+	// We need to create an empty array here instead of a nil array, since by getting
+	// into this function at all we necessarily have seen a non-nil list.
+	array := []interface{}{}
+
+	for decoder.More() {
+		value, err := collectUnknownField(decoder)
+		if err != nil {
+			return nil, err
+		}
+		array = append(array, value)
+	}
+
+	return array, nil
+}
+
+func collectUnknownObject(decoder *json.Decoder) (map[string]interface{}, error) {
+	object := make(map[string]interface{})
+
+	for decoder.More() {
+		key, err := collectUnknownField(decoder)
+		if err != nil {
+			return nil, err
+		}
+
+		// Keys have to be strings, which is particularly important as the encoder
+		// won't accept a map with interface{} keys
+		stringKey, ok := key.(string)
+		if !ok {
+			return nil, fmt.Errorf("expected string key, found %T", key)
+		}
+
+		value, err := collectUnknownField(decoder)
+		if err != nil {
+			return nil, err
+		}
+
+		object[stringKey] = value
+	}
+
+	return object, nil
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/json/encoder.go b/vendor/github.com/aws/smithy-go/encoding/json/encoder.go
new file mode 100644
index 00000000000..8772953f1e6
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/json/encoder.go
@@ -0,0 +1,30 @@
+package json
+
+import (
+	"bytes"
+)
+
+// Encoder is a JSON encoder that supports construction of JSON values
+// using methods.
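A sketch of draining unknown JSON members with these helpers while decoding a body by hand (the field names are illustrative):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"

	smithyjson "github.com/aws/smithy-go/encoding/json"
)

func main() {
	body := []byte(`{"known":1,"unknown":{"a":[1,2],"b":"x"}}`)
	dec := json.NewDecoder(bytes.NewReader(body))

	dec.Token() // consume the opening {
	for dec.More() {
		key, _ := dec.Token()
		switch key {
		case "known":
			var v int
			dec.Decode(&v)
			fmt.Println("known:", v)
		default:
			// Preserve the unknown value instead of silently dropping it.
			raw, err := smithyjson.CollectUnknownField(dec)
			fmt.Printf("unknown %q: %s (err=%v)\n", key, raw, err)
		}
	}
}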
+type Encoder struct { + w *bytes.Buffer + Value +} + +// NewEncoder returns a new JSON encoder +func NewEncoder() *Encoder { + writer := bytes.NewBuffer(nil) + scratch := make([]byte, 64) + + return &Encoder{w: writer, Value: newValue(writer, &scratch)} +} + +// String returns the String output of the JSON encoder +func (e Encoder) String() string { + return e.w.String() +} + +// Bytes returns the []byte slice of the JSON encoder +func (e Encoder) Bytes() []byte { + return e.w.Bytes() +} diff --git a/vendor/github.com/aws/smithy-go/encoding/json/escape.go b/vendor/github.com/aws/smithy-go/encoding/json/escape.go new file mode 100644 index 00000000000..d984d0cdca1 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/json/escape.go @@ -0,0 +1,198 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Copied and modified from Go 1.8 stdlib's encoding/json/#safeSet + +package json + +import ( + "bytes" + "unicode/utf8" +) + +// safeSet holds the value true if the ASCII character with the given array +// position can be represented inside a JSON string without any further +// escaping. +// +// All values are true except for the ASCII control characters (0-31), the +// double quote ("), and the backslash character ("\"). +var safeSet = [utf8.RuneSelf]bool{ + ' ': true, + '!': true, + '"': false, + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '(': true, + ')': true, + '*': true, + '+': true, + ',': true, + '-': true, + '.': true, + '/': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + ':': true, + ';': true, + '<': true, + '=': true, + '>': true, + '?': true, + '@': true, + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'V': true, + 'W': true, + 'X': true, + 'Y': true, + 'Z': true, + '[': true, + '\\': false, + ']': true, + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + '{': true, + '|': true, + '}': true, + '~': true, + '\u007f': true, +} + +// copied from Go 1.8 stdlib's encoding/json/#hex +var hex = "0123456789abcdef" + +// escapeStringBytes escapes and writes the passed in string bytes to the dst +// buffer +// +// Copied and modifed from Go 1.8 stdlib's encodeing/json/#encodeState.stringBytes +func escapeStringBytes(e *bytes.Buffer, s []byte) { + e.WriteByte('"') + start := 0 + for i := 0; i < len(s); { + if b := s[i]; b < utf8.RuneSelf { + if safeSet[b] { + i++ + continue + } + if start < i { + e.Write(s[start:i]) + } + switch b { + case '\\', '"': + e.WriteByte('\\') + e.WriteByte(b) + case '\n': + e.WriteByte('\\') + e.WriteByte('n') + case '\r': + e.WriteByte('\\') + e.WriteByte('r') + case '\t': + e.WriteByte('\\') + e.WriteByte('t') + default: + // This encodes bytes < 0x20 except for \t, \n and \r. 
+ // If escapeHTML is set, it also escapes <, >, and & + // because they can lead to security holes when + // user-controlled strings are rendered into JSON + // and served to some browsers. + e.WriteString(`\u00`) + e.WriteByte(hex[b>>4]) + e.WriteByte(hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRune(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + e.Write(s[start:i]) + } + e.WriteString(`\ufffd`) + i += size + start = i + continue + } + // U+2028 is LINE SEPARATOR. + // U+2029 is PARAGRAPH SEPARATOR. + // They are both technically valid characters in JSON strings, + // but don't work in JSONP, which has to be evaluated as JavaScript, + // and can lead to security holes there. It is valid JSON to + // escape them, so we do so unconditionally. + // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion. + if c == '\u2028' || c == '\u2029' { + if start < i { + e.Write(s[start:i]) + } + e.WriteString(`\u202`) + e.WriteByte(hex[c&0xF]) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + e.Write(s[start:]) + } + e.WriteByte('"') +} diff --git a/vendor/github.com/aws/smithy-go/encoding/json/object.go b/vendor/github.com/aws/smithy-go/encoding/json/object.go new file mode 100644 index 00000000000..722346d0358 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/json/object.go @@ -0,0 +1,40 @@ +package json + +import ( + "bytes" +) + +// Object represents the encoding of a JSON Object type +type Object struct { + w *bytes.Buffer + writeComma bool + scratch *[]byte +} + +func newObject(w *bytes.Buffer, scratch *[]byte) *Object { + w.WriteRune(leftBrace) + return &Object{w: w, scratch: scratch} +} + +func (o *Object) writeKey(key string) { + escapeStringBytes(o.w, []byte(key)) + o.w.WriteRune(colon) +} + +// Key adds the given named key to the JSON object. +// Returns a Value encoder that should be used to encode +// a JSON value type. 
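A minimal sketch of composing a JSON document with this encoder's Object, Key, and Array methods (the keys and values are illustrative):

package main

import (
	"fmt"

	smithyjson "github.com/aws/smithy-go/encoding/json"
)

func main() {
	enc := smithyjson.NewEncoder()

	obj := enc.Object()
	obj.Key("name").String("tempo")
	obj.Key("replicas").Integer(3)

	tags := obj.Key("tags").Array()
	tags.Value().String("tracing")
	tags.Value().String("metrics")
	tags.Close()

	obj.Close()

	fmt.Println(enc.String()) // {"name":"tempo","replicas":3,"tags":["tracing","metrics"]}
}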
+func (o *Object) Key(name string) Value { + if o.writeComma { + o.w.WriteRune(comma) + } else { + o.writeComma = true + } + o.writeKey(name) + return newValue(o.w, o.scratch) +} + +// Close encodes the end of the JSON Object +func (o *Object) Close() { + o.w.WriteRune(rightBrace) +} diff --git a/vendor/github.com/aws/smithy-go/encoding/json/value.go b/vendor/github.com/aws/smithy-go/encoding/json/value.go new file mode 100644 index 00000000000..b41ff1e15c2 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/json/value.go @@ -0,0 +1,149 @@ +package json + +import ( + "bytes" + "encoding/base64" + "math/big" + "strconv" + + "github.com/aws/smithy-go/encoding" +) + +// Value represents a JSON Value type +// JSON Value types: Object, Array, String, Number, Boolean, and Null +type Value struct { + w *bytes.Buffer + scratch *[]byte +} + +// newValue returns a new Value encoder +func newValue(w *bytes.Buffer, scratch *[]byte) Value { + return Value{w: w, scratch: scratch} +} + +// String encodes v as a JSON string +func (jv Value) String(v string) { + escapeStringBytes(jv.w, []byte(v)) +} + +// Byte encodes v as a JSON number +func (jv Value) Byte(v int8) { + jv.Long(int64(v)) +} + +// Short encodes v as a JSON number +func (jv Value) Short(v int16) { + jv.Long(int64(v)) +} + +// Integer encodes v as a JSON number +func (jv Value) Integer(v int32) { + jv.Long(int64(v)) +} + +// Long encodes v as a JSON number +func (jv Value) Long(v int64) { + *jv.scratch = strconv.AppendInt((*jv.scratch)[:0], v, 10) + jv.w.Write(*jv.scratch) +} + +// ULong encodes v as a JSON number +func (jv Value) ULong(v uint64) { + *jv.scratch = strconv.AppendUint((*jv.scratch)[:0], v, 10) + jv.w.Write(*jv.scratch) +} + +// Float encodes v as a JSON number +func (jv Value) Float(v float32) { + jv.float(float64(v), 32) +} + +// Double encodes v as a JSON number +func (jv Value) Double(v float64) { + jv.float(v, 64) +} + +func (jv Value) float(v float64, bits int) { + *jv.scratch = encoding.EncodeFloat((*jv.scratch)[:0], v, bits) + jv.w.Write(*jv.scratch) +} + +// Boolean encodes v as a JSON boolean +func (jv Value) Boolean(v bool) { + *jv.scratch = strconv.AppendBool((*jv.scratch)[:0], v) + jv.w.Write(*jv.scratch) +} + +// Base64EncodeBytes writes v as a base64 value in JSON string +func (jv Value) Base64EncodeBytes(v []byte) { + encodeByteSlice(jv.w, (*jv.scratch)[:0], v) +} + +// Write writes v directly to the JSON document +func (jv Value) Write(v []byte) { + jv.w.Write(v) +} + +// Array returns a new Array encoder +func (jv Value) Array() *Array { + return newArray(jv.w, jv.scratch) +} + +// Object returns a new Object encoder +func (jv Value) Object() *Object { + return newObject(jv.w, jv.scratch) +} + +// Null encodes a null JSON value +func (jv Value) Null() { + jv.w.WriteString(null) +} + +// BigInteger encodes v as JSON value +func (jv Value) BigInteger(v *big.Int) { + jv.w.Write([]byte(v.Text(10))) +} + +// BigDecimal encodes v as JSON value +func (jv Value) BigDecimal(v *big.Float) { + if i, accuracy := v.Int64(); accuracy == big.Exact { + jv.Long(i) + return + } + // TODO: Should this try to match ES6 ToString similar to stdlib JSON? 
+	jv.w.Write([]byte(v.Text('e', -1)))
+}
+
+// Based on encoding/json encodeByteSlice from the Go Standard Library
+// https://golang.org/src/encoding/json/encode.go
+func encodeByteSlice(w *bytes.Buffer, scratch []byte, v []byte) {
+	if v == nil {
+		w.WriteString(null)
+		return
+	}
+
+	w.WriteRune(quote)
+
+	encodedLen := base64.StdEncoding.EncodedLen(len(v))
+	if encodedLen <= len(scratch) {
+		// If the encoded bytes fit in scratch, avoid an extra
+		// allocation and use the cheaper Encoding.Encode.
+		dst := scratch[:encodedLen]
+		base64.StdEncoding.Encode(dst, v)
+		w.Write(dst)
+	} else if encodedLen <= 1024 {
+		// The encoded bytes are short enough to allocate for, and
+		// Encoding.Encode is still cheaper.
+		dst := make([]byte, encodedLen)
+		base64.StdEncoding.Encode(dst, v)
+		w.Write(dst)
+	} else {
+		// The encoded bytes are too long to cheaply allocate, and
+		// Encoding.Encode is no longer noticeably cheaper.
+		enc := base64.NewEncoder(base64.StdEncoding, w)
+		enc.Write(v)
+		enc.Close()
+	}
+
+	w.WriteRune(quote)
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/array.go b/vendor/github.com/aws/smithy-go/encoding/xml/array.go
new file mode 100644
index 00000000000..508f3c997ec
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/array.go
@@ -0,0 +1,49 @@
+package xml
+
+// arrayMemberWrapper is the default member wrapper tag name for XML Array type
+var arrayMemberWrapper = StartElement{
+	Name: Name{Local: "member"},
+}
+
+// Array represents the encoding of a XML array type
+type Array struct {
+	w       writer
+	scratch *[]byte
+
+	// member start element is the array member wrapper start element
+	memberStartElement StartElement
+
+	// isFlattened indicates if the array is a flattened array.
+	isFlattened bool
+}
+
+// newArray returns an array encoder.
+// It also takes in the member start element and the array start element.
+// It takes in an isFlattened bool, indicating whether the array is a flattened array.
+//
+// A wrapped array ["value1", "value2"] is represented as
+// `<List><member>value1</member><member>value2</member></List>`.

+// A flattened array `someList: ["value1", "value2"]` is represented as
+// `<someList>value1</someList><someList>value2</someList>`.
+func newArray(w writer, scratch *[]byte, memberStartElement StartElement, arrayStartElement StartElement, isFlattened bool) *Array {
+	var memberWrapper = memberStartElement
+	if isFlattened {
+		memberWrapper = arrayStartElement
+	}
+
+	return &Array{
+		w:                  w,
+		scratch:            scratch,
+		memberStartElement: memberWrapper,
+		isFlattened:        isFlattened,
+	}
+}
+
+// Member adds a new member to the XML array.
+// It returns a Value encoder.
+func (a *Array) Member() Value {
+	v := newValue(a.w, a.scratch, a.memberStartElement)
+	v.isFlattened = a.isFlattened
+	return v
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/constants.go b/vendor/github.com/aws/smithy-go/encoding/xml/constants.go
new file mode 100644
index 00000000000..ccee90a636b
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/constants.go
@@ -0,0 +1,10 @@
+package xml
+
+const (
+	leftAngleBracket  = '<'
+	rightAngleBracket = '>'
+	forwardSlash      = '/'
+	colon             = ':'
+	equals            = '='
+	quote             = '"'
+)
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/doc.go b/vendor/github.com/aws/smithy-go/encoding/xml/doc.go
new file mode 100644
index 00000000000..f9200093e87
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/doc.go
@@ -0,0 +1,49 @@
+/*
+Package xml holds the XML encoder utility.
+This utility is written in accordance with our design to delegate to
+shape serializer functions in which an xml.Value will be passed around.
+
+Resources followed: https://smithy.io/2.0/spec/protocol-traits.html#xml-bindings
+
+Member Element
+
+Member elements should be used to encode xml shapes into xml elements except for flattened xml shapes. Member elements
+write their own element start tag. These elements should always be closed.
+
+Flattened Element
+
+Flattened elements should be used to encode shapes marked with the flattened trait into xml elements. Flattened elements
+do not write a start tag, and thus should not be closed.
+
+Simple types encoding
+
+All simple type methods on Value, such as String() and Long(), auto close the associated member element.
+
+Array
+
+Array returns the collection encoder. It has two modes, wrapped and flattened encoding.
+
+Wrapped arrays have two methods, Array() and ArrayWithCustomName(), which facilitate array member wrapping.
+By default, wrapped array members are wrapped with a `member` named start element.
+
+	<wrappedArray><member>apple</member><member>tree</member></wrappedArray>
+
+Flattened arrays rely on Value being marked as flattened.
+If a shape is marked as flattened, Array() will use the shape element name as the wrapper for array elements.
+
+	<flattenedArray>apple</flattenedArray><flattenedArray>tree</flattenedArray>
+
+Map
+
+Map is the map encoder. It has two modes, wrapped and flattened encoding.
+
+Wrapped maps have the Map() method, which facilitates map member wrapping.
+By default, wrapped map members are wrapped with an `entry` named start element.
+
+	<wrappedMap><entry><key>apple</key><value>tree</value></entry><entry><key>snow</key><value>ice</value></entry></wrappedMap>
+
+Flattened maps rely on Value being marked as flattened.
+If a shape is marked as flattened, Map() will use the shape element name as the wrapper for map entry elements.
+
+	<flattenedMap><key>apple</key><value>tree</value></flattenedMap><flattenedMap><key>snow</key><value>ice</value></flattenedMap>
+*/
+package xml
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/element.go b/vendor/github.com/aws/smithy-go/encoding/xml/element.go
new file mode 100644
index 00000000000..ae84e7999ed
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/element.go
@@ -0,0 +1,91 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Copied and modified from Go 1.14 stdlib's encoding/xml
+
+package xml
+
+// A Name represents an XML name (Local) annotated
+// with a name space identifier (Space).
+// In tokens returned by Decoder.Token, the Space identifier
+// is given as a canonical URL, not the short prefix used
+// in the document being parsed.
+type Name struct {
+	Space, Local string
+}
+
+// An Attr represents an attribute in an XML element (Name=Value).
+type Attr struct {
+	Name  Name
+	Value string
+}
+
+/*
+NewAttribute returns an Attr.
+It takes in a local name aka attribute name, and a value
+representing the attribute value.
+*/
+func NewAttribute(local, value string) Attr {
+	return Attr{
+		Name: Name{
+			Local: local,
+		},
+		Value: value,
+	}
+}
+
+/*
+NewNamespaceAttribute returns an Attr.
+It takes in a local name aka attribute name, and a value
+representing the attribute value.
+
+NewNamespaceAttribute appends `xmlns:` in front of the namespace
+prefix.
+
+For creating a name space attribute representing
+`xmlns:prefix="http://example.com"`, the breakdown would be:
+local = "prefix"
+value = "http://example.com"
+*/
+func NewNamespaceAttribute(local, value string) Attr {
+	attr := NewAttribute(local, value)
+
+	// default name space identifier
+	attr.Name.Space = "xmlns"
+	return attr
+}
+
+// A StartElement represents an XML start element.
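A small sketch of the attribute helpers above (the prefix and URL are illustrative):

package main

import (
	"fmt"

	smithyxml "github.com/aws/smithy-go/encoding/xml"
)

func main() {
	attr := smithyxml.NewNamespaceAttribute("prefix", "http://example.com")

	// The helper fills in the "xmlns" space identifier, so the attribute
	// renders as xmlns:prefix="http://example.com".
	fmt.Println(attr.Name.Space, attr.Name.Local, attr.Value) // xmlns prefix http://example.com
}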
+type StartElement struct { + Name Name + Attr []Attr +} + +// Copy creates a new copy of StartElement. +func (e StartElement) Copy() StartElement { + attrs := make([]Attr, len(e.Attr)) + copy(attrs, e.Attr) + e.Attr = attrs + return e +} + +// End returns the corresponding XML end element. +func (e StartElement) End() EndElement { + return EndElement{e.Name} +} + +// returns true if start element local name is empty +func (e StartElement) isZero() bool { + return len(e.Name.Local) == 0 +} + +// An EndElement represents an XML end element. +type EndElement struct { + Name Name +} + +// returns true if end element local name is empty +func (e EndElement) isZero() bool { + return len(e.Name.Local) == 0 +} diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/encoder.go b/vendor/github.com/aws/smithy-go/encoding/xml/encoder.go new file mode 100644 index 00000000000..16fb3dddb0a --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/xml/encoder.go @@ -0,0 +1,51 @@ +package xml + +// writer interface used by the xml encoder to write an encoded xml +// document in a writer. +type writer interface { + + // Write takes in a byte slice and returns the number of bytes written and any error + Write(p []byte) (n int, err error) + + // WriteRune takes in a rune and returns the number of bytes written and any error + WriteRune(r rune) (n int, err error) + + // WriteString takes in a string and returns the number of bytes written and any error + WriteString(s string) (n int, err error) + + // String method returns a string + String() string + + // Bytes returns a byte slice. + Bytes() []byte +} + +// Encoder is an XML encoder that supports construction of XML values +// using methods. The encoder takes in a writer and maintains a scratch buffer. +type Encoder struct { + w writer + scratch *[]byte +} + +// NewEncoder returns an XML encoder +func NewEncoder(w writer) *Encoder { + scratch := make([]byte, 64) + + return &Encoder{w: w, scratch: &scratch} +} + +// String returns the string output of the XML encoder +func (e Encoder) String() string { + return e.w.String() +} + +// Bytes returns the []byte slice of the XML encoder +func (e Encoder) Bytes() []byte { + return e.w.Bytes() +} + +// RootElement builds a root element encoding. +// It writes its start element tag. The value should be closed.
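+// +// A minimal usage sketch (illustrative only; the element names are invented +// and a bytes.Buffer is assumed as the writer): +// +//	var buf bytes.Buffer +//	encoder := NewEncoder(&buf) +//	root := encoder.RootElement(StartElement{Name: Name{Local: "root"}}) +//	child := root.MemberElement(StartElement{Name: Name{Local: "child"}}) +//	child.String("value") +//	root.Close() +//	// buf now contains <root><child>value</child></root>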
+func (e Encoder) RootElement(element StartElement) Value { + return newValue(e.w, e.scratch, element) +} diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/error_utils.go b/vendor/github.com/aws/smithy-go/encoding/xml/error_utils.go new file mode 100644 index 00000000000..f3db6ccca85 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/xml/error_utils.go @@ -0,0 +1,51 @@ +package xml + +import ( + "encoding/xml" + "fmt" + "io" +) + +// ErrorComponents represents the error response fields +// that will be deserialized from an xml error response body +type ErrorComponents struct { + Code string + Message string +} + +// GetErrorResponseComponents returns the error fields from an xml error response body +func GetErrorResponseComponents(r io.Reader, noErrorWrapping bool) (ErrorComponents, error) { + if noErrorWrapping { + var errResponse noWrappedErrorResponse + if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF { + return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err) + } + return ErrorComponents{ + Code: errResponse.Code, + Message: errResponse.Message, + }, nil + } + + var errResponse wrappedErrorResponse + if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF { + return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err) + } + return ErrorComponents{ + Code: errResponse.Code, + Message: errResponse.Message, + }, nil +} + +// noWrappedErrorResponse represents the error response body with +// no internal <Error></Error> wrapping +type noWrappedErrorResponse struct { + Code string `xml:"Code"` + Message string `xml:"Message"` +} + +// wrappedErrorResponse represents the error response body +// wrapped within <Error>...</Error> +type wrappedErrorResponse struct { + Code string `xml:"Error>Code"` + Message string `xml:"Error>Message"` +} diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/escape.go b/vendor/github.com/aws/smithy-go/encoding/xml/escape.go new file mode 100644 index 00000000000..1c5479af677 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/xml/escape.go @@ -0,0 +1,137 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Copied and modified from Go 1.14 stdlib's encoding/xml + +package xml + +import ( + "unicode/utf8" +) + +// Copied from Go 1.14 stdlib's encoding/xml +var ( + escQuot = []byte("&#34;") // shorter than "&quot;" + escApos = []byte("&#39;") // shorter than "&apos;" + escAmp = []byte("&amp;") + escLT = []byte("&lt;") + escGT = []byte("&gt;") + escTab = []byte("&#x9;") + escNL = []byte("&#xA;") + escCR = []byte("&#xD;") + escFFFD = []byte("\uFFFD") // Unicode replacement character + + // Additional Escapes + escNextLine = []byte("&#x85;") + escLS = []byte("&#x2028;") +) + +// Decide whether the given rune is in the XML Character Range, per +// the Char production of https://www.xml.com/axml/testaxml.htm, +// Section 2.2 Characters. +func isInCharacterRange(r rune) (inrange bool) { + return r == 0x09 || + r == 0x0A || + r == 0x0D || + r >= 0x20 && r <= 0xD7FF || + r >= 0xE000 && r <= 0xFFFD || + r >= 0x10000 && r <= 0x10FFFF +} + +// TODO: When do we need to escape the string? +// Based on encoding/xml escapeString from the Go Standard Library. +// https://golang.org/src/encoding/xml/xml.go +func escapeString(e writer, s string) { + var esc []byte + last := 0 + for i := 0; i < len(s); { + r, width := utf8.DecodeRuneInString(s[i:]) + i += width + switch r { + case '"': + esc = escQuot + case '\'': + esc = escApos + case '&': + esc = escAmp + case '<': + esc = escLT + case '>': + esc = escGT + case '\t': + esc = escTab + case '\n': + esc = escNL + case '\r': + esc = escCR + case '\u0085': + // Not escaped by stdlib + esc = escNextLine + case '\u2028': + // Not escaped by stdlib + esc = escLS + default: + if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) { + esc = escFFFD + break + } + continue + } + e.WriteString(s[last : i-width]) + e.Write(esc) + last = i + } + e.WriteString(s[last:]) +} + +// escapeText writes to w the properly escaped XML equivalent +// of the plain text data s. Unlike the stdlib implementation, +// newline characters are always escaped. +// +// Based on encoding/xml escapeText from the Go Standard Library. +// https://golang.org/src/encoding/xml/xml.go +func escapeText(e writer, s []byte) { + var esc []byte + last := 0 + for i := 0; i < len(s); { + r, width := utf8.DecodeRune(s[i:]) + i += width + switch r { + case '"': + esc = escQuot + case '\'': + esc = escApos + case '&': + esc = escAmp + case '<': + esc = escLT + case '>': + esc = escGT + case '\t': + esc = escTab + case '\n': + // This always escapes newline, which is different from stdlib's optional + // escape of new line. + esc = escNL + case '\r': + esc = escCR + case '\u0085': + // Not escaped by stdlib + esc = escNextLine + case '\u2028': + // Not escaped by stdlib + esc = escLS + default: + if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) { + esc = escFFFD + break + } + continue + } + e.Write(s[last : i-width]) + e.Write(esc) + last = i + } + e.Write(s[last:]) +} diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/map.go b/vendor/github.com/aws/smithy-go/encoding/xml/map.go new file mode 100644 index 00000000000..e42858965cc --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/xml/map.go @@ -0,0 +1,53 @@ +package xml + +// mapEntryWrapper is the default member wrapper start element for XML Map entry +var mapEntryWrapper = StartElement{ + Name: Name{Local: "entry"}, +} + +// Map represents the encoding of an XML map type +type Map struct { + w writer + scratch *[]byte + + // member start element is the map entry wrapper start element + memberStartElement StartElement + + // isFlattened returns true if the map is a flattened map + isFlattened bool +} + +// newMap returns a map encoder which sets the default map +// entry wrapper to `entry`. +// +// A map `someMap : {{key:"abc", value:"123"}}` is represented as +// `<someMap><entry><key>abc</key><value>123</value></entry></someMap>`. +func newMap(w writer, scratch *[]byte) *Map { + return &Map{ + w: w, + scratch: scratch, + memberStartElement: mapEntryWrapper, + } +} + +// newFlattenedMap returns a map encoder which sets the map +// entry wrapper to the passed in memberWrapper.
+// +// A flattened map `someMap : {{key:"abc", value:"123"}}` is represented as +// `<someMap><key>abc</key><value>123</value></someMap>`. +func newFlattenedMap(w writer, scratch *[]byte, memberWrapper StartElement) *Map { + return &Map{ + w: w, + scratch: scratch, + memberStartElement: memberWrapper, + isFlattened: true, + } +} + +// Entry returns a Value encoder with the map's element. +// It writes the member wrapper start tag for each entry. +func (m *Map) Entry() Value { + v := newValue(m.w, m.scratch, m.memberStartElement) + v.isFlattened = m.isFlattened + return v +} diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/value.go b/vendor/github.com/aws/smithy-go/encoding/xml/value.go new file mode 100644 index 00000000000..09434b2c0b5 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/xml/value.go @@ -0,0 +1,302 @@ +package xml + +import ( + "encoding/base64" + "fmt" + "math/big" + "strconv" + + "github.com/aws/smithy-go/encoding" +) + +// Value represents an XML Value type +// XML Value types: Object, Array, Map, String, Number, Boolean. +type Value struct { + w writer + scratch *[]byte + + // xml start element is the associated start element for the Value + startElement StartElement + + // indicates if the Value represents a flattened shape + isFlattened bool +} + +// newFlattenedValue returns a Value encoder. newFlattenedValue does NOT write the start element tag +func newFlattenedValue(w writer, scratch *[]byte, startElement StartElement) Value { + return Value{ + w: w, + scratch: scratch, + startElement: startElement, + } +} + +// newValue writes the start element xml tag and returns a Value +func newValue(w writer, scratch *[]byte, startElement StartElement) Value { + writeStartElement(w, startElement) + return Value{w: w, scratch: scratch, startElement: startElement} +} + +// writeStartElement takes in a start element and writes it. +// It handles namespace, attributes in start element. +func writeStartElement(w writer, el StartElement) error { + if el.isZero() { + return fmt.Errorf("xml start element cannot be nil") + } + + w.WriteRune(leftAngleBracket) + + if len(el.Name.Space) != 0 { + escapeString(w, el.Name.Space) + w.WriteRune(colon) + } + escapeString(w, el.Name.Local) + for _, attr := range el.Attr { + w.WriteRune(' ') + writeAttribute(w, &attr) + } + + w.WriteRune(rightAngleBracket) + return nil +} + +// writeAttribute writes an attribute from a provided Attribute +// For a namespace attribute, the attr.Name.Space must be defined as "xmlns". +// https://www.w3.org/TR/REC-xml-names/#NT-DefaultAttName +func writeAttribute(w writer, attr *Attr) { + // if local, space both are not empty + if len(attr.Name.Space) != 0 && len(attr.Name.Local) != 0 { + escapeString(w, attr.Name.Space) + w.WriteRune(colon) + } + + // if prefix is empty, the default `xmlns` space should be used as prefix. + if len(attr.Name.Local) == 0 { + attr.Name.Local = attr.Name.Space + } + + escapeString(w, attr.Name.Local) + w.WriteRune(equals) + w.WriteRune(quote) + escapeString(w, attr.Value) + w.WriteRune(quote) +} + +// writeEndElement takes in an end element and writes it. +func writeEndElement(w writer, el EndElement) error { + if el.isZero() { + return fmt.Errorf("xml end element cannot be nil") + } + + w.WriteRune(leftAngleBracket) + w.WriteRune(forwardSlash) + + if len(el.Name.Space) != 0 { + escapeString(w, el.Name.Space) + w.WriteRune(colon) + } + escapeString(w, el.Name.Local) + w.WriteRune(rightAngleBracket) + + return nil +} + +// String encodes v as an XML string. +// It will auto close the parent xml element tag.
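+// +// For example (illustrative; parent is assumed to be an open Value), writing +// a member element with String auto closes it: +// +//	v := parent.MemberElement(StartElement{Name: Name{Local: "name"}}) +//	v.String("Jane") +//	// yields <name>Jane</name> with no explicit Close needed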
+func (xv Value) String(v string) { + escapeString(xv.w, v) + xv.Close() +} + +// Byte encodes v as an XML number. +// It will auto close the parent xml element tag. +func (xv Value) Byte(v int8) { + xv.Long(int64(v)) +} + +// Short encodes v as an XML number. +// It will auto close the parent xml element tag. +func (xv Value) Short(v int16) { + xv.Long(int64(v)) +} + +// Integer encodes v as an XML number. +// It will auto close the parent xml element tag. +func (xv Value) Integer(v int32) { + xv.Long(int64(v)) +} + +// Long encodes v as an XML number. +// It will auto close the parent xml element tag. +func (xv Value) Long(v int64) { + *xv.scratch = strconv.AppendInt((*xv.scratch)[:0], v, 10) + xv.w.Write(*xv.scratch) + + xv.Close() +} + +// Float encodes v as an XML number. +// It will auto close the parent xml element tag. +func (xv Value) Float(v float32) { + xv.float(float64(v), 32) + xv.Close() +} + +// Double encodes v as an XML number. +// It will auto close the parent xml element tag. +func (xv Value) Double(v float64) { + xv.float(v, 64) + xv.Close() +} + +func (xv Value) float(v float64, bits int) { + *xv.scratch = encoding.EncodeFloat((*xv.scratch)[:0], v, bits) + xv.w.Write(*xv.scratch) +} + +// Boolean encodes v as an XML boolean. +// It will auto close the parent xml element tag. +func (xv Value) Boolean(v bool) { + *xv.scratch = strconv.AppendBool((*xv.scratch)[:0], v) + xv.w.Write(*xv.scratch) + + xv.Close() +} + +// Base64EncodeBytes writes v as a base64 value in an XML string. +// It will auto close the parent xml element tag. +func (xv Value) Base64EncodeBytes(v []byte) { + encodeByteSlice(xv.w, (*xv.scratch)[:0], v) + xv.Close() +} + +// BigInteger encodes v big.Int as an XML value. +// It will auto close the parent xml element tag. +func (xv Value) BigInteger(v *big.Int) { + xv.w.Write([]byte(v.Text(10))) + xv.Close() +} + +// BigDecimal encodes v big.Float as an XML value. +// It will auto close the parent xml element tag. +func (xv Value) BigDecimal(v *big.Float) { + if i, accuracy := v.Int64(); accuracy == big.Exact { + xv.Long(i) + return + } + + xv.w.Write([]byte(v.Text('e', -1))) + xv.Close() +} + +// Write writes v directly to the xml document. +// If escapeXMLText is set to true, Write will escape the text. +// It will auto close the parent xml element tag. +func (xv Value) Write(v []byte, escapeXMLText bool) { + // escape and write xml text + if escapeXMLText { + escapeText(xv.w, v) + } else { + // write xml directly + xv.w.Write(v) + } + + xv.Close() +} + +// MemberElement does member element encoding. It returns a Value. +// Member Element method should be used for all shapes except flattened shapes. +// +// A call to MemberElement will write nested element tags directly using the +// provided start element. The value returned by MemberElement should be closed. +func (xv Value) MemberElement(element StartElement) Value { + return newValue(xv.w, xv.scratch, element) +} + +// FlattenedElement returns flattened element encoding. It returns a Value. +// This method should be used for flattened shapes. +// +// Unlike MemberElement, flattened element will NOT write element tags +// directly for the associated start element. +// +// The value returned by the FlattenedElement does not need to be closed. +func (xv Value) FlattenedElement(element StartElement) Value { + v := newFlattenedValue(xv.w, xv.scratch, element) + v.isFlattened = true + return v +} + +// Array returns an array encoder. By default, the members of the array are +// wrapped with the `<member>` element tag.
+// If value is marked as flattened, the start element is used to wrap the members instead of +// the `<member>` element. +func (xv Value) Array() *Array { + return newArray(xv.w, xv.scratch, arrayMemberWrapper, xv.startElement, xv.isFlattened) +} + +/* +ArrayWithCustomName returns an array encoder. + +It takes a named start element as an argument; the named start element will be used to wrap xml array entries. +For example, `<someList><customName>entry1</customName></someList>`. +Here the `customName` named start element is wrapped on each array member. +*/ +func (xv Value) ArrayWithCustomName(element StartElement) *Array { + return newArray(xv.w, xv.scratch, element, xv.startElement, xv.isFlattened) +} + +/* +Map returns a map encoder. By default, the map entries are +wrapped with the `<entry>` element tag. + +If value is marked as flattened, the start element is used to wrap the entry instead of +the `<entry>` element. +*/ +func (xv Value) Map() *Map { + // flattened map + if xv.isFlattened { + return newFlattenedMap(xv.w, xv.scratch, xv.startElement) + } + + // un-flattened map + return newMap(xv.w, xv.scratch) +} + +// encodeByteSlice is a modified copy of the json encoder's encodeByteSlice. +// It is used to base64 encode a byte slice. +func encodeByteSlice(w writer, scratch []byte, v []byte) { + if v == nil { + return + } + + encodedLen := base64.StdEncoding.EncodedLen(len(v)) + if encodedLen <= len(scratch) { + // If the encoded bytes fit in e.scratch, avoid an extra + // allocation and use the cheaper Encoding.Encode. + dst := scratch[:encodedLen] + base64.StdEncoding.Encode(dst, v) + w.Write(dst) + } else if encodedLen <= 1024 { + // The encoded bytes are short enough to allocate for, and + // Encoding.Encode is still cheaper. + dst := make([]byte, encodedLen) + base64.StdEncoding.Encode(dst, v) + w.Write(dst) + } else { + // The encoded bytes are too long to cheaply allocate, and + // Encoding.Encode is no longer noticeably cheaper. + enc := base64.NewEncoder(base64.StdEncoding, w) + enc.Write(v) + enc.Close() + } +} + +// IsFlattened returns true if value is for a flattened shape. +func (xv Value) IsFlattened() bool { + return xv.isFlattened +} + +// Close closes the value. +func (xv Value) Close() { + writeEndElement(xv.w, xv.startElement.End()) +} diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/xml_decoder.go b/vendor/github.com/aws/smithy-go/encoding/xml/xml_decoder.go new file mode 100644 index 00000000000..dc4eebdffa7 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/xml/xml_decoder.go @@ -0,0 +1,154 @@ +package xml + +import ( + "encoding/xml" + "fmt" + "strings" +) + +// NodeDecoder is an XML decoder wrapper that is responsible for decoding +// a single XML Node element and its nested member elements. This wrapper decoder +// takes in the start element of the top level node being decoded. +type NodeDecoder struct { + Decoder *xml.Decoder + StartEl xml.StartElement +} + +// WrapNodeDecoder returns an initialized NodeDecoder +func WrapNodeDecoder(decoder *xml.Decoder, startEl xml.StartElement) NodeDecoder { + return NodeDecoder{ + Decoder: decoder, + StartEl: startEl, + } +} + +// Token on a Node Decoder returns an xml StartElement.
It returns a boolean that indicates whether +// a token is the node decoder's end node token; and an error which indicates any error +// that occurred while retrieving the start element +func (d NodeDecoder) Token() (t xml.StartElement, done bool, err error) { + for { + token, e := d.Decoder.Token() + if e != nil { + return t, done, e + } + + // check if we reach end of the node being decoded + if el, ok := token.(xml.EndElement); ok { + return t, el == d.StartEl.End(), err + } + + if t, ok := token.(xml.StartElement); ok { + return restoreAttrNamespaces(t), false, err + } + + // skip token if it is a comment or preamble or empty space value due to indentation + // or if it's a value and is not expected + } +} + +// restoreAttrNamespaces updates XML attributes to restore the short namespaces found within +// the raw XML document. +func restoreAttrNamespaces(node xml.StartElement) xml.StartElement { + if len(node.Attr) == 0 { + return node + } + + // Generate a mapping of XML namespace values to their short names. + ns := map[string]string{} + for _, a := range node.Attr { + if a.Name.Space == "xmlns" { + ns[a.Value] = a.Name.Local + break + } + } + + for i, a := range node.Attr { + if a.Name.Space == "xmlns" { + continue + } + // By default, xml.Decoder will fully resolve these namespaces. So if you had <foo xmlns:bar="baz" bar:bin="hi"/> + // then by default the second attribute would have the `Name.Space` resolved to `baz`. But we need it to + // continue to resolve as `bar` so we can easily identify it later on. + if v, ok := ns[node.Attr[i].Name.Space]; ok { + node.Attr[i].Name.Space = v + } + } + return node +} + +// GetElement looks for the given tag name at the current level, and returns the element if found, +// skipping over non-matching elements. Returns an error if the node is not found, or if an error occurs while walking +// the document. +func (d NodeDecoder) GetElement(name string) (t xml.StartElement, err error) { + for { + token, done, err := d.Token() + if err != nil { + return t, err + } + if done { + return t, fmt.Errorf("%s node not found", name) + } + switch { + case strings.EqualFold(name, token.Name.Local): + return token, nil + default: + err = d.Decoder.Skip() + if err != nil { + return t, err + } + } + } +} + +// Value provides an abstraction to retrieve char data value within an xml element. +// The method will return an error if it encounters a nested xml element instead of char data. +// This method should only be used to retrieve simple type or blob shape values as []byte. +func (d NodeDecoder) Value() (c []byte, err error) { + t, e := d.Decoder.Token() + if e != nil { + return c, e + } + + endElement := d.StartEl.End() + + switch ev := t.(type) { + case xml.CharData: + c = ev.Copy() + case xml.EndElement: // end tag or self-closing + if ev == endElement { + return []byte{}, err + } + return c, fmt.Errorf("expected value for %v element, got %T type %v instead", d.StartEl.Name.Local, t, t) + default: + return c, fmt.Errorf("expected value for %v element, got %T type %v instead", d.StartEl.Name.Local, t, t) + } + + t, e = d.Decoder.Token() + if e != nil { + return c, e + } + + if ev, ok := t.(xml.EndElement); ok { + if ev == endElement { + return c, err + } + } + + return c, fmt.Errorf("expected end element %v, got %T type %v instead", endElement, t, t) +} + +// FetchRootElement takes in a decoder and returns the first start element within the xml body.
+// This function is useful in fetching the start element of an XML response and ignoring the +// comments and preamble +func FetchRootElement(decoder *xml.Decoder) (startElement xml.StartElement, err error) { + for { + t, e := decoder.Token() + if e != nil { + return startElement, e + } + + if startElement, ok := t.(xml.StartElement); ok { + return startElement, err + } + } +} diff --git a/vendor/github.com/aws/smithy-go/endpoints/endpoint.go b/vendor/github.com/aws/smithy-go/endpoints/endpoint.go new file mode 100644 index 00000000000..a9352839748 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/endpoints/endpoint.go @@ -0,0 +1,23 @@ +package transport + +import ( + "net/http" + "net/url" + + "github.com/aws/smithy-go" +) + +// Endpoint is the endpoint object returned by Endpoint resolution V2 +type Endpoint struct { + // The complete URL minimally specifying the scheme and host. + // May optionally specify the port and base path component. + URI url.URL + + // An optional set of headers to be sent using transport layer headers. + Headers http.Header + + // A grab-bag property map of endpoint attributes. The + // values present here are subject to change, or to being added/removed at any + // time. + Properties smithy.Properties +} diff --git a/vendor/github.com/aws/smithy-go/errors.go b/vendor/github.com/aws/smithy-go/errors.go new file mode 100644 index 00000000000..d6948d02062 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/errors.go @@ -0,0 +1,137 @@ +package smithy + +import "fmt" + +// APIError provides the generic API and protocol agnostic error type all SDK +// generated exception types will implement. +type APIError interface { + error + + // ErrorCode returns the error code for the API exception. + ErrorCode() string + // ErrorMessage returns the error message for the API exception. + ErrorMessage() string + // ErrorFault returns the fault for the API exception. + ErrorFault() ErrorFault +} + +// GenericAPIError provides a generic concrete API error type that SDKs can use +// to deserialize error responses into. Should be used for unmodeled or untyped +// errors. +type GenericAPIError struct { + Code string + Message string + Fault ErrorFault +} + +// ErrorCode returns the error code for the API exception. +func (e *GenericAPIError) ErrorCode() string { return e.Code } + +// ErrorMessage returns the error message for the API exception. +func (e *GenericAPIError) ErrorMessage() string { return e.Message } + +// ErrorFault returns the fault for the API exception. +func (e *GenericAPIError) ErrorFault() ErrorFault { return e.Fault } + +func (e *GenericAPIError) Error() string { + return fmt.Sprintf("api error %s: %s", e.Code, e.Message) +} + +var _ APIError = (*GenericAPIError)(nil) + +// OperationError decorates an underlying error which occurred while invoking +// an operation with names of the operation and API. +type OperationError struct { + ServiceID string + OperationName string + Err error +} + +// Service returns the name of the API service the error occurred with. +func (e *OperationError) Service() string { return e.ServiceID } + +// Operation returns the name of the API operation the error occurred with. +func (e *OperationError) Operation() string { return e.OperationName } + +// Unwrap returns the nested error if any, or nil.
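+// +// For example (illustrative), callers can inspect the wrapped error with the +// standard errors package: +// +//	var oe *OperationError +//	if errors.As(err, &oe) { +//		log.Printf("service %s, operation %s failed: %v", oe.Service(), oe.Operation(), oe.Unwrap()) +//	}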
+func (e *OperationError) Unwrap() error { return e.Err } + +func (e *OperationError) Error() string { + return fmt.Sprintf("operation error %s: %s, %v", e.ServiceID, e.OperationName, e.Err) +} + +// DeserializationError provides a wrapper for an error that occurs during +// deserialization. +type DeserializationError struct { + Err error // original error + Snapshot []byte +} + +// Error returns a formatted error for DeserializationError +func (e *DeserializationError) Error() string { + const msg = "deserialization failed" + if e.Err == nil { + return msg + } + return fmt.Sprintf("%s, %v", msg, e.Err) +} + +// Unwrap returns the underlying Error in DeserializationError +func (e *DeserializationError) Unwrap() error { return e.Err } + +// ErrorFault provides the type for a Smithy API error fault. +type ErrorFault int + +// ErrorFault enumeration values +const ( + FaultUnknown ErrorFault = iota + FaultServer + FaultClient +) + +func (f ErrorFault) String() string { + switch f { + case FaultServer: + return "server" + case FaultClient: + return "client" + default: + return "unknown" + } +} + +// SerializationError represents an error that occurred while attempting to serialize a request +type SerializationError struct { + Err error // original error +} + +// Error returns a formatted error for SerializationError +func (e *SerializationError) Error() string { + const msg = "serialization failed" + if e.Err == nil { + return msg + } + return fmt.Sprintf("%s: %v", msg, e.Err) +} + +// Unwrap returns the underlying Error in SerializationError +func (e *SerializationError) Unwrap() error { return e.Err } + +// CanceledError is the error that will be returned by an API request that was +// canceled. API operations given a Context may return this error when +// canceled. +type CanceledError struct { + Err error +} + +// CanceledError returns true to satisfy interfaces checking for canceled errors. +func (*CanceledError) CanceledError() bool { return true } + +// Unwrap returns the underlying error, if there was one. +func (e *CanceledError) Unwrap() error { + return e.Err +} + +func (e *CanceledError) Error() string { + return fmt.Sprintf("canceled, %v", e.Err) +} diff --git a/vendor/github.com/aws/smithy-go/go_module_metadata.go b/vendor/github.com/aws/smithy-go/go_module_metadata.go new file mode 100644 index 00000000000..d96be806df6 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package smithy + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.16.0" diff --git a/vendor/github.com/aws/smithy-go/internal/sync/singleflight/LICENSE b/vendor/github.com/aws/smithy-go/internal/sync/singleflight/LICENSE new file mode 100644 index 00000000000..fe6a62006a5 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/internal/sync/singleflight/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/aws/smithy-go/internal/sync/singleflight/docs.go b/vendor/github.com/aws/smithy-go/internal/sync/singleflight/docs.go new file mode 100644 index 00000000000..9c9d02b94b9 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/internal/sync/singleflight/docs.go @@ -0,0 +1,8 @@ +// Package singleflight provides a duplicate function call suppression +// mechanism. This package is a fork of the Go golang.org/x/sync/singleflight +// package. The package is forked, because the package is a part of the unstable +// and unversioned golang.org/x/sync module. +// +// https://github.com/golang/sync/tree/67f06af15bc961c363a7260195bcd53487529a21/singleflight + +package singleflight diff --git a/vendor/github.com/aws/smithy-go/internal/sync/singleflight/singleflight.go b/vendor/github.com/aws/smithy-go/internal/sync/singleflight/singleflight.go new file mode 100644 index 00000000000..e8a1b17d564 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/internal/sync/singleflight/singleflight.go @@ -0,0 +1,210 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package singleflight + +import ( + "bytes" + "errors" + "fmt" + "runtime" + "runtime/debug" + "sync" +) + +// errGoexit indicates the runtime.Goexit was called in +// the user given function. +var errGoexit = errors.New("runtime.Goexit was called") + +// A panicError is an arbitrary value recovered from a panic +// with the stack trace during the execution of the given function. +type panicError struct { + value interface{} + stack []byte +} + +// Error implements error interface. +func (p *panicError) Error() string { + return fmt.Sprintf("%v\n\n%s", p.value, p.stack) +} + +func newPanicError(v interface{}) error { + stack := debug.Stack() + + // The first line of the stack trace is of the form "goroutine N [status]:" + // but by the time the panic reaches Do the goroutine may no longer exist + // and its status will have changed. Trim out the misleading line. + if line := bytes.IndexByte(stack[:], '\n'); line >= 0 { + stack = stack[line+1:] + } + return &panicError{value: v, stack: stack} +} + +// call is an in-flight or completed singleflight.Do call +type call struct { + wg sync.WaitGroup + + // These fields are written once before the WaitGroup is done + // and are only read after the WaitGroup is done. + val interface{} + err error + + // forgotten indicates whether Forget was called with this call's key + // while the call was still in flight.
+ forgotten bool + + // These fields are read and written with the singleflight + // mutex held before the WaitGroup is done, and are read but + // not written after the WaitGroup is done. + dups int + chans []chan<- Result +} + +// Group represents a class of work and forms a namespace in +// which units of work can be executed with duplicate suppression. +type Group struct { + mu sync.Mutex // protects m + m map[string]*call // lazily initialized +} + +// Result holds the results of Do, so they can be passed +// on a channel. +type Result struct { + Val interface{} + Err error + Shared bool +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +// The return value shared indicates whether v was given to multiple callers. +func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + g.mu.Unlock() + c.wg.Wait() + + if e, ok := c.err.(*panicError); ok { + panic(e) + } else if c.err == errGoexit { + runtime.Goexit() + } + return c.val, c.err, true + } + c := new(call) + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + g.doCall(c, key, fn) + return c.val, c.err, c.dups > 0 +} + +// DoChan is like Do but returns a channel that will receive the +// results when they are ready. +// +// The returned channel will not be closed. +func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { + ch := make(chan Result, 1) + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + c.chans = append(c.chans, ch) + g.mu.Unlock() + return ch + } + c := &call{chans: []chan<- Result{ch}} + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + go g.doCall(c, key, fn) + + return ch +} + +// doCall handles the single call for a key. +func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { + normalReturn := false + recovered := false + + // use double-defer to distinguish panic from runtime.Goexit, + // for more details see https://golang.org/cl/134395 + defer func() { + // the given function invoked runtime.Goexit + if !normalReturn && !recovered { + c.err = errGoexit + } + + c.wg.Done() + g.mu.Lock() + defer g.mu.Unlock() + if !c.forgotten { + delete(g.m, key) + } + + if e, ok := c.err.(*panicError); ok { + // In order to prevent the waiting channels from being blocked forever, + // we need to ensure that this panic cannot be recovered. + if len(c.chans) > 0 { + go panic(e) + select {} // Keep this goroutine around so that it will appear in the crash dump. + } else { + panic(e) + } + } else if c.err == errGoexit { + // Already in the process of goexit, no need to call again + } else { + // Normal return + for _, ch := range c.chans { + ch <- Result{c.val, c.err, c.dups > 0} + } + } + }() + + func() { + defer func() { + if !normalReturn { + // Ideally, we would wait to take a stack trace until we've determined + // whether this is a panic or a runtime.Goexit. + // + // Unfortunately, the only way we can distinguish the two is to see + // whether the recover stopped the goroutine from terminating, and by + // the time we know that, the part of the stack trace relevant to the + // panic has been discarded.
+ if r := recover(); r != nil { + c.err = newPanicError(r) + } + } + }() + + c.val, c.err = fn() + normalReturn = true + }() + + if !normalReturn { + recovered = true + } +} + +// Forget tells the singleflight to forget about a key. Future calls +// to Do for this key will call the function rather than waiting for +// an earlier call to complete. +func (g *Group) Forget(key string) { + g.mu.Lock() + if c, ok := g.m[key]; ok { + c.forgotten = true + } + delete(g.m, key) + g.mu.Unlock() +} diff --git a/vendor/github.com/aws/smithy-go/io/byte.go b/vendor/github.com/aws/smithy-go/io/byte.go new file mode 100644 index 00000000000..f8417c15b85 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/io/byte.go @@ -0,0 +1,12 @@ +package io + +const ( + // Byte is 8 bits + Byte int64 = 1 + // KibiByte (KiB) is 1024 Bytes + KibiByte = Byte * 1024 + // MebiByte (MiB) is 1024 KiB + MebiByte = KibiByte * 1024 + // GibiByte (GiB) is 1024 MiB + GibiByte = MebiByte * 1024 +) diff --git a/vendor/github.com/aws/smithy-go/io/doc.go b/vendor/github.com/aws/smithy-go/io/doc.go new file mode 100644 index 00000000000..a6a33eaf567 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/io/doc.go @@ -0,0 +1,2 @@ +// Package io provides utilities for Smithy generated API clients. +package io diff --git a/vendor/github.com/aws/smithy-go/io/reader.go b/vendor/github.com/aws/smithy-go/io/reader.go new file mode 100644 index 00000000000..07063f2960d --- /dev/null +++ b/vendor/github.com/aws/smithy-go/io/reader.go @@ -0,0 +1,16 @@ +package io + +import ( + "io" +) + +// ReadSeekNopCloser wraps an io.ReadSeeker with an additional Close method +// that does nothing. +type ReadSeekNopCloser struct { + io.ReadSeeker +} + +// Close does nothing. +func (ReadSeekNopCloser) Close() error { + return nil +} diff --git a/vendor/github.com/aws/smithy-go/io/ringbuffer.go b/vendor/github.com/aws/smithy-go/io/ringbuffer.go new file mode 100644 index 00000000000..06b476add8a --- /dev/null +++ b/vendor/github.com/aws/smithy-go/io/ringbuffer.go @@ -0,0 +1,94 @@ +package io + +import ( + "bytes" + "io" +) + +// RingBuffer struct satisfies the io.ReadWriter interface. +// +// RingBuffer is a revolving buffer data structure, which can be used to store snapshots of data in a +// revolving window. +type RingBuffer struct { + slice []byte + start int + end int + size int +} + +// NewRingBuffer method takes in a byte slice as an input and returns a RingBuffer. +func NewRingBuffer(slice []byte) *RingBuffer { + ringBuf := RingBuffer{ + slice: slice, + } + return &ringBuf +} + +// Write method inserts the elements in a byte slice, and returns the number of bytes written along with any error. +func (r *RingBuffer) Write(p []byte) (int, error) { + for _, b := range p { + // check if end points to invalid index, we need to circle back + if r.end == len(r.slice) { + r.end = 0 + } + // check if start points to invalid index, we need to circle back + if r.start == len(r.slice) { + r.start = 0 + } + // if ring buffer is filled, increment the start index + if r.size == len(r.slice) { + r.size-- + r.start++ + } + + r.slice[r.end] = b + r.end++ + r.size++ + } + return len(p), nil +} + +// Read copies the data on the ring buffer into the byte slice provided to the method. +// Returns the read count along with any error encountered while reading. +func (r *RingBuffer) Read(p []byte) (int, error) { + // readCount keeps track of the number of bytes read + var readCount int + for j := 0; j < len(p); j++ { + // if ring buffer is empty or completely read + // return EOF error.
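+ // (for example, after writing three bytes to a three-byte buffer and + // reading them all back, a subsequent Read returns 0, io.EOF)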
+ if r.size == 0 { + return readCount, io.EOF + } + + if r.start == len(r.slice) { + r.start = 0 + } + + p[j] = r.slice[r.start] + readCount++ + // increment the start pointer for ring buffer + r.start++ + // decrement the size of ring buffer + r.size-- + } + return readCount, nil +} + +// Len returns the number of unread bytes in the buffer. +func (r *RingBuffer) Len() int { + return r.size +} + +// Bytes returns a copy of the RingBuffer's bytes. +func (r RingBuffer) Bytes() []byte { + var b bytes.Buffer + io.Copy(&b, &r) + return b.Bytes() +} + +// Reset resets the ring buffer. +func (r *RingBuffer) Reset() { + *r = RingBuffer{ + slice: r.slice, + } +} diff --git a/vendor/github.com/aws/smithy-go/local-mod-replace.sh b/vendor/github.com/aws/smithy-go/local-mod-replace.sh new file mode 100644 index 00000000000..800bf376954 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/local-mod-replace.sh @@ -0,0 +1,39 @@ +#!/usr/bin/env bash + +PROJECT_DIR="" +SMITHY_SOURCE_DIR=$(cd `dirname $0` && pwd) + +usage() { + echo "Usage: $0 [-s SMITHY_SOURCE_DIR] [-d PROJECT_DIR]" 1>&2 + exit 1 +} + +while getopts "hs:d:" options; do + case "${options}" in + s) + SMITHY_SOURCE_DIR=${OPTARG} + if [ "$SMITHY_SOURCE_DIR" == "" ]; then + echo "path to smithy-go source directory is required" || exit + usage + fi + ;; + d) + PROJECT_DIR=${OPTARG} + ;; + h) + usage + ;; + *) + usage + ;; + esac +done + +if [ "$PROJECT_DIR" != "" ]; then + cd $PROJECT_DIR || exit +fi + +go mod graph | awk '{print $1}' | cut -d '@' -f 1 | sort | uniq | grep "github.com/aws/smithy-go" | while read x; do + repPath=${x/github.com\/aws\/smithy-go/${SMITHY_SOURCE_DIR}} + echo -replace $x=$repPath +done | xargs go mod edit diff --git a/vendor/github.com/aws/smithy-go/logging/logger.go b/vendor/github.com/aws/smithy-go/logging/logger.go new file mode 100644 index 00000000000..2071924bd30 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/logging/logger.go @@ -0,0 +1,82 @@ +package logging + +import ( + "context" + "io" + "log" +) + +// Classification is the type of the log entry's classification name. +type Classification string + +// Set of standard classifications that can be used by clients and middleware +const ( + Warn Classification = "WARN" + Debug Classification = "DEBUG" +) + +// Logger is an interface for logging entries at certain classifications. +type Logger interface { + // Logf is expected to support the standard fmt package "verbs". + Logf(classification Classification, format string, v ...interface{}) +} + +// LoggerFunc is a wrapper around a function to satisfy the Logger interface. +type LoggerFunc func(classification Classification, format string, v ...interface{}) + +// Logf delegates the logging request to the wrapped function. +func (f LoggerFunc) Logf(classification Classification, format string, v ...interface{}) { + f(classification, format, v...) +} + +// ContextLogger is an optional interface a Logger implementation may expose that provides +// the ability to create context aware log entries. +type ContextLogger interface { + WithContext(context.Context) Logger +} + +// WithContext will pass the provided context to logger if it implements the ContextLogger interface and return the resulting +// logger. Otherwise the logger will be returned as is. As a special case if a nil logger is provided, a Nop logger will +// be returned to the caller.
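+// +// For example (illustrative; baseLogger and attempt are hypothetical values): +// +//	logger := WithContext(ctx, baseLogger) +//	logger.Logf(Debug, "retrying request, attempt %d", attempt)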
+func WithContext(ctx context.Context, logger Logger) Logger { + if logger == nil { + return Nop{} + } + + cl, ok := logger.(ContextLogger) + if !ok { + return logger + } + + return cl.WithContext(ctx) +} + +// Nop is a Logger implementation that simply does not perform any logging. +type Nop struct{} + +// Logf simply returns without performing any action +func (n Nop) Logf(Classification, string, ...interface{}) { + return +} + +// StandardLogger is a Logger implementation that wraps the standard library logger, and delegates logging to its +// Printf method. +type StandardLogger struct { + Logger *log.Logger +} + +// Logf logs the given classification and message to the underlying logger. +func (s StandardLogger) Logf(classification Classification, format string, v ...interface{}) { + if len(classification) != 0 { + format = string(classification) + " " + format + } + + s.Logger.Printf(format, v...) +} + +// NewStandardLogger returns a new StandardLogger +func NewStandardLogger(writer io.Writer) *StandardLogger { + return &StandardLogger{ + Logger: log.New(writer, "SDK ", log.LstdFlags), + } +} diff --git a/vendor/github.com/aws/smithy-go/middleware/doc.go b/vendor/github.com/aws/smithy-go/middleware/doc.go new file mode 100644 index 00000000000..9858928a7f8 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/middleware/doc.go @@ -0,0 +1,67 @@ +// Package middleware provides transport agnostic middleware for decorating SDK +// handlers. +// +// The Smithy middleware stack provides ordered behavior to be invoked on an +// underlying handler. The stack is separated into steps that are invoked in a +// static order. A step is a collection of middleware that are injected into an +// ordered list defined by the user. The user may add, insert, swap, and remove a +// step's middleware. When the stack is invoked the step middleware become static, +// and their order cannot be modified. +// +// A stack and its step middleware are **not** safe to modify concurrently. +// +// A stack will use the ordered list of middleware to decorate an underlying +// handler. A handler could be something like an HTTP Client that round trips an +// API operation over HTTP. +// +// Smithy Middleware Stack +// +// A Stack is a collection of middleware that wrap a handler. The stack can be +// broken down into discrete steps. Each step may contain zero or more middleware +// specific to that stack's step. +// +// A Stack Step is a predefined set of middleware that are invoked in a static +// order by the Stack. These steps represent fixed points in the middleware stack +// for organizing specific behavior, such as serialize and build. A Stack Step is +// composed of zero or more middleware that are specific to that step. A step may +// define its own set of input/output parameters the generic input/output +// parameters are cast from. A step calls its middleware recursively, before +// calling the next step in the stack returning the result or error of the step +// middleware decorating the underlying handler. +// +// * Initialize: Prepares the input, and sets any default parameters as needed, +// (e.g. idempotency token, and presigned URLs). +// +// * Serialize: Serializes the prepared input into a data structure that can be +// consumed by the target transport's message, (e.g. REST-JSON serialization). +// +// * Build: Adds additional metadata to the serialized transport message, (e.g. +// HTTP's Content-Length header, or body checksum).
Decorations and +// modifications to the message should be copied to all message attempts. +// +// * Finalize: Performs final preparations needed before sending the message. The +// message should already be complete by this stage, and is only altered to +// meet the expectations of the recipient, (e.g. Retry and AWS SigV4 request +// signing). +// +// * Deserialize: Reacts to the handler's response returned by the recipient of +// the request message. Deserializes the response into a structured type or +// error above stacks can react to. +// +// Adding Middleware to a Stack Step +// +// Middleware can be added to a step front or back, or relative, by name, to an +// existing middleware in that step. If a middleware does not have a name, a +// unique name will be generated for the middleware and added to the step. +// +// // Create middleware stack +// stack := middleware.NewStack() +// +// // Add middleware to stack steps +// stack.Initialize.Add(paramValidationMiddleware, middleware.After) +// stack.Serialize.Add(marshalOperationFoo, middleware.After) +// stack.Deserialize.Add(unmarshalOperationFoo, middleware.After) +// +// // Invoke middleware on handler. +// resp, err := stack.HandleMiddleware(ctx, req.Input, clientHandler) +package middleware diff --git a/vendor/github.com/aws/smithy-go/middleware/logging.go b/vendor/github.com/aws/smithy-go/middleware/logging.go new file mode 100644 index 00000000000..c2f0dbb6bda --- /dev/null +++ b/vendor/github.com/aws/smithy-go/middleware/logging.go @@ -0,0 +1,46 @@ +package middleware + +import ( + "context" + + "github.com/aws/smithy-go/logging" +) + +// loggerKey is the context value key for which the logger is associated with. +type loggerKey struct{} + +// GetLogger takes a context to retrieve a Logger from. If no logger is present on the context a logging.Nop logger +// is returned. If the logger retrieved from context supports the ContextLogger interface, the context will be passed +// to the WithContext method and the resulting logger will be returned. Otherwise the stored logger is returned as is. +func GetLogger(ctx context.Context) logging.Logger { + logger, ok := ctx.Value(loggerKey{}).(logging.Logger) + if !ok || logger == nil { + return logging.Nop{} + } + + return logging.WithContext(ctx, logger) +} + +// SetLogger sets the provided logger value on the provided ctx. +func SetLogger(ctx context.Context, logger logging.Logger) context.Context { + return context.WithValue(ctx, loggerKey{}, logger) +} + +type setLogger struct { + Logger logging.Logger +} + +// AddSetLoggerMiddleware adds a middleware that will add the provided logger to the middleware context. +func AddSetLoggerMiddleware(stack *Stack, logger logging.Logger) error { + return stack.Initialize.Add(&setLogger{Logger: logger}, After) +} + +func (a *setLogger) ID() string { + return "SetLogger" +} + +func (a *setLogger) HandleInitialize(ctx context.Context, in InitializeInput, next InitializeHandler) ( + out InitializeOutput, metadata Metadata, err error, +) { + return next.HandleInitialize(SetLogger(ctx, a.Logger), in) +} diff --git a/vendor/github.com/aws/smithy-go/middleware/metadata.go b/vendor/github.com/aws/smithy-go/middleware/metadata.go new file mode 100644 index 00000000000..7bb7dbcf5a0 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/middleware/metadata.go @@ -0,0 +1,65 @@ +package middleware + +// MetadataReader provides an interface for reading metadata from the +// underlying metadata container.
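+// +// As an illustrative sketch (the key type is invented), metadata is keyed by +// comparable, typically unexported, types, and Set must be called on an +// addressable Metadata value: +// +//	type retryCountKey struct{} +// +//	md.Set(retryCountKey{}, 3) +//	count, ok := md.Get(retryCountKey{}).(int)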
+type MetadataReader interface { + Get(key interface{}) interface{} +} + +// Metadata provides storing and reading metadata values. Keys may be any +// comparable value type. Get and set will panic if key is not a comparable +// value type. +// +// Metadata uses lazy initialization, and Set method must be called as an +// addressable value, or pointer. Not doing so may cause key/value pair to not +// be set. +type Metadata struct { + values map[interface{}]interface{} +} + +// Get attempts to retrieve the value the key points to. Returns nil if the +// key was not found. +// +// Panics if key type is not comparable. +func (m Metadata) Get(key interface{}) interface{} { + return m.values[key] +} + +// Clone creates a shallow copy of Metadata entries, returning a new Metadata +// value with the original entries copied into it. +func (m Metadata) Clone() Metadata { + vs := make(map[interface{}]interface{}, len(m.values)) + for k, v := range m.values { + vs[k] = v + } + + return Metadata{ + values: vs, + } +} + +// Set stores the value pointed to by the key. If a value already exists at +// that key it will be replaced with the new value. +// +// Set method must be called as an addressable value, or pointer. If Set is not +// called as an addressable value or pointer, the key value pair being set may +// be lost. +// +// Panics if the key type is not comparable. +func (m *Metadata) Set(key, value interface{}) { + if m.values == nil { + m.values = map[interface{}]interface{}{} + } + m.values[key] = value +} + +// Has returns whether the key exists in the metadata. +// +// Panics if the key type is not comparable. +func (m Metadata) Has(key interface{}) bool { + if m.values == nil { + return false + } + _, ok := m.values[key] + return ok +} diff --git a/vendor/github.com/aws/smithy-go/middleware/middleware.go b/vendor/github.com/aws/smithy-go/middleware/middleware.go new file mode 100644 index 00000000000..803b7c75184 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/middleware/middleware.go @@ -0,0 +1,71 @@ +package middleware + +import ( + "context" +) + +// Handler provides the interface for performing the logic to obtain an output, +// or error for the given input. +type Handler interface { + // Handle performs logic to obtain an output for the given input. Handler + // should be decorated with middleware to perform input specific behavior. + Handle(ctx context.Context, input interface{}) ( + output interface{}, metadata Metadata, err error, + ) +} + +// HandlerFunc provides a wrapper around a function pointer to be used as a +// middleware handler. +type HandlerFunc func(ctx context.Context, input interface{}) ( + output interface{}, metadata Metadata, err error, +) + +// Handle invokes the underlying function, returning the result. +func (fn HandlerFunc) Handle(ctx context.Context, input interface{}) ( + output interface{}, metadata Metadata, err error, +) { + return fn(ctx, input) +} + +// Middleware provides the interface to call handlers in a chain. +type Middleware interface { + // ID provides a unique identifier for the middleware. + ID() string + + // Performs the middleware's handling of the input, returning the output, + // or error. The middleware can invoke the next Handler if handling should + // continue. + HandleMiddleware(ctx context.Context, input interface{}, next Handler) ( + output interface{}, metadata Metadata, err error, + ) +} + +// decoratedHandler wraps a middleware in order to call the next handler in +// the chain.
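+// +// For illustration (m1 and m2 are hypothetical middleware values), the first +// middleware passed to DecorateHandler runs first: +// +//	h = DecorateHandler(h, m1, m2) +//	// invocation order: m1 -> m2 -> h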
+type decoratedHandler struct { + // The next handler to be called. + Next Handler + + // The current middleware decorating the handler. + With Middleware +} + +// Handle implements the Handler interface to handle an operation invocation. +func (m decoratedHandler) Handle(ctx context.Context, input interface{}) ( + output interface{}, metadata Metadata, err error, +) { + return m.With.HandleMiddleware(ctx, input, m.Next) +} + +// DecorateHandler decorates a handler with middleware, wrapping the handler +// with the middleware. +func DecorateHandler(h Handler, with ...Middleware) Handler { + for i := len(with) - 1; i >= 0; i-- { + h = decoratedHandler{ + Next: h, + With: with[i], + } + } + + return h +} diff --git a/vendor/github.com/aws/smithy-go/middleware/ordered_group.go b/vendor/github.com/aws/smithy-go/middleware/ordered_group.go new file mode 100644 index 00000000000..4b195308c59 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/middleware/ordered_group.go @@ -0,0 +1,268 @@ +package middleware + +import "fmt" + +// RelativePosition specifies the relative position of a middleware +// in an ordered group. +type RelativePosition int + +// Relative position for middleware in steps. +const ( + After RelativePosition = iota + Before +) + +type ider interface { + ID() string +} + +// orderedIDs provides an ordered collection of items with relative ordering +// by name. +type orderedIDs struct { + order *relativeOrder + items map[string]ider +} + +const baseOrderedItems = 5 + +func newOrderedIDs() *orderedIDs { + return &orderedIDs{ + order: newRelativeOrder(), + items: make(map[string]ider, baseOrderedItems), + } +} + +// Add injects the item to the relative position of the item group. Returns an +// error if the item already exists. +func (g *orderedIDs) Add(m ider, pos RelativePosition) error { + id := m.ID() + if len(id) == 0 { + return fmt.Errorf("empty ID, ID must not be empty") + } + + if err := g.order.Add(pos, id); err != nil { + return err + } + + g.items[id] = m + return nil +} + +// Insert injects the item relative to an existing item id. Returns an error if +// the original item does not exist, or the item being added already exists. +func (g *orderedIDs) Insert(m ider, relativeTo string, pos RelativePosition) error { + if len(m.ID()) == 0 { + return fmt.Errorf("insert ID must not be empty") + } + if len(relativeTo) == 0 { + return fmt.Errorf("relative to ID must not be empty") + } + + if err := g.order.Insert(relativeTo, pos, m.ID()); err != nil { + return err + } + + g.items[m.ID()] = m + return nil +} + +// Get returns the ider identified by id. If ider is not present, returns false. +func (g *orderedIDs) Get(id string) (ider, bool) { + v, ok := g.items[id] + return v, ok +} + +// Swap removes the item by id, replacing it with the new item. Returns an error +// if the original item doesn't exist. +func (g *orderedIDs) Swap(id string, m ider) (ider, error) { + if len(id) == 0 { + return nil, fmt.Errorf("swap from ID must not be empty") + } + + iderID := m.ID() + if len(iderID) == 0 { + return nil, fmt.Errorf("swap to ID must not be empty") + } + + if err := g.order.Swap(id, iderID); err != nil { + return nil, err + } + + removed := g.items[id] + + delete(g.items, id) + g.items[iderID] = m + + return removed, nil +} + +// Remove removes the item by id. Returns an error if the item +// doesn't exist.
+func (g *orderedIDs) Remove(id string) (ider, error) { + if len(id) == 0 { + return nil, fmt.Errorf("remove ID must not be empty") + } + + if err := g.order.Remove(id); err != nil { + return nil, err + } + + removed := g.items[id] + delete(g.items, id) + return removed, nil +} + +func (g *orderedIDs) List() []string { + items := g.order.List() + order := make([]string, len(items)) + copy(order, items) + return order +} + +// Clear removes all entries and slots. +func (g *orderedIDs) Clear() { + g.order.Clear() + g.items = map[string]ider{} +} + +// GetOrder returns the item in the order it should be invoked in. +func (g *orderedIDs) GetOrder() []interface{} { + order := g.order.List() + ordered := make([]interface{}, len(order)) + for i := 0; i < len(order); i++ { + ordered[i] = g.items[order[i]] + } + + return ordered +} + +// relativeOrder provides ordering of items +type relativeOrder struct { + order []string +} + +func newRelativeOrder() *relativeOrder { + return &relativeOrder{ + order: make([]string, 0, baseOrderedItems), + } +} + +// Add inserts an item into the order relative to the position provided. +func (s *relativeOrder) Add(pos RelativePosition, ids ...string) error { + if len(ids) == 0 { + return nil + } + + for _, id := range ids { + if _, ok := s.has(id); ok { + return fmt.Errorf("already exists, %v", id) + } + } + + switch pos { + case Before: + return s.insert(0, Before, ids...) + + case After: + s.order = append(s.order, ids...) + + default: + return fmt.Errorf("invalid position, %v", int(pos)) + } + + return nil +} + +// Insert injects an item before or after the relative item. Returns +// an error if the relative item does not exist. +func (s *relativeOrder) Insert(relativeTo string, pos RelativePosition, ids ...string) error { + if len(ids) == 0 { + return nil + } + + for _, id := range ids { + if _, ok := s.has(id); ok { + return fmt.Errorf("already exists, %v", id) + } + } + + i, ok := s.has(relativeTo) + if !ok { + return fmt.Errorf("not found, %v", relativeTo) + } + + return s.insert(i, pos, ids...) +} + +// Swap will replace the item id with the to item. Returns an +// error if the original item id does not exist. Allows swapping out an +// item for another item with the same id. +func (s *relativeOrder) Swap(id, to string) error { + i, ok := s.has(id) + if !ok { + return fmt.Errorf("not found, %v", id) + } + + if _, ok = s.has(to); ok && id != to { + return fmt.Errorf("already exists, %v", to) + } + + s.order[i] = to + return nil +} + +func (s *relativeOrder) Remove(id string) error { + i, ok := s.has(id) + if !ok { + return fmt.Errorf("not found, %v", id) + } + + s.order = append(s.order[:i], s.order[i+1:]...) + return nil +} + +func (s *relativeOrder) List() []string { + return s.order +} + +func (s *relativeOrder) Clear() { + s.order = s.order[0:0] +} + +func (s *relativeOrder) insert(i int, pos RelativePosition, ids ...string) error { + switch pos { + case Before: + n := len(ids) + var src []string + if n <= cap(s.order)-len(s.order) { + s.order = s.order[:len(s.order)+n] + src = s.order + } else { + src = s.order + s.order = make([]string, len(s.order)+n) + copy(s.order[:i], src[:i]) // only when allocating a new slice do we need to copy the front half + } + copy(s.order[i+n:], src[i:]) + copy(s.order[i:], ids) + case After: + if i == len(s.order)-1 || len(s.order) == 0 { + s.order = append(s.order, ids...) + } else { + s.order = append(s.order[:i+1], append(ids, s.order[i+1:]...)...)
+ } + + default: + return fmt.Errorf("invalid position, %v", int(pos)) + } + + return nil +} + +func (s *relativeOrder) has(id string) (i int, found bool) { + for i := 0; i < len(s.order); i++ { + if s.order[i] == id { + return i, true + } + } + return 0, false +} diff --git a/vendor/github.com/aws/smithy-go/middleware/stack.go b/vendor/github.com/aws/smithy-go/middleware/stack.go new file mode 100644 index 00000000000..45ccb5b93c9 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/middleware/stack.go @@ -0,0 +1,209 @@ +package middleware + +import ( + "context" + "io" + "strings" +) + +// Stack provides protocol and transport agnostic set of middleware split into +// distinct steps. Steps have specific transitions between them, that are +// managed by the individual step. +// +// Steps are composed as middleware around the underlying handler in the +// following order: +// +// Initialize -> Serialize -> Build -> Finalize -> Deserialize -> Handler +// +// Any middleware within the chain may choose to stop and return an error or +// response. Since the middleware decorate the handler like a call stack, each +// middleware will receive the result of the next middleware in the chain. +// Middleware that does not need to react to an input, or result must forward +// along the input down the chain, or return the result back up the chain. +// +// Initialize <- Serialize -> Build -> Finalize <- Deserialize <- Handler +type Stack struct { + // Initialize prepares the input, and sets any default parameters as + // needed, (e.g. idempotency token, and presigned URLs). + // + // Takes Input Parameters, and returns result or error. + // + // Receives result or error from Serialize step. + Initialize *InitializeStep + + // Serialize serializes the prepared input into a data structure that can be consumed + // by the target transport's message, (e.g. REST-JSON serialization) + // + // Converts Input Parameters into a Request, and returns the result or error. + // + // Receives result or error from Build step. + Serialize *SerializeStep + + // Build adds additional metadata to the serialized transport message + // (e.g. HTTP's Content-Length header, or body checksum). Decorations and + // modifications to the message should be copied to all message attempts. + // + // Takes Request, and returns result or error. + // + // Receives result or error from Finalize step. + Build *BuildStep + + // Finalize performs final preparations needed before sending the message. The + // message should already be complete by this stage, and is only alternated + // to meet the expectations of the recipient (e.g. Retry and AWS SigV4 + // request signing) + // + // Takes Request, and returns result or error. + // + // Receives result or error from Deserialize step. + Finalize *FinalizeStep + + // Deserialize reacts to the handler's response returned by the recipient of the request + // message. Deserializes the response into a structured type or error above + // stacks can react to. + // + // Should only forward Request to underlying handler. + // + // Takes Request, and returns result or error. + // + // Receives raw response, or error from underlying handler. + Deserialize *DeserializeStep + + id string +} + +// NewStack returns an initialize empty stack. 
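+//
+// A minimal construction sketch (myRequest and the middleware values are
+// hypothetical):
+//
+//	stack := NewStack("MyOperation", func() interface{} { return &myRequest{} })
+//	_ = stack.Initialize.Add(defaultsMiddleware, After)
+//	_ = stack.Finalize.Add(signingMiddleware, After)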
+func NewStack(id string, newRequestFn func() interface{}) *Stack { + return &Stack{ + id: id, + Initialize: NewInitializeStep(), + Serialize: NewSerializeStep(newRequestFn), + Build: NewBuildStep(), + Finalize: NewFinalizeStep(), + Deserialize: NewDeserializeStep(), + } +} + +// ID returns the unique ID for the stack as a middleware. +func (s *Stack) ID() string { return s.id } + +// HandleMiddleware invokes the middleware stack decorating the next handler. +// Each step of stack will be invoked in order before calling the next step. +// With the next handler call last. +// +// The input value must be the input parameters of the operation being +// performed. +// +// Will return the result of the operation, or error. +func (s *Stack) HandleMiddleware(ctx context.Context, input interface{}, next Handler) ( + output interface{}, metadata Metadata, err error, +) { + h := DecorateHandler(next, + s.Initialize, + s.Serialize, + s.Build, + s.Finalize, + s.Deserialize, + ) + + return h.Handle(ctx, input) +} + +// List returns a list of all middleware in the stack by step. +func (s *Stack) List() []string { + var l []string + l = append(l, s.id) + + l = append(l, s.Initialize.ID()) + l = append(l, s.Initialize.List()...) + + l = append(l, s.Serialize.ID()) + l = append(l, s.Serialize.List()...) + + l = append(l, s.Build.ID()) + l = append(l, s.Build.List()...) + + l = append(l, s.Finalize.ID()) + l = append(l, s.Finalize.List()...) + + l = append(l, s.Deserialize.ID()) + l = append(l, s.Deserialize.List()...) + + return l +} + +func (s *Stack) String() string { + var b strings.Builder + + w := &indentWriter{w: &b} + + w.WriteLine(s.id) + w.Push() + + writeStepItems(w, s.Initialize) + writeStepItems(w, s.Serialize) + writeStepItems(w, s.Build) + writeStepItems(w, s.Finalize) + writeStepItems(w, s.Deserialize) + + return b.String() +} + +type stackStepper interface { + ID() string + List() []string +} + +func writeStepItems(w *indentWriter, s stackStepper) { + type lister interface { + List() []string + } + + w.WriteLine(s.ID()) + w.Push() + + defer w.Pop() + + // ignore stack to prevent circular iterations + if _, ok := s.(*Stack); ok { + return + } + + for _, id := range s.List() { + w.WriteLine(id) + } +} + +type stringWriter interface { + io.Writer + WriteString(string) (int, error) + WriteRune(rune) (int, error) +} + +type indentWriter struct { + w stringWriter + depth int +} + +const indentDepth = "\t\t\t\t\t\t\t\t\t\t" + +func (w *indentWriter) Push() { + w.depth++ +} + +func (w *indentWriter) Pop() { + w.depth-- + if w.depth < 0 { + w.depth = 0 + } +} + +func (w *indentWriter) WriteLine(v string) { + w.w.WriteString(indentDepth[:w.depth]) + + v = strings.ReplaceAll(v, "\n", "\\n") + v = strings.ReplaceAll(v, "\r", "\\r") + + w.w.WriteString(v) + w.w.WriteRune('\n') +} diff --git a/vendor/github.com/aws/smithy-go/middleware/stack_values.go b/vendor/github.com/aws/smithy-go/middleware/stack_values.go new file mode 100644 index 00000000000..ef96009ba18 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/middleware/stack_values.go @@ -0,0 +1,100 @@ +package middleware + +import ( + "context" + "reflect" + "strings" +) + +// WithStackValue adds a key value pair to the context that is intended to be +// scoped to a stack. Use ClearStackValues to get a new context with all stack +// values cleared. 
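+//
+// Usage sketch (myKey is a hypothetical comparable key type):
+//
+//	type myKey struct{}
+//	ctx = WithStackValue(ctx, myKey{}, "value")
+//	v := GetStackValue(ctx, myKey{}) // "value"
+//	ctx = ClearStackValues(ctx)      // all stack values removed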
+func WithStackValue(ctx context.Context, key, value interface{}) context.Context { + md, _ := ctx.Value(stackValuesKey{}).(*stackValues) + + md = withStackValue(md, key, value) + return context.WithValue(ctx, stackValuesKey{}, md) +} + +// ClearStackValues returns a context without any stack values. +func ClearStackValues(ctx context.Context) context.Context { + return context.WithValue(ctx, stackValuesKey{}, nil) +} + +// GetStackValues returns the value pointed to by the key within the stack +// values, if it is present. +func GetStackValue(ctx context.Context, key interface{}) interface{} { + md, _ := ctx.Value(stackValuesKey{}).(*stackValues) + if md == nil { + return nil + } + + return md.Value(key) +} + +type stackValuesKey struct{} + +type stackValues struct { + key interface{} + value interface{} + parent *stackValues +} + +func withStackValue(parent *stackValues, key, value interface{}) *stackValues { + if key == nil { + panic("nil key") + } + if !reflect.TypeOf(key).Comparable() { + panic("key is not comparable") + } + return &stackValues{key: key, value: value, parent: parent} +} + +func (m *stackValues) Value(key interface{}) interface{} { + if key == m.key { + return m.value + } + + if m.parent == nil { + return nil + } + + return m.parent.Value(key) +} + +func (c *stackValues) String() string { + var str strings.Builder + + cc := c + for cc == nil { + str.WriteString("(" + + reflect.TypeOf(c.key).String() + + ": " + + stringify(cc.value) + + ")") + if cc.parent != nil { + str.WriteString(" -> ") + } + cc = cc.parent + } + str.WriteRune('}') + + return str.String() +} + +type stringer interface { + String() string +} + +// stringify tries a bit to stringify v, without using fmt, since we don't +// want context depending on the unicode tables. This is only used by +// *valueCtx.String(). +func stringify(v interface{}) string { + switch s := v.(type) { + case stringer: + return s.String() + case string: + return s + } + return "" +} diff --git a/vendor/github.com/aws/smithy-go/middleware/step_build.go b/vendor/github.com/aws/smithy-go/middleware/step_build.go new file mode 100644 index 00000000000..7e1d94caeef --- /dev/null +++ b/vendor/github.com/aws/smithy-go/middleware/step_build.go @@ -0,0 +1,211 @@ +package middleware + +import ( + "context" +) + +// BuildInput provides the input parameters for the BuildMiddleware to consume. +// BuildMiddleware may modify the Request value before forwarding the input +// along to the next BuildHandler. +type BuildInput struct { + Request interface{} +} + +// BuildOutput provides the result returned by the next BuildHandler. +type BuildOutput struct { + Result interface{} +} + +// BuildHandler provides the interface for the next handler the +// BuildMiddleware will call in the middleware chain. +type BuildHandler interface { + HandleBuild(ctx context.Context, in BuildInput) ( + out BuildOutput, metadata Metadata, err error, + ) +} + +// BuildMiddleware provides the interface for middleware specific to the +// serialize step. Delegates to the next BuildHandler for further +// processing. +type BuildMiddleware interface { + // Unique ID for the middleware in theBuildStep. The step does not allow + // duplicate IDs. + ID() string + + // Invokes the middleware behavior which must delegate to the next handler + // for the middleware chain to continue. The method must return a result or + // error to its caller. 
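+	//
+	// Sketch of a conforming implementation (addHeader is hypothetical):
+	//
+	//	func (m addHeader) HandleBuild(ctx context.Context, in BuildInput, next BuildHandler) (BuildOutput, Metadata, error) {
+	//		// decorate in.Request here, then delegate down the chain
+	//		return next.HandleBuild(ctx, in)
+	//	}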
+ HandleBuild(ctx context.Context, in BuildInput, next BuildHandler) ( + out BuildOutput, metadata Metadata, err error, + ) +} + +// BuildMiddlewareFunc returns a BuildMiddleware with the unique ID provided, +// and the func to be invoked. +func BuildMiddlewareFunc(id string, fn func(context.Context, BuildInput, BuildHandler) (BuildOutput, Metadata, error)) BuildMiddleware { + return buildMiddlewareFunc{ + id: id, + fn: fn, + } +} + +type buildMiddlewareFunc struct { + // Unique ID for the middleware. + id string + + // Middleware function to be called. + fn func(context.Context, BuildInput, BuildHandler) (BuildOutput, Metadata, error) +} + +// ID returns the unique ID for the middleware. +func (s buildMiddlewareFunc) ID() string { return s.id } + +// HandleBuild invokes the middleware Fn. +func (s buildMiddlewareFunc) HandleBuild(ctx context.Context, in BuildInput, next BuildHandler) ( + out BuildOutput, metadata Metadata, err error, +) { + return s.fn(ctx, in, next) +} + +var _ BuildMiddleware = (buildMiddlewareFunc{}) + +// BuildStep provides the ordered grouping of BuildMiddleware to be invoked on +// a handler. +type BuildStep struct { + ids *orderedIDs +} + +// NewBuildStep returns a BuildStep ready to have middleware for +// initialization added to it. +func NewBuildStep() *BuildStep { + return &BuildStep{ + ids: newOrderedIDs(), + } +} + +var _ Middleware = (*BuildStep)(nil) + +// ID returns the unique name of the step as a middleware. +func (s *BuildStep) ID() string { + return "Build stack step" +} + +// HandleMiddleware invokes the middleware by decorating the next handler +// provided. Returns the result of the middleware and handler being invoked. +// +// Implements Middleware interface. +func (s *BuildStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) ( + out interface{}, metadata Metadata, err error, +) { + order := s.ids.GetOrder() + + var h BuildHandler = buildWrapHandler{Next: next} + for i := len(order) - 1; i >= 0; i-- { + h = decoratedBuildHandler{ + Next: h, + With: order[i].(BuildMiddleware), + } + } + + sIn := BuildInput{ + Request: in, + } + + res, metadata, err := h.HandleBuild(ctx, sIn) + return res.Result, metadata, err +} + +// Get retrieves the middleware identified by id. If the middleware is not present, returns false. +func (s *BuildStep) Get(id string) (BuildMiddleware, bool) { + get, ok := s.ids.Get(id) + if !ok { + return nil, false + } + return get.(BuildMiddleware), ok +} + +// Add injects the middleware to the relative position of the middleware group. +// Returns an error if the middleware already exists. +func (s *BuildStep) Add(m BuildMiddleware, pos RelativePosition) error { + return s.ids.Add(m, pos) +} + +// Insert injects the middleware relative to an existing middleware id. +// Returns an error if the original middleware does not exist, or the middleware +// being added already exists. +func (s *BuildStep) Insert(m BuildMiddleware, relativeTo string, pos RelativePosition) error { + return s.ids.Insert(m, relativeTo, pos) +} + +// Swap removes the middleware by id, replacing it with the new middleware. +// Returns the middleware removed, or an error if the middleware to be removed +// doesn't exist. +func (s *BuildStep) Swap(id string, m BuildMiddleware) (BuildMiddleware, error) { + removed, err := s.ids.Swap(id, m) + if err != nil { + return nil, err + } + + return removed.(BuildMiddleware), nil +} + +// Remove removes the middleware by id. Returns error if the middleware +// doesn't exist. 
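+//
+// Step-manipulation sketch using BuildMiddlewareFunc (the ID and no-op
+// behavior are hypothetical):
+//
+//	m := BuildMiddlewareFunc("ContentLength", func(ctx context.Context, in BuildInput, next BuildHandler) (BuildOutput, Metadata, error) {
+//		return next.HandleBuild(ctx, in)
+//	})
+//	_ = step.Add(m, After)
+//	removed, err := step.Remove("ContentLength")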
+func (s *BuildStep) Remove(id string) (BuildMiddleware, error) { + removed, err := s.ids.Remove(id) + if err != nil { + return nil, err + } + + return removed.(BuildMiddleware), nil +} + +// List returns a list of the middleware in the step. +func (s *BuildStep) List() []string { + return s.ids.List() +} + +// Clear removes all middleware in the step. +func (s *BuildStep) Clear() { + s.ids.Clear() +} + +type buildWrapHandler struct { + Next Handler +} + +var _ BuildHandler = (*buildWrapHandler)(nil) + +// Implements BuildHandler, converts types and delegates to underlying +// generic handler. +func (w buildWrapHandler) HandleBuild(ctx context.Context, in BuildInput) ( + out BuildOutput, metadata Metadata, err error, +) { + res, metadata, err := w.Next.Handle(ctx, in.Request) + return BuildOutput{ + Result: res, + }, metadata, err +} + +type decoratedBuildHandler struct { + Next BuildHandler + With BuildMiddleware +} + +var _ BuildHandler = (*decoratedBuildHandler)(nil) + +func (h decoratedBuildHandler) HandleBuild(ctx context.Context, in BuildInput) ( + out BuildOutput, metadata Metadata, err error, +) { + return h.With.HandleBuild(ctx, in, h.Next) +} + +// BuildHandlerFunc provides a wrapper around a function to be used as a build middleware handler. +type BuildHandlerFunc func(context.Context, BuildInput) (BuildOutput, Metadata, error) + +// HandleBuild invokes the wrapped function with the provided arguments. +func (b BuildHandlerFunc) HandleBuild(ctx context.Context, in BuildInput) (BuildOutput, Metadata, error) { + return b(ctx, in) +} + +var _ BuildHandler = BuildHandlerFunc(nil) diff --git a/vendor/github.com/aws/smithy-go/middleware/step_deserialize.go b/vendor/github.com/aws/smithy-go/middleware/step_deserialize.go new file mode 100644 index 00000000000..44860721571 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/middleware/step_deserialize.go @@ -0,0 +1,217 @@ +package middleware + +import ( + "context" +) + +// DeserializeInput provides the input parameters for the DeserializeInput to +// consume. DeserializeMiddleware should not modify the Request, and instead +// forward it along to the next DeserializeHandler. +type DeserializeInput struct { + Request interface{} +} + +// DeserializeOutput provides the result returned by the next +// DeserializeHandler. The DeserializeMiddleware should deserialize the +// RawResponse into a Result that can be consumed by middleware higher up in +// the stack. +type DeserializeOutput struct { + RawResponse interface{} + Result interface{} +} + +// DeserializeHandler provides the interface for the next handler the +// DeserializeMiddleware will call in the middleware chain. +type DeserializeHandler interface { + HandleDeserialize(ctx context.Context, in DeserializeInput) ( + out DeserializeOutput, metadata Metadata, err error, + ) +} + +// DeserializeMiddleware provides the interface for middleware specific to the +// serialize step. Delegates to the next DeserializeHandler for further +// processing. +type DeserializeMiddleware interface { + // ID returns a unique ID for the middleware in the DeserializeStep. The step does not + // allow duplicate IDs. + ID() string + + // HandleDeserialize invokes the middleware behavior which must delegate to the next handler + // for the middleware chain to continue. The method must return a result or + // error to its caller. 
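+	//
+	// Implementations typically delegate first, then decode the raw
+	// response into a structured result (decode is hypothetical):
+	//
+	//	out, metadata, err = next.HandleDeserialize(ctx, in)
+	//	if err == nil {
+	//		out.Result, err = decode(out.RawResponse)
+	//	}
+	//	return out, metadata, err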
+ HandleDeserialize(ctx context.Context, in DeserializeInput, next DeserializeHandler) ( + out DeserializeOutput, metadata Metadata, err error, + ) +} + +// DeserializeMiddlewareFunc returns a DeserializeMiddleware with the unique ID +// provided, and the func to be invoked. +func DeserializeMiddlewareFunc(id string, fn func(context.Context, DeserializeInput, DeserializeHandler) (DeserializeOutput, Metadata, error)) DeserializeMiddleware { + return deserializeMiddlewareFunc{ + id: id, + fn: fn, + } +} + +type deserializeMiddlewareFunc struct { + // Unique ID for the middleware. + id string + + // Middleware function to be called. + fn func(context.Context, DeserializeInput, DeserializeHandler) ( + DeserializeOutput, Metadata, error, + ) +} + +// ID returns the unique ID for the middleware. +func (s deserializeMiddlewareFunc) ID() string { return s.id } + +// HandleDeserialize invokes the middleware Fn. +func (s deserializeMiddlewareFunc) HandleDeserialize(ctx context.Context, in DeserializeInput, next DeserializeHandler) ( + out DeserializeOutput, metadata Metadata, err error, +) { + return s.fn(ctx, in, next) +} + +var _ DeserializeMiddleware = (deserializeMiddlewareFunc{}) + +// DeserializeStep provides the ordered grouping of DeserializeMiddleware to be +// invoked on a handler. +type DeserializeStep struct { + ids *orderedIDs +} + +// NewDeserializeStep returns a DeserializeStep ready to have middleware for +// initialization added to it. +func NewDeserializeStep() *DeserializeStep { + return &DeserializeStep{ + ids: newOrderedIDs(), + } +} + +var _ Middleware = (*DeserializeStep)(nil) + +// ID returns the unique ID of the step as a middleware. +func (s *DeserializeStep) ID() string { + return "Deserialize stack step" +} + +// HandleMiddleware invokes the middleware by decorating the next handler +// provided. Returns the result of the middleware and handler being invoked. +// +// Implements Middleware interface. +func (s *DeserializeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) ( + out interface{}, metadata Metadata, err error, +) { + order := s.ids.GetOrder() + + var h DeserializeHandler = deserializeWrapHandler{Next: next} + for i := len(order) - 1; i >= 0; i-- { + h = decoratedDeserializeHandler{ + Next: h, + With: order[i].(DeserializeMiddleware), + } + } + + sIn := DeserializeInput{ + Request: in, + } + + res, metadata, err := h.HandleDeserialize(ctx, sIn) + return res.Result, metadata, err +} + +// Get retrieves the middleware identified by id. If the middleware is not present, returns false. +func (s *DeserializeStep) Get(id string) (DeserializeMiddleware, bool) { + get, ok := s.ids.Get(id) + if !ok { + return nil, false + } + return get.(DeserializeMiddleware), ok +} + +// Add injects the middleware to the relative position of the middleware group. +// Returns an error if the middleware already exists. +func (s *DeserializeStep) Add(m DeserializeMiddleware, pos RelativePosition) error { + return s.ids.Add(m, pos) +} + +// Insert injects the middleware relative to an existing middleware ID. +// Returns error if the original middleware does not exist, or the middleware +// being added already exists. +func (s *DeserializeStep) Insert(m DeserializeMiddleware, relativeTo string, pos RelativePosition) error { + return s.ids.Insert(m, relativeTo, pos) +} + +// Swap removes the middleware by id, replacing it with the new middleware. +// Returns the middleware removed, or error if the middleware to be removed +// doesn't exist. 
+func (s *DeserializeStep) Swap(id string, m DeserializeMiddleware) (DeserializeMiddleware, error) { + removed, err := s.ids.Swap(id, m) + if err != nil { + return nil, err + } + + return removed.(DeserializeMiddleware), nil +} + +// Remove removes the middleware by id. Returns error if the middleware +// doesn't exist. +func (s *DeserializeStep) Remove(id string) (DeserializeMiddleware, error) { + removed, err := s.ids.Remove(id) + if err != nil { + return nil, err + } + + return removed.(DeserializeMiddleware), nil +} + +// List returns a list of the middleware in the step. +func (s *DeserializeStep) List() []string { + return s.ids.List() +} + +// Clear removes all middleware in the step. +func (s *DeserializeStep) Clear() { + s.ids.Clear() +} + +type deserializeWrapHandler struct { + Next Handler +} + +var _ DeserializeHandler = (*deserializeWrapHandler)(nil) + +// HandleDeserialize implements DeserializeHandler, converts types and delegates to underlying +// generic handler. +func (w deserializeWrapHandler) HandleDeserialize(ctx context.Context, in DeserializeInput) ( + out DeserializeOutput, metadata Metadata, err error, +) { + resp, metadata, err := w.Next.Handle(ctx, in.Request) + return DeserializeOutput{ + RawResponse: resp, + }, metadata, err +} + +type decoratedDeserializeHandler struct { + Next DeserializeHandler + With DeserializeMiddleware +} + +var _ DeserializeHandler = (*decoratedDeserializeHandler)(nil) + +func (h decoratedDeserializeHandler) HandleDeserialize(ctx context.Context, in DeserializeInput) ( + out DeserializeOutput, metadata Metadata, err error, +) { + return h.With.HandleDeserialize(ctx, in, h.Next) +} + +// DeserializeHandlerFunc provides a wrapper around a function to be used as a deserialize middleware handler. +type DeserializeHandlerFunc func(context.Context, DeserializeInput) (DeserializeOutput, Metadata, error) + +// HandleDeserialize invokes the wrapped function with the given arguments. +func (d DeserializeHandlerFunc) HandleDeserialize(ctx context.Context, in DeserializeInput) (DeserializeOutput, Metadata, error) { + return d(ctx, in) +} + +var _ DeserializeHandler = DeserializeHandlerFunc(nil) diff --git a/vendor/github.com/aws/smithy-go/middleware/step_finalize.go b/vendor/github.com/aws/smithy-go/middleware/step_finalize.go new file mode 100644 index 00000000000..065e3885de9 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/middleware/step_finalize.go @@ -0,0 +1,211 @@ +package middleware + +import "context" + +// FinalizeInput provides the input parameters for the FinalizeMiddleware to +// consume. FinalizeMiddleware may modify the Request value before forwarding +// the FinalizeInput along to the next next FinalizeHandler. +type FinalizeInput struct { + Request interface{} +} + +// FinalizeOutput provides the result returned by the next FinalizeHandler. +type FinalizeOutput struct { + Result interface{} +} + +// FinalizeHandler provides the interface for the next handler the +// FinalizeMiddleware will call in the middleware chain. +type FinalizeHandler interface { + HandleFinalize(ctx context.Context, in FinalizeInput) ( + out FinalizeOutput, metadata Metadata, err error, + ) +} + +// FinalizeMiddleware provides the interface for middleware specific to the +// serialize step. Delegates to the next FinalizeHandler for further +// processing. +type FinalizeMiddleware interface { + // ID returns a unique ID for the middleware in the FinalizeStep. The step does not + // allow duplicate IDs. 
+ ID() string + + // HandleFinalize invokes the middleware behavior which must delegate to the next handler + // for the middleware chain to continue. The method must return a result or + // error to its caller. + HandleFinalize(ctx context.Context, in FinalizeInput, next FinalizeHandler) ( + out FinalizeOutput, metadata Metadata, err error, + ) +} + +// FinalizeMiddlewareFunc returns a FinalizeMiddleware with the unique ID +// provided, and the func to be invoked. +func FinalizeMiddlewareFunc(id string, fn func(context.Context, FinalizeInput, FinalizeHandler) (FinalizeOutput, Metadata, error)) FinalizeMiddleware { + return finalizeMiddlewareFunc{ + id: id, + fn: fn, + } +} + +type finalizeMiddlewareFunc struct { + // Unique ID for the middleware. + id string + + // Middleware function to be called. + fn func(context.Context, FinalizeInput, FinalizeHandler) ( + FinalizeOutput, Metadata, error, + ) +} + +// ID returns the unique ID for the middleware. +func (s finalizeMiddlewareFunc) ID() string { return s.id } + +// HandleFinalize invokes the middleware Fn. +func (s finalizeMiddlewareFunc) HandleFinalize(ctx context.Context, in FinalizeInput, next FinalizeHandler) ( + out FinalizeOutput, metadata Metadata, err error, +) { + return s.fn(ctx, in, next) +} + +var _ FinalizeMiddleware = (finalizeMiddlewareFunc{}) + +// FinalizeStep provides the ordered grouping of FinalizeMiddleware to be +// invoked on a handler. +type FinalizeStep struct { + ids *orderedIDs +} + +// NewFinalizeStep returns a FinalizeStep ready to have middleware for +// initialization added to it. +func NewFinalizeStep() *FinalizeStep { + return &FinalizeStep{ + ids: newOrderedIDs(), + } +} + +var _ Middleware = (*FinalizeStep)(nil) + +// ID returns the unique id of the step as a middleware. +func (s *FinalizeStep) ID() string { + return "Finalize stack step" +} + +// HandleMiddleware invokes the middleware by decorating the next handler +// provided. Returns the result of the middleware and handler being invoked. +// +// Implements Middleware interface. +func (s *FinalizeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) ( + out interface{}, metadata Metadata, err error, +) { + order := s.ids.GetOrder() + + var h FinalizeHandler = finalizeWrapHandler{Next: next} + for i := len(order) - 1; i >= 0; i-- { + h = decoratedFinalizeHandler{ + Next: h, + With: order[i].(FinalizeMiddleware), + } + } + + sIn := FinalizeInput{ + Request: in, + } + + res, metadata, err := h.HandleFinalize(ctx, sIn) + return res.Result, metadata, err +} + +// Get retrieves the middleware identified by id. If the middleware is not present, returns false. +func (s *FinalizeStep) Get(id string) (FinalizeMiddleware, bool) { + get, ok := s.ids.Get(id) + if !ok { + return nil, false + } + return get.(FinalizeMiddleware), ok +} + +// Add injects the middleware to the relative position of the middleware group. +// Returns an error if the middleware already exists. +func (s *FinalizeStep) Add(m FinalizeMiddleware, pos RelativePosition) error { + return s.ids.Add(m, pos) +} + +// Insert injects the middleware relative to an existing middleware ID. +// Returns error if the original middleware does not exist, or the middleware +// being added already exists. +func (s *FinalizeStep) Insert(m FinalizeMiddleware, relativeTo string, pos RelativePosition) error { + return s.ids.Insert(m, relativeTo, pos) +} + +// Swap removes the middleware by id, replacing it with the new middleware. 
+// Returns the middleware removed, or error if the middleware to be removed +// doesn't exist. +func (s *FinalizeStep) Swap(id string, m FinalizeMiddleware) (FinalizeMiddleware, error) { + removed, err := s.ids.Swap(id, m) + if err != nil { + return nil, err + } + + return removed.(FinalizeMiddleware), nil +} + +// Remove removes the middleware by id. Returns error if the middleware +// doesn't exist. +func (s *FinalizeStep) Remove(id string) (FinalizeMiddleware, error) { + removed, err := s.ids.Remove(id) + if err != nil { + return nil, err + } + + return removed.(FinalizeMiddleware), nil +} + +// List returns a list of the middleware in the step. +func (s *FinalizeStep) List() []string { + return s.ids.List() +} + +// Clear removes all middleware in the step. +func (s *FinalizeStep) Clear() { + s.ids.Clear() +} + +type finalizeWrapHandler struct { + Next Handler +} + +var _ FinalizeHandler = (*finalizeWrapHandler)(nil) + +// HandleFinalize implements FinalizeHandler, converts types and delegates to underlying +// generic handler. +func (w finalizeWrapHandler) HandleFinalize(ctx context.Context, in FinalizeInput) ( + out FinalizeOutput, metadata Metadata, err error, +) { + res, metadata, err := w.Next.Handle(ctx, in.Request) + return FinalizeOutput{ + Result: res, + }, metadata, err +} + +type decoratedFinalizeHandler struct { + Next FinalizeHandler + With FinalizeMiddleware +} + +var _ FinalizeHandler = (*decoratedFinalizeHandler)(nil) + +func (h decoratedFinalizeHandler) HandleFinalize(ctx context.Context, in FinalizeInput) ( + out FinalizeOutput, metadata Metadata, err error, +) { + return h.With.HandleFinalize(ctx, in, h.Next) +} + +// FinalizeHandlerFunc provides a wrapper around a function to be used as a finalize middleware handler. +type FinalizeHandlerFunc func(context.Context, FinalizeInput) (FinalizeOutput, Metadata, error) + +// HandleFinalize invokes the wrapped function with the given arguments. +func (f FinalizeHandlerFunc) HandleFinalize(ctx context.Context, in FinalizeInput) (FinalizeOutput, Metadata, error) { + return f(ctx, in) +} + +var _ FinalizeHandler = FinalizeHandlerFunc(nil) diff --git a/vendor/github.com/aws/smithy-go/middleware/step_initialize.go b/vendor/github.com/aws/smithy-go/middleware/step_initialize.go new file mode 100644 index 00000000000..fe359144d24 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/middleware/step_initialize.go @@ -0,0 +1,211 @@ +package middleware + +import "context" + +// InitializeInput wraps the input parameters for the InitializeMiddlewares to +// consume. InitializeMiddleware may modify the parameter value before +// forwarding it along to the next InitializeHandler. +type InitializeInput struct { + Parameters interface{} +} + +// InitializeOutput provides the result returned by the next InitializeHandler. +type InitializeOutput struct { + Result interface{} +} + +// InitializeHandler provides the interface for the next handler the +// InitializeMiddleware will call in the middleware chain. +type InitializeHandler interface { + HandleInitialize(ctx context.Context, in InitializeInput) ( + out InitializeOutput, metadata Metadata, err error, + ) +} + +// InitializeMiddleware provides the interface for middleware specific to the +// initialize step. Delegates to the next InitializeHandler for further +// processing. +type InitializeMiddleware interface { + // ID returns a unique ID for the middleware in the InitializeStep. The step does not + // allow duplicate IDs. 
+ ID() string + + // HandleInitialize invokes the middleware behavior which must delegate to the next handler + // for the middleware chain to continue. The method must return a result or + // error to its caller. + HandleInitialize(ctx context.Context, in InitializeInput, next InitializeHandler) ( + out InitializeOutput, metadata Metadata, err error, + ) +} + +// InitializeMiddlewareFunc returns a InitializeMiddleware with the unique ID provided, +// and the func to be invoked. +func InitializeMiddlewareFunc(id string, fn func(context.Context, InitializeInput, InitializeHandler) (InitializeOutput, Metadata, error)) InitializeMiddleware { + return initializeMiddlewareFunc{ + id: id, + fn: fn, + } +} + +type initializeMiddlewareFunc struct { + // Unique ID for the middleware. + id string + + // Middleware function to be called. + fn func(context.Context, InitializeInput, InitializeHandler) ( + InitializeOutput, Metadata, error, + ) +} + +// ID returns the unique ID for the middleware. +func (s initializeMiddlewareFunc) ID() string { return s.id } + +// HandleInitialize invokes the middleware Fn. +func (s initializeMiddlewareFunc) HandleInitialize(ctx context.Context, in InitializeInput, next InitializeHandler) ( + out InitializeOutput, metadata Metadata, err error, +) { + return s.fn(ctx, in, next) +} + +var _ InitializeMiddleware = (initializeMiddlewareFunc{}) + +// InitializeStep provides the ordered grouping of InitializeMiddleware to be +// invoked on a handler. +type InitializeStep struct { + ids *orderedIDs +} + +// NewInitializeStep returns an InitializeStep ready to have middleware for +// initialization added to it. +func NewInitializeStep() *InitializeStep { + return &InitializeStep{ + ids: newOrderedIDs(), + } +} + +var _ Middleware = (*InitializeStep)(nil) + +// ID returns the unique ID of the step as a middleware. +func (s *InitializeStep) ID() string { + return "Initialize stack step" +} + +// HandleMiddleware invokes the middleware by decorating the next handler +// provided. Returns the result of the middleware and handler being invoked. +// +// Implements Middleware interface. +func (s *InitializeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) ( + out interface{}, metadata Metadata, err error, +) { + order := s.ids.GetOrder() + + var h InitializeHandler = initializeWrapHandler{Next: next} + for i := len(order) - 1; i >= 0; i-- { + h = decoratedInitializeHandler{ + Next: h, + With: order[i].(InitializeMiddleware), + } + } + + sIn := InitializeInput{ + Parameters: in, + } + + res, metadata, err := h.HandleInitialize(ctx, sIn) + return res.Result, metadata, err +} + +// Get retrieves the middleware identified by id. If the middleware is not present, returns false. +func (s *InitializeStep) Get(id string) (InitializeMiddleware, bool) { + get, ok := s.ids.Get(id) + if !ok { + return nil, false + } + return get.(InitializeMiddleware), ok +} + +// Add injects the middleware to the relative position of the middleware group. +// Returns an error if the middleware already exists. +func (s *InitializeStep) Add(m InitializeMiddleware, pos RelativePosition) error { + return s.ids.Add(m, pos) +} + +// Insert injects the middleware relative to an existing middleware ID. +// Returns error if the original middleware does not exist, or the middleware +// being added already exists. 
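+//
+// For example (validateMiddleware and the "SetDefaults" ID are
+// hypothetical):
+//
+//	// run validation immediately after the "SetDefaults" middleware
+//	_ = step.Insert(validateMiddleware, "SetDefaults", After)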
+func (s *InitializeStep) Insert(m InitializeMiddleware, relativeTo string, pos RelativePosition) error { + return s.ids.Insert(m, relativeTo, pos) +} + +// Swap removes the middleware by id, replacing it with the new middleware. +// Returns the middleware removed, or error if the middleware to be removed +// doesn't exist. +func (s *InitializeStep) Swap(id string, m InitializeMiddleware) (InitializeMiddleware, error) { + removed, err := s.ids.Swap(id, m) + if err != nil { + return nil, err + } + + return removed.(InitializeMiddleware), nil +} + +// Remove removes the middleware by id. Returns error if the middleware +// doesn't exist. +func (s *InitializeStep) Remove(id string) (InitializeMiddleware, error) { + removed, err := s.ids.Remove(id) + if err != nil { + return nil, err + } + + return removed.(InitializeMiddleware), nil +} + +// List returns a list of the middleware in the step. +func (s *InitializeStep) List() []string { + return s.ids.List() +} + +// Clear removes all middleware in the step. +func (s *InitializeStep) Clear() { + s.ids.Clear() +} + +type initializeWrapHandler struct { + Next Handler +} + +var _ InitializeHandler = (*initializeWrapHandler)(nil) + +// HandleInitialize implements InitializeHandler, converts types and delegates to underlying +// generic handler. +func (w initializeWrapHandler) HandleInitialize(ctx context.Context, in InitializeInput) ( + out InitializeOutput, metadata Metadata, err error, +) { + res, metadata, err := w.Next.Handle(ctx, in.Parameters) + return InitializeOutput{ + Result: res, + }, metadata, err +} + +type decoratedInitializeHandler struct { + Next InitializeHandler + With InitializeMiddleware +} + +var _ InitializeHandler = (*decoratedInitializeHandler)(nil) + +func (h decoratedInitializeHandler) HandleInitialize(ctx context.Context, in InitializeInput) ( + out InitializeOutput, metadata Metadata, err error, +) { + return h.With.HandleInitialize(ctx, in, h.Next) +} + +// InitializeHandlerFunc provides a wrapper around a function to be used as an initialize middleware handler. +type InitializeHandlerFunc func(context.Context, InitializeInput) (InitializeOutput, Metadata, error) + +// HandleInitialize calls the wrapped function with the provided arguments. +func (i InitializeHandlerFunc) HandleInitialize(ctx context.Context, in InitializeInput) (InitializeOutput, Metadata, error) { + return i(ctx, in) +} + +var _ InitializeHandler = InitializeHandlerFunc(nil) diff --git a/vendor/github.com/aws/smithy-go/middleware/step_serialize.go b/vendor/github.com/aws/smithy-go/middleware/step_serialize.go new file mode 100644 index 00000000000..114bafcedea --- /dev/null +++ b/vendor/github.com/aws/smithy-go/middleware/step_serialize.go @@ -0,0 +1,219 @@ +package middleware + +import "context" + +// SerializeInput provides the input parameters for the SerializeMiddleware to +// consume. SerializeMiddleware may modify the Request value before forwarding +// SerializeInput along to the next SerializeHandler. The Parameters member +// should not be modified by SerializeMiddleware, InitializeMiddleware should +// be responsible for modifying the provided Parameter value. +type SerializeInput struct { + Parameters interface{} + Request interface{} +} + +// SerializeOutput provides the result returned by the next SerializeHandler. +type SerializeOutput struct { + Result interface{} +} + +// SerializeHandler provides the interface for the next handler the +// SerializeMiddleware will call in the middleware chain. 
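+//
+// A SerializeMiddleware delegating to this handler typically populates the
+// transport request from the input parameters first, e.g. (myRequest and
+// encode are hypothetical):
+//
+//	req := in.Request.(*myRequest)
+//	req.Body = encode(in.Parameters)
+//	return next.HandleSerialize(ctx, in)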
+type SerializeHandler interface { + HandleSerialize(ctx context.Context, in SerializeInput) ( + out SerializeOutput, metadata Metadata, err error, + ) +} + +// SerializeMiddleware provides the interface for middleware specific to the +// serialize step. Delegates to the next SerializeHandler for further +// processing. +type SerializeMiddleware interface { + // ID returns a unique ID for the middleware in the SerializeStep. The step does not + // allow duplicate IDs. + ID() string + + // HandleSerialize invokes the middleware behavior which must delegate to the next handler + // for the middleware chain to continue. The method must return a result or + // error to its caller. + HandleSerialize(ctx context.Context, in SerializeInput, next SerializeHandler) ( + out SerializeOutput, metadata Metadata, err error, + ) +} + +// SerializeMiddlewareFunc returns a SerializeMiddleware with the unique ID +// provided, and the func to be invoked. +func SerializeMiddlewareFunc(id string, fn func(context.Context, SerializeInput, SerializeHandler) (SerializeOutput, Metadata, error)) SerializeMiddleware { + return serializeMiddlewareFunc{ + id: id, + fn: fn, + } +} + +type serializeMiddlewareFunc struct { + // Unique ID for the middleware. + id string + + // Middleware function to be called. + fn func(context.Context, SerializeInput, SerializeHandler) ( + SerializeOutput, Metadata, error, + ) +} + +// ID returns the unique ID for the middleware. +func (s serializeMiddlewareFunc) ID() string { return s.id } + +// HandleSerialize invokes the middleware Fn. +func (s serializeMiddlewareFunc) HandleSerialize(ctx context.Context, in SerializeInput, next SerializeHandler) ( + out SerializeOutput, metadata Metadata, err error, +) { + return s.fn(ctx, in, next) +} + +var _ SerializeMiddleware = (serializeMiddlewareFunc{}) + +// SerializeStep provides the ordered grouping of SerializeMiddleware to be +// invoked on a handler. +type SerializeStep struct { + newRequest func() interface{} + ids *orderedIDs +} + +// NewSerializeStep returns a SerializeStep ready to have middleware for +// initialization added to it. The newRequest func parameter is used to +// initialize the transport specific request for the stack SerializeStep to +// serialize the input parameters into. +func NewSerializeStep(newRequest func() interface{}) *SerializeStep { + return &SerializeStep{ + ids: newOrderedIDs(), + newRequest: newRequest, + } +} + +var _ Middleware = (*SerializeStep)(nil) + +// ID returns the unique ID of the step as a middleware. +func (s *SerializeStep) ID() string { + return "Serialize stack step" +} + +// HandleMiddleware invokes the middleware by decorating the next handler +// provided. Returns the result of the middleware and handler being invoked. +// +// Implements Middleware interface. +func (s *SerializeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) ( + out interface{}, metadata Metadata, err error, +) { + order := s.ids.GetOrder() + + var h SerializeHandler = serializeWrapHandler{Next: next} + for i := len(order) - 1; i >= 0; i-- { + h = decoratedSerializeHandler{ + Next: h, + With: order[i].(SerializeMiddleware), + } + } + + sIn := SerializeInput{ + Parameters: in, + Request: s.newRequest(), + } + + res, metadata, err := h.HandleSerialize(ctx, sIn) + return res.Result, metadata, err +} + +// Get retrieves the middleware identified by id. If the middleware is not present, returns false. 
+func (s *SerializeStep) Get(id string) (SerializeMiddleware, bool) { + get, ok := s.ids.Get(id) + if !ok { + return nil, false + } + return get.(SerializeMiddleware), ok +} + +// Add injects the middleware to the relative position of the middleware group. +// Returns an error if the middleware already exists. +func (s *SerializeStep) Add(m SerializeMiddleware, pos RelativePosition) error { + return s.ids.Add(m, pos) +} + +// Insert injects the middleware relative to an existing middleware ID. +// Returns error if the original middleware does not exist, or the middleware +// being added already exists. +func (s *SerializeStep) Insert(m SerializeMiddleware, relativeTo string, pos RelativePosition) error { + return s.ids.Insert(m, relativeTo, pos) +} + +// Swap removes the middleware by id, replacing it with the new middleware. +// Returns the middleware removed, or error if the middleware to be removed +// doesn't exist. +func (s *SerializeStep) Swap(id string, m SerializeMiddleware) (SerializeMiddleware, error) { + removed, err := s.ids.Swap(id, m) + if err != nil { + return nil, err + } + + return removed.(SerializeMiddleware), nil +} + +// Remove removes the middleware by id. Returns error if the middleware +// doesn't exist. +func (s *SerializeStep) Remove(id string) (SerializeMiddleware, error) { + removed, err := s.ids.Remove(id) + if err != nil { + return nil, err + } + + return removed.(SerializeMiddleware), nil +} + +// List returns a list of the middleware in the step. +func (s *SerializeStep) List() []string { + return s.ids.List() +} + +// Clear removes all middleware in the step. +func (s *SerializeStep) Clear() { + s.ids.Clear() +} + +type serializeWrapHandler struct { + Next Handler +} + +var _ SerializeHandler = (*serializeWrapHandler)(nil) + +// Implements SerializeHandler, converts types and delegates to underlying +// generic handler. +func (w serializeWrapHandler) HandleSerialize(ctx context.Context, in SerializeInput) ( + out SerializeOutput, metadata Metadata, err error, +) { + res, metadata, err := w.Next.Handle(ctx, in.Request) + return SerializeOutput{ + Result: res, + }, metadata, err +} + +type decoratedSerializeHandler struct { + Next SerializeHandler + With SerializeMiddleware +} + +var _ SerializeHandler = (*decoratedSerializeHandler)(nil) + +func (h decoratedSerializeHandler) HandleSerialize(ctx context.Context, in SerializeInput) ( + out SerializeOutput, metadata Metadata, err error, +) { + return h.With.HandleSerialize(ctx, in, h.Next) +} + +// SerializeHandlerFunc provides a wrapper around a function to be used as a serialize middleware handler. +type SerializeHandlerFunc func(context.Context, SerializeInput) (SerializeOutput, Metadata, error) + +// HandleSerialize calls the wrapped function with the provided arguments. 
+func (s SerializeHandlerFunc) HandleSerialize(ctx context.Context, in SerializeInput) (SerializeOutput, Metadata, error) { + return s(ctx, in) +} + +var _ SerializeHandler = SerializeHandlerFunc(nil) diff --git a/vendor/github.com/aws/smithy-go/modman.toml b/vendor/github.com/aws/smithy-go/modman.toml new file mode 100644 index 00000000000..20295cdd2aa --- /dev/null +++ b/vendor/github.com/aws/smithy-go/modman.toml @@ -0,0 +1,11 @@ +[dependencies] + "github.com/google/go-cmp" = "v0.5.8" + "github.com/jmespath/go-jmespath" = "v0.4.0" + +[modules] + + [modules.codegen] + no_tag = true + + [modules."codegen/smithy-go-codegen/build/test-generated/go/internal/testmodule"] + no_tag = true diff --git a/vendor/github.com/aws/smithy-go/properties.go b/vendor/github.com/aws/smithy-go/properties.go new file mode 100644 index 00000000000..17d659c539e --- /dev/null +++ b/vendor/github.com/aws/smithy-go/properties.go @@ -0,0 +1,52 @@ +package smithy + +// PropertiesReader provides an interface for reading metadata from the +// underlying metadata container. +type PropertiesReader interface { + Get(key interface{}) interface{} +} + +// Properties provides storing and reading metadata values. Keys may be any +// comparable value type. Get and set will panic if key is not a comparable +// value type. +// +// Properties uses lazy initialization, and Set method must be called as an +// addressable value, or pointer. Not doing so may cause key/value pair to not +// be set. +type Properties struct { + values map[interface{}]interface{} +} + +// Get attempts to retrieve the value the key points to. Returns nil if the +// key was not found. +// +// Panics if key type is not comparable. +func (m *Properties) Get(key interface{}) interface{} { + return m.values[key] +} + +// Set stores the value pointed to by the key. If a value already exists at +// that key it will be replaced with the new value. +// +// Set method must be called as an addressable value, or pointer. If Set is not +// called as an addressable value or pointer, the key value pair being set may +// be lost. +// +// Panics if the key type is not comparable. +func (m *Properties) Set(key, value interface{}) { + if m.values == nil { + m.values = map[interface{}]interface{}{} + } + m.values[key] = value +} + +// Has returns whether the key exists in the metadata. +// +// Panics if the key type is not comparable. +func (m *Properties) Has(key interface{}) bool { + if m.values == nil { + return false + } + _, ok := m.values[key] + return ok +} diff --git a/vendor/github.com/aws/smithy-go/ptr/doc.go b/vendor/github.com/aws/smithy-go/ptr/doc.go new file mode 100644 index 00000000000..bc1f6996161 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/ptr/doc.go @@ -0,0 +1,5 @@ +// Package ptr provides utilities for converting scalar literal type values to and from pointers inline. +package ptr + +//go:generate go run -tags codegen generate.go +//go:generate gofmt -w -s . diff --git a/vendor/github.com/aws/smithy-go/ptr/from_ptr.go b/vendor/github.com/aws/smithy-go/ptr/from_ptr.go new file mode 100644 index 00000000000..a2845bb2c80 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/ptr/from_ptr.go @@ -0,0 +1,601 @@ +// Code generated by smithy-go/ptr/generate.go DO NOT EDIT. +package ptr + +import ( + "time" +) + +// ToBool returns bool value dereferenced if the passed +// in pointer was not nil. Returns a bool zero value if the +// pointer was nil. 
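+//
+// For example:
+//
+//	var p *bool
+//	_ = ToBool(p)  // false
+//	p = Bool(true) // Bool is the to-pointer counterpart in this package
+//	_ = ToBool(p)  // true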
+func ToBool(p *bool) (v bool) { + if p == nil { + return v + } + + return *p +} + +// ToBoolSlice returns a slice of bool values, that are +// dereferenced if the passed in pointer was not nil. Returns a bool +// zero value if the pointer was nil. +func ToBoolSlice(vs []*bool) []bool { + ps := make([]bool, len(vs)) + for i, v := range vs { + ps[i] = ToBool(v) + } + + return ps +} + +// ToBoolMap returns a map of bool values, that are +// dereferenced if the passed in pointer was not nil. The bool +// zero value is used if the pointer was nil. +func ToBoolMap(vs map[string]*bool) map[string]bool { + ps := make(map[string]bool, len(vs)) + for k, v := range vs { + ps[k] = ToBool(v) + } + + return ps +} + +// ToByte returns byte value dereferenced if the passed +// in pointer was not nil. Returns a byte zero value if the +// pointer was nil. +func ToByte(p *byte) (v byte) { + if p == nil { + return v + } + + return *p +} + +// ToByteSlice returns a slice of byte values, that are +// dereferenced if the passed in pointer was not nil. Returns a byte +// zero value if the pointer was nil. +func ToByteSlice(vs []*byte) []byte { + ps := make([]byte, len(vs)) + for i, v := range vs { + ps[i] = ToByte(v) + } + + return ps +} + +// ToByteMap returns a map of byte values, that are +// dereferenced if the passed in pointer was not nil. The byte +// zero value is used if the pointer was nil. +func ToByteMap(vs map[string]*byte) map[string]byte { + ps := make(map[string]byte, len(vs)) + for k, v := range vs { + ps[k] = ToByte(v) + } + + return ps +} + +// ToString returns string value dereferenced if the passed +// in pointer was not nil. Returns a string zero value if the +// pointer was nil. +func ToString(p *string) (v string) { + if p == nil { + return v + } + + return *p +} + +// ToStringSlice returns a slice of string values, that are +// dereferenced if the passed in pointer was not nil. Returns a string +// zero value if the pointer was nil. +func ToStringSlice(vs []*string) []string { + ps := make([]string, len(vs)) + for i, v := range vs { + ps[i] = ToString(v) + } + + return ps +} + +// ToStringMap returns a map of string values, that are +// dereferenced if the passed in pointer was not nil. The string +// zero value is used if the pointer was nil. +func ToStringMap(vs map[string]*string) map[string]string { + ps := make(map[string]string, len(vs)) + for k, v := range vs { + ps[k] = ToString(v) + } + + return ps +} + +// ToInt returns int value dereferenced if the passed +// in pointer was not nil. Returns a int zero value if the +// pointer was nil. +func ToInt(p *int) (v int) { + if p == nil { + return v + } + + return *p +} + +// ToIntSlice returns a slice of int values, that are +// dereferenced if the passed in pointer was not nil. Returns a int +// zero value if the pointer was nil. +func ToIntSlice(vs []*int) []int { + ps := make([]int, len(vs)) + for i, v := range vs { + ps[i] = ToInt(v) + } + + return ps +} + +// ToIntMap returns a map of int values, that are +// dereferenced if the passed in pointer was not nil. The int +// zero value is used if the pointer was nil. +func ToIntMap(vs map[string]*int) map[string]int { + ps := make(map[string]int, len(vs)) + for k, v := range vs { + ps[k] = ToInt(v) + } + + return ps +} + +// ToInt8 returns int8 value dereferenced if the passed +// in pointer was not nil. Returns a int8 zero value if the +// pointer was nil. 
+func ToInt8(p *int8) (v int8) { + if p == nil { + return v + } + + return *p +} + +// ToInt8Slice returns a slice of int8 values, that are +// dereferenced if the passed in pointer was not nil. Returns a int8 +// zero value if the pointer was nil. +func ToInt8Slice(vs []*int8) []int8 { + ps := make([]int8, len(vs)) + for i, v := range vs { + ps[i] = ToInt8(v) + } + + return ps +} + +// ToInt8Map returns a map of int8 values, that are +// dereferenced if the passed in pointer was not nil. The int8 +// zero value is used if the pointer was nil. +func ToInt8Map(vs map[string]*int8) map[string]int8 { + ps := make(map[string]int8, len(vs)) + for k, v := range vs { + ps[k] = ToInt8(v) + } + + return ps +} + +// ToInt16 returns int16 value dereferenced if the passed +// in pointer was not nil. Returns a int16 zero value if the +// pointer was nil. +func ToInt16(p *int16) (v int16) { + if p == nil { + return v + } + + return *p +} + +// ToInt16Slice returns a slice of int16 values, that are +// dereferenced if the passed in pointer was not nil. Returns a int16 +// zero value if the pointer was nil. +func ToInt16Slice(vs []*int16) []int16 { + ps := make([]int16, len(vs)) + for i, v := range vs { + ps[i] = ToInt16(v) + } + + return ps +} + +// ToInt16Map returns a map of int16 values, that are +// dereferenced if the passed in pointer was not nil. The int16 +// zero value is used if the pointer was nil. +func ToInt16Map(vs map[string]*int16) map[string]int16 { + ps := make(map[string]int16, len(vs)) + for k, v := range vs { + ps[k] = ToInt16(v) + } + + return ps +} + +// ToInt32 returns int32 value dereferenced if the passed +// in pointer was not nil. Returns a int32 zero value if the +// pointer was nil. +func ToInt32(p *int32) (v int32) { + if p == nil { + return v + } + + return *p +} + +// ToInt32Slice returns a slice of int32 values, that are +// dereferenced if the passed in pointer was not nil. Returns a int32 +// zero value if the pointer was nil. +func ToInt32Slice(vs []*int32) []int32 { + ps := make([]int32, len(vs)) + for i, v := range vs { + ps[i] = ToInt32(v) + } + + return ps +} + +// ToInt32Map returns a map of int32 values, that are +// dereferenced if the passed in pointer was not nil. The int32 +// zero value is used if the pointer was nil. +func ToInt32Map(vs map[string]*int32) map[string]int32 { + ps := make(map[string]int32, len(vs)) + for k, v := range vs { + ps[k] = ToInt32(v) + } + + return ps +} + +// ToInt64 returns int64 value dereferenced if the passed +// in pointer was not nil. Returns a int64 zero value if the +// pointer was nil. +func ToInt64(p *int64) (v int64) { + if p == nil { + return v + } + + return *p +} + +// ToInt64Slice returns a slice of int64 values, that are +// dereferenced if the passed in pointer was not nil. Returns a int64 +// zero value if the pointer was nil. +func ToInt64Slice(vs []*int64) []int64 { + ps := make([]int64, len(vs)) + for i, v := range vs { + ps[i] = ToInt64(v) + } + + return ps +} + +// ToInt64Map returns a map of int64 values, that are +// dereferenced if the passed in pointer was not nil. The int64 +// zero value is used if the pointer was nil. +func ToInt64Map(vs map[string]*int64) map[string]int64 { + ps := make(map[string]int64, len(vs)) + for k, v := range vs { + ps[k] = ToInt64(v) + } + + return ps +} + +// ToUint returns uint value dereferenced if the passed +// in pointer was not nil. Returns a uint zero value if the +// pointer was nil. 
+func ToUint(p *uint) (v uint) { + if p == nil { + return v + } + + return *p +} + +// ToUintSlice returns a slice of uint values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint +// zero value if the pointer was nil. +func ToUintSlice(vs []*uint) []uint { + ps := make([]uint, len(vs)) + for i, v := range vs { + ps[i] = ToUint(v) + } + + return ps +} + +// ToUintMap returns a map of uint values, that are +// dereferenced if the passed in pointer was not nil. The uint +// zero value is used if the pointer was nil. +func ToUintMap(vs map[string]*uint) map[string]uint { + ps := make(map[string]uint, len(vs)) + for k, v := range vs { + ps[k] = ToUint(v) + } + + return ps +} + +// ToUint8 returns uint8 value dereferenced if the passed +// in pointer was not nil. Returns a uint8 zero value if the +// pointer was nil. +func ToUint8(p *uint8) (v uint8) { + if p == nil { + return v + } + + return *p +} + +// ToUint8Slice returns a slice of uint8 values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint8 +// zero value if the pointer was nil. +func ToUint8Slice(vs []*uint8) []uint8 { + ps := make([]uint8, len(vs)) + for i, v := range vs { + ps[i] = ToUint8(v) + } + + return ps +} + +// ToUint8Map returns a map of uint8 values, that are +// dereferenced if the passed in pointer was not nil. The uint8 +// zero value is used if the pointer was nil. +func ToUint8Map(vs map[string]*uint8) map[string]uint8 { + ps := make(map[string]uint8, len(vs)) + for k, v := range vs { + ps[k] = ToUint8(v) + } + + return ps +} + +// ToUint16 returns uint16 value dereferenced if the passed +// in pointer was not nil. Returns a uint16 zero value if the +// pointer was nil. +func ToUint16(p *uint16) (v uint16) { + if p == nil { + return v + } + + return *p +} + +// ToUint16Slice returns a slice of uint16 values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint16 +// zero value if the pointer was nil. +func ToUint16Slice(vs []*uint16) []uint16 { + ps := make([]uint16, len(vs)) + for i, v := range vs { + ps[i] = ToUint16(v) + } + + return ps +} + +// ToUint16Map returns a map of uint16 values, that are +// dereferenced if the passed in pointer was not nil. The uint16 +// zero value is used if the pointer was nil. +func ToUint16Map(vs map[string]*uint16) map[string]uint16 { + ps := make(map[string]uint16, len(vs)) + for k, v := range vs { + ps[k] = ToUint16(v) + } + + return ps +} + +// ToUint32 returns uint32 value dereferenced if the passed +// in pointer was not nil. Returns a uint32 zero value if the +// pointer was nil. +func ToUint32(p *uint32) (v uint32) { + if p == nil { + return v + } + + return *p +} + +// ToUint32Slice returns a slice of uint32 values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint32 +// zero value if the pointer was nil. +func ToUint32Slice(vs []*uint32) []uint32 { + ps := make([]uint32, len(vs)) + for i, v := range vs { + ps[i] = ToUint32(v) + } + + return ps +} + +// ToUint32Map returns a map of uint32 values, that are +// dereferenced if the passed in pointer was not nil. The uint32 +// zero value is used if the pointer was nil. +func ToUint32Map(vs map[string]*uint32) map[string]uint32 { + ps := make(map[string]uint32, len(vs)) + for k, v := range vs { + ps[k] = ToUint32(v) + } + + return ps +} + +// ToUint64 returns uint64 value dereferenced if the passed +// in pointer was not nil. Returns a uint64 zero value if the +// pointer was nil. 
+func ToUint64(p *uint64) (v uint64) { + if p == nil { + return v + } + + return *p +} + +// ToUint64Slice returns a slice of uint64 values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint64 +// zero value if the pointer was nil. +func ToUint64Slice(vs []*uint64) []uint64 { + ps := make([]uint64, len(vs)) + for i, v := range vs { + ps[i] = ToUint64(v) + } + + return ps +} + +// ToUint64Map returns a map of uint64 values, that are +// dereferenced if the passed in pointer was not nil. The uint64 +// zero value is used if the pointer was nil. +func ToUint64Map(vs map[string]*uint64) map[string]uint64 { + ps := make(map[string]uint64, len(vs)) + for k, v := range vs { + ps[k] = ToUint64(v) + } + + return ps +} + +// ToFloat32 returns float32 value dereferenced if the passed +// in pointer was not nil. Returns a float32 zero value if the +// pointer was nil. +func ToFloat32(p *float32) (v float32) { + if p == nil { + return v + } + + return *p +} + +// ToFloat32Slice returns a slice of float32 values, that are +// dereferenced if the passed in pointer was not nil. Returns a float32 +// zero value if the pointer was nil. +func ToFloat32Slice(vs []*float32) []float32 { + ps := make([]float32, len(vs)) + for i, v := range vs { + ps[i] = ToFloat32(v) + } + + return ps +} + +// ToFloat32Map returns a map of float32 values, that are +// dereferenced if the passed in pointer was not nil. The float32 +// zero value is used if the pointer was nil. +func ToFloat32Map(vs map[string]*float32) map[string]float32 { + ps := make(map[string]float32, len(vs)) + for k, v := range vs { + ps[k] = ToFloat32(v) + } + + return ps +} + +// ToFloat64 returns float64 value dereferenced if the passed +// in pointer was not nil. Returns a float64 zero value if the +// pointer was nil. +func ToFloat64(p *float64) (v float64) { + if p == nil { + return v + } + + return *p +} + +// ToFloat64Slice returns a slice of float64 values, that are +// dereferenced if the passed in pointer was not nil. Returns a float64 +// zero value if the pointer was nil. +func ToFloat64Slice(vs []*float64) []float64 { + ps := make([]float64, len(vs)) + for i, v := range vs { + ps[i] = ToFloat64(v) + } + + return ps +} + +// ToFloat64Map returns a map of float64 values, that are +// dereferenced if the passed in pointer was not nil. The float64 +// zero value is used if the pointer was nil. +func ToFloat64Map(vs map[string]*float64) map[string]float64 { + ps := make(map[string]float64, len(vs)) + for k, v := range vs { + ps[k] = ToFloat64(v) + } + + return ps +} + +// ToTime returns time.Time value dereferenced if the passed +// in pointer was not nil. Returns a time.Time zero value if the +// pointer was nil. +func ToTime(p *time.Time) (v time.Time) { + if p == nil { + return v + } + + return *p +} + +// ToTimeSlice returns a slice of time.Time values, that are +// dereferenced if the passed in pointer was not nil. Returns a time.Time +// zero value if the pointer was nil. +func ToTimeSlice(vs []*time.Time) []time.Time { + ps := make([]time.Time, len(vs)) + for i, v := range vs { + ps[i] = ToTime(v) + } + + return ps +} + +// ToTimeMap returns a map of time.Time values, that are +// dereferenced if the passed in pointer was not nil. The time.Time +// zero value is used if the pointer was nil. 
+func ToTimeMap(vs map[string]*time.Time) map[string]time.Time { + ps := make(map[string]time.Time, len(vs)) + for k, v := range vs { + ps[k] = ToTime(v) + } + + return ps +} + +// ToDuration returns time.Duration value dereferenced if the passed +// in pointer was not nil. Returns a time.Duration zero value if the +// pointer was nil. +func ToDuration(p *time.Duration) (v time.Duration) { + if p == nil { + return v + } + + return *p +} + +// ToDurationSlice returns a slice of time.Duration values, that are +// dereferenced if the passed in pointer was not nil. Returns a time.Duration +// zero value if the pointer was nil. +func ToDurationSlice(vs []*time.Duration) []time.Duration { + ps := make([]time.Duration, len(vs)) + for i, v := range vs { + ps[i] = ToDuration(v) + } + + return ps +} + +// ToDurationMap returns a map of time.Duration values, that are +// dereferenced if the passed in pointer was not nil. The time.Duration +// zero value is used if the pointer was nil. +func ToDurationMap(vs map[string]*time.Duration) map[string]time.Duration { + ps := make(map[string]time.Duration, len(vs)) + for k, v := range vs { + ps[k] = ToDuration(v) + } + + return ps +} diff --git a/vendor/github.com/aws/smithy-go/ptr/gen_scalars.go b/vendor/github.com/aws/smithy-go/ptr/gen_scalars.go new file mode 100644 index 00000000000..97f01011e7e --- /dev/null +++ b/vendor/github.com/aws/smithy-go/ptr/gen_scalars.go @@ -0,0 +1,83 @@ +//go:build codegen +// +build codegen + +package ptr + +import "strings" + +func GetScalars() Scalars { + return Scalars{ + {Type: "bool"}, + {Type: "byte"}, + {Type: "string"}, + {Type: "int"}, + {Type: "int8"}, + {Type: "int16"}, + {Type: "int32"}, + {Type: "int64"}, + {Type: "uint"}, + {Type: "uint8"}, + {Type: "uint16"}, + {Type: "uint32"}, + {Type: "uint64"}, + {Type: "float32"}, + {Type: "float64"}, + {Type: "Time", Import: &Import{Path: "time"}}, + {Type: "Duration", Import: &Import{Path: "time"}}, + } +} + +// Import provides the import path and optional alias +type Import struct { + Path string + Alias string +} + +// Package returns the Go package name for the import. Returns alias if set. +func (i Import) Package() string { + if v := i.Alias; len(v) != 0 { + return v + } + + if v := i.Path; len(v) != 0 { + parts := strings.Split(v, "/") + pkg := parts[len(parts)-1] + return pkg + } + + return "" +} + +// Scalar provides the definition of a type to generate pointer utilities for. +type Scalar struct { + Type string + Import *Import +} + +// Name returns the exported function name for the type. +func (t Scalar) Name() string { + return strings.Title(t.Type) +} + +// Symbol returns the scalar's Go symbol with path if needed. +func (t Scalar) Symbol() string { + if t.Import != nil { + return t.Import.Package() + "." + t.Type + } + return t.Type +} + +// Scalars is a list of scalars. +type Scalars []Scalar + +// Imports returns all imports for the scalars. +func (ts Scalars) Imports() []*Import { + imports := []*Import{} + for _, t := range ts { + if v := t.Import; v != nil { + imports = append(imports, v) + } + } + + return imports +} diff --git a/vendor/github.com/aws/smithy-go/ptr/to_ptr.go b/vendor/github.com/aws/smithy-go/ptr/to_ptr.go new file mode 100644 index 00000000000..0bfbbecbdce --- /dev/null +++ b/vendor/github.com/aws/smithy-go/ptr/to_ptr.go @@ -0,0 +1,499 @@ +// Code generated by smithy-go/ptr/generate.go DO NOT EDIT. +package ptr + +import ( + "time" +) + +// Bool returns a pointer value for the bool value passed in. 
+func Bool(v bool) *bool { + return &v +} + +// BoolSlice returns a slice of bool pointers from the values +// passed in. +func BoolSlice(vs []bool) []*bool { + ps := make([]*bool, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// BoolMap returns a map of bool pointers from the values +// passed in. +func BoolMap(vs map[string]bool) map[string]*bool { + ps := make(map[string]*bool, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Byte returns a pointer value for the byte value passed in. +func Byte(v byte) *byte { + return &v +} + +// ByteSlice returns a slice of byte pointers from the values +// passed in. +func ByteSlice(vs []byte) []*byte { + ps := make([]*byte, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// ByteMap returns a map of byte pointers from the values +// passed in. +func ByteMap(vs map[string]byte) map[string]*byte { + ps := make(map[string]*byte, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// String returns a pointer value for the string value passed in. +func String(v string) *string { + return &v +} + +// StringSlice returns a slice of string pointers from the values +// passed in. +func StringSlice(vs []string) []*string { + ps := make([]*string, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// StringMap returns a map of string pointers from the values +// passed in. +func StringMap(vs map[string]string) map[string]*string { + ps := make(map[string]*string, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Int returns a pointer value for the int value passed in. +func Int(v int) *int { + return &v +} + +// IntSlice returns a slice of int pointers from the values +// passed in. +func IntSlice(vs []int) []*int { + ps := make([]*int, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// IntMap returns a map of int pointers from the values +// passed in. +func IntMap(vs map[string]int) map[string]*int { + ps := make(map[string]*int, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Int8 returns a pointer value for the int8 value passed in. +func Int8(v int8) *int8 { + return &v +} + +// Int8Slice returns a slice of int8 pointers from the values +// passed in. +func Int8Slice(vs []int8) []*int8 { + ps := make([]*int8, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Int8Map returns a map of int8 pointers from the values +// passed in. +func Int8Map(vs map[string]int8) map[string]*int8 { + ps := make(map[string]*int8, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Int16 returns a pointer value for the int16 value passed in. +func Int16(v int16) *int16 { + return &v +} + +// Int16Slice returns a slice of int16 pointers from the values +// passed in. +func Int16Slice(vs []int16) []*int16 { + ps := make([]*int16, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Int16Map returns a map of int16 pointers from the values +// passed in. +func Int16Map(vs map[string]int16) map[string]*int16 { + ps := make(map[string]*int16, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Int32 returns a pointer value for the int32 value passed in. +func Int32(v int32) *int32 { + return &v +} + +// Int32Slice returns a slice of int32 pointers from the values +// passed in. 
+func Int32Slice(vs []int32) []*int32 { + ps := make([]*int32, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Int32Map returns a map of int32 pointers from the values +// passed in. +func Int32Map(vs map[string]int32) map[string]*int32 { + ps := make(map[string]*int32, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Int64 returns a pointer value for the int64 value passed in. +func Int64(v int64) *int64 { + return &v +} + +// Int64Slice returns a slice of int64 pointers from the values +// passed in. +func Int64Slice(vs []int64) []*int64 { + ps := make([]*int64, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Int64Map returns a map of int64 pointers from the values +// passed in. +func Int64Map(vs map[string]int64) map[string]*int64 { + ps := make(map[string]*int64, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Uint returns a pointer value for the uint value passed in. +func Uint(v uint) *uint { + return &v +} + +// UintSlice returns a slice of uint pointers from the values +// passed in. +func UintSlice(vs []uint) []*uint { + ps := make([]*uint, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// UintMap returns a map of uint pointers from the values +// passed in. +func UintMap(vs map[string]uint) map[string]*uint { + ps := make(map[string]*uint, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Uint8 returns a pointer value for the uint8 value passed in. +func Uint8(v uint8) *uint8 { + return &v +} + +// Uint8Slice returns a slice of uint8 pointers from the values +// passed in. +func Uint8Slice(vs []uint8) []*uint8 { + ps := make([]*uint8, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Uint8Map returns a map of uint8 pointers from the values +// passed in. +func Uint8Map(vs map[string]uint8) map[string]*uint8 { + ps := make(map[string]*uint8, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Uint16 returns a pointer value for the uint16 value passed in. +func Uint16(v uint16) *uint16 { + return &v +} + +// Uint16Slice returns a slice of uint16 pointers from the values +// passed in. +func Uint16Slice(vs []uint16) []*uint16 { + ps := make([]*uint16, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Uint16Map returns a map of uint16 pointers from the values +// passed in. +func Uint16Map(vs map[string]uint16) map[string]*uint16 { + ps := make(map[string]*uint16, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Uint32 returns a pointer value for the uint32 value passed in. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint32Slice returns a slice of uint32 pointers from the values +// passed in. +func Uint32Slice(vs []uint32) []*uint32 { + ps := make([]*uint32, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Uint32Map returns a map of uint32 pointers from the values +// passed in. +func Uint32Map(vs map[string]uint32) map[string]*uint32 { + ps := make(map[string]*uint32, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Uint64 returns a pointer value for the uint64 value passed in. +func Uint64(v uint64) *uint64 { + return &v +} + +// Uint64Slice returns a slice of uint64 pointers from the values +// passed in. 
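+// Editorial sketch (not part of the upstream source): the Slice and Map
+// constructors copy each element into a fresh variable (vv := v) before
+// taking its address, so every returned pointer refers to its own copy and
+// never aliases the input. Hypothetical illustration:
+func exampleUint64Ptrs() {
+	in := []uint64{1, 2}
+	ps := Uint64Slice(in)
+	*ps[0] = 99 // mutates the copy only; in[0] is still 1
+	_ = in
+}
+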
+func Uint64Slice(vs []uint64) []*uint64 { + ps := make([]*uint64, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Uint64Map returns a map of uint64 pointers from the values +// passed in. +func Uint64Map(vs map[string]uint64) map[string]*uint64 { + ps := make(map[string]*uint64, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Float32 returns a pointer value for the float32 value passed in. +func Float32(v float32) *float32 { + return &v +} + +// Float32Slice returns a slice of float32 pointers from the values +// passed in. +func Float32Slice(vs []float32) []*float32 { + ps := make([]*float32, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Float32Map returns a map of float32 pointers from the values +// passed in. +func Float32Map(vs map[string]float32) map[string]*float32 { + ps := make(map[string]*float32, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Float64 returns a pointer value for the float64 value passed in. +func Float64(v float64) *float64 { + return &v +} + +// Float64Slice returns a slice of float64 pointers from the values +// passed in. +func Float64Slice(vs []float64) []*float64 { + ps := make([]*float64, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Float64Map returns a map of float64 pointers from the values +// passed in. +func Float64Map(vs map[string]float64) map[string]*float64 { + ps := make(map[string]*float64, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Time returns a pointer value for the time.Time value passed in. +func Time(v time.Time) *time.Time { + return &v +} + +// TimeSlice returns a slice of time.Time pointers from the values +// passed in. +func TimeSlice(vs []time.Time) []*time.Time { + ps := make([]*time.Time, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// TimeMap returns a map of time.Time pointers from the values +// passed in. +func TimeMap(vs map[string]time.Time) map[string]*time.Time { + ps := make(map[string]*time.Time, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Duration returns a pointer value for the time.Duration value passed in. +func Duration(v time.Duration) *time.Duration { + return &v +} + +// DurationSlice returns a slice of time.Duration pointers from the values +// passed in. +func DurationSlice(vs []time.Duration) []*time.Duration { + ps := make([]*time.Duration, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// DurationMap returns a map of time.Duration pointers from the values +// passed in. +func DurationMap(vs map[string]time.Duration) map[string]*time.Duration { + ps := make(map[string]*time.Duration, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} diff --git a/vendor/github.com/aws/smithy-go/rand/doc.go b/vendor/github.com/aws/smithy-go/rand/doc.go new file mode 100644 index 00000000000..f8b25d56259 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/rand/doc.go @@ -0,0 +1,3 @@ +// Package rand provides utilities for creating and working with random value +// generators. 
+package rand diff --git a/vendor/github.com/aws/smithy-go/rand/rand.go b/vendor/github.com/aws/smithy-go/rand/rand.go new file mode 100644 index 00000000000..9c479f62b59 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/rand/rand.go @@ -0,0 +1,31 @@ +package rand + +import ( + "crypto/rand" + "fmt" + "io" + "math/big" +) + +func init() { + Reader = rand.Reader +} + +// Reader provides a random reader that can reset during testing. +var Reader io.Reader + +// Int63n returns a int64 between zero and value of max, read from an io.Reader source. +func Int63n(reader io.Reader, max int64) (int64, error) { + bi, err := rand.Int(reader, big.NewInt(max)) + if err != nil { + return 0, fmt.Errorf("failed to read random value, %w", err) + } + + return bi.Int64(), nil +} + +// CryptoRandInt63n returns a random int64 between zero and value of max +// obtained from the crypto rand source. +func CryptoRandInt63n(max int64) (int64, error) { + return Int63n(Reader, max) +} diff --git a/vendor/github.com/aws/smithy-go/rand/uuid.go b/vendor/github.com/aws/smithy-go/rand/uuid.go new file mode 100644 index 00000000000..dc81cbc68ac --- /dev/null +++ b/vendor/github.com/aws/smithy-go/rand/uuid.go @@ -0,0 +1,87 @@ +package rand + +import ( + "encoding/hex" + "io" +) + +const dash byte = '-' + +// UUIDIdempotencyToken provides a utility to get idempotency tokens in the +// UUID format. +type UUIDIdempotencyToken struct { + uuid *UUID +} + +// NewUUIDIdempotencyToken returns a idempotency token provider returning +// tokens in the UUID random format using the reader provided. +func NewUUIDIdempotencyToken(r io.Reader) *UUIDIdempotencyToken { + return &UUIDIdempotencyToken{uuid: NewUUID(r)} +} + +// GetIdempotencyToken returns a random UUID value for Idempotency token. +func (u UUIDIdempotencyToken) GetIdempotencyToken() (string, error) { + return u.uuid.GetUUID() +} + +// UUID provides computing random UUID version 4 values from a random source +// reader. +type UUID struct { + randSrc io.Reader +} + +// NewUUID returns an initialized UUID value that can be used to retrieve +// random UUID version 4 values. +func NewUUID(r io.Reader) *UUID { + return &UUID{randSrc: r} +} + +// GetUUID returns a random UUID version 4 string representation sourced from the random reader the +// UUID was created with. Returns an error if unable to compute the UUID. +func (r *UUID) GetUUID() (string, error) { + var b [16]byte + if _, err := io.ReadFull(r.randSrc, b[:]); err != nil { + return "", err + } + r.makeUUIDv4(b[:]) + return format(b), nil +} + +// GetBytes returns a byte slice containing a random UUID version 4 sourced from the random reader the +// UUID was created with. Returns an error if unable to compute the UUID. +func (r *UUID) GetBytes() (u []byte, err error) { + u = make([]byte, 16) + if _, err = io.ReadFull(r.randSrc, u); err != nil { + return u, err + } + r.makeUUIDv4(u) + return u, nil +} + +func (r *UUID) makeUUIDv4(u []byte) { + // 13th character is "4" + u[6] = (u[6] & 0x0f) | 0x40 // Version 4 + // 17th character is "8", "9", "a", or "b" + u[8] = (u[8] & 0x3f) | 0x80 // Variant most significant bits are 10x where x can be either 1 or 0 +} + +// Format returns the canonical text representation of a UUID. +// This implementation is optimized to not use fmt. 
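+//
+// (editor's note: hex.Encode fills a fixed [36]byte scratch buffer, placing
+// dashes at offsets 8, 13, 18, and 23; this avoids fmt.Sprintf and its
+// allocations on the hot path.)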
+// Example: 82e42f16-b6cc-4d5b-95f5-d403c4befd3d
+func format(u [16]byte) string {
+	// https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29
+
+	var scratch [36]byte
+
+	hex.Encode(scratch[:8], u[0:4])
+	scratch[8] = dash
+	hex.Encode(scratch[9:13], u[4:6])
+	scratch[13] = dash
+	hex.Encode(scratch[14:18], u[6:8])
+	scratch[18] = dash
+	hex.Encode(scratch[19:23], u[8:10])
+	scratch[23] = dash
+	hex.Encode(scratch[24:], u[10:])
+
+	return string(scratch[:])
+}
diff --git a/vendor/github.com/aws/smithy-go/time/time.go b/vendor/github.com/aws/smithy-go/time/time.go
new file mode 100644
index 00000000000..b552a09f8a8
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/time/time.go
@@ -0,0 +1,134 @@
+package time
+
+import (
+	"context"
+	"fmt"
+	"math/big"
+	"strings"
+	"time"
+)
+
+const (
+	// dateTimeFormat is an RFC 3339 section 5.6 date-time format.
+	dateTimeFormatInput    = "2006-01-02T15:04:05.999999999Z"
+	dateTimeFormatInputNoZ = "2006-01-02T15:04:05.999999999"
+	dateTimeFormatOutput   = "2006-01-02T15:04:05.999Z"
+
+	// httpDateFormat is a date time defined by RFC 7231#section-7.1.1.1
+	// IMF-fixdate with no UTC offset.
+	httpDateFormat = "Mon, 02 Jan 2006 15:04:05 GMT"
+	// Additional formats needed for compatibility.
+	httpDateFormatSingleDigitDay             = "Mon, _2 Jan 2006 15:04:05 GMT"
+	httpDateFormatSingleDigitDayTwoDigitYear = "Mon, _2 Jan 06 15:04:05 GMT"
+)
+
+var millisecondFloat = big.NewFloat(1e3)
+
+// FormatDateTime formats value as a date-time, (RFC3339 section 5.6)
+//
+// Example: 1985-04-12T23:20:50.52Z
+func FormatDateTime(value time.Time) string {
+	return value.UTC().Format(dateTimeFormatOutput)
+}
+
+// ParseDateTime parses a string as a date-time, (RFC3339 section 5.6)
+//
+// Example: 1985-04-12T23:20:50.52Z
+func ParseDateTime(value string) (time.Time, error) {
+	return tryParse(value,
+		dateTimeFormatInput,
+		dateTimeFormatInputNoZ,
+		time.RFC3339Nano,
+		time.RFC3339,
+	)
+}
+
+// FormatHTTPDate formats value as a http-date, (RFC 7231#section-7.1.1.1 IMF-fixdate)
+//
+// Example: Tue, 29 Apr 2014 18:30:38 GMT
+func FormatHTTPDate(value time.Time) string {
+	return value.UTC().Format(httpDateFormat)
+}
+
+// ParseHTTPDate parses a string as a http-date, (RFC 7231#section-7.1.1.1 IMF-fixdate)
+//
+// Example: Tue, 29 Apr 2014 18:30:38 GMT
+func ParseHTTPDate(value string) (time.Time, error) {
+	return tryParse(value,
+		httpDateFormat,
+		httpDateFormatSingleDigitDay,
+		httpDateFormatSingleDigitDayTwoDigitYear,
+		time.RFC850,
+		time.ANSIC,
+	)
+}
+
+// FormatEpochSeconds returns value as a Unix time in seconds with decimal precision
+//
+// Example: 1515531081.123
+func FormatEpochSeconds(value time.Time) float64 {
+	ms := value.UnixNano() / int64(time.Millisecond)
+	return float64(ms) / 1e3
+}
+
+// ParseEpochSeconds returns value as a Unix time in seconds with decimal precision
+//
+// Example: 1515531081.123
+func ParseEpochSeconds(value float64) time.Time {
+	f := big.NewFloat(value)
+	f = f.Mul(f, millisecondFloat)
+	i, _ := f.Int64()
+	// Offset to `UTC` because time.Unix returns the time value based on the
+	// system's local time setting.
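+	// (editor's note: multiplying in big.Float avoids float64 rounding error
+	// before truncating to int64 milliseconds; i*1e6 then scales milliseconds
+	// to the nanosecond offset that time.Unix expects.)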
+ return time.Unix(0, i*1e6).UTC() +} + +func tryParse(v string, formats ...string) (time.Time, error) { + var errs parseErrors + for _, f := range formats { + t, err := time.Parse(f, v) + if err != nil { + errs = append(errs, parseError{ + Format: f, + Err: err, + }) + continue + } + return t, nil + } + + return time.Time{}, fmt.Errorf("unable to parse time string, %w", errs) +} + +type parseErrors []parseError + +func (es parseErrors) Error() string { + var s strings.Builder + for _, e := range es { + fmt.Fprintf(&s, "\n * %q: %v", e.Format, e.Err) + } + + return "parse errors:" + s.String() +} + +type parseError struct { + Format string + Err error +} + +// SleepWithContext will wait for the timer duration to expire, or until the context +// is canceled. Whichever happens first. If the context is canceled the +// Context's error will be returned. +func SleepWithContext(ctx context.Context, dur time.Duration) error { + t := time.NewTimer(dur) + defer t.Stop() + + select { + case <-t.C: + break + case <-ctx.Done(): + return ctx.Err() + } + + return nil +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/checksum_middleware.go b/vendor/github.com/aws/smithy-go/transport/http/checksum_middleware.go new file mode 100644 index 00000000000..bc4ad6e7973 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/checksum_middleware.go @@ -0,0 +1,70 @@ +package http + +import ( + "context" + "fmt" + + "github.com/aws/smithy-go/middleware" +) + +const contentMD5Header = "Content-Md5" + +// contentMD5Checksum provides a middleware to compute and set +// content-md5 checksum for a http request +type contentMD5Checksum struct { +} + +// AddContentChecksumMiddleware adds checksum middleware to middleware's +// build step. +func AddContentChecksumMiddleware(stack *middleware.Stack) error { + // This middleware must be executed before request body is set. + return stack.Build.Add(&contentMD5Checksum{}, middleware.Before) +} + +// ID returns the identifier for the checksum middleware +func (m *contentMD5Checksum) ID() string { return "ContentChecksum" } + +// HandleBuild adds behavior to compute md5 checksum and add content-md5 header +// on http request +func (m *contentMD5Checksum) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*Request) + if !ok { + return out, metadata, fmt.Errorf("unknown request type %T", req) + } + + // if Content-MD5 header is already present, return + if v := req.Header.Get(contentMD5Header); len(v) != 0 { + return next.HandleBuild(ctx, in) + } + + // fetch the request stream. 
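+	// (editor's note: GetStream returns nil when the operation has no payload;
+	// the MD5 below is only computed for an explicit, seekable body, and the
+	// stream is rewound afterwards so it can still be transmitted.)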
+ stream := req.GetStream() + // compute checksum if payload is explicit + if stream != nil { + if !req.IsStreamSeekable() { + return out, metadata, fmt.Errorf( + "unseekable stream is not supported for computing md5 checksum") + } + + v, err := computeMD5Checksum(stream) + if err != nil { + return out, metadata, fmt.Errorf("error computing md5 checksum, %w", err) + } + + // reset the request stream + if err := req.RewindStream(); err != nil { + return out, metadata, fmt.Errorf( + "error rewinding request stream after computing md5 checksum, %w", err) + } + + // set the 'Content-MD5' header + req.Header.Set(contentMD5Header, string(v)) + } + + // set md5 header value + return next.HandleBuild(ctx, in) +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/client.go b/vendor/github.com/aws/smithy-go/transport/http/client.go new file mode 100644 index 00000000000..e691c69bf44 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/client.go @@ -0,0 +1,120 @@ +package http + +import ( + "context" + "fmt" + "net/http" + + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" +) + +// ClientDo provides the interface for custom HTTP client implementations. +type ClientDo interface { + Do(*http.Request) (*http.Response, error) +} + +// ClientDoFunc provides a helper to wrap a function as an HTTP client for +// round tripping requests. +type ClientDoFunc func(*http.Request) (*http.Response, error) + +// Do will invoke the underlying func, returning the result. +func (fn ClientDoFunc) Do(r *http.Request) (*http.Response, error) { + return fn(r) +} + +// ClientHandler wraps a client that implements the HTTP Do method. Standard +// implementation is http.Client. +type ClientHandler struct { + client ClientDo +} + +// NewClientHandler returns an initialized middleware handler for the client. +func NewClientHandler(client ClientDo) ClientHandler { + return ClientHandler{ + client: client, + } +} + +// Handle implements the middleware Handler interface, that will invoke the +// underlying HTTP client. Requires the input to be a Smithy *Request. Returns +// a smithy *Response, or error if the request failed. +func (c ClientHandler) Handle(ctx context.Context, input interface{}) ( + out interface{}, metadata middleware.Metadata, err error, +) { + req, ok := input.(*Request) + if !ok { + return nil, metadata, fmt.Errorf("expect Smithy http.Request value as input, got unsupported type %T", input) + } + + builtRequest := req.Build(ctx) + if err := ValidateEndpointHost(builtRequest.Host); err != nil { + return nil, metadata, err + } + + resp, err := c.client.Do(builtRequest) + if resp == nil { + // Ensure a http response value is always present to prevent unexpected + // panics. + resp = &http.Response{ + Header: http.Header{}, + Body: http.NoBody, + } + } + if err != nil { + err = &RequestSendError{Err: err} + + // Override the error with a context canceled error, if that was canceled. + select { + case <-ctx.Done(): + err = &smithy.CanceledError{Err: ctx.Err()} + default: + } + } + + // HTTP RoundTripper *should* close the request body. But this may not happen in a timely manner. + // So instead Smithy *Request Build wraps the body to be sent in a safe closer that will clear the + // stream reference so that it can be safely reused. + if builtRequest.Body != nil { + _ = builtRequest.Body.Close() + } + + return &Response{Response: resp}, metadata, err +} + +// RequestSendError provides a generic request transport error. 
This error
+// should wrap errors making HTTP client requests.
+//
+// The ClientHandler will wrap the HTTP client's error if the client request
+// fails, and did not fail because of context canceled.
+type RequestSendError struct {
+	Err error
+}
+
+// ConnectionError returns that the error is related to not being able to send
+// the request, or receive a response from the service.
+func (e *RequestSendError) ConnectionError() bool {
+	return true
+}
+
+// Unwrap returns the underlying error, if there was one.
+func (e *RequestSendError) Unwrap() error {
+	return e.Err
+}
+
+func (e *RequestSendError) Error() string {
+	return fmt.Sprintf("request send failed, %v", e.Err)
+}
+
+// NopClient provides a client that ignores the request, and returns an empty
+// successful HTTP response value.
+type NopClient struct{}
+
+// Do ignores the request and returns a 200 status empty response.
+func (NopClient) Do(r *http.Request) (*http.Response, error) {
+	return &http.Response{
+		StatusCode: 200,
+		Header:     http.Header{},
+		Body:       http.NoBody,
+	}, nil
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/doc.go b/vendor/github.com/aws/smithy-go/transport/http/doc.go
new file mode 100644
index 00000000000..07366ac85a8
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/doc.go
@@ -0,0 +1,5 @@
+/*
+Package http provides the HTTP transport client and request/response types
+needed to round trip API operation calls with a service.
+*/
+package http
diff --git a/vendor/github.com/aws/smithy-go/transport/http/headerlist.go b/vendor/github.com/aws/smithy-go/transport/http/headerlist.go
new file mode 100644
index 00000000000..cbc9deb4df0
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/headerlist.go
@@ -0,0 +1,163 @@
+package http
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+	"unicode"
+)
+
+func splitHeaderListValues(vs []string, splitFn func(string) ([]string, error)) ([]string, error) {
+	values := make([]string, 0, len(vs))
+
+	for i := 0; i < len(vs); i++ {
+		parts, err := splitFn(vs[i])
+		if err != nil {
+			return nil, err
+		}
+		values = append(values, parts...)
+	}
+
+	return values, nil
+}
+
+// SplitHeaderListValues attempts to split the elements of the slice by commas,
+// and returns a list of all values separated. Returns an error if unable to
+// separate the values.
+func SplitHeaderListValues(vs []string) ([]string, error) {
+	return splitHeaderListValues(vs, quotedCommaSplit)
+}
+
+func quotedCommaSplit(v string) (parts []string, err error) {
+	v = strings.TrimSpace(v)
+
+	expectMore := true
+	for i := 0; i < len(v); i++ {
+		if unicode.IsSpace(rune(v[i])) {
+			continue
+		}
+		expectMore = false
+
+		// leading space in part is ignored.
+		// Start of value must be non-space, or quote.
+		//
+		// - If quote, enter quoted mode, find next non-escaped quote to
+		//   terminate the value.
+		// - Otherwise, find next comma to terminate value.
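+		// (editor's note, illustrative input: `a, "b,c", d` splits into
+		// ["a", "b,c", "d"]; the quoted element keeps its embedded comma and
+		// is unquoted below via strconv.Unquote.)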
+ + remaining := v[i:] + + var value string + var valueLen int + if remaining[0] == '"' { + //------------------------------ + // Quoted value + //------------------------------ + var j int + var skipQuote bool + for j += 1; j < len(remaining); j++ { + if remaining[j] == '\\' || (remaining[j] != '\\' && skipQuote) { + skipQuote = !skipQuote + continue + } + if remaining[j] == '"' { + break + } + } + if j == len(remaining) || j == 1 { + return nil, fmt.Errorf("value %v missing closing double quote", + remaining) + } + valueLen = j + 1 + + tail := remaining[valueLen:] + var k int + for ; k < len(tail); k++ { + if !unicode.IsSpace(rune(tail[k])) && tail[k] != ',' { + return nil, fmt.Errorf("value %v has non-space trailing characters", + remaining) + } + if tail[k] == ',' { + expectMore = true + break + } + } + value = remaining[:valueLen] + value, err = strconv.Unquote(value) + if err != nil { + return nil, fmt.Errorf("failed to unquote value %v, %w", value, err) + } + + // Pad valueLen to include trailing space(s) so `i` is updated correctly. + valueLen += k + + } else { + //------------------------------ + // Unquoted value + //------------------------------ + + // Index of the next comma is the length of the value, or end of string. + valueLen = strings.Index(remaining, ",") + if valueLen != -1 { + expectMore = true + } else { + valueLen = len(remaining) + } + value = strings.TrimSpace(remaining[:valueLen]) + } + + i += valueLen + parts = append(parts, value) + + } + + if expectMore { + parts = append(parts, "") + } + + return parts, nil +} + +// SplitHTTPDateTimestampHeaderListValues attempts to split the HTTP-Date +// timestamp values in the slice by commas, and return a list of all values +// separated. The split is aware of the HTTP-Date timestamp format, and will skip +// comma within the timestamp value. Returns an error if unable to split the +// timestamp values. +func SplitHTTPDateTimestampHeaderListValues(vs []string) ([]string, error) { + return splitHeaderListValues(vs, splitHTTPDateHeaderValue) +} + +func splitHTTPDateHeaderValue(v string) ([]string, error) { + if n := strings.Count(v, ","); n <= 1 { + // Nothing to do if only contains a no, or single HTTPDate value + return []string{v}, nil + } else if n%2 == 0 { + return nil, fmt.Errorf("invalid timestamp HTTPDate header comma separations, %q", v) + } + + var parts []string + var i, j int + + var doSplit bool + for ; i < len(v); i++ { + if v[i] == ',' { + if doSplit { + doSplit = false + parts = append(parts, strings.TrimSpace(v[j:i])) + j = i + 1 + } else { + // Skip the first comma in the timestamp value since that + // separates the day from the rest of the timestamp. + // + // Tue, 17 Dec 2019 23:48:18 GMT + doSplit = true + } + } + } + // Add final part + if j < len(v) { + parts = append(parts, strings.TrimSpace(v[j:])) + } + + return parts, nil +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/host.go b/vendor/github.com/aws/smithy-go/transport/http/host.go new file mode 100644 index 00000000000..6b290fec030 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/host.go @@ -0,0 +1,89 @@ +package http + +import ( + "fmt" + "net" + "strconv" + "strings" +) + +// ValidateEndpointHost validates that the host string passed in is a valid RFC +// 3986 host. Returns error if the host is not valid. 
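+//
+// Editorial sketch (illustrative, not upstream documentation):
+//
+//	ValidateEndpointHost("amazonaws.com")     // nil
+//	ValidateEndpointHost("amazonaws.com:443") // nil
+//	ValidateEndpointHost("bad host.com")      // error: label contains a space
+//	ValidateEndpointHost("host:70000")        // error: port out of range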
+func ValidateEndpointHost(host string) error {
+	var errors strings.Builder
+	var hostname string
+	var port string
+	var err error
+
+	if strings.Contains(host, ":") {
+		hostname, port, err = net.SplitHostPort(host)
+		if err != nil {
+			errors.WriteString(fmt.Sprintf("\n endpoint %v, failed to parse, got ", host))
+			errors.WriteString(err.Error())
+		}
+
+		if !ValidPortNumber(port) {
+			errors.WriteString(fmt.Sprintf("port number should be in range [0-65535], got %v", port))
+		}
+	} else {
+		hostname = host
+	}
+
+	labels := strings.Split(hostname, ".")
+	for i, label := range labels {
+		if i == len(labels)-1 && len(label) == 0 {
+			// Allow trailing dot for FQDN hosts.
+			continue
+		}
+
+		if !ValidHostLabel(label) {
+			errors.WriteString("\nendpoint host domain labels must match \"[a-zA-Z0-9-]{1,63}\", but found: ")
+			errors.WriteString(label)
+		}
+	}
+
+	if len(hostname) == 0 && len(port) != 0 {
+		errors.WriteString("\nendpoint host with port must not be empty")
+	}
+
+	if len(hostname) > 255 {
+		errors.WriteString(fmt.Sprintf("\nendpoint host must be less than 255 characters, but was %d", len(hostname)))
+	}
+
+	if len(errors.String()) > 0 {
+		return fmt.Errorf("invalid endpoint host%s", errors.String())
+	}
+	return nil
+}
+
+// ValidPortNumber returns whether the port is a valid RFC 3986 port.
+func ValidPortNumber(port string) bool {
+	i, err := strconv.Atoi(port)
+	if err != nil {
+		return false
+	}
+
+	if i < 0 || i > 65535 {
+		return false
+	}
+	return true
+}
+
+// ValidHostLabel returns whether the label is a valid RFC 3986 host label.
+func ValidHostLabel(label string) bool {
+	if l := len(label); l == 0 || l > 63 {
+		return false
+	}
+	for _, r := range label {
+		switch {
+		case r >= '0' && r <= '9':
+		case r >= 'A' && r <= 'Z':
+		case r >= 'a' && r <= 'z':
+		case r == '-':
+		default:
+			return false
+		}
+	}
+
+	return true
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/internal/io/safe.go b/vendor/github.com/aws/smithy-go/transport/http/internal/io/safe.go
new file mode 100644
index 00000000000..941a8d6b512
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/internal/io/safe.go
@@ -0,0 +1,75 @@
+package io
+
+import (
+	"io"
+	"sync"
+)
+
+// NewSafeReadCloser returns a new safeReadCloser that wraps readCloser.
+func NewSafeReadCloser(readCloser io.ReadCloser) io.ReadCloser {
+	sr := &safeReadCloser{
+		readCloser: readCloser,
+	}
+
+	if _, ok := readCloser.(io.WriterTo); ok {
+		return &safeWriteToReadCloser{safeReadCloser: sr}
+	}
+
+	return sr
+}
+
+// safeWriteToReadCloser wraps a safeReadCloser but exposes a WriteTo interface implementation. This will panic
+// if the underlying io.ReadCloser does not support WriteTo. Use NewSafeReadCloser to ensure the proper handling of this
+// type.
+type safeWriteToReadCloser struct {
+	*safeReadCloser
+}
+
+// WriteTo implements the io.WriterTo interface.
+func (r *safeWriteToReadCloser) WriteTo(w io.Writer) (int64, error) {
+	r.safeReadCloser.mtx.Lock()
+	defer r.safeReadCloser.mtx.Unlock()
+
+	if r.safeReadCloser.closed {
+		return 0, io.EOF
+	}
+
+	return r.safeReadCloser.readCloser.(io.WriterTo).WriteTo(w)
+}
+
+// safeReadCloser wraps an io.ReadCloser and presents an io.ReadCloser interface. When Close is called on safeReadCloser
+// the underlying Close method will be executed, and then the reference to the reader will be dropped. This type
+// is meant to be used with the net/http library which will retain a reference to the request body for the lifetime
+// of a goroutine connection.
Wrapping in this manner will ensure that no data race conditions are falsely reported. +// This type is thread-safe. +type safeReadCloser struct { + readCloser io.ReadCloser + closed bool + mtx sync.Mutex +} + +// Read reads up to len(p) bytes into p from the underlying read. If the reader is closed io.EOF will be returned. +func (r *safeReadCloser) Read(p []byte) (n int, err error) { + r.mtx.Lock() + defer r.mtx.Unlock() + if r.closed { + return 0, io.EOF + } + + return r.readCloser.Read(p) +} + +// Close calls the underlying io.ReadCloser's Close method, removes the reference to the reader, and returns any error +// reported from Close. Subsequent calls to Close will always return a nil error. +func (r *safeReadCloser) Close() error { + r.mtx.Lock() + defer r.mtx.Unlock() + if r.closed { + return nil + } + + r.closed = true + rc := r.readCloser + r.readCloser = nil + return rc.Close() +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/md5_checksum.go b/vendor/github.com/aws/smithy-go/transport/http/md5_checksum.go new file mode 100644 index 00000000000..5d6a4b23a27 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/md5_checksum.go @@ -0,0 +1,25 @@ +package http + +import ( + "crypto/md5" + "encoding/base64" + "fmt" + "io" +) + +// computeMD5Checksum computes base64 md5 checksum of an io.Reader's contents. +// Returns the byte slice of md5 checksum and an error. +func computeMD5Checksum(r io.Reader) ([]byte, error) { + h := md5.New() + // copy errors may be assumed to be from the body. + _, err := io.Copy(h, r) + if err != nil { + return nil, fmt.Errorf("failed to read body: %w", err) + } + + // encode the md5 checksum in base64. + sum := h.Sum(nil) + sum64 := make([]byte, base64.StdEncoding.EncodedLen(len(sum))) + base64.StdEncoding.Encode(sum64, sum) + return sum64, nil +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go new file mode 100644 index 00000000000..1d3b218a127 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go @@ -0,0 +1,79 @@ +package http + +import ( + "context" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" + "io" + "io/ioutil" +) + +// AddErrorCloseResponseBodyMiddleware adds the middleware to automatically +// close the response body of an operation request if the request response +// failed. +func AddErrorCloseResponseBodyMiddleware(stack *middleware.Stack) error { + return stack.Deserialize.Insert(&errorCloseResponseBodyMiddleware{}, "OperationDeserializer", middleware.Before) +} + +type errorCloseResponseBodyMiddleware struct{} + +func (*errorCloseResponseBodyMiddleware) ID() string { + return "ErrorCloseResponseBody" +} + +func (m *errorCloseResponseBodyMiddleware) HandleDeserialize( + ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler, +) ( + output middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err := next.HandleDeserialize(ctx, input) + if err != nil { + if resp, ok := out.RawResponse.(*Response); ok && resp != nil && resp.Body != nil { + // Consume the full body to prevent TCP connection resets on some platforms + _, _ = io.Copy(ioutil.Discard, resp.Body) + // Do not validate that the response closes successfully. 
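+			// (editor's note: the Close error is deliberately ignored on this
+			// path; the operation already failed, and the io.Copy drain above
+			// is what preserves connection reuse.)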
+			resp.Body.Close()
+		}
+	}
+
+	return out, metadata, err
+}
+
+// AddCloseResponseBodyMiddleware adds the middleware to automatically close
+// the response body of an operation request, after the response has been
+// deserialized.
+func AddCloseResponseBodyMiddleware(stack *middleware.Stack) error {
+	return stack.Deserialize.Insert(&closeResponseBody{}, "OperationDeserializer", middleware.Before)
+}
+
+type closeResponseBody struct{}
+
+func (*closeResponseBody) ID() string {
+	return "CloseResponseBody"
+}
+
+func (m *closeResponseBody) HandleDeserialize(
+	ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+	output middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err := next.HandleDeserialize(ctx, input)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	if resp, ok := out.RawResponse.(*Response); ok {
+		// Consume the full body to prevent TCP connection resets on some platforms
+		_, copyErr := io.Copy(ioutil.Discard, resp.Body)
+		if copyErr != nil {
+			middleware.GetLogger(ctx).Logf(logging.Warn, "failed to discard remaining HTTP response body, this may affect connection reuse")
+		}
+
+		closeErr := resp.Body.Close()
+		if closeErr != nil {
+			middleware.GetLogger(ctx).Logf(logging.Warn, "failed to close HTTP response body, this may affect connection reuse")
+		}
+	}
+
+	return out, metadata, err
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_content_length.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_content_length.go
new file mode 100644
index 00000000000..9969389bb29
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_content_length.go
@@ -0,0 +1,84 @@
+package http
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/aws/smithy-go/middleware"
+)
+
+// ComputeContentLength provides a middleware to set the content-length
+// header for the length of a serialized request body.
+type ComputeContentLength struct {
+}
+
+// AddComputeContentLengthMiddleware adds ComputeContentLength to the middleware
+// stack's Build step.
+func AddComputeContentLengthMiddleware(stack *middleware.Stack) error {
+	return stack.Build.Add(&ComputeContentLength{}, middleware.After)
+}
+
+// ID returns the identifier for the ComputeContentLength.
+func (m *ComputeContentLength) ID() string { return "ComputeContentLength" }
+
+// HandleBuild adds the length of the serialized request to the HTTP header
+// if the length can be determined.
+func (m *ComputeContentLength) HandleBuild(
+	ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+) (
+	out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := in.Request.(*Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown request type %T", req)
+	}
+
+	// do nothing if request content-length was set to 0 or above.
+	if req.ContentLength >= 0 {
+		return next.HandleBuild(ctx, in)
+	}
+
+	// attempt to compute stream length
+	if n, ok, err := req.StreamLength(); err != nil {
+		return out, metadata, fmt.Errorf(
+			"failed getting length of request stream, %w", err)
+	} else if ok {
+		req.ContentLength = n
+	}
+
+	return next.HandleBuild(ctx, in)
+}
+
+// validateContentLength provides a middleware to validate that the
+// content-length of the serialized request payload is valid (greater than or
+// equal to zero).
+type validateContentLength struct{}
+
+// ValidateContentLengthHeader adds middleware that validates the request
+// content-length is set to a value of zero or greater.
+func ValidateContentLengthHeader(stack *middleware.Stack) error {
+	return stack.Build.Add(&validateContentLength{}, middleware.After)
+}
+
+// ID returns the identifier for the validateContentLength middleware.
+func (m *validateContentLength) ID() string { return "ValidateContentLength" }
+
+// HandleBuild validates that the request's content-length, if set, is not
+// negative.
+func (m *validateContentLength) HandleBuild(
+	ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+) (
+	out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := in.Request.(*Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown request type %T", req)
+	}
+
+	// if request content-length was set to less than 0, return an error
+	if req.ContentLength < 0 {
+		return out, metadata, fmt.Errorf(
+			"content length for payload is required and must be at least 0")
+	}
+
+	return next.HandleBuild(ctx, in)
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_header_comment.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_header_comment.go
new file mode 100644
index 00000000000..855c2272031
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_header_comment.go
@@ -0,0 +1,81 @@
+package http
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+
+	"github.com/aws/smithy-go/middleware"
+)
+
+// WithHeaderComment instruments a middleware stack to append an HTTP field
+// comment to the given header as specified in RFC 9110
+// (https://www.rfc-editor.org/rfc/rfc9110#name-comments).
+//
+// The header is case-insensitive. If the provided header exists when the
+// middleware runs, the content will be inserted as-is enclosed in parentheses.
+//
+// Note that per the HTTP specification, comments are only allowed in fields
+// containing "comment" as part of their field value definition, but this API
+// will NOT verify whether the provided header is one of them.
+//
+// WithHeaderComment MAY be applied more than once to a middleware stack and/or
+// more than once per header.
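+//
+// Editorial sketch (hypothetical option wiring, not upstream code):
+//
+//	opts = append(opts, WithHeaderComment("User-Agent", "my-app/1.0"))
+//	// e.g. User-Agent: existing-agent/1.2.3 (my-app/1.0)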
+func WithHeaderComment(header, content string) func(*middleware.Stack) error { + return func(s *middleware.Stack) error { + m, err := getOrAddHeaderComment(s) + if err != nil { + return fmt.Errorf("get or add header comment: %v", err) + } + + m.values.Add(header, content) + return nil + } +} + +type headerCommentMiddleware struct { + values http.Header // hijack case-insensitive access APIs +} + +func (*headerCommentMiddleware) ID() string { + return "headerComment" +} + +func (m *headerCommentMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + r, ok := in.Request.(*Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + for h, contents := range m.values { + for _, c := range contents { + if existing := r.Header.Get(h); existing != "" { + r.Header.Set(h, fmt.Sprintf("%s (%s)", existing, c)) + } + } + } + + return next.HandleBuild(ctx, in) +} + +func getOrAddHeaderComment(s *middleware.Stack) (*headerCommentMiddleware, error) { + id := (*headerCommentMiddleware)(nil).ID() + m, ok := s.Build.Get(id) + if !ok { + m := &headerCommentMiddleware{values: http.Header{}} + if err := s.Build.Add(m, middleware.After); err != nil { + return nil, fmt.Errorf("add build: %v", err) + } + + return m, nil + } + + hc, ok := m.(*headerCommentMiddleware) + if !ok { + return nil, fmt.Errorf("existing middleware w/ id %s is not *headerCommentMiddleware", id) + } + + return hc, nil +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_headers.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_headers.go new file mode 100644 index 00000000000..eac32b4babd --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_headers.go @@ -0,0 +1,167 @@ +package http + +import ( + "context" + "fmt" + + "github.com/aws/smithy-go/middleware" +) + +type isContentTypeAutoSet struct{} + +// SetIsContentTypeDefaultValue returns a Context specifying if the request's +// content-type header was set to a default value. +func SetIsContentTypeDefaultValue(ctx context.Context, isDefault bool) context.Context { + return context.WithValue(ctx, isContentTypeAutoSet{}, isDefault) +} + +// GetIsContentTypeDefaultValue returns if the content-type HTTP header on the +// request is a default value that was auto assigned by an operation +// serializer. Allows middleware post serialization to know if the content-type +// was auto set to a default value or not. +// +// Also returns false if the Context value was never updated to include if +// content-type was set to a default value. +func GetIsContentTypeDefaultValue(ctx context.Context) bool { + v, _ := ctx.Value(isContentTypeAutoSet{}).(bool) + return v +} + +// AddNoPayloadDefaultContentTypeRemover Adds the DefaultContentTypeRemover +// middleware to the stack after the operation serializer. This middleware will +// remove the content-type header from the request if it was set as a default +// value, and no request payload is present. +// +// Returns error if unable to add the middleware. 
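+//
+// Editorial sketch (hypothetical wiring inside an operation's middleware
+// setup, not upstream code):
+//
+//	if err := AddNoPayloadDefaultContentTypeRemover(stack); err != nil {
+//		return err
+//	}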
+func AddNoPayloadDefaultContentTypeRemover(stack *middleware.Stack) (err error) { + err = stack.Serialize.Insert(removeDefaultContentType{}, + "OperationSerializer", middleware.After) + if err != nil { + return fmt.Errorf("failed to add %s serialize middleware, %w", + removeDefaultContentType{}.ID(), err) + } + + return nil +} + +// RemoveNoPayloadDefaultContentTypeRemover removes the +// DefaultContentTypeRemover middleware from the stack. Returns an error if +// unable to remove the middleware. +func RemoveNoPayloadDefaultContentTypeRemover(stack *middleware.Stack) (err error) { + _, err = stack.Serialize.Remove(removeDefaultContentType{}.ID()) + if err != nil { + return fmt.Errorf("failed to remove %s serialize middleware, %w", + removeDefaultContentType{}.ID(), err) + + } + return nil +} + +// removeDefaultContentType provides after serialization middleware that will +// remove the content-type header from an HTTP request if the header was set as +// a default value by the operation serializer, and there is no request payload. +type removeDefaultContentType struct{} + +// ID returns the middleware ID +func (removeDefaultContentType) ID() string { return "RemoveDefaultContentType" } + +// HandleSerialize implements the serialization middleware. +func (removeDefaultContentType) HandleSerialize( + ctx context.Context, input middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, meta middleware.Metadata, err error, +) { + req, ok := input.Request.(*Request) + if !ok { + return out, meta, fmt.Errorf( + "unexpected request type %T for removeDefaultContentType middleware", + input.Request) + } + + if GetIsContentTypeDefaultValue(ctx) && req.GetStream() == nil { + req.Header.Del("Content-Type") + input.Request = req + } + + return next.HandleSerialize(ctx, input) +} + +type headerValue struct { + header string + value string + append bool +} + +type headerValueHelper struct { + headerValues []headerValue +} + +func (h *headerValueHelper) addHeaderValue(value headerValue) { + h.headerValues = append(h.headerValues, value) +} + +func (h *headerValueHelper) ID() string { + return "HTTPHeaderHelper" +} + +func (h *headerValueHelper) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (out middleware.BuildOutput, metadata middleware.Metadata, err error) { + req, ok := in.Request.(*Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + for _, value := range h.headerValues { + if value.append { + req.Header.Add(value.header, value.value) + } else { + req.Header.Set(value.header, value.value) + } + } + + return next.HandleBuild(ctx, in) +} + +func getOrAddHeaderValueHelper(stack *middleware.Stack) (*headerValueHelper, error) { + id := (*headerValueHelper)(nil).ID() + m, ok := stack.Build.Get(id) + if !ok { + m = &headerValueHelper{} + err := stack.Build.Add(m, middleware.After) + if err != nil { + return nil, err + } + } + + requestUserAgent, ok := m.(*headerValueHelper) + if !ok { + return nil, fmt.Errorf("%T for %s middleware did not match expected type", m, id) + } + + return requestUserAgent, nil +} + +// AddHeaderValue returns a stack mutator that adds the header value pair to header. +// Appends to any existing values if present. 
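+//
+// Editorial sketch (hypothetical, not upstream code):
+//
+//	opts = append(opts,
+//		SetHeaderValue("X-Service", "tempo"), // replaces any existing value
+//		AddHeaderValue("X-Extra", "one"),     // appends, preserving existing
+//	)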
+func AddHeaderValue(header string, value string) func(stack *middleware.Stack) error { + return func(stack *middleware.Stack) error { + helper, err := getOrAddHeaderValueHelper(stack) + if err != nil { + return err + } + helper.addHeaderValue(headerValue{header: header, value: value, append: true}) + return nil + } +} + +// SetHeaderValue returns a stack mutator that adds the header value pair to header. +// Replaces any existing values if present. +func SetHeaderValue(header string, value string) func(stack *middleware.Stack) error { + return func(stack *middleware.Stack) error { + helper, err := getOrAddHeaderValueHelper(stack) + if err != nil { + return err + } + helper.addHeaderValue(headerValue{header: header, value: value, append: false}) + return nil + } +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_http_logging.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_http_logging.go new file mode 100644 index 00000000000..d5909b0a242 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_http_logging.go @@ -0,0 +1,75 @@ +package http + +import ( + "context" + "fmt" + "net/http/httputil" + + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" +) + +// RequestResponseLogger is a deserialize middleware that will log the request and response HTTP messages and optionally +// their respective bodies. Will not perform any logging if none of the options are set. +type RequestResponseLogger struct { + LogRequest bool + LogRequestWithBody bool + + LogResponse bool + LogResponseWithBody bool +} + +// ID is the middleware identifier. +func (r *RequestResponseLogger) ID() string { + return "RequestResponseLogger" +} + +// HandleDeserialize will log the request and response HTTP messages if configured accordingly. 
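+//
+// Editorial sketch (hypothetical stack wiring, not upstream code): log
+// request and response heads, but not bodies, at debug level:
+//
+//	err := stack.Deserialize.Add(&RequestResponseLogger{
+//		LogRequest:  true,
+//		LogResponse: true,
+//	}, middleware.After)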
+func (r *RequestResponseLogger) HandleDeserialize( + ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, +) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + logger := middleware.GetLogger(ctx) + + if r.LogRequest || r.LogRequestWithBody { + smithyRequest, ok := in.Request.(*Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in) + } + + rc := smithyRequest.Build(ctx) + reqBytes, err := httputil.DumpRequestOut(rc, r.LogRequestWithBody) + if err != nil { + return out, metadata, err + } + + logger.Logf(logging.Debug, "Request\n%v", string(reqBytes)) + + if r.LogRequestWithBody { + smithyRequest, err = smithyRequest.SetStream(rc.Body) + if err != nil { + return out, metadata, err + } + in.Request = smithyRequest + } + } + + out, metadata, err = next.HandleDeserialize(ctx, in) + + if (err == nil) && (r.LogResponse || r.LogResponseWithBody) { + smithyResponse, ok := out.RawResponse.(*Response) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", out.RawResponse) + } + + respBytes, err := httputil.DumpResponse(smithyResponse.Response, r.LogResponseWithBody) + if err != nil { + return out, metadata, fmt.Errorf("failed to dump response %w", err) + } + + logger.Logf(logging.Debug, "Response\n%v", string(respBytes)) + } + + return out, metadata, err +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_metadata.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_metadata.go new file mode 100644 index 00000000000..d6079b25950 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_metadata.go @@ -0,0 +1,51 @@ +package http + +import ( + "context" + + "github.com/aws/smithy-go/middleware" +) + +type ( + hostnameImmutableKey struct{} + hostPrefixDisableKey struct{} +) + +// GetHostnameImmutable retrieves whether the endpoint hostname should be considered +// immutable or not. +// +// Scoped to stack values. Use middleware#ClearStackValues to clear all stack +// values. +func GetHostnameImmutable(ctx context.Context) (v bool) { + v, _ = middleware.GetStackValue(ctx, hostnameImmutableKey{}).(bool) + return v +} + +// SetHostnameImmutable sets or modifies whether the request's endpoint hostname +// should be considered immutable or not. +// +// Scoped to stack values. Use middleware#ClearStackValues to clear all stack +// values. +func SetHostnameImmutable(ctx context.Context, value bool) context.Context { + return middleware.WithStackValue(ctx, hostnameImmutableKey{}, value) +} + +// IsEndpointHostPrefixDisabled retrieves whether the hostname prefixing is +// disabled. +// +// Scoped to stack values. Use middleware#ClearStackValues to clear all stack +// values. +func IsEndpointHostPrefixDisabled(ctx context.Context) (v bool) { + v, _ = middleware.GetStackValue(ctx, hostPrefixDisableKey{}).(bool) + return v +} + +// DisableEndpointHostPrefix sets or modifies whether the request's endpoint host +// prefixing should be disabled. If value is true, endpoint host prefixing +// will be disabled. +// +// Scoped to stack values. Use middleware#ClearStackValues to clear all stack +// values. 
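// Editor's sketch: these accessors read and write middleware stack values, so
// an endpoint-resolution middleware could mark the hostname immutable for the
// rest of the attempt like this (hypothetical call site).
func exampleFreezeHostname(ctx context.Context) context.Context {
	ctx = SetHostnameImmutable(ctx, true)
	// Downstream middleware can then check the flag before rewriting the host.
	_ = GetHostnameImmutable(ctx) // true
	return ctx
}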
+func DisableEndpointHostPrefix(ctx context.Context, value bool) context.Context {
+	return middleware.WithStackValue(ctx, hostPrefixDisableKey{}, value)
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_min_proto.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_min_proto.go
new file mode 100644
index 00000000000..326cb8a6cab
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_min_proto.go
@@ -0,0 +1,79 @@
+package http
+
+import (
+	"context"
+	"fmt"
+	"github.com/aws/smithy-go/middleware"
+	"strings"
+)
+
+// MinimumProtocolError is an error type indicating that the established connection did not meet the expected minimum
+// HTTP protocol version.
+type MinimumProtocolError struct {
+	proto              string
+	expectedProtoMajor int
+	expectedProtoMinor int
+}
+
+// Error returns the error message.
+func (m *MinimumProtocolError) Error() string {
+	return fmt.Sprintf("operation requires minimum HTTP protocol of HTTP/%d.%d, but was %s",
+		m.expectedProtoMajor, m.expectedProtoMinor, m.proto)
+}
+
+// RequireMinimumProtocol is a deserialization middleware that asserts that the established HTTP connection
+// meets the minimum major and minor version.
+type RequireMinimumProtocol struct {
+	ProtoMajor int
+	ProtoMinor int
+}
+
+// AddRequireMinimumProtocol adds the RequireMinimumProtocol middleware to the stack using the provided minimum
+// protocol major and minor version.
+func AddRequireMinimumProtocol(stack *middleware.Stack, major, minor int) error {
+	return stack.Deserialize.Insert(&RequireMinimumProtocol{
+		ProtoMajor: major,
+		ProtoMinor: minor,
+	}, "OperationDeserializer", middleware.Before)
+}
+
+// ID returns the middleware identifier string.
+func (r *RequireMinimumProtocol) ID() string {
+	return "RequireMinimumProtocol"
+}
+
+// HandleDeserialize asserts that the established connection is an HTTP connection with the minimum major and minor
+// protocol version.
+func (r *RequireMinimumProtocol) HandleDeserialize(
+	ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*Response)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown transport type: %T", out.RawResponse)
+	}
+
+	if !strings.HasPrefix(response.Proto, "HTTP") {
+		return out, metadata, &MinimumProtocolError{
+			proto:              response.Proto,
+			expectedProtoMajor: r.ProtoMajor,
+			expectedProtoMinor: r.ProtoMinor,
+		}
+	}
+
+	if response.ProtoMajor < r.ProtoMajor || response.ProtoMinor < r.ProtoMinor {
+		return out, metadata, &MinimumProtocolError{
+			proto:              response.Proto,
+			expectedProtoMajor: r.ProtoMajor,
+			expectedProtoMinor: r.ProtoMinor,
+		}
+	}
+
+	return out, metadata, err
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/request.go b/vendor/github.com/aws/smithy-go/transport/http/request.go
new file mode 100644
index 00000000000..7177d6f957c
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/request.go
@@ -0,0 +1,189 @@
+package http
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strings"
+
+	iointernal "github.com/aws/smithy-go/transport/http/internal/io"
+)
+
+// Request provides the HTTP specific request structure for HTTP specific
+// middleware steps to use to serialize input, and send an operation's request.
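// Editor's sketch: enforcing HTTP/2 or newer on the responses a stack
// receives, using the helper defined above. An HTTP/1.1 response would then
// surface as a *MinimumProtocolError from the deserialize step.
func exampleRequireHTTP2(stack *middleware.Stack) error {
	return AddRequireMinimumProtocol(stack, 2, 0)
}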
+type Request struct { + *http.Request + stream io.Reader + isStreamSeekable bool + streamStartPos int64 +} + +// NewStackRequest returns an initialized request ready to be populated with the +// HTTP request details. Returns empty interface so the function can be used as +// a parameter to the Smithy middleware Stack constructor. +func NewStackRequest() interface{} { + return &Request{ + Request: &http.Request{ + URL: &url.URL{}, + Header: http.Header{}, + ContentLength: -1, // default to unknown length + }, + } +} + +// IsHTTPS returns if the request is HTTPS. Returns false if no endpoint URL is set. +func (r *Request) IsHTTPS() bool { + if r.URL == nil { + return false + } + return strings.EqualFold(r.URL.Scheme, "https") +} + +// Clone returns a deep copy of the Request for the new context. A reference to +// the Stream is copied, but the underlying stream is not copied. +func (r *Request) Clone() *Request { + rc := *r + rc.Request = rc.Request.Clone(context.TODO()) + return &rc +} + +// StreamLength returns the number of bytes of the serialized stream attached +// to the request and ok set. If the length cannot be determined, an error will +// be returned. +func (r *Request) StreamLength() (size int64, ok bool, err error) { + return streamLength(r.stream, r.isStreamSeekable, r.streamStartPos) +} + +func streamLength(stream io.Reader, seekable bool, startPos int64) (size int64, ok bool, err error) { + if stream == nil { + return 0, true, nil + } + + if l, ok := stream.(interface{ Len() int }); ok { + return int64(l.Len()), true, nil + } + + if !seekable { + return 0, false, nil + } + + s := stream.(io.Seeker) + endOffset, err := s.Seek(0, io.SeekEnd) + if err != nil { + return 0, false, err + } + + // The reason to seek to streamStartPos instead of 0 is to ensure that the + // SDK only sends the stream from the starting position the user's + // application provided it to the SDK at. For example application opens a + // file, and wants to skip the first N bytes uploading the rest. The + // application would move the file's offset N bytes, then hand it off to + // the SDK to send the remaining. The SDK should respect that initial offset. + _, err = s.Seek(startPos, io.SeekStart) + if err != nil { + return 0, false, err + } + + return endOffset - startPos, true, nil +} + +// RewindStream will rewind the io.Reader to the relative start position if it +// is an io.Seeker. +func (r *Request) RewindStream() error { + // If there is no stream there is nothing to rewind. + if r.stream == nil { + return nil + } + + if !r.isStreamSeekable { + return fmt.Errorf("request stream is not seekable") + } + _, err := r.stream.(io.Seeker).Seek(r.streamStartPos, io.SeekStart) + return err +} + +// GetStream returns the request stream io.Reader if a stream is set. If no +// stream is present nil will be returned. +func (r *Request) GetStream() io.Reader { + return r.stream +} + +// IsStreamSeekable returns whether the stream is seekable. +func (r *Request) IsStreamSeekable() bool { + return r.isStreamSeekable +} + +// SetStream returns a clone of the request with the stream set to the provided +// reader. May return an error if the provided reader is seekable but returns +// an error. 
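// Editor's sketch (assumes "io" and "strings" imports): SetStream, defined
// below, records the reader's current offset, and StreamLength reports only
// the bytes from that offset, so a reader handed off mid-stream sends just
// the remaining data and RewindStream returns to that same position.
func exampleStreamOffsets() (int64, error) {
	body := strings.NewReader("hello world")
	if _, err := body.Seek(6, io.SeekStart); err != nil { // skip "hello "
		return 0, err
	}
	req, err := NewStackRequest().(*Request).SetStream(body)
	if err != nil {
		return 0, err
	}
	n, _, err := req.StreamLength() // n == 5, the length of "world"
	return n, err
}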
+func (r *Request) SetStream(reader io.Reader) (rc *Request, err error) { + rc = r.Clone() + + if reader == http.NoBody { + reader = nil + } + + var isStreamSeekable bool + var streamStartPos int64 + switch v := reader.(type) { + case io.Seeker: + n, err := v.Seek(0, io.SeekCurrent) + if err != nil { + return r, err + } + isStreamSeekable = true + streamStartPos = n + default: + // If the stream length can be determined, and is determined to be empty, + // use a nil stream to prevent confusion between empty vs not-empty + // streams. + length, ok, err := streamLength(reader, false, 0) + if err != nil { + return nil, err + } else if ok && length == 0 { + reader = nil + } + } + + rc.stream = reader + rc.isStreamSeekable = isStreamSeekable + rc.streamStartPos = streamStartPos + + return rc, err +} + +// Build returns a build standard HTTP request value from the Smithy request. +// The request's stream is wrapped in a safe container that allows it to be +// reused for subsequent attempts. +func (r *Request) Build(ctx context.Context) *http.Request { + req := r.Request.Clone(ctx) + + if r.stream == nil && req.ContentLength == -1 { + req.ContentLength = 0 + } + + switch stream := r.stream.(type) { + case *io.PipeReader: + req.Body = ioutil.NopCloser(stream) + req.ContentLength = -1 + default: + // HTTP Client Request must only have a non-nil body if the + // ContentLength is explicitly unknown (-1) or non-zero. The HTTP + // Client will interpret a non-nil body and ContentLength 0 as + // "unknown". This is unwanted behavior. + if req.ContentLength != 0 && r.stream != nil { + req.Body = iointernal.NewSafeReadCloser(ioutil.NopCloser(stream)) + } + } + + return req +} + +// RequestCloner is a function that can take an input request type and clone the request +// for use in a subsequent retry attempt. +func RequestCloner(v interface{}) interface{} { + return v.(*Request).Clone() +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/response.go b/vendor/github.com/aws/smithy-go/transport/http/response.go new file mode 100644 index 00000000000..0c13bfcc8e2 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/response.go @@ -0,0 +1,34 @@ +package http + +import ( + "fmt" + "net/http" +) + +// Response provides the HTTP specific response structure for HTTP specific +// middleware steps to use to deserialize the response from an operation call. +type Response struct { + *http.Response +} + +// ResponseError provides the HTTP centric error type wrapping the underlying +// error with the HTTP response value. +type ResponseError struct { + Response *Response + Err error +} + +// HTTPStatusCode returns the HTTP response status code received from the service. +func (e *ResponseError) HTTPStatusCode() int { return e.Response.StatusCode } + +// HTTPResponse returns the HTTP response received from the service. +func (e *ResponseError) HTTPResponse() *Response { return e.Response } + +// Unwrap returns the nested error if any, or nil. 
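// Editor's sketch (assumes an "errors" import): because ResponseError
// implements Unwrap, defined below, callers can pull the HTTP status code
// out of a wrapped operation error with errors.As.
func exampleStatusCode(err error) (int, bool) {
	var respErr *ResponseError
	if errors.As(err, &respErr) {
		return respErr.HTTPStatusCode(), true
	}
	return 0, false
}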
+func (e *ResponseError) Unwrap() error { return e.Err } + +func (e *ResponseError) Error() string { + return fmt.Sprintf( + "http response error StatusCode: %d, %v", + e.Response.StatusCode, e.Err) +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/time.go b/vendor/github.com/aws/smithy-go/transport/http/time.go new file mode 100644 index 00000000000..607b196a8bd --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/time.go @@ -0,0 +1,13 @@ +package http + +import ( + "time" + + smithytime "github.com/aws/smithy-go/time" +) + +// ParseTime parses a time string like the HTTP Date header. This uses a more +// relaxed rule set for date parsing compared to the standard library. +func ParseTime(text string) (t time.Time, err error) { + return smithytime.ParseHTTPDate(text) +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/url.go b/vendor/github.com/aws/smithy-go/transport/http/url.go new file mode 100644 index 00000000000..60a5fc1002a --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/url.go @@ -0,0 +1,44 @@ +package http + +import "strings" + +// JoinPath returns an absolute URL path composed of the two paths provided. +// Enforces that the returned path begins with '/'. If added path is empty the +// returned path suffix will match the first parameter suffix. +func JoinPath(a, b string) string { + if len(a) == 0 { + a = "/" + } else if a[0] != '/' { + a = "/" + a + } + + if len(b) != 0 && b[0] == '/' { + b = b[1:] + } + + if len(b) != 0 && len(a) > 1 && a[len(a)-1] != '/' { + a = a + "/" + } + + return a + b +} + +// JoinRawQuery returns an absolute raw query expression. Any duplicate '&' +// will be collapsed to single separator between values. +func JoinRawQuery(a, b string) string { + a = strings.TrimFunc(a, isAmpersand) + b = strings.TrimFunc(b, isAmpersand) + + if len(a) == 0 { + return b + } + if len(b) == 0 { + return a + } + + return a + "&" + b +} + +func isAmpersand(v rune) bool { + return v == '&' +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/user_agent.go b/vendor/github.com/aws/smithy-go/transport/http/user_agent.go new file mode 100644 index 00000000000..71a7e0d8af5 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/user_agent.go @@ -0,0 +1,37 @@ +package http + +import ( + "strings" +) + +// UserAgentBuilder is a builder for a HTTP User-Agent string. +type UserAgentBuilder struct { + sb strings.Builder +} + +// NewUserAgentBuilder returns a new UserAgentBuilder. +func NewUserAgentBuilder() *UserAgentBuilder { + return &UserAgentBuilder{sb: strings.Builder{}} +} + +// AddKey adds the named component/product to the agent string +func (u *UserAgentBuilder) AddKey(key string) { + u.appendTo(key) +} + +// AddKeyValue adds the named key to the agent string with the given value. +func (u *UserAgentBuilder) AddKeyValue(key, value string) { + u.appendTo(key + "/" + value) +} + +// Build returns the constructed User-Agent string. May be called multiple times. 
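// Editor's sketch illustrating the joining rules above and the builder whose
// Build method follows; the product names and versions are hypothetical.
func exampleURLAndUserAgent() {
	_ = JoinPath("/v1", "traces")    // "/v1/traces"
	_ = JoinPath("", "traces")       // "/traces"
	_ = JoinRawQuery("a=1&", "&b=2") // "a=1&b=2"

	b := NewUserAgentBuilder()
	b.AddKey("example-app")
	b.AddKeyValue("smithy-go", "1.0")
	_ = b.Build() // "example-app smithy-go/1.0"
}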
+func (u *UserAgentBuilder) Build() string { + return u.sb.String() +} + +func (u *UserAgentBuilder) appendTo(value string) { + if u.sb.Len() > 0 { + u.sb.WriteRune(' ') + } + u.sb.WriteString(value) +} diff --git a/vendor/github.com/aws/smithy-go/validation.go b/vendor/github.com/aws/smithy-go/validation.go new file mode 100644 index 00000000000..b5eedc1f90a --- /dev/null +++ b/vendor/github.com/aws/smithy-go/validation.go @@ -0,0 +1,140 @@ +package smithy + +import ( + "bytes" + "fmt" + "strings" +) + +// An InvalidParamsError provides wrapping of invalid parameter errors found when +// validating API operation input parameters. +type InvalidParamsError struct { + // Context is the base context of the invalid parameter group. + Context string + errs []InvalidParamError +} + +// Add adds a new invalid parameter error to the collection of invalid +// parameters. The context of the invalid parameter will be updated to reflect +// this collection. +func (e *InvalidParamsError) Add(err InvalidParamError) { + err.SetContext(e.Context) + e.errs = append(e.errs, err) +} + +// AddNested adds the invalid parameter errors from another InvalidParamsError +// value into this collection. The nested errors will have their nested context +// updated and base context to reflect the merging. +// +// Use for nested validations errors. +func (e *InvalidParamsError) AddNested(nestedCtx string, nested InvalidParamsError) { + for _, err := range nested.errs { + err.SetContext(e.Context) + err.AddNestedContext(nestedCtx) + e.errs = append(e.errs, err) + } +} + +// Len returns the number of invalid parameter errors +func (e *InvalidParamsError) Len() int { + return len(e.errs) +} + +// Error returns the string formatted form of the invalid parameters. +func (e InvalidParamsError) Error() string { + w := &bytes.Buffer{} + fmt.Fprintf(w, "%d validation error(s) found.\n", len(e.errs)) + + for _, err := range e.errs { + fmt.Fprintf(w, "- %s\n", err.Error()) + } + + return w.String() +} + +// Errs returns a slice of the invalid parameters +func (e InvalidParamsError) Errs() []error { + errs := make([]error, len(e.errs)) + for i := 0; i < len(errs); i++ { + errs[i] = e.errs[i] + } + + return errs +} + +// An InvalidParamError represents an invalid parameter error type. +type InvalidParamError interface { + error + + // Field name the error occurred on. + Field() string + + // SetContext updates the context of the error. + SetContext(string) + + // AddNestedContext updates the error's context to include a nested level. + AddNestedContext(string) +} + +type invalidParamError struct { + context string + nestedContext string + field string + reason string +} + +// Error returns the string version of the invalid parameter error. +func (e invalidParamError) Error() string { + return fmt.Sprintf("%s, %s.", e.reason, e.Field()) +} + +// Field Returns the field and context the error occurred. +func (e invalidParamError) Field() string { + sb := &strings.Builder{} + sb.WriteString(e.context) + if sb.Len() > 0 { + if len(e.nestedContext) == 0 || (len(e.nestedContext) > 0 && e.nestedContext[:1] != "[") { + sb.WriteRune('.') + } + } + if len(e.nestedContext) > 0 { + sb.WriteString(e.nestedContext) + sb.WriteRune('.') + } + sb.WriteString(e.field) + return sb.String() +} + +// SetContext updates the base context of the error. +func (e *invalidParamError) SetContext(ctx string) { + e.context = ctx +} + +// AddNestedContext prepends a context to the field's path. 
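// Editor's sketch: how a generated input validator might accumulate errors
// with this type (the operation and field names are hypothetical;
// NewErrParamRequired is defined below). The returned error prints one line
// per missing field, each prefixed with the base context.
func exampleValidateInput(name, id string) error {
	e := &InvalidParamsError{Context: "ExampleOperationInput"}
	if len(name) == 0 {
		e.Add(NewErrParamRequired("Name"))
	}
	if len(id) == 0 {
		e.Add(NewErrParamRequired("Id"))
	}
	if e.Len() > 0 {
		return e
	}
	return nil
}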
+func (e *invalidParamError) AddNestedContext(ctx string) {
+	if len(e.nestedContext) == 0 {
+		e.nestedContext = ctx
+		return
+	}
+	// Check if our nested context is an index into a slice or map
+	if e.nestedContext[:1] != "[" {
+		e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext)
+		return
+	}
+	e.nestedContext = ctx + e.nestedContext
+}
+
+// A ParamRequiredError represents a required parameter error.
+type ParamRequiredError struct {
+	invalidParamError
+}
+
+// NewErrParamRequired creates a new required parameter error.
+func NewErrParamRequired(field string) *ParamRequiredError {
+	return &ParamRequiredError{
+		invalidParamError{
+			field:  field,
+			reason: fmt.Sprintf("missing required field"),
+		},
+	}
+}
diff --git a/vendor/github.com/cespare/xxhash/LICENSE.txt b/vendor/github.com/cespare/xxhash/LICENSE.txt
deleted file mode 100644
index 24b53065f40..00000000000
--- a/vendor/github.com/cespare/xxhash/LICENSE.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-Copyright (c) 2016 Caleb Spare
-
-MIT License
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/cespare/xxhash/README.md b/vendor/github.com/cespare/xxhash/README.md
deleted file mode 100644
index 0982fd25e5c..00000000000
--- a/vendor/github.com/cespare/xxhash/README.md
+++ /dev/null
@@ -1,50 +0,0 @@
-# xxhash
-
-[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash)
-
-xxhash is a Go implementation of the 64-bit
-[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
-high-quality hashing algorithm that is much faster than anything in the Go
-standard library.
-
-The API is very small, taking its cue from the other hashing packages in the
-standard library:
-
-    $ go doc github.com/cespare/xxhash !
-    package xxhash // import "github.com/cespare/xxhash"
-
-    Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
-    at http://cyan4973.github.io/xxHash/.
-
-    func New() hash.Hash64
-    func Sum64(b []byte) uint64
-    func Sum64String(s string) uint64
-
-This implementation provides a fast pure-Go implementation and an even faster
-assembly implementation for amd64.
- -## Benchmarks - -Here are some quick benchmarks comparing the pure-Go and assembly -implementations of Sum64 against another popular Go XXH64 implementation, -[github.com/OneOfOne/xxhash](https://github.com/OneOfOne/xxhash): - -| input size | OneOfOne | cespare (purego) | cespare | -| --- | --- | --- | --- | -| 5 B | 416 MB/s | 720 MB/s | 872 MB/s | -| 100 B | 3980 MB/s | 5013 MB/s | 5252 MB/s | -| 4 KB | 12727 MB/s | 12999 MB/s | 13026 MB/s | -| 10 MB | 9879 MB/s | 10775 MB/s | 10913 MB/s | - -These numbers were generated with: - -``` -$ go test -benchtime 10s -bench '/OneOfOne,' -$ go test -tags purego -benchtime 10s -bench '/xxhash,' -$ go test -benchtime 10s -bench '/xxhash,' -``` - -## Projects using this package - -- [InfluxDB](https://github.com/influxdata/influxdb) -- [Prometheus](https://github.com/prometheus/prometheus) diff --git a/vendor/github.com/cespare/xxhash/rotate.go b/vendor/github.com/cespare/xxhash/rotate.go deleted file mode 100644 index f3eac5ebc02..00000000000 --- a/vendor/github.com/cespare/xxhash/rotate.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build !go1.9 - -package xxhash - -// TODO(caleb): After Go 1.10 comes out, remove this fallback code. - -func rol1(x uint64) uint64 { return (x << 1) | (x >> (64 - 1)) } -func rol7(x uint64) uint64 { return (x << 7) | (x >> (64 - 7)) } -func rol11(x uint64) uint64 { return (x << 11) | (x >> (64 - 11)) } -func rol12(x uint64) uint64 { return (x << 12) | (x >> (64 - 12)) } -func rol18(x uint64) uint64 { return (x << 18) | (x >> (64 - 18)) } -func rol23(x uint64) uint64 { return (x << 23) | (x >> (64 - 23)) } -func rol27(x uint64) uint64 { return (x << 27) | (x >> (64 - 27)) } -func rol31(x uint64) uint64 { return (x << 31) | (x >> (64 - 31)) } diff --git a/vendor/github.com/cespare/xxhash/rotate19.go b/vendor/github.com/cespare/xxhash/rotate19.go deleted file mode 100644 index b99612bab88..00000000000 --- a/vendor/github.com/cespare/xxhash/rotate19.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build go1.9 - -package xxhash - -import "math/bits" - -func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } -func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } -func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } -func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } -func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } -func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } -func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } -func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/cespare/xxhash/xxhash.go b/vendor/github.com/cespare/xxhash/xxhash.go deleted file mode 100644 index f896bd28f05..00000000000 --- a/vendor/github.com/cespare/xxhash/xxhash.go +++ /dev/null @@ -1,168 +0,0 @@ -// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described -// at http://cyan4973.github.io/xxHash/. -package xxhash - -import ( - "encoding/binary" - "hash" -) - -const ( - prime1 uint64 = 11400714785074694791 - prime2 uint64 = 14029467366897019727 - prime3 uint64 = 1609587929392839161 - prime4 uint64 = 9650029242287828579 - prime5 uint64 = 2870177450012600261 -) - -// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where -// possible in the Go code is worth a small (but measurable) performance boost -// by avoiding some MOVQs. 
Vars are needed for the asm and also are useful for -// convenience in the Go code in a few places where we need to intentionally -// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the -// result overflows a uint64). -var ( - prime1v = prime1 - prime2v = prime2 - prime3v = prime3 - prime4v = prime4 - prime5v = prime5 -) - -type xxh struct { - v1 uint64 - v2 uint64 - v3 uint64 - v4 uint64 - total int - mem [32]byte - n int // how much of mem is used -} - -// New creates a new hash.Hash64 that implements the 64-bit xxHash algorithm. -func New() hash.Hash64 { - var x xxh - x.Reset() - return &x -} - -func (x *xxh) Reset() { - x.n = 0 - x.total = 0 - x.v1 = prime1v + prime2 - x.v2 = prime2 - x.v3 = 0 - x.v4 = -prime1v -} - -func (x *xxh) Size() int { return 8 } -func (x *xxh) BlockSize() int { return 32 } - -// Write adds more data to x. It always returns len(b), nil. -func (x *xxh) Write(b []byte) (n int, err error) { - n = len(b) - x.total += len(b) - - if x.n+len(b) < 32 { - // This new data doesn't even fill the current block. - copy(x.mem[x.n:], b) - x.n += len(b) - return - } - - if x.n > 0 { - // Finish off the partial block. - copy(x.mem[x.n:], b) - x.v1 = round(x.v1, u64(x.mem[0:8])) - x.v2 = round(x.v2, u64(x.mem[8:16])) - x.v3 = round(x.v3, u64(x.mem[16:24])) - x.v4 = round(x.v4, u64(x.mem[24:32])) - b = b[32-x.n:] - x.n = 0 - } - - if len(b) >= 32 { - // One or more full blocks left. - b = writeBlocks(x, b) - } - - // Store any remaining partial block. - copy(x.mem[:], b) - x.n = len(b) - - return -} - -func (x *xxh) Sum(b []byte) []byte { - s := x.Sum64() - return append( - b, - byte(s>>56), - byte(s>>48), - byte(s>>40), - byte(s>>32), - byte(s>>24), - byte(s>>16), - byte(s>>8), - byte(s), - ) -} - -func (x *xxh) Sum64() uint64 { - var h uint64 - - if x.total >= 32 { - v1, v2, v3, v4 := x.v1, x.v2, x.v3, x.v4 - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = x.v3 + prime5 - } - - h += uint64(x.total) - - i, end := 0, x.n - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(x.mem[i:i+8])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if i+4 <= end { - h ^= uint64(u32(x.mem[i:i+4])) * prime1 - h = rol23(h)*prime2 + prime3 - i += 4 - } - for i < end { - h ^= uint64(x.mem[i]) * prime5 - h = rol11(h) * prime1 - i++ - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } -func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } - -func round(acc, input uint64) uint64 { - acc += input * prime2 - acc = rol31(acc) - acc *= prime1 - return acc -} - -func mergeRound(acc, val uint64) uint64 { - val = round(0, val) - acc ^= val - acc = acc*prime1 + prime4 - return acc -} diff --git a/vendor/github.com/cespare/xxhash/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/xxhash_amd64.go deleted file mode 100644 index d6176526802..00000000000 --- a/vendor/github.com/cespare/xxhash/xxhash_amd64.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !appengine -// +build gc -// +build !purego - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. 
-// -//go:noescape -func Sum64(b []byte) uint64 - -func writeBlocks(x *xxh, b []byte) []byte diff --git a/vendor/github.com/cespare/xxhash/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/xxhash_amd64.s deleted file mode 100644 index 757f2011f0f..00000000000 --- a/vendor/github.com/cespare/xxhash/xxhash_amd64.s +++ /dev/null @@ -1,233 +0,0 @@ -// +build !appengine -// +build gc -// +build !purego - -#include "textflag.h" - -// Register allocation: -// AX h -// CX pointer to advance through b -// DX n -// BX loop end -// R8 v1, k1 -// R9 v2 -// R10 v3 -// R11 v4 -// R12 tmp -// R13 prime1v -// R14 prime2v -// R15 prime4v - -// round reads from and advances the buffer pointer in CX. -// It assumes that R13 has prime1v and R14 has prime2v. -#define round(r) \ - MOVQ (CX), R12 \ - ADDQ $8, CX \ - IMULQ R14, R12 \ - ADDQ R12, r \ - ROLQ $31, r \ - IMULQ R13, r - -// mergeRound applies a merge round on the two registers acc and val. -// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v. -#define mergeRound(acc, val) \ - IMULQ R14, val \ - ROLQ $31, val \ - IMULQ R13, val \ - XORQ val, acc \ - IMULQ R13, acc \ - ADDQ R15, acc - -// func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT, $0-32 - // Load fixed primes. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - MOVQ ·prime4v(SB), R15 - - // Load slice. - MOVQ b_base+0(FP), CX - MOVQ b_len+8(FP), DX - LEAQ (CX)(DX*1), BX - - // The first loop limit will be len(b)-32. - SUBQ $32, BX - - // Check whether we have at least one block. - CMPQ DX, $32 - JLT noBlocks - - // Set up initial state (v1, v2, v3, v4). - MOVQ R13, R8 - ADDQ R14, R8 - MOVQ R14, R9 - XORQ R10, R10 - XORQ R11, R11 - SUBQ R13, R11 - - // Loop until CX > BX. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ CX, BX - JLE blockLoop - - MOVQ R8, AX - ROLQ $1, AX - MOVQ R9, R12 - ROLQ $7, R12 - ADDQ R12, AX - MOVQ R10, R12 - ROLQ $12, R12 - ADDQ R12, AX - MOVQ R11, R12 - ROLQ $18, R12 - ADDQ R12, AX - - mergeRound(AX, R8) - mergeRound(AX, R9) - mergeRound(AX, R10) - mergeRound(AX, R11) - - JMP afterBlocks - -noBlocks: - MOVQ ·prime5v(SB), AX - -afterBlocks: - ADDQ DX, AX - - // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8. - ADDQ $24, BX - - CMPQ CX, BX - JG fourByte - -wordLoop: - // Calculate k1. - MOVQ (CX), R8 - ADDQ $8, CX - IMULQ R14, R8 - ROLQ $31, R8 - IMULQ R13, R8 - - XORQ R8, AX - ROLQ $27, AX - IMULQ R13, AX - ADDQ R15, AX - - CMPQ CX, BX - JLE wordLoop - -fourByte: - ADDQ $4, BX - CMPQ CX, BX - JG singles - - MOVL (CX), R8 - ADDQ $4, CX - IMULQ R13, R8 - XORQ R8, AX - - ROLQ $23, AX - IMULQ R14, AX - ADDQ ·prime3v(SB), AX - -singles: - ADDQ $4, BX - CMPQ CX, BX - JGE finalize - -singlesLoop: - MOVBQZX (CX), R12 - ADDQ $1, CX - IMULQ ·prime5v(SB), R12 - XORQ R12, AX - - ROLQ $11, AX - IMULQ R13, AX - - CMPQ CX, BX - JL singlesLoop - -finalize: - MOVQ AX, R12 - SHRQ $33, R12 - XORQ R12, AX - IMULQ R14, AX - MOVQ AX, R12 - SHRQ $29, R12 - XORQ R12, AX - IMULQ ·prime3v(SB), AX - MOVQ AX, R12 - SHRQ $32, R12 - XORQ R12, AX - - MOVQ AX, ret+24(FP) - RET - -// writeBlocks uses the same registers as above except that it uses AX to store -// the x pointer. - -// func writeBlocks(x *xxh, b []byte) []byte -TEXT ·writeBlocks(SB), NOSPLIT, $0-56 - // Load fixed primes needed for round. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - - // Load slice. 
- MOVQ b_base+8(FP), CX - MOVQ CX, ret_base+32(FP) // initialize return base pointer; see NOTE below - MOVQ b_len+16(FP), DX - LEAQ (CX)(DX*1), BX - SUBQ $32, BX - - // Load vN from x. - MOVQ x+0(FP), AX - MOVQ 0(AX), R8 // v1 - MOVQ 8(AX), R9 // v2 - MOVQ 16(AX), R10 // v3 - MOVQ 24(AX), R11 // v4 - - // We don't need to check the loop condition here; this function is - // always called with at least one block of data to process. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ CX, BX - JLE blockLoop - - // Copy vN back to x. - MOVQ R8, 0(AX) - MOVQ R9, 8(AX) - MOVQ R10, 16(AX) - MOVQ R11, 24(AX) - - // Construct return slice. - // NOTE: It's important that we don't construct a slice that has a base - // pointer off the end of the original slice, as in Go 1.7+ this will - // cause runtime crashes. (See discussion in, for example, - // https://github.com/golang/go/issues/16772.) - // Therefore, we calculate the length/cap first, and if they're zero, we - // keep the old base. This is what the compiler does as well if you - // write code like - // b = b[len(b):] - - // New length is 32 - (CX - BX) -> BX+32 - CX. - ADDQ $32, BX - SUBQ CX, BX - JZ afterSetBase - - MOVQ CX, ret_base+32(FP) - -afterSetBase: - MOVQ BX, ret_len+40(FP) - MOVQ BX, ret_cap+48(FP) // set cap == len - - RET diff --git a/vendor/github.com/cespare/xxhash/xxhash_other.go b/vendor/github.com/cespare/xxhash/xxhash_other.go deleted file mode 100644 index c68d13f89e9..00000000000 --- a/vendor/github.com/cespare/xxhash/xxhash_other.go +++ /dev/null @@ -1,75 +0,0 @@ -// +build !amd64 appengine !gc purego - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. -func Sum64(b []byte) uint64 { - // A simpler version would be - // x := New() - // x.Write(b) - // return x.Sum64() - // but this is faster, particularly for small inputs. - - n := len(b) - var h uint64 - - if n >= 32 { - v1 := prime1v + prime2 - v2 := prime2 - v3 := uint64(0) - v4 := -prime1v - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = prime5 - } - - h += uint64(n) - - i, end := 0, len(b) - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(b[i:i+8:len(b)])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if i+4 <= end { - h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 - h = rol23(h)*prime2 + prime3 - i += 4 - } - for ; i < end; i++ { - h ^= uint64(b[i]) * prime5 - h = rol11(h) * prime1 - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -func writeBlocks(x *xxh, b []byte) []byte { - v1, v2, v3, v4 := x.v1, x.v2, x.v3, x.v4 - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - x.v1, x.v2, x.v3, x.v4 = v1, v2, v3, v4 - return b -} diff --git a/vendor/github.com/cespare/xxhash/xxhash_safe.go b/vendor/github.com/cespare/xxhash/xxhash_safe.go deleted file mode 100644 index dfa15ab7e27..00000000000 --- a/vendor/github.com/cespare/xxhash/xxhash_safe.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build appengine - -// This file contains the safe implementations of otherwise unsafe-using code. 
- -package xxhash - -// Sum64String computes the 64-bit xxHash digest of s. -func Sum64String(s string) uint64 { - return Sum64([]byte(s)) -} diff --git a/vendor/github.com/cespare/xxhash/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/xxhash_unsafe.go deleted file mode 100644 index d2b64e8bb00..00000000000 --- a/vendor/github.com/cespare/xxhash/xxhash_unsafe.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build !appengine - -// This file encapsulates usage of unsafe. -// xxhash_safe.go contains the safe implementations. - -package xxhash - -import ( - "reflect" - "unsafe" -) - -// Sum64String computes the 64-bit xxHash digest of s. -// It may be faster than Sum64([]byte(s)) by avoiding a copy. -// -// TODO(caleb): Consider removing this if an optimization is ever added to make -// it unnecessary: https://golang.org/issue/2205. -// -// TODO(caleb): We still have a function call; we could instead write Go/asm -// copies of Sum64 for strings to squeeze out a bit more speed. -func Sum64String(s string) uint64 { - // See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ - // for some discussion about this unsafe conversion. - var b []byte - bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data - bh.Len = len(s) - bh.Cap = len(s) - return Sum64(b) -} diff --git a/vendor/github.com/ebitengine/purego/.gitignore b/vendor/github.com/ebitengine/purego/.gitignore new file mode 100644 index 00000000000..b25c15b81fa --- /dev/null +++ b/vendor/github.com/ebitengine/purego/.gitignore @@ -0,0 +1 @@ +*~ diff --git a/vendor/github.com/ebitengine/purego/LICENSE b/vendor/github.com/ebitengine/purego/LICENSE new file mode 100644 index 00000000000..8dada3edaf5 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/ebitengine/purego/README.md b/vendor/github.com/ebitengine/purego/README.md new file mode 100644 index 00000000000..f1ff9053ace --- /dev/null +++ b/vendor/github.com/ebitengine/purego/README.md @@ -0,0 +1,97 @@ +# purego +[![Go Reference](https://pkg.go.dev/badge/github.com/ebitengine/purego?GOOS=darwin.svg)](https://pkg.go.dev/github.com/ebitengine/purego?GOOS=darwin) + +A library for calling C functions from Go without Cgo. + +> This is beta software so expect bugs and potentially API breaking changes +> but each release will be tagged to avoid breaking people's code. +> Bug reports are encouraged. + +## Motivation + +The [Ebitengine](https://github.com/hajimehoshi/ebiten) game engine was ported to use only Go on Windows. This enabled +cross-compiling to Windows from any other operating system simply by setting `GOOS=windows`. The purego project was +born to bring that same vision to the other platforms supported by Ebitengine. 
+ +## Benefits + +- **Simple Cross-Compilation**: No C means you can build for other platforms easily without a C compiler. +- **Faster Compilation**: Efficiently cache your entirely Go builds. +- **Smaller Binaries**: Using Cgo generates a C wrapper function for each C function called. Purego doesn't! +- **Dynamic Linking**: Load symbols at runtime and use it as a plugin system. +- **Foreign Function Interface**: Call into other languages that are compiled into shared objects. +- **Cgo Fallback**: Works even with CGO_ENABLED=1 so incremental porting is possible. +This also means unsupported GOARCHs (freebsd/riscv64, linux/mips, etc.) will still work +except for float arguments and return values. + +## Supported Platforms + +- **FreeBSD**: amd64, arm64 +- **Linux**: amd64, arm64 +- **macOS / iOS**: amd64, arm64 +- **Windows**: 386*, amd64, arm*, arm64 + +`*` These architectures only support SyscallN and NewCallback + +## Example + +The example below only showcases purego use for macOS and Linux. The other platforms require special handling which can +be seen in the complete example at [examples/libc](https://github.com/ebitengine/purego/tree/main/examples/libc) which supports Windows and FreeBSD. + +```go +package main + +import ( + "fmt" + "runtime" + + "github.com/ebitengine/purego" +) + +func getSystemLibrary() string { + switch runtime.GOOS { + case "darwin": + return "/usr/lib/libSystem.B.dylib" + case "linux": + return "libc.so.6" + default: + panic(fmt.Errorf("GOOS=%s is not supported", runtime.GOOS)) + } +} + +func main() { + libc, err := purego.Dlopen(getSystemLibrary(), purego.RTLD_NOW|purego.RTLD_GLOBAL) + if err != nil { + panic(err) + } + var puts func(string) + purego.RegisterLibFunc(&puts, libc, "puts") + puts("Calling C from Go without Cgo!") +} +``` + +Then to run: `CGO_ENABLED=0 go run main.go` + +## Questions + +If you have questions about how to incorporate purego in your project or want to discuss +how it works join the [Discord](https://discord.gg/HzGZVD6BkY)! + +### External Code + +Purego uses code that originates from the Go runtime. These files are under the BSD-3 +License that can be found [in the Go Source](https://github.com/golang/go/blob/master/LICENSE). +This is a list of the copied files: + +* `abi_*.h` from package `runtime/cgo` +* `zcallback_darwin_*.s` from package `runtime` +* `internal/fakecgo/abi_*.h` from package `runtime/cgo` +* `internal/fakecgo/asm_GOARCH.s` from package `runtime/cgo` +* `internal/fakecgo/callbacks.go` from package `runtime/cgo` +* `internal/fakecgo/go_GOOS_GOARCH.go` from package `runtime/cgo` +* `internal/fakecgo/iscgo.go` from package `runtime/cgo` +* `internal/fakecgo/setenv.go` from package `runtime/cgo` +* `internal/fakecgo/freebsd.go` from package `runtime/cgo` + +The files `abi_*.h` and `internal/fakecgo/abi_*.h` are the same because Bazel does not support cross-package use of +`#include` so we need each one once per package. (cf. [issue](https://github.com/bazelbuild/rules_go/issues/3636)) diff --git a/vendor/github.com/ebitengine/purego/abi_amd64.h b/vendor/github.com/ebitengine/purego/abi_amd64.h new file mode 100644 index 00000000000..9949435fe9e --- /dev/null +++ b/vendor/github.com/ebitengine/purego/abi_amd64.h @@ -0,0 +1,99 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Macros for transitioning from the host ABI to Go ABI0. 
+// +// These save the frame pointer, so in general, functions that use +// these should have zero frame size to suppress the automatic frame +// pointer, though it's harmless to not do this. + +#ifdef GOOS_windows + +// REGS_HOST_TO_ABI0_STACK is the stack bytes used by +// PUSH_REGS_HOST_TO_ABI0. +#define REGS_HOST_TO_ABI0_STACK (28*8 + 8) + +// PUSH_REGS_HOST_TO_ABI0 prepares for transitioning from +// the host ABI to Go ABI0 code. It saves all registers that are +// callee-save in the host ABI and caller-save in Go ABI0 and prepares +// for entry to Go. +// +// Save DI SI BP BX R12 R13 R14 R15 X6-X15 registers and the DF flag. +// Clear the DF flag for the Go ABI. +// MXCSR matches the Go ABI, so we don't have to set that, +// and Go doesn't modify it, so we don't have to save it. +#define PUSH_REGS_HOST_TO_ABI0() \ + PUSHFQ \ + CLD \ + ADJSP $(REGS_HOST_TO_ABI0_STACK - 8) \ + MOVQ DI, (0*0)(SP) \ + MOVQ SI, (1*8)(SP) \ + MOVQ BP, (2*8)(SP) \ + MOVQ BX, (3*8)(SP) \ + MOVQ R12, (4*8)(SP) \ + MOVQ R13, (5*8)(SP) \ + MOVQ R14, (6*8)(SP) \ + MOVQ R15, (7*8)(SP) \ + MOVUPS X6, (8*8)(SP) \ + MOVUPS X7, (10*8)(SP) \ + MOVUPS X8, (12*8)(SP) \ + MOVUPS X9, (14*8)(SP) \ + MOVUPS X10, (16*8)(SP) \ + MOVUPS X11, (18*8)(SP) \ + MOVUPS X12, (20*8)(SP) \ + MOVUPS X13, (22*8)(SP) \ + MOVUPS X14, (24*8)(SP) \ + MOVUPS X15, (26*8)(SP) + +#define POP_REGS_HOST_TO_ABI0() \ + MOVQ (0*0)(SP), DI \ + MOVQ (1*8)(SP), SI \ + MOVQ (2*8)(SP), BP \ + MOVQ (3*8)(SP), BX \ + MOVQ (4*8)(SP), R12 \ + MOVQ (5*8)(SP), R13 \ + MOVQ (6*8)(SP), R14 \ + MOVQ (7*8)(SP), R15 \ + MOVUPS (8*8)(SP), X6 \ + MOVUPS (10*8)(SP), X7 \ + MOVUPS (12*8)(SP), X8 \ + MOVUPS (14*8)(SP), X9 \ + MOVUPS (16*8)(SP), X10 \ + MOVUPS (18*8)(SP), X11 \ + MOVUPS (20*8)(SP), X12 \ + MOVUPS (22*8)(SP), X13 \ + MOVUPS (24*8)(SP), X14 \ + MOVUPS (26*8)(SP), X15 \ + ADJSP $-(REGS_HOST_TO_ABI0_STACK - 8) \ + POPFQ + +#else +// SysV ABI + +#define REGS_HOST_TO_ABI0_STACK (6*8) + +// SysV MXCSR matches the Go ABI, so we don't have to set that, +// and Go doesn't modify it, so we don't have to save it. +// Both SysV and Go require DF to be cleared, so that's already clear. +// The SysV and Go frame pointer conventions are compatible. +#define PUSH_REGS_HOST_TO_ABI0() \ + ADJSP $(REGS_HOST_TO_ABI0_STACK) \ + MOVQ BP, (5*8)(SP) \ + LEAQ (5*8)(SP), BP \ + MOVQ BX, (0*8)(SP) \ + MOVQ R12, (1*8)(SP) \ + MOVQ R13, (2*8)(SP) \ + MOVQ R14, (3*8)(SP) \ + MOVQ R15, (4*8)(SP) + +#define POP_REGS_HOST_TO_ABI0() \ + MOVQ (0*8)(SP), BX \ + MOVQ (1*8)(SP), R12 \ + MOVQ (2*8)(SP), R13 \ + MOVQ (3*8)(SP), R14 \ + MOVQ (4*8)(SP), R15 \ + MOVQ (5*8)(SP), BP \ + ADJSP $-(REGS_HOST_TO_ABI0_STACK) + +#endif diff --git a/vendor/github.com/ebitengine/purego/abi_arm64.h b/vendor/github.com/ebitengine/purego/abi_arm64.h new file mode 100644 index 00000000000..5d5061ec1db --- /dev/null +++ b/vendor/github.com/ebitengine/purego/abi_arm64.h @@ -0,0 +1,39 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Macros for transitioning from the host ABI to Go ABI0. +// +// These macros save and restore the callee-saved registers +// from the stack, but they don't adjust stack pointer, so +// the user should prepare stack space in advance. +// SAVE_R19_TO_R28(offset) saves R19 ~ R28 to the stack space +// of ((offset)+0*8)(RSP) ~ ((offset)+9*8)(RSP). +// +// SAVE_F8_TO_F15(offset) saves F8 ~ F15 to the stack space +// of ((offset)+0*8)(RSP) ~ ((offset)+7*8)(RSP). 
+// +// R29 is not saved because Go will save and restore it. + +#define SAVE_R19_TO_R28(offset) \ + STP (R19, R20), ((offset)+0*8)(RSP) \ + STP (R21, R22), ((offset)+2*8)(RSP) \ + STP (R23, R24), ((offset)+4*8)(RSP) \ + STP (R25, R26), ((offset)+6*8)(RSP) \ + STP (R27, g), ((offset)+8*8)(RSP) +#define RESTORE_R19_TO_R28(offset) \ + LDP ((offset)+0*8)(RSP), (R19, R20) \ + LDP ((offset)+2*8)(RSP), (R21, R22) \ + LDP ((offset)+4*8)(RSP), (R23, R24) \ + LDP ((offset)+6*8)(RSP), (R25, R26) \ + LDP ((offset)+8*8)(RSP), (R27, g) /* R28 */ +#define SAVE_F8_TO_F15(offset) \ + FSTPD (F8, F9), ((offset)+0*8)(RSP) \ + FSTPD (F10, F11), ((offset)+2*8)(RSP) \ + FSTPD (F12, F13), ((offset)+4*8)(RSP) \ + FSTPD (F14, F15), ((offset)+6*8)(RSP) +#define RESTORE_F8_TO_F15(offset) \ + FLDPD ((offset)+0*8)(RSP), (F8, F9) \ + FLDPD ((offset)+2*8)(RSP), (F10, F11) \ + FLDPD ((offset)+4*8)(RSP), (F12, F13) \ + FLDPD ((offset)+6*8)(RSP), (F14, F15) diff --git a/vendor/github.com/ebitengine/purego/cgo.go b/vendor/github.com/ebitengine/purego/cgo.go new file mode 100644 index 00000000000..7d5abef3499 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/cgo.go @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build cgo && (darwin || freebsd || linux) + +package purego + +// if CGO_ENABLED=1 import the Cgo runtime to ensure that it is set up properly. +// This is required since some frameworks need TLS setup the C way which Go doesn't do. +// We currently don't support ios in fakecgo mode so force Cgo or fail +// Even if CGO_ENABLED=1 the Cgo runtime is not imported unless `import "C"` is used. +// which will import this package automatically. Normally this isn't an issue since it +// usually isn't possible to call into C without using that import. However, with purego +// it is since we don't use `import "C"`! +import ( + _ "runtime/cgo" + + _ "github.com/ebitengine/purego/internal/cgo" +) diff --git a/vendor/github.com/ebitengine/purego/dlerror.go b/vendor/github.com/ebitengine/purego/dlerror.go new file mode 100644 index 00000000000..95cdfe16f24 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/dlerror.go @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 The Ebitengine Authors + +//go:build darwin || freebsd || linux + +package purego + +// Dlerror represents an error value returned from Dlopen, Dlsym, or Dlclose. +// +// This type is not available on Windows as there is no counterpart to it on Windows. 
+type Dlerror struct { + s string +} + +func (e Dlerror) Error() string { + return e.s +} diff --git a/vendor/github.com/ebitengine/purego/dlfcn.go b/vendor/github.com/ebitengine/purego/dlfcn.go new file mode 100644 index 00000000000..f70a24584d6 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/dlfcn.go @@ -0,0 +1,99 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build (darwin || freebsd || linux) && !android && !faketime + +package purego + +import ( + "unsafe" +) + +// Unix Specification for dlfcn.h: https://pubs.opengroup.org/onlinepubs/7908799/xsh/dlfcn.h.html + +var ( + fnDlopen func(path string, mode int) uintptr + fnDlsym func(handle uintptr, name string) uintptr + fnDlerror func() string + fnDlclose func(handle uintptr) bool +) + +func init() { + RegisterFunc(&fnDlopen, dlopenABI0) + RegisterFunc(&fnDlsym, dlsymABI0) + RegisterFunc(&fnDlerror, dlerrorABI0) + RegisterFunc(&fnDlclose, dlcloseABI0) +} + +// Dlopen examines the dynamic library or bundle file specified by path. If the file is compatible +// with the current process and has not already been loaded into the +// current process, it is loaded and linked. After being linked, if it contains +// any initializer functions, they are called, before Dlopen +// returns. It returns a handle that can be used with Dlsym and Dlclose. +// A second call to Dlopen with the same path will return the same handle, but the internal +// reference count for the handle will be incremented. Therefore, all +// Dlopen calls should be balanced with a Dlclose call. +// +// This function is not available on Windows. +// Use [golang.org/x/sys/windows.LoadLibrary], [golang.org/x/sys/windows.LoadLibraryEx], +// [golang.org/x/sys/windows.NewLazyDLL], or [golang.org/x/sys/windows.NewLazySystemDLL] for Windows instead. +func Dlopen(path string, mode int) (uintptr, error) { + u := fnDlopen(path, mode) + if u == 0 { + return 0, Dlerror{fnDlerror()} + } + return u, nil +} + +// Dlsym takes a "handle" of a dynamic library returned by Dlopen and the symbol name. +// It returns the address where that symbol is loaded into memory. If the symbol is not found, +// in the specified library or any of the libraries that were automatically loaded by Dlopen +// when that library was loaded, Dlsym returns zero. +// +// This function is not available on Windows. +// Use [golang.org/x/sys/windows.GetProcAddress] for Windows instead. +func Dlsym(handle uintptr, name string) (uintptr, error) { + u := fnDlsym(handle, name) + if u == 0 { + return 0, Dlerror{fnDlerror()} + } + return u, nil +} + +// Dlclose decrements the reference count on the dynamic library handle. +// If the reference count drops to zero and no other loaded libraries +// use symbols in it, then the dynamic library is unloaded. +// +// This function is not available on Windows. +// Use [golang.org/x/sys/windows.FreeLibrary] for Windows instead. +func Dlclose(handle uintptr) error { + if fnDlclose(handle) { + return Dlerror{fnDlerror()} + } + return nil +} + +func loadSymbol(handle uintptr, name string) (uintptr, error) { + return Dlsym(handle, name) +} + +// these functions exist in dlfcn_stubs.s and are calling C functions linked to in dlfcn_GOOS.go +// the indirection is necessary because a function is actually a pointer to the pointer to the code. +// sadly, I do not know of anyway to remove the assembly stubs entirely because //go:linkname doesn't +// appear to work if you link directly to the C function on darwin arm64. 
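+//
+// An editorial usage sketch of Dlopen/Dlsym/RegisterFunc together (not part of
+// the upstream file; "libm.so.6" is an assumed Linux soname and error handling
+// is elided):
+//
+//	libm, _ := purego.Dlopen("libm.so.6", purego.RTLD_NOW|purego.RTLD_GLOBAL)
+//	cosAddr, _ := purego.Dlsym(libm, "cos")
+//	var cos func(float64) float64
+//	purego.RegisterFunc(&cos, cosAddr)
+//	fmt.Println(cos(0)) // prints 1
+//	_ = purego.Dlclose(libm)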
+ +//go:linkname dlopen dlopen +var dlopen uintptr +var dlopenABI0 = uintptr(unsafe.Pointer(&dlopen)) + +//go:linkname dlsym dlsym +var dlsym uintptr +var dlsymABI0 = uintptr(unsafe.Pointer(&dlsym)) + +//go:linkname dlclose dlclose +var dlclose uintptr +var dlcloseABI0 = uintptr(unsafe.Pointer(&dlclose)) + +//go:linkname dlerror dlerror +var dlerror uintptr +var dlerrorABI0 = uintptr(unsafe.Pointer(&dlerror)) diff --git a/vendor/github.com/ebitengine/purego/dlfcn_android.go b/vendor/github.com/ebitengine/purego/dlfcn_android.go new file mode 100644 index 00000000000..0d5341764ed --- /dev/null +++ b/vendor/github.com/ebitengine/purego/dlfcn_android.go @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2024 The Ebitengine Authors + +package purego + +import "github.com/ebitengine/purego/internal/cgo" + +// Source for constants: https://android.googlesource.com/platform/bionic/+/refs/heads/main/libc/include/dlfcn.h + +const ( + is64bit = 1 << (^uintptr(0) >> 63) / 2 + is32bit = 1 - is64bit + RTLD_DEFAULT = is32bit * 0xffffffff + RTLD_LAZY = 0x00000001 + RTLD_NOW = is64bit * 0x00000002 + RTLD_LOCAL = 0x00000000 + RTLD_GLOBAL = is64bit*0x00100 | is32bit*0x00000002 +) + +func Dlopen(path string, mode int) (uintptr, error) { + return cgo.Dlopen(path, mode) +} + +func Dlsym(handle uintptr, name string) (uintptr, error) { + return cgo.Dlsym(handle, name) +} + +func Dlclose(handle uintptr) error { + return cgo.Dlclose(handle) +} + +func loadSymbol(handle uintptr, name string) (uintptr, error) { + return Dlsym(handle, name) +} diff --git a/vendor/github.com/ebitengine/purego/dlfcn_darwin.go b/vendor/github.com/ebitengine/purego/dlfcn_darwin.go new file mode 100644 index 00000000000..5f876278a3e --- /dev/null +++ b/vendor/github.com/ebitengine/purego/dlfcn_darwin.go @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +package purego + +// Source for constants: https://opensource.apple.com/source/dyld/dyld-360.14/include/dlfcn.h.auto.html + +const ( + RTLD_DEFAULT = 1<<64 - 2 // Pseudo-handle for dlsym so search for any loaded symbol + RTLD_LAZY = 0x1 // Relocations are performed at an implementation-dependent time. + RTLD_NOW = 0x2 // Relocations are performed when the object is loaded. + RTLD_LOCAL = 0x4 // All symbols are not made available for relocation processing by other modules. + RTLD_GLOBAL = 0x8 // All symbols are available for relocation processing of other modules. 
+)
+
+//go:cgo_import_dynamic purego_dlopen dlopen "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_dlsym dlsym "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_dlerror dlerror "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_dlclose dlclose "/usr/lib/libSystem.B.dylib"
+
+//go:cgo_import_dynamic purego_dlopen dlopen "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_dlsym dlsym "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_dlerror dlerror "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_dlclose dlclose "/usr/lib/libSystem.B.dylib"
diff --git a/vendor/github.com/ebitengine/purego/dlfcn_freebsd.go b/vendor/github.com/ebitengine/purego/dlfcn_freebsd.go
new file mode 100644
index 00000000000..6b371620d96
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/dlfcn_freebsd.go
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+package purego
+
+// Constants as defined in https://github.com/freebsd/freebsd-src/blob/main/include/dlfcn.h
+const (
+	intSize      = 32 << (^uint(0) >> 63) // 32 or 64
+	RTLD_DEFAULT = 1<<intSize - 2         // Pseudo-handle for dlsym so search for any loaded symbol
+	RTLD_LAZY    = 0x00001
+	RTLD_NOW     = 0x00002
+	RTLD_LOCAL   = 0x00000
+	RTLD_GLOBAL  = 0x00100
+)
+// # Type Conversions (Go <=> C)
+//
+// string <=> char*
+// bool <=> _Bool
+// uintptr <=> uintptr_t
+// uint <=> uint32_t or uint64_t
+// uint8 <=> uint8_t
+// uint16 <=> uint16_t
+// uint32 <=> uint32_t
+// uint64 <=> uint64_t
+// int <=> int32_t or int64_t
+// int8 <=> int8_t
+// int16 <=> int16_t
+// int32 <=> int32_t
+// int64 <=> int64_t
+// float32 <=> float
+// float64 <=> double
+// struct <=> struct (WIP - darwin only)
+// func <=> C function
+// unsafe.Pointer, *T <=> void*
+// []T => void*
+//
+// There is a special case when the last argument of fptr is a variadic interface (or []interface{});
+// it will be expanded into a call to the C function as if it had the arguments in that slice.
+// This means that using arg ...interface{} is like a cast to the function with the arguments inside arg.
+// This is not the same as C variadic.
+//
+// # Memory
+//
+// In general it is not possible for purego to guarantee the lifetimes of objects returned or received from
+// calling functions using RegisterFunc. For arguments to a C function it is important that the C function doesn't
+// hold onto a reference to Go memory. This is the same as the [Cgo rules].
+//
+// However, there are some special cases. When passing a string as an argument, if the string does not end in a
+// null-terminated byte (\x00) then the string will be copied into memory maintained by purego. The memory is only valid for
+// that specific call. Therefore, if the C code keeps a reference to that string it may become invalid at some
+// undefined time. However, if the string does already contain a null-terminated byte then no copy is done.
+// It is then the responsibility of the caller to ensure the string stays alive as long as it's needed in C memory.
+// This can be done using runtime.KeepAlive or allocating the string in C memory using malloc. When a C function
+// returns a null-terminated pointer to char a Go string can be used. Purego will allocate a new string in Go memory
+// and copy the data over. This string will be garbage collected whenever Go decides it's no longer referenced.
+// This C created string will not be freed by purego. If the pointer to char is not null-terminated or must continue
+// to point to C memory (because it's a buffer for example) then use a pointer to byte and then convert that to a slice
+// using unsafe.Slice.
Doing this means that it becomes the responsibility of the caller to care about the lifetime +// of the pointer +// +// # Structs +// +// Purego can handle the most common structs that have fields of builtin types like int8, uint16, float32, etc. However, +// it does not support aligning fields properly. It is therefore the responsibility of the caller to ensure +// that all padding is added to the Go struct to match the C one. See `BoolStructFn` in struct_test.go for an example. +// +// # Example +// +// All functions below call this C function: +// +// char *foo(char *str); +// +// // Let purego convert types +// var foo func(s string) string +// goString := foo("copied") +// // Go will garbage collect this string +// +// // Manually, handle allocations +// var foo2 func(b string) *byte +// mustFree := foo2("not copied\x00") +// defer free(mustFree) +// +// [Cgo rules]: https://pkg.go.dev/cmd/cgo#hdr-Go_references_to_C +func RegisterFunc(fptr interface{}, cfn uintptr) { + fn := reflect.ValueOf(fptr).Elem() + ty := fn.Type() + if ty.Kind() != reflect.Func { + panic("purego: fptr must be a function pointer") + } + if ty.NumOut() > 1 { + panic("purego: function can only return zero or one values") + } + if cfn == 0 { + panic("purego: cfn is nil") + } + if ty.NumOut() == 1 && (ty.Out(0).Kind() == reflect.Float32 || ty.Out(0).Kind() == reflect.Float64) && + runtime.GOARCH != "arm64" && runtime.GOARCH != "amd64" { + panic("purego: float returns are not supported") + } + { + // this code checks how many registers and stack this function will use + // to avoid crashing with too many arguments + var ints int + var floats int + var stack int + for i := 0; i < ty.NumIn(); i++ { + arg := ty.In(i) + switch arg.Kind() { + case reflect.Func: + // This only does preliminary testing to ensure the CDecl argument + // is the first argument. Full testing is done when the callback is actually + // created in NewCallback. 
+ for j := 0; j < arg.NumIn(); j++ { + in := arg.In(j) + if !in.AssignableTo(reflect.TypeOf(CDecl{})) { + continue + } + if j != 0 { + panic("purego: CDecl must be the first argument") + } + } + case reflect.String, reflect.Uintptr, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Ptr, reflect.UnsafePointer, + reflect.Slice, reflect.Bool: + if ints < numOfIntegerRegisters() { + ints++ + } else { + stack++ + } + case reflect.Float32, reflect.Float64: + const is32bit = unsafe.Sizeof(uintptr(0)) == 4 + if is32bit { + panic("purego: floats only supported on 64bit platforms") + } + if floats < numOfFloats { + floats++ + } else { + stack++ + } + case reflect.Struct: + if runtime.GOOS != "darwin" || (runtime.GOARCH != "amd64" && runtime.GOARCH != "arm64") { + panic("purego: struct arguments are only supported on darwin amd64 & arm64") + } + if arg.Size() == 0 { + continue + } + addInt := func(u uintptr) { + ints++ + } + addFloat := func(u uintptr) { + floats++ + } + addStack := func(u uintptr) { + stack++ + } + _ = addStruct(reflect.New(arg).Elem(), &ints, &floats, &stack, addInt, addFloat, addStack, nil) + default: + panic("purego: unsupported kind " + arg.Kind().String()) + } + } + if ty.NumOut() == 1 && ty.Out(0).Kind() == reflect.Struct { + if runtime.GOOS != "darwin" { + panic("purego: struct return values only supported on darwin arm64 & amd64") + } + outType := ty.Out(0) + checkStructFieldsSupported(outType) + if runtime.GOARCH == "amd64" && outType.Size() > maxRegAllocStructSize { + // on amd64 if struct is bigger than 16 bytes allocate the return struct + // and pass it in as a hidden first argument. + ints++ + } + } + sizeOfStack := maxArgs - numOfIntegerRegisters() + if stack > sizeOfStack { + panic("purego: too many arguments") + } + } + v := reflect.MakeFunc(ty, func(args []reflect.Value) (results []reflect.Value) { + if len(args) > 0 { + if variadic, ok := args[len(args)-1].Interface().([]interface{}); ok { + // subtract one from args bc the last argument in args is []interface{} + // which we are currently expanding + tmp := make([]reflect.Value, len(args)-1+len(variadic)) + n := copy(tmp, args[:len(args)-1]) + for i, v := range variadic { + tmp[n+i] = reflect.ValueOf(v) + } + args = tmp + } + } + var sysargs [maxArgs]uintptr + stack := sysargs[numOfIntegerRegisters():] + var floats [numOfFloats]uintptr + var numInts int + var numFloats int + var numStack int + var addStack, addInt, addFloat func(x uintptr) + if runtime.GOARCH == "arm64" || runtime.GOOS != "windows" { + // Windows arm64 uses the same calling convention as macOS and Linux + addStack = func(x uintptr) { + stack[numStack] = x + numStack++ + } + addInt = func(x uintptr) { + if numInts >= numOfIntegerRegisters() { + addStack(x) + } else { + sysargs[numInts] = x + numInts++ + } + } + addFloat = func(x uintptr) { + if numFloats < len(floats) { + floats[numFloats] = x + numFloats++ + } else { + addStack(x) + } + } + } else { + // On Windows amd64 the arguments are passed in the numbered registered. + // So the first int is in the first integer register and the first float + // is in the second floating register if there is already a first int. + // This is in contrast to how macOS and Linux pass arguments which + // tries to use as many registers as possible in the calling convention. 
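+			// Illustrative example (editorial, not from the upstream source):
+			// for a hypothetical call f(int a, double b, int c) on Windows
+			// amd64, a goes in RCX, b in XMM1, and c in R8 - each parameter
+			// consumes one numbered slot regardless of its class, which is
+			// why a single shared counter is enough here.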
+ addStack = func(x uintptr) { + sysargs[numStack] = x + numStack++ + } + addInt = addStack + addFloat = addStack + } + + var keepAlive []interface{} + defer func() { + runtime.KeepAlive(keepAlive) + runtime.KeepAlive(args) + }() + var syscall syscall15Args + if ty.NumOut() == 1 && ty.Out(0).Kind() == reflect.Struct { + outType := ty.Out(0) + if runtime.GOARCH == "amd64" && outType.Size() > maxRegAllocStructSize { + val := reflect.New(outType) + keepAlive = append(keepAlive, val) + addInt(val.Pointer()) + } else if runtime.GOARCH == "arm64" && outType.Size() > maxRegAllocStructSize { + isAllFloats, numFields := isAllSameFloat(outType) + if !isAllFloats || numFields > 4 { + val := reflect.New(outType) + keepAlive = append(keepAlive, val) + syscall.arm64_r8 = val.Pointer() + } + } + } + for _, v := range args { + switch v.Kind() { + case reflect.String: + ptr := strings.CString(v.String()) + keepAlive = append(keepAlive, ptr) + addInt(uintptr(unsafe.Pointer(ptr))) + case reflect.Uintptr, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + addInt(uintptr(v.Uint())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + addInt(uintptr(v.Int())) + case reflect.Ptr, reflect.UnsafePointer, reflect.Slice: + // There is no need to keepAlive this pointer separately because it is kept alive in the args variable + addInt(v.Pointer()) + case reflect.Func: + addInt(NewCallback(v.Interface())) + case reflect.Bool: + if v.Bool() { + addInt(1) + } else { + addInt(0) + } + case reflect.Float32: + addFloat(uintptr(math.Float32bits(float32(v.Float())))) + case reflect.Float64: + addFloat(uintptr(math.Float64bits(v.Float()))) + case reflect.Struct: + keepAlive = addStruct(v, &numInts, &numFloats, &numStack, addInt, addFloat, addStack, keepAlive) + default: + panic("purego: unsupported kind: " + v.Kind().String()) + } + } + if runtime.GOARCH == "arm64" || runtime.GOOS != "windows" { + // Use the normal arm64 calling convention even on Windows + syscall = syscall15Args{ + cfn, + sysargs[0], sysargs[1], sysargs[2], sysargs[3], sysargs[4], sysargs[5], + sysargs[6], sysargs[7], sysargs[8], sysargs[9], sysargs[10], sysargs[11], + sysargs[12], sysargs[13], sysargs[14], + floats[0], floats[1], floats[2], floats[3], floats[4], floats[5], floats[6], floats[7], + syscall.arm64_r8, + } + runtime_cgocall(syscall15XABI0, unsafe.Pointer(&syscall)) + } else { + // This is a fallback for Windows amd64, 386, and arm. Note this may not support floats + syscall.a1, syscall.a2, _ = syscall_syscall15X(cfn, sysargs[0], sysargs[1], sysargs[2], sysargs[3], sysargs[4], + sysargs[5], sysargs[6], sysargs[7], sysargs[8], sysargs[9], sysargs[10], sysargs[11], + sysargs[12], sysargs[13], sysargs[14]) + syscall.f1 = syscall.a2 // on amd64 a2 stores the float return. 
On 32bit platforms floats aren't supported.
+	}
+	if ty.NumOut() == 0 {
+		return nil
+	}
+	outType := ty.Out(0)
+	v := reflect.New(outType).Elem()
+	switch outType.Kind() {
+	case reflect.Uintptr, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		v.SetUint(uint64(syscall.a1))
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		v.SetInt(int64(syscall.a1))
+	case reflect.Bool:
+		v.SetBool(byte(syscall.a1) != 0)
+	case reflect.UnsafePointer:
+		// We take the address and then dereference it to trick go vet from creating a possible misuse of unsafe.Pointer
+		v.SetPointer(*(*unsafe.Pointer)(unsafe.Pointer(&syscall.a1)))
+	case reflect.Ptr:
+		v = reflect.NewAt(outType, unsafe.Pointer(&syscall.a1)).Elem()
+	case reflect.Func:
+		// wrap this C function in a nicely typed Go function
+		v = reflect.New(outType)
+		RegisterFunc(v.Interface(), syscall.a1)
+	case reflect.String:
+		v.SetString(strings.GoString(syscall.a1))
+	case reflect.Float32:
+		// NOTE: syscall.f1 is only the floating return value on 64bit platforms.
+		// On 32bit platforms syscall.f1 is the upper part of a 64bit return.
+		v.SetFloat(float64(math.Float32frombits(uint32(syscall.f1))))
+	case reflect.Float64:
+		// NOTE: syscall.f1 is only the floating return value on 64bit platforms.
+		// On 32bit platforms syscall.f1 is the upper part of a 64bit return.
+		v.SetFloat(math.Float64frombits(uint64(syscall.f1)))
+	case reflect.Struct:
+		v = getStruct(outType, syscall)
+	default:
+		panic("purego: unsupported return kind: " + outType.Kind().String())
+	}
+	return []reflect.Value{v}
+	})
+	fn.Set(v)
+}
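+
+// An editorial sketch of RegisterFunc and the string conversions described
+// above (not upstream code; getenvAddr is assumed to have been obtained via
+// Dlsym against the C library):
+//
+//	var getenv func(name string) string
+//	purego.RegisterFunc(&getenv, getenvAddr)
+//	home := getenv("HOME") // "HOME" is copied and NUL-terminated for C;
+//	                       // the returned char* is copied into a new Go string.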
+
+// maxRegAllocStructSize is the biggest a struct can be while still fitting in registers.
+// If it is bigger than this then enough space must be allocated on the heap and then passed into
+// the function as the first parameter on amd64 or in R8 on arm64.
+//
+// If you change this make sure to update it in objc_runtime_darwin.go
+const maxRegAllocStructSize = 16
+
+func isAllSameFloat(ty reflect.Type) (allFloats bool, numFields int) {
+	allFloats = true
+	root := ty.Field(0).Type
+	for root.Kind() == reflect.Struct {
+		root = root.Field(0).Type
+	}
+	first := root.Kind()
+	if first != reflect.Float32 && first != reflect.Float64 {
+		allFloats = false
+	}
+	for i := 0; i < ty.NumField(); i++ {
+		f := ty.Field(i).Type
+		if f.Kind() == reflect.Struct {
+			var structNumFields int
+			allFloats, structNumFields = isAllSameFloat(f)
+			numFields += structNumFields
+			continue
+		}
+		numFields++
+		if f.Kind() != first {
+			allFloats = false
+		}
+	}
+	return allFloats, numFields
+}
+
+func checkStructFieldsSupported(ty reflect.Type) {
+	for i := 0; i < ty.NumField(); i++ {
+		f := ty.Field(i).Type
+		if f.Kind() == reflect.Array {
+			f = f.Elem()
+		} else if f.Kind() == reflect.Struct {
+			checkStructFieldsSupported(f)
+			continue
+		}
+		switch f.Kind() {
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+			reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
+			reflect.Uintptr, reflect.Ptr, reflect.UnsafePointer, reflect.Float64, reflect.Float32:
+		default:
+			panic(fmt.Sprintf("purego: struct field type %s is not supported", f))
+		}
+	}
+}
+
+func roundUpTo8(val uintptr) uintptr {
+	return (val + 7) &^ 7
+}
+
+func numOfIntegerRegisters() int {
+	switch runtime.GOARCH {
+	case "arm64":
+		return 8
+	case "amd64":
+		return 6
+	default:
+		// since this platform isn't supported and can therefore only access
+		// integer registers it is fine to return the maxArgs
+		return maxArgs
+	}
+}
diff --git a/vendor/github.com/ebitengine/purego/go_runtime.go b/vendor/github.com/ebitengine/purego/go_runtime.go
new file mode 100644
index 00000000000..13671ff23f2
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/go_runtime.go
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build darwin || freebsd || linux || windows
+
+package purego
+
+import (
+	"unsafe"
+)
+
+//go:linkname runtime_cgocall runtime.cgocall
+func runtime_cgocall(fn uintptr, arg unsafe.Pointer) int32 // from runtime/sys_libc.go
diff --git a/vendor/github.com/ebitengine/purego/internal/cgo/dlfcn_cgo_unix.go b/vendor/github.com/ebitengine/purego/internal/cgo/dlfcn_cgo_unix.go
new file mode 100644
index 00000000000..b09ecac1cfe
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/cgo/dlfcn_cgo_unix.go
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2024 The Ebitengine Authors
+
+//go:build freebsd || linux
+
+package cgo
+
+/*
+ #cgo LDFLAGS: -ldl
+
+#include <dlfcn.h>
+#include <stdlib.h>
+*/
+import "C"
+
+import (
+	"errors"
+	"unsafe"
+)
+
+func Dlopen(filename string, flag int) (uintptr, error) {
+	cfilename := C.CString(filename)
+	defer C.free(unsafe.Pointer(cfilename))
+	handle := C.dlopen(cfilename, C.int(flag))
+	if handle == nil {
+		return 0, errors.New(C.GoString(C.dlerror()))
+	}
+	return uintptr(handle), nil
+}
+
+func Dlsym(handle uintptr, symbol string) (uintptr, error) {
+	csymbol := C.CString(symbol)
+	defer C.free(unsafe.Pointer(csymbol))
+	symbolAddr := C.dlsym(*(*unsafe.Pointer)(unsafe.Pointer(&handle)), csymbol)
+	if symbolAddr == nil {
+		return 0, errors.New(C.GoString(C.dlerror()))
+	}
+	return uintptr(symbolAddr), nil
+}
+func Dlclose(handle uintptr) error {
+	result := C.dlclose(*(*unsafe.Pointer)(unsafe.Pointer(&handle)))
+	if result != 0 {
+		return errors.New(C.GoString(C.dlerror()))
+	}
+	return nil
+}
+
+// all that is needed is to assign each dl function because then its
+// symbol will be made available to the linker and linked to inside dlfcn.go
+var (
+	_ = C.dlopen
+	_ = C.dlsym
+	_ = C.dlerror
+	_ = C.dlclose
+)
diff --git a/vendor/github.com/ebitengine/purego/internal/cgo/empty.go b/vendor/github.com/ebitengine/purego/internal/cgo/empty.go
new file mode 100644
index 00000000000..1d7cffe2a7e
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/cgo/empty.go
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2024 The Ebitengine Authors
+
+package cgo
+
+// Empty so that importing this package doesn't cause an issue for certain platforms.
diff --git a/vendor/github.com/ebitengine/purego/internal/cgo/syscall_cgo_unix.go b/vendor/github.com/ebitengine/purego/internal/cgo/syscall_cgo_unix.go
new file mode 100644
index 00000000000..37ff24d5c1d
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/cgo/syscall_cgo_unix.go
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build freebsd || (linux && !(arm64 || amd64))
+
+package cgo
+
+// this file is placed inside internal/cgo and not package purego
+// because Cgo and assembly files can't be in the same package.
+
+/*
+ #cgo LDFLAGS: -ldl
+
+#include <assert.h>
+#include <dlfcn.h>
+#include <errno.h>
+#include <stdint.h>
+
+typedef struct syscall15Args {
+	uintptr_t fn;
+	uintptr_t a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15;
+	uintptr_t f1, f2, f3, f4, f5, f6, f7, f8;
+	uintptr_t err;
+} syscall15Args;
+
+void syscall15(struct syscall15Args *args) {
+	assert((args->f1|args->f2|args->f3|args->f4|args->f5|args->f6|args->f7|args->f8) == 0);
+	uintptr_t (*func_name)(uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6,
+		uintptr_t a7, uintptr_t a8, uintptr_t a9, uintptr_t a10, uintptr_t a11, uintptr_t a12,
+		uintptr_t a13, uintptr_t a14, uintptr_t a15);
+	*(void**)(&func_name) = (void*)(args->fn);
+	uintptr_t r1 = func_name(args->a1,args->a2,args->a3,args->a4,args->a5,args->a6,args->a7,args->a8,args->a9,
+		args->a10,args->a11,args->a12,args->a13,args->a14,args->a15);
+	args->a1 = r1;
+	args->err = errno;
+}
+
+*/
+import "C"
+import "unsafe"
+
+// assign purego.syscall15XABI0 to the C version of this function.
+var Syscall15XABI0 = unsafe.Pointer(C.syscall15)
+
+//go:nosplit
+func Syscall15X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2, err uintptr) {
+	args := C.syscall15Args{
+		C.uintptr_t(fn), C.uintptr_t(a1), C.uintptr_t(a2), C.uintptr_t(a3),
+		C.uintptr_t(a4), C.uintptr_t(a5), C.uintptr_t(a6),
+		C.uintptr_t(a7), C.uintptr_t(a8), C.uintptr_t(a9), C.uintptr_t(a10), C.uintptr_t(a11), C.uintptr_t(a12),
+		C.uintptr_t(a13), C.uintptr_t(a14), C.uintptr_t(a15), 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	}
+	C.syscall15(&args)
+	return uintptr(args.a1), 0, uintptr(args.err)
+}
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/abi_amd64.h b/vendor/github.com/ebitengine/purego/internal/fakecgo/abi_amd64.h
new file mode 100644
index 00000000000..9949435fe9e
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/abi_amd64.h
@@ -0,0 +1,99 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Macros for transitioning from the host ABI to Go ABI0.
+// +// These save the frame pointer, so in general, functions that use +// these should have zero frame size to suppress the automatic frame +// pointer, though it's harmless to not do this. + +#ifdef GOOS_windows + +// REGS_HOST_TO_ABI0_STACK is the stack bytes used by +// PUSH_REGS_HOST_TO_ABI0. +#define REGS_HOST_TO_ABI0_STACK (28*8 + 8) + +// PUSH_REGS_HOST_TO_ABI0 prepares for transitioning from +// the host ABI to Go ABI0 code. It saves all registers that are +// callee-save in the host ABI and caller-save in Go ABI0 and prepares +// for entry to Go. +// +// Save DI SI BP BX R12 R13 R14 R15 X6-X15 registers and the DF flag. +// Clear the DF flag for the Go ABI. +// MXCSR matches the Go ABI, so we don't have to set that, +// and Go doesn't modify it, so we don't have to save it. +#define PUSH_REGS_HOST_TO_ABI0() \ + PUSHFQ \ + CLD \ + ADJSP $(REGS_HOST_TO_ABI0_STACK - 8) \ + MOVQ DI, (0*0)(SP) \ + MOVQ SI, (1*8)(SP) \ + MOVQ BP, (2*8)(SP) \ + MOVQ BX, (3*8)(SP) \ + MOVQ R12, (4*8)(SP) \ + MOVQ R13, (5*8)(SP) \ + MOVQ R14, (6*8)(SP) \ + MOVQ R15, (7*8)(SP) \ + MOVUPS X6, (8*8)(SP) \ + MOVUPS X7, (10*8)(SP) \ + MOVUPS X8, (12*8)(SP) \ + MOVUPS X9, (14*8)(SP) \ + MOVUPS X10, (16*8)(SP) \ + MOVUPS X11, (18*8)(SP) \ + MOVUPS X12, (20*8)(SP) \ + MOVUPS X13, (22*8)(SP) \ + MOVUPS X14, (24*8)(SP) \ + MOVUPS X15, (26*8)(SP) + +#define POP_REGS_HOST_TO_ABI0() \ + MOVQ (0*0)(SP), DI \ + MOVQ (1*8)(SP), SI \ + MOVQ (2*8)(SP), BP \ + MOVQ (3*8)(SP), BX \ + MOVQ (4*8)(SP), R12 \ + MOVQ (5*8)(SP), R13 \ + MOVQ (6*8)(SP), R14 \ + MOVQ (7*8)(SP), R15 \ + MOVUPS (8*8)(SP), X6 \ + MOVUPS (10*8)(SP), X7 \ + MOVUPS (12*8)(SP), X8 \ + MOVUPS (14*8)(SP), X9 \ + MOVUPS (16*8)(SP), X10 \ + MOVUPS (18*8)(SP), X11 \ + MOVUPS (20*8)(SP), X12 \ + MOVUPS (22*8)(SP), X13 \ + MOVUPS (24*8)(SP), X14 \ + MOVUPS (26*8)(SP), X15 \ + ADJSP $-(REGS_HOST_TO_ABI0_STACK - 8) \ + POPFQ + +#else +// SysV ABI + +#define REGS_HOST_TO_ABI0_STACK (6*8) + +// SysV MXCSR matches the Go ABI, so we don't have to set that, +// and Go doesn't modify it, so we don't have to save it. +// Both SysV and Go require DF to be cleared, so that's already clear. +// The SysV and Go frame pointer conventions are compatible. +#define PUSH_REGS_HOST_TO_ABI0() \ + ADJSP $(REGS_HOST_TO_ABI0_STACK) \ + MOVQ BP, (5*8)(SP) \ + LEAQ (5*8)(SP), BP \ + MOVQ BX, (0*8)(SP) \ + MOVQ R12, (1*8)(SP) \ + MOVQ R13, (2*8)(SP) \ + MOVQ R14, (3*8)(SP) \ + MOVQ R15, (4*8)(SP) + +#define POP_REGS_HOST_TO_ABI0() \ + MOVQ (0*8)(SP), BX \ + MOVQ (1*8)(SP), R12 \ + MOVQ (2*8)(SP), R13 \ + MOVQ (3*8)(SP), R14 \ + MOVQ (4*8)(SP), R15 \ + MOVQ (5*8)(SP), BP \ + ADJSP $-(REGS_HOST_TO_ABI0_STACK) + +#endif diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/abi_arm64.h b/vendor/github.com/ebitengine/purego/internal/fakecgo/abi_arm64.h new file mode 100644 index 00000000000..5d5061ec1db --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/abi_arm64.h @@ -0,0 +1,39 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Macros for transitioning from the host ABI to Go ABI0. +// +// These macros save and restore the callee-saved registers +// from the stack, but they don't adjust stack pointer, so +// the user should prepare stack space in advance. +// SAVE_R19_TO_R28(offset) saves R19 ~ R28 to the stack space +// of ((offset)+0*8)(RSP) ~ ((offset)+9*8)(RSP). 
+// +// SAVE_F8_TO_F15(offset) saves F8 ~ F15 to the stack space +// of ((offset)+0*8)(RSP) ~ ((offset)+7*8)(RSP). +// +// R29 is not saved because Go will save and restore it. + +#define SAVE_R19_TO_R28(offset) \ + STP (R19, R20), ((offset)+0*8)(RSP) \ + STP (R21, R22), ((offset)+2*8)(RSP) \ + STP (R23, R24), ((offset)+4*8)(RSP) \ + STP (R25, R26), ((offset)+6*8)(RSP) \ + STP (R27, g), ((offset)+8*8)(RSP) +#define RESTORE_R19_TO_R28(offset) \ + LDP ((offset)+0*8)(RSP), (R19, R20) \ + LDP ((offset)+2*8)(RSP), (R21, R22) \ + LDP ((offset)+4*8)(RSP), (R23, R24) \ + LDP ((offset)+6*8)(RSP), (R25, R26) \ + LDP ((offset)+8*8)(RSP), (R27, g) /* R28 */ +#define SAVE_F8_TO_F15(offset) \ + FSTPD (F8, F9), ((offset)+0*8)(RSP) \ + FSTPD (F10, F11), ((offset)+2*8)(RSP) \ + FSTPD (F12, F13), ((offset)+4*8)(RSP) \ + FSTPD (F14, F15), ((offset)+6*8)(RSP) +#define RESTORE_F8_TO_F15(offset) \ + FLDPD ((offset)+0*8)(RSP), (F8, F9) \ + FLDPD ((offset)+2*8)(RSP), (F10, F11) \ + FLDPD ((offset)+4*8)(RSP), (F12, F13) \ + FLDPD ((offset)+6*8)(RSP), (F14, F15) diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/asm_amd64.s b/vendor/github.com/ebitengine/purego/internal/fakecgo/asm_amd64.s new file mode 100644 index 00000000000..2b7eb57f8ae --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/asm_amd64.s @@ -0,0 +1,39 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" +#include "abi_amd64.h" + +// Called by C code generated by cmd/cgo. +// func crosscall2(fn, a unsafe.Pointer, n int32, ctxt uintptr) +// Saves C callee-saved registers and calls cgocallback with three arguments. +// fn is the PC of a func(a unsafe.Pointer) function. +// This signature is known to SWIG, so we can't change it. +TEXT crosscall2(SB), NOSPLIT, $0-0 + PUSH_REGS_HOST_TO_ABI0() + + // Make room for arguments to cgocallback. + ADJSP $0x18 + +#ifndef GOOS_windows + MOVQ DI, 0x0(SP) // fn + MOVQ SI, 0x8(SP) // arg + + // Skip n in DX. + MOVQ CX, 0x10(SP) // ctxt + +#else + MOVQ CX, 0x0(SP) // fn + MOVQ DX, 0x8(SP) // arg + + // Skip n in R8. + MOVQ R9, 0x10(SP) // ctxt + +#endif + + CALL runtime·cgocallback(SB) + + ADJSP $-0x18 + POP_REGS_HOST_TO_ABI0() + RET diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/asm_arm64.s b/vendor/github.com/ebitengine/purego/internal/fakecgo/asm_arm64.s new file mode 100644 index 00000000000..50e5261d922 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/asm_arm64.s @@ -0,0 +1,36 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" +#include "abi_arm64.h" + +// Called by C code generated by cmd/cgo. +// func crosscall2(fn, a unsafe.Pointer, n int32, ctxt uintptr) +// Saves C callee-saved registers and calls cgocallback with three arguments. +// fn is the PC of a func(a unsafe.Pointer) function. +TEXT crosscall2(SB), NOSPLIT|NOFRAME, $0 +/* + * We still need to save all callee save register as before, and then + * push 3 args for fn (R0, R1, R3), skipping R2. + * Also note that at procedure entry in gc world, 8(RSP) will be the + * first arg. 
+ */ + SUB $(8*24), RSP + STP (R0, R1), (8*1)(RSP) + MOVD R3, (8*3)(RSP) + + SAVE_R19_TO_R28(8*4) + SAVE_F8_TO_F15(8*14) + STP (R29, R30), (8*22)(RSP) + + // Initialize Go ABI environment + BL runtime·load_g(SB) + BL runtime·cgocallback(SB) + + RESTORE_R19_TO_R28(8*4) + RESTORE_F8_TO_F15(8*14) + LDP (8*22)(RSP), (R29, R30) + + ADD $(8*24), RSP + RET diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/callbacks.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/callbacks.go new file mode 100644 index 00000000000..f29e690cc15 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/callbacks.go @@ -0,0 +1,93 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !cgo && (darwin || freebsd || linux) + +package fakecgo + +import ( + _ "unsafe" +) + +// TODO: decide if we need _runtime_cgo_panic_internal + +//go:linkname x_cgo_init_trampoline x_cgo_init_trampoline +//go:linkname _cgo_init _cgo_init +var x_cgo_init_trampoline byte +var _cgo_init = &x_cgo_init_trampoline + +// Creates a new system thread without updating any Go state. +// +// This method is invoked during shared library loading to create a new OS +// thread to perform the runtime initialization. This method is similar to +// _cgo_sys_thread_start except that it doesn't update any Go state. + +//go:linkname x_cgo_thread_start_trampoline x_cgo_thread_start_trampoline +//go:linkname _cgo_thread_start _cgo_thread_start +var x_cgo_thread_start_trampoline byte +var _cgo_thread_start = &x_cgo_thread_start_trampoline + +// Notifies that the runtime has been initialized. +// +// We currently block at every CGO entry point (via _cgo_wait_runtime_init_done) +// to ensure that the runtime has been initialized before the CGO call is +// executed. This is necessary for shared libraries where we kickoff runtime +// initialization in a separate thread and return without waiting for this +// thread to complete the init. + +//go:linkname x_cgo_notify_runtime_init_done_trampoline x_cgo_notify_runtime_init_done_trampoline +//go:linkname _cgo_notify_runtime_init_done _cgo_notify_runtime_init_done +var x_cgo_notify_runtime_init_done_trampoline byte +var _cgo_notify_runtime_init_done = &x_cgo_notify_runtime_init_done_trampoline + +// Indicates whether a dummy thread key has been created or not. +// +// When calling go exported function from C, we register a destructor +// callback, for a dummy thread key, by using pthread_key_create. + +//go:linkname _cgo_pthread_key_created _cgo_pthread_key_created +var x_cgo_pthread_key_created uintptr +var _cgo_pthread_key_created = &x_cgo_pthread_key_created + +// Set the x_crosscall2_ptr C function pointer variable point to crosscall2. +// It's for the runtime package to call at init time. +func set_crosscall2() { + // nothing needs to be done here for fakecgo + // because it's possible to just call cgocallback directly +} + +//go:linkname _set_crosscall2 runtime.set_crosscall2 +var _set_crosscall2 = set_crosscall2 + +// Store the g into the thread-specific value. +// So that pthread_key_destructor will dropm when the thread is exiting. 
+
+//go:linkname x_cgo_bindm_trampoline x_cgo_bindm_trampoline
+//go:linkname _cgo_bindm _cgo_bindm
+var x_cgo_bindm_trampoline byte
+var _cgo_bindm = &x_cgo_bindm_trampoline
+
+// TODO: decide if we need x_cgo_set_context_function
+// TODO: decide if we need _cgo_yield
+
+var (
+	// In Go 1.20 the race detector was rewritten to pure Go
+	// on darwin. This means that when CGO_ENABLED=0 is set
+	// fakecgo is built with race detector code. This is not
+	// good since this code is pretending to be C. The go:norace
+	// pragma is not enough, since it only applies to the native
+	// ABIInternal function. The ABI0 wrapper (which is necessary,
+	// since all references to text symbols from assembly will use it)
+	// does not inherit the go:norace pragma, so it will still be
+	// instrumented by the race detector.
+	//
+	// To circumvent this issue, closure calls are used in the
+	// assembly, which forces the compiler to use the ABIInternal
+	// native implementation (which has go:norace) instead.
+	threadentry_call        = threadentry
+	x_cgo_init_call         = x_cgo_init
+	x_cgo_setenv_call       = x_cgo_setenv
+	x_cgo_unsetenv_call     = x_cgo_unsetenv
+	x_cgo_thread_start_call = x_cgo_thread_start
+)
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/doc.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/doc.go
new file mode 100644
index 00000000000..be82f7dfca9
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/doc.go
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build !cgo && (darwin || freebsd || linux)
+
+// Package fakecgo implements the Cgo runtime (runtime/cgo) entirely in Go.
+// This allows code that calls into C to function properly when CGO_ENABLED=0.
+//
+// # Goals
+//
+// fakecgo attempts to replicate the same naming structure as in the runtime.
+// For example, functions that have the prefix "gcc_*" are named "go_*".
+// This makes it easier to port other GOOSs and GOARCHs as well as to keep
+// it in sync with runtime/cgo.
+//
+// # Support
+//
+// Currently, fakecgo only supports macOS, Linux & FreeBSD on amd64 & arm64. It also cannot
+// be used with -buildmode=c-archive because that requires special initialization
+// that fakecgo does not implement at the moment.
+//
+// # Usage
+//
+// Using fakecgo is easy: just import _ "github.com/ebitengine/purego" and then
+// set the environment variable CGO_ENABLED=0.
+// The recommended usage for fakecgo is to prefer using runtime/cgo if possible
+// but if cross-compiling or fast build times are important fakecgo is available.
+// Purego will pick whichever Cgo runtime is available and prefer the one that
+// comes with Go (runtime/cgo).
+package fakecgo
+
+//go:generate go run gen.go
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/freebsd.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/freebsd.go
new file mode 100644
index 00000000000..bb73a709e69
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/freebsd.go
@@ -0,0 +1,27 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build freebsd && !cgo
+
+package fakecgo
+
+import _ "unsafe" // for go:linkname
+
+// Supply environ and __progname, because we don't
+// link against the standard FreeBSD crt0.o and the
+// libc dynamic library needs them.
+ +// Note: when building with cross-compiling or CGO_ENABLED=0, add +// the following argument to `go` so that these symbols are defined by +// making fakecgo the Cgo. +// -gcflags="github.com/ebitengine/purego/internal/fakecgo=-std" + +//go:linkname _environ environ +//go:linkname _progname __progname + +//go:cgo_export_dynamic environ +//go:cgo_export_dynamic __progname + +var _environ uintptr +var _progname uintptr diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_amd64.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_amd64.go new file mode 100644 index 00000000000..39f5ff1f06a --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_amd64.go @@ -0,0 +1,73 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !cgo + +package fakecgo + +import "unsafe" + +//go:nosplit +//go:norace +func _cgo_sys_thread_start(ts *ThreadStart) { + var attr pthread_attr_t + var ign, oset sigset_t + var p pthread_t + var size size_t + var err int + + sigfillset(&ign) + pthread_sigmask(SIG_SETMASK, &ign, &oset) + + size = pthread_get_stacksize_np(pthread_self()) + pthread_attr_init(&attr) + pthread_attr_setstacksize(&attr, size) + // Leave stacklo=0 and set stackhi=size; mstart will do the rest. + ts.g.stackhi = uintptr(size) + + err = _cgo_try_pthread_create(&p, &attr, unsafe.Pointer(threadentry_trampolineABI0), ts) + + pthread_sigmask(SIG_SETMASK, &oset, nil) + + if err != 0 { + print("fakecgo: pthread_create failed: ") + println(err) + abort() + } +} + +// threadentry_trampolineABI0 maps the C ABI to Go ABI then calls the Go function +// +//go:linkname x_threadentry_trampoline threadentry_trampoline +var x_threadentry_trampoline byte +var threadentry_trampolineABI0 = &x_threadentry_trampoline + +//go:nosplit +//go:norace +func threadentry(v unsafe.Pointer) unsafe.Pointer { + ts := *(*ThreadStart)(v) + free(v) + + setg_trampoline(setg_func, uintptr(unsafe.Pointer(ts.g))) + + // faking funcs in go is a bit a... involved - but the following works :) + fn := uintptr(unsafe.Pointer(&ts.fn)) + (*(*func())(unsafe.Pointer(&fn)))() + + return nil +} + +// here we will store a pointer to the provided setg func +var setg_func uintptr + +//go:nosplit +//go:norace +func x_cgo_init(g *G, setg uintptr) { + var size size_t + + setg_func = setg + + size = pthread_get_stacksize_np(pthread_self()) + g.stacklo = uintptr(unsafe.Add(unsafe.Pointer(&size), -size+4096)) +} diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_arm64.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_arm64.go new file mode 100644 index 00000000000..d0868f0f790 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_arm64.go @@ -0,0 +1,88 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !cgo + +package fakecgo + +import "unsafe" + +//go:nosplit +//go:norace +func _cgo_sys_thread_start(ts *ThreadStart) { + var attr pthread_attr_t + var ign, oset sigset_t + var p pthread_t + var size size_t + var err int + + sigfillset(&ign) + pthread_sigmask(SIG_SETMASK, &ign, &oset) + + size = pthread_get_stacksize_np(pthread_self()) + pthread_attr_init(&attr) + pthread_attr_setstacksize(&attr, size) + // Leave stacklo=0 and set stackhi=size; mstart will do the rest. 
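+	// (Editorial note: runtime.mstart0 treats a g whose stack.lo is zero as
+	// having unknown bounds; it reinterprets stack.hi as the stack *size* and
+	// rebuilds both bounds from its own stack pointer.)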
+ ts.g.stackhi = uintptr(size) + + err = _cgo_try_pthread_create(&p, &attr, unsafe.Pointer(threadentry_trampolineABI0), ts) + + pthread_sigmask(SIG_SETMASK, &oset, nil) + + if err != 0 { + print("fakecgo: pthread_create failed: ") + println(err) + abort() + } +} + +// threadentry_trampolineABI0 maps the C ABI to Go ABI then calls the Go function +// +//go:linkname x_threadentry_trampoline threadentry_trampoline +var x_threadentry_trampoline byte +var threadentry_trampolineABI0 = &x_threadentry_trampoline + +//go:nosplit +//go:norace +func threadentry(v unsafe.Pointer) unsafe.Pointer { + ts := *(*ThreadStart)(v) + free(v) + + // TODO: support ios + //#if TARGET_OS_IPHONE + // darwin_arm_init_thread_exception_port(); + //#endif + setg_trampoline(setg_func, uintptr(unsafe.Pointer(ts.g))) + + // faking funcs in go is a bit a... involved - but the following works :) + fn := uintptr(unsafe.Pointer(&ts.fn)) + (*(*func())(unsafe.Pointer(&fn)))() + + return nil +} + +// here we will store a pointer to the provided setg func +var setg_func uintptr + +// x_cgo_init(G *g, void (*setg)(void*)) (runtime/cgo/gcc_linux_amd64.c) +// This get's called during startup, adjusts stacklo, and provides a pointer to setg_gcc for us +// Additionally, if we set _cgo_init to non-null, go won't do it's own TLS setup +// This function can't be go:systemstack since go is not in a state where the systemcheck would work. +// +//go:nosplit +//go:norace +func x_cgo_init(g *G, setg uintptr) { + var size size_t + + setg_func = setg + size = pthread_get_stacksize_np(pthread_self()) + g.stacklo = uintptr(unsafe.Add(unsafe.Pointer(&size), -size+4096)) + + //TODO: support ios + //#if TARGET_OS_IPHONE + // darwin_arm_init_mach_exception_handler(); + // darwin_arm_init_thread_exception_port(); + // init_working_dir(); + //#endif +} diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_freebsd_amd64.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_freebsd_amd64.go new file mode 100644 index 00000000000..c9ff7156a89 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_freebsd_amd64.go @@ -0,0 +1,95 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !cgo + +package fakecgo + +import "unsafe" + +//go:nosplit +func _cgo_sys_thread_start(ts *ThreadStart) { + var attr pthread_attr_t + var ign, oset sigset_t + var p pthread_t + var size size_t + var err int + + //fprintf(stderr, "runtime/cgo: _cgo_sys_thread_start: fn=%p, g=%p\n", ts->fn, ts->g); // debug + sigfillset(&ign) + pthread_sigmask(SIG_SETMASK, &ign, &oset) + + pthread_attr_init(&attr) + pthread_attr_getstacksize(&attr, &size) + // Leave stacklo=0 and set stackhi=size; mstart will do the rest. + ts.g.stackhi = uintptr(size) + + err = _cgo_try_pthread_create(&p, &attr, unsafe.Pointer(threadentry_trampolineABI0), ts) + + pthread_sigmask(SIG_SETMASK, &oset, nil) + + if err != 0 { + print("fakecgo: pthread_create failed: ") + println(err) + abort() + } +} + +// threadentry_trampolineABI0 maps the C ABI to Go ABI then calls the Go function +// +//go:linkname x_threadentry_trampoline threadentry_trampoline +var x_threadentry_trampoline byte +var threadentry_trampolineABI0 = &x_threadentry_trampoline + +//go:nosplit +func threadentry(v unsafe.Pointer) unsafe.Pointer { + ts := *(*ThreadStart)(v) + free(v) + + setg_trampoline(setg_func, uintptr(unsafe.Pointer(ts.g))) + + // faking funcs in go is a bit a... 
involved - but the following works :) + fn := uintptr(unsafe.Pointer(&ts.fn)) + (*(*func())(unsafe.Pointer(&fn)))() + + return nil +} + +// here we will store a pointer to the provided setg func +var setg_func uintptr + +//go:nosplit +func x_cgo_init(g *G, setg uintptr) { + var size size_t + var attr *pthread_attr_t + + /* The memory sanitizer distributed with versions of clang + before 3.8 has a bug: if you call mmap before malloc, mmap + may return an address that is later overwritten by the msan + library. Avoid this problem by forcing a call to malloc + here, before we ever call malloc. + + This is only required for the memory sanitizer, so it's + unfortunate that we always run it. It should be possible + to remove this when we no longer care about versions of + clang before 3.8. The test for this is + misc/cgo/testsanitizers. + + GCC works hard to eliminate a seemingly unnecessary call to + malloc, so we actually use the memory we allocate. */ + + setg_func = setg + attr = (*pthread_attr_t)(malloc(unsafe.Sizeof(*attr))) + if attr == nil { + println("fakecgo: malloc failed") + abort() + } + pthread_attr_init(attr) + pthread_attr_getstacksize(attr, &size) + // runtime/cgo uses __builtin_frame_address(0) instead of `uintptr(unsafe.Pointer(&size))` + // but this should be OK since we are taking the address of the first variable in this function. + g.stacklo = uintptr(unsafe.Pointer(&size)) - uintptr(size) + 4096 + pthread_attr_destroy(attr) + free(unsafe.Pointer(attr)) +} diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_freebsd_arm64.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_freebsd_arm64.go new file mode 100644 index 00000000000..e3a060b9350 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_freebsd_arm64.go @@ -0,0 +1,98 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !cgo + +package fakecgo + +import "unsafe" + +//go:nosplit +func _cgo_sys_thread_start(ts *ThreadStart) { + var attr pthread_attr_t + var ign, oset sigset_t + var p pthread_t + var size size_t + var err int + + // fprintf(stderr, "runtime/cgo: _cgo_sys_thread_start: fn=%p, g=%p\n", ts->fn, ts->g); // debug + sigfillset(&ign) + pthread_sigmask(SIG_SETMASK, &ign, &oset) + + pthread_attr_init(&attr) + pthread_attr_getstacksize(&attr, &size) + // Leave stacklo=0 and set stackhi=size; mstart will do the rest. + ts.g.stackhi = uintptr(size) + + err = _cgo_try_pthread_create(&p, &attr, unsafe.Pointer(threadentry_trampolineABI0), ts) + + pthread_sigmask(SIG_SETMASK, &oset, nil) + + if err != 0 { + print("fakecgo: pthread_create failed: ") + println(err) + abort() + } +} + +// threadentry_trampolineABI0 maps the C ABI to Go ABI then calls the Go function +// +//go:linkname x_threadentry_trampoline threadentry_trampoline +var x_threadentry_trampoline byte +var threadentry_trampolineABI0 = &x_threadentry_trampoline + +//go:nosplit +func threadentry(v unsafe.Pointer) unsafe.Pointer { + ts := *(*ThreadStart)(v) + free(v) + + setg_trampoline(setg_func, uintptr(unsafe.Pointer(ts.g))) + + // faking funcs in go is a bit a... 
involved - but the following works :) + fn := uintptr(unsafe.Pointer(&ts.fn)) + (*(*func())(unsafe.Pointer(&fn)))() + + return nil +} + +// here we will store a pointer to the provided setg func +var setg_func uintptr + +// x_cgo_init(G *g, void (*setg)(void*)) (runtime/cgo/gcc_linux_amd64.c) +// This get's called during startup, adjusts stacklo, and provides a pointer to setg_gcc for us +// Additionally, if we set _cgo_init to non-null, go won't do it's own TLS setup +// This function can't be go:systemstack since go is not in a state where the systemcheck would work. +// +//go:nosplit +func x_cgo_init(g *G, setg uintptr) { + var size size_t + var attr *pthread_attr_t + + /* The memory sanitizer distributed with versions of clang + before 3.8 has a bug: if you call mmap before malloc, mmap + may return an address that is later overwritten by the msan + library. Avoid this problem by forcing a call to malloc + here, before we ever call malloc. + + This is only required for the memory sanitizer, so it's + unfortunate that we always run it. It should be possible + to remove this when we no longer care about versions of + clang before 3.8. The test for this is + misc/cgo/testsanitizers. + + GCC works hard to eliminate a seemingly unnecessary call to + malloc, so we actually use the memory we allocate. */ + + setg_func = setg + attr = (*pthread_attr_t)(malloc(unsafe.Sizeof(*attr))) + if attr == nil { + println("fakecgo: malloc failed") + abort() + } + pthread_attr_init(attr) + pthread_attr_getstacksize(attr, &size) + g.stacklo = uintptr(unsafe.Pointer(&size)) - uintptr(size) + 4096 + pthread_attr_destroy(attr) + free(unsafe.Pointer(attr)) +} diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_libinit.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_libinit.go new file mode 100644 index 00000000000..e5cb46be455 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_libinit.go @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo && (darwin || freebsd || linux) + +package fakecgo + +import ( + "syscall" + "unsafe" +) + +var ( + pthread_g pthread_key_t + + runtime_init_cond = PTHREAD_COND_INITIALIZER + runtime_init_mu = PTHREAD_MUTEX_INITIALIZER + runtime_init_done int +) + +//go:nosplit +func x_cgo_notify_runtime_init_done() { + pthread_mutex_lock(&runtime_init_mu) + runtime_init_done = 1 + pthread_cond_broadcast(&runtime_init_cond) + pthread_mutex_unlock(&runtime_init_mu) +} + +// Store the g into a thread-specific value associated with the pthread key pthread_g. +// And pthread_key_destructor will dropm when the thread is exiting. +func x_cgo_bindm(g unsafe.Pointer) { + // We assume this will always succeed, otherwise, there might be extra M leaking, + // when a C thread exits after a cgo call. + // We only invoke this function once per thread in runtime.needAndBindM, + // and the next calls just reuse the bound m. + pthread_setspecific(pthread_g, g) +} + +// _cgo_try_pthread_create retries pthread_create if it fails with +// EAGAIN. +// +//go:nosplit +//go:norace +func _cgo_try_pthread_create(thread *pthread_t, attr *pthread_attr_t, pfn unsafe.Pointer, arg *ThreadStart) int { + var ts syscall.Timespec + // tries needs to be the same type as syscall.Timespec.Nsec + // but the fields are int32 on 32bit and int64 on 64bit. + // tries is assigned to syscall.Timespec.Nsec in order to match its type. 
+ tries := ts.Nsec + var err int + + for tries = 0; tries < 20; tries++ { + err = int(pthread_create(thread, attr, pfn, unsafe.Pointer(arg))) + if err == 0 { + pthread_detach(*thread) + return 0 + } + if err != int(syscall.EAGAIN) { + return err + } + ts.Sec = 0 + ts.Nsec = (tries + 1) * 1000 * 1000 // Milliseconds. + nanosleep(&ts, nil) + } + return int(syscall.EAGAIN) +} diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_amd64.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_amd64.go new file mode 100644 index 00000000000..c9ff7156a89 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_amd64.go @@ -0,0 +1,95 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !cgo + +package fakecgo + +import "unsafe" + +//go:nosplit +func _cgo_sys_thread_start(ts *ThreadStart) { + var attr pthread_attr_t + var ign, oset sigset_t + var p pthread_t + var size size_t + var err int + + //fprintf(stderr, "runtime/cgo: _cgo_sys_thread_start: fn=%p, g=%p\n", ts->fn, ts->g); // debug + sigfillset(&ign) + pthread_sigmask(SIG_SETMASK, &ign, &oset) + + pthread_attr_init(&attr) + pthread_attr_getstacksize(&attr, &size) + // Leave stacklo=0 and set stackhi=size; mstart will do the rest. + ts.g.stackhi = uintptr(size) + + err = _cgo_try_pthread_create(&p, &attr, unsafe.Pointer(threadentry_trampolineABI0), ts) + + pthread_sigmask(SIG_SETMASK, &oset, nil) + + if err != 0 { + print("fakecgo: pthread_create failed: ") + println(err) + abort() + } +} + +// threadentry_trampolineABI0 maps the C ABI to Go ABI then calls the Go function +// +//go:linkname x_threadentry_trampoline threadentry_trampoline +var x_threadentry_trampoline byte +var threadentry_trampolineABI0 = &x_threadentry_trampoline + +//go:nosplit +func threadentry(v unsafe.Pointer) unsafe.Pointer { + ts := *(*ThreadStart)(v) + free(v) + + setg_trampoline(setg_func, uintptr(unsafe.Pointer(ts.g))) + + // faking funcs in go is a bit a... involved - but the following works :) + fn := uintptr(unsafe.Pointer(&ts.fn)) + (*(*func())(unsafe.Pointer(&fn)))() + + return nil +} + +// here we will store a pointer to the provided setg func +var setg_func uintptr + +//go:nosplit +func x_cgo_init(g *G, setg uintptr) { + var size size_t + var attr *pthread_attr_t + + /* The memory sanitizer distributed with versions of clang + before 3.8 has a bug: if you call mmap before malloc, mmap + may return an address that is later overwritten by the msan + library. Avoid this problem by forcing a call to malloc + here, before we ever call malloc. + + This is only required for the memory sanitizer, so it's + unfortunate that we always run it. It should be possible + to remove this when we no longer care about versions of + clang before 3.8. The test for this is + misc/cgo/testsanitizers. + + GCC works hard to eliminate a seemingly unnecessary call to + malloc, so we actually use the memory we allocate. */ + + setg_func = setg + attr = (*pthread_attr_t)(malloc(unsafe.Sizeof(*attr))) + if attr == nil { + println("fakecgo: malloc failed") + abort() + } + pthread_attr_init(attr) + pthread_attr_getstacksize(attr, &size) + // runtime/cgo uses __builtin_frame_address(0) instead of `uintptr(unsafe.Pointer(&size))` + // but this should be OK since we are taking the address of the first variable in this function. 
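+	// (Editorial note: the bound below works out to roughly "an address near
+	// the current stack top, minus the thread's stack size, plus one 4 KiB
+	// page of slack", so Go's stack checks never probe below the real stack.)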
+	g.stacklo = uintptr(unsafe.Pointer(&size)) - uintptr(size) + 4096
+	pthread_attr_destroy(attr)
+	free(unsafe.Pointer(attr))
+}
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_arm64.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_arm64.go
new file mode 100644
index 00000000000..a3b1cca59a0
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_arm64.go
@@ -0,0 +1,98 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !cgo
+
+package fakecgo
+
+import "unsafe"
+
+//go:nosplit
+func _cgo_sys_thread_start(ts *ThreadStart) {
+	var attr pthread_attr_t
+	var ign, oset sigset_t
+	var p pthread_t
+	var size size_t
+	var err int
+
+	//fprintf(stderr, "runtime/cgo: _cgo_sys_thread_start: fn=%p, g=%p\n", ts->fn, ts->g); // debug
+	sigfillset(&ign)
+	pthread_sigmask(SIG_SETMASK, &ign, &oset)
+
+	pthread_attr_init(&attr)
+	pthread_attr_getstacksize(&attr, &size)
+	// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
+	ts.g.stackhi = uintptr(size)
+
+	err = _cgo_try_pthread_create(&p, &attr, unsafe.Pointer(threadentry_trampolineABI0), ts)
+
+	pthread_sigmask(SIG_SETMASK, &oset, nil)
+
+	if err != 0 {
+		print("fakecgo: pthread_create failed: ")
+		println(err)
+		abort()
+	}
+}
+
+// threadentry_trampolineABI0 maps the C ABI to Go ABI then calls the Go function
+//
+//go:linkname x_threadentry_trampoline threadentry_trampoline
+var x_threadentry_trampoline byte
+var threadentry_trampolineABI0 = &x_threadentry_trampoline
+
+//go:nosplit
+func threadentry(v unsafe.Pointer) unsafe.Pointer {
+	ts := *(*ThreadStart)(v)
+	free(v)
+
+	setg_trampoline(setg_func, uintptr(unsafe.Pointer(ts.g)))
+
+	// faking funcs in go is a bit a... involved - but the following works :)
+	fn := uintptr(unsafe.Pointer(&ts.fn))
+	(*(*func())(unsafe.Pointer(&fn)))()
+
+	return nil
+}
+
+// here we will store a pointer to the provided setg func
+var setg_func uintptr
+
+// x_cgo_init(G *g, void (*setg)(void*)) (runtime/cgo/gcc_linux_amd64.c)
+// This gets called during startup, adjusts stacklo, and provides a pointer to setg_gcc for us
+// Additionally, if we set _cgo_init to non-null, go won't do its own TLS setup
+// This function can't be go:systemstack since go is not in a state where the systemcheck would work.
+//
+//go:nosplit
+func x_cgo_init(g *G, setg uintptr) {
+	var size size_t
+	var attr *pthread_attr_t
+
+	/* The memory sanitizer distributed with versions of clang
+	   before 3.8 has a bug: if you call mmap before malloc, mmap
+	   may return an address that is later overwritten by the msan
+	   library. Avoid this problem by forcing a call to malloc
+	   here, before we ever call malloc.
+
+	   This is only required for the memory sanitizer, so it's
+	   unfortunate that we always run it. It should be possible
+	   to remove this when we no longer care about versions of
+	   clang before 3.8. The test for this is
+	   misc/cgo/testsanitizers.
+
+	   GCC works hard to eliminate a seemingly unnecessary call to
+	   malloc, so we actually use the memory we allocate.
*/
+
+	setg_func = setg
+	attr = (*pthread_attr_t)(malloc(unsafe.Sizeof(*attr)))
+	if attr == nil {
+		println("fakecgo: malloc failed")
+		abort()
+	}
+	pthread_attr_init(attr)
+	pthread_attr_getstacksize(attr, &size)
+	g.stacklo = uintptr(unsafe.Pointer(&size)) - uintptr(size) + 4096
+	pthread_attr_destroy(attr)
+	free(unsafe.Pointer(attr))
+}
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_setenv.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_setenv.go
new file mode 100644
index 00000000000..e42d84f0b75
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_setenv.go
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build !cgo && (darwin || freebsd || linux)
+
+package fakecgo
+
+//go:nosplit
+//go:norace
+func x_cgo_setenv(arg *[2]*byte) {
+	setenv(arg[0], arg[1], 1)
+}
+
+//go:nosplit
+//go:norace
+func x_cgo_unsetenv(arg *[1]*byte) {
+	unsetenv(arg[0])
+}
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_util.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_util.go
new file mode 100644
index 00000000000..0ac10d1f157
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_util.go
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build !cgo && (darwin || freebsd || linux)
+
+package fakecgo
+
+import "unsafe"
+
+// _cgo_thread_start is split into three parts in cgo since only one part is system dependent (keep it here for easier handling)
+
+// _cgo_thread_start(ThreadStart *arg) (runtime/cgo/gcc_util.c)
+// This gets called instead of the go code for creating new threads
+// -> pthread_* stuff is used, so threads are set up correctly for C
+// If this is missing, TLS is only set up correctly on thread 1!
+// This function should be go:systemstack instead of go:nosplit (but that requires runtime)
+//
+//go:nosplit
+//go:norace
+func x_cgo_thread_start(arg *ThreadStart) {
+	var ts *ThreadStart
+	// Make our own copy that can persist after we return.
+	// _cgo_tsan_acquire();
+	ts = (*ThreadStart)(malloc(unsafe.Sizeof(*ts)))
+	// _cgo_tsan_release();
+	if ts == nil {
+		println("fakecgo: out of memory in thread_start")
+		abort()
+	}
+	// *ts = *arg would cause a writebarrier so copy using slices
+	s1 := unsafe.Slice((*uintptr)(unsafe.Pointer(ts)), unsafe.Sizeof(*ts)/8)
+	s2 := unsafe.Slice((*uintptr)(unsafe.Pointer(arg)), unsafe.Sizeof(*arg)/8)
+	for i := range s2 {
+		s1[i] = s2[i]
+	}
+	_cgo_sys_thread_start(ts) // OS-dependent half
+}
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/iscgo.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/iscgo.go
new file mode 100644
index 00000000000..28af41cc640
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/iscgo.go
@@ -0,0 +1,19 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !cgo && (darwin || freebsd || linux)
+
+// The runtime package contains an uninitialized definition
+// for runtime·iscgo. Override it to tell the runtime we're here.
+// There are various function pointers that should be set too,
+// but those depend on dynamic linker magic to get initialized
+// correctly, and sometimes they break. This variable is a
+// backup: it depends only on old C style static linking rules.
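+// Because the initializer below is a constant, the linker bakes true
+// directly into the binary's data section, so runtime.iscgo is already
+// set before any runtime or package initialization code runs.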
+ +package fakecgo + +import _ "unsafe" // for go:linkname + +//go:linkname _iscgo runtime.iscgo +var _iscgo bool = true diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo.go new file mode 100644 index 00000000000..74626c64a0e --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo.go @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo && (darwin || freebsd || linux) + +package fakecgo + +type ( + size_t uintptr + sigset_t [128]byte + pthread_attr_t [64]byte + pthread_t int + pthread_key_t uint64 +) + +// for pthread_sigmask: + +type sighow int32 + +const ( + SIG_BLOCK sighow = 0 + SIG_UNBLOCK sighow = 1 + SIG_SETMASK sighow = 2 +) + +type G struct { + stacklo uintptr + stackhi uintptr +} + +type ThreadStart struct { + g *G + tls *uintptr + fn uintptr +} diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_darwin.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_darwin.go new file mode 100644 index 00000000000..af148333f6d --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_darwin.go @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo + +package fakecgo + +type ( + pthread_mutex_t struct { + sig int64 + opaque [56]byte + } + pthread_cond_t struct { + sig int64 + opaque [40]byte + } +) + +var ( + PTHREAD_COND_INITIALIZER = pthread_cond_t{sig: 0x3CB0B1BB} + PTHREAD_MUTEX_INITIALIZER = pthread_mutex_t{sig: 0x32AAABA7} +) diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_freebsd.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_freebsd.go new file mode 100644 index 00000000000..ca1f722c939 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_freebsd.go @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo + +package fakecgo + +type ( + pthread_cond_t uintptr + pthread_mutex_t uintptr +) + +var ( + PTHREAD_COND_INITIALIZER = pthread_cond_t(0) + PTHREAD_MUTEX_INITIALIZER = pthread_mutex_t(0) +) diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_linux.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_linux.go new file mode 100644 index 00000000000..c4b6e9ea5a4 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_linux.go @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo + +package fakecgo + +type ( + pthread_cond_t [48]byte + pthread_mutex_t [48]byte +) + +var ( + PTHREAD_COND_INITIALIZER = pthread_cond_t{} + PTHREAD_MUTEX_INITIALIZER = pthread_mutex_t{} +) diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/setenv.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/setenv.go new file mode 100644 index 00000000000..f30af0e1515 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/setenv.go @@ -0,0 +1,19 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !cgo && (darwin || freebsd || linux) + +package fakecgo + +import _ "unsafe" // for go:linkname + +//go:linkname x_cgo_setenv_trampoline x_cgo_setenv_trampoline +//go:linkname _cgo_setenv runtime._cgo_setenv +var x_cgo_setenv_trampoline byte +var _cgo_setenv = &x_cgo_setenv_trampoline + +//go:linkname x_cgo_unsetenv_trampoline x_cgo_unsetenv_trampoline +//go:linkname _cgo_unsetenv runtime._cgo_unsetenv +var x_cgo_unsetenv_trampoline byte +var _cgo_unsetenv = &x_cgo_unsetenv_trampoline diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols.go new file mode 100644 index 00000000000..3d19fd822a7 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols.go @@ -0,0 +1,181 @@ +// Code generated by 'go generate' with gen.go. DO NOT EDIT. + +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo && (darwin || freebsd || linux) + +package fakecgo + +import ( + "syscall" + "unsafe" +) + +// setg_trampoline calls setg with the G provided +func setg_trampoline(setg uintptr, G uintptr) + +// call5 takes fn the C function and 5 arguments and calls the function with those arguments +func call5(fn, a1, a2, a3, a4, a5 uintptr) uintptr + +func malloc(size uintptr) unsafe.Pointer { + ret := call5(mallocABI0, uintptr(size), 0, 0, 0, 0) + // this indirection is to avoid go vet complaining about possible misuse of unsafe.Pointer + return *(*unsafe.Pointer)(unsafe.Pointer(&ret)) +} + +func free(ptr unsafe.Pointer) { + call5(freeABI0, uintptr(ptr), 0, 0, 0, 0) +} + +func setenv(name *byte, value *byte, overwrite int32) int32 { + return int32(call5(setenvABI0, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), uintptr(overwrite), 0, 0)) +} + +func unsetenv(name *byte) int32 { + return int32(call5(unsetenvABI0, uintptr(unsafe.Pointer(name)), 0, 0, 0, 0)) +} + +func sigfillset(set *sigset_t) int32 { + return int32(call5(sigfillsetABI0, uintptr(unsafe.Pointer(set)), 0, 0, 0, 0)) +} + +func nanosleep(ts *syscall.Timespec, rem *syscall.Timespec) int32 { + return int32(call5(nanosleepABI0, uintptr(unsafe.Pointer(ts)), uintptr(unsafe.Pointer(rem)), 0, 0, 0)) +} + +func abort() { + call5(abortABI0, 0, 0, 0, 0, 0) +} + +func pthread_attr_init(attr *pthread_attr_t) int32 { + return int32(call5(pthread_attr_initABI0, uintptr(unsafe.Pointer(attr)), 0, 0, 0, 0)) +} + +func pthread_create(thread *pthread_t, attr *pthread_attr_t, start unsafe.Pointer, arg unsafe.Pointer) int32 { + return int32(call5(pthread_createABI0, uintptr(unsafe.Pointer(thread)), uintptr(unsafe.Pointer(attr)), uintptr(start), uintptr(arg), 0)) +} + +func pthread_detach(thread pthread_t) int32 { + return int32(call5(pthread_detachABI0, uintptr(thread), 0, 0, 0, 0)) +} + +func pthread_sigmask(how sighow, ign *sigset_t, oset *sigset_t) int32 { + return int32(call5(pthread_sigmaskABI0, uintptr(how), uintptr(unsafe.Pointer(ign)), uintptr(unsafe.Pointer(oset)), 0, 0)) +} + +func pthread_self() pthread_t { + return pthread_t(call5(pthread_selfABI0, 0, 0, 0, 0, 0)) +} + +func pthread_get_stacksize_np(thread pthread_t) size_t { + return size_t(call5(pthread_get_stacksize_npABI0, uintptr(thread), 0, 0, 0, 0)) +} + +func pthread_attr_getstacksize(attr *pthread_attr_t, stacksize *size_t) int32 { + return int32(call5(pthread_attr_getstacksizeABI0, uintptr(unsafe.Pointer(attr)), uintptr(unsafe.Pointer(stacksize)), 0, 0, 0)) +} + +func pthread_attr_setstacksize(attr *pthread_attr_t, 
size size_t) int32 { + return int32(call5(pthread_attr_setstacksizeABI0, uintptr(unsafe.Pointer(attr)), uintptr(size), 0, 0, 0)) +} + +func pthread_attr_destroy(attr *pthread_attr_t) int32 { + return int32(call5(pthread_attr_destroyABI0, uintptr(unsafe.Pointer(attr)), 0, 0, 0, 0)) +} + +func pthread_mutex_lock(mutex *pthread_mutex_t) int32 { + return int32(call5(pthread_mutex_lockABI0, uintptr(unsafe.Pointer(mutex)), 0, 0, 0, 0)) +} + +func pthread_mutex_unlock(mutex *pthread_mutex_t) int32 { + return int32(call5(pthread_mutex_unlockABI0, uintptr(unsafe.Pointer(mutex)), 0, 0, 0, 0)) +} + +func pthread_cond_broadcast(cond *pthread_cond_t) int32 { + return int32(call5(pthread_cond_broadcastABI0, uintptr(unsafe.Pointer(cond)), 0, 0, 0, 0)) +} + +func pthread_setspecific(key pthread_key_t, value unsafe.Pointer) int32 { + return int32(call5(pthread_setspecificABI0, uintptr(key), uintptr(value), 0, 0, 0)) +} + +//go:linkname _malloc _malloc +var _malloc uintptr +var mallocABI0 = uintptr(unsafe.Pointer(&_malloc)) + +//go:linkname _free _free +var _free uintptr +var freeABI0 = uintptr(unsafe.Pointer(&_free)) + +//go:linkname _setenv _setenv +var _setenv uintptr +var setenvABI0 = uintptr(unsafe.Pointer(&_setenv)) + +//go:linkname _unsetenv _unsetenv +var _unsetenv uintptr +var unsetenvABI0 = uintptr(unsafe.Pointer(&_unsetenv)) + +//go:linkname _sigfillset _sigfillset +var _sigfillset uintptr +var sigfillsetABI0 = uintptr(unsafe.Pointer(&_sigfillset)) + +//go:linkname _nanosleep _nanosleep +var _nanosleep uintptr +var nanosleepABI0 = uintptr(unsafe.Pointer(&_nanosleep)) + +//go:linkname _abort _abort +var _abort uintptr +var abortABI0 = uintptr(unsafe.Pointer(&_abort)) + +//go:linkname _pthread_attr_init _pthread_attr_init +var _pthread_attr_init uintptr +var pthread_attr_initABI0 = uintptr(unsafe.Pointer(&_pthread_attr_init)) + +//go:linkname _pthread_create _pthread_create +var _pthread_create uintptr +var pthread_createABI0 = uintptr(unsafe.Pointer(&_pthread_create)) + +//go:linkname _pthread_detach _pthread_detach +var _pthread_detach uintptr +var pthread_detachABI0 = uintptr(unsafe.Pointer(&_pthread_detach)) + +//go:linkname _pthread_sigmask _pthread_sigmask +var _pthread_sigmask uintptr +var pthread_sigmaskABI0 = uintptr(unsafe.Pointer(&_pthread_sigmask)) + +//go:linkname _pthread_self _pthread_self +var _pthread_self uintptr +var pthread_selfABI0 = uintptr(unsafe.Pointer(&_pthread_self)) + +//go:linkname _pthread_get_stacksize_np _pthread_get_stacksize_np +var _pthread_get_stacksize_np uintptr +var pthread_get_stacksize_npABI0 = uintptr(unsafe.Pointer(&_pthread_get_stacksize_np)) + +//go:linkname _pthread_attr_getstacksize _pthread_attr_getstacksize +var _pthread_attr_getstacksize uintptr +var pthread_attr_getstacksizeABI0 = uintptr(unsafe.Pointer(&_pthread_attr_getstacksize)) + +//go:linkname _pthread_attr_setstacksize _pthread_attr_setstacksize +var _pthread_attr_setstacksize uintptr +var pthread_attr_setstacksizeABI0 = uintptr(unsafe.Pointer(&_pthread_attr_setstacksize)) + +//go:linkname _pthread_attr_destroy _pthread_attr_destroy +var _pthread_attr_destroy uintptr +var pthread_attr_destroyABI0 = uintptr(unsafe.Pointer(&_pthread_attr_destroy)) + +//go:linkname _pthread_mutex_lock _pthread_mutex_lock +var _pthread_mutex_lock uintptr +var pthread_mutex_lockABI0 = uintptr(unsafe.Pointer(&_pthread_mutex_lock)) + +//go:linkname _pthread_mutex_unlock _pthread_mutex_unlock +var _pthread_mutex_unlock uintptr +var pthread_mutex_unlockABI0 = uintptr(unsafe.Pointer(&_pthread_mutex_unlock)) + 
+//go:linkname _pthread_cond_broadcast _pthread_cond_broadcast +var _pthread_cond_broadcast uintptr +var pthread_cond_broadcastABI0 = uintptr(unsafe.Pointer(&_pthread_cond_broadcast)) + +//go:linkname _pthread_setspecific _pthread_setspecific +var _pthread_setspecific uintptr +var pthread_setspecificABI0 = uintptr(unsafe.Pointer(&_pthread_setspecific)) diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_darwin.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_darwin.go new file mode 100644 index 00000000000..54aaa46285c --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_darwin.go @@ -0,0 +1,29 @@ +// Code generated by 'go generate' with gen.go. DO NOT EDIT. + +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo + +package fakecgo + +//go:cgo_import_dynamic purego_malloc malloc "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_free free "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_setenv setenv "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_unsetenv unsetenv "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_sigfillset sigfillset "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_nanosleep nanosleep "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_abort abort "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_attr_init pthread_attr_init "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_create pthread_create "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_detach pthread_detach "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_sigmask pthread_sigmask "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_self pthread_self "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_get_stacksize_np pthread_get_stacksize_np "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_attr_getstacksize pthread_attr_getstacksize "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_attr_setstacksize pthread_attr_setstacksize "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_attr_destroy pthread_attr_destroy "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_mutex_lock pthread_mutex_lock "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_mutex_unlock pthread_mutex_unlock "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_cond_broadcast pthread_cond_broadcast "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_setspecific pthread_setspecific "/usr/lib/libSystem.B.dylib" diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_freebsd.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_freebsd.go new file mode 100644 index 00000000000..81538119799 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_freebsd.go @@ -0,0 +1,29 @@ +// Code generated by 'go generate' with gen.go. DO NOT EDIT. 
+ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo + +package fakecgo + +//go:cgo_import_dynamic purego_malloc malloc "libc.so.7" +//go:cgo_import_dynamic purego_free free "libc.so.7" +//go:cgo_import_dynamic purego_setenv setenv "libc.so.7" +//go:cgo_import_dynamic purego_unsetenv unsetenv "libc.so.7" +//go:cgo_import_dynamic purego_sigfillset sigfillset "libc.so.7" +//go:cgo_import_dynamic purego_nanosleep nanosleep "libc.so.7" +//go:cgo_import_dynamic purego_abort abort "libc.so.7" +//go:cgo_import_dynamic purego_pthread_attr_init pthread_attr_init "libpthread.so" +//go:cgo_import_dynamic purego_pthread_create pthread_create "libpthread.so" +//go:cgo_import_dynamic purego_pthread_detach pthread_detach "libpthread.so" +//go:cgo_import_dynamic purego_pthread_sigmask pthread_sigmask "libpthread.so" +//go:cgo_import_dynamic purego_pthread_self pthread_self "libpthread.so" +//go:cgo_import_dynamic purego_pthread_get_stacksize_np pthread_get_stacksize_np "libpthread.so" +//go:cgo_import_dynamic purego_pthread_attr_getstacksize pthread_attr_getstacksize "libpthread.so" +//go:cgo_import_dynamic purego_pthread_attr_setstacksize pthread_attr_setstacksize "libpthread.so" +//go:cgo_import_dynamic purego_pthread_attr_destroy pthread_attr_destroy "libpthread.so" +//go:cgo_import_dynamic purego_pthread_mutex_lock pthread_mutex_lock "libpthread.so" +//go:cgo_import_dynamic purego_pthread_mutex_unlock pthread_mutex_unlock "libpthread.so" +//go:cgo_import_dynamic purego_pthread_cond_broadcast pthread_cond_broadcast "libpthread.so" +//go:cgo_import_dynamic purego_pthread_setspecific pthread_setspecific "libpthread.so" diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_linux.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_linux.go new file mode 100644 index 00000000000..180057d0156 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_linux.go @@ -0,0 +1,29 @@ +// Code generated by 'go generate' with gen.go. DO NOT EDIT. 
+ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo + +package fakecgo + +//go:cgo_import_dynamic purego_malloc malloc "libc.so.6" +//go:cgo_import_dynamic purego_free free "libc.so.6" +//go:cgo_import_dynamic purego_setenv setenv "libc.so.6" +//go:cgo_import_dynamic purego_unsetenv unsetenv "libc.so.6" +//go:cgo_import_dynamic purego_sigfillset sigfillset "libc.so.6" +//go:cgo_import_dynamic purego_nanosleep nanosleep "libc.so.6" +//go:cgo_import_dynamic purego_abort abort "libc.so.6" +//go:cgo_import_dynamic purego_pthread_attr_init pthread_attr_init "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_create pthread_create "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_detach pthread_detach "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_sigmask pthread_sigmask "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_self pthread_self "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_get_stacksize_np pthread_get_stacksize_np "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_attr_getstacksize pthread_attr_getstacksize "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_attr_setstacksize pthread_attr_setstacksize "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_attr_destroy pthread_attr_destroy "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_mutex_lock pthread_mutex_lock "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_mutex_unlock pthread_mutex_unlock "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_cond_broadcast pthread_cond_broadcast "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_setspecific pthread_setspecific "libpthread.so.0" diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_amd64.s b/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_amd64.s new file mode 100644 index 00000000000..c9a3cc09eb3 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_amd64.s @@ -0,0 +1,104 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo && (darwin || linux || freebsd) + +/* +trampoline for emulating required C functions for cgo in go (see cgo.go) +(we convert cdecl calling convention to go and vice-versa) + +Since we're called from go and call into C we can cheat a bit with the calling conventions: + - in go all the registers are caller saved + - in C we have a couple of callee saved registers + +=> we can use BX, R12, R13, R14, R15 instead of the stack + +C Calling convention cdecl used here (we only need integer args): +1. arg: DI +2. arg: SI +3. arg: DX +4. arg: CX +5. arg: R8 +6. arg: R9 +We don't need floats with these functions -> AX=0 +return value will be in AX +*/ +#include "textflag.h" +#include "go_asm.h" + +// these trampolines map the gcc ABI to Go ABI and then calls into the Go equivalent functions. 
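+// Each one follows the same pattern: the C arguments arrive in DI/SI and are
+// moved into AX/BX, the first two integer argument registers of Go's internal
+// register ABI, while DX (the closure context register) holds the funcval so
+// that calling through its code pointer is a valid Go closure call.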
+ +TEXT x_cgo_init_trampoline(SB), NOSPLIT, $16 + MOVQ DI, AX + MOVQ SI, BX + MOVQ ·x_cgo_init_call(SB), DX + MOVQ (DX), CX + CALL CX + RET + +TEXT x_cgo_thread_start_trampoline(SB), NOSPLIT, $8 + MOVQ DI, AX + MOVQ ·x_cgo_thread_start_call(SB), DX + MOVQ (DX), CX + CALL CX + RET + +TEXT x_cgo_setenv_trampoline(SB), NOSPLIT, $8 + MOVQ DI, AX + MOVQ ·x_cgo_setenv_call(SB), DX + MOVQ (DX), CX + CALL CX + RET + +TEXT x_cgo_unsetenv_trampoline(SB), NOSPLIT, $8 + MOVQ DI, AX + MOVQ ·x_cgo_unsetenv_call(SB), DX + MOVQ (DX), CX + CALL CX + RET + +TEXT x_cgo_notify_runtime_init_done_trampoline(SB), NOSPLIT, $0 + CALL ·x_cgo_notify_runtime_init_done(SB) + RET + +TEXT x_cgo_bindm_trampoline(SB), NOSPLIT, $0 + CALL ·x_cgo_bindm(SB) + RET + +// func setg_trampoline(setg uintptr, g uintptr) +TEXT ·setg_trampoline(SB), NOSPLIT, $0-16 + MOVQ G+8(FP), DI + MOVQ setg+0(FP), BX + XORL AX, AX + CALL BX + RET + +TEXT threadentry_trampoline(SB), NOSPLIT, $16 + MOVQ DI, AX + MOVQ ·threadentry_call(SB), DX + MOVQ (DX), CX + CALL CX + RET + +TEXT ·call5(SB), NOSPLIT, $0-56 + MOVQ fn+0(FP), BX + MOVQ a1+8(FP), DI + MOVQ a2+16(FP), SI + MOVQ a3+24(FP), DX + MOVQ a4+32(FP), CX + MOVQ a5+40(FP), R8 + + XORL AX, AX // no floats + + PUSHQ BP // save BP + MOVQ SP, BP // save SP inside BP bc BP is callee-saved + SUBQ $16, SP // allocate space for alignment + ANDQ $-16, SP // align on 16 bytes for SSE + + CALL BX + + MOVQ BP, SP // get SP back + POPQ BP // restore BP + + MOVQ AX, ret+48(FP) + RET diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_arm64.s b/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_arm64.s new file mode 100644 index 00000000000..9dbdbc0139d --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_arm64.s @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo && (darwin || freebsd || linux) + +#include "textflag.h" +#include "go_asm.h" + +// these trampolines map the gcc ABI to Go ABI and then calls into the Go equivalent functions. 
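+// On arm64 the C arguments already sit in R0/R1 where Go's register ABI
+// expects them; they are additionally spilled to the stack argument slots,
+// and R26 (the closure context register) holds the funcval so that calling
+// through its code pointer is a valid Go closure call.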
+ +TEXT x_cgo_init_trampoline(SB), NOSPLIT, $0-0 + MOVD R0, 8(RSP) + MOVD R1, 16(RSP) + MOVD ·x_cgo_init_call(SB), R26 + MOVD (R26), R2 + CALL (R2) + RET + +TEXT x_cgo_thread_start_trampoline(SB), NOSPLIT, $0-0 + MOVD R0, 8(RSP) + MOVD ·x_cgo_thread_start_call(SB), R26 + MOVD (R26), R2 + CALL (R2) + RET + +TEXT x_cgo_setenv_trampoline(SB), NOSPLIT, $0-0 + MOVD R0, 8(RSP) + MOVD ·x_cgo_setenv_call(SB), R26 + MOVD (R26), R2 + CALL (R2) + RET + +TEXT x_cgo_unsetenv_trampoline(SB), NOSPLIT, $0-0 + MOVD R0, 8(RSP) + MOVD ·x_cgo_unsetenv_call(SB), R26 + MOVD (R26), R2 + CALL (R2) + RET + +TEXT x_cgo_notify_runtime_init_done_trampoline(SB), NOSPLIT, $0-0 + CALL ·x_cgo_notify_runtime_init_done(SB) + RET + +TEXT x_cgo_bindm_trampoline(SB), NOSPLIT, $0 + CALL ·x_cgo_bindm(SB) + RET + +// func setg_trampoline(setg uintptr, g uintptr) +TEXT ·setg_trampoline(SB), NOSPLIT, $0-16 + MOVD G+8(FP), R0 + MOVD setg+0(FP), R1 + CALL R1 + RET + +TEXT threadentry_trampoline(SB), NOSPLIT, $0-0 + MOVD R0, 8(RSP) + MOVD ·threadentry_call(SB), R26 + MOVD (R26), R2 + CALL (R2) + MOVD $0, R0 // TODO: get the return value from threadentry + RET + +TEXT ·call5(SB), NOSPLIT, $0-0 + MOVD fn+0(FP), R6 + MOVD a1+8(FP), R0 + MOVD a2+16(FP), R1 + MOVD a3+24(FP), R2 + MOVD a4+32(FP), R3 + MOVD a5+40(FP), R4 + CALL R6 + MOVD R0, ret+48(FP) + RET diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_stubs.s b/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_stubs.s new file mode 100644 index 00000000000..a65b2012c1b --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_stubs.s @@ -0,0 +1,90 @@ +// Code generated by 'go generate' with gen.go. DO NOT EDIT. + +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo && (darwin || freebsd || linux) + +#include "textflag.h" + +// these stubs are here because it is not possible to go:linkname directly the C functions on darwin arm64 + +TEXT _malloc(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_malloc(SB) + RET + +TEXT _free(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_free(SB) + RET + +TEXT _setenv(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_setenv(SB) + RET + +TEXT _unsetenv(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_unsetenv(SB) + RET + +TEXT _sigfillset(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_sigfillset(SB) + RET + +TEXT _nanosleep(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_nanosleep(SB) + RET + +TEXT _abort(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_abort(SB) + RET + +TEXT _pthread_attr_init(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_attr_init(SB) + RET + +TEXT _pthread_create(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_create(SB) + RET + +TEXT _pthread_detach(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_detach(SB) + RET + +TEXT _pthread_sigmask(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_sigmask(SB) + RET + +TEXT _pthread_self(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_self(SB) + RET + +TEXT _pthread_get_stacksize_np(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_get_stacksize_np(SB) + RET + +TEXT _pthread_attr_getstacksize(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_attr_getstacksize(SB) + RET + +TEXT _pthread_attr_setstacksize(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_attr_setstacksize(SB) + RET + +TEXT _pthread_attr_destroy(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_attr_destroy(SB) + RET + +TEXT _pthread_mutex_lock(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_mutex_lock(SB) + RET + +TEXT _pthread_mutex_unlock(SB), NOSPLIT|NOFRAME, 
$0-0 + JMP purego_pthread_mutex_unlock(SB) + RET + +TEXT _pthread_cond_broadcast(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_cond_broadcast(SB) + RET + +TEXT _pthread_setspecific(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_setspecific(SB) + RET diff --git a/vendor/github.com/ebitengine/purego/internal/strings/strings.go b/vendor/github.com/ebitengine/purego/internal/strings/strings.go new file mode 100644 index 00000000000..5b0d2522554 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/strings/strings.go @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +package strings + +import ( + "unsafe" +) + +// hasSuffix tests whether the string s ends with suffix. +func hasSuffix(s, suffix string) bool { + return len(s) >= len(suffix) && s[len(s)-len(suffix):] == suffix +} + +// CString converts a go string to *byte that can be passed to C code. +func CString(name string) *byte { + if hasSuffix(name, "\x00") { + return &(*(*[]byte)(unsafe.Pointer(&name)))[0] + } + b := make([]byte, len(name)+1) + copy(b, name) + return &b[0] +} + +// GoString copies a null-terminated char* to a Go string. +func GoString(c uintptr) string { + // We take the address and then dereference it to trick go vet from creating a possible misuse of unsafe.Pointer + ptr := *(*unsafe.Pointer)(unsafe.Pointer(&c)) + if ptr == nil { + return "" + } + var length int + for { + if *(*byte)(unsafe.Add(ptr, uintptr(length))) == '\x00' { + break + } + length++ + } + return string(unsafe.Slice((*byte)(ptr), length)) +} diff --git a/vendor/github.com/ebitengine/purego/is_ios.go b/vendor/github.com/ebitengine/purego/is_ios.go new file mode 100644 index 00000000000..ed31da97824 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/is_ios.go @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo + +package purego + +// if you are getting this error it means that you have +// CGO_ENABLED=0 while trying to build for ios. +// purego does not support this mode yet. +// the fix is to set CGO_ENABLED=1 which will require +// a C compiler. +var _ = _PUREGO_REQUIRES_CGO_ON_IOS diff --git a/vendor/github.com/ebitengine/purego/nocgo.go b/vendor/github.com/ebitengine/purego/nocgo.go new file mode 100644 index 00000000000..5b989ea814e --- /dev/null +++ b/vendor/github.com/ebitengine/purego/nocgo.go @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo && (darwin || freebsd || linux) + +package purego + +// if CGO_ENABLED=0 import fakecgo to setup the Cgo runtime correctly. +// This is required since some frameworks need TLS setup the C way which Go doesn't do. +// We currently don't support ios in fakecgo mode so force Cgo or fail +// +// The way that the Cgo runtime (runtime/cgo) works is by setting some variables found +// in runtime with non-null GCC compiled functions. 
The variables that are replaced are
+// var (
+//	iscgo                         bool           // in runtime/cgo.go
+//	_cgo_init                     unsafe.Pointer // in runtime/cgo.go
+//	_cgo_thread_start             unsafe.Pointer // in runtime/cgo.go
+//	_cgo_notify_runtime_init_done unsafe.Pointer // in runtime/cgo.go
+//	_cgo_setenv                   unsafe.Pointer // in runtime/env_posix.go
+//	_cgo_unsetenv                 unsafe.Pointer // in runtime/env_posix.go
+// )
+// importing fakecgo will set these (using //go:linkname) with functions written
+// entirely in Go (except for some assembly trampolines to change GCC ABI to Go ABI).
+// Doing so makes it possible to build applications that call into C without CGO_ENABLED=1.
+import _ "github.com/ebitengine/purego/internal/fakecgo"
diff --git a/vendor/github.com/ebitengine/purego/struct_amd64.go b/vendor/github.com/ebitengine/purego/struct_amd64.go
new file mode 100644
index 00000000000..f3514c984ef
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/struct_amd64.go
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2024 The Ebitengine Authors
+
+package purego
+
+import (
+	"math"
+	"reflect"
+	"unsafe"
+)
+
+func getStruct(outType reflect.Type, syscall syscall15Args) (v reflect.Value) {
+	outSize := outType.Size()
+	switch {
+	case outSize == 0:
+		return reflect.New(outType).Elem()
+	case outSize <= 8:
+		if isAllFloats(outType) {
+			// two float32s or one float64 are returned in the float register
+			return reflect.NewAt(outType, unsafe.Pointer(&struct{ a uintptr }{syscall.f1})).Elem()
+		}
+		// up to 8 bytes is returned in RAX
+		return reflect.NewAt(outType, unsafe.Pointer(&struct{ a uintptr }{syscall.a1})).Elem()
+	case outSize <= 16:
+		r1, r2 := syscall.a1, syscall.a2
+		if isAllFloats(outType) {
+			r1 = syscall.f1
+			r2 = syscall.f2
+		} else {
+			// check whether the first 8 bytes are floats
+			hasFirstFloat := false
+			f1 := outType.Field(0).Type
+			if f1.Kind() == reflect.Float64 || f1.Kind() == reflect.Float32 && outType.Field(1).Type.Kind() == reflect.Float32 {
+				r1 = syscall.f1
+				hasFirstFloat = true
+			}
+
+			// find index of the field that starts the second 8 bytes
+			var i int
+			for i = 0; i < outType.NumField(); i++ {
+				if outType.Field(i).Offset == 8 {
+					break
+				}
+			}
+
+			// check whether the last 8 bytes are floats
+			f1 = outType.Field(i).Type
+			if f1.Kind() == reflect.Float64 || f1.Kind() == reflect.Float32 && i+1 == outType.NumField() {
+				r2 = syscall.f1
+			} else if hasFirstFloat {
+				// if the first field was a float then that means the second integer field
+				// comes from the first integer register
+				r2 = syscall.a1
+			}
+		}
+		return reflect.NewAt(outType, unsafe.Pointer(&struct{ a, b uintptr }{r1, r2})).Elem()
+	default:
+		// create struct from the Go pointer created above
+		// weird pointer dereference to circumvent go vet
+		return reflect.NewAt(outType, *(*unsafe.Pointer)(unsafe.Pointer(&syscall.a1))).Elem()
+	}
+}
+
+func isAllFloats(ty reflect.Type) bool {
+	for i := 0; i < ty.NumField(); i++ {
+		f := ty.Field(i)
+		switch f.Type.Kind() {
+		case reflect.Float64, reflect.Float32:
+		default:
+			return false
+		}
+	}
+	return true
+}
+
+// https://refspecs.linuxbase.org/elf/x86_64-abi-0.99.pdf
+// https://gitlab.com/x86-psABIs/x86-64-ABI
+// Class determines where the 8 byte value goes.
+// Higher value classes win over lower value classes
+const (
+	_NO_CLASS = 0b0000
+	_SSE      = 0b0001
+	_X87      = 0b0011 // long double not used in Go
+	_INTEGER  = 0b0111
+	_MEMORY   = 0b1111
+)
+
+func addStruct(v reflect.Value, numInts, numFloats, numStack *int, addInt, addFloat, addStack func(uintptr), keepAlive []interface{}) []interface{} {
+	if v.Type().Size() == 0 {
+		return keepAlive
+	}
+
+	// if greater than 64 bytes place on stack
+	if v.Type().Size() > 8*8 {
+		placeStack(v, addStack)
+		return keepAlive
+	}
+	var (
+		savedNumFloats = *numFloats
+		savedNumInts   = *numInts
+		savedNumStack  = *numStack
+	)
+	placeOnStack := postMerger(v.Type()) || !tryPlaceRegister(v, addFloat, addInt)
+	if placeOnStack {
+		// reset any values placed in registers
+		*numFloats = savedNumFloats
+		*numInts = savedNumInts
+		*numStack = savedNumStack
+		placeStack(v, addStack)
+	}
+	return keepAlive
+}
+
+func postMerger(t reflect.Type) (passInMemory bool) {
+	// (c) If the size of the aggregate exceeds two eightbytes and the first eightbyte isn't SSE or any other
+	// eightbyte isn't SSEUP, the whole argument is passed in memory.
+	if t.Kind() != reflect.Struct {
+		return false
+	}
+	if t.Size() <= 2*8 {
+		return false
+	}
+	return true // Go does not have an SSE/SSEUP type so this is always true
+}
+
+func tryPlaceRegister(v reflect.Value, addFloat func(uintptr), addInt func(uintptr)) (ok bool) {
+	ok = true
+	var val uint64
+	var shift byte // # of bits to shift
+	var flushed bool
+	class := _NO_CLASS
+	flushIfNeeded := func() {
+		if flushed {
+			return
+		}
+		flushed = true
+		if class == _SSE {
+			addFloat(uintptr(val))
+		} else {
+			addInt(uintptr(val))
+		}
+		val = 0
+		shift = 0
+		class = _NO_CLASS
+	}
+	var place func(v reflect.Value)
+	place = func(v reflect.Value) {
+		var numFields int
+		if v.Kind() == reflect.Struct {
+			numFields = v.Type().NumField()
+		} else {
+			numFields = v.Type().Len()
+		}
+
+		for i := 0; i < numFields; i++ {
+			flushed = false
+			var f reflect.Value
+			if v.Kind() == reflect.Struct {
+				f = v.Field(i)
+			} else {
+				f = v.Index(i)
+			}
+			switch f.Kind() {
+			case reflect.Struct:
+				place(f)
+			case reflect.Bool:
+				if f.Bool() {
+					val |= 1
+				}
+				shift += 8
+				class |= _INTEGER
+			case reflect.Pointer:
+				ok = false
+				return
+			case reflect.Int8:
+				val |= uint64(f.Int()&0xFF) << shift
+				shift += 8
+				class |= _INTEGER
+			case reflect.Int16:
+				val |= uint64(f.Int()&0xFFFF) << shift
+				shift += 16
+				class |= _INTEGER
+			case reflect.Int32:
+				val |= uint64(f.Int()&0xFFFF_FFFF) << shift
+				shift += 32
+				class |= _INTEGER
+			case reflect.Int64, reflect.Int:
+				val = uint64(f.Int())
+				shift = 64
+				class = _INTEGER
+			case reflect.Uint8:
+				val |= f.Uint() << shift
+				shift += 8
+				class |= _INTEGER
+			case reflect.Uint16:
+				val |= f.Uint() << shift
+				shift += 16
+				class |= _INTEGER
+			case reflect.Uint32:
+				val |= f.Uint() << shift
+				shift += 32
+				class |= _INTEGER
+			case reflect.Uint64, reflect.Uint:
+				val = f.Uint()
+				shift = 64
+				class = _INTEGER
+			case reflect.Float32:
+				val |= uint64(math.Float32bits(float32(f.Float()))) << shift
+				shift += 32
+				class |= _SSE
+			case reflect.Float64:
+				if v.Type().Size() > 16 {
+					ok = false
+					return
+				}
+				val = uint64(math.Float64bits(f.Float()))
+				shift = 64
+				class = _SSE
+			case reflect.Array:
+				place(f)
+			default:
+				panic("purego: unsupported kind " + f.Kind().String())
+			}
+
+			if shift == 64 {
+				flushIfNeeded()
+			} else if shift > 64 {
+				// Should never happen, but may if we forget to reset shift after flush (or forget to flush);
+				// better to fall apart here than to corrupt
arguments. + panic("purego: tryPlaceRegisters shift > 64") + } + } + } + + place(v) + flushIfNeeded() + return ok +} + +func placeStack(v reflect.Value, addStack func(uintptr)) { + for i := 0; i < v.Type().NumField(); i++ { + f := v.Field(i) + switch f.Kind() { + case reflect.Pointer: + addStack(f.Pointer()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + addStack(uintptr(f.Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + addStack(uintptr(f.Uint())) + case reflect.Float32: + addStack(uintptr(math.Float32bits(float32(f.Float())))) + case reflect.Float64: + addStack(uintptr(math.Float64bits(f.Float()))) + case reflect.Struct: + placeStack(f, addStack) + default: + panic("purego: unsupported kind " + f.Kind().String()) + } + } +} diff --git a/vendor/github.com/ebitengine/purego/struct_arm64.go b/vendor/github.com/ebitengine/purego/struct_arm64.go new file mode 100644 index 00000000000..11c36bd6e47 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/struct_arm64.go @@ -0,0 +1,274 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2024 The Ebitengine Authors + +package purego + +import ( + "math" + "reflect" + "unsafe" +) + +func getStruct(outType reflect.Type, syscall syscall15Args) (v reflect.Value) { + outSize := outType.Size() + switch { + case outSize == 0: + return reflect.New(outType).Elem() + case outSize <= 8: + r1 := syscall.a1 + if isAllFloats, numFields := isAllSameFloat(outType); isAllFloats { + r1 = syscall.f1 + if numFields == 2 { + r1 = syscall.f2<<32 | syscall.f1 + } + } + return reflect.NewAt(outType, unsafe.Pointer(&struct{ a uintptr }{r1})).Elem() + case outSize <= 16: + r1, r2 := syscall.a1, syscall.a2 + if isAllFloats, numFields := isAllSameFloat(outType); isAllFloats { + switch numFields { + case 4: + r1 = syscall.f2<<32 | syscall.f1 + r2 = syscall.f4<<32 | syscall.f3 + case 3: + r1 = syscall.f2<<32 | syscall.f1 + r2 = syscall.f3 + case 2: + r1 = syscall.f1 + r2 = syscall.f2 + default: + panic("unreachable") + } + } + return reflect.NewAt(outType, unsafe.Pointer(&struct{ a, b uintptr }{r1, r2})).Elem() + default: + if isAllFloats, numFields := isAllSameFloat(outType); isAllFloats && numFields <= 4 { + switch numFields { + case 4: + return reflect.NewAt(outType, unsafe.Pointer(&struct{ a, b, c, d uintptr }{syscall.f1, syscall.f2, syscall.f3, syscall.f4})).Elem() + case 3: + return reflect.NewAt(outType, unsafe.Pointer(&struct{ a, b, c uintptr }{syscall.f1, syscall.f2, syscall.f3})).Elem() + default: + panic("unreachable") + } + } + // create struct from the Go pointer created in arm64_r8 + // weird pointer dereference to circumvent go vet + return reflect.NewAt(outType, *(*unsafe.Pointer)(unsafe.Pointer(&syscall.arm64_r8))).Elem() + } +} + +// https://github.com/ARM-software/abi-aa/blob/main/sysvabi64/sysvabi64.rst +const ( + _NO_CLASS = 0b00 + _FLOAT = 0b01 + _INT = 0b11 +) + +func addStruct(v reflect.Value, numInts, numFloats, numStack *int, addInt, addFloat, addStack func(uintptr), keepAlive []interface{}) []interface{} { + if v.Type().Size() == 0 { + return keepAlive + } + + if hva, hfa, size := isHVA(v.Type()), isHFA(v.Type()), v.Type().Size(); hva || hfa || size <= 16 { + // if this doesn't fit entirely in registers then + // each element goes onto the stack + if hfa && *numFloats+v.NumField() > numOfFloats { + *numFloats = numOfFloats + } else if hva && *numInts+v.NumField() > numOfIntegerRegisters() { + *numInts = numOfIntegerRegisters() + } + + placeRegisters(v, 
addFloat, addInt) + } else { + keepAlive = placeStack(v, keepAlive, addInt) + } + return keepAlive // the struct was allocated so don't panic +} + +func placeRegisters(v reflect.Value, addFloat func(uintptr), addInt func(uintptr)) { + var val uint64 + var shift byte + var flushed bool + class := _NO_CLASS + var place func(v reflect.Value) + place = func(v reflect.Value) { + var numFields int + if v.Kind() == reflect.Struct { + numFields = v.Type().NumField() + } else { + numFields = v.Type().Len() + } + for k := 0; k < numFields; k++ { + flushed = false + var f reflect.Value + if v.Kind() == reflect.Struct { + f = v.Field(k) + } else { + f = v.Index(k) + } + if shift >= 64 { + shift = 0 + flushed = true + if class == _FLOAT { + addFloat(uintptr(val)) + } else { + addInt(uintptr(val)) + } + } + switch f.Type().Kind() { + case reflect.Struct: + place(f) + case reflect.Bool: + if f.Bool() { + val |= 1 + } + shift += 8 + class |= _INT + case reflect.Uint8: + val |= f.Uint() << shift + shift += 8 + class |= _INT + case reflect.Uint16: + val |= f.Uint() << shift + shift += 16 + class |= _INT + case reflect.Uint32: + val |= f.Uint() << shift + shift += 32 + class |= _INT + case reflect.Uint64: + addInt(uintptr(f.Uint())) + shift = 0 + flushed = true + case reflect.Int8: + val |= uint64(f.Int()&0xFF) << shift + shift += 8 + class |= _INT + case reflect.Int16: + val |= uint64(f.Int()&0xFFFF) << shift + shift += 16 + class |= _INT + case reflect.Int32: + val |= uint64(f.Int()&0xFFFF_FFFF) << shift + shift += 32 + class |= _INT + case reflect.Int64: + addInt(uintptr(f.Int())) + shift = 0 + flushed = true + case reflect.Float32: + if class == _FLOAT { + addFloat(uintptr(val)) + val = 0 + shift = 0 + } + val |= uint64(math.Float32bits(float32(f.Float()))) << shift + shift += 32 + class |= _FLOAT + case reflect.Float64: + addFloat(uintptr(math.Float64bits(float64(f.Float())))) + shift = 0 + flushed = true + case reflect.Array: + place(f) + default: + panic("purego: unsupported kind " + f.Kind().String()) + } + } + } + place(v) + if !flushed { + if class == _FLOAT { + addFloat(uintptr(val)) + } else { + addInt(uintptr(val)) + } + } +} + +func placeStack(v reflect.Value, keepAlive []interface{}, addInt func(uintptr)) []interface{} { + // Struct is too big to be placed in registers. + // Copy to heap and place the pointer in register + ptrStruct := reflect.New(v.Type()) + ptrStruct.Elem().Set(v) + ptr := ptrStruct.Elem().Addr().UnsafePointer() + keepAlive = append(keepAlive, ptr) + addInt(uintptr(ptr)) + return keepAlive +} + +// isHFA reports a Homogeneous Floating-point Aggregate (HFA) which is a Fundamental Data Type that is a +// Floating-Point type and at most four uniquely addressable members (5.9.5.1 in [Arm64 Calling Convention]). +// This type of struct will be placed more compactly than the individual fields. 
+// +// [Arm64 Calling Convention]: https://github.com/ARM-software/abi-aa/blob/main/sysvabi64/sysvabi64.rst +func isHFA(t reflect.Type) bool { + // round up struct size to nearest 8 see section B.4 + structSize := roundUpTo8(t.Size()) + if structSize == 0 || t.NumField() > 4 { + return false + } + first := t.Field(0) + switch first.Type.Kind() { + case reflect.Float32, reflect.Float64: + firstKind := first.Type.Kind() + for i := 0; i < t.NumField(); i++ { + if t.Field(i).Type.Kind() != firstKind { + return false + } + } + return true + case reflect.Array: + switch first.Type.Elem().Kind() { + case reflect.Float32, reflect.Float64: + return true + default: + return false + } + case reflect.Struct: + for i := 0; i < first.Type.NumField(); i++ { + if !isHFA(first.Type) { + return false + } + } + return true + default: + return false + } +} + +// isHVA reports a Homogeneous Aggregate with a Fundamental Data Type that is a Short-Vector type +// and at most four uniquely addressable members (5.9.5.2 in [Arm64 Calling Convention]). +// A short vector is a machine type that is composed of repeated instances of one fundamental integral or +// floating-point type. It may be 8 or 16 bytes in total size (5.4 in [Arm64 Calling Convention]). +// This type of struct will be placed more compactly than the individual fields. +// +// [Arm64 Calling Convention]: https://github.com/ARM-software/abi-aa/blob/main/sysvabi64/sysvabi64.rst +func isHVA(t reflect.Type) bool { + // round up struct size to nearest 8 see section B.4 + structSize := roundUpTo8(t.Size()) + if structSize == 0 || (structSize != 8 && structSize != 16) { + return false + } + first := t.Field(0) + switch first.Type.Kind() { + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Int8, reflect.Int16, reflect.Int32: + firstKind := first.Type.Kind() + for i := 0; i < t.NumField(); i++ { + if t.Field(i).Type.Kind() != firstKind { + return false + } + } + return true + case reflect.Array: + switch first.Type.Elem().Kind() { + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Int8, reflect.Int16, reflect.Int32: + return true + default: + return false + } + default: + return false + } +} diff --git a/vendor/github.com/ebitengine/purego/struct_other.go b/vendor/github.com/ebitengine/purego/struct_other.go new file mode 100644 index 00000000000..9d42adac898 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/struct_other.go @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2024 The Ebitengine Authors + +//go:build !amd64 && !arm64 + +package purego + +import "reflect" + +func addStruct(v reflect.Value, numInts, numFloats, numStack *int, addInt, addFloat, addStack func(uintptr), keepAlive []interface{}) []interface{} { + panic("purego: struct arguments are not supported") +} + +func getStruct(outType reflect.Type, syscall syscall15Args) (v reflect.Value) { + panic("purego: struct returns are not supported") +} diff --git a/vendor/github.com/ebitengine/purego/sys_amd64.s b/vendor/github.com/ebitengine/purego/sys_amd64.s new file mode 100644 index 00000000000..cabde1a584e --- /dev/null +++ b/vendor/github.com/ebitengine/purego/sys_amd64.s @@ -0,0 +1,164 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build darwin || freebsd || linux + +#include "textflag.h" +#include "abi_amd64.h" +#include "go_asm.h" +#include "funcdata.h" + +#define STACK_SIZE 80 +#define PTR_ADDRESS (STACK_SIZE - 8) + +// syscall15X calls a function in libc on behalf of 
the syscall package.
+// syscall15X takes a pointer to a struct like:
+// struct {
+//	fn   uintptr
+//	a1   uintptr
+//	a2   uintptr
+//	a3   uintptr
+//	a4   uintptr
+//	a5   uintptr
+//	a6   uintptr
+//	a7   uintptr
+//	a8   uintptr
+//	a9   uintptr
+//	a10  uintptr
+//	a11  uintptr
+//	a12  uintptr
+//	a13  uintptr
+//	a14  uintptr
+//	a15  uintptr
+//	r1   uintptr
+//	r2   uintptr
+//	err  uintptr
+// }
+// syscall15X must be called on the g0 stack with the
+// C calling convention (use libcCall).
+GLOBL ·syscall15XABI0(SB), NOPTR|RODATA, $8
+DATA ·syscall15XABI0(SB)/8, $syscall15X(SB)
+TEXT syscall15X(SB), NOSPLIT|NOFRAME, $0
+	PUSHQ BP
+	MOVQ  SP, BP
+	SUBQ  $STACK_SIZE, SP
+	MOVQ  DI, PTR_ADDRESS(BP) // save the pointer
+	MOVQ  DI, R11
+
+	MOVQ syscall15Args_f1(R11), X0 // f1
+	MOVQ syscall15Args_f2(R11), X1 // f2
+	MOVQ syscall15Args_f3(R11), X2 // f3
+	MOVQ syscall15Args_f4(R11), X3 // f4
+	MOVQ syscall15Args_f5(R11), X4 // f5
+	MOVQ syscall15Args_f6(R11), X5 // f6
+	MOVQ syscall15Args_f7(R11), X6 // f7
+	MOVQ syscall15Args_f8(R11), X7 // f8
+
+	MOVQ syscall15Args_a1(R11), DI // a1
+	MOVQ syscall15Args_a2(R11), SI // a2
+	MOVQ syscall15Args_a3(R11), DX // a3
+	MOVQ syscall15Args_a4(R11), CX // a4
+	MOVQ syscall15Args_a5(R11), R8 // a5
+	MOVQ syscall15Args_a6(R11), R9 // a6
+
+	// push the remaining parameters onto the stack
+	MOVQ syscall15Args_a7(R11), R12
+	MOVQ R12, 0(SP) // push a7
+	MOVQ syscall15Args_a8(R11), R12
+	MOVQ R12, 8(SP) // push a8
+	MOVQ syscall15Args_a9(R11), R12
+	MOVQ R12, 16(SP) // push a9
+	MOVQ syscall15Args_a10(R11), R12
+	MOVQ R12, 24(SP) // push a10
+	MOVQ syscall15Args_a11(R11), R12
+	MOVQ R12, 32(SP) // push a11
+	MOVQ syscall15Args_a12(R11), R12
+	MOVQ R12, 40(SP) // push a12
+	MOVQ syscall15Args_a13(R11), R12
+	MOVQ R12, 48(SP) // push a13
+	MOVQ syscall15Args_a14(R11), R12
+	MOVQ R12, 56(SP) // push a14
+	MOVQ syscall15Args_a15(R11), R12
+	MOVQ R12, 64(SP) // push a15
+	XORL AX, AX // vararg: say "no float args"
+
+	MOVQ syscall15Args_fn(R11), R10 // fn
+	CALL R10
+
+	MOVQ PTR_ADDRESS(BP), DI // get the pointer back
+	MOVQ AX, syscall15Args_a1(DI) // r1
+	MOVQ DX, syscall15Args_a2(DI) // r2
+	MOVQ X0, syscall15Args_f1(DI) // f1
+	MOVQ X1, syscall15Args_f2(DI) // f2
+
+	XORL AX, AX // no error (it's ignored anyway)
+	ADDQ $STACK_SIZE, SP
+	MOVQ BP, SP
+	POPQ BP
+	RET
+
+TEXT callbackasm1(SB), NOSPLIT|NOFRAME, $0
+	MOVQ 0(SP), AX // save the return address to calculate the cb index
+	MOVQ 8(SP), R10 // get the return SP so that we can align register args with stack args
+	ADDQ $8, SP // remove return address from stack, we are not returning to callbackasm, but to its caller.
+
+	// make space for first six int and 8 float arguments below the frame
+	ADJSP $14*8, SP
+	MOVSD X0, (1*8)(SP)
+	MOVSD X1, (2*8)(SP)
+	MOVSD X2, (3*8)(SP)
+	MOVSD X3, (4*8)(SP)
+	MOVSD X4, (5*8)(SP)
+	MOVSD X5, (6*8)(SP)
+	MOVSD X6, (7*8)(SP)
+	MOVSD X7, (8*8)(SP)
+	MOVQ DI, (9*8)(SP)
+	MOVQ SI, (10*8)(SP)
+	MOVQ DX, (11*8)(SP)
+	MOVQ CX, (12*8)(SP)
+	MOVQ R8, (13*8)(SP)
+	MOVQ R9, (14*8)(SP)
+	LEAQ 8(SP), R8 // R8 = address of args vector
+
+	PUSHQ R10 // push the stack pointer below registers
+
+	// Switch from the host ABI to the Go ABI.
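+	// (PUSH_REGS_HOST_TO_ABI0 saves every register that is callee-saved in
+	// the host C ABI but scratch in Go's ABI0, so Go code may clobber them.)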
+ PUSH_REGS_HOST_TO_ABI0() + + // determine index into runtime·cbs table + MOVQ $callbackasm(SB), DX + SUBQ DX, AX + MOVQ $0, DX + MOVQ $5, CX // divide by 5 because each call instruction in ·callbacks is 5 bytes long + DIVL CX + SUBQ $1, AX // subtract 1 because return PC is to the next slot + + // Create a struct callbackArgs on our stack to be passed as + // the "frame" to cgocallback and on to callbackWrap. + // $24 to make enough room for the arguments to runtime.cgocallback + SUBQ $(24+callbackArgs__size), SP + MOVQ AX, (24+callbackArgs_index)(SP) // callback index + MOVQ R8, (24+callbackArgs_args)(SP) // address of args vector + MOVQ $0, (24+callbackArgs_result)(SP) // result + LEAQ 24(SP), AX // take the address of callbackArgs + + // Call cgocallback, which will call callbackWrap(frame). + MOVQ ·callbackWrap_call(SB), DI // Get the ABIInternal function pointer + MOVQ (DI), DI // without by using a closure. + MOVQ AX, SI // frame (address of callbackArgs) + MOVQ $0, CX // context + + CALL crosscall2(SB) // runtime.cgocallback(fn, frame, ctxt uintptr) + + // Get callback result. + MOVQ (24+callbackArgs_result)(SP), AX + ADDQ $(24+callbackArgs__size), SP // remove callbackArgs struct + + POP_REGS_HOST_TO_ABI0() + + POPQ R10 // get the SP back + ADJSP $-14*8, SP // remove arguments + + MOVQ R10, 0(SP) + + RET diff --git a/vendor/github.com/ebitengine/purego/sys_arm64.s b/vendor/github.com/ebitengine/purego/sys_arm64.s new file mode 100644 index 00000000000..a68fdb99ba7 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/sys_arm64.s @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build darwin || freebsd || linux || windows + +#include "textflag.h" +#include "go_asm.h" +#include "funcdata.h" + +#define STACK_SIZE 64 +#define PTR_ADDRESS (STACK_SIZE - 8) + +// syscall15X calls a function in libc on behalf of the syscall package. +// syscall15X takes a pointer to a struct like: +// struct { +// fn uintptr +// a1 uintptr +// a2 uintptr +// a3 uintptr +// a4 uintptr +// a5 uintptr +// a6 uintptr +// a7 uintptr +// a8 uintptr +// a9 uintptr +// a10 uintptr +// a11 uintptr +// a12 uintptr +// a13 uintptr +// a14 uintptr +// a15 uintptr +// r1 uintptr +// r2 uintptr +// err uintptr +// } +// syscall15X must be called on the g0 stack with the +// C calling convention (use libcCall). 
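+// The GLOBL/DATA pair below publishes the address of syscall15X through the
+// Go-visible variable syscall15XABI0, which is the function pointer that the
+// Go side hands to runtime_cgocall.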
+GLOBL ·syscall15XABI0(SB), NOPTR|RODATA, $8
+DATA ·syscall15XABI0(SB)/8, $syscall15X(SB)
+TEXT syscall15X(SB), NOSPLIT, $0
+	SUB  $STACK_SIZE, RSP // push structure pointer
+	MOVD R0, PTR_ADDRESS(RSP)
+	MOVD R0, R9
+
+	FMOVD syscall15Args_f1(R9), F0 // f1
+	FMOVD syscall15Args_f2(R9), F1 // f2
+	FMOVD syscall15Args_f3(R9), F2 // f3
+	FMOVD syscall15Args_f4(R9), F3 // f4
+	FMOVD syscall15Args_f5(R9), F4 // f5
+	FMOVD syscall15Args_f6(R9), F5 // f6
+	FMOVD syscall15Args_f7(R9), F6 // f7
+	FMOVD syscall15Args_f8(R9), F7 // f8
+
+	MOVD syscall15Args_a1(R9), R0 // a1
+	MOVD syscall15Args_a2(R9), R1 // a2
+	MOVD syscall15Args_a3(R9), R2 // a3
+	MOVD syscall15Args_a4(R9), R3 // a4
+	MOVD syscall15Args_a5(R9), R4 // a5
+	MOVD syscall15Args_a6(R9), R5 // a6
+	MOVD syscall15Args_a7(R9), R6 // a7
+	MOVD syscall15Args_a8(R9), R7 // a8
+	MOVD syscall15Args_arm64_r8(R9), R8 // r8
+
+	MOVD syscall15Args_a9(R9), R10
+	MOVD R10, 0(RSP) // push a9 onto stack
+	MOVD syscall15Args_a10(R9), R10
+	MOVD R10, 8(RSP) // push a10 onto stack
+	MOVD syscall15Args_a11(R9), R10
+	MOVD R10, 16(RSP) // push a11 onto stack
+	MOVD syscall15Args_a12(R9), R10
+	MOVD R10, 24(RSP) // push a12 onto stack
+	MOVD syscall15Args_a13(R9), R10
+	MOVD R10, 32(RSP) // push a13 onto stack
+	MOVD syscall15Args_a14(R9), R10
+	MOVD R10, 40(RSP) // push a14 onto stack
+	MOVD syscall15Args_a15(R9), R10
+	MOVD R10, 48(RSP) // push a15 onto stack
+
+	MOVD syscall15Args_fn(R9), R10 // fn
+	BL (R10)
+
+	MOVD PTR_ADDRESS(RSP), R2 // pop structure pointer
+	ADD  $STACK_SIZE, RSP
+
+	MOVD  R0, syscall15Args_a1(R2) // save r1
+	MOVD  R1, syscall15Args_a2(R2) // save r2
+	FMOVD F0, syscall15Args_f1(R2) // save f0
+	FMOVD F1, syscall15Args_f2(R2) // save f1
+	FMOVD F2, syscall15Args_f3(R2) // save f2
+	FMOVD F3, syscall15Args_f4(R2) // save f3
+
+	RET
diff --git a/vendor/github.com/ebitengine/purego/sys_unix_arm64.s b/vendor/github.com/ebitengine/purego/sys_unix_arm64.s
new file mode 100644
index 00000000000..6da06b4d188
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/sys_unix_arm64.s
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2023 The Ebitengine Authors
+
+//go:build darwin || freebsd || linux
+
+#include "textflag.h"
+#include "go_asm.h"
+#include "funcdata.h"
+#include "abi_arm64.h"
+
+TEXT callbackasm1(SB), NOSPLIT|NOFRAME, $0
+	NO_LOCAL_POINTERS
+
+	// On entry, the trampoline in zcallback_darwin_arm64.s left
+	// the callback index in R12 (which is volatile in the C ABI).
+
+	// Save callback register arguments R0-R7 and F0-F7.
+	// We do this at the top of the frame so they're contiguous with stack arguments.
+	SUB   $(16*8), RSP, R14
+	FSTPD (F0, F1), (0*8)(R14)
+	FSTPD (F2, F3), (2*8)(R14)
+	FSTPD (F4, F5), (4*8)(R14)
+	FSTPD (F6, F7), (6*8)(R14)
+	STP   (R0, R1), (8*8)(R14)
+	STP   (R2, R3), (10*8)(R14)
+	STP   (R4, R5), (12*8)(R14)
+	STP   (R6, R7), (14*8)(R14)
+
+	// Adjust SP by frame size.
+	SUB $(26*8), RSP
+
+	// It is important to save R27 because the go assembler
+	// uses it for move instructions for a variable.
+	// This line:
+	//     MOVD ·callbackWrap_call(SB), R0
+	// Creates the instructions:
+	//     ADRP 14335(PC), R27
+	//     MOVD 388(27), R0
+	// R27 is a callee saved register so we are responsible
+	// for ensuring its value doesn't change. So save it and
+	// restore it at the end of this function.
+	// R30 is the link register. crosscall2 doesn't save it
+	// so it's saved here.
+	STP (R27, R30), 0(RSP)
+
+	// Create a struct callbackArgs on our stack.
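+	// The struct carries the callback index, a pointer to the saved register
+	// arguments, and a zeroed result slot; crosscall2 passes it to
+	// runtime.cgocallback, which calls callbackWrap with it as the frame.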
+ MOVD $(callbackArgs__size)(RSP), R13 + MOVD R12, callbackArgs_index(R13) // callback index + MOVD R14, callbackArgs_args(R13) // address of args vector + MOVD ZR, callbackArgs_result(R13) // result + + // Move parameters into registers + // Get the ABIInternal function pointer + // without <ABIInternal> by using a closure. + MOVD ·callbackWrap_call(SB), R0 + MOVD (R0), R0 // fn unsafe.Pointer + MOVD R13, R1 // frame (&callbackArgs{...}) + MOVD $0, R3 // ctxt uintptr + + BL crosscall2(SB) + + // Get callback result. + MOVD $(callbackArgs__size)(RSP), R13 + MOVD callbackArgs_result(R13), R0 + + // Restore LR and R27 + LDP 0(RSP), (R27, R30) + ADD $(26*8), RSP + + RET diff --git a/vendor/github.com/ebitengine/purego/syscall.go b/vendor/github.com/ebitengine/purego/syscall.go new file mode 100644 index 00000000000..c30688dda13 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/syscall.go @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build darwin || freebsd || linux || windows + +package purego + +// CDecl marks a function as being called using the __cdecl calling convention as defined in +// the [MSDocs] when passed to NewCallback. It must be the first argument to the function. +// This is only useful on 386 Windows, but it is safe to use on other platforms. +// +// [MSDocs]: https://learn.microsoft.com/en-us/cpp/cpp/cdecl?view=msvc-170 +type CDecl struct{} + +const ( + maxArgs = 15 + numOfFloats = 8 // arm64 and amd64 both have 8 float registers +) + +type syscall15Args struct { + fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr + f1, f2, f3, f4, f5, f6, f7, f8 uintptr + arm64_r8 uintptr +} + +// SyscallN takes fn, a C function pointer and a list of arguments as uintptr. +// There is an internal maximum number of arguments that SyscallN can take. It panics +// when the maximum is exceeded. It returns the result and the libc error code if there is one. +// +// NOTE: SyscallN does not properly call functions that have both integer and float parameters. +// See discussion comment https://github.com/ebiten/purego/pull/1#issuecomment-1128057607 +// for an explanation of why that is. +// +// On amd64, if there are more than 8 floats the 9th and so on will be placed incorrectly on the +// stack. +// +// The pragma go:nosplit is not needed at this function declaration because it uses go:uintptrescapes +// which forces all the objects that the uintptrs point to onto the heap where a stack split won't affect +// their memory location.
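[Editor's note] A usage sketch before the implementation. Dlopen, Dlsym and the RTLD_* constants come from other files in this vendor drop, not from this hunk, so treat those names as assumptions:

    package main

    import (
        "fmt"

        "github.com/ebitengine/purego"
    )

    func main() {
        // Resolve libc's getpid and call it through SyscallN.
        libc, err := purego.Dlopen("libc.so.6", purego.RTLD_NOW|purego.RTLD_GLOBAL)
        if err != nil {
            panic(err)
        }
        getpid, err := purego.Dlsym(libc, "getpid")
        if err != nil {
            panic(err)
        }
        pid, _, _ := purego.SyscallN(getpid) // r1 carries the return value
        fmt.Println("pid:", pid)
    }

Per the NOTE above, this path is only reliable when the callee's parameters are all integers or all floats; mixed signatures hit the limitation described in the linked discussion.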
+// +//go:uintptrescapes +func SyscallN(fn uintptr, args ...uintptr) (r1, r2, err uintptr) { + if fn == 0 { + panic("purego: fn is nil") + } + if len(args) > maxArgs { + panic("purego: too many arguments to SyscallN") + } + // add padding so there is no out-of-bounds slicing + var tmp [maxArgs]uintptr + copy(tmp[:], args) + return syscall_syscall15X(fn, tmp[0], tmp[1], tmp[2], tmp[3], tmp[4], tmp[5], tmp[6], tmp[7], tmp[8], tmp[9], tmp[10], tmp[11], tmp[12], tmp[13], tmp[14]) +} diff --git a/vendor/github.com/ebitengine/purego/syscall_cgo_linux.go b/vendor/github.com/ebitengine/purego/syscall_cgo_linux.go new file mode 100644 index 00000000000..36ee14e3b73 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/syscall_cgo_linux.go @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build cgo && !(amd64 || arm64) + +package purego + +import ( + "github.com/ebitengine/purego/internal/cgo" +) + +var syscall15XABI0 = uintptr(cgo.Syscall15XABI0) + +//go:nosplit +func syscall_syscall15X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2, err uintptr) { + return cgo.Syscall15X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15) +} + +func NewCallback(_ interface{}) uintptr { + panic("purego: NewCallback on Linux is only supported on amd64/arm64") +} diff --git a/vendor/github.com/ebitengine/purego/syscall_sysv.go b/vendor/github.com/ebitengine/purego/syscall_sysv.go new file mode 100644 index 00000000000..cce171c8f60 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/syscall_sysv.go @@ -0,0 +1,223 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build darwin || freebsd || (linux && (amd64 || arm64)) + +package purego + +import ( + "reflect" + "runtime" + "sync" + "unsafe" +) + +var syscall15XABI0 uintptr + +//go:nosplit +func syscall_syscall15X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2, err uintptr) { + args := syscall15Args{ + fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, + a1, a2, a3, a4, a5, a6, a7, a8, + 0, + } + runtime_cgocall(syscall15XABI0, unsafe.Pointer(&args)) + return args.a1, args.a2, 0 +} + +// NewCallback converts a Go function to a function pointer conforming to the C calling convention. +// This is useful when interoperating with C code requiring callbacks. The argument is expected to be a +// function with zero or one uintptr-sized result. The function must not have arguments with size larger than the size +// of uintptr. Only a limited number of callbacks may be created in a single Go process, and any memory allocated +// for these callbacks is never released. At least 2000 callbacks can always be created. Although this function +// provides similar functionality to windows.NewCallback it is distinct. 
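[Editor's note] A call-side sketch of NewCallback before its implementation (illustrative only; nothing here is specific to a particular C library):

    // A Go function becomes a C-callable pointer. The returned uintptr is
    // the address of one slot in the callbackasm table (zcallback_*.s,
    // later in this diff); C code calling it lands in callbackasm1, which
    // looks the function up in cbs.funcs and invokes it.
    add := purego.NewCallback(func(a, b uintptr) uintptr {
        return a + b
    })
    // `add` can now be handed to any C API expecting a
    // uintptr_t (*)(uintptr_t, uintptr_t) function pointer.

Because the table is fixed at build time (maxCB entries) and slots are never reclaimed, the pointer stays valid for the life of the process, as the comment above notes.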
+func NewCallback(fn interface{}) uintptr { + ty := reflect.TypeOf(fn) + for i := 0; i < ty.NumIn(); i++ { + in := ty.In(i) + if !in.AssignableTo(reflect.TypeOf(CDecl{})) { + continue + } + if i != 0 { + panic("purego: CDecl must be the first argument") + } + } + return compileCallback(fn) +} + +// maxCB is the maximum number of callbacks +// only increase this if you have added more to the callbackasm function +const maxCB = 2000 + +var cbs struct { + lock sync.Mutex + numFn int // the number of functions currently in cbs.funcs + funcs [maxCB]reflect.Value // the saved callbacks +} + +type callbackArgs struct { + index uintptr + // args points to the argument block. + // + // The structure of the arguments goes + // float registers followed by the + // integer registers followed by the stack. + // + // This variable is treated as a continuous + // block of memory containing all of the arguments + // for this callback. + args unsafe.Pointer + // Below are out-args from callbackWrap + result uintptr +} + +func compileCallback(fn interface{}) uintptr { + val := reflect.ValueOf(fn) + if val.Kind() != reflect.Func { + panic("purego: the type must be a function but was not") + } + if val.IsNil() { + panic("purego: function must not be nil") + } + ty := val.Type() + for i := 0; i < ty.NumIn(); i++ { + in := ty.In(i) + switch in.Kind() { + case reflect.Struct: + if i == 0 && in.AssignableTo(reflect.TypeOf(CDecl{})) { + continue + } + fallthrough + case reflect.Interface, reflect.Func, reflect.Slice, + reflect.Chan, reflect.Complex64, reflect.Complex128, + reflect.String, reflect.Map, reflect.Invalid: + panic("purego: unsupported argument type: " + in.Kind().String()) + } + } +output: + switch { + case ty.NumOut() == 1: + switch ty.Out(0).Kind() { + case reflect.Pointer, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, + reflect.Bool, reflect.UnsafePointer: + break output + } + panic("purego: unsupported return type: " + ty.String()) + case ty.NumOut() > 1: + panic("purego: callbacks can only have one return") + } + cbs.lock.Lock() + defer cbs.lock.Unlock() + if cbs.numFn >= maxCB { + panic("purego: the maximum number of callbacks has been reached") + } + cbs.funcs[cbs.numFn] = val + cbs.numFn++ + return callbackasmAddr(cbs.numFn - 1) +} + +const ptrSize = unsafe.Sizeof((*int)(nil)) + +const callbackMaxFrame = 64 * ptrSize + +// callbackasm is implemented in zcallback_GOOS_GOARCH.s +// +//go:linkname __callbackasm callbackasm +var __callbackasm byte +var callbackasmABI0 = uintptr(unsafe.Pointer(&__callbackasm)) + +// callbackWrap_call allows the calling of the ABIInternal wrapper +// which is required for runtime.cgocallback without the +// <ABIInternal> tag which is only allowed in the runtime. +// This closure is used inside sys_darwin_GOARCH.s +var callbackWrap_call = callbackWrap + +// callbackWrap is called by assembly code which determines which Go function to call. +// This function takes the arguments and passes them to the Go function and returns the result.
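[Editor's note] The slot arithmetic that callbackWrap (next hunk) performs over this frame can be restated compactly. An illustrative sketch under the arm64/amd64 assumption of 8 float and 8 integer registers; numOfIntegerRegisters() is defined per-architecture elsewhere in the package:

    // frameLayout mirrors the floatsN/intsN/stack bookkeeping in
    // callbackWrap: floats occupy slots 0-7, integers slots 8-15, and
    // overflow of either kind spills to the stack region from slot 16 on.
    type frameLayout struct{ floatsN, intsN, stack int }

    func newFrameLayout() *frameLayout {
        const numFloats, numInts = 8, 8
        return &frameLayout{stack: numFloats + numInts}
    }

    // next returns the frame index the next argument is read from.
    func (l *frameLayout) next(isFloat bool) (pos int) {
        const numFloats, numInts = 8, 8
        switch {
        case isFloat && l.floatsN < numFloats:
            pos = l.floatsN // float register slot
        case !isFloat && l.intsN < numInts:
            pos = numFloats + l.intsN // integers follow the floats
        default:
            pos = l.stack // spilled past the registers
            l.stack++
        }
        if isFloat {
            l.floatsN++
        } else {
            l.intsN++
        }
        return pos
    }

So for func(float64, int, float64) the arguments are read from slots 0, 8 and 1, which matches the save order in callbackasm1 above: F0-F7 first, then R0-R7, then the caller's stack arguments.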
+func callbackWrap(a *callbackArgs) { + cbs.lock.Lock() + fn := cbs.funcs[a.index] + cbs.lock.Unlock() + fnType := fn.Type() + args := make([]reflect.Value, fnType.NumIn()) + frame := (*[callbackMaxFrame]uintptr)(a.args) + var floatsN int // floatsN represents the number of float arguments processed + var intsN int // intsN represents the number of integer arguments processed + // stack points to the index into frame of the current stack element. + // The stack begins after the float and integer registers. + stack := numOfIntegerRegisters() + numOfFloats + for i := range args { + var pos int + switch fnType.In(i).Kind() { + case reflect.Float32, reflect.Float64: + if floatsN >= numOfFloats { + pos = stack + stack++ + } else { + pos = floatsN + } + floatsN++ + case reflect.Struct: + // This is the CDecl field + args[i] = reflect.Zero(fnType.In(i)) + continue + default: + + if intsN >= numOfIntegerRegisters() { + pos = stack + stack++ + } else { + // the integers begin after the floats in frame + pos = intsN + numOfFloats + } + intsN++ + } + args[i] = reflect.NewAt(fnType.In(i), unsafe.Pointer(&frame[pos])).Elem() + } + ret := fn.Call(args) + if len(ret) > 0 { + switch k := ret[0].Kind(); k { + case reflect.Uint, reflect.Uint64, reflect.Uint32, reflect.Uint16, reflect.Uint8, reflect.Uintptr: + a.result = uintptr(ret[0].Uint()) + case reflect.Int, reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8: + a.result = uintptr(ret[0].Int()) + case reflect.Bool: + if ret[0].Bool() { + a.result = 1 + } else { + a.result = 0 + } + case reflect.Pointer: + a.result = ret[0].Pointer() + case reflect.UnsafePointer: + a.result = ret[0].Pointer() + default: + panic("purego: unsupported kind: " + k.String()) + } + } +} + +// callbackasmAddr returns address of runtime.callbackasm +// function adjusted by i. +// On x86 and amd64, runtime.callbackasm is a series of CALL instructions, +// and we want callback to arrive at +// correspondent call instruction instead of start of +// runtime.callbackasm. +// On ARM, runtime.callbackasm is a series of mov and branch instructions. +// R12 is loaded with the callback index. Each entry is two instructions, +// hence 8 bytes. +func callbackasmAddr(i int) uintptr { + var entrySize int + switch runtime.GOARCH { + default: + panic("purego: unsupported architecture") + case "386", "amd64": + entrySize = 5 + case "arm", "arm64": + // On ARM and ARM64, each entry is a MOV instruction + // followed by a branch instruction + entrySize = 8 + } + return callbackasmABI0 + uintptr(i*entrySize) +} diff --git a/vendor/github.com/ebitengine/purego/syscall_windows.go b/vendor/github.com/ebitengine/purego/syscall_windows.go new file mode 100644 index 00000000000..5fbfcabfdc9 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/syscall_windows.go @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +package purego + +import ( + "reflect" + "syscall" +) + +var syscall15XABI0 uintptr + +func syscall_syscall15X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2, err uintptr) { + r1, r2, errno := syscall.Syscall15(fn, 15, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15) + return r1, r2, uintptr(errno) +} + +// NewCallback converts a Go function to a function pointer conforming to the stdcall calling convention. +// This is useful when interoperating with Windows code requiring callbacks. The argument is expected to be a +// function with one uintptr-sized result.
The function must not have arguments with size larger than the +// size of uintptr. Only a limited number of callbacks may be created in a single Go process, and any memory +// allocated for these callbacks is never released. Between NewCallback and NewCallbackCDecl, at least 1024 +// callbacks can always be created. Although this function is similar to the darwin version it may act +// differently. +func NewCallback(fn interface{}) uintptr { + isCDecl := false + ty := reflect.TypeOf(fn) + for i := 0; i < ty.NumIn(); i++ { + in := ty.In(i) + if !in.AssignableTo(reflect.TypeOf(CDecl{})) { + continue + } + if i != 0 { + panic("purego: CDecl must be the first argument") + } + isCDecl = true + } + if isCDecl { + return syscall.NewCallbackCDecl(fn) + } + return syscall.NewCallback(fn) +} + +func loadSymbol(handle uintptr, name string) (uintptr, error) { + return syscall.GetProcAddress(syscall.Handle(handle), name) +} diff --git a/vendor/github.com/ebitengine/purego/zcallback_amd64.s b/vendor/github.com/ebitengine/purego/zcallback_amd64.s new file mode 100644 index 00000000000..6a778bfcad1 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/zcallback_amd64.s @@ -0,0 +1,2014 @@ +// Code generated by wincallback.go using 'go generate'. DO NOT EDIT. + +//go:build darwin || freebsd || linux + +// runtime·callbackasm is called by external code to +// execute Go implemented callback function. It is not +// called from the start, instead runtime·compilecallback +// always returns address into runtime·callbackasm offset +// appropriately so different callbacks start with different +// CALL instruction in runtime·callbackasm. This determines +// which Go callback function is executed later on. +#include "textflag.h" + +TEXT callbackasm(SB), NOSPLIT|NOFRAME, $0 + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL
callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL 
callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL 
callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL 
callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL 
callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL 
callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL 
callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL 
callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL 
callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL 
callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL 
callbackasm1(SB)
+	[... the generated amd64 callback table continues to the end of the file with the same CALL callbackasm1(SB) instruction, one entry per callback slot ...]
diff --git a/vendor/github.com/ebitengine/purego/zcallback_arm64.s b/vendor/github.com/ebitengine/purego/zcallback_arm64.s
new file mode 100644
index 00000000000..c079b8038e3
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/zcallback_arm64.s
@@ -0,0 +1,4014 @@
+// Code generated by wincallback.go using 'go generate'. DO NOT EDIT.
+
+//go:build darwin || freebsd || linux
+
+// External code calls into callbackasm at an offset corresponding
+// to the callback index. Callbackasm is a table of MOVD and B instructions.
+// The MOVD instruction loads R12 with the callback index, and the
+// B instruction branches to callbackasm1.
+// callbackasm1 takes the callback index from R12 and
+// indexes into an array that stores information about each callback.
+// It then calls the Go implementation for that callback.
+#include "textflag.h"
+
+TEXT callbackasm(SB), NOSPLIT|NOFRAME, $0
+	MOVD	$0, R12
+	B	callbackasm1(SB)
+	MOVD	$1, R12
+	B	callbackasm1(SB)
+	MOVD	$2, R12
+	B	callbackasm1(SB)
+	[... the table continues in the same two-instruction pattern, MOVD $n, R12 followed by B callbackasm1(SB), for each remaining callback index ...]
callbackasm1(SB) + MOVD $1643, R12 + B callbackasm1(SB) + MOVD $1644, R12 + B callbackasm1(SB) + MOVD $1645, R12 + B callbackasm1(SB) + MOVD $1646, R12 + B callbackasm1(SB) + MOVD $1647, R12 + B callbackasm1(SB) + MOVD $1648, R12 + B callbackasm1(SB) + MOVD $1649, R12 + B callbackasm1(SB) + MOVD $1650, R12 + B callbackasm1(SB) + MOVD $1651, R12 + B callbackasm1(SB) + MOVD $1652, R12 + B callbackasm1(SB) + MOVD $1653, R12 + B callbackasm1(SB) + MOVD $1654, R12 + B callbackasm1(SB) + MOVD $1655, R12 + B callbackasm1(SB) + MOVD $1656, R12 + B callbackasm1(SB) + MOVD $1657, R12 + B callbackasm1(SB) + MOVD $1658, R12 + B callbackasm1(SB) + MOVD $1659, R12 + B callbackasm1(SB) + MOVD $1660, R12 + B callbackasm1(SB) + MOVD $1661, R12 + B callbackasm1(SB) + MOVD $1662, R12 + B callbackasm1(SB) + MOVD $1663, R12 + B callbackasm1(SB) + MOVD $1664, R12 + B callbackasm1(SB) + MOVD $1665, R12 + B callbackasm1(SB) + MOVD $1666, R12 + B callbackasm1(SB) + MOVD $1667, R12 + B callbackasm1(SB) + MOVD $1668, R12 + B callbackasm1(SB) + MOVD $1669, R12 + B callbackasm1(SB) + MOVD $1670, R12 + B callbackasm1(SB) + MOVD $1671, R12 + B callbackasm1(SB) + MOVD $1672, R12 + B callbackasm1(SB) + MOVD $1673, R12 + B callbackasm1(SB) + MOVD $1674, R12 + B callbackasm1(SB) + MOVD $1675, R12 + B callbackasm1(SB) + MOVD $1676, R12 + B callbackasm1(SB) + MOVD $1677, R12 + B callbackasm1(SB) + MOVD $1678, R12 + B callbackasm1(SB) + MOVD $1679, R12 + B callbackasm1(SB) + MOVD $1680, R12 + B callbackasm1(SB) + MOVD $1681, R12 + B callbackasm1(SB) + MOVD $1682, R12 + B callbackasm1(SB) + MOVD $1683, R12 + B callbackasm1(SB) + MOVD $1684, R12 + B callbackasm1(SB) + MOVD $1685, R12 + B callbackasm1(SB) + MOVD $1686, R12 + B callbackasm1(SB) + MOVD $1687, R12 + B callbackasm1(SB) + MOVD $1688, R12 + B callbackasm1(SB) + MOVD $1689, R12 + B callbackasm1(SB) + MOVD $1690, R12 + B callbackasm1(SB) + MOVD $1691, R12 + B callbackasm1(SB) + MOVD $1692, R12 + B callbackasm1(SB) + MOVD $1693, R12 + B callbackasm1(SB) + MOVD $1694, R12 + B callbackasm1(SB) + MOVD $1695, R12 + B callbackasm1(SB) + MOVD $1696, R12 + B callbackasm1(SB) + MOVD $1697, R12 + B callbackasm1(SB) + MOVD $1698, R12 + B callbackasm1(SB) + MOVD $1699, R12 + B callbackasm1(SB) + MOVD $1700, R12 + B callbackasm1(SB) + MOVD $1701, R12 + B callbackasm1(SB) + MOVD $1702, R12 + B callbackasm1(SB) + MOVD $1703, R12 + B callbackasm1(SB) + MOVD $1704, R12 + B callbackasm1(SB) + MOVD $1705, R12 + B callbackasm1(SB) + MOVD $1706, R12 + B callbackasm1(SB) + MOVD $1707, R12 + B callbackasm1(SB) + MOVD $1708, R12 + B callbackasm1(SB) + MOVD $1709, R12 + B callbackasm1(SB) + MOVD $1710, R12 + B callbackasm1(SB) + MOVD $1711, R12 + B callbackasm1(SB) + MOVD $1712, R12 + B callbackasm1(SB) + MOVD $1713, R12 + B callbackasm1(SB) + MOVD $1714, R12 + B callbackasm1(SB) + MOVD $1715, R12 + B callbackasm1(SB) + MOVD $1716, R12 + B callbackasm1(SB) + MOVD $1717, R12 + B callbackasm1(SB) + MOVD $1718, R12 + B callbackasm1(SB) + MOVD $1719, R12 + B callbackasm1(SB) + MOVD $1720, R12 + B callbackasm1(SB) + MOVD $1721, R12 + B callbackasm1(SB) + MOVD $1722, R12 + B callbackasm1(SB) + MOVD $1723, R12 + B callbackasm1(SB) + MOVD $1724, R12 + B callbackasm1(SB) + MOVD $1725, R12 + B callbackasm1(SB) + MOVD $1726, R12 + B callbackasm1(SB) + MOVD $1727, R12 + B callbackasm1(SB) + MOVD $1728, R12 + B callbackasm1(SB) + MOVD $1729, R12 + B callbackasm1(SB) + MOVD $1730, R12 + B callbackasm1(SB) + MOVD $1731, R12 + B callbackasm1(SB) + MOVD $1732, R12 + B callbackasm1(SB) + MOVD $1733, R12 + B 
callbackasm1(SB) + MOVD $1734, R12 + B callbackasm1(SB) + MOVD $1735, R12 + B callbackasm1(SB) + MOVD $1736, R12 + B callbackasm1(SB) + MOVD $1737, R12 + B callbackasm1(SB) + MOVD $1738, R12 + B callbackasm1(SB) + MOVD $1739, R12 + B callbackasm1(SB) + MOVD $1740, R12 + B callbackasm1(SB) + MOVD $1741, R12 + B callbackasm1(SB) + MOVD $1742, R12 + B callbackasm1(SB) + MOVD $1743, R12 + B callbackasm1(SB) + MOVD $1744, R12 + B callbackasm1(SB) + MOVD $1745, R12 + B callbackasm1(SB) + MOVD $1746, R12 + B callbackasm1(SB) + MOVD $1747, R12 + B callbackasm1(SB) + MOVD $1748, R12 + B callbackasm1(SB) + MOVD $1749, R12 + B callbackasm1(SB) + MOVD $1750, R12 + B callbackasm1(SB) + MOVD $1751, R12 + B callbackasm1(SB) + MOVD $1752, R12 + B callbackasm1(SB) + MOVD $1753, R12 + B callbackasm1(SB) + MOVD $1754, R12 + B callbackasm1(SB) + MOVD $1755, R12 + B callbackasm1(SB) + MOVD $1756, R12 + B callbackasm1(SB) + MOVD $1757, R12 + B callbackasm1(SB) + MOVD $1758, R12 + B callbackasm1(SB) + MOVD $1759, R12 + B callbackasm1(SB) + MOVD $1760, R12 + B callbackasm1(SB) + MOVD $1761, R12 + B callbackasm1(SB) + MOVD $1762, R12 + B callbackasm1(SB) + MOVD $1763, R12 + B callbackasm1(SB) + MOVD $1764, R12 + B callbackasm1(SB) + MOVD $1765, R12 + B callbackasm1(SB) + MOVD $1766, R12 + B callbackasm1(SB) + MOVD $1767, R12 + B callbackasm1(SB) + MOVD $1768, R12 + B callbackasm1(SB) + MOVD $1769, R12 + B callbackasm1(SB) + MOVD $1770, R12 + B callbackasm1(SB) + MOVD $1771, R12 + B callbackasm1(SB) + MOVD $1772, R12 + B callbackasm1(SB) + MOVD $1773, R12 + B callbackasm1(SB) + MOVD $1774, R12 + B callbackasm1(SB) + MOVD $1775, R12 + B callbackasm1(SB) + MOVD $1776, R12 + B callbackasm1(SB) + MOVD $1777, R12 + B callbackasm1(SB) + MOVD $1778, R12 + B callbackasm1(SB) + MOVD $1779, R12 + B callbackasm1(SB) + MOVD $1780, R12 + B callbackasm1(SB) + MOVD $1781, R12 + B callbackasm1(SB) + MOVD $1782, R12 + B callbackasm1(SB) + MOVD $1783, R12 + B callbackasm1(SB) + MOVD $1784, R12 + B callbackasm1(SB) + MOVD $1785, R12 + B callbackasm1(SB) + MOVD $1786, R12 + B callbackasm1(SB) + MOVD $1787, R12 + B callbackasm1(SB) + MOVD $1788, R12 + B callbackasm1(SB) + MOVD $1789, R12 + B callbackasm1(SB) + MOVD $1790, R12 + B callbackasm1(SB) + MOVD $1791, R12 + B callbackasm1(SB) + MOVD $1792, R12 + B callbackasm1(SB) + MOVD $1793, R12 + B callbackasm1(SB) + MOVD $1794, R12 + B callbackasm1(SB) + MOVD $1795, R12 + B callbackasm1(SB) + MOVD $1796, R12 + B callbackasm1(SB) + MOVD $1797, R12 + B callbackasm1(SB) + MOVD $1798, R12 + B callbackasm1(SB) + MOVD $1799, R12 + B callbackasm1(SB) + MOVD $1800, R12 + B callbackasm1(SB) + MOVD $1801, R12 + B callbackasm1(SB) + MOVD $1802, R12 + B callbackasm1(SB) + MOVD $1803, R12 + B callbackasm1(SB) + MOVD $1804, R12 + B callbackasm1(SB) + MOVD $1805, R12 + B callbackasm1(SB) + MOVD $1806, R12 + B callbackasm1(SB) + MOVD $1807, R12 + B callbackasm1(SB) + MOVD $1808, R12 + B callbackasm1(SB) + MOVD $1809, R12 + B callbackasm1(SB) + MOVD $1810, R12 + B callbackasm1(SB) + MOVD $1811, R12 + B callbackasm1(SB) + MOVD $1812, R12 + B callbackasm1(SB) + MOVD $1813, R12 + B callbackasm1(SB) + MOVD $1814, R12 + B callbackasm1(SB) + MOVD $1815, R12 + B callbackasm1(SB) + MOVD $1816, R12 + B callbackasm1(SB) + MOVD $1817, R12 + B callbackasm1(SB) + MOVD $1818, R12 + B callbackasm1(SB) + MOVD $1819, R12 + B callbackasm1(SB) + MOVD $1820, R12 + B callbackasm1(SB) + MOVD $1821, R12 + B callbackasm1(SB) + MOVD $1822, R12 + B callbackasm1(SB) + MOVD $1823, R12 + B callbackasm1(SB) + MOVD $1824, R12 + B 
callbackasm1(SB) + MOVD $1825, R12 + B callbackasm1(SB) + MOVD $1826, R12 + B callbackasm1(SB) + MOVD $1827, R12 + B callbackasm1(SB) + MOVD $1828, R12 + B callbackasm1(SB) + MOVD $1829, R12 + B callbackasm1(SB) + MOVD $1830, R12 + B callbackasm1(SB) + MOVD $1831, R12 + B callbackasm1(SB) + MOVD $1832, R12 + B callbackasm1(SB) + MOVD $1833, R12 + B callbackasm1(SB) + MOVD $1834, R12 + B callbackasm1(SB) + MOVD $1835, R12 + B callbackasm1(SB) + MOVD $1836, R12 + B callbackasm1(SB) + MOVD $1837, R12 + B callbackasm1(SB) + MOVD $1838, R12 + B callbackasm1(SB) + MOVD $1839, R12 + B callbackasm1(SB) + MOVD $1840, R12 + B callbackasm1(SB) + MOVD $1841, R12 + B callbackasm1(SB) + MOVD $1842, R12 + B callbackasm1(SB) + MOVD $1843, R12 + B callbackasm1(SB) + MOVD $1844, R12 + B callbackasm1(SB) + MOVD $1845, R12 + B callbackasm1(SB) + MOVD $1846, R12 + B callbackasm1(SB) + MOVD $1847, R12 + B callbackasm1(SB) + MOVD $1848, R12 + B callbackasm1(SB) + MOVD $1849, R12 + B callbackasm1(SB) + MOVD $1850, R12 + B callbackasm1(SB) + MOVD $1851, R12 + B callbackasm1(SB) + MOVD $1852, R12 + B callbackasm1(SB) + MOVD $1853, R12 + B callbackasm1(SB) + MOVD $1854, R12 + B callbackasm1(SB) + MOVD $1855, R12 + B callbackasm1(SB) + MOVD $1856, R12 + B callbackasm1(SB) + MOVD $1857, R12 + B callbackasm1(SB) + MOVD $1858, R12 + B callbackasm1(SB) + MOVD $1859, R12 + B callbackasm1(SB) + MOVD $1860, R12 + B callbackasm1(SB) + MOVD $1861, R12 + B callbackasm1(SB) + MOVD $1862, R12 + B callbackasm1(SB) + MOVD $1863, R12 + B callbackasm1(SB) + MOVD $1864, R12 + B callbackasm1(SB) + MOVD $1865, R12 + B callbackasm1(SB) + MOVD $1866, R12 + B callbackasm1(SB) + MOVD $1867, R12 + B callbackasm1(SB) + MOVD $1868, R12 + B callbackasm1(SB) + MOVD $1869, R12 + B callbackasm1(SB) + MOVD $1870, R12 + B callbackasm1(SB) + MOVD $1871, R12 + B callbackasm1(SB) + MOVD $1872, R12 + B callbackasm1(SB) + MOVD $1873, R12 + B callbackasm1(SB) + MOVD $1874, R12 + B callbackasm1(SB) + MOVD $1875, R12 + B callbackasm1(SB) + MOVD $1876, R12 + B callbackasm1(SB) + MOVD $1877, R12 + B callbackasm1(SB) + MOVD $1878, R12 + B callbackasm1(SB) + MOVD $1879, R12 + B callbackasm1(SB) + MOVD $1880, R12 + B callbackasm1(SB) + MOVD $1881, R12 + B callbackasm1(SB) + MOVD $1882, R12 + B callbackasm1(SB) + MOVD $1883, R12 + B callbackasm1(SB) + MOVD $1884, R12 + B callbackasm1(SB) + MOVD $1885, R12 + B callbackasm1(SB) + MOVD $1886, R12 + B callbackasm1(SB) + MOVD $1887, R12 + B callbackasm1(SB) + MOVD $1888, R12 + B callbackasm1(SB) + MOVD $1889, R12 + B callbackasm1(SB) + MOVD $1890, R12 + B callbackasm1(SB) + MOVD $1891, R12 + B callbackasm1(SB) + MOVD $1892, R12 + B callbackasm1(SB) + MOVD $1893, R12 + B callbackasm1(SB) + MOVD $1894, R12 + B callbackasm1(SB) + MOVD $1895, R12 + B callbackasm1(SB) + MOVD $1896, R12 + B callbackasm1(SB) + MOVD $1897, R12 + B callbackasm1(SB) + MOVD $1898, R12 + B callbackasm1(SB) + MOVD $1899, R12 + B callbackasm1(SB) + MOVD $1900, R12 + B callbackasm1(SB) + MOVD $1901, R12 + B callbackasm1(SB) + MOVD $1902, R12 + B callbackasm1(SB) + MOVD $1903, R12 + B callbackasm1(SB) + MOVD $1904, R12 + B callbackasm1(SB) + MOVD $1905, R12 + B callbackasm1(SB) + MOVD $1906, R12 + B callbackasm1(SB) + MOVD $1907, R12 + B callbackasm1(SB) + MOVD $1908, R12 + B callbackasm1(SB) + MOVD $1909, R12 + B callbackasm1(SB) + MOVD $1910, R12 + B callbackasm1(SB) + MOVD $1911, R12 + B callbackasm1(SB) + MOVD $1912, R12 + B callbackasm1(SB) + MOVD $1913, R12 + B callbackasm1(SB) + MOVD $1914, R12 + B callbackasm1(SB) + MOVD $1915, R12 + B 
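The wall of stubs above is machine-generated callback-dispatch code: every callback slot gets an identical, fixed-size trampoline that loads its own slot number into the scratch register R12 and branches to the shared callbackasm1 routine, which uses that number to look up the Go function registered for the slot. A minimal sketch of such a generator, assuming a hypothetical table size of 2000 and an assumed output filename and TEXT header (this is not the upstream generator):

    // gen_trampolines.go - hypothetical sketch, not the upstream tool.
    // Emits one fixed-size arm64 stub per callback slot; each stub
    // records its slot index in R12 and jumps to the shared handler.
    package main

    import (
    	"fmt"
    	"os"
    )

    const maxCallbacks = 2000 // assumed table size ($0 through $1999)

    func main() {
    	f, err := os.Create("zcallback_windows_arm64.s") // assumed filename
    	if err != nil {
    		panic(err)
    	}
    	defer f.Close()

    	fmt.Fprintln(f, "TEXT callbackasm(SB), NOSPLIT|NOFRAME, $0") // assumed header
    	for i := 0; i < maxCallbacks; i++ {
    		fmt.Fprintf(f, "\tMOVD\t$%d, R12\n", i)
    		fmt.Fprintln(f, "\tB\tcallbackasm1(SB)")
    	}
    }

Because every stub has the same length, foreign code can be handed the address of any individual stub as if it were a distinct function pointer, while the Go side keeps a single shared implementation.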
diff --git a/vendor/github.com/elastic/go-grok/.gitignore b/vendor/github.com/elastic/go-grok/.gitignore new file mode 100644 index 00000000000..3b735ec4a8c --- /dev/null +++ b/vendor/github.com/elastic/go-grok/.gitignore @@ -0,0 +1,21 @@ +# If you prefer the
allow list template instead of the deny list, see community template: +# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore +# +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# Go workspace file +go.work diff --git a/vendor/github.com/elastic/go-grok/.go-version b/vendor/github.com/elastic/go-grok/.go-version new file mode 100644 index 00000000000..054c858fbf3 --- /dev/null +++ b/vendor/github.com/elastic/go-grok/.go-version @@ -0,0 +1 @@ +1.22.5 \ No newline at end of file diff --git a/vendor/github.com/oklog/run/LICENSE b/vendor/github.com/elastic/go-grok/LICENSE similarity index 100% rename from vendor/github.com/oklog/run/LICENSE rename to vendor/github.com/elastic/go-grok/LICENSE diff --git a/vendor/github.com/elastic/go-grok/NOTICE.txt b/vendor/github.com/elastic/go-grok/NOTICE.txt new file mode 100644 index 00000000000..ecf8e3c9234 --- /dev/null +++ b/vendor/github.com/elastic/go-grok/NOTICE.txt @@ -0,0 +1,2024 @@ +Go-Grok +Copyright 2022-2024 Elasticsearch BV + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +================================================================================ +Third party libraries used by the Go-Grok Libraries: +================================================================================ + + +-------------------------------------------------------------------------------- +Dependency : github.com/elastic/go-licenser +Version: v0.4.1 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-licenser@v0.4.1/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2018 Elasticsearch B.V. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : github.com/magefile/mage +Version: v1.15.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/magefile/mage@v1.15.0/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2017 the Mage authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
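Every stanza in this NOTICE file has the same machine-written shape: a dependency path, its version, an autodetected licence type, and the verbatim licence text copied out of the module cache; go.elastic.co/go-licence-detector, itself listed among the dependencies below, is one tool that produces files of exactly this kind. A rough sketch of reproducing just the stanza skeleton from `go list -m all` (licence autodetection omitted; this is not Elastic's generator):

    // notice_skeleton.go - hypothetical sketch, not go-licence-detector.
    // Prints a "Dependency / Version" stanza per module dependency.
    package main

    import (
    	"bufio"
    	"bytes"
    	"fmt"
    	"os/exec"
    	"strings"
    )

    func main() {
    	out, err := exec.Command("go", "list", "-m", "all").Output()
    	if err != nil {
    		panic(err)
    	}
    	sc := bufio.NewScanner(bytes.NewReader(out))
    	sc.Scan() // first line is the main module itself; skip it
    	for sc.Scan() {
    		fields := strings.Fields(sc.Text()) // "path version"
    		if len(fields) < 2 {
    			continue
    		}
    		rule := strings.Repeat("-", 80)
    		fmt.Println(rule)
    		fmt.Printf("Dependency : %s\n", fields[0])
    		fmt.Printf("Version: %s\n", fields[1])
    		fmt.Println(rule)
    	}
    }

The licence-type line and the embedded licence text would come from scanning each module's directory under $GOMODCACHE for a licence file, which is the part a real detector automates.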
+ + +-------------------------------------------------------------------------------- +Dependency : github.com/stretchr/testify +Version: v1.9.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/stretchr/testify@v1.9.0/LICENSE: + +MIT License + +Copyright (c) 2012-2020 Mat Ryer, Tyler Bunnell and contributors. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : go.elastic.co/go-licence-detector +Version: v0.6.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/go.elastic.co/go-licence-detector@v0.6.0/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/tools +Version: v0.13.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/tools@v0.13.0/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + + +================================================================================ +Indirect dependencies + + +-------------------------------------------------------------------------------- +Dependency : github.com/cyphar/filepath-securejoin +Version: v0.2.4 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/cyphar/filepath-securejoin@v0.2.4/LICENSE: + +Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. +Copyright (C) 2017 SUSE LLC. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + +-------------------------------------------------------------------------------- +Dependency : github.com/davecgh/go-spew +Version: v1.1.1 +Licence type (autodetected): ISC +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/davecgh/go-spew@v1.1.1/LICENSE: + +ISC License + +Copyright (c) 2012-2016 Dave Collins + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/gobuffalo/here +Version: v0.6.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/gobuffalo/here@v0.6.0/LICENSE: + +The MIT License (MIT) + +Copyright (c) 2019 Mark Bates + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/google/go-cmp +Version: v0.2.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/google/go-cmp@v0.2.0/LICENSE: + +Copyright (c) 2017 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/google/licenseclassifier +Version: v0.0.0-20200402202327-879cb1424de0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/google/licenseclassifier@v0.0.0-20200402202327-879cb1424de0/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : github.com/karrick/godirwalk +Version: v1.15.6 +Licence type (autodetected): BSD-2-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/karrick/godirwalk@v1.15.6/LICENSE: + +BSD 2-Clause License + +Copyright (c) 2017, Karrick McDermott +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/kr/pretty +Version: v0.1.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/kr/pretty@v0.1.0/License: + +The MIT License (MIT) + +Copyright 2012 Keith Rarick + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/kr/pty +Version: v1.1.1 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/kr/pty@v1.1.1/License: + +Copyright (c) 2011 Keith Rarick + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, +sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall +be included in all copies or substantial portions of the +Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY +KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS +OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/kr/text +Version: v0.1.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/kr/text@v0.1.0/License: + +Copyright 2012 Keith Rarick + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/markbates/pkger +Version: v0.17.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/markbates/pkger@v0.17.0/LICENSE: + +The MIT License (MIT) + +Copyright (c) 2019 Mark Bates + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/pkg/errors +Version: v0.8.1 +Licence type (autodetected): BSD-2-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/pkg/errors@v0.8.1/LICENSE: + +Copyright (c) 2015, Dave Cheney +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/pmezard/go-difflib +Version: v1.0.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/pmezard/go-difflib@v1.0.0/LICENSE: + +Copyright (c) 2013, Patrick Mezard +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + The names of its contributors may not be used to endorse or promote +products derived from this software without specific prior written +permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/sergi/go-diff +Version: v1.1.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/sergi/go-diff@v1.1.0/LICENSE: + +Copyright (c) 2012-2016 The go-diff Authors. All rights reserved. 
+ +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. + + + +-------------------------------------------------------------------------------- +Dependency : github.com/stretchr/objx +Version: v0.5.2 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/stretchr/objx@v0.5.2/LICENSE: + +The MIT License + +Copyright (c) 2014 Stretchr, Inc. +Copyright (c) 2017-2018 objx contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/yuin/goldmark +Version: v1.4.13 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/yuin/goldmark@v1.4.13/LICENSE: + +MIT License + +Copyright (c) 2019 Yusuke Inuzuka + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/crypto +Version: v0.0.0-20191011191535-87dc89f01550 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/crypto@v0.0.0-20191011191535-87dc89f01550/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/lint +Version: v0.0.0-20210508222113-6edffad5e616 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/lint@v0.0.0-20210508222113-6edffad5e616/LICENSE: + +Copyright (c) 2013 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/mod +Version: v0.17.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/mod@v0.17.0/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/net +Version: v0.15.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/net@v0.15.0/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/sync +Version: v0.7.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/sync@v0.7.0/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/sys +Version: v0.12.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/sys@v0.12.0/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/term +Version: v0.0.0-20201126162022-7de9c90e9dd1 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/term@v0.0.0-20201126162022-7de9c90e9dd1/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/text +Version: v0.3.6 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/text@v0.3.6/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/xerrors +Version: v0.0.0-20200804184101-5ec99f83aff1 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/xerrors@v0.0.0-20200804184101-5ec99f83aff1/LICENSE: + +Copyright (c) 2019 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + +-------------------------------------------------------------------------------- +Dependency : gopkg.in/check.v1 +Version: v1.0.0-20190902080502-41f04d3bba15 +Licence type (autodetected): BSD-2-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/gopkg.in/check.v1@v1.0.0-20190902080502-41f04d3bba15/LICENSE: + +Gocheck - A rich testing framework for Go + +Copyright (c) 2010-2013 Gustavo Niemeyer + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : gopkg.in/yaml.v2 +Version: v2.2.7 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/gopkg.in/yaml.v2@v2.2.7/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : gopkg.in/yaml.v3 +Version: v3.0.1 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/gopkg.in/yaml.v3@v3.0.1/LICENSE: + + +This project is covered by two different licenses: MIT and Apache. 
+
+#### MIT License ####
+
+The following files were ported to Go from C files of libyaml, and thus
+are still covered by their original MIT license, with the additional
+copyright staring in 2011 when the project was ported over:
+
+    apic.go emitterc.go parserc.go readerc.go scannerc.go
+    writerc.go yamlh.go yamlprivateh.go
+
+Copyright (c) 2006-2010 Kirill Simonov
+Copyright (c) 2006-2011 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+### Apache License ###
+
+All the remaining project files are covered by the Apache license:
+
+Copyright (c) 2011-2019 Canonical Ltd
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+
diff --git a/vendor/github.com/elastic/go-grok/README.md b/vendor/github.com/elastic/go-grok/README.md
new file mode 100644
index 00000000000..9f0bade8ba7
--- /dev/null
+++ b/vendor/github.com/elastic/go-grok/README.md
@@ -0,0 +1,223 @@
+# Grok
+
+Grok is a grok parsing library based on `re2`-style regular expressions.
+
+## Usage
+
+#### Basic usage:
+```go
+g := grok.New()
+
+// use custom patterns
+patternDefinitions := map[string]string{
+	// patterns can be nested
+	"NGINX_HOST": `(?:%{IP:destination.ip}|%{NGINX_NOTSEPARATOR:destination.domain})(:%{NUMBER:destination.port})?`,
+	// NGINX_NOTSEPARATOR is used in NGINX_HOST. IP and NUMBER are part of the default pattern set
+	"NGINX_NOTSEPARATOR": `"[^\t ,:]+"`,
+}
+g.AddPatterns(patternDefinitions)
+
+// compile the grok expression before use; this generates a regexp.Regexp
+// from the pattern and subpatterns provided.
+// this needs to be performed just once.
+err := g.Compile("%{NGINX_HOST}", true)
+
+res, err := g.ParseString("127.0.0.1:1234")
+```
+
+results in:
+```go
+map[string]string {
+	"destination.ip": "127.0.0.1",
+	"destination.port": "1234",
+}
+```
+
+
+#### Unnamed usage:
+
+In this case we changed
+`err := g.Compile("%{NGINX_HOST}", true)` to
+`err := g.Compile("%{NGINX_HOST}", false)`,
+allowing unnamed matches to be returned as well. For an unnamed match, the pattern definition name is used as the key.
+
+```go
+g := grok.New()
+
+// use custom patterns
+patternDefinitions := map[string]string{
+	// patterns can be nested
+	"NGINX_HOST": `(?:%{IP:destination.ip}|%{NGINX_NOTSEPARATOR:destination.domain})(:%{NUMBER:destination.port})?`,
+	// NGINX_NOTSEPARATOR is used in NGINX_HOST. IP and NUMBER are part of the default pattern set
+	"NGINX_NOTSEPARATOR": `"[^\t ,:]+"`,
+}
+g.AddPatterns(patternDefinitions)
+
+// compile the grok expression before use; this generates a regexp.Regexp
+// from the pattern and subpatterns provided
+err := g.Compile("%{NGINX_HOST}", false)
+
+res, err := g.ParseString("127.0.0.1:1234")
+```
+
+results in:
+```go
+map[string]string {
+	"NGINX_HOST": "127.0.0.1:1234",
+	"destination.ip": "127.0.0.1",
+	"IPV4": "127.0.0.1",
+	"destination.port": "1234",
+	"BASE10NUM": "1234",
+}
+```
+
+
+#### Typed arguments usage:
+
+In this case we mark `destination.port` as `int` using the definition `%{NUMBER:destination.port:int}`.
+
+```go
+g := grok.New()
+
+// use custom patterns
+patternDefinitions := map[string]string{
+	"NGINX_HOST": `(?:%{IP:destination.ip}|%{NGINX_NOTSEPARATOR:destination.domain})(:%{NUMBER:destination.port:int})?`,
+	"NGINX_NOTSEPARATOR": `"[^\t ,:]+"`,
+}
+g.AddPatterns(patternDefinitions)
+
+// compile the grok expression before use; this generates a regexp.Regexp
+// from the pattern and subpatterns provided
+err := g.Compile("%{NGINX_HOST}", true)
+
+res, err := g.ParseTypedString("127.0.0.1:1234")
+```
+
+Note that the result type changed from `map[string]string` to `map[string]interface{}`, and `destination.port` is now a number:
+```go
+map[string]interface {} {
+	"destination.ip": "127.0.0.1",
+	"destination.port": 1234,
+}
+```
+
+## Benchmarks
+
+Compared to [github.com/vjeantet/grok](https://github.com/vjeantet/grok) and to [github.com/trivago/grok](https://github.com/trivago/grok), a more optimized version based on the former:
+
+```
+BenchmarkParseString-10                 15466   76811 ns/op   4578 B/op    5 allocs/op
+BenchmarkParseStringRegexp-10           15351   77109 ns/op   3840 B/op    3 allocs/op
+BenchmarkParseStringTrivago-10          15868   76416 ns/op   4593 B/op    5 allocs/op
+BenchmarkParseStringVjeanet-10          15548   77111 ns/op   5897 B/op    6 allocs/op
+
+BenchmarkNestedParseString-10           42201   28908 ns/op   3463 B/op    4 allocs/op
+BenchmarkNestedParseStringTrivago-10    41937   28836 ns/op   3449 B/op    4 allocs/op
+BenchmarkNestedParseStringVjeanet-10    41080   29174 ns/op   4045 B/op    5 allocs/op
+
+BenchmarkTypedParseString-10            39934   29707 ns/op   3851 B/op    9 allocs/op
+BenchmarkTypedParseStringTrivago-10     40146   29238 ns/op   3475 B/op    6 allocs/op
+BenchmarkTypedParseStringVjeanet-10     39931   30616 ns/op   4196 B/op   14 allocs/op
+```
+
+
+## Default set of patterns
+
+This library comes with a default set of patterns defined in the `patterns/default.go` file.
+You can include more predefined patterns from `patterns/*.go` like so:
+
+```go
+g := grok.New()
+g.AddPatterns(patterns.Rails) // to include the whole set
+g.AddPattern(patterns.Ruby["RUBY_LOGLEVEL"]) // to include a specific one
+```
+
+The default set consists of:
+
+| Name | Example |
+|-----|-----|
+| WORD | "hello", "world123", "test_data" |
+| NOTSPACE | "example", "text-with-dashes", "12345" |
+| SPACE | " ", "\t", "  " |
+| INT | "123", "-456", "+789" |
+| NUMBER | "123", "456.789", "-0.123" |
+| BOOL | "true", "false", "true" |
+| BASE10NUM | "123", "-123.456", "0.789" |
+| BASE16NUM | "1a2b", "0x1A2B", "-0x1a2b3c" |
+| BASE16FLOAT | "0x1.a2b3", "-0x1A2B3C.D" |
+| POSINT | "123", "456", "789" |
+| NONNEGINT | "0", "123", "456" |
+| GREEDYDATA | "anything goes", "literally anything", "123 #@!" |
+| QUOTEDSTRING | "\"This is a quote\"", "'single quoted'" |
+| UUID | "123e4567-e89b-12d3-a456-426614174000" |
+| URN | "urn:isbn:0451450523", "urn:ietf:rfc:2648" |
+
+#### Network patterns
+
+| Name | Example |
+|-----|-----|
+| IP | "192.168.1.1", "2001:0db8:85a3:0000:0000:8a2e:0370:7334" |
+| IPV6 | "2001:0db8:85a3:0000:0000:8a2e:0370:7334", "::1", "fe80::1ff:fe23:4567:890a" |
+| IPV4 | "192.168.1.1", "10.0.0.1", "172.16.254.1" |
+| IPORHOST | "example.com", "192.168.1.1", "fe80::1ff:fe23:4567:890a" |
+| HOSTNAME | "example.com", "sub.domain.co.uk", "localhost" |
+| EMAILLOCALPART | "john.doe", "alice123", "bob-smith" |
+| EMAILADDRESS | "john.doe@example.com", "alice123@domain.co.uk" |
+| USERNAME | "user1", "john.doe", "alice_123" |
+| USER | "user1", "john.doe", "alice_123" |
+| MAC | "00:1A:2B:3C:4D:5E", "001A.2B3C.4D5E" |
+| CISCOMAC | "001A.2B3C.4D5E", "001B.2C3D.4E5F", "001C.2D3E.4F5A" |
+| WINDOWSMAC | "00-1A-2B-3C-4D-5E", "00-1B-2C-3D-4E-5F" |
+| COMMONMAC | "00:1A:2B:3C:4D:5E", "00:1B:2C:3D:4E:5F" |
+| HOSTPORT | "example.com:80", "192.168.1.1:8080" |
+
+#### Paths patterns
+
+| Name | Example |
+|-----|-----|
+| UNIXPATH | "/home/user", "/var/log/syslog", "/tmp/abc_123" |
+| TTY | "/dev/pts/1", "/dev/tty0", "/dev/ttyS0" |
+| WINPATH | "C:\\Program Files\\App", "D:\\Work\\project\\file.txt" |
+| URIPROTO | "http", "https", "ftp" |
+| URIHOST | "example.com", "192.168.1.1:8080" |
+| URIPATH | "/path/to/resource", "/another/path", "/root" |
+| URIQUERY | "key=value", "search=query&active=true" |
+| URIPARAM | "?key=value", "?search=query&active=true" |
+| URIPATHPARAM | "/path?query=1", "/folder/path?valid=true" |
+| PATH | "/home/user/documents", "C:\\Windows\\system32", "/var/log/syslog" |
+
+#### Datetime patterns
+
+| Name | Example |
+|-----|-----|
+| MONTH | "January", "Feb", "March", "Apr", "May", "Jun", "Jul", "August", "September", "October", "Nov", "December" |
+| MONTHNUM | "01", "02", "03", ... "11", "12" |
+| DAY | "Monday", "Tuesday", ...
"Sunday" | +| YEAR |"1999", "2000", "2021" | +| HOUR |"00", "12", "23" | +| MINUTE | "00", "30", "59" | +| SECOND | "00", "30", "60" | +| TIME |"14:30", "23:59:59", "12:00:00", "12:00:60" | +| DATE_US |"04/21/2022", "12-25-2020", "07/04/1999" | +| DATE_EU |"21.04.2022", "25/12/2020", "04-07-1999" | +| ISO8601_TIMEZONE |"Z", "+02:00", "-05:00" | +| ISO8601_SECOND | "59", "30", "60.123" | +| TIMESTAMP_ISO8601 | "2022-04-21T14:30:00Z", "2020-12-25T23:59:59+02:00", "1999-07-04T12:00:00-05:00" | +| DATE |"04/21/2022", "21.04.2022", "12-25-2020" | +| DATESTAMP | "04/21/2022 14:30", "21.04.2022 23:59", "12-25-2020 12:00" | +| TZ | "EST", "CET", "PDT" | +| DATESTAMP_RFC822 |"Wed Jan 12 2024 14:33 EST" | +| DATESTAMP_RFC2822 | "Tue, 12 Jan 2022 14:30 +0200", "Fri, 25 Dec 2020 23:59 -0500", "Sun, 04 Jul 1999 12:00 Z" | +| DATESTAMP_OTHER | "Tue Jan 12 14:30 EST 2022", "Fri Dec 25 23:59 CET 2020", "Sun Jul 04 12:00 PDT 1999" | +| DATESTAMP_EVENTLOG | "20220421143000", "20201225235959", "19990704120000" | + +#### Syslog patterns + +| Name | Example | +|-----|-----| +| SYSLOGTIMESTAMP | "Jan 1 00:00:00", "Mar 15 12:34:56", "Dec 31 23:59:59" | +| PROG |"sshd", "kernel", "cron" | +| SYSLOGPROG |"sshd[1234]", "kernel", "cron[5678]" | +| SYSLOGHOST |"example.com", "192.168.1.1", "localhost" | +| SYSLOGFACILITY | "<1.2>", "<12345.13456>" | +| HTTPDATE | "25/Dec/2024:14:33 4" | \ No newline at end of file diff --git a/vendor/github.com/elastic/go-grok/catalog-info.yaml b/vendor/github.com/elastic/go-grok/catalog-info.yaml new file mode 100644 index 00000000000..30d839fa811 --- /dev/null +++ b/vendor/github.com/elastic/go-grok/catalog-info.yaml @@ -0,0 +1,58 @@ +# Declare a Backstage Component that represents your application. +--- +# yaml-language-server: $schema=https://json.schemastore.org/catalog-info.json +apiVersion: backstage.io/v1alpha1 +kind: Component +metadata: + name: go-grok + description: Libraries used by Elastic Otel processors + +spec: + type: library + owner: group:ingest-fp + system: platform-ingest + lifecycle: production +--- +# yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/e57ee3bed7a6f73077a3f55a38e76e40ec87a7cf/rre.schema.json +apiVersion: backstage.io/v1alpha1 +kind: Resource +metadata: + name: buildkite-pipeline-go-grok + description: Buildkite Pipeline for go-grok + links: + - title: Pipeline + url: https://buildkite.com/elastic/go-grok + +spec: + type: buildkite-pipeline + owner: group:ingest-fp + system: buildkite + implementation: + apiVersion: buildkite.elastic.dev/v1 + kind: Pipeline + metadata: + name: go-grok + description: Buildkite pipeline for the go-grok library + spec: + branch_configuration: "main" + repository: elastic/go-grok + pipeline_file: ".buildkite/pipeline.yml" + maximum_timeout_in_minutes: 60 + provider_settings: + build_pull_request_forks: false + build_pull_requests: true # requires filter_enabled and filter_condition settings as below when used with buildkite-pr-bot + build_tags: true + filter_enabled: true + filter_condition: >- + build.pull_request.id == null || (build.creator.name == 'elasticmachine' && build.pull_request.id != null) + cancel_intermediate_builds: true + cancel_intermediate_builds_branch_filter: '!main' + skip_intermediate_builds: true + skip_intermediate_builds_branch_filter: '!main' + teams: + elastic-agent-control-plane: + access_level: MANAGE_BUILD_AND_READ + ingest-fp: + access_level: MANAGE_BUILD_AND_READ + everyone: + access_level: READ_ONLY diff --git 
a/vendor/github.com/elastic/go-grok/dev-tools/mage/benchmark.go b/vendor/github.com/elastic/go-grok/dev-tools/mage/benchmark.go
new file mode 100644
index 00000000000..def7290ae26
--- /dev/null
+++ b/vendor/github.com/elastic/go-grok/dev-tools/mage/benchmark.go
@@ -0,0 +1,163 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package mage
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strconv"
+	"strings"
+
+	"github.com/magefile/mage/mg"
+	"github.com/magefile/mage/sh"
+
+	"github.com/elastic/go-grok/dev-tools/mage/gotool"
+)
+
+const (
+	goBenchstat = "golang.org/x/perf/cmd/benchstat@v0.0.0-20230227161431-f7320a6d63e8"
+)
+
+var (
+	benchmarkCount = 8
+)
+
+// Benchmark is the mage namespace grouping all benchmark-related targets
+type Benchmark mg.Namespace
+
+// Deps installs the tools required for reading benchmark results
+func (Benchmark) Deps() error {
+	err := gotool.Install(gotool.Install.Package(goBenchstat))
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// Run executes the Go benchmark tests for this repository. By defining the variable OUTPUT you write the results
+// into a file. Optionally, set BENCH_COUNT to the number of benchmark iterations to execute; the default is 8
+func (Benchmark) Run() error {
+	mg.Deps(Benchmark.Deps)
+	log.Println(">> go Test: Benchmark")
+	outputFile := os.Getenv("OUTPUT")
+	benchmarkCountOverride := os.Getenv("BENCH_COUNT")
+	if benchmarkCountOverride != "" {
+		var overrideErr error
+		benchmarkCount, overrideErr = strconv.Atoi(benchmarkCountOverride)
+		if overrideErr != nil {
+			return fmt.Errorf("failed to parse BENCH_COUNT, verify that you set the right value: %w", overrideErr)
+		}
+	}
+	projectPackages, er := gotool.ListProjectPackages()
+	if er != nil {
+		return fmt.Errorf("failed to list package dependencies: %w", er)
+	}
+	cmdArg := fmt.Sprintf("test -count=%d -bench=Bench -run=Bench", benchmarkCount)
+	cmdArgs := strings.Split(cmdArg, " ")
+	for _, pkg := range projectPackages {
+		cmdArgs = append(cmdArgs, filepath.Join(pkg, "/..."))
+	}
+	_, err := runCommand(nil, "go", outputFile, cmdArgs...)
+
+	var goTestErr *exec.ExitError
+	switch {
+	case err == nil:
+		return nil
+	case errors.As(err, &goTestErr):
+		return fmt.Errorf("failed to execute go test -bench command: %w", err)
+	default:
+		return fmt.Errorf("failed to execute go test -bench command: %w", err)
+	}
+}
+
+// Diff parses one or more benchmark outputs. Required environment variables are BASE for the baseline results
+// and NEXT to compare the base results with.
Optionally, you can define OUTPUT to write the results into a file
+func (Benchmark) Diff() error {
+	mg.Deps(Benchmark.Deps)
+	log.Println(">> running: benchstat")
+	outputFile := os.Getenv("OUTPUT")
+	baseFile := os.Getenv("BASE")
+	nextFile := os.Getenv("NEXT")
+	var args []string
+	if baseFile == "" {
+		log.Printf("Missing required parameter BASE. Please set it to the filepath of the baseline benchmark results")
+		return fmt.Errorf("missing required parameter BASE; please set it to the filepath of the baseline benchmark results")
+	} else {
+		args = append(args, baseFile)
+	}
+	if nextFile == "" {
+		log.Printf("Missing NEXT parameter, we are not going to compare results")
+	} else {
+		args = append(args, nextFile)
+	}
+
+	_, err := runCommand(nil, "benchstat", outputFile, args...)
+
+	var goTestErr *exec.ExitError
+	switch {
+	case err == nil:
+		return nil
+	case errors.As(err, &goTestErr):
+		return fmt.Errorf("failed to execute benchstat command: %w", err)
+	default:
+		return fmt.Errorf("failed to execute benchstat command: %w", err)
+	}
+
+}
+
+// runCommand executes the command given by cmd.
+// When outputFile is set, it writes the command's stderr and stdout to that file
+// in addition to the console; otherwise it streams them to the console only.
+// It returns true and a nil error on success.
+func runCommand(env map[string]string, cmd string, outputFile string, args ...string) (bool, error) {
+	var stdOut io.Writer
+	var stdErr io.Writer
+	if outputFile != "" {
+		fileOutput, err := os.Create(createDir(outputFile))
+		if err != nil {
+			return false, fmt.Errorf("failed to create %s output file: %w", cmd, err)
+		}
+		defer func(fileOutput *os.File) {
+			err := fileOutput.Close()
+			if err != nil {
+				log.Fatalf("Failed to close file %s", err)
+			}
+		}(fileOutput)
+		stdOut = io.MultiWriter(os.Stdout, fileOutput)
+		stdErr = io.MultiWriter(os.Stderr, fileOutput)
+	} else {
+		stdOut = os.Stdout
+		stdErr = os.Stderr
+	}
+	return sh.Exec(env, stdOut, stdErr, cmd, args...)
+}
+
+// createDir creates the parent directory for the given file.
+func createDir(file string) string {
+	// Create the output directory.
+	if dir := filepath.Dir(file); dir != "." {
+		if err := os.MkdirAll(dir, 0755); err != nil {
+			log.Fatalf("Failed to create parent dir for %s", file)
+		}
+	}
+	return file
+}
diff --git a/vendor/github.com/elastic/go-grok/dev-tools/mage/check.go b/vendor/github.com/elastic/go-grok/dev-tools/mage/check.go
new file mode 100644
index 00000000000..50e843ea2f8
--- /dev/null
+++ b/vendor/github.com/elastic/go-grok/dev-tools/mage/check.go
@@ -0,0 +1,48 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package mage
+
+import (
+	"fmt"
+
+	"github.com/magefile/mage/sh"
+)
+
+// CheckNoChanges runs `go mod tidy` and fails if it, or the git working tree,
+// produces uncommitted changes.
+func CheckNoChanges() error {
+	fmt.Println(">> check - go mod tidy")
+	err := sh.RunV("go", "mod", "tidy", "-v")
+	if err != nil {
+		return fmt.Errorf("failed running go mod tidy, please fix the issues reported: %w", err)
+	}
+	fmt.Println(">> check - git diff")
+	err = sh.RunV("git", "diff")
+	if err != nil {
+		return fmt.Errorf("failed running git diff, please fix the issues reported: %w", err)
+	}
+	fmt.Println(">> check - git update-index")
+	err = sh.RunV("git", "update-index", "--refresh")
+	if err != nil {
+		return fmt.Errorf("failed running git update-index --refresh, please fix the issues reported: %w", err)
+	}
+	fmt.Println(">> check - git diff-index")
+	err = sh.RunV("git", "diff-index", "--exit-code", "HEAD", "--")
+	if err != nil {
+		return fmt.Errorf("failed running git diff-index, please fix the issues reported: %w", err)
+	}
+	return nil
+}
diff --git a/vendor/github.com/elastic/go-grok/dev-tools/mage/deps.go b/vendor/github.com/elastic/go-grok/dev-tools/mage/deps.go
new file mode 100644
index 00000000000..4f4e179182b
--- /dev/null
+++ b/vendor/github.com/elastic/go-grok/dev-tools/mage/deps.go
@@ -0,0 +1,43 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package mage
+
+import (
+	"fmt"
+
+	"github.com/magefile/mage/mg"
+
+	"github.com/elastic/go-grok/dev-tools/mage/gotool"
+)
+
+// Deps contains targets related to checking dependencies
+type Deps mg.Namespace
+
+// CheckModuleTidy checks if `go mod tidy` was run before the last commit.
+func (Deps) CheckModuleTidy() error {
+	err := gotool.Mod.Tidy()
+	if err != nil {
+		return err
+	}
+	err = assertUnchanged("go.mod")
+	if err != nil {
+		return fmt.Errorf("`go mod tidy` was not called before the last commit: %w", err)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/elastic/go-grok/dev-tools/mage/fmt.go b/vendor/github.com/elastic/go-grok/dev-tools/mage/fmt.go
new file mode 100644
index 00000000000..0237a86a283
--- /dev/null
+++ b/vendor/github.com/elastic/go-grok/dev-tools/mage/fmt.go
@@ -0,0 +1,107 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.
See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package mage
+
+import (
+	"fmt"
+	"io/fs"
+	"os"
+	"path/filepath"
+
+	"github.com/magefile/mage/mg"
+	"github.com/magefile/mage/sh"
+
+	"github.com/elastic/go-grok/dev-tools/mage/gotool"
+)
+
+const (
+	// GoImportsImportPath controls the import path used to install goimports.
+	GoImportsImportPath = "golang.org/x/tools/cmd/goimports"
+
+	// GoImportsLocalPrefix is a string prefix matching imports that should be
+	// grouped after third-party packages.
+	GoImportsLocalPrefix = "github.com/elastic"
+)
+
+// GoImports contains targets related to formatting Go imports
+type GoImports mg.Namespace
+
+// Run executes goimports against all .go files in and below the CWD.
+func (GoImports) Run() error {
+	mg.Deps(GoImports.Install)
+	goFiles, err := FindFilesRecursive(func(path string, _ os.FileInfo) bool {
+		return filepath.Ext(path) == ".go"
+	})
+	if err != nil {
+		return err
+	}
+	if len(goFiles) == 0 {
+		return nil
+	}
+
+	fmt.Println(">> fmt - goimports: Formatting Go code") //nolint:forbidigo // it's a mage target
+	args := append(
+		[]string{"-local", GoImportsLocalPrefix, "-l", "-w"},
+		goFiles...,
+	)
+
+	return sh.RunV("goimports", args...)
+}
+
+// Install installs goimports.
+func (GoImports) Install() error {
+	err := gotool.Install(gotool.Install.Package(GoImportsImportPath))
+	if err != nil {
+		return fmt.Errorf("cannot install GoImports: %w", err)
+	}
+
+	return nil
+}
+
+// FindFilesRecursive recursively traverses from the CWD and invokes the given
+// match function on each regular file to determine if the given path should be
+// returned as a match. It ignores files in .git directories.
+func FindFilesRecursive(match func(path string, info os.FileInfo) bool) ([]string, error) {
+	var matches []string
+	walkDir := func(path string, d fs.DirEntry, err error) error {
+		if err != nil {
+			return err
+		}
+
+		// Don't look for files in git directories
+		if d.IsDir() && filepath.Base(path) == ".git" {
+			return filepath.SkipDir
+		}
+
+		info, err := d.Info()
+		if err != nil {
+			return fmt.Errorf("cannot get FileInfo: %w", err)
+		}
+		if !info.Mode().IsRegular() {
+			// skip non-regular files
+			return nil
+		}
+
+		if match(filepath.ToSlash(path), info) {
+			matches = append(matches, path)
+		}
+		return nil
+	}
+
+	err := filepath.WalkDir(".", fs.WalkDirFunc(walkDir))
+	return matches, err
+}
diff --git a/vendor/github.com/elastic/go-grok/dev-tools/mage/gotool/get.go b/vendor/github.com/elastic/go-grok/dev-tools/mage/gotool/get.go
new file mode 100644
index 00000000000..9770cc1a1b5
--- /dev/null
+++ b/vendor/github.com/elastic/go-grok/dev-tools/mage/gotool/get.go
@@ -0,0 +1,43 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+ +package gotool + +type goGet func(opts ...ArgOpt) error +type goDownload func(opts ...ArgOpt) error + +// Get runs `go get` and provides optionals for adding command line arguments. +var Get goGet = runGoGet + +func runGoGet(opts ...ArgOpt) error { + args := buildArgs(opts) + return runVGo("get", args) +} + +func (goGet) Download() ArgOpt { return flagBoolIf("-d", true) } +func (goGet) Update() ArgOpt { return flagBoolIf("-u", true) } +func (goGet) Package(pkg string) ArgOpt { return posArg(pkg) } + +// Download runs `go download` and provides optionals for adding command line arguments. +var Download goDownload = runGoDownload + +func runGoDownload(opts ...ArgOpt) error { + args := buildArgs(opts) + return runVGo("download", args) +} + +func (goDownload) All() ArgOpt { return posArg("all") } diff --git a/vendor/github.com/elastic/go-grok/dev-tools/mage/gotool/go.go b/vendor/github.com/elastic/go-grok/dev-tools/mage/gotool/go.go new file mode 100644 index 00000000000..b7a833cfe4f --- /dev/null +++ b/vendor/github.com/elastic/go-grok/dev-tools/mage/gotool/go.go @@ -0,0 +1,343 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package gotool + +import ( + "fmt" + "os" + "strings" + + "github.com/magefile/mage/mg" + "github.com/magefile/mage/sh" +) + +// Args holds parameters, environment variables and flag information used to +// pass to the go tool. +type Args struct { + extra map[string]string // extra flags one can pass to the command + env map[string]string + flags map[string][]string + pos []string +} + +// ArgOpt is a functional option adding info to Args once executed. +type ArgOpt func(args *Args) + +type goInstall func(opts ...ArgOpt) error + +// Install runs `go install` and provides optionals for adding command line arguments. +var Install goInstall = runGoInstall + +func runGoInstall(opts ...ArgOpt) error { + args := buildArgs(opts) + return runVGo("install", args) +} + +func (goInstall) Package(pkg string) ArgOpt { return posArg(pkg) } +func (goInstall) Vendored() ArgOpt { return flagArg("-mod", "vendor") } + +type goTest func(opts ...ArgOpt) error + +// Test runs `go test` and provides optionals for adding command line arguments. +var Test goTest = runGoTest + +// GetModuleName returns the name of the module. +func GetModuleName() (string, error) { + lines, err := getLines(callGo(nil, "list", "-m")) + if err != nil { + return "", err + } + if len(lines) != 1 { + return "", fmt.Errorf("unexpected number of lines") + } + return lines[0], nil +} + +// ListProjectPackages lists all packages in the current project +func ListProjectPackages() ([]string, error) { + return ListPackages("./...") +} + +// ListPackages calls `go list` for every package spec given. 
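+// A typical call (illustrative only; it assumes the module's packages are
+// rooted at the current working directory):
+//
+//	pkgs, err := gotool.ListPackages("./...")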
+func ListPackages(pkgs ...string) ([]string, error) {
+	return getLines(callGo(nil, "list", pkgs...))
+}
+
+// ListDeps calls `go list -deps` for every package spec given.
+func ListDeps(pkg string) ([]string, error) {
+	const tmpl = `{{if not .Standard}}{{.ImportPath}}{{end}}`
+
+	return getLines(callGo(nil, "list", "-deps", "-f", tmpl, pkg))
+}
+
+// ListDepsForNotice lists all module dependencies in the JSON form consumed by the NOTICE generator.
+func ListDepsForNotice() (string, error) {
+	return callGo(nil, "list", "-json", "-m", "all")
+}
+
+// ListDepsLocation calls `go list -deps` for every package spec given and
+// returns a map from import path to the package's directory on disk.
+func ListDepsLocation(pkg string) (map[string]string, error) {
+	const tmpl = `{{if not .Standard}}{{.ImportPath}};{{.Dir}}{{end}}`
+
+	lines, err := getLines(callGo(nil, "list", "-deps", "-f", tmpl, pkg))
+	if err != nil {
+		return nil, err
+	}
+	deps := make(map[string]string, len(lines))
+	for _, l := range lines {
+		parts := strings.Split(l, ";")
+		if len(parts) != 2 {
+			return nil, fmt.Errorf("invalid number of parts")
+		}
+		deps[parts[0]] = parts[1]
+	}
+	return deps, nil
+}
+
+// ListTestFiles lists all go and cgo test files available in a package.
+func ListTestFiles(pkg string) ([]string, error) {
+	const tmpl = `{{ range .TestGoFiles }}{{ printf "%s\n" . }}{{ end }}` +
+		`{{ range .XTestGoFiles }}{{ printf "%s\n" . }}{{ end }}`
+
+	return getLines(callGo(nil, "list", "-f", tmpl, pkg))
+}
+
+// ListModuleCacheDir returns the module cache directory containing
+// the specified module. If the module does not exist in the cache,
+// an error will be returned.
+func ListModuleCacheDir(pkg string) (string, error) {
+	return listModuleDir(pkg, false)
+}
+
+// ListModuleVendorDir returns the vendor directory containing the
+// specified module. If the module has not been vendored, an error
+// will be returned.
+func ListModuleVendorDir(pkg string) (string, error) {
+	return listModuleDir(pkg, true)
+}
+
+func listModuleDir(pkg string, vendor bool) (string, error) {
+	env := map[string]string{
+		// Make sure GOFLAGS does not influence behaviour.
+		"GOFLAGS": "",
+	}
+	args := []string{"-m", "-f", "{{.Dir}}"}
+	if vendor {
+		args = append(args, "-mod=vendor")
+	}
+	args = append(args, pkg)
+	lines, err := getLines(callGo(env, "list", args...))
+	if err != nil {
+		return "", err
+	}
+	if n := len(lines); n != 1 {
+		return "", fmt.Errorf("expected 1 line, got %d while looking for %s", n, pkg)
+	}
+	return lines[0], nil
+}
+
+// HasTests returns true if the given package contains test files.
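+// For example, gotool.HasTests("./foo") reports whether the package at the
+// illustrative path ./foo declares any Go or cgo test files.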
+func HasTests(pkg string) (bool, error) { + files, err := ListTestFiles(pkg) + if err != nil { + return false, err + } + return len(files) > 0, nil +} + +func (goTest) WithCoverage(to string) ArgOpt { + return combine(flagArg("-cover", ""), flagArgIf("-test.coverprofile", to)) +} +func (goTest) Short(b bool) ArgOpt { return flagBoolIf("-test.short", b) } +func (goTest) Use(bin string) ArgOpt { return extraArgIf("use", bin) } +func (goTest) OS(os string) ArgOpt { return envArgIf("GOOS", os) } +func (goTest) ARCH(arch string) ArgOpt { return envArgIf("GOARCH", arch) } +func (goTest) Create() ArgOpt { return flagArg("-c", "") } +func (goTest) Out(path string) ArgOpt { return flagArg("-o", path) } +func (goTest) Package(path string) ArgOpt { return posArg(path) } +func (goTest) Verbose() ArgOpt { return flagArg("-test.v", "") } +func runGoTest(opts ...ArgOpt) error { + args := buildArgs(opts) + if bin := args.Val("use"); bin != "" { + flags := map[string][]string{} + for k, v := range args.flags { + if strings.HasPrefix(k, "-test.") { + flags[k] = v + } + } + + useArgs := &Args{} + *useArgs = *args + useArgs.flags = flags + + _, err := sh.Exec(useArgs.env, os.Stdout, os.Stderr, bin, useArgs.build()...) + return err + } + + return runVGo("test", args) +} + +func getLines(out string, err error) ([]string, error) { + if err != nil { + return nil, err + } + + lines := strings.Split(out, "\n") + res := lines[:0] + for _, line := range lines { + line = strings.TrimSpace(line) + if len(line) > 0 { + res = append(res, line) + } + } + + return res, nil +} + +func callGo(env map[string]string, cmd string, opts ...string) (string, error) { //nolint:unparam // not always receives list + args := []string{cmd} + args = append(args, opts...) + return sh.OutputWith(env, mg.GoCmd(), args...) +} + +func runVGo(cmd string, args *Args) error { + return execGoWith(func(env map[string]string, cmd string, args ...string) error { + _, err := sh.Exec(env, os.Stdout, os.Stderr, cmd, args...) + return err + }, cmd, args) +} + +func execGoWith( + fn func(map[string]string, string, ...string) error, + cmd string, args *Args, +) error { + cliArgs := []string{cmd} + cliArgs = append(cliArgs, args.build()...) + return fn(args.env, mg.GoCmd(), cliArgs...) +} + +func posArg(value string) ArgOpt { + return func(a *Args) { a.Add(value) } +} + +func extraArg(k, v string) ArgOpt { + return func(a *Args) { a.Extra(k, v) } +} + +func extraArgIf(k, v string) ArgOpt { + if v == "" { + return nil + } + return extraArg(k, v) +} + +func envArg(k, v string) ArgOpt { + return func(a *Args) { a.Env(k, v) } +} + +func envArgIf(k, v string) ArgOpt { + if v == "" { + return nil + } + return envArg(k, v) +} + +func flagArg(flag, value string) ArgOpt { + return func(a *Args) { a.Flag(flag, value) } +} + +func flagArgIf(flag, value string) ArgOpt { + if value == "" { + return nil + } + return flagArg(flag, value) +} + +func flagBoolIf(flag string, b bool) ArgOpt { + if b { + return flagArg(flag, "") + } + return nil +} + +func combine(opts ...ArgOpt) ArgOpt { + return func(a *Args) { + for _, opt := range opts { + if opt != nil { + opt(a) + } + } + } +} + +func buildArgs(opts []ArgOpt) *Args { + a := &Args{} + combine(opts...)(a) + return a +} + +// Extra sets a special k/v pair to be interpreted by the execution function. +func (a *Args) Extra(k, v string) { + if a.extra == nil { + a.extra = map[string]string{} + } + a.extra[k] = v +} + +// Val returns a special functions value for a given key. 
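+// (In other words, it looks up a value previously stored with Extra;
+// it returns "" when the key was never set.)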
+func (a *Args) Val(k string) string {
+	if a.extra == nil {
+		return ""
+	}
+	return a.extra[k]
+}
+
+// Env sets an environment variable to be passed to the child process on exec.
+func (a *Args) Env(k, v string) {
+	if a.env == nil {
+		a.env = map[string]string{}
+	}
+	a.env[k] = v
+}
+
+// Flag adds a flag to be passed to the child process on exec.
+func (a *Args) Flag(flag, value string) {
+	if a.flags == nil {
+		a.flags = map[string][]string{}
+	}
+	a.flags[flag] = append(a.flags[flag], value)
+}
+
+// Add adds a positional argument to be passed to the child process on exec.
+func (a *Args) Add(p string) {
+	a.pos = append(a.pos, p)
+}
+
+func (a *Args) build() []string {
+	args := make([]string, 0, 2*len(a.flags)+len(a.pos))
+	for k, values := range a.flags {
+		for _, v := range values {
+			args = append(args, k)
+			if v != "" {
+				args = append(args, v)
+			}
+		}
+	}
+
+	args = append(args, a.pos...)
+	return args
+}
diff --git a/vendor/github.com/elastic/go-grok/dev-tools/mage/gotool/licenser.go b/vendor/github.com/elastic/go-grok/dev-tools/mage/gotool/licenser.go
new file mode 100644
index 00000000000..67978ab8218
--- /dev/null
+++ b/vendor/github.com/elastic/go-grok/dev-tools/mage/gotool/licenser.go
@@ -0,0 +1,35 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package gotool
+
+import "github.com/magefile/mage/sh"
+
+type goLicenser func(opts ...ArgOpt) error
+
+// Licenser runs `go-licenser` and provides optionals for adding command line arguments.
+var Licenser goLicenser = runGoLicenser
+
+func runGoLicenser(opts ...ArgOpt) error {
+	args := buildArgs(opts).build()
+	return sh.RunV("go-licenser", args...)
+}
+
+func (goLicenser) Check() ArgOpt { return flagBoolIf("-d", true) }
+func (goLicenser) License(license string) ArgOpt { return flagArgIf("-license", license) }
+func (goLicenser) Exclude(path string) ArgOpt { return flagArgIf("-exclude", path) }
+func (goLicenser) Path(path string) ArgOpt { return posArg(path) }
diff --git a/vendor/github.com/elastic/go-grok/dev-tools/mage/gotool/modules.go b/vendor/github.com/elastic/go-grok/dev-tools/mage/gotool/modules.go
new file mode 100644
index 00000000000..d7a880756b7
--- /dev/null
+++ b/vendor/github.com/elastic/go-grok/dev-tools/mage/gotool/modules.go
@@ -0,0 +1,64 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package gotool
+
+// Mod is the command go mod.
+var Mod = goMod{
+	Download: modCommand{"download"}.run,
+	Init:     modCommand{"init"}.run,
+	Tidy:     modCommand{"tidy"}.run,
+	Verify:   modCommand{"verify"}.run,
+	Vendor:   modCommand{"vendor"}.run,
+}
+
+type modCommand struct {
+	method string
+}
+
+func (cmd modCommand) run(opts ...ArgOpt) error {
+	o := make([]ArgOpt, len(opts)+1)
+	o[0] = posArg(cmd.method)
+	for i, opt := range opts {
+		o[i+1] = opt
+	}
+	args := buildArgs(o)
+	return runVGo("mod", args)
+}
+
+type goMod struct {
+	Download modDownload
+	Init     modInit
+	Tidy     modTidy
+	Verify   modVerify
+	Vendor   modVendor
+}
+
+// modDownload downloads modules and their dependencies to the local cache.
+type modDownload func(opts ...ArgOpt) error
+
+// modInit initializes a new go module in the current folder.
+type modInit func(opts ...ArgOpt) error
+
+// modTidy cleans the go.mod file.
+type modTidy func(opts ...ArgOpt) error
+
+// modVerify checks that deps have the expected content.
+type modVerify func(opts ...ArgOpt) error
+
+// modVendor downloads and copies dependencies under the folder vendor.
+type modVendor func(opts ...ArgOpt) error
diff --git a/vendor/github.com/elastic/go-grok/dev-tools/mage/gotool/noticer.go b/vendor/github.com/elastic/go-grok/dev-tools/mage/gotool/noticer.go
new file mode 100644
index 00000000000..26669c3f74f
--- /dev/null
+++ b/vendor/github.com/elastic/go-grok/dev-tools/mage/gotool/noticer.go
@@ -0,0 +1,38 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package gotool
+
+import "github.com/magefile/mage/sh"
+
+type goNoticeGenerator func(opts ...ArgOpt) error
+
+// NoticeGenerator runs `go-licence-detector` and provides optionals for adding command line arguments.
+var NoticeGenerator goNoticeGenerator = runGoNoticeGenerator
+
+func runGoNoticeGenerator(opts ...ArgOpt) error {
+	args := buildArgs(opts).build()
+	return sh.RunV("go-licence-detector", args...)
+}
+
+func (goNoticeGenerator) Dependencies(path string) ArgOpt { return flagArg("-in", path) }
+func (goNoticeGenerator) IncludeIndirect() ArgOpt { return flagBoolIf("-includeIndirect", true) }
+func (goNoticeGenerator) Rules(path string) ArgOpt { return flagArg("-rules", path) }
+func (goNoticeGenerator) Overrides(path string) ArgOpt { return flagArg("-overrides", path) }
+func (goNoticeGenerator) NoticeTemplate(path string) ArgOpt { return flagArg("-noticeTemplate", path) }
+func (goNoticeGenerator) NoticeOutput(path string) ArgOpt { return flagArg("-noticeOut", path) }
+func (goNoticeGenerator) DepsOutput(path string) ArgOpt { return flagArg("-depsOut", path) }
diff --git a/vendor/github.com/elastic/go-grok/dev-tools/mage/install.go b/vendor/github.com/elastic/go-grok/dev-tools/mage/install.go
new file mode 100644
index 00000000000..2a4002011f8
--- /dev/null
+++ b/vendor/github.com/elastic/go-grok/dev-tools/mage/install.go
@@ -0,0 +1,44 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package mage
+
+import (
+	"github.com/elastic/go-grok/dev-tools/mage/gotool"
+)
+
+var (
+	// GoLicenserImportPath controls the import path used to install go-licenser.
+	GoLicenserImportPath = "github.com/elastic/go-licenser"
+
+	// GoNoticeGeneratorImportPath controls the import path used to install go-licence-detector.
+	GoNoticeGeneratorImportPath = "go.elastic.co/go-licence-detector"
+)
+
+// InstallGoLicenser target installs go-licenser
+func InstallGoLicenser() error {
+	return gotool.Install(
+		gotool.Install.Package(GoLicenserImportPath),
+	)
+}
+
+// InstallGoNoticeGen target installs go-licence-detector
+func InstallGoNoticeGen() error {
+	return gotool.Install(
+		gotool.Install.Package(GoNoticeGeneratorImportPath),
+	)
+}
diff --git a/vendor/github.com/elastic/go-grok/dev-tools/mage/linter.go b/vendor/github.com/elastic/go-grok/dev-tools/mage/linter.go
new file mode 100644
index 00000000000..e25e0d9b870
--- /dev/null
+++ b/vendor/github.com/elastic/go-grok/dev-tools/mage/linter.go
@@ -0,0 +1,175 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.
See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package mage
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"net/http"
+	"os"
+	"path/filepath"
+
+	"github.com/magefile/mage/mg"
+	"github.com/magefile/mage/sh"
+)
+
+const (
+	linterVersion    = "v1.55.2"
+	linterInstallURL = "https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh"
+)
+
+var (
+	linterConfigFilename = filepath.Join(".", ".golangci.yml")
+	linterInstallDir     = filepath.Join(".", "build")
+	linterInstallFile    = filepath.Join(linterInstallDir, "install-golang-ci.sh")
+	linterBinaryFile     = filepath.Join(linterInstallDir, linterVersion, "golangci-lint")
+)
+
+// Linter contains targets related to linting the Go code
+type Linter mg.Namespace
+
+// CheckConfig makes sure that the `.golangci.yml` does not have uncommitted changes
+func (Linter) CheckConfig() error {
+	err := assertUnchanged(linterConfigFilename)
+	if err != nil {
+		return fmt.Errorf("linter configuration has uncommitted changes: %w", err)
+	}
+	return nil
+}
+
+// Install installs golangci-lint (https://golangci-lint.run) to `./build`
+// using the official installation script downloaded from GitHub.
+// If the linter binary already exists, it does nothing.
+func (Linter) Install() error {
+	return install(false)
+}
+
+// ForceInstall force installs the linter regardless of whether it exists or not.
+func (Linter) ForceInstall() error {
+	return install(true)
+}
+
+func install(force bool) error {
+	dirPath := filepath.Dir(linterBinaryFile)
+	err := os.MkdirAll(dirPath, 0700)
+	if err != nil {
+		return fmt.Errorf("failed to create path %q: %w", dirPath, err)
+	}
+
+	_, err = os.Stat(linterBinaryFile)
+	if !force && err == nil {
+		log.Println("The linter has already been installed, skipping...")
+		return nil
+	}
+	if err != nil && !errors.Is(err, os.ErrNotExist) {
+		return fmt.Errorf("failed to check if file %q exists: %w", linterBinaryFile, err)
+	}
+
+	log.Println("Preparing the installation script file...")
+
+	installScript, err := os.OpenFile(linterInstallFile, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0700)
+	if err != nil {
+		return fmt.Errorf("failed to create file %q: %w", linterInstallFile, err)
+	}
+	defer installScript.Close()
+
+	log.Println("Downloading the linter installation script...")
+	//nolint:noctx // valid use since there is no context
+	resp, err := http.Get(linterInstallURL)
+	if err != nil {
+		return fmt.Errorf("cannot download the linter installation script from %q: %w", linterInstallURL, err)
+	}
+	defer resp.Body.Close()
+
+	lr := io.LimitReader(resp.Body, 1024*100) // not more than 100 KB, just to be safe
+	_, err = io.Copy(installScript, lr)
+	if err != nil {
+		return fmt.Errorf("failed to finish downloading the linter installation script: %w", err)
+	}
+
+	err = installScript.Close() // otherwise we cannot run the script
+	if err != nil {
+		return fmt.Errorf("failed to close file %q: %w", linterInstallFile, err)
+	}
+
+	binaryDir := filepath.Dir(linterBinaryFile)
+	err = os.MkdirAll(binaryDir, 0700)
+	if err != nil {
+		return fmt.Errorf("cannot create path %q: %w", binaryDir, err)
+	}
+
+	// there must be no space after `-b`, otherwise the script does not work correctly ¯\_(ツ)_/¯
+	return sh.Run(linterInstallFile, "-b"+binaryDir, linterVersion)
+}
+
+// All runs the linter against the entire codebase
+func (l Linter) All() error {
+	mg.Deps(l.Install, l.CheckConfig)
+	return runLinter()
+}
+
+// Version prints the version of the linter in use.
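+// It shells out to the golangci-lint binary downloaded by Install.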
+func (l Linter) Version() error {
+	mg.Deps(l.Install)
+	return runLinter("--version")
+}
+
+// LastChange runs the linter against all files changed since the fork point from `main`.
+// If the current branch is `main`, it runs against the files changed in the last commit.
+func (l Linter) LastChange() error {
+	mg.Deps(l.Install, l.CheckConfig)
+
+	// get current branch name
+	branch, err := sh.Output("git", "rev-parse", "--abbrev-ref", "HEAD")
+	if err != nil {
+		return fmt.Errorf("failed to get the current branch: %w", err)
+	}
+
+	// The linter is supposed to support linting only the changed diffs but, for
+	// some reason, that mode does not output any results without linting the
+	// whole files, so we have to use `--whole-files`. This can frustrate
+	// developers who want to fix a single line in an existing codebase, because
+	// the linter forces them to fix all linting issues in the whole file instead.
+
+	if branch == "main" {
+		// files changed in the last commit
+		return runLinter("--new-from-rev=HEAD~", "--whole-files")
+	}
+
+	return runLinter("--new-from-rev=origin/main", "--whole-files")
+}
+
+// runLinter runs the linter, passing `-v` when mage runs in verbose mode, plus the given arguments.
+// It also redirects the linter's output to stdout/stderr instead of discarding it.
+func runLinter(runFlags ...string) error {
+	var args []string
+
+	if mg.Verbose() {
+		args = append(args, "-v")
+	}
+
+	args = append(args, "run")
+	args = append(args, runFlags...)
+	args = append(args, "-c", linterConfigFilename)
+	args = append(args, "./...")
+
+	return runWithStdErr(linterBinaryFile, args...)
+}
diff --git a/vendor/github.com/elastic/go-grok/dev-tools/mage/mage.go b/vendor/github.com/elastic/go-grok/dev-tools/mage/mage.go
new file mode 100644
index 00000000000..05841a0274e
--- /dev/null
+++ b/vendor/github.com/elastic/go-grok/dev-tools/mage/mage.go
@@ -0,0 +1,40 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package mage
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/magefile/mage/sh"
+)
+
+// assertUnchanged fails if the given path has uncommitted changes according to git diff.
+func assertUnchanged(path string) error {
+	err := sh.Run("git", "diff", "--exit-code", path)
+	if err != nil {
+		return fmt.Errorf("failed to assert the unchanged file %q: %w", path, err)
+	}
+
+	return nil
+}
+
+// runWithStdErr runs a command redirecting its stderr to the console instead of discarding it
+func runWithStdErr(command string, args ...string) error {
+	_, err := sh.Exec(nil, os.Stdout, os.Stderr, command, args...)
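+	// Output already went straight to the console; only the error needs to propagate.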
+ return err +} diff --git a/vendor/github.com/elastic/go-grok/dev-tools/mage/notice.go b/vendor/github.com/elastic/go-grok/dev-tools/mage/notice.go new file mode 100644 index 00000000000..4dc48c4763e --- /dev/null +++ b/vendor/github.com/elastic/go-grok/dev-tools/mage/notice.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package mage + +import ( + "fmt" + "os" + + "github.com/magefile/mage/mg" + + "github.com/elastic/go-grok/dev-tools/mage/gotool" +) + +func GenerateNotice(overrides, rules, noticeTemplate string) error { + mg.Deps(InstallGoNoticeGen, Deps.CheckModuleTidy) + + err := gotool.Mod.Download(gotool.Download.All()) + if err != nil { + return fmt.Errorf("error while downloading dependencies: %w", err) + } + + // Ensure the go.mod file is left unchanged after go mod download all runs. + // go mod download will modify go.sum in a way that conflicts with go mod tidy. + // https://github.com/golang/go/issues/43994#issuecomment-770053099 + defer gotool.Mod.Tidy() //nolint:errcheck // No value in handling this error. + + out, _ := gotool.ListDepsForNotice() + depsFile, _ := os.CreateTemp("", "depsout") + defer os.Remove(depsFile.Name()) + _, _ = depsFile.Write([]byte(out)) + depsFile.Close() + + generator := gotool.NoticeGenerator + return generator( + generator.Dependencies(depsFile.Name()), + generator.IncludeIndirect(), + generator.Overrides(overrides), + generator.Rules(rules), + generator.NoticeTemplate(noticeTemplate), + generator.NoticeOutput("NOTICE.txt"), + ) +} diff --git a/vendor/github.com/elastic/go-grok/grok.go b/vendor/github.com/elastic/go-grok/grok.go new file mode 100644 index 00000000000..8fe018cb18e --- /dev/null +++ b/vendor/github.com/elastic/go-grok/grok.go @@ -0,0 +1,377 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
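For orientation, here is a minimal usage sketch of the API the grok.go file below defines. Compile, ParseTypedString and the default pattern lookup all appear in the vendored code that follows; the sample pattern, input text and field names are purely illustrative:

    package main

    import (
        "fmt"

        grok "github.com/elastic/go-grok"
    )

    func main() {
        // New registers the default pattern set automatically.
        g := grok.New()

        // %{SYNTAX:ID:TYPE} form: NUMBER is matched, captured as "user.age",
        // and converted to int via the type hint.
        if err := g.Compile(`id=%{USERNAME:user.name} age=%{NUMBER:user.age:int}`, true); err != nil {
            panic(err)
        }

        res, err := g.ParseTypedString("id=alice age=42")
        if err != nil {
            panic(err)
        }
        fmt.Println(res["user.name"], res["user.age"]) // alice 42
    }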
+ +package grok + +import ( + "fmt" + "regexp" + "strconv" + "strings" + + "github.com/elastic/go-grok/patterns" +) + +const dotSep = "___" + +var ( + ErrParseFailure = fmt.Errorf("parsing failed") + ErrTypeNotProvided = fmt.Errorf("type not specified") + ErrUnsupportedName = fmt.Errorf("name contains unsupported character ':'") + + // a grok expression can be specified in any of these forms: + // %{SYNTAX} - e.g. %{NUMBER} + // %{SYNTAX:ID} - e.g. %{NUMBER:MY_AGE} + // %{SYNTAX:ID:TYPE} - e.g. %{NUMBER:MY_AGE:INT} + // supported types are int, long, double, float and boolean; + // in this Go implementation int and long both result in int, + // and double and float both result in float + reusePattern = regexp.MustCompile(`%{(\w+(?::[\w+.]+(?::\w+)?)?)}`) +) + +type Grok struct { + patternDefinitions map[string]string + re *regexp.Regexp + typeHints map[string]string + lookupDefaultPatterns bool +} + +func New() *Grok { + return &Grok{ + patternDefinitions: make(map[string]string), + lookupDefaultPatterns: true, + } +} + +func NewWithoutDefaultPatterns() *Grok { + return &Grok{ + patternDefinitions: make(map[string]string), + } +} + +func NewWithPatterns(patterns ...map[string]string) (*Grok, error) { + g := &Grok{ + patternDefinitions: make(map[string]string), + lookupDefaultPatterns: true, + } + + for _, p := range patterns { + if err := g.AddPatterns(p); err != nil { + return nil, err + } + } + + return g, nil +} + +// NewComplete creates a grok parser with the full set of patterns +func NewComplete(additionalPatterns ...map[string]string) (*Grok, error) { + g, err := NewWithPatterns( + patterns.AWS, + patterns.Bind9, + patterns.Bro, + patterns.Exim, + patterns.HAProxy, + patterns.Httpd, + patterns.Firewalls, + patterns.Java, + patterns.Junos, + patterns.Maven, + patterns.MCollective, + patterns.MongoDB, + patterns.PostgreSQL, + patterns.Rails, + patterns.Redis, + patterns.Ruby, + patterns.Squid, + patterns.Syslog, + ) + if err != nil { + return nil, err + } + + for _, p := range additionalPatterns { + if err := g.AddPatterns(p); err != nil { + return nil, err + } + } + + return g, nil +} + +func (grok *Grok) AddPattern(name, patternDefinition string) error { + if strings.ContainsRune(name, ':') { + return ErrUnsupportedName + } + + // overwrite existing if present + grok.patternDefinitions[name] = patternDefinition + return nil +} + +func (grok *Grok) AddPatterns(patternDefinitions map[string]string) error { + // overwrite existing if present + for name, patternDefinition := range patternDefinitions { + if strings.ContainsRune(name, ':') { + return ErrUnsupportedName + } + + grok.patternDefinitions[name] = patternDefinition + } + return nil +} + +func (grok *Grok) HasCaptureGroups() bool { + if grok == nil || grok.re == nil { + return false + } + + for _, groupName := range grok.re.SubexpNames() { + if groupName != "" { + return true + } + } + + return false +} + +func (grok *Grok) Compile(pattern string, namedCapturesOnly bool) error { + return grok.compile(pattern, namedCapturesOnly) +} + +func (grok *Grok) Match(text []byte) bool { + return grok.re.Match(text) +} + +func (grok *Grok) MatchString(text string) bool { + return grok.re.MatchString(text) +} + +// ParseString parses the given string and returns a map[string]string whose values +// are not converted according to type hints. +// When the expression does not match, an empty map is returned.
+func (grok *Grok) ParseString(text string) (map[string]string, error) { + return grok.captureString(text) +} + +// Parse parses text given as []byte and returns a map[string][]byte whose values +// are not converted according to type hints. +// When the expression does not match, an empty map is returned. +func (grok *Grok) Parse(text []byte) (map[string][]byte, error) { + return grok.captureBytes(text) +} + +// ParseTyped parses text and returns a map[string]interface{} with values +// typed according to the type hints collected at compile time. +// If a type hint is not supported, the returned error wraps ErrTypeNotProvided. +// When the expression does not match, an empty map is returned. +func (grok *Grok) ParseTyped(text []byte) (map[string]interface{}, error) { + captures, err := grok.captureTyped(text) + if err != nil { + return nil, err + } + + captureBytes := make(map[string]interface{}) + for k, v := range captures { + captureBytes[k] = v + } + + return captureBytes, nil +} + +// ParseTypedString parses text and returns a map[string]interface{} with values +// typed according to the type hints collected at compile time. +// If a type hint is not supported, the returned error wraps ErrTypeNotProvided. +// When the expression does not match, an empty map is returned. +func (grok *Grok) ParseTypedString(text string) (map[string]interface{}, error) { + return grok.ParseTyped([]byte(text)) +} + +func (grok *Grok) compile(pattern string, namedCapturesOnly bool) error { + // get expanded pattern + expandedExpression, hints, err := grok.expand(pattern, namedCapturesOnly) + if err != nil { + return err + } + + compiledExpression, err := regexp.Compile(expandedExpression) + if err != nil { + return err + } + + grok.re = compiledExpression + grok.typeHints = hints + + return nil +} + +func (grok *Grok) captureString(text string) (map[string]string, error) { + return captureTypeFn(grok.re, text, + func(v, _ string) (string, error) { + return v, nil + }, + ) +} + +func (grok *Grok) captureBytes(text []byte) (map[string][]byte, error) { + return captureTypeFn(grok.re, string(text), + func(v, _ string) ([]byte, error) { + return []byte(v), nil + }, + ) +} + +func (grok *Grok) captureTyped(text []byte) (map[string]interface{}, error) { + return captureTypeFn(grok.re, string(text), grok.convertMatch) +} + +func captureTypeFn[K any](re *regexp.Regexp, text string, conversionFn func(v, key string) (K, error)) (map[string]K, error) { + captures := make(map[string]K) + + matches := re.FindStringSubmatch(text) + if len(matches) == 0 { + return captures, nil + } + + names := re.SubexpNames() + if len(names) == 0 { + return captures, nil + } + + for i, name := range names { + if len(name) == 0 { + continue + } + + match := matches[i] + if len(match) == 0 { + continue + } + + if conversionFn != nil { + v, err := conversionFn(string(match), name) + if err != nil { + return nil, err + } + captures[strings.ReplaceAll(name, dotSep, ".")] = v + } + } + + return captures, nil +} + +func (grok *Grok) convertMatch(match, name string) (interface{}, error) { + hint, found := grok.typeHints[name] + if !found { + return match, nil + } + + switch hint { + case "string": + return match, nil + + case "double": + return strconv.ParseFloat(match, 64) + case "float": + return strconv.ParseFloat(match, 64) + + case "int": + return strconv.Atoi(match) + case "long": + return strconv.Atoi(match) + + case "bool": + return strconv.ParseBool(match) + case "boolean": + return strconv.ParseBool(match) + default: + return nil, fmt.Errorf("invalid type for %v: %w", name, ErrTypeNotProvided) + } +}
+ +// expand processes a pattern and returns the expanded regular expression, the collected type hints, and an error +func (grok *Grok) expand(pattern string, namedCapturesOnly bool) (string, map[string]string, error) { + hints := make(map[string]string) + expandedPattern := pattern + + // the recursion break guards against cyclic references in pattern definitions; + // as this is performed only once, at compile time, a cleverer optimization (e.g. detecting cycles in the pattern graph) is TBD + for recursionBreak := 1000; recursionBreak > 0; recursionBreak-- { + subMatches := reusePattern.FindAllStringSubmatch(expandedPattern, -1) + if len(subMatches) == 0 { + // nothing to expand anymore + break + } + + for _, nameSubmatch := range subMatches { + // a grok reference can take any of these forms: + // %{SYNTAX} - e.g. %{NUMBER} + // %{SYNTAX:ID} - e.g. %{NUMBER:MY_AGE} + // %{SYNTAX:ID:TYPE} - e.g. %{NUMBER:MY_AGE:INT} + + // nameSubmatch is equal to [["%{NAME:ID:TYPE}" "NAME:ID:TYPE"]]; + // we only need the inner part + nameParts := strings.Split(nameSubmatch[1], ":") + + grokId := nameParts[0] + var targetId string + if len(nameParts) > 1 { + targetId = strings.ReplaceAll(nameParts[1], ".", dotSep) + } else { + targetId = nameParts[0] + } + // collect type hints for used patterns + if len(nameParts) == 3 { + hints[targetId] = nameParts[2] + } + + knownPattern, found := grok.lookupPattern(grokId) + if !found { + return "", nil, fmt.Errorf("pattern definition %q unknown: %w", grokId, ErrParseFailure) + } + + var replacementPattern string + if namedCapturesOnly && len(nameParts) == 1 { + // a plain %{SYNTAX} carries no ID, so there is nothing to capture by name + replacementPattern = "(" + knownPattern + ")" + } else { + replacementPattern = "(?P<" + targetId + ">" + knownPattern + ")" + } + + // expand pattern with definition + expandedPattern = strings.ReplaceAll(expandedPattern, nameSubmatch[0], replacementPattern) + } + } + + return expandedPattern, hints, nil +} + +func (grok *Grok) lookupPattern(grokId string) (string, bool) { + if knownPattern, found := grok.patternDefinitions[grokId]; found { + return knownPattern, found + } + + if grok.lookupDefaultPatterns { + if knownPattern, found := patterns.Default[grokId]; found { + return knownPattern, found + } + } + + return "", false +} diff --git a/vendor/github.com/elastic/go-grok/magefile.go b/vendor/github.com/elastic/go-grok/magefile.go new file mode 100644 index 00000000000..44959377f54 --- /dev/null +++ b/vendor/github.com/elastic/go-grok/magefile.go @@ -0,0 +1,90 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.
+ +//go:build mage +// +build mage + +package main + +import ( + "fmt" + "path/filepath" + + "github.com/magefile/mage/mg" + + // mage:import + "github.com/elastic/go-grok/dev-tools/mage" + + devtools "github.com/elastic/go-grok/dev-tools/mage" + "github.com/elastic/go-grok/dev-tools/mage/gotool" +) + +// Aliases are shortcuts to long target names. +// nolint: deadcode // it's used by `mage`. +var Aliases = map[string]interface{}{ + "llc": mage.Linter.LastChange, + "lint": mage.Linter.All, +} + +// Check runs all the checks +// nolint: deadcode,unparam // it's used as a `mage` target and requires returning an error +func Check() error { + mg.Deps(devtools.InstallGoLicenser) + mg.Deps(devtools.Deps.CheckModuleTidy, CheckLicenseHeaders) + mg.Deps(devtools.CheckNoChanges) + return nil +} + +// Fmt formats code and adds license headers. +func Fmt() { + mg.Deps(devtools.GoImports.Run) + mg.Deps(AddLicenseHeaders) +} + +// AddLicenseHeaders adds ASL2 headers to .go files +func AddLicenseHeaders() error { + fmt.Println(">> fmt - go-licenser: Adding missing headers") + + mg.Deps(devtools.InstallGoLicenser) + + licenser := gotool.Licenser + + return licenser( + licenser.License("ASL2"), + ) +} + +// CheckLicenseHeaders checks ASL2 headers in .go files +func CheckLicenseHeaders() error { + mg.Deps(devtools.InstallGoLicenser) + + licenser := gotool.Licenser + + return licenser( + licenser.Check(), + licenser.License("ASL2"), + ) +} + +// Notice generates a NOTICE.txt file for the module. +func Notice() error { + return devtools.GenerateNotice( + filepath.Join("dev-tools", "templates", "notice", "overrides.json"), + filepath.Join("dev-tools", "templates", "notice", "rules.json"), + filepath.Join("dev-tools", "templates", "notice", "NOTICE.txt.tmpl"), + ) +} diff --git a/vendor/github.com/elastic/go-grok/patterns/aws.go b/vendor/github.com/elastic/go-grok/patterns/aws.go new file mode 100644 index 00000000000..3377bd3029d --- /dev/null +++ b/vendor/github.com/elastic/go-grok/patterns/aws.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
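A short sketch of how the pattern sets in this and the following files are consumed. NewComplete and ParseString come from grok.go above; the S3-style request line is a made-up sample:

    package main

    import (
        "fmt"

        grok "github.com/elastic/go-grok"
    )

    func main() {
        // NewComplete registers the AWS set below along with the other bundled sets.
        g, err := grok.NewComplete()
        if err != nil {
            panic(err)
        }
        if err := g.Compile(`%{S3_REQUEST_LINE}`, true); err != nil {
            panic(err)
        }
        m, _ := g.ParseString("GET /index.html HTTP/1.1")
        fmt.Println(m["http.request.method"], m["url.original"]) // GET /index.html
    }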
+ +package patterns + +var AWS map[string]string = map[string]string{ + "S3_REQUEST_LINE": `(?:%{WORD:http.request.method} %{NOTSPACE:url.original}(?: HTTP/%{NUMBER:http.version})?)`, + "S3_ACCESS_LOG": `%{WORD:aws.s3access.bucket_owner} %{NOTSPACE:aws.s3access.bucket} \[%{HTTPDATE:timestamp}\] (?:-|%{IP:client.address}) (?:-|%{NOTSPACE:client.user.id}) %{NOTSPACE:aws.s3access.request_id} %{NOTSPACE:aws.s3access.operation} (?:-|%{NOTSPACE:aws.s3access.key}) (?:-|"%{S3_REQUEST_LINE:aws.s3access.request_uri}") (?:-|%{INT:http.response.status_code:int}) (?:-|%{NOTSPACE:aws.s3access.error_code}) (?:-|%{INT:aws.s3access.bytes_sent:long}) (?:-|%{INT:aws.s3access.object_size:long}) (?:-|%{INT:aws.s3access.total_time:int}) (?:-|%{INT:aws.s3access.turn_around_time:int}) "(?:-|%{DATA:http.request.referrer})" "(?:-|%{DATA:user_agent.original})" (?:-|%{NOTSPACE:aws.s3access.version_id})(?: (?:-|%{NOTSPACE:aws.s3access.host_id}) (?:-|%{NOTSPACE:aws.s3access.signature_version}) (?:-|%{NOTSPACE:tls.cipher}) (?:-|%{NOTSPACE:aws.s3access.authentication_type}) (?:-|%{NOTSPACE:aws.s3access.host_header}) (?:-|%{NOTSPACE:aws.s3access.tls_version}))?`, + + "ELB_URIHOST": `%{IPORHOST:url.domain}(?::%{POSINT:url.port:int})?`, + "ELB_URIPATHQUERY": `%{URIPATH:url.path}(?:\?%{URIQUERY:url.query})?`, + "ELB_URIPATHPARAM": `%{ELB_URIPATHQUERY}`, + "ELB_URI": `%{URIPROTO:url.scheme}://(?:%{USER:url.username}(?::[^@]*)?@)?(?:%{ELB_URIHOST})?(?:%{ELB_URIPATHQUERY})?`, + "ELB_REQUEST_LINE": `(?:%{WORD:http.request.method} %{ELB_URI:url.original}(?: HTTP/%{NUMBER:http.version})?)`, + "ELB_V1_HTTP_LOG": `%{TIMESTAMP_ISO8601:timestamp} %{NOTSPACE:aws.elb.name} %{IP:source.address}:%{INT:source.port:int} (?:-|(?:%{IP:aws.elb.backend.ip}:%{INT:aws.elb.backend.port:int})) (?:-1|%{NUMBER:aws.elb.request_processing_time.sec:float}) (?:-1|%{NUMBER:aws.elb.backend_processing_time.sec:float}) (?:-1|%{NUMBER:aws.elb.response_processing_time.sec:float}) %{INT:http.response.status_code:int} (?:-|%{INT:aws.elb.backend.http.response.status_code:int}) %{INT:http.request.body.size:long} %{INT:http.response.body.size:long} "%{ELB_REQUEST_LINE}"(?: "(?:-|%{DATA:user_agent.original})" (?:-|%{NOTSPACE:tls.cipher}) (?:-|%{NOTSPACE:aws.elb.ssl_protocol}))?`, + "ELB_ACCESS_LOG": `%{ELB_V1_HTTP_LOG}`, + + "CLOUDFRONT_ACCESS_LOG": 
`(?P<timestamp>%{YEAR}[-]%{MONTHNUM}[-]%{MONTHDAY}\t%{TIME})\t%{WORD:aws.cloudfront.x_edge_location}\t(?:-|%{INT:destination.bytes:long})\t%{IPORHOST:source.address}\t%{WORD:http.request.method}\t%{HOSTNAME:url.domain}\t%{NOTSPACE:url.path}\t(?:(?:000)|%{INT:http.response.status_code:int})\t(?:-|%{DATA:http.request.referrer})\t%{DATA:user_agent.original}\t(?:-|%{DATA:url.query})\t(?:-|%{DATA:aws.cloudfront.http.request.cookie})\t%{WORD:aws.cloudfront.x_edge_result_type}\t%{NOTSPACE:aws.cloudfront.x_edge_request_id}\t%{HOSTNAME:aws.cloudfront.http.request.host}\t%{URIPROTO:network.protocol.name}\t(?:-|%{INT:source.bytes:long})\t%{NUMBER:aws.cloudfront.time_taken:float}\t(?:-|%{IP:network.forwarded_ip})\t(?:-|%{DATA:aws.cloudfront.ssl_protocol})\t(?:-|%{NOTSPACE:tls.cipher})\t%{WORD:aws.cloudfront.x_edge_response_result_type}(?:\t(?:-|HTTP/%{NUMBER:http.version})\t(?:-|%{DATA:aws.cloudfront.fle_status})\t(?:-|%{DATA:aws.cloudfront.fle_encrypted_fields})\t%{INT:source.port:int}\t%{NUMBER:aws.cloudfront.time_to_first_byte:float}\t(?:-|%{DATA:aws.cloudfront.x_edge_detailed_result_type})\t(?:-|%{NOTSPACE:http.request.mime_type})\t(?:-|%{INT:aws.cloudfront.http.request.size:long})\t(?:-|%{INT:aws.cloudfront.http.request.range.start:long})\t(?:-|%{INT:aws.cloudfront.http.request.range.end:long}))?`, +} diff --git a/vendor/github.com/elastic/go-grok/patterns/bind9.go b/vendor/github.com/elastic/go-grok/patterns/bind9.go new file mode 100644 index 00000000000..1639fa4f93c --- /dev/null +++ b/vendor/github.com/elastic/go-grok/patterns/bind9.go @@ -0,0 +1,27 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package patterns + +var Bind9 map[string]string = map[string]string{ + "BIND9_TIMESTAMP": `%{MONTHDAY}[-]%{MONTH}[-]%{YEAR} %{TIME}`, + "BIND9_DNSTYPE": `(?:A|AAAA|CAA|CDNSKEY|CDS|CERT|CNAME|CSYNC|DLV|DNAME|DNSKEY|DS|HINFO|LOC|MX|NAPTR|NS|NSEC|NSEC3|OPENPGPKEY|PTR|RRSIG|RP|SIG|SMIMEA|SOA|SRV|TSIG|TXT|URI|IN)`, + "BIND9_CATEGORY": `(?:queries)`, + "BIND9_QUERYLOGBASE": `client(:? @0x(?:[0-9A-Fa-f]+))? %{IP:client.address}#%{POSINT:client.port:int} \(%{GREEDYDATA:bind.log.question.name}\): query: %{GREEDYDATA:dns.question.name} (?P(?:IN)) %{BIND9_DNSTYPE:dns.question.type}(:? %{DATA:bind.log.question.flags})? \(%{IP:server.address}\)`, + "BIND9_QUERYLOG": `%{BIND9_TIMESTAMP:timestamp} %{BIND9_CATEGORY:bind.log.category}: %{LOGLEVEL:log.level}: %{BIND9_QUERYLOGBASE}`, + "BIND9": `%{BIND9_QUERYLOG}`, +} diff --git a/vendor/github.com/elastic/go-grok/patterns/bro.go b/vendor/github.com/elastic/go-grok/patterns/bro.go new file mode 100644 index 00000000000..6e1ba00d93a --- /dev/null +++ b/vendor/github.com/elastic/go-grok/patterns/bro.go @@ -0,0 +1,27 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements.
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package patterns + +var Bro map[string]string = map[string]string{ + "BRO_BOOL": `[TF]`, + "BRO_DATA": `[^\t]+`, + "BRO_HTTP": `%{NUMBER:timestamp}\t%{NOTSPACE:zeek.session_id}\t%{IP:source.address}\t%{INT:source.port:int}\t%{IP:destination.address}\t%{INT:destination.port:int}\t%{INT:zeek.http.trans_depth:int}\t(?:-|%{WORD:http.request.method})\t(?:-|%{BRO_DATA:url.domain})\t(?:-|%{BRO_DATA:url.original})\t(?:-|%{BRO_DATA:http.request.referrer})\t(?:-|%{BRO_DATA:user_agent.original})\t(?:-|%{NUMBER:http.request.body.size:long})\t(?:-|%{NUMBER:http.response.body.size:long})\t(?:-|%{POSINT:http.response.status_code:int})\t(?:-|%{DATA:zeek.http.status_msg})\t(?:-|%{POSINT:zeek.http.info_code:int})\t(?:-|%{DATA:zeek.http.info_msg})\t(?:-|%{BRO_DATA:zeek.http.filename})\t(?:\(empty\)|%{BRO_DATA:zeek.http.tags})\t(?:-|%{BRO_DATA:url.username})\t(?:-|%{BRO_DATA:url.password})\t(?:-|%{BRO_DATA:zeek.http.proxied})\t(?:-|%{BRO_DATA:zeek.http.orig_fuids})\t(?:-|%{BRO_DATA:http.request.mime_type})\t(?:-|%{BRO_DATA:zeek.http.resp_fuids})\t(?:-|%{BRO_DATA:http.response.mime_type})`, + "BRO_DNS": `%{NUMBER:timestamp}\t%{NOTSPACE:zeek.session_id}\t%{IP:source.address}\t%{INT:source.port:int}\t%{IP:destination.address}\t%{INT:destination.port:int}\t%{WORD:network.transport}\t(?:-|%{INT:dns.id:int})\t(?:-|%{BRO_DATA:dns.question.name})\t(?:-|%{INT:zeek.dns.qclass:int})\t(?:-|%{BRO_DATA:zeek.dns.qclass_name})\t(?:-|%{INT:zeek.dns.qtype:int})\t(?:-|%{BRO_DATA:dns.question.type})\t(?:-|%{INT:zeek.dns.rcode:int})\t(?:-|%{BRO_DATA:dns.response_code})\t(?:-|%{BRO_BOOL:zeek.dns.AA})\t(?:-|%{BRO_BOOL:zeek.dns.TC})\t(?:-|%{BRO_BOOL:zeek.dns.RD})\t(?:-|%{BRO_BOOL:zeek.dns.RA})\t(?:-|%{NONNEGINT:zeek.dns.Z:int})\t(?:-|%{BRO_DATA:zeek.dns.answers})\t(?:-|%{DATA:zeek.dns.TTLs})\t(?:-|%{BRO_BOOL:zeek.dns.rejected})`, + "BRO_CONN": `%{NUMBER:timestamp}\t%{NOTSPACE:zeek.session_id}\t%{IP:source.address}\t%{INT:source.port:int}\t%{IP:destination.address}\t%{INT:destination.port:int}\t%{WORD:network.transport}\t(?:-|%{BRO_DATA:network.protocol.name})\t(?:-|%{NUMBER:zeek.connection.duration:float})\t(?:-|%{INT:zeek.connection.orig_bytes:long})\t(?:-|%{INT:zeek.connection.resp_bytes:long})\t(?:-|%{BRO_DATA:zeek.connection.state})\t(?:-|%{BRO_BOOL:zeek.connection.local_orig})\t(?:(?:-|%{BRO_BOOL:zeek.connection.local_resp})\t)?(?:-|%{INT:zeek.connection.missed_bytes:long})\t(?:-|%{BRO_DATA:zeek.connection.history})\t(?:-|%{INT:source.packets:long})\t(?:-|%{INT:source.bytes:long})\t(?:-|%{INT:destination.packets:long})\t(?:-|%{INT:destination.bytes:long})\t(?:\(empty\)|%{BRO_DATA:zeek.connection.tunnel_parents})`, + "BRO_FILES": 
`%{NUMBER:timestamp}\t%{NOTSPACE:zeek.files.fuid}\t(?:-|%{IP:server.address})\t(?:-|%{IP:client.address})\t(?:-|%{BRO_DATA:zeek.files.session_ids})\t(?:-|%{BRO_DATA:zeek.files.source})\t(?:-|%{INT:zeek.files.depth:int})\t(?:-|%{BRO_DATA:zeek.files.analyzers})\t(?:-|%{BRO_DATA:file.mime_type})\t(?:-|%{BRO_DATA:file.name})\t(?:-|%{NUMBER:zeek.files.duration:float})\t(?:-|%{BRO_DATA:zeek.files.local_orig})\t(?:-|%{BRO_BOOL:zeek.files.is_orig})\t(?:-|%{INT:zeek.files.seen_bytes:long})\t(?:-|%{INT:file.size:long})\t(?:-|%{INT:zeek.files.missing_bytes:long})\t(?:-|%{INT:zeek.files.overflow_bytes:long})\t(?:-|%{BRO_BOOL:zeek.files.timedout})\t(?:-|%{BRO_DATA:zeek.files.parent_fuid})\t(?:-|%{BRO_DATA:file.hash.md5})\t(?:-|%{BRO_DATA:file.hash.sha1})\t(?:-|%{BRO_DATA:file.hash.sha256})\t(?:-|%{BRO_DATA:zeek.files.extracted})`, +} diff --git a/vendor/github.com/elastic/go-grok/patterns/default.go b/vendor/github.com/elastic/go-grok/patterns/default.go new file mode 100644 index 00000000000..350bcec3ff3 --- /dev/null +++ b/vendor/github.com/elastic/go-grok/patterns/default.go @@ -0,0 +1,121 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
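The defaults in the next file are building blocks that user-defined patterns can reference by name. A hypothetical sketch (APACHE_DATE is an invented pattern name, not part of this file):

    package main

    import (
        "fmt"

        grok "github.com/elastic/go-grok"
    )

    func main() {
        g := grok.New()
        // APACHE_DATE (an invented name) composes three of the default patterns below.
        if err := g.AddPattern("APACHE_DATE", `%{MONTHDAY}/%{MONTH}/%{YEAR}`); err != nil {
            panic(err)
        }
        if err := g.Compile(`\[%{APACHE_DATE:date}\]`, true); err != nil {
            panic(err)
        }
        m, _ := g.ParseString("[10/Oct/2000]")
        fmt.Println(m["date"]) // 10/Oct/2000
    }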
+ +package patterns + +var Default map[string]string = map[string]string{ + "WORD": `\b\w+\b`, + "NOTSPACE": `\S+`, + "SPACE": `\s*`, + "DATA": `.*?`, + + // types + "INT": `(?:[+-]?(?:[0-9]+))`, + "NUMBER": `(?:%{BASE10NUM})`, + "BOOL": "true|false", + + "BASE10NUM": `([+-]?(?:[0-9]+(?:\.[0-9]+)?)|\.[0-9]+)`, + "BASE16NUM": `[+-]?(?:0x)?[0-9A-Fa-f]+`, // Adjusted, removed lookbehind + "BASE16FLOAT": `[+-]?(?:0x)?[0-9A-Fa-f]+(?:\.[0-9A-Fa-f]*)?`, // Adjusted, removed lookbehind and word boundaries + "POSINT": `\b[1-9][0-9]*\b`, + "NONNEGINT": `\b[0-9]+\b`, + "GREEDYDATA": `.*`, + "QUOTEDSTRING": `"([^"\\]*(\\.[^"\\]*)*)"|\'([^\'\\]*(\\.[^\'\\]*)*)\'`, + "UUID": `[A-Fa-f0-9]{8}-(?:[A-Fa-f0-9]{4}-){3}[A-Fa-f0-9]{12}`, + "URN": `urn:[0-9A-Za-z][0-9A-Za-z-]{0,31}:[0-9A-Za-z()+,.:=@;$_!*'/?#-]+`, + + // network + "IP": `(?:%{IPV6}|%{IPV4})`, + "IPV6": `((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?`, + "IPV4": `(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)`, + + "IPORHOST": `(?:%{IP}|%{HOSTNAME})`, + "HOSTNAME": `\b(?:[0-9A-Za-z][0-9A-Za-z-]{0,62})(?:\.(?:[0-9A-Za-z][0-9A-Za-z-]{0,62}))*(\.?|\b)`, + "EMAILLOCALPART": `[a-zA-Z][a-zA-Z0-9_.+-=:]+`, + "EMAILADDRESS": `%{EMAILLOCALPART}@%{HOSTNAME}`, + "USERNAME": `[a-zA-Z0-9._-]+`, + "USER": `%{USERNAME}`, + + "MAC": `(?:%{CISCOMAC}|%{WINDOWSMAC}|%{COMMONMAC})`, + "CISCOMAC": `(?:(?:[A-Fa-f0-9]{4}\.){2}[A-Fa-f0-9]{4})`, + "WINDOWSMAC": `(?:(?:[A-Fa-f0-9]{2}-){5}[A-Fa-f0-9]{2})`, + "COMMONMAC": `(?:(?:[A-Fa-f0-9]{2}:){5}[A-Fa-f0-9]{2})`, + "HOSTPORT": `%{IPORHOST}:%{POSINT}`, + + // paths + "UNIXPATH": `(/[\w_%!$@:.,+~-]+)+`, + "TTY": `/dev/(pts|tty([pq])?)(\w+)?/?(?:[0-9]+)`, + "WINPATH": `[A-Za-z]+:(\\[^\\?*]+)+`, + "URIPROTO": `[A-Za-z][A-Za-z0-9+\.-]+`, + "URIHOST": `%{IPORHOST}(?::%{POSINT})?`, + "URIPATH": `(/[A-Za-z0-9$.+!*'(){},~:;=@#%&_\-]+)+`, + "URIQUERY": `[A-Za-z0-9$.+!*'|(){},~@#%&/=:;_?\-\[\]<>]*`, + "URIPARAM": `\?%{URIQUERY}`, + "URIPATHPARAM": `%{URIPATH}(?:\?%{URIQUERY})?`, + "URI": `%{URIPROTO}://(?:%{USER}(?::[^@]*)?@)?%{URIHOST}(?:%{URIPATH}(?:\?%{URIQUERY})?)?`, + "PATH": `(?:%{UNIXPATH}|%{WINPATH})`, + + // dates + "MONTH": `\b(?:Jan(?:uary)?|Feb(?:ruary)?|Mar(?:ch)?|Apr(?:il)?|May|Jun(?:e)?|Jul(?:y)?|Aug(?:ust)?|Sep(?:tember)?|Oct(?:ober)?|Nov(?:ember)?|Dec(?:ember)?)\b`, + + // Months: January, Feb, 3, 03, 12, December
+ // "MONTH": `\b(?:[Jj]an(?:uary|uar)?|[Ff]eb(?:ruary|ruar)?|[Mm](?:a|ä)?r(?:ch|z)?|[Aa]pr(?:il)?|[Mm]a(?:y|i)?|[Jj]un(?:e|i)?|[Jj]ul(?:y|i)?|[Aa]ug(?:ust)?|[Ss]ep(?:tember)?|[Oo](?:c|k)?t(?:ober)?|[Nn]ov(?:ember)?|[Dd]e(?:c|z)(?:ember)?)\b`, + "MONTHNUM": `(?:0[1-9]|1[0-2])`, + "MONTHDAY": `(?:(?:0[1-9])|(?:[12][0-9])|(?:3[01])|[1-9])`, + + // Days Monday, Tue, Thu, etc + "DAY": `\b(?:Mon(?:day)?|Tue(?:sday)?|Wed(?:nesday)?|Thu(?:rsday)?|Fri(?:day)?|Sat(?:urday)?|Sun(?:day)?)\b`, + + // Years? + "YEAR": `(\d\d){1,2}`, + "HOUR": `(?:2[0123]|[01]?[0-9])`, + "MINUTE": `(?:[0-5][0-9])`, + + // '60' is a leap second in most time standards and thus is valid. + "SECOND": `(?:(?:[0-5][0-9]|60)(?:[:.,][0-9]+)?)`, + "TIME": `%{HOUR}:%{MINUTE}(?::%{SECOND})?`, + + // datestamp is YYYY/MM/DD-HH:MM:SS.UUUU (or something like it) + "DATE_US": `%{MONTHNUM}[/-]%{MONTHDAY}[/-]%{YEAR}`, + "DATE_EU": `%{MONTHDAY}[./-]%{MONTHNUM}[./-]%{YEAR}`, + "ISO8601_TIMEZONE": `(?:Z|[+-]%{HOUR}(?::?%{MINUTE}))`, + "ISO8601_SECOND": `%{SECOND}`, + "TIMESTAMP_ISO8601": `%{YEAR}-%{MONTHNUM}-%{MONTHDAY}[T ]%{HOUR}:?%{MINUTE}(?::?%{SECOND})?%{ISO8601_TIMEZONE}?`, + "DATE": `%{DATE_US}|%{DATE_EU}`, + "DATESTAMP": `%{DATE}[- ]%{TIME}`, + "TZ": `(?:[PMACE][SED]T|UTC)`, + "DATESTAMP_RFC822": `%{DAY} %{MONTH} %{MONTHDAY} %{YEAR} %{TIME} %{TZ}`, + "DATESTAMP_RFC2822": `%{DAY}, %{MONTHDAY} %{MONTH} %{YEAR} %{TIME} %{ISO8601_TIMEZONE}`, + "DATESTAMP_OTHER": `%{DAY} %{MONTH} %{MONTHDAY} %{TIME} %{TZ} %{YEAR}`, + "DATESTAMP_EVENTLOG": `%{YEAR}%{MONTHNUM}%{MONTHDAY}%{HOUR}%{MINUTE}%{SECOND}`, + + // Syslog Dates: Month Day HH:MM:SS + // "MONTH": `\b(?:Jan(?:uary|uar)?|Feb(?:ruary|ruar)?|Mar(?:ch|z)?|Apr(?:il)?|May|i|Jun(?:e|i)?|Jul(?:y|i)?|Aug(?:ust)?|Sep(?:tember)?|Oct(?:ober)?|Nov(?:ember)?|Dec(?:ember)?)\b`, + "SYSLOGTIMESTAMP": `%{MONTH} +%{MONTHDAY} %{TIME}`, + "PROG": `[!-Z\\^-~]+`, // Simplified range based on ASCII + "SYSLOGPROG": `%{PROG}(?:\[\d+\])?`, // Simplified, as type hints and named groups aren't supported in Go `regexp` + "SYSLOGHOST": `%{IPORHOST}`, + "SYSLOGFACILITY": `<%{NONNEGINT}.%{NONNEGINT}>`, // Simplified to remove type hints + "HTTPDATE": `%{MONTHDAY}/%{MONTH}/%{YEAR}:%{TIME} %{INT}`, + + // Shortcuts + "QS": `%{QUOTEDSTRING}`, + + // Log formats + "SYSLOGBASE": `%{SYSLOGTIMESTAMP:timestamp} (?:%{SYSLOGFACILITY} )?%{SYSLOGHOST:host.name} %{SYSLOGPROG}:`, + + // Log Levels + "LOGLEVEL": `(?i)(alert|trace|debug|notice|info(?:rmation)?|warn(?:ing)?|err(?:or)?|crit(?:ical)?|fatal|severe|emerg(?:ency)?)`, +} diff --git a/vendor/github.com/elastic/go-grok/patterns/exim.go b/vendor/github.com/elastic/go-grok/patterns/exim.go new file mode 100644 index 00000000000..8873c4c1d00 --- /dev/null +++ b/vendor/github.com/elastic/go-grok/patterns/exim.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied.
See the License for the +// specific language governing permissions and limitations +// under the License. + +package patterns + +var Exim map[string]string = map[string]string{ + "EXIM_MSGID": `[0-9A-Za-z]{6}-[0-9A-Za-z]{6}-[0-9A-Za-z]{2}`, + "EXIM_FLAGS": `(?:<=|=>|->|\*>|\*\*|==|<>|>>)`, + "EXIM_DATE": `(:?%{YEAR}-%{MONTHNUM}-%{MONTHDAY} %{TIME})`, + "EXIM_PID": `\[%{POSINT:process.pid:int}\]`, + "EXIM_QT": `((\d+y)?(\d+w)?(\d+d)?(\d+h)?(\d+m)?(\d+s)?)`, + "EXIM_EXCLUDE_TERMS": `(Message is frozen|(Start|End) queue run| Warning: | retry time not reached | no (IP address|host name) found for (IP address|host) | unexpected disconnection while reading SMTP command | no immediate delivery: |another process is handling this message)`, + "EXIM_REMOTE_HOST": `(H=(\(%{NOTSPACE:source.host.name}\) )?(\(%{NOTSPACE:exim.log.remote_address}\) )?\[%{IP:source.address}\](?::%{POSINT:source.port:int})?)`, + "EXIM_INTERFACE": `(I=\[%{IP:destination.address}\](?::%{NUMBER:destination.port:int}))`, + "EXIM_PROTOCOL": `(P=%{NOTSPACE:network.protocol.name})`, + "EXIM_MSG_SIZE": `(S=%{NUMBER:exim.log.message.body.size:int})`, + "EXIM_HEADER_ID": `(id=%{NOTSPACE:exim.log.header_id})`, + "EXIM_QUOTED_CONTENT": `(?:\\.|[^\\"])*`, + "EXIM_SUBJECT": `(T="%{EXIM_QUOTED_CONTENT:exim.log.message.subject}")`, + "EXIM_UNKNOWN_FIELD": `(?:[A-Za-z0-9]{1,4}=(?:%{QUOTEDSTRING}|%{NOTSPACE}))`, + "EXIM_NAMED_FIELDS": `(?: (?:%{EXIM_REMOTE_HOST}|%{EXIM_INTERFACE}|%{EXIM_PROTOCOL}|%{EXIM_MSG_SIZE}|%{EXIM_HEADER_ID}|%{EXIM_SUBJECT}|%{EXIM_UNKNOWN_FIELD}))*`, + "EXIM_MESSAGE_ARRIVAL": `%{EXIM_DATE:timestamp} (?:%{EXIM_PID} )?%{EXIM_MSGID:exim.log.message.id} (?P\<\=) ((?P[a-z:]) )?%{EMAILADDRESS:exim.log.sender.email}%{EXIM_NAMED_FIELDS}(?:(?: from \?)? for %{EMAILADDRESS:exim.log.recipient.email})?`, + "EXIM": `%{EXIM_MESSAGE_ARRIVAL}`, +} diff --git a/vendor/github.com/elastic/go-grok/patterns/firewalls.go b/vendor/github.com/elastic/go-grok/patterns/firewalls.go new file mode 100644 index 00000000000..78244b72e95 --- /dev/null +++ b/vendor/github.com/elastic/go-grok/patterns/firewalls.go @@ -0,0 +1,95 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package patterns + +var Firewalls map[string]string = map[string]string{ + + // NetScreen firewall logs + "NETSCREENSESSIONLOG": `%{SYSLOGTIMESTAMP:timestamp} %{IPORHOST:observer.hostname} %{NOTSPACE:observer.name}\: (?PNetScreen) device_id=%{WORD:netscreen.device_id} .*?(system-(\w+)-(%{NONNEGINT:event.code})\((%{WORD:netscreen.session.type})\))?\: start_time="%{DATA:netscreen.session.start_time}" duration=%{INT:netscreen.session.duration:int} policy_id=%{INT:netscreen.policy_id} service=%{DATA:netscreen.service} proto=%{INT:netscreen.protocol_number:int} src zone=%{WORD:observer.ingress.zone} dst zone=%{WORD:observer.egress.zone} action=%{WORD:event.action} sent=%{INT:source.bytes:long} rcvd=%{INT:destination.bytes:long} src=%{IPORHOST:source.address} dst=%{IPORHOST:destination.address}(?: src_port=%{INT:source.port:int} dst_port=%{INT:destination.port:int})?(?: src-xlated ip=%{IP:source.nat.ip} port=%{INT:source.nat.port:int} dst-xlated ip=%{IP:destination.nat.ip} port=%{INT:destination.nat.port:int})?(?: session_id=%{INT:netscreen.session.id} reason=%{GREEDYDATA:netscreen.session.reason})?`, + + // == Cisco ASA == + "CISCO_TAGGED_SYSLOG": `^<%{POSINT:log.syslog.priority:int}>%{CISCOTIMESTAMP:timestamp}( %{SYSLOGHOST:host.name})? ?: %%{CISCOTAG:cisco.asa.tag}:`, + "CISCOTIMESTAMP": `%{MONTH} +%{MONTHDAY}(?: %{YEAR})? %{TIME}`, + "CISCOTAG": `[A-Z0-9]+-%{INT}-(?:[A-Z0-9_]+)`, + + // Common Particles + "CISCO_ACTION": `Built|Teardown|Deny|Denied|denied by ACL|requested|permitted|denied|discarded|est-allowed|Dropping|created|deleted`, + "CISCO_REASON": `Duplicate TCP SYN|Failed to locate egress interface|Invalid transport field|No matching connection|DNS Response|DNS Query|(?:%{WORD}\s*)*`, + "CISCO_DIRECTION": `Inbound|inbound|Outbound|outbound`, + "CISCO_INTERVAL": `first hit|%{INT}-second interval`, + "CISCO_XLATE_TYPE": `static|dynamic`, + + // helpers + "CISCO_HITCOUNT_INTERVAL": `hit-cnt %{INT:cisco.asa.hit_count:int} (?:first hit|%{INT:cisco.asa.interval:int}-second interval)`, + "CISCO_SRC_IP_USER": `%{NOTSPACE:observer.ingress.interface.name}:%{IP:source.address}(?:\(%{DATA:source.user.name}\))?`, + "CISCO_DST_IP_USER": `%{NOTSPACE:observer.egress.interface.name}:%{IP:destination.address}(?:\(%{DATA:destination.user.name}\))?`, + "CISCO_SRC_HOST_PORT_USER": `%{NOTSPACE:observer.ingress.interface.name}:(?:(?:%{IP:source.address})|(?:%{HOSTNAME:source.address}))(?:/%{INT:source.port:int})?(?:\(%{DATA:source.user.name}\))?`, + "CISCO_DST_HOST_PORT_USER": `%{NOTSPACE:observer.egress.interface.name}:(?:(?:%{IP:destination.address})|(?:%{HOSTNAME:destination.address}))(?:/%{INT:destination.port:int})?(?:\(%{DATA:destination.user.name}\))?`, + "CISCOFW104001": `\((?:Primary|Secondary)\) Switching to ACTIVE - %{GREEDYDATA:event.reason}`, + "CISCOFW104002": `\((?:Primary|Secondary)\) Switching to STANDBY - %{GREEDYDATA:event.reason}`, + "CISCOFW104003": `\((?:Primary|Secondary)\) Switching to FAILED\.`, + "CISCOFW104004": `\((?:Primary|Secondary)\) Switching to OK\.`, + "CISCOFW105003": `\((?:Primary|Secondary)\) Monitoring on [Ii]nterface %{NOTSPACE:network.interface.name} waiting`, + "CISCOFW105004": `\((?:Primary|Secondary)\) Monitoring on [Ii]nterface %{NOTSPACE:network.interface.name} normal`, + "CISCOFW105005": `\((?:Primary|Secondary)\) Lost Failover communications with mate on [Ii]nterface %{NOTSPACE:network.interface.name}`, + "CISCOFW105008": `\((?:Primary|Secondary)\) Testing [Ii]nterface %{NOTSPACE:network.interface.name}`, + "CISCOFW105009": `\((?:Primary|Secondary)\) 
Testing on [Ii]nterface %{NOTSPACE:network.interface.name} (?:Passed|Failed)`, + "CISCOFW106001": `%{CISCO_DIRECTION:cisco.asa.network.direction} %{WORD:cisco.asa.network.transport} connection %{CISCO_ACTION:cisco.asa.outcome} from %{IP:source.address}/%{INT:source.port:int} to %{IP:destination.address}/%{INT:destination.port:int} flags %{DATA:cisco.asa.tcp_flags} on interface %{NOTSPACE:observer.egress.interface.name}`, + "CISCOFW106006_106007_106010": `%{CISCO_ACTION:cisco.asa.outcome} %{CISCO_DIRECTION:cisco.asa.network.direction} %{WORD:cisco.asa.network.transport} (?:from|src) %{IP:source.address}/%{INT:source.port:int}(?:\(%{DATA:source.user.name}\))? (?:to|dst) %{IP:destination.address}/%{INT:destination.port:int}(?:\(%{DATA:destination.user.name}\))? (?:(?:on interface %{NOTSPACE:observer.egress.interface.name})|(?:due to %{CISCO_REASON:event.reason}))`, + "CISCOFW106014": `%{CISCO_ACTION:cisco.asa.outcome} %{CISCO_DIRECTION:cisco.asa.network.direction} %{WORD:cisco.asa.network.transport} src %{CISCO_SRC_IP_USER} dst %{CISCO_DST_IP_USER}\s?\(type %{INT:cisco.asa.icmp_type:int}, code %{INT:cisco.asa.icmp_code:int}\)`, + "CISCOFW106015": `%{CISCO_ACTION:cisco.asa.outcome} %{WORD:cisco.asa.network.transport} \(%{DATA:cisco.asa.rule_name}\) from %{IP:source.address}/%{INT:source.port:int} to %{IP:destination.address}/%{INT:destination.port:int} flags %{DATA:cisco.asa.tcp_flags} on interface %{NOTSPACE:observer.egress.interface.name}`, + "CISCOFW106021": `%{CISCO_ACTION:cisco.asa.outcome} %{WORD:cisco.asa.network.transport} reverse path check from %{IP:source.address} to %{IP:destination.address} on interface %{NOTSPACE:observer.egress.interface.name}`, + "CISCOFW106023": `%{CISCO_ACTION:action}( protocol)? %{WORD:network.protocol.name} src %{DATA:source.interface}:%{DATA:source.address}(/%{INT:source.port})?(\(%{DATA:source.fwuser}\))? dst %{DATA:destination.interface}:%{DATA:destination.address}(/%{INT:destination.port})?(\(%{DATA:destination.fwuser}\))?( \(type %{INT:icmp_type}, code %{INT:icmp_code}\))? by access-group "?%{DATA:policy_id}"? \[%{DATA:hashcode1}, %{DATA:hashcode2}\]`, + "CISCOFW106100_2_3": `access-list %{NOTSPACE:cisco.asa.rule_name} %{CISCO_ACTION:cisco.asa.outcome} %{WORD:cisco.asa.network.transport} for user '%{DATA:user.name}' %{DATA:observer.ingress.interface.name}\/%{IP:source.address}\(%{INT:source.port:int}\) -> %{DATA:observer.egress.interface.name}\/%{IP:destination.address}\(%{INT:destination.port:int}\) %{CISCO_HITCOUNT_INTERVAL} \[%{DATA:metadata.cisco.asa.hashcode1}\, %{DATA:metadata.cisco.asa.hashcode2}\]`, + + "CISCOFW106100": `access-list %{NOTSPACE:cisco.asa.rule_name} %{CISCO_ACTION:cisco.asa.outcome} %{WORD:cisco.asa.network.transport} %{DATA:observer.ingress.interface.name}/%{IP:source.address}\(%{INT:source.port:int}\)(?:\(%{DATA:source.user.name}\))? -> %{DATA:observer.egress.interface.name}/%{IP:destination.address}\(%{INT:destination.port:int}\)(?:\(%{DATA:source.user.name}\))? hit-cnt %{INT:cisco.asa.hit_count:int} %{CISCO_INTERVAL} \[%{DATA:metadata.cisco.asa.hashcode1}\, %{DATA:metadata.cisco.asa.hashcode2}\]`, + "CISCOFW304001": `%{IP:source.address}(?:\(%{DATA:source.user.name}\))? 
Accessed URL %{IP:destination.address}:%{GREEDYDATA:url.original}`, + "CISCOFW110002": `%{CISCO_REASON:event.reason} for %{WORD:cisco.asa.network.transport} from %{DATA:observer.ingress.interface.name}:%{IP:source.address}/%{INT:source.port:int} to %{IP:destination.address}/%{INT:destination.port:int}`, + "CISCOFW302010": `%{INT:cisco.asa.connections.in_use:int} in use, %{INT:cisco.asa.connections.most_used:int} most used`, + "CISCOFW302013_302014_302015_302016": `%{CISCO_ACTION:cisco.asa.outcome}(?: %{CISCO_DIRECTION:cisco.asa.network.direction})? %{WORD:cisco.asa.network.transport} connection %{INT:cisco.asa.connection_id} for %{NOTSPACE:observer.ingress.interface.name}:%{IP:source.address}/%{INT:source.port:int}(?: \(%{IP:source.nat.ip}/%{INT:source.nat.port:int}\))?(?:\(%{DATA:source.user.name}\))? to %{NOTSPACE:observer.egress.interface.name}:%{IP:destination.address}/%{INT:destination.port:int}( \(%{IP:destination.nat.ip}/%{INT:destination.nat.port:int}\))?(?:\(%{DATA:destination.user.name}\))?( duration %{TIME:cisco.asa.duration} bytes %{INT:network.bytes:long})?(?: %{CISCO_REASON:event.reason})?(?: \(%{DATA:user.name}\))?`, + "CISCOFW302020_302021": `%{CISCO_ACTION:cisco.asa.outcome}(?: %{CISCO_DIRECTION:cisco.asa.network.direction})? %{WORD:cisco.asa.network.transport} connection for faddr %{IP:destination.address}/%{INT:cisco.asa.icmp_seq:int}(?:\(%{DATA:destination.user.name}\))? gaddr %{IP:source.nat.ip}/%{INT:cisco.asa.icmp_type:int} laddr %{IP:source.address}/%{INT}(?: \(%{DATA:source.user.name}\))?`, + "CISCOFW305011": `%{CISCO_ACTION:cisco.asa.outcome} %{CISCO_XLATE_TYPE} %{WORD:cisco.asa.network.transport} translation from %{DATA:observer.ingress.interface.name}:%{IP:source.address}(/%{INT:source.port:int})?(?:\(%{DATA:source.user.name}\))? to %{DATA:observer.egress.interface.name}:%{IP:destination.address}/%{INT:destination.port:int}`, + "CISCOFW313001_313004_313008": `%{CISCO_ACTION:cisco.asa.outcome} %{WORD:cisco.asa.network.transport} type=%{INT:cisco.asa.icmp_type:int}, code=%{INT:cisco.asa.icmp_code:int} from %{IP:source.address} on interface %{NOTSPACE:observer.egress.interface.name}(?: to %{IP:destination.address})?`, + "CISCOFW313005": `%{CISCO_REASON:event.reason} for %{WORD:cisco.asa.network.transport} error message: %{WORD} src %{CISCO_SRC_IP_USER} dst %{CISCO_DST_IP_USER} \(type %{INT:cisco.asa.icmp_type:int}, code %{INT:cisco.asa.icmp_code:int}\) on %{NOTSPACE} interface\.\s+Original IP payload: %{WORD:cisco.asa.original_ip_payload.network.transport} src %{IP:cisco.asa.original_ip_payload.source.address}/%{INT:cisco.asa.original_ip_payload.source.port:int}(?:\(%{DATA:cisco.asa.original_ip_payload.source.user.name}\))?
dst %{IP:cisco.asa.original_ip_payload.destination.address}/%{INT:cisco.asa.original_ip_payload.destination.port:int}(?:\(%{DATA:cisco.asa.original_ip_payload.destination.user.name}\))?`, + "CISCOFW321001": `Resource '%{DATA:cisco.asa.resource.name}' limit of %{POSINT:cisco.asa.resource.limit:int} reached for system`, + "CISCOFW402117": `%{WORD:cisco.asa.network.type}: Received a non-IPSec packet \(protocol=\s?%{WORD:cisco.asa.network.transport}\) from %{IP:source.address} to %{IP:destination.address}\.?`, + "CISCOFW402119": `%{WORD:cisco.asa.network.type}: Received an %{WORD:cisco.asa.ipsec.protocol} packet \(SPI=\s?%{DATA:cisco.asa.ipsec.spi}, sequence number=\s?%{DATA:cisco.asa.ipsec.seq_num}\) from %{IP:source.address} \(user=\s?%{DATA:source.user.name}\) to %{IP:destination.address} that failed anti-replay checking\.?`, + "CISCOFW419001": `%{CISCO_ACTION:cisco.asa.outcome} %{WORD:cisco.asa.network.transport} packet from %{NOTSPACE:observer.ingress.interface.name}:%{IP:source.address}/%{INT:source.port:int} to %{NOTSPACE:observer.egress.interface.name}:%{IP:destination.address}/%{INT:destination.port:int}, reason: %{GREEDYDATA:event.reason}`, + "CISCOFW419002": `%{CISCO_REASON:event.reason} from %{DATA:observer.ingress.interface.name}:%{IP:source.address}/%{INT:source.port:int} to %{DATA:observer.egress.interface.name}:%{IP:destination.address}/%{INT:destination.port:int} with different initial sequence number`, + "CISCOFW500004": `%{CISCO_REASON:event.reason} for protocol=%{WORD:cisco.asa.network.transport}, from %{IP:source.address}/%{INT:source.port:int} to %{IP:destination.address}/%{INT:destination.port:int}`, + "CISCOFW602303_602304": `%{WORD:cisco.asa.network.type}: An %{CISCO_DIRECTION:cisco.asa.network.direction} %{DATA:cisco.asa.ipsec.tunnel_type} SA \(SPI=%{DATA:cisco.asa.ipsec.spi}\) between %{IP:source.address} and %{IP:destination.address} \(user=%{DATA:source.user.name}\) has been %{CISCO_ACTION:cisco.asa.outcome}`, + "CISCOFW710001_710002_710003_710005_710006": `%{WORD:cisco.asa.network.transport} (?:request|access) %{CISCO_ACTION:cisco.asa.outcome} from %{IP:source.address}/%{INT:source.port:int} to %{DATA:observer.egress.interface.name}:%{IP:destination.address}/%{INT:destination.port:int}`, + "CISCOFW713172": `Group = %{DATA:cisco.asa.source.group}, IP = %{IP:source.address}, Automatic NAT Detection Status:\s+Remote end\s*%{DATA:metadata.cisco.asa.remote_nat}\s*behind a NAT device\s+This\s+end\s*%{DATA:metadata.cisco.asa.local_nat}\s*behind a NAT device`, + "CISCOFW733100": `\[\s*%{DATA:cisco.asa.burst.object}\s*\] drop %{DATA:cisco.asa.burst.id} exceeded.
Current burst rate is %{INT:cisco.asa.burst.current_rate:int} per second, max configured rate is %{INT:cisco.asa.burst.configured_rate:int}; Current average rate is %{INT:cisco.asa.burst.avg_rate:int} per second, max configured rate is %{INT:cisco.asa.burst.configured_avg_rate:int}; Cumulative total count is %{INT:cisco.asa.burst.cumulative_count:int}`, + + "IPTABLES_TCP_FLAGS": `(CWR |ECE |URG |ACK |PSH |RST |SYN |FIN )*`, + "IPTABLES_TCP_PART": `(?:SEQ=%{INT:iptables.tcp.seq:int}\s+)?(?:ACK=%{INT:iptables.tcp.ack:int}\s+)?WINDOW=%{INT:iptables.tcp.window:int}\s+RES=0x%{BASE16NUM:iptables.tcp_reserved_bits}\s+%{IPTABLES_TCP_FLAGS:iptables.tcp.flags}`, + + "IPTABLES4_FRAG": `((\s)?(CE|DF|MF))*`, + "IPTABLES4_PART": `SRC=%{IPV4:source.address}\s+DST=%{IPV4:destination.address}\s+LEN=(?:%{INT:iptables.length:int})?\s+TOS=(?:0|0x%{BASE16NUM:iptables.tos})?\s+PREC=(?:0x%{BASE16NUM:iptables.precedence_bits})?\s+TTL=(?:%{INT:iptables.ttl:int})?\s+ID=(?:%{INT:iptables.id})?\s+(?:%{IPTABLES4_FRAG:iptables.fragment_flags})?(?:\s+FRAG: %{INT:iptables.fragment_offset:int})?`, + "IPTABLES6_PART": `SRC=%{IPV6:source.address}\s+DST=%{IPV6:destination.address}\s+LEN=(?:%{INT:iptables.length:int})?\s+TC=(?:0|0x%{BASE16NUM:iptables.tos})?\s+HOPLIMIT=(?:%{INT:iptables.ttl:int})?\s+FLOWLBL=(?:%{INT:iptables.flow_label})?`, + + "IPTABLES": `IN=(?:%{NOTSPACE:observer.ingress.interface.name})?\s+OUT=(?:%{NOTSPACE:observer.egress.interface.name})?\s+(?:MAC=(?:%{COMMONMAC:destination.mac})?(?::%{COMMONMAC:source.mac})?(?::[A-Fa-f0-9]{2}:[A-Fa-f0-9]{2})?\s+)?(:?%{IPTABLES4_PART}|%{IPTABLES6_PART}).*?PROTO=(?:%{WORD:network.transport})?\s+SPT=(?:%{INT:source.port:int})?\s+DPT=(?:%{INT:destination.port:int})?\s+(?:%{IPTABLES_TCP_PART})?`, + + // Shorewall firewall logs + "SHOREWALL": `(?:%{SYSLOGTIMESTAMP:timestamp}) (?:%{WORD:observer.hostname}) .*Shorewall:(?:%{WORD:shorewall.firewall.type})?:(?:%{WORD:shorewall.firewall.action})?.*%{IPTABLES}`, + + // == SuSE Firewall 2 == + "SFW2_LOG_PREFIX": `SFW2\-INext\-%{NOTSPACE:suse.firewall.action}`, + "SFW2": `((?:%{SYSLOGTIMESTAMP:timestamp})|(?:%{TIMESTAMP_ISO8601:timestamp}))\s*%{HOSTNAME:observer.hostname}.*?%{SFW2_LOG_PREFIX:suse.firewall.log_prefix}\s*%{IPTABLES}`, +} diff --git a/vendor/github.com/elastic/go-grok/patterns/haproxy.go b/vendor/github.com/elastic/go-grok/patterns/haproxy.go new file mode 100644 index 00000000000..04a60da566c --- /dev/null +++ b/vendor/github.com/elastic/go-grok/patterns/haproxy.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.
+ +package patterns + +var HAProxy map[string]string = map[string]string{ + "HAPROXYTIME": `\b%{HOUR}:%{MINUTE}(:%{SECOND})?\b`, + "HAPROXYDATE": `%{MONTHDAY}/%{MONTH}/%{YEAR}:%{HAPROXYTIME}.%{INT}`, + "HAPROXYCAPTUREDREQUESTHEADERS": `(?:-|%{DATA:haproxy.http.request.captured_headers})`, + "HAPROXYCAPTUREDRESPONSEHEADERS": `(?:-|%{DATA:haproxy.http.response.captured_headers})`, + "HAPROXYURI": `(?:%{URIPROTO:url.scheme}://)?(?:%{USER:url.username}(?::[^@]*)?@)?(?:%{IPORHOST:url.domain}(?::%{POSINT:url.port:int})?)?(?:%{URIPATH:url.path}(?:\?%{URIQUERY:url.query})?)?`, + "HAPROXYHTTPREQUESTLINE": `(?:|(?:%{WORD:http.request.method} %{HAPROXYURI:url.original}(?: HTTP/%{NUMBER:http.version})?))`, + "HAPROXYHTTPBASE": `%{IP:source.address}:%{INT:source.port:int} \[%{HAPROXYDATE:haproxy.request_date}\] %{NOTSPACE:haproxy.frontend_name} %{NOTSPACE:haproxy.backend_name}/(?:|%{NOTSPACE:haproxy.server_name}) (?:-1|%{INT:haproxy.http.request.time_wait_ms:int})/(?:-1|%{INT:haproxy.total_waiting_time_ms:int})/(?:-1|%{INT:haproxy.connection_wait_time_ms:int})/(?:-1|%{INT:haproxy.http.request.time_wait_without_data_ms:int})/%{NOTSPACE:haproxy.total_time_ms} %{INT:http.response.status_code:int} %{INT:source.bytes:long} (?:-|%{DATA:haproxy.http.request.captured_cookie}) (?:-|%{DATA:haproxy.http.response.captured_cookie}) %{NOTSPACE:haproxy.termination_state} %{INT:haproxy.connections.active:int}/%{INT:haproxy.connections.frontend:int}/%{INT:haproxy.connections.backend:int}/%{INT:haproxy.connections.server:int}/%{INT:haproxy.connections.retries:int} %{INT:haproxy.server_queue:int}/%{INT:haproxy.backend_queue:int}(?: \{%{HAPROXYCAPTUREDREQUESTHEADERS}\}(?: \{%{HAPROXYCAPTUREDRESPONSEHEADERS}\})?)?(?: "%{HAPROXYHTTPREQUESTLINE}"?)?`, + "HAPROXYHTTP": `(?:%{SYSLOGTIMESTAMP:timestamp}|%{TIMESTAMP_ISO8601:timestamp}) %{IPORHOST:host.name} %{SYSLOGPROG}: %{HAPROXYHTTPBASE}`, + "HAPROXYTCP": `(?:%{SYSLOGTIMESTAMP:timestamp}|%{TIMESTAMP_ISO8601:timestamp}) %{IPORHOST:host.name} %{SYSLOGPROG}: %{IP:source.address}:%{INT:source.port:int} \[%{HAPROXYDATE:haproxy.request_date}\] %{NOTSPACE:haproxy.frontend_name} %{NOTSPACE:haproxy.backend_name}/(?:|%{NOTSPACE:haproxy.server_name}) (?:-1|%{INT:haproxy.total_waiting_time_ms:int})/(?:-1|%{INT:haproxy.connection_wait_time_ms:int})/%{NOTSPACE:haproxy.total_time_ms} %{INT:source.bytes:long} %{NOTSPACE:haproxy.termination_state} %{INT:haproxy.connections.active:int}/%{INT:haproxy.connections.frontend:int}/%{INT:haproxy.connections.backend:int}/%{INT:haproxy.connections.server:int}/%{INT:haproxy.connections.retries:int} %{INT:haproxy.server_queue:int}/%{INT:haproxy.backend_queue:int}`, +} diff --git a/vendor/github.com/elastic/go-grok/patterns/httpd.go b/vendor/github.com/elastic/go-grok/patterns/httpd.go new file mode 100644 index 00000000000..eff84457ab3 --- /dev/null +++ b/vendor/github.com/elastic/go-grok/patterns/httpd.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package patterns + +var Httpd map[string]string = map[string]string{ + "HTTPDUSER": `%{EMAILADDRESS}|%{USER}`, + "HTTPDERROR_DATE": `%{DAY} %{MONTH} %{MONTHDAY} %{TIME} %{YEAR}`, + + "HTTPD_COMMONLOG": `%{IPORHOST:source.address} (?:-|%{HTTPDUSER:apache.access.user.identity}) (?:-|%{HTTPDUSER:user.name}) \[%{HTTPDATE:timestamp}\] "(?:%{WORD:http.request.method} %{NOTSPACE:url.original}(?: HTTP/%{NUMBER:http.version})?|%{DATA})" (?:-|%{INT:http.response.status_code:int}) (?:-|%{INT:http.response.body.size:long})`, + "HTTPD_COMBINEDLOG": `%{HTTPD_COMMONLOG} "(?:-|%{DATA:http.request.referrer})" "(?:-|%{DATA:user_agent.original})"`, + + "HTTPD20_ERRORLOG": `\[%{HTTPDERROR_DATE:timestamp}\] \[%{LOGLEVEL:log.level}\] (?:\[client %{IPORHOST:source.address}\] )?%{GREEDYDATA:message}`, + "HTTPD24_ERRORLOG": `\[%{HTTPDERROR_DATE:timestamp}\] \[(?:%{WORD:apache.error.module})?:%{LOGLEVEL:log.level}\] \[pid %{POSINT:process.pid:long}(:tid %{INT:process.thread.id:int})?\](?: \(%{POSINT:apache.error.proxy.error.code}\)?%{DATA:apache.error.proxy.error.message}:)?(?: \[client %{IPORHOST:source.address}(?::%{POSINT:source.port:int})?\])?(?: %{DATA:error.code}:)? %{GREEDYDATA:message}`, + "HTTPD_ERRORLOG": `%{HTTPD20_ERRORLOG}|%{HTTPD24_ERRORLOG}`, + + "COMMONAPACHELOG": `%{HTTPD_COMMONLOG}`, + "COMBINEDAPACHELOG": `%{HTTPD_COMBINEDLOG}`, +} diff --git a/vendor/github.com/elastic/go-grok/patterns/java.go b/vendor/github.com/elastic/go-grok/patterns/java.go new file mode 100644 index 00000000000..12e5f2f0056 --- /dev/null +++ b/vendor/github.com/elastic/go-grok/patterns/java.go @@ -0,0 +1,46 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package patterns + +var Java map[string]string = map[string]string{ + "JAVACLASS": `(?:[a-zA-Z$_][a-zA-Z$_0-9]*\.)*[a-zA-Z$_][a-zA-Z$_0-9]*`, + "JAVAFILE": `(?:[a-zA-Z$_0-9. 
-]+)`, + "JAVAMETHOD": `(?:(<(?:cl)?init>)|[a-zA-Z$_][a-zA-Z$_0-9]*)`, + "JAVASTACKTRACEPART": `%{SPACE}at %{JAVACLASS:java.log.origin.class.name}\.%{JAVAMETHOD:log.origin.function}\(%{JAVAFILE:log.origin.file.name}(?::%{INT:log.origin.file.line:int})?\)`, + "JAVATHREAD": `(?:[A-Z]{2}-Processor[\d]+)`, + "JAVALOGMESSAGE": `(?:.*)`, + + "CATALINA7_DATESTAMP": `%{MONTH} %{MONTHDAY}, %{YEAR} %{HOUR}:%{MINUTE}:%{SECOND} (?:AM|PM)`, + "CATALINA7_LOG": `%{CATALINA7_DATESTAMP:timestamp} %{JAVACLASS:java.log.origin.class.name}(?: %{JAVAMETHOD:log.origin.function})?\s*(?:%{LOGLEVEL:log.level}:)? %{JAVALOGMESSAGE:message}`, + + "CATALINA8_DATESTAMP": `%{MONTHDAY}-%{MONTH}-%{YEAR} %{HOUR}:%{MINUTE}:%{SECOND}`, + "CATALINA8_LOG": `%{CATALINA8_DATESTAMP:timestamp} %{LOGLEVEL:log.level} \[%{DATA:java.log.origin.thread.name}\] %{JAVACLASS:java.log.origin.class.name}\.(?:%{JAVAMETHOD:log.origin.function})? %{JAVALOGMESSAGE:message}`, + + "CATALINA_DATESTAMP": `(?:%{CATALINA8_DATESTAMP})|(?:%{CATALINA7_DATESTAMP})`, + "CATALINALOG": `(?:%{CATALINA8_LOG})|(?:%{CATALINA7_LOG})`, + + "TOMCAT7_LOG": `%{CATALINA7_LOG}`, + "TOMCAT8_LOG": `%{CATALINA8_LOG}`, + + "TOMCATLEGACY_DATESTAMP": `%{YEAR}-%{MONTHNUM}-%{MONTHDAY} %{HOUR}:%{MINUTE}:%{SECOND}(?: %{ISO8601_TIMEZONE})?`, + "TOMCATLEGACY_LOG": `%{TOMCATLEGACY_DATESTAMP:timestamp} \| %{LOGLEVEL:log.level} \| %{JAVACLASS:java.log.origin.class.name} - %{JAVALOGMESSAGE:message}`, + + "TOMCAT_DATESTAMP": `(?:%{CATALINA8_DATESTAMP})|(?:%{CATALINA7_DATESTAMP})|(?:%{TOMCATLEGACY_DATESTAMP})`, + + "TOMCATLOG": `(?:%{TOMCAT8_LOG})|(?:%{TOMCAT7_LOG})|(?:%{TOMCATLEGACY_LOG})`, +} diff --git a/vendor/github.com/elastic/go-grok/patterns/junos.go b/vendor/github.com/elastic/go-grok/patterns/junos.go new file mode 100644 index 00000000000..c244e156803 --- /dev/null +++ b/vendor/github.com/elastic/go-grok/patterns/junos.go @@ -0,0 +1,27 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
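
The pattern sets in these files are plain data: each value is a regular expression that may reference other entries, or go-grok's base set, via %{NAME} or %{NAME:field}. A minimal sketch of that expansion step follows; the base patterns (MONTH, HOUR, and friends) are simplified stand-ins assumed for illustration, not the library's real definitions, and field captures are dropped rather than rewritten into named groups the way the real library does.

// Illustrative only: recursively expand %{NAME} / %{NAME:field}
// references until a plain regular expression remains.
package main

import (
	"fmt"
	"regexp"
)

// Simplified stand-ins for base patterns from go-grok's default set.
var base = map[string]string{
	"MONTH":    `\b(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\b`,
	"MONTHDAY": `(?:0?[1-9]|[12][0-9]|3[01])`,
	"YEAR":     `\d{4}`,
	"HOUR":     `(?:2[0-3]|[01]?[0-9])`,
	"MINUTE":   `[0-5][0-9]`,
	"SECOND":   `[0-5][0-9]`,
}

var ref = regexp.MustCompile(`%\{(\w+)(?::[^}]+)?\}`)

// expand resolves pattern references against the given sets; the loop is
// bounded because these definitions only nest a few levels deep.
func expand(p string, sets ...map[string]string) string {
	for depth := 0; depth < 10; depth++ {
		next := ref.ReplaceAllStringFunc(p, func(m string) string {
			name := ref.FindStringSubmatch(m)[1]
			for _, s := range sets {
				if def, ok := s[name]; ok {
					return def
				}
			}
			return m // unknown reference: leave untouched
		})
		if next == p {
			break
		}
		p = next
	}
	return p
}

func main() {
	// CATALINA7_DATESTAMP from the Java set above, minus field captures.
	java := map[string]string{
		"CATALINA7_DATESTAMP": `%{MONTH} %{MONTHDAY}, %{YEAR} %{HOUR}:%{MINUTE}:%{SECOND} (?:AM|PM)`,
	}
	re := regexp.MustCompile(expand(`%{CATALINA7_DATESTAMP}`, java, base))
	fmt.Println(re.MatchString("Jul 30, 2024 11:15:02 AM")) // true
}
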
+ +package patterns + +var Junos map[string]string = map[string]string{ + "RT_FLOW_TAG": `(?:RT_FLOW_SESSION_CREATE|RT_FLOW_SESSION_CLOSE|RT_FLOW_SESSION_DENY)`, + "RT_FLOW_EVENT": `%{RT_FLOW_TAG}`, + + "RT_FLOW1": `%{RT_FLOW_TAG:juniper.srx.tag}: %{GREEDYDATA:juniper.srx.reason}: %{IP:source.address}/%{INT:source.port:int}->%{IP:destination.address}/%{INT:destination.port:int} %{DATA:juniper.srx.service_name} %{IP:source.nat.ip}/%{INT:source.nat.port:int}->%{IP:destination.nat.ip}/%{INT:destination.nat.port:int} (?:(?:None)|(?:%{DATA:juniper.srx.src_nat_rule_name})) (?:(?:None)|(?:%{DATA:juniper.srx.dst_nat_rule_name})) %{INT:network.iana_number} %{DATA:rule.name} %{DATA:observer.ingress.zone} %{DATA:observer.egress.zone} %{INT:juniper.srx.session_id} \d+\(%{INT:source.bytes:long}\) \d+\(%{INT:destination.bytes:long}\) %{INT:juniper.srx.elapsed_time:int} .*`, + "RT_FLOW2": `%{RT_FLOW_TAG:juniper.srx.tag}: session created %{IP:source.address}/%{INT:source.port:int}->%{IP:destination.address}/%{INT:destination.port:int} %{DATA:juniper.srx.service_name} %{IP:source.nat.ip}/%{INT:source.nat.port:int}->%{IP:destination.nat.ip}/%{INT:destination.nat.port:int} (?:(?:None)|(?:%{DATA:juniper.srx.src_nat_rule_name})) (?:(?:None)|(?:%{DATA:juniper.srx.dst_nat_rule_name})) %{INT:network.iana_number} %{DATA:rule.name} %{DATA:observer.ingress.zone} %{DATA:observer.egress.zone} %{INT:juniper.srx.session_id} .*`, + "RT_FLOW3": `%{RT_FLOW_TAG:juniper.srx.tag}: session denied %{IP:source.address}/%{INT:source.port:int}->%{IP:destination.address}/%{INT:destination.port:int} %{DATA:juniper.srx.service_name} %{INT:network.iana_number}\(\d\) %{DATA:rule.name} %{DATA:observer.ingress.zone} %{DATA:observer.egress.zone} (.*)?`, +} diff --git a/vendor/github.com/elastic/go-grok/patterns/maven.go b/vendor/github.com/elastic/go-grok/patterns/maven.go new file mode 100644 index 00000000000..dab64d28ddf --- /dev/null +++ b/vendor/github.com/elastic/go-grok/patterns/maven.go @@ -0,0 +1,22 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package patterns + +var Maven map[string]string = map[string]string{ + "MAVEN_VERSION": `(?:(\d+)\.)?(?:(\d+)\.)?(\*|\d+)(?:[.-](RELEASE|SNAPSHOT))?`, +} diff --git a/vendor/github.com/elastic/go-grok/patterns/mcollective.go b/vendor/github.com/elastic/go-grok/patterns/mcollective.go new file mode 100644 index 00000000000..d1a9600b5bd --- /dev/null +++ b/vendor/github.com/elastic/go-grok/patterns/mcollective.go @@ -0,0 +1,23 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package patterns + +var MCollective map[string]string = map[string]string{ + "MCOLLECTIVE": `., \[%{TIMESTAMP_ISO8601:timestamp} #%{POSINT:process.pid:int}\]%{SPACE}%{LOGLEVEL:log.level}`, + "MCOLLECTIVEAUDIT": `%{TIMESTAMP_ISO8601:timestamp}:`, +} diff --git a/vendor/github.com/elastic/go-grok/patterns/mongodb.go b/vendor/github.com/elastic/go-grok/patterns/mongodb.go new file mode 100644 index 00000000000..a23d6e1ea50 --- /dev/null +++ b/vendor/github.com/elastic/go-grok/patterns/mongodb.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package patterns + +var MongoDB map[string]string = map[string]string{ + "MONGO_LOG": `%{SYSLOGTIMESTAMP:timestamp} \[%{WORD:db.mongodb.component}\] %{GREEDYDATA:message}`, + "MONGO_QUERY_CONTENT": `(.*?)`, + "MONGO_QUERY": `\{ %{MONGO_QUERY_CONTENT:MONGO_QUERY} \} ntoreturn:`, + "MONGO_SLOWQUERY": `%{WORD:db.mongodb.profile.op} %{MONGO_WORDDASH:db.mongodb.database}\.%{MONGO_WORDDASH:db.mongodb.collection} %{WORD}: \{ %{MONGO_QUERY_CONTENT:db.mongodb.query.original} \} ntoreturn:%{NONNEGINT:db.mongodb.profile.ntoreturn:int} ntoskip:%{NONNEGINT:db.mongodb.profile.ntoskip:int} nscanned:%{NONNEGINT:db.mongodb.profile.nscanned:int}.*? nreturned:%{NONNEGINT:db.mongodb.profile.nreturned:int}.*? %{INT:db.mongodb.profile.duration:int}ms`, + "MONGO_WORDDASH": `\b[\w-]+\b`, + "MONGO3_SEVERITY": `\w`, + "MONGO3_COMPONENT": `%{WORD}`, + "MONGO3_LOG": `%{TIMESTAMP_ISO8601:timestamp} %{MONGO3_SEVERITY:log.level} (?:-|%{MONGO3_COMPONENT:db.mongodb.component})%{SPACE}(?:\[%{DATA:db.mongodb.context}\])? %{GREEDYDATA:message}`, +} diff --git a/vendor/github.com/elastic/go-grok/patterns/postgresql.go b/vendor/github.com/elastic/go-grok/patterns/postgresql.go new file mode 100644 index 00000000000..5efa6c19151 --- /dev/null +++ b/vendor/github.com/elastic/go-grok/patterns/postgresql.go @@ -0,0 +1,22 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package patterns + +var PostgreSQL map[string]string = map[string]string{ + "POSTGRESQL": "%{DATESTAMP:timestamp} %{TZ:event.timezone} %{DATA:user.name} %{GREEDYDATA:postgresql.log.connection_id} %{POSINT:process.pid:int}", +} diff --git a/vendor/github.com/elastic/go-grok/patterns/rails.go b/vendor/github.com/elastic/go-grok/patterns/rails.go new file mode 100644 index 00000000000..4ddd67972e4 --- /dev/null +++ b/vendor/github.com/elastic/go-grok/patterns/rails.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package patterns + +var Rails map[string]string = map[string]string{ + "RUUID": `\S{32}`, + "RCONTROLLER": `(?P<rails.controller.class>[^#]+)#(?P<rails.controller.action>\w+)`, + + "RAILS3HEAD": `(?m)Started %{WORD:http.request.method} "%{URIPATHPARAM:url.original}" for %{IPORHOST:source.address} at (?<timestamp>%{YEAR}-%{MONTHNUM}-%{MONTHDAY} %{HOUR}:%{MINUTE}:%{SECOND} %{ISO8601_TIMEZONE})`, + "RPROCESSING": `\W*Processing by %{RCONTROLLER} as (?P<rails.request.format>\S+)(?:\W*Parameters: {%{DATA:rails.request.params}}\W*)?`, + "RAILS3FOOT": `Completed %{POSINT:http.response.status_code:int}%{DATA} in %{NUMBER:rails.request.duration.total:float}ms %{RAILS3PROFILE}%{GREEDYDATA}`, + "RAILS3PROFILE": `(?:\(Views: %{NUMBER:rails.request.duration.view:float}ms \| ActiveRecord: %{NUMBER:rails.request.duration.active_record:float}ms|\(ActiveRecord: %{NUMBER:rails.request.duration.active_record:float}ms)?`, + + "RAILS3": `%{RAILS3HEAD}(?:%{RPROCESSING})?(?P<rails.request.explain.original>(?:%{DATA}\n)*)(?:%{RAILS3FOOT})?`, +} diff --git a/vendor/github.com/elastic/go-grok/patterns/redis.go b/vendor/github.com/elastic/go-grok/patterns/redis.go new file mode 100644 index 00000000000..5e4b119142f --- /dev/null +++ b/vendor/github.com/elastic/go-grok/patterns/redis.go @@ -0,0 +1,24 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package patterns + +var Redis map[string]string = map[string]string{ + "REDISTIMESTAMP": `%{MONTHDAY} %{MONTH} %{TIME}`, + "REDISLOG": `\[%{POSINT:process.pid:int}\] %{REDISTIMESTAMP:timestamp} \*`, + "REDISMONLOG": `%{NUMBER:timestamp} \[%{INT:redis.database.id} %{IP:client.address}:%{POSINT:client.port:int}\] "%{WORD:redis.command.name}"\s?%{GREEDYDATA:redis.command.args}`, +} diff --git a/vendor/github.com/elastic/go-grok/patterns/ruby.go b/vendor/github.com/elastic/go-grok/patterns/ruby.go new file mode 100644 index 00000000000..4d27bd8a4f5 --- /dev/null +++ b/vendor/github.com/elastic/go-grok/patterns/ruby.go @@ -0,0 +1,23 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package patterns + +var Ruby map[string]string = map[string]string{ + "RUBY_LOGLEVEL": `(?:DEBUG|FATAL|ERROR|WARN|INFO)`, + "RUBY_LOGGER": `[DFEWI], \[%{TIMESTAMP_ISO8601:timestamp} #%{POSINT:process.pid:int}\] *%{RUBY_LOGLEVEL:log.level} -- +%{DATA:process.command}: %{GREEDYDATA:message}`, +} diff --git a/vendor/github.com/elastic/go-grok/patterns/squid.go b/vendor/github.com/elastic/go-grok/patterns/squid.go new file mode 100644 index 00000000000..d1d50657896 --- /dev/null +++ b/vendor/github.com/elastic/go-grok/patterns/squid.go @@ -0,0 +1,23 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
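
For consumers, each exported map is meant to be registered with a parser instance rather than compiled by hand. A hedged usage sketch against the Redis set above, using the New/AddPatterns/Compile/ParseString calls shown in go-grok's README; treat the exact signatures as assumptions, not a verified API reference:

package main

import (
	"fmt"

	grok "github.com/elastic/go-grok"
	"github.com/elastic/go-grok/patterns"
)

func main() {
	g := grok.New()
	// Register the Redis definitions from patterns/redis.go above.
	if err := g.AddPatterns(patterns.Redis); err != nil {
		panic(err)
	}
	// true = report only the named %{...:field} captures.
	if err := g.Compile("%{REDISMONLOG}", true); err != nil {
		panic(err)
	}
	res, err := g.ParseString(`1702307113.589484 [0 127.0.0.1:51706] "GET" mykey`)
	if err != nil {
		panic(err)
	}
	fmt.Println(res["redis.command.name"], res["client.address"]) // GET 127.0.0.1
}
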
+ +package patterns + +var Squid map[string]string = map[string]string{ + "SQUID3_STATUS": `(?:%{POSINT:http.response.status_code:int}|0|000)`, + "SQUID3": `%{NUMBER:timestamp}\s+%{NUMBER:squid.request.duration:int}\s%{IP:source.address}\s%{WORD:event.action}/%{SQUID3_STATUS}\s%{INT:http.response.bytes:long}\s%{WORD:http.request.method}\s%{NOTSPACE:url.original}\s(?:-|%{NOTSPACE:user.name})\s%{WORD:squid.hierarchy_code}/(?:-|%{IPORHOST:destination.address})\s(?:-|%{NOTSPACE:http.response.mime_type})`, +} diff --git a/vendor/github.com/elastic/go-grok/patterns/syslog.go b/vendor/github.com/elastic/go-grok/patterns/syslog.go new file mode 100644 index 00000000000..ba84266a7c7 --- /dev/null +++ b/vendor/github.com/elastic/go-grok/patterns/syslog.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package patterns + +var Syslog map[string]string = map[string]string{ + "SYSLOG5424PRINTASCII": `[!-~]+`, + + "SYSLOGBASE2": `(?:%{SYSLOGTIMESTAMP:timestamp}|%{TIMESTAMP_ISO8601:timestamp})(?: %{SYSLOGFACILITY})?(?: %{SYSLOGHOST:host.name})?(?: %{SYSLOGPROG}:)?`, + "SYSLOGPAMSESSION": `%{SYSLOGBASE} (%{GREEDYDATA:message})%{WORD:system.auth.pam.module}\(%{DATA:system.auth.pam.origin}\): session %{WORD:system.auth.pam.session_state} for user %{USERNAME:user.name}(?: by %{GREEDYDATA})?`, + + "CRON_ACTION": `[A-Z ]+`, + "CRONLOG": `%{SYSLOGBASE} \(%{USER:user.name}\) %{CRON_ACTION:system.cron.action} \(%{DATA:message}\)`, + + "SYSLOGLINE": `%{SYSLOGBASE2} %{GREEDYDATA:message}`, + + "SYSLOG5424PRI": `<%{NONNEGINT:log.syslog.priority:int}>`, + "SYSLOG5424SD": `\[%{DATA}\]+`, + "SYSLOG5424BASE": `%{SYSLOG5424PRI}%{NONNEGINT:system.syslog.version} +(?:-|%{TIMESTAMP_ISO8601:timestamp}) +(?:-|%{IPORHOST:host.name}) +(?:-|%{SYSLOG5424PRINTASCII:process.command}) +(?:-|%{POSINT:process.pid:int}) +(?:-|%{SYSLOG5424PRINTASCII:event.code}) +(?:-|%{SYSLOG5424SD:system.syslog.structured_data})?`, + + "SYSLOG5424LINE": `%{SYSLOG5424BASE} +%{GREEDYDATA:message}`, +} diff --git a/vendor/github.com/elastic/lunes/.editorconfig b/vendor/github.com/elastic/lunes/.editorconfig new file mode 100644 index 00000000000..0f7c6831979 --- /dev/null +++ b/vendor/github.com/elastic/lunes/.editorconfig @@ -0,0 +1,8 @@ +# See: http://editorconfig.org +root = true + +[*] +charset = utf-8 +end_of_line = lf +insert_final_newline = true +trim_trailing_whitespace = true \ No newline at end of file diff --git a/vendor/github.com/elastic/lunes/.gitignore b/vendor/github.com/elastic/lunes/.gitignore new file mode 100644 index 00000000000..91d5320da4f --- /dev/null +++ b/vendor/github.com/elastic/lunes/.gitignore @@ -0,0 +1,25 @@ +# If you prefer the allow list template instead of the deny list, see community template: +# 
https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore +# +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# Go workspace file +go.work + +# Idea +.idea +build diff --git a/vendor/github.com/elastic/lunes/.go-version b/vendor/github.com/elastic/lunes/.go-version new file mode 100644 index 00000000000..da9594fd66f --- /dev/null +++ b/vendor/github.com/elastic/lunes/.go-version @@ -0,0 +1 @@ +1.22.5 diff --git a/vendor/github.com/elastic/lunes/.golangci.yml b/vendor/github.com/elastic/lunes/.golangci.yml new file mode 100644 index 00000000000..8c2b5e60d07 --- /dev/null +++ b/vendor/github.com/elastic/lunes/.golangci.yml @@ -0,0 +1,114 @@ +# options for analysis running +run: + # timeout for analysis, e.g. 30s, 5m, default is 1m + timeout: 1m + +issues: + # Maximum count of issues with the same text. + # Set to 0 to disable. + # Default: 3 + max-same-issues: 0 + # Maximum issues count per one linter. + # Set to 0 to disable. + # Default: 50 + max-issues-per-linter: 0 + +output: + sort-results: true + +# Find the whole list here https://golangci-lint.run/usage/linters/ +linters: + disable-all: true + enable: + - errcheck # checking for unchecked errors in go programs + - errorlint # errorlint is a linter for that can be used to find code that will cause problems with the error wrapping scheme introduced in Go 1.13. + - forbidigo # forbids identifiers matched by reg exps + - gomoddirectives # manage the use of 'replace', 'retract', and 'excludes' directives in go.mod. + - gosimple # linter for Go source code that specializes in simplifying a code + - misspell # finds commonly misspelled English words in comments + - nakedret # finds naked returns in functions greater than a specified function length + - nolintlint # reports ill-formed or insufficient nolint directives + - staticcheck # Staticcheck is a go vet on steroids, applying a ton of static analysis checks + - stylecheck # a replacement for golint + - unused # checks Go code for unused constants, variables, functions and types + - govet # Vet examines Go source code and reports suspicious constructs, such as Printf calls whose arguments do not align with the format string + - ineffassign # detects when assignments to existing variables are not used + - typecheck # Like the front-end of a Go compiler, parses and type-checks Go code + - unused # Finds unused global variables and constants + - asciicheck # simple linter to check that your code does not contain non-ASCII identifiers + - bodyclose # checks whether HTTP response body is closed successfully + - durationcheck # check for two durations multiplied together + - exportloopref # checks for pointers to enclosing loop variables + - goimports # Goimports does everything that gofmt does. Additionally, it checks unused imports + - gosec # inspects source code for security problems + - importas # enforces consistent import aliases + - nilerr # finds the code that returns nil even if it checks that the error is not nil. + - noctx # noctx finds sending http request without context.Context + - unconvert # Remove unnecessary type conversions + - wastedassign # wastedassign finds wasted assignment statements. 
+ - gomodguard # check for blocked dependencies + +# all available settings of specific linters +linters-settings: + errcheck: + # report about not checking of errors in type assertions: `a := b.(MyStruct)`; + check-type-assertions: true + # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`. + check-blank: false + # List of functions to exclude from checking, where each entry is a single function to exclude. + # See https://github.com/kisielk/errcheck#excluding-functions for details. + exclude-functions: + - (mapstr.M).Delete # Only returns ErrKeyNotFound, can safely be ignored. + - (mapstr.M).Put # Can only fail on type conversions, usually safe to ignore. + + errorlint: + # Check whether fmt.Errorf uses the %w verb for formatting errors. See the readme for caveats + errorf: true + # Check for plain type assertions and type switches + asserts: true + # Check for plain error comparisons + comparison: true + + forbidigo: + # Forbid the following identifiers + forbid: + - fmt.Print.* # too much log noise + # Exclude godoc examples from forbidigo checks. Default is true. + exclude-godoc-examples: true + + nakedret: + # make an issue if func has more lines of code than this setting and it has naked returns; default is 30 + max-func-lines: 0 + + nolintlint: + # Enable to ensure that nolint directives are all used. Default is true. + allow-unused: false + # Exclude following linters from requiring an explanation. Default is []. + allow-no-explanation: [] + # Enable to require an explanation of nonzero length after each nolint directive. Default is false. + require-explanation: true + # Enable to require nolint directives to mention the specific linter being suppressed. Default is false. + require-specific: true + + gomodguard: + blocked: + # List of blocked modules. + modules: + # Blocked module. + - github.com/pkg/errors: + # Recommended modules that should be used instead. (Optional) + recommendations: + - errors + - fmt + reason: "This package is deprecated, use fmt.Errorf with %%w instead" + - github.com/elastic/beats/v7: + reason: "There must be no Beats dependency" + + staticcheck: + # https://staticcheck.io/docs/options#checks + checks: ["all"] + + stylecheck: + # https://staticcheck.io/docs/options#checks + checks: ["all"] + diff --git a/vendor/github.com/elastic/lunes/CHANGELOG.md b/vendor/github.com/elastic/lunes/CHANGELOG.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/LICENSE b/vendor/github.com/elastic/lunes/LICENSE similarity index 100% rename from vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/LICENSE rename to vendor/github.com/elastic/lunes/LICENSE diff --git a/vendor/github.com/elastic/lunes/NOTICE.txt b/vendor/github.com/elastic/lunes/NOTICE.txt new file mode 100644 index 00000000000..2a1c861765d --- /dev/null +++ b/vendor/github.com/elastic/lunes/NOTICE.txt @@ -0,0 +1,1991 @@ +Lunes +Copyright 2022-2024 Elasticsearch BV + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). 
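
The gomodguard block in the .golangci.yml above names the standard-library replacement for github.com/pkg/errors. A small illustrative snippet of the fmt.Errorf + %w style that the config's reason string recommends (loadConfig is a hypothetical helper, not code from this repository):

package main

import (
	"errors"
	"fmt"
	"os"
)

func loadConfig(path string) error {
	if _, err := os.ReadFile(path); err != nil {
		// %w keeps the cause inspectable with errors.Is / errors.As,
		// which is the style the linter rule steers code toward.
		return fmt.Errorf("loading config %q: %w", path, err)
	}
	return nil
}

func main() {
	err := loadConfig("/nonexistent/config.yml")
	fmt.Println(errors.Is(err, os.ErrNotExist)) // true: the error chain is preserved
}
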
+ +================================================================================ +Third party libraries used by the Lunes Libraries: +================================================================================ + + +-------------------------------------------------------------------------------- +Dependency : github.com/elastic/go-licenser +Version: v0.4.2 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-licenser@v0.4.2/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2018 Elasticsearch B.V. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : github.com/magefile/mage +Version: v1.15.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/magefile/mage@v1.15.0/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2017 the Mage authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : go.elastic.co/go-licence-detector +Version: v0.6.1 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/go.elastic.co/go-licence-detector@v0.6.1/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/tools +Version: v0.23.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/tools@v0.23.0/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + + +================================================================================ +Indirect dependencies + + +-------------------------------------------------------------------------------- +Dependency : github.com/cyphar/filepath-securejoin +Version: v0.2.5 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/cyphar/filepath-securejoin@v0.2.5/LICENSE: + +Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. +Copyright (C) 2017 SUSE LLC. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/davecgh/go-spew +Version: v1.1.1 +Licence type (autodetected): ISC +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/davecgh/go-spew@v1.1.1/LICENSE: + +ISC License + +Copyright (c) 2012-2016 Dave Collins + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/gobuffalo/here +Version: v0.6.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/gobuffalo/here@v0.6.0/LICENSE: + +The MIT License (MIT) + +Copyright (c) 2019 Mark Bates + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/google/go-cmp +Version: v0.6.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/google/go-cmp@v0.6.0/LICENSE: + +Copyright (c) 2017 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/google/licenseclassifier +Version: v0.0.0-20200402202327-879cb1424de0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/google/licenseclassifier@v0.0.0-20200402202327-879cb1424de0/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : github.com/karrick/godirwalk +Version: v1.15.6 +Licence type (autodetected): BSD-2-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/karrick/godirwalk@v1.15.6/LICENSE: + +BSD 2-Clause License + +Copyright (c) 2017, Karrick McDermott +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/kr/pretty +Version: v0.1.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/kr/pretty@v0.1.0/License: + +The MIT License (MIT) + +Copyright 2012 Keith Rarick + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/kr/pty +Version: v1.1.1 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/kr/pty@v1.1.1/License: + +Copyright (c) 2011 Keith Rarick + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, +sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall +be included in all copies or substantial portions of the +Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY +KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS +OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/kr/text +Version: v0.1.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/kr/text@v0.1.0/License: + +Copyright 2012 Keith Rarick + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/markbates/pkger +Version: v0.17.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/markbates/pkger@v0.17.0/LICENSE: + +The MIT License (MIT) + +Copyright (c) 2019 Mark Bates + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/pmezard/go-difflib +Version: v1.0.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/pmezard/go-difflib@v1.0.0/LICENSE: + +Copyright (c) 2013, Patrick Mezard +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + The names of its contributors may not be used to endorse or promote +products derived from this software without specific prior written +permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/sergi/go-diff +Version: v1.1.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/sergi/go-diff@v1.1.0/LICENSE: + +Copyright (c) 2012-2016 The go-diff Authors. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. + + + +-------------------------------------------------------------------------------- +Dependency : github.com/stretchr/objx +Version: v0.1.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/stretchr/objx@v0.1.0/LICENSE: + +The MIT License + +Copyright (c) 2014 Stretchr, Inc. 
+Copyright (c) 2017-2018 objx contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/stretchr/testify +Version: v1.6.1 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/stretchr/testify@v1.6.1/LICENSE: + +MIT License + +Copyright (c) 2012-2020 Mat Ryer, Tyler Bunnell and contributors. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/yuin/goldmark +Version: v1.4.13 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/yuin/goldmark@v1.4.13/LICENSE: + +MIT License + +Copyright (c) 2019 Yusuke Inuzuka + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/crypto +Version: v0.13.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/crypto@v0.13.0/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/mod +Version: v0.19.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/mod@v0.19.0/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/net +Version: v0.27.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/net@v0.27.0/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/sync +Version: v0.7.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/sync@v0.7.0/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/sys +Version: v0.22.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/sys@v0.22.0/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/telemetry +Version: v0.0.0-20240521205824-bda55230c457 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/telemetry@v0.0.0-20240521205824-bda55230c457/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/term +Version: v0.12.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/term@v0.12.0/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/text +Version: v0.13.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/text@v0.13.0/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/xerrors +Version: v0.0.0-20190717185122-a985d3407aa7 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/xerrors@v0.0.0-20190717185122-a985d3407aa7/LICENSE: + +Copyright (c) 2019 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + +-------------------------------------------------------------------------------- +Dependency : gopkg.in/check.v1 +Version: v1.0.0-20190902080502-41f04d3bba15 +Licence type (autodetected): BSD-2-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/gopkg.in/check.v1@v1.0.0-20190902080502-41f04d3bba15/LICENSE: + +Gocheck - A rich testing framework for Go + +Copyright (c) 2010-2013 Gustavo Niemeyer + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : gopkg.in/yaml.v2 +Version: v2.2.7 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/gopkg.in/yaml.v2@v2.2.7/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : gopkg.in/yaml.v3 +Version: v3.0.0-20200313102051-9f266ea9e77c +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/gopkg.in/yaml.v3@v3.0.0-20200313102051-9f266ea9e77c/LICENSE: + + +This project is covered by two different licenses: MIT and Apache. 
+
+#### MIT License ####
+
+The following files were ported to Go from C files of libyaml, and thus
+are still covered by their original MIT license, with the additional
+copyright staring in 2011 when the project was ported over:
+
+    apic.go emitterc.go parserc.go readerc.go scannerc.go
+    writerc.go yamlh.go yamlprivateh.go
+
+Copyright (c) 2006-2010 Kirill Simonov
+Copyright (c) 2006-2011 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+### Apache License ###
+
+All the remaining project files are covered by the Apache license:
+
+Copyright (c) 2011-2019 Canonical Ltd
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+
diff --git a/vendor/github.com/elastic/lunes/README.md b/vendor/github.com/elastic/lunes/README.md
new file mode 100644
index 00000000000..fce081713ab
--- /dev/null
+++ b/vendor/github.com/elastic/lunes/README.md
@@ -0,0 +1,144 @@
+# Lunes
+
+---
+
+**Lunes** is a [Go](http://golang.org) library for parsing localized time strings into `time.Time`.
+
+It is not intended to replace the standard `time` package parsing functions; instead, it acts as a wrapper,
+translating the provided value to English before invoking `time.Parse` or `time.ParseInLocation`.
+
+It currently supports almost all [CLDR](https://cldr.unicode.org/) core locales (900+, including drafts),
+and is limited to the **gregorian** calendars.
+
+Once the official Go i18n features for time parsing are ready, this library should be replaced by them.
+
+## Usage
+
+#### Parse
+
+```go
+// it's like time.Parse, but with an additional locale parameter to perform the value translation.
+// the language argument must be a well-formed BCP 47 language tag, e.g. ("en", "en-US"), and
+// a known locale. If no data is found for the language, it returns ErrUnsupportedLocale.
+// If the given locale does not support any layout element specified on the layout argument,
+// it results in an ErrUnsupportedLayoutElem error. On the other hand, if the value does not
+// match the layout, an ErrLayoutMismatch is returned.
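+// For instance, "lunes oct 27 1988 11:53:29" (es-ES) would be translated to
+// "Monday Oct 27 1988 11:53:29" before being handed to time.Parse
+// (a hypothetical illustration of the translation step, not library output).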
+t, err := lunes.Parse("Monday Jan _2 2006 15:04:05", "lunes oct 27 1988 11:53:29", lunes.LocaleEsES)
+
+// parse in a specific time zone.
+t, err := lunes.ParseInLocation("Monday Jan _2 2006 15:04:05", "lunes oct 27 1988 11:53:29", lunes.LocaleEsES, time.UTC)
+```
+
+```go
+// creates a new generic locale for the given BCP 47 language tag, using the default CLDR
+// gregorian calendars data of the specified language. If the locale is unknown and/or no
+// default data is found, it returns ErrUnsupportedLocale.
+locale, err := lunes.NewDefaultLocale(lunes.LocaleEsES)
+
+// ParseWithLocale performs better for multiple parse operations, as it does not
+// need to look up the locale data in each iteration.
+for _, val := range valuesToParse {
+	t, err := lunes.ParseWithLocale("Monday Jan _2 2006 15:04:05", val, locale)
+}
+```
+
+#### Translate
+
+```go
+// translates the value without parsing it to time.Time. The language argument must be a
+// well-formed BCP 47 language tag, e.g. ("en", "en-US"), and a known locale. If no data is
+// found for the language, it returns ErrUnsupportedLocale.
+// If the given locale does not support any layout element specified on the layout argument,
+// it results in an ErrUnsupportedLayoutElem error. On the other hand, if the value does not
+// match the layout, an ErrLayoutMismatch is returned.
+// For the following example, it results in: Friday Jan 27 11:53:29.
+str, err := lunes.Translate("Monday Jan _2 15:04:05", "viernes ene 27 11:53:29", lunes.LocaleEsES)
+
+// the translated value is meant to be used with the time package functions
+t, err := time.Parse("Monday Jan _2 15:04:05", str)
+```
+
+#### Custom Locales
+
+A `lunes.Locale` provides a collection of time layout values in a specific language.
+It maps the time layout elements in a foreign language to their English equivalents.
+In order to use custom locales, the following functions must be implemented:
+
+```go
+// Language represents a BCP 47 tag, specifying this locale's language.
+Language() string
+
+// LongDayNames returns the long day names translations for the week days.
+// It must be sorted, starting from Sunday to Saturday, and contain all 7 elements,
+// even if one or more days are empty. If this locale does not support this format,
+// it should return an empty slice.
+LongDayNames() []string
+
+// ShortDayNames returns the short day names translations for the week days.
+// It must be sorted, starting from Sunday to Saturday, and contain all 7 elements,
+// even if one or more days are empty. If this locale does not support this format,
+// it should return an empty slice.
+ShortDayNames() []string
+
+// LongMonthNames returns the long month names translations.
+// It must be sorted, starting from January to December, and contain all 12 elements,
+// even if one or more months are empty. If this locale does not support this format,
+// it should return an empty slice.
+LongMonthNames() []string
+
+// ShortMonthNames returns the short month names translations.
+// It must be sorted, starting from January to December, and contain all 12 elements,
+// even if one or more months are empty. If this locale does not support this format,
+// it should return an empty slice.
+ShortMonthNames() []string
+
+// DayPeriods returns the periods of day translations for the AM and PM abbreviations.
+// It must be sorted, starting from AM to PM, and contain both elements, even if one
+// of them is empty. If this locale does not support this format, it should return an
+// empty slice.
+DayPeriods() []string
+```
+
+Custom locales can be used with the `lunes.ParseWithLocale`, `lunes.ParseInLocationWithLocale`, and `lunes.TranslateWithLocale`
+functions:
+
+```go
+locale := &CustomLocale{}
+
+// It's like Parse, but instead of receiving a BCP 47 language tag argument, it receives a lunes.Locale
+t, err := lunes.ParseWithLocale("Monday Jan _2 2006 15:04:05", "lunes oct 27 1988 11:53:29", locale)
+
+// It's like ParseInLocation, but instead of receiving a BCP 47 language tag argument, it receives a lunes.Locale
+t, err := lunes.ParseInLocationWithLocale("Monday Jan _2 2006 15:04:05", "lunes oct 27 1988 11:53:29", time.UTC, locale)
+
+// It's like Translate, but instead of receiving a BCP 47 language tag argument, it receives a lunes.Locale
+str, err := lunes.TranslateWithLocale("Monday Jan _2 2006 15:04:05", "lunes oct 27 1988 11:53:29", locale)
+```
+
+## Benchmarks
+
+Compared to [github.com/goodsign/monday](https://github.com/goodsign/monday):
+
+```
+BenchmarkTranslate-10                    3850832    303.2 ns/op    220 B/op      5 allocs/op
+BenchmarkTranslateWithLocale-10          5149981    235.1 ns/op     76 B/op      4 allocs/op
+BenchmarkParse-10                        2811612    428.1 ns/op    220 B/op      5 allocs/op
+BenchmarkParseInLocation-10              2792997    439.2 ns/op    220 B/op      5 allocs/op
+BenchmarkParseWithLocale-10              3268903    362.7 ns/op     76 B/op      4 allocs/op
+BenchmarkParseInLocationWithLocale-10    2974732    390.2 ns/op     76 B/op      4 allocs/op
+BenchmarkParseMonday-10                   213014   5584 ns/op     3754 B/op    117 allocs/op
+BenchmarkParseInLocationMonday-10         211826   5593 ns/op     3754 B/op    117 allocs/op
+```
+
+### Usage notes
+
+- It currently supports the following time layout replacements:
+  - Short day names (`Mon`)
+  - Long day names (`Monday`)
+  - Short month names (`Jan`)
+  - Long month names (`January`)
+  - Day periods (`PM`)
+- Translations are auto-generated and might be inconsistent depending on the CLDR locale [stage](https://cldr.unicode.org/index/process).
+- A few locales do not support (or are missing) translations for specific layout elements (short/long day/month names or day periods); in that case,
+  an `ErrUnsupportedLayoutElem` will be reported.
+
diff --git a/vendor/github.com/elastic/lunes/catalog-info.yaml b/vendor/github.com/elastic/lunes/catalog-info.yaml
new file mode 100644
index 00000000000..5fbdfb98a76
--- /dev/null
+++ b/vendor/github.com/elastic/lunes/catalog-info.yaml
@@ -0,0 +1,57 @@
+# Declare a Backstage Component that represents your application.
+--- +# yaml-language-server: $schema=https://json.schemastore.org/catalog-info.json +apiVersion: backstage.io/v1alpha1 +kind: Component +metadata: + name: lunes + description: Lunes is a Go library for parsing localized time strings into time.Time + +spec: + type: library + owner: group:logstash + system: platform-ingest + lifecycle: production + +--- +# yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/e57ee3bed7a6f73077a3f55a38e76e40ec87a7cf/rre.schema.json +apiVersion: backstage.io/v1alpha1 +kind: Resource +metadata: + name: buildkite-pipeline-lunes + description: Buildkite Pipeline for lunes + links: + - title: Pipeline + url: https://buildkite.com/elastic/lunes + +spec: + type: buildkite-pipeline + owner: group:logstash + system: buildkite + implementation: + apiVersion: buildkite.elastic.dev/v1 + kind: Pipeline + metadata: + name: lunes + description: Buildkite pipeline for the lunes library + spec: + branch_configuration: "main" + repository: elastic/lunes + pipeline_file: ".buildkite/pipeline.yml" + maximum_timeout_in_minutes: 60 + provider_settings: + build_pull_request_forks: false + build_pull_requests: true # requires filter_enabled and filter_condition settings as below when used with buildkite-pr-bot + build_tags: true + filter_enabled: true + filter_condition: >- + build.pull_request.id == null || (build.creator.name == 'elasticmachine' && build.pull_request.id != null) + cancel_intermediate_builds: true + cancel_intermediate_builds_branch_filter: '!main' + skip_intermediate_builds: true + skip_intermediate_builds_branch_filter: '!main' + teams: + logstash: + access_level: MANAGE_BUILD_AND_READ + everyone: + access_level: READ_ONLY diff --git a/vendor/github.com/elastic/lunes/dev-tools/mage/check.go b/vendor/github.com/elastic/lunes/dev-tools/mage/check.go new file mode 100644 index 00000000000..75d856ccab8 --- /dev/null +++ b/vendor/github.com/elastic/lunes/dev-tools/mage/check.go @@ -0,0 +1,49 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+
+package mage
+
+import (
+	"fmt"
+
+	"github.com/magefile/mage/sh"
+)
+
+//nolint:forbidigo // allow print
+func CheckNoChanges() error {
+	fmt.Println(">> fmt - go mod tidy")
+	err := sh.RunV("go", "mod", "tidy", "-v")
+	if err != nil {
+		return fmt.Errorf("failed running go mod tidy, please fix the issues reported: %w", err)
+	}
+	fmt.Println(">> fmt - git diff")
+	err = sh.RunV("git", "diff")
+	if err != nil {
+		return fmt.Errorf("failed running git diff, please fix the issues reported: %w", err)
+	}
+	fmt.Println(">> fmt - git update-index")
+	err = sh.RunV("git", "update-index", "--refresh")
+	if err != nil {
+		return fmt.Errorf("failed running git update-index --refresh, please fix the issues reported: %w", err)
+	}
+	fmt.Println(">> fmt - git diff-index")
+	err = sh.RunV("git", "diff-index", "--exit-code", "HEAD", "--")
+	if err != nil {
+		return fmt.Errorf("failed running git diff-index, please fix the issues reported: %w", err)
+	}
+	return nil
+}
diff --git a/vendor/github.com/elastic/lunes/dev-tools/mage/deps.go b/vendor/github.com/elastic/lunes/dev-tools/mage/deps.go
new file mode 100644
index 00000000000..16822bf434f
--- /dev/null
+++ b/vendor/github.com/elastic/lunes/dev-tools/mage/deps.go
@@ -0,0 +1,43 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package mage
+
+import (
+	"fmt"
+
+	"github.com/magefile/mage/mg"
+
+	"github.com/elastic/lunes/dev-tools/mage/gotool"
+)
+
+// Deps contains targets related to checking dependencies
+type Deps mg.Namespace
+
+// CheckModuleTidy checks if `go mod tidy` was run before the last commit.
+func (Deps) CheckModuleTidy() error {
+	err := gotool.Mod.Tidy()
+	if err != nil {
+		return err
+	}
+	err = assertUnchanged("go.mod")
+	if err != nil {
+		return fmt.Errorf("`go mod tidy` was not called before the last commit: %w", err)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/elastic/lunes/dev-tools/mage/fmt.go b/vendor/github.com/elastic/lunes/dev-tools/mage/fmt.go
new file mode 100644
index 00000000000..210b2e98cfe
--- /dev/null
+++ b/vendor/github.com/elastic/lunes/dev-tools/mage/fmt.go
@@ -0,0 +1,107 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package mage
+
+import (
+	"fmt"
+	"io/fs"
+	"os"
+	"path/filepath"
+
+	"github.com/magefile/mage/mg"
+	"github.com/magefile/mage/sh"
+
+	"github.com/elastic/lunes/dev-tools/mage/gotool"
+)
+
+const (
+	// GoImportsImportPath controls the import path used to install goimports.
+	GoImportsImportPath = "golang.org/x/tools/cmd/goimports"
+
+	// GoImportsLocalPrefix is a string prefix matching imports that should be
+	// grouped after third-party packages.
+	GoImportsLocalPrefix = "github.com/elastic"
+)
+
+// GoImports contains targets related to formatting the Go import blocks
+type GoImports mg.Namespace
+
+// Run executes goimports against all .go files in and below the CWD.
+func (GoImports) Run() error {
+	mg.Deps(GoImports.Install)
+	goFiles, err := FindFilesRecursive(func(path string, _ os.FileInfo) bool {
+		return filepath.Ext(path) == ".go"
+	})
+	if err != nil {
+		return err
+	}
+	if len(goFiles) == 0 {
+		return nil
+	}
+
+	fmt.Println(">> fmt - goimports: Formatting Go code") //nolint:forbidigo // it's a mage target
+	args := append(
+		[]string{"-local", GoImportsLocalPrefix, "-l", "-w"},
+		goFiles...,
+	)
+
+	return sh.RunV("goimports", args...)
+}
+
+// Install installs goimports from GoImportsImportPath.
+func (GoImports) Install() error {
+	err := gotool.Install(gotool.Install.Package(filepath.Join(GoImportsImportPath)))
+	if err != nil {
+		return fmt.Errorf("cannot install GoImports: %w", err)
+	}
+
+	return nil
+}
+
+// FindFilesRecursive recursively traverses from the CWD and invokes the given
+// match function on each regular file to determine if the given path should be
+// returned as a match. It ignores files in .git directories.
+func FindFilesRecursive(match func(path string, info os.FileInfo) bool) ([]string, error) {
+	var matches []string
+	walkDir := func(path string, d fs.DirEntry, err error) error {
+		if err != nil {
+			return err
+		}
+
+		// Don't look for files in git directories
+		if d.IsDir() && filepath.Base(path) == ".git" {
+			return filepath.SkipDir
+		}
+
+		info, err := d.Info()
+		if err != nil {
+			return fmt.Errorf("cannot get FileInfo: %w", err)
+		}
+		if !info.Mode().IsRegular() {
+			// continue
+			return nil
+		}
+
+		if match(filepath.ToSlash(path), info) {
+			matches = append(matches, path)
+		}
+		return nil
+	}
+
+	err := filepath.WalkDir(".", fs.WalkDirFunc(walkDir))
+	return matches, err
+}
diff --git a/vendor/github.com/elastic/lunes/dev-tools/mage/gotool/get.go b/vendor/github.com/elastic/lunes/dev-tools/mage/gotool/get.go
new file mode 100644
index 00000000000..9770cc1a1b5
--- /dev/null
+++ b/vendor/github.com/elastic/lunes/dev-tools/mage/gotool/get.go
@@ -0,0 +1,43 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+ +package gotool + +type goGet func(opts ...ArgOpt) error +type goDownload func(opts ...ArgOpt) error + +// Get runs `go get` and provides optionals for adding command line arguments. +var Get goGet = runGoGet + +func runGoGet(opts ...ArgOpt) error { + args := buildArgs(opts) + return runVGo("get", args) +} + +func (goGet) Download() ArgOpt { return flagBoolIf("-d", true) } +func (goGet) Update() ArgOpt { return flagBoolIf("-u", true) } +func (goGet) Package(pkg string) ArgOpt { return posArg(pkg) } + +// Download runs `go download` and provides optionals for adding command line arguments. +var Download goDownload = runGoDownload + +func runGoDownload(opts ...ArgOpt) error { + args := buildArgs(opts) + return runVGo("download", args) +} + +func (goDownload) All() ArgOpt { return posArg("all") } diff --git a/vendor/github.com/elastic/lunes/dev-tools/mage/gotool/go.go b/vendor/github.com/elastic/lunes/dev-tools/mage/gotool/go.go new file mode 100644 index 00000000000..b7a833cfe4f --- /dev/null +++ b/vendor/github.com/elastic/lunes/dev-tools/mage/gotool/go.go @@ -0,0 +1,343 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package gotool + +import ( + "fmt" + "os" + "strings" + + "github.com/magefile/mage/mg" + "github.com/magefile/mage/sh" +) + +// Args holds parameters, environment variables and flag information used to +// pass to the go tool. +type Args struct { + extra map[string]string // extra flags one can pass to the command + env map[string]string + flags map[string][]string + pos []string +} + +// ArgOpt is a functional option adding info to Args once executed. +type ArgOpt func(args *Args) + +type goInstall func(opts ...ArgOpt) error + +// Install runs `go install` and provides optionals for adding command line arguments. +var Install goInstall = runGoInstall + +func runGoInstall(opts ...ArgOpt) error { + args := buildArgs(opts) + return runVGo("install", args) +} + +func (goInstall) Package(pkg string) ArgOpt { return posArg(pkg) } +func (goInstall) Vendored() ArgOpt { return flagArg("-mod", "vendor") } + +type goTest func(opts ...ArgOpt) error + +// Test runs `go test` and provides optionals for adding command line arguments. +var Test goTest = runGoTest + +// GetModuleName returns the name of the module. +func GetModuleName() (string, error) { + lines, err := getLines(callGo(nil, "list", "-m")) + if err != nil { + return "", err + } + if len(lines) != 1 { + return "", fmt.Errorf("unexpected number of lines") + } + return lines[0], nil +} + +// ListProjectPackages lists all packages in the current project +func ListProjectPackages() ([]string, error) { + return ListPackages("./...") +} + +// ListPackages calls `go list` for every package spec given. 
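+//
+// A hypothetical usage sketch (not part of the original source):
+//
+//	pkgs, err := ListPackages("./...")
+//	// pkgs holds one import path per non-empty line of `go list` output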
+func ListPackages(pkgs ...string) ([]string, error) {
+	return getLines(callGo(nil, "list", pkgs...))
+}
+
+// ListDeps calls `go list -deps` for the given package spec.
+func ListDeps(pkg string) ([]string, error) {
+	const tmpl = `{{if not .Standard}}{{.ImportPath}}{{end}}`
+
+	return getLines(callGo(nil, "list", "-deps", "-f", tmpl, pkg))
+}
+
+// ListDepsForNotice lists all module dependencies as JSON, for NOTICE generation.
+func ListDepsForNotice() (string, error) {
+	return callGo(nil, "list", "-json", "-m", "all")
+}
+
+// ListDepsLocation calls `go list -deps` for the given package spec and maps
+// each non-standard-library import path to its directory on disk.
+func ListDepsLocation(pkg string) (map[string]string, error) {
+	const tmpl = `{{if not .Standard}}{{.ImportPath}};{{.Dir}}{{end}}`
+
+	lines, err := getLines(callGo(nil, "list", "-deps", "-f", tmpl, pkg))
+	if err != nil {
+		return nil, err
+	}
+	deps := make(map[string]string, len(lines))
+	for _, l := range lines {
+		parts := strings.Split(l, ";")
+		if len(parts) != 2 {
+			return nil, fmt.Errorf("invalid number of parts")
+		}
+		deps[parts[0]] = parts[1]
+	}
+	return deps, nil
+}
+
+// ListTestFiles lists all go and cgo test files available in a package.
+func ListTestFiles(pkg string) ([]string, error) {
+	const tmpl = `{{ range .TestGoFiles }}{{ printf "%s\n" . }}{{ end }}` +
+		`{{ range .XTestGoFiles }}{{ printf "%s\n" . }}{{ end }}`
+
+	return getLines(callGo(nil, "list", "-f", tmpl, pkg))
+}
+
+// ListModuleCacheDir returns the module cache directory containing
+// the specified module. If the module does not exist in the cache,
+// an error will be returned.
+func ListModuleCacheDir(pkg string) (string, error) {
+	return listModuleDir(pkg, false)
+}
+
+// ListModuleVendorDir returns the vendor directory containing the
+// specified module. If the module has not been vendored, an error
+// will be returned.
+func ListModuleVendorDir(pkg string) (string, error) {
+	return listModuleDir(pkg, true)
+}
+
+func listModuleDir(pkg string, vendor bool) (string, error) {
+	env := map[string]string{
+		// Make sure GOFLAGS does not influence behaviour.
+		"GOFLAGS": "",
+	}
+	args := []string{"-m", "-f", "{{.Dir}}"}
+	if vendor {
+		args = append(args, "-mod=vendor")
+	}
+	args = append(args, pkg)
+	lines, err := getLines(callGo(env, "list", args...))
+	if err != nil {
+		return "", err
+	}
+	if n := len(lines); n != 1 {
+		return "", fmt.Errorf("expected 1 line, got %d while looking for %s", n, pkg)
+	}
+	return lines[0], nil
+}
+
+// HasTests returns true if the given package contains test files.
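+//
+// A hypothetical usage sketch (not part of the original source):
+//
+//	ok, err := HasTests("github.com/elastic/lunes")
+//	// ok is true when the package lists Go or cgo test files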
+func HasTests(pkg string) (bool, error) {
+	files, err := ListTestFiles(pkg)
+	if err != nil {
+		return false, err
+	}
+	return len(files) > 0, nil
+}
+
+func (goTest) WithCoverage(to string) ArgOpt {
+	return combine(flagArg("-cover", ""), flagArgIf("-test.coverprofile", to))
+}
+func (goTest) Short(b bool) ArgOpt        { return flagBoolIf("-test.short", b) }
+func (goTest) Use(bin string) ArgOpt      { return extraArgIf("use", bin) }
+func (goTest) OS(os string) ArgOpt        { return envArgIf("GOOS", os) }
+func (goTest) ARCH(arch string) ArgOpt    { return envArgIf("GOARCH", arch) }
+func (goTest) Create() ArgOpt             { return flagArg("-c", "") }
+func (goTest) Out(path string) ArgOpt     { return flagArg("-o", path) }
+func (goTest) Package(path string) ArgOpt { return posArg(path) }
+func (goTest) Verbose() ArgOpt            { return flagArg("-test.v", "") }
+func runGoTest(opts ...ArgOpt) error {
+	args := buildArgs(opts)
+	if bin := args.Val("use"); bin != "" {
+		flags := map[string][]string{}
+		for k, v := range args.flags {
+			if strings.HasPrefix(k, "-test.") {
+				flags[k] = v
+			}
+		}
+
+		useArgs := &Args{}
+		*useArgs = *args
+		useArgs.flags = flags
+
+		_, err := sh.Exec(useArgs.env, os.Stdout, os.Stderr, bin, useArgs.build()...)
+		return err
+	}
+
+	return runVGo("test", args)
+}
+
+func getLines(out string, err error) ([]string, error) {
+	if err != nil {
+		return nil, err
+	}
+
+	lines := strings.Split(out, "\n")
+	res := lines[:0]
+	for _, line := range lines {
+		line = strings.TrimSpace(line)
+		if len(line) > 0 {
+			res = append(res, line)
+		}
+	}
+
+	return res, nil
+}
+
+func callGo(env map[string]string, cmd string, opts ...string) (string, error) { //nolint:unparam // not always receives list
+	args := []string{cmd}
+	args = append(args, opts...)
+	return sh.OutputWith(env, mg.GoCmd(), args...)
+}
+
+func runVGo(cmd string, args *Args) error {
+	return execGoWith(func(env map[string]string, cmd string, args ...string) error {
+		_, err := sh.Exec(env, os.Stdout, os.Stderr, cmd, args...)
+		return err
+	}, cmd, args)
+}
+
+func execGoWith(
+	fn func(map[string]string, string, ...string) error,
+	cmd string, args *Args,
+) error {
+	cliArgs := []string{cmd}
+	cliArgs = append(cliArgs, args.build()...)
+	return fn(args.env, mg.GoCmd(), cliArgs...)
+}
+
+func posArg(value string) ArgOpt {
+	return func(a *Args) { a.Add(value) }
+}
+
+func extraArg(k, v string) ArgOpt {
+	return func(a *Args) { a.Extra(k, v) }
+}
+
+func extraArgIf(k, v string) ArgOpt {
+	if v == "" {
+		return nil
+	}
+	return extraArg(k, v)
+}
+
+func envArg(k, v string) ArgOpt {
+	return func(a *Args) { a.Env(k, v) }
+}
+
+func envArgIf(k, v string) ArgOpt {
+	if v == "" {
+		return nil
+	}
+	return envArg(k, v)
+}
+
+func flagArg(flag, value string) ArgOpt {
+	return func(a *Args) { a.Flag(flag, value) }
+}
+
+func flagArgIf(flag, value string) ArgOpt {
+	if value == "" {
+		return nil
+	}
+	return flagArg(flag, value)
+}
+
+func flagBoolIf(flag string, b bool) ArgOpt {
+	if b {
+		return flagArg(flag, "")
+	}
+	return nil
+}
+
+func combine(opts ...ArgOpt) ArgOpt {
+	return func(a *Args) {
+		for _, opt := range opts {
+			if opt != nil {
+				opt(a)
+			}
+		}
+	}
+}
+
+func buildArgs(opts []ArgOpt) *Args {
+	a := &Args{}
+	combine(opts...)(a)
+	return a
+}
+
+// Extra sets a special k/v pair to be interpreted by the execution function.
+func (a *Args) Extra(k, v string) {
+	if a.extra == nil {
+		a.extra = map[string]string{}
+	}
+	a.extra[k] = v
+}
+
+// Val returns the value of a special k/v pair previously set via Extra, or an
+// empty string if the key is unset.
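+//
+// A hypothetical usage sketch (not part of the original source):
+//
+//	var a Args
+//	a.Extra("use", "/path/to/test.bin")
+//	_ = a.Val("use") // "/path/to/test.bin"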
+func (a *Args) Val(k string) string {
+	if a.extra == nil {
+		return ""
+	}
+	return a.extra[k]
+}
+
+// Env sets an environment variable to be passed to the child process on exec.
+func (a *Args) Env(k, v string) {
+	if a.env == nil {
+		a.env = map[string]string{}
+	}
+	a.env[k] = v
+}
+
+// Flag adds a flag to be passed to the child process on exec.
+func (a *Args) Flag(flag, value string) {
+	if a.flags == nil {
+		a.flags = map[string][]string{}
+	}
+	a.flags[flag] = append(a.flags[flag], value)
+}
+
+// Add adds a positional argument to be passed to the child process on exec.
+func (a *Args) Add(p string) {
+	a.pos = append(a.pos, p)
+}
+
+func (a *Args) build() []string {
+	args := make([]string, 0, 2*len(a.flags)+len(a.pos))
+	for k, values := range a.flags {
+		for _, v := range values {
+			args = append(args, k)
+			if v != "" {
+				args = append(args, v)
+			}
+		}
+	}
+
+	args = append(args, a.pos...)
+	return args
+}
diff --git a/vendor/github.com/elastic/lunes/dev-tools/mage/gotool/licenser.go b/vendor/github.com/elastic/lunes/dev-tools/mage/gotool/licenser.go
new file mode 100644
index 00000000000..67978ab8218
--- /dev/null
+++ b/vendor/github.com/elastic/lunes/dev-tools/mage/gotool/licenser.go
@@ -0,0 +1,35 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package gotool
+
+import "github.com/magefile/mage/sh"
+
+type goLicenser func(opts ...ArgOpt) error
+
+// Licenser runs `go-licenser` and provides optionals for adding command line arguments.
+var Licenser goLicenser = runGoLicenser
+
+func runGoLicenser(opts ...ArgOpt) error {
+	args := buildArgs(opts).build()
+	return sh.RunV("go-licenser", args...)
+}
+
+func (goLicenser) Check() ArgOpt                 { return flagBoolIf("-d", true) }
+func (goLicenser) License(license string) ArgOpt { return flagArgIf("-license", license) }
+func (goLicenser) Exclude(path string) ArgOpt    { return flagArgIf("-exclude", path) }
+func (goLicenser) Path(path string) ArgOpt       { return posArg(path) }
diff --git a/vendor/github.com/elastic/lunes/dev-tools/mage/gotool/modules.go b/vendor/github.com/elastic/lunes/dev-tools/mage/gotool/modules.go
new file mode 100644
index 00000000000..d7a880756b7
--- /dev/null
+++ b/vendor/github.com/elastic/lunes/dev-tools/mage/gotool/modules.go
@@ -0,0 +1,64 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package gotool
+
+// Mod is the `go mod` command.
+var Mod = goMod{
+	Download: modCommand{"download"}.run,
+	Init:     modCommand{"init"}.run,
+	Tidy:     modCommand{"tidy"}.run,
+	Verify:   modCommand{"verify"}.run,
+	Vendor:   modCommand{"vendor"}.run,
+}
+
+type modCommand struct {
+	method string
+}
+
+func (cmd modCommand) run(opts ...ArgOpt) error {
+	o := make([]ArgOpt, len(opts)+1)
+	o[0] = posArg(cmd.method)
+	for i, opt := range opts {
+		o[i+1] = opt
+	}
+	args := buildArgs(o)
+	return runVGo("mod", args)
+}
+
+type goMod struct {
+	Download modDownload
+	Init     modInit
+	Tidy     modTidy
+	Verify   modVerify
+	Vendor   modVendor
+}
+
+// modDownload downloads modules to the local cache.
+type modDownload func(opts ...ArgOpt) error
+
+// modInit initializes a new go module in the current folder.
+type modInit func(opts ...ArgOpt) error
+
+// modTidy adds missing and removes unused modules from go.mod.
+type modTidy func(opts ...ArgOpt) error
+
+// modVerify checks that dependencies have the expected content.
+type modVerify func(opts ...ArgOpt) error
+
+// modVendor downloads and copies dependencies into the vendor folder.
+type modVendor func(opts ...ArgOpt) error
diff --git a/vendor/github.com/elastic/lunes/dev-tools/mage/gotool/noticer.go b/vendor/github.com/elastic/lunes/dev-tools/mage/gotool/noticer.go
new file mode 100644
index 00000000000..26669c3f74f
--- /dev/null
+++ b/vendor/github.com/elastic/lunes/dev-tools/mage/gotool/noticer.go
@@ -0,0 +1,38 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package gotool
+
+import "github.com/magefile/mage/sh"
+
+type goNoticeGenerator func(opts ...ArgOpt) error
+
+// NoticeGenerator runs `go-licence-detector` and provides optionals for adding command line arguments.
+var NoticeGenerator goNoticeGenerator = runGoNoticeGenerator
+
+func runGoNoticeGenerator(opts ...ArgOpt) error {
+	args := buildArgs(opts).build()
+	return sh.RunV("go-licence-detector", args...)
+}
+
+func (goNoticeGenerator) Dependencies(path string) ArgOpt { return flagArg("-in", path) }
+func (goNoticeGenerator) IncludeIndirect() ArgOpt { return flagBoolIf("-includeIndirect", true) }
+func (goNoticeGenerator) Rules(path string) ArgOpt { return flagArg("-rules", path) }
+func (goNoticeGenerator) Overrides(path string) ArgOpt { return flagArg("-overrides", path) }
+func (goNoticeGenerator) NoticeTemplate(path string) ArgOpt { return flagArg("-noticeTemplate", path) }
+func (goNoticeGenerator) NoticeOutput(path string) ArgOpt { return flagArg("-noticeOut", path) }
+func (goNoticeGenerator) DepsOutput(path string) ArgOpt { return flagArg("-depsOut", path) }
diff --git a/vendor/github.com/elastic/lunes/dev-tools/mage/install.go b/vendor/github.com/elastic/lunes/dev-tools/mage/install.go
new file mode 100644
index 00000000000..0e30803f093
--- /dev/null
+++ b/vendor/github.com/elastic/lunes/dev-tools/mage/install.go
@@ -0,0 +1,44 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package mage
+
+import (
+	"github.com/elastic/lunes/dev-tools/mage/gotool"
+)
+
+var (
+	// GoLicenserImportPath controls the import path used to install go-licenser.
+	GoLicenserImportPath = "github.com/elastic/go-licenser"
+
+	// GoNoticeGeneratorImportPath controls the import path used to install go-licence-detector.
+	GoNoticeGeneratorImportPath = "go.elastic.co/go-licence-detector"
+)
+
+// InstallGoLicenser target installs go-licenser
+func InstallGoLicenser() error {
+	return gotool.Install(
+		gotool.Install.Package(GoLicenserImportPath),
+	)
+}
+
+// InstallGoNoticeGen target installs go-licence-detector
+func InstallGoNoticeGen() error {
+	return gotool.Install(
+		gotool.Install.Package(GoNoticeGeneratorImportPath),
+	)
+}
diff --git a/vendor/github.com/elastic/lunes/dev-tools/mage/linter.go b/vendor/github.com/elastic/lunes/dev-tools/mage/linter.go
new file mode 100644
index 00000000000..d8bed5a245e
--- /dev/null
+++ b/vendor/github.com/elastic/lunes/dev-tools/mage/linter.go
@@ -0,0 +1,175 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package mage
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"net/http"
+	"os"
+	"path/filepath"
+
+	"github.com/magefile/mage/mg"
+	"github.com/magefile/mage/sh"
+)
+
+const (
+	linterVersion    = "v1.55.2"
+	linterInstallURL = "https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh"
+)
+
+var (
+	linterConfigFilename = filepath.Join(".", ".golangci.yml")
+	linterInstallDir     = filepath.Join(".", "build")
+	linterInstallFile    = filepath.Join(linterInstallDir, "install-golang-ci.sh")
+	linterBinaryFile     = filepath.Join(linterInstallDir, linterVersion, "golangci-lint")
+)
+
+// Linter contains targets related to linting the Go code
+type Linter mg.Namespace
+
+// CheckConfig makes sure that the `.golangci.yml` does not have uncommitted changes
+func (Linter) CheckConfig() error {
+	err := assertUnchanged(linterConfigFilename)
+	if err != nil {
+		return fmt.Errorf("linter configuration has uncommitted changes: %w", err)
+	}
+	return nil
+}
+
+// Install installs golangci-lint (https://golangci-lint.run) to `./build`
+// using the official installation script downloaded from GitHub.
+// If the linter binary already exists, it does nothing.
+func (Linter) Install() error {
+	return install(false)
+}
+
+// ForceInstall force installs the linter regardless of whether it exists or not.
+func (Linter) ForceInstall() error {
+	return install(true)
+}
+
+func install(force bool) error {
+	dirPath := filepath.Dir(linterBinaryFile)
+	err := os.MkdirAll(dirPath, 0700)
+	if err != nil {
+		return fmt.Errorf("failed to create path %q: %w", dirPath, err)
+	}
+
+	_, err = os.Stat(linterBinaryFile)
+	if !force && err == nil {
+		log.Println("The linter has been already installed, skipping...")
+		return nil
+	}
+	if err != nil && !errors.Is(err, os.ErrNotExist) {
+		return fmt.Errorf("failed to check if file %q exists: %w", linterBinaryFile, err)
+	}
+
+	log.Println("Preparing the installation script file...")
+
+	installScript, err := os.OpenFile(linterInstallFile, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0700)
+	if err != nil {
+		return fmt.Errorf("failed to create file %q: %w", linterInstallFile, err)
+	}
+	defer installScript.Close()
+
+	log.Println("Downloading the linter installation script...")
+	//nolint:noctx // valid use since there is no context
+	resp, err := http.Get(linterInstallURL)
+	if err != nil {
+		return fmt.Errorf("cannot download the linter installation script from %q: %w", linterInstallURL, err)
+	}
+	defer resp.Body.Close()
+
+	lr := io.LimitReader(resp.Body, 1024*100) // not more than 100 KB, just to be safe
+	_, err = io.Copy(installScript, lr)
+	if err != nil {
+		return fmt.Errorf("failed to finish downloading the linter installation script: %w", err)
+	}
+
+	err = installScript.Close() // otherwise we cannot run the script
+	if err != nil {
+		return fmt.Errorf("failed to close file %q: %w", linterInstallFile, err)
+	}
+
+	binaryDir := filepath.Dir(linterBinaryFile)
+	err = os.MkdirAll(binaryDir, 0700)
+	if err != nil {
+		return fmt.Errorf("cannot create path %q: %w", binaryDir, err)
+	}
+
+	// there must be no space after `-b`, otherwise the script does not work correctly ¯\_(ツ)_/¯
+	return sh.Run(linterInstallFile, "-b"+binaryDir, linterVersion)
+}
+
+// All runs the linter against the entire codebase
+func (l Linter) All() error {
+	mg.Deps(l.Install, l.CheckConfig)
+	return runLinter()
+}
+
+// Version prints the version of the linter in use.
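+//
+// A hypothetical invocation from the repo root (not part of the original source):
+//
+//	mage linter:version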
+func (l Linter) Version() error {
+	mg.Deps(l.Install)
+	return runLinter("--version")
+}
+
+// LastChange runs the linter against all files changed since the fork point from `main`.
+// If the current branch is `main` then runs against the files changed in the last commit.
+func (l Linter) LastChange() error {
+	mg.Deps(l.Install, l.CheckConfig)
+
+	// get current branch name
+	branch, err := sh.Output("git", "rev-parse", "--abbrev-ref", "HEAD")
+	if err != nil {
+		return fmt.Errorf("failed to get the current branch: %w", err)
+	}
+
+	// The linter is supposed to support linting only changed diffs but, for
+	// some reason, it does not output any results unless whole files are
+	// linted, so we have to pass `--whole-files`. This can frustrate
+	// developers who want to fix a single line in an existing codebase,
+	// since the linter forces them to fix every linting issue in the whole
+	// file instead.
+
+	if branch == "main" {
+		// files changed in the last commit
+		return runLinter("--new-from-rev=HEAD~", "--whole-files")
+	}
+
+	return runLinter("--new-from-rev=origin/main", "--whole-files")
+}
+
+// runLinter runs the linter, forwarding mage's `-v` (verbose mode) flag and the given arguments.
+// It also redirects the linter's output to stderr instead of discarding it.
+func runLinter(runFlags ...string) error {
+	var args []string
+
+	if mg.Verbose() {
+		args = append(args, "-v")
+	}
+
+	args = append(args, "run")
+	args = append(args, runFlags...)
+	args = append(args, "-c", linterConfigFilename)
+	args = append(args, "./...")
+
+	return runWithStdErr(linterBinaryFile, args...)
+}
diff --git a/vendor/github.com/elastic/lunes/dev-tools/mage/mage.go b/vendor/github.com/elastic/lunes/dev-tools/mage/mage.go
new file mode 100644
index 00000000000..05841a0274e
--- /dev/null
+++ b/vendor/github.com/elastic/lunes/dev-tools/mage/mage.go
@@ -0,0 +1,40 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package mage
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/magefile/mage/sh"
+)
+
+func assertUnchanged(path string) error {
+	err := sh.Run("git", "diff", "--exit-code", path)
+	if err != nil {
+		return fmt.Errorf("failed to assert the unchanged file %q: %w", path, err)
+	}
+
+	return nil
+}
+
+// runWithStdErr runs a command redirecting its stderr to the console instead of discarding it
+func runWithStdErr(command string, args ...string) error {
+	_, err := sh.Exec(nil, os.Stdout, os.Stderr, command, args...)
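+	// propagate the command's exit status (if any) to the caller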
+	return err
+}
diff --git a/vendor/github.com/elastic/lunes/dev-tools/mage/notice.go b/vendor/github.com/elastic/lunes/dev-tools/mage/notice.go
new file mode 100644
index 00000000000..19dad5bbefe
--- /dev/null
+++ b/vendor/github.com/elastic/lunes/dev-tools/mage/notice.go
@@ -0,0 +1,57 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package mage
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/magefile/mage/mg"
+
+	"github.com/elastic/lunes/dev-tools/mage/gotool"
+)
+
+func GenerateNotice(overrides, rules, noticeTemplate string) error {
+	mg.Deps(InstallGoNoticeGen, Deps.CheckModuleTidy)
+
+	err := gotool.Mod.Download(gotool.Download.All())
+	if err != nil {
+		return fmt.Errorf("error while downloading dependencies: %w", err)
+	}
+
+	// Ensure the go.mod file is left unchanged after go mod download all runs.
+	// go mod download will modify go.sum in a way that conflicts with go mod tidy.
+	// https://github.com/golang/go/issues/43994#issuecomment-770053099
+	defer gotool.Mod.Tidy() //nolint:errcheck // No value in handling this error.
+
+	out, err := gotool.ListDepsForNotice()
+	if err != nil {
+		return fmt.Errorf("error listing dependencies: %w", err)
+	}
+	depsFile, err := os.CreateTemp("", "depsout")
+	if err != nil {
+		return fmt.Errorf("error creating temporary dependency file: %w", err)
+	}
+	defer os.Remove(depsFile.Name())
+	if _, err := depsFile.Write([]byte(out)); err != nil {
+		return fmt.Errorf("error writing dependency list: %w", err)
+	}
+	if err := depsFile.Close(); err != nil {
+		return fmt.Errorf("error closing temporary dependency file: %w", err)
+	}
+
+	generator := gotool.NoticeGenerator
+	return generator(
+		generator.Dependencies(depsFile.Name()),
+		generator.IncludeIndirect(),
+		generator.Overrides(overrides),
+		generator.Rules(rules),
+		generator.NoticeTemplate(noticeTemplate),
+		generator.NoticeOutput("NOTICE.txt"),
+	)
+}
diff --git a/vendor/github.com/elastic/lunes/locale.go b/vendor/github.com/elastic/lunes/locale.go
new file mode 100644
index 00000000000..8b6e1e9d367
--- /dev/null
+++ b/vendor/github.com/elastic/lunes/locale.go
@@ -0,0 +1,113 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//go:generate go run generator.go
+
+package lunes
+
+import (
+	"fmt"
+)
+
+// A Locale provides a collection of time layout values in a specific language.
+// It maps the time layout elements in a foreign language to their English equivalents.
+type Locale interface {
+	// Language represents a BCP 47 tag, specifying this locale's language.
+	Language() string
+
+	// LongDayNames returns the long day names translations for the week days.
+	// It must be sorted, starting from Sunday to Saturday, and contain all 7 elements,
+	// even if one or more days are empty. If this locale does not support this format,
+	// it should return an empty slice.
+	LongDayNames() []string
+
+	// ShortDayNames returns the short day names translations for the week days.
+	// It must be sorted, starting from Sunday to Saturday, and contain all 7 elements,
+	// even if one or more days are empty. If this locale does not support this format,
+	// it should return an empty slice.
+	ShortDayNames() []string
+
+	// LongMonthNames returns the long month names translations.
+	// It must be sorted, starting from January to December, and contain all 12 elements,
+	// even if one or more months are empty. If this locale does not support this format,
+	// it should return an empty slice.
+	LongMonthNames() []string
+
+	// ShortMonthNames returns the short month names translations.
+	// It must be sorted, starting from January to December, and contain all 12 elements,
+	// even if one or more months are empty. If this locale does not support this format,
+	// it should return an empty slice.
+	ShortMonthNames() []string
+
+	// DayPeriods returns the periods of day translations for the AM and PM abbreviations.
+	// It must be sorted, starting from AM to PM, and contain both elements, even if one
+	// of them is empty. If this locale does not support this format, it should return an
+	// empty slice.
+	DayPeriods() []string
+}
+
+type genericLocale struct {
+	lang  string
+	table [5][]string
+}
+
+func (g *genericLocale) LongDayNames() []string {
+	return g.table[longDayNamesField]
+}
+
+func (g *genericLocale) ShortDayNames() []string {
+	return g.table[shortDayNamesField]
+}
+
+func (g *genericLocale) LongMonthNames() []string {
+	return g.table[longMonthNamesField]
+}
+
+func (g *genericLocale) ShortMonthNames() []string {
+	return g.table[shortMonthNamesField]
+}
+
+func (g *genericLocale) DayPeriods() []string {
+	return g.table[dayPeriodsField]
+}
+
+func (g *genericLocale) Language() string {
+	return g.lang
+}
+
+// ErrUnsupportedLocale indicates that a provided language tag is not supported by the
+// default CLDR generic locales.
+type ErrUnsupportedLocale struct {
+	lang string
+}
+
+func (e *ErrUnsupportedLocale) Error() string {
+	return fmt.Sprintf("locale %s not supported", e.lang)
+}
+
+// NewDefaultLocale creates a new generic locale for the given BCP 47 language tag, using
+// the default CLDR gregorian calendars data of the specified language.
+// If the language is unknown and no default data is found, it returns ErrUnsupportedLocale.
+func NewDefaultLocale(lang string) (Locale, error) {
+	table, ok := tables[lang]
+	if !ok {
+		return nil, &ErrUnsupportedLocale{lang}
+	}
+
+	locale := genericLocale{lang: lang, table: table}
+	return &locale, nil
+}
diff --git a/vendor/github.com/elastic/lunes/lunes.go b/vendor/github.com/elastic/lunes/lunes.go
new file mode 100644
index 00000000000..8074b2a3d86
--- /dev/null
+++ b/vendor/github.com/elastic/lunes/lunes.go
@@ -0,0 +1,482 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package lunes
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+	"unicode"
+)
+
+var longDayNamesStd = []string{
+	"Sunday",
+	"Monday",
+	"Tuesday",
+	"Wednesday",
+	"Thursday",
+	"Friday",
+	"Saturday",
+}
+
+var shortDayNamesStd = []string{
+	"Sun",
+	"Mon",
+	"Tue",
+	"Wed",
+	"Thu",
+	"Fri",
+	"Sat",
+}
+
+var shortMonthNamesStd = []string{
+	"Jan",
+	"Feb",
+	"Mar",
+	"Apr",
+	"May",
+	"Jun",
+	"Jul",
+	"Aug",
+	"Sep",
+	"Oct",
+	"Nov",
+	"Dec",
+}
+
+var longMonthNamesStd = []string{
+	"January",
+	"February",
+	"March",
+	"April",
+	"May",
+	"June",
+	"July",
+	"August",
+	"September",
+	"October",
+	"November",
+	"December",
+}
+
+var dayPeriodsStdUpper = []string{
+	"AM",
+	"PM",
+}
+
+var dayPeriodsStdLower = []string{
+	"am",
+	"pm",
+}
+
+// Parse parses a formatted string in a foreign language and returns the [time.Time] value
+// it represents. See the documentation for the constant [time.Layout] for how to
+// represent the format.
+//
+// After translating the foreign language value to English, it gets the time value by
+// calling the Go standard [time.Parse] function.
+//
+// The language argument must be a well-formed BCP 47 language tag, e.g. ("en", "en-US"),
+// and a known locale. If no data is found for the language, it returns ErrUnsupportedLocale.
+// If the given language does not support a [time.Layout] element specified in the layout
+// argument, it results in an ErrUnsupportedLayoutElem error. On the other hand, if the value
+// does not match the layout, an ErrLayoutMismatch is returned. See the documentation for
+// [time.Parse] for other possible errors it might return.
+//
+// To execute several parses for the same locale, use [ParseWithLocale] as it performs better.
+func Parse(layout string, value string, lang string) (time.Time, error) {
+	locale, err := NewDefaultLocale(lang)
+	if err != nil {
+		return time.Time{}, err
+	}
+
+	return ParseWithLocale(layout, value, locale)
+}
+
+// ParseWithLocale is like Parse, but instead of receiving a BCP 47 language tag argument,
+// it receives a built [lunes.Locale], avoiding looking up existing data in each operation
+// and allowing extensibility.
+func ParseWithLocale(layout string, value string, locale Locale) (time.Time, error) {
+	pv, err := TranslateWithLocale(layout, value, locale)
+	if err != nil {
+		return time.Time{}, err
+	}
+
+	return time.Parse(layout, pv)
+}
+
+// ParseInLocation is like Parse, but it interprets the time as in the given location.
+// In addition to the [Parse] errors, it might return any possible [time.ParseInLocation] errors.
+// To execute several parses for the same locale, use [ParseInLocationWithLocale] as it performs better.
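+//
+// A minimal usage sketch (an illustration, not upstream documentation; the "es"
+// locale and its "lunes"/"enero" names are assumptions about the generated CLDR
+// tables):
+//
+//	madrid, _ := time.LoadLocation("Europe/Madrid")
+//	// on success, t holds the parsed time interpreted in the Madrid location
+//	t, err := lunes.ParseInLocation("Monday, 2 January 2006", "lunes, 2 enero 2006", "es", madrid)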
+func ParseInLocation(layout string, value string, lang string, location *time.Location) (time.Time, error) {
+	locale, err := NewDefaultLocale(lang)
+	if err != nil {
+		return time.Time{}, err
+	}
+
+	return ParseInLocationWithLocale(layout, value, location, locale)
+}
+
+// ParseInLocationWithLocale is like ParseInLocation, but instead of receiving a BCP 47
+// language tag argument, it receives a built [lunes.Locale], avoiding looking up existing
+// data in each operation and allowing extensibility.
+func ParseInLocationWithLocale(layout string, value string, location *time.Location, locale Locale) (time.Time, error) {
+	pv, err := TranslateWithLocale(layout, value, locale)
+	if err != nil {
+		return time.Time{}, err
+	}
+
+	return time.ParseInLocation(layout, pv, location)
+}
+
+// Translate translates a localized textual time value in the provided locale to English.
+// It replaces short and long week day names, month names, and day periods by their
+// English equivalents. The first argument must be a native Go time layout. The second
+// argument must be parseable using the format string (layout) provided as the first
+// argument, but in the foreign language. The language argument must be a well-formed
+// BCP 47 language tag, e.g. ("en", "en-US"), and a known locale. If no data is found
+// for the language, it returns ErrUnsupportedLocale.
+//
+// If the given locale does not support a layout element specified in the layout argument,
+// it results in an ErrUnsupportedLayoutElem error. On the other hand, if the value does
+// not match the layout, an ErrLayoutMismatch is returned.
+//
+// This function is meant to return a value that can be used with the Go standard
+// [time.Parse] or [time.ParseInLocation] functions. Although it preserves the value's
+// empty spaces that are not present in the layout string, it might drop them in the
+// future, as they are ignored by both standard time parsing functions.
+func Translate(layout string, value string, lang string) (string, error) {
+	locale, err := NewDefaultLocale(lang)
+	if err != nil {
+		return value, err
+	}
+
+	return TranslateWithLocale(layout, value, locale)
+}
+
+// TranslateWithLocale is like Translate, but instead of receiving a BCP 47 language tag
+// argument, it receives a built [lunes.Locale], avoiding looking up existing data in each
+// operation and allowing extensibility.
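+//
+// A hedged usage sketch (the "pt" locale and its "segunda-feira"/"janeiro" names
+// are assumptions about the generated CLDR tables, not upstream documentation):
+//
+//	loc, err := lunes.NewDefaultLocale("pt") // err is an *ErrUnsupportedLocale for unknown tags
+//	v, err := lunes.TranslateWithLocale("Monday, 02 January 2006", "segunda-feira, 02 janeiro 2006", loc)
+//	// on success, v reads "Monday, 02 January 2006" and parses with [time.Parse]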
+func TranslateWithLocale(layout string, value string, locale Locale) (string, error) {
+	var err error
+	var sb strings.Builder
+	var layoutOffset, valueOffset int
+
+	sb.Grow(len(layout) + 32)
+
+	for layoutOffset < len(layout) {
+		written := false
+		var lookupTab, stdTab []string
+
+		switch c := int(layout[layoutOffset]); c {
+		case 'J': // January, Jan
+			if len(layout) >= layoutOffset+3 && layout[layoutOffset:layoutOffset+3] == "Jan" {
+				layoutElem := ""
+				if len(layout) >= layoutOffset+7 && layout[layoutOffset:layoutOffset+7] == "January" {
+					layoutElem = "January"
+					lookupTab = locale.LongMonthNames()
+					stdTab = longMonthNamesStd
+				} else if !startsWithLowerCase(layout[layoutOffset+3:]) {
+					layoutElem = "Jan"
+					lookupTab = locale.ShortMonthNames()
+					stdTab = shortMonthNamesStd
+				}
+
+				if layoutElem == "" {
+					break
+				}
+
+				if len(lookupTab) == 0 {
+					return "", newUnsupportedLayoutElemError(layoutElem, locale)
+				}
+
+				layoutOffset += len(layoutElem)
+				valueOffset, err = writeLayoutValue(layoutElem, lookupTab, stdTab, valueOffset, value, &sb)
+				if err != nil {
+					return "", err
+				}
+
+				written = true
+			}
+		case 'M': // Monday, Mon
+			if len(layout) >= layoutOffset+3 && layout[layoutOffset:layoutOffset+3] == "Mon" {
+				layoutElem := ""
+				if len(layout) >= layoutOffset+6 && layout[layoutOffset:layoutOffset+6] == "Monday" {
+					layoutElem = "Monday"
+					lookupTab = locale.LongDayNames()
+					stdTab = longDayNamesStd
+				} else if !startsWithLowerCase(layout[layoutOffset+3:]) {
+					layoutElem = "Mon"
+					lookupTab = locale.ShortDayNames()
+					stdTab = shortDayNamesStd
+				}
+
+				if layoutElem == "" {
+					break
+				}
+
+				if len(lookupTab) == 0 {
+					return "", newUnsupportedLayoutElemError(layoutElem, locale)
+				}
+
+				layoutOffset += len(layoutElem)
+				valueOffset, err = writeLayoutValue(layoutElem, lookupTab, stdTab, valueOffset, value, &sb)
+				if err != nil {
+					return "", err
+				}
+				written = true
+			}
+		case 'P', 'p': // PM, pm
+			if len(layout) >= layoutOffset+2 && unicode.ToUpper(rune(layout[layoutOffset+1])) == 'M' {
+				var layoutElem string
+				// day-period casing matters for the time package parsing functions
+				if c == 'p' {
+					layoutElem = "pm"
+					stdTab = dayPeriodsStdLower
+				} else {
+					layoutElem = "PM"
+					stdTab = dayPeriodsStdUpper
+				}
+
+				lookupTab = locale.DayPeriods()
+				if len(lookupTab) == 0 {
+					return "", newUnsupportedLayoutElemError(layoutElem, locale)
+				}
+
+				layoutOffset += 2
+				valueOffset, err = writeLayoutValue(layoutElem, lookupTab, stdTab, valueOffset, value, &sb)
+				if err != nil {
+					return "", err
+				}
+				written = true
+			}
+		case '_': // _2, _2006, __2
+			// Although no translation happens here, it is still necessary to calculate the
+			// variable size of `_` values, so the layoutOffset stays synchronized with
+			// its layout element counterpart.
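+			// For example, with layout "Jan _2" the value may read "Jan  2" or "Jan 12";
+			// in both cases the "_2" element spans two layout bytes, so layoutOffset
+			// advances by 2 while the value side consumes a variable amount.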
+ if len(layout) >= layoutOffset+2 && layout[layoutOffset+1] == '2' { + var layoutElemSize int + // _2006 is really a literal _, followed by the long year placeholder + if len(layout) >= layoutOffset+5 && layout[layoutOffset+1:layoutOffset+5] == "2006" { + if len(value) >= valueOffset+5 { + layoutElemSize = 5 // _2006 + } + } else { + if len(value) >= valueOffset+2 { + layoutElemSize = 2 // _2 + } + } + + if layoutElemSize > 0 { + layoutOffset += layoutElemSize + valueOffset, err = writeNextNonSpaceValue(value, valueOffset, layoutElemSize, &sb) + if err != nil { + return "", err + } + written = true + } + } + + if len(layout) >= layoutOffset+3 && layout[layoutOffset+1] == '_' && layout[layoutOffset+2] == '2' { + if len(value) >= valueOffset+3 { + layoutOffset += 3 + valueOffset, err = writeNextNonSpaceValue(value, valueOffset, 3, &sb) + if err != nil { + return "", err + } + written = true + } + } + } + + if !written { + var writtenSize int + if len(value) > valueOffset { + writtenSize, err = sb.WriteRune(rune(value[valueOffset])) + if err != nil { + return "", err + } + } + + layoutOffset++ + valueOffset += writtenSize + } + } + + if len(value) >= valueOffset { + sb.WriteString(value[valueOffset:]) + } + + return sb.String(), nil +} + +func writeNextNonSpaceValue(value string, offset int, max int, sb *strings.Builder) (int, error) { + nextValOffset, skippedSpaces, val, err := nextNonSpaceValue(value, offset, max) + if err != nil { + return offset, err + } + + if skippedSpaces > 0 { + val = strings.Repeat(" ", skippedSpaces) + val + } + + _, err = sb.WriteString(val) + if err != nil { + return offset, err + } + + return nextValOffset, nil +} + +func writeLayoutValue(layoutElem string, lookupTab, stdTab []string, valueOffset int, value string, sb *strings.Builder) (int, error) { + newOffset, skippedSpaces, foundStdValue, val := lookup(lookupTab, valueOffset, value, stdTab) + if foundStdValue == "" { + return valueOffset, newLayoutMismatchError(layoutElem, value) + } + + if skippedSpaces > 0 { + foundStdValue = strings.Repeat(" ", skippedSpaces) + foundStdValue + } + + _, err := sb.WriteString(foundStdValue) + if err != nil { + return valueOffset, err + } + + newOffset += len(val) + return newOffset, nil +} + +func nextNonSpaceValue(value string, offset int, max int) (newOffset, skippedSpaces int, val string, err error) { + newOffset = offset + for newOffset < len(value) && unicode.IsSpace(rune(value[newOffset])) { + newOffset++ + } + + skippedSpaces = newOffset - offset + if newOffset > len(value) { + return offset, skippedSpaces, "", errors.New("next non-space value not found") + } + + for newOffset < len(value) { + if !unicode.IsSpace(rune(value[newOffset])) { + val += string(value[newOffset]) + newOffset++ + } else { + return newOffset, skippedSpaces, val, nil + } + + if len(val) == max { + return newOffset, skippedSpaces, val, nil + } + } + + return newOffset, skippedSpaces, val, nil +} + +func lookup(lookupTab []string, offset int, val string, stdTab []string) (newOffset, skippedSpaces int, stdValue string, value string) { + newOffset = offset + for newOffset < len(val) && unicode.IsSpace(rune(val[newOffset])) { + newOffset++ + } + + skippedSpaces = newOffset - offset + if newOffset > len(val) { + return offset, skippedSpaces, "", val + } + + for i, v := range lookupTab { + // Already matched a more specific/longer value + if stdValue != "" && len(v) <= len(value) { + continue + } + + end := newOffset + len(v) + if end > len(val) { + continue + } + + candidate := val[newOffset:end] + 
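// Case-insensitive match against the table entry; among several matching
+			// entries the longest one wins (the length check above skips shorter
+			// candidates), so a long month name is preferred over its short prefix.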
+			if len(candidate) == len(v) && strings.EqualFold(candidate, v) {
+				stdValue = stdTab[i]
+				value = candidate
+			}
+		}
+
+	return newOffset, skippedSpaces, stdValue, value
+}
+
+func startsWithLowerCase(value string) bool {
+	if len(value) == 0 {
+		return false
+	}
+	c := value[0]
+	return 'a' <= c && c <= 'z'
+}
+
+// ErrLayoutMismatch indicates that a provided value does not match its layout counterpart.
+type ErrLayoutMismatch struct {
+	Value      string
+	LayoutElem string
+}
+
+func (l *ErrLayoutMismatch) Error() string {
+	return fmt.Sprintf(`value "%s" does not match the layout element "%s"`, l.Value, l.LayoutElem)
+}
+
+func (l *ErrLayoutMismatch) Is(err error) bool {
+	var target *ErrLayoutMismatch
+	if ok := errors.As(err, &target); ok {
+		return l.Value == target.Value && l.LayoutElem == target.LayoutElem
+	}
+	return false
+}
+
+func newLayoutMismatchError(elem, value string) error {
+	return &ErrLayoutMismatch{
+		LayoutElem: elem,
+		Value:      value,
+	}
+}
+
+// ErrUnsupportedLayoutElem indicates that a provided layout element is not supported by
+// the given locale/language.
+type ErrUnsupportedLayoutElem struct {
+	LayoutElem string
+	Language   string
+}
+
+func (u *ErrUnsupportedLayoutElem) Error() string {
+	return fmt.Sprintf(`layout element "%s" is not supported by the language "%s"`, u.LayoutElem, u.Language)
+}
+
+func (u *ErrUnsupportedLayoutElem) Is(err error) bool {
+	var target *ErrUnsupportedLayoutElem
+	if ok := errors.As(err, &target); ok {
+		return u.Language == target.Language && u.LayoutElem == target.LayoutElem
+	}
+	return false
+}
+
+func newUnsupportedLayoutElemError(elem string, locale Locale) error {
+	return &ErrUnsupportedLayoutElem{
+		LayoutElem: elem,
+		Language:   locale.Language(),
+	}
+}
diff --git a/vendor/github.com/elastic/lunes/magefile.go b/vendor/github.com/elastic/lunes/magefile.go
new file mode 100644
index 00000000000..9a898fe3311
--- /dev/null
+++ b/vendor/github.com/elastic/lunes/magefile.go
@@ -0,0 +1,107 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//go:build mage
+// +build mage
+
+package main
+
+import (
+	"fmt"
+	"path/filepath"
+
+	"github.com/magefile/mage/mg" // mg contains helpful utility functions, like Deps
+	"github.com/magefile/mage/sh"
+
+	// mage:import
+	"github.com/elastic/lunes/dev-tools/mage"
+
+	devtools "github.com/elastic/lunes/dev-tools/mage"
+	"github.com/elastic/lunes/dev-tools/mage/gotool"
+)
+
+// Aliases are shortcuts to long target names.
+// nolint: deadcode // it's used by `mage`.
+var Aliases = map[string]interface{}{
+	"llc":  mage.Linter.LastChange,
+	"lint": mage.Linter.All,
+}
+
+// Check runs all the checks.
+// nolint: deadcode,unparam // it's used as a `mage` target and requires returning an error
+func Check() error {
+	mg.Deps(devtools.InstallGoLicenser)
+	mg.Deps(devtools.Deps.CheckModuleTidy, CheckLicenseHeaders)
+	mg.Deps(devtools.CheckNoChanges)
+	return nil
+}
+
+// Fmt formats code and adds license headers.
+func Fmt() {
+	mg.Deps(devtools.GoImports.Run)
+	mg.Deps(AddLicenseHeaders)
+}
+
+// AddLicenseHeaders adds ASL2 headers to .go files.
+func AddLicenseHeaders() error {
+	fmt.Println(">> fmt - go-licenser: Adding missing headers")
+
+	mg.Deps(devtools.InstallGoLicenser)
+
+	licenser := gotool.Licenser
+
+	return licenser(
+		licenser.License("ASL2"),
+	)
+}
+
+// CheckLicenseHeaders checks ASL2 headers in .go files.
+func CheckLicenseHeaders() error {
+	mg.Deps(devtools.InstallGoLicenser)
+
+	licenser := gotool.Licenser
+
+	return licenser(
+		licenser.Check(),
+		licenser.License("ASL2"),
+	)
+}
+
+// Notice generates a NOTICE.txt file for the module.
+func Notice() error {
+	return devtools.GenerateNotice(
+		filepath.Join("dev-tools", "templates", "notice", "overrides.json"),
+		filepath.Join("dev-tools", "templates", "notice", "rules.json"),
+		filepath.Join("dev-tools", "templates", "notice", "NOTICE.txt.tmpl"),
+	)
+}
+
+// GenerateTables generates the tables.go file.
+func GenerateTables() error {
+	if err := sh.Run("go", "generate"); err != nil {
+		return err
+	}
+
+	mg.Deps(devtools.InstallGoLicenser)
+
+	licenser := gotool.Licenser
+
+	return licenser(
+		licenser.License("ASL2"),
+		licenser.Path("tables.go"),
+	)
+}
diff --git a/vendor/github.com/elastic/lunes/tables.go b/vendor/github.com/elastic/lunes/tables.go
new file mode 100644
index 00000000000..b7be6eb0008
--- /dev/null
+++ b/vendor/github.com/elastic/lunes/tables.go
@@ -0,0 +1,9616 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated by running "go generate" in github.com/elastic/lunes. DO NOT EDIT.
+ +package lunes + +var CLDRVersion = 45 + +var localeTableAa = [5][]string{ + {"Aca", "Etl", "Tal", "Arb", "Kam", "Gum", "Sab"}, + {"Acaada", "Etleeni", "Talaata", "Arbaqa", "Kamiisi", "Gumqata", "Sabti"}, + {"Qun", "Nah", "Cig", "Agd", "Cax", "Qas", "Qad", "Leq", "Way", "Dit", "Xim", "Kax"}, + {"Qunxa Garablu", "Kudo", "Ciggilta Kudo", "Agda Baxis", "Caxah Alsa", "Qasa Dirri", "Qado Dirri", "Liiqen", "Waysu", "Diteli", "Ximoli", "Kaxxa Garablu"}, + {"saaku", "carra"}, +} + +var localeTableAaDJ = [5][]string{ + {"Aca", "Etl", "Tal", "Arb", "Kam", "Gum", "Sab"}, + {"Acaada", "Etleeni", "Talaata", "Arbaqa", "Kamiisi", "Gumqata", "Sabti"}, + {"Qun", "Nah", "Cig", "Agd", "Cax", "Qas", "Qad", "Leq", "Way", "Dit", "Xim", "Kax"}, + {"Qunxa Garablu", "Kudo", "Ciggilta Kudo", "Agda Baxis", "Caxah Alsa", "Qasa Dirri", "Qado Dirri", "Leqeeni", "Waysu", "Diteli", "Ximoli", "Kaxxa Garablu"}, + {"saaku", "carra"}, +} + +var localeTableAaER = [5][]string{ + {"Aca", "Etl", "Tal", "Arb", "Kam", "Gum", "Sab"}, + {"Acaada", "Etleeni", "Talaata", "Arbaqa", "Kamiisi", "Gumqata", "Sabti"}, + {"Qun", "Nah", "Cig", "Agd", "Cax", "Qas", "Qad", "Leq", "Way", "Dit", "Xim", "Kax"}, + {"Qunxa Garablu", "Kudo", "Ciggilta Kudo", "Agda Baxis", "Caxah Alsa", "Qasa Dirri", "Qado Dirri", "Liiqen", "Waysu", "Diteli", "Ximoli", "Kaxxa Garablu"}, + {"saaku", "carra"}, +} + +var localeTableAaET = [5][]string{ + {"Aca", "Etl", "Tal", "Arb", "Kam", "Gum", "Sab"}, + {"Acaada", "Etleeni", "Talaata", "Arbaqa", "Kamiisi", "Gumqata", "Sabti"}, + {"Qun", "Nah", "Cig", "Agd", "Cax", "Qas", "Qad", "Leq", "Way", "Dit", "Xim", "Kax"}, + {"Qunxa Garablu", "Kudo", "Ciggilta Kudo", "Agda Baxis", "Caxah Alsa", "Qasa Dirri", "Qado Dirri", "Liiqen", "Waysu", "Diteli", "Ximoli", "Kaxxa Garablu"}, + {"saaku", "carra"}, +} + +var localeTableAb = [5][]string{ + {"Ам", "Ашә", "Аҩ", "Ах", "Аԥ", "Ахә", "Ас"}, + {"Амҽыша", "Ашәахьа", "Аҩаша", "Ахаша", "Аԥшьаша", "Ахәаша", "Асабша"}, + {"Ажь", "Жəаб", "Хəажә", "Мш", "Лаҵ", "Рашә", "Ԥхынгә", "Нанҳә", "Цəыб", "Жьҭ", "Абҵ", "Ԥхынҷ"}, + {"Ажьырныҳəа", "Жəабран", "Хəажəкыра", "Мшаԥы", "Лаҵара", "Рашəара", "Ԥхынгəы", "Нанҳəа", "Цəыббра", "Жьҭаара", "Абҵара", "Ԥхынҷкәын"}, + {}, +} + +var localeTableAbGE = [5][]string{ + {"Ам", "Ашә", "Аҩ", "Ах", "Аԥ", "Ахә", "Ас"}, + {"Амҽыша", "Ашәахьа", "Аҩаша", "Ахаша", "Аԥшьаша", "Ахәаша", "Асабша"}, + {"Ажь", "Жəаб", "Хəажә", "Мш", "Лаҵ", "Рашә", "Ԥхынгә", "Нанҳә", "Цəыб", "Жьҭ", "Абҵ", "Ԥхынҷ"}, + {"Ажьырныҳəа", "Жəабран", "Хəажəкыра", "Мшаԥы", "Лаҵара", "Рашəара", "Ԥхынгəы", "Нанҳəа", "Цəыббра", "Жьҭаара", "Абҵара", "Ԥхынҷкәын"}, + {}, +} + +var localeTableAf = [5][]string{ + {"So.", "Ma.", "Di.", "Wo.", "Do.", "Vr.", "Sa."}, + {"Sondag", "Maandag", "Dinsdag", "Woensdag", "Donderdag", "Vrydag", "Saterdag"}, + {"Jan.", "Feb.", "Mrt.", "Apr.", "Mei", "Jun.", "Jul.", "Aug.", "Sep.", "Okt.", "Nov.", "Des."}, + {"Januarie", "Februarie", "Maart", "April", "Mei", "Junie", "Julie", "Augustus", "September", "Oktober", "November", "Desember"}, + {"vm.", "nm."}, +} + +var localeTableAfNA = [5][]string{ + {"So.", "Ma.", "Di.", "Wo.", "Do.", "Vr.", "Sa."}, + {"Sondag", "Maandag", "Dinsdag", "Woensdag", "Donderdag", "Vrydag", "Saterdag"}, + {"Jan.", "Feb.", "Mrt.", "Apr.", "Mei", "Jun.", "Jul.", "Aug.", "Sep.", "Okt.", "Nov.", "Des."}, + {"Januarie", "Februarie", "Maart", "April", "Mei", "Junie", "Julie", "Augustus", "September", "Oktober", "November", "Desember"}, + {"vm.", "nm."}, +} + +var localeTableAfZA = [5][]string{ + {"So.", "Ma.", "Di.", "Wo.", "Do.", "Vr.", "Sa."}, + 
{"Sondag", "Maandag", "Dinsdag", "Woensdag", "Donderdag", "Vrydag", "Saterdag"}, + {"Jan.", "Feb.", "Mrt.", "Apr.", "Mei", "Jun.", "Jul.", "Aug.", "Sep.", "Okt.", "Nov.", "Des."}, + {"Januarie", "Februarie", "Maart", "April", "Mei", "Junie", "Julie", "Augustus", "September", "Oktober", "November", "Desember"}, + {"vm.", "nm."}, +} + +var localeTableAgq = [5][]string{ + {"nts", "kpa", "ghɔ", "tɔm", "ume", "ghɨ", "dzk"}, + {"tsuʔntsɨ", "tsuʔukpà", "tsuʔughɔe", "tsuʔutɔ̀mlò", "tsuʔumè", "tsuʔughɨ̂m", "tsuʔndzɨkɔʔɔ"}, + {"nùm", "kɨz", "tɨd", "taa", "see", "nzu", "dum", "fɔe", "dzu", "lɔm", "kaa", "fwo"}, + {"ndzɔ̀ŋɔ̀nùm", "ndzɔ̀ŋɔ̀kƗ̀zùʔ", "ndzɔ̀ŋɔ̀tƗ̀dʉ̀ghà", "ndzɔ̀ŋɔ̀tǎafʉ̄ghā", "ndzɔ̀ŋèsèe", "ndzɔ̀ŋɔ̀nzùghò", "ndzɔ̀ŋɔ̀dùmlo", "ndzɔ̀ŋɔ̀kwîfɔ̀e", "ndzɔ̀ŋɔ̀tƗ̀fʉ̀ghàdzughù", "ndzɔ̀ŋɔ̀ghǔuwelɔ̀m", "ndzɔ̀ŋɔ̀chwaʔàkaa wo", "ndzɔ̀ŋèfwòo"}, + {"a.g", "a.k"}, +} + +var localeTableAgqCM = [5][]string{ + {"nts", "kpa", "ghɔ", "tɔm", "ume", "ghɨ", "dzk"}, + {"tsuʔntsɨ", "tsuʔukpà", "tsuʔughɔe", "tsuʔutɔ̀mlò", "tsuʔumè", "tsuʔughɨ̂m", "tsuʔndzɨkɔʔɔ"}, + {"nùm", "kɨz", "tɨd", "taa", "see", "nzu", "dum", "fɔe", "dzu", "lɔm", "kaa", "fwo"}, + {"ndzɔ̀ŋɔ̀nùm", "ndzɔ̀ŋɔ̀kƗ̀zùʔ", "ndzɔ̀ŋɔ̀tƗ̀dʉ̀ghà", "ndzɔ̀ŋɔ̀tǎafʉ̄ghā", "ndzɔ̀ŋèsèe", "ndzɔ̀ŋɔ̀nzùghò", "ndzɔ̀ŋɔ̀dùmlo", "ndzɔ̀ŋɔ̀kwîfɔ̀e", "ndzɔ̀ŋɔ̀tƗ̀fʉ̀ghàdzughù", "ndzɔ̀ŋɔ̀ghǔuwelɔ̀m", "ndzɔ̀ŋɔ̀chwaʔàkaa wo", "ndzɔ̀ŋèfwòo"}, + {"a.g", "a.k"}, +} + +var localeTableAk = [5][]string{ + {"Kwe", "Dwo", "Ben", "Wuk", "Yaw", "Fia", "Mem"}, + {"Kwesida", "Dwowda", "Benada", "Wukuda", "Yawda", "Fida", "Memeneda"}, + {"S-Ɔ", "K-Ɔ", "E-Ɔ", "E-O", "E-K", "O-A", "A-K", "D-Ɔ", "F-Ɛ", "Ɔ-A", "Ɔ-O", "M-Ɔ"}, + {"Sanda-Ɔpɛpɔn", "Kwakwar-Ɔgyefuo", "Ebɔw-Ɔbenem", "Ebɔbira-Oforisuo", "Esusow Aketseaba-Kɔtɔnimba", "Obirade-Ayɛwohomumu", "Ayɛwoho-Kitawonsa", "Difuu-Ɔsandaa", "Fankwa-Ɛbɔ", "Ɔbɛsɛ-Ahinime", "Ɔberɛfɛw-Obubuo", "Mumu-Ɔpɛnimba"}, + {"AN", "EW"}, +} + +var localeTableAkGH = [5][]string{ + {"Kwe", "Dwo", "Ben", "Wuk", "Yaw", "Fia", "Mem"}, + {"Kwesida", "Dwowda", "Benada", "Wukuda", "Yawda", "Fida", "Memeneda"}, + {"S-Ɔ", "K-Ɔ", "E-Ɔ", "E-O", "E-K", "O-A", "A-K", "D-Ɔ", "F-Ɛ", "Ɔ-A", "Ɔ-O", "M-Ɔ"}, + {"Sanda-Ɔpɛpɔn", "Kwakwar-Ɔgyefuo", "Ebɔw-Ɔbenem", "Ebɔbira-Oforisuo", "Esusow Aketseaba-Kɔtɔnimba", "Obirade-Ayɛwohomumu", "Ayɛwoho-Kitawonsa", "Difuu-Ɔsandaa", "Fankwa-Ɛbɔ", "Ɔbɛsɛ-Ahinime", "Ɔberɛfɛw-Obubuo", "Mumu-Ɔpɛnimba"}, + {"AN", "EW"}, +} + +var localeTableAm = [5][]string{ + {"እሑድ", "ሰኞ", "ማክሰ", "ረቡዕ", "ሐሙስ", "ዓርብ", "ቅዳሜ"}, + {"እሑድ", "ሰኞ", "ማክሰኞ", "ረቡዕ", "ሐሙስ", "ዓርብ", "ቅዳሜ"}, + {"ጃን", "ፌብ", "ማርች", "ኤፕሪ", "ሜይ", "ጁን", "ጁላይ", "ኦገስ", "ሴፕቴ", "ኦክቶ", "ኖቬም", "ዲሴም"}, + {"ጃንዋሪ", "ፌብሩዋሪ", "ማርች", "ኤፕሪል", "ሜይ", "ጁን", "ጁላይ", "ኦገስት", "ሴፕቴምበር", "ኦክቶበር", "ኖቬምበር", "ዲሴምበር"}, + {"ጥዋት", "ከሰዓት"}, +} + +var localeTableAmET = [5][]string{ + {"እሑድ", "ሰኞ", "ማክሰ", "ረቡዕ", "ሐሙስ", "ዓርብ", "ቅዳሜ"}, + {"እሑድ", "ሰኞ", "ማክሰኞ", "ረቡዕ", "ሐሙስ", "ዓርብ", "ቅዳሜ"}, + {"ጃን", "ፌብ", "ማርች", "ኤፕሪ", "ሜይ", "ጁን", "ጁላይ", "ኦገስ", "ሴፕቴ", "ኦክቶ", "ኖቬም", "ዲሴም"}, + {"ጃንዋሪ", "ፌብሩዋሪ", "ማርች", "ኤፕሪል", "ሜይ", "ጁን", "ጁላይ", "ኦገስት", "ሴፕቴምበር", "ኦክቶበር", "ኖቬምበር", "ዲሴምበር"}, + {"ጥዋት", "ከሰዓት"}, +} + +var localeTableAn = [5][]string{ + {"dom", "lun", "mar", "mie", "chu", "vie", "sab"}, + {"dominche", "luns", "martz", "miercres", "chueves", "viernes", "sabado"}, + {"chi.", "feb.", "mar.", "abr.", "may.", "chn.", "chl.", "ago.", "set.", "oct.", "nov.", "avi."}, + {"de chinero", "de febrero", "de marzo", "d’abril", "de mayo", "de chunyo", "de chuliol", "d’agosto", "de setiembre", "d’octubre", "de noviembre", "d’aviento"}, + {"a.m.", "p.m."}, +} + 
+var localeTableAnES = [5][]string{ + {"dom", "lun", "mar", "mie", "chu", "vie", "sab"}, + {"dominche", "luns", "martz", "miercres", "chueves", "viernes", "sabado"}, + {"chi.", "feb.", "mar.", "abr.", "may.", "chn.", "chl.", "ago.", "set.", "oct.", "nov.", "avi."}, + {"de chinero", "de febrero", "de marzo", "d’abril", "de mayo", "de chunyo", "de chuliol", "d’agosto", "de setiembre", "d’octubre", "de noviembre", "d’aviento"}, + {"a.m.", "p.m."}, +} + +var localeTableApc = [5][]string{ + {}, + {"الأحد", "التنين", "التلاتا", "الأربعا", "الخميس", "الجمعة", "السبت"}, + {}, + {"كانون التاني", "شباط", "أذار", "نيسان", "أيار", "حزيران", "تموز", "آب", "أيلول", "تشرين الأول", "تشرين التاني", "كانون الأول"}, + {}, +} + +var localeTableApcSY = [5][]string{ + {}, + {"الأحد", "التنين", "التلاتا", "الأربعا", "الخميس", "الجمعة", "السبت"}, + {}, + {"كانون التاني", "شباط", "أذار", "نيسان", "أيار", "حزيران", "تموز", "آب", "أيلول", "تشرين الأول", "تشرين التاني", "كانون الأول"}, + {}, +} + +var localeTableAr = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"يناير", "فبراير", "مارس", "أبريل", "مايو", "يونيو", "يوليو", "أغسطس", "سبتمبر", "أكتوبر", "نوفمبر", "ديسمبر"}, + {"ص", "م"}, +} + +var localeTableAr001 = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"يناير", "فبراير", "مارس", "أبريل", "مايو", "يونيو", "يوليو", "أغسطس", "سبتمبر", "أكتوبر", "نوفمبر", "ديسمبر"}, + {"ص", "م"}, +} + +var localeTableArAE = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"يناير", "فبراير", "مارس", "أبريل", "مايو", "يونيو", "يوليو", "أغسطس", "سبتمبر", "أكتوبر", "نوفمبر", "ديسمبر"}, + {"ص", "م"}, +} + +var localeTableArBH = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"يناير", "فبراير", "مارس", "أبريل", "مايو", "يونيو", "يوليو", "أغسطس", "سبتمبر", "أكتوبر", "نوفمبر", "ديسمبر"}, + {"ص", "م"}, +} + +var localeTableArDJ = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"يناير", "فبراير", "مارس", "أبريل", "مايو", "يونيو", "يوليو", "أغسطس", "سبتمبر", "أكتوبر", "نوفمبر", "ديسمبر"}, + {"ص", "م"}, +} + +var localeTableArDZ = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"جانفي", "فيفري", "مارس", "أفريل", "ماي", "جوان", "جويلية", "أوت", "سبتمبر", "أكتوبر", "نوفمبر", "ديسمبر"}, + {"ص", "م"}, +} + +var localeTableArEG = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"يناير", "فبراير", "مارس", "أبريل", "مايو", "يونيو", "يوليو", "أغسطس", "سبتمبر", "أكتوبر", "نوفمبر", "ديسمبر"}, + {"ص", "م"}, +} + +var localeTableArEH = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"يناير", "فبراير", "مارس", "أبريل", "مايو", "يونيو", "يوليو", "أغسطس", "سبتمبر", "أكتوبر", "نوفمبر", "ديسمبر"}, + {"ص", "م"}, +} + +var localeTableArER = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"يناير", "فبراير", "مارس", "أبريل", "مايو", "يونيو", "يوليو", "أغسطس", "سبتمبر", "أكتوبر", "نوفمبر", "ديسمبر"}, + {"ص", "م"}, +} + +var localeTableArIL = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"يناير", "فبراير", "مارس", "أبريل", "مايو", "يونيو", "يوليو", "أغسطس", "سبتمبر", "أكتوبر", "نوفمبر", "ديسمبر"}, + 
{"ص", "م"}, +} + +var localeTableArIQ = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {"كانون الثاني", "شباط", "آذار", "نيسان", "أيار", "حزيران", "تموز", "آب", "أيلول", "تشرين الأول", "تشرين الثاني", "كانون الأول"}, + {"كانون الثاني", "شباط", "آذار", "نيسان", "أيار", "حزيران", "تموز", "آب", "أيلول", "تشرين الأول", "تشرين الثاني", "كانون الأول"}, + {"ص", "م"}, +} + +var localeTableArJO = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"كانون الثاني", "شباط", "آذار", "نيسان", "أيار", "حزيران", "تموز", "آب", "أيلول", "تشرين الأول", "تشرين الثاني", "كانون الأول"}, + {"ص", "م"}, +} + +var localeTableArKM = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"يناير", "فبراير", "مارس", "أبريل", "مايو", "يونيو", "يوليو", "أغسطس", "سبتمبر", "أكتوبر", "نوفمبر", "ديسمبر"}, + {"ص", "م"}, +} + +var localeTableArKW = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"يناير", "فبراير", "مارس", "أبريل", "مايو", "يونيو", "يوليو", "أغسطس", "سبتمبر", "أكتوبر", "نوفمبر", "ديسمبر"}, + {"ص", "م"}, +} + +var localeTableArLB = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"كانون الثاني", "شباط", "آذار", "نيسان", "أيار", "حزيران", "تموز", "آب", "أيلول", "تشرين الأول", "تشرين الثاني", "كانون الأول"}, + {"ص", "م"}, +} + +var localeTableArLY = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"يناير", "فبراير", "مارس", "أبريل", "مايو", "يونيو", "يوليو", "أغسطس", "سبتمبر", "أكتوبر", "نوفمبر", "ديسمبر"}, + {"ص", "م"}, +} + +var localeTableArMA = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"يناير", "فبراير", "مارس", "أبريل", "ماي", "يونيو", "يوليوز", "غشت", "شتنبر", "أكتوبر", "نونبر", "دجنبر"}, + {"ص", "م"}, +} + +var localeTableArMR = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"يناير", "فبراير", "مارس", "إبريل", "مايو", "يونيو", "يوليو", "أغشت", "شتمبر", "أكتوبر", "نوفمبر", "دجمبر"}, + {"ص", "م"}, +} + +var localeTableArOM = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"يناير", "فبراير", "مارس", "أبريل", "مايو", "يونيو", "يوليو", "أغسطس", "سبتمبر", "أكتوبر", "نوفمبر", "ديسمبر"}, + {"ص", "م"}, +} + +var localeTableArPS = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"كانون الثاني", "شباط", "آذار", "نيسان", "أيار", "حزيران", "تموز", "آب", "أيلول", "تشرين الأول", "تشرين الثاني", "كانون الأول"}, + {"ص", "م"}, +} + +var localeTableArQA = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"يناير", "فبراير", "مارس", "أبريل", "مايو", "يونيو", "يوليو", "أغسطس", "سبتمبر", "أكتوبر", "نوفمبر", "ديسمبر"}, + {"ص", "م"}, +} + +var localeTableArSA = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"يناير", "فبراير", "مارس", "أبريل", "مايو", "يونيو", "يوليو", "أغسطس", "سبتمبر", "أكتوبر", "نوفمبر", "ديسمبر"}, + {"ص", "م"}, +} + +var localeTableArSD = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"يناير", "فبراير", "مارس", "أبريل", "مايو", "يونيو", "يوليو", "أغسطس", "سبتمبر", "أكتوبر", "نوفمبر", "ديسمبر"}, + 
{"ص", "م"}, +} + +var localeTableArSO = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"يناير", "فبراير", "مارس", "أبريل", "مايو", "يونيو", "يوليو", "أغسطس", "سبتمبر", "أكتوبر", "نوفمبر", "ديسمبر"}, + {"ص", "م"}, +} + +var localeTableArSS = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"يناير", "فبراير", "مارس", "أبريل", "مايو", "يونيو", "يوليو", "أغسطس", "سبتمبر", "أكتوبر", "نوفمبر", "ديسمبر"}, + {"ص", "م"}, +} + +var localeTableArSY = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"كانون الثاني", "شباط", "آذار", "نيسان", "أيار", "حزيران", "تموز", "آب", "أيلول", "تشرين الأول", "تشرين الثاني", "كانون الأول"}, + {"ص", "م"}, +} + +var localeTableArTD = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"يناير", "فبراير", "مارس", "أبريل", "مايو", "يونيو", "يوليو", "أغسطس", "سبتمبر", "أكتوبر", "نوفمبر", "ديسمبر"}, + {"ص", "م"}, +} + +var localeTableArTN = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"جانفي", "فيفري", "مارس", "أفريل", "ماي", "جوان", "جويلية", "أوت", "سبتمبر", "أكتوبر", "نوفمبر", "ديسمبر"}, + {"ص", "م"}, +} + +var localeTableArYE = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"يناير", "فبراير", "مارس", "أبريل", "مايو", "يونيو", "يوليو", "أغسطس", "سبتمبر", "أكتوبر", "نوفمبر", "ديسمبر"}, + {"ص", "م"}, +} + +var localeTableAs = [5][]string{ + {"দেও", "সোম", "মঙ্গল", "বুধ", "বৃহ", "শুক্ৰ", "শনি"}, + {"দেওবাৰ", "সোমবাৰ", "মঙ্গলবাৰ", "বুধবাৰ", "বৃহস্পতিবাৰ", "শুক্ৰবাৰ", "শনিবাৰ"}, + {"জানু", "ফেব্ৰু", "মাৰ্চ", "এপ্ৰিল", "মে’", "জুন", "জুলাই", "আগ", "ছেপ্তে", "অক্টো", "নৱে", "ডিচে"}, + {"জানুৱাৰী", "ফেব্ৰুৱাৰী", "মাৰ্চ", "এপ্ৰিল", "মে’", "জুন", "জুলাই", "আগষ্ট", "ছেপ্তেম্বৰ", "অক্টোবৰ", "নৱেম্বৰ", "ডিচেম্বৰ"}, + {"পূৰ্বাহ্ন", "অপৰাহ্ন"}, +} + +var localeTableAsIN = [5][]string{ + {"দেও", "সোম", "মঙ্গল", "বুধ", "বৃহ", "শুক্ৰ", "শনি"}, + {"দেওবাৰ", "সোমবাৰ", "মঙ্গলবাৰ", "বুধবাৰ", "বৃহস্পতিবাৰ", "শুক্ৰবাৰ", "শনিবাৰ"}, + {"জানু", "ফেব্ৰু", "মাৰ্চ", "এপ্ৰিল", "মে’", "জুন", "জুলাই", "আগ", "ছেপ্তে", "অক্টো", "নৱে", "ডিচে"}, + {"জানুৱাৰী", "ফেব্ৰুৱাৰী", "মাৰ্চ", "এপ্ৰিল", "মে’", "জুন", "জুলাই", "আগষ্ট", "ছেপ্তেম্বৰ", "অক্টোবৰ", "নৱেম্বৰ", "ডিচেম্বৰ"}, + {"পূৰ্বাহ্ন", "অপৰাহ্ন"}, +} + +var localeTableAsa = [5][]string{ + {"Jpi", "Jtt", "Jnn", "Jtn", "Alh", "Ijm", "Jmo"}, + {"Jumapili", "Jumatatu", "Jumanne", "Jumatano", "Alhamisi", "Ijumaa", "Jumamosi"}, + {"Jan", "Feb", "Mac", "Apr", "Mei", "Jun", "Jul", "Ago", "Sep", "Okt", "Nov", "Dec"}, + {"Januari", "Februari", "Machi", "Aprili", "Mei", "Juni", "Julai", "Agosti", "Septemba", "Oktoba", "Novemba", "Desemba"}, + {"icheheavo", "ichamthi"}, +} + +var localeTableAsaTZ = [5][]string{ + {"Jpi", "Jtt", "Jnn", "Jtn", "Alh", "Ijm", "Jmo"}, + {"Jumapili", "Jumatatu", "Jumanne", "Jumatano", "Alhamisi", "Ijumaa", "Jumamosi"}, + {"Jan", "Feb", "Mac", "Apr", "Mei", "Jun", "Jul", "Ago", "Sep", "Okt", "Nov", "Dec"}, + {"Januari", "Februari", "Machi", "Aprili", "Mei", "Juni", "Julai", "Agosti", "Septemba", "Oktoba", "Novemba", "Desemba"}, + {"icheheavo", "ichamthi"}, +} + +var localeTableAst = [5][]string{ + {"dom", "llu", "mar", "mié", "xue", "vie", "sáb"}, + {"domingu", "llunes", "martes", "miércoles", "xueves", "vienres", "sábadu"}, + {"xin", "feb", "mar", "abr", "may", "xun", "xnt", "ago", "set", "och", "pay", 
"avi"}, + {"de xineru", "de febreru", "de marzu", "d’abril", "de mayu", "de xunu", "de xunetu", "d’agostu", "de setiembre", "d’ochobre", "de payares", "d’avientu"}, + {"a", "p"}, +} + +var localeTableAstES = [5][]string{ + {"dom", "llu", "mar", "mié", "xue", "vie", "sáb"}, + {"domingu", "llunes", "martes", "miércoles", "xueves", "vienres", "sábadu"}, + {"xin", "feb", "mar", "abr", "may", "xun", "xnt", "ago", "set", "och", "pay", "avi"}, + {"de xineru", "de febreru", "de marzu", "d’abril", "de mayu", "de xunu", "de xunetu", "d’agostu", "de setiembre", "d’ochobre", "de payares", "d’avientu"}, + {"a", "p"}, +} + +var localeTableAz = [5][]string{ + {"B.", "B.e.", "Ç.a.", "Ç.", "C.a.", "C.", "Ş."}, + {"bazar", "bazar ertəsi", "çərşənbə axşamı", "çərşənbə", "cümə axşamı", "cümə", "şənbə"}, + {"yan", "fev", "mar", "apr", "may", "iyn", "iyl", "avq", "sen", "okt", "noy", "dek"}, + {"yanvar", "fevral", "mart", "aprel", "may", "iyun", "iyul", "avqust", "sentyabr", "oktyabr", "noyabr", "dekabr"}, + {"a", "p"}, +} + +var localeTableAzCyrl = [5][]string{ + {"Б.", "Б.Е.", "Ч.А.", "Ч.", "Ҹ.А.", "Ҹ.", "Ш."}, + {"базар", "базар ертәси", "чәршәнбә ахшамы", "чәршәнбә", "ҹүмә ахшамы", "ҹүмә", "шәнбә"}, + {"јан", "фев", "мар", "апр", "май", "ијн", "ијл", "авг", "сен", "окт", "ној", "дек"}, + {"јанвар", "феврал", "март", "апрел", "май", "ијун", "ијул", "август", "сентјабр", "октјабр", "нојабр", "декабр"}, + {"АМ", "ПМ"}, +} + +var localeTableAzCyrlAZ = [5][]string{ + {"Б.", "Б.Е.", "Ч.А.", "Ч.", "Ҹ.А.", "Ҹ.", "Ш."}, + {"базар", "базар ертәси", "чәршәнбә ахшамы", "чәршәнбә", "ҹүмә ахшамы", "ҹүмә", "шәнбә"}, + {"јан", "фев", "мар", "апр", "май", "ијн", "ијл", "авг", "сен", "окт", "ној", "дек"}, + {"јанвар", "феврал", "март", "апрел", "май", "ијун", "ијул", "август", "сентјабр", "октјабр", "нојабр", "декабр"}, + {"АМ", "ПМ"}, +} + +var localeTableAzLatn = [5][]string{ + {"B.", "B.e.", "Ç.a.", "Ç.", "C.a.", "C.", "Ş."}, + {"bazar", "bazar ertəsi", "çərşənbə axşamı", "çərşənbə", "cümə axşamı", "cümə", "şənbə"}, + {"yan", "fev", "mar", "apr", "may", "iyn", "iyl", "avq", "sen", "okt", "noy", "dek"}, + {"yanvar", "fevral", "mart", "aprel", "may", "iyun", "iyul", "avqust", "sentyabr", "oktyabr", "noyabr", "dekabr"}, + {"a", "p"}, +} + +var localeTableAzLatnAZ = [5][]string{ + {"B.", "B.e.", "Ç.a.", "Ç.", "C.a.", "C.", "Ş."}, + {"bazar", "bazar ertəsi", "çərşənbə axşamı", "çərşənbə", "cümə axşamı", "cümə", "şənbə"}, + {"yan", "fev", "mar", "apr", "may", "iyn", "iyl", "avq", "sen", "okt", "noy", "dek"}, + {"yanvar", "fevral", "mart", "aprel", "may", "iyun", "iyul", "avqust", "sentyabr", "oktyabr", "noyabr", "dekabr"}, + {"a", "p"}, +} + +var localeTableBal = [5][]string{ + {"یک", "دو", "سئے", "چار", "پنچ", "جمه", "شم"}, + {"یکشمبه", "دوشمبه", "سئیشمبه", "چارشمبه", "پنچشمبه", "جمه", "شمبه"}, + {"جن", "پر", "مار", "اپر", "مئیی", "جون", "جۆل", "اگست", "ستم", "اکت", "نئوم", "دسم"}, + {"جنوری", "پروری", "مارچ", "اپرێل", "مئیی", "جون", "جۆلایی", "اگست", "ستمبر", "اکتوبر", "نئومبر", "دسمبر"}, + {}, +} + +var localeTableBalArab = [5][]string{ + {"یک", "دو", "سئے", "چار", "پنچ", "جمه", "شم"}, + {"یکشمبه", "دوشمبه", "سئیشمبه", "چارشمبه", "پنچشمبه", "جمه", "شمبه"}, + {"جن", "پر", "مار", "اپر", "مئیی", "جون", "جۆل", "اگست", "ستم", "اکت", "نئوم", "دسم"}, + {"جنوری", "پروری", "مارچ", "اپرێل", "مئیی", "جون", "جۆلایی", "اگست", "ستمبر", "اکتوبر", "نئومبر", "دسمبر"}, + {}, +} + +var localeTableBalArabPK = [5][]string{ + {"یک", "دو", "سئے", "چار", "پنچ", "جمه", "شم"}, + {"یکشمبه", "دوشمبه", "سئیشمبه", "چارشمبه", "پنچشمبه", "جمه", "شمبه"}, + 
{"جن", "پر", "مار", "اپر", "مئیی", "جون", "جۆل", "اگست", "ستم", "اکت", "نئوم", "دسم"}, + {"جنوری", "پروری", "مارچ", "اپرێل", "مئیی", "جون", "جۆلایی", "اگست", "ستمبر", "اکتوبر", "نئومبر", "دسمبر"}, + {}, +} + +var localeTableBalLatn = [5][]string{ + {"Yak", "Do", "Say", "Chá", "Pan", "Jom", "Sha"}, + {"Yakshambeh", "Doshambeh", "Sayshambeh", "Chárshambeh", "Panchshambeh", "Jomah", "Shambeh"}, + {"Jan", "Par", "Már", "Apr", "Mai", "Jun", "Jól", "Aga", "Sat", "Akt", "Naw", "Das"}, + {"Janwari", "Parwari", "Márch", "Aprél", "Mai", "Jun", "Jólái", "Agast", "Satambar", "Aktubar", "Nawambar", "Dasambar"}, + {}, +} + +var localeTableBalLatnPK = [5][]string{ + {"Yak", "Do", "Say", "Chá", "Pan", "Jom", "Sha"}, + {"Yakshambeh", "Doshambeh", "Sayshambeh", "Chárshambeh", "Panchshambeh", "Jomah", "Shambeh"}, + {"Jan", "Par", "Már", "Apr", "Mai", "Jun", "Jól", "Aga", "Sat", "Akt", "Naw", "Das"}, + {"Janwari", "Parwari", "Márch", "Aprél", "Mai", "Jun", "Jólái", "Agast", "Satambar", "Aktubar", "Nawambar", "Dasambar"}, + {}, +} + +var localeTableBas = [5][]string{ + {"nɔy", "nja", "uum", "ŋge", "mbɔ", "kɔɔ", "jon"}, + {"ŋgwà nɔ̂y", "ŋgwà njaŋgumba", "ŋgwà ûm", "ŋgwà ŋgê", "ŋgwà mbɔk", "ŋgwà kɔɔ", "ŋgwà jôn"}, + {"kɔn", "mac", "mat", "mto", "mpu", "hil", "nje", "hik", "dip", "bio", "may", "liɓ"}, + {"Kɔndɔŋ", "Màcɛ̂l", "Màtùmb", "Màtop", "M̀puyɛ", "Hìlòndɛ̀", "Njèbà", "Hìkaŋ", "Dìpɔ̀s", "Bìòôm", "Màyɛsèp", "Lìbuy li ńyèe"}, + {"Ibikɛ̂glà", "Iɓugajɔp"}, +} + +var localeTableBasCM = [5][]string{ + {"nɔy", "nja", "uum", "ŋge", "mbɔ", "kɔɔ", "jon"}, + {"ŋgwà nɔ̂y", "ŋgwà njaŋgumba", "ŋgwà ûm", "ŋgwà ŋgê", "ŋgwà mbɔk", "ŋgwà kɔɔ", "ŋgwà jôn"}, + {"kɔn", "mac", "mat", "mto", "mpu", "hil", "nje", "hik", "dip", "bio", "may", "liɓ"}, + {"Kɔndɔŋ", "Màcɛ̂l", "Màtùmb", "Màtop", "M̀puyɛ", "Hìlòndɛ̀", "Njèbà", "Hìkaŋ", "Dìpɔ̀s", "Bìòôm", "Màyɛsèp", "Lìbuy li ńyèe"}, + {"Ibikɛ̂glà", "Iɓugajɔp"}, +} + +var localeTableBe = [5][]string{ + {"нд", "пн", "аў", "ср", "чц", "пт", "сб"}, + {"нядзеля", "панядзелак", "аўторак", "серада", "чацвер", "пятніца", "субота"}, + {"сту", "лют", "сак", "кра", "мая", "чэр", "ліп", "жні", "вер", "кас", "ліс", "сне"}, + {"студзеня", "лютага", "сакавіка", "красавіка", "мая", "чэрвеня", "ліпеня", "жніўня", "верасня", "кастрычніка", "лістапада", "снежня"}, + {"am", "pm"}, +} + +var localeTableBeBY = [5][]string{ + {"нд", "пн", "аў", "ср", "чц", "пт", "сб"}, + {"нядзеля", "панядзелак", "аўторак", "серада", "чацвер", "пятніца", "субота"}, + {"сту", "лют", "сак", "кра", "мая", "чэр", "ліп", "жні", "вер", "кас", "ліс", "сне"}, + {"студзеня", "лютага", "сакавіка", "красавіка", "мая", "чэрвеня", "ліпеня", "жніўня", "верасня", "кастрычніка", "лістапада", "снежня"}, + {"am", "pm"}, +} + +var localeTableBetarask = [5][]string{ + {"няд", "пан", "аўт", "сер", "чац", "пят", "суб"}, + {"нядзеля", "панядзелак", "аўторак", "серада", "чацьвер", "пятніца", "субота"}, + {"сту", "лют", "сак", "кра", "тра", "чэр", "ліп", "жні", "вер", "кас", "ліс", "сьн"}, + {"студзеня", "лютага", "сакавіка", "красавіка", "траўня", "чэрвеня", "ліпеня", "жніўня", "верасьня", "кастрычніка", "лістапада", "сьнежня"}, + {"апоўначы", "апоўдні"}, +} + +var localeTableBem = [5][]string{ + {}, + {"Pa Mulungu", "Palichimo", "Palichibuli", "Palichitatu", "Palichine", "Palichisano", "Pachibelushi"}, + {"Jan", "Feb", "Mac", "Epr", "Mei", "Jun", "Jul", "Oga", "Sep", "Okt", "Nov", "Dis"}, + {"Januari", "Februari", "Machi", "Epreo", "Mei", "Juni", "Julai", "Ogasti", "Septemba", "Oktoba", "Novemba", "Disemba"}, + {"uluchelo", "akasuba"}, +} + +var 
localeTableBemZM = [5][]string{ + {}, + {"Pa Mulungu", "Palichimo", "Palichibuli", "Palichitatu", "Palichine", "Palichisano", "Pachibelushi"}, + {"Jan", "Feb", "Mac", "Epr", "Mei", "Jun", "Jul", "Oga", "Sep", "Okt", "Nov", "Dis"}, + {"Januari", "Februari", "Machi", "Epreo", "Mei", "Juni", "Julai", "Ogasti", "Septemba", "Oktoba", "Novemba", "Disemba"}, + {"uluchelo", "akasuba"}, +} + +var localeTableBew = [5][]string{ + {"Min", "Sen", "Sel", "Reb", "Kem", "Jum", "Sap"}, + {"Minggu", "Senèn", "Selasa", "Rebo", "Kemis", "Juma’at", "Saptu"}, + {"Jan", "Pèb", "Mar", "Apr", "Méi", "Jun", "Jul", "Ags", "Sèp", "Okt", "Nop", "Dés"}, + {"Januari", "Pèbruari", "Maret", "April", "Méi", "Juni", "Juli", "Agustus", "Sèptèmber", "Oktober", "Nopèmber", "Désèmber"}, + {"pg/sg", "sr/mlm"}, +} + +var localeTableBewID = [5][]string{ + {"Min", "Sen", "Sel", "Reb", "Kem", "Jum", "Sap"}, + {"Minggu", "Senèn", "Selasa", "Rebo", "Kemis", "Juma’at", "Saptu"}, + {"Jan", "Pèb", "Mar", "Apr", "Méi", "Jun", "Jul", "Ags", "Sèp", "Okt", "Nop", "Dés"}, + {"Januari", "Pèbruari", "Maret", "April", "Méi", "Juni", "Juli", "Agustus", "Sèptèmber", "Oktober", "Nopèmber", "Désèmber"}, + {"pg/sg", "sr/mlm"}, +} + +var localeTableBez = [5][]string{ + {"Mul", "Vil", "Hiv", "Hid", "Hit", "Hih", "Lem"}, + {"pa mulungu", "pa shahuviluha", "pa hivili", "pa hidatu", "pa hitayi", "pa hihanu", "pa shahulembela"}, + {"Hut", "Vil", "Dat", "Tai", "Han", "Sit", "Sab", "Nan", "Tis", "Kum", "Kmj", "Kmb"}, + {"pa mwedzi gwa hutala", "pa mwedzi gwa wuvili", "pa mwedzi gwa wudatu", "pa mwedzi gwa wutai", "pa mwedzi gwa wuhanu", "pa mwedzi gwa sita", "pa mwedzi gwa saba", "pa mwedzi gwa nane", "pa mwedzi gwa tisa", "pa mwedzi gwa kumi", "pa mwedzi gwa kumi na moja", "pa mwedzi gwa kumi na mbili"}, + {"pamilau", "pamunyi"}, +} + +var localeTableBezTZ = [5][]string{ + {"Mul", "Vil", "Hiv", "Hid", "Hit", "Hih", "Lem"}, + {"pa mulungu", "pa shahuviluha", "pa hivili", "pa hidatu", "pa hitayi", "pa hihanu", "pa shahulembela"}, + {"Hut", "Vil", "Dat", "Tai", "Han", "Sit", "Sab", "Nan", "Tis", "Kum", "Kmj", "Kmb"}, + {"pa mwedzi gwa hutala", "pa mwedzi gwa wuvili", "pa mwedzi gwa wudatu", "pa mwedzi gwa wutai", "pa mwedzi gwa wuhanu", "pa mwedzi gwa sita", "pa mwedzi gwa saba", "pa mwedzi gwa nane", "pa mwedzi gwa tisa", "pa mwedzi gwa kumi", "pa mwedzi gwa kumi na moja", "pa mwedzi gwa kumi na mbili"}, + {"pamilau", "pamunyi"}, +} + +var localeTableBg = [5][]string{ + {"нд", "пн", "вт", "ср", "чт", "пт", "сб"}, + {"неделя", "понеделник", "вторник", "сряда", "четвъртък", "петък", "събота"}, + {"яну", "фев", "март", "апр", "май", "юни", "юли", "авг", "сеп", "окт", "ное", "дек"}, + {"януари", "февруари", "март", "април", "май", "юни", "юли", "август", "септември", "октомври", "ноември", "декември"}, + {"am", "pm"}, +} + +var localeTableBgBG = [5][]string{ + {"нд", "пн", "вт", "ср", "чт", "пт", "сб"}, + {"неделя", "понеделник", "вторник", "сряда", "четвъртък", "петък", "събота"}, + {"яну", "фев", "март", "апр", "май", "юни", "юли", "авг", "сеп", "окт", "ное", "дек"}, + {"януари", "февруари", "март", "април", "май", "юни", "юли", "август", "септември", "октомври", "ноември", "декември"}, + {"am", "pm"}, +} + +var localeTableBgc = [5][]string{ + {}, + {"ऐतवार", "सोमवार", "मंगलवार", "बुधवार", "बृहस्पतवार", "शुक्रवार", "शनिवार"}, + {}, + {"जनवरी", "फरवरी", "मार्च", "अप्रैल", "मई", "जून", "जुलाई", "अगस्त", "सितम्बर", "अक्टूबर", "नवम्बर", "दिसंबर"}, + {}, +} + +var localeTableBgcIN = [5][]string{ + {}, + {"ऐतवार", "सोमवार", "मंगलवार", "बुधवार", "बृहस्पतवार", 
"शुक्रवार", "शनिवार"}, + {}, + {"जनवरी", "फरवरी", "मार्च", "अप्रैल", "मई", "जून", "जुलाई", "अगस्त", "सितम्बर", "अक्टूबर", "नवम्बर", "दिसंबर"}, + {}, +} + +var localeTableBho = [5][]string{ + {}, + {"रबीबार", "सोमबार", "मंगलबार", "बुधबार", "बृहस्पतिबार", "शुक्रबार", "सनीचर"}, + {}, + {"जनवरी", "फरवरी", "मार्च", "अप्रैल", "मई", "जून", "जुलाई", "अगस्त", "सितम्बर", "अक्टूबर", "नवंबर", "दिसंबर"}, + {}, +} + +var localeTableBhoIN = [5][]string{ + {}, + {"रबीबार", "सोमबार", "मंगलबार", "बुधबार", "बृहस्पतिबार", "शुक्रबार", "सनीचर"}, + {}, + {"जनवरी", "फरवरी", "मार्च", "अप्रैल", "मई", "जून", "जुलाई", "अगस्त", "सितम्बर", "अक्टूबर", "नवंबर", "दिसंबर"}, + {}, +} + +var localeTableBlo = [5][]string{ + {"alah", "aɖɩt", "atal", "alar", "alam", "arɩs", "asib"}, + {"alahaɖɩ", "aɖɩtɛnɛɛ", "atalaata", "alaarba", "alaamɩshɩ", "arɩsǝma", "asiibi"}, + {"kaw", "kpa", "ci", "ɖʊ", "ɖu5", "ɖu6", "la", "kǝu", "fʊm", "cim", "pom", "bʊn"}, + {"ɩjikawǝrka kaŋɔrɔ", "ɩjikpaka kaŋɔrɔ", "arɛ́cika kaŋɔrɔ", "njɩbɔ nɖʊka kaŋɔrɔ", "acafʊnɖuka kaŋɔrɔ", "anɔɔɖuka kaŋɔrɔ", "alàlaka kaŋɔrɔ", "ɩjikǝuka kaŋɔrɔ", "abofʊmka kaŋɔrɔ", "ɩjicimka kaŋɔrɔ", "acapomka kaŋɔrɔ", "anɔɔbʊnka kaŋɔrɔ"}, + {"1ka", "2ja"}, +} + +var localeTableBloBJ = [5][]string{ + {"alah", "aɖɩt", "atal", "alar", "alam", "arɩs", "asib"}, + {"alahaɖɩ", "aɖɩtɛnɛɛ", "atalaata", "alaarba", "alaamɩshɩ", "arɩsǝma", "asiibi"}, + {"kaw", "kpa", "ci", "ɖʊ", "ɖu5", "ɖu6", "la", "kǝu", "fʊm", "cim", "pom", "bʊn"}, + {"ɩjikawǝrka kaŋɔrɔ", "ɩjikpaka kaŋɔrɔ", "arɛ́cika kaŋɔrɔ", "njɩbɔ nɖʊka kaŋɔrɔ", "acafʊnɖuka kaŋɔrɔ", "anɔɔɖuka kaŋɔrɔ", "alàlaka kaŋɔrɔ", "ɩjikǝuka kaŋɔrɔ", "abofʊmka kaŋɔrɔ", "ɩjicimka kaŋɔrɔ", "acapomka kaŋɔrɔ", "anɔɔbʊnka kaŋɔrɔ"}, + {"1ka", "2ja"}, +} + +var localeTableBm = [5][]string{ + {"kar", "ntɛ", "tar", "ara", "ala", "jum", "sib"}, + {"kari", "ntɛnɛ", "tarata", "araba", "alamisa", "juma", "sibiri"}, + {"zan", "feb", "mar", "awi", "mɛ", "zuw", "zul", "uti", "sɛt", "ɔku", "now", "des"}, + {"zanwuye", "feburuye", "marisi", "awirili", "mɛ", "zuwɛn", "zuluye", "uti", "sɛtanburu", "ɔkutɔburu", "nowanburu", "desanburu"}, + {}, +} + +var localeTableBmML = [5][]string{ + {"kar", "ntɛ", "tar", "ara", "ala", "jum", "sib"}, + {"kari", "ntɛnɛ", "tarata", "araba", "alamisa", "juma", "sibiri"}, + {"zan", "feb", "mar", "awi", "mɛ", "zuw", "zul", "uti", "sɛt", "ɔku", "now", "des"}, + {"zanwuye", "feburuye", "marisi", "awirili", "mɛ", "zuwɛn", "zuluye", "uti", "sɛtanburu", "ɔkutɔburu", "nowanburu", "desanburu"}, + {}, +} + +var localeTableBn = [5][]string{ + {"রবি", "সোম", "মঙ্গল", "বুধ", "বৃহস্পতি", "শুক্র", "শনি"}, + {"রবিবার", "সোমবার", "মঙ্গলবার", "বুধবার", "বৃহস্পতিবার", "শুক্রবার", "শনিবার"}, + {"জানু", "ফেব", "মার্চ", "এপ্রি", "মে", "জুন", "জুল", "আগ", "সেপ", "অক্টো", "নভে", "ডিসে"}, + {"জানুয়ারী", "ফেব্রুয়ারী", "মার্চ", "এপ্রিল", "মে", "জুন", "জুলাই", "আগস্ট", "সেপ্টেম্বর", "অক্টোবর", "নভেম্বর", "ডিসেম্বর"}, + {}, +} + +var localeTableBnBD = [5][]string{ + {"রবি", "সোম", "মঙ্গল", "বুধ", "বৃহস্পতি", "শুক্র", "শনি"}, + {"রবিবার", "সোমবার", "মঙ্গলবার", "বুধবার", "বৃহস্পতিবার", "শুক্রবার", "শনিবার"}, + {"জানু", "ফেব", "মার্চ", "এপ্রি", "মে", "জুন", "জুল", "আগ", "সেপ", "অক্টো", "নভে", "ডিসে"}, + {"জানুয়ারী", "ফেব্রুয়ারী", "মার্চ", "এপ্রিল", "মে", "জুন", "জুলাই", "আগস্ট", "সেপ্টেম্বর", "অক্টোবর", "নভেম্বর", "ডিসেম্বর"}, + {}, +} + +var localeTableBnIN = [5][]string{ + {"রবি", "সোম", "মঙ্গল", "বুধ", "বৃহস্পতি", "শুক্র", "শনি"}, + {"রবিবার", "সোমবার", "মঙ্গলবার", "বুধবার", "বৃহস্পতিবার", "শুক্রবার", "শনিবার"}, + {"জানু", "ফেব", "মার্চ", "এপ্রি", "মে", "জুন", "জুল", 
"আগ", "সেপ্টেঃ", "অক্টোঃ", "নভেঃ", "ডিসেঃ"}, + {"জানুয়ারী", "ফেব্রুয়ারী", "মার্চ", "এপ্রিল", "মে", "জুন", "জুলাই", "আগস্ট", "সেপ্টেম্বর", "অক্টোবর", "নভেম্বর", "ডিসেম্বর"}, + {}, +} + +var localeTableBo = [5][]string{ + {"ཉི་མ་", "ཟླ་བ་", "མིག་དམར་", "ལྷག་པ་", "ཕུར་བུ་", "པ་སངས་", "སྤེན་པ་"}, + {"གཟའ་ཉི་མ་", "གཟའ་ཟླ་བ་", "གཟའ་མིག་དམར་", "གཟའ་ལྷག་པ་", "གཟའ་ཕུར་བུ་", "གཟའ་པ་སངས་", "གཟའ་སྤེན་པ་"}, + {"ཟླ་༡", "ཟླ་༢", "ཟླ་༣", "ཟླ་༤", "ཟླ་༥", "ཟླ་༦", "ཟླ་༧", "ཟླ་༨", "ཟླ་༩", "ཟླ་༡༠", "ཟླ་༡༡", "ཟླ་༡༢"}, + {"ཟླ་བ་དང་པོ", "ཟླ་བ་གཉིས་པ", "ཟླ་བ་གསུམ་པ", "ཟླ་བ་བཞི་པ", "ཟླ་བ་ལྔ་པ", "ཟླ་བ་དྲུག་པ", "ཟླ་བ་བདུན་པ", "ཟླ་བ་བརྒྱད་པ", "ཟླ་བ་དགུ་པ", "ཟླ་བ་བཅུ་པ", "ཟླ་བ་བཅུ་གཅིག་པ", "ཟླ་བ་བཅུ་གཉིས་པ"}, + {"སྔ་དྲོ་", "ཕྱི་དྲོ་"}, +} + +var localeTableBoCN = [5][]string{ + {"ཉི་མ་", "ཟླ་བ་", "མིག་དམར་", "ལྷག་པ་", "ཕུར་བུ་", "པ་སངས་", "སྤེན་པ་"}, + {"གཟའ་ཉི་མ་", "གཟའ་ཟླ་བ་", "གཟའ་མིག་དམར་", "གཟའ་ལྷག་པ་", "གཟའ་ཕུར་བུ་", "གཟའ་པ་སངས་", "གཟའ་སྤེན་པ་"}, + {"ཟླ་༡", "ཟླ་༢", "ཟླ་༣", "ཟླ་༤", "ཟླ་༥", "ཟླ་༦", "ཟླ་༧", "ཟླ་༨", "ཟླ་༩", "ཟླ་༡༠", "ཟླ་༡༡", "ཟླ་༡༢"}, + {"ཟླ་བ་དང་པོ", "ཟླ་བ་གཉིས་པ", "ཟླ་བ་གསུམ་པ", "ཟླ་བ་བཞི་པ", "ཟླ་བ་ལྔ་པ", "ཟླ་བ་དྲུག་པ", "ཟླ་བ་བདུན་པ", "ཟླ་བ་བརྒྱད་པ", "ཟླ་བ་དགུ་པ", "ཟླ་བ་བཅུ་པ", "ཟླ་བ་བཅུ་གཅིག་པ", "ཟླ་བ་བཅུ་གཉིས་པ"}, + {"སྔ་དྲོ་", "ཕྱི་དྲོ་"}, +} + +var localeTableBoIN = [5][]string{ + {"ཉི་མ་", "ཟླ་བ་", "མིག་དམར་", "ལྷག་པ་", "ཕུར་བུ་", "པ་སངས་", "སྤེན་པ་"}, + {"གཟའ་ཉི་མ་", "གཟའ་ཟླ་བ་", "གཟའ་མིག་དམར་", "གཟའ་ལྷག་པ་", "གཟའ་ཕུར་བུ་", "གཟའ་པ་སངས་", "གཟའ་སྤེན་པ་"}, + {"ཟླ་༡", "ཟླ་༢", "ཟླ་༣", "ཟླ་༤", "ཟླ་༥", "ཟླ་༦", "ཟླ་༧", "ཟླ་༨", "ཟླ་༩", "ཟླ་༡༠", "ཟླ་༡༡", "ཟླ་༡༢"}, + {"ཟླ་བ་དང་པོ", "ཟླ་བ་གཉིས་པ", "ཟླ་བ་གསུམ་པ", "ཟླ་བ་བཞི་པ", "ཟླ་བ་ལྔ་པ", "ཟླ་བ་དྲུག་པ", "ཟླ་བ་བདུན་པ", "ཟླ་བ་བརྒྱད་པ", "ཟླ་བ་དགུ་པ", "ཟླ་བ་བཅུ་པ", "ཟླ་བ་བཅུ་གཅིག་པ", "ཟླ་བ་བཅུ་གཉིས་པ"}, + {"སྔ་དྲོ་", "ཕྱི་དྲོ་"}, +} + +var localeTableBr = [5][]string{ + {"Sul", "Lun", "Meu.", "Mer.", "Yaou", "Gwe.", "Sad."}, + {"Sul", "Lun", "Meurzh", "Mercʼher", "Yaou", "Gwener", "Sadorn"}, + {"Gen.", "Cʼhwe.", "Meur.", "Ebr.", "Mae", "Mezh.", "Goue.", "Eost", "Gwen.", "Here", "Du", "Kzu."}, + {"Genver", "Cʼhwevrer", "Meurzh", "Ebrel", "Mae", "Mezheven", "Gouere", "Eost", "Gwengolo", "Here", "Du", "Kerzu"}, + {"A.M.", "G.M."}, +} + +var localeTableBrFR = [5][]string{ + {"Sul", "Lun", "Meu.", "Mer.", "Yaou", "Gwe.", "Sad."}, + {"Sul", "Lun", "Meurzh", "Mercʼher", "Yaou", "Gwener", "Sadorn"}, + {"Gen.", "Cʼhwe.", "Meur.", "Ebr.", "Mae", "Mezh.", "Goue.", "Eost", "Gwen.", "Here", "Du", "Kzu."}, + {"Genver", "Cʼhwevrer", "Meurzh", "Ebrel", "Mae", "Mezheven", "Gouere", "Eost", "Gwengolo", "Here", "Du", "Kerzu"}, + {"A.M.", "G.M."}, +} + +var localeTableBrx = [5][]string{ + {"रबि", "सम", "मंगल", "बुध", "बिस्थि", "सुखुर", "सनि"}, + {"रबिबार", "समबार", "मंगलबार", "बुधबार", "बिस्थिबार", "सुखुरबार", "सनिबार"}, + {"जान", "फेब", "मार्च", "एप्रि", "मे", "जुन", "जुल", "आग", "सेप", "अक्ट’", "नवे", "डिसे"}, + {"जानुवारी", "फेब्रूवारी", "मार्च", "एप्रिल", "मे", "जुन", "जुलाई", "आगष्ट", "सेप्थेम्बर", "अक्ट’बर", "नवेम्बर", "डिसेम्बर"}, + {"फुं", "बेलासे"}, +} + +var localeTableBrxIN = [5][]string{ + {"रबि", "सम", "मंगल", "बुध", "बिस्थि", "सुखुर", "सनि"}, + {"रबिबार", "समबार", "मंगलबार", "बुधबार", "बिस्थिबार", "सुखुरबार", "सनिबार"}, + {"जान", "फेब", "मार्च", "एप्रि", "मे", "जुन", "जुल", "आग", "सेप", "अक्ट’", "नवे", "डिसे"}, + {"जानुवारी", "फेब्रूवारी", "मार्च", "एप्रिल", "मे", "जुन", "जुलाई", "आगष्ट", "सेप्थेम्बर", "अक्ट’बर", "नवेम्बर", "डिसेम्बर"}, + {"फुं", "बेलासे"}, +} + +var localeTableBs = [5][]string{ + {"ned", "pon", "uto", "sri", "čet", "pet", "sub"}, + {"nedjelja", 
"ponedjeljak", "utorak", "srijeda", "četvrtak", "petak", "subota"}, + {"jan", "feb", "mar", "apr", "maj", "jun", "jul", "aug", "sep", "okt", "nov", "dec"}, + {"januar", "februar", "mart", "april", "maj", "juni", "juli", "august", "septembar", "oktobar", "novembar", "decembar"}, + {"prijepodne", "popodne"}, +} + +var localeTableBsCyrl = [5][]string{ + {"нед", "пон", "уто", "сри", "чет", "пет", "суб"}, + {"недјеља", "понедјељак", "уторак", "сриједа", "четвртак", "петак", "субота"}, + {"јан", "феб", "мар", "апр", "мај", "јун", "јул", "ауг", "сеп", "окт", "нов", "дец"}, + {"јануар", "фебруар", "март", "април", "мај", "јуни", "јули", "аугуст", "септембар", "октобар", "новембар", "децембар"}, + {"преподне", "поподне"}, +} + +var localeTableBsCyrlBA = [5][]string{ + {"нед", "пон", "уто", "сри", "чет", "пет", "суб"}, + {"недјеља", "понедјељак", "уторак", "сриједа", "четвртак", "петак", "субота"}, + {"јан", "феб", "мар", "апр", "мај", "јун", "јул", "ауг", "сеп", "окт", "нов", "дец"}, + {"јануар", "фебруар", "март", "април", "мај", "јуни", "јули", "аугуст", "септембар", "октобар", "новембар", "децембар"}, + {"преподне", "поподне"}, +} + +var localeTableBsLatn = [5][]string{ + {"ned", "pon", "uto", "sri", "čet", "pet", "sub"}, + {"nedjelja", "ponedjeljak", "utorak", "srijeda", "četvrtak", "petak", "subota"}, + {"jan", "feb", "mar", "apr", "maj", "jun", "jul", "aug", "sep", "okt", "nov", "dec"}, + {"januar", "februar", "mart", "april", "maj", "juni", "juli", "august", "septembar", "oktobar", "novembar", "decembar"}, + {"prijepodne", "popodne"}, +} + +var localeTableBsLatnBA = [5][]string{ + {"ned", "pon", "uto", "sri", "čet", "pet", "sub"}, + {"nedjelja", "ponedjeljak", "utorak", "srijeda", "četvrtak", "petak", "subota"}, + {"jan", "feb", "mar", "apr", "maj", "jun", "jul", "aug", "sep", "okt", "nov", "dec"}, + {"januar", "februar", "mart", "april", "maj", "juni", "juli", "august", "septembar", "oktobar", "novembar", "decembar"}, + {"prijepodne", "popodne"}, +} + +var localeTableByn = [5][]string{ + {"ሰ/ቅ", "ሰኑ", "ሰሊጝ", "ለጓ", "ኣምድ", "ኣርብ", "ሰ/ሽ"}, + {"ሰንበር ቅዳዅ", "ሰኑ", "ሰሊጝ", "ለጓ ወሪ ለብዋ", "ኣምድ", "ኣርብ", "ሰንበር ሽጓዅ"}, + {"ልደት", "ካብኽ", "ክብላ", "ፋጅኺ", "ክቢቅ", "ም/ት", "ኰር", "ማርያ", "ያኸኒ", "መተሉ", "ም/ም", "ተሕሳ"}, + {"ልደትሪ", "ካብኽብቲ", "ክብላ", "ፋጅኺሪ", "ክቢቅሪ", "ምኪኤል ትጟኒሪ", "ኰርኩ", "ማርያም ትሪ", "ያኸኒ መሳቅለሪ", "መተሉ", "ምኪኤል መሽወሪ", "ተሕሳስሪ"}, + {"ፋዱስጃብ", "ፋዱስደምቢ"}, +} + +var localeTableBynER = [5][]string{ + {"ሰ/ቅ", "ሰኑ", "ሰሊጝ", "ለጓ", "ኣምድ", "ኣርብ", "ሰ/ሽ"}, + {"ሰንበር ቅዳዅ", "ሰኑ", "ሰሊጝ", "ለጓ ወሪ ለብዋ", "ኣምድ", "ኣርብ", "ሰንበር ሽጓዅ"}, + {"ልደት", "ካብኽ", "ክብላ", "ፋጅኺ", "ክቢቅ", "ም/ት", "ኰር", "ማርያ", "ያኸኒ", "መተሉ", "ም/ም", "ተሕሳ"}, + {"ልደትሪ", "ካብኽብቲ", "ክብላ", "ፋጅኺሪ", "ክቢቅሪ", "ምኪኤል ትጟኒሪ", "ኰርኩ", "ማርያም ትሪ", "ያኸኒ መሳቅለሪ", "መተሉ", "ምኪኤል መሽወሪ", "ተሕሳስሪ"}, + {"ፋዱስጃብ", "ፋዱስደምቢ"}, +} + +var localeTableCa = [5][]string{ + {"dg.", "dl.", "dt.", "dc.", "dj.", "dv.", "ds."}, + {"diumenge", "dilluns", "dimarts", "dimecres", "dijous", "divendres", "dissabte"}, + {"de gen.", "de febr.", "de març", "d’abr.", "de maig", "de juny", "de jul.", "d’ag.", "de set.", "d’oct.", "de nov.", "de des."}, + {"de gener", "de febrer", "de març", "d’abril", "de maig", "de juny", "de juliol", "d’agost", "de setembre", "d’octubre", "de novembre", "de desembre"}, + {"a.m.", "p.m."}, +} + +var localeTableCaAD = [5][]string{ + {"dg.", "dl.", "dt.", "dc.", "dj.", "dv.", "ds."}, + {"diumenge", "dilluns", "dimarts", "dimecres", "dijous", "divendres", "dissabte"}, + {"de gen.", "de febr.", "de març", "d’abr.", "de maig", "de juny", "de jul.", "d’ag.", "de set.", "d’oct.", "de nov.", "de des."}, + 
{"de gener", "de febrer", "de març", "d’abril", "de maig", "de juny", "de juliol", "d’agost", "de setembre", "d’octubre", "de novembre", "de desembre"}, + {"a.m.", "p.m."}, +} + +var localeTableCaES = [5][]string{ + {"dg.", "dl.", "dt.", "dc.", "dj.", "dv.", "ds."}, + {"diumenge", "dilluns", "dimarts", "dimecres", "dijous", "divendres", "dissabte"}, + {"de gen.", "de febr.", "de març", "d’abr.", "de maig", "de juny", "de jul.", "d’ag.", "de set.", "d’oct.", "de nov.", "de des."}, + {"de gener", "de febrer", "de març", "d’abril", "de maig", "de juny", "de juliol", "d’agost", "de setembre", "d’octubre", "de novembre", "de desembre"}, + {"a.m.", "p.m."}, +} + +var localeTableCaESvalencia = [5][]string{ + {"dg.", "dl.", "dt.", "dc.", "dj.", "dv.", "ds."}, + {"diumenge", "dilluns", "dimarts", "dimecres", "dijous", "divendres", "dissabte"}, + {"de gen.", "de febr.", "de març", "d’abr.", "de maig", "de juny", "de jul.", "d’ag.", "de set.", "d’oct.", "de nov.", "de des."}, + {"de gener", "de febrer", "de març", "d’abril", "de maig", "de juny", "de juliol", "d’agost", "de setembre", "d’octubre", "de novembre", "de desembre"}, + {"a.m.", "p.m."}, +} + +var localeTableCaFR = [5][]string{ + {"dg.", "dl.", "dt.", "dc.", "dj.", "dv.", "ds."}, + {"diumenge", "dilluns", "dimarts", "dimecres", "dijous", "divendres", "dissabte"}, + {"de gen.", "de febr.", "de març", "d’abr.", "de maig", "de juny", "de jul.", "d’ag.", "de set.", "d’oct.", "de nov.", "de des."}, + {"de gener", "de febrer", "de març", "d’abril", "de maig", "de juny", "de juliol", "d’agost", "de setembre", "d’octubre", "de novembre", "de desembre"}, + {"a.m.", "p.m."}, +} + +var localeTableCaIT = [5][]string{ + {"dg.", "dl.", "dt.", "dc.", "dj.", "dv.", "ds."}, + {"diumenge", "dilluns", "dimarts", "dimecres", "dijous", "divendres", "dissabte"}, + {"de gen.", "de febr.", "de març", "d’abr.", "de maig", "de juny", "de jul.", "d’ag.", "de set.", "d’oct.", "de nov.", "de des."}, + {"de gener", "de febrer", "de març", "d’abril", "de maig", "de juny", "de juliol", "d’agost", "de setembre", "d’octubre", "de novembre", "de desembre"}, + {"a.m.", "p.m."}, +} + +var localeTableCad = [5][]string{ + {}, + {"Inikuˀ", "Wísts’i hayashuh", "Bít hayashuh", "Dahó hayashuh", "Hiwí hayashuh", "Dissik’an hayashuh", "Inikuˀtiˀtiˀ"}, + {}, + {"Cháykáhday Haˀimay", "Tsahkápbiˀ", "Wánit", "Háshnihtiˀtiˀ", "Háshnih Haˀimay", "Háshnihtsiˀ", "Násˀahˀatsus", "Dahósikah nish", "Híisikah nish", "Nípbaatiˀtiˀ", "Nípbaa Haˀimay", "Cháykáhdaytiˀtiˀ"}, + {}, +} + +var localeTableCadUS = [5][]string{ + {}, + {"Inikuˀ", "Wísts’i hayashuh", "Bít hayashuh", "Dahó hayashuh", "Hiwí hayashuh", "Dissik’an hayashuh", "Inikuˀtiˀtiˀ"}, + {}, + {"Cháykáhday Haˀimay", "Tsahkápbiˀ", "Wánit", "Háshnihtiˀtiˀ", "Háshnih Haˀimay", "Háshnihtsiˀ", "Násˀahˀatsus", "Dahósikah nish", "Híisikah nish", "Nípbaatiˀtiˀ", "Nípbaa Haˀimay", "Cháykáhdaytiˀtiˀ"}, + {}, +} + +var localeTableCch = [5][]string{ + {"Yok", "Tung", "Gitung", "Tsan", "Nas", "Nat", "Chir"}, + {"Wai Yoka Bawai", "Wai Tunga", "Toki Gitung", "Tsam Kasuwa", "Wai Na Nas", "Wai Na Tiyon", "Wai Na Chirim"}, + {"Dyon", "Baa", "Atat", "Anas", "Atyo", "Achi", "Atar", "Awur", "Shad", "Shak", "Naba", "Nata"}, + {"Pen Dyon", "Pen Baʼa", "Pen Atat", "Pen Anas", "Pen Atyon", "Pen Achirim", "Pen Atariba", "Pen Awurr", "Pen Shadon", "Pen Shakur", "Pen Kur Naba", "Pen Kur Natat"}, + {}, +} + +var localeTableCchNG = [5][]string{ + {"Yok", "Tung", "Gitung", "Tsan", "Nas", "Nat", "Chir"}, + {"Wai Yoka Bawai", "Wai Tunga", "Toki Gitung", "Tsam Kasuwa", 
"Wai Na Nas", "Wai Na Tiyon", "Wai Na Chirim"}, + {"Dyon", "Baa", "Atat", "Anas", "Atyo", "Achi", "Atar", "Awur", "Shad", "Shak", "Naba", "Nata"}, + {"Pen Dyon", "Pen Baʼa", "Pen Atat", "Pen Anas", "Pen Atyon", "Pen Achirim", "Pen Atariba", "Pen Awurr", "Pen Shadon", "Pen Shakur", "Pen Kur Naba", "Pen Kur Natat"}, + {}, +} + +var localeTableCcp = [5][]string{ + {"𑄢𑄧𑄝𑄨", "𑄥𑄧𑄟𑄴", "𑄟𑄧𑄁𑄉𑄧𑄣𑄴", "𑄝𑄪𑄖𑄴", "𑄝𑄳𑄢𑄨𑄥𑄪𑄛𑄴", "𑄥𑄪𑄇𑄴𑄇𑄮𑄢𑄴", "𑄥𑄧𑄚𑄨"}, + {"𑄢𑄧𑄝𑄨𑄝𑄢𑄴", "𑄥𑄧𑄟𑄴𑄝𑄢𑄴", "𑄟𑄧𑄁𑄉𑄧𑄣𑄴𑄝𑄢𑄴", "𑄝𑄪𑄖𑄴𑄝𑄢𑄴", "𑄝𑄳𑄢𑄨𑄥𑄪𑄛𑄴𑄝𑄢𑄴", "𑄥𑄪𑄇𑄴𑄇𑄮𑄢𑄴𑄝𑄢𑄴", "𑄥𑄧𑄚𑄨𑄝𑄢𑄴"}, + {"𑄎𑄚𑄪", "𑄜𑄬𑄛𑄴", "𑄟𑄢𑄴𑄌𑄧", "𑄃𑄬𑄛𑄳𑄢𑄨𑄣𑄴", "𑄟𑄬", "𑄎𑄪𑄚𑄴", "𑄎𑄪𑄣𑄭", "𑄃𑄉𑄧𑄌𑄴𑄑𑄴", "𑄥𑄬𑄛𑄴𑄑𑄬𑄟𑄴𑄝𑄧𑄢𑄴", "𑄃𑄧𑄇𑄴𑄑𑄮𑄝𑄧𑄢𑄴", "𑄚𑄧𑄞𑄬𑄟𑄴𑄝𑄧𑄢𑄴", "𑄓𑄨𑄥𑄬𑄟𑄴𑄝𑄢𑄴"}, + {"𑄎𑄚𑄪𑄠𑄢𑄨", "𑄜𑄬𑄛𑄴𑄝𑄳𑄢𑄪𑄠𑄢𑄨", "𑄟𑄢𑄴𑄌𑄧", "𑄃𑄬𑄛𑄳𑄢𑄨𑄣𑄴", "𑄟𑄬", "𑄎𑄪𑄚𑄴", "𑄎𑄪𑄣𑄭", "𑄃𑄉𑄧𑄌𑄴𑄑𑄴", "𑄥𑄬𑄛𑄴𑄑𑄬𑄟𑄴𑄝𑄧𑄢𑄴", "𑄃𑄧𑄇𑄴𑄑𑄬𑄝𑄧𑄢𑄴", "𑄚𑄧𑄞𑄬𑄟𑄴𑄝𑄧𑄢𑄴", "𑄓𑄨𑄥𑄬𑄟𑄴𑄝𑄧𑄢𑄴"}, + {}, +} + +var localeTableCcpBD = [5][]string{ + {"𑄢𑄧𑄝𑄨", "𑄥𑄧𑄟𑄴", "𑄟𑄧𑄁𑄉𑄧𑄣𑄴", "𑄝𑄪𑄖𑄴", "𑄝𑄳𑄢𑄨𑄥𑄪𑄛𑄴", "𑄥𑄪𑄇𑄴𑄇𑄮𑄢𑄴", "𑄥𑄧𑄚𑄨"}, + {"𑄢𑄧𑄝𑄨𑄝𑄢𑄴", "𑄥𑄧𑄟𑄴𑄝𑄢𑄴", "𑄟𑄧𑄁𑄉𑄧𑄣𑄴𑄝𑄢𑄴", "𑄝𑄪𑄖𑄴𑄝𑄢𑄴", "𑄝𑄳𑄢𑄨𑄥𑄪𑄛𑄴𑄝𑄢𑄴", "𑄥𑄪𑄇𑄴𑄇𑄮𑄢𑄴𑄝𑄢𑄴", "𑄥𑄧𑄚𑄨𑄝𑄢𑄴"}, + {"𑄎𑄚𑄪", "𑄜𑄬𑄛𑄴", "𑄟𑄢𑄴𑄌𑄧", "𑄃𑄬𑄛𑄳𑄢𑄨𑄣𑄴", "𑄟𑄬", "𑄎𑄪𑄚𑄴", "𑄎𑄪𑄣𑄭", "𑄃𑄉𑄧𑄌𑄴𑄑𑄴", "𑄥𑄬𑄛𑄴𑄑𑄬𑄟𑄴𑄝𑄧𑄢𑄴", "𑄃𑄧𑄇𑄴𑄑𑄮𑄝𑄧𑄢𑄴", "𑄚𑄧𑄞𑄬𑄟𑄴𑄝𑄧𑄢𑄴", "𑄓𑄨𑄥𑄬𑄟𑄴𑄝𑄢𑄴"}, + {"𑄎𑄚𑄪𑄠𑄢𑄨", "𑄜𑄬𑄛𑄴𑄝𑄳𑄢𑄪𑄠𑄢𑄨", "𑄟𑄢𑄴𑄌𑄧", "𑄃𑄬𑄛𑄳𑄢𑄨𑄣𑄴", "𑄟𑄬", "𑄎𑄪𑄚𑄴", "𑄎𑄪𑄣𑄭", "𑄃𑄉𑄧𑄌𑄴𑄑𑄴", "𑄥𑄬𑄛𑄴𑄑𑄬𑄟𑄴𑄝𑄧𑄢𑄴", "𑄃𑄧𑄇𑄴𑄑𑄬𑄝𑄧𑄢𑄴", "𑄚𑄧𑄞𑄬𑄟𑄴𑄝𑄧𑄢𑄴", "𑄓𑄨𑄥𑄬𑄟𑄴𑄝𑄧𑄢𑄴"}, + {}, +} + +var localeTableCcpIN = [5][]string{ + {"𑄢𑄧𑄝𑄨", "𑄥𑄧𑄟𑄴", "𑄟𑄧𑄁𑄉𑄧𑄣𑄴", "𑄝𑄪𑄖𑄴", "𑄝𑄳𑄢𑄨𑄥𑄪𑄛𑄴", "𑄥𑄪𑄇𑄴𑄇𑄮𑄢𑄴", "𑄥𑄧𑄚𑄨"}, + {"𑄢𑄧𑄝𑄨𑄝𑄢𑄴", "𑄥𑄧𑄟𑄴𑄝𑄢𑄴", "𑄟𑄧𑄁𑄉𑄧𑄣𑄴𑄝𑄢𑄴", "𑄝𑄪𑄖𑄴𑄝𑄢𑄴", "𑄝𑄳𑄢𑄨𑄥𑄪𑄛𑄴𑄝𑄢𑄴", "𑄥𑄪𑄇𑄴𑄇𑄮𑄢𑄴𑄝𑄢𑄴", "𑄥𑄧𑄚𑄨𑄝𑄢𑄴"}, + {"𑄎𑄚𑄪", "𑄜𑄬𑄛𑄴", "𑄟𑄢𑄴𑄌𑄧", "𑄃𑄬𑄛𑄳𑄢𑄨𑄣𑄴", "𑄟𑄬", "𑄎𑄪𑄚𑄴", "𑄎𑄪𑄣𑄭", "𑄃𑄉𑄧𑄌𑄴𑄑𑄴", "𑄥𑄬𑄛𑄴𑄑𑄬𑄟𑄴𑄝𑄧𑄢𑄴", "𑄃𑄧𑄇𑄴𑄑𑄮𑄝𑄧𑄢𑄴", "𑄚𑄧𑄞𑄬𑄟𑄴𑄝𑄧𑄢𑄴", "𑄓𑄨𑄥𑄬𑄟𑄴𑄝𑄢𑄴"}, + {"𑄎𑄚𑄪𑄠𑄢𑄨", "𑄜𑄬𑄛𑄴𑄝𑄳𑄢𑄪𑄠𑄢𑄨", "𑄟𑄢𑄴𑄌𑄧", "𑄃𑄬𑄛𑄳𑄢𑄨𑄣𑄴", "𑄟𑄬", "𑄎𑄪𑄚𑄴", "𑄎𑄪𑄣𑄭", "𑄃𑄉𑄧𑄌𑄴𑄑𑄴", "𑄥𑄬𑄛𑄴𑄑𑄬𑄟𑄴𑄝𑄧𑄢𑄴", "𑄃𑄧𑄇𑄴𑄑𑄬𑄝𑄧𑄢𑄴", "𑄚𑄧𑄞𑄬𑄟𑄴𑄝𑄧𑄢𑄴", "𑄓𑄨𑄥𑄬𑄟𑄴𑄝𑄧𑄢𑄴"}, + {}, +} + +var localeTableCe = [5][]string{ + {"кӀи", "ор", "ши", "кха", "еа", "пӀе", "шуо"}, + {"кӀира", "оршот", "шинара", "кхаара", "еара", "пӀераска", "шуот"}, + {"янв", "фев", "мар", "апр", "май", "июн", "июл", "авг", "сен", "окт", "ноя", "дек"}, + {"январь", "февраль", "март", "апрель", "май", "июнь", "июль", "август", "сентябрь", "октябрь", "ноябрь", "декабрь"}, + {}, +} + +var localeTableCeRU = [5][]string{ + {"кӀи", "ор", "ши", "кха", "еа", "пӀе", "шуо"}, + {"кӀира", "оршот", "шинара", "кхаара", "еара", "пӀераска", "шуот"}, + {"янв", "фев", "мар", "апр", "май", "июн", "июл", "авг", "сен", "окт", "ноя", "дек"}, + {"январь", "февраль", "март", "апрель", "май", "июнь", "июль", "август", "сентябрь", "октябрь", "ноябрь", "декабрь"}, + {}, +} + +var localeTableCeb = [5][]string{ + {"Dom", "Lun", "Mar", "Miy", "Huw", "Biy", "Sab"}, + {"Domingo", "Lunes", "Martes", "Miyerkules", "Huwebes", "Biyernes", "Sabado"}, + {"Ene", "Peb", "Mar", "Abr", "May", "Hun", "Hul", "Ago", "Sep", "Okt", "Nob", "Dis"}, + {"Enero", "Pebrero", "Marso", "Abril", "Mayo", "Hunyo", "Hulyo", "Agosto", "Septiyembre", "Oktubre", "Nobyembre", "Disyembre"}, + {"a", "p"}, +} + +var localeTableCebPH = [5][]string{ + {"Dom", "Lun", "Mar", "Miy", "Huw", "Biy", "Sab"}, + {"Domingo", "Lunes", "Martes", "Miyerkules", "Huwebes", "Biyernes", "Sabado"}, + {"Ene", "Peb", "Mar", "Abr", "May", "Hun", "Hul", "Ago", "Sep", "Okt", "Nob", "Dis"}, + {"Enero", "Pebrero", "Marso", "Abril", "Mayo", "Hunyo", "Hulyo", "Agosto", "Septiyembre", "Oktubre", "Nobyembre", "Disyembre"}, + {"a", "p"}, +} + +var localeTableCgg = [5][]string{ + {"SAN", "ORK", "OKB", "OKS", "OKN", "OKT", "OMK"}, + {"Sande", "Orwokubanza", "Orwakabiri", "Orwakashatu", "Orwakana", "Orwakataano", "Orwamukaaga"}, + {"KBZ", "KBR", "KST", "KKN", 
"KTN", "KMK", "KMS", "KMN", "KMW", "KKM", "KNK", "KNB"}, + {"Okwokubanza", "Okwakabiri", "Okwakashatu", "Okwakana", "Okwakataana", "Okwamukaaga", "Okwamushanju", "Okwamunaana", "Okwamwenda", "Okwaikumi", "Okwaikumi na kumwe", "Okwaikumi na ibiri"}, + {}, +} + +var localeTableCggUG = [5][]string{ + {"SAN", "ORK", "OKB", "OKS", "OKN", "OKT", "OMK"}, + {"Sande", "Orwokubanza", "Orwakabiri", "Orwakashatu", "Orwakana", "Orwakataano", "Orwamukaaga"}, + {"KBZ", "KBR", "KST", "KKN", "KTN", "KMK", "KMS", "KMN", "KMW", "KKM", "KNK", "KNB"}, + {"Okwokubanza", "Okwakabiri", "Okwakashatu", "Okwakana", "Okwakataana", "Okwamukaaga", "Okwamushanju", "Okwamunaana", "Okwamwenda", "Okwaikumi", "Okwaikumi na kumwe", "Okwaikumi na ibiri"}, + {}, +} + +var localeTableChr = [5][]string{ + {"ᏆᏍᎬ", "ᏉᏅᎯ", "ᏔᎵᏁ", "ᏦᎢᏁ", "ᏅᎩᏁ", "ᏧᎾᎩ", "ᏈᏕᎾ"}, + {"ᎤᎾᏙᏓᏆᏍᎬ", "ᎤᎾᏙᏓᏉᏅᎯ", "ᏔᎵᏁᎢᎦ", "ᏦᎢᏁᎢᎦ", "ᏅᎩᏁᎢᎦ", "ᏧᎾᎩᎶᏍᏗ", "ᎤᎾᏙᏓᏈᏕᎾ"}, + {"ᎤᏃ", "ᎧᎦ", "ᎠᏅ", "ᎧᏬ", "ᎠᏂ", "ᏕᎭ", "ᎫᏰ", "ᎦᎶ", "ᏚᎵ", "ᏚᏂ", "ᏅᏓ", "ᎥᏍ"}, + {"ᎤᏃᎸᏔᏅ", "ᎧᎦᎵ", "ᎠᏅᏱ", "ᎧᏬᏂ", "ᎠᏂᏍᎬᏘ", "ᏕᎭᎷᏱ", "ᎫᏰᏉᏂ", "ᎦᎶᏂ", "ᏚᎵᏍᏗ", "ᏚᏂᏅᏗ", "ᏅᏓᏕᏆ", "ᎥᏍᎩᏱ"}, + {"ᏌᎾᎴ", "ᏒᎯᏱᎢ"}, +} + +var localeTableChrUS = [5][]string{ + {"ᏆᏍᎬ", "ᏉᏅᎯ", "ᏔᎵᏁ", "ᏦᎢᏁ", "ᏅᎩᏁ", "ᏧᎾᎩ", "ᏈᏕᎾ"}, + {"ᎤᎾᏙᏓᏆᏍᎬ", "ᎤᎾᏙᏓᏉᏅᎯ", "ᏔᎵᏁᎢᎦ", "ᏦᎢᏁᎢᎦ", "ᏅᎩᏁᎢᎦ", "ᏧᎾᎩᎶᏍᏗ", "ᎤᎾᏙᏓᏈᏕᎾ"}, + {"ᎤᏃ", "ᎧᎦ", "ᎠᏅ", "ᎧᏬ", "ᎠᏂ", "ᏕᎭ", "ᎫᏰ", "ᎦᎶ", "ᏚᎵ", "ᏚᏂ", "ᏅᏓ", "ᎥᏍ"}, + {"ᎤᏃᎸᏔᏅ", "ᎧᎦᎵ", "ᎠᏅᏱ", "ᎧᏬᏂ", "ᎠᏂᏍᎬᏘ", "ᏕᎭᎷᏱ", "ᎫᏰᏉᏂ", "ᎦᎶᏂ", "ᏚᎵᏍᏗ", "ᏚᏂᏅᏗ", "ᏅᏓᏕᏆ", "ᎥᏍᎩᏱ"}, + {"ᏌᎾᎴ", "ᏒᎯᏱᎢ"}, +} + +var localeTableCic = [5][]string{ + {}, + {"Nittak Holloʼ", "Mantiʼ", "Chostiʼ", "Winstiʼ", "Soistiʼ", "Nannalhchifaʼ Nittak", "Nittak Holloʼ Nakfish"}, + {}, + {"Hashiʼ Ammoʼnaʼ", "Hashiʼ Atokloʼ", "Hashiʼ Atochchíʼnaʼ", "Iiplal", "Mih", "Choon", "Choola", "Akaas", "Siptimpaʼ", "Aaktopaʼ", "Nofimpaʼ", "Tiisimpaʼ"}, + {}, +} + +var localeTableCicUS = [5][]string{ + {}, + {"Nittak Holloʼ", "Mantiʼ", "Chostiʼ", "Winstiʼ", "Soistiʼ", "Nannalhchifaʼ Nittak", "Nittak Holloʼ Nakfish"}, + {}, + {"Hashiʼ Ammoʼnaʼ", "Hashiʼ Atokloʼ", "Hashiʼ Atochchíʼnaʼ", "Iiplal", "Mih", "Choon", "Choola", "Akaas", "Siptimpaʼ", "Aaktopaʼ", "Nofimpaʼ", "Tiisimpaʼ"}, + {}, +} + +var localeTableCkb = [5][]string{ + {}, + {"یەکشەممە", "دووشەممە", "سێشەممە", "چوارشەممە", "پێنجشەممە", "ھەینی", "شەممە"}, + {}, + {"کانوونی دووەم", "شوبات", "ئازار", "نیسان", "ئایار", "حوزەیران", "تەمووز", "ئاب", "ئەیلوول", "تشرینی یەکەم", "تشرینی دووەم", "کانونی یەکەم"}, + {"ب.ن", "د.ن"}, +} + +var localeTableCkbIQ = [5][]string{ + {}, + {"یەکشەممە", "دووشەممە", "سێشەممە", "چوارشەممە", "پێنجشەممە", "ھەینی", "شەممە"}, + {}, + {"کانوونی دووەم", "شوبات", "ئازار", "نیسان", "ئایار", "حوزەیران", "تەمووز", "ئاب", "ئەیلوول", "تشرینی یەکەم", "تشرینی دووەم", "کانونی یەکەم"}, + {"ب.ن", "د.ن"}, +} + +var localeTableCkbIR = [5][]string{ + {}, + {"یەکشەممە", "دووشەممە", "سێشەممە", "چوارشەممە", "پێنجشەممە", "ھەینی", "شەممە"}, + {}, + {"کانوونی دووەم", "شوبات", "ئازار", "نیسان", "ئایار", "حوزەیران", "تەمووز", "ئاب", "ئەیلوول", "تشرینی یەکەم", "تشرینی دووەم", "کانونی یەکەم"}, + {"ب.ن", "د.ن"}, +} + +var localeTableCo = [5][]string{ + {"dum.", "lun.", "mar.", "mer.", "ghj.", "ven.", "sab."}, + {"dumenica", "luni", "marti", "mercuri", "ghjovi", "venneri", "sabbatu"}, + {"ghj.", "fer.", "mar.", "apr.", "mag.", "ghju.", "lug.", "aos.", "sit.", "ott.", "nuv.", "dic."}, + {"di ghjennaghju", "di ferraghju", "di marzu", "d’aprile", "di maghju", "di ghjugnu", "di lugliu", "d’aostu", "di sittembre", "d’ottobre", "di nuvembre", "di dicembre"}, + {}, +} + +var localeTableCoFR = [5][]string{ + {"dum.", "lun.", "mar.", "mer.", "ghj.", 
"ven.", "sab."}, + {"dumenica", "luni", "marti", "mercuri", "ghjovi", "venneri", "sabbatu"}, + {"ghj.", "fer.", "mar.", "apr.", "mag.", "ghju.", "lug.", "aos.", "sit.", "ott.", "nuv.", "dic."}, + {"di ghjennaghju", "di ferraghju", "di marzu", "d’aprile", "di maghju", "di ghjugnu", "di lugliu", "d’aostu", "di sittembre", "d’ottobre", "di nuvembre", "di dicembre"}, + {}, +} + +var localeTableCs = [5][]string{ + {"ne", "po", "út", "st", "čt", "pá", "so"}, + {"neděle", "pondělí", "úterý", "středa", "čtvrtek", "pátek", "sobota"}, + {"led", "úno", "bře", "dub", "kvě", "čvn", "čvc", "srp", "zář", "říj", "lis", "pro"}, + {"ledna", "února", "března", "dubna", "května", "června", "července", "srpna", "září", "října", "listopadu", "prosince"}, + {"dop.", "odp."}, +} + +var localeTableCsCZ = [5][]string{ + {"ne", "po", "út", "st", "čt", "pá", "so"}, + {"neděle", "pondělí", "úterý", "středa", "čtvrtek", "pátek", "sobota"}, + {"led", "úno", "bře", "dub", "kvě", "čvn", "čvc", "srp", "zář", "říj", "lis", "pro"}, + {"ledna", "února", "března", "dubna", "května", "června", "července", "srpna", "září", "října", "listopadu", "prosince"}, + {"dop.", "odp."}, +} + +var localeTableCsw = [5][]string{ + {}, + {"ᐊᔭᒥᐦᐁᐃ ᑭᓯᑲᐤ", "ᐯᔭᐠᑭᓯᑲᐤ", "ᓂᓱᑭᓯᑲᐤ", "ᐊᐱᐦᑕᐘᐣ", "ᓀᐓᑭᓯᑲᐤ", "ᓂᔭᓇᓄᑭᓯᑲᐤ", "ᒪᑎᓄᐏᑭᓯᑲᐤ"}, + {}, + {"ᑭᓴᐱᓯᒼ", "ᒥᑭᓯᐏᐱᓯᒼ", "ᓂᐢᑭᐱᓯᒼ", "ᐊᓂᑭᐱᓯᒼ", "ᓴᑭᐸᑲᐏᐱᓯᒼ", "ᐸᐢᑲᐍᐦᐅᐱᓯᒼ", "ᐸᐢᑯᐏᐱᓯᒼ", "ᐅᐸᐦᐅᐏᐱᓯᒼ", "ᑕᐦᑿᑭᐱᓯᒼ", "ᐱᒪᐦᐊᒧᐏᐱᓯᒼ", "ᐊᑿᑎᓄᐏᐱᓯᒼ", "ᐸᐘᐢᒐᑲᓂᓹᐱᓯᒼ"}, + {}, +} + +var localeTableCswCA = [5][]string{ + {}, + {"ᐊᔭᒥᐦᐁᐃ ᑭᓯᑲᐤ", "ᐯᔭᐠᑭᓯᑲᐤ", "ᓂᓱᑭᓯᑲᐤ", "ᐊᐱᐦᑕᐘᐣ", "ᓀᐓᑭᓯᑲᐤ", "ᓂᔭᓇᓄᑭᓯᑲᐤ", "ᒪᑎᓄᐏᑭᓯᑲᐤ"}, + {}, + {"ᑭᓴᐱᓯᒼ", "ᒥᑭᓯᐏᐱᓯᒼ", "ᓂᐢᑭᐱᓯᒼ", "ᐊᓂᑭᐱᓯᒼ", "ᓴᑭᐸᑲᐏᐱᓯᒼ", "ᐸᐢᑲᐍᐦᐅᐱᓯᒼ", "ᐸᐢᑯᐏᐱᓯᒼ", "ᐅᐸᐦᐅᐏᐱᓯᒼ", "ᑕᐦᑿᑭᐱᓯᒼ", "ᐱᒪᐦᐊᒧᐏᐱᓯᒼ", "ᐊᑿᑎᓄᐏᐱᓯᒼ", "ᐸᐘᐢᒐᑲᓂᓹᐱᓯᒼ"}, + {}, +} + +var localeTableCu = [5][]string{ + {"ндⷧ҇ѧ", "пнⷣе", "втоⷬ҇", "срⷣе", "чеⷦ҇", "пѧⷦ҇", "сꙋⷠ҇"}, + {"недѣ́лѧ", "понедѣ́льникъ", "вто́рникъ", "среда̀", "четверто́къ", "пѧто́къ", "сꙋббѡ́та"}, + {"і҆аⷩ҇", "феⷡ҇", "маⷬ҇", "а҆пⷬ҇", "маꙵ", "і҆ꙋⷩ҇", "і҆ꙋⷧ҇", "а҆́ѵⷢ҇", "сеⷫ҇", "ѻ҆кⷮ", "ноеⷨ", "деⷦ҇"}, + {"і҆аннꙋа́рїа", "феврꙋа́рїа", "ма́рта", "а҆прі́ллїа", "ма́їа", "і҆ꙋ́нїа", "і҆ꙋ́лїа", "а҆́ѵгꙋста", "септе́мврїа", "ѻ҆ктѡ́врїа", "ное́мврїа", "деке́мврїа"}, + {"ДП", "ПП"}, +} + +var localeTableCuRU = [5][]string{ + {"ндⷧ҇ѧ", "пнⷣе", "втоⷬ҇", "срⷣе", "чеⷦ҇", "пѧⷦ҇", "сꙋⷠ҇"}, + {"недѣ́лѧ", "понедѣ́льникъ", "вто́рникъ", "среда̀", "четверто́къ", "пѧто́къ", "сꙋббѡ́та"}, + {"і҆аⷩ҇", "феⷡ҇", "маⷬ҇", "а҆пⷬ҇", "маꙵ", "і҆ꙋⷩ҇", "і҆ꙋⷧ҇", "а҆́ѵⷢ҇", "сеⷫ҇", "ѻ҆кⷮ", "ноеⷨ", "деⷦ҇"}, + {"і҆аннꙋа́рїа", "феврꙋа́рїа", "ма́рта", "а҆прі́ллїа", "ма́їа", "і҆ꙋ́нїа", "і҆ꙋ́лїа", "а҆́ѵгꙋста", "септе́мврїа", "ѻ҆ктѡ́врїа", "ное́мврїа", "деке́мврїа"}, + {"ДП", "ПП"}, +} + +var localeTableCv = [5][]string{ + {"выр.", "тун.", "ытл.", "юн.", "кӗҫ.", "эр.", "шӑм."}, + {"вырсарникун", "тунтикун", "ытларикун", "юнкун", "кӗҫнерникун", "эрнекун", "шӑматкун"}, + {"кӑр.", "нар.", "пуш", "ака", "ҫу", "ҫӗр.", "утӑ", "ҫур.", "авӑн", "юпа", "чӳк", "раш."}, + {"кӑрлач", "нарӑс", "пуш", "ака", "ҫу", "ҫӗртме", "утӑ", "ҫурла", "авӑн", "юпа", "чӳк", "раштав"}, + {}, +} + +var localeTableCvRU = [5][]string{ + {"выр.", "тун.", "ытл.", "юн.", "кӗҫ.", "эр.", "шӑм."}, + {"вырсарникун", "тунтикун", "ытларикун", "юнкун", "кӗҫнерникун", "эрнекун", "шӑматкун"}, + {"кӑр.", "нар.", "пуш", "ака", "ҫу", "ҫӗр.", "утӑ", "ҫур.", "авӑн", "юпа", "чӳк", "раш."}, + {"кӑрлач", "нарӑс", "пуш", "ака", "ҫу", "ҫӗртме", "утӑ", "ҫурла", "авӑн", "юпа", "чӳк", "раштав"}, + {}, +} + +var localeTableCy = [5][]string{ + {"Sul", "Llun", "Maw", "Mer", "Iau", "Gwen", "Sad"}, + {"Dydd 
Sul", "Dydd Llun", "Dydd Mawrth", "Dydd Mercher", "Dydd Iau", "Dydd Gwener", "Dydd Sadwrn"}, + {"Ion", "Chwef", "Maw", "Ebr", "Mai", "Meh", "Gorff", "Awst", "Medi", "Hyd", "Tach", "Rhag"}, + {"Ionawr", "Chwefror", "Mawrth", "Ebrill", "Mai", "Mehefin", "Gorffennaf", "Awst", "Medi", "Hydref", "Tachwedd", "Rhagfyr"}, + {"b", "h"}, +} + +var localeTableCyGB = [5][]string{ + {"Sul", "Llun", "Maw", "Mer", "Iau", "Gwen", "Sad"}, + {"Dydd Sul", "Dydd Llun", "Dydd Mawrth", "Dydd Mercher", "Dydd Iau", "Dydd Gwener", "Dydd Sadwrn"}, + {"Ion", "Chwef", "Maw", "Ebr", "Mai", "Meh", "Gorff", "Awst", "Medi", "Hyd", "Tach", "Rhag"}, + {"Ionawr", "Chwefror", "Mawrth", "Ebrill", "Mai", "Mehefin", "Gorffennaf", "Awst", "Medi", "Hydref", "Tachwedd", "Rhagfyr"}, + {"b", "h"}, +} + +var localeTableDa = [5][]string{ + {"søn.", "man.", "tirs.", "ons.", "tors.", "fre.", "lør."}, + {"søndag", "mandag", "tirsdag", "onsdag", "torsdag", "fredag", "lørdag"}, + {"jan.", "feb.", "mar.", "apr.", "maj", "jun.", "jul.", "aug.", "sep.", "okt.", "nov.", "dec."}, + {"januar", "februar", "marts", "april", "maj", "juni", "juli", "august", "september", "oktober", "november", "december"}, + {"a", "p"}, +} + +var localeTableDaDK = [5][]string{ + {"søn.", "man.", "tirs.", "ons.", "tors.", "fre.", "lør."}, + {"søndag", "mandag", "tirsdag", "onsdag", "torsdag", "fredag", "lørdag"}, + {"jan.", "feb.", "mar.", "apr.", "maj", "jun.", "jul.", "aug.", "sep.", "okt.", "nov.", "dec."}, + {"januar", "februar", "marts", "april", "maj", "juni", "juli", "august", "september", "oktober", "november", "december"}, + {"a", "p"}, +} + +var localeTableDaGL = [5][]string{ + {"søn.", "man.", "tirs.", "ons.", "tors.", "fre.", "lør."}, + {"søndag", "mandag", "tirsdag", "onsdag", "torsdag", "fredag", "lørdag"}, + {"jan.", "feb.", "mar.", "apr.", "maj", "jun.", "jul.", "aug.", "sep.", "okt.", "nov.", "dec."}, + {"januar", "februar", "marts", "april", "maj", "juni", "juli", "august", "september", "oktober", "november", "december"}, + {"a", "p"}, +} + +var localeTableDav = [5][]string{ + {"Jum", "Jim", "Kaw", "Kad", "Kan", "Kas", "Ngu"}, + {"Ituku ja jumwa", "Kuramuka jimweri", "Kuramuka kawi", "Kuramuka kadadu", "Kuramuka kana", "Kuramuka kasanu", "Kifula nguwo"}, + {"Imb", "Kaw", "Kad", "Kan", "Kas", "Kar", "Mfu", "Wun", "Ike", "Iku", "Imw", "Iwi"}, + {"Mori ghwa imbiri", "Mori ghwa kawi", "Mori ghwa kadadu", "Mori ghwa kana", "Mori ghwa kasanu", "Mori ghwa karandadu", "Mori ghwa mfungade", "Mori ghwa wunyanya", "Mori ghwa ikenda", "Mori ghwa ikumi", "Mori ghwa ikumi na imweri", "Mori ghwa ikumi na iwi"}, + {"LumalwaK", "lumalwap"}, +} + +var localeTableDavKE = [5][]string{ + {"Jum", "Jim", "Kaw", "Kad", "Kan", "Kas", "Ngu"}, + {"Ituku ja jumwa", "Kuramuka jimweri", "Kuramuka kawi", "Kuramuka kadadu", "Kuramuka kana", "Kuramuka kasanu", "Kifula nguwo"}, + {"Imb", "Kaw", "Kad", "Kan", "Kas", "Kar", "Mfu", "Wun", "Ike", "Iku", "Imw", "Iwi"}, + {"Mori ghwa imbiri", "Mori ghwa kawi", "Mori ghwa kadadu", "Mori ghwa kana", "Mori ghwa kasanu", "Mori ghwa karandadu", "Mori ghwa mfungade", "Mori ghwa wunyanya", "Mori ghwa ikenda", "Mori ghwa ikumi", "Mori ghwa ikumi na imweri", "Mori ghwa ikumi na iwi"}, + {"LumalwaK", "lumalwap"}, +} + +var localeTableDe = [5][]string{ + {"So.", "Mo.", "Di.", "Mi.", "Do.", "Fr.", "Sa."}, + {"Sonntag", "Montag", "Dienstag", "Mittwoch", "Donnerstag", "Freitag", "Samstag"}, + {"Jan.", "Feb.", "März", "Apr.", "Mai", "Juni", "Juli", "Aug.", "Sept.", "Okt.", "Nov.", "Dez."}, + {"Januar", "Februar", "März", "April", "Mai", "Juni", "Juli", 
"August", "September", "Oktober", "November", "Dezember"}, + {}, +} + +var localeTableDeAT = [5][]string{ + {"So.", "Mo.", "Di.", "Mi.", "Do.", "Fr.", "Sa."}, + {"Sonntag", "Montag", "Dienstag", "Mittwoch", "Donnerstag", "Freitag", "Samstag"}, + {"Jän.", "Feb.", "März", "Apr.", "Mai", "Juni", "Juli", "Aug.", "Sep.", "Okt.", "Nov.", "Dez."}, + {"Jänner", "Februar", "März", "April", "Mai", "Juni", "Juli", "August", "September", "Oktober", "November", "Dezember"}, + {}, +} + +var localeTableDeBE = [5][]string{ + {"So.", "Mo.", "Di.", "Mi.", "Do.", "Fr.", "Sa."}, + {"Sonntag", "Montag", "Dienstag", "Mittwoch", "Donnerstag", "Freitag", "Samstag"}, + {"Jan.", "Feb.", "März", "Apr.", "Mai", "Juni", "Juli", "Aug.", "Sept.", "Okt.", "Nov.", "Dez."}, + {"Januar", "Februar", "März", "April", "Mai", "Juni", "Juli", "August", "September", "Oktober", "November", "Dezember"}, + {}, +} + +var localeTableDeCH = [5][]string{ + {"So.", "Mo.", "Di.", "Mi.", "Do.", "Fr.", "Sa."}, + {"Sonntag", "Montag", "Dienstag", "Mittwoch", "Donnerstag", "Freitag", "Samstag"}, + {"Jan.", "Feb.", "März", "Apr.", "Mai", "Juni", "Juli", "Aug.", "Sept.", "Okt.", "Nov.", "Dez."}, + {"Januar", "Februar", "März", "April", "Mai", "Juni", "Juli", "August", "September", "Oktober", "November", "Dezember"}, + {}, +} + +var localeTableDeDE = [5][]string{ + {"So.", "Mo.", "Di.", "Mi.", "Do.", "Fr.", "Sa."}, + {"Sonntag", "Montag", "Dienstag", "Mittwoch", "Donnerstag", "Freitag", "Samstag"}, + {"Jan.", "Feb.", "März", "Apr.", "Mai", "Juni", "Juli", "Aug.", "Sept.", "Okt.", "Nov.", "Dez."}, + {"Januar", "Februar", "März", "April", "Mai", "Juni", "Juli", "August", "September", "Oktober", "November", "Dezember"}, + {}, +} + +var localeTableDeIT = [5][]string{ + {"So.", "Mo.", "Di.", "Mi.", "Do.", "Fr.", "Sa."}, + {"Sonntag", "Montag", "Dienstag", "Mittwoch", "Donnerstag", "Freitag", "Samstag"}, + {"Jän.", "Feb.", "März", "Apr.", "Mai", "Juni", "Juli", "Aug.", "Sep.", "Okt.", "Nov.", "Dez."}, + {"Jänner", "Februar", "März", "April", "Mai", "Juni", "Juli", "August", "September", "Oktober", "November", "Dezember"}, + {}, +} + +var localeTableDeLI = [5][]string{ + {"So.", "Mo.", "Di.", "Mi.", "Do.", "Fr.", "Sa."}, + {"Sonntag", "Montag", "Dienstag", "Mittwoch", "Donnerstag", "Freitag", "Samstag"}, + {"Jan.", "Feb.", "März", "Apr.", "Mai", "Juni", "Juli", "Aug.", "Sept.", "Okt.", "Nov.", "Dez."}, + {"Januar", "Februar", "März", "April", "Mai", "Juni", "Juli", "August", "September", "Oktober", "November", "Dezember"}, + {}, +} + +var localeTableDeLU = [5][]string{ + {"So.", "Mo.", "Di.", "Mi.", "Do.", "Fr.", "Sa."}, + {"Sonntag", "Montag", "Dienstag", "Mittwoch", "Donnerstag", "Freitag", "Samstag"}, + {"Jan.", "Feb.", "März", "Apr.", "Mai", "Juni", "Juli", "Aug.", "Sept.", "Okt.", "Nov.", "Dez."}, + {"Januar", "Februar", "März", "April", "Mai", "Juni", "Juli", "August", "September", "Oktober", "November", "Dezember"}, + {"vorm.", "nachm."}, +} + +var localeTableDje = [5][]string{ + {"Alh", "Ati", "Ata", "Ala", "Alm", "Alz", "Asi"}, + {"Alhadi", "Atinni", "Atalaata", "Alarba", "Alhamisi", "Alzuma", "Asibti"}, + {"Žan", "Fee", "Mar", "Awi", "Me", "Žuw", "Žuy", "Ut", "Sek", "Okt", "Noo", "Dee"}, + {"Žanwiye", "Feewiriye", "Marsi", "Awiril", "Me", "Žuweŋ", "Žuyye", "Ut", "Sektanbur", "Oktoobur", "Noowanbur", "Deesanbur"}, + {"Subbaahi", "Zaarikayb"}, +} + +var localeTableDjeNE = [5][]string{ + {"Alh", "Ati", "Ata", "Ala", "Alm", "Alz", "Asi"}, + {"Alhadi", "Atinni", "Atalaata", "Alarba", "Alhamisi", "Alzuma", "Asibti"}, + {"Žan", "Fee", "Mar", "Awi", 
"Me", "Žuw", "Žuy", "Ut", "Sek", "Okt", "Noo", "Dee"}, + {"Žanwiye", "Feewiriye", "Marsi", "Awiril", "Me", "Žuweŋ", "Žuyye", "Ut", "Sektanbur", "Oktoobur", "Noowanbur", "Deesanbur"}, + {"Subbaahi", "Zaarikayb"}, +} + +var localeTableDoi = [5][]string{ + {"ऐत", "सोम", "मंगल", "बुध", "बीर", "शुक्र", "शनि"}, + {"ऐतबार", "सोमबार", "मंगलबार", "बुधबार", "बीरबार", "शुक्रबार", "शनिबार"}, + {"जन.", "फर.", "मार्च", "अप्रैल", "मेई", "जून", "जुलाई", "अग.", "सित.", "अक्तू.", "नव.", "दिस."}, + {"जनवरी", "फरवरी", "मार्च", "अप्रैल", "मेई", "जून", "जुलाई", "अगस्त", "सितंबर", "अक्तूबर", "नवंबर", "दिसंबर"}, + {"सवेर", "स’ञ"}, +} + +var localeTableDoiIN = [5][]string{ + {"ऐत", "सोम", "मंगल", "बुध", "बीर", "शुक्र", "शनि"}, + {"ऐतबार", "सोमबार", "मंगलबार", "बुधबार", "बीरबार", "शुक्रबार", "शनिबार"}, + {"जन.", "फर.", "मार्च", "अप्रैल", "मेई", "जून", "जुलाई", "अग.", "सित.", "अक्तू.", "नव.", "दिस."}, + {"जनवरी", "फरवरी", "मार्च", "अप्रैल", "मेई", "जून", "जुलाई", "अगस्त", "सितंबर", "अक्तूबर", "नवंबर", "दिसंबर"}, + {"सवेर", "स’ञ"}, +} + +var localeTableDsb = [5][]string{ + {"nje", "pón", "wał", "srj", "stw", "pět", "sob"}, + {"njeźela", "pónjeźele", "wałtora", "srjoda", "stwórtk", "pětk", "sobota"}, + {"jan.", "feb.", "měr.", "apr.", "maj.", "jun.", "jul.", "awg.", "sep.", "okt.", "now.", "dec."}, + {"januara", "februara", "měrca", "apryla", "maja", "junija", "julija", "awgusta", "septembra", "oktobra", "nowembra", "decembra"}, + {"dop.", "wótp."}, +} + +var localeTableDsbDE = [5][]string{ + {"nje", "pón", "wał", "srj", "stw", "pět", "sob"}, + {"njeźela", "pónjeźele", "wałtora", "srjoda", "stwórtk", "pětk", "sobota"}, + {"jan.", "feb.", "měr.", "apr.", "maj.", "jun.", "jul.", "awg.", "sep.", "okt.", "now.", "dec."}, + {"januara", "februara", "měrca", "apryla", "maja", "junija", "julija", "awgusta", "septembra", "oktobra", "nowembra", "decembra"}, + {"dop.", "wótp."}, +} + +var localeTableDua = [5][]string{ + {"ét", "mɔ́s", "kwa", "muk", "ŋgi", "ɗón", "esa"}, + {"éti", "mɔ́sú", "kwasú", "mukɔ́sú", "ŋgisú", "ɗónɛsú", "esaɓasú"}, + {"di", "ŋgɔn", "sɔŋ", "diɓ", "emi", "esɔ", "mad", "diŋ", "nyɛt", "may", "tin", "elá"}, + {"dimɔ́di", "ŋgɔndɛ", "sɔŋɛ", "diɓáɓá", "emiasele", "esɔpɛsɔpɛ", "madiɓɛ́díɓɛ́", "diŋgindi", "nyɛtɛki", "mayésɛ́", "tiníní", "eláŋgɛ́"}, + {"idiɓa", "ebyámu"}, +} + +var localeTableDuaCM = [5][]string{ + {"ét", "mɔ́s", "kwa", "muk", "ŋgi", "ɗón", "esa"}, + {"éti", "mɔ́sú", "kwasú", "mukɔ́sú", "ŋgisú", "ɗónɛsú", "esaɓasú"}, + {"di", "ŋgɔn", "sɔŋ", "diɓ", "emi", "esɔ", "mad", "diŋ", "nyɛt", "may", "tin", "elá"}, + {"dimɔ́di", "ŋgɔndɛ", "sɔŋɛ", "diɓáɓá", "emiasele", "esɔpɛsɔpɛ", "madiɓɛ́díɓɛ́", "diŋgindi", "nyɛtɛki", "mayésɛ́", "tiníní", "eláŋgɛ́"}, + {"idiɓa", "ebyámu"}, +} + +var localeTableDyo = [5][]string{ + {"Dim", "Ten", "Tal", "Ala", "Ara", "Arj", "Sib"}, + {"Dimas", "Teneŋ", "Talata", "Alarbay", "Aramisay", "Arjuma", "Sibiti"}, + {"Sa", "Fe", "Ma", "Ab", "Me", "Su", "Sú", "Ut", "Se", "Ok", "No", "De"}, + {"Sanvie", "Fébirie", "Mars", "Aburil", "Mee", "Sueŋ", "Súuyee", "Ut", "Settembar", "Oktobar", "Novembar", "Disambar"}, + {}, +} + +var localeTableDyoSN = [5][]string{ + {"Dim", "Ten", "Tal", "Ala", "Ara", "Arj", "Sib"}, + {"Dimas", "Teneŋ", "Talata", "Alarbay", "Aramisay", "Arjuma", "Sibiti"}, + {"Sa", "Fe", "Ma", "Ab", "Me", "Su", "Sú", "Ut", "Se", "Ok", "No", "De"}, + {"Sanvie", "Fébirie", "Mars", "Aburil", "Mee", "Sueŋ", "Súuyee", "Ut", "Settembar", "Oktobar", "Novembar", "Disambar"}, + {}, +} + +var localeTableDz = [5][]string{ + {"ཟླ་", "མིར་", "ལྷག་", "ཕུར་", "སངས་", "སྤེན་", "ཉི་"}, + 
{"གཟའ་ཟླ་བ་", "གཟའ་མིག་དམར་", "གཟའ་ལྷག་པ་", "གཟའ་ཕུར་བུ་", "གཟའ་པ་སངས་", "གཟའ་སྤེན་པ་", "གཟའ་ཉི་མ་"}, + {"༡", "༢", "༣", "༤", "༥", "༦", "༧", "༨", "༩", "༡༠", "༡༡", "12"}, + {"ཟླ་དངཔ་", "ཟླ་གཉིས་པ་", "ཟླ་གསུམ་པ་", "ཟླ་བཞི་པ་", "ཟླ་ལྔ་པ་", "ཟླ་དྲུག་པ", "ཟླ་བདུན་པ་", "ཟླ་བརྒྱད་པ་", "ཟླ་དགུ་པ་", "ཟླ་བཅུ་པ་", "ཟླ་བཅུ་གཅིག་པ་", "ཟླ་བཅུ་གཉིས་པ་"}, + {"སྔ་ཆ་", "ཕྱི་ཆ་"}, +} + +var localeTableDzBT = [5][]string{ + {"ཟླ་", "མིར་", "ལྷག་", "ཕུར་", "སངས་", "སྤེན་", "ཉི་"}, + {"གཟའ་ཟླ་བ་", "གཟའ་མིག་དམར་", "གཟའ་ལྷག་པ་", "གཟའ་ཕུར་བུ་", "གཟའ་པ་སངས་", "གཟའ་སྤེན་པ་", "གཟའ་ཉི་མ་"}, + {"༡", "༢", "༣", "༤", "༥", "༦", "༧", "༨", "༩", "༡༠", "༡༡", "12"}, + {"ཟླ་དངཔ་", "ཟླ་གཉིས་པ་", "ཟླ་གསུམ་པ་", "ཟླ་བཞི་པ་", "ཟླ་ལྔ་པ་", "ཟླ་དྲུག་པ", "ཟླ་བདུན་པ་", "ཟླ་བརྒྱད་པ་", "ཟླ་དགུ་པ་", "ཟླ་བཅུ་པ་", "ཟླ་བཅུ་གཅིག་པ་", "ཟླ་བཅུ་གཉིས་པ་"}, + {"སྔ་ཆ་", "ཕྱི་ཆ་"}, +} + +var localeTableEbu = [5][]string{ + {"Kma", "Tat", "Ine", "Tan", "Arm", "Maa", "NMM"}, + {"Kiumia", "Njumatatu", "Njumaine", "Njumatano", "Aramithi", "Njumaa", "NJumamothii"}, + {"Mbe", "Kai", "Kat", "Kan", "Gat", "Gan", "Mug", "Knn", "Ken", "Iku", "Imw", "Igi"}, + {"Mweri wa mbere", "Mweri wa kaĩri", "Mweri wa kathatũ", "Mweri wa kana", "Mweri wa gatano", "Mweri wa gatantatũ", "Mweri wa mũgwanja", "Mweri wa kanana", "Mweri wa kenda", "Mweri wa ikũmi", "Mweri wa ikũmi na ũmwe", "Mweri wa ikũmi na Kaĩrĩ"}, + {"KI", "UT"}, +} + +var localeTableEbuKE = [5][]string{ + {"Kma", "Tat", "Ine", "Tan", "Arm", "Maa", "NMM"}, + {"Kiumia", "Njumatatu", "Njumaine", "Njumatano", "Aramithi", "Njumaa", "NJumamothii"}, + {"Mbe", "Kai", "Kat", "Kan", "Gat", "Gan", "Mug", "Knn", "Ken", "Iku", "Imw", "Igi"}, + {"Mweri wa mbere", "Mweri wa kaĩri", "Mweri wa kathatũ", "Mweri wa kana", "Mweri wa gatano", "Mweri wa gatantatũ", "Mweri wa mũgwanja", "Mweri wa kanana", "Mweri wa kenda", "Mweri wa ikũmi", "Mweri wa ikũmi na ũmwe", "Mweri wa ikũmi na Kaĩrĩ"}, + {"KI", "UT"}, +} + +var localeTableEe = [5][]string{ + {"kɔs", "dzo", "bla", "kuɖ", "yaw", "fiɖ", "mem"}, + {"kɔsiɖa", "dzoɖa", "blaɖa", "kuɖa", "yawoɖa", "fiɖa", "memleɖa"}, + {"dzv", "dzd", "ted", "afɔ", "dam", "mas", "sia", "dea", "any", "kel", "ade", "dzm"}, + {"dzove", "dzodze", "tedoxe", "afɔfĩe", "dama", "masa", "siamlɔm", "deasiamime", "anyɔnyɔ", "kele", "adeɛmekpɔxe", "dzome"}, + {"ŋdi", "ɣetrɔ"}, +} + +var localeTableEeGH = [5][]string{ + {"kɔs", "dzo", "bla", "kuɖ", "yaw", "fiɖ", "mem"}, + {"kɔsiɖa", "dzoɖa", "blaɖa", "kuɖa", "yawoɖa", "fiɖa", "memleɖa"}, + {"dzv", "dzd", "ted", "afɔ", "dam", "mas", "sia", "dea", "any", "kel", "ade", "dzm"}, + {"dzove", "dzodze", "tedoxe", "afɔfĩe", "dama", "masa", "siamlɔm", "deasiamime", "anyɔnyɔ", "kele", "adeɛmekpɔxe", "dzome"}, + {"ŋdi", "ɣetrɔ"}, +} + +var localeTableEeTG = [5][]string{ + {"kɔs", "dzo", "bla", "kuɖ", "yaw", "fiɖ", "mem"}, + {"kɔsiɖa", "dzoɖa", "blaɖa", "kuɖa", "yawoɖa", "fiɖa", "memleɖa"}, + {"dzv", "dzd", "ted", "afɔ", "dam", "mas", "sia", "dea", "any", "kel", "ade", "dzm"}, + {"dzove", "dzodze", "tedoxe", "afɔfĩe", "dama", "masa", "siamlɔm", "deasiamime", "anyɔnyɔ", "kele", "adeɛmekpɔxe", "dzome"}, + {"ŋdi", "ɣetrɔ"}, +} + +var localeTableEl = [5][]string{ + {"Κυρ", "Δευ", "Τρί", "Τετ", "Πέμ", "Παρ", "Σάβ"}, + {"Κυριακή", "Δευτέρα", "Τρίτη", "Τετάρτη", "Πέμπτη", "Παρασκευή", "Σάββατο"}, + {"Ιαν", "Φεβ", "Μαρ", "Απρ", "Μαΐ", "Ιουν", "Ιουλ", "Αυγ", "Σεπ", "Οκτ", "Νοε", "Δεκ"}, + {"Ιανουαρίου", "Φεβρουαρίου", "Μαρτίου", "Απριλίου", "Μαΐου", "Ιουνίου", "Ιουλίου", "Αυγούστου", "Σεπτεμβρίου", "Οκτωβρίου", "Νοεμβρίου", "Δεκεμβρίου"}, + {"π.μ.", "μ.μ."}, +} + +var localeTableElCY = 
[5][]string{ + {"Κυρ", "Δευ", "Τρί", "Τετ", "Πέμ", "Παρ", "Σάβ"}, + {"Κυριακή", "Δευτέρα", "Τρίτη", "Τετάρτη", "Πέμπτη", "Παρασκευή", "Σάββατο"}, + {"Ιαν", "Φεβ", "Μαρ", "Απρ", "Μαΐ", "Ιουν", "Ιουλ", "Αυγ", "Σεπ", "Οκτ", "Νοε", "Δεκ"}, + {"Ιανουαρίου", "Φεβρουαρίου", "Μαρτίου", "Απριλίου", "Μαΐου", "Ιουνίου", "Ιουλίου", "Αυγούστου", "Σεπτεμβρίου", "Οκτωβρίου", "Νοεμβρίου", "Δεκεμβρίου"}, + {"π.μ.", "μ.μ."}, +} + +var localeTableElGR = [5][]string{ + {"Κυρ", "Δευ", "Τρί", "Τετ", "Πέμ", "Παρ", "Σάβ"}, + {"Κυριακή", "Δευτέρα", "Τρίτη", "Τετάρτη", "Πέμπτη", "Παρασκευή", "Σάββατο"}, + {"Ιαν", "Φεβ", "Μαρ", "Απρ", "Μαΐ", "Ιουν", "Ιουλ", "Αυγ", "Σεπ", "Οκτ", "Νοε", "Δεκ"}, + {"Ιανουαρίου", "Φεβρουαρίου", "Μαρτίου", "Απριλίου", "Μαΐου", "Ιουνίου", "Ιουλίου", "Αυγούστου", "Σεπτεμβρίου", "Οκτωβρίου", "Νοεμβρίου", "Δεκεμβρίου"}, + {"π.μ.", "μ.μ."}, +} + +var localeTableElpolyton = [5][]string{ + {"Κυρ", "Δευ", "Τρί", "Τετ", "Πέμ", "Παρ", "Σάβ"}, + {"Κυριακή", "Δευτέρα", "Τρίτη", "Τετάρτη", "Πέμπτη", "Παρασκευή", "Σάββατο"}, + {"Ιαν", "Φεβ", "Μαρ", "Απρ", "Μαΐ", "Ιουν", "Ιουλ", "Αὐγ", "Σεπ", "Ὀκτ", "Νοε", "Δεκ"}, + {"Ιανουαρίου", "Φεβρουαρίου", "Μαρτίου", "Απριλίου", "Μαΐου", "Ιουνίου", "Ιουλίου", "Αὐγούστου", "Σεπτεμβρίου", "Ὀκτωβρίου", "Νοεμβρίου", "Δεκεμβρίου"}, + {"π.μ.", "μ.μ."}, +} + +var localeTableEn = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"AM", "PM"}, +} + +var localeTableEn001 = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEn150 = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnAE = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"AM", "PM"}, +} + +var localeTableEnAG = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnAI = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", 
"Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnAS = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"AM", "PM"}, +} + +var localeTableEnAT = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnAU = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "June", "July", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnBB = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnBE = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnBI = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"AM", "PM"}, +} + +var localeTableEnBM = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnBS = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnBW = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", 
"Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnBZ = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnCA = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"a.m.", "p.m."}, +} + +var localeTableEnCC = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnCH = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnCK = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnCM = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnCX = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnCY = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + 
+var localeTableEnDE = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnDG = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnDK = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnDM = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnDsrt = [5][]string{ + {"𐐝𐐲𐑌", "𐐣𐐲𐑌", "𐐓𐐭𐑆", "𐐎𐐯𐑌", "𐐛𐐲𐑉", "𐐙𐑉𐐴", "𐐝𐐰𐐻"}, + {"𐐝𐐲𐑌𐐼𐐩", "𐐣𐐲𐑌𐐼𐐩", "𐐓𐐭𐑆𐐼𐐩", "𐐎𐐯𐑌𐑆𐐼𐐩", "𐐛𐐲𐑉𐑆𐐼𐐩", "𐐙𐑉𐐴𐐼𐐩", "𐐝𐐰𐐻𐐲𐑉𐐼𐐩"}, + {"𐐖𐐰𐑌", "𐐙𐐯𐐺", "𐐣𐐪𐑉", "𐐁𐐹𐑉", "𐐣𐐩", "𐐖𐐭𐑌", "𐐖𐐭𐑊", "𐐂𐑀", "𐐝𐐯𐐹", "𐐉𐐿𐐻", "𐐤𐐬𐑂", "𐐔𐐨𐑅"}, + {"𐐖𐐰𐑌𐐷𐐭𐐯𐑉𐐨", "𐐙𐐯𐐺𐑉𐐭𐐯𐑉𐐨", "𐐣𐐪𐑉𐐽", "𐐁𐐹𐑉𐐮𐑊", "𐐣𐐩", "𐐖𐐭𐑌", "𐐖𐐭𐑊𐐴", "𐐂𐑀𐐲𐑅𐐻", "𐐝𐐯𐐹𐐻𐐯𐑋𐐺𐐲𐑉", "𐐉𐐿𐐻𐐬𐐺𐐲𐑉", "𐐤𐐬𐑂𐐯𐑋𐐺𐐲𐑉", "𐐔𐐨𐑅𐐯𐑋𐐺𐐲𐑉"}, + {"𐐈𐐣", "𐐑𐐣"}, +} + +var localeTableEnDsrtUS = [5][]string{ + {"𐐝𐐲𐑌", "𐐣𐐲𐑌", "𐐓𐐭𐑆", "𐐎𐐯𐑌", "𐐛𐐲𐑉", "𐐙𐑉𐐴", "𐐝𐐰𐐻"}, + {"𐐝𐐲𐑌𐐼𐐩", "𐐣𐐲𐑌𐐼𐐩", "𐐓𐐭𐑆𐐼𐐩", "𐐎𐐯𐑌𐑆𐐼𐐩", "𐐛𐐲𐑉𐑆𐐼𐐩", "𐐙𐑉𐐴𐐼𐐩", "𐐝𐐰𐐻𐐲𐑉𐐼𐐩"}, + {"𐐖𐐰𐑌", "𐐙𐐯𐐺", "𐐣𐐪𐑉", "𐐁𐐹𐑉", "𐐣𐐩", "𐐖𐐭𐑌", "𐐖𐐭𐑊", "𐐂𐑀", "𐐝𐐯𐐹", "𐐉𐐿𐐻", "𐐤𐐬𐑂", "𐐔𐐨𐑅"}, + {"𐐖𐐰𐑌𐐷𐐭𐐯𐑉𐐨", "𐐙𐐯𐐺𐑉𐐭𐐯𐑉𐐨", "𐐣𐐪𐑉𐐽", "𐐁𐐹𐑉𐐮𐑊", "𐐣𐐩", "𐐖𐐭𐑌", "𐐖𐐭𐑊𐐴", "𐐂𐑀𐐲𐑅𐐻", "𐐝𐐯𐐹𐐻𐐯𐑋𐐺𐐲𐑉", "𐐉𐐿𐐻𐐬𐐺𐐲𐑉", "𐐤𐐬𐑂𐐯𐑋𐐺𐐲𐑉", "𐐔𐐨𐑅𐐯𐑋𐐺𐐲𐑉"}, + {"𐐈𐐣", "𐐑𐐣"}, +} + +var localeTableEnER = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnFI = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnFJ = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", 
"August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnFK = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnFM = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnGB = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnGD = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnGG = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnGH = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnGI = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnGM = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnGU = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", 
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"AM", "PM"}, +} + +var localeTableEnGY = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnHK = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnID = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"AM", "PM"}, +} + +var localeTableEnIE = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnIL = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnIM = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnIN = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnIO = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnJE = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", 
"Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnJM = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnKE = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnKI = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnKN = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnKY = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnLC = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnLR = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnLS = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", 
"pm"}, +} + +var localeTableEnMG = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnMH = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"AM", "PM"}, +} + +var localeTableEnMO = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnMP = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"AM", "PM"}, +} + +var localeTableEnMS = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnMT = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnMU = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnMV = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"AM", "PM"}, +} + +var localeTableEnMW = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", 
"March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnMY = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnNA = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnNF = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnNG = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnNL = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnNR = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnNU = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnNZ = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnPG = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + 
{"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnPH = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnPK = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnPN = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnPR = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"AM", "PM"}, +} + +var localeTableEnPW = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnRW = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnSB = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnSC = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnSD = [5][]string{ + {"Sun", "Mon", "Tue", 
"Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnSE = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnSG = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnSH = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnSI = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnSL = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnSS = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnSX = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnSZ = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", 
"October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnShaw = [5][]string{ + {"·𐑕𐑭", "·𐑥𐑭", "·𐑑𐑵", "·𐑢𐑧", "·𐑔𐑻", "·𐑓𐑮", "·𐑕𐑨"}, + {"·𐑕𐑭𐑙𐑛𐑱", "·𐑥𐑭𐑙𐑛𐑱", "·𐑑𐑵𐑟𐑛𐑱", "·𐑢𐑧𐑙𐑟𐑛𐑱", "·𐑔𐑻𐑟𐑛𐑱", "·𐑓𐑮𐑲𐑛𐑱", "·𐑕𐑨𐑛𐑻𐑛𐑱"}, + {"·𐑡𐑨", "·𐑓𐑧", "·𐑥𐑸", "·𐑱𐑐", "·𐑥𐑱", "·𐑡𐑵", "·𐑡𐑫", "·𐑪𐑜", "·𐑕𐑧", "·𐑷𐑒", "·𐑯𐑴", "·𐑛𐑭"}, + {"·𐑡𐑨𐑙𐑘𐑭𐑢𐑺𐑰", "·𐑓𐑧𐑚𐑘𐑵𐑢𐑺𐑰", "·𐑥𐑸𐑗", "·𐑱𐑐𐑮𐑭𐑤", "·𐑥𐑱", "·𐑡𐑵𐑯", "·𐑡𐑫𐑤𐑲", "·𐑪𐑜𐑭𐑕𐑑", "·𐑕𐑧𐑐𐑑𐑧𐑥𐑚𐑸", "·𐑷𐑒𐑑𐑴𐑚𐑸", "·𐑯𐑴𐑝𐑧𐑥𐑚𐑸", "·𐑛𐑭𐑕𐑧𐑥𐑚𐑸"}, + {"𐑨𐑥", "𐑐𐑥"}, +} + +var localeTableEnShawGB = [5][]string{ + {"·𐑕𐑭", "·𐑥𐑭", "·𐑑𐑵", "·𐑢𐑧", "·𐑔𐑻", "·𐑓𐑮", "·𐑕𐑨"}, + {"·𐑕𐑭𐑙𐑛𐑱", "·𐑥𐑭𐑙𐑛𐑱", "·𐑑𐑵𐑟𐑛𐑱", "·𐑢𐑧𐑙𐑟𐑛𐑱", "·𐑔𐑻𐑟𐑛𐑱", "·𐑓𐑮𐑲𐑛𐑱", "·𐑕𐑨𐑛𐑻𐑛𐑱"}, + {"·𐑡𐑨", "·𐑓𐑧", "·𐑥𐑸", "·𐑱𐑐", "·𐑥𐑱", "·𐑡𐑵", "·𐑡𐑫", "·𐑪𐑜", "·𐑕𐑧", "·𐑷𐑒", "·𐑯𐑴", "·𐑛𐑭"}, + {"·𐑡𐑨𐑙𐑘𐑭𐑢𐑺𐑰", "·𐑓𐑧𐑚𐑘𐑵𐑢𐑺𐑰", "·𐑥𐑸𐑗", "·𐑱𐑐𐑮𐑭𐑤", "·𐑥𐑱", "·𐑡𐑵𐑯", "·𐑡𐑫𐑤𐑲", "·𐑪𐑜𐑭𐑕𐑑", "·𐑕𐑧𐑐𐑑𐑧𐑥𐑚𐑸", "·𐑷𐑒𐑑𐑴𐑚𐑸", "·𐑯𐑴𐑝𐑧𐑥𐑚𐑸", "·𐑛𐑭𐑕𐑧𐑥𐑚𐑸"}, + {"𐑨𐑥", "𐑐𐑥"}, +} + +var localeTableEnTC = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnTK = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnTO = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnTT = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnTV = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnTZ = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnUG = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", 
"Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnUM = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"AM", "PM"}, +} + +var localeTableEnUS = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"AM", "PM"}, +} + +var localeTableEnUSuvaposix = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"AM", "PM"}, +} + +var localeTableEnVC = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnVG = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnVI = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"AM", "PM"}, +} + +var localeTableEnVU = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnWS = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnZA = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", 
"Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnZM = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnZW = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEo = [5][]string{ + {"di", "lu", "ma", "me", "ĵa", "ve", "sa"}, + {"dimanĉo", "lundo", "mardo", "merkredo", "ĵaŭdo", "vendredo", "sabato"}, + {"Jan", "Feb", "Mar", "Apr", "Maj", "Jun", "Jul", "Aŭg", "Sep", "Okt", "Nov", "Dec"}, + {"Januaro", "Februaro", "Marto", "Aprilo", "Majo", "Junio", "Julio", "Aŭgusto", "Septembro", "Oktobro", "Novembro", "Decembro"}, + {"atm", "ptm"}, +} + +var localeTableEo001 = [5][]string{ + {"di", "lu", "ma", "me", "ĵa", "ve", "sa"}, + {"dimanĉo", "lundo", "mardo", "merkredo", "ĵaŭdo", "vendredo", "sabato"}, + {"Jan", "Feb", "Mar", "Apr", "Maj", "Jun", "Jul", "Aŭg", "Sep", "Okt", "Nov", "Dec"}, + {"Januaro", "Februaro", "Marto", "Aprilo", "Majo", "Junio", "Julio", "Aŭgusto", "Septembro", "Oktobro", "Novembro", "Decembro"}, + {"atm", "ptm"}, +} + +var localeTableEs = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEs419 = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsAR = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsBO = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var 
localeTableEsBR = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsBZ = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsCL = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsCO = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsCR = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsCU = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsDO = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsEA = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsEC = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", 
"marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsES = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsGQ = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsGT = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsHN = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsIC = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsMX = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sep", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsNI = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsPA = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsPE = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", 
"viernes", "sábado"}, + {"ene.", "feb.", "mar.", "abr.", "may.", "jun.", "jul.", "ago.", "set.", "oct.", "nov.", "dic."}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "setiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsPH = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsPR = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsPY = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene.", "feb.", "mar.", "abr.", "may.", "jun.", "jul.", "ago.", "sept.", "oct.", "nov.", "dic."}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsSV = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsUS = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsUY = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene.", "feb.", "mar.", "abr.", "may.", "jun.", "jul.", "ago.", "set.", "oct.", "nov.", "dic."}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "setiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsVE = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene.", "feb.", "mar.", "abr.", "may.", "jun.", "jul.", "ago.", "sept.", "oct.", "nov.", "dic."}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEt = [5][]string{ + {"P", "E", "T", "K", "N", "R", "L"}, + {"pühapäev", "esmaspäev", "teisipäev", "kolmapäev", "neljapäev", "reede", "laupäev"}, + {"jaan", "veebr", "märts", "apr", "mai", "juuni", "juuli", "aug", "sept", "okt", "nov", "dets"}, + {"jaanuar", "veebruar", "märts", "aprill", "mai", "juuni", "juuli", "august", "september", "oktoober", 
"november", "detsember"}, + {}, +} + +var localeTableEtEE = [5][]string{ + {"P", "E", "T", "K", "N", "R", "L"}, + {"pühapäev", "esmaspäev", "teisipäev", "kolmapäev", "neljapäev", "reede", "laupäev"}, + {"jaan", "veebr", "märts", "apr", "mai", "juuni", "juuli", "aug", "sept", "okt", "nov", "dets"}, + {"jaanuar", "veebruar", "märts", "aprill", "mai", "juuni", "juuli", "august", "september", "oktoober", "november", "detsember"}, + {}, +} + +var localeTableEu = [5][]string{ + {"ig.", "al.", "ar.", "az.", "og.", "or.", "lr."}, + {"igandea", "astelehena", "asteartea", "asteazkena", "osteguna", "ostirala", "larunbata"}, + {"urt.", "ots.", "mar.", "api.", "mai.", "eka.", "uzt.", "abu.", "ira.", "urr.", "aza.", "abe."}, + {"urtarrila", "otsaila", "martxoa", "apirila", "maiatza", "ekaina", "uztaila", "abuztua", "iraila", "urria", "azaroa", "abendua"}, + {"g", "a"}, +} + +var localeTableEuES = [5][]string{ + {"ig.", "al.", "ar.", "az.", "og.", "or.", "lr."}, + {"igandea", "astelehena", "asteartea", "asteazkena", "osteguna", "ostirala", "larunbata"}, + {"urt.", "ots.", "mar.", "api.", "mai.", "eka.", "uzt.", "abu.", "ira.", "urr.", "aza.", "abe."}, + {"urtarrila", "otsaila", "martxoa", "apirila", "maiatza", "ekaina", "uztaila", "abuztua", "iraila", "urria", "azaroa", "abendua"}, + {"g", "a"}, +} + +var localeTableEwo = [5][]string{ + {"sɔ́n", "mɔ́n", "smb", "sml", "smn", "fúl", "sér"}, + {"sɔ́ndɔ", "mɔ́ndi", "sɔ́ndɔ məlú mə́bɛ̌", "sɔ́ndɔ məlú mə́lɛ́", "sɔ́ndɔ məlú mə́nyi", "fúladé", "séradé"}, + {"ngo", "ngb", "ngl", "ngn", "ngt", "ngs", "ngz", "ngm", "nge", "nga", "ngad", "ngab"}, + {"ngɔn osú", "ngɔn bɛ̌", "ngɔn lála", "ngɔn nyina", "ngɔn tána", "ngɔn saməna", "ngɔn zamgbála", "ngɔn mwom", "ngɔn ebulú", "ngɔn awóm", "ngɔn awóm ai dziá", "ngɔn awóm ai bɛ̌"}, + {"kíkíríg", "ngəgógəle"}, +} + +var localeTableEwoCM = [5][]string{ + {"sɔ́n", "mɔ́n", "smb", "sml", "smn", "fúl", "sér"}, + {"sɔ́ndɔ", "mɔ́ndi", "sɔ́ndɔ məlú mə́bɛ̌", "sɔ́ndɔ məlú mə́lɛ́", "sɔ́ndɔ məlú mə́nyi", "fúladé", "séradé"}, + {"ngo", "ngb", "ngl", "ngn", "ngt", "ngs", "ngz", "ngm", "nge", "nga", "ngad", "ngab"}, + {"ngɔn osú", "ngɔn bɛ̌", "ngɔn lála", "ngɔn nyina", "ngɔn tána", "ngɔn saməna", "ngɔn zamgbála", "ngɔn mwom", "ngɔn ebulú", "ngɔn awóm", "ngɔn awóm ai dziá", "ngɔn awóm ai bɛ̌"}, + {"kíkíríg", "ngəgógəle"}, +} + +var localeTableFa = [5][]string{ + {}, + {"یکشنبه", "دوشنبه", "سه‌شنبه", "چهارشنبه", "پنجشنبه", "جمعه", "شنبه"}, + {"ژانویه", "فوریه", "مارس", "آوریل", "مه", "ژوئن", "ژوئیه", "اوت", "سپتامبر", "اکتبر", "نوامبر", "دسامبر"}, + {"ژانویهٔ", "فوریهٔ", "مارس", "آوریل", "مهٔ", "ژوئن", "ژوئیهٔ", "اوت", "سپتامبر", "اکتبر", "نوامبر", "دسامبر"}, + {"ق.ظ.", "ب.ظ."}, +} + +var localeTableFaAF = [5][]string{ + {}, + {"یکشنبه", "دوشنبه", "سه‌شنبه", "چهارشنبه", "پنجشنبه", "جمعه", "شنبه"}, + {"جنو", "فبروری", "مارچ", "اپریل", "می", "جون", "جول", "اگست", "سپتمبر", "اکتوبر", "نومبر", "دسم"}, + {"جنوری", "فبروری", "مارچ", "اپریل", "می", "جون", "جولای", "اگست", "سپتمبر", "اکتوبر", "نومبر", "دسمبر"}, + {"ق.ظ.", "ب.ظ."}, +} + +var localeTableFaIR = [5][]string{ + {}, + {"یکشنبه", "دوشنبه", "سه‌شنبه", "چهارشنبه", "پنجشنبه", "جمعه", "شنبه"}, + {"ژانویه", "فوریه", "مارس", "آوریل", "مه", "ژوئن", "ژوئیه", "اوت", "سپتامبر", "اکتبر", "نوامبر", "دسامبر"}, + {"ژانویهٔ", "فوریهٔ", "مارس", "آوریل", "مهٔ", "ژوئن", "ژوئیهٔ", "اوت", "سپتامبر", "اکتبر", "نوامبر", "دسامبر"}, + {"ق.ظ.", "ب.ظ."}, +} + +var localeTableFf = [5][]string{ + {"dew", "aaɓ", "maw", "nje", "naa", "mwd", "hbi"}, + {"dewo", "aaɓnde", "mawbaare", "njeslaare", "naasaande", "mawnde", 
"hoore-biir"}, + {"sii", "col", "mbo", "see", "duu", "kor", "mor", "juk", "slt", "yar", "jol", "bow"}, + {"siilo", "colte", "mbooy", "seeɗto", "duujal", "korse", "morso", "juko", "siilto", "yarkomaa", "jolal", "bowte"}, + {"subaka", "kikiiɗe"}, +} + +var localeTableFfAdlm = [5][]string{ + {"𞤈𞤫𞤬", "𞤀𞥄𞤩𞤵", "𞤃𞤢𞤦", "𞤔𞤫𞤧", "𞤐𞤢𞥄𞤧", "𞤃𞤢𞤣", "𞤖𞤮𞤪"}, + {"𞤈𞤫𞤬𞤦𞤭𞤪𞥆𞤫", "𞤀𞥄𞤩𞤵𞤲𞥋𞤣𞤫", "𞤃𞤢𞤱𞤦𞤢𞥄𞤪𞤫", "𞤐𞤶𞤫𞤧𞤤𞤢𞥄𞤪𞤫", "𞤐𞤢𞥄𞤧𞤢𞥄𞤲𞤣𞤫", "𞤃𞤢𞤱𞤲𞤣𞤫", "𞤖𞤮𞤪𞤦𞤭𞤪𞥆𞤫"}, + {}, + {"𞤅𞤭𞥅𞤤𞤮", "𞤕𞤮𞤤𞤼𞤮", "𞤐𞤦𞤮𞥅𞤴𞤮", "𞤅𞤫𞥅𞤼𞤮", "𞤁𞤵𞥅𞤶𞤮", "𞤑𞤮𞤪𞤧𞤮", "𞤃𞤮𞤪𞤧𞤮", "𞤔𞤵𞤳𞤮", "𞤅𞤭𞤤𞤼𞤮", "𞤒𞤢𞤪𞤳𞤮", "𞤔𞤮𞤤𞤮", "𞤄𞤮𞤱𞤼𞤮"}, + {"𞤀𞤎", "𞤇𞤎"}, +} + +var localeTableFfAdlmBF = [5][]string{ + {"𞤈𞤫𞤬", "𞤀𞥄𞤩𞤵", "𞤃𞤢𞤦", "𞤔𞤫𞤧", "𞤐𞤢𞥄𞤧", "𞤃𞤢𞤣", "𞤖𞤮𞤪"}, + {"𞤈𞤫𞤬𞤦𞤭𞤪𞥆𞤫", "𞤀𞥄𞤩𞤵𞤲𞥋𞤣𞤫", "𞤃𞤢𞤱𞤦𞤢𞥄𞤪𞤫", "𞤐𞤶𞤫𞤧𞤤𞤢𞥄𞤪𞤫", "𞤐𞤢𞥄𞤧𞤢𞥄𞤲𞤣𞤫", "𞤃𞤢𞤱𞤲𞤣𞤫", "𞤖𞤮𞤪𞤦𞤭𞤪𞥆𞤫"}, + {}, + {"𞤅𞤭𞥅𞤤𞤮", "𞤕𞤮𞤤𞤼𞤮", "𞤐𞤦𞤮𞥅𞤴𞤮", "𞤅𞤫𞥅𞤼𞤮", "𞤁𞤵𞥅𞤶𞤮", "𞤑𞤮𞤪𞤧𞤮", "𞤃𞤮𞤪𞤧𞤮", "𞤔𞤵𞤳𞤮", "𞤅𞤭𞤤𞤼𞤮", "𞤒𞤢𞤪𞤳𞤮", "𞤔𞤮𞤤𞤮", "𞤄𞤮𞤱𞤼𞤮"}, + {"𞤀𞤎", "𞤇𞤎"}, +} + +var localeTableFfAdlmCM = [5][]string{ + {"𞤈𞤫𞤬", "𞤀𞥄𞤩𞤵", "𞤃𞤢𞤦", "𞤔𞤫𞤧", "𞤐𞤢𞥄𞤧", "𞤃𞤢𞤣", "𞤖𞤮𞤪"}, + {"𞤈𞤫𞤬𞤦𞤭𞤪𞥆𞤫", "𞤀𞥄𞤩𞤵𞤲𞥋𞤣𞤫", "𞤃𞤢𞤱𞤦𞤢𞥄𞤪𞤫", "𞤐𞤶𞤫𞤧𞤤𞤢𞥄𞤪𞤫", "𞤐𞤢𞥄𞤧𞤢𞥄𞤲𞤣𞤫", "𞤃𞤢𞤱𞤲𞤣𞤫", "𞤖𞤮𞤪𞤦𞤭𞤪𞥆𞤫"}, + {}, + {"𞤅𞤭𞥅𞤤𞤮", "𞤕𞤮𞤤𞤼𞤮", "𞤐𞤦𞤮𞥅𞤴𞤮", "𞤅𞤫𞥅𞤼𞤮", "𞤁𞤵𞥅𞤶𞤮", "𞤑𞤮𞤪𞤧𞤮", "𞤃𞤮𞤪𞤧𞤮", "𞤔𞤵𞤳𞤮", "𞤅𞤭𞤤𞤼𞤮", "𞤒𞤢𞤪𞤳𞤮", "𞤔𞤮𞤤𞤮", "𞤄𞤮𞤱𞤼𞤮"}, + {"𞤀𞤎", "𞤇𞤎"}, +} + +var localeTableFfAdlmGH = [5][]string{ + {"𞤈𞤫𞤬", "𞤀𞥄𞤩𞤵", "𞤃𞤢𞤦", "𞤔𞤫𞤧", "𞤐𞤢𞥄𞤧", "𞤃𞤢𞤣", "𞤖𞤮𞤪"}, + {"𞤈𞤫𞤬𞤦𞤭𞤪𞥆𞤫", "𞤀𞥄𞤩𞤵𞤲𞥋𞤣𞤫", "𞤃𞤢𞤱𞤦𞤢𞥄𞤪𞤫", "𞤐𞤶𞤫𞤧𞤤𞤢𞥄𞤪𞤫", "𞤐𞤢𞥄𞤧𞤢𞥄𞤲𞤣𞤫", "𞤃𞤢𞤱𞤲𞤣𞤫", "𞤖𞤮𞤪𞤦𞤭𞤪𞥆𞤫"}, + {}, + {"𞤅𞤭𞥅𞤤𞤮", "𞤕𞤮𞤤𞤼𞤮", "𞤐𞤦𞤮𞥅𞤴𞤮", "𞤅𞤫𞥅𞤼𞤮", "𞤁𞤵𞥅𞤶𞤮", "𞤑𞤮𞤪𞤧𞤮", "𞤃𞤮𞤪𞤧𞤮", "𞤔𞤵𞤳𞤮", "𞤅𞤭𞤤𞤼𞤮", "𞤒𞤢𞤪𞤳𞤮", "𞤔𞤮𞤤𞤮", "𞤄𞤮𞤱𞤼𞤮"}, + {"𞤀𞤎", "𞤇𞤎"}, +} + +var localeTableFfAdlmGM = [5][]string{ + {"𞤈𞤫𞤬", "𞤀𞥄𞤩𞤵", "𞤃𞤢𞤦", "𞤔𞤫𞤧", "𞤐𞤢𞥄𞤧", "𞤃𞤢𞤣", "𞤖𞤮𞤪"}, + {"𞤈𞤫𞤬𞤦𞤭𞤪𞥆𞤫", "𞤀𞥄𞤩𞤵𞤲𞥋𞤣𞤫", "𞤃𞤢𞤱𞤦𞤢𞥄𞤪𞤫", "𞤐𞤶𞤫𞤧𞤤𞤢𞥄𞤪𞤫", "𞤐𞤢𞥄𞤧𞤢𞥄𞤲𞤣𞤫", "𞤃𞤢𞤱𞤲𞤣𞤫", "𞤖𞤮𞤪𞤦𞤭𞤪𞥆𞤫"}, + {}, + {"𞤅𞤭𞥅𞤤𞤮", "𞤕𞤮𞤤𞤼𞤮", "𞤐𞤦𞤮𞥅𞤴𞤮", "𞤅𞤫𞥅𞤼𞤮", "𞤁𞤵𞥅𞤶𞤮", "𞤑𞤮𞤪𞤧𞤮", "𞤃𞤮𞤪𞤧𞤮", "𞤔𞤵𞤳𞤮", "𞤅𞤭𞤤𞤼𞤮", "𞤒𞤢𞤪𞤳𞤮", "𞤔𞤮𞤤𞤮", "𞤄𞤮𞤱𞤼𞤮"}, + {"𞤀𞤎", "𞤇𞤎"}, +} + +var localeTableFfAdlmGN = [5][]string{ + {"𞤈𞤫𞤬", "𞤀𞥄𞤩𞤵", "𞤃𞤢𞤦", "𞤔𞤫𞤧", "𞤐𞤢𞥄𞤧", "𞤃𞤢𞤣", "𞤖𞤮𞤪"}, + {"𞤈𞤫𞤬𞤦𞤭𞤪𞥆𞤫", "𞤀𞥄𞤩𞤵𞤲𞥋𞤣𞤫", "𞤃𞤢𞤱𞤦𞤢𞥄𞤪𞤫", "𞤐𞤶𞤫𞤧𞤤𞤢𞥄𞤪𞤫", "𞤐𞤢𞥄𞤧𞤢𞥄𞤲𞤣𞤫", "𞤃𞤢𞤱𞤲𞤣𞤫", "𞤖𞤮𞤪𞤦𞤭𞤪𞥆𞤫"}, + {}, + {"𞤅𞤭𞥅𞤤𞤮", "𞤕𞤮𞤤𞤼𞤮", "𞤐𞤦𞤮𞥅𞤴𞤮", "𞤅𞤫𞥅𞤼𞤮", "𞤁𞤵𞥅𞤶𞤮", "𞤑𞤮𞤪𞤧𞤮", "𞤃𞤮𞤪𞤧𞤮", "𞤔𞤵𞤳𞤮", "𞤅𞤭𞤤𞤼𞤮", "𞤒𞤢𞤪𞤳𞤮", "𞤔𞤮𞤤𞤮", "𞤄𞤮𞤱𞤼𞤮"}, + {"𞤀𞤎", "𞤇𞤎"}, +} + +var localeTableFfAdlmGW = [5][]string{ + {"𞤈𞤫𞤬", "𞤀𞥄𞤩𞤵", "𞤃𞤢𞤦", "𞤔𞤫𞤧", "𞤐𞤢𞥄𞤧", "𞤃𞤢𞤣", "𞤖𞤮𞤪"}, + {"𞤈𞤫𞤬𞤦𞤭𞤪𞥆𞤫", "𞤀𞥄𞤩𞤵𞤲𞥋𞤣𞤫", "𞤃𞤢𞤱𞤦𞤢𞥄𞤪𞤫", "𞤐𞤶𞤫𞤧𞤤𞤢𞥄𞤪𞤫", "𞤐𞤢𞥄𞤧𞤢𞥄𞤲𞤣𞤫", "𞤃𞤢𞤱𞤲𞤣𞤫", "𞤖𞤮𞤪𞤦𞤭𞤪𞥆𞤫"}, + {}, + {"𞤅𞤭𞥅𞤤𞤮", "𞤕𞤮𞤤𞤼𞤮", "𞤐𞤦𞤮𞥅𞤴𞤮", "𞤅𞤫𞥅𞤼𞤮", "𞤁𞤵𞥅𞤶𞤮", "𞤑𞤮𞤪𞤧𞤮", "𞤃𞤮𞤪𞤧𞤮", "𞤔𞤵𞤳𞤮", "𞤅𞤭𞤤𞤼𞤮", "𞤒𞤢𞤪𞤳𞤮", "𞤔𞤮𞤤𞤮", "𞤄𞤮𞤱𞤼𞤮"}, + {"𞤀𞤎", "𞤇𞤎"}, +} + +var localeTableFfAdlmLR = [5][]string{ + {"𞤈𞤫𞤬", "𞤀𞥄𞤩𞤵", "𞤃𞤢𞤦", "𞤔𞤫𞤧", "𞤐𞤢𞥄𞤧", "𞤃𞤢𞤣", "𞤖𞤮𞤪"}, + {"𞤈𞤫𞤬𞤦𞤭𞤪𞥆𞤫", "𞤀𞥄𞤩𞤵𞤲𞥋𞤣𞤫", "𞤃𞤢𞤱𞤦𞤢𞥄𞤪𞤫", "𞤐𞤶𞤫𞤧𞤤𞤢𞥄𞤪𞤫", "𞤐𞤢𞥄𞤧𞤢𞥄𞤲𞤣𞤫", "𞤃𞤢𞤱𞤲𞤣𞤫", "𞤖𞤮𞤪𞤦𞤭𞤪𞥆𞤫"}, + {}, + {"𞤅𞤭𞥅𞤤𞤮", "𞤕𞤮𞤤𞤼𞤮", "𞤐𞤦𞤮𞥅𞤴𞤮", "𞤅𞤫𞥅𞤼𞤮", "𞤁𞤵𞥅𞤶𞤮", "𞤑𞤮𞤪𞤧𞤮", "𞤃𞤮𞤪𞤧𞤮", "𞤔𞤵𞤳𞤮", "𞤅𞤭𞤤𞤼𞤮", "𞤒𞤢𞤪𞤳𞤮", "𞤔𞤮𞤤𞤮", "𞤄𞤮𞤱𞤼𞤮"}, + {"𞤀𞤎", "𞤇𞤎"}, +} + +var localeTableFfAdlmMR = [5][]string{ + {"𞤈𞤫𞤬", "𞤀𞥄𞤩𞤵", "𞤃𞤢𞤦", "𞤔𞤫𞤧", "𞤐𞤢𞥄𞤧", "𞤃𞤢𞤣", "𞤖𞤮𞤪"}, + {"𞤈𞤫𞤬𞤦𞤭𞤪𞥆𞤫", "𞤀𞥄𞤩𞤵𞤲𞥋𞤣𞤫", "𞤃𞤢𞤱𞤦𞤢𞥄𞤪𞤫", "𞤐𞤶𞤫𞤧𞤤𞤢𞥄𞤪𞤫", "𞤐𞤢𞥄𞤧𞤢𞥄𞤲𞤣𞤫", "𞤃𞤢𞤱𞤲𞤣𞤫", "𞤖𞤮𞤪𞤦𞤭𞤪𞥆𞤫"}, + {}, + {"𞤅𞤭𞥅𞤤𞤮", "𞤕𞤮𞤤𞤼𞤮", "𞤐𞤦𞤮𞥅𞤴𞤮", "𞤅𞤫𞥅𞤼𞤮", "𞤁𞤵𞥅𞤶𞤮", "𞤑𞤮𞤪𞤧𞤮", "𞤃𞤮𞤪𞤧𞤮", "𞤔𞤵𞤳𞤮", "𞤅𞤭𞤤𞤼𞤮", "𞤒𞤢𞤪𞤳𞤮", "𞤔𞤮𞤤𞤮", "𞤄𞤮𞤱𞤼𞤮"}, + {"𞤀𞤎", "𞤇𞤎"}, +} + +var localeTableFfAdlmNE = [5][]string{ + {"𞤈𞤫𞤬", "𞤀𞥄𞤩𞤵", "𞤃𞤢𞤦", "𞤔𞤫𞤧", "𞤐𞤢𞥄𞤧", "𞤃𞤢𞤣", "𞤖𞤮𞤪"}, + {"𞤈𞤫𞤬𞤦𞤭𞤪𞥆𞤫", "𞤀𞥄𞤩𞤵𞤲𞥋𞤣𞤫", "𞤃𞤢𞤱𞤦𞤢𞥄𞤪𞤫", "𞤐𞤶𞤫𞤧𞤤𞤢𞥄𞤪𞤫", "𞤐𞤢𞥄𞤧𞤢𞥄𞤲𞤣𞤫", "𞤃𞤢𞤱𞤲𞤣𞤫", "𞤖𞤮𞤪𞤦𞤭𞤪𞥆𞤫"}, + {}, + {"𞤅𞤭𞥅𞤤𞤮", "𞤕𞤮𞤤𞤼𞤮", "𞤐𞤦𞤮𞥅𞤴𞤮", "𞤅𞤫𞥅𞤼𞤮", "𞤁𞤵𞥅𞤶𞤮", "𞤑𞤮𞤪𞤧𞤮", "𞤃𞤮𞤪𞤧𞤮", "𞤔𞤵𞤳𞤮", "𞤅𞤭𞤤𞤼𞤮", "𞤒𞤢𞤪𞤳𞤮", "𞤔𞤮𞤤𞤮", "𞤄𞤮𞤱𞤼𞤮"}, + {"𞤀𞤎", "𞤇𞤎"}, +} + +var localeTableFfAdlmNG = [5][]string{ + {"𞤈𞤫𞤬", "𞤀𞥄𞤩𞤵", "𞤃𞤢𞤦", "𞤔𞤫𞤧", "𞤐𞤢𞥄𞤧", "𞤃𞤢𞤣", "𞤖𞤮𞤪"}, + 
{"𞤈𞤫𞤬𞤦𞤭𞤪𞥆𞤫", "𞤀𞥄𞤩𞤵𞤲𞥋𞤣𞤫", "𞤃𞤢𞤱𞤦𞤢𞥄𞤪𞤫", "𞤐𞤶𞤫𞤧𞤤𞤢𞥄𞤪𞤫", "𞤐𞤢𞥄𞤧𞤢𞥄𞤲𞤣𞤫", "𞤃𞤢𞤱𞤲𞤣𞤫", "𞤖𞤮𞤪𞤦𞤭𞤪𞥆𞤫"}, + {}, + {"𞤅𞤭𞥅𞤤𞤮", "𞤕𞤮𞤤𞤼𞤮", "𞤐𞤦𞤮𞥅𞤴𞤮", "𞤅𞤫𞥅𞤼𞤮", "𞤁𞤵𞥅𞤶𞤮", "𞤑𞤮𞤪𞤧𞤮", "𞤃𞤮𞤪𞤧𞤮", "𞤔𞤵𞤳𞤮", "𞤅𞤭𞤤𞤼𞤮", "𞤒𞤢𞤪𞤳𞤮", "𞤔𞤮𞤤𞤮", "𞤄𞤮𞤱𞤼𞤮"}, + {"𞤀𞤎", "𞤇𞤎"}, +} + +var localeTableFfAdlmSL = [5][]string{ + {"𞤈𞤫𞤬", "𞤀𞥄𞤩𞤵", "𞤃𞤢𞤦", "𞤔𞤫𞤧", "𞤐𞤢𞥄𞤧", "𞤃𞤢𞤣", "𞤖𞤮𞤪"}, + {"𞤈𞤫𞤬𞤦𞤭𞤪𞥆𞤫", "𞤀𞥄𞤩𞤵𞤲𞥋𞤣𞤫", "𞤃𞤢𞤱𞤦𞤢𞥄𞤪𞤫", "𞤐𞤶𞤫𞤧𞤤𞤢𞥄𞤪𞤫", "𞤐𞤢𞥄𞤧𞤢𞥄𞤲𞤣𞤫", "𞤃𞤢𞤱𞤲𞤣𞤫", "𞤖𞤮𞤪𞤦𞤭𞤪𞥆𞤫"}, + {}, + {"𞤅𞤭𞥅𞤤𞤮", "𞤕𞤮𞤤𞤼𞤮", "𞤐𞤦𞤮𞥅𞤴𞤮", "𞤅𞤫𞥅𞤼𞤮", "𞤁𞤵𞥅𞤶𞤮", "𞤑𞤮𞤪𞤧𞤮", "𞤃𞤮𞤪𞤧𞤮", "𞤔𞤵𞤳𞤮", "𞤅𞤭𞤤𞤼𞤮", "𞤒𞤢𞤪𞤳𞤮", "𞤔𞤮𞤤𞤮", "𞤄𞤮𞤱𞤼𞤮"}, + {"𞤀𞤎", "𞤇𞤎"}, +} + +var localeTableFfAdlmSN = [5][]string{ + {"𞤈𞤫𞤬", "𞤀𞥄𞤩𞤵", "𞤃𞤢𞤦", "𞤔𞤫𞤧", "𞤐𞤢𞥄𞤧", "𞤃𞤢𞤣", "𞤖𞤮𞤪"}, + {"𞤈𞤫𞤬𞤦𞤭𞤪𞥆𞤫", "𞤀𞥄𞤩𞤵𞤲𞥋𞤣𞤫", "𞤃𞤢𞤱𞤦𞤢𞥄𞤪𞤫", "𞤐𞤶𞤫𞤧𞤤𞤢𞥄𞤪𞤫", "𞤐𞤢𞥄𞤧𞤢𞥄𞤲𞤣𞤫", "𞤃𞤢𞤱𞤲𞤣𞤫", "𞤖𞤮𞤪𞤦𞤭𞤪𞥆𞤫"}, + {}, + {"𞤅𞤭𞥅𞤤𞤮", "𞤕𞤮𞤤𞤼𞤮", "𞤐𞤦𞤮𞥅𞤴𞤮", "𞤅𞤫𞥅𞤼𞤮", "𞤁𞤵𞥅𞤶𞤮", "𞤑𞤮𞤪𞤧𞤮", "𞤃𞤮𞤪𞤧𞤮", "𞤔𞤵𞤳𞤮", "𞤅𞤭𞤤𞤼𞤮", "𞤒𞤢𞤪𞤳𞤮", "𞤔𞤮𞤤𞤮", "𞤄𞤮𞤱𞤼𞤮"}, + {"𞤀𞤎", "𞤇𞤎"}, +} + +var localeTableFfLatn = [5][]string{ + {"dew", "aaɓ", "maw", "nje", "naa", "mwd", "hbi"}, + {"dewo", "aaɓnde", "mawbaare", "njeslaare", "naasaande", "mawnde", "hoore-biir"}, + {"sii", "col", "mbo", "see", "duu", "kor", "mor", "juk", "slt", "yar", "jol", "bow"}, + {"siilo", "colte", "mbooy", "seeɗto", "duujal", "korse", "morso", "juko", "siilto", "yarkomaa", "jolal", "bowte"}, + {"subaka", "kikiiɗe"}, +} + +var localeTableFfLatnBF = [5][]string{ + {"dew", "aaɓ", "maw", "nje", "naa", "mwd", "hbi"}, + {"dewo", "aaɓnde", "mawbaare", "njeslaare", "naasaande", "mawnde", "hoore-biir"}, + {"sii", "col", "mbo", "see", "duu", "kor", "mor", "juk", "slt", "yar", "jol", "bow"}, + {"siilo", "colte", "mbooy", "seeɗto", "duujal", "korse", "morso", "juko", "siilto", "yarkomaa", "jolal", "bowte"}, + {"subaka", "kikiiɗe"}, +} + +var localeTableFfLatnCM = [5][]string{ + {"dew", "aaɓ", "maw", "nje", "naa", "mwd", "hbi"}, + {"dewo", "aaɓnde", "mawbaare", "njeslaare", "naasaande", "mawnde", "hoore-biir"}, + {"sii", "col", "mbo", "see", "duu", "kor", "mor", "juk", "slt", "yar", "jol", "bow"}, + {"siilo", "colte", "mbooy", "seeɗto", "duujal", "korse", "morso", "juko", "siilto", "yarkomaa", "jolal", "bowte"}, + {"subaka", "kikiiɗe"}, +} + +var localeTableFfLatnGH = [5][]string{ + {"dew", "aaɓ", "maw", "nje", "naa", "mwd", "hbi"}, + {"dewo", "aaɓnde", "mawbaare", "njeslaare", "naasaande", "mawnde", "hoore-biir"}, + {"sii", "col", "mbo", "see", "duu", "kor", "mor", "juk", "slt", "yar", "jol", "bow"}, + {"siilo", "colte", "mbooy", "seeɗto", "duujal", "korse", "morso", "juko", "siilto", "yarkomaa", "jolal", "bowte"}, + {"subaka", "kikiiɗe"}, +} + +var localeTableFfLatnGM = [5][]string{ + {"dew", "aaɓ", "maw", "nje", "naa", "mwd", "hbi"}, + {"dewo", "aaɓnde", "mawbaare", "njeslaare", "naasaande", "mawnde", "hoore-biir"}, + {"sii", "col", "mbo", "see", "duu", "kor", "mor", "juk", "slt", "yar", "jol", "bow"}, + {"siilo", "colte", "mbooy", "seeɗto", "duujal", "korse", "morso", "juko", "siilto", "yarkomaa", "jolal", "bowte"}, + {"subaka", "kikiiɗe"}, +} + +var localeTableFfLatnGN = [5][]string{ + {"dew", "aaɓ", "maw", "nje", "naa", "mwd", "hbi"}, + {"dewo", "aaɓnde", "mawbaare", "njeslaare", "naasaande", "mawnde", "hoore-biir"}, + {"sii", "col", "mbo", "see", "duu", "kor", "mor", "juk", "slt", "yar", "jol", "bow"}, + {"siilo", "colte", "mbooy", "seeɗto", "duujal", "korse", "morso", "juko", "siilto", "yarkomaa", "jolal", "bowte"}, + {"subaka", "kikiiɗe"}, +} + +var localeTableFfLatnGW = [5][]string{ + {"dew", "aaɓ", "maw", "nje", "naa", "mwd", "hbi"}, + {"dewo", "aaɓnde", "mawbaare", "njeslaare", "naasaande", "mawnde", "hoore-biir"}, + {"sii", "col", "mbo", "see", "duu", 
"kor", "mor", "juk", "slt", "yar", "jol", "bow"}, + {"siilo", "colte", "mbooy", "seeɗto", "duujal", "korse", "morso", "juko", "siilto", "yarkomaa", "jolal", "bowte"}, + {"subaka", "kikiiɗe"}, +} + +var localeTableFfLatnLR = [5][]string{ + {"dew", "aaɓ", "maw", "nje", "naa", "mwd", "hbi"}, + {"dewo", "aaɓnde", "mawbaare", "njeslaare", "naasaande", "mawnde", "hoore-biir"}, + {"sii", "col", "mbo", "see", "duu", "kor", "mor", "juk", "slt", "yar", "jol", "bow"}, + {"siilo", "colte", "mbooy", "seeɗto", "duujal", "korse", "morso", "juko", "siilto", "yarkomaa", "jolal", "bowte"}, + {"subaka", "kikiiɗe"}, +} + +var localeTableFfLatnMR = [5][]string{ + {"dew", "aaɓ", "maw", "nje", "naa", "mwd", "hbi"}, + {"dewo", "aaɓnde", "mawbaare", "njeslaare", "naasaande", "mawnde", "hoore-biir"}, + {"sii", "col", "mbo", "see", "duu", "kor", "mor", "juk", "slt", "yar", "jol", "bow"}, + {"siilo", "colte", "mbooy", "seeɗto", "duujal", "korse", "morso", "juko", "siilto", "yarkomaa", "jolal", "bowte"}, + {"subaka", "kikiiɗe"}, +} + +var localeTableFfLatnNE = [5][]string{ + {"dew", "aaɓ", "maw", "nje", "naa", "mwd", "hbi"}, + {"dewo", "aaɓnde", "mawbaare", "njeslaare", "naasaande", "mawnde", "hoore-biir"}, + {"sii", "col", "mbo", "see", "duu", "kor", "mor", "juk", "slt", "yar", "jol", "bow"}, + {"siilo", "colte", "mbooy", "seeɗto", "duujal", "korse", "morso", "juko", "siilto", "yarkomaa", "jolal", "bowte"}, + {"subaka", "kikiiɗe"}, +} + +var localeTableFfLatnNG = [5][]string{ + {"dew", "aaɓ", "maw", "nje", "naa", "mwd", "hbi"}, + {"dewo", "aaɓnde", "mawbaare", "njeslaare", "naasaande", "mawnde", "hoore-biir"}, + {"sii", "col", "mbo", "see", "duu", "kor", "mor", "juk", "slt", "yar", "jol", "bow"}, + {"siilo", "colte", "mbooy", "seeɗto", "duujal", "korse", "morso", "juko", "siilto", "yarkomaa", "jolal", "bowte"}, + {"subaka", "kikiiɗe"}, +} + +var localeTableFfLatnSL = [5][]string{ + {"dew", "aaɓ", "maw", "nje", "naa", "mwd", "hbi"}, + {"dewo", "aaɓnde", "mawbaare", "njeslaare", "naasaande", "mawnde", "hoore-biir"}, + {"sii", "col", "mbo", "see", "duu", "kor", "mor", "juk", "slt", "yar", "jol", "bow"}, + {"siilo", "colte", "mbooy", "seeɗto", "duujal", "korse", "morso", "juko", "siilto", "yarkomaa", "jolal", "bowte"}, + {"subaka", "kikiiɗe"}, +} + +var localeTableFfLatnSN = [5][]string{ + {"dew", "aaɓ", "maw", "nje", "naa", "mwd", "hbi"}, + {"dewo", "aaɓnde", "mawbaare", "njeslaare", "naasaande", "mawnde", "hoore-biir"}, + {"sii", "col", "mbo", "see", "duu", "kor", "mor", "juk", "slt", "yar", "jol", "bow"}, + {"siilo", "colte", "mbooy", "seeɗto", "duujal", "korse", "morso", "juko", "siilto", "yarkomaa", "jolal", "bowte"}, + {"subaka", "kikiiɗe"}, +} + +var localeTableFi = [5][]string{ + {"su", "ma", "ti", "ke", "to", "pe", "la"}, + {"sunnuntaina", "maanantaina", "tiistaina", "keskiviikkona", "torstaina", "perjantaina", "lauantaina"}, + {"tammik.", "helmik.", "maalisk.", "huhtik.", "toukok.", "kesäk.", "heinäk.", "elok.", "syysk.", "lokak.", "marrask.", "jouluk."}, + {"tammikuuta", "helmikuuta", "maaliskuuta", "huhtikuuta", "toukokuuta", "kesäkuuta", "heinäkuuta", "elokuuta", "syyskuuta", "lokakuuta", "marraskuuta", "joulukuuta"}, + {"ap.", "ip."}, +} + +var localeTableFiFI = [5][]string{ + {"su", "ma", "ti", "ke", "to", "pe", "la"}, + {"sunnuntaina", "maanantaina", "tiistaina", "keskiviikkona", "torstaina", "perjantaina", "lauantaina"}, + {"tammik.", "helmik.", "maalisk.", "huhtik.", "toukok.", "kesäk.", "heinäk.", "elok.", "syysk.", "lokak.", "marrask.", "jouluk."}, + {"tammikuuta", "helmikuuta", "maaliskuuta", 
"huhtikuuta", "toukokuuta", "kesäkuuta", "heinäkuuta", "elokuuta", "syyskuuta", "lokakuuta", "marraskuuta", "joulukuuta"}, + {"ap.", "ip."}, +} + +var localeTableFil = [5][]string{ + {"Lin", "Lun", "Mar", "Miy", "Huw", "Biy", "Sab"}, + {"Linggo", "Lunes", "Martes", "Miyerkules", "Huwebes", "Biyernes", "Sabado"}, + {"Ene", "Peb", "Mar", "Abr", "May", "Hun", "Hul", "Ago", "Set", "Okt", "Nob", "Dis"}, + {"Enero", "Pebrero", "Marso", "Abril", "Mayo", "Hunyo", "Hulyo", "Agosto", "Setyembre", "Oktubre", "Nobyembre", "Disyembre"}, + {"am", "pm"}, +} + +var localeTableFilPH = [5][]string{ + {"Lin", "Lun", "Mar", "Miy", "Huw", "Biy", "Sab"}, + {"Linggo", "Lunes", "Martes", "Miyerkules", "Huwebes", "Biyernes", "Sabado"}, + {"Ene", "Peb", "Mar", "Abr", "May", "Hun", "Hul", "Ago", "Set", "Okt", "Nob", "Dis"}, + {"Enero", "Pebrero", "Marso", "Abril", "Mayo", "Hunyo", "Hulyo", "Agosto", "Setyembre", "Oktubre", "Nobyembre", "Disyembre"}, + {"am", "pm"}, +} + +var localeTableFo = [5][]string{ + {"sun.", "mán.", "týs.", "mik.", "hós.", "frí.", "ley."}, + {"sunnudagur", "mánadagur", "týsdagur", "mikudagur", "hósdagur", "fríggjadagur", "leygardagur"}, + {"jan.", "feb.", "mar.", "apr.", "mai", "jun.", "jul.", "aug.", "sep.", "okt.", "nov.", "des."}, + {"januar", "februar", "mars", "apríl", "mai", "juni", "juli", "august", "september", "oktober", "november", "desember"}, + {}, +} + +var localeTableFoDK = [5][]string{ + {"sun.", "mán.", "týs.", "mik.", "hós.", "frí.", "ley."}, + {"sunnudagur", "mánadagur", "týsdagur", "mikudagur", "hósdagur", "fríggjadagur", "leygardagur"}, + {"jan.", "feb.", "mar.", "apr.", "mai", "jun.", "jul.", "aug.", "sep.", "okt.", "nov.", "des."}, + {"januar", "februar", "mars", "apríl", "mai", "juni", "juli", "august", "september", "oktober", "november", "desember"}, + {}, +} + +var localeTableFoFO = [5][]string{ + {"sun.", "mán.", "týs.", "mik.", "hós.", "frí.", "ley."}, + {"sunnudagur", "mánadagur", "týsdagur", "mikudagur", "hósdagur", "fríggjadagur", "leygardagur"}, + {"jan.", "feb.", "mar.", "apr.", "mai", "jun.", "jul.", "aug.", "sep.", "okt.", "nov.", "des."}, + {"januar", "februar", "mars", "apríl", "mai", "juni", "juli", "august", "september", "oktober", "november", "desember"}, + {}, +} + +var localeTableFr = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrBE = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrBF = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrBI = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", 
"ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrBJ = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrBL = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrCA = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juill.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {"a.m.", "p.m."}, +} + +var localeTableFrCD = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrCF = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrCG = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrCH = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrCI = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", 
"mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrCM = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {"mat.", "soir"}, +} + +var localeTableFrDJ = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrDZ = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrFR = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrGA = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrGF = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrGN = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrGP = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrGQ = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", 
"vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrHT = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrKM = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrLU = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrMA = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"jan.", "fév.", "mar.", "avr.", "mai", "jui.", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrMC = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrMF = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrMG = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrML = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} 
+ +var localeTableFrMQ = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrMR = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrMU = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrNC = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrNE = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrPF = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrPM = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrRE = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrRW = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", 
"oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrSC = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrSN = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrSY = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrTD = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrTG = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrTN = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrVU = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrWF = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrYT = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + 
{"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrr = [5][]string{ + {"Sön", "Mun", "Tei", "Wed", "Tür", "Fre", "San"}, + {"Söndai", "Mundai", "Teisdai", "Weedensdai", "Tüürsdai", "Freidai", "Saninj"}, + {"Jan", "Feb", "Mar", "Apr", "Mei", "Jün", "Jül", "Aug", "Sep", "Okt", "Nof", "Det"}, + {"Janewoore", "Febrewoore", "Maarts", "April", "Mei", "Jüüne", "Jüüle", "August", "September", "Oktuuber", "Nofember", "Detsember"}, + {"i/m", "e/m"}, +} + +var localeTableFrrDE = [5][]string{ + {"Sön", "Mun", "Tei", "Wed", "Tür", "Fre", "San"}, + {"Söndai", "Mundai", "Teisdai", "Weedensdai", "Tüürsdai", "Freidai", "Saninj"}, + {"Jan", "Feb", "Mar", "Apr", "Mei", "Jün", "Jül", "Aug", "Sep", "Okt", "Nof", "Det"}, + {"Janewoore", "Febrewoore", "Maarts", "April", "Mei", "Jüüne", "Jüüle", "August", "September", "Oktuuber", "Nofember", "Detsember"}, + {"i/m", "e/m"}, +} + +var localeTableFur = [5][]string{ + {"dom", "lun", "mar", "mie", "joi", "vin", "sab"}, + {"domenie", "lunis", "martars", "miercus", "joibe", "vinars", "sabide"}, + {"Zen", "Fev", "Mar", "Avr", "Mai", "Jug", "Lui", "Avo", "Set", "Otu", "Nov", "Dic"}, + {"Zenâr", "Fevrâr", "Març", "Avrîl", "Mai", "Jugn", "Lui", "Avost", "Setembar", "Otubar", "Novembar", "Dicembar"}, + {"a.", "p."}, +} + +var localeTableFurIT = [5][]string{ + {"dom", "lun", "mar", "mie", "joi", "vin", "sab"}, + {"domenie", "lunis", "martars", "miercus", "joibe", "vinars", "sabide"}, + {"Zen", "Fev", "Mar", "Avr", "Mai", "Jug", "Lui", "Avo", "Set", "Otu", "Nov", "Dic"}, + {"Zenâr", "Fevrâr", "Març", "Avrîl", "Mai", "Jugn", "Lui", "Avost", "Setembar", "Otubar", "Novembar", "Dicembar"}, + {"a.", "p."}, +} + +var localeTableFy = [5][]string{ + {"si", "mo", "ti", "wo", "to", "fr", "so"}, + {"snein", "moandei", "tiisdei", "woansdei", "tongersdei", "freed", "sneon"}, + {"Jan", "Feb", "Mrt", "Apr", "Mai", "Jun", "Jul", "Aug", "Sep", "Okt", "Nov", "Des"}, + {"Jannewaris", "Febrewaris", "Maart", "April", "Maaie", "Juny", "July", "Augustus", "Septimber", "Oktober", "Novimber", "Desimber"}, + {}, +} + +var localeTableFyNL = [5][]string{ + {"si", "mo", "ti", "wo", "to", "fr", "so"}, + {"snein", "moandei", "tiisdei", "woansdei", "tongersdei", "freed", "sneon"}, + {"Jan", "Feb", "Mrt", "Apr", "Mai", "Jun", "Jul", "Aug", "Sep", "Okt", "Nov", "Des"}, + {"Jannewaris", "Febrewaris", "Maart", "April", "Maaie", "Juny", "July", "Augustus", "Septimber", "Oktober", "Novimber", "Desimber"}, + {}, +} + +var localeTableGa = [5][]string{ + {"Domh", "Luan", "Máirt", "Céad", "Déar", "Aoine", "Sath"}, + {"Dé Domhnaigh", "Dé Luain", "Dé Máirt", "Dé Céadaoin", "Déardaoin", "Dé hAoine", "Dé Sathairn"}, + {"Ean", "Feabh", "Márta", "Aib", "Beal", "Meith", "Iúil", "Lún", "MFómh", "DFómh", "Samh", "Noll"}, + {"Eanáir", "Feabhra", "Márta", "Aibreán", "Bealtaine", "Meitheamh", "Iúil", "Lúnasa", "Meán Fómhair", "Deireadh Fómhair", "Samhain", "Nollaig"}, + {"r.n.", "i.n."}, +} + +var localeTableGaGB = [5][]string{ + {"Domh", "Luan", "Máirt", "Céad", "Déar", "Aoine", "Sath"}, + {"Dé Domhnaigh", "Dé Luain", "Dé Máirt", "Dé Céadaoin", "Déardaoin", "Dé hAoine", "Dé Sathairn"}, + {"Ean", "Feabh", "Márta", "Aib", "Beal", "Meith", "Iúil", "Lún", "MFómh", "DFómh", "Samh", "Noll"}, + {"Eanáir", "Feabhra", "Márta", "Aibreán", "Bealtaine", 
"Meitheamh", "Iúil", "Lúnasa", "Meán Fómhair", "Deireadh Fómhair", "Samhain", "Nollaig"}, + {"r.n.", "i.n."}, +} + +var localeTableGaIE = [5][]string{ + {"Domh", "Luan", "Máirt", "Céad", "Déar", "Aoine", "Sath"}, + {"Dé Domhnaigh", "Dé Luain", "Dé Máirt", "Dé Céadaoin", "Déardaoin", "Dé hAoine", "Dé Sathairn"}, + {"Ean", "Feabh", "Márta", "Aib", "Beal", "Meith", "Iúil", "Lún", "MFómh", "DFómh", "Samh", "Noll"}, + {"Eanáir", "Feabhra", "Márta", "Aibreán", "Bealtaine", "Meitheamh", "Iúil", "Lúnasa", "Meán Fómhair", "Deireadh Fómhair", "Samhain", "Nollaig"}, + {"r.n.", "i.n."}, +} + +var localeTableGaa = [5][]string{ + {"Hɔg", "Ju", "Juf", "Shɔ", "Soo", "Soh", "Hɔɔ"}, + {"Hɔgbaa", "Ju", "Jufɔ", "Shɔ", "Soo", "Sohaa", "Hɔɔ"}, + {"Aha", "Ofl", "Ots", "Abe", "Agb", "Otu", "Maa", "Man", "Gbo", "Ant", "Ale", "Afu"}, + {"Aharabata", "Oflɔ", "Otsokrikri", "Abeibe", "Agbiɛnaa", "Otukwajaŋ", "Maawɛ", "Manyawale", "Gbo", "Antɔŋ", "Alemle", "Afuabe"}, + {"LB", "SN"}, +} + +var localeTableGaaGH = [5][]string{ + {"Hɔg", "Ju", "Juf", "Shɔ", "Soo", "Soh", "Hɔɔ"}, + {"Hɔgbaa", "Ju", "Jufɔ", "Shɔ", "Soo", "Sohaa", "Hɔɔ"}, + {"Aha", "Ofl", "Ots", "Abe", "Agb", "Otu", "Maa", "Man", "Gbo", "Ant", "Ale", "Afu"}, + {"Aharabata", "Oflɔ", "Otsokrikri", "Abeibe", "Agbiɛnaa", "Otukwajaŋ", "Maawɛ", "Manyawale", "Gbo", "Antɔŋ", "Alemle", "Afuabe"}, + {"LB", "SN"}, +} + +var localeTableGd = [5][]string{ + {"DiD", "DiL", "DiM", "DiC", "Dia", "Dih", "DiS"}, + {"DiDòmhnaich", "DiLuain", "DiMàirt", "DiCiadain", "DiarDaoin", "DihAoine", "DiSathairne"}, + {"Faoi", "Gearr", "Màrt", "Gibl", "Cèit", "Ògmh", "Iuch", "Lùna", "Sult", "Dàmh", "Samh", "Dùbh"}, + {"dhen Fhaoilleach", "dhen Ghearran", "dhen Mhàrt", "dhen Ghiblean", "dhen Chèitean", "dhen Ògmhios", "dhen Iuchar", "dhen Lùnastal", "dhen t-Sultain", "dhen Dàmhair", "dhen t-Samhain", "dhen Dùbhlachd"}, + {"m", "f"}, +} + +var localeTableGdGB = [5][]string{ + {"DiD", "DiL", "DiM", "DiC", "Dia", "Dih", "DiS"}, + {"DiDòmhnaich", "DiLuain", "DiMàirt", "DiCiadain", "DiarDaoin", "DihAoine", "DiSathairne"}, + {"Faoi", "Gearr", "Màrt", "Gibl", "Cèit", "Ògmh", "Iuch", "Lùna", "Sult", "Dàmh", "Samh", "Dùbh"}, + {"dhen Fhaoilleach", "dhen Ghearran", "dhen Mhàrt", "dhen Ghiblean", "dhen Chèitean", "dhen Ògmhios", "dhen Iuchar", "dhen Lùnastal", "dhen t-Sultain", "dhen Dàmhair", "dhen t-Samhain", "dhen Dùbhlachd"}, + {"m", "f"}, +} + +var localeTableGez = [5][]string{ + {}, + {"እኁድ", "ሰኑይ", "ሠሉስ", "ራብዕ", "ሐሙስ", "ዓርበ", "ቀዳሚት"}, + {}, + {"ጠሐረ", "ከተተ", "መገበ", "አኀዘ", "ግንባት", "ሠንየ", "ሐመለ", "ነሐሰ", "ከረመ", "ጠቀመ", "ኀደረ", "ኀሠሠ"}, + {"ጽባሕ", "ምሴት"}, +} + +var localeTableGezER = [5][]string{ + {}, + {"እኁድ", "ሰኑይ", "ሠሉስ", "ራብዕ", "ሐሙስ", "ዓርበ", "ቀዳሚት"}, + {}, + {"ጠሐረ", "ከተተ", "መገበ", "አኀዘ", "ግንባት", "ሠንየ", "ሐመለ", "ነሐሰ", "ከረመ", "ጠቀመ", "ኀደረ", "ኀሠሠ"}, + {"ጽባሕ", "ምሴት"}, +} + +var localeTableGezET = [5][]string{ + {}, + {"እኁድ", "ሰኑይ", "ሠሉስ", "ራብዕ", "ሐሙስ", "ዓርበ", "ቀዳሚት"}, + {}, + {"ጠሐረ", "ከተተ", "መገበ", "አኀዘ", "ግንባት", "ሠንየ", "ሐመለ", "ነሐሰ", "ከረመ", "ጠቀመ", "ኀደረ", "ኀሠሠ"}, + {"ጽባሕ", "ምሴት"}, +} + +var localeTableGl = [5][]string{ + {"dom.", "luns", "mar.", "mér.", "xov.", "ven.", "sáb."}, + {"domingo", "luns", "martes", "mércores", "xoves", "venres", "sábado"}, + {"xan.", "feb.", "mar.", "abr.", "maio", "xuño", "xul.", "ago.", "set.", "out.", "nov.", "dec."}, + {"xaneiro", "febreiro", "marzo", "abril", "maio", "xuño", "xullo", "agosto", "setembro", "outubro", "novembro", "decembro"}, + {"a.m.", "p.m."}, +} + +var localeTableGlES = [5][]string{ + {"dom.", "luns", "mar.", "mér.", "xov.", "ven.", "sáb."}, + {"domingo", 
"luns", "martes", "mércores", "xoves", "venres", "sábado"}, + {"xan.", "feb.", "mar.", "abr.", "maio", "xuño", "xul.", "ago.", "set.", "out.", "nov.", "dec."}, + {"xaneiro", "febreiro", "marzo", "abril", "maio", "xuño", "xullo", "agosto", "setembro", "outubro", "novembro", "decembro"}, + {"a.m.", "p.m."}, +} + +var localeTableGn = [5][]string{ + {}, + {"Arateĩ", "Arakõi", "Araapy", "Ararundy", "Arapo", "Arapoteĩ", "Arapokõi"}, + {}, + {"Jasyteĩ", "Jasykõi", "Jasyapy", "Jasyrundy", "Jasypo", "Jasypoteĩ", "Jasypokõi", "Jasypoapy", "Jasyporundy", "Jasypa", "Jasypateĩ", "Jasypakõi"}, + {}, +} + +var localeTableGnPY = [5][]string{ + {}, + {"Arateĩ", "Arakõi", "Araapy", "Ararundy", "Arapo", "Arapoteĩ", "Arapokõi"}, + {}, + {"Jasyteĩ", "Jasykõi", "Jasyapy", "Jasyrundy", "Jasypo", "Jasypoteĩ", "Jasypokõi", "Jasypoapy", "Jasyporundy", "Jasypa", "Jasypateĩ", "Jasypakõi"}, + {}, +} + +var localeTableGsw = [5][]string{ + {"Su.", "Mä.", "Zi.", "Mi.", "Du.", "Fr.", "Sa."}, + {"Sunntig", "Määntig", "Ziischtig", "Mittwuch", "Dunschtig", "Friitig", "Samschtig"}, + {"Jan", "Feb", "Mär", "Apr", "Mai", "Jun", "Jul", "Aug", "Sep", "Okt", "Nov", "Dez"}, + {"Januar", "Februar", "März", "April", "Mai", "Juni", "Juli", "Auguscht", "Septämber", "Oktoober", "Novämber", "Dezämber"}, + {"vorm.", "nam."}, +} + +var localeTableGswCH = [5][]string{ + {"Su.", "Mä.", "Zi.", "Mi.", "Du.", "Fr.", "Sa."}, + {"Sunntig", "Määntig", "Ziischtig", "Mittwuch", "Dunschtig", "Friitig", "Samschtig"}, + {"Jan", "Feb", "Mär", "Apr", "Mai", "Jun", "Jul", "Aug", "Sep", "Okt", "Nov", "Dez"}, + {"Januar", "Februar", "März", "April", "Mai", "Juni", "Juli", "Auguscht", "Septämber", "Oktoober", "Novämber", "Dezämber"}, + {"vorm.", "nam."}, +} + +var localeTableGswFR = [5][]string{ + {"Su.", "Mä.", "Zi.", "Mi.", "Du.", "Fr.", "Sa."}, + {"Sunntig", "Määntig", "Ziischtig", "Mittwuch", "Dunschtig", "Friitig", "Samschtig"}, + {"Jan", "Feb", "Mär", "Apr", "Mai", "Jun", "Jul", "Aug", "Sep", "Okt", "Nov", "Dez"}, + {"Januar", "Februar", "März", "April", "Mai", "Juni", "Juli", "Auguscht", "Septämber", "Oktoober", "Novämber", "Dezämber"}, + {"vorm.", "nam."}, +} + +var localeTableGswLI = [5][]string{ + {"Su.", "Mä.", "Zi.", "Mi.", "Du.", "Fr.", "Sa."}, + {"Sunntig", "Määntig", "Ziischtig", "Mittwuch", "Dunschtig", "Friitig", "Samschtig"}, + {"Jan", "Feb", "Mär", "Apr", "Mai", "Jun", "Jul", "Aug", "Sep", "Okt", "Nov", "Dez"}, + {"Januar", "Februar", "März", "April", "Mai", "Juni", "Juli", "Auguscht", "Septämber", "Oktoober", "Novämber", "Dezämber"}, + {"vorm.", "nam."}, +} + +var localeTableGu = [5][]string{ + {"રવિ", "સોમ", "મંગળ", "બુધ", "ગુરુ", "શુક્ર", "શનિ"}, + {"રવિવાર", "સોમવાર", "મંગળવાર", "બુધવાર", "ગુરુવાર", "શુક્રવાર", "શનિવાર"}, + {"જાન્યુ", "ફેબ્રુ", "માર્ચ", "એપ્રિલ", "મે", "જૂન", "જુલાઈ", "ઑગસ્ટ", "સપ્ટે", "ઑક્ટો", "નવે", "ડિસે"}, + {"જાન્યુઆરી", "ફેબ્રુઆરી", "માર્ચ", "એપ્રિલ", "મે", "જૂન", "જુલાઈ", "ઑગસ્ટ", "સપ્ટેમ્બર", "ઑક્ટોબર", "નવેમ્બર", "ડિસેમ્બર"}, + {}, +} + +var localeTableGuIN = [5][]string{ + {"રવિ", "સોમ", "મંગળ", "બુધ", "ગુરુ", "શુક્ર", "શનિ"}, + {"રવિવાર", "સોમવાર", "મંગળવાર", "બુધવાર", "ગુરુવાર", "શુક્રવાર", "શનિવાર"}, + {"જાન્યુ", "ફેબ્રુ", "માર્ચ", "એપ્રિલ", "મે", "જૂન", "જુલાઈ", "ઑગસ્ટ", "સપ્ટે", "ઑક્ટો", "નવે", "ડિસે"}, + {"જાન્યુઆરી", "ફેબ્રુઆરી", "માર્ચ", "એપ્રિલ", "મે", "જૂન", "જુલાઈ", "ઑગસ્ટ", "સપ્ટેમ્બર", "ઑક્ટોબર", "નવેમ્બર", "ડિસેમ્બર"}, + {}, +} + +var localeTableGuz = [5][]string{ + {"Cpr", "Ctt", "Cmn", "Cmt", "Ars", "Icm", "Est"}, + {"Chumapiri", "Chumatato", "Chumaine", "Chumatano", "Aramisi", "Ichuma", 
"Esabato"}, + {"Can", "Feb", "Mac", "Apr", "Mei", "Jun", "Cul", "Agt", "Sep", "Okt", "Nob", "Dis"}, + {"Chanuari", "Feburari", "Machi", "Apiriri", "Mei", "Juni", "Chulai", "Agosti", "Septemba", "Okitoba", "Nobemba", "Disemba"}, + {"Ma", "Mo"}, +} + +var localeTableGuzKE = [5][]string{ + {"Cpr", "Ctt", "Cmn", "Cmt", "Ars", "Icm", "Est"}, + {"Chumapiri", "Chumatato", "Chumaine", "Chumatano", "Aramisi", "Ichuma", "Esabato"}, + {"Can", "Feb", "Mac", "Apr", "Mei", "Jun", "Cul", "Agt", "Sep", "Okt", "Nob", "Dis"}, + {"Chanuari", "Feburari", "Machi", "Apiriri", "Mei", "Juni", "Chulai", "Agosti", "Septemba", "Okitoba", "Nobemba", "Disemba"}, + {"Ma", "Mo"}, +} + +var localeTableGv = [5][]string{ + {"Jed", "Jel", "Jem", "Jerc", "Jerd", "Jeh", "Jes"}, + {"Jedoonee", "Jelhein", "Jemayrt", "Jercean", "Jerdein", "Jeheiney", "Jesarn"}, + {"J-guer", "T-arree", "Mayrnt", "Avrril", "Boaldyn", "M-souree", "J-souree", "Luanistyn", "M-fouyir", "J-fouyir", "M-Houney", "M-Nollick"}, + {"Jerrey-geuree", "Toshiaght-arree", "Mayrnt", "Averil", "Boaldyn", "Mean-souree", "Jerrey-souree", "Luanistyn", "Mean-fouyir", "Jerrey-fouyir", "Mee Houney", "Mee ny Nollick"}, + {"a.m.", "p.m."}, +} + +var localeTableGvIM = [5][]string{ + {"Jed", "Jel", "Jem", "Jerc", "Jerd", "Jeh", "Jes"}, + {"Jedoonee", "Jelhein", "Jemayrt", "Jercean", "Jerdein", "Jeheiney", "Jesarn"}, + {"J-guer", "T-arree", "Mayrnt", "Avrril", "Boaldyn", "M-souree", "J-souree", "Luanistyn", "M-fouyir", "J-fouyir", "M-Houney", "M-Nollick"}, + {"Jerrey-geuree", "Toshiaght-arree", "Mayrnt", "Averil", "Boaldyn", "Mean-souree", "Jerrey-souree", "Luanistyn", "Mean-fouyir", "Jerrey-fouyir", "Mee Houney", "Mee ny Nollick"}, + {"a.m.", "p.m."}, +} + +var localeTableHa = [5][]string{ + {"Lah", "Lit", "Tal", "Lar", "Alh", "Jum", "Asa"}, + {"Lahadi", "Litinin", "Talata", "Laraba", "Alhamis", "Jummaʼa", "Asabar"}, + {"Jan", "Fab", "Mar", "Afi", "May", "Yun", "Yul", "Agu", "Sat", "Okt", "Nuw", "Dis"}, + {"Janairu", "Faburairu", "Maris", "Afirilu", "Mayu", "Yuni", "Yuli", "Agusta", "Satumba", "Oktoba", "Nuwamba", "Disamba"}, + {"SF", "YM"}, +} + +var localeTableHaArab = [5][]string{ + {"لَح", "لِت", "تَل", "لَر", "أَلْح", "جُم", "أَسَ"}, + {"لَحَدِ", "لِتِنِنْ", "تَلَتَ", "لَرَبَ", "أَلْحَمِسْ", "جُمَعَ", "أَسَبَرْ"}, + {"جَن", "ڢَب", "مَر", "أَڢْر", "مَي", "يُون", "يُول", "أَغُ", "سَت", "أُكْت", "نُو", "دِس"}, + {"جَنَيْرُ", "ڢَبْرَيْرُ", "مَرِسْ", "أَڢْرِلُ", "مَيُ", "يُونِ", "يُولِ", "أَغُسْتَ", "سَتُمْبَ", "أُكْتوُبَ", "نُوَمْبَ", "دِسَمْبَ"}, + {"A.M.", "P.M."}, +} + +var localeTableHaArabNG = [5][]string{ + {"لَح", "لِت", "تَل", "لَر", "أَلْح", "جُم", "أَسَ"}, + {"لَحَدِ", "لِتِنِنْ", "تَلَتَ", "لَرَبَ", "أَلْحَمِسْ", "جُمَعَ", "أَسَبَرْ"}, + {"جَن", "ڢَب", "مَر", "أَڢْر", "مَي", "يُون", "يُول", "أَغُ", "سَت", "أُكْت", "نُو", "دِس"}, + {"جَنَيْرُ", "ڢَبْرَيْرُ", "مَرِسْ", "أَڢْرِلُ", "مَيُ", "يُونِ", "يُولِ", "أَغُسْتَ", "سَتُمْبَ", "أُكْتوُبَ", "نُوَمْبَ", "دِسَمْبَ"}, + {"A.M.", "P.M."}, +} + +var localeTableHaArabSD = [5][]string{ + {"لَح", "لِت", "تَل", "لَر", "أَلْح", "جُم", "أَسَ"}, + {"لَحَدِ", "لِتِنِنْ", "تَلَتَ", "لَرَبَ", "أَلْحَمِسْ", "جُمَعَ", "أَسَبَرْ"}, + {"جَن", "ڢَب", "مَر", "أَڢْر", "مَي", "يُون", "يُول", "أَغُ", "سَت", "أُكْت", "نُو", "دِس"}, + {"جَنَيْرُ", "ڢَبْرَيْرُ", "مَرِسْ", "أَڢْرِلُ", "مَيُ", "يُونِ", "يُولِ", "أَغُسْتَ", "سَتُمْبَ", "أُكْتوُبَ", "نُوَمْبَ", "دِسَمْبَ"}, + {"A.M.", "P.M."}, +} + +var localeTableHaGH = [5][]string{ + {"Lah", "Lit", "Tal", "Lar", "Alh", "Jum", "Asa"}, + {"Lahadi", "Litinin", "Talata", "Laraba", "Alhamis", 
"Jummaʼa", "Asabar"}, + {"Jan", "Fab", "Mar", "Afi", "May", "Yun", "Yul", "Agu", "Sat", "Okt", "Nuw", "Dis"}, + {"Janairu", "Faburairu", "Maris", "Afirilu", "Mayu", "Yuni", "Yuli", "Agusta", "Satumba", "Oktoba", "Nuwamba", "Disamba"}, + {"SF", "YM"}, +} + +var localeTableHaNE = [5][]string{ + {"Lah", "Lit", "Tal", "Lar", "Alh", "Jum", "Asa"}, + {"Lahadi", "Litinin", "Talata", "Laraba", "Alhamis", "Jummaʼa", "Asabar"}, + {"Jan", "Fab", "Mar", "Afi", "May", "Yun", "Yul", "Agu", "Sat", "Okt", "Nuw", "Dis"}, + {"Janairu", "Faburairu", "Maris", "Afirilu", "Mayu", "Yuni", "Yuli", "Agusta", "Satumba", "Oktoba", "Nuwamba", "Disamba"}, + {"SF", "YM"}, +} + +var localeTableHaNG = [5][]string{ + {"Lah", "Lit", "Tal", "Lar", "Alh", "Jum", "Asa"}, + {"Lahadi", "Litinin", "Talata", "Laraba", "Alhamis", "Jummaʼa", "Asabar"}, + {"Jan", "Fab", "Mar", "Afi", "May", "Yun", "Yul", "Agu", "Sat", "Okt", "Nuw", "Dis"}, + {"Janairu", "Faburairu", "Maris", "Afirilu", "Mayu", "Yuni", "Yuli", "Agusta", "Satumba", "Oktoba", "Nuwamba", "Disamba"}, + {"SF", "YM"}, +} + +var localeTableHaw = [5][]string{ + {"LP", "P1", "P2", "P3", "P4", "P5", "P6"}, + {"Lāpule", "Poʻakahi", "Poʻalua", "Poʻakolu", "Poʻahā", "Poʻalima", "Poʻaono"}, + {"Ian.", "Pep.", "Mal.", "ʻAp.", "Mei", "Iun.", "Iul.", "ʻAu.", "Kep.", "ʻOk.", "Now.", "Kek."}, + {"Ianuali", "Pepeluali", "Malaki", "ʻApelila", "Mei", "Iune", "Iulai", "ʻAukake", "Kepakemapa", "ʻOkakopa", "Nowemapa", "Kekemapa"}, + {}, +} + +var localeTableHawUS = [5][]string{ + {"LP", "P1", "P2", "P3", "P4", "P5", "P6"}, + {"Lāpule", "Poʻakahi", "Poʻalua", "Poʻakolu", "Poʻahā", "Poʻalima", "Poʻaono"}, + {"Ian.", "Pep.", "Mal.", "ʻAp.", "Mei", "Iun.", "Iul.", "ʻAu.", "Kep.", "ʻOk.", "Now.", "Kek."}, + {"Ianuali", "Pepeluali", "Malaki", "ʻApelila", "Mei", "Iune", "Iulai", "ʻAukake", "Kepakemapa", "ʻOkakopa", "Nowemapa", "Kekemapa"}, + {}, +} + +var localeTableHe = [5][]string{ + {"יום א׳", "יום ב׳", "יום ג׳", "יום ד׳", "יום ה׳", "יום ו׳", "שבת"}, + {"יום ראשון", "יום שני", "יום שלישי", "יום רביעי", "יום חמישי", "יום שישי", "יום שבת"}, + {"ינו׳", "פבר׳", "מרץ", "אפר׳", "מאי", "יוני", "יולי", "אוג׳", "ספט׳", "אוק׳", "נוב׳", "דצמ׳"}, + {"ינואר", "פברואר", "מרץ", "אפריל", "מאי", "יוני", "יולי", "אוגוסט", "ספטמבר", "אוקטובר", "נובמבר", "דצמבר"}, + {"לפנה״צ", "אחה״צ"}, +} + +var localeTableHeIL = [5][]string{ + {"יום א׳", "יום ב׳", "יום ג׳", "יום ד׳", "יום ה׳", "יום ו׳", "שבת"}, + {"יום ראשון", "יום שני", "יום שלישי", "יום רביעי", "יום חמישי", "יום שישי", "יום שבת"}, + {"ינו׳", "פבר׳", "מרץ", "אפר׳", "מאי", "יוני", "יולי", "אוג׳", "ספט׳", "אוק׳", "נוב׳", "דצמ׳"}, + {"ינואר", "פברואר", "מרץ", "אפריל", "מאי", "יוני", "יולי", "אוגוסט", "ספטמבר", "אוקטובר", "נובמבר", "דצמבר"}, + {"לפנה״צ", "אחה״צ"}, +} + +var localeTableHi = [5][]string{ + {"रवि", "सोम", "मंगल", "बुध", "गुरु", "शुक्र", "शनि"}, + {"रविवार", "सोमवार", "मंगलवार", "बुधवार", "गुरुवार", "शुक्रवार", "शनिवार"}, + {"जन॰", "फ़र॰", "मार्च", "अप्रैल", "मई", "जून", "जुल॰", "अग॰", "सित॰", "अक्तू॰", "नव॰", "दिस॰"}, + {"जनवरी", "फ़रवरी", "मार्च", "अप्रैल", "मई", "जून", "जुलाई", "अगस्त", "सितंबर", "अक्तूबर", "नवंबर", "दिसंबर"}, + {"am", "pm"}, +} + +var localeTableHiIN = [5][]string{ + {"रवि", "सोम", "मंगल", "बुध", "गुरु", "शुक्र", "शनि"}, + {"रविवार", "सोमवार", "मंगलवार", "बुधवार", "गुरुवार", "शुक्रवार", "शनिवार"}, + {"जन॰", "फ़र॰", "मार्च", "अप्रैल", "मई", "जून", "जुल॰", "अग॰", "सित॰", "अक्तू॰", "नव॰", "दिस॰"}, + {"जनवरी", "फ़रवरी", "मार्च", "अप्रैल", "मई", "जून", "जुलाई", "अगस्त", "सितंबर", "अक्तूबर", "नवंबर", "दिसंबर"}, + {"am", "pm"}, +} + +var 
localeTableHiLatn = [5][]string{ + {"Ravi", "Som", "Mangal", "Budh", "Guru", "Shukra", "Shani"}, + {"Raviwaar", "Somwaar", "Mangalwaar", "Budhwaar", "Guruwaar", "Shukrawaar", "Shaniwaar"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}, + {}, + {"AM", "PM"}, +} + +var localeTableHiLatnIN = [5][]string{ + {"Ravi", "Som", "Mangal", "Budh", "Guru", "Shukra", "Shani"}, + {"Raviwaar", "Somwaar", "Mangalwaar", "Budhwaar", "Guruwaar", "Shukrawaar", "Shaniwaar"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}, + {}, + {"AM", "PM"}, +} + +var localeTableHnj = [5][]string{ + {}, + {}, + {}, + {"𞄆𞄬", "𞄛𞄨𞄱𞄄𞄤𞄲𞄨", "𞄒𞄫𞄰𞄒𞄪𞄱", "𞄤𞄨𞄱", "𞄀𞄪𞄴", "𞄛𞄤𞄱𞄞𞄤𞄦", "𞄔𞄩𞄴𞄆𞄨𞄰", "𞄕𞄩𞄲𞄔𞄄𞄰𞄤", "𞄛𞄤𞄱𞄒𞄤𞄰", "𞄪𞄱𞄀𞄤𞄴", "𞄚𞄦𞄲𞄤𞄚𞄄𞄰𞄫", "𞄒𞄩𞄱𞄔𞄬𞄴"}, + {}, +} + +var localeTableHr = [5][]string{ + {"ned", "pon", "uto", "sri", "čet", "pet", "sub"}, + {"nedjelja", "ponedjeljak", "utorak", "srijeda", "četvrtak", "petak", "subota"}, + {"sij", "velj", "ožu", "tra", "svi", "lip", "srp", "kol", "ruj", "lis", "stu", "pro"}, + {"siječnja", "veljače", "ožujka", "travnja", "svibnja", "lipnja", "srpnja", "kolovoza", "rujna", "listopada", "studenoga", "prosinca"}, + {}, +} + +var localeTableHrBA = [5][]string{ + {"ned", "pon", "uto", "sri", "čet", "pet", "sub"}, + {"nedjelja", "ponedjeljak", "utorak", "srijeda", "četvrtak", "petak", "subota"}, + {"sij", "velj", "ožu", "tra", "svi", "lip", "srp", "kol", "ruj", "lis", "stu", "pro"}, + {"siječnja", "veljače", "ožujka", "travnja", "svibnja", "lipnja", "srpnja", "kolovoza", "rujna", "listopada", "studenoga", "prosinca"}, + {}, +} + +var localeTableHrHR = [5][]string{ + {"ned", "pon", "uto", "sri", "čet", "pet", "sub"}, + {"nedjelja", "ponedjeljak", "utorak", "srijeda", "četvrtak", "petak", "subota"}, + {"sij", "velj", "ožu", "tra", "svi", "lip", "srp", "kol", "ruj", "lis", "stu", "pro"}, + {"siječnja", "veljače", "ožujka", "travnja", "svibnja", "lipnja", "srpnja", "kolovoza", "rujna", "listopada", "studenoga", "prosinca"}, + {}, +} + +var localeTableHsb = [5][]string{ + {"nje", "pón", "wut", "srj", "štw", "pja", "sob"}, + {"njedźela", "póndźela", "wutora", "srjeda", "štwórtk", "pjatk", "sobota"}, + {"jan.", "feb.", "měr.", "apr.", "mej.", "jun.", "jul.", "awg.", "sep.", "okt.", "now.", "dec."}, + {"januara", "februara", "měrca", "apryla", "meje", "junija", "julija", "awgusta", "septembra", "oktobra", "nowembra", "decembra"}, + {"dop.", "pop."}, +} + +var localeTableHsbDE = [5][]string{ + {"nje", "pón", "wut", "srj", "štw", "pja", "sob"}, + {"njedźela", "póndźela", "wutora", "srjeda", "štwórtk", "pjatk", "sobota"}, + {"jan.", "feb.", "měr.", "apr.", "mej.", "jun.", "jul.", "awg.", "sep.", "okt.", "now.", "dec."}, + {"januara", "februara", "měrca", "apryla", "meje", "junija", "julija", "awgusta", "septembra", "oktobra", "nowembra", "decembra"}, + {"dop.", "pop."}, +} + +var localeTableHu = [5][]string{ + {"V", "H", "K", "Sze", "Cs", "P", "Szo"}, + {"vasárnap", "hétfő", "kedd", "szerda", "csütörtök", "péntek", "szombat"}, + {"jan.", "febr.", "márc.", "ápr.", "máj.", "jún.", "júl.", "aug.", "szept.", "okt.", "nov.", "dec."}, + {"január", "február", "március", "április", "május", "június", "július", "augusztus", "szeptember", "október", "november", "december"}, + {"de.", "du."}, +} + +var localeTableHuHU = [5][]string{ + {"V", "H", "K", "Sze", "Cs", "P", "Szo"}, + {"vasárnap", "hétfő", "kedd", "szerda", "csütörtök", "péntek", "szombat"}, + {"jan.", "febr.", "márc.", "ápr.", "máj.", "jún.", "júl.", "aug.", "szept.", "okt.", "nov.", "dec."}, 
+ {"január", "február", "március", "április", "május", "június", "július", "augusztus", "szeptember", "október", "november", "december"}, + {"de.", "du."}, +} + +var localeTableHy = [5][]string{ + {"կիր", "երկ", "երք", "չրք", "հնգ", "ուր", "շբթ"}, + {"կիրակի", "երկուշաբթի", "երեքշաբթի", "չորեքշաբթի", "հինգշաբթի", "ուրբաթ", "շաբաթ"}, + {"հնվ", "փտվ", "մրտ", "ապր", "մյս", "հնս", "հլս", "օգս", "սեպ", "հոկ", "նոյ", "դեկ"}, + {"հունվարի", "փետրվարի", "մարտի", "ապրիլի", "մայիսի", "հունիսի", "հուլիսի", "օգոստոսի", "սեպտեմբերի", "հոկտեմբերի", "նոյեմբերի", "դեկտեմբերի"}, + {"ա", "հ"}, +} + +var localeTableHyAM = [5][]string{ + {"կիր", "երկ", "երք", "չրք", "հնգ", "ուր", "շբթ"}, + {"կիրակի", "երկուշաբթի", "երեքշաբթի", "չորեքշաբթի", "հինգշաբթի", "ուրբաթ", "շաբաթ"}, + {"հնվ", "փտվ", "մրտ", "ապր", "մյս", "հնս", "հլս", "օգս", "սեպ", "հոկ", "նոյ", "դեկ"}, + {"հունվարի", "փետրվարի", "մարտի", "ապրիլի", "մայիսի", "հունիսի", "հուլիսի", "օգոստոսի", "սեպտեմբերի", "հոկտեմբերի", "նոյեմբերի", "դեկտեմբերի"}, + {"ա", "հ"}, +} + +var localeTableIa = [5][]string{ + {"dom", "lun", "mar", "mer", "jov", "ven", "sab"}, + {"dominica", "lunedi", "martedi", "mercuridi", "jovedi", "venerdi", "sabbato"}, + {"jan", "feb", "mar", "apr", "mai", "jun", "jul", "aug", "sep", "oct", "nov", "dec"}, + {"januario", "februario", "martio", "april", "maio", "junio", "julio", "augusto", "septembre", "octobre", "novembre", "decembre"}, + {}, +} + +var localeTableIa001 = [5][]string{ + {"dom", "lun", "mar", "mer", "jov", "ven", "sab"}, + {"dominica", "lunedi", "martedi", "mercuridi", "jovedi", "venerdi", "sabbato"}, + {"jan", "feb", "mar", "apr", "mai", "jun", "jul", "aug", "sep", "oct", "nov", "dec"}, + {"januario", "februario", "martio", "april", "maio", "junio", "julio", "augusto", "septembre", "octobre", "novembre", "decembre"}, + {}, +} + +var localeTableId = [5][]string{ + {"Min", "Sen", "Sel", "Rab", "Kam", "Jum", "Sab"}, + {"Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"}, + {"Jan", "Feb", "Mar", "Apr", "Mei", "Jun", "Jul", "Agu", "Sep", "Okt", "Nov", "Des"}, + {"Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"}, + {}, +} + +var localeTableIdID = [5][]string{ + {"Min", "Sen", "Sel", "Rab", "Kam", "Jum", "Sab"}, + {"Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"}, + {"Jan", "Feb", "Mar", "Apr", "Mei", "Jun", "Jul", "Agu", "Sep", "Okt", "Nov", "Des"}, + {"Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"}, + {}, +} + +var localeTableIe = [5][]string{ + {"sol.", "lun.", "mar.", "mer.", "jov.", "ven.", "sat."}, + {"soledí", "lunedí", "mardí", "mercurdí", "jovedí", "venerdí", "saturdí"}, + {"jan.", "febr.", "mar.", "apr.", "may", "jun.", "julí", "aug.", "sept.", "oct.", "nov.", "dec."}, + {"januar", "februar", "marte", "april", "may", "junio", "julí", "august", "septembre", "octobre", "novembre", "decembre"}, + {"a.m.", "p.m."}, +} + +var localeTableIeEE = [5][]string{ + {"sol.", "lun.", "mar.", "mer.", "jov.", "ven.", "sat."}, + {"soledí", "lunedí", "mardí", "mercurdí", "jovedí", "venerdí", "saturdí"}, + {"jan.", "febr.", "mar.", "apr.", "may", "jun.", "julí", "aug.", "sept.", "oct.", "nov.", "dec."}, + {"januar", "februar", "marte", "april", "may", "junio", "julí", "august", "septembre", "octobre", "novembre", "decembre"}, + {"a.m.", "p.m."}, +} + +var localeTableIg = [5][]string{ + {"Sọn", "Mọn", "Tiu", "Wen", "Tọọ", "Fraị", "Sat"}, + {"Sọndee", "Mọnde", "Tiuzdee", 
"Wenezdee", "Tọọzdee", "Fraịdee", "Satọdee"}, + {"Jen", "Feb", "Maa", "Epr", "Mee", "Juu", "Jul", "Ọgọ", "Sep", "Ọkt", "Nov", "Dis"}, + {"Jenụwarị", "Febrụwarị", "Maachị", "Epreel", "Mee", "Juun", "Julaị", "Ọgọọst", "Septemba", "Ọktoba", "Novemba", "Disemba"}, + {"A.M.", "P.M."}, +} + +var localeTableIgNG = [5][]string{ + {"Sọn", "Mọn", "Tiu", "Wen", "Tọọ", "Fraị", "Sat"}, + {"Sọndee", "Mọnde", "Tiuzdee", "Wenezdee", "Tọọzdee", "Fraịdee", "Satọdee"}, + {"Jen", "Feb", "Maa", "Epr", "Mee", "Juu", "Jul", "Ọgọ", "Sep", "Ọkt", "Nov", "Dis"}, + {"Jenụwarị", "Febrụwarị", "Maachị", "Epreel", "Mee", "Juun", "Julaị", "Ọgọọst", "Septemba", "Ọktoba", "Novemba", "Disemba"}, + {"A.M.", "P.M."}, +} + +var localeTableIi = [5][]string{ + {"ꑭꆏ", "ꆏꋍ", "ꆏꑍ", "ꆏꌕ", "ꆏꇖ", "ꆏꉬ", "ꆏꃘ"}, + {"ꑭꆏꑍ", "ꆏꊂꋍ", "ꆏꊂꑍ", "ꆏꊂꌕ", "ꆏꊂꇖ", "ꆏꊂꉬ", "ꆏꊂꃘ"}, + {}, + {"ꋍꆪ", "ꑍꆪ", "ꌕꆪ", "ꇖꆪ", "ꉬꆪ", "ꃘꆪ", "ꏃꆪ", "ꉆꆪ", "ꈬꆪ", "ꊰꆪ", "ꊰꊪꆪ", "ꊰꑋꆪ"}, + {"ꎸꄑ", "ꁯꋒ"}, +} + +var localeTableIiCN = [5][]string{ + {"ꑭꆏ", "ꆏꋍ", "ꆏꑍ", "ꆏꌕ", "ꆏꇖ", "ꆏꉬ", "ꆏꃘ"}, + {"ꑭꆏꑍ", "ꆏꊂꋍ", "ꆏꊂꑍ", "ꆏꊂꌕ", "ꆏꊂꇖ", "ꆏꊂꉬ", "ꆏꊂꃘ"}, + {}, + {"ꋍꆪ", "ꑍꆪ", "ꌕꆪ", "ꇖꆪ", "ꉬꆪ", "ꃘꆪ", "ꏃꆪ", "ꉆꆪ", "ꈬꆪ", "ꊰꆪ", "ꊰꊪꆪ", "ꊰꑋꆪ"}, + {"ꎸꄑ", "ꁯꋒ"}, +} + +var localeTableIs = [5][]string{ + {"sun.", "mán.", "þri.", "mið.", "fim.", "fös.", "lau."}, + {"sunnudagur", "mánudagur", "þriðjudagur", "miðvikudagur", "fimmtudagur", "föstudagur", "laugardagur"}, + {"jan.", "feb.", "mar.", "apr.", "maí", "jún.", "júl.", "ágú.", "sep.", "okt.", "nóv.", "des."}, + {"janúar", "febrúar", "mars", "apríl", "maí", "júní", "júlí", "ágúst", "september", "október", "nóvember", "desember"}, + {"f.h.", "e.h."}, +} + +var localeTableIsIS = [5][]string{ + {"sun.", "mán.", "þri.", "mið.", "fim.", "fös.", "lau."}, + {"sunnudagur", "mánudagur", "þriðjudagur", "miðvikudagur", "fimmtudagur", "föstudagur", "laugardagur"}, + {"jan.", "feb.", "mar.", "apr.", "maí", "jún.", "júl.", "ágú.", "sep.", "okt.", "nóv.", "des."}, + {"janúar", "febrúar", "mars", "apríl", "maí", "júní", "júlí", "ágúst", "september", "október", "nóvember", "desember"}, + {"f.h.", "e.h."}, +} + +var localeTableIt = [5][]string{ + {"dom", "lun", "mar", "mer", "gio", "ven", "sab"}, + {"domenica", "lunedì", "martedì", "mercoledì", "giovedì", "venerdì", "sabato"}, + {"gen", "feb", "mar", "apr", "mag", "giu", "lug", "ago", "set", "ott", "nov", "dic"}, + {"gennaio", "febbraio", "marzo", "aprile", "maggio", "giugno", "luglio", "agosto", "settembre", "ottobre", "novembre", "dicembre"}, + {"m.", "p."}, +} + +var localeTableItCH = [5][]string{ + {"dom", "lun", "mar", "mer", "gio", "ven", "sab"}, + {"domenica", "lunedì", "martedì", "mercoledì", "giovedì", "venerdì", "sabato"}, + {"gen", "feb", "mar", "apr", "mag", "giu", "lug", "ago", "set", "ott", "nov", "dic"}, + {"gennaio", "febbraio", "marzo", "aprile", "maggio", "giugno", "luglio", "agosto", "settembre", "ottobre", "novembre", "dicembre"}, + {"m.", "p."}, +} + +var localeTableItIT = [5][]string{ + {"dom", "lun", "mar", "mer", "gio", "ven", "sab"}, + {"domenica", "lunedì", "martedì", "mercoledì", "giovedì", "venerdì", "sabato"}, + {"gen", "feb", "mar", "apr", "mag", "giu", "lug", "ago", "set", "ott", "nov", "dic"}, + {"gennaio", "febbraio", "marzo", "aprile", "maggio", "giugno", "luglio", "agosto", "settembre", "ottobre", "novembre", "dicembre"}, + {"m.", "p."}, +} + +var localeTableItSM = [5][]string{ + {"dom", "lun", "mar", "mer", "gio", "ven", "sab"}, + {"domenica", "lunedì", "martedì", "mercoledì", "giovedì", "venerdì", "sabato"}, + {"gen", "feb", "mar", "apr", "mag", "giu", "lug", "ago", "set", "ott", 
"nov", "dic"}, + {"gennaio", "febbraio", "marzo", "aprile", "maggio", "giugno", "luglio", "agosto", "settembre", "ottobre", "novembre", "dicembre"}, + {"m.", "p."}, +} + +var localeTableItVA = [5][]string{ + {"dom", "lun", "mar", "mer", "gio", "ven", "sab"}, + {"domenica", "lunedì", "martedì", "mercoledì", "giovedì", "venerdì", "sabato"}, + {"gen", "feb", "mar", "apr", "mag", "giu", "lug", "ago", "set", "ott", "nov", "dic"}, + {"gennaio", "febbraio", "marzo", "aprile", "maggio", "giugno", "luglio", "agosto", "settembre", "ottobre", "novembre", "dicembre"}, + {"m.", "p."}, +} + +var localeTableIu = [5][]string{ + {}, + {"ᓈᑦᑏᖑᔭᖅ", "ᓇᒡᒐᔾᔭᐅ", "ᓇᒡᒐᔾᔭᐅᓕᖅᑭ", "ᐱᖓᑦᓯᖅ", "ᓯᑕᒻᒥᖅ", "ᑕᓪᓕᒻᒥᐅᑦ", "ᓈᑦᓰᖑᔭᓛᕐᓂᐊᖅ"}, + {}, + {"ᔭᓐᓄᐊᓕ", "ᕕᕝᕗᐊᓕ", "ᒫᑦᓯ", "ᐊᐃᑉᐳᓗ", "ᒪᐃ", "ᔫᓂ", "ᔪᓚᐃ", "ᐊᐅᒡᒍᓯ", "ᓯᑎᐱᕆ", "ᐆᑦᑑᕝᕙ", "ᓄᕕᐱᕆ", "ᑎᓯᐱᕆ"}, + {"am", "pm"}, +} + +var localeTableIuCA = [5][]string{ + {}, + {"ᓈᑦᑏᖑᔭᖅ", "ᓇᒡᒐᔾᔭᐅ", "ᓇᒡᒐᔾᔭᐅᓕᖅᑭ", "ᐱᖓᑦᓯᖅ", "ᓯᑕᒻᒥᖅ", "ᑕᓪᓕᒻᒥᐅᑦ", "ᓈᑦᓰᖑᔭᓛᕐᓂᐊᖅ"}, + {}, + {"ᔭᓐᓄᐊᓕ", "ᕕᕝᕗᐊᓕ", "ᒫᑦᓯ", "ᐊᐃᑉᐳᓗ", "ᒪᐃ", "ᔫᓂ", "ᔪᓚᐃ", "ᐊᐅᒡᒍᓯ", "ᓯᑎᐱᕆ", "ᐆᑦᑑᕝᕙ", "ᓄᕕᐱᕆ", "ᑎᓯᐱᕆ"}, + {"am", "pm"}, +} + +var localeTableJa = [5][]string{ + {"日", "月", "火", "水", "木", "金", "土"}, + {"日曜日", "月曜日", "火曜日", "水曜日", "木曜日", "金曜日", "土曜日"}, + {}, + {"1月", "2月", "3月", "4月", "5月", "6月", "7月", "8月", "9月", "10月", "11月", "12月"}, + {"午前", "午後"}, +} + +var localeTableJaJP = [5][]string{ + {"日", "月", "火", "水", "木", "金", "土"}, + {"日曜日", "月曜日", "火曜日", "水曜日", "木曜日", "金曜日", "土曜日"}, + {}, + {"1月", "2月", "3月", "4月", "5月", "6月", "7月", "8月", "9月", "10月", "11月", "12月"}, + {"午前", "午後"}, +} + +var localeTableJgo = [5][]string{ + {}, + {"Sɔ́ndi", "Mɔ́ndi", "Ápta Mɔ́ndi", "Wɛ́nɛsɛdɛ", "Tɔ́sɛdɛ", "Fɛlâyɛdɛ", "Sásidɛ"}, + {}, + {"Nduŋmbi Saŋ", "Pɛsaŋ Pɛ́pá", "Pɛsaŋ Pɛ́tát", "Pɛsaŋ Pɛ́nɛ́kwa", "Pɛsaŋ Pataa", "Pɛsaŋ Pɛ́nɛ́ntúkú", "Pɛsaŋ Saambá", "Pɛsaŋ Pɛ́nɛ́fɔm", "Pɛsaŋ Pɛ́nɛ́pfúꞋú", "Pɛsaŋ Nɛgɛ́m", "Pɛsaŋ Ntsɔ̌pmɔ́", "Pɛsaŋ Ntsɔ̌ppá"}, + {"mbaꞌmbaꞌ", "ŋkambɔ́tnji"}, +} + +var localeTableJgoCM = [5][]string{ + {}, + {"Sɔ́ndi", "Mɔ́ndi", "Ápta Mɔ́ndi", "Wɛ́nɛsɛdɛ", "Tɔ́sɛdɛ", "Fɛlâyɛdɛ", "Sásidɛ"}, + {}, + {"Nduŋmbi Saŋ", "Pɛsaŋ Pɛ́pá", "Pɛsaŋ Pɛ́tát", "Pɛsaŋ Pɛ́nɛ́kwa", "Pɛsaŋ Pataa", "Pɛsaŋ Pɛ́nɛ́ntúkú", "Pɛsaŋ Saambá", "Pɛsaŋ Pɛ́nɛ́fɔm", "Pɛsaŋ Pɛ́nɛ́pfúꞋú", "Pɛsaŋ Nɛgɛ́m", "Pɛsaŋ Ntsɔ̌pmɔ́", "Pɛsaŋ Ntsɔ̌ppá"}, + {"mbaꞌmbaꞌ", "ŋkambɔ́tnji"}, +} + +var localeTableJmc = [5][]string{ + {"Jpi", "Jtt", "Jnn", "Jtn", "Alh", "Iju", "Jmo"}, + {"Jumapilyi", "Jumatatuu", "Jumanne", "Jumatanu", "Alhamisi", "Ijumaa", "Jumamosi"}, + {"Jan", "Feb", "Mac", "Apr", "Mei", "Jun", "Jul", "Ago", "Sep", "Okt", "Nov", "Des"}, + {"Januari", "Februari", "Machi", "Aprilyi", "Mei", "Junyi", "Julyai", "Agusti", "Septemba", "Oktoba", "Novemba", "Desemba"}, + {"utuko", "kyiukonyi"}, +} + +var localeTableJmcTZ = [5][]string{ + {"Jpi", "Jtt", "Jnn", "Jtn", "Alh", "Iju", "Jmo"}, + {"Jumapilyi", "Jumatatuu", "Jumanne", "Jumatanu", "Alhamisi", "Ijumaa", "Jumamosi"}, + {"Jan", "Feb", "Mac", "Apr", "Mei", "Jun", "Jul", "Ago", "Sep", "Okt", "Nov", "Des"}, + {"Januari", "Februari", "Machi", "Aprilyi", "Mei", "Junyi", "Julyai", "Agusti", "Septemba", "Oktoba", "Novemba", "Desemba"}, + {"utuko", "kyiukonyi"}, +} + +var localeTableJv = [5][]string{ + {"Ahad", "Sen", "Sel", "Rab", "Kam", "Jum", "Sab"}, + {"Ahad", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"}, + {"Jan", "Feb", "Mar", "Apr", "Mei", "Jun", "Jul", "Agt", "Sep", "Okt", "Nov", "Des"}, + {"Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"}, + {"Isuk", "Wengi"}, +} + +var 
localeTableJvID = [5][]string{ + {"Ahad", "Sen", "Sel", "Rab", "Kam", "Jum", "Sab"}, + {"Ahad", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"}, + {"Jan", "Feb", "Mar", "Apr", "Mei", "Jun", "Jul", "Agt", "Sep", "Okt", "Nov", "Des"}, + {"Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"}, + {"Isuk", "Wengi"}, +} + +var localeTableKa = [5][]string{ + {"კვი", "ორშ", "სამ", "ოთხ", "ხუთ", "პარ", "შაბ"}, + {"კვირა", "ორშაბათი", "სამშაბათი", "ოთხშაბათი", "ხუთშაბათი", "პარასკევი", "შაბათი"}, + {"იან", "თებ", "მარ", "აპრ", "მაი", "ივნ", "ივლ", "აგვ", "სექ", "ოქტ", "ნოე", "დეკ"}, + {"იანვარი", "თებერვალი", "მარტი", "აპრილი", "მაისი", "ივნისი", "ივლისი", "აგვისტო", "სექტემბერი", "ოქტომბერი", "ნოემბერი", "დეკემბერი"}, + {"a", "p"}, +} + +var localeTableKaGE = [5][]string{ + {"კვი", "ორშ", "სამ", "ოთხ", "ხუთ", "პარ", "შაბ"}, + {"კვირა", "ორშაბათი", "სამშაბათი", "ოთხშაბათი", "ხუთშაბათი", "პარასკევი", "შაბათი"}, + {"იან", "თებ", "მარ", "აპრ", "მაი", "ივნ", "ივლ", "აგვ", "სექ", "ოქტ", "ნოე", "დეკ"}, + {"იანვარი", "თებერვალი", "მარტი", "აპრილი", "მაისი", "ივნისი", "ივლისი", "აგვისტო", "სექტემბერი", "ოქტომბერი", "ნოემბერი", "დეკემბერი"}, + {"a", "p"}, +} + +var localeTableKab = [5][]string{ + {"Yan", "San", "Kraḍ", "Kuẓ", "Sam", "Sḍis", "Say"}, + {"Yanass", "Sanass", "Kraḍass", "Kuẓass", "Samass", "Sḍisass", "Sayass"}, + {"Yen", "Fur", "Meɣ", "Yeb", "May", "Yun", "Yul", "Ɣuc", "Cte", "Tub", "Nun", "Duǧ"}, + {"Yennayer", "Fuṛar", "Meɣres", "Yebrir", "Mayyu", "Yunyu", "Yulyu", "Ɣuct", "Ctembeṛ", "Tubeṛ", "Nunembeṛ", "Duǧembeṛ"}, + {"ntufat", "ntmeddit"}, +} + +var localeTableKabDZ = [5][]string{ + {"Yan", "San", "Kraḍ", "Kuẓ", "Sam", "Sḍis", "Say"}, + {"Yanass", "Sanass", "Kraḍass", "Kuẓass", "Samass", "Sḍisass", "Sayass"}, + {"Yen", "Fur", "Meɣ", "Yeb", "May", "Yun", "Yul", "Ɣuc", "Cte", "Tub", "Nun", "Duǧ"}, + {"Yennayer", "Fuṛar", "Meɣres", "Yebrir", "Mayyu", "Yunyu", "Yulyu", "Ɣuct", "Ctembeṛ", "Tubeṛ", "Nunembeṛ", "Duǧembeṛ"}, + {"ntufat", "ntmeddit"}, +} + +var localeTableKaj = [5][]string{ + {"Lad", "Lin", "Tal", "Lar", "Lam", "Jum", "Asa"}, + {"Ladi", "Lintani", "Talata", "Larba", "Lamit", "Juma", "Asabar"}, + {"A̱yr", "A̱hw", "A̱ta", "A̱na", "A̱pf", "A̱ki", "A̱ty", "A̱ni", "A̱ku", "Swa", "Sby", "Sbh"}, + {"Hywan A̱yrnig", "Hywan A̱hwa", "Hywan A̱tat", "Hywan A̱naai", "Hywan A̱pfwon", "Hywan A̱kitat", "Hywan A̱tyirin", "Hywan A̱ninai", "Hywan A̱kumviriyin", "Hywan Swak", "Hywan Swak B’a̱yrnig", "Hywan Swak B’a̱hwa"}, + {"A.M.", "P.M."}, +} + +var localeTableKajNG = [5][]string{ + {"Lad", "Lin", "Tal", "Lar", "Lam", "Jum", "Asa"}, + {"Ladi", "Lintani", "Talata", "Larba", "Lamit", "Juma", "Asabar"}, + {"A̱yr", "A̱hw", "A̱ta", "A̱na", "A̱pf", "A̱ki", "A̱ty", "A̱ni", "A̱ku", "Swa", "Sby", "Sbh"}, + {"Hywan A̱yrnig", "Hywan A̱hwa", "Hywan A̱tat", "Hywan A̱naai", "Hywan A̱pfwon", "Hywan A̱kitat", "Hywan A̱tyirin", "Hywan A̱ninai", "Hywan A̱kumviriyin", "Hywan Swak", "Hywan Swak B’a̱yrnig", "Hywan Swak B’a̱hwa"}, + {"A.M.", "P.M."}, +} + +var localeTableKam = [5][]string{ + {"Wky", "Wkw", "Wkl", "Wtũ", "Wkn", "Wtn", "Wth"}, + {"Wa kyumwa", "Wa kwambĩlĩlya", "Wa kelĩ", "Wa katatũ", "Wa kana", "Wa katano", "Wa thanthatũ"}, + {"Mbe", "Kel", "Ktũ", "Kan", "Ktn", "Tha", "Moo", "Nya", "Knd", "Ĩku", "Ĩkm", "Ĩkl"}, + {"Mwai wa mbee", "Mwai wa kelĩ", "Mwai wa katatũ", "Mwai wa kana", "Mwai wa katano", "Mwai wa thanthatũ", "Mwai wa muonza", "Mwai wa nyaanya", "Mwai wa kenda", "Mwai wa ĩkumi", "Mwai wa ĩkumi na ĩmwe", "Mwai wa ĩkumi na ilĩ"}, 
+ {"Ĩyakwakya", "Ĩyawĩoo"}, +} + +var localeTableKamKE = [5][]string{ + {"Wky", "Wkw", "Wkl", "Wtũ", "Wkn", "Wtn", "Wth"}, + {"Wa kyumwa", "Wa kwambĩlĩlya", "Wa kelĩ", "Wa katatũ", "Wa kana", "Wa katano", "Wa thanthatũ"}, + {"Mbe", "Kel", "Ktũ", "Kan", "Ktn", "Tha", "Moo", "Nya", "Knd", "Ĩku", "Ĩkm", "Ĩkl"}, + {"Mwai wa mbee", "Mwai wa kelĩ", "Mwai wa katatũ", "Mwai wa kana", "Mwai wa katano", "Mwai wa thanthatũ", "Mwai wa muonza", "Mwai wa nyaanya", "Mwai wa kenda", "Mwai wa ĩkumi", "Mwai wa ĩkumi na ĩmwe", "Mwai wa ĩkumi na ilĩ"}, + {"Ĩyakwakya", "Ĩyawĩoo"}, +} + +var localeTableKcg = [5][]string{ + {"Lad", "Tan", "Tal", "Lar", "Lam", "Jum", "Asa"}, + {"Ladi", "Tanii", "Talata", "Larba", "Lamit", "Juma", "Asabat"}, + {"Juw", "Swi", "Tsa", "Nya", "Tsw", "Ata", "Ana", "Ari", "Aku", "Swa", "Man", "Mas"}, + {"Zwat Juwung", "Zwat Swiyang", "Zwat Tsat", "Zwat Nyai", "Zwat Tswon", "Zwat Ataah", "Zwat Anatat", "Zwat Arinai", "Zwat Akubunyung", "Zwat Swag", "Zwat Mangjuwang", "Zwat Swag-Ma-Suyang"}, + {}, +} + +var localeTableKcgNG = [5][]string{ + {"Lad", "Tan", "Tal", "Lar", "Lam", "Jum", "Asa"}, + {"Ladi", "Tanii", "Talata", "Larba", "Lamit", "Juma", "Asabat"}, + {"Juw", "Swi", "Tsa", "Nya", "Tsw", "Ata", "Ana", "Ari", "Aku", "Swa", "Man", "Mas"}, + {"Zwat Juwung", "Zwat Swiyang", "Zwat Tsat", "Zwat Nyai", "Zwat Tswon", "Zwat Ataah", "Zwat Anatat", "Zwat Arinai", "Zwat Akubunyung", "Zwat Swag", "Zwat Mangjuwang", "Zwat Swag-Ma-Suyang"}, + {}, +} + +var localeTableKde = [5][]string{ + {"Ll2", "Ll3", "Ll4", "Ll5", "Ll6", "Ll7", "Ll1"}, + {"Liduva lyapili", "Liduva lyatatu", "Liduva lyanchechi", "Liduva lyannyano", "Liduva lyannyano na linji", "Liduva lyannyano na mavili", "Liduva litandi"}, + {"Jan", "Feb", "Mac", "Apr", "Mei", "Jun", "Jul", "Ago", "Sep", "Okt", "Nov", "Des"}, + {"Mwedi Ntandi", "Mwedi wa Pili", "Mwedi wa Tatu", "Mwedi wa Nchechi", "Mwedi wa Nnyano", "Mwedi wa Nnyano na Umo", "Mwedi wa Nnyano na Mivili", "Mwedi wa Nnyano na Mitatu", "Mwedi wa Nnyano na Nchechi", "Mwedi wa Nnyano na Nnyano", "Mwedi wa Nnyano na Nnyano na U", "Mwedi wa Nnyano na Nnyano na M"}, + {"Muhi", "Chilo"}, +} + +var localeTableKdeTZ = [5][]string{ + {"Ll2", "Ll3", "Ll4", "Ll5", "Ll6", "Ll7", "Ll1"}, + {"Liduva lyapili", "Liduva lyatatu", "Liduva lyanchechi", "Liduva lyannyano", "Liduva lyannyano na linji", "Liduva lyannyano na mavili", "Liduva litandi"}, + {"Jan", "Feb", "Mac", "Apr", "Mei", "Jun", "Jul", "Ago", "Sep", "Okt", "Nov", "Des"}, + {"Mwedi Ntandi", "Mwedi wa Pili", "Mwedi wa Tatu", "Mwedi wa Nchechi", "Mwedi wa Nnyano", "Mwedi wa Nnyano na Umo", "Mwedi wa Nnyano na Mivili", "Mwedi wa Nnyano na Mitatu", "Mwedi wa Nnyano na Nchechi", "Mwedi wa Nnyano na Nnyano", "Mwedi wa Nnyano na Nnyano na U", "Mwedi wa Nnyano na Nnyano na M"}, + {"Muhi", "Chilo"}, +} + +var localeTableKea = [5][]string{ + {"dum", "sig", "ter", "kua", "kin", "ses", "sab"}, + {"dumingu", "sigunda-fera", "tersa-fera", "kuarta-fera", "kinta-fera", "sesta-fera", "sábadu"}, + {"Jan", "Feb", "Mar", "Abr", "Mai", "Jun", "Jul", "Ago", "Set", "Otu", "Nuv", "Diz"}, + {"Janeru", "Febreru", "Marsu", "Abril", "Maiu", "Junhu", "Julhu", "Agostu", "Setenbru", "Otubru", "Nuvenbru", "Dizenbru"}, + {"am", "pm"}, +} + +var localeTableKeaCV = [5][]string{ + {"dum", "sig", "ter", "kua", "kin", "ses", "sab"}, + {"dumingu", "sigunda-fera", "tersa-fera", "kuarta-fera", "kinta-fera", "sesta-fera", "sábadu"}, + {"Jan", "Feb", "Mar", "Abr", "Mai", "Jun", "Jul", "Ago", "Set", "Otu", "Nuv", "Diz"}, + {"Janeru", "Febreru", "Marsu", "Abril", "Maiu", "Junhu", 
"Julhu", "Agostu", "Setenbru", "Otubru", "Nuvenbru", "Dizenbru"}, + {"am", "pm"}, +} + +var localeTableKgp = [5][]string{ + {"num.", "pir.", "rég.", "tẽg.", "vẽn.", "pén.", "sav."}, + {"numĩggu", "pir-kurã-há", "régre-kurã-há", "tẽgtũ-kurã-há", "vẽnhkãgra-kurã-há", "pénkar-kurã-há", "savnu"}, + {"1Ky.", "2Ky.", "3Ky.", "4Ky.", "5Ky.", "6Ky.", "7Ky.", "8Ky.", "9Ky.", "10Ky.", "11Ky.", "12Ky."}, + {"1-Kysã", "2-Kysã", "3-Kysã", "4-Kysã", "5-Kysã", "6-Kysã", "7-Kysã", "8-Kysã", "9-Kysã", "10-Kysã", "11-Kysã", "12-Kysã"}, + {}, +} + +var localeTableKgpBR = [5][]string{ + {"num.", "pir.", "rég.", "tẽg.", "vẽn.", "pén.", "sav."}, + {"numĩggu", "pir-kurã-há", "régre-kurã-há", "tẽgtũ-kurã-há", "vẽnhkãgra-kurã-há", "pénkar-kurã-há", "savnu"}, + {"1Ky.", "2Ky.", "3Ky.", "4Ky.", "5Ky.", "6Ky.", "7Ky.", "8Ky.", "9Ky.", "10Ky.", "11Ky.", "12Ky."}, + {"1-Kysã", "2-Kysã", "3-Kysã", "4-Kysã", "5-Kysã", "6-Kysã", "7-Kysã", "8-Kysã", "9-Kysã", "10-Kysã", "11-Kysã", "12-Kysã"}, + {}, +} + +var localeTableKhq = [5][]string{ + {"Alh", "Ati", "Ata", "Ala", "Alm", "Alj", "Ass"}, + {"Alhadi", "Atini", "Atalata", "Alarba", "Alhamiisa", "Aljuma", "Assabdu"}, + {"Žan", "Fee", "Mar", "Awi", "Me", "Žuw", "Žuy", "Ut", "Sek", "Okt", "Noo", "Dee"}, + {"Žanwiye", "Feewiriye", "Marsi", "Awiril", "Me", "Žuweŋ", "Žuyye", "Ut", "Sektanbur", "Oktoobur", "Noowanbur", "Deesanbur"}, + {"Adduha", "Aluula"}, +} + +var localeTableKhqML = [5][]string{ + {"Alh", "Ati", "Ata", "Ala", "Alm", "Alj", "Ass"}, + {"Alhadi", "Atini", "Atalata", "Alarba", "Alhamiisa", "Aljuma", "Assabdu"}, + {"Žan", "Fee", "Mar", "Awi", "Me", "Žuw", "Žuy", "Ut", "Sek", "Okt", "Noo", "Dee"}, + {"Žanwiye", "Feewiriye", "Marsi", "Awiril", "Me", "Žuweŋ", "Žuyye", "Ut", "Sektanbur", "Oktoobur", "Noowanbur", "Deesanbur"}, + {"Adduha", "Aluula"}, +} + +var localeTableKi = [5][]string{ + {"KMA", "NTT", "NMN", "NMT", "ART", "NMA", "NMM"}, + {"Kiumia", "Njumatatũ", "Njumaine", "Njumatana", "Aramithi", "Njumaa", "Njumamothi"}, + {"JEN", "WKR", "WGT", "WKN", "WTN", "WTD", "WMJ", "WNN", "WKD", "WIK", "WMW", "DIT"}, + {"Njenuarĩ", "Mwere wa kerĩ", "Mwere wa gatatũ", "Mwere wa kana", "Mwere wa gatano", "Mwere wa gatandatũ", "Mwere wa mũgwanja", "Mwere wa kanana", "Mwere wa kenda", "Mwere wa ikũmi", "Mwere wa ikũmi na ũmwe", "Ndithemba"}, + {"Kiroko", "Hwaĩ-inĩ"}, +} + +var localeTableKiKE = [5][]string{ + {"KMA", "NTT", "NMN", "NMT", "ART", "NMA", "NMM"}, + {"Kiumia", "Njumatatũ", "Njumaine", "Njumatana", "Aramithi", "Njumaa", "Njumamothi"}, + {"JEN", "WKR", "WGT", "WKN", "WTN", "WTD", "WMJ", "WNN", "WKD", "WIK", "WMW", "DIT"}, + {"Njenuarĩ", "Mwere wa kerĩ", "Mwere wa gatatũ", "Mwere wa kana", "Mwere wa gatano", "Mwere wa gatandatũ", "Mwere wa mũgwanja", "Mwere wa kanana", "Mwere wa kenda", "Mwere wa ikũmi", "Mwere wa ikũmi na ũmwe", "Ndithemba"}, + {"Kiroko", "Hwaĩ-inĩ"}, +} + +var localeTableKk = [5][]string{ + {"жс", "дс", "сс", "ср", "бс", "жм", "сб"}, + {"жексенбі", "дүйсенбі", "сейсенбі", "сәрсенбі", "бейсенбі", "жұма", "сенбі"}, + {"қаң.", "ақп.", "нау.", "сәу.", "мам.", "мау.", "шіл.", "там.", "қыр.", "қаз.", "қар.", "жел."}, + {"қаңтар", "ақпан", "наурыз", "сәуір", "мамыр", "маусым", "шілде", "тамыз", "қыркүйек", "қазан", "қараша", "желтоқсан"}, + {}, +} + +var localeTableKkKZ = [5][]string{ + {"жс", "дс", "сс", "ср", "бс", "жм", "сб"}, + {"жексенбі", "дүйсенбі", "сейсенбі", "сәрсенбі", "бейсенбі", "жұма", "сенбі"}, + {"қаң.", "ақп.", "нау.", "сәу.", "мам.", "мау.", "шіл.", "там.", "қыр.", "қаз.", "қар.", "жел."}, + {"қаңтар", "ақпан", "наурыз", "сәуір", "мамыр", 
"маусым", "шілде", "тамыз", "қыркүйек", "қазан", "қараша", "желтоқсан"}, + {}, +} + +var localeTableKkj = [5][]string{ + {}, + {"sɔndi", "lundi", "mardi", "mɛrkɛrɛdi", "yedi", "vaŋdɛrɛdi", "mɔnɔ sɔndi"}, + {}, + {"pamba", "wanja", "mbiyɔ mɛndoŋgɔ", "Nyɔlɔmbɔŋgɔ", "Mɔnɔ ŋgbanja", "Nyaŋgwɛ ŋgbanja", "kuŋgwɛ", "fɛ", "njapi", "nyukul", "M11", "ɓulɓusɛ"}, + {}, +} + +var localeTableKkjCM = [5][]string{ + {}, + {"sɔndi", "lundi", "mardi", "mɛrkɛrɛdi", "yedi", "vaŋdɛrɛdi", "mɔnɔ sɔndi"}, + {}, + {"pamba", "wanja", "mbiyɔ mɛndoŋgɔ", "Nyɔlɔmbɔŋgɔ", "Mɔnɔ ŋgbanja", "Nyaŋgwɛ ŋgbanja", "kuŋgwɛ", "fɛ", "njapi", "nyukul", "M11", "ɓulɓusɛ"}, + {}, +} + +var localeTableKl = [5][]string{ + {"sap", "ata", "mar", "pin", "sis", "tal", "arf"}, + {"sapaat", "ataasinngorneq", "marlunngorneq", "pingasunngorneq", "sisamanngorneq", "tallimanngorneq", "arfininngorneq"}, + {"jan", "febr", "mar", "apr", "maj", "jun", "jul", "aug", "sept", "okt", "nov", "dec"}, + {"januaarip", "februaarip", "marsip", "apriilip", "maajip", "juunip", "juulip", "aggustip", "septembarip", "oktobarip", "novembarip", "decembarip"}, + {"u.t.", "u.k."}, +} + +var localeTableKlGL = [5][]string{ + {"sap", "ata", "mar", "pin", "sis", "tal", "arf"}, + {"sapaat", "ataasinngorneq", "marlunngorneq", "pingasunngorneq", "sisamanngorneq", "tallimanngorneq", "arfininngorneq"}, + {"jan", "febr", "mar", "apr", "maj", "jun", "jul", "aug", "sept", "okt", "nov", "dec"}, + {"januaarip", "februaarip", "marsip", "apriilip", "maajip", "juunip", "juulip", "aggustip", "septembarip", "oktobarip", "novembarip", "decembarip"}, + {"u.t.", "u.k."}, +} + +var localeTableKln = [5][]string{ + {"Kts", "Kot", "Koo", "Kos", "Koa", "Kom", "Kol"}, + {"Kotisap", "Kotaai", "Koaeng’", "Kosomok", "Koang’wan", "Komuut", "Kolo"}, + {"Mul", "Ngat", "Taa", "Iwo", "Mam", "Paa", "Nge", "Roo", "Bur", "Epe", "Kpt", "Kpa"}, + {"Mulgul", "Ng’atyaato", "Kiptaamo", "Iwootkuut", "Mamuut", "Paagi", "Ng’eiyeet", "Rooptui", "Bureet", "Epeeso", "Kipsuunde ne taai", "Kipsuunde nebo aeng’"}, + {"krn", "koosk"}, +} + +var localeTableKlnKE = [5][]string{ + {"Kts", "Kot", "Koo", "Kos", "Koa", "Kom", "Kol"}, + {"Kotisap", "Kotaai", "Koaeng’", "Kosomok", "Koang’wan", "Komuut", "Kolo"}, + {"Mul", "Ngat", "Taa", "Iwo", "Mam", "Paa", "Nge", "Roo", "Bur", "Epe", "Kpt", "Kpa"}, + {"Mulgul", "Ng’atyaato", "Kiptaamo", "Iwootkuut", "Mamuut", "Paagi", "Ng’eiyeet", "Rooptui", "Bureet", "Epeeso", "Kipsuunde ne taai", "Kipsuunde nebo aeng’"}, + {"krn", "koosk"}, +} + +var localeTableKm = [5][]string{ + {"អាទិត្យ", "ចន្ទ", "អង្គារ", "ពុធ", "ព្រហ", "សុក្រ", "សៅរ៍"}, + {"អាទិត្យ", "ច័ន្ទ", "អង្គារ", "ពុធ", "ព្រហស្បតិ៍", "សុក្រ", "សៅរ៍"}, + {}, + {"មករា", "កុម្ភៈ", "មីនា", "មេសា", "ឧសភា", "មិថុនា", "កក្កដា", "សីហា", "កញ្ញា", "តុលា", "វិច្ឆិកា", "ធ្នូ"}, + {"a", "p"}, +} + +var localeTableKmKH = [5][]string{ + {"អាទិត្យ", "ចន្ទ", "អង្គារ", "ពុធ", "ព្រហ", "សុក្រ", "សៅរ៍"}, + {"អាទិត្យ", "ច័ន្ទ", "អង្គារ", "ពុធ", "ព្រហស្បតិ៍", "សុក្រ", "សៅរ៍"}, + {}, + {"មករា", "កុម្ភៈ", "មីនា", "មេសា", "ឧសភា", "មិថុនា", "កក្កដា", "សីហា", "កញ្ញា", "តុលា", "វិច្ឆិកា", "ធ្នូ"}, + {"a", "p"}, +} + +var localeTableKn = [5][]string{ + {"ಭಾನು", "ಸೋಮ", "ಮಂಗಳ", "ಬುಧ", "ಗುರು", "ಶುಕ್ರ", "ಶನಿ"}, + {"ಭಾನುವಾರ", "ಸೋಮವಾರ", "ಮಂಗಳವಾರ", "ಬುಧವಾರ", "ಗುರುವಾರ", "ಶುಕ್ರವಾರ", "ಶನಿವಾರ"}, + {"ಜನವರಿ", "ಫೆಬ್ರವರಿ", "ಮಾರ್ಚ್", "ಏಪ್ರಿ", "ಮೇ", "ಜೂನ್", "ಜುಲೈ", "ಆಗಸ್ಟ್", "ಸೆಪ್ಟೆಂ", "ಅಕ್ಟೋ", "ನವೆಂ", "ಡಿಸೆಂ"}, + {"ಜನವರಿ", "ಫೆಬ್ರವರಿ", "ಮಾರ್ಚ್", "ಏಪ್ರಿಲ್", "ಮೇ", "ಜೂನ್", "ಜುಲೈ", "ಆಗಸ್ಟ್", "ಸೆಪ್ಟೆಂಬರ್", "ಅಕ್ಟೋಬರ್", "ನವೆಂಬರ್", "ಡಿಸೆಂಬರ್"}, + {"ಪೂರ್ವಾಹ್ನ", "ಅಪರಾಹ್ನ"}, +} + +var 
localeTableKnIN = [5][]string{ + {"ಭಾನು", "ಸೋಮ", "ಮಂಗಳ", "ಬುಧ", "ಗುರು", "ಶುಕ್ರ", "ಶನಿ"}, + {"ಭಾನುವಾರ", "ಸೋಮವಾರ", "ಮಂಗಳವಾರ", "ಬುಧವಾರ", "ಗುರುವಾರ", "ಶುಕ್ರವಾರ", "ಶನಿವಾರ"}, + {"ಜನವರಿ", "ಫೆಬ್ರವರಿ", "ಮಾರ್ಚ್", "ಏಪ್ರಿ", "ಮೇ", "ಜೂನ್", "ಜುಲೈ", "ಆಗಸ್ಟ್", "ಸೆಪ್ಟೆಂ", "ಅಕ್ಟೋ", "ನವೆಂ", "ಡಿಸೆಂ"}, + {"ಜನವರಿ", "ಫೆಬ್ರವರಿ", "ಮಾರ್ಚ್", "ಏಪ್ರಿಲ್", "ಮೇ", "ಜೂನ್", "ಜುಲೈ", "ಆಗಸ್ಟ್", "ಸೆಪ್ಟೆಂಬರ್", "ಅಕ್ಟೋಬರ್", "ನವೆಂಬರ್", "ಡಿಸೆಂಬರ್"}, + {"ಪೂರ್ವಾಹ್ನ", "ಅಪರಾಹ್ನ"}, +} + +var localeTableKo = [5][]string{ + {"일", "월", "화", "수", "목", "금", "토"}, + {"일요일", "월요일", "화요일", "수요일", "목요일", "금요일", "토요일"}, + {}, + {"1월", "2월", "3월", "4월", "5월", "6월", "7월", "8월", "9월", "10월", "11월", "12월"}, + {}, +} + +var localeTableKoCN = [5][]string{ + {"일", "월", "화", "수", "목", "금", "토"}, + {"일요일", "월요일", "화요일", "수요일", "목요일", "금요일", "토요일"}, + {}, + {"1월", "2월", "3월", "4월", "5월", "6월", "7월", "8월", "9월", "10월", "11월", "12월"}, + {}, +} + +var localeTableKoKP = [5][]string{ + {"일", "월", "화", "수", "목", "금", "토"}, + {"일요일", "월요일", "화요일", "수요일", "목요일", "금요일", "토요일"}, + {}, + {"1월", "2월", "3월", "4월", "5월", "6월", "7월", "8월", "9월", "10월", "11월", "12월"}, + {}, +} + +var localeTableKoKR = [5][]string{ + {"일", "월", "화", "수", "목", "금", "토"}, + {"일요일", "월요일", "화요일", "수요일", "목요일", "금요일", "토요일"}, + {}, + {"1월", "2월", "3월", "4월", "5월", "6월", "7월", "8월", "9월", "10월", "11월", "12월"}, + {}, +} + +var localeTableKok = [5][]string{ + {}, + {"आयतार", "सोमार", "मंगळार", "बुधवार", "बिरेस्तार", "शुक्रार", "शेनवार"}, + {}, + {"जानेवारी", "फेब्रुवारी", "मार्च", "एप्रील", "मे", "जून", "जुलय", "ऑगस्ट", "सप्टेंबर", "ऑक्टोबर", "नोव्हेंबर", "डिसेंबर"}, + {"a", "p"}, +} + +var localeTableKokIN = [5][]string{ + {}, + {"आयतार", "सोमार", "मंगळार", "बुधवार", "बिरेस्तार", "शुक्रार", "शेनवार"}, + {}, + {"जानेवारी", "फेब्रुवारी", "मार्च", "एप्रील", "मे", "जून", "जुलय", "ऑगस्ट", "सप्टेंबर", "ऑक्टोबर", "नोव्हेंबर", "डिसेंबर"}, + {"a", "p"}, +} + +var localeTableKs = [5][]string{ + {"آتھوار", "ژٔندٕروار", "بۆموار", "بودوار", "برؠسوار", "جُمہ", "بٹوار"}, + {"اَتھوار", "ژٔندرٕروار", "بۆموار", "بودوار", "برؠسوار", "جُمہ", "بٹوار"}, + {"جنؤری", "فرؤری", "مارٕچ", "اپریل", "مئی", "جوٗن", "جُلَے", "اگست", "ستمبر", "اکتوٗبر", "نومبر", "دسمبر"}, + {"جنؤری", "فرؤری", "مارٕچ", "اپریل", "مئی", "جوٗن", "جُلَے", "اگست", "ستمبر", "اکتوٗبر", "نومبر", "دَسَمبَر"}, + {"a", "p"}, +} + +var localeTableKsArab = [5][]string{ + {"آتھوار", "ژٔندٕروار", "بۆموار", "بودوار", "برؠسوار", "جُمہ", "بٹوار"}, + {"اَتھوار", "ژٔندرٕروار", "بۆموار", "بودوار", "برؠسوار", "جُمہ", "بٹوار"}, + {"جنؤری", "فرؤری", "مارٕچ", "اپریل", "مئی", "جوٗن", "جُلَے", "اگست", "ستمبر", "اکتوٗبر", "نومبر", "دسمبر"}, + {"جنؤری", "فرؤری", "مارٕچ", "اپریل", "مئی", "جوٗن", "جُلَے", "اگست", "ستمبر", "اکتوٗبر", "نومبر", "دَسَمبَر"}, + {"a", "p"}, +} + +var localeTableKsArabIN = [5][]string{ + {"آتھوار", "ژٔندٕروار", "بۆموار", "بودوار", "برؠسوار", "جُمہ", "بٹوار"}, + {"اَتھوار", "ژٔندرٕروار", "بۆموار", "بودوار", "برؠسوار", "جُمہ", "بٹوار"}, + {"جنؤری", "فرؤری", "مارٕچ", "اپریل", "مئی", "جوٗن", "جُلَے", "اگست", "ستمبر", "اکتوٗبر", "نومبر", "دسمبر"}, + {"جنؤری", "فرؤری", "مارٕچ", "اپریل", "مئی", "جوٗن", "جُلَے", "اگست", "ستمبر", "اکتوٗبر", "نومبر", "دَسَمبَر"}, + {"a", "p"}, +} + +var localeTableKsDeva = [5][]string{ + {"आथवार", "चंदिरवार", "बुवार", "बोदवार", "ब्रेसवार", "जुमा", "बटवार"}, + {"आथवार", "च़ंदिरवार", "बोमवार", "बोदवार", "ब्रेसवार", "जुमा", "बटवार"}, + {"जनवरी", "फ़रवरी", "मार्च", "अप्रैल", "मे", "जून", "जुलाई", "अगस्त", "सतुंबर", "अक्तूबर", "नवूमबर", "दसूमबर"}, + {"जनवरी", "फ़रवरी", "मार्च", "अप्रैल", "मे", "जून", "जुलाई", "अगस्त", "सतमबर", 
"अक्तूबर", "नवमबर", "दसमबर"}, + {}, +} + +var localeTableKsDevaIN = [5][]string{ + {"आथवार", "चंदिरवार", "बुवार", "बोदवार", "ब्रेसवार", "जुमा", "बटवार"}, + {"आथवार", "च़ंदिरवार", "बोमवार", "बोदवार", "ब्रेसवार", "जुमा", "बटवार"}, + {"जनवरी", "फ़रवरी", "मार्च", "अप्रैल", "मे", "जून", "जुलाई", "अगस्त", "सतुंबर", "अक्तूबर", "नवूमबर", "दसूमबर"}, + {"जनवरी", "फ़रवरी", "मार्च", "अप्रैल", "मे", "जून", "जुलाई", "अगस्त", "सतमबर", "अक्तूबर", "नवमबर", "दसमबर"}, + {}, +} + +var localeTableKsb = [5][]string{ + {"Jpi", "Jtt", "Jmn", "Jtn", "Alh", "Iju", "Jmo"}, + {"Jumaapii", "Jumaatatu", "Jumaane", "Jumaatano", "Alhamisi", "Ijumaa", "Jumaamosi"}, + {"Jan", "Feb", "Mac", "Apr", "Mei", "Jun", "Jul", "Ago", "Sep", "Okt", "Nov", "Des"}, + {"Januali", "Febluali", "Machi", "Aplili", "Mei", "Juni", "Julai", "Agosti", "Septemba", "Oktoba", "Novemba", "Desemba"}, + {"makeo", "nyiaghuo"}, +} + +var localeTableKsbTZ = [5][]string{ + {"Jpi", "Jtt", "Jmn", "Jtn", "Alh", "Iju", "Jmo"}, + {"Jumaapii", "Jumaatatu", "Jumaane", "Jumaatano", "Alhamisi", "Ijumaa", "Jumaamosi"}, + {"Jan", "Feb", "Mac", "Apr", "Mei", "Jun", "Jul", "Ago", "Sep", "Okt", "Nov", "Des"}, + {"Januali", "Febluali", "Machi", "Aplili", "Mei", "Juni", "Julai", "Agosti", "Septemba", "Oktoba", "Novemba", "Desemba"}, + {"makeo", "nyiaghuo"}, +} + +var localeTableKsf = [5][]string{ + {"sɔ́n", "lǝn", "maa", "mɛk", "jǝǝ", "júm", "sam"}, + {"sɔ́ndǝ", "lǝndí", "maadí", "mɛkrɛdí", "jǝǝdí", "júmbá", "samdí"}, + {"ŋ1", "ŋ2", "ŋ3", "ŋ4", "ŋ5", "ŋ6", "ŋ7", "ŋ8", "ŋ9", "ŋ10", "ŋ11", "ŋ12"}, + {"ŋwíí a ntɔ́ntɔ", "ŋwíí akǝ bɛ́ɛ", "ŋwíí akǝ ráá", "ŋwíí akǝ nin", "ŋwíí akǝ táan", "ŋwíí akǝ táafɔk", "ŋwíí akǝ táabɛɛ", "ŋwíí akǝ táaraa", "ŋwíí akǝ táanin", "ŋwíí akǝ ntɛk", "ŋwíí akǝ ntɛk di bɔ́k", "ŋwíí akǝ ntɛk di bɛ́ɛ"}, + {"sárúwá", "cɛɛ́nko"}, +} + +var localeTableKsfCM = [5][]string{ + {"sɔ́n", "lǝn", "maa", "mɛk", "jǝǝ", "júm", "sam"}, + {"sɔ́ndǝ", "lǝndí", "maadí", "mɛkrɛdí", "jǝǝdí", "júmbá", "samdí"}, + {"ŋ1", "ŋ2", "ŋ3", "ŋ4", "ŋ5", "ŋ6", "ŋ7", "ŋ8", "ŋ9", "ŋ10", "ŋ11", "ŋ12"}, + {"ŋwíí a ntɔ́ntɔ", "ŋwíí akǝ bɛ́ɛ", "ŋwíí akǝ ráá", "ŋwíí akǝ nin", "ŋwíí akǝ táan", "ŋwíí akǝ táafɔk", "ŋwíí akǝ táabɛɛ", "ŋwíí akǝ táaraa", "ŋwíí akǝ táanin", "ŋwíí akǝ ntɛk", "ŋwíí akǝ ntɛk di bɔ́k", "ŋwíí akǝ ntɛk di bɛ́ɛ"}, + {"sárúwá", "cɛɛ́nko"}, +} + +var localeTableKsh = [5][]string{ + {"Su.", "Mo.", "Di.", "Me.", "Du.", "Fr.", "Sa."}, + {"Sunndaach", "Mohndaach", "Dinnsdaach", "Metwoch", "Dunnersdaach", "Friidaach", "Samsdaach"}, + {"Jan", "Fäb", "Mäz", "Apr", "Mai", "Jun", "Jul", "Ouj", "Säp", "Okt", "Nov", "Dez"}, + {"Jannewa", "Fäbrowa", "Määz", "Aprell", "Mai", "Juuni", "Juuli", "Oujoß", "Septämber", "Oktohber", "Novämber", "Dezämber"}, + {"v.M.", "n.M."}, +} + +var localeTableKshDE = [5][]string{ + {"Su.", "Mo.", "Di.", "Me.", "Du.", "Fr.", "Sa."}, + {"Sunndaach", "Mohndaach", "Dinnsdaach", "Metwoch", "Dunnersdaach", "Friidaach", "Samsdaach"}, + {"Jan", "Fäb", "Mäz", "Apr", "Mai", "Jun", "Jul", "Ouj", "Säp", "Okt", "Nov", "Dez"}, + {"Jannewa", "Fäbrowa", "Määz", "Aprell", "Mai", "Juuni", "Juuli", "Oujoß", "Septämber", "Oktohber", "Novämber", "Dezämber"}, + {"v.M.", "n.M."}, +} + +var localeTableKu = [5][]string{ + {"yşm", "dşm", "sşm", "çşm", "pşm", "înî", "şem"}, + {"yekşem", "duşem", "sêşem", "çarşem", "pêncşem", "înî", "şemî"}, + {"rbn", "sbt", "adr", "nsn", "gln", "hzr", "trm", "tbx", "îln", "cot", "mjd", "brf"}, + {"rêbendan", "sibat", "adar", "nîsan", "gulan", "hezîran", "tîrmeh", "tebax", "îlon", "cotmeh", "mijdar", "berfanbar"}, + {"BN", "PN"}, +} + +var 
localeTableKuTR = [5][]string{ + {"yşm", "dşm", "sşm", "çşm", "pşm", "înî", "şem"}, + {"yekşem", "duşem", "sêşem", "çarşem", "pêncşem", "înî", "şemî"}, + {"rbn", "sbt", "adr", "nsn", "gln", "hzr", "trm", "tbx", "îln", "cot", "mjd", "brf"}, + {"rêbendan", "sibat", "adar", "nîsan", "gulan", "hezîran", "tîrmeh", "tebax", "îlon", "cotmeh", "mijdar", "berfanbar"}, + {"BN", "PN"}, +} + +var localeTableKw = [5][]string{ + {"Sul", "Lun", "Mth", "Mhr", "Yow", "Gwe", "Sad"}, + {"dy Sul", "dy Lun", "dy Meurth", "dy Merher", "dy Yow", "dy Gwener", "dy Sadorn"}, + {"Gen", "Hwe", "Meu", "Ebr", "Me", "Met", "Gor", "Est", "Gwn", "Hed", "Du", "Kev"}, + {"mis Genver", "mis Hwevrer", "mis Meurth", "mis Ebrel", "mis Me", "mis Metheven", "mis Gortheren", "mis Est", "mis Gwynngala", "mis Hedra", "mis Du", "mis Kevardhu"}, + {"a.m.", "p.m."}, +} + +var localeTableKwGB = [5][]string{ + {"Sul", "Lun", "Mth", "Mhr", "Yow", "Gwe", "Sad"}, + {"dy Sul", "dy Lun", "dy Meurth", "dy Merher", "dy Yow", "dy Gwener", "dy Sadorn"}, + {"Gen", "Hwe", "Meu", "Ebr", "Me", "Met", "Gor", "Est", "Gwn", "Hed", "Du", "Kev"}, + {"mis Genver", "mis Hwevrer", "mis Meurth", "mis Ebrel", "mis Me", "mis Metheven", "mis Gortheren", "mis Est", "mis Gwynngala", "mis Hedra", "mis Du", "mis Kevardhu"}, + {"a.m.", "p.m."}, +} + +var localeTableKxv = [5][]string{ + {"aadi", "smba", "manga", "puda", "laki", "sukru", "sani"}, + {"aadi vara", "smbara", "mangaḍa", "pudara", "laki vara", "sukru vara", "sani vara"}, + {"pusu", "maha", "pagu", "hire", "bese", "jaṭṭa", "aasaḍi", "srabĩ", "bado", "dasara", "divi", "pande"}, + {"pusu lenju", "maha lenju", "pagu lenju", "hire lenju", "bese lenju", "jaṭṭa lenju", "aasaḍi lenju", "srabĩ lenju", "bado lenju", "dasara lenju", "divi lenju", "pande lenju"}, + {"am", "pm"}, +} + +var localeTableKxvDeva = [5][]string{ + {}, + {"आदि वारा", "साॅम्वारा", "मंगाड़ा", "पुद्दारा", "लाक्कि वारा", "सुकुरु वारा", "सान्नि वारा"}, + {}, + {"पुसु लेञ्जु", "माहाका लेञ्जु", "पागुणी लेञ्जु", "हिरे लेञ्जु", "बेसे लेञ्जु", "जाटा लेञ्जु", "आसाड़ी लेञ्जु", "स्राबाँ लेञ्जु", "बोदो लेञ्जु", "दसारा लेञ्जु", "दिवी लेञ्जु", "पान्डे लेञ्जु"}, + {}, +} + +var localeTableKxvDevaIN = [5][]string{ + {}, + {"आदि वारा", "साॅम्वारा", "मंगाड़ा", "पुद्दारा", "लाक्कि वारा", "सुकुरु वारा", "सान्नि वारा"}, + {}, + {"पुसु लेञ्जु", "माहाका लेञ्जु", "पागुणी लेञ्जु", "हिरे लेञ्जु", "बेसे लेञ्जु", "जाटा लेञ्जु", "आसाड़ी लेञ्जु", "स्राबाँ लेञ्जु", "बोदो लेञ्जु", "दसारा लेञ्जु", "दिवी लेञ्जु", "पान्डे लेञ्जु"}, + {}, +} + +var localeTableKxvOrya = [5][]string{ + {}, + {"ଆଦି ୱାରା", "ସମ୍ବାରା", "ମାଙ୍ଗାଡ଼ା", "ପୁଦାରା", "ଲାକି ୱାରା", "ସୁକ୍ରୁ ୱାରା", "ସାନି ୱାରା"}, + {}, + {"ପୁସୁ ଲେଞ୍ଜୁ", "ମାହାକା ଲେଞ୍ଜୁ", "ପାଗୁଣି ଲେଞ୍ଜୁ", "ହିରେ ଲେଞ୍ଜୁ", "ବେସେ ଲେଞ୍ଜୁ", "ଜାଟା ଲେଞ୍ଜୁ", "ଆସାଡ଼ି ଲେଞ୍ଜୁ", "ସ୍ରାବାଁ ଲେଞ୍ଜୁ", "ବଦ ଲେଞ୍ଜୁ", "ଦାସାରା ଲେଞ୍ଜୁ", "ଦିୱିଡ଼ି ଲେଞ୍ଜୁ", "ପାଣ୍ଡେ ଲେଞ୍ଜୁ"}, + {}, +} + +var localeTableKxvOryaIN = [5][]string{ + {}, + {"ଆଦି ୱାରା", "ସମ୍ବାରା", "ମାଙ୍ଗାଡ଼ା", "ପୁଦାରା", "ଲାକି ୱାରା", "ସୁକ୍ରୁ ୱାରା", "ସାନି ୱାରା"}, + {}, + {"ପୁସୁ ଲେଞ୍ଜୁ", "ମାହାକା ଲେଞ୍ଜୁ", "ପାଗୁଣି ଲେଞ୍ଜୁ", "ହିରେ ଲେଞ୍ଜୁ", "ବେସେ ଲେଞ୍ଜୁ", "ଜାଟା ଲେଞ୍ଜୁ", "ଆସାଡ଼ି ଲେଞ୍ଜୁ", "ସ୍ରାବାଁ ଲେଞ୍ଜୁ", "ବଦ ଲେଞ୍ଜୁ", "ଦାସାରା ଲେଞ୍ଜୁ", "ଦିୱିଡ଼ି ଲେଞ୍ଜୁ", "ପାଣ୍ଡେ ଲେଞ୍ଜୁ"}, + {}, +} + +var localeTableKxvTelu = [5][]string{ + {}, + {"వారమి", "నమారా", "మాంగాడా", "వుదారా", "లాకివరా", "నుక్ వరా", "సానివరా"}, + {}, + {"మాగ", "గుండు", "హిరెఇ", "బెసెకి", "లండి", "రాత", "బాన్దపాణా", "బార్సి", "అస్ర", "దివెడి", "పాండు", "పుసు"}, + {}, +} + +var localeTableKxvTeluIN = [5][]string{ + {}, + {"వారమి", "నమారా", "మాంగాడా", "వుదారా", "లాకివరా", 
"నుక్ వరా", "సానివరా"}, + {}, + {"మాగ", "గుండు", "హిరెఇ", "బెసెకి", "లండి", "రాత", "బాన్దపాణా", "బార్సి", "అస్ర", "దివెడి", "పాండు", "పుసు"}, + {}, +} + +var localeTableKy = [5][]string{ + {"жек.", "дүй.", "шейш.", "шарш.", "бейш.", "жума", "ишм."}, + {"жекшемби", "дүйшөмбү", "шейшемби", "шаршемби", "бейшемби", "жума", "ишемби"}, + {"янв.", "фев.", "мар.", "апр.", "май", "июн.", "июл.", "авг.", "сен.", "окт.", "ноя.", "дек."}, + {"январь", "февраль", "март", "апрель", "май", "июнь", "июль", "август", "сентябрь", "октябрь", "ноябрь", "декабрь"}, + {"тң", "тк"}, +} + +var localeTableKyKG = [5][]string{ + {"жек.", "дүй.", "шейш.", "шарш.", "бейш.", "жума", "ишм."}, + {"жекшемби", "дүйшөмбү", "шейшемби", "шаршемби", "бейшемби", "жума", "ишемби"}, + {"янв.", "фев.", "мар.", "апр.", "май", "июн.", "июл.", "авг.", "сен.", "окт.", "ноя.", "дек."}, + {"январь", "февраль", "март", "апрель", "май", "июнь", "июль", "август", "сентябрь", "октябрь", "ноябрь", "декабрь"}, + {"тң", "тк"}, +} + +var localeTableLa = [5][]string{ + {"Dom", "Lun", "Mar", "Mer", "Iov", "Ven", "Sab"}, + {"Dominica", "dies Lunae", "dies Martis", "dies Mercurii", "dies Iovis", "dies Veneris", "dies Sabbati"}, + {"Ian", "Feb", "Mar", "Apr", "Mai", "Iun", "Iul", "Aug", "Sep", "Oct", "Nov", "Dec"}, + {"Ianuarii", "Februarii", "Martii", "Aprilis", "Maii", "Iunii", "Iulii", "Augusti", "Septembris", "Octobris", "Novembris", "Decembris"}, + {"a.m.", "p.m."}, +} + +var localeTableLaVA = [5][]string{ + {"Dom", "Lun", "Mar", "Mer", "Iov", "Ven", "Sab"}, + {"Dominica", "dies Lunae", "dies Martis", "dies Mercurii", "dies Iovis", "dies Veneris", "dies Sabbati"}, + {"Ian", "Feb", "Mar", "Apr", "Mai", "Iun", "Iul", "Aug", "Sep", "Oct", "Nov", "Dec"}, + {"Ianuarii", "Februarii", "Martii", "Aprilis", "Maii", "Iunii", "Iulii", "Augusti", "Septembris", "Octobris", "Novembris", "Decembris"}, + {"a.m.", "p.m."}, +} + +var localeTableLag = [5][]string{ + {"Píili", "Táatu", "Íne", "Táano", "Alh", "Ijm", "Móosi"}, + {"Jumapíiri", "Jumatátu", "Jumaíne", "Jumatáano", "Alamíisi", "Ijumáa", "Jumamóosi"}, + {"Fúngatɨ", "Naanɨ", "Keenda", "Ikúmi", "Inyambala", "Idwaata", "Mʉʉnchɨ", "Vɨɨrɨ", "Saatʉ", "Inyi", "Saano", "Sasatʉ"}, + {"Kʉfúngatɨ", "Kʉnaanɨ", "Kʉkeenda", "Kwiikumi", "Kwiinyambála", "Kwiidwaata", "Kʉmʉʉnchɨ", "Kʉvɨɨrɨ", "Kʉsaatʉ", "Kwiinyi", "Kʉsaano", "Kʉsasatʉ"}, + {"TOO", "MUU"}, +} + +var localeTableLagTZ = [5][]string{ + {"Píili", "Táatu", "Íne", "Táano", "Alh", "Ijm", "Móosi"}, + {"Jumapíiri", "Jumatátu", "Jumaíne", "Jumatáano", "Alamíisi", "Ijumáa", "Jumamóosi"}, + {"Fúngatɨ", "Naanɨ", "Keenda", "Ikúmi", "Inyambala", "Idwaata", "Mʉʉnchɨ", "Vɨɨrɨ", "Saatʉ", "Inyi", "Saano", "Sasatʉ"}, + {"Kʉfúngatɨ", "Kʉnaanɨ", "Kʉkeenda", "Kwiikumi", "Kwiinyambála", "Kwiidwaata", "Kʉmʉʉnchɨ", "Kʉvɨɨrɨ", "Kʉsaatʉ", "Kwiinyi", "Kʉsaano", "Kʉsasatʉ"}, + {"TOO", "MUU"}, +} + +var localeTableLb = [5][]string{ + {"Son.", "Méi.", "Dën.", "Mët.", "Don.", "Fre.", "Sam."}, + {"Sonndeg", "Méindeg", "Dënschdeg", "Mëttwoch", "Donneschdeg", "Freideg", "Samschdeg"}, + {"Jan.", "Feb.", "Mäe.", "Abr.", "Mee", "Juni", "Juli", "Aug.", "Sep.", "Okt.", "Nov.", "Dez."}, + {"Januar", "Februar", "Mäerz", "Abrëll", "Mee", "Juni", "Juli", "August", "September", "Oktober", "November", "Dezember"}, + {"moies", "nomëttes"}, +} + +var localeTableLbLU = [5][]string{ + {"Son.", "Méi.", "Dën.", "Mët.", "Don.", "Fre.", "Sam."}, + {"Sonndeg", "Méindeg", "Dënschdeg", "Mëttwoch", "Donneschdeg", "Freideg", "Samschdeg"}, + {"Jan.", "Feb.", "Mäe.", "Abr.", "Mee", "Juni", "Juli", "Aug.", "Sep.", 
"Okt.", "Nov.", "Dez."}, + {"Januar", "Februar", "Mäerz", "Abrëll", "Mee", "Juni", "Juli", "August", "September", "Oktober", "November", "Dezember"}, + {"moies", "nomëttes"}, +} + +var localeTableLg = [5][]string{ + {"Sab", "Bal", "Lw2", "Lw3", "Lw4", "Lw5", "Lw6"}, + {"Sabbiiti", "Balaza", "Lwakubiri", "Lwakusatu", "Lwakuna", "Lwakutaano", "Lwamukaaga"}, + {"Jan", "Feb", "Mar", "Apu", "Maa", "Juu", "Jul", "Agu", "Seb", "Oki", "Nov", "Des"}, + {"Janwaliyo", "Febwaliyo", "Marisi", "Apuli", "Maayi", "Juuni", "Julaayi", "Agusito", "Sebuttemba", "Okitobba", "Novemba", "Desemba"}, + {}, +} + +var localeTableLgUG = [5][]string{ + {"Sab", "Bal", "Lw2", "Lw3", "Lw4", "Lw5", "Lw6"}, + {"Sabbiiti", "Balaza", "Lwakubiri", "Lwakusatu", "Lwakuna", "Lwakutaano", "Lwamukaaga"}, + {"Jan", "Feb", "Mar", "Apu", "Maa", "Juu", "Jul", "Agu", "Seb", "Oki", "Nov", "Des"}, + {"Janwaliyo", "Febwaliyo", "Marisi", "Apuli", "Maayi", "Juuni", "Julaayi", "Agusito", "Sebuttemba", "Okitobba", "Novemba", "Desemba"}, + {}, +} + +var localeTableLij = [5][]string{ + {"dom.", "lun.", "mät.", "mäc.", "zeu.", "ven.", "sab."}, + {"domenega", "lunesdì", "mätesdì", "mäcordì", "zeuggia", "venardì", "sabbo"}, + {"de zen.", "de fre.", "de mar.", "d’arv.", "de maz.", "de zug.", "de lug.", "d’ago.", "de set.", "d’ott.", "de nov.", "de dex."}, + {"de zenâ", "de frevâ", "de marso", "d’arvî", "de mazzo", "de zugno", "de luggio", "d’agosto", "de settembre", "d’ottobre", "de novembre", "de dexembre"}, + {"m.", "p."}, +} + +var localeTableLijIT = [5][]string{ + {"dom.", "lun.", "mät.", "mäc.", "zeu.", "ven.", "sab."}, + {"domenega", "lunesdì", "mätesdì", "mäcordì", "zeuggia", "venardì", "sabbo"}, + {"de zen.", "de fre.", "de mar.", "d’arv.", "de maz.", "de zug.", "de lug.", "d’ago.", "de set.", "d’ott.", "de nov.", "de dex."}, + {"de zenâ", "de frevâ", "de marso", "d’arvî", "de mazzo", "de zugno", "de luggio", "d’agosto", "de settembre", "d’ottobre", "de novembre", "de dexembre"}, + {"m.", "p."}, +} + +var localeTableLkt = [5][]string{ + {}, + {"Aŋpétuwakȟaŋ", "Aŋpétuwaŋži", "Aŋpétunuŋpa", "Aŋpétuyamni", "Aŋpétutopa", "Aŋpétuzaptaŋ", "Owáŋgyužažapi"}, + {}, + {"Wiótheȟika Wí", "Thiyóȟeyuŋka Wí", "Ištáwičhayazaŋ Wí", "Pȟežítȟo Wí", "Čhaŋwápetȟo Wí", "Wípazukȟa-wašté Wí", "Čhaŋpȟásapa Wí", "Wasútȟuŋ Wí", "Čhaŋwápeǧi Wí", "Čhaŋwápe-kasná Wí", "Waníyetu Wí", "Tȟahékapšuŋ Wí"}, + {}, +} + +var localeTableLktUS = [5][]string{ + {}, + {"Aŋpétuwakȟaŋ", "Aŋpétuwaŋži", "Aŋpétunuŋpa", "Aŋpétuyamni", "Aŋpétutopa", "Aŋpétuzaptaŋ", "Owáŋgyužažapi"}, + {}, + {"Wiótheȟika Wí", "Thiyóȟeyuŋka Wí", "Ištáwičhayazaŋ Wí", "Pȟežítȟo Wí", "Čhaŋwápetȟo Wí", "Wípazukȟa-wašté Wí", "Čhaŋpȟásapa Wí", "Wasútȟuŋ Wí", "Čhaŋwápeǧi Wí", "Čhaŋwápe-kasná Wí", "Waníyetu Wí", "Tȟahékapšuŋ Wí"}, + {}, +} + +var localeTableLmo = [5][]string{ + {}, + {"domenega", "lundì", "mardì", "mercoldì", "sgiovedì", "venerdì", "sabet"}, + {}, + {"sginer", "fevrer", "marz", "avril", "masg", "sgiugn", "luj", "avost", "setember", "otover", "november", "dicember"}, + {}, +} + +var localeTableLmoIT = [5][]string{ + {}, + {"domenega", "lundì", "mardì", "mercoldì", "sgiovedì", "venerdì", "sabet"}, + {}, + {"sginer", "fevrer", "marz", "avril", "masg", "sgiugn", "luj", "avost", "setember", "otover", "november", "dicember"}, + {}, +} + +var localeTableLn = [5][]string{ + {"eye", "ybo", "mbl", "mst", "min", "mtn", "mps"}, + {"eyenga", "mokɔlɔ mwa yambo", "mokɔlɔ mwa míbalé", "mokɔlɔ mwa mísáto", "mokɔlɔ ya mínéi", "mokɔlɔ ya mítáno", "mpɔ́sɔ"}, + {"yan", "fbl", "msi", "apl", "mai", "yun", "yul", "agt", 
"stb", "ɔtb", "nvb", "dsb"}, + {"sánzá ya yambo", "sánzá ya míbalé", "sánzá ya mísáto", "sánzá ya mínei", "sánzá ya mítáno", "sánzá ya motóbá", "sánzá ya nsambo", "sánzá ya mwambe", "sánzá ya libwa", "sánzá ya zómi", "sánzá ya zómi na mɔ̌kɔ́", "sánzá ya zómi na míbalé"}, + {"ntɔ́ngɔ́", "mpókwa"}, +} + +var localeTableLnAO = [5][]string{ + {"eye", "ybo", "mbl", "mst", "min", "mtn", "mps"}, + {"eyenga", "mokɔlɔ mwa yambo", "mokɔlɔ mwa míbalé", "mokɔlɔ mwa mísáto", "mokɔlɔ ya mínéi", "mokɔlɔ ya mítáno", "mpɔ́sɔ"}, + {"yan", "fbl", "msi", "apl", "mai", "yun", "yul", "agt", "stb", "ɔtb", "nvb", "dsb"}, + {"sánzá ya yambo", "sánzá ya míbalé", "sánzá ya mísáto", "sánzá ya mínei", "sánzá ya mítáno", "sánzá ya motóbá", "sánzá ya nsambo", "sánzá ya mwambe", "sánzá ya libwa", "sánzá ya zómi", "sánzá ya zómi na mɔ̌kɔ́", "sánzá ya zómi na míbalé"}, + {"ntɔ́ngɔ́", "mpókwa"}, +} + +var localeTableLnCD = [5][]string{ + {"eye", "ybo", "mbl", "mst", "min", "mtn", "mps"}, + {"eyenga", "mokɔlɔ mwa yambo", "mokɔlɔ mwa míbalé", "mokɔlɔ mwa mísáto", "mokɔlɔ ya mínéi", "mokɔlɔ ya mítáno", "mpɔ́sɔ"}, + {"yan", "fbl", "msi", "apl", "mai", "yun", "yul", "agt", "stb", "ɔtb", "nvb", "dsb"}, + {"sánzá ya yambo", "sánzá ya míbalé", "sánzá ya mísáto", "sánzá ya mínei", "sánzá ya mítáno", "sánzá ya motóbá", "sánzá ya nsambo", "sánzá ya mwambe", "sánzá ya libwa", "sánzá ya zómi", "sánzá ya zómi na mɔ̌kɔ́", "sánzá ya zómi na míbalé"}, + {"ntɔ́ngɔ́", "mpókwa"}, +} + +var localeTableLnCF = [5][]string{ + {"eye", "ybo", "mbl", "mst", "min", "mtn", "mps"}, + {"eyenga", "mokɔlɔ mwa yambo", "mokɔlɔ mwa míbalé", "mokɔlɔ mwa mísáto", "mokɔlɔ ya mínéi", "mokɔlɔ ya mítáno", "mpɔ́sɔ"}, + {"yan", "fbl", "msi", "apl", "mai", "yun", "yul", "agt", "stb", "ɔtb", "nvb", "dsb"}, + {"sánzá ya yambo", "sánzá ya míbalé", "sánzá ya mísáto", "sánzá ya mínei", "sánzá ya mítáno", "sánzá ya motóbá", "sánzá ya nsambo", "sánzá ya mwambe", "sánzá ya libwa", "sánzá ya zómi", "sánzá ya zómi na mɔ̌kɔ́", "sánzá ya zómi na míbalé"}, + {"ntɔ́ngɔ́", "mpókwa"}, +} + +var localeTableLnCG = [5][]string{ + {"eye", "ybo", "mbl", "mst", "min", "mtn", "mps"}, + {"eyenga", "mokɔlɔ mwa yambo", "mokɔlɔ mwa míbalé", "mokɔlɔ mwa mísáto", "mokɔlɔ ya mínéi", "mokɔlɔ ya mítáno", "mpɔ́sɔ"}, + {"yan", "fbl", "msi", "apl", "mai", "yun", "yul", "agt", "stb", "ɔtb", "nvb", "dsb"}, + {"sánzá ya yambo", "sánzá ya míbalé", "sánzá ya mísáto", "sánzá ya mínei", "sánzá ya mítáno", "sánzá ya motóbá", "sánzá ya nsambo", "sánzá ya mwambe", "sánzá ya libwa", "sánzá ya zómi", "sánzá ya zómi na mɔ̌kɔ́", "sánzá ya zómi na míbalé"}, + {"ntɔ́ngɔ́", "mpókwa"}, +} + +var localeTableLo = [5][]string{ + {"ອາທິດ", "ຈັນ", "ອັງຄານ", "ພຸດ", "ພະຫັດ", "ສຸກ", "ເສົາ"}, + {"ວັນອາທິດ", "ວັນຈັນ", "ວັນອັງຄານ", "ວັນພຸດ", "ວັນພະຫັດ", "ວັນສຸກ", "ວັນເສົາ"}, + {"ມ.ກ.", "ກ.ພ.", "ມ.ນ.", "ມ.ສ.", "ພ.ພ.", "ມິ.ຖ.", "ກ.ລ.", "ສ.ຫ.", "ກ.ຍ.", "ຕ.ລ.", "ພ.ຈ.", "ທ.ວ."}, + {"ມັງກອນ", "ກຸມພາ", "ມີນາ", "ເມສາ", "ພຶດສະພາ", "ມິຖຸນາ", "ກໍລະກົດ", "ສິງຫາ", "ກັນຍາ", "ຕຸລາ", "ພະຈິກ", "ທັນວາ"}, + {"ກ່ອນທ່ຽງ", "ຫຼັງທ່ຽງ"}, +} + +var localeTableLoLA = [5][]string{ + {"ອາທິດ", "ຈັນ", "ອັງຄານ", "ພຸດ", "ພະຫັດ", "ສຸກ", "ເສົາ"}, + {"ວັນອາທິດ", "ວັນຈັນ", "ວັນອັງຄານ", "ວັນພຸດ", "ວັນພະຫັດ", "ວັນສຸກ", "ວັນເສົາ"}, + {"ມ.ກ.", "ກ.ພ.", "ມ.ນ.", "ມ.ສ.", "ພ.ພ.", "ມິ.ຖ.", "ກ.ລ.", "ສ.ຫ.", "ກ.ຍ.", "ຕ.ລ.", "ພ.ຈ.", "ທ.ວ."}, + {"ມັງກອນ", "ກຸມພາ", "ມີນາ", "ເມສາ", "ພຶດສະພາ", "ມິຖຸນາ", "ກໍລະກົດ", "ສິງຫາ", "ກັນຍາ", "ຕຸລາ", "ພະຈິກ", "ທັນວາ"}, + {"ກ່ອນທ່ຽງ", "ຫຼັງທ່ຽງ"}, +} + +var localeTableLrc = [5][]string{ + {}, + {}, + {}, + {"جانڤیە", "فئڤریە", "مارس", "آڤریل", 
"مئی", "جوٙأن", "جوٙلا", "آگوست", "سئپتامر", "ئوکتوڤر", "نوڤامر", "دئسامر"}, + {}, +} + +var localeTableLrcIQ = [5][]string{ + {}, + {}, + {}, + {"جانڤیە", "فئڤریە", "مارس", "آڤریل", "مئی", "جوٙأن", "جوٙلا", "آگوست", "سئپتامر", "ئوکتوڤر", "نوڤامر", "دئسامر"}, + {}, +} + +var localeTableLrcIR = [5][]string{ + {}, + {}, + {}, + {"جانڤیە", "فئڤریە", "مارس", "آڤریل", "مئی", "جوٙأن", "جوٙلا", "آگوست", "سئپتامر", "ئوکتوڤر", "نوڤامر", "دئسامر"}, + {}, +} + +var localeTableLt = [5][]string{ + {"sk", "pr", "an", "tr", "kt", "pn", "št"}, + {"sekmadienis", "pirmadienis", "antradienis", "trečiadienis", "ketvirtadienis", "penktadienis", "šeštadienis"}, + {"saus.", "vas.", "kov.", "bal.", "geg.", "birž.", "liep.", "rugp.", "rugs.", "spal.", "lapkr.", "gruod."}, + {"sausio", "vasario", "kovo", "balandžio", "gegužės", "birželio", "liepos", "rugpjūčio", "rugsėjo", "spalio", "lapkričio", "gruodžio"}, + {"priešpiet", "popiet"}, +} + +var localeTableLtLT = [5][]string{ + {"sk", "pr", "an", "tr", "kt", "pn", "št"}, + {"sekmadienis", "pirmadienis", "antradienis", "trečiadienis", "ketvirtadienis", "penktadienis", "šeštadienis"}, + {"saus.", "vas.", "kov.", "bal.", "geg.", "birž.", "liep.", "rugp.", "rugs.", "spal.", "lapkr.", "gruod."}, + {"sausio", "vasario", "kovo", "balandžio", "gegužės", "birželio", "liepos", "rugpjūčio", "rugsėjo", "spalio", "lapkričio", "gruodžio"}, + {"priešpiet", "popiet"}, +} + +var localeTableLu = [5][]string{ + {"Lum", "Nko", "Ndy", "Ndg", "Njw", "Ngv", "Lub"}, + {"Lumingu", "Nkodya", "Ndàayà", "Ndangù", "Njòwa", "Ngòvya", "Lubingu"}, + {"Cio", "Lui", "Lus", "Muu", "Lum", "Luf", "Kab", "Lush", "Lut", "Lun", "Kas", "Cis"}, + {"Ciongo", "Lùishi", "Lusòlo", "Mùuyà", "Lumùngùlù", "Lufuimi", "Kabàlàshìpù", "Lùshìkà", "Lutongolo", "Lungùdi", "Kaswèkèsè", "Ciswà"}, + {"Dinda", "Dilolo"}, +} + +var localeTableLuCD = [5][]string{ + {"Lum", "Nko", "Ndy", "Ndg", "Njw", "Ngv", "Lub"}, + {"Lumingu", "Nkodya", "Ndàayà", "Ndangù", "Njòwa", "Ngòvya", "Lubingu"}, + {"Cio", "Lui", "Lus", "Muu", "Lum", "Luf", "Kab", "Lush", "Lut", "Lun", "Kas", "Cis"}, + {"Ciongo", "Lùishi", "Lusòlo", "Mùuyà", "Lumùngùlù", "Lufuimi", "Kabàlàshìpù", "Lùshìkà", "Lutongolo", "Lungùdi", "Kaswèkèsè", "Ciswà"}, + {"Dinda", "Dilolo"}, +} + +var localeTableLuo = [5][]string{ + {"JMP", "WUT", "TAR", "TAD", "TAN", "TAB", "NGS"}, + {"Jumapil", "Wuok Tich", "Tich Ariyo", "Tich Adek", "Tich Ang’wen", "Tich Abich", "Ngeso"}, + {"DAC", "DAR", "DAD", "DAN", "DAH", "DAU", "DAO", "DAB", "DOC", "DAP", "DGI", "DAG"}, + {"Dwe mar Achiel", "Dwe mar Ariyo", "Dwe mar Adek", "Dwe mar Ang’wen", "Dwe mar Abich", "Dwe mar Auchiel", "Dwe mar Abiriyo", "Dwe mar Aboro", "Dwe mar Ochiko", "Dwe mar Apar", "Dwe mar gi achiel", "Dwe mar Apar gi ariyo"}, + {"OD", "OT"}, +} + +var localeTableLuoKE = [5][]string{ + {"JMP", "WUT", "TAR", "TAD", "TAN", "TAB", "NGS"}, + {"Jumapil", "Wuok Tich", "Tich Ariyo", "Tich Adek", "Tich Ang’wen", "Tich Abich", "Ngeso"}, + {"DAC", "DAR", "DAD", "DAN", "DAH", "DAU", "DAO", "DAB", "DOC", "DAP", "DGI", "DAG"}, + {"Dwe mar Achiel", "Dwe mar Ariyo", "Dwe mar Adek", "Dwe mar Ang’wen", "Dwe mar Abich", "Dwe mar Auchiel", "Dwe mar Abiriyo", "Dwe mar Aboro", "Dwe mar Ochiko", "Dwe mar Apar", "Dwe mar gi achiel", "Dwe mar Apar gi ariyo"}, + {"OD", "OT"}, +} + +var localeTableLuy = [5][]string{ + {"J2", "J3", "J4", "J5", "Al", "Ij", "J1"}, + {"Jumapiri", "Jumatatu", "Jumanne", "Jumatano", "Murwa wa Kanne", "Murwa wa Katano", "Jumamosi"}, + {"Jan", "Feb", "Mar", "Apr", "Mei", "Jun", "Jul", "Ago", "Sep", "Okt", "Nov", "Des"}, + 
{"Januari", "Februari", "Machi", "Aprili", "Mei", "Juni", "Julai", "Agosti", "Septemba", "Oktoba", "Novemba", "Desemba"}, + {"a.m.", "p.m."}, +} + +var localeTableLuyKE = [5][]string{ + {"J2", "J3", "J4", "J5", "Al", "Ij", "J1"}, + {"Jumapiri", "Jumatatu", "Jumanne", "Jumatano", "Murwa wa Kanne", "Murwa wa Katano", "Jumamosi"}, + {"Jan", "Feb", "Mar", "Apr", "Mei", "Jun", "Jul", "Ago", "Sep", "Okt", "Nov", "Des"}, + {"Januari", "Februari", "Machi", "Aprili", "Mei", "Juni", "Julai", "Agosti", "Septemba", "Oktoba", "Novemba", "Desemba"}, + {"a.m.", "p.m."}, +} + +var localeTableLv = [5][]string{ + {"svētd.", "pirmd.", "otrd.", "trešd.", "ceturtd.", "piektd.", "sestd."}, + {"svētdiena", "pirmdiena", "otrdiena", "trešdiena", "ceturtdiena", "piektdiena", "sestdiena"}, + {"janv.", "febr.", "marts", "apr.", "maijs", "jūn.", "jūl.", "aug.", "sept.", "okt.", "nov.", "dec."}, + {"janvāris", "februāris", "marts", "aprīlis", "maijs", "jūnijs", "jūlijs", "augusts", "septembris", "oktobris", "novembris", "decembris"}, + {"priekšp.", "pēcp."}, +} + +var localeTableLvLV = [5][]string{ + {"svētd.", "pirmd.", "otrd.", "trešd.", "ceturtd.", "piektd.", "sestd."}, + {"svētdiena", "pirmdiena", "otrdiena", "trešdiena", "ceturtdiena", "piektdiena", "sestdiena"}, + {"janv.", "febr.", "marts", "apr.", "maijs", "jūn.", "jūl.", "aug.", "sept.", "okt.", "nov.", "dec."}, + {"janvāris", "februāris", "marts", "aprīlis", "maijs", "jūnijs", "jūlijs", "augusts", "septembris", "oktobris", "novembris", "decembris"}, + {"priekšp.", "pēcp."}, +} + +var localeTableMai = [5][]string{ + {"रवि", "सोम", "मंगल", "बुध", "गुरु", "शुक्र", "शनि"}, + {"रवि दिन", "सोम दिन", "मंगल दिन", "बुध दिन", "बृहस्पति दिन", "शुक्र दिन", "शनि दिन"}, + {"जन॰", "फ़र॰", "मार्च", "अप्रैल", "मई", "जून", "जुल॰", "अग॰", "सित॰", "अक्तू॰", "नव॰", "दिस॰"}, + {"जनवरी", "फरवरी", "मार्च", "अप्रैल", "मई", "जून", "जुलाई", "अगस्त", "सितंबर", "अक्तूबर", "नवंबर", "दिसंबर"}, + {"भोर", "सांझ"}, +} + +var localeTableMaiIN = [5][]string{ + {"रवि", "सोम", "मंगल", "बुध", "गुरु", "शुक्र", "शनि"}, + {"रवि दिन", "सोम दिन", "मंगल दिन", "बुध दिन", "बृहस्पति दिन", "शुक्र दिन", "शनि दिन"}, + {"जन॰", "फ़र॰", "मार्च", "अप्रैल", "मई", "जून", "जुल॰", "अग॰", "सित॰", "अक्तू॰", "नव॰", "दिस॰"}, + {"जनवरी", "फरवरी", "मार्च", "अप्रैल", "मई", "जून", "जुलाई", "अगस्त", "सितंबर", "अक्तूबर", "नवंबर", "दिसंबर"}, + {"भोर", "सांझ"}, +} + +var localeTableMas = [5][]string{ + {"Jpi", "Jtt", "Jnn", "Jtn", "Alh", "Iju", "Jmo"}, + {"Jumapílí", "Jumatátu", "Jumane", "Jumatánɔ", "Alaámisi", "Jumáa", "Jumamósi"}, + {"Dal", "Ará", "Ɔɛn", "Doy", "Lép", "Rok", "Sás", "Bɔ́r", "Kús", "Gís", "Shʉ́", "Ntʉ́"}, + {"Oladalʉ́", "Arát", "Ɔɛnɨ́ɔɨŋɔk", "Olodoyíóríê inkókúâ", "Oloilépūnyīē inkókúâ", "Kújúɔrɔk", "Mórusásin", "Ɔlɔ́ɨ́bɔ́rárɛ", "Kúshîn", "Olgísan", "Pʉshʉ́ka", "Ntʉ́ŋʉ́s"}, + {"Ɛnkakɛnyá", "Ɛndámâ"}, +} + +var localeTableMasKE = [5][]string{ + {"Jpi", "Jtt", "Jnn", "Jtn", "Alh", "Iju", "Jmo"}, + {"Jumapílí", "Jumatátu", "Jumane", "Jumatánɔ", "Alaámisi", "Jumáa", "Jumamósi"}, + {"Dal", "Ará", "Ɔɛn", "Doy", "Lép", "Rok", "Sás", "Bɔ́r", "Kús", "Gís", "Shʉ́", "Ntʉ́"}, + {"Oladalʉ́", "Arát", "Ɔɛnɨ́ɔɨŋɔk", "Olodoyíóríê inkókúâ", "Oloilépūnyīē inkókúâ", "Kújúɔrɔk", "Mórusásin", "Ɔlɔ́ɨ́bɔ́rárɛ", "Kúshîn", "Olgísan", "Pʉshʉ́ka", "Ntʉ́ŋʉ́s"}, + {"Ɛnkakɛnyá", "Ɛndámâ"}, +} + +var localeTableMasTZ = [5][]string{ + {"Jpi", "Jtt", "Jnn", "Jtn", "Alh", "Iju", "Jmo"}, + {"Jumapílí", "Jumatátu", "Jumane", "Jumatánɔ", "Alaámisi", "Jumáa", "Jumamósi"}, + {"Dal", "Ará", "Ɔɛn", "Doy", "Lép", "Rok", "Sás", "Bɔ́r", "Kús", "Gís", 
"Shʉ́", "Ntʉ́"}, + {"Oladalʉ́", "Arát", "Ɔɛnɨ́ɔɨŋɔk", "Olodoyíóríê inkókúâ", "Oloilépūnyīē inkókúâ", "Kújúɔrɔk", "Mórusásin", "Ɔlɔ́ɨ́bɔ́rárɛ", "Kúshîn", "Olgísan", "Pʉshʉ́ka", "Ntʉ́ŋʉ́s"}, + {"Ɛnkakɛnyá", "Ɛndámâ"}, +} + +var localeTableMer = [5][]string{ + {"KIU", "MRA", "WAI", "WET", "WEN", "WTN", "JUM"}, + {"Kiumia", "Muramuko", "Wairi", "Wethatu", "Wena", "Wetano", "Jumamosi"}, + {"JAN", "FEB", "MAC", "ĨPU", "MĨĨ", "NJU", "NJR", "AGA", "SPT", "OKT", "NOV", "DEC"}, + {"Januarĩ", "Feburuarĩ", "Machi", "Ĩpurũ", "Mĩĩ", "Njuni", "Njuraĩ", "Agasti", "Septemba", "Oktũba", "Novemba", "Dicemba"}, + {"RŨ", "ŨG"}, +} + +var localeTableMerKE = [5][]string{ + {"KIU", "MRA", "WAI", "WET", "WEN", "WTN", "JUM"}, + {"Kiumia", "Muramuko", "Wairi", "Wethatu", "Wena", "Wetano", "Jumamosi"}, + {"JAN", "FEB", "MAC", "ĨPU", "MĨĨ", "NJU", "NJR", "AGA", "SPT", "OKT", "NOV", "DEC"}, + {"Januarĩ", "Feburuarĩ", "Machi", "Ĩpurũ", "Mĩĩ", "Njuni", "Njuraĩ", "Agasti", "Septemba", "Oktũba", "Novemba", "Dicemba"}, + {"RŨ", "ŨG"}, +} + +var localeTableMfe = [5][]string{ + {"dim", "lin", "mar", "mer", "ze", "van", "sam"}, + {"dimans", "lindi", "mardi", "merkredi", "zedi", "vandredi", "samdi"}, + {"zan", "fev", "mar", "avr", "me", "zin", "zil", "out", "sep", "okt", "nov", "des"}, + {"zanvie", "fevriye", "mars", "avril", "me", "zin", "zilye", "out", "septam", "oktob", "novam", "desam"}, + {}, +} + +var localeTableMfeMU = [5][]string{ + {"dim", "lin", "mar", "mer", "ze", "van", "sam"}, + {"dimans", "lindi", "mardi", "merkredi", "zedi", "vandredi", "samdi"}, + {"zan", "fev", "mar", "avr", "me", "zin", "zil", "out", "sep", "okt", "nov", "des"}, + {"zanvie", "fevriye", "mars", "avril", "me", "zin", "zilye", "out", "septam", "oktob", "novam", "desam"}, + {}, +} + +var localeTableMg = [5][]string{ + {"Alah", "Alats", "Tal", "Alar", "Alak", "Zom", "Asab"}, + {"Alahady", "Alatsinainy", "Talata", "Alarobia", "Alakamisy", "Zoma", "Asabotsy"}, + {"Jan", "Feb", "Mar", "Apr", "Mey", "Jon", "Jol", "Aog", "Sep", "Okt", "Nov", "Des"}, + {"Janoary", "Febroary", "Martsa", "Aprily", "Mey", "Jona", "Jolay", "Aogositra", "Septambra", "Oktobra", "Novambra", "Desambra"}, + {}, +} + +var localeTableMgMG = [5][]string{ + {"Alah", "Alats", "Tal", "Alar", "Alak", "Zom", "Asab"}, + {"Alahady", "Alatsinainy", "Talata", "Alarobia", "Alakamisy", "Zoma", "Asabotsy"}, + {"Jan", "Feb", "Mar", "Apr", "Mey", "Jon", "Jol", "Aog", "Sep", "Okt", "Nov", "Des"}, + {"Janoary", "Febroary", "Martsa", "Aprily", "Mey", "Jona", "Jolay", "Aogositra", "Septambra", "Oktobra", "Novambra", "Desambra"}, + {}, +} + +var localeTableMgh = [5][]string{ + {"Sab", "Jtt", "Jnn", "Jtn", "Ara", "Iju", "Jmo"}, + {"Sabato", "Jumatatu", "Jumanne", "Jumatano", "Arahamisi", "Ijumaa", "Jumamosi"}, + {"Kwa", "Una", "Rar", "Che", "Tha", "Moc", "Sab", "Nan", "Tis", "Kum", "Moj", "Yel"}, + {"Mweri wo kwanza", "Mweri wo unayeli", "Mweri wo uneraru", "Mweri wo unecheshe", "Mweri wo unethanu", "Mweri wo thanu na mocha", "Mweri wo saba", "Mweri wo nane", "Mweri wo tisa", "Mweri wo kumi", "Mweri wo kumi na moja", "Mweri wo kumi na yel’li"}, + {"wichishu", "mchochil’l"}, +} + +var localeTableMghMZ = [5][]string{ + {"Sab", "Jtt", "Jnn", "Jtn", "Ara", "Iju", "Jmo"}, + {"Sabato", "Jumatatu", "Jumanne", "Jumatano", "Arahamisi", "Ijumaa", "Jumamosi"}, + {"Kwa", "Una", "Rar", "Che", "Tha", "Moc", "Sab", "Nan", "Tis", "Kum", "Moj", "Yel"}, + {"Mweri wo kwanza", "Mweri wo unayeli", "Mweri wo uneraru", "Mweri wo unecheshe", "Mweri wo unethanu", "Mweri wo thanu na mocha", "Mweri wo saba", "Mweri wo nane", 
"Mweri wo tisa", "Mweri wo kumi", "Mweri wo kumi na moja", "Mweri wo kumi na yel’li"}, + {"wichishu", "mchochil’l"}, +} + +var localeTableMgo = [5][]string{ + {}, + {"Aneg 1", "Aneg 2", "Aneg 3", "Aneg 4", "Aneg 5", "Aneg 6", "Aneg 7"}, + {"mbegtug", "imeg àbùbì", "imeg mbəŋchubi", "iməg ngwə̀t", "iməg fog", "iməg ichiibɔd", "iməg àdùmbə̀ŋ", "iməg ichika", "iməg kud", "iməg tèsiʼe", "iməg zò", "iməg krizmed"}, + {"iməg mbegtug", "imeg àbùbì", "imeg mbəŋchubi", "iməg ngwə̀t", "iməg fog", "iməg ichiibɔd", "iməg àdùmbə̀ŋ", "iməg ichika", "iməg kud", "iməg tèsiʼe", "iməg zò", "iməg krizmed"}, + {}, +} + +var localeTableMgoCM = [5][]string{ + {}, + {"Aneg 1", "Aneg 2", "Aneg 3", "Aneg 4", "Aneg 5", "Aneg 6", "Aneg 7"}, + {"mbegtug", "imeg àbùbì", "imeg mbəŋchubi", "iməg ngwə̀t", "iməg fog", "iməg ichiibɔd", "iməg àdùmbə̀ŋ", "iməg ichika", "iməg kud", "iməg tèsiʼe", "iməg zò", "iməg krizmed"}, + {"iməg mbegtug", "imeg àbùbì", "imeg mbəŋchubi", "iməg ngwə̀t", "iməg fog", "iməg ichiibɔd", "iməg àdùmbə̀ŋ", "iməg ichika", "iməg kud", "iməg tèsiʼe", "iməg zò", "iməg krizmed"}, + {}, +} + +var localeTableMi = [5][]string{ + {"Rāt", "Man", "Tūr", "Wen", "Tāi", "Par", "Rāh"}, + {"Rātapu", "Mane", "Tūrei", "Wenerei", "Tāite", "Paraire", "Rāhoroi"}, + {"Hān", "Pēp", "Māe", "Āpe", "Mei", "Hun", "Hūr", "Āku", "Hep", "Oke", "Noe", "Tīh"}, + {"Hānuere", "Pēpuere", "Māehe", "Āpereira", "Mei", "Hune", "Hūrae", "Ākuhata", "Hepetema", "Oketopa", "Noema", "Tīhema"}, + {}, +} + +var localeTableMiNZ = [5][]string{ + {"Rāt", "Man", "Tūr", "Wen", "Tāi", "Par", "Rāh"}, + {"Rātapu", "Mane", "Tūrei", "Wenerei", "Tāite", "Paraire", "Rāhoroi"}, + {"Hān", "Pēp", "Māe", "Āpe", "Mei", "Hun", "Hūr", "Āku", "Hep", "Oke", "Noe", "Tīh"}, + {"Hānuere", "Pēpuere", "Māehe", "Āpereira", "Mei", "Hune", "Hūrae", "Ākuhata", "Hepetema", "Oketopa", "Noema", "Tīhema"}, + {}, +} + +var localeTableMk = [5][]string{ + {"нед.", "пон.", "вто.", "сре.", "чет.", "пет.", "саб."}, + {"недела", "понеделник", "вторник", "среда", "четврток", "петок", "сабота"}, + {"јан.", "фев.", "мар.", "апр.", "мај", "јун.", "јул.", "авг.", "сеп.", "окт.", "ное.", "дек."}, + {"јануари", "февруари", "март", "април", "мај", "јуни", "јули", "август", "септември", "октомври", "ноември", "декември"}, + {"претпл.", "попл."}, +} + +var localeTableMkMK = [5][]string{ + {"нед.", "пон.", "вто.", "сре.", "чет.", "пет.", "саб."}, + {"недела", "понеделник", "вторник", "среда", "четврток", "петок", "сабота"}, + {"јан.", "фев.", "мар.", "апр.", "мај", "јун.", "јул.", "авг.", "сеп.", "окт.", "ное.", "дек."}, + {"јануари", "февруари", "март", "април", "мај", "јуни", "јули", "август", "септември", "октомври", "ноември", "декември"}, + {"претпл.", "попл."}, +} + +var localeTableMl = [5][]string{ + {"ഞായർ", "തിങ്കൾ", "ചൊവ്വ", "ബുധൻ", "വ്യാഴം", "വെള്ളി", "ശനി"}, + {"ഞായറാഴ്‌ച", "തിങ്കളാഴ്‌ച", "ചൊവ്വാഴ്ച", "ബുധനാഴ്‌ച", "വ്യാഴാഴ്‌ച", "വെള്ളിയാഴ്‌ച", "ശനിയാഴ്‌ച"}, + {"ജനു", "ഫെബ്രു", "മാർ", "ഏപ്രി", "മേയ്", "ജൂൺ", "ജൂലൈ", "ഓഗ", "സെപ്റ്റം", "ഒക്ടോ", "നവം", "ഡിസം"}, + {"ജനുവരി", "ഫെബ്രുവരി", "മാർച്ച്", "ഏപ്രിൽ", "മേയ്", "ജൂൺ", "ജൂലൈ", "ഓഗസ്റ്റ്", "സെപ്റ്റംബർ", "ഒക്‌ടോബർ", "നവംബർ", "ഡിസംബർ"}, + {}, +} + +var localeTableMlIN = [5][]string{ + {"ഞായർ", "തിങ്കൾ", "ചൊവ്വ", "ബുധൻ", "വ്യാഴം", "വെള്ളി", "ശനി"}, + {"ഞായറാഴ്‌ച", "തിങ്കളാഴ്‌ച", "ചൊവ്വാഴ്ച", "ബുധനാഴ്‌ച", "വ്യാഴാഴ്‌ച", "വെള്ളിയാഴ്‌ച", "ശനിയാഴ്‌ച"}, + {"ജനു", "ഫെബ്രു", "മാർ", "ഏപ്രി", "മേയ്", "ജൂൺ", "ജൂലൈ", "ഓഗ", "സെപ്റ്റം", "ഒക്ടോ", "നവം", "ഡിസം"}, + {"ജനുവരി", "ഫെബ്രുവരി", "മാർച്ച്", "ഏപ്രിൽ", "മേയ്", "ജൂൺ", "ജൂലൈ", "ഓഗസ്റ്റ്", 
"സെപ്റ്റംബർ", "ഒക്‌ടോബർ", "നവംബർ", "ഡിസംബർ"}, + {}, +} + +var localeTableMn = [5][]string{ + {"Ня", "Да", "Мя", "Лх", "Пү", "Ба", "Бя"}, + {"ням", "даваа", "мягмар", "лхагва", "пүрэв", "баасан", "бямба"}, + {"1-р сар", "2-р сар", "3-р сар", "4-р сар", "5-р сар", "6-р сар", "7-р сар", "8-р сар", "9-р сар", "10-р сар", "11-р сар", "12-р сар"}, + {"нэгдүгээр сар", "хоёрдугаар сар", "гуравдугаар сар", "дөрөвдүгээр сар", "тавдугаар сар", "зургаадугаар сар", "долоодугаар сар", "наймдугаар сар", "есдүгээр сар", "аравдугаар сар", "арван нэгдүгээр сар", "арван хоёрдугаар сар"}, + {"ү.ө.", "ү.х."}, +} + +var localeTableMnMN = [5][]string{ + {"Ня", "Да", "Мя", "Лх", "Пү", "Ба", "Бя"}, + {"ням", "даваа", "мягмар", "лхагва", "пүрэв", "баасан", "бямба"}, + {"1-р сар", "2-р сар", "3-р сар", "4-р сар", "5-р сар", "6-р сар", "7-р сар", "8-р сар", "9-р сар", "10-р сар", "11-р сар", "12-р сар"}, + {"нэгдүгээр сар", "хоёрдугаар сар", "гуравдугаар сар", "дөрөвдүгээр сар", "тавдугаар сар", "зургаадугаар сар", "долоодугаар сар", "наймдугаар сар", "есдүгээр сар", "аравдугаар сар", "арван нэгдүгээр сар", "арван хоёрдугаар сар"}, + {"ү.ө.", "ү.х."}, +} + +var localeTableMnMongMN = [5][]string{ + {"ᠨᠢ", "ᠲᠠ", "ᠮᠢᠭ", "ᡀᠠ", "ᠫᠥᠷ", "ᠪᠠ", "ᠪᠢᠮ"}, + {"ᠨᠢᠮ᠎ᠠ", "ᠳᠠᠸᠠ", "ᠮᠢᠠᠠᠮᠠᠷ", "ᡀᠠᠭᠪᠠ", "ᠫᠦᠷᠪᠦ", "ᠪᠠᠰᠠᠩ", "ᠪᠢᠮᠪᠠ"}, + {"1 ᠊ᠷ ᠰᠠᠷ᠎ᠠ", "2 ᠊ᠷ ᠰᠠᠷ᠎ᠠ", "3᠊ᠷ ᠰᠠᠷ᠎ᠠ", "4 ᠊ᠷ ᠰᠠᠷ᠎ᠠ", "5 ᠊ᠷ ᠰᠠᠷ᠎ᠠ", "6 ᠊ᠷ ᠰᠠᠷ᠎ᠠ", "7 ᠊ᠷ ᠰᠠᠷ᠎ᠠ", "8᠊ᠷ ᠰᠠᠷ᠎ᠠ", "9 ᠊ᠷ ᠰᠠᠷ᠎ᠠ", "10 ᠊ᠷ ᠰᠠᠷ᠎ᠠ", "11 ᠊ᠷ ᠰᠠᠷ᠎ᠠ", "12 ᠊ᠷ ᠰᠠᠷ᠎ᠠ"}, + {"ᠨᠢᠭᠡᠳᠥᠭᠡᠷ ᠰᠠᠷ᠎ᠠ", "ᠬᠣᠶᠠᠳᠣᠭᠠᠷ ᠰᠠᠷ ᠠ", "ᠭᠣᠷᠪᠡᠳᠣᠭᠠᠷ ᠰᠠᠷ ᠠ", "ᠳᠥᠷᠪᠡᠳᠥᠭᠡᠷ ᠰᠠᠷ᠎ᠠ", "ᠲᠠᠪᠣᠳᠣᠭᠠᠷ ᠰᠠᠷ ᠠ", "ᠵᠢᠷᠭᠣᠭᠠᠳᠣᠭᠠᠷ ᠰᠠᠷ᠎ᠠ", "ᠲᠣᠯᠣᠭᠠᠳᠣᠭᠠᠷ ᠰᠠᠷ᠎ᠠ", "ᠨᠠᠢᠮᠠᠳᠥᠭᠠᠷ ᠰᠠᠷ᠎ᠠ", "ᠶᠢᠰᠥᠳᠥᠭᠡᠷ ᠰᠠᠷ᠎ᠠ", "ᠠᠷᠪᠠᠳᠣᠭᠠᠷ ᠰᠠᠷ᠎ᠠ", "ᠠᠷᠪᠠᠨ ᠨᠢᠭᠡᠳᠥᠭᠡᠷ ᠰᠠᠷ᠎ᠠ", "ᠠᠷᠪᠠᠨ ᠬᠣᠶᠠᠳᠣᠭᠠᠷ ᠰᠠᠷ᠎ᠠ"}, + {}, +} + +var localeTableMni = [5][]string{ + {}, + {"নোংমাইজিং", "নিংথৌকাবা", "লৈবাকপোকপা", "য়ুমশকৈশা", "শগোলশেন", "ইরাই", "থাংজ"}, + {"জন", "ফেব্রুৱারি", "মার্চ", "এপ্রিল", "মে", "জুন", "জুলাই", "ওগ", "সেপ্টেম্বর", "ওক্টোবর", "নভেম্বর", "ডিসেম্বর"}, + {"জনুৱারী", "ফেব্রুৱারি", "মার্চ", "এপ্রিল", "মে", "জুন", "জুলাই", "‌ওগষ্ট", "সেপ্টেম্বর", "ওক্টোবর", "নভেম্বর", "ডিসেম্বর"}, + {"নুমাং", "PM"}, +} + +var localeTableMniBeng = [5][]string{ + {}, + {"নোংমাইজিং", "নিংথৌকাবা", "লৈবাকপোকপা", "য়ুমশকৈশা", "শগোলশেন", "ইরাই", "থাংজ"}, + {"জন", "ফেব্রুৱারি", "মার্চ", "এপ্রিল", "মে", "জুন", "জুলাই", "ওগ", "সেপ্টেম্বর", "ওক্টোবর", "নভেম্বর", "ডিসেম্বর"}, + {"জনুৱারী", "ফেব্রুৱারি", "মার্চ", "এপ্রিল", "মে", "জুন", "জুলাই", "‌ওগষ্ট", "সেপ্টেম্বর", "ওক্টোবর", "নভেম্বর", "ডিসেম্বর"}, + {"নুমাং", "PM"}, +} + +var localeTableMniBengIN = [5][]string{ + {}, + {"নোংমাইজিং", "নিংথৌকাবা", "লৈবাকপোকপা", "য়ুমশকৈশা", "শগোলশেন", "ইরাই", "থাংজ"}, + {"জন", "ফেব্রুৱারি", "মার্চ", "এপ্রিল", "মে", "জুন", "জুলাই", "ওগ", "সেপ্টেম্বর", "ওক্টোবর", "নভেম্বর", "ডিসেম্বর"}, + {"জনুৱারী", "ফেব্রুৱারি", "মার্চ", "এপ্রিল", "মে", "জুন", "জুলাই", "‌ওগষ্ট", "সেপ্টেম্বর", "ওক্টোবর", "নভেম্বর", "ডিসেম্বর"}, + {"নুমাং", "PM"}, +} + +var localeTableMr = [5][]string{ + {"रवि", "सोम", "मंगळ", "बुध", "गुरु", "शुक्र", "शनि"}, + {"रविवार", "सोमवार", "मंगळवार", "बुधवार", "गुरुवार", "शुक्रवार", "शनिवार"}, + {"जाने", "फेब्रु", "मार्च", "एप्रि", "मे", "जून", "जुलै", "ऑग", "सप्टें", "ऑक्टो", "नोव्हें", "डिसें"}, + {"जानेवारी", "फेब्रुवारी", "मार्च", "एप्रिल", "मे", "जून", "जुलै", "ऑगस्ट", "सप्टेंबर", "ऑक्टोबर", "नोव्हेंबर", "डिसेंबर"}, + {"a", "p"}, +} + +var localeTableMrIN = [5][]string{ + {"रवि", "सोम", "मंगळ", "बुध", "गुरु", "शुक्र", "शनि"}, + {"रविवार", "सोमवार", "मंगळवार", "बुधवार", "गुरुवार", "शुक्रवार", "शनिवार"}, + {"जाने", "फेब्रु", 
"मार्च", "एप्रि", "मे", "जून", "जुलै", "ऑग", "सप्टें", "ऑक्टो", "नोव्हें", "डिसें"}, + {"जानेवारी", "फेब्रुवारी", "मार्च", "एप्रिल", "मे", "जून", "जुलै", "ऑगस्ट", "सप्टेंबर", "ऑक्टोबर", "नोव्हेंबर", "डिसेंबर"}, + {"a", "p"}, +} + +var localeTableMs = [5][]string{ + {"Ahd", "Isn", "Sel", "Rab", "Kha", "Jum", "Sab"}, + {"Ahad", "Isnin", "Selasa", "Rabu", "Khamis", "Jumaat", "Sabtu"}, + {"Jan", "Feb", "Mac", "Apr", "Mei", "Jun", "Jul", "Ogo", "Sep", "Okt", "Nov", "Dis"}, + {"Januari", "Februari", "Mac", "April", "Mei", "Jun", "Julai", "Ogos", "September", "Oktober", "November", "Disember"}, + {"PG", "PTG"}, +} + +var localeTableMsArab = [5][]string{ + {}, + {"احد", "اثنين", "ثلاث", "رابو", "خميس", "جمعة", "سبتو"}, + {}, + {"جانواري", "فيبواري", "مچ", "اڤريل", "مي", "جون", "جولاي", "ݢوس", "سيڤتيمبر", "اوکتوبر", "نوۏيمبر", "ديسيمبر"}, + {}, +} + +var localeTableMsArabBN = [5][]string{ + {}, + {"احد", "اثنين", "ثلاث", "رابو", "خميس", "جمعة", "سبتو"}, + {}, + {"جانواري", "فيبواري", "مچ", "اڤريل", "مي", "جون", "جولاي", "ݢوس", "سيڤتيمبر", "اوکتوبر", "نوۏيمبر", "ديسيمبر"}, + {}, +} + +var localeTableMsArabMY = [5][]string{ + {}, + {"احد", "اثنين", "ثلاث", "رابو", "خميس", "جمعة", "سبتو"}, + {}, + {"جانواري", "فيبواري", "مچ", "اڤريل", "مي", "جون", "جولاي", "ݢوس", "سيڤتيمبر", "اوکتوبر", "نوۏيمبر", "ديسيمبر"}, + {}, +} + +var localeTableMsBN = [5][]string{ + {"Ahd", "Isn", "Sel", "Rab", "Kha", "Jum", "Sab"}, + {"Ahad", "Isnin", "Selasa", "Rabu", "Khamis", "Jumaat", "Sabtu"}, + {"Jan", "Feb", "Mac", "Apr", "Mei", "Jun", "Jul", "Ogo", "Sep", "Okt", "Nov", "Dis"}, + {"Januari", "Februari", "Mac", "April", "Mei", "Jun", "Julai", "Ogos", "September", "Oktober", "November", "Disember"}, + {"PG", "PTG"}, +} + +var localeTableMsID = [5][]string{ + {}, + {"احد", "اثنين", "ثلاث", "رابو", "خميس", "جمعة", "سبتو"}, + {}, + {"جانواري", "فيبواري", "مچ", "اڤريل", "مي", "جون", "جولاي", "ݢوس", "سيڤتيمبر", "اوکتوبر", "نوۏيمبر", "ديسيمبر"}, + {}, +} + +var localeTableMsMY = [5][]string{ + {"Ahd", "Isn", "Sel", "Rab", "Kha", "Jum", "Sab"}, + {"Ahad", "Isnin", "Selasa", "Rabu", "Khamis", "Jumaat", "Sabtu"}, + {"Jan", "Feb", "Mac", "Apr", "Mei", "Jun", "Jul", "Ogo", "Sep", "Okt", "Nov", "Dis"}, + {"Januari", "Februari", "Mac", "April", "Mei", "Jun", "Julai", "Ogos", "September", "Oktober", "November", "Disember"}, + {"PG", "PTG"}, +} + +var localeTableMsSG = [5][]string{ + {"Ahd", "Isn", "Sel", "Rab", "Kha", "Jum", "Sab"}, + {"Ahad", "Isnin", "Selasa", "Rabu", "Khamis", "Jumaat", "Sabtu"}, + {"Jan", "Feb", "Mac", "Apr", "Mei", "Jun", "Jul", "Ogo", "Sep", "Okt", "Nov", "Dis"}, + {"Januari", "Februari", "Mac", "April", "Mei", "Jun", "Julai", "Ogos", "September", "Oktober", "November", "Disember"}, + {"PG", "PTG"}, +} + +var localeTableMt = [5][]string{ + {"Ħad", "Tne", "Tli", "Erb", "Ħam", "Ġim", "Sib"}, + {"Il-Ħadd", "It-Tnejn", "It-Tlieta", "L-Erbgħa", "Il-Ħamis", "Il-Ġimgħa", "Is-Sibt"}, + {"Jan", "Fra", "Mar", "Apr", "Mej", "Ġun", "Lul", "Aww", "Set", "Ott", "Nov", "Diċ"}, + {"Jannar", "Frar", "Marzu", "April", "Mejju", "Ġunju", "Lulju", "Awwissu", "Settembru", "Ottubru", "Novembru", "Diċembru"}, + {"am", "pm"}, +} + +var localeTableMtMT = [5][]string{ + {"Ħad", "Tne", "Tli", "Erb", "Ħam", "Ġim", "Sib"}, + {"Il-Ħadd", "It-Tnejn", "It-Tlieta", "L-Erbgħa", "Il-Ħamis", "Il-Ġimgħa", "Is-Sibt"}, + {"Jan", "Fra", "Mar", "Apr", "Mej", "Ġun", "Lul", "Aww", "Set", "Ott", "Nov", "Diċ"}, + {"Jannar", "Frar", "Marzu", "April", "Mejju", "Ġunju", "Lulju", "Awwissu", "Settembru", "Ottubru", "Novembru", "Diċembru"}, + {"am", "pm"}, +} + 
+var localeTableMua = [5][]string{ + {"Cya", "Cla", "Czi", "Cko", "Cka", "Cga", "Cze"}, + {"Com’yakke", "Comlaaɗii", "Comzyiiɗii", "Comkolle", "Comkaldǝɓlii", "Comgaisuu", "Comzyeɓsuu"}, + {"FLO", "CLA", "CKI", "FMF", "MAD", "MBI", "MLI", "MAM", "FDE", "FMU", "FGW", "FYU"}, + {"Fĩi Loo", "Cokcwaklaŋne", "Cokcwaklii", "Fĩi Marfoo", "Madǝǝuutǝbijaŋ", "Mamǝŋgwãafahbii", "Mamǝŋgwãalii", "Madǝmbii", "Fĩi Dǝɓlii", "Fĩi Mundaŋ", "Fĩi Gwahlle", "Fĩi Yuru"}, + {"comme", "lilli"}, +} + +var localeTableMuaCM = [5][]string{ + {"Cya", "Cla", "Czi", "Cko", "Cka", "Cga", "Cze"}, + {"Com’yakke", "Comlaaɗii", "Comzyiiɗii", "Comkolle", "Comkaldǝɓlii", "Comgaisuu", "Comzyeɓsuu"}, + {"FLO", "CLA", "CKI", "FMF", "MAD", "MBI", "MLI", "MAM", "FDE", "FMU", "FGW", "FYU"}, + {"Fĩi Loo", "Cokcwaklaŋne", "Cokcwaklii", "Fĩi Marfoo", "Madǝǝuutǝbijaŋ", "Mamǝŋgwãafahbii", "Mamǝŋgwãalii", "Madǝmbii", "Fĩi Dǝɓlii", "Fĩi Mundaŋ", "Fĩi Gwahlle", "Fĩi Yuru"}, + {"comme", "lilli"}, +} + +var localeTableMus = [5][]string{ + {}, + {"Nettvʼcako", "Enhvteceskv", "Enhvteceskv Enhvyvtke", "Ennvrkvpv", "Ennvrkvpv Enhvyvtke", "Nak Okkoskv Nettv", "Nettv Cakʼcuse"}, + {}, + {"Rvfo Cuse", "Hotvle Hvse", "Tasahcuce", "Tasahce Rakko", "Ke Hvse", "Kvco Hvse", "Hiyuce", "Hiyo Rakko", "Otowoskuce", "Otowoskv Rakko", "Ehole", "Rvfo Rakko"}, + {}, +} + +var localeTableMusUS = [5][]string{ + {}, + {"Nettvʼcako", "Enhvteceskv", "Enhvteceskv Enhvyvtke", "Ennvrkvpv", "Ennvrkvpv Enhvyvtke", "Nak Okkoskv Nettv", "Nettv Cakʼcuse"}, + {}, + {"Rvfo Cuse", "Hotvle Hvse", "Tasahcuce", "Tasahce Rakko", "Ke Hvse", "Kvco Hvse", "Hiyuce", "Hiyo Rakko", "Otowoskuce", "Otowoskv Rakko", "Ehole", "Rvfo Rakko"}, + {}, +} + +var localeTableMy = [5][]string{ + {}, + {"တနင်္ဂနွေ", "တနင်္လာ", "အင်္ဂါ", "ဗုဒ္ဓဟူး", "ကြာသပတေး", "သောကြာ", "စနေ"}, + {"ဇန်", "ဖေ", "မတ်", "ဧ", "မေ", "ဇွန်", "ဇူ", "ဩ", "စက်", "အောက်", "နို", "ဒီ"}, + {"ဇန်နဝါရီ", "ဖေဖော်ဝါရီ", "မတ်", "ဧပြီ", "မေ", "ဇွန်", "ဇူလိုင်", "ဩဂုတ်", "စက်တင်ဘာ", "အောက်တိုဘာ", "နိုဝင်ဘာ", "ဒီဇင်ဘာ"}, + {"နံနက်", "ညနေ"}, +} + +var localeTableMyMM = [5][]string{ + {}, + {"တနင်္ဂနွေ", "တနင်္လာ", "အင်္ဂါ", "ဗုဒ္ဓဟူး", "ကြာသပတေး", "သောကြာ", "စနေ"}, + {"ဇန်", "ဖေ", "မတ်", "ဧ", "မေ", "ဇွန်", "ဇူ", "ဩ", "စက်", "အောက်", "နို", "ဒီ"}, + {"ဇန်နဝါရီ", "ဖေဖော်ဝါရီ", "မတ်", "ဧပြီ", "မေ", "ဇွန်", "ဇူလိုင်", "ဩဂုတ်", "စက်တင်ဘာ", "အောက်တိုဘာ", "နိုဝင်ဘာ", "ဒီဇင်ဘာ"}, + {"နံနက်", "ညနေ"}, +} + +var localeTableMyv = [5][]string{ + {"тар", "атя", "вас", "кун", "кал", "сюк", "шля"}, + {"таргочистэ", "атяньчистэ", "вастаньчистэ", "куншкачистэ", "калоньчистэ", "сюконьчистэ", "шлямочистэ"}, + {}, + {"якшамков", "даволков", "эйзюрков", "чадыков", "панжиков", "аштемков", "медьков", "умарьков", "таштамков", "ожоков", "сундерьков", "ацамков"}, + {}, +} + +var localeTableMyvRU = [5][]string{ + {"тар", "атя", "вас", "кун", "кал", "сюк", "шля"}, + {"таргочистэ", "атяньчистэ", "вастаньчистэ", "куншкачистэ", "калоньчистэ", "сюконьчистэ", "шлямочистэ"}, + {}, + {"якшамков", "даволков", "эйзюрков", "чадыков", "панжиков", "аштемков", "медьков", "умарьков", "таштамков", "ожоков", "сундерьков", "ацамков"}, + {}, +} + +var localeTableMzn = [5][]string{ + {}, + {}, + {}, + {"ژانویه", "فوریه", "مارس", "آوریل", "مه", "ژوئن", "ژوئیه", "اوت", "سپتامبر", "اکتبر", "نوامبر", "دسامبر"}, + {}, +} + +var localeTableMznIR = [5][]string{ + {}, + {}, + {}, + {"ژانویه", "فوریه", "مارس", "آوریل", "مه", "ژوئن", "ژوئیه", "اوت", "سپتامبر", "اکتبر", "نوامبر", "دسامبر"}, + {}, +} + +var localeTableNaq = [5][]string{ + {"Son", "Ma", "De", "Wu", "Do", "Fr", "Sat"}, + 
{"Sontaxtsees", "Mantaxtsees", "Denstaxtsees", "Wunstaxtsees", "Dondertaxtsees", "Fraitaxtsees", "Satertaxtsees"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}, + {"ǃKhanni", "ǃKhanǀgôab", "ǀKhuuǁkhâb", "ǃHôaǂkhaib", "ǃKhaitsâb", "Gamaǀaeb", "ǂKhoesaob", "Aoǁkhuumûǁkhâb", "Taraǀkhuumûǁkhâb", "ǂNûǁnâiseb", "ǀHooǂgaeb", "Hôasoreǁkhâb"}, + {"ǁgoagas", "ǃuias"}, +} + +var localeTableNaqNA = [5][]string{ + {"Son", "Ma", "De", "Wu", "Do", "Fr", "Sat"}, + {"Sontaxtsees", "Mantaxtsees", "Denstaxtsees", "Wunstaxtsees", "Dondertaxtsees", "Fraitaxtsees", "Satertaxtsees"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}, + {"ǃKhanni", "ǃKhanǀgôab", "ǀKhuuǁkhâb", "ǃHôaǂkhaib", "ǃKhaitsâb", "Gamaǀaeb", "ǂKhoesaob", "Aoǁkhuumûǁkhâb", "Taraǀkhuumûǁkhâb", "ǂNûǁnâiseb", "ǀHooǂgaeb", "Hôasoreǁkhâb"}, + {"ǁgoagas", "ǃuias"}, +} + +var localeTableNd = [5][]string{ + {"Son", "Mvu", "Sib", "Sit", "Sin", "Sih", "Mgq"}, + {"Sonto", "Mvulo", "Sibili", "Sithathu", "Sine", "Sihlanu", "Mgqibelo"}, + {"Zib", "Nhlo", "Mbi", "Mab", "Nkw", "Nhla", "Ntu", "Ncw", "Mpan", "Mfu", "Lwe", "Mpal"}, + {"Zibandlela", "Nhlolanja", "Mbimbitho", "Mabasa", "Nkwenkwezi", "Nhlangula", "Ntulikazi", "Ncwabakazi", "Mpandula", "Mfumfu", "Lwezi", "Mpalakazi"}, + {}, +} + +var localeTableNdZW = [5][]string{ + {"Son", "Mvu", "Sib", "Sit", "Sin", "Sih", "Mgq"}, + {"Sonto", "Mvulo", "Sibili", "Sithathu", "Sine", "Sihlanu", "Mgqibelo"}, + {"Zib", "Nhlo", "Mbi", "Mab", "Nkw", "Nhla", "Ntu", "Ncw", "Mpan", "Mfu", "Lwe", "Mpal"}, + {"Zibandlela", "Nhlolanja", "Mbimbitho", "Mabasa", "Nkwenkwezi", "Nhlangula", "Ntulikazi", "Ncwabakazi", "Mpandula", "Mfumfu", "Lwezi", "Mpalakazi"}, + {}, +} + +var localeTableNds = [5][]string{ + {"Sü.", "Ma.", "Di.", "Mi.", "Du.", "Fr.", "Sa."}, + {"Sünndag", "Maandag", "Dingsdag", "Middeweken", "Dunnersdag", "Freedag", "Sünnavend"}, + {"Jan.", "Feb.", "März", "Apr.", "Mai", "Juni", "Juli", "Aug.", "Sep.", "Okt.", "Nov.", "Dez."}, + {"Januaar", "Februaar", "März", "April", "Mai", "Juni", "Juli", "August", "September", "Oktover", "November", "Dezember"}, + {"vm", "nm"}, +} + +var localeTableNdsDE = [5][]string{ + {"Sü.", "Ma.", "Di.", "Mi.", "Du.", "Fr.", "Sa."}, + {"Sünndag", "Maandag", "Dingsdag", "Middeweken", "Dunnersdag", "Freedag", "Sünnavend"}, + {"Jan.", "Feb.", "März", "Apr.", "Mai", "Juni", "Juli", "Aug.", "Sep.", "Okt.", "Nov.", "Dez."}, + {"Januaar", "Februaar", "März", "April", "Mai", "Juni", "Juli", "August", "September", "Oktover", "November", "Dezember"}, + {"vm", "nm"}, +} + +var localeTableNdsNL = [5][]string{ + {"Sü.", "Ma.", "Di.", "Mi.", "Du.", "Fr.", "Sa."}, + {"Sünndag", "Maandag", "Dingsdag", "Middeweken", "Dunnersdag", "Freedag", "Sünnavend"}, + {"Jan.", "Feb.", "März", "Apr.", "Mai", "Juni", "Juli", "Aug.", "Sep.", "Okt.", "Nov.", "Dez."}, + {"Januaar", "Februaar", "März", "April", "Mai", "Juni", "Juli", "August", "September", "Oktover", "November", "Dezember"}, + {"vm", "nm"}, +} + +var localeTableNe = [5][]string{ + {"आइत", "सोम", "मङ्गल", "बुध", "बिहि", "शुक्र", "शनि"}, + {"आइतबार", "सोमबार", "मङ्गलबार", "बुधबार", "बिहिबार", "शुक्रबार", "शनिबार"}, + {}, + {"जनवरी", "फेब्रुअरी", "मार्च", "अप्रिल", "मे", "जुन", "जुलाई", "अगस्ट", "सेप्टेम्बर", "अक्टोबर", "नोभेम्बर", "डिसेम्बर"}, + {"पूर्वाह्न", "अपराह्न"}, +} + +var localeTableNeIN = [5][]string{ + {"आइत", "सोम", "मङ्गल", "बुध", "बिहि", "शुक्र", "शनि"}, + {"आइतबार", "सोमबार", "मङ्गलबार", "बुधबार", "बिहिबार", "शुक्रबार", "शनिबार"}, + {}, + {"जनवरी", "फेब्रुअरी", 
"मार्च", "अप्रिल", "मे", "जुन", "जुलाई", "अगस्ट", "सेप्टेम्बर", "अक्टोबर", "नोभेम्बर", "डिसेम्बर"}, + {"पूर्वाह्न", "अपराह्न"}, +} + +var localeTableNeNP = [5][]string{ + {"आइत", "सोम", "मङ्गल", "बुध", "बिहि", "शुक्र", "शनि"}, + {"आइतबार", "सोमबार", "मङ्गलबार", "बुधबार", "बिहिबार", "शुक्रबार", "शनिबार"}, + {}, + {"जनवरी", "फेब्रुअरी", "मार्च", "अप्रिल", "मे", "जुन", "जुलाई", "अगस्ट", "सेप्टेम्बर", "अक्टोबर", "नोभेम्बर", "डिसेम्बर"}, + {"पूर्वाह्न", "अपराह्न"}, +} + +var localeTableNl = [5][]string{ + {"zo", "ma", "di", "wo", "do", "vr", "za"}, + {"zondag", "maandag", "dinsdag", "woensdag", "donderdag", "vrijdag", "zaterdag"}, + {"jan", "feb", "mrt", "apr", "mei", "jun", "jul", "aug", "sep", "okt", "nov", "dec"}, + {"januari", "februari", "maart", "april", "mei", "juni", "juli", "augustus", "september", "oktober", "november", "december"}, + {"a.m.", "p.m."}, +} + +var localeTableNlAW = [5][]string{ + {"zo", "ma", "di", "wo", "do", "vr", "za"}, + {"zondag", "maandag", "dinsdag", "woensdag", "donderdag", "vrijdag", "zaterdag"}, + {"jan", "feb", "mrt", "apr", "mei", "jun", "jul", "aug", "sep", "okt", "nov", "dec"}, + {"januari", "februari", "maart", "april", "mei", "juni", "juli", "augustus", "september", "oktober", "november", "december"}, + {"a.m.", "p.m."}, +} + +var localeTableNlBE = [5][]string{ + {"zo", "ma", "di", "wo", "do", "vr", "za"}, + {"zondag", "maandag", "dinsdag", "woensdag", "donderdag", "vrijdag", "zaterdag"}, + {"jan", "feb", "mrt", "apr", "mei", "jun", "jul", "aug", "sep", "okt", "nov", "dec"}, + {"januari", "februari", "maart", "april", "mei", "juni", "juli", "augustus", "september", "oktober", "november", "december"}, + {"a.m.", "p.m."}, +} + +var localeTableNlBQ = [5][]string{ + {"zo", "ma", "di", "wo", "do", "vr", "za"}, + {"zondag", "maandag", "dinsdag", "woensdag", "donderdag", "vrijdag", "zaterdag"}, + {"jan", "feb", "mrt", "apr", "mei", "jun", "jul", "aug", "sep", "okt", "nov", "dec"}, + {"januari", "februari", "maart", "april", "mei", "juni", "juli", "augustus", "september", "oktober", "november", "december"}, + {"a.m.", "p.m."}, +} + +var localeTableNlCW = [5][]string{ + {"zo", "ma", "di", "wo", "do", "vr", "za"}, + {"zondag", "maandag", "dinsdag", "woensdag", "donderdag", "vrijdag", "zaterdag"}, + {"jan", "feb", "mrt", "apr", "mei", "jun", "jul", "aug", "sep", "okt", "nov", "dec"}, + {"januari", "februari", "maart", "april", "mei", "juni", "juli", "augustus", "september", "oktober", "november", "december"}, + {"a.m.", "p.m."}, +} + +var localeTableNlNL = [5][]string{ + {"zo", "ma", "di", "wo", "do", "vr", "za"}, + {"zondag", "maandag", "dinsdag", "woensdag", "donderdag", "vrijdag", "zaterdag"}, + {"jan", "feb", "mrt", "apr", "mei", "jun", "jul", "aug", "sep", "okt", "nov", "dec"}, + {"januari", "februari", "maart", "april", "mei", "juni", "juli", "augustus", "september", "oktober", "november", "december"}, + {"a.m.", "p.m."}, +} + +var localeTableNlSR = [5][]string{ + {"zo", "ma", "di", "wo", "do", "vr", "za"}, + {"zondag", "maandag", "dinsdag", "woensdag", "donderdag", "vrijdag", "zaterdag"}, + {"jan", "feb", "mrt", "apr", "mei", "jun", "jul", "aug", "sep", "okt", "nov", "dec"}, + {"januari", "februari", "maart", "april", "mei", "juni", "juli", "augustus", "september", "oktober", "november", "december"}, + {"a.m.", "p.m."}, +} + +var localeTableNlSX = [5][]string{ + {"zo", "ma", "di", "wo", "do", "vr", "za"}, + {"zondag", "maandag", "dinsdag", "woensdag", "donderdag", "vrijdag", "zaterdag"}, + {"jan", "feb", "mrt", "apr", "mei", "jun", "jul", "aug", "sep", 
"okt", "nov", "dec"}, + {"januari", "februari", "maart", "april", "mei", "juni", "juli", "augustus", "september", "oktober", "november", "december"}, + {"a.m.", "p.m."}, +} + +var localeTableNmg = [5][]string{ + {"sɔ́n", "mɔ́n", "smb", "sml", "smn", "mbs", "sas"}, + {"sɔ́ndɔ", "mɔ́ndɔ", "sɔ́ndɔ mafú mába", "sɔ́ndɔ mafú málal", "sɔ́ndɔ mafú mána", "mabágá má sukul", "sásadi"}, + {"ng1", "ng2", "ng3", "ng4", "ng5", "ng6", "ng7", "ng8", "ng9", "ng10", "ng11", "kris"}, + {"ngwɛn matáhra", "ngwɛn ńmba", "ngwɛn ńlal", "ngwɛn ńna", "ngwɛn ńtan", "ngwɛn ńtuó", "ngwɛn hɛmbuɛrí", "ngwɛn lɔmbi", "ngwɛn rɛbvuâ", "ngwɛn wum", "ngwɛn wum navǔr", "krísimin"}, + {"maná", "kugú"}, +} + +var localeTableNmgCM = [5][]string{ + {"sɔ́n", "mɔ́n", "smb", "sml", "smn", "mbs", "sas"}, + {"sɔ́ndɔ", "mɔ́ndɔ", "sɔ́ndɔ mafú mába", "sɔ́ndɔ mafú málal", "sɔ́ndɔ mafú mána", "mabágá má sukul", "sásadi"}, + {"ng1", "ng2", "ng3", "ng4", "ng5", "ng6", "ng7", "ng8", "ng9", "ng10", "ng11", "kris"}, + {"ngwɛn matáhra", "ngwɛn ńmba", "ngwɛn ńlal", "ngwɛn ńna", "ngwɛn ńtan", "ngwɛn ńtuó", "ngwɛn hɛmbuɛrí", "ngwɛn lɔmbi", "ngwɛn rɛbvuâ", "ngwɛn wum", "ngwɛn wum navǔr", "krísimin"}, + {"maná", "kugú"}, +} + +var localeTableNn = [5][]string{ + {"sø.", "må.", "ty.", "on.", "to.", "fr.", "la."}, + {"søndag", "måndag", "tysdag", "onsdag", "torsdag", "fredag", "laurdag"}, + {}, + {}, + {"f.m.", "e.m."}, +} + +var localeTableNnNO = [5][]string{ + {"sø.", "må.", "ty.", "on.", "to.", "fr.", "la."}, + {"søndag", "måndag", "tysdag", "onsdag", "torsdag", "fredag", "laurdag"}, + {}, + {}, + {"f.m.", "e.m."}, +} + +var localeTableNnh = [5][]string{ + {}, + {"lyɛʼɛ́ sẅíŋtè", "mvfò lyɛ̌ʼ", "mbɔ́ɔntè mvfò lyɛ̌ʼ", "tsètsɛ̀ɛ lyɛ̌ʼ", "mbɔ́ɔntè tsetsɛ̀ɛ lyɛ̌ʼ", "mvfò màga lyɛ̌ʼ", "màga lyɛ̌ʼ"}, + {}, + {"saŋ tsetsɛ̀ɛ lùm", "saŋ kàg ngwóŋ", "saŋ lepyè shúm", "saŋ cÿó", "saŋ tsɛ̀ɛ cÿó", "saŋ njÿoláʼ", "saŋ tyɛ̀b tyɛ̀b mbʉ̀ŋ", "saŋ mbʉ̀ŋ", "saŋ ngwɔ̀ʼ mbÿɛ", "saŋ tàŋa tsetsáʼ", "saŋ mejwoŋó", "saŋ lùm"}, + {"mbaʼámbaʼ", "ncwònzém"}, +} + +var localeTableNnhCM = [5][]string{ + {}, + {"lyɛʼɛ́ sẅíŋtè", "mvfò lyɛ̌ʼ", "mbɔ́ɔntè mvfò lyɛ̌ʼ", "tsètsɛ̀ɛ lyɛ̌ʼ", "mbɔ́ɔntè tsetsɛ̀ɛ lyɛ̌ʼ", "mvfò màga lyɛ̌ʼ", "màga lyɛ̌ʼ"}, + {}, + {"saŋ tsetsɛ̀ɛ lùm", "saŋ kàg ngwóŋ", "saŋ lepyè shúm", "saŋ cÿó", "saŋ tsɛ̀ɛ cÿó", "saŋ njÿoláʼ", "saŋ tyɛ̀b tyɛ̀b mbʉ̀ŋ", "saŋ mbʉ̀ŋ", "saŋ ngwɔ̀ʼ mbÿɛ", "saŋ tàŋa tsetsáʼ", "saŋ mejwoŋó", "saŋ lùm"}, + {"mbaʼámbaʼ", "ncwònzém"}, +} + +var localeTableNo = [5][]string{ + {"søn.", "man.", "tir.", "ons.", "tor.", "fre.", "lør."}, + {"søndag", "mandag", "tirsdag", "onsdag", "torsdag", "fredag", "lørdag"}, + {"jan.", "feb.", "mars", "apr.", "mai", "juni", "juli", "aug.", "sep.", "okt.", "nov.", "des."}, + {"januar", "februar", "mars", "april", "mai", "juni", "juli", "august", "september", "oktober", "november", "desember"}, + {"a.m.", "p.m."}, +} + +var localeTableNqo = [5][]string{ + {"ߞߊ߯ߙ", "ߞߐ߬ߓ", "ߞߐ߬ߟߏ߲", "ߞߎߣ", "ߓߌߟ", "ߛߌ߬ߣ", "ߞߍ߲ߘ"}, + {"ߞߊ߯ߙߌߟߏ߲", "ߞߐ߬ߓߊ߬ߟߏ߲", "ߞߐ߬ߟߏ߲", "ߞߎߣߎ߲ߟߏ߲", "ߓߌߟߏ߲", "ߛߌ߬ߣߌ߲߬ߟߏ߲", "ߞߍ߲ߘߍߟߏ߲"}, + {"ߓߌ߲ߠ", "ߞߏ߲ߞ", "ߕߙߊ", "ߞߏ߲ߘ", "ߘߓߊ߬ߕ", "ߥߊ߬ߛ", "ߞߊ߬ߙ", "ߘߓߊ߬ߓ", "ߕߎߟߊߝߌ߲", "ߞߏ߲ߓ", "ߣߍߣ", "ߞߏߟ"}, + {"ߓߌ߲ߠߊߥߎߟߋ߲", "ߞߏ߲ߞߏߜߍ", "ߕߙߊߓߊ", "ߞߏ߲ߞߏߘߌ߬ߓߌ", "ߘߓߊ߬ߕߊ", "ߥߊ߬ߛߌ߬ߥߙߊ", "ߞߊ߬ߙߌߝߐ߭", "ߘߓߊ߬ߓߌߟߊ", "ߕߎߟߊߝߌ߲", "ߞߏ߲ߓߌߕߌ߮", "ߣߍߣߍߓߊ", "ߞߏߟߌ߲ߞߏߟߌ߲"}, + {"ߛ", "ߥ"}, +} + +var localeTableNqoGN = [5][]string{ + {"ߞߊ߯ߙ", "ߞߐ߬ߓ", "ߞߐ߬ߟߏ߲", "ߞߎߣ", "ߓߌߟ", "ߛߌ߬ߣ", "ߞߍ߲ߘ"}, + {"ߞߊ߯ߙߌߟߏ߲", "ߞߐ߬ߓߊ߬ߟߏ߲", "ߞߐ߬ߟߏ߲", "ߞߎߣߎ߲ߟߏ߲", "ߓߌߟߏ߲", "ߛߌ߬ߣߌ߲߬ߟߏ߲", "ߞߍ߲ߘߍߟߏ߲"}, + {"ߓߌ߲ߠ", "ߞߏ߲ߞ", "ߕߙߊ", "ߞߏ߲ߘ", "ߘߓߊ߬ߕ", "ߥߊ߬ߛ", "ߞߊ߬ߙ", "ߘߓߊ߬ߓ", 
"ߕߎߟߊߝߌ߲", "ߞߏ߲ߓ", "ߣߍߣ", "ߞߏߟ"}, + {"ߓߌ߲ߠߊߥߎߟߋ߲", "ߞߏ߲ߞߏߜߍ", "ߕߙߊߓߊ", "ߞߏ߲ߞߏߘߌ߬ߓߌ", "ߘߓߊ߬ߕߊ", "ߥߊ߬ߛߌ߬ߥߙߊ", "ߞߊ߬ߙߌߝߐ߭", "ߘߓߊ߬ߓߌߟߊ", "ߕߎߟߊߝߌ߲", "ߞߏ߲ߓߌߕߌ߮", "ߣߍߣߍߓߊ", "ߞߏߟߌ߲ߞߏߟߌ߲"}, + {"ߛ", "ߥ"}, +} + +var localeTableNr = [5][]string{ + {"Son", "Mvu", "Bil", "Tha", "Ne", "Hla", "Gqi"}, + {"uSonto", "uMvulo", "uLesibili", "Lesithathu", "uLesine", "ngoLesihlanu", "umGqibelo"}, + {"Jan", "Feb", "Mat", "Apr", "Mey", "Jun", "Jul", "Arh", "Sep", "Okt", "Usi", "Dis"}, + {"Janabari", "uFeberbari", "uMatjhi", "u-Apreli", "Meyi", "Juni", "Julayi", "Arhostosi", "Septemba", "Oktoba", "Usinyikhaba", "Disemba"}, + {}, +} + +var localeTableNrZA = [5][]string{ + {"Son", "Mvu", "Bil", "Tha", "Ne", "Hla", "Gqi"}, + {"uSonto", "uMvulo", "uLesibili", "Lesithathu", "uLesine", "ngoLesihlanu", "umGqibelo"}, + {"Jan", "Feb", "Mat", "Apr", "Mey", "Jun", "Jul", "Arh", "Sep", "Okt", "Usi", "Dis"}, + {"Janabari", "uFeberbari", "uMatjhi", "u-Apreli", "Meyi", "Juni", "Julayi", "Arhostosi", "Septemba", "Oktoba", "Usinyikhaba", "Disemba"}, + {}, +} + +var localeTableNso = [5][]string{ + {"Lam", "Mos", "Bed", "Rar", "Ne", "Hla", "Mok"}, + {"Lamorena", "Musopologo", "Labobedi", "Laboraro", "Labone", "Labohlano", "Mokibelo"}, + {"Phere", "Dibo", "Hlak", "Mora", "Mopi", "Phupu", "Mose", "Phato", "Lewe", "Dipha", "Diba", "Manth"}, + {"Pherekgong", "Dibokwane", "Hlakola", "Moranang", "Mopitlo", "Phupu", "Mosegemanye", "Phato", "Lewedi", "Diphalane", "Dibatsela", "Manthole"}, + {"a", "p"}, +} + +var localeTableNsoZA = [5][]string{ + {"Lam", "Mos", "Bed", "Rar", "Ne", "Hla", "Mok"}, + {"Lamorena", "Musopologo", "Labobedi", "Laboraro", "Labone", "Labohlano", "Mokibelo"}, + {"Phere", "Dibo", "Hlak", "Mora", "Mopi", "Phupu", "Mose", "Phato", "Lewe", "Dipha", "Diba", "Manth"}, + {"Pherekgong", "Dibokwane", "Hlakola", "Moranang", "Mopitlo", "Phupu", "Mosegemanye", "Phato", "Lewedi", "Diphalane", "Dibatsela", "Manthole"}, + {"a", "p"}, +} + +var localeTableNus = [5][]string{ + {"Cäŋ", "Jiec", "Rɛw", "Diɔ̱k", "Ŋuaan", "Dhieec", "Bäkɛl"}, + {"Cäŋ kuɔth", "Jiec la̱t", "Rɛw lätni", "Diɔ̱k lätni", "Ŋuaan lätni", "Dhieec lätni", "Bäkɛl lätni"}, + {"Tiop", "Pɛt", "Duɔ̱ɔ̱", "Guak", "Duä", "Kor", "Pay", "Thoo", "Tɛɛ", "Laa", "Kur", "Tid"}, + {"Tiop thar pɛt", "Pɛt", "Duɔ̱ɔ̱ŋ", "Guak", "Duät", "Kornyoot", "Pay yie̱tni", "Tho̱o̱r", "Tɛɛr", "Laath", "Kur", "Tio̱p in di̱i̱t"}, + {"RW", "TŊ"}, +} + +var localeTableNusSS = [5][]string{ + {"Cäŋ", "Jiec", "Rɛw", "Diɔ̱k", "Ŋuaan", "Dhieec", "Bäkɛl"}, + {"Cäŋ kuɔth", "Jiec la̱t", "Rɛw lätni", "Diɔ̱k lätni", "Ŋuaan lätni", "Dhieec lätni", "Bäkɛl lätni"}, + {"Tiop", "Pɛt", "Duɔ̱ɔ̱", "Guak", "Duä", "Kor", "Pay", "Thoo", "Tɛɛ", "Laa", "Kur", "Tid"}, + {"Tiop thar pɛt", "Pɛt", "Duɔ̱ɔ̱ŋ", "Guak", "Duät", "Kornyoot", "Pay yie̱tni", "Tho̱o̱r", "Tɛɛr", "Laath", "Kur", "Tio̱p in di̱i̱t"}, + {"RW", "TŊ"}, +} + +var localeTableNy = [5][]string{ + {"Mul", "Lem", "Wir", "Tat", "Nai", "San", "Wer"}, + {"Lamulungu", "Lolemba", "Lachiwiri", "Lachitatu", "Lachinayi", "Lachisanu", "Loweruka"}, + {"Jan", "Feb", "Mal", "Epu", "Mei", "Jun", "Jul", "Oga", "Sep", "Oku", "Nov", "Dis"}, + {"Januwale", "Febuluwale", "Malichi", "Epulo", "Mei", "Juni", "Julai", "Ogasiti", "Seputemba", "Okutoba", "Novemba", "Disemba"}, + {}, +} + +var localeTableNyMW = [5][]string{ + {"Mul", "Lem", "Wir", "Tat", "Nai", "San", "Wer"}, + {"Lamulungu", "Lolemba", "Lachiwiri", "Lachitatu", "Lachinayi", "Lachisanu", "Loweruka"}, + {"Jan", "Feb", "Mal", "Epu", "Mei", "Jun", "Jul", "Oga", "Sep", "Oku", "Nov", "Dis"}, + {"Januwale", "Febuluwale", "Malichi", 
"Epulo", "Mei", "Juni", "Julai", "Ogasiti", "Seputemba", "Okutoba", "Novemba", "Disemba"}, + {}, +} + +var localeTableNyn = [5][]string{ + {"SAN", "ORK", "OKB", "OKS", "OKN", "OKT", "OMK"}, + {"Sande", "Orwokubanza", "Orwakabiri", "Orwakashatu", "Orwakana", "Orwakataano", "Orwamukaaga"}, + {"KBZ", "KBR", "KST", "KKN", "KTN", "KMK", "KMS", "KMN", "KMW", "KKM", "KNK", "KNB"}, + {"Okwokubanza", "Okwakabiri", "Okwakashatu", "Okwakana", "Okwakataana", "Okwamukaaga", "Okwamushanju", "Okwamunaana", "Okwamwenda", "Okwaikumi", "Okwaikumi na kumwe", "Okwaikumi na ibiri"}, + {}, +} + +var localeTableNynUG = [5][]string{ + {"SAN", "ORK", "OKB", "OKS", "OKN", "OKT", "OMK"}, + {"Sande", "Orwokubanza", "Orwakabiri", "Orwakashatu", "Orwakana", "Orwakataano", "Orwamukaaga"}, + {"KBZ", "KBR", "KST", "KKN", "KTN", "KMK", "KMS", "KMN", "KMW", "KKM", "KNK", "KNB"}, + {"Okwokubanza", "Okwakabiri", "Okwakashatu", "Okwakana", "Okwakataana", "Okwamukaaga", "Okwamushanju", "Okwamunaana", "Okwamwenda", "Okwaikumi", "Okwaikumi na kumwe", "Okwaikumi na ibiri"}, + {}, +} + +var localeTableOc = [5][]string{ + {}, + {"dimenge", "diluns", "dimars", "dimècres", "dijòus", "divendres", "dissabte"}, + {"gen.", "feb.", "març", "abr.", "mai", "junh", "jul.", "ago.", "set.", "oct.", "nov.", "dec."}, + {"de genièr", "de febrièr", "de març", "d’abril", "de mai", "de junh", "de julhet", "d’agost", "de setembre", "d’octòbre", "de novembre", "de decembre"}, + {}, +} + +var localeTableOcES = [5][]string{ + {"dim", "del", "dma", "dmè", "dij", "diu", "dis"}, + {"dimenge", "deluns", "dimars", "dimèrcles", "dijaus", "diuendres", "dissabte"}, + {"gèr", "her", "mar", "abr", "mai", "jun", "jur", "ago", "set", "oct", "nov", "dec"}, + {"gèr", "hereuèr", "març", "abriu", "mai", "junh", "juriòl", "agost", "seteme", "octobre", "noveme", "deseme"}, + {"a.m.", "p.m."}, +} + +var localeTableOcFR = [5][]string{ + {}, + {"dimenge", "diluns", "dimars", "dimècres", "dijòus", "divendres", "dissabte"}, + {"gen.", "feb.", "març", "abr.", "mai", "junh", "jul.", "ago.", "set.", "oct.", "nov.", "dec."}, + {"de genièr", "de febrièr", "de març", "d’abril", "de mai", "de junh", "de julhet", "d’agost", "de setembre", "d’octòbre", "de novembre", "de decembre"}, + {}, +} + +var localeTableOm = [5][]string{ + {"Dil", "Wix", "Qib", "Rob", "Kam", "Jim", "San"}, + {"Dilbata", "Wiixata", "Qibxata", "Roobii", "Kamiisa", "Jimaata", "Sanbata"}, + {"Ama", "Gur", "Bit", "Elb", "Cam", "Wax", "Ado", "Hag", "Ful", "Onk", "Sad", "Mud"}, + {"Amajjii", "Guraandhala", "Bitooteessa", "Elba", "Caamsa", "Waxabajjii", "Adooleessa", "Hagayya", "Fuulbana", "Onkololeessa", "Sadaasa", "Muddee"}, + {"WD", "WB"}, +} + +var localeTableOmET = [5][]string{ + {"Dil", "Wix", "Qib", "Rob", "Kam", "Jim", "San"}, + {"Dilbata", "Wiixata", "Qibxata", "Roobii", "Kamiisa", "Jimaata", "Sanbata"}, + {"Ama", "Gur", "Bit", "Elb", "Cam", "Wax", "Ado", "Hag", "Ful", "Onk", "Sad", "Mud"}, + {"Amajjii", "Guraandhala", "Bitooteessa", "Elba", "Caamsa", "Waxabajjii", "Adooleessa", "Hagayya", "Fuulbana", "Onkololeessa", "Sadaasa", "Muddee"}, + {"WD", "WB"}, +} + +var localeTableOmKE = [5][]string{ + {"Dil", "Wix", "Qib", "Rob", "Kam", "Jim", "San"}, + {"Dilbata", "Wiixata", "Qibxata", "Roobii", "Kamiisa", "Jimaata", "Sanbata"}, + {"Ama", "Gur", "Bit", "Elb", "Cam", "Wax", "Ado", "Hag", "Ful", "Onk", "Sad", "Mud"}, + {"Amajjii", "Guraandhala", "Bitooteessa", "Elba", "Caamsa", "Waxabajjii", "Adooleessa", "Hagayya", "Fuulbana", "Onkololeessa", "Sadaasa", "Muddee"}, + {"WD", "WB"}, +} + +var localeTableOr = 
[5][]string{ + {"ରବି", "ସୋମ", "ମଙ୍ଗଳ", "ବୁଧ", "ଗୁରୁ", "ଶୁକ୍ର", "ଶନି"}, + {"ରବିବାର", "ସୋମବାର", "ମଙ୍ଗଳବାର", "ବୁଧବାର", "ଗୁରୁବାର", "ଶୁକ୍ରବାର", "ଶନିବାର"}, + {}, + {"ଜାନୁଆରୀ", "ଫେବୃଆରୀ", "ମାର୍ଚ୍ଚ", "ଅପ୍ରେଲ", "ମଇ", "ଜୁନ", "ଜୁଲାଇ", "ଅଗଷ୍ଟ", "ସେପ୍ଟେମ୍ବର", "ଅକ୍ଟୋବର", "ନଭେମ୍ବର", "ଡିସେମ୍ବର"}, + {"ପୂ", "ଅ"}, +} + +var localeTableOrIN = [5][]string{ + {"ରବି", "ସୋମ", "ମଙ୍ଗଳ", "ବୁଧ", "ଗୁରୁ", "ଶୁକ୍ର", "ଶନି"}, + {"ରବିବାର", "ସୋମବାର", "ମଙ୍ଗଳବାର", "ବୁଧବାର", "ଗୁରୁବାର", "ଶୁକ୍ରବାର", "ଶନିବାର"}, + {}, + {"ଜାନୁଆରୀ", "ଫେବୃଆରୀ", "ମାର୍ଚ୍ଚ", "ଅପ୍ରେଲ", "ମଇ", "ଜୁନ", "ଜୁଲାଇ", "ଅଗଷ୍ଟ", "ସେପ୍ଟେମ୍ବର", "ଅକ୍ଟୋବର", "ନଭେମ୍ବର", "ଡିସେମ୍ବର"}, + {"ପୂ", "ଅ"}, +} + +var localeTableOs = [5][]string{ + {"хцб", "крс", "дцг", "ӕрт", "цпр", "мрб", "сбт"}, + {"хуыцаубон", "къуырисӕр", "дыццӕг", "ӕртыццӕг", "цыппӕрӕм", "майрӕмбон", "сабат"}, + {"янв.", "фев.", "мар.", "апр.", "майы", "июны", "июлы", "авг.", "сен.", "окт.", "ноя.", "дек."}, + {"январы", "февралы", "мартъийы", "апрелы", "майы", "июны", "июлы", "августы", "сентябры", "октябры", "ноябры", "декабры"}, + {}, +} + +var localeTableOsGE = [5][]string{ + {"хцб", "крс", "дцг", "ӕрт", "цпр", "мрб", "сбт"}, + {"хуыцаубон", "къуырисӕр", "дыццӕг", "ӕртыццӕг", "цыппӕрӕм", "майрӕмбон", "сабат"}, + {"янв.", "фев.", "мар.", "апр.", "майы", "июны", "июлы", "авг.", "сен.", "окт.", "ноя.", "дек."}, + {"январы", "февралы", "мартъийы", "апрелы", "майы", "июны", "июлы", "августы", "сентябры", "октябры", "ноябры", "декабры"}, + {}, +} + +var localeTableOsRU = [5][]string{ + {"хцб", "крс", "дцг", "ӕрт", "цпр", "мрб", "сбт"}, + {"хуыцаубон", "къуырисӕр", "дыццӕг", "ӕртыццӕг", "цыппӕрӕм", "майрӕмбон", "сабат"}, + {"янв.", "фев.", "мар.", "апр.", "майы", "июны", "июлы", "авг.", "сен.", "окт.", "ноя.", "дек."}, + {"январы", "февралы", "мартъийы", "апрелы", "майы", "июны", "июлы", "августы", "сентябры", "октябры", "ноябры", "декабры"}, + {}, +} + +var localeTableOsa = [5][]string{ + {}, + {"𐒹𐓘͘𐓬𐓘 𐓏𐓘𐓤𐓘͘𐓰𐓘𐓤𐓣", "𐒹𐓘͘𐓬𐓘 𐓄𐓘𐓡𐓛͘𐓧𐓣", "𐒹𐓘͘𐓬𐓘 𐓏𐓟𐓵𐓪͘𐓬𐓘", "𐒹𐓘͘𐓬𐓘 𐓏𐓟𐓵𐓘𐓜𐓣", "𐒹𐓘͘𐓬𐓘 𐓏𐓟𐓰𐓪𐓬𐓘", "𐒹𐓘͘𐓬𐓘 𐓈𐓘 𐓵𐓘𐓲𐓘 𐓻𐓣͘", "𐒹𐓘͘𐓬𐓘 𐓂𐓤𐓘𐓸𐓟 𐓣͘𐓤𐓟"}, + {"𐓄𐓘𐓡𐓛͘𐓧𐓟", "𐓵𐓪͘𐓬𐓘", "𐓵𐓘𐓜𐓣", "𐓰𐓪𐓬𐓘", "𐓮𐓘𐓰𐓘", "𐓯𐓘𐓬𐓟", "𐓄𐓟𐓵𐓪͘𐓬𐓘", "𐒼𐓣𐓟𐓰𐓪𐓬𐓘", "𐒿𐓟𐓜𐓛𐓲𐓟𐓷𐓣͘𐓤𐓟", "𐒿𐓟𐓜𐓛", "𐒰𐓧𐓣 𐓏𐓣͘𐓸𐓲𐓣", "𐒰𐓧𐓣 𐓍𐓪͘𐓬𐓘"}, + {"𐓀𐓣͘𐓪͘𐓬𐓘 𐓄𐓘𐓡𐓛͘𐓧𐓟", "𐓀𐓣͘𐓪͘𐓬𐓘 𐓏𐓟𐓵𐓪͘𐓬𐓘", "𐓀𐓣͘𐓪͘𐓬𐓘 𐓏𐓟𐓵𐓘𐓜𐓣", "𐓀𐓣͘𐓪͘𐓬𐓘 𐓏𐓟𐓰𐓪𐓬𐓘", "𐓀𐓣͘𐓪͘𐓬𐓘 𐓏𐓟𐓮𐓘𐓰𐓘", "𐓀𐓣͘𐓪͘𐓬𐓘 𐓏𐓟𐓯𐓘𐓬𐓟", "𐓀𐓣͘𐓪͘𐓬𐓘 𐓄𐓟𐓵𐓪͘𐓬𐓘", "𐓀𐓣͘𐓪͘𐓬𐓘 𐒼𐓣𐓟𐓰𐓪𐓬𐓘", "𐓀𐓣͘𐓪͘𐓬𐓘 𐒿𐓟𐓜𐓛𐓲𐓟𐓷𐓣͘𐓤𐓟", "𐓀𐓣͘𐓪͘𐓬𐓘 𐒿𐓟𐓜𐓛", "𐓀𐓣͘𐓪͘𐓬𐓘 𐒰𐓧𐓣 𐓏𐓣͘𐓸𐓲𐓣", "𐓀𐓣͘𐓪͘𐓬𐓘 𐒰𐓧𐓣 𐓍𐓪͘𐓬𐓘"}, + {}, +} + +var localeTableOsaUS = [5][]string{ + {}, + {"𐒹𐓘͘𐓬𐓘 𐓏𐓘𐓤𐓘͘𐓰𐓘𐓤𐓣", "𐒹𐓘͘𐓬𐓘 𐓄𐓘𐓡𐓛͘𐓧𐓣", "𐒹𐓘͘𐓬𐓘 𐓏𐓟𐓵𐓪͘𐓬𐓘", "𐒹𐓘͘𐓬𐓘 𐓏𐓟𐓵𐓘𐓜𐓣", "𐒹𐓘͘𐓬𐓘 𐓏𐓟𐓰𐓪𐓬𐓘", "𐒹𐓘͘𐓬𐓘 𐓈𐓘 𐓵𐓘𐓲𐓘 𐓻𐓣͘", "𐒹𐓘͘𐓬𐓘 𐓂𐓤𐓘𐓸𐓟 𐓣͘𐓤𐓟"}, + {"𐓄𐓘𐓡𐓛͘𐓧𐓟", "𐓵𐓪͘𐓬𐓘", "𐓵𐓘𐓜𐓣", "𐓰𐓪𐓬𐓘", "𐓮𐓘𐓰𐓘", "𐓯𐓘𐓬𐓟", "𐓄𐓟𐓵𐓪͘𐓬𐓘", "𐒼𐓣𐓟𐓰𐓪𐓬𐓘", "𐒿𐓟𐓜𐓛𐓲𐓟𐓷𐓣͘𐓤𐓟", "𐒿𐓟𐓜𐓛", "𐒰𐓧𐓣 𐓏𐓣͘𐓸𐓲𐓣", "𐒰𐓧𐓣 𐓍𐓪͘𐓬𐓘"}, + {"𐓀𐓣͘𐓪͘𐓬𐓘 𐓄𐓘𐓡𐓛͘𐓧𐓟", "𐓀𐓣͘𐓪͘𐓬𐓘 𐓏𐓟𐓵𐓪͘𐓬𐓘", "𐓀𐓣͘𐓪͘𐓬𐓘 𐓏𐓟𐓵𐓘𐓜𐓣", "𐓀𐓣͘𐓪͘𐓬𐓘 𐓏𐓟𐓰𐓪𐓬𐓘", "𐓀𐓣͘𐓪͘𐓬𐓘 𐓏𐓟𐓮𐓘𐓰𐓘", "𐓀𐓣͘𐓪͘𐓬𐓘 𐓏𐓟𐓯𐓘𐓬𐓟", "𐓀𐓣͘𐓪͘𐓬𐓘 𐓄𐓟𐓵𐓪͘𐓬𐓘", "𐓀𐓣͘𐓪͘𐓬𐓘 𐒼𐓣𐓟𐓰𐓪𐓬𐓘", "𐓀𐓣͘𐓪͘𐓬𐓘 𐒿𐓟𐓜𐓛𐓲𐓟𐓷𐓣͘𐓤𐓟", "𐓀𐓣͘𐓪͘𐓬𐓘 𐒿𐓟𐓜𐓛", "𐓀𐓣͘𐓪͘𐓬𐓘 𐒰𐓧𐓣 𐓏𐓣͘𐓸𐓲𐓣", "𐓀𐓣͘𐓪͘𐓬𐓘 𐒰𐓧𐓣 𐓍𐓪͘𐓬𐓘"}, + {}, +} + +var localeTablePa = [5][]string{ + {"ਐਤ", "ਸੋਮ", "ਮੰਗਲ", "ਬੁੱਧ", "ਵੀਰ", "ਸ਼ੁੱਕਰ", "ਸ਼ਨਿੱਚਰ"}, + {"ਐਤਵਾਰ", "ਸੋਮਵਾਰ", "ਮੰਗਲਵਾਰ", "ਬੁੱਧਵਾਰ", "ਵੀਰਵਾਰ", "ਸ਼ੁੱਕਰਵਾਰ", "ਸ਼ਨਿੱਚਰਵਾਰ"}, + {"ਜਨ", "ਫ਼ਰ", "ਮਾਰਚ", "ਅਪ੍ਰੈ", "ਮਈ", "ਜੂਨ", "ਜੁਲਾ", "ਅਗ", "ਸਤੰ", "ਅਕਤੂ", "ਨਵੰ", "ਦਸੰ"}, + {"ਜਨਵਰੀ", "ਫ਼ਰਵਰੀ", "ਮਾਰਚ", "ਅਪ੍ਰੈਲ", "ਮਈ", "ਜੂਨ", "ਜੁਲਾਈ", "ਅਗਸਤ", "ਸਤੰਬਰ", "ਅਕਤੂਬਰ", "ਨਵੰਬਰ", "ਦਸੰਬਰ"}, + {"ਪੂ.ਦੁ.", "ਬਾ.ਦੁ."}, +} + +var localeTablePaArab = [5][]string{ + {}, + {"اتوار", "پیر", "منگل", "بُدھ", "جمعرات", "جمعہ", "ہفتہ"}, + {}, + {"جنوری", "فروری", "مارچ", "اپریل", "مئ", "جون", "جولائی", "اگست", "ستمبر", "اکتوبر", 
"نومبر", "دسمبر"}, + {}, +} + +var localeTablePaArabPK = [5][]string{ + {}, + {"اتوار", "پیر", "منگل", "بُدھ", "جمعرات", "جمعہ", "ہفتہ"}, + {}, + {"جنوری", "فروری", "مارچ", "اپریل", "مئ", "جون", "جولائی", "اگست", "ستمبر", "اکتوبر", "نومبر", "دسمبر"}, + {}, +} + +var localeTablePaGuru = [5][]string{ + {"ਐਤ", "ਸੋਮ", "ਮੰਗਲ", "ਬੁੱਧ", "ਵੀਰ", "ਸ਼ੁੱਕਰ", "ਸ਼ਨਿੱਚਰ"}, + {"ਐਤਵਾਰ", "ਸੋਮਵਾਰ", "ਮੰਗਲਵਾਰ", "ਬੁੱਧਵਾਰ", "ਵੀਰਵਾਰ", "ਸ਼ੁੱਕਰਵਾਰ", "ਸ਼ਨਿੱਚਰਵਾਰ"}, + {"ਜਨ", "ਫ਼ਰ", "ਮਾਰਚ", "ਅਪ੍ਰੈ", "ਮਈ", "ਜੂਨ", "ਜੁਲਾ", "ਅਗ", "ਸਤੰ", "ਅਕਤੂ", "ਨਵੰ", "ਦਸੰ"}, + {"ਜਨਵਰੀ", "ਫ਼ਰਵਰੀ", "ਮਾਰਚ", "ਅਪ੍ਰੈਲ", "ਮਈ", "ਜੂਨ", "ਜੁਲਾਈ", "ਅਗਸਤ", "ਸਤੰਬਰ", "ਅਕਤੂਬਰ", "ਨਵੰਬਰ", "ਦਸੰਬਰ"}, + {"ਪੂ.ਦੁ.", "ਬਾ.ਦੁ."}, +} + +var localeTablePaGuruIN = [5][]string{ + {"ਐਤ", "ਸੋਮ", "ਮੰਗਲ", "ਬੁੱਧ", "ਵੀਰ", "ਸ਼ੁੱਕਰ", "ਸ਼ਨਿੱਚਰ"}, + {"ਐਤਵਾਰ", "ਸੋਮਵਾਰ", "ਮੰਗਲਵਾਰ", "ਬੁੱਧਵਾਰ", "ਵੀਰਵਾਰ", "ਸ਼ੁੱਕਰਵਾਰ", "ਸ਼ਨਿੱਚਰਵਾਰ"}, + {"ਜਨ", "ਫ਼ਰ", "ਮਾਰਚ", "ਅਪ੍ਰੈ", "ਮਈ", "ਜੂਨ", "ਜੁਲਾ", "ਅਗ", "ਸਤੰ", "ਅਕਤੂ", "ਨਵੰ", "ਦਸੰ"}, + {"ਜਨਵਰੀ", "ਫ਼ਰਵਰੀ", "ਮਾਰਚ", "ਅਪ੍ਰੈਲ", "ਮਈ", "ਜੂਨ", "ਜੁਲਾਈ", "ਅਗਸਤ", "ਸਤੰਬਰ", "ਅਕਤੂਬਰ", "ਨਵੰਬਰ", "ਦਸੰਬਰ"}, + {"ਪੂ.ਦੁ.", "ਬਾ.ਦੁ."}, +} + +var localeTablePap = [5][]string{ + {}, + {"djadumingu", "djaluna", "djamars", "djarason", "djaweps", "djabièrnè", "djasabra"}, + {"Yan", "Feb", "Mar", "Apr", "Mei", "Yün", "Yül", "Oug", "Sèp", "Òkt", "Nov", "Des"}, + {"Yanüari", "Febrüari", "Mart", "Aprel", "Mei", "Yüni", "Yüli", "Ougùstùs", "Sèptèmber", "Òktober", "Novèmber", "Desèmber"}, + {}, +} + +var localeTablePapAW = [5][]string{ + {}, + {"djadumingu", "djaluna", "djamars", "djarason", "djaweps", "djabièrnè", "djasabra"}, + {"Yan", "Feb", "Mar", "Apr", "Mei", "Yün", "Yül", "Oug", "Sèp", "Òkt", "Nov", "Des"}, + {"Yanüari", "Febrüari", "Mart", "Aprel", "Mei", "Yüni", "Yüli", "Ougùstùs", "Sèptèmber", "Òktober", "Novèmber", "Desèmber"}, + {}, +} + +var localeTablePapCW = [5][]string{ + {}, + {"djadumingu", "djaluna", "djamars", "djarason", "djaweps", "djabièrnè", "djasabra"}, + {"Yan", "Feb", "Mar", "Apr", "Mei", "Yün", "Yül", "Oug", "Sèp", "Òkt", "Nov", "Des"}, + {"Yanüari", "Febrüari", "Mart", "Aprel", "Mei", "Yüni", "Yüli", "Ougùstùs", "Sèptèmber", "Òktober", "Novèmber", "Desèmber"}, + {}, +} + +var localeTablePcm = [5][]string{ + {"Sọ́n", "Mọ́n", "Tiú", "Wẹ́n", "Tọ́z", "Fraí", "Sát"}, + {"Sọ́ndè", "Mọ́ndè", "Tiúzdè", "Wẹ́nẹ́zdè", "Tọ́zdè", "Fraídè", "Sátọdè"}, + {"Jén", "Fẹ́b", "Mach", "Épr", "Mee", "Jun", "Jul", "Ọgọ", "Sẹp", "Ọkt", "Nọv", "Dis"}, + {"Jénúári", "Fẹ́búári", "Mach", "Éprel", "Mee", "Jun", "Julai", "Ọgọst", "Sẹptẹ́mba", "Ọktóba", "Nọvẹ́mba", "Disẹ́mba"}, + {"FM", "FI"}, +} + +var localeTablePcmNG = [5][]string{ + {"Sọ́n", "Mọ́n", "Tiú", "Wẹ́n", "Tọ́z", "Fraí", "Sát"}, + {"Sọ́ndè", "Mọ́ndè", "Tiúzdè", "Wẹ́nẹ́zdè", "Tọ́zdè", "Fraídè", "Sátọdè"}, + {"Jén", "Fẹ́b", "Mach", "Épr", "Mee", "Jun", "Jul", "Ọgọ", "Sẹp", "Ọkt", "Nọv", "Dis"}, + {"Jénúári", "Fẹ́búári", "Mach", "Éprel", "Mee", "Jun", "Julai", "Ọgọst", "Sẹptẹ́mba", "Ọktóba", "Nọvẹ́mba", "Disẹ́mba"}, + {"FM", "FI"}, +} + +var localeTablePis = [5][]string{ + {}, + {"Sande", "Mande", "Tiusde", "Wenesde", "Tosde", "Fraede", "Satade"}, + {}, + {"Januare", "Febuare", "Mas", "Eprel", "Mei", "Jun", "Julae", "Ogus", "Septemba", "Oktoba", "Novemba", "Disemba"}, + {}, +} + +var localeTablePisSB = [5][]string{ + {}, + {"Sande", "Mande", "Tiusde", "Wenesde", "Tosde", "Fraede", "Satade"}, + {}, + {"Januare", "Febuare", "Mas", "Eprel", "Mei", "Jun", "Julae", "Ogus", "Septemba", "Oktoba", "Novemba", "Disemba"}, + {}, +} + +var localeTablePl = [5][]string{ + {"niedz.", "pon.", "wt.", "śr.", "czw.", "pt.", "sob."}, + 
{"niedziela", "poniedziałek", "wtorek", "środa", "czwartek", "piątek", "sobota"}, + {"sty", "lut", "mar", "kwi", "maj", "cze", "lip", "sie", "wrz", "paź", "lis", "gru"}, + {"stycznia", "lutego", "marca", "kwietnia", "maja", "czerwca", "lipca", "sierpnia", "września", "października", "listopada", "grudnia"}, + {"a", "p"}, +} + +var localeTablePlPL = [5][]string{ + {"niedz.", "pon.", "wt.", "śr.", "czw.", "pt.", "sob."}, + {"niedziela", "poniedziałek", "wtorek", "środa", "czwartek", "piątek", "sobota"}, + {"sty", "lut", "mar", "kwi", "maj", "cze", "lip", "sie", "wrz", "paź", "lis", "gru"}, + {"stycznia", "lutego", "marca", "kwietnia", "maja", "czerwca", "lipca", "sierpnia", "września", "października", "listopada", "grudnia"}, + {"a", "p"}, +} + +var localeTablePrg = [5][]string{ + {"nad", "pan", "wis", "pus", "ket", "pēn", "sab"}, + {"nadīli", "panadīli", "wisasīdis", "pussisawaiti", "ketwirtiks", "pēntniks", "sabattika"}, + {"rag", "was", "pūl", "sak", "zal", "sīm", "līp", "dag", "sil", "spa", "lap", "sal"}, + {"rags", "wassarins", "pūlis", "sakkis", "zallaws", "sīmenis", "līpa", "daggis", "sillins", "spallins", "lapkrūtis", "sallaws"}, + {}, +} + +var localeTablePrgPL = [5][]string{ + {"nad", "pan", "wis", "pus", "ket", "pēn", "sab"}, + {"nadīli", "panadīli", "wisasīdis", "pussisawaiti", "ketwirtiks", "pēntniks", "sabattika"}, + {"rag", "was", "pūl", "sak", "zal", "sīm", "līp", "dag", "sil", "spa", "lap", "sal"}, + {"rags", "wassarins", "pūlis", "sakkis", "zallaws", "sīmenis", "līpa", "daggis", "sillins", "spallins", "lapkrūtis", "sallaws"}, + {}, +} + +var localeTablePs = [5][]string{ + {}, + {"يونۍ", "دونۍ", "درېنۍ", "څلرنۍ", "پينځنۍ", "جمعه", "اونۍ"}, + {}, + {"جنوري", "فبروري", "مارچ", "اپریل", "مۍ", "جون", "جولای", "اګست", "سېپتمبر", "اکتوبر", "نومبر", "دسمبر"}, + {"غ.م.", "غ.و."}, +} + +var localeTablePsAF = [5][]string{ + {}, + {"يونۍ", "دونۍ", "درېنۍ", "څلرنۍ", "پينځنۍ", "جمعه", "اونۍ"}, + {}, + {"جنوري", "فبروري", "مارچ", "اپریل", "مۍ", "جون", "جولای", "اګست", "سېپتمبر", "اکتوبر", "نومبر", "دسمبر"}, + {"غ.م.", "غ.و."}, +} + +var localeTablePsPK = [5][]string{ + {}, + {"يونۍ", "دونۍ", "درېنۍ", "څلرنۍ", "پينځنۍ", "جمعه", "اونۍ"}, + {}, + {"جنوري", "فبروري", "مارچ", "اپریل", "مۍ", "جون", "جولای", "اګست", "سېپتمبر", "اکتوبر", "نومبر", "دسمبر"}, + {"غ.م.", "غ.و."}, +} + +var localeTablePt = [5][]string{ + {"dom.", "seg.", "ter.", "qua.", "qui.", "sex.", "sáb."}, + {"domingo", "segunda-feira", "terça-feira", "quarta-feira", "quinta-feira", "sexta-feira", "sábado"}, + {"jan.", "fev.", "mar.", "abr.", "mai.", "jun.", "jul.", "ago.", "set.", "out.", "nov.", "dez."}, + {"janeiro", "fevereiro", "março", "abril", "maio", "junho", "julho", "agosto", "setembro", "outubro", "novembro", "dezembro"}, + {}, +} + +var localeTablePtAO = [5][]string{ + {"domingo", "segunda", "terça", "quarta", "quinta", "sexta", "sábado"}, + {"domingo", "segunda-feira", "terça-feira", "quarta-feira", "quinta-feira", "sexta-feira", "sábado"}, + {"jan.", "fev.", "mar.", "abr.", "mai.", "jun.", "jul.", "ago.", "set.", "out.", "nov.", "dez."}, + {"janeiro", "fevereiro", "março", "abril", "maio", "junho", "julho", "agosto", "setembro", "outubro", "novembro", "dezembro"}, + {"a.m.", "p.m."}, +} + +var localeTablePtBR = [5][]string{ + {"dom.", "seg.", "ter.", "qua.", "qui.", "sex.", "sáb."}, + {"domingo", "segunda-feira", "terça-feira", "quarta-feira", "quinta-feira", "sexta-feira", "sábado"}, + {"jan.", "fev.", "mar.", "abr.", "mai.", "jun.", "jul.", "ago.", "set.", "out.", "nov.", "dez."}, + {"janeiro", "fevereiro", 
"março", "abril", "maio", "junho", "julho", "agosto", "setembro", "outubro", "novembro", "dezembro"}, + {}, +} + +var localeTablePtCH = [5][]string{ + {"domingo", "segunda", "terça", "quarta", "quinta", "sexta", "sábado"}, + {"domingo", "segunda-feira", "terça-feira", "quarta-feira", "quinta-feira", "sexta-feira", "sábado"}, + {"jan.", "fev.", "mar.", "abr.", "mai.", "jun.", "jul.", "ago.", "set.", "out.", "nov.", "dez."}, + {"janeiro", "fevereiro", "março", "abril", "maio", "junho", "julho", "agosto", "setembro", "outubro", "novembro", "dezembro"}, + {"a.m.", "p.m."}, +} + +var localeTablePtCV = [5][]string{ + {"domingo", "segunda", "terça", "quarta", "quinta", "sexta", "sábado"}, + {"domingo", "segunda-feira", "terça-feira", "quarta-feira", "quinta-feira", "sexta-feira", "sábado"}, + {"jan.", "fev.", "mar.", "abr.", "mai.", "jun.", "jul.", "ago.", "set.", "out.", "nov.", "dez."}, + {"janeiro", "fevereiro", "março", "abril", "maio", "junho", "julho", "agosto", "setembro", "outubro", "novembro", "dezembro"}, + {"a.m.", "p.m."}, +} + +var localeTablePtGQ = [5][]string{ + {"domingo", "segunda", "terça", "quarta", "quinta", "sexta", "sábado"}, + {"domingo", "segunda-feira", "terça-feira", "quarta-feira", "quinta-feira", "sexta-feira", "sábado"}, + {"jan.", "fev.", "mar.", "abr.", "mai.", "jun.", "jul.", "ago.", "set.", "out.", "nov.", "dez."}, + {"janeiro", "fevereiro", "março", "abril", "maio", "junho", "julho", "agosto", "setembro", "outubro", "novembro", "dezembro"}, + {"a.m.", "p.m."}, +} + +var localeTablePtGW = [5][]string{ + {"domingo", "segunda", "terça", "quarta", "quinta", "sexta", "sábado"}, + {"domingo", "segunda-feira", "terça-feira", "quarta-feira", "quinta-feira", "sexta-feira", "sábado"}, + {"jan.", "fev.", "mar.", "abr.", "mai.", "jun.", "jul.", "ago.", "set.", "out.", "nov.", "dez."}, + {"janeiro", "fevereiro", "março", "abril", "maio", "junho", "julho", "agosto", "setembro", "outubro", "novembro", "dezembro"}, + {"a.m.", "p.m."}, +} + +var localeTablePtLU = [5][]string{ + {"domingo", "segunda", "terça", "quarta", "quinta", "sexta", "sábado"}, + {"domingo", "segunda-feira", "terça-feira", "quarta-feira", "quinta-feira", "sexta-feira", "sábado"}, + {"jan.", "fev.", "mar.", "abr.", "mai.", "jun.", "jul.", "ago.", "set.", "out.", "nov.", "dez."}, + {"janeiro", "fevereiro", "março", "abril", "maio", "junho", "julho", "agosto", "setembro", "outubro", "novembro", "dezembro"}, + {"a.m.", "p.m."}, +} + +var localeTablePtMO = [5][]string{ + {"domingo", "segunda", "terça", "quarta", "quinta", "sexta", "sábado"}, + {"domingo", "segunda-feira", "terça-feira", "quarta-feira", "quinta-feira", "sexta-feira", "sábado"}, + {"jan.", "fev.", "mar.", "abr.", "mai.", "jun.", "jul.", "ago.", "set.", "out.", "nov.", "dez."}, + {"janeiro", "fevereiro", "março", "abril", "maio", "junho", "julho", "agosto", "setembro", "outubro", "novembro", "dezembro"}, + {"a.m.", "p.m."}, +} + +var localeTablePtMZ = [5][]string{ + {"domingo", "segunda", "terça", "quarta", "quinta", "sexta", "sábado"}, + {"domingo", "segunda-feira", "terça-feira", "quarta-feira", "quinta-feira", "sexta-feira", "sábado"}, + {"jan.", "fev.", "mar.", "abr.", "mai.", "jun.", "jul.", "ago.", "set.", "out.", "nov.", "dez."}, + {"janeiro", "fevereiro", "março", "abril", "maio", "junho", "julho", "agosto", "setembro", "outubro", "novembro", "dezembro"}, + {"a.m.", "p.m."}, +} + +var localeTablePtPT = [5][]string{ + {"domingo", "segunda", "terça", "quarta", "quinta", "sexta", "sábado"}, + {"domingo", "segunda-feira", "terça-feira", 
"quarta-feira", "quinta-feira", "sexta-feira", "sábado"}, + {"jan.", "fev.", "mar.", "abr.", "mai.", "jun.", "jul.", "ago.", "set.", "out.", "nov.", "dez."}, + {"janeiro", "fevereiro", "março", "abril", "maio", "junho", "julho", "agosto", "setembro", "outubro", "novembro", "dezembro"}, + {"a.m.", "p.m."}, +} + +var localeTablePtST = [5][]string{ + {"domingo", "segunda", "terça", "quarta", "quinta", "sexta", "sábado"}, + {"domingo", "segunda-feira", "terça-feira", "quarta-feira", "quinta-feira", "sexta-feira", "sábado"}, + {"jan.", "fev.", "mar.", "abr.", "mai.", "jun.", "jul.", "ago.", "set.", "out.", "nov.", "dez."}, + {"janeiro", "fevereiro", "março", "abril", "maio", "junho", "julho", "agosto", "setembro", "outubro", "novembro", "dezembro"}, + {"a.m.", "p.m."}, +} + +var localeTablePtTL = [5][]string{ + {"domingo", "segunda", "terça", "quarta", "quinta", "sexta", "sábado"}, + {"domingo", "segunda-feira", "terça-feira", "quarta-feira", "quinta-feira", "sexta-feira", "sábado"}, + {"jan.", "fev.", "mar.", "abr.", "mai.", "jun.", "jul.", "ago.", "set.", "out.", "nov.", "dez."}, + {"janeiro", "fevereiro", "março", "abril", "maio", "junho", "julho", "agosto", "setembro", "outubro", "novembro", "dezembro"}, + {"a.m.", "p.m."}, +} + +var localeTableQu = [5][]string{ + {"Dom", "Lun", "Mar", "Mié", "Jue", "Vie", "Sab"}, + {"Domingo", "Lunes", "Martes", "Miércoles", "Jueves", "Viernes", "Sábado"}, + {"Ene", "Feb", "Mar", "Abr", "May", "Jun", "Jul", "Ago", "Set", "Oct", "Nov", "Dic"}, + {"Enero", "Febrero", "Marzo", "Abril", "Mayo", "Junio", "Julio", "Agosto", "Setiembre", "Octubre", "Noviembre", "Diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableQuBO = [5][]string{ + {"Dom", "Lun", "Mar", "Mié", "Jue", "Vie", "Sab"}, + {"Domingo", "Lunes", "Martes", "Miércoles", "Jueves", "Viernes", "Sábado"}, + {"Ene", "Feb", "Mar", "Abr", "May", "Jun", "Jul", "Ago", "Set", "Oct", "Nov", "Dic"}, + {"Enero", "Febrero", "Marzo", "Abril", "Mayo", "Junio", "Julio", "Agosto", "Setiembre", "Octubre", "Noviembre", "Diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableQuEC = [5][]string{ + {"Dom", "Lun", "Mar", "Mié", "Jue", "Vie", "Sab"}, + {"Domingo", "Lunes", "Martes", "Miércoles", "Jueves", "Viernes", "Sábado"}, + {"Ene", "Feb", "Mar", "Abr", "May", "Jun", "Jul", "Ago", "Set", "Oct", "Nov", "Dic"}, + {"Enero", "Febrero", "Marzo", "Abril", "Mayo", "Junio", "Julio", "Agosto", "Setiembre", "Octubre", "Noviembre", "Diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableQuPE = [5][]string{ + {"Dom", "Lun", "Mar", "Mié", "Jue", "Vie", "Sab"}, + {"Domingo", "Lunes", "Martes", "Miércoles", "Jueves", "Viernes", "Sábado"}, + {"Ene", "Feb", "Mar", "Abr", "May", "Jun", "Jul", "Ago", "Set", "Oct", "Nov", "Dic"}, + {"Enero", "Febrero", "Marzo", "Abril", "Mayo", "Junio", "Julio", "Agosto", "Setiembre", "Octubre", "Noviembre", "Diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableRaj = [5][]string{ + {}, + {"रविवार", "सोमवार", "मंगलवार", "बुधवार", "गुरुवार", "शुक्रवार", "शनिवार"}, + {}, + {"जनवरी", "फरवरी", "मार्च", "अप्रैल", "मई", "जून", "जुलाई", "अगस्त", "सितम्बर", "अक्टूबर", "नवंबर", "दिसंबर"}, + {}, +} + +var localeTableRajIN = [5][]string{ + {}, + {"रविवार", "सोमवार", "मंगलवार", "बुधवार", "गुरुवार", "शुक्रवार", "शनिवार"}, + {}, + {"जनवरी", "फरवरी", "मार्च", "अप्रैल", "मई", "जून", "जुलाई", "अगस्त", "सितम्बर", "अक्टूबर", "नवंबर", "दिसंबर"}, + {}, +} + +var localeTableRif = [5][]string{ + {"lḥe", "let", "ttl", "lar", "lex", "jje", "sse"}, + {"lḥed", "letnayen", "ttlat", "larbeɛ", "lexmis", "jjemɛa", "ssebt"}, + {"yen", "feb", 
"mar", "yeb", "may", "yun", "yul", "ɣuc", "cut", "kṭu", "nuw", "duj"}, + {"yennayer", "febrayer", "mars", "yebril", "mayyu", "yunyu", "yulyuz", "ɣucct", "cutenber", "kṭuber", "nuwember", "dujember"}, + {"a", "p"}, +} + +var localeTableRifMA = [5][]string{ + {"lḥe", "let", "ttl", "lar", "lex", "jje", "sse"}, + {"lḥed", "letnayen", "ttlat", "larbeɛ", "lexmis", "jjemɛa", "ssebt"}, + {"yen", "feb", "mar", "yeb", "may", "yun", "yul", "ɣuc", "cut", "kṭu", "nuw", "duj"}, + {"yennayer", "febrayer", "mars", "yebril", "mayyu", "yunyu", "yulyuz", "ɣucct", "cutenber", "kṭuber", "nuwember", "dujember"}, + {"a", "p"}, +} + +var localeTableRm = [5][]string{ + {"du", "gli", "ma", "me", "gie", "ve", "so"}, + {"dumengia", "glindesdi", "mardi", "mesemna", "gievgia", "venderdi", "sonda"}, + {"schan.", "favr.", "mars", "avr.", "matg", "zercl.", "fan.", "avust", "sett.", "oct.", "nov.", "dec."}, + {"da schaner", "da favrer", "da mars", "d’avrigl", "da matg", "da zercladur", "da fanadur", "d’avust", "da settember", "d’october", "da november", "da december"}, + {}, +} + +var localeTableRmCH = [5][]string{ + {"du", "gli", "ma", "me", "gie", "ve", "so"}, + {"dumengia", "glindesdi", "mardi", "mesemna", "gievgia", "venderdi", "sonda"}, + {"schan.", "favr.", "mars", "avr.", "matg", "zercl.", "fan.", "avust", "sett.", "oct.", "nov.", "dec."}, + {"da schaner", "da favrer", "da mars", "d’avrigl", "da matg", "da zercladur", "da fanadur", "d’avust", "da settember", "d’october", "da november", "da december"}, + {}, +} + +var localeTableRn = [5][]string{ + {"cu.", "mbe.", "kab.", "gtu.", "kan.", "gnu.", "gnd."}, + {"Ku w’indwi", "Ku wa mbere", "Ku wa kabiri", "Ku wa gatatu", "Ku wa kane", "Ku wa gatanu", "Ku wa gatandatu"}, + {"Mut.", "Gas.", "Wer.", "Mat.", "Gic.", "Kam.", "Nya.", "Kan.", "Nze.", "Ukw.", "Ugu.", "Uku."}, + {"Nzero", "Ruhuhuma", "Ntwarante", "Ndamukiza", "Rusama", "Ruheshi", "Mukakaro", "Nyandagaro", "Nyakanga", "Gitugutu", "Munyonyo", "Kigarama"}, + {"Z.MU.", "Z.MW."}, +} + +var localeTableRnBI = [5][]string{ + {"cu.", "mbe.", "kab.", "gtu.", "kan.", "gnu.", "gnd."}, + {"Ku w’indwi", "Ku wa mbere", "Ku wa kabiri", "Ku wa gatatu", "Ku wa kane", "Ku wa gatanu", "Ku wa gatandatu"}, + {"Mut.", "Gas.", "Wer.", "Mat.", "Gic.", "Kam.", "Nya.", "Kan.", "Nze.", "Ukw.", "Ugu.", "Uku."}, + {"Nzero", "Ruhuhuma", "Ntwarante", "Ndamukiza", "Rusama", "Ruheshi", "Mukakaro", "Nyandagaro", "Nyakanga", "Gitugutu", "Munyonyo", "Kigarama"}, + {"Z.MU.", "Z.MW."}, +} + +var localeTableRo = [5][]string{ + {"dum.", "lun.", "mar.", "mie.", "joi", "vin.", "sâm."}, + {"duminică", "luni", "marți", "miercuri", "joi", "vineri", "sâmbătă"}, + {"ian.", "feb.", "mar.", "apr.", "mai", "iun.", "iul.", "aug.", "sept.", "oct.", "nov.", "dec."}, + {"ianuarie", "februarie", "martie", "aprilie", "mai", "iunie", "iulie", "august", "septembrie", "octombrie", "noiembrie", "decembrie"}, + {"a.m.", "p.m."}, +} + +var localeTableRoMD = [5][]string{ + {"Dum", "Lun", "Mar", "Mie", "Joi", "Vin", "Sâm"}, + {"duminică", "luni", "marți", "miercuri", "joi", "vineri", "sâmbătă"}, + {"ian.", "feb.", "mar.", "apr.", "mai", "iun.", "iul.", "aug.", "sept.", "oct.", "nov.", "dec."}, + {"ianuarie", "februarie", "martie", "aprilie", "mai", "iunie", "iulie", "august", "septembrie", "octombrie", "noiembrie", "decembrie"}, + {"a.m.", "p.m."}, +} + +var localeTableRoRO = [5][]string{ + {"dum.", "lun.", "mar.", "mie.", "joi", "vin.", "sâm."}, + {"duminică", "luni", "marți", "miercuri", "joi", "vineri", "sâmbătă"}, + {"ian.", "feb.", "mar.", "apr.", "mai", "iun.", "iul.", 
"aug.", "sept.", "oct.", "nov.", "dec."}, + {"ianuarie", "februarie", "martie", "aprilie", "mai", "iunie", "iulie", "august", "septembrie", "octombrie", "noiembrie", "decembrie"}, + {"a.m.", "p.m."}, +} + +var localeTableRof = [5][]string{ + {"Ijp", "Ijt", "Ijn", "Ijtn", "Alh", "Iju", "Ijm"}, + {"Ijumapili", "Ijumatatu", "Ijumanne", "Ijumatano", "Alhamisi", "Ijumaa", "Ijumamosi"}, + {"M1", "M2", "M3", "M4", "M5", "M6", "M7", "M8", "M9", "M10", "M11", "M12"}, + {"Mweri wa kwanza", "Mweri wa kaili", "Mweri wa katatu", "Mweri wa kaana", "Mweri wa tanu", "Mweri wa sita", "Mweri wa saba", "Mweri wa nane", "Mweri wa tisa", "Mweri wa ikumi", "Mweri wa ikumi na moja", "Mweri wa ikumi na mbili"}, + {"kang’ama", "kingoto"}, +} + +var localeTableRofTZ = [5][]string{ + {"Ijp", "Ijt", "Ijn", "Ijtn", "Alh", "Iju", "Ijm"}, + {"Ijumapili", "Ijumatatu", "Ijumanne", "Ijumatano", "Alhamisi", "Ijumaa", "Ijumamosi"}, + {"M1", "M2", "M3", "M4", "M5", "M6", "M7", "M8", "M9", "M10", "M11", "M12"}, + {"Mweri wa kwanza", "Mweri wa kaili", "Mweri wa katatu", "Mweri wa kaana", "Mweri wa tanu", "Mweri wa sita", "Mweri wa saba", "Mweri wa nane", "Mweri wa tisa", "Mweri wa ikumi", "Mweri wa ikumi na moja", "Mweri wa ikumi na mbili"}, + {"kang’ama", "kingoto"}, +} + +var localeTableRu = [5][]string{ + {"вс", "пн", "вт", "ср", "чт", "пт", "сб"}, + {"воскресенье", "понедельник", "вторник", "среда", "четверг", "пятница", "суббота"}, + {"янв.", "февр.", "мар.", "апр.", "мая", "июн.", "июл.", "авг.", "сент.", "окт.", "нояб.", "дек."}, + {"января", "февраля", "марта", "апреля", "мая", "июня", "июля", "августа", "сентября", "октября", "ноября", "декабря"}, + {}, +} + +var localeTableRuBY = [5][]string{ + {"вс", "пн", "вт", "ср", "чт", "пт", "сб"}, + {"воскресенье", "понедельник", "вторник", "среда", "четверг", "пятница", "суббота"}, + {"янв.", "февр.", "мар.", "апр.", "мая", "июн.", "июл.", "авг.", "сент.", "окт.", "нояб.", "дек."}, + {"января", "февраля", "марта", "апреля", "мая", "июня", "июля", "августа", "сентября", "октября", "ноября", "декабря"}, + {}, +} + +var localeTableRuKG = [5][]string{ + {"вс", "пн", "вт", "ср", "чт", "пт", "сб"}, + {"воскресенье", "понедельник", "вторник", "среда", "четверг", "пятница", "суббота"}, + {"янв.", "февр.", "мар.", "апр.", "мая", "июн.", "июл.", "авг.", "сент.", "окт.", "нояб.", "дек."}, + {"января", "февраля", "марта", "апреля", "мая", "июня", "июля", "августа", "сентября", "октября", "ноября", "декабря"}, + {}, +} + +var localeTableRuKZ = [5][]string{ + {"вс", "пн", "вт", "ср", "чт", "пт", "сб"}, + {"воскресенье", "понедельник", "вторник", "среда", "четверг", "пятница", "суббота"}, + {"янв.", "февр.", "мар.", "апр.", "мая", "июн.", "июл.", "авг.", "сент.", "окт.", "нояб.", "дек."}, + {"января", "февраля", "марта", "апреля", "мая", "июня", "июля", "августа", "сентября", "октября", "ноября", "декабря"}, + {}, +} + +var localeTableRuMD = [5][]string{ + {"вс", "пн", "вт", "ср", "чт", "пт", "сб"}, + {"воскресенье", "понедельник", "вторник", "среда", "четверг", "пятница", "суббота"}, + {"янв.", "февр.", "мар.", "апр.", "мая", "июн.", "июл.", "авг.", "сент.", "окт.", "нояб.", "дек."}, + {"января", "февраля", "марта", "апреля", "мая", "июня", "июля", "августа", "сентября", "октября", "ноября", "декабря"}, + {}, +} + +var localeTableRuRU = [5][]string{ + {"вс", "пн", "вт", "ср", "чт", "пт", "сб"}, + {"воскресенье", "понедельник", "вторник", "среда", "четверг", "пятница", "суббота"}, + {"янв.", "февр.", "мар.", "апр.", "мая", "июн.", "июл.", "авг.", "сент.", "окт.", "нояб.", "дек."}, + 
{"января", "февраля", "марта", "апреля", "мая", "июня", "июля", "августа", "сентября", "октября", "ноября", "декабря"}, + {}, +} + +var localeTableRuUA = [5][]string{ + {"вс", "пн", "вт", "ср", "чт", "пт", "сб"}, + {"воскресенье", "понедельник", "вторник", "среда", "четверг", "пятница", "суббота"}, + {"янв.", "февр.", "мар.", "апр.", "мая", "июн.", "июл.", "авг.", "сент.", "окт.", "нояб.", "дек."}, + {"января", "февраля", "марта", "апреля", "мая", "июня", "июля", "августа", "сентября", "октября", "ноября", "декабря"}, + {}, +} + +var localeTableRw = [5][]string{ + {"cyu.", "mbe.", "kab.", "gtu.", "kan.", "gnu.", "gnd."}, + {"Ku cyumweru", "Kuwa mbere", "Kuwa kabiri", "Kuwa gatatu", "Kuwa kane", "Kuwa gatanu", "Kuwa gatandatu"}, + {"mut.", "gas.", "wer.", "mat.", "gic.", "kam.", "nya.", "kan.", "nze.", "ukw.", "ugu.", "uku."}, + {"Mutarama", "Gashyantare", "Werurwe", "Mata", "Gicurasi", "Kamena", "Nyakanga", "Kanama", "Nzeli", "Ukwakira", "Ugushyingo", "Ukuboza"}, + {}, +} + +var localeTableRwRW = [5][]string{ + {"cyu.", "mbe.", "kab.", "gtu.", "kan.", "gnu.", "gnd."}, + {"Ku cyumweru", "Kuwa mbere", "Kuwa kabiri", "Kuwa gatatu", "Kuwa kane", "Kuwa gatanu", "Kuwa gatandatu"}, + {"mut.", "gas.", "wer.", "mat.", "gic.", "kam.", "nya.", "kan.", "nze.", "ukw.", "ugu.", "uku."}, + {"Mutarama", "Gashyantare", "Werurwe", "Mata", "Gicurasi", "Kamena", "Nyakanga", "Kanama", "Nzeli", "Ukwakira", "Ugushyingo", "Ukuboza"}, + {}, +} + +var localeTableRwk = [5][]string{ + {"Jpi", "Jtt", "Jnn", "Jtn", "Alh", "Iju", "Jmo"}, + {"Jumapilyi", "Jumatatuu", "Jumanne", "Jumatanu", "Alhamisi", "Ijumaa", "Jumamosi"}, + {"Jan", "Feb", "Mac", "Apr", "Mei", "Jun", "Jul", "Ago", "Sep", "Okt", "Nov", "Des"}, + {"Januari", "Februari", "Machi", "Aprilyi", "Mei", "Junyi", "Julyai", "Agusti", "Septemba", "Oktoba", "Novemba", "Desemba"}, + {"utuko", "kyiukonyi"}, +} + +var localeTableRwkTZ = [5][]string{ + {"Jpi", "Jtt", "Jnn", "Jtn", "Alh", "Iju", "Jmo"}, + {"Jumapilyi", "Jumatatuu", "Jumanne", "Jumatanu", "Alhamisi", "Ijumaa", "Jumamosi"}, + {"Jan", "Feb", "Mac", "Apr", "Mei", "Jun", "Jul", "Ago", "Sep", "Okt", "Nov", "Des"}, + {"Januari", "Februari", "Machi", "Aprilyi", "Mei", "Junyi", "Julyai", "Agusti", "Septemba", "Oktoba", "Novemba", "Desemba"}, + {"utuko", "kyiukonyi"}, +} + +var localeTableSa = [5][]string{ + {"रवि", "सोम", "मंगल", "बुध", "गुरु", "शुक्र", "शनि"}, + {"रविवासरः", "सोमवासरः", "मंगलवासरः", "बुधवासरः", "गुरुवासर:", "शुक्रवासरः", "शनिवासरः"}, + {"जनवरी:", "फरवरी:", "मार्च:", "अप्रैल:", "मई", "जून:", "जुलाई:", "अगस्त:", "सितंबर:", "अक्तूबर:", "नवंबर:", "दिसंबर:"}, + {"जनवरीमासः", "फरवरीमासः", "मार्चमासः", "अप्रैलमासः", "मईमासः", "जूनमासः", "जुलाईमासः", "अगस्तमासः", "सितंबरमासः", "अक्तूबरमासः", "नवंबरमासः", "दिसंबरमासः"}, + {}, +} + +var localeTableSaIN = [5][]string{ + {"रवि", "सोम", "मंगल", "बुध", "गुरु", "शुक्र", "शनि"}, + {"रविवासरः", "सोमवासरः", "मंगलवासरः", "बुधवासरः", "गुरुवासर:", "शुक्रवासरः", "शनिवासरः"}, + {"जनवरी:", "फरवरी:", "मार्च:", "अप्रैल:", "मई", "जून:", "जुलाई:", "अगस्त:", "सितंबर:", "अक्तूबर:", "नवंबर:", "दिसंबर:"}, + {"जनवरीमासः", "फरवरीमासः", "मार्चमासः", "अप्रैलमासः", "मईमासः", "जूनमासः", "जुलाईमासः", "अगस्तमासः", "सितंबरमासः", "अक्तूबरमासः", "नवंबरमासः", "दिसंबरमासः"}, + {}, +} + +var localeTableSah = [5][]string{ + {"бс", "бн", "оп", "сэ", "чп", "бэ", "сб"}, + {"баскыһыанньа", "бэнидиэнньик", "оптуорунньук", "сэрэдэ", "чэппиэр", "Бээтиҥсэ", "субуота"}, + {"Тохс", "Олун", "Клн", "Мсу", "Ыам", "Бэс", "Отй", "Атр", "Блҕ", "Алт", "Сэт", "Ахс"}, + {"Тохсунньу", "Олунньу", "Кулун тутар", 
"Муус устар", "Ыам ыйын", "Бэс ыйын", "От ыйын", "Атырдьых ыйын", "Балаҕан ыйын", "Алтынньы", "Сэтинньи", "ахсынньы"}, + {"ЭИ", "ЭК"}, +} + +var localeTableSahRU = [5][]string{ + {"бс", "бн", "оп", "сэ", "чп", "бэ", "сб"}, + {"баскыһыанньа", "бэнидиэнньик", "оптуорунньук", "сэрэдэ", "чэппиэр", "Бээтиҥсэ", "субуота"}, + {"Тохс", "Олун", "Клн", "Мсу", "Ыам", "Бэс", "Отй", "Атр", "Блҕ", "Алт", "Сэт", "Ахс"}, + {"Тохсунньу", "Олунньу", "Кулун тутар", "Муус устар", "Ыам ыйын", "Бэс ыйын", "От ыйын", "Атырдьых ыйын", "Балаҕан ыйын", "Алтынньы", "Сэтинньи", "ахсынньы"}, + {"ЭИ", "ЭК"}, +} + +var localeTableSaq = [5][]string{ + {"Are", "Kun", "Ong", "Ine", "Ile", "Sap", "Kwe"}, + {"Mderot ee are", "Mderot ee kuni", "Mderot ee ong’wan", "Mderot ee inet", "Mderot ee ile", "Mderot ee sapa", "Mderot ee kwe"}, + {"Obo", "Waa", "Oku", "Ong", "Ime", "Ile", "Sap", "Isi", "Saa", "Tom", "Tob", "Tow"}, + {"Lapa le obo", "Lapa le waare", "Lapa le okuni", "Lapa le ong’wan", "Lapa le imet", "Lapa le ile", "Lapa le sapa", "Lapa le isiet", "Lapa le saal", "Lapa le tomon", "Lapa le tomon obo", "Lapa le tomon waare"}, + {"Tesiran", "Teipa"}, +} + +var localeTableSaqKE = [5][]string{ + {"Are", "Kun", "Ong", "Ine", "Ile", "Sap", "Kwe"}, + {"Mderot ee are", "Mderot ee kuni", "Mderot ee ong’wan", "Mderot ee inet", "Mderot ee ile", "Mderot ee sapa", "Mderot ee kwe"}, + {"Obo", "Waa", "Oku", "Ong", "Ime", "Ile", "Sap", "Isi", "Saa", "Tom", "Tob", "Tow"}, + {"Lapa le obo", "Lapa le waare", "Lapa le okuni", "Lapa le ong’wan", "Lapa le imet", "Lapa le ile", "Lapa le sapa", "Lapa le isiet", "Lapa le saal", "Lapa le tomon", "Lapa le tomon obo", "Lapa le tomon waare"}, + {"Tesiran", "Teipa"}, +} + +var localeTableSat = [5][]string{ + {"ᱥᱤᱸ", "ᱚᱛ", "ᱵᱟ", "ᱥᱟᱹ", "ᱥᱟᱹᱨ", "ᱡᱟᱹ", "ᱧᱩ"}, + {"ᱥᱤᱸᱜᱮ", "ᱚᱛᱮ", "ᱵᱟᱞᱮ", "ᱥᱟᱹᱜᱩᱱ", "ᱥᱟᱹᱨᱫᱤ", "ᱡᱟᱹᱨᱩᱢ", "ᱧᱩᱦᱩᱢ"}, + {"ᱡᱟᱱ", "ᱯᱷᱟ", "ᱢᱟᱨ", "ᱟᱯᱨ", "ᱢᱮ", "ᱡᱩᱱ", "ᱡᱩᱞ", "ᱟᱜᱟ", "ᱥᱮᱯ", "ᱚᱠᱴ", "ᱱᱟᱣ", "ᱫᱤᱥ"}, + {"ᱡᱟᱱᱣᱟᱨᱤ", "ᱯᱷᱟᱨᱣᱟᱨᱤ", "ᱢᱟᱨᱪ", "ᱟᱯᱨᱮᱞ", "ᱢᱮ", "ᱡᱩᱱ", "ᱡᱩᱞᱟᱭ", "ᱟᱜᱟᱥᱛ", "ᱥᱮᱯᱴᱮᱢᱵᱟᱨ", "ᱚᱠᱴᱚᱵᱟᱨ", "ᱱᱟᱣᱟᱢᱵᱟᱨ", "ᱫᱤᱥᱟᱢᱵᱟᱨ"}, + {}, +} + +var localeTableSbp = [5][]string{ + {"Mul", "Jtt", "Jnn", "Jtn", "Alh", "Iju", "Jmo"}, + {"Mulungu", "Jumatatu", "Jumanne", "Jumatano", "Alahamisi", "Ijumaa", "Jumamosi"}, + {"Mup", "Mwi", "Msh", "Mun", "Mag", "Muj", "Msp", "Mpg", "Mye", "Mok", "Mus", "Muh"}, + {"Mupalangulwa", "Mwitope", "Mushende", "Munyi", "Mushende Magali", "Mujimbi", "Mushipepo", "Mupuguto", "Munyense", "Mokhu", "Musongandembwe", "Muhaano"}, + {"Lwamilawu", "Pashamihe"}, +} + +var localeTableSbpTZ = [5][]string{ + {"Mul", "Jtt", "Jnn", "Jtn", "Alh", "Iju", "Jmo"}, + {"Mulungu", "Jumatatu", "Jumanne", "Jumatano", "Alahamisi", "Ijumaa", "Jumamosi"}, + {"Mup", "Mwi", "Msh", "Mun", "Mag", "Muj", "Msp", "Mpg", "Mye", "Mok", "Mus", "Muh"}, + {"Mupalangulwa", "Mwitope", "Mushende", "Munyi", "Mushende Magali", "Mujimbi", "Mushipepo", "Mupuguto", "Munyense", "Mokhu", "Musongandembwe", "Muhaano"}, + {"Lwamilawu", "Pashamihe"}, +} + +var localeTableSc = [5][]string{ + {"dom", "lun", "mar", "mèr", "giò", "che", "sàb"}, + {"domìniga", "lunis", "martis", "mèrcuris", "giòbia", "chenàbura", "sàbadu"}, + {"ghe", "fre", "mar", "abr", "maj", "làm", "trì", "aus", "cab", "stG", "stA", "nad"}, + {"ghennàrgiu", "freàrgiu", "martzu", "abrile", "maju", "làmpadas", "trìulas", "austu", "cabudanni", "santugaine", "santandria", "nadale"}, + {"m.", "b."}, +} + +var localeTableScIT = [5][]string{ + {"dom", "lun", "mar", "mèr", "giò", "che", "sàb"}, + {"domìniga", "lunis", "martis", "mèrcuris", "giòbia", "chenàbura", 
"sàbadu"}, + {"ghe", "fre", "mar", "abr", "maj", "làm", "trì", "aus", "cab", "stG", "stA", "nad"}, + {"ghennàrgiu", "freàrgiu", "martzu", "abrile", "maju", "làmpadas", "trìulas", "austu", "cabudanni", "santugaine", "santandria", "nadale"}, + {"m.", "b."}, +} + +var localeTableScn = [5][]string{ + {}, + {"dumìnica", "lunnidìa", "martidìa", "mercuridìa", "jovidìa", "vennidìa", "sàbbatu"}, + {"jin", "fri", "mar", "apr", "maj", "giu", "gnt", "agu", "sit", "utt", "nuv", "dic"}, + {"jinnaru", "frivaru", "marzu", "aprili", "maju", "giugnu", "giugnettu", "agustu", "sittèmmiru", "uttòviru", "nuvèmmiru", "dicèmmiru"}, + {}, +} + +var localeTableScnIT = [5][]string{ + {}, + {"dumìnica", "lunnidìa", "martidìa", "mercuridìa", "jovidìa", "vennidìa", "sàbbatu"}, + {"jin", "fri", "mar", "apr", "maj", "giu", "gnt", "agu", "sit", "utt", "nuv", "dic"}, + {"jinnaru", "frivaru", "marzu", "aprili", "maju", "giugnu", "giugnettu", "agustu", "sittèmmiru", "uttòviru", "nuvèmmiru", "dicèmmiru"}, + {}, +} + +var localeTableSd = [5][]string{ + {}, + {"آچر", "سومر", "اڱارو", "اربع", "خميس", "جمعو", "ڇنڇر"}, + {}, + {"جنوري", "فيبروري", "مارچ", "اپريل", "مئي", "جون", "جولاءِ", "آگسٽ", "سيپٽمبر", "آڪٽوبر", "نومبر", "ڊسمبر"}, + {"صبح،منجهند", "شام،منجهند"}, +} + +var localeTableSdArab = [5][]string{ + {}, + {"آچر", "سومر", "اڱارو", "اربع", "خميس", "جمعو", "ڇنڇر"}, + {}, + {"جنوري", "فيبروري", "مارچ", "اپريل", "مئي", "جون", "جولاءِ", "آگسٽ", "سيپٽمبر", "آڪٽوبر", "نومبر", "ڊسمبر"}, + {"صبح،منجهند", "شام،منجهند"}, +} + +var localeTableSdArabPK = [5][]string{ + {}, + {"آچر", "سومر", "اڱارو", "اربع", "خميس", "جمعو", "ڇنڇر"}, + {}, + {"جنوري", "فيبروري", "مارچ", "اپريل", "مئي", "جون", "جولاءِ", "آگسٽ", "سيپٽمبر", "آڪٽوبر", "نومبر", "ڊسمبر"}, + {"صبح،منجهند", "شام،منجهند"}, +} + +var localeTableSdDeva = [5][]string{ + {"आर्त", "सू", "मंग", "बु॒ध", "विस", "जुम", "छंछ"}, + {"आर्तवार", "सूमर", "मंगलु", "बु॒धर", "विस्पत", "जुमो", "छंछर"}, + {"जन", "फर", "मार्च", "अप्रै", "मई", "जून", "जु", "अग", "सप्टे", "ऑक्टो", "नवं", "डिसं"}, + {"जनवरी", "फरवरी", "मार्चु", "अप्रैल", "मई", "जून", "जुलाई", "अगस्ट", "सप्टेंबर", "ओक्टोबर", "नवंबर", "डिसंबर"}, + {}, +} + +var localeTableSdDevaIN = [5][]string{ + {"आर्त", "सू", "मंग", "बु॒ध", "विस", "जुम", "छंछ"}, + {"आर्तवार", "सूमर", "मंगलु", "बु॒धर", "विस्पत", "जुमो", "छंछर"}, + {"जन", "फर", "मार्च", "अप्रै", "मई", "जून", "जु", "अग", "सप्टे", "ऑक्टो", "नवं", "डिसं"}, + {"जनवरी", "फरवरी", "मार्चु", "अप्रैल", "मई", "जून", "जुलाई", "अगस्ट", "सप्टेंबर", "ओक्टोबर", "नवंबर", "डिसंबर"}, + {}, +} + +var localeTableSe = [5][]string{ + {"sotn", "vuos", "maŋ", "gask", "duor", "bear", "láv"}, + {"sotnabeaivi", "vuossárga", "maŋŋebárga", "gaskavahkku", "duorasdat", "bearjadat", "lávvardat"}, + {"ođđj", "guov", "njuk", "cuo", "mies", "geas", "suoi", "borg", "čakč", "golg", "skáb", "juov"}, + {"ođđajagemánnu", "guovvamánnu", "njukčamánnu", "cuoŋománnu", "miessemánnu", "geassemánnu", "suoidnemánnu", "borgemánnu", "čakčamánnu", "golggotmánnu", "skábmamánnu", "juovlamánnu"}, + {"i.b.", "e.b."}, +} + +var localeTableSeFI = [5][]string{ + {"so", "má", "di", "ga", "du", "be", "lá"}, + {"sotnabeaivi", "mánnodat", "disdat", "gaskavahkku", "duorastat", "bearjadat", "lávvordat"}, + {"ođđj", "guov", "njuk", "cuoŋ", "mies", "geas", "suoi", "borg", "čakč", "golg", "skáb", "juov"}, + {"ođđajagemánnu", "guovvamánnu", "njukčamánnu", "cuoŋománnu", "miessemánnu", "geassemánnu", "suoidnemánnu", "borgemánnu", "čakčamánnu", "golggotmánnu", "skábmamánnu", "juovlamánnu"}, + {"ib", "eb"}, +} + +var localeTableSeNO = [5][]string{ + 
{"sotn", "vuos", "maŋ", "gask", "duor", "bear", "láv"}, + {"sotnabeaivi", "vuossárga", "maŋŋebárga", "gaskavahkku", "duorasdat", "bearjadat", "lávvardat"}, + {"ođđj", "guov", "njuk", "cuo", "mies", "geas", "suoi", "borg", "čakč", "golg", "skáb", "juov"}, + {"ođđajagemánnu", "guovvamánnu", "njukčamánnu", "cuoŋománnu", "miessemánnu", "geassemánnu", "suoidnemánnu", "borgemánnu", "čakčamánnu", "golggotmánnu", "skábmamánnu", "juovlamánnu"}, + {"i.b.", "e.b."}, +} + +var localeTableSeSE = [5][]string{ + {"sotn", "vuos", "maŋ", "gask", "duor", "bear", "láv"}, + {"sotnabeaivi", "vuossárga", "maŋŋebárga", "gaskavahkku", "duorasdat", "bearjadat", "lávvardat"}, + {"ođđj", "guov", "njuk", "cuo", "mies", "geas", "suoi", "borg", "čakč", "golg", "skáb", "juov"}, + {"ođđajagemánnu", "guovvamánnu", "njukčamánnu", "cuoŋománnu", "miessemánnu", "geassemánnu", "suoidnemánnu", "borgemánnu", "čakčamánnu", "golggotmánnu", "skábmamánnu", "juovlamánnu"}, + {"i.b.", "e.b."}, +} + +var localeTableSeh = [5][]string{ + {"Dim", "Pos", "Pir", "Tat", "Nai", "Sha", "Sab"}, + {"Dimingu", "Chiposi", "Chipiri", "Chitatu", "Chinai", "Chishanu", "Sabudu"}, + {"Jan", "Fev", "Mar", "Abr", "Mai", "Jun", "Jul", "Aug", "Set", "Otu", "Nov", "Dec"}, + {"Janeiro", "Fevreiro", "Marco", "Abril", "Maio", "Junho", "Julho", "Augusto", "Setembro", "Otubro", "Novembro", "Decembro"}, + {}, +} + +var localeTableSehMZ = [5][]string{ + {"Dim", "Pos", "Pir", "Tat", "Nai", "Sha", "Sab"}, + {"Dimingu", "Chiposi", "Chipiri", "Chitatu", "Chinai", "Chishanu", "Sabudu"}, + {"Jan", "Fev", "Mar", "Abr", "Mai", "Jun", "Jul", "Aug", "Set", "Otu", "Nov", "Dec"}, + {"Janeiro", "Fevreiro", "Marco", "Abril", "Maio", "Junho", "Julho", "Augusto", "Setembro", "Otubro", "Novembro", "Decembro"}, + {}, +} + +var localeTableSes = [5][]string{ + {"Alh", "Ati", "Ata", "Ala", "Alm", "Alz", "Asi"}, + {"Alhadi", "Atinni", "Atalaata", "Alarba", "Alhamiisa", "Alzuma", "Asibti"}, + {"Žan", "Fee", "Mar", "Awi", "Me", "Žuw", "Žuy", "Ut", "Sek", "Okt", "Noo", "Dee"}, + {"Žanwiye", "Feewiriye", "Marsi", "Awiril", "Me", "Žuweŋ", "Žuyye", "Ut", "Sektanbur", "Oktoobur", "Noowanbur", "Deesanbur"}, + {"Adduha", "Aluula"}, +} + +var localeTableSesML = [5][]string{ + {"Alh", "Ati", "Ata", "Ala", "Alm", "Alz", "Asi"}, + {"Alhadi", "Atinni", "Atalaata", "Alarba", "Alhamiisa", "Alzuma", "Asibti"}, + {"Žan", "Fee", "Mar", "Awi", "Me", "Žuw", "Žuy", "Ut", "Sek", "Okt", "Noo", "Dee"}, + {"Žanwiye", "Feewiriye", "Marsi", "Awiril", "Me", "Žuweŋ", "Žuyye", "Ut", "Sektanbur", "Oktoobur", "Noowanbur", "Deesanbur"}, + {"Adduha", "Aluula"}, +} + +var localeTableSg = [5][]string{ + {"Bk1", "Bk2", "Bk3", "Bk4", "Bk5", "Lâp", "Lây"}, + {"Bikua-ôko", "Bïkua-ûse", "Bïkua-ptâ", "Bïkua-usïö", "Bïkua-okü", "Lâpôsö", "Lâyenga"}, + {"Nye", "Ful", "Mbä", "Ngu", "Bêl", "Fön", "Len", "Kük", "Mvu", "Ngb", "Nab", "Kak"}, + {"Nyenye", "Fulundïgi", "Mbängü", "Ngubùe", "Bêläwü", "Föndo", "Lengua", "Kükürü", "Mvuka", "Ngberere", "Nabändüru", "Kakauka"}, + {"ND", "LK"}, +} + +var localeTableSgCF = [5][]string{ + {"Bk1", "Bk2", "Bk3", "Bk4", "Bk5", "Lâp", "Lây"}, + {"Bikua-ôko", "Bïkua-ûse", "Bïkua-ptâ", "Bïkua-usïö", "Bïkua-okü", "Lâpôsö", "Lâyenga"}, + {"Nye", "Ful", "Mbä", "Ngu", "Bêl", "Fön", "Len", "Kük", "Mvu", "Ngb", "Nab", "Kak"}, + {"Nyenye", "Fulundïgi", "Mbängü", "Ngubùe", "Bêläwü", "Föndo", "Lengua", "Kükürü", "Mvuka", "Ngberere", "Nabändüru", "Kakauka"}, + {"ND", "LK"}, +} + +var localeTableShi = [5][]string{ + {"ⴰⵙⴰ", "ⴰⵢⵏ", "ⴰⵙⵉ", "ⴰⴽⵕ", "ⴰⴽⵡ", "ⴰⵙⵉⵎ", "ⴰⵙⵉⴹ"}, + {"ⴰⵙⴰⵎⴰⵙ", "ⴰⵢⵏⴰⵙ", "ⴰⵙⵉⵏⴰⵙ", "ⴰⴽⵕⴰⵙ", 
"ⴰⴽⵡⴰⵙ", "ⵙⵉⵎⵡⴰⵙ", "ⴰⵙⵉⴹⵢⴰⵙ"}, + {"ⵉⵏⵏ", "ⴱⵕⴰ", "ⵎⴰⵕ", "ⵉⴱⵔ", "ⵎⴰⵢ", "ⵢⵓⵏ", "ⵢⵓⵍ", "ⵖⵓⵛ", "ⵛⵓⵜ", "ⴽⵜⵓ", "ⵏⵓⵡ", "ⴷⵓⵊ"}, + {"ⵉⵏⵏⴰⵢⵔ", "ⴱⵕⴰⵢⵕ", "ⵎⴰⵕⵚ", "ⵉⴱⵔⵉⵔ", "ⵎⴰⵢⵢⵓ", "ⵢⵓⵏⵢⵓ", "ⵢⵓⵍⵢⵓⵣ", "ⵖⵓⵛⵜ", "ⵛⵓⵜⴰⵏⴱⵉⵔ", "ⴽⵜⵓⴱⵔ", "ⵏⵓⵡⴰⵏⴱⵉⵔ", "ⴷⵓⵊⴰⵏⴱⵉⵔ"}, + {"ⵜⵉⴼⴰⵡⵜ", "ⵜⴰⴷⴳⴳⵯⴰⵜ"}, +} + +var localeTableShiLatn = [5][]string{ + {"asa", "ayn", "asi", "akṛ", "akw", "asim", "asiḍ"}, + {"asamas", "aynas", "asinas", "akṛas", "akwas", "asimwas", "asiḍyas"}, + {"inn", "bṛa", "maṛ", "ibr", "may", "yun", "yul", "ɣuc", "cut", "ktu", "nuw", "duj"}, + {"innayr", "bṛayṛ", "maṛṣ", "ibrir", "mayyu", "yunyu", "yulyuz", "ɣuct", "cutanbir", "ktubr", "nuwanbir", "dujanbir"}, + {"tifawt", "tadggʷat"}, +} + +var localeTableShiLatnMA = [5][]string{ + {"asa", "ayn", "asi", "akṛ", "akw", "asim", "asiḍ"}, + {"asamas", "aynas", "asinas", "akṛas", "akwas", "asimwas", "asiḍyas"}, + {"inn", "bṛa", "maṛ", "ibr", "may", "yun", "yul", "ɣuc", "cut", "ktu", "nuw", "duj"}, + {"innayr", "bṛayṛ", "maṛṣ", "ibrir", "mayyu", "yunyu", "yulyuz", "ɣuct", "cutanbir", "ktubr", "nuwanbir", "dujanbir"}, + {"tifawt", "tadggʷat"}, +} + +var localeTableShiTfng = [5][]string{ + {"ⴰⵙⴰ", "ⴰⵢⵏ", "ⴰⵙⵉ", "ⴰⴽⵕ", "ⴰⴽⵡ", "ⴰⵙⵉⵎ", "ⴰⵙⵉⴹ"}, + {"ⴰⵙⴰⵎⴰⵙ", "ⴰⵢⵏⴰⵙ", "ⴰⵙⵉⵏⴰⵙ", "ⴰⴽⵕⴰⵙ", "ⴰⴽⵡⴰⵙ", "ⵙⵉⵎⵡⴰⵙ", "ⴰⵙⵉⴹⵢⴰⵙ"}, + {"ⵉⵏⵏ", "ⴱⵕⴰ", "ⵎⴰⵕ", "ⵉⴱⵔ", "ⵎⴰⵢ", "ⵢⵓⵏ", "ⵢⵓⵍ", "ⵖⵓⵛ", "ⵛⵓⵜ", "ⴽⵜⵓ", "ⵏⵓⵡ", "ⴷⵓⵊ"}, + {"ⵉⵏⵏⴰⵢⵔ", "ⴱⵕⴰⵢⵕ", "ⵎⴰⵕⵚ", "ⵉⴱⵔⵉⵔ", "ⵎⴰⵢⵢⵓ", "ⵢⵓⵏⵢⵓ", "ⵢⵓⵍⵢⵓⵣ", "ⵖⵓⵛⵜ", "ⵛⵓⵜⴰⵏⴱⵉⵔ", "ⴽⵜⵓⴱⵔ", "ⵏⵓⵡⴰⵏⴱⵉⵔ", "ⴷⵓⵊⴰⵏⴱⵉⵔ"}, + {"ⵜⵉⴼⴰⵡⵜ", "ⵜⴰⴷⴳⴳⵯⴰⵜ"}, +} + +var localeTableShiTfngMA = [5][]string{ + {"ⴰⵙⴰ", "ⴰⵢⵏ", "ⴰⵙⵉ", "ⴰⴽⵕ", "ⴰⴽⵡ", "ⴰⵙⵉⵎ", "ⴰⵙⵉⴹ"}, + {"ⴰⵙⴰⵎⴰⵙ", "ⴰⵢⵏⴰⵙ", "ⴰⵙⵉⵏⴰⵙ", "ⴰⴽⵕⴰⵙ", "ⴰⴽⵡⴰⵙ", "ⵙⵉⵎⵡⴰⵙ", "ⴰⵙⵉⴹⵢⴰⵙ"}, + {"ⵉⵏⵏ", "ⴱⵕⴰ", "ⵎⴰⵕ", "ⵉⴱⵔ", "ⵎⴰⵢ", "ⵢⵓⵏ", "ⵢⵓⵍ", "ⵖⵓⵛ", "ⵛⵓⵜ", "ⴽⵜⵓ", "ⵏⵓⵡ", "ⴷⵓⵊ"}, + {"ⵉⵏⵏⴰⵢⵔ", "ⴱⵕⴰⵢⵕ", "ⵎⴰⵕⵚ", "ⵉⴱⵔⵉⵔ", "ⵎⴰⵢⵢⵓ", "ⵢⵓⵏⵢⵓ", "ⵢⵓⵍⵢⵓⵣ", "ⵖⵓⵛⵜ", "ⵛⵓⵜⴰⵏⴱⵉⵔ", "ⴽⵜⵓⴱⵔ", "ⵏⵓⵡⴰⵏⴱⵉⵔ", "ⴷⵓⵊⴰⵏⴱⵉⵔ"}, + {"ⵜⵉⴼⴰⵡⵜ", "ⵜⴰⴷⴳⴳⵯⴰⵜ"}, +} + +var localeTableSi = [5][]string{ + {"ඉරිදා", "සඳුදා", "අඟහ", "බදාදා", "බ්‍රහස්", "සිකු", "සෙන"}, + {"ඉරිදා", "සඳුදා", "අඟහරුවාදා", "බදාදා", "බ්‍රහස්පතින්දා", "සිකුරාදා", "සෙනසුරාදා"}, + {"ජන", "පෙබ", "මාර්තු", "අප්‍රේල්", "මැයි", "ජූනි", "ජූලි", "අගෝ", "සැප්", "ඔක්", "නොවැ", "දෙසැ"}, + {"ජනවාරි", "පෙබරවාරි", "මාර්තු", "අප්‍රේල්", "මැයි", "ජූනි", "ජූලි", "අගෝස්තු", "සැප්තැම්බර්", "ඔක්තෝබර්", "නොවැම්බර්", "දෙසැම්බර්"}, + {"පෙ.ව.", "ප.ව."}, +} + +var localeTableSiLK = [5][]string{ + {"ඉරිදා", "සඳුදා", "අඟහ", "බදාදා", "බ්‍රහස්", "සිකු", "සෙන"}, + {"ඉරිදා", "සඳුදා", "අඟහරුවාදා", "බදාදා", "බ්‍රහස්පතින්දා", "සිකුරාදා", "සෙනසුරාදා"}, + {"ජන", "පෙබ", "මාර්තු", "අප්‍රේල්", "මැයි", "ජූනි", "ජූලි", "අගෝ", "සැප්", "ඔක්", "නොවැ", "දෙසැ"}, + {"ජනවාරි", "පෙබරවාරි", "මාර්තු", "අප්‍රේල්", "මැයි", "ජූනි", "ජූලි", "අගෝස්තු", "සැප්තැම්බර්", "ඔක්තෝබර්", "නොවැම්බර්", "දෙසැම්බර්"}, + {"පෙ.ව.", "ප.ව."}, +} + +var localeTableSid = [5][]string{ + {"Sam", "San", "Mak", "Row", "Ham", "Arb", "Qid"}, + {"Sambata", "Sanyo", "Maakisanyo", "Roowe", "Hamuse", "Arbe", "Qidaame"}, + {}, + {}, + {"soodo", "hawwaro"}, +} + +var localeTableSidET = [5][]string{ + {"Sam", "San", "Mak", "Row", "Ham", "Arb", "Qid"}, + {"Sambata", "Sanyo", "Maakisanyo", "Roowe", "Hamuse", "Arbe", "Qidaame"}, + {}, + {}, + {"soodo", "hawwaro"}, +} + +var localeTableSk = [5][]string{ + {"ne", "po", "ut", "st", "št", "pi", "so"}, + {"nedeľa", "pondelok", "utorok", "streda", "štvrtok", "piatok", "sobota"}, + {"jan", "feb", "mar", "apr", "máj", "jún", "júl", "aug", "sep", "okt", "nov", "dec"}, + {"januára", "februára", "marca", "apríla", "mája", "júna", "júla", "augusta", "septembra", 
"októbra", "novembra", "decembra"}, + {}, +} + +var localeTableSkSK = [5][]string{ + {"ne", "po", "ut", "st", "št", "pi", "so"}, + {"nedeľa", "pondelok", "utorok", "streda", "štvrtok", "piatok", "sobota"}, + {"jan", "feb", "mar", "apr", "máj", "jún", "júl", "aug", "sep", "okt", "nov", "dec"}, + {"januára", "februára", "marca", "apríla", "mája", "júna", "júla", "augusta", "septembra", "októbra", "novembra", "decembra"}, + {}, +} + +var localeTableSkr = [5][]string{ + {}, + {"اتوار", "سوموار", "منگل", "ٻدھ", "خمیس", "جمعہ", "چھݨ چھݨ"}, + {}, + {"جنوری", "فروری", "مارچ", "اپریل", "مئی", "جون", "جولائی", "اگست", "ستمبر", "اکتوبر", "نومبر", "دسمبر"}, + {}, +} + +var localeTableSkrPK = [5][]string{ + {}, + {"اتوار", "سوموار", "منگل", "ٻدھ", "خمیس", "جمعہ", "چھݨ چھݨ"}, + {}, + {"جنوری", "فروری", "مارچ", "اپریل", "مئی", "جون", "جولائی", "اگست", "ستمبر", "اکتوبر", "نومبر", "دسمبر"}, + {}, +} + +var localeTableSl = [5][]string{ + {"ned.", "pon.", "tor.", "sre.", "čet.", "pet.", "sob."}, + {"nedelja", "ponedeljek", "torek", "sreda", "četrtek", "petek", "sobota"}, + {"jan.", "feb.", "mar.", "apr.", "maj", "jun.", "jul.", "avg.", "sep.", "okt.", "nov.", "dec."}, + {"januar", "februar", "marec", "april", "maj", "junij", "julij", "avgust", "september", "oktober", "november", "december"}, + {"dop.", "pop."}, +} + +var localeTableSlSI = [5][]string{ + {"ned.", "pon.", "tor.", "sre.", "čet.", "pet.", "sob."}, + {"nedelja", "ponedeljek", "torek", "sreda", "četrtek", "petek", "sobota"}, + {"jan.", "feb.", "mar.", "apr.", "maj", "jun.", "jul.", "avg.", "sep.", "okt.", "nov.", "dec."}, + {"januar", "februar", "marec", "april", "maj", "junij", "julij", "avgust", "september", "oktober", "november", "december"}, + {"dop.", "pop."}, +} + +var localeTableSmn = [5][]string{ + {"pas", "vuo", "maj", "kos", "tuo", "vás", "láv"}, + {"pasepeeivi", "vuossaargâ", "majebaargâ", "koskoho", "tuorâstuv", "vástuppeeivi", "lávurduv"}, + {"uđiv", "kuovâ", "njuhčâ", "cuáŋui", "vyesi", "kesi", "syeini", "porge", "čohčâ", "roovvâd", "skammâ", "juovlâ"}, + {"uđđâivemáánu", "kuovâmáánu", "njuhčâmáánu", "cuáŋuimáánu", "vyesimáánu", "kesimáánu", "syeinimáánu", "porgemáánu", "čohčâmáánu", "roovvâdmáánu", "skammâmáánu", "juovlâmáánu"}, + {"ip.", "ep."}, +} + +var localeTableSmnFI = [5][]string{ + {"pas", "vuo", "maj", "kos", "tuo", "vás", "láv"}, + {"pasepeeivi", "vuossaargâ", "majebaargâ", "koskoho", "tuorâstuv", "vástuppeeivi", "lávurduv"}, + {"uđiv", "kuovâ", "njuhčâ", "cuáŋui", "vyesi", "kesi", "syeini", "porge", "čohčâ", "roovvâd", "skammâ", "juovlâ"}, + {"uđđâivemáánu", "kuovâmáánu", "njuhčâmáánu", "cuáŋuimáánu", "vyesimáánu", "kesimáánu", "syeinimáánu", "porgemáánu", "čohčâmáánu", "roovvâdmáánu", "skammâmáánu", "juovlâmáánu"}, + {"ip.", "ep."}, +} + +var localeTableSn = [5][]string{ + {"Svo", "Muv", "Chp", "Cht", "Chn", "Chs", "Mug"}, + {"Svondo", "Muvhuro", "Chipiri", "Chitatu", "China", "Chishanu", "Mugovera"}, + {"Ndi", "Kuk", "Kur", "Kub", "Chv", "Chk", "Chg", "Nya", "Gun", "Gum", "Mbu", "Zvi"}, + {"Ndira", "Kukadzi", "Kurume", "Kubvumbi", "Chivabvu", "Chikumi", "Chikunguru", "Nyamavhuvhu", "Gunyana", "Gumiguru", "Mbudzi", "Zvita"}, + {"a", "p"}, +} + +var localeTableSnZW = [5][]string{ + {"Svo", "Muv", "Chp", "Cht", "Chn", "Chs", "Mug"}, + {"Svondo", "Muvhuro", "Chipiri", "Chitatu", "China", "Chishanu", "Mugovera"}, + {"Ndi", "Kuk", "Kur", "Kub", "Chv", "Chk", "Chg", "Nya", "Gun", "Gum", "Mbu", "Zvi"}, + {"Ndira", "Kukadzi", "Kurume", "Kubvumbi", "Chivabvu", "Chikumi", "Chikunguru", "Nyamavhuvhu", "Gunyana", "Gumiguru", 
"Mbudzi", "Zvita"}, + {"a", "p"}, +} + +var localeTableSo = [5][]string{ + {"Axd", "Isn", "Tldo", "Arbc", "Khms", "Jmc", "Sbti"}, + {"Axad", "Isniin", "Talaado", "Arbaco", "Khamiis", "Jimco", "Sabti"}, + {"Jan", "Feb", "Mar", "Abr", "May", "Jun", "Lul", "Ogs", "Seb", "Okt", "Nof", "Dis"}, + {"Bisha Koobaad", "Bisha Labaad", "Bisha Saddexaad", "Bisha Afraad", "Bisha Shanaad", "Bisha Lixaad", "Bisha Todobaad", "Bisha Sideedaad", "Bisha Sagaalaad", "Bisha Tobnaad", "Bisha Kow iyo Tobnaad", "Bisha Laba iyo Tobnaad"}, + {"GH", "GD"}, +} + +var localeTableSoDJ = [5][]string{ + {"Axd", "Isn", "Tldo", "Arbc", "Khms", "Jmc", "Sbti"}, + {"Axad", "Isniin", "Talaado", "Arbaco", "Khamiis", "Jimco", "Sabti"}, + {"Jan", "Feb", "Mar", "Abr", "May", "Jun", "Lul", "Ogs", "Seb", "Okt", "Nof", "Dis"}, + {"Bisha Koobaad", "Bisha Labaad", "Bisha Saddexaad", "Bisha Afraad", "Bisha Shanaad", "Bisha Lixaad", "Bisha Todobaad", "Bisha Sideedaad", "Bisha Sagaalaad", "Bisha Tobnaad", "Bisha Kow iyo Tobnaad", "Bisha Laba iyo Tobnaad"}, + {"GH", "GD"}, +} + +var localeTableSoET = [5][]string{ + {"Axd", "Isn", "Tldo", "Arbc", "Khms", "Jmc", "Sbti"}, + {"Axad", "Isniin", "Talaado", "Arbaco", "Khamiis", "Jimco", "Sabti"}, + {"Jan", "Feb", "Mar", "Abr", "May", "Jun", "Lul", "Ogs", "Seb", "Okt", "Nof", "Dis"}, + {"Bisha Koobaad", "Bisha Labaad", "Bisha Saddexaad", "Bisha Afraad", "Bisha Shanaad", "Bisha Lixaad", "Bisha Todobaad", "Bisha Sideedaad", "Bisha Sagaalaad", "Bisha Tobnaad", "Bisha Kow iyo Tobnaad", "Bisha Laba iyo Tobnaad"}, + {"GH", "GD"}, +} + +var localeTableSoKE = [5][]string{ + {"Axd", "Isn", "Tldo", "Arbc", "Khms", "Jmc", "Sbti"}, + {"Axad", "Isniin", "Talaado", "Arbaco", "Khamiis", "Jimco", "Sabti"}, + {"Jan", "Feb", "Mar", "Abr", "May", "Jun", "Lul", "Ogs", "Seb", "Okt", "Nof", "Dis"}, + {"Bisha Koobaad", "Bisha Labaad", "Bisha Saddexaad", "Bisha Afraad", "Bisha Shanaad", "Bisha Lixaad", "Bisha Todobaad", "Bisha Sideedaad", "Bisha Sagaalaad", "Bisha Tobnaad", "Bisha Kow iyo Tobnaad", "Bisha Laba iyo Tobnaad"}, + {"GH", "GD"}, +} + +var localeTableSoSO = [5][]string{ + {"Axd", "Isn", "Tldo", "Arbc", "Khms", "Jmc", "Sbti"}, + {"Axad", "Isniin", "Talaado", "Arbaco", "Khamiis", "Jimco", "Sabti"}, + {"Jan", "Feb", "Mar", "Abr", "May", "Jun", "Lul", "Ogs", "Seb", "Okt", "Nof", "Dis"}, + {"Bisha Koobaad", "Bisha Labaad", "Bisha Saddexaad", "Bisha Afraad", "Bisha Shanaad", "Bisha Lixaad", "Bisha Todobaad", "Bisha Sideedaad", "Bisha Sagaalaad", "Bisha Tobnaad", "Bisha Kow iyo Tobnaad", "Bisha Laba iyo Tobnaad"}, + {"GH", "GD"}, +} + +var localeTableSq = [5][]string{ + {"die", "hën", "mar", "mër", "enj", "pre", "sht"}, + {"e diel", "e hënë", "e martë", "e mërkurë", "e enjte", "e premte", "e shtunë"}, + {"jan", "shk", "mar", "pri", "maj", "qer", "korr", "gush", "sht", "tet", "nën", "dhj"}, + {"janar", "shkurt", "mars", "prill", "maj", "qershor", "korrik", "gusht", "shtator", "tetor", "nëntor", "dhjetor"}, + {"p.d.", "m.d."}, +} + +var localeTableSqAL = [5][]string{ + {"die", "hën", "mar", "mër", "enj", "pre", "sht"}, + {"e diel", "e hënë", "e martë", "e mërkurë", "e enjte", "e premte", "e shtunë"}, + {"jan", "shk", "mar", "pri", "maj", "qer", "korr", "gush", "sht", "tet", "nën", "dhj"}, + {"janar", "shkurt", "mars", "prill", "maj", "qershor", "korrik", "gusht", "shtator", "tetor", "nëntor", "dhjetor"}, + {"p.d.", "m.d."}, +} + +var localeTableSqMK = [5][]string{ + {"die", "hën", "mar", "mër", "enj", "pre", "sht"}, + {"e diel", "e hënë", "e martë", "e mërkurë", "e enjte", "e premte", "e shtunë"}, + {"jan", "shk", 
"mar", "pri", "maj", "qer", "korr", "gush", "sht", "tet", "nën", "dhj"}, + {"janar", "shkurt", "mars", "prill", "maj", "qershor", "korrik", "gusht", "shtator", "tetor", "nëntor", "dhjetor"}, + {"p.d.", "m.d."}, +} + +var localeTableSqXK = [5][]string{ + {"die", "hën", "mar", "mër", "enj", "pre", "sht"}, + {"e diel", "e hënë", "e martë", "e mërkurë", "e enjte", "e premte", "e shtunë"}, + {"jan", "shk", "mar", "pri", "maj", "qer", "korr", "gush", "sht", "tet", "nën", "dhj"}, + {"janar", "shkurt", "mars", "prill", "maj", "qershor", "korrik", "gusht", "shtator", "tetor", "nëntor", "dhjetor"}, + {"p.d.", "m.d."}, +} + +var localeTableSr = [5][]string{ + {"нед", "пон", "уто", "сре", "чет", "пет", "суб"}, + {"недеља", "понедељак", "уторак", "среда", "четвртак", "петак", "субота"}, + {"јан", "феб", "мар", "апр", "мај", "јун", "јул", "авг", "сеп", "окт", "нов", "дец"}, + {"јануар", "фебруар", "март", "април", "мај", "јун", "јул", "август", "септембар", "октобар", "новембар", "децембар"}, + {}, +} + +var localeTableSrCyrl = [5][]string{ + {"нед", "пон", "уто", "сре", "чет", "пет", "суб"}, + {"недеља", "понедељак", "уторак", "среда", "четвртак", "петак", "субота"}, + {"јан", "феб", "мар", "апр", "мај", "јун", "јул", "авг", "сеп", "окт", "нов", "дец"}, + {"јануар", "фебруар", "март", "април", "мај", "јун", "јул", "август", "септембар", "октобар", "новембар", "децембар"}, + {}, +} + +var localeTableSrCyrlBA = [5][]string{ + {"нед", "пон", "уто", "сри", "чет", "пет", "суб"}, + {"недјеља", "понедјељак", "уторак", "сриједа", "четвртак", "петак", "субота"}, + {"јан", "феб", "мар", "апр", "мај", "јун", "јул", "авг", "сеп", "окт", "нов", "дец"}, + {"јануар", "фебруар", "март", "април", "мај", "јун", "јул", "август", "септембар", "октобар", "новембар", "децембар"}, + {"пријеподне", "поподне"}, +} + +var localeTableSrCyrlME = [5][]string{ + {"нед", "пон", "уто", "сре", "чет", "пет", "суб"}, + {"недјеља", "понедељак", "уторак", "сриједа", "четвртак", "петак", "субота"}, + {"јан", "феб", "март", "апр", "мај", "јун", "јул", "авг", "септ", "окт", "нов", "дец"}, + {"јануар", "фебруар", "март", "април", "мај", "јун", "јул", "август", "септембар", "октобар", "новембар", "децембар"}, + {"пријеподне", "поподне"}, +} + +var localeTableSrCyrlRS = [5][]string{ + {"нед", "пон", "уто", "сре", "чет", "пет", "суб"}, + {"недеља", "понедељак", "уторак", "среда", "четвртак", "петак", "субота"}, + {"јан", "феб", "мар", "апр", "мај", "јун", "јул", "авг", "сеп", "окт", "нов", "дец"}, + {"јануар", "фебруар", "март", "април", "мај", "јун", "јул", "август", "септембар", "октобар", "новембар", "децембар"}, + {}, +} + +var localeTableSrCyrlXK = [5][]string{ + {"нед", "пон", "уто", "сре", "чет", "пет", "суб"}, + {"недеља", "понедељак", "уторак", "среда", "четвртак", "петак", "субота"}, + {"јан", "феб", "март", "апр", "мај", "јун", "јул", "авг", "септ", "окт", "нов", "дец"}, + {"јануар", "фебруар", "март", "април", "мај", "јун", "јул", "август", "септембар", "октобар", "новембар", "децембар"}, + {}, +} + +var localeTableSrLatn = [5][]string{ + {"ned", "pon", "uto", "sre", "čet", "pet", "sub"}, + {"nedelja", "ponedeljak", "utorak", "sreda", "četvrtak", "petak", "subota"}, + {"jan", "feb", "mar", "apr", "maj", "jun", "jul", "avg", "sep", "okt", "nov", "dec"}, + {"januar", "februar", "mart", "april", "maj", "jun", "jul", "avgust", "septembar", "oktobar", "novembar", "decembar"}, + {}, +} + +var localeTableSrLatnBA = [5][]string{ + {"ned", "pon", "uto", "sri", "čet", "pet", "sub"}, + {"nedjelja", "ponedjeljak", "utorak", "srijeda", 
"četvrtak", "petak", "subota"}, + {"jan", "feb", "mar", "apr", "maj", "jun", "jul", "avg", "sep", "okt", "nov", "dec"}, + {"januar", "februar", "mart", "april", "maj", "jun", "jul", "avgust", "septembar", "oktobar", "novembar", "decembar"}, + {"prijepodne", "popodne"}, +} + +var localeTableSrLatnME = [5][]string{ + {"ned", "pon", "uto", "sre", "čet", "pet", "sub"}, + {"nedjelja", "ponedeljak", "utorak", "srijeda", "četvrtak", "petak", "subota"}, + {"jan", "feb", "mart", "apr", "maj", "jun", "jul", "avg", "sept", "okt", "nov", "dec"}, + {"januar", "februar", "mart", "april", "maj", "jun", "jul", "avgust", "septembar", "oktobar", "novembar", "decembar"}, + {"prijepodne", "popodne"}, +} + +var localeTableSrLatnRS = [5][]string{ + {"ned", "pon", "uto", "sre", "čet", "pet", "sub"}, + {"nedelja", "ponedeljak", "utorak", "sreda", "četvrtak", "petak", "subota"}, + {"jan", "feb", "mar", "apr", "maj", "jun", "jul", "avg", "sep", "okt", "nov", "dec"}, + {"januar", "februar", "mart", "april", "maj", "jun", "jul", "avgust", "septembar", "oktobar", "novembar", "decembar"}, + {}, +} + +var localeTableSrLatnXK = [5][]string{ + {"ned", "pon", "uto", "sre", "čet", "pet", "sub"}, + {"nedelja", "ponedeljak", "utorak", "sreda", "četvrtak", "petak", "subota"}, + {"jan", "feb", "mart", "apr", "maj", "jun", "jul", "avg", "sept", "okt", "nov", "dec"}, + {"januar", "februar", "mart", "april", "maj", "jun", "jul", "avgust", "septembar", "oktobar", "novembar", "decembar"}, + {}, +} + +var localeTableSs = [5][]string{ + {"Son", "Mso", "Bil", "Tsa", "Ne", "Hla", "Mgc"}, + {"Lisontfo", "uMsombuluko", "Lesibili", "Lesitsatfu", "Lesine", "Lesihlanu", "uMgcibelo"}, + {"Bhi", "Van", "Vol", "Mab", "Nkh", "Nhl", "Kho", "Ngc", "Nyo", "Mph", "Lwe", "Ngo"}, + {"Bhimbidvwane", "iNdlovana", "iNdlovu-lenkhulu", "Mabasa", "iNkhwekhweti", "iNhlaba", "Kholwane", "iNgci", "iNyoni", "iMphala", "Lweti", "iNgongoni"}, + {}, +} + +var localeTableSsSZ = [5][]string{ + {"Son", "Mso", "Bil", "Tsa", "Ne", "Hla", "Mgc"}, + {"Lisontfo", "uMsombuluko", "Lesibili", "Lesitsatfu", "Lesine", "Lesihlanu", "uMgcibelo"}, + {"Bhi", "Van", "Vol", "Mab", "Nkh", "Nhl", "Kho", "Ngc", "Nyo", "Mph", "Lwe", "Ngo"}, + {"Bhimbidvwane", "iNdlovana", "iNdlovu-lenkhulu", "Mabasa", "iNkhwekhweti", "iNhlaba", "Kholwane", "iNgci", "iNyoni", "iMphala", "Lweti", "iNgongoni"}, + {}, +} + +var localeTableSsZA = [5][]string{ + {"Son", "Mso", "Bil", "Tsa", "Ne", "Hla", "Mgc"}, + {"Lisontfo", "uMsombuluko", "Lesibili", "Lesitsatfu", "Lesine", "Lesihlanu", "uMgcibelo"}, + {"Bhi", "Van", "Vol", "Mab", "Nkh", "Nhl", "Kho", "Ngc", "Nyo", "Mph", "Lwe", "Ngo"}, + {"Bhimbidvwane", "iNdlovana", "iNdlovu-lenkhulu", "Mabasa", "iNkhwekhweti", "iNhlaba", "Kholwane", "iNgci", "iNyoni", "iMphala", "Lweti", "iNgongoni"}, + {}, +} + +var localeTableSsy = [5][]string{ + {"Nab", "San", "Sal", "Rab", "Cam", "Jum", "Qun"}, + {"Naba Sambat", "Sani", "Salus", "Rabuq", "Camus", "Jumqata", "Qunxa Sambat"}, + {"Qun", "Nah", "Cig", "Agd", "Cax", "Qas", "Qad", "Leq", "Way", "Dit", "Xim", "Kax"}, + {"Qunxa Garablu", "Kudo", "Ciggilta Kudo", "Agda Baxis", "Caxah Alsa", "Qasa Dirri", "Qado Dirri", "Liiqen", "Waysu", "Diteli", "Ximoli", "Kaxxa Garablu"}, + {"saaku", "carra"}, +} + +var localeTableSsyER = [5][]string{ + {"Nab", "San", "Sal", "Rab", "Cam", "Jum", "Qun"}, + {"Naba Sambat", "Sani", "Salus", "Rabuq", "Camus", "Jumqata", "Qunxa Sambat"}, + {"Qun", "Nah", "Cig", "Agd", "Cax", "Qas", "Qad", "Leq", "Way", "Dit", "Xim", "Kax"}, + {"Qunxa Garablu", "Kudo", "Ciggilta Kudo", "Agda Baxis", "Caxah Alsa", 
"Qasa Dirri", "Qado Dirri", "Liiqen", "Waysu", "Diteli", "Ximoli", "Kaxxa Garablu"}, + {"saaku", "carra"}, +} + +var localeTableSt = [5][]string{ + {"Son", "Mma", "Bed", "Rar", "Ne", "Hla", "Moq"}, + {"Sontaha", "Mmantaha", "Labobedi", "Laboraru", "Labone", "Labohlane", "Moqebelo"}, + {"Phe", "Kol", "Ube", "Mme", "Mot", "Jan", "Upu", "Pha", "Leo", "Mph", "Pun", "Tsh"}, + {"Phesekgong", "Hlakola", "Hlakubele", "Mmese", "Motsheanong", "Phupjane", "Phupu", "Phata", "Leotshe", "Mphalane", "Pundungwane", "Tshitwe"}, + {}, +} + +var localeTableStLS = [5][]string{ + {"Son", "Mma", "Bed", "Rar", "Ne", "Hla", "Moq"}, + {"Sontaha", "Mmantaha", "Labobedi", "Laboraru", "Labone", "Labohlane", "Moqebelo"}, + {"Phe", "Kol", "Ube", "Mme", "Mot", "Jan", "Upu", "Pha", "Leo", "Mph", "Pun", "Tsh"}, + {"Phesekgong", "Hlakola", "Hlakubele", "Mmese", "Motsheanong", "Phupjane", "Phupu", "Phata", "Leotshe", "Mphalane", "Pundungwane", "Tshitwe"}, + {}, +} + +var localeTableStZA = [5][]string{ + {"Son", "Mma", "Bed", "Rar", "Ne", "Hla", "Moq"}, + {"Sontaha", "Mmantaha", "Labobedi", "Laboraru", "Labone", "Labohlane", "Moqebelo"}, + {"Phe", "Kol", "Ube", "Mme", "Mot", "Jan", "Upu", "Pha", "Leo", "Mph", "Pun", "Tsh"}, + {"Phesekgong", "Hlakola", "Hlakubele", "Mmese", "Motsheanong", "Phupjane", "Phupu", "Phata", "Leotshe", "Mphalane", "Pundungwane", "Tshitwe"}, + {}, +} + +var localeTableSu = [5][]string{ + {"Mng", "Sen", "Sal", "Reb", "Kem", "Jum", "Sap"}, + {"Minggu", "Senén", "Salasa", "Rebo", "Kemis", "Jumaah", "Saptu"}, + {"Jan", "Péb", "Mar", "Apr", "Méi", "Jun", "Jul", "Ags", "Sép", "Okt", "Nop", "Dés"}, + {"Januari", "Pébruari", "Maret", "April", "Méi", "Juni", "Juli", "Agustus", "Séptémber", "Oktober", "Nopémber", "Désémber"}, + {}, +} + +var localeTableSuLatn = [5][]string{ + {"Mng", "Sen", "Sal", "Reb", "Kem", "Jum", "Sap"}, + {"Minggu", "Senén", "Salasa", "Rebo", "Kemis", "Jumaah", "Saptu"}, + {"Jan", "Péb", "Mar", "Apr", "Méi", "Jun", "Jul", "Ags", "Sép", "Okt", "Nop", "Dés"}, + {"Januari", "Pébruari", "Maret", "April", "Méi", "Juni", "Juli", "Agustus", "Séptémber", "Oktober", "Nopémber", "Désémber"}, + {}, +} + +var localeTableSuLatnID = [5][]string{ + {"Mng", "Sen", "Sal", "Reb", "Kem", "Jum", "Sap"}, + {"Minggu", "Senén", "Salasa", "Rebo", "Kemis", "Jumaah", "Saptu"}, + {"Jan", "Péb", "Mar", "Apr", "Méi", "Jun", "Jul", "Ags", "Sép", "Okt", "Nop", "Dés"}, + {"Januari", "Pébruari", "Maret", "April", "Méi", "Juni", "Juli", "Agustus", "Séptémber", "Oktober", "Nopémber", "Désémber"}, + {}, +} + +var localeTableSv = [5][]string{ + {"sön", "mån", "tis", "ons", "tors", "fre", "lör"}, + {"söndag", "måndag", "tisdag", "onsdag", "torsdag", "fredag", "lördag"}, + {"jan.", "feb.", "mars", "apr.", "maj", "juni", "juli", "aug.", "sep.", "okt.", "nov.", "dec."}, + {"januari", "februari", "mars", "april", "maj", "juni", "juli", "augusti", "september", "oktober", "november", "december"}, + {"fm", "em"}, +} + +var localeTableSvAX = [5][]string{ + {"sön", "mån", "tis", "ons", "tors", "fre", "lör"}, + {"söndag", "måndag", "tisdag", "onsdag", "torsdag", "fredag", "lördag"}, + {"jan.", "feb.", "mars", "apr.", "maj", "juni", "juli", "aug.", "sep.", "okt.", "nov.", "dec."}, + {"januari", "februari", "mars", "april", "maj", "juni", "juli", "augusti", "september", "oktober", "november", "december"}, + {"fm", "em"}, +} + +var localeTableSvFI = [5][]string{ + {"sön", "mån", "tis", "ons", "tors", "fre", "lör"}, + {"söndag", "måndag", "tisdag", "onsdag", "torsdag", "fredag", "lördag"}, + {"jan.", "feb.", "mars", "apr.", "maj", "juni", 
"juli", "aug.", "sep.", "okt.", "nov.", "dec."}, + {"januari", "februari", "mars", "april", "maj", "juni", "juli", "augusti", "september", "oktober", "november", "december"}, + {"fm", "em"}, +} + +var localeTableSvSE = [5][]string{ + {"sön", "mån", "tis", "ons", "tors", "fre", "lör"}, + {"söndag", "måndag", "tisdag", "onsdag", "torsdag", "fredag", "lördag"}, + {"jan.", "feb.", "mars", "apr.", "maj", "juni", "juli", "aug.", "sep.", "okt.", "nov.", "dec."}, + {"januari", "februari", "mars", "april", "maj", "juni", "juli", "augusti", "september", "oktober", "november", "december"}, + {"fm", "em"}, +} + +var localeTableSw = [5][]string{ + {}, + {"Jumapili", "Jumatatu", "Jumanne", "Jumatano", "Alhamisi", "Ijumaa", "Jumamosi"}, + {"Jan", "Feb", "Mac", "Apr", "Mei", "Jun", "Jul", "Ago", "Sep", "Okt", "Nov", "Des"}, + {"Januari", "Februari", "Machi", "Aprili", "Mei", "Juni", "Julai", "Agosti", "Septemba", "Oktoba", "Novemba", "Desemba"}, + {"am", "pm"}, +} + +var localeTableSwCD = [5][]string{ + {}, + {"Jumapili", "Jumatatu", "Jumanne", "Jumatano", "Alhamisi", "Ijumaa", "Jumamosi"}, + {"Jan", "Feb", "Mac", "Apr", "Mei", "Jun", "Jul", "Ago", "Sep", "Okt", "Nov", "Des"}, + {"Januari", "Februari", "Machi", "Aprili", "Mei", "Juni", "Julai", "Agosti", "Septemba", "Oktoba", "Novemba", "Desemba"}, + {"am", "pm"}, +} + +var localeTableSwKE = [5][]string{ + {}, + {"Jumapili", "Jumatatu", "Jumanne", "Jumatano", "Alhamisi", "Ijumaa", "Jumamosi"}, + {"Jan", "Feb", "Mac", "Apr", "Mei", "Jun", "Jul", "Ago", "Sep", "Okt", "Nov", "Des"}, + {"Januari", "Februari", "Machi", "Aprili", "Mei", "Juni", "Julai", "Agosti", "Septemba", "Oktoba", "Novemba", "Desemba"}, + {"am", "pm"}, +} + +var localeTableSwTZ = [5][]string{ + {}, + {"Jumapili", "Jumatatu", "Jumanne", "Jumatano", "Alhamisi", "Ijumaa", "Jumamosi"}, + {"Jan", "Feb", "Mac", "Apr", "Mei", "Jun", "Jul", "Ago", "Sep", "Okt", "Nov", "Des"}, + {"Januari", "Februari", "Machi", "Aprili", "Mei", "Juni", "Julai", "Agosti", "Septemba", "Oktoba", "Novemba", "Desemba"}, + {"am", "pm"}, +} + +var localeTableSwUG = [5][]string{ + {}, + {"Jumapili", "Jumatatu", "Jumanne", "Jumatano", "Alhamisi", "Ijumaa", "Jumamosi"}, + {"Jan", "Feb", "Mac", "Apr", "Mei", "Jun", "Jul", "Ago", "Sep", "Okt", "Nov", "Des"}, + {"Januari", "Februari", "Machi", "Aprili", "Mei", "Juni", "Julai", "Agosti", "Septemba", "Oktoba", "Novemba", "Desemba"}, + {"am", "pm"}, +} + +var localeTableSyr = [5][]string{ + {"ܚܕ", "ܬܪܝܢ", "ܬܠܬ", "ܐܪܒܥ", "ܚܡܫ", "ܥܪܘ", "ܫܒܬܐ"}, + {"ܚܕܒܫܒܐ", "ܬܪܝܢܒܫܒܐ", "ܬܠܬܒܫܒܐ", "ܐܪܒܥܒܫܒܐ", "ܚܡܫܒܫܒܐ", "ܥܪܘܒܬܐ", "ܫܒܬܐ"}, + {"ܟܢܘܢ ܒ", "ܫܒܛ", "ܐܕܪ", "ܢܝܣܢ", "ܐܝܪ", "ܚܙܝܪܢ", "ܬܡܘܙ", "ܐܒ", "ܐܝܠܘܠ", "ܬܫܪܝܢ ܐ", "ܬܫܪܝܢ ܒ", "ܟܢܘܢ ܐ"}, + {"ܟܢܘܢ ܐܚܪܝܐ", "ܫܒܛ", "ܐܕܪ", "ܢܝܣܢ", "ܐܝܪ", "ܚܙܝܪܢ", "ܬܡܘܙ", "ܐܒ", "ܐܝܠܘܠ", "ܬܫܪܝܢ ܩܕܡܝܐ", "ܬܫܪܝܢ ܐܚܪܝܐ", "ܟܢܘܢ ܩܕܡܝܐ"}, + {"܏ܩܛ‌", "܏ܒܛ‌"}, +} + +var localeTableSyrIQ = [5][]string{ + {"ܚܕ", "ܬܪܝܢ", "ܬܠܬ", "ܐܪܒܥ", "ܚܡܫ", "ܥܪܘ", "ܫܒܬܐ"}, + {"ܚܕܒܫܒܐ", "ܬܪܝܢܒܫܒܐ", "ܬܠܬܒܫܒܐ", "ܐܪܒܥܒܫܒܐ", "ܚܡܫܒܫܒܐ", "ܥܪܘܒܬܐ", "ܫܒܬܐ"}, + {"ܟܢܘܢ ܒ", "ܫܒܛ", "ܐܕܪ", "ܢܝܣܢ", "ܐܝܪ", "ܚܙܝܪܢ", "ܬܡܘܙ", "ܐܒ", "ܐܝܠܘܠ", "ܬܫܪܝܢ ܐ", "ܬܫܪܝܢ ܒ", "ܟܢܘܢ ܐ"}, + {"ܟܢܘܢ ܐܚܪܝܐ", "ܫܒܛ", "ܐܕܪ", "ܢܝܣܢ", "ܐܝܪ", "ܚܙܝܪܢ", "ܬܡܘܙ", "ܐܒ", "ܐܝܠܘܠ", "ܬܫܪܝܢ ܩܕܡܝܐ", "ܬܫܪܝܢ ܐܚܪܝܐ", "ܟܢܘܢ ܩܕܡܝܐ"}, + {"܏ܩܛ‌", "܏ܒܛ‌"}, +} + +var localeTableSyrSY = [5][]string{ + {"ܚܕ", "ܬܪܝܢ", "ܬܠܬ", "ܐܪܒܥ", "ܚܡܫ", "ܥܪܘ", "ܫܒܬܐ"}, + {"ܚܕܒܫܒܐ", "ܬܪܝܢܒܫܒܐ", "ܬܠܬܒܫܒܐ", "ܐܪܒܥܒܫܒܐ", "ܚܡܫܒܫܒܐ", "ܥܪܘܒܬܐ", "ܫܒܬܐ"}, + {"ܟܢܘܢ ܒ", "ܫܒܛ", "ܐܕܪ", "ܢܝܣܢ", "ܐܝܪ", "ܚܙܝܪܢ", "ܬܡܘܙ", "ܐܒ", "ܐܝܠܘܠ", "ܬܫܪܝܢ ܐ", "ܬܫܪܝܢ ܒ", "ܟܢܘܢ ܐ"}, + {"ܟܢܘܢ ܐܚܪܝܐ", "ܫܒܛ", 
"ܐܕܪ", "ܢܝܣܢ", "ܐܝܪ", "ܚܙܝܪܢ", "ܬܡܘܙ", "ܐܒ", "ܐܝܠܘܠ", "ܬܫܪܝܢ ܩܕܡܝܐ", "ܬܫܪܝܢ ܐܚܪܝܐ", "ܟܢܘܢ ܩܕܡܝܐ"}, + {"܏ܩܛ‌", "܏ܒܛ‌"}, +} + +var localeTableSzl = [5][]string{ + {"niy", "pyń", "wto", "str", "szt", "piō", "sob"}, + {"niydziela", "pyńdziałek", "wtorek", "strzoda", "sztwortek", "piōntek", "sobota"}, + {"sty", "lut", "mar", "kwi", "moj", "czy", "lip", "siy", "wrz", "paź", "lis", "gru"}, + {"stycznia", "lutego", "marca", "kwietnia", "moja", "czyrwca", "lipca", "siyrpnia", "września", "października", "listopada", "grudnia"}, + {"dopołedniŏ", "popołedniu"}, +} + +var localeTableSzlPL = [5][]string{ + {"niy", "pyń", "wto", "str", "szt", "piō", "sob"}, + {"niydziela", "pyńdziałek", "wtorek", "strzoda", "sztwortek", "piōntek", "sobota"}, + {"sty", "lut", "mar", "kwi", "moj", "czy", "lip", "siy", "wrz", "paź", "lis", "gru"}, + {"stycznia", "lutego", "marca", "kwietnia", "moja", "czyrwca", "lipca", "siyrpnia", "września", "października", "listopada", "grudnia"}, + {"dopołedniŏ", "popołedniu"}, +} + +var localeTableTa = [5][]string{ + {"ஞாயி.", "திங்.", "செவ்.", "புத.", "வியா.", "வெள்.", "சனி"}, + {"ஞாயிறு", "திங்கள்", "செவ்வாய்", "புதன்", "வியாழன்", "வெள்ளி", "சனி"}, + {"ஜன.", "பிப்.", "மார்.", "ஏப்.", "மே", "ஜூன்", "ஜூலை", "ஆக.", "செப்.", "அக்.", "நவ.", "டிச."}, + {"ஜனவரி", "பிப்ரவரி", "மார்ச்", "ஏப்ரல்", "மே", "ஜூன்", "ஜூலை", "ஆகஸ்ட்", "செப்டம்பர்", "அக்டோபர்", "நவம்பர்", "டிசம்பர்"}, + {}, +} + +var localeTableTaIN = [5][]string{ + {"ஞாயி.", "திங்.", "செவ்.", "புத.", "வியா.", "வெள்.", "சனி"}, + {"ஞாயிறு", "திங்கள்", "செவ்வாய்", "புதன்", "வியாழன்", "வெள்ளி", "சனி"}, + {"ஜன.", "பிப்.", "மார்.", "ஏப்.", "மே", "ஜூன்", "ஜூலை", "ஆக.", "செப்.", "அக்.", "நவ.", "டிச."}, + {"ஜனவரி", "பிப்ரவரி", "மார்ச்", "ஏப்ரல்", "மே", "ஜூன்", "ஜூலை", "ஆகஸ்ட்", "செப்டம்பர்", "அக்டோபர்", "நவம்பர்", "டிசம்பர்"}, + {}, +} + +var localeTableTaLK = [5][]string{ + {"ஞாயி.", "திங்.", "செவ்.", "புத.", "வியா.", "வெள்.", "சனி"}, + {"ஞாயிறு", "திங்கள்", "செவ்வாய்", "புதன்", "வியாழன்", "வெள்ளி", "சனி"}, + {"ஜன.", "பிப்.", "மார்.", "ஏப்.", "மே", "ஜூன்", "ஜூலை", "ஆக.", "செப்.", "அக்.", "நவ.", "டிச."}, + {"ஜனவரி", "பிப்ரவரி", "மார்ச்", "ஏப்ரல்", "மே", "ஜூன்", "ஜூலை", "ஆகஸ்ட்", "செப்டம்பர்", "அக்டோபர்", "நவம்பர்", "டிசம்பர்"}, + {}, +} + +var localeTableTaMY = [5][]string{ + {"ஞாயி.", "திங்.", "செவ்.", "புத.", "வியா.", "வெள்.", "சனி"}, + {"ஞாயிறு", "திங்கள்", "செவ்வாய்", "புதன்", "வியாழன்", "வெள்ளி", "சனி"}, + {"ஜன.", "பிப்.", "மார்.", "ஏப்.", "மே", "ஜூன்", "ஜூலை", "ஆக.", "செப்.", "அக்.", "நவ.", "டிச."}, + {"ஜனவரி", "பிப்ரவரி", "மார்ச்", "ஏப்ரல்", "மே", "ஜூன்", "ஜூலை", "ஆகஸ்ட்", "செப்டம்பர்", "அக்டோபர்", "நவம்பர்", "டிசம்பர்"}, + {}, +} + +var localeTableTaSG = [5][]string{ + {"ஞாயி.", "திங்.", "செவ்.", "புத.", "வியா.", "வெள்.", "சனி"}, + {"ஞாயிறு", "திங்கள்", "செவ்வாய்", "புதன்", "வியாழன்", "வெள்ளி", "சனி"}, + {"ஜன.", "பிப்.", "மார்.", "ஏப்.", "மே", "ஜூன்", "ஜூலை", "ஆக.", "செப்.", "அக்.", "நவ.", "டிச."}, + {"ஜனவரி", "பிப்ரவரி", "மார்ச்", "ஏப்ரல்", "மே", "ஜூன்", "ஜூலை", "ஆகஸ்ட்", "செப்டம்பர்", "அக்டோபர்", "நவம்பர்", "டிசம்பர்"}, + {}, +} + +var localeTableTe = [5][]string{ + {"ఆది", "సోమ", "మంగళ", "బుధ", "గురు", "శుక్ర", "శని"}, + {"ఆదివారం", "సోమవారం", "మంగళవారం", "బుధవారం", "గురువారం", "శుక్రవారం", "శనివారం"}, + {"జన", "ఫిబ్ర", "మార్చి", "ఏప్రి", "మే", "జూన్", "జులై", "ఆగ", "సెప్టెం", "అక్టో", "నవం", "డిసెం"}, + {"జనవరి", "ఫిబ్రవరి", "మార్చి", "ఏప్రిల్", "మే", "జూన్", "జులై", "ఆగస్టు", "సెప్టెంబర్", "అక్టోబర్", "నవంబర్", "డిసెంబర్"}, + {"ఉ", "సా"}, +} + +var localeTableTeIN = [5][]string{ + {"ఆది", "సోమ", "మంగళ", "బుధ", "గురు", "శుక్ర", 
"శని"}, + {"ఆదివారం", "సోమవారం", "మంగళవారం", "బుధవారం", "గురువారం", "శుక్రవారం", "శనివారం"}, + {"జన", "ఫిబ్ర", "మార్చి", "ఏప్రి", "మే", "జూన్", "జులై", "ఆగ", "సెప్టెం", "అక్టో", "నవం", "డిసెం"}, + {"జనవరి", "ఫిబ్రవరి", "మార్చి", "ఏప్రిల్", "మే", "జూన్", "జులై", "ఆగస్టు", "సెప్టెంబర్", "అక్టోబర్", "నవంబర్", "డిసెంబర్"}, + {"ఉ", "సా"}, +} + +var localeTableTeo = [5][]string{ + {"Jum", "Bar", "Aar", "Uni", "Ung", "Kan", "Sab"}, + {"Nakaejuma", "Nakaebarasa", "Nakaare", "Nakauni", "Nakaung’on", "Nakakany", "Nakasabiti"}, + {"Rar", "Muk", "Kwa", "Dun", "Mar", "Mod", "Jol", "Ped", "Sok", "Tib", "Lab", "Poo"}, + {"Orara", "Omuk", "Okwamg’", "Odung’el", "Omaruk", "Omodok’king’ol", "Ojola", "Opedel", "Osokosokoma", "Otibar", "Olabor", "Opoo"}, + {"Taparachu", "Ebongi"}, +} + +var localeTableTeoKE = [5][]string{ + {"Jum", "Bar", "Aar", "Uni", "Ung", "Kan", "Sab"}, + {"Nakaejuma", "Nakaebarasa", "Nakaare", "Nakauni", "Nakaung’on", "Nakakany", "Nakasabiti"}, + {"Rar", "Muk", "Kwa", "Dun", "Mar", "Mod", "Jol", "Ped", "Sok", "Tib", "Lab", "Poo"}, + {"Orara", "Omuk", "Okwamg’", "Odung’el", "Omaruk", "Omodok’king’ol", "Ojola", "Opedel", "Osokosokoma", "Otibar", "Olabor", "Opoo"}, + {"Taparachu", "Ebongi"}, +} + +var localeTableTeoUG = [5][]string{ + {"Jum", "Bar", "Aar", "Uni", "Ung", "Kan", "Sab"}, + {"Nakaejuma", "Nakaebarasa", "Nakaare", "Nakauni", "Nakaung’on", "Nakakany", "Nakasabiti"}, + {"Rar", "Muk", "Kwa", "Dun", "Mar", "Mod", "Jol", "Ped", "Sok", "Tib", "Lab", "Poo"}, + {"Orara", "Omuk", "Okwamg’", "Odung’el", "Omaruk", "Omodok’king’ol", "Ojola", "Opedel", "Osokosokoma", "Otibar", "Olabor", "Opoo"}, + {"Taparachu", "Ebongi"}, +} + +var localeTableTg = [5][]string{ + {"Яшб", "Дшб", "Сшб", "Чшб", "Пшб", "Ҷмъ", "Шнб"}, + {"Якшанбе", "Душанбе", "Сешанбе", "Чоршанбе", "Панҷшанбе", "Ҷумъа", "Шанбе"}, + {"Янв", "Фев", "Мар", "Апр", "Май", "Июн", "Июл", "Авг", "Сен", "Окт", "Ноя", "Дек"}, + {"Январ", "Феврал", "Март", "Апрел", "Май", "Июн", "Июл", "Август", "Сентябр", "Октябр", "Ноябр", "Декабр"}, + {}, +} + +var localeTableTgTJ = [5][]string{ + {"Яшб", "Дшб", "Сшб", "Чшб", "Пшб", "Ҷмъ", "Шнб"}, + {"Якшанбе", "Душанбе", "Сешанбе", "Чоршанбе", "Панҷшанбе", "Ҷумъа", "Шанбе"}, + {"Янв", "Фев", "Мар", "Апр", "Май", "Июн", "Июл", "Авг", "Сен", "Окт", "Ноя", "Дек"}, + {"Январ", "Феврал", "Март", "Апрел", "Май", "Июн", "Июл", "Август", "Сентябр", "Октябр", "Ноябр", "Декабр"}, + {}, +} + +var localeTableTh = [5][]string{ + {"อา.", "จ.", "อ.", "พ.", "พฤ.", "ศ.", "ส."}, + {"วันอาทิตย์", "วันจันทร์", "วันอังคาร", "วันพุธ", "วันพฤหัสบดี", "วันศุกร์", "วันเสาร์"}, + {"ม.ค.", "ก.พ.", "มี.ค.", "เม.ย.", "พ.ค.", "มิ.ย.", "ก.ค.", "ส.ค.", "ก.ย.", "ต.ค.", "พ.ย.", "ธ.ค."}, + {"มกราคม", "กุมภาพันธ์", "มีนาคม", "เมษายน", "พฤษภาคม", "มิถุนายน", "กรกฎาคม", "สิงหาคม", "กันยายน", "ตุลาคม", "พฤศจิกายน", "ธันวาคม"}, + {"a", "p"}, +} + +var localeTableThTH = [5][]string{ + {"อา.", "จ.", "อ.", "พ.", "พฤ.", "ศ.", "ส."}, + {"วันอาทิตย์", "วันจันทร์", "วันอังคาร", "วันพุธ", "วันพฤหัสบดี", "วันศุกร์", "วันเสาร์"}, + {"ม.ค.", "ก.พ.", "มี.ค.", "เม.ย.", "พ.ค.", "มิ.ย.", "ก.ค.", "ส.ค.", "ก.ย.", "ต.ค.", "พ.ย.", "ธ.ค."}, + {"มกราคม", "กุมภาพันธ์", "มีนาคม", "เมษายน", "พฤษภาคม", "มิถุนายน", "กรกฎาคม", "สิงหาคม", "กันยายน", "ตุลาคม", "พฤศจิกายน", "ธันวาคม"}, + {"a", "p"}, +} + +var localeTableTi = [5][]string{ + {"ሰን", "ሰኑ", "ሰሉ", "ረቡ", "ሓሙ", "ዓር", "ቀዳ"}, + {"ሰንበት", "ሰኑይ", "ሰሉስ", "ረቡዕ", "ሓሙስ", "ዓርቢ", "ቀዳም"}, + {"ጥሪ", "ለካ", "መጋ", "ሚያ", "ግን", "ሰነ", "ሓም", "ነሓ", "መስ", "ጥቅ", "ሕዳ", "ታሕ"}, + {"ጥሪ", "ለካቲት", "መጋቢት", "ሚያዝያ", "ግንቦት", "ሰነ", "ሓምለ", "ነሓሰ", 
"መስከረም", "ጥቅምቲ", "ሕዳር", "ታሕሳስ"}, + {"ቅ.ቀ.", "ድ.ቀ."}, +} + +var localeTableTiER = [5][]string{ + {"ሰን", "ሰኑ", "ሰሉ", "ረቡ", "ሓሙ", "ዓር", "ቀዳ"}, + {"ሰንበት", "ሰኑይ", "ሰሉስ", "ረቡዕ", "ሓሙስ", "ዓርቢ", "ቀዳም"}, + {"ጥሪ", "ለካ", "መጋ", "ሚያ", "ግን", "ሰነ", "ሓም", "ነሓ", "መስ", "ጥቅ", "ሕዳ", "ታሕ"}, + {"ጥሪ", "ለካቲት", "መጋቢት", "ሚያዝያ", "ግንቦት", "ሰነ", "ሓምለ", "ነሓሰ", "መስከረም", "ጥቅምቲ", "ሕዳር", "ታሕሳስ"}, + {"ቅ.ቀ.", "ድ.ቀ."}, +} + +var localeTableTiET = [5][]string{ + {"ሰን", "ሰኑ", "ሰሉ", "ረቡ", "ሓሙ", "ዓር", "ቀዳ"}, + {"ሰንበት", "ሰኑይ", "ሰሉስ", "ረቡዕ", "ሓሙስ", "ዓርቢ", "ቀዳም"}, + {"ጥሪ", "ለካ", "መጋ", "ሚያ", "ግን", "ሰነ", "ሓም", "ነሓ", "መስ", "ጥቅ", "ሕዳ", "ታሕ"}, + {"ጥሪ", "ለካቲት", "መጋቢት", "ሚያዝያ", "ግንቦት", "ሰነ", "ሓምለ", "ነሓሰ", "መስከረም", "ጥቅምቲ", "ሕዳር", "ታሕሳስ"}, + {"ቅ.ቀ.", "ድ.ቀ."}, +} + +var localeTableTig = [5][]string{ + {"ሰ/ዓ", "ሰኖ", "ታላሸ", "ኣረር", "ከሚሽ", "ጅምዓ", "ሰ/ን"}, + {"ሰንበት ዓባይ", "ሰኖ", "ታላሸኖ", "ኣረርባዓ", "ከሚሽ", "ጅምዓት", "ሰንበት ንኢሽ"}, + {"ጃንዩ", "ፌብሩ", "ማርች", "ኤፕረ", "ሜይ", "ጁን", "ጁላይ", "ኦገስ", "ሴፕቴ", "ኦክተ", "ኖቬም", "ዲሴም"}, + {"ጃንዩወሪ", "ፌብሩወሪ", "ማርች", "ኤፕረል", "ሜይ", "ጁን", "ጁላይ", "ኦገስት", "ሴፕቴምበር", "ኦክተውበር", "ኖቬምበር", "ዲሴምበር"}, + {"ቀደምሰርምዕል", "ሓቆስርምዕል"}, +} + +var localeTableTigER = [5][]string{ + {"ሰ/ዓ", "ሰኖ", "ታላሸ", "ኣረር", "ከሚሽ", "ጅምዓ", "ሰ/ን"}, + {"ሰንበት ዓባይ", "ሰኖ", "ታላሸኖ", "ኣረርባዓ", "ከሚሽ", "ጅምዓት", "ሰንበት ንኢሽ"}, + {"ጃንዩ", "ፌብሩ", "ማርች", "ኤፕረ", "ሜይ", "ጁን", "ጁላይ", "ኦገስ", "ሴፕቴ", "ኦክተ", "ኖቬም", "ዲሴም"}, + {"ጃንዩወሪ", "ፌብሩወሪ", "ማርች", "ኤፕረል", "ሜይ", "ጁን", "ጁላይ", "ኦገስት", "ሴፕቴምበር", "ኦክተውበር", "ኖቬምበር", "ዲሴምበር"}, + {"ቀደምሰርምዕል", "ሓቆስርምዕል"}, +} + +var localeTableTk = [5][]string{ + {"ýek", "duş", "siş", "çar", "pen", "ann", "şen"}, + {"ýekşenbe", "duşenbe", "sişenbe", "çarşenbe", "penşenbe", "anna", "şenbe"}, + {"ýan", "few", "mart", "apr", "maý", "iýun", "iýul", "awg", "sen", "okt", "noý", "dek"}, + {"ýanwar", "fewral", "mart", "aprel", "maý", "iýun", "iýul", "awgust", "sentýabr", "oktýabr", "noýabr", "dekabr"}, + {"go.öň", "go.soň"}, +} + +var localeTableTkTM = [5][]string{ + {"ýek", "duş", "siş", "çar", "pen", "ann", "şen"}, + {"ýekşenbe", "duşenbe", "sişenbe", "çarşenbe", "penşenbe", "anna", "şenbe"}, + {"ýan", "few", "mart", "apr", "maý", "iýun", "iýul", "awg", "sen", "okt", "noý", "dek"}, + {"ýanwar", "fewral", "mart", "aprel", "maý", "iýun", "iýul", "awgust", "sentýabr", "oktýabr", "noýabr", "dekabr"}, + {"go.öň", "go.soň"}, +} + +var localeTableTn = [5][]string{ + {"Tsh", "Mos", "Labb", "Labr", "Labn", "Labt", "Mat"}, + {"Tshipi", "Mosopulogo", "Labobedi", "Laboraro", "Labone", "Labotlhano", "Matlhatso"}, + {"Fer", "Tlh", "Mop", "Mor", "Mot", "See", "Phu", "Pha", "Lwe", "Dip", "Ngw", "Sed"}, + {"Ferikgong", "Tlhakole", "Mopitlo", "Moranang", "Motsheganang", "Seetebosigo", "Phukwi", "Phatwe", "Lwetse", "Diphalane", "Ngwanatsele", "Sedimonthole"}, + {"a", "p"}, +} + +var localeTableTnBW = [5][]string{ + {"Tsh", "Mos", "Labb", "Labr", "Labn", "Labt", "Mat"}, + {"Tshipi", "Mosopulogo", "Labobedi", "Laboraro", "Labone", "Labotlhano", "Matlhatso"}, + {"Fer", "Tlh", "Mop", "Mor", "Mot", "See", "Phu", "Pha", "Lwe", "Dip", "Ngw", "Sed"}, + {"Ferikgong", "Tlhakole", "Mopitlo", "Moranang", "Motsheganang", "Seetebosigo", "Phukwi", "Phatwe", "Lwetse", "Diphalane", "Ngwanatsele", "Sedimonthole"}, + {"a", "p"}, +} + +var localeTableTnZA = [5][]string{ + {"Tsh", "Mos", "Labb", "Labr", "Labn", "Labt", "Mat"}, + {"Tshipi", "Mosopulogo", "Labobedi", "Laboraro", "Labone", "Labotlhano", "Matlhatso"}, + {"Fer", "Tlh", "Mop", "Mor", "Mot", "See", "Phu", "Pha", "Lwe", "Dip", "Ngw", "Sed"}, + {"Ferikgong", "Tlhakole", "Mopitlo", "Moranang", "Motsheganang", "Seetebosigo", "Phukwi", "Phatwe", "Lwetse", 
"Diphalane", "Ngwanatsele", "Sedimonthole"}, + {"a", "p"}, +} + +var localeTableTo = [5][]string{ + {"Sāp", "Mōn", "Tūs", "Pul", "Tuʻa", "Fal", "Tok"}, + {"Sāpate", "Mōnite", "Tūsite", "Pulelulu", "Tuʻapulelulu", "Falaite", "Tokonaki"}, + {"Sān", "Fēp", "Maʻa", "ʻEpe", "Mē", "Sun", "Siu", "ʻAok", "Sēp", "ʻOka", "Nōv", "Tīs"}, + {"Sānuali", "Fēpueli", "Maʻasi", "ʻEpeleli", "Mē", "Sune", "Siulai", "ʻAokosi", "Sēpitema", "ʻOkatopa", "Nōvema", "Tīsema"}, + {"HH", "EA"}, +} + +var localeTableToTO = [5][]string{ + {"Sāp", "Mōn", "Tūs", "Pul", "Tuʻa", "Fal", "Tok"}, + {"Sāpate", "Mōnite", "Tūsite", "Pulelulu", "Tuʻapulelulu", "Falaite", "Tokonaki"}, + {"Sān", "Fēp", "Maʻa", "ʻEpe", "Mē", "Sun", "Siu", "ʻAok", "Sēp", "ʻOka", "Nōv", "Tīs"}, + {"Sānuali", "Fēpueli", "Maʻasi", "ʻEpeleli", "Mē", "Sune", "Siulai", "ʻAokosi", "Sēpitema", "ʻOkatopa", "Nōvema", "Tīsema"}, + {"HH", "EA"}, +} + +var localeTableTok = [5][]string{ + {}, + {"suno esun #7", "suno esun #1", "suno esun #2", "suno esun #3", "suno esun #4", "suno esun #5", "suno esun #6"}, + {}, + {"mun #1", "mun #2", "mun #3", "mun #4", "mun #5", "mun #6", "mun #7", "mun #8", "mun #9", "mun #10", "mun #11", "mun #12"}, + {}, +} + +var localeTableTok001 = [5][]string{ + {}, + {"suno esun #7", "suno esun #1", "suno esun #2", "suno esun #3", "suno esun #4", "suno esun #5", "suno esun #6"}, + {}, + {"mun #1", "mun #2", "mun #3", "mun #4", "mun #5", "mun #6", "mun #7", "mun #8", "mun #9", "mun #10", "mun #11", "mun #12"}, + {}, +} + +var localeTableTpi = [5][]string{ + {"San", "Man", "Tun", "Tri", "Fon", "Fra", "Sar"}, + {"Sande", "Mande", "Tunde", "Trinde", "Fonde", "Fraide", "Sarere"}, + {"Jan", "Feb", "Mas", "Epr", "Me", "Jun", "Jul", "Oga", "Sep", "Okt", "Nov", "Des"}, + {"Janueri", "Februeri", "Mas", "Epril", "Me", "Jun", "Julai", "Ogas", "Septemba", "Oktoba", "Novemba", "Desemba"}, + {}, +} + +var localeTableTpiPG = [5][]string{ + {"San", "Man", "Tun", "Tri", "Fon", "Fra", "Sar"}, + {"Sande", "Mande", "Tunde", "Trinde", "Fonde", "Fraide", "Sarere"}, + {"Jan", "Feb", "Mas", "Epr", "Me", "Jun", "Jul", "Oga", "Sep", "Okt", "Nov", "Des"}, + {"Janueri", "Februeri", "Mas", "Epril", "Me", "Jun", "Julai", "Ogas", "Septemba", "Oktoba", "Novemba", "Desemba"}, + {}, +} + +var localeTableTr = [5][]string{ + {"Paz", "Pzt", "Sal", "Çar", "Per", "Cum", "Cmt"}, + {"Pazar", "Pazartesi", "Salı", "Çarşamba", "Perşembe", "Cuma", "Cumartesi"}, + {"Oca", "Şub", "Mar", "Nis", "May", "Haz", "Tem", "Ağu", "Eyl", "Eki", "Kas", "Ara"}, + {"Ocak", "Şubat", "Mart", "Nisan", "Mayıs", "Haziran", "Temmuz", "Ağustos", "Eylül", "Ekim", "Kasım", "Aralık"}, + {"ÖÖ", "ÖS"}, +} + +var localeTableTrCY = [5][]string{ + {"Paz", "Pzt", "Sal", "Çar", "Per", "Cum", "Cmt"}, + {"Pazar", "Pazartesi", "Salı", "Çarşamba", "Perşembe", "Cuma", "Cumartesi"}, + {"Oca", "Şub", "Mar", "Nis", "May", "Haz", "Tem", "Ağu", "Eyl", "Eki", "Kas", "Ara"}, + {"Ocak", "Şubat", "Mart", "Nisan", "Mayıs", "Haziran", "Temmuz", "Ağustos", "Eylül", "Ekim", "Kasım", "Aralık"}, + {"ÖÖ", "ÖS"}, +} + +var localeTableTrTR = [5][]string{ + {"Paz", "Pzt", "Sal", "Çar", "Per", "Cum", "Cmt"}, + {"Pazar", "Pazartesi", "Salı", "Çarşamba", "Perşembe", "Cuma", "Cumartesi"}, + {"Oca", "Şub", "Mar", "Nis", "May", "Haz", "Tem", "Ağu", "Eyl", "Eki", "Kas", "Ara"}, + {"Ocak", "Şubat", "Mart", "Nisan", "Mayıs", "Haziran", "Temmuz", "Ağustos", "Eylül", "Ekim", "Kasım", "Aralık"}, + {"ÖÖ", "ÖS"}, +} + +var localeTableTrv = [5][]string{ + {"Emp", "Kin", "Dha", "Tru", "Spa", "Rim", "Mat"}, + {"Jiyax sngayan", "tgKingal jiyax iyax 
sngayan", "tgDha jiyax iyax sngayan", "tgTru jiyax iyax sngayan", "tgSpac jiyax iyax sngayan", "tgRima jiyax iyax sngayan", "tgMataru jiyax iyax sngayan"}, + {"Kii", "Dhi", "Tri", "Spi", "Rii", "Mti", "Emi", "Mai", "Mni", "Mxi", "Mxk", "Mxd"}, + {"Kingal idas", "Dha idas", "Tru idas", "Spat idas", "Rima idas", "Mataru idas", "Empitu idas", "Maspat idas", "Mngari idas", "Maxal idas", "Maxal kingal idas", "Maxal dha idas"}, + {}, +} + +var localeTableTrvTW = [5][]string{ + {"Emp", "Kin", "Dha", "Tru", "Spa", "Rim", "Mat"}, + {"Jiyax sngayan", "tgKingal jiyax iyax sngayan", "tgDha jiyax iyax sngayan", "tgTru jiyax iyax sngayan", "tgSpac jiyax iyax sngayan", "tgRima jiyax iyax sngayan", "tgMataru jiyax iyax sngayan"}, + {"Kii", "Dhi", "Tri", "Spi", "Rii", "Mti", "Emi", "Mai", "Mni", "Mxi", "Mxk", "Mxd"}, + {"Kingal idas", "Dha idas", "Tru idas", "Spat idas", "Rima idas", "Mataru idas", "Empitu idas", "Maspat idas", "Mngari idas", "Maxal idas", "Maxal kingal idas", "Maxal dha idas"}, + {}, +} + +var localeTableTrw = [5][]string{ + {}, + {"ایکشیمے", "دُوشیمے", "گھن آنگا", "چارشیمے", "پَئ شیمے", "شُوگار", "لَو آنگا"}, + {}, + {"جنوری", "فروری", "مارچ", "اپریل", "مئ", "جون", "جولائی", "اگست", "ستمبر", "اکتوبر", "نومبر", "دسمبر"}, + {"a", "p"}, +} + +var localeTableTrwPK = [5][]string{ + {}, + {"ایکشیمے", "دُوشیمے", "گھن آنگا", "چارشیمے", "پَئ شیمے", "شُوگار", "لَو آنگا"}, + {}, + {"جنوری", "فروری", "مارچ", "اپریل", "مئ", "جون", "جولائی", "اگست", "ستمبر", "اکتوبر", "نومبر", "دسمبر"}, + {"a", "p"}, +} + +var localeTableTs = [5][]string{ + {"Son", "Mus", "Bir", "Har", "Ne", "Tlh", "Mug"}, + {"Sonta", "Musumbhunuku", "Ravumbirhi", "Ravunharhu", "Ravumune", "Ravuntlhanu", "Mugqivela"}, + {"Sun", "Yan", "Kul", "Dzi", "Mud", "Kho", "Maw", "Mha", "Ndz", "Nhl", "Huk", "N’w"}, + {"Sunguti", "Nyenyenyani", "Nyenyankulu", "Dzivamisoko", "Mudyaxihi", "Khotavuxika", "Mawuwani", "Mhawuri", "Ndzhati", "Nhlangula", "Hukuri", "N’wendzamhala"}, + {}, +} + +var localeTableTsZA = [5][]string{ + {"Son", "Mus", "Bir", "Har", "Ne", "Tlh", "Mug"}, + {"Sonta", "Musumbhunuku", "Ravumbirhi", "Ravunharhu", "Ravumune", "Ravuntlhanu", "Mugqivela"}, + {"Sun", "Yan", "Kul", "Dzi", "Mud", "Kho", "Maw", "Mha", "Ndz", "Nhl", "Huk", "N’w"}, + {"Sunguti", "Nyenyenyani", "Nyenyankulu", "Dzivamisoko", "Mudyaxihi", "Khotavuxika", "Mawuwani", "Mhawuri", "Ndzhati", "Nhlangula", "Hukuri", "N’wendzamhala"}, + {}, +} + +var localeTableTt = [5][]string{ + {"якш.", "дүш.", "сиш.", "чәр.", "пәнҗ.", "җом.", "шим."}, + {"якшәмбе", "дүшәмбе", "сишәмбе", "чәршәмбе", "пәнҗешәмбе", "җомга", "шимбә"}, + {"гыйн.", "фев.", "мар.", "апр.", "май", "июнь", "июль", "авг.", "сент.", "окт.", "нояб.", "дек."}, + {"гыйнвар", "февраль", "март", "апрель", "май", "июнь", "июль", "август", "сентябрь", "октябрь", "ноябрь", "декабрь"}, + {}, +} + +var localeTableTtRU = [5][]string{ + {"якш.", "дүш.", "сиш.", "чәр.", "пәнҗ.", "җом.", "шим."}, + {"якшәмбе", "дүшәмбе", "сишәмбе", "чәршәмбе", "пәнҗешәмбе", "җомга", "шимбә"}, + {"гыйн.", "фев.", "мар.", "апр.", "май", "июнь", "июль", "авг.", "сент.", "окт.", "нояб.", "дек."}, + {"гыйнвар", "февраль", "март", "апрель", "май", "июнь", "июль", "август", "сентябрь", "октябрь", "ноябрь", "декабрь"}, + {}, +} + +var localeTableTwq = [5][]string{ + {"Alh", "Ati", "Ata", "Ala", "Alm", "Alz", "Asi"}, + {"Alhadi", "Atinni", "Atalaata", "Alarba", "Alhamiisa", "Alzuma", "Asibti"}, + {"Žan", "Fee", "Mar", "Awi", "Me", "Žuw", "Žuy", "Ut", "Sek", "Okt", "Noo", "Dee"}, + {"Žanwiye", "Feewiriye", "Marsi", "Awiril", "Me", "Žuweŋ", "Žuyye", 
"Ut", "Sektanbur", "Oktoobur", "Noowanbur", "Deesanbur"}, + {"Subbaahi", "Zaarikayb"}, +} + +var localeTableTwqNE = [5][]string{ + {"Alh", "Ati", "Ata", "Ala", "Alm", "Alz", "Asi"}, + {"Alhadi", "Atinni", "Atalaata", "Alarba", "Alhamiisa", "Alzuma", "Asibti"}, + {"Žan", "Fee", "Mar", "Awi", "Me", "Žuw", "Žuy", "Ut", "Sek", "Okt", "Noo", "Dee"}, + {"Žanwiye", "Feewiriye", "Marsi", "Awiril", "Me", "Žuweŋ", "Žuyye", "Ut", "Sektanbur", "Oktoobur", "Noowanbur", "Deesanbur"}, + {"Subbaahi", "Zaarikayb"}, +} + +var localeTableTzm = [5][]string{ + {"Asa", "Ayn", "Asn", "Akr", "Akw", "Asm", "Asḍ"}, + {"Asamas", "Aynas", "Asinas", "Akras", "Akwas", "Asimwas", "Asiḍyas"}, + {"Yen", "Yeb", "Mar", "Ibr", "May", "Yun", "Yul", "Ɣuc", "Cut", "Kṭu", "Nwa", "Duj"}, + {"Yennayer", "Yebrayer", "Mars", "Ibrir", "Mayyu", "Yunyu", "Yulyuz", "Ɣuct", "Cutanbir", "Kṭuber", "Nwanbir", "Dujanbir"}, + {"Zdatazal", "Ḍeffiraza"}, +} + +var localeTableTzmMA = [5][]string{ + {"Asa", "Ayn", "Asn", "Akr", "Akw", "Asm", "Asḍ"}, + {"Asamas", "Aynas", "Asinas", "Akras", "Akwas", "Asimwas", "Asiḍyas"}, + {"Yen", "Yeb", "Mar", "Ibr", "May", "Yun", "Yul", "Ɣuc", "Cut", "Kṭu", "Nwa", "Duj"}, + {"Yennayer", "Yebrayer", "Mars", "Ibrir", "Mayyu", "Yunyu", "Yulyuz", "Ɣuct", "Cutanbir", "Kṭuber", "Nwanbir", "Dujanbir"}, + {"Zdatazal", "Ḍeffiraza"}, +} + +var localeTableUg = [5][]string{ + {"يە", "دۈ", "سە", "چا", "پە", "جۈ", "شە"}, + {"يەكشەنبە", "دۈشەنبە", "سەيشەنبە", "چارشەنبە", "پەيشەنبە", "جۈمە", "شەنبە"}, + {}, + {"يانۋار", "فېۋرال", "مارت", "ئاپرېل", "ماي", "ئىيۇن", "ئىيۇل", "ئاۋغۇست", "سېنتەبىر", "ئۆكتەبىر", "نويابىر", "دېكابىر"}, + {"چ.ب", "چ.ك"}, +} + +var localeTableUgCN = [5][]string{ + {"يە", "دۈ", "سە", "چا", "پە", "جۈ", "شە"}, + {"يەكشەنبە", "دۈشەنبە", "سەيشەنبە", "چارشەنبە", "پەيشەنبە", "جۈمە", "شەنبە"}, + {}, + {"يانۋار", "فېۋرال", "مارت", "ئاپرېل", "ماي", "ئىيۇن", "ئىيۇل", "ئاۋغۇست", "سېنتەبىر", "ئۆكتەبىر", "نويابىر", "دېكابىر"}, + {"چ.ب", "چ.ك"}, +} + +var localeTableUk = [5][]string{ + {"нд", "пн", "вт", "ср", "чт", "пт", "сб"}, + {"неділю", "понеділок", "вівторок", "середу", "четвер", "пʼятницю", "суботу"}, + {"січ.", "лют.", "бер.", "квіт.", "трав.", "черв.", "лип.", "серп.", "вер.", "жовт.", "лист.", "груд."}, + {"січня", "лютого", "березня", "квітня", "травня", "червня", "липня", "серпня", "вересня", "жовтня", "листопада", "грудня"}, + {"дп", "пп"}, +} + +var localeTableUkUA = [5][]string{ + {"нд", "пн", "вт", "ср", "чт", "пт", "сб"}, + {"неділю", "понеділок", "вівторок", "середу", "четвер", "пʼятницю", "суботу"}, + {"січ.", "лют.", "бер.", "квіт.", "трав.", "черв.", "лип.", "серп.", "вер.", "жовт.", "лист.", "груд."}, + {"січня", "лютого", "березня", "квітня", "травня", "червня", "липня", "серпня", "вересня", "жовтня", "листопада", "грудня"}, + {"дп", "пп"}, +} + +var localeTableUnd = [5][]string{ + {}, + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {}, + {"M01", "M02", "M03", "M04", "M05", "M06", "M07", "M08", "M09", "M10", "M11", "M12"}, + {"AM", "PM"}, +} + +var localeTableUr = [5][]string{ + {}, + {"اتوار", "پیر", "منگل", "بدھ", "جمعرات", "جمعہ", "ہفتہ"}, + {}, + {"جنوری", "فروری", "مارچ", "اپریل", "مئی", "جون", "جولائی", "اگست", "ستمبر", "اکتوبر", "نومبر", "دسمبر"}, + {"a", "p"}, +} + +var localeTableUrIN = [5][]string{ + {}, + {"اتوار", "پیر", "منگل", "بدھ", "جمعرات", "جمعہ", "ہفتہ"}, + {}, + {"جنوری", "فروری", "مارچ", "اپریل", "مئی", "جون", "جولائی", "اگست", "ستمبر", "اکتوبر", "نومبر", "دسمبر"}, + {"a", "p"}, +} + +var localeTableUrPK = [5][]string{ + {}, + {"اتوار", "پیر", "منگل", "بدھ", 
"جمعرات", "جمعہ", "ہفتہ"}, + {}, + {"جنوری", "فروری", "مارچ", "اپریل", "مئی", "جون", "جولائی", "اگست", "ستمبر", "اکتوبر", "نومبر", "دسمبر"}, + {"a", "p"}, +} + +var localeTableUz = [5][]string{ + {"Yak", "Dush", "Sesh", "Chor", "Pay", "Jum", "Shan"}, + {"yakshanba", "dushanba", "seshanba", "chorshanba", "payshanba", "juma", "shanba"}, + {"yan", "fev", "mar", "apr", "may", "iyn", "iyl", "avg", "sen", "okt", "noy", "dek"}, + {"yanvar", "fevral", "mart", "aprel", "may", "iyun", "iyul", "avgust", "sentabr", "oktabr", "noyabr", "dekabr"}, + {"TO", "TK"}, +} + +var localeTableUzArab = [5][]string{ + {"ی.", "د.", "س.", "چ.", "پ.", "ج.", "ش."}, + {"یکشنبه", "دوشنبه", "سه‌شنبه", "چهارشنبه", "پنجشنبه", "جمعه", "شنبه"}, + {"جنو", "فبر", "مار", "اپر", "می", "جون", "جول", "اگس", "سپت", "اکت", "نوم", "دسم"}, + {"جنوری", "فبروری", "مارچ", "اپریل", "می", "جون", "جولای", "اگست", "سپتمبر", "اکتوبر", "نومبر", "دسمبر"}, + {}, +} + +var localeTableUzArabAF = [5][]string{ + {"ی.", "د.", "س.", "چ.", "پ.", "ج.", "ش."}, + {"یکشنبه", "دوشنبه", "سه‌شنبه", "چهارشنبه", "پنجشنبه", "جمعه", "شنبه"}, + {"جنو", "فبر", "مار", "اپر", "می", "جون", "جول", "اگس", "سپت", "اکت", "نوم", "دسم"}, + {"جنوری", "فبروری", "مارچ", "اپریل", "می", "جون", "جولای", "اگست", "سپتمبر", "اکتوبر", "نومبر", "دسمبر"}, + {}, +} + +var localeTableUzCyrl = [5][]string{ + {"якш", "душ", "сеш", "чор", "пай", "жум", "шан"}, + {"якшанба", "душанба", "сешанба", "чоршанба", "пайшанба", "жума", "шанба"}, + {"янв", "фев", "мар", "апр", "май", "июн", "июл", "авг", "сен", "окт", "ноя", "дек"}, + {"январ", "феврал", "март", "апрел", "май", "июн", "июл", "август", "сентябр", "октябр", "ноябр", "декабр"}, + {"ТО", "ТК"}, +} + +var localeTableUzCyrlUZ = [5][]string{ + {"якш", "душ", "сеш", "чор", "пай", "жум", "шан"}, + {"якшанба", "душанба", "сешанба", "чоршанба", "пайшанба", "жума", "шанба"}, + {"янв", "фев", "мар", "апр", "май", "июн", "июл", "авг", "сен", "окт", "ноя", "дек"}, + {"январ", "феврал", "март", "апрел", "май", "июн", "июл", "август", "сентябр", "октябр", "ноябр", "декабр"}, + {"ТО", "ТК"}, +} + +var localeTableUzLatn = [5][]string{ + {"Yak", "Dush", "Sesh", "Chor", "Pay", "Jum", "Shan"}, + {"yakshanba", "dushanba", "seshanba", "chorshanba", "payshanba", "juma", "shanba"}, + {"yan", "fev", "mar", "apr", "may", "iyn", "iyl", "avg", "sen", "okt", "noy", "dek"}, + {"yanvar", "fevral", "mart", "aprel", "may", "iyun", "iyul", "avgust", "sentabr", "oktabr", "noyabr", "dekabr"}, + {"TO", "TK"}, +} + +var localeTableUzLatnUZ = [5][]string{ + {"Yak", "Dush", "Sesh", "Chor", "Pay", "Jum", "Shan"}, + {"yakshanba", "dushanba", "seshanba", "chorshanba", "payshanba", "juma", "shanba"}, + {"yan", "fev", "mar", "apr", "may", "iyn", "iyl", "avg", "sen", "okt", "noy", "dek"}, + {"yanvar", "fevral", "mart", "aprel", "may", "iyun", "iyul", "avgust", "sentabr", "oktabr", "noyabr", "dekabr"}, + {"TO", "TK"}, +} + +var localeTableVai = [5][]string{ + {}, + {"ꕞꕌꔵ", "ꗳꗡꘉ", "ꕚꕞꕚ", "ꕉꕞꕒ", "ꕉꔤꕆꕢ", "ꕉꔤꕀꕮ", "ꔻꔬꔳ"}, + {"ꖨꖕꔞ", "ꕒꕡ", "ꕾꖺ", "ꖢꖕ", "ꖑꕱ", "ꖱꘋ", "ꖱꕞ", "ꗛꔕ", "ꕢꕌ", "ꕭꖃ", "ꔞꘋ", "ꖨꖕꗏ"}, + {"ꖨꖕ ꕪꕴ ꔞꔀꕮꕊ", "ꕒꕡꖝꖕ", "ꕾꖺ", "ꖢꖕ", "ꖑꕱ", "ꖱꘋ", "ꖱꕞꔤ", "ꗛꔕ", "ꕢꕌ", "ꕭꖃ", "ꔞꘋꕔꕿ ꕸꖃꗏ", "ꖨꖕ ꕪꕴ ꗏꖺꕮꕊ"}, + {}, +} + +var localeTableVaiLatn = [5][]string{ + {}, + {"lahadi", "tɛɛnɛɛ", "talata", "alaba", "aimisa", "aijima", "siɓiti"}, + {}, + {}, + {}, +} + +var localeTableVaiLatnLR = [5][]string{ + {}, + {"lahadi", "tɛɛnɛɛ", "talata", "alaba", "aimisa", "aijima", "siɓiti"}, + {}, + {}, + {}, +} + +var localeTableVaiVaii = [5][]string{ + {}, + {"ꕞꕌꔵ", "ꗳꗡꘉ", "ꕚꕞꕚ", "ꕉꕞꕒ", "ꕉꔤꕆꕢ", "ꕉꔤꕀꕮ", "ꔻꔬꔳ"}, + 
{"ꖨꖕꔞ", "ꕒꕡ", "ꕾꖺ", "ꖢꖕ", "ꖑꕱ", "ꖱꘋ", "ꖱꕞ", "ꗛꔕ", "ꕢꕌ", "ꕭꖃ", "ꔞꘋ", "ꖨꖕꗏ"}, + {"ꖨꖕ ꕪꕴ ꔞꔀꕮꕊ", "ꕒꕡꖝꖕ", "ꕾꖺ", "ꖢꖕ", "ꖑꕱ", "ꖱꘋ", "ꖱꕞꔤ", "ꗛꔕ", "ꕢꕌ", "ꕭꖃ", "ꔞꘋꕔꕿ ꕸꖃꗏ", "ꖨꖕ ꕪꕴ ꗏꖺꕮꕊ"}, + {}, +} + +var localeTableVaiVaiiLR = [5][]string{ + {}, + {"ꕞꕌꔵ", "ꗳꗡꘉ", "ꕚꕞꕚ", "ꕉꕞꕒ", "ꕉꔤꕆꕢ", "ꕉꔤꕀꕮ", "ꔻꔬꔳ"}, + {"ꖨꖕꔞ", "ꕒꕡ", "ꕾꖺ", "ꖢꖕ", "ꖑꕱ", "ꖱꘋ", "ꖱꕞ", "ꗛꔕ", "ꕢꕌ", "ꕭꖃ", "ꔞꘋ", "ꖨꖕꗏ"}, + {"ꖨꖕ ꕪꕴ ꔞꔀꕮꕊ", "ꕒꕡꖝꖕ", "ꕾꖺ", "ꖢꖕ", "ꖑꕱ", "ꖱꘋ", "ꖱꕞꔤ", "ꗛꔕ", "ꕢꕌ", "ꕭꖃ", "ꔞꘋꕔꕿ ꕸꖃꗏ", "ꖨꖕ ꕪꕴ ꗏꖺꕮꕊ"}, + {}, +} + +var localeTableVe = [5][]string{ + {"Swo", "Mus", "Vhi", "Rar", "Ṋa", "Ṱan", "Mug"}, + {"Swondaha", "Musumbuluwo", "Ḽavhuvhili", "Ḽavhuraru", "Ḽavhuṋa", "Ḽavhuṱanu", "Mugivhela"}, + {"Pha", "Luh", "Ṱhf", "Lam", "Shu", "Lwi", "Lwa", "Ṱha", "Khu", "Tsh", "Ḽar", "Nye"}, + {"Phando", "Luhuhi", "Ṱhafamuhwe", "Lambamai", "Shundunthule", "Fulwi", "Fulwana", "Ṱhangule", "Khubvumedzi", "Tshimedzi", "Ḽara", "Nyendavhusiku"}, + {}, +} + +var localeTableVeZA = [5][]string{ + {"Swo", "Mus", "Vhi", "Rar", "Ṋa", "Ṱan", "Mug"}, + {"Swondaha", "Musumbuluwo", "Ḽavhuvhili", "Ḽavhuraru", "Ḽavhuṋa", "Ḽavhuṱanu", "Mugivhela"}, + {"Pha", "Luh", "Ṱhf", "Lam", "Shu", "Lwi", "Lwa", "Ṱha", "Khu", "Tsh", "Ḽar", "Nye"}, + {"Phando", "Luhuhi", "Ṱhafamuhwe", "Lambamai", "Shundunthule", "Fulwi", "Fulwana", "Ṱhangule", "Khubvumedzi", "Tshimedzi", "Ḽara", "Nyendavhusiku"}, + {}, +} + +var localeTableVec = [5][]string{ + {"dom", "lun", "mar", "mer", "zob", "vèn", "sab"}, + {"doménega", "luni", "marti", "mèrcore", "zoba", "vènare", "sabo"}, + {"jen", "feb", "mar", "apr", "maj", "jug", "luj", "ago", "set", "oto", "nov", "des"}, + {"jenaro", "febraro", "marso", "aprile", "majo", "jugno", "lujo", "agosto", "setenbre", "otobre", "novenbre", "desenbre"}, + {}, +} + +var localeTableVecIT = [5][]string{ + {"dom", "lun", "mar", "mer", "zob", "vèn", "sab"}, + {"doménega", "luni", "marti", "mèrcore", "zoba", "vènare", "sabo"}, + {"jen", "feb", "mar", "apr", "maj", "jug", "luj", "ago", "set", "oto", "nov", "des"}, + {"jenaro", "febraro", "marso", "aprile", "majo", "jugno", "lujo", "agosto", "setenbre", "otobre", "novenbre", "desenbre"}, + {}, +} + +var localeTableVi = [5][]string{ + {"CN", "Th 2", "Th 3", "Th 4", "Th 5", "Th 6", "Th 7"}, + {"Chủ Nhật", "Thứ Hai", "Thứ Ba", "Thứ Tư", "Thứ Năm", "Thứ Sáu", "Thứ Bảy"}, + {"thg 1", "thg 2", "thg 3", "thg 4", "thg 5", "thg 6", "thg 7", "thg 8", "thg 9", "thg 10", "thg 11", "thg 12"}, + {"tháng 1", "tháng 2", "tháng 3", "tháng 4", "tháng 5", "tháng 6", "tháng 7", "tháng 8", "tháng 9", "tháng 10", "tháng 11", "tháng 12"}, + {"SA", "CH"}, +} + +var localeTableViVN = [5][]string{ + {"CN", "Th 2", "Th 3", "Th 4", "Th 5", "Th 6", "Th 7"}, + {"Chủ Nhật", "Thứ Hai", "Thứ Ba", "Thứ Tư", "Thứ Năm", "Thứ Sáu", "Thứ Bảy"}, + {"thg 1", "thg 2", "thg 3", "thg 4", "thg 5", "thg 6", "thg 7", "thg 8", "thg 9", "thg 10", "thg 11", "thg 12"}, + {"tháng 1", "tháng 2", "tháng 3", "tháng 4", "tháng 5", "tháng 6", "tháng 7", "tháng 8", "tháng 9", "tháng 10", "tháng 11", "tháng 12"}, + {"SA", "CH"}, +} + +var localeTableVmw = [5][]string{ + {}, + {"ettiminku", "nihiku noolempwa", "namaanli", "namararu", "namaxexe", "namathanu", "esaabadu"}, + {}, + {"janeiru", "fevereiru", "marsu", "abril", "maiu", "junyu", "julyu", "agostu", "setembru", "outubru", "novembru", "dezembru"}, + {}, +} + +var localeTableVmwMZ = [5][]string{ + {}, + {"ettiminku", "nihiku noolempwa", "namaanli", "namararu", "namaxexe", "namathanu", "esaabadu"}, + {}, + {"janeiru", "fevereiru", "marsu", "abril", "maiu", "junyu", "julyu", "agostu", "setembru", "outubru", "novembru", 
"dezembru"}, + {}, +} + +var localeTableVo = [5][]string{ + {"su.", "mu.", "tu.", "ve.", "dö.", "fr.", "zä."}, + {"sudel", "mudel", "tudel", "vedel", "dödel", "fridel", "zädel"}, + {"yan", "feb", "mäz", "prl", "may", "yun", "yul", "gst", "set", "ton", "nov", "dek"}, + {"yanul", "febul", "mäzul", "prilul", "mayul", "yunul", "yulul", "gustul", "setul", "tobul", "novul", "dekul"}, + {}, +} + +var localeTableVo001 = [5][]string{ + {"su.", "mu.", "tu.", "ve.", "dö.", "fr.", "zä."}, + {"sudel", "mudel", "tudel", "vedel", "dödel", "fridel", "zädel"}, + {"yan", "feb", "mäz", "prl", "may", "yun", "yul", "gst", "set", "ton", "nov", "dek"}, + {"yanul", "febul", "mäzul", "prilul", "mayul", "yunul", "yulul", "gustul", "setul", "tobul", "novul", "dekul"}, + {}, +} + +var localeTableVun = [5][]string{ + {"Jpi", "Jtt", "Jnn", "Jtn", "Alh", "Iju", "Jmo"}, + {"Jumapilyi", "Jumatatuu", "Jumanne", "Jumatanu", "Alhamisi", "Ijumaa", "Jumamosi"}, + {"Jan", "Feb", "Mac", "Apr", "Mei", "Jun", "Jul", "Ago", "Sep", "Okt", "Nov", "Des"}, + {"Januari", "Februari", "Machi", "Aprilyi", "Mei", "Junyi", "Julyai", "Agusti", "Septemba", "Oktoba", "Novemba", "Desemba"}, + {"utuko", "kyiukonyi"}, +} + +var localeTableVunTZ = [5][]string{ + {"Jpi", "Jtt", "Jnn", "Jtn", "Alh", "Iju", "Jmo"}, + {"Jumapilyi", "Jumatatuu", "Jumanne", "Jumatanu", "Alhamisi", "Ijumaa", "Jumamosi"}, + {"Jan", "Feb", "Mac", "Apr", "Mei", "Jun", "Jul", "Ago", "Sep", "Okt", "Nov", "Des"}, + {"Januari", "Februari", "Machi", "Aprilyi", "Mei", "Junyi", "Julyai", "Agusti", "Septemba", "Oktoba", "Novemba", "Desemba"}, + {"utuko", "kyiukonyi"}, +} + +var localeTableWae = [5][]string{ + {"Sun", "Män", "Ziš", "Mit", "Fró", "Fri", "Sam"}, + {"Sunntag", "Mäntag", "Zištag", "Mittwuč", "Fróntag", "Fritag", "Samštag"}, + {"Jen", "Hor", "Mär", "Abr", "Mei", "Brá", "Hei", "Öig", "Her", "Wím", "Win", "Chr"}, + {"Jenner", "Hornig", "Märze", "Abrille", "Meije", "Bráčet", "Heiwet", "Öigšte", "Herbštmánet", "Wímánet", "Wintermánet", "Chrištmánet"}, + {}, +} + +var localeTableWaeCH = [5][]string{ + {"Sun", "Män", "Ziš", "Mit", "Fró", "Fri", "Sam"}, + {"Sunntag", "Mäntag", "Zištag", "Mittwuč", "Fróntag", "Fritag", "Samštag"}, + {"Jen", "Hor", "Mär", "Abr", "Mei", "Brá", "Hei", "Öig", "Her", "Wím", "Win", "Chr"}, + {"Jenner", "Hornig", "Märze", "Abrille", "Meije", "Bráčet", "Heiwet", "Öigšte", "Herbštmánet", "Wímánet", "Wintermánet", "Chrištmánet"}, + {}, +} + +var localeTableWal = [5][]string{ + {}, + {"ወጋ", "ሳይኖ", "ማቆሳኛ", "አሩዋ", "ሃሙሳ", "አርባ", "ቄራ"}, + {"ጃንዩ", "ፌብሩ", "ማርች", "ኤፕረ", "ሜይ", "ጁን", "ጁላይ", "ኦገስ", "ሴፕቴ", "ኦክተ", "ኖቬም", "ዲሴም"}, + {"ጃንዩወሪ", "ፌብሩወሪ", "ማርች", "ኤፕረል", "ሜይ", "ጁን", "ጁላይ", "ኦገስት", "ሴፕቴምበር", "ኦክተውበር", "ኖቬምበር", "ዲሴምበር"}, + {"ማለዶ", "ቃማ"}, +} + +var localeTableWalET = [5][]string{ + {}, + {"ወጋ", "ሳይኖ", "ማቆሳኛ", "አሩዋ", "ሃሙሳ", "አርባ", "ቄራ"}, + {"ጃንዩ", "ፌብሩ", "ማርች", "ኤፕረ", "ሜይ", "ጁን", "ጁላይ", "ኦገስ", "ሴፕቴ", "ኦክተ", "ኖቬም", "ዲሴም"}, + {"ጃንዩወሪ", "ፌብሩወሪ", "ማርች", "ኤፕረል", "ሜይ", "ጁን", "ጁላይ", "ኦገስት", "ሴፕቴምበር", "ኦክተውበር", "ኖቬምበር", "ዲሴምበር"}, + {"ማለዶ", "ቃማ"}, +} + +var localeTableWo = [5][]string{ + {"Dib", "Alt", "Tal", "Àla", "Alx", "Àjj", "Ase"}, + {"Dibéer", "Altine", "Talaata", "Àlarba", "Alxamis", "Àjjuma", "Aseer"}, + {"Sam", "Few", "Mar", "Awr", "Mee", "Suw", "Sul", "Ut", "Sàt", "Okt", "Now", "Des"}, + {"Samwiyee", "Fewriyee", "Mars", "Awril", "Mee", "Suwe", "Sulet", "Ut", "Sàttumbar", "Oktoobar", "Nowàmbar", "Desàmbar"}, + {"Sub", "Ngo"}, +} + +var localeTableWoSN = [5][]string{ + {"Dib", "Alt", "Tal", "Àla", "Alx", "Àjj", "Ase"}, + {"Dibéer", "Altine", "Talaata", "Àlarba", 
"Alxamis", "Àjjuma", "Aseer"}, + {"Sam", "Few", "Mar", "Awr", "Mee", "Suw", "Sul", "Ut", "Sàt", "Okt", "Now", "Des"}, + {"Samwiyee", "Fewriyee", "Mars", "Awril", "Mee", "Suwe", "Sulet", "Ut", "Sàttumbar", "Oktoobar", "Nowàmbar", "Desàmbar"}, + {"Sub", "Ngo"}, +} + +var localeTableXh = [5][]string{ + {"Caw", "Mvu", "Lwesb", "Tha", "Sin", "Hla", "Mgq"}, + {"Cawe", "Mvulo", "Lwesibini", "Lwesithathu", "Lwesine", "Lwesihlanu", "Mgqibelo"}, + {"Jan", "Feb", "Mat", "Epr", "Mey", "Jun", "Jul", "Aga", "Sept", "Okt", "Nov", "Dis"}, + {"Janyuwari", "Februwari", "Matshi", "Epreli", "Meyi", "Juni", "Julayi", "Agasti", "Septemba", "Okthobha", "Novemba", "Disemba"}, + {}, +} + +var localeTableXhZA = [5][]string{ + {"Caw", "Mvu", "Lwesb", "Tha", "Sin", "Hla", "Mgq"}, + {"Cawe", "Mvulo", "Lwesibini", "Lwesithathu", "Lwesine", "Lwesihlanu", "Mgqibelo"}, + {"Jan", "Feb", "Mat", "Epr", "Mey", "Jun", "Jul", "Aga", "Sept", "Okt", "Nov", "Dis"}, + {"Janyuwari", "Februwari", "Matshi", "Epreli", "Meyi", "Juni", "Julayi", "Agasti", "Septemba", "Okthobha", "Novemba", "Disemba"}, + {}, +} + +var localeTableXnr = [5][]string{ + {"तोआर", "सोआर", "मंगल", "बुध", "वीर", "शुक्कर", "शनि"}, + {"तोआर", "सोआर", "मंगलवार", "बुधवार", "वीरवार", "शुक्करवार", "शनिच्चरवार"}, + {"जन॰", "फ़र॰", "मार्च", "अप्रैल", "मई", "जून", "जुल॰", "अग॰", "सित॰", "अक्तू॰", "नव॰", "दिस॰"}, + {"जनवरी", "फ़रवरी", "मार्च", "अप्रैल", "मई", "जून", "जुलाई", "अगस्त", "सितंबर", "अक्तूबर", "नवंबर", "दिसंबर"}, + {"भ्यागा", "दपेहरा/संजा"}, +} + +var localeTableXnrIN = [5][]string{ + {"तोआर", "सोआर", "मंगल", "बुध", "वीर", "शुक्कर", "शनि"}, + {"तोआर", "सोआर", "मंगलवार", "बुधवार", "वीरवार", "शुक्करवार", "शनिच्चरवार"}, + {"जन॰", "फ़र॰", "मार्च", "अप्रैल", "मई", "जून", "जुल॰", "अग॰", "सित॰", "अक्तू॰", "नव॰", "दिस॰"}, + {"जनवरी", "फ़रवरी", "मार्च", "अप्रैल", "मई", "जून", "जुलाई", "अगस्त", "सितंबर", "अक्तूबर", "नवंबर", "दिसंबर"}, + {"भ्यागा", "दपेहरा/संजा"}, +} + +var localeTableXog = [5][]string{ + {"Sabi", "Bala", "Kubi", "Kusa", "Kuna", "Kuta", "Muka"}, + {"Sabiiti", "Balaza", "Owokubili", "Owokusatu", "Olokuna", "Olokutaanu", "Olomukaaga"}, + {"Jan", "Feb", "Mar", "Apu", "Maa", "Juu", "Jul", "Agu", "Seb", "Oki", "Nov", "Des"}, + {"Janwaliyo", "Febwaliyo", "Marisi", "Apuli", "Maayi", "Juuni", "Julaayi", "Agusito", "Sebuttemba", "Okitobba", "Novemba", "Desemba"}, + {"Munkyo", "Eigulo"}, +} + +var localeTableXogUG = [5][]string{ + {"Sabi", "Bala", "Kubi", "Kusa", "Kuna", "Kuta", "Muka"}, + {"Sabiiti", "Balaza", "Owokubili", "Owokusatu", "Olokuna", "Olokutaanu", "Olomukaaga"}, + {"Jan", "Feb", "Mar", "Apu", "Maa", "Juu", "Jul", "Agu", "Seb", "Oki", "Nov", "Des"}, + {"Janwaliyo", "Febwaliyo", "Marisi", "Apuli", "Maayi", "Juuni", "Julaayi", "Agusito", "Sebuttemba", "Okitobba", "Novemba", "Desemba"}, + {"Munkyo", "Eigulo"}, +} + +var localeTableYav = [5][]string{ + {"sd", "md", "mw", "et", "kl", "fl", "ss"}, + {"sɔ́ndiɛ", "móndie", "muányáŋmóndie", "metúkpíápɛ", "kúpélimetúkpiapɛ", "feléte", "séselé"}, + {"o.1", "o.2", "o.3", "o.4", "o.5", "o.6", "o.7", "o.8", "o.9", "o.10", "o.11", "o.12"}, + {"pikítíkítie, oólí ú kutúan", "siɛyɛ́, oóli ú kándíɛ", "ɔnsúmbɔl, oóli ú kátátúɛ", "mesiŋ, oóli ú kénie", "ensil, oóli ú kátánuɛ", "ɔsɔn", "efute", "pisuyú", "imɛŋ i puɔs", "imɛŋ i putúk,oóli ú kátíɛ", "makandikɛ", "pilɔndɔ́"}, + {"kiɛmɛ́ɛm", "kisɛ́ndɛ"}, +} + +var localeTableYavCM = [5][]string{ + {"sd", "md", "mw", "et", "kl", "fl", "ss"}, + {"sɔ́ndiɛ", "móndie", "muányáŋmóndie", "metúkpíápɛ", "kúpélimetúkpiapɛ", "feléte", "séselé"}, + {"o.1", "o.2", "o.3", "o.4", "o.5", "o.6", 
"o.7", "o.8", "o.9", "o.10", "o.11", "o.12"}, + {"pikítíkítie, oólí ú kutúan", "siɛyɛ́, oóli ú kándíɛ", "ɔnsúmbɔl, oóli ú kátátúɛ", "mesiŋ, oóli ú kénie", "ensil, oóli ú kátánuɛ", "ɔsɔn", "efute", "pisuyú", "imɛŋ i puɔs", "imɛŋ i putúk,oóli ú kátíɛ", "makandikɛ", "pilɔndɔ́"}, + {"kiɛmɛ́ɛm", "kisɛ́ndɛ"}, +} + +var localeTableYi = [5][]string{ + {}, + {"זונטיק", "מאָנטיק", "דינסטיק", "מיטוואך", "דאנערשטיק", "פֿרײַטיק", "שבת"}, + {}, + {"יאַנואַר", "פֿעברואַר", "מערץ", "אַפּריל", "מיי", "יוני", "יולי", "אויגוסט", "סעפּטעמבער", "אקטאבער", "נאוועמבער", "דעצעמבער"}, + {"פֿאַרמיטאָג", "נאָכמיטאָג"}, +} + +var localeTableYiUA = [5][]string{ + {}, + {"זונטיק", "מאָנטיק", "דינסטיק", "מיטוואך", "דאנערשטיק", "פֿרײַטיק", "שבת"}, + {}, + {"יאַנואַר", "פֿעברואַר", "מערץ", "אַפּריל", "מיי", "יוני", "יולי", "אויגוסט", "סעפּטעמבער", "אקטאבער", "נאוועמבער", "דעצעמבער"}, + {"פֿאַרמיטאָג", "נאָכמיטאָג"}, +} + +var localeTableYo = [5][]string{ + {"Àìk", "Aj", "Ìsẹ́g", "Ọjọ́r", "Ọjọ́b", "Ẹt", "Àbám"}, + {"Ọjọ́ Àìkú", "Ọjọ́ Ajé", "Ọjọ́ Ìsẹ́gun", "Ọjọ́rú", "Ọjọ́bọ", "Ọjọ́ Ẹtì", "Ọjọ́ Àbámẹ́ta"}, + {"Ṣẹ́r", "Èrèl", "Ẹrẹ̀n", "Ìgb", "Ẹ̀bi", "Òkú", "Agẹ", "Ògú", "Owe", "Ọ̀wà", "Bél", "Ọ̀pẹ"}, + {"Oṣù Ṣẹ́rẹ́", "Oṣù Èrèlè", "Oṣù Ẹrẹ̀nà", "Oṣù Ìgbé", "Oṣù Ẹ̀bibi", "Oṣù Òkúdu", "Oṣù Agẹmọ", "Oṣù Ògún", "Oṣù Owewe", "Oṣù Ọ̀wàrà", "Oṣù Bélú", "Oṣù Ọ̀pẹ̀"}, + {"Àárọ̀", "Ọ̀sán"}, +} + +var localeTableYoBJ = [5][]string{ + {"Àìk", "Aj", "Ìsɛ́g", "Ɔjɔ́r", "Ɔjɔ́b", "Ɛt", "Àbám"}, + {"Ɔjɔ́ Àìkú", "Ɔjɔ́ Ajé", "Ɔjɔ́ Ìsɛ́gun", "Ɔjɔ́rú", "Ɔjɔ́bɔ", "Ɔjɔ́ Ɛtì", "Ɔjɔ́ Àbámɛ́ta"}, + {"Shɛ́r", "Èrèl", "Ɛrɛ̀n", "Ìgb", "Ɛ̀bi", "Òkú", "Agɛ", "Ògú", "Owe", "Ɔ̀wà", "Bél", "Ɔ̀pɛ"}, + {"Oshù Shɛ́rɛ́", "Oshù Èrèlè", "Oshù Ɛrɛ̀nà", "Oshù Ìgbé", "Oshù Ɛ̀bibi", "Oshù Òkúdu", "Oshù Agɛmɔ", "Oshù Ògún", "Oshù Owewe", "Oshù Ɔ̀wàrà", "Oshù Bélú", "Oshù Ɔ̀pɛ̀"}, + {"Àárɔ̀", "Ɔ̀sán"}, +} + +var localeTableYoNG = [5][]string{ + {"Àìk", "Aj", "Ìsẹ́g", "Ọjọ́r", "Ọjọ́b", "Ẹt", "Àbám"}, + {"Ọjọ́ Àìkú", "Ọjọ́ Ajé", "Ọjọ́ Ìsẹ́gun", "Ọjọ́rú", "Ọjọ́bọ", "Ọjọ́ Ẹtì", "Ọjọ́ Àbámẹ́ta"}, + {"Ṣẹ́r", "Èrèl", "Ẹrẹ̀n", "Ìgb", "Ẹ̀bi", "Òkú", "Agẹ", "Ògú", "Owe", "Ọ̀wà", "Bél", "Ọ̀pẹ"}, + {"Oṣù Ṣẹ́rẹ́", "Oṣù Èrèlè", "Oṣù Ẹrẹ̀nà", "Oṣù Ìgbé", "Oṣù Ẹ̀bibi", "Oṣù Òkúdu", "Oṣù Agẹmọ", "Oṣù Ògún", "Oṣù Owewe", "Oṣù Ọ̀wàrà", "Oṣù Bélú", "Oṣù Ọ̀pẹ̀"}, + {"Àárọ̀", "Ọ̀sán"}, +} + +var localeTableYrl = [5][]string{ + {"mit", "mur", "mmk", "mms", "sup", "yuk", "sau"}, + {"mituú", "murakipí", "murakí-mukũi", "murakí-musapíri", "supapá", "yukuakú", "saurú"}, + {"ye", "mk", "ms", "id", "pu", "py", "pm", "ps", "pi", "yp", "yy", "ym"}, + {"yepé", "mukũi", "musapíri", "irũdí", "pú", "pú-yepé", "pú-mukũi", "pú-musapíri", "pú-irũdí", "yepé-putimaã", "yepé-yepé", "yepé-mukũi"}, + {}, +} + +var localeTableYrlBR = [5][]string{ + {"mit", "mur", "mmk", "mms", "sup", "yuk", "sau"}, + {"mituú", "murakipí", "murakí-mukũi", "murakí-musapíri", "supapá", "yukuakú", "saurú"}, + {"ye", "mk", "ms", "id", "pu", "py", "pm", "ps", "pi", "yp", "yy", "ym"}, + {"yepé", "mukũi", "musapíri", "irũdí", "pú", "pú-yepé", "pú-mukũi", "pú-musapíri", "pú-irũdí", "yepé-putimaã", "yepé-yepé", "yepé-mukũi"}, + {}, +} + +var localeTableYrlCO = [5][]string{ + {"mit", "mur", "mmk", "mms", "sup", "yuk", "sau"}, + {"mituú", "murakipí", "murakí-mukũi", "murakí-musapíri", "supapá", "yukuakú", "saurú"}, + {"ye", "mk", "ms", "id", "pu", "py", "pm", "ps", "pi", "yp", "yy", "ym"}, + {"yepé", "mukũi", "musapíri", "irũdí", "pú", "pú-yepé", "pú-mukũi", "pú-musapíri", "pú-irũdí", "yepé-putimaã", "yepé-yepé", "yepé-mukũi"}, + {"a.m.", 
"p.m."}, +} + +var localeTableYrlVE = [5][]string{ + {"mit", "mur", "mmk", "mms", "sup", "yuk", "sau"}, + {"mituú", "murakipí", "murakí-mukũi", "murakí-musapíri", "supapá", "yukuakú", "saurú"}, + {"ye", "mk", "ms", "id", "pu", "py", "pm", "ps", "pi", "yp", "yy", "ym"}, + {"yepé", "mukũi", "musapíri", "irũdí", "pú", "pú-yepé", "pú-mukũi", "pú-musapíri", "pú-irũdí", "yepé-putimaã", "yepé-yepé", "yepé-mukũi"}, + {"a.m.", "p.m."}, +} + +var localeTableYue = [5][]string{ + {}, + {"星期日", "星期一", "星期二", "星期三", "星期四", "星期五", "星期六"}, + {}, + {"1月", "2月", "3月", "4月", "5月", "6月", "7月", "8月", "9月", "10月", "11月", "12月"}, + {"上午", "下午"}, +} + +var localeTableYueHans = [5][]string{ + {"周日", "周一", "周二", "周三", "周四", "周五", "周六"}, + {"星期日", "星期一", "星期二", "星期三", "星期四", "星期五", "星期六"}, + {"1月", "2月", "3月", "4月", "5月", "6月", "7月", "8月", "9月", "10月", "11月", "12月"}, + {"一月", "二月", "三月", "四月", "五月", "六月", "七月", "八月", "九月", "十月", "十一月", "十二月"}, + {"上午", "下午"}, +} + +var localeTableYueHansCN = [5][]string{ + {"周日", "周一", "周二", "周三", "周四", "周五", "周六"}, + {"星期日", "星期一", "星期二", "星期三", "星期四", "星期五", "星期六"}, + {"1月", "2月", "3月", "4月", "5月", "6月", "7月", "8月", "9月", "10月", "11月", "12月"}, + {"一月", "二月", "三月", "四月", "五月", "六月", "七月", "八月", "九月", "十月", "十一月", "十二月"}, + {"上午", "下午"}, +} + +var localeTableYueHant = [5][]string{ + {}, + {"星期日", "星期一", "星期二", "星期三", "星期四", "星期五", "星期六"}, + {}, + {"1月", "2月", "3月", "4月", "5月", "6月", "7月", "8月", "9月", "10月", "11月", "12月"}, + {"上午", "下午"}, +} + +var localeTableYueHantHK = [5][]string{ + {}, + {"星期日", "星期一", "星期二", "星期三", "星期四", "星期五", "星期六"}, + {}, + {"1月", "2月", "3月", "4月", "5月", "6月", "7月", "8月", "9月", "10月", "11月", "12月"}, + {"上午", "下午"}, +} + +var localeTableZa = [5][]string{ + {}, + {"ngoenzsinghgiz", "singhgizit", "singhgizngeih", "singhgizsam", "singhgizseiq", "singhgizhaj", "singhgizroek"}, + {}, + {"ndwenit", "ndwenngeih", "ndwensam", "ndwenseiq", "ndwenngux", "ndwenloeg", "ndwencaet", "ndwenbet", "ndwengouj", "ndwencib", "ndwencib’it", "ndwencibngeih"}, + {}, +} + +var localeTableZaCN = [5][]string{ + {}, + {"ngoenzsinghgiz", "singhgizit", "singhgizngeih", "singhgizsam", "singhgizseiq", "singhgizhaj", "singhgizroek"}, + {}, + {"ndwenit", "ndwenngeih", "ndwensam", "ndwenseiq", "ndwenngux", "ndwenloeg", "ndwencaet", "ndwenbet", "ndwengouj", "ndwencib", "ndwencib’it", "ndwencibngeih"}, + {}, +} + +var localeTableZgh = [5][]string{ + {"ⴰⵙⴰ", "ⴰⵢⵏ", "ⴰⵙⵉ", "ⴰⴽⵕ", "ⴰⴽⵡ", "ⴰⵙⵉⵎ", "ⴰⵙⵉⴹ"}, + {"ⴰⵙⴰⵎⴰⵙ", "ⴰⵢⵏⴰⵙ", "ⴰⵙⵉⵏⴰⵙ", "ⴰⴽⵕⴰⵙ", "ⴰⴽⵡⴰⵙ", "ⴰⵙⵉⵎⵡⴰⵙ", "ⴰⵙⵉⴹⵢⴰⵙ"}, + {"ⵉⵏⵏ", "ⴱⵕⴰ", "ⵎⴰⵕ", "ⵉⴱⵔ", "ⵎⴰⵢ", "ⵢⵓⵏ", "ⵢⵓⵍ", "ⵖⵓⵛ", "ⵛⵓⵜ", "ⴽⵜⵓ", "ⵏⵓⵡ", "ⴷⵓⵊ"}, + {"ⵉⵏⵏⴰⵢⵔ", "ⴱⵕⴰⵢⵕ", "ⵎⴰⵕⵚ", "ⵉⴱⵔⵉⵔ", "ⵎⴰⵢⵢⵓ", "ⵢⵓⵏⵢⵓ", "ⵢⵓⵍⵢⵓⵣ", "ⵖⵓⵛⵜ", "ⵛⵓⵜⴰⵏⴱⵉⵔ", "ⴽⵜⵓⴱⵔ", "ⵏⵓⵡⴰⵏⴱⵉⵔ", "ⴷⵓⵊⴰⵏⴱⵉⵔ"}, + {"ⵜⵉⴼⴰⵡⵜ", "ⵜⴰⴷⴳⴳⵯⴰⵜ"}, +} + +var localeTableZghMA = [5][]string{ + {"ⴰⵙⴰ", "ⴰⵢⵏ", "ⴰⵙⵉ", "ⴰⴽⵕ", "ⴰⴽⵡ", "ⴰⵙⵉⵎ", "ⴰⵙⵉⴹ"}, + {"ⴰⵙⴰⵎⴰⵙ", "ⴰⵢⵏⴰⵙ", "ⴰⵙⵉⵏⴰⵙ", "ⴰⴽⵕⴰⵙ", "ⴰⴽⵡⴰⵙ", "ⴰⵙⵉⵎⵡⴰⵙ", "ⴰⵙⵉⴹⵢⴰⵙ"}, + {"ⵉⵏⵏ", "ⴱⵕⴰ", "ⵎⴰⵕ", "ⵉⴱⵔ", "ⵎⴰⵢ", "ⵢⵓⵏ", "ⵢⵓⵍ", "ⵖⵓⵛ", "ⵛⵓⵜ", "ⴽⵜⵓ", "ⵏⵓⵡ", "ⴷⵓⵊ"}, + {"ⵉⵏⵏⴰⵢⵔ", "ⴱⵕⴰⵢⵕ", "ⵎⴰⵕⵚ", "ⵉⴱⵔⵉⵔ", "ⵎⴰⵢⵢⵓ", "ⵢⵓⵏⵢⵓ", "ⵢⵓⵍⵢⵓⵣ", "ⵖⵓⵛⵜ", "ⵛⵓⵜⴰⵏⴱⵉⵔ", "ⴽⵜⵓⴱⵔ", "ⵏⵓⵡⴰⵏⴱⵉⵔ", "ⴷⵓⵊⴰⵏⴱⵉⵔ"}, + {"ⵜⵉⴼⴰⵡⵜ", "ⵜⴰⴷⴳⴳⵯⴰⵜ"}, +} + +var localeTableZh = [5][]string{ + {"周日", "周一", "周二", "周三", "周四", "周五", "周六"}, + {"星期日", "星期一", "星期二", "星期三", "星期四", "星期五", "星期六"}, + {"1月", "2月", "3月", "4月", "5月", "6月", "7月", "8月", "9月", "10月", "11月", "12月"}, + {"一月", "二月", "三月", "四月", "五月", "六月", "七月", "八月", "九月", "十月", "十一月", "十二月"}, + {"上午", "下午"}, +} + +var localeTableZhHans = [5][]string{ + {"周日", "周一", "周二", "周三", "周四", "周五", "周六"}, + 
{"星期日", "星期一", "星期二", "星期三", "星期四", "星期五", "星期六"}, + {"1月", "2月", "3月", "4月", "5月", "6月", "7月", "8月", "9月", "10月", "11月", "12月"}, + {"一月", "二月", "三月", "四月", "五月", "六月", "七月", "八月", "九月", "十月", "十一月", "十二月"}, + {"上午", "下午"}, +} + +var localeTableZhHansCN = [5][]string{ + {"周日", "周一", "周二", "周三", "周四", "周五", "周六"}, + {"星期日", "星期一", "星期二", "星期三", "星期四", "星期五", "星期六"}, + {"1月", "2月", "3月", "4月", "5月", "6月", "7月", "8月", "9月", "10月", "11月", "12月"}, + {"一月", "二月", "三月", "四月", "五月", "六月", "七月", "八月", "九月", "十月", "十一月", "十二月"}, + {"上午", "下午"}, +} + +var localeTableZhHansHK = [5][]string{ + {"周日", "周一", "周二", "周三", "周四", "周五", "周六"}, + {"星期日", "星期一", "星期二", "星期三", "星期四", "星期五", "星期六"}, + {"1月", "2月", "3月", "4月", "5月", "6月", "7月", "8月", "9月", "10月", "11月", "12月"}, + {"一月", "二月", "三月", "四月", "五月", "六月", "七月", "八月", "九月", "十月", "十一月", "十二月"}, + {"上午", "下午"}, +} + +var localeTableZhHansMO = [5][]string{ + {"周日", "周一", "周二", "周三", "周四", "周五", "周六"}, + {"星期日", "星期一", "星期二", "星期三", "星期四", "星期五", "星期六"}, + {"1月", "2月", "3月", "4月", "5月", "6月", "7月", "8月", "9月", "10月", "11月", "12月"}, + {"一月", "二月", "三月", "四月", "五月", "六月", "七月", "八月", "九月", "十月", "十一月", "十二月"}, + {"上午", "下午"}, +} + +var localeTableZhHansSG = [5][]string{ + {"周日", "周一", "周二", "周三", "周四", "周五", "周六"}, + {"星期日", "星期一", "星期二", "星期三", "星期四", "星期五", "星期六"}, + {"1月", "2月", "3月", "4月", "5月", "6月", "7月", "8月", "9月", "10月", "11月", "12月"}, + {"一月", "二月", "三月", "四月", "五月", "六月", "七月", "八月", "九月", "十月", "十一月", "十二月"}, + {"上午", "下午"}, +} + +var localeTableZhHant = [5][]string{ + {"週日", "週一", "週二", "週三", "週四", "週五", "週六"}, + {"星期日", "星期一", "星期二", "星期三", "星期四", "星期五", "星期六"}, + {}, + {"1月", "2月", "3月", "4月", "5月", "6月", "7月", "8月", "9月", "10月", "11月", "12月"}, + {"上午", "下午"}, +} + +var localeTableZhHantHK = [5][]string{ + {"週日", "週一", "週二", "週三", "週四", "週五", "週六"}, + {"星期日", "星期一", "星期二", "星期三", "星期四", "星期五", "星期六"}, + {}, + {"1月", "2月", "3月", "4月", "5月", "6月", "7月", "8月", "9月", "10月", "11月", "12月"}, + {"上午", "下午"}, +} + +var localeTableZhHantMO = [5][]string{ + {"週日", "週一", "週二", "週三", "週四", "週五", "週六"}, + {"星期日", "星期一", "星期二", "星期三", "星期四", "星期五", "星期六"}, + {}, + {"1月", "2月", "3月", "4月", "5月", "6月", "7月", "8月", "9月", "10月", "11月", "12月"}, + {"上午", "下午"}, +} + +var localeTableZhHantTW = [5][]string{ + {"週日", "週一", "週二", "週三", "週四", "週五", "週六"}, + {"星期日", "星期一", "星期二", "星期三", "星期四", "星期五", "星期六"}, + {}, + {"1月", "2月", "3月", "4月", "5月", "6月", "7月", "8月", "9月", "10月", "11月", "12月"}, + {"上午", "下午"}, +} + +var localeTableZu = [5][]string{ + {"Son", "Mso", "Bil", "Tha", "Sin", "Hla", "Mgq"}, + {"ISonto", "UMsombuluko", "ULwesibili", "ULwesithathu", "ULwesine", "ULwesihlanu", "UMgqibelo"}, + {"Jan", "Feb", "Mas", "Eph", "Mey", "Jun", "Jul", "Aga", "Sep", "Okt", "Nov", "Dis"}, + {"Januwari", "Februwari", "Mashi", "Ephreli", "Meyi", "Juni", "Julayi", "Agasti", "Septhemba", "Okthoba", "Novemba", "Disemba"}, + {"a", "p"}, +} + +var localeTableZuZA = [5][]string{ + {"Son", "Mso", "Bil", "Tha", "Sin", "Hla", "Mgq"}, + {"ISonto", "UMsombuluko", "ULwesibili", "ULwesithathu", "ULwesine", "ULwesihlanu", "UMgqibelo"}, + {"Jan", "Feb", "Mas", "Eph", "Mey", "Jun", "Jul", "Aga", "Sep", "Okt", "Nov", "Dis"}, + {"Januwari", "Februwari", "Mashi", "Ephreli", "Meyi", "Juni", "Julayi", "Agasti", "Septhemba", "Okthoba", "Novemba", "Disemba"}, + {"a", "p"}, +} + +const ( + LocaleAa = "aa" + LocaleAaDJ = "aa-DJ" + LocaleAaER = "aa-ER" + LocaleAaET = "aa-ET" + LocaleAb = "ab" + LocaleAbGE = "ab-GE" + LocaleAf = "af" + LocaleAfNA = "af-NA" + LocaleAfZA = "af-ZA" + LocaleAgq = "agq" + 
LocaleAgqCM = "agq-CM" + LocaleAk = "ak" + LocaleAkGH = "ak-GH" + LocaleAm = "am" + LocaleAmET = "am-ET" + LocaleAn = "an" + LocaleAnES = "an-ES" + LocaleApc = "apc" + LocaleApcSY = "apc-SY" + LocaleAr = "ar" + LocaleAr001 = "ar-001" + LocaleArAE = "ar-AE" + LocaleArBH = "ar-BH" + LocaleArDJ = "ar-DJ" + LocaleArDZ = "ar-DZ" + LocaleArEG = "ar-EG" + LocaleArEH = "ar-EH" + LocaleArER = "ar-ER" + LocaleArIL = "ar-IL" + LocaleArIQ = "ar-IQ" + LocaleArJO = "ar-JO" + LocaleArKM = "ar-KM" + LocaleArKW = "ar-KW" + LocaleArLB = "ar-LB" + LocaleArLY = "ar-LY" + LocaleArMA = "ar-MA" + LocaleArMR = "ar-MR" + LocaleArOM = "ar-OM" + LocaleArPS = "ar-PS" + LocaleArQA = "ar-QA" + LocaleArSA = "ar-SA" + LocaleArSD = "ar-SD" + LocaleArSO = "ar-SO" + LocaleArSS = "ar-SS" + LocaleArSY = "ar-SY" + LocaleArTD = "ar-TD" + LocaleArTN = "ar-TN" + LocaleArYE = "ar-YE" + LocaleAs = "as" + LocaleAsIN = "as-IN" + LocaleAsa = "asa" + LocaleAsaTZ = "asa-TZ" + LocaleAst = "ast" + LocaleAstES = "ast-ES" + LocaleAz = "az" + LocaleAzCyrl = "az-Cyrl" + LocaleAzCyrlAZ = "az-Cyrl-AZ" + LocaleAzLatn = "az-Latn" + LocaleAzLatnAZ = "az-Latn-AZ" + LocaleBal = "bal" + LocaleBalArab = "bal-Arab" + LocaleBalArabPK = "bal-Arab-PK" + LocaleBalLatn = "bal-Latn" + LocaleBalLatnPK = "bal-Latn-PK" + LocaleBas = "bas" + LocaleBasCM = "bas-CM" + LocaleBe = "be" + LocaleBeBY = "be-BY" + LocaleBetarask = "be-tarask" + LocaleBem = "bem" + LocaleBemZM = "bem-ZM" + LocaleBew = "bew" + LocaleBewID = "bew-ID" + LocaleBez = "bez" + LocaleBezTZ = "bez-TZ" + LocaleBg = "bg" + LocaleBgBG = "bg-BG" + LocaleBgc = "bgc" + LocaleBgcIN = "bgc-IN" + LocaleBho = "bho" + LocaleBhoIN = "bho-IN" + LocaleBlo = "blo" + LocaleBloBJ = "blo-BJ" + LocaleBm = "bm" + LocaleBmML = "bm-ML" + LocaleBn = "bn" + LocaleBnBD = "bn-BD" + LocaleBnIN = "bn-IN" + LocaleBo = "bo" + LocaleBoCN = "bo-CN" + LocaleBoIN = "bo-IN" + LocaleBr = "br" + LocaleBrFR = "br-FR" + LocaleBrx = "brx" + LocaleBrxIN = "brx-IN" + LocaleBs = "bs" + LocaleBsCyrl = "bs-Cyrl" + LocaleBsCyrlBA = "bs-Cyrl-BA" + LocaleBsLatn = "bs-Latn" + LocaleBsLatnBA = "bs-Latn-BA" + LocaleByn = "byn" + LocaleBynER = "byn-ER" + LocaleCa = "ca" + LocaleCaAD = "ca-AD" + LocaleCaES = "ca-ES" + LocaleCaESvalencia = "ca-ES-valencia" + LocaleCaFR = "ca-FR" + LocaleCaIT = "ca-IT" + LocaleCad = "cad" + LocaleCadUS = "cad-US" + LocaleCch = "cch" + LocaleCchNG = "cch-NG" + LocaleCcp = "ccp" + LocaleCcpBD = "ccp-BD" + LocaleCcpIN = "ccp-IN" + LocaleCe = "ce" + LocaleCeRU = "ce-RU" + LocaleCeb = "ceb" + LocaleCebPH = "ceb-PH" + LocaleCgg = "cgg" + LocaleCggUG = "cgg-UG" + LocaleChr = "chr" + LocaleChrUS = "chr-US" + LocaleCic = "cic" + LocaleCicUS = "cic-US" + LocaleCkb = "ckb" + LocaleCkbIQ = "ckb-IQ" + LocaleCkbIR = "ckb-IR" + LocaleCo = "co" + LocaleCoFR = "co-FR" + LocaleCs = "cs" + LocaleCsCZ = "cs-CZ" + LocaleCsw = "csw" + LocaleCswCA = "csw-CA" + LocaleCu = "cu" + LocaleCuRU = "cu-RU" + LocaleCv = "cv" + LocaleCvRU = "cv-RU" + LocaleCy = "cy" + LocaleCyGB = "cy-GB" + LocaleDa = "da" + LocaleDaDK = "da-DK" + LocaleDaGL = "da-GL" + LocaleDav = "dav" + LocaleDavKE = "dav-KE" + LocaleDe = "de" + LocaleDeAT = "de-AT" + LocaleDeBE = "de-BE" + LocaleDeCH = "de-CH" + LocaleDeDE = "de-DE" + LocaleDeIT = "de-IT" + LocaleDeLI = "de-LI" + LocaleDeLU = "de-LU" + LocaleDje = "dje" + LocaleDjeNE = "dje-NE" + LocaleDoi = "doi" + LocaleDoiIN = "doi-IN" + LocaleDsb = "dsb" + LocaleDsbDE = "dsb-DE" + LocaleDua = "dua" + LocaleDuaCM = "dua-CM" + LocaleDyo = "dyo" + LocaleDyoSN = "dyo-SN" + LocaleDz = "dz" + LocaleDzBT = "dz-BT" + LocaleEbu = 
"ebu" + LocaleEbuKE = "ebu-KE" + LocaleEe = "ee" + LocaleEeGH = "ee-GH" + LocaleEeTG = "ee-TG" + LocaleEl = "el" + LocaleElCY = "el-CY" + LocaleElGR = "el-GR" + LocaleElpolyton = "el-polyton" + LocaleEn = "en" + LocaleEn001 = "en-001" + LocaleEn150 = "en-150" + LocaleEnAE = "en-AE" + LocaleEnAG = "en-AG" + LocaleEnAI = "en-AI" + LocaleEnAS = "en-AS" + LocaleEnAT = "en-AT" + LocaleEnAU = "en-AU" + LocaleEnBB = "en-BB" + LocaleEnBE = "en-BE" + LocaleEnBI = "en-BI" + LocaleEnBM = "en-BM" + LocaleEnBS = "en-BS" + LocaleEnBW = "en-BW" + LocaleEnBZ = "en-BZ" + LocaleEnCA = "en-CA" + LocaleEnCC = "en-CC" + LocaleEnCH = "en-CH" + LocaleEnCK = "en-CK" + LocaleEnCM = "en-CM" + LocaleEnCX = "en-CX" + LocaleEnCY = "en-CY" + LocaleEnDE = "en-DE" + LocaleEnDG = "en-DG" + LocaleEnDK = "en-DK" + LocaleEnDM = "en-DM" + LocaleEnDsrt = "en-Dsrt" + LocaleEnDsrtUS = "en-Dsrt-US" + LocaleEnER = "en-ER" + LocaleEnFI = "en-FI" + LocaleEnFJ = "en-FJ" + LocaleEnFK = "en-FK" + LocaleEnFM = "en-FM" + LocaleEnGB = "en-GB" + LocaleEnGD = "en-GD" + LocaleEnGG = "en-GG" + LocaleEnGH = "en-GH" + LocaleEnGI = "en-GI" + LocaleEnGM = "en-GM" + LocaleEnGU = "en-GU" + LocaleEnGY = "en-GY" + LocaleEnHK = "en-HK" + LocaleEnID = "en-ID" + LocaleEnIE = "en-IE" + LocaleEnIL = "en-IL" + LocaleEnIM = "en-IM" + LocaleEnIN = "en-IN" + LocaleEnIO = "en-IO" + LocaleEnJE = "en-JE" + LocaleEnJM = "en-JM" + LocaleEnKE = "en-KE" + LocaleEnKI = "en-KI" + LocaleEnKN = "en-KN" + LocaleEnKY = "en-KY" + LocaleEnLC = "en-LC" + LocaleEnLR = "en-LR" + LocaleEnLS = "en-LS" + LocaleEnMG = "en-MG" + LocaleEnMH = "en-MH" + LocaleEnMO = "en-MO" + LocaleEnMP = "en-MP" + LocaleEnMS = "en-MS" + LocaleEnMT = "en-MT" + LocaleEnMU = "en-MU" + LocaleEnMV = "en-MV" + LocaleEnMW = "en-MW" + LocaleEnMY = "en-MY" + LocaleEnNA = "en-NA" + LocaleEnNF = "en-NF" + LocaleEnNG = "en-NG" + LocaleEnNL = "en-NL" + LocaleEnNR = "en-NR" + LocaleEnNU = "en-NU" + LocaleEnNZ = "en-NZ" + LocaleEnPG = "en-PG" + LocaleEnPH = "en-PH" + LocaleEnPK = "en-PK" + LocaleEnPN = "en-PN" + LocaleEnPR = "en-PR" + LocaleEnPW = "en-PW" + LocaleEnRW = "en-RW" + LocaleEnSB = "en-SB" + LocaleEnSC = "en-SC" + LocaleEnSD = "en-SD" + LocaleEnSE = "en-SE" + LocaleEnSG = "en-SG" + LocaleEnSH = "en-SH" + LocaleEnSI = "en-SI" + LocaleEnSL = "en-SL" + LocaleEnSS = "en-SS" + LocaleEnSX = "en-SX" + LocaleEnSZ = "en-SZ" + LocaleEnShaw = "en-Shaw" + LocaleEnShawGB = "en-Shaw-GB" + LocaleEnTC = "en-TC" + LocaleEnTK = "en-TK" + LocaleEnTO = "en-TO" + LocaleEnTT = "en-TT" + LocaleEnTV = "en-TV" + LocaleEnTZ = "en-TZ" + LocaleEnUG = "en-UG" + LocaleEnUM = "en-UM" + LocaleEnUS = "en-US" + LocaleEnUSuvaposix = "en-US-u-va-posix" + LocaleEnVC = "en-VC" + LocaleEnVG = "en-VG" + LocaleEnVI = "en-VI" + LocaleEnVU = "en-VU" + LocaleEnWS = "en-WS" + LocaleEnZA = "en-ZA" + LocaleEnZM = "en-ZM" + LocaleEnZW = "en-ZW" + LocaleEo = "eo" + LocaleEo001 = "eo-001" + LocaleEs = "es" + LocaleEs419 = "es-419" + LocaleEsAR = "es-AR" + LocaleEsBO = "es-BO" + LocaleEsBR = "es-BR" + LocaleEsBZ = "es-BZ" + LocaleEsCL = "es-CL" + LocaleEsCO = "es-CO" + LocaleEsCR = "es-CR" + LocaleEsCU = "es-CU" + LocaleEsDO = "es-DO" + LocaleEsEA = "es-EA" + LocaleEsEC = "es-EC" + LocaleEsES = "es-ES" + LocaleEsGQ = "es-GQ" + LocaleEsGT = "es-GT" + LocaleEsHN = "es-HN" + LocaleEsIC = "es-IC" + LocaleEsMX = "es-MX" + LocaleEsNI = "es-NI" + LocaleEsPA = "es-PA" + LocaleEsPE = "es-PE" + LocaleEsPH = "es-PH" + LocaleEsPR = "es-PR" + LocaleEsPY = "es-PY" + LocaleEsSV = "es-SV" + LocaleEsUS = "es-US" + LocaleEsUY = "es-UY" + LocaleEsVE = "es-VE" + LocaleEt = 
"et" + LocaleEtEE = "et-EE" + LocaleEu = "eu" + LocaleEuES = "eu-ES" + LocaleEwo = "ewo" + LocaleEwoCM = "ewo-CM" + LocaleFa = "fa" + LocaleFaAF = "fa-AF" + LocaleFaIR = "fa-IR" + LocaleFf = "ff" + LocaleFfAdlm = "ff-Adlm" + LocaleFfAdlmBF = "ff-Adlm-BF" + LocaleFfAdlmCM = "ff-Adlm-CM" + LocaleFfAdlmGH = "ff-Adlm-GH" + LocaleFfAdlmGM = "ff-Adlm-GM" + LocaleFfAdlmGN = "ff-Adlm-GN" + LocaleFfAdlmGW = "ff-Adlm-GW" + LocaleFfAdlmLR = "ff-Adlm-LR" + LocaleFfAdlmMR = "ff-Adlm-MR" + LocaleFfAdlmNE = "ff-Adlm-NE" + LocaleFfAdlmNG = "ff-Adlm-NG" + LocaleFfAdlmSL = "ff-Adlm-SL" + LocaleFfAdlmSN = "ff-Adlm-SN" + LocaleFfLatn = "ff-Latn" + LocaleFfLatnBF = "ff-Latn-BF" + LocaleFfLatnCM = "ff-Latn-CM" + LocaleFfLatnGH = "ff-Latn-GH" + LocaleFfLatnGM = "ff-Latn-GM" + LocaleFfLatnGN = "ff-Latn-GN" + LocaleFfLatnGW = "ff-Latn-GW" + LocaleFfLatnLR = "ff-Latn-LR" + LocaleFfLatnMR = "ff-Latn-MR" + LocaleFfLatnNE = "ff-Latn-NE" + LocaleFfLatnNG = "ff-Latn-NG" + LocaleFfLatnSL = "ff-Latn-SL" + LocaleFfLatnSN = "ff-Latn-SN" + LocaleFi = "fi" + LocaleFiFI = "fi-FI" + LocaleFil = "fil" + LocaleFilPH = "fil-PH" + LocaleFo = "fo" + LocaleFoDK = "fo-DK" + LocaleFoFO = "fo-FO" + LocaleFr = "fr" + LocaleFrBE = "fr-BE" + LocaleFrBF = "fr-BF" + LocaleFrBI = "fr-BI" + LocaleFrBJ = "fr-BJ" + LocaleFrBL = "fr-BL" + LocaleFrCA = "fr-CA" + LocaleFrCD = "fr-CD" + LocaleFrCF = "fr-CF" + LocaleFrCG = "fr-CG" + LocaleFrCH = "fr-CH" + LocaleFrCI = "fr-CI" + LocaleFrCM = "fr-CM" + LocaleFrDJ = "fr-DJ" + LocaleFrDZ = "fr-DZ" + LocaleFrFR = "fr-FR" + LocaleFrGA = "fr-GA" + LocaleFrGF = "fr-GF" + LocaleFrGN = "fr-GN" + LocaleFrGP = "fr-GP" + LocaleFrGQ = "fr-GQ" + LocaleFrHT = "fr-HT" + LocaleFrKM = "fr-KM" + LocaleFrLU = "fr-LU" + LocaleFrMA = "fr-MA" + LocaleFrMC = "fr-MC" + LocaleFrMF = "fr-MF" + LocaleFrMG = "fr-MG" + LocaleFrML = "fr-ML" + LocaleFrMQ = "fr-MQ" + LocaleFrMR = "fr-MR" + LocaleFrMU = "fr-MU" + LocaleFrNC = "fr-NC" + LocaleFrNE = "fr-NE" + LocaleFrPF = "fr-PF" + LocaleFrPM = "fr-PM" + LocaleFrRE = "fr-RE" + LocaleFrRW = "fr-RW" + LocaleFrSC = "fr-SC" + LocaleFrSN = "fr-SN" + LocaleFrSY = "fr-SY" + LocaleFrTD = "fr-TD" + LocaleFrTG = "fr-TG" + LocaleFrTN = "fr-TN" + LocaleFrVU = "fr-VU" + LocaleFrWF = "fr-WF" + LocaleFrYT = "fr-YT" + LocaleFrr = "frr" + LocaleFrrDE = "frr-DE" + LocaleFur = "fur" + LocaleFurIT = "fur-IT" + LocaleFy = "fy" + LocaleFyNL = "fy-NL" + LocaleGa = "ga" + LocaleGaGB = "ga-GB" + LocaleGaIE = "ga-IE" + LocaleGaa = "gaa" + LocaleGaaGH = "gaa-GH" + LocaleGd = "gd" + LocaleGdGB = "gd-GB" + LocaleGez = "gez" + LocaleGezER = "gez-ER" + LocaleGezET = "gez-ET" + LocaleGl = "gl" + LocaleGlES = "gl-ES" + LocaleGn = "gn" + LocaleGnPY = "gn-PY" + LocaleGsw = "gsw" + LocaleGswCH = "gsw-CH" + LocaleGswFR = "gsw-FR" + LocaleGswLI = "gsw-LI" + LocaleGu = "gu" + LocaleGuIN = "gu-IN" + LocaleGuz = "guz" + LocaleGuzKE = "guz-KE" + LocaleGv = "gv" + LocaleGvIM = "gv-IM" + LocaleHa = "ha" + LocaleHaArab = "ha-Arab" + LocaleHaArabNG = "ha-Arab-NG" + LocaleHaArabSD = "ha-Arab-SD" + LocaleHaGH = "ha-GH" + LocaleHaNE = "ha-NE" + LocaleHaNG = "ha-NG" + LocaleHaw = "haw" + LocaleHawUS = "haw-US" + LocaleHe = "he" + LocaleHeIL = "he-IL" + LocaleHi = "hi" + LocaleHiIN = "hi-IN" + LocaleHiLatn = "hi-Latn" + LocaleHiLatnIN = "hi-Latn-IN" + LocaleHnj = "hnj" + LocaleHr = "hr" + LocaleHrBA = "hr-BA" + LocaleHrHR = "hr-HR" + LocaleHsb = "hsb" + LocaleHsbDE = "hsb-DE" + LocaleHu = "hu" + LocaleHuHU = "hu-HU" + LocaleHy = "hy" + LocaleHyAM = "hy-AM" + LocaleIa = "ia" + LocaleIa001 = "ia-001" + LocaleId = "id" + LocaleIdID = 
"id-ID" + LocaleIe = "ie" + LocaleIeEE = "ie-EE" + LocaleIg = "ig" + LocaleIgNG = "ig-NG" + LocaleIi = "ii" + LocaleIiCN = "ii-CN" + LocaleIs = "is" + LocaleIsIS = "is-IS" + LocaleIt = "it" + LocaleItCH = "it-CH" + LocaleItIT = "it-IT" + LocaleItSM = "it-SM" + LocaleItVA = "it-VA" + LocaleIu = "iu" + LocaleIuCA = "iu-CA" + LocaleJa = "ja" + LocaleJaJP = "ja-JP" + LocaleJgo = "jgo" + LocaleJgoCM = "jgo-CM" + LocaleJmc = "jmc" + LocaleJmcTZ = "jmc-TZ" + LocaleJv = "jv" + LocaleJvID = "jv-ID" + LocaleKa = "ka" + LocaleKaGE = "ka-GE" + LocaleKab = "kab" + LocaleKabDZ = "kab-DZ" + LocaleKaj = "kaj" + LocaleKajNG = "kaj-NG" + LocaleKam = "kam" + LocaleKamKE = "kam-KE" + LocaleKcg = "kcg" + LocaleKcgNG = "kcg-NG" + LocaleKde = "kde" + LocaleKdeTZ = "kde-TZ" + LocaleKea = "kea" + LocaleKeaCV = "kea-CV" + LocaleKgp = "kgp" + LocaleKgpBR = "kgp-BR" + LocaleKhq = "khq" + LocaleKhqML = "khq-ML" + LocaleKi = "ki" + LocaleKiKE = "ki-KE" + LocaleKk = "kk" + LocaleKkKZ = "kk-KZ" + LocaleKkj = "kkj" + LocaleKkjCM = "kkj-CM" + LocaleKl = "kl" + LocaleKlGL = "kl-GL" + LocaleKln = "kln" + LocaleKlnKE = "kln-KE" + LocaleKm = "km" + LocaleKmKH = "km-KH" + LocaleKn = "kn" + LocaleKnIN = "kn-IN" + LocaleKo = "ko" + LocaleKoCN = "ko-CN" + LocaleKoKP = "ko-KP" + LocaleKoKR = "ko-KR" + LocaleKok = "kok" + LocaleKokIN = "kok-IN" + LocaleKs = "ks" + LocaleKsArab = "ks-Arab" + LocaleKsArabIN = "ks-Arab-IN" + LocaleKsDeva = "ks-Deva" + LocaleKsDevaIN = "ks-Deva-IN" + LocaleKsb = "ksb" + LocaleKsbTZ = "ksb-TZ" + LocaleKsf = "ksf" + LocaleKsfCM = "ksf-CM" + LocaleKsh = "ksh" + LocaleKshDE = "ksh-DE" + LocaleKu = "ku" + LocaleKuTR = "ku-TR" + LocaleKw = "kw" + LocaleKwGB = "kw-GB" + LocaleKxv = "kxv" + LocaleKxvDeva = "kxv-Deva" + LocaleKxvDevaIN = "kxv-Deva-IN" + LocaleKxvOrya = "kxv-Orya" + LocaleKxvOryaIN = "kxv-Orya-IN" + LocaleKxvTelu = "kxv-Telu" + LocaleKxvTeluIN = "kxv-Telu-IN" + LocaleKy = "ky" + LocaleKyKG = "ky-KG" + LocaleLa = "la" + LocaleLaVA = "la-VA" + LocaleLag = "lag" + LocaleLagTZ = "lag-TZ" + LocaleLb = "lb" + LocaleLbLU = "lb-LU" + LocaleLg = "lg" + LocaleLgUG = "lg-UG" + LocaleLij = "lij" + LocaleLijIT = "lij-IT" + LocaleLkt = "lkt" + LocaleLktUS = "lkt-US" + LocaleLmo = "lmo" + LocaleLmoIT = "lmo-IT" + LocaleLn = "ln" + LocaleLnAO = "ln-AO" + LocaleLnCD = "ln-CD" + LocaleLnCF = "ln-CF" + LocaleLnCG = "ln-CG" + LocaleLo = "lo" + LocaleLoLA = "lo-LA" + LocaleLrc = "lrc" + LocaleLrcIQ = "lrc-IQ" + LocaleLrcIR = "lrc-IR" + LocaleLt = "lt" + LocaleLtLT = "lt-LT" + LocaleLu = "lu" + LocaleLuCD = "lu-CD" + LocaleLuo = "luo" + LocaleLuoKE = "luo-KE" + LocaleLuy = "luy" + LocaleLuyKE = "luy-KE" + LocaleLv = "lv" + LocaleLvLV = "lv-LV" + LocaleMai = "mai" + LocaleMaiIN = "mai-IN" + LocaleMas = "mas" + LocaleMasKE = "mas-KE" + LocaleMasTZ = "mas-TZ" + LocaleMer = "mer" + LocaleMerKE = "mer-KE" + LocaleMfe = "mfe" + LocaleMfeMU = "mfe-MU" + LocaleMg = "mg" + LocaleMgMG = "mg-MG" + LocaleMgh = "mgh" + LocaleMghMZ = "mgh-MZ" + LocaleMgo = "mgo" + LocaleMgoCM = "mgo-CM" + LocaleMi = "mi" + LocaleMiNZ = "mi-NZ" + LocaleMk = "mk" + LocaleMkMK = "mk-MK" + LocaleMl = "ml" + LocaleMlIN = "ml-IN" + LocaleMn = "mn" + LocaleMnMN = "mn-MN" + LocaleMnMongMN = "mn-Mong-MN" + LocaleMni = "mni" + LocaleMniBeng = "mni-Beng" + LocaleMniBengIN = "mni-Beng-IN" + LocaleMr = "mr" + LocaleMrIN = "mr-IN" + LocaleMs = "ms" + LocaleMsArab = "ms-Arab" + LocaleMsArabBN = "ms-Arab-BN" + LocaleMsArabMY = "ms-Arab-MY" + LocaleMsBN = "ms-BN" + LocaleMsID = "ms-ID" + LocaleMsMY = "ms-MY" + LocaleMsSG = "ms-SG" + LocaleMt = "mt" + LocaleMtMT = 
"mt-MT" + LocaleMua = "mua" + LocaleMuaCM = "mua-CM" + LocaleMus = "mus" + LocaleMusUS = "mus-US" + LocaleMy = "my" + LocaleMyMM = "my-MM" + LocaleMyv = "myv" + LocaleMyvRU = "myv-RU" + LocaleMzn = "mzn" + LocaleMznIR = "mzn-IR" + LocaleNaq = "naq" + LocaleNaqNA = "naq-NA" + LocaleNd = "nd" + LocaleNdZW = "nd-ZW" + LocaleNds = "nds" + LocaleNdsDE = "nds-DE" + LocaleNdsNL = "nds-NL" + LocaleNe = "ne" + LocaleNeIN = "ne-IN" + LocaleNeNP = "ne-NP" + LocaleNl = "nl" + LocaleNlAW = "nl-AW" + LocaleNlBE = "nl-BE" + LocaleNlBQ = "nl-BQ" + LocaleNlCW = "nl-CW" + LocaleNlNL = "nl-NL" + LocaleNlSR = "nl-SR" + LocaleNlSX = "nl-SX" + LocaleNmg = "nmg" + LocaleNmgCM = "nmg-CM" + LocaleNn = "nn" + LocaleNnNO = "nn-NO" + LocaleNnh = "nnh" + LocaleNnhCM = "nnh-CM" + LocaleNo = "no" + LocaleNqo = "nqo" + LocaleNqoGN = "nqo-GN" + LocaleNr = "nr" + LocaleNrZA = "nr-ZA" + LocaleNso = "nso" + LocaleNsoZA = "nso-ZA" + LocaleNus = "nus" + LocaleNusSS = "nus-SS" + LocaleNy = "ny" + LocaleNyMW = "ny-MW" + LocaleNyn = "nyn" + LocaleNynUG = "nyn-UG" + LocaleOc = "oc" + LocaleOcES = "oc-ES" + LocaleOcFR = "oc-FR" + LocaleOm = "om" + LocaleOmET = "om-ET" + LocaleOmKE = "om-KE" + LocaleOr = "or" + LocaleOrIN = "or-IN" + LocaleOs = "os" + LocaleOsGE = "os-GE" + LocaleOsRU = "os-RU" + LocaleOsa = "osa" + LocaleOsaUS = "osa-US" + LocalePa = "pa" + LocalePaArab = "pa-Arab" + LocalePaArabPK = "pa-Arab-PK" + LocalePaGuru = "pa-Guru" + LocalePaGuruIN = "pa-Guru-IN" + LocalePap = "pap" + LocalePapAW = "pap-AW" + LocalePapCW = "pap-CW" + LocalePcm = "pcm" + LocalePcmNG = "pcm-NG" + LocalePis = "pis" + LocalePisSB = "pis-SB" + LocalePl = "pl" + LocalePlPL = "pl-PL" + LocalePrg = "prg" + LocalePrgPL = "prg-PL" + LocalePs = "ps" + LocalePsAF = "ps-AF" + LocalePsPK = "ps-PK" + LocalePt = "pt" + LocalePtAO = "pt-AO" + LocalePtBR = "pt-BR" + LocalePtCH = "pt-CH" + LocalePtCV = "pt-CV" + LocalePtGQ = "pt-GQ" + LocalePtGW = "pt-GW" + LocalePtLU = "pt-LU" + LocalePtMO = "pt-MO" + LocalePtMZ = "pt-MZ" + LocalePtPT = "pt-PT" + LocalePtST = "pt-ST" + LocalePtTL = "pt-TL" + LocaleQu = "qu" + LocaleQuBO = "qu-BO" + LocaleQuEC = "qu-EC" + LocaleQuPE = "qu-PE" + LocaleRaj = "raj" + LocaleRajIN = "raj-IN" + LocaleRif = "rif" + LocaleRifMA = "rif-MA" + LocaleRm = "rm" + LocaleRmCH = "rm-CH" + LocaleRn = "rn" + LocaleRnBI = "rn-BI" + LocaleRo = "ro" + LocaleRoMD = "ro-MD" + LocaleRoRO = "ro-RO" + LocaleRof = "rof" + LocaleRofTZ = "rof-TZ" + LocaleRu = "ru" + LocaleRuBY = "ru-BY" + LocaleRuKG = "ru-KG" + LocaleRuKZ = "ru-KZ" + LocaleRuMD = "ru-MD" + LocaleRuRU = "ru-RU" + LocaleRuUA = "ru-UA" + LocaleRw = "rw" + LocaleRwRW = "rw-RW" + LocaleRwk = "rwk" + LocaleRwkTZ = "rwk-TZ" + LocaleSa = "sa" + LocaleSaIN = "sa-IN" + LocaleSah = "sah" + LocaleSahRU = "sah-RU" + LocaleSaq = "saq" + LocaleSaqKE = "saq-KE" + LocaleSat = "sat" + LocaleSbp = "sbp" + LocaleSbpTZ = "sbp-TZ" + LocaleSc = "sc" + LocaleScIT = "sc-IT" + LocaleScn = "scn" + LocaleScnIT = "scn-IT" + LocaleSd = "sd" + LocaleSdArab = "sd-Arab" + LocaleSdArabPK = "sd-Arab-PK" + LocaleSdDeva = "sd-Deva" + LocaleSdDevaIN = "sd-Deva-IN" + LocaleSe = "se" + LocaleSeFI = "se-FI" + LocaleSeNO = "se-NO" + LocaleSeSE = "se-SE" + LocaleSeh = "seh" + LocaleSehMZ = "seh-MZ" + LocaleSes = "ses" + LocaleSesML = "ses-ML" + LocaleSg = "sg" + LocaleSgCF = "sg-CF" + LocaleShi = "shi" + LocaleShiLatn = "shi-Latn" + LocaleShiLatnMA = "shi-Latn-MA" + LocaleShiTfng = "shi-Tfng" + LocaleShiTfngMA = "shi-Tfng-MA" + LocaleSi = "si" + LocaleSiLK = "si-LK" + LocaleSid = "sid" + LocaleSidET = "sid-ET" + LocaleSk = "sk" + 
LocaleSkSK = "sk-SK" + LocaleSkr = "skr" + LocaleSkrPK = "skr-PK" + LocaleSl = "sl" + LocaleSlSI = "sl-SI" + LocaleSmn = "smn" + LocaleSmnFI = "smn-FI" + LocaleSn = "sn" + LocaleSnZW = "sn-ZW" + LocaleSo = "so" + LocaleSoDJ = "so-DJ" + LocaleSoET = "so-ET" + LocaleSoKE = "so-KE" + LocaleSoSO = "so-SO" + LocaleSq = "sq" + LocaleSqAL = "sq-AL" + LocaleSqMK = "sq-MK" + LocaleSqXK = "sq-XK" + LocaleSr = "sr" + LocaleSrCyrl = "sr-Cyrl" + LocaleSrCyrlBA = "sr-Cyrl-BA" + LocaleSrCyrlME = "sr-Cyrl-ME" + LocaleSrCyrlRS = "sr-Cyrl-RS" + LocaleSrCyrlXK = "sr-Cyrl-XK" + LocaleSrLatn = "sr-Latn" + LocaleSrLatnBA = "sr-Latn-BA" + LocaleSrLatnME = "sr-Latn-ME" + LocaleSrLatnRS = "sr-Latn-RS" + LocaleSrLatnXK = "sr-Latn-XK" + LocaleSs = "ss" + LocaleSsSZ = "ss-SZ" + LocaleSsZA = "ss-ZA" + LocaleSsy = "ssy" + LocaleSsyER = "ssy-ER" + LocaleSt = "st" + LocaleStLS = "st-LS" + LocaleStZA = "st-ZA" + LocaleSu = "su" + LocaleSuLatn = "su-Latn" + LocaleSuLatnID = "su-Latn-ID" + LocaleSv = "sv" + LocaleSvAX = "sv-AX" + LocaleSvFI = "sv-FI" + LocaleSvSE = "sv-SE" + LocaleSw = "sw" + LocaleSwCD = "sw-CD" + LocaleSwKE = "sw-KE" + LocaleSwTZ = "sw-TZ" + LocaleSwUG = "sw-UG" + LocaleSyr = "syr" + LocaleSyrIQ = "syr-IQ" + LocaleSyrSY = "syr-SY" + LocaleSzl = "szl" + LocaleSzlPL = "szl-PL" + LocaleTa = "ta" + LocaleTaIN = "ta-IN" + LocaleTaLK = "ta-LK" + LocaleTaMY = "ta-MY" + LocaleTaSG = "ta-SG" + LocaleTe = "te" + LocaleTeIN = "te-IN" + LocaleTeo = "teo" + LocaleTeoKE = "teo-KE" + LocaleTeoUG = "teo-UG" + LocaleTg = "tg" + LocaleTgTJ = "tg-TJ" + LocaleTh = "th" + LocaleThTH = "th-TH" + LocaleTi = "ti" + LocaleTiER = "ti-ER" + LocaleTiET = "ti-ET" + LocaleTig = "tig" + LocaleTigER = "tig-ER" + LocaleTk = "tk" + LocaleTkTM = "tk-TM" + LocaleTn = "tn" + LocaleTnBW = "tn-BW" + LocaleTnZA = "tn-ZA" + LocaleTo = "to" + LocaleToTO = "to-TO" + LocaleTok = "tok" + LocaleTok001 = "tok-001" + LocaleTpi = "tpi" + LocaleTpiPG = "tpi-PG" + LocaleTr = "tr" + LocaleTrCY = "tr-CY" + LocaleTrTR = "tr-TR" + LocaleTrv = "trv" + LocaleTrvTW = "trv-TW" + LocaleTrw = "trw" + LocaleTrwPK = "trw-PK" + LocaleTs = "ts" + LocaleTsZA = "ts-ZA" + LocaleTt = "tt" + LocaleTtRU = "tt-RU" + LocaleTwq = "twq" + LocaleTwqNE = "twq-NE" + LocaleTzm = "tzm" + LocaleTzmMA = "tzm-MA" + LocaleUg = "ug" + LocaleUgCN = "ug-CN" + LocaleUk = "uk" + LocaleUkUA = "uk-UA" + LocaleUnd = "und" + LocaleUr = "ur" + LocaleUrIN = "ur-IN" + LocaleUrPK = "ur-PK" + LocaleUz = "uz" + LocaleUzArab = "uz-Arab" + LocaleUzArabAF = "uz-Arab-AF" + LocaleUzCyrl = "uz-Cyrl" + LocaleUzCyrlUZ = "uz-Cyrl-UZ" + LocaleUzLatn = "uz-Latn" + LocaleUzLatnUZ = "uz-Latn-UZ" + LocaleVai = "vai" + LocaleVaiLatn = "vai-Latn" + LocaleVaiLatnLR = "vai-Latn-LR" + LocaleVaiVaii = "vai-Vaii" + LocaleVaiVaiiLR = "vai-Vaii-LR" + LocaleVe = "ve" + LocaleVeZA = "ve-ZA" + LocaleVec = "vec" + LocaleVecIT = "vec-IT" + LocaleVi = "vi" + LocaleViVN = "vi-VN" + LocaleVmw = "vmw" + LocaleVmwMZ = "vmw-MZ" + LocaleVo = "vo" + LocaleVo001 = "vo-001" + LocaleVun = "vun" + LocaleVunTZ = "vun-TZ" + LocaleWae = "wae" + LocaleWaeCH = "wae-CH" + LocaleWal = "wal" + LocaleWalET = "wal-ET" + LocaleWo = "wo" + LocaleWoSN = "wo-SN" + LocaleXh = "xh" + LocaleXhZA = "xh-ZA" + LocaleXnr = "xnr" + LocaleXnrIN = "xnr-IN" + LocaleXog = "xog" + LocaleXogUG = "xog-UG" + LocaleYav = "yav" + LocaleYavCM = "yav-CM" + LocaleYi = "yi" + LocaleYiUA = "yi-UA" + LocaleYo = "yo" + LocaleYoBJ = "yo-BJ" + LocaleYoNG = "yo-NG" + LocaleYrl = "yrl" + LocaleYrlBR = "yrl-BR" + LocaleYrlCO = "yrl-CO" + LocaleYrlVE = "yrl-VE" + LocaleYue = "yue" + 
LocaleYueHans = "yue-Hans" + LocaleYueHansCN = "yue-Hans-CN" + LocaleYueHant = "yue-Hant" + LocaleYueHantHK = "yue-Hant-HK" + LocaleZa = "za" + LocaleZaCN = "za-CN" + LocaleZgh = "zgh" + LocaleZghMA = "zgh-MA" + LocaleZh = "zh" + LocaleZhHans = "zh-Hans" + LocaleZhHansCN = "zh-Hans-CN" + LocaleZhHansHK = "zh-Hans-HK" + LocaleZhHansMO = "zh-Hans-MO" + LocaleZhHansSG = "zh-Hans-SG" + LocaleZhHant = "zh-Hant" + LocaleZhHantHK = "zh-Hant-HK" + LocaleZhHantMO = "zh-Hant-MO" + LocaleZhHantTW = "zh-Hant-TW" + LocaleZu = "zu" + LocaleZuZA = "zu-ZA" +) + +var tables = map[string][5][]string{ + LocaleAa: localeTableAa, + LocaleAaDJ: localeTableAaDJ, + LocaleAaER: localeTableAaER, + LocaleAaET: localeTableAaET, + LocaleAb: localeTableAb, + LocaleAbGE: localeTableAbGE, + LocaleAf: localeTableAf, + LocaleAfNA: localeTableAfNA, + LocaleAfZA: localeTableAfZA, + LocaleAgq: localeTableAgq, + LocaleAgqCM: localeTableAgqCM, + LocaleAk: localeTableAk, + LocaleAkGH: localeTableAkGH, + LocaleAm: localeTableAm, + LocaleAmET: localeTableAmET, + LocaleAn: localeTableAn, + LocaleAnES: localeTableAnES, + LocaleApc: localeTableApc, + LocaleApcSY: localeTableApcSY, + LocaleAr: localeTableAr, + LocaleAr001: localeTableAr001, + LocaleArAE: localeTableArAE, + LocaleArBH: localeTableArBH, + LocaleArDJ: localeTableArDJ, + LocaleArDZ: localeTableArDZ, + LocaleArEG: localeTableArEG, + LocaleArEH: localeTableArEH, + LocaleArER: localeTableArER, + LocaleArIL: localeTableArIL, + LocaleArIQ: localeTableArIQ, + LocaleArJO: localeTableArJO, + LocaleArKM: localeTableArKM, + LocaleArKW: localeTableArKW, + LocaleArLB: localeTableArLB, + LocaleArLY: localeTableArLY, + LocaleArMA: localeTableArMA, + LocaleArMR: localeTableArMR, + LocaleArOM: localeTableArOM, + LocaleArPS: localeTableArPS, + LocaleArQA: localeTableArQA, + LocaleArSA: localeTableArSA, + LocaleArSD: localeTableArSD, + LocaleArSO: localeTableArSO, + LocaleArSS: localeTableArSS, + LocaleArSY: localeTableArSY, + LocaleArTD: localeTableArTD, + LocaleArTN: localeTableArTN, + LocaleArYE: localeTableArYE, + LocaleAs: localeTableAs, + LocaleAsIN: localeTableAsIN, + LocaleAsa: localeTableAsa, + LocaleAsaTZ: localeTableAsaTZ, + LocaleAst: localeTableAst, + LocaleAstES: localeTableAstES, + LocaleAz: localeTableAz, + LocaleAzCyrl: localeTableAzCyrl, + LocaleAzCyrlAZ: localeTableAzCyrlAZ, + LocaleAzLatn: localeTableAzLatn, + LocaleAzLatnAZ: localeTableAzLatnAZ, + LocaleBal: localeTableBal, + LocaleBalArab: localeTableBalArab, + LocaleBalArabPK: localeTableBalArabPK, + LocaleBalLatn: localeTableBalLatn, + LocaleBalLatnPK: localeTableBalLatnPK, + LocaleBas: localeTableBas, + LocaleBasCM: localeTableBasCM, + LocaleBe: localeTableBe, + LocaleBeBY: localeTableBeBY, + LocaleBetarask: localeTableBetarask, + LocaleBem: localeTableBem, + LocaleBemZM: localeTableBemZM, + LocaleBew: localeTableBew, + LocaleBewID: localeTableBewID, + LocaleBez: localeTableBez, + LocaleBezTZ: localeTableBezTZ, + LocaleBg: localeTableBg, + LocaleBgBG: localeTableBgBG, + LocaleBgc: localeTableBgc, + LocaleBgcIN: localeTableBgcIN, + LocaleBho: localeTableBho, + LocaleBhoIN: localeTableBhoIN, + LocaleBlo: localeTableBlo, + LocaleBloBJ: localeTableBloBJ, + LocaleBm: localeTableBm, + LocaleBmML: localeTableBmML, + LocaleBn: localeTableBn, + LocaleBnBD: localeTableBnBD, + LocaleBnIN: localeTableBnIN, + LocaleBo: localeTableBo, + LocaleBoCN: localeTableBoCN, + LocaleBoIN: localeTableBoIN, + LocaleBr: localeTableBr, + LocaleBrFR: localeTableBrFR, + LocaleBrx: localeTableBrx, + LocaleBrxIN: localeTableBrxIN, + LocaleBs: 
localeTableBs, + LocaleBsCyrl: localeTableBsCyrl, + LocaleBsCyrlBA: localeTableBsCyrlBA, + LocaleBsLatn: localeTableBsLatn, + LocaleBsLatnBA: localeTableBsLatnBA, + LocaleByn: localeTableByn, + LocaleBynER: localeTableBynER, + LocaleCa: localeTableCa, + LocaleCaAD: localeTableCaAD, + LocaleCaES: localeTableCaES, + LocaleCaESvalencia: localeTableCaESvalencia, + LocaleCaFR: localeTableCaFR, + LocaleCaIT: localeTableCaIT, + LocaleCad: localeTableCad, + LocaleCadUS: localeTableCadUS, + LocaleCch: localeTableCch, + LocaleCchNG: localeTableCchNG, + LocaleCcp: localeTableCcp, + LocaleCcpBD: localeTableCcpBD, + LocaleCcpIN: localeTableCcpIN, + LocaleCe: localeTableCe, + LocaleCeRU: localeTableCeRU, + LocaleCeb: localeTableCeb, + LocaleCebPH: localeTableCebPH, + LocaleCgg: localeTableCgg, + LocaleCggUG: localeTableCggUG, + LocaleChr: localeTableChr, + LocaleChrUS: localeTableChrUS, + LocaleCic: localeTableCic, + LocaleCicUS: localeTableCicUS, + LocaleCkb: localeTableCkb, + LocaleCkbIQ: localeTableCkbIQ, + LocaleCkbIR: localeTableCkbIR, + LocaleCo: localeTableCo, + LocaleCoFR: localeTableCoFR, + LocaleCs: localeTableCs, + LocaleCsCZ: localeTableCsCZ, + LocaleCsw: localeTableCsw, + LocaleCswCA: localeTableCswCA, + LocaleCu: localeTableCu, + LocaleCuRU: localeTableCuRU, + LocaleCv: localeTableCv, + LocaleCvRU: localeTableCvRU, + LocaleCy: localeTableCy, + LocaleCyGB: localeTableCyGB, + LocaleDa: localeTableDa, + LocaleDaDK: localeTableDaDK, + LocaleDaGL: localeTableDaGL, + LocaleDav: localeTableDav, + LocaleDavKE: localeTableDavKE, + LocaleDe: localeTableDe, + LocaleDeAT: localeTableDeAT, + LocaleDeBE: localeTableDeBE, + LocaleDeCH: localeTableDeCH, + LocaleDeDE: localeTableDeDE, + LocaleDeIT: localeTableDeIT, + LocaleDeLI: localeTableDeLI, + LocaleDeLU: localeTableDeLU, + LocaleDje: localeTableDje, + LocaleDjeNE: localeTableDjeNE, + LocaleDoi: localeTableDoi, + LocaleDoiIN: localeTableDoiIN, + LocaleDsb: localeTableDsb, + LocaleDsbDE: localeTableDsbDE, + LocaleDua: localeTableDua, + LocaleDuaCM: localeTableDuaCM, + LocaleDyo: localeTableDyo, + LocaleDyoSN: localeTableDyoSN, + LocaleDz: localeTableDz, + LocaleDzBT: localeTableDzBT, + LocaleEbu: localeTableEbu, + LocaleEbuKE: localeTableEbuKE, + LocaleEe: localeTableEe, + LocaleEeGH: localeTableEeGH, + LocaleEeTG: localeTableEeTG, + LocaleEl: localeTableEl, + LocaleElCY: localeTableElCY, + LocaleElGR: localeTableElGR, + LocaleElpolyton: localeTableElpolyton, + LocaleEn: localeTableEn, + LocaleEn001: localeTableEn001, + LocaleEn150: localeTableEn150, + LocaleEnAE: localeTableEnAE, + LocaleEnAG: localeTableEnAG, + LocaleEnAI: localeTableEnAI, + LocaleEnAS: localeTableEnAS, + LocaleEnAT: localeTableEnAT, + LocaleEnAU: localeTableEnAU, + LocaleEnBB: localeTableEnBB, + LocaleEnBE: localeTableEnBE, + LocaleEnBI: localeTableEnBI, + LocaleEnBM: localeTableEnBM, + LocaleEnBS: localeTableEnBS, + LocaleEnBW: localeTableEnBW, + LocaleEnBZ: localeTableEnBZ, + LocaleEnCA: localeTableEnCA, + LocaleEnCC: localeTableEnCC, + LocaleEnCH: localeTableEnCH, + LocaleEnCK: localeTableEnCK, + LocaleEnCM: localeTableEnCM, + LocaleEnCX: localeTableEnCX, + LocaleEnCY: localeTableEnCY, + LocaleEnDE: localeTableEnDE, + LocaleEnDG: localeTableEnDG, + LocaleEnDK: localeTableEnDK, + LocaleEnDM: localeTableEnDM, + LocaleEnDsrt: localeTableEnDsrt, + LocaleEnDsrtUS: localeTableEnDsrtUS, + LocaleEnER: localeTableEnER, + LocaleEnFI: localeTableEnFI, + LocaleEnFJ: localeTableEnFJ, + LocaleEnFK: localeTableEnFK, + LocaleEnFM: localeTableEnFM, + LocaleEnGB: localeTableEnGB, + LocaleEnGD: 
localeTableEnGD, + LocaleEnGG: localeTableEnGG, + LocaleEnGH: localeTableEnGH, + LocaleEnGI: localeTableEnGI, + LocaleEnGM: localeTableEnGM, + LocaleEnGU: localeTableEnGU, + LocaleEnGY: localeTableEnGY, + LocaleEnHK: localeTableEnHK, + LocaleEnID: localeTableEnID, + LocaleEnIE: localeTableEnIE, + LocaleEnIL: localeTableEnIL, + LocaleEnIM: localeTableEnIM, + LocaleEnIN: localeTableEnIN, + LocaleEnIO: localeTableEnIO, + LocaleEnJE: localeTableEnJE, + LocaleEnJM: localeTableEnJM, + LocaleEnKE: localeTableEnKE, + LocaleEnKI: localeTableEnKI, + LocaleEnKN: localeTableEnKN, + LocaleEnKY: localeTableEnKY, + LocaleEnLC: localeTableEnLC, + LocaleEnLR: localeTableEnLR, + LocaleEnLS: localeTableEnLS, + LocaleEnMG: localeTableEnMG, + LocaleEnMH: localeTableEnMH, + LocaleEnMO: localeTableEnMO, + LocaleEnMP: localeTableEnMP, + LocaleEnMS: localeTableEnMS, + LocaleEnMT: localeTableEnMT, + LocaleEnMU: localeTableEnMU, + LocaleEnMV: localeTableEnMV, + LocaleEnMW: localeTableEnMW, + LocaleEnMY: localeTableEnMY, + LocaleEnNA: localeTableEnNA, + LocaleEnNF: localeTableEnNF, + LocaleEnNG: localeTableEnNG, + LocaleEnNL: localeTableEnNL, + LocaleEnNR: localeTableEnNR, + LocaleEnNU: localeTableEnNU, + LocaleEnNZ: localeTableEnNZ, + LocaleEnPG: localeTableEnPG, + LocaleEnPH: localeTableEnPH, + LocaleEnPK: localeTableEnPK, + LocaleEnPN: localeTableEnPN, + LocaleEnPR: localeTableEnPR, + LocaleEnPW: localeTableEnPW, + LocaleEnRW: localeTableEnRW, + LocaleEnSB: localeTableEnSB, + LocaleEnSC: localeTableEnSC, + LocaleEnSD: localeTableEnSD, + LocaleEnSE: localeTableEnSE, + LocaleEnSG: localeTableEnSG, + LocaleEnSH: localeTableEnSH, + LocaleEnSI: localeTableEnSI, + LocaleEnSL: localeTableEnSL, + LocaleEnSS: localeTableEnSS, + LocaleEnSX: localeTableEnSX, + LocaleEnSZ: localeTableEnSZ, + LocaleEnShaw: localeTableEnShaw, + LocaleEnShawGB: localeTableEnShawGB, + LocaleEnTC: localeTableEnTC, + LocaleEnTK: localeTableEnTK, + LocaleEnTO: localeTableEnTO, + LocaleEnTT: localeTableEnTT, + LocaleEnTV: localeTableEnTV, + LocaleEnTZ: localeTableEnTZ, + LocaleEnUG: localeTableEnUG, + LocaleEnUM: localeTableEnUM, + LocaleEnUS: localeTableEnUS, + LocaleEnUSuvaposix: localeTableEnUSuvaposix, + LocaleEnVC: localeTableEnVC, + LocaleEnVG: localeTableEnVG, + LocaleEnVI: localeTableEnVI, + LocaleEnVU: localeTableEnVU, + LocaleEnWS: localeTableEnWS, + LocaleEnZA: localeTableEnZA, + LocaleEnZM: localeTableEnZM, + LocaleEnZW: localeTableEnZW, + LocaleEo: localeTableEo, + LocaleEo001: localeTableEo001, + LocaleEs: localeTableEs, + LocaleEs419: localeTableEs419, + LocaleEsAR: localeTableEsAR, + LocaleEsBO: localeTableEsBO, + LocaleEsBR: localeTableEsBR, + LocaleEsBZ: localeTableEsBZ, + LocaleEsCL: localeTableEsCL, + LocaleEsCO: localeTableEsCO, + LocaleEsCR: localeTableEsCR, + LocaleEsCU: localeTableEsCU, + LocaleEsDO: localeTableEsDO, + LocaleEsEA: localeTableEsEA, + LocaleEsEC: localeTableEsEC, + LocaleEsES: localeTableEsES, + LocaleEsGQ: localeTableEsGQ, + LocaleEsGT: localeTableEsGT, + LocaleEsHN: localeTableEsHN, + LocaleEsIC: localeTableEsIC, + LocaleEsMX: localeTableEsMX, + LocaleEsNI: localeTableEsNI, + LocaleEsPA: localeTableEsPA, + LocaleEsPE: localeTableEsPE, + LocaleEsPH: localeTableEsPH, + LocaleEsPR: localeTableEsPR, + LocaleEsPY: localeTableEsPY, + LocaleEsSV: localeTableEsSV, + LocaleEsUS: localeTableEsUS, + LocaleEsUY: localeTableEsUY, + LocaleEsVE: localeTableEsVE, + LocaleEt: localeTableEt, + LocaleEtEE: localeTableEtEE, + LocaleEu: localeTableEu, + LocaleEuES: localeTableEuES, + LocaleEwo: localeTableEwo, + LocaleEwoCM: 
localeTableEwoCM, + LocaleFa: localeTableFa, + LocaleFaAF: localeTableFaAF, + LocaleFaIR: localeTableFaIR, + LocaleFf: localeTableFf, + LocaleFfAdlm: localeTableFfAdlm, + LocaleFfAdlmBF: localeTableFfAdlmBF, + LocaleFfAdlmCM: localeTableFfAdlmCM, + LocaleFfAdlmGH: localeTableFfAdlmGH, + LocaleFfAdlmGM: localeTableFfAdlmGM, + LocaleFfAdlmGN: localeTableFfAdlmGN, + LocaleFfAdlmGW: localeTableFfAdlmGW, + LocaleFfAdlmLR: localeTableFfAdlmLR, + LocaleFfAdlmMR: localeTableFfAdlmMR, + LocaleFfAdlmNE: localeTableFfAdlmNE, + LocaleFfAdlmNG: localeTableFfAdlmNG, + LocaleFfAdlmSL: localeTableFfAdlmSL, + LocaleFfAdlmSN: localeTableFfAdlmSN, + LocaleFfLatn: localeTableFfLatn, + LocaleFfLatnBF: localeTableFfLatnBF, + LocaleFfLatnCM: localeTableFfLatnCM, + LocaleFfLatnGH: localeTableFfLatnGH, + LocaleFfLatnGM: localeTableFfLatnGM, + LocaleFfLatnGN: localeTableFfLatnGN, + LocaleFfLatnGW: localeTableFfLatnGW, + LocaleFfLatnLR: localeTableFfLatnLR, + LocaleFfLatnMR: localeTableFfLatnMR, + LocaleFfLatnNE: localeTableFfLatnNE, + LocaleFfLatnNG: localeTableFfLatnNG, + LocaleFfLatnSL: localeTableFfLatnSL, + LocaleFfLatnSN: localeTableFfLatnSN, + LocaleFi: localeTableFi, + LocaleFiFI: localeTableFiFI, + LocaleFil: localeTableFil, + LocaleFilPH: localeTableFilPH, + LocaleFo: localeTableFo, + LocaleFoDK: localeTableFoDK, + LocaleFoFO: localeTableFoFO, + LocaleFr: localeTableFr, + LocaleFrBE: localeTableFrBE, + LocaleFrBF: localeTableFrBF, + LocaleFrBI: localeTableFrBI, + LocaleFrBJ: localeTableFrBJ, + LocaleFrBL: localeTableFrBL, + LocaleFrCA: localeTableFrCA, + LocaleFrCD: localeTableFrCD, + LocaleFrCF: localeTableFrCF, + LocaleFrCG: localeTableFrCG, + LocaleFrCH: localeTableFrCH, + LocaleFrCI: localeTableFrCI, + LocaleFrCM: localeTableFrCM, + LocaleFrDJ: localeTableFrDJ, + LocaleFrDZ: localeTableFrDZ, + LocaleFrFR: localeTableFrFR, + LocaleFrGA: localeTableFrGA, + LocaleFrGF: localeTableFrGF, + LocaleFrGN: localeTableFrGN, + LocaleFrGP: localeTableFrGP, + LocaleFrGQ: localeTableFrGQ, + LocaleFrHT: localeTableFrHT, + LocaleFrKM: localeTableFrKM, + LocaleFrLU: localeTableFrLU, + LocaleFrMA: localeTableFrMA, + LocaleFrMC: localeTableFrMC, + LocaleFrMF: localeTableFrMF, + LocaleFrMG: localeTableFrMG, + LocaleFrML: localeTableFrML, + LocaleFrMQ: localeTableFrMQ, + LocaleFrMR: localeTableFrMR, + LocaleFrMU: localeTableFrMU, + LocaleFrNC: localeTableFrNC, + LocaleFrNE: localeTableFrNE, + LocaleFrPF: localeTableFrPF, + LocaleFrPM: localeTableFrPM, + LocaleFrRE: localeTableFrRE, + LocaleFrRW: localeTableFrRW, + LocaleFrSC: localeTableFrSC, + LocaleFrSN: localeTableFrSN, + LocaleFrSY: localeTableFrSY, + LocaleFrTD: localeTableFrTD, + LocaleFrTG: localeTableFrTG, + LocaleFrTN: localeTableFrTN, + LocaleFrVU: localeTableFrVU, + LocaleFrWF: localeTableFrWF, + LocaleFrYT: localeTableFrYT, + LocaleFrr: localeTableFrr, + LocaleFrrDE: localeTableFrrDE, + LocaleFur: localeTableFur, + LocaleFurIT: localeTableFurIT, + LocaleFy: localeTableFy, + LocaleFyNL: localeTableFyNL, + LocaleGa: localeTableGa, + LocaleGaGB: localeTableGaGB, + LocaleGaIE: localeTableGaIE, + LocaleGaa: localeTableGaa, + LocaleGaaGH: localeTableGaaGH, + LocaleGd: localeTableGd, + LocaleGdGB: localeTableGdGB, + LocaleGez: localeTableGez, + LocaleGezER: localeTableGezER, + LocaleGezET: localeTableGezET, + LocaleGl: localeTableGl, + LocaleGlES: localeTableGlES, + LocaleGn: localeTableGn, + LocaleGnPY: localeTableGnPY, + LocaleGsw: localeTableGsw, + LocaleGswCH: localeTableGswCH, + LocaleGswFR: localeTableGswFR, + LocaleGswLI: localeTableGswLI, + LocaleGu: 
localeTableGu, + LocaleGuIN: localeTableGuIN, + LocaleGuz: localeTableGuz, + LocaleGuzKE: localeTableGuzKE, + LocaleGv: localeTableGv, + LocaleGvIM: localeTableGvIM, + LocaleHa: localeTableHa, + LocaleHaArab: localeTableHaArab, + LocaleHaArabNG: localeTableHaArabNG, + LocaleHaArabSD: localeTableHaArabSD, + LocaleHaGH: localeTableHaGH, + LocaleHaNE: localeTableHaNE, + LocaleHaNG: localeTableHaNG, + LocaleHaw: localeTableHaw, + LocaleHawUS: localeTableHawUS, + LocaleHe: localeTableHe, + LocaleHeIL: localeTableHeIL, + LocaleHi: localeTableHi, + LocaleHiIN: localeTableHiIN, + LocaleHiLatn: localeTableHiLatn, + LocaleHiLatnIN: localeTableHiLatnIN, + LocaleHnj: localeTableHnj, + LocaleHr: localeTableHr, + LocaleHrBA: localeTableHrBA, + LocaleHrHR: localeTableHrHR, + LocaleHsb: localeTableHsb, + LocaleHsbDE: localeTableHsbDE, + LocaleHu: localeTableHu, + LocaleHuHU: localeTableHuHU, + LocaleHy: localeTableHy, + LocaleHyAM: localeTableHyAM, + LocaleIa: localeTableIa, + LocaleIa001: localeTableIa001, + LocaleId: localeTableId, + LocaleIdID: localeTableIdID, + LocaleIe: localeTableIe, + LocaleIeEE: localeTableIeEE, + LocaleIg: localeTableIg, + LocaleIgNG: localeTableIgNG, + LocaleIi: localeTableIi, + LocaleIiCN: localeTableIiCN, + LocaleIs: localeTableIs, + LocaleIsIS: localeTableIsIS, + LocaleIt: localeTableIt, + LocaleItCH: localeTableItCH, + LocaleItIT: localeTableItIT, + LocaleItSM: localeTableItSM, + LocaleItVA: localeTableItVA, + LocaleIu: localeTableIu, + LocaleIuCA: localeTableIuCA, + LocaleJa: localeTableJa, + LocaleJaJP: localeTableJaJP, + LocaleJgo: localeTableJgo, + LocaleJgoCM: localeTableJgoCM, + LocaleJmc: localeTableJmc, + LocaleJmcTZ: localeTableJmcTZ, + LocaleJv: localeTableJv, + LocaleJvID: localeTableJvID, + LocaleKa: localeTableKa, + LocaleKaGE: localeTableKaGE, + LocaleKab: localeTableKab, + LocaleKabDZ: localeTableKabDZ, + LocaleKaj: localeTableKaj, + LocaleKajNG: localeTableKajNG, + LocaleKam: localeTableKam, + LocaleKamKE: localeTableKamKE, + LocaleKcg: localeTableKcg, + LocaleKcgNG: localeTableKcgNG, + LocaleKde: localeTableKde, + LocaleKdeTZ: localeTableKdeTZ, + LocaleKea: localeTableKea, + LocaleKeaCV: localeTableKeaCV, + LocaleKgp: localeTableKgp, + LocaleKgpBR: localeTableKgpBR, + LocaleKhq: localeTableKhq, + LocaleKhqML: localeTableKhqML, + LocaleKi: localeTableKi, + LocaleKiKE: localeTableKiKE, + LocaleKk: localeTableKk, + LocaleKkKZ: localeTableKkKZ, + LocaleKkj: localeTableKkj, + LocaleKkjCM: localeTableKkjCM, + LocaleKl: localeTableKl, + LocaleKlGL: localeTableKlGL, + LocaleKln: localeTableKln, + LocaleKlnKE: localeTableKlnKE, + LocaleKm: localeTableKm, + LocaleKmKH: localeTableKmKH, + LocaleKn: localeTableKn, + LocaleKnIN: localeTableKnIN, + LocaleKo: localeTableKo, + LocaleKoCN: localeTableKoCN, + LocaleKoKP: localeTableKoKP, + LocaleKoKR: localeTableKoKR, + LocaleKok: localeTableKok, + LocaleKokIN: localeTableKokIN, + LocaleKs: localeTableKs, + LocaleKsArab: localeTableKsArab, + LocaleKsArabIN: localeTableKsArabIN, + LocaleKsDeva: localeTableKsDeva, + LocaleKsDevaIN: localeTableKsDevaIN, + LocaleKsb: localeTableKsb, + LocaleKsbTZ: localeTableKsbTZ, + LocaleKsf: localeTableKsf, + LocaleKsfCM: localeTableKsfCM, + LocaleKsh: localeTableKsh, + LocaleKshDE: localeTableKshDE, + LocaleKu: localeTableKu, + LocaleKuTR: localeTableKuTR, + LocaleKw: localeTableKw, + LocaleKwGB: localeTableKwGB, + LocaleKxv: localeTableKxv, + LocaleKxvDeva: localeTableKxvDeva, + LocaleKxvDevaIN: localeTableKxvDevaIN, + LocaleKxvOrya: localeTableKxvOrya, + LocaleKxvOryaIN: 
localeTableKxvOryaIN, + LocaleKxvTelu: localeTableKxvTelu, + LocaleKxvTeluIN: localeTableKxvTeluIN, + LocaleKy: localeTableKy, + LocaleKyKG: localeTableKyKG, + LocaleLa: localeTableLa, + LocaleLaVA: localeTableLaVA, + LocaleLag: localeTableLag, + LocaleLagTZ: localeTableLagTZ, + LocaleLb: localeTableLb, + LocaleLbLU: localeTableLbLU, + LocaleLg: localeTableLg, + LocaleLgUG: localeTableLgUG, + LocaleLij: localeTableLij, + LocaleLijIT: localeTableLijIT, + LocaleLkt: localeTableLkt, + LocaleLktUS: localeTableLktUS, + LocaleLmo: localeTableLmo, + LocaleLmoIT: localeTableLmoIT, + LocaleLn: localeTableLn, + LocaleLnAO: localeTableLnAO, + LocaleLnCD: localeTableLnCD, + LocaleLnCF: localeTableLnCF, + LocaleLnCG: localeTableLnCG, + LocaleLo: localeTableLo, + LocaleLoLA: localeTableLoLA, + LocaleLrc: localeTableLrc, + LocaleLrcIQ: localeTableLrcIQ, + LocaleLrcIR: localeTableLrcIR, + LocaleLt: localeTableLt, + LocaleLtLT: localeTableLtLT, + LocaleLu: localeTableLu, + LocaleLuCD: localeTableLuCD, + LocaleLuo: localeTableLuo, + LocaleLuoKE: localeTableLuoKE, + LocaleLuy: localeTableLuy, + LocaleLuyKE: localeTableLuyKE, + LocaleLv: localeTableLv, + LocaleLvLV: localeTableLvLV, + LocaleMai: localeTableMai, + LocaleMaiIN: localeTableMaiIN, + LocaleMas: localeTableMas, + LocaleMasKE: localeTableMasKE, + LocaleMasTZ: localeTableMasTZ, + LocaleMer: localeTableMer, + LocaleMerKE: localeTableMerKE, + LocaleMfe: localeTableMfe, + LocaleMfeMU: localeTableMfeMU, + LocaleMg: localeTableMg, + LocaleMgMG: localeTableMgMG, + LocaleMgh: localeTableMgh, + LocaleMghMZ: localeTableMghMZ, + LocaleMgo: localeTableMgo, + LocaleMgoCM: localeTableMgoCM, + LocaleMi: localeTableMi, + LocaleMiNZ: localeTableMiNZ, + LocaleMk: localeTableMk, + LocaleMkMK: localeTableMkMK, + LocaleMl: localeTableMl, + LocaleMlIN: localeTableMlIN, + LocaleMn: localeTableMn, + LocaleMnMN: localeTableMnMN, + LocaleMnMongMN: localeTableMnMongMN, + LocaleMni: localeTableMni, + LocaleMniBeng: localeTableMniBeng, + LocaleMniBengIN: localeTableMniBengIN, + LocaleMr: localeTableMr, + LocaleMrIN: localeTableMrIN, + LocaleMs: localeTableMs, + LocaleMsArab: localeTableMsArab, + LocaleMsArabBN: localeTableMsArabBN, + LocaleMsArabMY: localeTableMsArabMY, + LocaleMsBN: localeTableMsBN, + LocaleMsID: localeTableMsID, + LocaleMsMY: localeTableMsMY, + LocaleMsSG: localeTableMsSG, + LocaleMt: localeTableMt, + LocaleMtMT: localeTableMtMT, + LocaleMua: localeTableMua, + LocaleMuaCM: localeTableMuaCM, + LocaleMus: localeTableMus, + LocaleMusUS: localeTableMusUS, + LocaleMy: localeTableMy, + LocaleMyMM: localeTableMyMM, + LocaleMyv: localeTableMyv, + LocaleMyvRU: localeTableMyvRU, + LocaleMzn: localeTableMzn, + LocaleMznIR: localeTableMznIR, + LocaleNaq: localeTableNaq, + LocaleNaqNA: localeTableNaqNA, + LocaleNd: localeTableNd, + LocaleNdZW: localeTableNdZW, + LocaleNds: localeTableNds, + LocaleNdsDE: localeTableNdsDE, + LocaleNdsNL: localeTableNdsNL, + LocaleNe: localeTableNe, + LocaleNeIN: localeTableNeIN, + LocaleNeNP: localeTableNeNP, + LocaleNl: localeTableNl, + LocaleNlAW: localeTableNlAW, + LocaleNlBE: localeTableNlBE, + LocaleNlBQ: localeTableNlBQ, + LocaleNlCW: localeTableNlCW, + LocaleNlNL: localeTableNlNL, + LocaleNlSR: localeTableNlSR, + LocaleNlSX: localeTableNlSX, + LocaleNmg: localeTableNmg, + LocaleNmgCM: localeTableNmgCM, + LocaleNn: localeTableNn, + LocaleNnNO: localeTableNnNO, + LocaleNnh: localeTableNnh, + LocaleNnhCM: localeTableNnhCM, + LocaleNo: localeTableNo, + LocaleNqo: localeTableNqo, + LocaleNqoGN: localeTableNqoGN, + LocaleNr: localeTableNr, 
+ LocaleNrZA: localeTableNrZA, + LocaleNso: localeTableNso, + LocaleNsoZA: localeTableNsoZA, + LocaleNus: localeTableNus, + LocaleNusSS: localeTableNusSS, + LocaleNy: localeTableNy, + LocaleNyMW: localeTableNyMW, + LocaleNyn: localeTableNyn, + LocaleNynUG: localeTableNynUG, + LocaleOc: localeTableOc, + LocaleOcES: localeTableOcES, + LocaleOcFR: localeTableOcFR, + LocaleOm: localeTableOm, + LocaleOmET: localeTableOmET, + LocaleOmKE: localeTableOmKE, + LocaleOr: localeTableOr, + LocaleOrIN: localeTableOrIN, + LocaleOs: localeTableOs, + LocaleOsGE: localeTableOsGE, + LocaleOsRU: localeTableOsRU, + LocaleOsa: localeTableOsa, + LocaleOsaUS: localeTableOsaUS, + LocalePa: localeTablePa, + LocalePaArab: localeTablePaArab, + LocalePaArabPK: localeTablePaArabPK, + LocalePaGuru: localeTablePaGuru, + LocalePaGuruIN: localeTablePaGuruIN, + LocalePap: localeTablePap, + LocalePapAW: localeTablePapAW, + LocalePapCW: localeTablePapCW, + LocalePcm: localeTablePcm, + LocalePcmNG: localeTablePcmNG, + LocalePis: localeTablePis, + LocalePisSB: localeTablePisSB, + LocalePl: localeTablePl, + LocalePlPL: localeTablePlPL, + LocalePrg: localeTablePrg, + LocalePrgPL: localeTablePrgPL, + LocalePs: localeTablePs, + LocalePsAF: localeTablePsAF, + LocalePsPK: localeTablePsPK, + LocalePt: localeTablePt, + LocalePtAO: localeTablePtAO, + LocalePtBR: localeTablePtBR, + LocalePtCH: localeTablePtCH, + LocalePtCV: localeTablePtCV, + LocalePtGQ: localeTablePtGQ, + LocalePtGW: localeTablePtGW, + LocalePtLU: localeTablePtLU, + LocalePtMO: localeTablePtMO, + LocalePtMZ: localeTablePtMZ, + LocalePtPT: localeTablePtPT, + LocalePtST: localeTablePtST, + LocalePtTL: localeTablePtTL, + LocaleQu: localeTableQu, + LocaleQuBO: localeTableQuBO, + LocaleQuEC: localeTableQuEC, + LocaleQuPE: localeTableQuPE, + LocaleRaj: localeTableRaj, + LocaleRajIN: localeTableRajIN, + LocaleRif: localeTableRif, + LocaleRifMA: localeTableRifMA, + LocaleRm: localeTableRm, + LocaleRmCH: localeTableRmCH, + LocaleRn: localeTableRn, + LocaleRnBI: localeTableRnBI, + LocaleRo: localeTableRo, + LocaleRoMD: localeTableRoMD, + LocaleRoRO: localeTableRoRO, + LocaleRof: localeTableRof, + LocaleRofTZ: localeTableRofTZ, + LocaleRu: localeTableRu, + LocaleRuBY: localeTableRuBY, + LocaleRuKG: localeTableRuKG, + LocaleRuKZ: localeTableRuKZ, + LocaleRuMD: localeTableRuMD, + LocaleRuRU: localeTableRuRU, + LocaleRuUA: localeTableRuUA, + LocaleRw: localeTableRw, + LocaleRwRW: localeTableRwRW, + LocaleRwk: localeTableRwk, + LocaleRwkTZ: localeTableRwkTZ, + LocaleSa: localeTableSa, + LocaleSaIN: localeTableSaIN, + LocaleSah: localeTableSah, + LocaleSahRU: localeTableSahRU, + LocaleSaq: localeTableSaq, + LocaleSaqKE: localeTableSaqKE, + LocaleSat: localeTableSat, + LocaleSbp: localeTableSbp, + LocaleSbpTZ: localeTableSbpTZ, + LocaleSc: localeTableSc, + LocaleScIT: localeTableScIT, + LocaleScn: localeTableScn, + LocaleScnIT: localeTableScnIT, + LocaleSd: localeTableSd, + LocaleSdArab: localeTableSdArab, + LocaleSdArabPK: localeTableSdArabPK, + LocaleSdDeva: localeTableSdDeva, + LocaleSdDevaIN: localeTableSdDevaIN, + LocaleSe: localeTableSe, + LocaleSeFI: localeTableSeFI, + LocaleSeNO: localeTableSeNO, + LocaleSeSE: localeTableSeSE, + LocaleSeh: localeTableSeh, + LocaleSehMZ: localeTableSehMZ, + LocaleSes: localeTableSes, + LocaleSesML: localeTableSesML, + LocaleSg: localeTableSg, + LocaleSgCF: localeTableSgCF, + LocaleShi: localeTableShi, + LocaleShiLatn: localeTableShiLatn, + LocaleShiLatnMA: localeTableShiLatnMA, + LocaleShiTfng: localeTableShiTfng, + LocaleShiTfngMA: 
localeTableShiTfngMA, + LocaleSi: localeTableSi, + LocaleSiLK: localeTableSiLK, + LocaleSid: localeTableSid, + LocaleSidET: localeTableSidET, + LocaleSk: localeTableSk, + LocaleSkSK: localeTableSkSK, + LocaleSkr: localeTableSkr, + LocaleSkrPK: localeTableSkrPK, + LocaleSl: localeTableSl, + LocaleSlSI: localeTableSlSI, + LocaleSmn: localeTableSmn, + LocaleSmnFI: localeTableSmnFI, + LocaleSn: localeTableSn, + LocaleSnZW: localeTableSnZW, + LocaleSo: localeTableSo, + LocaleSoDJ: localeTableSoDJ, + LocaleSoET: localeTableSoET, + LocaleSoKE: localeTableSoKE, + LocaleSoSO: localeTableSoSO, + LocaleSq: localeTableSq, + LocaleSqAL: localeTableSqAL, + LocaleSqMK: localeTableSqMK, + LocaleSqXK: localeTableSqXK, + LocaleSr: localeTableSr, + LocaleSrCyrl: localeTableSrCyrl, + LocaleSrCyrlBA: localeTableSrCyrlBA, + LocaleSrCyrlME: localeTableSrCyrlME, + LocaleSrCyrlRS: localeTableSrCyrlRS, + LocaleSrCyrlXK: localeTableSrCyrlXK, + LocaleSrLatn: localeTableSrLatn, + LocaleSrLatnBA: localeTableSrLatnBA, + LocaleSrLatnME: localeTableSrLatnME, + LocaleSrLatnRS: localeTableSrLatnRS, + LocaleSrLatnXK: localeTableSrLatnXK, + LocaleSs: localeTableSs, + LocaleSsSZ: localeTableSsSZ, + LocaleSsZA: localeTableSsZA, + LocaleSsy: localeTableSsy, + LocaleSsyER: localeTableSsyER, + LocaleSt: localeTableSt, + LocaleStLS: localeTableStLS, + LocaleStZA: localeTableStZA, + LocaleSu: localeTableSu, + LocaleSuLatn: localeTableSuLatn, + LocaleSuLatnID: localeTableSuLatnID, + LocaleSv: localeTableSv, + LocaleSvAX: localeTableSvAX, + LocaleSvFI: localeTableSvFI, + LocaleSvSE: localeTableSvSE, + LocaleSw: localeTableSw, + LocaleSwCD: localeTableSwCD, + LocaleSwKE: localeTableSwKE, + LocaleSwTZ: localeTableSwTZ, + LocaleSwUG: localeTableSwUG, + LocaleSyr: localeTableSyr, + LocaleSyrIQ: localeTableSyrIQ, + LocaleSyrSY: localeTableSyrSY, + LocaleSzl: localeTableSzl, + LocaleSzlPL: localeTableSzlPL, + LocaleTa: localeTableTa, + LocaleTaIN: localeTableTaIN, + LocaleTaLK: localeTableTaLK, + LocaleTaMY: localeTableTaMY, + LocaleTaSG: localeTableTaSG, + LocaleTe: localeTableTe, + LocaleTeIN: localeTableTeIN, + LocaleTeo: localeTableTeo, + LocaleTeoKE: localeTableTeoKE, + LocaleTeoUG: localeTableTeoUG, + LocaleTg: localeTableTg, + LocaleTgTJ: localeTableTgTJ, + LocaleTh: localeTableTh, + LocaleThTH: localeTableThTH, + LocaleTi: localeTableTi, + LocaleTiER: localeTableTiER, + LocaleTiET: localeTableTiET, + LocaleTig: localeTableTig, + LocaleTigER: localeTableTigER, + LocaleTk: localeTableTk, + LocaleTkTM: localeTableTkTM, + LocaleTn: localeTableTn, + LocaleTnBW: localeTableTnBW, + LocaleTnZA: localeTableTnZA, + LocaleTo: localeTableTo, + LocaleToTO: localeTableToTO, + LocaleTok: localeTableTok, + LocaleTok001: localeTableTok001, + LocaleTpi: localeTableTpi, + LocaleTpiPG: localeTableTpiPG, + LocaleTr: localeTableTr, + LocaleTrCY: localeTableTrCY, + LocaleTrTR: localeTableTrTR, + LocaleTrv: localeTableTrv, + LocaleTrvTW: localeTableTrvTW, + LocaleTrw: localeTableTrw, + LocaleTrwPK: localeTableTrwPK, + LocaleTs: localeTableTs, + LocaleTsZA: localeTableTsZA, + LocaleTt: localeTableTt, + LocaleTtRU: localeTableTtRU, + LocaleTwq: localeTableTwq, + LocaleTwqNE: localeTableTwqNE, + LocaleTzm: localeTableTzm, + LocaleTzmMA: localeTableTzmMA, + LocaleUg: localeTableUg, + LocaleUgCN: localeTableUgCN, + LocaleUk: localeTableUk, + LocaleUkUA: localeTableUkUA, + LocaleUnd: localeTableUnd, + LocaleUr: localeTableUr, + LocaleUrIN: localeTableUrIN, + LocaleUrPK: localeTableUrPK, + LocaleUz: localeTableUz, + LocaleUzArab: localeTableUzArab, + 
LocaleUzArabAF: localeTableUzArabAF, + LocaleUzCyrl: localeTableUzCyrl, + LocaleUzCyrlUZ: localeTableUzCyrlUZ, + LocaleUzLatn: localeTableUzLatn, + LocaleUzLatnUZ: localeTableUzLatnUZ, + LocaleVai: localeTableVai, + LocaleVaiLatn: localeTableVaiLatn, + LocaleVaiLatnLR: localeTableVaiLatnLR, + LocaleVaiVaii: localeTableVaiVaii, + LocaleVaiVaiiLR: localeTableVaiVaiiLR, + LocaleVe: localeTableVe, + LocaleVeZA: localeTableVeZA, + LocaleVec: localeTableVec, + LocaleVecIT: localeTableVecIT, + LocaleVi: localeTableVi, + LocaleViVN: localeTableViVN, + LocaleVmw: localeTableVmw, + LocaleVmwMZ: localeTableVmwMZ, + LocaleVo: localeTableVo, + LocaleVo001: localeTableVo001, + LocaleVun: localeTableVun, + LocaleVunTZ: localeTableVunTZ, + LocaleWae: localeTableWae, + LocaleWaeCH: localeTableWaeCH, + LocaleWal: localeTableWal, + LocaleWalET: localeTableWalET, + LocaleWo: localeTableWo, + LocaleWoSN: localeTableWoSN, + LocaleXh: localeTableXh, + LocaleXhZA: localeTableXhZA, + LocaleXnr: localeTableXnr, + LocaleXnrIN: localeTableXnrIN, + LocaleXog: localeTableXog, + LocaleXogUG: localeTableXogUG, + LocaleYav: localeTableYav, + LocaleYavCM: localeTableYavCM, + LocaleYi: localeTableYi, + LocaleYiUA: localeTableYiUA, + LocaleYo: localeTableYo, + LocaleYoBJ: localeTableYoBJ, + LocaleYoNG: localeTableYoNG, + LocaleYrl: localeTableYrl, + LocaleYrlBR: localeTableYrlBR, + LocaleYrlCO: localeTableYrlCO, + LocaleYrlVE: localeTableYrlVE, + LocaleYue: localeTableYue, + LocaleYueHans: localeTableYueHans, + LocaleYueHansCN: localeTableYueHansCN, + LocaleYueHant: localeTableYueHant, + LocaleYueHantHK: localeTableYueHantHK, + LocaleZa: localeTableZa, + LocaleZaCN: localeTableZaCN, + LocaleZgh: localeTableZgh, + LocaleZghMA: localeTableZghMA, + LocaleZh: localeTableZh, + LocaleZhHans: localeTableZhHans, + LocaleZhHansCN: localeTableZhHansCN, + LocaleZhHansHK: localeTableZhHansHK, + LocaleZhHansMO: localeTableZhHansMO, + LocaleZhHansSG: localeTableZhHansSG, + LocaleZhHant: localeTableZhHant, + LocaleZhHantHK: localeTableZhHantHK, + LocaleZhHantMO: localeTableZhHantMO, + LocaleZhHantTW: localeTableZhHantTW, + LocaleZu: localeTableZu, + LocaleZuZA: localeTableZuZA, +} + +const ( + shortDayNamesField = iota + longDayNamesField + shortMonthNamesField + longMonthNamesField + dayPeriodsField +) diff --git a/vendor/github.com/expr-lang/expr/README.md b/vendor/github.com/expr-lang/expr/README.md index bd34c7d2487..6c56c67b675 100644 --- a/vendor/github.com/expr-lang/expr/README.md +++ b/vendor/github.com/expr-lang/expr/README.md @@ -162,6 +162,9 @@ func main() { * [Visually.io](https://visually.io) employs Expr as a business rule engine for its personalization targeting algorithm. * [Akvorado](https://github.com/akvorado/akvorado) utilizes Expr to classify exporters and interfaces in network flows. * [keda.sh](https://keda.sh) uses Expr to allow customization of its Kubernetes-based event-driven autoscaling. +* [Span Digital](https://spandigital.com/) uses Expr in its Knowledge Management products. +* [Xiaohongshu](https://www.xiaohongshu.com/) combines YAML with Expr for dynamic policy delivery. +* [Melrōse](https://melrōse.org) uses Expr to implement its music programming language. 
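For reference, each value in the generated tables map above is a [5][]string, and the trailing const block names the five indices (short and long day names, short and long month names, day periods). A minimal lookup sketch under that assumption; lookupLongMonth is a hypothetical helper, not part of the vendored file:

    // lookupLongMonth indexes a generated locale table via the
    // longMonthNamesField constant; it assumes the conventional
    // 12-entry month layout of the generated data.
    func lookupLongMonth(locale string, month int) (string, bool) {
        table, ok := tables[locale]
        if !ok || month < 1 || month > 12 {
            return "", false
        }
        names := table[longMonthNamesField]
        if len(names) < month {
            return "", false
        }
        return names[month-1], true
    }

For example, lookupLongMonth(LocaleEnUS, 1) would be expected to yield "January", true.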
[Add your company too](https://github.com/expr-lang/expr/edit/master/README.md) diff --git a/vendor/github.com/expr-lang/expr/ast/print.go b/vendor/github.com/expr-lang/expr/ast/print.go index fa593ae28b1..6a7d698a99f 100644 --- a/vendor/github.com/expr-lang/expr/ast/print.go +++ b/vendor/github.com/expr-lang/expr/ast/print.go @@ -65,8 +65,7 @@ func (n *BinaryNode) String() string { var lhs, rhs string var lwrap, rwrap bool - lb, ok := n.Left.(*BinaryNode) - if ok { + if lb, ok := n.Left.(*BinaryNode); ok { if operator.Less(lb.Operator, n.Operator) { lwrap = true } @@ -77,9 +76,7 @@ func (n *BinaryNode) String() string { lwrap = true } } - - rb, ok := n.Right.(*BinaryNode) - if ok { + if rb, ok := n.Right.(*BinaryNode); ok { if operator.Less(rb.Operator, n.Operator) { rwrap = true } @@ -88,6 +85,13 @@ func (n *BinaryNode) String() string { } } + if _, ok := n.Left.(*ConditionalNode); ok { + lwrap = true + } + if _, ok := n.Right.(*ConditionalNode); ok { + rwrap = true + } + if lwrap { lhs = fmt.Sprintf("(%s)", n.Left.String()) } else { @@ -108,20 +112,25 @@ func (n *ChainNode) String() string { } func (n *MemberNode) String() string { + node := n.Node.String() + if _, ok := n.Node.(*BinaryNode); ok { + node = fmt.Sprintf("(%s)", node) + } + if n.Optional { if str, ok := n.Property.(*StringNode); ok && utils.IsValidIdentifier(str.Value) { - return fmt.Sprintf("%s?.%s", n.Node.String(), str.Value) + return fmt.Sprintf("%s?.%s", node, str.Value) } else { - return fmt.Sprintf("%s?.[%s]", n.Node.String(), n.Property.String()) + return fmt.Sprintf("%s?.[%s]", node, n.Property.String()) } } if str, ok := n.Property.(*StringNode); ok && utils.IsValidIdentifier(str.Value) { if _, ok := n.Node.(*PointerNode); ok { return fmt.Sprintf(".%s", str.Value) } - return fmt.Sprintf("%s.%s", n.Node.String(), str.Value) + return fmt.Sprintf("%s.%s", node, str.Value) } - return fmt.Sprintf("%s[%s]", n.Node.String(), n.Property.String()) + return fmt.Sprintf("%s[%s]", node, n.Property.String()) } func (n *SliceNode) String() string { @@ -202,5 +211,11 @@ func (n *MapNode) String() string { } func (n *PairNode) String() string { - return fmt.Sprintf("%s: %s", n.Key.String(), n.Value.String()) + if str, ok := n.Key.(*StringNode); ok { + if utils.IsValidIdentifier(str.Value) { + return fmt.Sprintf("%s: %s", str.Value, n.Value.String()) + } + return fmt.Sprintf("%s: %s", str.String(), n.Value.String()) + } + return fmt.Sprintf("(%s): %s", n.Key.String(), n.Value.String()) } diff --git a/vendor/github.com/expr-lang/expr/ast/visitor.go b/vendor/github.com/expr-lang/expr/ast/visitor.go index 287a7558967..90bc9f1d0ee 100644 --- a/vendor/github.com/expr-lang/expr/ast/visitor.go +++ b/vendor/github.com/expr-lang/expr/ast/visitor.go @@ -7,6 +7,9 @@ type Visitor interface { } func Walk(node *Node, v Visitor) { + if *node == nil { + return + } switch n := (*node).(type) { case *NilNode: case *IdentifierNode: diff --git a/vendor/github.com/expr-lang/expr/builtin/builtin.go b/vendor/github.com/expr-lang/expr/builtin/builtin.go index 7bf377df224..cc6f197cdf2 100644 --- a/vendor/github.com/expr-lang/expr/builtin/builtin.go +++ b/vendor/github.com/expr-lang/expr/builtin/builtin.go @@ -83,6 +83,11 @@ var Builtins = []*Function{ Predicate: true, Types: types(new(func([]any, func(any) bool) int)), }, + { + Name: "sum", + Predicate: true, + Types: types(new(func([]any, func(any) bool) int)), + }, { Name: "groupBy", Predicate: true, @@ -387,13 +392,6 @@ var Builtins = []*Function{ return validateAggregateFunc("min", args) }, }, - { - 
Name: "sum", - Func: sum, - Validate: func(args []reflect.Type) (reflect.Type, error) { - return validateAggregateFunc("sum", args) - }, - }, { Name: "mean", Func: func(args ...any) (any, error) { @@ -474,9 +472,27 @@ var Builtins = []*Function{ { Name: "now", Func: func(args ...any) (any, error) { - return time.Now(), nil + if len(args) == 0 { + return time.Now(), nil + } + if len(args) == 1 { + if tz, ok := args[0].(*time.Location); ok { + return time.Now().In(tz), nil + } + } + return nil, fmt.Errorf("invalid number of arguments (expected 0, got %d)", len(args)) + }, + Validate: func(args []reflect.Type) (reflect.Type, error) { + if len(args) == 0 { + return timeType, nil + } + if len(args) == 1 { + if args[0] != nil && args[0].AssignableTo(locationType) { + return timeType, nil + } + } + return anyType, fmt.Errorf("invalid number of arguments (expected 0, got %d)", len(args)) }, - Types: types(new(func() time.Time)), }, { Name: "duration", @@ -488,9 +504,17 @@ var Builtins = []*Function{ { Name: "date", Func: func(args ...any) (any, error) { + tz, ok := args[0].(*time.Location) + if ok { + args = args[1:] + } + date := args[0].(string) if len(args) == 2 { layout := args[1].(string) + if tz != nil { + return time.ParseInLocation(layout, date, tz) + } return time.Parse(layout, date) } if len(args) == 3 { @@ -517,18 +541,43 @@ var Builtins = []*Function{ time.RFC1123, } for _, layout := range layouts { - t, err := time.Parse(layout, date) - if err == nil { - return t, nil + if tz == nil { + t, err := time.Parse(layout, date) + if err == nil { + return t, nil + } + } else { + t, err := time.ParseInLocation(layout, date, tz) + if err == nil { + return t, nil + } } } return nil, fmt.Errorf("invalid date %s", date) }, - Types: types( - new(func(string) time.Time), - new(func(string, string) time.Time), - new(func(string, string, string) time.Time), - ), + Validate: func(args []reflect.Type) (reflect.Type, error) { + if len(args) < 1 { + return anyType, fmt.Errorf("invalid number of arguments (expected at least 1, got %d)", len(args)) + } + if args[0] != nil && args[0].AssignableTo(locationType) { + args = args[1:] + } + if len(args) > 3 { + return anyType, fmt.Errorf("invalid number of arguments (expected at most 3, got %d)", len(args)) + } + return timeType, nil + }, + }, + { + Name: "timezone", + Func: func(args ...any) (any, error) { + tz, err := time.LoadLocation(args[0].(string)) + if err != nil { + return nil, err + } + return tz, nil + }, + Types: types(time.LoadLocation), }, { Name: "first", diff --git a/vendor/github.com/expr-lang/expr/builtin/lib.go b/vendor/github.com/expr-lang/expr/builtin/lib.go index e3a6c0aef91..e3cd61b968f 100644 --- a/vendor/github.com/expr-lang/expr/builtin/lib.go +++ b/vendor/github.com/expr-lang/expr/builtin/lib.go @@ -258,45 +258,6 @@ func String(arg any) any { return fmt.Sprintf("%v", arg) } -func sum(args ...any) (any, error) { - var total int - var fTotal float64 - - for _, arg := range args { - rv := reflect.ValueOf(deref.Deref(arg)) - - switch rv.Kind() { - case reflect.Array, reflect.Slice: - size := rv.Len() - for i := 0; i < size; i++ { - elemSum, err := sum(rv.Index(i).Interface()) - if err != nil { - return nil, err - } - switch elemSum := elemSum.(type) { - case int: - total += elemSum - case float64: - fTotal += elemSum - } - } - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - total += int(rv.Int()) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - total += int(rv.Uint()) - 
case reflect.Float32, reflect.Float64: - fTotal += rv.Float() - default: - return nil, fmt.Errorf("invalid argument for sum (type %T)", arg) - } - } - - if fTotal != 0.0 { - return fTotal + float64(total), nil - } - return total, nil -} - func minMax(name string, fn func(any, any) bool, args ...any) (any, error) { var val any for _, arg := range args { diff --git a/vendor/github.com/expr-lang/expr/builtin/utils.go b/vendor/github.com/expr-lang/expr/builtin/utils.go index 7d3b6ee8e70..29a95731a05 100644 --- a/vendor/github.com/expr-lang/expr/builtin/utils.go +++ b/vendor/github.com/expr-lang/expr/builtin/utils.go @@ -3,14 +3,17 @@ package builtin import ( "fmt" "reflect" + "time" ) var ( - anyType = reflect.TypeOf(new(any)).Elem() - integerType = reflect.TypeOf(0) - floatType = reflect.TypeOf(float64(0)) - arrayType = reflect.TypeOf([]any{}) - mapType = reflect.TypeOf(map[any]any{}) + anyType = reflect.TypeOf(new(any)).Elem() + integerType = reflect.TypeOf(0) + floatType = reflect.TypeOf(float64(0)) + arrayType = reflect.TypeOf([]any{}) + mapType = reflect.TypeOf(map[any]any{}) + timeType = reflect.TypeOf(new(time.Time)).Elem() + locationType = reflect.TypeOf(new(time.Location)) ) func kind(t reflect.Type) reflect.Kind { diff --git a/vendor/github.com/expr-lang/expr/checker/checker.go b/vendor/github.com/expr-lang/expr/checker/checker.go index b46178d43a5..c71a98f07e7 100644 --- a/vendor/github.com/expr-lang/expr/checker/checker.go +++ b/vendor/github.com/expr-lang/expr/checker/checker.go @@ -13,6 +13,45 @@ import ( "github.com/expr-lang/expr/parser" ) +// ParseCheck parses input expression and checks its types. Also, it applies +// all provided patchers. In case of error, it returns error with a tree. +func ParseCheck(input string, config *conf.Config) (*parser.Tree, error) { + tree, err := parser.ParseWithConfig(input, config) + if err != nil { + return tree, err + } + + if len(config.Visitors) > 0 { + for i := 0; i < 1000; i++ { + more := false + for _, v := range config.Visitors { + // We need to perform types check, because some visitors may rely on + // types information available in the tree. + _, _ = Check(tree, config) + + ast.Walk(&tree.Node, v) + + if v, ok := v.(interface { + ShouldRepeat() bool + }); ok { + more = more || v.ShouldRepeat() + } + } + if !more { + break + } + } + } + _, err = Check(tree, config) + if err != nil { + return tree, err + } + + return tree, nil +} + +// Check checks types of the expression tree. It returns type of the expression +// and error if any. If config is nil, then default configuration will be used. 
func Check(tree *parser.Tree, config *conf.Config) (t reflect.Type, err error) { if config == nil { config = conf.New(nil) @@ -653,6 +692,10 @@ func (v *checker) BuiltinNode(node *ast.BuiltinNode) (reflect.Type, info) { return v.error(node.Arguments[0], "builtin %v takes only array (got %v)", node.Name, collection) } + if len(node.Arguments) == 1 { + return integerType, info{} + } + v.begin(collection) closure, _ := v.visit(node.Arguments[1]) v.end() @@ -668,6 +711,29 @@ func (v *checker) BuiltinNode(node *ast.BuiltinNode) (reflect.Type, info) { } return v.error(node.Arguments[1], "predicate should has one input and one output param") + case "sum": + collection, _ := v.visit(node.Arguments[0]) + if !isArray(collection) && !isAny(collection) { + return v.error(node.Arguments[0], "builtin %v takes only array (got %v)", node.Name, collection) + } + + if len(node.Arguments) == 2 { + v.begin(collection) + closure, _ := v.visit(node.Arguments[1]) + v.end() + + if isFunc(closure) && + closure.NumOut() == 1 && + closure.NumIn() == 1 && isAny(closure.In(0)) { + return closure.Out(0), info{} + } + } else { + if isAny(collection) { + return anyType, info{} + } + return collection.Elem(), info{} + } + case "find", "findLast": collection, _ := v.visit(node.Arguments[0]) if !isArray(collection) && !isAny(collection) { @@ -978,7 +1044,7 @@ func (v *checker) checkArguments( continue } - if !t.AssignableTo(in) && kind(t) != reflect.Interface { + if !(t.AssignableTo(in) || deref.Type(t).AssignableTo(in)) && kind(t) != reflect.Interface { return anyType, &file.Error{ Location: arg.Location(), Message: fmt.Sprintf("cannot use %v as argument (type %v) to call %v ", t, in, name), @@ -1012,9 +1078,11 @@ func traverseAndReplaceIntegerNodesWithIntegerNodes(node *ast.Node, newType refl case *ast.IntegerNode: (*node).SetType(newType) case *ast.UnaryNode: + (*node).SetType(newType) unaryNode := (*node).(*ast.UnaryNode) traverseAndReplaceIntegerNodesWithIntegerNodes(&unaryNode.Node, newType) case *ast.BinaryNode: + // TODO: Binary node return type is dependent on the type of the operands. We can't just change the type of the node. 
binaryNode := (*node).(*ast.BinaryNode) switch binaryNode.Operator { case "+", "-", "*": diff --git a/vendor/github.com/expr-lang/expr/compiler/compiler.go b/vendor/github.com/expr-lang/expr/compiler/compiler.go index 808b53c9b74..720f6a26528 100644 --- a/vendor/github.com/expr-lang/expr/compiler/compiler.go +++ b/vendor/github.com/expr-lang/expr/compiler/compiler.go @@ -2,6 +2,7 @@ package compiler import ( "fmt" + "math" "reflect" "regexp" @@ -92,6 +93,13 @@ type scope struct { index int } +func (c *compiler) nodeParent() ast.Node { + if len(c.nodes) > 1 { + return c.nodes[len(c.nodes)-2] + } + return nil +} + func (c *compiler) emitLocation(loc file.Location, op Opcode, arg int) int { c.bytecode = append(c.bytecode, op) current := len(c.bytecode) @@ -322,22 +330,46 @@ func (c *compiler) IntegerNode(node *ast.IntegerNode) { case reflect.Int: c.emitPush(node.Value) case reflect.Int8: + if node.Value > math.MaxInt8 || node.Value < math.MinInt8 { + panic(fmt.Sprintf("constant %d overflows int8", node.Value)) + } c.emitPush(int8(node.Value)) case reflect.Int16: + if node.Value > math.MaxInt16 || node.Value < math.MinInt16 { + panic(fmt.Sprintf("constant %d overflows int16", node.Value)) + } c.emitPush(int16(node.Value)) case reflect.Int32: + if node.Value > math.MaxInt32 || node.Value < math.MinInt32 { + panic(fmt.Sprintf("constant %d overflows int32", node.Value)) + } c.emitPush(int32(node.Value)) case reflect.Int64: c.emitPush(int64(node.Value)) case reflect.Uint: + if node.Value < 0 { + panic(fmt.Sprintf("constant %d overflows uint", node.Value)) + } c.emitPush(uint(node.Value)) case reflect.Uint8: + if node.Value > math.MaxUint8 || node.Value < 0 { + panic(fmt.Sprintf("constant %d overflows uint8", node.Value)) + } c.emitPush(uint8(node.Value)) case reflect.Uint16: + if node.Value > math.MaxUint16 || node.Value < 0 { + panic(fmt.Sprintf("constant %d overflows uint16", node.Value)) + } c.emitPush(uint16(node.Value)) case reflect.Uint32: + if node.Value < 0 { + panic(fmt.Sprintf("constant %d overflows uint32", node.Value)) + } c.emitPush(uint32(node.Value)) case reflect.Uint64: + if node.Value < 0 { + panic(fmt.Sprintf("constant %d overflows uint64", node.Value)) + } c.emitPush(uint64(node.Value)) default: c.emitPush(node.Value) @@ -395,34 +427,12 @@ func (c *compiler) UnaryNode(node *ast.UnaryNode) { } func (c *compiler) BinaryNode(node *ast.BinaryNode) { - l := kind(node.Left) - r := kind(node.Right) - - leftIsSimple := isSimpleType(node.Left) - rightIsSimple := isSimpleType(node.Right) - leftAndRightAreSimple := leftIsSimple && rightIsSimple - switch node.Operator { case "==": - c.compile(node.Left) - c.derefInNeeded(node.Left) - c.compile(node.Right) - c.derefInNeeded(node.Right) - - if l == r && l == reflect.Int && leftAndRightAreSimple { - c.emit(OpEqualInt) - } else if l == r && l == reflect.String && leftAndRightAreSimple { - c.emit(OpEqualString) - } else { - c.emit(OpEqual) - } + c.equalBinaryNode(node) case "!=": - c.compile(node.Left) - c.derefInNeeded(node.Left) - c.compile(node.Right) - c.derefInNeeded(node.Right) - c.emit(OpEqual) + c.equalBinaryNode(node) c.emit(OpNot) case "or", "||": @@ -580,6 +590,28 @@ func (c *compiler) BinaryNode(node *ast.BinaryNode) { } } +func (c *compiler) equalBinaryNode(node *ast.BinaryNode) { + l := kind(node.Left.Type()) + r := kind(node.Right.Type()) + + leftIsSimple := isSimpleType(node.Left) + rightIsSimple := isSimpleType(node.Right) + leftAndRightAreSimple := leftIsSimple && rightIsSimple + + c.compile(node.Left) + 
c.derefInNeeded(node.Left) + c.compile(node.Right) + c.derefInNeeded(node.Right) + + if l == r && l == reflect.Int && leftAndRightAreSimple { + c.emit(OpEqualInt) + } else if l == r && l == reflect.String && leftAndRightAreSimple { + c.emit(OpEqualString) + } else { + c.emit(OpEqual) + } +} + func isSimpleType(node ast.Node) bool { if node == nil { return false } @@ -594,9 +626,21 @@ func (c *compiler) ChainNode(node *ast.ChainNode) { c.chains = append(c.chains, []int{}) c.compile(node.Node) - // Chain activate (got nil somewhere) for _, ph := range c.chains[len(c.chains)-1] { - c.patchJump(ph) + c.patchJump(ph) // If chain activated jump here (got nil somewhere). + } + parent := c.nodeParent() + if binary, ok := parent.(*ast.BinaryNode); ok && binary.Operator == "??" { + // If chain is used in nil coalescing operator, we can omit + // nil push at the end of the chain. The ?? operator will + // handle it. + } else { + // We need to put the nil on the stack, otherwise "typed" + // nil will be used as a result of the chain. + j := c.emit(OpJumpIfNotNil, placeholder) + c.emit(OpPop) + c.emit(OpNil) + c.patchJump(j) } c.chains = c.chains[:len(c.chains)-1] } @@ -682,9 +726,44 @@ func (c *compiler) SliceNode(node *ast.SliceNode) { } func (c *compiler) CallNode(node *ast.CallNode) { - for _, arg := range node.Arguments { - c.compile(arg) + fn := node.Callee.Type() + if kind(fn) == reflect.Func { + fnInOffset := 0 + fnNumIn := fn.NumIn() + switch callee := node.Callee.(type) { + case *ast.MemberNode: + if prop, ok := callee.Property.(*ast.StringNode); ok { + if _, ok = callee.Node.Type().MethodByName(prop.Value); ok && callee.Node.Type().Kind() != reflect.Interface { + fnInOffset = 1 + fnNumIn-- + } + } + case *ast.IdentifierNode: + if t, ok := c.config.Types[callee.Value]; ok && t.Method { + fnInOffset = 1 + fnNumIn-- + } + } + for i, arg := range node.Arguments { + c.compile(arg) + if k := kind(arg.Type()); k == reflect.Ptr || k == reflect.Interface { + var in reflect.Type + if fn.IsVariadic() && i >= fnNumIn-1 { + in = fn.In(fn.NumIn() - 1).Elem() + } else { + in = fn.In(i + fnInOffset) + } + if k = kind(in); k != reflect.Ptr && k != reflect.Interface { + c.emit(OpDeref) + } + } + } + } else { + for _, arg := range node.Arguments { + c.compile(arg) + } } + if ident, ok := node.Callee.(*ast.IdentifierNode); ok { if c.config != nil { if fn, ok := c.config.Functions[ident.Value]; ok { @@ -800,7 +879,11 @@ func (c *compiler) BuiltinNode(node *ast.BuiltinNode) { c.compile(node.Arguments[0]) c.emit(OpBegin) c.emitLoop(func() { - c.compile(node.Arguments[1]) + if len(node.Arguments) == 2 { + c.compile(node.Arguments[1]) + } else { + c.emit(OpPointer) + } c.emitCond(func() { c.emit(OpIncrementCount) }) }) @@ -809,6 +892,25 @@ c.emit(OpEnd) return + case "sum": + c.compile(node.Arguments[0]) + c.emit(OpBegin) + c.emit(OpInt, 0) + c.emit(OpSetAcc) + c.emitLoop(func() { + if len(node.Arguments) == 2 { + c.compile(node.Arguments[1]) + } else { + c.emit(OpPointer) + } + c.emit(OpGetAcc) + c.emit(OpAdd) + c.emit(OpSetAcc) + }) + c.emit(OpGetAcc) + c.emit(OpEnd) + return + case "find": c.compile(node.Arguments[0]) c.emit(OpBegin) @@ -1094,7 +1196,7 @@ func (c *compiler) PairNode(node *ast.PairNode) { } func (c *compiler) derefInNeeded(node ast.Node) { - switch kind(node) { + switch kind(node.Type()) { case reflect.Ptr, reflect.Interface: c.emit(OpDeref) } @@ -1113,8 +1215,7 @@ func (c *compiler) optimize() { } } -func 
kind(node ast.Node) reflect.Kind { - t := node.Type() +func kind(t reflect.Type) reflect.Kind { if t == nil { return reflect.Invalid } diff --git a/vendor/github.com/expr-lang/expr/expr.go b/vendor/github.com/expr-lang/expr/expr.go index ba786c0174c..8c619e1c4db 100644 --- a/vendor/github.com/expr-lang/expr/expr.go +++ b/vendor/github.com/expr-lang/expr/expr.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "reflect" + "time" "github.com/expr-lang/expr/ast" "github.com/expr-lang/expr/builtin" @@ -12,7 +13,6 @@ import ( "github.com/expr-lang/expr/conf" "github.com/expr-lang/expr/file" "github.com/expr-lang/expr/optimizer" - "github.com/expr-lang/expr/parser" "github.com/expr-lang/expr/patcher" "github.com/expr-lang/expr/vm" ) @@ -183,6 +183,17 @@ func WithContext(name string) Option { }) } +// Timezone sets default timezone for date() and now() builtin functions. +func Timezone(name string) Option { + tz, err := time.LoadLocation(name) + if err != nil { + panic(err) + } + return Patch(patcher.WithTimezone{ + Location: tz, + }) +} + // Compile parses and compiles given input expression to bytecode program. func Compile(input string, ops ...Option) (*vm.Program, error) { config := conf.CreateNew() @@ -194,33 +205,7 @@ func Compile(input string, ops ...Option) (*vm.Program, error) { } config.Check() - tree, err := parser.ParseWithConfig(input, config) - if err != nil { - return nil, err - } - - if len(config.Visitors) > 0 { - for i := 0; i < 1000; i++ { - more := false - for _, v := range config.Visitors { - // We need to perform types check, because some visitors may rely on - // types information available in the tree. - _, _ = checker.Check(tree, config) - - ast.Walk(&tree.Node, v) - - if v, ok := v.(interface { - ShouldRepeat() bool - }); ok { - more = more || v.ShouldRepeat() - } - } - if !more { - break - } - } - } - _, err = checker.Check(tree, config) + tree, err := checker.ParseCheck(input, config) if err != nil { return nil, err } diff --git a/vendor/github.com/expr-lang/expr/file/error.go b/vendor/github.com/expr-lang/expr/file/error.go index edf202b0456..8ff85dfa5f7 100644 --- a/vendor/github.com/expr-lang/expr/file/error.go +++ b/vendor/github.com/expr-lang/expr/file/error.go @@ -8,22 +8,36 @@ import ( type Error struct { Location - Message string - Snippet string - Prev error + Line int `json:"line"` + Column int `json:"column"` + Message string `json:"message"` + Snippet string `json:"snippet"` + Prev error `json:"prev"` } func (e *Error) Error() string { return e.format() } -func (e *Error) Bind(source *Source) *Error { - if snippet, found := source.Snippet(e.Location.Line); found { +func (e *Error) Bind(source Source) *Error { + e.Line = 1 + for i, r := range source { + if i == e.From { + break + } + if r == '\n' { + e.Line++ + e.Column = 0 + } else { + e.Column++ + } + } + if snippet, found := source.Snippet(e.Line); found { snippet := strings.Replace(snippet, "\t", " ", -1) srcLine := "\n | " + snippet var bytes = []byte(snippet) var indLine = "\n | " - for i := 0; i < e.Location.Column && len(bytes) > 0; i++ { + for i := 0; i < e.Column && len(bytes) > 0; i++ { _, sz := utf8.DecodeRune(bytes) bytes = bytes[sz:] if sz > 1 { @@ -54,7 +68,7 @@ func (e *Error) Wrap(err error) { } func (e *Error) format() string { - if e.Location.Empty() { + if e.Snippet == "" { return e.Message } return fmt.Sprintf( diff --git a/vendor/github.com/expr-lang/expr/file/location.go b/vendor/github.com/expr-lang/expr/file/location.go index a92e27f0b1c..6c6bc2427ec 100644 --- 
a/vendor/github.com/expr-lang/expr/file/location.go +++ b/vendor/github.com/expr-lang/expr/file/location.go @@ -1,10 +1,6 @@ package file type Location struct { - Line int // The 1-based line of the location. - Column int // The 0-based column number of the location. -} - -func (l Location) Empty() bool { - return l.Column == 0 && l.Line == 0 + From int `json:"from"` + To int `json:"to"` } diff --git a/vendor/github.com/expr-lang/expr/file/source.go b/vendor/github.com/expr-lang/expr/file/source.go index d86a546b100..8e2b2d1540c 100644 --- a/vendor/github.com/expr-lang/expr/file/source.go +++ b/vendor/github.com/expr-lang/expr/file/source.go @@ -1,78 +1,47 @@ package file import ( - "encoding/json" "strings" "unicode/utf8" ) -type Source struct { - contents []rune - lineOffsets []int32 -} - -func NewSource(contents string) *Source { - s := &Source{ - contents: []rune(contents), - } - s.updateOffsets() - return s -} - -func (s *Source) MarshalJSON() ([]byte, error) { - return json.Marshal(s.contents) -} - -func (s *Source) UnmarshalJSON(b []byte) error { - contents := make([]rune, 0) - err := json.Unmarshal(b, &contents) - if err != nil { - return err - } +type Source []rune - s.contents = contents - s.updateOffsets() - return nil +func NewSource(contents string) Source { + return []rune(contents) } -func (s *Source) Content() string { - return string(s.contents) +func (s Source) String() string { + return string(s) } -func (s *Source) Snippet(line int) (string, bool) { +func (s Source) Snippet(line int) (string, bool) { if s == nil { return "", false } - charStart, found := s.findLineOffset(line) - if !found || len(s.contents) == 0 { + lines := strings.Split(string(s), "\n") + lineOffsets := make([]int, len(lines)) + var offset int + for i, line := range lines { + offset = offset + utf8.RuneCountInString(line) + 1 + lineOffsets[i] = offset + } + charStart, found := getLineOffset(lineOffsets, line) + if !found || len(s) == 0 { return "", false } - charEnd, found := s.findLineOffset(line + 1) + charEnd, found := getLineOffset(lineOffsets, line+1) if found { - return string(s.contents[charStart : charEnd-1]), true - } - return string(s.contents[charStart:]), true -} - -// updateOffsets compute line offsets up front as they are referred to frequently. -func (s *Source) updateOffsets() { - lines := strings.Split(string(s.contents), "\n") - offsets := make([]int32, len(lines)) - var offset int32 - for i, line := range lines { - offset = offset + int32(utf8.RuneCountInString(line)) + 1 - offsets[int32(i)] = offset + return string(s[charStart : charEnd-1]), true } - s.lineOffsets = offsets + return string(s[charStart:]), true } -// findLineOffset returns the offset where the (1-indexed) line begins, -// or false if line doesn't exist. 
-func (s *Source) findLineOffset(line int) (int32, bool) { +func getLineOffset(lineOffsets []int, line int) (int, bool) { if line == 1 { return 0, true - } else if line > 1 && line <= len(s.lineOffsets) { - offset := s.lineOffsets[line-2] + } else if line > 1 && line <= len(lineOffsets) { + offset := lineOffsets[line-2] return offset, true } return -1, false diff --git a/vendor/github.com/expr-lang/expr/optimizer/optimizer.go b/vendor/github.com/expr-lang/expr/optimizer/optimizer.go index 6d1fb0b546e..4ceb3fa43a7 100644 --- a/vendor/github.com/expr-lang/expr/optimizer/optimizer.go +++ b/vendor/github.com/expr-lang/expr/optimizer/optimizer.go @@ -37,5 +37,7 @@ func Optimize(node *Node, config *conf.Config) error { Walk(node, &filterLast{}) Walk(node, &filterFirst{}) Walk(node, &predicateCombination{}) + Walk(node, &sumArray{}) + Walk(node, &sumMap{}) return nil } diff --git a/vendor/github.com/expr-lang/expr/optimizer/predicate_combination.go b/vendor/github.com/expr-lang/expr/optimizer/predicate_combination.go index 2733781df53..6e8a7f7cfc8 100644 --- a/vendor/github.com/expr-lang/expr/optimizer/predicate_combination.go +++ b/vendor/github.com/expr-lang/expr/optimizer/predicate_combination.go @@ -5,6 +5,14 @@ import ( "github.com/expr-lang/expr/parser/operator" ) +/* +predicateCombination is a visitor that combines multiple predicate calls into a single call. +For example, the following expression: + + all(x, x > 1) && all(x, x < 10) -> all(x, x > 1 && x < 10) + any(x, x > 1) || any(x, x < 10) -> any(x, x > 1 || x < 10) + none(x, x > 1) && none(x, x < 10) -> none(x, x > 1 || x < 10) +*/ type predicateCombination struct{} func (v *predicateCombination) Visit(node *Node) { @@ -36,10 +44,12 @@ func (v *predicateCombination) Visit(node *Node) { } func combinedOperator(fn, op string) (string, bool) { - switch fn { - case "all", "any": + switch { + case fn == "all" && (op == "and" || op == "&&"): + return op, true + case fn == "any" && (op == "or" || op == "||"): return op, true - case "one", "none": + case fn == "none" && (op == "and" || op == "&&"): switch op { case "and": return "or", true diff --git a/vendor/github.com/expr-lang/expr/optimizer/sum_array.go b/vendor/github.com/expr-lang/expr/optimizer/sum_array.go new file mode 100644 index 00000000000..0a05d1f2e6b --- /dev/null +++ b/vendor/github.com/expr-lang/expr/optimizer/sum_array.go @@ -0,0 +1,37 @@ +package optimizer + +import ( + "fmt" + + . "github.com/expr-lang/expr/ast" +) + +type sumArray struct{} + +func (*sumArray) Visit(node *Node) { + if sumBuiltin, ok := (*node).(*BuiltinNode); ok && + sumBuiltin.Name == "sum" && + len(sumBuiltin.Arguments) == 1 { + if array, ok := sumBuiltin.Arguments[0].(*ArrayNode); ok && + len(array.Nodes) >= 2 { + Patch(node, sumArrayFold(array)) + } + } +} + +func sumArrayFold(array *ArrayNode) *BinaryNode { + if len(array.Nodes) > 2 { + return &BinaryNode{ + Operator: "+", + Left: array.Nodes[0], + Right: sumArrayFold(&ArrayNode{Nodes: array.Nodes[1:]}), + } + } else if len(array.Nodes) == 2 { + return &BinaryNode{ + Operator: "+", + Left: array.Nodes[0], + Right: array.Nodes[1], + } + } + panic(fmt.Errorf("sumArrayFold: invalid array length %d", len(array.Nodes))) +} diff --git a/vendor/github.com/expr-lang/expr/optimizer/sum_map.go b/vendor/github.com/expr-lang/expr/optimizer/sum_map.go new file mode 100644 index 00000000000..a41a537327c --- /dev/null +++ b/vendor/github.com/expr-lang/expr/optimizer/sum_map.go @@ -0,0 +1,25 @@ +package optimizer + +import ( + . 
"github.com/expr-lang/expr/ast" +) + +type sumMap struct{} + +func (*sumMap) Visit(node *Node) { + if sumBuiltin, ok := (*node).(*BuiltinNode); ok && + sumBuiltin.Name == "sum" && + len(sumBuiltin.Arguments) == 1 { + if mapBuiltin, ok := sumBuiltin.Arguments[0].(*BuiltinNode); ok && + mapBuiltin.Name == "map" && + len(mapBuiltin.Arguments) == 2 { + Patch(node, &BuiltinNode{ + Name: "sum", + Arguments: []Node{ + mapBuiltin.Arguments[0], + mapBuiltin.Arguments[1], + }, + }) + } + } +} diff --git a/vendor/github.com/expr-lang/expr/parser/lexer/lexer.go b/vendor/github.com/expr-lang/expr/parser/lexer/lexer.go index c32658637f2..e6b06c09d09 100644 --- a/vendor/github.com/expr-lang/expr/parser/lexer/lexer.go +++ b/vendor/github.com/expr-lang/expr/parser/lexer/lexer.go @@ -3,20 +3,18 @@ package lexer import ( "fmt" "strings" - "unicode/utf8" "github.com/expr-lang/expr/file" ) -func Lex(source *file.Source) ([]Token, error) { +func Lex(source file.Source) ([]Token, error) { l := &lexer{ - input: source.Content(), + source: source, tokens: make([]Token, 0), + start: 0, + end: 0, } - - l.loc = file.Location{Line: 1, Column: 0} - l.prev = l.loc - l.startLoc = l.loc + l.commit() for state := root; state != nil; { state = state(l) @@ -30,34 +28,25 @@ func Lex(source *file.Source) ([]Token, error) { } type lexer struct { - input string + source file.Source tokens []Token - start, end int // current position in input - width int // last rune width - startLoc file.Location // start location - prev, loc file.Location // prev location of end location, end location + start, end int err *file.Error } const eof rune = -1 +func (l *lexer) commit() { + l.start = l.end +} + func (l *lexer) next() rune { - if l.end >= len(l.input) { - l.width = 0 + if l.end >= len(l.source) { + l.end++ return eof } - r, w := utf8.DecodeRuneInString(l.input[l.end:]) - l.width = w - l.end += w - - l.prev = l.loc - if r == '\n' { - l.loc.Line++ - l.loc.Column = 0 - } else { - l.loc.Column++ - } - + r := l.source[l.end] + l.end++ return r } @@ -68,8 +57,7 @@ func (l *lexer) peek() rune { } func (l *lexer) backup() { - l.end -= l.width - l.loc = l.prev + l.end-- } func (l *lexer) emit(t Kind) { @@ -78,35 +66,39 @@ func (l *lexer) emit(t Kind) { func (l *lexer) emitValue(t Kind, value string) { l.tokens = append(l.tokens, Token{ - Location: l.startLoc, + Location: file.Location{From: l.start, To: l.end}, Kind: t, Value: value, }) - l.start = l.end - l.startLoc = l.loc + l.commit() } func (l *lexer) emitEOF() { + from := l.end - 2 + if from < 0 { + from = 0 + } + to := l.end - 1 + if to < 0 { + to = 0 + } l.tokens = append(l.tokens, Token{ - Location: l.prev, // Point to previous position for better error messages. + Location: file.Location{From: from, To: to}, Kind: EOF, }) - l.start = l.end - l.startLoc = l.loc + l.commit() } func (l *lexer) skip() { - l.start = l.end - l.startLoc = l.loc + l.commit() } func (l *lexer) word() string { - return l.input[l.start:l.end] -} - -func (l *lexer) ignore() { - l.start = l.end - l.startLoc = l.loc + // TODO: boundary check is NOT needed here, but for some reason CI fuzz tests are failing. 
+ if l.start > len(l.source) || l.end > len(l.source) { + return "__invalid__" + } + return string(l.source[l.start:l.end]) } func (l *lexer) accept(valid string) bool { @@ -132,18 +124,18 @@ func (l *lexer) skipSpaces() { } func (l *lexer) acceptWord(word string) bool { - pos, loc, prev := l.end, l.loc, l.prev + pos := l.end l.skipSpaces() for _, ch := range word { if l.next() != ch { - l.end, l.loc, l.prev = pos, loc, prev + l.end = pos return false } } if r := l.peek(); r != ' ' && r != eof { - l.end, l.loc, l.prev = pos, loc, prev + l.end = pos return false } @@ -153,8 +145,11 @@ func (l *lexer) acceptWord(word string) bool { func (l *lexer) error(format string, args ...any) stateFn { if l.err == nil { // show first error l.err = &file.Error{ - Location: l.loc, - Message: fmt.Sprintf(format, args...), + Location: file.Location{ + From: l.end - 1, + To: l.end, + }, + Message: fmt.Sprintf(format, args...), } } return nil @@ -230,6 +225,6 @@ func (l *lexer) scanRawString(quote rune) (n int) { ch = l.next() n++ } - l.emitValue(String, l.input[l.start+1:l.end-1]) + l.emitValue(String, string(l.source[l.start+1:l.end-1])) return } diff --git a/vendor/github.com/expr-lang/expr/parser/lexer/state.go b/vendor/github.com/expr-lang/expr/parser/lexer/state.go index 72f02bf4ef7..d351e2f5c8b 100644 --- a/vendor/github.com/expr-lang/expr/parser/lexer/state.go +++ b/vendor/github.com/expr-lang/expr/parser/lexer/state.go @@ -14,7 +14,7 @@ func root(l *lexer) stateFn { l.emitEOF() return nil case utils.IsSpace(r): - l.ignore() + l.skip() return root case r == '\'' || r == '"': l.scanString(r) @@ -83,14 +83,14 @@ func (l *lexer) scanNumber() bool { } } l.acceptRun(digits) - loc, prev, end := l.loc, l.prev, l.end + end := l.end if l.accept(".") { // Lookup for .. operator: if after dot there is another dot (1..2), it maybe a range operator. if l.peek() == '.' { // We can't backup() here, as it would require two backups, // and backup() func supports only one for now. So, save and // restore it here. - l.loc, l.prev, l.end = loc, prev, end + l.end = end return true } l.acceptRun(digits) @@ -147,7 +147,7 @@ func not(l *lexer) stateFn { l.skipSpaces() - pos, loc, prev := l.end, l.loc, l.prev + end := l.end // Get the next word. 
for { @@ -164,7 +164,7 @@ func not(l *lexer) stateFn { case "in", "matches", "contains", "startsWith", "endsWith": l.emit(Operator) default: - l.end, l.loc, l.prev = pos, loc, prev + l.end = end } return root } @@ -193,7 +193,7 @@ func singleLineComment(l *lexer) stateFn { break } } - l.ignore() + l.skip() return root } @@ -207,7 +207,7 @@ func multiLineComment(l *lexer) stateFn { break } } - l.ignore() + l.skip() return root } diff --git a/vendor/github.com/expr-lang/expr/parser/parser.go b/vendor/github.com/expr-lang/expr/parser/parser.go index 9cb79cbbb42..77b2a700a31 100644 --- a/vendor/github.com/expr-lang/expr/parser/parser.go +++ b/vendor/github.com/expr-lang/expr/parser/parser.go @@ -33,7 +33,8 @@ var predicates = map[string]struct { "one": {[]arg{expr, closure}}, "filter": {[]arg{expr, closure}}, "map": {[]arg{expr, closure}}, - "count": {[]arg{expr, closure}}, + "count": {[]arg{expr, closure | optional}}, + "sum": {[]arg{expr, closure | optional}}, "find": {[]arg{expr, closure}}, "findIndex": {[]arg{expr, closure}}, "findLast": {[]arg{expr, closure}}, @@ -54,7 +55,7 @@ type parser struct { type Tree struct { Node Node - Source *file.Source + Source file.Source } func Parse(input string) (*Tree, error) { @@ -83,14 +84,16 @@ func ParseWithConfig(input string, config *conf.Config) (*Tree, error) { p.error("unexpected token %v", p.current) } + tree := &Tree{ + Node: node, + Source: source, + } + if p.err != nil { - return nil, p.err.Bind(source) + return tree, p.err.Bind(source) } - return &Tree{ - Node: node, - Source: source, - }, nil + return tree, nil } func (p *parser) error(format string, args ...any) { diff --git a/vendor/github.com/expr-lang/expr/patcher/with_context.go b/vendor/github.com/expr-lang/expr/patcher/with_context.go index 55b6042614f..f9861a2c2f7 100644 --- a/vendor/github.com/expr-lang/expr/patcher/with_context.go +++ b/vendor/github.com/expr-lang/expr/patcher/with_context.go @@ -22,11 +22,18 @@ func (w WithContext) Visit(node *ast.Node) { if fn.Kind() != reflect.Func { return } - if fn.NumIn() == 0 { - return - } - if fn.In(0).String() != "context.Context" { + switch fn.NumIn() { + case 0: return + case 1: + if fn.In(0).String() != "context.Context" { + return + } + default: + if fn.In(0).String() != "context.Context" && + fn.In(1).String() != "context.Context" { + return + } } ast.Patch(node, &ast.CallNode{ Callee: call.Callee, diff --git a/vendor/github.com/expr-lang/expr/patcher/with_timezone.go b/vendor/github.com/expr-lang/expr/patcher/with_timezone.go new file mode 100644 index 00000000000..83eb28e95ac --- /dev/null +++ b/vendor/github.com/expr-lang/expr/patcher/with_timezone.go @@ -0,0 +1,25 @@ +package patcher + +import ( + "time" + + "github.com/expr-lang/expr/ast" +) + +// WithTimezone passes Location to date() and now() functions. 
+type WithTimezone struct { + Location *time.Location +} + +func (t WithTimezone) Visit(node *ast.Node) { + if btin, ok := (*node).(*ast.BuiltinNode); ok { + switch btin.Name { + case "date", "now": + loc := &ast.ConstantNode{Value: t.Location} + ast.Patch(node, &ast.BuiltinNode{ + Name: btin.Name, + Arguments: append([]ast.Node{loc}, btin.Arguments...), + }) + } + } +} diff --git a/vendor/github.com/expr-lang/expr/vm/program.go b/vendor/github.com/expr-lang/expr/vm/program.go index 98954674412..15ce26f5b28 100644 --- a/vendor/github.com/expr-lang/expr/vm/program.go +++ b/vendor/github.com/expr-lang/expr/vm/program.go @@ -21,7 +21,7 @@ type Program struct { Arguments []int Constants []any - source *file.Source + source file.Source node ast.Node locations []file.Location variables int @@ -32,7 +32,7 @@ type Program struct { // NewProgram returns a new Program. It's used by the compiler. func NewProgram( - source *file.Source, + source file.Source, node ast.Node, locations []file.Location, variables int, @@ -58,7 +58,7 @@ func NewProgram( } // Source returns origin file.Source. -func (program *Program) Source() *file.Source { +func (program *Program) Source() file.Source { return program.source } diff --git a/vendor/github.com/expr-lang/expr/vm/runtime/helpers[generated].go b/vendor/github.com/expr-lang/expr/vm/runtime/helpers[generated].go index 3529fdd5867..d950f111147 100644 --- a/vendor/github.com/expr-lang/expr/vm/runtime/helpers[generated].go +++ b/vendor/github.com/expr-lang/expr/vm/runtime/helpers[generated].go @@ -334,6 +334,344 @@ func Equal(a, b interface{}) bool { case float64: return float64(x) == float64(y) } + case []any: + switch y := b.(type) { + case []string: + if len(x) != len(y) { + return false + } + for i := range x { + if !Equal(x[i], y[i]) { + return false + } + } + return true + case []uint: + if len(x) != len(y) { + return false + } + for i := range x { + if !Equal(x[i], y[i]) { + return false + } + } + return true + case []uint8: + if len(x) != len(y) { + return false + } + for i := range x { + if !Equal(x[i], y[i]) { + return false + } + } + return true + case []uint16: + if len(x) != len(y) { + return false + } + for i := range x { + if !Equal(x[i], y[i]) { + return false + } + } + return true + case []uint32: + if len(x) != len(y) { + return false + } + for i := range x { + if !Equal(x[i], y[i]) { + return false + } + } + return true + case []uint64: + if len(x) != len(y) { + return false + } + for i := range x { + if !Equal(x[i], y[i]) { + return false + } + } + return true + case []int: + if len(x) != len(y) { + return false + } + for i := range x { + if !Equal(x[i], y[i]) { + return false + } + } + return true + case []int8: + if len(x) != len(y) { + return false + } + for i := range x { + if !Equal(x[i], y[i]) { + return false + } + } + return true + case []int16: + if len(x) != len(y) { + return false + } + for i := range x { + if !Equal(x[i], y[i]) { + return false + } + } + return true + case []int32: + if len(x) != len(y) { + return false + } + for i := range x { + if !Equal(x[i], y[i]) { + return false + } + } + return true + case []int64: + if len(x) != len(y) { + return false + } + for i := range x { + if !Equal(x[i], y[i]) { + return false + } + } + return true + case []float32: + if len(x) != len(y) { + return false + } + for i := range x { + if !Equal(x[i], y[i]) { + return false + } + } + return true + case []float64: + if len(x) != len(y) { + return false + } + for i := range x { + if !Equal(x[i], y[i]) { + return false + } + } + return true + 
case []any: + if len(x) != len(y) { + return false + } + for i := range x { + if !Equal(x[i], y[i]) { + return false + } + } + return true + } + case []string: + switch y := b.(type) { + case []any: + return Equal(y, x) + case []string: + if len(x) != len(y) { + return false + } + for i := range x { + if x[i] != y[i] { + return false + } + } + return true + } + case []uint: + switch y := b.(type) { + case []any: + return Equal(y, x) + case []uint: + if len(x) != len(y) { + return false + } + for i := range x { + if x[i] != y[i] { + return false + } + } + return true + } + case []uint8: + switch y := b.(type) { + case []any: + return Equal(y, x) + case []uint8: + if len(x) != len(y) { + return false + } + for i := range x { + if x[i] != y[i] { + return false + } + } + return true + } + case []uint16: + switch y := b.(type) { + case []any: + return Equal(y, x) + case []uint16: + if len(x) != len(y) { + return false + } + for i := range x { + if x[i] != y[i] { + return false + } + } + return true + } + case []uint32: + switch y := b.(type) { + case []any: + return Equal(y, x) + case []uint32: + if len(x) != len(y) { + return false + } + for i := range x { + if x[i] != y[i] { + return false + } + } + return true + } + case []uint64: + switch y := b.(type) { + case []any: + return Equal(y, x) + case []uint64: + if len(x) != len(y) { + return false + } + for i := range x { + if x[i] != y[i] { + return false + } + } + return true + } + case []int: + switch y := b.(type) { + case []any: + return Equal(y, x) + case []int: + if len(x) != len(y) { + return false + } + for i := range x { + if x[i] != y[i] { + return false + } + } + return true + } + case []int8: + switch y := b.(type) { + case []any: + return Equal(y, x) + case []int8: + if len(x) != len(y) { + return false + } + for i := range x { + if x[i] != y[i] { + return false + } + } + return true + } + case []int16: + switch y := b.(type) { + case []any: + return Equal(y, x) + case []int16: + if len(x) != len(y) { + return false + } + for i := range x { + if x[i] != y[i] { + return false + } + } + return true + } + case []int32: + switch y := b.(type) { + case []any: + return Equal(y, x) + case []int32: + if len(x) != len(y) { + return false + } + for i := range x { + if x[i] != y[i] { + return false + } + } + return true + } + case []int64: + switch y := b.(type) { + case []any: + return Equal(y, x) + case []int64: + if len(x) != len(y) { + return false + } + for i := range x { + if x[i] != y[i] { + return false + } + } + return true + } + case []float32: + switch y := b.(type) { + case []any: + return Equal(y, x) + case []float32: + if len(x) != len(y) { + return false + } + for i := range x { + if x[i] != y[i] { + return false + } + } + return true + } + case []float64: + switch y := b.(type) { + case []any: + return Equal(y, x) + case []float64: + if len(x) != len(y) { + return false + } + for i := range x { + if x[i] != y[i] { + return false + } + } + return true + } case string: switch y := b.(type) { case string: diff --git a/vendor/github.com/expr-lang/expr/vm/runtime/runtime.go b/vendor/github.com/expr-lang/expr/vm/runtime/runtime.go index 7da1320de38..cd48a280dc3 100644 --- a/vendor/github.com/expr-lang/expr/vm/runtime/runtime.go +++ b/vendor/github.com/expr-lang/expr/vm/runtime/runtime.go @@ -35,8 +35,12 @@ func Fetch(from, i any) any { switch v.Kind() { case reflect.Array, reflect.Slice, reflect.String: index := ToInt(i) + l := v.Len() if index < 0 { - index = v.Len() + index + index = l + index + } + if index < 0 || index >= l { + 
panic(fmt.Sprintf("index out of range: %v (array length is %v)", index, l)) } value := v.Index(index) if value.IsValid() { diff --git a/vendor/github.com/expr-lang/expr/vm/vm.go b/vendor/github.com/expr-lang/expr/vm/vm.go index 7e933ce7408..fa1223b420f 100644 --- a/vendor/github.com/expr-lang/expr/vm/vm.go +++ b/vendor/github.com/expr-lang/expr/vm/vm.go @@ -274,31 +274,50 @@ func (vm *VM) Run(program *Program, env any) (_ any, err error) { case OpMatches: b := vm.pop() a := vm.pop() + if runtime.IsNil(a) || runtime.IsNil(b) { + vm.push(false) + break + } match, err := regexp.MatchString(b.(string), a.(string)) if err != nil { panic(err) } - vm.push(match) case OpMatchesConst: a := vm.pop() + if runtime.IsNil(a) { + vm.push(false) + break + } r := program.Constants[arg].(*regexp.Regexp) vm.push(r.MatchString(a.(string))) case OpContains: b := vm.pop() a := vm.pop() + if runtime.IsNil(a) || runtime.IsNil(b) { + vm.push(false) + break + } vm.push(strings.Contains(a.(string), b.(string))) case OpStartsWith: b := vm.pop() a := vm.pop() + if runtime.IsNil(a) || runtime.IsNil(b) { + vm.push(false) + break + } vm.push(strings.HasPrefix(a.(string), b.(string))) case OpEndsWith: b := vm.pop() a := vm.pop() + if runtime.IsNil(a) || runtime.IsNil(b) { + vm.push(false) + break + } vm.push(strings.HasSuffix(a.(string), b.(string))) case OpSlice: diff --git a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml index ffc7b992b3c..f4e7dbf37b3 100644 --- a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml +++ b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml @@ -1,7 +1,7 @@ freebsd_task: name: 'FreeBSD' freebsd_instance: - image_family: freebsd-13-2 + image_family: freebsd-14-1 install_script: - pkg update -f - pkg install -y go @@ -9,5 +9,6 @@ freebsd_task: # run tests as user "cirrus" instead of root - pw useradd cirrus -m - chown -R cirrus:cirrus . - - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... - - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - FSNOTIFY_DEBUG=1 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race -v ./... 
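Editor's note on the expr hunks above: a minimal sketch, not part of the diff, of the user-visible behavior they add. It assumes only the public API of the vendored package (`expr.Compile`, `expr.Env`, `expr.Run`, `expr.Eval`); the guards in vm.go make `matches`/`contains`/`startsWith`/`endsWith` return false on a nil operand instead of panicking on the string type assertion, and parser.go/compiler.go add a `sum()` builtin with an optional predicate.

    // Sketch only, assuming the vendored github.com/expr-lang/expr API.
    package main

    import (
    	"fmt"

    	"github.com/expr-lang/expr"
    )

    func main() {
    	// vm.go above wraps OpContains (and the other string operators)
    	// in runtime.IsNil checks, so a nil operand now evaluates to
    	// false rather than panicking.
    	env := map[string]any{"Name": nil}
    	program, err := expr.Compile(`Name contains "foo"`, expr.Env(env))
    	if err != nil {
    		panic(err)
    	}
    	out, err := expr.Run(program, env)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(out) // false

    	// parser.go/compiler.go above register sum() with an optional
    	// closure; the sumArray/sumMap optimizer passes fold simple cases.
    	six, err := expr.Eval(`sum([1, 2, 3])`, nil)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(six) // 6
    }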
diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/fsnotify/fsnotify/.editorconfig deleted file mode 100644 index fad895851e5..00000000000 --- a/vendor/github.com/fsnotify/fsnotify/.editorconfig +++ /dev/null @@ -1,12 +0,0 @@ -root = true - -[*.go] -indent_style = tab -indent_size = 4 -insert_final_newline = true - -[*.{yml,yaml}] -indent_style = space -indent_size = 2 -insert_final_newline = true -trim_trailing_whitespace = true diff --git a/vendor/github.com/fsnotify/fsnotify/.gitattributes b/vendor/github.com/fsnotify/fsnotify/.gitattributes deleted file mode 100644 index 32f1001be0a..00000000000 --- a/vendor/github.com/fsnotify/fsnotify/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -go.sum linguist-generated diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore index 391cc076b12..daea9dd6d6d 100644 --- a/vendor/github.com/fsnotify/fsnotify/.gitignore +++ b/vendor/github.com/fsnotify/fsnotify/.gitignore @@ -5,3 +5,6 @@ # Output of go build ./cmd/fsnotify /fsnotify /fsnotify.exe + +/test/kqueue +/test/a.out diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md index e0e57575496..fa854785d0f 100644 --- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -1,8 +1,36 @@ # Changelog -Unreleased ----------- -Nothing yet. +1.8.0 2023-10-31 +---------------- + +### Additions + +- all: add `FSNOTIFY_DEBUG` to print debug logs to stderr ([#619]) + +### Changes and fixes + +- windows: fix behaviour of `WatchList()` to be consistent with other platforms ([#610]) + +- kqueue: ignore events with Ident=0 ([#590]) + +- kqueue: set O_CLOEXEC to prevent passing file descriptors to children ([#617]) + +- kqueue: emit events as "/path/dir/file" instead of "path/link/file" when watching a symlink ([#625]) + +- inotify: don't send event for IN_DELETE_SELF when also watching the parent ([#620]) + +- inotify: fix panic when calling Remove() in a goroutine ([#650]) + +- fen: allow watching subdirectories of watched directories ([#621]) + +[#590]: https://github.com/fsnotify/fsnotify/pull/590 +[#610]: https://github.com/fsnotify/fsnotify/pull/610 +[#617]: https://github.com/fsnotify/fsnotify/pull/617 +[#619]: https://github.com/fsnotify/fsnotify/pull/619 +[#620]: https://github.com/fsnotify/fsnotify/pull/620 +[#621]: https://github.com/fsnotify/fsnotify/pull/621 +[#625]: https://github.com/fsnotify/fsnotify/pull/625 +[#650]: https://github.com/fsnotify/fsnotify/pull/650 1.7.0 - 2023-10-22 ------------------ diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md index ea379759d51..e4ac2a2fffd 100644 --- a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md +++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md @@ -1,7 +1,7 @@ Thank you for your interest in contributing to fsnotify! We try to review and merge PRs in a reasonable timeframe, but please be aware that: -- To avoid "wasted" work, please discus changes on the issue tracker first. You +- To avoid "wasted" work, please discuss changes on the issue tracker first. You can just send PRs, but they may end up being rejected for one reason or the other. @@ -20,6 +20,124 @@ platforms. Testing different platforms locally can be done with something like Use the `-short` flag to make the "stress test" run faster. 
+Writing new tests +----------------- +Scripts in the testdata directory allow creating test cases in a "shell-like" +syntax. The basic format is: + + script + + Output: + desired output + +For example: + + # Create a new empty file with some data. + watch / + echo data >/file + + Output: + create /file + write /file + +Just create a new file to add a new test; select which tests to run with +`-run TestScript/[path]`. + +script +------ +The script is a "shell-like" script: + + cmd arg arg + +Comments are supported with `#`: + + # Comment + cmd arg arg # Comment + +All operations are done in a temp directory; a path like "/foo" is rewritten to +"/tmp/TestFoo/foo". + +Arguments can be quoted with `"` or `'`; there are no escapes and they're +functionally identical right now, but this may change in the future, so best to +assume shell-like rules. + + touch "/file with spaces" + +End-of-line escapes with `\` are not supported. + +### Supported commands + + watch path [ops] # Watch the path, reporting events for it. Nothing is + # watched by default. Optionally a list of ops can be + # given, as with AddWith(path, WithOps(...)). + unwatch path # Stop watching the path. + watchlist n # Assert watchlist length. + + stop # Stop running the script; for debugging. + debug [yes/no] # Enable/disable FSNOTIFY_DEBUG (tests are run in + parallel by default, so -parallel=1 is probably a good + idea). + + touch path + mkdir [-p] dir + ln -s target link # Only ln -s supported. + mkfifo path + mknod dev path + mv src dst + rm [-r] path + chmod mode path # Octal only + sleep time-in-ms + + cat path # Read path (does nothing with the data; just reads it). + echo str >>path # Append "str" to "path". + echo str >path # Truncate "path" and write "str". + + require reason # Skip the test if "reason" is true; "skip" and + skip reason # "require" behave identical; it supports both for + # readability. Possible reasons are: + # + # always Always skip this test. + # symlink Symlinks are supported (requires admin + # permissions on Windows). + # mkfifo Platform doesn't support FIFO named sockets. + # mknod Platform doesn't support device nodes. + + +output +------ +After `Output:` the desired output is given; this is indented by convention, but +that's not required. + +The format of that is: + + # Comment + event path # Comment + + system: + event path + system2: + event path + +Every event is one line, and any whitespace between the event and path are +ignored. The path can optionally be surrounded in ". Anything after a "#" is +ignored. + +Platform-specific tests can be added after GOOS; for example: + + watch / + touch /file + + Output: + # Tested if nothing else matches + create /file + + # Windows-specific test. + windows: + write /file + +You can specify multiple platforms with a comma (e.g. "windows, linux:"). +"kqueue" is a shortcut for all kqueue systems (BSD, macOS). + [goon]: https://github.com/arp242/goon [Vagrant]: https://www.vagrantup.com/ diff --git a/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/vendor/github.com/fsnotify/fsnotify/backend_fen.go index 28497f1dd8e..c349c326c71 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_fen.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_fen.go @@ -1,8 +1,8 @@ //go:build solaris -// +build solaris -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh +// FEN backend for illumos (supported) and Solaris (untested, but should work). +// +// See port_create(3c) etc. for docs. 
https://www.illumos.org/man/3C/port_create package fsnotify @@ -12,150 +12,33 @@ import ( "os" "path/filepath" "sync" + "time" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. 
moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type fen struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error mu sync.Mutex port *unix.EventPort - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - dirs map[string]struct{} // Explicitly watched directories - watches map[string]struct{} // Explicitly watched non-directories + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + dirs map[string]Op // Explicitly watched directories + watches map[string]Op // Explicitly watched non-directories } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { - w := &Watcher{ - Events: make(chan Event, sz), - Errors: make(chan error), - dirs: make(map[string]struct{}), - watches: make(map[string]struct{}), +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { + w := &fen{ + Events: ev, + Errors: errs, + dirs: make(map[string]Op), + watches: make(map[string]Op), done: make(chan struct{}), } @@ -171,27 +54,30 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { // sendEvent attempts to send an event to the user, returning true if the event // was put in the channel successfully and false if the watcher has been closed. 
-func (w *Watcher) sendEvent(name string, op Op) (sent bool) { +func (w *fen) sendEvent(name string, op Op) (sent bool) { select { - case w.Events <- Event{Name: name, Op: op}: - return true case <-w.done: return false + case w.Events <- Event{Name: name, Op: op}: + return true } } // sendError attempts to send an error to the user, returning true if the error // was put in the channel successfully and false if the watcher has been closed. -func (w *Watcher) sendError(err error) (sent bool) { - select { - case w.Errors <- err: +func (w *fen) sendError(err error) (sent bool) { + if err == nil { return true + } + select { case <-w.done: return false + case w.Errors <- err: + return true } } -func (w *Watcher) isClosed() bool { +func (w *fen) isClosed() bool { select { case <-w.done: return true @@ -200,8 +86,7 @@ func (w *Watcher) isClosed() bool { } } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { +func (w *fen) Close() error { // Take the lock used by associateFile to prevent lingering events from // being processed after the close w.mu.Lock() @@ -213,60 +98,21 @@ func (w *Watcher) Close() error { return w.port.Close() } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *fen) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *fen) AddWith(name string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } - if w.port.PathIsWatched(name) { - return nil + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), name) } - _ = getOptions(opts...) + with := getOptions(opts...) 
+ if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } // Currently we resolve symlinks that were explicitly requested to be // watched. Otherwise we would use LStat here. @@ -283,7 +129,7 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { } w.mu.Lock() - w.dirs[name] = struct{}{} + w.dirs[name] = with.op w.mu.Unlock() return nil } @@ -294,26 +140,22 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { } w.mu.Lock() - w.watches[name] = struct{}{} + w.watches[name] = with.op w.mu.Unlock() return nil } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *fen) Remove(name string) error { if w.isClosed() { return nil } if !w.port.PathIsWatched(name) { return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } // The user has expressed an intent. Immediately remove this name from // whichever watch list it might be in. If it's not in there the delete @@ -346,7 +188,7 @@ func (w *Watcher) Remove(name string) error { } // readEvents contains the main loop that runs in a goroutine watching for events. -func (w *Watcher) readEvents() { +func (w *fen) readEvents() { // If this function returns, the watcher has been closed and we can close // these channels defer func() { @@ -382,17 +224,19 @@ func (w *Watcher) readEvents() { continue } + if debug { + internal.Debug(pevent.Path, pevent.Events) + } + err = w.handleEvent(&pevent) - if err != nil { - if !w.sendError(err) { - return - } + if !w.sendError(err) { + return } } } } -func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error { +func (w *fen) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error { files, err := os.ReadDir(path) if err != nil { return err @@ -418,7 +262,7 @@ func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, ha // bitmap matches more than one event type (e.g. the file was both modified and // had the attributes changed between when the association was created and the // when event was returned) -func (w *Watcher) handleEvent(event *unix.PortEvent) error { +func (w *fen) handleEvent(event *unix.PortEvent) error { var ( events = event.Events path = event.Path @@ -510,15 +354,9 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error { } if events&unix.FILE_MODIFIED != 0 { - if fmode.IsDir() { - if watchedDir { - if err := w.updateDirectory(path); err != nil { - return err - } - } else { - if !w.sendEvent(path, Write) { - return nil - } + if fmode.IsDir() && watchedDir { + if err := w.updateDirectory(path); err != nil { + return err } } else { if !w.sendEvent(path, Write) { @@ -543,7 +381,7 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error { return nil } -func (w *Watcher) updateDirectory(path string) error { +func (w *fen) updateDirectory(path string) error { // The directory was modified, so we must find unwatched entities and watch // them. 
If something was removed from the directory, nothing will happen, // as everything else should still be watched. @@ -563,10 +401,8 @@ func (w *Watcher) updateDirectory(path string) error { return err } err = w.associateFile(path, finfo, false) - if err != nil { - if !w.sendError(err) { - return nil - } + if !w.sendError(err) { + return nil } if !w.sendEvent(path, Create) { return nil @@ -575,7 +411,7 @@ func (w *Watcher) updateDirectory(path string) error { return nil } -func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) error { +func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error { if w.isClosed() { return ErrClosed } @@ -593,34 +429,34 @@ func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) erro // cleared up that discrepancy. The most likely cause is that the event // has fired but we haven't processed it yet. err := w.port.DissociatePath(path) - if err != nil && err != unix.ENOENT { + if err != nil && !errors.Is(err, unix.ENOENT) { return err } } - // FILE_NOFOLLOW means we watch symlinks themselves rather than their - // targets. - events := unix.FILE_MODIFIED | unix.FILE_ATTRIB | unix.FILE_NOFOLLOW - if follow { - // We *DO* follow symlinks for explicitly watched entries. - events = unix.FILE_MODIFIED | unix.FILE_ATTRIB + + var events int + if !follow { + // Watch symlinks themselves rather than their targets unless this entry + // is explicitly watched. + events |= unix.FILE_NOFOLLOW + } + if true { // TODO: implement withOps() + events |= unix.FILE_MODIFIED } - return w.port.AssociatePath(path, stat, - events, - stat.Mode()) + if true { + events |= unix.FILE_ATTRIB + } + return w.port.AssociatePath(path, stat, events, stat.Mode()) } -func (w *Watcher) dissociateFile(path string, stat os.FileInfo, unused bool) error { +func (w *fen) dissociateFile(path string, stat os.FileInfo, unused bool) error { if !w.port.PathIsWatched(path) { return nil } return w.port.DissociatePath(path) } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { +func (w *fen) WatchList() []string { if w.isClosed() { return nil } @@ -638,3 +474,11 @@ func (w *Watcher) WatchList() []string { return entries } + +func (w *fen) xSupports(op Op) bool { + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go index 921c1c1e401..36c311694cd 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go @@ -1,8 +1,4 @@ //go:build linux && !appengine -// +build linux,!appengine - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -10,127 +6,20 @@ import ( "errors" "fmt" "io" + "io/fs" "os" "path/filepath" "strings" "sync" + "time" "unsafe" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. 
For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. 
For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type inotify struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error // Store fd here as os.File.Read() will no longer return on close after @@ -139,8 +28,26 @@ type Watcher struct { inotifyFile *os.File watches *watches done chan struct{} // Channel for sending a "quit message" to the reader goroutine - closeMu sync.Mutex + doneMu sync.Mutex doneResp chan struct{} // Channel to respond to Close + + // Store rename cookies in an array, with the index wrapping to 0. Almost + // all of the time what we get is a MOVED_FROM to set the cookie and the + // next event inotify sends will be MOVED_TO to read it. However, this is + // not guaranteed – as described in inotify(7) – and we may get other events + // between the two MOVED_* events (including other MOVED_* ones). + // + // A second issue is that moving a file outside the watched directory will + // trigger a MOVED_FROM to set the cookie, but we never see the MOVED_TO to + // read and delete it. So just storing it in a map would slowly leak memory. + // + // Doing it like this gives us a simple fast LRU-cache that won't allocate. + // Ten items should be more than enough for our purpose, and a loop over + // such a short array is faster than a map access anyway (not that it hugely + // matters since we're talking about hundreds of ns at the most, but still). + cookies [10]koekje + cookieIndex uint8 + cookiesMu sync.Mutex } type ( @@ -150,9 +57,14 @@ type ( path map[string]uint32 // pathname → wd } watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) - path string // Watch path. + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) + path string // Watch path. + recurse bool // Recursion with ./...? + } + koekje struct { + cookie uint32 + path string } ) @@ -179,23 +91,45 @@ func (w *watches) add(ww *watch) { func (w *watches) remove(wd uint32) { w.mu.Lock() defer w.mu.Unlock() - delete(w.path, w.wd[wd].path) + watch := w.wd[wd] // Could have had Remove() called. See #616. 
+ if watch == nil { + return + } + delete(w.path, watch.path) delete(w.wd, wd) } -func (w *watches) removePath(path string) (uint32, bool) { +func (w *watches) removePath(path string) ([]uint32, error) { w.mu.Lock() defer w.mu.Unlock() + path, recurse := recursivePath(path) wd, ok := w.path[path] if !ok { - return 0, false + return nil, fmt.Errorf("%w: %s", ErrNonExistentWatch, path) + } + + watch := w.wd[wd] + if recurse && !watch.recurse { + return nil, fmt.Errorf("can't use /... with non-recursive watch %q", path) } delete(w.path, path) delete(w.wd, wd) + if !watch.recurse { + return []uint32{wd}, nil + } - return wd, true + wds := make([]uint32, 0, 8) + wds = append(wds, wd) + for p, rwd := range w.path { + if filepath.HasPrefix(p, path) { + delete(w.path, p) + delete(w.wd, rwd) + wds = append(wds, rwd) + } + } + return wds, nil } func (w *watches) byPath(path string) *watch { @@ -236,20 +170,11 @@ func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error return nil } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { // Need to set nonblocking mode for SetDeadline to work, otherwise blocking // I/O operations won't terminate on close. fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK) @@ -257,12 +182,12 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { return nil, errno } - w := &Watcher{ + w := &inotify{ + Events: ev, + Errors: errs, fd: fd, inotifyFile: os.NewFile(uintptr(fd), ""), watches: newWatches(), - Events: make(chan Event, sz), - Errors: make(chan error), done: make(chan struct{}), doneResp: make(chan struct{}), } @@ -272,26 +197,29 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { } // Returns true if the event was sent, or false if watcher is closed. -func (w *Watcher) sendEvent(e Event) bool { +func (w *inotify) sendEvent(e Event) bool { select { - case w.Events <- e: - return true case <-w.done: return false + case w.Events <- e: + return true } } // Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { - select { - case w.Errors <- err: +func (w *inotify) sendError(err error) bool { + if err == nil { return true + } + select { case <-w.done: return false + case w.Errors <- err: + return true } } -func (w *Watcher) isClosed() bool { +func (w *inotify) isClosed() bool { select { case <-w.done: return true @@ -300,15 +228,14 @@ func (w *Watcher) isClosed() bool { } } -// Close removes all watches and closes the Events channel. 
-func (w *Watcher) Close() error { - w.closeMu.Lock() +func (w *inotify) Close() error { + w.doneMu.Lock() if w.isClosed() { - w.closeMu.Unlock() + w.doneMu.Unlock() return nil } close(w.done) - w.closeMu.Unlock() + w.doneMu.Unlock() // Causes any blocking reads to return with an error, provided the file // still supports deadline operations. @@ -323,78 +250,104 @@ func (w *Watcher) Close() error { return nil } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *inotify) Add(name string) error { return w.AddWith(name) } + +func (w *inotify) AddWith(path string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), path) + } + + with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } - name = filepath.Clean(name) - _ = getOptions(opts...) + path, recurse := recursivePath(path) + if recurse { + return filepath.WalkDir(path, func(root string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if !d.IsDir() { + if root == path { + return fmt.Errorf("fsnotify: not a directory: %q", path) + } + return nil + } - var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | - unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | - unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF + // Send a Create event when adding new directory from a recursive + // watch; this is for "mkdir -p one/two/three". 
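When the added path ends in /..., AddWith above walks the whole tree with filepath.WalkDir and registers a watch per directory, erroring out only if the root itself is a file. The walk on its own, without the watcher plumbing (dirsUnder is an illustrative helper, not fsnotify API):

package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
)

// dirsUnder lists every directory under root, in the visit order a
// recursive AddWith would use; non-directories are skipped, and a root
// that is not a directory is an error.
func dirsUnder(root string) ([]string, error) {
	var dirs []string
	err := filepath.WalkDir(root, func(p string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if !d.IsDir() {
			if p == root {
				return fmt.Errorf("not a directory: %q", root)
			}
			return nil
		}
		dirs = append(dirs, p)
		return nil
	})
	return dirs, err
}

func main() {
	dirs, err := dirsUnder(".")
	if err != nil {
		panic(err)
	}
	for _, d := range dirs {
		fmt.Println(d)
	}
}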
Usually all those + // directories will be created before we can set up watchers on the + // subdirectories, so only "one" would be sent as a Create event and + // not "one/two" and "one/two/three" (inotifywait -r has the same + // problem). + if with.sendCreate && root != path { + w.sendEvent(Event{Name: root, Op: Create}) + } + + return w.add(root, with, true) + }) + } - return w.watches.updatePath(name, func(existing *watch) (*watch, error) { + return w.add(path, with, false) +} + +func (w *inotify) add(path string, with withOpts, recurse bool) error { + var flags uint32 + if with.noFollow { + flags |= unix.IN_DONT_FOLLOW + } + if with.op.Has(Create) { + flags |= unix.IN_CREATE + } + if with.op.Has(Write) { + flags |= unix.IN_MODIFY + } + if with.op.Has(Remove) { + flags |= unix.IN_DELETE | unix.IN_DELETE_SELF + } + if with.op.Has(Rename) { + flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF + } + if with.op.Has(Chmod) { + flags |= unix.IN_ATTRIB + } + if with.op.Has(xUnportableOpen) { + flags |= unix.IN_OPEN + } + if with.op.Has(xUnportableRead) { + flags |= unix.IN_ACCESS + } + if with.op.Has(xUnportableCloseWrite) { + flags |= unix.IN_CLOSE_WRITE + } + if with.op.Has(xUnportableCloseRead) { + flags |= unix.IN_CLOSE_NOWRITE + } + return w.register(path, flags, recurse) +} + +func (w *inotify) register(path string, flags uint32, recurse bool) error { + return w.watches.updatePath(path, func(existing *watch) (*watch, error) { if existing != nil { flags |= existing.flags | unix.IN_MASK_ADD } - wd, err := unix.InotifyAddWatch(w.fd, name, flags) + wd, err := unix.InotifyAddWatch(w.fd, path, flags) if wd == -1 { return nil, err } if existing == nil { return &watch{ - wd: uint32(wd), - path: name, - flags: flags, + wd: uint32(wd), + path: path, + flags: flags, + recurse: recurse, }, nil } @@ -404,49 +357,44 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { }) } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *inotify) Remove(name string) error { if w.isClosed() { return nil } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } return w.remove(filepath.Clean(name)) } -func (w *Watcher) remove(name string) error { - wd, ok := w.watches.removePath(name) - if !ok { - return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) - } - - success, errno := unix.InotifyRmWatch(w.fd, wd) - if success == -1 { - // TODO: Perhaps it's not helpful to return an error here in every case; - // The only two possible errors are: - // - // - EBADF, which happens when w.fd is not a valid file descriptor - // of any kind. - // - EINVAL, which is when fd is not an inotify descriptor or wd - // is not a valid watch descriptor. Watch descriptors are - // invalidated when they are removed explicitly or implicitly; - // explicitly by inotify_rm_watch, implicitly when the file they - // are watching is deleted. 
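add() above builds the inotify mask bit by bit from the requested Op set. The same mapping as a free-standing function; this only compiles on Linux since it uses the inotify constants from golang.org/x/sys/unix, and the Op type here is a stand-in for fsnotify's:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

type Op uint32

const (
	Create Op = 1 << iota
	Write
	Remove
	Rename
	Chmod
)

func (o Op) Has(h Op) bool { return o&h != 0 }

// flagsFor reproduces the portable half of the Op → inotify mask
// mapping in add() above.
func flagsFor(op Op) uint32 {
	var flags uint32
	if op.Has(Create) {
		flags |= unix.IN_CREATE
	}
	if op.Has(Write) {
		flags |= unix.IN_MODIFY
	}
	if op.Has(Remove) {
		flags |= unix.IN_DELETE | unix.IN_DELETE_SELF
	}
	if op.Has(Rename) {
		flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF
	}
	if op.Has(Chmod) {
		flags |= unix.IN_ATTRIB
	}
	return flags
}

func main() {
	fmt.Printf("mask for Create|Write: %#x\n", flagsFor(Create|Write))
}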
- return errno +func (w *inotify) remove(name string) error { + wds, err := w.watches.removePath(name) + if err != nil { + return err + } + + for _, wd := range wds { + _, err := unix.InotifyRmWatch(w.fd, wd) + if err != nil { + // TODO: Perhaps it's not helpful to return an error here in every + // case; the only two possible errors are: + // + // EBADF, which happens when w.fd is not a valid file descriptor of + // any kind. + // + // EINVAL, which is when fd is not an inotify descriptor or wd is + // not a valid watch descriptor. Watch descriptors are invalidated + // when they are removed explicitly or implicitly; explicitly by + // inotify_rm_watch, implicitly when the file they are watching is + // deleted. + return err + } } return nil } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { +func (w *inotify) WatchList() []string { if w.isClosed() { return nil } @@ -463,7 +411,7 @@ // readEvents reads from the inotify file descriptor, converts the // received events into Event objects and sends them via the Events channel -func (w *Watcher) readEvents() { +func (w *inotify) readEvents() { defer func() { close(w.doneResp) close(w.Errors) @@ -506,15 +454,17 @@ continue } - var offset uint32 // We don't know how many events we just read into the buffer // While the offset points to at least one whole event... + var offset uint32 for offset <= uint32(n-unix.SizeofInotifyEvent) { var ( // Point "raw" to the event in the buffer raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) mask = uint32(raw.Mask) nameLen = uint32(raw.Len) + // Move to the next event in the buffer + next = func() { offset += unix.SizeofInotifyEvent + nameLen } ) if mask&unix.IN_Q_OVERFLOW != 0 { @@ -523,21 +473,53 @@ } } - // If the event happened to the watched directory or the watched file, the kernel - // doesn't append the filename to the event, but we would like to always fill the - // the "Name" field with a valid filename. We retrieve the path of the watch from - // the "paths" map. + /// If the event happened to the watched directory or the watched + /// file, the kernel doesn't append the filename to the event, but + /// we would like to always fill the "Name" field with a valid + /// filename. We retrieve the path of the watch from the "paths" + /// map. watch := w.watches.byWd(uint32(raw.Wd)) + /// Can be nil if Remove() was called in another goroutine for this + /// path in between reading the events from the kernel and reading + /// the internal state. Not much we can do about it, so just skip. + /// See #616. + if watch == nil { + next() + continue + } + + name := watch.path + if nameLen > 0 { + /// Point "bytes" at the first byte of the filename + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] + /// The filename is padded with NULL bytes. TrimRight() gets rid of those. + name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + } + + if debug { + internal.Debug(name, raw.Mask, raw.Cookie) + } + + if mask&unix.IN_IGNORED != 0 { //&& event.Op != 0 + next() + continue + } // inotify will automatically remove the watch on deletes; just need // to clean our state here.
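readEvents above walks a raw byte buffer of variable-length records: a fixed-size inotify_event header followed by Len bytes of NUL-padded filename, with the next() closure stepping over both. The offset arithmetic in isolation (Linux-only; parseNames and the package name are illustrative):

//go:build linux

package inotifybuf

import (
	"strings"
	"unsafe"

	"golang.org/x/sys/unix"
)

// parseNames extracts the filename of each inotify record in the first
// n bytes of buf: unix.SizeofInotifyEvent header bytes, then raw.Len
// bytes of name padded with NUL bytes.
func parseNames(buf []byte, n int) []string {
	var names []string
	var offset uint32
	for n >= unix.SizeofInotifyEvent && offset <= uint32(n-unix.SizeofInotifyEvent) {
		raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
		nameLen := uint32(raw.Len)
		name := ""
		if nameLen > 0 {
			b := buf[offset+unix.SizeofInotifyEvent : offset+unix.SizeofInotifyEvent+nameLen]
			name = strings.TrimRight(string(b), "\x00") // drop NUL padding
		}
		names = append(names, name)
		offset += unix.SizeofInotifyEvent + nameLen // what next() does
	}
	return names
}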
- if watch != nil && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { w.watches.remove(watch.wd) } + // We can't really update the state when a watched path is moved; // only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove // the watch. - if watch != nil && mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { + if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { + if watch.recurse { + next() // Do nothing + continue + } + err := w.remove(watch.path) if err != nil && !errors.Is(err, ErrNonExistentWatch) { if !w.sendError(err) { @@ -546,34 +528,69 @@ func (w *Watcher) readEvents() { } } - var name string - if watch != nil { - name = watch.path - } - if nameLen > 0 { - // Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] - // The filename is padded with NULL bytes. TrimRight() gets rid of those. - name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + /// Skip if we're watching both this path and the parent; the parent + /// will already send a delete so no need to do it twice. + if mask&unix.IN_DELETE_SELF != 0 { + if _, ok := w.watches.path[filepath.Dir(watch.path)]; ok { + next() + continue + } } - event := w.newEvent(name, mask) + ev := w.newEvent(name, mask, raw.Cookie) + // Need to update watch path for recurse. + if watch.recurse { + isDir := mask&unix.IN_ISDIR == unix.IN_ISDIR + /// New directory created: set up watch on it. + if isDir && ev.Has(Create) { + err := w.register(ev.Name, watch.flags, true) + if !w.sendError(err) { + return + } - // Send the events that are not ignored on the events channel - if mask&unix.IN_IGNORED == 0 { - if !w.sendEvent(event) { - return + // This was a directory rename, so we need to update all + // the children. + // + // TODO: this is of course pretty slow; we should use a + // better data structure for storing all of this, e.g. store + // children in the watch. I have some code for this in my + // kqueue refactor we can use in the future. For now I'm + // okay with this as it's not publicly available. + // Correctness first, performance second. + if ev.renamedFrom != "" { + w.watches.mu.Lock() + for k, ww := range w.watches.wd { + if k == watch.wd || ww.path == ev.Name { + continue + } + if strings.HasPrefix(ww.path, ev.renamedFrom) { + ww.path = strings.Replace(ww.path, ev.renamedFrom, ev.Name, 1) + w.watches.wd[k] = ww + } + } + w.watches.mu.Unlock() + } } } - // Move to the next event in the buffer - offset += unix.SizeofInotifyEvent + nameLen + /// Send the events that are not ignored on the events channel + if !w.sendEvent(ev) { + return + } + next() } } } -// newEvent returns an platform-independent Event based on an inotify mask. -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *inotify) isRecursive(path string) bool { + ww := w.watches.byPath(path) + if ww == nil { // path could be a file, so also check the Dir. 
+ ww = w.watches.byPath(filepath.Dir(path)) + } + return ww != nil && ww.recurse +} + +func (w *inotify) newEvent(name string, mask, cookie uint32) Event { e := Event{Name: name} if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { e.Op |= Create @@ -584,11 +601,58 @@ func (w *Watcher) newEvent(name string, mask uint32) Event { if mask&unix.IN_MODIFY == unix.IN_MODIFY { e.Op |= Write } + if mask&unix.IN_OPEN == unix.IN_OPEN { + e.Op |= xUnportableOpen + } + if mask&unix.IN_ACCESS == unix.IN_ACCESS { + e.Op |= xUnportableRead + } + if mask&unix.IN_CLOSE_WRITE == unix.IN_CLOSE_WRITE { + e.Op |= xUnportableCloseWrite + } + if mask&unix.IN_CLOSE_NOWRITE == unix.IN_CLOSE_NOWRITE { + e.Op |= xUnportableCloseRead + } if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { e.Op |= Rename } if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { e.Op |= Chmod } + + if cookie != 0 { + if mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + w.cookiesMu.Lock() + w.cookies[w.cookieIndex] = koekje{cookie: cookie, path: e.Name} + w.cookieIndex++ + if w.cookieIndex > 9 { + w.cookieIndex = 0 + } + w.cookiesMu.Unlock() + } else if mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + w.cookiesMu.Lock() + var prev string + for _, c := range w.cookies { + if c.cookie == cookie { + prev = c.path + break + } + } + w.cookiesMu.Unlock() + e.renamedFrom = prev + } + } return e } + +func (w *inotify) xSupports(op Op) bool { + return true // Supports everything. +} + +func (w *inotify) state() { + w.watches.mu.Lock() + defer w.watches.mu.Unlock() + for wd, ww := range w.watches.wd { + fmt.Fprintf(os.Stderr, "%4d: recurse=%t %q\n", wd, ww.recurse, ww.path) + } +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go index 063a0915a07..d8de5ab76fd 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go @@ -1,8 +1,4 @@ //go:build freebsd || openbsd || netbsd || dragonfly || darwin -// +build freebsd openbsd netbsd dragonfly darwin - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -11,174 +7,195 @@ import ( "fmt" "os" "path/filepath" + "runtime" "sync" + "time" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". 
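The directory-rename fixup in readEvents above rewrites every recorded child watch path from the old prefix to the new one; only the map bookkeeping matters, so it can be shown without inotify (renameChildren and the map layout are illustrative):

package main

import (
	"fmt"
	"strings"
)

// renameChildren rewrites every watch path under oldDir to live under
// newDir, the way the recursive branch above fixes up w.watches.wd
// after a watched directory is renamed.
func renameChildren(paths map[int]string, oldDir, newDir string) {
	for wd, p := range paths {
		if p == newDir {
			continue // the renamed directory itself was already updated
		}
		if strings.HasPrefix(p, oldDir) {
			paths[wd] = strings.Replace(p, oldDir, newDir, 1)
		}
	}
}

func main() {
	paths := map[int]string{1: "/w/a", 2: "/w/a/b", 3: "/w/c"}
	renameChildren(paths, "/w/a", "/w/x")
	fmt.Println(paths) // map[1:/w/x 2:/w/x/b 3:/w/c]
}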
-// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. 
+type kqueue struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error - done chan struct{} - kq int // File descriptor (as returned by the kqueue() syscall). - closepipe [2]int // Pipe used for closing. - mu sync.Mutex // Protects access to watcher data - watches map[string]int // Watched file descriptors (key: path). - watchesByDir map[string]map[int]struct{} // Watched file descriptors indexed by the parent directory (key: dirname(path)). - userWatches map[string]struct{} // Watches added with Watcher.Add() - dirFlags map[string]uint32 // Watched directories to fflags used in kqueue. - paths map[int]pathInfo // File descriptors to path names for processing kqueue events. - fileExists map[string]struct{} // Keep track of if we know this file exists (to stop duplicate create events). - isClosed bool // Set to true when Close() is first called + kq int // File descriptor (as returned by the kqueue() syscall). + closepipe [2]int // Pipe used for closing kq. + watches *watches + done chan struct{} + doneMu sync.Mutex } -type pathInfo struct { - name string - isDir bool +type ( + watches struct { + mu sync.RWMutex + wd map[int]watch // wd → watch + path map[string]int // pathname → wd + byDir map[string]map[int]struct{} // dirname(path) → wd + seen map[string]struct{} // Keep track of if we know this file exists. + byUser map[string]struct{} // Watches added with Watcher.Add() + } + watch struct { + wd int + name string + linkName string // In case of links; name is the target, and this is the link. + isDir bool + dirFlags uint32 + } +) + +func newWatches() *watches { + return &watches{ + wd: make(map[int]watch), + path: make(map[string]int), + byDir: make(map[string]map[int]struct{}), + seen: make(map[string]struct{}), + byUser: make(map[string]struct{}), + } } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func (w *watches) listPaths(userOnly bool) []string { + w.mu.RLock() + defer w.mu.RUnlock() + + if userOnly { + l := make([]string, 0, len(w.byUser)) + for p := range w.byUser { + l = append(l, p) + } + return l + } + + l := make([]string, 0, len(w.path)) + for p := range w.path { + l = append(l, p) + } + return l } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func (w *watches) watchesInDir(path string) []string { + w.mu.RLock() + defer w.mu.RUnlock() + + l := make([]string, 0, 4) + for fd := range w.byDir[path] { + info := w.wd[fd] + if _, ok := w.byUser[info.name]; !ok { + l = append(l, info.name) + } + } + return l +} + +// Mark path as added by the user. 
+func (w *watches) addUserWatch(path string) { + w.mu.Lock() + defer w.mu.Unlock() + w.byUser[path] = struct{}{} +} + +func (w *watches) addLink(path string, fd int) { + w.mu.Lock() + defer w.mu.Unlock() + + w.path[path] = fd + w.seen[path] = struct{}{} +} + +func (w *watches) add(path, linkPath string, fd int, isDir bool) { + w.mu.Lock() + defer w.mu.Unlock() + + w.path[path] = fd + w.wd[fd] = watch{wd: fd, name: path, linkName: linkPath, isDir: isDir} + + parent := filepath.Dir(path) + byDir, ok := w.byDir[parent] + if !ok { + byDir = make(map[int]struct{}, 1) + w.byDir[parent] = byDir + } + byDir[fd] = struct{}{} +} + +func (w *watches) byWd(fd int) (watch, bool) { + w.mu.RLock() + defer w.mu.RUnlock() + info, ok := w.wd[fd] + return info, ok +} + +func (w *watches) byPath(path string) (watch, bool) { + w.mu.RLock() + defer w.mu.RUnlock() + info, ok := w.wd[w.path[path]] + return info, ok +} + +func (w *watches) updateDirFlags(path string, flags uint32) { + w.mu.Lock() + defer w.mu.Unlock() + + fd := w.path[path] + info := w.wd[fd] + info.dirFlags = flags + w.wd[fd] = info +} + +func (w *watches) remove(fd int, path string) bool { + w.mu.Lock() + defer w.mu.Unlock() + + isDir := w.wd[fd].isDir + delete(w.path, path) + delete(w.byUser, path) + + parent := filepath.Dir(path) + delete(w.byDir[parent], fd) + + if len(w.byDir[parent]) == 0 { + delete(w.byDir, parent) + } + + delete(w.wd, fd) + delete(w.seen, path) + return isDir +} + +func (w *watches) markSeen(path string, exists bool) { + w.mu.Lock() + defer w.mu.Unlock() + if exists { + w.seen[path] = struct{}{} + } else { + delete(w.seen, path) + } +} + +func (w *watches) seenBefore(path string) bool { + w.mu.RLock() + defer w.mu.RUnlock() + _, ok := w.seen[path] + return ok +} + +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) +} + +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { kq, closepipe, err := newKqueue() if err != nil { return nil, err } - w := &Watcher{ - kq: kq, - closepipe: closepipe, - watches: make(map[string]int), - watchesByDir: make(map[string]map[int]struct{}), - dirFlags: make(map[string]uint32), - paths: make(map[int]pathInfo), - fileExists: make(map[string]struct{}), - userWatches: make(map[string]struct{}), - Events: make(chan Event, sz), - Errors: make(chan error), - done: make(chan struct{}), + w := &kqueue{ + Events: ev, + Errors: errs, + kq: kq, + closepipe: closepipe, + done: make(chan struct{}), + watches: newWatches(), } go w.readEvents() @@ -203,6 +220,8 @@ func newKqueue() (kq int, closepipe [2]int, err error) { unix.Close(kq) return kq, closepipe, err } + unix.CloseOnExec(closepipe[0]) + unix.CloseOnExec(closepipe[1]) // Register changes to listen on the closepipe. changes := make([]unix.Kevent_t, 1) @@ -221,166 +240,108 @@ func newKqueue() (kq int, closepipe [2]int, err error) { } // Returns true if the event was sent, or false if watcher is closed. -func (w *Watcher) sendEvent(e Event) bool { +func (w *kqueue) sendEvent(e Event) bool { select { - case w.Events <- e: - return true case <-w.done: return false + case w.Events <- e: + return true } } // Returns true if the error was sent, or false if watcher is closed. 
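newKqueue above wires up the shutdown path at construction time: the read end of a pipe is registered on the kqueue, so closing the write end from Close() wakes a goroutine blocked in unix.Kevent. The same setup reduced to a sketch (BSD/macOS only; the function and package names are illustrative):

//go:build darwin || dragonfly || freebsd || netbsd || openbsd

package kqsketch

import "golang.org/x/sys/unix"

// newKqueueWithClosePipe creates a kqueue plus a pipe whose read end is
// watched for readability; close(write end) delivers EOF, which kqueue
// reports as a read event and the read loop treats as "quit".
func newKqueueWithClosePipe() (kq int, closepipe [2]int, err error) {
	kq, err = unix.Kqueue()
	if err != nil {
		return kq, closepipe, err
	}

	// Nothing is ever written to this pipe; it exists only so Close()
	// can signal the reader.
	if err = unix.Pipe(closepipe[:]); err != nil {
		unix.Close(kq)
		return kq, closepipe, err
	}
	unix.CloseOnExec(closepipe[0])
	unix.CloseOnExec(closepipe[1])

	changes := make([]unix.Kevent_t, 1)
	unix.SetKevent(&changes[0], closepipe[0], unix.EVFILT_READ, unix.EV_ADD)
	if _, err = unix.Kevent(kq, changes, nil, nil); err != nil {
		unix.Close(kq)
		return kq, closepipe, err
	}
	return kq, closepipe, nil
}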
-func (w *Watcher) sendError(err error) bool { +func (w *kqueue) sendError(err error) bool { + if err == nil { + return true + } select { + case <-w.done: + return false case w.Errors <- err: return true + } +} + +func (w *kqueue) isClosed() bool { + select { case <-w.done: + return true + default: return false } } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) Close() error { + w.doneMu.Lock() + if w.isClosed() { + w.doneMu.Unlock() return nil } - w.isClosed = true + close(w.done) + w.doneMu.Unlock() - // copy paths to remove while locked - pathsToRemove := make([]string, 0, len(w.watches)) - for name := range w.watches { - pathsToRemove = append(pathsToRemove, name) - } - w.mu.Unlock() // Unlock before calling Remove, which also locks + pathsToRemove := w.watches.listPaths(false) for _, name := range pathsToRemove { w.Remove(name) } // Send "quit" message to the reader goroutine. unix.Close(w.closepipe[1]) - close(w.done) - return nil } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *kqueue) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { - _ = getOptions(opts...) +func (w *kqueue) AddWith(name string, opts ...addOpt) error { + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } + + with := getOptions(opts...) 
+ if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } - w.mu.Lock() - w.userWatches[name] = struct{}{} - w.mu.Unlock() _, err := w.addWatch(name, noteAllEvents) - return err + if err != nil { + return err + } + w.watches.addUserWatch(name) + return nil } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *kqueue) Remove(name string) error { + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } return w.remove(name, true) } -func (w *Watcher) remove(name string, unwatchFiles bool) error { - name = filepath.Clean(name) - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) remove(name string, unwatchFiles bool) error { + if w.isClosed() { return nil } - watchfd, ok := w.watches[name] - w.mu.Unlock() + + name = filepath.Clean(name) + info, ok := w.watches.byPath(name) if !ok { return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) } - err := w.register([]int{watchfd}, unix.EV_DELETE, 0) + err := w.register([]int{info.wd}, unix.EV_DELETE, 0) if err != nil { return err } - unix.Close(watchfd) - - w.mu.Lock() - isDir := w.paths[watchfd].isDir - delete(w.watches, name) - delete(w.userWatches, name) - - parentName := filepath.Dir(name) - delete(w.watchesByDir[parentName], watchfd) - - if len(w.watchesByDir[parentName]) == 0 { - delete(w.watchesByDir, parentName) - } + unix.Close(info.wd) - delete(w.paths, watchfd) - delete(w.dirFlags, name) - delete(w.fileExists, name) - w.mu.Unlock() + isDir := w.watches.remove(info.wd, name) // Find all watched paths that are in this directory that are not external. if unwatchFiles && isDir { - var pathsToRemove []string - w.mu.Lock() - for fd := range w.watchesByDir[name] { - path := w.paths[fd] - if _, ok := w.userWatches[path.name]; !ok { - pathsToRemove = append(pathsToRemove, path.name) - } - } - w.mu.Unlock() + pathsToRemove := w.watches.watchesInDir(name) for _, name := range pathsToRemove { // Since these are internal, not much sense in propagating error to // the user, as that will just confuse them with an error about a @@ -391,23 +352,11 @@ func (w *Watcher) remove(name string, unwatchFiles bool) error { return nil } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() - if w.isClosed { +func (w *kqueue) WatchList() []string { + if w.isClosed() { return nil } - - entries := make([]string, 0, len(w.userWatches)) - for pathname := range w.userWatches { - entries = append(entries, pathname) - } - - return entries + return w.watches.listPaths(true) } // Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) @@ -417,34 +366,26 @@ const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | un // described in kevent(2). // // Returns the real path to the file which was added, with symlinks resolved. 
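remove() above tears a watch down in two steps: an EV_DELETE kevent deregisters the vnode filter, and closing the descriptor lets the kernel release the vnode (kqueue needs an open fd per watched path, which is why skipping the close would leak descriptors). As a sketch (unwatch is illustrative; BSD/macOS only):

//go:build darwin || dragonfly || freebsd || netbsd || openbsd

package kqsketch

import "golang.org/x/sys/unix"

// unwatch deregisters the vnode filter for a watched descriptor and
// then closes it, mirroring the EV_DELETE + unix.Close pair above.
func unwatch(kq, wd int) error {
	changes := make([]unix.Kevent_t, 1)
	unix.SetKevent(&changes[0], wd, unix.EVFILT_VNODE, unix.EV_DELETE)
	if _, err := unix.Kevent(kq, changes, nil, nil); err != nil {
		return err
	}
	return unix.Close(wd)
}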
-func (w *Watcher) addWatch(name string, flags uint32) (string, error) { - var isDir bool - name = filepath.Clean(name) - - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) addWatch(name string, flags uint32) (string, error) { + if w.isClosed() { return "", ErrClosed } - watchfd, alreadyWatching := w.watches[name] - // We already have a watch, but we can still override flags. - if alreadyWatching { - isDir = w.paths[watchfd].isDir - } - w.mu.Unlock() + name = filepath.Clean(name) + + info, alreadyWatching := w.watches.byPath(name) if !alreadyWatching { fi, err := os.Lstat(name) if err != nil { return "", err } - // Don't watch sockets or named pipes + // Don't watch sockets or named pipes. if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) { return "", nil } - // Follow Symlinks. + // Follow symlinks. if fi.Mode()&os.ModeSymlink == os.ModeSymlink { link, err := os.Readlink(name) if err != nil { @@ -455,18 +396,15 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { return "", nil } - w.mu.Lock() - _, alreadyWatching = w.watches[link] - w.mu.Unlock() - + _, alreadyWatching = w.watches.byPath(link) if alreadyWatching { // Add to watches so we don't get spurious Create events later // on when we diff the directories. - w.watches[name] = 0 - w.fileExists[name] = struct{}{} + w.watches.addLink(name, 0) return link, nil } + info.linkName = name name = link fi, err = os.Lstat(name) if err != nil { @@ -477,7 +415,7 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { // Retry on EINTR; open() can return EINTR in practice on macOS. // See #354, and Go issues 11180 and 39237. for { - watchfd, err = unix.Open(name, openMode, 0) + info.wd, err = unix.Open(name, openMode, 0) if err == nil { break } @@ -488,40 +426,25 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { return "", err } - isDir = fi.IsDir() + info.isDir = fi.IsDir() } - err := w.register([]int{watchfd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) + err := w.register([]int{info.wd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) if err != nil { - unix.Close(watchfd) + unix.Close(info.wd) return "", err } if !alreadyWatching { - w.mu.Lock() - parentName := filepath.Dir(name) - w.watches[name] = watchfd - - watchesByDir, ok := w.watchesByDir[parentName] - if !ok { - watchesByDir = make(map[int]struct{}, 1) - w.watchesByDir[parentName] = watchesByDir - } - watchesByDir[watchfd] = struct{}{} - w.paths[watchfd] = pathInfo{name: name, isDir: isDir} - w.mu.Unlock() + w.watches.add(name, info.linkName, info.wd, info.isDir) } - if isDir { - // Watch the directory if it has not been watched before, or if it was - // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) - w.mu.Lock() - + // Watch the directory if it has not been watched before, or if it was + // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + if info.isDir { watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && - (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) - // Store flags so this watch can be updated later - w.dirFlags[name] = flags - w.mu.Unlock() + (!alreadyWatching || (info.dirFlags&unix.NOTE_WRITE) != unix.NOTE_WRITE) + w.watches.updateDirFlags(name, flags) if watchDir { if err := w.watchDirectoryFiles(name); err != nil { @@ -534,7 +457,7 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { // readEvents reads from kqueue and converts the received 
kevents into // Event values that it sends down the Events channel. -func (w *Watcher) readEvents() { +func (w *kqueue) readEvents() { defer func() { close(w.Events) close(w.Errors) @@ -543,50 +466,65 @@ func (w *Watcher) readEvents() { }() eventBuffer := make([]unix.Kevent_t, 10) - for closed := false; !closed; { + for { kevents, err := w.read(eventBuffer) // EINTR is okay, the syscall was interrupted before timeout expired. if err != nil && err != unix.EINTR { if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) { - closed = true + return } - continue } - // Flush the events we received to the Events channel for _, kevent := range kevents { var ( - watchfd = int(kevent.Ident) - mask = uint32(kevent.Fflags) + wd = int(kevent.Ident) + mask = uint32(kevent.Fflags) ) // Shut down the loop when the pipe is closed, but only after all // other events have been processed. - if watchfd == w.closepipe[0] { - closed = true - continue + if wd == w.closepipe[0] { + return } - w.mu.Lock() - path := w.paths[watchfd] - w.mu.Unlock() + path, ok := w.watches.byWd(wd) + if debug { + internal.Debug(path.name, &kevent) + } - event := w.newEvent(path.name, mask) + // On macOS it seems that sometimes an event with Ident=0 is + // delivered, and no other flags/information beyond that, even + // though we never saw such a file descriptor. For example in + // TestWatchSymlink/277 (usually at the end, but sometimes sooner): + // + // fmt.Printf("READ: %2d %#v\n", kevent.Ident, kevent) + // unix.Kevent_t{Ident:0x2a, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)} + // unix.Kevent_t{Ident:0x0, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)} + // + // The first is a normal event, the second with Ident 0. No error + // flag, no data, no ... nothing. + // + // I read a bit through bsd/kern_event.c from the xnu source, but I + // don't really see an obvious location where this is triggered – + // this doesn't seem intentional, but idk... + // + // Technically fd 0 is a valid descriptor, so only skip it if + // there's no path, and if we're on macOS. + if !ok && kevent.Ident == 0 && runtime.GOOS == "darwin" { + continue + } + + event := w.newEvent(path.name, path.linkName, mask) if event.Has(Rename) || event.Has(Remove) { w.remove(event.Name, false) - w.mu.Lock() - delete(w.fileExists, event.Name) - w.mu.Unlock() + w.watches.markSeen(event.Name, false) } if path.isDir && event.Has(Write) && !event.Has(Remove) { - w.sendDirectoryChangeEvents(event.Name) - } else { - if !w.sendEvent(event) { - closed = true - continue - } + w.dirChange(event.Name) + } else if !w.sendEvent(event) { + return } if event.Has(Remove) { @@ -594,25 +532,34 @@ func (w *Watcher) readEvents() { // mv f1 f2 will delete f2, then create f2. if path.isDir { fileDir := filepath.Clean(event.Name) - w.mu.Lock() - _, found := w.watches[fileDir] - w.mu.Unlock() + _, found := w.watches.byPath(fileDir) if found { - err := w.sendDirectoryChangeEvents(fileDir) - if err != nil { - if !w.sendError(err) { - closed = true - } + // TODO: this branch is never triggered in any test. + // Added in d6220df (2012). + // isDir check added in 8611c35 (2016): https://github.com/fsnotify/fsnotify/pull/111 + // + // I don't really get how this can be triggered either. + // And it wasn't triggered in the patch that added it, + // either. + // + // Original also had a comment: + // make sure the directory exists before we watch for + // changes. 
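The kqueue readEvents loop above drains events in batches of ten, tolerates EINTR, and treats any event whose Ident is the closepipe read end as the quit signal. Its skeleton (readLoop and the handler callback are illustrative):

//go:build darwin || dragonfly || freebsd || netbsd || openbsd

package kqsketch

import "golang.org/x/sys/unix"

// readLoop blocks in unix.Kevent, hands each event to handle, and
// returns once the closepipe read end becomes readable, i.e. after
// Close() closed the write end.
func readLoop(kq, closepipeRead int, handle func(unix.Kevent_t)) error {
	buf := make([]unix.Kevent_t, 10)
	for {
		n, err := unix.Kevent(kq, nil, buf, nil)
		if err != nil {
			if err == unix.EINTR {
				continue // interrupted before anything arrived; retry
			}
			return err
		}
		for _, kev := range buf[:n] {
			if int(kev.Ident) == closepipeRead {
				return nil // quit message from Close()
			}
			handle(kev)
		}
	}
}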
When we do a recursive watch and perform + // rm -rf, the parent directory might have gone + // missing, ignore the missing directory and let the + // upcoming delete event remove the watch from the + // parent directory. + err := w.dirChange(fileDir) + if !w.sendError(err) { + return } } } else { - filePath := filepath.Clean(event.Name) - if fi, err := os.Lstat(filePath); err == nil { - err := w.sendFileCreatedEventIfNew(filePath, fi) - if err != nil { - if !w.sendError(err) { - closed = true - } + path := filepath.Clean(event.Name) + if fi, err := os.Lstat(path); err == nil { + err := w.sendCreateIfNew(path, fi) + if !w.sendError(err) { + return } } } @@ -622,8 +569,14 @@ func (w *Watcher) readEvents() { } // newEvent returns an platform-independent Event based on kqueue Fflags. -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *kqueue) newEvent(name, linkName string, mask uint32) Event { e := Event{Name: name} + if linkName != "" { + // If the user watched "/path/link" then emit events as "/path/link" + // rather than "/path/target". + e.Name = linkName + } + if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { e.Op |= Remove } @@ -645,8 +598,7 @@ func (w *Watcher) newEvent(name string, mask uint32) Event { } // watchDirectoryFiles to mimic inotify when adding a watch on a directory -func (w *Watcher) watchDirectoryFiles(dirPath string) error { - // Get all files +func (w *kqueue) watchDirectoryFiles(dirPath string) error { files, err := os.ReadDir(dirPath) if err != nil { return err @@ -674,9 +626,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error { } } - w.mu.Lock() - w.fileExists[cleanPath] = struct{}{} - w.mu.Unlock() + w.watches.markSeen(cleanPath, true) } return nil @@ -686,7 +636,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error { // // This functionality is to have the BSD watcher match the inotify, which sends // a create event for files created in a watched directory. -func (w *Watcher) sendDirectoryChangeEvents(dir string) error { +func (w *kqueue) dirChange(dir string) error { files, err := os.ReadDir(dir) if err != nil { // Directory no longer exists: we can ignore this safely. kqueue will @@ -694,61 +644,51 @@ func (w *Watcher) sendDirectoryChangeEvents(dir string) error { if errors.Is(err, os.ErrNotExist) { return nil } - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } for _, f := range files { fi, err := f.Info() if err != nil { - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } - err = w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi) + err = w.sendCreateIfNew(filepath.Join(dir, fi.Name()), fi) if err != nil { // Don't need to send an error if this file isn't readable. if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) { return nil } - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } } return nil } -// sendFileCreatedEvent sends a create event if the file isn't already being tracked. -func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fi os.FileInfo) (err error) { - w.mu.Lock() - _, doesExist := w.fileExists[filePath] - w.mu.Unlock() - if !doesExist { - if !w.sendEvent(Event{Name: filePath, Op: Create}) { - return +// Send a create event if the file isn't already being tracked, and start +// watching this file. 
+func (w *kqueue) sendCreateIfNew(path string, fi os.FileInfo) error { + if !w.watches.seenBefore(path) { + if !w.sendEvent(Event{Name: path, Op: Create}) { + return nil } } - // like watchDirectoryFiles (but without doing another ReadDir) - filePath, err = w.internalWatch(filePath, fi) + // Like watchDirectoryFiles, but without doing another ReadDir. + path, err := w.internalWatch(path, fi) if err != nil { return err } - - w.mu.Lock() - w.fileExists[filePath] = struct{}{} - w.mu.Unlock() - + w.watches.markSeen(path, true) return nil } -func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) { +func (w *kqueue) internalWatch(name string, fi os.FileInfo) (string, error) { if fi.IsDir() { // mimic Linux providing delete events for subdirectories, but preserve // the flags used if currently watching subdirectory - w.mu.Lock() - flags := w.dirFlags[name] - w.mu.Unlock() - - flags |= unix.NOTE_DELETE | unix.NOTE_RENAME - return w.addWatch(name, flags) + info, _ := w.watches.byPath(name) + return w.addWatch(name, info.dirFlags|unix.NOTE_DELETE|unix.NOTE_RENAME) } // watch file to mimic Linux inotify @@ -756,7 +696,7 @@ func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) { } // Register events with the queue. -func (w *Watcher) register(fds []int, flags int, fflags uint32) error { +func (w *kqueue) register(fds []int, flags int, fflags uint32) error { changes := make([]unix.Kevent_t, len(fds)) for i, fd := range fds { // SetKevent converts int to the platform-specific types. @@ -773,10 +713,21 @@ func (w *Watcher) register(fds []int, flags int, fflags uint32) error { } // read retrieves pending events, or waits until an event occurs. -func (w *Watcher) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { +func (w *kqueue) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { n, err := unix.Kevent(w.kq, nil, events, nil) if err != nil { return nil, err } return events[0:n], nil } + +func (w *kqueue) xSupports(op Op) bool { + if runtime.GOOS == "freebsd" { + //return true // Supports everything. + } + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_other.go b/vendor/github.com/fsnotify/fsnotify/backend_other.go index d34a23c015f..5eb5dbc66f2 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_other.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_other.go @@ -1,205 +1,23 @@ //go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows) -// +build appengine !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify import "errors" -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. 
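sendCreateIfNew above is how the kqueue backend imitates inotify's per-file Create events: when a watched directory changes it rescans, and only paths never seen before produce a Create. The bookkeeping alone (seenSet is an illustrative type):

package main

import "fmt"

type seenSet map[string]struct{}

// createIfNew emits a Create only for paths that aren't tracked yet,
// then marks them as seen, like sendCreateIfNew + markSeen above.
func (s seenSet) createIfNew(path string, emit func(string)) {
	if _, ok := s[path]; !ok {
		emit(path)
	}
	s[path] = struct{}{}
}

func main() {
	seen := make(seenSet)
	emit := func(p string) { fmt.Println("CREATE", p) }
	seen.createIfNew("/dir/a", emit) // CREATE /dir/a
	seen.createIfNew("/dir/a", emit) // no output: already tracked
}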
-// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). 
- // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type other struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { +func newBackend(ev chan Event, errs chan error) (backend, error) { return nil, errors.New("fsnotify not supported on the current platform") } - -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { return NewWatcher() } - -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { return nil } - -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { return nil } - -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return nil } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. 
-// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { return nil } - -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { return nil } +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { + return newBackend(ev, errs) +} +func (w *other) Close() error { return nil } +func (w *other) WatchList() []string { return nil } +func (w *other) Add(name string) error { return nil } +func (w *other) AddWith(name string, opts ...addOpt) error { return nil } +func (w *other) Remove(name string) error { return nil } +func (w *other) xSupports(op Op) bool { return false } diff --git a/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/vendor/github.com/fsnotify/fsnotify/backend_windows.go index 9bc91e5d613..c54a6308383 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_windows.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_windows.go @@ -1,12 +1,8 @@ //go:build windows -// +build windows // Windows backend based on ReadDirectoryChangesW() // // https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw -// -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -19,123 +15,15 @@ import ( "runtime" "strings" "sync" + "time" "unsafe" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/windows" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. 
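backend_other.go above is the standard build-tag fallback: every platform file provides the same constructor shape, and platforms with no backend compile a stub whose constructor errors while the methods are harmless no-ops. The pattern in miniature, assuming a pared-down backend interface (the real one lives elsewhere in the package):

package watcher

import "errors"

// backend is the per-platform surface; each platform file must provide
// newBackend with this signature so the package builds everywhere.
type backend interface {
	Add(name string) error
	Remove(name string) error
	Close() error
}

// In the real package this file would carry a build constraint such as:
// //go:build appengine || (!darwin && !freebsd && !linux && !windows && ...)

type stub struct{}

var _ backend = stub{} // compile-time check that the stub stays in sync

func newBackend() (backend, error) {
	return nil, errors.New("not supported on the current platform")
}

func (stub) Add(string) error    { return nil }
func (stub) Remove(string) error { return nil }
func (stub) Close() error        { return nil }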
-// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type readDirChangesW struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error port windows.Handle // Handle to completion port @@ -147,48 +35,40 @@ type Watcher struct { closed bool // Set to true when Close() is first called } -// NewWatcher creates a new Watcher. 
-func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(50) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(50, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0) if err != nil { return nil, os.NewSyscallError("CreateIoCompletionPort", err) } - w := &Watcher{ + w := &readDirChangesW{ + Events: ev, + Errors: errs, port: port, watches: make(watchMap), input: make(chan *input, 1), - Events: make(chan Event, sz), - Errors: make(chan error), quit: make(chan chan<- error, 1), } go w.readEvents() return w, nil } -func (w *Watcher) isClosed() bool { +func (w *readDirChangesW) isClosed() bool { w.mu.Lock() defer w.mu.Unlock() return w.closed } -func (w *Watcher) sendEvent(name string, mask uint64) bool { +func (w *readDirChangesW) sendEvent(name, renamedFrom string, mask uint64) bool { if mask == 0 { return false } event := w.newEvent(name, uint32(mask)) + event.renamedFrom = renamedFrom select { case ch := <-w.quit: w.quit <- ch @@ -198,17 +78,19 @@ func (w *Watcher) sendEvent(name string, mask uint64) bool { } // Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { +func (w *readDirChangesW) sendError(err error) bool { + if err == nil { + return true + } select { case w.Errors <- err: return true case <-w.quit: + return false } - return false } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { +func (w *readDirChangesW) Close() error { if w.isClosed() { return nil } @@ -226,57 +108,21 @@ func (w *Watcher) Close() error { return <-ch } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. 
-// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *readDirChangesW) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *readDirChangesW) AddWith(name string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name)) + } with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } if with.bufsize < 4096 { return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes") } @@ -295,18 +141,14 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { return <-in.reply } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *readDirChangesW) Remove(name string) error { if w.isClosed() { return nil } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name)) + } in := &input{ op: opRemoveWatch, @@ -320,11 +162,7 @@ func (w *Watcher) Remove(name string) error { return <-in.reply } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. 
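The advice above (watch the parent directory and filter on Event.Name, rather than watching a file directly) might look like this in practice; the config path is a hypothetical example:

```go
package main

import (
	"log"
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

func main() {
	target := filepath.Join("/etc/myapp", "config.yaml") // hypothetical file
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()
	// Watch the parent directory, not the file itself, so atomic saves
	// (write temp file, rename over the target) are still seen.
	if err := w.Add(filepath.Dir(target)); err != nil {
		log.Fatal(err)
	}
	for ev := range w.Events {
		if ev.Name != target {
			continue // event for some other file in the directory
		}
		if ev.Has(fsnotify.Write) || ev.Has(fsnotify.Create) {
			log.Println("config changed:", ev)
		}
	}
}
```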
-func (w *Watcher) WatchList() []string { +func (w *readDirChangesW) WatchList() []string { if w.isClosed() { return nil } @@ -335,7 +173,13 @@ func (w *Watcher) WatchList() []string { entries := make([]string, 0, len(w.watches)) for _, entry := range w.watches { for _, watchEntry := range entry { - entries = append(entries, watchEntry.path) + for name := range watchEntry.names { + entries = append(entries, filepath.Join(watchEntry.path, name)) + } + // the directory itself is being watched + if watchEntry.mask != 0 { + entries = append(entries, watchEntry.path) + } } } @@ -361,7 +205,7 @@ const ( sysFSIGNORED = 0x8000 ) -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *readDirChangesW) newEvent(name string, mask uint32) Event { e := Event{Name: name} if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { e.Op |= Create @@ -417,7 +261,7 @@ type ( watchMap map[uint32]indexMap ) -func (w *Watcher) wakeupReader() error { +func (w *readDirChangesW) wakeupReader() error { err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil) if err != nil { return os.NewSyscallError("PostQueuedCompletionStatus", err) @@ -425,7 +269,7 @@ func (w *Watcher) wakeupReader() error { return nil } -func (w *Watcher) getDir(pathname string) (dir string, err error) { +func (w *readDirChangesW) getDir(pathname string) (dir string, err error) { attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname)) if err != nil { return "", os.NewSyscallError("GetFileAttributes", err) @@ -439,7 +283,7 @@ func (w *Watcher) getDir(pathname string) (dir string, err error) { return } -func (w *Watcher) getIno(path string) (ino *inode, err error) { +func (w *readDirChangesW) getIno(path string) (ino *inode, err error) { h, err := windows.CreateFile(windows.StringToUTF16Ptr(path), windows.FILE_LIST_DIRECTORY, windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE, @@ -482,9 +326,8 @@ func (m watchMap) set(ino *inode, watch *watch) { } // Must run within the I/O thread. -func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error { - //pathname, recurse := recursivePath(pathname) - recurse := false +func (w *readDirChangesW) addWatch(pathname string, flags uint64, bufsize int) error { + pathname, recurse := recursivePath(pathname) dir, err := w.getDir(pathname) if err != nil { @@ -538,7 +381,7 @@ func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error { } // Must run within the I/O thread. -func (w *Watcher) remWatch(pathname string) error { +func (w *readDirChangesW) remWatch(pathname string) error { pathname, recurse := recursivePath(pathname) dir, err := w.getDir(pathname) @@ -566,11 +409,11 @@ func (w *Watcher) remWatch(pathname string) error { return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname) } if pathname == dir { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED) watch.mask = 0 } else { name := filepath.Base(pathname) - w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) + w.sendEvent(filepath.Join(watch.path, name), "", watch.names[name]&sysFSIGNORED) delete(watch.names, name) } @@ -578,23 +421,23 @@ func (w *Watcher) remWatch(pathname string) error { } // Must run within the I/O thread. 
-func (w *Watcher) deleteWatch(watch *watch) { +func (w *readDirChangesW) deleteWatch(watch *watch) { for name, mask := range watch.names { if mask&provisional == 0 { - w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) + w.sendEvent(filepath.Join(watch.path, name), "", mask&sysFSIGNORED) } delete(watch.names, name) } if watch.mask != 0 { if watch.mask&provisional == 0 { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED) } watch.mask = 0 } } // Must run within the I/O thread. -func (w *Watcher) startRead(watch *watch) error { +func (w *readDirChangesW) startRead(watch *watch) error { err := windows.CancelIo(watch.ino.handle) if err != nil { w.sendError(os.NewSyscallError("CancelIo", err)) @@ -624,7 +467,7 @@ func (w *Watcher) startRead(watch *watch) error { err := os.NewSyscallError("ReadDirectoryChanges", rdErr) if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF) err = nil } w.deleteWatch(watch) @@ -637,7 +480,7 @@ func (w *Watcher) startRead(watch *watch) error { // readEvents reads from the I/O completion port, converts the // received events into Event objects and sends them via the Events channel. // Entry point to the I/O thread. -func (w *Watcher) readEvents() { +func (w *readDirChangesW) readEvents() { var ( n uint32 key uintptr @@ -700,7 +543,7 @@ func (w *Watcher) readEvents() { } case windows.ERROR_ACCESS_DENIED: // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF) w.deleteWatch(watch) w.startRead(watch) continue @@ -733,6 +576,10 @@ func (w *Watcher) readEvents() { name := windows.UTF16ToString(buf) fullname := filepath.Join(watch.path, name) + if debug { + internal.Debug(fullname, raw.Action) + } + var mask uint64 switch raw.Action { case windows.FILE_ACTION_REMOVED: @@ -761,21 +608,22 @@ func (w *Watcher) readEvents() { } } - sendNameEvent := func() { - w.sendEvent(fullname, watch.names[name]&mask) - } if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME { - sendNameEvent() + w.sendEvent(fullname, "", watch.names[name]&mask) } if raw.Action == windows.FILE_ACTION_REMOVED { - w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) + w.sendEvent(fullname, "", watch.names[name]&sysFSIGNORED) delete(watch.names, name) } - w.sendEvent(fullname, watch.mask&w.toFSnotifyFlags(raw.Action)) + if watch.rename != "" && raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { + w.sendEvent(fullname, filepath.Join(watch.path, watch.rename), watch.mask&w.toFSnotifyFlags(raw.Action)) + } else { + w.sendEvent(fullname, "", watch.mask&w.toFSnotifyFlags(raw.Action)) + } + if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { - fullname = filepath.Join(watch.path, watch.rename) - sendNameEvent() + w.sendEvent(filepath.Join(watch.path, watch.rename), "", watch.names[name]&mask) } // Move to the next event in the buffer @@ -787,8 +635,7 @@ func (w *Watcher) readEvents() { // Error! 
if offset >= n { //lint:ignore ST1005 Windows should be capitalized - w.sendError(errors.New( - "Windows system assumed buffer larger than it is, events have likely been missed")) + w.sendError(errors.New("Windows system assumed buffer larger than it is, events have likely been missed")) break } } @@ -799,7 +646,7 @@ func (w *Watcher) readEvents() { } } -func (w *Watcher) toWindowsFlags(mask uint64) uint32 { +func (w *readDirChangesW) toWindowsFlags(mask uint64) uint32 { var m uint32 if mask&sysFSMODIFY != 0 { m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE @@ -810,7 +657,7 @@ func (w *Watcher) toWindowsFlags(mask uint64) uint32 { return m } -func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { +func (w *readDirChangesW) toFSnotifyFlags(action uint32) uint64 { switch action { case windows.FILE_ACTION_ADDED: return sysFSCREATE @@ -825,3 +672,11 @@ func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { } return 0 } + +func (w *readDirChangesW) xSupports(op Op) bool { + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go index 24c99cc4999..0760efe9160 100644 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -3,19 +3,146 @@ // // Currently supported systems: // -// Linux 2.6.32+ via inotify -// BSD, macOS via kqueue -// Windows via ReadDirectoryChangesW -// illumos via FEN +// - Linux via inotify +// - BSD, macOS via kqueue +// - Windows via ReadDirectoryChangesW +// - illumos via FEN +// +// # FSNOTIFY_DEBUG +// +// Set the FSNOTIFY_DEBUG environment variable to "1" to print debug messages to +// stderr. This can be useful to track down some problems, especially in cases +// where fsnotify is used as an indirect dependency. +// +// Every event will be printed as soon as there's something useful to print, +// with as little processing from fsnotify. +// +// Example output: +// +// FSNOTIFY_DEBUG: 11:34:23.633087586 256:IN_CREATE → "/tmp/file-1" +// FSNOTIFY_DEBUG: 11:34:23.633202319 4:IN_ATTRIB → "/tmp/file-1" +// FSNOTIFY_DEBUG: 11:34:28.989728764 512:IN_DELETE → "/tmp/file-1" package fsnotify import ( "errors" "fmt" + "os" "path/filepath" "strings" ) +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". 
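As a rough, Linux-only illustration of the instance/watch accounting described in the package docs above, an application can compare the paths it has added against the kernel's per-user watch limit. This is a sketch: WatchList only counts explicitly added paths, which approximates the inotify watch count for non-recursive use, and the /proc parsing is illustrative.

```go
package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"

	"github.com/fsnotify/fsnotify"
)

// maxUserWatches reads the per-user inotify watch limit; Linux-only.
func maxUserWatches() (int, error) {
	b, err := os.ReadFile("/proc/sys/fs/inotify/max_user_watches")
	if err != nil {
		return 0, err
	}
	return strconv.Atoi(strings.TrimSpace(string(b)))
}

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer w.Close()
	_ = w.Add(os.TempDir())

	if limit, err := maxUserWatches(); err == nil {
		fmt.Printf("using %d of %d watches\n", len(w.WatchList()), limit)
	}
}
```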
+// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # Windows notes +// +// Paths can be added as "C:\\path\\to\\dir", but forward slashes +// ("C:/path/to/dir") will also work. +// +// When a watched directory is removed it will always send an event for the +// directory itself, but may not send events for all files in that directory. +// Sometimes it will send events for all files, sometimes it will send no +// events, and often only for some files. +// +// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest +// value that is guaranteed to work with SMB filesystems. If you have many +// events in quick succession this may not be enough, and you will have to use +// [WithBufferSize] to increase the value. +type Watcher struct { + b backend + + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, and you may + // want to wait until you've stopped receiving them + // (see the dedup example in cmd/fsnotify). + // + // Some systems may send Write event for directories + // when the directory content changes. + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // when a file is truncated. On Windows it's never + // sent. 
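To make the Rename/Create pairing documented above concrete: the old path always arrives as a Rename and the new path as a Create. A fragment of bookkeeping built on that ordering, assuming the fsnotify import and a Watcher w on the directory; the "known" map is illustrative state, not part of the library:

```go
// trackRenames mirrors the Rename-then-Create ordering described above.
func trackRenames(w *fsnotify.Watcher) {
	known := make(map[string]struct{}) // illustrative bookkeeping
	for ev := range w.Events {
		switch {
		case ev.Has(fsnotify.Rename), ev.Has(fsnotify.Remove):
			delete(known, ev.Name) // old path is gone either way
		case ev.Has(fsnotify.Create):
			known[ev.Name] = struct{}{} // new path, possibly a rename target
		}
	}
}
```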
+ Events chan Event + + // Errors sends any errors. + Errors chan error +} + // Event represents a file system notification. type Event struct { // Path to the file or directory. @@ -30,6 +157,16 @@ type Event struct { // This is a bitmask and some systems may send multiple operations at once. // Use the Event.Has() method instead of comparing with ==. Op Op + + // Create events will have this set to the old path if it's a rename. This + // only works when both the source and destination are watched. It's not + // reliable when watching individual files, only directories. + // + // For example "mv /tmp/file /tmp/rename" will emit: + // + // Event{Op: Rename, Name: "/tmp/file"} + // Event{Op: Create, Name: "/tmp/rename", RenamedFrom: "/tmp/file"} + renamedFrom string } // Op describes a set of file operations. @@ -50,7 +187,7 @@ const ( // example "remove to trash" is often a rename). Remove - // The path was renamed to something else; any watched on it will be + // The path was renamed to something else; any watches on it will be // removed. Rename @@ -60,15 +197,155 @@ const ( // get triggered very frequently by some software. For example, Spotlight // indexing on macOS, anti-virus software, backup software, etc. Chmod + + // File descriptor was opened. + // + // Only works on Linux and FreeBSD. + xUnportableOpen + + // File was read from. + // + // Only works on Linux and FreeBSD. + xUnportableRead + + // File opened for writing was closed. + // + // Only works on Linux and FreeBSD. + // + // The advantage of using this over Write is that it's more reliable than + // waiting for Write events to stop. It's also faster (if you're not + // listening to Write events): copying a file of a few GB can easily + // generate tens of thousands of Write events in a short span of time. + xUnportableCloseWrite + + // File opened for reading was closed. + // + // Only works on Linux and FreeBSD. + xUnportableCloseRead ) -// Common errors that can be reported. var ( + // ErrNonExistentWatch is used when Remove() is called on a path that's not + // added. ErrNonExistentWatch = errors.New("fsnotify: can't remove non-existent watch") - ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow") - ErrClosed = errors.New("fsnotify: watcher already closed") + + // ErrClosed is used when trying to operate on a closed Watcher. + ErrClosed = errors.New("fsnotify: watcher already closed") + + // ErrEventOverflow is reported from the Errors channel when there are too + // many events: + // + // - inotify: inotify returns IN_Q_OVERFLOW – because there are too + // many queued events (the fs.inotify.max_queued_events + // sysctl can be used to increase this). + // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. + // - kqueue, fen: Not used. + ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow") + + // ErrUnsupported is returned by AddWith() when WithOps() specified an + // Unportable event that's not supported on this platform. + xErrUnsupported = errors.New("fsnotify: not supported with this backend") ) +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + ev, errs := make(chan Event), make(chan error) + b, err := newBackend(ev, errs) + if err != nil { + return nil, err + } + return &Watcher{b: b, Events: ev, Errors: errs}, nil +} + +// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events +// channel. 
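The new constructors above just wire a shared Events/Errors channel pair into the platform backend, so from the caller's side the only decision is buffered versus unbuffered. A small sketch; the buffer size is an arbitrary illustration, not a recommendation:

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func newWatcher(buffered bool) (*fsnotify.Watcher, error) {
	if buffered {
		// A userspace buffer is a last resort per the docs above;
		// prefer raising the kernel buffers where possible.
		return fsnotify.NewBufferedWatcher(4096)
	}
	return fsnotify.NewWatcher() // unbuffered: the common case
}

func main() {
	w, err := newWatcher(false)
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()
}
```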
+// +// The main use case for this is situations with a very large number of events +// where the kernel buffer size can't be increased (e.g. due to lack of +// permissions). An unbuffered Watcher will perform better for almost all use +// cases, and whenever possible you will be better off increasing the kernel +// buffers instead of adding a large userspace buffer. +func NewBufferedWatcher(sz uint) (*Watcher, error) { + ev, errs := make(chan Event), make(chan error) + b, err := newBufferedBackend(sz, ev, errs) + if err != nil { + return nil, err + } + return &Watcher{b: b, Events: ev, Errors: errs}, nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; watching it more than once is a no-op and will +// not return an error. Paths that do not yet exist on the filesystem cannot be +// watched. +// +// A watch will be automatically removed if the watched path is deleted or +// renamed. The exception is the Windows backend, which doesn't remove the +// watcher on renames. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// Returns [ErrClosed] if [Watcher.Close] was called. +// +// See [Watcher.AddWith] for a version that allows adding options. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many programs (especially editors) update files atomically: it +// will write to a temporary file which is then moved to destination, +// overwriting the original (or some variant thereof). The watcher on the +// original file is now lost, as that no longer exists. +// +// The upshot of this is that a power failure or crash won't leave a +// half-written file. +// +// Watch the parent directory and use Event.Name to filter out files you're not +// interested in. There is an example of this in cmd/fsnotify/file.go. +func (w *Watcher) Add(path string) error { return w.b.Add(path) } + +// AddWith is like [Watcher.Add], but allows adding options. When using Add() +// the defaults described below are used. +// +// Possible options are: +// +// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on +// other platforms. The default is 64K (65536 bytes). +func (w *Watcher) AddWith(path string, opts ...addOpt) error { return w.b.AddWith(path, opts...) } + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) Remove(path string) error { return w.b.Remove(path) } + +// Close removes all watches and closes the Events channel. +func (w *Watcher) Close() error { return w.b.Close() } + +// WatchList returns all paths explicitly added with [Watcher.Add] (and are not +// yet removed). +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) WatchList() []string { return w.b.WatchList() } + +// Supports reports if all the listed operations are supported by this platform. +// +// Create, Write, Remove, Rename, and Chmod are always supported. 
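WithBufferSize, described above, is the one exported option: on Windows it sizes the ReadDirectoryChangesW buffer and elsewhere it is a no-op. A fragment assuming an existing Watcher w and the log import; the share path and size are illustrative, and the Windows backend rejects sizes below 4096 bytes, as seen in AddWith earlier in this diff:

```go
// Bump the ReadDirectoryChangesW buffer for a bursty SMB share;
// this is a no-op on non-Windows backends.
if err := w.AddWith(`C:\share\logs`, fsnotify.WithBufferSize(256*1024)); err != nil {
	log.Fatal(err)
}
```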
It can only +// return false for an Op starting with Unportable. +func (w *Watcher) xSupports(op Op) bool { return w.b.xSupports(op) } + func (o Op) String() string { var b strings.Builder if o.Has(Create) { @@ -80,6 +357,18 @@ func (o Op) String() string { if o.Has(Write) { b.WriteString("|WRITE") } + if o.Has(xUnportableOpen) { + b.WriteString("|OPEN") + } + if o.Has(xUnportableRead) { + b.WriteString("|READ") + } + if o.Has(xUnportableCloseWrite) { + b.WriteString("|CLOSE_WRITE") + } + if o.Has(xUnportableCloseRead) { + b.WriteString("|CLOSE_READ") + } if o.Has(Rename) { b.WriteString("|RENAME") } @@ -100,24 +389,48 @@ func (e Event) Has(op Op) bool { return e.Op.Has(op) } // String returns a string representation of the event with their path. func (e Event) String() string { + if e.renamedFrom != "" { + return fmt.Sprintf("%-13s %q ← %q", e.Op.String(), e.Name, e.renamedFrom) + } return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name) } type ( + backend interface { + Add(string) error + AddWith(string, ...addOpt) error + Remove(string) error + WatchList() []string + Close() error + xSupports(Op) bool + } addOpt func(opt *withOpts) withOpts struct { - bufsize int + bufsize int + op Op + noFollow bool + sendCreate bool } ) +var debug = func() bool { + // Check for exactly "1" (rather than mere existence) so we can add + // options/flags in the future. I don't know if we ever want that, but it's + // nice to leave the option open. + return os.Getenv("FSNOTIFY_DEBUG") == "1" +}() + var defaultOpts = withOpts{ bufsize: 65536, // 64K + op: Create | Write | Remove | Rename | Chmod, } func getOptions(opts ...addOpt) withOpts { with := defaultOpts for _, o := range opts { - o(&with) + if o != nil { + o(&with) + } } return with } @@ -136,9 +449,44 @@ func WithBufferSize(bytes int) addOpt { return func(opt *withOpts) { opt.bufsize = bytes } } +// WithOps sets which operations to listen for. The default is [Create], +// [Write], [Remove], [Rename], and [Chmod]. +// +// Excluding operations you're not interested in can save quite a bit of CPU +// time; in some use cases there may be hundreds of thousands of useless Write +// or Chmod operations per second. +// +// This can also be used to add unportable operations not supported by all +// platforms; unportable operations all start with "Unportable": +// [UnportableOpen], [UnportableRead], [UnportableCloseWrite], and +// [UnportableCloseRead]. +// +// AddWith returns an error when using an unportable operation that's not +// supported. Use [Watcher.Support] to check for support. +func withOps(op Op) addOpt { + return func(opt *withOpts) { opt.op = op } +} + +// WithNoFollow disables following symlinks, so the symlinks themselves are +// watched. +func withNoFollow() addOpt { + return func(opt *withOpts) { opt.noFollow = true } +} + +// "Internal" option for recursive watches on inotify. +func withCreate() addOpt { + return func(opt *withOpts) { opt.sendCreate = true } +} + +var enableRecurse = false + // Check if this path is recursive (ends with "/..." or "\..."), and return the // path with the /... stripped. func recursivePath(path string) (string, bool) { + path = filepath.Clean(path) + if !enableRecurse { // Only enabled in tests for now. + return path, false + } if filepath.Base(path) == "..." 
{ return filepath.Dir(path), true } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/darwin.go b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go new file mode 100644 index 00000000000..b0eab10090d --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go @@ -0,0 +1,39 @@ +//go:build darwin + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ +func SetRlimit() { + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = l.Cur + + if n, err := syscall.SysctlUint32("kern.maxfiles"); err == nil && uint64(n) < maxfiles { + maxfiles = uint64(n) + } + + if n, err := syscall.SysctlUint32("kern.maxfilesperproc"); err == nil && uint64(n) < maxfiles { + maxfiles = uint64(n) + } +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go new file mode 100644 index 00000000000..928319fb09a --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go @@ -0,0 +1,57 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ABSOLUTE", unix.NOTE_ABSOLUTE}, + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_BACKGROUND", unix.NOTE_BACKGROUND}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_CRITICAL", unix.NOTE_CRITICAL}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXITSTATUS", unix.NOTE_EXITSTATUS}, + {"NOTE_EXIT_CSERROR", unix.NOTE_EXIT_CSERROR}, + {"NOTE_EXIT_DECRYPTFAIL", unix.NOTE_EXIT_DECRYPTFAIL}, + {"NOTE_EXIT_DETAIL", unix.NOTE_EXIT_DETAIL}, + {"NOTE_EXIT_DETAIL_MASK", unix.NOTE_EXIT_DETAIL_MASK}, + {"NOTE_EXIT_MEMORY", unix.NOTE_EXIT_MEMORY}, + {"NOTE_EXIT_REPARENTED", unix.NOTE_EXIT_REPARENTED}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_FUNLOCK", unix.NOTE_FUNLOCK}, + {"NOTE_LEEWAY", unix.NOTE_LEEWAY}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_MACHTIME", unix.NOTE_MACHTIME}, + {"NOTE_MACH_CONTINUOUS_TIME", unix.NOTE_MACH_CONTINUOUS_TIME}, + {"NOTE_NONE", unix.NOTE_NONE}, + {"NOTE_NSECONDS", unix.NOTE_NSECONDS}, + {"NOTE_OOB", unix.NOTE_OOB}, + //{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, -0x100000 (?!) 
+ {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_REAP", unix.NOTE_REAP}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_SECONDS", unix.NOTE_SECONDS}, + {"NOTE_SIGNAL", unix.NOTE_SIGNAL}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_USECONDS", unix.NOTE_USECONDS}, + {"NOTE_VM_ERROR", unix.NOTE_VM_ERROR}, + {"NOTE_VM_PRESSURE", unix.NOTE_VM_PRESSURE}, + {"NOTE_VM_PRESSURE_SUDDEN_TERMINATE", unix.NOTE_VM_PRESSURE_SUDDEN_TERMINATE}, + {"NOTE_VM_PRESSURE_TERMINATE", unix.NOTE_VM_PRESSURE_TERMINATE}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go new file mode 100644 index 00000000000..3186b0c3491 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go @@ -0,0 +1,33 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_OOB", unix.NOTE_OOB}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go new file mode 100644 index 00000000000..f69fdb930f5 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go @@ -0,0 +1,42 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ABSTIME", unix.NOTE_ABSTIME}, + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_CLOSE", unix.NOTE_CLOSE}, + {"NOTE_CLOSE_WRITE", unix.NOTE_CLOSE_WRITE}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FILE_POLL", unix.NOTE_FILE_POLL}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_MSECONDS", unix.NOTE_MSECONDS}, + {"NOTE_NSECONDS", unix.NOTE_NSECONDS}, + {"NOTE_OPEN", unix.NOTE_OPEN}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_READ", unix.NOTE_READ}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_SECONDS", unix.NOTE_SECONDS}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_USECONDS", unix.NOTE_USECONDS}, + 
{"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go new file mode 100644 index 00000000000..607e683bd73 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go @@ -0,0 +1,32 @@ +//go:build freebsd || openbsd || netbsd || dragonfly || darwin + +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, kevent *unix.Kevent_t) { + mask := uint32(kevent.Fflags) + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-60s → %q\n", + time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go new file mode 100644 index 00000000000..35c734be431 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go @@ -0,0 +1,56 @@ +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, mask, cookie uint32) { + names := []struct { + n string + m uint32 + }{ + {"IN_ACCESS", unix.IN_ACCESS}, + {"IN_ATTRIB", unix.IN_ATTRIB}, + {"IN_CLOSE", unix.IN_CLOSE}, + {"IN_CLOSE_NOWRITE", unix.IN_CLOSE_NOWRITE}, + {"IN_CLOSE_WRITE", unix.IN_CLOSE_WRITE}, + {"IN_CREATE", unix.IN_CREATE}, + {"IN_DELETE", unix.IN_DELETE}, + {"IN_DELETE_SELF", unix.IN_DELETE_SELF}, + {"IN_IGNORED", unix.IN_IGNORED}, + {"IN_ISDIR", unix.IN_ISDIR}, + {"IN_MODIFY", unix.IN_MODIFY}, + {"IN_MOVE", unix.IN_MOVE}, + {"IN_MOVED_FROM", unix.IN_MOVED_FROM}, + {"IN_MOVED_TO", unix.IN_MOVED_TO}, + {"IN_MOVE_SELF", unix.IN_MOVE_SELF}, + {"IN_OPEN", unix.IN_OPEN}, + {"IN_Q_OVERFLOW", unix.IN_Q_OVERFLOW}, + {"IN_UNMOUNT", unix.IN_UNMOUNT}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + var c string + if cookie > 0 { + c = fmt.Sprintf("(cookie: %d) ", cookie) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-30s → %s%q\n", + time.Now().Format("15:04:05.000000000"), strings.Join(l, "|"), c, name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go new file mode 100644 index 00000000000..e5b3b6f6943 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go @@ -0,0 +1,25 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go 
b/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go new file mode 100644 index 00000000000..1dd455bc5a4 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go @@ -0,0 +1,28 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + // {"NOTE_CHANGE", unix.NOTE_CHANGE}, // Not on 386? + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EOF", unix.NOTE_EOF}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRUNCATE", unix.NOTE_TRUNCATE}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go new file mode 100644 index 00000000000..f1b2e73bd5b --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go @@ -0,0 +1,45 @@ +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, mask int32) { + names := []struct { + n string + m int32 + }{ + {"FILE_ACCESS", unix.FILE_ACCESS}, + {"FILE_MODIFIED", unix.FILE_MODIFIED}, + {"FILE_ATTRIB", unix.FILE_ATTRIB}, + {"FILE_TRUNC", unix.FILE_TRUNC}, + {"FILE_NOFOLLOW", unix.FILE_NOFOLLOW}, + {"FILE_DELETE", unix.FILE_DELETE}, + {"FILE_RENAME_TO", unix.FILE_RENAME_TO}, + {"FILE_RENAME_FROM", unix.FILE_RENAME_FROM}, + {"UNMOUNTED", unix.UNMOUNTED}, + {"MOUNTEDOVER", unix.MOUNTEDOVER}, + {"FILE_EXCEPTION", unix.FILE_EXCEPTION}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-30s → %q\n", + time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go new file mode 100644 index 00000000000..52bf4ce53b5 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go @@ -0,0 +1,40 @@ +package internal + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "golang.org/x/sys/windows" +) + +func Debug(name string, mask uint32) { + names := []struct { + n string + m uint32 + }{ + {"FILE_ACTION_ADDED", windows.FILE_ACTION_ADDED}, + {"FILE_ACTION_REMOVED", windows.FILE_ACTION_REMOVED}, + {"FILE_ACTION_MODIFIED", windows.FILE_ACTION_MODIFIED}, + {"FILE_ACTION_RENAMED_OLD_NAME", windows.FILE_ACTION_RENAMED_OLD_NAME}, + {"FILE_ACTION_RENAMED_NEW_NAME", windows.FILE_ACTION_RENAMED_NEW_NAME}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-65s → %q\n", + time.Now().Format("15:04:05.000000000"), strings.Join(l, " | "), filepath.ToSlash(name)) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go 
b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go new file mode 100644 index 00000000000..547df1df84b --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go @@ -0,0 +1,31 @@ +//go:build freebsd + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +func SetRlimit() { + // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = uint64(l.Cur) +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, uint64(dev)) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/internal.go b/vendor/github.com/fsnotify/fsnotify/internal/internal.go new file mode 100644 index 00000000000..7daa45e19ee --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/internal.go @@ -0,0 +1,2 @@ +// Package internal contains some helpers. +package internal diff --git a/vendor/github.com/fsnotify/fsnotify/internal/unix.go b/vendor/github.com/fsnotify/fsnotify/internal/unix.go new file mode 100644 index 00000000000..30976ce9739 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/unix.go @@ -0,0 +1,31 @@ +//go:build !windows && !darwin && !freebsd + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +func SetRlimit() { + // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = uint64(l.Cur) +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/unix2.go b/vendor/github.com/fsnotify/fsnotify/internal/unix2.go new file mode 100644 index 00000000000..37dfeddc289 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/unix2.go @@ -0,0 +1,7 @@ +//go:build !windows + +package internal + +func HasPrivilegesForSymlink() bool { + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/windows.go b/vendor/github.com/fsnotify/fsnotify/internal/windows.go new file mode 100644 index 00000000000..a72c6495490 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/windows.go @@ -0,0 +1,41 @@ +//go:build windows + +package internal + +import ( + "errors" + + "golang.org/x/sys/windows" +) + +// Just a dummy. 
+var ( + SyscallEACCES = errors.New("dummy") + UnixEACCES = errors.New("dummy") +) + +func SetRlimit() {} +func Maxfiles() uint64 { return 1<<64 - 1 } +func Mkfifo(path string, mode uint32) error { return errors.New("no FIFOs on Windows") } +func Mknod(path string, mode uint32, dev int) error { return errors.New("no device nodes on Windows") } + +func HasPrivilegesForSymlink() bool { + var sid *windows.SID + err := windows.AllocateAndInitializeSid( + &windows.SECURITY_NT_AUTHORITY, + 2, + windows.SECURITY_BUILTIN_DOMAIN_RID, + windows.DOMAIN_ALIAS_RID_ADMINS, + 0, 0, 0, 0, 0, 0, + &sid) + if err != nil { + return false + } + defer windows.FreeSid(sid) + token := windows.Token(0) + member, err := token.IsMember(sid) + if err != nil { + return false + } + return member || token.IsElevated() +} diff --git a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh deleted file mode 100644 index 99012ae6539..00000000000 --- a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh +++ /dev/null @@ -1,259 +0,0 @@ -#!/usr/bin/env zsh -[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1 -setopt err_exit no_unset pipefail extended_glob - -# Simple script to update the godoc comments on all watchers so you don't need -# to update the same comment 5 times. - -watcher=$(</tmp/x - print -r -- $cmt >>/tmp/x - tail -n+$(( end + 1 )) $file >>/tmp/x - mv /tmp/x $file - done -} - -set-cmt '^type Watcher struct ' $watcher -set-cmt '^func NewWatcher(' $new -set-cmt '^func NewBufferedWatcher(' $newbuffered -set-cmt '^func (w \*Watcher) Add(' $add -set-cmt '^func (w \*Watcher) AddWith(' $addwith -set-cmt '^func (w \*Watcher) Remove(' $remove -set-cmt '^func (w \*Watcher) Close(' $close -set-cmt '^func (w \*Watcher) WatchList(' $watchlist -set-cmt '^[[:space:]]*Events *chan Event$' $events -set-cmt '^[[:space:]]*Errors *chan error$' $errors diff --git a/vendor/github.com/fsnotify/fsnotify/system_bsd.go b/vendor/github.com/fsnotify/fsnotify/system_bsd.go index 4322b0b8855..f65e8fe3edc 100644 --- a/vendor/github.com/fsnotify/fsnotify/system_bsd.go +++ b/vendor/github.com/fsnotify/fsnotify/system_bsd.go @@ -1,5 +1,4 @@ //go:build freebsd || openbsd || netbsd || dragonfly -// +build freebsd openbsd netbsd dragonfly package fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/system_darwin.go b/vendor/github.com/fsnotify/fsnotify/system_darwin.go index 5da5ffa78fe..a29fc7aab62 100644 --- a/vendor/github.com/fsnotify/fsnotify/system_darwin.go +++ b/vendor/github.com/fsnotify/fsnotify/system_darwin.go @@ -1,5 +1,4 @@ //go:build darwin -// +build darwin package fsnotify diff --git a/vendor/github.com/go-kit/kit/LICENSE b/vendor/github.com/go-kit/kit/LICENSE deleted file mode 100644 index 9d83342acdc..00000000000 --- a/vendor/github.com/go-kit/kit/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Peter Bourgon - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/vendor/github.com/go-kit/kit/metrics/README.md b/vendor/github.com/go-kit/kit/metrics/README.md deleted file mode 100644 index 5aa791a750d..00000000000 --- a/vendor/github.com/go-kit/kit/metrics/README.md +++ /dev/null @@ -1,98 +0,0 @@ -# package metrics - -`package metrics` provides a set of uniform interfaces for service instrumentation. -It has - [counters](http://prometheus.io/docs/concepts/metric_types/#counter), - [gauges](http://prometheus.io/docs/concepts/metric_types/#gauge), and - [histograms](http://prometheus.io/docs/concepts/metric_types/#histogram), -and provides adapters to popular metrics packages, like - [expvar](https://golang.org/pkg/expvar), - [StatsD](https://github.com/etsy/statsd), and - [Prometheus](https://prometheus.io). - -## Rationale - -Code instrumentation is absolutely essential to achieve - [observability](https://speakerdeck.com/mattheath/observability-in-micro-service-architectures) - into a distributed system. -Metrics and instrumentation tools have coalesced around a few well-defined idioms. -`package metrics` provides a common, minimal interface those idioms for service authors. - -## Usage - -A simple counter, exported via expvar. - -```go -import ( - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/expvar" -) - -func main() { - var myCount metrics.Counter - myCount = expvar.NewCounter("my_count") - myCount.Add(1) -} -``` - -A histogram for request duration, - exported via a Prometheus summary with dynamically-computed quantiles. - -```go -import ( - "time" - - stdprometheus "github.com/prometheus/client_golang/prometheus" - - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/prometheus" -) - -func main() { - var dur metrics.Histogram = prometheus.NewSummaryFrom(stdprometheus.SummaryOpts{ - Namespace: "myservice", - Subsystem: "api", - Name: "request_duration_seconds", - Help: "Total time spent serving requests.", - }, []string{}) - // ... -} - -func handleRequest(dur metrics.Histogram) { - defer func(begin time.Time) { dur.Observe(time.Since(begin).Seconds()) }(time.Now()) - // handle request -} -``` - -A gauge for the number of goroutines currently running, exported via StatsD. - -```go -import ( - "context" - "net" - "os" - "runtime" - "time" - - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/statsd" -) - -func main() { - statsd := statsd.New("foo_svc.", log.NewNopLogger()) - report := time.NewTicker(5 * time.Second) - defer report.Stop() - go statsd.SendLoop(context.Background(), report.C, "tcp", "statsd.internal:8125") - goroutines := statsd.NewGauge("goroutine_count") - go exportGoroutines(goroutines) - // ... -} - -func exportGoroutines(g metrics.Gauge) { - for range time.Tick(time.Second) { - g.Set(float64(runtime.NumGoroutine())) - } -} -``` - -For more information, see [the package documentation](https://godoc.org/github.com/go-kit/kit/metrics). 
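The README removed above leans on passing the metric interfaces, not concrete backends, into constructors. A compact sketch combining its handleRequest idiom with the constructor-injection guidance in the package docs that follow, using the in-memory generic backend; the API, NewAPI, and ServeFoo names are illustrative:

```go
package main

import (
	"fmt"
	"time"

	"github.com/go-kit/kit/metrics"
	"github.com/go-kit/kit/metrics/generic"
)

// API takes the interface type, not a concrete backend, so the
// instrumentation system can be swapped in func main.
type API struct{ latency metrics.Histogram }

func NewAPI(latency metrics.Histogram) *API { return &API{latency: latency} }

func (a *API) ServeFoo() {
	defer func(begin time.Time) {
		a.latency.Observe(time.Since(begin).Seconds())
	}(time.Now())
	time.Sleep(10 * time.Millisecond) // stand-in for real work
}

func main() {
	// generic is the in-memory backend; 50 buckets is its suggested default.
	latency := generic.NewHistogram("request_latency_seconds", 50)
	api := NewAPI(latency)
	api.ServeFoo()
	fmt.Printf("p50: %.4fs\n", latency.Quantile(0.50))
}
```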
diff --git a/vendor/github.com/go-kit/kit/metrics/doc.go b/vendor/github.com/go-kit/kit/metrics/doc.go deleted file mode 100644 index 25cda4f7c81..00000000000 --- a/vendor/github.com/go-kit/kit/metrics/doc.go +++ /dev/null @@ -1,97 +0,0 @@ -// Package metrics provides a framework for application instrumentation. It's -// primarily designed to help you get started with good and robust -// instrumentation, and to help you migrate from a less-capable system like -// Graphite to a more-capable system like Prometheus. If your organization has -// already standardized on an instrumentation system like Prometheus, and has no -// plans to change, it may make sense to use that system's instrumentation -// library directly. -// -// This package provides three core metric abstractions (Counter, Gauge, and -// Histogram) and implementations for almost all common instrumentation -// backends. Each metric has an observation method (Add, Set, or Observe, -// respectively) used to record values, and a With method to "scope" the -// observation by various parameters. For example, you might have a Histogram to -// record request durations, parameterized by the method that's being called. -// -// var requestDuration metrics.Histogram -// // ... -// requestDuration.With("method", "MyMethod").Observe(time.Since(begin)) -// -// This allows a single high-level metrics object (requestDuration) to work with -// many code paths somewhat dynamically. The concept of With is fully supported -// in some backends like Prometheus, and not supported in other backends like -// Graphite. So, With may be a no-op, depending on the concrete implementation -// you choose. Please check the implementation to know for sure. For -// implementations that don't provide With, it's necessary to fully parameterize -// each metric in the metric name, e.g. -// -// // Statsd -// c := statsd.NewCounter("request_duration_MyMethod_200") -// c.Add(1) -// -// // Prometheus -// c := prometheus.NewCounter(stdprometheus.CounterOpts{ -// Name: "request_duration", -// ... -// }, []string{"method", "status_code"}) -// c.With("method", "MyMethod", "status_code", strconv.Itoa(code)).Add(1) -// -// Usage -// -// Metrics are dependencies, and should be passed to the components that need -// them in the same way you'd construct and pass a database handle, or reference -// to another component. Metrics should *not* be created in the global scope. -// Instead, instantiate metrics in your func main, using whichever concrete -// implementation is appropriate for your organization. -// -// latency := prometheus.NewSummaryFrom(stdprometheus.SummaryOpts{ -// Namespace: "myteam", -// Subsystem: "foosvc", -// Name: "request_latency_seconds", -// Help: "Incoming request latency in seconds.", -// }, []string{"method", "status_code"}) -// -// Write your components to take the metrics they will use as parameters to -// their constructors. Use the interface types, not the concrete types. That is, -// -// // NewAPI takes metrics.Histogram, not *prometheus.Summary -// func NewAPI(s Store, logger log.Logger, latency metrics.Histogram) *API { -// // ... -// } -// -// func (a *API) ServeFoo(w http.ResponseWriter, r *http.Request) { -// begin := time.Now() -// // ... -// a.latency.Observe(time.Since(begin).Seconds()) -// } -// -// Finally, pass the metrics as dependencies when building your object graph. -// This should happen in func main, not in the global scope. 
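A fragment of the With scoping shown above, assuming imports of "time" and "github.com/go-kit/kit/metrics"; dur is an injected metrics.Histogram and the names are illustrative:

```go
// Observe a request duration scoped by a "method" label. With is a
// no-op on backends that don't support labels, per the docs above.
func handle(dur metrics.Histogram, method string) {
	defer func(begin time.Time) {
		dur.With("method", method).Observe(time.Since(begin).Seconds())
	}(time.Now())
	// ... handle the request ...
}
```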
-// -// api := NewAPI(store, logger, latency) -// http.ListenAndServe("/", api) -// -// Note that metrics are "write-only" interfaces. -// -// Implementation details -// -// All metrics are safe for concurrent use. Considerable design influence has -// been taken from https://github.com/codahale/metrics and -// https://prometheus.io. -// -// Each telemetry system has different semantics for label values, push vs. -// pull, support for histograms, etc. These properties influence the design of -// their respective packages. This table attempts to summarize the key points of -// distinction. -// -// SYSTEM DIM COUNTERS GAUGES HISTOGRAMS -// dogstatsd n batch, push-aggregate batch, push-aggregate native, batch, push-each -// statsd 1 batch, push-aggregate batch, push-aggregate native, batch, push-each -// graphite 1 batch, push-aggregate batch, push-aggregate synthetic, batch, push-aggregate -// expvar 1 atomic atomic synthetic, batch, in-place expose -// influx n custom custom custom -// prometheus n native native native -// pcp 1 native native native -// cloudwatch n batch push-aggregate batch push-aggregate synthetic, batch, push-aggregate -// -package metrics diff --git a/vendor/github.com/go-kit/kit/metrics/expvar/expvar.go b/vendor/github.com/go-kit/kit/metrics/expvar/expvar.go deleted file mode 100644 index ce6f3b836a0..00000000000 --- a/vendor/github.com/go-kit/kit/metrics/expvar/expvar.go +++ /dev/null @@ -1,94 +0,0 @@ -// Package expvar provides expvar backends for metrics. -// Label values are not supported. -package expvar - -import ( - "expvar" - "sync" - - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/generic" -) - -// Counter implements the counter metric with an expvar float. -// Label values are not supported. -type Counter struct { - f *expvar.Float -} - -// NewCounter creates an expvar Float with the given name, and returns an object -// that implements the Counter interface. -func NewCounter(name string) *Counter { - return &Counter{ - f: expvar.NewFloat(name), - } -} - -// With is a no-op. -func (c *Counter) With(labelValues ...string) metrics.Counter { return c } - -// Add implements Counter. -func (c *Counter) Add(delta float64) { c.f.Add(delta) } - -// Gauge implements the gauge metric with an expvar float. -// Label values are not supported. -type Gauge struct { - f *expvar.Float -} - -// NewGauge creates an expvar Float with the given name, and returns an object -// that implements the Gauge interface. -func NewGauge(name string) *Gauge { - return &Gauge{ - f: expvar.NewFloat(name), - } -} - -// With is a no-op. -func (g *Gauge) With(labelValues ...string) metrics.Gauge { return g } - -// Set implements Gauge. -func (g *Gauge) Set(value float64) { g.f.Set(value) } - -// Add implements metrics.Gauge. -func (g *Gauge) Add(delta float64) { g.f.Add(delta) } - -// Histogram implements the histogram metric with a combination of the generic -// Histogram object and several expvar Floats, one for each of the 50th, 90th, -// 95th, and 99th quantiles of observed values, with the quantile attached to -// the name as a suffix. Label values are not supported. -type Histogram struct { - mtx sync.Mutex - h *generic.Histogram - p50 *expvar.Float - p90 *expvar.Float - p95 *expvar.Float - p99 *expvar.Float -} - -// NewHistogram returns a Histogram object with the given name and number of -// buckets in the underlying histogram object. 50 is a good default number of -// buckets. 
-func NewHistogram(name string, buckets int) *Histogram { - return &Histogram{ - h: generic.NewHistogram(name, buckets), - p50: expvar.NewFloat(name + ".p50"), - p90: expvar.NewFloat(name + ".p90"), - p95: expvar.NewFloat(name + ".p95"), - p99: expvar.NewFloat(name + ".p99"), - } -} - -// With is a no-op. -func (h *Histogram) With(labelValues ...string) metrics.Histogram { return h } - -// Observe implements Histogram. -func (h *Histogram) Observe(value float64) { - h.mtx.Lock() - defer h.mtx.Unlock() - h.h.Observe(value) - h.p50.Set(h.h.Quantile(0.50)) - h.p90.Set(h.h.Quantile(0.90)) - h.p95.Set(h.h.Quantile(0.95)) - h.p99.Set(h.h.Quantile(0.99)) -} diff --git a/vendor/github.com/go-kit/kit/metrics/generic/generic.go b/vendor/github.com/go-kit/kit/metrics/generic/generic.go deleted file mode 100644 index 39216c04f09..00000000000 --- a/vendor/github.com/go-kit/kit/metrics/generic/generic.go +++ /dev/null @@ -1,247 +0,0 @@ -// Package generic implements generic versions of each of the metric types. They -// can be embedded by other implementations, and converted to specific formats -// as necessary. -package generic - -import ( - "fmt" - "io" - "math" - "sync" - "sync/atomic" - - "github.com/VividCortex/gohistogram" - - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/internal/lv" -) - -// Counter is an in-memory implementation of a Counter. -type Counter struct { - bits uint64 // bits has to be the first word in order to be 64-aligned on 32-bit - Name string - lvs lv.LabelValues -} - -// NewCounter returns a new, usable Counter. -func NewCounter(name string) *Counter { - return &Counter{ - Name: name, - } -} - -// With implements Counter. -func (c *Counter) With(labelValues ...string) metrics.Counter { - return &Counter{ - Name: c.Name, - bits: atomic.LoadUint64(&c.bits), - lvs: c.lvs.With(labelValues...), - } -} - -// Add implements Counter. -func (c *Counter) Add(delta float64) { - for { - var ( - old = atomic.LoadUint64(&c.bits) - newf = math.Float64frombits(old) + delta - new = math.Float64bits(newf) - ) - if atomic.CompareAndSwapUint64(&c.bits, old, new) { - break - } - } -} - -// Value returns the current value of the counter. -func (c *Counter) Value() float64 { - return math.Float64frombits(atomic.LoadUint64(&c.bits)) -} - -// ValueReset returns the current value of the counter, and resets it to zero. -// This is useful for metrics backends whose counter aggregations expect deltas, -// like Graphite. -func (c *Counter) ValueReset() float64 { - for { - var ( - old = atomic.LoadUint64(&c.bits) - newf = 0.0 - new = math.Float64bits(newf) - ) - if atomic.CompareAndSwapUint64(&c.bits, old, new) { - return math.Float64frombits(old) - } - } -} - -// LabelValues returns the set of label values attached to the counter. -func (c *Counter) LabelValues() []string { - return c.lvs -} - -// Gauge is an in-memory implementation of a Gauge. -type Gauge struct { - bits uint64 // bits has to be the first word in order to be 64-aligned on 32-bit - Name string - lvs lv.LabelValues -} - -// NewGauge returns a new, usable Gauge. -func NewGauge(name string) *Gauge { - return &Gauge{ - Name: name, - } -} - -// With implements Gauge. -func (g *Gauge) With(labelValues ...string) metrics.Gauge { - return &Gauge{ - Name: g.Name, - bits: atomic.LoadUint64(&g.bits), - lvs: g.lvs.With(labelValues...), - } -} - -// Set implements Gauge. -func (g *Gauge) Set(value float64) { - atomic.StoreUint64(&g.bits, math.Float64bits(value)) -} - -// Add implements metrics.Gauge. 
-func (g *Gauge) Add(delta float64) { - for { - var ( - old = atomic.LoadUint64(&g.bits) - newf = math.Float64frombits(old) + delta - new = math.Float64bits(newf) - ) - if atomic.CompareAndSwapUint64(&g.bits, old, new) { - break - } - } -} - -// Value returns the current value of the gauge. -func (g *Gauge) Value() float64 { - return math.Float64frombits(atomic.LoadUint64(&g.bits)) -} - -// LabelValues returns the set of label values attached to the gauge. -func (g *Gauge) LabelValues() []string { - return g.lvs -} - -// Histogram is an in-memory implementation of a streaming histogram, based on -// VividCortex/gohistogram. It dynamically computes quantiles, so it's not -// suitable for aggregation. -type Histogram struct { - Name string - lvs lv.LabelValues - h *safeHistogram -} - -// NewHistogram returns a numeric histogram based on VividCortex/gohistogram. A -// good default value for buckets is 50. -func NewHistogram(name string, buckets int) *Histogram { - return &Histogram{ - Name: name, - h: &safeHistogram{Histogram: gohistogram.NewHistogram(buckets)}, - } -} - -// With implements Histogram. -func (h *Histogram) With(labelValues ...string) metrics.Histogram { - return &Histogram{ - Name: h.Name, - lvs: h.lvs.With(labelValues...), - h: h.h, - } -} - -// Observe implements Histogram. -func (h *Histogram) Observe(value float64) { - h.h.Lock() - defer h.h.Unlock() - h.h.Add(value) -} - -// Quantile returns the value of the quantile q, 0.0 < q < 1.0. -func (h *Histogram) Quantile(q float64) float64 { - h.h.RLock() - defer h.h.RUnlock() - return h.h.Quantile(q) -} - -// LabelValues returns the set of label values attached to the histogram. -func (h *Histogram) LabelValues() []string { - return h.lvs -} - -// Print writes a string representation of the histogram to the passed writer. -// Useful for printing to a terminal. -func (h *Histogram) Print(w io.Writer) { - h.h.RLock() - defer h.h.RUnlock() - fmt.Fprint(w, h.h.String()) -} - -// safeHistogram exists as gohistogram.Histogram is not goroutine-safe. -type safeHistogram struct { - sync.RWMutex - gohistogram.Histogram -} - -// Bucket is a range in a histogram which aggregates observations. -type Bucket struct { - From, To, Count int64 -} - -// Quantile is a pair of a quantile (0..100) and its observed maximum value. -type Quantile struct { - Quantile int // 0..100 - Value int64 -} - -// SimpleHistogram is an in-memory implementation of a Histogram. It only tracks -// an approximate moving average, so is likely too naïve for many use cases. -type SimpleHistogram struct { - mtx sync.RWMutex - lvs lv.LabelValues - avg float64 - n uint64 -} - -// NewSimpleHistogram returns a SimpleHistogram, ready for observations. -func NewSimpleHistogram() *SimpleHistogram { - return &SimpleHistogram{} -} - -// With implements Histogram. -func (h *SimpleHistogram) With(labelValues ...string) metrics.Histogram { - return &SimpleHistogram{ - lvs: h.lvs.With(labelValues...), - avg: h.avg, - n: h.n, - } -} - -// Observe implements Histogram. -func (h *SimpleHistogram) Observe(value float64) { - h.mtx.Lock() - defer h.mtx.Unlock() - h.n++ - h.avg -= h.avg / float64(h.n) - h.avg += value / float64(h.n) -} - -// ApproximateMovingAverage returns the approximate moving average of observations. -func (h *SimpleHistogram) ApproximateMovingAverage() float64 { - h.mtx.RLock() - defer h.mtx.RUnlock() - return h.avg -} - -// LabelValues returns the set of label values attached to the histogram. 
-func (h *SimpleHistogram) LabelValues() []string { - return h.lvs -} diff --git a/vendor/github.com/go-kit/kit/metrics/internal/lv/labelvalues.go b/vendor/github.com/go-kit/kit/metrics/internal/lv/labelvalues.go deleted file mode 100644 index 8bb1ba09414..00000000000 --- a/vendor/github.com/go-kit/kit/metrics/internal/lv/labelvalues.go +++ /dev/null @@ -1,14 +0,0 @@ -package lv - -// LabelValues is a type alias that provides validation on its With method. -// Metrics may include it as a member to help them satisfy With semantics and -// save some code duplication. -type LabelValues []string - -// With validates the input, and returns a new aggregate labelValues. -func (lvs LabelValues) With(labelValues ...string) LabelValues { - if len(labelValues)%2 != 0 { - labelValues = append(labelValues, "unknown") - } - return append(lvs, labelValues...) -} diff --git a/vendor/github.com/go-kit/kit/metrics/internal/lv/space.go b/vendor/github.com/go-kit/kit/metrics/internal/lv/space.go deleted file mode 100644 index 371964a35ae..00000000000 --- a/vendor/github.com/go-kit/kit/metrics/internal/lv/space.go +++ /dev/null @@ -1,145 +0,0 @@ -package lv - -import "sync" - -// NewSpace returns an N-dimensional vector space. -func NewSpace() *Space { - return &Space{} -} - -// Space represents an N-dimensional vector space. Each name and unique label -// value pair establishes a new dimension and point within that dimension. Order -// matters, i.e. [a=1 b=2] identifies a different timeseries than [b=2 a=1]. -type Space struct { - mtx sync.RWMutex - nodes map[string]*node -} - -// Observe locates the time series identified by the name and label values in -// the vector space, and appends the value to the list of observations. -func (s *Space) Observe(name string, lvs LabelValues, value float64) { - s.nodeFor(name).observe(lvs, value) -} - -// Add locates the time series identified by the name and label values in -// the vector space, and appends the delta to the last value in the list of -// observations. -func (s *Space) Add(name string, lvs LabelValues, delta float64) { - s.nodeFor(name).add(lvs, delta) -} - -// Walk traverses the vector space and invokes fn for each non-empty time series -// which is encountered. Return false to abort the traversal. -func (s *Space) Walk(fn func(name string, lvs LabelValues, observations []float64) bool) { - s.mtx.RLock() - defer s.mtx.RUnlock() - for name, node := range s.nodes { - f := func(lvs LabelValues, observations []float64) bool { return fn(name, lvs, observations) } - if !node.walk(LabelValues{}, f) { - return - } - } -} - -// Reset empties the current space and returns a new Space with the old -// contents. Reset a Space to get an immutable copy suitable for walking. -func (s *Space) Reset() *Space { - s.mtx.Lock() - defer s.mtx.Unlock() - n := NewSpace() - n.nodes, s.nodes = s.nodes, n.nodes - return n -} - -func (s *Space) nodeFor(name string) *node { - s.mtx.Lock() - defer s.mtx.Unlock() - if s.nodes == nil { - s.nodes = map[string]*node{} - } - n, ok := s.nodes[name] - if !ok { - n = &node{} - s.nodes[name] = n - } - return n -} - -// node exists at a specific point in the N-dimensional vector space of all -// possible label values. The node collects observations and has child nodes -// with greater specificity. 
-type node struct { - mtx sync.RWMutex - observations []float64 - children map[pair]*node -} - -type pair struct{ label, value string } - -func (n *node) observe(lvs LabelValues, value float64) { - n.mtx.Lock() - defer n.mtx.Unlock() - if len(lvs) <= 0 { - n.observations = append(n.observations, value) - return - } - if len(lvs) < 2 { - panic("too few LabelValues; programmer error!") - } - head, tail := pair{lvs[0], lvs[1]}, lvs[2:] - if n.children == nil { - n.children = map[pair]*node{} - } - child, ok := n.children[head] - if !ok { - child = &node{} - n.children[head] = child - } - child.observe(tail, value) -} - -func (n *node) add(lvs LabelValues, delta float64) { - n.mtx.Lock() - defer n.mtx.Unlock() - if len(lvs) <= 0 { - var value float64 - if len(n.observations) > 0 { - value = last(n.observations) + delta - } else { - value = delta - } - n.observations = append(n.observations, value) - return - } - if len(lvs) < 2 { - panic("too few LabelValues; programmer error!") - } - head, tail := pair{lvs[0], lvs[1]}, lvs[2:] - if n.children == nil { - n.children = map[pair]*node{} - } - child, ok := n.children[head] - if !ok { - child = &node{} - n.children[head] = child - } - child.add(tail, delta) -} - -func (n *node) walk(lvs LabelValues, fn func(LabelValues, []float64) bool) bool { - n.mtx.RLock() - defer n.mtx.RUnlock() - if len(n.observations) > 0 && !fn(lvs, n.observations) { - return false - } - for p, child := range n.children { - if !child.walk(append(lvs, p.label, p.value), fn) { - return false - } - } - return true -} - -func last(a []float64) float64 { - return a[len(a)-1] -} diff --git a/vendor/github.com/go-kit/kit/metrics/metrics.go b/vendor/github.com/go-kit/kit/metrics/metrics.go deleted file mode 100644 index a7ba1b1fe3f..00000000000 --- a/vendor/github.com/go-kit/kit/metrics/metrics.go +++ /dev/null @@ -1,25 +0,0 @@ -package metrics - -// Counter describes a metric that accumulates values monotonically. -// An example of a counter is the number of received HTTP requests. -type Counter interface { - With(labelValues ...string) Counter - Add(delta float64) -} - -// Gauge describes a metric that takes specific values over time. -// An example of a gauge is the current depth of a job queue. -type Gauge interface { - With(labelValues ...string) Gauge - Set(value float64) - Add(delta float64) -} - -// Histogram describes a metric that takes repeated observations of the same -// kind of thing, and produces a statistical summary of those observations, -// typically expressed as quantiles or buckets. An example of a histogram is -// HTTP request latencies. -type Histogram interface { - With(labelValues ...string) Histogram - Observe(value float64) -} diff --git a/vendor/github.com/go-kit/kit/metrics/timer.go b/vendor/github.com/go-kit/kit/metrics/timer.go deleted file mode 100644 index e12d9cd5c49..00000000000 --- a/vendor/github.com/go-kit/kit/metrics/timer.go +++ /dev/null @@ -1,36 +0,0 @@ -package metrics - -import "time" - -// Timer acts as a stopwatch, sending observations to a wrapped histogram. -// It's a bit of helpful syntax sugar for h.Observe(time.Since(x)). -type Timer struct { - h Histogram - t time.Time - u time.Duration -} - -// NewTimer wraps the given histogram and records the current time. -func NewTimer(h Histogram) *Timer { - return &Timer{ - h: h, - t: time.Now(), - u: time.Second, - } -} - -// ObserveDuration captures the number of seconds since the timer was -// constructed, and forwards that observation to the histogram. 
-func (t *Timer) ObserveDuration() {
-	d := float64(time.Since(t.t).Nanoseconds()) / float64(t.u)
-	if d < 0 {
-		d = 0
-	}
-	t.h.Observe(d)
-}
-
-// Unit sets the unit of the float64 emitted by the timer.
-// By default, the timer emits seconds.
-func (t *Timer) Unit(u time.Duration) {
-	t.u = u
-}
diff --git a/vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md b/vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md
index ae634d1cc08..afd44e5f5fc 100644
--- a/vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md
+++ b/vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md
@@ -1,3 +1,6 @@
+> [!WARNING]
+> As of v2 of this library, the change log can be found in GitHub releases.
+
 ## 1.5.1
 
 * Wrap errors so they're compatible with `errors.Is` and `errors.As` [GH-282]
diff --git a/vendor/github.com/go-viper/mapstructure/v2/README.md b/vendor/github.com/go-viper/mapstructure/v2/README.md
index 2b28db89489..dd5ec69ddf7 100644
--- a/vendor/github.com/go-viper/mapstructure/v2/README.md
+++ b/vendor/github.com/go-viper/mapstructure/v2/README.md
@@ -19,6 +19,27 @@ structure.
 go get github.com/go-viper/mapstructure/v2
 ```
 
+## Migrating from `github.com/mitchellh/mapstructure`
+
+[@mitchellh](https://github.com/mitchellh) announced his intent to archive some of his unmaintained projects (see [here](https://gist.github.com/mitchellh/90029601268e59a29e64e55bab1c5bdc) and [here](https://github.com/mitchellh/mapstructure/issues/349)). This repository achieved the "blessed fork" status.
+
+You can migrate to this package by changing your import paths in your Go files to `github.com/go-viper/mapstructure/v2`.
+The API is the same, so you don't need to change anything else.
+
+Here is a script that can help you with the migration:
+
+```shell
+sed -i 's/github.com\/mitchellh\/mapstructure/github.com\/go-viper\/mapstructure\/v2/g' $(find . -type f -name '*.go')
+```
+
+If you need more time to migrate your code, that is absolutely fine.
+
+Some of the latest fixes are backported to the v1 release branch of this package, so you can add the following `replace` directive to your `go.mod` until you are ready to migrate:
+
+```
+replace github.com/mitchellh/mapstructure => github.com/go-viper/mapstructure v1.6.0
+```
+
 ## Usage & Example
 
 For usage and examples see the [documentation](https://pkg.go.dev/mod/github.com/go-viper/mapstructure/v2).
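Since the migration is advertised as a pure import-path swap, a quick way to verify it is to decode something with the new path and the old API. A minimal sketch; the `Config` struct and its fields are hypothetical:

```go
package main

import (
	"fmt"

	// The only change from github.com/mitchellh/mapstructure is this path.
	"github.com/go-viper/mapstructure/v2"
)

// Config is an illustrative decode target; field names are hypothetical.
type Config struct {
	Name    string `mapstructure:"name"`
	Port    int    `mapstructure:"port"`
	Verbose bool   `mapstructure:"verbose"`
}

func main() {
	input := map[string]interface{}{
		"name":    "tempo",
		"port":    3200,
		"verbose": true,
	}

	var c Config
	// Decode has the same signature as in v1 of the library.
	if err := mapstructure.Decode(input, &c); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", c) // {Name:tempo Port:3200 Verbose:true}
}
```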
diff --git a/vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go b/vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go index 840d6adce0e..1f3c69d4b8c 100644 --- a/vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go +++ b/vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go @@ -6,6 +6,7 @@ import ( "fmt" "net" "net/netip" + "net/url" "reflect" "strconv" "strings" @@ -36,6 +37,30 @@ func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { return nil } +// cachedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns +// it into a closure to be used directly +// if the type fails to convert we return a closure always erroring to keep the previous behaviour +func cachedDecodeHook(raw DecodeHookFunc) func(from reflect.Value, to reflect.Value) (interface{}, error) { + switch f := typedDecodeHook(raw).(type) { + case DecodeHookFuncType: + return func(from reflect.Value, to reflect.Value) (interface{}, error) { + return f(from.Type(), to.Type(), from.Interface()) + } + case DecodeHookFuncKind: + return func(from reflect.Value, to reflect.Value) (interface{}, error) { + return f(from.Kind(), to.Kind(), from.Interface()) + } + case DecodeHookFuncValue: + return func(from reflect.Value, to reflect.Value) (interface{}, error) { + return f(from, to) + } + default: + return func(from reflect.Value, to reflect.Value) (interface{}, error) { + return nil, errors.New("invalid decode hook signature") + } + } +} + // DecodeHookExec executes the given decode hook. This should be used // since it'll naturally degrade to the older backwards compatible DecodeHookFunc // that took reflect.Kind instead of reflect.Type. @@ -61,13 +86,17 @@ func DecodeHookExec( // The composed funcs are called in order, with the result of the // previous transformation. func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { + cached := make([]func(from reflect.Value, to reflect.Value) (interface{}, error), 0, len(fs)) + for _, f := range fs { + cached = append(cached, cachedDecodeHook(f)) + } return func(f reflect.Value, t reflect.Value) (interface{}, error) { var err error data := f.Interface() newFrom := f - for _, f1 := range fs { - data, err = DecodeHookExec(f1, newFrom, t) + for _, c := range cached { + data, err = c(newFrom, t) if err != nil { return nil, err } @@ -81,13 +110,17 @@ func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { // OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned. // If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages. func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc { + cached := make([]func(from reflect.Value, to reflect.Value) (interface{}, error), 0, len(ff)) + for _, f := range ff { + cached = append(cached, cachedDecodeHook(f)) + } return func(a, b reflect.Value) (interface{}, error) { var allErrs string var out interface{} var err error - for _, f := range ff { - out, err = DecodeHookExec(f, a, b) + for _, c := range cached { + out, err = c(a, b) if err != nil { allErrs += err.Error() + "\n" continue @@ -144,6 +177,26 @@ func StringToTimeDurationHookFunc() DecodeHookFunc { } } +// StringToURLHookFunc returns a DecodeHookFunc that converts +// strings to *url.URL. 
+func StringToURLHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(&url.URL{}) { + return data, nil + } + + // Convert it by parsing + return url.Parse(data.(string)) + } +} + // StringToIPHookFunc returns a DecodeHookFunc that converts // strings to net.IP func StringToIPHookFunc() DecodeHookFunc { @@ -332,3 +385,246 @@ func StringToNetIPAddrPortHookFunc() DecodeHookFunc { return netip.ParseAddrPort(data.(string)) } } + +// StringToBasicTypeHookFunc returns a DecodeHookFunc that converts +// strings to basic types. +// int8, uint8, int16, uint16, int32, uint32, int64, uint64, int, uint, float32, float64, bool, byte, rune, complex64, complex128 +func StringToBasicTypeHookFunc() DecodeHookFunc { + return ComposeDecodeHookFunc( + StringToInt8HookFunc(), + StringToUint8HookFunc(), + StringToInt16HookFunc(), + StringToUint16HookFunc(), + StringToInt32HookFunc(), + StringToUint32HookFunc(), + StringToInt64HookFunc(), + StringToUint64HookFunc(), + StringToIntHookFunc(), + StringToUintHookFunc(), + StringToFloat32HookFunc(), + StringToFloat64HookFunc(), + StringToBoolHookFunc(), + // byte and rune are aliases for uint8 and int32 respectively + // StringToByteHookFunc(), + // StringToRuneHookFunc(), + StringToComplex64HookFunc(), + StringToComplex128HookFunc(), + ) +} + +// StringToInt8HookFunc returns a DecodeHookFunc that converts +// strings to int8. +func StringToInt8HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Int8 { + return data, nil + } + + // Convert it by parsing + i64, err := strconv.ParseInt(data.(string), 0, 8) + return int8(i64), err + } +} + +// StringToUint8HookFunc returns a DecodeHookFunc that converts +// strings to uint8. +func StringToUint8HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Uint8 { + return data, nil + } + + // Convert it by parsing + u64, err := strconv.ParseUint(data.(string), 0, 8) + return uint8(u64), err + } +} + +// StringToInt16HookFunc returns a DecodeHookFunc that converts +// strings to int16. +func StringToInt16HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Int16 { + return data, nil + } + + // Convert it by parsing + i64, err := strconv.ParseInt(data.(string), 0, 16) + return int16(i64), err + } +} + +// StringToUint16HookFunc returns a DecodeHookFunc that converts +// strings to uint16. +func StringToUint16HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Uint16 { + return data, nil + } + + // Convert it by parsing + u64, err := strconv.ParseUint(data.(string), 0, 16) + return uint16(u64), err + } +} + +// StringToInt32HookFunc returns a DecodeHookFunc that converts +// strings to int32. 
+func StringToInt32HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Int32 { + return data, nil + } + + // Convert it by parsing + i64, err := strconv.ParseInt(data.(string), 0, 32) + return int32(i64), err + } +} + +// StringToUint32HookFunc returns a DecodeHookFunc that converts +// strings to uint32. +func StringToUint32HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Uint32 { + return data, nil + } + + // Convert it by parsing + u64, err := strconv.ParseUint(data.(string), 0, 32) + return uint32(u64), err + } +} + +// StringToInt64HookFunc returns a DecodeHookFunc that converts +// strings to int64. +func StringToInt64HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Int64 { + return data, nil + } + + // Convert it by parsing + return strconv.ParseInt(data.(string), 0, 64) + } +} + +// StringToUint64HookFunc returns a DecodeHookFunc that converts +// strings to uint64. +func StringToUint64HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Uint64 { + return data, nil + } + + // Convert it by parsing + return strconv.ParseUint(data.(string), 0, 64) + } +} + +// StringToIntHookFunc returns a DecodeHookFunc that converts +// strings to int. +func StringToIntHookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Int { + return data, nil + } + + // Convert it by parsing + i64, err := strconv.ParseInt(data.(string), 0, 0) + return int(i64), err + } +} + +// StringToUintHookFunc returns a DecodeHookFunc that converts +// strings to uint. +func StringToUintHookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Uint { + return data, nil + } + + // Convert it by parsing + u64, err := strconv.ParseUint(data.(string), 0, 0) + return uint(u64), err + } +} + +// StringToFloat32HookFunc returns a DecodeHookFunc that converts +// strings to float32. +func StringToFloat32HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Float32 { + return data, nil + } + + // Convert it by parsing + f64, err := strconv.ParseFloat(data.(string), 32) + return float32(f64), err + } +} + +// StringToFloat64HookFunc returns a DecodeHookFunc that converts +// strings to float64. +func StringToFloat64HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Float64 { + return data, nil + } + + // Convert it by parsing + return strconv.ParseFloat(data.(string), 64) + } +} + +// StringToBoolHookFunc returns a DecodeHookFunc that converts +// strings to bool. 
+func StringToBoolHookFunc() DecodeHookFunc {
+	return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
+		if f.Kind() != reflect.String || t.Kind() != reflect.Bool {
+			return data, nil
+		}
+
+		// Convert it by parsing
+		return strconv.ParseBool(data.(string))
+	}
+}
+
+// StringToByteHookFunc returns a DecodeHookFunc that converts
+// strings to byte.
+func StringToByteHookFunc() DecodeHookFunc {
+	return StringToUint8HookFunc()
+}
+
+// StringToRuneHookFunc returns a DecodeHookFunc that converts
+// strings to rune.
+func StringToRuneHookFunc() DecodeHookFunc {
+	return StringToInt32HookFunc()
+}
+
+// StringToComplex64HookFunc returns a DecodeHookFunc that converts
+// strings to complex64.
+func StringToComplex64HookFunc() DecodeHookFunc {
+	return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
+		if f.Kind() != reflect.String || t.Kind() != reflect.Complex64 {
+			return data, nil
+		}
+
+		// Convert it by parsing
+		c128, err := strconv.ParseComplex(data.(string), 64)
+		return complex64(c128), err
+	}
+}
+
+// StringToComplex128HookFunc returns a DecodeHookFunc that converts
+// strings to complex128.
+func StringToComplex128HookFunc() DecodeHookFunc {
+	return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
+		if f.Kind() != reflect.String || t.Kind() != reflect.Complex128 {
+			return data, nil
+		}
+
+		// Convert it by parsing
+		return strconv.ParseComplex(data.(string), 128)
+	}
+}
diff --git a/vendor/github.com/go-viper/mapstructure/v2/error.go b/vendor/github.com/go-viper/mapstructure/v2/error.go
deleted file mode 100644
index 47a99e5af3f..00000000000
--- a/vendor/github.com/go-viper/mapstructure/v2/error.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package mapstructure
-
-import (
-	"errors"
-	"fmt"
-	"sort"
-	"strings"
-)
-
-// Error implements the error interface and can represent multiple
-// errors that occur in the course of a single decode.
-type Error struct {
-	Errors []string
-}
-
-func (e *Error) Error() string {
-	points := make([]string, len(e.Errors))
-	for i, err := range e.Errors {
-		points[i] = fmt.Sprintf("* %s", err)
-	}
-
-	sort.Strings(points)
-	return fmt.Sprintf(
-		"%d error(s) decoding:\n\n%s",
-		len(e.Errors), strings.Join(points, "\n"))
-}
-
-// WrappedErrors implements the errwrap.Wrapper interface to make this
-// return value more useful with the errwrap and go-multierror libraries.
-func (e *Error) WrappedErrors() []error {
-	if e == nil {
-		return nil
-	}
-
-	result := make([]error, len(e.Errors))
-	for i, e := range e.Errors {
-		result[i] = errors.New(e)
-	}
-
-	return result
-}
-
-func appendErrors(errors []string, err error) []string {
-	switch e := err.(type) {
-	case *Error:
-		return append(errors, e.Errors...)
- default: - return append(errors, e.Error()) - } -} diff --git a/vendor/github.com/go-viper/mapstructure/v2/flake.lock b/vendor/github.com/go-viper/mapstructure/v2/flake.lock index 5a387d32994..4bea8154e04 100644 --- a/vendor/github.com/go-viper/mapstructure/v2/flake.lock +++ b/vendor/github.com/go-viper/mapstructure/v2/flake.lock @@ -1,22 +1,84 @@ { "nodes": { + "cachix": { + "inputs": { + "devenv": "devenv_2", + "flake-compat": [ + "devenv", + "flake-compat" + ], + "nixpkgs": [ + "devenv", + "nixpkgs" + ], + "pre-commit-hooks": [ + "devenv", + "pre-commit-hooks" + ] + }, + "locked": { + "lastModified": 1712055811, + "narHash": "sha256-7FcfMm5A/f02yyzuavJe06zLa9hcMHsagE28ADcmQvk=", + "owner": "cachix", + "repo": "cachix", + "rev": "02e38da89851ec7fec3356a5c04bc8349cae0e30", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "cachix", + "type": "github" + } + }, "devenv": { "inputs": { - "flake-compat": "flake-compat", + "cachix": "cachix", + "flake-compat": "flake-compat_2", + "nix": "nix_2", + "nixpkgs": "nixpkgs_2", + "pre-commit-hooks": "pre-commit-hooks" + }, + "locked": { + "lastModified": 1717245169, + "narHash": "sha256-+mW3rTBjGU8p1THJN0lX/Dd/8FbnF+3dB+mJuSaxewE=", + "owner": "cachix", + "repo": "devenv", + "rev": "c3f9f053c077c6f88a3de5276d9178c62baa3fc3", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "devenv", + "type": "github" + } + }, + "devenv_2": { + "inputs": { + "flake-compat": [ + "devenv", + "cachix", + "flake-compat" + ], "nix": "nix", "nixpkgs": "nixpkgs", - "pre-commit-hooks": "pre-commit-hooks" + "poetry2nix": "poetry2nix", + "pre-commit-hooks": [ + "devenv", + "cachix", + "pre-commit-hooks" + ] }, "locked": { - "lastModified": 1702549996, - "narHash": "sha256-mEN+8gjWUXRxBCcixeth+jlDNuzxbpFwZNOEc4K22vw=", + "lastModified": 1708704632, + "narHash": "sha256-w+dOIW60FKMaHI1q5714CSibk99JfYxm0CzTinYWr+Q=", "owner": "cachix", "repo": "devenv", - "rev": "e681a99ffe2d2882f413a5d771129223c838ddce", + "rev": "2ee4450b0f4b95a1b90f2eb5ffea98b90e48c196", "type": "github" }, "original": { "owner": "cachix", + "ref": "python-rewrite", "repo": "devenv", "type": "github" } @@ -37,16 +99,32 @@ "type": "github" } }, + "flake-compat_2": { + "flake": false, + "locked": { + "lastModified": 1696426674, + "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, "flake-parts": { "inputs": { "nixpkgs-lib": "nixpkgs-lib" }, "locked": { - "lastModified": 1701473968, - "narHash": "sha256-YcVE5emp1qQ8ieHUnxt1wCZCC3ZfAS+SRRWZ2TMda7E=", + "lastModified": 1717285511, + "narHash": "sha256-iKzJcpdXih14qYVcZ9QC9XuZYnPc6T8YImb6dX166kw=", "owner": "hercules-ci", "repo": "flake-parts", - "rev": "34fed993f1674c8d06d58b37ce1e0fe5eebcb9f5", + "rev": "2a55567fcf15b1b1c7ed712a2c6fadaec7412ea8", "type": "github" }, "original": { @@ -60,11 +138,29 @@ "systems": "systems" }, "locked": { - "lastModified": 1685518550, - "narHash": "sha256-o2d0KcvaXzTrPRIo0kOLV0/QXHhDQ5DTi+OxcjO8xqY=", + "lastModified": 1689068808, + "narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "flake-utils_2": { + "inputs": { + "systems": 
"systems_2" + }, + "locked": { + "lastModified": 1710146030, + "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", "owner": "numtide", "repo": "flake-utils", - "rev": "a1720a10a6cfe8234c0e93907ffe81be440f4cef", + "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", "type": "github" }, "original": { @@ -82,11 +178,11 @@ ] }, "locked": { - "lastModified": 1660459072, - "narHash": "sha256-8DFJjXG8zqoONA1vXtgeKXy68KdJL5UaXR8NtVMUbx8=", + "lastModified": 1709087332, + "narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=", "owner": "hercules-ci", "repo": "gitignore.nix", - "rev": "a20de23b925fd8264fd7fad6454652e142fd7f73", + "rev": "637db329424fd7e46cf4185293b9cc8c88c95394", "type": "github" }, "original": { @@ -95,53 +191,90 @@ "type": "github" } }, - "lowdown-src": { - "flake": false, + "nix": { + "inputs": { + "flake-compat": "flake-compat", + "nixpkgs": [ + "devenv", + "cachix", + "devenv", + "nixpkgs" + ], + "nixpkgs-regression": "nixpkgs-regression" + }, "locked": { - "lastModified": 1633514407, - "narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=", - "owner": "kristapsdz", - "repo": "lowdown", - "rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8", + "lastModified": 1712911606, + "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=", + "owner": "domenkozar", + "repo": "nix", + "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12", "type": "github" }, "original": { - "owner": "kristapsdz", - "repo": "lowdown", + "owner": "domenkozar", + "ref": "devenv-2.21", + "repo": "nix", "type": "github" } }, - "nix": { + "nix-github-actions": { + "inputs": { + "nixpkgs": [ + "devenv", + "cachix", + "devenv", + "poetry2nix", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1688870561, + "narHash": "sha256-4UYkifnPEw1nAzqqPOTL2MvWtm3sNGw1UTYTalkTcGY=", + "owner": "nix-community", + "repo": "nix-github-actions", + "rev": "165b1650b753316aa7f1787f3005a8d2da0f5301", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "nix-github-actions", + "type": "github" + } + }, + "nix_2": { "inputs": { - "lowdown-src": "lowdown-src", + "flake-compat": [ + "devenv", + "flake-compat" + ], "nixpkgs": [ "devenv", "nixpkgs" ], - "nixpkgs-regression": "nixpkgs-regression" + "nixpkgs-regression": "nixpkgs-regression_2" }, "locked": { - "lastModified": 1676545802, - "narHash": "sha256-EK4rZ+Hd5hsvXnzSzk2ikhStJnD63odF7SzsQ8CuSPU=", + "lastModified": 1712911606, + "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=", "owner": "domenkozar", "repo": "nix", - "rev": "7c91803598ffbcfe4a55c44ac6d49b2cf07a527f", + "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12", "type": "github" }, "original": { "owner": "domenkozar", - "ref": "relaxed-flakes", + "ref": "devenv-2.21", "repo": "nix", "type": "github" } }, "nixpkgs": { "locked": { - "lastModified": 1678875422, - "narHash": "sha256-T3o6NcQPwXjxJMn2shz86Chch4ljXgZn746c2caGxd8=", + "lastModified": 1692808169, + "narHash": "sha256-x9Opq06rIiwdwGeK2Ykj69dNc2IvUH1fY55Wm7atwrE=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "126f49a01de5b7e35a43fd43f891ecf6d3a51459", + "rev": "9201b5ff357e781bf014d0330d18555695df7ba8", "type": "github" }, "original": { @@ -153,23 +286,33 @@ }, "nixpkgs-lib": { "locked": { - "dir": "lib", - "lastModified": 1701253981, - "narHash": "sha256-ztaDIyZ7HrTAfEEUt9AtTDNoCYxUdSd6NrRHaYOIxtk=", + "lastModified": 1717284937, + "narHash": "sha256-lIbdfCsf8LMFloheeE6N31+BMIeixqyQWbSr2vk79EQ=", + "type": "tarball", + "url": 
"https://github.com/NixOS/nixpkgs/archive/eb9ceca17df2ea50a250b6b27f7bf6ab0186f198.tar.gz" + }, + "original": { + "type": "tarball", + "url": "https://github.com/NixOS/nixpkgs/archive/eb9ceca17df2ea50a250b6b27f7bf6ab0186f198.tar.gz" + } + }, + "nixpkgs-regression": { + "locked": { + "lastModified": 1643052045, + "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "e92039b55bcd58469325ded85d4f58dd5a4eaf58", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", "type": "github" }, "original": { - "dir": "lib", "owner": "NixOS", - "ref": "nixos-unstable", "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", "type": "github" } }, - "nixpkgs-regression": { + "nixpkgs-regression_2": { "locked": { "lastModified": 1643052045, "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", @@ -187,27 +330,43 @@ }, "nixpkgs-stable": { "locked": { - "lastModified": 1685801374, - "narHash": "sha256-otaSUoFEMM+LjBI1XL/xGB5ao6IwnZOXc47qhIgJe8U=", + "lastModified": 1710695816, + "narHash": "sha256-3Eh7fhEID17pv9ZxrPwCLfqXnYP006RKzSs0JptsN84=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "c37ca420157f4abc31e26f436c1145f8951ff373", + "rev": "614b4613980a522ba49f0d194531beddbb7220d3", "type": "github" }, "original": { "owner": "NixOS", - "ref": "nixos-23.05", + "ref": "nixos-23.11", "repo": "nixpkgs", "type": "github" } }, "nixpkgs_2": { "locked": { - "lastModified": 1702539185, - "narHash": "sha256-KnIRG5NMdLIpEkZTnN5zovNYc0hhXjAgv6pfd5Z4c7U=", + "lastModified": 1713361204, + "narHash": "sha256-TA6EDunWTkc5FvDCqU3W2T3SFn0gRZqh6D/hJnM02MM=", + "owner": "cachix", + "repo": "devenv-nixpkgs", + "rev": "285676e87ad9f0ca23d8714a6ab61e7e027020c6", + "type": "github" + }, + "original": { + "owner": "cachix", + "ref": "rolling", + "repo": "devenv-nixpkgs", + "type": "github" + } + }, + "nixpkgs_3": { + "locked": { + "lastModified": 1717112898, + "narHash": "sha256-7R2ZvOnvd9h8fDd65p0JnB7wXfUvreox3xFdYWd1BnY=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "aa9d4729cbc99dabacb50e3994dcefb3ea0f7447", + "rev": "6132b0f6e344ce2fe34fc051b72fb46e34f668e0", "type": "github" }, "original": { @@ -217,13 +376,38 @@ "type": "github" } }, + "poetry2nix": { + "inputs": { + "flake-utils": "flake-utils", + "nix-github-actions": "nix-github-actions", + "nixpkgs": [ + "devenv", + "cachix", + "devenv", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1692876271, + "narHash": "sha256-IXfZEkI0Mal5y1jr6IRWMqK8GW2/f28xJenZIPQqkY0=", + "owner": "nix-community", + "repo": "poetry2nix", + "rev": "d5006be9c2c2417dafb2e2e5034d83fabd207ee3", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "poetry2nix", + "type": "github" + } + }, "pre-commit-hooks": { "inputs": { "flake-compat": [ "devenv", "flake-compat" ], - "flake-utils": "flake-utils", + "flake-utils": "flake-utils_2", "gitignore": "gitignore", "nixpkgs": [ "devenv", @@ -232,11 +416,11 @@ "nixpkgs-stable": "nixpkgs-stable" }, "locked": { - "lastModified": 1688056373, - "narHash": "sha256-2+SDlNRTKsgo3LBRiMUcoEUb6sDViRNQhzJquZ4koOI=", + "lastModified": 1713775815, + "narHash": "sha256-Wu9cdYTnGQQwtT20QQMg7jzkANKQjwBD9iccfGKkfls=", "owner": "cachix", "repo": "pre-commit-hooks.nix", - "rev": "5843cf069272d92b60c3ed9e55b7a8989c01d4c7", + "rev": "2ac4dcbf55ed43f3be0bae15e181f08a57af24a4", "type": "github" }, "original": { @@ -249,7 +433,7 @@ "inputs": { "devenv": "devenv", "flake-parts": "flake-parts", - "nixpkgs": "nixpkgs_2" + "nixpkgs": "nixpkgs_3" } }, "systems": { @@ -266,6 
+450,21 @@ "repo": "default", "type": "github" } + }, + "systems_2": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } } }, "root": "root", diff --git a/vendor/github.com/go-viper/mapstructure/v2/internal/errors/errors.go b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/errors.go new file mode 100644 index 00000000000..d1c15e474f4 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/errors.go @@ -0,0 +1,11 @@ +package errors + +import "errors" + +func New(text string) error { + return errors.New(text) +} + +func As(err error, target interface{}) bool { + return errors.As(err, target) +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join.go b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join.go new file mode 100644 index 00000000000..d74e3a0b5a4 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join.go @@ -0,0 +1,9 @@ +//go:build go1.20 + +package errors + +import "errors" + +func Join(errs ...error) error { + return errors.Join(errs...) +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join_go1_19.go b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join_go1_19.go new file mode 100644 index 00000000000..700b40229cb --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join_go1_19.go @@ -0,0 +1,61 @@ +//go:build !go1.20 + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package errors + +// Join returns an error that wraps the given errors. +// Any nil error values are discarded. +// Join returns nil if every value in errs is nil. +// The error formats as the concatenation of the strings obtained +// by calling the Error method of each element of errs, with a newline +// between each string. +// +// A non-nil error returned by Join implements the Unwrap() []error method. +func Join(errs ...error) error { + n := 0 + for _, err := range errs { + if err != nil { + n++ + } + } + if n == 0 { + return nil + } + e := &joinError{ + errs: make([]error, 0, n), + } + for _, err := range errs { + if err != nil { + e.errs = append(e.errs, err) + } + } + return e +} + +type joinError struct { + errs []error +} + +func (e *joinError) Error() string { + // Since Join returns nil if every value in errs is nil, + // e.errs cannot be empty. + if len(e.errs) == 1 { + return e.errs[0].Error() + } + + b := []byte(e.errs[0].Error()) + for _, err := range e.errs[1:] { + b = append(b, '\n') + b = append(b, err.Error()...) + } + // At this point, b has at least one byte '\n'. 
+	// return unsafe.String(&b[0], len(b))
+	return string(b)
+}
+
+func (e *joinError) Unwrap() []error {
+	return e.errs
+}
diff --git a/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go b/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go
index 27f21bc7219..e77e63ba383 100644
--- a/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go
+++ b/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go
@@ -160,12 +160,13 @@ package mapstructure
 
 import (
 	"encoding/json"
-	"errors"
 	"fmt"
 	"reflect"
 	"sort"
 	"strconv"
 	"strings"
+
+	"github.com/go-viper/mapstructure/v2/internal/errors"
 )
 
 // DecodeHookFunc is the callback function that can be used for
@@ -265,6 +266,10 @@ type DecoderConfig struct {
 	// defaults to "mapstructure"
 	TagName string
 
+	// The option of the value in the tag that indicates a field should
+	// be squashed. This defaults to "squash".
+	SquashTagOption string
+
 	// IgnoreUntaggedFields ignores all struct fields without explicit
 	// TagName, comparable to `mapstructure:"-"` as default behaviour.
 	IgnoreUntaggedFields bool
@@ -273,6 +278,10 @@
 	// field name or tag. Defaults to `strings.EqualFold`. This can be used
 	// to implement case-sensitive tag values, support snake casing, etc.
 	MatchName func(mapKey, fieldName string) bool
+
+	// DecodeNil, if set to true, will cause the DecodeHook (if present) to run
+	// even if the input is nil. This can be used to provide default values.
+	DecodeNil bool
 }
 
 // A Decoder takes a raw interface value and turns it into structured
@@ -282,7 +291,8 @@
 // structure. The top-level Decode method is just a convenience that sets
 // up the most basic Decoder.
 type Decoder struct {
-	config *DecoderConfig
+	config           *DecoderConfig
+	cachedDecodeHook func(from reflect.Value, to reflect.Value) (interface{}, error)
 }
 
 // Metadata contains information about decoding a structure that
@@ -400,6 +410,10 @@ func NewDecoder(config *DecoderConfig) (*Decoder, error) {
 		config.TagName = "mapstructure"
 	}
 
+	if config.SquashTagOption == "" {
+		config.SquashTagOption = "squash"
+	}
+
 	if config.MatchName == nil {
 		config.MatchName = strings.EqualFold
 	}
@@ -407,6 +421,9 @@
 	result := &Decoder{
 		config: config,
 	}
+	if config.DecodeHook != nil {
+		result.cachedDecodeHook = cachedDecodeHook(config.DecodeHook)
+	}
 
 	return result, nil
 }
@@ -414,22 +431,37 @@
 // Decode decodes the given raw interface to the target pointer specified
 // by the configuration.
 func (d *Decoder) Decode(input interface{}) error {
-	return d.decode("", input, reflect.ValueOf(d.config.Result).Elem())
+	err := d.decode("", input, reflect.ValueOf(d.config.Result).Elem())
+
+	// Retain some of the original behavior when multiple errors occur
+	var joinedErr interface{ Unwrap() []error }
+	if errors.As(err, &joinedErr) {
+		return fmt.Errorf("decoding failed due to the following error(s):\n\n%w", err)
+	}
+
+	return err
+}
+
+// isNil returns true if the input is nil or a typed nil pointer.
+func isNil(input interface{}) bool {
+	if input == nil {
+		return true
+	}
+	val := reflect.ValueOf(input)
+	return val.Kind() == reflect.Ptr && val.IsNil()
 }
 
 // Decodes an unknown data type into a specific reflection value.
func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error { - var inputVal reflect.Value - if input != nil { - inputVal = reflect.ValueOf(input) - - // We need to check here if input is a typed nil. Typed nils won't - // match the "input == nil" below so we check that here. - if inputVal.Kind() == reflect.Ptr && inputVal.IsNil() { - input = nil - } + var ( + inputVal = reflect.ValueOf(input) + outputKind = getKind(outVal) + decodeNil = d.config.DecodeNil && d.cachedDecodeHook != nil + ) + if isNil(input) { + // Typed nils won't match the "input == nil" below, so reset input. + input = nil } - if input == nil { // If the data is nil, then we don't set anything, unless ZeroFields is set // to true. @@ -440,30 +472,46 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) } } - return nil + if !decodeNil { + return nil + } } - if !inputVal.IsValid() { - // If the input value is invalid, then we just set the value - // to be the zero value. - outVal.Set(reflect.Zero(outVal.Type())) - if d.config.Metadata != nil && name != "" { - d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + if !decodeNil { + // If the input value is invalid, then we just set the value + // to be the zero value. + outVal.Set(reflect.Zero(outVal.Type())) + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + return nil + } + // Hooks need a valid inputVal, so reset it to zero value of outVal type. + switch outputKind { + case reflect.Struct, reflect.Map: + var mapVal map[string]interface{} + inputVal = reflect.ValueOf(mapVal) // create nil map pointer + case reflect.Slice, reflect.Array: + var sliceVal []interface{} + inputVal = reflect.ValueOf(sliceVal) // create nil slice pointer + default: + inputVal = reflect.Zero(outVal.Type()) } - return nil } - if d.config.DecodeHook != nil { + if d.cachedDecodeHook != nil { // We have a DecodeHook, so let's pre-process the input. 
var err error - input, err = DecodeHookExec(d.config.DecodeHook, inputVal, outVal) + input, err = d.cachedDecodeHook(inputVal, outVal) if err != nil { return fmt.Errorf("error decoding '%s': %w", name, err) } } + if isNil(input) { + return nil + } var err error - outputKind := getKind(outVal) addMetaKey := true switch outputKind { case reflect.Bool: @@ -478,6 +526,8 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e err = d.decodeUint(name, input, outVal) case reflect.Float32: err = d.decodeFloat(name, input, outVal) + case reflect.Complex64: + err = d.decodeComplex(name, input, outVal) case reflect.Struct: err = d.decodeStruct(name, input, outVal) case reflect.Map: @@ -742,8 +792,8 @@ func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) e } default: return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) + "'%s' expected type '%s', got unconvertible type '%#v', value: '%#v'", + name, val, dataVal, data) } return nil @@ -796,6 +846,22 @@ func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) return nil } +func (d *Decoder) decodeComplex(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Complex64: + val.SetComplex(dataVal.Complex()) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error { valType := val.Type() valKeyType := valType.Key() @@ -863,7 +929,7 @@ func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val refle valElemType := valType.Elem() // Accumulate errors - errors := make([]string, 0) + var errs []error // If the input data is empty, then we just match what the input data is. if dataVal.Len() == 0 { @@ -885,7 +951,7 @@ func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val refle // First decode the key into the proper type currentKey := reflect.Indirect(reflect.New(valKeyType)) if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { - errors = appendErrors(errors, err) + errs = append(errs, err) continue } @@ -893,7 +959,7 @@ func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val refle v := dataVal.MapIndex(k).Interface() currentVal := reflect.Indirect(reflect.New(valElemType)) if err := d.decode(fieldName, v, currentVal); err != nil { - errors = appendErrors(errors, err) + errs = append(errs, err) continue } @@ -903,12 +969,7 @@ func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val refle // Set the built up map to the value val.Set(valMap) - // If we had errors, return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil + return errors.Join(errs...) } func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { @@ -951,7 +1012,7 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re } // If "squash" is specified in the tag, we squash the field down. - squash = squash || strings.Index(tagValue[index+1:], "squash") != -1 + squash = squash || strings.Contains(tagValue[index+1:], d.config.SquashTagOption) if squash { // When squashing, the embedded type can be a pointer to a struct. 
if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct { @@ -1146,7 +1207,7 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) } // Accumulate any errors - errors := make([]string, 0) + var errs []error for i := 0; i < dataVal.Len(); i++ { currentData := dataVal.Index(i).Interface() @@ -1157,19 +1218,14 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) fieldName := name + "[" + strconv.Itoa(i) + "]" if err := d.decode(fieldName, currentData, currentField); err != nil { - errors = appendErrors(errors, err) + errs = append(errs, err) } } // Finally, set the value to the slice we built up val.Set(valSlice) - // If there were errors, we return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil + return errors.Join(errs...) } func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error { @@ -1215,7 +1271,7 @@ func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) } // Accumulate any errors - errors := make([]string, 0) + var errs []error for i := 0; i < dataVal.Len(); i++ { currentData := dataVal.Index(i).Interface() @@ -1223,19 +1279,14 @@ func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) fieldName := name + "[" + strconv.Itoa(i) + "]" if err := d.decode(fieldName, currentData, currentField); err != nil { - errors = appendErrors(errors, err) + errs = append(errs, err) } } // Finally, set the value to the array we built up val.Set(valArray) - // If there were errors, we return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil + return errors.Join(errs...) } func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { @@ -1297,7 +1348,8 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e } targetValKeysUnused := make(map[interface{}]struct{}) - errors := make([]string, 0) + + var errs []error // This slice will keep track of all the structs we'll be decoding. // There can be more than one struct if there are embedded structs @@ -1338,7 +1390,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e // We always parse the tags cause we're looking for other tags too tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") for _, tag := range tagParts[1:] { - if tag == "squash" { + if tag == d.config.SquashTagOption { squash = true break } @@ -1350,11 +1402,15 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e } if squash { - if fieldVal.Kind() != reflect.Struct { - errors = appendErrors(errors, - fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldVal.Kind())) - } else { + switch fieldVal.Kind() { + case reflect.Struct: structs = append(structs, fieldVal) + case reflect.Interface: + if !fieldVal.IsNil() { + structs = append(structs, fieldVal.Elem().Elem()) + } + default: + errs = append(errs, fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldVal.Kind())) } continue } @@ -1431,7 +1487,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e } if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil { - errors = appendErrors(errors, err) + errs = append(errs, err) } } @@ -1446,7 +1502,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e // Decode it as-if we were just decoding this map onto our map. 
if err := d.decodeMap(name, remain, remainField.val); err != nil { - errors = appendErrors(errors, err) + errs = append(errs, err) } // Set the map to nil so we have none so that the next check will @@ -1462,7 +1518,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e sort.Strings(keys) err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", ")) - errors = appendErrors(errors, err) + errs = append(errs, err) } if d.config.ErrorUnset && len(targetValKeysUnused) > 0 { @@ -1473,11 +1529,11 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e sort.Strings(keys) err := fmt.Errorf("'%s' has unset fields: %s", name, strings.Join(keys, ", ")) - errors = appendErrors(errors, err) + errs = append(errs, err) } - if len(errors) > 0 { - return &Error{errors} + if err := errors.Join(errs...); err != nil { + return err } // Add the unused keys to the list of unused keys if we're tracking metadata @@ -1531,6 +1587,8 @@ func getKind(val reflect.Value) reflect.Kind { return reflect.Uint case kind >= reflect.Float32 && kind <= reflect.Float64: return reflect.Float32 + case kind >= reflect.Complex64 && kind <= reflect.Complex128: + return reflect.Complex64 default: return kind } diff --git a/vendor/github.com/goccy/go-json/internal/decoder/compile.go b/vendor/github.com/goccy/go-json/internal/decoder/compile.go index fab6437647b..8ad50936c0c 100644 --- a/vendor/github.com/goccy/go-json/internal/decoder/compile.go +++ b/vendor/github.com/goccy/go-json/internal/decoder/compile.go @@ -5,6 +5,7 @@ import ( "fmt" "reflect" "strings" + "sync" "sync/atomic" "unicode" "unsafe" @@ -17,22 +18,27 @@ var ( typeAddr *runtime.TypeAddr cachedDecoderMap unsafe.Pointer // map[uintptr]decoder cachedDecoder []Decoder + initOnce sync.Once ) -func init() { - typeAddr = runtime.AnalyzeTypeAddr() - if typeAddr == nil { - typeAddr = &runtime.TypeAddr{} - } - cachedDecoder = make([]Decoder, typeAddr.AddrRange>>typeAddr.AddrShift+1) +func initDecoder() { + initOnce.Do(func() { + typeAddr = runtime.AnalyzeTypeAddr() + if typeAddr == nil { + typeAddr = &runtime.TypeAddr{} + } + cachedDecoder = make([]Decoder, typeAddr.AddrRange>>typeAddr.AddrShift+1) + }) } func loadDecoderMap() map[uintptr]Decoder { + initDecoder() p := atomic.LoadPointer(&cachedDecoderMap) return *(*map[uintptr]Decoder)(unsafe.Pointer(&p)) } func storeDecoder(typ uintptr, dec Decoder, m map[uintptr]Decoder) { + initDecoder() newDecoderMap := make(map[uintptr]Decoder, len(m)+1) newDecoderMap[typ] = dec diff --git a/vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go b/vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go index eb7e2b1345d..025ca85b5e2 100644 --- a/vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go +++ b/vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go @@ -10,6 +10,7 @@ import ( ) func CompileToGetDecoder(typ *runtime.Type) (Decoder, error) { + initDecoder() typeptr := uintptr(unsafe.Pointer(typ)) if typeptr > typeAddr.MaxTypeAddr { return compileToGetDecoderSlowPath(typeptr, typ) diff --git a/vendor/github.com/goccy/go-json/internal/decoder/compile_race.go b/vendor/github.com/goccy/go-json/internal/decoder/compile_race.go index 49cdda4a172..023b817c368 100644 --- a/vendor/github.com/goccy/go-json/internal/decoder/compile_race.go +++ b/vendor/github.com/goccy/go-json/internal/decoder/compile_race.go @@ -13,6 +13,7 @@ import ( var decMu sync.RWMutex func CompileToGetDecoder(typ *runtime.Type) (Decoder, error) { 
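
The go-json change above converts an eager package init() into initDecoder guarded by sync.Once: runtime.AnalyzeTypeAddr and the cache allocation now run lazily, on the first call that needs them, and every entry point (loadDecoderMap, storeDecoder, and both CompileToGetDecoder variants) calls initDecoder first so the cache is always ready. A minimal sketch of the pattern with a hypothetical cache:

package main

import (
	"fmt"
	"sync"
)

var (
	cache    []int
	initOnce sync.Once
)

// initCache mirrors the initDecoder pattern: cheap and safe to call from
// every entry point; the body runs exactly once, on first use.
func initCache() {
	initOnce.Do(func() {
		fmt.Println("allocating cache")
		cache = make([]int, 16)
	})
}

func lookup(i int) int {
	initCache()
	return cache[i]
}

func main() {
	fmt.Println(lookup(3), lookup(5)) // "allocating cache" prints only once
}
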
+ initDecoder() typeptr := uintptr(unsafe.Pointer(typ)) if typeptr > typeAddr.MaxTypeAddr { return compileToGetDecoderSlowPath(typeptr, typ) diff --git a/vendor/github.com/goccy/go-json/internal/encoder/compiler.go b/vendor/github.com/goccy/go-json/internal/encoder/compiler.go index 37b7aa38e26..b107636890a 100644 --- a/vendor/github.com/goccy/go-json/internal/encoder/compiler.go +++ b/vendor/github.com/goccy/go-json/internal/encoder/compiler.go @@ -5,6 +5,7 @@ import ( "encoding" "encoding/json" "reflect" + "sync" "sync/atomic" "unsafe" @@ -24,14 +25,17 @@ var ( cachedOpcodeSets []*OpcodeSet cachedOpcodeMap unsafe.Pointer // map[uintptr]*OpcodeSet typeAddr *runtime.TypeAddr + initEncoderOnce sync.Once ) -func init() { - typeAddr = runtime.AnalyzeTypeAddr() - if typeAddr == nil { - typeAddr = &runtime.TypeAddr{} - } - cachedOpcodeSets = make([]*OpcodeSet, typeAddr.AddrRange>>typeAddr.AddrShift+1) +func initEncoder() { + initEncoderOnce.Do(func() { + typeAddr = runtime.AnalyzeTypeAddr() + if typeAddr == nil { + typeAddr = &runtime.TypeAddr{} + } + cachedOpcodeSets = make([]*OpcodeSet, typeAddr.AddrRange>>typeAddr.AddrShift+1) + }) } func loadOpcodeMap() map[uintptr]*OpcodeSet { diff --git a/vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go b/vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go index 20c93cbf709..b6f45a49b0e 100644 --- a/vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go +++ b/vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go @@ -4,6 +4,7 @@ package encoder func CompileToGetCodeSet(ctx *RuntimeContext, typeptr uintptr) (*OpcodeSet, error) { + initEncoder() if typeptr > typeAddr.MaxTypeAddr || typeptr < typeAddr.BaseTypeAddr { codeSet, err := compileToGetCodeSetSlowPath(typeptr) if err != nil { diff --git a/vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go b/vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go index 13ba23fdff8..47b482f7fb6 100644 --- a/vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go +++ b/vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go @@ -10,6 +10,7 @@ import ( var setsMu sync.RWMutex func CompileToGetCodeSet(ctx *RuntimeContext, typeptr uintptr) (*OpcodeSet, error) { + initEncoder() if typeptr > typeAddr.MaxTypeAddr || typeptr < typeAddr.BaseTypeAddr { codeSet, err := compileToGetCodeSetSlowPath(typeptr) if err != nil { diff --git a/vendor/github.com/goccy/go-json/internal/encoder/encoder.go b/vendor/github.com/goccy/go-json/internal/encoder/encoder.go index 14eb6a0d643..b436f5b21ff 100644 --- a/vendor/github.com/goccy/go-json/internal/encoder/encoder.go +++ b/vendor/github.com/goccy/go-json/internal/encoder/encoder.go @@ -406,6 +406,11 @@ func AppendMarshalJSON(ctx *RuntimeContext, code *Opcode, b []byte, v interface{ rv = newV } } + + if rv.Kind() == reflect.Ptr && rv.IsNil() { + return AppendNull(ctx, b), nil + } + v = rv.Interface() var bb []byte if (code.Flags & MarshalerContextFlags) != 0 { diff --git a/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go b/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go index 16278a1d995..fcd049de922 100644 --- a/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go +++ b/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
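
The AppendMarshalJSON hunk above appears to close a typed-nil hole: a non-nil interface holding a nil pointer could previously reach MarshalJSON with a nil receiver, whereas now it is emitted as JSON null. A stripped-down reproduction of the pitfall and the guard, using a hypothetical type T and appendValue helper rather than go-json's real internals:

package main

import (
	"fmt"
	"reflect"
)

type T struct{ V int }

func (t *T) MarshalJSON() ([]byte, error) {
	return []byte(fmt.Sprintf(`{"v":%d}`, t.V)), nil // panics if t is nil
}

func appendValue(b []byte, v interface{}) []byte {
	rv := reflect.ValueOf(v)
	// The guard added above: a nil pointer must become null rather than
	// a MarshalJSON call on a nil receiver.
	if rv.Kind() == reflect.Ptr && rv.IsNil() {
		return append(b, "null"...)
	}
	out, _ := v.(interface{ MarshalJSON() ([]byte, error) }).MarshalJSON()
	return append(b, out...)
}

func main() {
	var p *T
	fmt.Println(string(appendValue(nil, p)))         // null
	fmt.Println(string(appendValue(nil, &T{V: 42}))) // {"v":42}
}
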
// versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.2 // protoc v3.21.12 // source: internal/proto/common/common.proto @@ -145,8 +145,8 @@ type Identity struct { // *Identity_SpiffeId // *Identity_Hostname // *Identity_Uid - // *Identity_MdbUsername - // *Identity_GaiaId + // *Identity_Username + // *Identity_GcpId IdentityOneof isIdentity_IdentityOneof `protobuf_oneof:"identity_oneof"` // Additional identity-specific attributes. Attributes map[string]string `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` @@ -212,16 +212,16 @@ func (x *Identity) GetUid() string { return "" } -func (x *Identity) GetMdbUsername() string { - if x, ok := x.GetIdentityOneof().(*Identity_MdbUsername); ok { - return x.MdbUsername +func (x *Identity) GetUsername() string { + if x, ok := x.GetIdentityOneof().(*Identity_Username); ok { + return x.Username } return "" } -func (x *Identity) GetGaiaId() string { - if x, ok := x.GetIdentityOneof().(*Identity_GaiaId); ok { - return x.GaiaId +func (x *Identity) GetGcpId() string { + if x, ok := x.GetIdentityOneof().(*Identity_GcpId); ok { + return x.GcpId } return "" } @@ -252,14 +252,14 @@ type Identity_Uid struct { Uid string `protobuf:"bytes,4,opt,name=uid,proto3,oneof"` } -type Identity_MdbUsername struct { - // The MDB username of a connection endpoint. - MdbUsername string `protobuf:"bytes,5,opt,name=mdb_username,json=mdbUsername,proto3,oneof"` +type Identity_Username struct { + // The username of a connection endpoint. + Username string `protobuf:"bytes,5,opt,name=username,proto3,oneof"` } -type Identity_GaiaId struct { - // The Gaia ID of a connection endpoint. - GaiaId string `protobuf:"bytes,6,opt,name=gaia_id,json=gaiaId,proto3,oneof"` +type Identity_GcpId struct { + // The GCP ID of a connection endpoint. 
+ GcpId string `protobuf:"bytes,6,opt,name=gcp_id,json=gcpId,proto3,oneof"` } func (*Identity_SpiffeId) isIdentity_IdentityOneof() {} @@ -268,9 +268,9 @@ func (*Identity_Hostname) isIdentity_IdentityOneof() {} func (*Identity_Uid) isIdentity_IdentityOneof() {} -func (*Identity_MdbUsername) isIdentity_IdentityOneof() {} +func (*Identity_Username) isIdentity_IdentityOneof() {} -func (*Identity_GaiaId) isIdentity_IdentityOneof() {} +func (*Identity_GcpId) isIdentity_IdentityOneof() {} var File_internal_proto_common_common_proto protoreflect.FileDescriptor @@ -278,38 +278,37 @@ var file_internal_proto_common_common_proto_rawDesc = []byte{ 0x0a, 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, - 0xb1, 0x02, 0x0a, 0x08, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1d, 0x0a, 0x09, + 0xa8, 0x02, 0x0a, 0x08, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1d, 0x0a, 0x09, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x03, 0x75, 0x69, 0x64, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, 0x23, 0x0a, - 0x0c, 0x6d, 0x64, 0x62, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x6d, 0x64, 0x62, 0x55, 0x73, 0x65, 0x72, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x07, 0x67, 0x61, 0x69, 0x61, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x67, 0x61, 0x69, 0x61, 0x49, 0x64, 0x12, 0x43, 0x0a, - 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x23, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, - 0x65, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, - 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6f, 0x6e, - 0x65, 0x6f, 0x66, 0x2a, 0x5b, 0x0a, 0x0b, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, - 0x74, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, - 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x45, - 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, - 0x10, 0x01, 0x12, 0x1c, 0x0a, 0x18, 0x43, 0x48, 0x41, 0x43, 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, - 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x02, - 0x2a, 0x24, 0x0a, 0x0a, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 
0x6e, 0x12, 0x0a, - 0x0a, 0x06, 0x54, 0x4c, 0x53, 0x31, 0x5f, 0x32, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x54, 0x4c, - 0x53, 0x31, 0x5f, 0x33, 0x10, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, 0x1c, 0x0a, + 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x00, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x17, 0x0a, 0x06, 0x67, + 0x63, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x67, + 0x63, 0x70, 0x49, 0x64, 0x12, 0x43, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x41, 0x74, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x61, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x69, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x2a, 0x5b, 0x0a, 0x0b, 0x43, 0x69, + 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x45, 0x53, + 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, + 0x00, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, 0x47, 0x43, 0x4d, + 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x01, 0x12, 0x1c, 0x0a, 0x18, 0x43, 0x48, 0x41, + 0x43, 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, + 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x02, 0x2a, 0x24, 0x0a, 0x0a, 0x54, 0x4c, 0x53, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x54, 0x4c, 0x53, 0x31, 0x5f, 0x32, 0x10, + 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x54, 0x4c, 0x53, 0x31, 0x5f, 0x33, 0x10, 0x01, 0x42, 0x36, 0x5a, + 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x67, 0x6f, 0x5f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -326,7 +325,7 @@ func file_internal_proto_common_common_proto_rawDescGZIP() []byte { var file_internal_proto_common_common_proto_enumTypes = make([]protoimpl.EnumInfo, 2) var file_internal_proto_common_common_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_internal_proto_common_common_proto_goTypes = []interface{}{ +var file_internal_proto_common_common_proto_goTypes = []any{ (Ciphersuite)(0), // 0: s2a.proto.Ciphersuite (TLSVersion)(0), // 1: 
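
The Identity changes above rename two oneof members (mdb_username to username, gaia_id to gcp_id) while keeping their field numbers 5 and 6, so only the generated Go surface changes, not the wire format. protoc-gen-go models a oneof as an unexported one-method interface plus one wrapper struct per member; a hand-written sketch of that shape, with a hypothetical two-member oneof:

package main

import "fmt"

// isIdentityOneof is the sealed interface protoc-gen-go generates for a oneof.
type isIdentityOneof interface{ isIdentityOneof() }

type IdentityUsername struct{ Username string }
type IdentityGcpId struct{ GcpId string }

func (*IdentityUsername) isIdentityOneof() {}
func (*IdentityGcpId) isIdentityOneof()    {}

type Identity struct {
	IdentityOneof isIdentityOneof
}

// GetUsername mirrors the generated accessor: a type assertion on the oneof
// wrapper, returning the zero value when another member (or nothing) is set.
func (x *Identity) GetUsername() string {
	if v, ok := x.IdentityOneof.(*IdentityUsername); ok {
		return v.Username
	}
	return ""
}

func main() {
	id := &Identity{IdentityOneof: &IdentityUsername{Username: "alice"}}
	fmt.Println(id.GetUsername()) // alice
	fmt.Println((&Identity{IdentityOneof: &IdentityGcpId{}}).GetUsername()) // ""
}
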
s2a.proto.TLSVersion (*Identity)(nil), // 2: s2a.proto.Identity @@ -347,7 +346,7 @@ func file_internal_proto_common_common_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_internal_proto_common_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_common_common_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Identity); i { case 0: return &v.state @@ -360,12 +359,12 @@ func file_internal_proto_common_common_proto_init() { } } } - file_internal_proto_common_common_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_internal_proto_common_common_proto_msgTypes[0].OneofWrappers = []any{ (*Identity_SpiffeId)(nil), (*Identity_Hostname)(nil), (*Identity_Uid)(nil), - (*Identity_MdbUsername)(nil), - (*Identity_GaiaId)(nil), + (*Identity_Username)(nil), + (*Identity_GcpId)(nil), } type x struct{} out := protoimpl.TypeBuilder{ diff --git a/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go b/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go index f4f763ae102..2af3ee3dc1c 100644 --- a/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go +++ b/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.2 // protoc v3.21.12 // source: internal/proto/s2a_context/s2a_context.proto @@ -209,7 +209,7 @@ func file_internal_proto_s2a_context_s2a_context_proto_rawDescGZIP() []byte { } var file_internal_proto_s2a_context_s2a_context_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_internal_proto_s2a_context_s2a_context_proto_goTypes = []interface{}{ +var file_internal_proto_s2a_context_s2a_context_proto_goTypes = []any{ (*S2AContext)(nil), // 0: s2a.proto.S2AContext (common_go_proto.TLSVersion)(0), // 1: s2a.proto.TLSVersion (common_go_proto.Ciphersuite)(0), // 2: s2a.proto.Ciphersuite @@ -233,7 +233,7 @@ func file_internal_proto_s2a_context_s2a_context_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_internal_proto_s2a_context_s2a_context_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_context_s2a_context_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*S2AContext); i { case 0: return &v.state diff --git a/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go b/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go index 0a86ebee592..8919232fd88 100644 --- a/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go +++ b/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.2 // protoc v3.21.12 // source: internal/proto/s2a/s2a.proto @@ -1171,7 +1171,7 @@ func file_internal_proto_s2a_s2a_proto_rawDescGZIP() []byte { } var file_internal_proto_s2a_s2a_proto_msgTypes = make([]protoimpl.MessageInfo, 10) -var file_internal_proto_s2a_s2a_proto_goTypes = []interface{}{ +var file_internal_proto_s2a_s2a_proto_goTypes = []any{ (*AuthenticationMechanism)(nil), // 0: s2a.proto.AuthenticationMechanism (*ClientSessionStartReq)(nil), // 1: s2a.proto.ClientSessionStartReq (*ServerSessionStartReq)(nil), // 2: s2a.proto.ServerSessionStartReq @@ -1226,7 +1226,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_internal_proto_s2a_s2a_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*AuthenticationMechanism); i { case 0: return &v.state @@ -1238,7 +1238,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*ClientSessionStartReq); i { case 0: return &v.state @@ -1250,7 +1250,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*ServerSessionStartReq); i { case 0: return &v.state @@ -1262,7 +1262,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*SessionNextReq); i { case 0: return &v.state @@ -1274,7 +1274,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*ResumptionTicketReq); i { case 0: return &v.state @@ -1286,7 +1286,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*SessionReq); i { case 0: return &v.state @@ -1298,7 +1298,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*SessionState); i { case 0: return &v.state @@ -1310,7 +1310,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*SessionResult); i { case 0: return &v.state @@ -1322,7 +1322,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[8].Exporter = 
func(v any, i int) any { switch v := v.(*SessionStatus); i { case 0: return &v.state @@ -1334,7 +1334,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*SessionResp); i { case 0: return &v.state @@ -1347,10 +1347,10 @@ func file_internal_proto_s2a_s2a_proto_init() { } } } - file_internal_proto_s2a_s2a_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_internal_proto_s2a_s2a_proto_msgTypes[0].OneofWrappers = []any{ (*AuthenticationMechanism_Token)(nil), } - file_internal_proto_s2a_s2a_proto_msgTypes[5].OneofWrappers = []interface{}{ + file_internal_proto_s2a_s2a_proto_msgTypes[5].OneofWrappers = []any{ (*SessionReq_ClientStart)(nil), (*SessionReq_ServerStart)(nil), (*SessionReq_Next)(nil), diff --git a/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go b/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go index 0fa582fc874..8fac3841be5 100644 --- a/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go +++ b/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 +// - protoc-gen-go-grpc v1.4.0 // - protoc v3.21.12 // source: internal/proto/s2a/s2a.proto @@ -29,8 +29,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 +// Requires gRPC-Go v1.62.0 or later. +const _ = grpc.SupportPackageIsVersion8 const ( S2AService_SetUpSession_FullMethodName = "/s2a.proto.S2AService/SetUpSession" @@ -61,11 +61,12 @@ func NewS2AServiceClient(cc grpc.ClientConnInterface) S2AServiceClient { } func (c *s2AServiceClient) SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) { - stream, err := c.cc.NewStream(ctx, &S2AService_ServiceDesc.Streams[0], S2AService_SetUpSession_FullMethodName, opts...) + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &S2AService_ServiceDesc.Streams[0], S2AService_SetUpSession_FullMethodName, cOpts...) if err != nil { return nil, err } - x := &s2AServiceSetUpSessionClient{stream} + x := &s2AServiceSetUpSessionClient{ClientStream: stream} return x, nil } @@ -129,7 +130,7 @@ func RegisterS2AServiceServer(s grpc.ServiceRegistrar, srv S2AServiceServer) { } func _S2AService_SetUpSession_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(S2AServiceServer).SetUpSession(&s2AServiceSetUpSessionServer{stream}) + return srv.(S2AServiceServer).SetUpSession(&s2AServiceSetUpSessionServer{ServerStream: stream}) } type S2AService_SetUpSessionServer interface { diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go index c84bed97748..e9aa5d14c0d 100644 --- a/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go +++ b/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
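
Much of the remaining churn in these regenerated files is mechanical: protoc-gen-go v1.34 emits any where v1.30 emitted interface{}. Since Go 1.18, any is a type alias for interface{}, so the two spellings denote the identical type and the rewrite changes no behavior:

package main

import "fmt"

func main() {
	// any and interface{} are the same type; values flow freely between them.
	var a any = 42
	var b interface{} = a
	fmt.Println(a == b)           // true
	fmt.Printf("%T / %T\n", a, b) // int / int
}
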
// versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.2 // protoc v3.21.12 // source: internal/proto/v2/common/common.proto @@ -256,62 +256,218 @@ func (AlpnProtocol) EnumDescriptor() ([]byte, []int) { return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{3} } +type Identity struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to IdentityOneof: + // + // *Identity_SpiffeId + // *Identity_Hostname + // *Identity_Uid + // *Identity_Username + // *Identity_GcpId + IdentityOneof isIdentity_IdentityOneof `protobuf_oneof:"identity_oneof"` + // Additional identity-specific attributes. + Attributes map[string]string `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Identity) Reset() { + *x = Identity{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_common_common_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Identity) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Identity) ProtoMessage() {} + +func (x *Identity) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_common_common_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Identity.ProtoReflect.Descriptor instead. +func (*Identity) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{0} +} + +func (m *Identity) GetIdentityOneof() isIdentity_IdentityOneof { + if m != nil { + return m.IdentityOneof + } + return nil +} + +func (x *Identity) GetSpiffeId() string { + if x, ok := x.GetIdentityOneof().(*Identity_SpiffeId); ok { + return x.SpiffeId + } + return "" +} + +func (x *Identity) GetHostname() string { + if x, ok := x.GetIdentityOneof().(*Identity_Hostname); ok { + return x.Hostname + } + return "" +} + +func (x *Identity) GetUid() string { + if x, ok := x.GetIdentityOneof().(*Identity_Uid); ok { + return x.Uid + } + return "" +} + +func (x *Identity) GetUsername() string { + if x, ok := x.GetIdentityOneof().(*Identity_Username); ok { + return x.Username + } + return "" +} + +func (x *Identity) GetGcpId() string { + if x, ok := x.GetIdentityOneof().(*Identity_GcpId); ok { + return x.GcpId + } + return "" +} + +func (x *Identity) GetAttributes() map[string]string { + if x != nil { + return x.Attributes + } + return nil +} + +type isIdentity_IdentityOneof interface { + isIdentity_IdentityOneof() +} + +type Identity_SpiffeId struct { + // The SPIFFE ID of a connection endpoint. + SpiffeId string `protobuf:"bytes,1,opt,name=spiffe_id,json=spiffeId,proto3,oneof"` +} + +type Identity_Hostname struct { + // The hostname of a connection endpoint. + Hostname string `protobuf:"bytes,2,opt,name=hostname,proto3,oneof"` +} + +type Identity_Uid struct { + // The UID of a connection endpoint. + Uid string `protobuf:"bytes,4,opt,name=uid,proto3,oneof"` +} + +type Identity_Username struct { + // The username of a connection endpoint. + Username string `protobuf:"bytes,5,opt,name=username,proto3,oneof"` +} + +type Identity_GcpId struct { + // The GCP ID of a connection endpoint. 
+ GcpId string `protobuf:"bytes,6,opt,name=gcp_id,json=gcpId,proto3,oneof"` +} + +func (*Identity_SpiffeId) isIdentity_IdentityOneof() {} + +func (*Identity_Hostname) isIdentity_IdentityOneof() {} + +func (*Identity_Uid) isIdentity_IdentityOneof() {} + +func (*Identity_Username) isIdentity_IdentityOneof() {} + +func (*Identity_GcpId) isIdentity_IdentityOneof() {} + var File_internal_proto_v2_common_common_proto protoreflect.FileDescriptor var file_internal_proto_v2_common_common_proto_rawDesc = []byte{ 0x0a, 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2a, 0xee, 0x02, 0x0a, 0x0b, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, - 0x73, 0x75, 0x69, 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x17, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, - 0x55, 0x49, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, - 0x10, 0x00, 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, - 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49, - 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, - 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x49, 0x50, 0x48, 0x45, - 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x45, 0x43, 0x44, - 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, - 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x39, 0x0a, 0x35, - 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, - 0x45, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x43, 0x48, 0x41, - 0x43, 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, - 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x03, 0x12, 0x31, 0x0a, 0x2d, 0x43, 0x49, 0x50, 0x48, 0x45, - 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x52, 0x53, 0x41, - 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, - 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x31, 0x0a, 0x2d, 0x43, 0x49, + 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x22, 0xab, 0x02, 0x0a, 0x08, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x12, 0x1d, 0x0a, 0x09, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, + 0x64, 0x12, 0x1c, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x12, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, + 0x75, 0x69, 0x64, 0x12, 0x1c, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x17, 0x0a, 0x06, 0x67, 0x63, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x48, 0x00, 0x52, 0x05, 0x67, 0x63, 0x70, 0x49, 0x64, 0x12, 0x46, 0x0a, 0x0a, 0x61, 0x74, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, + 0x2e, 0x73, 
0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6f, 0x6e, + 0x65, 0x6f, 0x66, 0x2a, 0xee, 0x02, 0x0a, 0x0b, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, + 0x69, 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x17, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, + 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, + 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, + 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, + 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, + 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, + 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, 0x47, 0x43, + 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x39, 0x0a, 0x35, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, - 0x52, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, - 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x05, 0x12, 0x37, 0x0a, - 0x33, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, - 0x48, 0x45, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x43, 0x48, 0x41, 0x43, - 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, 0x48, - 0x41, 0x32, 0x35, 0x36, 0x10, 0x06, 0x2a, 0x7d, 0x0a, 0x0a, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, - 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, - 0x5f, 0x31, 0x5f, 0x30, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, - 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x31, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x54, - 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x32, 0x10, 0x03, - 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, - 0x31, 0x5f, 0x33, 0x10, 0x04, 0x2a, 0x69, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x53, 0x69, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x4f, 0x4e, 0x4e, 0x45, - 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, - 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4e, 0x4e, - 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 
0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, - 0x4e, 0x54, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, - 0x2a, 0x79, 0x0a, 0x0c, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, - 0x12, 0x1d, 0x0a, 0x19, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, - 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, - 0x16, 0x0a, 0x12, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, - 0x5f, 0x47, 0x52, 0x50, 0x43, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, 0x50, 0x4e, 0x5f, - 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x32, 0x10, 0x02, - 0x12, 0x19, 0x0a, 0x15, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, - 0x4c, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x31, 0x5f, 0x31, 0x10, 0x03, 0x42, 0x39, 0x5a, 0x37, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x67, 0x6f, - 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x43, 0x48, 0x41, 0x43, 0x48, + 0x41, 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, 0x48, 0x41, + 0x32, 0x35, 0x36, 0x10, 0x03, 0x12, 0x31, 0x0a, 0x2d, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, + 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x57, + 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, 0x4d, 0x5f, + 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x31, 0x0a, 0x2d, 0x43, 0x49, 0x50, 0x48, + 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x52, 0x53, + 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, 0x47, + 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x05, 0x12, 0x37, 0x0a, 0x33, 0x43, + 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, + 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x43, 0x48, 0x41, 0x43, 0x48, 0x41, + 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, 0x48, 0x41, 0x32, + 0x35, 0x36, 0x10, 0x06, 0x2a, 0x7d, 0x0a, 0x0a, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, + 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, + 0x5f, 0x30, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, + 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x31, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, + 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x32, 0x10, 0x03, 0x12, 0x13, + 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, + 0x33, 0x10, 0x04, 0x2a, 0x69, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x53, 0x69, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x4f, 0x4e, 
0x4e, 0x45, 0x43, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, + 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x2a, 0x79, + 0x0a, 0x0c, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x1d, + 0x0a, 0x19, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x16, 0x0a, + 0x12, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x47, + 0x52, 0x50, 0x43, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, + 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x32, 0x10, 0x02, 0x12, 0x19, + 0x0a, 0x15, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, + 0x48, 0x54, 0x54, 0x50, 0x31, 0x5f, 0x31, 0x10, 0x03, 0x42, 0x39, 0x5a, 0x37, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, + 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x67, 0x6f, 0x5f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -327,18 +483,22 @@ func file_internal_proto_v2_common_common_proto_rawDescGZIP() []byte { } var file_internal_proto_v2_common_common_proto_enumTypes = make([]protoimpl.EnumInfo, 4) -var file_internal_proto_v2_common_common_proto_goTypes = []interface{}{ +var file_internal_proto_v2_common_common_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_internal_proto_v2_common_common_proto_goTypes = []any{ (Ciphersuite)(0), // 0: s2a.proto.v2.Ciphersuite (TLSVersion)(0), // 1: s2a.proto.v2.TLSVersion (ConnectionSide)(0), // 2: s2a.proto.v2.ConnectionSide (AlpnProtocol)(0), // 3: s2a.proto.v2.AlpnProtocol + (*Identity)(nil), // 4: s2a.proto.v2.Identity + nil, // 5: s2a.proto.v2.Identity.AttributesEntry } var file_internal_proto_v2_common_common_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name + 5, // 0: s2a.proto.v2.Identity.attributes:type_name -> s2a.proto.v2.Identity.AttributesEntry + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name } func init() { file_internal_proto_v2_common_common_proto_init() } @@ -346,19 +506,41 @@ func file_internal_proto_v2_common_common_proto_init() { if File_internal_proto_v2_common_common_proto != nil { return } + if !protoimpl.UnsafeEnabled { + file_internal_proto_v2_common_common_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*Identity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + 
return nil + } + } + } + file_internal_proto_v2_common_common_proto_msgTypes[0].OneofWrappers = []any{ + (*Identity_SpiffeId)(nil), + (*Identity_Hostname)(nil), + (*Identity_Uid)(nil), + (*Identity_Username)(nil), + (*Identity_GcpId)(nil), + } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_internal_proto_v2_common_common_proto_rawDesc, NumEnums: 4, - NumMessages: 0, + NumMessages: 2, NumExtensions: 0, NumServices: 0, }, GoTypes: file_internal_proto_v2_common_common_proto_goTypes, DependencyIndexes: file_internal_proto_v2_common_common_proto_depIdxs, EnumInfos: file_internal_proto_v2_common_common_proto_enumTypes, + MessageInfos: file_internal_proto_v2_common_common_proto_msgTypes, }.Build() File_internal_proto_v2_common_common_proto = out.File file_internal_proto_v2_common_common_proto_rawDesc = nil diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go index b7fd871c7a7..418331a4bde 100644 --- a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go +++ b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go @@ -14,14 +14,14 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.2 // protoc v3.21.12 // source: internal/proto/v2/s2a_context/s2a_context.proto package s2a_context_go_proto import ( - common_go_proto "github.com/google/s2a-go/internal/proto/common_go_proto" + common_go_proto "github.com/google/s2a-go/internal/proto/v2/common_go_proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -64,7 +64,7 @@ type S2AContext struct { // certificate chain was NOT validated successfully. PeerCertificateChainFingerprints []string `protobuf:"bytes,4,rep,name=peer_certificate_chain_fingerprints,json=peerCertificateChainFingerprints,proto3" json:"peer_certificate_chain_fingerprints,omitempty"` // The local identity used during session setup. - LocalIdentity *common_go_proto.Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` + LocalIdentity *common_go_proto.Identity `protobuf:"bytes,9,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` // The SHA256 hash of the DER-encoding of the local leaf certificate used in // the handshake. 
LocalLeafCertFingerprint []byte `protobuf:"bytes,6,opt,name=local_leaf_cert_fingerprint,json=localLeafCertFingerprint,proto3" json:"local_leaf_cert_fingerprint,omitempty"` @@ -151,35 +151,36 @@ var file_internal_proto_v2_s2a_context_s2a_context_proto_rawDesc = []byte{ 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x1a, - 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x22, 0xd9, 0x02, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, - 0x78, 0x74, 0x12, 0x2d, 0x0a, 0x13, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, - 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x10, 0x6c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x53, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, - 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x75, - 0x72, 0x69, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x6c, 0x65, 0x61, 0x66, 0x43, - 0x65, 0x72, 0x74, 0x55, 0x72, 0x69, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x65, 0x61, 0x66, 0x5f, - 0x63, 0x65, 0x72, 0x74, 0x5f, 0x64, 0x6e, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x44, 0x6e, 0x73, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x4d, 0x0a, 0x23, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, - 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x20, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, - 0x69, 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, - 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, - 0x12, 0x3d, 0x0a, 0x1b, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, - 0x65, 0x72, 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x4c, 0x65, 0x61, 0x66, - 0x43, 0x65, 0x72, 0x74, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x42, - 0x3e, 0x5a, 0x3c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, - 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2e, 0x70, 
0x72, 0x6f, 0x74, 0x6f, 0x22, 0xee, 0x02, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x2d, 0x0a, 0x13, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, + 0x72, 0x74, 0x5f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x10, 0x6c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x53, 0x70, 0x69, 0x66, + 0x66, 0x65, 0x49, 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, + 0x74, 0x5f, 0x75, 0x72, 0x69, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x6c, 0x65, + 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x55, 0x72, 0x69, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x65, + 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x64, 0x6e, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, + 0x44, 0x6e, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x4d, 0x0a, 0x23, 0x70, 0x65, 0x65, 0x72, + 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, + 0x69, 0x6e, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x20, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x46, 0x69, 0x6e, 0x67, 0x65, + 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x3d, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x16, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x49, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x3d, 0x0a, 0x1b, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, + 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, + 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x4c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, + 0x70, 0x72, 0x69, 0x6e, 0x74, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x07, 0x10, + 0x08, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x42, 0x3e, 0x5a, 0x3c, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, + 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x67, + 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -195,12 +196,12 @@ func file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescGZIP() []byte { } var file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_internal_proto_v2_s2a_context_s2a_context_proto_goTypes = []interface{}{ +var file_internal_proto_v2_s2a_context_s2a_context_proto_goTypes = []any{ (*S2AContext)(nil), // 0: s2a.proto.v2.S2AContext - (*common_go_proto.Identity)(nil), // 1: s2a.proto.Identity + (*common_go_proto.Identity)(nil), // 1: s2a.proto.v2.Identity } var file_internal_proto_v2_s2a_context_s2a_context_proto_depIdxs = []int32{ - 1, // 0: s2a.proto.v2.S2AContext.local_identity:type_name -> s2a.proto.Identity + 1, // 0: s2a.proto.v2.S2AContext.local_identity:type_name -> s2a.proto.v2.Identity 1, // [1:1] is the sub-list for method 
output_type 1, // [1:1] is the sub-list for method input_type 1, // [1:1] is the sub-list for extension type_name @@ -214,7 +215,7 @@ func file_internal_proto_v2_s2a_context_s2a_context_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*S2AContext); i { case 0: return &v.state diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go index e843450c7ed..f47c77a2ba0 100644 --- a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go +++ b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go @@ -14,14 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.2 // protoc v3.21.12 // source: internal/proto/v2/s2a/s2a.proto package s2a_go_proto import ( - common_go_proto1 "github.com/google/s2a-go/internal/proto/common_go_proto" common_go_proto "github.com/google/s2a-go/internal/proto/v2/common_go_proto" s2a_context_go_proto "github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" @@ -292,6 +291,14 @@ const ( // The connect-to-Google verification mode uses the trust bundle for // connecting to Google, e.g. *.mtls.googleapis.com endpoints. ValidatePeerCertificateChainReq_CONNECT_TO_GOOGLE ValidatePeerCertificateChainReq_VerificationMode = 2 + // Internal use only. + ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_3 ValidatePeerCertificateChainReq_VerificationMode = 3 + // Internal use only. + ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_4 ValidatePeerCertificateChainReq_VerificationMode = 4 + // Internal use only. + ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_5 ValidatePeerCertificateChainReq_VerificationMode = 5 + // Internal use only. + ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_6 ValidatePeerCertificateChainReq_VerificationMode = 6 ) // Enum value maps for ValidatePeerCertificateChainReq_VerificationMode. @@ -300,11 +307,19 @@ var ( 0: "UNSPECIFIED", 1: "SPIFFE", 2: "CONNECT_TO_GOOGLE", + 3: "RESERVED_CUSTOM_VERIFICATION_MODE_3", + 4: "RESERVED_CUSTOM_VERIFICATION_MODE_4", + 5: "RESERVED_CUSTOM_VERIFICATION_MODE_5", + 6: "RESERVED_CUSTOM_VERIFICATION_MODE_6", } ValidatePeerCertificateChainReq_VerificationMode_value = map[string]int32{ - "UNSPECIFIED": 0, - "SPIFFE": 1, - "CONNECT_TO_GOOGLE": 2, + "UNSPECIFIED": 0, + "SPIFFE": 1, + "CONNECT_TO_GOOGLE": 2, + "RESERVED_CUSTOM_VERIFICATION_MODE_3": 3, + "RESERVED_CUSTOM_VERIFICATION_MODE_4": 4, + "RESERVED_CUSTOM_VERIFICATION_MODE_5": 5, + "RESERVED_CUSTOM_VERIFICATION_MODE_6": 6, } ) @@ -454,7 +469,7 @@ type AuthenticationMechanism struct { // mechanism. Otherwise, S2A assumes that the authentication mechanism is // associated with the default identity. If the default identity cannot be // determined, the request is rejected. 
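
The field moves around this point (local_identity from number 5 to 9 above, identity from 1 to 3 just below) follow protobuf's rule for changing a field's type: pick a fresh number and reserve the old one rather than reuse it, since the wire key bakes the number in. The run 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06 in the raw descriptor above encodes reserved_range { start: 5, end: 6 } for exactly this reason. A tiny sketch of the wire-key arithmetic:

package main

import "fmt"

// Protobuf encodes each field as a key followed by the payload, where
// key = (field_number << 3) | wire_type. A decoder written against the old
// number would misread a reused field, which is why old numbers are reserved.
func key(fieldNumber, wireType int) byte {
	return byte(fieldNumber<<3 | wireType)
}

func main() {
	const lengthDelimited = 2 // wire type for messages, strings, bytes
	fmt.Printf("local_identity as field 5: key 0x%02x\n", key(5, lengthDelimited)) // 0x2a
	fmt.Printf("local_identity as field 9: key 0x%02x\n", key(9, lengthDelimited)) // 0x4a
}
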
- Identity *common_go_proto1.Identity `protobuf:"bytes,1,opt,name=identity,proto3" json:"identity,omitempty"` + Identity *common_go_proto.Identity `protobuf:"bytes,3,opt,name=identity,proto3" json:"identity,omitempty"` // Types that are assignable to MechanismOneof: // // *AuthenticationMechanism_Token @@ -493,7 +508,7 @@ func (*AuthenticationMechanism) Descriptor() ([]byte, []int) { return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{1} } -func (x *AuthenticationMechanism) GetIdentity() *common_go_proto1.Identity { +func (x *AuthenticationMechanism) GetIdentity() *common_go_proto.Identity { if x != nil { return x.Identity } @@ -1185,7 +1200,7 @@ type SessionReq struct { // identity is not populated, S2A will try to deduce the managed identity to // use from the SNI extension. If that also fails, S2A uses the default // identity (if one exists). - LocalIdentity *common_go_proto1.Identity `protobuf:"bytes,1,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` + LocalIdentity *common_go_proto.Identity `protobuf:"bytes,7,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` // The authentication mechanisms that the application wishes to use to // authenticate to S2A, ordered by preference. S2A will always use the first // authentication mechanism that matches the managed identity. @@ -1231,7 +1246,7 @@ func (*SessionReq) Descriptor() ([]byte, []int) { return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{11} } -func (x *SessionReq) GetLocalIdentity() *common_go_proto1.Identity { +func (x *SessionReq) GetLocalIdentity() *common_go_proto.Identity { if x != nil { return x.LocalIdentity } @@ -1790,358 +1805,367 @@ var file_internal_proto_v2_s2a_s2a_proto_rawDesc = []byte{ 0x0a, 0x1f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x1a, - 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2f, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, - 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, - 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x87, 0x01, 0x0a, 0x0a, - 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x36, 0x0a, 0x17, 0x65, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x6e, 0x65, 0x67, 0x6f, 0x74, 0x69, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x65, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x41, 0x6c, 0x70, 0x6e, 0x4e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0e, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x73, 0x32, 0x61, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x72, - 0x6f, 0x74, 
0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x0d, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x22, 0x75, 0x0a, 0x17, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, - 0x12, 0x2f, 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x12, 0x16, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x48, 0x00, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x6d, 0x65, 0x63, - 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x36, 0x0a, 0x06, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, - 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, - 0x61, 0x69, 0x6c, 0x73, 0x22, 0x71, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x45, - 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x69, 0x64, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x53, 0x69, 0x64, 0x65, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x53, 0x69, 0x64, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x6e, 0x69, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x73, 0x6e, 0x69, 0x22, 0xf1, 0x0b, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x54, - 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x70, 0x12, 0x78, 0x0a, 0x18, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x6c, - 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x43, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x16, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x78, 0x0a, - 0x18, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x3c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, - 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, - 0x16, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 
0x6e, 0x1a, 0xcf, 0x02, 0x0a, 0x16, 0x43, 0x6c, 0x69, 0x65, + 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x87, 0x01, 0x0a, 0x0a, 0x41, 0x6c, 0x70, 0x6e, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x36, 0x0a, 0x17, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x5f, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x6e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, + 0x6c, 0x70, 0x6e, 0x4e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, + 0x0a, 0x0e, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x52, 0x0d, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x73, 0x22, 0x7e, 0x0a, 0x17, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x12, 0x32, 0x0a, 0x08, + 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x12, 0x16, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x00, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x6d, 0x65, 0x63, 0x68, + 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x4a, 0x04, 0x08, 0x01, 0x10, + 0x02, 0x22, 0x36, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, + 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, + 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x71, 0x0a, 0x16, 0x47, 0x65, 0x74, + 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x12, 0x45, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x73, 0x69, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x73, + 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x69, 0x64, 0x65, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x69, 0x64, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x6e, + 0x69, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x6e, 0x69, 0x22, 0xf1, 0x0b, 0x0a, + 0x17, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x78, 0x0a, 0x18, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 
0x66, 0x69, 0x67, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x16, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x63, - 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, - 0x40, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, - 0x74, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, - 0x75, 0x69, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, - 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0b, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x52, 0x0a, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4a, 0x04, 0x08, - 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x1a, 0xfa, 0x06, 0x0a, 0x16, 0x53, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, - 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 
0x2e, 0x73, - 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, - 0x75, 0x69, 0x74, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x73, 0x32, - 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, - 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, - 0x69, 0x74, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x74, 0x6c, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x75, - 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x74, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x93, 0x01, 0x0a, 0x1a, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x55, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, - 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x18, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x12, 0x3c, 0x0a, 0x1b, 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x68, 0x65, 0x61, 0x64, - 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x61, 0x65, 0x61, 0x64, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x17, 0x6d, 0x61, 0x78, 0x4f, 0x76, 0x65, 0x72, 0x68, 0x65, - 0x61, 0x64, 0x4f, 0x66, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x65, 0x61, 0x64, 0x12, 0x39, - 0x0a, 0x0b, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0b, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0a, 0x61, - 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x9e, 0x02, 0x0a, 0x18, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, - 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x23, 0x0a, 0x1f, 0x44, 0x4f, 0x4e, 0x54, 0x5f, - 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, - 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x10, 0x01, 0x12, 0x2e, 0x0a, 0x2a, - 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, - 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x42, 0x55, 0x54, 0x5f, 0x44, - 0x4f, 0x4e, 0x54, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x02, 0x12, 0x29, 0x0a, 0x25, - 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, - 0x45, 0x52, 
0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x56, - 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x03, 0x12, 0x3a, 0x0a, 0x36, 0x52, 0x45, 0x51, 0x55, 0x45, - 0x53, 0x54, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x5f, 0x43, - 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, - 0x45, 0x5f, 0x42, 0x55, 0x54, 0x5f, 0x44, 0x4f, 0x4e, 0x54, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, - 0x59, 0x10, 0x04, 0x12, 0x35, 0x0a, 0x31, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x41, - 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, - 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x41, 0x4e, - 0x44, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, - 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x42, 0x13, 0x0a, 0x11, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb0, 0x03, 0x0a, 0x1d, - 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, - 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x5d, 0x0a, - 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, - 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, - 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x50, 0x72, + 0x6f, 0x6e, 0x12, 0x78, 0x0a, 0x18, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x74, 0x6c, 0x73, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x16, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xcf, 0x02, 0x0a, + 0x16, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, + 0x68, 0x61, 0x69, 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, + 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, + 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, + 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 
0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, + 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0c, 0x63, 0x69, 0x70, 0x68, + 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x19, + 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x69, + 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, + 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0b, 0x61, 0x6c, 0x70, 0x6e, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, + 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0a, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x1a, 0xfa, + 0x06, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, + 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, + 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, + 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, + 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, + 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, + 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0c, 0x63, 0x69, + 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0e, + 0x32, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, + 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x69, 0x70, + 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x74, 0x6c, 0x73, + 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x74, 0x6c, 0x73, 0x52, 0x65, + 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, + 0x93, 0x01, 0x0a, 0x1a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x55, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 
0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x18, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x1b, 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x76, 0x65, + 0x72, 0x68, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x5f, + 0x61, 0x65, 0x61, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x17, 0x6d, 0x61, 0x78, 0x4f, + 0x76, 0x65, 0x72, 0x68, 0x65, 0x61, 0x64, 0x4f, 0x66, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x41, + 0x65, 0x61, 0x64, 0x12, 0x39, 0x0a, 0x0b, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x52, 0x0a, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x9e, + 0x02, 0x0a, 0x18, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, + 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x23, 0x0a, 0x1f, + 0x44, 0x4f, 0x4e, 0x54, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, + 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x10, + 0x01, 0x12, 0x2e, 0x0a, 0x2a, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, + 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, + 0x42, 0x55, 0x54, 0x5f, 0x44, 0x4f, 0x4e, 0x54, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, + 0x02, 0x12, 0x29, 0x0a, 0x25, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, + 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, + 0x41, 0x4e, 0x44, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x03, 0x12, 0x3a, 0x0a, 0x36, + 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, + 0x49, 0x52, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, + 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x42, 0x55, 0x54, 0x5f, 0x44, 0x4f, 0x4e, 0x54, 0x5f, + 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x04, 0x12, 0x35, 0x0a, 0x31, 0x52, 0x45, 0x51, 0x55, + 0x45, 0x53, 0x54, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x5f, + 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, + 0x54, 0x45, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x05, 0x4a, + 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x42, 0x13, 0x0a, 0x11, 0x74, + 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x22, 0xb0, 0x03, 0x0a, 0x1d, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, + 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x12, 0x5d, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, + 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 
0x6e, 0x52, + 0x65, 0x71, 0x2e, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x51, 0x0a, 0x13, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x61, + 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, + 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x69, + 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, + 0x52, 0x12, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, + 0x69, 0x74, 0x68, 0x6d, 0x12, 0x1d, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x08, 0x72, 0x61, 0x77, 0x42, 0x79, + 0x74, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x64, 0x69, + 0x67, 0x65, 0x73, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, + 0x61, 0x32, 0x35, 0x36, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0d, 0x73, 0x68, + 0x61, 0x33, 0x38, 0x34, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x33, 0x38, 0x34, 0x44, 0x69, 0x67, 0x65, 0x73, + 0x74, 0x12, 0x25, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x35, 0x31, 0x32, 0x5f, 0x64, 0x69, 0x67, 0x65, + 0x73, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x35, + 0x31, 0x32, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x22, 0x3d, 0x0a, 0x13, 0x50, 0x72, 0x69, 0x76, + 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x08, 0x0a, 0x04, 0x53, 0x49, 0x47, 0x4e, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, + 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x02, 0x42, 0x0a, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, + 0x74, 0x65, 0x73, 0x22, 0x3d, 0x0a, 0x1e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x51, 0x0a, 0x13, - 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, - 0x74, 0x68, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x73, 0x32, 0x61, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x52, 0x12, 0x73, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, - 0x1d, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0c, 0x48, 0x00, 0x52, 0x08, 0x72, 0x61, 0x77, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x25, - 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x44, - 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x33, 0x38, 0x34, 0x5f, - 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, - 0x73, 0x68, 0x61, 0x33, 0x38, 0x34, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0d, - 0x73, 0x68, 
0x61, 0x35, 0x31, 0x32, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x35, 0x31, 0x32, 0x44, 0x69, 0x67, - 0x65, 0x73, 0x74, 0x22, 0x3d, 0x0a, 0x13, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, - 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, - 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x53, - 0x49, 0x47, 0x4e, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, - 0x10, 0x02, 0x42, 0x0a, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22, 0x3d, - 0x0a, 0x1e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, - 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, - 0x12, 0x1b, 0x0a, 0x09, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xe7, 0x01, - 0x0a, 0x20, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x71, 0x12, 0x63, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x45, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, - 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, - 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, - 0x65, 0x73, 0x22, 0x43, 0x0a, 0x16, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x0a, 0x0b, - 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, - 0x07, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, - 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x02, 0x22, 0x40, 0x0a, 0x21, 0x4f, 0x66, 0x66, 0x6c, 0x6f, - 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, - 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, - 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xf8, 0x04, 0x0a, 0x1f, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x52, 0x0a, - 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x73, 0x32, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x22, 0xe7, 0x01, 0x0a, 0x20, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, + 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 
0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x63, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x45, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, + 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6d, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, + 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, + 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x43, 0x0a, 0x16, 0x52, 0x65, 0x73, 0x75, 0x6d, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x01, 0x12, + 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x02, 0x22, 0x40, 0x0a, 0x21, + 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x9d, + 0x06, 0x0a, 0x1f, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, + 0x65, 0x71, 0x12, 0x52, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x3e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, + 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, + 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x5b, 0x0a, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x5f, 0x70, 0x65, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64, - 0x65, 0x12, 0x5b, 0x0a, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x65, 0x65, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, - 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, - 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, - 0x48, 0x00, 0x52, 0x0a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 
0x65, 0x65, 0x72, 0x12, 0x5b, - 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, + 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x50, 0x65, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, + 0x65, 0x65, 0x72, 0x12, 0x5b, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x65, + 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, + 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, + 0x65, 0x72, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, + 0x1a, 0x39, 0x0a, 0x0a, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2b, + 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, + 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x1a, 0xb5, 0x01, 0x0a, 0x0a, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x51, 0x0a, 0x25, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x75, + 0x6e, 0x72, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x22, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x55, 0x6e, 0x72, 0x65, 0x73, + 0x74, 0x72, 0x69, 0x63, 0x74, 0x65, 0x64, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x22, 0xea, 0x01, 0x0a, 0x10, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x50, 0x49, + 0x46, 0x46, 0x45, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, + 0x5f, 0x54, 0x4f, 0x5f, 0x47, 0x4f, 0x4f, 0x47, 0x4c, 0x45, 0x10, 0x02, 0x12, 0x27, 0x0a, 0x23, + 0x52, 0x45, 0x53, 0x45, 0x52, 0x56, 0x45, 0x44, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, + 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x4f, 0x44, + 0x45, 0x5f, 0x33, 0x10, 0x03, 0x12, 0x27, 0x0a, 0x23, 0x52, 0x45, 0x53, 0x45, 0x52, 0x56, 0x45, + 0x44, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x43, + 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x34, 0x10, 0x04, 0x12, 
0x27, + 0x0a, 0x23, 0x52, 0x45, 0x53, 0x45, 0x52, 0x56, 0x45, 0x44, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, + 0x4d, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, + 0x4f, 0x44, 0x45, 0x5f, 0x35, 0x10, 0x05, 0x12, 0x27, 0x0a, 0x23, 0x52, 0x45, 0x53, 0x45, 0x52, + 0x56, 0x45, 0x44, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, + 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x36, 0x10, 0x06, + 0x42, 0x0c, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xb2, + 0x02, 0x0a, 0x20, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, - 0x65, 0x71, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, 0x48, 0x00, 0x52, - 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, 0x1a, 0x39, 0x0a, 0x0a, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0c, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x1a, 0xb5, 0x01, 0x0a, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, - 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, - 0x69, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x68, 0x6f, 0x73, - 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x51, 0x0a, 0x25, 0x73, - 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x75, 0x6e, 0x72, 0x65, 0x73, 0x74, - 0x72, 0x69, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x22, 0x73, 0x65, 0x72, 0x69, - 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x55, 0x6e, 0x72, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, - 0x65, 0x64, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x46, - 0x0a, 0x10, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, - 0x64, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, - 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x50, 0x49, 0x46, 0x46, 0x45, 0x10, 0x01, 0x12, - 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x5f, 0x54, 0x4f, 0x5f, 0x47, 0x4f, - 0x4f, 0x47, 0x4c, 0x45, 0x10, 0x02, 0x42, 0x0c, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x6f, - 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xb2, 0x02, 0x0a, 0x20, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x6c, 0x0a, 0x11, 0x76, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, + 0x65, 0x73, 0x70, 
0x12, 0x6c, 0x0a, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, + 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, + 0x10, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x12, 0x2d, 0x0a, 0x12, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, + 0x12, 0x32, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, + 0x2e, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x78, 0x74, 0x22, 0x3d, 0x0a, 0x10, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, + 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, + 0x45, 0x10, 0x02, 0x22, 0xa0, 0x05, 0x0a, 0x0a, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x12, 0x3d, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x12, 0x62, 0x0a, 0x19, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x52, 0x18, 0x61, 0x75, 0x74, + 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, + 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x12, 0x61, 0x0a, 0x19, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x6c, 0x73, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, + 0x65, 0x71, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x48, 0x00, + 0x52, 0x16, 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x77, 0x0a, 0x21, 0x6f, 0x66, 0x66, 0x6c, + 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, + 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 
0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, + 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, + 0x48, 0x00, 0x52, 0x1d, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, + 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, + 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, + 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, + 0x48, 0x00, 0x52, 0x20, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x12, 0x7d, 0x0a, 0x23, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2d, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, + 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, + 0x48, 0x00, 0x52, 0x1f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, - 0x52, 0x65, 0x73, 0x70, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x10, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x2d, 0x0a, 0x12, 0x76, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, - 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x32, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, - 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, - 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x3d, 0x0a, 0x10, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x0f, - 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, - 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, - 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x02, 0x22, 0x97, 0x05, 0x0a, 0x0a, 0x53, 0x65, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, - 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 
0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x79, 0x12, 0x62, 0x0a, 0x19, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x52, 0x18, - 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, - 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x12, 0x61, 0x0a, 0x19, 0x67, 0x65, 0x74, 0x5f, - 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x73, 0x32, - 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, - 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x71, 0x48, 0x00, 0x52, 0x16, 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x77, 0x0a, 0x21, 0x6f, - 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, - 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, + 0x52, 0x65, 0x71, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, + 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0xb4, 0x04, 0x0a, 0x0b, 0x53, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x12, 0x64, 0x0a, 0x1a, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, + 0x73, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, + 0x00, 0x52, 0x17, 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x7a, 0x0a, 0x22, 0x6f, 0x66, + 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, + 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1d, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 
- 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x71, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, - 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, - 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x20, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, - 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x7d, 0x0a, 0x23, 0x76, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, - 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, - 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, - 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, - 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f, 0x6e, - 0x65, 0x6f, 0x66, 0x22, 0xb4, 0x04, 0x0a, 0x0b, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x70, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x12, 0x64, 0x0a, 0x1a, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x17, - 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x7a, 0x0a, 0x22, 0x6f, 0x66, 0x66, 0x6c, 0x6f, - 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, - 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, - 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, - 0x70, 0x48, 0x00, 0x52, 0x1e, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, - 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x70, 0x12, 
0x83, 0x01, 0x0a, 0x25, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, - 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, - 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x21, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, - 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x76, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, 0x65, - 0x73, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, - 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, - 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x20, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x42, 0x0c, 0x0a, 0x0a, - 0x72, 0x65, 0x73, 0x70, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x2a, 0xa2, 0x03, 0x0a, 0x12, 0x53, - 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, - 0x6d, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, - 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, - 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, - 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, - 0x10, 0x01, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, + 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x1e, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, + 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x83, 0x01, 0x0a, 0x25, 0x6f, 0x66, 0x66, 0x6c, 0x6f, + 0x61, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, + 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, + 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x21, 0x6f, 0x66, 0x66, 0x6c, 0x6f, + 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x80, 0x01, 0x0a, + 0x24, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 
0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, + 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x20, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x42, + 0x0c, 0x0a, 0x0a, 0x72, 0x65, 0x73, 0x70, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x2a, 0xa2, 0x03, + 0x0a, 0x12, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, + 0x69, 0x74, 0x68, 0x6d, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, + 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, - 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, + 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, - 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x03, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, - 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, - 0x45, 0x43, 0x50, 0x32, 0x35, 0x36, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, - 0x04, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, - 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x33, 0x38, 0x34, 0x52, - 0x31, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x05, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, - 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, - 0x5f, 0x53, 0x45, 0x43, 0x50, 0x35, 0x32, 0x31, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, - 0x32, 0x10, 0x06, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, - 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, - 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x07, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, - 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, - 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x08, 0x12, - 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, - 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, - 0x35, 0x31, 0x32, 0x10, 0x09, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, - 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39, 0x10, 0x0a, 0x32, - 0x57, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x49, 0x0a, - 0x0c, 0x53, 0x65, 0x74, 0x55, 0x70, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x2e, - 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x1a, 0x19, 0x2e, 0x73, 0x32, 0x61, 
0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, - 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, + 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, + 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x03, 0x12, 0x27, 0x0a, 0x23, 0x53, + 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, + 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x32, 0x35, 0x36, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, + 0x35, 0x36, 0x10, 0x04, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, + 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x33, + 0x38, 0x34, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x05, 0x12, 0x27, 0x0a, + 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, + 0x44, 0x53, 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x35, 0x32, 0x31, 0x52, 0x31, 0x5f, 0x53, 0x48, + 0x41, 0x35, 0x31, 0x32, 0x10, 0x06, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, + 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, + 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x07, 0x12, 0x24, 0x0a, 0x20, + 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, + 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, + 0x10, 0x08, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, + 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, + 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x09, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x32, 0x41, 0x5f, + 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39, + 0x10, 0x0a, 0x32, 0x57, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x12, 0x49, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x55, 0x70, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, + 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x1a, 0x19, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2158,7 +2182,7 @@ func file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP() []byte { var file_internal_proto_v2_s2a_s2a_proto_enumTypes = make([]protoimpl.EnumInfo, 6) var 
file_internal_proto_v2_s2a_s2a_proto_msgTypes = make([]protoimpl.MessageInfo, 17) -var file_internal_proto_v2_s2a_s2a_proto_goTypes = []interface{}{ +var file_internal_proto_v2_s2a_s2a_proto_goTypes = []any{ (SignatureAlgorithm)(0), // 0: s2a.proto.v2.SignatureAlgorithm (GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate)(0), // 1: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.RequestClientCertificate (OffloadPrivateKeyOperationReq_PrivateKeyOperation)(0), // 2: s2a.proto.v2.OffloadPrivateKeyOperationReq.PrivateKeyOperation @@ -2183,7 +2207,7 @@ var file_internal_proto_v2_s2a_s2a_proto_goTypes = []interface{}{ (*ValidatePeerCertificateChainReq_ClientPeer)(nil), // 21: s2a.proto.v2.ValidatePeerCertificateChainReq.ClientPeer (*ValidatePeerCertificateChainReq_ServerPeer)(nil), // 22: s2a.proto.v2.ValidatePeerCertificateChainReq.ServerPeer (common_go_proto.AlpnProtocol)(0), // 23: s2a.proto.v2.AlpnProtocol - (*common_go_proto1.Identity)(nil), // 24: s2a.proto.Identity + (*common_go_proto.Identity)(nil), // 24: s2a.proto.v2.Identity (common_go_proto.ConnectionSide)(0), // 25: s2a.proto.v2.ConnectionSide (*s2a_context_go_proto.S2AContext)(nil), // 26: s2a.proto.v2.S2AContext (common_go_proto.TLSVersion)(0), // 27: s2a.proto.v2.TLSVersion @@ -2191,7 +2215,7 @@ var file_internal_proto_v2_s2a_s2a_proto_goTypes = []interface{}{ } var file_internal_proto_v2_s2a_s2a_proto_depIdxs = []int32{ 23, // 0: s2a.proto.v2.AlpnPolicy.alpn_protocols:type_name -> s2a.proto.v2.AlpnProtocol - 24, // 1: s2a.proto.v2.AuthenticationMechanism.identity:type_name -> s2a.proto.Identity + 24, // 1: s2a.proto.v2.AuthenticationMechanism.identity:type_name -> s2a.proto.v2.Identity 25, // 2: s2a.proto.v2.GetTlsConfigurationReq.connection_side:type_name -> s2a.proto.v2.ConnectionSide 19, // 3: s2a.proto.v2.GetTlsConfigurationResp.client_tls_configuration:type_name -> s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration 20, // 4: s2a.proto.v2.GetTlsConfigurationResp.server_tls_configuration:type_name -> s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration @@ -2203,7 +2227,7 @@ var file_internal_proto_v2_s2a_s2a_proto_depIdxs = []int32{ 22, // 10: s2a.proto.v2.ValidatePeerCertificateChainReq.server_peer:type_name -> s2a.proto.v2.ValidatePeerCertificateChainReq.ServerPeer 5, // 11: s2a.proto.v2.ValidatePeerCertificateChainResp.validation_result:type_name -> s2a.proto.v2.ValidatePeerCertificateChainResp.ValidationResult 26, // 12: s2a.proto.v2.ValidatePeerCertificateChainResp.context:type_name -> s2a.proto.v2.S2AContext - 24, // 13: s2a.proto.v2.SessionReq.local_identity:type_name -> s2a.proto.Identity + 24, // 13: s2a.proto.v2.SessionReq.local_identity:type_name -> s2a.proto.v2.Identity 7, // 14: s2a.proto.v2.SessionReq.authentication_mechanisms:type_name -> s2a.proto.v2.AuthenticationMechanism 9, // 15: s2a.proto.v2.SessionReq.get_tls_configuration_req:type_name -> s2a.proto.v2.GetTlsConfigurationReq 11, // 16: s2a.proto.v2.SessionReq.offload_private_key_operation_req:type_name -> s2a.proto.v2.OffloadPrivateKeyOperationReq @@ -2238,7 +2262,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_internal_proto_v2_s2a_s2a_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*AlpnPolicy); i { case 0: return &v.state @@ -2250,7 +2274,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - 
file_internal_proto_v2_s2a_s2a_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*AuthenticationMechanism); i { case 0: return &v.state @@ -2262,7 +2286,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*Status); i { case 0: return &v.state @@ -2274,7 +2298,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*GetTlsConfigurationReq); i { case 0: return &v.state @@ -2286,7 +2310,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*GetTlsConfigurationResp); i { case 0: return &v.state @@ -2298,7 +2322,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*OffloadPrivateKeyOperationReq); i { case 0: return &v.state @@ -2310,7 +2334,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*OffloadPrivateKeyOperationResp); i { case 0: return &v.state @@ -2322,7 +2346,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*OffloadResumptionKeyOperationReq); i { case 0: return &v.state @@ -2334,7 +2358,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*OffloadResumptionKeyOperationResp); i { case 0: return &v.state @@ -2346,7 +2370,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*ValidatePeerCertificateChainReq); i { case 0: return &v.state @@ -2358,7 +2382,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*ValidatePeerCertificateChainResp); i { case 0: return &v.state @@ -2370,7 +2394,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - 
file_internal_proto_v2_s2a_s2a_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*SessionReq); i { case 0: return &v.state @@ -2382,7 +2406,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*SessionResp); i { case 0: return &v.state @@ -2394,7 +2418,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[13].Exporter = func(v any, i int) any { switch v := v.(*GetTlsConfigurationResp_ClientTlsConfiguration); i { case 0: return &v.state @@ -2406,7 +2430,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[14].Exporter = func(v any, i int) any { switch v := v.(*GetTlsConfigurationResp_ServerTlsConfiguration); i { case 0: return &v.state @@ -2418,7 +2442,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[15].Exporter = func(v any, i int) any { switch v := v.(*ValidatePeerCertificateChainReq_ClientPeer); i { case 0: return &v.state @@ -2430,7 +2454,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { return nil } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[16].Exporter = func(v any, i int) any { switch v := v.(*ValidatePeerCertificateChainReq_ServerPeer); i { case 0: return &v.state @@ -2443,30 +2467,30 @@ func file_internal_proto_v2_s2a_s2a_proto_init() { } } } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[1].OneofWrappers = []interface{}{ + file_internal_proto_v2_s2a_s2a_proto_msgTypes[1].OneofWrappers = []any{ (*AuthenticationMechanism_Token)(nil), } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[4].OneofWrappers = []interface{}{ + file_internal_proto_v2_s2a_s2a_proto_msgTypes[4].OneofWrappers = []any{ (*GetTlsConfigurationResp_ClientTlsConfiguration_)(nil), (*GetTlsConfigurationResp_ServerTlsConfiguration_)(nil), } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[5].OneofWrappers = []interface{}{ + file_internal_proto_v2_s2a_s2a_proto_msgTypes[5].OneofWrappers = []any{ (*OffloadPrivateKeyOperationReq_RawBytes)(nil), (*OffloadPrivateKeyOperationReq_Sha256Digest)(nil), (*OffloadPrivateKeyOperationReq_Sha384Digest)(nil), (*OffloadPrivateKeyOperationReq_Sha512Digest)(nil), } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[9].OneofWrappers = []interface{}{ + file_internal_proto_v2_s2a_s2a_proto_msgTypes[9].OneofWrappers = []any{ (*ValidatePeerCertificateChainReq_ClientPeer_)(nil), (*ValidatePeerCertificateChainReq_ServerPeer_)(nil), } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[11].OneofWrappers = []interface{}{ + file_internal_proto_v2_s2a_s2a_proto_msgTypes[11].OneofWrappers = []any{ (*SessionReq_GetTlsConfigurationReq)(nil), (*SessionReq_OffloadPrivateKeyOperationReq)(nil), (*SessionReq_OffloadResumptionKeyOperationReq)(nil), 
(*SessionReq_ValidatePeerCertificateChainReq)(nil), } - file_internal_proto_v2_s2a_s2a_proto_msgTypes[12].OneofWrappers = []interface{}{ + file_internal_proto_v2_s2a_s2a_proto_msgTypes[12].OneofWrappers = []any{ (*SessionResp_GetTlsConfigurationResp)(nil), (*SessionResp_OffloadPrivateKeyOperationResp)(nil), (*SessionResp_OffloadResumptionKeyOperationResp)(nil), diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go index 2566df6c304..c93f75a78b0 100644 --- a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go +++ b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 +// - protoc-gen-go-grpc v1.4.0 // - protoc v3.21.12 // source: internal/proto/v2/s2a/s2a.proto @@ -29,8 +29,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 +// Requires gRPC-Go v1.62.0 or later. +const _ = grpc.SupportPackageIsVersion8 const ( S2AService_SetUpSession_FullMethodName = "/s2a.proto.v2.S2AService/SetUpSession" @@ -54,11 +54,12 @@ func NewS2AServiceClient(cc grpc.ClientConnInterface) S2AServiceClient { } func (c *s2AServiceClient) SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) { - stream, err := c.cc.NewStream(ctx, &S2AService_ServiceDesc.Streams[0], S2AService_SetUpSession_FullMethodName, opts...) + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &S2AService_ServiceDesc.Streams[0], S2AService_SetUpSession_FullMethodName, cOpts...) 
if err != nil { return nil, err } - x := &s2AServiceSetUpSessionClient{stream} + x := &s2AServiceSetUpSessionClient{ClientStream: stream} return x, nil } @@ -115,7 +116,7 @@ func RegisterS2AServiceServer(s grpc.ServiceRegistrar, srv S2AServiceServer) { } func _S2AService_SetUpSession_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(S2AServiceServer).SetUpSession(&s2AServiceSetUpSessionServer{stream}) + return srv.(S2AServiceServer).SetUpSession(&s2AServiceSetUpSessionServer{ServerStream: stream}) } type S2AService_SetUpSessionServer interface { diff --git a/vendor/github.com/google/s2a-go/internal/record/record.go b/vendor/github.com/google/s2a-go/internal/record/record.go index c60515510a7..e76509ef01a 100644 --- a/vendor/github.com/google/s2a-go/internal/record/record.go +++ b/vendor/github.com/google/s2a-go/internal/record/record.go @@ -378,11 +378,6 @@ func (p *conn) Read(b []byte) (n int, err error) { if len(p.handshakeBuf) > 0 { return 0, errors.New("application data received while processing fragmented handshake messages") } - if p.ticketState == receivingTickets { - p.ticketState = notReceivingTickets - grpclog.Infof("Sending session tickets to S2A.") - p.ticketSender.sendTicketsToS2A(p.sessionTickets, p.callComplete) - } case alert: return 0, p.handleAlertMessage() case handshake: @@ -500,17 +495,7 @@ func (p *conn) buildRecord(plaintext []byte, recordType byte, recordStartIndex i } func (p *conn) Close() error { - p.readMutex.Lock() - defer p.readMutex.Unlock() - p.writeMutex.Lock() - defer p.writeMutex.Unlock() - // If p.ticketState is equal to notReceivingTickets, then S2A has - // been sent a flight of session tickets, and we must wait for the - // call to S2A to complete before closing the record protocol. - if p.ticketState == notReceivingTickets { - <-p.callComplete - grpclog.Infof("Safe to close the connection because sending tickets to S2A is (already) complete.") - } + // Close the connection immediately. return p.Conn.Close() } @@ -663,7 +648,7 @@ func (p *conn) handleHandshakeMessage() error { // Several handshake messages may be coalesced into a single record. // Continue reading them until the handshake buffer is empty. for len(p.handshakeBuf) > 0 { - handshakeMsgType, msgLen, msg, rawMsg, ok := p.parseHandshakeMsg() + handshakeMsgType, msgLen, msg, _, ok := p.parseHandshakeMsg() if !ok { // The handshake could not be fully parsed, so read in another // record and try again later. @@ -681,20 +666,7 @@ func (p *conn) handleHandshakeMessage() error { return err } case tlsHandshakeNewSessionTicketType: - // Ignore tickets that are received after a batch of tickets has - // been sent to S2A. - if p.ticketState == notReceivingTickets { - continue - } - if p.ticketState == ticketsNotYetReceived { - p.ticketState = receivingTickets - } - p.sessionTickets = append(p.sessionTickets, rawMsg) - if len(p.sessionTickets) == maxAllowedTickets { - p.ticketState = notReceivingTickets - grpclog.Infof("Sending session tickets to S2A.") - p.ticketSender.sendTicketsToS2A(p.sessionTickets, p.callComplete) - } + // Do nothing for session ticket. 
default: return errors.New("unknown handshake message type") } diff --git a/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go b/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go index ec96ba3b6a6..4057e70c8ad 100644 --- a/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go +++ b/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go @@ -23,7 +23,8 @@ import ( "fmt" "os" - commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" + commonpbv1 "github.com/google/s2a-go/internal/proto/common_go_proto" + commonpb "github.com/google/s2a-go/internal/proto/v2/common_go_proto" ) const ( @@ -37,7 +38,7 @@ type AccessTokenManager interface { DefaultToken() (token string, err error) // Token returns a token that an application with local identity equal to // identity must use to authenticate to S2A. - Token(identity *commonpb.Identity) (token string, err error) + Token(identity interface{}) (token string, err error) } type singleTokenAccessTokenManager struct { @@ -65,6 +66,14 @@ func (m *singleTokenAccessTokenManager) DefaultToken() (string, error) { } // Token always returns the token managed by the singleTokenAccessTokenManager. -func (m *singleTokenAccessTokenManager) Token(*commonpb.Identity) (string, error) { +func (m *singleTokenAccessTokenManager) Token(identity interface{}) (string, error) { + switch v := identity.(type) { + case *commonpbv1.Identity: + // valid type. + case *commonpb.Identity: + // valid type. + default: + return "", fmt.Errorf("Incorrect identity type: %v", v) + } return m.token, nil } diff --git a/vendor/github.com/google/s2a-go/internal/v2/s2av2.go b/vendor/github.com/google/s2a-go/internal/v2/s2av2.go index 85a8379d833..0cc78547e9f 100644 --- a/vendor/github.com/google/s2a-go/internal/v2/s2av2.go +++ b/vendor/github.com/google/s2a-go/internal/v2/s2av2.go @@ -28,7 +28,6 @@ import ( "os" "time" - "github.com/golang/protobuf/proto" "github.com/google/s2a-go/fallback" "github.com/google/s2a-go/internal/handshaker/service" "github.com/google/s2a-go/internal/tokenmanager" @@ -38,8 +37,9 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" + "google.golang.org/protobuf/proto" - commonpbv1 "github.com/google/s2a-go/internal/proto/common_go_proto" + commonpb "github.com/google/s2a-go/internal/proto/v2/common_go_proto" s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" ) @@ -59,18 +59,18 @@ type s2av2TransportCreds struct { transportCreds credentials.TransportCredentials tokenManager *tokenmanager.AccessTokenManager // localIdentity should only be used by the client. - localIdentity *commonpbv1.Identity + localIdentity *commonpb.Identity // localIdentities should only be used by the server. - localIdentities []*commonpbv1.Identity + localIdentities []*commonpb.Identity verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode fallbackClientHandshake fallback.ClientHandshake - getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error) + getS2AStream stream.GetS2AStream serverAuthorizationPolicy []byte } // NewClientCreds returns a client-side transport credentials object that uses // the S2Av2 to establish a secure connection with a server. 
-func NewClientCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentity *commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, fallbackClientHandshakeFunc fallback.ClientHandshake, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error), serverAuthorizationPolicy []byte) (credentials.TransportCredentials, error) { +func NewClientCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentity *commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, fallbackClientHandshakeFunc fallback.ClientHandshake, getS2AStream stream.GetS2AStream, serverAuthorizationPolicy []byte) (credentials.TransportCredentials, error) { // Create an AccessTokenManager instance to use to authenticate to S2Av2. accessTokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() @@ -101,7 +101,7 @@ func NewClientCreds(s2av2Address string, transportCreds credentials.TransportCre // NewServerCreds returns a server-side transport credentials object that uses // the S2Av2 to establish a secure connection with a client. -func NewServerCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)) (credentials.TransportCredentials, error) { +func NewServerCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentities []*commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, getS2AStream stream.GetS2AStream) (credentials.TransportCredentials, error) { // Create an AccessTokenManager instance to use to authenticate to S2Av2. 
accessTokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() creds := &s2av2TransportCreds{ @@ -183,13 +183,7 @@ func (c *s2av2TransportCreds) ClientHandshake(ctx context.Context, serverAuthori } creds := credentials.NewTLS(config) - var conn net.Conn - var authInfo credentials.AuthInfo - retry.Run(timeoutCtx, - func() error { - conn, authInfo, err = creds.ClientHandshake(timeoutCtx, serverName, rawConn) - return err - }) + conn, authInfo, err := creds.ClientHandshake(timeoutCtx, serverName, rawConn) if err != nil { grpclog.Infof("Failed to do client handshake using S2Av2: %v", err) if c.fallbackClientHandshake != nil { @@ -197,7 +191,7 @@ func (c *s2av2TransportCreds) ClientHandshake(ctx context.Context, serverAuthori } return nil, nil, err } - grpclog.Infof("Successfully done client handshake using S2Av2 to: %s", serverName) + grpclog.Infof("client-side handshake is done using S2Av2 to: %s", serverName) return conn, authInfo, err } @@ -247,13 +241,7 @@ func (c *s2av2TransportCreds) ServerHandshake(rawConn net.Conn) (net.Conn, crede } creds := credentials.NewTLS(config) - var conn net.Conn - var authInfo credentials.AuthInfo - retry.Run(ctx, - func() error { - conn, authInfo, err = creds.ServerHandshake(rawConn) - return err - }) + conn, authInfo, err := creds.ServerHandshake(rawConn) if err != nil { grpclog.Infof("Failed to do server handshake using S2Av2: %v", err) return nil, nil, err @@ -280,15 +268,15 @@ func (c *s2av2TransportCreds) Clone() credentials.TransportCredentials { tokenManager = *c.tokenManager } verificationMode := c.verificationMode - var localIdentity *commonpbv1.Identity + var localIdentity *commonpb.Identity if c.localIdentity != nil { - localIdentity = proto.Clone(c.localIdentity).(*commonpbv1.Identity) + localIdentity = proto.Clone(c.localIdentity).(*commonpb.Identity) } - var localIdentities []*commonpbv1.Identity + var localIdentities []*commonpb.Identity if c.localIdentities != nil { - localIdentities = make([]*commonpbv1.Identity, len(c.localIdentities)) + localIdentities = make([]*commonpb.Identity, len(c.localIdentities)) for i, localIdentity := range c.localIdentities { - localIdentities[i] = proto.Clone(localIdentity).(*commonpbv1.Identity) + localIdentities[i] = proto.Clone(localIdentity).(*commonpb.Identity) } } creds := &s2av2TransportCreds{ @@ -318,8 +306,9 @@ func NewClientTLSConfig( tokenManager tokenmanager.AccessTokenManager, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, serverName string, - serverAuthorizationPolicy []byte) (*tls.Config, error) { - s2AStream, err := createStream(ctx, s2av2Address, transportCreds, nil) + serverAuthorizationPolicy []byte, + getStream stream.GetS2AStream) (*tls.Config, error) { + s2AStream, err := createStream(ctx, s2av2Address, transportCreds, getStream) if err != nil { grpclog.Infof("Failed to connect to S2Av2: %v", err) return nil, err @@ -362,7 +351,7 @@ func (x s2AGrpcStream) CloseSend() error { return x.stream.CloseSend() } -func createStream(ctx context.Context, s2av2Address string, transportCreds credentials.TransportCredentials, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)) (stream.S2AStream, error) { +func createStream(ctx context.Context, s2av2Address string, transportCreds credentials.TransportCredentials, getS2AStream stream.GetS2AStream) (stream.S2AStream, error) { if getS2AStream != nil { return getS2AStream(ctx, s2av2Address) } diff --git a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go 
b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go index 4d919132295..6ca75f56080 100644 --- a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go +++ b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go @@ -33,7 +33,6 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" - commonpbv1 "github.com/google/s2a-go/internal/proto/common_go_proto" commonpb "github.com/google/s2a-go/internal/proto/v2/common_go_proto" s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" ) @@ -44,8 +43,8 @@ const ( ) // GetTLSConfigurationForClient returns a tls.Config instance for use by a client application. -func GetTLSConfigurationForClient(serverHostname string, s2AStream stream.S2AStream, tokenManager tokenmanager.AccessTokenManager, localIdentity *commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, serverAuthorizationPolicy []byte) (*tls.Config, error) { - authMechanisms := getAuthMechanisms(tokenManager, []*commonpbv1.Identity{localIdentity}) +func GetTLSConfigurationForClient(serverHostname string, s2AStream stream.S2AStream, tokenManager tokenmanager.AccessTokenManager, localIdentity *commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, serverAuthorizationPolicy []byte) (*tls.Config, error) { + authMechanisms := getAuthMechanisms(tokenManager, []*commonpb.Identity{localIdentity}) if grpclog.V(1) { grpclog.Infof("Sending request to S2Av2 for client TLS config.") @@ -76,7 +75,7 @@ func GetTLSConfigurationForClient(serverHostname string, s2AStream stream.S2AStr return nil, fmt.Errorf("failed to get TLS configuration from S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details) } - // Extract TLS configiguration from SessionResp. + // Extract TLS configuration from SessionResp. tlsConfig := resp.GetGetTlsConfigurationResp().GetClientTlsConfiguration() var cert tls.Certificate @@ -126,7 +125,7 @@ func GetTLSConfigurationForClient(serverHostname string, s2AStream stream.S2AStr } // GetTLSConfigurationForServer returns a tls.Config instance for use by a server application. -func GetTLSConfigurationForServer(s2AStream stream.S2AStream, tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode) (*tls.Config, error) { +func GetTLSConfigurationForServer(s2AStream stream.S2AStream, tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode) (*tls.Config, error) { return &tls.Config{ GetConfigForClient: ClientConfig(tokenManager, localIdentities, verificationMode, s2AStream), }, nil @@ -136,7 +135,7 @@ func GetTLSConfigurationForServer(s2AStream stream.S2AStream, tokenManager token // connection with a client, based on SNI communicated during ClientHello. // Ensures that server presents the correct certificate to establish a TLS // connection. 
-func ClientConfig(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, s2AStream stream.S2AStream) func(chi *tls.ClientHelloInfo) (*tls.Config, error) { +func ClientConfig(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, s2AStream stream.S2AStream) func(chi *tls.ClientHelloInfo) (*tls.Config, error) { return func(chi *tls.ClientHelloInfo) (*tls.Config, error) { tlsConfig, err := getServerConfigFromS2Av2(tokenManager, localIdentities, chi.ServerName, s2AStream) if err != nil { @@ -219,9 +218,9 @@ func getTLSCipherSuite(tlsCipherSuite commonpb.Ciphersuite) uint16 { } } -func getServerConfigFromS2Av2(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity, sni string, s2AStream stream.S2AStream) (*s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration, error) { +func getServerConfigFromS2Av2(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpb.Identity, sni string, s2AStream stream.S2AStream) (*s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration, error) { authMechanisms := getAuthMechanisms(tokenManager, localIdentities) - var locID *commonpbv1.Identity + var locID *commonpb.Identity if localIdentities != nil { locID = localIdentities[0] } @@ -283,7 +282,7 @@ func getTLSClientAuthType(tlsConfig *s2av2pb.GetTlsConfigurationResp_ServerTlsCo return clientAuth } -func getAuthMechanisms(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity) []*s2av2pb.AuthenticationMechanism { +func getAuthMechanisms(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpb.Identity) []*s2av2pb.AuthenticationMechanism { if tokenManager == nil { return nil } diff --git a/vendor/github.com/google/s2a-go/s2a.go b/vendor/github.com/google/s2a-go/s2a.go index 5ecb06f930e..c52fccddf8c 100644 --- a/vendor/github.com/google/s2a-go/s2a.go +++ b/vendor/github.com/google/s2a-go/s2a.go @@ -29,17 +29,19 @@ import ( "sync" "time" - "github.com/golang/protobuf/proto" "github.com/google/s2a-go/fallback" "github.com/google/s2a-go/internal/handshaker" "github.com/google/s2a-go/internal/handshaker/service" "github.com/google/s2a-go/internal/tokenmanager" "github.com/google/s2a-go/internal/v2" "github.com/google/s2a-go/retry" + "github.com/google/s2a-go/stream" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" + "google.golang.org/protobuf/proto" - commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" + commonpbv1 "github.com/google/s2a-go/internal/proto/common_go_proto" + commonpb "github.com/google/s2a-go/internal/proto/v2/common_go_proto" s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" ) @@ -54,17 +56,17 @@ const ( // credentials.TransportCredentials interface. type s2aTransportCreds struct { info *credentials.ProtocolInfo - minTLSVersion commonpb.TLSVersion - maxTLSVersion commonpb.TLSVersion + minTLSVersion commonpbv1.TLSVersion + maxTLSVersion commonpbv1.TLSVersion // tlsCiphersuites contains the ciphersuites used in the S2A connection. // Note that these are currently unconfigurable. - tlsCiphersuites []commonpb.Ciphersuite + tlsCiphersuites []commonpbv1.Ciphersuite // localIdentity should only be used by the client. - localIdentity *commonpb.Identity + localIdentity *commonpbv1.Identity // localIdentities should only be used by the server. 
- localIdentities []*commonpb.Identity + localIdentities []*commonpbv1.Identity // targetIdentities should only be used by the client. - targetIdentities []*commonpb.Identity + targetIdentities []*commonpbv1.Identity isClient bool s2aAddr string ensureProcessSessionTickets *sync.WaitGroup @@ -76,7 +78,7 @@ func NewClientCreds(opts *ClientOptions) (credentials.TransportCredentials, erro if opts == nil { return nil, errors.New("nil client options") } - var targetIdentities []*commonpb.Identity + var targetIdentities []*commonpbv1.Identity for _, targetIdentity := range opts.TargetIdentities { protoTargetIdentity, err := toProtoIdentity(targetIdentity) if err != nil { @@ -93,12 +95,12 @@ func NewClientCreds(opts *ClientOptions) (credentials.TransportCredentials, erro info: &credentials.ProtocolInfo{ SecurityProtocol: s2aSecurityProtocol, }, - minTLSVersion: commonpb.TLSVersion_TLS1_3, - maxTLSVersion: commonpb.TLSVersion_TLS1_3, - tlsCiphersuites: []commonpb.Ciphersuite{ - commonpb.Ciphersuite_AES_128_GCM_SHA256, - commonpb.Ciphersuite_AES_256_GCM_SHA384, - commonpb.Ciphersuite_CHACHA20_POLY1305_SHA256, + minTLSVersion: commonpbv1.TLSVersion_TLS1_3, + maxTLSVersion: commonpbv1.TLSVersion_TLS1_3, + tlsCiphersuites: []commonpbv1.Ciphersuite{ + commonpbv1.Ciphersuite_AES_128_GCM_SHA256, + commonpbv1.Ciphersuite_AES_256_GCM_SHA384, + commonpbv1.Ciphersuite_CHACHA20_POLY1305_SHA256, }, localIdentity: localIdentity, targetIdentities: targetIdentities, @@ -112,7 +114,11 @@ func NewClientCreds(opts *ClientOptions) (credentials.TransportCredentials, erro if opts.FallbackOpts != nil && opts.FallbackOpts.FallbackClientHandshakeFunc != nil { fallbackFunc = opts.FallbackOpts.FallbackClientHandshakeFunc } - return v2.NewClientCreds(opts.S2AAddress, opts.TransportCreds, localIdentity, verificationMode, fallbackFunc, opts.getS2AStream, opts.serverAuthorizationPolicy) + v2LocalIdentity, err := toV2ProtoIdentity(opts.LocalIdentity) + if err != nil { + return nil, err + } + return v2.NewClientCreds(opts.S2AAddress, opts.TransportCreds, v2LocalIdentity, verificationMode, fallbackFunc, opts.getS2AStream, opts.serverAuthorizationPolicy) } // NewServerCreds returns a server-side transport credentials object that uses @@ -121,7 +127,7 @@ func NewServerCreds(opts *ServerOptions) (credentials.TransportCredentials, erro if opts == nil { return nil, errors.New("nil server options") } - var localIdentities []*commonpb.Identity + var localIdentities []*commonpbv1.Identity for _, localIdentity := range opts.LocalIdentities { protoLocalIdentity, err := toProtoIdentity(localIdentity) if err != nil { @@ -134,12 +140,12 @@ func NewServerCreds(opts *ServerOptions) (credentials.TransportCredentials, erro info: &credentials.ProtocolInfo{ SecurityProtocol: s2aSecurityProtocol, }, - minTLSVersion: commonpb.TLSVersion_TLS1_3, - maxTLSVersion: commonpb.TLSVersion_TLS1_3, - tlsCiphersuites: []commonpb.Ciphersuite{ - commonpb.Ciphersuite_AES_128_GCM_SHA256, - commonpb.Ciphersuite_AES_256_GCM_SHA384, - commonpb.Ciphersuite_CHACHA20_POLY1305_SHA256, + minTLSVersion: commonpbv1.TLSVersion_TLS1_3, + maxTLSVersion: commonpbv1.TLSVersion_TLS1_3, + tlsCiphersuites: []commonpbv1.Ciphersuite{ + commonpbv1.Ciphersuite_AES_128_GCM_SHA256, + commonpbv1.Ciphersuite_AES_256_GCM_SHA384, + commonpbv1.Ciphersuite_CHACHA20_POLY1305_SHA256, }, localIdentities: localIdentities, isClient: false, @@ -147,7 +153,15 @@ func NewServerCreds(opts *ServerOptions) (credentials.TransportCredentials, erro }, nil } verificationMode := 
getVerificationMode(opts.VerificationMode) - return v2.NewServerCreds(opts.S2AAddress, opts.TransportCreds, localIdentities, verificationMode, opts.getS2AStream) + var v2LocalIdentities []*commonpb.Identity + for _, localIdentity := range opts.LocalIdentities { + protoLocalIdentity, err := toV2ProtoIdentity(localIdentity) + if err != nil { + return nil, err + } + v2LocalIdentities = append(v2LocalIdentities, protoLocalIdentity) + } + return v2.NewServerCreds(opts.S2AAddress, opts.TransportCreds, v2LocalIdentities, verificationMode, opts.getS2AStream) } // ClientHandshake initiates a client-side TLS handshake using the S2A. @@ -248,22 +262,22 @@ func (c *s2aTransportCreds) Info() credentials.ProtocolInfo { func (c *s2aTransportCreds) Clone() credentials.TransportCredentials { info := *c.info - var localIdentity *commonpb.Identity + var localIdentity *commonpbv1.Identity if c.localIdentity != nil { - localIdentity = proto.Clone(c.localIdentity).(*commonpb.Identity) + localIdentity = proto.Clone(c.localIdentity).(*commonpbv1.Identity) } - var localIdentities []*commonpb.Identity + var localIdentities []*commonpbv1.Identity if c.localIdentities != nil { - localIdentities = make([]*commonpb.Identity, len(c.localIdentities)) + localIdentities = make([]*commonpbv1.Identity, len(c.localIdentities)) for i, localIdentity := range c.localIdentities { - localIdentities[i] = proto.Clone(localIdentity).(*commonpb.Identity) + localIdentities[i] = proto.Clone(localIdentity).(*commonpbv1.Identity) } } - var targetIdentities []*commonpb.Identity + var targetIdentities []*commonpbv1.Identity if c.targetIdentities != nil { - targetIdentities = make([]*commonpb.Identity, len(c.targetIdentities)) + targetIdentities = make([]*commonpbv1.Identity, len(c.targetIdentities)) for i, targetIdentity := range c.targetIdentities { - targetIdentities[i] = proto.Clone(targetIdentity).(*commonpb.Identity) + targetIdentities[i] = proto.Clone(targetIdentity).(*commonpbv1.Identity) } } return &s2aTransportCreds{ @@ -317,6 +331,7 @@ func NewTLSClientConfigFactory(opts *ClientOptions) (TLSClientConfigFactory, err tokenManager: nil, verificationMode: getVerificationMode(opts.VerificationMode), serverAuthorizationPolicy: opts.serverAuthorizationPolicy, + getStream: opts.getS2AStream, }, nil } return &s2aTLSClientConfigFactory{ @@ -325,6 +340,7 @@ func NewTLSClientConfigFactory(opts *ClientOptions) (TLSClientConfigFactory, err tokenManager: tokenManager, verificationMode: getVerificationMode(opts.VerificationMode), serverAuthorizationPolicy: opts.serverAuthorizationPolicy, + getStream: opts.getS2AStream, }, nil } @@ -334,6 +350,7 @@ type s2aTLSClientConfigFactory struct { tokenManager tokenmanager.AccessTokenManager verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode serverAuthorizationPolicy []byte + getStream stream.GetS2AStream } func (f *s2aTLSClientConfigFactory) Build( @@ -342,7 +359,7 @@ func (f *s2aTLSClientConfigFactory) Build( if opts != nil && opts.ServerName != "" { serverName = opts.ServerName } - return v2.NewClientTLSConfig(ctx, f.s2av2Address, f.transportCreds, f.tokenManager, f.verificationMode, serverName, f.serverAuthorizationPolicy) + return v2.NewClientTLSConfig(ctx, f.s2av2Address, f.transportCreds, f.tokenManager, f.verificationMode, serverName, f.serverAuthorizationPolicy, f.getStream) } func getVerificationMode(verificationMode VerificationModeType) s2av2pb.ValidatePeerCertificateChainReq_VerificationMode { @@ -351,6 +368,14 @@ func getVerificationMode(verificationMode 
VerificationModeType) s2av2pb.Validate return s2av2pb.ValidatePeerCertificateChainReq_CONNECT_TO_GOOGLE case Spiffe: return s2av2pb.ValidatePeerCertificateChainReq_SPIFFE + case ReservedCustomVerificationMode3: + return s2av2pb.ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_3 + case ReservedCustomVerificationMode4: + return s2av2pb.ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_4 + case ReservedCustomVerificationMode5: + return s2av2pb.ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_5 + case ReservedCustomVerificationMode6: + return s2av2pb.ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_6 default: return s2av2pb.ValidatePeerCertificateChainReq_UNSPECIFIED } @@ -396,24 +421,20 @@ func NewS2ADialTLSContextFunc(opts *ClientOptions) func(ctx context.Context, net defer cancel() var s2aTLSConfig *tls.Config + var c net.Conn retry.Run(timeoutCtx, func() error { s2aTLSConfig, err = factory.Build(timeoutCtx, &TLSClientConfigOptions{ ServerName: serverName, }) - return err - }) - if err != nil { - grpclog.Infof("error building S2A TLS config: %v", err) - return fallback(err) - } + if err != nil { + grpclog.Infof("error building S2A TLS config: %v", err) + return err + } - s2aDialer := &tls.Dialer{ - Config: s2aTLSConfig, - } - var c net.Conn - retry.Run(timeoutCtx, - func() error { + s2aDialer := &tls.Dialer{ + Config: s2aTLSConfig, + } c, err = s2aDialer.DialContext(timeoutCtx, network, addr) return err }) diff --git a/vendor/github.com/google/s2a-go/s2a_options.go b/vendor/github.com/google/s2a-go/s2a_options.go index fcdbc1621bd..b7a277f9e3f 100644 --- a/vendor/github.com/google/s2a-go/s2a_options.go +++ b/vendor/github.com/google/s2a-go/s2a_options.go @@ -19,7 +19,6 @@ package s2a import ( - "context" "crypto/tls" "errors" "sync" @@ -28,13 +27,25 @@ import ( "github.com/google/s2a-go/stream" "google.golang.org/grpc/credentials" - s2apb "github.com/google/s2a-go/internal/proto/common_go_proto" + s2av1pb "github.com/google/s2a-go/internal/proto/common_go_proto" + s2apb "github.com/google/s2a-go/internal/proto/v2/common_go_proto" ) // Identity is the interface for S2A identities. type Identity interface { // Name returns the name of the identity. Name() string + Attributes() map[string]string +} + +type UnspecifiedID struct { + Attr map[string]string +} + +func (u *UnspecifiedID) Name() string { return "" } + +func (u *UnspecifiedID) Attributes() map[string]string { + return u.Attr } type spiffeID struct { @@ -43,10 +54,10 @@ type spiffeID struct { func (s *spiffeID) Name() string { return s.spiffeID } +func (spiffeID) Attributes() map[string]string { return nil } + // NewSpiffeID creates a SPIFFE ID from id. -func NewSpiffeID(id string) Identity { - return &spiffeID{spiffeID: id} -} +func NewSpiffeID(id string) Identity { return &spiffeID{spiffeID: id} } type hostname struct { hostname string @@ -54,10 +65,10 @@ type hostname struct { func (h *hostname) Name() string { return h.hostname } +func (hostname) Attributes() map[string]string { return nil } + // NewHostname creates a hostname from name. -func NewHostname(name string) Identity { - return &hostname{hostname: name} -} +func NewHostname(name string) Identity { return &hostname{hostname: name} } type uid struct { uid string @@ -65,10 +76,10 @@ type uid struct { func (h *uid) Name() string { return h.uid } +func (uid) Attributes() map[string]string { return nil } + // NewUID creates a UID from name. 
-func NewUID(name string) Identity { - return &uid{uid: name} -} +func NewUID(name string) Identity { return &uid{uid: name} } // VerificationModeType specifies the mode that S2A must use to verify the peer // certificate chain. @@ -76,9 +87,13 @@ type VerificationModeType int // Three types of verification modes. const ( - Unspecified = iota - ConnectToGoogle + Unspecified VerificationModeType = iota Spiffe + ConnectToGoogle + ReservedCustomVerificationMode3 + ReservedCustomVerificationMode4 + ReservedCustomVerificationMode5 + ReservedCustomVerificationMode6 ) // ClientOptions contains the client-side options used to establish a secure @@ -133,7 +148,7 @@ type ClientOptions struct { FallbackOpts *FallbackOptions // Generates an S2AStream interface for talking to the S2A server. - getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error) + getS2AStream stream.GetS2AStream // Serialized user specified policy for server authorization. serverAuthorizationPolicy []byte @@ -187,7 +202,7 @@ type ServerOptions struct { VerificationMode VerificationModeType // Generates an S2AStream interface for talking to the S2A server. - getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error) + getS2AStream stream.GetS2AStream } // DefaultServerOptions returns the default server options. @@ -198,17 +213,59 @@ func DefaultServerOptions(s2aAddress string) *ServerOptions { } } -func toProtoIdentity(identity Identity) (*s2apb.Identity, error) { +func toProtoIdentity(identity Identity) (*s2av1pb.Identity, error) { + if identity == nil { + return nil, nil + } + switch id := identity.(type) { + case *spiffeID: + return &s2av1pb.Identity{ + IdentityOneof: &s2av1pb.Identity_SpiffeId{SpiffeId: id.Name()}, + Attributes: id.Attributes(), + }, nil + case *hostname: + return &s2av1pb.Identity{ + IdentityOneof: &s2av1pb.Identity_Hostname{Hostname: id.Name()}, + Attributes: id.Attributes(), + }, nil + case *uid: + return &s2av1pb.Identity{ + IdentityOneof: &s2av1pb.Identity_Uid{Uid: id.Name()}, + Attributes: id.Attributes(), + }, nil + case *UnspecifiedID: + return &s2av1pb.Identity{ + Attributes: id.Attributes(), + }, nil + default: + return nil, errors.New("unrecognized identity type") + } +} + +func toV2ProtoIdentity(identity Identity) (*s2apb.Identity, error) { if identity == nil { return nil, nil } switch id := identity.(type) { case *spiffeID: - return &s2apb.Identity{IdentityOneof: &s2apb.Identity_SpiffeId{SpiffeId: id.Name()}}, nil + return &s2apb.Identity{ + IdentityOneof: &s2apb.Identity_SpiffeId{SpiffeId: id.Name()}, + Attributes: id.Attributes(), + }, nil case *hostname: - return &s2apb.Identity{IdentityOneof: &s2apb.Identity_Hostname{Hostname: id.Name()}}, nil + return &s2apb.Identity{ + IdentityOneof: &s2apb.Identity_Hostname{Hostname: id.Name()}, + Attributes: id.Attributes(), + }, nil case *uid: - return &s2apb.Identity{IdentityOneof: &s2apb.Identity_Uid{Uid: id.Name()}}, nil + return &s2apb.Identity{ + IdentityOneof: &s2apb.Identity_Uid{Uid: id.Name()}, + Attributes: id.Attributes(), + }, nil + case *UnspecifiedID: + return &s2apb.Identity{ + Attributes: id.Attributes(), + }, nil default: return nil, errors.New("unrecognized identity type") } diff --git a/vendor/github.com/google/s2a-go/stream/s2a_stream.go b/vendor/github.com/google/s2a-go/stream/s2a_stream.go index 584bf32b1c7..ae2d5eb4c16 100644 --- a/vendor/github.com/google/s2a-go/stream/s2a_stream.go +++ b/vendor/github.com/google/s2a-go/stream/s2a_stream.go @@ -20,6 +20,8 @@ package stream 
import ( + "context" + s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" ) @@ -32,3 +34,6 @@ type S2AStream interface { // Closes the channel to the S2A server. CloseSend() error } + +// GetS2AStream type is for generating an S2AStream interface for talking to the S2A server. +type GetS2AStream func(ctx context.Context, s2av2Address string, opts ...string) (S2AStream, error) diff --git a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json index 44d4d00202f..a8c082dd61e 100644 --- a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json +++ b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json @@ -1,3 +1,3 @@ { - "v2": "2.13.0" + "v2": "2.14.1" } diff --git a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md index d63421b71ca..17cced15eca 100644 --- a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md +++ b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md @@ -1,5 +1,24 @@ # Changelog +## [2.14.1](https://github.com/googleapis/gax-go/compare/v2.14.0...v2.14.1) (2024-12-19) + + +### Bug Fixes + +* update golang.org/x/net to v0.33.0 ([#391](https://github.com/googleapis/gax-go/issues/391)) ([547a5b4](https://github.com/googleapis/gax-go/commit/547a5b43aa6f376f71242da9f18e65fbdfb342f6)) + + +### Documentation + +* fix godoc to refer to the proper envvar ([#387](https://github.com/googleapis/gax-go/issues/387)) ([dc6baf7](https://github.com/googleapis/gax-go/commit/dc6baf75c1a737233739630b5af6c9759f08abcd)) + +## [2.14.0](https://github.com/googleapis/gax-go/compare/v2.13.0...v2.14.0) (2024-11-13) + + +### Features + +* **internallog:** add a logging support package ([#380](https://github.com/googleapis/gax-go/issues/380)) ([c877470](https://github.com/googleapis/gax-go/commit/c87747098135631a3de5865ed03aaf2c79fd9319)) + ## [2.13.0](https://github.com/googleapis/gax-go/compare/v2.12.5...v2.13.0) (2024-07-22) diff --git a/vendor/github.com/googleapis/gax-go/v2/internal/version.go b/vendor/github.com/googleapis/gax-go/v2/internal/version.go index e12421cf599..2b284a24a48 100644 --- a/vendor/github.com/googleapis/gax-go/v2/internal/version.go +++ b/vendor/github.com/googleapis/gax-go/v2/internal/version.go @@ -30,4 +30,4 @@ package internal // Version is the current tagged release of the library. -const Version = "2.13.0" +const Version = "2.14.1" diff --git a/vendor/github.com/googleapis/gax-go/v2/internallog/internal/internal.go b/vendor/github.com/googleapis/gax-go/v2/internallog/internal/internal.go new file mode 100644 index 00000000000..19f4be35c2c --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/internallog/internal/internal.go @@ -0,0 +1,134 @@ +// Copyright 2024, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package internal provides some common logic and types to other logging +// sub-packages. +package internal + +import ( + "context" + "io" + "log/slog" + "os" + "strings" + "time" +) + +const ( + // LoggingLevelEnvVar is the environment variable used to enable logging + // at a particular level. + LoggingLevelEnvVar = "GOOGLE_SDK_GO_LOGGING_LEVEL" + + googLvlKey = "severity" + googMsgKey = "message" + googSourceKey = "sourceLocation" + googTimeKey = "timestamp" +) + +// NewLoggerWithWriter is exposed for testing. +func NewLoggerWithWriter(w io.Writer) *slog.Logger { + lvl, loggingEnabled := checkLoggingLevel() + if !loggingEnabled { + return slog.New(noOpHandler{}) + } + return slog.New(newGCPSlogHandler(lvl, w)) +} + +// checkLoggingLevel returns the configured logging level and whether or not +// logging is enabled. +func checkLoggingLevel() (slog.Leveler, bool) { + sLevel := strings.ToLower(os.Getenv(LoggingLevelEnvVar)) + var level slog.Level + switch sLevel { + case "debug": + level = slog.LevelDebug + case "info": + level = slog.LevelInfo + case "warn": + level = slog.LevelWarn + case "error": + level = slog.LevelError + default: + return nil, false + } + return level, true +} + +// newGCPSlogHandler returns a Handler that is configured to output in a JSON +// format with well-known keys. For more information on this format see +// https://cloud.google.com/logging/docs/agent/logging/configuration#special-fields. +func newGCPSlogHandler(lvl slog.Leveler, w io.Writer) slog.Handler { + return slog.NewJSONHandler(w, &slog.HandlerOptions{ + Level: lvl, + ReplaceAttr: replaceAttr, + }) +} + +// replaceAttr remaps default Go logging keys to match what is expected in +// cloud logging. +func replaceAttr(groups []string, a slog.Attr) slog.Attr { + if groups == nil { + if a.Key == slog.LevelKey { + a.Key = googLvlKey + return a + } else if a.Key == slog.MessageKey { + a.Key = googMsgKey + return a + } else if a.Key == slog.SourceKey { + a.Key = googSourceKey + return a + } else if a.Key == slog.TimeKey { + a.Key = googTimeKey + if a.Value.Kind() == slog.KindTime { + a.Value = slog.StringValue(a.Value.Time().Format(time.RFC3339)) + } + return a + } + } + return a +} + +// The handler returned if logging is not enabled.
+// The handler returned if logging is not enabled.
+type noOpHandler struct{}
+
+func (h noOpHandler) Enabled(_ context.Context, _ slog.Level) bool {
+    return false
+}
+
+func (h noOpHandler) Handle(_ context.Context, _ slog.Record) error {
+    return nil
+}
+
+func (h noOpHandler) WithAttrs(_ []slog.Attr) slog.Handler {
+    return h
+}
+
+func (h noOpHandler) WithGroup(_ string) slog.Handler {
+    return h
+}
diff --git a/vendor/github.com/googleapis/gax-go/v2/internallog/internallog.go b/vendor/github.com/googleapis/gax-go/v2/internallog/internallog.go
new file mode 100644
index 00000000000..e47ab32acc2
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/v2/internallog/internallog.go
@@ -0,0 +1,154 @@
+// Copyright 2024, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Package internallog is intended for internal use by generated clients only.
+package internallog
+
+import (
+    "bytes"
+    "encoding/json"
+    "fmt"
+    "log/slog"
+    "net/http"
+    "os"
+    "strings"
+
+    "github.com/googleapis/gax-go/v2/internallog/internal"
+)
+
+// New returns a new [slog.Logger] default logger, or the provided logger if
+// non-nil. The returned logger will be a no-op logger unless the environment
+// variable GOOGLE_SDK_GO_LOGGING_LEVEL is set.
+func New(l *slog.Logger) *slog.Logger {
+    if l != nil {
+        return l
+    }
+    return internal.NewLoggerWithWriter(os.Stderr)
+}
+
+// HTTPRequest returns a lazily evaluated [slog.LogValuer] for a
+// [http.Request] and the associated body.
+func HTTPRequest(req *http.Request, body []byte) slog.LogValuer { + return &request{ + req: req, + payload: body, + } +} + +type request struct { + req *http.Request + payload []byte +} + +func (r *request) LogValue() slog.Value { + if r == nil || r.req == nil { + return slog.Value{} + } + var groupValueAttrs []slog.Attr + groupValueAttrs = append(groupValueAttrs, slog.String("method", r.req.Method)) + groupValueAttrs = append(groupValueAttrs, slog.String("url", r.req.URL.String())) + + var headerAttr []slog.Attr + for k, val := range r.req.Header { + headerAttr = append(headerAttr, slog.String(k, strings.Join(val, ","))) + } + if len(headerAttr) > 0 { + groupValueAttrs = append(groupValueAttrs, slog.Any("headers", headerAttr)) + } + + if len(r.payload) > 0 { + if attr, ok := processPayload(r.payload); ok { + groupValueAttrs = append(groupValueAttrs, attr) + } + } + return slog.GroupValue(groupValueAttrs...) +} + +// HTTPResponse returns a lazily evaluated [slog.LogValuer] for a +// [http.Response] and the associated body. +func HTTPResponse(resp *http.Response, body []byte) slog.LogValuer { + return &response{ + resp: resp, + payload: body, + } +} + +type response struct { + resp *http.Response + payload []byte +} + +func (r *response) LogValue() slog.Value { + if r == nil { + return slog.Value{} + } + var groupValueAttrs []slog.Attr + groupValueAttrs = append(groupValueAttrs, slog.String("status", fmt.Sprint(r.resp.StatusCode))) + + var headerAttr []slog.Attr + for k, val := range r.resp.Header { + headerAttr = append(headerAttr, slog.String(k, strings.Join(val, ","))) + } + if len(headerAttr) > 0 { + groupValueAttrs = append(groupValueAttrs, slog.Any("headers", headerAttr)) + } + + if len(r.payload) > 0 { + if attr, ok := processPayload(r.payload); ok { + groupValueAttrs = append(groupValueAttrs, attr) + } + } + return slog.GroupValue(groupValueAttrs...) 
+}
+
+func processPayload(payload []byte) (slog.Attr, bool) {
+    peekChar := payload[0]
+    if peekChar == '{' {
+        // JSON object
+        var m map[string]any
+        if err := json.Unmarshal(payload, &m); err == nil {
+            return slog.Any("payload", m), true
+        }
+    } else if peekChar == '[' {
+        // JSON array
+        var m []any
+        if err := json.Unmarshal(payload, &m); err == nil {
+            return slog.Any("payload", m), true
+        }
+    } else {
+        // Everything else
+        buf := &bytes.Buffer{}
+        if err := json.Compact(buf, payload); err != nil {
+            // Write raw payload in case of error
+            buf.Write(payload)
+        }
+        return slog.String("payload", buf.String()), true
+    }
+    return slog.Attr{}, false
+}
diff --git a/vendor/github.com/gorilla/handlers/.editorconfig b/vendor/github.com/gorilla/handlers/.editorconfig
new file mode 100644
index 00000000000..c6b74c3e0d0
--- /dev/null
+++ b/vendor/github.com/gorilla/handlers/.editorconfig
@@ -0,0 +1,20 @@
+; https://editorconfig.org/
+
+root = true
+
+[*]
+insert_final_newline = true
+charset = utf-8
+trim_trailing_whitespace = true
+indent_style = space
+indent_size = 2
+
+[{Makefile,go.mod,go.sum,*.go,.gitmodules}]
+indent_style = tab
+indent_size = 4
+
+[*.md]
+indent_size = 4
+trim_trailing_whitespace = false
+
+eclint_indent_style = unset
\ No newline at end of file
diff --git a/vendor/github.com/gorilla/handlers/.gitignore b/vendor/github.com/gorilla/handlers/.gitignore
new file mode 100644
index 00000000000..577a89e8138
--- /dev/null
+++ b/vendor/github.com/gorilla/handlers/.gitignore
@@ -0,0 +1,2 @@
+# Output of the go test coverage tool
+coverage.coverprofile
diff --git a/vendor/github.com/gorilla/handlers/LICENSE b/vendor/github.com/gorilla/handlers/LICENSE
index 66ea3c8ae71..bb9d80bc9b6 100644
--- a/vendor/github.com/gorilla/handlers/LICENSE
+++ b/vendor/github.com/gorilla/handlers/LICENSE
@@ -1,22 +1,27 @@
-Copyright (c) 2013 The Gorilla Handlers Authors. All rights reserved.
+Copyright (c) 2023 The Gorilla Authors. All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
+modification, are permitted provided that the following conditions are
+met:
 
-    Redistributions of source code must retain the above copyright notice, this
-    list of conditions and the following disclaimer.
+    * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+    * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
 
-    Redistributions in binary form must reproduce the above copyright notice,
-    this list of conditions and the following disclaimer in the documentation
-    and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/handlers/Makefile b/vendor/github.com/gorilla/handlers/Makefile new file mode 100644 index 00000000000..003b784f7ed --- /dev/null +++ b/vendor/github.com/gorilla/handlers/Makefile @@ -0,0 +1,34 @@ +GO_LINT=$(shell which golangci-lint 2> /dev/null || echo '') +GO_LINT_URI=github.com/golangci/golangci-lint/cmd/golangci-lint@latest + +GO_SEC=$(shell which gosec 2> /dev/null || echo '') +GO_SEC_URI=github.com/securego/gosec/v2/cmd/gosec@latest + +GO_VULNCHECK=$(shell which govulncheck 2> /dev/null || echo '') +GO_VULNCHECK_URI=golang.org/x/vuln/cmd/govulncheck@latest + +.PHONY: verify +verify: sec govulncheck lint test + +.PHONY: lint +lint: + $(if $(GO_LINT), ,go install $(GO_LINT_URI)) + @echo "##### Running golangci-lint #####" + golangci-lint run -v + +.PHONY: sec +sec: + $(if $(GO_SEC), ,go install $(GO_SEC_URI)) + @echo "##### Running gosec #####" + gosec ./... + +.PHONY: govulncheck +govulncheck: + $(if $(GO_VULNCHECK), ,go install $(GO_VULNCHECK_URI)) + @echo "##### Running govulncheck #####" + govulncheck ./... + +.PHONY: test +test: + @echo "##### Running tests #####" + go test -race -cover -coverprofile=coverage.coverprofile -covermode=atomic -v ./... 
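The Makefile above folds gosec, govulncheck, and golangci-lint into a single verify target. For orientation before the README diff below, a hedged sketch of how the vendored package is typically consumed; CORS, RecoveryHandler, and LoggingHandler are the real constructors touched later in this diff, while the mux, route, and port are illustrative:

package main

import (
    "log"
    "net/http"
    "os"

    "github.com/gorilla/handlers"
)

func main() {
    mux := http.NewServeMux()
    mux.HandleFunc("/", func(w http.ResponseWriter, _ *http.Request) {
        _, _ = w.Write([]byte("ok"))
    })

    // LoggingHandler is outermost so every request is logged in Apache
    // Common Log Format; RecoveryHandler sits closest to the mux to catch
    // handler panics before they unwind past the logger.
    var h http.Handler = mux
    h = handlers.RecoveryHandler()(h)
    h = handlers.CORS()(h)
    h = handlers.LoggingHandler(os.Stdout, h)
    log.Fatal(http.ListenAndServe(":8080", h))
}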
diff --git a/vendor/github.com/gorilla/handlers/README.md b/vendor/github.com/gorilla/handlers/README.md index 6eba66bf302..02555b2642c 100644 --- a/vendor/github.com/gorilla/handlers/README.md +++ b/vendor/github.com/gorilla/handlers/README.md @@ -1,10 +1,10 @@ -gorilla/handlers -================ +# gorilla/handlers + +![Testing](https://github.com/gorilla/handlers/actions/workflows/test.yml/badge.svg) +[![Codecov](https://codecov.io/github/gorilla/handlers/branch/main/graph/badge.svg)](https://codecov.io/github/gorilla/handlers) [![GoDoc](https://godoc.org/github.com/gorilla/handlers?status.svg)](https://godoc.org/github.com/gorilla/handlers) -[![CircleCI](https://circleci.com/gh/gorilla/handlers.svg?style=svg)](https://circleci.com/gh/gorilla/handlers) [![Sourcegraph](https://sourcegraph.com/github.com/gorilla/handlers/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/handlers?badge) - Package handlers is a collection of handlers (aka "HTTP middleware") for use with Go's `net/http` package (or any framework supporting `http.Handler`), including: diff --git a/vendor/github.com/gorilla/handlers/canonical.go b/vendor/github.com/gorilla/handlers/canonical.go index 8437fefc1ef..7121f5307be 100644 --- a/vendor/github.com/gorilla/handlers/canonical.go +++ b/vendor/github.com/gorilla/handlers/canonical.go @@ -21,12 +21,11 @@ type canonical struct { // // Example: // -// r := mux.NewRouter() -// canonical := handlers.CanonicalHost("http://www.gorillatoolkit.org", 302) -// r.HandleFunc("/route", YourHandler) -// -// log.Fatal(http.ListenAndServe(":7000", canonical(r))) +// r := mux.NewRouter() +// canonical := handlers.CanonicalHost("http://www.gorillatoolkit.org", 302) +// r.HandleFunc("/route", YourHandler) // +// log.Fatal(http.ListenAndServe(":7000", canonical(r))) func CanonicalHost(domain string, code int) func(h http.Handler) http.Handler { fn := func(h http.Handler) http.Handler { return canonical{h, domain, code} diff --git a/vendor/github.com/gorilla/handlers/compress.go b/vendor/github.com/gorilla/handlers/compress.go index 1e95f1ccbfa..d6f589503b5 100644 --- a/vendor/github.com/gorilla/handlers/compress.go +++ b/vendor/github.com/gorilla/handlers/compress.go @@ -44,13 +44,13 @@ type flusher interface { Flush() error } -func (w *compressResponseWriter) Flush() { +func (cw *compressResponseWriter) Flush() { // Flush compressed data if compressor supports it. - if f, ok := w.compressor.(flusher); ok { - f.Flush() + if f, ok := cw.compressor.(flusher); ok { + _ = f.Flush() } // Flush HTTP response. - if f, ok := w.w.(http.Flusher); ok { + if f, ok := cw.w.(http.Flusher); ok { f.Flush() } } diff --git a/vendor/github.com/gorilla/handlers/cors.go b/vendor/github.com/gorilla/handlers/cors.go index 0dcdffb3d32..8af9c096e5e 100644 --- a/vendor/github.com/gorilla/handlers/cors.go +++ b/vendor/github.com/gorilla/handlers/cors.go @@ -26,14 +26,14 @@ type cors struct { type OriginValidator func(string) bool var ( - defaultCorsOptionStatusCode = 200 - defaultCorsMethods = []string{"GET", "HEAD", "POST"} + defaultCorsOptionStatusCode = http.StatusOK + defaultCorsMethods = []string{http.MethodGet, http.MethodHead, http.MethodPost} defaultCorsHeaders = []string{"Accept", "Accept-Language", "Content-Language", "Origin"} - // (WebKit/Safari v9 sends the Origin header by default in AJAX requests) + // (WebKit/Safari v9 sends the Origin header by default in AJAX requests). 
) const ( - corsOptionMethod string = "OPTIONS" + corsOptionMethod string = http.MethodOptions corsAllowOriginHeader string = "Access-Control-Allow-Origin" corsExposeHeadersHeader string = "Access-Control-Expose-Headers" corsMaxAgeHeader string = "Access-Control-Max-Age" @@ -101,10 +101,8 @@ func (ch *cors) ServeHTTP(w http.ResponseWriter, r *http.Request) { if !ch.isMatch(method, defaultCorsMethods) { w.Header().Set(corsAllowMethodsHeader, method) } - } else { - if len(ch.exposedHeaders) > 0 { - w.Header().Set(corsExposeHeadersHeader, strings.Join(ch.exposedHeaders, ",")) - } + } else if len(ch.exposedHeaders) > 0 { + w.Header().Set(corsExposeHeadersHeader, strings.Join(ch.exposedHeaders, ",")) } if ch.allowCredentials { @@ -141,22 +139,21 @@ func (ch *cors) ServeHTTP(w http.ResponseWriter, r *http.Request) { // CORS provides Cross-Origin Resource Sharing middleware. // Example: // -// import ( -// "net/http" -// -// "github.com/gorilla/handlers" -// "github.com/gorilla/mux" -// ) +// import ( +// "net/http" // -// func main() { -// r := mux.NewRouter() -// r.HandleFunc("/users", UserEndpoint) -// r.HandleFunc("/projects", ProjectEndpoint) +// "github.com/gorilla/handlers" +// "github.com/gorilla/mux" +// ) // -// // Apply the CORS middleware to our top-level router, with the defaults. -// http.ListenAndServe(":8000", handlers.CORS()(r)) -// } +// func main() { +// r := mux.NewRouter() +// r.HandleFunc("/users", UserEndpoint) +// r.HandleFunc("/projects", ProjectEndpoint) // +// // Apply the CORS middleware to our top-level router, with the defaults. +// http.ListenAndServe(":8000", handlers.CORS()(r)) +// } func CORS(opts ...CORSOption) func(http.Handler) http.Handler { return func(h http.Handler) http.Handler { ch := parseCORSOptions(opts...) @@ -174,7 +171,7 @@ func parseCORSOptions(opts ...CORSOption) *cors { } for _, option := range opts { - option(ch) + _ = option(ch) //TODO: @bharat-rajani, return error to caller if not nil? } return ch diff --git a/vendor/github.com/gorilla/handlers/handlers.go b/vendor/github.com/gorilla/handlers/handlers.go index 0509482ad7a..9b92fce3333 100644 --- a/vendor/github.com/gorilla/handlers/handlers.go +++ b/vendor/github.com/gorilla/handlers/handlers.go @@ -35,7 +35,7 @@ func (h MethodHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { } sort.Strings(allow) w.Header().Set("Allow", strings.Join(allow, ", ")) - if req.Method == "OPTIONS" { + if req.Method == http.MethodOptions { w.WriteHeader(http.StatusOK) } else { http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) @@ -44,7 +44,7 @@ func (h MethodHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { } // responseLogger is wrapper of http.ResponseWriter that keeps track of its HTTP -// status code and body size +// status code and body size. type responseLogger struct { w http.ResponseWriter status int @@ -97,7 +97,7 @@ func isContentType(h http.Header, contentType string) bool { // Only PUT, POST, and PATCH requests are considered. 
 func ContentTypeHandler(h http.Handler, contentTypes ...string) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		if !(r.Method == "PUT" || r.Method == "POST" || r.Method == "PATCH") {
+		if !(r.Method == http.MethodPut || r.Method == http.MethodPost || r.Method == http.MethodPatch) {
 			h.ServeHTTP(w, r)
 			return
 		}
@@ -108,7 +108,10 @@ func ContentTypeHandler(h http.Handler, contentTypes ...string) http.Handler {
 				return
 			}
 		}
-		http.Error(w, fmt.Sprintf("Unsupported content type %q; expected one of %q", r.Header.Get("Content-Type"), contentTypes), http.StatusUnsupportedMediaType)
+		http.Error(w, fmt.Sprintf("Unsupported content type %q; expected one of %q",
+			r.Header.Get("Content-Type"),
+			contentTypes),
+			http.StatusUnsupportedMediaType)
 	})
 }
@@ -133,12 +136,12 @@ const (
 // Form method takes precedence over header method.
 func HTTPMethodOverrideHandler(h http.Handler) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		if r.Method == "POST" {
+		if r.Method == http.MethodPost {
 			om := r.FormValue(HTTPMethodOverrideFormKey)
 			if om == "" {
 				om = r.Header.Get(HTTPMethodOverrideHeader)
 			}
-			if om == "PUT" || om == "PATCH" || om == "DELETE" {
+			if om == http.MethodPut || om == http.MethodPatch || om == http.MethodDelete {
 				r.Method = om
 			}
 		}
diff --git a/vendor/github.com/gorilla/handlers/logging.go b/vendor/github.com/gorilla/handlers/logging.go
index 228465eba00..2badb6fbff8 100644
--- a/vendor/github.com/gorilla/handlers/logging.go
+++ b/vendor/github.com/gorilla/handlers/logging.go
@@ -18,7 +18,7 @@ import (
 
 // Logging
 
-// LogFormatterParams is the structure any formatter will be handed when time to log comes
+// LogFormatterParams is the structure any formatter will be handed when time to log comes.
 type LogFormatterParams struct {
 	Request *http.Request
 	URL url.URL
@@ -27,7 +27,7 @@ type LogFormatterParams struct {
 	Size int
 }
 
-// LogFormatter gives the signature of the formatter function passed to CustomLoggingHandler
+// LogFormatter gives the signature of the formatter function passed to CustomLoggingHandler.
 type LogFormatter func(writer io.Writer, params LogFormatterParams)
 
 // loggingHandler is the http.Handler implementation for LoggingHandlerTo and its
@@ -46,7 +46,10 @@ func (h loggingHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
 	h.handler.ServeHTTP(w, req)
 	if req.MultipartForm != nil {
-		req.MultipartForm.RemoveAll()
+		err := req.MultipartForm.RemoveAll()
+		if err != nil {
+			return
+		}
 	}
 
 	params := LogFormatterParams{
@@ -76,7 +79,7 @@ const lowerhex = "0123456789abcdef"
 
 func appendQuoted(buf []byte, s string) []byte {
 	var runeTmp [utf8.UTFMax]byte
-	for width := 0; len(s) > 0; s = s[width:] {
+	for width := 0; len(s) > 0; s = s[width:] { //nolint: wastedassign //TODO: why width starts from 0 and reassigned as 1
 		r := rune(s[0])
 		width = 1
 		if r >= utf8.RuneSelf {
@@ -191,7 +194,7 @@ func buildCommonLogLine(req *http.Request, url url.URL, ts time.Time, status int
 func writeLog(writer io.Writer, params LogFormatterParams) {
 	buf := buildCommonLogLine(params.Request, params.URL, params.TimeStamp, params.StatusCode, params.Size)
 	buf = append(buf, '\n')
-	writer.Write(buf)
+	_, _ = writer.Write(buf)
 }
 
 // writeCombinedLog writes a log entry for req to w in Apache Combined Log Format.
@@ -204,7 +207,7 @@ func writeCombinedLog(writer io.Writer, params LogFormatterParams) {
 	buf = append(buf, `" "`...)
buf = appendQuoted(buf, params.Request.UserAgent()) buf = append(buf, '"', '\n') - writer.Write(buf) + _, _ = writer.Write(buf) } // CombinedLoggingHandler return a http.Handler that wraps h and logs requests to out in @@ -212,7 +215,7 @@ func writeCombinedLog(writer io.Writer, params LogFormatterParams) { // // See http://httpd.apache.org/docs/2.2/logs.html#combined for a description of this format. // -// LoggingHandler always sets the ident field of the log to - +// LoggingHandler always sets the ident field of the log to -. func CombinedLoggingHandler(out io.Writer, h http.Handler) http.Handler { return loggingHandler{out, h, writeCombinedLog} } @@ -226,19 +229,18 @@ func CombinedLoggingHandler(out io.Writer, h http.Handler) http.Handler { // // Example: // -// r := mux.NewRouter() -// r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { -// w.Write([]byte("This is a catch-all route")) -// }) -// loggedRouter := handlers.LoggingHandler(os.Stdout, r) -// http.ListenAndServe(":1123", loggedRouter) -// +// r := mux.NewRouter() +// r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { +// w.Write([]byte("This is a catch-all route")) +// }) +// loggedRouter := handlers.LoggingHandler(os.Stdout, r) +// http.ListenAndServe(":1123", loggedRouter) func LoggingHandler(out io.Writer, h http.Handler) http.Handler { return loggingHandler{out, h, writeLog} } // CustomLoggingHandler provides a way to supply a custom log formatter -// while taking advantage of the mechanisms in this package +// while taking advantage of the mechanisms in this package. func CustomLoggingHandler(out io.Writer, h http.Handler, f LogFormatter) http.Handler { return loggingHandler{out, h, f} } diff --git a/vendor/github.com/gorilla/handlers/proxy_headers.go b/vendor/github.com/gorilla/handlers/proxy_headers.go index ed939dcef5d..281d753e95a 100644 --- a/vendor/github.com/gorilla/handlers/proxy_headers.go +++ b/vendor/github.com/gorilla/handlers/proxy_headers.go @@ -18,7 +18,7 @@ var ( var ( // RFC7239 defines a new "Forwarded: " header designed to replace the // existing use of X-Forwarded-* headers. - // e.g. Forwarded: for=192.0.2.60;proto=https;by=203.0.113.43 + // e.g. Forwarded: for=192.0.2.60;proto=https;by=203.0.113.43. forwarded = http.CanonicalHeaderKey("Forwarded") // Allows for a sub-match of the first value after 'for=' to the next // comma, semi-colon or space. The match is case-insensitive. @@ -67,7 +67,9 @@ func ProxyHeaders(h http.Handler) http.Handler { func getIP(r *http.Request) string { var addr string - if fwd := r.Header.Get(xForwardedFor); fwd != "" { + switch { + case r.Header.Get(xForwardedFor) != "": + fwd := r.Header.Get(xForwardedFor) // Only grab the first (client) address. Note that '192.168.0.1, // 10.1.1.1' is a valid key for X-Forwarded-For where addresses after // the first may represent forwarding proxies earlier in the chain. @@ -76,17 +78,15 @@ func getIP(r *http.Request) string { s = len(fwd) } addr = fwd[:s] - } else if fwd := r.Header.Get(xRealIP); fwd != "" { - // X-Real-IP should only contain one IP address (the client making the - // request). - addr = fwd - } else if fwd := r.Header.Get(forwarded); fwd != "" { + case r.Header.Get(xRealIP) != "": + addr = r.Header.Get(xRealIP) + case r.Header.Get(forwarded) != "": // match should contain at least two elements if the protocol was // specified in the Forwarded header. The first element will always be // the 'for=' capture, which we ignore. 
In the case of multiple IP // addresses (for=8.8.8.8, 8.8.4.4,172.16.1.20 is valid) we only // extract the first, which should be the client IP. - if match := forRegex.FindStringSubmatch(fwd); len(match) > 1 { + if match := forRegex.FindStringSubmatch(r.Header.Get(forwarded)); len(match) > 1 { // IPv6 addresses in Forwarded headers are quoted-strings. We strip // these quotes. addr = strings.Trim(match[1], `"`) diff --git a/vendor/github.com/gorilla/handlers/recovery.go b/vendor/github.com/gorilla/handlers/recovery.go index 4c4c1d9c6ce..0d4f955ecbd 100644 --- a/vendor/github.com/gorilla/handlers/recovery.go +++ b/vendor/github.com/gorilla/handlers/recovery.go @@ -36,12 +36,12 @@ func parseRecoveryOptions(h http.Handler, opts ...RecoveryOption) http.Handler { // // Example: // -// r := mux.NewRouter() -// r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { -// panic("Unexpected error!") -// }) +// r := mux.NewRouter() +// r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { +// panic("Unexpected error!") +// }) // -// http.ListenAndServe(":1123", handlers.RecoveryHandler()(r)) +// http.ListenAndServe(":1123", handlers.RecoveryHandler()(r)) func RecoveryHandler(opts ...RecoveryOption) func(h http.Handler) http.Handler { return func(h http.Handler) http.Handler { r := &recoveryHandler{handler: h} @@ -50,20 +50,22 @@ func RecoveryHandler(opts ...RecoveryOption) func(h http.Handler) http.Handler { } // RecoveryLogger is a functional option to override -// the default logger +// the default logger. func RecoveryLogger(logger RecoveryHandlerLogger) RecoveryOption { return func(h http.Handler) { - r := h.(*recoveryHandler) + r := h.(*recoveryHandler) //nolint:errcheck //TODO: + // @bharat-rajani should return type-assertion error but would break the API? r.logger = logger } } // PrintRecoveryStack is a functional option to enable // or disable printing stack traces on panic. -func PrintRecoveryStack(print bool) RecoveryOption { +func PrintRecoveryStack(shouldPrint bool) RecoveryOption { return func(h http.Handler) { - r := h.(*recoveryHandler) - r.printStack = print + r := h.(*recoveryHandler) //nolint:errcheck //TODO: + // @bharat-rajani should return type-assertion error but would break the API? + r.printStack = shouldPrint } } diff --git a/vendor/github.com/grafana/dskit/test/diff.go b/vendor/github.com/grafana/dskit/test/diff.go new file mode 100644 index 00000000000..3cc7ea63892 --- /dev/null +++ b/vendor/github.com/grafana/dskit/test/diff.go @@ -0,0 +1,28 @@ +// Provenance-includes-location: https://github.com/weaveworks/common/blob/main/test/diff.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Weaveworks Ltd. + +package test + +import ( + "github.com/davecgh/go-spew/spew" + "github.com/pmezard/go-difflib/difflib" +) + +// Diff diffs two arbitrary data structures, giving human-readable output. 
+func Diff(want, have interface{}) string { + config := spew.NewDefaultConfig() + // Set ContinueOnMethod to true if you cannot see a difference and + // want to look beyond the String() method + config.ContinueOnMethod = false + config.SortKeys = true + config.SpewKeys = true + text, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ + A: difflib.SplitLines(config.Sdump(want)), + B: difflib.SplitLines(config.Sdump(have)), + FromFile: "want", + ToFile: "have", + Context: 3, + }) + return "\n" + text +} diff --git a/vendor/github.com/grafana/dskit/test/poll.go b/vendor/github.com/grafana/dskit/test/poll.go new file mode 100644 index 00000000000..05ba41235ac --- /dev/null +++ b/vendor/github.com/grafana/dskit/test/poll.go @@ -0,0 +1,26 @@ +package test + +import ( + "reflect" + "testing" + "time" +) + +// Poll repeatedly calls a function until the function returns the correct response or until poll timeout. +func Poll(t testing.TB, d time.Duration, want interface{}, have func() interface{}) { + t.Helper() + deadline := time.Now().Add(d) + for { + if time.Now().After(deadline) { + break + } + if reflect.DeepEqual(want, have()) { + return + } + time.Sleep(d / 100) + } + h := have() + if !reflect.DeepEqual(want, h) { + t.Fatalf("expected %v, got %v", want, h) + } +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/common.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/common.go deleted file mode 100644 index 3e905cf1e32..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/common.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2017 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. - -package grpc_logging - -import ( - "context" - "io" - - "github.com/golang/protobuf/proto" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// ErrorToCode function determines the error code of an error -// This makes using custom errors with grpc middleware easier -type ErrorToCode func(err error) codes.Code - -func DefaultErrorToCode(err error) codes.Code { - return status.Code(err) -} - -// Decider function defines rules for suppressing any interceptor logs -type Decider func(fullMethodName string, err error) bool - -// DefaultDeciderMethod is the default implementation of decider to see if you should log the call -// by default this if always true so all calls are logged -func DefaultDeciderMethod(fullMethodName string, err error) bool { - return true -} - -// ServerPayloadLoggingDecider is a user-provided function for deciding whether to log the server-side -// request/response payloads -type ServerPayloadLoggingDecider func(ctx context.Context, fullMethodName string, servingObject interface{}) bool - -// ClientPayloadLoggingDecider is a user-provided function for deciding whether to log the client-side -// request/response payloads -type ClientPayloadLoggingDecider func(ctx context.Context, fullMethodName string) bool - -// JsonPbMarshaller is a marshaller that serializes protobuf messages. -type JsonPbMarshaler interface { - Marshal(out io.Writer, pb proto.Message) error -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/doc.go deleted file mode 100644 index d8fcea081a6..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/doc.go +++ /dev/null @@ -1,32 +0,0 @@ -// -/* -grpc_logging is a "parent" package for gRPC logging middlewares. 
- -General functionality of all middleware - -The gRPC logging middleware populates request-scoped data to `grpc_ctxtags.Tags` that relate to the current gRPC call -(e.g. service and method names). - -Once the gRPC logging middleware has added the gRPC specific Tags to the ctx they will then be written with the logs -that are made using the `ctx_logrus` or `ctx_zap` loggers. - -All logging middleware will emit a final log statement. It is based on the error returned by the handler function, -the gRPC status code, an error (if any) and it will emit at a level controlled via `WithLevels`. - -This parent package - -This particular package is intended for use by other middleware, logging or otherwise. It contains interfaces that other -logging middlewares *could* share . This allows code to be shared between different implementations. - -Field names - -All field names of loggers follow the OpenTracing semantics definitions, with `grpc.` prefix if needed: -https://github.com/opentracing/specification/blob/master/semantic_conventions.md - -Implementations - -There are three implementations at the moment: logrus, zap and kit - -See relevant packages below. -*/ -package grpc_logging diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/settable/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/settable/doc.go deleted file mode 100644 index c447ec6b9e5..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/settable/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// -/* -grpc_logsettable contains a thread-safe wrapper around grpc-logging -infrastructure. - -The go-grpc assumes that logger can be only configured once as the `SetLoggerV2` -method is: -```Not mutex-protected, should be called before any gRPC functions.``` - -This package allows to supply parent logger once ("before any grpc"), but -later change underlying implementation in thread-safe way when needed. - -It's in particular useful for testing, where each testcase might need its own -logger. -*/ -package grpc_logsettable diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/settable/logsettable.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/settable/logsettable.go deleted file mode 100644 index 9e403b2b2d0..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/settable/logsettable.go +++ /dev/null @@ -1,99 +0,0 @@ -package grpc_logsettable - -import ( - "io/ioutil" - "sync" - - "google.golang.org/grpc/grpclog" -) - -// SettableLoggerV2 is thread-safe. -type SettableLoggerV2 interface { - grpclog.LoggerV2 - // Sets given logger as the underlying implementation. - Set(loggerv2 grpclog.LoggerV2) - // Sets `discard` logger as the underlying implementation. - Reset() -} - -// ReplaceGrpcLoggerV2 creates and configures SettableLoggerV2 as grpc logger. 
-func ReplaceGrpcLoggerV2() SettableLoggerV2 { - settable := &settableLoggerV2{} - settable.Reset() - grpclog.SetLoggerV2(settable) - return settable -} - -// SettableLoggerV2 implements SettableLoggerV2 -type settableLoggerV2 struct { - log grpclog.LoggerV2 - mu sync.RWMutex -} - -func (s *settableLoggerV2) Set(log grpclog.LoggerV2) { - s.mu.Lock() - defer s.mu.Unlock() - s.log = log -} - -func (s *settableLoggerV2) Reset() { - s.Set(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard)) -} - -func (s *settableLoggerV2) get() grpclog.LoggerV2 { - s.mu.RLock() - defer s.mu.RUnlock() - return s.log -} - -func (s *settableLoggerV2) Info(args ...interface{}) { - s.get().Info(args) -} - -func (s *settableLoggerV2) Infoln(args ...interface{}) { - s.get().Infoln(args) -} - -func (s *settableLoggerV2) Infof(format string, args ...interface{}) { - s.get().Infof(format, args) -} - -func (s *settableLoggerV2) Warning(args ...interface{}) { - s.get().Warning(args) -} - -func (s *settableLoggerV2) Warningln(args ...interface{}) { - s.get().Warningln(args) -} - -func (s *settableLoggerV2) Warningf(format string, args ...interface{}) { - s.get().Warningf(format, args) -} - -func (s *settableLoggerV2) Error(args ...interface{}) { - s.get().Error(args) -} - -func (s *settableLoggerV2) Errorln(args ...interface{}) { - s.get().Errorln(args) -} - -func (s *settableLoggerV2) Errorf(format string, args ...interface{}) { - s.get().Errorf(format, args) -} - -func (s *settableLoggerV2) Fatal(args ...interface{}) { - s.get().Fatal(args) -} - -func (s *settableLoggerV2) Fatalln(args ...interface{}) { - s.get().Fatalln(args) -} - -func (s *settableLoggerV2) Fatalf(format string, args ...interface{}) { - s.get().Fatalf(format, args) -} - -func (s *settableLoggerV2) V(l int) bool { - return s.get().V(l) -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/client_interceptors.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/client_interceptors.go deleted file mode 100644 index de80c1cc02e..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/client_interceptors.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2017 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. - -package grpc_zap - -import ( - "context" - "path" - "time" - - "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" - "google.golang.org/grpc" -) - -var ( - // ClientField is used in every client-side log statement made through grpc_zap. Can be overwritten before initialization. - ClientField = zap.String("span.kind", "client") -) - -// UnaryClientInterceptor returns a new unary client interceptor that optionally logs the execution of external gRPC calls. -func UnaryClientInterceptor(logger *zap.Logger, opts ...Option) grpc.UnaryClientInterceptor { - o := evaluateClientOpt(opts) - return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { - fields := newClientLoggerFields(ctx, method) - startTime := time.Now() - err := invoker(ctx, method, req, reply, cc, opts...) - newCtx := ctxzap.ToContext(ctx, logger.With(fields...)) - logFinalClientLine(newCtx, o, startTime, err, "finished client unary call") - return err - } -} - -// StreamClientInterceptor returns a new streaming client interceptor that optionally logs the execution of external gRPC calls. 
-func StreamClientInterceptor(logger *zap.Logger, opts ...Option) grpc.StreamClientInterceptor { - o := evaluateClientOpt(opts) - return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { - fields := newClientLoggerFields(ctx, method) - startTime := time.Now() - clientStream, err := streamer(ctx, desc, cc, method, opts...) - newCtx := ctxzap.ToContext(ctx, logger.With(fields...)) - logFinalClientLine(newCtx, o, startTime, err, "finished client streaming call") - return clientStream, err - } -} - -func logFinalClientLine(ctx context.Context, o *options, startTime time.Time, err error, msg string) { - code := o.codeFunc(err) - level := o.levelFunc(code) - duration := o.durationFunc(time.Now().Sub(startTime)) - o.messageFunc(ctx, msg, level, code, err, duration) -} - -func newClientLoggerFields(ctx context.Context, fullMethodString string) []zapcore.Field { - service := path.Dir(fullMethodString)[1:] - method := path.Base(fullMethodString) - return []zapcore.Field{ - SystemField, - ClientField, - zap.String("grpc.service", service), - zap.String("grpc.method", method), - } -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/context.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/context.go deleted file mode 100644 index 56f6408a620..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/context.go +++ /dev/null @@ -1,21 +0,0 @@ -package grpc_zap - -import ( - "context" - - "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -// AddFields adds zap fields to the logger. -// Deprecated: should use the ctxzap.AddFields instead -func AddFields(ctx context.Context, fields ...zapcore.Field) { - ctxzap.AddFields(ctx, fields...) -} - -// Extract takes the call-scoped Logger from grpc_zap middleware. -// Deprecated: should use the ctxzap.Extract instead -func Extract(ctx context.Context) *zap.Logger { - return ctxzap.Extract(ctx) -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap/context.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap/context.go deleted file mode 100644 index 1d8ae49a19b..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap/context.go +++ /dev/null @@ -1,88 +0,0 @@ -package ctxzap - -import ( - "context" - - "github.com/grpc-ecosystem/go-grpc-middleware/tags" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -type ctxMarker struct{} - -type ctxLogger struct { - logger *zap.Logger - fields []zapcore.Field -} - -var ( - ctxMarkerKey = &ctxMarker{} - nullLogger = zap.NewNop() -) - -// AddFields adds zap fields to the logger. -func AddFields(ctx context.Context, fields ...zapcore.Field) { - l, ok := ctx.Value(ctxMarkerKey).(*ctxLogger) - if !ok || l == nil { - return - } - l.fields = append(l.fields, fields...) -} - -// Extract takes the call-scoped Logger from grpc_zap middleware. -// -// It always returns a Logger that has all the grpc_ctxtags updated. -func Extract(ctx context.Context) *zap.Logger { - l, ok := ctx.Value(ctxMarkerKey).(*ctxLogger) - if !ok || l == nil { - return nullLogger - } - // Add grpc_ctxtags tags metadata until now. - fields := TagsToFields(ctx) - // Add zap fields added until now. - fields = append(fields, l.fields...) - return l.logger.With(fields...) 
-} - -// TagsToFields transforms the Tags on the supplied context into zap fields. -func TagsToFields(ctx context.Context) []zapcore.Field { - fields := []zapcore.Field{} - tags := grpc_ctxtags.Extract(ctx) - for k, v := range tags.Values() { - fields = append(fields, zap.Any(k, v)) - } - return fields -} - -// ToContext adds the zap.Logger to the context for extraction later. -// Returning the new context that has been created. -func ToContext(ctx context.Context, logger *zap.Logger) context.Context { - l := &ctxLogger{ - logger: logger, - } - return context.WithValue(ctx, ctxMarkerKey, l) -} - -// Debug is equivalent to calling Debug on the zap.Logger in the context. -// It is a no-op if the context does not contain a zap.Logger. -func Debug(ctx context.Context, msg string, fields ...zap.Field) { - Extract(ctx).WithOptions(zap.AddCallerSkip(1)).Debug(msg, fields...) -} - -// Info is equivalent to calling Info on the zap.Logger in the context. -// It is a no-op if the context does not contain a zap.Logger. -func Info(ctx context.Context, msg string, fields ...zap.Field) { - Extract(ctx).WithOptions(zap.AddCallerSkip(1)).Info(msg, fields...) -} - -// Warn is equivalent to calling Warn on the zap.Logger in the context. -// It is a no-op if the context does not contain a zap.Logger. -func Warn(ctx context.Context, msg string, fields ...zap.Field) { - Extract(ctx).WithOptions(zap.AddCallerSkip(1)).Warn(msg, fields...) -} - -// Error is equivalent to calling Error on the zap.Logger in the context. -// It is a no-op if the context does not contain a zap.Logger. -func Error(ctx context.Context, msg string, fields ...zap.Field) { - Extract(ctx).WithOptions(zap.AddCallerSkip(1)).Error(msg, fields...) -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap/doc.go deleted file mode 100644 index 3591e3585ea..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap/doc.go +++ /dev/null @@ -1,14 +0,0 @@ -/* -`ctxzap` is a ctxlogger that is backed by Zap - -It accepts a user-configured `zap.Logger` that will be used for logging. The same `zap.Logger` will -be populated into the `context.Context` passed into gRPC handler code. - -You can use `ctxzap.Extract` to log into a request-scoped `zap.Logger` instance in your handler code. - -As `ctxzap.Extract` will iterate all tags on from `grpc_ctxtags` it is therefore expensive so it is advised that you -extract once at the start of the function from the context and reuse it for the remainder of the function (see examples). - -Please see examples and tests for examples of use. -*/ -package ctxzap diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/doc.go deleted file mode 100644 index ffa6b5c3194..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/doc.go +++ /dev/null @@ -1,75 +0,0 @@ -/* -`grpc_zap` is a gRPC logging middleware backed by ZAP loggers - -It accepts a user-configured `zap.Logger` that will be used for logging completed gRPC calls. The same `zap.Logger` will -be used for logging completed gRPC calls, and be populated into the `context.Context` passed into gRPC handler code. - -On calling `StreamServerInterceptor` or `UnaryServerInterceptor` this logging middleware will add gRPC call information -to the ctx so that it will be present on subsequent use of the `ctx_zap` logger. 
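For context on this deletion: the doc text above describes server interceptors that log each finished RPC and seed the context for the ctx_zap logger. A rough sketch of the v1 wiring that this change removes from the vendor tree (UnaryServerInterceptor and StreamServerInterceptor are the real grpc_zap API being deleted; the newServer helper is illustrative):

package server

import (
    grpc_zap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap"
    "go.uber.org/zap"
    "google.golang.org/grpc"
)

// newServer shows the typical v1 setup: the interceptors emit one log line
// per finished call and populate the context so handlers can retrieve a
// request-scoped logger via ctxzap.Extract.
func newServer(logger *zap.Logger) *grpc.Server {
    return grpc.NewServer(
        grpc.ChainUnaryInterceptor(grpc_zap.UnaryServerInterceptor(logger)),
        grpc.ChainStreamInterceptor(grpc_zap.StreamServerInterceptor(logger)),
    )
}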
- -If a deadline is present on the gRPC request the grpc.request.deadline tag is populated when the request begins. grpc.request.deadline -is a string representing the time (RFC3339) when the current call will expire. - -This package also implements request and response *payload* logging, both for server-side and client-side. These will be -logged as structured `jsonpb` fields for every message received/sent (both unary and streaming). For that please use -`Payload*Interceptor` functions for that. Please note that the user-provided function that determines whether to log -the full request/response payload needs to be written with care, this can significantly slow down gRPC. - -ZAP can also be made as a backend for gRPC library internals. For that use `ReplaceGrpcLoggerV2`. - - -*Server Interceptor* -Below is a JSON formatted example of a log that would be logged by the server interceptor: - - { - "level": "info", // string zap log levels - "msg": "finished unary call", // string log message - - "grpc.code": "OK", // string grpc status code - "grpc.method": "Ping", // string method name - "grpc.service": "mwitkow.testproto.TestService", // string full name of the called service - "grpc.start_time": "2006-01-02T15:04:05Z07:00", // string RFC3339 representation of the start time - "grpc.request.deadline": "2006-01-02T15:04:05Z07:00", // string RFC3339 deadline of the current request if supplied - "grpc.request.value": "something", // string value on the request - "grpc.time_ms": 1.345, // float32 run time of the call in ms - - "peer.address": { - "IP": "127.0.0.1", // string IP address of calling party - "Port": 60216, // int port call is coming in on - "Zone": "" // string peer zone for caller - }, - "span.kind": "server", // string client | server - "system": "grpc" // string - - "custom_field": "custom_value", // string user defined field - "custom_tags.int": 1337, // int user defined tag on the ctx - "custom_tags.string": "something", // string user defined tag on the ctx - } - -*Payload Interceptor* -Below is a JSON formatted example of a log that would be logged by the payload interceptor: - - { - "level": "info", // string zap log levels - "msg": "client request payload logged as grpc.request.content", // string log message - - "grpc.request.content": { // object content of RPC request - "msg" : { // object ZAP specific inner object - "value": "something", // string defined by caller - "sleepTimeMs": 9999 // int defined by caller - } - }, - "grpc.method": "Ping", // string method being called - "grpc.service": "mwitkow.testproto.TestService", // string service being called - - "span.kind": "client", // string client | server - "system": "grpc" // string - } - -Note - due to implementation ZAP differs from Logrus in the "grpc.request.content" object by having an inner "msg" object. - - -Please see examples and tests for examples of use. -Please see settable_test.go for canonical integration through "zaptest" with golang testing infrastructure. -*/ -package grpc_zap diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/grpclogger.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/grpclogger.go deleted file mode 100644 index 4cdee602048..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/grpclogger.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2017 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. 
- -package grpc_zap - -import ( - "fmt" - - grpc_logsettable "github.com/grpc-ecosystem/go-grpc-middleware/logging/settable" - "go.uber.org/zap" - "google.golang.org/grpc/grpclog" -) - -// ReplaceGrpcLogger sets the given zap.Logger as a gRPC-level logger. -// This should be called *before* any other initialization, preferably from init() functions. -// Deprecated: use ReplaceGrpcLoggerV2. -func ReplaceGrpcLogger(logger *zap.Logger) { - zgl := &zapGrpcLogger{logger.With(SystemField, zap.Bool("grpc_log", true))} - grpclog.SetLogger(zgl) -} - -type zapGrpcLogger struct { - logger *zap.Logger -} - -func (l *zapGrpcLogger) Fatal(args ...interface{}) { - l.logger.Fatal(fmt.Sprint(args...)) -} - -func (l *zapGrpcLogger) Fatalf(format string, args ...interface{}) { - l.logger.Fatal(fmt.Sprintf(format, args...)) -} - -func (l *zapGrpcLogger) Fatalln(args ...interface{}) { - l.logger.Fatal(fmt.Sprint(args...)) -} - -func (l *zapGrpcLogger) Print(args ...interface{}) { - l.logger.Info(fmt.Sprint(args...)) -} - -func (l *zapGrpcLogger) Printf(format string, args ...interface{}) { - l.logger.Info(fmt.Sprintf(format, args...)) -} - -func (l *zapGrpcLogger) Println(args ...interface{}) { - l.logger.Info(fmt.Sprint(args...)) -} - -// ReplaceGrpcLoggerV2 replaces the grpclog.LoggerV2 with the provided logger. -// It should be called before any gRPC functions. Logging verbosity defaults to info level. -// To adjust gRPC logging verbosity, see ReplaceGrpcLoggerV2WithVerbosity. -func ReplaceGrpcLoggerV2(logger *zap.Logger) { - ReplaceGrpcLoggerV2WithVerbosity(logger, 0) -} - -// ReplaceGrpcLoggerV2WithVerbosity replaces the grpclog.Logger with the provided logger and verbosity. -// It should be called before any gRPC functions. -// verbosity correlates to grpclogs verbosity levels. A higher verbosity value results in less logging. -func ReplaceGrpcLoggerV2WithVerbosity(logger *zap.Logger, verbosity int) { - zgl := &zapGrpcLoggerV2{ - logger: logger.With(SystemField, zap.Bool("grpc_log", true)).WithOptions(zap.AddCallerSkip(2)), - verbosity: verbosity, - } - grpclog.SetLoggerV2(zgl) -} - -// SetGrpcLoggerV2 replaces the grpc_log.Logger with the provided logger. -// It can be used even when grpc infrastructure was initialized. -func SetGrpcLoggerV2(settable grpc_logsettable.SettableLoggerV2, logger *zap.Logger) { - SetGrpcLoggerV2WithVerbosity(settable, logger, 0) -} - -// SetGrpcLoggerV2WithVerbosity replaces the grpc_.LoggerV2 with the provided logger and verbosity. -// It can be used even when grpc infrastructure was initialized. 
-func SetGrpcLoggerV2WithVerbosity(settable grpc_logsettable.SettableLoggerV2, logger *zap.Logger, verbosity int) { - zgl := &zapGrpcLoggerV2{ - logger: logger.With(SystemField, zap.Bool("grpc_log", true)), - verbosity: verbosity, - } - settable.Set(zgl) -} - -type zapGrpcLoggerV2 struct { - logger *zap.Logger - verbosity int -} - -func (l *zapGrpcLoggerV2) Info(args ...interface{}) { - l.logger.Info(fmt.Sprint(args...)) -} - -func (l *zapGrpcLoggerV2) Infoln(args ...interface{}) { - l.logger.Info(fmt.Sprint(args...)) -} - -func (l *zapGrpcLoggerV2) Infof(format string, args ...interface{}) { - l.logger.Info(fmt.Sprintf(format, args...)) -} - -func (l *zapGrpcLoggerV2) Warning(args ...interface{}) { - l.logger.Warn(fmt.Sprint(args...)) -} - -func (l *zapGrpcLoggerV2) Warningln(args ...interface{}) { - l.logger.Warn(fmt.Sprint(args...)) -} - -func (l *zapGrpcLoggerV2) Warningf(format string, args ...interface{}) { - l.logger.Warn(fmt.Sprintf(format, args...)) -} - -func (l *zapGrpcLoggerV2) Error(args ...interface{}) { - l.logger.Error(fmt.Sprint(args...)) -} - -func (l *zapGrpcLoggerV2) Errorln(args ...interface{}) { - l.logger.Error(fmt.Sprint(args...)) -} - -func (l *zapGrpcLoggerV2) Errorf(format string, args ...interface{}) { - l.logger.Error(fmt.Sprintf(format, args...)) -} - -func (l *zapGrpcLoggerV2) Fatal(args ...interface{}) { - l.logger.Fatal(fmt.Sprint(args...)) -} - -func (l *zapGrpcLoggerV2) Fatalln(args ...interface{}) { - l.logger.Fatal(fmt.Sprint(args...)) -} - -func (l *zapGrpcLoggerV2) Fatalf(format string, args ...interface{}) { - l.logger.Fatal(fmt.Sprintf(format, args...)) -} - -func (l *zapGrpcLoggerV2) V(level int) bool { - // Check whether the verbosity of the current log ('level') is within the specified threshold ('l.verbosity'). - // As in https://github.com/grpc/grpc-go/blob/41e044e1c82fcf6a5801d6cbd7ecf952505eecb1/grpclog/loggerv2.go#L199-L201. 
- return level <= l.verbosity -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/options.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/options.go deleted file mode 100644 index b59db9ad69e..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/options.go +++ /dev/null @@ -1,217 +0,0 @@ -package grpc_zap - -import ( - "context" - "time" - - grpc_logging "github.com/grpc-ecosystem/go-grpc-middleware/logging" - "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" - "google.golang.org/grpc/codes" -) - -var ( - defaultOptions = &options{ - levelFunc: DefaultCodeToLevel, - shouldLog: grpc_logging.DefaultDeciderMethod, - codeFunc: grpc_logging.DefaultErrorToCode, - durationFunc: DefaultDurationToField, - messageFunc: DefaultMessageProducer, - timestampFormat: time.RFC3339, - } -) - -type options struct { - levelFunc CodeToLevel - shouldLog grpc_logging.Decider - codeFunc grpc_logging.ErrorToCode - durationFunc DurationToField - messageFunc MessageProducer - timestampFormat string -} - -func evaluateServerOpt(opts []Option) *options { - optCopy := &options{} - *optCopy = *defaultOptions - optCopy.levelFunc = DefaultCodeToLevel - for _, o := range opts { - o(optCopy) - } - return optCopy -} - -func evaluateClientOpt(opts []Option) *options { - optCopy := &options{} - *optCopy = *defaultOptions - optCopy.levelFunc = DefaultClientCodeToLevel - for _, o := range opts { - o(optCopy) - } - return optCopy -} - -type Option func(*options) - -// CodeToLevel function defines the mapping between gRPC return codes and interceptor log level. -type CodeToLevel func(code codes.Code) zapcore.Level - -// DurationToField function defines how to produce duration fields for logging -type DurationToField func(duration time.Duration) zapcore.Field - -// WithDecider customizes the function for deciding if the gRPC interceptor logs should log. -func WithDecider(f grpc_logging.Decider) Option { - return func(o *options) { - o.shouldLog = f - } -} - -// WithLevels customizes the function for mapping gRPC return codes and interceptor log level statements. -func WithLevels(f CodeToLevel) Option { - return func(o *options) { - o.levelFunc = f - } -} - -// WithCodes customizes the function for mapping errors to error codes. -func WithCodes(f grpc_logging.ErrorToCode) Option { - return func(o *options) { - o.codeFunc = f - } -} - -// WithDurationField customizes the function for mapping request durations to Zap fields. -func WithDurationField(f DurationToField) Option { - return func(o *options) { - o.durationFunc = f - } -} - -// WithMessageProducer customizes the function for message formation. -func WithMessageProducer(f MessageProducer) Option { - return func(o *options) { - o.messageFunc = f - } -} - -// WithTimestampFormat customizes the timestamps emitted in the log fields. -func WithTimestampFormat(format string) Option { - return func(o *options) { - o.timestampFormat = format - } -} - -// DefaultCodeToLevel is the default implementation of gRPC return codes and interceptor log level for server side. 
-func DefaultCodeToLevel(code codes.Code) zapcore.Level { - switch code { - case codes.OK: - return zap.InfoLevel - case codes.Canceled: - return zap.InfoLevel - case codes.Unknown: - return zap.ErrorLevel - case codes.InvalidArgument: - return zap.InfoLevel - case codes.DeadlineExceeded: - return zap.WarnLevel - case codes.NotFound: - return zap.InfoLevel - case codes.AlreadyExists: - return zap.InfoLevel - case codes.PermissionDenied: - return zap.WarnLevel - case codes.Unauthenticated: - return zap.InfoLevel // unauthenticated requests can happen - case codes.ResourceExhausted: - return zap.WarnLevel - case codes.FailedPrecondition: - return zap.WarnLevel - case codes.Aborted: - return zap.WarnLevel - case codes.OutOfRange: - return zap.WarnLevel - case codes.Unimplemented: - return zap.ErrorLevel - case codes.Internal: - return zap.ErrorLevel - case codes.Unavailable: - return zap.WarnLevel - case codes.DataLoss: - return zap.ErrorLevel - default: - return zap.ErrorLevel - } -} - -// DefaultClientCodeToLevel is the default implementation of gRPC return codes to log levels for client side. -func DefaultClientCodeToLevel(code codes.Code) zapcore.Level { - switch code { - case codes.OK: - return zap.DebugLevel - case codes.Canceled: - return zap.DebugLevel - case codes.Unknown: - return zap.InfoLevel - case codes.InvalidArgument: - return zap.DebugLevel - case codes.DeadlineExceeded: - return zap.InfoLevel - case codes.NotFound: - return zap.DebugLevel - case codes.AlreadyExists: - return zap.DebugLevel - case codes.PermissionDenied: - return zap.InfoLevel - case codes.Unauthenticated: - return zap.InfoLevel // unauthenticated requests can happen - case codes.ResourceExhausted: - return zap.DebugLevel - case codes.FailedPrecondition: - return zap.DebugLevel - case codes.Aborted: - return zap.DebugLevel - case codes.OutOfRange: - return zap.DebugLevel - case codes.Unimplemented: - return zap.WarnLevel - case codes.Internal: - return zap.WarnLevel - case codes.Unavailable: - return zap.WarnLevel - case codes.DataLoss: - return zap.WarnLevel - default: - return zap.InfoLevel - } -} - -// DefaultDurationToField is the default implementation of converting request duration to a Zap field. -var DefaultDurationToField = DurationToTimeMillisField - -// DurationToTimeMillisField converts the duration to milliseconds and uses the key `grpc.time_ms`. -func DurationToTimeMillisField(duration time.Duration) zapcore.Field { - return zap.Float32("grpc.time_ms", durationToMilliseconds(duration)) -} - -// DurationToDurationField uses a Duration field to log the request duration -// and leaves it up to Zap's encoder settings to determine how that is output. -func DurationToDurationField(duration time.Duration) zapcore.Field { - return zap.Duration("grpc.duration", duration) -} - -func durationToMilliseconds(duration time.Duration) float32 { - return float32(duration.Nanoseconds()/1000) / 1000 -} - -// MessageProducer produces a user defined log message -type MessageProducer func(ctx context.Context, msg string, level zapcore.Level, code codes.Code, err error, duration zapcore.Field) - -// DefaultMessageProducer writes the default message -func DefaultMessageProducer(ctx context.Context, msg string, level zapcore.Level, code codes.Code, err error, duration zapcore.Field) { - // re-extract logger from newCtx, as it may have extra fields that changed in the holder. 
- ctxzap.Extract(ctx).Check(level, msg).Write( - zap.Error(err), - zap.String("grpc.code", code.String()), - duration, - ) -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/payload_interceptors.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/payload_interceptors.go deleted file mode 100644 index 329019b1722..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/payload_interceptors.go +++ /dev/null @@ -1,150 +0,0 @@ -package grpc_zap - -import ( - "bytes" - "context" - "fmt" - - "github.com/golang/protobuf/jsonpb" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/go-grpc-middleware/logging" - "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" - "google.golang.org/grpc" -) - -var ( - // JsonPbMarshaller is the marshaller used for serializing protobuf messages. - // If needed, this variable can be reassigned with a different marshaller with the same Marshal() signature. - JsonPbMarshaller grpc_logging.JsonPbMarshaler = &jsonpb.Marshaler{} -) - -// PayloadUnaryServerInterceptor returns a new unary server interceptor that logs the payloads of requests. -// -// This *only* works when placed *after* the `grpc_zap.UnaryServerInterceptor`. However, the logging can be done to a -// separate instance of the logger. -func PayloadUnaryServerInterceptor(logger *zap.Logger, decider grpc_logging.ServerPayloadLoggingDecider) grpc.UnaryServerInterceptor { - return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { - if !decider(ctx, info.FullMethod, info.Server) { - return handler(ctx, req) - } - // Use the provided zap.Logger for logging but use the fields from context. - logEntry := logger.With(append(serverCallFields(info.FullMethod), ctxzap.TagsToFields(ctx)...)...) - logProtoMessageAsJson(logEntry, req, "grpc.request.content", "server request payload logged as grpc.request.content field") - resp, err := handler(ctx, req) - if err == nil { - logProtoMessageAsJson(logEntry, resp, "grpc.response.content", "server response payload logged as grpc.response.content field") - } - return resp, err - } -} - -// PayloadStreamServerInterceptor returns a new streaming server interceptor that logs the payloads of requests. -// -// This *only* works when placed *after* the `grpc_zap.StreamServerInterceptor`. However, the logging can be done to a -// separate instance of the logger. -func PayloadStreamServerInterceptor(logger *zap.Logger, decider grpc_logging.ServerPayloadLoggingDecider) grpc.StreamServerInterceptor { - return func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - if !decider(stream.Context(), info.FullMethod, srv) { - return handler(srv, stream) - } - logEntry := logger.With(append(serverCallFields(info.FullMethod), ctxzap.TagsToFields(stream.Context())...)...) - newStream := &loggingServerStream{ServerStream: stream, logger: logEntry} - return handler(srv, newStream) - } -} - -// PayloadUnaryClientInterceptor returns a new unary client interceptor that logs the payloads of requests and responses.
-func PayloadUnaryClientInterceptor(logger *zap.Logger, decider grpc_logging.ClientPayloadLoggingDecider) grpc.UnaryClientInterceptor { - return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { - if !decider(ctx, method) { - return invoker(ctx, method, req, reply, cc, opts...) - } - logEntry := logger.With(newClientLoggerFields(ctx, method)...) - logProtoMessageAsJson(logEntry, req, "grpc.request.content", "client request payload logged as grpc.request.content") - err := invoker(ctx, method, req, reply, cc, opts...) - if err == nil { - logProtoMessageAsJson(logEntry, reply, "grpc.response.content", "client response payload logged as grpc.response.content") - } - return err - } -} - -// PayloadStreamClientInterceptor returns a new streaming client interceptor that logs the payloads of requests and responses. -func PayloadStreamClientInterceptor(logger *zap.Logger, decider grpc_logging.ClientPayloadLoggingDecider) grpc.StreamClientInterceptor { - return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { - if !decider(ctx, method) { - return streamer(ctx, desc, cc, method, opts...) - } - logEntry := logger.With(newClientLoggerFields(ctx, method)...) - clientStream, err := streamer(ctx, desc, cc, method, opts...) - newStream := &loggingClientStream{ClientStream: clientStream, logger: logEntry} - return newStream, err - } -} - -type loggingClientStream struct { - grpc.ClientStream - logger *zap.Logger -} - -func (l *loggingClientStream) SendMsg(m interface{}) error { - err := l.ClientStream.SendMsg(m) - if err == nil { - logProtoMessageAsJson(l.logger, m, "grpc.request.content", "server request payload logged as grpc.request.content field") - } - return err -} - -func (l *loggingClientStream) RecvMsg(m interface{}) error { - err := l.ClientStream.RecvMsg(m) - if err == nil { - logProtoMessageAsJson(l.logger, m, "grpc.response.content", "server response payload logged as grpc.response.content field") - } - return err -} - -type loggingServerStream struct { - grpc.ServerStream - logger *zap.Logger -} - -func (l *loggingServerStream) SendMsg(m interface{}) error { - err := l.ServerStream.SendMsg(m) - if err == nil { - logProtoMessageAsJson(l.logger, m, "grpc.response.content", "server response payload logged as grpc.response.content field") - } - return err -} - -func (l *loggingServerStream) RecvMsg(m interface{}) error { - err := l.ServerStream.RecvMsg(m) - if err == nil { - logProtoMessageAsJson(l.logger, m, "grpc.request.content", "server request payload logged as grpc.request.content field") - } - return err -} - -func logProtoMessageAsJson(logger *zap.Logger, pbMsg interface{}, key string, msg string) { - if p, ok := pbMsg.(proto.Message); ok { - logger.Check(zapcore.InfoLevel, msg).Write(zap.Object(key, &jsonpbObjectMarshaler{pb: p})) - } -} - -type jsonpbObjectMarshaler struct { - pb proto.Message -} - -func (j *jsonpbObjectMarshaler) MarshalLogObject(e zapcore.ObjectEncoder) error { - // ZAP jsonEncoder deals with AddReflect by using json.MarshalObject. The same thing applies for consoleEncoder. 
- return e.AddReflected("msg", j) -} - -func (j *jsonpbObjectMarshaler) MarshalJSON() ([]byte, error) { - b := &bytes.Buffer{} - if err := JsonPbMarshaller.Marshal(b, j.pb); err != nil { - return nil, fmt.Errorf("jsonpb serializer failed: %v", err) - } - return b.Bytes(), nil -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/server_interceptors.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/server_interceptors.go deleted file mode 100644 index 1db035897eb..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/server_interceptors.go +++ /dev/null @@ -1,85 +0,0 @@ -package grpc_zap - -import ( - "context" - "path" - "time" - - grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" - "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" - "google.golang.org/grpc" -) - -var ( - // SystemField is used in every log statement made through grpc_zap. Can be overwritten before any initialization code. - SystemField = zap.String("system", "grpc") - - // ServerField is used in every server-side log statement made through grpc_zap. Can be overwritten before initialization. - ServerField = zap.String("span.kind", "server") -) - -// UnaryServerInterceptor returns a new unary server interceptor that adds zap.Logger to the context. -func UnaryServerInterceptor(logger *zap.Logger, opts ...Option) grpc.UnaryServerInterceptor { - o := evaluateServerOpt(opts) - return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { - startTime := time.Now() - - newCtx := newLoggerForCall(ctx, logger, info.FullMethod, startTime, o.timestampFormat) - - resp, err := handler(newCtx, req) - if !o.shouldLog(info.FullMethod, err) { - return resp, err - } - code := o.codeFunc(err) - level := o.levelFunc(code) - duration := o.durationFunc(time.Since(startTime)) - - o.messageFunc(newCtx, "finished unary call with code "+code.String(), level, code, err, duration) - return resp, err - } -} - -// StreamServerInterceptor returns a new streaming server interceptor that adds zap.Logger to the context.
-func StreamServerInterceptor(logger *zap.Logger, opts ...Option) grpc.StreamServerInterceptor { - o := evaluateServerOpt(opts) - return func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - startTime := time.Now() - newCtx := newLoggerForCall(stream.Context(), logger, info.FullMethod, startTime, o.timestampFormat) - wrapped := grpc_middleware.WrapServerStream(stream) - wrapped.WrappedContext = newCtx - - err := handler(srv, wrapped) - if !o.shouldLog(info.FullMethod, err) { - return err - } - code := o.codeFunc(err) - level := o.levelFunc(code) - duration := o.durationFunc(time.Since(startTime)) - - o.messageFunc(newCtx, "finished streaming call with code "+code.String(), level, code, err, duration) - return err - } -} - -func serverCallFields(fullMethodString string) []zapcore.Field { - service := path.Dir(fullMethodString)[1:] - method := path.Base(fullMethodString) - return []zapcore.Field{ - SystemField, - ServerField, - zap.String("grpc.service", service), - zap.String("grpc.method", method), - } -} - -func newLoggerForCall(ctx context.Context, logger *zap.Logger, fullMethodString string, start time.Time, timestampFormat string) context.Context { - var f []zapcore.Field - f = append(f, zap.String("grpc.start_time", start.Format(timestampFormat))) - if d, ok := ctx.Deadline(); ok { - f = append(f, zap.String("grpc.request.deadline", d.Format(timestampFormat))) - } - callLog := logger.With(append(f, serverCallFields(fullMethodString)...)...) - return ctxzap.ToContext(ctx, callLog) -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/backoff.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/backoff.go deleted file mode 100644 index ad35f09a87f..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/backoff.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2016 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. - -package grpc_retry - -import ( - "time" - - "github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils" -) - -// BackoffLinear is very simple: it waits for a fixed period of time between calls. -func BackoffLinear(waitBetween time.Duration) BackoffFunc { - return func(attempt uint) time.Duration { - return waitBetween - } -} - -// BackoffLinearWithJitter waits a set period of time, allowing for jitter (fractional adjustment). -// -// For example waitBetween=1s and jitter=0.10 can generate waits between 900ms and 1100ms. -func BackoffLinearWithJitter(waitBetween time.Duration, jitterFraction float64) BackoffFunc { - return func(attempt uint) time.Duration { - return backoffutils.JitterUp(waitBetween, jitterFraction) - } -} - -// BackoffExponential produces increasing intervals for each attempt. -// -// The scalar is multiplied times 2 raised to the current attempt. So the first -// retry with a scalar of 100ms is 100ms, while the 5th attempt would be 1.6s. -func BackoffExponential(scalar time.Duration) BackoffFunc { - return func(attempt uint) time.Duration { - return scalar * time.Duration(backoffutils.ExponentBase2(attempt)) - } -} - -// BackoffExponentialWithJitter creates an exponential backoff like -// BackoffExponential does, but adds jitter. 
-func BackoffExponentialWithJitter(scalar time.Duration, jitterFraction float64) BackoffFunc { - return func(attempt uint) time.Duration { - return backoffutils.JitterUp(scalar*time.Duration(backoffutils.ExponentBase2(attempt)), jitterFraction) - } -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go deleted file mode 100644 index f8ba7198a5a..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2016 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. - -/* -`grpc_retry` provides client-side request retry logic for gRPC. - -Client-Side Request Retry Interceptor - -It allows for automatic retry, inside the generated gRPC code, of requests based on the gRPC status -of the reply. It supports unary (1:1), and server stream (1:n) requests. - -By default the interceptors *are disabled*, preventing accidental use of retries. You can easily -override the number of retries (setting them to more than 0) with a `grpc.ClientOption`, e.g.: - - myclient.Ping(ctx, goodPing, grpc_retry.WithMax(5)) - -Other default options are: retry on `ResourceExhausted` and `Unavailable` gRPC codes, use a 50ms -linear backoff with 10% jitter. - -For chained interceptors, the retry interceptor will call every interceptor that follows it -whenever a retry happens. - -Please see examples for more advanced use. -*/ -package grpc_retry
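The doc comment above describes the v1 defaults; as a concrete illustration, a client built against this now-removed v1 API would have been wired up roughly as follows. This is a hedged sketch, not code from this repository: the dial target and pb.NewPingClient are placeholders, while the grpc_retry identifiers are the ones defined in the files deleted in this diff, and ctx/goodPing are as in the doc comment above.

    // Install the retry interceptors once at dial time; WithMax and
    // BackoffExponential come from the deleted v1 options/backoff files.
    retryOpts := []grpc_retry.CallOption{
        grpc_retry.WithMax(3),
        grpc_retry.WithBackoff(grpc_retry.BackoffExponential(100 * time.Millisecond)),
    }
    conn, err := grpc.Dial("ping.example.local:443", // hypothetical target
        grpc.WithUnaryInterceptor(grpc_retry.UnaryClientInterceptor(retryOpts...)),
        grpc.WithStreamInterceptor(grpc_retry.StreamClientInterceptor(retryOpts...)),
    )
    if err != nil {
        log.Fatalf("dial: %v", err)
    }
    defer conn.Close()
    client := pb.NewPingClient(conn) // hypothetical generated client
    // Per-call override, exactly as in the doc comment above:
    _, err = client.Ping(ctx, goodPing, grpc_retry.WithMax(5))

Because CallOption embeds grpc.EmptyCallOption, the same option values work both at interceptor construction and as per-call grpc.CallOptions.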
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/options.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/options.go deleted file mode 100644 index 7a633e29347..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/options.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2016 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. - -package grpc_retry - -import ( - "context" - "time" - - "google.golang.org/grpc" - "google.golang.org/grpc/codes" -) - -var ( - // DefaultRetriableCodes is a set of well-known gRPC codes that should be retriable. - // - // `ResourceExhausted` means that the user quota, e.g. per-RPC limits, has been reached. - // `Unavailable` means that the system is currently unavailable and the client should retry again. - DefaultRetriableCodes = []codes.Code{codes.ResourceExhausted, codes.Unavailable} - - defaultOptions = &options{ - max: 0, // disabled - perCallTimeout: 0, // disabled - includeHeader: true, - codes: DefaultRetriableCodes, - backoffFunc: BackoffFuncContext(func(ctx context.Context, attempt uint) time.Duration { - return BackoffLinearWithJitter(50*time.Millisecond /*jitter*/, 0.10)(attempt) - }), - } -) - -// BackoffFunc denotes a family of functions that control the backoff duration between call retries. -// -// They are called with an identifier of the attempt, and should return a time the system client should -// hold off for. If the time returned is longer than the `context.Context.Deadline` of the request -// the deadline of the request takes precedence and the wait will be interrupted before proceeding -// with the next iteration. -type BackoffFunc func(attempt uint) time.Duration - -// BackoffFuncContext denotes a family of functions that control the backoff duration between call retries. -// -// They are called with an identifier of the attempt, and should return a time the system client should -// hold off for. If the time returned is longer than the `context.Context.Deadline` of the request -// the deadline of the request takes precedence and the wait will be interrupted before proceeding -// with the next iteration. The context can be used to extract request scoped metadata and context values. -type BackoffFuncContext func(ctx context.Context, attempt uint) time.Duration - -// Disable disables the retry behaviour on this call, or this interceptor. -// -// It's semantically the same as `WithMax(0)`. -func Disable() CallOption { - return WithMax(0) -} - -// WithMax sets the maximum number of retries on this call, or this interceptor. -func WithMax(maxRetries uint) CallOption { - return CallOption{applyFunc: func(o *options) { - o.max = maxRetries - }} -} - -// WithBackoff sets the `BackoffFunc` used to control time between retries. -func WithBackoff(bf BackoffFunc) CallOption { - return CallOption{applyFunc: func(o *options) { - o.backoffFunc = BackoffFuncContext(func(ctx context.Context, attempt uint) time.Duration { - return bf(attempt) - }) - }} -} - -// WithBackoffContext sets the `BackoffFuncContext` used to control time between retries. -func WithBackoffContext(bf BackoffFuncContext) CallOption { - return CallOption{applyFunc: func(o *options) { - o.backoffFunc = bf - }} -} - -// WithCodes sets which codes should be retried. -// -// Please *use with care*, as you may be retrying non-idempotent calls. -// -// You cannot automatically retry on `Canceled` and `DeadlineExceeded`; please use `WithPerRetryTimeout` for these. -func WithCodes(retryCodes ...codes.Code) CallOption { - return CallOption{applyFunc: func(o *options) { - o.codes = retryCodes - }} -} - -// WithPerRetryTimeout sets the RPC timeout per call (including initial call) on this call, or this interceptor. -// -// The context.Deadline of the call takes precedence and sets the maximum time the whole invocation -// will take, but WithPerRetryTimeout can be used to limit the RPC time per each call. -// -// For example, with context.Deadline = now + 10s, and WithPerRetryTimeout(3 * time.Second), each -// of the retry calls (including the initial one) will have a deadline of now + 3s. -// -// A value of 0 disables the timeout overrides completely and returns to each retry call using the -// parent `context.Deadline`. -// -// Note that when this is enabled, any DeadlineExceeded errors that are propagated up will be retried. -func WithPerRetryTimeout(timeout time.Duration) CallOption { - return CallOption{applyFunc: func(o *options) { - o.perCallTimeout = timeout - }} -} - -type options struct { - max uint - perCallTimeout time.Duration - includeHeader bool - codes []codes.Code - backoffFunc BackoffFuncContext -} - -// CallOption is a grpc.CallOption that is local to grpc_retry. -type CallOption struct { - grpc.EmptyCallOption // make sure we implement private after() and before() fields so we don't panic.
- applyFunc func(opt *options) -} - -func reuseOrNewWithCallOptions(opt *options, callOptions []CallOption) *options { - if len(callOptions) == 0 { - return opt - } - optCopy := &options{} - *optCopy = *opt - for _, f := range callOptions { - f.applyFunc(optCopy) - } - return optCopy -} - -func filterCallOptions(callOptions []grpc.CallOption) (grpcOptions []grpc.CallOption, retryOptions []CallOption) { - for _, opt := range callOptions { - if co, ok := opt.(CallOption); ok { - retryOptions = append(retryOptions, co) - } else { - grpcOptions = append(grpcOptions, opt) - } - } - return grpcOptions, retryOptions -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go deleted file mode 100644 index 003bbd9066e..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go +++ /dev/null @@ -1,319 +0,0 @@ -// Copyright 2016 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. - -package grpc_retry - -import ( - "context" - "io" - "strconv" - "sync" - "time" - - "github.com/grpc-ecosystem/go-grpc-middleware/util/metautils" - "golang.org/x/net/trace" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -const ( - AttemptMetadataKey = "x-retry-attempty" -) - -// UnaryClientInterceptor returns a new retrying unary client interceptor. -// -// The default configuration of the interceptor is to not retry *at all*. This behaviour can be -// changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions). -func UnaryClientInterceptor(optFuncs ...CallOption) grpc.UnaryClientInterceptor { - intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs) - return func(parentCtx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { - grpcOpts, retryOpts := filterCallOptions(opts) - callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts) - // short circuit for simplicity, and to avoid allocations. - if callOpts.max == 0 { - return invoker(parentCtx, method, req, reply, cc, grpcOpts...) - } - var lastErr error - for attempt := uint(0); attempt < callOpts.max; attempt++ { - if err := waitRetryBackoff(attempt, parentCtx, callOpts); err != nil { - return err - } - callCtx := perCallContext(parentCtx, callOpts, attempt) - lastErr = invoker(callCtx, method, req, reply, cc, grpcOpts...) - // TODO(mwitkow): Maybe dial and transport errors should be retriable? - if lastErr == nil { - return nil - } - logTrace(parentCtx, "grpc_retry attempt: %d, got err: %v", attempt, lastErr) - if isContextError(lastErr) { - if parentCtx.Err() != nil { - logTrace(parentCtx, "grpc_retry attempt: %d, parent context error: %v", attempt, parentCtx.Err()) - // it's the parent context deadline or cancellation. - return lastErr - } else if callOpts.perCallTimeout != 0 { - // We have set a perCallTimeout in the retry middleware, which would result in a context error if - // the deadline was exceeded, in which case try again. - logTrace(parentCtx, "grpc_retry attempt: %d, context error from retry call", attempt) - continue - } - } - if !isRetriable(lastErr, callOpts) { - return lastErr - } - } - return lastErr - } -} - -// StreamClientInterceptor returns a new retrying stream client interceptor for server side streaming calls.
-// -// The default configuration of the interceptor is to not retry *at all*. This behaviour can be -// changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions). -// -// Retry logic is available *only for ServerStreams*, i.e. 1:n streams, as the internal logic needs -// to buffer the messages sent by the client. If retry is enabled on any other streams (ClientStreams, -// BidiStreams), the retry interceptor will fail the call. -func StreamClientInterceptor(optFuncs ...CallOption) grpc.StreamClientInterceptor { - intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs) - return func(parentCtx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { - grpcOpts, retryOpts := filterCallOptions(opts) - callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts) - // short circuit for simplicity, and to avoid allocations. - if callOpts.max == 0 { - return streamer(parentCtx, desc, cc, method, grpcOpts...) - } - if desc.ClientStreams { - return nil, status.Errorf(codes.Unimplemented, "grpc_retry: cannot retry on ClientStreams, set grpc_retry.Disable()") - } - - var lastErr error - for attempt := uint(0); attempt < callOpts.max; attempt++ { - if err := waitRetryBackoff(attempt, parentCtx, callOpts); err != nil { - return nil, err - } - callCtx := perCallContext(parentCtx, callOpts, 0) - - var newStreamer grpc.ClientStream - newStreamer, lastErr = streamer(callCtx, desc, cc, method, grpcOpts...) - if lastErr == nil { - retryingStreamer := &serverStreamingRetryingStream{ - ClientStream: newStreamer, - callOpts: callOpts, - parentCtx: parentCtx, - streamerCall: func(ctx context.Context) (grpc.ClientStream, error) { - return streamer(ctx, desc, cc, method, grpcOpts...) - }, - } - return retryingStreamer, nil - } - - logTrace(parentCtx, "grpc_retry attempt: %d, got err: %v", attempt, lastErr) - if isContextError(lastErr) { - if parentCtx.Err() != nil { - logTrace(parentCtx, "grpc_retry attempt: %d, parent context error: %v", attempt, parentCtx.Err()) - // it's the parent context deadline or cancellation. - return nil, lastErr - } else if callOpts.perCallTimeout != 0 { - // We have set a perCallTimeout in the retry middleware, which would result in a context error if - // the deadline was exceeded, in which case try again. - logTrace(parentCtx, "grpc_retry attempt: %d, context error from retry call", attempt) - continue - } - } - if !isRetriable(lastErr, callOpts) { - return nil, lastErr - } - } - return nil, lastErr - } -} - -// serverStreamingRetryingStream is the implementation of grpc.ClientStream that acts as a -// proxy to the underlying call. If any of the RecvMsg() calls fail, it will try to reestablish -// a new ClientStream according to the retry policy.
-type serverStreamingRetryingStream struct { - grpc.ClientStream - bufferedSends []interface{} // single message that the client can send - wasClosedSend bool // indicates that CloseSend was called - parentCtx context.Context - callOpts *options - streamerCall func(ctx context.Context) (grpc.ClientStream, error) - mu sync.RWMutex -} - -func (s *serverStreamingRetryingStream) setStream(clientStream grpc.ClientStream) { - s.mu.Lock() - s.ClientStream = clientStream - s.mu.Unlock() -} - -func (s *serverStreamingRetryingStream) getStream() grpc.ClientStream { - s.mu.RLock() - defer s.mu.RUnlock() - return s.ClientStream -} - -func (s *serverStreamingRetryingStream) SendMsg(m interface{}) error { - s.mu.Lock() - s.bufferedSends = append(s.bufferedSends, m) - s.mu.Unlock() - return s.getStream().SendMsg(m) -} - -func (s *serverStreamingRetryingStream) CloseSend() error { - s.mu.Lock() - s.wasClosedSend = true - s.mu.Unlock() - return s.getStream().CloseSend() -} - -func (s *serverStreamingRetryingStream) Header() (metadata.MD, error) { - return s.getStream().Header() -} - -func (s *serverStreamingRetryingStream) Trailer() metadata.MD { - return s.getStream().Trailer() -} - -func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error { - attemptRetry, lastErr := s.receiveMsgAndIndicateRetry(m) - if !attemptRetry { - return lastErr // success or hard failure - } - // We start off from attempt 1, because zeroth was already made on normal SendMsg(). - for attempt := uint(1); attempt < s.callOpts.max; attempt++ { - if err := waitRetryBackoff(attempt, s.parentCtx, s.callOpts); err != nil { - return err - } - callCtx := perCallContext(s.parentCtx, s.callOpts, attempt) - newStream, err := s.reestablishStreamAndResendBuffer(callCtx) - if err != nil { - // Retry dial and transport errors of establishing stream as grpc doesn't retry. - if isRetriable(err, s.callOpts) { - continue - } - return err - } - - s.setStream(newStream) - attemptRetry, lastErr = s.receiveMsgAndIndicateRetry(m) - //fmt.Printf("Received message and indicate: %v %v\n", attemptRetry, lastErr) - if !attemptRetry { - return lastErr - } - } - return lastErr -} - -func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{}) (bool, error) { - err := s.getStream().RecvMsg(m) - if err == nil || err == io.EOF { - return false, err - } - if isContextError(err) { - if s.parentCtx.Err() != nil { - logTrace(s.parentCtx, "grpc_retry parent context error: %v", s.parentCtx.Err()) - return false, err - } - else if s.callOpts.perCallTimeout != 0 { - // We have set a perCallTimeout in the retry middleware, which would result in a context error if - // the deadline was exceeded, in which case try again.
- logTrace(s.parentCtx, "grpc_retry context error from retry call") - return true, err - } - } - return isRetriable(err, s.callOpts), err -} - -func (s *serverStreamingRetryingStream) reestablishStreamAndResendBuffer( - callCtx context.Context, -) (grpc.ClientStream, error) { - s.mu.RLock() - bufferedSends := s.bufferedSends - s.mu.RUnlock() - newStream, err := s.streamerCall(callCtx) - if err != nil { - logTrace(callCtx, "grpc_retry failed redialing new stream: %v", err) - return nil, err - } - for _, msg := range bufferedSends { - if err := newStream.SendMsg(msg); err != nil { - logTrace(callCtx, "grpc_retry failed resending message: %v", err) - return nil, err - } - } - if err := newStream.CloseSend(); err != nil { - logTrace(callCtx, "grpc_retry failed CloseSend on new stream %v", err) - return nil, err - } - return newStream, nil -} - -func waitRetryBackoff(attempt uint, parentCtx context.Context, callOpts *options) error { - var waitTime time.Duration = 0 - if attempt > 0 { - waitTime = callOpts.backoffFunc(parentCtx, attempt) - } - if waitTime > 0 { - logTrace(parentCtx, "grpc_retry attempt: %d, backoff for %v", attempt, waitTime) - timer := time.NewTimer(waitTime) - select { - case <-parentCtx.Done(): - timer.Stop() - return contextErrToGrpcErr(parentCtx.Err()) - case <-timer.C: - } - } - return nil -} - -func isRetriable(err error, callOpts *options) bool { - errCode := status.Code(err) - if isContextError(err) { - // context errors are not retriable based on user settings. - return false - } - for _, code := range callOpts.codes { - if code == errCode { - return true - } - } - return false -} - -func isContextError(err error) bool { - code := status.Code(err) - return code == codes.DeadlineExceeded || code == codes.Canceled -} - -func perCallContext(parentCtx context.Context, callOpts *options, attempt uint) context.Context { - ctx := parentCtx - if callOpts.perCallTimeout != 0 { - ctx, _ = context.WithTimeout(ctx, callOpts.perCallTimeout) - } - if attempt > 0 && callOpts.includeHeader { - mdClone := metautils.ExtractOutgoing(ctx).Clone().Set(AttemptMetadataKey, strconv.FormatUint(uint64(attempt), 10)) - ctx = mdClone.ToOutgoing(ctx) - } - return ctx -} - -func contextErrToGrpcErr(err error) error { - switch err { - case context.DeadlineExceeded: - return status.Error(codes.DeadlineExceeded, err.Error()) - case context.Canceled: - return status.Error(codes.Canceled, err.Error()) - default: - return status.Error(codes.Unknown, err.Error()) - } -} - -func logTrace(ctx context.Context, format string, a ...interface{}) { - tr, ok := trace.FromContext(ctx) - if !ok { - return - } - tr.LazyPrintf(format, a...) -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/context.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/context.go deleted file mode 100644 index 0da1658bbe8..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/context.go +++ /dev/null @@ -1,78 +0,0 @@ -package grpc_ctxtags - -import ( - "context" -) - -type ctxMarker struct{} - -var ( - // ctxMarkerKey is the Context value marker used by *all* logging middleware. - ctxMarkerKey = &ctxMarker{} - // NoopTags is a trivial, minimum overhead implementation of Tags for which all operations are no-ops. - NoopTags = &noopTags{} -) - -// Tags is the interface used for storing request tags between Context calls. - // The default implementation is *not* thread safe, and should be handled only in the context of the request.
-type Tags interface { - // Set sets the given key in the metadata tags. - Set(key string, value interface{}) Tags - // Has checks if the given key exists. - Has(key string) bool - // Values returns a map of key to values. - // Do not modify the underlying map, please use Set instead. - Values() map[string]interface{} -} - -type mapTags struct { - values map[string]interface{} -} - -func (t *mapTags) Set(key string, value interface{}) Tags { - t.values[key] = value - return t -} - -func (t *mapTags) Has(key string) bool { - _, ok := t.values[key] - return ok -} - -func (t *mapTags) Values() map[string]interface{} { - return t.values -} - -type noopTags struct{} - -func (t *noopTags) Set(key string, value interface{}) Tags { - return t -} - -func (t *noopTags) Has(key string) bool { - return false -} - -func (t *noopTags) Values() map[string]interface{} { - return nil -} - -// Extract returns a pre-existing Tags object in the Context. -// If the context wasn't set in a tag interceptor, a no-op Tag storage is returned that will *not* be propagated in context. -func Extract(ctx context.Context) Tags { - t, ok := ctx.Value(ctxMarkerKey).(Tags) - if !ok { - return NoopTags - } - - return t -} - -func SetInContext(ctx context.Context, tags Tags) context.Context { - return context.WithValue(ctx, ctxMarkerKey, tags) -} - -func NewTags() Tags { - return &mapTags{values: make(map[string]interface{})} -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/doc.go deleted file mode 100644 index 960638d0fa3..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/doc.go +++ /dev/null @@ -1,22 +0,0 @@ -/* -`grpc_ctxtags` adds a Tag object to the context that can be used by other middleware to add context about a request. - -Request Context Tags - -Tags describe information about the request, and can be set and used by other middleware, or handlers. Tags are used -for logging and tracing of requests. Tags are populated both upwards, *and* downwards in the interceptor-handler stack. - -You can automatically extract tags (in `grpc.request.`) from request payloads. - -For unary and server-streaming methods, pass in the `WithFieldExtractor` option. For client-streams and bidirectional-streams, you can -use `WithFieldExtractorForInitialReq` which will extract the tags from the first message passed from client to server. -Note the tags will not be modified for subsequent requests, so this option only makes sense when the initial message -establishes the meta-data for the stream. - -If a user doesn't use the interceptors that initialize the `Tags` object, all operations following from an `Extract(ctx)` -will be no-ops. This is to ensure that code doesn't panic if the interceptors weren't used. - -Tag fields are typed and shallow, and should follow the OpenTracing semantics convention: -https://github.com/opentracing/specification/blob/master/semantic_conventions.md -*/ -package grpc_ctxtags
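To make the tag flow above concrete, a server handler using this removed v1 API would read and extend the request tags roughly like this. A hedged sketch only: the pingServer type and the GetTenant field are placeholders, while grpc_ctxtags.Extract and Tags.Set are the functions defined in context.go above.

    // Assumes the grpc_ctxtags server interceptor (interceptors.go below) has
    // populated a Tags object in the context; Extract returns NoopTags otherwise,
    // so every call here degrades to a safe no-op.
    func (s *pingServer) Ping(ctx context.Context, req *pb.PingRequest) (*pb.PingResponse, error) {
        tags := grpc_ctxtags.Extract(ctx)
        tags.Set("custom.tenant", req.GetTenant()) // hypothetical request field
        return &pb.PingResponse{}, nil
    }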
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/fieldextractor.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/fieldextractor.go deleted file mode 100644 index a4073ab49b2..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/fieldextractor.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2017 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. - -package grpc_ctxtags - -import ( - "reflect" -) - -// RequestFieldExtractorFunc is a user-provided function that extracts field information from a gRPC request. -// It is called from tags middleware on arrival of a unary request or a server-stream request. -// Keys and values will be added to the context tags of the request. If there are no fields, you should return nil. -type RequestFieldExtractorFunc func(fullMethod string, req interface{}) map[string]interface{} - -type requestFieldsExtractor interface { - // ExtractRequestFields is a method declared on a Protobuf message that extracts fields from the interface. - // The values from the extracted fields should be set in the appendToMap, in order to avoid allocations. - ExtractRequestFields(appendToMap map[string]interface{}) -} - -// CodeGenRequestFieldExtractor is a function that relies on code-generated functions that export log fields from requests. -// These are usually coming from a protoc-plugin that generates additional information based on custom field options. -func CodeGenRequestFieldExtractor(fullMethod string, req interface{}) map[string]interface{} { - if ext, ok := req.(requestFieldsExtractor); ok { - retMap := make(map[string]interface{}) - ext.ExtractRequestFields(retMap) - if len(retMap) == 0 { - return nil - } - return retMap - } - return nil -} - -// TagBasedRequestFieldExtractor is a function that relies on Go struct tags to export log fields from requests. -// These are usually coming from a protoc-plugin, such as Gogo protobuf. -// -// message Metadata { -// repeated string tags = 1 [ (gogoproto.moretags) = "log_field:\"meta_tags\"" ]; -// } -// -// The tag name is configurable using the tagName parameter. Here it would be "log_field". -func TagBasedRequestFieldExtractor(tagName string) RequestFieldExtractorFunc { - return func(fullMethod string, req interface{}) map[string]interface{} { - retMap := make(map[string]interface{}) - reflectMessageTags(req, retMap, tagName) - if len(retMap) == 0 { - return nil - } - return retMap - } -} - -func reflectMessageTags(msg interface{}, existingMap map[string]interface{}, tagName string) { - v := reflect.ValueOf(msg) - // Only deal with pointers to structs. - if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct { - return - } - // Deref the pointer to get to the struct. - v = v.Elem() - t := v.Type() - for i := 0; i < v.NumField(); i++ { - field := v.Field(i) - kind := field.Kind() - // Only recurse down direct pointers, which should only be to nested structs. - if (kind == reflect.Ptr || kind == reflect.Interface) && field.CanInterface() { - reflectMessageTags(field.Interface(), existingMap, tagName) - } - // In case of arrays/slices (repeated fields) go down to the concrete type. - if kind == reflect.Array || kind == reflect.Slice { - if field.Len() == 0 { - continue - } - kind = field.Index(0).Kind() - } - // Only be interested in scalar kinds (bool through float64) and strings. - if (kind >= reflect.Bool && kind <= reflect.Float64) || kind == reflect.String { - if tag := t.Field(i).Tag.Get(tagName); tag != "" { - existingMap[tag] = field.Interface() - } - } - } - return -}
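TagBasedRequestFieldExtractor pairs with the WithFieldExtractor option from tags/options.go further down in this diff. A hedged sketch of how the two would have been wired together under the v1 API (server construction elided; all identifiers are from the deleted files):

    // Every request message carrying a `log_field` struct tag gets that field
    // copied into the context tags as grpc.request.<tag-name>.
    extractor := grpc_ctxtags.TagBasedRequestFieldExtractor("log_field")
    unaryTags := grpc_ctxtags.UnaryServerInterceptor(grpc_ctxtags.WithFieldExtractor(extractor))
    streamTags := grpc_ctxtags.StreamServerInterceptor(grpc_ctxtags.WithFieldExtractor(extractor))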
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/interceptors.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/interceptors.go deleted file mode 100644 index a7ced60f5b9..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/interceptors.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2017 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. - -package grpc_ctxtags - -import ( - "context" - - "google.golang.org/grpc" - "google.golang.org/grpc/peer" - - "github.com/grpc-ecosystem/go-grpc-middleware" -) - -// UnaryServerInterceptor returns a new unary server interceptor that sets the values for request tags. -func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor { - o := evaluateOptions(opts) - return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { - newCtx := newTagsForCtx(ctx) - if o.requestFieldsFunc != nil { - setRequestFieldTags(newCtx, o.requestFieldsFunc, info.FullMethod, req) - } - return handler(newCtx, req) - } -} - -// StreamServerInterceptor returns a new streaming server interceptor that sets the values for request tags. -func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor { - o := evaluateOptions(opts) - return func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - newCtx := newTagsForCtx(stream.Context()) - if o.requestFieldsFunc == nil { - // Short-circuit, don't do the expensive bit of allocating a wrappedStream. - wrappedStream := grpc_middleware.WrapServerStream(stream) - wrappedStream.WrappedContext = newCtx - return handler(srv, wrappedStream) - } - wrapped := &wrappedStream{stream, info, o, newCtx, true} - err := handler(srv, wrapped) - return err - } -} - -// wrappedStream is a thin wrapper around grpc.ServerStream that allows modifying context and extracts log fields from the initial message. -type wrappedStream struct { - grpc.ServerStream - info *grpc.StreamServerInfo - opts *options - // WrappedContext is the wrapper's own Context. You can assign it. - WrappedContext context.Context - initial bool -} - -// Context returns the wrapper's WrappedContext, overwriting the nested grpc.ServerStream.Context() -func (w *wrappedStream) Context() context.Context { - return w.WrappedContext -} - -func (w *wrappedStream) RecvMsg(m interface{}) error { - err := w.ServerStream.RecvMsg(m) - // We only do log fields extraction on the single-request of a server-side stream. - if !w.info.IsClientStream || w.opts.requestFieldsFromInitial && w.initial { - w.initial = false - - setRequestFieldTags(w.Context(), w.opts.requestFieldsFunc, w.info.FullMethod, m) - } - return err -} - -func newTagsForCtx(ctx context.Context) context.Context { - t := NewTags() - if peer, ok := peer.FromContext(ctx); ok { - t.Set("peer.address", peer.Addr.String()) - } - return SetInContext(ctx, t) -} - -func setRequestFieldTags(ctx context.Context, f RequestFieldExtractorFunc, fullMethodName string, req interface{}) { - if valMap := f(fullMethodName, req); valMap != nil { - t := Extract(ctx) - for k, v := range valMap { - t.Set("grpc.request."+k, v) - } - } -}
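These tag interceptors were conventionally installed before the grpc_zap interceptors deleted earlier in this diff, so that peer.address and grpc.request.* tags are available to the call-scoped logger. A hedged sketch of that v1 chaining, using ChainUnaryServer from the go-grpc-middleware root package (imported above but not itself shown in this diff); logger is assumed to be a *zap.Logger:

    // Tags first, zap second: the zap middleware extracts the Tags set here.
    srv := grpc.NewServer(
        grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
            grpc_ctxtags.UnaryServerInterceptor(grpc_ctxtags.WithFieldExtractor(grpc_ctxtags.CodeGenRequestFieldExtractor)),
            grpc_zap.UnaryServerInterceptor(logger),
        )),
    )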
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/options.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/options.go deleted file mode 100644 index 952775f88d4..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/options.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2017 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. - -package grpc_ctxtags - -var ( - defaultOptions = &options{ - requestFieldsFunc: nil, - } -) - -type options struct { - requestFieldsFunc RequestFieldExtractorFunc - requestFieldsFromInitial bool -} - -func evaluateOptions(opts []Option) *options { - optCopy := &options{} - *optCopy = *defaultOptions - for _, o := range opts { - o(optCopy) - } - return optCopy -} - -type Option func(*options) - -// WithFieldExtractor customizes the function for extracting log fields from protobuf messages, for -// unary and server-streamed methods only. -func WithFieldExtractor(f RequestFieldExtractorFunc) Option { - return func(o *options) { - o.requestFieldsFunc = f - } -} - -// WithFieldExtractorForInitialReq customizes the function for extracting log fields from protobuf messages, -// for all unary and streaming methods. For client-streams and bidirectional-streams, the tags will be -// extracted from the first message from the client. -func WithFieldExtractorForInitialReq(f RequestFieldExtractorFunc) Option { - return func(o *options) { - o.requestFieldsFunc = f - o.requestFieldsFromInitial = true - } -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils/backoff.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils/backoff.go deleted file mode 100644 index 4e69a6305aa..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils/backoff.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2016 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. - -/* -Backoff Helper Utilities - -Implements common backoff features. -*/ -package backoffutils - -import ( - "math/rand" - "time" -) - -// JitterUp adds random jitter to the duration. -// -// This adds or subtracts time from the duration within a given jitter fraction. -// For example, for 10s and jitter 0.1, it will return a time within [9s, 11s]. -func JitterUp(duration time.Duration, jitter float64) time.Duration { - multiplier := jitter * (rand.Float64()*2 - 1) - return time.Duration(float64(duration) * (1 + multiplier)) -} - -// ExponentBase2 computes 2^(a-1) where a >= 1. If a is 0, the result is 0. -func ExponentBase2(a uint) uint { - return (1 << a) >> 1 -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/doc.go deleted file mode 100644 index 1ed9bb499b3..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2016 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. - -/* -Package `metautils` provides convenience functions for dealing with gRPC metadata.MD objects inside -Context handlers. - -While the upstream grpc-go package contains decent functionality (see https://github.com/grpc/grpc-go/blob/master/Documentation/grpc-metadata.md) -it is hard to use. - -The majority of functions center around the NiceMD, which is a convenience wrapper around metadata.MD. For example -the following code allows you to easily extract incoming metadata (server handler) and put it into a new client context -metadata.
- - nmd := metautils.ExtractIncoming(serverCtx).Clone(":authorization", ":custom") - clientCtx := nmd.Set("x-client-header", "2").Set("x-another", "3").ToOutgoing(ctx) -*/ - -package metautils diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go deleted file mode 100644 index 15225d710ac..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2016 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. - -package metautils - -import ( - "context" - "strings" - - "google.golang.org/grpc/metadata" -) - -// NiceMD is a convenience wrapper defining extra functions on the metadata. -type NiceMD metadata.MD - -// ExtractIncoming extracts an inbound metadata from the server-side context. -// -// This function always returns a NiceMD wrapper of the metadata.MD; in case the context doesn't have metadata it returns -// a new empty NiceMD. -func ExtractIncoming(ctx context.Context) NiceMD { - md, ok := metadata.FromIncomingContext(ctx) - if !ok { - return NiceMD(metadata.Pairs()) - } - return NiceMD(md) -} - -// ExtractOutgoing extracts an outbound metadata from the client-side context. -// -// This function always returns a NiceMD wrapper of the metadata.MD; in case the context doesn't have metadata it returns -// a new empty NiceMD. -func ExtractOutgoing(ctx context.Context) NiceMD { - md, ok := metadata.FromOutgoingContext(ctx) - if !ok { - return NiceMD(metadata.Pairs()) - } - return NiceMD(md) -} - -// Clone performs a *deep* copy of the metadata.MD. -// -// You can specify the lower-case copiedKeys to only copy certain allow-listed keys. If no keys are explicitly allow-listed -// all keys get copied. -func (m NiceMD) Clone(copiedKeys ...string) NiceMD { - newMd := NiceMD(metadata.Pairs()) - for k, vv := range m { - found := false - if len(copiedKeys) == 0 { - found = true - } else { - for _, allowedKey := range copiedKeys { - if strings.EqualFold(allowedKey, k) { - found = true - break - } - } - } - if !found { - continue - } - newMd[k] = make([]string, len(vv)) - copy(newMd[k], vv) - } - return newMd -} - -// ToOutgoing sets the given NiceMD as a client-side context for dispatching. -func (m NiceMD) ToOutgoing(ctx context.Context) context.Context { - return metadata.NewOutgoingContext(ctx, metadata.MD(m)) -} - -// ToIncoming sets the given NiceMD as a server-side context for dispatching. -// -// This is mostly useful in ServerInterceptors. -func (m NiceMD) ToIncoming(ctx context.Context) context.Context { - return metadata.NewIncomingContext(ctx, metadata.MD(m)) -} - -// Get retrieves a single value from the metadata. -// -// It works analogously to http.Header.Get, returning the first value if there are many set. If the value is not set, -// an empty string is returned. -// -// The function is binary-key safe. -func (m NiceMD) Get(key string) string { - k := strings.ToLower(key) - vv, ok := m[k] - if !ok { - return "" - } - return vv[0] -} - -// Del removes the given key from the metadata. -// -// It works analogously to http.Header.Del, deleting all values if they exist. -// -// The function is binary-key safe. -func (m NiceMD) Del(key string) NiceMD { - k := strings.ToLower(key) - delete(m, k) - return m -} - -// Set sets the given value in a metadata. -// -// It works analogously to http.Header.Set, overwriting all previous metadata values.
-// -// The function is binary-key safe. -func (m NiceMD) Set(key string, value string) NiceMD { - k := strings.ToLower(key) - m[k] = []string{value} - return m -} - -// Add appends a value to the metadata for the given key. -// -// It works analogously to http.Header.Add, as it appends to any existing values associated with key. -// -// The function is binary-key safe. -func (m NiceMD) Add(key string, value string) NiceMD { - k := strings.ToLower(key) - m[k] = append(m[k], value) - return m -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/COPYRIGHT b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/COPYRIGHT new file mode 100644 index 00000000000..3b13627cdbb --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/COPYRIGHT @@ -0,0 +1,2 @@ +Copyright (c) The go-grpc-middleware Authors. +Licensed under the Apache License 2.0. diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/LICENSE b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/LICENSE new file mode 100644 index 00000000000..b2b065037fc --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof.
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry/backoff.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry/backoff.go new file mode 100644 index 00000000000..5b8aaa683f1 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry/backoff.go @@ -0,0 +1,55 @@ +// Copyright (c) The go-grpc-middleware Authors. +// Licensed under the Apache License 2.0. + +package retry + +import ( + "context" + "math/rand" + "time" +) + +// BackoffLinear is very simple: it waits for a fixed period of time between calls. +func BackoffLinear(waitBetween time.Duration) BackoffFunc { + return func(ctx context.Context, attempt uint) time.Duration { + return waitBetween + } +} + +// jitterUp adds random jitter to the duration. +// This adds or subtracts time from the duration within a given jitter fraction. 
+// For example, for 10s and jitter 0.1, it will return a time within [9s, 11s].
+func jitterUp(duration time.Duration, jitter float64) time.Duration {
+	multiplier := jitter * (rand.Float64()*2 - 1)
+	return time.Duration(float64(duration) * (1 + multiplier))
+}
+
+// exponentBase2 computes 2^(a-1) where a >= 1. If a is 0, the result is 0.
+func exponentBase2(a uint) uint {
+	return (1 << a) >> 1
+}
+
+// BackoffLinearWithJitter waits a set period of time, allowing for jitter (fractional adjustment).
+// For example, waitBetween=1s and jitter=0.10 can generate waits between 900ms and 1100ms.
+func BackoffLinearWithJitter(waitBetween time.Duration, jitterFraction float64) BackoffFunc {
+	return func(ctx context.Context, attempt uint) time.Duration {
+		return jitterUp(waitBetween, jitterFraction)
+	}
+}
+
+// BackoffExponential produces increasing intervals for each attempt.
+// The scalar is multiplied by 2 raised to (attempt - 1). So the first
+// retry with a scalar of 100ms is 100ms, while the 5th attempt would be 1.6s.
+func BackoffExponential(scalar time.Duration) BackoffFunc {
+	return func(ctx context.Context, attempt uint) time.Duration {
+		return scalar * time.Duration(exponentBase2(attempt))
+	}
+}
+
+// BackoffExponentialWithJitter creates an exponential backoff like
+// BackoffExponential does, but adds jitter.
+func BackoffExponentialWithJitter(scalar time.Duration, jitterFraction float64) BackoffFunc {
+	return func(ctx context.Context, attempt uint) time.Duration {
+		return jitterUp(scalar*time.Duration(exponentBase2(attempt)), jitterFraction)
+	}
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry/doc.go
new file mode 100644
index 00000000000..cad659022ee
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry/doc.go
@@ -0,0 +1,25 @@
+// Copyright (c) The go-grpc-middleware Authors.
+// Licensed under the Apache License 2.0.
+
+/*
+Package retry provides client-side request retry logic for gRPC.
+
+# Client-Side Request Retry Interceptor
+
+It allows for automatic retry, inside the generated gRPC code, of requests based on the gRPC status
+of the reply. It supports unary (1:1) and server-stream (1:n) requests.
+
+By default the interceptors *are disabled*, preventing accidental use of retries. You can easily
+override the number of retries (setting them to more than 0) with a `grpc.CallOption`, e.g.:
+
+	myclient.Ping(ctx, goodPing, grpc_retry.WithMax(5))
+
+Other default options are: retry on `ResourceExhausted` and `Unavailable` gRPC codes, and a 50ms
+linear backoff with 10% jitter.
+
+For chained interceptors, the retry interceptor will call every interceptor that follows it
+whenever a retry happens.
+
+Please see examples for more advanced use.
+*/
+package retry
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry/options.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry/options.go
new file mode 100644
index 00000000000..e1466598679
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry/options.go
@@ -0,0 +1,167 @@
+// Copyright (c) The go-grpc-middleware Authors.
+// Licensed under the Apache License 2.0.
+
+package retry
+
+import (
+	"context"
+	"time"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+var (
+	// DefaultRetriableCodes is a set of well-known gRPC codes that should be retriable.
+	//
+	// `ResourceExhausted` means that the user quota, e.g. per-RPC limits, has been reached.
+	// `Unavailable` means that the system is currently unavailable and the client should retry again.
+	DefaultRetriableCodes = []codes.Code{codes.ResourceExhausted, codes.Unavailable}
+
+	defaultOptions = &options{
+		max:            0, // disabled
+		perCallTimeout: 0, // disabled
+		includeHeader:  true,
+		backoffFunc:    BackoffLinearWithJitter(50*time.Millisecond /*jitter*/, 0.10),
+		onRetryCallback: OnRetryCallback(func(ctx context.Context, attempt uint, err error) {
+			logTrace(ctx, "grpc_retry attempt: %d, backoff for %v", attempt, err)
+		}),
+		retriableFunc: newRetriableFuncForCodes(DefaultRetriableCodes),
+	}
+)
+
+// BackoffFunc denotes a family of functions that control the backoff duration between call retries.
+//
+// They are called with the number of the attempt, and should return how long the client should
+// hold off. If the time returned is longer than the `context.Context.Deadline` of the request,
+// the deadline of the request takes precedence and the wait will be interrupted before proceeding
+// with the next iteration. The context can be used to extract request scoped metadata and context values.
+type BackoffFunc func(ctx context.Context, attempt uint) time.Duration
+
+// OnRetryCallback is the type of function called when a retry occurs.
+type OnRetryCallback func(ctx context.Context, attempt uint, err error)
+
+// RetriableFunc denotes a family of functions that control which errors should be retried.
+type RetriableFunc func(err error) bool
+
+// Disable disables the retry behaviour on this call, or this interceptor.
+//
+// It's semantically the same as `WithMax(0)`.
+func Disable() CallOption {
+	return WithMax(0)
+}
+
+// WithMax sets the maximum number of retries on this call, or this interceptor.
+func WithMax(maxRetries uint) CallOption {
+	return CallOption{applyFunc: func(o *options) {
+		o.max = maxRetries
+	}}
+}
+
+// WithBackoff sets the `BackoffFunc` used to control time between retries.
+func WithBackoff(bf BackoffFunc) CallOption {
+	return CallOption{applyFunc: func(o *options) {
+		o.backoffFunc = bf
+	}}
+}
+
+// WithOnRetryCallback sets the callback to use when a retry occurs.
+//
+// By default, when no callback function is provided, a log line is printed to trace.
+func WithOnRetryCallback(fn OnRetryCallback) CallOption {
+	return CallOption{applyFunc: func(o *options) {
+		o.onRetryCallback = fn
+	}}
+}
+
+// WithCodes sets which codes should be retried.
+//
+// Please *use with care*, as you may be retrying non-idempotent calls.
+//
+// You cannot automatically retry on `Canceled` and `DeadlineExceeded`; please use `WithPerRetryTimeout` for these.
+func WithCodes(retryCodes ...codes.Code) CallOption {
+	return CallOption{applyFunc: func(o *options) {
+		o.retriableFunc = newRetriableFuncForCodes(retryCodes)
+	}}
+}
+
+// WithPerRetryTimeout sets the RPC timeout per call (including initial call) on this call, or this interceptor.
+//
+// The context.Deadline of the call takes precedence and sets the maximum time the whole invocation
+// will take, but WithPerRetryTimeout can be used to limit the RPC time per call.
+
+// For example, with context.Deadline = now + 10s, and WithPerRetryTimeout(3 * time.Second), each
+// of the retry calls (including the initial one) will have a deadline of now + 3s.
+//
+// A value of 0 disables the timeout override completely, and each retry call uses the parent
+// `context.Deadline`.
+//
+// Note that when this is enabled, any DeadlineExceeded errors that are propagated up will be retried.
+func WithPerRetryTimeout(timeout time.Duration) CallOption {
+	return CallOption{applyFunc: func(o *options) {
+		o.perCallTimeout = timeout
+	}}
+}
+
+// WithRetriable sets which errors should be retried.
+func WithRetriable(retriableFunc RetriableFunc) CallOption {
+	return CallOption{applyFunc: func(o *options) {
+		o.retriableFunc = retriableFunc
+	}}
+}
+
+type options struct {
+	max             uint
+	perCallTimeout  time.Duration
+	includeHeader   bool
+	backoffFunc     BackoffFunc
+	onRetryCallback OnRetryCallback
+	retriableFunc   RetriableFunc
+}
+
+// CallOption is a grpc.CallOption that is local to grpc_retry.
+type CallOption struct {
+	grpc.EmptyCallOption // make sure we implement private after() and before() fields so we don't panic.
+	applyFunc func(opt *options)
+}
+
+func reuseOrNewWithCallOptions(opt *options, callOptions []CallOption) *options {
+	if len(callOptions) == 0 {
+		return opt
+	}
+	optCopy := &options{}
+	*optCopy = *opt
+	for _, f := range callOptions {
+		f.applyFunc(optCopy)
+	}
+	return optCopy
+}
+
+func filterCallOptions(callOptions []grpc.CallOption) (grpcOptions []grpc.CallOption, retryOptions []CallOption) {
+	for _, opt := range callOptions {
+		if co, ok := opt.(CallOption); ok {
+			retryOptions = append(retryOptions, co)
+		} else {
+			grpcOptions = append(grpcOptions, opt)
+		}
+	}
+	return grpcOptions, retryOptions
+}
+
+// newRetriableFuncForCodes returns a retriable function for the given codes.
+func newRetriableFuncForCodes(codes []codes.Code) func(err error) bool {
+	return func(err error) bool {
+		errCode := status.Code(err)
+		if isContextError(err) {
+			// context errors are not retriable based on user settings.
+			return false
+		}
+		for _, code := range codes {
+			if code == errCode {
+				return true
+			}
+		}
+		return false
+	}
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry/retry.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry/retry.go
new file mode 100644
index 00000000000..f0254f9150f
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry/retry.go
@@ -0,0 +1,326 @@
+// Copyright (c) The go-grpc-middleware Authors.
+// Licensed under the Apache License 2.0.
+
+package retry
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"sync"
+	"time"
+
+	"github.com/grpc-ecosystem/go-grpc-middleware/v2/metadata"
+	"golang.org/x/net/trace"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	grpcMetadata "google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+)
+
+const (
+	AttemptMetadataKey = "x-retry-attempt"
+)
+
+// UnaryClientInterceptor returns a new retrying unary client interceptor.
+//
+// The default configuration of the interceptor is to not retry *at all*. This behaviour can be
+// changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions).
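+//
+// A minimal illustrative sketch of installing the interceptor; the target address is a
+// placeholder and a grpc-go version providing grpc.NewClient is assumed:
+//
+//	conn, err := grpc.NewClient("localhost:50051",
+//		grpc.WithTransportCredentials(insecure.NewCredentials()),
+//		grpc.WithUnaryInterceptor(retry.UnaryClientInterceptor(
+//			retry.WithMax(3),
+//			retry.WithPerRetryTimeout(time.Second),
+//		)),
+//	)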
+func UnaryClientInterceptor(optFuncs ...CallOption) grpc.UnaryClientInterceptor {
+	intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs)
+	return func(parentCtx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+		grpcOpts, retryOpts := filterCallOptions(opts)
+		callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts)
+		// short-circuit for simplicity, and to avoid allocations.
+		if callOpts.max == 0 {
+			return invoker(parentCtx, method, req, reply, cc, grpcOpts...)
+		}
+		var lastErr error
+		for attempt := uint(0); attempt < callOpts.max; attempt++ {
+			if err := waitRetryBackoff(attempt, parentCtx, callOpts); err != nil {
+				return err
+			}
+			if attempt > 0 {
+				callOpts.onRetryCallback(parentCtx, attempt, lastErr)
+			}
+			callCtx, cancel := perCallContext(parentCtx, callOpts, attempt)
+			defer cancel() // Clean up potential resources.
+			lastErr = invoker(callCtx, method, req, reply, cc, grpcOpts...)
+			// TODO(mwitkow): Maybe dial and transport errors should be retriable?
+			if lastErr == nil {
+				return nil
+			}
+			if isContextError(lastErr) {
+				if parentCtx.Err() != nil {
+					logTrace(parentCtx, "grpc_retry attempt: %d, parent context error: %v", attempt, parentCtx.Err())
+					// it's the parent context deadline or cancellation.
+					return lastErr
+				} else if callOpts.perCallTimeout != 0 {
+					// We have set a perCallTimeout in the retry middleware, which would result in a context error if
+					// the deadline was exceeded, in which case try again.
+					logTrace(parentCtx, "grpc_retry attempt: %d, context error from retry call", attempt)
+					continue
+				}
+			}
+			if !isRetriable(lastErr, callOpts) {
+				return lastErr
+			}
+		}
+		return lastErr
+	}
+}
+
+// StreamClientInterceptor returns a new retrying stream client interceptor for server side streaming calls.
+//
+// The default configuration of the interceptor is to not retry *at all*. This behaviour can be
+// changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions).
+//
+// Retry logic is available *only for ServerStreams*, i.e. 1:n streams, as the internal logic needs
+// to buffer the messages sent by the client. If retry is enabled on any other streams (ClientStreams,
+// BidiStreams), the retry interceptor will fail the call.
+func StreamClientInterceptor(optFuncs ...CallOption) grpc.StreamClientInterceptor {
+	intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs)
+	return func(parentCtx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+		grpcOpts, retryOpts := filterCallOptions(opts)
+		callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts)
+		// short-circuit for simplicity, and to avoid allocations.
+		if callOpts.max == 0 {
+			return streamer(parentCtx, desc, cc, method, grpcOpts...)
+		}
+		if desc.ClientStreams {
+			return nil, status.Error(codes.Unimplemented, "grpc_retry: cannot retry on ClientStreams, set grpc_retry.Disable()")
+		}
+
+		var lastErr error
+		for attempt := uint(0); attempt < callOpts.max; attempt++ {
+			if err := waitRetryBackoff(attempt, parentCtx, callOpts); err != nil {
+				return nil, err
+			}
+			if attempt > 0 {
+				callOpts.onRetryCallback(parentCtx, attempt, lastErr)
+			}
+			var newStreamer grpc.ClientStream
+			newStreamer, lastErr = streamer(perStreamContext(parentCtx, callOpts, attempt), desc, cc, method, grpcOpts...)
+			if lastErr == nil {
+				retryingStreamer := &serverStreamingRetryingStream{
+					ClientStream: newStreamer,
+					callOpts:     callOpts,
+					parentCtx:    parentCtx,
+					streamerCall: func(ctx context.Context) (grpc.ClientStream, error) {
+						attempt++
+						return streamer(perStreamContext(ctx, callOpts, attempt), desc, cc, method, grpcOpts...)
+					},
+				}
+				return retryingStreamer, nil
+			}
+			if isContextError(lastErr) {
+				if parentCtx.Err() != nil {
+					logTrace(parentCtx, "grpc_retry attempt: %d, parent context error: %v", attempt, parentCtx.Err())
+					// it's the parent context deadline or cancellation.
+					return nil, lastErr
+				} else if callOpts.perCallTimeout != 0 {
+					// We have set a perCallTimeout in the retry middleware, which would result in a context error if
+					// the deadline was exceeded, in which case try again.
+					logTrace(parentCtx, "grpc_retry attempt: %d, context error from retry call", attempt)
+					continue
+				}
+			}
+			if !isRetriable(lastErr, callOpts) {
+				return nil, lastErr
+			}
+		}
+		return nil, lastErr
+	}
+}
+
+// serverStreamingRetryingStream is an implementation of grpc.ClientStream that acts as a
+// proxy to the underlying call. If any of the RecvMsg() calls fail, it will try to reestablish
+// a new ClientStream according to the retry policy.
+type serverStreamingRetryingStream struct {
+	grpc.ClientStream
+	bufferedSends []any // messages sent by the client, buffered so they can be resent on retry
+	wasClosedSend bool  // indicates that CloseSend was called
+	parentCtx     context.Context
+	callOpts      *options
+	streamerCall  func(ctx context.Context) (grpc.ClientStream, error)
+	mu            sync.RWMutex
+}
+
+func (s *serverStreamingRetryingStream) setStream(clientStream grpc.ClientStream) {
+	s.mu.Lock()
+	s.ClientStream = clientStream
+	s.mu.Unlock()
+}
+
+func (s *serverStreamingRetryingStream) getStream() grpc.ClientStream {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+	return s.ClientStream
+}
+
+func (s *serverStreamingRetryingStream) SendMsg(m any) error {
+	s.mu.Lock()
+	s.bufferedSends = append(s.bufferedSends, m)
+	s.mu.Unlock()
+	return s.getStream().SendMsg(m)
+}
+
+func (s *serverStreamingRetryingStream) CloseSend() error {
+	s.mu.Lock()
+	s.wasClosedSend = true
+	s.mu.Unlock()
+	return s.getStream().CloseSend()
+}
+
+func (s *serverStreamingRetryingStream) Header() (grpcMetadata.MD, error) {
+	return s.getStream().Header()
+}
+
+func (s *serverStreamingRetryingStream) Trailer() grpcMetadata.MD {
+	return s.getStream().Trailer()
+}
+
+func (s *serverStreamingRetryingStream) RecvMsg(m any) error {
+	attemptRetry, lastErr := s.receiveMsgAndIndicateRetry(m)
+	if !attemptRetry {
+		return lastErr // success or hard failure
+	}
+	// We start off from attempt 1, because the zeroth attempt was already made on the original stream.
+	for attempt := uint(1); attempt < s.callOpts.max; attempt++ {
+		if err := waitRetryBackoff(attempt, s.parentCtx, s.callOpts); err != nil {
+			return err
+		}
+		s.callOpts.onRetryCallback(s.parentCtx, attempt, lastErr)
+		newStream, err := s.reestablishStreamAndResendBuffer(s.parentCtx)
+		if err != nil {
+			// Retry dial and transport errors when establishing the stream, as gRPC doesn't retry these.
+ if isRetriable(err, s.callOpts) { + continue + } + return err + } + + s.setStream(newStream) + attemptRetry, lastErr = s.receiveMsgAndIndicateRetry(m) + + if !attemptRetry { + return lastErr + } + } + return lastErr +} + +func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m any) (bool, error) { + err := s.getStream().RecvMsg(m) + if err == nil || err == io.EOF { + return false, err + } + if isContextError(err) { + if s.parentCtx.Err() != nil { + logTrace(s.parentCtx, "grpc_retry parent context error: %v", s.parentCtx.Err()) + return false, err + } else if s.callOpts.perCallTimeout != 0 { + // We have set a perCallTimeout in the retry middleware, which would result in a context error if + // the deadline was exceeded, in which case try again. + logTrace(s.parentCtx, "grpc_retry context error from retry call") + return true, err + } + } + return isRetriable(err, s.callOpts), err +} + +func (s *serverStreamingRetryingStream) reestablishStreamAndResendBuffer(callCtx context.Context) (grpc.ClientStream, error) { + s.mu.RLock() + bufferedSends := s.bufferedSends + s.mu.RUnlock() + newStream, err := s.streamerCall(callCtx) + if err != nil { + logTrace(callCtx, "grpc_retry failed redialing new stream: %v", err) + return nil, err + } + for _, msg := range bufferedSends { + if err := newStream.SendMsg(msg); err != nil { + logTrace(callCtx, "grpc_retry failed resending message: %v", err) + return nil, err + } + } + if err := newStream.CloseSend(); err != nil { + logTrace(callCtx, "grpc_retry failed CloseSend on new stream %v", err) + return nil, err + } + return newStream, nil +} + +func waitRetryBackoff(attempt uint, parentCtx context.Context, callOpts *options) error { + var waitTime time.Duration = 0 + if attempt > 0 { + waitTime = callOpts.backoffFunc(parentCtx, attempt) + } + if waitTime > 0 { + logTrace(parentCtx, "grpc_retry attempt: %d, backoff for %v", attempt, waitTime) + timer := time.NewTimer(waitTime) + select { + case <-parentCtx.Done(): + if !timer.Stop() { + <-timer.C + } + return contextErrToGrpcErr(parentCtx.Err()) + case <-timer.C: + } + } + return nil +} + +func isRetriable(err error, callOpts *options) bool { + if callOpts.retriableFunc != nil { + return callOpts.retriableFunc(err) + } + return false +} + +func isContextError(err error) bool { + code := status.Code(err) + return code == codes.DeadlineExceeded || code == codes.Canceled +} + +func perCallContext(parentCtx context.Context, callOpts *options, attempt uint) (context.Context, context.CancelFunc) { + cancel := context.CancelFunc(func() {}) + + ctx := parentCtx + if callOpts.perCallTimeout != 0 { + ctx, cancel = context.WithTimeout(ctx, callOpts.perCallTimeout) + } + if attempt > 0 && callOpts.includeHeader { + mdClone := metadata.ExtractOutgoing(ctx).Clone().Set(AttemptMetadataKey, fmt.Sprintf("%d", attempt)) + ctx = mdClone.ToOutgoing(ctx) + } + return ctx, cancel +} + +func perStreamContext(parentCtx context.Context, callOpts *options, attempt uint) context.Context { + ctx := parentCtx + if attempt > 0 && callOpts.includeHeader { + mdClone := metadata.ExtractOutgoing(ctx).Clone().Set(AttemptMetadataKey, fmt.Sprintf("%d", attempt)) + ctx = mdClone.ToOutgoing(ctx) + } + return ctx +} + +func contextErrToGrpcErr(err error) error { + switch err { + case context.DeadlineExceeded: + return status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return status.Error(codes.Canceled, err.Error()) + default: + return status.Error(codes.Unknown, err.Error()) + } +} + +func logTrace(ctx 
context.Context, format string, a ...any) {
+	tr, ok := trace.FromContext(ctx)
+	if !ok {
+		return
+	}
+	tr.LazyPrintf(format, a...)
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/metadata/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/metadata/doc.go
new file mode 100644
index 00000000000..3aac1b3648f
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/metadata/doc.go
@@ -0,0 +1,19 @@
+// Copyright (c) The go-grpc-middleware Authors.
+// Licensed under the Apache License 2.0.
+
+/*
+Package `metadata` provides convenience functions for dealing with gRPC metadata.MD objects inside
+Context handlers.
+
+While the upstream grpc-go package contains decent functionality (see https://github.com/grpc/grpc-go/blob/master/Documentation/grpc-metadata.md),
+it is hard to use.
+
+The majority of functions center around the MD, which is a convenience wrapper around metadata.MD. For example,
+the following code allows you to easily extract incoming metadata (server handler) and put it into a new client context
+metadata.
+
+	md := metadata.ExtractIncoming(serverCtx).Clone(":authorization", ":custom")
+	clientCtx := md.Set("x-client-header", "2").Set("x-another", "3").ToOutgoing(ctx)
+*/
+package metadata
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/metadata/metadata.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/metadata/metadata.go
new file mode 100644
index 00000000000..2eeda06857c
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/metadata/metadata.go
@@ -0,0 +1,126 @@
+// Copyright (c) The go-grpc-middleware Authors.
+// Licensed under the Apache License 2.0.
+
+package metadata
+
+import (
+	"context"
+	"strings"
+
+	grpcMetadata "google.golang.org/grpc/metadata"
+)
+
+// MD is a convenience wrapper defining extra functions on the metadata.
+type MD grpcMetadata.MD
+
+// ExtractIncoming extracts inbound metadata from the server-side context.
+//
+// This function always returns an MD wrapper of the grpcMetadata.MD; in case the context doesn't have metadata, it returns
+// a new empty MD.
+func ExtractIncoming(ctx context.Context) MD {
+	md, ok := grpcMetadata.FromIncomingContext(ctx)
+	if !ok {
+		return MD(grpcMetadata.Pairs())
+	}
+	return MD(md)
+}
+
+// ExtractOutgoing extracts outbound metadata from the client-side context.
+//
+// This function always returns an MD wrapper of the grpcMetadata.MD; in case the context doesn't have metadata, it returns
+// a new empty MD.
+func ExtractOutgoing(ctx context.Context) MD {
+	md, ok := grpcMetadata.FromOutgoingContext(ctx)
+	if !ok {
+		return MD(grpcMetadata.Pairs())
+	}
+	return MD(md)
+}
+
+// Clone performs a *deep* copy of the grpcMetadata.MD.
+//
+// You can specify the lower-case copiedKeys to only copy certain whitelisted keys. If no keys are explicitly whitelisted,
+// all keys get copied.
+func (m MD) Clone(copiedKeys ...string) MD {
+	newMd := MD(grpcMetadata.Pairs())
+	for k, vv := range m {
+		found := false
+		if len(copiedKeys) == 0 {
+			found = true
+		} else {
+			for _, allowedKey := range copiedKeys {
+				if strings.EqualFold(allowedKey, k) {
+					found = true
+					break
+				}
+			}
+		}
+		if !found {
+			continue
+		}
+		newMd[k] = make([]string, len(vv))
+		copy(newMd[k], vv)
+	}
+	return newMd
+}
+
+// ToOutgoing sets the given MD as a client-side context for dispatching.
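+//
+// A minimal illustrative sketch (the header name and value are placeholders): copy selected
+// incoming server metadata onto an outgoing client context:
+//
+//	md := metadata.ExtractIncoming(serverCtx).Clone(":authorization")
+//	outCtx := md.Set("x-request-id", "abc-123").ToOutgoing(serverCtx)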
+func (m MD) ToOutgoing(ctx context.Context) context.Context {
+	return grpcMetadata.NewOutgoingContext(ctx, grpcMetadata.MD(m))
+}
+
+// ToIncoming sets the given MD as a server-side context for dispatching.
+//
+// This is mostly useful in ServerInterceptors.
+func (m MD) ToIncoming(ctx context.Context) context.Context {
+	return grpcMetadata.NewIncomingContext(ctx, grpcMetadata.MD(m))
+}
+
+// Get retrieves a single value from the metadata.
+//
+// It works analogously to http.Header.Get, returning the first value if there are many set. If the value is not set,
+// an empty string is returned.
+//
+// The function is binary-key safe.
+func (m MD) Get(key string) string {
+	k, _ := encodeKeyValue(key, "")
+	vv, ok := m[k]
+	if !ok {
+		return ""
+	}
+	return vv[0]
+}
+
+// Del deletes the given key from the metadata.
+//
+// It works analogously to http.Header.Del, deleting all values if they exist.
+//
+// The function is binary-key safe.
+func (m MD) Del(key string) MD {
+	k, _ := encodeKeyValue(key, "")
+	delete(m, k)
+	return m
+}
+
+// Set sets the given value in a metadata.
+//
+// It works analogously to http.Header.Set, overwriting all previous metadata values.
+//
+// The function is binary-key safe.
+func (m MD) Set(key string, value string) MD {
+	k, v := encodeKeyValue(key, value)
+	m[k] = []string{v}
+	return m
+}
+
+// Add appends the given value to the metadata.
+//
+// It works analogously to http.Header.Add, as it appends to any existing values associated with key.
+//
+// The function is binary-key safe.
+func (m MD) Add(key string, value string) MD {
+	k, v := encodeKeyValue(key, value)
+	m[k] = append(m[k], v)
+	return m
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/metadata/single_key.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/metadata/single_key.go
new file mode 100644
index 00000000000..e758c092f93
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/metadata/single_key.go
@@ -0,0 +1,21 @@
+// Copyright (c) The go-grpc-middleware Authors.
+// Licensed under the Apache License 2.0.
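+
+// Illustrative note (not upstream documentation; the key and value below are made up):
+// with the encoding in this file, a call such as Set("token-bin", "\x01\x02") stores the
+// base64-encoded value "AQI=" under the key "token-bin", matching gRPC's convention that
+// "-bin" metadata values are base64-encoded.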
+ +package metadata + +import ( + "encoding/base64" + "strings" +) + +const ( + binHdrSuffix = "-bin" +) + +func encodeKeyValue(k, v string) (string, string) { + k = strings.ToLower(k) + if strings.HasSuffix(k, binHdrSuffix) { + return k, base64.StdEncoding.EncodeToString([]byte(v)) + } + return k, v +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel index 78d7c9f5c88..a65d88eb865 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel @@ -73,7 +73,7 @@ go_test( "@org_golang_google_genproto_googleapis_api//httpbody", "@org_golang_google_genproto_googleapis_rpc//errdetails", "@org_golang_google_genproto_googleapis_rpc//status", - "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//:grpc", "@org_golang_google_grpc//codes", "@org_golang_google_grpc//health/grpc_health_v1", "@org_golang_google_grpc//metadata", diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go index 5dd4e447862..2f2b342431d 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go @@ -49,6 +49,7 @@ var malformedHTTPHeaders = map[string]struct{}{ type ( rpcMethodKey struct{} httpPathPatternKey struct{} + httpPatternKey struct{} AnnotateContextOption func(ctx context.Context) context.Context ) @@ -404,3 +405,13 @@ func HTTPPathPattern(ctx context.Context) (string, bool) { func withHTTPPathPattern(ctx context.Context, httpPathPattern string) context.Context { return context.WithValue(ctx, httpPathPatternKey{}, httpPathPattern) } + +// HTTPPattern returns the HTTP path pattern struct relating to the HTTP handler, if one exists. +func HTTPPattern(ctx context.Context) (Pattern, bool) { + v, ok := ctx.Value(httpPatternKey{}).(Pattern) + return v, ok +} + +func withHTTPPattern(ctx context.Context, httpPattern Pattern) context.Context { + return context.WithValue(ctx, httpPatternKey{}, httpPattern) +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go index d7b15fcfb3f..2e50082ad11 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go @@ -94,7 +94,7 @@ func Int64(val string) (int64, error) { } // Int64Slice converts 'val' where individual integers are separated by -// 'sep' into a int64 slice. +// 'sep' into an int64 slice. func Int64Slice(val, sep string) ([]int64, error) { s := strings.Split(val, sep) values := make([]int64, len(s)) @@ -118,7 +118,7 @@ func Int32(val string) (int32, error) { } // Int32Slice converts 'val' where individual integers are separated by -// 'sep' into a int32 slice. +// 'sep' into an int32 slice. func Int32Slice(val, sep string) ([]int32, error) { s := strings.Split(val, sep) values := make([]int32, len(s)) @@ -190,7 +190,7 @@ func Bytes(val string) ([]byte, error) { } // BytesSlice converts 'val' where individual bytes sequences, encoded in URL-safe -// base64 without padding, are separated by 'sep' into a slice of bytes slices slice. +// base64 without padding, are separated by 'sep' into a slice of byte slices. 
 func BytesSlice(val, sep string) ([][]byte, error) {
 	s := strings.Split(val, sep)
 	values := make([][]byte, len(s))
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
index 5682998699a..41cd4f5030e 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
@@ -81,6 +81,21 @@ func HTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.R
 	mux.errorHandler(ctx, mux, marshaler, w, r, err)
 }
 
+// HTTPStreamError uses the mux-configured stream error handler to notify the client of an error
+// without closing the connection.
+func HTTPStreamError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
+	st := mux.streamErrorHandler(ctx, err)
+	msg := errorChunk(st)
+	buf, err := marshaler.Marshal(msg)
+	if err != nil {
+		grpclog.Errorf("Failed to marshal an error: %v", err)
+		return
+	}
+	if _, err := w.Write(buf); err != nil {
+		grpclog.Errorf("Failed to notify error to client: %v", err)
+		return
+	}
+}
+
 // DefaultHTTPErrorHandler is the default error handler.
 // If "err" is a gRPC Status, the function replies with the status code mapped by HTTPStatusFromCode.
 // If "err" is a HTTPStatusError, the function replies with the status code provide by that struct. This is
@@ -93,6 +108,7 @@ func HTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.R
 func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
 	// return Internal when Marshal failed
 	const fallback = `{"code": 13, "message": "failed to marshal error message"}`
+	const fallbackRewriter = `{"code": 13, "message": "failed to rewrite error message"}`
 
 	var customStatus *HTTPStatusError
 	if errors.As(err, &customStatus) {
@@ -100,19 +116,28 @@ func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marsh
 	}
 
 	s := status.Convert(err)
-	pb := s.Proto()
 
 	w.Header().Del("Trailer")
 	w.Header().Del("Transfer-Encoding")
 
-	contentType := marshaler.ContentType(pb)
+	respRw, err := mux.forwardResponseRewriter(ctx, s.Proto())
+	if err != nil {
+		grpclog.Errorf("Failed to rewrite error message %q: %v", s, err)
+		w.WriteHeader(http.StatusInternalServerError)
+		if _, err := io.WriteString(w, fallbackRewriter); err != nil {
+			grpclog.Errorf("Failed to write response: %v", err)
+		}
+		return
+	}
+
+	contentType := marshaler.ContentType(respRw)
 	w.Header().Set("Content-Type", contentType)
 
 	if s.Code() == codes.Unauthenticated {
 		w.Header().Set("WWW-Authenticate", s.Message())
 	}
 
-	buf, merr := marshaler.Marshal(pb)
+	buf, merr := marshaler.Marshal(respRw)
 	if merr != nil {
 		grpclog.Errorf("Failed to marshal error message %q: %v", s, merr)
 		w.WriteHeader(http.StatusInternalServerError)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go
index 9005d6a0bf4..2fcd7af3c40 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go
@@ -155,7 +155,7 @@ func buildPathsBlindly(name string, in interface{}) []string {
 	return paths
 }
 
-// fieldMaskPathItem stores a in-progress deconstruction of a path for a fieldmask
+// fieldMaskPathItem stores an in-progress deconstruction of a path for a fieldmask
 type 
fieldMaskPathItem struct { // the list of prior fields leading up to node connected by dots path string diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go index de1eef1f4f8..f0727cf7c06 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go @@ -3,6 +3,7 @@ package runtime import ( "context" "errors" + "fmt" "io" "net/http" "net/textproto" @@ -55,20 +56,33 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal return } + respRw, err := mux.forwardResponseRewriter(ctx, resp) + if err != nil { + grpclog.Errorf("Rewrite error: %v", err) + handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter) + return + } + if !wroteHeader { - w.Header().Set("Content-Type", marshaler.ContentType(resp)) + var contentType string + if sct, ok := marshaler.(StreamContentType); ok { + contentType = sct.StreamContentType(respRw) + } else { + contentType = marshaler.ContentType(respRw) + } + w.Header().Set("Content-Type", contentType) } var buf []byte - httpBody, isHTTPBody := resp.(*httpbody.HttpBody) + httpBody, isHTTPBody := respRw.(*httpbody.HttpBody) switch { - case resp == nil: + case respRw == nil: buf, err = marshaler.Marshal(errorChunk(status.New(codes.Internal, "empty response"))) case isHTTPBody: buf = httpBody.GetData() default: - result := map[string]interface{}{"result": resp} - if rb, ok := resp.(responseBody); ok { + result := map[string]interface{}{"result": respRw} + if rb, ok := respRw.(responseBody); ok { result["result"] = rb.XXX_ResponseBody() } @@ -164,12 +178,17 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha HTTPError(ctx, mux, marshaler, w, req, err) return } + respRw, err := mux.forwardResponseRewriter(ctx, resp) + if err != nil { + grpclog.Errorf("Rewrite error: %v", err) + HTTPError(ctx, mux, marshaler, w, req, err) + return + } var buf []byte - var err error - if rb, ok := resp.(responseBody); ok { + if rb, ok := respRw.(responseBody); ok { buf, err = marshaler.Marshal(rb.XXX_ResponseBody()) } else { - buf, err = marshaler.Marshal(resp) + buf, err = marshaler.Marshal(respRw) } if err != nil { grpclog.Errorf("Marshal error: %v", err) @@ -177,11 +196,11 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha return } - if !doForwardTrailers { + if !doForwardTrailers && mux.writeContentLength { w.Header().Set("Content-Length", strconv.Itoa(len(buf))) } - if _, err = w.Write(buf); err != nil { + if _, err = w.Write(buf); err != nil && !errors.Is(err, http.ErrBodyNotAllowed) { grpclog.Errorf("Failed to write response: %v", err) } @@ -201,8 +220,7 @@ func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, re } for _, opt := range opts { if err := opt(ctx, w, resp); err != nil { - grpclog.Errorf("Error handling ForwardResponseOptions: %v", err) - return err + return fmt.Errorf("error handling ForwardResponseOptions: %w", err) } } return nil diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go index 2c0d25ff493..b1dfc37af9b 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go @@ -48,3 +48,11 @@ type Delimited interface { // Delimiter returns the 
record separator for the stream. Delimiter() []byte } + +// StreamContentType defines the streaming content type. +type StreamContentType interface { + // StreamContentType returns the content type for a stream. This shares the + // same behaviour as for `Marshaler.ContentType`, but is called, if present, + // in the case of a streamed response. + StreamContentType(v interface{}) string +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go index 0b051e6e894..07c28112c89 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go @@ -86,8 +86,8 @@ func (m marshalerRegistry) add(mime string, marshaler Marshaler) error { // It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces. // // For example, you could allow the client to specify the use of the runtime.JSONPb marshaler -// with a "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler -// with a "application/json" Content-Type. +// with an "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler +// with an "application/json" Content-Type. // "*" can be used to match any Content-Type. // This can be attached to a ServerMux with the marshaler option. func makeMarshalerMIMERegistry() marshalerRegistry { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go index ed9a7e4387d..19255ec441e 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go @@ -48,12 +48,19 @@ var encodedPathSplitter = regexp.MustCompile("(/|%2F)") // A HandlerFunc handles a specific pair of path pattern and HTTP method. type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) +// A Middleware handler wraps another HandlerFunc to do some pre- and/or post-processing of the request. This is used as an alternative to gRPC interceptors when using the direct-to-implementation +// registration methods. It is generally recommended to use gRPC client or server interceptors instead +// where possible. +type Middleware func(HandlerFunc) HandlerFunc + // ServeMux is a request multiplexer for grpc-gateway. // It matches http requests to patterns and invokes the corresponding handler. type ServeMux struct { // handlers maps HTTP method to a list of handlers. handlers map[string][]handler + middlewares []Middleware forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error + forwardResponseRewriter ForwardResponseRewriter marshalers marshalerRegistry incomingHeaderMatcher HeaderMatcherFunc outgoingHeaderMatcher HeaderMatcherFunc @@ -64,11 +71,30 @@ type ServeMux struct { routingErrorHandler RoutingErrorHandlerFunc disablePathLengthFallback bool unescapingMode UnescapingMode + writeContentLength bool } // ServeMuxOption is an option that can be given to a ServeMux on construction. type ServeMuxOption func(*ServeMux) +// ForwardResponseRewriter is the signature of a function that is capable of rewriting messages +// before they are forwarded in a unary, stream, or error response. 
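+//
+// A minimal illustrative sketch (the envelope shape is made up for this example, and the
+// configured marshaler must be able to encode non-proto values):
+//
+//	rewriter := func(ctx context.Context, resp proto.Message) (any, error) {
+//		// Wrap every response in a {"data": ...} envelope before marshaling.
+//		return map[string]any{"data": resp}, nil
+//	}
+//	mux := runtime.NewServeMux(runtime.WithForwardResponseRewriter(rewriter))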
+type ForwardResponseRewriter func(ctx context.Context, response proto.Message) (any, error)
+
+// WithForwardResponseRewriter returns a ServeMuxOption that allows for implementers to insert logic
+// that can rewrite the final response before it is forwarded.
+//
+// The response rewriter function is called during unary message forwarding, stream message
+// forwarding and when errors are being forwarded.
+//
+// NOTE: Using this option will likely make what is generated by `protoc-gen-openapiv2` incorrect,
+// since this option involves making runtime changes to the response shape or type.
+func WithForwardResponseRewriter(fwdResponseRewriter ForwardResponseRewriter) ServeMuxOption {
+	return func(sm *ServeMux) {
+		sm.forwardResponseRewriter = fwdResponseRewriter
+	}
+}
+
 // WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption.
 //
 // forwardResponseOption is an option that will be called on the relevant context.Context,
@@ -89,6 +115,15 @@ func WithUnescapingMode(mode UnescapingMode) ServeMuxOption {
 	}
 }
 
+// WithMiddlewares sets server middleware for all handlers. This is useful as an alternative to gRPC
+// interceptors when using the direct-to-implementation registration methods, where you cannot rely
+// on gRPC interceptors. It's recommended to use gRPC interceptors instead if possible.
+func WithMiddlewares(middlewares ...Middleware) ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.middlewares = append(serveMux.middlewares, middlewares...)
+	}
+}
+
 // SetQueryParameterParser sets the query parameter parser, used to populate message from query parameters.
 // Configuring this will mean the generated OpenAPI output is no longer correct, and it should be
 // done with careful consideration.
@@ -224,6 +259,13 @@ func WithDisablePathLengthFallback() ServeMuxOption {
 	}
 }
 
+// WithWriteContentLength returns a ServeMuxOption to enable writing the content length on non-streaming responses.
+func WithWriteContentLength() ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.writeContentLength = true
+	}
+}
+
 // WithHealthEndpointAt returns a ServeMuxOption that will add an endpoint to the created ServeMux at the path specified by endpointPath.
 // When called the handler will forward the request to the upstream grpc service health check (defined in the
 // gRPC Health Checking Protocol).
@@ -277,13 +319,14 @@ func WithHealthzEndpoint(healthCheckClient grpc_health_v1.HealthClient) ServeMux
 // NewServeMux returns a new ServeMux whose internal mapping is empty.
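+//
+// A minimal illustrative sketch of constructing a mux with a middleware (the logging
+// middleware here is made up for this example):
+//
+//	logging := func(next runtime.HandlerFunc) runtime.HandlerFunc {
+//		return func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) {
+//			log.Printf("%s %s", r.Method, r.URL.Path)
+//			next(w, r, pathParams)
+//		}
+//	}
+//	mux := runtime.NewServeMux(runtime.WithMiddlewares(logging))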
 func NewServeMux(opts ...ServeMuxOption) *ServeMux {
 	serveMux := &ServeMux{
-		handlers:               make(map[string][]handler),
-		forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0),
-		marshalers:             makeMarshalerMIMERegistry(),
-		errorHandler:           DefaultHTTPErrorHandler,
-		streamErrorHandler:     DefaultStreamErrorHandler,
-		routingErrorHandler:    DefaultRoutingErrorHandler,
-		unescapingMode:         UnescapingModeDefault,
+		handlers:                make(map[string][]handler),
+		forwardResponseOptions:  make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0),
+		forwardResponseRewriter: func(ctx context.Context, response proto.Message) (any, error) { return response, nil },
+		marshalers:              makeMarshalerMIMERegistry(),
+		errorHandler:            DefaultHTTPErrorHandler,
+		streamErrorHandler:      DefaultStreamErrorHandler,
+		routingErrorHandler:     DefaultRoutingErrorHandler,
+		unescapingMode:          UnescapingModeDefault,
 	}
 
 	for _, opt := range opts {
@@ -305,6 +348,9 @@ func NewServeMux(opts ...ServeMuxOption) *ServeMux {
 
 // Handle associates "h" to the pair of HTTP method and path pattern.
 func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) {
+	if len(s.middlewares) > 0 {
+		h = chainMiddlewares(s.middlewares)(h)
+	}
 	s.handlers[meth] = append([]handler{{pat: pat, h: h}}, s.handlers[meth]...)
 }
 
@@ -405,7 +451,7 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 			}
 			continue
 		}
-		h.h(w, r, pathParams)
+		s.handleHandler(h, w, r, pathParams)
 		return
 	}
 
@@ -458,7 +504,7 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 			s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr)
 			return
 		}
-		h.h(w, r, pathParams)
+		s.handleHandler(h, w, r, pathParams)
 		return
 	}
 	_, outboundMarshaler := MarshalerForRequest(s, r)
@@ -484,3 +530,16 @@ type handler struct {
 	pat Pattern
 	h   HandlerFunc
 }
+
+func (s *ServeMux) handleHandler(h handler, w http.ResponseWriter, r *http.Request, pathParams map[string]string) {
+	h.h(w, r.WithContext(withHTTPPattern(r.Context(), h.pat)), pathParams)
+}
+
+func chainMiddlewares(mws []Middleware) Middleware {
+	return func(next HandlerFunc) HandlerFunc {
+		for i := len(mws); i > 0; i-- {
+			next = mws[i-1](next)
+		}
+		return next
+	}
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go
index d549407f20f..f710036b350 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go
@@ -40,7 +40,7 @@ func Float32P(val string) (*float32, error) {
 }
 
 // Int64P parses the given string representation of an integer
-// and returns a pointer to a int64 whose value is same as the parsed integer.
+// and returns a pointer to an int64 whose value is the same as the parsed integer.
 func Int64P(val string) (*int64, error) {
 	i, err := Int64(val)
 	if err != nil {
@@ -50,7 +50,7 @@ func Int64P(val string) (*int64, error) {
 }
 
 // Int32P parses the given string representation of an integer
-// and returns a pointer to a int32 whose value is same as the parsed integer.
+// and returns a pointer to an int32 whose value is the same as the parsed integer.
func Int32P(val string) (*int32, error) { i, err := Int32(val) if err != nil { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go index fe634174b85..0a1ca7e06fe 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go @@ -141,7 +141,7 @@ func populateFieldValueFromPath(msgValue protoreflect.Message, fieldPath []strin } // Check if oneof already set - if of := fieldDescriptor.ContainingOneof(); of != nil { + if of := fieldDescriptor.ContainingOneof(); of != nil && !of.IsSynthetic() { if f := msgValue.WhichOneof(of); f != nil { return fmt.Errorf("field already set for oneof %q", of.FullName().Name()) } @@ -291,7 +291,11 @@ func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (p if err != nil { return protoreflect.Value{}, err } - msg = timestamppb.New(t) + timestamp := timestamppb.New(t) + if ok := timestamp.IsValid(); !ok { + return protoreflect.Value{}, fmt.Errorf("%s before 0001-01-01", value) + } + msg = timestamp case "google.protobuf.Duration": d, err := time.ParseDuration(value) if err != nil { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go index dfe7de4864a..38ca39cc538 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go @@ -1,6 +1,6 @@ package utilities -// An OpCode is a opcode of compiled path patterns. +// OpCode is an opcode of compiled path patterns. type OpCode int // These constants are the valid values of OpCode. diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go index d224ab776c0..66aa5f2dcc5 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go @@ -5,7 +5,7 @@ import ( "strings" ) -// flagInterface is an cut down interface to `flag` +// flagInterface is a cut down interface to `flag` type flagInterface interface { Var(value flag.Value, name string, usage string) } diff --git a/vendor/github.com/hashicorp/go-plugin/.gitignore b/vendor/github.com/hashicorp/go-plugin/.gitignore deleted file mode 100644 index 4befed30a1c..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -.DS_Store -.idea diff --git a/vendor/github.com/hashicorp/go-plugin/CHANGELOG.md b/vendor/github.com/hashicorp/go-plugin/CHANGELOG.md deleted file mode 100644 index 3d0379c500e..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/CHANGELOG.md +++ /dev/null @@ -1,102 +0,0 @@ -## v1.6.0 - -CHANGES: - -* plugin: Plugins written in other languages can optionally start to advertise whether they support gRPC broker multiplexing. - If the environment variable `PLUGIN_MULTIPLEX_GRPC` is set, it is safe to include a seventh field containing a boolean - value in the `|`-separated protocol negotiation line. 
- -ENHANCEMENTS: - -* Support muxing gRPC broker connections over a single listener [[GH-288](https://github.com/hashicorp/go-plugin/pull/288)] -* client: Configurable buffer size for reading plugin log lines [[GH-265](https://github.com/hashicorp/go-plugin/pull/265)] -* Use `buf` for proto generation [[GH-286](https://github.com/hashicorp/go-plugin/pull/286)] -* deps: bump golang.org/x/net to v0.17.0 [[GH-285](https://github.com/hashicorp/go-plugin/pull/285)] -* deps: bump golang.org/x/sys to v0.13.0 [[GH-285](https://github.com/hashicorp/go-plugin/pull/285)] -* deps: bump golang.org/x/text to v0.13.0 [[GH-285](https://github.com/hashicorp/go-plugin/pull/285)] - -## v1.5.2 - -ENHANCEMENTS: - -client: New `UnixSocketConfig.TempDir` option allows setting the directory to use when creating plugin-specific Unix socket directories [[GH-282](https://github.com/hashicorp/go-plugin/pull/282)] - -## v1.5.1 - -BUGS: - -* server: `PLUGIN_UNIX_SOCKET_DIR` is consistently used for gRPC broker sockets as well as the initial socket [[GH-277](https://github.com/hashicorp/go-plugin/pull/277)] - -ENHANCEMENTS: - -* client: New `UnixSocketConfig` option in `ClientConfig` to support making the client's Unix sockets group-writable [[GH-277](https://github.com/hashicorp/go-plugin/pull/277)] - -## v1.5.0 - -ENHANCEMENTS: - -* client: New `runner.Runner` interface to support clients providing custom plugin command runner implementations [[GH-270](https://github.com/hashicorp/go-plugin/pull/270)] - * Accessible via new `ClientConfig` field `RunnerFunc`, which is mutually exclusive with `Cmd` and `Reattach` - * Reattaching support via `ReattachConfig` field `ReattachFunc` -* client: New `ClientConfig` field `SkipHostEnv` allows omitting the client process' own environment variables from the plugin command's environment [[GH-270](https://github.com/hashicorp/go-plugin/pull/270)] -* client: Add `ID()` method to `Client` for retrieving the pid or other unique ID of a running plugin [[GH-272](https://github.com/hashicorp/go-plugin/pull/272)] -* server: Support setting the directory to create Unix sockets in with the env var `PLUGIN_UNIX_SOCKET_DIR` [[GH-270](https://github.com/hashicorp/go-plugin/pull/270)] -* server: Support setting group write permission and a custom group name or gid owner with the env var `PLUGIN_UNIX_SOCKET_GROUP` [[GH-270](https://github.com/hashicorp/go-plugin/pull/270)] - -## v1.4.11-rc1 - -ENHANCEMENTS: - -* deps: bump protoreflect to v1.15.1 [[GH-264](https://github.com/hashicorp/go-plugin/pull/264)] - -## v1.4.10 - -BUG FIXES: - -* additional notes: ensure to close files [[GH-241](https://github.com/hashicorp/go-plugin/pull/241)] - -ENHANCEMENTS: - -* deps: Remove direct dependency on golang.org/x/net [[GH-240](https://github.com/hashicorp/go-plugin/pull/240)] - -## v1.4.9 - -ENHANCEMENTS: - -* client: Remove log warning introduced in 1.4.5 when SecureConfig is nil. 
[[GH-238](https://github.com/hashicorp/go-plugin/pull/238)] - -## v1.4.8 - -BUG FIXES: - -* Fix windows build: [[GH-227](https://github.com/hashicorp/go-plugin/pull/227)] - -## v1.4.7 - -ENHANCEMENTS: - -* More detailed error message on plugin start failure: [[GH-223](https://github.com/hashicorp/go-plugin/pull/223)] - -## v1.4.6 - -BUG FIXES: - -* server: Prevent gRPC broker goroutine leak when using `GRPCServer` type `GracefulStop()` or `Stop()` methods [[GH-220](https://github.com/hashicorp/go-plugin/pull/220)] - -## v1.4.5 - -ENHANCEMENTS: - -* client: log warning when SecureConfig is nil [[GH-207](https://github.com/hashicorp/go-plugin/pull/207)] - - -## v1.4.4 - -ENHANCEMENTS: - -* client: increase level of plugin exit logs [[GH-195](https://github.com/hashicorp/go-plugin/pull/195)] - -BUG FIXES: - -* Bidirectional communication: fix bidirectional communication when AutoMTLS is enabled [[GH-193](https://github.com/hashicorp/go-plugin/pull/193)] -* RPC: Trim a spurious log message for plugins using RPC [[GH-186](https://github.com/hashicorp/go-plugin/pull/186)] diff --git a/vendor/github.com/hashicorp/go-plugin/LICENSE b/vendor/github.com/hashicorp/go-plugin/LICENSE deleted file mode 100644 index 042324fb7e1..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/LICENSE +++ /dev/null @@ -1,355 +0,0 @@ -Copyright (c) 2016 HashiCorp, Inc. - -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. 
“Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. 
Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. 
Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. 
Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. 
- -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/go-plugin/README.md b/vendor/github.com/hashicorp/go-plugin/README.md deleted file mode 100644 index 50baee06e15..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/README.md +++ /dev/null @@ -1,165 +0,0 @@ -# Go Plugin System over RPC - -`go-plugin` is a Go (golang) plugin system over RPC. It is the plugin system -that has been in use by HashiCorp tooling for over 4 years. While initially -created for [Packer](https://www.packer.io), it is additionally in use by -[Terraform](https://www.terraform.io), [Nomad](https://www.nomadproject.io), -[Vault](https://www.vaultproject.io), -[Boundary](https://www.boundaryproject.io), -and [Waypoint](https://www.waypointproject.io). - -While the plugin system is over RPC, it is currently only designed to work -over a local [reliable] network. Plugins over a real network are not supported -and will lead to unexpected behavior. - -This plugin system has been used on millions of machines across many different -projects and has proven to be battle hardened and ready for production use. - -## Features - -The HashiCorp plugin system supports a number of features: - -**Plugins are Go interface implementations.** This makes writing and consuming -plugins feel very natural. To a plugin author: you just implement an -interface as if it were going to run in the same process. For a plugin user: -you just use and call functions on an interface as if it were in the same -process. This plugin system handles the communication in between. - -**Cross-language support.** Plugins can be written (and consumed) by -almost every major language. This library supports serving plugins via -[gRPC](http://www.grpc.io). gRPC-based plugins enable plugins to be written -in any language. - -**Complex arguments and return values are supported.** This library -provides APIs for handling complex arguments and return values such -as interfaces, `io.Reader/Writer`, etc. We do this by giving you a library -(`MuxBroker`) for creating new connections between the client/server to -serve additional interfaces or transfer raw data. - -**Bidirectional communication.** Because the plugin system supports -complex arguments, the host process can send it interface implementations -and the plugin can call back into the host process. - -**Built-in Logging.** Any plugins that use the `log` standard library -will have log data automatically sent to the host process. The host -process will mirror this output prefixed with the path to the plugin -binary. This makes debugging with plugins simple. If the host system -uses [hclog](https://github.com/hashicorp/go-hclog) then the log data -will be structured. If the plugin also uses hclog, logs from the plugin -will be sent to the host hclog and be structured. - -**Protocol Versioning.** A very basic "protocol version" is supported that -can be incremented to invalidate any previous plugins. This is useful when -interface signatures are changing, protocol level changes are necessary, -etc. When a protocol version is incompatible, a human friendly error -message is shown to the end user. - -**Stdout/Stderr Syncing.** While plugins are subprocesses, they can continue -to use stdout/stderr as usual and the output will get mirrored back to -the host process. 
The host process can control what `io.Writer` these -streams go to, to prevent this from happening. - -**TTY Preservation.** Plugin subprocesses are connected to the identical -stdin file descriptor as the host process, allowing software that requires -a TTY to work. For example, a plugin can execute `ssh` and even though there -are multiple subprocesses and RPC happening, it will look and act perfectly -normal to the end user. - -**Host upgrade while a plugin is running.** Plugins can be "reattached" -so that the host process can be upgraded while the plugin is still running. -This requires the host/plugin to know this is possible and daemonize -properly. `NewClient` takes a `ReattachConfig` to determine if and how to -reattach. - -**Cryptographically Secure Plugins.** Plugins can be verified with an expected -checksum and RPC communications can be configured to use TLS. The host process -must be properly secured to protect this configuration. - -## Architecture - -The HashiCorp plugin system works by launching subprocesses and communicating -over RPC (using standard `net/rpc` or [gRPC](http://www.grpc.io)). A single -connection is made between any plugin and the host process. For net/rpc-based -plugins, we use a [connection multiplexing](https://github.com/hashicorp/yamux) -library to multiplex any other connections on top. For gRPC-based plugins, -the HTTP2 protocol handles multiplexing. - -This architecture has a number of benefits: - - * Plugins can't crash your host process: A panic in a plugin doesn't - panic the plugin user. - - * Plugins are very easy to write: just write a Go application and `go build`. - Or use any other language to write a gRPC server with a tiny amount of - boilerplate to support go-plugin. - - * Plugins are very easy to install: just put the binary in a location where - the host will find it (depends on the host but this library also provides - helpers), and the plugin host handles the rest. - - * Plugins can be relatively secure: The plugin only has access to the - interfaces and args given to it, not to the entire memory space of the - process. Additionally, go-plugin can communicate with the plugin over - TLS. - -## Usage - -To use the plugin system, you must take the following steps. These are -high-level steps that must be done. Examples are available in the -`examples/` directory, and a minimal end-to-end sketch also follows below. - - 1. Choose the interface(s) you want to expose for plugins. - - 2. For each interface, write an implementation of that interface - that communicates over a `net/rpc` connection or over a - [gRPC](http://www.grpc.io) connection or both. You'll have to write - both a client and a server implementation. - - 3. Create a `Plugin` implementation that knows how to create the RPC - client/server for a given plugin type. - - 4. Plugin authors call `plugin.Serve` to serve a plugin from the - `main` function. - - 5. Plugin users use `plugin.Client` to launch a subprocess and request - an interface implementation over RPC. - -That's it! In practice, step 2 is the most tedious and time-consuming step. -Even so, it isn't very difficult and you can see examples in the `examples/` -directory as well as throughout our various open source projects. - -For complete API documentation, see [GoDoc](https://godoc.org/github.com/hashicorp/go-plugin). - -## Roadmap - -Our plugin system is constantly evolving. As we use the plugin system for -new projects or for new features in existing projects, we constantly find -improvements we can make.
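The five usage steps above map onto fairly little code. The following is an illustrative sketch only, not code from this repository: the `Greeter` interface, `GreeterPlugin` type, handshake values, and the `./greeter-plugin` binary path are all hypothetical names. The plugin binary's own `main` would perform step 4 by calling `plugin.Serve` with the same handshake and plugin map.

```go
package main

import (
	"fmt"
	"net/rpc"
	"os/exec"

	"github.com/hashicorp/go-plugin"
)

// Step 1: the interface exposed to plugins (hypothetical).
type Greeter interface {
	Greet() string
}

// Step 2, client side: calls into the plugin over net/rpc.
type GreeterRPC struct{ client *rpc.Client }

func (g *GreeterRPC) Greet() string {
	var resp string
	// go-plugin registers the server implementation under the name "Plugin".
	if err := g.client.Call("Plugin.Greet", new(interface{}), &resp); err != nil {
		panic(err)
	}
	return resp
}

// Step 2, server side: wraps a real implementation for net/rpc.
type GreeterRPCServer struct{ Impl Greeter }

func (s *GreeterRPCServer) Greet(args interface{}, resp *string) error {
	*resp = s.Impl.Greet()
	return nil
}

// Step 3: a plugin.Plugin that knows how to build the client/server pair.
type GreeterPlugin struct{ Impl Greeter }

func (p *GreeterPlugin) Server(*plugin.MuxBroker) (interface{}, error) {
	return &GreeterRPCServer{Impl: p.Impl}, nil
}

func (p *GreeterPlugin) Client(b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) {
	return &GreeterRPC{client: c}, nil
}

// Shared by host and plugin; a mismatch aborts the handshake early.
var handshake = plugin.HandshakeConfig{
	ProtocolVersion:  1,
	MagicCookieKey:   "GREETER_PLUGIN",
	MagicCookieValue: "hello",
}

// Step 5, host side: launch the subprocess and dispense the interface.
func main() {
	client := plugin.NewClient(&plugin.ClientConfig{
		HandshakeConfig: handshake,
		Plugins:         map[string]plugin.Plugin{"greeter": &GreeterPlugin{}},
		Cmd:             exec.Command("./greeter-plugin"), // hypothetical binary
	})
	defer client.Kill()

	rpcClient, err := client.Client()
	if err != nil {
		panic(err)
	}

	raw, err := rpcClient.Dispense("greeter")
	if err != nil {
		panic(err)
	}

	fmt.Println(raw.(Greeter).Greet())
}
```

Note that the same `GreeterPlugin` type is reused on both sides: the host leaves `Impl` unset and only dispenses, while the plugin binary fills in `Impl` with its real implementation and serves it.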
- -At this point in time, the roadmap for the plugin system is: - -**Semantic Versioning.** Plugins will be able to implement a semantic version. -This plugin system will give host processes a system for constraining -versions. This is in addition to the protocol versioning already present -which is more for larger underlying changes. - -## What About Shared Libraries? - -When we started using plugins (late 2012, early 2013), plugins over RPC -were the only option since Go didn't support dynamic library loading. Today, -Go supports the [plugin](https://golang.org/pkg/plugin/) standard library with -a number of limitations. Since 2012, our plugin system has stabilized -through use by tens of millions of users, and has many benefits we've come to -value greatly. - -For example, we use this plugin system in -[Vault](https://www.vaultproject.io) where dynamic library loading is -not acceptable for security reasons. That is an extreme -example, but we believe our library system has more upsides than downsides -over dynamic library loading and since we've had it built and tested for years, -we'll continue to use it. - -Shared libraries have one major advantage over our system, which is much -higher performance. In real-world scenarios across our various tools, -we've never required any more performance out of our plugin system and it -has seen very high throughput, so this isn't a concern for us at the moment. diff --git a/vendor/github.com/hashicorp/go-plugin/buf.gen.yaml b/vendor/github.com/hashicorp/go-plugin/buf.gen.yaml deleted file mode 100644 index 033d0153b2a..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/buf.gen.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -version: v1 -plugins: - - plugin: buf.build/protocolbuffers/go - out: . - opt: - - paths=source_relative - - plugin: buf.build/grpc/go:v1.3.0 - out: . - opt: - - paths=source_relative - - require_unimplemented_servers=false diff --git a/vendor/github.com/hashicorp/go-plugin/buf.yaml b/vendor/github.com/hashicorp/go-plugin/buf.yaml deleted file mode 100644 index 3d0da4c7199..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/buf.yaml +++ /dev/null @@ -1,7 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -version: v1 -build: - excludes: - - examples/ \ No newline at end of file diff --git a/vendor/github.com/hashicorp/go-plugin/client.go b/vendor/github.com/hashicorp/go-plugin/client.go deleted file mode 100644 index 73f6b35151c..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/client.go +++ /dev/null @@ -1,1239 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package plugin - -import ( - "bufio" - "context" - "crypto/subtle" - "crypto/tls" - "crypto/x509" - "encoding/base64" - "errors" - "fmt" - "hash" - "io" - "io/ioutil" - "net" - "os" - "os/exec" - "path/filepath" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-plugin/internal/cmdrunner" - "github.com/hashicorp/go-plugin/internal/grpcmux" - "github.com/hashicorp/go-plugin/runner" - "google.golang.org/grpc" -) - -// If this is 1, then we've called CleanupClients. This can be used -// by plugin RPC implementations to change error behavior since you -// can expect network connection errors at this point. This should be -// read by using sync/atomic.
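// For example (an illustrative sketch, not part of the original file), a
// plugin RPC implementation outside this package could consult the flag when
// deciding whether a connection error is expected teardown noise:
//
//	if atomic.LoadUint32(&plugin.Killed) == 1 {
//		return nil // CleanupClients was called; the connection loss is expected
//	}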
-var Killed uint32 = 0 - -// This is a slice of the "managed" clients which are cleaned up when -// calling CleanupClients -var managedClients = make([]*Client, 0, 5) -var managedClientsLock sync.Mutex - -// Error types -var ( - // ErrProcessNotFound is returned when a client is instantiated to - // reattach to an existing process and it isn't found. - ErrProcessNotFound = cmdrunner.ErrProcessNotFound - - // ErrChecksumsDoNotMatch is returned when the binary's checksum doesn't match - // the one provided in the SecureConfig. - ErrChecksumsDoNotMatch = errors.New("checksums did not match") - - // ErrSecureConfigNoChecksum is returned when an empty checksum is provided to the - // SecureConfig. - ErrSecureConfigNoChecksum = errors.New("no checksum provided") - - // ErrSecureConfigNoHash is returned when a nil Hash object is provided to the - // SecureConfig. - ErrSecureConfigNoHash = errors.New("no hash implementation provided") - - // ErrSecureConfigAndReattach is returned when both Reattach and - // SecureConfig are set. - ErrSecureConfigAndReattach = errors.New("only one of Reattach or SecureConfig can be set") - - // ErrGRPCBrokerMuxNotSupported is returned when the client requests - // multiplexing over the gRPC broker, but the plugin does not support the - // feature. In most cases, this should be resolvable by updating and - // rebuilding the plugin, or restarting the plugin with - // ClientConfig.GRPCBrokerMultiplex set to false. - ErrGRPCBrokerMuxNotSupported = errors.New("client requested gRPC broker multiplexing but plugin does not support the feature") -) - -// defaultPluginLogBufferSize is the default size of the buffer used to read from stderr for plugin log lines. -const defaultPluginLogBufferSize = 64 * 1024 - -// Client handles the lifecycle of a plugin application. It launches -// plugins, connects to them, dispenses interface implementations, and handles -// killing the process. -// -// Plugin hosts should use one Client for each plugin executable. To -// dispense a plugin type, use the `Client.Client` function, and then -// call `Dispense`. This awkward API is mostly historical but is used to split -// the client that deals with subprocess management and the client that -// does RPC management. -// -// See NewClient and ClientConfig for using a Client. -type Client struct { - config *ClientConfig - exited bool - l sync.Mutex - address net.Addr - runner runner.AttachedRunner - client ClientProtocol - protocol Protocol - logger hclog.Logger - doneCtx context.Context - ctxCancel context.CancelFunc - negotiatedVersion int - - // clientWaitGroup is used to manage the lifecycle of the plugin management - // goroutines. - clientWaitGroup sync.WaitGroup - - // stderrWaitGroup is used to prevent the command's Wait() function from - // being called before we've finished reading from the stderr pipe. - stderrWaitGroup sync.WaitGroup - - // processKilled is used for testing only, to flag when the process was - // forcefully killed. - processKilled bool - - unixSocketCfg UnixSocketConfig - - grpcMuxerOnce sync.Once - grpcMuxer *grpcmux.GRPCClientMuxer -} - -// NegotiatedVersion returns the protocol version negotiated with the server. -// This is only valid after Start() is called. -func (c *Client) NegotiatedVersion() int { - return c.negotiatedVersion -} - -// ID returns a unique ID for the running plugin. By default this is the process -// ID (pid), but it could take other forms if RunnerFunc was provided.
-func (c *Client) ID() string { - c.l.Lock() - defer c.l.Unlock() - - if c.runner != nil { - return c.runner.ID() - } - - return "" -} - -// ClientConfig is the configuration used to initialize a new -// plugin client. After being used to initialize a plugin client, -// that configuration must not be modified again. -type ClientConfig struct { - // HandshakeConfig is the configuration that must match servers. - HandshakeConfig - - // Plugins are the plugins that can be consumed. - // The implied version of this PluginSet is the Handshake.ProtocolVersion. - Plugins PluginSet - - // VersionedPlugins is a map of PluginSets for specific protocol versions. - // These can be used to negotiate a compatible version between client and - // server. If this is set, Handshake.ProtocolVersion is not required. - VersionedPlugins map[int]PluginSet - - // One of the following must be set, but not both. - // - // Cmd is the unstarted subprocess for starting the plugin. If this is - // set, then the Client starts the plugin process on its own and connects - // to it. - // - // Reattach is configuration for reattaching to an existing plugin process - // that is already running. This isn't common. - Cmd *exec.Cmd - Reattach *ReattachConfig - - // RunnerFunc allows consumers to provide their own implementation of - // runner.Runner and control the context within which a plugin is executed. - // The cmd argument will have been copied from the config and populated with - // environment variables that a go-plugin server expects to read such as - // AutoMTLS certs and the magic cookie key. - RunnerFunc func(l hclog.Logger, cmd *exec.Cmd, tmpDir string) (runner.Runner, error) - - // SecureConfig is configuration for verifying the integrity of the - // executable. It can not be used with Reattach. - SecureConfig *SecureConfig - - // TLSConfig is used to enable TLS on the RPC client. - TLSConfig *tls.Config - - // Managed represents if the client should be managed by the - // plugin package or not. If true, then by calling CleanupClients, - // it will automatically be cleaned up. Otherwise, the client - // user is fully responsible for making sure to Kill all plugin - // clients. By default the client is _not_ managed. - Managed bool - - // The minimum and maximum port to use for communicating with - // the subprocess. If not set, this defaults to 10,000 and 25,000 - // respectively. - MinPort, MaxPort uint - - // StartTimeout is the timeout to wait for the plugin to say it - // has started successfully. - StartTimeout time.Duration - - // If non-nil, then the stderr of the client will be written to here - // (as well as the log). This is the original os.Stderr of the subprocess. - // This isn't the output of synced stderr. - Stderr io.Writer - - // SyncStdout, SyncStderr can be set to override the - // respective os.Std* values in the plugin. Care should be taken to - // avoid races here. If these are nil, then this will be set to - // ioutil.Discard. - SyncStdout io.Writer - SyncStderr io.Writer - - // AllowedProtocols is a list of allowed protocols. If this isn't set, - // then only netrpc is allowed. This is so that older go-plugin systems - // can show friendly errors if they see a plugin with an unknown - // protocol. - // - // By setting this, you can cause an error immediately on plugin start - // if an unsupported protocol is used with a good error message. - // - // If this isn't set at all (nil value), then only net/rpc is accepted. - // This is done for legacy reasons. 
You must explicitly opt in to - // new protocols. - AllowedProtocols []Protocol - - // Logger is the logger that the client will use. If none is provided, - // it will default to hclog's default logger. - Logger hclog.Logger - - // PluginLogBufferSize is the buffer size (in bytes) to read from stderr for plugin log lines. - // If this is 0, then the default of 64KB is used. - PluginLogBufferSize int - - // AutoMTLS has the client and server automatically negotiate mTLS for - // transport authentication. This ensures that only the original client will - // be allowed to connect to the server, and all other connections will be - // rejected. The client will also refuse to connect to any server that isn't - // the original instance started by the client. - // - // In this mode of operation, the client generates a one-time use tls - // certificate, sends the public x.509 certificate to the new server, and - // the server generates a one-time use tls certificate, and sends the public - // x.509 certificate back to the client. These are used to authenticate all - // rpc connections between the client and server. - // - // Setting AutoMTLS to true implies that the server must support the - // protocol, and correctly negotiate the tls certificates, or a connection - // failure will result. - // - // The client should not set TLSConfig, nor should the server set a - // TLSProvider, because AutoMTLS implies that a new certificate and tls - // configuration will be generated at startup. - // - // You cannot Reattach to a server with this option enabled. - AutoMTLS bool - - // GRPCDialOptions allows plugin users to pass custom grpc.DialOption - // to create gRPC connections. This only affects plugins using the gRPC - // protocol. - GRPCDialOptions []grpc.DialOption - - // GRPCBrokerMultiplex turns on multiplexing for the gRPC broker. The gRPC - // broker will multiplex all brokered gRPC servers over the plugin's original - // listener socket instead of making a new listener for each server. The - // go-plugin library currently only includes a Go implementation for the - // server (i.e. plugin) side of gRPC broker multiplexing. - // - // Does not support reattaching. - // - // Multiplexed gRPC streams MUST be established sequentially, i.e. after - // calling AcceptAndServe from one side, wait for the other side to Dial - // before calling AcceptAndServe again. - GRPCBrokerMultiplex bool - - // SkipHostEnv allows plugins to run without inheriting the parent process' - // environment variables. - SkipHostEnv bool - - // UnixSocketConfig configures additional options for any Unix sockets - // that are created. Not normally required. Not supported on Windows. - UnixSocketConfig *UnixSocketConfig -} - -type UnixSocketConfig struct { - // If set, go-plugin will change the owner of any Unix sockets created to - // this group, and set them as group-writable. Can be a name or gid. The - // client process must be a member of this group or chown will fail. - Group string - - // TempDir specifies the base directory to use when creating a plugin-specific - // temporary directory. It is expected to already exist and be writable. If - // not set, defaults to the directory chosen by os.MkdirTemp. - TempDir string - - // The directory to create Unix sockets in. Internally created and managed - // by go-plugin and deleted when the plugin is killed. Will be created - // inside TempDir if specified. - socketDir string -} - -// ReattachConfig is used to configure a client to reattach to an -// already-running plugin process.
You can retrieve this information by -// calling ReattachConfig on Client. -type ReattachConfig struct { - Protocol Protocol - ProtocolVersion int - Addr net.Addr - Pid int - - // ReattachFunc allows consumers to provide their own implementation of - // runner.AttachedRunner and attach to something other than a plain process. - // At least one of Pid or ReattachFunc must be set. - ReattachFunc runner.ReattachFunc - - // Test is set to true if this is reattaching to a plugin in "test mode" - // (see ServeConfig.Test). In this mode, client.Kill will NOT kill the - // process and instead will rely on the plugin to terminate itself. This - // should not be used in non-test environments. - Test bool -} - -// SecureConfig is used to configure a client to verify the integrity of an -// executable before running. It does this by verifying the checksum is -// expected. Hash is used to specify the hashing method to use when checksumming -// the file. The configuration is verified by the client by calling the -// SecureConfig.Check() function. -// -// The host process should ensure the checksum was provided by a trusted and -// authoritative source. The binary should be installed in such a way that it -// can not be modified by an unauthorized user between the time of this check -// and the time of execution. -type SecureConfig struct { - Checksum []byte - Hash hash.Hash -} - -// Check takes the filepath to an executable and returns true if the checksum of -// the file matches the checksum provided in the SecureConfig. -func (s *SecureConfig) Check(filePath string) (bool, error) { - if len(s.Checksum) == 0 { - return false, ErrSecureConfigNoChecksum - } - - if s.Hash == nil { - return false, ErrSecureConfigNoHash - } - - file, err := os.Open(filePath) - if err != nil { - return false, err - } - defer file.Close() - - _, err = io.Copy(s.Hash, file) - if err != nil { - return false, err - } - - sum := s.Hash.Sum(nil) - - return subtle.ConstantTimeCompare(sum, s.Checksum) == 1, nil -} - -// CleanupClients makes sure all the managed subprocesses are killed and properly -// logged. This should be called before the parent process running the -// plugins exits. -// -// This must only be called _once_. -func CleanupClients() { - // Set Killed to 1 so that we don't get unexpected panics - atomic.StoreUint32(&Killed, 1) - - // Kill all the managed clients in parallel and use a WaitGroup - // to wait for them all to finish up. - var wg sync.WaitGroup - managedClientsLock.Lock() - for _, client := range managedClients { - wg.Add(1) - - go func(client *Client) { - client.Kill() - wg.Done() - }(client) - } - managedClientsLock.Unlock() - - wg.Wait() -} - -// NewClient creates a new plugin client which manages the lifecycle of an external -// plugin and gets the address for the RPC connection. -// -// The client must be cleaned up at some point by calling Kill(). If -// the client is a managed client (created with ClientConfig.Managed) you -// can just call CleanupClients at the end of your program and they will -// be properly cleaned up.
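// An illustrative call site (a sketch only; `handshake`, `pluginMap`, and
// the binary path are assumptions supplied by the host application):
//
//	client := plugin.NewClient(&plugin.ClientConfig{
//		HandshakeConfig: handshake,
//		Plugins:         pluginMap,
//		Cmd:             exec.Command("./my-plugin"),
//		Managed:         true, // lets CleanupClients kill it at shutdown
//	})
//	defer client.Kill()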
-func NewClient(config *ClientConfig) (c *Client) { - if config.MinPort == 0 && config.MaxPort == 0 { - config.MinPort = 10000 - config.MaxPort = 25000 - } - - if config.StartTimeout == 0 { - config.StartTimeout = 1 * time.Minute - } - - if config.Stderr == nil { - config.Stderr = ioutil.Discard - } - - if config.SyncStdout == nil { - config.SyncStdout = io.Discard - } - if config.SyncStderr == nil { - config.SyncStderr = io.Discard - } - - if config.AllowedProtocols == nil { - config.AllowedProtocols = []Protocol{ProtocolNetRPC} - } - - if config.Logger == nil { - config.Logger = hclog.New(&hclog.LoggerOptions{ - Output: hclog.DefaultOutput, - Level: hclog.Trace, - Name: "plugin", - }) - } - - if config.PluginLogBufferSize == 0 { - config.PluginLogBufferSize = defaultPluginLogBufferSize - } - - c = &Client{ - config: config, - logger: config.Logger, - } - if config.Managed { - managedClientsLock.Lock() - managedClients = append(managedClients, c) - managedClientsLock.Unlock() - } - - return -} - -// Client returns the protocol client for this connection. -// -// Subsequent calls to this will return the same client. -func (c *Client) Client() (ClientProtocol, error) { - _, err := c.Start() - if err != nil { - return nil, err - } - - c.l.Lock() - defer c.l.Unlock() - - if c.client != nil { - return c.client, nil - } - - switch c.protocol { - case ProtocolNetRPC: - c.client, err = newRPCClient(c) - - case ProtocolGRPC: - c.client, err = newGRPCClient(c.doneCtx, c) - - default: - return nil, fmt.Errorf("unknown server protocol: %s", c.protocol) - } - - if err != nil { - c.client = nil - return nil, err - } - - return c.client, nil -} - -// Tells whether or not the underlying process has exited. -func (c *Client) Exited() bool { - c.l.Lock() - defer c.l.Unlock() - return c.exited -} - -// killed is used in tests to check if a process failed to exit gracefully, and -// needed to be killed. -func (c *Client) killed() bool { - c.l.Lock() - defer c.l.Unlock() - return c.processKilled -} - -// End the executing subprocess (if it is running) and perform any cleanup -// tasks necessary such as capturing any remaining logs and so on. -// -// This method blocks until the process successfully exits. -// -// This method can safely be called multiple times. -func (c *Client) Kill() { - // Grab a lock to read some private fields. - c.l.Lock() - runner := c.runner - addr := c.address - hostSocketDir := c.unixSocketCfg.socketDir - c.l.Unlock() - - // If there is no runner or ID, there is nothing to kill. - if runner == nil || runner.ID() == "" { - return - } - - defer func() { - // Wait for the all client goroutines to finish. - c.clientWaitGroup.Wait() - - if hostSocketDir != "" { - os.RemoveAll(hostSocketDir) - } - - // Make sure there is no reference to the old process after it has been - // killed. - c.l.Lock() - c.runner = nil - c.l.Unlock() - }() - - // We need to check for address here. It is possible that the plugin - // started (process != nil) but has no address (addr == nil) if the - // plugin failed at startup. If we do have an address, we need to close - // the plugin net connections. - graceful := false - if addr != nil { - // Close the client to cleanly exit the process. - client, err := c.Client() - if err == nil { - err = client.Close() - - // If there is no error, then we attempt to wait for a graceful - // exit. If there was an error, we assume that graceful cleanup - // won't happen and just force kill. - graceful = err == nil - if err != nil { - // If there was an error just log it. 
We're going to force - // kill in a moment anyway. - c.logger.Warn("error closing client during Kill", "err", err) - } - } else { - c.logger.Error("client", "error", err) - } - } - - // If we're attempting a graceful exit, then we wait for a short period - // of time to allow that to happen. To wait for this we just wait on the - // doneCh which would be closed if the process exits. - if graceful { - select { - case <-c.doneCtx.Done(): - c.logger.Debug("plugin exited") - return - case <-time.After(2 * time.Second): - } - } - - // If graceful exiting failed, just kill it - c.logger.Warn("plugin failed to exit gracefully") - if err := runner.Kill(context.Background()); err != nil { - c.logger.Debug("error killing plugin", "error", err) - } - - c.l.Lock() - c.processKilled = true - c.l.Unlock() -} - -// Start the underlying subprocess, communicating with it to negotiate -// a port for RPC connections, and returning the address to connect via RPC. -// -// This method is safe to call multiple times. Subsequent calls have no effect. -// Once a client has been started once, it cannot be started again, even if -// it was killed. -func (c *Client) Start() (addr net.Addr, err error) { - c.l.Lock() - defer c.l.Unlock() - - if c.address != nil { - return c.address, nil - } - - // Exactly one of Cmd, Reattach, or RunnerFunc must be set; anything else - // is an error. We wrap this in a {} for scoping reasons, hoping that the - // escape analysis will pop the stack here. - { - var mutuallyExclusiveOptions int - if c.config.Cmd != nil { - mutuallyExclusiveOptions += 1 - } - if c.config.Reattach != nil { - mutuallyExclusiveOptions += 1 - } - if c.config.RunnerFunc != nil { - mutuallyExclusiveOptions += 1 - } - if mutuallyExclusiveOptions != 1 { - return nil, fmt.Errorf("exactly one of Cmd, or Reattach, or RunnerFunc must be set") - } - - if c.config.SecureConfig != nil && c.config.Reattach != nil { - return nil, ErrSecureConfigAndReattach - } - - if c.config.GRPCBrokerMultiplex && c.config.Reattach != nil { - return nil, fmt.Errorf("gRPC broker multiplexing is not supported with Reattach config") - } - } - - if c.config.Reattach != nil { - return c.reattach() - } - - if c.config.VersionedPlugins == nil { - c.config.VersionedPlugins = make(map[int]PluginSet) - } - - // handle all plugins as versioned, using the handshake config as the default. - version := int(c.config.ProtocolVersion) - - // Make sure we're not overwriting a real version 0. If ProtocolVersion was - // non-zero, then we have to just assume the user made sure that - // VersionedPlugins doesn't conflict. - if _, ok := c.config.VersionedPlugins[version]; !ok && c.config.Plugins != nil { - c.config.VersionedPlugins[version] = c.config.Plugins - } - - var versionStrings []string - for v := range c.config.VersionedPlugins { - versionStrings = append(versionStrings, strconv.Itoa(v)) - } - - env := []string{ - fmt.Sprintf("%s=%s", c.config.MagicCookieKey, c.config.MagicCookieValue), - fmt.Sprintf("PLUGIN_MIN_PORT=%d", c.config.MinPort), - fmt.Sprintf("PLUGIN_MAX_PORT=%d", c.config.MaxPort), - fmt.Sprintf("PLUGIN_PROTOCOL_VERSIONS=%s", strings.Join(versionStrings, ",")), - } - if c.config.GRPCBrokerMultiplex { - env = append(env, fmt.Sprintf("%s=true", envMultiplexGRPC)) - } - - cmd := c.config.Cmd - if cmd == nil { - // It's only possible to get here if RunnerFunc is non-nil, but we'll - // still use cmd as a spec to populate metadata for the external - // implementation to consume.
- cmd = exec.Command("") - } - if !c.config.SkipHostEnv { - cmd.Env = append(cmd.Env, os.Environ()...) - } - cmd.Env = append(cmd.Env, env...) - cmd.Stdin = os.Stdin - - if c.config.SecureConfig != nil { - if ok, err := c.config.SecureConfig.Check(cmd.Path); err != nil { - return nil, fmt.Errorf("error verifying checksum: %s", err) - } else if !ok { - return nil, ErrChecksumsDoNotMatch - } - } - - // Setup a temporary certificate for client/server mtls, and send the public - // certificate to the plugin. - if c.config.AutoMTLS { - c.logger.Info("configuring client automatic mTLS") - certPEM, keyPEM, err := generateCert() - if err != nil { - c.logger.Error("failed to generate client certificate", "error", err) - return nil, err - } - cert, err := tls.X509KeyPair(certPEM, keyPEM) - if err != nil { - c.logger.Error("failed to parse client certificate", "error", err) - return nil, err - } - - cmd.Env = append(cmd.Env, fmt.Sprintf("PLUGIN_CLIENT_CERT=%s", certPEM)) - - c.config.TLSConfig = &tls.Config{ - Certificates: []tls.Certificate{cert}, - ClientAuth: tls.RequireAndVerifyClientCert, - MinVersion: tls.VersionTLS12, - ServerName: "localhost", - } - } - - if c.config.UnixSocketConfig != nil { - c.unixSocketCfg = *c.config.UnixSocketConfig - } - - if c.unixSocketCfg.Group != "" { - cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", EnvUnixSocketGroup, c.unixSocketCfg.Group)) - } - - var runner runner.Runner - switch { - case c.config.RunnerFunc != nil: - c.unixSocketCfg.socketDir, err = os.MkdirTemp(c.unixSocketCfg.TempDir, "plugin-dir") - if err != nil { - return nil, err - } - // os.MkdirTemp creates folders with 0o700, so if we have a group - // configured we need to make it group-writable. - if c.unixSocketCfg.Group != "" { - err = setGroupWritable(c.unixSocketCfg.socketDir, c.unixSocketCfg.Group, 0o770) - if err != nil { - return nil, err - } - } - cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", EnvUnixSocketDir, c.unixSocketCfg.socketDir)) - c.logger.Trace("created temporary directory for unix sockets", "dir", c.unixSocketCfg.socketDir) - - runner, err = c.config.RunnerFunc(c.logger, cmd, c.unixSocketCfg.socketDir) - if err != nil { - return nil, err - } - default: - runner, err = cmdrunner.NewCmdRunner(c.logger, cmd) - if err != nil { - return nil, err - } - - } - - c.runner = runner - startCtx, startCtxCancel := context.WithTimeout(context.Background(), c.config.StartTimeout) - defer startCtxCancel() - err = runner.Start(startCtx) - if err != nil { - return nil, err - } - - // Make sure the command is properly cleaned up if there is an error - defer func() { - rErr := recover() - - if err != nil || rErr != nil { - runner.Kill(context.Background()) - } - - if rErr != nil { - panic(rErr) - } - }() - - // Create a context for when we kill - c.doneCtx, c.ctxCancel = context.WithCancel(context.Background()) - - // Start goroutine that logs the stderr - c.clientWaitGroup.Add(1) - c.stderrWaitGroup.Add(1) - // logStderr calls Done() - go c.logStderr(runner.Name(), runner.Stderr()) - - c.clientWaitGroup.Add(1) - go func() { - // ensure the context is cancelled when we're done - defer c.ctxCancel() - - defer c.clientWaitGroup.Done() - - // wait to finish reading from stderr since the stderr pipe reader - // will be closed by the subsequent call to cmd.Wait(). - c.stderrWaitGroup.Wait() - - // Wait for the command to end. 
- err := runner.Wait(context.Background()) - if err != nil { - c.logger.Error("plugin process exited", "plugin", runner.Name(), "id", runner.ID(), "error", err.Error()) - } else { - // Log and make sure to flush the logs right away - c.logger.Info("plugin process exited", "plugin", runner.Name(), "id", runner.ID()) - } - - os.Stderr.Sync() - - // Set that we exited, which takes a lock - c.l.Lock() - defer c.l.Unlock() - c.exited = true - }() - - // Start a goroutine that is going to be reading the lines - // out of stdout - linesCh := make(chan string) - c.clientWaitGroup.Add(1) - go func() { - defer c.clientWaitGroup.Done() - defer close(linesCh) - - scanner := bufio.NewScanner(runner.Stdout()) - for scanner.Scan() { - linesCh <- scanner.Text() - } - if scanner.Err() != nil { - c.logger.Error("error encountered while scanning stdout", "error", scanner.Err()) - } - }() - - // Make sure after we exit we read the lines from stdout forever - // so they don't block since it is a pipe. - // The scanner goroutine above will close this, but track it with a wait - // group for completeness. - c.clientWaitGroup.Add(1) - defer func() { - go func() { - defer c.clientWaitGroup.Done() - for range linesCh { - } - }() - }() - - // Some channels for the next step - timeout := time.After(c.config.StartTimeout) - - // Start looking for the address - c.logger.Debug("waiting for RPC address", "plugin", runner.Name()) - select { - case <-timeout: - err = errors.New("timeout while waiting for plugin to start") - case <-c.doneCtx.Done(): - err = errors.New("plugin exited before we could connect") - case line, ok := <-linesCh: - // Trim the line and split by "|" in order to get the parts of - // the output. - line = strings.TrimSpace(line) - parts := strings.Split(line, "|") - if len(parts) < 4 { - errText := fmt.Sprintf("Unrecognized remote plugin message: %s", line) - if !ok { - errText += "\n" + "Failed to read any lines from plugin's stdout" - } - additionalNotes := runner.Diagnose(context.Background()) - if additionalNotes != "" { - errText += "\n" + additionalNotes - } - err = errors.New(errText) - return - } - - // Check the core protocol. Wrapped in a {} for scoping. - { - var coreProtocol int - coreProtocol, err = strconv.Atoi(parts[0]) - if err != nil { - err = fmt.Errorf("Error parsing core protocol version: %s", err) - return - } - - if coreProtocol != CoreProtocolVersion { - err = fmt.Errorf("Incompatible core API version with plugin. "+ - "Plugin version: %s, Core version: %d\n\n"+ - "To fix this, the plugin usually only needs to be recompiled.\n"+ - "Please report this to the plugin author.", parts[0], CoreProtocolVersion) - return - } - } - - // Test the API version - version, pluginSet, err := c.checkProtoVersion(parts[1]) - if err != nil { - return addr, err - } - - // set the Plugins value to the compatible set, so the version - // doesn't need to be passed through to the ClientProtocol - // implementation. - c.config.Plugins = pluginSet - c.negotiatedVersion = version - c.logger.Debug("using plugin", "version", version) - - network, address, err := runner.PluginToHost(parts[2], parts[3]) - if err != nil { - return addr, err - } - - switch network { - case "tcp": - addr, err = net.ResolveTCPAddr("tcp", address) - case "unix": - addr, err = net.ResolveUnixAddr("unix", address) - default: - err = fmt.Errorf("Unknown address type: %s", address) - } - - // If we have a server type, then record that. We default to net/rpc - // for backwards compatibility. 
- c.protocol = ProtocolNetRPC - if len(parts) >= 5 { - c.protocol = Protocol(parts[4]) - } - - found := false - for _, p := range c.config.AllowedProtocols { - if p == c.protocol { - found = true - break - } - } - if !found { - err = fmt.Errorf("Unsupported plugin protocol %q. Supported: %v", - c.protocol, c.config.AllowedProtocols) - return addr, err - } - - // See if we have a TLS certificate from the server. - // Checking if the length is > 50 rules out catching the unused "extra" - // data returned from some older implementations. - if len(parts) >= 6 && len(parts[5]) > 50 { - err := c.loadServerCert(parts[5]) - if err != nil { - return nil, fmt.Errorf("error parsing server cert: %s", err) - } - } - - if c.config.GRPCBrokerMultiplex && c.protocol == ProtocolGRPC { - if len(parts) <= 6 { - return nil, fmt.Errorf("%w; for Go plugins, you will need to update the "+ - "github.com/hashicorp/go-plugin dependency and recompile", ErrGRPCBrokerMuxNotSupported) - } - if muxSupported, err := strconv.ParseBool(parts[6]); err != nil { - return nil, fmt.Errorf("error parsing %q as a boolean for gRPC broker multiplexing support", parts[6]) - } else if !muxSupported { - return nil, ErrGRPCBrokerMuxNotSupported - } - } - } - - c.address = addr - return -} - -// loadServerCert is used by AutoMTLS to read an x.509 cert returned by the -// server, and load it as the RootCA and ClientCA for the client TLSConfig. -func (c *Client) loadServerCert(cert string) error { - certPool := x509.NewCertPool() - - asn1, err := base64.RawStdEncoding.DecodeString(cert) - if err != nil { - return err - } - - x509Cert, err := x509.ParseCertificate([]byte(asn1)) - if err != nil { - return err - } - - certPool.AddCert(x509Cert) - - c.config.TLSConfig.RootCAs = certPool - c.config.TLSConfig.ClientCAs = certPool - return nil -} - -func (c *Client) reattach() (net.Addr, error) { - reattachFunc := c.config.Reattach.ReattachFunc - // For backwards compatibility default to cmdrunner.ReattachFunc - if reattachFunc == nil { - reattachFunc = cmdrunner.ReattachFunc(c.config.Reattach.Pid, c.config.Reattach.Addr) - } - - r, err := reattachFunc() - if err != nil { - return nil, err - } - - // Create a context for when we kill - c.doneCtx, c.ctxCancel = context.WithCancel(context.Background()) - - c.clientWaitGroup.Add(1) - // Goroutine to mark exit status - go func(r runner.AttachedRunner) { - defer c.clientWaitGroup.Done() - - // ensure the context is cancelled when we're done - defer c.ctxCancel() - - // Wait for the process to die - r.Wait(context.Background()) - - // Log so we can see it - c.logger.Debug("reattached plugin process exited") - - // Mark it - c.l.Lock() - defer c.l.Unlock() - c.exited = true - }(r) - - // Set the address and protocol - c.address = c.config.Reattach.Addr - c.protocol = c.config.Reattach.Protocol - if c.protocol == "" { - // Default the protocol to net/rpc for backwards compatibility - c.protocol = ProtocolNetRPC - } - - if c.config.Reattach.Test { - c.negotiatedVersion = c.config.Reattach.ProtocolVersion - } else { - // If we're in test mode, we do NOT set the runner. This avoids the - // runner being killed (the only purpose we have for setting c.runner - // when reattaching), since in test mode the process is responsible for - // exiting on its own. - c.runner = r - } - - return c.address, nil -} - -// checkProtoVersion returns the negotiated version and PluginSet. -// This returns an error if the server returned an incompatible protocol -// version, or an invalid handshake response. 
-func (c *Client) checkProtoVersion(protoVersion string) (int, PluginSet, error) { - serverVersion, err := strconv.Atoi(protoVersion) - if err != nil { - return 0, nil, fmt.Errorf("Error parsing protocol version %q: %s", protoVersion, err) - } - - // record these for the error message - var clientVersions []int - - // all versions, including the legacy ProtocolVersion have been added to - // the versions set - for version, plugins := range c.config.VersionedPlugins { - clientVersions = append(clientVersions, version) - - if serverVersion != version { - continue - } - return version, plugins, nil - } - - return 0, nil, fmt.Errorf("Incompatible API version with plugin. "+ - "Plugin version: %d, Client versions: %d", serverVersion, clientVersions) -} - -// ReattachConfig returns the information that must be provided to NewClient -// to reattach to the plugin process that this client started. This is -// useful for plugins that detach from their parent process. -// -// If this returns nil then the process hasn't been started yet. Please -// call Start or Client before calling this. -// -// Clients who specified a RunnerFunc will need to populate their own -// ReattachFunc in the returned ReattachConfig before it can be used. -func (c *Client) ReattachConfig() *ReattachConfig { - c.l.Lock() - defer c.l.Unlock() - - if c.address == nil { - return nil - } - - if c.config.Cmd != nil && c.config.Cmd.Process == nil { - return nil - } - - // If we connected via reattach, just return the information as-is - if c.config.Reattach != nil { - return c.config.Reattach - } - - reattach := &ReattachConfig{ - Protocol: c.protocol, - Addr: c.address, - } - - if c.config.Cmd != nil && c.config.Cmd.Process != nil { - reattach.Pid = c.config.Cmd.Process.Pid - } - - return reattach -} - -// Protocol returns the protocol of the server on the remote end. This will -// start the plugin process if it isn't already started. Errors from -// starting the plugin are suppressed and ProtocolInvalid is returned. It -// is recommended you call Start explicitly before calling Protocol to ensure -// no errors occur. -func (c *Client) Protocol() Protocol { - _, err := c.Start() - if err != nil { - return ProtocolInvalid - } - - return c.protocol -} - -func netAddrDialer(addr net.Addr) func(string, time.Duration) (net.Conn, error) { - return func(_ string, _ time.Duration) (net.Conn, error) { - // Connect to the client - conn, err := net.Dial(addr.Network(), addr.String()) - if err != nil { - return nil, err - } - if tcpConn, ok := conn.(*net.TCPConn); ok { - // Make sure to set keep alive so that the connection doesn't die - tcpConn.SetKeepAlive(true) - } - - return conn, nil - } -} - -// dialer is compatible with grpc.WithDialer and creates the connection -// to the plugin. -func (c *Client) dialer(_ string, timeout time.Duration) (net.Conn, error) { - muxer, err := c.getGRPCMuxer(c.address) - if err != nil { - return nil, err - } - - var conn net.Conn - if muxer.Enabled() { - conn, err = muxer.Dial() - if err != nil { - return nil, err - } - } else { - conn, err = netAddrDialer(c.address)("", timeout) - if err != nil { - return nil, err - } - } - - // If we have a TLS config we wrap our connection. We only do this - // for net/rpc since gRPC uses its own mechanism for TLS.
- if c.protocol == ProtocolNetRPC && c.config.TLSConfig != nil { - conn = tls.Client(conn, c.config.TLSConfig) - } - - return conn, nil -} - -func (c *Client) getGRPCMuxer(addr net.Addr) (*grpcmux.GRPCClientMuxer, error) { - if c.protocol != ProtocolGRPC || !c.config.GRPCBrokerMultiplex { - return nil, nil - } - - var err error - c.grpcMuxerOnce.Do(func() { - c.grpcMuxer, err = grpcmux.NewGRPCClientMuxer(c.logger, addr) - }) - if err != nil { - return nil, err - } - - return c.grpcMuxer, nil -} - -func (c *Client) logStderr(name string, r io.Reader) { - defer c.clientWaitGroup.Done() - defer c.stderrWaitGroup.Done() - l := c.logger.Named(filepath.Base(name)) - - reader := bufio.NewReaderSize(r, c.config.PluginLogBufferSize) - // continuation indicates the previous line was a prefix - continuation := false - - for { - line, isPrefix, err := reader.ReadLine() - switch { - case err == io.EOF: - return - case err != nil: - l.Error("reading plugin stderr", "error", err) - return - } - - c.config.Stderr.Write(line) - - // The line was longer than our max token size, so it's likely - // incomplete and won't unmarshal. - if isPrefix || continuation { - l.Debug(string(line)) - - // if we're finishing a continued line, add the newline back in - if !isPrefix { - c.config.Stderr.Write([]byte{'\n'}) - } - - continuation = isPrefix - continue - } - - c.config.Stderr.Write([]byte{'\n'}) - - entry, err := parseJSON(line) - // If output is not JSON format, print directly to Debug - if err != nil { - // Attempt to infer the desired log level from the commonly used - // string prefixes - switch line := string(line); { - case strings.HasPrefix(line, "[TRACE]"): - l.Trace(line) - case strings.HasPrefix(line, "[DEBUG]"): - l.Debug(line) - case strings.HasPrefix(line, "[INFO]"): - l.Info(line) - case strings.HasPrefix(line, "[WARN]"): - l.Warn(line) - case strings.HasPrefix(line, "[ERROR]"): - l.Error(line) - default: - l.Debug(line) - } - } else { - out := flattenKVPairs(entry.KVPairs) - - out = append(out, "timestamp", entry.Timestamp.Format(hclog.TimeFormat)) - switch hclog.LevelFromString(entry.Level) { - case hclog.Trace: - l.Trace(entry.Message, out...) - case hclog.Debug: - l.Debug(entry.Message, out...) - case hclog.Info: - l.Info(entry.Message, out...) - case hclog.Warn: - l.Warn(entry.Message, out...) - case hclog.Error: - l.Error(entry.Message, out...) - default: - // if there was no log level, it's likely this is unexpected - // json from something other than hclog, and we should output - // it verbatim. - l.Debug(string(line)) - } - } - } -} diff --git a/vendor/github.com/hashicorp/go-plugin/constants.go b/vendor/github.com/hashicorp/go-plugin/constants.go deleted file mode 100644 index e7f5bbe5f7c..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/constants.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package plugin - -const ( - // EnvUnixSocketDir specifies the directory that _plugins_ should create unix - // sockets in. Does not affect client behavior. - EnvUnixSocketDir = "PLUGIN_UNIX_SOCKET_DIR" - - // EnvUnixSocketGroup specifies the owning, writable group to set for Unix - // sockets created by _plugins_. Does not affect client behavior. 
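Editorial aside: the structured branch of logStderr above only triggers when the plugin writes hclog-style JSON lines; everything else falls back to the [INFO]-prefix heuristics. A sketch of the plugin-side logger that produces the structured form, using the standard go-hclog API (the logger name is hypothetical):

package main

import (
	"os"

	hclog "github.com/hashicorp/go-hclog"
)

func main() {
	logger := hclog.New(&hclog.LoggerOptions{
		Name:       "example-plugin", // hypothetical
		Output:     os.Stderr,        // the host tails the plugin's stderr
		JSONFormat: true,             // emits {"@level":...,"@message":...} entries
	})
	// This arrives at the host as structured key/value pairs and is re-logged
	// at the matching level, rather than being matched by string prefix.
	logger.Info("plugin ready", "port", 4200)
}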
- EnvUnixSocketGroup = "PLUGIN_UNIX_SOCKET_GROUP" - - envMultiplexGRPC = "PLUGIN_MULTIPLEX_GRPC" -) diff --git a/vendor/github.com/hashicorp/go-plugin/discover.go b/vendor/github.com/hashicorp/go-plugin/discover.go deleted file mode 100644 index c5b96242b1a..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/discover.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package plugin - -import ( - "path/filepath" -) - -// Discover discovers plugins that are in a given directory. -// -// The directory doesn't need to be absolute. For example, "." will work fine. -// -// This currently assumes any file matching the glob is a plugin. -// In the future this may be smarter about checking that a file is -// executable and so on. -// -// TODO: test -func Discover(glob, dir string) ([]string, error) { - var err error - - // Make the directory absolute if it isn't already - if !filepath.IsAbs(dir) { - dir, err = filepath.Abs(dir) - if err != nil { - return nil, err - } - } - - return filepath.Glob(filepath.Join(dir, glob)) -} diff --git a/vendor/github.com/hashicorp/go-plugin/error.go b/vendor/github.com/hashicorp/go-plugin/error.go deleted file mode 100644 index e62a21913f4..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/error.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package plugin - -// This is a type that wraps error types so that they can be messaged -// across RPC channels. Since "error" is an interface, we can't always -// gob-encode the underlying structure. This is a valid error interface -// implementer that we will push across. -type BasicError struct { - Message string -} - -// NewBasicError is used to create a BasicError. -// -// err is allowed to be nil. -func NewBasicError(err error) *BasicError { - if err == nil { - return nil - } - - return &BasicError{err.Error()} -} - -func (e *BasicError) Error() string { - return e.Message -} diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_broker.go b/vendor/github.com/hashicorp/go-plugin/grpc_broker.go deleted file mode 100644 index 5b17e37fef0..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/grpc_broker.go +++ /dev/null @@ -1,654 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package plugin - -import ( - "context" - "crypto/tls" - "errors" - "fmt" - "log" - "net" - "sync" - "sync/atomic" - "time" - - "github.com/hashicorp/go-plugin/internal/grpcmux" - "github.com/hashicorp/go-plugin/internal/plugin" - "github.com/hashicorp/go-plugin/runner" - - "github.com/oklog/run" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" -) - -// streamer interface is used in the broker to send/receive connection -// information. -type streamer interface { - Send(*plugin.ConnInfo) error - Recv() (*plugin.ConnInfo, error) - Close() -} - -// sendErr is used to pass errors back during a send. -type sendErr struct { - i *plugin.ConnInfo - ch chan error -} - -// gRPCBrokerServer is used by the plugin to start a stream and to send -// connection information to/from the plugin. Implements GRPCBrokerServer and -// streamer interfaces. -type gRPCBrokerServer struct { - plugin.UnimplementedGRPCBrokerServer - - // send is used to send connection info to the gRPC stream. - send chan *sendErr - - // recv is used to receive connection info from the gRPC stream. - recv chan *plugin.ConnInfo - - // quit closes down the stream. 
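Editorial aside: Discover, defined in the hunk above, is a thin filepath.Glob wrapper; typical host-side usage looks like the sketch below (the glob pattern and directory are hypothetical):

package main

import (
	"fmt"

	plugin "github.com/hashicorp/go-plugin"
)

func main() {
	// Matches e.g. ./plugins/example-plugin-aws, ./plugins/example-plugin-gcp.
	paths, err := plugin.Discover("example-plugin-*", "./plugins")
	if err != nil {
		panic(err)
	}
	fmt.Println("candidate plugin binaries:", paths)
}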
- quit chan struct{} - - // o is used to ensure we close the quit channel only once. - o sync.Once -} - -func newGRPCBrokerServer() *gRPCBrokerServer { - return &gRPCBrokerServer{ - send: make(chan *sendErr), - recv: make(chan *plugin.ConnInfo), - quit: make(chan struct{}), - } -} - -// StartStream implements the GRPCBrokerServer interface and will block until -// the quit channel is closed or the context reports Done. The stream will pass -// connection information to/from the client. -func (s *gRPCBrokerServer) StartStream(stream plugin.GRPCBroker_StartStreamServer) error { - doneCh := stream.Context().Done() - defer s.Close() - - // Proccess send stream - go func() { - for { - select { - case <-doneCh: - return - case <-s.quit: - return - case se := <-s.send: - err := stream.Send(se.i) - se.ch <- err - } - } - }() - - // Process receive stream - for { - i, err := stream.Recv() - if err != nil { - return err - } - select { - case <-doneCh: - return nil - case <-s.quit: - return nil - case s.recv <- i: - } - } - - return nil -} - -// Send is used by the GRPCBroker to pass connection information into the stream -// to the client. -func (s *gRPCBrokerServer) Send(i *plugin.ConnInfo) error { - ch := make(chan error) - defer close(ch) - - select { - case <-s.quit: - return errors.New("broker closed") - case s.send <- &sendErr{ - i: i, - ch: ch, - }: - } - - return <-ch -} - -// Recv is used by the GRPCBroker to pass connection information that has been -// sent from the client from the stream to the broker. -func (s *gRPCBrokerServer) Recv() (*plugin.ConnInfo, error) { - select { - case <-s.quit: - return nil, errors.New("broker closed") - case i := <-s.recv: - return i, nil - } -} - -// Close closes the quit channel, shutting down the stream. -func (s *gRPCBrokerServer) Close() { - s.o.Do(func() { - close(s.quit) - }) -} - -// gRPCBrokerClientImpl is used by the client to start a stream and to send -// connection information to/from the client. Implements GRPCBrokerClient and -// streamer interfaces. -type gRPCBrokerClientImpl struct { - // client is the underlying GRPC client used to make calls to the server. - client plugin.GRPCBrokerClient - - // send is used to send connection info to the gRPC stream. - send chan *sendErr - - // recv is used to receive connection info from the gRPC stream. - recv chan *plugin.ConnInfo - - // quit closes down the stream. - quit chan struct{} - - // o is used to ensure we close the quit channel only once. - o sync.Once -} - -func newGRPCBrokerClient(conn *grpc.ClientConn) *gRPCBrokerClientImpl { - return &gRPCBrokerClientImpl{ - client: plugin.NewGRPCBrokerClient(conn), - send: make(chan *sendErr), - recv: make(chan *plugin.ConnInfo), - quit: make(chan struct{}), - } -} - -// StartStream implements the GRPCBrokerClient interface and will block until -// the quit channel is closed or the context reports Done. The stream will pass -// connection information to/from the plugin. 
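Editorial aside: both streamer implementations here share the quit-channel-plus-sync.Once shutdown idiom, which makes Close safe to call from multiple goroutines. The idiom in isolation:

package main

import (
	"errors"
	"fmt"
	"sync"
)

type stream struct {
	quit chan struct{}
	o    sync.Once
}

// Close is idempotent: only the first call closes quit, so concurrent or
// repeated calls cannot panic on a double close.
func (s *stream) Close() { s.o.Do(func() { close(s.quit) }) }

func (s *stream) Send(msg string) error {
	select {
	case <-s.quit:
		return errors.New("broker closed")
	default:
		fmt.Println("sent:", msg)
		return nil
	}
}

func main() {
	s := &stream{quit: make(chan struct{})}
	_ = s.Send("hello")
	s.Close()
	s.Close()                          // no-op, no panic
	fmt.Println(s.Send("after close")) // broker closed
}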
-func (s *gRPCBrokerClientImpl) StartStream() error { - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - defer s.Close() - - stream, err := s.client.StartStream(ctx) - if err != nil { - return err - } - doneCh := stream.Context().Done() - - go func() { - for { - select { - case <-doneCh: - return - case <-s.quit: - return - case se := <-s.send: - err := stream.Send(se.i) - se.ch <- err - } - } - }() - - for { - i, err := stream.Recv() - if err != nil { - return err - } - select { - case <-doneCh: - return nil - case <-s.quit: - return nil - case s.recv <- i: - } - } - - return nil -} - -// Send is used by the GRPCBroker to pass connection information into the stream -// to the plugin. -func (s *gRPCBrokerClientImpl) Send(i *plugin.ConnInfo) error { - ch := make(chan error) - defer close(ch) - - select { - case <-s.quit: - return errors.New("broker closed") - case s.send <- &sendErr{ - i: i, - ch: ch, - }: - } - - return <-ch -} - -// Recv is used by the GRPCBroker to pass connection information that has been -// sent from the plugin to the broker. -func (s *gRPCBrokerClientImpl) Recv() (*plugin.ConnInfo, error) { - select { - case <-s.quit: - return nil, errors.New("broker closed") - case i := <-s.recv: - return i, nil - } -} - -// Close closes the quit channel, shutting down the stream. -func (s *gRPCBrokerClientImpl) Close() { - s.o.Do(func() { - close(s.quit) - }) -} - -// GRPCBroker is responsible for brokering connections by unique ID. -// -// It is used by plugins to create multiple gRPC connections and data -// streams between the plugin process and the host process. -// -// This allows a plugin to request a channel with a specific ID to connect to -// or accept a connection from, and the broker handles the details of -// holding these channels open while they're being negotiated. -// -// The Plugin interface has access to these for both Server and Client. -// The broker can be used by either (optionally) to reserve and connect to -// new streams. This is useful for complex args and return values, -// or anything else you might need a data stream for. -type GRPCBroker struct { - nextId uint32 - streamer streamer - tls *tls.Config - doneCh chan struct{} - o sync.Once - - clientStreams map[uint32]*gRPCBrokerPending - serverStreams map[uint32]*gRPCBrokerPending - - unixSocketCfg UnixSocketConfig - addrTranslator runner.AddrTranslator - - dialMutex sync.Mutex - - muxer grpcmux.GRPCMuxer - - sync.Mutex -} - -type gRPCBrokerPending struct { - ch chan *plugin.ConnInfo - doneCh chan struct{} - once sync.Once -} - -func newGRPCBroker(s streamer, tls *tls.Config, unixSocketCfg UnixSocketConfig, addrTranslator runner.AddrTranslator, muxer grpcmux.GRPCMuxer) *GRPCBroker { - return &GRPCBroker{ - streamer: s, - tls: tls, - doneCh: make(chan struct{}), - - clientStreams: make(map[uint32]*gRPCBrokerPending), - serverStreams: make(map[uint32]*gRPCBrokerPending), - muxer: muxer, - - unixSocketCfg: unixSocketCfg, - addrTranslator: addrTranslator, - } -} - -// Accept accepts a connection by ID. -// -// This should not be called multiple times with the same ID at one time. 
-func (b *GRPCBroker) Accept(id uint32) (net.Listener, error) { - if b.muxer.Enabled() { - p := b.getServerStream(id) - go func() { - err := b.listenForKnocks(id) - if err != nil { - log.Printf("[ERR]: error listening for knocks, id: %d, error: %s", id, err) - } - }() - - ln, err := b.muxer.Listener(id, p.doneCh) - if err != nil { - return nil, err - } - - ln = &rmListener{ - Listener: ln, - close: func() error { - // We could have multiple listeners on the same ID, so use sync.Once - // for closing doneCh to ensure we don't get a panic. - p.once.Do(func() { - close(p.doneCh) - }) - - b.Lock() - defer b.Unlock() - - // No longer need to listen for knocks once the listener is closed. - delete(b.serverStreams, id) - - return nil - }, - } - - return ln, nil - } - - listener, err := serverListener(b.unixSocketCfg) - if err != nil { - return nil, err - } - - advertiseNet := listener.Addr().Network() - advertiseAddr := listener.Addr().String() - if b.addrTranslator != nil { - advertiseNet, advertiseAddr, err = b.addrTranslator.HostToPlugin(advertiseNet, advertiseAddr) - if err != nil { - return nil, err - } - } - err = b.streamer.Send(&plugin.ConnInfo{ - ServiceId: id, - Network: advertiseNet, - Address: advertiseAddr, - }) - if err != nil { - return nil, err - } - - return listener, nil -} - -// AcceptAndServe is used to accept a specific stream ID and immediately -// serve a gRPC server on that stream ID. This is used to easily serve -// complex arguments. Each AcceptAndServe call opens a new listener socket and -// sends the connection info down the stream to the dialer. Since a new -// connection is opened every call, these calls should be used sparingly. -// Multiple gRPC server implementations can be registered to a single -// AcceptAndServe call. -func (b *GRPCBroker) AcceptAndServe(id uint32, newGRPCServer func([]grpc.ServerOption) *grpc.Server) { - ln, err := b.Accept(id) - if err != nil { - log.Printf("[ERR] plugin: plugin acceptAndServe error: %s", err) - return - } - defer ln.Close() - - var opts []grpc.ServerOption - if b.tls != nil { - opts = []grpc.ServerOption{grpc.Creds(credentials.NewTLS(b.tls))} - } - - server := newGRPCServer(opts) - - // Here we use a run group to close this goroutine if the server is shutdown - // or the broker is shutdown. - var g run.Group - { - // Serve on the listener, if shutting down call GracefulStop. - g.Add(func() error { - return server.Serve(ln) - }, func(err error) { - server.GracefulStop() - }) - } - { - // block on the closeCh or the doneCh. If we are shutting down close the - // closeCh. - closeCh := make(chan struct{}) - g.Add(func() error { - select { - case <-b.doneCh: - case <-closeCh: - } - return nil - }, func(err error) { - close(closeCh) - }) - } - - // Block until we are done - g.Run() -} - -// Close closes the stream and all servers. -func (b *GRPCBroker) Close() error { - b.streamer.Close() - b.o.Do(func() { - close(b.doneCh) - }) - return nil -} - -func (b *GRPCBroker) listenForKnocks(id uint32) error { - p := b.getServerStream(id) - for { - select { - case msg := <-p.ch: - // Shouldn't be possible. - if msg.ServiceId != id { - return fmt.Errorf("knock received with wrong service ID; expected %d but got %d", id, msg.ServiceId) - } - - // Also shouldn't be possible. - if msg.Knock == nil || !msg.Knock.Knock || msg.Knock.Ack { - return fmt.Errorf("knock received for service ID %d with incorrect values; knock=%+v", id, msg.Knock) - } - - // Successful knock, open the door for the given ID. 
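Editorial aside: the Accept/AcceptAndServe and Dial halves above combine into the canonical brokered-stream pattern — one side reserves an ID and serves on it, sends the ID across the main plugin RPC, and the peer dials that ID. A sketch; serveHelper, dialHelper, and the registration hook are hypothetical names for illustration:

package example

import (
	plugin "github.com/hashicorp/go-plugin"
	"google.golang.org/grpc"
)

// serveHelper reserves a broker stream ID and serves a gRPC service on it.
// The returned ID is sent over the main plugin RPC so the peer can dial it.
func serveHelper(broker *plugin.GRPCBroker, register func(*grpc.Server)) uint32 {
	id := broker.NextId()
	go broker.AcceptAndServe(id, func(opts []grpc.ServerOption) *grpc.Server {
		s := grpc.NewServer(opts...)
		register(s) // register the helper service implementation here
		return s
	})
	return id
}

// dialHelper connects to whatever the peer is serving on the given ID.
func dialHelper(broker *plugin.GRPCBroker, id uint32) (*grpc.ClientConn, error) {
	return broker.Dial(id)
}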
- var ackError string - err := b.muxer.AcceptKnock(id) - if err != nil { - ackError = err.Error() - } - - // Send back an acknowledgement to allow the client to start dialling. - err = b.streamer.Send(&plugin.ConnInfo{ - ServiceId: id, - Knock: &plugin.ConnInfo_Knock{ - Knock: true, - Ack: true, - Error: ackError, - }, - }) - if err != nil { - return fmt.Errorf("error sending back knock acknowledgement: %w", err) - } - case <-p.doneCh: - return nil - } - } -} - -func (b *GRPCBroker) knock(id uint32) error { - // Send a knock. - err := b.streamer.Send(&plugin.ConnInfo{ - ServiceId: id, - Knock: &plugin.ConnInfo_Knock{ - Knock: true, - }, - }) - if err != nil { - return err - } - - // Wait for the ack. - p := b.getClientStream(id) - select { - case msg := <-p.ch: - if msg.ServiceId != id { - return fmt.Errorf("handshake failed for multiplexing on id %d; got response for %d", id, msg.ServiceId) - } - if msg.Knock == nil || !msg.Knock.Knock || !msg.Knock.Ack { - return fmt.Errorf("handshake failed for multiplexing on id %d; expected knock and ack, but got %+v", id, msg.Knock) - } - if msg.Knock.Error != "" { - return fmt.Errorf("failed to knock for id %d: %s", id, msg.Knock.Error) - } - case <-time.After(5 * time.Second): - return fmt.Errorf("timeout waiting for multiplexing knock handshake on id %d", id) - } - - return nil -} - -func (b *GRPCBroker) muxDial(id uint32) func(string, time.Duration) (net.Conn, error) { - return func(string, time.Duration) (net.Conn, error) { - b.dialMutex.Lock() - defer b.dialMutex.Unlock() - - // Tell the other side the listener ID it should give the next stream to. - err := b.knock(id) - if err != nil { - return nil, fmt.Errorf("failed to knock before dialling client: %w", err) - } - - conn, err := b.muxer.Dial() - if err != nil { - return nil, err - } - - return conn, nil - } -} - -// Dial opens a connection by ID. -func (b *GRPCBroker) Dial(id uint32) (conn *grpc.ClientConn, err error) { - if b.muxer.Enabled() { - return dialGRPCConn(b.tls, b.muxDial(id)) - } - - var c *plugin.ConnInfo - - // Open the stream - p := b.getClientStream(id) - select { - case c = <-p.ch: - close(p.doneCh) - case <-time.After(5 * time.Second): - return nil, fmt.Errorf("timeout waiting for connection info") - } - - network, address := c.Network, c.Address - if b.addrTranslator != nil { - network, address, err = b.addrTranslator.PluginToHost(network, address) - if err != nil { - return nil, err - } - } - - var addr net.Addr - switch network { - case "tcp": - addr, err = net.ResolveTCPAddr("tcp", address) - case "unix": - addr, err = net.ResolveUnixAddr("unix", address) - default: - err = fmt.Errorf("Unknown address type: %s", c.Address) - } - if err != nil { - return nil, err - } - - return dialGRPCConn(b.tls, netAddrDialer(addr)) -} - -// NextId returns a unique ID to use next. -// -// It is possible for very long-running plugin hosts to wrap this value, -// though it would require a very large amount of calls. In practice -// we've never seen it happen. -func (m *GRPCBroker) NextId() uint32 { - return atomic.AddUint32(&m.nextId, 1) -} - -// Run starts the brokering and should be executed in a goroutine, since it -// blocks forever, or until the session closes. -// -// Uses of GRPCBroker never need to call this. It is called internally by -// the plugin host/client. 
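Editorial aside: pulling the knock/ack exchange above into one end-to-end trace (the ConnInfo field values are illustrative):

  dialer   -> listener: ConnInfo{ServiceId: 7, Knock: {Knock: true}}
  listener:  muxer.AcceptKnock(7) unblocks the brokered listener for ID 7
  listener -> dialer:   ConnInfo{ServiceId: 7, Knock: {Knock: true, Ack: true}}
  dialer:    muxer.Dial() opens the next multiplexed stream, which the
             listening side routes to ID 7

Only after the ack arrives does muxDial above actually open the stream; the dialMutex serializes knocks so consecutive streams cannot be routed to the wrong ID.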
-func (m *GRPCBroker) Run() { - for { - msg, err := m.streamer.Recv() - if err != nil { - // Once we receive an error, just exit - break - } - - // Initialize the waiter - var p *gRPCBrokerPending - if msg.Knock != nil && msg.Knock.Knock && !msg.Knock.Ack { - p = m.getServerStream(msg.ServiceId) - // The server side doesn't close the channel immediately as it needs - // to continuously listen for knocks. - } else { - p = m.getClientStream(msg.ServiceId) - go m.timeoutWait(msg.ServiceId, p) - } - select { - case p.ch <- msg: - default: - } - } -} - -// getClientStream is a buffer to receive new connection info and knock acks -// by stream ID. -func (m *GRPCBroker) getClientStream(id uint32) *gRPCBrokerPending { - m.Lock() - defer m.Unlock() - - p, ok := m.clientStreams[id] - if ok { - return p - } - - m.clientStreams[id] = &gRPCBrokerPending{ - ch: make(chan *plugin.ConnInfo, 1), - doneCh: make(chan struct{}), - } - return m.clientStreams[id] -} - -// getServerStream is a buffer to receive knocks to a multiplexed stream ID -// that its side is listening on. Not used unless multiplexing is enabled. -func (m *GRPCBroker) getServerStream(id uint32) *gRPCBrokerPending { - m.Lock() - defer m.Unlock() - - p, ok := m.serverStreams[id] - if ok { - return p - } - - m.serverStreams[id] = &gRPCBrokerPending{ - ch: make(chan *plugin.ConnInfo, 1), - doneCh: make(chan struct{}), - } - return m.serverStreams[id] -} - -func (m *GRPCBroker) timeoutWait(id uint32, p *gRPCBrokerPending) { - // Wait for the stream to either be picked up and connected, or - // for a timeout. - select { - case <-p.doneCh: - case <-time.After(5 * time.Second): - } - - m.Lock() - defer m.Unlock() - - // Delete the stream so no one else can grab it - delete(m.clientStreams, id) -} diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_client.go b/vendor/github.com/hashicorp/go-plugin/grpc_client.go deleted file mode 100644 index 627649d8394..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/grpc_client.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package plugin - -import ( - "context" - "crypto/tls" - "fmt" - "math" - "net" - "time" - - "github.com/hashicorp/go-plugin/internal/plugin" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/health/grpc_health_v1" -) - -func dialGRPCConn(tls *tls.Config, dialer func(string, time.Duration) (net.Conn, error), dialOpts ...grpc.DialOption) (*grpc.ClientConn, error) { - // Build dialing options. - opts := make([]grpc.DialOption, 0) - - // We use a custom dialer so that we can connect over unix domain sockets. - opts = append(opts, grpc.WithDialer(dialer)) - - // Fail right away - opts = append(opts, grpc.FailOnNonTempDialError(true)) - - // If we have no TLS configuration set, we need to explicitly tell grpc - // that we're connecting with an insecure connection. - if tls == nil { - opts = append(opts, grpc.WithInsecure()) - } else { - opts = append(opts, grpc.WithTransportCredentials( - credentials.NewTLS(tls))) - } - - opts = append(opts, - grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(math.MaxInt32)), - grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(math.MaxInt32))) - - // Add our custom options if we have any - opts = append(opts, dialOpts...) - - // Connect. Note the first parameter is unused because we use a custom - // dialer that has the state to see the address. - conn, err := grpc.Dial("unused", opts...) 
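Editorial aside: dialGRPCConn above uses grpc.WithDialer and grpc.WithInsecure, which newer grpc-go releases deprecate. A functionally similar sketch with the current option names, purely for orientation (the vendored code intentionally keeps the older API):

package example

import (
	"context"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// dialUnix mirrors the custom-dialer trick above: the target string is unused
// because the dialer closure already carries the real address.
func dialUnix(path string) (*grpc.ClientConn, error) {
	return grpc.Dial(
		"unused",
		grpc.WithContextDialer(func(ctx context.Context, _ string) (net.Conn, error) {
			var d net.Dialer
			return d.DialContext(ctx, "unix", path)
		}),
		grpc.WithTransportCredentials(insecure.NewCredentials()),
	)
}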
- if err != nil { - return nil, err - } - - return conn, nil -} - -// newGRPCClient creates a new GRPCClient. The Client argument is expected -// to be successfully started already with a lock held. -func newGRPCClient(doneCtx context.Context, c *Client) (*GRPCClient, error) { - conn, err := dialGRPCConn(c.config.TLSConfig, c.dialer, c.config.GRPCDialOptions...) - if err != nil { - return nil, err - } - - muxer, err := c.getGRPCMuxer(c.address) - if err != nil { - return nil, err - } - - // Start the broker. - brokerGRPCClient := newGRPCBrokerClient(conn) - broker := newGRPCBroker(brokerGRPCClient, c.config.TLSConfig, c.unixSocketCfg, c.runner, muxer) - go broker.Run() - go brokerGRPCClient.StartStream() - - // Start the stdio client - stdioClient, err := newGRPCStdioClient(doneCtx, c.logger.Named("stdio"), conn) - if err != nil { - return nil, err - } - go stdioClient.Run(c.config.SyncStdout, c.config.SyncStderr) - - cl := &GRPCClient{ - Conn: conn, - Plugins: c.config.Plugins, - doneCtx: doneCtx, - broker: broker, - controller: plugin.NewGRPCControllerClient(conn), - } - - return cl, nil -} - -// GRPCClient connects to a GRPCServer over gRPC to dispense plugin types. -type GRPCClient struct { - Conn *grpc.ClientConn - Plugins map[string]Plugin - - doneCtx context.Context - broker *GRPCBroker - - controller plugin.GRPCControllerClient -} - -// ClientProtocol impl. -func (c *GRPCClient) Close() error { - c.broker.Close() - c.controller.Shutdown(c.doneCtx, &plugin.Empty{}) - return c.Conn.Close() -} - -// ClientProtocol impl. -func (c *GRPCClient) Dispense(name string) (interface{}, error) { - raw, ok := c.Plugins[name] - if !ok { - return nil, fmt.Errorf("unknown plugin type: %s", name) - } - - p, ok := raw.(GRPCPlugin) - if !ok { - return nil, fmt.Errorf("plugin %q doesn't support gRPC", name) - } - - return p.GRPCClient(c.doneCtx, c.broker, c.Conn) -} - -// ClientProtocol impl. -func (c *GRPCClient) Ping() error { - client := grpc_health_v1.NewHealthClient(c.Conn) - _, err := client.Check(context.Background(), &grpc_health_v1.HealthCheckRequest{ - Service: GRPCServiceName, - }) - - return err -} diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_controller.go b/vendor/github.com/hashicorp/go-plugin/grpc_controller.go deleted file mode 100644 index 2085356cd34..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/grpc_controller.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package plugin - -import ( - "context" - - "github.com/hashicorp/go-plugin/internal/plugin" -) - -// GRPCControllerServer handles shutdown calls to terminate the server when the -// plugin client is closed. -type grpcControllerServer struct { - server *GRPCServer -} - -// Shutdown stops the grpc server. It first will attempt a graceful stop, then a -// full stop on the server. -func (s *grpcControllerServer) Shutdown(ctx context.Context, _ *plugin.Empty) (*plugin.Empty, error) { - resp := &plugin.Empty{} - - // TODO: figure out why GracefullStop doesn't work. - s.server.Stop() - return resp, nil -} diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_server.go b/vendor/github.com/hashicorp/go-plugin/grpc_server.go deleted file mode 100644 index a5f40c7f06e..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/grpc_server.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package plugin - -import ( - "bytes" - "crypto/tls" - "encoding/json" - "fmt" - "io" - "net" - - hclog "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-plugin/internal/grpcmux" - "github.com/hashicorp/go-plugin/internal/plugin" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/health" - "google.golang.org/grpc/health/grpc_health_v1" - "google.golang.org/grpc/reflection" -) - -// GRPCServiceName is the name of the service that the health check should -// return as passing. -const GRPCServiceName = "plugin" - -// DefaultGRPCServer can be used with the "GRPCServer" field for Server -// as a default factory method to create a gRPC server with no extra options. -func DefaultGRPCServer(opts []grpc.ServerOption) *grpc.Server { - return grpc.NewServer(opts...) -} - -// GRPCServer is a ServerType implementation that serves plugins over -// gRPC. This allows plugins to easily be written for other languages. -// -// The GRPCServer outputs a custom configuration as a base64-encoded -// JSON structure represented by the GRPCServerConfig config structure. -type GRPCServer struct { - // Plugins are the list of plugins to serve. - Plugins map[string]Plugin - - // Server is the actual server that will accept connections. This - // will be used for plugin registration as well. - Server func([]grpc.ServerOption) *grpc.Server - - // TLS should be the TLS configuration if available. If this is nil, - // the connection will not have transport security. - TLS *tls.Config - - // DoneCh is the channel that is closed when this server has exited. - DoneCh chan struct{} - - // Stdout/StderrLis are the readers for stdout/stderr that will be copied - // to the stdout/stderr connection that is output. - Stdout io.Reader - Stderr io.Reader - - config GRPCServerConfig - server *grpc.Server - broker *GRPCBroker - stdioServer *grpcStdioServer - - logger hclog.Logger - - muxer *grpcmux.GRPCServerMuxer -} - -// ServerProtocol impl. -func (s *GRPCServer) Init() error { - // Create our server - var opts []grpc.ServerOption - if s.TLS != nil { - opts = append(opts, grpc.Creds(credentials.NewTLS(s.TLS))) - } - s.server = s.Server(opts) - - // Register the health service - healthCheck := health.NewServer() - healthCheck.SetServingStatus( - GRPCServiceName, grpc_health_v1.HealthCheckResponse_SERVING) - grpc_health_v1.RegisterHealthServer(s.server, healthCheck) - - // Register the reflection service - reflection.Register(s.server) - - // Register the broker service - brokerServer := newGRPCBrokerServer() - plugin.RegisterGRPCBrokerServer(s.server, brokerServer) - s.broker = newGRPCBroker(brokerServer, s.TLS, unixSocketConfigFromEnv(), nil, s.muxer) - go s.broker.Run() - - // Register the controller - controllerServer := &grpcControllerServer{server: s} - plugin.RegisterGRPCControllerServer(s.server, controllerServer) - - // Register the stdio service - s.stdioServer = newGRPCStdioServer(s.logger, s.Stdout, s.Stderr) - plugin.RegisterGRPCStdioServer(s.server, s.stdioServer) - - // Register all our plugins onto the gRPC server. - for k, raw := range s.Plugins { - p, ok := raw.(GRPCPlugin) - if !ok { - return fmt.Errorf("%q is not a GRPC-compatible plugin", k) - } - - if err := p.GRPCServer(s.broker, s.server); err != nil { - return fmt.Errorf("error registering %q: %s", k, err) - } - } - - return nil -} - -// Stop calls Stop on the underlying grpc.Server and Close on the underlying -// grpc.Broker if present. 
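Editorial aside: plugin authors rarely construct GRPCServer directly; it is wired up through the package's public Serve entry point, with DefaultGRPCServer above as the factory. A sketch — the handshake values and the empty plugin map are placeholders:

package main

import (
	plugin "github.com/hashicorp/go-plugin"
)

func main() {
	plugin.Serve(&plugin.ServeConfig{
		HandshakeConfig: plugin.HandshakeConfig{
			ProtocolVersion:  1,
			MagicCookieKey:   "EXAMPLE_COOKIE", // hypothetical
			MagicCookieValue: "example",        // hypothetical
		},
		Plugins:    map[string]plugin.Plugin{}, // register GRPCPlugin impls here
		GRPCServer: plugin.DefaultGRPCServer,   // the factory defined above
	})
}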
-func (s *GRPCServer) Stop() { - s.server.Stop() - - if s.broker != nil { - s.broker.Close() - s.broker = nil - } -} - -// GracefulStop calls GracefulStop on the underlying grpc.Server and Close on -// the underlying grpc.Broker if present. -func (s *GRPCServer) GracefulStop() { - s.server.GracefulStop() - - if s.broker != nil { - s.broker.Close() - s.broker = nil - } -} - -// Config is the GRPCServerConfig encoded as JSON then base64. -func (s *GRPCServer) Config() string { - // Create a buffer that will contain our final contents - var buf bytes.Buffer - - // Wrap the base64 encoding with JSON encoding. - if err := json.NewEncoder(&buf).Encode(s.config); err != nil { - // We panic since ths shouldn't happen under any scenario. We - // carefully control the structure being encoded here and it should - // always be successful. - panic(err) - } - - return buf.String() -} - -func (s *GRPCServer) Serve(lis net.Listener) { - defer close(s.DoneCh) - err := s.server.Serve(lis) - if err != nil { - s.logger.Error("grpc server", "error", err) - } -} - -// GRPCServerConfig is the extra configuration passed along for consumers -// to facilitate using GRPC plugins. -type GRPCServerConfig struct { - StdoutAddr string `json:"stdout_addr"` - StderrAddr string `json:"stderr_addr"` -} diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go b/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go deleted file mode 100644 index ae06c116313..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package plugin - -import ( - "bufio" - "bytes" - "context" - "io" - - empty "github.com/golang/protobuf/ptypes/empty" - hclog "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-plugin/internal/plugin" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// grpcStdioBuffer is the buffer size we try to fill when sending a chunk of -// stdio data. This is currently 1 KB for no reason other than that seems like -// enough (stdio data isn't that common) and is fairly low. -const grpcStdioBuffer = 1 * 1024 - -// grpcStdioServer implements the Stdio service and streams stdiout/stderr. -type grpcStdioServer struct { - stdoutCh <-chan []byte - stderrCh <-chan []byte -} - -// newGRPCStdioServer creates a new grpcStdioServer and starts the stream -// copying for the given out and err readers. -// -// This must only be called ONCE per srcOut, srcErr. -func newGRPCStdioServer(log hclog.Logger, srcOut, srcErr io.Reader) *grpcStdioServer { - stdoutCh := make(chan []byte) - stderrCh := make(chan []byte) - - // Begin copying the streams - go copyChan(log, stdoutCh, srcOut) - go copyChan(log, stderrCh, srcErr) - - // Construct our server - return &grpcStdioServer{ - stdoutCh: stdoutCh, - stderrCh: stderrCh, - } -} - -// StreamStdio streams our stdout/err as the response. -func (s *grpcStdioServer) StreamStdio( - _ *empty.Empty, - srv plugin.GRPCStdio_StreamStdioServer, -) error { - // Share the same data value between runs. Sending this over the wire - // marshals it so we can reuse this. 
- var data plugin.StdioData - - for { - // Read our data - select { - case data.Data = <-s.stdoutCh: - data.Channel = plugin.StdioData_STDOUT - - case data.Data = <-s.stderrCh: - data.Channel = plugin.StdioData_STDERR - - case <-srv.Context().Done(): - return nil - } - - // Not sure if this is possible, but if we somehow got here and - // we didn't populate any data at all, then just continue. - if len(data.Data) == 0 { - continue - } - - // Send our data to the client. - if err := srv.Send(&data); err != nil { - return err - } - } -} - -// grpcStdioClient wraps the stdio service as a client to copy -// the stdio data to output writers. -type grpcStdioClient struct { - log hclog.Logger - stdioClient plugin.GRPCStdio_StreamStdioClient -} - -// newGRPCStdioClient creates a grpcStdioClient. This will perform the -// initial connection to the stdio service. If the stdio service is unavailable -// then this will be a no-op. This allows this to work without error for -// plugins that don't support this. -func newGRPCStdioClient( - ctx context.Context, - log hclog.Logger, - conn *grpc.ClientConn, -) (*grpcStdioClient, error) { - client := plugin.NewGRPCStdioClient(conn) - - // Connect immediately to the endpoint - stdioClient, err := client.StreamStdio(ctx, &empty.Empty{}) - - // If we get an Unavailable or Unimplemented error, this means that the plugin isn't - // updated and linking to the latest version of go-plugin that supports - // this. We fall back to the previous behavior of just not syncing anything. - if status.Code(err) == codes.Unavailable || status.Code(err) == codes.Unimplemented { - log.Warn("stdio service not available, stdout/stderr syncing unavailable") - stdioClient = nil - err = nil - } - if err != nil { - return nil, err - } - - return &grpcStdioClient{ - log: log, - stdioClient: stdioClient, - }, nil -} - -// Run starts the loop that receives stdio data and writes it to the given -// writers. This blocks and should be run in a goroutine. -func (c *grpcStdioClient) Run(stdout, stderr io.Writer) { - // This will be nil if stdio is not supported by the plugin - if c.stdioClient == nil { - c.log.Warn("stdio service unavailable, run will do nothing") - return - } - - for { - c.log.Trace("waiting for stdio data") - data, err := c.stdioClient.Recv() - if err != nil { - if err == io.EOF || - status.Code(err) == codes.Unavailable || - status.Code(err) == codes.Canceled || - status.Code(err) == codes.Unimplemented || - err == context.Canceled { - c.log.Debug("received EOF, stopping recv loop", "err", err) - return - } - - c.log.Error("error receiving data", "err", err) - return - } - - // Determine our output writer based on channel - var w io.Writer - switch data.Channel { - case plugin.StdioData_STDOUT: - w = stdout - - case plugin.StdioData_STDERR: - w = stderr - - default: - c.log.Warn("unknown channel, dropping", "channel", data.Channel) - continue - } - - // Write! In the event of an error we just continue. - if c.log.IsTrace() { - c.log.Trace("received data", "channel", data.Channel.String(), "len", len(data.Data)) - } - if _, err := io.Copy(w, bytes.NewReader(data.Data)); err != nil { - c.log.Error("failed to copy all bytes", "err", err) - } - } -} - -// copyChan copies an io.Reader into a channel. -func copyChan(log hclog.Logger, dst chan<- []byte, src io.Reader) { - bufsrc := bufio.NewReader(src) - - for { - // Make our data buffer. We allocate a new one per loop iteration - // so that we can send it over the channel. 
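Editorial aside: the Run loop above only receives data if the host opted into stdio syncing, which on the client side is a matter of setting two writers, roughly as below (binary path and handshake values hypothetical):

package example

import (
	"os"
	"os/exec"

	plugin "github.com/hashicorp/go-plugin"
)

func newSyncedClient() *plugin.Client {
	return plugin.NewClient(&plugin.ClientConfig{
		HandshakeConfig: plugin.HandshakeConfig{
			ProtocolVersion:  1,
			MagicCookieKey:   "EXAMPLE_COOKIE", // hypothetical
			MagicCookieValue: "example",        // hypothetical
		},
		Plugins:          map[string]plugin.Plugin{},
		Cmd:              exec.Command("./example-plugin"), // hypothetical binary
		AllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC},
		SyncStdout:       os.Stdout, // plugin stdout is mirrored here by Run above
		SyncStderr:       os.Stderr, // plugin stderr likewise
	})
}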
- var data [1024]byte - - // Read the data, this will block until data is available - n, err := bufsrc.Read(data[:]) - - // We have to check if we have data BEFORE err != nil. The bufio - // docs guarantee n == 0 on EOF but its better to be safe here. - if n > 0 { - // We have data! Send it on the channel. This will block if there - // is no reader on the other side. We expect that go-plugin will - // connect immediately to the stdio server to drain this so we want - // this block to happen for backpressure. - dst <- data[:n] - } - - // If we hit EOF we're done copying - if err == io.EOF { - log.Debug("stdio EOF, exiting copy loop") - return - } - - // Any other error we just exit the loop. We don't expect there to - // be errors since our use case for this is reading/writing from - // a in-process pipe (os.Pipe). - if err != nil { - log.Warn("error copying stdio data, stopping copy", "err", err) - return - } - } -} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/addr_translator.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/addr_translator.go deleted file mode 100644 index 1854d2dd531..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/addr_translator.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package cmdrunner - -// addrTranslator implements stateless identity functions, as the host and plugin -// run in the same context wrt Unix and network addresses. -type addrTranslator struct{} - -func (*addrTranslator) PluginToHost(pluginNet, pluginAddr string) (string, string, error) { - return pluginNet, pluginAddr, nil -} - -func (*addrTranslator) HostToPlugin(hostNet, hostAddr string) (string, string, error) { - return hostNet, hostAddr, nil -} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/cmd_reattach.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/cmd_reattach.go deleted file mode 100644 index dce1a86a88e..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/cmd_reattach.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package cmdrunner - -import ( - "context" - "fmt" - "net" - "os" - - "github.com/hashicorp/go-plugin/runner" -) - -// ReattachFunc returns a function that allows reattaching to a plugin running -// as a plain process. The process may or may not be a child process. -func ReattachFunc(pid int, addr net.Addr) runner.ReattachFunc { - return func() (runner.AttachedRunner, error) { - p, err := os.FindProcess(pid) - if err != nil { - // On Unix systems, FindProcess never returns an error. - // On Windows, for non-existent pids it returns: - // os.SyscallError - 'OpenProcess: the paremter is incorrect' - return nil, ErrProcessNotFound - } - - // Attempt to connect to the addr since on Unix systems FindProcess - // doesn't actually return an error if it can't find the process. - conn, err := net.Dial(addr.Network(), addr.String()) - if err != nil { - p.Kill() - return nil, ErrProcessNotFound - } - conn.Close() - - return &CmdAttachedRunner{ - pid: pid, - process: p, - }, nil - } -} - -// CmdAttachedRunner is mostly a subset of CmdRunner, except the Wait function -// does not assume the process is a child of the host process, and so uses a -// different implementation to wait on the process. 
-type CmdAttachedRunner struct { - pid int - process *os.Process - - addrTranslator -} - -func (c *CmdAttachedRunner) Wait(_ context.Context) error { - return pidWait(c.pid) -} - -func (c *CmdAttachedRunner) Kill(_ context.Context) error { - return c.process.Kill() -} - -func (c *CmdAttachedRunner) ID() string { - return fmt.Sprintf("%d", c.pid) -} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/cmd_runner.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/cmd_runner.go deleted file mode 100644 index b26fea928e2..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/cmd_runner.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package cmdrunner - -import ( - "context" - "errors" - "fmt" - "io" - "os" - "os/exec" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-plugin/runner" -) - -var ( - _ runner.Runner = (*CmdRunner)(nil) - - // ErrProcessNotFound is returned when a client is instantiated to - // reattach to an existing process and it isn't found. - ErrProcessNotFound = errors.New("Reattachment process not found") -) - -const unrecognizedRemotePluginMessage = `This usually means - the plugin was not compiled for this architecture, - the plugin is missing dynamic-link libraries necessary to run, - the plugin is not executable by this process due to file permissions, or - the plugin failed to negotiate the initial go-plugin protocol handshake -%s` - -// CmdRunner implements the runner.Runner interface. It mostly just passes through -// to exec.Cmd methods. -type CmdRunner struct { - logger hclog.Logger - cmd *exec.Cmd - - stdout io.ReadCloser - stderr io.ReadCloser - - // Cmd info is persisted early, since the process information will be removed - // after Kill is called. - path string - pid int - - addrTranslator -} - -// NewCmdRunner returns an implementation of runner.Runner for running a plugin -// as a subprocess. It must be passed a cmd that hasn't yet been started. -func NewCmdRunner(logger hclog.Logger, cmd *exec.Cmd) (*CmdRunner, error) { - stdout, err := cmd.StdoutPipe() - if err != nil { - return nil, err - } - - stderr, err := cmd.StderrPipe() - if err != nil { - return nil, err - } - - return &CmdRunner{ - logger: logger, - cmd: cmd, - stdout: stdout, - stderr: stderr, - path: cmd.Path, - }, nil -} - -func (c *CmdRunner) Start(_ context.Context) error { - c.logger.Debug("starting plugin", "path", c.cmd.Path, "args", c.cmd.Args) - err := c.cmd.Start() - if err != nil { - return err - } - - c.pid = c.cmd.Process.Pid - c.logger.Debug("plugin started", "path", c.path, "pid", c.pid) - return nil -} - -func (c *CmdRunner) Wait(_ context.Context) error { - return c.cmd.Wait() -} - -func (c *CmdRunner) Kill(_ context.Context) error { - if c.cmd.Process != nil { - err := c.cmd.Process.Kill() - // Swallow ErrProcessDone, we support calling Kill multiple times. - if !errors.Is(err, os.ErrProcessDone) { - return err - } - return nil - } - - return nil -} - -func (c *CmdRunner) Stdout() io.ReadCloser { - return c.stdout -} - -func (c *CmdRunner) Stderr() io.ReadCloser { - return c.stderr -} - -func (c *CmdRunner) Name() string { - return c.path -} - -func (c *CmdRunner) ID() string { - return fmt.Sprintf("%d", c.pid) -} - -// peTypes is a list of Portable Executable (PE) machine types from https://learn.microsoft.com/en-us/windows/win32/debug/pe-format -// mapped to GOARCH types. It is not comprehensive, and only includes machine types that Go supports. 
-var peTypes = map[uint16]string{ - 0x14c: "386", - 0x1c0: "arm", - 0x6264: "loong64", - 0x8664: "amd64", - 0xaa64: "arm64", -} - -func (c *CmdRunner) Diagnose(_ context.Context) string { - return fmt.Sprintf(unrecognizedRemotePluginMessage, additionalNotesAboutCommand(c.cmd.Path)) -} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/notes_unix.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/notes_unix.go deleted file mode 100644 index ce04cfebc67..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/notes_unix.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -//go:build !windows -// +build !windows - -package cmdrunner - -import ( - "debug/elf" - "debug/macho" - "debug/pe" - "fmt" - "os" - "os/user" - "runtime" - "strconv" - "syscall" -) - -// additionalNotesAboutCommand tries to get additional information about a command that might help diagnose -// why it won't run correctly. It runs as a best effort only. -func additionalNotesAboutCommand(path string) string { - notes := "" - stat, err := os.Stat(path) - if err != nil { - return notes - } - - notes += "\nAdditional notes about plugin:\n" - notes += fmt.Sprintf(" Path: %s\n", path) - notes += fmt.Sprintf(" Mode: %s\n", stat.Mode()) - statT, ok := stat.Sys().(*syscall.Stat_t) - if ok { - currentUsername := "?" - if u, err := user.LookupId(strconv.FormatUint(uint64(os.Getuid()), 10)); err == nil { - currentUsername = u.Username - } - currentGroup := "?" - if g, err := user.LookupGroupId(strconv.FormatUint(uint64(os.Getgid()), 10)); err == nil { - currentGroup = g.Name - } - username := "?" - if u, err := user.LookupId(strconv.FormatUint(uint64(statT.Uid), 10)); err == nil { - username = u.Username - } - group := "?" - if g, err := user.LookupGroupId(strconv.FormatUint(uint64(statT.Gid), 10)); err == nil { - group = g.Name - } - notes += fmt.Sprintf(" Owner: %d [%s] (current: %d [%s])\n", statT.Uid, username, os.Getuid(), currentUsername) - notes += fmt.Sprintf(" Group: %d [%s] (current: %d [%s])\n", statT.Gid, group, os.Getgid(), currentGroup) - } - - if elfFile, err := elf.Open(path); err == nil { - defer elfFile.Close() - notes += fmt.Sprintf(" ELF architecture: %s (current architecture: %s)\n", elfFile.Machine, runtime.GOARCH) - } else if machoFile, err := macho.Open(path); err == nil { - defer machoFile.Close() - notes += fmt.Sprintf(" MachO architecture: %s (current architecture: %s)\n", machoFile.Cpu, runtime.GOARCH) - } else if peFile, err := pe.Open(path); err == nil { - defer peFile.Close() - machine, ok := peTypes[peFile.Machine] - if !ok { - machine = "unknown" - } - notes += fmt.Sprintf(" PE architecture: %s (current architecture: %s)\n", machine, runtime.GOARCH) - } - return notes -} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/notes_windows.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/notes_windows.go deleted file mode 100644 index 39c51dd1e08..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/notes_windows.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -//go:build windows -// +build windows - -package cmdrunner - -import ( - "debug/elf" - "debug/macho" - "debug/pe" - "fmt" - "os" - "runtime" -) - -// additionalNotesAboutCommand tries to get additional information about a command that might help diagnose -// why it won't run correctly. It runs as a best effort only. 
-func additionalNotesAboutCommand(path string) string { - notes := "" - stat, err := os.Stat(path) - if err != nil { - return notes - } - - notes += "\nAdditional notes about plugin:\n" - notes += fmt.Sprintf(" Path: %s\n", path) - notes += fmt.Sprintf(" Mode: %s\n", stat.Mode()) - - if elfFile, err := elf.Open(path); err == nil { - defer elfFile.Close() - notes += fmt.Sprintf(" ELF architecture: %s (current architecture: %s)\n", elfFile.Machine, runtime.GOARCH) - } else if machoFile, err := macho.Open(path); err == nil { - defer machoFile.Close() - notes += fmt.Sprintf(" MachO architecture: %s (current architecture: %s)\n", machoFile.Cpu, runtime.GOARCH) - } else if peFile, err := pe.Open(path); err == nil { - defer peFile.Close() - machine, ok := peTypes[peFile.Machine] - if !ok { - machine = "unknown" - } - notes += fmt.Sprintf(" PE architecture: %s (current architecture: %s)\n", machine, runtime.GOARCH) - } - return notes -} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process.go deleted file mode 100644 index 6c34dc7747f..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package cmdrunner - -import "time" - -// pidAlive checks whether a pid is alive. -func pidAlive(pid int) bool { - return _pidAlive(pid) -} - -// pidWait blocks for a process to exit. -func pidWait(pid int) error { - ticker := time.NewTicker(1 * time.Second) - defer ticker.Stop() - - for range ticker.C { - if !pidAlive(pid) { - break - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process_posix.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process_posix.go deleted file mode 100644 index bf3fc5b683e..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process_posix.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -//go:build !windows -// +build !windows - -package cmdrunner - -import ( - "os" - "syscall" -) - -// _pidAlive tests whether a process is alive or not by sending it Signal 0, -// since Go otherwise has no way to test this. -func _pidAlive(pid int) bool { - proc, err := os.FindProcess(pid) - if err == nil { - err = proc.Signal(syscall.Signal(0)) - } - - return err == nil -} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process_windows.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process_windows.go deleted file mode 100644 index 6c39df28f7e..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process_windows.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
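Editorial aside: the signal-0 probe in _pidAlive above is the standard Unix liveness test — sending signal 0 performs the existence and permission checks without delivering anything to the target. A standalone, Unix-only sketch:

//go:build !windows

package main

import (
	"fmt"
	"os"
	"syscall"
)

func alive(pid int) bool {
	proc, err := os.FindProcess(pid) // never fails on Unix
	if err != nil {
		return false
	}
	return proc.Signal(syscall.Signal(0)) == nil
}

func main() {
	fmt.Println(alive(os.Getpid())) // true: our own pid is certainly alive
}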
-// SPDX-License-Identifier: MPL-2.0 - -package cmdrunner - -import ( - "syscall" -) - -const ( - // Weird name but matches the MSDN docs - exit_STILL_ACTIVE = 259 - - processDesiredAccess = syscall.STANDARD_RIGHTS_READ | - syscall.PROCESS_QUERY_INFORMATION | - syscall.SYNCHRONIZE -) - -// _pidAlive tests whether a process is alive or not -func _pidAlive(pid int) bool { - h, err := syscall.OpenProcess(processDesiredAccess, false, uint32(pid)) - if err != nil { - return false - } - defer syscall.CloseHandle(h) - - var ec uint32 - if e := syscall.GetExitCodeProcess(h, &ec); e != nil { - return false - } - - return ec == exit_STILL_ACTIVE -} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_client_listener.go b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_client_listener.go deleted file mode 100644 index e8a3a152a13..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_client_listener.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package grpcmux - -import ( - "io" - "net" - - "github.com/hashicorp/yamux" -) - -var _ net.Listener = (*blockedClientListener)(nil) - -// blockedClientListener accepts connections for a specific gRPC broker stream -// ID on the client (host) side of the connection. -type blockedClientListener struct { - session *yamux.Session - waitCh chan struct{} - doneCh <-chan struct{} -} - -func newBlockedClientListener(session *yamux.Session, doneCh <-chan struct{}) *blockedClientListener { - return &blockedClientListener{ - waitCh: make(chan struct{}, 1), - doneCh: doneCh, - session: session, - } -} - -func (b *blockedClientListener) Accept() (net.Conn, error) { - select { - case <-b.waitCh: - return b.session.Accept() - case <-b.doneCh: - return nil, io.EOF - } -} - -func (b *blockedClientListener) Addr() net.Addr { - return b.session.Addr() -} - -func (b *blockedClientListener) Close() error { - // We don't close the session, the client muxer is responsible for that. - return nil -} - -func (b *blockedClientListener) unblock() { - b.waitCh <- struct{}{} -} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_server_listener.go b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_server_listener.go deleted file mode 100644 index 0edb2c05d26..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_server_listener.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package grpcmux - -import ( - "io" - "net" -) - -var _ net.Listener = (*blockedServerListener)(nil) - -// blockedServerListener accepts connections for a specific gRPC broker stream -// ID on the server (plugin) side of the connection. 
-type blockedServerListener struct { - addr net.Addr - acceptCh chan acceptResult - doneCh <-chan struct{} -} - -type acceptResult struct { - conn net.Conn - err error -} - -func newBlockedServerListener(addr net.Addr, doneCh <-chan struct{}) *blockedServerListener { - return &blockedServerListener{ - addr: addr, - acceptCh: make(chan acceptResult), - doneCh: doneCh, - } -} - -func (b *blockedServerListener) Accept() (net.Conn, error) { - select { - case accept := <-b.acceptCh: - return accept.conn, accept.err - case <-b.doneCh: - return nil, io.EOF - } -} - -func (b *blockedServerListener) Addr() net.Addr { - return b.addr -} - -func (b *blockedServerListener) Close() error { - return nil -} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_client_muxer.go b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_client_muxer.go deleted file mode 100644 index b203ba467b2..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_client_muxer.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package grpcmux - -import ( - "fmt" - "net" - "sync" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/yamux" -) - -var _ GRPCMuxer = (*GRPCClientMuxer)(nil) - -// GRPCClientMuxer implements the client (host) side of the gRPC broker's -// GRPCMuxer interface for multiplexing multiple gRPC broker connections over -// a single net.Conn. -// -// The client dials the initial net.Conn eagerly, and creates a yamux.Session -// as the implementation for multiplexing any additional connections. -// -// Each net.Listener returned from Listener will block until the client receives -// a knock that matches its gRPC broker stream ID. There is no default listener -// on the client, as it is a client for the gRPC broker's control services. (See -// GRPCServerMuxer for more details). -type GRPCClientMuxer struct { - logger hclog.Logger - session *yamux.Session - - acceptMutex sync.Mutex - acceptListeners map[uint32]*blockedClientListener -} - -func NewGRPCClientMuxer(logger hclog.Logger, addr net.Addr) (*GRPCClientMuxer, error) { - // Eagerly establish the underlying connection as early as possible. 
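Editorial aside: the client and server muxers in these hunks are thin coordination layers over hashicorp/yamux sessions. The underlying yamux primitives in isolation (the address is hypothetical and error handling is abbreviated):

package main

import (
	"net"

	"github.com/hashicorp/yamux"
)

func main() {
	conn, err := net.Dial("tcp", "127.0.0.1:4200") // hypothetical broker address
	if err != nil {
		panic(err)
	}
	session, err := yamux.Client(conn, yamux.DefaultConfig())
	if err != nil {
		panic(err)
	}
	defer session.Close()

	stream, err := session.Open() // each stream is an independent net.Conn
	if err != nil {
		panic(err)
	}
	stream.Write([]byte("hello over one multiplexed stream"))
	stream.Close()
}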
- logger.Debug("making new client mux initial connection", "addr", addr) - conn, err := net.Dial(addr.Network(), addr.String()) - if err != nil { - return nil, err - } - if tcpConn, ok := conn.(*net.TCPConn); ok { - // Make sure to set keep alive so that the connection doesn't die - _ = tcpConn.SetKeepAlive(true) - } - - cfg := yamux.DefaultConfig() - cfg.Logger = logger.Named("yamux").StandardLogger(&hclog.StandardLoggerOptions{ - InferLevels: true, - }) - cfg.LogOutput = nil - sess, err := yamux.Client(conn, cfg) - if err != nil { - return nil, err - } - - logger.Debug("client muxer connected", "addr", addr) - m := &GRPCClientMuxer{ - logger: logger, - session: sess, - acceptListeners: make(map[uint32]*blockedClientListener), - } - - return m, nil -} - -func (m *GRPCClientMuxer) Enabled() bool { - return m != nil -} - -func (m *GRPCClientMuxer) Listener(id uint32, doneCh <-chan struct{}) (net.Listener, error) { - ln := newBlockedClientListener(m.session, doneCh) - - m.acceptMutex.Lock() - m.acceptListeners[id] = ln - m.acceptMutex.Unlock() - - return ln, nil -} - -func (m *GRPCClientMuxer) AcceptKnock(id uint32) error { - m.acceptMutex.Lock() - defer m.acceptMutex.Unlock() - - ln, ok := m.acceptListeners[id] - if !ok { - return fmt.Errorf("no listener for id %d", id) - } - ln.unblock() - return nil -} - -func (m *GRPCClientMuxer) Dial() (net.Conn, error) { - stream, err := m.session.Open() - if err != nil { - return nil, fmt.Errorf("error dialling new client stream: %w", err) - } - - return stream, nil -} - -func (m *GRPCClientMuxer) Close() error { - return m.session.Close() -} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_muxer.go b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_muxer.go deleted file mode 100644 index c52aaf553e9..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_muxer.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package grpcmux - -import ( - "net" -) - -// GRPCMuxer enables multiple implementations of net.Listener to accept -// connections over a single "main" multiplexed net.Conn, and dial multiple -// client connections over the same multiplexed net.Conn. -// -// The first multiplexed connection is used to serve the gRPC broker's own -// control services: plugin.GRPCBroker, plugin.GRPCController, plugin.GRPCStdio. -// -// Clients must "knock" before dialling, to tell the server side that the -// next net.Conn should be accepted onto a specific stream ID. The knock is a -// bidirectional streaming message on the plugin.GRPCBroker service. -type GRPCMuxer interface { - // Enabled determines whether multiplexing should be used. It saves users - // of the interface from having to compare an interface with nil, which - // is a bit awkward to do correctly. - Enabled() bool - - // Listener returns a multiplexed listener that will wait until AcceptKnock - // is called with a matching ID before its Accept function returns. - Listener(id uint32, doneCh <-chan struct{}) (net.Listener, error) - - // AcceptKnock unblocks the listener with the matching ID, and returns an - // error if it hasn't been created yet. - AcceptKnock(id uint32) error - - // Dial makes a new multiplexed client connection. To dial a specific ID, - // a knock must be sent first. - Dial() (net.Conn, error) - - // Close closes connections and releases any resources associated with the - // muxer. 
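Editorial aside: Enabled above leans on the fact that Go methods may be invoked on nil pointer receivers, sidestepping the typed-nil-in-interface comparison the GRPCMuxer docs mention. A minimal demonstration:

package main

import "fmt"

type muxer struct{}

// Enabled is callable even when m is nil; the receiver is just an argument.
func (m *muxer) Enabled() bool { return m != nil }

func main() {
	var m *muxer             // nil pointer
	fmt.Println(m.Enabled()) // prints false instead of panicking
}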
- Close() error -} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_server_muxer.go b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_server_muxer.go deleted file mode 100644 index 27696ee769d..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_server_muxer.go +++ /dev/null @@ -1,190 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package grpcmux - -import ( - "errors" - "fmt" - "net" - "sync" - "time" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/yamux" -) - -var _ GRPCMuxer = (*GRPCServerMuxer)(nil) -var _ net.Listener = (*GRPCServerMuxer)(nil) - -// GRPCServerMuxer implements the server (plugin) side of the gRPC broker's -// GRPCMuxer interface for multiplexing multiple gRPC broker connections over -// a single net.Conn. -// -// The server side needs a listener to serve the gRPC broker's control services, -// which includes the service we will receive knocks on. That means we always -// accept the first connection onto a "default" main listener, and if we accept -// any further connections without receiving a knock first, they are also given -// to the default listener. -// -// When creating additional multiplexed listeners for specific stream IDs, we -// can't control the order in which gRPC servers will call Accept() on each -// listener, but we do need to control which gRPC server accepts which connection. -// As such, each multiplexed listener blocks waiting on a channel. It will be -// unblocked when a knock is received for the matching stream ID. -type GRPCServerMuxer struct { - addr net.Addr - logger hclog.Logger - - sessionErrCh chan error - sess *yamux.Session - - knockCh chan uint32 - - acceptMutex sync.Mutex - acceptChannels map[uint32]chan acceptResult -} - -func NewGRPCServerMuxer(logger hclog.Logger, ln net.Listener) *GRPCServerMuxer { - m := &GRPCServerMuxer{ - addr: ln.Addr(), - logger: logger, - - sessionErrCh: make(chan error), - - knockCh: make(chan uint32, 1), - acceptChannels: make(map[uint32]chan acceptResult), - } - - go m.acceptSession(ln) - - return m -} - -// acceptSession is responsible for establishing the yamux session over the -// first connection accepted from the listener. -func (m *GRPCServerMuxer) acceptSession(ln net.Listener) { - defer close(m.sessionErrCh) - - m.logger.Debug("accepting initial connection", "addr", m.addr) - conn, err := ln.Accept() - if err != nil { - m.sessionErrCh <- err - return - } - - m.logger.Debug("initial server connection accepted", "addr", m.addr) - cfg := yamux.DefaultConfig() - cfg.Logger = m.logger.Named("yamux").StandardLogger(&hclog.StandardLoggerOptions{ - InferLevels: true, - }) - cfg.LogOutput = nil - m.sess, err = yamux.Server(conn, cfg) - if err != nil { - m.sessionErrCh <- err - return - } -} - -func (m *GRPCServerMuxer) session() (*yamux.Session, error) { - select { - case err := <-m.sessionErrCh: - if err != nil { - return nil, err - } - case <-time.After(5 * time.Second): - return nil, errors.New("timed out waiting for connection to be established") - } - - // Should never happen. - if m.sess == nil { - return nil, errors.New("no connection established and no error received") - } - - return m.sess, nil -} - -// Accept accepts all incoming connections and routes them to the correct -// stream ID based on the most recent knock received.
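
Because GRPCServerMuxer satisfies net.Listener (see the interface assertions above), the plugin side can hand it straight to its gRPC server; connections that arrive without a preceding knock fall through to that default listener. A hedged sketch, with service registration elided:

ln, _ := net.Listen("tcp", "127.0.0.1:0")
muxer := grpcmux.NewGRPCServerMuxer(logger, ln)

server := grpc.NewServer()
// ...register the broker control services (GRPCBroker, GRPCController,
// GRPCStdio) on server here...

// Accept() below routes knocked connections to their brokered
// listeners and gives everything else to this default server.
_ = server.Serve(muxer)
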
-func (m *GRPCServerMuxer) Accept() (net.Conn, error) { - session, err := m.session() - if err != nil { - return nil, fmt.Errorf("error establishing yamux session: %w", err) - } - - for { - conn, acceptErr := session.Accept() - - select { - case id := <-m.knockCh: - m.acceptMutex.Lock() - acceptCh, ok := m.acceptChannels[id] - m.acceptMutex.Unlock() - - if !ok { - if conn != nil { - _ = conn.Close() - } - return nil, fmt.Errorf("received knock on ID %d that doesn't have a listener", id) - } - m.logger.Debug("sending conn to brokered listener", "id", id) - acceptCh <- acceptResult{ - conn: conn, - err: acceptErr, - } - default: - m.logger.Debug("sending conn to default listener") - return conn, acceptErr - } - } -} - -func (m *GRPCServerMuxer) Addr() net.Addr { - return m.addr -} - -func (m *GRPCServerMuxer) Close() error { - session, err := m.session() - if err != nil { - return err - } - - return session.Close() -} - -func (m *GRPCServerMuxer) Enabled() bool { - return m != nil -} - -func (m *GRPCServerMuxer) Listener(id uint32, doneCh <-chan struct{}) (net.Listener, error) { - sess, err := m.session() - if err != nil { - return nil, err - } - - ln := newBlockedServerListener(sess.Addr(), doneCh) - m.acceptMutex.Lock() - m.acceptChannels[id] = ln.acceptCh - m.acceptMutex.Unlock() - - return ln, nil -} - -func (m *GRPCServerMuxer) Dial() (net.Conn, error) { - sess, err := m.session() - if err != nil { - return nil, err - } - - stream, err := sess.OpenStream() - if err != nil { - return nil, fmt.Errorf("error dialling new server stream: %w", err) - } - - return stream, nil -} - -func (m *GRPCServerMuxer) AcceptKnock(id uint32) error { - m.knockCh <- id - return nil -} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go deleted file mode 100644 index acc6dc9c77f..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.31.0 -// protoc (unknown) -// source: internal/plugin/grpc_broker.proto - -package plugin - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type ConnInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ServiceId uint32 `protobuf:"varint,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` - Network string `protobuf:"bytes,2,opt,name=network,proto3" json:"network,omitempty"` - Address string `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"` - Knock *ConnInfo_Knock `protobuf:"bytes,4,opt,name=knock,proto3" json:"knock,omitempty"` -} - -func (x *ConnInfo) Reset() { - *x = ConnInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_plugin_grpc_broker_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ConnInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ConnInfo) ProtoMessage() {} - -func (x *ConnInfo) ProtoReflect() protoreflect.Message { - mi := &file_internal_plugin_grpc_broker_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ConnInfo.ProtoReflect.Descriptor instead. -func (*ConnInfo) Descriptor() ([]byte, []int) { - return file_internal_plugin_grpc_broker_proto_rawDescGZIP(), []int{0} -} - -func (x *ConnInfo) GetServiceId() uint32 { - if x != nil { - return x.ServiceId - } - return 0 -} - -func (x *ConnInfo) GetNetwork() string { - if x != nil { - return x.Network - } - return "" -} - -func (x *ConnInfo) GetAddress() string { - if x != nil { - return x.Address - } - return "" -} - -func (x *ConnInfo) GetKnock() *ConnInfo_Knock { - if x != nil { - return x.Knock - } - return nil -} - -type ConnInfo_Knock struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Knock bool `protobuf:"varint,1,opt,name=knock,proto3" json:"knock,omitempty"` - Ack bool `protobuf:"varint,2,opt,name=ack,proto3" json:"ack,omitempty"` - Error string `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"` -} - -func (x *ConnInfo_Knock) Reset() { - *x = ConnInfo_Knock{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_plugin_grpc_broker_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ConnInfo_Knock) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ConnInfo_Knock) ProtoMessage() {} - -func (x *ConnInfo_Knock) ProtoReflect() protoreflect.Message { - mi := &file_internal_plugin_grpc_broker_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ConnInfo_Knock.ProtoReflect.Descriptor instead. 
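
On the wire, a knock is just a ConnInfo whose nested Knock message is set. An illustrative value using the generated types above (the ID is arbitrary):

knock := &plugin.ConnInfo{
    ServiceId: 7, // broker stream ID the next conn should be routed to
    Knock: &plugin.ConnInfo_Knock{
        Knock: true, // request; the peer replies with Ack (or Error) set
    },
}
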
-func (*ConnInfo_Knock) Descriptor() ([]byte, []int) { - return file_internal_plugin_grpc_broker_proto_rawDescGZIP(), []int{0, 0} -} - -func (x *ConnInfo_Knock) GetKnock() bool { - if x != nil { - return x.Knock - } - return false -} - -func (x *ConnInfo_Knock) GetAck() bool { - if x != nil { - return x.Ack - } - return false -} - -func (x *ConnInfo_Knock) GetError() string { - if x != nil { - return x.Error - } - return "" -} - -var File_internal_plugin_grpc_broker_proto protoreflect.FileDescriptor - -var file_internal_plugin_grpc_broker_proto_rawDesc = []byte{ - 0x0a, 0x21, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x62, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x22, 0xd2, 0x01, 0x0a, 0x08, - 0x43, 0x6f, 0x6e, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, - 0x72, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, - 0x6b, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x2c, 0x0a, 0x05, 0x6b, - 0x6e, 0x6f, 0x63, 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4b, 0x6e, 0x6f, - 0x63, 0x6b, 0x52, 0x05, 0x6b, 0x6e, 0x6f, 0x63, 0x6b, 0x1a, 0x45, 0x0a, 0x05, 0x4b, 0x6e, 0x6f, - 0x63, 0x6b, 0x12, 0x14, 0x0a, 0x05, 0x6b, 0x6e, 0x6f, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x05, 0x6b, 0x6e, 0x6f, 0x63, 0x6b, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x63, 0x6b, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x61, 0x63, 0x6b, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x32, 0x43, 0x0a, 0x0a, 0x47, 0x52, 0x50, 0x43, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x12, 0x35, - 0x0a, 0x0b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x10, 0x2e, - 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x1a, - 0x10, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x49, 0x6e, 0x66, - 0x6f, 0x28, 0x01, 0x30, 0x01, 0x42, 0x0a, 0x5a, 0x08, 0x2e, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_internal_plugin_grpc_broker_proto_rawDescOnce sync.Once - file_internal_plugin_grpc_broker_proto_rawDescData = file_internal_plugin_grpc_broker_proto_rawDesc -) - -func file_internal_plugin_grpc_broker_proto_rawDescGZIP() []byte { - file_internal_plugin_grpc_broker_proto_rawDescOnce.Do(func() { - file_internal_plugin_grpc_broker_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_plugin_grpc_broker_proto_rawDescData) - }) - return file_internal_plugin_grpc_broker_proto_rawDescData -} - -var file_internal_plugin_grpc_broker_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_internal_plugin_grpc_broker_proto_goTypes = []interface{}{ - (*ConnInfo)(nil), // 0: plugin.ConnInfo - (*ConnInfo_Knock)(nil), // 1: plugin.ConnInfo.Knock -} -var file_internal_plugin_grpc_broker_proto_depIdxs = []int32{ - 1, // 0: 
plugin.ConnInfo.knock:type_name -> plugin.ConnInfo.Knock - 0, // 1: plugin.GRPCBroker.StartStream:input_type -> plugin.ConnInfo - 0, // 2: plugin.GRPCBroker.StartStream:output_type -> plugin.ConnInfo - 2, // [2:3] is the sub-list for method output_type - 1, // [1:2] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name -} - -func init() { file_internal_plugin_grpc_broker_proto_init() } -func file_internal_plugin_grpc_broker_proto_init() { - if File_internal_plugin_grpc_broker_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_internal_plugin_grpc_broker_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ConnInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_plugin_grpc_broker_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ConnInfo_Knock); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_internal_plugin_grpc_broker_proto_rawDesc, - NumEnums: 0, - NumMessages: 2, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_internal_plugin_grpc_broker_proto_goTypes, - DependencyIndexes: file_internal_plugin_grpc_broker_proto_depIdxs, - MessageInfos: file_internal_plugin_grpc_broker_proto_msgTypes, - }.Build() - File_internal_plugin_grpc_broker_proto = out.File - file_internal_plugin_grpc_broker_proto_rawDesc = nil - file_internal_plugin_grpc_broker_proto_goTypes = nil - file_internal_plugin_grpc_broker_proto_depIdxs = nil -} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto deleted file mode 100644 index c92cd645cb6..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -syntax = "proto3"; -package plugin; -option go_package = "./plugin"; - -message ConnInfo { - uint32 service_id = 1; - string network = 2; - string address = 3; - message Knock { - bool knock = 1; - bool ack = 2; - string error = 3; - } - Knock knock = 4; -} - -service GRPCBroker { - rpc StartStream(stream ConnInfo) returns (stream ConnInfo); -} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker_grpc.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker_grpc.pb.go deleted file mode 100644 index 1b0f80705d8..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker_grpc.pb.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
-// versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc (unknown) -// source: internal/plugin/grpc_broker.proto - -package plugin - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -const ( - GRPCBroker_StartStream_FullMethodName = "/plugin.GRPCBroker/StartStream" -) - -// GRPCBrokerClient is the client API for GRPCBroker service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type GRPCBrokerClient interface { - StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error) -} - -type gRPCBrokerClient struct { - cc grpc.ClientConnInterface -} - -func NewGRPCBrokerClient(cc grpc.ClientConnInterface) GRPCBrokerClient { - return &gRPCBrokerClient{cc} -} - -func (c *gRPCBrokerClient) StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error) { - stream, err := c.cc.NewStream(ctx, &GRPCBroker_ServiceDesc.Streams[0], GRPCBroker_StartStream_FullMethodName, opts...) - if err != nil { - return nil, err - } - x := &gRPCBrokerStartStreamClient{stream} - return x, nil -} - -type GRPCBroker_StartStreamClient interface { - Send(*ConnInfo) error - Recv() (*ConnInfo, error) - grpc.ClientStream -} - -type gRPCBrokerStartStreamClient struct { - grpc.ClientStream -} - -func (x *gRPCBrokerStartStreamClient) Send(m *ConnInfo) error { - return x.ClientStream.SendMsg(m) -} - -func (x *gRPCBrokerStartStreamClient) Recv() (*ConnInfo, error) { - m := new(ConnInfo) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// GRPCBrokerServer is the server API for GRPCBroker service. -// All implementations should embed UnimplementedGRPCBrokerServer -// for forward compatibility -type GRPCBrokerServer interface { - StartStream(GRPCBroker_StartStreamServer) error -} - -// UnimplementedGRPCBrokerServer should be embedded to have forward compatible implementations. -type UnimplementedGRPCBrokerServer struct { -} - -func (UnimplementedGRPCBrokerServer) StartStream(GRPCBroker_StartStreamServer) error { - return status.Errorf(codes.Unimplemented, "method StartStream not implemented") -} - -// UnsafeGRPCBrokerServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to GRPCBrokerServer will -// result in compilation errors. 
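
Putting the pieces together, a knock travels over the bidirectional StartStream RPC. A minimal sketch assuming an established *grpc.ClientConn cc, with error handling mostly elided:

client := plugin.NewGRPCBrokerClient(cc)
stream, _ := client.StartStream(context.Background())

// Ask the peer to route its next connection to broker stream ID 7.
_ = stream.Send(&plugin.ConnInfo{
    ServiceId: 7,
    Knock:     &plugin.ConnInfo_Knock{Knock: true},
})

// The peer answers on the same stream with Ack (or an Error string).
resp, _ := stream.Recv()
if k := resp.GetKnock(); k != nil && !k.GetAck() {
    return fmt.Errorf("knock refused: %s", k.GetError())
}
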
-type UnsafeGRPCBrokerServer interface { - mustEmbedUnimplementedGRPCBrokerServer() -} - -func RegisterGRPCBrokerServer(s grpc.ServiceRegistrar, srv GRPCBrokerServer) { - s.RegisterService(&GRPCBroker_ServiceDesc, srv) -} - -func _GRPCBroker_StartStream_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(GRPCBrokerServer).StartStream(&gRPCBrokerStartStreamServer{stream}) -} - -type GRPCBroker_StartStreamServer interface { - Send(*ConnInfo) error - Recv() (*ConnInfo, error) - grpc.ServerStream -} - -type gRPCBrokerStartStreamServer struct { - grpc.ServerStream -} - -func (x *gRPCBrokerStartStreamServer) Send(m *ConnInfo) error { - return x.ServerStream.SendMsg(m) -} - -func (x *gRPCBrokerStartStreamServer) Recv() (*ConnInfo, error) { - m := new(ConnInfo) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// GRPCBroker_ServiceDesc is the grpc.ServiceDesc for GRPCBroker service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var GRPCBroker_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "plugin.GRPCBroker", - HandlerType: (*GRPCBrokerServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "StartStream", - Handler: _GRPCBroker_StartStream_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "internal/plugin/grpc_broker.proto", -} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go deleted file mode 100644 index 8ca48e0d92d..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.31.0 -// protoc (unknown) -// source: internal/plugin/grpc_controller.proto - -package plugin - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type Empty struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *Empty) Reset() { - *x = Empty{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_plugin_grpc_controller_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Empty) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Empty) ProtoMessage() {} - -func (x *Empty) ProtoReflect() protoreflect.Message { - mi := &file_internal_plugin_grpc_controller_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Empty.ProtoReflect.Descriptor instead. 
-func (*Empty) Descriptor() ([]byte, []int) { - return file_internal_plugin_grpc_controller_proto_rawDescGZIP(), []int{0} -} - -var File_internal_plugin_grpc_controller_proto protoreflect.FileDescriptor - -var file_internal_plugin_grpc_controller_proto_rawDesc = []byte{ - 0x0a, 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, - 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x22, - 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x32, 0x3a, 0x0a, 0x0e, 0x47, 0x52, 0x50, 0x43, - 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x12, 0x28, 0x0a, 0x08, 0x53, 0x68, - 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x12, 0x0d, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x0d, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x42, 0x0a, 0x5a, 0x08, 0x2e, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_internal_plugin_grpc_controller_proto_rawDescOnce sync.Once - file_internal_plugin_grpc_controller_proto_rawDescData = file_internal_plugin_grpc_controller_proto_rawDesc -) - -func file_internal_plugin_grpc_controller_proto_rawDescGZIP() []byte { - file_internal_plugin_grpc_controller_proto_rawDescOnce.Do(func() { - file_internal_plugin_grpc_controller_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_plugin_grpc_controller_proto_rawDescData) - }) - return file_internal_plugin_grpc_controller_proto_rawDescData -} - -var file_internal_plugin_grpc_controller_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_internal_plugin_grpc_controller_proto_goTypes = []interface{}{ - (*Empty)(nil), // 0: plugin.Empty -} -var file_internal_plugin_grpc_controller_proto_depIdxs = []int32{ - 0, // 0: plugin.GRPCController.Shutdown:input_type -> plugin.Empty - 0, // 1: plugin.GRPCController.Shutdown:output_type -> plugin.Empty - 1, // [1:2] is the sub-list for method output_type - 0, // [0:1] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_internal_plugin_grpc_controller_proto_init() } -func file_internal_plugin_grpc_controller_proto_init() { - if File_internal_plugin_grpc_controller_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_internal_plugin_grpc_controller_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Empty); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_internal_plugin_grpc_controller_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_internal_plugin_grpc_controller_proto_goTypes, - DependencyIndexes: file_internal_plugin_grpc_controller_proto_depIdxs, - MessageInfos: file_internal_plugin_grpc_controller_proto_msgTypes, - }.Build() - File_internal_plugin_grpc_controller_proto = out.File - file_internal_plugin_grpc_controller_proto_rawDesc = nil - file_internal_plugin_grpc_controller_proto_goTypes = nil - file_internal_plugin_grpc_controller_proto_depIdxs = nil 
-} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto deleted file mode 100644 index 2755fa638b5..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -syntax = "proto3"; -package plugin; -option go_package = "./plugin"; - -message Empty { -} - -// The GRPCController is responsible for telling the plugin server to shutdown. -service GRPCController { - rpc Shutdown(Empty) returns (Empty); -} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller_grpc.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller_grpc.pb.go deleted file mode 100644 index 427611aa00f..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller_grpc.pb.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc (unknown) -// source: internal/plugin/grpc_controller.proto - -package plugin - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -const ( - GRPCController_Shutdown_FullMethodName = "/plugin.GRPCController/Shutdown" -) - -// GRPCControllerClient is the client API for GRPCController service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type GRPCControllerClient interface { - Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) -} - -type gRPCControllerClient struct { - cc grpc.ClientConnInterface -} - -func NewGRPCControllerClient(cc grpc.ClientConnInterface) GRPCControllerClient { - return &gRPCControllerClient{cc} -} - -func (c *gRPCControllerClient) Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { - out := new(Empty) - err := c.cc.Invoke(ctx, GRPCController_Shutdown_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// GRPCControllerServer is the server API for GRPCController service. -// All implementations should embed UnimplementedGRPCControllerServer -// for forward compatibility -type GRPCControllerServer interface { - Shutdown(context.Context, *Empty) (*Empty, error) -} - -// UnimplementedGRPCControllerServer should be embedded to have forward compatible implementations. -type UnimplementedGRPCControllerServer struct { -} - -func (UnimplementedGRPCControllerServer) Shutdown(context.Context, *Empty) (*Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Shutdown not implemented") -} - -// UnsafeGRPCControllerServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to GRPCControllerServer will -// result in compilation errors. 
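
The controller service exists so the host can request a clean exit before resorting to killing the process. A hedged sketch of the host-side call, assuming an established *grpc.ClientConn cc:

controller := plugin.NewGRPCControllerClient(cc)
// An error here is usually benign: the plugin may already be gone.
_, _ = controller.Shutdown(context.Background(), &plugin.Empty{})
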
-type UnsafeGRPCControllerServer interface { - mustEmbedUnimplementedGRPCControllerServer() -} - -func RegisterGRPCControllerServer(s grpc.ServiceRegistrar, srv GRPCControllerServer) { - s.RegisterService(&GRPCController_ServiceDesc, srv) -} - -func _GRPCController_Shutdown_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(GRPCControllerServer).Shutdown(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: GRPCController_Shutdown_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(GRPCControllerServer).Shutdown(ctx, req.(*Empty)) - } - return interceptor(ctx, in, info, handler) -} - -// GRPCController_ServiceDesc is the grpc.ServiceDesc for GRPCController service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var GRPCController_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "plugin.GRPCController", - HandlerType: (*GRPCControllerServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Shutdown", - Handler: _GRPCController_Shutdown_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "internal/plugin/grpc_controller.proto", -} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go deleted file mode 100644 index 139cbb4a90b..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go +++ /dev/null @@ -1,225 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.31.0 -// protoc (unknown) -// source: internal/plugin/grpc_stdio.proto - -package plugin - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - emptypb "google.golang.org/protobuf/types/known/emptypb" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type StdioData_Channel int32 - -const ( - StdioData_INVALID StdioData_Channel = 0 - StdioData_STDOUT StdioData_Channel = 1 - StdioData_STDERR StdioData_Channel = 2 -) - -// Enum value maps for StdioData_Channel. 
-var ( - StdioData_Channel_name = map[int32]string{ - 0: "INVALID", - 1: "STDOUT", - 2: "STDERR", - } - StdioData_Channel_value = map[string]int32{ - "INVALID": 0, - "STDOUT": 1, - "STDERR": 2, - } -) - -func (x StdioData_Channel) Enum() *StdioData_Channel { - p := new(StdioData_Channel) - *p = x - return p -} - -func (x StdioData_Channel) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (StdioData_Channel) Descriptor() protoreflect.EnumDescriptor { - return file_internal_plugin_grpc_stdio_proto_enumTypes[0].Descriptor() -} - -func (StdioData_Channel) Type() protoreflect.EnumType { - return &file_internal_plugin_grpc_stdio_proto_enumTypes[0] -} - -func (x StdioData_Channel) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use StdioData_Channel.Descriptor instead. -func (StdioData_Channel) EnumDescriptor() ([]byte, []int) { - return file_internal_plugin_grpc_stdio_proto_rawDescGZIP(), []int{0, 0} -} - -// StdioData is a single chunk of stdout or stderr data that is streamed -// from GRPCStdio. -type StdioData struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Channel StdioData_Channel `protobuf:"varint,1,opt,name=channel,proto3,enum=plugin.StdioData_Channel" json:"channel,omitempty"` - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` -} - -func (x *StdioData) Reset() { - *x = StdioData{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_plugin_grpc_stdio_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StdioData) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StdioData) ProtoMessage() {} - -func (x *StdioData) ProtoReflect() protoreflect.Message { - mi := &file_internal_plugin_grpc_stdio_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StdioData.ProtoReflect.Descriptor instead. 
-func (*StdioData) Descriptor() ([]byte, []int) { - return file_internal_plugin_grpc_stdio_proto_rawDescGZIP(), []int{0} -} - -func (x *StdioData) GetChannel() StdioData_Channel { - if x != nil { - return x.Channel - } - return StdioData_INVALID -} - -func (x *StdioData) GetData() []byte { - if x != nil { - return x.Data - } - return nil -} - -var File_internal_plugin_grpc_stdio_proto protoreflect.FileDescriptor - -var file_internal_plugin_grpc_stdio_proto_rawDesc = []byte{ - 0x0a, 0x20, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x74, 0x64, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x12, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, - 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x84, 0x01, 0x0a, 0x09, 0x53, 0x74, 0x64, 0x69, - 0x6f, 0x44, 0x61, 0x74, 0x61, 0x12, 0x33, 0x0a, 0x07, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, - 0x53, 0x74, 0x64, 0x69, 0x6f, 0x44, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, - 0x6c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, - 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x2e, - 0x0a, 0x07, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56, - 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x44, 0x4f, 0x55, 0x54, - 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x44, 0x45, 0x52, 0x52, 0x10, 0x02, 0x32, 0x47, - 0x0a, 0x09, 0x47, 0x52, 0x50, 0x43, 0x53, 0x74, 0x64, 0x69, 0x6f, 0x12, 0x3a, 0x0a, 0x0b, 0x53, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x53, 0x74, 0x64, 0x69, 0x6f, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x1a, 0x11, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x53, 0x74, 0x64, 0x69, - 0x6f, 0x44, 0x61, 0x74, 0x61, 0x30, 0x01, 0x42, 0x0a, 0x5a, 0x08, 0x2e, 0x2f, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_internal_plugin_grpc_stdio_proto_rawDescOnce sync.Once - file_internal_plugin_grpc_stdio_proto_rawDescData = file_internal_plugin_grpc_stdio_proto_rawDesc -) - -func file_internal_plugin_grpc_stdio_proto_rawDescGZIP() []byte { - file_internal_plugin_grpc_stdio_proto_rawDescOnce.Do(func() { - file_internal_plugin_grpc_stdio_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_plugin_grpc_stdio_proto_rawDescData) - }) - return file_internal_plugin_grpc_stdio_proto_rawDescData -} - -var file_internal_plugin_grpc_stdio_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_internal_plugin_grpc_stdio_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_internal_plugin_grpc_stdio_proto_goTypes = []interface{}{ - (StdioData_Channel)(0), // 0: plugin.StdioData.Channel - (*StdioData)(nil), // 1: plugin.StdioData - (*emptypb.Empty)(nil), // 2: google.protobuf.Empty -} -var file_internal_plugin_grpc_stdio_proto_depIdxs = []int32{ - 0, // 0: plugin.StdioData.channel:type_name -> plugin.StdioData.Channel - 2, // 1: plugin.GRPCStdio.StreamStdio:input_type -> google.protobuf.Empty - 1, // 2: plugin.GRPCStdio.StreamStdio:output_type -> plugin.StdioData - 2, // [2:3] is the sub-list for method output_type 
- 1, // [1:2] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name -} - -func init() { file_internal_plugin_grpc_stdio_proto_init() } -func file_internal_plugin_grpc_stdio_proto_init() { - if File_internal_plugin_grpc_stdio_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_internal_plugin_grpc_stdio_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StdioData); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_internal_plugin_grpc_stdio_proto_rawDesc, - NumEnums: 1, - NumMessages: 1, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_internal_plugin_grpc_stdio_proto_goTypes, - DependencyIndexes: file_internal_plugin_grpc_stdio_proto_depIdxs, - EnumInfos: file_internal_plugin_grpc_stdio_proto_enumTypes, - MessageInfos: file_internal_plugin_grpc_stdio_proto_msgTypes, - }.Build() - File_internal_plugin_grpc_stdio_proto = out.File - file_internal_plugin_grpc_stdio_proto_rawDesc = nil - file_internal_plugin_grpc_stdio_proto_goTypes = nil - file_internal_plugin_grpc_stdio_proto_depIdxs = nil -} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto deleted file mode 100644 index f48ac76c978..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -syntax = "proto3"; -package plugin; -option go_package = "./plugin"; - -import "google/protobuf/empty.proto"; - -// GRPCStdio is a service that is automatically run by the plugin process -// to stream any stdout/err data so that it can be mirrored on the plugin -// host side. -service GRPCStdio { - // StreamStdio returns a stream that contains all the stdout/stderr. - // This RPC endpoint must only be called ONCE. Once stdio data is consumed - // it is not sent again. - // - // Callers should connect early to prevent blocking on the plugin process. - rpc StreamStdio(google.protobuf.Empty) returns (stream StdioData); -} - -// StdioData is a single chunk of stdout or stderr data that is streamed -// from GRPCStdio. -message StdioData { - enum Channel { - INVALID = 0; - STDOUT = 1; - STDERR = 2; - } - - Channel channel = 1; - bytes data = 2; -} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio_grpc.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio_grpc.pb.go deleted file mode 100644 index f82b1503502..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio_grpc.pb.go +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
-// versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc (unknown) -// source: internal/plugin/grpc_stdio.proto - -package plugin - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - emptypb "google.golang.org/protobuf/types/known/emptypb" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -const ( - GRPCStdio_StreamStdio_FullMethodName = "/plugin.GRPCStdio/StreamStdio" -) - -// GRPCStdioClient is the client API for GRPCStdio service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type GRPCStdioClient interface { - // StreamStdio returns a stream that contains all the stdout/stderr. - // This RPC endpoint must only be called ONCE. Once stdio data is consumed - // it is not sent again. - // - // Callers should connect early to prevent blocking on the plugin process. - StreamStdio(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (GRPCStdio_StreamStdioClient, error) -} - -type gRPCStdioClient struct { - cc grpc.ClientConnInterface -} - -func NewGRPCStdioClient(cc grpc.ClientConnInterface) GRPCStdioClient { - return &gRPCStdioClient{cc} -} - -func (c *gRPCStdioClient) StreamStdio(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (GRPCStdio_StreamStdioClient, error) { - stream, err := c.cc.NewStream(ctx, &GRPCStdio_ServiceDesc.Streams[0], GRPCStdio_StreamStdio_FullMethodName, opts...) - if err != nil { - return nil, err - } - x := &gRPCStdioStreamStdioClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type GRPCStdio_StreamStdioClient interface { - Recv() (*StdioData, error) - grpc.ClientStream -} - -type gRPCStdioStreamStdioClient struct { - grpc.ClientStream -} - -func (x *gRPCStdioStreamStdioClient) Recv() (*StdioData, error) { - m := new(StdioData) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// GRPCStdioServer is the server API for GRPCStdio service. -// All implementations should embed UnimplementedGRPCStdioServer -// for forward compatibility -type GRPCStdioServer interface { - // StreamStdio returns a stream that contains all the stdout/stderr. - // This RPC endpoint must only be called ONCE. Once stdio data is consumed - // it is not sent again. - // - // Callers should connect early to prevent blocking on the plugin process. - StreamStdio(*emptypb.Empty, GRPCStdio_StreamStdioServer) error -} - -// UnimplementedGRPCStdioServer should be embedded to have forward compatible implementations. -type UnimplementedGRPCStdioServer struct { -} - -func (UnimplementedGRPCStdioServer) StreamStdio(*emptypb.Empty, GRPCStdio_StreamStdioServer) error { - return status.Errorf(codes.Unimplemented, "method StreamStdio not implemented") -} - -// UnsafeGRPCStdioServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to GRPCStdioServer will -// result in compilation errors. 
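
A host-side consumer of this service might look like the following sketch: it calls StreamStdio once, early, and demultiplexes chunks by channel (cc is an assumed *grpc.ClientConn; error handling is minimal):

stdio := plugin.NewGRPCStdioClient(cc)
stream, _ := stdio.StreamStdio(context.Background(), &emptypb.Empty{})

for {
    chunk, err := stream.Recv()
    if err != nil {
        return // stream ended: plugin exited or connection dropped
    }
    switch chunk.Channel {
    case plugin.StdioData_STDOUT:
        _, _ = os.Stdout.Write(chunk.Data)
    case plugin.StdioData_STDERR:
        _, _ = os.Stderr.Write(chunk.Data)
    }
}
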
-type UnsafeGRPCStdioServer interface { - mustEmbedUnimplementedGRPCStdioServer() -} - -func RegisterGRPCStdioServer(s grpc.ServiceRegistrar, srv GRPCStdioServer) { - s.RegisterService(&GRPCStdio_ServiceDesc, srv) -} - -func _GRPCStdio_StreamStdio_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(emptypb.Empty) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(GRPCStdioServer).StreamStdio(m, &gRPCStdioStreamStdioServer{stream}) -} - -type GRPCStdio_StreamStdioServer interface { - Send(*StdioData) error - grpc.ServerStream -} - -type gRPCStdioStreamStdioServer struct { - grpc.ServerStream -} - -func (x *gRPCStdioStreamStdioServer) Send(m *StdioData) error { - return x.ServerStream.SendMsg(m) -} - -// GRPCStdio_ServiceDesc is the grpc.ServiceDesc for GRPCStdio service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var GRPCStdio_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "plugin.GRPCStdio", - HandlerType: (*GRPCStdioServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "StreamStdio", - Handler: _GRPCStdio_StreamStdio_Handler, - ServerStreams: true, - }, - }, - Metadata: "internal/plugin/grpc_stdio.proto", -} diff --git a/vendor/github.com/hashicorp/go-plugin/log_entry.go b/vendor/github.com/hashicorp/go-plugin/log_entry.go deleted file mode 100644 index ab963d56b54..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/log_entry.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package plugin - -import ( - "encoding/json" - "time" -) - -// logEntry is the JSON payload that gets sent to Stderr from the plugin to the host -type logEntry struct { - Message string `json:"@message"` - Level string `json:"@level"` - Timestamp time.Time `json:"timestamp"` - KVPairs []*logEntryKV `json:"kv_pairs"` -} - -// logEntryKV is a key value pair within the Output payload -type logEntryKV struct { - Key string `json:"key"` - Value interface{} `json:"value"` -} - -// flattenKVPairs is used to flatten KVPair slice into []interface{} -// for hclog consumption. -func flattenKVPairs(kvs []*logEntryKV) []interface{} { - var result []interface{} - for _, kv := range kvs { - result = append(result, kv.Key) - result = append(result, kv.Value) - } - - return result -} - -// parseJSON handles parsing JSON output -func parseJSON(input []byte) (*logEntry, error) { - var raw map[string]interface{} - entry := &logEntry{} - - err := json.Unmarshal(input, &raw) - if err != nil { - return nil, err - } - - // Parse hclog-specific objects - if v, ok := raw["@message"]; ok { - entry.Message = v.(string) - delete(raw, "@message") - } - - if v, ok := raw["@level"]; ok { - entry.Level = v.(string) - delete(raw, "@level") - } - - if v, ok := raw["@timestamp"]; ok { - t, err := time.Parse("2006-01-02T15:04:05.000000Z07:00", v.(string)) - if err != nil { - return nil, err - } - entry.Timestamp = t - delete(raw, "@timestamp") - } - - // Parse dynamic KV args from the hclog payload. - for k, v := range raw { - entry.KVPairs = append(entry.KVPairs, &logEntryKV{ - Key: k, - Value: v, - }) - } - - return entry, nil -} diff --git a/vendor/github.com/hashicorp/go-plugin/mtls.go b/vendor/github.com/hashicorp/go-plugin/mtls.go deleted file mode 100644 index 09ecafaf45a..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/mtls.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
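
parseJSON above targets hclog's JSON output: the known @-prefixed keys become fields of logEntry, and everything else is kept as KV pairs. A sketch of in-package usage on one line of plugin stderr (the sample line and logger are illustrative):

line := []byte(`{"@level":"info","@message":"plugin loaded",` +
    `"@timestamp":"2023-01-02T15:04:05.000000Z","path":"/tmp/x"}`)

entry, err := parseJSON(line)
if err == nil {
    // Replay through the host's hclog logger; the leftover keys
    // ("path" here) ride along as flattened key/value arguments.
    logger.Info(entry.Message, flattenKVPairs(entry.KVPairs)...)
}
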
-// SPDX-License-Identifier: MPL-2.0 - -package plugin - -import ( - "bytes" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "math/big" - "time" -) - -// generateCert generates a temporary certificate for plugin authentication. The -// certificate and private key are returned in PEM format. -func generateCert() (cert []byte, privateKey []byte, err error) { - key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) - if err != nil { - return nil, nil, err - } - - serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) - sn, err := rand.Int(rand.Reader, serialNumberLimit) - if err != nil { - return nil, nil, err - } - - host := "localhost" - - template := &x509.Certificate{ - Subject: pkix.Name{ - CommonName: host, - Organization: []string{"HashiCorp"}, - }, - DNSNames: []string{host}, - ExtKeyUsage: []x509.ExtKeyUsage{ - x509.ExtKeyUsageClientAuth, - x509.ExtKeyUsageServerAuth, - }, - KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement | x509.KeyUsageCertSign, - BasicConstraintsValid: true, - SerialNumber: sn, - NotBefore: time.Now().Add(-30 * time.Second), - NotAfter: time.Now().Add(262980 * time.Hour), - IsCA: true, - } - - der, err := x509.CreateCertificate(rand.Reader, template, template, key.Public(), key) - if err != nil { - return nil, nil, err - } - - var certOut bytes.Buffer - if err := pem.Encode(&certOut, &pem.Block{Type: "CERTIFICATE", Bytes: der}); err != nil { - return nil, nil, err - } - - keyBytes, err := x509.MarshalECPrivateKey(key) - if err != nil { - return nil, nil, err - } - - var keyOut bytes.Buffer - if err := pem.Encode(&keyOut, &pem.Block{Type: "EC PRIVATE KEY", Bytes: keyBytes}); err != nil { - return nil, nil, err - } - - cert = certOut.Bytes() - privateKey = keyOut.Bytes() - - return cert, privateKey, nil -} diff --git a/vendor/github.com/hashicorp/go-plugin/mux_broker.go b/vendor/github.com/hashicorp/go-plugin/mux_broker.go deleted file mode 100644 index 4eb1208fbb7..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/mux_broker.go +++ /dev/null @@ -1,207 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package plugin - -import ( - "encoding/binary" - "fmt" - "log" - "net" - "sync" - "sync/atomic" - "time" - - "github.com/hashicorp/yamux" -) - -// MuxBroker is responsible for brokering multiplexed connections by unique ID. - -// -// It is used by plugins to multiplex multiple RPC connections and data -// streams on top of a single connection between the plugin process and the -// host process. -// -// This allows a plugin to request a channel with a specific ID to connect to -// or accept a connection from, and the broker handles the details of -// holding these channels open while they're being negotiated. -// -// The Plugin interface has access to these for both Server and Client. -// The broker can be used by either (optionally) to reserve and connect to -// new multiplexed streams. This is useful for complex args and return values, -// or anything else you might need a data stream for. -type MuxBroker struct { - nextId uint32 - session *yamux.Session - streams map[uint32]*muxBrokerPending - - sync.Mutex -} - -type muxBrokerPending struct { - ch chan net.Conn - doneCh chan struct{} -} - -func newMuxBroker(s *yamux.Session) *MuxBroker { - return &MuxBroker{ - session: s, - streams: make(map[uint32]*muxBrokerPending), - } -} - -// Accept accepts a connection by ID.
-// -// This should not be called multiple times with the same ID at one time. -func (m *MuxBroker) Accept(id uint32) (net.Conn, error) { - var c net.Conn - p := m.getStream(id) - select { - case c = <-p.ch: - close(p.doneCh) - case <-time.After(5 * time.Second): - m.Lock() - defer m.Unlock() - delete(m.streams, id) - - return nil, fmt.Errorf("timeout waiting for accept") - } - - // Ack our connection - if err := binary.Write(c, binary.LittleEndian, id); err != nil { - c.Close() - return nil, err - } - - return c, nil -} - -// AcceptAndServe is used to accept a specific stream ID and immediately -// serve an RPC server on that stream ID. This is used to easily serve -// complex arguments. -// -// The served interface is always registered to the "Plugin" name. -func (m *MuxBroker) AcceptAndServe(id uint32, v interface{}) { - conn, err := m.Accept(id) - if err != nil { - log.Printf("[ERR] plugin: plugin acceptAndServe error: %s", err) - return - } - - serve(conn, "Plugin", v) -} - -// Close closes the connection and all sub-connections. -func (m *MuxBroker) Close() error { - return m.session.Close() -} - -// Dial opens a connection by ID. -func (m *MuxBroker) Dial(id uint32) (net.Conn, error) { - // Open the stream - stream, err := m.session.OpenStream() - if err != nil { - return nil, err - } - - // Write the stream ID onto the wire. - if err := binary.Write(stream, binary.LittleEndian, id); err != nil { - stream.Close() - return nil, err - } - - // Read the ack that we connected. Then we're off! - var ack uint32 - if err := binary.Read(stream, binary.LittleEndian, &ack); err != nil { - stream.Close() - return nil, err - } - if ack != id { - stream.Close() - return nil, fmt.Errorf("bad ack: %d (expected %d)", ack, id) - } - - return stream, nil -} - -// NextId returns a unique ID to use next. -// -// It is possible for very long-running plugin hosts to wrap this value, -// though it would require a very large amount of RPC calls. In practice -// we've never seen it happen. -func (m *MuxBroker) NextId() uint32 { - return atomic.AddUint32(&m.nextId, 1) -} - -// Run starts the brokering and should be executed in a goroutine, since it -// blocks forever, or until the session closes. -// -// Uses of MuxBroker never need to call this. It is called internally by -// the plugin host/client. -func (m *MuxBroker) Run() { - for { - stream, err := m.session.AcceptStream() - if err != nil { - // Once we receive an error, just exit - break - } - - // Read the stream ID from the stream - var id uint32 - if err := binary.Read(stream, binary.LittleEndian, &id); err != nil { - stream.Close() - continue - } - - // Initialize the waiter - p := m.getStream(id) - select { - case p.ch <- stream: - default: - } - - // Wait for a timeout - go m.timeoutWait(id, p) - } -} - -func (m *MuxBroker) getStream(id uint32) *muxBrokerPending { - m.Lock() - defer m.Unlock() - - p, ok := m.streams[id] - if ok { - return p - } - - m.streams[id] = &muxBrokerPending{ - ch: make(chan net.Conn, 1), - doneCh: make(chan struct{}), - } - return m.streams[id] -} - -func (m *MuxBroker) timeoutWait(id uint32, p *muxBrokerPending) { - // Wait for the stream to either be picked up and connected, or - // for a timeout. - timeout := false - select { - case <-p.doneCh: - case <-time.After(5 * time.Second): - timeout = true - } - - m.Lock() - defer m.Unlock() - - // Delete the stream so no one else can grab it - delete(m.streams, id) - - // If we timed out, then check if we have a channel in the buffer, - // and if so, close it. 
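
The intended usage pattern for MuxBroker is: the serving side reserves an ID, serves on it in the background, and passes the ID to the peer inside a normal RPC message; the peer then dials that ID. A hedged sketch (HelperServer and the Plugin.Echo method are hypothetical, error handling elided):

// Serving side: reserve a stream ID and serve a helper object on it.
// AcceptAndServe registers the value under the "Plugin" RPC name.
id := broker.NextId()
go broker.AcceptAndServe(id, &HelperServer{})
// ...return id to the peer inside an RPC reply...

// Dialing side: connect to the reserved ID and call the helper.
conn, _ := broker.Dial(id)
helper := rpc.NewClient(conn)
defer helper.Close()

var resp string
_ = helper.Call("Plugin.Echo", "hello", &resp)
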
- if timeout { - select { - case s := <-p.ch: - s.Close() - default: - } - } -} diff --git a/vendor/github.com/hashicorp/go-plugin/plugin.go b/vendor/github.com/hashicorp/go-plugin/plugin.go deleted file mode 100644 index 184749b96ef..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/plugin.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -// The plugin package exposes functions and helpers for communicating to -// plugins which are implemented as standalone binary applications. -// -// plugin.Client fully manages the lifecycle of executing the application, -// connecting to it, and returning the RPC client for dispensing plugins. -// -// plugin.Serve fully manages listeners to expose an RPC server from a binary -// that plugin.Client can connect to. -package plugin - -import ( - "context" - "errors" - "net/rpc" - - "google.golang.org/grpc" -) - -// Plugin is the interface that is implemented to serve/connect to an -// interface implementation. -type Plugin interface { - // Server should return the RPC server compatible struct to serve - // the methods that the Client calls over net/rpc. - Server(*MuxBroker) (interface{}, error) - - // Client returns an interface implementation for the plugin you're - // serving that communicates to the server end of the plugin. - Client(*MuxBroker, *rpc.Client) (interface{}, error) -} - -// GRPCPlugin is the interface that is implemented to serve/connect to -// a plugin over gRPC. -type GRPCPlugin interface { - // GRPCServer should register this plugin for serving with the - // given GRPCServer. Unlike Plugin.Server, this is only called once - // since gRPC plugins serve singletons. - GRPCServer(*GRPCBroker, *grpc.Server) error - - // GRPCClient should return the interface implementation for the plugin - // you're serving via gRPC. The provided context will be canceled by - // go-plugin in the event of the plugin process exiting. - GRPCClient(context.Context, *GRPCBroker, *grpc.ClientConn) (interface{}, error) -} - -// NetRPCUnsupportedPlugin implements Plugin but returns errors for the -// Server and Client functions. This will effectively disable support for -// net/rpc based plugins. -// -// This struct can be embedded in your struct. -type NetRPCUnsupportedPlugin struct{} - -func (p NetRPCUnsupportedPlugin) Server(*MuxBroker) (interface{}, error) { - return nil, errors.New("net/rpc plugin protocol not supported") -} - -func (p NetRPCUnsupportedPlugin) Client(*MuxBroker, *rpc.Client) (interface{}, error) { - return nil, errors.New("net/rpc plugin protocol not supported") -} diff --git a/vendor/github.com/hashicorp/go-plugin/process.go b/vendor/github.com/hashicorp/go-plugin/process.go deleted file mode 100644 index b88446361da..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/process.go +++ /dev/null @@ -1,4 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package plugin diff --git a/vendor/github.com/hashicorp/go-plugin/protocol.go b/vendor/github.com/hashicorp/go-plugin/protocol.go deleted file mode 100644 index e4b7be38378..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/protocol.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package plugin - -import ( - "io" - "net" -) - -// Protocol is an enum representing the types of protocols.
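
A concrete Plugin implementation is typically a small factory pair over the two methods shown above. An illustrative sketch (Greeter, GreeterRPC, and GreeterRPCServer are hypothetical types, not part of the library):

type GreeterPlugin struct{}

// Server returns the net/rpc-servable wrapper around the real impl.
func (GreeterPlugin) Server(b *plugin.MuxBroker) (interface{}, error) {
    return &GreeterRPCServer{}, nil
}

// Client wraps the rpc.Client in something satisfying Greeter.
func (GreeterPlugin) Client(b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) {
    return &GreeterRPC{client: c}, nil
}
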
-type Protocol string - -const ( - ProtocolInvalid Protocol = "" - ProtocolNetRPC Protocol = "netrpc" - ProtocolGRPC Protocol = "grpc" -) - -// ServerProtocol is an interface that must be implemented for new plugin -// protocols to be servers. -type ServerProtocol interface { - // Init is called once to configure and initialize the protocol, but - // not start listening. This is the point at which all validation should - // be done and errors returned. - Init() error - - // Config is extra configuration to be outputted to stdout. This will - // be automatically base64 encoded to ensure it can be parsed properly. - // This can be an empty string if additional configuration is not needed. - Config() string - - // Serve is called to serve connections on the given listener. This should - // continue until the listener is closed. - Serve(net.Listener) -} - -// ClientProtocol is an interface that must be implemented for new plugin -// protocols to be clients. -type ClientProtocol interface { - io.Closer - - // Dispense dispenses a new instance of the plugin with the given name. - Dispense(string) (interface{}, error) - - // Ping checks that the client connection is still healthy. - Ping() error -} diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_client.go b/vendor/github.com/hashicorp/go-plugin/rpc_client.go deleted file mode 100644 index 142454df80d..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/rpc_client.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package plugin - -import ( - "crypto/tls" - "fmt" - "io" - "net" - "net/rpc" - - "github.com/hashicorp/yamux" -) - -// RPCClient connects to an RPCServer over net/rpc to dispense plugin types. -type RPCClient struct { - broker *MuxBroker - control *rpc.Client - plugins map[string]Plugin - - // These are the streams used for the various stdout/err overrides - stdout, stderr net.Conn -} - -// newRPCClient creates a new RPCClient. The Client argument is expected -// to be successfully started already with a lock held. -func newRPCClient(c *Client) (*RPCClient, error) { - // Connect to the client - conn, err := net.Dial(c.address.Network(), c.address.String()) - if err != nil { - return nil, err - } - if tcpConn, ok := conn.(*net.TCPConn); ok { - // Make sure to set keep alive so that the connection doesn't die - tcpConn.SetKeepAlive(true) - } - - if c.config.TLSConfig != nil { - conn = tls.Client(conn, c.config.TLSConfig) - } - - // Create the actual RPC client - result, err := NewRPCClient(conn, c.config.Plugins) - if err != nil { - conn.Close() - return nil, err - } - - // Begin the stream syncing so that stdin, out, err work properly - err = result.SyncStreams( - c.config.SyncStdout, - c.config.SyncStderr) - if err != nil { - result.Close() - return nil, err - } - - return result, nil -} - -// NewRPCClient creates a client from an already-open connection-like value. -// Dial is typically used instead. -func NewRPCClient(conn io.ReadWriteCloser, plugins map[string]Plugin) (*RPCClient, error) { - // Create the yamux client so we can multiplex - mux, err := yamux.Client(conn, nil) - if err != nil { - conn.Close() - return nil, err - } - - // Connect to the control stream. 
- control, err := mux.Open() - if err != nil { - mux.Close() - return nil, err - } - - // Connect stdout, stderr streams - stdstream := make([]net.Conn, 2) - for i, _ := range stdstream { - stdstream[i], err = mux.Open() - if err != nil { - mux.Close() - return nil, err - } - } - - // Create the broker and start it up - broker := newMuxBroker(mux) - go broker.Run() - - // Build the client using our broker and control channel. - return &RPCClient{ - broker: broker, - control: rpc.NewClient(control), - plugins: plugins, - stdout: stdstream[0], - stderr: stdstream[1], - }, nil -} - -// SyncStreams should be called to enable syncing of stdout, -// stderr with the plugin. -// -// This will return immediately and the syncing will continue to happen -// in the background. You do not need to launch this in a goroutine itself. -// -// This should never be called multiple times. -func (c *RPCClient) SyncStreams(stdout io.Writer, stderr io.Writer) error { - go copyStream("stdout", stdout, c.stdout) - go copyStream("stderr", stderr, c.stderr) - return nil -} - -// Close closes the connection. The client is no longer usable after this -// is called. -func (c *RPCClient) Close() error { - // Call the control channel and ask it to gracefully exit. If this - // errors, then we save it so that we always return an error but we - // want to try to close the other channels anyways. - var empty struct{} - returnErr := c.control.Call("Control.Quit", true, &empty) - - // Close the other streams we have - if err := c.control.Close(); err != nil { - return err - } - if err := c.stdout.Close(); err != nil { - return err - } - if err := c.stderr.Close(); err != nil { - return err - } - if err := c.broker.Close(); err != nil { - return err - } - - // Return back the error we got from Control.Quit. This is very important - // since we MUST return non-nil error if this fails so that Client.Kill - // will properly try a process.Kill. - return returnErr -} - -func (c *RPCClient) Dispense(name string) (interface{}, error) { - p, ok := c.plugins[name] - if !ok { - return nil, fmt.Errorf("unknown plugin type: %s", name) - } - - var id uint32 - if err := c.control.Call( - "Dispenser.Dispense", name, &id); err != nil { - return nil, err - } - - conn, err := c.broker.Dial(id) - if err != nil { - return nil, err - } - - return p.Client(c.broker, rpc.NewClient(conn)) -} - -// Ping pings the connection to ensure it is still alive. -// -// The error from the RPC call is returned exactly if you want to inspect -// it for further error analysis. Any error returned from here would indicate -// that the connection to the plugin is not healthy. -func (c *RPCClient) Ping() error { - var empty struct{} - return c.control.Call("Control.Ping", true, &empty) -} diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_server.go b/vendor/github.com/hashicorp/go-plugin/rpc_server.go deleted file mode 100644 index cec0a3d93a2..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/rpc_server.go +++ /dev/null @@ -1,209 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package plugin - -import ( - "errors" - "fmt" - "io" - "log" - "net" - "net/rpc" - "sync" - - "github.com/hashicorp/yamux" -) - -// RPCServer listens for network connections and then dispenses interface -// implementations over net/rpc. -// -// After setting the fields below, they shouldn't be read again directly -// from the structure which may be reading/writing them concurrently. 
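> Reviewer note: on the host side, RPCClient.Dispense is normally reached through the higher-level plugin.Client wrapper rather than directly. A sketch of that flow, reusing the hypothetical Greeter types from the earlier sketch (assumed in-package; handshake values and binary path are illustrative):

```go
package main

import (
	"log"
	"os/exec"

	plugin "github.com/hashicorp/go-plugin"
)

func main() {
	client := plugin.NewClient(&plugin.ClientConfig{
		HandshakeConfig: plugin.HandshakeConfig{
			ProtocolVersion:  1,
			MagicCookieKey:   "GREETER_PLUGIN", // illustrative
			MagicCookieValue: "hello",          // illustrative
		},
		Plugins: map[string]plugin.Plugin{"greeter": &GreeterPlugin{}},
		Cmd:     exec.Command("./greeter-plugin"), // illustrative path
	})
	defer client.Kill()

	// Client() starts the subprocess, completes the handshake, and
	// returns a ClientProtocol (an *RPCClient on the net/rpc path).
	rpcClient, err := client.Client()
	if err != nil {
		log.Fatal(err)
	}

	raw, err := rpcClient.Dispense("greeter")
	if err != nil {
		log.Fatal(err)
	}
	greeter := raw.(Greeter)
	msg, _ := greeter.Greet("world")
	log.Println(msg)
}
```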
-type RPCServer struct { - Plugins map[string]Plugin - - // Stdout, Stderr are what this server will use instead of the - // normal stdin/out/err. This is because due to the multi-process nature - // of our plugin system, we can't use the normal process values so we - // make our own custom one we pipe across. - Stdout io.Reader - Stderr io.Reader - - // DoneCh should be set to a non-nil channel that will be closed - // when the control requests the RPC server to end. - DoneCh chan<- struct{} - - lock sync.Mutex -} - -// ServerProtocol impl. -func (s *RPCServer) Init() error { return nil } - -// ServerProtocol impl. -func (s *RPCServer) Config() string { return "" } - -// ServerProtocol impl. -func (s *RPCServer) Serve(lis net.Listener) { - defer s.done() - - for { - conn, err := lis.Accept() - if err != nil { - severity := "ERR" - if errors.Is(err, net.ErrClosed) { - severity = "DEBUG" - } - log.Printf("[%s] plugin: plugin server: %s", severity, err) - return - } - - go s.ServeConn(conn) - } -} - -// ServeConn runs a single connection. -// -// ServeConn blocks, serving the connection until the client hangs up. -func (s *RPCServer) ServeConn(conn io.ReadWriteCloser) { - // First create the yamux server to wrap this connection - mux, err := yamux.Server(conn, nil) - if err != nil { - conn.Close() - log.Printf("[ERR] plugin: error creating yamux server: %s", err) - return - } - - // Accept the control connection - control, err := mux.Accept() - if err != nil { - mux.Close() - if err != io.EOF { - log.Printf("[ERR] plugin: error accepting control connection: %s", err) - } - - return - } - - // Connect the stdstreams (in, out, err) - stdstream := make([]net.Conn, 2) - for i := range stdstream { - stdstream[i], err = mux.Accept() - if err != nil { - mux.Close() - log.Printf("[ERR] plugin: accepting stream %d: %s", i, err) - return - } - } - - // Copy std streams out to the proper place - go copyStream("stdout", stdstream[0], s.Stdout) - go copyStream("stderr", stdstream[1], s.Stderr) - - // Create the broker and start it up - broker := newMuxBroker(mux) - go broker.Run() - - // Use the control connection to build the dispenser and serve the - // connection. - server := rpc.NewServer() - server.RegisterName("Control", &controlServer{ - server: s, - }) - server.RegisterName("Dispenser", &dispenseServer{ - broker: broker, - plugins: s.Plugins, - }) - server.ServeConn(control) -} - -// done is called internally by the control server to trigger the -// doneCh to close which is listened to by the main process to cleanly -// exit. -func (s *RPCServer) done() { - s.lock.Lock() - defer s.lock.Unlock() - - if s.DoneCh != nil { - close(s.DoneCh) - s.DoneCh = nil - } -} - -// dispenseServer dispenses variousinterface implementations for Terraform. -type controlServer struct { - server *RPCServer -} - -// Ping can be called to verify the connection (and likely the binary) -// is still alive to a plugin. -func (c *controlServer) Ping( - null bool, response *struct{}, -) error { - *response = struct{}{} - return nil -} - -func (c *controlServer) Quit( - null bool, response *struct{}, -) error { - // End the server - c.server.done() - - // Always return true - *response = struct{}{} - - return nil -} - -// dispenseServer dispenses variousinterface implementations for Terraform. 
-type dispenseServer struct { - broker *MuxBroker - plugins map[string]Plugin -} - -func (d *dispenseServer) Dispense( - name string, response *uint32, -) error { - // Find the function to create this implementation - p, ok := d.plugins[name] - if !ok { - return fmt.Errorf("unknown plugin type: %s", name) - } - - // Create the implementation first so we know if there is an error. - impl, err := p.Server(d.broker) - if err != nil { - // We turn the error into an errors error so that it works across RPC - return errors.New(err.Error()) - } - - // Reserve an ID for our implementation - id := d.broker.NextId() - *response = id - - // Run the rest in a goroutine since it can only happen once this RPC - // call returns. We wait for a connection for the plugin implementation - // and serve it. - go func() { - conn, err := d.broker.Accept(id) - if err != nil { - log.Printf("[ERR] go-plugin: plugin dispense error: %s: %s", name, err) - return - } - - serve(conn, "Plugin", impl) - }() - - return nil -} - -func serve(conn io.ReadWriteCloser, name string, v interface{}) { - server := rpc.NewServer() - if err := server.RegisterName(name, v); err != nil { - log.Printf("[ERR] go-plugin: plugin dispense error: %s", err) - return - } - - server.ServeConn(conn) -} diff --git a/vendor/github.com/hashicorp/go-plugin/runner/runner.go b/vendor/github.com/hashicorp/go-plugin/runner/runner.go deleted file mode 100644 index e638ae5f8ee..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/runner/runner.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package runner - -import ( - "context" - "io" -) - -// Runner defines the interface required by go-plugin to manage the lifecycle of -// of a plugin and attempt to negotiate a connection with it. Note that this -// is orthogonal to the protocol and transport used, which is negotiated over stdout. -type Runner interface { - // Start should start the plugin and ensure any work required for servicing - // other interface methods is done. If the context is cancelled, it should - // only abort any attempts to _start_ the plugin. Waiting and shutdown are - // handled separately. - Start(ctx context.Context) error - - // Diagnose makes a best-effort attempt to return any debug information that - // might help users understand why a plugin failed to start and negotiate a - // connection. - Diagnose(ctx context.Context) string - - // Stdout is used to negotiate the go-plugin protocol. - Stdout() io.ReadCloser - - // Stderr is used for forwarding plugin logs to the host process logger. - Stderr() io.ReadCloser - - // Name is a human-friendly name for the plugin, such as the path to the - // executable. It does not have to be unique. - Name() string - - AttachedRunner -} - -// AttachedRunner defines a limited subset of Runner's interface to represent the -// reduced responsibility for plugin lifecycle when attaching to an already running -// plugin. -type AttachedRunner interface { - // Wait should wait until the plugin stops running, whether in response to - // an out of band signal or in response to calling Kill(). - Wait(ctx context.Context) error - - // Kill should stop the plugin and perform any cleanup required. - Kill(ctx context.Context) error - - // ID is a unique identifier to represent the running plugin. e.g. pid or - // container ID. - ID() string - - AddrTranslator -} - -// AddrTranslator translates addresses between the execution context of the host -// process and the plugin. 
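> Reviewer note: one subtlety in Dispense above is that server-side errors are flattened with errors.New before crossing the wire; net/rpc transports only the error string, so a typed error would lose its identity anyway. A small standalone sketch of what survives the boundary (myErr is illustrative):

```go
package main

import (
	"errors"
	"fmt"
)

// myErr is a typed error a plugin implementation might produce (illustrative).
type myErr struct{ code int }

func (e *myErr) Error() string { return fmt.Sprintf("backend error, code %d", e.code) }

func main() {
	var err error = &myErr{code: 7}

	// What Dispense effectively sends across RPC: just the string form.
	flat := errors.New(err.Error())

	var typed *myErr
	fmt.Println(errors.As(flat, &typed)) // false: the concrete type is gone
	fmt.Println(flat)                    // "backend error, code 7"
}
```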
For example, if the plugin is in a container, the file -// path for a Unix socket may be different between the host and the container. -// -// It is only intended to be used by the host process. -type AddrTranslator interface { - // Called before connecting on any addresses received back from the plugin. - PluginToHost(pluginNet, pluginAddr string) (hostNet string, hostAddr string, err error) - - // Called on any host process addresses before they are sent to the plugin. - HostToPlugin(hostNet, hostAddr string) (pluginNet string, pluginAddr string, err error) -} - -// ReattachFunc can be passed to a client's reattach config to reattach to an -// already running plugin instead of starting it ourselves. -type ReattachFunc func() (AttachedRunner, error) diff --git a/vendor/github.com/hashicorp/go-plugin/server.go b/vendor/github.com/hashicorp/go-plugin/server.go deleted file mode 100644 index e741bc7fa18..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/server.go +++ /dev/null @@ -1,665 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package plugin - -import ( - "context" - "crypto/tls" - "crypto/x509" - "encoding/base64" - "errors" - "fmt" - "io" - "net" - "os" - "os/signal" - "os/user" - "runtime" - "sort" - "strconv" - "strings" - - hclog "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-plugin/internal/grpcmux" - "google.golang.org/grpc" -) - -// CoreProtocolVersion is the ProtocolVersion of the plugin system itself. -// We will increment this whenever we change any protocol behavior. This -// will invalidate any prior plugins but will at least allow us to iterate -// on the core in a safe way. We will do our best to do this very -// infrequently. -const CoreProtocolVersion = 1 - -// HandshakeConfig is the configuration used by client and servers to -// handshake before starting a plugin connection. This is embedded by -// both ServeConfig and ClientConfig. -// -// In practice, the plugin host creates a HandshakeConfig that is exported -// and plugins then can easily consume it. -type HandshakeConfig struct { - // ProtocolVersion is the version that clients must match on to - // agree they can communicate. This should match the ProtocolVersion - // set on ClientConfig when using a plugin. - // This field is not required if VersionedPlugins are being used in the - // Client or Server configurations. - ProtocolVersion uint - - // MagicCookieKey and value are used as a very basic verification - // that a plugin is intended to be launched. This is not a security - // measure, just a UX feature. If the magic cookie doesn't match, - // we show human-friendly output. - MagicCookieKey string - MagicCookieValue string -} - -// PluginSet is a set of plugins provided to be registered in the plugin -// server. -type PluginSet map[string]Plugin - -// ServeConfig configures what sorts of plugins are served. -type ServeConfig struct { - // HandshakeConfig is the configuration that must match clients. - HandshakeConfig - - // TLSProvider is a function that returns a configured tls.Config. - TLSProvider func() (*tls.Config, error) - - // Plugins are the plugins that are served. - // The implied version of this PluginSet is the Handshake.ProtocolVersion. - Plugins PluginSet - - // VersionedPlugins is a map of PluginSets for specific protocol versions. - // These can be used to negotiate a compatible version between client and - // server. If this is set, Handshake.ProtocolVersion is not required. 
- VersionedPlugins map[int]PluginSet - - // GRPCServer should be non-nil to enable serving the plugins over - // gRPC. This is a function to create the server when needed with the - // given server options. The server options populated by go-plugin will - // be for TLS if set. You may modify the input slice. - // - // Note that the grpc.Server will automatically be registered with - // the gRPC health checking service. This is not optional since go-plugin - // relies on this to implement Ping(). - GRPCServer func([]grpc.ServerOption) *grpc.Server - - // Logger is used to pass a logger into the server. If none is provided the - // server will create a default logger. - Logger hclog.Logger - - // Test, if non-nil, will put plugin serving into "test mode". This is - // meant to be used as part of `go test` within a plugin's codebase to - // launch the plugin in-process and output a ReattachConfig. - // - // This changes the behavior of the server in a number of ways to - // accomodate the expectation of running in-process: - // - // * The handshake cookie is not validated. - // * Stdout/stderr will receive plugin reads and writes - // * Connection information will not be sent to stdout - // - Test *ServeTestConfig -} - -// ServeTestConfig configures plugin serving for test mode. See ServeConfig.Test. -type ServeTestConfig struct { - // Context, if set, will force the plugin serving to end when cancelled. - // This is only a test configuration because the non-test configuration - // expects to take over the process and therefore end on an interrupt or - // kill signal. For tests, we need to kill the plugin serving routinely - // and this provides a way to do so. - // - // If you want to wait for the plugin process to close before moving on, - // you can wait on CloseCh. - Context context.Context - - // If this channel is non-nil, we will send the ReattachConfig via - // this channel. This can be encoded (via JSON recommended) to the - // plugin client to attach to this plugin. - ReattachConfigCh chan<- *ReattachConfig - - // CloseCh, if non-nil, will be closed when serving exits. This can be - // used along with Context to determine when the server is fully shut down. - // If this is not set, you can still use Context on its own, but note there - // may be a period of time between canceling the context and the plugin - // server being shut down. - CloseCh chan<- struct{} - - // SyncStdio, if true, will enable the client side "SyncStdout/Stderr" - // functionality to work. This defaults to false because the implementation - // of making this work within test environments is particularly messy - // and SyncStdio functionality is fairly rare, so we default to the simple - // scenario. - SyncStdio bool -} - -func unixSocketConfigFromEnv() UnixSocketConfig { - return UnixSocketConfig{ - Group: os.Getenv(EnvUnixSocketGroup), - socketDir: os.Getenv(EnvUnixSocketDir), - } -} - -// protocolVersion determines the protocol version and plugin set to be used by -// the server. In the event that there is no suitable version, the last version -// in the config is returned leaving the client to report the incompatibility. 
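> Reviewer note: on the plugin side, ServeConfig is typically filled in once inside main. A sketch reusing the hypothetical Greeter types from the earlier sketch, plus a VersionedPlugins entry to show the negotiation hook (localGreeter, the version numbers, and the cookie values are illustrative):

```go
package main

import (
	plugin "github.com/hashicorp/go-plugin"
)

// localGreeter is a concrete in-process implementation (illustrative).
type localGreeter struct{}

func (localGreeter) Greet(name string) (string, error) {
	return "hello, " + name, nil
}

func main() {
	handshake := plugin.HandshakeConfig{
		ProtocolVersion:  2,
		MagicCookieKey:   "GREETER_PLUGIN", // illustrative
		MagicCookieValue: "hello",          // illustrative
	}

	plugin.Serve(&plugin.ServeConfig{
		HandshakeConfig: handshake,
		// Plugins is implicitly registered under ProtocolVersion (2).
		Plugins: plugin.PluginSet{
			"greeter": &GreeterPlugin{Impl: localGreeter{}},
		},
		// Older protocol versions can be offered side by side; here
		// version 1 reuses the same implementation for illustration.
		VersionedPlugins: map[int]plugin.PluginSet{
			1: {"greeter": &GreeterPlugin{Impl: localGreeter{}}},
		},
	})
}
```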
-func protocolVersion(opts *ServeConfig) (int, Protocol, PluginSet) { - protoVersion := int(opts.ProtocolVersion) - pluginSet := opts.Plugins - protoType := ProtocolNetRPC - // Check if the client sent a list of acceptable versions - var clientVersions []int - if vs := os.Getenv("PLUGIN_PROTOCOL_VERSIONS"); vs != "" { - for _, s := range strings.Split(vs, ",") { - v, err := strconv.Atoi(s) - if err != nil { - fmt.Fprintf(os.Stderr, "server sent invalid plugin version %q", s) - continue - } - clientVersions = append(clientVersions, v) - } - } - - // We want to iterate in reverse order, to ensure we match the newest - // compatible plugin version. - sort.Sort(sort.Reverse(sort.IntSlice(clientVersions))) - - // set the old un-versioned fields as if they were versioned plugins - if opts.VersionedPlugins == nil { - opts.VersionedPlugins = make(map[int]PluginSet) - } - - if pluginSet != nil { - opts.VersionedPlugins[protoVersion] = pluginSet - } - - // Sort the version to make sure we match the latest first - var versions []int - for v := range opts.VersionedPlugins { - versions = append(versions, v) - } - - sort.Sort(sort.Reverse(sort.IntSlice(versions))) - - // See if we have multiple versions of Plugins to choose from - for _, version := range versions { - // Record each version, since we guarantee that this returns valid - // values even if they are not a protocol match. - protoVersion = version - pluginSet = opts.VersionedPlugins[version] - - // If we have a configured gRPC server we should select a protocol - if opts.GRPCServer != nil { - // All plugins in a set must use the same transport, so check the first - // for the protocol type - for _, p := range pluginSet { - switch p.(type) { - case GRPCPlugin: - protoType = ProtocolGRPC - default: - protoType = ProtocolNetRPC - } - break - } - } - - for _, clientVersion := range clientVersions { - if clientVersion == protoVersion { - return protoVersion, protoType, pluginSet - } - } - } - - // Return the lowest version as the fallback. - // Since we iterated over all the versions in reverse order above, these - // values are from the lowest version number plugins (which may be from - // a combination of the Handshake.ProtocolVersion and ServeConfig.Plugins - // fields). This allows serving the oldest version of our plugins to a - // legacy client that did not send a PLUGIN_PROTOCOL_VERSIONS list. - return protoVersion, protoType, pluginSet -} - -// Serve serves the plugins given by ServeConfig. -// -// Serve doesn't return until the plugin is done being executed. Any -// fixable errors will be output to os.Stderr and the process will -// exit with a status code of 1. Serve will panic for unexpected -// conditions where a user's fix is unknown. -// -// This is the method that plugins should call in their main() functions. -func Serve(opts *ServeConfig) { - exitCode := -1 - // We use this to trigger an `os.Exit` so that we can execute our other - // deferred functions. In test mode, we just output the err to stderr - // and return. - defer func() { - if opts.Test == nil && exitCode >= 0 { - os.Exit(exitCode) - } - - if opts.Test != nil && opts.Test.CloseCh != nil { - close(opts.Test.CloseCh) - } - }() - - if opts.Test == nil { - // Validate the handshake config - if opts.MagicCookieKey == "" || opts.MagicCookieValue == "" { - fmt.Fprintf(os.Stderr, - "Misconfigured ServeConfig given to serve this plugin: no magic cookie\n"+ - "key or value was set. 
Please notify the plugin author and report\n"+ - "this as a bug.\n") - exitCode = 1 - return - } - - // First check the cookie - if os.Getenv(opts.MagicCookieKey) != opts.MagicCookieValue { - fmt.Fprintf(os.Stderr, - "This binary is a plugin. These are not meant to be executed directly.\n"+ - "Please execute the program that consumes these plugins, which will\n"+ - "load any plugins automatically\n") - exitCode = 1 - return - } - } - - // negotiate the version and plugins - // start with default version in the handshake config - protoVersion, protoType, pluginSet := protocolVersion(opts) - - logger := opts.Logger - if logger == nil { - // internal logger to os.Stderr - logger = hclog.New(&hclog.LoggerOptions{ - Level: hclog.Trace, - Output: os.Stderr, - JSONFormat: true, - }) - } - - // Register a listener so we can accept a connection - listener, err := serverListener(unixSocketConfigFromEnv()) - if err != nil { - logger.Error("plugin init error", "error", err) - return - } - - // Close the listener on return. We wrap this in a func() on purpose - // because the "listener" reference may change to TLS. - defer func() { - listener.Close() - }() - - var tlsConfig *tls.Config - if opts.TLSProvider != nil { - tlsConfig, err = opts.TLSProvider() - if err != nil { - logger.Error("plugin tls init", "error", err) - return - } - } - - var serverCert string - clientCert := os.Getenv("PLUGIN_CLIENT_CERT") - // If the client is configured using AutoMTLS, the certificate will be here, - // and we need to generate our own in response. - if tlsConfig == nil && clientCert != "" { - logger.Info("configuring server automatic mTLS") - clientCertPool := x509.NewCertPool() - if !clientCertPool.AppendCertsFromPEM([]byte(clientCert)) { - logger.Error("client cert provided but failed to parse", "cert", clientCert) - } - - certPEM, keyPEM, err := generateCert() - if err != nil { - logger.Error("failed to generate server certificate", "error", err) - panic(err) - } - - cert, err := tls.X509KeyPair(certPEM, keyPEM) - if err != nil { - logger.Error("failed to parse server certificate", "error", err) - panic(err) - } - - tlsConfig = &tls.Config{ - Certificates: []tls.Certificate{cert}, - ClientAuth: tls.RequireAndVerifyClientCert, - ClientCAs: clientCertPool, - MinVersion: tls.VersionTLS12, - RootCAs: clientCertPool, - ServerName: "localhost", - } - - // We send back the raw leaf cert data for the client rather than the - // PEM, since the protocol can't handle newlines. - serverCert = base64.RawStdEncoding.EncodeToString(cert.Certificate[0]) - } - - // Create the channel to tell us when we're done - doneCh := make(chan struct{}) - - // Create our new stdout, stderr files. These will override our built-in - // stdout/stderr so that it works across the stream boundary. - var stdout_r, stderr_r io.Reader - stdout_r, stdout_w, err := os.Pipe() - if err != nil { - fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err) - os.Exit(1) - } - stderr_r, stderr_w, err := os.Pipe() - if err != nil { - fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err) - os.Exit(1) - } - - // If we're in test mode, we tee off the reader and write the data - // as-is to our normal Stdout and Stderr so that they continue working - // while stdio works. This is because in test mode, we assume we're running - // in `go test` or some equivalent and we want output to go to standard - // locations. - if opts.Test != nil { - // TODO(mitchellh): This isn't super ideal because a TeeReader - // only works if the reader side is actively read. 
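> Reviewer note: the matching logic in protocolVersion above reduces to "walk the server's versions newest-first and return the first one the client also offers". A standalone sketch of that rule:

```go
package main

import (
	"fmt"
	"sort"
)

// negotiate mirrors protocolVersion's ordering: the newest server version
// that also appears in the client's PLUGIN_PROTOCOL_VERSIONS list wins.
func negotiate(server, client []int) (int, bool) {
	sort.Sort(sort.Reverse(sort.IntSlice(server)))
	for _, sv := range server {
		for _, cv := range client {
			if cv == sv {
				return sv, true
			}
		}
	}
	return 0, false
}

func main() {
	v, ok := negotiate([]int{1, 2, 3}, []int{2, 3})
	fmt.Println(v, ok) // 3 true: newest mutually supported version
}
```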
If we never - // connect via a plugin client, the output still gets swallowed. - stdout_r = io.TeeReader(stdout_r, os.Stdout) - stderr_r = io.TeeReader(stderr_r, os.Stderr) - } - - // Build the server type - var server ServerProtocol - switch protoType { - case ProtocolNetRPC: - // If we have a TLS configuration then we wrap the listener - // ourselves and do it at that level. - if tlsConfig != nil { - listener = tls.NewListener(listener, tlsConfig) - } - - // Create the RPC server to dispense - server = &RPCServer{ - Plugins: pluginSet, - Stdout: stdout_r, - Stderr: stderr_r, - DoneCh: doneCh, - } - - case ProtocolGRPC: - var muxer *grpcmux.GRPCServerMuxer - if multiplex, _ := strconv.ParseBool(os.Getenv(envMultiplexGRPC)); multiplex { - muxer = grpcmux.NewGRPCServerMuxer(logger, listener) - listener = muxer - } - - // Create the gRPC server - server = &GRPCServer{ - Plugins: pluginSet, - Server: opts.GRPCServer, - TLS: tlsConfig, - Stdout: stdout_r, - Stderr: stderr_r, - DoneCh: doneCh, - logger: logger, - muxer: muxer, - } - - default: - panic("unknown server protocol: " + protoType) - } - - // Initialize the servers - if err := server.Init(); err != nil { - logger.Error("protocol init", "error", err) - return - } - - logger.Debug("plugin address", "network", listener.Addr().Network(), "address", listener.Addr().String()) - - // Output the address and service name to stdout so that the client can - // bring it up. In test mode, we don't do this because clients will - // attach via a reattach config. - if opts.Test == nil { - const grpcBrokerMultiplexingSupported = true - protocolLine := fmt.Sprintf("%d|%d|%s|%s|%s|%s", - CoreProtocolVersion, - protoVersion, - listener.Addr().Network(), - listener.Addr().String(), - protoType, - serverCert) - - // Old clients will error with new plugins if we blindly append the - // seventh segment for gRPC broker multiplexing support, because old - // client code uses strings.SplitN(line, "|", 6), which means a seventh - // segment will get appended to the sixth segment as "sixthpart|true". - // - // If the environment variable is set, we assume the client is new enough - // to handle a seventh segment, as it should now use - // strings.Split(line, "|") and always handle each segment individually. - if os.Getenv(envMultiplexGRPC) != "" { - protocolLine += fmt.Sprintf("|%v", grpcBrokerMultiplexingSupported) - } - fmt.Printf("%s\n", protocolLine) - os.Stdout.Sync() - } else if ch := opts.Test.ReattachConfigCh; ch != nil { - // Send back the reattach config that can be used. This isn't - // quite ready if they connect immediately but the client should - // retry a few times. - ch <- &ReattachConfig{ - Protocol: protoType, - ProtocolVersion: protoVersion, - Addr: listener.Addr(), - Pid: os.Getpid(), - Test: true, - } - } - - // Eat the interrupts. In test mode we disable this so that go test - // can be cancelled properly. - if opts.Test == nil { - ch := make(chan os.Signal, 1) - signal.Notify(ch, os.Interrupt) - go func() { - count := 0 - for { - <-ch - count++ - logger.Trace("plugin received interrupt signal, ignoring", "count", count) - } - }() - } - - // Set our stdout, stderr to the stdio stream that clients can retrieve - // using ClientConfig.SyncStdout/err. We only do this for non-test mode - // or if the test mode explicitly requests it. - // - // In test mode, we use a multiwriter so that the data continues going - // to the normal stdout/stderr so output can show up in test logs. 
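> Reviewer note: the six-field (optionally seven-field) handshake line printed above is the entire stdout protocol between plugin and host. A sketch of its shape and of the old SplitN parsing the comment warns about (all field values are illustrative):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	line := fmt.Sprintf("%d|%d|%s|%s|%s|%s",
		1,               // CoreProtocolVersion
		2,               // negotiated application protocol version
		"unix",          // listener network
		"/tmp/plugin42", // listener address (illustrative)
		"grpc",          // protocol type
		"",              // base64 leaf cert; empty without AutoMTLS
	)

	// Old clients parse with SplitN(..., 6), which is why a seventh
	// multiplexing field can only be appended when the env-var opt-in
	// guarantees a newer client.
	parts := strings.SplitN(line, "|", 6)
	fmt.Println(parts[2], parts[3], parts[4]) // unix /tmp/plugin42 grpc
}
```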
We - // also send to the stdio stream so that clients can continue working - // if they depend on that. - if opts.Test == nil || opts.Test.SyncStdio { - if opts.Test != nil { - // In test mode we need to maintain the original values so we can - // reset it. - defer func(out, err *os.File) { - os.Stdout = out - os.Stderr = err - }(os.Stdout, os.Stderr) - } - os.Stdout = stdout_w - os.Stderr = stderr_w - } - - // Accept connections and wait for completion - go server.Serve(listener) - - ctx := context.Background() - if opts.Test != nil && opts.Test.Context != nil { - ctx = opts.Test.Context - } - select { - case <-ctx.Done(): - // Cancellation. We can stop the server by closing the listener. - // This isn't graceful at all but this is currently only used by - // tests and its our only way to stop. - listener.Close() - - // If this is a grpc server, then we also ask the server itself to - // end which will kill all connections. There isn't an easy way to do - // this for net/rpc currently but net/rpc is more and more unused. - if s, ok := server.(*GRPCServer); ok { - s.Stop() - } - - // Wait for the server itself to shut down - <-doneCh - - case <-doneCh: - // Note that given the documentation of Serve we should probably be - // setting exitCode = 0 and using os.Exit here. That's how it used to - // work before extracting this library. However, for years we've done - // this so we'll keep this functionality. - } -} - -func serverListener(unixSocketCfg UnixSocketConfig) (net.Listener, error) { - if runtime.GOOS == "windows" { - return serverListener_tcp() - } - - return serverListener_unix(unixSocketCfg) -} - -func serverListener_tcp() (net.Listener, error) { - envMinPort := os.Getenv("PLUGIN_MIN_PORT") - envMaxPort := os.Getenv("PLUGIN_MAX_PORT") - - var minPort, maxPort int64 - var err error - - switch { - case len(envMinPort) == 0: - minPort = 0 - default: - minPort, err = strconv.ParseInt(envMinPort, 10, 32) - if err != nil { - return nil, fmt.Errorf("Couldn't get value from PLUGIN_MIN_PORT: %v", err) - } - } - - switch { - case len(envMaxPort) == 0: - maxPort = 0 - default: - maxPort, err = strconv.ParseInt(envMaxPort, 10, 32) - if err != nil { - return nil, fmt.Errorf("Couldn't get value from PLUGIN_MAX_PORT: %v", err) - } - } - - if minPort > maxPort { - return nil, fmt.Errorf("PLUGIN_MIN_PORT value of %d is greater than PLUGIN_MAX_PORT value of %d", minPort, maxPort) - } - - for port := minPort; port <= maxPort; port++ { - address := fmt.Sprintf("127.0.0.1:%d", port) - listener, err := net.Listen("tcp", address) - if err == nil { - return listener, nil - } - } - - return nil, errors.New("Couldn't bind plugin TCP listener") -} - -func serverListener_unix(unixSocketCfg UnixSocketConfig) (net.Listener, error) { - tf, err := os.CreateTemp(unixSocketCfg.socketDir, "plugin") - if err != nil { - return nil, err - } - path := tf.Name() - - // Close the file and remove it because it has to not exist for - // the domain socket. - if err := tf.Close(); err != nil { - return nil, err - } - if err := os.Remove(path); err != nil { - return nil, err - } - - l, err := net.Listen("unix", path) - if err != nil { - return nil, err - } - - // By default, unix sockets are only writable by the owner. Set up a custom - // group owner and group write permissions if configured. - if unixSocketCfg.Group != "" { - err = setGroupWritable(path, unixSocketCfg.Group, 0o660) - if err != nil { - return nil, err - } - } - - // Wrap the listener in rmListener so that the Unix domain socket file - // is removed on close. 
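> Reviewer note: since serverListener_tcp only reads its bounds from the environment, a host that needs a firewall-friendly range sets the two variables before launching the plugin. A minimal sketch (binary path and port values illustrative):

```go
package main

import (
	"os"
	"os/exec"
)

func launchPlugin() *exec.Cmd {
	// Constrain the plugin's TCP listener (the Windows code path above)
	// to a known port range.
	cmd := exec.Command("./greeter-plugin")
	cmd.Env = append(os.Environ(),
		"PLUGIN_MIN_PORT=10000",
		"PLUGIN_MAX_PORT=25000",
	)
	return cmd
}
```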
- return newDeleteFileListener(l, path), nil -} - -func setGroupWritable(path, groupString string, mode os.FileMode) error { - groupID, err := strconv.Atoi(groupString) - if err != nil { - group, err := user.LookupGroup(groupString) - if err != nil { - return fmt.Errorf("failed to find gid from %q: %w", groupString, err) - } - groupID, err = strconv.Atoi(group.Gid) - if err != nil { - return fmt.Errorf("failed to parse %q group's gid as an integer: %w", groupString, err) - } - } - - err = os.Chown(path, -1, groupID) - if err != nil { - return err - } - - err = os.Chmod(path, mode) - if err != nil { - return err - } - - return nil -} - -// rmListener is an implementation of net.Listener that forwards most -// calls to the listener but also calls an additional close function. We -// use this to cleanup the unix domain socket on close, as well as clean -// up multiplexed listeners. -type rmListener struct { - net.Listener - close func() error -} - -func newDeleteFileListener(ln net.Listener, path string) *rmListener { - return &rmListener{ - Listener: ln, - close: func() error { - return os.Remove(path) - }, - } -} - -func (l *rmListener) Close() error { - // Close the listener itself - if err := l.Listener.Close(); err != nil { - return err - } - - // Remove the file - return l.close() -} diff --git a/vendor/github.com/hashicorp/go-plugin/server_mux.go b/vendor/github.com/hashicorp/go-plugin/server_mux.go deleted file mode 100644 index 6b14b0c291d..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/server_mux.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package plugin - -import ( - "fmt" - "os" -) - -// ServeMuxMap is the type that is used to configure ServeMux -type ServeMuxMap map[string]*ServeConfig - -// ServeMux is like Serve, but serves multiple types of plugins determined -// by the argument given on the command-line. -// -// This command doesn't return until the plugin is done being executed. Any -// errors are logged or output to stderr. -func ServeMux(m ServeMuxMap) { - if len(os.Args) != 2 { - fmt.Fprintf(os.Stderr, - "Invoked improperly. This is an internal command that shouldn't\n"+ - "be manually invoked.\n") - os.Exit(1) - } - - opts, ok := m[os.Args[1]] - if !ok { - fmt.Fprintf(os.Stderr, "Unknown plugin: %s\n", os.Args[1]) - os.Exit(1) - } - - Serve(opts) -} diff --git a/vendor/github.com/hashicorp/go-plugin/stream.go b/vendor/github.com/hashicorp/go-plugin/stream.go deleted file mode 100644 index a2348642d86..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/stream.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package plugin - -import ( - "io" - "log" -) - -func copyStream(name string, dst io.Writer, src io.Reader) { - if src == nil { - panic(name + ": src is nil") - } - if dst == nil { - panic(name + ": dst is nil") - } - if _, err := io.Copy(dst, src); err != nil && err != io.EOF { - log.Printf("[ERR] plugin: stream copy '%s' error: %s", name, err) - } -} diff --git a/vendor/github.com/hashicorp/go-plugin/testing.go b/vendor/github.com/hashicorp/go-plugin/testing.go deleted file mode 100644 index a8735dfc8c7..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/testing.go +++ /dev/null @@ -1,185 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package plugin - -import ( - "bytes" - "context" - "io" - "net" - "net/rpc" - - hclog "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-plugin/internal/grpcmux" - "github.com/mitchellh/go-testing-interface" - "google.golang.org/grpc" -) - -// TestOptions allows specifying options that can affect the behavior of the -// test functions -type TestOptions struct { - //ServerStdout causes the given value to be used in place of a blank buffer - //for RPCServer's Stdout - ServerStdout io.ReadCloser - - //ServerStderr causes the given value to be used in place of a blank buffer - //for RPCServer's Stderr - ServerStderr io.ReadCloser -} - -// The testing file contains test helpers that you can use outside of -// this package for making it easier to test plugins themselves. - -// TestConn is a helper function for returning a client and server -// net.Conn connected to each other. -func TestConn(t testing.T) (net.Conn, net.Conn) { - // Listen to any local port. This listener will be closed - // after a single connection is established. - l, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatalf("err: %s", err) - } - - // Start a goroutine to accept our client connection - var serverConn net.Conn - doneCh := make(chan struct{}) - go func() { - defer close(doneCh) - defer l.Close() - var err error - serverConn, err = l.Accept() - if err != nil { - t.Fatalf("err: %s", err) - } - }() - - // Connect to the server - clientConn, err := net.Dial("tcp", l.Addr().String()) - if err != nil { - t.Fatalf("err: %s", err) - } - - // Wait for the server side to acknowledge it has connected - <-doneCh - - return clientConn, serverConn -} - -// TestRPCConn returns a rpc client and server connected to each other. -func TestRPCConn(t testing.T) (*rpc.Client, *rpc.Server) { - clientConn, serverConn := TestConn(t) - - server := rpc.NewServer() - go server.ServeConn(serverConn) - - client := rpc.NewClient(clientConn) - return client, server -} - -// TestPluginRPCConn returns a plugin RPC client and server that are connected -// together and configured. -func TestPluginRPCConn(t testing.T, ps map[string]Plugin, opts *TestOptions) (*RPCClient, *RPCServer) { - // Create two net.Conns we can use to shuttle our control connection - clientConn, serverConn := TestConn(t) - - // Start up the server - server := &RPCServer{Plugins: ps, Stdout: new(bytes.Buffer), Stderr: new(bytes.Buffer)} - if opts != nil { - if opts.ServerStdout != nil { - server.Stdout = opts.ServerStdout - } - if opts.ServerStderr != nil { - server.Stderr = opts.ServerStderr - } - } - go server.ServeConn(serverConn) - - // Connect the client to the server - client, err := NewRPCClient(clientConn, ps) - if err != nil { - t.Fatalf("err: %s", err) - } - - return client, server -} - -// TestGRPCConn returns a gRPC client conn and grpc server that are connected -// together and configured. The register function is used to register services -// prior to the Serve call. This is used to test gRPC connections. 
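> Reviewer note: TestPluginRPCConn above takes the subprocess out of the loop entirely, which makes plugin contract tests cheap. A sketch of a test built on it, again reusing the hypothetical Greeter types and the illustrative localGreeter from the earlier sketches:

```go
package shared

import (
	"testing"

	plugin "github.com/hashicorp/go-plugin"
)

func TestGreeterPlugin(t *testing.T) {
	client, _ := plugin.TestPluginRPCConn(t, map[string]plugin.Plugin{
		"greeter": &GreeterPlugin{Impl: localGreeter{}},
	}, nil)
	defer client.Close()

	raw, err := client.Dispense("greeter")
	if err != nil {
		t.Fatal(err)
	}

	got, err := raw.(Greeter).Greet("world")
	if err != nil || got == "" {
		t.Fatalf("unexpected result: %q, %v", got, err)
	}
}
```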
-func TestGRPCConn(t testing.T, register func(*grpc.Server)) (*grpc.ClientConn, *grpc.Server) { - // Create a listener - l, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatalf("err: %s", err) - } - - server := grpc.NewServer() - register(server) - go server.Serve(l) - - // Connect to the server - conn, err := grpc.Dial( - l.Addr().String(), - grpc.WithBlock(), - grpc.WithInsecure()) - if err != nil { - t.Fatalf("err: %s", err) - } - - // Connection successful, close the listener - l.Close() - - return conn, server -} - -// TestPluginGRPCConn returns a plugin gRPC client and server that are connected -// together and configured. This is used to test gRPC connections. -func TestPluginGRPCConn(t testing.T, multiplex bool, ps map[string]Plugin) (*GRPCClient, *GRPCServer) { - // Create a listener - ln, err := serverListener(UnixSocketConfig{}) - if err != nil { - t.Fatal(err) - } - - logger := hclog.New(&hclog.LoggerOptions{ - Level: hclog.Debug, - }) - - // Start up the server - var muxer *grpcmux.GRPCServerMuxer - if multiplex { - muxer = grpcmux.NewGRPCServerMuxer(logger, ln) - ln = muxer - } - server := &GRPCServer{ - Plugins: ps, - DoneCh: make(chan struct{}), - Server: DefaultGRPCServer, - Stdout: new(bytes.Buffer), - Stderr: new(bytes.Buffer), - logger: logger, - muxer: muxer, - } - if err := server.Init(); err != nil { - t.Fatalf("err: %s", err) - } - go server.Serve(ln) - - client := &Client{ - address: ln.Addr(), - protocol: ProtocolGRPC, - config: &ClientConfig{ - Plugins: ps, - GRPCBrokerMultiplex: multiplex, - }, - logger: logger, - } - - grpcClient, err := newGRPCClient(context.Background(), client) - if err != nil { - t.Fatal(err) - } - - return grpcClient, server -} diff --git a/vendor/github.com/hashicorp/yamux/.gitignore b/vendor/github.com/hashicorp/golang-lru/.gitignore similarity index 100% rename from vendor/github.com/hashicorp/yamux/.gitignore rename to vendor/github.com/hashicorp/golang-lru/.gitignore diff --git a/vendor/github.com/hashicorp/golang-lru/.golangci.yml b/vendor/github.com/hashicorp/golang-lru/.golangci.yml new file mode 100644 index 00000000000..49202fc41e6 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/.golangci.yml @@ -0,0 +1,30 @@ +linters: + enable: + - megacheck + - revive + - govet + - unconvert + - megacheck + - gas + - gocyclo + - dupl + - misspell + - unparam + - unused + - typecheck + - ineffassign + - stylecheck + - exportloopref + - gocritic + - nakedret + - gosimple + - prealloc + fast: false + disable-all: true + +issues: + exclude-rules: + - path: _test\.go + linters: + - dupl + exclude-use-default: false diff --git a/vendor/github.com/hashicorp/golang-lru/2q.go b/vendor/github.com/hashicorp/golang-lru/2q.go new file mode 100644 index 00000000000..15fcad0306e --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/2q.go @@ -0,0 +1,222 @@ +package lru + +import ( + "fmt" + "sync" + + "github.com/hashicorp/golang-lru/simplelru" +) + +const ( + // Default2QRecentRatio is the ratio of the 2Q cache dedicated + // to recently added entries that have only been accessed once. + Default2QRecentRatio = 0.25 + + // Default2QGhostEntries is the default ratio of ghost + // entries kept to track entries recently evicted + Default2QGhostEntries = 0.50 +) + +// TwoQueueCache is a thread-safe fixed size 2Q cache. +// 2Q is an enhancement over the standard LRU cache +// in that it tracks both frequently and recently used +// entries separately. 
This avoids a burst in access to new +// entries from evicting frequently used entries. It adds some +// additional tracking overhead to the standard LRU cache, and is +// computationally about 2x the cost, and adds some metadata over +// head. The ARCCache is similar, but does not require setting any +// parameters. +type TwoQueueCache struct { + size int + recentSize int + + recent simplelru.LRUCache + frequent simplelru.LRUCache + recentEvict simplelru.LRUCache + lock sync.RWMutex +} + +// New2Q creates a new TwoQueueCache using the default +// values for the parameters. +func New2Q(size int) (*TwoQueueCache, error) { + return New2QParams(size, Default2QRecentRatio, Default2QGhostEntries) +} + +// New2QParams creates a new TwoQueueCache using the provided +// parameter values. +func New2QParams(size int, recentRatio, ghostRatio float64) (*TwoQueueCache, error) { + if size <= 0 { + return nil, fmt.Errorf("invalid size") + } + if recentRatio < 0.0 || recentRatio > 1.0 { + return nil, fmt.Errorf("invalid recent ratio") + } + if ghostRatio < 0.0 || ghostRatio > 1.0 { + return nil, fmt.Errorf("invalid ghost ratio") + } + + // Determine the sub-sizes + recentSize := int(float64(size) * recentRatio) + evictSize := int(float64(size) * ghostRatio) + + // Allocate the LRUs + recent, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + frequent, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + recentEvict, err := simplelru.NewLRU(evictSize, nil) + if err != nil { + return nil, err + } + + // Initialize the cache + c := &TwoQueueCache{ + size: size, + recentSize: recentSize, + recent: recent, + frequent: frequent, + recentEvict: recentEvict, + } + return c, nil +} + +// Get looks up a key's value from the cache. +func (c *TwoQueueCache) Get(key interface{}) (value interface{}, ok bool) { + c.lock.Lock() + defer c.lock.Unlock() + + // Check if this is a frequent value + if val, ok := c.frequent.Get(key); ok { + return val, ok + } + + // If the value is contained in recent, then we + // promote it to frequent + if val, ok := c.recent.Peek(key); ok { + c.recent.Remove(key) + c.frequent.Add(key, val) + return val, ok + } + + // No hit + return nil, false +} + +// Add adds a value to the cache. 
+func (c *TwoQueueCache) Add(key, value interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + + // Check if the value is frequently used already, + // and just update the value + if c.frequent.Contains(key) { + c.frequent.Add(key, value) + return + } + + // Check if the value is recently used, and promote + // the value into the frequent list + if c.recent.Contains(key) { + c.recent.Remove(key) + c.frequent.Add(key, value) + return + } + + // If the value was recently evicted, add it to the + // frequently used list + if c.recentEvict.Contains(key) { + c.ensureSpace(true) + c.recentEvict.Remove(key) + c.frequent.Add(key, value) + return + } + + // Add to the recently seen list + c.ensureSpace(false) + c.recent.Add(key, value) +} + +// ensureSpace is used to ensure we have space in the cache +func (c *TwoQueueCache) ensureSpace(recentEvict bool) { + // If we have space, nothing to do + recentLen := c.recent.Len() + freqLen := c.frequent.Len() + if recentLen+freqLen < c.size { + return + } + + // If the recent buffer is larger than + // the target, evict from there + if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) { + k, _, _ := c.recent.RemoveOldest() + c.recentEvict.Add(k, nil) + return + } + + // Remove from the frequent list otherwise + c.frequent.RemoveOldest() +} + +// Len returns the number of items in the cache. +func (c *TwoQueueCache) Len() int { + c.lock.RLock() + defer c.lock.RUnlock() + return c.recent.Len() + c.frequent.Len() +} + +// Keys returns a slice of the keys in the cache. +// The frequently used keys are first in the returned slice. +func (c *TwoQueueCache) Keys() []interface{} { + c.lock.RLock() + defer c.lock.RUnlock() + k1 := c.frequent.Keys() + k2 := c.recent.Keys() + return append(k1, k2...) +} + +// Remove removes the provided key from the cache. +func (c *TwoQueueCache) Remove(key interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + if c.frequent.Remove(key) { + return + } + if c.recent.Remove(key) { + return + } + if c.recentEvict.Remove(key) { + return + } +} + +// Purge is used to completely clear the cache. +func (c *TwoQueueCache) Purge() { + c.lock.Lock() + defer c.lock.Unlock() + c.recent.Purge() + c.frequent.Purge() + c.recentEvict.Purge() +} + +// Contains is used to check if the cache contains a key +// without updating recency or frequency. +func (c *TwoQueueCache) Contains(key interface{}) bool { + c.lock.RLock() + defer c.lock.RUnlock() + return c.frequent.Contains(key) || c.recent.Contains(key) +} + +// Peek is used to inspect the cache value of a key +// without updating recency or frequency. +func (c *TwoQueueCache) Peek(key interface{}) (value interface{}, ok bool) { + c.lock.RLock() + defer c.lock.RUnlock() + if val, ok := c.frequent.Peek(key); ok { + return val, ok + } + return c.recent.Peek(key) +} diff --git a/vendor/github.com/hashicorp/golang-lru/README.md b/vendor/github.com/hashicorp/golang-lru/README.md new file mode 100644 index 00000000000..03bcfb5b76b --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/README.md @@ -0,0 +1,7 @@ +golang-lru +========== + +Please upgrade to github.com/hashicorp/golang-lru/v2 for all new code as v1 will +not be updated anymore. The v2 version supports generics and is faster; old code +can specify a specific tag, e.g. github.com/hashicorp/golang-lru/v1.0.2 for +backwards compatibility. 
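> Reviewer note: usage of the 2Q cache added above is deliberately plain; the recent/frequent split is internal. A short sketch showing the promotion behavior documented in Get and the ordering documented in Keys:

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	cache, err := lru.New2Q(128)
	if err != nil {
		panic(err)
	}

	cache.Add("a", 1) // lands in the recent queue
	cache.Get("a")    // first hit promotes "a" to the frequent queue
	cache.Add("b", 2) // recent queue again

	// Keys lists frequently used entries first, then recent ones.
	fmt.Println(cache.Keys()) // [a b]
	fmt.Println(cache.Len())  // 2
}
```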
diff --git a/vendor/github.com/hashicorp/golang-lru/arc.go b/vendor/github.com/hashicorp/golang-lru/arc.go new file mode 100644 index 00000000000..e396f8428aa --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/arc.go @@ -0,0 +1,256 @@ +package lru + +import ( + "sync" + + "github.com/hashicorp/golang-lru/simplelru" +) + +// ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC). +// ARC is an enhancement over the standard LRU cache in that tracks both +// frequency and recency of use. This avoids a burst in access to new +// entries from evicting the frequently used older entries. It adds some +// additional tracking overhead to a standard LRU cache, computationally +// it is roughly 2x the cost, and the extra memory overhead is linear +// with the size of the cache. ARC has been patented by IBM, but is +// similar to the TwoQueueCache (2Q) which requires setting parameters. +type ARCCache struct { + size int // Size is the total capacity of the cache + p int // P is the dynamic preference towards T1 or T2 + + t1 simplelru.LRUCache // T1 is the LRU for recently accessed items + b1 simplelru.LRUCache // B1 is the LRU for evictions from t1 + + t2 simplelru.LRUCache // T2 is the LRU for frequently accessed items + b2 simplelru.LRUCache // B2 is the LRU for evictions from t2 + + lock sync.RWMutex +} + +// NewARC creates an ARC of the given size +func NewARC(size int) (*ARCCache, error) { + // Create the sub LRUs + b1, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + b2, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + t1, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + t2, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + + // Initialize the ARC + c := &ARCCache{ + size: size, + p: 0, + t1: t1, + b1: b1, + t2: t2, + b2: b2, + } + return c, nil +} + +// Get looks up a key's value from the cache. +func (c *ARCCache) Get(key interface{}) (value interface{}, ok bool) { + c.lock.Lock() + defer c.lock.Unlock() + + // If the value is contained in T1 (recent), then + // promote it to T2 (frequent) + if val, ok := c.t1.Peek(key); ok { + c.t1.Remove(key) + c.t2.Add(key, val) + return val, ok + } + + // Check if the value is contained in T2 (frequent) + if val, ok := c.t2.Get(key); ok { + return val, ok + } + + // No hit + return nil, false +} + +// Add adds a value to the cache. 
+func (c *ARCCache) Add(key, value interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + + // Check if the value is contained in T1 (recent), and potentially + // promote it to frequent T2 + if c.t1.Contains(key) { + c.t1.Remove(key) + c.t2.Add(key, value) + return + } + + // Check if the value is already in T2 (frequent) and update it + if c.t2.Contains(key) { + c.t2.Add(key, value) + return + } + + // Check if this value was recently evicted as part of the + // recently used list + if c.b1.Contains(key) { + // T1 set is too small, increase P appropriately + delta := 1 + b1Len := c.b1.Len() + b2Len := c.b2.Len() + if b2Len > b1Len { + delta = b2Len / b1Len + } + if c.p+delta >= c.size { + c.p = c.size + } else { + c.p += delta + } + + // Potentially need to make room in the cache + if c.t1.Len()+c.t2.Len() >= c.size { + c.replace(false) + } + + // Remove from B1 + c.b1.Remove(key) + + // Add the key to the frequently used list + c.t2.Add(key, value) + return + } + + // Check if this value was recently evicted as part of the + // frequently used list + if c.b2.Contains(key) { + // T2 set is too small, decrease P appropriately + delta := 1 + b1Len := c.b1.Len() + b2Len := c.b2.Len() + if b1Len > b2Len { + delta = b1Len / b2Len + } + if delta >= c.p { + c.p = 0 + } else { + c.p -= delta + } + + // Potentially need to make room in the cache + if c.t1.Len()+c.t2.Len() >= c.size { + c.replace(true) + } + + // Remove from B2 + c.b2.Remove(key) + + // Add the key to the frequently used list + c.t2.Add(key, value) + return + } + + // Potentially need to make room in the cache + if c.t1.Len()+c.t2.Len() >= c.size { + c.replace(false) + } + + // Keep the size of the ghost buffers trim + if c.b1.Len() > c.size-c.p { + c.b1.RemoveOldest() + } + if c.b2.Len() > c.p { + c.b2.RemoveOldest() + } + + // Add to the recently seen list + c.t1.Add(key, value) +} + +// replace is used to adaptively evict from either T1 or T2 +// based on the current learned value of P +func (c *ARCCache) replace(b2ContainsKey bool) { + t1Len := c.t1.Len() + if t1Len > 0 && (t1Len > c.p || (t1Len == c.p && b2ContainsKey)) { + k, _, ok := c.t1.RemoveOldest() + if ok { + c.b1.Add(k, nil) + } + } else { + k, _, ok := c.t2.RemoveOldest() + if ok { + c.b2.Add(k, nil) + } + } +} + +// Len returns the number of cached entries +func (c *ARCCache) Len() int { + c.lock.RLock() + defer c.lock.RUnlock() + return c.t1.Len() + c.t2.Len() +} + +// Keys returns all the cached keys +func (c *ARCCache) Keys() []interface{} { + c.lock.RLock() + defer c.lock.RUnlock() + k1 := c.t1.Keys() + k2 := c.t2.Keys() + return append(k1, k2...) +} + +// Remove is used to purge a key from the cache +func (c *ARCCache) Remove(key interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + if c.t1.Remove(key) { + return + } + if c.t2.Remove(key) { + return + } + if c.b1.Remove(key) { + return + } + if c.b2.Remove(key) { + return + } +} + +// Purge is used to clear the cache +func (c *ARCCache) Purge() { + c.lock.Lock() + defer c.lock.Unlock() + c.t1.Purge() + c.t2.Purge() + c.b1.Purge() + c.b2.Purge() +} + +// Contains is used to check if the cache contains a key +// without updating recency or frequency. +func (c *ARCCache) Contains(key interface{}) bool { + c.lock.RLock() + defer c.lock.RUnlock() + return c.t1.Contains(key) || c.t2.Contains(key) +} + +// Peek is used to inspect the cache value of a key +// without updating recency or frequency. 
+func (c *ARCCache) Peek(key interface{}) (value interface{}, ok bool) { + c.lock.RLock() + defer c.lock.RUnlock() + if val, ok := c.t1.Peek(key); ok { + return val, ok + } + return c.t2.Peek(key) +} diff --git a/vendor/github.com/hashicorp/golang-lru/doc.go b/vendor/github.com/hashicorp/golang-lru/doc.go new file mode 100644 index 00000000000..2547df979d0 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/doc.go @@ -0,0 +1,21 @@ +// Package lru provides three different LRU caches of varying sophistication. +// +// Cache is a simple LRU cache. It is based on the +// LRU implementation in groupcache: +// https://github.com/golang/groupcache/tree/master/lru +// +// TwoQueueCache tracks frequently used and recently used entries separately. +// This avoids a burst of accesses from taking out frequently used entries, +// at the cost of about 2x computational overhead and some extra bookkeeping. +// +// ARCCache is an adaptive replacement cache. It tracks recent evictions as +// well as recent usage in both the frequent and recent caches. Its +// computational overhead is comparable to TwoQueueCache, but the memory +// overhead is linear with the size of the cache. +// +// ARC has been patented by IBM, so do not use it if that is problematic for +// your program. +// +// All caches in this package take locks while operating, and are therefore +// thread-safe for consumers. +package lru diff --git a/vendor/github.com/hashicorp/golang-lru/lru.go b/vendor/github.com/hashicorp/golang-lru/lru.go new file mode 100644 index 00000000000..895d8e3ea0c --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/lru.go @@ -0,0 +1,231 @@ +package lru + +import ( + "sync" + + "github.com/hashicorp/golang-lru/simplelru" +) + +const ( + // DefaultEvictedBufferSize defines the default buffer size to store evicted key/val + DefaultEvictedBufferSize = 16 +) + +// Cache is a thread-safe fixed size LRU cache. +type Cache struct { + lru *simplelru.LRU + evictedKeys, evictedVals []interface{} + onEvictedCB func(k, v interface{}) + lock sync.RWMutex +} + +// New creates an LRU of the given size. +func New(size int) (*Cache, error) { + return NewWithEvict(size, nil) +} + +// NewWithEvict constructs a fixed size cache with the given eviction +// callback. +func NewWithEvict(size int, onEvicted func(key, value interface{})) (c *Cache, err error) { + // create a cache with default settings + c = &Cache{ + onEvictedCB: onEvicted, + } + if onEvicted != nil { + c.initEvictBuffers() + onEvicted = c.onEvicted + } + c.lru, err = simplelru.NewLRU(size, onEvicted) + return +} + +func (c *Cache) initEvictBuffers() { + c.evictedKeys = make([]interface{}, 0, DefaultEvictedBufferSize) + c.evictedVals = make([]interface{}, 0, DefaultEvictedBufferSize) +} + +// onEvicted save evicted key/val and sent in externally registered callback +// outside of critical section +func (c *Cache) onEvicted(k, v interface{}) { + c.evictedKeys = append(c.evictedKeys, k) + c.evictedVals = append(c.evictedVals, v) +} + +// Purge is used to completely clear the cache. +func (c *Cache) Purge() { + var ks, vs []interface{} + c.lock.Lock() + c.lru.Purge() + if c.onEvictedCB != nil && len(c.evictedKeys) > 0 { + ks, vs = c.evictedKeys, c.evictedVals + c.initEvictBuffers() + } + c.lock.Unlock() + // invoke callback outside of critical section + if c.onEvictedCB != nil { + for i := 0; i < len(ks); i++ { + c.onEvictedCB(ks[i], vs[i]) + } + } +} + +// Add adds a value to the cache. Returns true if an eviction occurred. 
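> Reviewer note: doc.go above positions the three caches as a sophistication ladder; all three share the Add/Get/Peek/Contains surface. A compact sketch of the constructors side by side:

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	// Plain LRU: cheapest, but a one-off scan can flush hot entries.
	simple, _ := lru.New(128)
	simple.Add("k", "v")

	// 2Q: scan-resistant at roughly 2x the bookkeeping; New2Q applies
	// the default recent/ghost ratios.
	twoQ, _ := lru.New2Q(128)
	twoQ.Add("k", "v")

	// ARC: adapts its recent/frequent split on the fly; the package
	// docs note ARC has been patented by IBM.
	arc, _ := lru.NewARC(128)
	arc.Add("k", "v")

	fmt.Println(simple.Len(), twoQ.Len(), arc.Len()) // 1 1 1
}
```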
+func (c *Cache) Add(key, value interface{}) (evicted bool) { + var k, v interface{} + c.lock.Lock() + evicted = c.lru.Add(key, value) + if c.onEvictedCB != nil && evicted { + k, v = c.evictedKeys[0], c.evictedVals[0] + c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0] + } + c.lock.Unlock() + if c.onEvictedCB != nil && evicted { + c.onEvictedCB(k, v) + } + return +} + +// Get looks up a key's value from the cache. +func (c *Cache) Get(key interface{}) (value interface{}, ok bool) { + c.lock.Lock() + value, ok = c.lru.Get(key) + c.lock.Unlock() + return value, ok +} + +// Contains checks if a key is in the cache, without updating the +// recent-ness or deleting it for being stale. +func (c *Cache) Contains(key interface{}) bool { + c.lock.RLock() + containKey := c.lru.Contains(key) + c.lock.RUnlock() + return containKey +} + +// Peek returns the key value (or undefined if not found) without updating +// the "recently used"-ness of the key. +func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) { + c.lock.RLock() + value, ok = c.lru.Peek(key) + c.lock.RUnlock() + return value, ok +} + +// ContainsOrAdd checks if a key is in the cache without updating the +// recent-ness or deleting it for being stale, and if not, adds the value. +// Returns whether found and whether an eviction occurred. +func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) { + var k, v interface{} + c.lock.Lock() + if c.lru.Contains(key) { + c.lock.Unlock() + return true, false + } + evicted = c.lru.Add(key, value) + if c.onEvictedCB != nil && evicted { + k, v = c.evictedKeys[0], c.evictedVals[0] + c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0] + } + c.lock.Unlock() + if c.onEvictedCB != nil && evicted { + c.onEvictedCB(k, v) + } + return false, evicted +} + +// PeekOrAdd checks if a key is in the cache without updating the +// recent-ness or deleting it for being stale, and if not, adds the value. +// Returns whether found and whether an eviction occurred. +func (c *Cache) PeekOrAdd(key, value interface{}) (previous interface{}, ok, evicted bool) { + var k, v interface{} + c.lock.Lock() + previous, ok = c.lru.Peek(key) + if ok { + c.lock.Unlock() + return previous, true, false + } + evicted = c.lru.Add(key, value) + if c.onEvictedCB != nil && evicted { + k, v = c.evictedKeys[0], c.evictedVals[0] + c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0] + } + c.lock.Unlock() + if c.onEvictedCB != nil && evicted { + c.onEvictedCB(k, v) + } + return nil, false, evicted +} + +// Remove removes the provided key from the cache. +func (c *Cache) Remove(key interface{}) (present bool) { + var k, v interface{} + c.lock.Lock() + present = c.lru.Remove(key) + if c.onEvictedCB != nil && present { + k, v = c.evictedKeys[0], c.evictedVals[0] + c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0] + } + c.lock.Unlock() + if c.onEvictedCB != nil && present { + c.onEvictedCB(k, v) + } + return +} + +// Resize changes the cache size. +func (c *Cache) Resize(size int) (evicted int) { + var ks, vs []interface{} + c.lock.Lock() + evicted = c.lru.Resize(size) + if c.onEvictedCB != nil && evicted > 0 { + ks, vs = c.evictedKeys, c.evictedVals + c.initEvictBuffers() + } + c.lock.Unlock() + if c.onEvictedCB != nil && evicted > 0 { + for i := 0; i < len(ks); i++ { + c.onEvictedCB(ks[i], vs[i]) + } + } + return evicted +} + +// RemoveOldest removes the oldest item from the cache. 
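ContainsOrAdd and PeekOrAdd above exist because a caller-side Contains followed by Add would race between goroutines; both perform the check and the insert under a single lock acquisition. A short sketch of their semantics:

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	cache, err := lru.New(8)
	if err != nil {
		panic(err)
	}

	// Adds only if absent; reports prior presence and any eviction.
	found, evicted := cache.ContainsOrAdd("k", 1)
	fmt.Println(found, evicted) // false false

	// Like ContainsOrAdd, but also returns the existing value
	// without refreshing its recency; "k" keeps the value 1.
	prev, ok, _ := cache.PeekOrAdd("k", 2)
	fmt.Println(prev, ok) // 1 true
}
```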
+func (c *Cache) RemoveOldest() (key, value interface{}, ok bool) { + var k, v interface{} + c.lock.Lock() + key, value, ok = c.lru.RemoveOldest() + if c.onEvictedCB != nil && ok { + k, v = c.evictedKeys[0], c.evictedVals[0] + c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0] + } + c.lock.Unlock() + if c.onEvictedCB != nil && ok { + c.onEvictedCB(k, v) + } + return +} + +// GetOldest returns the oldest entry +func (c *Cache) GetOldest() (key, value interface{}, ok bool) { + c.lock.RLock() + key, value, ok = c.lru.GetOldest() + c.lock.RUnlock() + return +} + +// Keys returns a slice of the keys in the cache, from oldest to newest. +func (c *Cache) Keys() []interface{} { + c.lock.RLock() + keys := c.lru.Keys() + c.lock.RUnlock() + return keys +} + +// Len returns the number of items in the cache. +func (c *Cache) Len() int { + c.lock.RLock() + length := c.lru.Len() + c.lock.RUnlock() + return length +} diff --git a/vendor/github.com/hashicorp/golang-lru/testing.go b/vendor/github.com/hashicorp/golang-lru/testing.go new file mode 100644 index 00000000000..492760782c5 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/testing.go @@ -0,0 +1,16 @@ +package lru + +import ( + "crypto/rand" + "math" + "math/big" + "testing" +) + +func getRand(tb testing.TB) int64 { + out, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64)) + if err != nil { + tb.Fatal(err) + } + return out.Int64() +} diff --git a/vendor/github.com/hashicorp/yamux/LICENSE b/vendor/github.com/hashicorp/yamux/LICENSE deleted file mode 100644 index f0e5c79e181..00000000000 --- a/vendor/github.com/hashicorp/yamux/LICENSE +++ /dev/null @@ -1,362 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. 
"Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. 
Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. 
Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. 
Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. 
- -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. \ No newline at end of file diff --git a/vendor/github.com/hashicorp/yamux/README.md b/vendor/github.com/hashicorp/yamux/README.md deleted file mode 100644 index d4db7fc99be..00000000000 --- a/vendor/github.com/hashicorp/yamux/README.md +++ /dev/null @@ -1,86 +0,0 @@ -# Yamux - -Yamux (Yet another Multiplexer) is a multiplexing library for Golang. -It relies on an underlying connection to provide reliability -and ordering, such as TCP or Unix domain sockets, and provides -stream-oriented multiplexing. It is inspired by SPDY but is not -interoperable with it. - -Yamux features include: - -* Bi-directional streams - * Streams can be opened by either client or server - * Useful for NAT traversal - * Server-side push support -* Flow control - * Avoid starvation - * Back-pressure to prevent overwhelming a receiver -* Keep Alives - * Enables persistent connections over a load balancer -* Efficient - * Enables thousands of logical streams with low overhead - -## Documentation - -For complete documentation, see the associated [Godoc](http://godoc.org/github.com/hashicorp/yamux). - -## Specification - -The full specification for Yamux is provided in the `spec.md` file. -It can be used as a guide to implementors of interoperable libraries. - -## Usage - -Using Yamux is remarkably simple: - -```go - -func client() { - // Get a TCP connection - conn, err := net.Dial(...) - if err != nil { - panic(err) - } - - // Setup client side of yamux - session, err := yamux.Client(conn, nil) - if err != nil { - panic(err) - } - - // Open a new stream - stream, err := session.Open() - if err != nil { - panic(err) - } - - // Stream implements net.Conn - stream.Write([]byte("ping")) -} - -func server() { - // Accept a TCP connection - conn, err := listener.Accept() - if err != nil { - panic(err) - } - - // Setup server side of yamux - session, err := yamux.Server(conn, nil) - if err != nil { - panic(err) - } - - // Accept a stream - stream, err := session.Accept() - if err != nil { - panic(err) - } - - // Listen for a message - buf := make([]byte, 4) - stream.Read(buf) -} - -``` - diff --git a/vendor/github.com/hashicorp/yamux/addr.go b/vendor/github.com/hashicorp/yamux/addr.go deleted file mode 100644 index f6a00199cdd..00000000000 --- a/vendor/github.com/hashicorp/yamux/addr.go +++ /dev/null @@ -1,60 +0,0 @@ -package yamux - -import ( - "fmt" - "net" -) - -// hasAddr is used to get the address from the underlying connection -type hasAddr interface { - LocalAddr() net.Addr - RemoteAddr() net.Addr -} - -// yamuxAddr is used when we cannot get the underlying address -type yamuxAddr struct { - Addr string -} - -func (*yamuxAddr) Network() string { - return "yamux" -} - -func (y *yamuxAddr) String() string { - return fmt.Sprintf("yamux:%s", y.Addr) -} - -// Addr is used to get the address of the listener. -func (s *Session) Addr() net.Addr { - return s.LocalAddr() -} - -// LocalAddr is used to get the local address of the -// underlying connection. 
-func (s *Session) LocalAddr() net.Addr { - addr, ok := s.conn.(hasAddr) - if !ok { - return &yamuxAddr{"local"} - } - return addr.LocalAddr() -} - -// RemoteAddr is used to get the address of remote end -// of the underlying connection -func (s *Session) RemoteAddr() net.Addr { - addr, ok := s.conn.(hasAddr) - if !ok { - return &yamuxAddr{"remote"} - } - return addr.RemoteAddr() -} - -// LocalAddr returns the local address -func (s *Stream) LocalAddr() net.Addr { - return s.session.LocalAddr() -} - -// RemoteAddr returns the remote address -func (s *Stream) RemoteAddr() net.Addr { - return s.session.RemoteAddr() -} diff --git a/vendor/github.com/hashicorp/yamux/const.go b/vendor/github.com/hashicorp/yamux/const.go deleted file mode 100644 index 2fdbf844a8e..00000000000 --- a/vendor/github.com/hashicorp/yamux/const.go +++ /dev/null @@ -1,182 +0,0 @@ -package yamux - -import ( - "encoding/binary" - "fmt" -) - -// NetError implements net.Error -type NetError struct { - err error - timeout bool - temporary bool -} - -func (e *NetError) Error() string { - return e.err.Error() -} - -func (e *NetError) Timeout() bool { - return e.timeout -} - -func (e *NetError) Temporary() bool { - return e.temporary -} - -var ( - // ErrInvalidVersion means we received a frame with an - // invalid version - ErrInvalidVersion = fmt.Errorf("invalid protocol version") - - // ErrInvalidMsgType means we received a frame with an - // invalid message type - ErrInvalidMsgType = fmt.Errorf("invalid msg type") - - // ErrSessionShutdown is used if there is a shutdown during - // an operation - ErrSessionShutdown = fmt.Errorf("session shutdown") - - // ErrStreamsExhausted is returned if we have no more - // stream ids to issue - ErrStreamsExhausted = fmt.Errorf("streams exhausted") - - // ErrDuplicateStream is used if a duplicate stream is - // opened inbound - ErrDuplicateStream = fmt.Errorf("duplicate stream initiated") - - // ErrReceiveWindowExceeded indicates the window was exceeded - ErrRecvWindowExceeded = fmt.Errorf("recv window exceeded") - - // ErrTimeout is used when we reach an IO deadline - ErrTimeout = &NetError{ - err: fmt.Errorf("i/o deadline reached"), - - // Error should meet net.Error interface for timeouts for compatability - // with standard library expectations, such as http servers. - timeout: true, - } - - // ErrStreamClosed is returned when using a closed stream - ErrStreamClosed = fmt.Errorf("stream closed") - - // ErrUnexpectedFlag is set when we get an unexpected flag - ErrUnexpectedFlag = fmt.Errorf("unexpected flag") - - // ErrRemoteGoAway is used when we get a go away from the other side - ErrRemoteGoAway = fmt.Errorf("remote end is not accepting connections") - - // ErrConnectionReset is sent if a stream is reset. This can happen - // if the backlog is exceeded, or if there was a remote GoAway. - ErrConnectionReset = fmt.Errorf("connection reset") - - // ErrConnectionWriteTimeout indicates that we hit the "safety valve" - // timeout writing to the underlying stream connection. - ErrConnectionWriteTimeout = fmt.Errorf("connection write timeout") - - // ErrKeepAliveTimeout is sent if a missed keepalive caused the stream close - ErrKeepAliveTimeout = fmt.Errorf("keepalive timeout") -) - -const ( - // protoVersion is the only version we support - protoVersion uint8 = 0 -) - -const ( - // Data is used for data frames. They are followed - // by length bytes worth of payload. - typeData uint8 = iota - - // WindowUpdate is used to change the window of - // a given stream. 
The length indicates the delta - update to the window. - typeWindowUpdate - - // Ping is sent as a keep-alive or to measure - // the RTT. The StreamID and Length value are echoed - back in the response. - typePing - - // GoAway is sent to terminate a session. The StreamID - // should be 0 and the length is an error code. - typeGoAway -) - -const ( - // SYN is sent to signal a new stream. May - // be sent with a data payload - flagSYN uint16 = 1 << iota - - // ACK is sent to acknowledge a new stream. May - // be sent with a data payload - flagACK - - // FIN is sent to half-close the given stream. - // May be sent with a data payload. - flagFIN - - // RST is used to hard close a given stream. - flagRST -) - -const ( - // initialStreamWindow is the initial stream window size - initialStreamWindow uint32 = 256 * 1024 -) - -const ( - // goAwayNormal is sent on a normal termination - goAwayNormal uint32 = iota - - // goAwayProtoErr sent on a protocol error - goAwayProtoErr - - // goAwayInternalErr sent on an internal error - goAwayInternalErr -) - -const ( - sizeOfVersion = 1 - sizeOfType = 1 - sizeOfFlags = 2 - sizeOfStreamID = 4 - sizeOfLength = 4 - headerSize = sizeOfVersion + sizeOfType + sizeOfFlags + - sizeOfStreamID + sizeOfLength -) - -type header []byte - -func (h header) Version() uint8 { - return h[0] -} - -func (h header) MsgType() uint8 { - return h[1] -} - -func (h header) Flags() uint16 { - return binary.BigEndian.Uint16(h[2:4]) -} - -func (h header) StreamID() uint32 { - return binary.BigEndian.Uint32(h[4:8]) -} - -func (h header) Length() uint32 { - return binary.BigEndian.Uint32(h[8:12]) -} - -func (h header) String() string { - return fmt.Sprintf("Vsn:%d Type:%d Flags:%d StreamID:%d Length:%d", - h.Version(), h.MsgType(), h.Flags(), h.StreamID(), h.Length()) -} - -func (h header) encode(msgType uint8, flags uint16, streamID uint32, length uint32) { - h[0] = protoVersion - h[1] = msgType - binary.BigEndian.PutUint16(h[2:4], flags) - binary.BigEndian.PutUint32(h[4:8], streamID) - binary.BigEndian.PutUint32(h[8:12], length) -} diff --git a/vendor/github.com/hashicorp/yamux/mux.go b/vendor/github.com/hashicorp/yamux/mux.go deleted file mode 100644 index 0c3e67b022a..00000000000 --- a/vendor/github.com/hashicorp/yamux/mux.go +++ /dev/null @@ -1,114 +0,0 @@ -package yamux - -import ( - "fmt" - "io" - "log" - "os" - "time" -) - -// Config is used to tune the Yamux session -type Config struct { - // AcceptBacklog is used to limit how many streams may be - // waiting for an accept. - AcceptBacklog int - - // EnableKeepAlive is used to enable periodic keep-alive - // messages using a ping. - EnableKeepAlive bool - - // KeepAliveInterval is how often to perform the keep alive - KeepAliveInterval time.Duration - - // ConnectionWriteTimeout is meant to be a "safety valve" timeout after - // which we will suspect a problem with the underlying connection and - // close it. This is only applied to writes, where there's generally - // an expectation that things will move along quickly. - ConnectionWriteTimeout time.Duration - - // MaxStreamWindowSize is used to control the maximum - // window size that we allow for a stream. - MaxStreamWindowSize uint32 - - // StreamOpenTimeout is the maximum amount of time that a stream will - // be allowed to remain in pending state while waiting for an ack from the peer. - // Once the timeout is reached the session will be gracefully closed. - // A zero value disables the StreamOpenTimeout, allowing unbounded - // blocking on OpenStream calls.
- StreamOpenTimeout time.Duration - - // StreamCloseTimeout is the maximum time that a stream will be allowed to - // remain in a half-closed state when `Close` is called before forcibly - // closing the connection. Forcibly closed connections will empty the - // receive buffer, drop any future packets received for that stream, - // and send a RST to the remote side. - StreamCloseTimeout time.Duration - - // LogOutput is used to control the log destination. Either Logger or - // LogOutput can be set, not both. - LogOutput io.Writer - - // Logger is used to pass in the logger to be used. Either Logger or - // LogOutput can be set, not both. - Logger *log.Logger -} - -// DefaultConfig is used to return a default configuration -func DefaultConfig() *Config { - return &Config{ - AcceptBacklog: 256, - EnableKeepAlive: true, - KeepAliveInterval: 30 * time.Second, - ConnectionWriteTimeout: 10 * time.Second, - MaxStreamWindowSize: initialStreamWindow, - StreamCloseTimeout: 5 * time.Minute, - StreamOpenTimeout: 75 * time.Second, - LogOutput: os.Stderr, - } -} - -// VerifyConfig is used to verify the sanity of the configuration -func VerifyConfig(config *Config) error { - if config.AcceptBacklog <= 0 { - return fmt.Errorf("backlog must be positive") - } - if config.KeepAliveInterval == 0 { - return fmt.Errorf("keep-alive interval must be positive") - } - if config.MaxStreamWindowSize < initialStreamWindow { - return fmt.Errorf("MaxStreamWindowSize must be larger than %d", initialStreamWindow) - } - if config.LogOutput != nil && config.Logger != nil { - return fmt.Errorf("both Logger and LogOutput may not be set, select one") - } else if config.LogOutput == nil && config.Logger == nil { - return fmt.Errorf("one of Logger or LogOutput must be set, select one") - } - return nil -} - -// Server is used to initialize a new server-side connection. -// There must be at most one server-side connection. If a nil config is -// provided, the default configuration will be used. -func Server(conn io.ReadWriteCloser, config *Config) (*Session, error) { - if config == nil { - config = DefaultConfig() - } - if err := VerifyConfig(config); err != nil { - return nil, err - } - return newSession(config, conn, false), nil -} - -// Client is used to initialize a new client-side connection. -// There must be at most one client-side connection. -func Client(conn io.ReadWriteCloser, config *Config) (*Session, error) { - if config == nil { - config = DefaultConfig() - } - - if err := VerifyConfig(config); err != nil { - return nil, err - } - return newSession(config, conn, true), nil -} diff --git a/vendor/github.com/hashicorp/yamux/session.go b/vendor/github.com/hashicorp/yamux/session.go deleted file mode 100644 index 38fe3ed1f06..00000000000 --- a/vendor/github.com/hashicorp/yamux/session.go +++ /dev/null @@ -1,732 +0,0 @@ -package yamux - -import ( - "bufio" - "bytes" - "fmt" - "io" - "io/ioutil" - "log" - "math" - "net" - "strings" - "sync" - "sync/atomic" - "time" -) - -// Session is used to wrap a reliable ordered connection and to -// multiplex it into multiple streams. -type Session struct { - // remoteGoAway indicates the remote side does - // not want further connections. Must be first for alignment. - remoteGoAway int32 - - // localGoAway indicates that we should stop - // accepting further connections. Must be first for alignment. - localGoAway int32 - - // nextStreamID is the next stream we should - // send. This depends on whether we are a client or a server.
- nextStreamID uint32 - - // config holds our configuration - config *Config - - // logger is used for our logs - logger *log.Logger - - // conn is the underlying connection - conn io.ReadWriteCloser - - // bufRead is a buffered reader - bufRead *bufio.Reader - - // pings is used to track inflight pings - pings map[uint32]chan struct{} - pingID uint32 - pingLock sync.Mutex - - // streams maps a stream id to a stream, and inflight has an entry - // for any outgoing stream that has not yet been established. Both are - // protected by streamLock. - streams map[uint32]*Stream - inflight map[uint32]struct{} - streamLock sync.Mutex - - // synCh acts like a semaphore. It is sized to the AcceptBacklog which - // is assumed to be symmetric between the client and server. This allows - // the client to avoid exceeding the backlog and instead blocks the open. - synCh chan struct{} - - // acceptCh is used to pass ready streams to the client - acceptCh chan *Stream - - // sendCh is used to mark a stream as ready to send, - // or to send a header out directly. - sendCh chan *sendReady - - // recvDoneCh is closed when recv() exits to avoid a race - // between stream registration and stream shutdown - recvDoneCh chan struct{} - sendDoneCh chan struct{} - - // shutdown is used to safely close a session - shutdown bool - shutdownErr error - shutdownCh chan struct{} - shutdownLock sync.Mutex - shutdownErrLock sync.Mutex -} - -// sendReady is used to either mark a stream as ready -// or to directly send a header -type sendReady struct { - Hdr []byte - mu sync.Mutex // Protects Body from unsafe reads. - Body []byte - Err chan error -} - -// newSession is used to construct a new session -func newSession(config *Config, conn io.ReadWriteCloser, client bool) *Session { - logger := config.Logger - if logger == nil { - logger = log.New(config.LogOutput, "", log.LstdFlags) - } - - s := &Session{ - config: config, - logger: logger, - conn: conn, - bufRead: bufio.NewReader(conn), - pings: make(map[uint32]chan struct{}), - streams: make(map[uint32]*Stream), - inflight: make(map[uint32]struct{}), - synCh: make(chan struct{}, config.AcceptBacklog), - acceptCh: make(chan *Stream, config.AcceptBacklog), - sendCh: make(chan *sendReady, 64), - recvDoneCh: make(chan struct{}), - sendDoneCh: make(chan struct{}), - shutdownCh: make(chan struct{}), - } - if client { - s.nextStreamID = 1 - } else { - s.nextStreamID = 2 - } - go s.recv() - go s.send() - if config.EnableKeepAlive { - go s.keepalive() - } - return s -} - -// IsClosed does a safe check to see if we have shutdown -func (s *Session) IsClosed() bool { - select { - case <-s.shutdownCh: - return true - default: - return false - } -} - -// CloseChan returns a read-only channel which is closed as -// soon as the session is closed. 
-func (s *Session) CloseChan() <-chan struct{} { - return s.shutdownCh -} - -// NumStreams returns the number of currently open streams -func (s *Session) NumStreams() int { - s.streamLock.Lock() - num := len(s.streams) - s.streamLock.Unlock() - return num -} - -// Open is used to create a new stream as a net.Conn -func (s *Session) Open() (net.Conn, error) { - conn, err := s.OpenStream() - if err != nil { - return nil, err - } - return conn, nil -} - -// OpenStream is used to create a new stream -func (s *Session) OpenStream() (*Stream, error) { - if s.IsClosed() { - return nil, ErrSessionShutdown - } - if atomic.LoadInt32(&s.remoteGoAway) == 1 { - return nil, ErrRemoteGoAway - } - - // Block if we have too many inflight SYNs - select { - case s.synCh <- struct{}{}: - case <-s.shutdownCh: - return nil, ErrSessionShutdown - } - -GET_ID: - // Get an ID, and check for stream exhaustion - id := atomic.LoadUint32(&s.nextStreamID) - if id >= math.MaxUint32-1 { - return nil, ErrStreamsExhausted - } - if !atomic.CompareAndSwapUint32(&s.nextStreamID, id, id+2) { - goto GET_ID - } - - // Register the stream - stream := newStream(s, id, streamInit) - s.streamLock.Lock() - s.streams[id] = stream - s.inflight[id] = struct{}{} - s.streamLock.Unlock() - - if s.config.StreamOpenTimeout > 0 { - go s.setOpenTimeout(stream) - } - - // Send the window update to create - if err := stream.sendWindowUpdate(); err != nil { - select { - case <-s.synCh: - default: - s.logger.Printf("[ERR] yamux: aborted stream open without inflight syn semaphore") - } - return nil, err - } - return stream, nil -} - -// setOpenTimeout implements a timeout for streams that are opened but not established. -// If the StreamOpenTimeout is exceeded we assume the peer is unable to ACK, -// and close the session. -// The number of running timers is bounded by the capacity of the synCh. -func (s *Session) setOpenTimeout(stream *Stream) { - timer := time.NewTimer(s.config.StreamOpenTimeout) - defer timer.Stop() - - select { - case <-stream.establishCh: - return - case <-s.shutdownCh: - return - case <-timer.C: - // Timeout reached while waiting for ACK. - // Close the session to force connection re-establishment. - s.logger.Printf("[ERR] yamux: aborted stream open (destination=%s): %v", s.RemoteAddr().String(), ErrTimeout.err) - s.Close() - } -} - -// Accept is used to block until the next available stream -// is ready to be accepted. -func (s *Session) Accept() (net.Conn, error) { - conn, err := s.AcceptStream() - if err != nil { - return nil, err - } - return conn, err -} - -// AcceptStream is used to block until the next available stream -// is ready to be accepted. -func (s *Session) AcceptStream() (*Stream, error) { - select { - case stream := <-s.acceptCh: - if err := stream.sendWindowUpdate(); err != nil { - return nil, err - } - return stream, nil - case <-s.shutdownCh: - return nil, s.shutdownErr - } -} - -// Close is used to close the session and all streams. -// Attempts to send a GoAway before closing the connection. 
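To make the Open/Accept flow above concrete, here is a hedged end-to-end sketch pairing the two session sides over net.Pipe, following the README usage shown earlier in this diff; error handling is trimmed, and the in-memory pipe merely stands in for a real TCP connection:

```go
package main

import (
	"fmt"
	"net"

	"github.com/hashicorp/yamux"
)

func main() {
	// A reliable, ordered in-memory connection stands in for TCP.
	clientConn, serverConn := net.Pipe()
	done := make(chan struct{})

	go func() {
		defer close(done)
		session, err := yamux.Server(serverConn, nil) // nil => DefaultConfig
		if err != nil {
			panic(err)
		}
		stream, err := session.AcceptStream() // blocks until a SYN arrives
		if err != nil {
			panic(err)
		}
		buf := make([]byte, 4)
		stream.Read(buf)
		fmt.Printf("server got: %s\n", buf)
	}()

	session, err := yamux.Client(clientConn, nil)
	if err != nil {
		panic(err)
	}
	stream, err := session.OpenStream() // SYN rides the initial window update
	if err != nil {
		panic(err)
	}
	stream.Write([]byte("ping")) // data may flow before the ACK returns
	<-done
}
```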
-func (s *Session) Close() error { - s.shutdownLock.Lock() - defer s.shutdownLock.Unlock() - - if s.shutdown { - return nil - } - s.shutdown = true - - s.shutdownErrLock.Lock() - if s.shutdownErr == nil { - s.shutdownErr = ErrSessionShutdown - } - s.shutdownErrLock.Unlock() - - close(s.shutdownCh) - - s.conn.Close() - <-s.recvDoneCh - - s.streamLock.Lock() - defer s.streamLock.Unlock() - for _, stream := range s.streams { - stream.forceClose() - } - <-s.sendDoneCh - return nil -} - -// exitErr is used to handle an error that is causing the -// session to terminate. -func (s *Session) exitErr(err error) { - s.shutdownErrLock.Lock() - if s.shutdownErr == nil { - s.shutdownErr = err - } - s.shutdownErrLock.Unlock() - s.Close() -} - -// GoAway can be used to prevent accepting further -// connections. It does not close the underlying conn. -func (s *Session) GoAway() error { - return s.waitForSend(s.goAway(goAwayNormal), nil) -} - -// goAway is used to send a goAway message -func (s *Session) goAway(reason uint32) header { - atomic.SwapInt32(&s.localGoAway, 1) - hdr := header(make([]byte, headerSize)) - hdr.encode(typeGoAway, 0, 0, reason) - return hdr -} - -// Ping is used to measure the RTT response time -func (s *Session) Ping() (time.Duration, error) { - // Get a channel for the ping - ch := make(chan struct{}) - - // Get a new ping id, mark as pending - s.pingLock.Lock() - id := s.pingID - s.pingID++ - s.pings[id] = ch - s.pingLock.Unlock() - - // Send the ping request - hdr := header(make([]byte, headerSize)) - hdr.encode(typePing, flagSYN, 0, id) - if err := s.waitForSend(hdr, nil); err != nil { - return 0, err - } - - // Wait for a response - start := time.Now() - select { - case <-ch: - case <-time.After(s.config.ConnectionWriteTimeout): - s.pingLock.Lock() - delete(s.pings, id) // Ignore it if a response comes later. - s.pingLock.Unlock() - return 0, ErrTimeout - case <-s.shutdownCh: - return 0, ErrSessionShutdown - } - - // Compute the RTT - return time.Now().Sub(start), nil -} - -// keepalive is a long running goroutine that periodically does -// a ping to keep the connection alive. -func (s *Session) keepalive() { - for { - select { - case <-time.After(s.config.KeepAliveInterval): - _, err := s.Ping() - if err != nil { - if err != ErrSessionShutdown { - s.logger.Printf("[ERR] yamux: keepalive failed: %v", err) - s.exitErr(ErrKeepAliveTimeout) - } - return - } - case <-s.shutdownCh: - return - } - } -} - -// waitForSendErr waits to send a header, checking for a potential shutdown -func (s *Session) waitForSend(hdr header, body []byte) error { - errCh := make(chan error, 1) - return s.waitForSendErr(hdr, body, errCh) -} - -// waitForSendErr waits to send a header with optional data, checking for a -// potential shutdown. Since there's the expectation that sends can happen -// in a timely manner, we enforce the connection write timeout here. -func (s *Session) waitForSendErr(hdr header, body []byte, errCh chan error) error { - t := timerPool.Get() - timer := t.(*time.Timer) - timer.Reset(s.config.ConnectionWriteTimeout) - defer func() { - timer.Stop() - select { - case <-timer.C: - default: - } - timerPool.Put(t) - }() - - ready := &sendReady{Hdr: hdr, Body: body, Err: errCh} - select { - case s.sendCh <- ready: - case <-s.shutdownCh: - return ErrSessionShutdown - case <-timer.C: - return ErrConnectionWriteTimeout - } - - bodyCopy := func() { - if body == nil { - return // A nil body is ignored. 
- } - - // In the event of session shutdown or connection write timeout, - // we need to prevent `send` from reading the body buffer after - // returning from this function since the caller may re-use the - // underlying array. - ready.mu.Lock() - defer ready.mu.Unlock() - - if ready.Body == nil { - return // Body was already copied in `send`. - } - newBody := make([]byte, len(body)) - copy(newBody, body) - ready.Body = newBody - } - - select { - case err := <-errCh: - return err - case <-s.shutdownCh: - bodyCopy() - return ErrSessionShutdown - case <-timer.C: - bodyCopy() - return ErrConnectionWriteTimeout - } -} - -// sendNoWait does a send without waiting. Since there's the expectation that -// the send happens right here, we enforce the connection write timeout if we -// can't queue the header to be sent. -func (s *Session) sendNoWait(hdr header) error { - t := timerPool.Get() - timer := t.(*time.Timer) - timer.Reset(s.config.ConnectionWriteTimeout) - defer func() { - timer.Stop() - select { - case <-timer.C: - default: - } - timerPool.Put(t) - }() - - select { - case s.sendCh <- &sendReady{Hdr: hdr}: - return nil - case <-s.shutdownCh: - return ErrSessionShutdown - case <-timer.C: - return ErrConnectionWriteTimeout - } -} - -// send is a long running goroutine that sends data -func (s *Session) send() { - if err := s.sendLoop(); err != nil { - s.exitErr(err) - } -} - -func (s *Session) sendLoop() error { - defer close(s.sendDoneCh) - var bodyBuf bytes.Buffer - for { - bodyBuf.Reset() - - select { - case ready := <-s.sendCh: - // Send a header if ready - if ready.Hdr != nil { - _, err := s.conn.Write(ready.Hdr) - if err != nil { - s.logger.Printf("[ERR] yamux: Failed to write header: %v", err) - asyncSendErr(ready.Err, err) - return err - } - } - - ready.mu.Lock() - if ready.Body != nil { - // Copy the body into the buffer to avoid - // holding a mutex lock during the write. 
- _, err := bodyBuf.Write(ready.Body) - if err != nil { - ready.Body = nil - ready.mu.Unlock() - s.logger.Printf("[ERR] yamux: Failed to copy body into buffer: %v", err) - asyncSendErr(ready.Err, err) - return err - } - ready.Body = nil - } - ready.mu.Unlock() - - if bodyBuf.Len() > 0 { - // Send data from a body if given - _, err := s.conn.Write(bodyBuf.Bytes()) - if err != nil { - s.logger.Printf("[ERR] yamux: Failed to write body: %v", err) - asyncSendErr(ready.Err, err) - return err - } - } - - // No error, successful send - asyncSendErr(ready.Err, nil) - case <-s.shutdownCh: - return nil - } - } -} - -// recv is a long running goroutine that accepts new data -func (s *Session) recv() { - if err := s.recvLoop(); err != nil { - s.exitErr(err) - } -} - -// Ensure that the index of the handler (typeData/typeWindowUpdate/etc) matches the message type -var ( - handlers = []func(*Session, header) error{ - typeData: (*Session).handleStreamMessage, - typeWindowUpdate: (*Session).handleStreamMessage, - typePing: (*Session).handlePing, - typeGoAway: (*Session).handleGoAway, - } -) - -// recvLoop continues to receive data until a fatal error is encountered -func (s *Session) recvLoop() error { - defer close(s.recvDoneCh) - hdr := header(make([]byte, headerSize)) - for { - // Read the header - if _, err := io.ReadFull(s.bufRead, hdr); err != nil { - if err != io.EOF && !strings.Contains(err.Error(), "closed") && !strings.Contains(err.Error(), "reset by peer") { - s.logger.Printf("[ERR] yamux: Failed to read header: %v", err) - } - return err - } - - // Verify the version - if hdr.Version() != protoVersion { - s.logger.Printf("[ERR] yamux: Invalid protocol version: %d", hdr.Version()) - return ErrInvalidVersion - } - - mt := hdr.MsgType() - if mt < typeData || mt > typeGoAway { - return ErrInvalidMsgType - } - - if err := handlers[mt](s, hdr); err != nil { - return err - } - } -} - -// handleStreamMessage handles either a data or window update frame -func (s *Session) handleStreamMessage(hdr header) error { - // Check for a new stream creation - id := hdr.StreamID() - flags := hdr.Flags() - if flags&flagSYN == flagSYN { - if err := s.incomingStream(id); err != nil { - return err - } - } - - // Get the stream - s.streamLock.Lock() - stream := s.streams[id] - s.streamLock.Unlock() - - // If we do not have a stream, likely we sent a RST - if stream == nil { - // Drain any data on the wire - if hdr.MsgType() == typeData && hdr.Length() > 0 { - s.logger.Printf("[WARN] yamux: Discarding data for stream: %d", id) - if _, err := io.CopyN(ioutil.Discard, s.bufRead, int64(hdr.Length())); err != nil { - s.logger.Printf("[ERR] yamux: Failed to discard data: %v", err) - return nil - } - } else { - s.logger.Printf("[WARN] yamux: frame for missing stream: %v", hdr) - } - return nil - } - - // Check if this is a window update - if hdr.MsgType() == typeWindowUpdate { - if err := stream.incrSendWindow(hdr, flags); err != nil { - if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil { - s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr) - } - return err - } - return nil - } - - // Read the new data - if err := stream.readData(hdr, flags, s.bufRead); err != nil { - if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil { - s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr) - } - return err - } - return nil -} - -// handlePing is invokde for a typePing frame -func (s *Session) handlePing(hdr header) error { - flags := hdr.Flags() - pingID := hdr.Length() 
- - // Check if this is a query, respond back in a separate context so we - // don't interfere with the receiving thread blocking for the write. - if flags&flagSYN == flagSYN { - go func() { - hdr := header(make([]byte, headerSize)) - hdr.encode(typePing, flagACK, 0, pingID) - if err := s.sendNoWait(hdr); err != nil { - s.logger.Printf("[WARN] yamux: failed to send ping reply: %v", err) - } - }() - return nil - } - - // Handle a response - s.pingLock.Lock() - ch := s.pings[pingID] - if ch != nil { - delete(s.pings, pingID) - close(ch) - } - s.pingLock.Unlock() - return nil -} - -// handleGoAway is invokde for a typeGoAway frame -func (s *Session) handleGoAway(hdr header) error { - code := hdr.Length() - switch code { - case goAwayNormal: - atomic.SwapInt32(&s.remoteGoAway, 1) - case goAwayProtoErr: - s.logger.Printf("[ERR] yamux: received protocol error go away") - return fmt.Errorf("yamux protocol error") - case goAwayInternalErr: - s.logger.Printf("[ERR] yamux: received internal error go away") - return fmt.Errorf("remote yamux internal error") - default: - s.logger.Printf("[ERR] yamux: received unexpected go away") - return fmt.Errorf("unexpected go away received") - } - return nil -} - -// incomingStream is used to create a new incoming stream -func (s *Session) incomingStream(id uint32) error { - // Reject immediately if we are doing a go away - if atomic.LoadInt32(&s.localGoAway) == 1 { - hdr := header(make([]byte, headerSize)) - hdr.encode(typeWindowUpdate, flagRST, id, 0) - return s.sendNoWait(hdr) - } - - // Allocate a new stream - stream := newStream(s, id, streamSYNReceived) - - s.streamLock.Lock() - defer s.streamLock.Unlock() - - // Check if stream already exists - if _, ok := s.streams[id]; ok { - s.logger.Printf("[ERR] yamux: duplicate stream declared") - if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil { - s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr) - } - return ErrDuplicateStream - } - - // Register the stream - s.streams[id] = stream - - // Check if we've exceeded the backlog - select { - case s.acceptCh <- stream: - return nil - default: - // Backlog exceeded! RST the stream - s.logger.Printf("[WARN] yamux: backlog exceeded, forcing connection reset") - delete(s.streams, id) - hdr := header(make([]byte, headerSize)) - hdr.encode(typeWindowUpdate, flagRST, id, 0) - return s.sendNoWait(hdr) - } -} - -// closeStream is used to close a stream once both sides have -// issued a close. If there was an in-flight SYN and the stream -// was not yet established, then this will give the credit back. -func (s *Session) closeStream(id uint32) { - s.streamLock.Lock() - if _, ok := s.inflight[id]; ok { - select { - case <-s.synCh: - default: - s.logger.Printf("[ERR] yamux: SYN tracking out of sync") - } - } - delete(s.streams, id) - s.streamLock.Unlock() -} - -// establishStream is used to mark a stream that was in the -// SYN Sent state as established. 
-func (s *Session) establishStream(id uint32) { - s.streamLock.Lock() - if _, ok := s.inflight[id]; ok { - delete(s.inflight, id) - } else { - s.logger.Printf("[ERR] yamux: established stream without inflight SYN (no tracking entry)") - } - select { - case <-s.synCh: - default: - s.logger.Printf("[ERR] yamux: established stream without inflight SYN (didn't have semaphore)") - } - s.streamLock.Unlock() -} diff --git a/vendor/github.com/hashicorp/yamux/spec.md b/vendor/github.com/hashicorp/yamux/spec.md deleted file mode 100644 index 183d797bdea..00000000000 --- a/vendor/github.com/hashicorp/yamux/spec.md +++ /dev/null @@ -1,140 +0,0 @@ -# Specification - -We use this document to detail the internal specification of Yamux. -It serves both as a guide for implementing Yamux and as a reference -for building alternative interoperable libraries. - -# Framing - -Yamux uses a streaming connection underneath, but imposes a message -framing so that it can be shared between many logical streams. Each -frame contains a header like: - -* Version (8 bits) -* Type (8 bits) -* Flags (16 bits) -* StreamID (32 bits) -* Length (32 bits) - -This means that each header has a 12 byte overhead. -All fields are encoded in network order (big endian). -Each field is described below: - -## Version Field - -The version field is used for future backward compatibility. At the -current time, the field is always set to 0, to indicate the initial -version. - -## Type Field - -The type field is used to switch the frame message type. The following -message types are supported: - -* 0x0 Data - Used to transmit data. May transmit zero length payloads - depending on the flags. - -* 0x1 Window Update - Used to update the sender's receive window size. - This is used to implement per-stream flow control. - -* 0x2 Ping - Used to measure RTT. It can also be used for heart-beats - and keep-alives over TCP. - -* 0x3 Go Away - Used to close a session. - -## Flag Field - -The flags field is used to provide additional information related -to the message type. The following flags are supported: - -* 0x1 SYN - Signals the start of a new stream. May be sent with a data or - window update message. Also sent with a ping to indicate outbound. - -* 0x2 ACK - Acknowledges the start of a new stream. May be sent with a data - or window update message. Also sent with a ping to indicate response. - -* 0x4 FIN - Performs a half-close of a stream. May be sent with a data - message or window update. - -* 0x8 RST - Reset a stream immediately. May be sent with a data or - window update message. - -## StreamID Field - -The StreamID field is used to identify the logical stream the frame -is addressing. The client side should use odd IDs, and the server even ones. -This prevents any collisions. Additionally, the 0 ID is reserved to represent -the session. - -Both Ping and Go Away messages should always use the 0 StreamID. - -## Length Field - -The meaning of the length field depends on the message type: - -* Data - provides the number of payload bytes following the header -* Window update - provides a delta update to the window size -* Ping - Contains an opaque value, echoed back -* Go Away - Contains an error code - -# Message Flow - -There is no explicit connection setup, as Yamux relies on an underlying -transport to be provided. However, there is a distinction between the client -and server sides of the connection. - -## Opening a stream - -To open a stream, an initial data or window update frame is sent -with a new StreamID.
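As an aside before the stream-opening flow continues: the 12-byte header layout above can be made concrete with a standalone Go sketch (not the vendored API, whose header helpers are unexported) that encodes a ping frame and reads the fields back. The type and flag constants follow the tables above:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// encode mirrors the layout above: 1 byte version, 1 byte type,
// 2 bytes flags, 4 bytes stream ID, 4 bytes length, all big endian.
func encode(buf []byte, msgType uint8, flags uint16, streamID, length uint32) {
	buf[0] = 0 // version 0 is the only one defined
	buf[1] = msgType
	binary.BigEndian.PutUint16(buf[2:4], flags)
	binary.BigEndian.PutUint32(buf[4:8], streamID)
	binary.BigEndian.PutUint32(buf[8:12], length)
}

func main() {
	const (
		typePing       = 0x2
		flagSYN uint16 = 0x1
	)

	hdr := make([]byte, 12)
	// A ping uses stream 0 and carries its opaque ID in the length field.
	encode(hdr, typePing, flagSYN, 0, 42)

	fmt.Println("type:", hdr[1])                                // 2
	fmt.Println("flags:", binary.BigEndian.Uint16(hdr[2:4]))    // 1
	fmt.Println("ping id:", binary.BigEndian.Uint32(hdr[8:12])) // 42
}
```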
The SYN flag should be set to signal a new stream. - -The receiver must then reply with either a data or window update frame -with the StreamID along with the ACK flag to accept the stream or with -the RST flag to reject the stream. - -Because we are relying on the reliable stream underneath, a connection -can begin sending data once the SYN flag is sent. The corresponding -ACK does not need to be received. This is particularly well suited -for an RPC system where a client wants to open a stream and immediately -fire a request without waiting for the RTT of the ACK. - -This does introduce the possibility of a connection being rejected -after data has been sent already. This is a slight semantic difference -from TCP, where the connection cannot be refused after it is opened. -Clients should be prepared to handle this by checking for an error -that indicates a RST was received. - -## Closing a stream - -To close a stream, either side sends a data or window update frame -along with the FIN flag. This does a half-close indicating the sender -will send no further data. - -Once both sides have half-closed the stream, it is fully closed. - -Alternatively, if an error occurs, the RST flag can be used to -hard close a stream immediately. - -## Flow Control - -Yamux initially starts each stream with a 256KB window size. -There is no window size for the session. - -To prevent the streams from stalling, window update frames should be -sent regularly. Yamux can be configured to provide a larger limit for -window sizes. Both sides assume the initial 256KB window, but can -immediately send a window update as part of the SYN/ACK indicating a -larger window. - -Both sides should track only the bytes sent in Data frames, as only -those count against the window size. - -## Session termination - -When a session is being terminated, the Go Away message should -be sent. The Length should be set to one of the following to -provide an error code: - -* 0x0 Normal termination -* 0x1 Protocol error -* 0x2 Internal error diff --git a/vendor/github.com/hashicorp/yamux/stream.go b/vendor/github.com/hashicorp/yamux/stream.go deleted file mode 100644 index 23d08fcc8da..00000000000 --- a/vendor/github.com/hashicorp/yamux/stream.go +++ /dev/null @@ -1,544 +0,0 @@ -package yamux - -import ( - "bytes" - "errors" - "io" - "sync" - "sync/atomic" - "time" -) - -type streamState int - -const ( - streamInit streamState = iota - streamSYNSent - streamSYNReceived - streamEstablished - streamLocalClose - streamRemoteClose - streamClosed - streamReset -) - -// Stream is used to represent a logical stream -// within a session. -type Stream struct { - recvWindow uint32 - sendWindow uint32 - - id uint32 - session *Session - - state streamState - stateLock sync.Mutex - - recvBuf *bytes.Buffer - recvLock sync.Mutex - - controlHdr header - controlErr chan error - controlHdrLock sync.Mutex - - sendHdr header - sendErr chan error - sendLock sync.Mutex - - recvNotifyCh chan struct{} - sendNotifyCh chan struct{} - - readDeadline atomic.Value // time.Time - writeDeadline atomic.Value // time.Time - - // establishCh is notified if the stream is established or being closed. - establishCh chan struct{} - - // closeTimer is set with stateLock held to honor the StreamCloseTimeout - // setting on Session.
- closeTimer *time.Timer -} - -// newStream is used to construct a new stream within -// a given session for an ID -func newStream(session *Session, id uint32, state streamState) *Stream { - s := &Stream{ - id: id, - session: session, - state: state, - controlHdr: header(make([]byte, headerSize)), - controlErr: make(chan error, 1), - sendHdr: header(make([]byte, headerSize)), - sendErr: make(chan error, 1), - recvWindow: initialStreamWindow, - sendWindow: initialStreamWindow, - recvNotifyCh: make(chan struct{}, 1), - sendNotifyCh: make(chan struct{}, 1), - establishCh: make(chan struct{}, 1), - } - s.readDeadline.Store(time.Time{}) - s.writeDeadline.Store(time.Time{}) - return s -} - -// Session returns the associated stream session -func (s *Stream) Session() *Session { - return s.session -} - -// StreamID returns the ID of this stream -func (s *Stream) StreamID() uint32 { - return s.id -} - -// Read is used to read from the stream -func (s *Stream) Read(b []byte) (n int, err error) { - defer asyncNotify(s.recvNotifyCh) -START: - s.stateLock.Lock() - switch s.state { - case streamLocalClose: - fallthrough - case streamRemoteClose: - fallthrough - case streamClosed: - s.recvLock.Lock() - if s.recvBuf == nil || s.recvBuf.Len() == 0 { - s.recvLock.Unlock() - s.stateLock.Unlock() - return 0, io.EOF - } - s.recvLock.Unlock() - case streamReset: - s.stateLock.Unlock() - return 0, ErrConnectionReset - } - s.stateLock.Unlock() - - // If there is no data available, block - s.recvLock.Lock() - if s.recvBuf == nil || s.recvBuf.Len() == 0 { - s.recvLock.Unlock() - goto WAIT - } - - // Read any bytes - n, _ = s.recvBuf.Read(b) - s.recvLock.Unlock() - - // Send a window update potentially - err = s.sendWindowUpdate() - if err == ErrSessionShutdown { - err = nil - } - return n, err - -WAIT: - var timeout <-chan time.Time - var timer *time.Timer - readDeadline := s.readDeadline.Load().(time.Time) - if !readDeadline.IsZero() { - delay := readDeadline.Sub(time.Now()) - timer = time.NewTimer(delay) - timeout = timer.C - } - select { - case <-s.recvNotifyCh: - if timer != nil { - timer.Stop() - } - goto START - case <-timeout: - return 0, ErrTimeout - } -} - -// Write is used to write to the stream -func (s *Stream) Write(b []byte) (n int, err error) { - s.sendLock.Lock() - defer s.sendLock.Unlock() - total := 0 - for total < len(b) { - n, err := s.write(b[total:]) - total += n - if err != nil { - return total, err - } - } - return total, nil -} - -// write is used to write to the stream, may return on -// a short write. -func (s *Stream) write(b []byte) (n int, err error) { - var flags uint16 - var max uint32 - var body []byte -START: - s.stateLock.Lock() - switch s.state { - case streamLocalClose: - fallthrough - case streamClosed: - s.stateLock.Unlock() - return 0, ErrStreamClosed - case streamReset: - s.stateLock.Unlock() - return 0, ErrConnectionReset - } - s.stateLock.Unlock() - - // If there is no data available, block - window := atomic.LoadUint32(&s.sendWindow) - if window == 0 { - goto WAIT - } - - // Determine the flags if any - flags = s.sendFlags() - - // Send up to our send window - max = min(window, uint32(len(b))) - body = b[:max] - - // Send the header - s.sendHdr.encode(typeData, flags, s.id, max) - if err = s.session.waitForSendErr(s.sendHdr, body, s.sendErr); err != nil { - if errors.Is(err, ErrSessionShutdown) || errors.Is(err, ErrConnectionWriteTimeout) { - // Message left in ready queue, header re-use is unsafe. 
- s.sendHdr = header(make([]byte, headerSize)) - } - return 0, err - } - - // Reduce our send window - atomic.AddUint32(&s.sendWindow, ^uint32(max-1)) - - // Unlock - return int(max), err - -WAIT: - var timeout <-chan time.Time - writeDeadline := s.writeDeadline.Load().(time.Time) - if !writeDeadline.IsZero() { - delay := writeDeadline.Sub(time.Now()) - timeout = time.After(delay) - } - select { - case <-s.sendNotifyCh: - goto START - case <-timeout: - return 0, ErrTimeout - } - return 0, nil -} - -// sendFlags determines any flags that are appropriate -// based on the current stream state -func (s *Stream) sendFlags() uint16 { - s.stateLock.Lock() - defer s.stateLock.Unlock() - var flags uint16 - switch s.state { - case streamInit: - flags |= flagSYN - s.state = streamSYNSent - case streamSYNReceived: - flags |= flagACK - s.state = streamEstablished - } - return flags -} - -// sendWindowUpdate potentially sends a window update enabling -// further writes to take place. Must be invoked with the lock. -func (s *Stream) sendWindowUpdate() error { - s.controlHdrLock.Lock() - defer s.controlHdrLock.Unlock() - - // Determine the delta update - max := s.session.config.MaxStreamWindowSize - var bufLen uint32 - s.recvLock.Lock() - if s.recvBuf != nil { - bufLen = uint32(s.recvBuf.Len()) - } - delta := (max - bufLen) - s.recvWindow - - // Determine the flags if any - flags := s.sendFlags() - - // Check if we can omit the update - if delta < (max/2) && flags == 0 { - s.recvLock.Unlock() - return nil - } - - // Update our window - s.recvWindow += delta - s.recvLock.Unlock() - - // Send the header - s.controlHdr.encode(typeWindowUpdate, flags, s.id, delta) - if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil { - if errors.Is(err, ErrSessionShutdown) || errors.Is(err, ErrConnectionWriteTimeout) { - // Message left in ready queue, header re-use is unsafe. - s.controlHdr = header(make([]byte, headerSize)) - } - return err - } - return nil -} - -// sendClose is used to send a FIN -func (s *Stream) sendClose() error { - s.controlHdrLock.Lock() - defer s.controlHdrLock.Unlock() - - flags := s.sendFlags() - flags |= flagFIN - s.controlHdr.encode(typeWindowUpdate, flags, s.id, 0) - if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil { - if errors.Is(err, ErrSessionShutdown) || errors.Is(err, ErrConnectionWriteTimeout) { - // Message left in ready queue, header re-use is unsafe. - s.controlHdr = header(make([]byte, headerSize)) - } - return err - } - return nil -} - -// Close is used to close the stream -func (s *Stream) Close() error { - closeStream := false - s.stateLock.Lock() - switch s.state { - // Opened means we need to signal a close - case streamSYNSent: - fallthrough - case streamSYNReceived: - fallthrough - case streamEstablished: - s.state = streamLocalClose - goto SEND_CLOSE - - case streamLocalClose: - case streamRemoteClose: - s.state = streamClosed - closeStream = true - goto SEND_CLOSE - - case streamClosed: - case streamReset: - default: - panic("unhandled state") - } - s.stateLock.Unlock() - return nil -SEND_CLOSE: - // This shouldn't happen (the more realistic scenario to cancel the - // timer is via processFlags) but just in case this ever happens, we - // cancel the timer to prevent dangling timers. - if s.closeTimer != nil { - s.closeTimer.Stop() - s.closeTimer = nil - } - - // If we have a StreamCloseTimeout set we start the timeout timer. 
- // We do this only if we're not already closing the stream since that - // means this was a graceful close. - // - // This prevents memory leaks if one side (this side) closes and the - // remote side poorly behaves and never responds with a FIN to complete - // the close. After the specified timeout, we clean our resources up no - // matter what. - if !closeStream && s.session.config.StreamCloseTimeout > 0 { - s.closeTimer = time.AfterFunc( - s.session.config.StreamCloseTimeout, s.closeTimeout) - } - - s.stateLock.Unlock() - s.sendClose() - s.notifyWaiting() - if closeStream { - s.session.closeStream(s.id) - } - return nil -} - -// closeTimeout is called after StreamCloseTimeout during a close to -// close this stream. -func (s *Stream) closeTimeout() { - // Close our side forcibly - s.forceClose() - - // Free the stream from the session map - s.session.closeStream(s.id) - - // Send a RST so the remote side closes too. - s.sendLock.Lock() - defer s.sendLock.Unlock() - hdr := header(make([]byte, headerSize)) - hdr.encode(typeWindowUpdate, flagRST, s.id, 0) - s.session.sendNoWait(hdr) -} - -// forceClose is used for when the session is exiting -func (s *Stream) forceClose() { - s.stateLock.Lock() - s.state = streamClosed - s.stateLock.Unlock() - s.notifyWaiting() -} - -// processFlags is used to update the state of the stream -// based on set flags, if any. Lock must be held -func (s *Stream) processFlags(flags uint16) error { - s.stateLock.Lock() - defer s.stateLock.Unlock() - - // Close the stream without holding the state lock - closeStream := false - defer func() { - if closeStream { - if s.closeTimer != nil { - // Stop our close timeout timer since we gracefully closed - s.closeTimer.Stop() - } - - s.session.closeStream(s.id) - } - }() - - if flags&flagACK == flagACK { - if s.state == streamSYNSent { - s.state = streamEstablished - } - asyncNotify(s.establishCh) - s.session.establishStream(s.id) - } - if flags&flagFIN == flagFIN { - switch s.state { - case streamSYNSent: - fallthrough - case streamSYNReceived: - fallthrough - case streamEstablished: - s.state = streamRemoteClose - s.notifyWaiting() - case streamLocalClose: - s.state = streamClosed - closeStream = true - s.notifyWaiting() - default: - s.session.logger.Printf("[ERR] yamux: unexpected FIN flag in state %d", s.state) - return ErrUnexpectedFlag - } - } - if flags&flagRST == flagRST { - s.state = streamReset - closeStream = true - s.notifyWaiting() - } - return nil -} - -// notifyWaiting notifies all the waiting channels -func (s *Stream) notifyWaiting() { - asyncNotify(s.recvNotifyCh) - asyncNotify(s.sendNotifyCh) - asyncNotify(s.establishCh) -} - -// incrSendWindow updates the size of our send window -func (s *Stream) incrSendWindow(hdr header, flags uint16) error { - if err := s.processFlags(flags); err != nil { - return err - } - - // Increase window, unblock a sender - atomic.AddUint32(&s.sendWindow, hdr.Length()) - asyncNotify(s.sendNotifyCh) - return nil -} - -// readData is used to handle a data frame -func (s *Stream) readData(hdr header, flags uint16, conn io.Reader) error { - if err := s.processFlags(flags); err != nil { - return err - } - - // Check that our recv window is not exceeded - length := hdr.Length() - if length == 0 { - return nil - } - - // Wrap in a limited reader - conn = &io.LimitedReader{R: conn, N: int64(length)} - - // Copy into buffer - s.recvLock.Lock() - - if length > s.recvWindow { - s.session.logger.Printf("[ERR] yamux: receive window exceeded (stream: %d, remain: %d, recv: %d)", s.id, 
s.recvWindow, length) - s.recvLock.Unlock() - return ErrRecvWindowExceeded - } - - if s.recvBuf == nil { - // Allocate the receive buffer just-in-time to fit the full data frame. - // This way we can read in the whole packet without further allocations. - s.recvBuf = bytes.NewBuffer(make([]byte, 0, length)) - } - copiedLength, err := io.Copy(s.recvBuf, conn) - if err != nil { - s.session.logger.Printf("[ERR] yamux: Failed to read stream data: %v", err) - s.recvLock.Unlock() - return err - } - - // Decrement the receive window - s.recvWindow -= uint32(copiedLength) - s.recvLock.Unlock() - - // Unblock any readers - asyncNotify(s.recvNotifyCh) - return nil -} - -// SetDeadline sets the read and write deadlines -func (s *Stream) SetDeadline(t time.Time) error { - if err := s.SetReadDeadline(t); err != nil { - return err - } - if err := s.SetWriteDeadline(t); err != nil { - return err - } - return nil -} - -// SetReadDeadline sets the deadline for blocked and future Read calls. -func (s *Stream) SetReadDeadline(t time.Time) error { - s.readDeadline.Store(t) - asyncNotify(s.recvNotifyCh) - return nil -} - -// SetWriteDeadline sets the deadline for blocked and future Write calls -func (s *Stream) SetWriteDeadline(t time.Time) error { - s.writeDeadline.Store(t) - asyncNotify(s.sendNotifyCh) - return nil -} - -// Shrink is used to compact the amount of buffers utilized -// This is useful when using Yamux in a connection pool to reduce -// the idle memory utilization. -func (s *Stream) Shrink() { - s.recvLock.Lock() - if s.recvBuf != nil && s.recvBuf.Len() == 0 { - s.recvBuf = nil - } - s.recvLock.Unlock() -} diff --git a/vendor/github.com/hashicorp/yamux/util.go b/vendor/github.com/hashicorp/yamux/util.go deleted file mode 100644 index 8a73e9249a6..00000000000 --- a/vendor/github.com/hashicorp/yamux/util.go +++ /dev/null @@ -1,43 +0,0 @@ -package yamux - -import ( - "sync" - "time" -) - -var ( - timerPool = &sync.Pool{ - New: func() interface{} { - timer := time.NewTimer(time.Hour * 1e6) - timer.Stop() - return timer - }, - } -) - -// asyncSendErr is used to try an async send of an error -func asyncSendErr(ch chan error, err error) { - if ch == nil { - return - } - select { - case ch <- err: - default: - } -} - -// asyncNotify is used to signal a waiting goroutine -func asyncNotify(ch chan struct{}) { - select { - case ch <- struct{}{}: - default: - } -} - -// min computes the minimum of two values -func min(a, b uint32) uint32 { - if a < b { - return a - } - return b -} diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/configmanager/grpc/manager.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/configmanager/grpc/manager.go index f941a178e6c..67e97ae8b5f 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/configmanager/grpc/manager.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/configmanager/grpc/manager.go @@ -1,28 +1,15 @@ // Copyright (c) 2018 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
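The `asyncNotify` helper in util.go above is the wake-up primitive that stream.go's `recvNotifyCh`/`sendNotifyCh` rely on: notify channels are created with capacity 1 and sends never block, so any burst of notifications collapses into a single pending signal, and the woken goroutine re-checks shared state rather than counting events. A small self-contained sketch of the pattern (not the vendored code):

```go
package main

import (
	"fmt"
	"time"
)

// asyncNotify posts a wake-up without blocking; with a capacity-1
// channel, repeated notifications coalesce into one pending signal.
func asyncNotify(ch chan struct{}) {
	select {
	case ch <- struct{}{}:
	default: // a signal is already pending; drop this one
	}
}

func main() {
	notify := make(chan struct{}, 1)

	// Producer: three rapid events end up as a single signal.
	go func() {
		for i := 0; i < 3; i++ {
			asyncNotify(notify)
		}
	}()

	time.Sleep(10 * time.Millisecond)
	<-notify // wake once, then re-check buffers/state
	fmt.Println("woke up after coalesced notifications")

	select {
	case <-notify:
		fmt.Println("unexpected second signal")
	default:
		fmt.Println("no further signals pending")
	}
}
```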
+// SPDX-License-Identifier: Apache-2.0 package grpc import ( "context" - "errors" "fmt" "google.golang.org/grpc" "github.com/jaegertracing/jaeger/proto-gen/api_v2" - "github.com/jaegertracing/jaeger/thrift-gen/baggage" ) // ConfigManagerProxy returns sampling decisions from collector over gRPC. @@ -45,8 +32,3 @@ func (s *ConfigManagerProxy) GetSamplingStrategy(ctx context.Context, serviceNam } return resp, nil } - -// GetBaggageRestrictions returns baggage restrictions from collector. -func (s *ConfigManagerProxy) GetBaggageRestrictions(_ context.Context, _ string) ([]*baggage.BaggageRestriction, error) { - return nil, errors.New("baggage not implemented") -} diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/configmanager/manager.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/configmanager/manager.go index de12a5d6f74..5d290350436 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/configmanager/manager.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/configmanager/manager.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package configmanager @@ -19,16 +8,12 @@ import ( "context" "github.com/jaegertracing/jaeger/proto-gen/api_v2" - "github.com/jaegertracing/jaeger/thrift-gen/baggage" ) // TODO this interface could be moved to pkg/clientcfg, along with grpc proxy, // but not the metrics wrapper (because its metric names are specific to agent). -// ClientConfigManager decides: -// 1) which sampling strategy a given service should be using -// 2) which baggage restrictions a given service should be using. +// ClientConfigManager decides which sampling strategy a given service should be using. type ClientConfigManager interface { GetSamplingStrategy(ctx context.Context, serviceName string) (*api_v2.SamplingStrategyResponse, error) - GetBaggageRestrictions(ctx context.Context, serviceName string) ([]*baggage.BaggageRestriction, error) } diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/configmanager/metrics.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/configmanager/metrics.go index 08b64d76678..253690f6aa7 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/configmanager/metrics.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/configmanager/metrics.go @@ -1,16 +1,5 @@ // Copyright (c) 2018 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package configmanager @@ -19,7 +8,6 @@ import ( "github.com/jaegertracing/jaeger/pkg/metrics" "github.com/jaegertracing/jaeger/proto-gen/api_v2" - "github.com/jaegertracing/jaeger/thrift-gen/baggage" ) // configManagerMetrics holds metrics related to ClientConfigManager @@ -29,12 +17,6 @@ type configManagerMetrics struct { // Number of failed sampling rate responses from collector SamplingFailures metrics.Counter `metric:"collector-proxy" tags:"result=err,endpoint=sampling"` - - // Number of successful baggage restriction responses from collector - BaggageSuccess metrics.Counter `metric:"collector-proxy" tags:"result=ok,endpoint=baggage"` - - // Number of failed baggage restriction responses from collector - BaggageFailures metrics.Counter `metric:"collector-proxy" tags:"result=err,endpoint=baggage"` } // ManagerWithMetrics is manager with metrics integration. @@ -60,14 +42,3 @@ func (m *ManagerWithMetrics) GetSamplingStrategy(ctx context.Context, serviceNam } return r, err } - -// GetBaggageRestrictions returns baggage restrictions from server. -func (m *ManagerWithMetrics) GetBaggageRestrictions(ctx context.Context, serviceName string) ([]*baggage.BaggageRestriction, error) { - r, err := m.wrapped.GetBaggageRestrictions(ctx, serviceName) - if err != nil { - m.metrics.BaggageFailures.Inc(1) - } else { - m.metrics.BaggageSuccess.Inc(1) - } - return r, err -} diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/customtransport/buffered_read_transport.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/customtransport/buffered_read_transport.go index 929457939ad..24058042b1f 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/customtransport/buffered_read_transport.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/customtransport/buffered_read_transport.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package customtransport @@ -36,19 +25,19 @@ func NewTBufferedReadTransport(readBuf *bytes.Buffer) (*TBufferedReadTransport, // IsOpen does nothing as transport is not maintaining the connection // Required to maintain thrift.TTransport interface -func (p *TBufferedReadTransport) IsOpen() bool { +func (*TBufferedReadTransport) IsOpen() bool { return true } // Open does nothing as transport is not maintaining the connection // Required to maintain thrift.TTransport interface -func (p *TBufferedReadTransport) Open() error { +func (*TBufferedReadTransport) Open() error { return nil } // Close does nothing as transport is not maintaining the connection // Required to maintain thrift.TTransport interface -func (p *TBufferedReadTransport) Close() error { +func (*TBufferedReadTransport) Close() error { return nil } @@ -60,6 +49,7 @@ func (p *TBufferedReadTransport) Read(buf []byte) (int, error) { // RemainingBytes returns the number of bytes left to be read from the readBuf func (p *TBufferedReadTransport) RemainingBytes() uint64 { + //nolint: gosec // G115 return uint64(p.readBuf.Len()) } @@ -72,6 +62,6 @@ func (p *TBufferedReadTransport) Write(buf []byte) (int, error) { // Flush does nothing as udp server does not write responses back // Required to maintain thrift.TTransport interface -func (p *TBufferedReadTransport) Flush(_ context.Context) error { +func (*TBufferedReadTransport) Flush(_ context.Context) error { return nil } diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/httpserver/srv.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/httpserver/srv.go deleted file mode 100644 index 698bad33a7e..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/httpserver/srv.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (c) 2019 The Jaeger Authors. -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package httpserver - -import ( - "net/http" - "time" - - "github.com/gorilla/mux" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" - - "github.com/jaegertracing/jaeger/cmd/agent/app/configmanager" - "github.com/jaegertracing/jaeger/pkg/clientcfg/clientcfghttp" - "github.com/jaegertracing/jaeger/pkg/metrics" -) - -// NewHTTPServer creates a new server that hosts an HTTP/JSON endpoint for clients -// to query for sampling strategies and baggage restrictions. 
-func NewHTTPServer(hostPort string, manager configmanager.ClientConfigManager, mFactory metrics.Factory, logger *zap.Logger) *http.Server { - handler := clientcfghttp.NewHTTPHandler(clientcfghttp.HTTPHandlerParams{ - ConfigManager: manager, - MetricsFactory: mFactory, - LegacySamplingEndpoint: true, - }) - r := mux.NewRouter() - handler.RegisterRoutes(r) - errorLog, _ := zap.NewStdLogAt(logger, zapcore.ErrorLevel) - return &http.Server{ - Addr: hostPort, - Handler: r, - ErrorLog: errorLog, - ReadHeaderTimeout: 2 * time.Second, - } -} diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/processors/processor.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/processors/processor.go index 2ade7a29b52..947ca423104 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/processors/processor.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/processors/processor.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package processors diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/processors/thrift_processor.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/processors/thrift_processor.go index f16aea5cd86..ba3ca9d3f50 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/processors/thrift_processor.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/processors/thrift_processor.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package processors @@ -67,7 +56,7 @@ func NewThriftProcessor( "number of processors must be greater than 0, called with %d", numProcessors) } protocolPool := &sync.Pool{ - New: func() interface{} { + New: func() any { trans := &customtransport.TBufferedReadTransport{} return factory.GetProtocol(trans) }, diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/client_metrics.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/client_metrics.go index c93b60f0b38..7b237582a5f 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/client_metrics.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/client_metrics.go @@ -1,16 +1,5 @@ // Copyright (c) 2020 The Jaeger Authors. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package reporter @@ -143,7 +132,7 @@ func (r *ClientMetricsReporter) expireClientMetricsLoop() { func (r *ClientMetricsReporter) expireClientMetrics(t time.Time) { var size int64 - r.lastReceivedClientStats.Range(func(k, v interface{}) bool { + r.lastReceivedClientStats.Range(func(k, v any) bool { stats := v.(*lastReceivedClientStats) stats.lock.Lock() defer stats.lock.Unlock() @@ -186,12 +175,12 @@ func (r *ClientMetricsReporter) updateClientMetrics(batch *jaeger.Batch) { func (s *lastReceivedClientStats) update( batchSeqNo int64, stats *jaeger.ClientStats, - metrics *clientMetrics, + cMetrics *clientMetrics, ) { s.lock.Lock() defer s.lock.Unlock() - metrics.BatchesReceived.Inc(1) + cMetrics.BatchesReceived.Inc(1) if s.batchSeqNo >= batchSeqNo { // Ignore out of order batches. Once we receive a batch with a larger-than-seen number, @@ -202,11 +191,11 @@ func (s *lastReceivedClientStats) update( // do not update counters on the first batch, because it may cause a huge spike in totals // if the client has been running for a while already, but the agent just started. if s.batchSeqNo > 0 { - metrics.BatchesSent.Inc(batchSeqNo - s.batchSeqNo) + cMetrics.BatchesSent.Inc(batchSeqNo - s.batchSeqNo) if stats != nil { - metrics.FailedToEmitSpans.Inc(stats.FailedToEmitSpans - s.failedToEmitSpans) - metrics.TooLargeDroppedSpans.Inc(stats.TooLargeDroppedSpans - s.tooLargeDroppedSpans) - metrics.FullQueueDroppedSpans.Inc(stats.FullQueueDroppedSpans - s.fullQueueDroppedSpans) + cMetrics.FailedToEmitSpans.Inc(stats.FailedToEmitSpans - s.failedToEmitSpans) + cMetrics.TooLargeDroppedSpans.Inc(stats.TooLargeDroppedSpans - s.tooLargeDroppedSpans) + cMetrics.FullQueueDroppedSpans.Inc(stats.FullQueueDroppedSpans - s.fullQueueDroppedSpans) } } diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/connect_metrics.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/connect_metrics.go index 7af947eab1c..d0f6f5770c1 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/connect_metrics.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/connect_metrics.go @@ -1,21 +1,11 @@ // Copyright (c) 2020 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package reporter import ( "expvar" + "sync" "github.com/jaegertracing/jaeger/pkg/metrics" ) @@ -34,17 +24,21 @@ type ConnectMetrics struct { target *expvar.String } +var ( + gRPCTarget *expvar.String + gRPCTargetOnce sync.Once +) + // NewConnectMetrics initializes ConnectMetrics func NewConnectMetrics(mf metrics.Factory) *ConnectMetrics { cm := &ConnectMetrics{} metrics.MustInit(&cm.metrics, mf.Namespace(metrics.NSOptions{Name: "connection_status"}), nil) - if r := expvar.Get("gRPCTarget"); r == nil { - cm.target = expvar.NewString("gRPCTarget") - } else { - cm.target = r.(*expvar.String) - } - + // expvar.NewString panics if the name is already registered, so publish it only once + gRPCTargetOnce.Do(func() { + gRPCTarget = expvar.NewString("gRPCTarget") + }) + cm.target = gRPCTarget return cm } diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/flags.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/flags.go index c3ec9616789..5dca6444f18 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/flags.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/flags.go @@ -1,22 +1,10 @@ // Copyright (c) 2018 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package reporter import ( "flag" - "fmt" "github.com/spf13/viper" "go.uber.org/zap" @@ -44,15 +32,15 @@ type Options struct { } // AddFlags adds flags for Options. -func AddFlags(flags *flag.FlagSet) { - flags.String(reporterType, string(GRPC), fmt.Sprintf("Reporter type to use e.g. %s", string(GRPC))) +func AddFlags(flagSet *flag.FlagSet) { + flagSet.String(reporterType, string(GRPC), "Reporter type to use e.g. "+string(GRPC)) if !setupcontext.IsAllInOne() { - flags.String(agentTags, "", "One or more tags to be added to the Process tags of all spans passing through this agent. Ex: key1=value1,key2=${envVar:defaultValue}") + flagSet.String(agentTags, "", "One or more tags to be added to the Process tags of all spans passing through this agent. Ex: key1=value1,key2=${envVar:defaultValue}") } } // InitFromViper initializes Options with properties retrieved from Viper. -func (b *Options) InitFromViper(v *viper.Viper, logger *zap.Logger) *Options { +func (b *Options) InitFromViper(v *viper.Viper, _ *zap.Logger) *Options { b.ReporterType = Type(v.GetString(reporterType)) if !setupcontext.IsAllInOne() { if len(v.GetString(agentTags)) > 0 { diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/grpc/builder.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/grpc/builder.go index 9290107dd3a..939b82156ae 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/grpc/builder.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/grpc/builder.go @@ -1,16 +1,5 @@ // Copyright (c) 2018-2019 The Jaeger Authors.
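For context on the connect_metrics.go change above: `expvar.Publish`, and therefore `expvar.NewString`, panics when the same name is registered twice, and the previous `expvar.Get`-then-`NewString` check left a window in which two goroutines could both observe nil and both attempt to publish, with the loser panicking. A minimal sketch of the `sync.Once` registration guard, independent of Jaeger's types:

```go
package main

import (
	"expvar"
	"fmt"
	"sync"
)

var (
	target     *expvar.String
	targetOnce sync.Once
)

// newTargetVar stands in for a constructor that may run more than once
// (e.g. once per reporter). sync.Once guarantees the expvar name is
// published exactly once, so repeated calls cannot panic.
func newTargetVar() *expvar.String {
	targetOnce.Do(func() {
		target = expvar.NewString("gRPCTarget")
	})
	return target
}

func main() {
	a := newTargetVar()
	b := newTargetVar() // safe: no duplicate-registration panic
	a.Set("collector:14250")
	fmt.Println(b.Value()) // both refer to the same published variable
}
```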
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package grpc @@ -20,7 +9,8 @@ import ( "fmt" "strings" - grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry" + "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry" + "go.opentelemetry.io/collector/config/configtls" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/connectivity" @@ -30,7 +20,6 @@ import ( "google.golang.org/grpc/resolver/manual" "github.com/jaegertracing/jaeger/cmd/agent/app/reporter" - "github.com/jaegertracing/jaeger/pkg/config/tlscfg" "github.com/jaegertracing/jaeger/pkg/discovery" "github.com/jaegertracing/jaeger/pkg/discovery/grpcresolver" "github.com/jaegertracing/jaeger/pkg/metrics" @@ -41,13 +30,11 @@ import ( type ConnBuilder struct { // CollectorHostPorts is list of host:port Jaeger Collectors. CollectorHostPorts []string `yaml:"collectorHostPorts"` - - MaxRetry uint - TLS tlscfg.Options - - DiscoveryMinPeers int - Notifier discovery.Notifier - Discoverer discovery.Discoverer + TLS *configtls.ClientConfig + MaxRetry uint + DiscoveryMinPeers int + Notifier discovery.Notifier + Discoverer discovery.Discoverer AdditionalDialOptions []grpc.DialOption } @@ -61,13 +48,12 @@ func NewConnBuilder() *ConnBuilder { func (b *ConnBuilder) CreateConnection(ctx context.Context, logger *zap.Logger, mFactory metrics.Factory) (*grpc.ClientConn, error) { var dialOptions []grpc.DialOption var dialTarget string - if b.TLS.Enabled { // user requested a secure connection + if b.TLS != nil { // user requested a secure connection logger.Info("Agent requested secure grpc connection to collector(s)") - tlsConf, err := b.TLS.Config(logger) + tlsConf, err := b.TLS.LoadTLSConfig(ctx) if err != nil { return nil, fmt.Errorf("failed to load TLS config: %w", err) } - creds := credentials.NewTLS(tlsConf) dialOptions = append(dialOptions, grpc.WithTransportCredentials(creds)) } else { // insecure connection @@ -99,7 +85,7 @@ func (b *ConnBuilder) CreateConnection(ctx context.Context, logger *zap.Logger, } } dialOptions = append(dialOptions, grpc.WithDefaultServiceConfig(grpcresolver.GRPCServiceConfig)) - dialOptions = append(dialOptions, grpc.WithUnaryInterceptor(grpc_retry.UnaryClientInterceptor(grpc_retry.WithMax(b.MaxRetry)))) + dialOptions = append(dialOptions, grpc.WithUnaryInterceptor(retry.UnaryClientInterceptor(retry.WithMax(b.MaxRetry)))) dialOptions = append(dialOptions, b.AdditionalDialOptions...) conn, err := grpc.NewClient(dialTarget, dialOptions...) diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/grpc/collector_proxy.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/grpc/collector_proxy.go index ec41f729b06..bb7b3ff64e8 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/grpc/collector_proxy.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/grpc/collector_proxy.go @@ -1,23 +1,11 @@ // Copyright (c) 2018 The Jaeger Authors. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package grpc import ( "context" "errors" - "io" "go.uber.org/zap" "google.golang.org/grpc" @@ -30,10 +18,9 @@ import ( // ProxyBuilder holds objects communicating with collector type ProxyBuilder struct { - reporter *reporter.ClientMetricsReporter - manager configmanager.ClientConfigManager - conn *grpc.ClientConn - tlsCloser io.Closer + reporter *reporter.ClientMetricsReporter + manager configmanager.ClientConfigManager + conn *grpc.ClientConn } // NewCollectorProxy creates ProxyBuilder @@ -51,10 +38,9 @@ func NewCollectorProxy(ctx context.Context, builder *ConnBuilder, agentTags map[ MetricsFactory: mFactory, }) return &ProxyBuilder{ - conn: conn, - reporter: r3, - manager: configmanager.WrapWithMetrics(grpcManager.NewConfigManager(conn), grpcMetrics), - tlsCloser: &builder.TLS, + conn: conn, + reporter: r3, + manager: configmanager.WrapWithMetrics(grpcManager.NewConfigManager(conn), grpcMetrics), }, nil } @@ -77,7 +63,6 @@ func (b ProxyBuilder) GetManager() configmanager.ClientConfigManager { func (b ProxyBuilder) Close() error { return errors.Join( b.reporter.Close(), - b.tlsCloser.Close(), b.GetConn().Close(), ) } diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/grpc/flags.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/grpc/flags.go index 72289e4b2fd..a7eb79ac106 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/grpc/flags.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/grpc/flags.go @@ -1,16 +1,5 @@ // Copyright (c) 2018 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package grpc @@ -27,7 +16,7 @@ import ( const ( gRPCPrefix = "reporter.grpc" collectorHostPort = gRPCPrefix + ".host-port" - retry = gRPCPrefix + ".retry.max" + retryFlag = gRPCPrefix + ".retry.max" defaultMaxRetry = 3 discoveryMinPeers = gRPCPrefix + ".discovery.min-peers" ) @@ -38,7 +27,7 @@ var tlsFlagsConfig = tlscfg.ClientFlagsConfig{ // AddFlags adds flags for Options. 
func AddFlags(flags *flag.FlagSet) { - flags.Uint(retry, defaultMaxRetry, "Sets the maximum number of retries for a call") + flags.Uint(retryFlag, defaultMaxRetry, "Sets the maximum number of retries for a call") flags.Int(discoveryMinPeers, 3, "Max number of collectors to which the agent will try to connect at any given time") flags.String(collectorHostPort, "", "Comma-separated string representing host:port of a static list of collectors to connect to directly") tlsFlagsConfig.AddFlags(flags) @@ -50,12 +39,14 @@ func (b *ConnBuilder) InitFromViper(v *viper.Viper) (*ConnBuilder, error) { if hostPorts != "" { b.CollectorHostPorts = strings.Split(hostPorts, ",") } - b.MaxRetry = uint(v.GetInt(retry)) - if tls, err := tlsFlagsConfig.InitFromViper(v); err == nil { - b.TLS = tls - } else { + b.MaxRetry = v.GetUint(retryFlag) + tlsCfg, err := tlsFlagsConfig.InitFromViper(v) + if err != nil { return b, fmt.Errorf("failed to process TLS options: %w", err) } + if !tlsCfg.Insecure { + b.TLS = &tlsCfg + } b.DiscoveryMinPeers = v.GetInt(discoveryMinPeers) return b, nil } diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/grpc/reporter.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/grpc/reporter.go index 20ba5241497..879a93aa482 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/grpc/reporter.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/grpc/reporter.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package grpc diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/metrics.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/metrics.go index a1edaca36c2..9b29c3f59f4 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/metrics.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/metrics.go @@ -1,16 +1,5 @@ // Copyright (c) 2018 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package reporter diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/reporter.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/reporter.go index 550a4a2dd9b..0ce8fe95815 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/reporter.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/reporter.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package reporter diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/server.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/server.go index 42d6f85fcf2..1a41e8be267 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/server.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/server.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package servers diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/tbuffered_server.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/tbuffered_server.go index 6a9d49b45ad..a00e8f38609 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/tbuffered_server.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/tbuffered_server.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package servers @@ -79,7 +68,7 @@ func NewTBufferedServer( dataChan := make(chan *ReadBuf, maxQueueSize) readBufPool := &sync.Pool{ - New: func() interface{} { + New: func() any { return &ReadBuf{bytes: make([]byte, maxPacketSize)} }, } diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/thriftudp/socket_buffer.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/thriftudp/socket_buffer.go index 5dd4763593f..fe6eb10cf76 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/thriftudp/socket_buffer.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/thriftudp/socket_buffer.go @@ -1,16 +1,5 @@ // Copyright (c) 2020 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 //go:build !windows // +build !windows diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/thriftudp/socket_buffer_windows.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/thriftudp/socket_buffer_windows.go index dd763ec4c86..b4ad3b066d1 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/thriftudp/socket_buffer_windows.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/thriftudp/socket_buffer_windows.go @@ -1,16 +1,5 @@ // Copyright (c) 2020 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package thriftudp diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/thriftudp/transport.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/thriftudp/transport.go index 4579502061f..81db276ff07 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/thriftudp/transport.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/thriftudp/transport.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package thriftudp @@ -93,7 +82,7 @@ func NewTUDPServerTransport(hostPort string) (*TUDPTransport, error) { // Open does nothing as connection is opened on creation // Required to maintain thrift.TTransport interface -func (p *TUDPTransport) Open() error { +func (*TUDPTransport) Open() error { return nil } @@ -131,7 +120,7 @@ func (p *TUDPTransport) Read(buf []byte) (int, error) { // RemainingBytes returns the max number of bytes (same as Thrift's StreamTransport) as we // do not know how many bytes we have left. -func (p *TUDPTransport) RemainingBytes() uint64 { +func (*TUDPTransport) RemainingBytes() uint64 { const maxSize = ^uint64(0) return maxSize } diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/all-in-one/setupcontext/setupcontext.go b/vendor/github.com/jaegertracing/jaeger/cmd/all-in-one/setupcontext/setupcontext.go index 2701bd88117..396c1bd2428 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/all-in-one/setupcontext/setupcontext.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/all-in-one/setupcontext/setupcontext.go @@ -1,16 +1,5 @@ // Copyright (c) 2019 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package setupcontext diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/model/sampling.go b/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/model/sampling.go deleted file mode 100644 index 32193039adc..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/model/sampling.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright (c) 2019 The Jaeger Authors. -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -// Throughput keeps track of the queries an operation received. -type Throughput struct { - Service string - Operation string - Count int64 - Probabilities map[string]struct{} -} - -// ServiceOperationProbabilities contains the sampling probabilities for all operations in a service. -// ie [service][operation] = probability -type ServiceOperationProbabilities map[string]map[string]float64 - -// ServiceOperationQPS contains the qps for all operations in a service. 
-// ie [service][operation] = qps -type ServiceOperationQPS map[string]map[string]float64 - -// ProbabilityAndQPS contains the sampling probability and measured qps for an operation. -type ProbabilityAndQPS struct { - Probability float64 - QPS float64 -} - -// ServiceOperationData contains the sampling probabilities and measured qps for all operations in a service. -// ie [service][operation] = ProbabilityAndQPS -type ServiceOperationData map[string]map[string]*ProbabilityAndQPS diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/strategystore/factory.go b/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/strategystore/factory.go deleted file mode 100644 index 5fab77b62bd..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/strategystore/factory.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (c) 2018 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package strategystore - -import ( - "go.uber.org/zap" - - "github.com/jaegertracing/jaeger/pkg/metrics" - "github.com/jaegertracing/jaeger/storage" -) - -// Factory defines an interface for a factory that can create implementations of different strategy storage components. -// Implementations are also encouraged to implement plugin.Configurable interface. -// -// # See also -// -// plugin.Configurable -type Factory interface { - // Initialize performs internal initialization of the factory. - Initialize(metricsFactory metrics.Factory, ssFactory storage.SamplingStoreFactory, logger *zap.Logger) error - - // CreateStrategyStore initializes the StrategyStore and returns it. - CreateStrategyStore() (StrategyStore, Aggregator, error) -} diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/strategystore/interface.go b/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/strategystore/interface.go deleted file mode 100644 index 90d9464918d..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/strategystore/interface.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright (c) 2018 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package strategystore - -import ( - "context" - "io" - - "github.com/jaegertracing/jaeger/model" - "github.com/jaegertracing/jaeger/proto-gen/api_v2" -) - -// StrategyStore keeps track of service specific sampling strategies. 
-type StrategyStore interface { - // Close() from io.Closer stops the processor from calculating probabilities. - io.Closer - - // GetSamplingStrategy retrieves the sampling strategy for the specified service. - GetSamplingStrategy(ctx context.Context, serviceName string) (*api_v2.SamplingStrategyResponse, error) -} - -// Aggregator defines an interface used to aggregate operation throughput. -type Aggregator interface { - // Close() from io.Closer stops the aggregator from aggregating throughput. - io.Closer - - // RecordThroughput records throughput for an operation for aggregation. - RecordThroughput(service, operation string, samplerType model.SamplerType, probability float64) - - // Start starts aggregating operation throughput. - Start() -} diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sanitizer/zipkin/span_sanitizer.go b/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sanitizer/zipkin/span_sanitizer.go index 8ba8a71f665..1e9fb29e250 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sanitizer/zipkin/span_sanitizer.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sanitizer/zipkin/span_sanitizer.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package zipkin @@ -69,7 +58,7 @@ func NewSpanDurationSanitizer() Sanitizer { type spanDurationSanitizer struct{} -func (s *spanDurationSanitizer) Sanitize(span *zc.Span) *zc.Span { +func (*spanDurationSanitizer) Sanitize(span *zc.Span) *zc.Span { if span.Duration == nil { duration := defaultDuration if len(span.Annotations) >= 2 { @@ -116,7 +105,7 @@ func NewSpanStartTimeSanitizer() Sanitizer { type spanStartTimeSanitizer struct{} -func (s *spanStartTimeSanitizer) Sanitize(span *zc.Span) *zc.Span { +func (*spanStartTimeSanitizer) Sanitize(span *zc.Span) *zc.Span { if span.Timestamp != nil || len(span.Annotations) == 0 { return span } @@ -143,7 +132,7 @@ func NewParentIDSanitizer() Sanitizer { type parentIDSanitizer struct{} -func (s *parentIDSanitizer) Sanitize(span *zc.Span) *zc.Span { +func (*parentIDSanitizer) Sanitize(span *zc.Span) *zc.Span { if span.ParentID == nil || *span.ParentID != 0 { return span } @@ -166,7 +155,7 @@ func NewErrorTagSanitizer() Sanitizer { type errorTagSanitizer struct{} -func (s *errorTagSanitizer) Sanitize(span *zc.Span) *zc.Span { +func (*errorTagSanitizer) Sanitize(span *zc.Span) *zc.Span { for _, binAnno := range span.BinaryAnnotations { if binAnno.AnnotationType != zc.AnnotationType_BOOL && strings.EqualFold("error", binAnno.Key) { binAnno.AnnotationType = zc.AnnotationType_BOOL diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/.nocover b/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/.nocover deleted file mode 100644 index 46911e9377b..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/.nocover +++ /dev/null @@ -1,2 +0,0 @@ -FIXME - diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/admin.go b/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/admin.go index ea4df675a04..4522fdbbb2a 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/admin.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/admin.go @@ -1,38 +1,27 @@ // Copyright (c) 2019 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package flags import ( "context" - "crypto/tls" "errors" "flag" "fmt" - "io" "net" "net/http" "net/http/pprof" - "time" + "sync" "github.com/spf13/viper" + "go.opentelemetry.io/collector/config/confighttp" "go.uber.org/zap" "go.uber.org/zap/zapcore" "github.com/jaegertracing/jaeger/pkg/config/tlscfg" "github.com/jaegertracing/jaeger/pkg/healthcheck" "github.com/jaegertracing/jaeger/pkg/recoveryhandler" + "github.com/jaegertracing/jaeger/pkg/telemetry" "github.com/jaegertracing/jaeger/pkg/version" ) @@ -46,22 +35,23 @@ var tlsAdminHTTPFlagsConfig = tlscfg.ServerFlagsConfig{ // AdminServer runs an HTTP server with admin endpoints, such as healthcheck at /, /metrics, etc. 
type AdminServer struct { - logger *zap.Logger - adminHostPort string - hc *healthcheck.HealthCheck - mux *http.ServeMux - server *http.Server - tlsCfg *tls.Config - tlsCertWatcherCloser io.Closer + logger *zap.Logger + hc *healthcheck.HealthCheck + mux *http.ServeMux + server *http.Server + serverCfg confighttp.ServerConfig + stopped sync.WaitGroup } // NewAdminServer creates a new admin server. func NewAdminServer(hostPort string) *AdminServer { return &AdminServer{ - adminHostPort: hostPort, - logger: zap.NewNop(), - hc: healthcheck.New(), - mux: http.NewServeMux(), + logger: zap.NewNop(), + hc: healthcheck.New(), + mux: http.NewServeMux(), + serverCfg: confighttp.ServerConfig{ + Endpoint: hostPort, + }, } } @@ -78,7 +68,7 @@ func (s *AdminServer) setLogger(logger *zap.Logger) { // AddFlags registers CLI flags. func (s *AdminServer) AddFlags(flagSet *flag.FlagSet) { - flagSet.String(adminHTTPHostPort, s.adminHostPort, fmt.Sprintf("The host:port (e.g. 127.0.0.1%s or %s) for the admin server, including health check, /metrics, etc.", s.adminHostPort, s.adminHostPort)) + flagSet.String(adminHTTPHostPort, s.serverCfg.Endpoint, fmt.Sprintf("The host:port (e.g. 127.0.0.1%s or %s) for the admin server, including health check, /metrics, etc.", s.serverCfg.Endpoint, s.serverCfg.Endpoint)) tlsAdminHTTPFlagsConfig.AddFlags(flagSet) } @@ -86,22 +76,13 @@ func (s *AdminServer) AddFlags(flagSet *flag.FlagSet) { func (s *AdminServer) initFromViper(v *viper.Viper, logger *zap.Logger) error { s.setLogger(logger) - s.adminHostPort = v.GetString(adminHTTPHostPort) - var tlsAdminHTTP tlscfg.Options tlsAdminHTTP, err := tlsAdminHTTPFlagsConfig.InitFromViper(v) if err != nil { return fmt.Errorf("failed to parse admin server TLS options: %w", err) } - if tlsAdminHTTP.Enabled { - tlsCfg, err := tlsAdminHTTP.Config(s.logger) // This checks if the certificates are correctly provided - if err != nil { - return err - } - s.tlsCfg = tlsCfg - s.tlsCertWatcherCloser = &tlsAdminHTTP - } else { - s.tlsCertWatcherCloser = io.NopCloser(nil) - } + + s.serverCfg.Endpoint = v.GetString(adminHTTPHostPort) + s.serverCfg.TLSSetting = tlsAdminHTTP return nil } @@ -112,48 +93,52 @@ func (s *AdminServer) Handle(path string, handler http.Handler) { // Serve starts HTTP server. 
func (s *AdminServer) Serve() error { - l, err := net.Listen("tcp", s.adminHostPort) + l, err := s.serverCfg.ToListener(context.Background()) if err != nil { s.logger.Error("Admin server failed to listen", zap.Error(err)) return err } - s.serveWithListener(l) - s.logger.Info( - "Admin server started", - zap.String("http.host-port", l.Addr().String()), - zap.Stringer("health-status", s.hc.Get())) - return nil + return s.serveWithListener(l) } -func (s *AdminServer) serveWithListener(l net.Listener) { +func (s *AdminServer) serveWithListener(l net.Listener) (err error) { s.logger.Info("Mounting health check on admin server", zap.String("route", "/")) s.mux.Handle("/", s.hc.Handler()) version.RegisterHandler(s.mux, s.logger) s.registerPprofHandlers() recoveryHandler := recoveryhandler.NewRecoveryHandler(s.logger, true) - errorLog, _ := zap.NewStdLogAt(s.logger, zapcore.ErrorLevel) - s.server = &http.Server{ - Handler: recoveryHandler(s.mux), - ErrorLog: errorLog, - ReadHeaderTimeout: 2 * time.Second, - } - if s.tlsCfg != nil { - s.server.TLSConfig = s.tlsCfg + s.server, err = s.serverCfg.ToServer( + context.Background(), + nil, // host + telemetry.NoopSettings().ToOtelComponent(), + recoveryHandler(s.mux), + ) + if err != nil { + return fmt.Errorf("failed to create admin server: %w", err) } - s.logger.Info("Starting admin HTTP server", zap.String("http-addr", s.adminHostPort)) + errorLog, _ := zap.NewStdLogAt(s.logger, zapcore.ErrorLevel) + s.server.ErrorLog = errorLog + + s.logger.Info("Starting admin HTTP server") + var wg sync.WaitGroup + wg.Add(1) + s.stopped.Add(1) go func() { - var err error - if s.tlsCfg != nil { - err = s.server.ServeTLS(l, "", "") - } else { - err = s.server.Serve(l) - } + wg.Done() + defer s.stopped.Done() + err := s.server.Serve(l) if err != nil && !errors.Is(err, http.ErrServerClosed) { s.logger.Error("failed to serve", zap.Error(err)) s.hc.Set(healthcheck.Broken) } }() + wg.Wait() // wait for the server to start listening + s.logger.Info( + "Admin server started", + zap.String("http.host-port", l.Addr().String()), + zap.Stringer("health-status", s.hc.Get())) + return nil } func (s *AdminServer) registerPprofHandlers() { @@ -170,6 +155,7 @@ func (s *AdminServer) registerPprofHandlers() { // Close stops the HTTP server func (s *AdminServer) Close() error { - _ = s.tlsCertWatcherCloser.Close() - return s.server.Shutdown(context.Background()) + err := s.server.Shutdown(context.Background()) + s.stopped.Wait() + return err } diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/doc.go b/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/doc.go index a65f3a8a0cd..e7b6b877678 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/doc.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/doc.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
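The rewritten serveWithListener above coordinates startup and shutdown with two WaitGroups: a local one blocks until the serving goroutine is actually running, and s.stopped lets Close wait for that goroutine to exit after Shutdown. A standalone sketch of the same pattern, with hypothetical names (server, serve, close):

package main

import (
	"context"
	"errors"
	"log"
	"net"
	"net/http"
	"sync"
)

type server struct {
	srv     *http.Server
	stopped sync.WaitGroup
}

// serve starts srv on l and returns once the goroutine is running; the
// listener is already accepting connections before Serve is called.
func (s *server) serve(l net.Listener) {
	var started sync.WaitGroup
	started.Add(1)
	s.stopped.Add(1)
	go func() {
		started.Done()
		defer s.stopped.Done()
		if err := s.srv.Serve(l); err != nil && !errors.Is(err, http.ErrServerClosed) {
			log.Println("serve failed:", err)
		}
	}()
	started.Wait()
}

// close mirrors AdminServer.Close: shut down, then wait for the serving
// goroutine to finish before reporting the result.
func (s *server) close() error {
	err := s.srv.Shutdown(context.Background())
	s.stopped.Wait()
	return err
}

func main() {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	s := &server{srv: &http.Server{Handler: http.NewServeMux()}}
	s.serve(l)
	log.Println("listening on", l.Addr())
	_ = s.close()
}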
+// SPDX-License-Identifier: Apache-2.0 // Package flags defines command line flags that are shared by several jaeger components. // They are defined in this shared location so that if several components are wired into diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/flags.go b/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/flags.go index 272ad8dd4fe..7eaac2f1794 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/flags.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/flags.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package flags @@ -29,6 +18,7 @@ import ( const ( spanStorageType = "span-storage.type" // deprecated logLevel = "log-level" + logEncoding = "log-encoding" // json or console configFile = "config-file" ) @@ -98,23 +88,26 @@ type SharedFlags struct { } type logging struct { - Level string + Level string + Encoding string } // AddFlags adds flags for SharedFlags func AddFlags(flagSet *flag.FlagSet) { flagSet.String(spanStorageType, "", "(deprecated) please use SPAN_STORAGE_TYPE environment variable. Run this binary with the 'env' command for help.") - AddLoggingFlag(flagSet) + AddLoggingFlags(flagSet) } // AddLoggingFlag adds logging flag for SharedFlags -func AddLoggingFlag(flagSet *flag.FlagSet) { +func AddLoggingFlags(flagSet *flag.FlagSet) { flagSet.String(logLevel, "info", "Minimal allowed log Level. For more levels see https://github.com/uber-go/zap") + flagSet.String(logEncoding, "json", "Log encoding. Supported values are 'json' and 'console'.") } // InitFromViper initializes SharedFlags with properties from viper func (flags *SharedFlags) InitFromViper(v *viper.Viper) *SharedFlags { flags.Logging.Level = v.GetString(logLevel) + flags.Logging.Encoding = v.GetString(logEncoding) return flags } @@ -126,5 +119,9 @@ func (flags *SharedFlags) NewLogger(conf zap.Config, options ...zap.Option) (*za return nil, err } conf.Level = zap.NewAtomicLevelAt(level) + conf.Encoding = flags.Logging.Encoding + if flags.Logging.Encoding == "console" { + conf.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder + } return conf.Build(options...) } diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/service.go b/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/service.go index 622a960a4e2..a076cebc19b 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/service.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/service.go @@ -1,16 +1,5 @@ // Copyright (c) 2019 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
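The new log-encoding flag above feeds straight into the zap config: console output additionally switches to human-readable ISO8601 timestamps. A hedged sketch of the resulting logger construction using only the public zap API (newLogger is a hypothetical helper):

package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func newLogger(level, encoding string) (*zap.Logger, error) {
	conf := zap.NewProductionConfig()
	conf.Sampling = nil

	lvl, err := zapcore.ParseLevel(level)
	if err != nil {
		return nil, err
	}
	conf.Level = zap.NewAtomicLevelAt(lvl)

	conf.Encoding = encoding // "json" (the default) or "console"
	if encoding == "console" {
		// console output is read by humans: prefer readable timestamps
		conf.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
	}
	return conf.Build()
}

func main() {
	logger, err := newLogger("info", "console")
	if err != nil {
		panic(err)
	}
	defer logger.Sync()
	logger.Info("console-encoded entry with an ISO8601 timestamp")
}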
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package flags @@ -22,9 +11,10 @@ import ( "os/signal" "syscall" - grpcZap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap" "github.com/spf13/viper" "go.uber.org/zap" + "go.uber.org/zap/zapgrpc" + "google.golang.org/grpc/grpclog" "github.com/jaegertracing/jaeger/internal/metrics/metricsbuilder" "github.com/jaegertracing/jaeger/pkg/healthcheck" @@ -52,6 +42,30 @@ type Service struct { signalsChannel chan os.Signal } +func PrintV1EOL() { + println(` +******************************************************************************* + +🛑 WARNING: End-of-life Notice for Jaeger v1 + +You are currently running a v1 version of Jaeger, which is deprecated and will +reach end-of-life on December 31st, 2025. This means there will be no further +development, bug fixes, or security patches for v1 after this date. + +We strongly recommend migrating to Jaeger v2 for continued support and access +to new features. + +For detailed migration instructions, please refer to the official Jaeger +documentation: https://www.jaegertracing.io/docs/latest/migration/ + +Tracking issue: https://github.com/jaegertracing/jaeger/issues/6321 + +🛑 WARNING: End-of-life Notice for Jaeger v1 + +******************************************************************************* +`) +} + // NewService creates a new Service. func NewService(adminPort int) *Service { signalsChannel := make(chan os.Signal, 1) @@ -67,7 +81,7 @@ func NewService(adminPort int) *Service { func (s *Service) AddFlags(flagSet *flag.FlagSet) { AddConfigFileFlag(flagSet) if s.NoStorage { - AddLoggingFlag(flagSet) + AddLoggingFlags(flagSet) } else { AddFlags(flagSet) } @@ -84,16 +98,15 @@ func (s *Service) Start(v *viper.Viper) error { sFlags := new(SharedFlags).InitFromViper(v) newProdConfig := zap.NewProductionConfig() newProdConfig.Sampling = nil - if logger, err := sFlags.NewLogger(newProdConfig); err == nil { - s.Logger = logger - grpcZap.ReplaceGrpcLoggerV2(logger.WithOptions( - // grpclog is not consistent with the depth of call tree before it's dispatched to zap, - // but Skip(2) still shows grpclog as caller, while Skip(3) shows actual grpc packages. - zap.AddCallerSkip(3), - )) - } else { + logger, err := sFlags.NewLogger(newProdConfig) + if err != nil { return fmt.Errorf("cannot create logger: %w", err) } + s.Logger = logger + grpclog.SetLoggerV2(zapgrpc.NewLogger( + logger.WithOptions( + zap.AddCallerSkip(5), // ensure the actual caller:lineNo is shown + ))) metricsBuilder := new(metricsbuilder.Builder).InitFromViper(v) metricsFactory, err := metricsBuilder.CreateMetricsFactory("") diff --git a/vendor/github.com/jaegertracing/jaeger/internal/metrics/expvar/cache.go b/vendor/github.com/jaegertracing/jaeger/internal/metrics/expvar/cache.go deleted file mode 100644 index f25ddba563b..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/internal/metrics/expvar/cache.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright (c) 2022 The Jaeger Authors. -// Copyright (c) 2018 Uber Technologies, Inc. 
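Service.Start above replaces the go-grpc-middleware zap bridge with zapgrpc from zap itself. A standalone sketch of that wiring (the caller-skip depth of 5 is taken from the change above):

package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapgrpc"
	"google.golang.org/grpc/grpclog"
)

func main() {
	logger, _ := zap.NewProduction()
	defer logger.Sync()

	// Route grpc-go's internal logs through zap. AddCallerSkip compensates
	// for the frames grpclog inserts so caller:line points at gRPC code.
	grpclog.SetLoggerV2(zapgrpc.NewLogger(
		logger.WithOptions(zap.AddCallerSkip(5)),
	))

	logger.Info("gRPC logs now flow through zap")
}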
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expvar - -import ( - "sync" - - "github.com/jaegertracing/jaeger/pkg/metrics" -) - -type cache struct { - lock sync.Mutex - counters map[string]metrics.Counter - gauges map[string]metrics.Gauge - timers map[string]metrics.Timer - histograms map[string]metrics.Histogram -} - -func newCache() *cache { - return &cache{ - counters: make(map[string]metrics.Counter), - gauges: make(map[string]metrics.Gauge), - timers: make(map[string]metrics.Timer), - histograms: make(map[string]metrics.Histogram), - } -} - -func (r *cache) getOrSetCounter(name string, create func() metrics.Counter) metrics.Counter { - r.lock.Lock() - defer r.lock.Unlock() - c, ok := r.counters[name] - if !ok { - c = create() - r.counters[name] = c - } - return c -} - -func (r *cache) getOrSetGauge(name string, create func() metrics.Gauge) metrics.Gauge { - r.lock.Lock() - defer r.lock.Unlock() - g, ok := r.gauges[name] - if !ok { - g = create() - r.gauges[name] = g - } - return g -} - -func (r *cache) getOrSetTimer(name string, create func() metrics.Timer) metrics.Timer { - r.lock.Lock() - defer r.lock.Unlock() - t, ok := r.timers[name] - if !ok { - t = create() - r.timers[name] = t - } - return t -} - -func (r *cache) getOrSetHistogram(name string, create func() metrics.Histogram) metrics.Histogram { - r.lock.Lock() - defer r.lock.Unlock() - t, ok := r.histograms[name] - if !ok { - t = create() - r.histograms[name] = t - } - return t -} diff --git a/vendor/github.com/jaegertracing/jaeger/internal/metrics/expvar/factory.go b/vendor/github.com/jaegertracing/jaeger/internal/metrics/expvar/factory.go deleted file mode 100644 index 6ce562e5ef1..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/internal/metrics/expvar/factory.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright (c) 2022 The Jaeger Authors. -// Copyright (c) 2018 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expvar - -import ( - "sort" - - kexpvar "github.com/go-kit/kit/metrics/expvar" - - "github.com/jaegertracing/jaeger/pkg/metrics" -) - -// NewFactory creates a new metrics factory using go-kit expvar package. -// buckets is the number of buckets to be used in histograms. -// Custom buckets passed via options are not supported. 
-func NewFactory(buckets int) metrics.Factory { - return &factory{ - buckets: buckets, - scope: "", - scopeSep: ".", - tagsSep: ".", - tagKVSep: "_", - cache: newCache(), - } -} - -type factory struct { - buckets int - - scope string - tags map[string]string - scopeSep string - tagsSep string - tagKVSep string - cache *cache -} - -var _ metrics.Factory = (*factory)(nil) - -func (f *factory) subScope(name string) string { - if f.scope == "" { - return name - } - if name == "" { - return f.scope - } - return f.scope + f.scopeSep + name -} - -func (f *factory) mergeTags(tags map[string]string) map[string]string { - ret := make(map[string]string, len(f.tags)+len(tags)) - for k, v := range f.tags { - ret[k] = v - } - for k, v := range tags { - ret[k] = v - } - return ret -} - -func (f *factory) getKey(name string, tags map[string]string) string { - fullName := f.subScope(name) - fullTags := f.mergeTags(tags) - return makeKey(fullName, fullTags, f.tagsSep, f.tagKVSep) -} - -// getKey converts name+tags into a single string of the form -// "name|tag1=value1|...|tagN=valueN", where tag names are -// sorted alphabetically. -func makeKey(name string, tags map[string]string, tagsSep string, tagKVSep string) string { - keys := make([]string, 0, len(tags)) - for k := range tags { - keys = append(keys, k) - } - sort.Strings(keys) - key := name - for _, k := range keys { - key = key + tagsSep + k + tagKVSep + tags[k] - } - return key -} - -func (f *factory) Counter(options metrics.Options) metrics.Counter { - key := f.getKey(options.Name, options.Tags) - return f.cache.getOrSetCounter(key, func() metrics.Counter { - return NewCounter(kexpvar.NewCounter(key)) - }) -} - -func (f *factory) Gauge(options metrics.Options) metrics.Gauge { - key := f.getKey(options.Name, options.Tags) - return f.cache.getOrSetGauge(key, func() metrics.Gauge { - return NewGauge(kexpvar.NewGauge(key)) - }) -} - -func (f *factory) Timer(options metrics.TimerOptions) metrics.Timer { - key := f.getKey(options.Name, options.Tags) - return f.cache.getOrSetTimer(key, func() metrics.Timer { - return NewTimer(kexpvar.NewHistogram(key, f.buckets)) - }) -} - -func (f *factory) Histogram(options metrics.HistogramOptions) metrics.Histogram { - key := f.getKey(options.Name, options.Tags) - return f.cache.getOrSetHistogram(key, func() metrics.Histogram { - return NewHistogram(kexpvar.NewHistogram(key, f.buckets)) - }) -} - -func (f *factory) Namespace(options metrics.NSOptions) metrics.Factory { - return &factory{ - buckets: f.buckets, - scope: f.subScope(options.Name), - tags: f.mergeTags(options.Tags), - scopeSep: f.scopeSep, - tagsSep: f.tagsSep, - tagKVSep: f.tagKVSep, - cache: f.cache, - } -} diff --git a/vendor/github.com/jaegertracing/jaeger/internal/metrics/expvar/metrics.go b/vendor/github.com/jaegertracing/jaeger/internal/metrics/expvar/metrics.go deleted file mode 100644 index f3cbf740b1a..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/internal/metrics/expvar/metrics.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright (c) 2022 The Jaeger Authors. -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
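The deleted expvar factory flattens scope, name, and tags into a single key string with sorted tag keys, so the same tag set always maps to the same expvar variable. A self-contained sketch of that scheme:

package main

import (
	"fmt"
	"sort"
)

// makeKey mirrors the deleted helper: "name" plus ".tag_value" segments,
// tag keys sorted alphabetically for a stable result.
func makeKey(name string, tags map[string]string, tagsSep, tagKVSep string) string {
	keys := make([]string, 0, len(tags))
	for k := range tags {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	key := name
	for _, k := range keys {
		key += tagsSep + k + tagKVSep + tags[k]
	}
	return key
}

func main() {
	fmt.Println(makeKey("requests", map[string]string{"svc": "tempo", "code": "200"}, ".", "_"))
	// prints: requests.code_200.svc_tempo
}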
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expvar - -import ( - "time" - - kit "github.com/go-kit/kit/metrics" -) - -// Counter is an adapter from go-kit Counter to jaeger-lib Counter -type Counter struct { - counter kit.Counter -} - -// NewCounter creates a new Counter -func NewCounter(counter kit.Counter) *Counter { - return &Counter{counter: counter} -} - -// Inc adds the given value to the counter. -func (c *Counter) Inc(delta int64) { - c.counter.Add(float64(delta)) -} - -// Gauge is an adapter from go-kit Gauge to jaeger-lib Gauge -type Gauge struct { - gauge kit.Gauge -} - -// NewGauge creates a new Gauge -func NewGauge(gauge kit.Gauge) *Gauge { - return &Gauge{gauge: gauge} -} - -// Update the gauge to the value passed in. -func (g *Gauge) Update(value int64) { - g.gauge.Set(float64(value)) -} - -// Timer is an adapter from go-kit Histogram to jaeger-lib Timer -type Timer struct { - hist kit.Histogram -} - -// NewTimer creates a new Timer -func NewTimer(hist kit.Histogram) *Timer { - return &Timer{hist: hist} -} - -// Record saves the time passed in. -func (t *Timer) Record(delta time.Duration) { - t.hist.Observe(delta.Seconds()) -} - -// Histogram is an adapter from go-kit Histogram to jaeger-lib Histogram -type Histogram struct { - hist kit.Histogram -} - -// NewHistogram creates a new Histogram -func NewHistogram(hist kit.Histogram) *Histogram { - return &Histogram{hist: hist} -} - -// Record saves the value passed in. -func (t *Histogram) Record(value float64) { - t.hist.Observe(value) -} diff --git a/vendor/github.com/jaegertracing/jaeger/internal/metrics/metricsbuilder/builder.go b/vendor/github.com/jaegertracing/jaeger/internal/metrics/metricsbuilder/builder.go index 502b7a4aa3d..16c82f117b0 100644 --- a/vendor/github.com/jaegertracing/jaeger/internal/metrics/metricsbuilder/builder.go +++ b/vendor/github.com/jaegertracing/jaeger/internal/metrics/metricsbuilder/builder.go @@ -1,32 +1,18 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
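The expvar metric types being deleted are thin adapters from the jaeger-lib interfaces onto go-kit observations; the only real conversion is Timer recording a Duration as floating-point seconds. A trivial sketch of that adapter shape (observer, timer, and printObserver are hypothetical stand-ins for the go-kit types):

package main

import (
	"fmt"
	"time"
)

// observer is the one method the adapters rely on (go-kit's Histogram
// exposes Observe(float64)).
type observer interface {
	Observe(float64)
}

// timer adapts an observer to a Timer-style contract: durations are
// recorded as seconds.
type timer struct{ hist observer }

func (t timer) Record(d time.Duration) { t.hist.Observe(d.Seconds()) }

type printObserver struct{}

func (printObserver) Observe(v float64) { fmt.Printf("observed %.3fs\n", v) }

func main() {
	t := timer{hist: printObserver{}}
	t.Record(1500 * time.Millisecond) // observed 1.500s
}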
+// SPDX-License-Identifier: Apache-2.0 package metricsbuilder import ( "errors" - "expvar" "flag" - "log" "net/http" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/spf13/viper" - jexpvar "github.com/jaegertracing/jaeger/internal/metrics/expvar" jprom "github.com/jaegertracing/jaeger/internal/metrics/prometheus" "github.com/jaegertracing/jaeger/pkg/metrics" ) @@ -47,14 +33,12 @@ type Builder struct { handler http.Handler } -const expvarDepr = "(deprecated, will be removed after 2024-01-01 or in release v1.53.0, whichever is later) " - // AddFlags adds flags for Builder. func AddFlags(flags *flag.FlagSet) { flags.String( metricsBackend, defaultMetricsBackend, - "Defines which metrics backend to use for metrics reporting: prometheus, none, or expvar "+expvarDepr) + "Defines which metrics backend to use for metrics reporting: prometheus or none") flags.String( metricsHTTPRoute, defaultMetricsRoute, @@ -77,12 +61,6 @@ func (b *Builder) CreateMetricsFactory(namespace string) (metrics.Factory, error b.handler = promhttp.HandlerFor(prometheus.DefaultGatherer, promhttp.HandlerOpts{DisableCompression: true}) return metricsFactory, nil } - if b.Backend == "expvar" { - metricsFactory := jexpvar.NewFactory(10).Namespace(metrics.NSOptions{Name: namespace, Tags: nil}) - b.handler = expvar.Handler() - log.Printf("using expvar as metrics backend " + expvarDepr) - return metricsFactory, nil - } if b.Backend == "none" || b.Backend == "" { return metrics.NullFactory, nil } diff --git a/vendor/github.com/jaegertracing/jaeger/internal/metrics/otelmetrics/counter.go b/vendor/github.com/jaegertracing/jaeger/internal/metrics/otelmetrics/counter.go new file mode 100644 index 00000000000..437442306f3 --- /dev/null +++ b/vendor/github.com/jaegertracing/jaeger/internal/metrics/otelmetrics/counter.go @@ -0,0 +1,20 @@ +// Copyright (c) 2024 The Jaeger Authors. +// SPDX-License-Identifier: Apache-2.0 + +package otelmetrics + +import ( + "context" + + "go.opentelemetry.io/otel/metric" +) + +type otelCounter struct { + counter metric.Int64Counter + fixedCtx context.Context + option metric.AddOption +} + +func (c *otelCounter) Inc(value int64) { + c.counter.Add(c.fixedCtx, value, c.option) +} diff --git a/vendor/github.com/jaegertracing/jaeger/internal/metrics/otelmetrics/factory.go b/vendor/github.com/jaegertracing/jaeger/internal/metrics/otelmetrics/factory.go new file mode 100644 index 00000000000..17adec85a04 --- /dev/null +++ b/vendor/github.com/jaegertracing/jaeger/internal/metrics/otelmetrics/factory.go @@ -0,0 +1,140 @@ +// Copyright (c) 2024 The Jaeger Authors. 
+// SPDX-License-Identifier: Apache-2.0 + +package otelmetrics + +import ( + "context" + "log" + "strings" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + + "github.com/jaegertracing/jaeger/pkg/metrics" +) + +type otelFactory struct { + meter metric.Meter + scope string + separator string + normalizer *strings.Replacer + tags map[string]string +} + +func NewFactory(meterProvider metric.MeterProvider) metrics.Factory { + return &otelFactory{ + meter: meterProvider.Meter("jaeger-v2"), + separator: ".", + normalizer: strings.NewReplacer(" ", "_", ".", "_", "-", "_"), + tags: make(map[string]string), + } +} + +func (f *otelFactory) Counter(opts metrics.Options) metrics.Counter { + name := CounterNamingConvention(f.subScope(opts.Name)) + counter, err := f.meter.Int64Counter(name) + if err != nil { + log.Printf("Error creating OTEL counter: %v", err) + return metrics.NullCounter + } + return &otelCounter{ + counter: counter, + fixedCtx: context.Background(), + option: attributeSetOption(f.mergeTags(opts.Tags)), + } +} + +func (f *otelFactory) Gauge(opts metrics.Options) metrics.Gauge { + name := f.subScope(opts.Name) + gauge, err := f.meter.Int64Gauge(name) + if err != nil { + log.Printf("Error creating OTEL gauge: %v", err) + return metrics.NullGauge + } + + return &otelGauge{ + gauge: gauge, + fixedCtx: context.Background(), + option: attributeSetOption(f.mergeTags(opts.Tags)), + } +} + +func (f *otelFactory) Histogram(opts metrics.HistogramOptions) metrics.Histogram { + name := f.subScope(opts.Name) + histogram, err := f.meter.Float64Histogram(name) + if err != nil { + log.Printf("Error creating OTEL histogram: %v", err) + return metrics.NullHistogram + } + + return &otelHistogram{ + histogram: histogram, + fixedCtx: context.Background(), + option: attributeSetOption(f.mergeTags(opts.Tags)), + } +} + +func (f *otelFactory) Timer(opts metrics.TimerOptions) metrics.Timer { + name := f.subScope(opts.Name) + timer, err := f.meter.Float64Histogram(name, metric.WithUnit("s")) + if err != nil { + log.Printf("Error creating OTEL timer: %v", err) + return metrics.NullTimer + } + return &otelTimer{ + histogram: timer, + fixedCtx: context.Background(), + option: attributeSetOption(f.mergeTags(opts.Tags)), + } +} + +func (f *otelFactory) Namespace(opts metrics.NSOptions) metrics.Factory { + return &otelFactory{ + meter: f.meter, + scope: f.subScope(opts.Name), + separator: f.separator, + normalizer: f.normalizer, + tags: f.mergeTags(opts.Tags), + } +} + +func (f *otelFactory) subScope(name string) string { + if f.scope == "" { + return f.normalize(name) + } + if name == "" { + return f.normalize(f.scope) + } + return f.normalize(f.scope + f.separator + name) +} + +func (f *otelFactory) normalize(v string) string { + return f.normalizer.Replace(v) +} + +func (f *otelFactory) mergeTags(tags map[string]string) map[string]string { + merged := make(map[string]string) + for k, v := range f.tags { + merged[k] = v + } + for k, v := range tags { + merged[k] = v + } + return merged +} + +func attributeSetOption(tags map[string]string) metric.MeasurementOption { + attributes := make([]attribute.KeyValue, 0, len(tags)) + for k, v := range tags { + attributes = append(attributes, attribute.String(k, v)) + } + return metric.WithAttributes(attributes...) 
+} + +func CounterNamingConvention(name string) string { + if !strings.HasSuffix(name, "_total") { + name += "_total" + } + return name +} diff --git a/vendor/github.com/jaegertracing/jaeger/internal/metrics/otelmetrics/gauge.go b/vendor/github.com/jaegertracing/jaeger/internal/metrics/otelmetrics/gauge.go new file mode 100644 index 00000000000..dc5c4f38428 --- /dev/null +++ b/vendor/github.com/jaegertracing/jaeger/internal/metrics/otelmetrics/gauge.go @@ -0,0 +1,20 @@ +// Copyright (c) 2024 The Jaeger Authors. +// SPDX-License-Identifier: Apache-2.0 + +package otelmetrics + +import ( + "context" + + "go.opentelemetry.io/otel/metric" +) + +type otelGauge struct { + gauge metric.Int64Gauge + fixedCtx context.Context + option metric.RecordOption +} + +func (g *otelGauge) Update(value int64) { + g.gauge.Record(g.fixedCtx, value, g.option) +} diff --git a/vendor/github.com/jaegertracing/jaeger/internal/metrics/otelmetrics/histogram.go b/vendor/github.com/jaegertracing/jaeger/internal/metrics/otelmetrics/histogram.go new file mode 100644 index 00000000000..b408b47dd7c --- /dev/null +++ b/vendor/github.com/jaegertracing/jaeger/internal/metrics/otelmetrics/histogram.go @@ -0,0 +1,20 @@ +// Copyright (c) 2024 The Jaeger Authors. +// SPDX-License-Identifier: Apache-2.0 + +package otelmetrics + +import ( + "context" + + "go.opentelemetry.io/otel/metric" +) + +type otelHistogram struct { + histogram metric.Float64Histogram + fixedCtx context.Context + option metric.RecordOption +} + +func (h *otelHistogram) Record(value float64) { + h.histogram.Record(h.fixedCtx, value, h.option) +} diff --git a/vendor/github.com/jaegertracing/jaeger/internal/metrics/otelmetrics/timer.go b/vendor/github.com/jaegertracing/jaeger/internal/metrics/otelmetrics/timer.go new file mode 100644 index 00000000000..0df689243b3 --- /dev/null +++ b/vendor/github.com/jaegertracing/jaeger/internal/metrics/otelmetrics/timer.go @@ -0,0 +1,21 @@ +// Copyright (c) 2024 The Jaeger Authors. +// SPDX-License-Identifier: Apache-2.0 + +package otelmetrics + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/metric" +) + +type otelTimer struct { + histogram metric.Float64Histogram + fixedCtx context.Context + option metric.RecordOption +} + +func (t *otelTimer) Record(d time.Duration) { + t.histogram.Record(t.fixedCtx, d.Seconds(), t.option) +} diff --git a/vendor/github.com/jaegertracing/jaeger/internal/metrics/prometheus/cache.go b/vendor/github.com/jaegertracing/jaeger/internal/metrics/prometheus/cache.go index 40791ebb708..733c9e3f33e 100644 --- a/vendor/github.com/jaegertracing/jaeger/internal/metrics/prometheus/cache.go +++ b/vendor/github.com/jaegertracing/jaeger/internal/metrics/prometheus/cache.go @@ -1,16 +1,5 @@ // Copyright (c) 2017 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
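Tying the new otelmetrics files together: a hedged usage sketch that hands an OTel SDK MeterProvider to the factory and records a counter. Note otelmetrics lives under internal/, so this only compiles inside the jaeger module; the SDK's default MeterProvider here is purely illustrative:

package main

import (
	"github.com/jaegertracing/jaeger/internal/metrics/otelmetrics"
	"github.com/jaegertracing/jaeger/pkg/metrics"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	mp := sdkmetric.NewMeterProvider()
	factory := otelmetrics.NewFactory(mp)

	// "requests" is normalized and suffixed by CounterNamingConvention,
	// so the instrument is named "requests_total"; tags become attributes.
	c := factory.Counter(metrics.Options{
		Name: "requests",
		Tags: map[string]string{"svc": "tempo"},
	})
	c.Inc(1)
}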
+// SPDX-License-Identifier: Apache-2.0 package prometheus @@ -81,6 +70,6 @@ func (c *vectorCache) getOrMakeHistogramVec(opts prometheus.HistogramOpts, label return hv } -func (c *vectorCache) getCacheKey(name string, labels []string) string { +func (*vectorCache) getCacheKey(name string, labels []string) string { return strings.Join(append([]string{name}, labels...), "||") } diff --git a/vendor/github.com/jaegertracing/jaeger/internal/metrics/prometheus/factory.go b/vendor/github.com/jaegertracing/jaeger/internal/metrics/prometheus/factory.go index 0f8b3a959d7..6bb4c8faceb 100644 --- a/vendor/github.com/jaegertracing/jaeger/internal/metrics/prometheus/factory.go +++ b/vendor/github.com/jaegertracing/jaeger/internal/metrics/prometheus/factory.go @@ -1,16 +1,5 @@ // Copyright (c) 2017 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package prometheus @@ -280,7 +269,7 @@ func (f *Factory) mergeTags(tags map[string]string) map[string]string { return ret } -func (f *Factory) tagNames(tags map[string]string) []string { +func (*Factory) tagNames(tags map[string]string) []string { ret := make([]string, 0, len(tags)) for k := range tags { ret = append(ret, k) @@ -289,7 +278,7 @@ func (f *Factory) tagNames(tags map[string]string) []string { return ret } -func (f *Factory) tagsAsLabelValues(labels []string, tags map[string]string) []string { +func (*Factory) tagsAsLabelValues(labels []string, tags map[string]string) []string { ret := make([]string, 0, len(tags)) for _, l := range labels { ret = append(ret, tags[l]) diff --git a/vendor/github.com/jaegertracing/jaeger/model/converter/json/doc.go b/vendor/github.com/jaegertracing/jaeger/model/converter/json/doc.go deleted file mode 100644 index 547372cb96a..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/model/converter/json/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (c) 2019 The Jaeger Authors. -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package json allows converting model.Trace to external JSON data model. -package json diff --git a/vendor/github.com/jaegertracing/jaeger/model/converter/json/from_domain.go b/vendor/github.com/jaegertracing/jaeger/model/converter/json/from_domain.go deleted file mode 100644 index 26c2e47d6f0..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/model/converter/json/from_domain.go +++ /dev/null @@ -1,201 +0,0 @@ -// Copyright (c) 2019 The Jaeger Authors. 
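getCacheKey above joins the metric name with its label names so each unique (name, labels) pair caches exactly one Prometheus vector. A tiny sketch of the key shape:

package main

import (
	"fmt"
	"strings"
)

// cacheKey mirrors vectorCache.getCacheKey: name plus label names,
// "||"-joined.
func cacheKey(name string, labels []string) string {
	return strings.Join(append([]string{name}, labels...), "||")
}

func main() {
	fmt.Println(cacheKey("http_requests", []string{"code", "method"}))
	// prints: http_requests||code||method
}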
-// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package json - -import ( - "fmt" - "strings" - - "github.com/jaegertracing/jaeger/model" - "github.com/jaegertracing/jaeger/model/json" -) - -const ( - jsMaxSafeInteger = int64(1)<<53 - 1 - jsMinSafeInteger = -jsMaxSafeInteger -) - -// FromDomain converts model.Trace into json.Trace format. -// It assumes that the domain model is valid, namely that all enums -// have valid values, so that it does not need to check for errors. -func FromDomain(trace *model.Trace) *json.Trace { - fd := fromDomain{} - fd.convertKeyValuesFunc = fd.convertKeyValues - return fd.fromDomain(trace) -} - -// FromDomainEmbedProcess converts model.Span into json.Span format. -// This format includes a ParentSpanID and an embedded Process. -func FromDomainEmbedProcess(span *model.Span) *json.Span { - fd := fromDomain{} - fd.convertKeyValuesFunc = fd.convertKeyValuesString - return fd.convertSpanEmbedProcess(span) -} - -type fromDomain struct { - convertKeyValuesFunc func(keyValues model.KeyValues) []json.KeyValue -} - -func (fd fromDomain) fromDomain(trace *model.Trace) *json.Trace { - jSpans := make([]json.Span, len(trace.Spans)) - processes := &processHashtable{} - var traceID json.TraceID - for i, span := range trace.Spans { - if i == 0 { - traceID = json.TraceID(span.TraceID.String()) - } - processID := json.ProcessID(processes.getKey(span.Process)) - jSpans[i] = fd.convertSpan(span, processID) - } - jTrace := &json.Trace{ - TraceID: traceID, - Spans: jSpans, - Processes: fd.convertProcesses(processes.getMapping()), - Warnings: trace.Warnings, - } - return jTrace -} - -func (fd fromDomain) convertSpanInternal(span *model.Span) json.Span { - return json.Span{ - TraceID: json.TraceID(span.TraceID.String()), - SpanID: json.SpanID(span.SpanID.String()), - Flags: uint32(span.Flags), - OperationName: span.OperationName, - StartTime: model.TimeAsEpochMicroseconds(span.StartTime), - Duration: model.DurationAsMicroseconds(span.Duration), - Tags: fd.convertKeyValuesFunc(span.Tags), - Logs: fd.convertLogs(span.Logs), - } -} - -func (fd fromDomain) convertSpan(span *model.Span, processID json.ProcessID) json.Span { - s := fd.convertSpanInternal(span) - s.ProcessID = processID - s.Warnings = span.Warnings - s.References = fd.convertReferences(span) - return s -} - -func (fd fromDomain) convertSpanEmbedProcess(span *model.Span) *json.Span { - s := fd.convertSpanInternal(span) - process := fd.convertProcess(span.Process) - s.Process = &process - s.References = fd.convertReferences(span) - return &s -} - -func (fd fromDomain) convertReferences(span *model.Span) []json.Reference { - out := make([]json.Reference, 0, len(span.References)) - for _, ref := range span.References { - out = append(out, json.Reference{ - RefType: fd.convertRefType(ref.RefType), - TraceID: json.TraceID(ref.TraceID.String()), - SpanID: json.SpanID(ref.SpanID.String()), - }) - } - return out -} - -func (fd fromDomain) convertRefType(refType 
model.SpanRefType) json.ReferenceType { - if refType == model.FollowsFrom { - return json.FollowsFrom - } - return json.ChildOf -} - -func (fd fromDomain) convertKeyValues(keyValues model.KeyValues) []json.KeyValue { - out := make([]json.KeyValue, len(keyValues)) - for i, kv := range keyValues { - var value interface{} - switch kv.VType { - case model.StringType: - value = kv.VStr - case model.BoolType: - value = kv.Bool() - case model.Int64Type: - value = kv.Int64() - if kv.Int64() > jsMaxSafeInteger || kv.Int64() < jsMinSafeInteger { - value = fmt.Sprintf("%d", value) - } - case model.Float64Type: - value = kv.Float64() - case model.BinaryType: - value = kv.Binary() - } - - out[i] = json.KeyValue{ - Key: kv.Key, - Type: json.ValueType(strings.ToLower(kv.VType.String())), - Value: value, - } - } - return out -} - -func (fd fromDomain) convertKeyValuesString(keyValues model.KeyValues) []json.KeyValue { - out := make([]json.KeyValue, len(keyValues)) - for i, kv := range keyValues { - out[i] = json.KeyValue{ - Key: kv.Key, - Type: json.ValueType(strings.ToLower(kv.VType.String())), - Value: kv.AsString(), - } - } - return out -} - -func (fd fromDomain) convertLogs(logs []model.Log) []json.Log { - out := make([]json.Log, len(logs)) - for i, log := range logs { - out[i] = json.Log{ - Timestamp: model.TimeAsEpochMicroseconds(log.Timestamp), - Fields: fd.convertKeyValuesFunc(log.Fields), - } - } - return out -} - -func (fd fromDomain) convertProcesses(processes map[string]*model.Process) map[json.ProcessID]json.Process { - out := make(map[json.ProcessID]json.Process) - for key, process := range processes { - out[json.ProcessID(key)] = fd.convertProcess(process) - } - return out -} - -func (fd fromDomain) convertProcess(process *model.Process) json.Process { - return json.Process{ - ServiceName: process.ServiceName, - Tags: fd.convertKeyValuesFunc(process.Tags), - } -} - -// DependenciesFromDomain converts []model.DependencyLink into []json.DependencyLink format. -func DependenciesFromDomain(dependencyLinks []model.DependencyLink) []json.DependencyLink { - retMe := make([]json.DependencyLink, 0, len(dependencyLinks)) - for _, dependencyLink := range dependencyLinks { - retMe = append( - retMe, - json.DependencyLink{ - Parent: dependencyLink.Parent, - Child: dependencyLink.Child, - CallCount: dependencyLink.CallCount, - }, - ) - } - return retMe -} diff --git a/vendor/github.com/jaegertracing/jaeger/model/converter/json/process_hashtable.go b/vendor/github.com/jaegertracing/jaeger/model/converter/json/process_hashtable.go deleted file mode 100644 index 5a95af66f9e..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/model/converter/json/process_hashtable.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright (c) 2019 The Jaeger Authors. -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
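The deleted convertKeyValues guards int64 tag values against JavaScript's safe-integer range (±(2^53 − 1)), rendering anything outside it as a string so the UI cannot silently lose precision. A compact sketch of that guard:

package main

import "fmt"

const (
	jsMaxSafeInteger = int64(1)<<53 - 1 // Number.MAX_SAFE_INTEGER
	jsMinSafeInteger = -jsMaxSafeInteger
)

// jsonSafe returns v unchanged when JavaScript can represent it exactly,
// and as a decimal string otherwise.
func jsonSafe(v int64) interface{} {
	if v > jsMaxSafeInteger || v < jsMinSafeInteger {
		return fmt.Sprintf("%d", v)
	}
	return v
}

func main() {
	fmt.Println(jsonSafe(42))             // 42 (kept as int64)
	fmt.Println(jsonSafe(int64(1) << 60)) // 1152921504606846976 (a string)
}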
- -package json - -import ( - "strconv" - - "github.com/jaegertracing/jaeger/model" -) - -type processHashtable struct { - count int - processes map[uint64][]processKey - extHash func(*model.Process) uint64 -} - -type processKey struct { - process *model.Process - key string -} - -// getKey assigns a new unique string key to the process, or returns -// a previously assigned value if the process has already been seen. -func (ph *processHashtable) getKey(process *model.Process) string { - if ph.processes == nil { - ph.processes = make(map[uint64][]processKey) - } - hash := ph.hash(process) - if keys, ok := ph.processes[hash]; ok { - for _, k := range keys { - if k.process.Equal(process) { - return k.key - } - } - key := ph.nextKey() - keys = append(keys, processKey{process: process, key: key}) - ph.processes[hash] = keys - return key - } - key := ph.nextKey() - ph.processes[hash] = []processKey{{process: process, key: key}} - return key -} - -// getMapping returns the accumulated mapping of string keys to processes. -func (ph *processHashtable) getMapping() map[string]*model.Process { - out := make(map[string]*model.Process) - for _, keys := range ph.processes { - for _, key := range keys { - out[key.key] = key.process - } - } - return out -} - -func (ph *processHashtable) nextKey() string { - ph.count++ - key := "p" + strconv.Itoa(ph.count) - return key -} - -func (ph processHashtable) hash(process *model.Process) uint64 { - if ph.extHash != nil { - // for testing collisions - return ph.extHash(process) - } - hc, _ := model.HashCode(process) - return hc -} diff --git a/vendor/github.com/jaegertracing/jaeger/model/converter/json/sampling.go b/vendor/github.com/jaegertracing/jaeger/model/converter/json/sampling.go deleted file mode 100644 index b124114b6a5..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/model/converter/json/sampling.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright (c) 2023 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package json - -import ( - "bytes" - "strings" - - "github.com/gogo/protobuf/jsonpb" - - "github.com/jaegertracing/jaeger/proto-gen/api_v2" -) - -// SamplingStrategyResponseToJSON defines the official way to generate -// a JSON response from /sampling endpoints. -func SamplingStrategyResponseToJSON(protoObj *api_v2.SamplingStrategyResponse) (string, error) { - // For backwards compatibility with Thrift-to-JSON encoding, - // we want the output to include "strategyType":"PROBABILISTIC" when appropriate. - // However, due to design oversight, the enum value for PROBABILISTIC is 0, so - // we need to set EmitDefaults=true. This in turns causes null fields to be emitted too, - // so we take care of them below. - jsonpbMarshaler := jsonpb.Marshaler{ - EmitDefaults: true, - } - - str, err := jsonpbMarshaler.MarshalToString(protoObj) - if err != nil { - return "", err - } - - // Because we set EmitDefaults, jsonpb will also render null entries, so we remove them here. 
- str = strings.ReplaceAll(str, `"probabilisticSampling":null,`, "") - str = strings.ReplaceAll(str, `,"rateLimitingSampling":null`, "") - str = strings.ReplaceAll(str, `,"operationSampling":null`, "") - - return str, nil -} - -// SamplingStrategyResponseFromJSON is the official way to parse strategy in JSON. -func SamplingStrategyResponseFromJSON(json []byte) (*api_v2.SamplingStrategyResponse, error) { - var obj api_v2.SamplingStrategyResponse - if err := jsonpb.Unmarshal(bytes.NewReader(json), &obj); err != nil { - return nil, err - } - return &obj, nil -} diff --git a/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/doc.go b/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/doc.go index 26050003d63..4ed14a3000e 100644 --- a/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/doc.go +++ b/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/doc.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package jaeger allows converting model.Trace to/from jaeger.thrift model. package jaeger diff --git a/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/from_domain.go b/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/from_domain.go index 8ae83e02c57..b115c361504 100644 --- a/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/from_domain.go +++ b/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/from_domain.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
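The deleted sampling helpers lean on jsonpb's EmitDefaults so the zero enum value still serializes as "strategyType":"PROBABILISTIC", then strip the null siblings that setting also emits. A hedged round-trip sketch against the pre-removal API (these functions are the ones deleted above, so the import only resolves on older jaeger versions):

package main

import (
	"fmt"

	conv "github.com/jaegertracing/jaeger/model/converter/json"
	"github.com/jaegertracing/jaeger/proto-gen/api_v2"
)

func main() {
	resp := &api_v2.SamplingStrategyResponse{
		StrategyType: api_v2.SamplingStrategyType_PROBABILISTIC, // enum value 0
		ProbabilisticSampling: &api_v2.ProbabilisticSamplingStrategy{
			SamplingRate: 0.25,
		},
	}

	out, err := conv.SamplingStrategyResponseToJSON(resp)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // strategyType survives; null fields are stripped

	back, err := conv.SamplingStrategyResponseFromJSON([]byte(out))
	if err != nil {
		panic(err)
	}
	fmt.Println(back.GetProbabilisticSampling().GetSamplingRate()) // 0.25
}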
+// SPDX-License-Identifier: Apache-2.0 package jaeger @@ -44,7 +33,7 @@ func FromDomainSpan(span *model.Span) *jaeger.Span { type domainToJaegerTransformer struct{} -func (d domainToJaegerTransformer) keyValueToTag(kv *model.KeyValue) *jaeger.Tag { +func (domainToJaegerTransformer) keyValueToTag(kv *model.KeyValue) *jaeger.Tag { if kv.VType == model.StringType { stringValue := kv.VStr return &jaeger.Tag{ @@ -112,6 +101,7 @@ func (d domainToJaegerTransformer) convertLogs(logs []model.Log) []*jaeger.Log { jaegerLogs := make([]*jaeger.Log, len(logs)) for idx, log := range logs { jaegerLogs[idx] = &jaeger.Log{ + //nolint: gosec // G115 Timestamp: int64(model.TimeAsEpochMicroseconds(log.Timestamp)), Fields: d.convertKeyValuesToTags(log.Fields), } @@ -119,9 +109,10 @@ func (d domainToJaegerTransformer) convertLogs(logs []model.Log) []*jaeger.Log { return jaegerLogs } -func (d domainToJaegerTransformer) convertSpanRefs(refs []model.SpanRef) []*jaeger.SpanRef { +func (domainToJaegerTransformer) convertSpanRefs(refs []model.SpanRef) []*jaeger.SpanRef { jaegerSpanRefs := make([]*jaeger.SpanRef, len(refs)) for idx, ref := range refs { + //nolint: gosec // G115 jaegerSpanRefs[idx] = &jaeger.SpanRef{ RefType: jaeger.SpanRefType(ref.RefType), TraceIdLow: int64(ref.TraceID.Low), @@ -137,6 +128,7 @@ func (d domainToJaegerTransformer) transformSpan(span *model.Span) *jaeger.Span logs := d.convertLogs(span.Logs) refs := d.convertSpanRefs(span.References) + //nolint: gosec // G115 jaegerSpan := &jaeger.Span{ TraceIdLow: int64(span.TraceID.Low), TraceIdHigh: int64(span.TraceID.High), diff --git a/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/sampling_from_domain.go b/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/sampling_from_domain.go index c776072cd19..d456b018921 100644 --- a/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/sampling_from_domain.go +++ b/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/sampling_from_domain.go @@ -1,16 +1,5 @@ // Copyright (c) 2018 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package jaeger @@ -55,7 +44,10 @@ func convertRateLimitingFromDomain(s *api_v2.RateLimitingSamplingStrategy) (*sam if s.MaxTracesPerSecond > math.MaxInt16 { return nil, errors.New("maxTracesPerSecond is higher than int16") } - return &sampling.RateLimitingSamplingStrategy{MaxTracesPerSecond: int16(s.GetMaxTracesPerSecond())}, nil + return &sampling.RateLimitingSamplingStrategy{ + //nolint: gosec // G115 + MaxTracesPerSecond: int16(s.GetMaxTracesPerSecond()), + }, nil } func convertPerOperationFromDomain(s *api_v2.PerOperationSamplingStrategies) *sampling.PerOperationSamplingStrategies { diff --git a/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/sampling_to_domain.go b/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/sampling_to_domain.go index 876e50a4a83..23cea6ccbf2 100644 --- a/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/sampling_to_domain.go +++ b/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/sampling_to_domain.go @@ -1,16 +1,5 @@ // Copyright (c) 2018 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package jaeger diff --git a/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/to_domain.go b/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/to_domain.go index 2738776123f..b3d082ea5a4 100644 --- a/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/to_domain.go +++ b/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/to_domain.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package jaeger @@ -58,6 +47,7 @@ func (td toDomain) ToDomainSpan(jSpan *jaeger.Span, jProcess *jaeger.Process) *m } func (td toDomain) transformSpan(jSpan *jaeger.Span, mProcess *model.Process) *model.Span { + //nolint: gosec // G115 traceID := model.NewTraceID(uint64(jSpan.TraceIdHigh), uint64(jSpan.TraceIdLow)) // allocate extra space for future append operation tags := td.getTags(jSpan.Tags, 1) @@ -66,24 +56,29 @@ func (td toDomain) transformSpan(jSpan *jaeger.Span, mProcess *model.Process) *m // might still have these IDs without representing them in the References, so we // convert it back into child-of reference. 
if jSpan.ParentSpanId != 0 { + //nolint: gosec // G115 parentSpanID := model.NewSpanID(uint64(jSpan.ParentSpanId)) refs = model.MaybeAddParentSpanID(traceID, parentSpanID, refs) } return &model.Span{ - TraceID: traceID, + TraceID: traceID, + //nolint: gosec // G115 SpanID: model.NewSpanID(uint64(jSpan.SpanId)), OperationName: jSpan.OperationName, References: refs, - Flags: model.Flags(jSpan.Flags), - StartTime: model.EpochMicrosecondsAsTime(uint64(jSpan.StartTime)), - Duration: model.MicrosecondsAsDuration(uint64(jSpan.Duration)), - Tags: tags, - Logs: td.getLogs(jSpan.Logs), - Process: mProcess, + //nolint: gosec // G115 + Flags: model.Flags(jSpan.Flags), + //nolint: gosec // G115 + StartTime: model.EpochMicrosecondsAsTime(uint64(jSpan.StartTime)), + //nolint: gosec // G115 + Duration: model.MicrosecondsAsDuration(uint64(jSpan.Duration)), + Tags: tags, + Logs: td.getLogs(jSpan.Logs), + Process: mProcess, } } -func (td toDomain) getReferences(jRefs []*jaeger.SpanRef) []model.SpanRef { +func (toDomain) getReferences(jRefs []*jaeger.SpanRef) []model.SpanRef { if len(jRefs) == 0 { return nil } @@ -91,9 +86,12 @@ func (td toDomain) getReferences(jRefs []*jaeger.SpanRef) []model.SpanRef { mRefs := make([]model.SpanRef, len(jRefs)) for idx, jRef := range jRefs { mRefs[idx] = model.SpanRef{ + //nolint: gosec // G115 RefType: model.SpanRefType(int(jRef.RefType)), + //nolint: gosec // G115 TraceID: model.NewTraceID(uint64(jRef.TraceIdHigh), uint64(jRef.TraceIdLow)), - SpanID: model.NewSpanID(uint64(jRef.SpanId)), + //nolint: gosec // G115 + SpanID: model.NewSpanID(uint64(jRef.SpanId)), } } @@ -127,7 +125,7 @@ func (td toDomain) getTags(tags []*jaeger.Tag, extraSpace int) model.KeyValues { return retMe } -func (td toDomain) getTag(tag *jaeger.Tag) model.KeyValue { +func (toDomain) getTag(tag *jaeger.Tag) model.KeyValue { switch tag.VType { case jaeger.TagType_BOOL: return model.Bool(tag.Key, tag.GetVBool()) @@ -151,6 +149,7 @@ func (td toDomain) getLogs(logs []*jaeger.Log) []model.Log { retMe := make([]model.Log, len(logs)) for i, log := range logs { retMe[i] = model.Log{ + //nolint: gosec // G115 Timestamp: model.EpochMicrosecondsAsTime(uint64(log.Timestamp)), Fields: td.getTags(log.Fields, 0), } diff --git a/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/zipkin/deserialize.go b/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/zipkin/deserialize.go index 821cf3914fb..7f581895e30 100644 --- a/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/zipkin/deserialize.go +++ b/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/zipkin/deserialize.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2018 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package zipkin diff --git a/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/zipkin/doc.go b/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/zipkin/doc.go index 7b7b461270a..4f47098fd16 100644 --- a/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/zipkin/doc.go +++ b/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/zipkin/doc.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package zipkin allows converting model.Trace to/from zipkin.thrift model. package zipkin diff --git a/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/zipkin/process_hashtable.go b/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/zipkin/process_hashtable.go index cd75fca2b0c..05c5a32d8c8 100644 --- a/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/zipkin/process_hashtable.go +++ b/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/zipkin/process_hashtable.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package zipkin diff --git a/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/zipkin/to_domain.go b/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/zipkin/to_domain.go index f4dd3655f74..00ca20408fd 100644 --- a/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/zipkin/to_domain.go +++ b/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/zipkin/to_domain.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package zipkin @@ -32,7 +21,6 @@ import ( const ( // UnknownServiceName is serviceName we give to model.Process if we cannot find it anywhere in a Zipkin span UnknownServiceName = "unknown-service-name" - keySpanKind = "span.kind" component = "component" peerservice = "peer.service" peerHostIPv4 = "peer.ipv4" @@ -92,7 +80,7 @@ type toDomain struct{} func (td toDomain) ToDomain(zSpans []*zipkincore.Span) (*model.Trace, error) { var errs []error processes := newProcessHashtable() - trace := &model.Trace{} + trc := &model.Trace{} for _, zSpan := range zSpans { jSpans, err := td.ToDomainSpans(zSpan) if err != nil { @@ -101,10 +89,10 @@ func (td toDomain) ToDomain(zSpans []*zipkincore.Span) (*model.Trace, error) { for _, jSpan := range jSpans { // remove duplicate Process instances jSpan.Process = processes.add(jSpan.Process) - trace.Spans = append(trace.Spans, jSpan) + trc.Spans = append(trc.Spans, jSpan) } } - return trace, errors.Join(errs...) + return trc, errors.Join(errs...) } func (td toDomain) ToDomainSpans(zSpan *zipkincore.Span) ([]*model.Span, error) { @@ -116,7 +104,7 @@ func (td toDomain) ToDomainSpans(zSpan *zipkincore.Span) ([]*model.Span, error) return jSpans, err } -func (td toDomain) findAnnotation(zSpan *zipkincore.Span, value string) *zipkincore.Annotation { +func (toDomain) findAnnotation(zSpan *zipkincore.Span, value string) *zipkincore.Annotation { for _, ann := range zSpan.Annotations { if ann.Value == value { return ann @@ -135,9 +123,11 @@ func (td toDomain) transformSpan(zSpan *zipkincore.Span) []*model.Span { if zSpan.TraceIDHigh != nil { traceIDHigh = *zSpan.TraceIDHigh } + //nolint: gosec // G115 traceID := model.NewTraceID(uint64(traceIDHigh), uint64(zSpan.TraceID)) var refs []model.SpanRef if zSpan.ParentID != nil { + //nolint: gosec // G115 parentSpanID := model.NewSpanID(uint64(*zSpan.ParentID)) refs = model.MaybeAddParentSpanID(traceID, parentSpanID, refs) } @@ -147,15 +137,18 @@ func (td toDomain) transformSpan(zSpan *zipkincore.Span) []*model.Span { startTime, duration := td.getStartTimeAndDuration(zSpan) result := []*model.Span{{ - TraceID: traceID, + TraceID: traceID, + //nolint: gosec // G115 SpanID: model.NewSpanID(uint64(zSpan.ID)), OperationName: zSpan.Name, References: refs, Flags: flags, - StartTime: model.EpochMicrosecondsAsTime(uint64(startTime)), - Duration: model.MicrosecondsAsDuration(uint64(duration)), - Tags: tags, - Logs: td.getLogs(zSpan.Annotations), + //nolint: gosec // G115 + StartTime: model.EpochMicrosecondsAsTime(uint64(startTime)), + //nolint: gosec // G115 + Duration: model.MicrosecondsAsDuration(uint64(duration)), + Tags: tags, + Logs: td.getLogs(zSpan.Annotations), }} cs := td.findAnnotation(zSpan, zipkincore.CLIENT_SEND) @@ -163,7 +156,8 @@ func (td toDomain) transformSpan(zSpan *zipkincore.Span) []*model.Span { if cs != nil && sr != nil { // if the span is client and server we split it into two separate spans s := &model.Span{ - TraceID: traceID, + TraceID: traceID, + //nolint: gosec // G115 SpanID: model.NewSpanID(uint64(zSpan.ID)), OperationName: zSpan.Name, References: refs, @@ -171,15 +165,19 @@ func (td toDomain) transformSpan(zSpan *zipkincore.Span) []*model.Span { } // if the first span is a client span we create server span and vice-versa. 
if result[0].IsRPCClient() { - s.Tags = []model.KeyValue{model.String(keySpanKind, trace.SpanKindServer.String())} + s.Tags = []model.KeyValue{model.SpanKindTag(model.SpanKindServer)} + //nolint: gosec // G115 s.StartTime = model.EpochMicrosecondsAsTime(uint64(sr.Timestamp)) if ss := td.findAnnotation(zSpan, zipkincore.SERVER_SEND); ss != nil { + //nolint: gosec // G115 s.Duration = model.MicrosecondsAsDuration(uint64(ss.Timestamp - sr.Timestamp)) } } else { - s.Tags = []model.KeyValue{model.String(keySpanKind, trace.SpanKindClient.String())} + s.Tags = []model.KeyValue{model.SpanKindTag(model.SpanKindClient)} + //nolint: gosec // G115 s.StartTime = model.EpochMicrosecondsAsTime(uint64(cs.Timestamp)) if cr := td.findAnnotation(zSpan, zipkincore.CLIENT_RECV); cr != nil { + //nolint: gosec // G115 s.Duration = model.MicrosecondsAsDuration(uint64(cr.Timestamp - cs.Timestamp)) } } @@ -189,7 +187,7 @@ func (td toDomain) transformSpan(zSpan *zipkincore.Span) []*model.Span { } // getFlags takes a Zipkin Span and deduces the proper flags settings -func (td toDomain) getFlags(zSpan *zipkincore.Span) model.Flags { +func (toDomain) getFlags(zSpan *zipkincore.Span) model.Flags { f := model.Flags(0) if zSpan.Debug { f.SetDebug() @@ -198,9 +196,9 @@ func (td toDomain) getFlags(zSpan *zipkincore.Span) model.Flags { } // Get a correct start time to use for the span if it's not set directly -func (td toDomain) getStartTimeAndDuration(zSpan *zipkincore.Span) (int64, int64) { - timestamp := zSpan.GetTimestamp() - duration := zSpan.GetDuration() +func (td toDomain) getStartTimeAndDuration(zSpan *zipkincore.Span) (timestamp, duration int64) { + timestamp = zSpan.GetTimestamp() + duration = zSpan.GetDuration() if timestamp == 0 { cs := td.findAnnotation(zSpan, zipkincore.CLIENT_SEND) sr := td.findAnnotation(zSpan, zipkincore.SERVER_RECV) @@ -231,6 +229,7 @@ func (td toDomain) generateProcess(zSpan *zipkincore.Span) (*model.Process, erro serviceName, ipv4, err := td.findServiceNameAndIP(zSpan) if ipv4 != 0 { // If the ip process tag already exists, don't add it again + //nolint: gosec // G115 tags = append(tags, model.Int64(IPTagName, int64(uint64(ipv4)))) } return model.NewProcess(serviceName, tags), err @@ -261,16 +260,17 @@ func (td toDomain) findServiceNameAndIP(zSpan *zipkincore.Span) (string, int32, } err := fmt.Errorf( "cannot find service name in Zipkin span [traceID=%x, spanID=%x]", + //nolint: gosec // G115 uint64(zSpan.TraceID), uint64(zSpan.ID)) return UnknownServiceName, 0, err } -func (td toDomain) isCoreAnnotation(annotation *zipkincore.Annotation) bool { +func (toDomain) isCoreAnnotation(annotation *zipkincore.Annotation) bool { _, ok := coreAnnotations[annotation.Value] return ok } -func (td toDomain) isProcessTag(binaryAnnotation *zipkincore.BinaryAnnotation) bool { +func (toDomain) isProcessTag(binaryAnnotation *zipkincore.BinaryAnnotation) bool { _, ok := processTagAnnotations[binaryAnnotation.Key] return ok } @@ -309,7 +309,7 @@ func (td toDomain) getTags(binAnnotations []*zipkincore.BinaryAnnotation, tagInc return retMe } -func (td toDomain) transformBinaryAnnotation(binaryAnnotation *zipkincore.BinaryAnnotation) (model.KeyValue, error) { +func (toDomain) transformBinaryAnnotation(binaryAnnotation *zipkincore.BinaryAnnotation) (model.KeyValue, error) { switch binaryAnnotation.AnnotationType { case zipkincore.AnnotationType_BOOL: vBool := bytes.Equal(binaryAnnotation.Value, trueByteSlice) @@ -346,7 +346,7 @@ func (td toDomain) transformBinaryAnnotation(binaryAnnotation *zipkincore.Binary 
return model.KeyValue{}, fmt.Errorf("unknown zipkin annotation type: %d", binaryAnnotation.AnnotationType) } -func bytesToNumber(b []byte, number interface{}) error { +func bytesToNumber(b []byte, number any) error { buf := bytes.NewReader(b) return binary.Read(buf, binary.BigEndian, number) } @@ -364,6 +364,7 @@ func (td toDomain) getLogs(annotations []*zipkincore.Annotation) []model.Log { } logFields := td.getLogFields(a) jLog := model.Log{ + //nolint: gosec // G115 Timestamp: model.EpochMicrosecondsAsTime(uint64(a.Timestamp)), Fields: logFields, } @@ -372,7 +373,7 @@ func (td toDomain) getLogs(annotations []*zipkincore.Annotation) []model.Log { return retMe } -func (td toDomain) getLogFields(annotation *zipkincore.Annotation) []model.KeyValue { +func (toDomain) getLogFields(annotation *zipkincore.Annotation) []model.KeyValue { var logFields map[string]string // Since Zipkin format does not support kv-logging, some clients encode those Logs // as annotations with JSON value. Therefore, we try JSON decoding first. @@ -388,21 +389,22 @@ func (td toDomain) getLogFields(annotation *zipkincore.Annotation) []model.KeyVa return []model.KeyValue{model.String(DefaultLogFieldKey, annotation.Value)} } -func (td toDomain) getSpanKindTag(annotations []*zipkincore.Annotation) (model.KeyValue, bool) { +func (toDomain) getSpanKindTag(annotations []*zipkincore.Annotation) (model.KeyValue, bool) { for _, a := range annotations { if spanKind, ok := coreAnnotations[a.Value]; ok { - return model.String(keySpanKind, spanKind), true + return model.SpanKindTag(model.SpanKind(spanKind)), true } } return model.KeyValue{}, false } -func (td toDomain) getPeerTags(endpoint *zipkincore.Endpoint, tags []model.KeyValue) []model.KeyValue { +func (toDomain) getPeerTags(endpoint *zipkincore.Endpoint, tags []model.KeyValue) []model.KeyValue { if endpoint == nil { return tags } tags = append(tags, model.String(peerservice, endpoint.ServiceName)) if endpoint.Ipv4 != 0 { + //nolint: gosec // G115 ipv4 := int64(uint32(endpoint.Ipv4)) tags = append(tags, model.Int64(peerHostIPv4, ipv4)) } @@ -412,6 +414,7 @@ func (td toDomain) getPeerTags(endpoint *zipkincore.Endpoint, tags []model.KeyVa tags = append(tags, model.Binary(peerHostIPv6, endpoint.Ipv6)) } if endpoint.Port != 0 { + //nolint: gosec // G115 port := int64(uint16(endpoint.Port)) tags = append(tags, model.Int64(peerPort, port)) } diff --git a/vendor/github.com/jaegertracing/jaeger/model/dependencies.go b/vendor/github.com/jaegertracing/jaeger/model/dependencies.go index d1a86ba0d7c..8a0f2745b1f 100644 --- a/vendor/github.com/jaegertracing/jaeger/model/dependencies.go +++ b/vendor/github.com/jaegertracing/jaeger/model/dependencies.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package model @@ -22,8 +11,9 @@ const ( // ApplyDefaults applies defaults to the DependencyLink. 
func (d DependencyLink) ApplyDefaults() DependencyLink { - if d.Source == "" { - d.Source = JaegerDependencyLinkSource + dd := d + if dd.Source == "" { + dd.Source = JaegerDependencyLinkSource } - return d + return dd } diff --git a/vendor/github.com/jaegertracing/jaeger/model/doc.go b/vendor/github.com/jaegertracing/jaeger/model/doc.go index b44aea615a6..bb8daf13905 100644 --- a/vendor/github.com/jaegertracing/jaeger/model/doc.go +++ b/vendor/github.com/jaegertracing/jaeger/model/doc.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package model describes the internal data model for Trace and Span package model diff --git a/vendor/github.com/jaegertracing/jaeger/model/hash.go b/vendor/github.com/jaegertracing/jaeger/model/hash.go index 890843a8aaf..5b4a4dc442a 100644 --- a/vendor/github.com/jaegertracing/jaeger/model/hash.go +++ b/vendor/github.com/jaegertracing/jaeger/model/hash.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package model diff --git a/vendor/github.com/jaegertracing/jaeger/model/ids.go b/vendor/github.com/jaegertracing/jaeger/model/ids.go index dfdc9070574..4c7b77c6aa9 100644 --- a/vendor/github.com/jaegertracing/jaeger/model/ids.go +++ b/vendor/github.com/jaegertracing/jaeger/model/ids.go @@ -1,27 +1,18 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2018 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package model import ( "encoding/base64" "encoding/binary" + "errors" "fmt" "strconv" "github.com/gogo/protobuf/jsonpb" + "go.opentelemetry.io/collector/pdata/pcommon" ) const ( @@ -87,23 +78,23 @@ func TraceIDFromBytes(data []byte) (TraceID, error) { case len(data) == traceIDShortBytesLen: t.Low = binary.BigEndian.Uint64(data) default: - return TraceID{}, fmt.Errorf("invalid length for TraceID") + return TraceID{}, errors.New("invalid length for TraceID") } return t, nil } // MarshalText is called by encoding/json, which we do not want people to use. -func (t TraceID) MarshalText() ([]byte, error) { - return nil, fmt.Errorf("unsupported method TraceID.MarshalText; please use github.com/gogo/protobuf/jsonpb for marshalling") +func (TraceID) MarshalText() ([]byte, error) { + return nil, errors.New("unsupported method TraceID.MarshalText; please use github.com/gogo/protobuf/jsonpb for marshalling") } // UnmarshalText is called by encoding/json, which we do not want people to use. -func (t *TraceID) UnmarshalText(text []byte) error { - return fmt.Errorf("unsupported method TraceID.UnmarshalText; please use github.com/gogo/protobuf/jsonpb for marshalling") +func (*TraceID) UnmarshalText([]byte /* text */) error { - return errors.New("unsupported method TraceID.UnmarshalText; please use github.com/gogo/protobuf/jsonpb for marshalling") + return errors.New("unsupported method TraceID.UnmarshalText; please use github.com/gogo/protobuf/jsonpb for marshalling") } // Size returns the size of this datum in protobuf. It is always 16 bytes. -func (t *TraceID) Size() int { +func (*TraceID) Size() int { return 16 } @@ -124,7 +115,7 @@ func (t *TraceID) Unmarshal(data []byte) error { func marshalBytes(dst []byte, src []byte) (n int, err error) { if len(dst) < len(src) { - return 0, fmt.Errorf("buffer is too short") + return 0, errors.New("buffer is too short") } return copy(dst, src), nil } @@ -155,6 +146,23 @@ func (t *TraceID) UnmarshalJSON(data []byte) error { return t.Unmarshal(b) } +// ToOTELTraceID converts the TraceID to OTEL's representation of a trace identifier. +// This was taken from +// https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/internal/coreinternal/idutils/big_endian_converter.go. +func (t *TraceID) ToOTELTraceID() pcommon.TraceID { + traceID := [16]byte{} + binary.BigEndian.PutUint64(traceID[:8], t.High) + binary.BigEndian.PutUint64(traceID[8:], t.Low) + return traceID +} + +func TraceIDFromOTEL(traceID pcommon.TraceID) TraceID { + return TraceID{ + High: binary.BigEndian.Uint64(traceID[:traceIDShortBytesLen]), + Low: binary.BigEndian.Uint64(traceID[traceIDShortBytesLen:]), + } +} + // ------- SpanID ------- // NewSpanID creates a new SpanID from a 64bit unsigned int. @@ -181,23 +189,23 @@ func SpanIDFromString(s string) (SpanID, error) { // SpanIDFromBytes creates a SpanID from a list of bytes func SpanIDFromBytes(data []byte) (SpanID, error) { if len(data) != traceIDShortBytesLen { - return SpanID(0), fmt.Errorf("invalid length for SpanID") + return SpanID(0), errors.New("invalid length for SpanID") } return NewSpanID(binary.BigEndian.Uint64(data)), nil } // MarshalText is called by encoding/json, which we do not want people to use.
-func (s SpanID) MarshalText() ([]byte, error) { - return nil, fmt.Errorf("unsupported method SpanID.MarshalText; please use github.com/gogo/protobuf/jsonpb for marshalling") +func (SpanID) MarshalText() ([]byte, error) { + return nil, errors.New("unsupported method SpanID.MarshalText; please use github.com/gogo/protobuf/jsonpb for marshalling") } // UnmarshalText is called by encoding/json, which we do not want people to use. -func (s *SpanID) UnmarshalText(text []byte) error { - return fmt.Errorf("unsupported method SpanID.UnmarshalText; please use github.com/gogo/protobuf/jsonpb for marshalling") +func (*SpanID) UnmarshalText([]byte /* text */) error { + return errors.New("unsupported method SpanID.UnmarshalText; please use github.com/gogo/protobuf/jsonpb for marshalling") } // Size returns the size of this datum in protobuf. It is always 8 bytes. -func (s *SpanID) Size() int { +func (*SpanID) Size() int { return 8 } @@ -253,3 +261,19 @@ func (s *SpanID) UnmarshalJSON(data []byte) error { func (s *SpanID) UnmarshalJSONPB(_ *jsonpb.Unmarshaler, b []byte) error { return s.UnmarshalJSON(b) } + +// ToOTELSpanID converts the SpanID to OTEL's representation of a span identifier. +// This was taken from +// https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/internal/coreinternal/idutils/big_endian_converter.go. +func (s SpanID) ToOTELSpanID() pcommon.SpanID { + spanID := [8]byte{} + binary.BigEndian.PutUint64(spanID[:], uint64(s)) + return pcommon.SpanID(spanID) +} + +// SpanIDFromOTEL converts OTEL's SpanID to the model representation of a span identifier. +// This was taken from +// https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/internal/coreinternal/idutils/big_endian_converter.go. +func SpanIDFromOTEL(spanID pcommon.SpanID) SpanID { + return SpanID(binary.BigEndian.Uint64(spanID[:])) } diff --git a/vendor/github.com/jaegertracing/jaeger/model/json/doc.go b/vendor/github.com/jaegertracing/jaeger/model/json/doc.go deleted file mode 100644 index ba31238980d..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/model/json/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (c) 2019 The Jaeger Authors. -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package json defines the external JSON representation for Jaeger traces.
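// Editor's note: a short sketch of how the ToOTELTraceID/TraceIDFromOTEL and
// ToOTELSpanID/SpanIDFromOTEL helpers added above compose into a lossless
// round trip. The High/Low halves are written big-endian into the 16-byte
// pcommon.TraceID, so converting back recovers the original value. This is an
// editor's illustration under those assumptions, not part of the vendored code.
package main

import (
	"fmt"

	"github.com/jaegertracing/jaeger/model"
)

func main() {
	orig := model.NewTraceID(0x0102030405060708, 0x090a0b0c0d0e0f10)

	// Jaeger model -> OTEL pcommon.TraceID ([16]byte, big-endian High then Low).
	otelID := orig.ToOTELTraceID()

	// OTEL -> Jaeger model; both uint64 halves survive the round trip.
	back := model.TraceIDFromOTEL(otelID)
	fmt.Println(orig == back) // true

	// SpanID is a single uint64, serialized into pcommon.SpanID's 8 bytes.
	spanID := model.NewSpanID(0x1122334455667788)
	fmt.Println(model.SpanIDFromOTEL(spanID.ToOTELSpanID()) == spanID) // true
}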
-package json diff --git a/vendor/github.com/jaegertracing/jaeger/model/json/fixture.json b/vendor/github.com/jaegertracing/jaeger/model/json/fixture.json deleted file mode 100644 index 29c27c8131f..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/model/json/fixture.json +++ /dev/null @@ -1,102 +0,0 @@ -{ - "traceID": "abc0", - "spans": [ - { - "traceID": "abc0", - "spanID": "abc0", - "operationName": "root-span", - "references": null, - "startTime": 1000, - "duration": 500, - "tags": null, - "logs": null, - "processID": "p1", - "warnings": null - }, - { - "traceID": "abc0", - "spanID": "123", - "operationName": "span1", - "references": [ - { - "refType": "CHILD_OF", - "traceID": "abc0", - "spanID": "abc0" - } - ], - "startTime": 1000, - "duration": 500, - "tags": [ - { - "key": "error", - "type": "bool", - "value": true - }, - { - "key": "int64", - "type": "int64", - "value": 123 - }, - { - "key": "float64", - "type": "float64", - "value": 123.567 - }, - { - "key": "binary", - "type": "binary", - "value": "AQ==" - } - ], - "logs": [ - { - "timestamp": 1400, - "fields": [ - { - "key": "error", - "type": "string", - "value": "something bad happened" - } - ] - } - ], - "processID": "p2", - "warnings": null - }, - { - "traceID": "abc0", - "spanID": "567", - "operationName": "span2", - "references": [ - { - "refType": "FOLLOWS_FROM", - "traceID": "abc0", - "spanID": "abc0" - } - ], - "startTime": 1000, - "duration": 500, - "tags": null, - "logs": null, - "processID": "p2", - "warnings": null - } - ], - "processes": { - "p1": { - "serviceName": "service_1", - "tags": null - }, - "p2": { - "serviceName": "service_2", - "tags": [ - { - "key": "host", - "type": "string", - "value": "google.com" - } - ] - } - }, - "warnings": null -} diff --git a/vendor/github.com/jaegertracing/jaeger/model/json/model.go b/vendor/github.com/jaegertracing/jaeger/model/json/model.go deleted file mode 100644 index 8d72dc498e5..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/model/json/model.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright (c) 2019 The Jaeger Authors. -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package json - -// ReferenceType is the reference type of one span to another -type ReferenceType string - -// TraceID is the shared trace ID of all spans in the trace. -type TraceID string - -// SpanID is the id of a span -type SpanID string - -// ProcessID is a hashed value of the Process struct that is unique within the trace. -type ProcessID string - -// ValueType is the type of a value stored in KeyValue struct. 
-type ValueType string - -const ( - // ChildOf means a span is the child of another span - ChildOf ReferenceType = "CHILD_OF" - // FollowsFrom means a span follows from another span - FollowsFrom ReferenceType = "FOLLOWS_FROM" - - // StringType indicates a string value stored in KeyValue - StringType ValueType = "string" - // BoolType indicates a Boolean value stored in KeyValue - BoolType ValueType = "bool" - // Int64Type indicates a 64bit signed integer value stored in KeyValue - Int64Type ValueType = "int64" - // Float64Type indicates a 64bit float value stored in KeyValue - Float64Type ValueType = "float64" - // BinaryType indicates an arbitrary byte array stored in KeyValue - BinaryType ValueType = "binary" -) - -// Trace is a list of spans -type Trace struct { - TraceID TraceID `json:"traceID"` - Spans []Span `json:"spans"` - Processes map[ProcessID]Process `json:"processes"` - Warnings []string `json:"warnings"` -} - -// Span is a span denoting a piece of work in some infrastructure -// When converting to UI model, ParentSpanID and Process should be dereferenced into -// References and ProcessID, respectively. -// When converting to ES model, ProcessID and Warnings should be omitted. Even if -// included, ES with dynamic settings off will automatically ignore unneeded fields. -type Span struct { - TraceID TraceID `json:"traceID"` - SpanID SpanID `json:"spanID"` - ParentSpanID SpanID `json:"parentSpanID,omitempty"` // deprecated - Flags uint32 `json:"flags,omitempty"` - OperationName string `json:"operationName"` - References []Reference `json:"references"` - StartTime uint64 `json:"startTime"` // microseconds since Unix epoch - Duration uint64 `json:"duration"` // microseconds - Tags []KeyValue `json:"tags"` - Logs []Log `json:"logs"` - ProcessID ProcessID `json:"processID,omitempty"` - Process *Process `json:"process,omitempty"` - Warnings []string `json:"warnings"` -} - -// Reference is a reference from one span to another -type Reference struct { - RefType ReferenceType `json:"refType"` - TraceID TraceID `json:"traceID"` - SpanID SpanID `json:"spanID"` -} - -// Process is the process emitting a set of spans -type Process struct { - ServiceName string `json:"serviceName"` - Tags []KeyValue `json:"tags"` -} - -// Log is a log emitted in a span -type Log struct { - Timestamp uint64 `json:"timestamp"` - Fields []KeyValue `json:"fields"` -} - -// KeyValue is a key-value pair with typed value. -type KeyValue struct { - Key string `json:"key"` - Type ValueType `json:"type,omitempty"` - Value interface{} `json:"value"` -} - -// DependencyLink shows dependencies between services -type DependencyLink struct { - Parent string `json:"parent"` - Child string `json:"child"` - CallCount uint64 `json:"callCount"` -} - -// Operation defines the data in the operation response when query operation by service and span kind -type Operation struct { - Name string `json:"name"` - SpanKind string `json:"spanKind"` -} diff --git a/vendor/github.com/jaegertracing/jaeger/model/keyvalue.go b/vendor/github.com/jaegertracing/jaeger/model/keyvalue.go index 44f3e1bdb03..72304eeee91 100644 --- a/vendor/github.com/jaegertracing/jaeger/model/keyvalue.go +++ b/vendor/github.com/jaegertracing/jaeger/model/keyvalue.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package model @@ -36,8 +25,32 @@ const ( Float64Type = ValueType_FLOAT64 // BinaryType indicates the value is binary blob stored as a byte array BinaryType = ValueType_BINARY + + SpanKindKey = "span.kind" + SamplerTypeKey = "sampler.type" + SamplerParamKey = "sampler.param" +) + +type SpanKind string + +const ( + SpanKindClient SpanKind = "client" + SpanKindServer SpanKind = "server" + SpanKindProducer SpanKind = "producer" + SpanKindConsumer SpanKind = "consumer" + SpanKindInternal SpanKind = "internal" + SpanKindUnspecified SpanKind = "" ) +func SpanKindFromString(kind string) (SpanKind, error) { + switch SpanKind(kind) { + case SpanKindClient, SpanKindServer, SpanKindProducer, SpanKindConsumer, SpanKindInternal, SpanKindUnspecified: + return SpanKind(kind), nil + default: + return SpanKindUnspecified, fmt.Errorf("unknown span kind %q", kind) + } +} + // KeyValues is a type alias that exposes convenience functions like Sort, FindByKey. type KeyValues []KeyValue @@ -102,8 +115,8 @@ func (kv *KeyValue) Binary() []byte { return nil } -// Value returns typed values stored in KeyValue as interface{}. -func (kv *KeyValue) Value() interface{} { +// Value returns typed values stored in KeyValue as any. +func (kv *KeyValue) Value() any { switch kv.VType { case StringType: return kv.VStr @@ -210,6 +223,7 @@ func (kv KeyValue) Hash(w io.Writer) error { if _, err := w.Write([]byte(kv.Key)); err != nil { return err } + //nolint: gosec // G115 if err := binary.Write(w, binary.BigEndian, uint16(kv.VType)); err != nil { return err } diff --git a/vendor/github.com/jaegertracing/jaeger/model/process.go b/vendor/github.com/jaegertracing/jaeger/model/process.go index 1babaeb1823..f2a73151886 100644 --- a/vendor/github.com/jaegertracing/jaeger/model/process.go +++ b/vendor/github.com/jaegertracing/jaeger/model/process.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package model diff --git a/vendor/github.com/jaegertracing/jaeger/model/sort.go b/vendor/github.com/jaegertracing/jaeger/model/sort.go index b2556d8cc4c..91d82219085 100644 --- a/vendor/github.com/jaegertracing/jaeger/model/sort.go +++ b/vendor/github.com/jaegertracing/jaeger/model/sort.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package model diff --git a/vendor/github.com/jaegertracing/jaeger/model/span.go b/vendor/github.com/jaegertracing/jaeger/model/span.go index 833763efe90..3ec5a9431a2 100644 --- a/vendor/github.com/jaegertracing/jaeger/model/span.go +++ b/vendor/github.com/jaegertracing/jaeger/model/span.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package model @@ -20,7 +9,6 @@ import ( "io" "strconv" - "go.opentelemetry.io/otel/trace" "go.uber.org/zap" ) @@ -39,23 +27,11 @@ const ( DebugFlag = Flags(2) // FirehoseFlag is the bit in Flags in order to define a span as a firehose span FirehoseFlag = Flags(8) - - keySamplerType = "sampler.type" - keySpanKind = "span.kind" - keySamplerParam = "sampler.param" ) // Flags is a bit map of flags for a span type Flags uint32 -var toSpanKind = map[string]trace.SpanKind{ - "client": trace.SpanKindClient, - "server": trace.SpanKindServer, - "producer": trace.SpanKindProducer, - "consumer": trace.SpanKindConsumer, - "internal": trace.SpanKindInternal, -} - var toSamplerType = map[string]SamplerType{ "unrecognized": SamplerTypeUnrecognized, "probabilistic": SamplerTypeProbabilistic, @@ -81,6 +57,10 @@ func (s SamplerType) String() string { } } +func SpanKindTag(kind SpanKind) KeyValue { + return String(SpanKindKey, string(kind)) +} + // Hash implements Hash from Hashable. func (s *Span) Hash(w io.Writer) (err error) { // gob is not the most efficient way, but it ensures we don't miss any fields. @@ -90,27 +70,27 @@ func (s *Span) Hash(w io.Writer) (err error) { } // HasSpanKind returns true if the span has a `span.kind` tag set to `kind`. 
-func (s *Span) HasSpanKind(kind trace.SpanKind) bool { - if tag, ok := KeyValues(s.Tags).FindByKey(keySpanKind); ok { - return tag.AsString() == kind.String() +func (s *Span) HasSpanKind(kind SpanKind) bool { + if tag, ok := KeyValues(s.Tags).FindByKey(SpanKindKey); ok { + return tag.AsString() == string(kind) } return false } // GetSpanKind returns value of `span.kind` tag and whether the tag can be found -func (s *Span) GetSpanKind() (spanKind trace.SpanKind, found bool) { - if tag, ok := KeyValues(s.Tags).FindByKey(keySpanKind); ok { - if kind, ok := toSpanKind[tag.AsString()]; ok { +func (s *Span) GetSpanKind() (spanKind SpanKind, found bool) { + if tag, ok := KeyValues(s.Tags).FindByKey(SpanKindKey); ok { + if kind, err := SpanKindFromString(tag.AsString()); err == nil { return kind, true } } - return trace.SpanKindUnspecified, false + return SpanKindUnspecified, false } // GetSamplerType returns the sampler type for span func (s *Span) GetSamplerType() SamplerType { // There's no corresponding opentelemetry tag label corresponding to sampler.type - if tag, ok := KeyValues(s.Tags).FindByKey(keySamplerType); ok { + if tag, ok := KeyValues(s.Tags).FindByKey(SamplerTypeKey); ok { if s, ok := toSamplerType[tag.VStr]; ok { return s } @@ -121,13 +101,13 @@ func (s *Span) GetSamplerType() SamplerType { // IsRPCClient returns true if the span represents a client side of an RPC, // as indicated by the `span.kind` tag set to `client`. func (s *Span) IsRPCClient() bool { - return s.HasSpanKind(trace.SpanKindClient) + return s.HasSpanKind(SpanKindClient) } // IsRPCServer returns true if the span represents a server side of an RPC, // as indicated by the `span.kind` tag set to `server`. func (s *Span) IsRPCServer() bool { - return s.HasSpanKind(trace.SpanKindServer) + return s.HasSpanKind(SpanKindServer) } // NormalizeTimestamps changes all timestamps in this span to UTC. @@ -179,7 +159,7 @@ func (s *Span) GetSamplerParams(logger *zap.Logger) (SamplerType, float64) { if samplerType == SamplerTypeUnrecognized { return SamplerTypeUnrecognized, 0 } - tag, ok := KeyValues(s.Tags).FindByKey(keySamplerParam) + tag, ok := KeyValues(s.Tags).FindByKey(SamplerParamKey) if !ok { return SamplerTypeUnrecognized, 0 } diff --git a/vendor/github.com/jaegertracing/jaeger/model/spanref.go b/vendor/github.com/jaegertracing/jaeger/model/spanref.go index 92acc21e9a3..39d0e13820d 100644 --- a/vendor/github.com/jaegertracing/jaeger/model/spanref.go +++ b/vendor/github.com/jaegertracing/jaeger/model/spanref.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
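// Editor's note: the span.go and keyvalue.go hunks above replace the
// dependency on OTEL's trace.SpanKind with a local model.SpanKind string type
// plus the SpanKindKey constant and SpanKindTag helper. A small usage sketch
// (editor's illustration, not part of the vendored code):
package main

import (
	"fmt"

	"github.com/jaegertracing/jaeger/model"
)

func main() {
	span := &model.Span{
		// SpanKindTag builds the canonical `span.kind` string tag.
		Tags: []model.KeyValue{model.SpanKindTag(model.SpanKindClient)},
	}

	// IsRPCClient checks for `span.kind` == "client" via HasSpanKind.
	fmt.Println(span.IsRPCClient()) // true

	// GetSpanKind parses the tag back through SpanKindFromString.
	kind, found := span.GetSpanKind()
	fmt.Println(kind, found) // client true

	// Unknown strings are rejected rather than silently accepted.
	_, err := model.SpanKindFromString("gateway")
	fmt.Println(err != nil) // true
}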
+// SPDX-License-Identifier: Apache-2.0 package model diff --git a/vendor/github.com/jaegertracing/jaeger/model/time.go b/vendor/github.com/jaegertracing/jaeger/model/time.go index 34a66a7efc4..153e52b7609 100644 --- a/vendor/github.com/jaegertracing/jaeger/model/time.go +++ b/vendor/github.com/jaegertracing/jaeger/model/time.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package model @@ -23,22 +12,26 @@ import ( func EpochMicrosecondsAsTime(ts uint64) time.Time { seconds := ts / 1000000 nanos := 1000 * (ts % 1000000) + //nolint: gosec // G115 return time.Unix(int64(seconds), int64(nanos)).UTC() } // TimeAsEpochMicroseconds converts time.Time to microseconds since epoch, // which is the format the StartTime field is stored in the Span. func TimeAsEpochMicroseconds(t time.Time) uint64 { + //nolint: gosec // G115 return uint64(t.UnixNano() / 1000) } // MicrosecondsAsDuration converts duration in microseconds to time.Duration value. func MicrosecondsAsDuration(v uint64) time.Duration { + //nolint: gosec // G115 return time.Duration(v) * time.Microsecond } // DurationAsMicroseconds converts time.Duration to microseconds, // which is the format the Duration field is stored in the Span. func DurationAsMicroseconds(d time.Duration) uint64 { + //nolint: gosec // G115 return uint64(d.Nanoseconds() / 1000) } diff --git a/vendor/github.com/jaegertracing/jaeger/model/trace.go b/vendor/github.com/jaegertracing/jaeger/model/trace.go index fcc31b07ae3..234e2adb143 100644 --- a/vendor/github.com/jaegertracing/jaeger/model/trace.go +++ b/vendor/github.com/jaegertracing/jaeger/model/trace.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package model diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/bearertoken/context.go b/vendor/github.com/jaegertracing/jaeger/pkg/bearertoken/context.go deleted file mode 100644 index 4f9221fe631..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/pkg/bearertoken/context.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) 2021 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
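// Editor's note: the time.go hunk above stores span times as microseconds
// since the Unix epoch, with uint64 <-> int64 casts marked //nolint: gosec //
// G115 because they cannot overflow for any realistic timestamp. A round-trip
// sketch of the four helpers (editor's illustration, not part of the vendored
// code); the conversions are lossy below microsecond resolution.
package main

import (
	"fmt"
	"time"

	"github.com/jaegertracing/jaeger/model"
)

func main() {
	ts := time.Date(2024, 5, 1, 12, 0, 0, 123456789, time.UTC)

	// time.Time -> epoch microseconds -> time.Time; nanoseconds are truncated
	// to microsecond precision on the way through.
	micros := model.TimeAsEpochMicroseconds(ts)
	back := model.EpochMicrosecondsAsTime(micros)
	fmt.Println(back.Equal(ts.Truncate(time.Microsecond))) // true

	// Duration round trip is exact for whole microseconds.
	d := 1500 * time.Microsecond
	fmt.Println(model.MicrosecondsAsDuration(model.DurationAsMicroseconds(d)) == d) // true
}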
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package bearertoken - -import "context" - -type contextKeyType int - -const contextKey = contextKeyType(iota) - -// StoragePropagationKey is a key for viper configuration to pass this option to storage plugins. -const StoragePropagationKey = "storage.propagate.token" - -// ContextWithBearerToken set bearer token in context. -func ContextWithBearerToken(ctx context.Context, token string) context.Context { - if token == "" { - return ctx - } - return context.WithValue(ctx, contextKey, token) -} - -// GetBearerToken from context, or empty string if there is no token. -func GetBearerToken(ctx context.Context) (string, bool) { - val, ok := ctx.Value(contextKey).(string) - return val, ok -} diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/bearertoken/http.go b/vendor/github.com/jaegertracing/jaeger/pkg/bearertoken/http.go deleted file mode 100644 index 8e264f9b444..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/pkg/bearertoken/http.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (c) 2019 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package bearertoken - -import ( - "net/http" - "strings" - - "go.uber.org/zap" -) - -// PropagationHandler returns a http.Handler containing the logic to extract -// the Bearer token from the Authorization header of the http.Request and insert it into request.Context -// for propagation. The token can be accessed via GetBearerToken. -func PropagationHandler(logger *zap.Logger, h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - authHeaderValue := r.Header.Get("Authorization") - // If no Authorization header is present, try with X-Forwarded-Access-Token - if authHeaderValue == "" { - authHeaderValue = r.Header.Get("X-Forwarded-Access-Token") - } - if authHeaderValue != "" { - headerValue := strings.Split(authHeaderValue, " ") - token := "" - switch { - case len(headerValue) == 2: - // Make sure we only capture bearer token , not other types like Basic auth. - if headerValue[0] == "Bearer" { - token = headerValue[1] - } - case len(headerValue) == 1: - // Treat the entire value as a token. 
- token = authHeaderValue - default: - logger.Warn("Invalid authorization header value, skipping token propagation") - } - h.ServeHTTP(w, r.WithContext(ContextWithBearerToken(ctx, token))) - } else { - h.ServeHTTP(w, r.WithContext(ctx)) - } - }) -} diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/bearertoken/transport.go b/vendor/github.com/jaegertracing/jaeger/pkg/bearertoken/transport.go deleted file mode 100644 index 7e13e6300b5..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/pkg/bearertoken/transport.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (c) 2021 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package bearertoken - -import ( - "errors" - "net/http" -) - -// RoundTripper wraps another http.RoundTripper and injects -// an authentication header with bearer token into requests. -type RoundTripper struct { - // Transport is the underlying http.RoundTripper being wrapped. Required. - Transport http.RoundTripper - - // StaticToken is the pre-configured bearer token. Optional. - StaticToken string - - // OverrideFromCtx enables reading bearer token from Context. - OverrideFromCtx bool -} - -// RoundTrip injects the outbound Authorization header with the -// token provided in the inbound request. -func (tr RoundTripper) RoundTrip(r *http.Request) (*http.Response, error) { - if tr.Transport == nil { - return nil, errors.New("no http.RoundTripper provided") - } - token := tr.StaticToken - if tr.OverrideFromCtx { - headerToken, _ := GetBearerToken(r.Context()) - if headerToken != "" { - token = headerToken - } - } - if token != "" { - r.Header.Set("Authorization", "Bearer "+token) - } - return tr.Transport.RoundTrip(r) -} diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/clientcfg/clientcfghttp/cfgmgr.go b/vendor/github.com/jaegertracing/jaeger/pkg/clientcfg/clientcfghttp/cfgmgr.go deleted file mode 100644 index 621162a36fe..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/pkg/clientcfg/clientcfghttp/cfgmgr.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) 2020 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientcfghttp - -import ( - "context" - "errors" - - "github.com/jaegertracing/jaeger/cmd/collector/app/sampling/strategystore" - "github.com/jaegertracing/jaeger/proto-gen/api_v2" - "github.com/jaegertracing/jaeger/thrift-gen/baggage" -) - -// ConfigManager implements ClientConfigManager. 
-type ConfigManager struct {
-	SamplingStrategyStore strategystore.StrategyStore
-	BaggageManager        baggage.BaggageRestrictionManager
-}
-
-// GetSamplingStrategy implements ClientConfigManager.GetSamplingStrategy.
-func (c *ConfigManager) GetSamplingStrategy(ctx context.Context, serviceName string) (*api_v2.SamplingStrategyResponse, error) {
-	return c.SamplingStrategyStore.GetSamplingStrategy(ctx, serviceName)
-}
-
-// GetBaggageRestrictions implements ClientConfigManager.GetBaggageRestrictions.
-func (c *ConfigManager) GetBaggageRestrictions(ctx context.Context, serviceName string) ([]*baggage.BaggageRestriction, error) {
-	if c.BaggageManager == nil {
-		return nil, errors.New("baggage restrictions not implemented")
-	}
-	return c.BaggageManager.GetBaggageRestrictions(ctx, serviceName)
-}
diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/clientcfg/clientcfghttp/handler.go b/vendor/github.com/jaegertracing/jaeger/pkg/clientcfg/clientcfghttp/handler.go
deleted file mode 100644
index 2c7bbc12f09..00000000000
--- a/vendor/github.com/jaegertracing/jaeger/pkg/clientcfg/clientcfghttp/handler.go
+++ /dev/null
@@ -1,227 +0,0 @@
-// Copyright (c) 2019 The Jaeger Authors.
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientcfghttp
-
-import (
-	"encoding/json"
-	"errors"
-	"fmt"
-	"net/http"
-	"strings"
-
-	"github.com/gorilla/mux"
-
-	"github.com/jaegertracing/jaeger/cmd/agent/app/configmanager"
-	p2json "github.com/jaegertracing/jaeger/model/converter/json"
-	t2p "github.com/jaegertracing/jaeger/model/converter/thrift/jaeger"
-	"github.com/jaegertracing/jaeger/pkg/metrics"
-	"github.com/jaegertracing/jaeger/proto-gen/api_v2"
-)
-
-const mimeTypeApplicationJSON = "application/json"
-
-var errBadRequest = errors.New("bad request")
-
-// HTTPHandlerParams contains parameters that must be passed to NewHTTPHandler.
-type HTTPHandlerParams struct {
-	ConfigManager  configmanager.ClientConfigManager // required
-	MetricsFactory metrics.Factory                   // required
-
-	// BasePath will be used as a prefix for the endpoints, e.g. "/api"
-	BasePath string
-
-	// LegacySamplingEndpoint enables returning sampling strategy from "/" endpoint
-	// using Thrift 0.9.2 enum codes.
-	LegacySamplingEndpoint bool
-}
-
-// HTTPHandler implements endpoints for used by Jaeger clients to retrieve client configuration,
-// such as sampling and baggage restrictions.
-type HTTPHandler struct {
-	params  HTTPHandlerParams
-	metrics struct {
-		// Number of good sampling requests
-		SamplingRequestSuccess metrics.Counter `metric:"http-server.requests" tags:"type=sampling"`
-
-		// Number of good sampling requests against the old endpoint / using Thrift 0.9.2 enum codes
-		LegacySamplingRequestSuccess metrics.Counter `metric:"http-server.requests" tags:"type=sampling-legacy"`
-
-		// Number of good baggage requests
-		BaggageRequestSuccess metrics.Counter `metric:"http-server.requests" tags:"type=baggage"`
-
-		// Number of bad requests (400s)
-		BadRequest metrics.Counter `metric:"http-server.errors" tags:"status=4xx,source=all"`
-
-		// Number of collector proxy failures
-		CollectorProxyFailures metrics.Counter `metric:"http-server.errors" tags:"status=5xx,source=collector-proxy"`
-
-		// Number of bad responses due to malformed thrift
-		BadThriftFailures metrics.Counter `metric:"http-server.errors" tags:"status=5xx,source=thrift"`
-
-		// Number of bad responses due to proto conversion
-		BadProtoFailures metrics.Counter `metric:"http-server.errors" tags:"status=5xx,source=proto"`
-
-		// Number of failed response writes from http server
-		WriteFailures metrics.Counter `metric:"http-server.errors" tags:"status=5xx,source=write"`
-	}
-}
-
-// NewHTTPHandler creates new HTTPHandler.
-func NewHTTPHandler(params HTTPHandlerParams) *HTTPHandler {
-	handler := &HTTPHandler{params: params}
-	metrics.MustInit(&handler.metrics, params.MetricsFactory, nil)
-	return handler
-}
-
-// RegisterRoutes registers configuration handlers with Gorilla Router.
-func (h *HTTPHandler) RegisterRoutes(router *mux.Router) {
-	prefix := h.params.BasePath
-	if h.params.LegacySamplingEndpoint {
-		router.HandleFunc(
-			prefix+"/",
-			func(w http.ResponseWriter, r *http.Request) {
-				h.serveSamplingHTTP(w, r, h.encodeThriftLegacy)
-			},
-		).Methods(http.MethodGet)
-	}
-	router.HandleFunc(
-		prefix+"/sampling",
-		func(w http.ResponseWriter, r *http.Request) {
-			h.serveSamplingHTTP(w, r, h.encodeProto)
-		},
-	).Methods(http.MethodGet)
-
-	router.HandleFunc(prefix+"/baggageRestrictions", func(w http.ResponseWriter, r *http.Request) {
-		h.serveBaggageHTTP(w, r)
-	}).Methods(http.MethodGet)
-}
-
-func (h *HTTPHandler) serviceFromRequest(w http.ResponseWriter, r *http.Request) (string, error) {
-	services := r.URL.Query()["service"]
-	if len(services) != 1 {
-		h.metrics.BadRequest.Inc(1)
-		http.Error(w, "'service' parameter must be provided once", http.StatusBadRequest)
-		return "", errBadRequest
-	}
-	return services[0], nil
-}
-
-func (h *HTTPHandler) writeJSON(w http.ResponseWriter, json []byte) error {
-	w.Header().Add("Content-Type", mimeTypeApplicationJSON)
-	if _, err := w.Write(json); err != nil {
-		h.metrics.WriteFailures.Inc(1)
-		return err
-	}
-	return nil
-}
-
-func (h *HTTPHandler) serveSamplingHTTP(
-	w http.ResponseWriter,
-	r *http.Request,
-	encoder func(strategy *api_v2.SamplingStrategyResponse) ([]byte, error),
-) {
-	service, err := h.serviceFromRequest(w, r)
-	if err != nil {
-		return
-	}
-	resp, err := h.params.ConfigManager.GetSamplingStrategy(r.Context(), service)
-	if err != nil {
-		h.metrics.CollectorProxyFailures.Inc(1)
-		http.Error(w, fmt.Sprintf("collector error: %+v", err), http.StatusInternalServerError)
-		return
-	}
-	jsonBytes, err := encoder(resp)
-	if err != nil {
-		http.Error(w, "cannot marshall to JSON", http.StatusInternalServerError)
-		return
-	}
-	if err = h.writeJSON(w, jsonBytes); err != nil {
-		return
-	}
-}
-
-func (h *HTTPHandler) encodeThriftLegacy(strategy *api_v2.SamplingStrategyResponse) ([]byte, error) {
-	tStrategy, err := t2p.ConvertSamplingResponseFromDomain(strategy)
-	if err != nil {
-		h.metrics.BadThriftFailures.Inc(1)
-		return nil, fmt.Errorf("ConvertSamplingResponseFromDomain failed: %w", err)
-	}
-	jsonBytes, err := json.Marshal(tStrategy)
-	if err != nil {
-		h.metrics.BadThriftFailures.Inc(1)
-		return nil, err
-	}
-	jsonBytes = h.encodeThriftEnums092(jsonBytes)
-	h.metrics.LegacySamplingRequestSuccess.Inc(1)
-	return jsonBytes, nil
-}
-
-func (h *HTTPHandler) encodeProto(strategy *api_v2.SamplingStrategyResponse) ([]byte, error) {
-	str, err := p2json.SamplingStrategyResponseToJSON(strategy)
-	if err != nil {
-		h.metrics.BadProtoFailures.Inc(1)
-		return nil, fmt.Errorf("SamplingStrategyResponseToJSON failed: %w", err)
-	}
-	h.metrics.SamplingRequestSuccess.Inc(1)
-	return []byte(str), nil
-}
-
-func (h *HTTPHandler) serveBaggageHTTP(w http.ResponseWriter, r *http.Request) {
-	service, err := h.serviceFromRequest(w, r)
-	if err != nil {
-		return
-	}
-	resp, err := h.params.ConfigManager.GetBaggageRestrictions(r.Context(), service)
-	if err != nil {
-		h.metrics.CollectorProxyFailures.Inc(1)
-		http.Error(w, fmt.Sprintf("collector error: %+v", err), http.StatusInternalServerError)
-		return
-	}
-	// NB. it's literally impossible for this Marshal to fail
-	jsonBytes, _ := json.Marshal(resp)
-	if err = h.writeJSON(w, jsonBytes); err != nil {
-		return
-	}
-	h.metrics.BaggageRequestSuccess.Inc(1)
-}
-
-var samplingStrategyTypes = []api_v2.SamplingStrategyType{
-	api_v2.SamplingStrategyType_PROBABILISTIC,
-	api_v2.SamplingStrategyType_RATE_LIMITING,
-}
-
-// Replace string enum values produced from Thrift 0.9.3 generated classes
-// with integer codes produced from Thrift 0.9.2 generated classes.
-//
-// For example:
-//
-// Thrift 0.9.2 classes generate this JSON:
-// {"strategyType":0,"probabilisticSampling":{"samplingRate":0.5},"rateLimitingSampling":null,"operationSampling":null}
-//
-// Thrift 0.9.3 classes generate this JSON:
-// {"strategyType":"PROBABILISTIC","probabilisticSampling":{"samplingRate":0.5}}
-func (h *HTTPHandler) encodeThriftEnums092(json []byte) []byte {
-	str := string(json)
-	for _, strategyType := range samplingStrategyTypes {
-		str = strings.Replace(
-			str,
-			fmt.Sprintf(`"strategyType":"%s"`, strategyType.String()),
-			fmt.Sprintf(`"strategyType":%d`, strategyType),
-			1,
-		)
-	}
-	return []byte(str)
-}
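The handler deleted above served Jaeger's client-configuration endpoints over plain HTTP. For orientation, a client fetches a sampling strategy with a single GET; a minimal sketch (the localhost:5778 address is an assumption, adjust to wherever your agent or collector exposes these endpoints):

```go
// Sketch: query the client-config sampling endpoint that this handler served.
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	resp, err := http.Get("http://localhost:5778/sampling?service=my-service")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// e.g. {"strategyType":"PROBABILISTIC","probabilisticSampling":{"samplingRate":0.001}}
	var strategy map[string]any
	if err := json.NewDecoder(resp.Body).Decode(&strategy); err != nil {
		panic(err)
	}
	fmt.Println(strategy)
}
```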
diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/cert_watcher.go b/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/cert_watcher.go
deleted file mode 100644
index 106249c3647..00000000000
--- a/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/cert_watcher.go
+++ /dev/null
@@ -1,151 +0,0 @@
-// Copyright (c) 2020 The Jaeger Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tlscfg
-
-import (
-	"crypto/tls"
-	"crypto/x509"
-	"errors"
-	"fmt"
-	"io"
-	"path/filepath"
-	"sync"
-
-	"go.uber.org/zap"
-
-	"github.com/jaegertracing/jaeger/pkg/fswatcher"
-)
-
-const (
-	logMsgPairReloaded    = "Reloaded modified key pair"
-	logMsgCertReloaded    = "Reloaded modified certificate"
-	logMsgPairNotReloaded = "Failed to reload key pair, using previous versions"
-	logMsgCertNotReloaded = "Failed to reload certificate, using previous version"
-)
-
-// certWatcher watches filesystem changes on certificates supplied via Options
-// The changed RootCAs and ClientCAs certificates are added to x509.CertPool without invalidating the previously used certificate.
-// The certificate and key can be obtained via certWatcher.certificate.
-// The consumers of this API should use GetCertificate or GetClientCertificate from tls.Config to supply the certificate to the config.
-type certWatcher struct {
-	mu       sync.RWMutex
-	opts     Options
-	logger   *zap.Logger
-	watchers []*fswatcher.FSWatcher
-	cert     *tls.Certificate
-}
-
-var _ io.Closer = (*certWatcher)(nil)
-
-func newCertWatcher(opts Options, logger *zap.Logger, rootCAs, clientCAs *x509.CertPool) (*certWatcher, error) {
-	var cert *tls.Certificate
-	if opts.CertPath != "" && opts.KeyPath != "" {
-		// load certs at startup to catch missing certs error early
-		c, err := tls.LoadX509KeyPair(filepath.Clean(opts.CertPath), filepath.Clean(opts.KeyPath))
-		if err != nil {
-			return nil, fmt.Errorf("failed to load server TLS cert and key: %w", err)
-		}
-		cert = &c
-	}
-
-	w := &certWatcher{
-		opts:   opts,
-		logger: logger,
-		cert:   cert,
-	}
-
-	if err := w.watchCertPair(); err != nil {
-		return nil, err
-	}
-	if err := w.watchCert(w.opts.CAPath, rootCAs); err != nil {
-		return nil, err
-	}
-	if err := w.watchCert(w.opts.ClientCAPath, clientCAs); err != nil {
-		return nil, err
-	}
-
-	return w, nil
-}
-
-func (w *certWatcher) Close() error {
-	var errs []error
-	for _, w := range w.watchers {
-		errs = append(errs, w.Close())
-	}
-	return errors.Join(errs...)
-}
-
-func (w *certWatcher) certificate() *tls.Certificate {
-	w.mu.RLock()
-	defer w.mu.RUnlock()
-	return w.cert
-}
-
-func (w *certWatcher) watchCertPair() error {
-	watcher, err := fswatcher.New(
-		[]string{w.opts.CertPath, w.opts.KeyPath},
-		w.onCertPairChange,
-		w.logger,
-	)
-	if err == nil {
-		w.watchers = append(w.watchers, watcher)
-		return nil
-	}
-	w.Close()
-	return fmt.Errorf("failed to watch key pair %s and %s: %w", w.opts.KeyPath, w.opts.CertPath, err)
-}
-
-func (w *certWatcher) watchCert(certPath string, certPool *x509.CertPool) error {
-	onCertChange := func() { w.onCertChange(certPath, certPool) }
-
-	watcher, err := fswatcher.New([]string{certPath}, onCertChange, w.logger)
-	if err == nil {
-		w.watchers = append(w.watchers, watcher)
-		return nil
-	}
-	w.Close()
-	return fmt.Errorf("failed to watch cert %s: %w", certPath, err)
-}
-
-func (w *certWatcher) onCertPairChange() {
-	cert, err := tls.LoadX509KeyPair(filepath.Clean(w.opts.CertPath), filepath.Clean(w.opts.KeyPath))
-	if err == nil {
-		w.mu.Lock()
-		w.cert = &cert
-		w.mu.Unlock()
-		w.logger.Info(
-			logMsgPairReloaded,
-			zap.String("key", w.opts.KeyPath),
-			zap.String("cert", w.opts.CertPath),
-		)
-	} else {
-		w.logger.Error(
-			logMsgPairNotReloaded,
-			zap.String("key", w.opts.KeyPath),
-			zap.String("cert", w.opts.CertPath),
-			zap.Error(err),
-		)
-	}
-}
-
-func (w *certWatcher) onCertChange(certPath string, certPool *x509.CertPool) {
-	w.mu.Lock() // prevent concurrent updates to the same certPool
-	if err := addCertToPool(certPath, certPool); err == nil {
-		w.logger.Info(logMsgCertReloaded, zap.String("cert", certPath))
-	} else {
-		w.logger.Error(logMsgCertNotReloaded, zap.String("cert", certPath), zap.Error(err))
-	}
-	w.mu.Unlock()
-}
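The deleted certWatcher existed to make TLS certificates hot-reloadable without restarting the server. The core pattern, reduced to standard-library pieces (file names below are placeholders):

```go
// Sketch of the reload pattern the removed certWatcher implemented: serve the
// current key pair on every handshake via GetCertificate, guarded by a lock.
package main

import (
	"crypto/tls"
	"sync"
)

type reloadingCert struct {
	mu   sync.RWMutex
	cert *tls.Certificate
}

func (r *reloadingCert) get(*tls.ClientHelloInfo) (*tls.Certificate, error) {
	r.mu.RLock()
	defer r.mu.RUnlock()
	return r.cert, nil
}

// reload would be invoked from a file-watcher callback (fsnotify or similar).
func (r *reloadingCert) reload(certFile, keyFile string) error {
	c, err := tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		return err // keep serving the previous pair on failure
	}
	r.mu.Lock()
	r.cert = &c
	r.mu.Unlock()
	return nil
}

func main() {
	r := &reloadingCert{}
	_ = r.reload("server.crt", "server.key")
	cfg := &tls.Config{GetCertificate: r.get}
	_ = cfg
}
```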
diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/certpool_unix.go b/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/certpool_unix.go
deleted file mode 100644
index b5525e51bc5..00000000000
--- a/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/certpool_unix.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (c) 2021 The Jaeger Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !windows
-// +build !windows
-
-package tlscfg
-
-import (
-	"crypto/x509"
-)
-
-func loadSystemCertPool() (*x509.CertPool, error) {
-	return systemCertPool()
-}
diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/certpool_windows.go b/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/certpool_windows.go
deleted file mode 100644
index 56aa4f7746c..00000000000
--- a/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/certpool_windows.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright (c) 2021 The Jaeger Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build windows
-// +build windows
-
-package tlscfg
-
-import (
-	"crypto/x509"
-	"syscall"
-	"unsafe"
-)
-
-const (
-	// CRYPT_E_NOT_FOUND is an error code specific to windows cert pool.
-	// See https://github.com/golang/go/issues/16736#issuecomment-540373689.
-	CRYPT_E_NOT_FOUND = 0x80092004
-)
-
-// workaround https://github.com/golang/go/issues/16736
-// fix borrowed from Sensu: https://github.com/sensu/sensu-go/pull/4018
-func appendCerts(rootCAs *x509.CertPool) (*x509.CertPool, error) {
-	name, _ := syscall.UTF16PtrFromString("Root")
-	storeHandle, err := syscall.CertOpenSystemStore(0, name)
-	if err != nil {
-		return nil, err
-	}
-
-	var cert *syscall.CertContext
-	for {
-		cert, err = syscall.CertEnumCertificatesInStore(storeHandle, cert)
-		if err != nil {
-			if errno, ok := err.(syscall.Errno); ok {
-				if errno == CRYPT_E_NOT_FOUND {
-					break
-				}
-			}
-			return nil, err
-		}
-		if cert == nil {
-			break
-		}
-		// Copy the buf, since ParseCertificate does not create its own copy.
-		buf := (*[1 << 20]byte)(unsafe.Pointer(cert.EncodedCert))[:]
-		buf2 := make([]byte, cert.Length)
-		copy(buf2, buf)
-		if c, err := x509.ParseCertificate(buf2); err == nil {
-			rootCAs.AddCert(c)
-		}
-	}
-	return rootCAs, nil
-}
-
-func loadSystemCertPool() (*x509.CertPool, error) {
-	certPool, err := systemCertPool()
-	if err != nil {
-		return nil, err
-	}
-	if certPool == nil {
-		certPool = x509.NewCertPool()
-	}
-	return appendCerts(certPool)
-}
diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/ciphersuites.go b/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/ciphersuites.go
deleted file mode 100644
index 4c71db9379b..00000000000
--- a/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/ciphersuites.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright (c) 2022 The Jaeger Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tlscfg
-
-import (
-	"crypto/tls"
-	"fmt"
-)
-
-// https://pkg.go.dev/crypto/tls#pkg-constants
-var versions = map[string]uint16{
-	"1.0": tls.VersionTLS10,
-	"1.1": tls.VersionTLS11,
-	"1.2": tls.VersionTLS12,
-	"1.3": tls.VersionTLS13,
-}
-
-func allCiphers() map[string]uint16 {
-	acceptedCiphers := make(map[string]uint16)
-	for _, suite := range tls.CipherSuites() {
-		acceptedCiphers[suite.Name] = suite.ID
-	}
-	return acceptedCiphers
-}
-
-// CipherSuiteNamesToIDs returns a list of cipher suite IDs from the cipher suite names passed.
-func CipherSuiteNamesToIDs(cipherNames []string) ([]uint16, error) {
-	var ciphersIDs []uint16
-	possibleCiphers := allCiphers()
-	for _, cipher := range cipherNames {
-		intValue, ok := possibleCiphers[cipher]
-		if !ok {
-			return nil, fmt.Errorf("cipher suite %s not supported or doesn't exist", cipher)
-		}
-		ciphersIDs = append(ciphersIDs, intValue)
-	}
-	return ciphersIDs, nil
-}
-
-// VersionNameToID returns the version ID from version name
-func VersionNameToID(versionName string) (uint16, error) {
-	if version, ok := versions[versionName]; ok {
-		return version, nil
-	}
-	return 0, fmt.Errorf("unknown tls version %q", versionName)
-}
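Callers of the deleted helpers typically fed the resulting IDs straight into a tls.Config. A standard-library-only sketch of the same name-to-ID mapping:

```go
// Resolve human-readable cipher-suite names to the uint16 IDs tls.Config expects,
// mirroring what the removed CipherSuiteNamesToIDs did.
package main

import (
	"crypto/tls"
	"fmt"
)

func suiteIDs(names []string) ([]uint16, error) {
	byName := map[string]uint16{}
	for _, s := range tls.CipherSuites() { // secure, non-deprecated suites only
		byName[s.Name] = s.ID
	}
	var ids []uint16
	for _, n := range names {
		id, ok := byName[n]
		if !ok {
			return nil, fmt.Errorf("unknown cipher suite %q", n)
		}
		ids = append(ids, id)
	}
	return ids, nil
}

func main() {
	ids, err := suiteIDs([]string{"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"})
	if err != nil {
		panic(err)
	}
	cfg := &tls.Config{CipherSuites: ids, MinVersion: tls.VersionTLS12}
	fmt.Printf("%#v\n", cfg.CipherSuites)
}
```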
diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/flags.go b/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/flags.go
index 96de6b7cf4e..82851211a85 100644
--- a/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/flags.go
+++ b/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/flags.go
@@ -1,16 +1,5 @@
 // Copyright (c) 2019 The Jaeger Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
 
 package tlscfg
 
@@ -21,6 +10,7 @@ import (
 	"strings"
 
 	"github.com/spf13/viper"
+	"go.opentelemetry.io/collector/config/configtls"
 )
 
 const (
@@ -74,8 +64,8 @@ func (c ServerFlagsConfig) AddFlags(flags *flag.FlagSet) {
 }
 
 // InitFromViper creates tls.Config populated with values retrieved from Viper.
-func (c ClientFlagsConfig) InitFromViper(v *viper.Viper) (Options, error) {
-	var p Options
+func (c ClientFlagsConfig) InitFromViper(v *viper.Viper) (configtls.ClientConfig, error) {
+	var p options
 	p.Enabled = v.GetBool(c.Prefix + tlsEnabled)
 	p.CAPath = v.GetString(c.Prefix + tlsCA)
 	p.CertPath = v.GetString(c.Prefix + tlsCert)
@@ -84,18 +74,18 @@ func (c ClientFlagsConfig) InitFromViper(v *viper.Viper) (Options, error) {
 	p.SkipHostVerify = v.GetBool(c.Prefix + tlsSkipHostVerify)
 
 	if !p.Enabled {
-		var empty Options
+		var empty options
 		if !reflect.DeepEqual(&p, &empty) {
-			return p, fmt.Errorf("%s.tls.* options cannot be used when %s is false", c.Prefix, c.Prefix+tlsEnabled)
+			return configtls.ClientConfig{}, fmt.Errorf("%s.tls.* options cannot be used when %s is false", c.Prefix, c.Prefix+tlsEnabled)
 		}
 	}
 
-	return p, nil
+	return p.ToOtelClientConfig(), nil
 }
 
 // InitFromViper creates tls.Config populated with values retrieved from Viper.
-func (c ServerFlagsConfig) InitFromViper(v *viper.Viper) (Options, error) {
-	var p Options
+func (c ServerFlagsConfig) InitFromViper(v *viper.Viper) (*configtls.ServerConfig, error) {
+	var p options
 	p.Enabled = v.GetBool(c.Prefix + tlsEnabled)
 	p.CertPath = v.GetString(c.Prefix + tlsCert)
 	p.KeyPath = v.GetString(c.Prefix + tlsKey)
@@ -108,13 +98,13 @@ func (c ServerFlagsConfig) InitFromViper(v *viper.Viper) (Options, error) {
 	p.ReloadInterval = v.GetDuration(c.Prefix + tlsReloadInterval)
 
 	if !p.Enabled {
-		var empty Options
+		var empty options
 		if !reflect.DeepEqual(&p, &empty) {
-			return p, fmt.Errorf("%s.tls.* options cannot be used when %s is false", c.Prefix, c.Prefix+tlsEnabled)
+			return nil, fmt.Errorf("%s.tls.* options cannot be used when %s is false", c.Prefix, c.Prefix+tlsEnabled)
 		}
 	}
 
-	return p, nil
+	return p.ToOtelServerConfig(), nil
 }
 
 // stripWhiteSpace removes all whitespace characters from a string
diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/options.go b/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/options.go
index 2707887831d..969a0427841 100644
--- a/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/options.go
+++ b/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/options.go
@@ -1,158 +1,72 @@
 // Copyright (c) 2019 The Jaeger Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
 
 package tlscfg
 
 import (
-	"crypto/tls"
-	"crypto/x509"
-	"fmt"
-	"io"
-	"os"
-	"path/filepath"
 	"time"
 
-	"go.uber.org/zap"
+	"go.opentelemetry.io/collector/config/configtls"
 )
 
-// Options describes the configuration properties for TLS Connections.
-type Options struct {
-	Enabled        bool          `mapstructure:"enabled"`
-	CAPath         string        `mapstructure:"ca"`
-	CertPath       string        `mapstructure:"cert"`
-	KeyPath        string        `mapstructure:"key"`
-	ServerName     string        `mapstructure:"server_name"` // only for client-side TLS config
-	ClientCAPath   string        `mapstructure:"client_ca"` // only for server-side TLS config for client auth
-	CipherSuites   []string      `mapstructure:"cipher_suites"`
-	MinVersion     string        `mapstructure:"min_version"`
-	MaxVersion     string        `mapstructure:"max_version"`
-	SkipHostVerify bool          `mapstructure:"skip_host_verify"`
-	ReloadInterval time.Duration `mapstructure:"reload_interval"`
-	certWatcher    *certWatcher  `mapstructure:"-"`
+// options describes the configuration properties for TLS Connections.
+type options struct {
+	Enabled        bool
+	CAPath         string
+	CertPath       string
+	KeyPath        string
+	ServerName     string // only for client-side TLS config
+	ClientCAPath   string // only for server-side TLS config for client auth
+	CipherSuites   []string
+	MinVersion     string
+	MaxVersion     string
+	SkipHostVerify bool
+	ReloadInterval time.Duration
 }
 
-var systemCertPool = x509.SystemCertPool // to allow overriding in unit test
-
-// Config loads TLS certificates and returns a TLS Config.
-func (p *Options) Config(logger *zap.Logger) (*tls.Config, error) {
-	var minVersionId, maxVersionId uint16
-
-	certPool, err := p.loadCertPool()
-	if err != nil {
-		return nil, fmt.Errorf("failed to load CA CertPool: %w", err)
-	}
-
-	cipherSuiteIds, err := CipherSuiteNamesToIDs(p.CipherSuites)
-	if err != nil {
-		return nil, fmt.Errorf("failed to get cipher suite ids from cipher suite names: %w", err)
+func (o *options) ToOtelClientConfig() configtls.ClientConfig {
+	return configtls.ClientConfig{
+		Insecure:           !o.Enabled,
+		InsecureSkipVerify: o.SkipHostVerify,
+		ServerName:         o.ServerName,
+		Config: configtls.Config{
+			CAFile:         o.CAPath,
+			CertFile:       o.CertPath,
+			KeyFile:        o.KeyPath,
+			CipherSuites:   o.CipherSuites,
+			MinVersion:     o.MinVersion,
+			MaxVersion:     o.MaxVersion,
+			ReloadInterval: o.ReloadInterval,
+
+			// when no truststore given, use SystemCertPool
+			// https://github.com/jaegertracing/jaeger/issues/6334
+			IncludeSystemCACertsPool: o.Enabled && (len(o.CAPath) == 0),
+		},
 	}
-
-	if p.MinVersion != "" {
-		minVersionId, err = VersionNameToID(p.MinVersion)
-		if err != nil {
-			return nil, fmt.Errorf("failed to get minimum tls version: %w", err)
-		}
-	}
-
-	if p.MaxVersion != "" {
-		maxVersionId, err = VersionNameToID(p.MaxVersion)
-		if err != nil {
-			return nil, fmt.Errorf("failed to get maximum tls version: %w", err)
-		}
-	}
-
-	if p.MinVersion != "" && p.MaxVersion != "" {
-		if minVersionId > maxVersionId {
-			return nil, fmt.Errorf("minimum tls version can't be greater than maximum tls version")
-		}
-	}
-
-	tlsCfg := &tls.Config{
-		RootCAs:            certPool,
-		ServerName:         p.ServerName,
-		InsecureSkipVerify: p.SkipHostVerify, /* #nosec G402*/
-		CipherSuites:       cipherSuiteIds,
-		MinVersion:         minVersionId,
-		MaxVersion:         maxVersionId,
-	}
-
-	if p.ClientCAPath != "" {
-		// TODO this should be moved to certWatcher, since it already loads key pair
-		certPool := x509.NewCertPool()
-		if err := addCertToPool(p.ClientCAPath, certPool); err != nil {
-			return nil, err
-		}
-		tlsCfg.ClientCAs = certPool
-		tlsCfg.ClientAuth = tls.RequireAndVerifyClientCert
-	}
-
-	certWatcher, err := newCertWatcher(*p, logger, tlsCfg.RootCAs, tlsCfg.ClientCAs)
-	if err != nil {
-		return nil, err
-	}
-	p.certWatcher = certWatcher
-
-	if (p.CertPath == "" && p.KeyPath != "") || (p.CertPath != "" && p.KeyPath == "") {
-		return nil, fmt.Errorf("for client auth via TLS, either both client certificate and key must be supplied, or neither")
-	}
-	if p.CertPath != "" && p.KeyPath != "" {
-		tlsCfg.GetCertificate = func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
-			return p.certWatcher.certificate(), nil
-		}
-		// GetClientCertificate is used on the client side when server is configured with tls.RequireAndVerifyClientCert e.g. mTLS
-		tlsCfg.GetClientCertificate = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
-			return p.certWatcher.certificate(), nil
-		}
-	}
-
-	return tlsCfg, nil
 }
 
-func (p Options) loadCertPool() (*x509.CertPool, error) {
-	if len(p.CAPath) == 0 { // no truststore given, use SystemCertPool
-		certPool, err := loadSystemCertPool()
-		if err != nil {
-			return nil, fmt.Errorf("failed to load SystemCertPool: %w", err)
-		}
-		return certPool, nil
-	}
-	certPool := x509.NewCertPool()
-	// setup user specified truststore
-	if err := addCertToPool(p.CAPath, certPool); err != nil {
-		return nil, err
+// ToOtelServerConfig provides a mapping between from Options to OTEL's TLS Server Configuration.
+func (o *options) ToOtelServerConfig() *configtls.ServerConfig {
+	if !o.Enabled {
+		return nil
 	}
-	return certPool, nil
-}
 
-func addCertToPool(caPath string, certPool *x509.CertPool) error {
-	caPEM, err := os.ReadFile(filepath.Clean(caPath))
-	if err != nil {
-		return fmt.Errorf("failed to load CA %s: %w", caPath, err)
+	cfg := &configtls.ServerConfig{
+		ClientCAFile: o.ClientCAPath,
+		Config: configtls.Config{
+			CAFile:         o.CAPath,
+			CertFile:       o.CertPath,
+			KeyFile:        o.KeyPath,
+			CipherSuites:   o.CipherSuites,
+			MinVersion:     o.MinVersion,
+			MaxVersion:     o.MaxVersion,
+			ReloadInterval: o.ReloadInterval,
+		},
 	}
-	if !certPool.AppendCertsFromPEM(caPEM) {
-		return fmt.Errorf("failed to parse CA %s", caPath)
+	if o.ReloadInterval > 0 {
+		cfg.ReloadClientCAFile = true
 	}
-	return nil
-}
-
-var _ io.Closer = (*Options)(nil)
-
-// Close shuts down the embedded certificate watcher.
-func (p *Options) Close() error {
-	if p.certWatcher != nil {
-		return p.certWatcher.Close()
-	}
-	return nil
+	return cfg
 }
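The practical effect of the rewrite above: the legacy flag model is now translated into the OTEL collector's configtls types, with enabled=false becoming Insecure: true and an empty CA path opting into the system cert pool. A small sketch using only fields shown in the diff (the configtls version must match whatever collector release Jaeger pins in go.mod):

```go
// Illustrates the semantics of the new mapping; field names come from the diff.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/config/configtls"
)

func main() {
	// Legacy "--foo.tls.enabled=false" now maps to Insecure: true.
	plaintext := configtls.ClientConfig{Insecure: true}

	// Enabled with no CA file: fall back to the system trust store.
	withSystemCAs := configtls.ClientConfig{
		Insecure: false,
		Config:   configtls.Config{IncludeSystemCACertsPool: true},
	}
	fmt.Println(plaintext.Insecure, withSystemCAs.Config.IncludeSystemCACertsPool)
}
```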
diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/discovery/discoverer.go b/vendor/github.com/jaegertracing/jaeger/pkg/discovery/discoverer.go
index ae1780d8b33..5385b3c0c71 100644
--- a/vendor/github.com/jaegertracing/jaeger/pkg/discovery/discoverer.go
+++ b/vendor/github.com/jaegertracing/jaeger/pkg/discovery/discoverer.go
@@ -1,17 +1,6 @@
 // Copyright (c) 2019 The Jaeger Authors.
 // Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
 
 package discovery
diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/discovery/grpcresolver/grpc_resolver.go b/vendor/github.com/jaegertracing/jaeger/pkg/discovery/grpcresolver/grpc_resolver.go
index 2329870f7ca..40e1387dcdb 100644
--- a/vendor/github.com/jaegertracing/jaeger/pkg/discovery/grpcresolver/grpc_resolver.go
+++ b/vendor/github.com/jaegertracing/jaeger/pkg/discovery/grpcresolver/grpc_resolver.go
@@ -1,16 +1,5 @@
 // Copyright (c) 2019 The Jaeger Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
 
 package grpcresolver
 
@@ -85,7 +74,7 @@ func New(
 }
 
 // Build returns itself for Resolver, because it's both a builder and a resolver.
-func (r *Resolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
+func (r *Resolver) Build(_ resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
 	r.cc = cc
 
 	// Update conn states if proactively updates already work
@@ -106,7 +95,7 @@ func (r *Resolver) Scheme() string {
 
 // ResolveNow is a noop for Resolver since resolver is already firing r.cc.UpdatesState every time
 // it receives updates of new instance from discoCh
-func (r *Resolver) ResolveNow(o resolver.ResolveNowOptions) {}
+func (*Resolver) ResolveNow(resolver.ResolveNowOptions) {}
 
 func (r *Resolver) watcher() {
 	defer r.closing.Done()
@@ -144,13 +133,6 @@ func (r *Resolver) rendezvousHash(addresses []string) []string {
 	return topN
 }
 
-func min(a, b int) int {
-	if a < b {
-		return a
-	}
-	return b
-}
-
 func hashAddr(hasher hash.Hash32, node, saltKey []byte) uint32 {
 	hasher.Reset()
 	hasher.Write(saltKey)
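The local min helper is deleted rather than moved because Go 1.21 made min and max predeclared built-ins, so the hand-rolled version now shadows them:

```go
// Go 1.21+: min and max are built in for ordered types.
package main

import "fmt"

func main() {
	fmt.Println(min(3, 5), max(3, 5)) // 3 5
}
```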
diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/discovery/notifier.go b/vendor/github.com/jaegertracing/jaeger/pkg/discovery/notifier.go
index fba50b71db7..487ca20bab7 100644
--- a/vendor/github.com/jaegertracing/jaeger/pkg/discovery/notifier.go
+++ b/vendor/github.com/jaegertracing/jaeger/pkg/discovery/notifier.go
@@ -1,17 +1,6 @@
 // Copyright (c) 2019 The Jaeger Authors.
 // Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
 
 package discovery
diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/distributedlock/interface.go b/vendor/github.com/jaegertracing/jaeger/pkg/distributedlock/interface.go
deleted file mode 100644
index 1abf600f4c7..00000000000
--- a/vendor/github.com/jaegertracing/jaeger/pkg/distributedlock/interface.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (c) 2019 The Jaeger Authors.
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package distributedlock
-
-import (
-	"time"
-)
-
-// Lock uses distributed lock for control of a resource.
-type Lock interface {
-	// Acquire acquires a lease of duration ttl around a given resource. In case of an error,
-	// acquired is meaningless.
-	Acquire(resource string, ttl time.Duration) (acquired bool, err error)
-
-	// Forfeit forfeits a lease around a given resource. In case of an error,
-	// forfeited is meaningless.
-	Forfeit(resource string) (forfeited bool, err error)
-}
diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/fswatcher/fswatcher.go b/vendor/github.com/jaegertracing/jaeger/pkg/fswatcher/fswatcher.go
deleted file mode 100644
index d3c929d1d9b..00000000000
--- a/vendor/github.com/jaegertracing/jaeger/pkg/fswatcher/fswatcher.go
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright (c) 2021 The Jaeger Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fswatcher
-
-import (
-	"crypto/sha256"
-	"fmt"
-	"io"
-	"os"
-	"path"
-	"path/filepath"
-	"sync"
-
-	"github.com/fsnotify/fsnotify"
-	"go.uber.org/zap"
-)
-
-type FSWatcher struct {
-	watcher            *fsnotify.Watcher
-	logger             *zap.Logger
-	fileHashContentMap map[string]string
-	onChange           func()
-	mu                 sync.RWMutex
-}
-
-// FSWatcher waits for notifications of changes in the watched directories
-// and attempts to reload all files that changed.
-//
-// Write and Rename events indicate that some files might have changed and reload might be necessary.
-// Remove event indicates that the file was deleted and we should write a warn to log.
-//
-// Reasoning:
-//
-// Write event is sent if the file content is rewritten.
-//
-// Usually files are not rewritten, but they are updated by swapping them with new
-// ones by calling Rename. That avoids files being read while they are not yet
-// completely written but it also means that inotify on file level will not work:
-// watch is invalidated when the old file is deleted.
-//
-// If reading from Kubernetes Secret volumes the target files are symbolic links
-// to files in a different directory. That directory is swapped with a new one,
-// while the symbolic links remain the same. This guarantees atomic swap for all
-// files at once, but it also means any Rename event in the directory might
-// indicate that the files were replaced, even if event.Name is not any of the
-// files we are monitoring. We check the hashes of the files to detect if they
-// were really changed.
-func New(filepaths []string, onChange func(), logger *zap.Logger) (*FSWatcher, error) {
-	watcher, err := fsnotify.NewWatcher()
-	if err != nil {
-		return nil, err
-	}
-	w := &FSWatcher{
-		watcher:            watcher,
-		logger:             logger,
-		fileHashContentMap: make(map[string]string),
-		onChange:           onChange,
-	}
-
-	if err = w.setupWatchedPaths(filepaths); err != nil {
-		w.Close()
-		return nil, err
-	}
-
-	go w.watch()
-
-	return w, nil
-}
-
-func (w *FSWatcher) setupWatchedPaths(filepaths []string) error {
-	uniqueDirs := make(map[string]bool)
-
-	for _, p := range filepaths {
-		if p == "" {
-			continue
-		}
-		if h, err := hashFile(p); err == nil {
-			w.fileHashContentMap[p] = h
-		} else {
-			return err
-		}
-		dir := path.Dir(p)
-		if _, ok := uniqueDirs[dir]; !ok {
-			if err := w.watcher.Add(dir); err != nil {
-				return err
-			}
-			uniqueDirs[dir] = true
-		}
-	}
-
-	return nil
-}
-
-// Watch watches for Events and Errors of files.
-// Each time an Event happen, all the files are checked for content change.
-// If a file's content changes, its hashed content is updated and
-// onChange is invoked after all file checks.
-func (w *FSWatcher) watch() {
-	for {
-		select {
-		case event, ok := <-w.watcher.Events:
-			if !ok {
-				return
-			}
-			w.logger.Info("Received event", zap.String("event", event.String()))
-			var changed bool
-			w.mu.Lock()
-			for file, hash := range w.fileHashContentMap {
-				fileChanged, newHash := w.isModified(file, hash)
-				if fileChanged {
-					changed = fileChanged
-					w.fileHashContentMap[file] = newHash
-				}
-			}
-			w.mu.Unlock()
-			if changed {
-				w.onChange()
-			}
-		case err, ok := <-w.watcher.Errors:
-			if !ok {
-				return
-			}
-			w.logger.Error("fsnotifier reported an error", zap.Error(err))
-		}
-	}
-}
-
-// Close closes the watcher.
-func (w *FSWatcher) Close() error {
-	return w.watcher.Close()
-}
-
-// isModified returns true if the file has been modified since the last check.
-func (w *FSWatcher) isModified(filepath string, previousHash string) (bool, string) {
-	hash, err := hashFile(filepath)
-	if err != nil {
-		w.logger.Warn("Unable to read the file", zap.String("file", filepath), zap.Error(err))
-		return true, ""
-	}
-	return previousHash != hash, hash
-}
-
-// hashFile returns the SHA256 hash of the file.
-func hashFile(file string) (string, error) {
-	f, err := os.Open(filepath.Clean(file))
-	if err != nil {
-		return "", err
-	}
-	defer f.Close()
-
-	h := sha256.New()
-	if _, err := io.Copy(h, f); err != nil {
-		return "", err
-	}
-
-	return fmt.Sprintf("%x", h.Sum(nil)), nil
-}
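The fswatcher deleted above watched directories rather than files and compared content hashes to filter spurious events, which is what makes it work with Kubernetes-style atomic symlink swaps. A condensed sketch of that pattern (the watched path is a placeholder):

```go
// Watch the parent directory and confirm real changes by hashing the file,
// as the removed fswatcher did.
package main

import (
	"crypto/sha256"
	"fmt"
	"log"
	"os"
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

func hashOf(file string) string {
	b, err := os.ReadFile(filepath.Clean(file))
	if err != nil {
		return ""
	}
	return fmt.Sprintf("%x", sha256.Sum256(b))
}

func main() {
	const target = "/etc/certs/tls.crt" // placeholder path
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()
	if err := w.Add(filepath.Dir(target)); err != nil {
		log.Fatal(err)
	}
	last := hashOf(target)
	for range w.Events { // any event in the dir may mean an atomic swap
		if h := hashOf(target); h != last {
			last = h
			log.Println("content changed, reload:", target)
		}
	}
}
```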
diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/gogocodec/codec.go b/vendor/github.com/jaegertracing/jaeger/pkg/gogocodec/codec.go
index f7df1c2d2ec..55322baa89c 100644
--- a/vendor/github.com/jaegertracing/jaeger/pkg/gogocodec/codec.go
+++ b/vendor/github.com/jaegertracing/jaeger/pkg/gogocodec/codec.go
@@ -1,16 +1,5 @@
 // Copyright (c) 2021 The Jaeger Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
 
 package gogocodec
 
@@ -22,6 +11,7 @@ import (
 	gogoproto "github.com/gogo/protobuf/proto"
 	"google.golang.org/grpc/encoding"
 	"google.golang.org/grpc/encoding/proto"
+	"google.golang.org/grpc/mem"
 )
 
 const (
@@ -29,7 +19,7 @@ const (
 	jaegerModelPkgPath = "github.com/jaegertracing/jaeger/model"
 )
 
-var defaultCodec encoding.Codec
+var defaultCodec encoding.CodecV2
 
 // CustomType is an interface that Gogo expects custom types to implement.
 // https://github.com/gogo/protobuf/blob/master/custom_types.md
@@ -45,44 +35,45 @@ type CustomType interface {
 }
 
 func init() {
-	defaultCodec = encoding.GetCodec(proto.Name)
+	defaultCodec = encoding.GetCodecV2(proto.Name)
 	defaultCodec.Name() // ensure it's not nil
-	encoding.RegisterCodec(newCodec())
+	encoding.RegisterCodecV2(newCodec())
 }
 
 // gogoCodec forces the use of gogo proto marshalling/unmarshalling for
 // Jaeger proto types (package jaeger/gen-proto).
 type gogoCodec struct{}
 
-var _ encoding.Codec = (*gogoCodec)(nil)
+var _ encoding.CodecV2 = (*gogoCodec)(nil)
 
 func newCodec() *gogoCodec {
	return &gogoCodec{}
 }
 
 // Name implements encoding.Codec
-func (c *gogoCodec) Name() string {
+func (*gogoCodec) Name() string {
 	return proto.Name
 }
 
 // Marshal implements encoding.Codec
-func (c *gogoCodec) Marshal(v interface{}) ([]byte, error) {
+func (*gogoCodec) Marshal(v any) (mem.BufferSlice, error) {
 	t := reflect.TypeOf(v)
 	elem := t.Elem()
 	// use gogo proto only for Jaeger types
 	if useGogo(elem) {
-		return gogoproto.Marshal(v.(gogoproto.Message))
+		bytes, err := gogoproto.Marshal(v.(gogoproto.Message))
+		return mem.BufferSlice{mem.SliceBuffer(bytes)}, err
 	}
 	return defaultCodec.Marshal(v)
 }
 
 // Unmarshal implements encoding.Codec
-func (c *gogoCodec) Unmarshal(data []byte, v interface{}) error {
+func (*gogoCodec) Unmarshal(data mem.BufferSlice, v any) error {
 	t := reflect.TypeOf(v)
 	elem := t.Elem() // only for collections
 	// use gogo proto only for Jaeger types
 	if useGogo(elem) {
-		return gogoproto.Unmarshal(data, v.(gogoproto.Message))
+		return gogoproto.Unmarshal(data.Materialize(), v.(gogoproto.Message))
 	}
 	return defaultCodec.Unmarshal(data, v)
 }
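The codec migration above swaps []byte for mem.BufferSlice in grpc-go's CodecV2 interface. The two conversions the new code relies on, in isolation (assumes a grpc-go version recent enough to ship the mem package, roughly v1.66+):

```go
// Minimal round-trip between []byte and mem.BufferSlice.
package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	raw := []byte("payload")
	bs := mem.BufferSlice{mem.SliceBuffer(raw)} // wrap a plain []byte
	back := bs.Materialize()                    // flatten back to a contiguous []byte
	fmt.Println(string(back))
}
```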
diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/healthcheck/handler.go b/vendor/github.com/jaegertracing/jaeger/pkg/healthcheck/handler.go
index 1a714ea70c6..bbc50a803d7 100644
--- a/vendor/github.com/jaegertracing/jaeger/pkg/healthcheck/handler.go
+++ b/vendor/github.com/jaegertracing/jaeger/pkg/healthcheck/handler.go
@@ -1,17 +1,6 @@
 // Copyright (c) 2019 The Jaeger Authors.
 // Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
 
 package healthcheck
 
@@ -106,7 +95,7 @@ func (hc *HealthCheck) Handler() http.Handler {
 	})
 }
 
-func (hc *HealthCheck) createRespBody(state state, template healthCheckResponse) []byte {
+func (*HealthCheck) createRespBody(state state, template healthCheckResponse) []byte {
 	resp := template // clone
 	if state.status == Ready {
 		resp.UpSince = state.upSince
diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/metrics/counter.go b/vendor/github.com/jaegertracing/jaeger/pkg/metrics/counter.go
index e594940857e..ad9d94febf7 100644
--- a/vendor/github.com/jaegertracing/jaeger/pkg/metrics/counter.go
+++ b/vendor/github.com/jaegertracing/jaeger/pkg/metrics/counter.go
@@ -1,17 +1,6 @@
 // Copyright (c) 2022 The Jaeger Authors.
 // Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
 
 package metrics
diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/metrics/factory.go b/vendor/github.com/jaegertracing/jaeger/pkg/metrics/factory.go
index e77257127d9..faeaf3d1585 100644
--- a/vendor/github.com/jaegertracing/jaeger/pkg/metrics/factory.go
+++ b/vendor/github.com/jaegertracing/jaeger/pkg/metrics/factory.go
@@ -1,17 +1,6 @@
 // Copyright (c) 2022 The Jaeger Authors.
 // Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
 
 package metrics
 
@@ -64,19 +53,19 @@ var NullFactory Factory = nullFactory{}
 
 type nullFactory struct{}
 
-func (nullFactory) Counter(options Options) Counter {
+func (nullFactory) Counter(Options) Counter {
 	return NullCounter
 }
 
-func (nullFactory) Timer(options TimerOptions) Timer {
+func (nullFactory) Timer(TimerOptions) Timer {
 	return NullTimer
 }
 
-func (nullFactory) Gauge(options Options) Gauge {
+func (nullFactory) Gauge(Options) Gauge {
 	return NullGauge
 }
 
-func (nullFactory) Histogram(options HistogramOptions) Histogram {
+func (nullFactory) Histogram(HistogramOptions) Histogram {
 	return NullHistogram
 }
 
-func (nullFactory) Namespace(scope NSOptions) Factory { return NullFactory }
+func (nullFactory) Namespace(NSOptions /* scope */) Factory { return NullFactory }
diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/metrics/gauge.go b/vendor/github.com/jaegertracing/jaeger/pkg/metrics/gauge.go
index 53631d451d6..3445b7ac32d 100644
--- a/vendor/github.com/jaegertracing/jaeger/pkg/metrics/gauge.go
+++ b/vendor/github.com/jaegertracing/jaeger/pkg/metrics/gauge.go
@@ -1,17 +1,6 @@
 // Copyright (c) 2022 The Jaeger Authors.
 // Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
 
 package metrics
diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/metrics/histogram.go b/vendor/github.com/jaegertracing/jaeger/pkg/metrics/histogram.go
index d3bd6174fe8..b737e74c476 100644
--- a/vendor/github.com/jaegertracing/jaeger/pkg/metrics/histogram.go
+++ b/vendor/github.com/jaegertracing/jaeger/pkg/metrics/histogram.go
@@ -1,16 +1,5 @@
 // Copyright (c) 2018 The Jaeger Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
 
 package metrics
diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/metrics/metrics.go b/vendor/github.com/jaegertracing/jaeger/pkg/metrics/metrics.go
index 86cf913bc94..b81e3e4a775 100644
--- a/vendor/github.com/jaegertracing/jaeger/pkg/metrics/metrics.go
+++ b/vendor/github.com/jaegertracing/jaeger/pkg/metrics/metrics.go
@@ -1,17 +1,6 @@
 // Copyright (c) 2022 The Jaeger Authors.
 // Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
 
 package metrics
 
@@ -32,7 +21,7 @@ import (
 // of type Counter or Gauge or Timer.
 //
 // Errors during Init lead to a panic.
-func MustInit(metrics interface{}, factory Factory, globalTags map[string]string) {
+func MustInit(metrics any, factory Factory, globalTags map[string]string) {
 	if err := Init(metrics, factory, globalTags); err != nil {
 		panic(err.Error())
 	}
@@ -40,7 +29,7 @@ func MustInit(metrics interface{}, factory Factory, globalTags map[string]string
 
 // Init does the same as MustInit, but returns an error instead of
 // panicking.
-func Init(m interface{}, factory Factory, globalTags map[string]string) error {
+func Init(m any, factory Factory, globalTags map[string]string) error {
 	// Allow user to opt out of reporting metrics by passing in nil.
 	if factory == nil {
 		factory = NullFactory
@@ -101,7 +90,7 @@ func Init(m interface{}, factory Factory, globalTags map[string]string) error {
 			}
 		}
 		help := field.Tag.Get("help")
-		var obj interface{}
+		var obj any
 		switch {
 		case field.Type.AssignableTo(counterPtrType):
 			obj = factory.Counter(Options{
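Most of the metrics changes above are mechanical (any for interface{}), but the package's tag-driven declaration style is worth seeing once, since the deleted clientcfghttp handler relied on it heavily. A sketch against the vendored package:

```go
// Declare counters in a struct with metric/tags tags and let MustInit wire
// them to a Factory, exactly as the deleted HTTPHandler did.
package main

import "github.com/jaegertracing/jaeger/pkg/metrics"

type svcMetrics struct {
	Requests metrics.Counter `metric:"http-server.requests" tags:"type=sampling"`
	Errors   metrics.Counter `metric:"http-server.errors" tags:"status=5xx,source=write"`
}

func main() {
	var m svcMetrics
	// NullFactory is a safe no-op backend; pass a real factory in production.
	metrics.MustInit(&m, metrics.NullFactory, map[string]string{"service": "demo"})
	m.Requests.Inc(1)
}
```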
diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/metrics/package.go b/vendor/github.com/jaegertracing/jaeger/pkg/metrics/package.go
index 9764382db10..a0587cbd06e 100644
--- a/vendor/github.com/jaegertracing/jaeger/pkg/metrics/package.go
+++ b/vendor/github.com/jaegertracing/jaeger/pkg/metrics/package.go
@@ -1,17 +1,6 @@
 // Copyright (c) 2019 The Jaeger Authors.
 // Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
 
 // Package metrics provides an internal abstraction for metrics API,
 // and command line flags for configuring the metrics backend.
diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/metrics/stopwatch.go b/vendor/github.com/jaegertracing/jaeger/pkg/metrics/stopwatch.go
index 0ca70979260..4685eaea9d9 100644
--- a/vendor/github.com/jaegertracing/jaeger/pkg/metrics/stopwatch.go
+++ b/vendor/github.com/jaegertracing/jaeger/pkg/metrics/stopwatch.go
@@ -1,17 +1,6 @@
 // Copyright (c) 2022 The Jaeger Authors.
 // Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
 
 package metrics
diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/metrics/timer.go b/vendor/github.com/jaegertracing/jaeger/pkg/metrics/timer.go
index 75df952d747..9396e50b8c8 100644
--- a/vendor/github.com/jaegertracing/jaeger/pkg/metrics/timer.go
+++ b/vendor/github.com/jaegertracing/jaeger/pkg/metrics/timer.go
@@ -1,17 +1,6 @@
 // Copyright (c) 2022 The Jaeger Authors.
 // Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
 
 package metrics
diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/netutils/port.go b/vendor/github.com/jaegertracing/jaeger/pkg/netutils/port.go
index 201a18adade..9b880cce6e2 100644
--- a/vendor/github.com/jaegertracing/jaeger/pkg/netutils/port.go
+++ b/vendor/github.com/jaegertracing/jaeger/pkg/netutils/port.go
@@ -1,16 +1,5 @@
 // Copyright (c) 2020 The Jaeger Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
 
 package netutils
diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/recoveryhandler/zap.go b/vendor/github.com/jaegertracing/jaeger/pkg/recoveryhandler/zap.go
index 678c179dade..0a65c6cea0e 100644
--- a/vendor/github.com/jaegertracing/jaeger/pkg/recoveryhandler/zap.go
+++ b/vendor/github.com/jaegertracing/jaeger/pkg/recoveryhandler/zap.go
@@ -1,17 +1,6 @@
 // Copyright (c) 2019 The Jaeger Authors.
 // Copyright (c) 2017-2018 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
 
 package recoveryhandler
 
@@ -29,7 +18,7 @@ type zapRecoveryWrapper struct {
 }
 
 // Println logs an error message with the given fields
-func (z zapRecoveryWrapper) Println(args ...interface{}) {
+func (z zapRecoveryWrapper) Println(args ...any) {
 	z.logger.Error(fmt.Sprint(args...))
 }
diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/telemetry/settings.go b/vendor/github.com/jaegertracing/jaeger/pkg/telemetry/settings.go
new file mode 100644
index 00000000000..5b58fd7583d
--- /dev/null
+++ b/vendor/github.com/jaegertracing/jaeger/pkg/telemetry/settings.go
@@ -0,0 +1,80 @@
+// Copyright (c) 2024 The Jaeger Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+import (
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/component/componentstatus"
+	"go.opentelemetry.io/collector/component/componenttest"
+	"go.opentelemetry.io/otel/metric"
+	noopmetric "go.opentelemetry.io/otel/metric/noop"
+	"go.opentelemetry.io/otel/trace"
+	nooptrace "go.opentelemetry.io/otel/trace/noop"
+	"go.uber.org/zap"
+
+	"github.com/jaegertracing/jaeger/internal/metrics/otelmetrics"
+	"github.com/jaegertracing/jaeger/pkg/healthcheck"
+	"github.com/jaegertracing/jaeger/pkg/metrics"
+)
+
+type Settings struct {
+	Logger         *zap.Logger
+	Metrics        metrics.Factory
+	MeterProvider  metric.MeterProvider
+	TracerProvider trace.TracerProvider
+	ReportStatus   func(*componentstatus.Event) // TODO remove this
+	Host           component.Host
+}
+
+func HCAdapter(hc *healthcheck.HealthCheck) func(*componentstatus.Event) {
+	return func(event *componentstatus.Event) {
+		var hcStatus healthcheck.Status
+		switch event.Status() {
+		case componentstatus.StatusOK:
+			hcStatus = healthcheck.Ready
+		case componentstatus.StatusStarting,
+			componentstatus.StatusRecoverableError,
+			componentstatus.StatusPermanentError,
+			componentstatus.StatusNone,
+			componentstatus.StatusStopping,
+			componentstatus.StatusStopped:
+			hcStatus = healthcheck.Unavailable
+		case componentstatus.StatusFatalError:
+			hcStatus = healthcheck.Broken
+		}
+		hc.Set(hcStatus)
+	}
+}
+
+func NoopSettings() Settings {
+	return Settings{
+		Logger:         zap.NewNop(),
+		Metrics:        metrics.NullFactory,
+		MeterProvider:  noopmetric.NewMeterProvider(),
+		TracerProvider: nooptrace.NewTracerProvider(),
+		ReportStatus:   func(*componentstatus.Event) {},
+		Host:           componenttest.NewNopHost(),
+	}
+}
+
+func FromOtelComponent(telset component.TelemetrySettings, host component.Host) Settings {
+	return Settings{
+		Logger:         telset.Logger,
+		Metrics:        otelmetrics.NewFactory(telset.MeterProvider),
+		MeterProvider:  telset.MeterProvider,
+		TracerProvider: telset.TracerProvider,
+		ReportStatus: func(event *componentstatus.Event) {
+			componentstatus.ReportStatus(host, event)
+		},
+		Host: host,
+	}
+}
+
+func (s Settings) ToOtelComponent() component.TelemetrySettings {
+	return component.TelemetrySettings{
+		Logger:         s.Logger,
+		MeterProvider:  s.MeterProvider,
+		TracerProvider: s.TracerProvider,
+	}
+}
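HCAdapter bridges collector componentstatus events to Jaeger's legacy health check. A usage sketch (healthcheck.New, hc.Get, and componentstatus.NewEvent are assumed from their respective packages):

```go
// Map a collector status event onto the legacy /health-style check.
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/component/componentstatus"

	"github.com/jaegertracing/jaeger/pkg/healthcheck"
	"github.com/jaegertracing/jaeger/pkg/telemetry"
)

func main() {
	hc := healthcheck.New()
	report := telemetry.HCAdapter(hc)

	report(componentstatus.NewEvent(componentstatus.StatusOK))
	fmt.Println(hc.Get() == healthcheck.Ready) // true
}
```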
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package version @@ -24,12 +13,12 @@ import ( // RegisterHandler registers version handler to /version func RegisterHandler(mu *http.ServeMux, logger *zap.Logger) { info := Get() - json, err := json.Marshal(info) + jsonData, err := json.Marshal(info) if err != nil { logger.Fatal("Could not get Jaeger version", zap.Error(err)) } mu.HandleFunc("/version", func(w http.ResponseWriter, _ *http.Request) { - w.WriteHeader(200) - w.Write(json) + w.WriteHeader(http.StatusOK) + w.Write(jsonData) }) } diff --git a/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/archive.go b/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/archive.go index 41741e7b950..8eeb3b609b2 100644 --- a/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/archive.go +++ b/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/archive.go @@ -1,16 +1,5 @@ // Copyright (c) 2020 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package shared @@ -43,9 +32,11 @@ type archiveWriter struct { } // GetTrace takes a traceID and returns a Trace associated with that traceID from Archive Storage -func (r *archiveReader) GetTrace(ctx context.Context, traceID model.TraceID) (*model.Trace, error) { - stream, err := r.client.GetArchiveTrace(upgradeContext(ctx), &storage_v1.GetTraceRequest{ - TraceID: traceID, +func (r *archiveReader) GetTrace(ctx context.Context, q spanstore.GetTraceParameters) (*model.Trace, error) { + stream, err := r.client.GetArchiveTrace(ctx, &storage_v1.GetTraceRequest{ + TraceID: q.TraceID, + StartTime: q.StartTime, + EndTime: q.EndTime, }) if status.Code(err) == codes.NotFound { return nil, spanstore.ErrTraceNotFound @@ -58,22 +49,22 @@ func (r *archiveReader) GetTrace(ctx context.Context, traceID model.TraceID) (*m } // GetServices not used in archiveReader -func (r *archiveReader) GetServices(ctx context.Context) ([]string, error) { +func (*archiveReader) GetServices(context.Context) ([]string, error) { return nil, errors.New("GetServices not implemented") } // GetOperations not used in archiveReader -func (r *archiveReader) GetOperations(ctx context.Context, query spanstore.OperationQueryParameters) ([]spanstore.Operation, error) { +func (*archiveReader) GetOperations(context.Context, spanstore.OperationQueryParameters) ([]spanstore.Operation, error) { return nil, errors.New("GetOperations not implemented") } // FindTraces not used in archiveReader -func (r *archiveReader) FindTraces(ctx context.Context, query *spanstore.TraceQueryParameters) ([]*model.Trace, error) { +func (*archiveReader) FindTraces(context.Context, *spanstore.TraceQueryParameters) ([]*model.Trace, error) { return nil, errors.New("FindTraces not implemented") } // FindTraceIDs not used in archiveReader -func (r *archiveReader) FindTraceIDs(ctx context.Context, query *spanstore.TraceQueryParameters) ([]model.TraceID, error) { +func (*archiveReader) FindTraceIDs(context.Context, *spanstore.TraceQueryParameters) ([]model.TraceID, error) { return nil, errors.New("FindTraceIDs not implemented") } diff --git a/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/grpc_client.go b/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/grpc_client.go index 9b24b6c18eb..f6323be4568 100644 --- a/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/grpc_client.go +++ b/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/grpc_client.go @@ -1,16 +1,5 @@ // Copyright (c) 2019 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
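The archive reader's GetTrace above now takes spanstore.GetTraceParameters, the trace ID plus an optional time window, instead of a bare model.TraceID, so backends can bound their scan. A sketch of the new call shape against any spanstore.Reader:

package example

import (
    "context"
    "time"

    "github.com/jaegertracing/jaeger/model"
    "github.com/jaegertracing/jaeger/storage/spanstore"
)

// fetchLastHour looks up a trace while hinting that it started within the past hour.
func fetchLastHour(ctx context.Context, r spanstore.Reader, id model.TraceID) (*model.Trace, error) {
    now := time.Now()
    return r.GetTrace(ctx, spanstore.GetTraceParameters{
        TraceID:   id,
        StartTime: now.Add(-time.Hour), // optional window hints
        EndTime:   now,
    })
}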
+// SPDX-License-Identifier: Apache-2.0 package shared @@ -23,11 +12,10 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" "github.com/jaegertracing/jaeger/model" - "github.com/jaegertracing/jaeger/pkg/bearertoken" + _ "github.com/jaegertracing/jaeger/pkg/gogocodec" // force gogo codec registration "github.com/jaegertracing/jaeger/proto-gen/storage_v1" "github.com/jaegertracing/jaeger/storage/dependencystore" "github.com/jaegertracing/jaeger/storage/spanstore" @@ -37,16 +25,13 @@ import ( const BearerTokenKey = "bearer.token" var ( - _ StoragePlugin = (*grpcClient)(nil) - _ ArchiveStoragePlugin = (*grpcClient)(nil) - _ PluginCapabilities = (*grpcClient)(nil) - - // upgradeContext composites several steps of upgrading context - upgradeContext = composeContextUpgradeFuncs(upgradeContextWithBearerToken) + _ StoragePlugin = (*GRPCClient)(nil) + _ ArchiveStoragePlugin = (*GRPCClient)(nil) + _ PluginCapabilities = (*GRPCClient)(nil) ) -// grpcClient implements shared.StoragePlugin and reads/writes spans and dependencies -type grpcClient struct { +// GRPCClient implements shared.StoragePlugin and reads/writes spans and dependencies +type GRPCClient struct { readerClient storage_v1.SpanReaderPluginClient writerClient storage_v1.SpanWriterPluginClient archiveReaderClient storage_v1.ArchiveSpanReaderPluginClient @@ -56,79 +41,51 @@ type grpcClient struct { streamWriterClient storage_v1.StreamingSpanWriterPluginClient } -func NewGRPCClient(c *grpc.ClientConn) *grpcClient { - return &grpcClient{ - readerClient: storage_v1.NewSpanReaderPluginClient(c), - writerClient: storage_v1.NewSpanWriterPluginClient(c), - archiveReaderClient: storage_v1.NewArchiveSpanReaderPluginClient(c), - archiveWriterClient: storage_v1.NewArchiveSpanWriterPluginClient(c), - capabilitiesClient: storage_v1.NewPluginCapabilitiesClient(c), - depsReaderClient: storage_v1.NewDependenciesReaderPluginClient(c), - streamWriterClient: storage_v1.NewStreamingSpanWriterPluginClient(c), - } -} - -// ContextUpgradeFunc is a functional type that can be composed to upgrade context -type ContextUpgradeFunc func(ctx context.Context) context.Context - -// composeContextUpgradeFuncs composes ContextUpgradeFunc and returns a composed function -// to run the given func in strict order. -func composeContextUpgradeFuncs(funcs ...ContextUpgradeFunc) ContextUpgradeFunc { - return func(ctx context.Context) context.Context { - for _, fun := range funcs { - ctx = fun(ctx) - } - return ctx - } -} - -// upgradeContextWithBearerToken turns the context into a gRPC outgoing context with bearer token -// in the request metadata, if the original context has bearer token attached. -// Otherwise returns original context. 
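The bearer-token context plumbing is removed from the client: the deleted upgradeContextWithBearerToken helper (its body follows below) copied a token from the incoming context into outgoing gRPC metadata under bearer.token. For reference, a standalone equivalent of what was removed:

package example

import (
    "context"

    "google.golang.org/grpc/metadata"
)

// withBearerToken reproduces the removed behavior: attach the token to the
// outgoing metadata so the storage backend can read it server-side.
func withBearerToken(ctx context.Context, token string) context.Context {
    md, ok := metadata.FromOutgoingContext(ctx)
    if !ok {
        md = metadata.New(nil)
    }
    md.Set("bearer.token", token) // the BearerTokenKey constant above
    return metadata.NewOutgoingContext(ctx, md)
}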
-func upgradeContextWithBearerToken(ctx context.Context) context.Context { - bearerToken, hasToken := bearertoken.GetBearerToken(ctx) - if hasToken { - md, ok := metadata.FromOutgoingContext(ctx) - if !ok { - md = metadata.New(nil) - } - md.Set(BearerTokenKey, bearerToken) - return metadata.NewOutgoingContext(ctx, md) +func NewGRPCClient(tracedConn *grpc.ClientConn, untracedConn *grpc.ClientConn) *GRPCClient { + return &GRPCClient{ + readerClient: storage_v1.NewSpanReaderPluginClient(tracedConn), + writerClient: storage_v1.NewSpanWriterPluginClient(untracedConn), + archiveReaderClient: storage_v1.NewArchiveSpanReaderPluginClient(tracedConn), + archiveWriterClient: storage_v1.NewArchiveSpanWriterPluginClient(untracedConn), + capabilitiesClient: storage_v1.NewPluginCapabilitiesClient(tracedConn), + depsReaderClient: storage_v1.NewDependenciesReaderPluginClient(tracedConn), + streamWriterClient: storage_v1.NewStreamingSpanWriterPluginClient(untracedConn), } - return ctx } // DependencyReader implements shared.StoragePlugin. -func (c *grpcClient) DependencyReader() dependencystore.Reader { +func (c *GRPCClient) DependencyReader() dependencystore.Reader { return c } // SpanReader implements shared.StoragePlugin. -func (c *grpcClient) SpanReader() spanstore.Reader { +func (c *GRPCClient) SpanReader() spanstore.Reader { return c } // SpanWriter implements shared.StoragePlugin. -func (c *grpcClient) SpanWriter() spanstore.Writer { +func (c *GRPCClient) SpanWriter() spanstore.Writer { return c } -func (c *grpcClient) StreamingSpanWriter() spanstore.Writer { +func (c *GRPCClient) StreamingSpanWriter() spanstore.Writer { return newStreamingSpanWriter(c.streamWriterClient) } -func (c *grpcClient) ArchiveSpanReader() spanstore.Reader { +func (c *GRPCClient) ArchiveSpanReader() spanstore.Reader { return &archiveReader{client: c.archiveReaderClient} } -func (c *grpcClient) ArchiveSpanWriter() spanstore.Writer { +func (c *GRPCClient) ArchiveSpanWriter() spanstore.Writer { return &archiveWriter{client: c.archiveWriterClient} } // GetTrace takes a traceID and returns a Trace associated with that traceID -func (c *grpcClient) GetTrace(ctx context.Context, traceID model.TraceID) (*model.Trace, error) { - stream, err := c.readerClient.GetTrace(upgradeContext(ctx), &storage_v1.GetTraceRequest{ - TraceID: traceID, +func (c *GRPCClient) GetTrace(ctx context.Context, query spanstore.GetTraceParameters) (*model.Trace, error) { + stream, err := c.readerClient.GetTrace(ctx, &storage_v1.GetTraceRequest{ + TraceID: query.TraceID, + StartTime: query.StartTime, + EndTime: query.EndTime, }) if status.Code(err) == codes.NotFound { return nil, spanstore.ErrTraceNotFound @@ -141,8 +98,8 @@ func (c *grpcClient) GetTrace(ctx context.Context, traceID model.TraceID) (*mode } // GetServices returns a list of all known services -func (c *grpcClient) GetServices(ctx context.Context) ([]string, error) { - resp, err := c.readerClient.GetServices(upgradeContext(ctx), &storage_v1.GetServicesRequest{}) +func (c *GRPCClient) GetServices(ctx context.Context) ([]string, error) { + resp, err := c.readerClient.GetServices(ctx, &storage_v1.GetServicesRequest{}) if err != nil { return nil, fmt.Errorf("plugin error: %w", err) } @@ -151,11 +108,11 @@ func (c *grpcClient) GetServices(ctx context.Context) ([]string, error) { } // GetOperations returns the operations of a given service -func (c *grpcClient) GetOperations( +func (c *GRPCClient) GetOperations( ctx context.Context, query spanstore.OperationQueryParameters, ) ([]spanstore.Operation, 
error) { - resp, err := c.readerClient.GetOperations(upgradeContext(ctx), &storage_v1.GetOperationsRequest{ + resp, err := c.readerClient.GetOperations(ctx, &storage_v1.GetOperationsRequest{ Service: query.ServiceName, SpanKind: query.SpanKind, }) @@ -182,8 +139,8 @@ func (c *grpcClient) GetOperations( } // FindTraces retrieves traces that match the traceQuery -func (c *grpcClient) FindTraces(ctx context.Context, query *spanstore.TraceQueryParameters) ([]*model.Trace, error) { - stream, err := c.readerClient.FindTraces(upgradeContext(ctx), &storage_v1.FindTracesRequest{ +func (c *GRPCClient) FindTraces(ctx context.Context, query *spanstore.TraceQueryParameters) ([]*model.Trace, error) { + stream, err := c.readerClient.FindTraces(ctx, &storage_v1.FindTracesRequest{ Query: &storage_v1.TraceQueryParameters{ ServiceName: query.ServiceName, OperationName: query.OperationName, @@ -192,7 +149,8 @@ func (c *grpcClient) FindTraces(ctx context.Context, query *spanstore.TraceQuery StartTimeMax: query.StartTimeMax, DurationMin: query.DurationMin, DurationMax: query.DurationMax, - NumTraces: int32(query.NumTraces), + //nolint: gosec // G115 + NumTraces: int32(query.NumTraces), }, }) if err != nil { @@ -208,7 +166,7 @@ func (c *grpcClient) FindTraces(ctx context.Context, query *spanstore.TraceQuery } for i, span := range received.Spans { - if span.TraceID != traceID { + if trace == nil || span.TraceID != traceID { trace = &model.Trace{} traceID = span.TraceID traces = append(traces, trace) @@ -220,8 +178,8 @@ func (c *grpcClient) FindTraces(ctx context.Context, query *spanstore.TraceQuery } // FindTraceIDs retrieves traceIDs that match the traceQuery -func (c *grpcClient) FindTraceIDs(ctx context.Context, query *spanstore.TraceQueryParameters) ([]model.TraceID, error) { - resp, err := c.readerClient.FindTraceIDs(upgradeContext(ctx), &storage_v1.FindTraceIDsRequest{ +func (c *GRPCClient) FindTraceIDs(ctx context.Context, query *spanstore.TraceQueryParameters) ([]model.TraceID, error) { + resp, err := c.readerClient.FindTraceIDs(ctx, &storage_v1.FindTraceIDsRequest{ Query: &storage_v1.TraceQueryParameters{ ServiceName: query.ServiceName, OperationName: query.OperationName, @@ -230,7 +188,8 @@ func (c *grpcClient) FindTraceIDs(ctx context.Context, query *spanstore.TraceQue StartTimeMax: query.StartTimeMax, DurationMin: query.DurationMin, DurationMax: query.DurationMax, - NumTraces: int32(query.NumTraces), + //nolint: gosec // G115 + NumTraces: int32(query.NumTraces), }, }) if err != nil { @@ -241,7 +200,7 @@ func (c *grpcClient) FindTraceIDs(ctx context.Context, query *spanstore.TraceQue } // WriteSpan saves the span -func (c *grpcClient) WriteSpan(ctx context.Context, span *model.Span) error { +func (c *GRPCClient) WriteSpan(ctx context.Context, span *model.Span) error { _, err := c.writerClient.WriteSpan(ctx, &storage_v1.WriteSpanRequest{ Span: span, }) @@ -252,7 +211,7 @@ func (c *grpcClient) WriteSpan(ctx context.Context, span *model.Span) error { return nil } -func (c *grpcClient) Close() error { +func (c *GRPCClient) Close() error { _, err := c.writerClient.Close(context.Background(), &storage_v1.CloseWriterRequest{}) if err != nil && status.Code(err) != codes.Unimplemented { return fmt.Errorf("plugin error: %w", err) @@ -262,7 +221,7 @@ func (c *grpcClient) Close() error { } // GetDependencies returns all interservice dependencies -func (c *grpcClient) GetDependencies(ctx context.Context, endTs time.Time, lookback time.Duration) ([]model.DependencyLink, error) { +func (c *GRPCClient) 
GetDependencies(ctx context.Context, endTs time.Time, lookback time.Duration) ([]model.DependencyLink, error) { resp, err := c.depsReaderClient.GetDependencies(ctx, &storage_v1.GetDependenciesRequest{ EndTime: endTs, StartTime: endTs.Add(-lookback), @@ -274,7 +233,7 @@ func (c *grpcClient) GetDependencies(ctx context.Context, endTs time.Time, lookb return resp.Dependencies, nil } -func (c *grpcClient) Capabilities() (*Capabilities, error) { +func (c *GRPCClient) Capabilities() (*Capabilities, error) { capabilities, err := c.capabilitiesClient.Capabilities(context.Background(), &storage_v1.CapabilitiesRequest{}) if status.Code(err) == codes.Unimplemented { return &Capabilities{}, nil diff --git a/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/grpc_handler.go b/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/grpc_handler.go index 98f27a53540..28dc0adf8cb 100644 --- a/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/grpc_handler.go +++ b/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/grpc_handler.go @@ -1,16 +1,5 @@ // Copyright (c) 2019 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package shared @@ -22,9 +11,12 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/health" + "google.golang.org/grpc/health/grpc_health_v1" "google.golang.org/grpc/status" "github.com/jaegertracing/jaeger/model" + _ "github.com/jaegertracing/jaeger/pkg/gogocodec" // force gogo codec registration "github.com/jaegertracing/jaeger/proto-gen/storage_v1" "github.com/jaegertracing/jaeger/storage/dependencystore" "github.com/jaegertracing/jaeger/storage/spanstore" @@ -83,7 +75,7 @@ func NewGRPCHandlerWithPlugins( } // Register registers the server as gRPC methods handler. 
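Register (changed just below) now also registers the standard gRPC health service and marks each storage_v1 service as SERVING. A client can probe those names; a minimal sketch:

package example

import (
    "context"

    "google.golang.org/grpc"
    "google.golang.org/grpc/health/grpc_health_v1"
)

// probe asks the server for the health of one of the per-plugin service names.
func probe(ctx context.Context, conn *grpc.ClientConn) (grpc_health_v1.HealthCheckResponse_ServingStatus, error) {
    resp, err := grpc_health_v1.NewHealthClient(conn).Check(ctx, &grpc_health_v1.HealthCheckRequest{
        Service: "jaeger.storage.v1.SpanReaderPlugin",
    })
    if err != nil {
        return grpc_health_v1.HealthCheckResponse_UNKNOWN, err
    }
    return resp.Status, nil
}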
-func (s *GRPCHandler) Register(ss *grpc.Server) error { +func (s *GRPCHandler) Register(ss *grpc.Server, hs *health.Server) error { storage_v1.RegisterSpanReaderPluginServer(ss, s) storage_v1.RegisterSpanWriterPluginServer(ss, s) storage_v1.RegisterArchiveSpanReaderPluginServer(ss, s) @@ -91,6 +83,16 @@ func (s *GRPCHandler) Register(ss *grpc.Server) error { storage_v1.RegisterPluginCapabilitiesServer(ss, s) storage_v1.RegisterDependenciesReaderPluginServer(ss, s) storage_v1.RegisterStreamingSpanWriterPluginServer(ss, s) + + hs.SetServingStatus("jaeger.storage.v1.SpanReaderPlugin", grpc_health_v1.HealthCheckResponse_SERVING) + hs.SetServingStatus("jaeger.storage.v1.SpanWriterPlugin", grpc_health_v1.HealthCheckResponse_SERVING) + hs.SetServingStatus("jaeger.storage.v1.ArchiveSpanReaderPlugin", grpc_health_v1.HealthCheckResponse_SERVING) + hs.SetServingStatus("jaeger.storage.v1.ArchiveSpanWriterPlugin", grpc_health_v1.HealthCheckResponse_SERVING) + hs.SetServingStatus("jaeger.storage.v1.PluginCapabilities", grpc_health_v1.HealthCheckResponse_SERVING) + hs.SetServingStatus("jaeger.storage.v1.DependenciesReaderPlugin", grpc_health_v1.HealthCheckResponse_SERVING) + hs.SetServingStatus("jaeger.storage.v1.StreamingSpanWriterPlugin", grpc_health_v1.HealthCheckResponse_SERVING) + grpc_health_v1.RegisterHealthServer(ss, hs) + return nil } @@ -136,23 +138,26 @@ func (s *GRPCHandler) WriteSpan(ctx context.Context, r *storage_v1.WriteSpanRequ return &storage_v1.WriteSpanResponse{}, nil } -func (s *GRPCHandler) Close(ctx context.Context, r *storage_v1.CloseWriterRequest) (*storage_v1.CloseWriterResponse, error) { +func (s *GRPCHandler) Close(context.Context, *storage_v1.CloseWriterRequest) (*storage_v1.CloseWriterResponse, error) { if closer, ok := s.impl.SpanWriter().(io.Closer); ok { if err := closer.Close(); err != nil { return nil, err } return &storage_v1.CloseWriterResponse{}, nil - } else { - return nil, status.Error(codes.Unimplemented, "span writer does not support graceful shutdown") } + return nil, status.Error(codes.Unimplemented, "span writer does not support graceful shutdown") } // GetTrace takes a traceID and streams a Trace associated with that traceID func (s *GRPCHandler) GetTrace(r *storage_v1.GetTraceRequest, stream storage_v1.SpanReaderPlugin_GetTraceServer) error { - trace, err := s.impl.SpanReader().GetTrace(stream.Context(), r.TraceID) + trace, err := s.impl.SpanReader().GetTrace(stream.Context(), spanstore.GetTraceParameters{ + TraceID: r.TraceID, + StartTime: r.StartTime, + EndTime: r.EndTime, + }) if errors.Is(err, spanstore.ErrTraceNotFound) { - return status.Errorf(codes.NotFound, spanstore.ErrTraceNotFound.Error()) + return status.Error(codes.NotFound, spanstore.ErrTraceNotFound.Error()) } if err != nil { return err @@ -167,7 +172,7 @@ func (s *GRPCHandler) GetTrace(r *storage_v1.GetTraceRequest, stream storage_v1. 
} // GetServices returns a list of all known services -func (s *GRPCHandler) GetServices(ctx context.Context, r *storage_v1.GetServicesRequest) (*storage_v1.GetServicesResponse, error) { +func (s *GRPCHandler) GetServices(ctx context.Context, _ *storage_v1.GetServicesRequest) (*storage_v1.GetServicesResponse, error) { services, err := s.impl.SpanReader().GetServices(ctx) if err != nil { return nil, err @@ -247,7 +252,7 @@ func (s *GRPCHandler) FindTraceIDs(ctx context.Context, r *storage_v1.FindTraceI }, nil } -func (s *GRPCHandler) sendSpans(spans []*model.Span, sendFn func(*storage_v1.SpansResponseChunk) error) error { +func (*GRPCHandler) sendSpans(spans []*model.Span, sendFn func(*storage_v1.SpansResponseChunk) error) error { chunk := make([]model.Span, 0, len(spans)) for i := 0; i < len(spans); i += spanBatchSize { chunk = chunk[:0] @@ -262,7 +267,7 @@ func (s *GRPCHandler) sendSpans(spans []*model.Span, sendFn func(*storage_v1.Spa return nil } -func (s *GRPCHandler) Capabilities(ctx context.Context, request *storage_v1.CapabilitiesRequest) (*storage_v1.CapabilitiesResponse, error) { +func (s *GRPCHandler) Capabilities(context.Context, *storage_v1.CapabilitiesRequest) (*storage_v1.CapabilitiesResponse, error) { return &storage_v1.CapabilitiesResponse{ ArchiveSpanReader: s.impl.ArchiveSpanReader() != nil, ArchiveSpanWriter: s.impl.ArchiveSpanWriter() != nil, @@ -275,9 +280,13 @@ func (s *GRPCHandler) GetArchiveTrace(r *storage_v1.GetTraceRequest, stream stor if reader == nil { return status.Error(codes.Unimplemented, "not implemented") } - trace, err := reader.GetTrace(stream.Context(), r.TraceID) + trace, err := reader.GetTrace(stream.Context(), spanstore.GetTraceParameters{ + TraceID: r.TraceID, + StartTime: r.StartTime, + EndTime: r.EndTime, + }) if errors.Is(err, spanstore.ErrTraceNotFound) { - return status.Errorf(codes.NotFound, spanstore.ErrTraceNotFound.Error()) + return status.Error(codes.NotFound, spanstore.ErrTraceNotFound.Error()) } if err != nil { return err diff --git a/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/interface.go b/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/interface.go index c6345a29b82..9abc2f75dce 100644 --- a/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/interface.go +++ b/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/interface.go @@ -1,40 +1,13 @@ // Copyright (c) 2019 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package shared import ( - "github.com/hashicorp/go-plugin" - "github.com/jaegertracing/jaeger/storage/dependencystore" "github.com/jaegertracing/jaeger/storage/spanstore" ) -// StoragePluginIdentifier is the identifier that is shared by plugin and host. -const StoragePluginIdentifier = "storage_plugin" - -// Handshake is a common handshake that is shared by plugin and host. 
-var Handshake = plugin.HandshakeConfig{ - MagicCookieKey: "STORAGE_PLUGIN", - MagicCookieValue: "jaeger", -} - -// PluginMap is the map of plugins we can dispense. -var PluginMap = map[string]plugin.Plugin{ - StoragePluginIdentifier: &StorageGRPCPlugin{}, -} - // StoragePlugin is the interface we're exposing as a plugin. type StoragePlugin interface { SpanReader() spanstore.Reader diff --git a/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/plugin.go b/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/plugin.go deleted file mode 100644 index b4a3fabf93e..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/plugin.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright (c) 2020 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package shared - -import ( - "context" - - "github.com/hashicorp/go-plugin" - "google.golang.org/grpc" - - _ "github.com/jaegertracing/jaeger/pkg/gogocodec" // force gogo codec registration -) - -// Ensure plugin.GRPCPlugin API match. -var _ plugin.GRPCPlugin = (*StorageGRPCPlugin)(nil) - -// StorageGRPCPlugin is the implementation of plugin.GRPCPlugin. -type StorageGRPCPlugin struct { - plugin.Plugin - - // Concrete implementation, This is only used for plugins that are written in Go. - Impl StoragePlugin - ArchiveImpl ArchiveStoragePlugin - StreamImpl StreamingSpanWriterPlugin -} - -// RegisterHandlers registers the plugin with the server -func (p *StorageGRPCPlugin) RegisterHandlers(s *grpc.Server) error { - handler := NewGRPCHandlerWithPlugins(p.Impl, p.ArchiveImpl, p.StreamImpl) - return handler.Register(s) -} - -// GRPCServer implements plugin.GRPCPlugin. It is used by go-plugin to create a grpc plugin server. -func (p *StorageGRPCPlugin) GRPCServer(_ *plugin.GRPCBroker, s *grpc.Server) error { - return p.RegisterHandlers(s) -} - -// GRPCClient implements plugin.GRPCPlugin. It is used by go-plugin to create a grpc plugin client. -func (*StorageGRPCPlugin) GRPCClient(_ context.Context, _ *plugin.GRPCBroker, conn *grpc.ClientConn) (interface{}, error) { - return NewGRPCClient(conn), nil -} diff --git a/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/streaming_writer.go b/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/streaming_writer.go index fad5d6c0db1..b186404afdd 100644 --- a/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/streaming_writer.go +++ b/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/streaming_writer.go @@ -1,16 +1,5 @@ // Copyright (c) 2022 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
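With plugin.go deleted above, hashicorp/go-plugin is out of the picture and a storage backend is served as an ordinary gRPC server. A hypothetical wiring from the surviving APIs (passing nil for the archive and streaming implementations is an assumption, not something this diff shows):

package example

import (
    "net"

    "google.golang.org/grpc"
    "google.golang.org/grpc/health"

    "github.com/jaegertracing/jaeger/plugin/storage/grpc/shared"
)

// serve registers the storage handler plus the health service on a plain gRPC server.
func serve(impl shared.StoragePlugin, lis net.Listener) error {
    s := grpc.NewServer()
    hs := health.NewServer()
    handler := shared.NewGRPCHandlerWithPlugins(impl, nil, nil) // nil extras: assumed acceptable
    if err := handler.Register(s, hs); err != nil {
        return err
    }
    return s.Serve(lis)
}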
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package shared @@ -78,7 +67,7 @@ func (s *streamingSpanWriter) getStream(ctx context.Context) (storage_v1.Streami if ok { return st, nil } - return nil, fmt.Errorf("plugin is closed") + return nil, errors.New("plugin is closed") default: return s.client.WriteSpanStream(ctx) } diff --git a/vendor/github.com/jaegertracing/jaeger/ports/ports.go b/vendor/github.com/jaegertracing/jaeger/ports/ports.go index caf035fc761..c1244dfb52b 100644 --- a/vendor/github.com/jaegertracing/jaeger/ports/ports.go +++ b/vendor/github.com/jaegertracing/jaeger/ports/ports.go @@ -1,16 +1,5 @@ // Copyright (c) 2019 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package ports @@ -20,17 +9,6 @@ import ( ) const ( - // AgentJaegerThriftCompactUDP is the default port for receiving Jaeger Thrift over UDP in compact encoding - AgentJaegerThriftCompactUDP = 6831 - // AgentJaegerThriftBinaryUDP is the default port for receiving Jaeger Thrift over UDP in binary encoding - AgentJaegerThriftBinaryUDP = 6832 - // AgentZipkinThriftCompactUDP is the default port for receiving Zipkin Thrift over UDP in binary encoding - AgentZipkinThriftCompactUDP = 5775 - // AgentConfigServerHTTP is the default port for the agent's HTTP config server (e.g. /sampling endpoint) - AgentConfigServerHTTP = 5778 - // AgentAdminHTTP is the default admin HTTP port (health check, metrics, etc.) - AgentAdminHTTP = 14271 - // CollectorGRPC is the default port for gRPC server for sending spans CollectorGRPC = 14250 // CollectorHTTP is the default port for HTTP server for sending spans (e.g. /api/traces endpoint) @@ -40,6 +18,13 @@ const ( // CollectorZipkin is the port for Zipkin server for sending spans CollectorZipkin = 9411 + // CollectorV2SamplingHTTP is the HTTP port for remote sampling extension + CollectorV2SamplingHTTP = 5778 + // CollectorV2SamplingGRPC is the gRPC port for remote sampling extension + CollectorV2SamplingGRPC = 5779 + // CollectorV2HealthChecks is the port for health checks extension + CollectorV2HealthChecks = 13133 + // QueryGRPC is the default port of GRPC requests for Query trace retrieval QueryGRPC = 16685 // QueryHTTP is the default port for UI and Query API (e.g.
/api/* endpoints) @@ -61,15 +46,6 @@ func PortToHostPort(port int) string { return ":" + strconv.Itoa(port) } -// GetAddressFromCLIOptions gets listening address based on port (deprecated flags) or host:port (new flags) -func GetAddressFromCLIOptions(port int, hostPort string) string { - if port != 0 { - return PortToHostPort(port) - } - - return FormatHostPort(hostPort) -} - // FormatHostPort returns hostPort in a usable format (host:port) if it wasn't already func FormatHostPort(hostPort string) string { if hostPort == "" { diff --git a/vendor/github.com/jaegertracing/jaeger/proto-gen/api_v2/metrics/metricsquery.pb.go b/vendor/github.com/jaegertracing/jaeger/proto-gen/api_v2/metrics/metricsquery.pb.go deleted file mode 100644 index 6725a182733..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/proto-gen/api_v2/metrics/metricsquery.pb.go +++ /dev/null @@ -1,2067 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: metricsquery.proto - -package metrics - -import ( - context "context" - encoding_binary "encoding/binary" - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - _ "github.com/gogo/protobuf/types" - github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" - time "time" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf -var _ = time.Kitchen - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// MetricsQueryBaseRequest is the base request parameter accompanying a MetricsQueryService RPC call. -type MetricsQueryBaseRequest struct { - // service_names are the service names to fetch metrics from. - // The results will be grouped by service_name. - // Required. At least one service name must be provided. - ServiceNames []string `protobuf:"bytes,1,rep,name=service_names,json=serviceNames,proto3" json:"service_names,omitempty"` - // groupByOperation determines if the metrics returned should be grouped by operation. - // Optional. Default = false. - GroupByOperation bool `protobuf:"varint,2,opt,name=groupByOperation,proto3" json:"groupByOperation,omitempty"` - // end_time is the ending time of the time series query range. - // Optional. Default = now. - EndTime *time.Time `protobuf:"bytes,3,opt,name=end_time,json=endTime,proto3,stdtime" json:"end_time,omitempty"` - // lookback is the duration from the end_time to look back on for metrics data points. - // For example, if set to 1h, the query would span from end_time-1h to end_time. - // Optional. Default = 1h. - Lookback *time.Duration `protobuf:"bytes,4,opt,name=lookback,proto3,stdduration" json:"lookback,omitempty"` - // step size is the duration between data points of the query results. - // For example, if set to 5s, the results would produce a data point every 5 seconds - // from the start_time to end_time. - // Optional. Default = 5s. 
- Step *time.Duration `protobuf:"bytes,5,opt,name=step,proto3,stdduration" json:"step,omitempty"` - // ratePer is the duration in which the per-second rate of change is calculated for a cumulative counter metric. - // Optional. Default = 10m. - RatePer *time.Duration `protobuf:"bytes,6,opt,name=ratePer,proto3,stdduration" json:"ratePer,omitempty"` - // spanKinds is the list of span kinds to include (logical OR) in the resulting metrics aggregation. - // Optional. Default = [SPAN_KIND_SERVER]. - SpanKinds []SpanKind `protobuf:"varint,7,rep,packed,name=spanKinds,proto3,enum=jaeger.api_v2.metrics.SpanKind" json:"spanKinds,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MetricsQueryBaseRequest) Reset() { *m = MetricsQueryBaseRequest{} } -func (m *MetricsQueryBaseRequest) String() string { return proto.CompactTextString(m) } -func (*MetricsQueryBaseRequest) ProtoMessage() {} -func (*MetricsQueryBaseRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_212ad953da609219, []int{0} -} -func (m *MetricsQueryBaseRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MetricsQueryBaseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MetricsQueryBaseRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MetricsQueryBaseRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricsQueryBaseRequest.Merge(m, src) -} -func (m *MetricsQueryBaseRequest) XXX_Size() int { - return m.Size() -} -func (m *MetricsQueryBaseRequest) XXX_DiscardUnknown() { - xxx_messageInfo_MetricsQueryBaseRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_MetricsQueryBaseRequest proto.InternalMessageInfo - -func (m *MetricsQueryBaseRequest) GetServiceNames() []string { - if m != nil { - return m.ServiceNames - } - return nil -} - -func (m *MetricsQueryBaseRequest) GetGroupByOperation() bool { - if m != nil { - return m.GroupByOperation - } - return false -} - -func (m *MetricsQueryBaseRequest) GetEndTime() *time.Time { - if m != nil { - return m.EndTime - } - return nil -} - -func (m *MetricsQueryBaseRequest) GetLookback() *time.Duration { - if m != nil { - return m.Lookback - } - return nil -} - -func (m *MetricsQueryBaseRequest) GetStep() *time.Duration { - if m != nil { - return m.Step - } - return nil -} - -func (m *MetricsQueryBaseRequest) GetRatePer() *time.Duration { - if m != nil { - return m.RatePer - } - return nil -} - -func (m *MetricsQueryBaseRequest) GetSpanKinds() []SpanKind { - if m != nil { - return m.SpanKinds - } - return nil -} - -// GetLatenciesRequest contains parameters for the GetLatencies RPC call. -type GetLatenciesRequest struct { - BaseRequest *MetricsQueryBaseRequest `protobuf:"bytes,1,opt,name=baseRequest,proto3" json:"baseRequest,omitempty"` - // quantile is the quantile to compute from latency histogram metrics. - // Valid range: (0, 1] - // - // e.g. 0.99 will return the 99th percentile or P99 which is the worst latency - // observed from 99% of all spans for the given service (and operation). - // - // Required. 
- Quantile float64 `protobuf:"fixed64,2,opt,name=quantile,proto3" json:"quantile,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetLatenciesRequest) Reset() { *m = GetLatenciesRequest{} } -func (m *GetLatenciesRequest) String() string { return proto.CompactTextString(m) } -func (*GetLatenciesRequest) ProtoMessage() {} -func (*GetLatenciesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_212ad953da609219, []int{1} -} -func (m *GetLatenciesRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetLatenciesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetLatenciesRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GetLatenciesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetLatenciesRequest.Merge(m, src) -} -func (m *GetLatenciesRequest) XXX_Size() int { - return m.Size() -} -func (m *GetLatenciesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetLatenciesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetLatenciesRequest proto.InternalMessageInfo - -func (m *GetLatenciesRequest) GetBaseRequest() *MetricsQueryBaseRequest { - if m != nil { - return m.BaseRequest - } - return nil -} - -func (m *GetLatenciesRequest) GetQuantile() float64 { - if m != nil { - return m.Quantile - } - return 0 -} - -// GetCallRatesRequest contains parameters for the GetCallRates RPC call. -type GetCallRatesRequest struct { - BaseRequest *MetricsQueryBaseRequest `protobuf:"bytes,1,opt,name=baseRequest,proto3" json:"baseRequest,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetCallRatesRequest) Reset() { *m = GetCallRatesRequest{} } -func (m *GetCallRatesRequest) String() string { return proto.CompactTextString(m) } -func (*GetCallRatesRequest) ProtoMessage() {} -func (*GetCallRatesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_212ad953da609219, []int{2} -} -func (m *GetCallRatesRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetCallRatesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetCallRatesRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GetCallRatesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetCallRatesRequest.Merge(m, src) -} -func (m *GetCallRatesRequest) XXX_Size() int { - return m.Size() -} -func (m *GetCallRatesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetCallRatesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetCallRatesRequest proto.InternalMessageInfo - -func (m *GetCallRatesRequest) GetBaseRequest() *MetricsQueryBaseRequest { - if m != nil { - return m.BaseRequest - } - return nil -} - -// GetErrorRatesRequest contains parameters for the GetErrorRates RPC call. 
-type GetErrorRatesRequest struct { - BaseRequest *MetricsQueryBaseRequest `protobuf:"bytes,1,opt,name=baseRequest,proto3" json:"baseRequest,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetErrorRatesRequest) Reset() { *m = GetErrorRatesRequest{} } -func (m *GetErrorRatesRequest) String() string { return proto.CompactTextString(m) } -func (*GetErrorRatesRequest) ProtoMessage() {} -func (*GetErrorRatesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_212ad953da609219, []int{3} -} -func (m *GetErrorRatesRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetErrorRatesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetErrorRatesRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GetErrorRatesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetErrorRatesRequest.Merge(m, src) -} -func (m *GetErrorRatesRequest) XXX_Size() int { - return m.Size() -} -func (m *GetErrorRatesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetErrorRatesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetErrorRatesRequest proto.InternalMessageInfo - -func (m *GetErrorRatesRequest) GetBaseRequest() *MetricsQueryBaseRequest { - if m != nil { - return m.BaseRequest - } - return nil -} - -type GetMinStepDurationRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetMinStepDurationRequest) Reset() { *m = GetMinStepDurationRequest{} } -func (m *GetMinStepDurationRequest) String() string { return proto.CompactTextString(m) } -func (*GetMinStepDurationRequest) ProtoMessage() {} -func (*GetMinStepDurationRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_212ad953da609219, []int{4} -} -func (m *GetMinStepDurationRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetMinStepDurationRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetMinStepDurationRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GetMinStepDurationRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetMinStepDurationRequest.Merge(m, src) -} -func (m *GetMinStepDurationRequest) XXX_Size() int { - return m.Size() -} -func (m *GetMinStepDurationRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetMinStepDurationRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetMinStepDurationRequest proto.InternalMessageInfo - -type GetMinStepDurationResponse struct { - MinStep time.Duration `protobuf:"bytes,1,opt,name=minStep,proto3,stdduration" json:"minStep"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetMinStepDurationResponse) Reset() { *m = GetMinStepDurationResponse{} } -func (m *GetMinStepDurationResponse) String() string { return proto.CompactTextString(m) } -func (*GetMinStepDurationResponse) ProtoMessage() {} -func (*GetMinStepDurationResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_212ad953da609219, []int{5} -} -func (m *GetMinStepDurationResponse) XXX_Unmarshal(b []byte) error { - return 
m.Unmarshal(b) -} -func (m *GetMinStepDurationResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetMinStepDurationResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GetMinStepDurationResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetMinStepDurationResponse.Merge(m, src) -} -func (m *GetMinStepDurationResponse) XXX_Size() int { - return m.Size() -} -func (m *GetMinStepDurationResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetMinStepDurationResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetMinStepDurationResponse proto.InternalMessageInfo - -func (m *GetMinStepDurationResponse) GetMinStep() time.Duration { - if m != nil { - return m.MinStep - } - return 0 -} - -type GetMetricsResponse struct { - Metrics MetricFamily `protobuf:"bytes,1,opt,name=metrics,proto3" json:"metrics"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetMetricsResponse) Reset() { *m = GetMetricsResponse{} } -func (m *GetMetricsResponse) String() string { return proto.CompactTextString(m) } -func (*GetMetricsResponse) ProtoMessage() {} -func (*GetMetricsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_212ad953da609219, []int{6} -} -func (m *GetMetricsResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetMetricsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetMetricsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GetMetricsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetMetricsResponse.Merge(m, src) -} -func (m *GetMetricsResponse) XXX_Size() int { - return m.Size() -} -func (m *GetMetricsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetMetricsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetMetricsResponse proto.InternalMessageInfo - -func (m *GetMetricsResponse) GetMetrics() MetricFamily { - if m != nil { - return m.Metrics - } - return MetricFamily{} -} - -func init() { - proto.RegisterType((*MetricsQueryBaseRequest)(nil), "jaeger.api_v2.metrics.MetricsQueryBaseRequest") - proto.RegisterType((*GetLatenciesRequest)(nil), "jaeger.api_v2.metrics.GetLatenciesRequest") - proto.RegisterType((*GetCallRatesRequest)(nil), "jaeger.api_v2.metrics.GetCallRatesRequest") - proto.RegisterType((*GetErrorRatesRequest)(nil), "jaeger.api_v2.metrics.GetErrorRatesRequest") - proto.RegisterType((*GetMinStepDurationRequest)(nil), "jaeger.api_v2.metrics.GetMinStepDurationRequest") - proto.RegisterType((*GetMinStepDurationResponse)(nil), "jaeger.api_v2.metrics.GetMinStepDurationResponse") - proto.RegisterType((*GetMetricsResponse)(nil), "jaeger.api_v2.metrics.GetMetricsResponse") -} - -func init() { proto.RegisterFile("metricsquery.proto", fileDescriptor_212ad953da609219) } - -var fileDescriptor_212ad953da609219 = []byte{ - // 593 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x53, 0x4f, 0x6f, 0xd3, 0x3c, - 0x18, 0x7f, 0xf3, 0x76, 0xac, 0x9d, 0xb7, 0x21, 0xf0, 0x86, 0xc8, 0x82, 0xd4, 0x56, 0xdd, 0xa5, - 0x0c, 0x29, 0x83, 0x22, 0xc1, 0x69, 0x42, 0xea, 0x80, 0x1e, 0x60, 0x30, 0x52, 0x2e, 0xc0, 0xa1, - 0x72, 0xdb, 0x87, 0x60, 
0x9a, 0xd8, 0xa9, 0xed, 0x4c, 0xea, 0x99, 0x2f, 0xc0, 0x91, 0x3b, 0x1f, - 0x86, 0x1d, 0xf9, 0x04, 0x80, 0xfa, 0x49, 0x50, 0x62, 0xa7, 0xeb, 0xda, 0xa5, 0xac, 0x87, 0xdd, - 0xe2, 0xc7, 0xcf, 0xef, 0xcf, 0xe3, 0xe7, 0x17, 0x84, 0x43, 0x50, 0x82, 0xf6, 0xe4, 0x30, 0x06, - 0x31, 0x72, 0x23, 0xc1, 0x15, 0xc7, 0xb7, 0x3e, 0x13, 0xf0, 0x41, 0xb8, 0x24, 0xa2, 0x9d, 0x93, - 0x86, 0x6b, 0x3a, 0x9c, 0x9b, 0x3c, 0x02, 0x66, 0x0e, 0xba, 0xd3, 0xc1, 0x5c, 0x41, 0x20, 0x23, - 0xc2, 0x06, 0x94, 0xf5, 0x4d, 0x6d, 0xdb, 0xe7, 0x3e, 0x4f, 0x3f, 0xf7, 0x93, 0x2f, 0x53, 0xad, - 0xf8, 0x9c, 0xfb, 0x01, 0xec, 0xa7, 0xa7, 0x6e, 0xfc, 0x71, 0x5f, 0xd1, 0x10, 0xa4, 0x22, 0x61, - 0x64, 0x1a, 0xca, 0xb3, 0x0d, 0xfd, 0x58, 0x10, 0x45, 0x39, 0xd3, 0xf7, 0xb5, 0xef, 0x05, 0x74, - 0xfb, 0x48, 0x8b, 0xbf, 0x49, 0xbc, 0x36, 0x89, 0x04, 0x0f, 0x86, 0x31, 0x48, 0x85, 0x77, 0xd1, - 0xa6, 0x04, 0x71, 0x42, 0x7b, 0xd0, 0x61, 0x24, 0x04, 0x69, 0x5b, 0xd5, 0x42, 0x7d, 0xcd, 0xdb, - 0x30, 0xc5, 0x57, 0x49, 0x0d, 0xef, 0xa1, 0x1b, 0xbe, 0xe0, 0x71, 0xd4, 0x1c, 0xbd, 0x8e, 0x40, - 0x53, 0xdb, 0xff, 0x57, 0xad, 0x7a, 0xc9, 0x9b, 0xab, 0xe3, 0x27, 0xa8, 0x04, 0xac, 0xdf, 0x49, - 0x3c, 0xda, 0x85, 0xaa, 0x55, 0x5f, 0x6f, 0x38, 0xae, 0xf6, 0xe7, 0x66, 0xfe, 0xdc, 0xb7, 0xd9, - 0x00, 0xcd, 0xd2, 0xe9, 0xaf, 0x8a, 0xf5, 0xf5, 0x77, 0xc5, 0xf2, 0x8a, 0xc0, 0xfa, 0x49, 0x3d, - 0x21, 0x08, 0x38, 0x1f, 0x74, 0x49, 0x6f, 0x60, 0xaf, 0xa4, 0x04, 0x3b, 0x73, 0x04, 0x4f, 0xcd, - 0x80, 0x1a, 0xff, 0x2d, 0xc1, 0x4f, 0x40, 0xf8, 0x31, 0x5a, 0x91, 0x0a, 0x22, 0xfb, 0xda, 0xe5, - 0xc1, 0x29, 0x00, 0x1f, 0xa0, 0xa2, 0x20, 0x0a, 0x8e, 0x41, 0xd8, 0xab, 0x97, 0xc7, 0x66, 0x18, - 0x7c, 0x80, 0xd6, 0x92, 0x7d, 0xbe, 0xa0, 0xac, 0x2f, 0xed, 0x62, 0xb5, 0x50, 0xbf, 0xde, 0xa8, - 0xb8, 0x17, 0xe6, 0xc1, 0x6d, 0x9b, 0x3e, 0xef, 0x0c, 0x51, 0xfb, 0x62, 0xa1, 0xad, 0x16, 0xa8, - 0x97, 0x44, 0x01, 0xeb, 0x51, 0x90, 0xd9, 0x86, 0x8e, 0xd1, 0x7a, 0xf7, 0x6c, 0x61, 0xb6, 0x95, - 0x3a, 0x73, 0x73, 0x88, 0x73, 0xd6, 0xec, 0x4d, 0x53, 0x60, 0x07, 0x95, 0x86, 0x31, 0x61, 0x8a, - 0x06, 0x90, 0xae, 0xd1, 0xf2, 0x26, 0xe7, 0x9a, 0x9f, 0x9a, 0x38, 0x24, 0x41, 0xe0, 0x11, 0x75, - 0x85, 0x26, 0x6a, 0x9f, 0xd0, 0x76, 0x0b, 0xd4, 0x33, 0x21, 0xb8, 0xb8, 0x62, 0xa5, 0x3b, 0x68, - 0xa7, 0x05, 0xea, 0x88, 0xb2, 0xb6, 0x82, 0x28, 0x5b, 0x60, 0x76, 0xf9, 0x01, 0x39, 0x17, 0x5d, - 0xca, 0x88, 0x33, 0x09, 0x49, 0x22, 0x42, 0x7d, 0x65, 0x8c, 0xfc, 0x23, 0x11, 0xff, 0xe9, 0x44, - 0x18, 0x4c, 0xed, 0x1d, 0xc2, 0x09, 0xb9, 0x36, 0x39, 0x21, 0x3d, 0x44, 0x45, 0xe3, 0xdf, 0x90, - 0xee, 0x2e, 0x9c, 0xee, 0x39, 0x09, 0x69, 0x30, 0x6a, 0xae, 0x24, 0xf4, 0x5e, 0x86, 0x6c, 0xfc, - 0x28, 0xa0, 0xad, 0xe9, 0xe9, 0xdb, 0xfa, 0x7f, 0xc5, 0x23, 0x2d, 0x79, 0x7e, 0x1e, 0x7c, 0x3f, - 0x47, 0x21, 0xf7, 0x5d, 0x9c, 0x07, 0x4b, 0x20, 0xcc, 0x5c, 0x80, 0x36, 0xa6, 0xf3, 0x8b, 0xf7, - 0xf2, 0x29, 0x66, 0x43, 0xee, 0xdc, 0x5d, 0x20, 0x37, 0xf3, 0x7c, 0x5a, 0x66, 0x92, 0xd0, 0x45, - 0x32, 0xb3, 0x31, 0x5e, 0x46, 0xc6, 0x47, 0x9b, 0xe7, 0xf2, 0x89, 0xef, 0xe5, 0x63, 0xe7, 0x52, - 0xbc, 0x84, 0x50, 0xf3, 0xd1, 0xe9, 0xb8, 0x6c, 0xfd, 0x1c, 0x97, 0xad, 0x3f, 0xe3, 0xb2, 0x85, - 0x2a, 0x94, 0x1b, 0xa8, 0x12, 0xa4, 0x47, 0x99, 0x3f, 0xc3, 0xf0, 0x3e, 0x4b, 0x40, 0x77, 0x35, - 0x8d, 0xe0, 0xc3, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x22, 0x5c, 0x1d, 0xae, 0x87, 0x06, 0x00, - 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// MetricsQueryServiceClient is the client API for MetricsQueryService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type MetricsQueryServiceClient interface { - // GetMinStepDuration gets the min time resolution supported by the backing metrics store, - // e.g. 10s means the backend can only return data points that are at least 10s apart, not closer. - GetMinStepDuration(ctx context.Context, in *GetMinStepDurationRequest, opts ...grpc.CallOption) (*GetMinStepDurationResponse, error) - // GetLatencies gets the latency metrics for a given list of services grouped by service - // and optionally grouped by operation. - GetLatencies(ctx context.Context, in *GetLatenciesRequest, opts ...grpc.CallOption) (*GetMetricsResponse, error) - // GetCallRates gets the call rate metrics for a given list of services grouped by service - // and optionally grouped by operation. - GetCallRates(ctx context.Context, in *GetCallRatesRequest, opts ...grpc.CallOption) (*GetMetricsResponse, error) - // GetErrorRates gets the error rate metrics for a given list of services grouped by service - // and optionally grouped by operation. - GetErrorRates(ctx context.Context, in *GetErrorRatesRequest, opts ...grpc.CallOption) (*GetMetricsResponse, error) -} - -type metricsQueryServiceClient struct { - cc *grpc.ClientConn -} - -func NewMetricsQueryServiceClient(cc *grpc.ClientConn) MetricsQueryServiceClient { - return &metricsQueryServiceClient{cc} -} - -func (c *metricsQueryServiceClient) GetMinStepDuration(ctx context.Context, in *GetMinStepDurationRequest, opts ...grpc.CallOption) (*GetMinStepDurationResponse, error) { - out := new(GetMinStepDurationResponse) - err := c.cc.Invoke(ctx, "/jaeger.api_v2.metrics.MetricsQueryService/GetMinStepDuration", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *metricsQueryServiceClient) GetLatencies(ctx context.Context, in *GetLatenciesRequest, opts ...grpc.CallOption) (*GetMetricsResponse, error) { - out := new(GetMetricsResponse) - err := c.cc.Invoke(ctx, "/jaeger.api_v2.metrics.MetricsQueryService/GetLatencies", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *metricsQueryServiceClient) GetCallRates(ctx context.Context, in *GetCallRatesRequest, opts ...grpc.CallOption) (*GetMetricsResponse, error) { - out := new(GetMetricsResponse) - err := c.cc.Invoke(ctx, "/jaeger.api_v2.metrics.MetricsQueryService/GetCallRates", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *metricsQueryServiceClient) GetErrorRates(ctx context.Context, in *GetErrorRatesRequest, opts ...grpc.CallOption) (*GetMetricsResponse, error) { - out := new(GetMetricsResponse) - err := c.cc.Invoke(ctx, "/jaeger.api_v2.metrics.MetricsQueryService/GetErrorRates", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// MetricsQueryServiceServer is the server API for MetricsQueryService service. -type MetricsQueryServiceServer interface { - // GetMinStepDuration gets the min time resolution supported by the backing metrics store, - // e.g. 10s means the backend can only return data points that are at least 10s apart, not closer. 
- GetMinStepDuration(context.Context, *GetMinStepDurationRequest) (*GetMinStepDurationResponse, error) - // GetLatencies gets the latency metrics for a given list of services grouped by service - // and optionally grouped by operation. - GetLatencies(context.Context, *GetLatenciesRequest) (*GetMetricsResponse, error) - // GetCallRates gets the call rate metrics for a given list of services grouped by service - // and optionally grouped by operation. - GetCallRates(context.Context, *GetCallRatesRequest) (*GetMetricsResponse, error) - // GetErrorRates gets the error rate metrics for a given list of services grouped by service - // and optionally grouped by operation. - GetErrorRates(context.Context, *GetErrorRatesRequest) (*GetMetricsResponse, error) -} - -// UnimplementedMetricsQueryServiceServer can be embedded to have forward compatible implementations. -type UnimplementedMetricsQueryServiceServer struct { -} - -func (*UnimplementedMetricsQueryServiceServer) GetMinStepDuration(ctx context.Context, req *GetMinStepDurationRequest) (*GetMinStepDurationResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetMinStepDuration not implemented") -} -func (*UnimplementedMetricsQueryServiceServer) GetLatencies(ctx context.Context, req *GetLatenciesRequest) (*GetMetricsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetLatencies not implemented") -} -func (*UnimplementedMetricsQueryServiceServer) GetCallRates(ctx context.Context, req *GetCallRatesRequest) (*GetMetricsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetCallRates not implemented") -} -func (*UnimplementedMetricsQueryServiceServer) GetErrorRates(ctx context.Context, req *GetErrorRatesRequest) (*GetMetricsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetErrorRates not implemented") -} - -func RegisterMetricsQueryServiceServer(s *grpc.Server, srv MetricsQueryServiceServer) { - s.RegisterService(&_MetricsQueryService_serviceDesc, srv) -} - -func _MetricsQueryService_GetMinStepDuration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetMinStepDurationRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MetricsQueryServiceServer).GetMinStepDuration(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/jaeger.api_v2.metrics.MetricsQueryService/GetMinStepDuration", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MetricsQueryServiceServer).GetMinStepDuration(ctx, req.(*GetMinStepDurationRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _MetricsQueryService_GetLatencies_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetLatenciesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MetricsQueryServiceServer).GetLatencies(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/jaeger.api_v2.metrics.MetricsQueryService/GetLatencies", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MetricsQueryServiceServer).GetLatencies(ctx, req.(*GetLatenciesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _MetricsQueryService_GetCallRates_Handler(srv interface{}, ctx 
context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetCallRatesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MetricsQueryServiceServer).GetCallRates(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/jaeger.api_v2.metrics.MetricsQueryService/GetCallRates", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MetricsQueryServiceServer).GetCallRates(ctx, req.(*GetCallRatesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _MetricsQueryService_GetErrorRates_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetErrorRatesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MetricsQueryServiceServer).GetErrorRates(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/jaeger.api_v2.metrics.MetricsQueryService/GetErrorRates", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MetricsQueryServiceServer).GetErrorRates(ctx, req.(*GetErrorRatesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _MetricsQueryService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "jaeger.api_v2.metrics.MetricsQueryService", - HandlerType: (*MetricsQueryServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "GetMinStepDuration", - Handler: _MetricsQueryService_GetMinStepDuration_Handler, - }, - { - MethodName: "GetLatencies", - Handler: _MetricsQueryService_GetLatencies_Handler, - }, - { - MethodName: "GetCallRates", - Handler: _MetricsQueryService_GetCallRates_Handler, - }, - { - MethodName: "GetErrorRates", - Handler: _MetricsQueryService_GetErrorRates_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "metricsquery.proto", -} - -func (m *MetricsQueryBaseRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MetricsQueryBaseRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MetricsQueryBaseRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.SpanKinds) > 0 { - dAtA2 := make([]byte, len(m.SpanKinds)*10) - var j1 int - for _, num := range m.SpanKinds { - for num >= 1<<7 { - dAtA2[j1] = uint8(uint64(num)&0x7f | 0x80) - num >>= 7 - j1++ - } - dAtA2[j1] = uint8(num) - j1++ - } - i -= j1 - copy(dAtA[i:], dAtA2[:j1]) - i = encodeVarintMetricsquery(dAtA, i, uint64(j1)) - i-- - dAtA[i] = 0x3a - } - if m.RatePer != nil { - n3, err3 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.RatePer, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.RatePer):]) - if err3 != nil { - return 0, err3 - } - i -= n3 - i = encodeVarintMetricsquery(dAtA, i, uint64(n3)) - i-- - dAtA[i] = 0x32 - } - if m.Step != nil { - n4, err4 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.Step, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.Step):]) - if err4 != nil { - return 0, err4 - } - i -= n4 - i = encodeVarintMetricsquery(dAtA, i, uint64(n4)) - 
i-- - dAtA[i] = 0x2a - } - if m.Lookback != nil { - n5, err5 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.Lookback, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.Lookback):]) - if err5 != nil { - return 0, err5 - } - i -= n5 - i = encodeVarintMetricsquery(dAtA, i, uint64(n5)) - i-- - dAtA[i] = 0x22 - } - if m.EndTime != nil { - n6, err6 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.EndTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.EndTime):]) - if err6 != nil { - return 0, err6 - } - i -= n6 - i = encodeVarintMetricsquery(dAtA, i, uint64(n6)) - i-- - dAtA[i] = 0x1a - } - if m.GroupByOperation { - i-- - if m.GroupByOperation { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if len(m.ServiceNames) > 0 { - for iNdEx := len(m.ServiceNames) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ServiceNames[iNdEx]) - copy(dAtA[i:], m.ServiceNames[iNdEx]) - i = encodeVarintMetricsquery(dAtA, i, uint64(len(m.ServiceNames[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *GetLatenciesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetLatenciesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GetLatenciesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Quantile != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Quantile)))) - i-- - dAtA[i] = 0x11 - } - if m.BaseRequest != nil { - { - size, err := m.BaseRequest.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetricsquery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *GetCallRatesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetCallRatesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GetCallRatesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.BaseRequest != nil { - { - size, err := m.BaseRequest.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetricsquery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *GetErrorRatesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetErrorRatesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GetErrorRatesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.BaseRequest != 
nil { - { - size, err := m.BaseRequest.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetricsquery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *GetMinStepDurationRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetMinStepDurationRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GetMinStepDurationRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil -} - -func (m *GetMinStepDurationResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetMinStepDurationResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GetMinStepDurationResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - n10, err10 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.MinStep, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.MinStep):]) - if err10 != nil { - return 0, err10 - } - i -= n10 - i = encodeVarintMetricsquery(dAtA, i, uint64(n10)) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *GetMetricsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetMetricsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GetMetricsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - { - size, err := m.Metrics.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetricsquery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintMetricsquery(dAtA []byte, offset int, v uint64) int { - offset -= sovMetricsquery(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *MetricsQueryBaseRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ServiceNames) > 0 { - for _, s := range m.ServiceNames { - l = len(s) - n += 1 + l + sovMetricsquery(uint64(l)) - } - } - if m.GroupByOperation { - n += 2 - } - if m.EndTime != nil { - l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.EndTime) - n += 1 + l + sovMetricsquery(uint64(l)) - } - if m.Lookback != nil { - l = github_com_gogo_protobuf_types.SizeOfStdDuration(*m.Lookback) - n += 1 + l + sovMetricsquery(uint64(l)) - } - if m.Step != nil { - l = github_com_gogo_protobuf_types.SizeOfStdDuration(*m.Step) - n += 1 + l + sovMetricsquery(uint64(l)) - 
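// Aside (illustrative sketch, not generated code): the Marshal methods above
// fill dAtA back to front -- payload first, then its varint length via
// encodeVarintMetricsquery (which steps the offset back by sovMetricsquery(v)
// bytes and encodes in place), then the one-byte tag. The same scheme for a
// single length-delimited field, self-contained:
package main

import (
	"fmt"
	"math/bits"
)

func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 } // as sovMetricsquery

func encodeVarint(dAtA []byte, offset int, v uint64) int { // as encodeVarintMetricsquery
	offset -= sov(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}

func main() {
	payload := []byte("tempo")
	buf := make([]byte, 1+sov(uint64(len(payload)))+len(payload))
	i := len(buf)
	i -= len(payload)
	copy(buf[i:], payload) // payload first
	i = encodeVarint(buf, i, uint64(len(payload))) // then its length
	i--
	buf[i] = 0xa // tag last: field 1, wire type 2, as in the BaseRequest cases above
	fmt.Printf("%x\n", buf) // 0a0574656d706f
}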
} - if m.RatePer != nil { - l = github_com_gogo_protobuf_types.SizeOfStdDuration(*m.RatePer) - n += 1 + l + sovMetricsquery(uint64(l)) - } - if len(m.SpanKinds) > 0 { - l = 0 - for _, e := range m.SpanKinds { - l += sovMetricsquery(uint64(e)) - } - n += 1 + sovMetricsquery(uint64(l)) + l - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *GetLatenciesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.BaseRequest != nil { - l = m.BaseRequest.Size() - n += 1 + l + sovMetricsquery(uint64(l)) - } - if m.Quantile != 0 { - n += 9 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *GetCallRatesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.BaseRequest != nil { - l = m.BaseRequest.Size() - n += 1 + l + sovMetricsquery(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *GetErrorRatesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.BaseRequest != nil { - l = m.BaseRequest.Size() - n += 1 + l + sovMetricsquery(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *GetMinStepDurationRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *GetMinStepDurationResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.MinStep) - n += 1 + l + sovMetricsquery(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *GetMetricsResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Metrics.Size() - n += 1 + l + sovMetricsquery(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovMetricsquery(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozMetricsquery(x uint64) (n int) { - return sovMetricsquery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *MetricsQueryBaseRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetricsquery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MetricsQueryBaseRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MetricsQueryBaseRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceNames", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetricsquery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMetricsquery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthMetricsquery - } - if postIndex > l { - return io.ErrUnexpectedEOF 
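// Aside (illustrative sketch): sozMetricsquery above sizes sint fields by
// zig-zag mapping first, (x << 1) ^ (x >> 63), so small negative values
// become small unsigned ones before the varint size formula applies:
package main

import (
	"fmt"
	"math/bits"
)

func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }
func soz(x uint64) int { return sov((x << 1) ^ uint64(int64(x)>>63)) }

func main() {
	fmt.Println(sov(0), sov(300)) // 1 2  (300 needs 9 bits -> two 7-bit groups)
	// -1 as a plain uint64 is 2^64-1 and needs the maximum 10 bytes;
	// zig-zagged it becomes 1 and fits in a single byte.
	fmt.Println(sov(uint64(int64(-1)))) // 10
	fmt.Println(soz(uint64(int64(-1)))) // 1
}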
- } - m.ServiceNames = append(m.ServiceNames, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GroupByOperation", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetricsquery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.GroupByOperation = bool(v != 0) - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EndTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetricsquery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetricsquery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetricsquery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.EndTime == nil { - m.EndTime = new(time.Time) - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.EndTime, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Lookback", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetricsquery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetricsquery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetricsquery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Lookback == nil { - m.Lookback = new(time.Duration) - } - if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(m.Lookback, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Step", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetricsquery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetricsquery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetricsquery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Step == nil { - m.Step = new(time.Duration) - } - if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(m.Step, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RatePer", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetricsquery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetricsquery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetricsquery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RatePer == nil { - m.RatePer = new(time.Duration) - } - if err := 
github_com_gogo_protobuf_types.StdDurationUnmarshal(m.RatePer, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType == 0 { - var v SpanKind - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetricsquery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= SpanKind(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.SpanKinds = append(m.SpanKinds, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetricsquery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthMetricsquery - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthMetricsquery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - if elementCount != 0 && len(m.SpanKinds) == 0 { - m.SpanKinds = make([]SpanKind, 0, elementCount) - } - for iNdEx < postIndex { - var v SpanKind - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetricsquery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= SpanKind(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.SpanKinds = append(m.SpanKinds, v) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field SpanKinds", wireType) - } - default: - iNdEx = preIndex - skippy, err := skipMetricsquery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetricsquery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
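// Aside (illustrative sketch): the SpanKinds case above accepts both
// encodings protobuf permits for a repeated varint field -- one tagged
// varint per element (wire type 0) or the packed form, a single
// length-delimited blob of concatenated varints (wire type 2, written by the
// marshaler above under tag 0x3a). Decoding such a blob with the standard
// library:
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	kinds := []uint64{1, 2, 300} // stand-ins for SpanKind enum values
	var packed []byte
	tmp := make([]byte, binary.MaxVarintLen64)
	for _, k := range kinds {
		n := binary.PutUvarint(tmp, k)
		packed = append(packed, tmp[:n]...)
	}
	// On the wire this blob is preceded by tag 0x3a and its byte length.
	var decoded []uint64
	for i := 0; i < len(packed); {
		v, n := binary.Uvarint(packed[i:])
		if n <= 0 {
			panic("truncated varint")
		}
		decoded = append(decoded, v)
		i += n
	}
	fmt.Println(decoded) // [1 2 300]
}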
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetLatenciesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetricsquery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetLatenciesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetLatenciesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BaseRequest", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetricsquery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetricsquery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetricsquery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.BaseRequest == nil { - m.BaseRequest = &MetricsQueryBaseRequest{} - } - if err := m.BaseRequest.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field Quantile", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.Quantile = float64(math.Float64frombits(v)) - default: - iNdEx = preIndex - skippy, err := skipMetricsquery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetricsquery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
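// Aside (illustrative sketch): Quantile above travels as wire type 1
// (fixed64) -- the marshaler stores math.Float64bits little-endian after tag
// 0x11 (field 2, wire type 1) and the decoder inverts it, exactly this:
package main

import (
	"encoding/binary"
	"fmt"
	"math"
)

func main() {
	buf := make([]byte, 8)
	binary.LittleEndian.PutUint64(buf, math.Float64bits(0.99))
	q := math.Float64frombits(binary.LittleEndian.Uint64(buf))
	fmt.Println(q) // 0.99
}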
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetCallRatesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetricsquery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetCallRatesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetCallRatesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BaseRequest", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetricsquery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetricsquery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetricsquery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.BaseRequest == nil { - m.BaseRequest = &MetricsQueryBaseRequest{} - } - if err := m.BaseRequest.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetricsquery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetricsquery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
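// Aside (illustrative sketch): every BaseRequest case above uses the
// standard embedded-message pattern -- read a varint length, bounds-check it
// (including postIndex < 0, which catches lengths that overflow int), then
// unmarshal exactly that sub-slice. Stripped to its essentials:
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// readEmbedded assumes the tag byte was already consumed and returns the
// sub-message bytes plus the index just past them.
func readEmbedded(dAtA []byte, iNdEx int) ([]byte, int, error) {
	msglen, n := binary.Uvarint(dAtA[iNdEx:])
	if n <= 0 {
		return nil, 0, errors.New("bad length varint")
	}
	iNdEx += n
	postIndex := iNdEx + int(msglen)
	if postIndex < iNdEx || postIndex > len(dAtA) {
		return nil, 0, errors.New("length out of range")
	}
	return dAtA[iNdEx:postIndex], postIndex, nil
}

func main() {
	sub, next, err := readEmbedded([]byte{3, 'a', 'b', 'c'}, 0)
	fmt.Println(string(sub), next, err) // abc 4 <nil>
}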
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetErrorRatesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetricsquery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetErrorRatesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetErrorRatesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BaseRequest", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetricsquery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetricsquery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetricsquery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.BaseRequest == nil { - m.BaseRequest = &MetricsQueryBaseRequest{} - } - if err := m.BaseRequest.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetricsquery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetricsquery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetMinStepDurationRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetricsquery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetMinStepDurationRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetMinStepDurationRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipMetricsquery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetricsquery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetMinStepDurationResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetricsquery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetMinStepDurationResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetMinStepDurationResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MinStep", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetricsquery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetricsquery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetricsquery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(&m.MinStep, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetricsquery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetricsquery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
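// Aside (a sketch using the same gogo/protobuf helpers the generated code
// calls): MinStep above round-trips a native time.Duration through the
// "stdduration" helpers, which encode it as a google.protobuf.Duration
// message:
package main

import (
	"fmt"
	"time"

	"github.com/gogo/protobuf/types"
)

func main() {
	d := 15 * time.Second
	buf := make([]byte, types.SizeOfStdDuration(d))
	if _, err := types.StdDurationMarshalTo(d, buf); err != nil {
		panic(err)
	}
	var out time.Duration
	if err := types.StdDurationUnmarshal(&out, buf); err != nil {
		panic(err)
	}
	fmt.Println(out) // 15s
}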
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetMetricsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetricsquery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetMetricsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetMetricsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetricsquery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetricsquery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetricsquery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Metrics.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetricsquery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetricsquery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
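// Aside (illustrative sketch): the default branches above preserve forward
// compatibility -- skipMetricsquery (defined just below) measures an unknown
// field from its wire type (0 varint, 1 fixed64, 2 length-delimited,
// 5 fixed32; 3/4 delimit groups and need the depth counter) and the skipped
// bytes land in XXX_unrecognized so they survive a re-marshal. The per-type
// rule, in miniature:
package main

import (
	"encoding/binary"
	"fmt"
)

// skipLen returns how many payload bytes follow a tag of the given wire type.
func skipLen(wireType int, dAtA []byte) int {
	switch wireType {
	case 0: // varint: bytes up to and including the first one < 0x80
		_, n := binary.Uvarint(dAtA)
		return n
	case 1: // fixed64
		return 8
	case 2: // length-delimited: the length varint plus that many bytes
		l, n := binary.Uvarint(dAtA)
		return n + int(l)
	case 5: // fixed32
		return 4
	default: // groups (3/4) require the depth loop in skipMetricsquery
		return -1
	}
}

func main() {
	fmt.Println(skipLen(0, []byte{0xac, 0x02}))       // 2 (varint 300)
	fmt.Println(skipLen(2, []byte{3, 'a', 'b', 'c'})) // 4
}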
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipMetricsquery(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMetricsquery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMetricsquery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMetricsquery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthMetricsquery - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupMetricsquery - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthMetricsquery - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthMetricsquery = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowMetricsquery = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupMetricsquery = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/jaegertracing/jaeger/proto-gen/api_v2/metrics/openmetrics.pb.go b/vendor/github.com/jaegertracing/jaeger/proto-gen/api_v2/metrics/openmetrics.pb.go deleted file mode 100644 index 37ed3660c4e..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/proto-gen/api_v2/metrics/openmetrics.pb.go +++ /dev/null @@ -1,5253 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: openmetrics.proto - -// The OpenMetrics protobuf schema which defines the protobuf wire format. -// Ensure to interpret "required" as semantically required for a valid message. -// All string fields MUST be UTF-8 encoded strings. - -package metrics - -import ( - encoding_binary "encoding/binary" - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - types "github.com/gogo/protobuf/types" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// The type of a Metric. -type MetricType int32 - -const ( - // Unknown must use unknown MetricPoint values. - MetricType_UNKNOWN MetricType = 0 - // Gauge must use gauge MetricPoint values. - MetricType_GAUGE MetricType = 1 - // Counter must use counter MetricPoint values. - MetricType_COUNTER MetricType = 2 - // State set must use state set MetricPoint values. 
- MetricType_STATE_SET MetricType = 3 - // Info must use info MetricPoint values. - MetricType_INFO MetricType = 4 - // Histogram must use histogram value MetricPoint values. - MetricType_HISTOGRAM MetricType = 5 - // Gauge histogram must use histogram value MetricPoint values. - MetricType_GAUGE_HISTOGRAM MetricType = 6 - // Summary quantiles must use summary value MetricPoint values. - MetricType_SUMMARY MetricType = 7 -) - -var MetricType_name = map[int32]string{ - 0: "UNKNOWN", - 1: "GAUGE", - 2: "COUNTER", - 3: "STATE_SET", - 4: "INFO", - 5: "HISTOGRAM", - 6: "GAUGE_HISTOGRAM", - 7: "SUMMARY", -} - -var MetricType_value = map[string]int32{ - "UNKNOWN": 0, - "GAUGE": 1, - "COUNTER": 2, - "STATE_SET": 3, - "INFO": 4, - "HISTOGRAM": 5, - "GAUGE_HISTOGRAM": 6, - "SUMMARY": 7, -} - -func (x MetricType) String() string { - return proto.EnumName(MetricType_name, int32(x)) -} - -func (MetricType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_0b803df83757ec01, []int{0} -} - -// The top-level container type that is encoded and sent over the wire. -type MetricSet struct { - // Each MetricFamily has one or more MetricPoints for a single Metric. - MetricFamilies []*MetricFamily `protobuf:"bytes,1,rep,name=metric_families,json=metricFamilies,proto3" json:"metric_families,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MetricSet) Reset() { *m = MetricSet{} } -func (m *MetricSet) String() string { return proto.CompactTextString(m) } -func (*MetricSet) ProtoMessage() {} -func (*MetricSet) Descriptor() ([]byte, []int) { - return fileDescriptor_0b803df83757ec01, []int{0} -} -func (m *MetricSet) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MetricSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MetricSet.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MetricSet) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricSet.Merge(m, src) -} -func (m *MetricSet) XXX_Size() int { - return m.Size() -} -func (m *MetricSet) XXX_DiscardUnknown() { - xxx_messageInfo_MetricSet.DiscardUnknown(m) -} - -var xxx_messageInfo_MetricSet proto.InternalMessageInfo - -func (m *MetricSet) GetMetricFamilies() []*MetricFamily { - if m != nil { - return m.MetricFamilies - } - return nil -} - -// One or more Metrics for a single MetricFamily, where each Metric -// has one or more MetricPoints. -type MetricFamily struct { - // Required. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Optional. - Type MetricType `protobuf:"varint,2,opt,name=type,proto3,enum=jaeger.api_v2.metrics.MetricType" json:"type,omitempty"` - // Optional. - Unit string `protobuf:"bytes,3,opt,name=unit,proto3" json:"unit,omitempty"` - // Optional. - Help string `protobuf:"bytes,4,opt,name=help,proto3" json:"help,omitempty"` - // Optional. 
- Metrics []*Metric `protobuf:"bytes,5,rep,name=metrics,proto3" json:"metrics,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MetricFamily) Reset() { *m = MetricFamily{} } -func (m *MetricFamily) String() string { return proto.CompactTextString(m) } -func (*MetricFamily) ProtoMessage() {} -func (*MetricFamily) Descriptor() ([]byte, []int) { - return fileDescriptor_0b803df83757ec01, []int{1} -} -func (m *MetricFamily) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MetricFamily) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricFamily.Merge(m, src) -} -func (m *MetricFamily) XXX_Size() int { - return m.Size() -} -func (m *MetricFamily) XXX_DiscardUnknown() { - xxx_messageInfo_MetricFamily.DiscardUnknown(m) -} - -var xxx_messageInfo_MetricFamily proto.InternalMessageInfo - -func (m *MetricFamily) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *MetricFamily) GetType() MetricType { - if m != nil { - return m.Type - } - return MetricType_UNKNOWN -} - -func (m *MetricFamily) GetUnit() string { - if m != nil { - return m.Unit - } - return "" -} - -func (m *MetricFamily) GetHelp() string { - if m != nil { - return m.Help - } - return "" -} - -func (m *MetricFamily) GetMetrics() []*Metric { - if m != nil { - return m.Metrics - } - return nil -} - -// A single metric with a unique set of labels within a metric family. -type Metric struct { - // Optional. - Labels []*Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels,omitempty"` - // Optional. - MetricPoints []*MetricPoint `protobuf:"bytes,2,rep,name=metric_points,json=metricPoints,proto3" json:"metric_points,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Metric) Reset() { *m = Metric{} } -func (m *Metric) String() string { return proto.CompactTextString(m) } -func (*Metric) ProtoMessage() {} -func (*Metric) Descriptor() ([]byte, []int) { - return fileDescriptor_0b803df83757ec01, []int{2} -} -func (m *Metric) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Metric.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Metric) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metric.Merge(m, src) -} -func (m *Metric) XXX_Size() int { - return m.Size() -} -func (m *Metric) XXX_DiscardUnknown() { - xxx_messageInfo_Metric.DiscardUnknown(m) -} - -var xxx_messageInfo_Metric proto.InternalMessageInfo - -func (m *Metric) GetLabels() []*Label { - if m != nil { - return m.Labels - } - return nil -} - -func (m *Metric) GetMetricPoints() []*MetricPoint { - if m != nil { - return m.MetricPoints - } - return nil -} - -// A name-value pair. These are used in multiple places: identifying -// timeseries, value of INFO metrics, and exemplars in Histograms. -type Label struct { - // Required. 
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Required. - Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Label) Reset() { *m = Label{} } -func (m *Label) String() string { return proto.CompactTextString(m) } -func (*Label) ProtoMessage() {} -func (*Label) Descriptor() ([]byte, []int) { - return fileDescriptor_0b803df83757ec01, []int{3} -} -func (m *Label) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Label) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Label.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Label) XXX_Merge(src proto.Message) { - xxx_messageInfo_Label.Merge(m, src) -} -func (m *Label) XXX_Size() int { - return m.Size() -} -func (m *Label) XXX_DiscardUnknown() { - xxx_messageInfo_Label.DiscardUnknown(m) -} - -var xxx_messageInfo_Label proto.InternalMessageInfo - -func (m *Label) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Label) GetValue() string { - if m != nil { - return m.Value - } - return "" -} - -// A MetricPoint in a Metric. -type MetricPoint struct { - // Required. - // - // Types that are valid to be assigned to Value: - // *MetricPoint_UnknownValue - // *MetricPoint_GaugeValue - // *MetricPoint_CounterValue - // *MetricPoint_HistogramValue - // *MetricPoint_StateSetValue - // *MetricPoint_InfoValue - // *MetricPoint_SummaryValue - Value isMetricPoint_Value `protobuf_oneof:"value"` - // Optional. 
- Timestamp *types.Timestamp `protobuf:"bytes,8,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MetricPoint) Reset() { *m = MetricPoint{} } -func (m *MetricPoint) String() string { return proto.CompactTextString(m) } -func (*MetricPoint) ProtoMessage() {} -func (*MetricPoint) Descriptor() ([]byte, []int) { - return fileDescriptor_0b803df83757ec01, []int{4} -} -func (m *MetricPoint) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MetricPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MetricPoint.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MetricPoint) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricPoint.Merge(m, src) -} -func (m *MetricPoint) XXX_Size() int { - return m.Size() -} -func (m *MetricPoint) XXX_DiscardUnknown() { - xxx_messageInfo_MetricPoint.DiscardUnknown(m) -} - -var xxx_messageInfo_MetricPoint proto.InternalMessageInfo - -type isMetricPoint_Value interface { - isMetricPoint_Value() - MarshalTo([]byte) (int, error) - Size() int -} - -type MetricPoint_UnknownValue struct { - UnknownValue *UnknownValue `protobuf:"bytes,1,opt,name=unknown_value,json=unknownValue,proto3,oneof" json:"unknown_value,omitempty"` -} -type MetricPoint_GaugeValue struct { - GaugeValue *GaugeValue `protobuf:"bytes,2,opt,name=gauge_value,json=gaugeValue,proto3,oneof" json:"gauge_value,omitempty"` -} -type MetricPoint_CounterValue struct { - CounterValue *CounterValue `protobuf:"bytes,3,opt,name=counter_value,json=counterValue,proto3,oneof" json:"counter_value,omitempty"` -} -type MetricPoint_HistogramValue struct { - HistogramValue *HistogramValue `protobuf:"bytes,4,opt,name=histogram_value,json=histogramValue,proto3,oneof" json:"histogram_value,omitempty"` -} -type MetricPoint_StateSetValue struct { - StateSetValue *StateSetValue `protobuf:"bytes,5,opt,name=state_set_value,json=stateSetValue,proto3,oneof" json:"state_set_value,omitempty"` -} -type MetricPoint_InfoValue struct { - InfoValue *InfoValue `protobuf:"bytes,6,opt,name=info_value,json=infoValue,proto3,oneof" json:"info_value,omitempty"` -} -type MetricPoint_SummaryValue struct { - SummaryValue *SummaryValue `protobuf:"bytes,7,opt,name=summary_value,json=summaryValue,proto3,oneof" json:"summary_value,omitempty"` -} - -func (*MetricPoint_UnknownValue) isMetricPoint_Value() {} -func (*MetricPoint_GaugeValue) isMetricPoint_Value() {} -func (*MetricPoint_CounterValue) isMetricPoint_Value() {} -func (*MetricPoint_HistogramValue) isMetricPoint_Value() {} -func (*MetricPoint_StateSetValue) isMetricPoint_Value() {} -func (*MetricPoint_InfoValue) isMetricPoint_Value() {} -func (*MetricPoint_SummaryValue) isMetricPoint_Value() {} - -func (m *MetricPoint) GetValue() isMetricPoint_Value { - if m != nil { - return m.Value - } - return nil -} - -func (m *MetricPoint) GetUnknownValue() *UnknownValue { - if x, ok := m.GetValue().(*MetricPoint_UnknownValue); ok { - return x.UnknownValue - } - return nil -} - -func (m *MetricPoint) GetGaugeValue() *GaugeValue { - if x, ok := m.GetValue().(*MetricPoint_GaugeValue); ok { - return x.GaugeValue - } - return nil -} - -func (m *MetricPoint) GetCounterValue() *CounterValue { - if x, ok := m.GetValue().(*MetricPoint_CounterValue); ok { - return x.CounterValue - } - return 
nil -} - -func (m *MetricPoint) GetHistogramValue() *HistogramValue { - if x, ok := m.GetValue().(*MetricPoint_HistogramValue); ok { - return x.HistogramValue - } - return nil -} - -func (m *MetricPoint) GetStateSetValue() *StateSetValue { - if x, ok := m.GetValue().(*MetricPoint_StateSetValue); ok { - return x.StateSetValue - } - return nil -} - -func (m *MetricPoint) GetInfoValue() *InfoValue { - if x, ok := m.GetValue().(*MetricPoint_InfoValue); ok { - return x.InfoValue - } - return nil -} - -func (m *MetricPoint) GetSummaryValue() *SummaryValue { - if x, ok := m.GetValue().(*MetricPoint_SummaryValue); ok { - return x.SummaryValue - } - return nil -} - -func (m *MetricPoint) GetTimestamp() *types.Timestamp { - if m != nil { - return m.Timestamp - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*MetricPoint) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*MetricPoint_UnknownValue)(nil), - (*MetricPoint_GaugeValue)(nil), - (*MetricPoint_CounterValue)(nil), - (*MetricPoint_HistogramValue)(nil), - (*MetricPoint_StateSetValue)(nil), - (*MetricPoint_InfoValue)(nil), - (*MetricPoint_SummaryValue)(nil), - } -} - -// Value for UNKNOWN MetricPoint. -type UnknownValue struct { - // Required. - // - // Types that are valid to be assigned to Value: - // *UnknownValue_DoubleValue - // *UnknownValue_IntValue - Value isUnknownValue_Value `protobuf_oneof:"value"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UnknownValue) Reset() { *m = UnknownValue{} } -func (m *UnknownValue) String() string { return proto.CompactTextString(m) } -func (*UnknownValue) ProtoMessage() {} -func (*UnknownValue) Descriptor() ([]byte, []int) { - return fileDescriptor_0b803df83757ec01, []int{5} -} -func (m *UnknownValue) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UnknownValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UnknownValue.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *UnknownValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_UnknownValue.Merge(m, src) -} -func (m *UnknownValue) XXX_Size() int { - return m.Size() -} -func (m *UnknownValue) XXX_DiscardUnknown() { - xxx_messageInfo_UnknownValue.DiscardUnknown(m) -} - -var xxx_messageInfo_UnknownValue proto.InternalMessageInfo - -type isUnknownValue_Value interface { - isUnknownValue_Value() - MarshalTo([]byte) (int, error) - Size() int -} - -type UnknownValue_DoubleValue struct { - DoubleValue float64 `protobuf:"fixed64,1,opt,name=double_value,json=doubleValue,proto3,oneof" json:"double_value,omitempty"` -} -type UnknownValue_IntValue struct { - IntValue int64 `protobuf:"varint,2,opt,name=int_value,json=intValue,proto3,oneof" json:"int_value,omitempty"` -} - -func (*UnknownValue_DoubleValue) isUnknownValue_Value() {} -func (*UnknownValue_IntValue) isUnknownValue_Value() {} - -func (m *UnknownValue) GetValue() isUnknownValue_Value { - if m != nil { - return m.Value - } - return nil -} - -func (m *UnknownValue) GetDoubleValue() float64 { - if x, ok := m.GetValue().(*UnknownValue_DoubleValue); ok { - return x.DoubleValue - } - return 0 -} - -func (m *UnknownValue) GetIntValue() int64 { - if x, ok := m.GetValue().(*UnknownValue_IntValue); ok { - return x.IntValue - } - return 0 -} - -// XXX_OneofWrappers is for the 
internal use of the proto package. -func (*UnknownValue) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*UnknownValue_DoubleValue)(nil), - (*UnknownValue_IntValue)(nil), - } -} - -// Value for GAUGE MetricPoint. -type GaugeValue struct { - // Required. - // - // Types that are valid to be assigned to Value: - // *GaugeValue_DoubleValue - // *GaugeValue_IntValue - Value isGaugeValue_Value `protobuf_oneof:"value"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GaugeValue) Reset() { *m = GaugeValue{} } -func (m *GaugeValue) String() string { return proto.CompactTextString(m) } -func (*GaugeValue) ProtoMessage() {} -func (*GaugeValue) Descriptor() ([]byte, []int) { - return fileDescriptor_0b803df83757ec01, []int{6} -} -func (m *GaugeValue) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GaugeValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GaugeValue.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GaugeValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_GaugeValue.Merge(m, src) -} -func (m *GaugeValue) XXX_Size() int { - return m.Size() -} -func (m *GaugeValue) XXX_DiscardUnknown() { - xxx_messageInfo_GaugeValue.DiscardUnknown(m) -} - -var xxx_messageInfo_GaugeValue proto.InternalMessageInfo - -type isGaugeValue_Value interface { - isGaugeValue_Value() - MarshalTo([]byte) (int, error) - Size() int -} - -type GaugeValue_DoubleValue struct { - DoubleValue float64 `protobuf:"fixed64,1,opt,name=double_value,json=doubleValue,proto3,oneof" json:"double_value,omitempty"` -} -type GaugeValue_IntValue struct { - IntValue int64 `protobuf:"varint,2,opt,name=int_value,json=intValue,proto3,oneof" json:"int_value,omitempty"` -} - -func (*GaugeValue_DoubleValue) isGaugeValue_Value() {} -func (*GaugeValue_IntValue) isGaugeValue_Value() {} - -func (m *GaugeValue) GetValue() isGaugeValue_Value { - if m != nil { - return m.Value - } - return nil -} - -func (m *GaugeValue) GetDoubleValue() float64 { - if x, ok := m.GetValue().(*GaugeValue_DoubleValue); ok { - return x.DoubleValue - } - return 0 -} - -func (m *GaugeValue) GetIntValue() int64 { - if x, ok := m.GetValue().(*GaugeValue_IntValue); ok { - return x.IntValue - } - return 0 -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*GaugeValue) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*GaugeValue_DoubleValue)(nil), - (*GaugeValue_IntValue)(nil), - } -} - -// Value for COUNTER MetricPoint. -type CounterValue struct { - // Required. - // - // Types that are valid to be assigned to Total: - // *CounterValue_DoubleValue - // *CounterValue_IntValue - Total isCounterValue_Total `protobuf_oneof:"total"` - // The time values began being collected for this counter. - // Optional. - Created *types.Timestamp `protobuf:"bytes,3,opt,name=created,proto3" json:"created,omitempty"` - // Optional. 
- Exemplar *Exemplar `protobuf:"bytes,4,opt,name=exemplar,proto3" json:"exemplar,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CounterValue) Reset() { *m = CounterValue{} } -func (m *CounterValue) String() string { return proto.CompactTextString(m) } -func (*CounterValue) ProtoMessage() {} -func (*CounterValue) Descriptor() ([]byte, []int) { - return fileDescriptor_0b803df83757ec01, []int{7} -} -func (m *CounterValue) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CounterValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CounterValue.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CounterValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_CounterValue.Merge(m, src) -} -func (m *CounterValue) XXX_Size() int { - return m.Size() -} -func (m *CounterValue) XXX_DiscardUnknown() { - xxx_messageInfo_CounterValue.DiscardUnknown(m) -} - -var xxx_messageInfo_CounterValue proto.InternalMessageInfo - -type isCounterValue_Total interface { - isCounterValue_Total() - MarshalTo([]byte) (int, error) - Size() int -} - -type CounterValue_DoubleValue struct { - DoubleValue float64 `protobuf:"fixed64,1,opt,name=double_value,json=doubleValue,proto3,oneof" json:"double_value,omitempty"` -} -type CounterValue_IntValue struct { - IntValue uint64 `protobuf:"varint,2,opt,name=int_value,json=intValue,proto3,oneof" json:"int_value,omitempty"` -} - -func (*CounterValue_DoubleValue) isCounterValue_Total() {} -func (*CounterValue_IntValue) isCounterValue_Total() {} - -func (m *CounterValue) GetTotal() isCounterValue_Total { - if m != nil { - return m.Total - } - return nil -} - -func (m *CounterValue) GetDoubleValue() float64 { - if x, ok := m.GetTotal().(*CounterValue_DoubleValue); ok { - return x.DoubleValue - } - return 0 -} - -func (m *CounterValue) GetIntValue() uint64 { - if x, ok := m.GetTotal().(*CounterValue_IntValue); ok { - return x.IntValue - } - return 0 -} - -func (m *CounterValue) GetCreated() *types.Timestamp { - if m != nil { - return m.Created - } - return nil -} - -func (m *CounterValue) GetExemplar() *Exemplar { - if m != nil { - return m.Exemplar - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*CounterValue) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*CounterValue_DoubleValue)(nil), - (*CounterValue_IntValue)(nil), - } -} - -// Value for HISTOGRAM or GAUGE_HISTOGRAM MetricPoint. -type HistogramValue struct { - // Optional. - // - // Types that are valid to be assigned to Sum: - // *HistogramValue_DoubleValue - // *HistogramValue_IntValue - Sum isHistogramValue_Sum `protobuf_oneof:"sum"` - // Optional. - Count uint64 `protobuf:"varint,3,opt,name=count,proto3" json:"count,omitempty"` - // The time values began being collected for this histogram. - // Optional. - Created *types.Timestamp `protobuf:"bytes,4,opt,name=created,proto3" json:"created,omitempty"` - // Optional. 
- Buckets []*HistogramValue_Bucket `protobuf:"bytes,5,rep,name=buckets,proto3" json:"buckets,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *HistogramValue) Reset() { *m = HistogramValue{} } -func (m *HistogramValue) String() string { return proto.CompactTextString(m) } -func (*HistogramValue) ProtoMessage() {} -func (*HistogramValue) Descriptor() ([]byte, []int) { - return fileDescriptor_0b803df83757ec01, []int{8} -} -func (m *HistogramValue) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HistogramValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_HistogramValue.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *HistogramValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_HistogramValue.Merge(m, src) -} -func (m *HistogramValue) XXX_Size() int { - return m.Size() -} -func (m *HistogramValue) XXX_DiscardUnknown() { - xxx_messageInfo_HistogramValue.DiscardUnknown(m) -} - -var xxx_messageInfo_HistogramValue proto.InternalMessageInfo - -type isHistogramValue_Sum interface { - isHistogramValue_Sum() - MarshalTo([]byte) (int, error) - Size() int -} - -type HistogramValue_DoubleValue struct { - DoubleValue float64 `protobuf:"fixed64,1,opt,name=double_value,json=doubleValue,proto3,oneof" json:"double_value,omitempty"` -} -type HistogramValue_IntValue struct { - IntValue int64 `protobuf:"varint,2,opt,name=int_value,json=intValue,proto3,oneof" json:"int_value,omitempty"` -} - -func (*HistogramValue_DoubleValue) isHistogramValue_Sum() {} -func (*HistogramValue_IntValue) isHistogramValue_Sum() {} - -func (m *HistogramValue) GetSum() isHistogramValue_Sum { - if m != nil { - return m.Sum - } - return nil -} - -func (m *HistogramValue) GetDoubleValue() float64 { - if x, ok := m.GetSum().(*HistogramValue_DoubleValue); ok { - return x.DoubleValue - } - return 0 -} - -func (m *HistogramValue) GetIntValue() int64 { - if x, ok := m.GetSum().(*HistogramValue_IntValue); ok { - return x.IntValue - } - return 0 -} - -func (m *HistogramValue) GetCount() uint64 { - if m != nil { - return m.Count - } - return 0 -} - -func (m *HistogramValue) GetCreated() *types.Timestamp { - if m != nil { - return m.Created - } - return nil -} - -func (m *HistogramValue) GetBuckets() []*HistogramValue_Bucket { - if m != nil { - return m.Buckets - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*HistogramValue) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*HistogramValue_DoubleValue)(nil), - (*HistogramValue_IntValue)(nil), - } -} - -// Bucket is the number of values for a bucket in the histogram -// with an optional exemplar. -type HistogramValue_Bucket struct { - // Required. - Count uint64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` - // Optional. - UpperBound float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound,proto3" json:"upper_bound,omitempty"` - // Optional. 
- Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar,proto3" json:"exemplar,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *HistogramValue_Bucket) Reset() { *m = HistogramValue_Bucket{} } -func (m *HistogramValue_Bucket) String() string { return proto.CompactTextString(m) } -func (*HistogramValue_Bucket) ProtoMessage() {} -func (*HistogramValue_Bucket) Descriptor() ([]byte, []int) { - return fileDescriptor_0b803df83757ec01, []int{8, 0} -} -func (m *HistogramValue_Bucket) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HistogramValue_Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_HistogramValue_Bucket.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *HistogramValue_Bucket) XXX_Merge(src proto.Message) { - xxx_messageInfo_HistogramValue_Bucket.Merge(m, src) -} -func (m *HistogramValue_Bucket) XXX_Size() int { - return m.Size() -} -func (m *HistogramValue_Bucket) XXX_DiscardUnknown() { - xxx_messageInfo_HistogramValue_Bucket.DiscardUnknown(m) -} - -var xxx_messageInfo_HistogramValue_Bucket proto.InternalMessageInfo - -func (m *HistogramValue_Bucket) GetCount() uint64 { - if m != nil { - return m.Count - } - return 0 -} - -func (m *HistogramValue_Bucket) GetUpperBound() float64 { - if m != nil { - return m.UpperBound - } - return 0 -} - -func (m *HistogramValue_Bucket) GetExemplar() *Exemplar { - if m != nil { - return m.Exemplar - } - return nil -} - -type Exemplar struct { - // Required. - Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` - // Optional. - Timestamp *types.Timestamp `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - // Labels are additional information about the exemplar value (e.g. trace id). - // Optional. - Label []*Label `protobuf:"bytes,3,rep,name=label,proto3" json:"label,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Exemplar) Reset() { *m = Exemplar{} } -func (m *Exemplar) String() string { return proto.CompactTextString(m) } -func (*Exemplar) ProtoMessage() {} -func (*Exemplar) Descriptor() ([]byte, []int) { - return fileDescriptor_0b803df83757ec01, []int{9} -} -func (m *Exemplar) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Exemplar) XXX_Merge(src proto.Message) { - xxx_messageInfo_Exemplar.Merge(m, src) -} -func (m *Exemplar) XXX_Size() int { - return m.Size() -} -func (m *Exemplar) XXX_DiscardUnknown() { - xxx_messageInfo_Exemplar.DiscardUnknown(m) -} - -var xxx_messageInfo_Exemplar proto.InternalMessageInfo - -func (m *Exemplar) GetValue() float64 { - if m != nil { - return m.Value - } - return 0 -} - -func (m *Exemplar) GetTimestamp() *types.Timestamp { - if m != nil { - return m.Timestamp - } - return nil -} - -func (m *Exemplar) GetLabel() []*Label { - if m != nil { - return m.Label - } - return nil -} - -// Value for STATE_SET MetricPoint. -type StateSetValue struct { - // Optional. 
- States []*StateSetValue_State `protobuf:"bytes,1,rep,name=states,proto3" json:"states,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StateSetValue) Reset() { *m = StateSetValue{} } -func (m *StateSetValue) String() string { return proto.CompactTextString(m) } -func (*StateSetValue) ProtoMessage() {} -func (*StateSetValue) Descriptor() ([]byte, []int) { - return fileDescriptor_0b803df83757ec01, []int{10} -} -func (m *StateSetValue) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StateSetValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StateSetValue.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *StateSetValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_StateSetValue.Merge(m, src) -} -func (m *StateSetValue) XXX_Size() int { - return m.Size() -} -func (m *StateSetValue) XXX_DiscardUnknown() { - xxx_messageInfo_StateSetValue.DiscardUnknown(m) -} - -var xxx_messageInfo_StateSetValue proto.InternalMessageInfo - -func (m *StateSetValue) GetStates() []*StateSetValue_State { - if m != nil { - return m.States - } - return nil -} - -type StateSetValue_State struct { - // Required. - Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` - // Required. - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StateSetValue_State) Reset() { *m = StateSetValue_State{} } -func (m *StateSetValue_State) String() string { return proto.CompactTextString(m) } -func (*StateSetValue_State) ProtoMessage() {} -func (*StateSetValue_State) Descriptor() ([]byte, []int) { - return fileDescriptor_0b803df83757ec01, []int{10, 0} -} -func (m *StateSetValue_State) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StateSetValue_State) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StateSetValue_State.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *StateSetValue_State) XXX_Merge(src proto.Message) { - xxx_messageInfo_StateSetValue_State.Merge(m, src) -} -func (m *StateSetValue_State) XXX_Size() int { - return m.Size() -} -func (m *StateSetValue_State) XXX_DiscardUnknown() { - xxx_messageInfo_StateSetValue_State.DiscardUnknown(m) -} - -var xxx_messageInfo_StateSetValue_State proto.InternalMessageInfo - -func (m *StateSetValue_State) GetEnabled() bool { - if m != nil { - return m.Enabled - } - return false -} - -func (m *StateSetValue_State) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -// Value for INFO MetricPoint. -type InfoValue struct { - // Optional. 
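// A StateSetValue is a set of named booleans (the STATE_SET point type).
// A minimal construction sketch with illustrative state names:
//
//	ss := &StateSetValue{States: []*StateSetValue_State{
//		{Name: "ok", Enabled: true},
//		{Name: "degraded", Enabled: false},
//	}}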
- Info []*Label `protobuf:"bytes,1,rep,name=info,proto3" json:"info,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *InfoValue) Reset() { *m = InfoValue{} } -func (m *InfoValue) String() string { return proto.CompactTextString(m) } -func (*InfoValue) ProtoMessage() {} -func (*InfoValue) Descriptor() ([]byte, []int) { - return fileDescriptor_0b803df83757ec01, []int{11} -} -func (m *InfoValue) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *InfoValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_InfoValue.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *InfoValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_InfoValue.Merge(m, src) -} -func (m *InfoValue) XXX_Size() int { - return m.Size() -} -func (m *InfoValue) XXX_DiscardUnknown() { - xxx_messageInfo_InfoValue.DiscardUnknown(m) -} - -var xxx_messageInfo_InfoValue proto.InternalMessageInfo - -func (m *InfoValue) GetInfo() []*Label { - if m != nil { - return m.Info - } - return nil -} - -// Value for SUMMARY MetricPoint. -type SummaryValue struct { - // Optional. - // - // Types that are valid to be assigned to Sum: - // *SummaryValue_DoubleValue - // *SummaryValue_IntValue - Sum isSummaryValue_Sum `protobuf_oneof:"sum"` - // Optional. - Count uint64 `protobuf:"varint,3,opt,name=count,proto3" json:"count,omitempty"` - // The time sum and count values began being collected for this summary. - // Optional. - Created *types.Timestamp `protobuf:"bytes,4,opt,name=created,proto3" json:"created,omitempty"` - // Optional. 
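// An InfoValue carries constant metadata as labels (the INFO point type,
// commonly build or version information). Illustrative values only:
//
//	info := &InfoValue{Info: []*Label{
//		{Name: "version", Value: "1.2.3"},
//		{Name: "revision", Value: "abc123"},
//	}}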
- Quantile []*SummaryValue_Quantile `protobuf:"bytes,5,rep,name=quantile,proto3" json:"quantile,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SummaryValue) Reset() { *m = SummaryValue{} } -func (m *SummaryValue) String() string { return proto.CompactTextString(m) } -func (*SummaryValue) ProtoMessage() {} -func (*SummaryValue) Descriptor() ([]byte, []int) { - return fileDescriptor_0b803df83757ec01, []int{12} -} -func (m *SummaryValue) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SummaryValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SummaryValue.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *SummaryValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_SummaryValue.Merge(m, src) -} -func (m *SummaryValue) XXX_Size() int { - return m.Size() -} -func (m *SummaryValue) XXX_DiscardUnknown() { - xxx_messageInfo_SummaryValue.DiscardUnknown(m) -} - -var xxx_messageInfo_SummaryValue proto.InternalMessageInfo - -type isSummaryValue_Sum interface { - isSummaryValue_Sum() - MarshalTo([]byte) (int, error) - Size() int -} - -type SummaryValue_DoubleValue struct { - DoubleValue float64 `protobuf:"fixed64,1,opt,name=double_value,json=doubleValue,proto3,oneof" json:"double_value,omitempty"` -} -type SummaryValue_IntValue struct { - IntValue int64 `protobuf:"varint,2,opt,name=int_value,json=intValue,proto3,oneof" json:"int_value,omitempty"` -} - -func (*SummaryValue_DoubleValue) isSummaryValue_Sum() {} -func (*SummaryValue_IntValue) isSummaryValue_Sum() {} - -func (m *SummaryValue) GetSum() isSummaryValue_Sum { - if m != nil { - return m.Sum - } - return nil -} - -func (m *SummaryValue) GetDoubleValue() float64 { - if x, ok := m.GetSum().(*SummaryValue_DoubleValue); ok { - return x.DoubleValue - } - return 0 -} - -func (m *SummaryValue) GetIntValue() int64 { - if x, ok := m.GetSum().(*SummaryValue_IntValue); ok { - return x.IntValue - } - return 0 -} - -func (m *SummaryValue) GetCount() uint64 { - if m != nil { - return m.Count - } - return 0 -} - -func (m *SummaryValue) GetCreated() *types.Timestamp { - if m != nil { - return m.Created - } - return nil -} - -func (m *SummaryValue) GetQuantile() []*SummaryValue_Quantile { - if m != nil { - return m.Quantile - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*SummaryValue) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*SummaryValue_DoubleValue)(nil), - (*SummaryValue_IntValue)(nil), - } -} - -type SummaryValue_Quantile struct { - // Required. - Quantile float64 `protobuf:"fixed64,1,opt,name=quantile,proto3" json:"quantile,omitempty"` - // Required. 
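// Reading a SummaryValue follows the same oneof pattern as HistogramValue;
// the quantiles are a plain slice. A minimal sketch (sv is a hypothetical
// *SummaryValue):
//
//	for _, q := range sv.GetQuantile() {
//		fmt.Printf("q%g = %g\n", q.Quantile, q.Value)
//	}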
- Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SummaryValue_Quantile) Reset() { *m = SummaryValue_Quantile{} } -func (m *SummaryValue_Quantile) String() string { return proto.CompactTextString(m) } -func (*SummaryValue_Quantile) ProtoMessage() {} -func (*SummaryValue_Quantile) Descriptor() ([]byte, []int) { - return fileDescriptor_0b803df83757ec01, []int{12, 0} -} -func (m *SummaryValue_Quantile) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SummaryValue_Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SummaryValue_Quantile.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *SummaryValue_Quantile) XXX_Merge(src proto.Message) { - xxx_messageInfo_SummaryValue_Quantile.Merge(m, src) -} -func (m *SummaryValue_Quantile) XXX_Size() int { - return m.Size() -} -func (m *SummaryValue_Quantile) XXX_DiscardUnknown() { - xxx_messageInfo_SummaryValue_Quantile.DiscardUnknown(m) -} - -var xxx_messageInfo_SummaryValue_Quantile proto.InternalMessageInfo - -func (m *SummaryValue_Quantile) GetQuantile() float64 { - if m != nil { - return m.Quantile - } - return 0 -} - -func (m *SummaryValue_Quantile) GetValue() float64 { - if m != nil { - return m.Value - } - return 0 -} - -func init() { - proto.RegisterEnum("jaeger.api_v2.metrics.MetricType", MetricType_name, MetricType_value) - proto.RegisterType((*MetricSet)(nil), "jaeger.api_v2.metrics.MetricSet") - proto.RegisterType((*MetricFamily)(nil), "jaeger.api_v2.metrics.MetricFamily") - proto.RegisterType((*Metric)(nil), "jaeger.api_v2.metrics.Metric") - proto.RegisterType((*Label)(nil), "jaeger.api_v2.metrics.Label") - proto.RegisterType((*MetricPoint)(nil), "jaeger.api_v2.metrics.MetricPoint") - proto.RegisterType((*UnknownValue)(nil), "jaeger.api_v2.metrics.UnknownValue") - proto.RegisterType((*GaugeValue)(nil), "jaeger.api_v2.metrics.GaugeValue") - proto.RegisterType((*CounterValue)(nil), "jaeger.api_v2.metrics.CounterValue") - proto.RegisterType((*HistogramValue)(nil), "jaeger.api_v2.metrics.HistogramValue") - proto.RegisterType((*HistogramValue_Bucket)(nil), "jaeger.api_v2.metrics.HistogramValue.Bucket") - proto.RegisterType((*Exemplar)(nil), "jaeger.api_v2.metrics.Exemplar") - proto.RegisterType((*StateSetValue)(nil), "jaeger.api_v2.metrics.StateSetValue") - proto.RegisterType((*StateSetValue_State)(nil), "jaeger.api_v2.metrics.StateSetValue.State") - proto.RegisterType((*InfoValue)(nil), "jaeger.api_v2.metrics.InfoValue") - proto.RegisterType((*SummaryValue)(nil), "jaeger.api_v2.metrics.SummaryValue") - proto.RegisterType((*SummaryValue_Quantile)(nil), "jaeger.api_v2.metrics.SummaryValue.Quantile") -} - -func init() { proto.RegisterFile("openmetrics.proto", fileDescriptor_0b803df83757ec01) } - -var fileDescriptor_0b803df83757ec01 = []byte{ - // 981 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x96, 0xdd, 0x6e, 0xe3, 0x44, - 0x14, 0xc7, 0xeb, 0xc4, 0xf9, 0x3a, 0x49, 0xda, 0x30, 0x2c, 0x92, 0x15, 0xb1, 0x6d, 0xf1, 0x82, - 0x54, 0xad, 0x90, 0x0b, 0x65, 0x17, 0x90, 0x80, 0x8b, 0x64, 0x49, 0x93, 0xc2, 0x36, 0x5d, 0x26, - 0x09, 0xa8, 0x70, 0x11, 0x39, 0xe9, 0xd4, 0x35, 0xeb, 0x2f, 0xec, 0xf1, 0x42, 0x05, 0xf7, 0x48, - 0x5c, 0xf0, 
0x26, 0xbc, 0x00, 0x4f, 0xc0, 0x15, 0xe2, 0x0d, 0x40, 0xbd, 0xe7, 0x1d, 0xd0, 0x7c, - 0xc5, 0x0e, 0xda, 0x84, 0x2c, 0xda, 0x8b, 0xbd, 0x9b, 0x73, 0xfc, 0x3f, 0xbf, 0x99, 0x73, 0x7c, - 0x7c, 0xc6, 0xf0, 0x52, 0x18, 0x91, 0xc0, 0x27, 0x34, 0x76, 0xe7, 0x89, 0x15, 0xc5, 0x21, 0x0d, - 0xd1, 0x2b, 0x5f, 0xdb, 0xc4, 0x21, 0xb1, 0x65, 0x47, 0xee, 0xf4, 0xc9, 0x91, 0x25, 0x1f, 0xb6, - 0xf7, 0x9c, 0x30, 0x74, 0x3c, 0x72, 0xc8, 0x45, 0xb3, 0xf4, 0xf2, 0x90, 0xba, 0x3e, 0x49, 0xa8, - 0xed, 0x47, 0x22, 0xae, 0x7d, 0xcb, 0x09, 0x9d, 0x90, 0x2f, 0x0f, 0xd9, 0x4a, 0x78, 0xcd, 0x73, - 0xa8, 0x9d, 0x72, 0xc2, 0x88, 0x50, 0xf4, 0x10, 0x76, 0x04, 0x6e, 0x7a, 0x69, 0xfb, 0xae, 0xe7, - 0x92, 0xc4, 0xd0, 0xf6, 0x8b, 0x07, 0xf5, 0xa3, 0x3b, 0xd6, 0x53, 0x37, 0xb5, 0x44, 0xe8, 0x31, - 0x13, 0x5f, 0xe3, 0x6d, 0x3f, 0xb3, 0x5c, 0x92, 0x98, 0xbf, 0x6a, 0xd0, 0xc8, 0x0b, 0x10, 0x02, - 0x3d, 0xb0, 0x7d, 0x62, 0x68, 0xfb, 0xda, 0x41, 0x0d, 0xf3, 0x35, 0xba, 0x0f, 0x3a, 0xbd, 0x8e, - 0x88, 0x51, 0xd8, 0xd7, 0x0e, 0xb6, 0x8f, 0x5e, 0x5b, 0xbb, 0xcf, 0xf8, 0x3a, 0x22, 0x98, 0xcb, - 0x19, 0x2a, 0x0d, 0x5c, 0x6a, 0x14, 0x05, 0x8a, 0xad, 0x99, 0xef, 0x8a, 0x78, 0x91, 0xa1, 0x0b, - 0x1f, 0x5b, 0xa3, 0xf7, 0xa0, 0x22, 0x19, 0x46, 0x89, 0x67, 0x72, 0x7b, 0xed, 0x0e, 0x58, 0xa9, - 0xcd, 0x1f, 0x35, 0x28, 0x0b, 0x1f, 0xba, 0x07, 0x65, 0xcf, 0x9e, 0x11, 0x4f, 0x15, 0xe3, 0xd5, - 0x15, 0x88, 0x87, 0x4c, 0x84, 0xa5, 0x16, 0xf5, 0xa1, 0x29, 0x6b, 0x19, 0x85, 0x6e, 0x40, 0x13, - 0xa3, 0xc0, 0x83, 0xcd, 0xb5, 0xfb, 0x3f, 0x62, 0x52, 0xdc, 0xf0, 0x33, 0x23, 0x31, 0xdf, 0x86, - 0x12, 0x27, 0x3f, 0xb5, 0x7c, 0xb7, 0xa0, 0xf4, 0xc4, 0xf6, 0x52, 0x51, 0xbf, 0x1a, 0x16, 0x86, - 0xf9, 0xa7, 0x0e, 0xf5, 0x1c, 0x10, 0x7d, 0x02, 0xcd, 0x34, 0x78, 0x1c, 0x84, 0xdf, 0x06, 0x53, - 0xa1, 0x66, 0x88, 0xd5, 0x6f, 0x75, 0x22, 0xb4, 0x9f, 0x33, 0xe9, 0x60, 0x0b, 0x37, 0xd2, 0x9c, - 0x8d, 0x3e, 0x86, 0xba, 0x63, 0xa7, 0x0e, 0x99, 0x66, 0xfb, 0xd6, 0x57, 0xbe, 0xb7, 0x3e, 0x53, - 0x2a, 0x0e, 0x38, 0x0b, 0x8b, 0x9d, 0x68, 0x1e, 0xa6, 0x01, 0x25, 0xb1, 0xe4, 0x14, 0xd7, 0x9e, - 0xe8, 0x81, 0xd0, 0x2e, 0x4e, 0x34, 0xcf, 0xd9, 0xe8, 0x11, 0xec, 0x5c, 0xb9, 0x09, 0x0d, 0x9d, - 0xd8, 0xf6, 0x25, 0x4d, 0xe7, 0xb4, 0x37, 0x56, 0xd0, 0x06, 0x4a, 0xad, 0x78, 0xdb, 0x57, 0x4b, - 0x1e, 0x34, 0x84, 0x9d, 0x84, 0xda, 0x94, 0x4c, 0x13, 0x42, 0x25, 0xb1, 0xc4, 0x89, 0xaf, 0xaf, - 0x20, 0x8e, 0x98, 0x7a, 0x44, 0xa8, 0x02, 0x36, 0x93, 0xbc, 0x03, 0x75, 0x00, 0xdc, 0xe0, 0x32, - 0x94, 0xa8, 0x32, 0x47, 0xed, 0xaf, 0x40, 0x9d, 0x04, 0x97, 0xa1, 0xc2, 0xd4, 0x5c, 0x65, 0xb0, - 0x82, 0x25, 0xa9, 0xef, 0xdb, 0xf1, 0xb5, 0xa4, 0x54, 0xd6, 0x16, 0x6c, 0x24, 0xb4, 0x8b, 0x82, - 0x25, 0x39, 0x1b, 0xbd, 0x0f, 0xb5, 0xc5, 0x70, 0x30, 0xaa, 0x9c, 0xd3, 0xb6, 0xc4, 0xf8, 0xb0, - 0xd4, 0xf8, 0xb0, 0xc6, 0x4a, 0x81, 0x33, 0x71, 0xb7, 0x22, 0xdb, 0xcd, 0xfc, 0x0a, 0x1a, 0xf9, - 0x2e, 0x41, 0x77, 0xa0, 0x71, 0x11, 0xa6, 0x33, 0x8f, 0xe4, 0x1a, 0x4c, 0x1b, 0x6c, 0xe1, 0xba, - 0xf0, 0x0a, 0xd1, 0x6d, 0xa8, 0xb9, 0x01, 0xcd, 0x35, 0x4e, 0x71, 0xb0, 0x85, 0xab, 0x6e, 0x20, - 0xaa, 0x94, 0xc1, 0xcf, 0x01, 0xb2, 0xc6, 0x79, 0xbe, 0xe8, 0xdf, 0x35, 0x68, 0xe4, 0x9b, 0xe9, - 0x7f, 0xd2, 0xf5, 0x3c, 0x1d, 0xdd, 0x83, 0xca, 0x3c, 0x26, 0x36, 0x25, 0x17, 0xb2, 0x8d, 0xd7, - 0x55, 0x53, 0x49, 0xd1, 0x07, 0x50, 0x25, 0xdf, 0x11, 0x3f, 0xf2, 0xec, 0x58, 0xf6, 0xeb, 0xde, - 0x8a, 0x97, 0xd9, 0x93, 0x32, 0xbc, 0x08, 0x60, 0x09, 0xd1, 0x90, 0xda, 0x9e, 0xf9, 0x77, 0x01, - 0xb6, 0x97, 0xfb, 0xf9, 0x79, 0x14, 0x8c, 0xcd, 0x15, 0xfe, 0x8d, 0xf1, 0x84, 0x74, 0x2c, 0x8c, - 0x7c, 0xa2, 0xfa, 0xe6, 0x89, 0x1e, 
0x43, 0x65, 0x96, 0xce, 0x1f, 0x13, 0xaa, 0x66, 0xf0, 0x9b, - 0x1b, 0x7d, 0x97, 0x56, 0x97, 0x07, 0x61, 0x15, 0xdc, 0xfe, 0x01, 0xca, 0xc2, 0x95, 0x9d, 0x4e, - 0xcb, 0x9f, 0x6e, 0x0f, 0xea, 0x69, 0x14, 0x91, 0x78, 0x3a, 0x0b, 0xd3, 0xe0, 0x82, 0x27, 0xa5, - 0x61, 0xe0, 0xae, 0x2e, 0xf3, 0x2c, 0x55, 0xbc, 0xf8, 0xac, 0x15, 0x2f, 0x41, 0x31, 0x49, 0x7d, - 0xf3, 0x67, 0x0d, 0xaa, 0xea, 0x69, 0x36, 0x7d, 0x79, 0x89, 0xe5, 0xf4, 0x5d, 0xfe, 0xbc, 0x0a, - 0xcf, 0xf0, 0x79, 0xa1, 0x23, 0x28, 0xf1, 0xdb, 0xc3, 0x28, 0x6e, 0x70, 0xd1, 0x08, 0xa9, 0xf9, - 0x93, 0x06, 0xcd, 0xa5, 0xf1, 0x83, 0xba, 0x50, 0xe6, 0xe3, 0x47, 0xdd, 0x57, 0x77, 0x37, 0x19, - 0x5a, 0xc2, 0xc2, 0x32, 0xb2, 0x7d, 0x1f, 0x4a, 0xdc, 0x81, 0x0c, 0xa8, 0x90, 0xc0, 0x9e, 0x79, - 0xe4, 0x82, 0x27, 0x59, 0xc5, 0xca, 0x5c, 0x5c, 0x47, 0x85, 0xec, 0x3a, 0x32, 0x3f, 0x82, 0xda, - 0x62, 0x7e, 0xa1, 0xb7, 0x40, 0x67, 0xf3, 0x6b, 0xa3, 0x5b, 0x93, 0x2b, 0xcd, 0x5f, 0x0a, 0xd0, - 0xc8, 0x4f, 0xae, 0x17, 0xae, 0x95, 0x07, 0x50, 0xfd, 0x26, 0xb5, 0x03, 0xea, 0x7a, 0xe4, 0x3f, - 0x7a, 0x39, 0x9f, 0x86, 0xf5, 0x99, 0x8c, 0xc1, 0x8b, 0xe8, 0xf6, 0x87, 0x50, 0x55, 0x5e, 0xd4, - 0xce, 0x51, 0x45, 0x27, 0x2d, 0xec, 0xe5, 0x0b, 0x5e, 0xb5, 0x98, 0x6c, 0xc6, 0xbb, 0xdf, 0x03, - 0x64, 0x7f, 0x46, 0xa8, 0x0e, 0x95, 0xc9, 0xf0, 0xd3, 0xe1, 0xd9, 0x17, 0xc3, 0xd6, 0x16, 0xaa, - 0x41, 0xa9, 0xdf, 0x99, 0xf4, 0x7b, 0x2d, 0x8d, 0xf9, 0x1f, 0x9c, 0x4d, 0x86, 0xe3, 0x1e, 0x6e, - 0x15, 0x50, 0x13, 0x6a, 0xa3, 0x71, 0x67, 0xdc, 0x9b, 0x8e, 0x7a, 0xe3, 0x56, 0x11, 0x55, 0x41, - 0x3f, 0x19, 0x1e, 0x9f, 0xb5, 0x74, 0xf6, 0x60, 0x70, 0x32, 0x1a, 0x9f, 0xf5, 0x71, 0xe7, 0xb4, - 0x55, 0x42, 0x2f, 0xc3, 0x0e, 0x8f, 0x9f, 0x66, 0xce, 0x32, 0x23, 0x8d, 0x26, 0xa7, 0xa7, 0x1d, - 0x7c, 0xde, 0xaa, 0x74, 0xdf, 0xfd, 0xed, 0x66, 0x57, 0xfb, 0xe3, 0x66, 0x57, 0xfb, 0xeb, 0x66, - 0x57, 0x83, 0x3d, 0x37, 0x94, 0x95, 0xa0, 0xb1, 0x3d, 0x77, 0x03, 0xe7, 0x5f, 0x05, 0xf9, 0x52, - 0xfd, 0x59, 0xcd, 0xca, 0xbc, 0xc0, 0xef, 0xfc, 0x13, 0x00, 0x00, 0xff, 0xff, 0x0a, 0x75, 0xe1, - 0xe3, 0xdb, 0x0a, 0x00, 0x00, -} - -func (m *MetricSet) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MetricSet) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MetricSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.MetricFamilies) > 0 { - for iNdEx := len(m.MetricFamilies) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.MetricFamilies[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOpenmetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *MetricFamily) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MetricFamily) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MetricFamily) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if 
len(m.Metrics) > 0 { - for iNdEx := len(m.Metrics) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Metrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOpenmetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - } - if len(m.Help) > 0 { - i -= len(m.Help) - copy(dAtA[i:], m.Help) - i = encodeVarintOpenmetrics(dAtA, i, uint64(len(m.Help))) - i-- - dAtA[i] = 0x22 - } - if len(m.Unit) > 0 { - i -= len(m.Unit) - copy(dAtA[i:], m.Unit) - i = encodeVarintOpenmetrics(dAtA, i, uint64(len(m.Unit))) - i-- - dAtA[i] = 0x1a - } - if m.Type != 0 { - i = encodeVarintOpenmetrics(dAtA, i, uint64(m.Type)) - i-- - dAtA[i] = 0x10 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintOpenmetrics(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Metric) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Metric) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Metric) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.MetricPoints) > 0 { - for iNdEx := len(m.MetricPoints) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.MetricPoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOpenmetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.Labels) > 0 { - for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOpenmetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *Label) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Label) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Label) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Value) > 0 { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintOpenmetrics(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintOpenmetrics(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *MetricPoint) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MetricPoint) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MetricPoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Timestamp != 
nil { - { - size, err := m.Timestamp.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOpenmetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - if m.Value != nil { - { - size := m.Value.Size() - i -= size - if _, err := m.Value.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - return len(dAtA) - i, nil -} - -func (m *MetricPoint_UnknownValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MetricPoint_UnknownValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.UnknownValue != nil { - { - size, err := m.UnknownValue.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOpenmetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} -func (m *MetricPoint_GaugeValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MetricPoint_GaugeValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.GaugeValue != nil { - { - size, err := m.GaugeValue.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOpenmetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - return len(dAtA) - i, nil -} -func (m *MetricPoint_CounterValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MetricPoint_CounterValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.CounterValue != nil { - { - size, err := m.CounterValue.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOpenmetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - return len(dAtA) - i, nil -} -func (m *MetricPoint_HistogramValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MetricPoint_HistogramValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.HistogramValue != nil { - { - size, err := m.HistogramValue.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOpenmetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - return len(dAtA) - i, nil -} -func (m *MetricPoint_StateSetValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MetricPoint_StateSetValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.StateSetValue != nil { - { - size, err := m.StateSetValue.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOpenmetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - return len(dAtA) - i, nil -} -func (m *MetricPoint_InfoValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MetricPoint_InfoValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.InfoValue != nil { - { - size, err := m.InfoValue.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOpenmetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - return len(dAtA) - i, nil -} -func (m *MetricPoint_SummaryValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m 
*MetricPoint_SummaryValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.SummaryValue != nil { - { - size, err := m.SummaryValue.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOpenmetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - return len(dAtA) - i, nil -} -func (m *UnknownValue) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UnknownValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UnknownValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Value != nil { - { - size := m.Value.Size() - i -= size - if _, err := m.Value.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - return len(dAtA) - i, nil -} - -func (m *UnknownValue_DoubleValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UnknownValue_DoubleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.DoubleValue)))) - i-- - dAtA[i] = 0x9 - return len(dAtA) - i, nil -} -func (m *UnknownValue_IntValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UnknownValue_IntValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i = encodeVarintOpenmetrics(dAtA, i, uint64(m.IntValue)) - i-- - dAtA[i] = 0x10 - return len(dAtA) - i, nil -} -func (m *GaugeValue) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GaugeValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GaugeValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Value != nil { - { - size := m.Value.Size() - i -= size - if _, err := m.Value.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - return len(dAtA) - i, nil -} - -func (m *GaugeValue_DoubleValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GaugeValue_DoubleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.DoubleValue)))) - i-- - dAtA[i] = 0x9 - return len(dAtA) - i, nil -} -func (m *GaugeValue_IntValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GaugeValue_IntValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i = encodeVarintOpenmetrics(dAtA, i, uint64(m.IntValue)) - i-- - dAtA[i] = 0x10 - return len(dAtA) - i, nil -} -func (m *CounterValue) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - 
return nil, err - } - return dAtA[:n], nil -} - -func (m *CounterValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CounterValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Exemplar != nil { - { - size, err := m.Exemplar.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOpenmetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.Created != nil { - { - size, err := m.Created.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOpenmetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.Total != nil { - { - size := m.Total.Size() - i -= size - if _, err := m.Total.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - return len(dAtA) - i, nil -} - -func (m *CounterValue_DoubleValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CounterValue_DoubleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.DoubleValue)))) - i-- - dAtA[i] = 0x9 - return len(dAtA) - i, nil -} -func (m *CounterValue_IntValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CounterValue_IntValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i = encodeVarintOpenmetrics(dAtA, i, uint64(m.IntValue)) - i-- - dAtA[i] = 0x10 - return len(dAtA) - i, nil -} -func (m *HistogramValue) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HistogramValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HistogramValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Buckets) > 0 { - for iNdEx := len(m.Buckets) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Buckets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOpenmetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - } - if m.Created != nil { - { - size, err := m.Created.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOpenmetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.Count != 0 { - i = encodeVarintOpenmetrics(dAtA, i, uint64(m.Count)) - i-- - dAtA[i] = 0x18 - } - if m.Sum != nil { - { - size := m.Sum.Size() - i -= size - if _, err := m.Sum.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - return len(dAtA) - i, nil -} - -func (m *HistogramValue_DoubleValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HistogramValue_DoubleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.DoubleValue)))) - i-- - dAtA[i] = 0x9 - return len(dAtA) - i, 
nil -} -func (m *HistogramValue_IntValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HistogramValue_IntValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i = encodeVarintOpenmetrics(dAtA, i, uint64(m.IntValue)) - i-- - dAtA[i] = 0x10 - return len(dAtA) - i, nil -} -func (m *HistogramValue_Bucket) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HistogramValue_Bucket) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HistogramValue_Bucket) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Exemplar != nil { - { - size, err := m.Exemplar.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOpenmetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.UpperBound != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.UpperBound)))) - i-- - dAtA[i] = 0x11 - } - if m.Count != 0 { - i = encodeVarintOpenmetrics(dAtA, i, uint64(m.Count)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *Exemplar) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Exemplar) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Exemplar) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Label) > 0 { - for iNdEx := len(m.Label) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Label[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOpenmetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if m.Timestamp != nil { - { - size, err := m.Timestamp.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOpenmetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Value != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) - i-- - dAtA[i] = 0x9 - } - return len(dAtA) - i, nil -} - -func (m *StateSetValue) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StateSetValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StateSetValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.States) > 0 { - for iNdEx := len(m.States) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.States[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOpenmetrics(dAtA, i, 
uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *StateSetValue_State) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StateSetValue_State) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StateSetValue_State) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintOpenmetrics(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x12 - } - if m.Enabled { - i-- - if m.Enabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *InfoValue) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *InfoValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *InfoValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Info) > 0 { - for iNdEx := len(m.Info) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Info[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOpenmetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *SummaryValue) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SummaryValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SummaryValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Quantile) > 0 { - for iNdEx := len(m.Quantile) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Quantile[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOpenmetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - } - if m.Created != nil { - { - size, err := m.Created.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOpenmetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.Count != 0 { - i = encodeVarintOpenmetrics(dAtA, i, uint64(m.Count)) - i-- - dAtA[i] = 0x18 - } - if m.Sum != nil { - { - size := m.Sum.Size() - i -= size - if _, err := m.Sum.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - return len(dAtA) - i, nil -} - -func (m *SummaryValue_DoubleValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SummaryValue_DoubleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], 
uint64(math.Float64bits(float64(m.DoubleValue)))) - i-- - dAtA[i] = 0x9 - return len(dAtA) - i, nil -} -func (m *SummaryValue_IntValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SummaryValue_IntValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i = encodeVarintOpenmetrics(dAtA, i, uint64(m.IntValue)) - i-- - dAtA[i] = 0x10 - return len(dAtA) - i, nil -} -func (m *SummaryValue_Quantile) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SummaryValue_Quantile) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SummaryValue_Quantile) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Value != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) - i-- - dAtA[i] = 0x11 - } - if m.Quantile != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Quantile)))) - i-- - dAtA[i] = 0x9 - } - return len(dAtA) - i, nil -} - -func encodeVarintOpenmetrics(dAtA []byte, offset int, v uint64) int { - offset -= sovOpenmetrics(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *MetricSet) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.MetricFamilies) > 0 { - for _, e := range m.MetricFamilies { - l = e.Size() - n += 1 + l + sovOpenmetrics(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *MetricFamily) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovOpenmetrics(uint64(l)) - } - if m.Type != 0 { - n += 1 + sovOpenmetrics(uint64(m.Type)) - } - l = len(m.Unit) - if l > 0 { - n += 1 + l + sovOpenmetrics(uint64(l)) - } - l = len(m.Help) - if l > 0 { - n += 1 + l + sovOpenmetrics(uint64(l)) - } - if len(m.Metrics) > 0 { - for _, e := range m.Metrics { - l = e.Size() - n += 1 + l + sovOpenmetrics(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Metric) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Labels) > 0 { - for _, e := range m.Labels { - l = e.Size() - n += 1 + l + sovOpenmetrics(uint64(l)) - } - } - if len(m.MetricPoints) > 0 { - for _, e := range m.MetricPoints { - l = e.Size() - n += 1 + l + sovOpenmetrics(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Label) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovOpenmetrics(uint64(l)) - } - l = len(m.Value) - if l > 0 { - n += 1 + l + sovOpenmetrics(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *MetricPoint) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Value != nil { - n += m.Value.Size() - } - if m.Timestamp != nil { - l = m.Timestamp.Size() - n += 1 + l + sovOpenmetrics(uint64(l)) - } - if m.XXX_unrecognized != nil { 
- n += len(m.XXX_unrecognized) - } - return n -} - -func (m *MetricPoint_UnknownValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.UnknownValue != nil { - l = m.UnknownValue.Size() - n += 1 + l + sovOpenmetrics(uint64(l)) - } - return n -} -func (m *MetricPoint_GaugeValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.GaugeValue != nil { - l = m.GaugeValue.Size() - n += 1 + l + sovOpenmetrics(uint64(l)) - } - return n -} -func (m *MetricPoint_CounterValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.CounterValue != nil { - l = m.CounterValue.Size() - n += 1 + l + sovOpenmetrics(uint64(l)) - } - return n -} -func (m *MetricPoint_HistogramValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.HistogramValue != nil { - l = m.HistogramValue.Size() - n += 1 + l + sovOpenmetrics(uint64(l)) - } - return n -} -func (m *MetricPoint_StateSetValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.StateSetValue != nil { - l = m.StateSetValue.Size() - n += 1 + l + sovOpenmetrics(uint64(l)) - } - return n -} -func (m *MetricPoint_InfoValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.InfoValue != nil { - l = m.InfoValue.Size() - n += 1 + l + sovOpenmetrics(uint64(l)) - } - return n -} -func (m *MetricPoint_SummaryValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.SummaryValue != nil { - l = m.SummaryValue.Size() - n += 1 + l + sovOpenmetrics(uint64(l)) - } - return n -} -func (m *UnknownValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Value != nil { - n += m.Value.Size() - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *UnknownValue_DoubleValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 9 - return n -} -func (m *UnknownValue_IntValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovOpenmetrics(uint64(m.IntValue)) - return n -} -func (m *GaugeValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Value != nil { - n += m.Value.Size() - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *GaugeValue_DoubleValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 9 - return n -} -func (m *GaugeValue_IntValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovOpenmetrics(uint64(m.IntValue)) - return n -} -func (m *CounterValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Total != nil { - n += m.Total.Size() - } - if m.Created != nil { - l = m.Created.Size() - n += 1 + l + sovOpenmetrics(uint64(l)) - } - if m.Exemplar != nil { - l = m.Exemplar.Size() - n += 1 + l + sovOpenmetrics(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CounterValue_DoubleValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 9 - return n -} -func (m *CounterValue_IntValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovOpenmetrics(uint64(m.IntValue)) - return n -} -func (m *HistogramValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Sum != nil { - n += m.Sum.Size() - } - if m.Count != 0 { - n += 1 + sovOpenmetrics(uint64(m.Count)) - } - if m.Created != nil { - l = m.Created.Size() - 
n += 1 + l + sovOpenmetrics(uint64(l)) - } - if len(m.Buckets) > 0 { - for _, e := range m.Buckets { - l = e.Size() - n += 1 + l + sovOpenmetrics(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *HistogramValue_DoubleValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 9 - return n -} -func (m *HistogramValue_IntValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovOpenmetrics(uint64(m.IntValue)) - return n -} -func (m *HistogramValue_Bucket) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Count != 0 { - n += 1 + sovOpenmetrics(uint64(m.Count)) - } - if m.UpperBound != 0 { - n += 9 - } - if m.Exemplar != nil { - l = m.Exemplar.Size() - n += 1 + l + sovOpenmetrics(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Exemplar) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Value != 0 { - n += 9 - } - if m.Timestamp != nil { - l = m.Timestamp.Size() - n += 1 + l + sovOpenmetrics(uint64(l)) - } - if len(m.Label) > 0 { - for _, e := range m.Label { - l = e.Size() - n += 1 + l + sovOpenmetrics(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *StateSetValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.States) > 0 { - for _, e := range m.States { - l = e.Size() - n += 1 + l + sovOpenmetrics(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *StateSetValue_State) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Enabled { - n += 2 - } - l = len(m.Name) - if l > 0 { - n += 1 + l + sovOpenmetrics(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *InfoValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Info) > 0 { - for _, e := range m.Info { - l = e.Size() - n += 1 + l + sovOpenmetrics(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *SummaryValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Sum != nil { - n += m.Sum.Size() - } - if m.Count != 0 { - n += 1 + sovOpenmetrics(uint64(m.Count)) - } - if m.Created != nil { - l = m.Created.Size() - n += 1 + l + sovOpenmetrics(uint64(l)) - } - if len(m.Quantile) > 0 { - for _, e := range m.Quantile { - l = e.Size() - n += 1 + l + sovOpenmetrics(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *SummaryValue_DoubleValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 9 - return n -} -func (m *SummaryValue_IntValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovOpenmetrics(uint64(m.IntValue)) - return n -} -func (m *SummaryValue_Quantile) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Quantile != 0 { - n += 9 - } - if m.Value != 0 { - n += 9 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovOpenmetrics(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozOpenmetrics(x uint64) (n int) { - return sovOpenmetrics(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *MetricSet) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx 
< l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MetricSet: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MetricSet: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MetricFamilies", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOpenmetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOpenmetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MetricFamilies = append(m.MetricFamilies, &MetricFamily{}) - if err := m.MetricFamilies[len(m.MetricFamilies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOpenmetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOpenmetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MetricFamily) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MetricFamily: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MetricFamily: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOpenmetrics - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOpenmetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Type |= MetricType(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOpenmetrics - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOpenmetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Unit = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Help", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOpenmetrics - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOpenmetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Help = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOpenmetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOpenmetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Metrics = append(m.Metrics, &Metric{}) - if err := m.Metrics[len(m.Metrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOpenmetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOpenmetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
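// Each iteration of the Unmarshal loops above first decodes a protobuf key:
// a varint equal to (field_number << 3) | wire_type. A standalone sketch of
// that decode (hypothetical helper, not part of this file; it omits the
// overflow and truncation guards the generated code performs):
//
//	func decodeTag(b []byte) (fieldNum int32, wireType int, n int) {
//		var wire uint64
//		for shift := uint(0); ; shift += 7 {
//			wire |= uint64(b[n]&0x7F) << shift
//			n++
//			if b[n-1] < 0x80 { // high bit clear: last varint byte
//				break
//			}
//		}
//		return int32(wire >> 3), int(wire & 0x7), n
//	}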
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Metric) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Metric: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Metric: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOpenmetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOpenmetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Labels = append(m.Labels, &Label{}) - if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MetricPoints", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOpenmetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOpenmetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MetricPoints = append(m.MetricPoints, &MetricPoint{}) - if err := m.MetricPoints[len(m.MetricPoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOpenmetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOpenmetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
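// Length-delimited fields above are framed as varint(length) + payload,
// which is why the Size() methods add 1 (the tag byte) + l +
// sovOpenmetrics(uint64(l)) per field. sovOpenmetrics, defined earlier in
// this file, is simply the varint byte count; an equivalent sketch:
//
//	func varintLen(x uint64) int {
//		return (bits.Len64(x|1) + 6) / 7 // math/bits; x|1 makes zero cost 1 byte
//	}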
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Label) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Label: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Label: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOpenmetrics - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOpenmetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOpenmetrics - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOpenmetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOpenmetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOpenmetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MetricPoint) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MetricPoint: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MetricPoint: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UnknownValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOpenmetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOpenmetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &UnknownValue{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Value = &MetricPoint_UnknownValue{v} - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GaugeValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOpenmetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOpenmetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &GaugeValue{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Value = &MetricPoint_GaugeValue{v} - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CounterValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOpenmetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOpenmetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &CounterValue{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Value = &MetricPoint_CounterValue{v} - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HistogramValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOpenmetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOpenmetrics - } - if postIndex > l { - 
return io.ErrUnexpectedEOF - } - v := &HistogramValue{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Value = &MetricPoint_HistogramValue{v} - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StateSetValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOpenmetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOpenmetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &StateSetValue{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Value = &MetricPoint_StateSetValue{v} - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InfoValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOpenmetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOpenmetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &InfoValue{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Value = &MetricPoint_InfoValue{v} - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SummaryValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOpenmetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOpenmetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &SummaryValue{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Value = &MetricPoint_SummaryValue{v} - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOpenmetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOpenmetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Timestamp == nil { - m.Timestamp = &types.Timestamp{} - } - if err := m.Timestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOpenmetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOpenmetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UnknownValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UnknownValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UnknownValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field DoubleValue", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.Value = &UnknownValue_DoubleValue{float64(math.Float64frombits(v))} - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Value = &UnknownValue_IntValue{v} - default: - iNdEx = preIndex - skippy, err := skipOpenmetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOpenmetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GaugeValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GaugeValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GaugeValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field DoubleValue", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.Value = &GaugeValue_DoubleValue{float64(math.Float64frombits(v))} - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Value = &GaugeValue_IntValue{v} - default: - iNdEx = preIndex - skippy, err := skipOpenmetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOpenmetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CounterValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CounterValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CounterValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field DoubleValue", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.Total = &CounterValue_DoubleValue{float64(math.Float64frombits(v))} - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType) - } - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Total = &CounterValue_IntValue{v} - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOpenmetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOpenmetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Created == nil { - m.Created = &types.Timestamp{} - } - if err := m.Created.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Exemplar", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOpenmetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOpenmetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Exemplar == nil { - m.Exemplar = &Exemplar{} - } - if err := m.Exemplar.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOpenmetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOpenmetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HistogramValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HistogramValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HistogramValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field DoubleValue", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.Sum = &HistogramValue_DoubleValue{float64(math.Float64frombits(v))} - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Sum = &HistogramValue_IntValue{v} - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) - } - m.Count = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Count |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOpenmetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOpenmetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Created == nil { - m.Created = &types.Timestamp{} - } - if err := m.Created.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Buckets", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOpenmetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOpenmetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Buckets = append(m.Buckets, &HistogramValue_Bucket{}) - if err := m.Buckets[len(m.Buckets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOpenmetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - 
return ErrInvalidLengthOpenmetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HistogramValue_Bucket) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Bucket: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Bucket: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) - } - m.Count = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Count |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field UpperBound", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.UpperBound = float64(math.Float64frombits(v)) - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Exemplar", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOpenmetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOpenmetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Exemplar == nil { - m.Exemplar = &Exemplar{} - } - if err := m.Exemplar.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOpenmetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOpenmetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Exemplar) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Exemplar: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Exemplar: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.Value = float64(math.Float64frombits(v)) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOpenmetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOpenmetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Timestamp == nil { - m.Timestamp = &types.Timestamp{} - } - if err := m.Timestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Label", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOpenmetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOpenmetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Label = append(m.Label, &Label{}) - if err := m.Label[len(m.Label)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOpenmetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOpenmetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StateSetValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StateSetValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StateSetValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field States", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOpenmetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOpenmetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.States = append(m.States, &StateSetValue_State{}) - if err := m.States[len(m.States)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOpenmetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOpenmetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StateSetValue_State) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: State: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: State: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Enabled = bool(v != 0) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOpenmetrics - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOpenmetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOpenmetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOpenmetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *InfoValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: InfoValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: InfoValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOpenmetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOpenmetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Info = append(m.Info, &Label{}) - if err := m.Info[len(m.Info)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOpenmetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOpenmetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SummaryValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SummaryValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SummaryValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field DoubleValue", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.Sum = &SummaryValue_DoubleValue{float64(math.Float64frombits(v))} - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Sum = &SummaryValue_IntValue{v} - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) - } - m.Count = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Count |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOpenmetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOpenmetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Created == nil { - m.Created = &types.Timestamp{} - } - if err := m.Created.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Quantile", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOpenmetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOpenmetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Quantile = append(m.Quantile, &SummaryValue_Quantile{}) - if err := m.Quantile[len(m.Quantile)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOpenmetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - 
return ErrInvalidLengthOpenmetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SummaryValue_Quantile) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Quantile: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Quantile: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field Quantile", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.Quantile = float64(math.Float64frombits(v)) - case 2: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.Value = float64(math.Float64frombits(v)) - default: - iNdEx = preIndex - skippy, err := skipOpenmetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOpenmetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipOpenmetrics(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowOpenmetrics - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthOpenmetrics - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupOpenmetrics - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthOpenmetrics - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthOpenmetrics = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowOpenmetrics = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupOpenmetrics = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/jaegertracing/jaeger/proto-gen/api_v2/metrics/otelspankind.pb.go b/vendor/github.com/jaegertracing/jaeger/proto-gen/api_v2/metrics/otelspankind.pb.go deleted file mode 100644 index 9c61619e340..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/proto-gen/api_v2/metrics/otelspankind.pb.go +++ /dev/null @@ -1,99 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: otelspankind.proto - -package metrics - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// SpanKind is the type of span. Can be used to specify additional relationships between spans -// in addition to a parent/child relationship. -type SpanKind int32 - -const ( - // Unspecified. Do NOT use as default. - // Implementations MAY assume SpanKind to be INTERNAL when receiving UNSPECIFIED. - SpanKind_SPAN_KIND_UNSPECIFIED SpanKind = 0 - // Indicates that the span represents an internal operation within an application, - // as opposed to an operation happening at the boundaries. Default value. - SpanKind_SPAN_KIND_INTERNAL SpanKind = 1 - // Indicates that the span covers server-side handling of an RPC or other - // remote network request. 
- SpanKind_SPAN_KIND_SERVER SpanKind = 2 - // Indicates that the span describes a request to some remote service. - SpanKind_SPAN_KIND_CLIENT SpanKind = 3 - // Indicates that the span describes a producer sending a message to a broker. - // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship - // between producer and consumer spans. A PRODUCER span ends when the message was accepted - // by the broker while the logical processing of the message might span a much longer time. - SpanKind_SPAN_KIND_PRODUCER SpanKind = 4 - // Indicates that the span describes consumer receiving a message from a broker. - // Like the PRODUCER kind, there is often no direct critical path latency relationship - // between producer and consumer spans. - SpanKind_SPAN_KIND_CONSUMER SpanKind = 5 -) - -var SpanKind_name = map[int32]string{ - 0: "SPAN_KIND_UNSPECIFIED", - 1: "SPAN_KIND_INTERNAL", - 2: "SPAN_KIND_SERVER", - 3: "SPAN_KIND_CLIENT", - 4: "SPAN_KIND_PRODUCER", - 5: "SPAN_KIND_CONSUMER", -} - -var SpanKind_value = map[string]int32{ - "SPAN_KIND_UNSPECIFIED": 0, - "SPAN_KIND_INTERNAL": 1, - "SPAN_KIND_SERVER": 2, - "SPAN_KIND_CLIENT": 3, - "SPAN_KIND_PRODUCER": 4, - "SPAN_KIND_CONSUMER": 5, -} - -func (x SpanKind) String() string { - return proto.EnumName(SpanKind_name, int32(x)) -} - -func (SpanKind) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77f837d0289d1179, []int{0} -} - -func init() { - proto.RegisterEnum("jaeger.api_v2.metrics.SpanKind", SpanKind_name, SpanKind_value) -} - -func init() { proto.RegisterFile("otelspankind.proto", fileDescriptor_77f837d0289d1179) } - -var fileDescriptor_77f837d0289d1179 = []byte{ - // 224 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xca, 0x2f, 0x49, 0xcd, - 0x29, 0x2e, 0x48, 0xcc, 0xcb, 0xce, 0xcc, 0x4b, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, - 0xcd, 0x4a, 0x4c, 0x4d, 0x4f, 0x2d, 0xd2, 0x4b, 0x2c, 0xc8, 0x8c, 0x2f, 0x33, 0xd2, 0xcb, 0x4d, - 0x2d, 0x29, 0xca, 0x4c, 0x2e, 0x96, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0xab, 0xd0, 0x07, 0xb1, - 0x20, 0x8a, 0xb5, 0x66, 0x32, 0x72, 0x71, 0x04, 0x17, 0x24, 0xe6, 0x79, 0x67, 0xe6, 0xa5, 0x08, - 0x49, 0x72, 0x89, 0x06, 0x07, 0x38, 0xfa, 0xc5, 0x7b, 0x7b, 0xfa, 0xb9, 0xc4, 0x87, 0xfa, 0x05, - 0x07, 0xb8, 0x3a, 0x7b, 0xba, 0x79, 0xba, 0xba, 0x08, 0x30, 0x08, 0x89, 0x71, 0x09, 0x21, 0xa4, - 0x3c, 0xfd, 0x42, 0x5c, 0x83, 0xfc, 0x1c, 0x7d, 0x04, 0x18, 0x85, 0x44, 0xb8, 0x04, 0x10, 0xe2, - 0xc1, 0xae, 0x41, 0x61, 0xae, 0x41, 0x02, 0x4c, 0xa8, 0xa2, 0xce, 0x3e, 0x9e, 0xae, 0x7e, 0x21, - 0x02, 0xcc, 0xa8, 0x66, 0x04, 0x04, 0xf9, 0xbb, 0x84, 0x3a, 0xbb, 0x06, 0x09, 0xb0, 0xa0, 0x8a, - 0x3b, 0xfb, 0xfb, 0x05, 0x87, 0xfa, 0xba, 0x06, 0x09, 0xb0, 0x3a, 0x99, 0x9d, 0x78, 0x24, 0xc7, - 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x5c, 0xf2, 0x99, 0xf9, 0x7a, 0x10, 0x9f, - 0x95, 0x14, 0x25, 0x26, 0x67, 0xe6, 0xa5, 0xa3, 0x79, 0x30, 0x8a, 0x1d, 0xca, 0x48, 0x62, 0x03, - 0x7b, 0xcd, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x87, 0x34, 0x50, 0x06, 0x1d, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/jaegertracing/jaeger/proto-gen/api_v2/query.pb.go b/vendor/github.com/jaegertracing/jaeger/proto-gen/api_v2/query.pb.go index c4898c6c876..311b5c0cd46 100644 --- a/vendor/github.com/jaegertracing/jaeger/proto-gen/api_v2/query.pb.go +++ b/vendor/github.com/jaegertracing/jaeger/proto-gen/api_v2/query.pb.go @@ -37,12 +37,16 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type GetTraceRequest 
struct { TraceID github_com_jaegertracing_jaeger_model.TraceID `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3,customtype=github.com/jaegertracing/jaeger/model.TraceID" json:"trace_id"` // Optional. The start time to search trace ID. - StartTime *time.Time `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3,stdtime" json:"start_time,omitempty"` + StartTime time.Time `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3,stdtime" json:"start_time"` // Optional. The end time to search trace ID. - EndTime *time.Time `protobuf:"bytes,3,opt,name=end_time,json=endTime,proto3,stdtime" json:"end_time,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + EndTime time.Time `protobuf:"bytes,3,opt,name=end_time,json=endTime,proto3,stdtime" json:"end_time"` + // Optional. If set to true, the response will not include any + // enrichments to the trace, such as clock skew adjustment. + // Instead, the trace will be returned exactly as stored. + RawTraces bool `protobuf:"varint,4,opt,name=raw_traces,json=rawTraces,proto3" json:"raw_traces,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *GetTraceRequest) Reset() { *m = GetTraceRequest{} } @@ -78,18 +82,25 @@ func (m *GetTraceRequest) XXX_DiscardUnknown() { var xxx_messageInfo_GetTraceRequest proto.InternalMessageInfo -func (m *GetTraceRequest) GetStartTime() *time.Time { +func (m *GetTraceRequest) GetStartTime() time.Time { if m != nil { return m.StartTime } - return nil + return time.Time{} } -func (m *GetTraceRequest) GetEndTime() *time.Time { +func (m *GetTraceRequest) GetEndTime() time.Time { if m != nil { return m.EndTime } - return nil + return time.Time{} +} + +func (m *GetTraceRequest) GetRawTraces() bool { + if m != nil { + return m.RawTraces + } + return false } type SpansResponseChunk struct { @@ -142,12 +153,12 @@ func (m *SpansResponseChunk) GetSpans() []model.Span { type ArchiveTraceRequest struct { TraceID github_com_jaegertracing_jaeger_model.TraceID `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3,customtype=github.com/jaegertracing/jaeger/model.TraceID" json:"trace_id"` // Optional. The start time to search trace ID. - StartTime *time.Time `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3,stdtime" json:"start_time,omitempty"` + StartTime time.Time `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3,stdtime" json:"start_time"` // Optional. The end time to search trace ID. 
- EndTime *time.Time `protobuf:"bytes,3,opt,name=end_time,json=endTime,proto3,stdtime" json:"end_time,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + EndTime time.Time `protobuf:"bytes,3,opt,name=end_time,json=endTime,proto3,stdtime" json:"end_time"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *ArchiveTraceRequest) Reset() { *m = ArchiveTraceRequest{} } @@ -183,18 +194,18 @@ func (m *ArchiveTraceRequest) XXX_DiscardUnknown() { var xxx_messageInfo_ArchiveTraceRequest proto.InternalMessageInfo -func (m *ArchiveTraceRequest) GetStartTime() *time.Time { +func (m *ArchiveTraceRequest) GetStartTime() time.Time { if m != nil { return m.StartTime } - return nil + return time.Time{} } -func (m *ArchiveTraceRequest) GetEndTime() *time.Time { +func (m *ArchiveTraceRequest) GetEndTime() time.Time { if m != nil { return m.EndTime } - return nil + return time.Time{} } type ArchiveTraceResponse struct { @@ -249,17 +260,21 @@ var xxx_messageInfo_ArchiveTraceResponse proto.InternalMessageInfo // Note: some storage implementations do not guarantee the correct implementation of all parameters. // type TraceQueryParameters struct { - ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` - OperationName string `protobuf:"bytes,2,opt,name=operation_name,json=operationName,proto3" json:"operation_name,omitempty"` - Tags map[string]string `protobuf:"bytes,3,rep,name=tags,proto3" json:"tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - StartTimeMin time.Time `protobuf:"bytes,4,opt,name=start_time_min,json=startTimeMin,proto3,stdtime" json:"start_time_min"` - StartTimeMax time.Time `protobuf:"bytes,5,opt,name=start_time_max,json=startTimeMax,proto3,stdtime" json:"start_time_max"` - DurationMin time.Duration `protobuf:"bytes,6,opt,name=duration_min,json=durationMin,proto3,stdduration" json:"duration_min"` - DurationMax time.Duration `protobuf:"bytes,7,opt,name=duration_max,json=durationMax,proto3,stdduration" json:"duration_max"` - SearchDepth int32 `protobuf:"varint,8,opt,name=search_depth,json=searchDepth,proto3" json:"search_depth,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` + OperationName string `protobuf:"bytes,2,opt,name=operation_name,json=operationName,proto3" json:"operation_name,omitempty"` + Tags map[string]string `protobuf:"bytes,3,rep,name=tags,proto3" json:"tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + StartTimeMin time.Time `protobuf:"bytes,4,opt,name=start_time_min,json=startTimeMin,proto3,stdtime" json:"start_time_min"` + StartTimeMax time.Time `protobuf:"bytes,5,opt,name=start_time_max,json=startTimeMax,proto3,stdtime" json:"start_time_max"` + DurationMin time.Duration `protobuf:"bytes,6,opt,name=duration_min,json=durationMin,proto3,stdduration" json:"duration_min"` + DurationMax time.Duration `protobuf:"bytes,7,opt,name=duration_max,json=durationMax,proto3,stdduration" json:"duration_max"` + SearchDepth int32 `protobuf:"varint,8,opt,name=search_depth,json=searchDepth,proto3" json:"search_depth,omitempty"` + // Optional. 
If set to true, the response will not include any + // enrichments to the trace, such as clock skew adjustment. + // Instead, the trace will be returned exactly as stored. + RawTraces bool `protobuf:"varint,9,opt,name=raw_traces,json=rawTraces,proto3" json:"raw_traces,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *TraceQueryParameters) Reset() { *m = TraceQueryParameters{} } @@ -351,6 +366,13 @@ func (m *TraceQueryParameters) GetSearchDepth() int32 { return 0 } +func (m *TraceQueryParameters) GetRawTraces() bool { + if m != nil { + return m.RawTraces + } + return false +} + type FindTracesRequest struct { Query *TraceQueryParameters `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -771,69 +793,70 @@ func init() { func init() { proto.RegisterFile("query.proto", fileDescriptor_5c6ac9b241082464) } var fileDescriptor_5c6ac9b241082464 = []byte{ - // 983 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x56, 0x4f, 0x73, 0xdb, 0x44, - 0x14, 0x47, 0x8e, 0x1d, 0xdb, 0x4f, 0x76, 0x4b, 0xd7, 0x4e, 0x2b, 0x54, 0xb0, 0x1d, 0x85, 0x76, - 0x32, 0xcc, 0x44, 0x2a, 0xe6, 0x40, 0x29, 0xcc, 0x94, 0xa6, 0x69, 0x3d, 0x05, 0x5a, 0x40, 0xcd, - 0x09, 0x0e, 0x9e, 0x8d, 0xb5, 0xc8, 0xc2, 0xf1, 0xca, 0x95, 0xd6, 0x21, 0x1e, 0x86, 0x0b, 0x9f, - 0x80, 0x19, 0x2e, 0x9c, 0xb8, 0x72, 0xe2, 0x7b, 0xf4, 0xc8, 0x0c, 0x37, 0x0e, 0x81, 0xc9, 0x70, - 0xe4, 0xc0, 0x47, 0x60, 0xf6, 0x8f, 0x14, 0x49, 0xce, 0xa4, 0x69, 0xae, 0x9c, 0xac, 0x7d, 0xfb, - 0xde, 0xef, 0xfd, 0xfd, 0xbd, 0x35, 0xe8, 0xcf, 0xe6, 0x24, 0x5a, 0xd8, 0xb3, 0x28, 0x64, 0x21, - 0x6a, 0x7e, 0x8d, 0x89, 0x4f, 0x22, 0x1b, 0xcf, 0x82, 0xe1, 0x41, 0xdf, 0xd4, 0xa7, 0xa1, 0x47, - 0xf6, 0xe5, 0x9d, 0xd9, 0xf6, 0x43, 0x3f, 0x14, 0x9f, 0x0e, 0xff, 0x52, 0xd2, 0xd7, 0xfd, 0x30, - 0xf4, 0xf7, 0x89, 0x83, 0x67, 0x81, 0x83, 0x29, 0x0d, 0x19, 0x66, 0x41, 0x48, 0x63, 0x75, 0xdb, - 0x55, 0xb7, 0xe2, 0xb4, 0x37, 0xff, 0xca, 0x61, 0xc1, 0x94, 0xc4, 0x0c, 0x4f, 0x67, 0x4a, 0xa1, - 0x53, 0x54, 0xf0, 0xe6, 0x91, 0x40, 0x90, 0xf7, 0xd6, 0x3f, 0x1a, 0x5c, 0x1e, 0x10, 0xb6, 0x1b, - 0xe1, 0x11, 0x71, 0xc9, 0xb3, 0x39, 0x89, 0x19, 0xfa, 0x12, 0x6a, 0x8c, 0x9f, 0x87, 0x81, 0x67, - 0x68, 0x3d, 0x6d, 0xb3, 0xb1, 0xfd, 0xe1, 0xf3, 0xa3, 0xee, 0x2b, 0x7f, 0x1c, 0x75, 0xb7, 0xfc, - 0x80, 0x8d, 0xe7, 0x7b, 0xf6, 0x28, 0x9c, 0x3a, 0x32, 0x13, 0xae, 0x18, 0x50, 0x5f, 0x9d, 0x1c, - 0x99, 0x8f, 0x40, 0x7b, 0xb4, 0x73, 0x7c, 0xd4, 0xad, 0xaa, 0x4f, 0xb7, 0x2a, 0x10, 0x1f, 0x79, - 0xe8, 0x2e, 0x40, 0xcc, 0x70, 0xc4, 0x86, 0x3c, 0x52, 0xa3, 0xd4, 0xd3, 0x36, 0xf5, 0xbe, 0x69, - 0xcb, 0x28, 0xed, 0x24, 0x4a, 0x7b, 0x37, 0x49, 0x63, 0xbb, 0xfc, 0xc3, 0x9f, 0x5d, 0xcd, 0xad, - 0x0b, 0x1b, 0x2e, 0x45, 0xef, 0x43, 0x8d, 0x50, 0x4f, 0x9a, 0xaf, 0x9c, 0xd3, 0xbc, 0x4a, 0xa8, - 0xc7, 0x65, 0xd6, 0x03, 0x40, 0x4f, 0x67, 0x98, 0xc6, 0x2e, 0x89, 0x67, 0x21, 0x8d, 0xc9, 0xfd, - 0xf1, 0x9c, 0x4e, 0x90, 0x03, 0x95, 0x98, 0x4b, 0x0d, 0xad, 0xb7, 0xb2, 0xa9, 0xf7, 0x5b, 0x76, - 0xae, 0x4b, 0x36, 0xb7, 0xd8, 0x2e, 0xf3, 0x12, 0xb8, 0x52, 0xcf, 0xfa, 0x57, 0x83, 0xd6, 0xbd, - 0x68, 0x34, 0x0e, 0x0e, 0xc8, 0xff, 0xa5, 0x72, 0x57, 0xa1, 0x9d, 0xcf, 0x58, 0x16, 0xd0, 0xfa, - 0xa5, 0x0c, 0x6d, 0x21, 0xf9, 0x9c, 0x8f, 0xf9, 0x67, 0x38, 0xc2, 0x53, 0xc2, 0x48, 0x14, 0xa3, - 0x75, 0x68, 0xc4, 0x24, 0x3a, 0x08, 0x46, 0x64, 0x48, 0xf1, 0x94, 0x88, 0x7a, 0xd4, 0x5d, 0x5d, - 0xc9, 0x9e, 0xe0, 0x29, 0x41, 0x37, 0xe0, 0x52, 
0x38, 0x23, 0x72, 0x1e, 0xa5, 0x52, 0x49, 0x28, - 0x35, 0x53, 0xa9, 0x50, 0xbb, 0x07, 0x65, 0x86, 0xfd, 0xd8, 0x58, 0x11, 0xdd, 0xd9, 0x2a, 0x74, - 0xe7, 0x34, 0xe7, 0xf6, 0x2e, 0xf6, 0xe3, 0x07, 0x94, 0x45, 0x0b, 0x57, 0x98, 0xa2, 0x8f, 0xe0, - 0xd2, 0x49, 0xed, 0x86, 0xd3, 0x80, 0x1a, 0xe5, 0x17, 0x16, 0xa0, 0xc6, 0x5b, 0x27, 0x8a, 0xd0, - 0x48, 0x6b, 0xf8, 0x38, 0xa0, 0x45, 0x2c, 0x7c, 0x68, 0x54, 0x2e, 0x86, 0x85, 0x0f, 0xd1, 0x43, - 0x68, 0x24, 0x84, 0x14, 0x51, 0xad, 0x0a, 0xa4, 0xd7, 0x96, 0x90, 0x76, 0x94, 0x92, 0x04, 0xfa, - 0x89, 0x03, 0xe9, 0x89, 0x21, 0x8f, 0x29, 0x87, 0x83, 0x0f, 0x8d, 0xea, 0x45, 0x70, 0xf0, 0xa1, - 0x6c, 0x1a, 0x8e, 0x46, 0xe3, 0xa1, 0x47, 0x66, 0x6c, 0x6c, 0xd4, 0x7a, 0xda, 0x66, 0x85, 0x37, - 0x8d, 0xcb, 0x76, 0xb8, 0xc8, 0x7c, 0x17, 0xea, 0x69, 0x75, 0xd1, 0xab, 0xb0, 0x32, 0x21, 0x0b, - 0xd5, 0x5b, 0xfe, 0x89, 0xda, 0x50, 0x39, 0xc0, 0xfb, 0xf3, 0xa4, 0x95, 0xf2, 0x70, 0xa7, 0x74, - 0x5b, 0xb3, 0x9e, 0xc0, 0x95, 0x87, 0x01, 0xf5, 0x44, 0xbf, 0xe2, 0x84, 0x31, 0xef, 0x41, 0x45, - 0xec, 0x47, 0x01, 0xa1, 0xf7, 0x37, 0xce, 0xd1, 0x5c, 0x57, 0x5a, 0x58, 0x6d, 0x40, 0x03, 0xc2, - 0x9e, 0xca, 0x79, 0x4a, 0x00, 0xad, 0xb7, 0xa1, 0x95, 0x93, 0xca, 0x31, 0x45, 0x26, 0xd4, 0xd4, - 0xe4, 0x49, 0x96, 0xd7, 0xdd, 0xf4, 0x6c, 0x3d, 0x86, 0xf6, 0x80, 0xb0, 0x4f, 0x93, 0x99, 0x4b, - 0x63, 0x33, 0xa0, 0xaa, 0x74, 0x54, 0x82, 0xc9, 0x11, 0x5d, 0x87, 0x3a, 0x5f, 0x04, 0xc3, 0x49, - 0x40, 0x3d, 0x95, 0x68, 0x8d, 0x0b, 0x3e, 0x0e, 0xa8, 0x67, 0x7d, 0x00, 0xf5, 0x14, 0x0b, 0x21, - 0x28, 0x67, 0xa6, 0x5f, 0x7c, 0x9f, 0x6d, 0xbd, 0x80, 0xb5, 0x42, 0x30, 0x2a, 0x83, 0x9b, 0x19, - 0xb2, 0x70, 0x5a, 0x24, 0x79, 0x14, 0xa4, 0xe8, 0x36, 0x40, 0x2a, 0x89, 0x8d, 0x92, 0xe0, 0x8c, - 0x51, 0x28, 0x6b, 0x0a, 0xef, 0x66, 0x74, 0xad, 0x9f, 0x35, 0xb8, 0x3a, 0x20, 0x6c, 0x87, 0xcc, - 0x08, 0xf5, 0x08, 0x1d, 0x05, 0x27, 0x6d, 0xba, 0x9f, 0xdb, 0x3d, 0xda, 0x4b, 0xcc, 0x7b, 0x66, - 0xff, 0xdc, 0xcd, 0xec, 0x9f, 0xd2, 0x4b, 0x40, 0xa4, 0x3b, 0x68, 0x0f, 0xae, 0x2d, 0xc5, 0xa7, - 0xaa, 0x33, 0x80, 0x86, 0x97, 0x91, 0xab, 0x4d, 0xfe, 0x46, 0x21, 0xef, 0xd4, 0x74, 0xf1, 0x49, - 0x40, 0x27, 0x6a, 0xa7, 0xe7, 0x0c, 0xfb, 0xbf, 0x56, 0xa0, 0x21, 0x06, 0x4e, 0x8d, 0x10, 0x9a, - 0x40, 0x2d, 0x79, 0x20, 0x51, 0xa7, 0x80, 0x57, 0x78, 0x39, 0xcd, 0xf5, 0x53, 0x5e, 0x8e, 0xfc, - 0x5b, 0x63, 0x99, 0xdf, 0xff, 0xfe, 0xf7, 0x8f, 0xa5, 0x36, 0x42, 0x8e, 0xd8, 0xeb, 0xb1, 0xf3, - 0x6d, 0xf2, 0x62, 0x7c, 0x77, 0x4b, 0x43, 0x0c, 0x1a, 0xd9, 0x2d, 0x8b, 0xac, 0x02, 0xe0, 0x29, - 0x8f, 0x8e, 0xb9, 0x71, 0xa6, 0x8e, 0x5a, 0xd3, 0xd7, 0x85, 0xdb, 0x35, 0xab, 0xe5, 0x60, 0x79, - 0x9d, 0xf1, 0x8b, 0x7c, 0x80, 0x13, 0x66, 0xa2, 0x5e, 0x01, 0x6f, 0x89, 0xb4, 0xe7, 0x49, 0x13, - 0x09, 0x7f, 0x0d, 0xab, 0xea, 0xc8, 0xdd, 0x71, 0x47, 0x7b, 0xeb, 0x96, 0x86, 0x7c, 0xd0, 0x33, - 0xe4, 0x44, 0xeb, 0xcb, 0xe5, 0x2c, 0xd0, 0xd9, 0xb4, 0xce, 0x52, 0x51, 0xb9, 0x5d, 0x11, 0xbe, - 0x74, 0x54, 0x77, 0x12, 0x4a, 0xa3, 0x10, 0x9a, 0x39, 0x16, 0xa1, 0x8d, 0x65, 0x9c, 0x25, 0xc2, - 0x9b, 0x6f, 0x9e, 0xad, 0xa4, 0xdc, 0xb5, 0x84, 0xbb, 0x26, 0xd2, 0x9d, 0x13, 0xee, 0xa0, 0x6f, - 0xc4, 0xdf, 0xa8, 0xec, 0x68, 0xa2, 0x1b, 0xcb, 0x68, 0xa7, 0x50, 0xcb, 0xbc, 0xf9, 0x22, 0x35, - 0xe5, 0x76, 0x4d, 0xb8, 0xbd, 0x8c, 0x9a, 0x4e, 0x76, 0x5e, 0xb7, 0xb7, 0x9e, 0x1f, 0x77, 0xb4, - 0xdf, 0x8e, 0x3b, 0xda, 0x5f, 0xc7, 0x1d, 0x0d, 0xae, 0x05, 0xa1, 0x9d, 0xfb, 0x7b, 0xa1, 0x50, - 0xbf, 0x58, 0x95, 0xbf, 0x7b, 0xab, 0x82, 0x69, 0xef, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0xe1, - 0x8b, 0x00, 0xa9, 0x96, 0x0a, 0x00, 0x00, + // 995 bytes of a gzipped 
FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x56, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0x67, 0x1d, 0x3b, 0xb6, 0xdf, 0xda, 0x2d, 0x1d, 0x3b, 0xed, 0xb2, 0xa5, 0xb6, 0xb3, 0xa1, + 0x95, 0x85, 0x94, 0xdd, 0x62, 0x0e, 0x94, 0x0a, 0x09, 0x9a, 0xa6, 0xb5, 0x0a, 0xb4, 0xc0, 0x36, + 0x27, 0x38, 0x58, 0x13, 0xef, 0xb0, 0x5e, 0x1c, 0xcf, 0xba, 0xbb, 0xe3, 0x24, 0x16, 0xe2, 0xc2, + 0x27, 0x40, 0xea, 0x85, 0x13, 0xdf, 0x80, 0xaf, 0x81, 0x72, 0x44, 0xe2, 0xc6, 0x21, 0xa0, 0x88, + 0x0f, 0xc1, 0x11, 0xcd, 0x9f, 0x75, 0x76, 0xd7, 0x51, 0x08, 0xe1, 0xd8, 0x93, 0x77, 0xde, 0xbc, + 0xf7, 0x7b, 0x7f, 0x7f, 0x6f, 0x0c, 0xfa, 0x8b, 0x19, 0x89, 0xe6, 0xf6, 0x34, 0x0a, 0x59, 0x88, + 0xea, 0xdf, 0x60, 0xe2, 0x93, 0xc8, 0xc6, 0xd3, 0x60, 0xb0, 0xdf, 0x33, 0xf5, 0x49, 0xe8, 0x91, + 0x3d, 0x79, 0x67, 0x36, 0xfd, 0xd0, 0x0f, 0xc5, 0xa7, 0xc3, 0xbf, 0x94, 0xf4, 0x4d, 0x3f, 0x0c, + 0xfd, 0x3d, 0xe2, 0xe0, 0x69, 0xe0, 0x60, 0x4a, 0x43, 0x86, 0x59, 0x10, 0xd2, 0x58, 0xdd, 0xb6, + 0xd5, 0xad, 0x38, 0xed, 0xce, 0xbe, 0x76, 0x58, 0x30, 0x21, 0x31, 0xc3, 0x93, 0xa9, 0x52, 0x68, + 0xe5, 0x15, 0xbc, 0x59, 0x24, 0x10, 0xe4, 0xbd, 0xf5, 0xb2, 0x00, 0x57, 0xfb, 0x84, 0xed, 0x44, + 0x78, 0x48, 0x5c, 0xf2, 0x62, 0x46, 0x62, 0x86, 0xbe, 0x82, 0x0a, 0xe3, 0xe7, 0x41, 0xe0, 0x19, + 0x5a, 0x47, 0xeb, 0xd6, 0xb6, 0x3e, 0x3a, 0x3a, 0x6e, 0xbf, 0xf6, 0xfb, 0x71, 0x7b, 0xd3, 0x0f, + 0xd8, 0x68, 0xb6, 0x6b, 0x0f, 0xc3, 0x89, 0x23, 0x33, 0xe1, 0x8a, 0x01, 0xf5, 0xd5, 0xc9, 0x91, + 0xf9, 0x08, 0xb4, 0x27, 0xdb, 0x27, 0xc7, 0xed, 0xb2, 0xfa, 0x74, 0xcb, 0x02, 0xf1, 0x89, 0x87, + 0x1e, 0x02, 0xc4, 0x0c, 0x47, 0x6c, 0xc0, 0x23, 0x35, 0x0a, 0x1d, 0xad, 0xab, 0xf7, 0x4c, 0x5b, + 0x46, 0x69, 0x27, 0x51, 0xda, 0x3b, 0x49, 0x1a, 0x5b, 0x15, 0xee, 0xfa, 0x87, 0x3f, 0xda, 0x9a, + 0x5b, 0x15, 0x76, 0xfc, 0x06, 0x7d, 0x08, 0x15, 0x42, 0x3d, 0x09, 0xb1, 0xf2, 0x1f, 0x20, 0xca, + 0x84, 0x7a, 0x02, 0xe0, 0x16, 0x40, 0x84, 0x0f, 0x06, 0x22, 0xa8, 0xd8, 0x28, 0x76, 0xb4, 0x6e, + 0xc5, 0xad, 0x46, 0xf8, 0x40, 0x84, 0x1b, 0x5b, 0x8f, 0x00, 0x3d, 0x9f, 0x62, 0x1a, 0xbb, 0x24, + 0x9e, 0x86, 0x34, 0x26, 0x0f, 0x47, 0x33, 0x3a, 0x46, 0x0e, 0x94, 0x62, 0x2e, 0x35, 0xb4, 0xce, + 0x4a, 0x57, 0xef, 0x35, 0xec, 0x4c, 0x33, 0x6d, 0x6e, 0xb1, 0x55, 0xe4, 0xbe, 0x5c, 0xa9, 0x67, + 0xfd, 0xad, 0x41, 0xe3, 0x41, 0x34, 0x1c, 0x05, 0xfb, 0xe4, 0x15, 0x2b, 0xb0, 0x75, 0x1d, 0x9a, + 0xd9, 0xcc, 0x65, 0x21, 0xad, 0x5f, 0x8a, 0xd0, 0x14, 0x92, 0x2f, 0x38, 0x2b, 0x3e, 0xc7, 0x11, + 0x9e, 0x10, 0x46, 0xa2, 0x18, 0xad, 0x43, 0x2d, 0x26, 0xd1, 0x7e, 0x30, 0x24, 0x03, 0x8a, 0x27, + 0x44, 0xd4, 0xa5, 0xea, 0xea, 0x4a, 0xf6, 0x0c, 0x4f, 0x08, 0xba, 0x0d, 0x57, 0xc2, 0x29, 0x91, + 0xe3, 0x2b, 0x95, 0x0a, 0x42, 0xa9, 0xbe, 0x90, 0x0a, 0xb5, 0x07, 0x50, 0x64, 0xd8, 0x8f, 0x8d, + 0x15, 0xd1, 0xa5, 0xcd, 0x5c, 0x97, 0xce, 0x72, 0x6e, 0xef, 0x60, 0x3f, 0x7e, 0x44, 0x59, 0x34, + 0x77, 0x85, 0x29, 0xfa, 0x18, 0xae, 0x9c, 0xd6, 0x70, 0x30, 0x09, 0xa8, 0x18, 0x91, 0x8b, 0x16, + 0xa1, 0xb6, 0xa8, 0xe3, 0xd3, 0x80, 0xe6, 0xb1, 0xf0, 0xa1, 0x51, 0xba, 0x1c, 0x16, 0x3e, 0x44, + 0x8f, 0xa1, 0x96, 0xf0, 0x57, 0x44, 0xb5, 0x2a, 0x90, 0xde, 0x58, 0x42, 0xda, 0x56, 0x4a, 0x12, + 0xe8, 0x47, 0x0e, 0xa4, 0x27, 0x86, 0x3c, 0xa6, 0x0c, 0x0e, 0x3e, 0x34, 0xca, 0x97, 0xc1, 0xc1, + 0x87, 0xb2, 0x69, 0x38, 0x1a, 0x8e, 0x06, 0x1e, 0x99, 0xb2, 0x91, 0x51, 0xe9, 0x68, 0xdd, 0x12, + 0x6f, 0x1a, 0x97, 0x6d, 0x73, 0x51, 0x8e, 0x69, 0xd5, 0x1c, 0xd3, 0xcc, 0xf7, 0xa0, 0xba, 0x28, + 0x3e, 0x7a, 0x1d, 0x56, 0xc6, 0x64, 0xae, 0x5a, 0xcf, 0x3f, 0x51, 0x13, 0x4a, 0xfb, 0x78, 0x6f, + 
0x96, 0x74, 0x5a, 0x1e, 0xee, 0x17, 0xee, 0x69, 0xd6, 0x33, 0xb8, 0xf6, 0x38, 0xa0, 0x9e, 0x84, + 0x49, 0x88, 0xf5, 0x3e, 0x94, 0xc4, 0xb6, 0x15, 0x10, 0x7a, 0x6f, 0xe3, 0x02, 0xbd, 0x77, 0xa5, + 0x85, 0xd5, 0x04, 0xd4, 0x27, 0xec, 0xb9, 0x1c, 0xb7, 0x04, 0xd0, 0x7a, 0x07, 0x1a, 0x19, 0xa9, + 0x9c, 0x62, 0x64, 0x42, 0x45, 0x0d, 0xa6, 0x5c, 0x06, 0x55, 0x77, 0x71, 0xb6, 0x9e, 0x42, 0xb3, + 0x4f, 0xd8, 0x67, 0xc9, 0x48, 0x2e, 0x62, 0x33, 0xa0, 0xac, 0x74, 0x54, 0x82, 0xc9, 0x11, 0xdd, + 0x84, 0x2a, 0xdf, 0x17, 0x83, 0x71, 0x40, 0x3d, 0x95, 0x68, 0x85, 0x0b, 0x3e, 0x09, 0xa8, 0x67, + 0x7d, 0x00, 0xd5, 0x05, 0x16, 0x42, 0x50, 0x4c, 0x91, 0x43, 0x7c, 0x9f, 0x6f, 0x3d, 0x87, 0xb5, + 0x5c, 0x30, 0x2a, 0x83, 0x3b, 0x29, 0x2e, 0x71, 0xd6, 0x24, 0x79, 0xe4, 0xa4, 0xe8, 0x1e, 0xc0, + 0x42, 0x12, 0x1b, 0x05, 0x41, 0x29, 0x23, 0x57, 0xd6, 0x05, 0xbc, 0x9b, 0xd2, 0xb5, 0x7e, 0xd2, + 0xe0, 0x7a, 0x9f, 0xb0, 0x6d, 0x32, 0x25, 0xd4, 0x23, 0x74, 0x18, 0x9c, 0xb6, 0x29, 0xbb, 0xa2, + 0xb4, 0xff, 0xbf, 0xa2, 0x0a, 0x97, 0x59, 0x51, 0xbb, 0x70, 0x63, 0x29, 0x3e, 0x55, 0x9d, 0x3e, + 0xd4, 0xbc, 0x94, 0x5c, 0x2d, 0xfc, 0x5b, 0xb9, 0xbc, 0x17, 0xa6, 0xf3, 0x4f, 0x03, 0x3a, 0x56, + 0xab, 0x3f, 0x63, 0xd8, 0xfb, 0xb9, 0x04, 0x35, 0x31, 0x70, 0x6a, 0x84, 0xd0, 0x18, 0x2a, 0xc9, + 0x73, 0x8b, 0x5a, 0x39, 0xbc, 0xdc, 0x3b, 0x6c, 0xae, 0x9f, 0xf1, 0xc0, 0x64, 0x9f, 0x24, 0xcb, + 0xfc, 0xfe, 0xb7, 0xbf, 0x5e, 0x16, 0x9a, 0x08, 0x39, 0x92, 0x60, 0xce, 0xb7, 0xc9, 0xc3, 0xf2, + 0xdd, 0x5d, 0x0d, 0x31, 0xa8, 0xa5, 0x97, 0x30, 0xb2, 0x72, 0x80, 0x67, 0xbc, 0x4d, 0xe6, 0xc6, + 0xb9, 0x3a, 0x6a, 0x8b, 0xdf, 0x14, 0x6e, 0xd7, 0xac, 0x86, 0x83, 0xe5, 0x75, 0xca, 0x2f, 0xf2, + 0x01, 0x4e, 0x99, 0x89, 0x3a, 0x39, 0xbc, 0x25, 0xd2, 0x5e, 0x24, 0x4d, 0x24, 0xfc, 0xd5, 0xac, + 0xb2, 0x23, 0x57, 0xcb, 0x7d, 0xed, 0xed, 0xbb, 0x1a, 0xf2, 0x41, 0x4f, 0x91, 0x13, 0xad, 0x2f, + 0x97, 0x33, 0x47, 0x67, 0xd3, 0x3a, 0x4f, 0x45, 0xe5, 0x76, 0x4d, 0xf8, 0xd2, 0x51, 0xd5, 0x49, + 0x28, 0x8d, 0x42, 0xa8, 0x67, 0x58, 0x84, 0x36, 0x96, 0x71, 0x96, 0x08, 0x6f, 0xbe, 0x75, 0xbe, + 0x92, 0x72, 0xd7, 0x10, 0xee, 0xea, 0x48, 0x77, 0x4e, 0xb9, 0x83, 0x0e, 0xc4, 0x9f, 0xb2, 0xf4, + 0x68, 0xa2, 0xdb, 0xcb, 0x68, 0x67, 0x50, 0xcb, 0xbc, 0xf3, 0x6f, 0x6a, 0xca, 0xed, 0x9a, 0x70, + 0x7b, 0x15, 0xd5, 0x9d, 0xf4, 0xbc, 0x6e, 0x6d, 0x1e, 0x9d, 0xb4, 0xb4, 0x5f, 0x4f, 0x5a, 0xda, + 0x9f, 0x27, 0x2d, 0x0d, 0x6e, 0x04, 0xa1, 0x9d, 0xf9, 0x17, 0xa2, 0x50, 0xbf, 0x5c, 0x95, 0xbf, + 0xbb, 0xab, 0x82, 0x69, 0xef, 0xfe, 0x13, 0x00, 0x00, 0xff, 0xff, 0x43, 0x45, 0x1e, 0x73, 0xe4, + 0x0a, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
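// Editorial sketch, not part of the generated diff: how a caller might use
// the new raw_traces flag introduced above. Only TraceQueryParameters,
// FindTracesRequest, and the RawTraces field are confirmed by the hunks in
// this file; the api_v2 package alias and QueryServiceClient are assumptions
// about the surrounding generated code. The magic tag bytes in the marshal
// hunks follow the proto wire format (tag = field_number<<3 | wire_type):
// bool field 9 on TraceQueryParameters encodes as 9<<3|0 = 0x48, and bool
// field 4 on GetTraceRequest as 4<<3|0 = 0x20. The gzipped descriptor blob
// is simply re-serialized whenever fields are added, hence the wholesale
// byte-array churn above.

package main

import (
	"context"

	"github.com/jaegertracing/jaeger/proto-gen/api_v2" // assumed import path
)

func findRawTraces(ctx context.Context, client api_v2.QueryServiceClient) error {
	// RawTraces asks the query service to return traces exactly as stored,
	// skipping enrichments such as clock-skew adjustment.
	req := &api_v2.FindTracesRequest{
		Query: &api_v2.TraceQueryParameters{
			ServiceName: "frontend", // hypothetical service name
			SearchDepth: 20,
			RawTraces:   true,
		},
	}
	_, err := client.FindTraces(ctx, req)
	return err
}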
@@ -1175,26 +1198,32 @@ func (m *GetTraceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if m.EndTime != nil { - n1, err1 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.EndTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.EndTime):]) - if err1 != nil { - return 0, err1 - } - i -= n1 - i = encodeVarintQuery(dAtA, i, uint64(n1)) + if m.RawTraces { i-- - dAtA[i] = 0x1a - } - if m.StartTime != nil { - n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.StartTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.StartTime):]) - if err2 != nil { - return 0, err2 + if m.RawTraces { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i -= n2 - i = encodeVarintQuery(dAtA, i, uint64(n2)) i-- - dAtA[i] = 0x12 + dAtA[i] = 0x20 + } + n1, err1 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.EndTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.EndTime):]) + if err1 != nil { + return 0, err1 } + i -= n1 + i = encodeVarintQuery(dAtA, i, uint64(n1)) + i-- + dAtA[i] = 0x1a + n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.StartTime):]) + if err2 != nil { + return 0, err2 + } + i -= n2 + i = encodeVarintQuery(dAtA, i, uint64(n2)) + i-- + dAtA[i] = 0x12 { size := m.TraceID.Size() i -= size @@ -1273,26 +1302,22 @@ func (m *ArchiveTraceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if m.EndTime != nil { - n3, err3 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.EndTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.EndTime):]) - if err3 != nil { - return 0, err3 - } - i -= n3 - i = encodeVarintQuery(dAtA, i, uint64(n3)) - i-- - dAtA[i] = 0x1a + n3, err3 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.EndTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.EndTime):]) + if err3 != nil { + return 0, err3 } - if m.StartTime != nil { - n4, err4 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.StartTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.StartTime):]) - if err4 != nil { - return 0, err4 - } - i -= n4 - i = encodeVarintQuery(dAtA, i, uint64(n4)) - i-- - dAtA[i] = 0x12 + i -= n3 + i = encodeVarintQuery(dAtA, i, uint64(n3)) + i-- + dAtA[i] = 0x1a + n4, err4 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.StartTime):]) + if err4 != nil { + return 0, err4 } + i -= n4 + i = encodeVarintQuery(dAtA, i, uint64(n4)) + i-- + dAtA[i] = 0x12 { size := m.TraceID.Size() i -= size @@ -1357,6 +1382,16 @@ func (m *TraceQueryParameters) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if m.RawTraces { + i-- + if m.RawTraces { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + } if m.SearchDepth != 0 { i = encodeVarintQuery(dAtA, i, uint64(m.SearchDepth)) i-- @@ -1767,13 +1802,12 @@ func (m *GetTraceRequest) Size() (n int) { _ = l l = m.TraceID.Size() n += 1 + l + sovQuery(uint64(l)) - if m.StartTime != nil { - l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.StartTime) - n += 1 + l + sovQuery(uint64(l)) - } - if m.EndTime != nil { - l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.EndTime) - n += 1 + l + sovQuery(uint64(l)) + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.StartTime) + n += 1 + l + sovQuery(uint64(l)) + l = 
github_com_gogo_protobuf_types.SizeOfStdTime(m.EndTime) + n += 1 + l + sovQuery(uint64(l)) + if m.RawTraces { + n += 2 } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) @@ -1807,14 +1841,10 @@ func (m *ArchiveTraceRequest) Size() (n int) { _ = l l = m.TraceID.Size() n += 1 + l + sovQuery(uint64(l)) - if m.StartTime != nil { - l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.StartTime) - n += 1 + l + sovQuery(uint64(l)) - } - if m.EndTime != nil { - l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.EndTime) - n += 1 + l + sovQuery(uint64(l)) - } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.StartTime) + n += 1 + l + sovQuery(uint64(l)) + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.EndTime) + n += 1 + l + sovQuery(uint64(l)) if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -1866,6 +1896,9 @@ func (m *TraceQueryParameters) Size() (n int) { if m.SearchDepth != 0 { n += 1 + sovQuery(uint64(m.SearchDepth)) } + if m.RawTraces { + n += 2 + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -2113,10 +2146,7 @@ func (m *GetTraceRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.StartTime == nil { - m.StartTime = new(time.Time) - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.StartTime, dAtA[iNdEx:postIndex]); err != nil { + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.StartTime, dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2149,13 +2179,30 @@ func (m *GetTraceRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.EndTime == nil { - m.EndTime = new(time.Time) - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.EndTime, dAtA[iNdEx:postIndex]); err != nil { + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.EndTime, dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RawTraces", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RawTraces = bool(v != 0) default: iNdEx = preIndex skippy, err := skipQuery(dAtA[iNdEx:]) @@ -2354,10 +2401,7 @@ func (m *ArchiveTraceRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.StartTime == nil { - m.StartTime = new(time.Time) - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.StartTime, dAtA[iNdEx:postIndex]); err != nil { + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.StartTime, dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2390,10 +2434,7 @@ func (m *ArchiveTraceRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.EndTime == nil { - m.EndTime = new(time.Time) - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.EndTime, dAtA[iNdEx:postIndex]); err != nil { + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.EndTime, dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2841,6 +2882,26 @@ func (m *TraceQueryParameters) Unmarshal(dAtA []byte) error { break } } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RawTraces", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RawTraces = bool(v != 0) default: iNdEx = preIndex skippy, err := skipQuery(dAtA[iNdEx:]) diff --git a/vendor/github.com/jaegertracing/jaeger/proto-gen/storage_v1/storage.pb.go b/vendor/github.com/jaegertracing/jaeger/proto-gen/storage_v1/storage.pb.go index 5564abb7ed4..72e11ee49ce 100644 --- a/vendor/github.com/jaegertracing/jaeger/proto-gen/storage_v1/storage.pb.go +++ b/vendor/github.com/jaegertracing/jaeger/proto-gen/storage_v1/storage.pb.go @@ -304,6 +304,8 @@ var xxx_messageInfo_CloseWriterResponse proto.InternalMessageInfo type GetTraceRequest struct { TraceID github_com_jaegertracing_jaeger_model.TraceID `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3,customtype=github.com/jaegertracing/jaeger/model.TraceID" json:"trace_id"` + StartTime time.Time `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3,stdtime" json:"start_time"` + EndTime time.Time `protobuf:"bytes,3,opt,name=end_time,json=endTime,proto3,stdtime" json:"end_time"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -342,6 +344,20 @@ func (m *GetTraceRequest) XXX_DiscardUnknown() { var xxx_messageInfo_GetTraceRequest proto.InternalMessageInfo +func (m *GetTraceRequest) GetStartTime() time.Time { + if m != nil { + return m.StartTime + } + return time.Time{} +} + +func (m *GetTraceRequest) GetEndTime() time.Time { + if m != nil { + return m.EndTime + } + return time.Time{} +} + type GetServicesRequest struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -1006,77 +1022,78 @@ func init() { func init() { proto.RegisterFile("storage.proto", fileDescriptor_0d2c4ccf1453ffdb) } var fileDescriptor_0d2c4ccf1453ffdb = []byte{ - // 1117 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0x4f, 0x6f, 0xdc, 0xc4, - 0x1b, 0xfe, 0x39, 0xd9, 0x6d, 0x76, 0xdf, 0xdd, 0xb4, 0xc9, 0xec, 0xf6, 0x57, 0xd7, 0xd0, 0x24, - 0x18, 0x9a, 0x04, 0x04, 0xde, 0x66, 0x39, 0x80, 0xa0, 0x08, 0x9a, 0x3f, 0x8d, 0x02, 0x14, 0x8a, - 0x13, 0xb5, 0x12, 0x85, 0xac, 0x66, 0xe3, 0xc1, 0x19, 0xb2, 0x1e, 0x6f, 0xed, 0xf1, 0x2a, 0x11, - 0xe2, 0xc6, 0x07, 0xe0, 0xc8, 0x89, 0x13, 0x12, 0xdf, 0x83, 0x53, 0x8f, 0x9c, 0x39, 0x04, 0x94, - 0x2b, 0x5f, 0x02, 0x79, 0x66, 0xec, 0xd8, 0x6b, 0x2b, 0x49, 0xa3, 0xdc, 0x3c, 0xef, 0x3c, 0xf3, - 0xbc, 0xff, 0x66, 0x9e, 0xd7, 0x30, 0x1d, 0x72, 0x3f, 0xc0, 0x2e, 0xb1, 0x86, 0x81, 0xcf, 0x7d, - 0x34, 0xfb, 0x3d, 0x26, 0x2e, 0x09, 0xac, 0xc4, 0x3a, 0x5a, 0x31, 0xda, 0xae, 0xef, 0xfa, 0x62, - 0xb7, 0x13, 0x7f, 0x49, 0xa0, 0x31, 0xef, 0xfa, 0xbe, 0x3b, 0x20, 0x1d, 0xb1, 0xea, 0x47, 0xdf, - 0x75, 0x38, 0xf5, 0x48, 0xc8, 0xb1, 0x37, 0x54, 0x80, 0xb9, 0x71, 0x80, 0x13, 0x05, 0x98, 0x53, - 0x9f, 0xa9, 0xfd, 0x86, 0xe7, 0x3b, 0x64, 0x20, 0x17, 0xe6, 0xaf, 0x1a, 0xfc, 0x7f, 0x93, 0xf0, - 0x75, 0x32, 0x24, 0xcc, 0x21, 0x6c, 0x8f, 0x92, 0xd0, 0x26, 0xcf, 0x23, 0x12, 0x72, 0xb4, 0x06, - 0x10, 0x72, 0x1c, 0xf0, 0x5e, 0xec, 0x40, 0xd7, 0x16, 0xb4, 0xe5, 0x46, 0xd7, 0xb0, 0x24, 0xb9, - 0x95, 0x90, 0x5b, 0x3b, 0x89, 0xf7, 0xd5, 0xda, 0x8b, 0xe3, 0xf9, 0xff, 0xfd, 0xfc, 0xf7, 0xbc, - 0x66, 0xd7, 0xc5, 0xb9, 0x78, 0x07, 0x7d, 0x0c, 0x35, 0xc2, 0x1c, 0x49, 0x31, 0xf1, 0x12, 0x14, - 0x53, 0x84, 0x39, 0xb1, 0xdd, 0xec, 0xc3, 0xad, 0x42, 0x7c, 0xe1, 0xd0, 0x67, 0x21, 0x41, 0x9b, - 0xd0, 0x74, 0x32, 0x76, 0x5d, 0x5b, 0x98, 
0x5c, 0x6e, 0x74, 0xef, 0x58, 0xaa, 0x92, 0x78, 0x48, - 0x7b, 0xa3, 0xae, 0x95, 0x1e, 0x3d, 0xfa, 0x9c, 0xb2, 0x83, 0xd5, 0x4a, 0xec, 0xc2, 0xce, 0x1d, - 0x34, 0x3f, 0x84, 0x99, 0xa7, 0x01, 0xe5, 0x64, 0x7b, 0x88, 0x59, 0x92, 0xfd, 0x12, 0x54, 0xc2, - 0x21, 0x66, 0x2a, 0xef, 0xd6, 0x18, 0xa9, 0x40, 0x0a, 0x80, 0xd9, 0x82, 0xd9, 0xcc, 0x61, 0x19, - 0x9a, 0xd9, 0x06, 0xb4, 0x36, 0xf0, 0x43, 0x22, 0x76, 0x02, 0xc5, 0x69, 0xde, 0x84, 0x56, 0xce, - 0xaa, 0xc0, 0x0c, 0x6e, 0x6c, 0x12, 0xbe, 0x13, 0xe0, 0x3d, 0x92, 0x78, 0x7f, 0x06, 0x35, 0x1e, - 0xaf, 0x7b, 0xd4, 0x11, 0x11, 0x34, 0x57, 0x3f, 0x89, 0xe3, 0xfe, 0xeb, 0x78, 0xfe, 0x1d, 0x97, - 0xf2, 0xfd, 0xa8, 0x6f, 0xed, 0xf9, 0x5e, 0x47, 0xc6, 0x14, 0x03, 0x29, 0x73, 0xd5, 0xaa, 0x23, - 0xbb, 0x2b, 0xd8, 0xb6, 0xd6, 0x4f, 0x8e, 0xe7, 0xa7, 0xd4, 0xa7, 0x3d, 0x25, 0x18, 0xb7, 0x9c, - 0x38, 0xb8, 0x4d, 0xc2, 0xb7, 0x49, 0x30, 0xa2, 0x7b, 0x69, 0xbb, 0xcd, 0x15, 0x68, 0xe5, 0xac, - 0xaa, 0xc8, 0x06, 0xd4, 0x42, 0x65, 0x13, 0x05, 0xae, 0xdb, 0xe9, 0xda, 0x7c, 0x04, 0xed, 0x4d, - 0xc2, 0xbf, 0x1c, 0x12, 0x79, 0xbf, 0xd2, 0x9b, 0xa3, 0xc3, 0x94, 0xc2, 0x88, 0xe0, 0xeb, 0x76, - 0xb2, 0x44, 0xaf, 0x40, 0x3d, 0x2e, 0x5a, 0xef, 0x80, 0x32, 0x47, 0xdc, 0x87, 0x98, 0x6e, 0x88, - 0xd9, 0x67, 0x94, 0x39, 0xe6, 0x7d, 0xa8, 0xa7, 0x5c, 0x08, 0x41, 0x85, 0x61, 0x2f, 0x21, 0x10, - 0xdf, 0x67, 0x9f, 0xfe, 0x11, 0x6e, 0x8e, 0x05, 0xa3, 0x32, 0x58, 0x84, 0xeb, 0x7e, 0x62, 0xfd, - 0x02, 0x7b, 0x69, 0x1e, 0x63, 0x56, 0x74, 0x1f, 0x20, 0xb5, 0x84, 0xfa, 0x84, 0xb8, 0x4c, 0xaf, - 0x5a, 0x85, 0x67, 0x69, 0xa5, 0x2e, 0xec, 0x0c, 0xde, 0xfc, 0xbd, 0x02, 0x6d, 0x51, 0xe9, 0xaf, - 0x22, 0x12, 0x1c, 0x3d, 0xc6, 0x01, 0xf6, 0x08, 0x27, 0x41, 0x88, 0x5e, 0x83, 0xa6, 0xca, 0xbe, - 0x97, 0x49, 0xa8, 0xa1, 0x6c, 0xb1, 0x6b, 0x74, 0x37, 0x13, 0xa1, 0x04, 0xc9, 0xe4, 0xa6, 0x73, - 0x11, 0xa2, 0x0d, 0xa8, 0x70, 0xec, 0x86, 0xfa, 0xa4, 0x08, 0x6d, 0xa5, 0x24, 0xb4, 0xb2, 0x00, - 0xac, 0x1d, 0xec, 0x86, 0x1b, 0x8c, 0x07, 0x47, 0xb6, 0x38, 0x8e, 0x3e, 0x85, 0xeb, 0xa7, 0xef, - 0xba, 0xe7, 0x51, 0xa6, 0x57, 0x5e, 0xe2, 0x61, 0x36, 0xd3, 0xb7, 0xfd, 0x88, 0xb2, 0x71, 0x2e, - 0x7c, 0xa8, 0x57, 0x2f, 0xc7, 0x85, 0x0f, 0xd1, 0x43, 0x68, 0x26, 0x4a, 0x25, 0xa2, 0xba, 0x26, - 0x98, 0x6e, 0x17, 0x98, 0xd6, 0x15, 0x48, 0x12, 0xfd, 0x12, 0x13, 0x35, 0x92, 0x83, 0x71, 0x4c, - 0x39, 0x1e, 0x7c, 0xa8, 0x4f, 0x5d, 0x86, 0x07, 0x1f, 0xa2, 0x3b, 0x00, 0x2c, 0xf2, 0x7a, 0xe2, - 0xd5, 0x84, 0x7a, 0x6d, 0x41, 0x5b, 0xae, 0xda, 0x75, 0x16, 0x79, 0xa2, 0xc8, 0xa1, 0xf1, 0x1e, - 0xd4, 0xd3, 0xca, 0xa2, 0x19, 0x98, 0x3c, 0x20, 0x47, 0xaa, 0xb7, 0xf1, 0x27, 0x6a, 0x43, 0x75, - 0x84, 0x07, 0x51, 0xd2, 0x4a, 0xb9, 0xf8, 0x60, 0xe2, 0x7d, 0xcd, 0xb4, 0x61, 0xf6, 0x21, 0x65, - 0x8e, 0xa4, 0x49, 0x9e, 0xcc, 0x47, 0x50, 0x7d, 0x1e, 0xf7, 0x4d, 0xe9, 0xcd, 0xd2, 0x05, 0x9b, - 0x6b, 0xcb, 0x53, 0xe6, 0x06, 0xa0, 0x58, 0x7f, 0xd2, 0x4b, 0xbf, 0xb6, 0x1f, 0xb1, 0x03, 0xd4, - 0x81, 0x6a, 0xfc, 0x3c, 0x12, 0x65, 0x2c, 0x13, 0x31, 0xa5, 0x87, 0x12, 0x67, 0xee, 0x40, 0x2b, - 0x0d, 0x6d, 0x6b, 0xfd, 0xaa, 0x82, 0x1b, 0x41, 0x3b, 0xcf, 0xaa, 0x1e, 0xe6, 0x2e, 0xd4, 0x13, - 0x91, 0x93, 0x21, 0x36, 0x57, 0x1f, 0x5c, 0x56, 0xe5, 0x6a, 0x29, 0x7b, 0x4d, 0xc9, 0x5c, 0x28, - 0xe4, 0x16, 0x0f, 0x71, 0x9f, 0x0e, 0x28, 0x3f, 0x9d, 0x6b, 0xe6, 0x6f, 0x1a, 0xb4, 0xf3, 0x76, - 0x15, 0xcf, 0xdb, 0x30, 0x8b, 0x83, 0xbd, 0x7d, 0x3a, 0x52, 0x5a, 0x8e, 0x1d, 0x12, 0x88, 0x94, - 0x6b, 0x76, 0x71, 0x63, 0x0c, 0x2d, 0x25, 0x5d, 0x34, 0x3b, 0x8f, 0x96, 0x1b, 0xe8, 0x1e, 0xb4, - 0x42, 0x1e, 0x10, 0xec, 0x51, 0xe6, 0x66, 0xf0, 0x93, 0x02, 0x5f, 
0xb6, 0xd5, 0xfd, 0x43, 0x83, - 0x99, 0xd3, 0xe5, 0xe3, 0x41, 0xe4, 0x52, 0x86, 0x9e, 0x40, 0x3d, 0x1d, 0x36, 0xe8, 0xf5, 0x92, - 0x3e, 0x8c, 0xcf, 0x31, 0xe3, 0x8d, 0xb3, 0x41, 0x2a, 0xf5, 0x27, 0x50, 0x15, 0x93, 0x09, 0xdd, - 0x2d, 0x81, 0x17, 0x27, 0x99, 0xb1, 0x78, 0x1e, 0x4c, 0xf2, 0x76, 0x7f, 0x80, 0xdb, 0xdb, 0xc5, - 0xdc, 0x54, 0x32, 0xbb, 0x70, 0x23, 0x8d, 0x44, 0xa2, 0xae, 0x30, 0xa5, 0x65, 0xad, 0xfb, 0xef, - 0xa4, 0xac, 0xa0, 0x6c, 0x98, 0x72, 0xfa, 0x14, 0x6a, 0xc9, 0xb0, 0x45, 0x66, 0x09, 0xd1, 0xd8, - 0x24, 0x36, 0xca, 0x0a, 0x52, 0x7c, 0x6a, 0xf7, 0x34, 0xf4, 0x0d, 0x34, 0x32, 0xf3, 0xb3, 0xb4, - 0x90, 0xc5, 0xa9, 0x5b, 0x5a, 0xc8, 0xb2, 0x31, 0xdc, 0x87, 0xe9, 0xdc, 0x74, 0x43, 0x4b, 0xe5, - 0x07, 0x0b, 0xc3, 0xd8, 0x58, 0x3e, 0x1f, 0xa8, 0x7c, 0x3c, 0x03, 0x38, 0x15, 0x26, 0x54, 0x56, - 0xe5, 0x82, 0x6e, 0x5d, 0xbc, 0x3c, 0x3d, 0x68, 0x66, 0x45, 0x00, 0x2d, 0x9e, 0x45, 0x7f, 0xaa, - 0x3d, 0xc6, 0xd2, 0xb9, 0x38, 0x75, 0xd5, 0x0e, 0xe1, 0xd6, 0x83, 0xf1, 0x67, 0xa7, 0x7a, 0xfe, - 0xad, 0xfa, 0xbf, 0xcb, 0xec, 0x5f, 0xe1, 0x4d, 0xeb, 0x1e, 0xe5, 0x3c, 0xe7, 0x6e, 0xdb, 0xae, - 0xf8, 0xb5, 0x53, 0xbb, 0x57, 0x7f, 0xe9, 0xba, 0x3f, 0x69, 0xa0, 0xe7, 0xff, 0x8d, 0x33, 0xce, - 0xf7, 0x85, 0xf3, 0xec, 0x36, 0x7a, 0xb3, 0xdc, 0x79, 0xc9, 0xef, 0xbf, 0xf1, 0xd6, 0x45, 0xa0, - 0xaa, 0x02, 0x11, 0x20, 0xe9, 0x33, 0xab, 0xab, 0x71, 0xcb, 0x73, 0xeb, 0x52, 0xd1, 0x28, 0x0a, - 0x74, 0x69, 0xcb, 0xcb, 0x04, 0x7b, 0x55, 0x7f, 0x71, 0x32, 0xa7, 0xfd, 0x79, 0x32, 0xa7, 0xfd, - 0x73, 0x32, 0xa7, 0x7d, 0x0d, 0x0a, 0xde, 0x1b, 0xad, 0xf4, 0xaf, 0x89, 0x29, 0xff, 0xee, 0x7f, - 0x01, 0x00, 0x00, 0xff, 0xff, 0x6c, 0xa3, 0xc0, 0x72, 0x65, 0x0d, 0x00, 0x00, + // 1128 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0x4f, 0x73, 0xdb, 0x44, + 0x14, 0x47, 0xb1, 0xdd, 0xd8, 0xcf, 0x4e, 0x9b, 0xac, 0x5d, 0xaa, 0x0a, 0x9a, 0x04, 0x41, 0x93, + 0xc0, 0x80, 0xdc, 0x98, 0x03, 0x0c, 0x94, 0x81, 0xe6, 0x4f, 0x33, 0x01, 0x0a, 0x45, 0xc9, 0xb4, + 0x33, 0x14, 0xe2, 0x59, 0x47, 0x8b, 0x22, 0x62, 0xad, 0x5c, 0x69, 0xe5, 0x49, 0x86, 0xe1, 0xc6, + 0x07, 0xe0, 0xc8, 0x89, 0x13, 0x33, 0x7c, 0x0f, 0x4e, 0x3d, 0x72, 0xe6, 0x10, 0x98, 0x5c, 0xb9, + 0xf2, 0x01, 0x18, 0xed, 0xae, 0x14, 0xfd, 0x9b, 0x24, 0x4d, 0x73, 0xf3, 0xbe, 0xf7, 0xdb, 0xdf, + 0xfb, 0xbb, 0xef, 0xc9, 0x30, 0x15, 0x30, 0xcf, 0xc7, 0x36, 0x31, 0x46, 0xbe, 0xc7, 0x3c, 0x34, + 0xf3, 0x3d, 0x26, 0x36, 0xf1, 0x8d, 0x58, 0x3a, 0x5e, 0xd6, 0x3a, 0xb6, 0x67, 0x7b, 0x5c, 0xdb, + 0x8d, 0x7e, 0x09, 0xa0, 0x36, 0x67, 0x7b, 0x9e, 0x3d, 0x24, 0x5d, 0x7e, 0x1a, 0x84, 0xdf, 0x75, + 0x99, 0xe3, 0x92, 0x80, 0x61, 0x77, 0x24, 0x01, 0xb3, 0x79, 0x80, 0x15, 0xfa, 0x98, 0x39, 0x1e, + 0x95, 0xfa, 0xa6, 0xeb, 0x59, 0x64, 0x28, 0x0e, 0xfa, 0xaf, 0x0a, 0xbc, 0xbc, 0x41, 0xd8, 0x1a, + 0x19, 0x11, 0x6a, 0x11, 0xba, 0xeb, 0x90, 0xc0, 0x24, 0x4f, 0x43, 0x12, 0x30, 0xb4, 0x0a, 0x10, + 0x30, 0xec, 0xb3, 0x7e, 0x64, 0x40, 0x55, 0xe6, 0x95, 0xa5, 0x66, 0x4f, 0x33, 0x04, 0xb9, 0x11, + 0x93, 0x1b, 0xdb, 0xb1, 0xf5, 0x95, 0xfa, 0xb3, 0xa3, 0xb9, 0x97, 0x7e, 0xfe, 0x7b, 0x4e, 0x31, + 0x1b, 0xfc, 0x5e, 0xa4, 0x41, 0x1f, 0x43, 0x9d, 0x50, 0x4b, 0x50, 0x4c, 0x3c, 0x07, 0xc5, 0x24, + 0xa1, 0x56, 0x24, 0xd7, 0x07, 0x70, 0xa3, 0xe0, 0x5f, 0x30, 0xf2, 0x68, 0x40, 0xd0, 0x06, 0xb4, + 0xac, 0x94, 0x5c, 0x55, 0xe6, 0x2b, 0x4b, 0xcd, 0xde, 0x2d, 0x43, 0x66, 0x12, 0x8f, 0x9c, 0xfe, + 0xb8, 0x67, 0x24, 0x57, 0x0f, 0x3f, 0x77, 0xe8, 0xfe, 0x4a, 0x35, 0x32, 0x61, 0x66, 0x2e, 0xea, + 0x1f, 0xc2, 0xf4, 0x63, 0xdf, 0x61, 0x64, 0x6b, 0x84, 0x69, 
0x1c, 0xfd, 0x22, 0x54, 0x83, 0x11, + 0xa6, 0x32, 0xee, 0x76, 0x8e, 0x94, 0x23, 0x39, 0x40, 0x6f, 0xc3, 0x4c, 0xea, 0xb2, 0x70, 0x4d, + 0xef, 0x00, 0x5a, 0x1d, 0x7a, 0x01, 0xe1, 0x1a, 0x5f, 0x72, 0xea, 0xd7, 0xa1, 0x9d, 0x91, 0x4a, + 0xf0, 0x7f, 0x0a, 0x5c, 0xdb, 0x20, 0x6c, 0xdb, 0xc7, 0xbb, 0x24, 0x36, 0xff, 0x04, 0xea, 0x2c, + 0x3a, 0xf7, 0x1d, 0x8b, 0xbb, 0xd0, 0x5a, 0xf9, 0x24, 0x72, 0xfc, 0xaf, 0xa3, 0xb9, 0x77, 0x6c, + 0x87, 0xed, 0x85, 0x03, 0x63, 0xd7, 0x73, 0xbb, 0xc2, 0xa9, 0x08, 0xe8, 0x50, 0x5b, 0x9e, 0xba, + 0xa2, 0xbc, 0x9c, 0x6d, 0x73, 0xed, 0xf8, 0x68, 0x6e, 0x52, 0xfe, 0x34, 0x27, 0x39, 0xe3, 0xa6, + 0x95, 0xab, 0xec, 0xc4, 0x8b, 0x57, 0xb6, 0x72, 0x91, 0xca, 0x76, 0x00, 0x6d, 0x10, 0xb6, 0x45, + 0xfc, 0xb1, 0xb3, 0x9b, 0x74, 0x9d, 0xbe, 0x0c, 0xed, 0x8c, 0x54, 0xd6, 0x5a, 0x83, 0x7a, 0x20, + 0x65, 0xbc, 0xce, 0x0d, 0x33, 0x39, 0xeb, 0x0f, 0xa0, 0xb3, 0x41, 0xd8, 0x97, 0x23, 0x22, 0xda, + 0x3c, 0x69, 0x60, 0x15, 0x26, 0x25, 0x86, 0xa7, 0xb0, 0x61, 0xc6, 0x47, 0xf4, 0x0a, 0x34, 0xa2, + 0xda, 0xf5, 0xf7, 0x1d, 0x6a, 0xf1, 0xf8, 0x23, 0xba, 0x11, 0xa6, 0x9f, 0x39, 0xd4, 0xd2, 0xef, + 0x42, 0x23, 0xe1, 0x42, 0x08, 0xaa, 0x14, 0xbb, 0x31, 0x01, 0xff, 0x7d, 0xfa, 0xed, 0x1f, 0xe1, + 0x7a, 0xce, 0x19, 0x19, 0xc1, 0x02, 0x5c, 0xf5, 0x62, 0xe9, 0x17, 0xd8, 0x4d, 0xe2, 0xc8, 0x49, + 0xd1, 0x5d, 0x80, 0x44, 0x12, 0xa8, 0x13, 0xbc, 0xa7, 0x5f, 0x35, 0x0a, 0xd3, 0xc1, 0x48, 0x4c, + 0x98, 0x29, 0xbc, 0xfe, 0x7b, 0x15, 0x3a, 0xbc, 0xde, 0x5f, 0x85, 0xc4, 0x3f, 0x7c, 0x88, 0x7d, + 0xec, 0x12, 0x46, 0xfc, 0x00, 0xbd, 0x06, 0x2d, 0x19, 0x7d, 0x3f, 0x15, 0x50, 0x53, 0xca, 0x22, + 0xd3, 0xe8, 0x76, 0xca, 0x43, 0x01, 0x12, 0xc1, 0x4d, 0x65, 0x3c, 0x44, 0xeb, 0x50, 0x65, 0xd8, + 0x0e, 0xd4, 0x0a, 0x77, 0x6d, 0xb9, 0xc4, 0xb5, 0x32, 0x07, 0x8c, 0x6d, 0x6c, 0x07, 0xeb, 0x94, + 0xf9, 0x87, 0x26, 0xbf, 0x8e, 0x3e, 0x85, 0xab, 0x27, 0x4d, 0xd8, 0x77, 0x1d, 0xaa, 0x56, 0x9f, + 0xa3, 0x8b, 0x5a, 0x49, 0x23, 0x3e, 0x70, 0x68, 0x9e, 0x0b, 0x1f, 0xa8, 0xb5, 0x8b, 0x71, 0xe1, + 0x03, 0x74, 0x1f, 0x5a, 0xf1, 0xc0, 0xe4, 0x5e, 0x5d, 0xe1, 0x4c, 0x37, 0x0b, 0x4c, 0x6b, 0x12, + 0x24, 0x88, 0x7e, 0x89, 0x88, 0x9a, 0xf1, 0xc5, 0xc8, 0xa7, 0x0c, 0x0f, 0x3e, 0x50, 0x27, 0x2f, + 0xc2, 0x83, 0x0f, 0xd0, 0x2d, 0x00, 0x1a, 0xba, 0x7d, 0xfe, 0x76, 0x03, 0xb5, 0x3e, 0xaf, 0x2c, + 0xd5, 0xcc, 0x06, 0x0d, 0x5d, 0x9e, 0xe4, 0x40, 0x7b, 0x0f, 0x1a, 0x49, 0x66, 0xd1, 0x34, 0x54, + 0xf6, 0xc9, 0xa1, 0xac, 0x6d, 0xf4, 0x13, 0x75, 0xa0, 0x36, 0xc6, 0xc3, 0x30, 0x2e, 0xa5, 0x38, + 0x7c, 0x30, 0xf1, 0xbe, 0xa2, 0x9b, 0x30, 0x73, 0xdf, 0xa1, 0x96, 0xa0, 0x89, 0x9f, 0xcc, 0x47, + 0x50, 0x7b, 0x1a, 0xd5, 0x4d, 0x8e, 0xbd, 0xc5, 0x73, 0x16, 0xd7, 0x14, 0xb7, 0xf4, 0x75, 0x40, + 0xd1, 0x18, 0x4c, 0x9a, 0x7e, 0x75, 0x2f, 0xa4, 0xfb, 0xa8, 0x0b, 0xb5, 0xe8, 0x79, 0xc4, 0x03, + 0xba, 0x6c, 0x96, 0xca, 0xb1, 0x2c, 0x70, 0xfa, 0x36, 0xb4, 0x13, 0xd7, 0x36, 0xd7, 0x2e, 0xcb, + 0xb9, 0x31, 0x74, 0xb2, 0xac, 0xf2, 0x61, 0xee, 0x40, 0x23, 0x1e, 0xb5, 0xc2, 0xc5, 0xd6, 0xca, + 0xbd, 0x8b, 0xce, 0xda, 0x7a, 0xc2, 0x5e, 0x97, 0xc3, 0x36, 0xe0, 0x53, 0x1f, 0x8f, 0xf0, 0xc0, + 0x19, 0x3a, 0xec, 0x64, 0xbd, 0xea, 0xbf, 0x29, 0xd0, 0xc9, 0xca, 0xa5, 0x3f, 0x6f, 0xc3, 0x0c, + 0xf6, 0x77, 0xf7, 0x9c, 0xb1, 0x5c, 0x29, 0xd8, 0x22, 0x3e, 0x0f, 0xb9, 0x6e, 0x16, 0x15, 0x39, + 0xb4, 0xd8, 0x2c, 0xbc, 0xd8, 0x59, 0xb4, 0x50, 0xa0, 0x3b, 0xd0, 0x0e, 0x98, 0x4f, 0xb0, 0xeb, + 0x50, 0x3b, 0x85, 0xaf, 0x70, 0x7c, 0x99, 0xaa, 0xf7, 0x87, 0x02, 0xd3, 0x27, 0xc7, 0x87, 0xc3, + 0xd0, 0x76, 0x28, 0x7a, 0x04, 0x8d, 0x64, 0xe7, 0xa1, 0xd7, 0x4b, 0xea, 0x90, 0x5f, 
0xa7, 0xda, + 0x1b, 0xa7, 0x83, 0x64, 0xe8, 0x8f, 0xa0, 0xc6, 0x17, 0x24, 0xba, 0x5d, 0x02, 0x2f, 0x2e, 0x54, + 0x6d, 0xe1, 0x2c, 0x98, 0xe0, 0xed, 0xfd, 0x00, 0x37, 0xb7, 0x8a, 0xb1, 0xc9, 0x60, 0x76, 0xe0, + 0x5a, 0xe2, 0x89, 0x40, 0x5d, 0x62, 0x48, 0x4b, 0x4a, 0xef, 0xdf, 0x8a, 0xc8, 0xa0, 0x28, 0x98, + 0x34, 0xfa, 0x18, 0xea, 0xf1, 0xca, 0x47, 0x7a, 0x09, 0x51, 0xee, 0x7b, 0x40, 0x2b, 0x4b, 0x48, + 0xf1, 0xa9, 0xdd, 0x51, 0xd0, 0x37, 0xd0, 0x4c, 0xed, 0xcf, 0xd2, 0x44, 0x16, 0xb7, 0x6e, 0x69, + 0x22, 0xcb, 0xd6, 0xf0, 0x00, 0xa6, 0x32, 0xdb, 0x0d, 0x2d, 0x96, 0x5f, 0x2c, 0x2c, 0x63, 0x6d, + 0xe9, 0x6c, 0xa0, 0xb4, 0xf1, 0x04, 0xe0, 0x64, 0x30, 0xa1, 0xb2, 0x2c, 0x17, 0xe6, 0xd6, 0xf9, + 0xd3, 0xd3, 0x87, 0x56, 0x7a, 0x08, 0xa0, 0x85, 0xd3, 0xe8, 0x4f, 0x66, 0x8f, 0xb6, 0x78, 0x26, + 0x4e, 0xb6, 0xda, 0x01, 0xdc, 0xb8, 0x97, 0x7f, 0x76, 0xb2, 0xe6, 0xdf, 0xca, 0xcf, 0xcc, 0x94, + 0xfe, 0x12, 0x3b, 0xad, 0x77, 0x98, 0xb1, 0x9c, 0xe9, 0xb6, 0x1d, 0xfe, 0x81, 0x29, 0xb5, 0x97, + 0xdf, 0x74, 0xbd, 0x9f, 0x14, 0x50, 0xb3, 0x9f, 0xe8, 0x29, 0xe3, 0x7b, 0xdc, 0x78, 0x5a, 0x8d, + 0xde, 0x2c, 0x37, 0x5e, 0xf2, 0x2f, 0x44, 0x7b, 0xeb, 0x3c, 0x50, 0x99, 0x81, 0x10, 0x90, 0xb0, + 0x99, 0x9e, 0xab, 0x51, 0xc9, 0x33, 0xe7, 0xd2, 0xa1, 0x51, 0x1c, 0xd0, 0xa5, 0x25, 0x2f, 0x1b, + 0xd8, 0x2b, 0xea, 0xb3, 0xe3, 0x59, 0xe5, 0xcf, 0xe3, 0x59, 0xe5, 0x9f, 0xe3, 0x59, 0xe5, 0x6b, + 0x90, 0xf0, 0xfe, 0x78, 0x79, 0x70, 0x85, 0x6f, 0xf9, 0x77, 0xff, 0x0f, 0x00, 0x00, 0xff, 0xff, + 0x4d, 0x42, 0xc0, 0x41, 0xec, 0x0d, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -2125,6 +2142,22 @@ func (m *GetTraceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + n4, err4 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.EndTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.EndTime):]) + if err4 != nil { + return 0, err4 + } + i -= n4 + i = encodeVarintStorage(dAtA, i, uint64(n4)) + i-- + dAtA[i] = 0x1a + n5, err5 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.StartTime):]) + if err5 != nil { + return 0, err5 + } + i -= n5 + i = encodeVarintStorage(dAtA, i, uint64(n5)) + i-- + dAtA[i] = 0x12 { size := m.TraceID.Size() i -= size @@ -2362,37 +2395,37 @@ func (m *TraceQueryParameters) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x40 } - n4, err4 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.DurationMax, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.DurationMax):]) - if err4 != nil { - return 0, err4 - } - i -= n4 - i = encodeVarintStorage(dAtA, i, uint64(n4)) - i-- - dAtA[i] = 0x3a - n5, err5 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.DurationMin, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.DurationMin):]) - if err5 != nil { - return 0, err5 - } - i -= n5 - i = encodeVarintStorage(dAtA, i, uint64(n5)) - i-- - dAtA[i] = 0x32 - n6, err6 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartTimeMax, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.StartTimeMax):]) + n6, err6 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.DurationMax, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.DurationMax):]) if err6 != nil { return 0, err6 } i -= n6 i = encodeVarintStorage(dAtA, i, uint64(n6)) i-- - dAtA[i] = 0x2a - n7, err7 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartTimeMin, 
dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.StartTimeMin):]) + dAtA[i] = 0x3a + n7, err7 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.DurationMin, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.DurationMin):]) if err7 != nil { return 0, err7 } i -= n7 i = encodeVarintStorage(dAtA, i, uint64(n7)) i-- + dAtA[i] = 0x32 + n8, err8 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartTimeMax, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.StartTimeMax):]) + if err8 != nil { + return 0, err8 + } + i -= n8 + i = encodeVarintStorage(dAtA, i, uint64(n8)) + i-- + dAtA[i] = 0x2a + n9, err9 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartTimeMin, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.StartTimeMin):]) + if err9 != nil { + return 0, err9 + } + i -= n9 + i = encodeVarintStorage(dAtA, i, uint64(n9)) + i-- dAtA[i] = 0x22 if len(m.Tags) > 0 { for k := range m.Tags { @@ -2779,6 +2812,10 @@ func (m *GetTraceRequest) Size() (n int) { _ = l l = m.TraceID.Size() n += 1 + l + sovStorage(uint64(l)) + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.StartTime) + n += 1 + l + sovStorage(uint64(l)) + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.EndTime) + n += 1 + l + sovStorage(uint64(l)) if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -3529,6 +3566,72 @@ func (m *GetTraceRequest) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStorage + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStorage + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.StartTime, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EndTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStorage + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStorage + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.EndTime, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipStorage(dAtA[iNdEx:]) diff --git a/vendor/github.com/jaegertracing/jaeger/storage/README.md b/vendor/github.com/jaegertracing/jaeger/storage/README.md deleted file mode 100644 index 35af8a74359..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/storage/README.md +++ /dev/null @@ -1,4 +0,0 @@ -The collection of different storage interfaces that are shared by two or more components - -If a storage is used by only one component, its interface should be defined in the component package, and implementations under `./plugin/storage/{db_name}/{store_type}/...`. 
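// Editorial sketch, not part of the generated diff: the storage_v1
// GetTraceRequest above gained start_time/end_time as non-nullable gogoproto
// stdtime fields, which is why they surface as time.Time values (not
// *time.Time) and are now marshaled unconditionally, with the old nil checks
// removed. A hypothetical caller passes whatever window it already knows;
// the zero time.Time effectively means "no bound". The helper below is
// illustrative; TraceID, StartTime, and EndTime are confirmed by the hunks.

package main

import (
	"time"

	"github.com/jaegertracing/jaeger/model"
	"github.com/jaegertracing/jaeger/proto-gen/storage_v1"
)

func buildGetTraceRequest(id model.TraceID, start, end time.Time) *storage_v1.GetTraceRequest {
	return &storage_v1.GetTraceRequest{
		TraceID:   id,
		StartTime: start, // zero value => unbounded, but still encoded on the wire
		EndTime:   end,
	}
}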
- diff --git a/vendor/github.com/jaegertracing/jaeger/storage/dependencystore/interface.go b/vendor/github.com/jaegertracing/jaeger/storage/dependencystore/interface.go index 4a68149862d..07f8ddd3123 100644 --- a/vendor/github.com/jaegertracing/jaeger/storage/dependencystore/interface.go +++ b/vendor/github.com/jaegertracing/jaeger/storage/dependencystore/interface.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package dependencystore diff --git a/vendor/github.com/jaegertracing/jaeger/storage/doc.go b/vendor/github.com/jaegertracing/jaeger/storage/doc.go deleted file mode 100644 index 4b3b595ca6a..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/storage/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (c) 2019 The Jaeger Authors. -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package storage is the collection of different storage interfaces that are shared by two or more components. -// -// If a storage is used by only one component, its interface should be defined in the component package, and implementations under ./plugin/storage/{db_name}/{store_type}/.... -package storage diff --git a/vendor/github.com/jaegertracing/jaeger/storage/factory.go b/vendor/github.com/jaegertracing/jaeger/storage/factory.go deleted file mode 100644 index b56e6fdc07f..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/storage/factory.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright (c) 2019 The Jaeger Authors. -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package storage - -import ( - "errors" - - "go.uber.org/zap" - - "github.com/jaegertracing/jaeger/pkg/distributedlock" - "github.com/jaegertracing/jaeger/pkg/metrics" - "github.com/jaegertracing/jaeger/storage/dependencystore" - "github.com/jaegertracing/jaeger/storage/metricsstore" - "github.com/jaegertracing/jaeger/storage/samplingstore" - "github.com/jaegertracing/jaeger/storage/spanstore" -) - -// Factory defines an interface for a factory that can create implementations of different storage components. -// Implementations are also encouraged to implement plugin.Configurable interface. -// -// # See also -// -// plugin.Configurable -type Factory interface { - // Initialize performs internal initialization of the factory, such as opening connections to the backend store. - // It is called after all configuration of the factory itself has been done. - Initialize(metricsFactory metrics.Factory, logger *zap.Logger) error - - // CreateSpanReader creates a spanstore.Reader. - CreateSpanReader() (spanstore.Reader, error) - - // CreateSpanWriter creates a spanstore.Writer. - CreateSpanWriter() (spanstore.Writer, error) - - // CreateDependencyReader creates a dependencystore.Reader. - CreateDependencyReader() (dependencystore.Reader, error) -} - -// Purger defines an interface that is capable of purging the storage. -// Only meant to be used from integration tests. -type Purger interface { - // Purge removes all data from the storage. - Purge() error -} - -// SamplingStoreFactory defines an interface that is capable of returning the necessary backends for -// adaptive sampling. -type SamplingStoreFactory interface { - // CreateLock creates a distributed lock. - CreateLock() (distributedlock.Lock, error) - // CreateSamplingStore creates a sampling store. - CreateSamplingStore(maxBuckets int) (samplingstore.Store, error) -} - -var ( - // ErrArchiveStorageNotConfigured can be returned by the ArchiveFactory when the archive storage is not configured. - ErrArchiveStorageNotConfigured = errors.New("archive storage not configured") - - // ErrArchiveStorageNotSupported can be returned by the ArchiveFactory when the archive storage is not supported by the backend. - ErrArchiveStorageNotSupported = errors.New("archive storage not supported") -) - -// ArchiveFactory is an additional interface that can be implemented by a factory to support trace archiving. -type ArchiveFactory interface { - // CreateArchiveSpanReader creates a spanstore.Reader. - CreateArchiveSpanReader() (spanstore.Reader, error) - - // CreateArchiveSpanWriter creates a spanstore.Writer. - CreateArchiveSpanWriter() (spanstore.Writer, error) -} - -// MetricsFactory defines an interface for a factory that can create implementations of different metrics storage components. -// Implementations are also encouraged to implement plugin.Configurable interface. -// -// # See also -// -// plugin.Configurable -type MetricsFactory interface { - // Initialize performs internal initialization of the factory, such as opening connections to the backend store. - // It is called after all configuration of the factory itself has been done. - Initialize(logger *zap.Logger) error - - // CreateMetricsReader creates a metricsstore.Reader. 
- CreateMetricsReader() (metricsstore.Reader, error) -} diff --git a/vendor/github.com/jaegertracing/jaeger/storage/metricsstore/interface.go b/vendor/github.com/jaegertracing/jaeger/storage/metricsstore/interface.go deleted file mode 100644 index dc62fd0fa56..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/storage/metricsstore/interface.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright (c) 2021 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metricsstore - -import ( - "context" - "time" - - "github.com/jaegertracing/jaeger/proto-gen/api_v2/metrics" -) - -// Reader can load aggregated trace metrics from storage. -type Reader interface { - // GetLatencies gets the latency metrics for a specific quantile (e.g. 0.99) and list of services - // grouped by service and optionally grouped by operation. - GetLatencies(ctx context.Context, params *LatenciesQueryParameters) (*metrics.MetricFamily, error) - // GetCallRates gets the call rate metrics for a given list of services grouped by service - // and optionally grouped by operation. - GetCallRates(ctx context.Context, params *CallRateQueryParameters) (*metrics.MetricFamily, error) - // GetErrorRates gets the error rate metrics for a given list of services grouped by service - // and optionally grouped by operation. - GetErrorRates(ctx context.Context, params *ErrorRateQueryParameters) (*metrics.MetricFamily, error) - // GetMinStepDuration gets the min time resolution supported by the backing metrics store, - // e.g. 10s means the backend can only return data points that are at least 10s apart, not closer. - GetMinStepDuration(ctx context.Context, params *MinStepDurationQueryParameters) (time.Duration, error) -} - -// BaseQueryParameters contains the common set of parameters used by all metrics queries: -// latency, call rate or error rate. -type BaseQueryParameters struct { - // ServiceNames are the service names to fetch metrics from. The results will be grouped by service_name. - ServiceNames []string - // GroupByOperation determines if the metrics returned should be grouped by operation. - GroupByOperation bool - // EndTime is the ending time of the time series query range. - EndTime *time.Time - // Lookback is the duration from the end_time to look back on for metrics data points. - // For example, if set to 1h, the query would span from end_time-1h to end_time. - Lookback *time.Duration - // Step size is the duration between data points of the query results. - // For example, if set to 5s, the results would produce a data point every 5 seconds from the (EndTime - Lookback) to EndTime. - Step *time.Duration - // RatePer is the duration in which the per-second rate of change is calculated for a cumulative counter metric. - RatePer *time.Duration - // SpanKinds is the list of span kinds to include (logical OR) in the resulting metrics aggregation. - SpanKinds []string -} - -// LatenciesQueryParameters contains the parameters required for latency metrics queries. 
-type LatenciesQueryParameters struct { - BaseQueryParameters - // Quantile is the quantile to compute from latency histogram metrics. - // Valid range: 0 - 1 (inclusive). - // - // e.g. 0.99 will return the 99th percentile or P99 which is the worst latency - // observed from 99% of all spans for the given service (and operation). - Quantile float64 -} - -// CallRateQueryParameters contains the parameters required for call rate metrics queries. -type CallRateQueryParameters struct { - BaseQueryParameters -} - -// ErrorRateQueryParameters contains the parameters required for error rate metrics queries. -type ErrorRateQueryParameters struct { - BaseQueryParameters -} - -// MinStepDurationQueryParameters contains the parameters required for fetching the minimum step duration. -type MinStepDurationQueryParameters struct{} diff --git a/vendor/github.com/jaegertracing/jaeger/storage/samplingstore/interface.go b/vendor/github.com/jaegertracing/jaeger/storage/samplingstore/interface.go deleted file mode 100644 index 7c10aab8f40..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/storage/samplingstore/interface.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright (c) 2019 The Jaeger Authors. -// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package samplingstore - -import ( - "time" - - "github.com/jaegertracing/jaeger/cmd/collector/app/sampling/model" -) - -// Store writes and retrieves sampling data to and from storage. -type Store interface { - // InsertThroughput inserts aggregated throughput for operations into storage. - InsertThroughput(throughput []*model.Throughput) error - - // InsertProbabilitiesAndQPS inserts calculated sampling probabilities and measured qps into storage. - InsertProbabilitiesAndQPS(hostname string, probabilities model.ServiceOperationProbabilities, qps model.ServiceOperationQPS) error - - // GetThroughput retrieves aggregated throughput for operations within a time range. - GetThroughput(start, end time.Time) ([]*model.Throughput, error) - - // GetLatestProbabilities retrieves the latest sampling probabilities. - GetLatestProbabilities() (model.ServiceOperationProbabilities, error) -} diff --git a/vendor/github.com/jaegertracing/jaeger/storage/spanstore/composite.go b/vendor/github.com/jaegertracing/jaeger/storage/spanstore/composite.go index 68f9607448c..e4f79141c0c 100644 --- a/vendor/github.com/jaegertracing/jaeger/storage/spanstore/composite.go +++ b/vendor/github.com/jaegertracing/jaeger/storage/spanstore/composite.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package spanstore diff --git a/vendor/github.com/jaegertracing/jaeger/storage/spanstore/downsampling_writer.go b/vendor/github.com/jaegertracing/jaeger/storage/spanstore/downsampling_writer.go index dd354e4d080..ceb5040ac18 100644 --- a/vendor/github.com/jaegertracing/jaeger/storage/spanstore/downsampling_writer.go +++ b/vendor/github.com/jaegertracing/jaeger/storage/spanstore/downsampling_writer.go @@ -1,16 +1,5 @@ // Copyright (c) 2019 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package spanstore @@ -100,7 +89,7 @@ func NewSampler(ratio float64, hashSalt string) *Sampler { } hashSaltBytes := []byte(hashSalt) pool := &sync.Pool{ - New: func() interface{} { + New: func() any { buffer := make([]byte, len(hashSaltBytes)+traceIDByteSize) copy(buffer, hashSaltBytes) return &hasher{ diff --git a/vendor/github.com/jaegertracing/jaeger/storage/spanstore/interface.go b/vendor/github.com/jaegertracing/jaeger/storage/spanstore/interface.go index 534cf8a2280..3c4dd0dd4e7 100644 --- a/vendor/github.com/jaegertracing/jaeger/storage/spanstore/interface.go +++ b/vendor/github.com/jaegertracing/jaeger/storage/spanstore/interface.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package spanstore @@ -36,7 +25,7 @@ type Reader interface { // GetTrace retrieves the trace with a given id. // // If no spans are stored for this trace, it returns ErrTraceNotFound. - GetTrace(ctx context.Context, traceID model.TraceID) (*model.Trace, error) + GetTrace(ctx context.Context, query GetTraceParameters) (*model.Trace, error) // GetServices returns all service names known to the backend from spans // within its retention period. @@ -61,6 +50,13 @@ type Reader interface { FindTraceIDs(ctx context.Context, query *TraceQueryParameters) ([]model.TraceID, error) } +// GetTraceParameters contains parameters of a trace get. 
+type GetTraceParameters struct { + TraceID model.TraceID + StartTime time.Time // optional + EndTime time.Time // optional +} + // TraceQueryParameters contains parameters of a trace query. type TraceQueryParameters struct { ServiceName string diff --git a/vendor/github.com/jaegertracing/jaeger/thrift-gen/baggage/GoUnusedProtection__.go b/vendor/github.com/jaegertracing/jaeger/thrift-gen/baggage/GoUnusedProtection__.go deleted file mode 100644 index 2a12aadf771..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/thrift-gen/baggage/GoUnusedProtection__.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by Thrift Compiler (0.19.0). DO NOT EDIT. - -package baggage - -var GoUnusedProtection__ int; - diff --git a/vendor/github.com/jaegertracing/jaeger/thrift-gen/baggage/baggage-consts.go b/vendor/github.com/jaegertracing/jaeger/thrift-gen/baggage/baggage-consts.go deleted file mode 100644 index ddc1c2702d2..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/thrift-gen/baggage/baggage-consts.go +++ /dev/null @@ -1,30 +0,0 @@ -// Code generated by Thrift Compiler (0.19.0). DO NOT EDIT. - -package baggage - -import ( - "bytes" - "context" - "errors" - "fmt" - "time" - thrift "github.com/apache/thrift/lib/go/thrift" - "strings" - "regexp" -) - -// (needed to ensure safety because of naive import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = errors.New -var _ = context.Background -var _ = time.Now -var _ = bytes.Equal -// (needed by validator.) -var _ = strings.Contains -var _ = regexp.MatchString - - -func init() { -} - diff --git a/vendor/github.com/jaegertracing/jaeger/thrift-gen/baggage/baggage.go b/vendor/github.com/jaegertracing/jaeger/thrift-gen/baggage/baggage.go deleted file mode 100644 index 3d5db071478..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/thrift-gen/baggage/baggage.go +++ /dev/null @@ -1,591 +0,0 @@ -// Code generated by Thrift Compiler (0.19.0). DO NOT EDIT. - -package baggage - -import ( - "bytes" - "context" - "errors" - "fmt" - "time" - thrift "github.com/apache/thrift/lib/go/thrift" - "strings" - "regexp" -) - -// (needed to ensure safety because of naive import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = errors.New -var _ = context.Background -var _ = time.Now -var _ = bytes.Equal -// (needed by validator.) 
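// Editorial sketch, not part of the generated diff: adapting a caller to the
// spanstore.Reader change above, where GetTrace now takes a
// GetTraceParameters value instead of a bare model.TraceID. Only the struct,
// the signature, and ErrTraceNotFound are confirmed by the hunk; the lookup
// helper is illustrative.

package main

import (
	"context"
	"time"

	"github.com/jaegertracing/jaeger/model"
	"github.com/jaegertracing/jaeger/storage/spanstore"
)

func lookup(ctx context.Context, reader spanstore.Reader, id model.TraceID, start, end time.Time) (*model.Trace, error) {
	// StartTime/EndTime are optional hints; the zero time.Time leaves the
	// corresponding bound open. GetTrace still returns ErrTraceNotFound when
	// no spans are stored for the ID.
	return reader.GetTrace(ctx, spanstore.GetTraceParameters{
		TraceID:   id,
		StartTime: start,
		EndTime:   end,
	})
}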
-var _ = strings.Contains -var _ = regexp.MatchString - -// Attributes: -// - BaggageKey -// - MaxValueLength -type BaggageRestriction struct { - BaggageKey string `thrift:"baggageKey,1,required" db:"baggageKey" json:"baggageKey"` - MaxValueLength int32 `thrift:"maxValueLength,2,required" db:"maxValueLength" json:"maxValueLength"` -} - -func NewBaggageRestriction() *BaggageRestriction { - return &BaggageRestriction{} -} - - -func (p *BaggageRestriction) GetBaggageKey() string { - return p.BaggageKey -} - -func (p *BaggageRestriction) GetMaxValueLength() int32 { - return p.MaxValueLength -} -func (p *BaggageRestriction) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetBaggageKey bool = false; - var issetMaxValueLength bool = false; - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetBaggageKey = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.I32 { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - issetMaxValueLength = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetBaggageKey{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field BaggageKey is not set")); - } - if !issetMaxValueLength{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MaxValueLength is not set")); - } - return nil -} - -func (p *BaggageRestriction) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - p.BaggageKey = v -} - return nil -} - -func (p *BaggageRestriction) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 2: ", err) -} else { - p.MaxValueLength = v -} - return nil -} - -func (p *BaggageRestriction) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "BaggageRestriction"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - if err := p.writeField2(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *BaggageRestriction) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "baggageKey", thrift.STRING, 1); err != nil { - 
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:baggageKey: ", p), err) } - if err := oprot.WriteString(ctx, string(p.BaggageKey)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.baggageKey (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:baggageKey: ", p), err) } - return err -} - -func (p *BaggageRestriction) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "maxValueLength", thrift.I32, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:maxValueLength: ", p), err) } - if err := oprot.WriteI32(ctx, int32(p.MaxValueLength)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.maxValueLength (2) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:maxValueLength: ", p), err) } - return err -} - -func (p *BaggageRestriction) Equals(other *BaggageRestriction) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.BaggageKey != other.BaggageKey { return false } - if p.MaxValueLength != other.MaxValueLength { return false } - return true -} - -func (p *BaggageRestriction) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("BaggageRestriction(%+v)", *p) -} - -func (p *BaggageRestriction) Validate() error { - return nil -} -type BaggageRestrictionManager interface { - // getBaggageRestrictions retrieves the baggage restrictions for a specific service. - // Usually, baggageRestrictions apply to all services however there may be situations - // where a baggageKey might only be allowed to be set by a specific service. - // - // Parameters: - // - ServiceName - GetBaggageRestrictions(ctx context.Context, serviceName string) (_r []*BaggageRestriction, _err error) -} - -type BaggageRestrictionManagerClient struct { - c thrift.TClient - meta thrift.ResponseMeta -} - -func NewBaggageRestrictionManagerClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *BaggageRestrictionManagerClient { - return &BaggageRestrictionManagerClient{ - c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), - } -} - -func NewBaggageRestrictionManagerClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *BaggageRestrictionManagerClient { - return &BaggageRestrictionManagerClient{ - c: thrift.NewTStandardClient(iprot, oprot), - } -} - -func NewBaggageRestrictionManagerClient(c thrift.TClient) *BaggageRestrictionManagerClient { - return &BaggageRestrictionManagerClient{ - c: c, - } -} - -func (p *BaggageRestrictionManagerClient) Client_() thrift.TClient { - return p.c -} - -func (p *BaggageRestrictionManagerClient) LastResponseMeta_() thrift.ResponseMeta { - return p.meta -} - -func (p *BaggageRestrictionManagerClient) SetLastResponseMeta_(meta thrift.ResponseMeta) { - p.meta = meta -} - -// getBaggageRestrictions retrieves the baggage restrictions for a specific service. -// Usually, baggageRestrictions apply to all services however there may be situations -// where a baggageKey might only be allowed to be set by a specific service. 
-// -// Parameters: -// - ServiceName -func (p *BaggageRestrictionManagerClient) GetBaggageRestrictions(ctx context.Context, serviceName string) (_r []*BaggageRestriction, _err error) { - var _args0 BaggageRestrictionManagerGetBaggageRestrictionsArgs - _args0.ServiceName = serviceName - var _result2 BaggageRestrictionManagerGetBaggageRestrictionsResult - var _meta1 thrift.ResponseMeta - _meta1, _err = p.Client_().Call(ctx, "getBaggageRestrictions", &_args0, &_result2) - p.SetLastResponseMeta_(_meta1) - if _err != nil { - return - } - return _result2.GetSuccess(), nil -} - -type BaggageRestrictionManagerProcessor struct { - processorMap map[string]thrift.TProcessorFunction - handler BaggageRestrictionManager -} - -func (p *BaggageRestrictionManagerProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { - p.processorMap[key] = processor -} - -func (p *BaggageRestrictionManagerProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { - processor, ok = p.processorMap[key] - return processor, ok -} - -func (p *BaggageRestrictionManagerProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { - return p.processorMap -} - -func NewBaggageRestrictionManagerProcessor(handler BaggageRestrictionManager) *BaggageRestrictionManagerProcessor { - - self3 := &BaggageRestrictionManagerProcessor{handler:handler, processorMap:make(map[string]thrift.TProcessorFunction)} - self3.processorMap["getBaggageRestrictions"] = &baggageRestrictionManagerProcessorGetBaggageRestrictions{handler:handler} -return self3 -} - -func (p *BaggageRestrictionManagerProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - name, _, seqId, err2 := iprot.ReadMessageBegin(ctx) - if err2 != nil { return false, thrift.WrapTException(err2) } - if processor, ok := p.GetProcessorFunction(name); ok { - return processor.Process(ctx, seqId, iprot, oprot) - } - iprot.Skip(ctx, thrift.STRUCT) - iprot.ReadMessageEnd(ctx) - x4 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function " + name) - oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId) - x4.Write(ctx, oprot) - oprot.WriteMessageEnd(ctx) - oprot.Flush(ctx) - return false, x4 - -} - -type baggageRestrictionManagerProcessorGetBaggageRestrictions struct { - handler BaggageRestrictionManager -} - -func (p *baggageRestrictionManagerProcessorGetBaggageRestrictions) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - var _write_err5 error - args := BaggageRestrictionManagerGetBaggageRestrictionsArgs{} - if err2 := args.Read(ctx, iprot); err2 != nil { - iprot.ReadMessageEnd(ctx) - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error()) - oprot.WriteMessageBegin(ctx, "getBaggageRestrictions", thrift.EXCEPTION, seqId) - x.Write(ctx, oprot) - oprot.WriteMessageEnd(ctx) - oprot.Flush(ctx) - return false, thrift.WrapTException(err2) - } - iprot.ReadMessageEnd(ctx) - - tickerCancel := func() {} - // Start a goroutine to do server side connectivity check. 
- if thrift.ServerConnectivityCheckInterval > 0 { - var cancel context.CancelCauseFunc - ctx, cancel = context.WithCancelCause(ctx) - defer cancel(nil) - var tickerCtx context.Context - tickerCtx, tickerCancel = context.WithCancel(context.Background()) - defer tickerCancel() - go func(ctx context.Context, cancel context.CancelCauseFunc) { - ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval) - defer ticker.Stop() - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - if !iprot.Transport().IsOpen() { - cancel(thrift.ErrAbandonRequest) - return - } - } - } - }(tickerCtx, cancel) - } - - result := BaggageRestrictionManagerGetBaggageRestrictionsResult{} - if retval, err2 := p.handler.GetBaggageRestrictions(ctx, args.ServiceName); err2 != nil { - tickerCancel() - err = thrift.WrapTException(err2) - if errors.Is(err2, thrift.ErrAbandonRequest) { - return false, thrift.WrapTException(err2) - } - if errors.Is(err2, context.Canceled) { - if err := context.Cause(ctx); errors.Is(err, thrift.ErrAbandonRequest) { - return false, thrift.WrapTException(err) - } - } - _exc6 := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getBaggageRestrictions: " + err2.Error()) - if err2 := oprot.WriteMessageBegin(ctx, "getBaggageRestrictions", thrift.EXCEPTION, seqId); err2 != nil { - _write_err5 = thrift.WrapTException(err2) - } - if err2 := _exc6.Write(ctx, oprot); _write_err5 == nil && err2 != nil { - _write_err5 = thrift.WrapTException(err2) - } - if err2 := oprot.WriteMessageEnd(ctx); _write_err5 == nil && err2 != nil { - _write_err5 = thrift.WrapTException(err2) - } - if err2 := oprot.Flush(ctx); _write_err5 == nil && err2 != nil { - _write_err5 = thrift.WrapTException(err2) - } - if _write_err5 != nil { - return false, thrift.WrapTException(_write_err5) - } - return true, err - } else { - result.Success = retval - } - tickerCancel() - if err2 := oprot.WriteMessageBegin(ctx, "getBaggageRestrictions", thrift.REPLY, seqId); err2 != nil { - _write_err5 = thrift.WrapTException(err2) - } - if err2 := result.Write(ctx, oprot); _write_err5 == nil && err2 != nil { - _write_err5 = thrift.WrapTException(err2) - } - if err2 := oprot.WriteMessageEnd(ctx); _write_err5 == nil && err2 != nil { - _write_err5 = thrift.WrapTException(err2) - } - if err2 := oprot.Flush(ctx); _write_err5 == nil && err2 != nil { - _write_err5 = thrift.WrapTException(err2) - } - if _write_err5 != nil { - return false, thrift.WrapTException(_write_err5) - } - return true, err -} - - -// HELPER FUNCTIONS AND STRUCTURES - -// Attributes: -// - ServiceName -type BaggageRestrictionManagerGetBaggageRestrictionsArgs struct { - ServiceName string `thrift:"serviceName,1" db:"serviceName" json:"serviceName"` -} - -func NewBaggageRestrictionManagerGetBaggageRestrictionsArgs() *BaggageRestrictionManagerGetBaggageRestrictionsArgs { - return &BaggageRestrictionManagerGetBaggageRestrictionsArgs{} -} - - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) GetServiceName() string { - return p.ServiceName -} -func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - 
case 1: - if fieldTypeId == thrift.STRING { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - p.ServiceName = v -} - return nil -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "getBaggageRestrictions_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "serviceName", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err) } - if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err) } - return err -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("BaggageRestrictionManagerGetBaggageRestrictionsArgs(%+v)", *p) -} - -// Attributes: -// - Success -type BaggageRestrictionManagerGetBaggageRestrictionsResult struct { - Success []*BaggageRestriction `thrift:"success,0" db:"success" json:"success,omitempty"` -} - -func NewBaggageRestrictionManagerGetBaggageRestrictionsResult() *BaggageRestrictionManagerGetBaggageRestrictionsResult { - return &BaggageRestrictionManagerGetBaggageRestrictionsResult{} -} - -var BaggageRestrictionManagerGetBaggageRestrictionsResult_Success_DEFAULT []*BaggageRestriction - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) GetSuccess() []*BaggageRestriction { - return p.Success -} -func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 0: - if fieldTypeId == thrift.LIST { - if 
err := p.ReadField0(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*BaggageRestriction, 0, size) - p.Success = tSlice - for i := 0; i < size; i ++ { - _elem7 := &BaggageRestriction{} - if err := _elem7.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem7), err) - } - p.Success = append(p.Success, _elem7) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "getBaggageRestrictions_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField0(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin(ctx, "success", thrift.LIST, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Success)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Success { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) } - } - return err -} - -func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("BaggageRestrictionManagerGetBaggageRestrictionsResult(%+v)", *p) -} - - diff --git a/vendor/github.com/jedib0t/go-pretty/v6/table/README.md b/vendor/github.com/jedib0t/go-pretty/v6/table/README.md index a446a5eec1a..3375bd3c7d3 100644 --- a/vendor/github.com/jedib0t/go-pretty/v6/table/README.md +++ b/vendor/github.com/jedib0t/go-pretty/v6/table/README.md @@ -7,12 +7,13 @@ Pretty-print tables into ASCII/Unicode strings. - Add Header(s) and Footer(s) (`AppendHeader`/`AppendFooter`) - Add a Separator manually after any Row (`AppendSeparator`) - Auto Index Rows (1, 2, 3 ...) and Columns (A, B, C, ...) 
(`SetAutoIndex`) - - Auto Merge + - Auto Merge (_not supported in CSV/Markdown/TSV modes_) - Cells in a Row (`RowConfig.AutoMerge`) - - Columns (`ColumnConfig.AutoMerge`) + - Columns (`ColumnConfig.AutoMerge`) (_not supported in HTML mode_) - Limit the length of - Rows (`SetAllowedRowLength`) - Columns (`ColumnConfig.Width*`) + - Auto-size Rows (`Style().Size.WidthMin` and `Style().Size.WidthMax`) - Page results by a specified number of Lines (`SetPageSize`) - Alignment - Horizontal & Vertical - Auto (horizontal) Align (numeric columns aligned Right) @@ -21,6 +22,7 @@ Pretty-print tables into ASCII/Unicode strings. - Mirror output to an `io.Writer` (ex. `os.StdOut`) (`SetOutputMirror`) - Sort by one or more Columns (`SortBy`) - Suppress/hide columns with no content (`SuppressEmptyColumns`) + - Suppress trailing spaces in the last column (`SuppressTrailingSpaces`) - Customizable Cell rendering per Column (`ColumnConfig.Transformer*`) - Hide any columns that you don't want displayed (`ColumnConfig.Hidden`) - Reset Headers/Rows/Footers at will to reuse the same Table Writer (`Reset*`) @@ -36,7 +38,7 @@ Pretty-print tables into ASCII/Unicode strings. - CSV - HTML Table (with custom CSS Class) - Markdown Table - + - TSV ``` +---------------------------------------------------------------------+ @@ -57,6 +59,11 @@ A demonstration of all the capabilities can be found here: If you want very specific examples, read ahead. +**Hint**: I've tried to ensure that almost all supported use-cases are covered +by unit-tests and that they print the table rendered. Run +`go test -v github.com/jedib0t/go-pretty/v6/table` to see the test outputs and +help you figure out how to do something. + # Examples All the examples below are going to start with the following block, although @@ -201,7 +208,7 @@ it specifically for each row/column using `RowConfig` or `ColumnConfig`. t := table.NewWriter() t.AppendHeader(table.Row{"Node IP", "Pods", "Namespace", "Container", "RCE", "RCE"}, rowConfigAutoMerge) - t.AppendHeader(table.Row{"", "", "", "", "EXE", "RUN"}) + t.AppendHeader(table.Row{"Node IP", "Pods", "Namespace", "Container", "EXE", "RUN"}) t.AppendRow(table.Row{"1.1.1.1", "Pod 1A", "NS 1A", "C 1", "Y", "Y"}, rowConfigAutoMerge) t.AppendRow(table.Row{"1.1.1.1", "Pod 1A", "NS 1A", "C 2", "Y", "N"}, rowConfigAutoMerge) t.AppendRow(table.Row{"1.1.1.1", "Pod 1A", "NS 1B", "C 3", "N", "N"}, rowConfigAutoMerge) @@ -219,7 +226,6 @@ it specifically for each row/column using `RowConfig` or `ColumnConfig`. {Number: 5, Align: text.AlignCenter, AlignFooter: text.AlignCenter, AlignHeader: text.AlignCenter}, {Number: 6, Align: text.AlignCenter, AlignFooter: text.AlignCenter, AlignHeader: text.AlignCenter}, }) - t.SetOutputMirror(os.Stdout) t.SetStyle(table.StyleLight) t.Style().Options.SeparateRows = true fmt.Println(t.Render()) diff --git a/vendor/github.com/jedib0t/go-pretty/v6/table/config.go b/vendor/github.com/jedib0t/go-pretty/v6/table/config.go index 6eb7ce7d2e2..fb96d708643 100644 --- a/vendor/github.com/jedib0t/go-pretty/v6/table/config.go +++ b/vendor/github.com/jedib0t/go-pretty/v6/table/config.go @@ -71,7 +71,7 @@ type ColumnConfig struct { } func (c ColumnConfig) getWidthMaxEnforcer() WidthEnforcer { - if c.WidthMax == 0 { + if c.WidthMax <= 0 { return widthEnforcerNone } if c.WidthMaxEnforcer != nil { @@ -85,8 +85,19 @@ func (c ColumnConfig) getWidthMaxEnforcer() WidthEnforcer { type RowConfig struct { // AutoMerge merges cells with similar values and prevents separators from // being drawn.
Caveats: - // * Align is overridden to text.AlignCenter on the merged cell + // * Align is overridden to text.AlignCenter on the merged cell (unless set + // by AutoMergeAlign value below) // * Does not work in CSV/HTML/Markdown render modes // * Does not work well with vertical auto-merge (ColumnConfig.AutoMerge) AutoMerge bool + + // Alignment to use on a merge (defaults to text.AlignCenter) + AutoMergeAlign text.Align +} + +func (rc RowConfig) getAutoMergeAlign() text.Align { + if rc.AutoMergeAlign == text.AlignDefault { + return text.AlignCenter + } + return rc.AutoMergeAlign } diff --git a/vendor/github.com/jedib0t/go-pretty/v6/table/pager.go b/vendor/github.com/jedib0t/go-pretty/v6/table/pager.go new file mode 100644 index 00000000000..f4585b29721 --- /dev/null +++ b/vendor/github.com/jedib0t/go-pretty/v6/table/pager.go @@ -0,0 +1,70 @@ +package table + +import ( + "io" +) + +// Pager lets you interact with the table rendering in a paged manner. +type Pager interface { + // GoTo moves to the given 1-indexed page number. + GoTo(pageNum int) string + // Location returns the current page number in 1-indexed form. + Location() int + // Next moves to the next available page and returns the same. + Next() string + // Prev moves to the previous available page and returns the same. + Prev() string + // Render returns the current page. + Render() string + // SetOutputMirror sets up the writer to which Render() will write the + // output other than returning. + SetOutputMirror(mirror io.Writer) +} + +type pager struct { + index int // 0-indexed + pages []string + outputMirror io.Writer + size int +} + +func (p *pager) GoTo(pageNum int) string { + if pageNum < 1 { + pageNum = 1 + } + if pageNum > len(p.pages) { + pageNum = len(p.pages) + } + p.index = pageNum - 1 + return p.pages[p.index] +} + +func (p *pager) Location() int { + return p.index + 1 +} + +func (p *pager) Next() string { + if p.index < len(p.pages)-1 { + p.index++ + } + return p.pages[p.index] +} + +func (p *pager) Prev() string { + if p.index > 0 { + p.index-- + } + return p.pages[p.index] +} + +func (p *pager) Render() string { + pageToWrite := p.pages[p.index] + if p.outputMirror != nil { + _, _ = p.outputMirror.Write([]byte(pageToWrite)) + } + return pageToWrite +} + +func (p *pager) SetOutputMirror(mirror io.Writer) { + p.outputMirror = mirror +} diff --git a/vendor/github.com/jedib0t/go-pretty/v6/table/pager_options.go b/vendor/github.com/jedib0t/go-pretty/v6/table/pager_options.go new file mode 100644 index 00000000000..f46ebc12806 --- /dev/null +++ b/vendor/github.com/jedib0t/go-pretty/v6/table/pager_options.go @@ -0,0 +1,11 @@ +package table + +// PagerOption helps control Paging. +type PagerOption func(t *Table) + +// PageSize sets the size of each page rendered. +func PageSize(pageSize int) PagerOption { + return func(t *Table) { + t.pager.size = pageSize + } +} diff --git a/vendor/github.com/jedib0t/go-pretty/v6/table/render.go b/vendor/github.com/jedib0t/go-pretty/v6/table/render.go index 2fa1772ed6c..2fd759e62a7 100644 --- a/vendor/github.com/jedib0t/go-pretty/v6/table/render.go +++ b/vendor/github.com/jedib0t/go-pretty/v6/table/render.go @@ -9,15 +9,16 @@ import ( ) // Render renders the Table in a human-readable "pretty" format. 
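The new Pager interface and PagerOption just above add stateful paging over the rendered output: the table is pre-rendered into pages, and the Pager navigates between them. A short sketch of driving it, assuming the accompanying change exposes a Table.Pager constructor on the writer that accepts these options (consistent with the PagerOption signature):

```go
package main

import (
	"fmt"

	"github.com/jedib0t/go-pretty/v6/table"
)

func main() {
	t := table.NewWriter()
	t.AppendHeader(table.Row{"#", "Name"})
	for i := 1; i <= 9; i++ {
		t.AppendRow(table.Row{i, fmt.Sprintf("row %d", i)})
	}

	// PageSize(3) sets t.pager.size, so each page holds 3 data lines.
	// t.Pager(...) is assumed here; it is not shown in this hunk.
	p := t.Pager(table.PageSize(3))
	fmt.Println(p.Render())   // page 1
	fmt.Println(p.Next())     // page 2
	fmt.Println(p.GoTo(1))    // 1-indexed jump, clamped to valid pages
	fmt.Println(p.Location()) // 1
}
```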
Example: -// ┌─────┬────────────┬───────────┬────────┬─────────────────────────────┐ -// │ # │ FIRST NAME │ LAST NAME │ SALARY │ │ -// ├─────┼────────────┼───────────┼────────┼─────────────────────────────┤ -// │ 1 │ Arya │ Stark │ 3000 │ │ -// │ 20 │ Jon │ Snow │ 2000 │ You know nothing, Jon Snow! │ -// │ 300 │ Tyrion │ Lannister │ 5000 │ │ -// ├─────┼────────────┼───────────┼────────┼─────────────────────────────┤ -// │ │ │ TOTAL │ 10000 │ │ -// └─────┴────────────┴───────────┴────────┴─────────────────────────────┘ +// +// ┌─────┬────────────┬───────────┬────────┬─────────────────────────────┐ +// │ # │ FIRST NAME │ LAST NAME │ SALARY │ │ +// ├─────┼────────────┼───────────┼────────┼─────────────────────────────┤ +// │ 1 │ Arya │ Stark │ 3000 │ │ +// │ 20 │ Jon │ Snow │ 2000 │ You know nothing, Jon Snow! │ +// │ 300 │ Tyrion │ Lannister │ 5000 │ │ +// ├─────┼────────────┼───────────┼────────┼─────────────────────────────┤ +// │ │ │ TOTAL │ 10000 │ │ +// └─────┴────────────┴───────────┴────────┴─────────────────────────────┘ func (t *Table) Render() string { t.initForRender() @@ -50,7 +51,7 @@ func (t *Table) Render() string { } func (t *Table) renderColumn(out *strings.Builder, row rowStr, colIdx int, maxColumnLength int, hint renderHint) int { - numColumnsRenderer := 1 + numColumnsRendered := 1 // when working on the first column, and autoIndex is true, insert a new // column with the row number on it. @@ -78,16 +79,18 @@ func (t *Table) renderColumn(out *strings.Builder, row rowStr, colIdx int, maxCo // if horizontal cell merges are enabled, look ahead and see how many cells // have the same content and merge them all until a cell with a different // content is found; override alignment to Center in this case - if t.getRowConfig(hint).AutoMerge && !hint.isSeparatorRow { - for idx := colIdx + 1; idx < len(row); idx++ { - if row[colIdx] != row[idx] { + rowConfig := t.getRowConfig(hint) + if rowConfig.AutoMerge && !hint.isSeparatorRow { + // get the real row to consider all lines in each column instead of just + // looking at the current "line" + rowUnwrapped := t.getRow(hint.rowNumber-1, hint) + for idx := colIdx + 1; idx < len(rowUnwrapped); idx++ { + if rowUnwrapped[colIdx] != rowUnwrapped[idx] { break } - align = text.AlignCenter - maxColumnLength += t.maxColumnLengths[idx] + - text.RuneCount(t.style.Box.PaddingRight+t.style.Box.PaddingLeft) + - text.RuneCount(t.style.Box.PaddingRight) - numColumnsRenderer++ + align = rowConfig.getAutoMergeAlign() + maxColumnLength += t.getMaxColumnLengthForMerging(idx) + numColumnsRendered++ } } colStr = align.Apply(colStr, maxColumnLength) @@ -99,7 +102,7 @@ func (t *Table) renderColumn(out *strings.Builder, row rowStr, colIdx int, maxCo t.renderColumnColorized(out, colIdx, colStr, hint) - return colIdx + numColumnsRenderer + return colIdx + numColumnsRendered } func (t *Table) renderColumnAutoIndex(out *strings.Builder, hint renderHint) { @@ -180,9 +183,9 @@ func (t *Table) renderLine(out *strings.Builder, row rowStr, hint renderHint) { out.WriteRune('\n') } - // use a brand new strings.Builder if a row length limit has been set + // use a brand-new strings.Builder if a row length limit has been set var outLine *strings.Builder - if t.allowedRowLength > 0 { + if t.style.Size.WidthMax > 0 { outLine = &strings.Builder{} } else { outLine = out @@ -202,54 +205,43 @@ func (t *Table) renderLine(out *strings.Builder, row rowStr, hint renderHint) { // merge the strings.Builder objects if a new one was created earlier if outLine != out { - outLineStr := 
outLine.String() - if text.RuneCount(outLineStr) > t.allowedRowLength { - trimLength := t.allowedRowLength - utf8.RuneCountInString(t.style.Box.UnfinishedRow) - if trimLength > 0 { - out.WriteString(text.Trim(outLineStr, trimLength)) - out.WriteString(t.style.Box.UnfinishedRow) - } - } else { - out.WriteString(outLineStr) - } + t.renderLineMergeOutputs(out, outLine) } + t.firstRowOfPage = false // if a page size has been set, and said number of lines has already // been rendered, and the header is not being rendered right now, render // the header all over again with a spacing line - if hint.isRegularRow() { + if hint.isRegularNonSeparatorRow() { t.numLinesRendered++ - if t.pageSize > 0 && t.numLinesRendered%t.pageSize == 0 && !hint.isLastLineOfLastRow() { + if t.pager.size > 0 && t.numLinesRendered%t.pager.size == 0 && !hint.isLastLineOfLastRow() { t.renderRowsFooter(out) t.renderRowsBorderBottom(out) out.WriteString(t.style.Box.PageSeparator) t.renderRowsBorderTop(out) t.renderRowsHeader(out) + t.firstRowOfPage = true } } } -func (t *Table) renderMarginLeft(out *strings.Builder, hint renderHint) { - if t.style.Options.DrawBorder { - border := t.style.Box.Left - if hint.isBorderTop { - if t.title != "" { - border = t.style.Box.LeftSeparator - } else { - border = t.style.Box.TopLeft - } - } else if hint.isBorderBottom { - border = t.style.Box.BottomLeft - } else if hint.isSeparatorRow { - if t.autoIndex && hint.isHeaderOrFooterSeparator() { - border = t.style.Box.Left - } else if !t.autoIndex && t.shouldMergeCellsVertically(0, hint) { - border = t.style.Box.Left - } else { - border = t.style.Box.LeftSeparator - } +func (t *Table) renderLineMergeOutputs(out *strings.Builder, outLine *strings.Builder) { + outLineStr := outLine.String() + if text.StringWidthWithoutEscSequences(outLineStr) > t.style.Size.WidthMax { + trimLength := t.style.Size.WidthMax - utf8.RuneCountInString(t.style.Box.UnfinishedRow) + if trimLength > 0 { + out.WriteString(text.Trim(outLineStr, trimLength)) + out.WriteString(t.style.Box.UnfinishedRow) } + } else { + out.WriteString(outLineStr) + } +} +func (t *Table) renderMarginLeft(out *strings.Builder, hint renderHint) { + out.WriteString(t.style.Format.Direction.Modifier()) + if t.style.Options.DrawBorder { + border := t.getBorderLeft(hint) colors := t.getBorderColors(hint) if colors.EscapeSeq() != "" { out.WriteString(colors.Sprint(border)) @@ -261,23 +253,7 @@ func (t *Table) renderMarginLeft(out *strings.Builder, hint renderHint) { func (t *Table) renderMarginRight(out *strings.Builder, hint renderHint) { if t.style.Options.DrawBorder { - border := t.style.Box.Right - if hint.isBorderTop { - if t.title != "" { - border = t.style.Box.RightSeparator - } else { - border = t.style.Box.TopRight - } - } else if hint.isBorderBottom { - border = t.style.Box.BottomRight - } else if hint.isSeparatorRow { - if t.shouldMergeCellsVertically(t.numColumns-1, hint) { - border = t.style.Box.Right - } else { - border = t.style.Box.RightSeparator - } - } - + border := t.getBorderRight(hint) colors := t.getBorderColors(hint) if colors.EscapeSeq() != "" { out.WriteString(colors.Sprint(border)) @@ -292,16 +268,7 @@ func (t *Table) renderRow(out *strings.Builder, row rowStr, hint renderHint) { // fit every column into the allowedColumnLength/maxColumnLength limit // and in the process find the max. 
number of lines in any column in // this row - colMaxLines := 0 - rowWrapped := make(rowStr, len(row)) - for colIdx, colStr := range row { - widthEnforcer := t.columnConfigMap[colIdx].getWidthMaxEnforcer() - rowWrapped[colIdx] = widthEnforcer(colStr, t.maxColumnLengths[colIdx]) - colNumLines := strings.Count(rowWrapped[colIdx], "\n") + 1 - if colNumLines > colMaxLines { - colMaxLines = colNumLines - } - } + colMaxLines, rowWrapped := t.wrapRow(row) // if there is just 1 line in all columns, add the row as such; else // split each column into individual lines and render them one-by-one @@ -348,8 +315,7 @@ func (t *Table) renderRows(out *strings.Builder, rows []rowStr, hint renderHint) hint.rowNumber = rowIdx + 1 t.renderRow(out, row, hint) - if (t.style.Options.SeparateRows && rowIdx < len(rows)-1) || // last row before footer - (t.separators[rowIdx] && rowIdx != len(rows)-1) { // manually added separator not after last row + if t.shouldSeparateRows(rowIdx, len(rows)) { hint.isFirstRow = false t.renderRowSeparator(out, hint) } @@ -357,11 +323,35 @@ func (t *Table) renderRows(out *strings.Builder, rows []rowStr, hint renderHint) } func (t *Table) renderRowsBorderBottom(out *strings.Builder) { - t.renderRowSeparator(out, renderHint{isBorderBottom: true, isFooterRow: true}) + if len(t.rowsFooter) > 0 { + t.renderRowSeparator(out, renderHint{ + isBorderBottom: true, + isFooterRow: true, + rowNumber: len(t.rowsFooter), + }) + } else { + t.renderRowSeparator(out, renderHint{ + isBorderBottom: true, + isFooterRow: false, + rowNumber: len(t.rows), + }) + } } func (t *Table) renderRowsBorderTop(out *strings.Builder) { - t.renderRowSeparator(out, renderHint{isBorderTop: true, isHeaderRow: true}) + if len(t.rowsHeader) > 0 || t.autoIndex { + t.renderRowSeparator(out, renderHint{ + isBorderTop: true, + isHeaderRow: true, + rowNumber: 0, + }) + } else { + t.renderRowSeparator(out, renderHint{ + isBorderTop: true, + isHeaderRow: false, + rowNumber: 0, + }) + } } func (t *Table) renderRowsFooter(out *strings.Builder) { @@ -377,55 +367,62 @@ func (t *Table) renderRowsFooter(out *strings.Builder) { func (t *Table) renderRowsHeader(out *strings.Builder) { if len(t.rowsHeader) > 0 || t.autoIndex { + hintSeparator := renderHint{isHeaderRow: true, isLastRow: true, isSeparatorRow: true} + if len(t.rowsHeader) > 0 { t.renderRows(out, t.rowsHeader, renderHint{isHeaderRow: true}) + hintSeparator.rowNumber = len(t.rowsHeader) } else if t.autoIndex { t.renderRow(out, t.getAutoIndexColumnIDs(), renderHint{isAutoIndexRow: true, isHeaderRow: true}) + hintSeparator.rowNumber = 1 } - t.renderRowSeparator(out, renderHint{ - isHeaderRow: true, - isLastRow: true, - isSeparatorRow: true, - rowNumber: len(t.rowsHeader), - }) + t.renderRowSeparator(out, hintSeparator) } } func (t *Table) renderTitle(out *strings.Builder) { if t.title != "" { + colors := t.style.Title.Colors + colorsBorder := t.getBorderColors(renderHint{isTitleRow: true}) rowLength := t.maxRowLength - if t.allowedRowLength != 0 && t.allowedRowLength < rowLength { - rowLength = t.allowedRowLength + if wm := t.style.Size.WidthMax; wm > 0 && wm < rowLength { + rowLength = wm + } + if wm := t.style.Size.WidthMin; wm > 0 && wm > rowLength { + rowLength = wm } if t.style.Options.DrawBorder { - lenBorder := rowLength - text.RuneCount(t.style.Box.TopLeft+t.style.Box.TopRight) - out.WriteString(t.style.Box.TopLeft) - out.WriteString(text.RepeatAndTrim(t.style.Box.MiddleHorizontal, lenBorder)) - out.WriteString(t.style.Box.TopRight) + lenBorder := rowLength - 
text.StringWidthWithoutEscSequences(t.style.Box.TopLeft+t.style.Box.TopRight) + out.WriteString(colorsBorder.Sprint(t.style.Box.TopLeft)) + out.WriteString(colorsBorder.Sprint(text.RepeatAndTrim(t.style.Box.MiddleHorizontal, lenBorder))) + out.WriteString(colorsBorder.Sprint(t.style.Box.TopRight)) } - lenText := rowLength - text.RuneCount(t.style.Box.PaddingLeft+t.style.Box.PaddingRight) + lenText := rowLength - text.StringWidthWithoutEscSequences(t.style.Box.PaddingLeft+t.style.Box.PaddingRight) if t.style.Options.DrawBorder { - lenText -= text.RuneCount(t.style.Box.Left + t.style.Box.Right) + lenText -= text.StringWidthWithoutEscSequences(t.style.Box.Left + t.style.Box.Right) } titleText := text.WrapText(t.title, lenText) for _, titleLine := range strings.Split(titleText, "\n") { - titleLine = strings.TrimSpace(titleLine) - titleLine = t.style.Title.Format.Apply(titleLine) - titleLine = t.style.Title.Align.Apply(titleLine, lenText) - titleLine = t.style.Box.PaddingLeft + titleLine + t.style.Box.PaddingRight - titleLine = t.style.Title.Colors.Sprint(titleLine) - - if out.Len() > 0 { - out.WriteRune('\n') - } - if t.style.Options.DrawBorder { - out.WriteString(t.style.Box.Left) - } - out.WriteString(titleLine) - if t.style.Options.DrawBorder { - out.WriteString(t.style.Box.Right) - } + t.renderTitleLine(out, lenText, titleLine, colors, colorsBorder) } } } + +func (t *Table) renderTitleLine(out *strings.Builder, lenText int, titleLine string, colors text.Colors, colorsBorder text.Colors) { + titleLine = strings.TrimSpace(titleLine) + titleLine = t.style.Title.Format.Apply(titleLine) + titleLine = t.style.Title.Align.Apply(titleLine, lenText) + titleLine = t.style.Box.PaddingLeft + titleLine + t.style.Box.PaddingRight + + if out.Len() > 0 { + out.WriteRune('\n') + } + if t.style.Options.DrawBorder { + out.WriteString(colorsBorder.Sprint(t.style.Box.Left)) + } + out.WriteString(colors.Sprint(titleLine)) + if t.style.Options.DrawBorder { + out.WriteString(colorsBorder.Sprint(t.style.Box.Right)) + } +} diff --git a/vendor/github.com/jedib0t/go-pretty/v6/table/render_csv.go b/vendor/github.com/jedib0t/go-pretty/v6/table/render_csv.go index 07da673e889..831194ef330 100644 --- a/vendor/github.com/jedib0t/go-pretty/v6/table/render_csv.go +++ b/vendor/github.com/jedib0t/go-pretty/v6/table/render_csv.go @@ -7,11 +7,12 @@ import ( ) // RenderCSV renders the Table in CSV format. Example: -// #,First Name,Last Name,Salary, -// 1,Arya,Stark,3000, -// 20,Jon,Snow,2000,"You know nothing\, Jon Snow!" -// 300,Tyrion,Lannister,5000, -// ,,Total,10000, +// +// #,First Name,Last Name,Salary, +// 1,Arya,Stark,3000, +// 20,Jon,Snow,2000,"You know nothing\, Jon Snow!" +// 300,Tyrion,Lannister,5000, +// ,,Total,10000, func (t *Table) RenderCSV() string { t.initForRender() diff --git a/vendor/github.com/jedib0t/go-pretty/v6/table/render_hint.go b/vendor/github.com/jedib0t/go-pretty/v6/table/render_hint.go new file mode 100644 index 00000000000..e46cdc845b0 --- /dev/null +++ b/vendor/github.com/jedib0t/go-pretty/v6/table/render_hint.go @@ -0,0 +1,39 @@ +package table + +// renderHint has hints for the Render*() logic +type renderHint struct { + isAutoIndexColumn bool // auto-index column? + isAutoIndexRow bool // auto-index row? + isBorderBottom bool // bottom-border? + isBorderTop bool // top-border? + isFirstRow bool // first-row of header/footer/regular-rows? + isFooterRow bool // footer row? + isHeaderRow bool // header row? + isLastLineOfRow bool // last-line of the current row? 
+ isLastRow bool // last-row of header/footer/regular-rows? + isSeparatorRow bool // separator row? + isTitleRow bool // title row? + rowLineNumber int // the line number for a multi-line row + rowNumber int // the row number/index +} + +func (h *renderHint) isBorderOrSeparator() bool { + return h.isBorderTop || h.isSeparatorRow || h.isBorderBottom +} + +func (h *renderHint) isRegularRow() bool { + return !h.isHeaderRow && !h.isFooterRow +} + +func (h *renderHint) isRegularNonSeparatorRow() bool { + return !h.isHeaderRow && !h.isFooterRow && !h.isSeparatorRow +} + +func (h *renderHint) isHeaderOrFooterSeparator() bool { + return h.isSeparatorRow && !h.isBorderBottom && !h.isBorderTop && + ((h.isHeaderRow && !h.isLastRow) || (h.isFooterRow && (!h.isFirstRow || h.rowNumber > 0))) +} + +func (h *renderHint) isLastLineOfLastRow() bool { + return h.isLastLineOfRow && h.isLastRow +} diff --git a/vendor/github.com/jedib0t/go-pretty/v6/table/render_html.go b/vendor/github.com/jedib0t/go-pretty/v6/table/render_html.go index 3f550dd430b..decf4fa0dbb 100644 --- a/vendor/github.com/jedib0t/go-pretty/v6/table/render_html.go +++ b/vendor/github.com/jedib0t/go-pretty/v6/table/render_html.go @@ -4,6 +4,8 @@ import ( "fmt" "html" "strings" + + "github.com/jedib0t/go-pretty/v6/text" ) const ( @@ -13,49 +15,50 @@ const ( ) // RenderHTML renders the Table in HTML format. Example: -//
<table class="go-pretty-table"> -// <thead> -// <tr> -// <th align="right">#</th> -// <th>First Name</th> -// <th>Last Name</th> -// <th align="right">Salary</th> -// <th>&nbsp;</th> -// </tr> -// </thead> -// <tbody> -// <tr> -// <td align="right">1</td> -// <td>Arya</td> -// <td>Stark</td> -// <td align="right">3000</td> -// <td>&nbsp;</td> -// </tr> -// <tr> -// <td align="right">20</td> -// <td>Jon</td> -// <td>Snow</td> -// <td align="right">2000</td> -// <td>You know nothing, Jon Snow!</td> -// </tr> -// <tr> -// <td align="right">300</td> -// <td>Tyrion</td> -// <td>Lannister</td> -// <td align="right">5000</td> -// <td>&nbsp;</td> -// </tr> -// </tbody> -// <tfoot> -// <tr> -// <td align="right">&nbsp;</td> -// <td>&nbsp;</td> -// <td>Total</td> -// <td align="right">10000</td> -// <td>&nbsp;</td> -// </tr> -// </tfoot> -// </table>
+// +// <table class="go-pretty-table"> +// <thead> +// <tr> +// <th align="right">#</th> +// <th>First Name</th> +// <th>Last Name</th> +// <th align="right">Salary</th> +// <th>&nbsp;</th> +// </tr> +// </thead> +// <tbody> +// <tr> +// <td align="right">1</td> +// <td>Arya</td> +// <td>Stark</td> +// <td align="right">3000</td> +// <td>&nbsp;</td> +// </tr> +// <tr> +// <td align="right">20</td> +// <td>Jon</td> +// <td>Snow</td> +// <td align="right">2000</td> +// <td>You know nothing, Jon Snow!</td> +// </tr> +// <tr> +// <td align="right">300</td> +// <td>Tyrion</td> +// <td>Lannister</td> +// <td align="right">5000</td> +// <td>&nbsp;</td> +// </tr> +// </tbody> +// <tfoot> +// <tr> +// <td align="right">&nbsp;</td> +// <td>&nbsp;</td> +// <td>Total</td> +// <td align="right">10000</td> +// <td>&nbsp;</td> +// </tr> +// </tfoot> +// </table>
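One user-visible effect of the htmlRenderRow changes below: horizontally auto-merged cells are now emitted as a single cell with a colspan attribute instead of repeated cells. A small sketch using only APIs shown in this diff:

```go
package main

import (
	"fmt"

	"github.com/jedib0t/go-pretty/v6/table"
)

func main() {
	t := table.NewWriter()
	t.AppendHeader(table.Row{"Node IP", "Pods", "RCE EXE", "RCE RUN"})
	// With RowConfig.AutoMerge, the two equal "Y" cells are written as a
	// single <td ... colspan=2> in the HTML output instead of two cells.
	t.AppendRow(table.Row{"1.1.1.1", "Pod 1A", "Y", "Y"}, table.RowConfig{AutoMerge: true})
	fmt.Println(t.RenderHTML())
}
```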
func (t *Table) RenderHTML() string { t.initForRender() @@ -78,6 +81,22 @@ func (t *Table) RenderHTML() string { return t.render(&out) } +func (t *Table) htmlGetColStrAndTag(row rowStr, colIdx int, hint renderHint) (string, string) { + // get the column contents + var colStr string + if colIdx < len(row) { + colStr = row[colIdx] + } + + // header uses "th" instead of "td" + colTagName := "td" + if hint.isHeaderRow { + colTagName = "th" + } + + return colStr, colTagName +} + func (t *Table) htmlRenderCaption(out *strings.Builder) { if t.caption != "" { out.WriteString(" ") @@ -86,9 +105,19 @@ func (t *Table) htmlRenderCaption(out *strings.Builder) { } } -func (t *Table) htmlRenderColumnAttributes(out *strings.Builder, row rowStr, colIdx int, hint renderHint) { +func (t *Table) htmlRenderColumn(out *strings.Builder, colStr string) { + if t.style.HTML.EscapeText { + colStr = html.EscapeString(colStr) + } + if t.style.HTML.Newline != "\n" { + colStr = strings.Replace(colStr, "\n", t.style.HTML.Newline, -1) + } + out.WriteString(colStr) +} + +func (t *Table) htmlRenderColumnAttributes(out *strings.Builder, colIdx int, hint renderHint, alignOverride text.Align) { // determine the HTML "align"/"valign" property values - align := t.getAlign(colIdx, hint).HTMLProperty() + align := alignOverride.HTMLProperty() vAlign := t.getVAlign(colIdx, hint).HTMLProperty() // determine the HTML "class" property values for the colors class := t.getColumnColors(colIdx, hint).HTMLProperty() @@ -131,37 +160,41 @@ func (t *Table) htmlRenderRow(out *strings.Builder, row rowStr, hint renderHint) t.htmlRenderColumnAutoIndex(out, hint) } - // get the column contents - var colStr string - if colIdx < len(row) { - colStr = row[colIdx] - } - - // header uses "th" instead of "td" - colTagName := "td" - if hint.isHeaderRow { - colTagName = "th" + align := t.getAlign(colIdx, hint) + rowConfig := t.getRowConfig(hint) + extraColumnsRendered := 0 + if rowConfig.AutoMerge && !hint.isSeparatorRow { + // get the real row to consider all lines in each column instead of just + // looking at the current "line" + rowUnwrapped := t.getRow(hint.rowNumber-1, hint) + for idx := colIdx + 1; idx < len(rowUnwrapped); idx++ { + if rowUnwrapped[colIdx] != rowUnwrapped[idx] { + break + } + align = rowConfig.getAutoMergeAlign() + extraColumnsRendered++ + } } + colStr, colTagName := t.htmlGetColStrAndTag(row, colIdx, hint) // write the row out.WriteString(" <") out.WriteString(colTagName) - t.htmlRenderColumnAttributes(out, row, colIdx, hint) + t.htmlRenderColumnAttributes(out, colIdx, hint, align) + if extraColumnsRendered > 0 { + out.WriteString(" colspan=") + out.WriteString(fmt.Sprint(extraColumnsRendered + 1)) + } out.WriteString(">") if len(colStr) == 0 { out.WriteString(t.style.HTML.EmptyColumn) } else { - if t.style.HTML.EscapeText { - colStr = html.EscapeString(colStr) - } - if t.style.HTML.Newline != "\n" { - colStr = strings.Replace(colStr, "\n", t.style.HTML.Newline, -1) - } - out.WriteString(colStr) + t.htmlRenderColumn(out, colStr) } out.WriteString("</") out.WriteString(colTagName) out.WriteString(">\n") + colIdx += extraColumnsRendered } out.WriteString("  </tr>\n") } diff --git a/vendor/github.com/jedib0t/go-pretty/v6/table/render_init.go b/vendor/github.com/jedib0t/go-pretty/v6/table/render_init.go new file mode 100644 index 00000000000..8a7a0f4527b --- /dev/null +++ b/vendor/github.com/jedib0t/go-pretty/v6/table/render_init.go @@ -0,0 +1,349 @@ +package table + +import ( + "fmt" + "strings" + "unicode" + + "github.com/jedib0t/go-pretty/v6/text" +) + +func (t *Table)
analyzeAndStringify(row Row, hint renderHint) rowStr { + // update t.numColumns if this row is the longest seen till now + if len(row) > t.numColumns { + // init the slice for the first time; and pad it the rest of the time + if t.numColumns == 0 { + t.columnIsNonNumeric = make([]bool, len(row)) + } else { + t.columnIsNonNumeric = append(t.columnIsNonNumeric, make([]bool, len(row)-t.numColumns)...) + } + // update t.numColumns + t.numColumns = len(row) + } + + // convert each column to string and figure out if it has non-numeric data + rowOut := make(rowStr, len(row)) + for colIdx, col := range row { + // if the column is not a number, keep track of it + if !hint.isHeaderRow && !hint.isFooterRow && !t.columnIsNonNumeric[colIdx] && !isNumber(col) { + t.columnIsNonNumeric[colIdx] = true + } + + rowOut[colIdx] = t.analyzeAndStringifyColumn(colIdx, col, hint) + } + return rowOut +} + +func (t *Table) analyzeAndStringifyColumn(colIdx int, col interface{}, hint renderHint) string { + // convert to a string and store it in the row + var colStr string + if transformer := t.getColumnTransformer(colIdx, hint); transformer != nil { + colStr = transformer(col) + } else if colStrVal, ok := col.(string); ok { + colStr = colStrVal + } else { + colStr = fmt.Sprint(col) + } + colStr = strings.ReplaceAll(colStr, "\t", " ") + colStr = text.ProcessCRLF(colStr) + return fmt.Sprintf("%s%s", t.style.Format.Direction.Modifier(), colStr) +} + +func (t *Table) extractMaxColumnLengths(rows []rowStr, hint renderHint) { + for rowIdx, row := range rows { + hint.rowNumber = rowIdx + 1 + t.extractMaxColumnLengthsFromRow(row, t.getMergedColumnIndices(row, hint)) + } +} + +func (t *Table) extractMaxColumnLengthsFromRow(row rowStr, mci mergedColumnIndices) { + for colIdx, colStr := range row { + longestLineLen := text.LongestLineLen(colStr) + maxColWidth := t.getColumnWidthMax(colIdx) + if maxColWidth > 0 && maxColWidth < longestLineLen { + longestLineLen = maxColWidth + } + mergedColumnsLength := mci.mergedLength(colIdx, t.maxColumnLengths) + if longestLineLen > mergedColumnsLength { + if mergedColumnsLength > 0 { + t.extractMaxColumnLengthsFromRowForMergedColumns(colIdx, longestLineLen, mci) + } else { + t.maxColumnLengths[colIdx] = longestLineLen + } + } else if maxColWidth == 0 && longestLineLen > t.maxColumnLengths[colIdx] { + t.maxColumnLengths[colIdx] = longestLineLen + } + } +} + +func (t *Table) extractMaxColumnLengthsFromRowForMergedColumns(colIdx int, mergedColumnLength int, mci mergedColumnIndices) { + numMergedColumns := mci.len(colIdx) + mergedColumnLength -= (numMergedColumns - 1) * text.StringWidthWithoutEscSequences(t.style.Box.MiddleSeparator) + maxLengthSplitAcrossColumns := mergedColumnLength / numMergedColumns + if maxLengthSplitAcrossColumns > t.maxColumnLengths[colIdx] { + t.maxColumnLengths[colIdx] = maxLengthSplitAcrossColumns + } + for otherColIdx := range mci[colIdx] { + if maxLengthSplitAcrossColumns > t.maxColumnLengths[otherColIdx] { + t.maxColumnLengths[otherColIdx] = maxLengthSplitAcrossColumns + } + } +} + +func (t *Table) initForRender() { + // pick a default style if none was set until now + t.Style() + + // reset rendering state + t.reset() + + // initialize the column configs and normalize them + t.initForRenderColumnConfigs() + + // initialize and stringify all the raw rows + t.initForRenderRows() + + // find the longest continuous line in each column + t.initForRenderColumnLengths() + t.initForRenderMaxRowLength() + t.initForRenderPaddedColumns() + + // generate a separator row and 
calculate maximum row length + t.initForRenderRowSeparator() + + // reset the counter for the number of lines rendered + t.numLinesRendered = 0 +} + +func (t *Table) initForRenderColumnConfigs() { + t.columnConfigMap = map[int]ColumnConfig{} + for _, colCfg := range t.columnConfigs { + // find the column number if none provided; this logic can work only if + // a header row is present and has a column with the given name + if colCfg.Number == 0 { + for _, row := range t.rowsHeaderRaw { + colCfg.Number = row.findColumnNumber(colCfg.Name) + if colCfg.Number > 0 { + break + } + } + } + if colCfg.Number > 0 { + t.columnConfigMap[colCfg.Number-1] = colCfg + } + } +} + +func (t *Table) initForRenderColumnLengths() { + t.maxColumnLengths = make([]int, t.numColumns) + t.extractMaxColumnLengths(t.rowsHeader, renderHint{isHeaderRow: true}) + t.extractMaxColumnLengths(t.rows, renderHint{}) + t.extractMaxColumnLengths(t.rowsFooter, renderHint{isFooterRow: true}) + + // increase the column lengths if any are under the limits + for colIdx := range t.maxColumnLengths { + minWidth := t.getColumnWidthMin(colIdx) + if minWidth > 0 && t.maxColumnLengths[colIdx] < minWidth { + t.maxColumnLengths[colIdx] = minWidth + } + } +} + +func (t *Table) initForRenderHideColumns() { + if !t.hasHiddenColumns() { + return + } + colIdxMap := t.hideColumns() + + // re-create columnIsNonNumeric with new column indices + columnIsNonNumeric := make([]bool, t.numColumns) + for oldColIdx, nonNumeric := range t.columnIsNonNumeric { + if newColIdx, ok := colIdxMap[oldColIdx]; ok { + columnIsNonNumeric[newColIdx] = nonNumeric + } + } + t.columnIsNonNumeric = columnIsNonNumeric + + // re-create columnConfigMap with new column indices + columnConfigMap := make(map[int]ColumnConfig) + for oldColIdx, cc := range t.columnConfigMap { + if newColIdx, ok := colIdxMap[oldColIdx]; ok { + columnConfigMap[newColIdx] = cc + } + } + t.columnConfigMap = columnConfigMap +} + +func (t *Table) initForRenderMaxRowLength() { + t.maxRowLength = 0 + if t.autoIndex { + t.maxRowLength += text.StringWidthWithoutEscSequences(t.style.Box.PaddingLeft) + t.maxRowLength += len(fmt.Sprint(len(t.rows))) + t.maxRowLength += text.StringWidthWithoutEscSequences(t.style.Box.PaddingRight) + if t.style.Options.SeparateColumns { + t.maxRowLength += text.StringWidthWithoutEscSequences(t.style.Box.MiddleSeparator) + } + } + if t.style.Options.SeparateColumns { + t.maxRowLength += text.StringWidthWithoutEscSequences(t.style.Box.MiddleSeparator) * (t.numColumns - 1) + } + for _, maxColumnLength := range t.maxColumnLengths { + maxColumnLength += text.StringWidthWithoutEscSequences(t.style.Box.PaddingLeft + t.style.Box.PaddingRight) + t.maxRowLength += maxColumnLength + } + if t.style.Options.DrawBorder { + t.maxRowLength += text.StringWidthWithoutEscSequences(t.style.Box.Left + t.style.Box.Right) + } +} + +func (t *Table) initForRenderPaddedColumns() { + paddingSize := t.style.Size.WidthMin - t.maxRowLength + for paddingSize > 0 { + // distribute padding equally among all columns + numColumnsPadded := 0 + for colIdx := 0; paddingSize > 0 && colIdx < t.numColumns; colIdx++ { + colWidthMax := t.getColumnWidthMax(colIdx) + if colWidthMax == 0 || t.maxColumnLengths[colIdx] < colWidthMax { + t.maxColumnLengths[colIdx]++ + numColumnsPadded++ + paddingSize-- + } + } + + // avoid endless looping because all columns are at max size and cannot + // be expanded any further + if numColumnsPadded == 0 { + break + } + } +} + +func (t *Table) initForRenderRows() { + // auto-index: calc the 
index column's max length + t.autoIndexVIndexMaxLength = len(fmt.Sprint(len(t.rowsRaw))) + + // stringify all the rows to make it easy to render + t.rows = t.initForRenderRowsStringify(t.rowsRaw, renderHint{}) + t.rowsFooter = t.initForRenderRowsStringify(t.rowsFooterRaw, renderHint{isFooterRow: true}) + t.rowsHeader = t.initForRenderRowsStringify(t.rowsHeaderRaw, renderHint{isHeaderRow: true}) + + // sort the rows as requested + t.initForRenderSortRows() + + // find the row colors (if any) + t.initForRenderRowPainterColors() + + // suppress columns without any content + t.initForRenderSuppressColumns() + + // strip out hidden columns + t.initForRenderHideColumns() +} + +func (t *Table) initForRenderRowsStringify(rows []Row, hint renderHint) []rowStr { + rowsStr := make([]rowStr, len(rows)) + for idx, row := range rows { + hint.rowNumber = idx + 1 + rowsStr[idx] = t.analyzeAndStringify(row, hint) + } + return rowsStr +} + +func (t *Table) initForRenderRowPainterColors() { + if !t.hasRowPainter() { + return + } + + // generate the colors + t.rowsColors = make([]text.Colors, len(t.rowsRaw)) + for idx, row := range t.rowsRaw { + idxColors := idx + if len(t.sortedRowIndices) > 0 { + // override with the sorted row index + for j := 0; j < len(t.sortedRowIndices); j++ { + if t.sortedRowIndices[j] == idx { + idxColors = j + break + } + } + } + + if t.rowPainter != nil { + t.rowsColors[idxColors] = t.rowPainter(row) + } else if t.rowPainterWithAttributes != nil { + t.rowsColors[idxColors] = t.rowPainterWithAttributes(row, RowAttributes{ + Number: idx + 1, + NumberSorted: idxColors + 1, + }) + } + } +} + +func (t *Table) initForRenderRowSeparator() { + t.rowSeparator = make(rowStr, t.numColumns) + for colIdx, maxColumnLength := range t.maxColumnLengths { + maxColumnLength += text.StringWidthWithoutEscSequences(t.style.Box.PaddingLeft + t.style.Box.PaddingRight) + t.rowSeparator[colIdx] = text.RepeatAndTrim(t.style.Box.MiddleHorizontal, maxColumnLength) + } +} + +func (t *Table) initForRenderSortRows() { + if len(t.sortBy) == 0 { + return + } + + // sort the rows + t.sortedRowIndices = t.getSortedRowIndices() + sortedRows := make([]rowStr, len(t.rows)) + for idx := range t.rows { + sortedRows[idx] = t.rows[t.sortedRowIndices[idx]] + } + t.rows = sortedRows +} + +func (t *Table) initForRenderSuppressColumns() { + shouldSuppressColumn := func(colIdx int) bool { + for _, row := range t.rows { + if colIdx < len(row) && row[colIdx] != "" { + // Columns may contain non-printable characters. For example + // the text.Direction modifiers. These should not be considered + // when deciding to suppress a column. 
+ for _, r := range row[colIdx] { + if unicode.IsPrint(r) { + return false + } + } + return true + } + } + return true + } + + if t.suppressEmptyColumns { + for colIdx := 0; colIdx < t.numColumns; colIdx++ { + if shouldSuppressColumn(colIdx) { + cc := t.columnConfigMap[colIdx] + cc.Hidden = true + t.columnConfigMap[colIdx] = cc + } + } + } +} + +// reset initializes all the variables used to maintain rendering information +// that are written to in this file +func (t *Table) reset() { + t.autoIndexVIndexMaxLength = 0 + t.columnConfigMap = nil + t.columnIsNonNumeric = nil + t.firstRowOfPage = true + t.maxColumnLengths = nil + t.maxRowLength = 0 + t.numColumns = 0 + t.numLinesRendered = 0 + t.rowSeparator = nil + t.rows = nil + t.rowsColors = nil + t.rowsFooter = nil + t.rowsHeader = nil +} diff --git a/vendor/github.com/jedib0t/go-pretty/v6/table/render_markdown.go b/vendor/github.com/jedib0t/go-pretty/v6/table/render_markdown.go index 8ca852d8f6e..adf573fc913 100644 --- a/vendor/github.com/jedib0t/go-pretty/v6/table/render_markdown.go +++ b/vendor/github.com/jedib0t/go-pretty/v6/table/render_markdown.go @@ -6,12 +6,13 @@ import ( ) // RenderMarkdown renders the Table in Markdown format. Example: -// | # | First Name | Last Name | Salary | | -// | ---:| --- | --- | ---:| --- | -// | 1 | Arya | Stark | 3000 | | -// | 20 | Jon | Snow | 2000 | You know nothing, Jon Snow! | -// | 300 | Tyrion | Lannister | 5000 | | -// | | | Total | 10000 | | +// +// | # | First Name | Last Name | Salary | | +// | ---:| --- | --- | ---:| --- | +// | 1 | Arya | Stark | 3000 | | +// | 20 | Jon | Snow | 2000 | You know nothing, Jon Snow! | +// | 300 | Tyrion | Lannister | 5000 | | +// | | | Total | 10000 | | func (t *Table) RenderMarkdown() string { t.initForRender() @@ -44,16 +45,7 @@ func (t *Table) markdownRenderRow(out *strings.Builder, row rowStr, hint renderH // render each column up to the max. columns seen in all the rows out.WriteRune('|') for colIdx := 0; colIdx < t.numColumns; colIdx++ { - // auto-index column - if colIdx == 0 && t.autoIndex { - out.WriteRune(' ') - if hint.isSeparatorRow { - out.WriteString("---:") - } else if hint.isRegularRow() { - out.WriteString(fmt.Sprintf("%d ", hint.rowNumber)) - } - out.WriteRune('|') - } + t.markdownRenderRowAutoIndex(out, colIdx, hint) if hint.isSeparatorRow { out.WriteString(t.getAlign(colIdx, hint).MarkdownProperty()) @@ -63,12 +55,8 @@ func (t *Table) markdownRenderRow(out *strings.Builder, row rowStr, hint renderH colStr = row[colIdx] } out.WriteRune(' ') - if strings.Contains(colStr, "|") { - colStr = strings.Replace(colStr, "|", "\\|", -1) - } - if strings.Contains(colStr, "\n") { - colStr = strings.Replace(colStr, "\n", "
", -1) - } + colStr = strings.ReplaceAll(colStr, "|", "\\|") + colStr = strings.ReplaceAll(colStr, "\n", "
") out.WriteString(colStr) out.WriteRune(' ') } @@ -76,6 +64,18 @@ func (t *Table) markdownRenderRow(out *strings.Builder, row rowStr, hint renderH } } +func (t *Table) markdownRenderRowAutoIndex(out *strings.Builder, colIdx int, hint renderHint) { + if colIdx == 0 && t.autoIndex { + out.WriteRune(' ') + if hint.isSeparatorRow { + out.WriteString("---:") + } else if hint.isRegularRow() { + out.WriteString(fmt.Sprintf("%d ", hint.rowNumber)) + } + out.WriteRune('|') + } +} + func (t *Table) markdownRenderRows(out *strings.Builder, rows []rowStr, hint renderHint) { if len(rows) > 0 { for idx, row := range rows { @@ -91,7 +91,6 @@ func (t *Table) markdownRenderRows(out *strings.Builder, rows []rowStr, hint ren func (t *Table) markdownRenderRowsFooter(out *strings.Builder) { t.markdownRenderRows(out, t.rowsFooter, renderHint{isFooterRow: true}) - } func (t *Table) markdownRenderRowsHeader(out *strings.Builder) { diff --git a/vendor/github.com/jedib0t/go-pretty/v6/table/render_tsv.go b/vendor/github.com/jedib0t/go-pretty/v6/table/render_tsv.go new file mode 100644 index 00000000000..bb7397506fa --- /dev/null +++ b/vendor/github.com/jedib0t/go-pretty/v6/table/render_tsv.go @@ -0,0 +1,73 @@ +package table + +import ( + "fmt" + "strings" +) + +func (t *Table) RenderTSV() string { + t.initForRender() + + var out strings.Builder + + if t.numColumns > 0 { + if t.title != "" { + out.WriteString(t.title) + } + + if t.autoIndex && len(t.rowsHeader) == 0 { + t.tsvRenderRow(&out, t.getAutoIndexColumnIDs(), renderHint{isAutoIndexRow: true, isHeaderRow: true}) + } + + t.tsvRenderRows(&out, t.rowsHeader, renderHint{isHeaderRow: true}) + t.tsvRenderRows(&out, t.rows, renderHint{}) + t.tsvRenderRows(&out, t.rowsFooter, renderHint{isFooterRow: true}) + + if t.caption != "" { + out.WriteRune('\n') + out.WriteString(t.caption) + } + } + + return t.render(&out) +} + +func (t *Table) tsvRenderRow(out *strings.Builder, row rowStr, hint renderHint) { + if out.Len() > 0 { + out.WriteRune('\n') + } + + for idx, col := range row { + if idx == 0 && t.autoIndex { + if hint.isRegularRow() { + out.WriteString(fmt.Sprint(hint.rowNumber)) + } + out.WriteRune('\t') + } + + if idx > 0 { + out.WriteRune('\t') + } + + if strings.ContainsAny(col, "\t\n\"") || strings.Contains(col, " ") { + out.WriteString(fmt.Sprintf("\"%s\"", t.tsvFixDoubleQuotes(col))) + } else { + out.WriteString(col) + } + } + + for colIdx := len(row); colIdx < t.numColumns; colIdx++ { + out.WriteRune('\t') + } +} + +func (t *Table) tsvFixDoubleQuotes(str string) string { + return strings.Replace(str, "\"", "\"\"", -1) +} + +func (t *Table) tsvRenderRows(out *strings.Builder, rows []rowStr, hint renderHint) { + for idx, row := range rows { + hint.rowNumber = idx + 1 + t.tsvRenderRow(out, row, hint) + } +} diff --git a/vendor/github.com/jedib0t/go-pretty/v6/table/row.go b/vendor/github.com/jedib0t/go-pretty/v6/table/row.go new file mode 100644 index 00000000000..3c165fcab25 --- /dev/null +++ b/vendor/github.com/jedib0t/go-pretty/v6/table/row.go @@ -0,0 +1,43 @@ +package table + +import ( + "fmt" + + "github.com/jedib0t/go-pretty/v6/text" +) + +// Row defines a single row in the Table. +type Row []interface{} + +func (r Row) findColumnNumber(colName string) int { + for colIdx, col := range r { + if fmt.Sprint(col) == colName { + return colIdx + 1 + } + } + return 0 +} + +// RowAttributes contains properties about the Row during the render. 
+type RowAttributes struct { + Number int // Row Number (1-indexed) as appended + NumberSorted int // Row number (1-indexed) after sorting +} + +// RowPainter is a custom function that takes a Row as input and returns the +// text.Colors{} to use on the entire row +type RowPainter func(row Row) text.Colors + +// RowPainterWithAttributes is the same as RowPainter but passes in additional +// attributes from render time +type RowPainterWithAttributes func(row Row, attr RowAttributes) text.Colors + +// rowStr defines a single row in the Table comprised of just string objects. +type rowStr []string + +// areEqual returns true if the contents of the 2 given columns are the same +func (row rowStr) areEqual(colIdx1 int, colIdx2 int) bool { + return colIdx1 >= 0 && colIdx1 < len(row) && + colIdx2 >= 0 && colIdx2 < len(row) && + row[colIdx1] == row[colIdx2] +} diff --git a/vendor/github.com/jedib0t/go-pretty/v6/table/sort.go b/vendor/github.com/jedib0t/go-pretty/v6/table/sort.go index 621bc1c4d4f..7a47765aeca 100644 --- a/vendor/github.com/jedib0t/go-pretty/v6/table/sort.go +++ b/vendor/github.com/jedib0t/go-pretty/v6/table/sort.go @@ -3,6 +3,7 @@ package table import ( "sort" "strconv" + "strings" ) // SortBy defines What to sort (Column Name or Number), and How to sort (Mode). @@ -17,6 +18,9 @@ type SortBy struct { // Mode tells the Writer how to Sort. Asc/Dsc/etc. Mode SortMode + + // IgnoreCase makes sorting case-insensitive + IgnoreCase bool } // SortMode defines How to sort. @@ -25,12 +29,24 @@ type SortMode int const ( // Asc sorts the column in Ascending order alphabetically. Asc SortMode = iota + // AscAlphaNumeric sorts the column in Ascending order alphabetically and + // then numerically. + AscAlphaNumeric // AscNumeric sorts the column in Ascending order numerically. AscNumeric + // AscNumericAlpha sorts the column in Ascending order numerically and + // then alphabetically. + AscNumericAlpha // Dsc sorts the column in Descending order alphabetically. Dsc + // DscAlphaNumeric sorts the column in Descending order alphabetically and + // then numerically. + DscAlphaNumeric // DscNumeric sorts the column in Descending order numerically. DscNumeric + // DscNumericAlpha sorts the column in Descending order numerically and + // then alphabetically. 
+ DscNumericAlpha ) type rowsSorter struct { @@ -74,9 +90,10 @@ func (t *Table) parseSortBy(sortBy []SortBy) []SortBy { } if colNum > 0 { resSortBy = append(resSortBy, SortBy{ - Name: col.Name, - Number: colNum, - Mode: col.Mode, + Name: col.Name, + Number: colNum, + Mode: col.Mode, + IgnoreCase: col.IgnoreCase, }) } } @@ -92,28 +109,120 @@ func (rs rowsSorter) Swap(i, j int) { } func (rs rowsSorter) Less(i, j int) bool { + shouldContinue, returnValue := false, false realI, realJ := rs.sortedIndices[i], rs.sortedIndices[j] - for _, col := range rs.sortBy { - rowI, rowJ, colIdx := rs.rows[realI], rs.rows[realJ], col.Number-1 - if colIdx < len(rowI) && colIdx < len(rowJ) { - if rowI[colIdx] == rowJ[colIdx] { - continue - } else if col.Mode == Asc { - return rowI[colIdx] < rowJ[colIdx] - } else if col.Mode == Dsc { - return rowI[colIdx] > rowJ[colIdx] - } + for _, sortBy := range rs.sortBy { + // extract the values/cells from the rows for comparison + rowI, rowJ, colIdx := rs.rows[realI], rs.rows[realJ], sortBy.Number-1 + iVal, jVal := "", "" + if colIdx < len(rowI) { + iVal = rowI[colIdx] + } + if colIdx < len(rowJ) { + jVal = rowJ[colIdx] + } - iVal, iErr := strconv.ParseFloat(rowI[colIdx], 64) - jVal, jErr := strconv.ParseFloat(rowJ[colIdx], 64) - if iErr == nil && jErr == nil { - if col.Mode == AscNumeric { - return iVal < jVal - } else if col.Mode == DscNumeric { - return jVal < iVal - } - } + // compare and choose whether to continue + shouldContinue, returnValue = less(iVal, jVal, sortBy) + if !shouldContinue { + break + } + } + return returnValue +} + +func less(iVal string, jVal string, sb SortBy) (bool, bool) { + if iVal == jVal { + return true, false + } + + switch sb.Mode { + case Asc, Dsc: + return lessAlphabetic(iVal, jVal, sb) + case AscNumeric, DscNumeric: + return lessNumeric(iVal, jVal, sb) + default: // AscAlphaNumeric, AscNumericAlpha, DscAlphaNumeric, DscNumericAlpha + return lessMixedMode(iVal, jVal, sb) + } +} + +func lessAlphabetic(iVal string, jVal string, sb SortBy) (bool, bool) { + if sb.IgnoreCase { + iLow := strings.ToLower(iVal) + jLow := strings.ToLower(jVal) + // when two strings are case-insensitive identical, compare them casesensitive. 
+ // That makes sure to get a consistent sorting + identical := iLow == jLow + switch sb.Mode { + case Asc, AscAlphaNumeric, AscNumericAlpha: + return identical, (identical && iVal < jVal) || iLow < jLow + default: // Dsc, DscAlphaNumeric, DscNumericAlpha + return identical, (identical && iVal > jVal) || iLow > jLow } } - return false + switch sb.Mode { + case Asc, AscAlphaNumeric, AscNumericAlpha: + return false, iVal < jVal + default: // Dsc, DscAlphaNumeric, DscNumericAlpha + return false, iVal > jVal + } +} + +func lessAlphaNumericI(sb SortBy) (bool, bool) { + // i == "abc"; j == 5 + switch sb.Mode { + case AscAlphaNumeric, DscAlphaNumeric: + return false, true + default: // AscNumericAlpha, DscNumericAlpha + return false, false + } +} + +func lessAlphaNumericJ(sb SortBy) (bool, bool) { + // i == 5; j == "abc" + switch sb.Mode { + case AscAlphaNumeric, DscAlphaNumeric: + return false, false + default: // AscNumericAlpha, DscNumericAlpha: + return false, true + } +} + +func lessMixedMode(iVal string, jVal string, sb SortBy) (bool, bool) { + iNumVal, iErr := strconv.ParseFloat(iVal, 64) + jNumVal, jErr := strconv.ParseFloat(jVal, 64) + if iErr != nil && jErr != nil { // both are alphanumeric + return lessAlphabetic(iVal, jVal, sb) + } + if iErr != nil { // iVal is alphabetic, jVal is numeric + return lessAlphaNumericI(sb) + } + if jErr != nil { // iVal is numeric, jVal is alphabetic + return lessAlphaNumericJ(sb) + } + // both values numeric + return lessNumericVal(iNumVal, jNumVal, sb) +} + +func lessNumeric(iVal string, jVal string, sb SortBy) (bool, bool) { + iNumVal, iErr := strconv.ParseFloat(iVal, 64) + jNumVal, jErr := strconv.ParseFloat(jVal, 64) + if iErr != nil || jErr != nil { + return false, false + } + + return lessNumericVal(iNumVal, jNumVal, sb) +} + +func lessNumericVal(iVal float64, jVal float64, sb SortBy) (bool, bool) { + if iVal == jVal { + return true, false + } + + switch sb.Mode { + case AscNumeric, AscAlphaNumeric, AscNumericAlpha: + return false, iVal < jVal + default: // DscNumeric, DscAlphaNumeric, DscNumericAlpha + return false, iVal > jVal + } } diff --git a/vendor/github.com/jedib0t/go-pretty/v6/table/style.go b/vendor/github.com/jedib0t/go-pretty/v6/table/style.go index d6e2ed36ba9..aaed2c27fe6 100644 --- a/vendor/github.com/jedib0t/go-pretty/v6/table/style.go +++ b/vendor/github.com/jedib0t/go-pretty/v6/table/style.go @@ -13,6 +13,7 @@ type Style struct { Format FormatOptions // formatting options for the rows and columns HTML HTMLOptions // rendering options for HTML mode Options Options // misc. 
options for the table + Size SizeOptions // size (width) options for the table Title TitleOptions // formation options for the title text } @@ -34,6 +35,7 @@ var ( Format: FormatOptionsDefault, HTML: DefaultHTMLOptions, Options: OptionsDefault, + Size: SizeOptionsDefault, Title: TitleOptionsDefault, } @@ -54,6 +56,7 @@ var ( Format: FormatOptionsDefault, HTML: DefaultHTMLOptions, Options: OptionsDefault, + Size: SizeOptionsDefault, Title: TitleOptionsDefault, } @@ -67,6 +70,7 @@ var ( Format: FormatOptionsDefault, HTML: DefaultHTMLOptions, Options: OptionsNoBordersAndSeparators, + Size: SizeOptionsDefault, Title: TitleOptionsDark, } @@ -80,6 +84,7 @@ var ( Format: FormatOptionsDefault, HTML: DefaultHTMLOptions, Options: OptionsNoBordersAndSeparators, + Size: SizeOptionsDefault, Title: TitleOptionsBright, } @@ -93,6 +98,7 @@ var ( Format: FormatOptionsDefault, HTML: DefaultHTMLOptions, Options: OptionsNoBordersAndSeparators, + Size: SizeOptionsDefault, Title: TitleOptionsBlueOnBlack, } @@ -106,6 +112,7 @@ var ( Format: FormatOptionsDefault, HTML: DefaultHTMLOptions, Options: OptionsNoBordersAndSeparators, + Size: SizeOptionsDefault, Title: TitleOptionsCyanOnBlack, } @@ -119,6 +126,7 @@ var ( Format: FormatOptionsDefault, HTML: DefaultHTMLOptions, Options: OptionsNoBordersAndSeparators, + Size: SizeOptionsDefault, Title: TitleOptionsGreenOnBlack, } @@ -132,6 +140,7 @@ var ( Format: FormatOptionsDefault, HTML: DefaultHTMLOptions, Options: OptionsNoBordersAndSeparators, + Size: SizeOptionsDefault, Title: TitleOptionsMagentaOnBlack, } @@ -145,6 +154,7 @@ var ( Format: FormatOptionsDefault, HTML: DefaultHTMLOptions, Options: OptionsNoBordersAndSeparators, + Size: SizeOptionsDefault, Title: TitleOptionsYellowOnBlack, } @@ -158,6 +168,7 @@ var ( Format: FormatOptionsDefault, HTML: DefaultHTMLOptions, Options: OptionsNoBordersAndSeparators, + Size: SizeOptionsDefault, Title: TitleOptionsRedOnBlack, } @@ -171,6 +182,7 @@ var ( Format: FormatOptionsDefault, HTML: DefaultHTMLOptions, Options: OptionsNoBordersAndSeparators, + Size: SizeOptionsDefault, Title: TitleOptionsBlackOnBlue, } @@ -184,6 +196,7 @@ var ( Format: FormatOptionsDefault, HTML: DefaultHTMLOptions, Options: OptionsNoBordersAndSeparators, + Size: SizeOptionsDefault, Title: TitleOptionsBlackOnCyan, } @@ -197,6 +210,7 @@ var ( Format: FormatOptionsDefault, HTML: DefaultHTMLOptions, Options: OptionsNoBordersAndSeparators, + Size: SizeOptionsDefault, Title: TitleOptionsBlackOnGreen, } @@ -210,6 +224,7 @@ var ( Format: FormatOptionsDefault, HTML: DefaultHTMLOptions, Options: OptionsNoBordersAndSeparators, + Size: SizeOptionsDefault, Title: TitleOptionsBlackOnMagenta, } @@ -223,6 +238,7 @@ var ( Format: FormatOptionsDefault, HTML: DefaultHTMLOptions, Options: OptionsNoBordersAndSeparators, + Size: SizeOptionsDefault, Title: TitleOptionsBlackOnRed, } @@ -236,6 +252,7 @@ var ( Format: FormatOptionsDefault, HTML: DefaultHTMLOptions, Options: OptionsNoBordersAndSeparators, + Size: SizeOptionsDefault, Title: TitleOptionsBlackOnYellow, } @@ -256,6 +273,7 @@ var ( Format: FormatOptionsDefault, HTML: DefaultHTMLOptions, Options: OptionsDefault, + Size: SizeOptionsDefault, Title: TitleOptionsDefault, } @@ -276,6 +294,7 @@ var ( Format: FormatOptionsDefault, HTML: DefaultHTMLOptions, Options: OptionsDefault, + Size: SizeOptionsDefault, Title: TitleOptionsDefault, } @@ -296,6 +315,7 @@ var ( Format: FormatOptionsDefault, HTML: DefaultHTMLOptions, Options: OptionsDefault, + Size: SizeOptionsDefault, Title: TitleOptionsDefault, } @@ -316,6 +336,7 @@ var ( 
Format: FormatOptionsDefault, HTML: DefaultHTMLOptions, Options: OptionsDefault, + Size: SizeOptionsDefault, Title: TitleOptionsDefault, } ) @@ -358,7 +379,7 @@ var ( BottomLeft: "+", BottomRight: "+", BottomSeparator: "+", - EmptySeparator: text.RepeatAndTrim(" ", text.RuneCount("+")), + EmptySeparator: " ", Left: "|", LeftSeparator: "+", MiddleHorizontal: "-", @@ -389,7 +410,7 @@ var ( BottomLeft: "┗", BottomRight: "┛", BottomSeparator: "┻", - EmptySeparator: text.RepeatAndTrim(" ", text.RuneCount("╋")), + EmptySeparator: " ", Left: "┃", LeftSeparator: "┣", MiddleHorizontal: "━", @@ -420,7 +441,7 @@ var ( BottomLeft: "╚", BottomRight: "╝", BottomSeparator: "╩", - EmptySeparator: text.RepeatAndTrim(" ", text.RuneCount("╬")), + EmptySeparator: " ", Left: "║", LeftSeparator: "╠", MiddleHorizontal: "═", @@ -451,7 +472,7 @@ var ( BottomLeft: "└", BottomRight: "┘", BottomSeparator: "┴", - EmptySeparator: text.RepeatAndTrim(" ", text.RuneCount("┼")), + EmptySeparator: " ", Left: "│", LeftSeparator: "├", MiddleHorizontal: "─", @@ -482,7 +503,7 @@ var ( BottomLeft: "╰", BottomRight: "╯", BottomSeparator: "┴", - EmptySeparator: text.RepeatAndTrim(" ", text.RuneCount("┼")), + EmptySeparator: " ", Left: "│", LeftSeparator: "├", MiddleHorizontal: "─", @@ -513,7 +534,7 @@ var ( BottomLeft: "\\", BottomRight: "/", BottomSeparator: "v", - EmptySeparator: text.RepeatAndTrim(" ", text.RuneCount("+")), + EmptySeparator: " ", Left: "[", LeftSeparator: "{", MiddleHorizontal: "--", @@ -533,11 +554,13 @@ var ( // ColorOptions defines the ANSI colors to use for parts of the Table. type ColorOptions struct { - IndexColumn text.Colors // index-column colors (row #, etc.) + Border text.Colors // borders (if nil, uses one of the below) Footer text.Colors // footer row(s) colors Header text.Colors // header row(s) colors + IndexColumn text.Colors // index-column colors (row #, etc.) Row text.Colors // regular row(s) colors RowAlternate text.Colors // regular row(s) colors for the even-numbered rows + Separator text.Colors // separators (if nil, uses one of the above) } var ( @@ -552,18 +575,18 @@ var ( // ColorOptionsBlackOnBlueWhite renders Black text on Blue/White background. ColorOptionsBlackOnBlueWhite = ColorOptions{ - IndexColumn: text.Colors{text.BgHiBlue, text.FgBlack}, Footer: text.Colors{text.BgBlue, text.FgBlack}, Header: text.Colors{text.BgHiBlue, text.FgBlack}, + IndexColumn: text.Colors{text.BgHiBlue, text.FgBlack}, Row: text.Colors{text.BgHiWhite, text.FgBlack}, RowAlternate: text.Colors{text.BgWhite, text.FgBlack}, } // ColorOptionsBlackOnCyanWhite renders Black text on Cyan/White background. ColorOptionsBlackOnCyanWhite = ColorOptions{ - IndexColumn: text.Colors{text.BgHiCyan, text.FgBlack}, Footer: text.Colors{text.BgCyan, text.FgBlack}, Header: text.Colors{text.BgHiCyan, text.FgBlack}, + IndexColumn: text.Colors{text.BgHiCyan, text.FgBlack}, Row: text.Colors{text.BgHiWhite, text.FgBlack}, RowAlternate: text.Colors{text.BgWhite, text.FgBlack}, } @@ -571,9 +594,9 @@ var ( // ColorOptionsBlackOnGreenWhite renders Black text on Green/White // background. 
ColorOptionsBlackOnGreenWhite = ColorOptions{ - IndexColumn: text.Colors{text.BgHiGreen, text.FgBlack}, Footer: text.Colors{text.BgGreen, text.FgBlack}, Header: text.Colors{text.BgHiGreen, text.FgBlack}, + IndexColumn: text.Colors{text.BgHiGreen, text.FgBlack}, Row: text.Colors{text.BgHiWhite, text.FgBlack}, RowAlternate: text.Colors{text.BgWhite, text.FgBlack}, } @@ -581,18 +604,18 @@ var ( // ColorOptionsBlackOnMagentaWhite renders Black text on Magenta/White // background. ColorOptionsBlackOnMagentaWhite = ColorOptions{ - IndexColumn: text.Colors{text.BgHiMagenta, text.FgBlack}, Footer: text.Colors{text.BgMagenta, text.FgBlack}, Header: text.Colors{text.BgHiMagenta, text.FgBlack}, + IndexColumn: text.Colors{text.BgHiMagenta, text.FgBlack}, Row: text.Colors{text.BgHiWhite, text.FgBlack}, RowAlternate: text.Colors{text.BgWhite, text.FgBlack}, } // ColorOptionsBlackOnRedWhite renders Black text on Red/White background. ColorOptionsBlackOnRedWhite = ColorOptions{ - IndexColumn: text.Colors{text.BgHiRed, text.FgBlack}, Footer: text.Colors{text.BgRed, text.FgBlack}, Header: text.Colors{text.BgHiRed, text.FgBlack}, + IndexColumn: text.Colors{text.BgHiRed, text.FgBlack}, Row: text.Colors{text.BgHiWhite, text.FgBlack}, RowAlternate: text.Colors{text.BgWhite, text.FgBlack}, } @@ -600,27 +623,27 @@ var ( // ColorOptionsBlackOnYellowWhite renders Black text on Yellow/White // background. ColorOptionsBlackOnYellowWhite = ColorOptions{ - IndexColumn: text.Colors{text.BgHiYellow, text.FgBlack}, Footer: text.Colors{text.BgYellow, text.FgBlack}, Header: text.Colors{text.BgHiYellow, text.FgBlack}, + IndexColumn: text.Colors{text.BgHiYellow, text.FgBlack}, Row: text.Colors{text.BgHiWhite, text.FgBlack}, RowAlternate: text.Colors{text.BgWhite, text.FgBlack}, } // ColorOptionsBlueWhiteOnBlack renders Blue/White text on Black background. ColorOptionsBlueWhiteOnBlack = ColorOptions{ - IndexColumn: text.Colors{text.FgHiBlue, text.BgHiBlack}, Footer: text.Colors{text.FgBlue, text.BgHiBlack}, Header: text.Colors{text.FgHiBlue, text.BgHiBlack}, + IndexColumn: text.Colors{text.FgHiBlue, text.BgHiBlack}, Row: text.Colors{text.FgHiWhite, text.BgBlack}, RowAlternate: text.Colors{text.FgWhite, text.BgBlack}, } // ColorOptionsCyanWhiteOnBlack renders Cyan/White text on Black background. ColorOptionsCyanWhiteOnBlack = ColorOptions{ - IndexColumn: text.Colors{text.FgHiCyan, text.BgHiBlack}, Footer: text.Colors{text.FgCyan, text.BgHiBlack}, Header: text.Colors{text.FgHiCyan, text.BgHiBlack}, + IndexColumn: text.Colors{text.FgHiCyan, text.BgHiBlack}, Row: text.Colors{text.FgHiWhite, text.BgBlack}, RowAlternate: text.Colors{text.FgWhite, text.BgBlack}, } @@ -628,9 +651,9 @@ var ( // ColorOptionsGreenWhiteOnBlack renders Green/White text on Black // background. ColorOptionsGreenWhiteOnBlack = ColorOptions{ - IndexColumn: text.Colors{text.FgHiGreen, text.BgHiBlack}, Footer: text.Colors{text.FgGreen, text.BgHiBlack}, Header: text.Colors{text.FgHiGreen, text.BgHiBlack}, + IndexColumn: text.Colors{text.FgHiGreen, text.BgHiBlack}, Row: text.Colors{text.FgHiWhite, text.BgBlack}, RowAlternate: text.Colors{text.FgWhite, text.BgBlack}, } @@ -638,18 +661,18 @@ var ( // ColorOptionsMagentaWhiteOnBlack renders Magenta/White text on Black // background. 
ColorOptionsMagentaWhiteOnBlack = ColorOptions{ - IndexColumn: text.Colors{text.FgHiMagenta, text.BgHiBlack}, Footer: text.Colors{text.FgMagenta, text.BgHiBlack}, Header: text.Colors{text.FgHiMagenta, text.BgHiBlack}, + IndexColumn: text.Colors{text.FgHiMagenta, text.BgHiBlack}, Row: text.Colors{text.FgHiWhite, text.BgBlack}, RowAlternate: text.Colors{text.FgWhite, text.BgBlack}, } // ColorOptionsRedWhiteOnBlack renders Red/White text on Black background. ColorOptionsRedWhiteOnBlack = ColorOptions{ - IndexColumn: text.Colors{text.FgHiRed, text.BgHiBlack}, Footer: text.Colors{text.FgRed, text.BgHiBlack}, Header: text.Colors{text.FgHiRed, text.BgHiBlack}, + IndexColumn: text.Colors{text.FgHiRed, text.BgHiBlack}, Row: text.Colors{text.FgHiWhite, text.BgBlack}, RowAlternate: text.Colors{text.FgWhite, text.BgBlack}, } @@ -657,9 +680,9 @@ var ( // ColorOptionsYellowWhiteOnBlack renders Yellow/White text on Black // background. ColorOptionsYellowWhiteOnBlack = ColorOptions{ - IndexColumn: text.Colors{text.FgHiYellow, text.BgHiBlack}, Footer: text.Colors{text.FgYellow, text.BgHiBlack}, Header: text.Colors{text.FgHiYellow, text.BgHiBlack}, + IndexColumn: text.Colors{text.FgHiYellow, text.BgHiBlack}, Row: text.Colors{text.FgHiWhite, text.BgBlack}, RowAlternate: text.Colors{text.FgWhite, text.BgBlack}, } @@ -667,19 +690,30 @@ var ( // FormatOptions defines the text-formatting to perform on parts of the Table. type FormatOptions struct { - Footer text.Format // footer row(s) text format - Header text.Format // header row(s) text format - Row text.Format // (data) row(s) text format + Direction text.Direction // (forced) BiDi direction for each Column + Footer text.Format // default text format + FooterAlign text.Align // default horizontal align + FooterVAlign text.VAlign // default vertical align + Header text.Format // default text format + HeaderAlign text.Align // default horizontal align + HeaderVAlign text.VAlign // default vertical align + Row text.Format // default text format + RowAlign text.Align // default horizontal align + RowVAlign text.VAlign // default vertical align } -var ( - // FormatOptionsDefault defines sensible formatting options. - FormatOptionsDefault = FormatOptions{ - Footer: text.FormatUpper, - Header: text.FormatUpper, - Row: text.FormatDefault, - } -) +// FormatOptionsDefault defines sensible formatting options. +var FormatOptionsDefault = FormatOptions{ + Footer: text.FormatUpper, + FooterAlign: text.AlignDefault, + FooterVAlign: text.VAlignDefault, + Header: text.FormatUpper, + HeaderAlign: text.AlignDefault, + HeaderVAlign: text.VAlignDefault, + Row: text.FormatDefault, + RowAlign: text.AlignDefault, + RowVAlign: text.VAlignDefault, +} // HTMLOptions defines the global options to control HTML rendering. type HTMLOptions struct { @@ -689,19 +723,21 @@ type HTMLOptions struct { Newline string // string to replace "\n" characters with } -var ( - // DefaultHTMLOptions defines sensible HTML rendering defaults. - DefaultHTMLOptions = HTMLOptions{ - CSSClass: DefaultHTMLCSSClass, - EmptyColumn: " ", - EscapeText: true, - Newline: "
", - } -) +// DefaultHTMLOptions defines sensible HTML rendering defaults. +var DefaultHTMLOptions = HTMLOptions{ + CSSClass: DefaultHTMLCSSClass, + EmptyColumn: " ", + EscapeText: true, + Newline: "
", +} // Options defines the global options that determine how the Table is // rendered. type Options struct { + // DoNotColorBordersAndSeparators disables coloring all the borders and row + // or column separators. + DoNotColorBordersAndSeparators bool + // DrawBorder enables or disables drawing the border around the Table. // Example of a table where it is disabled: // # │ FIRST NAME │ LAST NAME │ SALARY │ @@ -795,6 +831,26 @@ var ( } ) +// SizeOptions defines the way to control the width of the table output. +type SizeOptions struct { + // WidthMax is the maximum allotted width for the full row; + // any content beyond this will be truncated using the text + // in Style.Box.UnfinishedRow + WidthMax int + // WidthMin is the minimum allotted width for the full row; + // columns will be auto-expanded until the overall width + // is met + WidthMin int +} + +var ( + // SizeOptionsDefault defines sensible size options - basically NONE. + SizeOptionsDefault = SizeOptions{ + WidthMax: 0, + WidthMin: 0, + } +) + // TitleOptions defines the way the title text is to be rendered. type TitleOptions struct { Align text.Align diff --git a/vendor/github.com/jedib0t/go-pretty/v6/table/table.go b/vendor/github.com/jedib0t/go-pretty/v6/table/table.go index ae49725ff9c..ea2fae68f36 100644 --- a/vendor/github.com/jedib0t/go-pretty/v6/table/table.go +++ b/vendor/github.com/jedib0t/go-pretty/v6/table/table.go @@ -4,32 +4,19 @@ import ( "fmt" "io" "strings" + "time" + "unicode" "github.com/jedib0t/go-pretty/v6/text" ) -// Row defines a single row in the Table. -type Row []interface{} - -// RowPainter is a custom function that takes a Row as input and returns the -// text.Colors{} to use on the entire row -type RowPainter func(row Row) text.Colors - -// rowStr defines a single row in the Table comprised of just string objects. -type rowStr []string - -// areEqual returns true if the contents of the 2 given columns are the same -func (row rowStr) areEqual(colIdx1 int, colIdx2 int) bool { - return colIdx1 >= 0 && colIdx2 < len(row) && row[colIdx1] == row[colIdx2] -} - -// Table helps print a 2-dimensional array in a human readable pretty-table. +// Table helps print a 2-dimensional array in a human-readable pretty-table. type Table struct { // allowedRowLength is the max allowed length for a row (or line of output) allowedRowLength int // enable automatic indexing of the rows and columns like a spreadsheet? autoIndex bool - // autoIndexVIndexMaxLength denotes the length in chars for the last rownum + // autoIndexVIndexMaxLength denotes the length in chars for the last row autoIndexVIndexMaxLength int // caption stores the text to be rendered just below the table; and doesn't // get used when rendered as a CSV @@ -41,6 +28,8 @@ type Table struct { // columnConfigMap stores the custom-configuration by column // number and is generated before rendering columnConfigMap map[int]ColumnConfig + // firstRowOfPage tells if the renderer is on the first row of a page? 
+ firstRowOfPage bool // htmlCSSClass stores the HTML CSS Class to use on the node htmlCSSClass string // indexColumn stores the number of the column considered as the "index" @@ -56,14 +45,12 @@ type Table struct { numLinesRendered int // outputMirror stores an io.Writer where the "Render" functions would write outputMirror io.Writer - // pageSize stores the maximum lines to render before rendering the header - // again (to denote a page break) - useful when you are dealing with really - // long tables - pageSize int + // pager controls how the output is separated into pages + pager pager // rows stores the rows that make up the body (in string form) rows []rowStr // rowsColors stores the text.Colors over-rides for each row as defined by - // rowPainter + // rowPainter or rowPainterWithAttributes rowsColors []text.Colors // rowsConfigs stores RowConfig for each row rowsConfigMap map[int]RowConfig @@ -84,6 +71,8 @@ type Table struct { // rowPainter is a custom function that given a Row, returns the colors to // use on the entire row rowPainter RowPainter + // rowPainterWithAttributes is same as rowPainter, but with attributes + rowPainterWithAttributes RowPainterWithAttributes // rowSeparator is a dummy row that contains the separator columns (dashes // that make up the separator between header/body/footer rowSeparator rowStr @@ -92,11 +81,15 @@ type Table struct { separators map[int]bool // sortBy stores a map of Column sortBy []SortBy + // sortedRowIndices is the output of sorting + sortedRowIndices []int // style contains all the strings used to draw the table, and more style *Style // suppressEmptyColumns hides columns which have no content on all regular // rows suppressEmptyColumns bool + // suppressTrailingSpaces removes all trailing spaces from the end of the last column + suppressTrailingSpaces bool // title contains the text to appear above the table title string } @@ -155,14 +148,15 @@ func (t *Table) AppendRows(rows []Row, config ...RowConfig) { // append is a separator, it will not be rendered in addition to the usual table // separator. // -//****************************************************************************** +// ****************************************************************************** // Please note the following caveats: -// 1. SetPageSize(): this may end up creating consecutive separator rows near -// the end of a page or at the beginning of a page -// 2. SortBy(): since SortBy could inherently alter the ordering of rows, the -// separators may not appear after the row it was originally intended to -// follow -//****************************************************************************** +// 1. SetPageSize(): this may end up creating consecutive separator rows near +// the end of a page or at the beginning of a page +// 2. SortBy(): since SortBy could inherently alter the ordering of rows, the +// separators may not appear after the row it was originally intended to +// follow +// +// ****************************************************************************** func (t *Table) AppendSeparator() { if t.separators == nil { t.separators = make(map[int]bool) @@ -172,11 +166,56 @@ func (t *Table) AppendSeparator() { } } +// ImportGrid helps import 1d or 2d arrays as rows. 
+func (t *Table) ImportGrid(grid interface{}) bool { + rows := objAsSlice(grid) + if rows == nil { + return false + } + addedRows := false + for _, row := range rows { + rowAsSlice := objAsSlice(row) + if rowAsSlice != nil { + t.AppendRow(rowAsSlice) + } else if row != nil { + t.AppendRow(Row{row}) + } + addedRows = true + } + return addedRows +} + // Length returns the number of rows to be rendered. func (t *Table) Length() int { return len(t.rowsRaw) } +// Pager returns an object that splits the table output into pages and +// lets you move back and forth through them. +func (t *Table) Pager(opts ...PagerOption) Pager { + for _, opt := range opts { + opt(t) + } + + // use a temporary page separator for splitting up the pages + tempPageSep := fmt.Sprintf("%p // page separator // %d", t.rows, time.Now().UnixNano()) + + // backup + origOutputMirror, origPageSep := t.outputMirror, t.Style().Box.PageSeparator + // restore on exit + defer func() { + t.outputMirror = origOutputMirror + t.Style().Box.PageSeparator = origPageSep + }() + // override + t.outputMirror = nil + t.Style().Box.PageSeparator = tempPageSep + // render + t.pager.pages = strings.Split(t.Render(), tempPageSep) + + return &t.pager +} + // ResetFooters resets and clears all the Footer rows appended earlier. func (t *Table) ResetFooters() { t.rowsFooterRaw = nil @@ -196,6 +235,8 @@ func (t *Table) ResetRows() { // SetAllowedRowLength sets the maximum allowed length or a row (or line of // output) when rendered as a table. Rows that are longer than this limit will // be "snipped" to the length. Length has to be a positive value to take effect. +// +// Deprecated: in favor if Style().Size.WidthMax func (t *Table) SetAllowedRowLength(length int) { t.allowedRowLength = length } @@ -219,7 +260,7 @@ func (t *Table) SetColumnConfigs(configs []ColumnConfig) { t.columnConfigs = configs } -// SetHTMLCSSClass sets the the HTML CSS Class to use on the
<table> node +// SetHTMLCSSClass sets the HTML CSS Class to use on the <table>
node // when rendering the Table in HTML format. // // Deprecated: in favor of Style().HTML.CSSClass @@ -237,6 +278,7 @@ func (t *Table) SetIndexColumn(colNum int) { // in addition to returning a string. func (t *Table) SetOutputMirror(mirror io.Writer) { t.outputMirror = mirror + t.pager.SetOutputMirror(mirror) } // SetPageSize sets the maximum number of lines to render before rendering the @@ -244,15 +286,40 @@ func (t *Table) SetOutputMirror(mirror io.Writer) { // long list of rows that can span pages. Please note that the pagination logic // will not consider Header/Footer lines for paging. func (t *Table) SetPageSize(numLines int) { - t.pageSize = numLines + t.pager.size = numLines } -// SetRowPainter sets the RowPainter function which determines the colors to use -// on a row. Before rendering, this function is invoked on all rows and the -// color of each row is determined. This color takes precedence over other ways -// to set color (ColumnConfig.Color*, SetColor*()). -func (t *Table) SetRowPainter(painter RowPainter) { - t.rowPainter = painter +// SetRowPainter sets up the function which determines the colors to use on a +// row. Before rendering, this function is invoked on all rows and the color +// of each row is determined. This color takes precedence over other ways to +// set color (ColumnConfig.Color*, SetColor*()). +func (t *Table) SetRowPainter(painter interface{}) { + // TODO: fix interface on major version bump to accept only + // one type of RowPainter: RowPainterWithAttributes renamed to RowPainter + + // reset both so only one is set at any given time + t.rowPainter = nil + t.rowPainterWithAttributes = nil + + // if called as SetRowPainter(RowPainter(func...)) + switch painter.(type) { + case RowPainter: + t.rowPainter = painter.(RowPainter) + return + case RowPainterWithAttributes: + t.rowPainterWithAttributes = painter.(RowPainterWithAttributes) + return + } + + // if called as SetRowPainter(func...) + switch fmt.Sprintf("%T", painter) { + case "func(table.Row) text.Colors": + t.rowPainter = painter.(func(row Row) text.Colors) + return + case "func(table.Row, table.RowAttributes) text.Colors": + t.rowPainterWithAttributes = painter.(func(row Row, attr RowAttributes) text.Colors) + return + } } // SetStyle overrides the DefaultStyle with the provided one. @@ -278,6 +345,11 @@ func (t *Table) Style() *Style { tempStyle := StyleDefault t.style = &tempStyle } + // override WidthMax with allowedRowLength until allowedRowLength is + // removed from code + if t.allowedRowLength > 0 { + t.style.Size.WidthMax = t.allowedRowLength + } return t.style } @@ -287,45 +359,9 @@ func (t *Table) SuppressEmptyColumns() { t.suppressEmptyColumns = true } -func (t *Table) analyzeAndStringify(row Row, hint renderHint) rowStr { - // update t.numColumns if this row is the longest seen till now - if len(row) > t.numColumns { - // init the slice for the first time; and pad it the rest of the time - if t.numColumns == 0 { - t.columnIsNonNumeric = make([]bool, len(row)) - } else { - t.columnIsNonNumeric = append(t.columnIsNonNumeric, make([]bool, len(row)-t.numColumns)...) 
- } - // update t.numColumns - t.numColumns = len(row) - } - - // convert each column to string and figure out if it has non-numeric data - rowOut := make(rowStr, len(row)) - for colIdx, col := range row { - // if the column is not a number, keep track of it - if !hint.isHeaderRow && !hint.isFooterRow && !t.columnIsNonNumeric[colIdx] && !isNumber(col) { - t.columnIsNonNumeric[colIdx] = true - } - - // convert to a string and store it in the row - var colStr string - if transformer := t.getColumnTransformer(colIdx, hint); transformer != nil { - colStr = transformer(col) - } else if colStrVal, ok := col.(string); ok { - colStr = colStrVal - } else { - colStr = fmt.Sprint(col) - } - if strings.Contains(colStr, "\t") { - colStr = strings.Replace(colStr, "\t", " ", -1) - } - if strings.Contains(colStr, "\r") { - colStr = strings.Replace(colStr, "\r", "", -1) - } - rowOut[colIdx] = colStr - } - return rowOut +// SuppressTrailingSpaces removes all trailing spaces from the output. +func (t *Table) SuppressTrailingSpaces() { + t.suppressTrailingSpaces = true } func (t *Table) getAlign(colIdx int, hint renderHint) text.Align { @@ -344,6 +380,12 @@ func (t *Table) getAlign(colIdx int, hint renderHint) text.Align { align = text.AlignRight } else if hint.isAutoIndexRow { align = text.AlignCenter + } else if hint.isHeaderRow { + align = t.style.Format.HeaderAlign + } else if hint.isFooterRow { + align = t.style.Format.FooterAlign + } else { + align = t.style.Format.RowAlign } } return align @@ -358,18 +400,74 @@ func (t *Table) getAutoIndexColumnIDs() rowStr { } func (t *Table) getBorderColors(hint renderHint) text.Colors { - if hint.isFooterRow { + if t.style.Options.DoNotColorBordersAndSeparators { + return nil + } else if t.style.Color.Border != nil { + return t.style.Color.Border + } else if hint.isTitleRow { + return t.style.Title.Colors + } else if hint.isHeaderRow { + return t.style.Color.Header + } else if hint.isFooterRow { return t.style.Color.Footer } else if t.autoIndex { return t.style.Color.IndexColumn + } else if hint.rowNumber%2 == 0 && t.style.Color.RowAlternate != nil { + return t.style.Color.RowAlternate } - return t.style.Color.Header + return t.style.Color.Row +} + +func (t *Table) getBorderLeft(hint renderHint) string { + border := t.style.Box.Left + if hint.isBorderTop { + if t.title != "" { + border = t.style.Box.LeftSeparator + } else { + border = t.style.Box.TopLeft + } + } else if hint.isBorderBottom { + border = t.style.Box.BottomLeft + } else if hint.isSeparatorRow { + if t.autoIndex && hint.isHeaderOrFooterSeparator() { + border = t.style.Box.Left + } else if !t.autoIndex && t.shouldMergeCellsVertically(0, hint) { + border = t.style.Box.Left + } else { + border = t.style.Box.LeftSeparator + } + } + return border +} + +func (t *Table) getBorderRight(hint renderHint) string { + border := t.style.Box.Right + if hint.isBorderTop { + if t.title != "" { + border = t.style.Box.RightSeparator + } else { + border = t.style.Box.TopRight + } + } else if hint.isBorderBottom { + border = t.style.Box.BottomRight + } else if hint.isSeparatorRow { + if t.shouldMergeCellsVertically(t.numColumns-1, hint) { + border = t.style.Box.Right + } else { + border = t.style.Box.RightSeparator + } + } + return border } func (t *Table) getColumnColors(colIdx int, hint renderHint) text.Colors { - if t.rowPainter != nil && hint.isRegularRow() && !t.isIndexColumn(colIdx, hint) { - colors := t.rowsColors[hint.rowNumber-1] - if colors != nil { + if hint.isBorderOrSeparator() { + if colors := 
t.getColumnColorsForBorderOrSeparator(hint); colors != nil { + return colors + } + } + if t.hasRowPainter() && hint.isRegularNonSeparatorRow() && !t.isIndexColumn(colIdx, hint) { + if colors := t.rowsColors[hint.rowNumber-1]; colors != nil { return colors } } @@ -386,6 +484,19 @@ func (t *Table) getColumnColors(colIdx int, hint renderHint) text.Colors { return nil } +func (t *Table) getColumnColorsForBorderOrSeparator(hint renderHint) text.Colors { + if t.style.Options.DoNotColorBordersAndSeparators { + return text.Colors{} // not nil to force caller to paint with no colors + } + if (hint.isBorderBottom || hint.isBorderTop) && t.style.Color.Border != nil { + return t.style.Color.Border + } + if hint.isSeparatorRow && t.style.Color.Separator != nil { + return t.style.Color.Separator + } + return nil +} + func (t *Table) getColumnSeparator(row rowStr, colIdx int, hint renderHint) string { separator := t.style.Box.MiddleVertical if hint.isSeparatorRow { @@ -402,12 +513,9 @@ func (t *Table) getColumnSeparator(row rowStr, colIdx int, hint renderHint) stri separator = t.style.Box.BottomSeparator } } else { - separator = t.getColumnSeparatorNonBorder( - t.shouldMergeCellsHorizontallyAbove(row, colIdx, hint), - t.shouldMergeCellsHorizontallyBelow(row, colIdx, hint), - colIdx, - hint, - ) + sm1 := t.shouldMergeCellsHorizontallyAbove(row, colIdx, hint) + sm2 := t.shouldMergeCellsHorizontallyBelow(row, colIdx, hint) + separator = t.getColumnSeparatorNonBorder(sm1, sm2, colIdx, hint) } } return separator @@ -448,6 +556,8 @@ func (t *Table) getColumnSeparatorNonBorderNonAutoIndex(mergeCellsAbove bool, me return t.style.Box.MiddleVertical } else if mergeCurrCol { return t.style.Box.LeftSeparator + } else if mergeNextCol { + return t.style.Box.RightSeparator } return t.style.Box.MiddleSeparator } @@ -491,6 +601,42 @@ func (t *Table) getFormat(hint renderHint) text.Format { return t.style.Format.Row } +func (t *Table) getMaxColumnLengthForMerging(colIdx int) int { + maxColumnLength := t.maxColumnLengths[colIdx] + maxColumnLength += text.StringWidthWithoutEscSequences(t.style.Box.PaddingRight + t.style.Box.PaddingLeft) + if t.style.Options.SeparateColumns { + maxColumnLength += text.StringWidthWithoutEscSequences(t.style.Box.EmptySeparator) + } + return maxColumnLength +} + +// getMergedColumnIndices returns a map of colIdx values to all the other colIdx +// values (that are being merged) and their lengths. 
+func (t *Table) getMergedColumnIndices(row rowStr, hint renderHint) mergedColumnIndices { + if !t.getRowConfig(hint).AutoMerge { + return nil + } + + mci := make(mergedColumnIndices) + for colIdx := 0; colIdx < t.numColumns-1; colIdx++ { + // look backward + for otherColIdx := colIdx - 1; colIdx >= 0 && otherColIdx >= 0; otherColIdx-- { + if row[colIdx] != row[otherColIdx] { + break + } + mci.safeAppend(colIdx, otherColIdx) + } + // look forward + for otherColIdx := colIdx + 1; colIdx < len(row) && otherColIdx < len(row); otherColIdx++ { + if row[colIdx] != row[otherColIdx] { + break + } + mci.safeAppend(colIdx, otherColIdx) + } + } + return mci +} + func (t *Table) getRow(rowIdx int, hint renderHint) rowStr { switch { case hint.isHeaderRow: @@ -526,7 +672,13 @@ func (t *Table) getRowConfig(hint renderHint) RowConfig { } func (t *Table) getSeparatorColors(hint renderHint) text.Colors { - if hint.isHeaderRow { + if t.style.Options.DoNotColorBordersAndSeparators { + return nil + } else if (hint.isBorderBottom || hint.isBorderTop) && t.style.Color.Border != nil { + return t.style.Color.Border + } else if t.style.Color.Separator != nil { + return t.style.Color.Separator + } else if hint.isHeaderRow { return t.style.Color.Header } else if hint.isFooterRow { return t.style.Color.Footer @@ -549,103 +701,35 @@ func (t *Table) getVAlign(colIdx int, hint renderHint) text.VAlign { vAlign = cfg.VAlign } } - return vAlign -} - -func (t *Table) initForRender() { - // pick a default style if none was set until now - t.Style() - - // initialize the column configs and normalize them - t.initForRenderColumnConfigs() - - // initialize and stringify all the raw rows - t.initForRenderRows() - - // find the longest continuous line in each column - t.initForRenderColumnLengths() - - // generate a separator row and calculate maximum row length - t.initForRenderRowSeparator() - - // reset the counter for the number of lines rendered - t.numLinesRendered = 0 -} - -func (t *Table) initForRenderColumnConfigs() { - findColumnNumber := func(row Row, colName string) int { - for colIdx, col := range row { - if fmt.Sprint(col) == colName { - return colIdx + 1 - } - } - return 0 - } - - t.columnConfigMap = map[int]ColumnConfig{} - for _, colCfg := range t.columnConfigs { - // find the column number if none provided; this logic can work only if - // a header row is present and has a column with the given name - if colCfg.Number == 0 { - for _, row := range t.rowsHeaderRaw { - colCfg.Number = findColumnNumber(row, colCfg.Name) - if colCfg.Number > 0 { - break - } - } - } - if colCfg.Number > 0 { - t.columnConfigMap[colCfg.Number-1] = colCfg - } - } -} - -func (t *Table) initForRenderColumnLengths() { - var findMaxColumnLengths = func(rows []rowStr) { - for _, row := range rows { - for colIdx, colStr := range row { - longestLineLen := text.LongestLineLen(colStr) - if longestLineLen > t.maxColumnLengths[colIdx] { - t.maxColumnLengths[colIdx] = longestLineLen - } - } - } - } - - t.maxColumnLengths = make([]int, t.numColumns) - findMaxColumnLengths(t.rowsHeader) - findMaxColumnLengths(t.rows) - findMaxColumnLengths(t.rowsFooter) - - // restrict the column lengths if any are over or under the limits - for colIdx := range t.maxColumnLengths { - maxWidth := t.getColumnWidthMax(colIdx) - if maxWidth > 0 && t.maxColumnLengths[colIdx] > maxWidth { - t.maxColumnLengths[colIdx] = maxWidth - } - minWidth := t.getColumnWidthMin(colIdx) - if minWidth > 0 && t.maxColumnLengths[colIdx] < minWidth { - t.maxColumnLengths[colIdx] = minWidth + 
if vAlign == text.VAlignDefault { + if hint.isHeaderRow { + vAlign = t.style.Format.HeaderVAlign + } else if hint.isFooterRow { + vAlign = t.style.Format.FooterVAlign + } else { + vAlign = t.style.Format.RowVAlign } } + return vAlign } -func (t *Table) initForRenderHideColumns() { - // if there is nothing to hide, return fast - hasHiddenColumns := false +func (t *Table) hasHiddenColumns() bool { for _, cc := range t.columnConfigMap { if cc.Hidden { - hasHiddenColumns = true - break + return true } } - if !hasHiddenColumns { - return - } + return false +} +func (t *Table) hasRowPainter() bool { + return t.rowPainter != nil || t.rowPainterWithAttributes != nil +} + +func (t *Table) hideColumns() map[int]int { colIdxMap := make(map[int]int) numColumns := 0 - _hideColumns := func(rows []rowStr) []rowStr { + hideColumnsInRows := func(rows []rowStr) []rowStr { var rsp []rowStr for _, row := range rows { var rowNew rowStr @@ -665,133 +749,14 @@ func (t *Table) initForRenderHideColumns() { } // hide columns as directed - t.rows = _hideColumns(t.rows) - t.rowsFooter = _hideColumns(t.rowsFooter) - t.rowsHeader = _hideColumns(t.rowsHeader) + t.rows = hideColumnsInRows(t.rows) + t.rowsFooter = hideColumnsInRows(t.rowsFooter) + t.rowsHeader = hideColumnsInRows(t.rowsHeader) // reset numColumns to the new number of columns t.numColumns = numColumns - // re-create columnIsNonNumeric with new column indices - columnIsNonNumeric := make([]bool, t.numColumns) - for oldColIdx, nonNumeric := range t.columnIsNonNumeric { - if newColIdx, ok := colIdxMap[oldColIdx]; ok { - columnIsNonNumeric[newColIdx] = nonNumeric - } - } - t.columnIsNonNumeric = columnIsNonNumeric - - // re-create columnConfigMap with new column indices - columnConfigMap := make(map[int]ColumnConfig) - for oldColIdx, cc := range t.columnConfigMap { - if newColIdx, ok := colIdxMap[oldColIdx]; ok { - columnConfigMap[newColIdx] = cc - } - } - t.columnConfigMap = columnConfigMap -} - -func (t *Table) initForRenderRows() { - t.reset() - - // auto-index: calc the index column's max length - t.autoIndexVIndexMaxLength = len(fmt.Sprint(len(t.rowsRaw))) - - // stringify all the rows to make it easy to render - if t.rowPainter != nil { - t.rowsColors = make([]text.Colors, len(t.rowsRaw)) - } - t.rows = t.initForRenderRowsStringify(t.rowsRaw, renderHint{}) - t.rowsFooter = t.initForRenderRowsStringify(t.rowsFooterRaw, renderHint{isFooterRow: true}) - t.rowsHeader = t.initForRenderRowsStringify(t.rowsHeaderRaw, renderHint{isHeaderRow: true}) - - // sort the rows as requested - t.initForRenderSortRows() - - // suppress columns without any content - t.initForRenderSuppressColumns() - - // strip out hidden columns - t.initForRenderHideColumns() -} - -func (t *Table) initForRenderRowsStringify(rows []Row, hint renderHint) []rowStr { - rowsStr := make([]rowStr, len(rows)) - for idx, row := range rows { - if t.rowPainter != nil && hint.isRegularRow() { - t.rowsColors[idx] = t.rowPainter(row) - } - rowsStr[idx] = t.analyzeAndStringify(row, hint) - } - return rowsStr -} - -func (t *Table) initForRenderRowSeparator() { - t.maxRowLength = 0 - if t.autoIndex { - t.maxRowLength += text.RuneCount(t.style.Box.PaddingLeft) - t.maxRowLength += len(fmt.Sprint(len(t.rows))) - t.maxRowLength += text.RuneCount(t.style.Box.PaddingRight) - if t.style.Options.SeparateColumns { - t.maxRowLength += text.RuneCount(t.style.Box.MiddleSeparator) - } - } - if t.style.Options.SeparateColumns { - t.maxRowLength += text.RuneCount(t.style.Box.MiddleSeparator) * (t.numColumns - 1) - } - 
t.rowSeparator = make(rowStr, t.numColumns) - for colIdx, maxColumnLength := range t.maxColumnLengths { - maxColumnLength += text.RuneCount(t.style.Box.PaddingLeft + t.style.Box.PaddingRight) - t.maxRowLength += maxColumnLength - t.rowSeparator[colIdx] = text.RepeatAndTrim(t.style.Box.MiddleHorizontal, maxColumnLength) - } - if t.style.Options.DrawBorder { - t.maxRowLength += text.RuneCount(t.style.Box.Left + t.style.Box.Right) - } -} - -func (t *Table) initForRenderSortRows() { - if len(t.sortBy) == 0 { - return - } - - // sort the rows - sortedRowIndices := t.getSortedRowIndices() - sortedRows := make([]rowStr, len(t.rows)) - for idx := range t.rows { - sortedRows[idx] = t.rows[sortedRowIndices[idx]] - } - t.rows = sortedRows - - // sort the rowsColors - if len(t.rowsColors) > 0 { - sortedRowsColors := make([]text.Colors, len(t.rows)) - for idx := range t.rows { - sortedRowsColors[idx] = t.rowsColors[sortedRowIndices[idx]] - } - t.rowsColors = sortedRowsColors - } -} - -func (t *Table) initForRenderSuppressColumns() { - shouldSuppressColumn := func(colIdx int) bool { - for _, row := range t.rows { - if colIdx < len(row) && row[colIdx] != "" { - return false - } - } - return true - } - - if t.suppressEmptyColumns { - for colIdx := 0; colIdx < t.numColumns; colIdx++ { - if shouldSuppressColumn(colIdx) { - cc := t.columnConfigMap[colIdx] - cc.Hidden = true - t.columnConfigMap[colIdx] = cc - } - } - } + return colIdxMap } func (t *Table) isIndexColumn(colIdx int, hint renderHint) bool { @@ -800,6 +765,13 @@ func (t *Table) isIndexColumn(colIdx int, hint renderHint) bool { func (t *Table) render(out *strings.Builder) string { outStr := out.String() + if t.suppressTrailingSpaces { + var trimmed []string + for _, line := range strings.Split(outStr, "\n") { + trimmed = append(trimmed, strings.TrimRightFunc(line, unicode.IsSpace)) + } + outStr = strings.Join(trimmed, "\n") + } if t.outputMirror != nil && len(outStr) > 0 { _, _ = t.outputMirror.Write([]byte(outStr)) _, _ = t.outputMirror.Write([]byte("\n")) @@ -807,19 +779,6 @@ func (t *Table) render(out *strings.Builder) string { return outStr } -func (t *Table) reset() { - t.autoIndexVIndexMaxLength = 0 - t.columnIsNonNumeric = nil - t.maxColumnLengths = nil - t.maxRowLength = 0 - t.numColumns = 0 - t.rowsColors = nil - t.rowSeparator = nil - t.rows = nil - t.rowsFooter = nil - t.rowsHeader = nil -} - func (t *Table) shouldMergeCellsHorizontallyAbove(row rowStr, colIdx int, hint renderHint) bool { if hint.isAutoIndexColumn || hint.isAutoIndexRow { return false @@ -853,18 +812,21 @@ func (t *Table) shouldMergeCellsHorizontallyBelow(row rowStr, colIdx int, hint r var rowConfig RowConfig if hint.isSeparatorRow { - if hint.isHeaderRow && hint.rowNumber == 0 { + if hint.isRegularRow() { + rowConfig = t.getRowConfig(renderHint{rowNumber: hint.rowNumber + 1}) + row = t.getRow(hint.rowNumber, renderHint{}) + } else if hint.isHeaderRow && hint.rowNumber == 0 { rowConfig = t.getRowConfig(renderHint{isHeaderRow: true, rowNumber: 1}) row = t.getRow(0, hint) } else if hint.isHeaderRow && hint.isLastRow { rowConfig = t.getRowConfig(renderHint{rowNumber: 1}) row = t.getRow(0, renderHint{}) + } else if hint.isHeaderRow { + rowConfig = t.getRowConfig(renderHint{isHeaderRow: true, rowNumber: hint.rowNumber + 1}) + row = t.getRow(hint.rowNumber, hint) } else if hint.isFooterRow && hint.rowNumber >= 0 { rowConfig = t.getRowConfig(renderHint{isFooterRow: true, rowNumber: 1}) row = t.getRow(hint.rowNumber, renderHint{isFooterRow: true}) - } else if 
hint.isRegularRow() { - rowConfig = t.getRowConfig(renderHint{rowNumber: hint.rowNumber + 1}) - row = t.getRow(hint.rowNumber, renderHint{}) } } @@ -875,49 +837,57 @@ func (t *Table) shouldMergeCellsHorizontallyBelow(row rowStr, colIdx int, hint r } func (t *Table) shouldMergeCellsVertically(colIdx int, hint renderHint) bool { - if t.columnConfigMap[colIdx].AutoMerge && colIdx < t.numColumns { + if !t.firstRowOfPage && t.columnConfigMap[colIdx].AutoMerge && colIdx < t.numColumns { if hint.isSeparatorRow { rowPrev := t.getRow(hint.rowNumber-1, hint) rowNext := t.getRow(hint.rowNumber, hint) if colIdx < len(rowPrev) && colIdx < len(rowNext) { - return rowPrev[colIdx] == rowNext[colIdx] || "" == rowNext[colIdx] + return rowPrev[colIdx] == rowNext[colIdx] } } else { rowPrev := t.getRow(hint.rowNumber-2, hint) rowCurr := t.getRow(hint.rowNumber-1, hint) if colIdx < len(rowPrev) && colIdx < len(rowCurr) { - return rowPrev[colIdx] == rowCurr[colIdx] || "" == rowCurr[colIdx] + return rowPrev[colIdx] == rowCurr[colIdx] } } } return false } -// renderHint has hints for the Render*() logic -type renderHint struct { - isAutoIndexColumn bool // auto-index column? - isAutoIndexRow bool // auto-index row? - isBorderBottom bool // bottom-border? - isBorderTop bool // top-border? - isFirstRow bool // first-row of header/footer/regular-rows? - isFooterRow bool // footer row? - isHeaderRow bool // header row? - isLastLineOfRow bool // last-line of the current row? - isLastRow bool // last-row of header/footer/regular-rows? - isSeparatorRow bool // separator row? - rowLineNumber int // the line number for a multi-line row - rowNumber int // the row number/index -} - -func (h *renderHint) isRegularRow() bool { - return !h.isHeaderRow && !h.isFooterRow -} +func (t *Table) shouldSeparateRows(rowIdx int, numRows int) bool { + // not asked to separate rows and no manually added separator + if !t.style.Options.SeparateRows && !t.separators[rowIdx] { + return false + } -func (h *renderHint) isHeaderOrFooterSeparator() bool { - return h.isSeparatorRow && !h.isBorderBottom && !h.isBorderTop && - ((h.isHeaderRow && !h.isLastRow) || (h.isFooterRow && (!h.isFirstRow || h.rowNumber > 0))) + pageSize := numRows + if t.pager.size > 0 { + pageSize = t.pager.size + } + if rowIdx%pageSize == pageSize-1 { // last row of page + return false + } + if rowIdx == numRows-1 { // last row of table + return false + } + return true } -func (h *renderHint) isLastLineOfLastRow() bool { - return h.isLastLineOfRow && h.isLastRow +func (t *Table) wrapRow(row rowStr) (int, rowStr) { + colMaxLines := 0 + rowWrapped := make(rowStr, len(row)) + for colIdx, colStr := range row { + widthEnforcer := t.columnConfigMap[colIdx].getWidthMaxEnforcer() + maxWidth := t.getColumnWidthMax(colIdx) + if maxWidth == 0 { + maxWidth = t.maxColumnLengths[colIdx] + } + rowWrapped[colIdx] = widthEnforcer(colStr, maxWidth) + colNumLines := strings.Count(rowWrapped[colIdx], "\n") + 1 + if colNumLines > colMaxLines { + colMaxLines = colNumLines + } + } + return colMaxLines, rowWrapped } diff --git a/vendor/github.com/jedib0t/go-pretty/v6/table/util.go b/vendor/github.com/jedib0t/go-pretty/v6/table/util.go index 43f6a9376ce..6b7a6585dee 100644 --- a/vendor/github.com/jedib0t/go-pretty/v6/table/util.go +++ b/vendor/github.com/jedib0t/go-pretty/v6/table/util.go @@ -17,8 +17,20 @@ func AutoIndexColumnID(colIdx int) string { return out } +// WidthEnforcer is a function that helps enforce a width condition on a string. 
+type WidthEnforcer func(col string, maxLen int) string + +// widthEnforcerNone returns the input string as is without any modifications. +func widthEnforcerNone(col string, _ int) string { + return col +} + // isNumber returns true if the argument is a numeric type; false otherwise. func isNumber(x interface{}) bool { + if x == nil { + return false + } + switch reflect.TypeOf(x).Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, @@ -28,10 +40,73 @@ func isNumber(x interface{}) bool { return false } -// WidthEnforcer is a function that helps enforce a width condition on a string. -type WidthEnforcer func(col string, maxLen int) string +type mergedColumnIndices map[int]map[int]bool -// widthEnforcerNone returns the input string as is without any modifications. -func widthEnforcerNone(col string, maxLen int) string { - return col +func (m mergedColumnIndices) mergedLength(colIdx int, maxColumnLengths []int) int { + mergedLength := maxColumnLengths[colIdx] + for otherColIdx := range m[colIdx] { + mergedLength += maxColumnLengths[otherColIdx] + } + return mergedLength +} + +func (m mergedColumnIndices) len(colIdx int) int { + return len(m[colIdx]) + 1 +} + +func (m mergedColumnIndices) safeAppend(colIdx, otherColIdx int) { + // map + if m[colIdx] == nil { + m[colIdx] = make(map[int]bool) + } + m[colIdx][otherColIdx] = true + + // reverse map + if m[otherColIdx] == nil { + m[otherColIdx] = make(map[int]bool) + } + m[otherColIdx][colIdx] = true +} + +func objAsSlice(in interface{}) []interface{} { + var out []interface{} + if in != nil { + // dereference pointers + val := reflect.ValueOf(in) + if val.Kind() == reflect.Ptr && !val.IsNil() { + in = val.Elem().Interface() + } + + if objIsSlice(in) { + v := reflect.ValueOf(in) + for i := 0; i < v.Len(); i++ { + // dereference pointers + v2 := v.Index(i) + if v2.Kind() == reflect.Ptr && !v2.IsNil() { + v2 = reflect.ValueOf(v2.Elem().Interface()) + } + + out = append(out, v2.Interface()) + } + } + } + + // remove trailing nil pointers + tailIdx := len(out) + for i := len(out) - 1; i >= 0; i-- { + val := reflect.ValueOf(out[i]) + if val.Kind() != reflect.Ptr || !val.IsNil() { + break + } + tailIdx = i + } + return out[:tailIdx] +} + +func objIsSlice(in interface{}) bool { + if in == nil { + return false + } + k := reflect.TypeOf(in).Kind() + return k == reflect.Slice || k == reflect.Array } diff --git a/vendor/github.com/jedib0t/go-pretty/v6/table/writer.go b/vendor/github.com/jedib0t/go-pretty/v6/table/writer.go index c70ba33637d..51eee8c7266 100644 --- a/vendor/github.com/jedib0t/go-pretty/v6/table/writer.go +++ b/vendor/github.com/jedib0t/go-pretty/v6/table/writer.go @@ -4,37 +4,43 @@ import ( "io" ) -// Writer declares the interfaces that can be used to setup and render a table. +// Writer declares the interfaces that can be used to set up and render a table. 
type Writer interface { AppendFooter(row Row, configs ...RowConfig) AppendHeader(row Row, configs ...RowConfig) AppendRow(row Row, configs ...RowConfig) AppendRows(rows []Row, configs ...RowConfig) AppendSeparator() + ImportGrid(grid interface{}) bool Length() int + Pager(opts ...PagerOption) Pager Render() string RenderCSV() string RenderHTML() string RenderMarkdown() string + RenderTSV() string ResetFooters() ResetHeaders() ResetRows() - SetAllowedRowLength(length int) SetAutoIndex(autoIndex bool) SetCaption(format string, a ...interface{}) SetColumnConfigs(configs []ColumnConfig) SetIndexColumn(colNum int) SetOutputMirror(mirror io.Writer) - SetPageSize(numLines int) - SetRowPainter(painter RowPainter) + SetRowPainter(painter interface{}) SetStyle(style Style) SetTitle(format string, a ...interface{}) SortBy(sortBy []SortBy) Style() *Style SuppressEmptyColumns() + SuppressTrailingSpaces() + // deprecated; in favor if Style().Size.WidthMax + SetAllowedRowLength(length int) // deprecated; in favor of Style().HTML.CSSClass SetHTMLCSSClass(cssClass string) + // deprecated; in favor of Pager() + SetPageSize(numLines int) } // NewWriter initializes and returns a Writer. diff --git a/vendor/github.com/jedib0t/go-pretty/v6/text/README.md b/vendor/github.com/jedib0t/go-pretty/v6/text/README.md new file mode 100644 index 00000000000..afd163aff12 --- /dev/null +++ b/vendor/github.com/jedib0t/go-pretty/v6/text/README.md @@ -0,0 +1,8 @@ +# text + +[![Go Reference](https://pkg.go.dev/badge/github.com/jedib0t/go-pretty/v6.svg)](https://pkg.go.dev/github.com/jedib0t/go-pretty/v6/text) + +Package with utility functions to manipulate strings/text. + +Used heavily in the other packages in this repo ([list](../list), +[progress](../progress), and [table](../table)). \ No newline at end of file diff --git a/vendor/github.com/jedib0t/go-pretty/v6/text/align.go b/vendor/github.com/jedib0t/go-pretty/v6/text/align.go index dd4134d3a62..189531dc692 100644 --- a/vendor/github.com/jedib0t/go-pretty/v6/text/align.go +++ b/vendor/github.com/jedib0t/go-pretty/v6/text/align.go @@ -17,32 +17,44 @@ const ( AlignCenter // " center " AlignJustify // "justify it" AlignRight // " right" + AlignAuto // AlignRight for numbers, AlignLeft for the rest ) // Apply aligns the text as directed. 
For ex.: -// * AlignDefault.Apply("Jon Snow", 12) returns "Jon Snow " -// * AlignLeft.Apply("Jon Snow", 12) returns "Jon Snow " -// * AlignCenter.Apply("Jon Snow", 12) returns " Jon Snow " -// * AlignJustify.Apply("Jon Snow", 12) returns "Jon Snow" -// * AlignRight.Apply("Jon Snow", 12) returns " Jon Snow" +// - AlignDefault.Apply("Jon Snow", 12) returns "Jon Snow " +// - AlignLeft.Apply("Jon Snow", 12) returns "Jon Snow " +// - AlignCenter.Apply("Jon Snow", 12) returns " Jon Snow " +// - AlignJustify.Apply("Jon Snow", 12) returns "Jon Snow" +// - AlignRight.Apply("Jon Snow", 12) returns " Jon Snow" +// - AlignAuto.Apply("Jon Snow", 12) returns "Jon Snow " func (a Align) Apply(text string, maxLength int) string { - text = a.trimString(text) + aComputed := a + if aComputed == AlignAuto { + _, err := strconv.ParseFloat(text, 64) + if err == nil { // was able to parse a number out of the string + aComputed = AlignRight + } else { + aComputed = AlignLeft + } + } + + text = aComputed.trimString(text) sLen := utf8.RuneCountInString(text) - sLenWoE := RuneCount(text) + sLenWoE := StringWidthWithoutEscSequences(text) numEscChars := sLen - sLenWoE // now, align the text - switch a { + switch aComputed { case AlignDefault, AlignLeft: return fmt.Sprintf("%-"+strconv.Itoa(maxLength+numEscChars)+"s", text) case AlignCenter: if sLenWoE < maxLength { // left pad with half the number of spaces needed before using %text return fmt.Sprintf("%"+strconv.Itoa(maxLength+numEscChars)+"s", - text+strings.Repeat(" ", int((maxLength-sLenWoE)/2))) + text+strings.Repeat(" ", (maxLength-sLenWoE)/2)) } case AlignJustify: - return a.justifyText(text, sLenWoE, maxLength) + return justifyText(text, sLenWoE, maxLength) } return fmt.Sprintf("%"+strconv.Itoa(maxLength+numEscChars)+"s", text) } @@ -77,16 +89,34 @@ func (a Align) MarkdownProperty() string { } } -func (a Align) justifyText(text string, textLength int, maxLength int) string { +func (a Align) trimString(text string) string { + switch a { + case AlignDefault, AlignLeft: + if strings.HasSuffix(text, " ") { + return strings.TrimRight(text, " ") + } + case AlignRight: + if strings.HasPrefix(text, " ") { + return strings.TrimLeft(text, " ") + } + default: + if strings.HasPrefix(text, " ") || strings.HasSuffix(text, " ") { + return strings.Trim(text, " ") + } + } + return text +} + +func justifyText(text string, textLength int, maxLength int) string { // split the text into individual words - wordsUnfiltered := strings.Split(text, " ") - words := Filter(wordsUnfiltered, func(item string) bool { + words := Filter(strings.Split(text, " "), func(item string) bool { return item != "" }) - // empty string implies spaces for maxLength + // empty string implies result is just spaces for maxLength if len(words) == 0 { return strings.Repeat(" ", maxLength) } + // get the number of spaces to insert into the text numSpacesNeeded := maxLength - textLength + strings.Count(text, " ") numSpacesNeededBetweenWords := 0 @@ -117,21 +147,3 @@ func (a Align) justifyText(text string, textLength int, maxLength int) string { } return outText.String() } - -func (a Align) trimString(text string) string { - switch a { - case AlignDefault, AlignLeft: - if strings.HasSuffix(text, " ") { - return strings.TrimRight(text, " ") - } - case AlignRight: - if strings.HasPrefix(text, " ") { - return strings.TrimLeft(text, " ") - } - default: - if strings.HasPrefix(text, " ") || strings.HasSuffix(text, " ") { - return strings.Trim(text, " ") - } - } - return text -} diff --git 
a/vendor/github.com/jedib0t/go-pretty/v6/text/ansi.go b/vendor/github.com/jedib0t/go-pretty/v6/text/ansi.go index 1ea5cdfe0db..6f13b656a8f 100644 --- a/vendor/github.com/jedib0t/go-pretty/v6/text/ansi.go +++ b/vendor/github.com/jedib0t/go-pretty/v6/text/ansi.go @@ -8,7 +8,8 @@ var ANSICodesSupported = areANSICodesSupported() // Escape encodes the string with the ANSI Escape Sequence. // For ex.: -// Escape("Ghost", "") == "Ghost" +// +// Escape("Ghost", "") == "Ghost" // Escape("Ghost", "\x1b[91m") == "\x1b[91mGhost\x1b[0m" // Escape("\x1b[94mGhost\x1b[0mLady", "\x1b[91m") == "\x1b[94mGhost\x1b[0m\x1b[91mLady\x1b[0m" // Escape("Nymeria\x1b[94mGhost\x1b[0mLady", "\x1b[91m") == "\x1b[91mNymeria\x1b[94mGhost\x1b[0m\x1b[91mLady\x1b[0m" @@ -30,14 +31,15 @@ func Escape(str string, escapeSeq string) string { // StripEscape strips all ANSI Escape Sequence from the string. // For ex.: -// StripEscape("Ghost") == "Ghost" +// +// StripEscape("Ghost") == "Ghost" // StripEscape("\x1b[91mGhost\x1b[0m") == "Ghost" // StripEscape("\x1b[94mGhost\x1b[0m\x1b[91mLady\x1b[0m") == "GhostLady" // StripEscape("\x1b[91mNymeria\x1b[94mGhost\x1b[0m\x1b[91mLady\x1b[0m") == "NymeriaGhostLady" // StripEscape("\x1b[91mNymeria \x1b[94mGhost\x1b[0m\x1b[91m Lady\x1b[0m") == "Nymeria Ghost Lady" func StripEscape(str string) string { var out strings.Builder - out.Grow(RuneCount(str)) + out.Grow(StringWidthWithoutEscSequences(str)) isEscSeq := false for _, sChr := range str { diff --git a/vendor/github.com/jedib0t/go-pretty/v6/text/ansi_unix.go b/vendor/github.com/jedib0t/go-pretty/v6/text/ansi_unix.go index 6e79b4063ac..635be79eed4 100644 --- a/vendor/github.com/jedib0t/go-pretty/v6/text/ansi_unix.go +++ b/vendor/github.com/jedib0t/go-pretty/v6/text/ansi_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package text diff --git a/vendor/github.com/jedib0t/go-pretty/v6/text/ansi_windows.go b/vendor/github.com/jedib0t/go-pretty/v6/text/ansi_windows.go index 24585b98374..4f8b42f0685 100644 --- a/vendor/github.com/jedib0t/go-pretty/v6/text/ansi_windows.go +++ b/vendor/github.com/jedib0t/go-pretty/v6/text/ansi_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package text @@ -9,9 +10,7 @@ import ( "golang.org/x/sys/windows" ) -var ( - enableVTPMutex = sync.Mutex{} -) +var enableVTPMutex = sync.Mutex{} func areANSICodesSupported() bool { enableVTPMutex.Lock() diff --git a/vendor/github.com/jedib0t/go-pretty/v6/text/color.go b/vendor/github.com/jedib0t/go-pretty/v6/text/color.go index f5b7bdd3976..f42dfb385dc 100644 --- a/vendor/github.com/jedib0t/go-pretty/v6/text/color.go +++ b/vendor/github.com/jedib0t/go-pretty/v6/text/color.go @@ -8,9 +8,7 @@ import ( "sync" ) -var ( - colorsEnabled = areANSICodesSupported() -) +var colorsEnabled = areANSICodesSupported() // DisableColors (forcefully) disables color coding globally. func DisableColors() { @@ -23,7 +21,7 @@ func EnableColors() { } // The logic here is inspired from github.com/fatih/color; the following is -// the the bare minimum logic required to print Colored to the console. +// the bare minimum logic required to print Colored to the console. 
// The differences: // * This one caches the escape sequences for cases with multiple colors // * This one handles cases where the incoming already has colors in the @@ -123,10 +121,8 @@ func (c Color) Sprintf(format string, a ...interface{}) string { // Example: Colors{FgCyan, BgBlack} type Colors []Color -var ( - // colorsSeqMap caches the escape sequence for a set of colors - colorsSeqMap = sync.Map{} -) +// colorsSeqMap caches the escape sequence for a set of colors +var colorsSeqMap = sync.Map{} // EscapeSeq returns the ANSI escape sequence for the colors set. func (c Colors) EscapeSeq() string { diff --git a/vendor/github.com/jedib0t/go-pretty/v6/text/color_html.go b/vendor/github.com/jedib0t/go-pretty/v6/text/color_html.go index 3e5c55be6d6..14fa17432e7 100644 --- a/vendor/github.com/jedib0t/go-pretty/v6/text/color_html.go +++ b/vendor/github.com/jedib0t/go-pretty/v6/text/color_html.go @@ -1,48 +1,46 @@ package text -var ( - // colorCSSClassMap contains the equivalent CSS-class for all colors - colorCSSClassMap = map[Color]string{ - Bold: "bold", - Faint: "faint", - Italic: "italic", - Underline: "underline", - BlinkSlow: "blink-slow", - BlinkRapid: "blink-rapid", - ReverseVideo: "reverse-video", - Concealed: "concealed", - CrossedOut: "crossed-out", - FgBlack: "fg-black", - FgRed: "fg-red", - FgGreen: "fg-green", - FgYellow: "fg-yellow", - FgBlue: "fg-blue", - FgMagenta: "fg-magenta", - FgCyan: "fg-cyan", - FgWhite: "fg-white", - FgHiBlack: "fg-hi-black", - FgHiRed: "fg-hi-red", - FgHiGreen: "fg-hi-green", - FgHiYellow: "fg-hi-yellow", - FgHiBlue: "fg-hi-blue", - FgHiMagenta: "fg-hi-magenta", - FgHiCyan: "fg-hi-cyan", - FgHiWhite: "fg-hi-white", - BgBlack: "bg-black", - BgRed: "bg-red", - BgGreen: "bg-green", - BgYellow: "bg-yellow", - BgBlue: "bg-blue", - BgMagenta: "bg-magenta", - BgCyan: "bg-cyan", - BgWhite: "bg-white", - BgHiBlack: "bg-hi-black", - BgHiRed: "bg-hi-red", - BgHiGreen: "bg-hi-green", - BgHiYellow: "bg-hi-yellow", - BgHiBlue: "bg-hi-blue", - BgHiMagenta: "bg-hi-magenta", - BgHiCyan: "bg-hi-cyan", - BgHiWhite: "bg-hi-white", - } -) +// colorCSSClassMap contains the equivalent CSS-class for all colors +var colorCSSClassMap = map[Color]string{ + Bold: "bold", + Faint: "faint", + Italic: "italic", + Underline: "underline", + BlinkSlow: "blink-slow", + BlinkRapid: "blink-rapid", + ReverseVideo: "reverse-video", + Concealed: "concealed", + CrossedOut: "crossed-out", + FgBlack: "fg-black", + FgRed: "fg-red", + FgGreen: "fg-green", + FgYellow: "fg-yellow", + FgBlue: "fg-blue", + FgMagenta: "fg-magenta", + FgCyan: "fg-cyan", + FgWhite: "fg-white", + FgHiBlack: "fg-hi-black", + FgHiRed: "fg-hi-red", + FgHiGreen: "fg-hi-green", + FgHiYellow: "fg-hi-yellow", + FgHiBlue: "fg-hi-blue", + FgHiMagenta: "fg-hi-magenta", + FgHiCyan: "fg-hi-cyan", + FgHiWhite: "fg-hi-white", + BgBlack: "bg-black", + BgRed: "bg-red", + BgGreen: "bg-green", + BgYellow: "bg-yellow", + BgBlue: "bg-blue", + BgMagenta: "bg-magenta", + BgCyan: "bg-cyan", + BgWhite: "bg-white", + BgHiBlack: "bg-hi-black", + BgHiRed: "bg-hi-red", + BgHiGreen: "bg-hi-green", + BgHiYellow: "bg-hi-yellow", + BgHiBlue: "bg-hi-blue", + BgHiMagenta: "bg-hi-magenta", + BgHiCyan: "bg-hi-cyan", + BgHiWhite: "bg-hi-white", +} diff --git a/vendor/github.com/jedib0t/go-pretty/v6/text/direction.go b/vendor/github.com/jedib0t/go-pretty/v6/text/direction.go new file mode 100644 index 00000000000..d0a74f183db --- /dev/null +++ b/vendor/github.com/jedib0t/go-pretty/v6/text/direction.go @@ -0,0 +1,29 @@ +package text + +// Direction defines 
the overall flow of text. Similar to bidi.Direction, but +// simplified and specific to this package. +type Direction int + +// Available Directions. +const ( + Default Direction = iota + LeftToRight + RightToLeft +) + +const ( + RuneL2R = '\u202a' + RuneR2L = '\u202b' +) + +// Modifier returns a character to force the given direction for the text that +// follows the modifier. +func (d Direction) Modifier() string { + switch d { + case LeftToRight: + return string(RuneL2R) + case RightToLeft: + return string(RuneR2L) + } + return "" +} diff --git a/vendor/github.com/jedib0t/go-pretty/v6/text/escape_seq_parser.go b/vendor/github.com/jedib0t/go-pretty/v6/text/escape_seq_parser.go new file mode 100644 index 00000000000..ab618acce6d --- /dev/null +++ b/vendor/github.com/jedib0t/go-pretty/v6/text/escape_seq_parser.go @@ -0,0 +1,201 @@ +package text + +import ( + "fmt" + "sort" + "strconv" + "strings" +) + +// Constants +const ( + EscapeReset = EscapeResetCSI + EscapeResetCSI = EscapeStartCSI + "0" + EscapeStopCSI + EscapeResetOSI = EscapeStartOSI + "0" + EscapeStopOSI + EscapeStart = EscapeStartCSI + EscapeStartCSI = "\x1b[" + EscapeStartOSI = "\x1b]" + EscapeStartRune = rune(27) // \x1b + EscapeStartRuneCSI = '[' // [ + EscapeStartRuneOSI = ']' // ] + EscapeStop = EscapeStopCSI + EscapeStopCSI = "m" + EscapeStopOSI = "\\" + EscapeStopRune = EscapeStopRuneCSI + EscapeStopRuneCSI = 'm' + EscapeStopRuneOSI = '\\' +) + +// Deprecated Constants +const ( + CSIStartRune = EscapeStartRuneCSI + CSIStopRune = EscapeStopRuneCSI + OSIStartRune = EscapeStartRuneOSI + OSIStopRune = EscapeStopRuneOSI +) + +type escSeqKind int + +const ( + escSeqKindUnknown escSeqKind = iota + escSeqKindCSI + escSeqKindOSI +) + +type escSeqParser struct { + codes map[int]bool + + // consume specific + inEscSeq bool + escSeqKind escSeqKind + escapeSeq string +} + +func (s *escSeqParser) Codes() []int { + codes := make([]int, 0) + for code, val := range s.codes { + if val { + codes = append(codes, code) + } + } + sort.Ints(codes) + return codes +} + +func (s *escSeqParser) Consume(char rune) { + if !s.inEscSeq && char == EscapeStartRune { + s.inEscSeq = true + s.escSeqKind = escSeqKindUnknown + s.escapeSeq = "" + } else if s.inEscSeq && s.escSeqKind == escSeqKindUnknown { + if char == EscapeStartRuneCSI { + s.escSeqKind = escSeqKindCSI + } else if char == EscapeStartRuneOSI { + s.escSeqKind = escSeqKindOSI + } + } + + if s.inEscSeq { + s.escapeSeq += string(char) + + if s.isEscapeStopRune(char) { + s.ParseSeq(s.escapeSeq, s.escSeqKind) + s.Reset() + } + } +} + +func (s *escSeqParser) InSequence() bool { + return s.inEscSeq +} + +func (s *escSeqParser) IsOpen() bool { + return len(s.codes) > 0 +} + +func (s *escSeqParser) Reset() { + s.inEscSeq = false + s.escSeqKind = escSeqKindUnknown + s.escapeSeq = "" +} + +const ( + escCodeResetAll = 0 + escCodeResetIntensity = 22 + escCodeResetItalic = 23 + escCodeResetUnderline = 24 + escCodeResetBlink = 25 + escCodeResetReverse = 27 + escCodeResetCrossedOut = 29 + escCodeBold = 1 + escCodeDim = 2 + escCodeItalic = 3 + escCodeUnderline = 4 + escCodeBlinkSlow = 5 + escCodeBlinkRapid = 6 + escCodeReverse = 7 + escCodeConceal = 8 + escCodeCrossedOut = 9 +) + +func (s *escSeqParser) ParseSeq(seq string, seqKind escSeqKind) { + if s.codes == nil { + s.codes = make(map[int]bool) + } + + if seqKind == escSeqKindOSI { + seq = strings.Replace(seq, EscapeStartOSI, "", 1) + seq = strings.Replace(seq, EscapeStopOSI, "", 1) + } else { // escSeqKindCSI + seq = strings.Replace(seq, EscapeStartCSI, "", 
1) + seq = strings.Replace(seq, EscapeStopCSI, "", 1) + } + + codes := strings.Split(seq, ";") + for _, code := range codes { + code = strings.TrimSpace(code) + if codeNum, err := strconv.Atoi(code); err == nil { + switch codeNum { + case escCodeResetAll: + s.codes = make(map[int]bool) // clear everything + case escCodeResetIntensity: + delete(s.codes, escCodeBold) + delete(s.codes, escCodeDim) + case escCodeResetItalic: + delete(s.codes, escCodeItalic) + case escCodeResetUnderline: + delete(s.codes, escCodeUnderline) + case escCodeResetBlink: + delete(s.codes, escCodeBlinkSlow) + delete(s.codes, escCodeBlinkRapid) + case escCodeResetReverse: + delete(s.codes, escCodeReverse) + case escCodeResetCrossedOut: + delete(s.codes, escCodeCrossedOut) + default: + s.codes[codeNum] = true + } + } + } +} + +func (s *escSeqParser) ParseString(str string) string { + s.escapeSeq, s.inEscSeq, s.escSeqKind = "", false, escSeqKindUnknown + for _, char := range str { + s.Consume(char) + } + return s.Sequence() +} + +func (s *escSeqParser) Sequence() string { + out := strings.Builder{} + if s.IsOpen() { + out.WriteString(EscapeStart) + for idx, code := range s.Codes() { + if idx > 0 { + out.WriteRune(';') + } + out.WriteString(fmt.Sprint(code)) + } + out.WriteString(EscapeStop) + } + + return out.String() +} + +const ( + escapeStartConcealOSI = "\x1b]8;" + escapeStopConcealOSI = "\x1b\\" +) + +func (s *escSeqParser) isEscapeStopRune(char rune) bool { + if strings.HasPrefix(s.escapeSeq, escapeStartConcealOSI) { + if strings.HasSuffix(s.escapeSeq, escapeStopConcealOSI) { + return true + } + } else if (s.escSeqKind == escSeqKindCSI && char == EscapeStopRuneCSI) || + (s.escSeqKind == escSeqKindOSI && char == EscapeStopRuneOSI) { + return true + } + return false +} diff --git a/vendor/github.com/jedib0t/go-pretty/v6/text/hyperlink.go b/vendor/github.com/jedib0t/go-pretty/v6/text/hyperlink.go new file mode 100644 index 00000000000..00a551ae273 --- /dev/null +++ b/vendor/github.com/jedib0t/go-pretty/v6/text/hyperlink.go @@ -0,0 +1,14 @@ +package text + +import "fmt" + +func Hyperlink(url, text string) string { + if url == "" { + return text + } + if text == "" { + return url + } + // source https://gist.github.com/egmontkob/eb114294efbcd5adb1944c9f3cb5feda + return fmt.Sprintf("\x1b]8;;%s\x1b\\%s\x1b]8;;\x1b\\", url, text) +} diff --git a/vendor/github.com/jedib0t/go-pretty/v6/text/string.go b/vendor/github.com/jedib0t/go-pretty/v6/text/string.go index 877f04a3b4f..fa28a4c0370 100644 --- a/vendor/github.com/jedib0t/go-pretty/v6/text/string.go +++ b/vendor/github.com/jedib0t/go-pretty/v6/text/string.go @@ -5,71 +5,68 @@ import ( "unicode/utf8" "github.com/mattn/go-runewidth" + "golang.org/x/text/width" ) -// Constants -const ( - EscapeReset = EscapeStart + "0" + EscapeStop - EscapeStart = "\x1b[" - EscapeStartRune = rune(27) // \x1b - EscapeStop = "m" - EscapeStopRune = 'm' +// RuneWidth stuff +var ( + rwCondition = runewidth.NewCondition() ) // InsertEveryN inserts the rune every N characters in the string. 
For ex.: -// InsertEveryN("Ghost", '-', 1) == "G-h-o-s-t" -// InsertEveryN("Ghost", '-', 2) == "Gh-os-t" -// InsertEveryN("Ghost", '-', 3) == "Gho-st" -// InsertEveryN("Ghost", '-', 4) == "Ghos-t" -// InsertEveryN("Ghost", '-', 5) == "Ghost" +// +// InsertEveryN("Ghost", '-', 1) == "G-h-o-s-t" +// InsertEveryN("Ghost", '-', 2) == "Gh-os-t" +// InsertEveryN("Ghost", '-', 3) == "Gho-st" +// InsertEveryN("Ghost", '-', 4) == "Ghos-t" +// InsertEveryN("Ghost", '-', 5) == "Ghost" func InsertEveryN(str string, runeToInsert rune, n int) string { if n <= 0 { return str } - sLen := RuneCount(str) + sLen := StringWidthWithoutEscSequences(str) var out strings.Builder out.Grow(sLen + (sLen / n)) - outLen, isEscSeq := 0, false + outLen, esp := 0, escSeqParser{} for idx, c := range str { - if c == EscapeStartRune { - isEscSeq = true + if esp.InSequence() { + esp.Consume(c) + out.WriteRune(c) + continue } - - if !isEscSeq && outLen > 0 && (outLen%n) == 0 && idx != sLen { + esp.Consume(c) + if !esp.InSequence() && outLen > 0 && (outLen%n) == 0 && idx != sLen { out.WriteRune(runeToInsert) } out.WriteRune(c) - if !isEscSeq { + if !esp.InSequence() { outLen += RuneWidth(c) } - - if isEscSeq && c == EscapeStopRune { - isEscSeq = false - } } return out.String() } // LongestLineLen returns the length of the longest "line" within the // argument string. For ex.: -// LongestLineLen("Ghost!\nCome back here!\nRight now!") == 15 +// +// LongestLineLen("Ghost!\nCome back here!\nRight now!") == 15 func LongestLineLen(str string) int { - maxLength, currLength, isEscSeq := 0, 0, false + maxLength, currLength, esp := 0, 0, escSeqParser{} + //fmt.Println(str) for _, c := range str { - if c == EscapeStartRune { - isEscSeq = true - } else if isEscSeq && c == EscapeStopRune { - isEscSeq = false + //fmt.Printf("%03d | %03d | %c | %5v | %v | %#v\n", idx, c, c, esp.inEscSeq, esp.Codes(), esp.escapeSeq) + if esp.InSequence() { + esp.Consume(c) continue } - + esp.Consume(c) if c == '\n' { if currLength > maxLength { maxLength = currLength } currLength = 0 - } else if !isEscSeq { + } else if !esp.InSequence() { currLength += RuneWidth(c) } } @@ -79,29 +76,91 @@ func LongestLineLen(str string) int { return maxLength } +// OverrideRuneWidthEastAsianWidth can *probably* help with alignment, and +// length calculation issues when dealing with Unicode character-set and a +// non-English language set in the LANG variable. +// +// Set this to 'false' to force the "runewidth" library to pretend to deal with +// English character-set. Be warned that if the text/content you are dealing +// with contains East Asian character-set, this may result in unexpected +// behavior. +// +// References: +// * https://github.com/mattn/go-runewidth/issues/64#issuecomment-1221642154 +// * https://github.com/jedib0t/go-pretty/issues/220 +// * https://github.com/jedib0t/go-pretty/issues/204 +func OverrideRuneWidthEastAsianWidth(val bool) { + rwCondition.EastAsianWidth = val +} + // Pad pads the given string with as many characters as needed to make it as // long as specified (maxLen). This function does not count escape sequences // while calculating length of the string. Ex.: -// Pad("Ghost", 0, ' ') == "Ghost" -// Pad("Ghost", 3, ' ') == "Ghost" -// Pad("Ghost", 5, ' ') == "Ghost" -// Pad("Ghost", 7, ' ') == "Ghost " -// Pad("Ghost", 10, '.') == "Ghost....." +// +// Pad("Ghost", 0, ' ') == "Ghost" +// Pad("Ghost", 3, ' ') == "Ghost" +// Pad("Ghost", 5, ' ') == "Ghost" +// Pad("Ghost", 7, ' ') == "Ghost " +// Pad("Ghost", 10, '.') == "Ghost....." 
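[Editor's note: a hypothetical call, not part of the diff, illustrating the escape-sequence-aware behavior of Pad below: the colored string measures 5 cells via StringWidthWithoutEscSequences, so two pad characters are appended after the reset sequence.]

	package main

	import (
		"fmt"

		"github.com/jedib0t/go-pretty/v6/text"
	)

	func main() {
		// ANSI codes contribute zero width; "Ghost" is 5 cells wide.
		fmt.Printf("%q\n", text.Pad("\x1b[33mGhost\x1b[0m", 7, ' '))
		// Output: "\x1b[33mGhost\x1b[0m  "
	}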
func Pad(str string, maxLen int, paddingChar rune) string { - strLen := RuneCount(str) + strLen := StringWidthWithoutEscSequences(str) if strLen < maxLen { str += strings.Repeat(string(paddingChar), maxLen-strLen) } return str } +// ProcessCRLF converts "\r\n" to "\n", and processes lone "\r" by moving the +// cursor/carriage to the start of the line and overwrites the contents +// accordingly. Ex.: +// +// ProcessCRLF("abc") == "abc" +// ProcessCRLF("abc\r\ndef") == "abc\ndef" +// ProcessCRLF("abc\r\ndef\rghi") == "abc\nghi" +// ProcessCRLF("abc\r\ndef\rghi\njkl") == "abc\nghi\njkl" +// ProcessCRLF("abc\r\ndef\rghi\njkl\r") == "abc\nghi\njkl" +// ProcessCRLF("abc\r\ndef\rghi\rjkl\rmn") == "abc\nmnl" +func ProcessCRLF(str string) string { + str = strings.ReplaceAll(str, "\r\n", "\n") + if !strings.Contains(str, "\r") { + return str + } + + lines := strings.Split(str, "\n") + for lineIdx, line := range lines { + if !strings.Contains(line, "\r") { + continue + } + + lineRunes, newLineRunes := []rune(line), make([]rune, 0) + for idx, realIdx := 0, 0; idx < len(lineRunes); idx++ { + // if a CR, move "cursor" back to beginning of line + if lineRunes[idx] == '\r' { + realIdx = 0 + continue + } + + // if cursor is not at end, overwrite + if realIdx < len(newLineRunes) { + newLineRunes[realIdx] = lineRunes[idx] + } else { // else append + newLineRunes = append(newLineRunes, lineRunes[idx]) + } + realIdx++ + } + lines[lineIdx] = string(newLineRunes) + } + return strings.Join(lines, "\n") +} + // RepeatAndTrim repeats the given string until it is as long as maxRunes. // For ex.: -// RepeatAndTrim("", 5) == "" -// RepeatAndTrim("Ghost", 0) == "" -// RepeatAndTrim("Ghost", 5) == "Ghost" -// RepeatAndTrim("Ghost", 7) == "GhostGh" -// RepeatAndTrim("Ghost", 10) == "GhostGhost" +// +// RepeatAndTrim("", 5) == "" +// RepeatAndTrim("Ghost", 0) == "" +// RepeatAndTrim("Ghost", 5) == "Ghost" +// RepeatAndTrim("Ghost", 7) == "GhostGh" +// RepeatAndTrim("Ghost", 10) == "GhostGhost" func RepeatAndTrim(str string, maxRunes int) string { if str == "" || maxRunes == 0 { return "" @@ -114,62 +173,102 @@ func RepeatAndTrim(str string, maxRunes int) string { // RuneCount is similar to utf8.RuneCountInString, except for the fact that it // ignores escape sequences while counting. For ex.: -// RuneCount("") == 0 -// RuneCount("Ghost") == 5 -// RuneCount("\x1b[33mGhost\x1b[0m") == 5 -// RuneCount("\x1b[33mGhost\x1b[0") == 5 +// +// RuneCount("") == 0 +// RuneCount("Ghost") == 5 +// RuneCount("\x1b[33mGhost\x1b[0m") == 5 +// RuneCount("\x1b[33mGhost\x1b[0") == 5 +// +// Deprecated: in favor of RuneWidthWithoutEscSequences func RuneCount(str string) int { - count, isEscSeq := 0, false - for _, c := range str { - if c == EscapeStartRune { - isEscSeq = true - } else if isEscSeq { - if c == EscapeStopRune { - isEscSeq = false - } - } else { - count += RuneWidth(c) - } - } - return count + return StringWidthWithoutEscSequences(str) } // RuneWidth returns the mostly accurate character-width of the rune. This is -// not 100% accurate as the character width is usually dependant on the +// not 100% accurate as the character width is usually dependent on the // typeface (font) used in the console/terminal. 
For ex.: -// RuneWidth('A') == 1 -// RuneWidth('ツ') == 2 -// RuneWidth('⊙') == 1 -// RuneWidth('︿') == 2 -// RuneWidth(0x27) == 0 +// +// RuneWidth('A') == 1 +// RuneWidth('ツ') == 2 +// RuneWidth('⊙') == 1 +// RuneWidth('︿') == 2 +// RuneWidth(0x27) == 0 func RuneWidth(r rune) int { - return runewidth.RuneWidth(r) + return rwCondition.RuneWidth(r) +} + +// RuneWidthWithoutEscSequences is similar to RuneWidth, except for the fact +// that it ignores escape sequences while counting. For ex.: +// +// RuneWidthWithoutEscSequences("") == 0 +// RuneWidthWithoutEscSequences("Ghost") == 5 +// RuneWidthWithoutEscSequences("\x1b[33mGhost\x1b[0m") == 5 +// RuneWidthWithoutEscSequences("\x1b[33mGhost\x1b[0") == 5 +// +// deprecated: use StringWidthWithoutEscSequences instead +func RuneWidthWithoutEscSequences(str string) int { + return StringWidthWithoutEscSequences(str) } // Snip returns the given string with a fixed length. For ex.: -// Snip("Ghost", 0, "~") == "Ghost" -// Snip("Ghost", 1, "~") == "~" -// Snip("Ghost", 3, "~") == "Gh~" -// Snip("Ghost", 5, "~") == "Ghost" -// Snip("Ghost", 7, "~") == "Ghost " -// Snip("\x1b[33mGhost\x1b[0m", 7, "~") == "\x1b[33mGhost\x1b[0m " +// +// Snip("Ghost", 0, "~") == "Ghost" +// Snip("Ghost", 1, "~") == "~" +// Snip("Ghost", 3, "~") == "Gh~" +// Snip("Ghost", 5, "~") == "Ghost" +// Snip("Ghost", 7, "~") == "Ghost " +// Snip("\x1b[33mGhost\x1b[0m", 7, "~") == "\x1b[33mGhost\x1b[0m " func Snip(str string, length int, snipIndicator string) string { if length > 0 { - lenStr := RuneCount(str) + lenStr := StringWidthWithoutEscSequences(str) if lenStr > length { - lenStrFinal := length - RuneCount(snipIndicator) + lenStrFinal := length - StringWidthWithoutEscSequences(snipIndicator) return Trim(str, lenStrFinal) + snipIndicator } } return str } +// StringWidth is similar to RuneWidth, except it works on a string. For +// ex.: +// +// StringWidth("Ghost 生命"): 10 +// StringWidth("\x1b[33mGhost 生命\x1b[0m"): 19 +func StringWidth(str string) int { + return rwCondition.StringWidth(str) +} + +// StringWidthWithoutEscSequences is similar to RuneWidth, except for the fact +// that it ignores escape sequences while counting. For ex.: +// +// StringWidthWithoutEscSequences("") == 0 +// StringWidthWithoutEscSequences("Ghost") == 5 +// StringWidthWithoutEscSequences("\x1b[33mGhost\x1b[0m") == 5 +// StringWidthWithoutEscSequences("\x1b[33mGhost\x1b[0") == 5 +// StringWidthWithoutEscSequences("Ghost 生命"): 10 +// StringWidthWithoutEscSequences("\x1b[33mGhost 生命\x1b[0m"): 10 +func StringWidthWithoutEscSequences(str string) int { + count, esp := 0, escSeqParser{} + for _, c := range str { + if esp.InSequence() { + esp.Consume(c) + continue + } + esp.Consume(c) + if !esp.InSequence() { + count += RuneWidth(c) + } + } + return count +} + // Trim trims a string to the given length while ignoring escape sequences. 
For // ex.: -// Trim("Ghost", 3) == "Gho" -// Trim("Ghost", 6) == "Ghost" -// Trim("\x1b[33mGhost\x1b[0m", 3) == "\x1b[33mGho\x1b[0m" -// Trim("\x1b[33mGhost\x1b[0m", 6) == "\x1b[33mGhost\x1b[0m" +// +// Trim("Ghost", 3) == "Gho" +// Trim("Ghost", 6) == "Ghost" +// Trim("\x1b[33mGhost\x1b[0m", 3) == "\x1b[33mGho\x1b[0m" +// Trim("\x1b[33mGhost\x1b[0m", 6) == "\x1b[33mGhost\x1b[0m" func Trim(str string, maxLen int) string { if maxLen <= 0 { return "" @@ -178,27 +277,48 @@ func Trim(str string, maxLen int) string { var out strings.Builder out.Grow(maxLen) - outLen, isEscSeq, lastEscSeq := 0, false, strings.Builder{} + outLen, esp := 0, escSeqParser{} for _, sChr := range str { - out.WriteRune(sChr) - if sChr == EscapeStartRune { - isEscSeq = true - lastEscSeq.Reset() - lastEscSeq.WriteRune(sChr) - } else if isEscSeq { - lastEscSeq.WriteRune(sChr) - if sChr == EscapeStopRune { - isEscSeq = false - } - } else { + if esp.InSequence() { + esp.Consume(sChr) + out.WriteRune(sChr) + continue + } + esp.Consume(sChr) + if esp.InSequence() { + out.WriteRune(sChr) + continue + } + if outLen < maxLen { outLen++ - if outLen == maxLen { - break - } + out.WriteRune(sChr) + continue } } - if lastEscSeq.Len() > 0 && lastEscSeq.String() != EscapeReset { - out.WriteString(EscapeReset) - } return out.String() } + +// Widen is like width.Widen.String() but ignores escape sequences. For ex: +// +// Widen("Ghost 生命"): "Ghost\u3000生命" +// Widen("\x1b[33mGhost 生命\x1b[0m"): "\x1b[33mGhost\u3000生命\x1b[0m" +func Widen(str string) string { + sb := strings.Builder{} + sb.Grow(len(str)) + + esp := escSeqParser{} + for _, c := range str { + if esp.InSequence() { + sb.WriteRune(c) + esp.Consume(c) + continue + } + esp.Consume(c) + if !esp.InSequence() { + sb.WriteString(width.Widen.String(string(c))) + } else { + sb.WriteRune(c) + } + } + return sb.String() +} diff --git a/vendor/github.com/jedib0t/go-pretty/v6/text/transformer.go b/vendor/github.com/jedib0t/go-pretty/v6/text/transformer.go index d01c0eba0b2..193a721cce2 100644 --- a/vendor/github.com/jedib0t/go-pretty/v6/text/transformer.go +++ b/vendor/github.com/jedib0t/go-pretty/v6/text/transformer.go @@ -37,76 +37,97 @@ var ( type Transformer func(val interface{}) string // NewNumberTransformer returns a number Transformer that: -// * transforms the number as directed by 'format' (ex.: %.2f) -// * colors negative values Red -// * colors positive values Green +// - transforms the number as directed by 'format' (ex.: %.2f) +// - colors negative values Red +// - colors positive values Green func NewNumberTransformer(format string) Transformer { return func(val interface{}) string { - if number, ok := val.(int); ok { - return transformInt(format, int64(number)) + if valStr := transformInt(format, val); valStr != "" { + return valStr } - if number, ok := val.(int8); ok { - return transformInt(format, int64(number)) + if valStr := transformUint(format, val); valStr != "" { + return valStr } - if number, ok := val.(int16); ok { - return transformInt(format, int64(number)) - } - if number, ok := val.(int32); ok { - return transformInt(format, int64(number)) - } - if number, ok := val.(int64); ok { - return transformInt(format, int64(number)) - } - if number, ok := val.(uint); ok { - return transformUint(format, uint64(number)) - } - if number, ok := val.(uint8); ok { - return transformUint(format, uint64(number)) - } - if number, ok := val.(uint16); ok { - return transformUint(format, uint64(number)) - } - if number, ok := val.(uint32); ok { - return transformUint(format, 
uint64(number)) - } - if number, ok := val.(uint64); ok { - return transformUint(format, uint64(number)) - } - if number, ok := val.(float32); ok { - return transformFloat(format, float64(number)) - } - if number, ok := val.(float64); ok { - return transformFloat(format, float64(number)) + if valStr := transformFloat(format, val); valStr != "" { + return valStr } return fmt.Sprint(val) } } -func transformInt(format string, val int64) string { - if val < 0 { - return colorsNumberNegative.Sprintf("-"+format, -val) +func transformInt(format string, val interface{}) string { + transform := func(val int64) string { + if val < 0 { + return colorsNumberNegative.Sprintf("-"+format, -val) + } + if val > 0 { + return colorsNumberPositive.Sprintf(format, val) + } + return colorsNumberZero.Sprintf(format, val) } - if val > 0 { - return colorsNumberPositive.Sprintf(format, val) + + if number, ok := val.(int); ok { + return transform(int64(number)) + } + if number, ok := val.(int8); ok { + return transform(int64(number)) + } + if number, ok := val.(int16); ok { + return transform(int64(number)) + } + if number, ok := val.(int32); ok { + return transform(int64(number)) + } + if number, ok := val.(int64); ok { + return transform(number) } - return colorsNumberZero.Sprintf(format, val) + return "" } -func transformUint(format string, val uint64) string { - if val > 0 { - return colorsNumberPositive.Sprintf(format, val) +func transformUint(format string, val interface{}) string { + transform := func(val uint64) string { + if val > 0 { + return colorsNumberPositive.Sprintf(format, val) + } + return colorsNumberZero.Sprintf(format, val) + } + + if number, ok := val.(uint); ok { + return transform(uint64(number)) + } + if number, ok := val.(uint8); ok { + return transform(uint64(number)) + } + if number, ok := val.(uint16); ok { + return transform(uint64(number)) } - return colorsNumberZero.Sprintf(format, val) + if number, ok := val.(uint32); ok { + return transform(uint64(number)) + } + if number, ok := val.(uint64); ok { + return transform(number) + } + return "" } -func transformFloat(format string, val float64) string { - if val < 0 { - return colorsNumberNegative.Sprintf("-"+format, -val) +func transformFloat(format string, val interface{}) string { + transform := func(val float64) string { + if val < 0 { + return colorsNumberNegative.Sprintf("-"+format, -val) + } + if val > 0 { + return colorsNumberPositive.Sprintf(format, val) + } + return colorsNumberZero.Sprintf(format, val) + } + + if number, ok := val.(float32); ok { + return transform(float64(number)) } - if val > 0 { - return colorsNumberPositive.Sprintf(format, val) + if number, ok := val.(float64); ok { + return transform(number) } - return colorsNumberZero.Sprintf(format, val) + return "" } // NewJSONTransformer returns a Transformer that can format a JSON string or an @@ -116,7 +137,7 @@ func NewJSONTransformer(prefix string, indent string) Transformer { if valStr, ok := val.(string); ok { var b bytes.Buffer if err := json.Indent(&b, []byte(strings.TrimSpace(valStr)), prefix, indent); err == nil { - return string(b.Bytes()) + return b.String() } } else if b, err := json.MarshalIndent(val, prefix, indent); err == nil { return string(b) @@ -133,26 +154,15 @@ func NewJSONTransformer(prefix string, indent string) Transformer { // location (use time.Local to get localized timestamps). 
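[Editor's note: a short usage sketch, assumed rather than taken from the diff, for the constructor below; it presumes the fmt, time, and go-pretty text imports. The layout string and *time.Location are the standard library concepts.]

	tr := text.NewTimeTransformer("2006-01-02 15:04:05", time.UTC)
	fmt.Println(tr(time.Unix(1700000000, 0))) // 2023-11-14 22:13:20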
func NewTimeTransformer(layout string, location *time.Location) Transformer { return func(val interface{}) string { - formatTime := func(t time.Time) string { - rsp := "" - if t.Unix() > 0 { - if location != nil { - t = t.In(location) - } - rsp = t.Format(layout) - } - return rsp - } - rsp := fmt.Sprint(val) if valTime, ok := val.(time.Time); ok { - rsp = formatTime(valTime) + rsp = formatTime(valTime, layout, location) } else { // cycle through some supported layouts to see if the string form // of the object matches any of these layouts for _, possibleTimeLayout := range possibleTimeLayouts { if valTime, err := time.Parse(possibleTimeLayout, rsp); err == nil { - rsp = formatTime(valTime) + rsp = formatTime(valTime, layout, location) break } } @@ -168,24 +178,14 @@ func NewTimeTransformer(layout string, location *time.Location) Transformer { // If a non-nil location value is provided, the time will be localized to that // location (use time.Local to get localized timestamps). func NewUnixTimeTransformer(layout string, location *time.Location) Transformer { - timeTransformer := NewTimeTransformer(layout, location) - formatUnixTime := func(unixTime int64) string { - if unixTime >= unixTimeMinNanoSeconds { - unixTime = unixTime / time.Second.Nanoseconds() - } else if unixTime >= unixTimeMinMicroseconds { - unixTime = unixTime / (time.Second.Nanoseconds() / 1000) - } else if unixTime >= unixTimeMinMilliseconds { - unixTime = unixTime / (time.Second.Nanoseconds() / 1000000) - } - return timeTransformer(time.Unix(unixTime, 0)) - } + transformer := NewTimeTransformer(layout, location) return func(val interface{}) string { if unixTime, ok := val.(int64); ok { - return formatUnixTime(unixTime) + return formatTimeUnix(unixTime, transformer) } else if unixTimeStr, ok := val.(string); ok { if unixTime, err := strconv.ParseInt(unixTimeStr, 10, 64); err == nil { - return formatUnixTime(unixTime) + return formatTimeUnix(unixTime, transformer) } } return fmt.Sprint(val) @@ -193,9 +193,36 @@ func NewUnixTimeTransformer(layout string, location *time.Location) Transformer } // NewURLTransformer returns a Transformer that can format and pretty print a string -// that contains an URL (the text is underlined and colored Blue). -func NewURLTransformer() Transformer { +// that contains a URL (the text is underlined and colored Blue). 
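[Editor's note: illustrative only, not part of the diff: the new variadic colors parameter on the constructor below lets a caller override the default underlined-blue styling; calling it with no arguments keeps the prior behavior.]

	urlGreen := text.NewURLTransformer(text.FgGreen, text.Underline)
	fmt.Println(urlGreen("https://github.com/jedib0t/go-pretty"))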
+func NewURLTransformer(colors ...Color) Transformer { + colorsToUse := colorsURL + if len(colors) > 0 { + colorsToUse = colors + } + return func(val interface{}) string { - return colorsURL.Sprint(val) + return colorsToUse.Sprint(val) + } +} + +func formatTime(t time.Time, layout string, location *time.Location) string { + rsp := "" + if t.Unix() > 0 { + if location != nil { + t = t.In(location) + } + rsp = t.Format(layout) + } + return rsp +} + +func formatTimeUnix(unixTime int64, timeTransformer Transformer) string { + if unixTime >= unixTimeMinNanoSeconds { + unixTime = unixTime / time.Second.Nanoseconds() + } else if unixTime >= unixTimeMinMicroseconds { + unixTime = unixTime / (time.Second.Nanoseconds() / 1000) + } else if unixTime >= unixTimeMinMilliseconds { + unixTime = unixTime / (time.Second.Nanoseconds() / 1000000) } + return timeTransformer(time.Unix(unixTime, 0)) } diff --git a/vendor/github.com/jedib0t/go-pretty/v6/text/valign.go b/vendor/github.com/jedib0t/go-pretty/v6/text/valign.go index 76b1943ce36..f1a75e96d5d 100644 --- a/vendor/github.com/jedib0t/go-pretty/v6/text/valign.go +++ b/vendor/github.com/jedib0t/go-pretty/v6/text/valign.go @@ -14,12 +14,12 @@ const ( ) // Apply aligns the lines vertically. For ex.: -// * VAlignTop.Apply({"Game", "Of", "Thrones"}, 5) -// returns {"Game", "Of", "Thrones", "", ""} -// * VAlignMiddle.Apply({"Game", "Of", "Thrones"}, 5) -// returns {"", "Game", "Of", "Thrones", ""} -// * VAlignBottom.Apply({"Game", "Of", "Thrones"}, 5) -// returns {"", "", "Game", "Of", "Thrones"} +// - VAlignTop.Apply({"Game", "Of", "Thrones"}, 5) +// returns {"Game", "Of", "Thrones", "", ""} +// - VAlignMiddle.Apply({"Game", "Of", "Thrones"}, 5) +// returns {"", "Game", "Of", "Thrones", ""} +// - VAlignBottom.Apply({"Game", "Of", "Thrones"}, 5) +// returns {"", "", "Game", "Of", "Thrones"} func (va VAlign) Apply(lines []string, maxLines int) []string { if len(lines) == maxLines { return lines @@ -42,12 +42,12 @@ func (va VAlign) Apply(lines []string, maxLines int) []string { } // ApplyStr aligns the string (of 1 or more lines) vertically. For ex.: -// * VAlignTop.ApplyStr("Game\nOf\nThrones", 5) -// returns {"Game", "Of", "Thrones", "", ""} -// * VAlignMiddle.ApplyStr("Game\nOf\nThrones", 5) -// returns {"", "Game", "Of", "Thrones", ""} -// * VAlignBottom.ApplyStr("Game\nOf\nThrones", 5) -// returns {"", "", "Game", "Of", "Thrones"} +// - VAlignTop.ApplyStr("Game\nOf\nThrones", 5) +// returns {"Game", "Of", "Thrones", "", ""} +// - VAlignMiddle.ApplyStr("Game\nOf\nThrones", 5) +// returns {"", "Game", "Of", "Thrones", ""} +// - VAlignBottom.ApplyStr("Game\nOf\nThrones", 5) +// returns {"", "", "Game", "Of", "Thrones"} func (va VAlign) ApplyStr(text string, maxLines int) []string { return va.Apply(strings.Split(text, "\n"), maxLines) } diff --git a/vendor/github.com/jedib0t/go-pretty/v6/text/wrap.go b/vendor/github.com/jedib0t/go-pretty/v6/text/wrap.go index 1cf61e2012f..8ad848550b3 100644 --- a/vendor/github.com/jedib0t/go-pretty/v6/text/wrap.go +++ b/vendor/github.com/jedib0t/go-pretty/v6/text/wrap.go @@ -2,7 +2,6 @@ package text import ( "strings" - "unicode/utf8" ) // WrapHard wraps a string to the given length using a newline. 
Handles strings @@ -15,7 +14,7 @@ func WrapHard(str string, wrapLen int) string { return "" } str = strings.Replace(str, "\t", " ", -1) - sLen := utf8.RuneCountInString(str) + sLen := StringWidthWithoutEscSequences(str) if sLen <= wrapLen { return str } @@ -43,7 +42,7 @@ func WrapSoft(str string, wrapLen int) string { return "" } str = strings.Replace(str, "\t", " ", -1) - sLen := utf8.RuneCountInString(str) + sLen := StringWidthWithoutEscSequences(str) if sLen <= wrapLen { return str } @@ -69,32 +68,21 @@ func WrapText(str string, wrapLen int) string { if wrapLen <= 0 { return "" } + str = strings.Replace(str, "\t", " ", -1) + sLen := StringWidthWithoutEscSequences(str) + if sLen <= wrapLen { + return str + } - var out strings.Builder - sLen := utf8.RuneCountInString(str) + out := &strings.Builder{} out.Grow(sLen + (sLen / wrapLen)) - lineIdx, isEscSeq, lastEscSeq := 0, false, "" - for _, char := range str { - if char == EscapeStartRune { - isEscSeq = true - lastEscSeq = "" - } - if isEscSeq { - lastEscSeq += string(char) - } - - appendChar(char, wrapLen, &lineIdx, isEscSeq, lastEscSeq, &out) - - if isEscSeq && char == EscapeStopRune { - isEscSeq = false - } - if lastEscSeq == EscapeReset { - lastEscSeq = "" + for idx, line := range strings.Split(str, "\n") { + if idx > 0 { + out.WriteString("\n") } + wrapHard(line, wrapLen, out) } - if lastEscSeq != "" && lastEscSeq != EscapeReset { - out.WriteString(EscapeReset) - } + return out.String() } @@ -122,7 +110,7 @@ func appendChar(char rune, wrapLen int, lineLen *int, inEscSeq bool, lastSeenEsc // increment the line index if not in the middle of an escape sequence if !inEscSeq { - *lineLen++ + *lineLen += RuneWidth(char) } } } @@ -149,26 +137,6 @@ func appendWord(word string, lineIdx *int, lastSeenEscSeq string, wrapLen int, o } } -func extractOpenEscapeSeq(str string) string { - escapeSeq, inEscSeq := "", false - for _, char := range str { - if char == EscapeStartRune { - inEscSeq = true - escapeSeq = "" - } - if inEscSeq { - escapeSeq += string(char) - } - if char == EscapeStopRune { - inEscSeq = false - } - } - if escapeSeq == EscapeReset { - escapeSeq = "" - } - return escapeSeq -} - func terminateLine(wrapLen int, lineLen *int, lastSeenEscSeq string, out *strings.Builder) { if *lineLen < wrapLen { out.WriteString(strings.Repeat(" ", wrapLen-*lineLen)) @@ -189,19 +157,19 @@ func terminateOutput(lastSeenEscSeq string, out *strings.Builder) { } func wrapHard(paragraph string, wrapLen int, out *strings.Builder) { + esp := escSeqParser{} lineLen, lastSeenEscSeq := 0, "" words := strings.Fields(paragraph) for wordIdx, word := range words { - escSeq := extractOpenEscapeSeq(word) - if escSeq != "" { - lastSeenEscSeq = escSeq + if openEscSeq := esp.ParseString(word); openEscSeq != "" { + lastSeenEscSeq = openEscSeq } if lineLen > 0 { out.WriteRune(' ') lineLen++ } - wordLen := RuneCount(word) + wordLen := StringWidthWithoutEscSequences(word) if lineLen+wordLen <= wrapLen { // word fits within the line out.WriteString(word) lineLen += wordLen @@ -218,33 +186,22 @@ func wrapHard(paragraph string, wrapLen int, out *strings.Builder) { } func wrapSoft(paragraph string, wrapLen int, out *strings.Builder) { + esp := escSeqParser{} lineLen, lastSeenEscSeq := 0, "" words := strings.Fields(paragraph) for wordIdx, word := range words { - escSeq := extractOpenEscapeSeq(word) - if escSeq != "" { - lastSeenEscSeq = escSeq - } - spacing, spacingLen := "", 0 - if lineLen > 0 { - spacing, spacingLen = " ", 1 + if openEscSeq := esp.ParseString(word); openEscSeq != 
"" { + lastSeenEscSeq = openEscSeq } - wordLen := RuneCount(word) + spacing, spacingLen := wrapSoftSpacing(lineLen) + wordLen := StringWidthWithoutEscSequences(word) if lineLen+spacingLen+wordLen <= wrapLen { // word fits within the line out.WriteString(spacing) out.WriteString(word) lineLen += spacingLen + wordLen } else { // word doesn't fit within the line - if lineLen > 0 { // something is already on the line; terminate it - terminateLine(wrapLen, &lineLen, lastSeenEscSeq, out) - } - if wordLen <= wrapLen { // word fits within a single line - out.WriteString(word) - lineLen = wordLen - } else { // word doesn't fit within a single line; hard-wrap - appendWord(word, &lineLen, lastSeenEscSeq, wrapLen, out) - } + lineLen = wrapSoftLastWordInLine(wrapLen, lineLen, lastSeenEscSeq, wordLen, word, out) } // end of line; but more words incoming @@ -254,3 +211,24 @@ func wrapSoft(paragraph string, wrapLen int, out *strings.Builder) { } terminateOutput(lastSeenEscSeq, out) } + +func wrapSoftLastWordInLine(wrapLen int, lineLen int, lastSeenEscSeq string, wordLen int, word string, out *strings.Builder) int { + if lineLen > 0 { // something is already on the line; terminate it + terminateLine(wrapLen, &lineLen, lastSeenEscSeq, out) + } + if wordLen <= wrapLen { // word fits within a single line + out.WriteString(word) + lineLen = wordLen + } else { // word doesn't fit within a single line; hard-wrap + appendWord(word, &lineLen, lastSeenEscSeq, wrapLen, out) + } + return lineLen +} + +func wrapSoftSpacing(lineLen int) (string, int) { + spacing, spacingLen := "", 0 + if lineLen > 0 { + spacing, spacingLen = " ", 1 + } + return spacing, spacingLen +} diff --git a/vendor/github.com/klauspost/cpuid/v2/README.md b/vendor/github.com/klauspost/cpuid/v2/README.md index 21508edbdb3..f06ba51c56b 100644 --- a/vendor/github.com/klauspost/cpuid/v2/README.md +++ b/vendor/github.com/klauspost/cpuid/v2/README.md @@ -281,6 +281,7 @@ Exit Code 1 | AMXBF16 | Tile computational operations on BFLOAT16 numbers | | AMXINT8 | Tile computational operations on 8-bit integers | | AMXFP16 | Tile computational operations on FP16 numbers | +| AMXFP8 | Tile computational operations on FP8 numbers | | AMXTILE | Tile architecture | | APX_F | Intel APX | | AVX | AVX functions | diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid.go b/vendor/github.com/klauspost/cpuid/v2/cpuid.go index 53bc18ca719..db99eb62f7b 100644 --- a/vendor/github.com/klauspost/cpuid/v2/cpuid.go +++ b/vendor/github.com/klauspost/cpuid/v2/cpuid.go @@ -55,6 +55,12 @@ const ( Qualcomm Marvell + QEMU + QNX + ACRN + SRE + Apple + lastVendor ) @@ -75,6 +81,7 @@ const ( AMXBF16 // Tile computational operations on BFLOAT16 numbers AMXFP16 // Tile computational operations on FP16 numbers AMXINT8 // Tile computational operations on 8-bit integers + AMXFP8 // Tile computational operations on FP8 numbers AMXTILE // Tile architecture APX_F // Intel APX AVX // AVX functions @@ -296,20 +303,22 @@ const ( // CPUInfo contains information about the detected system CPU. type CPUInfo struct { - BrandName string // Brand name reported by the CPU - VendorID Vendor // Comparable CPU vendor ID - VendorString string // Raw vendor string. - featureSet flagSet // Features of the CPU - PhysicalCores int // Number of physical processor cores in your CPU. Will be 0 if undetectable. - ThreadsPerCore int // Number of threads per physical core. Will be 1 if undetectable. - LogicalCores int // Number of physical cores times threads that can run on each core through the use of hyperthreading. 
Will be 0 if undetectable. - Family int // CPU family number - Model int // CPU model number - Stepping int // CPU stepping info - CacheLine int // Cache line size in bytes. Will be 0 if undetectable. - Hz int64 // Clock speed, if known, 0 otherwise. Will attempt to contain base clock speed. - BoostFreq int64 // Max clock speed, if known, 0 otherwise - Cache struct { + BrandName string // Brand name reported by the CPU + VendorID Vendor // Comparable CPU vendor ID + VendorString string // Raw vendor string. + HypervisorVendorID Vendor // Hypervisor vendor + HypervisorVendorString string // Raw hypervisor vendor string + featureSet flagSet // Features of the CPU + PhysicalCores int // Number of physical processor cores in your CPU. Will be 0 if undetectable. + ThreadsPerCore int // Number of threads per physical core. Will be 1 if undetectable. + LogicalCores int // Number of physical cores times threads that can run on each core through the use of hyperthreading. Will be 0 if undetectable. + Family int // CPU family number + Model int // CPU model number + Stepping int // CPU stepping info + CacheLine int // Cache line size in bytes. Will be 0 if undetectable. + Hz int64 // Clock speed, if known, 0 otherwise. Will attempt to contain base clock speed. + BoostFreq int64 // Max clock speed, if known, 0 otherwise + Cache struct { L1I int // L1 Instruction Cache (per core or shared). Will be -1 if undetected L1D int // L1 Data Cache (per core or shared). Will be -1 if undetected L2 int // L2 Cache (per core or shared). Will be -1 if undetected @@ -318,8 +327,9 @@ type CPUInfo struct { SGX SGXSupport AMDMemEncryption AMDMemEncryptionSupport AVX10Level uint8 - maxFunc uint32 - maxExFunc uint32 + + maxFunc uint32 + maxExFunc uint32 } var cpuid func(op uint32) (eax, ebx, ecx, edx uint32) @@ -503,7 +513,7 @@ func (c CPUInfo) FeatureSet() []string { // Uses the RDTSCP instruction. The value 0 is returned // if the CPU does not support the instruction. func (c CPUInfo) RTCounter() uint64 { - if !c.Supports(RDTSCP) { + if !c.Has(RDTSCP) { return 0 } a, _, _, d := rdtscpAsm() @@ -515,13 +525,22 @@ func (c CPUInfo) RTCounter() uint64 { // about the current cpu/core the code is running on. // If the RDTSCP instruction isn't supported on the CPU, the value 0 is returned. func (c CPUInfo) Ia32TscAux() uint32 { - if !c.Supports(RDTSCP) { + if !c.Has(RDTSCP) { return 0 } _, _, ecx, _ := rdtscpAsm() return ecx } +// SveLengths returns arm SVE vector and predicate lengths. +// Will return 0, 0 if SVE is not enabled or otherwise unable to detect. +func (c CPUInfo) SveLengths() (vl, pl uint64) { + if !c.Has(SVE) { + return 0, 0 + } + return getVectorLength() +} + // LogicalCPU will return the Logical CPU the code is currently executing on. // This is likely to change when the OS re-schedules the running thread // to another CPU. @@ -781,11 +800,16 @@ func threadsPerCore() int { _, b, _, _ := cpuidex(0xb, 0) if b&0xffff == 0 { if vend == AMD { - // Workaround for AMD returning 0, assume 2 if >= Zen 2 - // It will be more correct than not. + // if >= Zen 2 0x8000001e EBX 15-8 bits means threads per core. 
+ // The number of threads per core is ThreadsPerCore+1 + // See PPR for AMD Family 17h Models 00h-0Fh (page 82) fam, _, _ := familyModel() _, _, _, d := cpuid(1) if (d&(1<<28)) != 0 && fam >= 23 { + if maxExtendedFunction() >= 0x8000001e { + _, b, _, _ := cpuid(0x8000001e) + return int((b>>8)&0xff) + 1 + } return 2 } } @@ -877,7 +901,9 @@ var vendorMapping = map[string]Vendor{ "GenuineTMx86": Transmeta, "Geode by NSC": NSC, "VIA VIA VIA ": VIA, - "KVMKVMKVMKVM": KVM, + "KVMKVMKVM": KVM, + "Linux KVM Hv": KVM, + "TCGTCGTCGTCG": QEMU, "Microsoft Hv": MSVM, "VMwareVMware": VMware, "XenVMMXenVMM": XenHVM, @@ -887,6 +913,10 @@ var vendorMapping = map[string]Vendor{ "SiS SiS SiS ": SiS, "RiseRiseRise": SiS, "Genuine RDC": RDC, + "QNXQVMBSQG": QNX, + "ACRNACRNACRN": ACRN, + "SRESRESRESRE": SRE, + "Apple VZ": Apple, } func vendorID() (Vendor, string) { @@ -899,6 +929,17 @@ func vendorID() (Vendor, string) { return vend, v } +func hypervisorVendorID() (Vendor, string) { + // https://lwn.net/Articles/301888/ + _, b, c, d := cpuid(0x40000000) + v := string(valAsString(b, c, d)) + vend, ok := vendorMapping[v] + if !ok { + return VendorUnknown, v + } + return vend, v +} + func cacheLine() int { if maxFunctionID() < 0x1 { return 0 @@ -1271,6 +1312,7 @@ func support() flagSet { fs.setIf(ebx&(1<<31) != 0, AVX512VL) // ecx fs.setIf(ecx&(1<<1) != 0, AVX512VBMI) + fs.setIf(ecx&(1<<3) != 0, AMXFP8) fs.setIf(ecx&(1<<6) != 0, AVX512VBMI2) fs.setIf(ecx&(1<<11) != 0, AVX512VNNI) fs.setIf(ecx&(1<<12) != 0, AVX512BITALG) diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s b/vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s index b31d6aec43f..b196f78eb44 100644 --- a/vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s +++ b/vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s @@ -24,3 +24,13 @@ TEXT ·getInstAttributes(SB), 7, $0 MOVD R1, instAttrReg1+8(FP) RET +TEXT ·getVectorLength(SB), 7, $0 + WORD $0xd2800002 // mov x2, #0 + WORD $0x04225022 // addvl x2, x2, #1 + WORD $0xd37df042 // lsl x2, x2, #3 + WORD $0xd2800003 // mov x3, #0 + WORD $0x04635023 // addpl x3, x3, #1 + WORD $0xd37df063 // lsl x3, x3, #3 + MOVD R2, vl+0(FP) + MOVD R3, pl+8(FP) + RET diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go b/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go index 9a53504a042..566743d2204 100644 --- a/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go +++ b/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go @@ -10,6 +10,7 @@ import "runtime" func getMidr() (midr uint64) func getProcFeatures() (procFeatures uint64) func getInstAttributes() (instAttrReg0, instAttrReg1 uint64) +func getVectorLength() (vl, pl uint64) func initCPU() { cpuid = func(uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 } @@ -24,7 +25,7 @@ func addInfo(c *CPUInfo, safe bool) { detectOS(c) // ARM64 disabled since it may crash if interrupt is not intercepted by OS. 
- if safe && !c.Supports(ARMCPUID) && runtime.GOOS != "freebsd" { + if safe && !c.Has(ARMCPUID) && runtime.GOOS != "freebsd" { return } midr := getMidr() diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_ref.go b/vendor/github.com/klauspost/cpuid/v2/detect_ref.go index 9636c2bc17c..574f9389c07 100644 --- a/vendor/github.com/klauspost/cpuid/v2/detect_ref.go +++ b/vendor/github.com/klauspost/cpuid/v2/detect_ref.go @@ -10,6 +10,8 @@ func initCPU() { cpuidex = func(x, y uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 } xgetbv = func(uint32) (a, b uint32) { return 0, 0 } rdtscpAsm = func() (a, b, c, d uint32) { return 0, 0, 0, 0 } + } func addInfo(info *CPUInfo, safe bool) {} +func getVectorLength() (vl, pl uint64) { return 0, 0 } diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_x86.go b/vendor/github.com/klauspost/cpuid/v2/detect_x86.go index 799b400c2ec..f924c9d8399 100644 --- a/vendor/github.com/klauspost/cpuid/v2/detect_x86.go +++ b/vendor/github.com/klauspost/cpuid/v2/detect_x86.go @@ -32,7 +32,10 @@ func addInfo(c *CPUInfo, safe bool) { c.LogicalCores = logicalCores() c.PhysicalCores = physicalCores() c.VendorID, c.VendorString = vendorID() + c.HypervisorVendorID, c.HypervisorVendorString = hypervisorVendorID() c.AVX10Level = c.supportAVX10() c.cacheSize() c.frequencies() } + +func getVectorLength() (vl, pl uint64) { return 0, 0 } diff --git a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go index 3a256031039..e7f874a7e8d 100644 --- a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go +++ b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go @@ -15,224 +15,225 @@ func _() { _ = x[AMXBF16-5] _ = x[AMXFP16-6] _ = x[AMXINT8-7] - _ = x[AMXTILE-8] - _ = x[APX_F-9] - _ = x[AVX-10] - _ = x[AVX10-11] - _ = x[AVX10_128-12] - _ = x[AVX10_256-13] - _ = x[AVX10_512-14] - _ = x[AVX2-15] - _ = x[AVX512BF16-16] - _ = x[AVX512BITALG-17] - _ = x[AVX512BW-18] - _ = x[AVX512CD-19] - _ = x[AVX512DQ-20] - _ = x[AVX512ER-21] - _ = x[AVX512F-22] - _ = x[AVX512FP16-23] - _ = x[AVX512IFMA-24] - _ = x[AVX512PF-25] - _ = x[AVX512VBMI-26] - _ = x[AVX512VBMI2-27] - _ = x[AVX512VL-28] - _ = x[AVX512VNNI-29] - _ = x[AVX512VP2INTERSECT-30] - _ = x[AVX512VPOPCNTDQ-31] - _ = x[AVXIFMA-32] - _ = x[AVXNECONVERT-33] - _ = x[AVXSLOW-34] - _ = x[AVXVNNI-35] - _ = x[AVXVNNIINT8-36] - _ = x[AVXVNNIINT16-37] - _ = x[BHI_CTRL-38] - _ = x[BMI1-39] - _ = x[BMI2-40] - _ = x[CETIBT-41] - _ = x[CETSS-42] - _ = x[CLDEMOTE-43] - _ = x[CLMUL-44] - _ = x[CLZERO-45] - _ = x[CMOV-46] - _ = x[CMPCCXADD-47] - _ = x[CMPSB_SCADBS_SHORT-48] - _ = x[CMPXCHG8-49] - _ = x[CPBOOST-50] - _ = x[CPPC-51] - _ = x[CX16-52] - _ = x[EFER_LMSLE_UNS-53] - _ = x[ENQCMD-54] - _ = x[ERMS-55] - _ = x[F16C-56] - _ = x[FLUSH_L1D-57] - _ = x[FMA3-58] - _ = x[FMA4-59] - _ = x[FP128-60] - _ = x[FP256-61] - _ = x[FSRM-62] - _ = x[FXSR-63] - _ = x[FXSROPT-64] - _ = x[GFNI-65] - _ = x[HLE-66] - _ = x[HRESET-67] - _ = x[HTT-68] - _ = x[HWA-69] - _ = x[HYBRID_CPU-70] - _ = x[HYPERVISOR-71] - _ = x[IA32_ARCH_CAP-72] - _ = x[IA32_CORE_CAP-73] - _ = x[IBPB-74] - _ = x[IBPB_BRTYPE-75] - _ = x[IBRS-76] - _ = x[IBRS_PREFERRED-77] - _ = x[IBRS_PROVIDES_SMP-78] - _ = x[IBS-79] - _ = x[IBSBRNTRGT-80] - _ = x[IBSFETCHSAM-81] - _ = x[IBSFFV-82] - _ = x[IBSOPCNT-83] - _ = x[IBSOPCNTEXT-84] - _ = x[IBSOPSAM-85] - _ = x[IBSRDWROPCNT-86] - _ = x[IBSRIPINVALIDCHK-87] - _ = x[IBS_FETCH_CTLX-88] - _ = x[IBS_OPDATA4-89] - _ = x[IBS_OPFUSE-90] - _ = x[IBS_PREVENTHOST-91] - _ = x[IBS_ZEN4-92] - _ = 
x[IDPRED_CTRL-93] - _ = x[INT_WBINVD-94] - _ = x[INVLPGB-95] - _ = x[KEYLOCKER-96] - _ = x[KEYLOCKERW-97] - _ = x[LAHF-98] - _ = x[LAM-99] - _ = x[LBRVIRT-100] - _ = x[LZCNT-101] - _ = x[MCAOVERFLOW-102] - _ = x[MCDT_NO-103] - _ = x[MCOMMIT-104] - _ = x[MD_CLEAR-105] - _ = x[MMX-106] - _ = x[MMXEXT-107] - _ = x[MOVBE-108] - _ = x[MOVDIR64B-109] - _ = x[MOVDIRI-110] - _ = x[MOVSB_ZL-111] - _ = x[MOVU-112] - _ = x[MPX-113] - _ = x[MSRIRC-114] - _ = x[MSRLIST-115] - _ = x[MSR_PAGEFLUSH-116] - _ = x[NRIPS-117] - _ = x[NX-118] - _ = x[OSXSAVE-119] - _ = x[PCONFIG-120] - _ = x[POPCNT-121] - _ = x[PPIN-122] - _ = x[PREFETCHI-123] - _ = x[PSFD-124] - _ = x[RDPRU-125] - _ = x[RDRAND-126] - _ = x[RDSEED-127] - _ = x[RDTSCP-128] - _ = x[RRSBA_CTRL-129] - _ = x[RTM-130] - _ = x[RTM_ALWAYS_ABORT-131] - _ = x[SBPB-132] - _ = x[SERIALIZE-133] - _ = x[SEV-134] - _ = x[SEV_64BIT-135] - _ = x[SEV_ALTERNATIVE-136] - _ = x[SEV_DEBUGSWAP-137] - _ = x[SEV_ES-138] - _ = x[SEV_RESTRICTED-139] - _ = x[SEV_SNP-140] - _ = x[SGX-141] - _ = x[SGXLC-142] - _ = x[SHA-143] - _ = x[SME-144] - _ = x[SME_COHERENT-145] - _ = x[SPEC_CTRL_SSBD-146] - _ = x[SRBDS_CTRL-147] - _ = x[SRSO_MSR_FIX-148] - _ = x[SRSO_NO-149] - _ = x[SRSO_USER_KERNEL_NO-150] - _ = x[SSE-151] - _ = x[SSE2-152] - _ = x[SSE3-153] - _ = x[SSE4-154] - _ = x[SSE42-155] - _ = x[SSE4A-156] - _ = x[SSSE3-157] - _ = x[STIBP-158] - _ = x[STIBP_ALWAYSON-159] - _ = x[STOSB_SHORT-160] - _ = x[SUCCOR-161] - _ = x[SVM-162] - _ = x[SVMDA-163] - _ = x[SVMFBASID-164] - _ = x[SVML-165] - _ = x[SVMNP-166] - _ = x[SVMPF-167] - _ = x[SVMPFT-168] - _ = x[SYSCALL-169] - _ = x[SYSEE-170] - _ = x[TBM-171] - _ = x[TDX_GUEST-172] - _ = x[TLB_FLUSH_NESTED-173] - _ = x[TME-174] - _ = x[TOPEXT-175] - _ = x[TSCRATEMSR-176] - _ = x[TSXLDTRK-177] - _ = x[VAES-178] - _ = x[VMCBCLEAN-179] - _ = x[VMPL-180] - _ = x[VMSA_REGPROT-181] - _ = x[VMX-182] - _ = x[VPCLMULQDQ-183] - _ = x[VTE-184] - _ = x[WAITPKG-185] - _ = x[WBNOINVD-186] - _ = x[WRMSRNS-187] - _ = x[X87-188] - _ = x[XGETBV1-189] - _ = x[XOP-190] - _ = x[XSAVE-191] - _ = x[XSAVEC-192] - _ = x[XSAVEOPT-193] - _ = x[XSAVES-194] - _ = x[AESARM-195] - _ = x[ARMCPUID-196] - _ = x[ASIMD-197] - _ = x[ASIMDDP-198] - _ = x[ASIMDHP-199] - _ = x[ASIMDRDM-200] - _ = x[ATOMICS-201] - _ = x[CRC32-202] - _ = x[DCPOP-203] - _ = x[EVTSTRM-204] - _ = x[FCMA-205] - _ = x[FP-206] - _ = x[FPHP-207] - _ = x[GPA-208] - _ = x[JSCVT-209] - _ = x[LRCPC-210] - _ = x[PMULL-211] - _ = x[SHA1-212] - _ = x[SHA2-213] - _ = x[SHA3-214] - _ = x[SHA512-215] - _ = x[SM3-216] - _ = x[SM4-217] - _ = x[SVE-218] - _ = x[lastID-219] + _ = x[AMXFP8-8] + _ = x[AMXTILE-9] + _ = x[APX_F-10] + _ = x[AVX-11] + _ = x[AVX10-12] + _ = x[AVX10_128-13] + _ = x[AVX10_256-14] + _ = x[AVX10_512-15] + _ = x[AVX2-16] + _ = x[AVX512BF16-17] + _ = x[AVX512BITALG-18] + _ = x[AVX512BW-19] + _ = x[AVX512CD-20] + _ = x[AVX512DQ-21] + _ = x[AVX512ER-22] + _ = x[AVX512F-23] + _ = x[AVX512FP16-24] + _ = x[AVX512IFMA-25] + _ = x[AVX512PF-26] + _ = x[AVX512VBMI-27] + _ = x[AVX512VBMI2-28] + _ = x[AVX512VL-29] + _ = x[AVX512VNNI-30] + _ = x[AVX512VP2INTERSECT-31] + _ = x[AVX512VPOPCNTDQ-32] + _ = x[AVXIFMA-33] + _ = x[AVXNECONVERT-34] + _ = x[AVXSLOW-35] + _ = x[AVXVNNI-36] + _ = x[AVXVNNIINT8-37] + _ = x[AVXVNNIINT16-38] + _ = x[BHI_CTRL-39] + _ = x[BMI1-40] + _ = x[BMI2-41] + _ = x[CETIBT-42] + _ = x[CETSS-43] + _ = x[CLDEMOTE-44] + _ = x[CLMUL-45] + _ = x[CLZERO-46] + _ = x[CMOV-47] + _ = x[CMPCCXADD-48] + _ = x[CMPSB_SCADBS_SHORT-49] + _ = x[CMPXCHG8-50] + _ = x[CPBOOST-51] + _ = x[CPPC-52] + 
_ = x[CX16-53] + _ = x[EFER_LMSLE_UNS-54] + _ = x[ENQCMD-55] + _ = x[ERMS-56] + _ = x[F16C-57] + _ = x[FLUSH_L1D-58] + _ = x[FMA3-59] + _ = x[FMA4-60] + _ = x[FP128-61] + _ = x[FP256-62] + _ = x[FSRM-63] + _ = x[FXSR-64] + _ = x[FXSROPT-65] + _ = x[GFNI-66] + _ = x[HLE-67] + _ = x[HRESET-68] + _ = x[HTT-69] + _ = x[HWA-70] + _ = x[HYBRID_CPU-71] + _ = x[HYPERVISOR-72] + _ = x[IA32_ARCH_CAP-73] + _ = x[IA32_CORE_CAP-74] + _ = x[IBPB-75] + _ = x[IBPB_BRTYPE-76] + _ = x[IBRS-77] + _ = x[IBRS_PREFERRED-78] + _ = x[IBRS_PROVIDES_SMP-79] + _ = x[IBS-80] + _ = x[IBSBRNTRGT-81] + _ = x[IBSFETCHSAM-82] + _ = x[IBSFFV-83] + _ = x[IBSOPCNT-84] + _ = x[IBSOPCNTEXT-85] + _ = x[IBSOPSAM-86] + _ = x[IBSRDWROPCNT-87] + _ = x[IBSRIPINVALIDCHK-88] + _ = x[IBS_FETCH_CTLX-89] + _ = x[IBS_OPDATA4-90] + _ = x[IBS_OPFUSE-91] + _ = x[IBS_PREVENTHOST-92] + _ = x[IBS_ZEN4-93] + _ = x[IDPRED_CTRL-94] + _ = x[INT_WBINVD-95] + _ = x[INVLPGB-96] + _ = x[KEYLOCKER-97] + _ = x[KEYLOCKERW-98] + _ = x[LAHF-99] + _ = x[LAM-100] + _ = x[LBRVIRT-101] + _ = x[LZCNT-102] + _ = x[MCAOVERFLOW-103] + _ = x[MCDT_NO-104] + _ = x[MCOMMIT-105] + _ = x[MD_CLEAR-106] + _ = x[MMX-107] + _ = x[MMXEXT-108] + _ = x[MOVBE-109] + _ = x[MOVDIR64B-110] + _ = x[MOVDIRI-111] + _ = x[MOVSB_ZL-112] + _ = x[MOVU-113] + _ = x[MPX-114] + _ = x[MSRIRC-115] + _ = x[MSRLIST-116] + _ = x[MSR_PAGEFLUSH-117] + _ = x[NRIPS-118] + _ = x[NX-119] + _ = x[OSXSAVE-120] + _ = x[PCONFIG-121] + _ = x[POPCNT-122] + _ = x[PPIN-123] + _ = x[PREFETCHI-124] + _ = x[PSFD-125] + _ = x[RDPRU-126] + _ = x[RDRAND-127] + _ = x[RDSEED-128] + _ = x[RDTSCP-129] + _ = x[RRSBA_CTRL-130] + _ = x[RTM-131] + _ = x[RTM_ALWAYS_ABORT-132] + _ = x[SBPB-133] + _ = x[SERIALIZE-134] + _ = x[SEV-135] + _ = x[SEV_64BIT-136] + _ = x[SEV_ALTERNATIVE-137] + _ = x[SEV_DEBUGSWAP-138] + _ = x[SEV_ES-139] + _ = x[SEV_RESTRICTED-140] + _ = x[SEV_SNP-141] + _ = x[SGX-142] + _ = x[SGXLC-143] + _ = x[SHA-144] + _ = x[SME-145] + _ = x[SME_COHERENT-146] + _ = x[SPEC_CTRL_SSBD-147] + _ = x[SRBDS_CTRL-148] + _ = x[SRSO_MSR_FIX-149] + _ = x[SRSO_NO-150] + _ = x[SRSO_USER_KERNEL_NO-151] + _ = x[SSE-152] + _ = x[SSE2-153] + _ = x[SSE3-154] + _ = x[SSE4-155] + _ = x[SSE42-156] + _ = x[SSE4A-157] + _ = x[SSSE3-158] + _ = x[STIBP-159] + _ = x[STIBP_ALWAYSON-160] + _ = x[STOSB_SHORT-161] + _ = x[SUCCOR-162] + _ = x[SVM-163] + _ = x[SVMDA-164] + _ = x[SVMFBASID-165] + _ = x[SVML-166] + _ = x[SVMNP-167] + _ = x[SVMPF-168] + _ = x[SVMPFT-169] + _ = x[SYSCALL-170] + _ = x[SYSEE-171] + _ = x[TBM-172] + _ = x[TDX_GUEST-173] + _ = x[TLB_FLUSH_NESTED-174] + _ = x[TME-175] + _ = x[TOPEXT-176] + _ = x[TSCRATEMSR-177] + _ = x[TSXLDTRK-178] + _ = x[VAES-179] + _ = x[VMCBCLEAN-180] + _ = x[VMPL-181] + _ = x[VMSA_REGPROT-182] + _ = x[VMX-183] + _ = x[VPCLMULQDQ-184] + _ = x[VTE-185] + _ = x[WAITPKG-186] + _ = x[WBNOINVD-187] + _ = x[WRMSRNS-188] + _ = x[X87-189] + _ = x[XGETBV1-190] + _ = x[XOP-191] + _ = x[XSAVE-192] + _ = x[XSAVEC-193] + _ = x[XSAVEOPT-194] + _ = x[XSAVES-195] + _ = x[AESARM-196] + _ = x[ARMCPUID-197] + _ = x[ASIMD-198] + _ = x[ASIMDDP-199] + _ = x[ASIMDHP-200] + _ = x[ASIMDRDM-201] + _ = x[ATOMICS-202] + _ = x[CRC32-203] + _ = x[DCPOP-204] + _ = x[EVTSTRM-205] + _ = x[FCMA-206] + _ = x[FP-207] + _ = x[FPHP-208] + _ = x[GPA-209] + _ = x[JSCVT-210] + _ = x[LRCPC-211] + _ = x[PMULL-212] + _ = x[SHA1-213] + _ = x[SHA2-214] + _ = x[SHA3-215] + _ = x[SHA512-216] + _ = x[SM3-217] + _ = x[SM4-218] + _ = x[SVE-219] + _ = x[lastID-220] _ = x[firstID-0] } -const _FeatureID_name = 
"firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXTILEAPX_FAVXAVX10AVX10_128AVX10_256AVX10_512AVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8AVXVNNIINT16BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBPB_BRTYPEIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBKEYLOCKERKEYLOCKERWLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSBPBSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSRSO_MSR_FIXSRSO_NOSRSO_USER_KERNEL_NOSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTDX_GUESTTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID" +const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXFP8AMXTILEAPX_FAVXAVX10AVX10_128AVX10_256AVX10_512AVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8AVXVNNIINT16BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBPB_BRTYPEIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBKEYLOCKERKEYLOCKERWLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSBPBSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSRSO_MSR_FIXSRSO_NOSRSO_USER_KERNEL_NOSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTDX_GUESTTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID" -var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 62, 67, 70, 75, 84, 93, 102, 106, 116, 128, 136, 144, 152, 160, 167, 177, 187, 195, 205, 216, 224, 234, 252, 267, 274, 286, 293, 300, 311, 323, 331, 335, 339, 345, 350, 358, 363, 369, 373, 382, 400, 408, 415, 419, 423, 437, 443, 447, 451, 460, 464, 468, 473, 478, 482, 486, 
493, 497, 500, 506, 509, 512, 522, 532, 545, 558, 562, 573, 577, 591, 608, 611, 621, 632, 638, 646, 657, 665, 677, 693, 707, 718, 728, 743, 751, 762, 772, 779, 788, 798, 802, 805, 812, 817, 828, 835, 842, 850, 853, 859, 864, 873, 880, 888, 892, 895, 901, 908, 921, 926, 928, 935, 942, 948, 952, 961, 965, 970, 976, 982, 988, 998, 1001, 1017, 1021, 1030, 1033, 1042, 1057, 1070, 1076, 1090, 1097, 1100, 1105, 1108, 1111, 1123, 1137, 1147, 1159, 1166, 1185, 1188, 1192, 1196, 1200, 1205, 1210, 1215, 1220, 1234, 1245, 1251, 1254, 1259, 1268, 1272, 1277, 1282, 1288, 1295, 1300, 1303, 1312, 1328, 1331, 1337, 1347, 1355, 1359, 1368, 1372, 1384, 1387, 1397, 1400, 1407, 1415, 1422, 1425, 1432, 1435, 1440, 1446, 1454, 1460, 1466, 1474, 1479, 1486, 1493, 1501, 1508, 1513, 1518, 1525, 1529, 1531, 1535, 1538, 1543, 1548, 1553, 1557, 1561, 1565, 1571, 1574, 1577, 1580, 1586} +var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 61, 68, 73, 76, 81, 90, 99, 108, 112, 122, 134, 142, 150, 158, 166, 173, 183, 193, 201, 211, 222, 230, 240, 258, 273, 280, 292, 299, 306, 317, 329, 337, 341, 345, 351, 356, 364, 369, 375, 379, 388, 406, 414, 421, 425, 429, 443, 449, 453, 457, 466, 470, 474, 479, 484, 488, 492, 499, 503, 506, 512, 515, 518, 528, 538, 551, 564, 568, 579, 583, 597, 614, 617, 627, 638, 644, 652, 663, 671, 683, 699, 713, 724, 734, 749, 757, 768, 778, 785, 794, 804, 808, 811, 818, 823, 834, 841, 848, 856, 859, 865, 870, 879, 886, 894, 898, 901, 907, 914, 927, 932, 934, 941, 948, 954, 958, 967, 971, 976, 982, 988, 994, 1004, 1007, 1023, 1027, 1036, 1039, 1048, 1063, 1076, 1082, 1096, 1103, 1106, 1111, 1114, 1117, 1129, 1143, 1153, 1165, 1172, 1191, 1194, 1198, 1202, 1206, 1211, 1216, 1221, 1226, 1240, 1251, 1257, 1260, 1265, 1274, 1278, 1283, 1288, 1294, 1301, 1306, 1309, 1318, 1334, 1337, 1343, 1353, 1361, 1365, 1374, 1378, 1390, 1393, 1403, 1406, 1413, 1421, 1428, 1431, 1438, 1441, 1446, 1452, 1460, 1466, 1472, 1480, 1485, 1492, 1499, 1507, 1514, 1519, 1524, 1531, 1535, 1537, 1541, 1544, 1549, 1554, 1559, 1563, 1567, 1571, 1577, 1580, 1583, 1586, 1592} func (i FeatureID) String() string { if i < 0 || i >= FeatureID(len(_FeatureID_index)-1) { @@ -270,12 +271,17 @@ func _() { _ = x[AMCC-23] _ = x[Qualcomm-24] _ = x[Marvell-25] - _ = x[lastVendor-26] + _ = x[QEMU-26] + _ = x[QNX-27] + _ = x[ACRN-28] + _ = x[SRE-29] + _ = x[Apple-30] + _ = x[lastVendor-31] } -const _Vendor_name = "VendorUnknownIntelAMDVIATransmetaNSCKVMMSVMVMwareXenHVMBhyveHygonSiSRDCAmpereARMBroadcomCaviumDECFujitsuInfineonMotorolaNVIDIAAMCCQualcommMarvelllastVendor" +const _Vendor_name = "VendorUnknownIntelAMDVIATransmetaNSCKVMMSVMVMwareXenHVMBhyveHygonSiSRDCAmpereARMBroadcomCaviumDECFujitsuInfineonMotorolaNVIDIAAMCCQualcommMarvellQEMUQNXACRNSREApplelastVendor" -var _Vendor_index = [...]uint8{0, 13, 18, 21, 24, 33, 36, 39, 43, 49, 55, 60, 65, 68, 71, 77, 80, 88, 94, 97, 104, 112, 120, 126, 130, 138, 145, 155} +var _Vendor_index = [...]uint8{0, 13, 18, 21, 24, 33, 36, 39, 43, 49, 55, 60, 65, 68, 71, 77, 80, 88, 94, 97, 104, 112, 120, 126, 130, 138, 145, 149, 152, 156, 159, 164, 174} func (i Vendor) String() string { if i < 0 || i >= Vendor(len(_Vendor_index)-1) { diff --git a/vendor/github.com/knadh/koanf/v2/README.md b/vendor/github.com/knadh/koanf/v2/README.md index d7412704ee5..b4947e556f6 100644 --- a/vendor/github.com/knadh/koanf/v2/README.md +++ b/vendor/github.com/knadh/koanf/v2/README.md @@ -8,7 +8,7 @@ koanf v2 has modules (Providers) for reading configuration from a variety of sou All external dependencies in 
providers and parsers are detached from the core and can be installed separately as necessary. -[![Run Tests](https://github.com/knadh/koanf/actions/workflows/test.yml/badge.svg)](https://github.com/knadh/koanf/actions/workflows/test.yml) [![GoDoc](https://godoc.org/github.com/knadh/koanf?status.svg)](https://godoc.org/github.com/knadh/koanf) +[![Run Tests](https://github.com/knadh/koanf/actions/workflows/test.yml/badge.svg)](https://github.com/knadh/koanf/actions/workflows/test.yml) [![GoDoc](https://pkg.go.dev/badge/github.com/knadh/koanf?utm_source=godoc)](https://pkg.go.dev/github.com/knadh/koanf/v2) ### Installation @@ -26,11 +26,10 @@ go get -u github.com/knadh/koanf/providers/file # Install the necessary Parser(s). -# Available: toml, json, yaml, dotenv, hcl, hjson, nestedtext +# Available: toml, toml/v2, json, yaml, dotenv, hcl, hjson, nestedtext # go get -u github.com/knadh/koanf/parsers/$parser go get -u github.com/knadh/koanf/parsers/toml - ``` [See the list](#api) of all bundled Providers and Parsers. @@ -146,6 +145,9 @@ func main() { k.Print() }) + // To stop a file watcher, call: + // f.Unwatch() + // Block forever (and manually make a change to mock/mock.json) to // reload the config. log.Println("waiting forever. Try making a change to mock/mock.json to live reload") @@ -168,6 +170,10 @@ import ( "github.com/knadh/koanf/v2" "github.com/knadh/koanf/parsers/toml" + + // TOML version 2 is available at: + // "github.com/knadh/koanf/parsers/toml/v2" + "github.com/knadh/koanf/providers/file" "github.com/knadh/koanf/providers/posflag" flag "github.com/spf13/pflag" @@ -317,7 +323,7 @@ func main() { ``` ### Unmarshalling and marshalling -`Parser`s can be used to unmarshal and scan the values in a Koanf instance into a struct based on the field tags, and to marshal a Koanf instance back into serialized bytes, for example, back to JSON or YAML, to write back to files. +`Parser`s can be used to unmarshal and scan the values in a Koanf instance into a struct based on the field tags, and to marshal a Koanf instance back into serialized bytes, for example, to JSON or YAML files. ```go package main @@ -366,7 +372,7 @@ func main() { fmt.Println(out) // Marshal the instance back to JSON. - // The paser instance can be anything, eg: json.Paser(), yaml.Parser() etc. + // The parser instance can be anything, e.g. json.Parser(), yaml.Parser() etc. b, _ := k.Marshal(parser) fmt.Println(string(b)) } @@ -677,6 +683,7 @@ Install with `go get -u github.com/knadh/koanf/parsers/$parser` | json | `json.Parser()` | Parses JSON bytes into a nested map | | yaml | `yaml.Parser()` | Parses YAML bytes into a nested map | | toml | `toml.Parser()` | Parses TOML bytes into a nested map | +| toml/v2 | `toml.Parser()` | Parses TOML bytes into a nested map (using go-toml v2) | | dotenv | `dotenv.Parser()` | Parses DotEnv bytes into a flat map | | hcl | `hcl.Parser(flattenSlices bool)` | Parses Hashicorp HCL bytes into a nested map. `flattenSlices` is recommended to be set to true. [Read more](https://github.com/hashicorp/hcl/issues/162). 
| | nestedtext | `nestedtext.Parser()` | Parses NestedText bytes into a flat map | diff --git a/vendor/github.com/knadh/koanf/v2/go.work.sum b/vendor/github.com/knadh/koanf/v2/go.work.sum index b879b3bf7de..eab7bcf845e 100644 --- a/vendor/github.com/knadh/koanf/v2/go.work.sum +++ b/vendor/github.com/knadh/koanf/v2/go.work.sum @@ -125,7 +125,6 @@ github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91 github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= @@ -137,18 +136,18 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFb github.com/hashicorp/consul/sdk v0.13.1/go.mod h1:SW/mM4LbKfqmMvcFu8v+eiQQ7oitXEFeiBe9StxERb0= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= diff --git a/vendor/github.com/lufia/plan9stats/cpu.go b/vendor/github.com/lufia/plan9stats/cpu.go index a101b911906..eaff362c345 100644 --- a/vendor/github.com/lufia/plan9stats/cpu.go +++ b/vendor/github.com/lufia/plan9stats/cpu.go @@ -178,9 +178,12 @@ func ReadCPUStats(ctx context.Context, opts ...Option) (*CPUStats, error) { var up uint32parser pids := make([]uint32, len(names)) for i, s := range 
names { + if s == "trace" { + continue + } pids[i] = up.Parse(s) } - if up.err != nil { + if err := up.err; err != nil { return nil, err } sort.Slice(pids, func(i, j int) bool { diff --git a/vendor/github.com/magefile/mage/LICENSE b/vendor/github.com/magefile/mage/LICENSE new file mode 100644 index 00000000000..d0632bc1458 --- /dev/null +++ b/vendor/github.com/magefile/mage/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2017 the Mage authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/magefile/mage/mg/color.go b/vendor/github.com/magefile/mage/mg/color.go new file mode 100644 index 00000000000..3e27103325a --- /dev/null +++ b/vendor/github.com/magefile/mage/mg/color.go @@ -0,0 +1,80 @@ +package mg + +// Color is an ANSI color type +type Color int + +// If you add/change/remove any items in this constant block, +// you will need to run "stringer -type=Color" in this directory again. +// NOTE: Please keep the list in alphabetical order. +const ( + Black Color = iota + Red + Green + Yellow + Blue + Magenta + Cyan + White + BrightBlack + BrightRed + BrightGreen + BrightYellow + BrightBlue + BrightMagenta + BrightCyan + BrightWhite +) + +// ansiColor maps the supported terminal colors to their ANSI color codes. +var ansiColor = map[Color]string{ + Black: "\u001b[30m", + Red: "\u001b[31m", + Green: "\u001b[32m", + Yellow: "\u001b[33m", + Blue: "\u001b[34m", + Magenta: "\u001b[35m", + Cyan: "\u001b[36m", + White: "\u001b[37m", + BrightBlack: "\u001b[30;1m", + BrightRed: "\u001b[31;1m", + BrightGreen: "\u001b[32;1m", + BrightYellow: "\u001b[33;1m", + BrightBlue: "\u001b[34;1m", + BrightMagenta: "\u001b[35;1m", + BrightCyan: "\u001b[36;1m", + BrightWhite: "\u001b[37;1m", +} + +// AnsiColorReset is an ANSI color code to reset the terminal color. +const AnsiColorReset = "\033[0m" + +// DefaultTargetAnsiColor is the default ANSI color for colorizing targets. +// It is set to Cyan as an arbitrary color, because it has a neutral meaning. +var DefaultTargetAnsiColor = ansiColor[Cyan] + +func toLowerCase(s string) string { + // this is a naive implementation + // borrowed from https://golang.org/src/strings/strings.go + // and only considers alphabetical characters [a-zA-Z] + // so that we don't depend on the "strings" package + buf := make([]byte, len(s)) + for i := 0; i < len(s); i++ { + c := s[i] + if 'A' <= c && c <= 'Z' { + c += 'a' - 'A' + } + buf[i] = c + } + return string(buf) +} + +func getAnsiColor(color string) (string, bool) { + colorLower := toLowerCase(color) + for k, v := range ansiColor { + colorConstLower := toLowerCase(k.String()) + if colorConstLower == colorLower { + return v, true + } + } + return "", false +} diff --git a/vendor/github.com/magefile/mage/mg/color_string.go b/vendor/github.com/magefile/mage/mg/color_string.go new file mode 100644 index 00000000000..06debca5404 --- /dev/null +++ b/vendor/github.com/magefile/mage/mg/color_string.go @@ -0,0 +1,38 @@ +// Code generated by "stringer -type=Color"; DO NOT EDIT. + +package mg + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[Black-0] + _ = x[Red-1] + _ = x[Green-2] + _ = x[Yellow-3] + _ = x[Blue-4] + _ = x[Magenta-5] + _ = x[Cyan-6] + _ = x[White-7] + _ = x[BrightBlack-8] + _ = x[BrightRed-9] + _ = x[BrightGreen-10] + _ = x[BrightYellow-11] + _ = x[BrightBlue-12] + _ = x[BrightMagenta-13] + _ = x[BrightCyan-14] + _ = x[BrightWhite-15] +} + +const _Color_name = "BlackRedGreenYellowBlueMagentaCyanWhiteBrightBlackBrightRedBrightGreenBrightYellowBrightBlueBrightMagentaBrightCyanBrightWhite" + +var _Color_index = [...]uint8{0, 5, 8, 13, 19, 23, 30, 34, 39, 50, 59, 70, 82, 92, 105, 115, 126} + +func (i Color) String() string { + if i < 0 || i >= Color(len(_Color_index)-1) { + return "Color(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Color_name[_Color_index[i]:_Color_index[i+1]] +} diff --git a/vendor/github.com/magefile/mage/mg/deps.go b/vendor/github.com/magefile/mage/mg/deps.go new file mode 100644 index 00000000000..f0c2509b833 --- /dev/null +++ b/vendor/github.com/magefile/mage/mg/deps.go @@ -0,0 +1,211 @@ +package mg + +import ( + "context" + "fmt" + "log" + "os" + "reflect" + "runtime" + "strings" + "sync" +) + +var logger = log.New(os.Stderr, "", 0) + +type onceMap struct { + mu *sync.Mutex + m map[onceKey]*onceFun +} + +type onceKey struct { + Name string + ID string +} + +func (o *onceMap) LoadOrStore(f Fn) *onceFun { + defer o.mu.Unlock() + o.mu.Lock() + + key := onceKey{ + Name: f.Name(), + ID: f.ID(), + } + existing, ok := o.m[key] + if ok { + return existing + } + one := &onceFun{ + once: &sync.Once{}, + fn: f, + displayName: displayName(f.Name()), + } + o.m[key] = one + return one +} + +var onces = &onceMap{ + mu: &sync.Mutex{}, + m: map[onceKey]*onceFun{}, +} + +// SerialDeps is like Deps except it runs each dependency serially, instead of +// in parallel. This can be useful for resource intensive dependencies that +// shouldn't be run at the same time. +func SerialDeps(fns ...interface{}) { + funcs := checkFns(fns) + ctx := context.Background() + for i := range fns { + runDeps(ctx, funcs[i:i+1]) + } +} + +// SerialCtxDeps is like CtxDeps except it runs each dependency serially, +// instead of in parallel. This can be useful for resource intensive +// dependencies that shouldn't be run at the same time. +func SerialCtxDeps(ctx context.Context, fns ...interface{}) { + funcs := checkFns(fns) + for i := range fns { + runDeps(ctx, funcs[i:i+1]) + } +} + +// CtxDeps runs the given functions as dependencies of the calling function. +// Dependencies must only be of type: +// func() +// func() error +// func(context.Context) +// func(context.Context) error +// Or a similar method on a mg.Namespace type. +// Or an mg.Fn interface. +// +// The function calling Deps is guaranteed that all dependent functions will be +// run exactly once when Deps returns. Dependent functions may in turn declare +// their own dependencies using Deps. Each dependency is run in its own +// goroutine. Each function is given the context provided if the function +// prototype allows for it. +func CtxDeps(ctx context.Context, fns ...interface{}) { + funcs := checkFns(fns) + runDeps(ctx, funcs) +} + +// runDeps assumes you've already called checkFns. 
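+// It deduplicates dependencies through the package-level onces map, runs each +// one in its own goroutine, and panics with an aggregated Fatal error (carrying +// a best-effort exit code) if any of them fail.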
+func runDeps(ctx context.Context, fns []Fn) { + mu := &sync.Mutex{} + var errs []string + var exit int + wg := &sync.WaitGroup{} + for _, f := range fns { + fn := onces.LoadOrStore(f) + wg.Add(1) + go func() { + defer func() { + if v := recover(); v != nil { + mu.Lock() + if err, ok := v.(error); ok { + exit = changeExit(exit, ExitStatus(err)) + } else { + exit = changeExit(exit, 1) + } + errs = append(errs, fmt.Sprint(v)) + mu.Unlock() + } + wg.Done() + }() + if err := fn.run(ctx); err != nil { + mu.Lock() + errs = append(errs, fmt.Sprint(err)) + exit = changeExit(exit, ExitStatus(err)) + mu.Unlock() + } + }() + } + + wg.Wait() + if len(errs) > 0 { + panic(Fatal(exit, strings.Join(errs, "\n"))) + } +} + +func checkFns(fns []interface{}) []Fn { + funcs := make([]Fn, len(fns)) + for i, f := range fns { + if fn, ok := f.(Fn); ok { + funcs[i] = fn + continue + } + + // Check if the target provided is not a function so we can give a clear warning + t := reflect.TypeOf(f) + if t == nil || t.Kind() != reflect.Func { + panic(fmt.Errorf("non-function used as a target dependency: %T. The mg.Deps, mg.SerialDeps and mg.CtxDeps functions accept function names, such as mg.Deps(TargetA, TargetB)", f)) + } + + funcs[i] = F(f) + } + return funcs +} + +// Deps runs the given functions in parallel, exactly once. Dependencies must +// only be of type: +// func() +// func() error +// func(context.Context) +// func(context.Context) error +// Or a similar method on a mg.Namespace type. +// Or an mg.Fn interface. +// +// This is a way to build up a tree of dependencies with each dependency +// defining its own dependencies. Functions must have the same signature as a +// Mage target, i.e. optional context argument, optional error return. +func Deps(fns ...interface{}) { + CtxDeps(context.Background(), fns...) +} + +func changeExit(old, new int) int { + if new == 0 { + return old + } + if old == 0 { + return new + } + if old == new { + return old + } + // both different and both non-zero, just set + // exit to 1. Nothing more we can do. + return 1 +} + +// funcName returns the unique name for the function. +func funcName(i interface{}) string { + return runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name() +} + +func displayName(name string) string { + splitByPackage := strings.Split(name, ".") + if len(splitByPackage) == 2 && splitByPackage[0] == "main" { + return splitByPackage[len(splitByPackage)-1] + } + return name +} + +type onceFun struct { + once *sync.Once + fn Fn + err error + + displayName string +} + +// run will run the function exactly once and capture the error output. Further runs simply return +// the same error output. +func (o *onceFun) run(ctx context.Context) error { + o.once.Do(func() { + if Verbose() { + logger.Println("Running dependency:", displayName(o.fn.Name())) + } + o.err = o.fn.Run(ctx) + }) + return o.err +} diff --git a/vendor/github.com/magefile/mage/mg/errors.go b/vendor/github.com/magefile/mage/mg/errors.go new file mode 100644 index 00000000000..2dd780fe3db --- /dev/null +++ b/vendor/github.com/magefile/mage/mg/errors.go @@ -0,0 +1,51 @@ +package mg + +import ( + "errors" + "fmt" +) + +type fatalErr struct { + code int + error +} + +func (f fatalErr) ExitStatus() int { + return f.code +} + +type exitStatus interface { + ExitStatus() int +} + +// Fatal returns an error that will cause mage to print out the +// given args and exit with the given exit code. 
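+// +// A minimal usage sketch (the Clean target, path, and exit code here are +// illustrative assumptions, not part of this package): +// +// func Clean() error { +// if err := os.RemoveAll("dist"); err != nil { +// return mg.Fatal(2, "cleaning dist: ", err) +// } +// return nil +// }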
+func Fatal(code int, args ...interface{}) error { + return fatalErr{ + code: code, + error: errors.New(fmt.Sprint(args...)), + } +} + +// Fatalf returns an error that will cause mage to print out the +// given message and exit with the given exit code. +func Fatalf(code int, format string, args ...interface{}) error { + return fatalErr{ + code: code, + error: fmt.Errorf(format, args...), + } +} + +// ExitStatus queries the error for an exit status. If the error is nil, it +// returns 0. If the error does not implement ExitStatus() int, it returns 1. +// Otherwise it returns the value from ExitStatus(). +func ExitStatus(err error) int { + if err == nil { + return 0 + } + exit, ok := err.(exitStatus) + if !ok { + return 1 + } + return exit.ExitStatus() +} diff --git a/vendor/github.com/magefile/mage/mg/fn.go b/vendor/github.com/magefile/mage/mg/fn.go new file mode 100644 index 00000000000..3856857acea --- /dev/null +++ b/vendor/github.com/magefile/mage/mg/fn.go @@ -0,0 +1,192 @@ +package mg + +import ( + "context" + "encoding/json" + "fmt" + "reflect" + "time" +) + +// Fn represents a function that can be run with mg.Deps. Package, Name, and ID must combine to +// uniquely identify a function, while ensuring the "same" function has identical values. These are +// used as a map key to find and run (or not run) the function. +type Fn interface { + // Name should return the fully qualified name of the function. Usually + // it's best to use runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name(). + Name() string + + // ID should be an additional uniqueness qualifier in case the name is insufficiently unique. + // This can be the case for functions that take arguments (mg.F json-encodes an array of the + // args). + ID() string + + // Run should run the function. + Run(ctx context.Context) error +} + +// F takes a function that is compatible as a mage target, and any args that need to be passed to +// it, and wraps it in an mg.Fn that mg.Deps can run. Args must be passed in the same order as they +// are declared by the function. Note that you do not need to and should not pass a context.Context +// to F, even if the target takes a context. Compatible args are int, bool, string, and +// time.Duration. +func F(target interface{}, args ...interface{}) Fn { + hasContext, isNamespace, err := checkF(target, args) + if err != nil { + panic(err) + } + id, err := json.Marshal(args) + if err != nil { + panic(fmt.Errorf("can't convert args into a mage-compatible id for mg.Deps: %s", err)) + } + return fn{ + name: funcName(target), + id: string(id), + f: func(ctx context.Context) error { + v := reflect.ValueOf(target) + count := len(args) + if hasContext { + count++ + } + if isNamespace { + count++ + } + vargs := make([]reflect.Value, count) + x := 0 + if isNamespace { + vargs[0] = reflect.ValueOf(struct{}{}) + x++ + } + if hasContext { + vargs[x] = reflect.ValueOf(ctx) + x++ + } + for y := range args { + vargs[x+y] = reflect.ValueOf(args[y]) + } + ret := v.Call(vargs) + if len(ret) > 0 { + // we only allow functions with a single error return, so this should be safe. + if ret[0].IsNil() { + return nil + } + return ret[0].Interface().(error) + } + return nil + }, + } +} + +type fn struct { + name string + id string + f func(ctx context.Context) error +} + +// Name returns the fully qualified name of the function. +func (f fn) Name() string { + return f.name +} + +// ID returns a JSON encoding of the argument values passed in. +func (f fn) ID() string { + return f.id +} + +// Run runs the function. 
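+// It invokes the reflection-built closure that F constructed around the wrapped +// target, so the arguments (and any context) are already bound.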
+func (f fn) Run(ctx context.Context) error { + return f.f(ctx) +} + +func checkF(target interface{}, args []interface{}) (hasContext, isNamespace bool, _ error) { + t := reflect.TypeOf(target) + if t == nil || t.Kind() != reflect.Func { + return false, false, fmt.Errorf("non-function passed to mg.F: %T. The mg.F function accepts function names, such as mg.F(TargetA, \"arg1\", \"arg2\")", target) + } + + if t.NumOut() > 1 { + return false, false, fmt.Errorf("target has too many return values, must be zero or just an error: %T", target) + } + if t.NumOut() == 1 && t.Out(0) != errType { + return false, false, fmt.Errorf("target's return value is not an error") + } + + // more inputs than slots is an error if not variadic + if len(args) > t.NumIn() && !t.IsVariadic() { + return false, false, fmt.Errorf("too many arguments for target, got %d for %T", len(args), target) + } + + if t.NumIn() == 0 { + return false, false, nil + } + + x := 0 + inputs := t.NumIn() + + if t.In(0).AssignableTo(emptyType) { + // nameSpace func + isNamespace = true + x++ + // callers must leave off the namespace value + inputs-- + } + if t.NumIn() > x && t.In(x) == ctxType { + // callers must leave off the context + inputs-- + + // let the upper function know it should pass us a context. + hasContext = true + + // skip checking the first argument in the below loop if it's a context, since first arg is + // special. + x++ + } + + if t.IsVariadic() { + if len(args) < inputs-1 { + return false, false, fmt.Errorf("too few arguments for target, got %d for %T", len(args), target) + + } + } else if len(args) != inputs { + return false, false, fmt.Errorf("wrong number of arguments for target, got %d for %T", len(args), target) + } + + for _, arg := range args { + argT := t.In(x) + if t.IsVariadic() && x == t.NumIn()-1 { + // For the variadic argument, use the slice element type. + argT = argT.Elem() + } + if !argTypes[argT] { + return false, false, fmt.Errorf("argument %d (%s), is not a supported argument type", x, argT) + } + passedT := reflect.TypeOf(arg) + if argT != passedT { + return false, false, fmt.Errorf("argument %d expected to be %s, but is %s", x, argT, passedT) + } + if x < t.NumIn()-1 { + x++ + } + } + return hasContext, isNamespace, nil +} + +// Here we define the types that are supported as arguments/returns +var ( + ctxType = reflect.TypeOf(func(context.Context) {}).In(0) + errType = reflect.TypeOf(func() error { return nil }).Out(0) + emptyType = reflect.TypeOf(struct{}{}) + + intType = reflect.TypeOf(int(0)) + stringType = reflect.TypeOf(string("")) + boolType = reflect.TypeOf(bool(false)) + durType = reflect.TypeOf(time.Second) + + // don't put ctx in here, this is for non-context types + argTypes = map[reflect.Type]bool{ + intType: true, + boolType: true, + stringType: true, + durType: true, + } +) diff --git a/vendor/github.com/magefile/mage/mg/runtime.go b/vendor/github.com/magefile/mage/mg/runtime.go new file mode 100644 index 00000000000..9a8de12ce71 --- /dev/null +++ b/vendor/github.com/magefile/mage/mg/runtime.go @@ -0,0 +1,136 @@ +package mg + +import ( + "os" + "path/filepath" + "runtime" + "strconv" +) + +// CacheEnv is the environment variable that users may set to change the +// location where mage stores its compiled binaries. +const CacheEnv = "MAGEFILE_CACHE" + +// VerboseEnv is the environment variable that indicates the user requested +// verbose mode when running a magefile. 
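+// For example (illustrative): running MAGEFILE_VERBOSE=1 mage build behaves +// like running mage -v build.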
+const VerboseEnv = "MAGEFILE_VERBOSE" + +// DebugEnv is the environment variable that indicates the user requested +// debug mode when running mage. +const DebugEnv = "MAGEFILE_DEBUG" + +// GoCmdEnv is the environment variable that indicates the go binary the user +// wants to use for magefile compilation. +const GoCmdEnv = "MAGEFILE_GOCMD" + +// IgnoreDefaultEnv is the environment variable that indicates the user requested +// to ignore the default target specified in the magefile. +const IgnoreDefaultEnv = "MAGEFILE_IGNOREDEFAULT" + +// HashFastEnv is the environment variable that indicates the user requested to +// use a quick hash of magefiles to determine whether or not the magefile binary +// needs to be rebuilt. This results in faster runtimes, but means that mage +// will fail to rebuild if a dependency has changed. To force a rebuild, run +// mage with the -f flag. +const HashFastEnv = "MAGEFILE_HASHFAST" + +// EnableColorEnv is the environment variable that indicates the user is using +// a terminal which supports color output. The default is false for backwards +// compatibility. When the value is true and the detected terminal does support colors, +// then the list of mage targets will be displayed in ANSI color. When the value +// is true but the detected terminal does not support colors, then the list of +// mage targets will be displayed in the default colors (e.g. black and white). +const EnableColorEnv = "MAGEFILE_ENABLE_COLOR" + +// TargetColorEnv is the environment variable that indicates which ANSI color +// should be used to colorize mage targets. This is only applicable when +// the MAGEFILE_ENABLE_COLOR environment variable is true. +// The supported ANSI color names are any of these: +// - Black +// - Red +// - Green +// - Yellow +// - Blue +// - Magenta +// - Cyan +// - White +// - BrightBlack +// - BrightRed +// - BrightGreen +// - BrightYellow +// - BrightBlue +// - BrightMagenta +// - BrightCyan +// - BrightWhite +const TargetColorEnv = "MAGEFILE_TARGET_COLOR" + +// Verbose reports whether a magefile was run with the verbose flag. +func Verbose() bool { + b, _ := strconv.ParseBool(os.Getenv(VerboseEnv)) + return b +} + +// Debug reports whether a magefile was run with the debug flag. +func Debug() bool { + b, _ := strconv.ParseBool(os.Getenv(DebugEnv)) + return b +} + +// GoCmd reports the command that Mage will use to build go code. By default mage runs +// the "go" binary in the PATH. +func GoCmd() string { + if cmd := os.Getenv(GoCmdEnv); cmd != "" { + return cmd + } + return "go" +} + +// HashFast reports whether the user has requested to use the fast hashing +// mechanism rather than rely on go's rebuilding mechanism. +func HashFast() bool { + b, _ := strconv.ParseBool(os.Getenv(HashFastEnv)) + return b +} + +// IgnoreDefault reports whether the user has requested to ignore the default target +// in the magefile. +func IgnoreDefault() bool { + b, _ := strconv.ParseBool(os.Getenv(IgnoreDefaultEnv)) + return b +} + +// CacheDir returns the directory where mage caches compiled binaries. It +// defaults to $HOME/.magefile, but may be overridden by the MAGEFILE_CACHE +// environment variable. +func CacheDir() string { + d := os.Getenv(CacheEnv) + if d != "" { + return d + } + switch runtime.GOOS { + case "windows": + return filepath.Join(os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH"), "magefile") + default: + return filepath.Join(os.Getenv("HOME"), ".magefile") + } +} + +// EnableColor reports whether the user has requested to enable color output. 
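+// For example (illustrative): MAGEFILE_ENABLE_COLOR=true MAGEFILE_TARGET_COLOR=BrightGreen mage -l +// would list targets in BrightGreen on a capable terminal.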
+func EnableColor() bool { + b, _ := strconv.ParseBool(os.Getenv(EnableColorEnv)) + return b +} + +// TargetColor returns the configured ANSI color name for colorizing targets. +func TargetColor() string { + s, exists := os.LookupEnv(TargetColorEnv) + if exists { + if c, ok := getAnsiColor(s); ok { + return c + } + } + return DefaultTargetAnsiColor +} + +// Namespace allows for the grouping of similar commands. +type Namespace struct{} diff --git a/vendor/github.com/magefile/mage/sh/cmd.go b/vendor/github.com/magefile/mage/sh/cmd.go new file mode 100644 index 00000000000..312de65ae34 --- /dev/null +++ b/vendor/github.com/magefile/mage/sh/cmd.go @@ -0,0 +1,184 @@ +package sh + +import ( + "bytes" + "fmt" + "io" + "log" + "os" + "os/exec" + "strings" + + "github.com/magefile/mage/mg" +) + +// RunCmd returns a function that will call Run with the given command. This is +// useful for creating command aliases to make your scripts easier to read, like +// this: +// +// // in a helper file somewhere +// var g0 = sh.RunCmd("go") // go is a keyword :( +// +// // somewhere in your main code +// if err := g0("install", "github.com/gohugo/hugo"); err != nil { +// return err +// } +// +// Args passed to the command get baked in as args to the command when you run it. +// Any args passed in when you run the returned function will be appended to the +// original args. For example, this is equivalent to the above: +// +// var goInstall = sh.RunCmd("go", "install") goInstall("github.com/gohugo/hugo") +// +// RunCmd uses Exec underneath, so see those docs for more details. +func RunCmd(cmd string, args ...string) func(args ...string) error { + return func(args2 ...string) error { + return Run(cmd, append(args, args2...)...) + } +} + +// OutCmd is like RunCmd except the command returns the output of the +// command. +func OutCmd(cmd string, args ...string) func(args ...string) (string, error) { + return func(args2 ...string) (string, error) { + return Output(cmd, append(args, args2...)...) + } +} + +// Run is like RunWith, but doesn't specify any environment variables. +func Run(cmd string, args ...string) error { + return RunWith(nil, cmd, args...) +} + +// RunV is like Run, but always sends the command's stdout to os.Stdout. +func RunV(cmd string, args ...string) error { + _, err := Exec(nil, os.Stdout, os.Stderr, cmd, args...) + return err +} + +// RunWith runs the given command, directing stderr to this program's stderr and +// printing stdout to stdout if mage was run with -v. It adds env to the +// environment variables for the command being run. Environment variables should +// be in the format name=value. +func RunWith(env map[string]string, cmd string, args ...string) error { + var output io.Writer + if mg.Verbose() { + output = os.Stdout + } + _, err := Exec(env, output, os.Stderr, cmd, args...) + return err +} + +// RunWithV is like RunWith, but always sends the command's stdout to os.Stdout. +func RunWithV(env map[string]string, cmd string, args ...string) error { + _, err := Exec(env, os.Stdout, os.Stderr, cmd, args...) + return err +} + +// Output runs the command and returns the text from stdout. +func Output(cmd string, args ...string) (string, error) { + buf := &bytes.Buffer{} + _, err := Exec(nil, buf, os.Stderr, cmd, args...) + return strings.TrimSuffix(buf.String(), "\n"), err +} + +// OutputWith is like RunWith, but returns what is written to stdout. 
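+// +// A small usage sketch (the command and env value are assumptions): +// +// goos, err := sh.OutputWith(map[string]string{"CGO_ENABLED": "0"}, "go", "env", "GOOS")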
+func OutputWith(env map[string]string, cmd string, args ...string) (string, error) { + buf := &bytes.Buffer{} + _, err := Exec(env, buf, os.Stderr, cmd, args...) + return strings.TrimSuffix(buf.String(), "\n"), err +} + +// Exec executes the command, piping its stdout and stderr to the given +// writers. If the command fails, it will return an error that, if returned +// from a target or mg.Deps call, will cause mage to exit with the same code as +// the command failed with. Env is a list of environment variables to set when +// running the command; these override the current environment variables set +// (which are also passed to the command). cmd and args may include references +// to environment variables in $FOO format, in which case these will be +// expanded before the command is run. +// +// Ran reports if the command ran (rather than was not found or not executable). +// Code reports the exit code the command returned if it ran. If err == nil, ran +// is always true and code is always 0. +func Exec(env map[string]string, stdout, stderr io.Writer, cmd string, args ...string) (ran bool, err error) { + expand := func(s string) string { + s2, ok := env[s] + if ok { + return s2 + } + return os.Getenv(s) + } + cmd = os.Expand(cmd, expand) + for i := range args { + args[i] = os.Expand(args[i], expand) + } + ran, code, err := run(env, stdout, stderr, cmd, args...) + if err == nil { + return true, nil + } + if ran { + return ran, mg.Fatalf(code, `running "%s %s" failed with exit code %d`, cmd, strings.Join(args, " "), code) + } + return ran, fmt.Errorf(`failed to run "%s %s": %v`, cmd, strings.Join(args, " "), err) +} + +func run(env map[string]string, stdout, stderr io.Writer, cmd string, args ...string) (ran bool, code int, err error) { + c := exec.Command(cmd, args...) + c.Env = os.Environ() + for k, v := range env { + c.Env = append(c.Env, k+"="+v) + } + c.Stderr = stderr + c.Stdout = stdout + c.Stdin = os.Stdin + + var quoted []string + for i := range args { + quoted = append(quoted, fmt.Sprintf("%q", args[i])) + } + // Gate this log line on Verbose so commands executed from package-level + // variable initializations don't log unconditionally. + if mg.Verbose() { + log.Println("exec:", cmd, strings.Join(quoted, " ")) + } + err = c.Run() + return CmdRan(err), ExitStatus(err), err +} + +// CmdRan examines the error to determine if it was generated as a result of a +// command running via os/exec.Command. If the error is nil, or the command ran +// (even if it exited with a non-zero exit code), CmdRan reports true. If the +// error is an unrecognized type, or it is an error from exec.Command that says +// the command failed to run (usually due to the command not existing or not +// being executable), it reports false. +func CmdRan(err error) bool { + if err == nil { + return true + } + ee, ok := err.(*exec.ExitError) + if ok { + return ee.Exited() + } + return false +} + +type exitStatus interface { + ExitStatus() int +} + +// ExitStatus returns the exit status of the error if it is an exec.ExitError +// or if it implements ExitStatus() int. It returns 0 if the error is nil, and +// 1 if it is a different error. 
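+// +// For example (sketch): sh.ExitStatus(sh.Run("go", "vet", "./...")) yields go +// vet's exit code, or 0 on success.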
+func ExitStatus(err error) int { + if err == nil { + return 0 + } + if e, ok := err.(exitStatus); ok { + return e.ExitStatus() + } + if e, ok := err.(*exec.ExitError); ok { + if ex, ok := e.Sys().(exitStatus); ok { + return ex.ExitStatus() + } + } + return 1 +} diff --git a/vendor/github.com/magefile/mage/sh/helpers.go b/vendor/github.com/magefile/mage/sh/helpers.go new file mode 100644 index 00000000000..f5d20a2712b --- /dev/null +++ b/vendor/github.com/magefile/mage/sh/helpers.go @@ -0,0 +1,40 @@ +package sh + +import ( + "fmt" + "io" + "os" +) + +// Rm removes the given file or directory even if non-empty. It will not return +// an error if the target doesn't exist, only if the target cannot be removed. +func Rm(path string) error { + err := os.RemoveAll(path) + if err == nil || os.IsNotExist(err) { + return nil + } + return fmt.Errorf(`failed to remove %s: %v`, path, err) +} + +// Copy robustly copies the source file to the destination, overwriting the destination if necessary. +func Copy(dst string, src string) error { + from, err := os.Open(src) + if err != nil { + return fmt.Errorf(`can't copy %s: %v`, src, err) + } + defer from.Close() + finfo, err := from.Stat() + if err != nil { + return fmt.Errorf(`can't stat %s: %v`, src, err) + } + to, err := os.OpenFile(dst, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, finfo.Mode()) + if err != nil { + return fmt.Errorf(`can't copy to %s: %v`, dst, err) + } + defer to.Close() + _, err = io.Copy(to, from) + if err != nil { + return fmt.Errorf(`error copying %s to %s: %v`, src, dst, err) + } + return nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-copy-object.go b/vendor/github.com/minio/minio-go/v7/api-copy-object.go index 0c95d91ec76..b6cadc86a92 100644 --- a/vendor/github.com/minio/minio-go/v7/api-copy-object.go +++ b/vendor/github.com/minio/minio-go/v7/api-copy-object.go @@ -68,7 +68,7 @@ func (c *Client) CopyObject(ctx context.Context, dst CopyDestOptions, src CopySr Bucket: dst.Bucket, Key: dst.Object, LastModified: cpObjRes.LastModified, - ETag: trimEtag(resp.Header.Get("ETag")), + ETag: trimEtag(cpObjRes.ETag), VersionID: resp.Header.Get(amzVersionID), Expiration: expTime, ExpirationRuleID: ruleID, diff --git a/vendor/github.com/minio/minio-go/v7/api-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-datatypes.go index 97a6f80b259..8a8fd889856 100644 --- a/vendor/github.com/minio/minio-go/v7/api-datatypes.go +++ b/vendor/github.com/minio/minio-go/v7/api-datatypes.go @@ -143,10 +143,11 @@ type UploadInfo struct { // Verified checksum values, if any. // Values are base64 (standard) encoded. // For multipart objects this is a checksum of the checksum of each part. 
- ChecksumCRC32 string - ChecksumCRC32C string - ChecksumSHA1 string - ChecksumSHA256 string + ChecksumCRC32 string + ChecksumCRC32C string + ChecksumSHA1 string + ChecksumSHA256 string + ChecksumCRC64NVME string } // RestoreInfo contains information of the restore operation of an archived object @@ -215,10 +216,11 @@ type ObjectInfo struct { Restore *RestoreInfo // Checksum values - ChecksumCRC32 string - ChecksumCRC32C string - ChecksumSHA1 string - ChecksumSHA256 string + ChecksumCRC32 string + ChecksumCRC32C string + ChecksumSHA1 string + ChecksumSHA256 string + ChecksumCRC64NVME string Internal *struct { K int // Data blocks diff --git a/vendor/github.com/minio/minio-go/v7/api-get-object.go b/vendor/github.com/minio/minio-go/v7/api-get-object.go index d7fd27835ba..5cc85f61c23 100644 --- a/vendor/github.com/minio/minio-go/v7/api-get-object.go +++ b/vendor/github.com/minio/minio-go/v7/api-get-object.go @@ -318,7 +318,7 @@ func (o *Object) doGetRequest(request getRequest) (getResponse, error) { response := <-o.resCh // Return any error to the top level. - if response.Error != nil { + if response.Error != nil && response.Error != io.EOF { return response, response.Error } @@ -340,7 +340,7 @@ func (o *Object) doGetRequest(request getRequest) (getResponse, error) { // Data are ready on the wire, no need to reinitiate connection in lower level o.seekData = false - return response, nil + return response, response.Error } // setOffset - handles the setting of offsets for diff --git a/vendor/github.com/minio/minio-go/v7/api-presigned.go b/vendor/github.com/minio/minio-go/v7/api-presigned.go index 9e85f818167..29642200ee1 100644 --- a/vendor/github.com/minio/minio-go/v7/api-presigned.go +++ b/vendor/github.com/minio/minio-go/v7/api-presigned.go @@ -140,7 +140,7 @@ func (c *Client) PresignedPostPolicy(ctx context.Context, p *PostPolicy) (u *url } // Get credentials from the configured credentials provider. - credValues, err := c.credsProvider.Get() + credValues, err := c.credsProvider.GetWithContext(c.CredContext()) if err != nil { return nil, nil, err } diff --git a/vendor/github.com/minio/minio-go/v7/api-prompt-object.go b/vendor/github.com/minio/minio-go/v7/api-prompt-object.go new file mode 100644 index 00000000000..dac062a75b0 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-prompt-object.go @@ -0,0 +1,78 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2024 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "io" + "net/http" + + "github.com/goccy/go-json" + "github.com/minio/minio-go/v7/pkg/s3utils" +) + +// PromptObject performs language model inference with the prompt and referenced object as context. +// Inference is performed using a Lambda handler that can process the prompt and object. +// Currently, this functionality is limited to certain MinIO servers. 
+func (c *Client) PromptObject(ctx context.Context, bucketName, objectName, prompt string, opts PromptObjectOptions) (io.ReadCloser, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "InvalidBucketName", + Message: err.Error(), + } + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return nil, ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "XMinioInvalidObjectName", + Message: err.Error(), + } + } + + opts.AddLambdaArnToReqParams(opts.LambdaArn) + opts.SetHeader("Content-Type", "application/json") + opts.AddPromptArg("prompt", prompt) + promptReqBytes, err := json.Marshal(opts.PromptArgs) + if err != nil { + return nil, err + } + + // Execute POST on bucket/object. + resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: opts.toQueryValues(), + customHeader: opts.Header(), + contentSHA256Hex: sum256Hex(promptReqBytes), + contentBody: bytes.NewReader(promptReqBytes), + contentLength: int64(len(promptReqBytes)), + }) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + defer closeResponse(resp) + return nil, httpRespToErrorResponse(resp, bucketName, objectName) + } + + return resp.Body, nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-prompt-options.go b/vendor/github.com/minio/minio-go/v7/api-prompt-options.go new file mode 100644 index 00000000000..4493a75d4c7 --- /dev/null +++ b/vendor/github.com/minio/minio-go/v7/api-prompt-options.go @@ -0,0 +1,84 @@ +/* + * MinIO Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2024 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "net/http" + "net/url" +) + +// PromptObjectOptions provides options to the PromptObject call. +// LambdaArn is the ARN of the Prompt Lambda to be invoked. +// PromptArgs is a map of key-value pairs to be passed to the inference action on the Prompt Lambda. +// "prompt" is a reserved key and should not be used as a key in PromptArgs. +type PromptObjectOptions struct { + LambdaArn string + PromptArgs map[string]any + headers map[string]string + reqParams url.Values +} + +// Header returns the http.Header representation of the POST options. +func (o PromptObjectOptions) Header() http.Header { + headers := make(http.Header, len(o.headers)) + for k, v := range o.headers { + headers.Set(k, v) + } + return headers +} + +// AddPromptArg adds a key-value pair to the prompt arguments, where the key is a +// string and the value is JSON-serializable. +func (o *PromptObjectOptions) AddPromptArg(key string, value any) { + if o.PromptArgs == nil { + o.PromptArgs = make(map[string]any) + } + o.PromptArgs[key] = value +} + +// AddLambdaArnToReqParams adds the lambdaArn to the request query string parameters.
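For callers, the new PromptObject API reduces to one call plus options. A hedged sketch: the endpoint, credentials, bucket, object, Lambda ARN, and the max_tokens argument below are all placeholders, and the target server must actually expose the Prompt Lambda feature:

```go
package main

import (
	"context"
	"fmt"
	"io"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	opts := minio.PromptObjectOptions{LambdaArn: "arn:minio:s3-object-lambda::function:webhook"}
	opts.AddPromptArg("max_tokens", 256) // extra, Lambda-defined key; "prompt" itself is reserved

	rc, err := client.PromptObject(context.Background(), "my-bucket", "report.pdf",
		"Summarize this document in three bullet points.", opts)
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	out, err := io.ReadAll(rc) // the inference result is streamed back as the response body
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out))
}
```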
+func (o *PromptObjectOptions) AddLambdaArnToReqParams(lambdaArn string) { + if o.reqParams == nil { + o.reqParams = make(url.Values) + } + o.reqParams.Add("lambdaArn", lambdaArn) +} + +// SetHeader adds a key value pair to the options. The +// key-value pair will be part of the HTTP POST request +// headers. +func (o *PromptObjectOptions) SetHeader(key, value string) { + if o.headers == nil { + o.headers = make(map[string]string) + } + o.headers[http.CanonicalHeaderKey(key)] = value +} + +// toQueryValues - Convert the reqParams in Options to query string parameters. +func (o *PromptObjectOptions) toQueryValues() url.Values { + urlValues := make(url.Values) + if o.reqParams != nil { + for key, values := range o.reqParams { + for _, value := range values { + urlValues.Add(key, value) + } + } + } + + return urlValues +} diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go b/vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go index 0ae9142e1d3..3023b949cd4 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go @@ -85,7 +85,10 @@ func (c *Client) PutObjectFanOut(ctx context.Context, bucket string, fanOutData policy.SetEncryption(fanOutReq.SSE) // Set checksum headers if any. - policy.SetChecksum(fanOutReq.Checksum) + err := policy.SetChecksum(fanOutReq.Checksum) + if err != nil { + return nil, err + } url, formData, err := c.PresignedPostPolicy(ctx, policy) if err != nil { diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go index a70cbea9e57..03bd34f76ba 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go @@ -83,10 +83,7 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj // HTTPS connection. hashAlgos, hashSums := c.hashMaterials(opts.SendContentMd5, !opts.DisableContentSha256) if len(hashSums) == 0 { - if opts.UserMetadata == nil { - opts.UserMetadata = make(map[string]string, 1) - } - opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String() + addAutoChecksumHeaders(&opts) } // Initiate a new multipart upload. @@ -113,7 +110,6 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj // Create checksums // CRC32C is ~50% faster on AMD64 @ 30GB/s - var crcBytes []byte customHeader := make(http.Header) crc := opts.AutoChecksum.Hasher() for partNumber <= totalPartsCount { @@ -154,7 +150,6 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj crc.Write(buf[:length]) cSum := crc.Sum(nil) customHeader.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(cSum)) - crcBytes = append(crcBytes, cSum...) } p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: rd, partNumber: partNumber, md5Base64: md5Base64, sha256Hex: sha256Hex, size: int64(length), sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader} @@ -182,18 +177,21 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj // Loop over total uploaded parts to save them in // Parts array before completing the multipart request. 
+ allParts := make([]ObjectPart, 0, len(partsInfo)) for i := 1; i < partNumber; i++ { part, ok := partsInfo[i] if !ok { return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i)) } + allParts = append(allParts, part) complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ - ETag: part.ETag, - PartNumber: part.PartNumber, - ChecksumCRC32: part.ChecksumCRC32, - ChecksumCRC32C: part.ChecksumCRC32C, - ChecksumSHA1: part.ChecksumSHA1, - ChecksumSHA256: part.ChecksumSHA256, + ETag: part.ETag, + PartNumber: part.PartNumber, + ChecksumCRC32: part.ChecksumCRC32, + ChecksumCRC32C: part.ChecksumCRC32C, + ChecksumSHA1: part.ChecksumSHA1, + ChecksumSHA256: part.ChecksumSHA256, + ChecksumCRC64NVME: part.ChecksumCRC64NVME, }) } @@ -203,12 +201,8 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj ServerSideEncryption: opts.ServerSideEncryption, AutoChecksum: opts.AutoChecksum, } - if len(crcBytes) > 0 { - // Add hash of hashes. - crc.Reset() - crc.Write(crcBytes) - opts.UserMetadata = map[string]string{opts.AutoChecksum.Key(): base64.StdEncoding.EncodeToString(crc.Sum(nil))} - } + applyAutoChecksum(&opts, allParts) + uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts) if err != nil { return UploadInfo{}, err @@ -354,10 +348,11 @@ func (c *Client) uploadPart(ctx context.Context, p uploadPartParams) (ObjectPart // Once successfully uploaded, return completed part. h := resp.Header objPart := ObjectPart{ - ChecksumCRC32: h.Get("x-amz-checksum-crc32"), - ChecksumCRC32C: h.Get("x-amz-checksum-crc32c"), - ChecksumSHA1: h.Get("x-amz-checksum-sha1"), - ChecksumSHA256: h.Get("x-amz-checksum-sha256"), + ChecksumCRC32: h.Get(ChecksumCRC32.Key()), + ChecksumCRC32C: h.Get(ChecksumCRC32C.Key()), + ChecksumSHA1: h.Get(ChecksumSHA1.Key()), + ChecksumSHA256: h.Get(ChecksumSHA256.Key()), + ChecksumCRC64NVME: h.Get(ChecksumCRC64NVME.Key()), } objPart.Size = p.size objPart.PartNumber = p.partNumber @@ -457,9 +452,10 @@ func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, object Expiration: expTime, ExpirationRuleID: ruleID, - ChecksumSHA256: completeMultipartUploadResult.ChecksumSHA256, - ChecksumSHA1: completeMultipartUploadResult.ChecksumSHA1, - ChecksumCRC32: completeMultipartUploadResult.ChecksumCRC32, - ChecksumCRC32C: completeMultipartUploadResult.ChecksumCRC32C, + ChecksumSHA256: completeMultipartUploadResult.ChecksumSHA256, + ChecksumSHA1: completeMultipartUploadResult.ChecksumSHA1, + ChecksumCRC32: completeMultipartUploadResult.ChecksumCRC32, + ChecksumCRC32C: completeMultipartUploadResult.ChecksumCRC32C, + ChecksumCRC64NVME: completeMultipartUploadResult.ChecksumCRC64NVME, }, nil } diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go index dac4c0efefd..3ff3b69efd6 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go @@ -113,10 +113,7 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN } withChecksum := c.trailingHeaderSupport if withChecksum { - if opts.UserMetadata == nil { - opts.UserMetadata = make(map[string]string, 1) - } - opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String() + addAutoChecksumHeaders(&opts) } // Initiate a new multipart upload. 
uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) @@ -240,6 +237,7 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN // Gather the responses as they occur and update any // progress bar. + allParts := make([]ObjectPart, 0, totalPartsCount) for u := 1; u <= totalPartsCount; u++ { select { case <-ctx.Done(): @@ -248,16 +246,17 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN if uploadRes.Error != nil { return UploadInfo{}, uploadRes.Error } - + allParts = append(allParts, uploadRes.Part) // Update the totalUploadedSize. totalUploadedSize += uploadRes.Size complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ - ETag: uploadRes.Part.ETag, - PartNumber: uploadRes.Part.PartNumber, - ChecksumCRC32: uploadRes.Part.ChecksumCRC32, - ChecksumCRC32C: uploadRes.Part.ChecksumCRC32C, - ChecksumSHA1: uploadRes.Part.ChecksumSHA1, - ChecksumSHA256: uploadRes.Part.ChecksumSHA256, + ETag: uploadRes.Part.ETag, + PartNumber: uploadRes.Part.PartNumber, + ChecksumCRC32: uploadRes.Part.ChecksumCRC32, + ChecksumCRC32C: uploadRes.Part.ChecksumCRC32C, + ChecksumSHA1: uploadRes.Part.ChecksumSHA1, + ChecksumSHA256: uploadRes.Part.ChecksumSHA256, + ChecksumCRC64NVME: uploadRes.Part.ChecksumCRC64NVME, }) } } @@ -275,15 +274,7 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN AutoChecksum: opts.AutoChecksum, } if withChecksum { - // Add hash of hashes. - crc := opts.AutoChecksum.Hasher() - for _, part := range complMultipartUpload.Parts { - cs, err := base64.StdEncoding.DecodeString(part.Checksum(opts.AutoChecksum)) - if err == nil { - crc.Write(cs) - } - } - opts.UserMetadata = map[string]string{opts.AutoChecksum.KeyCapitalized(): base64.StdEncoding.EncodeToString(crc.Sum(nil))} + applyAutoChecksum(&opts, allParts) } uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts) @@ -312,10 +303,7 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b } if !opts.SendContentMd5 { - if opts.UserMetadata == nil { - opts.UserMetadata = make(map[string]string, 1) - } - opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String() + addAutoChecksumHeaders(&opts) } // Calculate the optimal parts info for a given size. @@ -342,7 +330,6 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b // Create checksums // CRC32C is ~50% faster on AMD64 @ 30GB/s - var crcBytes []byte customHeader := make(http.Header) crc := opts.AutoChecksum.Hasher() md5Hash := c.md5Hasher() @@ -389,7 +376,6 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b crc.Write(buf[:length]) cSum := crc.Sum(nil) customHeader.Set(opts.AutoChecksum.KeyCapitalized(), base64.StdEncoding.EncodeToString(cSum)) - crcBytes = append(crcBytes, cSum...) } // Update progress reader appropriately to the latest offset @@ -420,18 +406,21 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b // Loop over total uploaded parts to save them in // Parts array before completing the multipart request. 
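From the caller's perspective, the checksum plumbing being refactored in these hunks (announce the algorithm up front, collect the parts, then derive the object-level checksum) stays invisible: you pick a checksum type, optionally with the full-object modifier, and enable trailing headers on the client when checksums should ride as HTTP trailers. A sketch with placeholder endpoint, credentials, and bucket, assuming a server that supports full-object checksums:

```go
package main

import (
	"bytes"
	"context"
	"fmt"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	c, err := minio.New("play.min.io", &minio.Options{
		Creds:           credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure:          true,
		TrailingHeaders: true, // needed for checksums sent as HTTP trailers
	})
	if err != nil {
		log.Fatal(err)
	}

	data := bytes.Repeat([]byte("0123456789abcdef"), 1<<20) // 16 MiB of sample data
	info, err := c.PutObject(context.Background(), "my-bucket", "my-object",
		bytes.NewReader(data), int64(len(data)), minio.PutObjectOptions{
			Checksum: minio.ChecksumFullObjectCRC32C, // one CRC for the whole object
		})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("CRC32C:", info.ChecksumCRC32C)
}
```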
+ allParts := make([]ObjectPart, 0, len(partsInfo)) for i := 1; i < partNumber; i++ { part, ok := partsInfo[i] if !ok { return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i)) } + allParts = append(allParts, part) complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ - ETag: part.ETag, - PartNumber: part.PartNumber, - ChecksumCRC32: part.ChecksumCRC32, - ChecksumCRC32C: part.ChecksumCRC32C, - ChecksumSHA1: part.ChecksumSHA1, - ChecksumSHA256: part.ChecksumSHA256, + ETag: part.ETag, + PartNumber: part.PartNumber, + ChecksumCRC32: part.ChecksumCRC32, + ChecksumCRC32C: part.ChecksumCRC32C, + ChecksumSHA1: part.ChecksumSHA1, + ChecksumSHA256: part.ChecksumSHA256, + ChecksumCRC64NVME: part.ChecksumCRC64NVME, }) } @@ -442,12 +431,7 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b ServerSideEncryption: opts.ServerSideEncryption, AutoChecksum: opts.AutoChecksum, } - if len(crcBytes) > 0 { - // Add hash of hashes. - crc.Reset() - crc.Write(crcBytes) - opts.UserMetadata = map[string]string{opts.AutoChecksum.KeyCapitalized(): base64.StdEncoding.EncodeToString(crc.Sum(nil))} - } + applyAutoChecksum(&opts, allParts) uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts) if err != nil { return UploadInfo{}, err @@ -475,10 +459,7 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam opts.AutoChecksum = opts.Checksum } if !opts.SendContentMd5 { - if opts.UserMetadata == nil { - opts.UserMetadata = make(map[string]string, 1) - } - opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String() + addAutoChecksumHeaders(&opts) } // Cancel all when an error occurs. @@ -510,7 +491,6 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam // Create checksums // CRC32C is ~50% faster on AMD64 @ 30GB/s - var crcBytes []byte crc := opts.AutoChecksum.Hasher() // Total data read and written to server. should be equal to 'size' at the end of the call. @@ -570,7 +550,6 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam crc.Write(buf[:length]) cSum := crc.Sum(nil) customHeader.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(cSum)) - crcBytes = append(crcBytes, cSum...) } wg.Add(1) @@ -630,18 +609,21 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam // Loop over total uploaded parts to save them in // Parts array before completing the multipart request. 
+ allParts := make([]ObjectPart, 0, len(partsInfo)) for i := 1; i < partNumber; i++ { part, ok := partsInfo[i] if !ok { return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i)) } + allParts = append(allParts, part) complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ - ETag: part.ETag, - PartNumber: part.PartNumber, - ChecksumCRC32: part.ChecksumCRC32, - ChecksumCRC32C: part.ChecksumCRC32C, - ChecksumSHA1: part.ChecksumSHA1, - ChecksumSHA256: part.ChecksumSHA256, + ETag: part.ETag, + PartNumber: part.PartNumber, + ChecksumCRC32: part.ChecksumCRC32, + ChecksumCRC32C: part.ChecksumCRC32C, + ChecksumSHA1: part.ChecksumSHA1, + ChecksumSHA256: part.ChecksumSHA256, + ChecksumCRC64NVME: part.ChecksumCRC64NVME, }) } @@ -652,12 +634,8 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam ServerSideEncryption: opts.ServerSideEncryption, AutoChecksum: opts.AutoChecksum, } - if len(crcBytes) > 0 { - // Add hash of hashes. - crc.Reset() - crc.Write(crcBytes) - opts.UserMetadata = map[string]string{opts.AutoChecksum.KeyCapitalized(): base64.StdEncoding.EncodeToString(crc.Sum(nil))} - } + applyAutoChecksum(&opts, allParts) + uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts) if err != nil { return UploadInfo{}, err @@ -823,9 +801,10 @@ func (c *Client) putObjectDo(ctx context.Context, bucketName, objectName string, ExpirationRuleID: ruleID, // Checksum values - ChecksumCRC32: h.Get("x-amz-checksum-crc32"), - ChecksumCRC32C: h.Get("x-amz-checksum-crc32c"), - ChecksumSHA1: h.Get("x-amz-checksum-sha1"), - ChecksumSHA256: h.Get("x-amz-checksum-sha256"), + ChecksumCRC32: h.Get(ChecksumCRC32.Key()), + ChecksumCRC32C: h.Get(ChecksumCRC32C.Key()), + ChecksumSHA1: h.Get(ChecksumSHA1.Key()), + ChecksumSHA256: h.Get(ChecksumSHA256.Key()), + ChecksumCRC64NVME: h.Get(ChecksumCRC64NVME.Key()), }, nil } diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object.go b/vendor/github.com/minio/minio-go/v7/api-put-object.go index 10131a5be63..09817578412 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object.go @@ -387,10 +387,7 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam opts.AutoChecksum = opts.Checksum } if !opts.SendContentMd5 { - if opts.UserMetadata == nil { - opts.UserMetadata = make(map[string]string, 1) - } - opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String() + addAutoChecksumHeaders(&opts) } // Initiate a new multipart upload. @@ -417,7 +414,6 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam // Create checksums // CRC32C is ~50% faster on AMD64 @ 30GB/s - var crcBytes []byte customHeader := make(http.Header) crc := opts.AutoChecksum.Hasher() @@ -443,7 +439,6 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam crc.Write(buf[:length]) cSum := crc.Sum(nil) customHeader.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(cSum)) - crcBytes = append(crcBytes, cSum...) } // Update progress reader appropriately to the latest offset @@ -475,18 +470,21 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam // Loop over total uploaded parts to save them in // Parts array before completing the multipart request. 
+ allParts := make([]ObjectPart, 0, len(partsInfo)) for i := 1; i < partNumber; i++ { part, ok := partsInfo[i] if !ok { return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i)) } + allParts = append(allParts, part) complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ - ETag: part.ETag, - PartNumber: part.PartNumber, - ChecksumCRC32: part.ChecksumCRC32, - ChecksumCRC32C: part.ChecksumCRC32C, - ChecksumSHA1: part.ChecksumSHA1, - ChecksumSHA256: part.ChecksumSHA256, + ETag: part.ETag, + PartNumber: part.PartNumber, + ChecksumCRC32: part.ChecksumCRC32, + ChecksumCRC32C: part.ChecksumCRC32C, + ChecksumSHA1: part.ChecksumSHA1, + ChecksumSHA256: part.ChecksumSHA256, + ChecksumCRC64NVME: part.ChecksumCRC64NVME, }) } @@ -497,12 +495,8 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam ServerSideEncryption: opts.ServerSideEncryption, AutoChecksum: opts.AutoChecksum, } - if len(crcBytes) > 0 { - // Add hash of hashes. - crc.Reset() - crc.Write(crcBytes) - opts.UserMetadata = map[string]string{opts.AutoChecksum.KeyCapitalized(): base64.StdEncoding.EncodeToString(crc.Sum(nil))} - } + applyAutoChecksum(&opts, allParts) + uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts) if err != nil { return UploadInfo{}, err diff --git a/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go index 790606c509d..5e015fb8279 100644 --- a/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go +++ b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go @@ -18,6 +18,7 @@ package minio import ( + "encoding/base64" "encoding/xml" "errors" "io" @@ -276,10 +277,45 @@ type ObjectPart struct { Size int64 // Checksum values of each part. - ChecksumCRC32 string - ChecksumCRC32C string - ChecksumSHA1 string - ChecksumSHA256 string + ChecksumCRC32 string + ChecksumCRC32C string + ChecksumSHA1 string + ChecksumSHA256 string + ChecksumCRC64NVME string +} + +// Checksum will return the checksum for the given type. +// Will return the empty string if not set. +func (c ObjectPart) Checksum(t ChecksumType) string { + switch { + case t.Is(ChecksumCRC32C): + return c.ChecksumCRC32C + case t.Is(ChecksumCRC32): + return c.ChecksumCRC32 + case t.Is(ChecksumSHA1): + return c.ChecksumSHA1 + case t.Is(ChecksumSHA256): + return c.ChecksumSHA256 + case t.Is(ChecksumCRC64NVME): + return c.ChecksumCRC64NVME + } + return "" +} + +// ChecksumRaw returns the decoded checksum from the part. +func (c ObjectPart) ChecksumRaw(t ChecksumType) ([]byte, error) { + b64 := c.Checksum(t) + if b64 == "" { + return nil, errors.New("no checksum set") + } + decoded, err := base64.StdEncoding.DecodeString(b64) + if err != nil { + return nil, err + } + if len(decoded) != t.RawByteLen() { + return nil, errors.New("checksum length mismatch") + } + return decoded, nil } // ListObjectPartsResult container for ListObjectParts response. @@ -296,6 +332,12 @@ type ListObjectPartsResult struct { NextPartNumberMarker int MaxParts int + // ChecksumAlgorithm will be CRC32, CRC32C, etc. + ChecksumAlgorithm string + + // ChecksumType is FULL_OBJECT or COMPOSITE (assume COMPOSITE when unset) + ChecksumType string + // Indicates whether the returned list of parts is truncated. IsTruncated bool ObjectParts []ObjectPart `xml:"Part"` @@ -320,10 +362,11 @@ type completeMultipartUploadResult struct { ETag string // Checksum values, hash of hashes of parts. 
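The new ObjectPart accessors make the per-type checksum fields uniform to consume: Checksum returns the base64 string for a given type and ChecksumRaw decodes it, enforcing the type's RawByteLen. A small sketch; it borrows EncodeToString, added further down in checksum.go, to build a valid value:

```go
package main

import (
	"fmt"
	"log"

	"github.com/minio/minio-go/v7"
)

func main() {
	part := minio.ObjectPart{
		PartNumber:     1,
		Size:           5,
		ChecksumCRC32C: minio.ChecksumCRC32C.EncodeToString([]byte("hello")),
	}

	fmt.Println(part.Checksum(minio.ChecksumCRC32C)) // base64 digest
	fmt.Println(part.Checksum(minio.ChecksumSHA256)) // "" since SHA256 is not set on this part

	raw, err := part.ChecksumRaw(minio.ChecksumCRC32C)
	if err != nil {
		log.Fatal(err) // errors on unset values or on a length mismatch
	}
	fmt.Printf("%x (%d bytes)\n", raw, len(raw)) // 4 bytes, matching RawByteLen
}
```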
- ChecksumCRC32 string - ChecksumCRC32C string - ChecksumSHA1 string - ChecksumSHA256 string + ChecksumCRC32 string + ChecksumCRC32C string + ChecksumSHA1 string + ChecksumSHA256 string + ChecksumCRC64NVME string } // CompletePart sub container lists individual part numbers and their @@ -334,10 +377,11 @@ type CompletePart struct { ETag string // Checksum values - ChecksumCRC32 string `xml:"ChecksumCRC32,omitempty"` - ChecksumCRC32C string `xml:"ChecksumCRC32C,omitempty"` - ChecksumSHA1 string `xml:"ChecksumSHA1,omitempty"` - ChecksumSHA256 string `xml:"ChecksumSHA256,omitempty"` + ChecksumCRC32 string `xml:"ChecksumCRC32,omitempty"` + ChecksumCRC32C string `xml:"ChecksumCRC32C,omitempty"` + ChecksumSHA1 string `xml:"ChecksumSHA1,omitempty"` + ChecksumSHA256 string `xml:"ChecksumSHA256,omitempty"` + ChecksumCRC64NVME string `xml:",omitempty"` } // Checksum will return the checksum for the given type. @@ -352,6 +396,8 @@ func (c CompletePart) Checksum(t ChecksumType) string { return c.ChecksumSHA1 case t.Is(ChecksumSHA256): return c.ChecksumSHA256 + case t.Is(ChecksumCRC64NVME): + return c.ChecksumCRC64NVME } return "" } diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go index 380ec4fdefe..cc0ded2c7f2 100644 --- a/vendor/github.com/minio/minio-go/v7/api.go +++ b/vendor/github.com/minio/minio-go/v7/api.go @@ -133,7 +133,7 @@ type Options struct { // Global constants. const ( libraryName = "minio-go" - libraryVersion = "v7.0.80" + libraryVersion = "v7.0.84" ) // User Agent should always following the below style. @@ -600,9 +600,9 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ return nil, errors.New(c.endpointURL.String() + " is offline.") } - var retryable bool // Indicates if request can be retried. - var bodySeeker io.Seeker // Extracted seeker from io.Reader. - var reqRetry = c.maxRetries // Indicates how many times we can retry the request + var retryable bool // Indicates if request can be retried. + var bodySeeker io.Seeker // Extracted seeker from io.Reader. + reqRetry := c.maxRetries // Indicates how many times we can retry the request if metadata.contentBody != nil { // Check if body is seekable then it is retryable. @@ -808,7 +808,7 @@ func (c *Client) newRequest(ctx context.Context, method string, metadata request } // Get credentials from the configured credentials provider. - value, err := c.credsProvider.Get() + value, err := c.credsProvider.GetWithContext(c.CredContext()) if err != nil { return nil, err } @@ -1018,3 +1018,15 @@ func (c *Client) isVirtualHostStyleRequest(url url.URL, bucketName string) bool // path style requests return s3utils.IsVirtualHostSupported(url, bucketName) } + +// CredContext returns the context for fetching credentials +func (c *Client) CredContext() *credentials.CredContext { + httpClient := c.httpClient + if httpClient == nil { + httpClient = http.DefaultClient + } + return &credentials.CredContext{ + Client: httpClient, + Endpoint: c.endpointURL.String(), + } +} diff --git a/vendor/github.com/minio/minio-go/v7/bucket-cache.go b/vendor/github.com/minio/minio-go/v7/bucket-cache.go index b1d3b3852cf..4e4305acd5b 100644 --- a/vendor/github.com/minio/minio-go/v7/bucket-cache.go +++ b/vendor/github.com/minio/minio-go/v7/bucket-cache.go @@ -212,7 +212,7 @@ func (c *Client) getBucketLocationRequest(ctx context.Context, bucketName string c.setUserAgent(req) // Get credentials from the configured credentials provider. 
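The Get() to GetWithContext(c.CredContext()) migration visible in these hunks threads the client's HTTP transport and endpoint through to credential providers, with CredContext falling back to http.DefaultClient when the client has none. A sketch of the call shape, with placeholder endpoint and keys; static providers simply ignore the context:

```go
package main

import (
	"fmt"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	c, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	// CredContext hands providers the client's HTTP client and endpoint, so
	// credential fetches (e.g. STS round trips) can reuse the same transport.
	cc := c.CredContext()
	fmt.Println(cc.Endpoint)

	// The same call shape newRequest and getBucketLocationRequest now use.
	v, err := credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", "").GetWithContext(cc)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(v.SignerType)
}
```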
- value, err := c.credsProvider.Get() + value, err := c.credsProvider.GetWithContext(c.CredContext()) if err != nil { return nil, err } diff --git a/vendor/github.com/minio/minio-go/v7/checksum.go b/vendor/github.com/minio/minio-go/v7/checksum.go index 7eb1bf25abf..8e4c27ce42f 100644 --- a/vendor/github.com/minio/minio-go/v7/checksum.go +++ b/vendor/github.com/minio/minio-go/v7/checksum.go @@ -21,11 +21,15 @@ import ( "crypto/sha1" "crypto/sha256" "encoding/base64" + "encoding/binary" + "errors" "hash" "hash/crc32" + "hash/crc64" "io" "math/bits" "net/http" + "sort" ) // ChecksumType contains information about the checksum type. @@ -41,23 +45,41 @@ const ( ChecksumCRC32 // ChecksumCRC32C indicates a CRC32 checksum with Castagnoli table. ChecksumCRC32C + // ChecksumCRC64NVME indicates CRC64 with 0xad93d23594c93659 polynomial. + ChecksumCRC64NVME // Keep after all valid checksums checksumLast + // ChecksumFullObject is a modifier that can be used on CRC32 and CRC32C + // to indicate full object checksums. + ChecksumFullObject + // checksumMask is a mask for valid checksum types. checksumMask = checksumLast - 1 // ChecksumNone indicates no checksum. ChecksumNone ChecksumType = 0 - amzChecksumAlgo = "x-amz-checksum-algorithm" - amzChecksumCRC32 = "x-amz-checksum-crc32" - amzChecksumCRC32C = "x-amz-checksum-crc32c" - amzChecksumSHA1 = "x-amz-checksum-sha1" - amzChecksumSHA256 = "x-amz-checksum-sha256" + // ChecksumFullObjectCRC32 indicates full object CRC32 + ChecksumFullObjectCRC32 = ChecksumCRC32 | ChecksumFullObject + + // ChecksumFullObjectCRC32C indicates full object CRC32C + ChecksumFullObjectCRC32C = ChecksumCRC32C | ChecksumFullObject + + amzChecksumAlgo = "x-amz-checksum-algorithm" + amzChecksumCRC32 = "x-amz-checksum-crc32" + amzChecksumCRC32C = "x-amz-checksum-crc32c" + amzChecksumSHA1 = "x-amz-checksum-sha1" + amzChecksumSHA256 = "x-amz-checksum-sha256" + amzChecksumCRC64NVME = "x-amz-checksum-crc64nvme" ) +// Base returns the base type, without modifiers. +func (c ChecksumType) Base() ChecksumType { + return c & checksumMask +} + // Is returns if c is all of t. func (c ChecksumType) Is(t ChecksumType) bool { return c&t == t @@ -75,10 +97,39 @@ func (c ChecksumType) Key() string { return amzChecksumSHA1 case ChecksumSHA256: return amzChecksumSHA256 + case ChecksumCRC64NVME: + return amzChecksumCRC64NVME } return "" } +// CanComposite will return if the checksum type can be used for composite multipart upload on AWS. +func (c ChecksumType) CanComposite() bool { + switch c & checksumMask { + case ChecksumSHA256, ChecksumSHA1, ChecksumCRC32, ChecksumCRC32C: + return true + } + return false +} + +// CanMergeCRC will return if the checksum type can be used for multipart upload on AWS. +func (c ChecksumType) CanMergeCRC() bool { + switch c & checksumMask { + case ChecksumCRC32, ChecksumCRC32C, ChecksumCRC64NVME: + return true + } + return false +} + +// FullObjectRequested will return if the checksum type indicates full object checksum was requested. +func (c ChecksumType) FullObjectRequested() bool { + switch c & (ChecksumFullObject | checksumMask) { + case ChecksumFullObjectCRC32C, ChecksumFullObjectCRC32, ChecksumCRC64NVME: + return true + } + return false +} + // KeyCapitalized returns the capitalized key as used in HTTP headers. 
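Because ChecksumFullObject is a bit flag layered on an existing base type rather than a new type, the predicates above compose as plain bitmask checks. A sketch of what they report:

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go/v7"
)

func main() {
	cs := minio.ChecksumFullObjectCRC32C // ChecksumCRC32C | ChecksumFullObject

	fmt.Println(cs.Base() == minio.ChecksumCRC32C)             // true: Base strips modifiers
	fmt.Println(cs.FullObjectRequested())                      // true: modifier set on a CRC type
	fmt.Println(minio.ChecksumCRC64NVME.FullObjectRequested()) // true: CRC64NVME is inherently full-object
	fmt.Println(minio.ChecksumSHA256.CanComposite())           // true: SHA256 supports composite multipart
	fmt.Println(minio.ChecksumSHA256.CanMergeCRC())            // false: only CRC types can be merged
}
```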
func (c ChecksumType) KeyCapitalized() string { return http.CanonicalHeaderKey(c.Key()) @@ -93,10 +144,17 @@ func (c ChecksumType) RawByteLen() int { return sha1.Size case ChecksumSHA256: return sha256.Size + case ChecksumCRC64NVME: + return crc64.Size } return 0 } +const crc64NVMEPolynomial = 0xad93d23594c93659 + +// crc64 uses reversed polynomials. +var crc64Table = crc64.MakeTable(bits.Reverse64(crc64NVMEPolynomial)) + // Hasher returns a hasher corresponding to the checksum type. // Returns nil if no checksum. func (c ChecksumType) Hasher() hash.Hash { @@ -109,13 +167,15 @@ func (c ChecksumType) Hasher() hash.Hash { return sha1.New() case ChecksumSHA256: return sha256.New() + case ChecksumCRC64NVME: + return crc64.New(crc64Table) } return nil } // IsSet returns whether the type is valid and known. func (c ChecksumType) IsSet() bool { - return bits.OnesCount32(uint32(c)) == 1 + return bits.OnesCount32(uint32(c&checksumMask)) == 1 } // SetDefault will set the checksum if not already set. @@ -125,6 +185,16 @@ func (c *ChecksumType) SetDefault(t ChecksumType) { } } +// EncodeToString returns the base64-encoded hash of the content provided in b. +func (c ChecksumType) EncodeToString(b []byte) string { + if !c.IsSet() { + return "" + } + h := c.Hasher() + h.Write(b) + return base64.StdEncoding.EncodeToString(h.Sum(nil)) +} + // String returns the type as a string. // CRC32, CRC32C, CRC64NVME, SHA1, and SHA256 for valid values. // Empty string for unset or invalid values. @@ -140,6 +210,8 @@ func (c ChecksumType) String() string { return "SHA256" case ChecksumNone: return "" + case ChecksumCRC64NVME: + return "CRC64NVME" } return "" } @@ -221,3 +293,129 @@ func (c Checksum) Raw() []byte { } return c.r } + +// CompositeChecksum returns the composite checksum of all provided parts. +func (c ChecksumType) CompositeChecksum(p []ObjectPart) (*Checksum, error) { + if !c.CanComposite() { + return nil, errors.New("cannot do composite checksum") + } + sort.Slice(p, func(i, j int) bool { + return p[i].PartNumber < p[j].PartNumber + }) + c = c.Base() + crcBytes := make([]byte, 0, len(p)*c.RawByteLen()) + for _, part := range p { + pCrc, err := part.ChecksumRaw(c) + if err != nil { + return nil, err + } + crcBytes = append(crcBytes, pCrc...) + } + h := c.Hasher() + h.Write(crcBytes) + return &Checksum{Type: c, r: h.Sum(nil)}, nil +} + +// FullObjectChecksum will return the full object checksum from provided parts.
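CompositeChecksum implements the AWS multipart convention, a "checksum of checksums": sort parts by part number, concatenate their raw digests, and hash the result. A sketch that uses EncodeToString to fabricate valid part checksums:

```go
package main

import (
	"fmt"
	"log"

	"github.com/minio/minio-go/v7"
)

func main() {
	a := minio.ChecksumCRC32C.EncodeToString([]byte("hello, "))
	b := minio.ChecksumCRC32C.EncodeToString([]byte("world"))

	parts := []minio.ObjectPart{
		{PartNumber: 2, Size: 5, ChecksumCRC32C: b}, // input order does not matter:
		{PartNumber: 1, Size: 7, ChecksumCRC32C: a}, // CompositeChecksum sorts by part number
	}

	comp, err := minio.ChecksumCRC32C.CompositeChecksum(parts)
	if err != nil {
		log.Fatal(err)
	}
	// CRC32C computed over the concatenated raw per-part CRCs.
	fmt.Println(comp.Encoded())
}
```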
+func (c ChecksumType) FullObjectChecksum(p []ObjectPart) (*Checksum, error) { + if !c.CanMergeCRC() { + return nil, errors.New("cannot merge this checksum type") + } + c = c.Base() + sort.Slice(p, func(i, j int) bool { + return p[i].PartNumber < p[j].PartNumber + }) + + switch len(p) { + case 0: + return nil, errors.New("no parts given") + case 1: + check, err := p[0].ChecksumRaw(c) + if err != nil { + return nil, err + } + return &Checksum{ + Type: c, + r: check, + }, nil + } + var merged uint32 + var merged64 uint64 + first, err := p[0].ChecksumRaw(c) + if err != nil { + return nil, err + } + sz := p[0].Size + switch c { + case ChecksumCRC32, ChecksumCRC32C: + merged = binary.BigEndian.Uint32(first) + case ChecksumCRC64NVME: + merged64 = binary.BigEndian.Uint64(first) + } + + poly32 := uint32(crc32.IEEE) + if c.Is(ChecksumCRC32C) { + poly32 = crc32.Castagnoli + } + for _, part := range p[1:] { + if part.Size == 0 { + continue + } + sz += part.Size + pCrc, err := part.ChecksumRaw(c) + if err != nil { + return nil, err + } + switch c { + case ChecksumCRC32, ChecksumCRC32C: + merged = crc32Combine(poly32, merged, binary.BigEndian.Uint32(pCrc), part.Size) + case ChecksumCRC64NVME: + merged64 = crc64Combine(bits.Reverse64(crc64NVMEPolynomial), merged64, binary.BigEndian.Uint64(pCrc), part.Size) + } + } + var tmp [8]byte + switch c { + case ChecksumCRC32, ChecksumCRC32C: + binary.BigEndian.PutUint32(tmp[:], merged) + return &Checksum{ + Type: c, + r: tmp[:4], + }, nil + case ChecksumCRC64NVME: + binary.BigEndian.PutUint64(tmp[:], merged64) + return &Checksum{ + Type: c, + r: tmp[:8], + }, nil + default: + return nil, errors.New("unknown checksum type") + } +} + +func addAutoChecksumHeaders(opts *PutObjectOptions) { + if opts.UserMetadata == nil { + opts.UserMetadata = make(map[string]string, 1) + } + opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String() + if opts.AutoChecksum.FullObjectRequested() { + opts.UserMetadata["X-Amz-Checksum-Type"] = "FULL_OBJECT" + } +} + +func applyAutoChecksum(opts *PutObjectOptions, allParts []ObjectPart) { + if !opts.AutoChecksum.IsSet() { + return + } + if opts.AutoChecksum.CanComposite() && !opts.AutoChecksum.Is(ChecksumFullObject) { + // Add composite hash of hashes. 
+ crc, err := opts.AutoChecksum.CompositeChecksum(allParts) + if err == nil { + opts.UserMetadata = map[string]string{opts.AutoChecksum.Key(): crc.Encoded()} + } + } else if opts.AutoChecksum.CanMergeCRC() { + crc, err := opts.AutoChecksum.FullObjectChecksum(allParts) + if err == nil { + opts.UserMetadata = map[string]string{opts.AutoChecksum.KeyCapitalized(): crc.Encoded(), "X-Amz-Checksum-Type": "FULL_OBJECT"} + } + } +} diff --git a/vendor/github.com/minio/minio-go/v7/functional_tests.go b/vendor/github.com/minio/minio-go/v7/functional_tests.go index c0180b36b70..33e87e6e162 100644 --- a/vendor/github.com/minio/minio-go/v7/functional_tests.go +++ b/vendor/github.com/minio/minio-go/v7/functional_tests.go @@ -160,7 +160,7 @@ func logError(testName, function string, args map[string]interface{}, startTime } else { logFailure(testName, function, args, startTime, alert, message, err) if !isRunOnFail() { - panic(err) + panic(fmt.Sprintf("Test failed with message: %s, err: %v", message, err)) } } } @@ -393,6 +393,42 @@ func getFuncNameLoc(caller int) string { return strings.TrimPrefix(runtime.FuncForPC(pc).Name(), "main.") } +type ClientConfig struct { + // MinIO client configuration + TraceOn bool // Turn on tracing of HTTP requests and responses to stderr + CredsV2 bool // Use V2 credentials if true, otherwise use v4 + TrailingHeaders bool // Send trailing headers in requests +} + +func NewClient(config ClientConfig) (*minio.Client, error) { + // Instantiate new MinIO client + var creds *credentials.Credentials + if config.CredsV2 { + creds = credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), "") + } else { + creds = credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), "") + } + opts := &minio.Options{ + Creds: creds, + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + TrailingHeaders: config.TrailingHeaders, + } + client, err := minio.New(os.Getenv(serverEndpoint), opts) + if err != nil { + return nil, err + } + + if config.TraceOn { + client.TraceOn(os.Stderr) + } + + // Set user agent. + client.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + return client, nil +} + // Tests bucket re-create errors. func testMakeBucketError() { region := "eu-central-1" @@ -407,27 +443,12 @@ func testMakeBucketError() { "region": region, } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - Transport: createHTTPTransport(), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -462,20 +483,12 @@ func testMetadataSizeLimit() { "objectName": "", "opts.UserMetadata": "", } - rand.Seed(startTime.Unix()) - // Instantiate new minio client object. 
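applyAutoChecksum's merge branch relies on FullObjectChecksum combining per-part CRCs into the CRC of the whole stream without re-reading any part data. That property can be sanity-checked directly:

```go
package main

import (
	"fmt"
	"log"

	"github.com/minio/minio-go/v7"
)

func main() {
	partA, partB := []byte("hello, "), []byte("world")

	parts := []minio.ObjectPart{
		{PartNumber: 1, Size: int64(len(partA)), ChecksumCRC32C: minio.ChecksumCRC32C.EncodeToString(partA)},
		{PartNumber: 2, Size: int64(len(partB)), ChecksumCRC32C: minio.ChecksumCRC32C.EncodeToString(partB)},
	}

	// Merge the per-part CRCs; only checksums and sizes are consulted.
	merged, err := minio.ChecksumCRC32C.FullObjectChecksum(parts)
	if err != nil {
		log.Fatal(err)
	}

	// The merged value equals the CRC of the concatenated stream.
	whole := minio.ChecksumCRC32C.EncodeToString([]byte("hello, world"))
	fmt.Println(merged.Encoded() == whole) // true
}
```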
- c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - Transport: createHTTPTransport(), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client creation failed", err) return } - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -531,27 +544,12 @@ func testMakeBucketRegions() { "region": region, } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -598,27 +596,12 @@ func testPutObjectReadAt() { "opts": "objectContentType", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -697,27 +680,12 @@ func testListObjectVersions() { "recursive": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -817,27 +785,12 @@ func testStatObjectWithVersioning() { function := "StatObject" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. 
- c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -935,27 +888,12 @@ func testGetObjectWithVersioning() { function := "GetObject()" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -1075,27 +1013,12 @@ func testPutObjectWithVersioning() { function := "GetObject()" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -1223,28 +1146,12 @@ func testListMultipartUpload() { function := "GetObject()" args := map[string]interface{}{} - // Instantiate new minio client object. - opts := &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - } - c, err := minio.New(os.Getenv(serverEndpoint), opts) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - core, err := minio.NewCore(os.Getenv(serverEndpoint), opts) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO core client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + core := minio.Core{Client: c} // Generate a new random bucket name. 
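The tests also stop dialing a second client for the low-level API: since minio.Core embeds *minio.Client, a Core can be built from the already-configured client instead of repeating the NewCore options. A sketch with placeholder endpoint and keys:

```go
package main

import (
	"fmt"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	c, err := minio.New("play.min.io", &minio.Options{
		Creds: credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Core shares c's transport, credentials, and endpoint; the tests use it
	// for low-level calls such as listing in-progress multipart uploads.
	core := minio.Core{Client: c}
	fmt.Println(core.EndpointURL())
}
```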
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -1347,27 +1254,12 @@ func testCopyObjectWithVersioning() { function := "CopyObject()" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -1485,27 +1377,12 @@ func testConcurrentCopyObjectWithVersioning() { function := "CopyObject()" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -1646,27 +1523,12 @@ func testComposeObjectWithVersioning() { function := "ComposeObject()" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -1787,27 +1649,12 @@ func testRemoveObjectWithVersioning() { function := "DeleteObject()" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. 
- c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -1900,27 +1747,12 @@ func testRemoveObjectsWithVersioning() { function := "DeleteObjects()" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -1996,27 +1828,12 @@ func testObjectTaggingWithVersioning() { function := "{Get,Set,Remove}ObjectTagging()" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -2164,27 +1981,12 @@ func testPutObjectWithChecksums() { return } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -2204,9 +2006,13 @@ func testPutObjectWithChecksums() { {cs: minio.ChecksumCRC32}, {cs: minio.ChecksumSHA1}, {cs: minio.ChecksumSHA256}, + {cs: minio.ChecksumCRC64NVME}, } for _, test := range tests { + if os.Getenv("MINT_NO_FULL_OBJECT") != "" && test.cs.FullObjectRequested() { + continue + } bufSize := dataFileMap["datafile-10-kB"] // Save the data @@ -2230,7 +2036,7 @@ func testPutObjectWithChecksums() { h := test.cs.Hasher() h.Reset() - // Test with Wrong CRC. + // Test with a bad CRC - we haven't called h.Write(b), so this is a checksum of empty data meta[test.cs.Key()] = base64.StdEncoding.EncodeToString(h.Sum(nil)) args["metadata"] = meta args["range"] = "false" @@ -2263,6 +2069,7 @@ func testPutObjectWithChecksums() { cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"]) cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"]) cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"]) + cmpChecksum(resp.ChecksumCRC64NVME, meta["x-amz-checksum-crc64nvme"]) // Read the data back gopts := minio.GetObjectOptions{Checksum: true} @@ -2282,6 +2089,7 @@ func testPutObjectWithChecksums() { cmpChecksum(st.ChecksumSHA1, meta["x-amz-checksum-sha1"]) cmpChecksum(st.ChecksumCRC32, meta["x-amz-checksum-crc32"]) cmpChecksum(st.ChecksumCRC32C, meta["x-amz-checksum-crc32c"]) + cmpChecksum(st.ChecksumCRC64NVME, meta["x-amz-checksum-crc64nvme"]) if st.Size != int64(bufSize) { logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match GetObject, expected "+string(bufSize)+" got "+string(st.Size), err) @@ -2325,12 +2133,12 @@ func testPutObjectWithChecksums() { cmpChecksum(st.ChecksumSHA1, "") cmpChecksum(st.ChecksumCRC32, "") cmpChecksum(st.ChecksumCRC32C, "") + cmpChecksum(st.ChecksumCRC64NVME, "") delete(args, "range") delete(args, "metadata") + logSuccess(testName, function, args, startTime) } - - logSuccess(testName, function, args, startTime) } // Test PutObject with custom checksums. @@ -2350,28 +2158,12 @@ func testPutObjectWithTrailingChecksums() { return } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - TrailingHeaders: true, - }) + c, err := NewClient(ClientConfig{TrailingHeaders: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -2387,13 +2179,16 @@ func testPutObjectWithTrailingChecksums() { tests := []struct { cs minio.ChecksumType }{ + {cs: minio.ChecksumCRC64NVME}, {cs: minio.ChecksumCRC32C}, {cs: minio.ChecksumCRC32}, {cs: minio.ChecksumSHA1}, {cs: minio.ChecksumSHA256}, } - for _, test := range tests { + for _, test := range tests { + if os.Getenv("MINT_NO_FULL_OBJECT") != "" && test.cs.FullObjectRequested() { + continue + } function := "PutObject(bucketName, objectName, reader,size, opts)" bufSize := dataFileMap["datafile-10-kB"] @@ -2441,6 +2236,7 @@ func testPutObjectWithTrailingChecksums() { cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"]) cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"]) cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"]) + cmpChecksum(resp.ChecksumCRC64NVME, meta["x-amz-checksum-crc64nvme"]) // Read the data back gopts := minio.GetObjectOptions{Checksum: true} @@ -2461,6 +2257,7 @@ func testPutObjectWithTrailingChecksums() { cmpChecksum(st.ChecksumSHA1, meta["x-amz-checksum-sha1"]) cmpChecksum(st.ChecksumCRC32, meta["x-amz-checksum-crc32"]) cmpChecksum(st.ChecksumCRC32C, meta["x-amz-checksum-crc32c"]) + cmpChecksum(st.ChecksumCRC64NVME, meta["x-amz-checksum-crc64nvme"]) if st.Size != int64(bufSize) { logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match GetObject, expected "+string(bufSize)+" got "+string(st.Size), err) @@ -2505,6 +2302,7 @@ func testPutObjectWithTrailingChecksums() { cmpChecksum(st.ChecksumSHA1, "") cmpChecksum(st.ChecksumCRC32, "") cmpChecksum(st.ChecksumCRC32C, "") + cmpChecksum(st.ChecksumCRC64NVME, "") function = "GetObjectAttributes(...)" s, err := c.GetObjectAttributes(context.Background(), bucketName, objectName, minio.ObjectAttributesOptions{}) @@ -2519,9 +2317,8 @@ func testPutObjectWithTrailingChecksums() { delete(args, "range") delete(args, "metadata") + logSuccess(testName, function, args, startTime) } - - logSuccess(testName, function, args, startTime) } // Test PutObject with custom checksums. @@ -2533,7 +2330,7 @@ func testPutMultipartObjectWithChecksums(trailing bool) { args := map[string]interface{}{ "bucketName": "", "objectName": "", - "opts": fmt.Sprintf("minio.PutObjectOptions{UserMetadata: metadata, Progress: progress Checksum: %v}", trailing), + "opts": fmt.Sprintf("minio.PutObjectOptions{UserMetadata: metadata, Trailing: %v}", trailing), } if !isFullMode() { @@ -2541,28 +2338,12 @@ func testPutMultipartObjectWithChecksums(trailing bool) { return } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - TrailingHeaders: trailing, - }) + c, err := NewClient(ClientConfig{TrailingHeaders: trailing}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name.
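
testPutObjectWithTrailingChecksums above differs from the non-trailing variant only in transport: the client is built with TrailingHeaders: true, so the checksum is computed while streaming and appended as an HTTP trailer rather than sent up front. A usage sketch, under the assumption (not visible in these hunks) that the test loop forwards the checksum type through the Checksum field of minio.PutObjectOptions:

func putWithTrailingChecksum(bucketName, objectName string, payload []byte) error {
	// TrailingHeaders must be enabled on the client, or no trailer is sent.
	c, err := NewClient(ClientConfig{TrailingHeaders: true})
	if err != nil {
		return err
	}
	_, err = c.PutObject(context.Background(), bucketName, objectName,
		bytes.NewReader(payload), int64(len(payload)),
		minio.PutObjectOptions{
			Checksum: minio.ChecksumCRC32C, // computed during upload, sent as a trailer
		})
	return err
}
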
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -2574,14 +2355,18 @@ func testPutMultipartObjectWithChecksums(trailing bool) { return } - hashMultiPart := func(b []byte, partSize int, hasher hash.Hash) string { + hashMultiPart := func(b []byte, partSize int, cs minio.ChecksumType) string { r := bytes.NewReader(b) + hasher := cs.Hasher() + if cs.FullObjectRequested() { + partSize = len(b) + } tmp := make([]byte, partSize) parts := 0 var all []byte for { n, err := io.ReadFull(r, tmp) - if err != nil && err != io.ErrUnexpectedEOF { + if err != nil && err != io.ErrUnexpectedEOF && err != io.EOF { logError(testName, function, args, startTime, "", "Calc crc failed", err) } if n == 0 { @@ -2595,6 +2380,9 @@ func testPutMultipartObjectWithChecksums(trailing bool) { break } } + if parts == 1 { + return base64.StdEncoding.EncodeToString(hasher.Sum(nil)) + } hasher.Reset() hasher.Write(all) return fmt.Sprintf("%s-%d", base64.StdEncoding.EncodeToString(hasher.Sum(nil)), parts) @@ -2603,6 +2391,9 @@ func testPutMultipartObjectWithChecksums(trailing bool) { tests := []struct { cs minio.ChecksumType }{ + {cs: minio.ChecksumFullObjectCRC32}, + {cs: minio.ChecksumFullObjectCRC32C}, + {cs: minio.ChecksumCRC64NVME}, {cs: minio.ChecksumCRC32C}, {cs: minio.ChecksumCRC32}, {cs: minio.ChecksumSHA1}, @@ -2610,8 +2401,12 @@ func testPutMultipartObjectWithChecksums(trailing bool) { } for _, test := range tests { - bufSize := dataFileMap["datafile-129-MB"] + if os.Getenv("MINT_NO_FULL_OBJECT") != "" && test.cs.FullObjectRequested() { + continue + } + args["section"] = "prep" + bufSize := dataFileMap["datafile-129-MB"] // Save the data objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") args["objectName"] = objectName @@ -2620,7 +2415,7 @@ func testPutMultipartObjectWithChecksums(trailing bool) { cmpChecksum := func(got, want string) { if want != got { logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got)) - //fmt.Printf("want %s, got %s\n", want, got) + // fmt.Printf("want %s, got %s\n", want, got) return } } @@ -2635,7 +2430,7 @@ func testPutMultipartObjectWithChecksums(trailing bool) { reader.Close() h := test.cs.Hasher() h.Reset() - want := hashMultiPart(b, partSize, test.cs.Hasher()) + want := hashMultiPart(b, partSize, test.cs) var cs minio.ChecksumType rd := io.Reader(io.NopCloser(bytes.NewReader(b))) @@ -2643,7 +2438,9 @@ func testPutMultipartObjectWithChecksums(trailing bool) { cs = test.cs rd = bytes.NewReader(b) } + // Set correct CRC. 
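
The rewritten hashMultiPart above encodes the S3 multipart checksum convention: hash each part, hash the concatenation of the part digests, and append "-<parts>"; full-object checksum types (and any upload that ends up as a single part) instead yield one plain digest over all the bytes, with no suffix. A self-contained restatement of that logic, without the test plumbing:

// compositeChecksum mirrors the hashMultiPart helper above.
func compositeChecksum(b []byte, partSize int, cs minio.ChecksumType) string {
	if cs.FullObjectRequested() {
		partSize = len(b) // full-object types hash everything as one part
	}
	h := cs.Hasher()
	var all []byte
	parts := 0
	for off := 0; off < len(b); off += partSize {
		end := off + partSize
		if end > len(b) {
			end = len(b)
		}
		h.Reset()
		h.Write(b[off:end])
		all = append(all, h.Sum(nil)...) // collect raw per-part digests
		parts++
	}
	if parts == 1 {
		// Single part: the object checksum is just the part checksum.
		return base64.StdEncoding.EncodeToString(all)
	}
	// Multipart: checksum-of-checksums, suffixed with the part count.
	h.Reset()
	h.Write(all)
	return fmt.Sprintf("%s-%d", base64.StdEncoding.EncodeToString(h.Sum(nil)), parts)
}
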
+ args["section"] = "PutObject" resp, err := c.PutObject(context.Background(), bucketName, objectName, rd, int64(bufSize), minio.PutObjectOptions{ DisableContentSha256: true, DisableMultipart: false, @@ -2657,7 +2454,7 @@ func testPutMultipartObjectWithChecksums(trailing bool) { return } - switch test.cs { + switch test.cs.Base() { case minio.ChecksumCRC32C: cmpChecksum(resp.ChecksumCRC32C, want) case minio.ChecksumCRC32: @@ -2666,15 +2463,41 @@ func testPutMultipartObjectWithChecksums(trailing bool) { cmpChecksum(resp.ChecksumSHA1, want) case minio.ChecksumSHA256: cmpChecksum(resp.ChecksumSHA256, want) + case minio.ChecksumCRC64NVME: + cmpChecksum(resp.ChecksumCRC64NVME, want) } + args["section"] = "HeadObject" + st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{Checksum: true}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + switch test.cs.Base() { + case minio.ChecksumCRC32C: + cmpChecksum(st.ChecksumCRC32C, want) + case minio.ChecksumCRC32: + cmpChecksum(st.ChecksumCRC32, want) + case minio.ChecksumSHA1: + cmpChecksum(st.ChecksumSHA1, want) + case minio.ChecksumSHA256: + cmpChecksum(st.ChecksumSHA256, want) + case minio.ChecksumCRC64NVME: + cmpChecksum(st.ChecksumCRC64NVME, want) + } + + args["section"] = "GetObjectAttributes" s, err := c.GetObjectAttributes(context.Background(), bucketName, objectName, minio.ObjectAttributesOptions{}) if err != nil { logError(testName, function, args, startTime, "", "GetObjectAttributes failed", err) return } - want = want[:strings.IndexByte(want, '-')] + + if strings.ContainsRune(want, '-') { + want = want[:strings.IndexByte(want, '-')] + } switch test.cs { + // Full Object CRC does not return anything with GetObjectAttributes case minio.ChecksumCRC32C: cmpChecksum(s.Checksum.ChecksumCRC32C, want) case minio.ChecksumCRC32: @@ -2690,13 +2513,14 @@ func testPutMultipartObjectWithChecksums(trailing bool) { gopts.PartNumber = 2 // We cannot use StatObject, since it ignores partnumber. + args["section"] = "GetObject-Part" r, err := c.GetObject(context.Background(), bucketName, objectName, gopts) if err != nil { logError(testName, function, args, startTime, "", "GetObject failed", err) return } io.Copy(io.Discard, r) - st, err := r.Stat() + st, err = r.Stat() if err != nil { logError(testName, function, args, startTime, "", "Stat failed", err) return @@ -2708,6 +2532,7 @@ func testPutMultipartObjectWithChecksums(trailing bool) { want = base64.StdEncoding.EncodeToString(h.Sum(nil)) switch test.cs { + // Full Object CRC does not return any part CRC for whatever reason. case minio.ChecksumCRC32C: cmpChecksum(st.ChecksumCRC32C, want) case minio.ChecksumCRC32: @@ -2716,12 +2541,17 @@ func testPutMultipartObjectWithChecksums(trailing bool) { cmpChecksum(st.ChecksumSHA1, want) case minio.ChecksumSHA256: cmpChecksum(st.ChecksumSHA256, want) + case minio.ChecksumCRC64NVME: + // AWS doesn't return part checksum, but may in the future. + if st.ChecksumCRC64NVME != "" { + cmpChecksum(st.ChecksumCRC64NVME, want) + } } delete(args, "metadata") + delete(args, "section") + logSuccess(testName, function, args, startTime) } - - logSuccess(testName, function, args, startTime) } // Test PutObject with trailing checksums. @@ -2741,25 +2571,12 @@ func testTrailingChecksums() { return } - // Instantiate new minio client object. 
- c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - TrailingHeaders: true, - }) + c, err := NewClient(ClientConfig{TrailingHeaders: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -2881,7 +2698,6 @@ func testTrailingChecksums() { test.ChecksumCRC32C = hashMultiPart(b, int(test.PO.PartSize), test.hasher) // Set correct CRC. - // c.TraceOn(os.Stderr) resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), test.PO) if err != nil { logError(testName, function, args, startTime, "", "PutObject failed", err) @@ -2932,6 +2748,7 @@ func testTrailingChecksums() { } delete(args, "metadata") + logSuccess(testName, function, args, startTime) } } @@ -2952,25 +2769,12 @@ func testPutObjectWithAutomaticChecksums() { return } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - TrailingHeaders: true, - }) + c, err := NewClient(ClientConfig{TrailingHeaders: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -2997,8 +2801,6 @@ func testPutObjectWithAutomaticChecksums() { {header: "x-amz-checksum-crc32c", hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli))}, } - // Enable tracing, write to stderr. 
- // c.TraceOn(os.Stderr) // defer c.TraceOff() for i, test := range tests { @@ -3108,20 +2910,12 @@ func testGetObjectAttributes() { return } - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - TrailingHeaders: true, - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{TrailingHeaders: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName err = c.MakeBucket( @@ -3315,19 +3109,12 @@ func testGetObjectAttributesSSECEncryption() { return } - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - TrailingHeaders: true, - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - Transport: createHTTPTransport(), - }) + c, err := NewClient(ClientConfig{TrailingHeaders: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName err = c.MakeBucket( @@ -3401,19 +3188,12 @@ func testGetObjectAttributesErrorCases() { return } - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - TrailingHeaders: true, - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{TrailingHeaders: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) unknownBucket := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-bucket-") unknownObject := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-object-") @@ -3657,27 +3437,12 @@ func testPutObjectWithMetadata() { return } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -3764,27 +3529,12 @@ func testPutObjectWithContentLanguage() { "opts": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. 
- c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -3834,27 +3584,12 @@ func testPutObjectStreaming() { "opts": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -3906,27 +3641,12 @@ func testGetObjectSeekEnd() { function := "GetObject(bucketName, objectName)" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -4029,27 +3749,12 @@ func testGetObjectClosedTwice() { function := "GetObject(bucketName, objectName)" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -4120,26 +3825,13 @@ func testRemoveObjectsContext() { "bucketName": "", } - // Seed random based on current tie. - rand.Seed(time.Now().Unix()) - // Instantiate new minio client. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Enable tracing, write to stdout. - // c.TraceOn(os.Stderr) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -4217,27 +3909,12 @@ func testRemoveMultipleObjects() { "bucketName": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - - // Enable tracing, write to stdout. - // c.TraceOn(os.Stderr) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -4301,27 +3978,12 @@ func testRemoveMultipleObjectsWithResult() { "bucketName": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - - // Enable tracing, write to stdout. - // c.TraceOn(os.Stderr) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -4437,27 +4099,12 @@ func testFPutObjectMultipart() { "opts": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -4543,27 +4190,12 @@ func testFPutObject() { "opts": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") location := "us-east-1" @@ -4713,27 +4345,13 @@ func testFPutObjectContext() { "fileName": "", "opts": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -4814,27 +4432,13 @@ func testFPutObjectContextV2() { "objectName": "", "opts": "minio.PutObjectOptions{ContentType:objectContentType}", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{CredsV2: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -4919,24 +4523,12 @@ func testPutObjectContext() { "opts": "", } - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Make a new bucket. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -4989,27 +4581,12 @@ func testGetObjectS3Zip() { function := "GetObject(bucketName, objectName)" args := map[string]interface{}{"x-minio-extract": true} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -5173,27 +4750,12 @@ func testGetObjectReadSeekFunctional() { function := "GetObject(bucketName, objectName)" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -5343,27 +4905,12 @@ func testGetObjectReadAtFunctional() { function := "GetObject(bucketName, objectName)" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -5521,27 +5068,12 @@ func testGetObjectReadAtWhenEOFWasReached() { function := "GetObject(bucketName, objectName)" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. 
- c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -5641,27 +5173,12 @@ func testPresignedPostPolicy() { "policy": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -5689,50 +5206,22 @@ func testPresignedPostPolicy() { return } - // Save the data - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - policy := minio.NewPostPolicy() - - if err := policy.SetBucket(""); err == nil { - logError(testName, function, args, startTime, "", "SetBucket did not fail for invalid conditions", err) - return - } - if err := policy.SetKey(""); err == nil { - logError(testName, function, args, startTime, "", "SetKey did not fail for invalid conditions", err) - return - } - if err := policy.SetExpires(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)); err == nil { - logError(testName, function, args, startTime, "", "SetExpires did not fail for invalid conditions", err) - return - } - if err := policy.SetContentType(""); err == nil { - logError(testName, function, args, startTime, "", "SetContentType did not fail for invalid conditions", err) - return - } - if err := policy.SetContentLengthRange(1024*1024, 1024); err == nil { - logError(testName, function, args, startTime, "", "SetContentLengthRange did not fail for invalid conditions", err) - return - } - if err := policy.SetUserMetadata("", ""); err == nil { - logError(testName, function, args, startTime, "", "SetUserMetadata did not fail for invalid conditions", err) - return - } - policy.SetBucket(bucketName) policy.SetKey(objectName) policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days policy.SetContentType("binary/octet-stream") policy.SetContentLengthRange(10, 1024*1024) policy.SetUserMetadata(metadataKey, metadataValue) + policy.SetContentEncoding("gzip") // Add CRC32C checksum := minio.ChecksumCRC32C.ChecksumBytes(buf) - policy.SetChecksum(checksum) + err = policy.SetChecksum(checksum) + if err != nil { + logError(testName, function, args, startTime, 
"", "SetChecksum failed", err) + return + } args["policy"] = policy.String() @@ -5828,7 +5317,7 @@ func testPresignedPostPolicy() { expectedLocation := scheme + os.Getenv(serverEndpoint) + "/" + bucketName + "/" + objectName expectedLocationBucketDNS := scheme + bucketName + "." + os.Getenv(serverEndpoint) + "/" + objectName - if !strings.Contains(expectedLocation, "s3.amazonaws.com/") { + if !strings.Contains(expectedLocation, ".amazonaws.com/") { // Test when not against AWS S3. if val, ok := res.Header["Location"]; ok { if val[0] != expectedLocation && val[0] != expectedLocationBucketDNS { @@ -5840,9 +5329,184 @@ func testPresignedPostPolicy() { return } } - want := checksum.Encoded() - if got := res.Header.Get("X-Amz-Checksum-Crc32c"); got != want { - logError(testName, function, args, startTime, "", fmt.Sprintf("Want checksum %q, got %q", want, got), nil) + wantChecksumCrc32c := checksum.Encoded() + if got := res.Header.Get("X-Amz-Checksum-Crc32c"); got != wantChecksumCrc32c { + logError(testName, function, args, startTime, "", fmt.Sprintf("Want checksum %q, got %q", wantChecksumCrc32c, got), nil) + return + } + + // Ensure that when we subsequently GetObject, the checksum is returned + gopts := minio.GetObjectOptions{Checksum: true} + r, err := c.GetObject(context.Background(), bucketName, objectName, gopts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + if st.ChecksumCRC32C != wantChecksumCrc32c { + logError(testName, function, args, startTime, "", fmt.Sprintf("Want checksum %s, got %s", wantChecksumCrc32c, st.ChecksumCRC32C), nil) + return + } + + logSuccess(testName, function, args, startTime) +} + +// testPresignedPostPolicyWrongFile tests that when we have a policy with a checksum, we cannot POST the wrong file +func testPresignedPostPolicyWrongFile() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PresignedPostPolicy(policy)" + args := map[string]interface{}{ + "policy": "", + } + + c, err := NewClient(ClientConfig{}) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + + // Make a new bucket in 'us-east-1' (source bucket). + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + // Azure requires the key to not start with a number + metadataKey := randString(60, rand.NewSource(time.Now().UnixNano()), "user") + metadataValue := randString(60, rand.NewSource(time.Now().UnixNano()), "") + + policy := minio.NewPostPolicy() + policy.SetBucket(bucketName) + policy.SetKey(objectName) + policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days + policy.SetContentType("binary/octet-stream") + policy.SetContentLengthRange(10, 1024*1024) + policy.SetUserMetadata(metadataKey, metadataValue) + + // Add CRC32C of some data that the policy will explicitly allow. 
+ checksum := minio.ChecksumCRC32C.ChecksumBytes([]byte{0x01, 0x02, 0x03}) + err = policy.SetChecksum(checksum) + if err != nil { + logError(testName, function, args, startTime, "", "SetChecksum failed", err) + return + } + + args["policy"] = policy.String() + + presignedPostPolicyURL, formData, err := c.PresignedPostPolicy(context.Background(), policy) + if err != nil { + logError(testName, function, args, startTime, "", "PresignedPostPolicy failed", err) + return + } + + // At this stage, we have a policy that allows us to upload for a specific checksum. + // Test that uploading datafile-10-kB, with a different checksum, fails as expected + filePath := getMintDataDirFilePath("datafile-10-kB") + if filePath == "" { + // Make a temp file with 10 KB data. + file, err := os.CreateTemp(os.TempDir(), "PresignedPostPolicyTest") + if err != nil { + logError(testName, function, args, startTime, "", "TempFile creation failed", err) + return + } + if _, err = io.Copy(file, getDataReader("datafile-10-kB")); err != nil { + logError(testName, function, args, startTime, "", "Copy failed", err) + return + } + if err = file.Close(); err != nil { + logError(testName, function, args, startTime, "", "File Close failed", err) + return + } + filePath = file.Name() + } + fileReader := getDataReader("datafile-10-kB") + defer fileReader.Close() + buf10k, err := io.ReadAll(fileReader) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + otherChecksum := minio.ChecksumCRC32C.ChecksumBytes(buf10k) + + var formBuf bytes.Buffer + writer := multipart.NewWriter(&formBuf) + for k, v := range formData { + if k == "x-amz-checksum-crc32c" { + v = otherChecksum.Encoded() + } + writer.WriteField(k, v) + } + + // Add file to post request + f, err := os.Open(filePath) + if err != nil { + logError(testName, function, args, startTime, "", "File open failed", err) + return + } + defer f.Close() + w, err := writer.CreateFormFile("file", filePath) + if err != nil { + logError(testName, function, args, startTime, "", "CreateFormFile failed", err) + return + } + _, err = io.Copy(w, f) + if err != nil { + logError(testName, function, args, startTime, "", "Copy failed", err) + return + } + writer.Close() + + httpClient := &http.Client{ + Timeout: 30 * time.Second, + Transport: createHTTPTransport(), + } + args["url"] = presignedPostPolicyURL.String() + + req, err := http.NewRequest(http.MethodPost, presignedPostPolicyURL.String(), bytes.NewReader(formBuf.Bytes())) + if err != nil { + logError(testName, function, args, startTime, "", "HTTP request failed", err) + return + } + + req.Header.Set("Content-Type", writer.FormDataContentType()) + + // Make the POST request with the form data. + res, err := httpClient.Do(req) + if err != nil { + logError(testName, function, args, startTime, "", "HTTP request failed", err) + return + } + defer res.Body.Close() + if res.StatusCode != http.StatusForbidden { + logError(testName, function, args, startTime, "", "HTTP request unexpected status", errors.New(res.Status)) + return + } + + // Read the response body, ensure it has checksum failure message + resBody, err := io.ReadAll(res.Body) + if err != nil { + logError(testName, function, args, startTime, "", "ReadAll failed", err) + return + } + + // Normalize the response body, because S3 uses quotes around the policy condition components + // in the error message, MinIO does not.
+ resBodyStr := strings.ReplaceAll(string(resBody), `"`, "") + if !strings.Contains(resBodyStr, "Policy Condition failed: [eq, $x-amz-checksum-crc32c, 8TDyHg=") { + logError(testName, function, args, startTime, "", "Unexpected response body", errors.New(resBodyStr)) return } @@ -5857,27 +5521,12 @@ func testCopyObject() { function := "CopyObject(dst, src)" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -6052,27 +5701,12 @@ func testSSECEncryptedGetObjectReadSeekFunctional() { function := "GetObject(bucketName, objectName)" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -6235,27 +5869,12 @@ func testSSES3EncryptedGetObjectReadSeekFunctional() { function := "GetObject(bucketName, objectName)" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -6416,27 +6035,12 @@ func testSSECEncryptedGetObjectReadAtFunctional() { function := "GetObject(bucketName, objectName)" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. 
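
The two POST-policy tests above share one flow: pin the object's CRC32C in the policy via SetChecksum, sign it with PresignedPostPolicy, then submit a multipart/form-data request; the server accepts the form only if the uploaded bytes hash to the pinned value, which is why the wrong-file variant expects a 403. A condensed sketch of the happy path (helper name and error handling are ours; it assumes the default 204 success response):

// postWithPinnedChecksum uploads payload through a presigned POST form whose
// policy pins the payload's CRC32C, mirroring testPresignedPostPolicy above.
func postWithPinnedChecksum(c *minio.Client, bucket, object string, payload []byte) error {
	policy := minio.NewPostPolicy()
	policy.SetBucket(bucket)
	policy.SetKey(object)
	policy.SetExpires(time.Now().UTC().AddDate(0, 0, 1))
	if err := policy.SetChecksum(minio.ChecksumCRC32C.ChecksumBytes(payload)); err != nil {
		return err
	}

	u, formData, err := c.PresignedPostPolicy(context.Background(), policy)
	if err != nil {
		return err
	}

	// The signed fields (including x-amz-checksum-crc32c) must be sent
	// verbatim; the file field comes last.
	var body bytes.Buffer
	writer := multipart.NewWriter(&body)
	for k, v := range formData {
		writer.WriteField(k, v)
	}
	w, err := writer.CreateFormFile("file", object)
	if err != nil {
		return err
	}
	w.Write(payload)
	writer.Close()

	req, err := http.NewRequest(http.MethodPost, u.String(), &body)
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", writer.FormDataContentType())
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusNoContent {
		return fmt.Errorf("unexpected status: %s", res.Status)
	}
	return nil
}
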
- c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -6600,27 +6204,12 @@ func testSSES3EncryptedGetObjectReadAtFunctional() { function := "GetObject(bucketName, objectName)" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -6785,27 +6374,13 @@ func testSSECEncryptionPutGet() { "objectName": "", "sse": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -6895,27 +6470,13 @@ func testSSECEncryptionFPut() { "contentType": "", "sse": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -7018,27 +6579,13 @@ func testSSES3EncryptionPutGet() { "objectName": "", "sse": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -7126,27 +6673,13 @@ func testSSES3EncryptionFPut() { "contentType": "", "sse": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -7255,26 +6788,12 @@ func testBucketNotification() { return } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable to debug - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - bucketName := os.Getenv("NOTIFY_BUCKET") args["bucketName"] = bucketName @@ -7350,26 +6869,12 @@ func testFunctional() { functionAll := "" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, nil, startTime, "", "MinIO client object creation failed", err) return } - // Enable to debug - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -8029,24 +7534,12 @@ func testGetObjectModified() { function := "GetObject(bucketName, objectName)" args := map[string]interface{}{} - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Make a new bucket. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -8125,24 +7618,12 @@ func testPutObjectUploadSeekedObject() { "contentType": "binary/octet-stream", } - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Make a new bucket. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -8245,27 +7726,12 @@ func testMakeBucketErrorV2() { "region": "eu-west-1", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{CredsV2: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") region := "eu-west-1" @@ -8305,27 +7771,12 @@ func testGetObjectClosedTwiceV2() { "region": "eu-west-1", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{CredsV2: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -8396,27 +7847,12 @@ func testFPutObjectV2() { "opts": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{CredsV2: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -8557,27 +7993,12 @@ func testMakeBucketRegionsV2() { "region": "eu-west-1", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{CredsV2: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -8620,27 +8041,12 @@ func testGetObjectReadSeekFunctionalV2() { function := "GetObject(bucketName, objectName)" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{CredsV2: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -8775,27 +8181,12 @@ func testGetObjectReadAtFunctionalV2() { function := "GetObject(bucketName, objectName)" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. 
- c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{CredsV2: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -8937,27 +8328,12 @@ func testCopyObjectV2() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{CredsV2: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -9156,13 +8532,7 @@ func testComposeObjectErrorCasesV2() { function := "ComposeObject(destination, sourceList)" args := map[string]interface{}{} - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{CredsV2: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -9254,13 +8624,7 @@ func testCompose10KSourcesV2() { function := "ComposeObject(destination, sourceList)" args := map[string]interface{}{} - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{CredsV2: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -9276,13 +8640,7 @@ func testEncryptedEmptyObject() { function := "PutObject(bucketName, objectName, reader, objectSize, opts)" args := map[string]interface{}{} - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return @@ -9430,7 +8788,7 @@ func testEncryptedCopyObjectWrapper(c *minio.Client, bucketName string, sseSrc, dstEncryption = sseDst } 
// 3. get copied object and check if content is equal - coreClient := minio.Core{c} + coreClient := minio.Core{Client: c} reader, _, _, err := coreClient.GetObject(context.Background(), bucketName, "dstObject", minio.GetObjectOptions{ServerSideEncryption: dstEncryption}) if err != nil { logError(testName, function, args, startTime, "", "GetObject failed", err) @@ -9537,13 +8895,7 @@ func testUnencryptedToSSECCopyObject() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -9552,7 +8904,6 @@ func testUnencryptedToSSECCopyObject() { bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) - // c.TraceOn(os.Stderr) testEncryptedCopyObjectWrapper(c, bucketName, nil, sseDst) } @@ -9564,13 +8915,7 @@ func testUnencryptedToSSES3CopyObject() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -9580,7 +8925,6 @@ func testUnencryptedToSSES3CopyObject() { var sseSrc encrypt.ServerSide sseDst := encrypt.NewSSE() - // c.TraceOn(os.Stderr) testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) } @@ -9592,13 +8936,7 @@ func testUnencryptedToUnencryptedCopyObject() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -9607,7 +8945,6 @@ func testUnencryptedToUnencryptedCopyObject() { bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") var sseSrc, sseDst encrypt.ServerSide - // c.TraceOn(os.Stderr) testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) } @@ -9619,13 +8956,7 @@ func testEncryptedSSECToSSECCopyObject() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -9635,7 +8966,6 @@ 
func testEncryptedSSECToSSECCopyObject() { sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) - // c.TraceOn(os.Stderr) testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) } @@ -9647,13 +8977,7 @@ func testEncryptedSSECToSSES3CopyObject() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -9663,7 +8987,6 @@ func testEncryptedSSECToSSES3CopyObject() { sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) sseDst := encrypt.NewSSE() - // c.TraceOn(os.Stderr) testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) } @@ -9675,13 +8998,7 @@ func testEncryptedSSECToUnencryptedCopyObject() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -9691,7 +9008,6 @@ func testEncryptedSSECToUnencryptedCopyObject() { sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) var sseDst encrypt.ServerSide - // c.TraceOn(os.Stderr) testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) } @@ -9703,13 +9019,7 @@ func testEncryptedSSES3ToSSECCopyObject() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -9719,7 +9029,6 @@ func testEncryptedSSES3ToSSECCopyObject() { sseSrc := encrypt.NewSSE() sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) - // c.TraceOn(os.Stderr) testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) } @@ -9731,13 +9040,7 @@ func testEncryptedSSES3ToSSES3CopyObject() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ 
-9747,7 +9050,6 @@ func testEncryptedSSES3ToSSES3CopyObject() { sseSrc := encrypt.NewSSE() sseDst := encrypt.NewSSE() - // c.TraceOn(os.Stderr) testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) } @@ -9759,13 +9061,7 @@ func testEncryptedSSES3ToUnencryptedCopyObject() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -9775,7 +9071,6 @@ func testEncryptedSSES3ToUnencryptedCopyObject() { sseSrc := encrypt.NewSSE() var sseDst encrypt.ServerSide - // c.TraceOn(os.Stderr) testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) } @@ -9787,13 +9082,7 @@ func testEncryptedCopyObjectV2() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{CredsV2: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -9803,7 +9092,6 @@ func testEncryptedCopyObjectV2() { sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) - // c.TraceOn(os.Stderr) testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) } @@ -9814,13 +9102,7 @@ func testDecryptedCopyObject() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -9874,26 +9156,14 @@ func testSSECMultipartEncryptedToSSECCopyObjectPart() { function := "CopyObjectPart(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - client, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + client, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return } // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + c := minio.Core{Client: client} // Generate a new random bucket name. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") @@ -10072,26 +9342,14 @@ func testSSECEncryptedToSSECCopyObjectPart() { function := "CopyObjectPart(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - client, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + client, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return } // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + c := minio.Core{Client: client} // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") @@ -10250,26 +9508,14 @@ func testSSECEncryptedToUnencryptedCopyPart() { function := "CopyObjectPart(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - client, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + client, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return } // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + c := minio.Core{Client: client} // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") @@ -10427,26 +9673,14 @@ func testSSECEncryptedToSSES3CopyObjectPart() { function := "CopyObjectPart(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - client, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + client, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return } // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + c := minio.Core{Client: client} // Generate a new random bucket name. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") @@ -10607,26 +9841,14 @@ func testUnencryptedToSSECCopyObjectPart() { function := "CopyObjectPart(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - client, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + client, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return } // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + c := minio.Core{Client: client} // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") @@ -10782,26 +10004,14 @@ func testUnencryptedToUnencryptedCopyPart() { function := "CopyObjectPart(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - client, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + client, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return } // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + c := minio.Core{Client: client} // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") @@ -10953,26 +10163,14 @@ func testUnencryptedToSSES3CopyObjectPart() { function := "CopyObjectPart(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - client, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + client, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return } // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + c := minio.Core{Client: client} // Generate a new random bucket name. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") @@ -11126,26 +10324,14 @@ func testSSES3EncryptedToSSECCopyObjectPart() { function := "CopyObjectPart(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - client, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + client, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return } // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + c := minio.Core{Client: client} // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") @@ -11302,26 +10488,14 @@ func testSSES3EncryptedToUnencryptedCopyPart() { function := "CopyObjectPart(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - client, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + client, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return } // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + c := minio.Core{Client: client} // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") @@ -11474,26 +10648,14 @@ func testSSES3EncryptedToSSES3CopyObjectPart() { function := "CopyObjectPart(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - client, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + client, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return } // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + c := minio.Core{Client: client} // Generate a new random bucket name. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") @@ -11648,19 +10810,12 @@ func testUserMetadataCopying() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // c.TraceOn(os.Stderr) testUserMetadataCopyingWrapper(c) } @@ -11825,19 +10980,12 @@ func testUserMetadataCopyingV2() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{CredsV2: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) return } - // c.TraceOn(os.Stderr) testUserMetadataCopyingWrapper(c) } @@ -11848,13 +10996,7 @@ func testStorageClassMetadataPutObject() { args := map[string]interface{}{} testName := getFuncName() - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return @@ -11936,13 +11078,7 @@ func testStorageClassInvalidMetadataPutObject() { args := map[string]interface{}{} testName := getFuncName() - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return @@ -11979,13 +11115,7 @@ func testStorageClassMetadataCopyObject() { args := map[string]interface{}{} testName := getFuncName() - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - Transport: createHTTPTransport(), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return @@ -12106,27 +11236,12 @@ func testPutObjectNoLengthV2() { "opts": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. 
- c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{CredsV2: true}) if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -12182,27 +11297,12 @@ func testPutObjectsUnknownV2() { "opts": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{CredsV2: true}) if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -12273,27 +11373,12 @@ func testPutObject0ByteV2() { "opts": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{CredsV2: true}) if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -12338,13 +11423,7 @@ func testComposeObjectErrorCases() { function := "ComposeObject(destination, sourceList)" args := map[string]interface{}{} - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return @@ -12361,13 +11440,7 @@ func testCompose10KSources() { function := "ComposeObject(destination, sourceList)" args := map[string]interface{}{} - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return @@ -12385,26 +11458,12 @@ func testFunctionalV2() { functionAll := "" args := map[string]interface{}{} - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - Transport: createHTTPTransport(), - }) + c, err := NewClient(ClientConfig{CredsV2: true}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) return } - // Enable to debug - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") location := "us-east-1" @@ -12838,27 +11897,13 @@ func testGetObjectContext() { "bucketName": "", "objectName": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -12941,27 +11986,13 @@ func testFGetObjectContext() { "objectName": "", "fileName": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - // Instantiate new minio client object. 
- c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -13033,24 +12064,12 @@ func testGetObjectRanges() { defer cancel() rng := rand.NewSource(time.Now().UnixNano()) - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rng, "minio-go-test-") args["bucketName"] = bucketName @@ -13140,27 +12159,13 @@ func testGetObjectACLContext() { "bucketName": "", "objectName": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -13318,24 +12323,12 @@ func testPutObjectContextV2() { "size": "", "opts": "", } - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{CredsV2: true}) if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Make a new bucket. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -13390,27 +12383,13 @@ func testGetObjectContextV2() { "bucketName": "", "objectName": "", } - // Seed random based on current time. 
- rand.Seed(time.Now().Unix()) - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{CredsV2: true}) if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -13491,27 +12470,13 @@ func testFGetObjectContextV2() { "objectName": "", "fileName": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{CredsV2: true}) if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -13580,27 +12545,13 @@ func testListObjects() { "objectPrefix": "", "recursive": "true", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -13684,24 +12635,12 @@ func testCors() { "cors": "", } - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. 
- c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Create or reuse a bucket that will get cors settings applied to it and deleted when done bucketName := os.Getenv("MINIO_GO_TEST_BUCKET_CORS") if bucketName == "" { @@ -14420,24 +13359,12 @@ func testCorsSetGetDelete() { "cors": "", } - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -14519,27 +13446,13 @@ func testRemoveObjects() { "objectPrefix": "", "recursive": "true", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -14653,27 +13566,13 @@ func testGetBucketTagging() { args := map[string]interface{}{ "bucketName": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -14709,27 +13608,13 @@ func testSetBucketTagging() { "bucketName": "", "tags": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. 
- c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -14795,27 +13680,13 @@ func testRemoveBucketTagging() { args := map[string]interface{}{ "bucketName": "", } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) + c, err := NewClient(ClientConfig{}) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) return } - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) - // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -14961,6 +13832,7 @@ func main() { testGetObjectReadAtFunctional() testGetObjectReadAtWhenEOFWasReached() testPresignedPostPolicy() + testPresignedPostPolicyWrongFile() testCopyObject() testComposeObjectErrorCases() testCompose10KSources() diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go index d245bc07a3a..cd0a641bde1 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go @@ -76,7 +76,8 @@ type AssumeRoleResult struct { type STSAssumeRole struct { Expiry - // Required http Client to use when connecting to MinIO STS service. + // Optional http Client to use when connecting to MinIO STS service + // (overrides default client in CredContext) Client *http.Client // STS endpoint to fetch STS credentials. @@ -108,16 +109,10 @@ type STSAssumeRoleOptions struct { // NewSTSAssumeRole returns a pointer to a new // Credentials object wrapping the STSAssumeRole. func NewSTSAssumeRole(stsEndpoint string, opts STSAssumeRoleOptions) (*Credentials, error) { - if stsEndpoint == "" { - return nil, errors.New("STS endpoint cannot be empty") - } if opts.AccessKey == "" || opts.SecretKey == "" { return nil, errors.New("AssumeRole credentials access/secretkey is mandatory") } return New(&STSAssumeRole{ - Client: &http.Client{ - Transport: http.DefaultTransport, - }, STSEndpoint: stsEndpoint, Options: opts, }), nil @@ -222,10 +217,30 @@ func getAssumeRoleCredentials(clnt *http.Client, endpoint string, opts STSAssume return a, nil } -// Retrieve retrieves credentials from the MinIO service. -// Error will be returned if the request fails. -func (m *STSAssumeRole) Retrieve() (Value, error) { - a, err := getAssumeRoleCredentials(m.Client, m.STSEndpoint, m.Options) +// RetrieveWithCredContext retrieves credentials from the MinIO service. +// Error will be returned if the request fails, optional cred context. 
+func (m *STSAssumeRole) RetrieveWithCredContext(cc *CredContext) (Value, error) { + if cc == nil { + cc = defaultCredContext + } + + client := m.Client + if client == nil { + client = cc.Client + } + if client == nil { + client = defaultCredContext.Client + } + + stsEndpoint := m.STSEndpoint + if stsEndpoint == "" { + stsEndpoint = cc.Endpoint + } + if stsEndpoint == "" { + return Value{}, errors.New("STS endpoint unknown") + } + + a, err := getAssumeRoleCredentials(client, stsEndpoint, m.Options) if err != nil { return Value{}, err } @@ -241,3 +256,9 @@ func (m *STSAssumeRole) Retrieve() (Value, error) { SignerType: SignatureV4, }, nil } + +// Retrieve retrieves credentials from the MinIO service. +// Error will be returned if the request fails. +func (m *STSAssumeRole) Retrieve() (Value, error) { + return m.RetrieveWithCredContext(nil) +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go index ddccfb173fe..5ef3597d104 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go @@ -55,6 +55,24 @@ func NewChainCredentials(providers []Provider) *Credentials { }) } +// RetrieveWithCredContext is like Retrieve with CredContext +func (c *Chain) RetrieveWithCredContext(cc *CredContext) (Value, error) { + for _, p := range c.Providers { + creds, _ := p.RetrieveWithCredContext(cc) + // Always prioritize non-anonymous providers, if any. + if creds.AccessKeyID == "" && creds.SecretAccessKey == "" { + continue + } + c.curr = p + return creds, nil + } + // At this point we have exhausted all the providers and + // are left without any credentials return anonymous. + return Value{ + SignerType: SignatureAnonymous, + }, nil +} + // Retrieve returns the credentials value, returns no credentials(anonymous) // if no credentials provider returned any value. // diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go index 68f9b38157e..52aff9a57f6 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go @@ -18,6 +18,7 @@ package credentials import ( + "net/http" "sync" "time" ) @@ -30,6 +31,10 @@ const ( defaultExpiryWindow = 0.8 ) +// defaultCredContext is used when the credential context doesn't +// actually matter or the default context is suitable. +var defaultCredContext = &CredContext{Client: http.DefaultClient} + // A Value is the S3 credentials value for individual credential fields. type Value struct { // S3 Access key ID @@ -52,8 +57,17 @@ type Value struct { // Value. A provider is required to manage its own Expired state, and what to // be expired means. type Provider interface { + // RetrieveWithCredContext returns nil if it successfully retrieved the + // value. Error is returned if the value were not obtainable, or empty. + // optionally takes CredContext for additional context to retrieve credentials. + RetrieveWithCredContext(cc *CredContext) (Value, error) + // Retrieve returns nil if it successfully retrieved the value. // Error is returned if the value were not obtainable, or empty. + // + // Deprecated: Retrieve() exists for historical compatibility and should not + // be used. To get new credentials use the RetrieveWithCredContext function + // to ensure the proper context (i.e. HTTP client) will be used. 
Retrieve() (Value, error) // IsExpired returns if the credentials are no longer valid, and need @@ -61,6 +75,18 @@ type Provider interface { IsExpired() bool } +// CredContext is passed to the Retrieve function of a provider to provide +// some additional context to retrieve credentials. +type CredContext struct { + // Client specifies the HTTP client that should be used if an HTTP + // request is to be made to fetch the credentials. + Client *http.Client + + // Endpoint specifies the MinIO endpoint that will be used if no + // explicit endpoint is provided. + Endpoint string +} + // A Expiry provides shared expiration logic to be used by credentials // providers to implement expiry functionality. // @@ -146,16 +172,36 @@ func New(provider Provider) *Credentials { // // If Credentials.Expire() was called the credentials Value will be force // expired, and the next call to Get() will cause them to be refreshed. +// +// Deprecated: Get() exists for historical compatibility and should not be +// used. To get new credentials use the Credentials.GetWithContext function +// to ensure the proper context (i.e. HTTP client) will be used. func (c *Credentials) Get() (Value, error) { + return c.GetWithContext(nil) +} + +// GetWithContext returns the credentials value, or error if the +// credentials Value failed to be retrieved. +// +// Will return the cached credentials Value if it has not expired. If the +// credentials Value has expired the Provider's Retrieve() will be called +// to refresh the credentials. +// +// If Credentials.Expire() was called the credentials Value will be force +// expired, and the next call to Get() will cause them to be refreshed. +func (c *Credentials) GetWithContext(cc *CredContext) (Value, error) { if c == nil { return Value{}, nil } + if cc == nil { + cc = defaultCredContext + } c.Lock() defer c.Unlock() if c.isExpired() { - creds, err := c.provider.Retrieve() + creds, err := c.provider.RetrieveWithCredContext(cc) if err != nil { return Value{}, err } diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go index b6e60d0e165..21ab0a38a4d 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go @@ -37,8 +37,7 @@ func NewEnvAWS() *Credentials { return New(&EnvAWS{}) } -// Retrieve retrieves the keys from the environment. -func (e *EnvAWS) Retrieve() (Value, error) { +func (e *EnvAWS) retrieve() (Value, error) { e.retrieved = false id := os.Getenv("AWS_ACCESS_KEY_ID") @@ -65,6 +64,16 @@ func (e *EnvAWS) Retrieve() (Value, error) { }, nil } +// Retrieve retrieves the keys from the environment. +func (e *EnvAWS) Retrieve() (Value, error) { + return e.retrieve() +} + +// RetrieveWithCredContext is like Retrieve (no-op input of Cred Context) +func (e *EnvAWS) RetrieveWithCredContext(_ *CredContext) (Value, error) { + return e.retrieve() +} + // IsExpired returns if the credentials have been retrieved. func (e *EnvAWS) IsExpired() bool { return !e.retrieved diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go index 5bfeab140ae..dbfbdfcef1d 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go @@ -38,8 +38,7 @@ func NewEnvMinio() *Credentials { return New(&EnvMinio{}) } -// Retrieve retrieves the keys from the environment. 
-func (e *EnvMinio) Retrieve() (Value, error) { +func (e *EnvMinio) retrieve() (Value, error) { e.retrieved = false id := os.Getenv("MINIO_ROOT_USER") @@ -62,6 +61,16 @@ func (e *EnvMinio) Retrieve() (Value, error) { }, nil } +// Retrieve retrieves the keys from the environment. +func (e *EnvMinio) Retrieve() (Value, error) { + return e.retrieve() +} + +// RetrieveWithCredContext is like Retrieve() (no-op input cred context) +func (e *EnvMinio) RetrieveWithCredContext(_ *CredContext) (Value, error) { + return e.retrieve() +} + // IsExpired returns if the credentials have been retrieved. func (e *EnvMinio) IsExpired() bool { return !e.retrieved diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go index 541e1a72f0f..0c83fc7fa4c 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go @@ -71,9 +71,7 @@ func NewFileAWSCredentials(filename, profile string) *Credentials { }) } -// Retrieve reads and extracts the shared credentials from the current -// users home directory. -func (p *FileAWSCredentials) Retrieve() (Value, error) { +func (p *FileAWSCredentials) retrieve() (Value, error) { if p.Filename == "" { p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE") if p.Filename == "" { @@ -142,6 +140,17 @@ func (p *FileAWSCredentials) Retrieve() (Value, error) { }, nil } +// Retrieve reads and extracts the shared credentials from the current +// users home directory. +func (p *FileAWSCredentials) Retrieve() (Value, error) { + return p.retrieve() +} + +// RetrieveWithCredContext is like Retrieve(), cred context is no-op for File credentials +func (p *FileAWSCredentials) RetrieveWithCredContext(_ *CredContext) (Value, error) { + return p.retrieve() +} + // loadProfiles loads from the file pointed to by shared credentials filename for profile. // The credentials retrieved from the profile will be returned or error. Error will be // returned if it fails to read from the file, or the data is invalid. diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go index 750e26ffa8b..5805281fe9e 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go @@ -56,9 +56,7 @@ func NewFileMinioClient(filename, alias string) *Credentials { }) } -// Retrieve reads and extracts the shared credentials from the current -// users home directory. -func (p *FileMinioClient) Retrieve() (Value, error) { +func (p *FileMinioClient) retrieve() (Value, error) { if p.Filename == "" { if value, ok := os.LookupEnv("MINIO_SHARED_CREDENTIALS_FILE"); ok { p.Filename = value @@ -96,6 +94,17 @@ func (p *FileMinioClient) Retrieve() (Value, error) { }, nil } +// Retrieve reads and extracts the shared credentials from the current +// users home directory. +func (p *FileMinioClient) Retrieve() (Value, error) { + return p.retrieve() +} + +// RetrieveWithCredContext - is like Retrieve() +func (p *FileMinioClient) RetrieveWithCredContext(_ *CredContext) (Value, error) { + return p.retrieve() +} + // IsExpired returns if the shared credentials have expired. 
func (p *FileMinioClient) IsExpired() bool { return !p.retrieved diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go index ea4b3ef9375..e3230bb186d 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go @@ -49,7 +49,8 @@ const DefaultExpiryWindow = -1 type IAM struct { Expiry - // Required http Client to use when connecting to IAM metadata service. + // Optional http Client to use when connecting to IAM metadata service + // (overrides default client in CredContext) Client *http.Client // Custom endpoint to fetch IAM role credentials. @@ -90,17 +91,16 @@ const ( // NewIAM returns a pointer to a new Credentials object wrapping the IAM. func NewIAM(endpoint string) *Credentials { return New(&IAM{ - Client: &http.Client{ - Transport: http.DefaultTransport, - }, Endpoint: endpoint, }) } -// Retrieve retrieves credentials from the EC2 service. -// Error will be returned if the request fails, or unable to extract -// the desired -func (m *IAM) Retrieve() (Value, error) { +// RetrieveWithCredContext is like Retrieve with Cred Context +func (m *IAM) RetrieveWithCredContext(cc *CredContext) (Value, error) { + if cc == nil { + cc = defaultCredContext + } + token := os.Getenv("AWS_CONTAINER_AUTHORIZATION_TOKEN") if token == "" { token = m.Container.AuthorizationToken @@ -144,7 +144,16 @@ func (m *IAM) Retrieve() (Value, error) { var roleCreds ec2RoleCredRespBody var err error + client := m.Client + if client == nil { + client = cc.Client + } + if client == nil { + client = defaultCredContext.Client + } + endpoint := m.Endpoint + switch { case identityFile != "": if len(endpoint) == 0 { @@ -160,7 +169,7 @@ func (m *IAM) Retrieve() (Value, error) { } creds := &STSWebIdentity{ - Client: m.Client, + Client: client, STSEndpoint: endpoint, GetWebIDTokenExpiry: func() (*WebIdentityToken, error) { token, err := os.ReadFile(identityFile) @@ -174,7 +183,7 @@ func (m *IAM) Retrieve() (Value, error) { roleSessionName: roleSessionName, } - stsWebIdentityCreds, err := creds.Retrieve() + stsWebIdentityCreds, err := creds.RetrieveWithCredContext(cc) if err == nil { m.SetExpiration(creds.Expiration(), DefaultExpiryWindow) } @@ -185,11 +194,11 @@ func (m *IAM) Retrieve() (Value, error) { endpoint = fmt.Sprintf("%s%s", DefaultECSRoleEndpoint, relativeURI) } - roleCreds, err = getEcsTaskCredentials(m.Client, endpoint, token) + roleCreds, err = getEcsTaskCredentials(client, endpoint, token) case tokenFile != "" && fullURI != "": endpoint = fullURI - roleCreds, err = getEKSPodIdentityCredentials(m.Client, endpoint, tokenFile) + roleCreds, err = getEKSPodIdentityCredentials(client, endpoint, tokenFile) case fullURI != "": if len(endpoint) == 0 { @@ -203,10 +212,10 @@ func (m *IAM) Retrieve() (Value, error) { } } - roleCreds, err = getEcsTaskCredentials(m.Client, endpoint, token) + roleCreds, err = getEcsTaskCredentials(client, endpoint, token) default: - roleCreds, err = getCredentials(m.Client, endpoint) + roleCreds, err = getCredentials(client, endpoint) } if err != nil { @@ -224,6 +233,13 @@ func (m *IAM) Retrieve() (Value, error) { }, nil } +// Retrieve retrieves credentials from the EC2 service. 
+// Error will be returned if the request fails, or unable to extract +// the desired +func (m *IAM) Retrieve() (Value, error) { + return m.RetrieveWithCredContext(nil) +} + // A ec2RoleCredRespBody provides the shape for unmarshaling credential // request responses. type ec2RoleCredRespBody struct { diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go index 7dde00b0a16..d90c98c84d5 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go @@ -59,6 +59,11 @@ func (s *Static) Retrieve() (Value, error) { return s.Value, nil } +// RetrieveWithCredContext returns the static credentials. +func (s *Static) RetrieveWithCredContext(_ *CredContext) (Value, error) { + return s.Retrieve() +} + // IsExpired returns if the credentials are expired. // // For Static, the credentials never expired. diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go index 62bfbb6b02c..ef6f436b84b 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go @@ -72,7 +72,8 @@ type ClientGrantsToken struct { type STSClientGrants struct { Expiry - // Required http Client to use when connecting to MinIO STS service. + // Optional http Client to use when connecting to MinIO STS service. + // (overrides default client in CredContext) Client *http.Client // MinIO endpoint to fetch STS credentials. @@ -90,16 +91,10 @@ type STSClientGrants struct { // NewSTSClientGrants returns a pointer to a new // Credentials object wrapping the STSClientGrants. func NewSTSClientGrants(stsEndpoint string, getClientGrantsTokenExpiry func() (*ClientGrantsToken, error)) (*Credentials, error) { - if stsEndpoint == "" { - return nil, errors.New("STS endpoint cannot be empty") - } if getClientGrantsTokenExpiry == nil { return nil, errors.New("Client grants access token and expiry retrieval function should be defined") } return New(&STSClientGrants{ - Client: &http.Client{ - Transport: http.DefaultTransport, - }, STSEndpoint: stsEndpoint, GetClientGrantsTokenExpiry: getClientGrantsTokenExpiry, }), nil @@ -162,10 +157,29 @@ func getClientGrantsCredentials(clnt *http.Client, endpoint string, return a, nil } -// Retrieve retrieves credentials from the MinIO service. -// Error will be returned if the request fails. -func (m *STSClientGrants) Retrieve() (Value, error) { - a, err := getClientGrantsCredentials(m.Client, m.STSEndpoint, m.GetClientGrantsTokenExpiry) +// RetrieveWithCredContext is like Retrieve() with cred context +func (m *STSClientGrants) RetrieveWithCredContext(cc *CredContext) (Value, error) { + if cc == nil { + cc = defaultCredContext + } + + client := m.Client + if client == nil { + client = cc.Client + } + if client == nil { + client = defaultCredContext.Client + } + + stsEndpoint := m.STSEndpoint + if stsEndpoint == "" { + stsEndpoint = cc.Endpoint + } + if stsEndpoint == "" { + return Value{}, errors.New("STS endpoint unknown") + } + + a, err := getClientGrantsCredentials(client, stsEndpoint, m.GetClientGrantsTokenExpiry) if err != nil { return Value{}, err } @@ -181,3 +195,9 @@ func (m *STSClientGrants) Retrieve() (Value, error) { SignerType: SignatureV4, }, nil } + +// Retrieve retrieves credentials from the MinIO service. 
+// Error will be returned if the request fails. +func (m *STSClientGrants) Retrieve() (Value, error) { + return m.RetrieveWithCredContext(nil) +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go index 75e1a77d322..0021f9315b6 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_custom_identity.go @@ -53,6 +53,8 @@ type AssumeRoleWithCustomTokenResponse struct { type CustomTokenIdentity struct { Expiry + // Optional http Client to use when connecting to MinIO STS service. + // (overrides default client in CredContext) Client *http.Client // MinIO server STS endpoint to fetch STS credentials. @@ -69,9 +71,21 @@ type CustomTokenIdentity struct { RequestedExpiry time.Duration } -// Retrieve - to satisfy Provider interface; fetches credentials from MinIO. -func (c *CustomTokenIdentity) Retrieve() (value Value, err error) { - u, err := url.Parse(c.STSEndpoint) +// RetrieveWithCredContext with Retrieve optionally cred context +func (c *CustomTokenIdentity) RetrieveWithCredContext(cc *CredContext) (value Value, err error) { + if cc == nil { + cc = defaultCredContext + } + + stsEndpoint := c.STSEndpoint + if stsEndpoint == "" { + stsEndpoint = cc.Endpoint + } + if stsEndpoint == "" { + return Value{}, errors.New("STS endpoint unknown") + } + + u, err := url.Parse(stsEndpoint) if err != nil { return value, err } @@ -92,7 +106,15 @@ func (c *CustomTokenIdentity) Retrieve() (value Value, err error) { return value, err } - resp, err := c.Client.Do(req) + client := c.Client + if client == nil { + client = cc.Client + } + if client == nil { + client = defaultCredContext.Client + } + + resp, err := client.Do(req) if err != nil { return value, err } @@ -118,11 +140,15 @@ func (c *CustomTokenIdentity) Retrieve() (value Value, err error) { }, nil } +// Retrieve - to satisfy Provider interface; fetches credentials from MinIO. +func (c *CustomTokenIdentity) Retrieve() (value Value, err error) { + return c.RetrieveWithCredContext(nil) +} + // NewCustomTokenCredentials - returns credentials using the // AssumeRoleWithCustomToken STS API. func NewCustomTokenCredentials(stsEndpoint, token, roleArn string, optFuncs ...CustomTokenOpt) (*Credentials, error) { c := CustomTokenIdentity{ - Client: &http.Client{Transport: http.DefaultTransport}, STSEndpoint: stsEndpoint, Token: token, RoleArn: roleArn, diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go index b8df289f203..e63997e6ef2 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go @@ -20,6 +20,7 @@ package credentials import ( "bytes" "encoding/xml" + "errors" "fmt" "io" "net/http" @@ -55,7 +56,8 @@ type LDAPIdentityResult struct { type LDAPIdentity struct { Expiry - // Required http Client to use when connecting to MinIO STS service. + // Optional http Client to use when connecting to MinIO STS service. + // (overrides default client in CredContext) Client *http.Client // Exported STS endpoint to fetch STS credentials. @@ -77,7 +79,6 @@ type LDAPIdentity struct { // Identity. 
func NewLDAPIdentity(stsEndpoint, ldapUsername, ldapPassword string, optFuncs ...LDAPIdentityOpt) (*Credentials, error) { l := LDAPIdentity{ - Client: &http.Client{Transport: http.DefaultTransport}, STSEndpoint: stsEndpoint, LDAPUsername: ldapUsername, LDAPPassword: ldapPassword, @@ -113,7 +114,6 @@ func LDAPIdentityExpiryOpt(d time.Duration) LDAPIdentityOpt { // Deprecated: Use the `LDAPIdentityPolicyOpt` with `NewLDAPIdentity` instead. func NewLDAPIdentityWithSessionPolicy(stsEndpoint, ldapUsername, ldapPassword, policy string) (*Credentials, error) { return New(&LDAPIdentity{ - Client: &http.Client{Transport: http.DefaultTransport}, STSEndpoint: stsEndpoint, LDAPUsername: ldapUsername, LDAPPassword: ldapPassword, @@ -121,10 +121,22 @@ func NewLDAPIdentityWithSessionPolicy(stsEndpoint, ldapUsername, ldapPassword, p }), nil } -// Retrieve gets the credential by calling the MinIO STS API for +// RetrieveWithCredContext gets the credential by calling the MinIO STS API for // LDAP on the configured stsEndpoint. -func (k *LDAPIdentity) Retrieve() (value Value, err error) { - u, err := url.Parse(k.STSEndpoint) +func (k *LDAPIdentity) RetrieveWithCredContext(cc *CredContext) (value Value, err error) { + if cc == nil { + cc = defaultCredContext + } + + stsEndpoint := k.STSEndpoint + if stsEndpoint == "" { + stsEndpoint = cc.Endpoint + } + if stsEndpoint == "" { + return Value{}, errors.New("STS endpoint unknown") + } + + u, err := url.Parse(stsEndpoint) if err != nil { return value, err } @@ -148,7 +160,15 @@ func (k *LDAPIdentity) Retrieve() (value Value, err error) { req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - resp, err := k.Client.Do(req) + client := k.Client + if client == nil { + client = cc.Client + } + if client == nil { + client = defaultCredContext.Client + } + + resp, err := client.Do(req) if err != nil { return value, err } @@ -188,3 +208,9 @@ func (k *LDAPIdentity) Retrieve() (value Value, err error) { SignerType: SignatureV4, }, nil } + +// Retrieve gets the credential by calling the MinIO STS API for +// LDAP on the configured stsEndpoint. +func (k *LDAPIdentity) Retrieve() (value Value, err error) { + return k.RetrieveWithCredContext(defaultCredContext) +} diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go index 10083502d1d..c904bbeac78 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_tls_identity.go @@ -20,8 +20,8 @@ import ( "crypto/tls" "encoding/xml" "errors" + "fmt" "io" - "net" "net/http" "net/url" "strconv" @@ -36,7 +36,12 @@ type CertificateIdentityOption func(*STSCertificateIdentity) // CertificateIdentityWithTransport returns a CertificateIdentityOption that // customizes the STSCertificateIdentity with the given http.RoundTripper. 
func CertificateIdentityWithTransport(t http.RoundTripper) CertificateIdentityOption { - return CertificateIdentityOption(func(i *STSCertificateIdentity) { i.Client.Transport = t }) + return CertificateIdentityOption(func(i *STSCertificateIdentity) { + if i.Client == nil { + i.Client = &http.Client{} + } + i.Client.Transport = t + }) } // CertificateIdentityWithExpiry returns a CertificateIdentityOption that @@ -53,6 +58,10 @@ func CertificateIdentityWithExpiry(livetime time.Duration) CertificateIdentityOp type STSCertificateIdentity struct { Expiry + // Optional http Client to use when connecting to MinIO STS service. + // (overrides default client in CredContext) + Client *http.Client + // STSEndpoint is the base URL endpoint of the STS API. // For example, https://minio.local:9000 STSEndpoint string @@ -68,50 +77,18 @@ type STSCertificateIdentity struct { // The default livetime is one hour. S3CredentialLivetime time.Duration - // Client is the HTTP client used to authenticate and fetch - // S3 credentials. - // - // A custom TLS client configuration can be specified by - // using a custom http.Transport: - // Client: http.Client { - // Transport: &http.Transport{ - // TLSClientConfig: &tls.Config{}, - // }, - // } - Client http.Client + // Certificate is the client certificate that is used for + // STS authentication. + Certificate tls.Certificate } -var _ Provider = (*STSWebIdentity)(nil) // compiler check - // NewSTSCertificateIdentity returns a STSCertificateIdentity that authenticates // to the given STS endpoint with the given TLS certificate and retrieves and // rotates S3 credentials. func NewSTSCertificateIdentity(endpoint string, certificate tls.Certificate, options ...CertificateIdentityOption) (*Credentials, error) { - if endpoint == "" { - return nil, errors.New("STS endpoint cannot be empty") - } - if _, err := url.Parse(endpoint); err != nil { - return nil, err - } identity := &STSCertificateIdentity{ STSEndpoint: endpoint, - Client: http.Client{ - Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).DialContext, - ForceAttemptHTTP2: true, - MaxIdleConns: 100, - IdleConnTimeout: 90 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 5 * time.Second, - TLSClientConfig: &tls.Config{ - Certificates: []tls.Certificate{certificate}, - }, - }, - }, + Certificate: certificate, } for _, option := range options { option(identity) @@ -119,10 +96,21 @@ func NewSTSCertificateIdentity(endpoint string, certificate tls.Certificate, opt return New(identity), nil } -// Retrieve fetches a new set of S3 credentials from the configured -// STS API endpoint. 
-func (i *STSCertificateIdentity) Retrieve() (Value, error) { - endpointURL, err := url.Parse(i.STSEndpoint) +// RetrieveWithCredContext is Retrieve with cred context +func (i *STSCertificateIdentity) RetrieveWithCredContext(cc *CredContext) (Value, error) { + if cc == nil { + cc = defaultCredContext + } + + stsEndpoint := i.STSEndpoint + if stsEndpoint == "" { + stsEndpoint = cc.Endpoint + } + if stsEndpoint == "" { + return Value{}, errors.New("STS endpoint unknown") + } + + endpointURL, err := url.Parse(stsEndpoint) if err != nil { return Value{}, err } @@ -145,7 +133,28 @@ func (i *STSCertificateIdentity) Retrieve() (Value, error) { } req.Form.Add("DurationSeconds", strconv.FormatUint(uint64(livetime.Seconds()), 10)) - resp, err := i.Client.Do(req) + client := i.Client + if client == nil { + client = cc.Client + } + if client == nil { + client = defaultCredContext.Client + } + + tr, ok := client.Transport.(*http.Transport) + if !ok { + return Value{}, fmt.Errorf("CredContext should contain an http.Transport value") + } + + // Clone the HTTP transport (patch the TLS client certificate) + trCopy := tr.Clone() + trCopy.TLSClientConfig.Certificates = []tls.Certificate{i.Certificate} + + // Clone the HTTP client (patch the HTTP transport) + clientCopy := *client + clientCopy.Transport = trCopy + + resp, err := clientCopy.Do(req) if err != nil { return Value{}, err } @@ -193,6 +202,11 @@ func (i *STSCertificateIdentity) Retrieve() (Value, error) { }, nil } +// Retrieve fetches a new set of S3 credentials from the configured STS API endpoint. +func (i *STSCertificateIdentity) Retrieve() (Value, error) { + return i.RetrieveWithCredContext(defaultCredContext) +} + // Expiration returns the expiration time of the current S3 credentials. func (i *STSCertificateIdentity) Expiration() time.Time { return i.expiration } diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go index f1c76c78ea0..23525889301 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go @@ -58,9 +58,10 @@ type WebIdentityResult struct { // WebIdentityToken - web identity token with expiry. type WebIdentityToken struct { - Token string - AccessToken string - Expiry int + Token string + AccessToken string + RefreshToken string + Expiry int } // A STSWebIdentity retrieves credentials from MinIO service, and keeps track if @@ -68,7 +69,8 @@ type WebIdentityToken struct { type STSWebIdentity struct { Expiry - // Required http Client to use when connecting to MinIO STS service. + // Optional http Client to use when connecting to MinIO STS service. + // (overrides default client in CredContext) Client *http.Client // Exported STS endpoint to fetch STS credentials. @@ -96,16 +98,10 @@ type STSWebIdentity struct { // NewSTSWebIdentity returns a pointer to a new // Credentials object wrapping the STSWebIdentity. 
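The clone-and-patch step above attaches the client certificate without mutating a transport that may be shared through the `CredContext`. A hedged standalone sketch of the same idea (the nil guard on `TLSClientConfig` is an extra safety not present in the vendored code):

```go
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
)

// withClientCert returns a shallow copy of client whose transport presents
// cert, leaving the original client and transport untouched.
func withClientCert(client *http.Client, cert tls.Certificate) (*http.Client, bool) {
	tr, ok := client.Transport.(*http.Transport)
	if !ok {
		return nil, false
	}
	trCopy := tr.Clone()
	if trCopy.TLSClientConfig == nil {
		trCopy.TLSClientConfig = &tls.Config{}
	}
	trCopy.TLSClientConfig.Certificates = []tls.Certificate{cert}

	clientCopy := *client
	clientCopy.Transport = trCopy
	return &clientCopy, true
}

func main() {
	client := &http.Client{Transport: http.DefaultTransport}
	if c, ok := withClientCert(client, tls.Certificate{}); ok {
		fmt.Println(c != client) // true: the original client is untouched
	}
}
```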
func NewSTSWebIdentity(stsEndpoint string, getWebIDTokenExpiry func() (*WebIdentityToken, error), opts ...func(*STSWebIdentity)) (*Credentials, error) { - if stsEndpoint == "" { - return nil, errors.New("STS endpoint cannot be empty") - } if getWebIDTokenExpiry == nil { return nil, errors.New("Web ID token and expiry retrieval function should be defined") } i := &STSWebIdentity{ - Client: &http.Client{ - Transport: http.DefaultTransport, - }, STSEndpoint: stsEndpoint, GetWebIDTokenExpiry: getWebIDTokenExpiry, } @@ -161,6 +157,10 @@ func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSession // Usually set when server is using extended userInfo endpoint. v.Set("WebIdentityAccessToken", idToken.AccessToken) } + if idToken.RefreshToken != "" { + // Usually set when server is using extended userInfo endpoint. + v.Set("WebIdentityRefreshToken", idToken.RefreshToken) + } if idToken.Expiry > 0 { v.Set("DurationSeconds", fmt.Sprintf("%d", idToken.Expiry)) } @@ -214,10 +214,29 @@ func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSession return a, nil } -// Retrieve retrieves credentials from the MinIO service. -// Error will be returned if the request fails. -func (m *STSWebIdentity) Retrieve() (Value, error) { - a, err := getWebIdentityCredentials(m.Client, m.STSEndpoint, m.RoleARN, m.roleSessionName, m.Policy, m.GetWebIDTokenExpiry) +// RetrieveWithCredContext is like Retrieve with optional cred context. +func (m *STSWebIdentity) RetrieveWithCredContext(cc *CredContext) (Value, error) { + if cc == nil { + cc = defaultCredContext + } + + client := m.Client + if client == nil { + client = cc.Client + } + if client == nil { + client = defaultCredContext.Client + } + + stsEndpoint := m.STSEndpoint + if stsEndpoint == "" { + stsEndpoint = cc.Endpoint + } + if stsEndpoint == "" { + return Value{}, errors.New("STS endpoint unknown") + } + + a, err := getWebIdentityCredentials(client, stsEndpoint, m.RoleARN, m.roleSessionName, m.Policy, m.GetWebIDTokenExpiry) if err != nil { return Value{}, err } @@ -234,6 +253,12 @@ func (m *STSWebIdentity) Retrieve() (Value, error) { }, nil } +// Retrieve retrieves credentials from the MinIO service. +// Error will be returned if the request fails. +func (m *STSWebIdentity) Retrieve() (Value, error) { + return m.RetrieveWithCredContext(nil) +} + // Expiration returns the expiration time of the credentials func (m *STSWebIdentity) Expiration() time.Time { return m.expiration diff --git a/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go b/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go index 0e63ce2f7dc..80fd029d834 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go @@ -118,53 +118,53 @@ func GetRegionFromURL(endpointURL url.URL) string { if endpointURL == sentinelURL { return "" } - if endpointURL.Host == "s3-external-1.amazonaws.com" { + if endpointURL.Hostname() == "s3-external-1.amazonaws.com" { return "" } // if elb's are used we cannot calculate which region it may be, just return empty. 
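The switch from `endpointURL.Host` to `endpointURL.Hostname()` in this file matters whenever an endpoint carries an explicit port: `Host` keeps the `host:port` form (and IPv6 brackets), which the region-matching regexes would fail to match. A quick standard-library illustration:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	u, err := url.Parse("https://s3.dualstack.us-east-1.amazonaws.com:443/bucket")
	if err != nil {
		panic(err)
	}
	// Host retains the port; Hostname strips it, which is the form the
	// region regexes above expect.
	fmt.Println(u.Host)       // s3.dualstack.us-east-1.amazonaws.com:443
	fmt.Println(u.Hostname()) // s3.dualstack.us-east-1.amazonaws.com
}
```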
- if elbAmazonRegex.MatchString(endpointURL.Host) || elbAmazonCnRegex.MatchString(endpointURL.Host) { + if elbAmazonRegex.MatchString(endpointURL.Hostname()) || elbAmazonCnRegex.MatchString(endpointURL.Hostname()) { return "" } // We check for FIPS dualstack matching first to avoid the non-greedy // regex for FIPS non-dualstack matching a dualstack URL - parts := amazonS3HostFIPSDualStack.FindStringSubmatch(endpointURL.Host) + parts := amazonS3HostFIPSDualStack.FindStringSubmatch(endpointURL.Hostname()) if len(parts) > 1 { return parts[1] } - parts = amazonS3HostFIPS.FindStringSubmatch(endpointURL.Host) + parts = amazonS3HostFIPS.FindStringSubmatch(endpointURL.Hostname()) if len(parts) > 1 { return parts[1] } - parts = amazonS3HostDualStack.FindStringSubmatch(endpointURL.Host) + parts = amazonS3HostDualStack.FindStringSubmatch(endpointURL.Hostname()) if len(parts) > 1 { return parts[1] } - parts = amazonS3HostHyphen.FindStringSubmatch(endpointURL.Host) + parts = amazonS3HostHyphen.FindStringSubmatch(endpointURL.Hostname()) if len(parts) > 1 { return parts[1] } - parts = amazonS3ChinaHost.FindStringSubmatch(endpointURL.Host) + parts = amazonS3ChinaHost.FindStringSubmatch(endpointURL.Hostname()) if len(parts) > 1 { return parts[1] } - parts = amazonS3ChinaHostDualStack.FindStringSubmatch(endpointURL.Host) + parts = amazonS3ChinaHostDualStack.FindStringSubmatch(endpointURL.Hostname()) if len(parts) > 1 { return parts[1] } - parts = amazonS3HostDot.FindStringSubmatch(endpointURL.Host) + parts = amazonS3HostDot.FindStringSubmatch(endpointURL.Hostname()) if len(parts) > 1 { return parts[1] } - parts = amazonS3HostPrivateLink.FindStringSubmatch(endpointURL.Host) + parts = amazonS3HostPrivateLink.FindStringSubmatch(endpointURL.Hostname()) if len(parts) > 1 { return parts[1] } diff --git a/vendor/github.com/minio/minio-go/v7/post-policy.go b/vendor/github.com/minio/minio-go/v7/post-policy.go index 19687e027d0..26bf441b56f 100644 --- a/vendor/github.com/minio/minio-go/v7/post-policy.go +++ b/vendor/github.com/minio/minio-go/v7/post-policy.go @@ -85,7 +85,7 @@ func (p *PostPolicy) SetExpires(t time.Time) error { // SetKey - Sets an object name for the policy based upload. func (p *PostPolicy) SetKey(key string) error { - if strings.TrimSpace(key) == "" || key == "" { + if strings.TrimSpace(key) == "" { return errInvalidArgument("Object name is empty.") } policyCond := policyCondition{ @@ -118,7 +118,7 @@ func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error { // SetBucket - Sets bucket at which objects will be uploaded to. func (p *PostPolicy) SetBucket(bucketName string) error { - if strings.TrimSpace(bucketName) == "" || bucketName == "" { + if strings.TrimSpace(bucketName) == "" { return errInvalidArgument("Bucket name is empty.") } policyCond := policyCondition{ @@ -135,7 +135,7 @@ func (p *PostPolicy) SetBucket(bucketName string) error { // SetCondition - Sets condition for credentials, date and algorithm func (p *PostPolicy) SetCondition(matchType, condition, value string) error { - if strings.TrimSpace(value) == "" || value == "" { + if strings.TrimSpace(value) == "" { return errInvalidArgument("No value specified for condition") } @@ -156,7 +156,7 @@ func (p *PostPolicy) SetCondition(matchType, condition, value string) error { // SetTagging - Sets tagging for the object for this policy based upload. 
func (p *PostPolicy) SetTagging(tagging string) error { - if strings.TrimSpace(tagging) == "" || tagging == "" { + if strings.TrimSpace(tagging) == "" { return errInvalidArgument("No tagging specified.") } _, err := tags.ParseObjectXML(strings.NewReader(tagging)) @@ -178,7 +178,7 @@ func (p *PostPolicy) SetTagging(tagging string) error { // SetContentType - Sets content-type of the object for this policy // based upload. func (p *PostPolicy) SetContentType(contentType string) error { - if strings.TrimSpace(contentType) == "" || contentType == "" { + if strings.TrimSpace(contentType) == "" { return errInvalidArgument("No content type specified.") } policyCond := policyCondition{ @@ -211,7 +211,7 @@ func (p *PostPolicy) SetContentTypeStartsWith(contentTypeStartsWith string) erro // SetContentDisposition - Sets content-disposition of the object for this policy func (p *PostPolicy) SetContentDisposition(contentDisposition string) error { - if strings.TrimSpace(contentDisposition) == "" || contentDisposition == "" { + if strings.TrimSpace(contentDisposition) == "" { return errInvalidArgument("No content disposition specified.") } policyCond := policyCondition{ @@ -226,27 +226,44 @@ func (p *PostPolicy) SetContentDisposition(contentDisposition string) error { return nil } +// SetContentEncoding - Sets content-encoding of the object for this policy +func (p *PostPolicy) SetContentEncoding(contentEncoding string) error { + if strings.TrimSpace(contentEncoding) == "" { + return errInvalidArgument("No content encoding specified.") + } + policyCond := policyCondition{ + matchType: "eq", + condition: "$Content-Encoding", + value: contentEncoding, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["Content-Encoding"] = contentEncoding + return nil +} + // SetContentLengthRange - Set new min and max content length // condition for all incoming uploads. -func (p *PostPolicy) SetContentLengthRange(min, max int64) error { - if min > max { +func (p *PostPolicy) SetContentLengthRange(minLen, maxLen int64) error { + if minLen > maxLen { return errInvalidArgument("Minimum limit is larger than maximum limit.") } - if min < 0 { + if minLen < 0 { return errInvalidArgument("Minimum limit cannot be negative.") } - if max <= 0 { + if maxLen <= 0 { return errInvalidArgument("Maximum limit cannot be non-positive.") } - p.contentLengthRange.min = min - p.contentLengthRange.max = max + p.contentLengthRange.min = minLen + p.contentLengthRange.max = maxLen return nil } // SetSuccessActionRedirect - Sets the redirect success url of the object for this policy // based upload. func (p *PostPolicy) SetSuccessActionRedirect(redirect string) error { - if strings.TrimSpace(redirect) == "" || redirect == "" { + if strings.TrimSpace(redirect) == "" { return errInvalidArgument("Redirect is empty") } policyCond := policyCondition{ @@ -264,7 +281,7 @@ func (p *PostPolicy) SetSuccessActionRedirect(redirect string) error { // SetSuccessStatusAction - Sets the status success code of the object for this policy // based upload. func (p *PostPolicy) SetSuccessStatusAction(status string) error { - if strings.TrimSpace(status) == "" || status == "" { + if strings.TrimSpace(status) == "" { return errInvalidArgument("Status is empty") } policyCond := policyCondition{ @@ -282,10 +299,10 @@ func (p *PostPolicy) SetSuccessStatusAction(status string) error { // SetUserMetadata - Set user metadata as a key/value couple. // Can be retrieved through a HEAD request or an event. 
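For orientation, a hedged sketch of how the new `SetContentEncoding` condition (added above) might be used when presigning a browser upload; the bucket and object names are illustrative, and `PresignedPostPolicy` is the long-standing minio-go API for turning a policy into a POST URL plus form fields:

```go
package example

import (
	"context"
	"fmt"
	"time"

	"github.com/minio/minio-go/v7"
)

// presignGzipUpload builds a POST policy that only accepts objects
// declared as gzip-encoded.
func presignGzipUpload(ctx context.Context, c *minio.Client) error {
	policy := minio.NewPostPolicy()
	if err := policy.SetBucket("my-bucket"); err != nil {
		return err
	}
	if err := policy.SetKey("logs/app.log.gz"); err != nil {
		return err
	}
	if err := policy.SetExpires(time.Now().UTC().Add(10 * time.Minute)); err != nil {
		return err
	}
	// New in this version: constrain the POST to a Content-Encoding.
	if err := policy.SetContentEncoding("gzip"); err != nil {
		return err
	}
	u, formData, err := c.PresignedPostPolicy(ctx, policy)
	if err != nil {
		return err
	}
	fmt.Println("POST to:", u, "with fields:", formData)
	return nil
}
```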
func (p *PostPolicy) SetUserMetadata(key, value string) error { - if strings.TrimSpace(key) == "" || key == "" { + if strings.TrimSpace(key) == "" { return errInvalidArgument("Key is empty") } - if strings.TrimSpace(value) == "" || value == "" { + if strings.TrimSpace(value) == "" { return errInvalidArgument("Value is empty") } headerName := fmt.Sprintf("x-amz-meta-%s", key) @@ -304,7 +321,7 @@ func (p *PostPolicy) SetUserMetadata(key, value string) error { // SetUserMetadataStartsWith - Set how an user metadata should starts with. // Can be retrieved through a HEAD request or an event. func (p *PostPolicy) SetUserMetadataStartsWith(key, value string) error { - if strings.TrimSpace(key) == "" || key == "" { + if strings.TrimSpace(key) == "" { return errInvalidArgument("Key is empty") } headerName := fmt.Sprintf("x-amz-meta-%s", key) @@ -321,11 +338,29 @@ func (p *PostPolicy) SetUserMetadataStartsWith(key, value string) error { } // SetChecksum sets the checksum of the request. -func (p *PostPolicy) SetChecksum(c Checksum) { +func (p *PostPolicy) SetChecksum(c Checksum) error { if c.IsSet() { p.formData[amzChecksumAlgo] = c.Type.String() p.formData[c.Type.Key()] = c.Encoded() + + policyCond := policyCondition{ + matchType: "eq", + condition: fmt.Sprintf("$%s", amzChecksumAlgo), + value: c.Type.String(), + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + policyCond = policyCondition{ + matchType: "eq", + condition: fmt.Sprintf("$%s", c.Type.Key()), + value: c.Encoded(), + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } } + return nil } // SetEncryption - sets encryption headers for POST API diff --git a/vendor/github.com/minio/minio-go/v7/retry-continous.go b/vendor/github.com/minio/minio-go/v7/retry-continous.go index bfeea95f30d..81fcf16f1b9 100644 --- a/vendor/github.com/minio/minio-go/v7/retry-continous.go +++ b/vendor/github.com/minio/minio-go/v7/retry-continous.go @@ -20,7 +20,7 @@ package minio import "time" // newRetryTimerContinous creates a timer with exponentially increasing delays forever. -func (c *Client) newRetryTimerContinous(unit, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int { +func (c *Client) newRetryTimerContinous(baseSleep, maxSleep time.Duration, jitter float64, doneCh chan struct{}) <-chan int { attemptCh := make(chan int) // normalize jitter to the range [0, 1.0] @@ -39,10 +39,10 @@ func (c *Client) newRetryTimerContinous(unit, cap time.Duration, jitter float64, if attempt > maxAttempt { attempt = maxAttempt } - // sleep = random_between(0, min(cap, base * 2 ** attempt)) - sleep := unit * time.Duration(1<<uint(attempt)) - if sleep > cap { - sleep = cap + // sleep = random_between(0, min(maxSleep, base * 2 ** attempt)) + sleep := baseSleep * time.Duration(1<<uint(attempt)) + if sleep > maxSleep { + sleep = maxSleep } if jitter != NoJitter { sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter) diff --git a/vendor/github.com/minio/minio-go/v7/retry.go b/vendor/github.com/minio/minio-go/v7/retry.go index d15eb59013e..ed954017ca2 100644 --- a/vendor/github.com/minio/minio-go/v7/retry.go +++ b/vendor/github.com/minio/minio-go/v7/retry.go @@ -45,7 +45,7 @@ var DefaultRetryCap = time.Second // newRetryTimer creates a timer with exponentially increasing // delays until the maximum retry attempts are reached.
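The `unit`/`cap` → `baseSleep`/`maxSleep` renames in these retry files avoid shadowing Go's `cap` builtin; the backoff math itself is unchanged. As a standalone sketch of the capped, jittered exponential curve both retry timers compute (constants are illustrative; the package's own default cap is `DefaultRetryCap = time.Second`, and the vendored code draws from the client's seeded random source rather than the global one):

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// backoff caps baseSleep*2**attempt at maxSleep, then subtracts up to a
// jitter fraction of it, mirroring newRetryTimerContinous/newRetryTimer.
func backoff(attempt uint, baseSleep, maxSleep time.Duration, jitter float64) time.Duration {
	sleep := baseSleep * time.Duration(1<<attempt)
	if sleep > maxSleep {
		sleep = maxSleep
	}
	if jitter > 0 {
		sleep -= time.Duration(rand.Float64() * float64(sleep) * jitter)
	}
	return sleep
}

func main() {
	for attempt := uint(0); attempt < 5; attempt++ {
		fmt.Println(backoff(attempt, 200*time.Millisecond, time.Second, 0.5))
	}
}
```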
-func (c *Client) newRetryTimer(ctx context.Context, maxRetry int, unit, cap time.Duration, jitter float64) <-chan int { +func (c *Client) newRetryTimer(ctx context.Context, maxRetry int, baseSleep, maxSleep time.Duration, jitter float64) <-chan int { attemptCh := make(chan int) // computes the exponential backoff duration according to @@ -59,10 +59,10 @@ jitter = MaxJitter } - // sleep = random_between(0, min(cap, base * 2 ** attempt)) - sleep := unit * time.Duration(1<<uint(attempt)) - if sleep > cap { - sleep = cap + // sleep = random_between(0, min(maxSleep, base * 2 ** attempt)) + sleep := baseSleep * time.Duration(1<<uint(attempt)) + if sleep > maxSleep { + sleep = maxSleep } if jitter != NoJitter { sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter) @@ -112,6 +112,7 @@ func isS3CodeRetryable(s3Code string) (ok bool) { // List of HTTP status codes which are retryable. var retryableHTTPStatusCodes = map[int]struct{}{ + http.StatusRequestTimeout: {}, 429: {}, // http.StatusTooManyRequests is not part of the Go 1.5 library, yet 499: {}, // client closed request, retry. A non-standard status code introduced by nginx. http.StatusInternalServerError: {}, diff --git a/vendor/github.com/minio/minio-go/v7/utils.go b/vendor/github.com/minio/minio-go/v7/utils.go index a5beb371f2c..cd7d2c27e62 100644 --- a/vendor/github.com/minio/minio-go/v7/utils.go +++ b/vendor/github.com/minio/minio-go/v7/utils.go @@ -378,10 +378,11 @@ func ToObjectInfo(bucketName, objectName string, h http.Header) (ObjectInfo, err Restore: restore, // Checksum values - ChecksumCRC32: h.Get("x-amz-checksum-crc32"), - ChecksumCRC32C: h.Get("x-amz-checksum-crc32c"), - ChecksumSHA1: h.Get("x-amz-checksum-sha1"), - ChecksumSHA256: h.Get("x-amz-checksum-sha256"), + ChecksumCRC32: h.Get(ChecksumCRC32.Key()), + ChecksumCRC32C: h.Get(ChecksumCRC32C.Key()), + ChecksumSHA1: h.Get(ChecksumSHA1.Key()), + ChecksumSHA256: h.Get(ChecksumSHA256.Key()), + ChecksumCRC64NVME: h.Get(ChecksumCRC64NVME.Key()), }, nil } @@ -698,3 +699,146 @@ func (h *hashReaderWrapper) Read(p []byte) (n int, err error) { } return n, err } + +// Following is ported from C to Go in 2016 by Justin Ruggles, with minimal alteration. +// Used uint for unsigned long. Used uint32 for input arguments in order to match +// the Go hash/crc32 package. zlib CRC32 combine (https://github.com/madler/zlib) +// Modified for hash/crc64 by Klaus Post, 2024. +func gf2MatrixTimes(mat []uint64, vec uint64) uint64 { + var sum uint64 + + for vec != 0 { + if vec&1 != 0 { + sum ^= mat[0] + } + vec >>= 1 + mat = mat[1:] + } + return sum +} + +func gf2MatrixSquare(square, mat []uint64) { + if len(square) != len(mat) { + panic("square matrix size mismatch") + } + for n := range mat { + square[n] = gf2MatrixTimes(mat, mat[n]) + } +} + +// crc32Combine returns the combined CRC-32 hash value of the two passed CRC-32 +// hash values crc1 and crc2. poly represents the generator polynomial +// and len2 specifies the byte length that the crc2 hash covers.
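As context for `crc32Combine` (whose body follows): given `crc(A)` and `crc(B)`, it computes `crc(A||B)` using only `len(B)`, without re-reading `A`, which is what assembling a whole-object checksum from per-part checksums requires. A same-package, test-style sketch of that property (a hypothetical test file, since the helper is unexported; the Castagnoli table choice is illustrative):

```go
package minio

import (
	"hash/crc32"
	"testing"
)

// TestCRC32Combine checks that combining per-part CRCs matches the CRC of
// the concatenated payload.
func TestCRC32Combine(t *testing.T) {
	tbl := crc32.MakeTable(crc32.Castagnoli)
	a, b := []byte("hello, "), []byte("world")

	crcA := crc32.Checksum(a, tbl)
	crcB := crc32.Checksum(b, tbl)
	want := crc32.Checksum(append(append([]byte{}, a...), b...), tbl)

	if got := crc32Combine(crc32.Castagnoli, crcA, crcB, int64(len(b))); got != want {
		t.Fatalf("combine mismatch: got %08x want %08x", got, want)
	}
}
```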
+func crc32Combine(poly uint32, crc1, crc2 uint32, len2 int64) uint32 { + // degenerate case (also disallow negative lengths) + if len2 <= 0 { + return crc1 + } + + even := make([]uint64, 32) // even-power-of-two zeros operator + odd := make([]uint64, 32) // odd-power-of-two zeros operator + + // put operator for one zero bit in odd + odd[0] = uint64(poly) // CRC-32 polynomial + row := uint64(1) + for n := 1; n < 32; n++ { + odd[n] = row + row <<= 1 + } + + // put operator for two zero bits in even + gf2MatrixSquare(even, odd) + + // put operator for four zero bits in odd + gf2MatrixSquare(odd, even) + + // apply len2 zeros to crc1 (first square will put the operator for one + // zero byte, eight zero bits, in even) + crc1n := uint64(crc1) + for { + // apply zeros operator for this bit of len2 + gf2MatrixSquare(even, odd) + if len2&1 != 0 { + crc1n = gf2MatrixTimes(even, crc1n) + } + len2 >>= 1 + + // if no more bits set, then done + if len2 == 0 { + break + } + + // another iteration of the loop with odd and even swapped + gf2MatrixSquare(odd, even) + if len2&1 != 0 { + crc1n = gf2MatrixTimes(odd, crc1n) + } + len2 >>= 1 + + // if no more bits set, then done + if len2 == 0 { + break + } + } + + // return combined crc + crc1n ^= uint64(crc2) + return uint32(crc1n) +} + +func crc64Combine(poly uint64, crc1, crc2 uint64, len2 int64) uint64 { + // degenerate case (also disallow negative lengths) + if len2 <= 0 { + return crc1 + } + + even := make([]uint64, 64) // even-power-of-two zeros operator + odd := make([]uint64, 64) // odd-power-of-two zeros operator + + // put operator for one zero bit in odd + odd[0] = poly // CRC-64 polynomial + row := uint64(1) + for n := 1; n < 64; n++ { + odd[n] = row + row <<= 1 + } + + // put operator for two zero bits in even + gf2MatrixSquare(even, odd) + + // put operator for four zero bits in odd + gf2MatrixSquare(odd, even) + + // apply len2 zeros to crc1 (first square will put the operator for one + // zero byte, eight zero bits, in even) + crc1n := crc1 + for { + // apply zeros operator for this bit of len2 + gf2MatrixSquare(even, odd) + if len2&1 != 0 { + crc1n = gf2MatrixTimes(even, crc1n) + } + len2 >>= 1 + + // if no more bits set, then done + if len2 == 0 { + break + } + + // another iteration of the loop with odd and even swapped + gf2MatrixSquare(odd, even) + if len2&1 != 0 { + crc1n = gf2MatrixTimes(odd, crc1n) + } + len2 >>= 1 + + // if no more bits set, then done + if len2 == 0 { + break + } + } + + // return combined crc + crc1n ^= crc2 + return crc1n +} diff --git a/vendor/github.com/mitchellh/go-testing-interface/.travis.yml b/vendor/github.com/mitchellh/go-testing-interface/.travis.yml deleted file mode 100644 index 928d000ec49..00000000000 --- a/vendor/github.com/mitchellh/go-testing-interface/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: go - -go: - - 1.8 - - 1.x - - tip - -script: - - go test - -matrix: - allow_failures: - - go: tip diff --git a/vendor/github.com/mitchellh/go-testing-interface/LICENSE b/vendor/github.com/mitchellh/go-testing-interface/LICENSE deleted file mode 100644 index a3866a291fd..00000000000 --- a/vendor/github.com/mitchellh/go-testing-interface/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Mitchell Hashimoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, 
merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/go-testing-interface/README.md b/vendor/github.com/mitchellh/go-testing-interface/README.md deleted file mode 100644 index 26781bbae88..00000000000 --- a/vendor/github.com/mitchellh/go-testing-interface/README.md +++ /dev/null @@ -1,52 +0,0 @@ -# go-testing-interface - -go-testing-interface is a Go library that exports an interface that -`*testing.T` implements as well as a runtime version you can use in its -place. - -The purpose of this library is so that you can export test helpers as a -public API without depending on the "testing" package, since you can't -create a `*testing.T` struct manually. This lets you, for example, use the -public testing APIs to generate mock data at runtime, rather than just at -test time. - -## Usage & Example - -For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/go-testing-interface). - -Given a test helper written using `go-testing-interface` like this: - - import "github.com/mitchellh/go-testing-interface" - - func TestHelper(t testing.T) { - t.Fatal("I failed") - } - -You can call the test helper in a real test easily: - - import "testing" - - func TestThing(t *testing.T) { - TestHelper(t) - } - -You can also call the test helper at runtime if needed: - - import "github.com/mitchellh/go-testing-interface" - - func main() { - TestHelper(&testing.RuntimeT{}) - } - -## Why?! - -**Why would I call a test helper that takes a *testing.T at runtime?** - -You probably shouldn't. The only use case I've seen (and I've had) for this -is to implement a "dev mode" for a service where the test helpers are used -to populate mock data, create a mock DB, perhaps run service dependencies -in-memory, etc. - -Outside of a "dev mode", I've never seen a use case for this and I think -there shouldn't be one since the point of the `testing.T` interface is that -you can fail immediately. diff --git a/vendor/github.com/mitchellh/go-testing-interface/testing.go b/vendor/github.com/mitchellh/go-testing-interface/testing.go deleted file mode 100644 index 204afb42005..00000000000 --- a/vendor/github.com/mitchellh/go-testing-interface/testing.go +++ /dev/null @@ -1,84 +0,0 @@ -// +build !go1.9 - -package testing - -import ( - "fmt" - "log" -) - -// T is the interface that mimics the standard library *testing.T. -// -// In unit tests you can just pass a *testing.T struct. At runtime, outside -// of tests, you can pass in a RuntimeT struct from this package. 
-type T interface { - Error(args ...interface{}) - Errorf(format string, args ...interface{}) - Fail() - FailNow() - Failed() bool - Fatal(args ...interface{}) - Fatalf(format string, args ...interface{}) - Log(args ...interface{}) - Logf(format string, args ...interface{}) - Name() string - Skip(args ...interface{}) - SkipNow() - Skipf(format string, args ...interface{}) - Skipped() bool -} - -// RuntimeT implements T and can be instantiated and run at runtime to -// mimic *testing.T behavior. Unlike *testing.T, this will simply panic -// for calls to Fatal. For calls to Error, you'll have to check the errors -// list to determine whether to exit yourself. Name and Skip methods are -// unimplemented noops. -type RuntimeT struct { - failed bool -} - -func (t *RuntimeT) Error(args ...interface{}) { - log.Println(fmt.Sprintln(args...)) - t.Fail() -} - -func (t *RuntimeT) Errorf(format string, args ...interface{}) { - log.Println(fmt.Sprintf(format, args...)) - t.Fail() -} - -func (t *RuntimeT) Fatal(args ...interface{}) { - log.Println(fmt.Sprintln(args...)) - t.FailNow() -} - -func (t *RuntimeT) Fatalf(format string, args ...interface{}) { - log.Println(fmt.Sprintf(format, args...)) - t.FailNow() -} - -func (t *RuntimeT) Fail() { - t.failed = true -} - -func (t *RuntimeT) FailNow() { - panic("testing.T failed, see logs for output (if any)") -} - -func (t *RuntimeT) Failed() bool { - return t.failed -} - -func (t *RuntimeT) Log(args ...interface{}) { - log.Println(fmt.Sprintln(args...)) -} - -func (t *RuntimeT) Logf(format string, args ...interface{}) { - log.Println(fmt.Sprintf(format, args...)) -} - -func (t *RuntimeT) Name() string { return "" } -func (t *RuntimeT) Skip(args ...interface{}) {} -func (t *RuntimeT) SkipNow() {} -func (t *RuntimeT) Skipf(format string, args ...interface{}) {} -func (t *RuntimeT) Skipped() bool { return false } diff --git a/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go b/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go deleted file mode 100644 index 31b42cadf8d..00000000000 --- a/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go +++ /dev/null @@ -1,108 +0,0 @@ -// +build go1.9 - -// NOTE: This is a temporary copy of testing.go for Go 1.9 with the addition -// of "Helper" to the T interface. Go 1.9 at the time of typing is in RC -// and is set for release shortly. We'll support this on master as the default -// as soon as 1.9 is released. - -package testing - -import ( - "fmt" - "log" -) - -// T is the interface that mimics the standard library *testing.T. -// -// In unit tests you can just pass a *testing.T struct. At runtime, outside -// of tests, you can pass in a RuntimeT struct from this package. -type T interface { - Error(args ...interface{}) - Errorf(format string, args ...interface{}) - Fail() - FailNow() - Failed() bool - Fatal(args ...interface{}) - Fatalf(format string, args ...interface{}) - Log(args ...interface{}) - Logf(format string, args ...interface{}) - Name() string - Skip(args ...interface{}) - SkipNow() - Skipf(format string, args ...interface{}) - Skipped() bool - Helper() -} - -// RuntimeT implements T and can be instantiated and run at runtime to -// mimic *testing.T behavior. Unlike *testing.T, this will simply panic -// for calls to Fatal. For calls to Error, you'll have to check the errors -// list to determine whether to exit yourself. 
-type RuntimeT struct { - skipped bool - failed bool -} - -func (t *RuntimeT) Error(args ...interface{}) { - log.Println(fmt.Sprintln(args...)) - t.Fail() -} - -func (t *RuntimeT) Errorf(format string, args ...interface{}) { - log.Printf(format, args...) - t.Fail() -} - -func (t *RuntimeT) Fail() { - t.failed = true -} - -func (t *RuntimeT) FailNow() { - panic("testing.T failed, see logs for output (if any)") -} - -func (t *RuntimeT) Failed() bool { - return t.failed -} - -func (t *RuntimeT) Fatal(args ...interface{}) { - log.Print(args...) - t.FailNow() -} - -func (t *RuntimeT) Fatalf(format string, args ...interface{}) { - log.Printf(format, args...) - t.FailNow() -} - -func (t *RuntimeT) Log(args ...interface{}) { - log.Println(fmt.Sprintln(args...)) -} - -func (t *RuntimeT) Logf(format string, args ...interface{}) { - log.Println(fmt.Sprintf(format, args...)) -} - -func (t *RuntimeT) Name() string { - return "" -} - -func (t *RuntimeT) Skip(args ...interface{}) { - log.Print(args...) - t.SkipNow() -} - -func (t *RuntimeT) SkipNow() { - t.skipped = true -} - -func (t *RuntimeT) Skipf(format string, args ...interface{}) { - log.Printf(format, args...) - t.SkipNow() -} - -func (t *RuntimeT) Skipped() bool { - return t.skipped -} - -func (t *RuntimeT) Helper() {} diff --git a/vendor/github.com/mostynb/go-grpc-compression/internal/zstd/zstd.go b/vendor/github.com/mostynb/go-grpc-compression/internal/zstd/zstd.go new file mode 100644 index 00000000000..66e5091136a --- /dev/null +++ b/vendor/github.com/mostynb/go-grpc-compression/internal/zstd/zstd.go @@ -0,0 +1,146 @@ +// Copyright 2020 Mostyn Bramley-Moore. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package zstd is a wrapper for using github.com/klauspost/compress/zstd +// with gRPC. +package zstd + +import ( + "bytes" + "errors" + "io" + "runtime" + "sync" + + "github.com/klauspost/compress/zstd" + "google.golang.org/grpc/encoding" +) + +const Name = "zstd" + +var encoderOptions = []zstd.EOption{ + // The default zstd window size is 8MB, which is much larger than the + // typical RPC message and wastes a bunch of memory. + zstd.WithWindowSize(512 * 1024), +} + +var decoderOptions = []zstd.DOption{ + // If the decoder concurrency level is not 1, we would need to call + // Close() to avoid leaking resources when the object is released + // from compressor.decoderPool. + zstd.WithDecoderConcurrency(1), +} + +// We will set a finalizer on these objects, so when the go-grpc code is +// finished with them, they will be added back to compressor.decoderPool. +type decoderWrapper struct { + *zstd.Decoder +} + +type compressor struct { + encoder *zstd.Encoder + decoderPool sync.Pool // To hold *zstd.Decoder's. +} + +func PretendInit(clobbering bool) { + if !clobbering && encoding.GetCompressor(Name) != nil { + return + } + + enc, _ := zstd.NewWriter(nil, encoderOptions...) 
+ c := &compressor{ + encoder: enc, + } + encoding.RegisterCompressor(c) +} + +var ErrNotInUse = errors.New("SetLevel ineffective because another zstd compressor has been registered") + +// SetLevel updates the registered compressor to use a particular compression +// level. NOTE: this function must only be called from an init function, and +// is not threadsafe. +func SetLevel(level zstd.EncoderLevel) error { + c, ok := encoding.GetCompressor(Name).(*compressor) + if !ok { + return ErrNotInUse + } + + enc, err := zstd.NewWriter(nil, zstd.WithEncoderLevel(level)) + if err != nil { + return err + } + + c.encoder = enc + return nil +} + +func (c *compressor) Compress(w io.Writer) (io.WriteCloser, error) { + return &zstdWriteCloser{ + enc: c.encoder, + writer: w, + }, nil +} + +type zstdWriteCloser struct { + enc *zstd.Encoder + writer io.Writer // Compressed data will be written here. + buf bytes.Buffer // Buffer uncompressed data here, compress on Close. +} + +func (z *zstdWriteCloser) Write(p []byte) (int, error) { + return z.buf.Write(p) +} + +func (z *zstdWriteCloser) Close() error { + compressed := z.enc.EncodeAll(z.buf.Bytes(), nil) + _, err := io.Copy(z.writer, bytes.NewReader(compressed)) + return err +} + +func (c *compressor) Decompress(r io.Reader) (io.Reader, error) { + var err error + var found bool + var decoder *zstd.Decoder + + // Note: avoid the use of zstd.Decoder.DecodeAll here, since + // malicious payloads could DoS us with a decompression bomb. + + decoder, found = c.decoderPool.Get().(*zstd.Decoder) + if !found { + decoder, err = zstd.NewReader(r, decoderOptions...) + if err != nil { + return nil, err + } + } else { + err = decoder.Reset(r) + if err != nil { + c.decoderPool.Put(decoder) + return nil, err + } + } + + wrapper := &decoderWrapper{Decoder: decoder} + runtime.SetFinalizer(wrapper, func(dw *decoderWrapper) { + err := dw.Reset(nil) + if err == nil { + c.decoderPool.Put(dw.Decoder) + } + }) + + return wrapper, nil +} + +func (c *compressor) Name() string { + return Name +} diff --git a/vendor/github.com/mostynb/go-grpc-compression/nonclobbering/zstd/zstd.go b/vendor/github.com/mostynb/go-grpc-compression/nonclobbering/zstd/zstd.go new file mode 100644 index 00000000000..18b94d93fb0 --- /dev/null +++ b/vendor/github.com/mostynb/go-grpc-compression/nonclobbering/zstd/zstd.go @@ -0,0 +1,52 @@ +// Copyright 2023 Mostyn Bramley-Moore. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package github.com/mostynb/go-grpc-compression/nonclobbering/zstd is a +// wrapper for using github.com/klauspost/compress/zstd with gRPC. +// +// If you import this package, it will only register itself as the encoder +// for the "zstd" compressor if no other compressors have already been +// registered with that name. 
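A hedged sketch of how a gRPC client could opt in to the codec this package registers; it assumes a recent grpc-go (`grpc.NewClient`), a server that has also registered the compressor, and an illustrative target address:

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	// Registers the "zstd" codec unless another compressor already
	// claimed the name; zstd.Name is also exported for call options.
	"github.com/mostynb/go-grpc-compression/nonclobbering/zstd"
)

func main() {
	conn, err := grpc.NewClient("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		// Compress every outgoing RPC on this connection with zstd.
		grpc.WithDefaultCallOptions(grpc.UseCompressor(zstd.Name)),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	// ... create service stubs on conn and make calls as usual.
}
```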
+// +// If you do want to override previously registered "zstd" compressors, +// then you should instead import +// github.com/mostynb/go-grpc-compression/zstd +package zstd + +import ( + internalzstd "github.com/mostynb/go-grpc-compression/internal/zstd" + + "github.com/klauspost/compress/zstd" +) + +const Name = internalzstd.Name + +func init() { + clobbering := false + internalzstd.PretendInit(clobbering) +} + +var ErrNotInUse = internalzstd.ErrNotInUse + +// SetLevel updates the registered compressor to use a particular compression +// level. Returns ErrNotInUse if this module isn't registered (because it has +// been overridden by another encoder with the same name), or any error +// returned by zstd.NewWriter(nil, zstd.WithEncoderLevel(level). +// +// NOTE: this function is not threadsafe and must only be called from an init +// function or from the main goroutine before any other goroutines have been +// created. +func SetLevel(level zstd.EncoderLevel) error { + return internalzstd.SetLevel(level) +} diff --git a/vendor/github.com/oklog/run/.gitignore b/vendor/github.com/oklog/run/.gitignore deleted file mode 100644 index a1338d68517..00000000000 --- a/vendor/github.com/oklog/run/.gitignore +++ /dev/null @@ -1,14 +0,0 @@ -# Binaries for programs and plugins -*.exe -*.dll -*.so -*.dylib - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 -.glide/ diff --git a/vendor/github.com/oklog/run/README.md b/vendor/github.com/oklog/run/README.md deleted file mode 100644 index eba7d11cf3a..00000000000 --- a/vendor/github.com/oklog/run/README.md +++ /dev/null @@ -1,75 +0,0 @@ -# run - -[![GoDoc](https://godoc.org/github.com/oklog/run?status.svg)](https://godoc.org/github.com/oklog/run) -[![Build Status](https://img.shields.io/endpoint.svg?url=https%3A%2F%2Factions-badge.atrox.dev%2Foklog%2Frun%2Fbadge&style=flat-square&label=build)](https://github.com/oklog/run/actions?query=workflow%3ATest) -[![Go Report Card](https://goreportcard.com/badge/github.com/oklog/run)](https://goreportcard.com/report/github.com/oklog/run) -[![Apache 2 licensed](https://img.shields.io/badge/license-Apache2-blue.svg)](https://raw.githubusercontent.com/oklog/run/master/LICENSE) - -run.Group is a universal mechanism to manage goroutine lifecycles. - -Create a zero-value run.Group, and then add actors to it. Actors are defined as -a pair of functions: an **execute** function, which should run synchronously; -and an **interrupt** function, which, when invoked, should cause the execute -function to return. Finally, invoke Run, which concurrently runs all of the -actors, waits until the first actor exits, invokes the interrupt functions, and -finally returns control to the caller only once all actors have returned. This -general-purpose API allows callers to model pretty much any runnable task, and -achieve well-defined lifecycle semantics for the group. - -run.Group was written to manage component lifecycles in func main for -[OK Log](https://github.com/oklog/oklog). -But it's useful in any circumstance where you need to orchestrate multiple -goroutines as a unit whole. -[Click here](https://www.youtube.com/watch?v=LHe1Cb_Ud_M&t=15m45s) to see a -video of a talk where run.Group is described. - -## Examples - -### context.Context - -```go -ctx, cancel := context.WithCancel(context.Background()) -g.Add(func() error { - return myProcess(ctx, ...) 
-}, func(error) { - cancel() -}) -``` - -### net.Listener - -```go -ln, _ := net.Listen("tcp", ":8080") -g.Add(func() error { - return http.Serve(ln, nil) -}, func(error) { - ln.Close() -}) -``` - -### io.ReadCloser - -```go -var conn io.ReadCloser = ... -g.Add(func() error { - s := bufio.NewScanner(conn) - for s.Scan() { - println(s.Text()) - } - return s.Err() -}, func(error) { - conn.Close() -}) -``` - -## Comparisons - -Package run is somewhat similar to package -[errgroup](https://godoc.org/golang.org/x/sync/errgroup), -except it doesn't require actor goroutines to understand context semantics. - -It's somewhat similar to package -[tomb.v1](https://godoc.org/gopkg.in/tomb.v1) or -[tomb.v2](https://godoc.org/gopkg.in/tomb.v2), -except it has a much smaller API surface, delegating e.g. staged shutdown of -goroutines to the caller. diff --git a/vendor/github.com/oklog/run/actors.go b/vendor/github.com/oklog/run/actors.go deleted file mode 100644 index ef93495d3f0..00000000000 --- a/vendor/github.com/oklog/run/actors.go +++ /dev/null @@ -1,38 +0,0 @@ -package run - -import ( - "context" - "fmt" - "os" - "os/signal" -) - -// SignalHandler returns an actor, i.e. an execute and interrupt func, that -// terminates with SignalError when the process receives one of the provided -// signals, or the parent context is canceled. -func SignalHandler(ctx context.Context, signals ...os.Signal) (execute func() error, interrupt func(error)) { - ctx, cancel := context.WithCancel(ctx) - return func() error { - c := make(chan os.Signal, 1) - signal.Notify(c, signals...) - select { - case sig := <-c: - return SignalError{Signal: sig} - case <-ctx.Done(): - return ctx.Err() - } - }, func(error) { - cancel() - } -} - -// SignalError is returned by the signal handler's execute function -// when it terminates due to a received signal. -type SignalError struct { - Signal os.Signal -} - -// Error implements the error interface. -func (e SignalError) Error() string { - return fmt.Sprintf("received signal %s", e.Signal) -} diff --git a/vendor/github.com/oklog/run/group.go b/vendor/github.com/oklog/run/group.go deleted file mode 100644 index 832d47dd169..00000000000 --- a/vendor/github.com/oklog/run/group.go +++ /dev/null @@ -1,62 +0,0 @@ -// Package run implements an actor-runner with deterministic teardown. It is -// somewhat similar to package errgroup, except it does not require actor -// goroutines to understand context semantics. This makes it suitable for use in -// more circumstances; for example, goroutines which are handling connections -// from net.Listeners, or scanning input from a closable io.Reader. -package run - -// Group collects actors (functions) and runs them concurrently. -// When one actor (function) returns, all actors are interrupted. -// The zero value of a Group is useful. -type Group struct { - actors []actor -} - -// Add an actor (function) to the group. Each actor must be pre-emptable by an -// interrupt function. That is, if interrupt is invoked, execute should return. -// Also, it must be safe to call interrupt even after execute has returned. -// -// The first actor (function) to return interrupts all running actors. -// The error is passed to the interrupt functions, and is returned by Run. -func (g *Group) Add(execute func() error, interrupt func(error)) { - g.actors = append(g.actors, actor{execute, interrupt}) -} - -// Run all actors (functions) concurrently. -// When the first actor returns, all others are interrupted. -// Run only returns when all actors have exited. 
-// Run returns the error returned by the first exiting actor. -func (g *Group) Run() error { - if len(g.actors) == 0 { - return nil - } - - // Run each actor. - errors := make(chan error, len(g.actors)) - for _, a := range g.actors { - go func(a actor) { - errors <- a.execute() - }(a) - } - - // Wait for the first actor to stop. - err := <-errors - - // Signal all actors to stop. - for _, a := range g.actors { - a.interrupt(err) - } - - // Wait for all actors to stop. - for i := 1; i < cap(errors); i++ { - <-errors - } - - // Return the original error. - return err -} - -type actor struct { - execute func() error - interrupt func(error) -} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/README.md index d5ac9644199..1fcb128a8a5 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/README.md +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/README.md @@ -8,7 +8,7 @@ | Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Aexporter%2Fkafka%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Aexporter%2Fkafka) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Aexporter%2Fkafka%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Aexporter%2Fkafka) | | [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@pavolloffay](https://www.github.com/pavolloffay), [@MovieStoreGuy](https://www.github.com/MovieStoreGuy) | -[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta +[beta]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#beta [core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol [contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib @@ -24,8 +24,8 @@ The following settings can be optionally configured: - `brokers` (default = localhost:9092): The list of kafka brokers. - `resolve_canonical_bootstrap_servers_only` (default = false): Whether to resolve then reverse-lookup broker IPs during startup. - `client_id` (default = "sarama"): The client ID to configure the Sarama Kafka client with. The client ID will be used for all produce requests. -- `topic` (default = otlp_spans for traces, otlp_metrics for metrics, otlp_logs for logs): The name of the kafka topic to export to. -- `topic_from_attribute` (default = ""): Specify the resource attribute whose value should be used as the message's topic. This option, when set, will take precedence over the default topic. If `topic_from_attribute` is not set, the message's topic will be set to the value of the configuration option `topic` instead. +- `topic` (default = otlp_spans for traces, otlp_metrics for metrics, otlp_logs for logs): The name of the default kafka topic to export to. See [Destination Topic](#destination-topic) below for more details. 
+- `topic_from_attribute` (default = ""): Specify the resource attribute whose value should be used as the message's topic. See [Destination Topic](#destination-topic) below for more details. - `encoding` (default = otlp_proto): The encoding of the traces sent to kafka. All available encodings: - `otlp_proto`: payload is Protobuf serialized from `ExportTraceServiceRequest` if set as a traces exporter or `ExportMetricsServiceRequest` for metrics or `ExportLogsServiceRequest` for logs. - `otlp_json`: payload is JSON serialized from `ExportTraceServiceRequest` if set as a traces exporter or `ExportMetricsServiceRequest` for metrics or `ExportLogsServiceRequest` for logs. @@ -38,6 +38,7 @@ The following settings can be optionally configured: - `raw`: if the log record body is a byte array, it is sent as is. Otherwise, it is serialized to JSON. Resource and record attributes are discarded. - `partition_traces_by_id` (default = false): configures the exporter to include the trace ID as the message key in trace messages sent to kafka. *Please note:* this setting does not have any effect on Jaeger encoding exporters since Jaeger exporters include trace ID as the message key by default. - `partition_metrics_by_resource_attributes` (default = false) configures the exporter to include the hash of sorted resource attributes as the message partitioning key in metric messages sent to kafka. +- `partition_logs_by_resource_attributes` (default = false) configures the exporter to include the hash of sorted resource attributes as the message partitioning key in log messages sent to kafka. - `auth` - `plain_text` - `username`: The username to use. @@ -45,9 +46,9 @@ The following settings can be optionally configured: - `sasl` - `username`: The username to use. - `password`: The password to use - - `mechanism`: The SASL mechanism to use (SCRAM-SHA-256, SCRAM-SHA-512, AWS_MSK_IAM or PLAIN) + - `mechanism`: The SASL mechanism to use (SCRAM-SHA-256, SCRAM-SHA-512, AWS_MSK_IAM, AWS_MSK_IAM_OAUTHBEARER or PLAIN) - `version` (default = 0): The SASL protocol version to use (0 or 1) - - `aws_msk.region`: AWS Region in case of AWS_MSK_IAM mechanism + - `aws_msk.region`: AWS Region in case of AWS_MSK_IAM or AWS_MSK_IAM_OAUTHBEARER mechanism - `aws_msk.broker_addr`: MSK Broker address in case of AWS_MSK_IAM mechanism - `tls`: see [TLS Configuration Settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md) for the full set of available options. - `ca_file`: path to the CA cert. For a client this verifies the server certificate. Should @@ -68,6 +69,7 @@ The following settings can be optionally configured: - `password`: The Kerberos password used for authenticate with KDC - `config_file`: Path to Kerberos configuration. i.e /etc/krb5.conf - `keytab_file`: Path to keytab file. i.e /etc/security/kafka.keytab + - `disable_fast_negotiation`: Disable PA-FX-FAST negotiation (Pre-Authentication Framework - Fast). Some common Kerberos implementations do not support PA-FX-FAST negotiation. This is set to `false` by default. - `metadata` - `full` (default = true): Whether to maintain a full set of metadata. When disabled, the client does not make the initial request to broker at the @@ -103,3 +105,9 @@ exporters: - localhost:9092 protocol_version: 2.0.0 ``` + +## Destination Topic +The destination topic can be defined in a few different ways and takes priority in the following order: +1. 
When `topic_from_attribute` is configured, and the corresponding attribute is found on the ingested data, the value of this attribute is used. +2. If a prior component in the collector pipeline sets the topic on the context via the `topic.WithTopic` function (from the `github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic` package), the value set in the context is used. +3. Finally, the `topic` configuration is used as a default/fallback destination. diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/config.go index d048b27a88f..aac2cf77c09 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/config.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/config.go @@ -17,9 +17,9 @@ import ( // Config defines configuration for Kafka exporter. type Config struct { - exporterhelper.TimeoutSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct. - exporterhelper.QueueSettings `mapstructure:"sending_queue"` - configretry.BackOffConfig `mapstructure:"retry_on_failure"` + TimeoutSettings exporterhelper.TimeoutConfig `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct. + QueueSettings exporterhelper.QueueConfig `mapstructure:"sending_queue"` + configretry.BackOffConfig `mapstructure:"retry_on_failure"` // The list of kafka brokers (default localhost:9092) Brokers []string `mapstructure:"brokers"` @@ -53,6 +53,8 @@ type Config struct { PartitionMetricsByResourceAttributes bool `mapstructure:"partition_metrics_by_resource_attributes"` + PartitionLogsByResourceAttributes bool `mapstructure:"partition_logs_by_resource_attributes"` + // Metadata is the namespace for metadata management properties used by the // Client, and shared by the Producer/Consumer. Metadata Metadata `mapstructure:"metadata"` @@ -142,10 +144,10 @@ func validateSASLConfig(c *kafka.SASLConfig) error { } switch c.Mechanism { - case "PLAIN", "AWS_MSK_IAM", "SCRAM-SHA-256", "SCRAM-SHA-512": + case "PLAIN", "AWS_MSK_IAM", "AWS_MSK_IAM_OAUTHBEARER", "SCRAM-SHA-256", "SCRAM-SHA-512": // Do nothing, valid mechanism default: - return fmt.Errorf("auth.sasl.mechanism should be one of 'PLAIN', 'AWS_MSK_IAM', 'SCRAM-SHA-256' or 'SCRAM-SHA-512'. configured value %v", c.Mechanism) + return fmt.Errorf("auth.sasl.mechanism should be one of 'PLAIN', 'AWS_MSK_IAM', 'AWS_MSK_IAM_OAUTHBEARER', 'SCRAM-SHA-256' or 'SCRAM-SHA-512'. configured value %v", c.Mechanism) } if c.Version < 0 || c.Version > 1 { diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/factory.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/factory.go index d990a17dab8..eb366d7cfa8 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/factory.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/factory.go @@ -40,45 +40,16 @@ const ( defaultFluxMaxMessages = 0 // partitioning metrics by resource attributes is disabled by default defaultPartitionMetricsByResourceAttributesEnabled = false + // partitioning logs by resource attributes is disabled by default + defaultPartitionLogsByResourceAttributesEnabled = false ) // FactoryOption applies changes to kafkaExporterFactory. 
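The precedence described in the Destination Topic section above can be sketched standalone. In the sketch below, `resolveTopic` is a hypothetical helper rather than exporter code; `topic.FromContext` appears verbatim in the vendored `getTopic` implementation further down, while the `topic.WithTopic` signature is assumed from the README's description:

```go
package main

import (
	"context"
	"fmt"

	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic"
)

// resolveTopic mirrors the documented precedence: a topic_from_attribute
// match wins, then a context override, then the configured default.
// It is a hypothetical helper, not part of the exporter.
func resolveTopic(ctx context.Context, attrTopic, configuredTopic string) string {
	if attrTopic != "" { // 1. resource attribute named by topic_from_attribute
		return attrTopic
	}
	if t, ok := topic.FromContext(ctx); ok { // 2. set upstream via topic.WithTopic
		return t
	}
	return configuredTopic // 3. the `topic` setting as fallback
}

func main() {
	ctx := topic.WithTopic(context.Background(), "tenant-a-spans")
	fmt.Println(resolveTopic(ctx, "", "otlp_spans")) // tenant-a-spans
}
```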
type FactoryOption func(factory *kafkaExporterFactory) -// withTracesMarshalers adds tracesMarshalers. -func withTracesMarshalers(tracesMarshalers ...TracesMarshaler) FactoryOption { - return func(factory *kafkaExporterFactory) { - for _, marshaler := range tracesMarshalers { - factory.tracesMarshalers[marshaler.Encoding()] = marshaler - } - } -} - -// withMetricsMarshalers adds additional metric marshalers to the exporter factory. -func withMetricsMarshalers(metricMarshalers ...MetricsMarshaler) FactoryOption { - return func(factory *kafkaExporterFactory) { - for _, marshaler := range metricMarshalers { - factory.metricsMarshalers[marshaler.Encoding()] = marshaler - } - } -} - -// withLogsMarshalers adds additional log marshalers to the exporter factory. -func withLogsMarshalers(logsMarshalers ...LogsMarshaler) FactoryOption { - return func(factory *kafkaExporterFactory) { - for _, marshaler := range logsMarshalers { - factory.logsMarshalers[marshaler.Encoding()] = marshaler - } - } -} - // NewFactory creates Kafka exporter factory. func NewFactory(options ...FactoryOption) exporter.Factory { - f := &kafkaExporterFactory{ - tracesMarshalers: tracesMarshalers(), - metricsMarshalers: metricsMarshalers(), - logsMarshalers: logsMarshalers(), - } + f := &kafkaExporterFactory{} for _, o := range options { o(f) } @@ -93,15 +64,16 @@ func NewFactory(options ...FactoryOption) exporter.Factory { func createDefaultConfig() component.Config { return &Config{ - TimeoutSettings: exporterhelper.NewDefaultTimeoutSettings(), + TimeoutSettings: exporterhelper.NewDefaultTimeoutConfig(), BackOffConfig: configretry.NewDefaultBackOffConfig(), - QueueSettings: exporterhelper.NewDefaultQueueSettings(), + QueueSettings: exporterhelper.NewDefaultQueueConfig(), Brokers: []string{defaultBroker}, ClientID: defaultClientID, // using an empty topic to track when it has not been set by user, default is based on traces or metrics. Topic: "", Encoding: defaultEncoding, PartitionMetricsByResourceAttributes: defaultPartitionMetricsByResourceAttributesEnabled, + PartitionLogsByResourceAttributes: defaultPartitionLogsByResourceAttributesEnabled, Metadata: Metadata{ Full: defaultMetadataFull, Retry: MetadataRetry{ @@ -118,15 +90,11 @@ func createDefaultConfig() component.Config { } } -type kafkaExporterFactory struct { - tracesMarshalers map[string]TracesMarshaler - metricsMarshalers map[string]MetricsMarshaler - logsMarshalers map[string]LogsMarshaler -} +type kafkaExporterFactory struct{} func (f *kafkaExporterFactory) createTracesExporter( ctx context.Context, - set exporter.CreateSettings, + set exporter.Settings, cfg component.Config, ) (exporter.Traces, error) { oCfg := *(cfg.(*Config)) // Clone the config @@ -136,11 +104,8 @@ func (f *kafkaExporterFactory) createTracesExporter( if oCfg.Encoding == "otlp_json" { set.Logger.Info("otlp_json is considered experimental and should not be used in a production environment") } - exp, err := newTracesExporter(oCfg, set, f.tracesMarshalers) - if err != nil { - return nil, err - } - return exporterhelper.NewTracesExporter( + exp := newTracesExporter(oCfg, set) + return exporterhelper.NewTraces( ctx, set, &oCfg, @@ -148,7 +113,7 @@ func (f *kafkaExporterFactory) createTracesExporter( exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), // Disable exporterhelper Timeout, because we cannot pass a Context to the Producer, // and will rely on the sarama Producer Timeout logic. 
- exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}), + exporterhelper.WithTimeout(exporterhelper.TimeoutConfig{Timeout: 0}), exporterhelper.WithRetry(oCfg.BackOffConfig), exporterhelper.WithQueue(oCfg.QueueSettings), exporterhelper.WithStart(exp.start), @@ -157,7 +122,7 @@ func (f *kafkaExporterFactory) createTracesExporter( func (f *kafkaExporterFactory) createMetricsExporter( ctx context.Context, - set exporter.CreateSettings, + set exporter.Settings, cfg component.Config, ) (exporter.Metrics, error) { oCfg := *(cfg.(*Config)) // Clone the config @@ -167,11 +132,8 @@ func (f *kafkaExporterFactory) createMetricsExporter( if oCfg.Encoding == "otlp_json" { set.Logger.Info("otlp_json is considered experimental and should not be used in a production environment") } - exp, err := newMetricsExporter(oCfg, set, f.metricsMarshalers) - if err != nil { - return nil, err - } - return exporterhelper.NewMetricsExporter( + exp := newMetricsExporter(oCfg, set) + return exporterhelper.NewMetrics( ctx, set, &oCfg, @@ -179,7 +141,7 @@ func (f *kafkaExporterFactory) createMetricsExporter( exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), // Disable exporterhelper Timeout, because we cannot pass a Context to the Producer, // and will rely on the sarama Producer Timeout logic. - exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}), + exporterhelper.WithTimeout(exporterhelper.TimeoutConfig{Timeout: 0}), exporterhelper.WithRetry(oCfg.BackOffConfig), exporterhelper.WithQueue(oCfg.QueueSettings), exporterhelper.WithStart(exp.start), @@ -188,7 +150,7 @@ func (f *kafkaExporterFactory) createMetricsExporter( func (f *kafkaExporterFactory) createLogsExporter( ctx context.Context, - set exporter.CreateSettings, + set exporter.Settings, cfg component.Config, ) (exporter.Logs, error) { oCfg := *(cfg.(*Config)) // Clone the config @@ -198,11 +160,8 @@ func (f *kafkaExporterFactory) createLogsExporter( if oCfg.Encoding == "otlp_json" { set.Logger.Info("otlp_json is considered experimental and should not be used in a production environment") } - exp, err := newLogsExporter(oCfg, set, f.logsMarshalers) - if err != nil { - return nil, err - } - return exporterhelper.NewLogsExporter( + exp := newLogsExporter(oCfg, set) + return exporterhelper.NewLogs( ctx, set, &oCfg, @@ -210,7 +169,7 @@ func (f *kafkaExporterFactory) createLogsExporter( exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), // Disable exporterhelper Timeout, because we cannot pass a Context to the Producer, // and will rely on the sarama Producer Timeout logic. 
- exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}), + exporterhelper.WithTimeout(exporterhelper.TimeoutConfig{Timeout: 0}), exporterhelper.WithRetry(oCfg.BackOffConfig), exporterhelper.WithQueue(oCfg.QueueSettings), exporterhelper.WithStart(exp.start), diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/internal/metadata/generated_status.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/internal/metadata/generated_status.go index 63e9bce921a..e139063fd64 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/internal/metadata/generated_status.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/internal/metadata/generated_status.go @@ -7,7 +7,8 @@ import ( ) var ( - Type = component.MustNewType("kafka") + Type = component.MustNewType("kafka") + ScopeName = "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter" ) const ( diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/internal/metadata/generated_telemetry.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/internal/metadata/generated_telemetry.go deleted file mode 100644 index 5f4488d9269..00000000000 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/internal/metadata/generated_telemetry.go +++ /dev/null @@ -1,17 +0,0 @@ -// Code generated by mdatagen. DO NOT EDIT. - -package metadata - -import ( - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/trace" -) - -func Meter(settings component.TelemetrySettings) metric.Meter { - return settings.MeterProvider.Meter("otelcol/kafka") -} - -func Tracer(settings component.TelemetrySettings) trace.Tracer { - return settings.TracerProvider.Tracer("otelcol/kafka") -} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/jaeger_marshaler.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/jaeger_marshaler.go index abc73c22f18..50640c83763 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/jaeger_marshaler.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/jaeger_marshaler.go @@ -22,10 +22,7 @@ type jaegerMarshaler struct { var _ TracesMarshaler = (*jaegerMarshaler)(nil) func (j jaegerMarshaler) Marshal(traces ptrace.Traces, topic string) ([]*sarama.ProducerMessage, error) { - batches, err := jaeger.ProtoFromTraces(traces) - if err != nil { - return nil, err - } + batches := jaeger.ProtoFromTraces(traces) var messages []*sarama.ProducerMessage var errs error @@ -58,8 +55,7 @@ type jaegerSpanMarshaler interface { encoding() string } -type jaegerProtoSpanMarshaler struct { -} +type jaegerProtoSpanMarshaler struct{} var _ jaegerSpanMarshaler = (*jaegerProtoSpanMarshaler)(nil) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/kafka_exporter.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/kafka_exporter.go index 59fc26e647b..6720237fabe 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/kafka_exporter.go +++ 
b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/kafka_exporter.go @@ -19,6 +19,7 @@ import ( "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic" ) var errUnrecognizedEncoding = fmt.Errorf("unrecognized encoding") @@ -40,8 +41,8 @@ func (ke kafkaErrors) Error() string { return fmt.Sprintf("Failed to deliver %d messages due to %s", ke.count, ke.err) } -func (e *kafkaTracesProducer) tracesPusher(_ context.Context, td ptrace.Traces) error { - messages, err := e.marshaler.Marshal(td, getTopic(&e.cfg, td.ResourceSpans())) +func (e *kafkaTracesProducer) tracesPusher(ctx context.Context, td ptrace.Traces) error { + messages, err := e.marshaler.Marshal(td, getTopic(ctx, &e.cfg, td.ResourceSpans())) if err != nil { return consumererror.NewPermanent(err) } @@ -65,8 +66,24 @@ func (e *kafkaTracesProducer) Close(context.Context) error { return e.producer.Close() } -func (e *kafkaTracesProducer) start(_ context.Context, _ component.Host) error { - producer, err := newSaramaProducer(e.cfg) +func (e *kafkaTracesProducer) start(ctx context.Context, host component.Host) error { + // extensions take precedence over internal encodings + if marshaler, errExt := loadEncodingExtension[ptrace.Marshaler]( + host, + e.cfg.Encoding, + ); errExt == nil { + e.marshaler = &tracesEncodingMarshaler{ + marshaler: *marshaler, + encoding: e.cfg.Encoding, + } + } + if marshaler, errInt := createTracesMarshaler(e.cfg); e.marshaler == nil && errInt == nil { + e.marshaler = marshaler + } + if e.marshaler == nil { + return errUnrecognizedEncoding + } + producer, err := newSaramaProducer(ctx, e.cfg) if err != nil { return err } @@ -82,8 +99,8 @@ type kafkaMetricsProducer struct { logger *zap.Logger } -func (e *kafkaMetricsProducer) metricsDataPusher(_ context.Context, md pmetric.Metrics) error { - messages, err := e.marshaler.Marshal(md, getTopic(&e.cfg, md.ResourceMetrics())) +func (e *kafkaMetricsProducer) metricsDataPusher(ctx context.Context, md pmetric.Metrics) error { + messages, err := e.marshaler.Marshal(md, getTopic(ctx, &e.cfg, md.ResourceMetrics())) if err != nil { return consumererror.NewPermanent(err) } @@ -107,8 +124,24 @@ func (e *kafkaMetricsProducer) Close(context.Context) error { return e.producer.Close() } -func (e *kafkaMetricsProducer) start(_ context.Context, _ component.Host) error { - producer, err := newSaramaProducer(e.cfg) +func (e *kafkaMetricsProducer) start(ctx context.Context, host component.Host) error { + // extensions take precedence over internal encodings + if marshaler, errExt := loadEncodingExtension[pmetric.Marshaler]( + host, + e.cfg.Encoding, + ); errExt == nil { + e.marshaler = &metricsEncodingMarshaler{ + marshaler: *marshaler, + encoding: e.cfg.Encoding, + } + } + if marshaler, errInt := createMetricMarshaler(e.cfg); e.marshaler == nil && errInt == nil { + e.marshaler = marshaler + } + if e.marshaler == nil { + return errUnrecognizedEncoding + } + producer, err := newSaramaProducer(ctx, e.cfg) if err != nil { return err } @@ -124,8 +157,8 @@ type kafkaLogsProducer struct { logger *zap.Logger } -func (e *kafkaLogsProducer) logsDataPusher(_ context.Context, ld plog.Logs) error { - messages, err := e.marshaler.Marshal(ld, getTopic(&e.cfg, ld.ResourceLogs())) +func (e *kafkaLogsProducer) logsDataPusher(ctx context.Context, ld plog.Logs) error { + messages, err := e.marshaler.Marshal(ld, getTopic(ctx, &e.cfg, ld.ResourceLogs())) if 
err != nil { return consumererror.NewPermanent(err) } @@ -149,8 +182,24 @@ func (e *kafkaLogsProducer) Close(context.Context) error { return e.producer.Close() } -func (e *kafkaLogsProducer) start(_ context.Context, _ component.Host) error { - producer, err := newSaramaProducer(e.cfg) +func (e *kafkaLogsProducer) start(ctx context.Context, host component.Host) error { + // extensions take precedence over internal encodings + if marshaler, errExt := loadEncodingExtension[plog.Marshaler]( + host, + e.cfg.Encoding, + ); errExt == nil { + e.marshaler = &logsEncodingMarshaler{ + marshaler: *marshaler, + encoding: e.cfg.Encoding, + } + } + if marshaler, errInt := createLogMarshaler(e.cfg); e.marshaler == nil && errInt == nil { + e.marshaler = marshaler + } + if e.marshaler == nil { + return errUnrecognizedEncoding + } + producer, err := newSaramaProducer(ctx, e.cfg) if err != nil { return err } @@ -158,7 +207,7 @@ func (e *kafkaLogsProducer) start(_ context.Context, _ component.Host) error { return nil } -func newSaramaProducer(config Config) (sarama.SyncProducer, error) { +func newSaramaProducer(ctx context.Context, config Config) (sarama.SyncProducer, error) { c := sarama.NewConfig() c.ClientID = config.ClientID @@ -168,7 +217,7 @@ func newSaramaProducer(config Config) (sarama.SyncProducer, error) { c.Producer.Return.Errors = true c.Producer.RequiredAcks = config.Producer.RequiredAcks // Because sarama does not accept a Context for every message, set the Timeout here. - c.Producer.Timeout = config.Timeout + c.Producer.Timeout = config.TimeoutSettings.Timeout c.Metadata.Full = config.Metadata.Full c.Metadata.Retry.Max = config.Metadata.Retry.Max c.Metadata.Retry.Backoff = config.Metadata.Retry.Backoff @@ -187,7 +236,7 @@ func newSaramaProducer(config Config) (sarama.SyncProducer, error) { c.Version = version } - if err := kafka.ConfigureAuthentication(config.Authentication, c); err != nil { + if err := kafka.ConfigureAuthentication(ctx, config.Authentication, c); err != nil { return nil, err } @@ -204,56 +253,26 @@ func newSaramaProducer(config Config) (sarama.SyncProducer, error) { return producer, nil } -func newMetricsExporter(config Config, set exporter.CreateSettings, marshalers map[string]MetricsMarshaler) (*kafkaMetricsProducer, error) { - marshaler := marshalers[config.Encoding] - if marshaler == nil { - return nil, errUnrecognizedEncoding - } - if config.PartitionMetricsByResourceAttributes { - if keyableMarshaler, ok := marshaler.(KeyableMetricsMarshaler); ok { - keyableMarshaler.Key() - } - } - +func newMetricsExporter(config Config, set exporter.Settings) *kafkaMetricsProducer { return &kafkaMetricsProducer{ - cfg: config, - marshaler: marshaler, - logger: set.Logger, - }, nil - + cfg: config, + logger: set.Logger, + } } // newTracesExporter creates Kafka exporter. 
-func newTracesExporter(config Config, set exporter.CreateSettings, marshalers map[string]TracesMarshaler) (*kafkaTracesProducer, error) { - marshaler := marshalers[config.Encoding] - if marshaler == nil { - return nil, errUnrecognizedEncoding - } - if config.PartitionTracesByID { - if keyableMarshaler, ok := marshaler.(KeyableTracesMarshaler); ok { - keyableMarshaler.Key() - } - } - +func newTracesExporter(config Config, set exporter.Settings) *kafkaTracesProducer { return &kafkaTracesProducer{ - cfg: config, - marshaler: marshaler, - logger: set.Logger, - }, nil -} - -func newLogsExporter(config Config, set exporter.CreateSettings, marshalers map[string]LogsMarshaler) (*kafkaLogsProducer, error) { - marshaler := marshalers[config.Encoding] - if marshaler == nil { - return nil, errUnrecognizedEncoding + cfg: config, + logger: set.Logger, } +} +func newLogsExporter(config Config, set exporter.Settings) *kafkaLogsProducer { return &kafkaLogsProducer{ - cfg: config, - marshaler: marshaler, - logger: set.Logger, - }, nil - + cfg: config, + logger: set.Logger, + } } type resourceSlice[T any] interface { @@ -265,15 +284,45 @@ type resource interface { Resource() pcommon.Resource } -func getTopic[T resource](cfg *Config, resources resourceSlice[T]) string { - if cfg.TopicFromAttribute == "" { - return cfg.Topic - } - for i := 0; i < resources.Len(); i++ { - rv, ok := resources.At(i).Resource().Attributes().Get(cfg.TopicFromAttribute) - if ok && rv.Str() != "" { - return rv.Str() +func getTopic[T resource](ctx context.Context, cfg *Config, resources resourceSlice[T]) string { + if cfg.TopicFromAttribute != "" { + for i := 0; i < resources.Len(); i++ { + rv, ok := resources.At(i).Resource().Attributes().Get(cfg.TopicFromAttribute) + if ok && rv.Str() != "" { + return rv.Str() + } } } + contextTopic, ok := topic.FromContext(ctx) + if ok { + return contextTopic + } return cfg.Topic } + +// loadEncodingExtension tries to load an available extension for the given encoding. +func loadEncodingExtension[T any](host component.Host, encoding string) (*T, error) { + extensionID, err := encodingToComponentID(encoding) + if err != nil { + return nil, err + } + encodingExtension, ok := host.GetExtensions()[*extensionID] + if !ok { + return nil, fmt.Errorf("unknown encoding extension %q", encoding) + } + unmarshaler, ok := encodingExtension.(T) + if !ok { + return nil, fmt.Errorf("extension %q is not an unmarshaler", encoding) + } + return &unmarshaler, nil +} + +// encodingToComponentID converts an encoding string to a component ID using the given encoding as type. 
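With the factory no longer carrying marshaler maps, the exporters resolve `encoding` at start-up, first as a host extension ID derived by `encodingToComponentID`. A minimal sketch of that name-to-ID step, assuming the collector's component type rules (a letter followed by letters, digits, or underscores); the `my.codec` value is illustrative:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/component"
)

func main() {
	// Each configured encoding doubles as a candidate extension ID.
	for _, enc := range []string{"otlp_proto", "zipkin_json", "my.codec"} {
		t, err := component.NewType(enc)
		if err != nil {
			// "my.codec" is expected to fail here: dots are assumed to be
			// outside the valid component type character set.
			fmt.Printf("%s: %v\n", enc, err)
			continue
		}
		fmt.Printf("%s -> extension ID %s\n", enc, component.NewID(t))
	}
}
```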
+func encodingToComponentID(encoding string) (*component.ID, error) { + componentType, err := component.NewType(encoding) + if err != nil { + return nil, fmt.Errorf("invalid component type: %w", err) + } + id := component.NewID(componentType) + return &id, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/marshaler.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/marshaler.go index 4acb625407f..ba8f00ff5cf 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/marshaler.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/marshaler.go @@ -4,6 +4,8 @@ package kafkaexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter" import ( + "fmt" + "github.com/IBM/sarama" "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" @@ -39,42 +41,129 @@ type LogsMarshaler interface { Encoding() string } -// tracesMarshalers returns map of supported encodings with TracesMarshaler. -func tracesMarshalers() map[string]TracesMarshaler { - otlpPb := newPdataTracesMarshaler(&ptrace.ProtoMarshaler{}, defaultEncoding) - otlpJSON := newPdataTracesMarshaler(&ptrace.JSONMarshaler{}, "otlp_json") - zipkinProto := newPdataTracesMarshaler(zipkinv2.NewProtobufTracesMarshaler(), "zipkin_proto") - zipkinJSON := newPdataTracesMarshaler(zipkinv2.NewJSONTracesMarshaler(), "zipkin_json") +// creates TracesMarshaler based on the provided config +func createTracesMarshaler(config Config) (TracesMarshaler, error) { + encoding := config.Encoding + partitionTracesByID := config.PartitionTracesByID + jaegerProto := jaegerMarshaler{marshaler: jaegerProtoSpanMarshaler{}} jaegerJSON := jaegerMarshaler{marshaler: newJaegerJSONMarshaler()} - return map[string]TracesMarshaler{ - otlpPb.Encoding(): otlpPb, - otlpJSON.Encoding(): otlpJSON, - zipkinProto.Encoding(): zipkinProto, - zipkinJSON.Encoding(): zipkinJSON, - jaegerProto.Encoding(): jaegerProto, - jaegerJSON.Encoding(): jaegerJSON, + + switch encoding { + case defaultEncoding: + return newPdataTracesMarshaler(&ptrace.ProtoMarshaler{}, defaultEncoding, partitionTracesByID), nil + case "otlp_json": + return newPdataTracesMarshaler(&ptrace.JSONMarshaler{}, "otlp_json", partitionTracesByID), nil + case "zipkin_proto": + return newPdataTracesMarshaler(zipkinv2.NewProtobufTracesMarshaler(), "zipkin_proto", partitionTracesByID), nil + case "zipkin_json": + return newPdataTracesMarshaler(zipkinv2.NewJSONTracesMarshaler(), "zipkin_json", partitionTracesByID), nil + case jaegerProtoSpanMarshaler{}.encoding(): + return jaegerProto, nil + case jaegerJSON.Encoding(): + return jaegerJSON, nil + default: + return nil, errUnrecognizedEncoding } } -// metricsMarshalers returns map of supported encodings and MetricsMarshaler -func metricsMarshalers() map[string]MetricsMarshaler { - otlpPb := newPdataMetricsMarshaler(&pmetric.ProtoMarshaler{}, defaultEncoding) - otlpJSON := newPdataMetricsMarshaler(&pmetric.JSONMarshaler{}, "otlp_json") - return map[string]MetricsMarshaler{ - otlpPb.Encoding(): otlpPb, - otlpJSON.Encoding(): otlpJSON, +// creates MetricsMarshaler based on the provided config +func createMetricMarshaler(config Config) (MetricsMarshaler, error) { + encoding := config.Encoding + partitionMetricsByResources := config.PartitionMetricsByResourceAttributes + switch encoding { + case defaultEncoding: + return 
newPdataMetricsMarshaler(&pmetric.ProtoMarshaler{}, defaultEncoding, partitionMetricsByResources), nil + case "otlp_json": + return newPdataMetricsMarshaler(&pmetric.JSONMarshaler{}, "otlp_json", partitionMetricsByResources), nil + default: + return nil, errUnrecognizedEncoding } } -// logsMarshalers returns map of supported encodings and LogsMarshaler -func logsMarshalers() map[string]LogsMarshaler { - otlpPb := newPdataLogsMarshaler(&plog.ProtoMarshaler{}, defaultEncoding) - otlpJSON := newPdataLogsMarshaler(&plog.JSONMarshaler{}, "otlp_json") +// creates LogsMarshalers based on the provided config +func createLogMarshaler(config Config) (LogsMarshaler, error) { + encoding := config.Encoding + partitionLogsByAttributes := config.PartitionLogsByResourceAttributes + raw := newRawMarshaler() - return map[string]LogsMarshaler{ - otlpPb.Encoding(): otlpPb, - otlpJSON.Encoding(): otlpJSON, - raw.Encoding(): raw, + switch encoding { + case defaultEncoding: + return newPdataLogsMarshaler(&plog.ProtoMarshaler{}, defaultEncoding, partitionLogsByAttributes), nil + case "otlp_json": + return newPdataLogsMarshaler(&plog.JSONMarshaler{}, "otlp_json", partitionLogsByAttributes), nil + case raw.Encoding(): + return raw, nil + default: + return nil, errUnrecognizedEncoding } } + +// tracesEncodingMarshaler is a wrapper around ptrace.Marshaler that implements TracesMarshaler. +type tracesEncodingMarshaler struct { + marshaler ptrace.Marshaler + encoding string +} + +func (t *tracesEncodingMarshaler) Marshal(traces ptrace.Traces, topic string) ([]*sarama.ProducerMessage, error) { + var messages []*sarama.ProducerMessage + data, err := t.marshaler.MarshalTraces(traces) + if err != nil { + return nil, fmt.Errorf("failed to marshal traces: %w", err) + } + messages = append(messages, &sarama.ProducerMessage{ + Topic: topic, + Value: sarama.ByteEncoder(data), + }) + return messages, nil +} + +func (t *tracesEncodingMarshaler) Encoding() string { + return t.encoding +} + +// metricsEncodingMarshaler is a wrapper around pmetric.Marshaler that implements MetricsMarshaler. +type metricsEncodingMarshaler struct { + marshaler pmetric.Marshaler + encoding string +} + +func (m *metricsEncodingMarshaler) Marshal(metrics pmetric.Metrics, topic string) ([]*sarama.ProducerMessage, error) { + var messages []*sarama.ProducerMessage + data, err := m.marshaler.MarshalMetrics(metrics) + if err != nil { + return nil, fmt.Errorf("failed to marshal metrics: %w", err) + } + messages = append(messages, &sarama.ProducerMessage{ + Topic: topic, + Value: sarama.ByteEncoder(data), + }) + return messages, nil +} + +func (m *metricsEncodingMarshaler) Encoding() string { + return m.encoding +} + +// logsEncodingMarshaler is a wrapper around plog.Marshaler that implements LogsMarshaler. 
+type logsEncodingMarshaler struct { + marshaler plog.Marshaler + encoding string +} + +func (l *logsEncodingMarshaler) Marshal(logs plog.Logs, topic string) ([]*sarama.ProducerMessage, error) { + var messages []*sarama.ProducerMessage + data, err := l.marshaler.MarshalLogs(logs) + if err != nil { + return nil, fmt.Errorf("failed to marshal logs: %w", err) + } + messages = append(messages, &sarama.ProducerMessage{ + Topic: topic, + Value: sarama.ByteEncoder(data), + }) + return messages, nil +} + +func (l *logsEncodingMarshaler) Encoding() string { + return l.encoding +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/metadata.yaml index 7c168398950..ea264ca5e48 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/metadata.yaml +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/metadata.yaml @@ -1,5 +1,4 @@ type: kafka -scope_name: otelcol/kafka status: class: exporter diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/pdata_marshaler.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/pdata_marshaler.go index 3429cdd8316..ae9726f2cbe 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/pdata_marshaler.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/pdata_marshaler.go @@ -15,60 +15,72 @@ import ( ) type pdataLogsMarshaler struct { - marshaler plog.Marshaler - encoding string + marshaler plog.Marshaler + encoding string + partitionedByResources bool } func (p pdataLogsMarshaler) Marshal(ld plog.Logs, topic string) ([]*sarama.ProducerMessage, error) { - bts, err := p.marshaler.MarshalLogs(ld) - if err != nil { - return nil, err - } - return []*sarama.ProducerMessage{ - { + var msgs []*sarama.ProducerMessage + if p.partitionedByResources { + logs := ld.ResourceLogs() + + for i := 0; i < logs.Len(); i++ { + resourceMetrics := logs.At(i) + hash := pdatautil.MapHash(resourceMetrics.Resource().Attributes()) + + newLogs := plog.NewLogs() + resourceMetrics.CopyTo(newLogs.ResourceLogs().AppendEmpty()) + + bts, err := p.marshaler.MarshalLogs(newLogs) + if err != nil { + return nil, err + } + msgs = append(msgs, &sarama.ProducerMessage{ + Topic: topic, + Value: sarama.ByteEncoder(bts), + Key: sarama.ByteEncoder(hash[:]), + }) + } + } else { + bts, err := p.marshaler.MarshalLogs(ld) + if err != nil { + return nil, err + } + msgs = append(msgs, &sarama.ProducerMessage{ Topic: topic, Value: sarama.ByteEncoder(bts), - }, - }, nil + }) + } + return msgs, nil } func (p pdataLogsMarshaler) Encoding() string { return p.encoding } -func newPdataLogsMarshaler(marshaler plog.Marshaler, encoding string) LogsMarshaler { +func newPdataLogsMarshaler(marshaler plog.Marshaler, encoding string, partitionedByResources bool) LogsMarshaler { return pdataLogsMarshaler{ - marshaler: marshaler, - encoding: encoding, + marshaler: marshaler, + encoding: encoding, + partitionedByResources: partitionedByResources, } } -// KeyableMetricsMarshaler is an extension of the MetricsMarshaler interface intended to provide partition key capabilities -// for metrics messages -type KeyableMetricsMarshaler interface { - MetricsMarshaler - Key() -} - type pdataMetricsMarshaler struct { - marshaler 
pmetric.Marshaler - encoding string - keyed bool -} - -// Key configures the pdataMetricsMarshaler to set the message key on the kafka messages -func (p *pdataMetricsMarshaler) Key() { - p.keyed = true + marshaler pmetric.Marshaler + encoding string + partitionedByResources bool } func (p pdataMetricsMarshaler) Marshal(ld pmetric.Metrics, topic string) ([]*sarama.ProducerMessage, error) { var msgs []*sarama.ProducerMessage - if p.keyed { + if p.partitionedByResources { metrics := ld.ResourceMetrics() for i := 0; i < metrics.Len(); i++ { resourceMetrics := metrics.At(i) - var hash = pdatautil.MapHash(resourceMetrics.Resource().Attributes()) + hash := pdatautil.MapHash(resourceMetrics.Resource().Attributes()) newMetrics := pmetric.NewMetrics() resourceMetrics.CopyTo(newMetrics.ResourceMetrics().AppendEmpty()) @@ -101,29 +113,23 @@ func (p pdataMetricsMarshaler) Encoding() string { return p.encoding } -func newPdataMetricsMarshaler(marshaler pmetric.Marshaler, encoding string) MetricsMarshaler { +func newPdataMetricsMarshaler(marshaler pmetric.Marshaler, encoding string, partitionedByResources bool) MetricsMarshaler { return &pdataMetricsMarshaler{ - marshaler: marshaler, - encoding: encoding, + marshaler: marshaler, + encoding: encoding, + partitionedByResources: partitionedByResources, } } -// KeyableTracesMarshaler is an extension of the TracesMarshaler interface intended to provide partition key capabilities -// for trace messages -type KeyableTracesMarshaler interface { - TracesMarshaler - Key() -} - type pdataTracesMarshaler struct { - marshaler ptrace.Marshaler - encoding string - keyed bool + marshaler ptrace.Marshaler + encoding string + partitionedByTraceID bool } func (p *pdataTracesMarshaler) Marshal(td ptrace.Traces, topic string) ([]*sarama.ProducerMessage, error) { var msgs []*sarama.ProducerMessage - if p.keyed { + if p.partitionedByTraceID { for _, trace := range batchpersignal.SplitTraces(td) { bts, err := p.marshaler.MarshalTraces(trace) if err != nil { @@ -134,7 +140,6 @@ func (p *pdataTracesMarshaler) Marshal(td ptrace.Traces, topic string) ([]*saram Value: sarama.ByteEncoder(bts), Key: sarama.ByteEncoder(traceutil.TraceIDToHexOrEmptyString(trace.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).TraceID())), }) - } } else { bts, err := p.marshaler.MarshalTraces(td) @@ -154,14 +159,10 @@ func (p *pdataTracesMarshaler) Encoding() string { return p.encoding } -// Key configures the pdataTracesMarshaler to set the message key on the kafka messages -func (p *pdataTracesMarshaler) Key() { - p.keyed = true -} - -func newPdataTracesMarshaler(marshaler ptrace.Marshaler, encoding string) TracesMarshaler { +func newPdataTracesMarshaler(marshaler ptrace.Marshaler, encoding string, partitionedByTraceID bool) TracesMarshaler { return &pdataTracesMarshaler{ - marshaler: marshaler, - encoding: encoding, + marshaler: marshaler, + encoding: encoding, + partitionedByTraceID: partitionedByTraceID, } } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/raw_marshaler.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/raw_marshaler.go index 166be57f3e0..5a9e283436e 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/raw_marshaler.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/raw_marshaler.go @@ -14,8 +14,7 @@ import ( var errUnsupported = errors.New("unsupported serialization") -type rawMarshaler 
struct { -} +type rawMarshaler struct{} func newRawMarshaler() rawMarshaler { return rawMarshaler{} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/README.md index 97b53cd0172..ce07fcf541b 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/README.md +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/README.md @@ -8,7 +8,7 @@ | Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Aexporter%2Fzipkin%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Aexporter%2Fzipkin) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Aexporter%2Fzipkin%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Aexporter%2Fzipkin) | | [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@MovieStoreGuy](https://www.github.com/MovieStoreGuy), [@andrzej-stencel](https://www.github.com/andrzej-stencel), [@crobert-1](https://www.github.com/crobert-1) | -[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta +[beta]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#beta [core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol [contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/config.go index 5f18554fa17..a4202794c38 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/config.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/config.go @@ -14,8 +14,8 @@ import ( // Config defines configuration settings for the Zipkin exporter. type Config struct { - exporterhelper.QueueSettings `mapstructure:"sending_queue"` - configretry.BackOffConfig `mapstructure:"retry_on_failure"` + QueueSettings exporterhelper.QueueConfig `mapstructure:"sending_queue"` + configretry.BackOffConfig `mapstructure:"retry_on_failure"` // Configures the exporter client. // The Endpoint to send the Zipkin trace data to (e.g.: http://some.url:9411/api/v2/spans). 
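Both this Zipkin change and the Kafka change above swap `exporterhelper.QueueSettings`/`TimeoutSettings` for the renamed `QueueConfig`/`TimeoutConfig` types. A minimal sketch of the new constructor names, assuming the library defaults are otherwise unchanged:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/config/configretry"
	"go.opentelemetry.io/collector/exporter/exporterhelper"
)

func main() {
	queue := exporterhelper.NewDefaultQueueConfig()     // was NewDefaultQueueSettings
	timeout := exporterhelper.NewDefaultTimeoutConfig() // was NewDefaultTimeoutSettings
	retry := configretry.NewDefaultBackOffConfig()

	// Only the type and constructor names changed in this update; the
	// default values themselves are assumed to carry over.
	fmt.Println(queue.Enabled, timeout.Timeout, retry.Enabled)
}
```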
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/factory.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/factory.go index d3d7f423f22..b853cbcb310 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/factory.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/factory.go @@ -38,7 +38,7 @@ func createDefaultConfig() component.Config { defaultClientHTTPSettings.WriteBufferSize = 512 * 1024 return &Config{ BackOffConfig: configretry.NewDefaultBackOffConfig(), - QueueSettings: exporterhelper.NewDefaultQueueSettings(), + QueueSettings: exporterhelper.NewDefaultQueueConfig(), ClientConfig: defaultClientHTTPSettings, Format: defaultFormat, DefaultServiceName: defaultServiceName, @@ -47,7 +47,7 @@ func createDefaultConfig() component.Config { func createTracesExporter( ctx context.Context, - set exporter.CreateSettings, + set exporter.Settings, cfg component.Config, ) (exporter.Traces, error) { zc := cfg.(*Config) @@ -56,14 +56,14 @@ func createTracesExporter( if err != nil { return nil, err } - return exporterhelper.NewTracesExporter( + return exporterhelper.NewTraces( ctx, set, cfg, ze.pushTraces, exporterhelper.WithStart(ze.start), // explicitly disable since we rely on http.Client timeout logic. - exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}), + exporterhelper.WithTimeout(exporterhelper.TimeoutConfig{Timeout: 0}), exporterhelper.WithQueue(zc.QueueSettings), exporterhelper.WithRetry(zc.BackOffConfig)) } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/internal/metadata/generated_status.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/internal/metadata/generated_status.go index 9c236a37f60..ee966007d28 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/internal/metadata/generated_status.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/internal/metadata/generated_status.go @@ -7,7 +7,8 @@ import ( ) var ( - Type = component.MustNewType("zipkin") + Type = component.MustNewType("zipkin") + ScopeName = "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter" ) const ( diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/internal/metadata/generated_telemetry.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/internal/metadata/generated_telemetry.go deleted file mode 100644 index 8a3acfd214e..00000000000 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/internal/metadata/generated_telemetry.go +++ /dev/null @@ -1,17 +0,0 @@ -// Code generated by mdatagen. DO NOT EDIT. 
- -package metadata - -import ( - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/trace" -) - -func Meter(settings component.TelemetrySettings) metric.Meter { - return settings.MeterProvider.Meter("otelcol/zipkin") -} - -func Tracer(settings component.TelemetrySettings) trace.Tracer { - return settings.TracerProvider.Tracer("otelcol/zipkin") -} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/metadata.yaml index 61a9ba824a7..2d12aa880db 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/metadata.yaml +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/metadata.yaml @@ -1,5 +1,4 @@ type: zipkin -scope_name: otelcol/zipkin status: class: exporter diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/zipkin.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/zipkin.go index 23e45bb1352..825a00277a6 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/zipkin.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/zipkin.go @@ -74,7 +74,7 @@ func (ze *zipkinExporter) pushTraces(ctx context.Context, td ptrace.Traces) erro return consumererror.NewPermanent(fmt.Errorf("failed to push trace data via Zipkin exporter: %w", err)) } - req, err := http.NewRequestWithContext(ctx, "POST", ze.url, bytes.NewReader(body)) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, ze.url, bytes.NewReader(body)) if err != nil { return fmt.Errorf("failed to push trace data via Zipkin exporter: %w", err) } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/localhostgate/featuregate.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/localhostgate/featuregate.go deleted file mode 100644 index 238d909f0b6..00000000000 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/localhostgate/featuregate.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// package localhostgate defines a feature gate that controls whether server-like receivers and extensions use localhost as the default host for their endpoints. -// This package is duplicated across core and contrib to avoid exposing the feature gate as part of the public API. -// To do this we define a `registerOrLoad` helper and try to register the gate in both modules. -// IMPORTANT NOTE: ANY CHANGES TO THIS PACKAGE MUST BE MIRRORED IN THE CORE COUNTERPART. -// See https://github.com/open-telemetry/opentelemetry-collector/blob/main/internal/localhostgate/featuregate.go -package localhostgate // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/localhostgate" - -import ( - "errors" - "fmt" - - "go.opentelemetry.io/collector/featuregate" - "go.uber.org/zap" -) - -const UseLocalHostAsDefaultHostID = "component.UseLocalHostAsDefaultHost" - -// UseLocalHostAsDefaultHostfeatureGate is the feature gate that controls whether -// server-like receivers and extensions such as the OTLP receiver use localhost as the default host for their endpoints. 
-var UseLocalHostAsDefaultHostfeatureGate = mustRegisterOrLoad( - featuregate.GlobalRegistry(), - UseLocalHostAsDefaultHostID, - featuregate.StageAlpha, - featuregate.WithRegisterDescription("controls whether server-like receivers and extensions such as the OTLP receiver use localhost as the default host for their endpoints"), -) - -// mustRegisterOrLoad tries to register the feature gate and loads it if it already exists. -// It panics on any other error. -func mustRegisterOrLoad(reg *featuregate.Registry, id string, stage featuregate.Stage, opts ...featuregate.RegisterOption) *featuregate.Gate { - gate, err := reg.Register(id, stage, opts...) - - if errors.Is(err, featuregate.ErrAlreadyRegistered) { - // Gate is already registered; find it. - // Only a handful of feature gates are registered, so it's fine to iterate over all of them. - reg.VisitAll(func(g *featuregate.Gate) { - if g.ID() == id { - gate = g - return - } - }) - } else if err != nil { - panic(err) - } - - return gate -} - -// EndpointForPort gets the endpoint for a given port using localhost or 0.0.0.0 depending on the feature gate. -func EndpointForPort(port int) string { - host := "localhost" - if !UseLocalHostAsDefaultHostfeatureGate.IsEnabled() { - host = "0.0.0.0" - } - return fmt.Sprintf("%s:%d", host, port) -} - -// LogAboutUseLocalHostAsDefault logs about the upcoming change from 0.0.0.0 to localhost on server-like components. -func LogAboutUseLocalHostAsDefault(logger *zap.Logger) { - if !UseLocalHostAsDefaultHostfeatureGate.IsEnabled() { - logger.Warn( - "The default endpoints for all servers in components will change to use localhost instead of 0.0.0.0 in a future version. Use the feature gate to preview the new default.", - zap.String("feature gate ID", UseLocalHostAsDefaultHostID), - ) - } -} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/parseutils/parser.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/parseutils/parser.go index 2758161ec56..9c9df67d19c 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/parseutils/parser.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/parseutils/parser.go @@ -18,6 +18,7 @@ func SplitString(input, delimiter string) ([]string, error) { current := "" delimiterLength := len(delimiter) quoteChar := "" // "" means we are not in quotes + escaped := false for i := 0; i < len(input); i++ { if quoteChar == "" && i+delimiterLength <= len(input) && input[i:i+delimiterLength] == delimiter { // delimiter @@ -31,13 +32,19 @@ func SplitString(input, delimiter string) ([]string, error) { continue } - if quoteChar == "" && (input[i] == '"' || input[i] == '\'') { // start of quote - quoteChar = string(input[i]) - continue - } - if string(input[i]) == quoteChar { // end of quote - quoteChar = "" - continue + if !escaped { // consider quote termination so long as previous character wasn't backslash + if quoteChar == "" && (input[i] == '"' || input[i] == '\'') { // start of quote + quoteChar = string(input[i]) + continue + } + if string(input[i]) == quoteChar { // end of quote + quoteChar = "" + continue + } + // Only if we weren't escaped could the next character result in escaped state + escaped = input[i] == '\\' // potentially escaping next character + } else { + escaped = false } current += string(input[i]) diff --git 
a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/parseutils/uri.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/parseutils/uri.go new file mode 100644 index 00000000000..cd154b9d36c --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/parseutils/uri.go @@ -0,0 +1,164 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package parseutils // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/parseutils" + +import ( + "net/url" + "strconv" + "strings" + + semconv "go.opentelemetry.io/collector/semconv/v1.27.0" +) + +const ( + // replace once conventions include these + AttributeURLUserInfo = "url.user_info" + AttributeURLUsername = "url.username" + AttributeURLPassword = "url.password" +) + +// ParseURI takes an absolute or relative URI and returns the parsed values. +func ParseURI(value string, semconvCompliant bool) (map[string]any, error) { + m := make(map[string]any) + + if strings.HasPrefix(value, "?") { + // remove the query string '?' prefix before parsing + v, err := url.ParseQuery(value[1:]) + if err != nil { + return nil, err + } + return queryToMap(v, m), nil + } + + var x *url.URL + var err error + var mappingFn func(*url.URL, map[string]any) (map[string]any, error) + + if semconvCompliant { + mappingFn = urlToSemconvMap + x, err = url.Parse(value) + if err != nil { + return nil, err + } + } else { + x, err = url.ParseRequestURI(value) + if err != nil { + return nil, err + } + + mappingFn = urlToMap + } + return mappingFn(x, m) +} + +// urlToSemconvMap converts a url.URL to a map of semantic convention attributes, excluding any values that are not set. +func urlToSemconvMap(parsedURI *url.URL, m map[string]any) (map[string]any, error) { + m[semconv.AttributeURLOriginal] = parsedURI.String() + m[semconv.AttributeURLDomain] = parsedURI.Hostname() + m[semconv.AttributeURLScheme] = parsedURI.Scheme + m[semconv.AttributeURLPath] = parsedURI.Path + + if portString := parsedURI.Port(); len(portString) > 0 { + port, err := strconv.Atoi(portString) + if err != nil { + return nil, err + } + m[semconv.AttributeURLPort] = port + } + + if fragment := parsedURI.Fragment; len(fragment) > 0 { + m[semconv.AttributeURLFragment] = fragment + } + + if parsedURI.User != nil { + m[AttributeURLUserInfo] = parsedURI.User.String() + + if username := parsedURI.User.Username(); len(username) > 0 { + m[AttributeURLUsername] = username + } + + if pwd, isSet := parsedURI.User.Password(); isSet { + m[AttributeURLPassword] = pwd + } + } + + if query := parsedURI.RawQuery; len(query) > 0 { + m[semconv.AttributeURLQuery] = query + } + + if periodIdx := strings.LastIndex(parsedURI.Path, "."); periodIdx != -1 { + if periodIdx < len(parsedURI.Path)-1 { + m[semconv.AttributeURLExtension] = parsedURI.Path[periodIdx+1:] + } + } + + return m, nil +} + +// urlToMap converts a url.URL to a map, excluding any values that are not set. +func urlToMap(p *url.URL, m map[string]any) (map[string]any, error) { + scheme := p.Scheme + if scheme != "" { + m["scheme"] = scheme + } + + user := p.User.Username() + if user != "" { + m["user"] = user + } + + host := p.Hostname() + if host != "" { + m["host"] = host + } + + port := p.Port() + if port != "" { + m["port"] = port + } + + path := p.EscapedPath() + if path != "" { + m["path"] = path + } + + return queryToMap(p.Query(), m), nil +} + +// queryToMap converts a query string url.Values to a map.
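A usage sketch for the new `ParseURI` helper. Note that `parseutils` is internal to the contrib module, so this import compiles only from within it; the example URL and the `url.*` semconv key strings are assumptions based on the attribute names above:

```go
package main

import (
	"fmt"

	"github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/parseutils"
)

func main() {
	// semconvCompliant=true keys the result by url.* attribute names.
	m, err := parseutils.ParseURI("https://user:pw@example.com:8443/a/b.html?x=1#top", true)
	if err != nil {
		panic(err)
	}
	fmt.Println(m["url.scheme"], m["url.domain"], m["url.port"], m["url.extension"])
	// expected: https example.com 8443 html
}
```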
+func queryToMap(query url.Values, m map[string]any) map[string]any { + // no-op if query is empty, do not create the key m["query"] + if len(query) == 0 { + return m + } + + /* 'parameter' will represent url.Values + map[string]any{ + "parameter-a": []any{ + "a", + "b", + }, + "parameter-b": []any{ + "x", + "y", + }, + } + */ + parameters := map[string]any{} + for param, values := range query { + parameters[param] = queryParamValuesToMap(values) + } + m["query"] = parameters + return m +} + +// queryParamValuesToMap takes query string parameter values and +// returns an []interface populated with the values +func queryParamValuesToMap(values []string) []any { + v := make([]any, len(values)) + for i, value := range values { + v[i] = value + } + return v +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/textutils/encoding.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/textutils/encoding.go index 209c2cbc775..512b6a6f7da 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/textutils/encoding.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/textutils/encoding.go @@ -74,7 +74,7 @@ var encodingOverrides = map[string]encoding.Encoding{ } func lookupEncoding(enc string) (encoding.Encoding, error) { - if e, ok := encodingOverrides[strings.ToLower(enc)]; ok { + if e, ok := EncodingOverridesMap.Get(strings.ToLower(enc)); ok { return e, nil } e, err := ianaindex.IANA.Encoding(enc) @@ -94,3 +94,12 @@ func IsNop(enc string) bool { } return e == encoding.Nop } + +var EncodingOverridesMap = encodingOverridesMap{} + +type encodingOverridesMap struct{} + +func (e *encodingOverridesMap) Get(key string) (encoding.Encoding, bool) { + v, ok := encodingOverrides[key] + return v, ok +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/timeutils/internal/ctimefmt/ctimefmt.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/timeutils/internal/ctimefmt/ctimefmt.go index 52170879a57..adb0c5dded5 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/timeutils/internal/ctimefmt/ctimefmt.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/timeutils/internal/ctimefmt/ctimefmt.go @@ -16,8 +16,11 @@ import ( "time" ) -var ctimeRegexp = regexp.MustCompile(`%.`) -var decimalsRegexp = regexp.MustCompile(`\d`) +var ( + ctimeRegexp = regexp.MustCompile(`%.`) + invalidFractionalSecondsStrptime = regexp.MustCompile(`[^.,]%[Lfs]`) + decimalsRegexp = regexp.MustCompile(`\d`) +) var ctimeSubstitutes = map[string]string{ "%Y": "2006", @@ -121,10 +124,6 @@ func Parse(format, value string) (time.Time, error) { // ToNative converts ctime-like format string to Go native layout // (which is used by time.Time.Format() and time.Parse() functions). 
func ToNative(format string) (string, error) { - if match := decimalsRegexp.FindString(format); match != "" { - return "", errors.New("format string should not contain decimals") - } - var errs []error replaceFunc := func(directive string) string { if subst, ok := ctimeSubstitutes[directive]; ok { @@ -141,3 +140,26 @@ func ToNative(format string) (string, error) { return replaced, nil } + +func Validate(format string) error { + if match := decimalsRegexp.FindString(format); match != "" { + return errors.New("format string should not contain decimals") + } + + if match := invalidFractionalSecondsStrptime.FindString(format); match != "" { + return fmt.Errorf("invalid fractional seconds directive: '%s'. must be preceded with '.' or ','", match) + } + + directives := ctimeRegexp.FindAllString(format, -1) + + var errs []error + for _, directive := range directives { + if _, ok := ctimeSubstitutes[directive]; !ok { + errs = append(errs, errors.New("unsupported ctimefmt.ToNative() directive: "+directive)) + } + } + if len(errs) != 0 { + return fmt.Errorf("invalid strptime format: %v", errs) + } + return nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/timeutils/parser.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/timeutils/parser.go index 8af480096f2..82c5523292e 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/timeutils/parser.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/timeutils/parser.go @@ -4,13 +4,19 @@ package timeutils // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/timeutils" import ( + "errors" "fmt" + "regexp" "strings" "time" + "github.com/elastic/lunes" + strptime "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/timeutils/internal/ctimefmt" ) +var invalidFractionalSecondsGoTime = regexp.MustCompile(`[^.,9]9+`) + func StrptimeToGotime(layout string) (string, error) { return strptime.ToNative(layout) } @@ -23,6 +29,17 @@ func ParseStrptime(layout string, value any, location *time.Location) (time.Time return ParseGotime(goLayout, value, location) } +// ParseLocalizedStrptime is like ParseLocalizedGotime, but instead of using the native Go time layout, +// it uses the ctime-like format. +func ParseLocalizedStrptime(layout string, value any, location *time.Location, language string) (time.Time, error) { + goLayout, err := strptime.ToNative(layout) + if err != nil { + return time.Time{}, err + } + + return ParseLocalizedGotime(goLayout, value, location, language) +} + func GetLocation(location *string, layout *string) (*time.Location, error) { if location != nil && *location != "" { // If location is specified, it must be in the local timezone database @@ -41,6 +58,24 @@ func GetLocation(location *string, layout *string) (*time.Location, error) { return time.Local, nil } +// ParseLocalizedGotime is like ParseGotime, but instead of parsing a formatted time in +// English, it parses a value in foreign language, and returns the [time.Time] it represents. +// The language argument must be a well-formed BCP 47 language tag (e.g.: "en", "en-US"), and +// a known CLDR locale. 
+func ParseLocalizedGotime(layout string, value any, location *time.Location, language string) (time.Time, error) { + stringValue, err := convertParsingValue(value) + if err != nil { + return time.Time{}, err + } + + translatedVal, err := lunes.Translate(layout, stringValue, language) + if err != nil { + return time.Time{}, err + } + + return ParseGotime(layout, translatedVal, location) +} + func ParseGotime(layout string, value any, location *time.Location) (time.Time, error) { timeValue, err := parseGotime(layout, value, location) if err != nil { @@ -50,14 +85,9 @@ func ParseGotime(layout string, value any, location *time.Location) (time.Time, } func parseGotime(layout string, value any, location *time.Location) (time.Time, error) { - var str string - switch v := value.(type) { - case string: - str = v - case []byte: - str = string(v) - default: - return time.Time{}, fmt.Errorf("type %T cannot be parsed as a time", value) + str, err := convertParsingValue(value) + if err != nil { + return time.Time{}, err } result, err := time.ParseInLocation(layout, str, location) @@ -86,6 +116,20 @@ func parseGotime(layout string, value any, location *time.Location) (time.Time, return resultLoc, locErr } +func convertParsingValue(value any) (string, error) { + var str string + switch v := value.(type) { + case string: + str = v + case []byte: + str = string(v) + default: + return "", fmt.Errorf("type %T cannot be parsed as a time", value) + } + + return str, nil +} + // SetTimestampYear sets the year of a timestamp to the current year. // This is needed because year is missing from some time formats, such as rfc3164. func SetTimestampYear(t time.Time) time.Time { @@ -104,5 +148,35 @@ func SetTimestampYear(t time.Time) time.Time { return d } +// ValidateStrptime checks the given strptime layout and returns an error if it detects any known issues +// that prevent it from being parsed. +func ValidateStrptime(layout string) error { + return strptime.Validate(layout) +} + +func ValidateGotime(layout string) error { + if match := invalidFractionalSecondsGoTime.FindString(layout); match != "" { + return fmt.Errorf("invalid fractional seconds directive: '%s'. must be preceded with '.' or ','", match) + } + + return nil +} + +// ValidateLocale checks the given locale and returns an error if the language tag +// is not supported by the localized parser functions. 
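The two-step approach above (translate localized names to English, then parse natively) can be exercised directly against the `lunes` dependency. A sketch under the signature shown in the hunk, with a hypothetical Spanish input (2023-01-02 was a Monday):

```go
package main

import (
	"fmt"
	"time"

	"github.com/elastic/lunes"
)

func main() {
	layout := "Monday 2 January 2006"
	value := "lunes 2 enero 2023" // hypothetical Spanish-localized timestamp

	// Replace the Spanish day and month names with their English
	// equivalents, then hand the result to the standard library parser.
	translated, err := lunes.Translate(layout, value, "es")
	if err != nil {
		panic(err)
	}
	t, err := time.Parse(layout, translated)
	if err != nil {
		panic(err)
	}
	fmt.Println(t) // 2023-01-02 00:00:00 +0000 UTC
}
```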
+func ValidateLocale(locale string) error { + _, err := lunes.NewDefaultLocale(locale) + if err == nil { + return nil + } + + var e *lunes.ErrUnsupportedLocale + if errors.As(err, &e) { + return fmt.Errorf("unsupported locale '%s', value must be a supported BCP 47 language tag", locale) + } + + return fmt.Errorf("invalid locale '%s': %w", locale, err) +} + // Allows tests to override with deterministic value var Now = time.Now diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr/matcher.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr/matcher.go index f73e9f8e8a8..037b4888d46 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr/matcher.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr/matcher.go @@ -25,6 +25,16 @@ func Not[K any](matcher BoolExpr[K]) BoolExpr[K] { return notMatcher[K]{matcher: matcher} } +type alwaysTrueMatcher[K any] struct{} + +func (alm alwaysTrueMatcher[K]) Eval(_ context.Context, _ K) (bool, error) { + return true, nil +} + +func AlwaysTrue[K any]() BoolExpr[K] { + return alwaysTrueMatcher[K]{} +} + type orMatcher[K any] struct { matchers []BoolExpr[K] } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterlog/filterlog.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterlog/filterlog.go index b394397c193..41d324d86db 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterlog/filterlog.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterlog/filterlog.go @@ -28,7 +28,6 @@ var useOTTLBridge = featuregate.GlobalRegistry().MustRegister( // The logic determining if a log should be processed is based on include and exclude settings. // Include properties are checked before exclude settings are checked. func NewSkipExpr(mp *filterconfig.MatchConfig) (expr.BoolExpr[ottllog.TransformContext], error) { - if useOTTLBridge.IsEnabled() { return filterottl.NewLogSkipExprBridge(mp) } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermatcher/attributematcher.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermatcher/attributematcher.go index a4261a0676c..0a1d2c6d18c 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermatcher/attributematcher.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermatcher/attributematcher.go @@ -37,7 +37,6 @@ func NewAttributesMatcher(config filterset.Config, attributes []filterconfig.Att // Convert attribute values from mp representation to in-memory representation. 
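The `AlwaysTrue` matcher added above is mainly a neutral element for composing `BoolExpr` values. A minimal re-statement of the pattern (generic interface and two matchers, with names as in the hunk), kept self-contained for illustration:

```go
package main

import (
	"context"
	"fmt"
)

// BoolExpr mirrors the expr.BoolExpr interface from the hunk above.
type BoolExpr[K any] interface {
	Eval(ctx context.Context, tCtx K) (bool, error)
}

type alwaysTrueMatcher[K any] struct{}

func (alwaysTrueMatcher[K]) Eval(context.Context, K) (bool, error) { return true, nil }

// notMatcher inverts the wrapped expression, as expr.Not does.
type notMatcher[K any] struct{ matcher BoolExpr[K] }

func (n notMatcher[K]) Eval(ctx context.Context, tCtx K) (bool, error) {
	v, err := n.matcher.Eval(ctx, tCtx)
	return !v, err
}

func main() {
	var e BoolExpr[string] = notMatcher[string]{matcher: alwaysTrueMatcher[string]{}}
	ok, _ := e.Eval(context.Background(), "any telemetry")
	fmt.Println(ok) // false
}
```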
var rawAttributes []AttributeMatcher for _, attribute := range attributes { - if attribute.Key == "" { return nil, errors.New("can't have empty key in the list of attributes") } @@ -73,7 +72,6 @@ func NewAttributesMatcher(config filterset.Config, attributes []filterconfig.Att } default: return nil, filterset.NewUnrecognizedMatchTypeError(config.MatchType) - } } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl/filter.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl/filter.go index 6324c8a35bd..a5af8f87d01 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl/filter.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl/filter.go @@ -6,12 +6,12 @@ package filterottl // import "github.com/open-telemetry/opentelemetry-collector- import ( "go.opentelemetry.io/collector/component" - "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent" ) @@ -19,8 +19,13 @@ import ( // NewBoolExprForSpan creates a BoolExpr[ottlspan.TransformContext] that will return true if any of the given OTTL conditions evaluate to true. // The passed in functions should use the ottlspan.TransformContext. // If a function named `match` is not present in the function map it will be added automatically so that parsing works as expected -func NewBoolExprForSpan(conditions []string, functions map[string]ottl.Factory[ottlspan.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (expr.BoolExpr[ottlspan.TransformContext], error) { - parser, err := ottlspan.NewParser(functions, set) +func NewBoolExprForSpan(conditions []string, functions map[string]ottl.Factory[ottlspan.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (*ottl.ConditionSequence[ottlspan.TransformContext], error) { + return NewBoolExprForSpanWithOptions(conditions, functions, errorMode, set, nil) +} + +// NewBoolExprForSpanWithOptions is like NewBoolExprForSpan, but with additional options. +func NewBoolExprForSpanWithOptions(conditions []string, functions map[string]ottl.Factory[ottlspan.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings, parserOptions []ottlspan.Option) (*ottl.ConditionSequence[ottlspan.TransformContext], error) { + parser, err := ottlspan.NewParser(functions, set, parserOptions...) if err != nil { return nil, err } @@ -35,8 +40,13 @@ func NewBoolExprForSpan(conditions []string, functions map[string]ottl.Factory[o // NewBoolExprForSpanEvent creates a BoolExpr[ottlspanevent.TransformContext] that will return true if any of the given OTTL conditions evaluate to true. // The passed in functions should use the ottlspanevent.TransformContext. 
// If a function named `match` is not present in the function map it will be added automatically so that parsing works as expected -func NewBoolExprForSpanEvent(conditions []string, functions map[string]ottl.Factory[ottlspanevent.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (expr.BoolExpr[ottlspanevent.TransformContext], error) { - parser, err := ottlspanevent.NewParser(functions, set) +func NewBoolExprForSpanEvent(conditions []string, functions map[string]ottl.Factory[ottlspanevent.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (*ottl.ConditionSequence[ottlspanevent.TransformContext], error) { + return NewBoolExprForSpanEventWithOptions(conditions, functions, errorMode, set, nil) +} + +// NewBoolExprForSpanEventWithOptions is like NewBoolExprForSpanEvent, but with additional options. +func NewBoolExprForSpanEventWithOptions(conditions []string, functions map[string]ottl.Factory[ottlspanevent.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings, parserOptions []ottlspanevent.Option) (*ottl.ConditionSequence[ottlspanevent.TransformContext], error) { + parser, err := ottlspanevent.NewParser(functions, set, parserOptions...) if err != nil { return nil, err } @@ -51,8 +61,13 @@ func NewBoolExprForSpanEvent(conditions []string, functions map[string]ottl.Fact // NewBoolExprForMetric creates a BoolExpr[ottlmetric.TransformContext] that will return true if any of the given OTTL conditions evaluate to true. // The passed in functions should use the ottlmetric.TransformContext. // If a function named `match` is not present in the function map it will be added automatically so that parsing works as expected -func NewBoolExprForMetric(conditions []string, functions map[string]ottl.Factory[ottlmetric.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (expr.BoolExpr[ottlmetric.TransformContext], error) { - parser, err := ottlmetric.NewParser(functions, set) +func NewBoolExprForMetric(conditions []string, functions map[string]ottl.Factory[ottlmetric.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (*ottl.ConditionSequence[ottlmetric.TransformContext], error) { + return NewBoolExprForMetricWithOptions(conditions, functions, errorMode, set, nil) +} + +// NewBoolExprForMetricWithOptions is like NewBoolExprForMetric, but with additional options. +func NewBoolExprForMetricWithOptions(conditions []string, functions map[string]ottl.Factory[ottlmetric.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings, parserOptions []ottlmetric.Option) (*ottl.ConditionSequence[ottlmetric.TransformContext], error) { + parser, err := ottlmetric.NewParser(functions, set, parserOptions...) if err != nil { return nil, err } @@ -67,8 +82,13 @@ func NewBoolExprForMetric(conditions []string, functions map[string]ottl.Factory // NewBoolExprForDataPoint creates a BoolExpr[ottldatapoint.TransformContext] that will return true if any of the given OTTL conditions evaluate to true. // The passed in functions should use the ottldatapoint.TransformContext. 
// If a function named `match` is not present in the function map it will be added automatically so that parsing works as expected -func NewBoolExprForDataPoint(conditions []string, functions map[string]ottl.Factory[ottldatapoint.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (expr.BoolExpr[ottldatapoint.TransformContext], error) { - parser, err := ottldatapoint.NewParser(functions, set) +func NewBoolExprForDataPoint(conditions []string, functions map[string]ottl.Factory[ottldatapoint.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (*ottl.ConditionSequence[ottldatapoint.TransformContext], error) { + return NewBoolExprForDataPointWithOptions(conditions, functions, errorMode, set, nil) +} + +// NewBoolExprForDataPointWithOptions is like NewBoolExprForDataPoint, but with additional options. +func NewBoolExprForDataPointWithOptions(conditions []string, functions map[string]ottl.Factory[ottldatapoint.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings, parserOptions []ottldatapoint.Option) (*ottl.ConditionSequence[ottldatapoint.TransformContext], error) { + parser, err := ottldatapoint.NewParser(functions, set, parserOptions...) if err != nil { return nil, err } @@ -83,8 +103,13 @@ func NewBoolExprForDataPoint(conditions []string, functions map[string]ottl.Fact // NewBoolExprForLog creates a BoolExpr[ottllog.TransformContext] that will return true if any of the given OTTL conditions evaluate to true. // The passed in functions should use the ottllog.TransformContext. // If a function named `match` is not present in the function map it will be added automatically so that parsing works as expected -func NewBoolExprForLog(conditions []string, functions map[string]ottl.Factory[ottllog.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (expr.BoolExpr[ottllog.TransformContext], error) { - parser, err := ottllog.NewParser(functions, set) +func NewBoolExprForLog(conditions []string, functions map[string]ottl.Factory[ottllog.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (*ottl.ConditionSequence[ottllog.TransformContext], error) { + return NewBoolExprForLogWithOptions(conditions, functions, errorMode, set, nil) +} + +// NewBoolExprForLogWithOptions is like NewBoolExprForLog, but with additional options. +func NewBoolExprForLogWithOptions(conditions []string, functions map[string]ottl.Factory[ottllog.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings, parserOptions []ottllog.Option) (*ottl.ConditionSequence[ottllog.TransformContext], error) { + parser, err := ottllog.NewParser(functions, set, parserOptions...) if err != nil { return nil, err } @@ -99,8 +124,13 @@ func NewBoolExprForLog(conditions []string, functions map[string]ottl.Factory[ot // NewBoolExprForResource creates a BoolExpr[ottlresource.TransformContext] that will return true if any of the given OTTL conditions evaluate to true. // The passed in functions should use the ottlresource.TransformContext. 
// If a function named `match` is not present in the function map it will be added automatically so that parsing works as expected -func NewBoolExprForResource(conditions []string, functions map[string]ottl.Factory[ottlresource.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (expr.BoolExpr[ottlresource.TransformContext], error) { - parser, err := ottlresource.NewParser(functions, set) +func NewBoolExprForResource(conditions []string, functions map[string]ottl.Factory[ottlresource.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (*ottl.ConditionSequence[ottlresource.TransformContext], error) { + return NewBoolExprForResourceWithOptions(conditions, functions, errorMode, set, nil) +} + +// NewBoolExprForResourceWithOptions is like NewBoolExprForResource, but with additional options. +func NewBoolExprForResourceWithOptions(conditions []string, functions map[string]ottl.Factory[ottlresource.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings, parserOptions []ottlresource.Option) (*ottl.ConditionSequence[ottlresource.TransformContext], error) { + parser, err := ottlresource.NewParser(functions, set, parserOptions...) if err != nil { return nil, err } @@ -111,3 +141,24 @@ func NewBoolExprForResource(conditions []string, functions map[string]ottl.Facto c := ottlresource.NewConditionSequence(statements, set, ottlresource.WithConditionSequenceErrorMode(errorMode)) return &c, nil } + +// NewBoolExprForScope creates a BoolExpr[ottlscope.TransformContext] that will return true if any of the given OTTL conditions evaluate to true. +// The passed in functions should use the ottlscope.TransformContext. +// If a function named `match` is not present in the function map it will be added automatically so that parsing works as expected +func NewBoolExprForScope(conditions []string, functions map[string]ottl.Factory[ottlscope.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (*ottl.ConditionSequence[ottlscope.TransformContext], error) { + return NewBoolExprForScopeWithOptions(conditions, functions, errorMode, set, nil) +} + +// NewBoolExprForScopeWithOptions is like NewBoolExprForScope, but with additional options. +func NewBoolExprForScopeWithOptions(conditions []string, functions map[string]ottl.Factory[ottlscope.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings, parserOptions []ottlscope.Option) (*ottl.ConditionSequence[ottlscope.TransformContext], error) { + parser, err := ottlscope.NewParser(functions, set, parserOptions...)
+ if err != nil { + return nil, err + } + statements, err := parser.ParseConditions(conditions) + if err != nil { + return nil, err + } + c := ottlscope.NewConditionSequence(statements, set, ottlscope.WithConditionSequenceErrorMode(errorMode)) + return &c, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl/functions.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl/functions.go index 355a148f6cb..3612d184966 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl/functions.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl/functions.go @@ -14,13 +14,17 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" ) func StandardSpanFuncs() map[string]ottl.Factory[ottlspan.TransformContext] { - return ottlfuncs.StandardConverters[ottlspan.TransformContext]() + m := ottlfuncs.StandardConverters[ottlspan.TransformContext]() + isRootSpanFactory := ottlfuncs.NewIsRootSpanFactory() + m[isRootSpanFactory.Name()] = isRootSpanFactory + return m } func StandardSpanEventFuncs() map[string]ottl.Factory[ottlspanevent.TransformContext] { @@ -40,6 +44,10 @@ func StandardDataPointFuncs() map[string]ottl.Factory[ottldatapoint.TransformCon return ottlfuncs.StandardConverters[ottldatapoint.TransformContext]() } +func StandardScopeFuncs() map[string]ottl.Factory[ottlscope.TransformContext] { + return ottlfuncs.StandardConverters[ottlscope.TransformContext]() +} + func StandardLogFuncs() map[string]ottl.Factory[ottllog.TransformContext] { return ottlfuncs.StandardConverters[ottllog.TransformContext]() } @@ -68,7 +76,7 @@ func createHasAttributeOnDatapointFunction(_ ottl.FunctionContext, oArgs ottl.Ar } func hasAttributeOnDatapoint(key string, expectedVal string) (ottl.ExprFunc[ottlmetric.TransformContext], error) { - return func(ctx context.Context, tCtx ottlmetric.TransformContext) (any, error) { + return func(_ context.Context, tCtx ottlmetric.TransformContext) (any, error) { return checkDataPoints(tCtx, key, &expectedVal) }, nil } @@ -92,7 +100,7 @@ func createHasAttributeKeyOnDatapointFunction(_ ottl.FunctionContext, oArgs ottl } func hasAttributeKeyOnDatapoint(key string) (ottl.ExprFunc[ottlmetric.TransformContext], error) { - return func(ctx context.Context, tCtx ottlmetric.TransformContext) (any, error) { + return func(_ context.Context, tCtx ottlmetric.TransformContext) (any, error) { return checkDataPoints(tCtx, key, nil) }, nil } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/config.go index 7a60b641651..f6c6c757d54 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/config.go +++ 
b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/config.go @@ -22,9 +22,7 @@ const ( MatchTypeFieldName = "match_type" ) -var ( - validMatchTypes = []MatchType{Regexp, Strict} -) +var validMatchTypes = []MatchType{Regexp, Strict} // Config configures the matching behavior of a FilterSet. type Config struct { diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterspan/filterspan.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterspan/filterspan.go index 2ee4358991f..734b8a01b75 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterspan/filterspan.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterspan/filterspan.go @@ -9,7 +9,7 @@ import ( "go.opentelemetry.io/collector/featuregate" "go.opentelemetry.io/collector/pdata/pcommon" - conventions "go.opentelemetry.io/collector/semconv/v1.6.1" + conventions "go.opentelemetry.io/collector/semconv/v1.27.0" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/traceutil" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr" diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka/authentication.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka/authentication.go index 104d6152a1b..ca6ce4c3519 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka/authentication.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka/authentication.go @@ -7,9 +7,11 @@ import ( "context" "crypto/sha256" "crypto/sha512" + "crypto/tls" "fmt" "github.com/IBM/sarama" + "github.com/aws/aws-msk-iam-sasl-signer-go/signer" "go.opentelemetry.io/collector/config/configtls" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka/awsmsk" @@ -35,7 +37,7 @@ type SASLConfig struct { Username string `mapstructure:"username"` // Password to be used on authentication Password string `mapstructure:"password"` - // SASL Mechanism to be used, possible values are: (PLAIN, AWS_MSK_IAM, SCRAM-SHA-256 or SCRAM-SHA-512). + // SASL Mechanism to be used, possible values are: (PLAIN, AWS_MSK_IAM, AWS_MSK_IAM_OAUTHBEARER, SCRAM-SHA-256 or SCRAM-SHA-512). Mechanism string `mapstructure:"mechanism"` // SASL Protocol Version to be used, possible values are: (0, 1). Defaults to 0. Version int `mapstructure:"version"` @@ -44,27 +46,37 @@ type SASLConfig struct { } // AWSMSKConfig defines the additional SASL authentication -// measures needed to use AWS_MSK_IAM mechanism +// measures needed to use the AWS_MSK_IAM and AWS_MSK_IAM_OAUTHBEARER mechanisms type AWSMSKConfig struct { // Region is the AWS region the MSK cluster is based in Region string `mapstructure:"region"` // BrokerAddr is the client is connecting to in order to perform the auth required BrokerAddr string `mapstructure:"broker_addr"` + // Context + ctx context.Context } -// KerberosConfig defines kereros configuration. +// Token returns the AWS session token for the AWS_MSK_IAM_OAUTHBEARER mechanism +func (c *AWSMSKConfig) Token() (*sarama.AccessToken, error) { + token, _, err := signer.GenerateAuthToken(c.ctx, c.Region) + + return &sarama.AccessToken{Token: token}, err +} + +// KerberosConfig defines kerberos configuration.
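The `Token` method above plugs into sarama's OAUTHBEARER support. A sketch of the wiring, assuming the `signer` and `sarama` APIs used in the hunk (the region value is hypothetical):

```go
package main

import (
	"context"

	"github.com/IBM/sarama"
	"github.com/aws/aws-msk-iam-sasl-signer-go/signer"
)

// mskTokenProvider satisfies sarama's token-provider interface the same
// way AWSMSKConfig.Token does in the hunk above.
type mskTokenProvider struct {
	ctx    context.Context
	region string
}

func (p *mskTokenProvider) Token() (*sarama.AccessToken, error) {
	token, _, err := signer.GenerateAuthToken(p.ctx, p.region)
	return &sarama.AccessToken{Token: token}, err
}

func main() {
	cfg := sarama.NewConfig()
	cfg.Net.SASL.Enable = true
	cfg.Net.SASL.Mechanism = sarama.SASLTypeOAuth
	cfg.Net.SASL.TokenProvider = &mskTokenProvider{ctx: context.Background(), region: "us-east-1"}
	cfg.Net.TLS.Enable = true // MSK IAM auth requires TLS
	_ = cfg                   // hand cfg to sarama.NewClient / NewSyncProducer
}
```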
type KerberosConfig struct { - ServiceName string `mapstructure:"service_name"` - Realm string `mapstructure:"realm"` - UseKeyTab bool `mapstructure:"use_keytab"` - Username string `mapstructure:"username"` - Password string `mapstructure:"password" json:"-"` - ConfigPath string `mapstructure:"config_file"` - KeyTabPath string `mapstructure:"keytab_file"` + ServiceName string `mapstructure:"service_name"` + Realm string `mapstructure:"realm"` + UseKeyTab bool `mapstructure:"use_keytab"` + Username string `mapstructure:"username"` + Password string `mapstructure:"password" json:"-"` + ConfigPath string `mapstructure:"config_file"` + KeyTabPath string `mapstructure:"keytab_file"` + DisablePAFXFAST bool `mapstructure:"disable_fast_negotiation"` } // ConfigureAuthentication configures authentication in sarama.Config. -func ConfigureAuthentication(config Authentication, saramaConfig *sarama.Config) error { +func ConfigureAuthentication(ctx context.Context, config Authentication, saramaConfig *sarama.Config) error { if config.PlainText != nil { configurePlaintext(*config.PlainText, saramaConfig) } @@ -74,7 +86,7 @@ func ConfigureAuthentication(config Authentication, saramaConfig *sarama.Config) } } if config.SASL != nil { - if err := configureSASL(*config.SASL, saramaConfig); err != nil { + if err := configureSASL(ctx, *config.SASL, saramaConfig); err != nil { return err } } @@ -91,13 +103,12 @@ func configurePlaintext(config PlainTextConfig, saramaConfig *sarama.Config) { saramaConfig.Net.SASL.Password = config.Password } -func configureSASL(config SASLConfig, saramaConfig *sarama.Config) error { - - if config.Username == "" { +func configureSASL(ctx context.Context, config SASLConfig, saramaConfig *sarama.Config) error { + if config.Username == "" && config.Mechanism != "AWS_MSK_IAM_OAUTHBEARER" { return fmt.Errorf("username have to be provided") } - if config.Password == "" { + if config.Password == "" && config.Mechanism != "AWS_MSK_IAM_OAUTHBEARER" { return fmt.Errorf("password have to be provided") } @@ -119,8 +130,15 @@ func configureSASL(config SASLConfig, saramaConfig *sarama.Config) error { return awsmsk.NewIAMSASLClient(config.AWSMSK.BrokerAddr, config.AWSMSK.Region, saramaConfig.ClientID) } saramaConfig.Net.SASL.Mechanism = awsmsk.Mechanism + case "AWS_MSK_IAM_OAUTHBEARER": + config.AWSMSK.ctx = ctx + saramaConfig.Net.SASL.Mechanism = sarama.SASLTypeOAuth + saramaConfig.Net.SASL.TokenProvider = &config.AWSMSK + tlsConfig := tls.Config{} + saramaConfig.Net.TLS.Enable = true + saramaConfig.Net.TLS.Config = &tlsConfig default: - return fmt.Errorf(`invalid SASL Mechanism %q: can be either "PLAIN", "AWS_MSK_IAM", "SCRAM-SHA-256" or "SCRAM-SHA-512"`, config.Mechanism) + return fmt.Errorf(`invalid SASL Mechanism %q: can be either "PLAIN", "AWS_MSK_IAM", "AWS_MSK_IAM_OAUTHBEARER", "SCRAM-SHA-256" or "SCRAM-SHA-512"`, config.Mechanism) } switch config.Version { @@ -159,4 +177,5 @@ func configureKerberos(config KerberosConfig, saramaConfig *sarama.Config) { saramaConfig.Net.SASL.GSSAPI.Username = config.Username saramaConfig.Net.SASL.GSSAPI.Realm = config.Realm saramaConfig.Net.SASL.GSSAPI.ServiceName = config.ServiceName + saramaConfig.Net.SASL.GSSAPI.DisablePAFXFAST = config.DisablePAFXFAST } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka/scram_client.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka/scram_client.go index 0c3f83d8baa..269dcfbd771 100644 --- 
a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka/scram_client.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka/scram_client.go @@ -34,7 +34,6 @@ func (x *XDGSCRAMClient) Begin(userName, password, authzID string) (err error) { // completes is also an error. func (x *XDGSCRAMClient) Step(challenge string) (response string, err error) { return x.ClientConversation.Step(challenge) - } // Done returns true if the conversation is completed or has errored. diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic/LICENSE b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/collector/confmap/converter/expandconverter/Makefile b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic/Makefile similarity index 100% rename from vendor/go.opentelemetry.io/collector/confmap/converter/expandconverter/Makefile rename to vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic/Makefile diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic/README.md new file mode 100644 index 00000000000..663d969e5d6 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic/README.md @@ -0,0 +1,4 @@ +# Kafka Topic Context Accessor + +This module is used for accessing the topic within a context. +See the [kafka exporter readme](../../../exporter/kafkaexporter/README.md#destination-topic) for more details. 
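The new `pkg/kafka/topic` module boils down to two context accessors, defined in `kafka_ctx.go` below. A usage sketch with a hypothetical topic name:

```go
package main

import (
	"context"
	"fmt"

	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic"
)

func main() {
	// A component stashes the destination topic on the context...
	ctx := topic.WithTopic(context.Background(), "otlp_spans") // hypothetical topic name

	// ...and the kafka exporter later reads it back, falling back to its
	// configured default when no value is present.
	if t, ok := topic.FromContext(ctx); ok {
		fmt.Println("destination topic:", t)
	}
}
```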
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic/kafka_ctx.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic/kafka_ctx.go new file mode 100644 index 00000000000..603863615fb --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic/kafka_ctx.go @@ -0,0 +1,19 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package topic // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic" + +import ( + "context" +) + +func WithTopic(ctx context.Context, topic string) context.Context { + return context.WithValue(ctx, topicContextKey{}, topic) +} + +func FromContext(ctx context.Context) (string, bool) { + contextTopic, ok := ctx.Value(topicContextKey{}).(string) + return contextTopic, ok +} + +type topicContextKey struct{} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic/metadata.yaml new file mode 100644 index 00000000000..708ccce63f8 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic/metadata.yaml @@ -0,0 +1,6 @@ +type: topic + +status: + class: pkg + codeowners: + active: [pavolloffay, MovieStoreGuy] diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/CONTRIBUTING.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/CONTRIBUTING.md index c644810d2c4..e5e54971b6c 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/CONTRIBUTING.md +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/CONTRIBUTING.md @@ -6,20 +6,57 @@ This guide is specific to the OpenTelemetry Transformation Language. All guidel - Changes to the OpenTelemetry Transformation Language should be made independent of any component that depends on the package. Whenever possible, try not to submit PRs that change both the OTTL and a dependent component. Instead, submit a PR that updates the OTTL and then, once merged, update the component as needed. -## New Values +## Adding New Editors/Converters -When adding new values to the grammar you must: +Before raising a PR with a new Editor or Converter, raise an issue to verify its acceptance. While acceptance is strongly tied to the specific use case, consider these guidelines for early assessment. -1. Update the `Value` struct with the new value. This may also mean adding new token(s) to the lexer. -2. Update `NewFunctionCall` to be able to handle calling functions with this new value. -3. Update `NewGetter` to be able to handle the new value. -4. Add new unit tests. + +Your proposal likely will be accepted if: + +- The proposed functionality is missing, +- The proposed solution significantly improves user experience and readability for very common use cases, +- The proposed solution is more performant in cases where it is possible to achieve the same result with existing options. +- The proposed solution makes use of packages from the Go standard library to offer functionality possible through an existing option in a more standard or reliable manner. + +It will be up for discussion if: + +- Your proposal solves an issue that can be achieved in another way but does not improve user experience or performance.
+- The proposed functionality is not obviously applicable to the needs of a significant number of OTTL users. +- Your proposal extracts data into a structure with enumerable keys or values and OpenTelemetry semantic conventions do not cover the shape or values for this data. + +Your proposal likely won't be accepted if: + +- User experience is worse and assumes a highly technical user, +- Your proposal significantly degrades the performance of the processing pipeline. + +As with code, OTTL aims for readability first. This means: -## New Functions +- Using short, meaningful, and descriptive names, +- Ensuring naming consistency across Editors and Converters, +- Avoiding deep nesting to achieve desired transformations, +- Ensuring Editors and Converters have a single responsibility. + +### Implementation guidelines All new functions must be added via a new file. Function files must start with `func_`. Functions must be placed in `ottlfuncs`. Unit tests must be added for all new functions. Unit test files must start with `func_` and end in `_test`. Unit tests must be placed in the same directory as the function. Functions that are not specific to a pipeline should be tested independently of any specific pipeline. Functions that are specific to a pipeline should be tested against that pipeline. End-to-end tests must be added in the `e2e` directory. -Function names should follow the [Function Syntax Guidelines](ottlfuncs/README.md#function-syntax) +#### Naming and Parameter Guidelines + +Functions should be named and formatted according to the following standards. + +- Function names MUST start with a verb unless it is a Factory that creates a new type. +- Converters MUST be UpperCamelCase. +- Function names that contain multiple words MUST separate those words with `_`. +- Functions that interact with multiple items MUST have plurality in the name. Ex: `truncate_all`, `keep_keys`, `replace_all_matches`. +- Functions that interact with a single item MUST NOT have plurality in the name. If a function would interact with multiple items due to a condition, like `where`, it is still considered singular. Ex: `set`, `delete`, `replace_match`. +- Functions that change a specific target MUST set the target as the first parameter. +## New Values + +When adding new values to the grammar you must: + +1. Update the `Value` struct with the new value. This may also mean adding new token(s) to the lexer. +2. Update `NewFunctionCall` to be able to handle calling functions with this new value. +3. Update `NewGetter` to be able to handle the new value. +4. Add new unit tests. diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/LANGUAGE.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/LANGUAGE.md index 05df67e0ffe..ee4545228a7 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/LANGUAGE.md +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/LANGUAGE.md @@ -1,6 +1,6 @@ ## Grammar -The OTTL grammar includes function invocations, Values and Boolean Expressions. These parts all fit into a Statement, which is the basis of execution in the OTTL. +OTTL grammar includes function invocations, Values and Boolean Expressions. These parts all fit into a Statement, which is the basis of execution in OTTL. ### Design principles @@ -18,9 +18,9 @@ An Editor is made up of 2 parts: - a string identifier. The string identifier must start with a lowercase letter.
- zero or more Values (comma separated) surrounded by parentheses (`()`). -**The OTTL has no built-in Editors.** +**OTTL has no built-in Editors.** Users must supply a map between string identifiers and Editor implementations. -The OTTL will use this map to determine which implementation to call when executing a Statement. +OTTL will use this map to determine which implementation to call when executing a Statement. See [ottlfuncs](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/ottlfuncs#editors) for pre-made, usable Editors. ### Converters @@ -32,9 +32,9 @@ Converters are made up of 3 parts: - zero or more Values (comma separated) surrounded by parentheses (`()`). - a combination of zero or more string keys (`["key"]`) or int keys (`[0]`) -**The OTTL has no built-in Converters.** +**OTTL has no built-in Converters.** Users must include Converters in the same map that Editors are supplied. -The OTTL will use this map and reflection to generate Converters that can then be invoked by the user. +OTTL will use this map and reflection to generate Converters that can then be invoked by the user. See [ottlfuncs](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/ottlfuncs#converters) for pre-made, usable Converters. When keys are supplied the value returned by the Converter will be indexed by the keys in order. @@ -69,6 +69,7 @@ The following types are supported for single-value parameters in OTTL functions: - `IntLikeGetter` - `BoolGetter` - `BoolLikeGetter` +- `ByteSliceLikeGetter` - `Enum` - `string` - `float64` @@ -88,7 +89,7 @@ For slice parameters, the following types are supported: - `string` - `float64` - `int64` -- `uint8`. Byte slice literals are parsed as byte slices by the OTTL. +- `uint8`. Byte slice literals are parsed as byte slices by OTTL. - `Getter` To make a parameter optional, use the `Optional` type, which takes a type argument for the underlying @@ -113,10 +114,11 @@ Values are passed as function parameters or are used in a Boolean Expression. Va - [Enums](#enums) - [Converters](#converters) - [Math Expressions](#math-expressions) +- [Maps](#maps) ### Paths -A Path Value is a reference to a telemetry field. Paths are made up of lowercase identifiers, dots (`.`), and square brackets combined with a string key (`["key"]`) or int key (`[0]`). **The interpretation of a Path is NOT implemented by the OTTL.** Instead, the user must provide a `PathExpressionParser` that the OTTL can use to interpret paths. As a result, how the Path parts are used is up to the user. However, it is recommended that the parts be used like so: +A Path Value is a reference to a telemetry field. Paths are made up of lowercase identifiers, dots (`.`), and square brackets combined with a string key (`["key"]`) or int key (`[0]`). **The interpretation of a Path is NOT implemented by OTTL.** Instead, the user must provide a `PathExpressionParser` that OTTL can use to interpret paths. As a result, how the Path parts are used is up to the user. However, it is recommended that the parts be used like so: - Identifiers are used to map to a telemetry field. - Dots (`.`) are used to separate nested fields. @@ -154,13 +156,23 @@ Example List Values: - `["1", "2", "3"]` - `["a", attributes["key"], Concat(["a", "b"], "-")]` +### Maps + +A Map Value comprises a set of key Value pairs.
+ + +Example Map Values: +- `{}` +- `{"foo": "bar"}` +- `{"foo": {"a": 2}}` +- `{"foo": {"a": attributes["key"]}}` + ### Literals Literals are literal interpretations of the Value into a Go value. Accepted literals are: - Strings. Strings are represented as literals by surrounding the string in double quotes (`""`). -- Ints. Ints are represented by any digit, optionally prepended by plus (`+`) or minus (`-`). Internally the OTTL represents all ints as `int64` -- Floats. Floats are represented by digits separated by a dot (`.`), optionally prepended by plus (`+`) or minus (`-`). The leading digit is optional. Internally the OTTL represents all Floats as `float64`. +- Ints. Ints are represented by any digit, optionally prepended by plus (`+`) or minus (`-`). Internally OTTL represents all ints as `int64`. +- Floats. Floats are represented by digits separated by a dot (`.`), optionally prepended by plus (`+`) or minus (`-`). The leading digit is optional. Internally OTTL represents all Floats as `float64`. - Bools. Bools are represented by the exact strings `true` and `false`. - Nil. Nil is represented by the exact string `nil`. - Byte slices. Byte slices are represented via a hex string prefaced with `0x` @@ -175,7 +187,7 @@ Example Literals ### Enums -Enums are uppercase identifiers that get interpreted during parsing and converted to an `int64`. **The interpretation of an Enum is NOT implemented by the OTTL.** Instead, the user must provide a `EnumParser` that the OTTL can use to interpret the Enum. The `EnumParser` returns an `int64` instead of a function, which means that the Enum's numeric value is retrieved during parsing instead of during execution. +Enums are uppercase identifiers that get interpreted during parsing and converted to an `int64`. **The interpretation of an Enum is NOT implemented by OTTL.** Instead, the user must provide an `EnumParser` that OTTL can use to interpret the Enum. The `EnumParser` returns an `int64` instead of a function, which means that the Enum's numeric value is retrieved during parsing instead of during execution. Within the grammar Enums are always used as `int64`. As a result, the Enum's symbol can be used as if it is an Int value. @@ -280,7 +292,7 @@ Examples: ## Accessing signal telemetry -Access to signal telemetry is provided to OTTL functions through a `TransformContext` that is created by the user and passed during statement evaluation. To allow functions to operate on the `TransformContext`, the OTTL provides `Getter`, `Setter`, and `GetSetter` interfaces. +Access to signal telemetry is provided to OTTL functions through a `TransformContext` that is created by the user and passed during statement evaluation. To allow functions to operate on the `TransformContext`, OTTL provides `Getter`, `Setter`, and `GetSetter` interfaces. ### Getters and Setters @@ -293,6 +305,6 @@ Getters allow for reading the following types of data. See the respective sectio It is possible to update the Value in a telemetry field using a Setter. For read and write access, the `GetSetter` interface extends both interfaces. -## Logging inside a OTTL function +## Logging inside an OTTL function -To emit logs inside a OTTL function, add a parameter of type [`component.TelemetrySettings`](https://pkg.go.dev/go.opentelemetry.io/collector/component#TelemetrySettings) to the function signature. The OTTL will then inject the TelemetrySettings that were passed to `NewParser` into the function. TelemetrySettings can be used to emit logs.
+To emit logs inside an OTTL function, add a parameter of type [`component.TelemetrySettings`](https://pkg.go.dev/go.opentelemetry.io/collector/component#TelemetrySettings) to the function signature. OTTL will then inject the TelemetrySettings that were passed to `NewParser` into the function. TelemetrySettings can be used to emit logs. diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/README.md index 2c25f746009..33017a22d0a 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/README.md +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/README.md @@ -4,14 +4,19 @@ | ------------- |-----------| | Stability | [alpha]: traces, metrics, logs | | Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Apkg%2Fottl%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Apkg%2Fottl) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Apkg%2Fottl%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Apkg%2Fottl) | -| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@TylerHelmuth](https://www.github.com/TylerHelmuth), [@kentquirk](https://www.github.com/kentquirk), [@bogdandrutu](https://www.github.com/bogdandrutu), [@evan-bradley](https://www.github.com/evan-bradley) | +| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@TylerHelmuth](https://www.github.com/TylerHelmuth), [@kentquirk](https://www.github.com/kentquirk), [@bogdandrutu](https://www.github.com/bogdandrutu), [@evan-bradley](https://www.github.com/evan-bradley), [@edmocosta](https://www.github.com/edmocosta) \| Seeking more code owners! | -[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha +[alpha]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#alpha -The OpenTelemetry Transformation Language is a language for transforming open telemetry data based on the [OpenTelemetry Collector Processing Exploration](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/processing.md). +The OpenTelemetry Transformation Language (OTTL) is a small, domain-specific programming language intended to process data with OpenTelemetry-native concepts and constructs. -This package reads in OTTL statements and converts them to invokable functions/booleans based on the OTTL's grammar. +This package implements everything necessary to use OTTL in a Collector component or in another user-facing system. 
+ +- [Getting Started](#getting-started) +- [Where to use OTTL](#where-to-use-ottl) +- [Troubleshooting](#troubleshooting) +- [Resources](#resources) ## Getting Started @@ -19,114 +24,52 @@ If you're looking to write OTTL statements for a component's configuration check See [OTTL Functions](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/ottlfuncs#ottl-functions) for a list of functions available for use in the OTTL statements of most components. -OTTL Contexts define how you access the fields on a piece of telemetry. See the table to find the exact list of available fields: +OTTL Contexts define how you access the fields on a given telemetry item. See the table to find the exact list of available fields: | Telemetry | OTTL Context | |-------------------------|--------------------------------------------------------------------------------------------------------------------------------------------| -| `Resource` | [Resource](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/contexts/ottlresource/README.md) | -| `Instrumentation Scope` | [Instrumentation Scope](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/contexts/ottlscope/README.md) | -| `Span` | [Span](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/contexts/ottlspan/README.md) | -| `Span Event` | [SpanEvent](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/contexts/ottlspanevent/README.md) | -| `Metric` | [Metric](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/contexts/ottlmetric/README.md) | -| `Datapoint` | [DataPoint](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/contexts/ottldatapoint/README.md) | -| `Log` | [Log](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/contexts/ottllog/README.md) | - -### Component Creators - -If you're looking to use OTTL in your component, check out [the OTTL grammar](./LANGUAGE.md). - -## Examples - -These examples contain a SQL-like declarative language. Applied statements interact with only one signal, but statements can be declared across multiple signals. Functions used in examples are indicative of what could be useful. 
- -### Remove a forbidden attribute - -``` -traces: - delete(attributes["http.request.header.authorization"]) -metrics: - delete(attributes["http.request.header.authorization"]) -logs: - delete(attributes["http.request.header.authorization"]) -``` - -### Remove all attributes except for some - -``` -traces: - keep_keys(attributes, ["http.method", "http.status_code"]) -metrics: - keep_keys(attributes, ["http.method", "http.status_code"]) -logs: - keep_keys(attributes, ["http.method", "http.status_code"]) -``` - -### Reduce cardinality of an attribute - -``` -traces: - replace_match(attributes["http.target"], "/user/*/list/*", "/user/{userId}/list/{listId}") -``` - -### Reduce cardinality of a span name +| `Resource` | [Resource](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/contexts/ottlresource/README.md) | +| `Instrumentation Scope` | [Instrumentation Scope](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/contexts/ottlscope/README.md) | +| `Span` | [Span](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/contexts/ottlspan/README.md) | +| `Span Event` | [SpanEvent](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/contexts/ottlspanevent/README.md) | +| `Metric` | [Metric](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/contexts/ottlmetric/README.md) | +| `Datapoint` | [DataPoint](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/contexts/ottldatapoint/README.md) | +| `Log` | [Log](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/contexts/ottllog/README.md) | -``` -traces: - replace_match(name, "GET /user/*/list/*", "GET /user/{userId}/list/{listId}") -``` - -### Reduce cardinality of any matching attribute +To understand what OTTL offers as a language, check out [OTTL's grammar doc](./LANGUAGE.md). -``` -traces: - replace_all_matches(attributes, "/user/*/list/*", "/user/{userId}/list/{listId}") -``` - -### Decrease the size of the telemetry payload - -``` -traces: - delete(resource.attributes["process.command_line"]) -metrics: - delete(resource.attributes["process.command_line"]) -logs: - delete(resource.attributes["process.command_line"]) -``` +## Where to use OTTL -### Attach information from resource into telemetry +- To modify your data as it passes through a pipeline, use the [transform processor](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/transformprocessor/README.md). +- To remove data from your pipeline, use the [filter processor](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/filterprocessor/README.md). +- To select spans to be sampled, use the [tail sampling processor](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/tailsamplingprocessor/README.md). +- To route data between pipelines, use the [routing connector](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/connector/routingconnector/README.md). 
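+
+All of these components follow the same core pattern: parse OTTL statements or conditions once at startup, then execute or evaluate them against each item of telemetry. Below is a hypothetical sketch of the condition side, roughly the shape a filtering or sampling component uses internally (signatures vary between releases):
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan"
+	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs"
+	"go.opentelemetry.io/collector/component/componenttest"
+	"go.opentelemetry.io/collector/pdata/ptrace"
+)
+
+func main() {
+	settings := componenttest.NewNopTelemetrySettings()
+	parser, err := ottlspan.NewParser(ottlfuncs.StandardFuncs[ottlspan.TransformContext](), settings)
+	if err != nil {
+		panic(err)
+	}
+
+	// A condition like the filter processor might evaluate for each span.
+	conditions, err := parser.ParseConditions([]string{`attributes["http.status_code"] == 500`})
+	if err != nil {
+		panic(err)
+	}
+	seq := ottl.NewConditionSequence(conditions, settings)
+
+	traces := ptrace.NewTraces()
+	rs := traces.ResourceSpans().AppendEmpty()
+	ss := rs.ScopeSpans().AppendEmpty()
+	span := ss.Spans().AppendEmpty()
+	span.Attributes().PutInt("http.status_code", 500)
+
+	// Eval reports whether any condition matched; a filter-style component
+	// would drop the span when this returns true.
+	matched, err := seq.Eval(context.Background(), ottlspan.NewTransformContext(span, ss.Scope(), rs.Resource(), ss, rs))
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(matched) // true
+}
+```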
-``` -metrics: - set(attributes["k8s_pod"], resource.attributes["k8s.pod.name"]) -``` +## Troubleshooting -### Decorate error spans with additional information +When using OTTL you can enable debug logging in the collector to print out useful information, +such as the current Statement/Condition and the current TransformContext, to help you troubleshoot +why a statement is not behaving as you expect. This feature is very verbose, but provides you an accurate +view into how OTTL views the underlying data. -``` -traces: - set(attributes["whose_fault"], "theirs") where attributes["http.status"] == 400 or attributes["http.status"] == 404 - set(attributes["whose_fault"], "ours") where attributes["http.status"] == 500 +```yaml +service: + telemetry: + logs: + level: debug ``` -### Update a spans ID - ``` -logs: - set(span_id, SpanID(0x0000000000000000)) -traces: - set(span_id, SpanID(0x0000000000000000)) +2024-05-29T16:38:09.600-0600 debug ottl@v0.101.0/parser.go:265 initial TransformContext {"kind": "processor", "name": "transform", "pipeline": "logs", "TransformContext": {"resource": {"attributes": {}, "dropped_attribute_count": 0}, "scope": {"attributes": {}, "dropped_attribute_count": 0, "name": "", "version": ""}, "log_record": {"attributes": {"log.file.name": "test.log"}, "body": "test", "dropped_attribute_count": 0, "flags": 0, "observed_time_unix_nano": 1717022289500721000, "severity_number": 0, "severity_text": "", "span_id": "", "time_unix_nano": 0, "trace_id": ""}, "cache": {}}} +2024-05-29T16:38:09.600-0600 debug ottl@v0.101.0/parser.go:268 TransformContext after statement execution {"kind": "processor", "name": "transform", "pipeline": "logs", "statement": "set(resource.attributes[\"test\"], \"pass\")", "condition matched": true, "TransformContext": {"resource": {"attributes": {"test": "pass"}, "dropped_attribute_count": 0}, "scope": {"attributes": {}, "dropped_attribute_count": 0, "name": "", "version": ""}, "log_record": {"attributes": {"log.file.name": "test.log"}, "body": "test", "dropped_attribute_count": 0, "flags": 0, "observed_time_unix_nano": 1717022289500721000, "severity_number": 0, "severity_text": "", "span_id": "", "time_unix_nano": 0, "trace_id": ""}, "cache": {}}} +2024-05-29T16:38:09.600-0600 debug ottl@v0.101.0/parser.go:268 TransformContext after statement execution {"kind": "processor", "name": "transform", "pipeline": "logs", "statement": "set(instrumentation_scope.attributes[\"test\"], [\"pass\"])", "condition matched": true, "TransformContext": {"resource": {"attributes": {"test": "pass"}, "dropped_attribute_count": 0}, "scope": {"attributes": {"test": ["pass"]}, "dropped_attribute_count": 0, "name": "", "version": ""}, "log_record": {"attributes": {"log.file.name": "test.log"}, "body": "test", "dropped_attribute_count": 0, "flags": 0, "observed_time_unix_nano": 1717022289500721000, "severity_number": 0, "severity_text": "", "span_id": "", "time_unix_nano": 0, "trace_id": ""}, "cache": {}}} +2024-05-29T16:38:09.601-0600 debug ottl@v0.101.0/parser.go:268 TransformContext after statement execution {"kind": "processor", "name": "transform", "pipeline": "logs", "statement": "set(attributes[\"test\"], true)", "condition matched": true, "TransformContext": {"resource": {"attributes": {"test": "pass"}, "dropped_attribute_count": 0}, "scope": {"attributes": {"test": ["pass"]}, "dropped_attribute_count": 0, "name": "", "version": ""}, "log_record": {"attributes": {"log.file.name": "test.log", "test": true}, "body": "test", "dropped_attribute_count": 0, "flags": 0, 
"observed_time_unix_nano": 1717022289500721000, "severity_number": 0, "severity_text": "", "span_id": "", "time_unix_nano": 0, "trace_id": ""}, "cache": {}}} ``` -### Convert metric name to snake case +## Resources -``` -metrics: - set(metric.name, ConvertCase(metric.name, "snake")) -``` - -### Check if an attribute exists +These are previous conference presentations given about OTTL: -``` -traces: - set(attributes["test-passed"], true) where attributes["target-attribute"] != nil -``` +- [OTTL Me Why Transforming Telemetry in the OpenTelemetry Collector Just Got Better](https://youtu.be/uVs0oUV72CE) +- [Managing Observability Data at the Edge with the OpenTelemetry Collector and OTTL](https://youtu.be/GO0ulYLxy_8) +- [The OTTL Cookbook: A Collection of Solutions to Common Problems](https://www.youtube.com/watch?v=UGTU0-KT_60) \ No newline at end of file diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/boolean_value.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/boolean_value.go index 9bd2df9171b..fd3378d343c 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/boolean_value.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/boolean_value.go @@ -94,7 +94,6 @@ func (p *Parser[K]) newComparisonEvaluator(comparison *comparison) (BoolExpr[K], } return p.compare(a, b, comparison.Op), nil }}, nil - } func (p *Parser[K]) newBoolExpr(expr *booleanExpression) (BoolExpr[K], error) { diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/compare.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/compare.go index 50d1109f00c..0c20542f2a8 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/compare.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/compare.go @@ -7,7 +7,6 @@ import ( "bytes" "time" - "go.uber.org/zap" "golang.org/x/exp/constraints" ) @@ -17,9 +16,7 @@ import ( // invalidComparison returns false for everything except ne (where it returns true to indicate that the // objects were definitely not equivalent). -// It also gives us an opportunity to log something. 
-func (p *Parser[K]) invalidComparison(msg string, op compareOp) bool { - p.telemetrySettings.Logger.Debug(msg, zap.Any("op", op)) +func (p *Parser[K]) invalidComparison(op compareOp) bool { return op == ne } @@ -87,7 +84,7 @@ func (p *Parser[K]) compareBool(a bool, b any, op compareOp) bool { case bool: return compareBools(a, v, op) default: - return p.invalidComparison("bool to non-bool", op) + return p.invalidComparison(op) } } @@ -96,7 +93,7 @@ func (p *Parser[K]) compareString(a string, b any, op compareOp) bool { case string: return comparePrimitives(a, v, op) default: - return p.invalidComparison("string to non-string", op) + return p.invalidComparison(op) } } @@ -110,7 +107,7 @@ func (p *Parser[K]) compareByte(a []byte, b any, op compareOp) bool { } return compareBytes(a, v, op) default: - return p.invalidComparison("Bytes to non-Bytes", op) + return p.invalidComparison(op) } } @@ -121,7 +118,7 @@ func (p *Parser[K]) compareInt64(a int64, b any, op compareOp) bool { case float64: return comparePrimitives(float64(a), v, op) default: - return p.invalidComparison("int to non-numeric value", op) + return p.invalidComparison(op) } } @@ -132,7 +129,7 @@ func (p *Parser[K]) compareFloat64(a float64, b any, op compareOp) bool { case float64: return comparePrimitives(a, v, op) default: - return p.invalidComparison("float to non-numeric value", op) + return p.invalidComparison(op) } } @@ -143,7 +140,7 @@ func (p *Parser[K]) compareDuration(a time.Duration, b any, op compareOp) bool { vnsecs := v.Nanoseconds() return comparePrimitives(ansecs, vnsecs, op) default: - return p.invalidComparison("cannot compare invalid duration", op) + return p.invalidComparison(op) } } @@ -164,10 +161,10 @@ func (p *Parser[K]) compareTime(a time.Time, b any, op compareOp) bool { case gt: return a.After(v) default: - return p.invalidComparison("invalid comparison operator", op) + return p.invalidComparison(op) } default: - return p.invalidComparison("time to non-time value", op) + return p.invalidComparison(op) } } @@ -211,7 +208,7 @@ func (p *Parser[K]) compare(a any, b any, op compareOp) bool { case ne: return a != b default: - return p.invalidComparison("unsupported type for inequality on left", op) + return p.invalidComparison(op) } } } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/context_inferrer.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/context_inferrer.go new file mode 100644 index 00000000000..714100ac4d5 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/context_inferrer.go @@ -0,0 +1,236 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottl // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" + +import ( + "cmp" + "math" + "slices" +) + +var defaultContextInferPriority = []string{ + "log", + "datapoint", + "metric", + "spanevent", + "span", + "resource", + "scope", + "instrumentation_scope", +} + +// contextInferrer is an interface used to infer the OTTL context from statements. +type contextInferrer interface { + // infer returns the OTTL context inferred from the given statements. 
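+	//
+	// A hypothetical illustration: given the single statement
+	//   set(span.attributes["errors"], resource.attributes["host.name"])
+	// the paths hint at the "span" and "resource" contexts, and "span" is
+	// inferred because it ranks higher in defaultContextInferPriority above.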
+	infer(statements []string) (string, error)
+}
+
+type priorityContextInferrer struct {
+	contextPriority  map[string]int
+	contextCandidate map[string]*priorityContextInferrerCandidate
+}
+
+type priorityContextInferrerCandidate struct {
+	hasEnumSymbol    func(enum *EnumSymbol) bool
+	hasFunctionName  func(name string) bool
+	getLowerContexts func(context string) []string
+}
+
+type priorityContextInferrerOption func(*priorityContextInferrer)
+
+// newPriorityContextInferrer creates a new priority-based context inferrer. To infer the context,
+// it uses a slice of priorities (withContextInferrerPriorities) and a set of hints extracted from
+// the parsed statements.
+//
+// To be eligible, a context must support all functions and enum symbols present in the statements.
+// If the path context with the highest priority does not meet this requirement, it falls back to its
+// lower contexts, testing them with the same logic and choosing the first one that meets all requirements.
+//
+// If non-prioritized contexts are found in the statements, they are assigned the lowest possible priority,
+// and are only selected if no other prioritized context is found.
+func newPriorityContextInferrer(contextsCandidate map[string]*priorityContextInferrerCandidate, options ...priorityContextInferrerOption) contextInferrer {
+	c := &priorityContextInferrer{
+		contextCandidate: contextsCandidate,
+	}
+	for _, opt := range options {
+		opt(c)
+	}
+	if len(c.contextPriority) == 0 {
+		withContextInferrerPriorities(defaultContextInferPriority)(c)
+	}
+	return c
+}
+
+// withContextInferrerPriorities sets the context candidates' priorities. The lower a
+// context's position is in the slice, the higher its priority over other items.
+func withContextInferrerPriorities(priorities []string) priorityContextInferrerOption {
+	return func(c *priorityContextInferrer) {
+		contextPriority := map[string]int{}
+		for pri, context := range priorities {
+			contextPriority[context] = pri
+		}
+		c.contextPriority = contextPriority
+	}
+}
+
+func (s *priorityContextInferrer) infer(statements []string) (string, error) {
+	requiredFunctions := map[string]struct{}{}
+	requiredEnums := map[enumSymbol]struct{}{}
+
+	var inferredContext string
+	var inferredContextPriority int
+	for _, statement := range statements {
+		parsed, err := parseStatement(statement)
+		if err != nil {
+			return "", err
+		}
+
+		statementPaths, statementFunctions, statementEnums := s.getParsedStatementHints(parsed)
+		for _, p := range statementPaths {
+			candidate := p.Context
+			candidatePriority, ok := s.contextPriority[candidate]
+			if !ok {
+				candidatePriority = math.MaxInt
+			}
+			if inferredContext == "" || candidatePriority < inferredContextPriority {
+				inferredContext = candidate
+				inferredContextPriority = candidatePriority
+			}
+		}
+		for function := range statementFunctions {
+			requiredFunctions[function] = struct{}{}
+		}
+		for enum := range statementEnums {
+			requiredEnums[enum] = struct{}{}
+		}
+	}
+	// No inferred context or nothing left to verify.
+	if inferredContext == "" || (len(requiredFunctions) == 0 && len(requiredEnums) == 0) {
+		return inferredContext, nil
+	}
+	ok := s.validateContextCandidate(inferredContext, requiredFunctions, requiredEnums)
+	if ok {
+		return inferredContext, nil
+	}
+	return s.inferFromLowerContexts(inferredContext, requiredFunctions, requiredEnums), nil
+}
+
+// validateContextCandidate checks if the given context candidate has all required function names
+// and enum symbols. The functions' arity is not verified.
+func (s *priorityContextInferrer) validateContextCandidate(
+	context string,
+	requiredFunctions map[string]struct{},
+	requiredEnums map[enumSymbol]struct{},
+) bool {
+	candidate, ok := s.contextCandidate[context]
+	if !ok {
+		return false
+	}
+	if len(requiredFunctions) == 0 && len(requiredEnums) == 0 {
+		return true
+	}
+	for function := range requiredFunctions {
+		if !candidate.hasFunctionName(function) {
+			return false
+		}
+	}
+	for enum := range requiredEnums {
+		if !candidate.hasEnumSymbol((*EnumSymbol)(&enum)) {
+			return false
+		}
+	}
+	return true
+}
+
+// inferFromLowerContexts returns the first lower context that supports all required functions
+// and enum symbols used in the statements.
+// If no lower context meets the requirements, or if the context candidate is unknown, it
+// returns an empty string.
+func (s *priorityContextInferrer) inferFromLowerContexts(
+	context string,
+	requiredFunctions map[string]struct{},
+	requiredEnums map[enumSymbol]struct{},
+) string {
+	inferredContextCandidate, ok := s.contextCandidate[context]
+	if !ok {
+		return ""
+	}
+
+	lowerContextCandidates := inferredContextCandidate.getLowerContexts(context)
+	if len(lowerContextCandidates) == 0 {
+		return ""
+	}
+
+	s.sortContextCandidates(lowerContextCandidates)
+	for _, lowerCandidate := range lowerContextCandidates {
+		ok = s.validateContextCandidate(lowerCandidate, requiredFunctions, requiredEnums)
+		if ok {
+			return lowerCandidate
+		}
+	}
+	return ""
+}
+
+// sortContextCandidates sorts the slice candidates using the priorityContextInferrer.contextPriority order.
+func (s *priorityContextInferrer) sortContextCandidates(candidates []string) {
+	slices.SortFunc(candidates, func(l, r string) int {
+		lp, ok := s.contextPriority[l]
+		if !ok {
+			lp = math.MaxInt
+		}
+		rp, ok := s.contextPriority[r]
+		if !ok {
+			rp = math.MaxInt
+		}
+		return cmp.Compare(lp, rp)
+	})
+}
+
+// getParsedStatementHints extracts all paths, function names (editor and converter), and enum symbols
+// from the given parsed statements. These values are used by the context inferrer as hints to
+// select a context in which the functions/enums are supported.
+func (s *priorityContextInferrer) getParsedStatementHints(parsed *parsedStatement) ([]path, map[string]struct{}, map[enumSymbol]struct{}) {
+	visitor := newGrammarContextInferrerVisitor()
+	parsed.Editor.accept(&visitor)
+	if parsed.WhereClause != nil {
+		parsed.WhereClause.accept(&visitor)
+	}
+	return visitor.paths, visitor.functions, visitor.enumsSymbols
+}
+
+// priorityContextInferrerHintsVisitor is a grammarVisitor implementation that collects
+// all paths, function names (converter.Function and editor.Function), and enum symbols.
+type priorityContextInferrerHintsVisitor struct { + paths []path + functions map[string]struct{} + enumsSymbols map[enumSymbol]struct{} +} + +func newGrammarContextInferrerVisitor() priorityContextInferrerHintsVisitor { + return priorityContextInferrerHintsVisitor{ + paths: []path{}, + functions: make(map[string]struct{}), + enumsSymbols: make(map[enumSymbol]struct{}), + } +} + +func (v *priorityContextInferrerHintsVisitor) visitMathExprLiteral(_ *mathExprLiteral) {} + +func (v *priorityContextInferrerHintsVisitor) visitEditor(e *editor) { + v.functions[e.Function] = struct{}{} +} + +func (v *priorityContextInferrerHintsVisitor) visitConverter(c *converter) { + v.functions[c.Function] = struct{}{} +} + +func (v *priorityContextInferrerHintsVisitor) visitValue(va *value) { + if va.Enum != nil { + v.enumsSymbols[*va.Enum] = struct{}{} + } +} + +func (v *priorityContextInferrerHintsVisitor) visitPath(value *path) { + v.paths = append(v.paths, *value) +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/errors.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/errors.go index 7ccc931e2ba..eef0e8aca99 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/errors.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/errors.go @@ -3,7 +3,10 @@ package internal // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal" -import "fmt" +import ( + "fmt" + "strings" +) const ( DefaultErrorMessage = "segment %q from path %q is not a valid path nor a valid OTTL keyword for the %v context - review %v to see all valid paths" @@ -20,3 +23,8 @@ const ( func FormatDefaultErrorMessage(pathSegment, fullPath, context, ref string) error { return fmt.Errorf(DefaultErrorMessage, pathSegment, fullPath, context, ref) } + +func FormatCacheErrorMessage(lowerContext, pathContext, fullPath string) error { + pathSuggestion := strings.Replace(fullPath, pathContext+".", lowerContext+".", 1) + return fmt.Errorf(`access to cache must be performed using the same statement's context, please replace "%s" with "%s"`, fullPath, pathSuggestion) +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/logging/logging.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/logging/logging.go new file mode 100644 index 00000000000..4837506d55d --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/logging/logging.go @@ -0,0 +1,390 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package logging // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/logging" + +import ( + "encoding/hex" + "errors" + + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" + "go.uber.org/zap/zapcore" +) + +type Slice pcommon.Slice + +func (s Slice) MarshalLogArray(encoder zapcore.ArrayEncoder) error { + ss := pcommon.Slice(s) + var err error + for i := 0; i < ss.Len(); i++ { + v := ss.At(i) + switch v.Type() { + case pcommon.ValueTypeStr: + encoder.AppendString(v.Str()) + case pcommon.ValueTypeBool: + encoder.AppendBool(v.Bool()) + case pcommon.ValueTypeInt: + encoder.AppendInt64(v.Int()) + case 
pcommon.ValueTypeDouble: + encoder.AppendFloat64(v.Double()) + case pcommon.ValueTypeMap: + err = errors.Join(err, encoder.AppendObject(Map(v.Map()))) + case pcommon.ValueTypeSlice: + err = errors.Join(err, encoder.AppendArray(Slice(v.Slice()))) + case pcommon.ValueTypeBytes: + encoder.AppendByteString(v.Bytes().AsRaw()) + } + } + return err +} + +type Map pcommon.Map + +func (m Map) MarshalLogObject(encoder zapcore.ObjectEncoder) error { + mm := pcommon.Map(m) + var err error + mm.Range(func(k string, v pcommon.Value) bool { + switch v.Type() { + case pcommon.ValueTypeStr: + encoder.AddString(k, v.Str()) + case pcommon.ValueTypeBool: + encoder.AddBool(k, v.Bool()) + case pcommon.ValueTypeInt: + encoder.AddInt64(k, v.Int()) + case pcommon.ValueTypeDouble: + encoder.AddFloat64(k, v.Double()) + case pcommon.ValueTypeMap: + err = errors.Join(err, encoder.AddObject(k, Map(v.Map()))) + case pcommon.ValueTypeSlice: + err = errors.Join(err, encoder.AddArray(k, Slice(v.Slice()))) + case pcommon.ValueTypeBytes: + encoder.AddByteString(k, v.Bytes().AsRaw()) + } + return true + }) + return nil +} + +type Resource pcommon.Resource + +func (r Resource) MarshalLogObject(encoder zapcore.ObjectEncoder) error { + rr := pcommon.Resource(r) + err := encoder.AddObject("attributes", Map(rr.Attributes())) + encoder.AddUint32("dropped_attribute_count", rr.DroppedAttributesCount()) + return err +} + +type InstrumentationScope pcommon.InstrumentationScope + +func (i InstrumentationScope) MarshalLogObject(encoder zapcore.ObjectEncoder) error { + is := pcommon.InstrumentationScope(i) + err := encoder.AddObject("attributes", Map(is.Attributes())) + encoder.AddUint32("dropped_attribute_count", is.DroppedAttributesCount()) + encoder.AddString("name", is.Name()) + encoder.AddString("version", is.Version()) + return err +} + +type Span ptrace.Span + +func (s Span) MarshalLogObject(encoder zapcore.ObjectEncoder) error { + ss := ptrace.Span(s) + parentSpanID := ss.ParentSpanID() + spanID := ss.SpanID() + traceID := ss.TraceID() + err := encoder.AddObject("attributes", Map(ss.Attributes())) + encoder.AddUint32("dropped_attribute_count", ss.DroppedAttributesCount()) + encoder.AddUint32("dropped_events_count", ss.DroppedEventsCount()) + encoder.AddUint32("dropped_links_count", ss.DroppedLinksCount()) + encoder.AddUint64("end_time_unix_nano", uint64(ss.EndTimestamp())) + err = errors.Join(err, encoder.AddArray("events", SpanEventSlice(ss.Events()))) + encoder.AddString("kind", ss.Kind().String()) + err = errors.Join(err, encoder.AddArray("links", SpanLinkSlice(ss.Links()))) + encoder.AddString("name", ss.Name()) + encoder.AddString("parent_span_id", hex.EncodeToString(parentSpanID[:])) + encoder.AddString("span_id", hex.EncodeToString(spanID[:])) + encoder.AddUint64("start_time_unix_nano", uint64(ss.StartTimestamp())) + encoder.AddString("status.code", ss.Status().Code().String()) + encoder.AddString("status.message", ss.Status().Message()) + encoder.AddString("trace_id", hex.EncodeToString(traceID[:])) + encoder.AddString("trace_state", ss.TraceState().AsRaw()) + return err +} + +type SpanEventSlice ptrace.SpanEventSlice + +func (s SpanEventSlice) MarshalLogArray(encoder zapcore.ArrayEncoder) error { + ses := ptrace.SpanEventSlice(s) + var err error + for i := 0; i < ses.Len(); i++ { + err = errors.Join(err, encoder.AppendObject(SpanEvent(ses.At(i)))) + } + return err +} + +type SpanEvent ptrace.SpanEvent + +func (s SpanEvent) MarshalLogObject(encoder zapcore.ObjectEncoder) error { + se := ptrace.SpanEvent(s) + err := 
encoder.AddObject("attributes", Map(se.Attributes())) + encoder.AddUint32("dropped_attribute_count", se.DroppedAttributesCount()) + encoder.AddString("name", se.Name()) + encoder.AddUint64("time_unix_nano", uint64(se.Timestamp())) + return err +} + +type SpanLinkSlice ptrace.SpanLinkSlice + +func (s SpanLinkSlice) MarshalLogArray(encoder zapcore.ArrayEncoder) error { + sls := ptrace.SpanLinkSlice(s) + var err error + for i := 0; i < sls.Len(); i++ { + err = errors.Join(err, encoder.AppendObject(SpanLink(sls.At(i)))) + } + return err +} + +type SpanLink ptrace.SpanLink + +func (s SpanLink) MarshalLogObject(encoder zapcore.ObjectEncoder) error { + sl := ptrace.SpanLink(s) + spanID := sl.SpanID() + traceID := sl.TraceID() + err := encoder.AddObject("attributes", Map(sl.Attributes())) + encoder.AddUint32("dropped_attribute_count", sl.DroppedAttributesCount()) + encoder.AddUint32("flags", sl.Flags()) + encoder.AddString("span_id", hex.EncodeToString(spanID[:])) + encoder.AddString("trace_id", hex.EncodeToString(traceID[:])) + encoder.AddString("trace_state", sl.TraceState().AsRaw()) + return err +} + +type Metric pmetric.Metric + +func (m Metric) MarshalLogObject(encoder zapcore.ObjectEncoder) error { + mm := pmetric.Metric(m) + encoder.AddString("description", mm.Description()) + encoder.AddString("name", mm.Name()) + encoder.AddString("unit", mm.Unit()) + encoder.AddString("type", mm.Type().String()) + + var err error + switch mm.Type() { + case pmetric.MetricTypeSum: + encoder.AddString("aggregation_temporality", mm.Sum().AggregationTemporality().String()) + encoder.AddBool("is_monotonic", mm.Sum().IsMonotonic()) + err = encoder.AddArray("datapoints", NumberDataPointSlice(mm.Sum().DataPoints())) + case pmetric.MetricTypeGauge: + err = encoder.AddArray("datapoints", NumberDataPointSlice(mm.Gauge().DataPoints())) + case pmetric.MetricTypeHistogram: + encoder.AddString("aggregation_temporality", mm.Histogram().AggregationTemporality().String()) + err = encoder.AddArray("datapoints", HistogramDataPointSlice(mm.Histogram().DataPoints())) + case pmetric.MetricTypeExponentialHistogram: + encoder.AddString("aggregation_temporality", mm.ExponentialHistogram().AggregationTemporality().String()) + err = encoder.AddArray("datapoints", ExponentialHistogramDataPointSlice(mm.ExponentialHistogram().DataPoints())) + case pmetric.MetricTypeSummary: + err = encoder.AddArray("datapoints", SummaryDataPointSlice(mm.Summary().DataPoints())) + } + + return err +} + +type NumberDataPointSlice pmetric.NumberDataPointSlice + +func (n NumberDataPointSlice) MarshalLogArray(encoder zapcore.ArrayEncoder) error { + ndps := pmetric.NumberDataPointSlice(n) + var err error + for i := 0; i < ndps.Len(); i++ { + err = errors.Join(err, encoder.AppendObject(NumberDataPoint(ndps.At(i)))) + } + return err +} + +type NumberDataPoint pmetric.NumberDataPoint + +func (n NumberDataPoint) MarshalLogObject(encoder zapcore.ObjectEncoder) error { + ndp := pmetric.NumberDataPoint(n) + err := encoder.AddObject("attributes", Map(ndp.Attributes())) + err = errors.Join(err, encoder.AddArray("exemplars", ExemplarSlice(ndp.Exemplars()))) + encoder.AddUint32("flags", uint32(ndp.Flags())) + encoder.AddUint64("start_time_unix_nano", uint64(ndp.StartTimestamp())) + encoder.AddUint64("time_unix_nano", uint64(ndp.Timestamp())) + if ndp.ValueType() == pmetric.NumberDataPointValueTypeInt { + encoder.AddInt64("value_int", ndp.IntValue()) + } + if ndp.ValueType() == pmetric.NumberDataPointValueTypeDouble { + encoder.AddFloat64("value_double", 
ndp.DoubleValue()) + } + + return err +} + +type HistogramDataPointSlice pmetric.HistogramDataPointSlice + +func (h HistogramDataPointSlice) MarshalLogArray(encoder zapcore.ArrayEncoder) error { + hdps := pmetric.HistogramDataPointSlice(h) + var err error + for i := 0; i < hdps.Len(); i++ { + err = errors.Join(err, encoder.AppendObject(HistogramDataPoint(hdps.At(i)))) + } + return err +} + +type HistogramDataPoint pmetric.HistogramDataPoint + +func (h HistogramDataPoint) MarshalLogObject(encoder zapcore.ObjectEncoder) error { + hdp := pmetric.HistogramDataPoint(h) + err := encoder.AddObject("attributes", Map(hdp.Attributes())) + err = errors.Join(err, encoder.AddArray("bucket_counts", UInt64Slice(hdp.BucketCounts()))) + encoder.AddUint64("count", hdp.Count()) + err = errors.Join(err, encoder.AddArray("exemplars", ExemplarSlice(hdp.Exemplars()))) + err = errors.Join(err, encoder.AddArray("explicit_bounds", Float64Slice(hdp.ExplicitBounds()))) + encoder.AddUint32("flags", uint32(hdp.Flags())) + encoder.AddFloat64("max", hdp.Max()) + encoder.AddFloat64("min", hdp.Min()) + encoder.AddUint64("start_time_unix_nano", uint64(hdp.StartTimestamp())) + encoder.AddFloat64("sum", hdp.Sum()) + encoder.AddUint64("time_unix_nano", uint64(hdp.Timestamp())) + + return err +} + +type ExponentialHistogramDataPointSlice pmetric.ExponentialHistogramDataPointSlice + +func (e ExponentialHistogramDataPointSlice) MarshalLogArray(encoder zapcore.ArrayEncoder) error { + ehdps := pmetric.ExponentialHistogramDataPointSlice(e) + var err error + for i := 0; i < ehdps.Len(); i++ { + err = errors.Join(err, encoder.AppendObject(ExponentialHistogramDataPoint(ehdps.At(i)))) + } + return err +} + +type ExponentialHistogramDataPoint pmetric.ExponentialHistogramDataPoint + +func (e ExponentialHistogramDataPoint) MarshalLogObject(encoder zapcore.ObjectEncoder) error { + ehdp := pmetric.ExponentialHistogramDataPoint(e) + err := encoder.AddObject("attributes", Map(ehdp.Attributes())) + encoder.AddUint64("count", ehdp.Count()) + err = errors.Join(err, encoder.AddArray("exemplars", ExemplarSlice(ehdp.Exemplars()))) + encoder.AddUint32("flags", uint32(ehdp.Flags())) + encoder.AddFloat64("max", ehdp.Max()) + encoder.AddFloat64("min", ehdp.Min()) + err = errors.Join(err, encoder.AddObject("negative", ExponentialHistogramDataPointBuckets(ehdp.Negative()))) + err = errors.Join(err, encoder.AddObject("positive", ExponentialHistogramDataPointBuckets(ehdp.Positive()))) + encoder.AddInt32("scale", ehdp.Scale()) + encoder.AddUint64("start_time_unix_nano", uint64(ehdp.StartTimestamp())) + encoder.AddFloat64("sum", ehdp.Sum()) + encoder.AddUint64("time_unix_nano", uint64(ehdp.Timestamp())) + encoder.AddUint64("zero_count", ehdp.ZeroCount()) + encoder.AddFloat64("zero_threshold", ehdp.ZeroThreshold()) + return err +} + +type ExponentialHistogramDataPointBuckets pmetric.ExponentialHistogramDataPointBuckets + +func (e ExponentialHistogramDataPointBuckets) MarshalLogObject(encoder zapcore.ObjectEncoder) error { + b := pmetric.ExponentialHistogramDataPointBuckets(e) + err := encoder.AddArray("bucket_counts", UInt64Slice(b.BucketCounts())) + encoder.AddInt32("offset", b.Offset()) + return err +} + +type SummaryDataPointSlice pmetric.SummaryDataPointSlice + +func (s SummaryDataPointSlice) MarshalLogArray(encoder zapcore.ArrayEncoder) error { + sdps := pmetric.SummaryDataPointSlice(s) + var err error + for i := 0; i < sdps.Len(); i++ { + err = errors.Join(err, encoder.AppendObject(SummaryDataPoint(sdps.At(i)))) + } + return err +} + +type 
SummaryDataPoint pmetric.SummaryDataPoint + +func (s SummaryDataPoint) MarshalLogObject(encoder zapcore.ObjectEncoder) error { + sdp := pmetric.SummaryDataPoint(s) + err := encoder.AddObject("attributes", Map(sdp.Attributes())) + encoder.AddUint64("count", sdp.Count()) + encoder.AddUint32("flags", uint32(sdp.Flags())) + encoder.AddUint64("start_time_unix_nano", uint64(sdp.StartTimestamp())) + encoder.AddFloat64("sum", sdp.Sum()) + encoder.AddUint64("time_unix_nano", uint64(sdp.Timestamp())) + err = errors.Join(err, encoder.AddArray("quantile_values", SummaryDataPointValueAtQuantileSlice(sdp.QuantileValues()))) + + return err +} + +type SummaryDataPointValueAtQuantileSlice pmetric.SummaryDataPointValueAtQuantileSlice + +func (s SummaryDataPointValueAtQuantileSlice) MarshalLogArray(encoder zapcore.ArrayEncoder) error { + qs := pmetric.SummaryDataPointValueAtQuantileSlice(s) + var err error + for i := 0; i < qs.Len(); i++ { + err = errors.Join(err, encoder.AppendObject(SummaryDataPointValueAtQuantile(qs.At(i)))) + } + return nil +} + +type SummaryDataPointValueAtQuantile pmetric.SummaryDataPointValueAtQuantile + +func (s SummaryDataPointValueAtQuantile) MarshalLogObject(encoder zapcore.ObjectEncoder) error { + q := pmetric.SummaryDataPointValueAtQuantile(s) + encoder.AddFloat64("value", q.Value()) + encoder.AddFloat64("quantile", q.Quantile()) + return nil +} + +type UInt64Slice pcommon.UInt64Slice + +func (u UInt64Slice) MarshalLogArray(encoder zapcore.ArrayEncoder) error { + uis := pcommon.UInt64Slice(u) + for i := 0; i < uis.Len(); i++ { + encoder.AppendUint64(uis.At(i)) + } + return nil +} + +type Float64Slice pcommon.Float64Slice + +func (f Float64Slice) MarshalLogArray(encoder zapcore.ArrayEncoder) error { + fs := pcommon.Float64Slice(f) + for i := 0; i < fs.Len(); i++ { + encoder.AppendFloat64(fs.At(i)) + } + return nil +} + +type ExemplarSlice pmetric.ExemplarSlice + +func (e ExemplarSlice) MarshalLogArray(encoder zapcore.ArrayEncoder) error { + es := pmetric.ExemplarSlice(e) + var err error + for i := 0; i < es.Len(); i++ { + ee := es.At(i) + err = errors.Join(err, encoder.AppendObject(Exemplar(ee))) + } + return err +} + +type Exemplar pmetric.Exemplar + +func (e Exemplar) MarshalLogObject(encoder zapcore.ObjectEncoder) error { + ee := pmetric.Exemplar(e) + spanID := ee.SpanID() + traceID := ee.TraceID() + err := encoder.AddObject("filtered_attributes", Map(ee.FilteredAttributes())) + encoder.AddString("span_id", hex.EncodeToString(spanID[:])) + encoder.AddUint64("time_unix_nano", uint64(ee.Timestamp())) + encoder.AddString("trace_id", hex.EncodeToString(traceID[:])) + if ee.ValueType() == pmetric.ExemplarValueTypeInt { + encoder.AddInt64("value_int", ee.IntValue()) + } + if ee.ValueType() == pmetric.ExemplarValueTypeDouble { + encoder.AddFloat64("value_double", ee.DoubleValue()) + } + return err +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/map.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/map.go index 34e891c032c..9965d6dc5c8 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/map.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/map.go @@ -22,7 +22,11 @@ func GetMapValue[K any](ctx context.Context, tCtx K, m pcommon.Map, keys []ottl. 
return nil, err } if s == nil { - return nil, fmt.Errorf("non-string indexing is not supported") + resString, err := FetchValueFromExpression[K, string](ctx, tCtx, keys[0]) + if err != nil { + return nil, fmt.Errorf("unable to resolve a string index in map: %w", err) + } + s = resString } val, ok := m.Get(*s) @@ -43,7 +47,11 @@ func SetMapValue[K any](ctx context.Context, tCtx K, m pcommon.Map, keys []ottl. return err } if s == nil { - return fmt.Errorf("non-string indexing is not supported") + resString, err := FetchValueFromExpression[K, string](ctx, tCtx, keys[0]) + if err != nil { + return fmt.Errorf("unable to resolve a string index in map: %w", err) + } + s = resString } currentValue, ok := m.Get(*s) @@ -52,3 +60,22 @@ func SetMapValue[K any](ctx context.Context, tCtx K, m pcommon.Map, keys []ottl. } return setIndexableValue[K](ctx, tCtx, currentValue, val, keys[1:]) } + +func FetchValueFromExpression[K any, T int64 | string](ctx context.Context, tCtx K, key ottl.Key[K]) (*T, error) { + p, err := key.ExpressionGetter(ctx, tCtx) + if err != nil { + return nil, err + } + if p == nil { + return nil, fmt.Errorf("invalid key type") + } + res, err := p.Get(ctx, tCtx) + if err != nil { + return nil, err + } + resVal, ok := res.(T) + if !ok { + return nil, fmt.Errorf("could not resolve key for map/slice, expecting '%T' but got '%T'", resVal, res) + } + return &resVal, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/metric.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/metric.go index 97aa011d118..0b299f89189 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/metric.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/metric.go @@ -11,6 +11,10 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" ) +const ( + MetricContextName = "metric" +) + type MetricContext interface { GetMetric() pmetric.Metric } @@ -29,7 +33,7 @@ var MetricSymbolTable = map[ottl.EnumSymbol]ottl.Enum{ func MetricPathGetSetter[K MetricContext](path ottl.Path[K]) (ottl.GetSetter[K], error) { if path == nil { - return accessMetric[K](), nil + return nil, FormatDefaultErrorMessage(MetricContextName, MetricContextName, "Metric", MetricRef) } switch path.Name() { case "name": @@ -51,26 +55,12 @@ func MetricPathGetSetter[K MetricContext](path ottl.Path[K]) (ottl.GetSetter[K], } } -func accessMetric[K MetricContext]() ottl.StandardGetSetter[K] { - return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { - return tCtx.GetMetric(), nil - }, - Setter: func(ctx context.Context, tCtx K, val any) error { - if newMetric, ok := val.(pmetric.Metric); ok { - newMetric.CopyTo(tCtx.GetMetric()) - } - return nil - }, - } -} - func accessName[K MetricContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetMetric().Name(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if str, ok := val.(string); ok { tCtx.GetMetric().SetName(str) } @@ -81,10 +71,10 @@ func accessName[K MetricContext]() ottl.StandardGetSetter[K] { func accessDescription[K MetricContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx 
context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetMetric().Description(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if str, ok := val.(string); ok { tCtx.GetMetric().SetDescription(str) } @@ -95,10 +85,10 @@ func accessDescription[K MetricContext]() ottl.StandardGetSetter[K] { func accessUnit[K MetricContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetMetric().Unit(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if str, ok := val.(string); ok { tCtx.GetMetric().SetUnit(str) } @@ -109,10 +99,10 @@ func accessUnit[K MetricContext]() ottl.StandardGetSetter[K] { func accessType[K MetricContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return int64(tCtx.GetMetric().Type()), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, _ K, _ any) error { // TODO Implement methods so correctly convert data types. // https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/10130 return nil @@ -122,7 +112,7 @@ func accessType[K MetricContext]() ottl.StandardGetSetter[K] { func accessAggTemporality[K MetricContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { metric := tCtx.GetMetric() switch metric.Type() { case pmetric.MetricTypeSum: @@ -134,7 +124,7 @@ func accessAggTemporality[K MetricContext]() ottl.StandardGetSetter[K] { } return nil, nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if newAggTemporality, ok := val.(int64); ok { metric := tCtx.GetMetric() switch metric.Type() { @@ -153,14 +143,14 @@ func accessAggTemporality[K MetricContext]() ottl.StandardGetSetter[K] { func accessIsMonotonic[K MetricContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { metric := tCtx.GetMetric() if metric.Type() == pmetric.MetricTypeSum { return metric.Sum().IsMonotonic(), nil } return nil, nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if newIsMonotonic, ok := val.(bool); ok { metric := tCtx.GetMetric() if metric.Type() == pmetric.MetricTypeSum { @@ -174,7 +164,7 @@ func accessIsMonotonic[K MetricContext]() ottl.StandardGetSetter[K] { func accessDataPoints[K MetricContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { metric := tCtx.GetMetric() switch metric.Type() { case pmetric.MetricTypeSum: @@ -190,7 +180,7 @@ func accessDataPoints[K MetricContext]() ottl.StandardGetSetter[K] { } return nil, nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { metric := tCtx.GetMetric() switch metric.Type() { case 
pmetric.MetricTypeSum: diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/path.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/path.go index c7d9d802b66..75341935462 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/path.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/path.go @@ -12,15 +12,21 @@ import ( var _ ottl.Path[any] = &TestPath[any]{} type TestPath[K any] struct { + C string N string KeySlice []ottl.Key[K] NextPath *TestPath[K] + FullPath string } func (p *TestPath[K]) Name() string { return p.N } +func (p *TestPath[K]) Context() string { + return p.C +} + func (p *TestPath[K]) Next() ottl.Path[K] { if p.NextPath == nil { return nil @@ -33,6 +39,9 @@ func (p *TestPath[K]) Keys() []ottl.Key[K] { } func (p *TestPath[K]) String() string { + if p.FullPath != "" { + return p.FullPath + } return p.N } @@ -41,6 +50,7 @@ var _ ottl.Key[any] = &TestKey[any]{} type TestKey[K any] struct { S *string I *int64 + G ottl.Getter[K] } func (k *TestKey[K]) String(_ context.Context, _ K) (*string, error) { @@ -50,3 +60,7 @@ func (k *TestKey[K]) String(_ context.Context, _ K) (*string, error) { func (k *TestKey[K]) Int(_ context.Context, _ K) (*int64, error) { return k.I, nil } + +func (k *TestKey[K]) ExpressionGetter(_ context.Context, _ K) (ottl.Getter[K], error) { + return k.G, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/resource.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/resource.go index 3606f5d6bd6..7b454d50ae8 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/resource.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/resource.go @@ -11,13 +11,18 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" ) +const ( + ResourceContextName = "resource" +) + type ResourceContext interface { GetResource() pcommon.Resource + GetResourceSchemaURLItem() SchemaURLItem } -func ResourcePathGetSetter[K ResourceContext](path ottl.Path[K]) (ottl.GetSetter[K], error) { +func ResourcePathGetSetter[K ResourceContext](lowerContext string, path ottl.Path[K]) (ottl.GetSetter[K], error) { if path == nil { - return accessResource[K](), nil + return nil, FormatDefaultErrorMessage(ResourceContextName, ResourceContextName, "Resource", ResourceContextRef) } switch path.Name() { case "attributes": @@ -27,31 +32,21 @@ func ResourcePathGetSetter[K ResourceContext](path ottl.Path[K]) (ottl.GetSetter return accessResourceAttributesKey[K](path.Keys()), nil case "dropped_attributes_count": return accessResourceDroppedAttributesCount[K](), nil + case "schema_url": + return accessResourceSchemaURLItem[K](), nil + case "cache": + return nil, FormatCacheErrorMessage(lowerContext, path.Context(), path.String()) default: return nil, FormatDefaultErrorMessage(path.Name(), path.String(), "Resource", ResourceContextRef) } } -func accessResource[K ResourceContext]() ottl.StandardGetSetter[K] { - return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { - return tCtx.GetResource(), nil - }, - Setter: func(ctx context.Context, tCtx K, val any) error { - if newRes, ok := val.(pcommon.Resource); ok { - newRes.CopyTo(tCtx.GetResource()) - } - return nil - 
}, - } -} - func accessResourceAttributes[K ResourceContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetResource().Attributes(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if attrs, ok := val.(pcommon.Map); ok { attrs.CopyTo(tCtx.GetResource().Attributes()) } @@ -73,10 +68,10 @@ func accessResourceAttributesKey[K ResourceContext](keys []ottl.Key[K]) ottl.Sta func accessResourceDroppedAttributesCount[K ResourceContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return int64(tCtx.GetResource().DroppedAttributesCount()), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if i, ok := val.(int64); ok { tCtx.GetResource().SetDroppedAttributesCount(uint32(i)) } @@ -84,3 +79,17 @@ func accessResourceDroppedAttributesCount[K ResourceContext]() ottl.StandardGetS }, } } + +func accessResourceSchemaURLItem[K ResourceContext]() ottl.StandardGetSetter[K] { + return ottl.StandardGetSetter[K]{ + Getter: func(_ context.Context, tCtx K) (any, error) { + return tCtx.GetResourceSchemaURLItem().SchemaUrl(), nil + }, + Setter: func(_ context.Context, tCtx K, val any) error { + if schemaURL, ok := val.(string); ok { + tCtx.GetResourceSchemaURLItem().SetSchemaUrl(schemaURL) + } + return nil + }, + } +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/schema.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/schema.go new file mode 100644 index 00000000000..c5b6702200e --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/schema.go @@ -0,0 +1,11 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 +package internal // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal" + +//revive:disable:var-naming The methods in this interface are defined by pdata types. 
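+// SchemaURLItem is implemented by the pdata types that carry a schema URL
+// (for example plog.ResourceLogs and plog.ScopeLogs, and their trace and
+// metric counterparts), which lets the resource and scope contexts expose a
+// writable schema_url path.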
+type SchemaURLItem interface { + SchemaUrl() string + SetSchemaUrl(v string) +} + +//revive:enable:var-naming diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/scope.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/scope.go index dcf85ae6881..35a40a3869c 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/scope.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/scope.go @@ -11,13 +11,19 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" ) +const ( + InstrumentationScopeContextName = "instrumentation_scope" + ScopeContextName = "scope" +) + type InstrumentationScopeContext interface { GetInstrumentationScope() pcommon.InstrumentationScope + GetScopeSchemaURLItem() SchemaURLItem } -func ScopePathGetSetter[K InstrumentationScopeContext](path ottl.Path[K]) (ottl.GetSetter[K], error) { +func ScopePathGetSetter[K InstrumentationScopeContext](lowerContext string, path ottl.Path[K]) (ottl.GetSetter[K], error) { if path == nil { - return accessInstrumentationScope[K](), nil + return nil, FormatDefaultErrorMessage(InstrumentationScopeContextName, InstrumentationScopeContextName, "Instrumentation Scope", InstrumentationScopeRef) } switch path.Name() { case "name": @@ -32,31 +38,21 @@ func ScopePathGetSetter[K InstrumentationScopeContext](path ottl.Path[K]) (ottl. return accessInstrumentationScopeAttributesKey[K](mapKeys), nil case "dropped_attributes_count": return accessInstrumentationScopeDroppedAttributesCount[K](), nil + case "schema_url": + return accessInstrumentationScopeSchemaURLItem[K](), nil + case "cache": + return nil, FormatCacheErrorMessage(lowerContext, path.Context(), path.String()) default: return nil, FormatDefaultErrorMessage(path.Name(), path.String(), "Instrumentation Scope", InstrumentationScopeRef) } } -func accessInstrumentationScope[K InstrumentationScopeContext]() ottl.StandardGetSetter[K] { - return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { - return tCtx.GetInstrumentationScope(), nil - }, - Setter: func(ctx context.Context, tCtx K, val any) error { - if newIl, ok := val.(pcommon.InstrumentationScope); ok { - newIl.CopyTo(tCtx.GetInstrumentationScope()) - } - return nil - }, - } -} - func accessInstrumentationScopeAttributes[K InstrumentationScopeContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetInstrumentationScope().Attributes(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if attrs, ok := val.(pcommon.Map); ok { attrs.CopyTo(tCtx.GetInstrumentationScope().Attributes()) } @@ -78,10 +74,10 @@ func accessInstrumentationScopeAttributesKey[K InstrumentationScopeContext](keys func accessInstrumentationScopeName[K InstrumentationScopeContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetInstrumentationScope().Name(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if str, ok := val.(string); ok { tCtx.GetInstrumentationScope().SetName(str) } @@ 
-92,10 +88,10 @@ func accessInstrumentationScopeName[K InstrumentationScopeContext]() ottl.Standa func accessInstrumentationScopeVersion[K InstrumentationScopeContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetInstrumentationScope().Version(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if str, ok := val.(string); ok { tCtx.GetInstrumentationScope().SetVersion(str) } @@ -106,10 +102,10 @@ func accessInstrumentationScopeVersion[K InstrumentationScopeContext]() ottl.Sta func accessInstrumentationScopeDroppedAttributesCount[K InstrumentationScopeContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return int64(tCtx.GetInstrumentationScope().DroppedAttributesCount()), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if i, ok := val.(int64); ok { tCtx.GetInstrumentationScope().SetDroppedAttributesCount(uint32(i)) } @@ -117,3 +113,17 @@ func accessInstrumentationScopeDroppedAttributesCount[K InstrumentationScopeCont }, } } + +func accessInstrumentationScopeSchemaURLItem[K InstrumentationScopeContext]() ottl.StandardGetSetter[K] { + return ottl.StandardGetSetter[K]{ + Getter: func(_ context.Context, tCtx K) (any, error) { + return tCtx.GetScopeSchemaURLItem().SchemaUrl(), nil + }, + Setter: func(_ context.Context, tCtx K, val any) error { + if schemaURL, ok := val.(string); ok { + tCtx.GetScopeSchemaURLItem().SetSchemaUrl(schemaURL) + } + return nil + }, + } +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/slice.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/slice.go index 5a90e281a90..9004d08ae83 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/slice.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/slice.go @@ -22,7 +22,11 @@ func GetSliceValue[K any](ctx context.Context, tCtx K, s pcommon.Slice, keys []o return nil, err } if i == nil { - return nil, fmt.Errorf("non-integer indexing is not supported") + resInt, err := FetchValueFromExpression[K, int64](ctx, tCtx, keys[0]) + if err != nil { + return nil, fmt.Errorf("unable to resolve an integer index in slice: %w", err) + } + i = resInt } idx := int(*i) @@ -44,7 +48,11 @@ func SetSliceValue[K any](ctx context.Context, tCtx K, s pcommon.Slice, keys []o return err } if i == nil { - return fmt.Errorf("non-integer indexing is not supported") + resInt, err := FetchValueFromExpression[K, int64](ctx, tCtx, keys[0]) + if err != nil { + return fmt.Errorf("unable to resolve an integer index in slice: %w", err) + } + i = resInt } idx := int(*i) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/span.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/span.go index e7270cbc0d6..fc7ca27177e 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/span.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/span.go @@ 
-18,7 +18,8 @@ import ( ) const ( - SpanContextName = "Span" + SpanContextName = "span" + SpanContextNameDescription = "Span" ) type SpanContext interface { @@ -37,9 +38,9 @@ var SpanSymbolTable = map[ottl.EnumSymbol]ottl.Enum{ "STATUS_CODE_ERROR": ottl.Enum(ptrace.StatusCodeError), } -func SpanPathGetSetter[K SpanContext](path ottl.Path[K]) (ottl.GetSetter[K], error) { +func SpanPathGetSetter[K SpanContext](lowerContext string, path ottl.Path[K]) (ottl.GetSetter[K], error) { if path == nil { - return accessSpan[K](), nil + return nil, FormatDefaultErrorMessage(SpanContextName, SpanContextName, SpanContextNameDescription, SpanRef) } switch path.Name() { case "trace_id": @@ -48,7 +49,7 @@ func SpanPathGetSetter[K SpanContext](path ottl.Path[K]) (ottl.GetSetter[K], err if nextPath.Name() == "string" { return accessStringTraceID[K](), nil } - return nil, FormatDefaultErrorMessage(nextPath.Name(), nextPath.String(), SpanContextName, SpanRef) + return nil, FormatDefaultErrorMessage(nextPath.Name(), nextPath.String(), SpanContextNameDescription, SpanRef) } return accessTraceID[K](), nil case "span_id": @@ -57,7 +58,7 @@ func SpanPathGetSetter[K SpanContext](path ottl.Path[K]) (ottl.GetSetter[K], err if nextPath.Name() == "string" { return accessStringSpanID[K](), nil } - return nil, FormatDefaultErrorMessage(nextPath.Name(), nextPath.String(), SpanContextName, SpanRef) + return nil, FormatDefaultErrorMessage(nextPath.Name(), nextPath.String(), SpanContextNameDescription, SpanRef) } return accessSpanID[K](), nil case "trace_state": @@ -72,7 +73,7 @@ func SpanPathGetSetter[K SpanContext](path ottl.Path[K]) (ottl.GetSetter[K], err if nextPath.Name() == "string" { return accessStringParentSpanID[K](), nil } - return nil, FormatDefaultErrorMessage(nextPath.Name(), nextPath.String(), SpanContextName, SpanRef) + return nil, FormatDefaultErrorMessage(nextPath.Name(), nextPath.String(), SpanContextNameDescription, SpanRef) } return accessParentSpanID[K](), nil case "name": @@ -86,7 +87,7 @@ func SpanPathGetSetter[K SpanContext](path ottl.Path[K]) (ottl.GetSetter[K], err case "deprecated_string": return accessDeprecatedStringKind[K](), nil default: - return nil, FormatDefaultErrorMessage(nextPath.Name(), nextPath.String(), SpanContextName, SpanRef) + return nil, FormatDefaultErrorMessage(nextPath.Name(), nextPath.String(), SpanContextNameDescription, SpanRef) } } return accessKind[K](), nil @@ -123,35 +124,23 @@ func SpanPathGetSetter[K SpanContext](path ottl.Path[K]) (ottl.GetSetter[K], err case "message": return accessStatusMessage[K](), nil default: - return nil, FormatDefaultErrorMessage(nextPath.Name(), nextPath.String(), SpanContextName, SpanRef) + return nil, FormatDefaultErrorMessage(nextPath.Name(), nextPath.String(), SpanContextNameDescription, SpanRef) } } return accessStatus[K](), nil + case "cache": + return nil, FormatCacheErrorMessage(lowerContext, path.Context(), path.String()) default: - return nil, FormatDefaultErrorMessage(path.Name(), path.String(), SpanContextName, SpanRef) - } -} - -func accessSpan[K SpanContext]() ottl.StandardGetSetter[K] { - return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { - return tCtx.GetSpan(), nil - }, - Setter: func(ctx context.Context, tCtx K, val any) error { - if newSpan, ok := val.(ptrace.Span); ok { - newSpan.CopyTo(tCtx.GetSpan()) - } - return nil - }, + return nil, FormatDefaultErrorMessage(path.Name(), path.String(), SpanContextNameDescription, SpanRef) } } func accessTraceID[K SpanContext]() 
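SpanPathGetSetter now receives the lowercase name of the calling context and returns an error for a nil path rather than a whole-span accessor (accessSpan is removed), and a `cache` path is reported through FormatCacheErrorMessage so it resolves against the concrete context instead. A minimal call sketch, assuming a context type that satisfies SpanContext; the ottlspan qualifier and variable names are illustrative, not taken from this diff:

    // `span.name` and the other span fields resolve as before
    gs, err := internal.SpanPathGetSetter[ottlspan.TransformContext]("span", path)
    // a bare `span` reference or a `span.cache` path now returns a
    // descriptive error built from SpanContextNameDescription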
ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetSpan().TraceID(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if newTraceID, ok := val.(pcommon.TraceID); ok { tCtx.GetSpan().SetTraceID(newTraceID) } @@ -162,11 +151,11 @@ func accessTraceID[K SpanContext]() ottl.StandardGetSetter[K] { func accessStringTraceID[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { id := tCtx.GetSpan().TraceID() return hex.EncodeToString(id[:]), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if str, ok := val.(string); ok { id, err := ParseTraceID(str) if err != nil { @@ -181,10 +170,10 @@ func accessStringTraceID[K SpanContext]() ottl.StandardGetSetter[K] { func accessSpanID[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetSpan().SpanID(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if newSpanID, ok := val.(pcommon.SpanID); ok { tCtx.GetSpan().SetSpanID(newSpanID) } @@ -195,11 +184,11 @@ func accessSpanID[K SpanContext]() ottl.StandardGetSetter[K] { func accessStringSpanID[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { id := tCtx.GetSpan().SpanID() return hex.EncodeToString(id[:]), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if str, ok := val.(string); ok { id, err := ParseSpanID(str) if err != nil { @@ -214,10 +203,10 @@ func accessStringSpanID[K SpanContext]() ottl.StandardGetSetter[K] { func accessTraceState[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetSpan().TraceState().AsRaw(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if str, ok := val.(string); ok { tCtx.GetSpan().TraceState().FromRaw(str) } @@ -266,10 +255,10 @@ func accessTraceStateKey[K SpanContext](keys []ottl.Key[K]) (ottl.StandardGetSet func accessParentSpanID[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetSpan().ParentSpanID(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if newParentSpanID, ok := val.(pcommon.SpanID); ok { tCtx.GetSpan().SetParentSpanID(newParentSpanID) } @@ -280,11 +269,11 @@ func accessParentSpanID[K SpanContext]() ottl.StandardGetSetter[K] { func accessStringParentSpanID[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ 
context.Context, tCtx K) (any, error) { id := tCtx.GetSpan().ParentSpanID() return hex.EncodeToString(id[:]), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if str, ok := val.(string); ok { id, err := ParseSpanID(str) if err != nil { @@ -299,10 +288,10 @@ func accessStringParentSpanID[K SpanContext]() ottl.StandardGetSetter[K] { func accessSpanName[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetSpan().Name(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if str, ok := val.(string); ok { tCtx.GetSpan().SetName(str) } @@ -313,10 +302,10 @@ func accessSpanName[K SpanContext]() ottl.StandardGetSetter[K] { func accessKind[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return int64(tCtx.GetSpan().Kind()), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if i, ok := val.(int64); ok { tCtx.GetSpan().SetKind(ptrace.SpanKind(i)) } @@ -327,10 +316,10 @@ func accessKind[K SpanContext]() ottl.StandardGetSetter[K] { func accessStringKind[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetSpan().Kind().String(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if s, ok := val.(string); ok { var kind ptrace.SpanKind switch s { @@ -358,10 +347,10 @@ func accessStringKind[K SpanContext]() ottl.StandardGetSetter[K] { func accessDeprecatedStringKind[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return traceutil.SpanKindStr(tCtx.GetSpan().Kind()), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if s, ok := val.(string); ok { var kind ptrace.SpanKind switch s { @@ -389,10 +378,10 @@ func accessDeprecatedStringKind[K SpanContext]() ottl.StandardGetSetter[K] { func accessStartTimeUnixNano[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetSpan().StartTimestamp().AsTime().UnixNano(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if i, ok := val.(int64); ok { tCtx.GetSpan().SetStartTimestamp(pcommon.NewTimestampFromTime(time.Unix(0, i))) } @@ -403,10 +392,10 @@ func accessStartTimeUnixNano[K SpanContext]() ottl.StandardGetSetter[K] { func accessEndTimeUnixNano[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetSpan().EndTimestamp().AsTime().UnixNano(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: 
func(_ context.Context, tCtx K, val any) error { if i, ok := val.(int64); ok { tCtx.GetSpan().SetEndTimestamp(pcommon.NewTimestampFromTime(time.Unix(0, i))) } @@ -417,10 +406,10 @@ func accessEndTimeUnixNano[K SpanContext]() ottl.StandardGetSetter[K] { func accessStartTime[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetSpan().StartTimestamp().AsTime(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if i, ok := val.(time.Time); ok { tCtx.GetSpan().SetStartTimestamp(pcommon.NewTimestampFromTime(i)) } @@ -431,10 +420,10 @@ func accessStartTime[K SpanContext]() ottl.StandardGetSetter[K] { func accessEndTime[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetSpan().EndTimestamp().AsTime(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if i, ok := val.(time.Time); ok { tCtx.GetSpan().SetEndTimestamp(pcommon.NewTimestampFromTime(i)) } @@ -445,10 +434,10 @@ func accessEndTime[K SpanContext]() ottl.StandardGetSetter[K] { func accessAttributes[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetSpan().Attributes(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if attrs, ok := val.(pcommon.Map); ok { attrs.CopyTo(tCtx.GetSpan().Attributes()) } @@ -470,10 +459,10 @@ func accessAttributesKey[K SpanContext](keys []ottl.Key[K]) ottl.StandardGetSett func accessSpanDroppedAttributesCount[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return int64(tCtx.GetSpan().DroppedAttributesCount()), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if i, ok := val.(int64); ok { tCtx.GetSpan().SetDroppedAttributesCount(uint32(i)) } @@ -484,12 +473,12 @@ func accessSpanDroppedAttributesCount[K SpanContext]() ottl.StandardGetSetter[K] func accessEvents[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetSpan().Events(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if slc, ok := val.(ptrace.SpanEventSlice); ok { - tCtx.GetSpan().Events().RemoveIf(func(event ptrace.SpanEvent) bool { + tCtx.GetSpan().Events().RemoveIf(func(_ ptrace.SpanEvent) bool { return true }) slc.CopyTo(tCtx.GetSpan().Events()) @@ -501,10 +490,10 @@ func accessEvents[K SpanContext]() ottl.StandardGetSetter[K] { func accessDroppedEventsCount[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return int64(tCtx.GetSpan().DroppedEventsCount()), nil }, 
- Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if i, ok := val.(int64); ok { tCtx.GetSpan().SetDroppedEventsCount(uint32(i)) } @@ -515,12 +504,12 @@ func accessDroppedEventsCount[K SpanContext]() ottl.StandardGetSetter[K] { func accessLinks[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetSpan().Links(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if slc, ok := val.(ptrace.SpanLinkSlice); ok { - tCtx.GetSpan().Links().RemoveIf(func(event ptrace.SpanLink) bool { + tCtx.GetSpan().Links().RemoveIf(func(_ ptrace.SpanLink) bool { return true }) slc.CopyTo(tCtx.GetSpan().Links()) @@ -532,10 +521,10 @@ func accessLinks[K SpanContext]() ottl.StandardGetSetter[K] { func accessDroppedLinksCount[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return int64(tCtx.GetSpan().DroppedLinksCount()), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if i, ok := val.(int64); ok { tCtx.GetSpan().SetDroppedLinksCount(uint32(i)) } @@ -546,10 +535,10 @@ func accessDroppedLinksCount[K SpanContext]() ottl.StandardGetSetter[K] { func accessStatus[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetSpan().Status(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if status, ok := val.(ptrace.Status); ok { status.CopyTo(tCtx.GetSpan().Status()) } @@ -560,10 +549,10 @@ func accessStatus[K SpanContext]() ottl.StandardGetSetter[K] { func accessStatusCode[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return int64(tCtx.GetSpan().Status().Code()), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if i, ok := val.(int64); ok { tCtx.GetSpan().Status().SetCode(ptrace.StatusCode(i)) } @@ -574,10 +563,10 @@ func accessStatusCode[K SpanContext]() ottl.StandardGetSetter[K] { func accessStatusMessage[K SpanContext]() ottl.StandardGetSetter[K] { return ottl.StandardGetSetter[K]{ - Getter: func(ctx context.Context, tCtx K) (any, error) { + Getter: func(_ context.Context, tCtx K) (any, error) { return tCtx.GetSpan().Status().Message(), nil }, - Setter: func(ctx context.Context, tCtx K, val any) error { + Setter: func(_ context.Context, tCtx K, val any) error { if str, ok := val.(string); ok { tCtx.GetSpan().Status().SetMessage(str) } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/value.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/value.go index ce335854eb3..34136fe89ca 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/value.go +++ 
b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/value.go @@ -71,27 +71,35 @@ func SetValue(value pcommon.Value, val any) error { func getIndexableValue[K any](ctx context.Context, tCtx K, value pcommon.Value, keys []ottl.Key[K]) (any, error) { val := value var ok bool - for i := 0; i < len(keys); i++ { + for index := 0; index < len(keys); index++ { switch val.Type() { case pcommon.ValueTypeMap: - s, err := keys[i].String(ctx, tCtx) + s, err := keys[index].String(ctx, tCtx) if err != nil { return nil, err } if s == nil { - return nil, fmt.Errorf("map must be indexed by a string") + resString, err := FetchValueFromExpression[K, string](ctx, tCtx, keys[index]) + if err != nil { + return nil, fmt.Errorf("unable to resolve a string index in map: %w", err) + } + s = resString } val, ok = val.Map().Get(*s) if !ok { return nil, nil } case pcommon.ValueTypeSlice: - i, err := keys[i].Int(ctx, tCtx) + i, err := keys[index].Int(ctx, tCtx) if err != nil { return nil, err } if i == nil { - return nil, fmt.Errorf("slice must be indexed by an int") + resInt, err := FetchValueFromExpression[K, int64](ctx, tCtx, keys[index]) + if err != nil { + return nil, fmt.Errorf("unable to resolve an integer index in slice: %w", err) + } + i = resInt } if int(*i) >= val.Slice().Len() || int(*i) < 0 { return nil, fmt.Errorf("index %v out of bounds", *i) @@ -117,15 +125,19 @@ func setIndexableValue[K any](ctx context.Context, tCtx K, currentValue pcommon. return err } - for i := 0; i < len(keys); i++ { + for index := 0; index < len(keys); index++ { switch currentValue.Type() { case pcommon.ValueTypeMap: - s, err := keys[i].String(ctx, tCtx) + s, err := keys[index].String(ctx, tCtx) if err != nil { return err } if s == nil { - return errors.New("map must be indexed by a string") + resString, err := FetchValueFromExpression[K, string](ctx, tCtx, keys[index]) + if err != nil { + return fmt.Errorf("unable to resolve a string index in map: %w", err) + } + s = resString } potentialValue, ok := currentValue.Map().Get(*s) if !ok { @@ -134,23 +146,27 @@ func setIndexableValue[K any](ctx context.Context, tCtx K, currentValue pcommon. currentValue = potentialValue } case pcommon.ValueTypeSlice: - i, err := keys[i].Int(ctx, tCtx) + i, err := keys[index].Int(ctx, tCtx) if err != nil { return err } if i == nil { - return errors.New("slice must be indexed by an int") + resInt, err := FetchValueFromExpression[K, int64](ctx, tCtx, keys[index]) + if err != nil { + return fmt.Errorf("unable to resolve an integer index in slice: %w", err) + } + i = resInt } if int(*i) >= currentValue.Slice().Len() || int(*i) < 0 { return fmt.Errorf("index %v out of bounds", *i) } currentValue = currentValue.Slice().At(int(*i)) case pcommon.ValueTypeEmpty: - s, err := keys[i].String(ctx, tCtx) + s, err := keys[index].String(ctx, tCtx) if err != nil { return err } - i, err := keys[i].Int(ctx, tCtx) + i, err := keys[index].Int(ctx, tCtx) if err != nil { return err } @@ -164,7 +180,20 @@ func setIndexableValue[K any](ctx context.Context, tCtx K, currentValue pcommon. 
} currentValue = currentValue.Slice().AppendEmpty() default: - return errors.New("neither a string nor an int index was given, this is an error in the OTTL") + resString, errString := FetchValueFromExpression[K, string](ctx, tCtx, keys[index]) + resInt, errInt := FetchValueFromExpression[K, int64](ctx, tCtx, keys[index]) + switch { + case errInt == nil: + currentValue.SetEmptySlice() + for k := 0; k < int(*resInt); k++ { + currentValue.Slice().AppendEmpty() + } + currentValue = currentValue.Slice().AppendEmpty() + case errString == nil: + currentValue = currentValue.SetEmptyMap().PutEmpty(*resString) + default: + return errors.New("neither a string nor an int index was given, this is an error in the OTTL") + } } default: return fmt.Errorf("type %v does not support string indexing", currentValue.Type()) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint/datapoint.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint/datapoint.go index 37a39068d3e..4f491c82caf 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint/datapoint.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint/datapoint.go @@ -5,23 +5,31 @@ package ottldatapoint // import "github.com/open-telemetry/opentelemetry-collect import ( "context" + "errors" "fmt" "time" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" + "go.uber.org/zap/zapcore" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/logging" ) const ( - contextName = "DataPoint" + // Experimental: *NOTE* this constant is subject to change or removal in the future. 
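The value.go hunks above extend the same expression fallback to indexed pcommon.Value access: map keys may resolve to a string and slice indices to an int64 via FetchValueFromExpression. When the target value is still empty and the key is not a literal, the resolved type now decides what gets materialized: an int64 result allocates a slice grown out to the requested index, while a string result creates a map entry. For example, with hypothetical attributes, a statement such as

    set(attributes["out"][attributes["k"]], "v")

builds attributes["out"] as a map when attributes["k"] evaluates to a string, and as a slice when it evaluates to an integer.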
+ ContextName = "datapoint" + contextNameDescription = "DataPoint" ) -var _ internal.ResourceContext = TransformContext{} -var _ internal.InstrumentationScopeContext = TransformContext{} +var ( + _ internal.ResourceContext = (*TransformContext)(nil) + _ internal.InstrumentationScopeContext = (*TransformContext)(nil) + _ zapcore.ObjectMarshaler = (*TransformContext)(nil) +) type TransformContext struct { dataPoint any @@ -30,18 +38,57 @@ type TransformContext struct { instrumentationScope pcommon.InstrumentationScope resource pcommon.Resource cache pcommon.Map + scopeMetrics pmetric.ScopeMetrics + resourceMetrics pmetric.ResourceMetrics +} + +func (tCtx TransformContext) MarshalLogObject(encoder zapcore.ObjectEncoder) error { + err := encoder.AddObject("resource", logging.Resource(tCtx.resource)) + err = errors.Join(err, encoder.AddObject("scope", logging.InstrumentationScope(tCtx.instrumentationScope))) + err = errors.Join(err, encoder.AddObject("metric", logging.Metric(tCtx.metric))) + + switch dp := tCtx.dataPoint.(type) { + case pmetric.NumberDataPoint: + err = errors.Join(err, encoder.AddObject("datapoint", logging.NumberDataPoint(dp))) + case pmetric.HistogramDataPoint: + err = errors.Join(err, encoder.AddObject("datapoint", logging.HistogramDataPoint(dp))) + case pmetric.ExponentialHistogramDataPoint: + err = errors.Join(err, encoder.AddObject("datapoint", logging.ExponentialHistogramDataPoint(dp))) + case pmetric.SummaryDataPoint: + err = errors.Join(err, encoder.AddObject("datapoint", logging.SummaryDataPoint(dp))) + } + + err = errors.Join(err, encoder.AddObject("cache", logging.Map(tCtx.cache))) + return err } type Option func(*ottl.Parser[TransformContext]) -func NewTransformContext(dataPoint any, metric pmetric.Metric, metrics pmetric.MetricSlice, instrumentationScope pcommon.InstrumentationScope, resource pcommon.Resource) TransformContext { - return TransformContext{ +type TransformContextOption func(*TransformContext) + +func NewTransformContext(dataPoint any, metric pmetric.Metric, metrics pmetric.MetricSlice, instrumentationScope pcommon.InstrumentationScope, resource pcommon.Resource, scopeMetrics pmetric.ScopeMetrics, resourceMetrics pmetric.ResourceMetrics, options ...TransformContextOption) TransformContext { + tc := TransformContext{ dataPoint: dataPoint, metric: metric, metrics: metrics, instrumentationScope: instrumentationScope, resource: resource, cache: pcommon.NewMap(), + scopeMetrics: scopeMetrics, + resourceMetrics: resourceMetrics, + } + for _, opt := range options { + opt(&tc) + } + return tc +} + +// Experimental: *NOTE* this option is subject to change or removal in the future. +func WithCache(cache *pcommon.Map) TransformContextOption { + return func(p *TransformContext) { + if cache != nil { + p.cache = *cache + } } } @@ -69,6 +116,14 @@ func (tCtx TransformContext) getCache() pcommon.Map { return tCtx.cache } +func (tCtx TransformContext) GetScopeSchemaURLItem() internal.SchemaURLItem { + return tCtx.scopeMetrics +} + +func (tCtx TransformContext) GetResourceSchemaURLItem() internal.SchemaURLItem { + return tCtx.resourceMetrics +} + func NewParser(functions map[string]ottl.Factory[TransformContext], telemetrySettings component.TelemetrySettings, options ...Option) (ottl.Parser[TransformContext], error) { pep := pathExpressionParser{telemetrySettings} p, err := ottl.NewParser[TransformContext]( @@ -86,6 +141,22 @@ func NewParser(functions map[string]ottl.Factory[TransformContext], telemetrySet return p, nil }
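NewTransformContext for the datapoint context now also takes the enclosing pmetric.ScopeMetrics and pmetric.ResourceMetrics, which back the new schema-URL accessors above, and accepts functional options such as WithCache. A hedged construction sketch, assuming dp, metric, sm and rm were obtained by iterating a pmetric.Metrics payload:

    cache := pcommon.NewMap()
    tCtx := ottldatapoint.NewTransformContext(
        dp,            // any of the four data point types
        metric,
        sm.Metrics(),  // pmetric.MetricSlice
        sm.Scope(),    // pcommon.InstrumentationScope
        rm.Resource(), // pcommon.Resource
        sm, rm,        // new: ScopeMetrics and ResourceMetrics for schema URLs
        ottldatapoint.WithCache(&cache), // optional shared cache
    )

Together with the EnablePathContextNames option defined next, statements may (and, once the option is set, must) spell out the context of every path, for example set(datapoint.attributes["x"], resource.attributes["y"]).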
+// EnablePathContextNames enables support for path context names on statements. +// When this option is configured, all statement paths must have a valid context prefix, +// otherwise an error is reported. +// +// Experimental: *NOTE* this option is subject to change or removal in the future. +func EnablePathContextNames() Option { + return func(p *ottl.Parser[TransformContext]) { + ottl.WithPathContextNames[TransformContext]([]string{ + ContextName, + internal.ResourceContextName, + internal.InstrumentationScopeContextName, + internal.MetricContextName, + })(p) + } +} + type StatementSequenceOption func(*ottl.StatementSequence[TransformContext]) func WithStatementSequenceErrorMode(errorMode ottl.ErrorMode) StatementSequenceOption { @@ -147,18 +218,23 @@ func (pep *pathExpressionParser) parsePath(path ottl.Path[TransformContext]) (ot if path == nil { return nil, fmt.Errorf("path cannot be nil") } + // Higher contexts parsing + if path.Context() != "" && path.Context() != ContextName { + return pep.parseHigherContextPath(path.Context(), path) + } + // Backward compatibility with paths without context + if path.Context() == "" && (path.Name() == internal.ResourceContextName || + path.Name() == internal.InstrumentationScopeContextName || + path.Name() == internal.MetricContextName) { + return pep.parseHigherContextPath(path.Name(), path.Next()) + } + switch path.Name() { case "cache": if path.Keys() == nil { return accessCache(), nil } return accessCacheKey(path.Keys()), nil - case "resource": - return internal.ResourcePathGetSetter[TransformContext](path.Next()) - case "instrumentation_scope": - return internal.ScopePathGetSetter[TransformContext](path.Next()) - case "metric": - return internal.MetricPathGetSetter[TransformContext](path.Next()) case "attributes": if path.Keys() == nil { return accessAttributes(), nil @@ -201,7 +277,7 @@ func (pep *pathExpressionParser) parsePath(path ottl.Path[TransformContext]) (ot case "bucket_counts": return accessPositiveBucketCounts(), nil default: - return nil, internal.FormatDefaultErrorMessage(nextPath.Name(), path.String(), contextName, internal.DataPointRef) + return nil, internal.FormatDefaultErrorMessage(nextPath.Name(), path.String(), contextNameDescription, internal.DataPointRef) } } return accessPositive(), nil @@ -214,23 +290,40 @@ func (pep *pathExpressionParser) parsePath(path ottl.Path[TransformContext]) (ot case "bucket_counts": return accessNegativeBucketCounts(), nil default: - return nil, internal.FormatDefaultErrorMessage(nextPath.Name(), path.String(), contextName, internal.DataPointRef) + return nil, internal.FormatDefaultErrorMessage(nextPath.Name(), path.String(), contextNameDescription, internal.DataPointRef) } } return accessNegative(), nil case "quantile_values": return accessQuantileValues(), nil default: - return nil, internal.FormatDefaultErrorMessage(path.Name(), path.String(), contextName, internal.DataPointRef) + return nil, internal.FormatDefaultErrorMessage(path.Name(), path.String(), contextNameDescription, internal.DataPointRef) + } +} + +func (pep *pathExpressionParser) parseHigherContextPath(context string, path ottl.Path[TransformContext]) (ottl.GetSetter[TransformContext], error) { + switch context { + case internal.ResourceContextName: + return internal.ResourcePathGetSetter(ContextName, path) + case internal.InstrumentationScopeContextName: + return internal.ScopePathGetSetter(ContextName, path) + case internal.MetricContextName: + return internal.MetricPathGetSetter(path) + default: + var fullPath string + if path != nil { + fullPath = path.String() + } + return nil, 
internal.FormatDefaultErrorMessage(context, fullPath, contextNameDescription, internal.DataPointRef) } } func accessCache() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.getCache(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if m, ok := val.(pcommon.Map); ok { m.CopyTo(tCtx.getCache()) } @@ -252,7 +345,7 @@ func accessCacheKey(key []ottl.Key[TransformContext]) ottl.StandardGetSetter[Tra func accessAttributes() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { switch tCtx.GetDataPoint().(type) { case pmetric.NumberDataPoint: return tCtx.GetDataPoint().(pmetric.NumberDataPoint).Attributes(), nil @@ -265,7 +358,7 @@ func accessAttributes() ottl.StandardGetSetter[TransformContext] { } return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { switch tCtx.GetDataPoint().(type) { case pmetric.NumberDataPoint: if attrs, ok := val.(pcommon.Map); ok { @@ -322,7 +415,7 @@ func accessAttributesKey(key []ottl.Key[TransformContext]) ottl.StandardGetSette func accessStartTimeUnixNano() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { switch tCtx.GetDataPoint().(type) { case pmetric.NumberDataPoint: return tCtx.GetDataPoint().(pmetric.NumberDataPoint).StartTimestamp().AsTime().UnixNano(), nil @@ -335,7 +428,7 @@ func accessStartTimeUnixNano() ottl.StandardGetSetter[TransformContext] { } return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newTime, ok := val.(int64); ok { switch tCtx.GetDataPoint().(type) { case pmetric.NumberDataPoint: @@ -355,7 +448,7 @@ func accessStartTimeUnixNano() ottl.StandardGetSetter[TransformContext] { func accessStartTime() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { switch tCtx.GetDataPoint().(type) { case pmetric.NumberDataPoint: return tCtx.GetDataPoint().(pmetric.NumberDataPoint).StartTimestamp().AsTime(), nil @@ -368,7 +461,7 @@ func accessStartTime() ottl.StandardGetSetter[TransformContext] { } return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newTime, ok := val.(time.Time); ok { switch tCtx.GetDataPoint().(type) { case pmetric.NumberDataPoint: @@ -388,7 +481,7 @@ func accessStartTime() ottl.StandardGetSetter[TransformContext] { func accessTimeUnixNano() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx 
TransformContext) (any, error) { switch tCtx.GetDataPoint().(type) { case pmetric.NumberDataPoint: return tCtx.GetDataPoint().(pmetric.NumberDataPoint).Timestamp().AsTime().UnixNano(), nil @@ -401,7 +494,7 @@ func accessTimeUnixNano() ottl.StandardGetSetter[TransformContext] { } return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newTime, ok := val.(int64); ok { switch tCtx.GetDataPoint().(type) { case pmetric.NumberDataPoint: @@ -421,7 +514,7 @@ func accessTimeUnixNano() ottl.StandardGetSetter[TransformContext] { func accessTime() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { switch tCtx.GetDataPoint().(type) { case pmetric.NumberDataPoint: return tCtx.GetDataPoint().(pmetric.NumberDataPoint).Timestamp().AsTime(), nil @@ -434,7 +527,7 @@ func accessTime() ottl.StandardGetSetter[TransformContext] { } return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newTime, ok := val.(time.Time); ok { switch tCtx.GetDataPoint().(type) { case pmetric.NumberDataPoint: @@ -454,13 +547,13 @@ func accessTime() ottl.StandardGetSetter[TransformContext] { func accessDoubleValue() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { if numberDataPoint, ok := tCtx.GetDataPoint().(pmetric.NumberDataPoint); ok { return numberDataPoint.DoubleValue(), nil } return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newDouble, ok := val.(float64); ok { if numberDataPoint, ok := tCtx.GetDataPoint().(pmetric.NumberDataPoint); ok { numberDataPoint.SetDoubleValue(newDouble) @@ -473,13 +566,13 @@ func accessDoubleValue() ottl.StandardGetSetter[TransformContext] { func accessIntValue() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { if numberDataPoint, ok := tCtx.GetDataPoint().(pmetric.NumberDataPoint); ok { return numberDataPoint.IntValue(), nil } return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newInt, ok := val.(int64); ok { if numberDataPoint, ok := tCtx.GetDataPoint().(pmetric.NumberDataPoint); ok { numberDataPoint.SetIntValue(newInt) @@ -492,7 +585,7 @@ func accessIntValue() ottl.StandardGetSetter[TransformContext] { func accessExemplars() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { switch tCtx.GetDataPoint().(type) { case pmetric.NumberDataPoint: return tCtx.GetDataPoint().(pmetric.NumberDataPoint).Exemplars(), nil @@ -503,7 +596,7 @@ func accessExemplars() ottl.StandardGetSetter[TransformContext] { } 
return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newExemplars, ok := val.(pmetric.ExemplarSlice); ok { switch tCtx.GetDataPoint().(type) { case pmetric.NumberDataPoint: @@ -521,7 +614,7 @@ func accessExemplars() ottl.StandardGetSetter[TransformContext] { func accessFlags() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { switch tCtx.GetDataPoint().(type) { case pmetric.NumberDataPoint: return int64(tCtx.GetDataPoint().(pmetric.NumberDataPoint).Flags()), nil @@ -534,7 +627,7 @@ func accessFlags() ottl.StandardGetSetter[TransformContext] { } return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newFlags, ok := val.(int64); ok { switch tCtx.GetDataPoint().(type) { case pmetric.NumberDataPoint: @@ -554,7 +647,7 @@ func accessFlags() ottl.StandardGetSetter[TransformContext] { func accessCount() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { switch tCtx.GetDataPoint().(type) { case pmetric.HistogramDataPoint: return int64(tCtx.GetDataPoint().(pmetric.HistogramDataPoint).Count()), nil @@ -565,7 +658,7 @@ func accessCount() ottl.StandardGetSetter[TransformContext] { } return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newCount, ok := val.(int64); ok { switch tCtx.GetDataPoint().(type) { case pmetric.HistogramDataPoint: @@ -583,7 +676,7 @@ func accessCount() ottl.StandardGetSetter[TransformContext] { func accessSum() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { switch tCtx.GetDataPoint().(type) { case pmetric.HistogramDataPoint: return tCtx.GetDataPoint().(pmetric.HistogramDataPoint).Sum(), nil @@ -594,7 +687,7 @@ func accessSum() ottl.StandardGetSetter[TransformContext] { } return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newSum, ok := val.(float64); ok { switch tCtx.GetDataPoint().(type) { case pmetric.HistogramDataPoint: @@ -612,13 +705,13 @@ func accessSum() ottl.StandardGetSetter[TransformContext] { func accessExplicitBounds() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { if histogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.HistogramDataPoint); ok { return histogramDataPoint.ExplicitBounds().AsRaw(), nil } return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newExplicitBounds, ok := val.([]float64); ok { if histogramDataPoint, ok := 
tCtx.GetDataPoint().(pmetric.HistogramDataPoint); ok { histogramDataPoint.ExplicitBounds().FromRaw(newExplicitBounds) @@ -631,13 +724,13 @@ func accessExplicitBounds() ottl.StandardGetSetter[TransformContext] { func accessBucketCounts() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { if histogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.HistogramDataPoint); ok { return histogramDataPoint.BucketCounts().AsRaw(), nil } return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newBucketCount, ok := val.([]uint64); ok { if histogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.HistogramDataPoint); ok { histogramDataPoint.BucketCounts().FromRaw(newBucketCount) @@ -650,13 +743,13 @@ func accessBucketCounts() ottl.StandardGetSetter[TransformContext] { func accessScale() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok { return int64(expoHistogramDataPoint.Scale()), nil } return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newScale, ok := val.(int64); ok { if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok { expoHistogramDataPoint.SetScale(int32(newScale)) @@ -669,13 +762,13 @@ func accessScale() ottl.StandardGetSetter[TransformContext] { func accessZeroCount() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok { return int64(expoHistogramDataPoint.ZeroCount()), nil } return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newZeroCount, ok := val.(int64); ok { if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok { expoHistogramDataPoint.SetZeroCount(uint64(newZeroCount)) @@ -688,13 +781,13 @@ func accessZeroCount() ottl.StandardGetSetter[TransformContext] { func accessPositive() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok { return expoHistogramDataPoint.Positive(), nil } return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newPositive, ok := val.(pmetric.ExponentialHistogramDataPointBuckets); ok { if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok { 
newPositive.CopyTo(expoHistogramDataPoint.Positive()) @@ -707,13 +800,13 @@ func accessPositive() ottl.StandardGetSetter[TransformContext] { func accessPositiveOffset() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok { return int64(expoHistogramDataPoint.Positive().Offset()), nil } return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newPositiveOffset, ok := val.(int64); ok { if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok { expoHistogramDataPoint.Positive().SetOffset(int32(newPositiveOffset)) @@ -726,13 +819,13 @@ func accessPositiveOffset() ottl.StandardGetSetter[TransformContext] { func accessPositiveBucketCounts() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok { return expoHistogramDataPoint.Positive().BucketCounts().AsRaw(), nil } return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newPositiveBucketCounts, ok := val.([]uint64); ok { if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok { expoHistogramDataPoint.Positive().BucketCounts().FromRaw(newPositiveBucketCounts) @@ -745,13 +838,13 @@ func accessPositiveBucketCounts() ottl.StandardGetSetter[TransformContext] { func accessNegative() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok { return expoHistogramDataPoint.Negative(), nil } return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newNegative, ok := val.(pmetric.ExponentialHistogramDataPointBuckets); ok { if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok { newNegative.CopyTo(expoHistogramDataPoint.Negative()) @@ -764,13 +857,13 @@ func accessNegative() ottl.StandardGetSetter[TransformContext] { func accessNegativeOffset() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok { return int64(expoHistogramDataPoint.Negative().Offset()), nil } return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newNegativeOffset, ok := val.(int64); ok { if 
expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok { expoHistogramDataPoint.Negative().SetOffset(int32(newNegativeOffset)) @@ -783,13 +876,13 @@ func accessNegativeOffset() ottl.StandardGetSetter[TransformContext] { func accessNegativeBucketCounts() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok { return expoHistogramDataPoint.Negative().BucketCounts().AsRaw(), nil } return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newNegativeBucketCounts, ok := val.([]uint64); ok { if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok { expoHistogramDataPoint.Negative().BucketCounts().FromRaw(newNegativeBucketCounts) @@ -802,13 +895,13 @@ func accessNegativeBucketCounts() ottl.StandardGetSetter[TransformContext] { func accessQuantileValues() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { if summaryDataPoint, ok := tCtx.GetDataPoint().(pmetric.SummaryDataPoint); ok { return summaryDataPoint.QuantileValues(), nil } return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newQuantileValues, ok := val.(pmetric.SummaryDataPointValueAtQuantileSlice); ok { if summaryDataPoint, ok := tCtx.GetDataPoint().(pmetric.SummaryDataPoint); ok { newQuantileValues.CopyTo(summaryDataPoint.QuantileValues()) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog/log.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog/log.go index 7e73c2dbde3..99970f4a093 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog/log.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog/log.go @@ -6,40 +6,94 @@ package ottllog // import "github.com/open-telemetry/opentelemetry-collector-con import ( "context" "encoding/hex" + "errors" "fmt" "time" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/plog" + "go.uber.org/zap/zapcore" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal" - "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/internal/ottlcommon" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/logging" + common "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/internal/ottlcommon" ) const ( - contextName = "Log" + // Experimental: *NOTE* this constant is subject to change or removal in the future. 
+ ContextName = "log" + contextNameDescription = "Log" ) -var _ internal.ResourceContext = TransformContext{} -var _ internal.InstrumentationScopeContext = TransformContext{} +var ( + _ internal.ResourceContext = (*TransformContext)(nil) + _ internal.InstrumentationScopeContext = (*TransformContext)(nil) + _ zapcore.ObjectMarshaler = (*TransformContext)(nil) +) type TransformContext struct { logRecord plog.LogRecord instrumentationScope pcommon.InstrumentationScope resource pcommon.Resource cache pcommon.Map + scopeLogs plog.ScopeLogs + resourceLogs plog.ResourceLogs +} + +type logRecord plog.LogRecord + +func (l logRecord) MarshalLogObject(encoder zapcore.ObjectEncoder) error { + lr := plog.LogRecord(l) + spanID := lr.SpanID() + traceID := lr.TraceID() + err := encoder.AddObject("attributes", logging.Map(lr.Attributes())) + encoder.AddString("body", lr.Body().AsString()) + encoder.AddUint32("dropped_attribute_count", lr.DroppedAttributesCount()) + encoder.AddUint32("flags", uint32(lr.Flags())) + encoder.AddUint64("observed_time_unix_nano", uint64(lr.ObservedTimestamp())) + encoder.AddInt32("severity_number", int32(lr.SeverityNumber())) + encoder.AddString("severity_text", lr.SeverityText()) + encoder.AddString("span_id", hex.EncodeToString(spanID[:])) + encoder.AddUint64("time_unix_nano", uint64(lr.Timestamp())) + encoder.AddString("trace_id", hex.EncodeToString(traceID[:])) + return err +} + +func (tCtx TransformContext) MarshalLogObject(encoder zapcore.ObjectEncoder) error { + err := encoder.AddObject("resource", logging.Resource(tCtx.resource)) + err = errors.Join(err, encoder.AddObject("scope", logging.InstrumentationScope(tCtx.instrumentationScope))) + err = errors.Join(err, encoder.AddObject("log_record", logRecord(tCtx.logRecord))) + err = errors.Join(err, encoder.AddObject("cache", logging.Map(tCtx.cache))) + return err } type Option func(*ottl.Parser[TransformContext]) -func NewTransformContext(logRecord plog.LogRecord, instrumentationScope pcommon.InstrumentationScope, resource pcommon.Resource) TransformContext { - return TransformContext{ +type TransformContextOption func(*TransformContext) + +func NewTransformContext(logRecord plog.LogRecord, instrumentationScope pcommon.InstrumentationScope, resource pcommon.Resource, scopeLogs plog.ScopeLogs, resourceLogs plog.ResourceLogs, options ...TransformContextOption) TransformContext { + tc := TransformContext{ logRecord: logRecord, instrumentationScope: instrumentationScope, resource: resource, cache: pcommon.NewMap(), + scopeLogs: scopeLogs, + resourceLogs: resourceLogs, + } + for _, opt := range options { + opt(&tc) + } + return tc +}
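The log context mirrors the datapoint changes: TransformContext implements zapcore.ObjectMarshaler through the logRecord wrapper above, and NewTransformContext requires the enclosing plog.ScopeLogs and plog.ResourceLogs plus functional options. A hedged sketch, assuming lr, sl and rl come from iterating a plog.Logs payload and logger is a *zap.Logger:

    cache := pcommon.NewMap()
    tCtx := ottllog.NewTransformContext(lr, sl.Scope(), rl.Resource(), sl, rl, ottllog.WithCache(&cache))
    // the ObjectMarshaler implementation lets debug logging render the
    // whole context as one structured field:
    logger.Debug("evaluating statement", zap.Object("tCtx", tCtx))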
+ +// Experimental: *NOTE* this option is subject to change or removal in the future. +func WithCache(cache *pcommon.Map) TransformContextOption { + return func(p *TransformContext) { + if cache != nil { + p.cache = *cache + } } } @@ -59,6 +113,14 @@ func (tCtx TransformContext) getCache() pcommon.Map { return tCtx.cache } +func (tCtx TransformContext) GetScopeSchemaURLItem() internal.SchemaURLItem { + return tCtx.scopeLogs +} + +func (tCtx TransformContext) GetResourceSchemaURLItem() internal.SchemaURLItem { + return tCtx.resourceLogs +} + func NewParser(functions map[string]ottl.Factory[TransformContext], telemetrySettings component.TelemetrySettings, options ...Option) (ottl.Parser[TransformContext], error) { pep := pathExpressionParser{telemetrySettings} p, err := ottl.NewParser[TransformContext]( @@ -76,6 +138,21 @@ func NewParser(functions map[string]ottl.Factory[TransformContext], telemetrySet return p, nil } +// EnablePathContextNames enables support for path context names on statements. +// When this option is configured, all statement paths must have a valid context prefix, +// otherwise an error is reported. +// +// Experimental: *NOTE* this option is subject to change or removal in the future. +func EnablePathContextNames() Option { + return func(p *ottl.Parser[TransformContext]) { + ottl.WithPathContextNames[TransformContext]([]string{ + ContextName, + internal.InstrumentationScopeContextName, + internal.ResourceContextName, + })(p) + } +} + type StatementSequenceOption func(*ottl.StatementSequence[TransformContext]) func WithStatementSequenceErrorMode(errorMode ottl.ErrorMode) StatementSequenceOption { @@ -154,16 +231,21 @@ func (pep *pathExpressionParser) parsePath(path ottl.Path[TransformContext]) (ot if path == nil { return nil, fmt.Errorf("path cannot be nil") } + // Higher contexts parsing + if path.Context() != "" && path.Context() != ContextName { + return pep.parseHigherContextPath(path.Context(), path) + } + // Backward compatibility with paths without context + if path.Context() == "" && (path.Name() == internal.ResourceContextName || path.Name() == internal.InstrumentationScopeContextName) { + return pep.parseHigherContextPath(path.Name(), path.Next()) + } + switch path.Name() { case "cache": if path.Keys() == nil { return accessCache(), nil } return accessCacheKey(path.Keys()), nil - case "resource": - return internal.ResourcePathGetSetter[TransformContext](path.Next()) - case "instrumentation_scope": - return internal.ScopePathGetSetter[TransformContext](path.Next()) case "time_unix_nano": return accessTimeUnixNano(), nil case "observed_time_unix_nano": @@ -182,7 +264,7 @@ func (pep *pathExpressionParser) parsePath(path ottl.Path[TransformContext]) (ot if nextPath.Name() == "string" { return accessStringBody(), nil } - return nil, internal.FormatDefaultErrorMessage(nextPath.Name(), nextPath.String(), contextName, internal.LogRef) + return nil, internal.FormatDefaultErrorMessage(nextPath.Name(), nextPath.String(), contextNameDescription, internal.LogRef) } if path.Keys() == nil { return accessBody(), nil @@ -203,7 +285,7 @@ func (pep *pathExpressionParser) parsePath(path ottl.Path[TransformContext]) (ot if nextPath.Name() == "string" { return accessStringTraceID(), nil } - return nil, internal.FormatDefaultErrorMessage(nextPath.Name(), nextPath.String(), contextName, internal.LogRef) + return nil, internal.FormatDefaultErrorMessage(nextPath.Name(), nextPath.String(), contextNameDescription, internal.LogRef) } return accessTraceID(), nil case "span_id": @@ -212,20 +294,35 @@ func (pep *pathExpressionParser) parsePath(path 
ottl.Path[TransformContext]) (ot if nextPath.Name() == "string" { return accessStringSpanID(), nil } - return nil, internal.FormatDefaultErrorMessage(nextPath.Name(), path.String(), contextName, internal.LogRef) + return nil, internal.FormatDefaultErrorMessage(nextPath.Name(), path.String(), contextNameDescription, internal.LogRef) } return accessSpanID(), nil default: - return nil, internal.FormatDefaultErrorMessage(path.Name(), path.String(), contextName, internal.LogRef) + return nil, internal.FormatDefaultErrorMessage(path.Name(), path.String(), contextNameDescription, internal.LogRef) + } +} + +func (pep *pathExpressionParser) parseHigherContextPath(context string, path ottl.Path[TransformContext]) (ottl.GetSetter[TransformContext], error) { + switch context { + case internal.ResourceContextName: + return internal.ResourcePathGetSetter(ContextName, path) + case internal.InstrumentationScopeContextName: + return internal.ScopePathGetSetter(ContextName, path) + default: + var fullPath string + if path != nil { + fullPath = path.String() + } + return nil, internal.FormatDefaultErrorMessage(context, fullPath, contextNameDescription, internal.LogRef) } } func accessCache() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.getCache(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if m, ok := val.(pcommon.Map); ok { m.CopyTo(tCtx.getCache()) } @@ -247,10 +344,10 @@ func accessCacheKey(key []ottl.Key[TransformContext]) ottl.StandardGetSetter[Tra func accessTimeUnixNano() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.GetLogRecord().Timestamp().AsTime().UnixNano(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if i, ok := val.(int64); ok { tCtx.GetLogRecord().SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(0, i))) } @@ -261,10 +358,10 @@ func accessTimeUnixNano() ottl.StandardGetSetter[TransformContext] { func accessObservedTimeUnixNano() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.GetLogRecord().ObservedTimestamp().AsTime().UnixNano(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if i, ok := val.(int64); ok { tCtx.GetLogRecord().SetObservedTimestamp(pcommon.NewTimestampFromTime(time.Unix(0, i))) } @@ -275,10 +372,10 @@ func accessObservedTimeUnixNano() ottl.StandardGetSetter[TransformContext] { func accessTime() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.GetLogRecord().Timestamp().AsTime(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val 
any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if i, ok := val.(time.Time); ok { tCtx.GetLogRecord().SetTimestamp(pcommon.NewTimestampFromTime(i)) } @@ -289,10 +386,10 @@ func accessTime() ottl.StandardGetSetter[TransformContext] { func accessObservedTime() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.GetLogRecord().ObservedTimestamp().AsTime(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if i, ok := val.(time.Time); ok { tCtx.GetLogRecord().SetObservedTimestamp(pcommon.NewTimestampFromTime(i)) } @@ -303,10 +400,10 @@ func accessObservedTime() ottl.StandardGetSetter[TransformContext] { func accessSeverityNumber() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return int64(tCtx.GetLogRecord().SeverityNumber()), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if i, ok := val.(int64); ok { tCtx.GetLogRecord().SetSeverityNumber(plog.SeverityNumber(i)) } @@ -317,10 +414,10 @@ func accessSeverityNumber() ottl.StandardGetSetter[TransformContext] { func accessSeverityText() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.GetLogRecord().SeverityText(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if s, ok := val.(string); ok { tCtx.GetLogRecord().SetSeverityText(s) } @@ -331,10 +428,10 @@ func accessSeverityText() ottl.StandardGetSetter[TransformContext] { func accessBody() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { - return ottlcommon.GetValue(tCtx.GetLogRecord().Body()), nil + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { + return common.GetValue(tCtx.GetLogRecord().Body()), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { return internal.SetValue(tCtx.GetLogRecord().Body(), val) }, } @@ -369,10 +466,10 @@ func accessBodyKey(key []ottl.Key[TransformContext]) ottl.StandardGetSetter[Tran func accessStringBody() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.GetLogRecord().Body().AsString(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if str, ok := val.(string); ok { tCtx.GetLogRecord().Body().SetStr(str) } @@ -383,10 +480,10 @@ func accessStringBody() 
ottl.StandardGetSetter[TransformContext] { func accessAttributes() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.GetLogRecord().Attributes(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if attrs, ok := val.(pcommon.Map); ok { attrs.CopyTo(tCtx.GetLogRecord().Attributes()) } @@ -408,10 +505,10 @@ func accessAttributesKey(key []ottl.Key[TransformContext]) ottl.StandardGetSette func accessDroppedAttributesCount() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return int64(tCtx.GetLogRecord().DroppedAttributesCount()), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if i, ok := val.(int64); ok { tCtx.GetLogRecord().SetDroppedAttributesCount(uint32(i)) } @@ -422,10 +519,10 @@ func accessDroppedAttributesCount() ottl.StandardGetSetter[TransformContext] { func accessFlags() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return int64(tCtx.GetLogRecord().Flags()), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if i, ok := val.(int64); ok { tCtx.GetLogRecord().SetFlags(plog.LogRecordFlags(i)) } @@ -436,10 +533,10 @@ func accessFlags() ottl.StandardGetSetter[TransformContext] { func accessTraceID() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.GetLogRecord().TraceID(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newTraceID, ok := val.(pcommon.TraceID); ok { tCtx.GetLogRecord().SetTraceID(newTraceID) } @@ -450,11 +547,11 @@ func accessTraceID() ottl.StandardGetSetter[TransformContext] { func accessStringTraceID() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { id := tCtx.GetLogRecord().TraceID() return hex.EncodeToString(id[:]), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if str, ok := val.(string); ok { id, err := internal.ParseTraceID(str) if err != nil { @@ -469,10 +566,10 @@ func accessStringTraceID() ottl.StandardGetSetter[TransformContext] { func accessSpanID() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) 
(any, error) { return tCtx.GetLogRecord().SpanID(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newSpanID, ok := val.(pcommon.SpanID); ok { tCtx.GetLogRecord().SetSpanID(newSpanID) } @@ -483,11 +580,11 @@ func accessSpanID() ottl.StandardGetSetter[TransformContext] { func accessStringSpanID() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { id := tCtx.GetLogRecord().SpanID() return hex.EncodeToString(id[:]), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if str, ok := val.(string); ok { id, err := internal.ParseSpanID(str) if err != nil { diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric/metrics.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric/metrics.go index 0cdc916fd14..04b7b5218bf 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric/metrics.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric/metrics.go @@ -15,9 +15,16 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal" ) -var _ internal.ResourceContext = TransformContext{} -var _ internal.InstrumentationScopeContext = TransformContext{} -var _ internal.MetricContext = TransformContext{} +const ( + // Experimental: *NOTE* this constant is subject to change or removal in the future. + ContextName = internal.MetricContextName +) + +var ( + _ internal.ResourceContext = TransformContext{} + _ internal.InstrumentationScopeContext = TransformContext{} + _ internal.MetricContext = TransformContext{} +) type TransformContext struct { metric pmetric.Metric @@ -25,17 +32,36 @@ type TransformContext struct { instrumentationScope pcommon.InstrumentationScope resource pcommon.Resource cache pcommon.Map + scopeMetrics pmetric.ScopeMetrics + resourceMetrics pmetric.ResourceMetrics } type Option func(*ottl.Parser[TransformContext]) -func NewTransformContext(metric pmetric.Metric, metrics pmetric.MetricSlice, instrumentationScope pcommon.InstrumentationScope, resource pcommon.Resource) TransformContext { - return TransformContext{ +type TransformContextOption func(*TransformContext) + +func NewTransformContext(metric pmetric.Metric, metrics pmetric.MetricSlice, instrumentationScope pcommon.InstrumentationScope, resource pcommon.Resource, scopeMetrics pmetric.ScopeMetrics, resourceMetrics pmetric.ResourceMetrics, options ...TransformContextOption) TransformContext { + tc := TransformContext{ metric: metric, metrics: metrics, instrumentationScope: instrumentationScope, resource: resource, cache: pcommon.NewMap(), + scopeMetrics: scopeMetrics, + resourceMetrics: resourceMetrics, + } + for _, opt := range options { + opt(&tc) + } + return tc +} + +// Experimental: *NOTE* this option is subject to change or removal in the future. 
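WithCache, defined next, swaps the fresh cache map the constructor creates for a caller-owned one, which is useful when scratch state must outlive a single context. A hedged sketch against the new metric constructor (helper name illustrative):

package sample

import (
	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"

	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric"
)

// metricContext shows the two new trailing pdata arguments plus the cache
// option; a nil cache leaves the constructor's default map in place.
func metricContext(rm pmetric.ResourceMetrics, sm pmetric.ScopeMetrics, m pmetric.Metric, cache *pcommon.Map) ottlmetric.TransformContext {
	return ottlmetric.NewTransformContext(
		m, sm.Metrics(), sm.Scope(), rm.Resource(), sm, rm,
		ottlmetric.WithCache(cache))
}
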
+func WithCache(cache *pcommon.Map) TransformContextOption { + return func(p *TransformContext) { + if cache != nil { + p.cache = *cache + } } } @@ -59,6 +85,14 @@ func (tCtx TransformContext) getCache() pcommon.Map { return tCtx.cache } +func (tCtx TransformContext) GetScopeSchemaURLItem() internal.SchemaURLItem { + return tCtx.scopeMetrics +} + +func (tCtx TransformContext) GetResourceSchemaURLItem() internal.SchemaURLItem { + return tCtx.resourceMetrics +} + func NewParser(functions map[string]ottl.Factory[TransformContext], telemetrySettings component.TelemetrySettings, options ...Option) (ottl.Parser[TransformContext], error) { pep := pathExpressionParser{telemetrySettings} p, err := ottl.NewParser[TransformContext]( @@ -76,6 +110,21 @@ func NewParser(functions map[string]ottl.Factory[TransformContext], telemetrySet return p, err } +// EnablePathContextNames enables the support to path's context names on statements. +// When this option is configured, all statement's paths must have a valid context prefix, +// otherwise an error is reported. +// +// Experimental: *NOTE* this option is subject to change or removal in the future. +func EnablePathContextNames() Option { + return func(p *ottl.Parser[TransformContext]) { + ottl.WithPathContextNames[TransformContext]([]string{ + ContextName, + internal.InstrumentationScopeContextName, + internal.ResourceContextName, + })(p) + } +} + type StatementSequenceOption func(*ottl.StatementSequence[TransformContext]) func WithStatementSequenceErrorMode(errorMode ottl.ErrorMode) StatementSequenceOption { @@ -128,27 +177,47 @@ func (pep *pathExpressionParser) parsePath(path ottl.Path[TransformContext]) (ot if path == nil { return nil, fmt.Errorf("path cannot be nil") } + // Higher contexts parsing + if path.Context() != "" && path.Context() != ContextName { + return pep.parseHigherContextPath(path.Context(), path) + } + // Backward compatibility with paths without context + if path.Context() == "" && (path.Name() == internal.ResourceContextName || path.Name() == internal.InstrumentationScopeContextName) { + return pep.parseHigherContextPath(path.Name(), path.Next()) + } + switch path.Name() { case "cache": if path.Keys() == nil { return accessCache(), nil } return accessCacheKey(path.Keys()), nil - case "resource": - return internal.ResourcePathGetSetter[TransformContext](path.Next()) - case "instrumentation_scope": - return internal.ScopePathGetSetter[TransformContext](path.Next()) default: return internal.MetricPathGetSetter[TransformContext](path) } } +func (pep *pathExpressionParser) parseHigherContextPath(context string, path ottl.Path[TransformContext]) (ottl.GetSetter[TransformContext], error) { + switch context { + case internal.ResourceContextName: + return internal.ResourcePathGetSetter(ContextName, path) + case internal.InstrumentationScopeContextName: + return internal.ScopePathGetSetter(ContextName, path) + default: + var fullPath string + if path != nil { + fullPath = path.String() + } + return nil, internal.FormatDefaultErrorMessage(context, fullPath, internal.MetricContextName, internal.MetricRef) + } +} + func accessCache() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.getCache(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { 
if m, ok := val.(pcommon.Map); ok { m.CopyTo(tCtx.getCache()) } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource/resource.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource/resource.go index a7bd956129d..6c44ec4d161 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource/resource.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource/resource.go @@ -5,28 +5,62 @@ package ottlresource // import "github.com/open-telemetry/opentelemetry-collecto import ( "context" + "errors" "fmt" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/pdata/pcommon" + "go.uber.org/zap/zapcore" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/logging" ) -var _ internal.ResourceContext = TransformContext{} +const ( + // Experimental: *NOTE* this constant is subject to change or removal in the future. + ContextName = internal.ResourceContextName +) + +var ( + _ internal.ResourceContext = (*TransformContext)(nil) + _ zapcore.ObjectMarshaler = (*TransformContext)(nil) +) type TransformContext struct { - resource pcommon.Resource - cache pcommon.Map + resource pcommon.Resource + cache pcommon.Map + schemaURLItem internal.SchemaURLItem +} + +func (tCtx TransformContext) MarshalLogObject(encoder zapcore.ObjectEncoder) error { + err := encoder.AddObject("resource", logging.Resource(tCtx.resource)) + err = errors.Join(err, encoder.AddObject("cache", logging.Map(tCtx.cache))) + return err } type Option func(*ottl.Parser[TransformContext]) -func NewTransformContext(resource pcommon.Resource) TransformContext { - return TransformContext{ - resource: resource, - cache: pcommon.NewMap(), +type TransformContextOption func(*TransformContext) + +func NewTransformContext(resource pcommon.Resource, schemaURLItem internal.SchemaURLItem, options ...TransformContextOption) TransformContext { + tc := TransformContext{ + resource: resource, + cache: pcommon.NewMap(), + schemaURLItem: schemaURLItem, + } + for _, opt := range options { + opt(&tc) + } + return tc +} + +// Experimental: *NOTE* this option is subject to change or removal in the future. +func WithCache(cache *pcommon.Map) TransformContextOption { + return func(p *TransformContext) { + if cache != nil { + p.cache = *cache + } } } @@ -38,6 +72,10 @@ func (tCtx TransformContext) getCache() pcommon.Map { return tCtx.cache } +func (tCtx TransformContext) GetResourceSchemaURLItem() internal.SchemaURLItem { + return tCtx.schemaURLItem +} + func NewParser(functions map[string]ottl.Factory[TransformContext], telemetrySettings component.TelemetrySettings, options ...Option) (ottl.Parser[TransformContext], error) { pep := pathExpressionParser{telemetrySettings} p, err := ottl.NewParser[TransformContext]( @@ -55,6 +93,17 @@ func NewParser(functions map[string]ottl.Factory[TransformContext], telemetrySet return p, nil } +// EnablePathContextNames enables the support to path's context names on statements. +// When this option is configured, all statement's paths must have a valid context prefix, +// otherwise an error is reported. +// +// Experimental: *NOTE* this option is subject to change or removal in the future. 
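The resource constructor now takes a schema-URL carrier as its second argument. The parameter's interface lives in an internal package, but Go interface satisfaction is structural, so any pdata wrapper exposing SchemaUrl/SetSchemaUrl (plog.ResourceLogs, pmetric.ResourceMetrics, ptrace.ResourceSpans) can be passed directly. A sketch assuming a traces payload:

package sample

import (
	"go.opentelemetry.io/collector/pdata/ptrace"

	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource"
)

func resourceContexts(td ptrace.Traces) []ottlresource.TransformContext {
	var out []ottlresource.TransformContext
	for i := 0; i < td.ResourceSpans().Len(); i++ {
		rs := td.ResourceSpans().At(i)
		// rs satisfies the internal SchemaURLItem interface structurally.
		out = append(out, ottlresource.NewTransformContext(rs.Resource(), rs))
	}
	return out
}
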
+func EnablePathContextNames() Option { + return func(p *ottl.Parser[TransformContext]) { + ottl.WithPathContextNames[TransformContext]([]string{ContextName})(p) + } +} + type StatementSequenceOption func(*ottl.StatementSequence[TransformContext]) func WithStatementSequenceErrorMode(errorMode ottl.ErrorMode) StatementSequenceOption { @@ -99,6 +148,10 @@ func (pep *pathExpressionParser) parsePath(path ottl.Path[TransformContext]) (ot if path == nil { return nil, fmt.Errorf("path cannot be nil") } + if path.Context() != "" && path.Context() != ContextName { + return nil, internal.FormatDefaultErrorMessage(path.Context(), path.String(), "Resource", internal.ResourceContextRef) + } + switch path.Name() { case "cache": if path.Keys() == nil { @@ -106,16 +159,16 @@ func (pep *pathExpressionParser) parsePath(path ottl.Path[TransformContext]) (ot } return accessCacheKey(path.Keys()), nil default: - return internal.ResourcePathGetSetter[TransformContext](path) + return internal.ResourcePathGetSetter[TransformContext](ContextName, path) } } func accessCache() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.getCache(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if m, ok := val.(pcommon.Map); ok { m.CopyTo(tCtx.getCache()) } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope/README.md new file mode 100644 index 00000000000..4df2ae36160 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope/README.md @@ -0,0 +1,27 @@ +# Instrumentation Scope Context + +The Instrumentation Scope Context is a Context implementation for [pdata Instrumentation Scope](https://github.com/open-telemetry/opentelemetry-collector/blob/main/pdata/pcommon/generated_instrumentationscope.go), the Collector's internal representation for OTLP instrumentation scope data. This Context should be used when interacting only with OTLP instrumentation scope. + +## Paths +In general, the Instrumentation Scope Context supports accessing pdata using the field names from the instrumentation section in the [common proto](https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/common/v1/common.proto). All integers are returned and set via `int64`. All doubles are returned and set via `float64`. + +The following paths are supported. + +| path | field accessed | type | +|-----------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------| +| cache | the value of the current transform context's temporary cache. cache can be used as a temporary placeholder for data during complex transformations | pcommon.Map | +| cache\[""\] | the value of an item in cache. Supports multiple indexes to access nested fields. 
| string, bool, int64, float64, pcommon.Map, pcommon.Slice, []byte or nil | +| resource | resource of the instrumentation scope being processed | pcommon.Resource | +| resource.attributes | resource attributes of the instrumentation scope being processed | pcommon.Map | +| resource.attributes\[""\] | the value of the resource attribute of the instrumentation scope being processed. Supports multiple indexes to access nested fields. | string, bool, int64, float64, pcommon.Map, pcommon.Slice, []byte or nil | +| resource.dropped_attributes_count | number of dropped attributes of the resource of the instrumentation scope being processed | int64 | +| name | name of the instrumentation scope of the scope being processed | string | +| version | version of the instrumentation scope of the scope being processed | string | +| dropped_attributes_count | number of dropped attributes of the instrumentation scope of the scope being processed | int64 | +| attributes | instrumentation scope attributes of the scope being processed | pcommon.Map | +| attributes\[""\] | the value of the instrumentation scope attribute of the scope being processed. Supports multiple indexes to access nested fields. | string, bool, int64, float64, pcommon.Map, pcommon.Slice, []byte or nil | + + +## Enums + +The Instrumentation Scope Context does not define any Enums at this time. diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope/scope.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope/scope.go new file mode 100644 index 00000000000..4ccc8f472d5 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope/scope.go @@ -0,0 +1,222 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlscope // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope" + +import ( + "context" + "errors" + "fmt" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.uber.org/zap/zapcore" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/logging" +) + +const ( + // Experimental: *NOTE* this constant is subject to change or removal in the future. 
+ ContextName = internal.ScopeContextName +) + +var ( + _ internal.ResourceContext = (*TransformContext)(nil) + _ internal.InstrumentationScopeContext = (*TransformContext)(nil) + _ zapcore.ObjectMarshaler = (*TransformContext)(nil) +) + +type TransformContext struct { + instrumentationScope pcommon.InstrumentationScope + resource pcommon.Resource + cache pcommon.Map + schemaURLItem internal.SchemaURLItem +} + +func (tCtx TransformContext) MarshalLogObject(encoder zapcore.ObjectEncoder) error { + err := encoder.AddObject("resource", logging.Resource(tCtx.resource)) + err = errors.Join(err, encoder.AddObject("scope", logging.InstrumentationScope(tCtx.instrumentationScope))) + err = errors.Join(err, encoder.AddObject("cache", logging.Map(tCtx.cache))) + return err +} + +type Option func(*ottl.Parser[TransformContext]) + +type TransformContextOption func(*TransformContext) + +func NewTransformContext(instrumentationScope pcommon.InstrumentationScope, resource pcommon.Resource, schemaURLItem internal.SchemaURLItem, options ...TransformContextOption) TransformContext { + tc := TransformContext{ + instrumentationScope: instrumentationScope, + resource: resource, + cache: pcommon.NewMap(), + schemaURLItem: schemaURLItem, + } + for _, opt := range options { + opt(&tc) + } + return tc +} + +// Experimental: *NOTE* this option is subject to change or removal in the future. +func WithCache(cache *pcommon.Map) TransformContextOption { + return func(p *TransformContext) { + if cache != nil { + p.cache = *cache + } + } +} + +func (tCtx TransformContext) GetInstrumentationScope() pcommon.InstrumentationScope { + return tCtx.instrumentationScope +} + +func (tCtx TransformContext) GetResource() pcommon.Resource { + return tCtx.resource +} + +func (tCtx TransformContext) getCache() pcommon.Map { + return tCtx.cache +} + +func (tCtx TransformContext) GetScopeSchemaURLItem() internal.SchemaURLItem { + return tCtx.schemaURLItem +} + +func (tCtx TransformContext) GetResourceSchemaURLItem() internal.SchemaURLItem { + return tCtx.schemaURLItem +} + +func NewParser(functions map[string]ottl.Factory[TransformContext], telemetrySettings component.TelemetrySettings, options ...Option) (ottl.Parser[TransformContext], error) { + pep := pathExpressionParser{telemetrySettings} + p, err := ottl.NewParser[TransformContext]( + functions, + pep.parsePath, + telemetrySettings, + ottl.WithEnumParser[TransformContext](parseEnum), + ) + if err != nil { + return ottl.Parser[TransformContext]{}, err + } + for _, opt := range options { + opt(&p) + } + return p, nil +} + +// EnablePathContextNames enables the support to path's context names on statements. +// When this option is configured, all statement's paths must have a valid context prefix, +// otherwise an error is reported. +// +// Experimental: *NOTE* this option is subject to change or removal in the future. 
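With path context names enabled, every path in a statement must carry one of the registered prefixes (for this context, assumed to be "scope" and "resource"); unprefixed paths are rejected at parse time. A minimal sketch of wiring the option into a parser, with the function map and telemetry settings assumed to come from the caller:

package sample

import (
	"go.opentelemetry.io/collector/component"

	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope"
)

func newScopeParser(funcs map[string]ottl.Factory[ottlscope.TransformContext], set component.TelemetrySettings) (ottl.Parser[ottlscope.TransformContext], error) {
	// After this, a statement such as
	//   set(scope.attributes["team"], resource.attributes["team"])
	// parses, while the unprefixed attributes["team"] reports an error.
	return ottlscope.NewParser(funcs, set, ottlscope.EnablePathContextNames())
}
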
+func EnablePathContextNames() Option { + return func(p *ottl.Parser[TransformContext]) { + ottl.WithPathContextNames[TransformContext]([]string{ + ContextName, + internal.ResourceContextName, + })(p) + } +} + +type StatementSequenceOption func(*ottl.StatementSequence[TransformContext]) + +func WithStatementSequenceErrorMode(errorMode ottl.ErrorMode) StatementSequenceOption { + return func(s *ottl.StatementSequence[TransformContext]) { + ottl.WithStatementSequenceErrorMode[TransformContext](errorMode)(s) + } +} + +func NewStatementSequence(statements []*ottl.Statement[TransformContext], telemetrySettings component.TelemetrySettings, options ...StatementSequenceOption) ottl.StatementSequence[TransformContext] { + s := ottl.NewStatementSequence(statements, telemetrySettings) + for _, op := range options { + op(&s) + } + return s +} + +type ConditionSequenceOption func(*ottl.ConditionSequence[TransformContext]) + +func WithConditionSequenceErrorMode(errorMode ottl.ErrorMode) ConditionSequenceOption { + return func(c *ottl.ConditionSequence[TransformContext]) { + ottl.WithConditionSequenceErrorMode[TransformContext](errorMode)(c) + } +} + +func NewConditionSequence(conditions []*ottl.Condition[TransformContext], telemetrySettings component.TelemetrySettings, options ...ConditionSequenceOption) ottl.ConditionSequence[TransformContext] { + c := ottl.NewConditionSequence(conditions, telemetrySettings) + for _, op := range options { + op(&c) + } + return c +} + +func parseEnum(_ *ottl.EnumSymbol) (*ottl.Enum, error) { + return nil, fmt.Errorf("instrumentation scope context does not provide Enum support") +} + +type pathExpressionParser struct { + telemetrySettings component.TelemetrySettings +} + +func (pep *pathExpressionParser) parsePath(path ottl.Path[TransformContext]) (ottl.GetSetter[TransformContext], error) { + if path == nil { + return nil, fmt.Errorf("path cannot be nil") + } + // Higher contexts parsing + if path.Context() != "" && path.Context() != ContextName { + return pep.parseHigherContextPath(path.Context(), path) + } + // Backward compatibility with paths without context + if path.Context() == "" && path.Name() == internal.ResourceContextName { + return pep.parseHigherContextPath(path.Name(), path.Next()) + } + + switch path.Name() { + case "cache": + if path.Keys() == nil { + return accessCache(), nil + } + return accessCacheKey(path.Keys()), nil + default: + return internal.ScopePathGetSetter[TransformContext](ContextName, path) + } +} + +func (pep *pathExpressionParser) parseHigherContextPath(context string, path ottl.Path[TransformContext]) (ottl.GetSetter[TransformContext], error) { + switch context { + case internal.ResourceContextName: + return internal.ResourcePathGetSetter[TransformContext](ContextName, path) + default: + var fullPath string + if path != nil { + fullPath = path.String() + } + return nil, internal.FormatDefaultErrorMessage(context, fullPath, "Instrumentation Scope", internal.InstrumentationScopeRef) + } +} + +func accessCache() ottl.StandardGetSetter[TransformContext] { + return ottl.StandardGetSetter[TransformContext]{ + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { + return tCtx.getCache(), nil + }, + Setter: func(_ context.Context, tCtx TransformContext, val any) error { + if m, ok := val.(pcommon.Map); ok { + m.CopyTo(tCtx.getCache()) + } + return nil + }, + } +} + +func accessCacheKey(key []ottl.Key[TransformContext]) ottl.StandardGetSetter[TransformContext] { + return ottl.StandardGetSetter[TransformContext]{ + Getter: 
func(ctx context.Context, tCtx TransformContext) (any, error) { + return internal.GetMapValue[TransformContext](ctx, tCtx, tCtx.getCache(), key) + }, + Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + return internal.SetMapValue[TransformContext](ctx, tCtx, tCtx.getCache(), key, val) + }, + } +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan/span.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan/span.go index 8f400540ba6..8f7b4c05f26 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan/span.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan/span.go @@ -5,34 +5,72 @@ package ottlspan // import "github.com/open-telemetry/opentelemetry-collector-co import ( "context" + "errors" "fmt" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/ptrace" + "go.uber.org/zap/zapcore" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/logging" ) -var _ internal.ResourceContext = TransformContext{} -var _ internal.InstrumentationScopeContext = TransformContext{} +const ( + // Experimental: *NOTE* this constant is subject to change or removal in the future. + ContextName = internal.SpanContextName +) + +var ( + _ internal.ResourceContext = (*TransformContext)(nil) + _ internal.InstrumentationScopeContext = (*TransformContext)(nil) + _ zapcore.ObjectMarshaler = (*TransformContext)(nil) +) type TransformContext struct { span ptrace.Span instrumentationScope pcommon.InstrumentationScope resource pcommon.Resource cache pcommon.Map + scopeSpans ptrace.ScopeSpans + resourceSpans ptrace.ResourceSpans +} + +func (tCtx TransformContext) MarshalLogObject(encoder zapcore.ObjectEncoder) error { + err := encoder.AddObject("resource", logging.Resource(tCtx.resource)) + err = errors.Join(err, encoder.AddObject("scope", logging.InstrumentationScope(tCtx.instrumentationScope))) + err = errors.Join(err, encoder.AddObject("span", logging.Span(tCtx.span))) + err = errors.Join(err, encoder.AddObject("cache", logging.Map(tCtx.cache))) + return err } type Option func(*ottl.Parser[TransformContext]) -func NewTransformContext(span ptrace.Span, instrumentationScope pcommon.InstrumentationScope, resource pcommon.Resource) TransformContext { - return TransformContext{ +type TransformContextOption func(*TransformContext) + +func NewTransformContext(span ptrace.Span, instrumentationScope pcommon.InstrumentationScope, resource pcommon.Resource, scopeSpans ptrace.ScopeSpans, resourceSpans ptrace.ResourceSpans, options ...TransformContextOption) TransformContext { + tc := TransformContext{ span: span, instrumentationScope: instrumentationScope, resource: resource, cache: pcommon.NewMap(), + scopeSpans: scopeSpans, + resourceSpans: resourceSpans, + } + for _, opt := range options { + opt(&tc) + } + return tc +} + +// Experimental: *NOTE* this option is subject to change or removal in the future. 
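As with the other signals, the span constructor now receives the enclosing ptrace.ScopeSpans and ptrace.ResourceSpans so scope and resource schema URLs are addressable from statements. A sketch (helper name illustrative):

package sample

import (
	"go.opentelemetry.io/collector/pdata/ptrace"

	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan"
)

// spanContext shows the two new trailing arguments on the constructor.
func spanContext(rs ptrace.ResourceSpans, ss ptrace.ScopeSpans, span ptrace.Span) ottlspan.TransformContext {
	return ottlspan.NewTransformContext(span, ss.Scope(), rs.Resource(), ss, rs)
}
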
+func WithCache(cache *pcommon.Map) TransformContextOption { + return func(p *TransformContext) { + if cache != nil { + p.cache = *cache + } } } @@ -52,6 +90,14 @@ func (tCtx TransformContext) getCache() pcommon.Map { return tCtx.cache } +func (tCtx TransformContext) GetResourceSchemaURLItem() internal.SchemaURLItem { + return tCtx.resourceSpans +} + +func (tCtx TransformContext) GetScopeSchemaURLItem() internal.SchemaURLItem { + return tCtx.scopeSpans +} + func NewParser(functions map[string]ottl.Factory[TransformContext], telemetrySettings component.TelemetrySettings, options ...Option) (ottl.Parser[TransformContext], error) { pep := pathExpressionParser{telemetrySettings} p, err := ottl.NewParser[TransformContext]( @@ -69,6 +115,21 @@ func NewParser(functions map[string]ottl.Factory[TransformContext], telemetrySet return p, nil } +// EnablePathContextNames enables the support to path's context names on statements. +// When this option is configured, all statement's paths must have a valid context prefix, +// otherwise an error is reported. +// +// Experimental: *NOTE* this option is subject to change or removal in the future. +func EnablePathContextNames() Option { + return func(p *ottl.Parser[TransformContext]) { + ottl.WithPathContextNames[TransformContext]([]string{ + ContextName, + internal.ResourceContextName, + internal.InstrumentationScopeContextName, + })(p) + } +} + type StatementSequenceOption func(*ottl.StatementSequence[TransformContext]) func WithStatementSequenceErrorMode(errorMode ottl.ErrorMode) StatementSequenceOption { @@ -119,27 +180,47 @@ func (pep *pathExpressionParser) parsePath(path ottl.Path[TransformContext]) (ot if path == nil { return nil, fmt.Errorf("path cannot be nil") } + // Higher contexts parsing + if path.Context() != "" && path.Context() != ContextName { + return pep.parseHigherContextPath(path.Context(), path) + } + // Backward compatibility with paths without context + if path.Context() == "" && (path.Name() == internal.ResourceContextName || path.Name() == internal.InstrumentationScopeContextName) { + return pep.parseHigherContextPath(path.Name(), path.Next()) + } + switch path.Name() { case "cache": if path.Keys() == nil { return accessCache(), nil } return accessCacheKey(path.Keys()), nil - case "resource": - return internal.ResourcePathGetSetter[TransformContext](path.Next()) - case "instrumentation_scope": - return internal.ScopePathGetSetter[TransformContext](path.Next()) default: - return internal.SpanPathGetSetter[TransformContext](path) + return internal.SpanPathGetSetter[TransformContext](ContextName, path) + } +} + +func (pep *pathExpressionParser) parseHigherContextPath(context string, path ottl.Path[TransformContext]) (ottl.GetSetter[TransformContext], error) { + switch context { + case internal.ResourceContextName: + return internal.ResourcePathGetSetter[TransformContext](ContextName, path) + case internal.InstrumentationScopeContextName: + return internal.ScopePathGetSetter[TransformContext](ContextName, path) + default: + var fullPath string + if path != nil { + fullPath = path.String() + } + return nil, internal.FormatDefaultErrorMessage(context, fullPath, internal.SpanContextName, internal.SpanRef) } } func accessCache() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.getCache(), nil }, - Setter: func(ctx context.Context, tCtx 
TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if m, ok := val.(pcommon.Map); ok { m.CopyTo(tCtx.getCache()) } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent/span_events.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent/span_events.go index d8182f4ec88..839147639d6 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent/span_events.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent/span_events.go @@ -5,20 +5,31 @@ package ottlspanevent // import "github.com/open-telemetry/opentelemetry-collect import ( "context" + "errors" "fmt" "time" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/ptrace" + "go.uber.org/zap/zapcore" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/logging" ) -var _ internal.ResourceContext = TransformContext{} -var _ internal.InstrumentationScopeContext = TransformContext{} -var _ internal.SpanContext = TransformContext{} +const ( + // Experimental: *NOTE* this constant is subject to change or removal in the future. + ContextName = "spanevent" + contextNameDescription = "Span Event" +) + +var ( + _ internal.ResourceContext = (*TransformContext)(nil) + _ internal.InstrumentationScopeContext = (*TransformContext)(nil) + _ zapcore.ObjectMarshaler = (*TransformContext)(nil) +) type TransformContext struct { spanEvent ptrace.SpanEvent @@ -26,17 +37,45 @@ type TransformContext struct { instrumentationScope pcommon.InstrumentationScope resource pcommon.Resource cache pcommon.Map + scopeSpans ptrace.ScopeSpans + resouceSpans ptrace.ResourceSpans +} + +func (tCtx TransformContext) MarshalLogObject(encoder zapcore.ObjectEncoder) error { + err := encoder.AddObject("resource", logging.Resource(tCtx.resource)) + err = errors.Join(err, encoder.AddObject("scope", logging.InstrumentationScope(tCtx.instrumentationScope))) + err = errors.Join(err, encoder.AddObject("span", logging.Span(tCtx.span))) + err = errors.Join(err, encoder.AddObject("spanevent", logging.SpanEvent(tCtx.spanEvent))) + err = errors.Join(err, encoder.AddObject("cache", logging.Map(tCtx.cache))) + return err } type Option func(*ottl.Parser[TransformContext]) -func NewTransformContext(spanEvent ptrace.SpanEvent, span ptrace.Span, instrumentationScope pcommon.InstrumentationScope, resource pcommon.Resource) TransformContext { - return TransformContext{ +type TransformContextOption func(*TransformContext) + +func NewTransformContext(spanEvent ptrace.SpanEvent, span ptrace.Span, instrumentationScope pcommon.InstrumentationScope, resource pcommon.Resource, scopeSpans ptrace.ScopeSpans, resourceSpans ptrace.ResourceSpans, options ...TransformContextOption) TransformContext { + tc := TransformContext{ spanEvent: spanEvent, span: span, instrumentationScope: instrumentationScope, resource: resource, cache: pcommon.NewMap(), + scopeSpans: scopeSpans, + resouceSpans: resourceSpans, + } + for _, opt := range options { + opt(&tc) + } + return tc +} + +// Experimental: *NOTE* this option is subject to change or removal in the future. 
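The span-event constructor follows the same pattern, and with EnablePathContextNames (below) statements in this context can address the higher span, instrumentation_scope, and resource contexts by prefix. A sketch (helper name illustrative):

package sample

import (
	"go.opentelemetry.io/collector/pdata/ptrace"

	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent"
)

// spanEventContext passes the event plus its full ancestry, ending with the
// two new ScopeSpans and ResourceSpans arguments.
func spanEventContext(rs ptrace.ResourceSpans, ss ptrace.ScopeSpans, span ptrace.Span, ev ptrace.SpanEvent) ottlspanevent.TransformContext {
	return ottlspanevent.NewTransformContext(ev, span, ss.Scope(), rs.Resource(), ss, rs)
}
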
+func WithCache(cache *pcommon.Map) TransformContextOption { + return func(p *TransformContext) { + if cache != nil { + p.cache = *cache + } } } @@ -60,6 +99,14 @@ func (tCtx TransformContext) getCache() pcommon.Map { return tCtx.cache } +func (tCtx TransformContext) GetScopeSchemaURLItem() internal.SchemaURLItem { + return tCtx.scopeSpans +} + +func (tCtx TransformContext) GetResourceSchemaURLItem() internal.SchemaURLItem { + return tCtx.resouceSpans +} + func NewParser(functions map[string]ottl.Factory[TransformContext], telemetrySettings component.TelemetrySettings, options ...Option) (ottl.Parser[TransformContext], error) { pep := pathExpressionParser{telemetrySettings} p, err := ottl.NewParser[TransformContext]( @@ -77,6 +124,22 @@ func NewParser(functions map[string]ottl.Factory[TransformContext], telemetrySet return p, nil } +// EnablePathContextNames enables the support to path's context names on statements. +// When this option is configured, all statement's paths must have a valid context prefix, +// otherwise an error is reported. +// +// Experimental: *NOTE* this option is subject to change or removal in the future. +func EnablePathContextNames() Option { + return func(p *ottl.Parser[TransformContext]) { + ottl.WithPathContextNames[TransformContext]([]string{ + ContextName, + internal.SpanContextName, + internal.ResourceContextName, + internal.InstrumentationScopeContextName, + })(p) + } +} + type StatementSequenceOption func(*ottl.StatementSequence[TransformContext]) func WithStatementSequenceErrorMode(errorMode ottl.ErrorMode) StatementSequenceOption { @@ -127,18 +190,24 @@ func (pep *pathExpressionParser) parsePath(path ottl.Path[TransformContext]) (ot if path == nil { return nil, fmt.Errorf("path cannot be nil") } + // Higher contexts parsing + if path.Context() != "" && path.Context() != ContextName { + return pep.parseHigherContextPath(path.Context(), path) + } + // Backward compatibility with paths without context + if path.Context() == "" && + (path.Name() == internal.ResourceContextName || + path.Name() == internal.InstrumentationScopeContextName || + path.Name() == internal.SpanContextName) { + return pep.parseHigherContextPath(path.Name(), path.Next()) + } + switch path.Name() { case "cache": if path.Keys() == nil { return accessCache(), nil } return accessCacheKey(path.Keys()), nil - case "resource": - return internal.ResourcePathGetSetter[TransformContext](path.Next()) - case "instrumentation_scope": - return internal.ScopePathGetSetter[TransformContext](path.Next()) - case "span": - return internal.SpanPathGetSetter[TransformContext](path.Next()) case "time_unix_nano": return accessSpanEventTimeUnixNano(), nil case "time": @@ -153,16 +222,33 @@ func (pep *pathExpressionParser) parsePath(path ottl.Path[TransformContext]) (ot case "dropped_attributes_count": return accessSpanEventDroppedAttributeCount(), nil default: - return nil, internal.FormatDefaultErrorMessage(path.Name(), path.String(), "Span Event", internal.SpanEventRef) + return nil, internal.FormatDefaultErrorMessage(path.Name(), path.String(), contextNameDescription, internal.SpanEventRef) } +} +func (pep *pathExpressionParser) parseHigherContextPath(context string, path ottl.Path[TransformContext]) (ottl.GetSetter[TransformContext], error) { + switch context { + case internal.ResourceContextName: + return internal.ResourcePathGetSetter(ContextName, path) + case internal.InstrumentationScopeContextName: + return internal.ScopePathGetSetter(ContextName, path) + case internal.SpanContextName: + return 
internal.SpanPathGetSetter(ContextName, path) + default: + var fullPath string + if path != nil { + fullPath = path.String() + } + return nil, internal.FormatDefaultErrorMessage(context, fullPath, contextNameDescription, internal.SpanEventRef) + } } + func accessCache() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.getCache(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if m, ok := val.(pcommon.Map); ok { m.CopyTo(tCtx.getCache()) } @@ -184,10 +270,10 @@ func accessCacheKey(key []ottl.Key[TransformContext]) ottl.StandardGetSetter[Tra func accessSpanEventTimeUnixNano() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.GetSpanEvent().Timestamp().AsTime().UnixNano(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newTimestamp, ok := val.(int64); ok { tCtx.GetSpanEvent().SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(0, newTimestamp))) } @@ -198,10 +284,10 @@ func accessSpanEventTimeUnixNano() ottl.StandardGetSetter[TransformContext] { func accessSpanEventTime() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.GetSpanEvent().Timestamp().AsTime(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newTimestamp, ok := val.(time.Time); ok { tCtx.GetSpanEvent().SetTimestamp(pcommon.NewTimestampFromTime(newTimestamp)) } @@ -212,10 +298,10 @@ func accessSpanEventTime() ottl.StandardGetSetter[TransformContext] { func accessSpanEventName() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.GetSpanEvent().Name(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newName, ok := val.(string); ok { tCtx.GetSpanEvent().SetName(newName) } @@ -226,10 +312,10 @@ func accessSpanEventName() ottl.StandardGetSetter[TransformContext] { func accessSpanEventAttributes() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.GetSpanEvent().Attributes(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if attrs, ok := val.(pcommon.Map); ok { attrs.CopyTo(tCtx.GetSpanEvent().Attributes()) } @@ -251,10 +337,10 @@ func accessSpanEventAttributesKey(key []ottl.Key[TransformContext]) ottl.Standar 
func accessSpanEventDroppedAttributeCount() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return int64(tCtx.GetSpanEvent().DroppedAttributesCount()), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newCount, ok := val.(int64); ok { tCtx.GetSpanEvent().SetDroppedAttributesCount(uint32(newCount)) } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/expression.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/expression.go index 6f6cc18b81c..a2e8d29e63c 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/expression.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/expression.go @@ -4,14 +4,16 @@ package ottl // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" import ( + "bytes" "context" + "encoding/binary" "encoding/hex" "fmt" "reflect" "strconv" "time" - jsoniter "github.com/json-iterator/go" + "github.com/goccy/go-json" "go.opentelemetry.io/collector/pdata/pcommon" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/internal/ottlcommon" @@ -109,10 +111,35 @@ func (g exprGetter[K]) Get(ctx context.Context, tCtx K) (any, error) { } result = ottlcommon.GetValue(r.At(int(*k.Int))) case []any: - if int(*k.Int) >= len(r) || int(*k.Int) < 0 { - return nil, fmt.Errorf("index %v out of bounds", *k.Int) + result, err = getElementByIndex(r, k.Int) + if err != nil { + return nil, err + } + case []string: + result, err = getElementByIndex(r, k.Int) + if err != nil { + return nil, err + } + case []bool: + result, err = getElementByIndex(r, k.Int) + if err != nil { + return nil, err + } + case []float64: + result, err = getElementByIndex(r, k.Int) + if err != nil { + return nil, err + } + case []int64: + result, err = getElementByIndex(r, k.Int) + if err != nil { + return nil, err + } + case []byte: + result, err = getElementByIndex(r, k.Int) + if err != nil { + return nil, err } - result = r[*k.Int] default: return nil, fmt.Errorf("type, %T, does not support int indexing", result) } @@ -123,6 +150,13 @@ func (g exprGetter[K]) Get(ctx context.Context, tCtx K) (any, error) { return result, nil } +func getElementByIndex[T any](r []T, idx *int64) (any, error) { + if int(*idx) >= len(r) || int(*idx) < 0 { + return nil, fmt.Errorf("index %v out of bounds", *idx) + } + return r[*idx], nil +} + type listGetter[K any] struct { slice []Getter[K] } @@ -141,6 +175,31 @@ func (l *listGetter[K]) Get(ctx context.Context, tCtx K) (any, error) { return evaluated, nil } +type mapGetter[K any] struct { + mapValues map[string]Getter[K] +} + +func (m *mapGetter[K]) Get(ctx context.Context, tCtx K) (any, error) { + evaluated := map[string]any{} + for k, v := range m.mapValues { + val, err := v.Get(ctx, tCtx) + if err != nil { + return nil, err + } + switch t := val.(type) { + case pcommon.Map: + evaluated[k] = t.AsRaw() + default: + evaluated[k] = t + } + } + result := pcommon.NewMap() + if err := result.FromRaw(evaluated); err != nil { + return nil, err + } + return result, nil +} + // TypeError represents that a value was not an expected type. 
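Two behavioral notes on the expression changes above: int indexing now works uniformly on the typed Go slices converters can return ([]string, []bool, []float64, []int64, []byte) through the shared bounds-checked getElementByIndex, and map literals evaluate into a pcommon.Map via mapGetter. Assuming the Split converter from ottlfuncs, statements along these lines become expressible (illustrative, not taken from a changelog):

set(attributes["first"], Split(attributes["csv"], ",")[0])
set(cache["ids"], {"trace": trace_id.string, "span": span_id.string})
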
type TypeError string @@ -401,22 +460,25 @@ func (g StandardStringLikeGetter[K]) Get(ctx context.Context, tCtx K) (*string, case []byte: result = hex.EncodeToString(v) case pcommon.Map: - result, err = jsoniter.MarshalToString(v.AsRaw()) + resultBytes, err := json.Marshal(v.AsRaw()) if err != nil { return nil, err } + result = string(resultBytes) case pcommon.Slice: - result, err = jsoniter.MarshalToString(v.AsRaw()) + resultBytes, err := json.Marshal(v.AsRaw()) if err != nil { return nil, err } + result = string(resultBytes) case pcommon.Value: result = v.AsString() default: - result, err = jsoniter.MarshalToString(v) + resultBytes, err := json.Marshal(v) if err != nil { return nil, TypeError(fmt.Sprintf("unsupported type: %T", v)) } + result = string(resultBytes) } return &result, nil } @@ -549,6 +611,81 @@ func (g StandardIntLikeGetter[K]) Get(ctx context.Context, tCtx K) (*int64, erro return &result, nil } +// ByteSliceLikeGetter is a Getter that returns []byte by converting the underlying value to an []byte if necessary +type ByteSliceLikeGetter[K any] interface { + // Get retrieves []byte value. + // The expectation is that the underlying value is converted to []byte if possible. + // If the value cannot be converted to []byte, nil and an error are returned. + // If the value is nil, nil is returned without an error. + Get(ctx context.Context, tCtx K) ([]byte, error) +} + +type StandardByteSliceLikeGetter[K any] struct { + Getter func(ctx context.Context, tCtx K) (any, error) +} + +func (g StandardByteSliceLikeGetter[K]) Get(ctx context.Context, tCtx K) ([]byte, error) { + val, err := g.Getter(ctx, tCtx) + if err != nil { + return nil, fmt.Errorf("error getting value in %T: %w", g, err) + } + if val == nil { + return nil, nil + } + var result []byte + switch v := val.(type) { + case []byte: + result = v + case string: + result = []byte(v) + case float64, int64, bool: + result, err = valueToBytes(v) + if err != nil { + return nil, fmt.Errorf("error converting value %f of %T: %w", v, g, err) + } + case pcommon.Value: + switch v.Type() { + case pcommon.ValueTypeBytes: + result = v.Bytes().AsRaw() + case pcommon.ValueTypeInt: + result, err = valueToBytes(v.Int()) + if err != nil { + return nil, fmt.Errorf("error converting value %d of int64: %w", v.Int(), err) + } + case pcommon.ValueTypeDouble: + result, err = valueToBytes(v.Double()) + if err != nil { + return nil, fmt.Errorf("error converting value %f of float64: %w", v.Double(), err) + } + case pcommon.ValueTypeStr: + result = []byte(v.Str()) + case pcommon.ValueTypeBool: + result, err = valueToBytes(v.Bool()) + if err != nil { + return nil, fmt.Errorf("error converting value %s of bool: %w", v.Str(), err) + } + default: + return nil, TypeError(fmt.Sprintf("unsupported value type: %v", v.Type())) + } + default: + return nil, TypeError(fmt.Sprintf("unsupported type: %T", v)) + } + return result, nil +} + +// valueToBytes converts a value to a byte slice of length 8. +func valueToBytes(n any) ([]byte, error) { + // Create a buffer to hold the bytes + buf := new(bytes.Buffer) + // Write the value to the buffer using binary.Write + err := binary.Write(buf, binary.BigEndian, n) + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + // BoolLikeGetter is a Getter that returns a bool by converting the underlying value to a bool if necessary. type BoolLikeGetter[K any] interface { // Get retrieves a bool value. 
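valueToBytes above relies on binary.Write, which only accepts fixed-size values: int64 and float64 encode to eight big-endian bytes, bool to one, so the "length 8" in its comment holds only for the numeric cases. A quick runnable check:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	buf := new(bytes.Buffer)
	// int64 has a fixed binary size, so binary.Write succeeds;
	// an untyped int would return an error instead.
	if err := binary.Write(buf, binary.BigEndian, int64(1)); err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", buf.Bytes()) // 0000000000000001
}
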
@@ -638,7 +775,7 @@ func (p *Parser[K]) newGetter(val value) (Getter[K], error) { return &literal[K]{value: *i}, nil } if eL.Path != nil { - np, err := newPath[K](eL.Path.Fields) + np, err := p.newPath(eL.Path) if err != nil { return nil, err } @@ -661,6 +798,18 @@ func (p *Parser[K]) newGetter(val value) (Getter[K], error) { return &lg, nil } + if val.Map != nil { + mg := mapGetter[K]{mapValues: map[string]Getter[K]{}} + for _, kvp := range val.Map.Values { + getter, err := p.newGetter(*kvp.Value) + if err != nil { + return nil, err + } + mg.mapValues[*kvp.Key] = getter + } + return &mg, nil + } + if val.MathExpression == nil { // In practice, can't happen since the DSL grammar guarantees one is set return nil, fmt.Errorf("no value field set. This is a bug in the OpenTelemetry Transformation Language") diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/functions.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/functions.go index 86782be3c41..5740328fa1c 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/functions.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/functions.go @@ -22,39 +22,75 @@ type Enum int64 type EnumSymbol string -func buildOriginalText(fields []field) string { +func buildOriginalText(path *path) string { var builder strings.Builder - for i, f := range fields { + if path.Context != "" { + builder.WriteString(path.Context) + if len(path.Fields) > 0 { + builder.WriteString(".") + } + } + for i, f := range path.Fields { builder.WriteString(f.Name) if len(f.Keys) > 0 { - for _, k := range f.Keys { - builder.WriteString("[") - if k.Int != nil { - builder.WriteString(strconv.FormatInt(*k.Int, 10)) + builder.WriteString(buildOriginalKeysText(f.Keys)) + } + if i != len(path.Fields)-1 { + builder.WriteString(".") + } + } + return builder.String() +} + +func buildOriginalKeysText(keys []key) string { + var builder strings.Builder + if len(keys) > 0 { + for _, k := range keys { + builder.WriteString("[") + if k.Int != nil { + builder.WriteString(strconv.FormatInt(*k.Int, 10)) + } + if k.String != nil { + builder.WriteString(*k.String) + } + if k.Expression != nil { + if k.Expression.Path != nil { + builder.WriteString(buildOriginalText(k.Expression.Path)) } - if k.String != nil { - builder.WriteString(*k.String) + if k.Expression.Float != nil { + builder.WriteString(strconv.FormatFloat(*k.Expression.Float, 'f', 10, 64)) + } + if k.Expression.Int != nil { + builder.WriteString(strconv.FormatInt(*k.Expression.Int, 10)) } - builder.WriteString("]") } - } - if i != len(fields)-1 { - builder.WriteString(".") + builder.WriteString("]") } } return builder.String() } -func newPath[K any](fields []field) (*basePath[K], error) { - if len(fields) == 0 { +func (p *Parser[K]) newPath(path *path) (*basePath[K], error) { + if len(path.Fields) == 0 { return nil, fmt.Errorf("cannot make a path from zero fields") } - originalText := buildOriginalText(fields) + + pathContext, fields, err := p.parsePathContext(path) + if err != nil { + return nil, err + } + + originalText := buildOriginalText(path) var current *basePath[K] for i := len(fields) - 1; i >= 0; i-- { + keys, err := p.newKeys(fields[i].Keys) + if err != nil { + return nil, err + } current = &basePath[K]{ + context: pathContext, name: fields[i].Name, - keys: newKeys[K](fields[i].Keys), + keys: keys, nextPath: current, originalText: originalText, } @@ -64,10 +100,56 @@ func newPath[K any](fields []field) 
(*basePath[K], error) { return current, nil } +func (p *Parser[K]) parsePathContext(path *path) (string, []field, error) { + hasPathContextNames := len(p.pathContextNames) > 0 + if path.Context != "" { + // no pathContextNames means the Parser isn't handling the grammar path's context yet, + // so it falls back to the previous behavior with the path.Context value as the first + // path's segment. + if !hasPathContextNames { + return "", append([]field{{Name: path.Context}}, path.Fields...), nil + } + + if _, ok := p.pathContextNames[path.Context]; !ok { + return "", path.Fields, fmt.Errorf(`context "%s" from path "%s" is not valid, it must be replaced by one of: %s`, path.Context, buildOriginalText(path), p.buildPathContextNamesText("")) + } + + return path.Context, path.Fields, nil + } + + if hasPathContextNames { + originalText := buildOriginalText(path) + return "", nil, fmt.Errorf(`missing context name for path "%s", possibly valid options are: %s`, originalText, p.buildPathContextNamesText(originalText)) + } + + return "", path.Fields, nil +} + +func (p *Parser[K]) buildPathContextNamesText(path string) string { + var builder strings.Builder + var suffix string + if path != "" { + suffix = "." + path + } + + i := 0 + for ctx := range p.pathContextNames { + builder.WriteString(fmt.Sprintf(`"%s%s"`, ctx, suffix)) + if i != len(p.pathContextNames)-1 { + builder.WriteString(", ") + } + i++ + } + return builder.String() +} + // Path represents a chain of path parts in an OTTL statement, such as `body.string`. // A Path has a name, and potentially a set of keys. // If the path in the OTTL statement contains multiple parts (separated by a dot (`.`)), then the Path will have a pointer to the next Path. type Path[K any] interface { + // Context is the OTTL context name of this Path. + Context() string + // Name is the name of this segment of the path. Name() string @@ -86,6 +168,7 @@ type Path[K any] interface { var _ Path[any] = &basePath[any]{} type basePath[K any] struct { + context string name string keys []Key[K] nextPath *basePath[K] @@ -94,6 +177,10 @@ type basePath[K any] struct { originalText string } +func (p *basePath[K]) Context() string { + return p.context +} + func (p *basePath[K]) Name() string { return p.name } @@ -131,18 +218,36 @@ func (p *basePath[K]) isComplete() error { return p.nextPath.isComplete() } -func newKeys[K any](keys []key) []Key[K] { +func (p *Parser[K]) newKeys(keys []key) ([]Key[K], error) { if len(keys) == 0 { - return nil + return nil, nil } ks := make([]Key[K], len(keys)) for i := range keys { + var getter Getter[K] + if keys[i].Expression != nil { + if keys[i].Expression.Path != nil { + g, err := p.buildGetSetterFromPath(keys[i].Expression.Path) + if err != nil { + return nil, err + } + getter = g + } + if keys[i].Expression.Converter != nil { + g, err := p.newGetterFromConverter(*keys[i].Expression.Converter) + if err != nil { + return nil, err + } + getter = g + } + } ks[i] = &baseKey[K]{ s: keys[i].String, i: keys[i].Int, + g: getter, } } - return ks + return ks, nil } // Key represents a chain of keys in an OTTL statement, such as `attributes["foo"]["bar"]`. @@ -158,6 +263,12 @@ type Key[K any] interface { // If the Key does not have a int value the returned value is nil. // If Key experiences an error retrieving the value it is returned. Int(context.Context, K) (*int64, error) + + // ExpressionGetter returns a Getter to the expression, that can be + // part of the path. + // If the Key does not have an expression the returned value is nil. 
+ // If Key experiences an error retrieving the value it is returned. + ExpressionGetter(context.Context, K) (Getter[K], error) } var _ Key[any] = &baseKey[any]{} @@ -165,6 +276,7 @@ var _ Key[any] = &baseKey[any]{} type baseKey[K any] struct { s *string i *int64 + g Getter[K] } func (k *baseKey[K]) String(_ context.Context, _ K) (*string, error) { @@ -175,6 +287,10 @@ func (k *baseKey[K]) Int(_ context.Context, _ K) (*int64, error) { return k.i, nil } +func (k *baseKey[K]) ExpressionGetter(_ context.Context, _ K) (Getter[K], error) { + return k.g, nil +} + func (p *Parser[K]) parsePath(ip *basePath[K]) (GetSetter[K], error) { g, err := p.pathParser(ip) if err != nil { @@ -283,8 +399,8 @@ func (p *Parser[K]) buildArgs(ed editor, argsVal reflect.Value) error { switch { case arg.Value.Enum != nil: name = string(*arg.Value.Enum) - case arg.Value.FunctionName != nil: - name = *arg.Value.FunctionName + case arg.FunctionName != nil: + name = *arg.FunctionName default: return fmt.Errorf("invalid function name given") } @@ -402,6 +518,18 @@ func (p *Parser[K]) buildSliceArg(argVal value, argType reflect.Type) (any, erro } } +func (p *Parser[K]) buildGetSetterFromPath(path *path) (GetSetter[K], error) { + np, err := p.newPath(path) + if err != nil { + return nil, err + } + arg, err := p.parsePath(np) + if err != nil { + return nil, err + } + return arg, nil +} + // Handle interfaces that can be passed as arguments to OTTL functions. func (p *Parser[K]) buildArg(argVal value, argType reflect.Type) (any, error) { name := argType.Name() @@ -409,18 +537,10 @@ func (p *Parser[K]) buildArg(argVal value, argType reflect.Type) (any, error) { case strings.HasPrefix(name, "Setter"): fallthrough case strings.HasPrefix(name, "GetSetter"): - if argVal.Literal == nil || argVal.Literal.Path == nil { - return nil, fmt.Errorf("must be a path") - } - np, err := newPath[K](argVal.Literal.Path.Fields) - if err != nil { - return nil, err + if argVal.Literal != nil && argVal.Literal.Path != nil { + return p.buildGetSetterFromPath(argVal.Literal.Path) } - arg, err := p.parsePath(np) - if err != nil { - return nil, err - } - return arg, nil + return nil, fmt.Errorf("must be a path") case strings.HasPrefix(name, "Getter"): arg, err := p.newGetter(argVal) if err != nil { @@ -493,6 +613,12 @@ func (p *Parser[K]) buildArg(argVal value, argType reflect.Type) (any, error) { return nil, err } return StandardBoolLikeGetter[K]{Getter: arg.Get}, nil + case strings.HasPrefix(name, "ByteSliceLikeGetter"): + arg, err := p.newGetter(argVal) + if err != nil { + return nil, err + } + return StandardByteSliceLikeGetter[K]{Getter: arg.Get}, nil case name == "Enum": arg, err := p.enumParser((*EnumSymbol)(argVal.Enum)) if err != nil { diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/grammar.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/grammar.go index ebd8e58ec7c..c9d8fa268dc 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/grammar.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/grammar.go @@ -6,6 +6,7 @@ package ottl // import "github.com/open-telemetry/opentelemetry-collector-contri import ( "encoding/hex" "fmt" + "strings" "github.com/alecthomas/participle/v2/lexer" ) @@ -19,17 +20,17 @@ type parsedStatement struct { } func (p *parsedStatement) checkForCustomError() error { + validator := &grammarCustomErrorsVisitor{} if p.Converter != nil { - return fmt.Errorf("editor names must start with a 
lowercase letter but got '%v'", p.Converter.Function) - } - err := p.Editor.checkForCustomError() - if err != nil { - return err + validator.add(fmt.Errorf("editor names must start with a lowercase letter but got '%v'", p.Converter.Function)) } + + p.Editor.accept(validator) if p.WhereClause != nil { - return p.WhereClause.checkForCustomError() + p.WhereClause.accept(validator) } - return nil + + return validator.join() } type constExpr struct { @@ -47,14 +48,16 @@ type booleanValue struct { SubExpr *booleanExpression `parser:"| '(' @@ ')' )"` } -func (b *booleanValue) checkForCustomError() error { +func (b *booleanValue) accept(v grammarVisitor) { if b.Comparison != nil { - return b.Comparison.checkForCustomError() + b.Comparison.accept(v) + } + if b.ConstExpr != nil && b.ConstExpr.Converter != nil { + b.ConstExpr.Converter.accept(v) } if b.SubExpr != nil { - return b.SubExpr.checkForCustomError() + b.SubExpr.accept(v) } - return nil } // opAndBooleanValue represents the right side of an AND boolean expression. @@ -63,8 +66,10 @@ type opAndBooleanValue struct { Value *booleanValue `parser:"@@"` } -func (b *opAndBooleanValue) checkForCustomError() error { - return b.Value.checkForCustomError() +func (b *opAndBooleanValue) accept(v grammarVisitor) { + if b.Value != nil { + b.Value.accept(v) + } } // term represents an arbitrary number of boolean values joined by AND. @@ -73,18 +78,15 @@ type term struct { Right []*opAndBooleanValue `parser:"@@*"` } -func (b *term) checkForCustomError() error { - err := b.Left.checkForCustomError() - if err != nil { - return err +func (b *term) accept(v grammarVisitor) { + if b.Left != nil { + b.Left.accept(v) } for _, r := range b.Right { - err = r.checkForCustomError() - if err != nil { - return err + if r != nil { + r.accept(v) } } - return nil } // opOrTerm represents the right side of an OR boolean expression. @@ -93,8 +95,10 @@ type opOrTerm struct { Term *term `parser:"@@"` } -func (b *opOrTerm) checkForCustomError() error { - return b.Term.checkForCustomError() +func (b *opOrTerm) accept(v grammarVisitor) { + if b.Term != nil { + b.Term.accept(v) + } } // booleanExpression represents a true/false decision expressed @@ -105,17 +109,20 @@ type booleanExpression struct { } func (b *booleanExpression) checkForCustomError() error { - err := b.Left.checkForCustomError() - if err != nil { - return err + validator := &grammarCustomErrorsVisitor{} + b.accept(validator) + return validator.join() +} + +func (b *booleanExpression) accept(v grammarVisitor) { + if b.Left != nil { + b.Left.accept(v) } for _, r := range b.Right { - err = r.checkForCustomError() - if err != nil { - return err + if r != nil { + r.accept(v) } } - return nil } // compareOp is the type of a comparison operator. @@ -178,13 +185,9 @@ type comparison struct { Right value `parser:"@@"` } -func (c *comparison) checkForCustomError() error { - err := c.Left.checkForCustomError() - if err != nil { - return err - } - err = c.Right.checkForCustomError() - return err +func (c *comparison) accept(v grammarVisitor) { + c.Left.accept(v) + c.Right.accept(v) } // editor represents the function call of a statement. 
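Editorial note: the hunks above replace the fail-fast `checkForCustomError` methods with an `accept(grammarVisitor)` traversal, so every grammar violation in a statement is collected before any error is reported. A minimal standalone sketch of that pattern (illustrative names, not the vendored API; the vendored visitor joins messages with `"; "` via `grammarCustomError` rather than `errors.Join`):

```go
package main

import (
	"errors"
	"fmt"
)

// node is any grammar AST node that can be visited.
type node interface{ accept(v visitor) }

// visitor mirrors the shape of the new grammarVisitor interface.
type visitor interface{ visitEditor(e *editorNode) }

// editorNode stands in for a grammar node with a custom validation rule.
type editorNode struct {
	function string
	indexed  bool
}

func (e *editorNode) accept(v visitor) { v.visitEditor(e) }

// errorsVisitor accumulates every violation instead of failing fast.
type errorsVisitor struct{ errs []error }

func (g *errorsVisitor) visitEditor(e *editorNode) {
	if e.indexed {
		g.errs = append(g.errs, fmt.Errorf("editor %q may not be indexed", e.function))
	}
}

// join returns nil when no violations were recorded, else one combined error.
func (g *errorsVisitor) join() error {
	if len(g.errs) == 0 {
		return nil
	}
	return errors.Join(g.errs...)
}

func main() {
	stmt := []node{&editorNode{"set", true}, &editorNode{"limit", true}}
	v := &errorsVisitor{}
	for _, n := range stmt {
		n.accept(v) // both violations are collected in a single pass
	}
	fmt.Println(v.join())
}
```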
@@ -195,19 +198,11 @@ type editor struct { Keys []key `parser:"( @@ )*"` } -func (i *editor) checkForCustomError() error { - var err error - +func (i *editor) accept(v grammarVisitor) { + v.visitEditor(i) for _, arg := range i.Arguments { - err = arg.checkForCustomError() - if err != nil { - return err - } + arg.accept(v) } - if i.Keys != nil { - return fmt.Errorf("only paths and converters may be indexed, not editors, but got %v %v", i.Function, i.Keys) - } - return nil } // converter represents a converter function call. @@ -217,13 +212,23 @@ type converter struct { Keys []key `parser:"( @@ )*"` } +func (c *converter) accept(v grammarVisitor) { + v.visitConverter(c) + if c.Arguments != nil { + for _, a := range c.Arguments { + a.accept(v) + } + } +} + type argument struct { - Name string `parser:"(@(Lowercase(Uppercase | Lowercase)*) Equal)?"` - Value value `parser:"@@"` + Name string `parser:"(@(Lowercase(Uppercase | Lowercase)*) Equal)?"` + Value value `parser:"( @@"` + FunctionName *string `parser:"| @(Uppercase(Uppercase | Lowercase)*) )"` } -func (a *argument) checkForCustomError() error { - return a.Value.checkForCustomError() +func (a *argument) accept(v grammarVisitor) { + a.Value.accept(v) } // value represents a part of a parsed statement which is resolved to a value of some sort. This can be a telemetry path @@ -236,23 +241,46 @@ type value struct { String *string `parser:"| @String"` Bool *boolean `parser:"| @Boolean"` Enum *enumSymbol `parser:"| @Uppercase (?! Lowercase)"` - FunctionName *string `parser:"| @(Uppercase(Uppercase | Lowercase)*)"` + Map *mapValue `parser:"| @@"` List *list `parser:"| @@)"` } func (v *value) checkForCustomError() error { + validator := &grammarCustomErrorsVisitor{} + v.accept(validator) + return validator.join() +} + +func (v *value) accept(vis grammarVisitor) { + vis.visitValue(v) if v.Literal != nil { - return v.Literal.checkForCustomError() + v.Literal.accept(vis) } if v.MathExpression != nil { - return v.MathExpression.checkForCustomError() + v.MathExpression.accept(vis) + } + if v.Map != nil { + v.Map.accept(vis) + } + if v.List != nil { + for _, i := range v.List.Values { + i.accept(vis) + } } - return nil } // path represents a telemetry path mathExpression. type path struct { - Fields []field `parser:"@@ ( '.' @@ )*"` + Pos lexer.Position + Context string `parser:"(@Lowercase '.')?"` + Fields []field `parser:"@@ ( '.' @@ )*"` +} + +func (p *path) accept(v grammarVisitor) { + v.visitPath(p) + for _, field := range p.Fields { + field.accept(v) + } } // field is an item within a path. 
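Editorial note: together with the `mapGetter` added in expression.go earlier in this diff, the `mapValue`/`mapItem` grammar nodes above let OTTL statements pass map literals, e.g. something like `set(attributes["test"], {"k1": {"k2": "v"}})`. The final step of that evaluation folds the evaluated literal into a `pcommon.Map`; a small sketch of that conversion against the real pdata API (the surrounding wiring is simplified here):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

func main() {
	// Evaluated map-literal values, in the shape mapGetter.Get assembles them.
	evaluated := map[string]any{
		"service": "checkout",
		"nested":  map[string]any{"retries": int64(3)},
	}

	// FromRaw converts the plain Go map into a pcommon.Map, returning an
	// error for unsupported value types.
	result := pcommon.NewMap()
	if err := result.FromRaw(evaluated); err != nil {
		panic(err)
	}
	fmt.Println(result.AsRaw()) // map[nested:map[retries:3] service:checkout]
}
```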
@@ -261,15 +289,45 @@ type field struct { Keys []key `parser:"( @@ )*"` } +func (f *field) accept(v grammarVisitor) { + for _, key := range f.Keys { + key.accept(v) + } +} + type key struct { - String *string `parser:"'[' (@String "` - Int *int64 `parser:"| @Int) ']'"` + String *string `parser:"'[' (@String "` + Int *int64 `parser:"| @Int"` + Expression *mathExprLiteral `parser:"| @@ ) ']'"` +} + +func (k *key) accept(v grammarVisitor) { + if k.Expression != nil { + k.Expression.accept(v) + } } type list struct { Values []value `parser:"'[' (@@)* (',' @@)* ']'"` } +type mapValue struct { + Values []mapItem `parser:"'{' (@@ ','?)* '}'"` +} + +func (m *mapValue) accept(v grammarVisitor) { + for _, i := range m.Values { + if i.Value != nil { + i.Value.accept(v) + } + } +} + +type mapItem struct { + Key *string `parser:"@String ':'"` + Value *value `parser:"@@"` +} + // byteSlice type for capturing byte slices type byteSlice []byte @@ -308,11 +366,17 @@ type mathExprLiteral struct { Path *path `parser:"| @@ )"` } -func (m *mathExprLiteral) checkForCustomError() error { +func (m *mathExprLiteral) accept(v grammarVisitor) { + v.visitMathExprLiteral(m) + if m.Path != nil { + m.Path.accept(v) + } if m.Editor != nil { - return fmt.Errorf("converter names must start with an uppercase letter but got '%v'", m.Editor.Function) + m.Editor.accept(v) + } + if m.Converter != nil { + m.Converter.accept(v) } - return nil } type mathValue struct { @@ -320,11 +384,13 @@ type mathValue struct { SubExpression *mathExpression `parser:"| '(' @@ ')' )"` } -func (m *mathValue) checkForCustomError() error { +func (m *mathValue) accept(v grammarVisitor) { if m.Literal != nil { - return m.Literal.checkForCustomError() + m.Literal.accept(v) + } + if m.SubExpression != nil { + m.SubExpression.accept(v) } - return m.SubExpression.checkForCustomError() } type opMultDivValue struct { @@ -332,8 +398,10 @@ type opMultDivValue struct { Value *mathValue `parser:"@@"` } -func (m *opMultDivValue) checkForCustomError() error { - return m.Value.checkForCustomError() +func (m *opMultDivValue) accept(v grammarVisitor) { + if m.Value != nil { + m.Value.accept(v) + } } type addSubTerm struct { @@ -341,18 +409,15 @@ type addSubTerm struct { Right []*opMultDivValue `parser:"@@*"` } -func (m *addSubTerm) checkForCustomError() error { - err := m.Left.checkForCustomError() - if err != nil { - return err +func (m *addSubTerm) accept(v grammarVisitor) { + if m.Left != nil { + m.Left.accept(v) } for _, r := range m.Right { - err = r.checkForCustomError() - if err != nil { - return err + if r != nil { + r.accept(v) } } - return nil } type opAddSubTerm struct { @@ -360,8 +425,10 @@ type opAddSubTerm struct { Term *addSubTerm `parser:"@@"` } -func (m *opAddSubTerm) checkForCustomError() error { - return m.Term.checkForCustomError() +func (r *opAddSubTerm) accept(v grammarVisitor) { + if r.Term != nil { + r.Term.accept(v) + } } type mathExpression struct { @@ -369,18 +436,17 @@ type mathExpression struct { Right []*opAddSubTerm `parser:"@@*"` } -func (m *mathExpression) checkForCustomError() error { - err := m.Left.checkForCustomError() - if err != nil { - return err +func (m *mathExpression) accept(v grammarVisitor) { + if m.Left != nil { + m.Left.accept(v) } - for _, r := range m.Right { - err = r.checkForCustomError() - if err != nil { - return err + if m.Right != nil { + for _, r := range m.Right { + if r != nil { + r.accept(v) + } } } - return nil } type mathOp int @@ -444,9 +510,83 @@ func buildLexer() *lexer.StatefulDefinition { {Name: 
`Equal`, Pattern: `=`}, {Name: `LParen`, Pattern: `\(`}, {Name: `RParen`, Pattern: `\)`}, + {Name: `LBrace`, Pattern: `\{`}, + {Name: `RBrace`, Pattern: `\}`}, + {Name: `Colon`, Pattern: `\:`}, {Name: `Punct`, Pattern: `[,.\[\]]`}, {Name: `Uppercase`, Pattern: `[A-Z][A-Z0-9_]*`}, {Name: `Lowercase`, Pattern: `[a-z][a-z0-9_]*`}, {Name: "whitespace", Pattern: `\s+`}, }) } + +// grammarCustomError represents a grammar error in which the statement has a valid syntax +// according to the grammar's definition, but is still logically invalid. +type grammarCustomError struct { + errs []error +} + +// Error returns all errors messages separate by semicolons. +func (e *grammarCustomError) Error() string { + switch len(e.errs) { + case 0: + return "" + case 1: + return e.errs[0].Error() + default: + var b strings.Builder + b.WriteString(e.errs[0].Error()) + for _, err := range e.errs[1:] { + b.WriteString("; ") + b.WriteString(err.Error()) + } + return b.String() + } +} + +func (e *grammarCustomError) Unwrap() []error { + return e.errs +} + +// grammarVisitor allows accessing the grammar AST nodes using the visitor pattern. +type grammarVisitor interface { + visitPath(v *path) + visitEditor(v *editor) + visitConverter(v *converter) + visitValue(v *value) + visitMathExprLiteral(v *mathExprLiteral) +} + +// grammarCustomErrorsVisitor is used to execute custom validations on the grammar AST. +type grammarCustomErrorsVisitor struct { + errs []error +} + +func (g *grammarCustomErrorsVisitor) add(err error) { + g.errs = append(g.errs, err) +} + +func (g *grammarCustomErrorsVisitor) join() error { + if len(g.errs) == 0 { + return nil + } + return &grammarCustomError{errs: g.errs} +} + +func (g *grammarCustomErrorsVisitor) visitPath(_ *path) {} + +func (g *grammarCustomErrorsVisitor) visitValue(_ *value) {} + +func (g *grammarCustomErrorsVisitor) visitConverter(_ *converter) {} + +func (g *grammarCustomErrorsVisitor) visitEditor(v *editor) { + if v.Keys != nil { + g.add(fmt.Errorf("only paths and converters may be indexed, not editors, but got %s%s", v.Function, buildOriginalKeysText(v.Keys))) + } +} + +func (g *grammarCustomErrorsVisitor) visitMathExprLiteral(v *mathExprLiteral) { + if v.Editor != nil { + g.add(fmt.Errorf("converter names must start with an uppercase letter but got '%v'", v.Editor.Function)) + } +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/metadata.yaml index e7333a2c5fd..b1ad2216dd4 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/metadata.yaml +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/metadata.yaml @@ -5,4 +5,5 @@ status: stability: alpha: [ traces, metrics, logs ] codeowners: - active: [TylerHelmuth, kentquirk, bogdandrutu, evan-bradley] \ No newline at end of file + active: [TylerHelmuth, kentquirk, bogdandrutu, evan-bradley, edmocosta] + seeking_new: true \ No newline at end of file diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/README.md index 94712ca3074..2dbb7792f5e 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/README.md +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/README.md @@ -29,9 +29,9 @@ In these situations the function will error if 
it does not know how to do the co Use `ErrorMode` to determine how the `Statement` handles these errors. See the component-specific guides for how each uses error mode: -- [filterprocessor](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/filterprocessor#ottl) -- [routingprocessor](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/routingprocessor#tech-preview-opentelemetry-transformation-language-statements-as-routing-conditions) -- [transformprocessor](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/transformprocessor#config) +- [filterprocessor](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/filterprocessor/README.md#configuration) +- [routingconnector](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/connector/routingconnector/README.md#configuration) +- [transformprocessor](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/transformprocessor/README.md#config) ## Editors @@ -39,14 +39,16 @@ Editors are what OTTL uses to transform telemetry. Editors: -- Are allowed to transform telemetry. When a Function is invoked the expectation is that the underlying telemetry is modified in some way. -- May have side effects. Some Functions may generate telemetry and add it to the telemetry payload to be processed in this batch. -- May return values. Although not common and not required, Functions may return values. +- Are allowed to transform telemetry. When an Editor is invoked the expectation is that the underlying telemetry is modified in some way. +- May have side effects. Some Editors may generate telemetry and add it to the telemetry payload to be processed in this batch. +- May return values. Although not common and not required, Editors may return values. Available Editors: +- [append](#append) - [delete_key](#delete_key) - [delete_matching_keys](#delete_matching_keys) +- [keep_matching_keys](#keep_matching_keys) - [flatten](#flatten) - [keep_keys](#keep_keys) - [limit](#limit) @@ -58,6 +60,19 @@ Available Editors: - [set](#set) - [truncate_all](#truncate_all) +### append + +`append(target, Optional[value], Optional[values])` + +The `append` function appends single or multiple string values to `target`. +`append` converts scalar values into an array if the field exists but is not an array, and creates an array containing the provided values if the field doesn’t exist. + +Resulting field is always of type `pcommon.Slice` and will not convert the types of existing or new items in the slice. This means that it is possible to create a slice whose elements have different types. Be careful when using `append` to set attribute values, as this will produce values that are not possible to create through OpenTelemetry APIs [according to](https://opentelemetry.io/docs/specs/otel/common/#attribute) the OpenTelemetry specification. + +- `append(attributes["tags"], "prod")` +- `append(attributes["tags"], values = ["staging", "staging:east"])` +- `append(attributes["tags_copy"], attributes["tags"])` + ### delete_key `delete_key(target, key)` @@ -92,6 +107,23 @@ Examples: - `delete_matching_keys(resource.attributes, "(?i).*password.*")` +### keep_matching_keys + +`keep_matching_keys(target, pattern)` + +The `keep_matching_keys` function keeps all keys from a `pcommon.Map` that match a regex pattern. + +`target` is a path expression to a `pcommon.Map` type field. `pattern` is a regex string. 
+ +All keys that match the pattern will remain in the map, while non matching keys will be removed. + +Examples: + + +- `keep_matching_keys(attributes, "(?i).*version.*")` + +- `keep_matching_keys(resource.attributes, "(?i).*version.*")` + ### flatten `flatten(target, Optional[prefix], Optional[depth])` @@ -378,48 +410,77 @@ Unlike functions, they do not modify any input telemetry and always return a val Available Converters: - [Base64Decode](#base64decode) +- [Decode](#decode) - [Concat](#concat) - [ConvertCase](#convertcase) +- [ConvertAttributesToElementsXML](#convertattributestoelementsxml) +- [ConvertTextToElementsXML](#converttexttoelementsxml) +- [Day](#day) +- [Double](#double) +- [Duration](#duration) - [ExtractPatterns](#extractpatterns) +- [ExtractGrokPatterns](#extractgrokpatterns) - [FNV](#fnv) +- [Format](#format) +- [FormatTime](#formattime) +- [GetXML](#getxml) +- [Hex](#hex) - [Hour](#hour) - [Hours](#hours) -- [Double](#double) -- [Duration](#duration) +- [InsertXML](#insertxml) - [Int](#int) - [IsBool](#isbool) - [IsDouble](#isdouble) - [IsInt](#isint) +- [IsRootSpan](#isrootspan) - [IsMap](#ismap) - [IsMatch](#ismatch) +- [IsList](#islist) - [IsString](#isstring) - [Len](#len) - [Log](#log) +- [MD5](#md5) - [Microseconds](#microseconds) - [Milliseconds](#milliseconds) +- [Minute](#minute) - [Minutes](#minutes) +- [Month](#month) +- [Nanosecond](#nanosecond) - [Nanoseconds](#nanoseconds) - [Now](#now) - [ParseCSV](#parsecsv) - [ParseJSON](#parsejson) - [ParseKeyValue](#parsekeyvalue) +- [ParseSimplifiedXML](#parsesimplifiedxml) - [ParseXML](#parsexml) +- [RemoveXML](#removexml) +- [Second](#second) - [Seconds](#seconds) - [SHA1](#sha1) - [SHA256](#sha256) +- [SHA512](#sha512) +- [SliceToMap](#slicetomap) +- [Sort](#sort) - [SpanID](#spanid) - [Split](#split) +- [String](#string) - [Substring](#substring) - [Time](#time) +- [ToKeyValueString](#tokeyvaluestring) - [TraceID](#traceid) - [TruncateTime](#truncatetime) +- [Unix](#unix) - [UnixMicro](#unixmicro) - [UnixMilli](#unixmilli) - [UnixNano](#unixnano) - [UnixSeconds](#unixseconds) +- [UserAgent](#useragent) - [UUID](#UUID) +- [Year](#year) + +### Base64Decode (Deprecated) -### Base64Decode +*This function has been deprecated. Please use the [Decode](#decode) function instead.* `Base64Decode(value)` @@ -434,13 +495,29 @@ Examples: - `Base64Decode(attributes["encoded field"])` +### Decode + +`Decode(value, encoding)` + +The `Decode` Converter takes a string or byte array encoded with the specified encoding and returns the decoded string. + +`value` is a valid encoded string or byte array. +`encoding` is a valid encoding name included in the [IANA encoding index](https://www.iana.org/assignments/character-sets/character-sets.xhtml). + +Examples: + +- `Decode("aGVsbG8gd29ybGQ=", "base64")` + + +- `Decode(attributes["encoded field"], "us-ascii")` + ### Concat `Concat(values[], delimiter)` -The `Concat` Converter takes a delimiter and a sequence of values and concatenates their string representation. Unsupported values, such as lists or maps that may substantially increase payload size, are not added to the resulting string. +The `Concat` Converter takes a sequence of values and a delimiter and concatenates their string representation. Unsupported values, such as lists or maps that may substantially increase payload size, are not added to the resulting string. -`values` is a list of values passed as arguments. It supports paths, primitive values, and byte slices (such as trace IDs or span IDs). 
+`values` is a list of values. It supports paths, primitive values, and byte slices (such as trace IDs or span IDs).
`delimiter` is a string value that is placed between strings during concatenation. If no delimiter is desired, then simply pass an empty string.
@@ -477,6 +554,75 @@ Examples:
- `ConvertCase(metric.name, "snake")`
+### ConvertAttributesToElementsXML
+
+`ConvertAttributesToElementsXML(target, Optional[xpath])`
+
+The `ConvertAttributesToElementsXML` Converter returns an edited version of an XML string where attributes are converted into child elements.
+
+`target` is a Getter that returns a string. This string should be in XML format.
+If `target` is not a string, nil, or cannot be parsed as XML, `ConvertAttributesToElementsXML` will return an error.
+
+`xpath` (optional) is a string that specifies an [XPath](https://www.w3.org/TR/1999/REC-xpath-19991116/) expression that
+selects one or more elements. Attributes will only be converted within the result(s) of the xpath.
+
+For example, `<a foo="bar"><b>baz</b></a>` will be converted to `<a><b>baz</b><foo>bar</foo></a>`.
+
+Examples:
+
+Convert all attributes in a document
+
+- `ConvertAttributesToElementsXML(body)`
+
+Convert only attributes within "Record" elements
+
+- `ConvertAttributesToElementsXML(body, "/Log/Record")`
+
+### ConvertTextToElementsXML
+
+`ConvertTextToElementsXML(target, Optional[xpath], Optional[elementName])`
+
+The `ConvertTextToElementsXML` Converter returns an edited version of an XML string where all text belongs to a dedicated element.
+
+`target` is a Getter that returns a string. This string should be in XML format.
+If `target` is not a string, nil, or cannot be parsed as XML, `ConvertTextToElementsXML` will return an error.
+
+`xpath` (optional) is a string that specifies an [XPath](https://www.w3.org/TR/1999/REC-xpath-19991116/) expression that
+selects one or more elements. Content will only be converted within the result(s) of the xpath. The default is `/`.
+
+`elementName` (optional) is a string that is used for any element tags that are created to wrap content.
+The default is `"value"`.
+
+For example, `<a><b>foo</b>bar</a>` will be converted to `<a><b>foo</b><value>bar</value></a>`.
+
+Examples:
+
+Ensure all text content in a document is wrapped in a dedicated element
+
+- `ConvertTextToElementsXML(body)`
+
+Use a custom name for any new elements
+
+- `ConvertTextToElementsXML(body, elementName = "custom")`
+
+Convert only part of the document
+
+- `ConvertTextToElementsXML(body, "/some/part/", "value")`
+
+### Day
+
+`Day(value)`
+
+The `Day` Converter returns the day component from the specified time using the Go stdlib [`time.Day` function](https://pkg.go.dev/time#Time.Day).
+
+`value` is a `time.Time`. If `value` is another type, an error is returned.
+
+The returned type is `int64`.
+
+Examples:
+
+- `Day(Now())`
+
### Double

The `Double` Converter converts an inputted `value` into a double.

@@ -532,6 +678,97 @@ Examples:

- `ExtractPatterns(body, "^(?P<timestamp>\\w+ \\w+ [0-9]+:[0-9]+:[0-9]+) (?P<hostname>([A-Za-z0-9-_]+)) (?P<process>\\w+)(\\[(?P<pid>\\d+)\\])?: (?P<message>.*)$")`

+### ExtractGrokPatterns
+
+`ExtractGrokPatterns(target, pattern, Optional[namedCapturesOnly], Optional[patternDefinitions])`
+
+The `ExtractGrokPatterns` Converter parses unstructured data into a format that is structured and queryable.
+It returns a `pcommon.Map` struct that is a result of extracting named capture groups from the target string. If no matches are found then an empty `pcommon.Map` is returned.
+
+- `target` is a Getter that returns a string.
+- `pattern` is a grok pattern string.
+- `namedCapturesOnly` (optional) specifies whether non-named captures should be returned.
+- `patternDefinitions` (optional) is a list of custom pattern definition strings used inside `pattern`, in the form of `PATTERN_NAME=PATTERN`.
+This parameter lets you define your own custom patterns to improve readability when the extracted `pattern` is not part of the default set or when you need custom naming.
+
+If `target` is not a string or is nil, `ExtractGrokPatterns` returns an error. If `pattern` does not contain at least one named capture group and `namedCapturesOnly` is set to `true`, then `ExtractGrokPatterns` errors on startup.
+
+Parsing is done using the [Elastic Go-Grok](https://github.com/elastic/go-grok?tab=readme-ov-file) library.
+Grok is a regular expression dialect that supports reusable aliased expressions. It sits on the `re2` regex library, so any valid `re2` expressions are valid in grok.
+Grok uses this regular expression language to allow naming existing patterns and combining them into more complex patterns that match your fields.
+
+A pattern can be specified in any of these forms:
+
+- `%{SYNTAX}`, e.g. `%{NUMBER}`
+- `%{SYNTAX:ID}`, e.g. `%{NUMBER:MY_AGE}`
+- `%{SYNTAX:ID:TYPE}`, e.g. `%{NUMBER:MY_AGE:INT}`
+
+Here `SYNTAX` is a pattern that will match your text, `ID` is the identifier you give to the piece of text being matched, and `TYPE` is the data type to which you want to cast the named field.
+Supported types are `int`, `long`, `double`, `float`, and `boolean`.
+
+The [Elastic Go-Grok](https://github.com/elastic/go-grok) library ships with numerous predefined grok patterns that simplify working with grok.
+The collector includes the complete set, consisting of a default set plus all additional sets that add product- or tool-specific capabilities (like the [aws](https://github.com/elastic/go-grok/blob/main/patterns/aws.go) or [java](https://github.com/elastic/go-grok/blob/main/patterns/java.go) patterns).
+
+The default set consists of:
+
+| Name | Example |
+|-----|-----|
+| WORD | "hello", "world123", "test_data" |
+| NOTSPACE | "example", "text-with-dashes", "12345" |
+| SPACE | " ", "\t", " " |
+| INT | "123", "-456", "+789" |
+| NUMBER | "123", "456.789", "-0.123" |
+| BOOL | "true", "false", "true" |
+| BASE10NUM | "123", "-123.456", "0.789" |
+| BASE16NUM | "1a2b", "0x1A2B", "-0x1a2b3c" |
+| BASE16FLOAT | "0x1.a2b3", "-0x1A2B3C.D" |
+| POSINT | "123", "456", "789" |
+| NONNEGINT | "0", "123", "456" |
+| GREEDYDATA | "anything goes", "literally anything", "123 #@!" |
+| QUOTEDSTRING | "\"This is a quote\"", "'single quoted'" |
+| UUID | "123e4567-e89b-12d3-a456-426614174000" |
+| URN | "urn:isbn:0451450523", "urn:ietf:rfc:2648" |
+
+and many more. The complete list can be found [here](https://github.com/elastic/go-grok/blob/main/patterns/default.go).
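Editorial note: since grok compiles down to `re2`, the extraction mechanics reduce to named capture groups, which can be sketched with the Go standard library alone (go-grok adds the `%{...}` aliasing and type casting on top; this is an illustration, not the converter's implementation):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// A grok expression such as %{WORD:user}:%{INT:pid} ultimately expands
	// to an re2 pattern with named capture groups like this one.
	re := regexp.MustCompile(`^(?P<user>\w+):(?P<pid>[+-]?\d+)$`)

	match := re.FindStringSubmatch("smith:1001")
	if match == nil {
		return // no matches: the converter would return an empty map
	}

	// Collect named groups, mirroring the pcommon.Map result shape.
	result := map[string]string{}
	for i, name := range re.SubexpNames() {
		if i != 0 && name != "" { // index 0 is the whole match
			result[name] = match[i]
		}
	}
	fmt.Println(result) // map[pid:1001 user:smith]
}
```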
+
+Examples:
+
+- _Uses a regex pattern with named captures to extract_:
+
+  `ExtractGrokPatterns(attributes["k8s.change_cause"], "GIT_SHA=(?P<git_sha>\w+)")`
+
+- _Uses a regex pattern with named captures to extract_:
+
+  `ExtractGrokPatterns(body, "^(?P<timestamp>\\w+ \\w+ [0-9]+:[0-9]+:[0-9]+) (?P<hostname>([A-Za-z0-9-_]+)) (?P<process>\\w+)(\\[(?P<pid>\\d+)\\])?: (?P<message>.*)$")`
+
+- _Uses `URI` from the default set to extract a URI and includes only named captures_:
+
+  `ExtractGrokPatterns(body, "%{URI}", true)`
+
+- _Uses a more complex pattern consisting of elements from the default set and includes only named captures_:
+
+  `ExtractGrokPatterns(body, "%{DATESTAMP:timestamp} %{TZ:event.timezone} %{DATA:user.name} %{GREEDYDATA:postgresql.log.connection_id} %{POSINT:process.pid:int}", true)`
+
+- _Uses the `LOGLINE` pattern defined in `patternDefinitions` passed as the last argument_:
+
+  `ExtractGrokPatterns(body, "%{LOGLINE}", true, ["LOGLINE=%{DATESTAMP:timestamp} %{TZ:event.timezone} %{DATA:user.name} %{GREEDYDATA:postgresql.log.connection_id} %{POSINT:process.pid:int}"])`
+
+- Add custom patterns to parse the password from `/etc/passwd` and make `pattern` readable:
+
+  - `pattern`: `%{USERNAME:user.name}:%{PASSWORD:user.password}:%{USERINFO}`
+  - `patternDefinitions`:
+    - `PASSWORD=%{WORD}`
+    - `USERINFO=%{GREEDYDATA}`
+
+  Note that `USERNAME` is in the default pattern set and does not need to be redefined.
+
+  - Target: `smith:pass123:1001:1000:J Smith,1234,(234)567-8910,(234)567-1098,email:/home/smith:/bin/sh`
+
+  - Return values:
+    - `user.name`: smith
+    - `user.password`: pass123
+
### FNV

`FNV(value)`

@@ -551,6 +788,151 @@ Examples:

- `FNV("name")`

+### Format
+
+```Format(formatString, []formatArguments)```
+
+The `Format` Converter takes the given format string and formats it using `fmt.Sprintf` and the given arguments.
+
+`formatString` is a string. `formatArguments` is an array of values.
+
+If the `formatString` is not a string or does not exist, the `Format` Converter will return an error.
+If any of the `formatArgs` are incorrect (e.g. missing, or an incorrect type for the corresponding format specifier), then a string will still be returned, but with Go's default error handling for `fmt.Sprintf`.
+
+Format specifiers that can be used in `formatString` are documented in Go's [fmt package documentation](https://pkg.go.dev/fmt#hdr-Printing).
+
+Examples:
+
+- `Format("%02d", [attributes["priority"]])`
+- `Format("%04d-%02d-%02d", [Year(Now()), Month(Now()), Day(Now())])`
+- `Format("%s/%s/%04d-%02d-%02d.log", [attributes["hostname"], body["program"], Year(Now()), Month(Now()), Day(Now())])`
+
+### FormatTime
+
+`FormatTime(time, format)`
+
+The `FormatTime` Converter takes a `time.Time` and converts it to a human-readable string representation of the time according to the specified format.
+
+`time` is a `time.Time`. If `time` is another type, an error is returned. `format` is a string.
+
+If either `time` or `format` are nil, an error is returned. The parser used is the parser at [internal/coreinternal/parser](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/internal/coreinternal/timeutils). If `format` does not follow the parsing rules used by this parser, an error is returned.
+
+`format` denotes a human-readable textual representation of the resulting time value formatted according to a ctime-like format string.
It follows [standard Go Layout formatting](https://pkg.go.dev/time#pkg-constants) with few additional substitutes: +| substitution | description | examples | +|-----|-----|-----| +|`%Y` | Year as a zero-padded number | 0001, 0002, ..., 2019, 2020, ..., 9999 | +|`%y` | Year, last two digits as a zero-padded number | 01, ..., 99 | +|`%m` | Month as a zero-padded number | 01, 02, ..., 12 | +|`%o` | Month as a space-padded number | 1, 2, ..., 12 | +|`%q` | Month as an unpadded number | 1,2,...,12 | +|`%b`, `%h` | Abbreviated month name | Jan, Feb, ... | +|`%B` | Full month name | January, February, ... | +|`%d` | Day of the month as a zero-padded number | 01, 02, ..., 31 | +|`%e` | Day of the month as a space-padded number| 1, 2, ..., 31 | +|`%g` | Day of the month as a unpadded number | 1,2,...,31 | +|`%a` | Abbreviated weekday name | Sun, Mon, ... | +|`%A` | Full weekday name | Sunday, Monday, ... | +|`%H` | Hour (24-hour clock) as a zero-padded number | 00, ..., 24 | +|`%I` | Hour (12-hour clock) as a zero-padded number | 00, ..., 12 | +|`%l` | Hour 12-hour clock | 0, ..., 24 | +|`%p` | Locale’s equivalent of either AM or PM | AM, PM | +|`%P` | Locale’s equivalent of either am or pm | am, pm | +|`%M` | Minute as a zero-padded number | 00, 01, ..., 59 | +|`%S` | Second as a zero-padded number | 00, 01, ..., 59 | +|`%L` | Millisecond as a zero-padded number | 000, 001, ..., 999 | +|`%f` | Microsecond as a zero-padded number | 000000, ..., 999999 | +|`%s` | Nanosecond as a zero-padded number | 00000000, ..., 99999999 | +|`%z` | UTC offset in the form ±HHMM[SS[.ffffff]] or empty | +0000, -0400 | +|`%Z` | Timezone name or abbreviation or empty | UTC, EST, CST | +|`%i` | Timezone as +/-HH | -07 | +|`%j` | Timezone as +/-HH:MM | -07:00 | +|`%k` | Timezone as +/-HH:MM:SS | -07:00:00 | +|`%w` | Timezone as +/-HHMMSS | -070000 | +|`%D`, `%x` | Short MM/DD/YYYY date, equivalent to %m/%d/%y | 01/21/2031 | +|`%F` | Short YYYY-MM-DD date, equivalent to %Y-%m-%d | 2031-01-21 | +|`%T`,`%X` | ISO 8601 time format (HH:MM:SS), equivalent to %H:%M:%S | 02:55:02 | +|`%r` | 12-hour clock time | 02:55:02 pm | +|`%R` | 24-hour HH:MM time, equivalent to %H:%M | 13:55 | +|`%n` | New-line character ('\n') | | +|`%t` | Horizontal-tab character ('\t') | | +|`%%` | A % sign | | +|`%c` | Date and time representation | Mon Jan 02 15:04:05 2006 | + +Examples: + +- `FormatTime(Time("02/04/2023", "%m/%d/%Y"), "%A %h %e %Y")` +- `FormatTime(UnixNano(attributes["time_nanoseconds"]), "%b %d %Y %H:%M:%S")` +- `FormatTime(TruncateTime(time, Duration("10h 20m"))), "%Y-%m-%d %H:%M:%S")` + +### GetXML + +`GetXML(target, xpath)` + +The `GetXML` Converter returns an XML string with selected elements. + +`target` is a Getter that returns a string. This string should be in XML format. +If `target` is not a string, nil, or is not valid xml, `GetXML` will return an error. + +`xpath` is a string that specifies an [XPath](https://www.w3.org/TR/1999/REC-xpath-19991116/) expression that +selects one or more elements. Currently, this converter only supports selecting elements. 
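Editorial note: this kind of XPath selection can be sketched with `github.com/antchfx/xmlquery`, which I assume here to be the XPath library backing the contrib XML converters; treat the snippet as an approximation of what `GetXML` does, not its actual code:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/antchfx/xmlquery"
)

func main() {
	doc, err := xmlquery.Parse(strings.NewReader(`<a><b x="1">foo</b><b>bar</b></a>`))
	if err != nil {
		panic(err)
	}

	// Select every <b> element, the same shape of query GetXML accepts.
	for _, n := range xmlquery.Find(doc, "//b") {
		// OutputXML(true) renders the node itself, including its tag.
		fmt.Println(n.OutputXML(true))
	}
	// Output:
	// <b x="1">foo</b>
	// <b>bar</b>
}
```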
+
+Examples:
+
+Get all elements at the root of the document with tag "a"
+
+- `GetXML(body, "/a")`
+
+Get all elements anywhere in the document with tag "a"
+
+- `GetXML(body, "//a")`
+
+Get the first element at the root of the document with tag "a"
+
+- `GetXML(body, "/a[1]")`
+
+Get all elements in the document with tag "a" that have an attribute "b" with value "c"
+
+- `GetXML(body, "//a[@b='c']")`
+
+Get `foo` from `<a>foo</a>`
+
+- `GetXML(body, "/a/text()")`
+
+Get `hello` from `<a><![CDATA[hello]]></a>`
+
+- `GetXML(body, "/a/text()")`
+
+Get `bar` from `<a foo="bar"/>`
+
+- `GetXML(body, "/a/@foo")`
+
+### Hex
+
+`Hex(value)`
+
+The `Hex` converter converts the `value` to its hexadecimal representation.
+
+The returned type is the string representation of the hexadecimal value.
+
+The input `value` types:
+
+- float64 (`1.1` will result in `0x3ff199999999999a`)
+- string (`"1"` will result in `0x31`)
+- bool (`true` will result in `0x01`; `false` in `0x00`)
+- int64 (`12` will result in `0xC`)
+- []byte (without any changes - `0x02` will result in `0x02`)
+
+If `value` is another type or parsing failed, nil is always returned.
+
+The `value` is either a path expression to a telemetry field to retrieve or a literal.
+
+Examples:
+
+- `Hex(attributes["http.status_code"])`
+
+- `Hex(2.0)`
+
### Hour

`Hour(value)`

@@ -579,6 +961,35 @@ Examples:

- `Hours(Duration("1h"))`

+### InsertXML
+
+`InsertXML(target, xpath, value)`
+
+The `InsertXML` Converter returns an edited version of an XML string with child elements added to selected elements.
+
+`target` is a Getter that returns a string. This string should be in XML format and represents the document which will
+be modified. If `target` is not a string, nil, or is not valid XML, `InsertXML` will return an error.
+
+`xpath` is a string that specifies an [XPath](https://www.w3.org/TR/1999/REC-xpath-19991116/) expression that
+selects one or more elements.
+
+`value` is a Getter that returns a string. This string should be in XML format and represents the document which will
+be inserted into `target`. If `value` is not a string, nil, or is not valid XML, `InsertXML` will return an error.
+
+Examples:
+
+Add an element "foo" to the root of the document
+
+- `InsertXML(body, "/", "<foo/>")`
+
+Add an element "bar" to any element called "foo"
+
+- `InsertXML(body, "//foo", "<bar/>")`
+
+Fetch and insert an XML document into another
+
+- `InsertXML(body, "/subdoc", attributes["subdoc"])`
+
### Int

`Int(value)`

@@ -663,6 +1074,23 @@ Examples:

- `IsInt(attributes["maybe a int"])`

+### IsRootSpan
+
+`IsRootSpan()`
+
+The `IsRootSpan` Converter returns `true` if the span in the corresponding context is root, which means
+its `parent_span_id` is equal to the hexadecimal representation of zero.
+
+This function is supported with [OTTL span context](../contexts/ottlspan/README.md). In any other context it is not supported.
+
+The function returns `false` in all other scenarios, including `parent_span_id == ""` or `parent_span_id == nil`.
+
+Examples:
+
+- `IsRootSpan()`
+
+- `set(attributes["isRoot"], "true") where IsRootSpan()`
+
### IsMap

`IsMap(value)`

@@ -706,6 +1134,22 @@ Examples:

- `IsMatch("string", ".*ring")`

+### IsList
+
+`IsList(value)`
+
+The `IsList` Converter returns true if the given value is a list.
+
+The `value` is either a path expression to a telemetry field to retrieve or a literal.
+
+If `value` is a `list`, `pcommon.ValueTypeSlice`, `pcommon.Slice`, or any other list type, then it returns `true`; otherwise it returns `false`.
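Editorial note: the check itself reduces to a type switch over the raw value and the pdata wrapper types; a minimal sketch with a hypothetical `isList` helper (not the vendored function):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

// isList reports whether val is one of the list-like types the converter accepts.
func isList(val any) bool {
	switch v := val.(type) {
	case pcommon.Slice, []any, []string, []bool, []int64, []float64, [][]byte:
		return true
	case pcommon.Value:
		// A pcommon.Value counts as a list only when it wraps a slice.
		return v.Type() == pcommon.ValueTypeSlice
	default:
		return false
	}
}

func main() {
	s := pcommon.NewSlice()
	s.AppendEmpty().SetStr("x")
	fmt.Println(isList(s), isList([]any{1, 2}), isList("not a list")) // true true false
}
```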
+ +Examples: + +- `IsList(body)` + +- `IsList(attributes["maybe a slice"])` + ### IsString `IsString(value)` @@ -762,6 +1206,26 @@ Examples: - `Int(Log(attributes["duration_ms"])` +### MD5 + +`MD5(value)` + +The `MD5` Converter converts the `value` to a md5 hash/digest. + +The returned type is string. + +`value` is either a path expression to a string telemetry field or a literal string. If `value` is another type an error is returned. + +If an error occurs during hashing it will be returned. + +Examples: + +- `MD5(attributes["device.name"])` + +- `MD5("name")` + +**Note:** According to the National Institute of Standards and Technology (NIST), MD5 is no longer a recommended hash function. It should be avoided except when required for compatibility. New uses should prefer a SHA-2 family function (e.g. SHA-256, SHA-512) whenever possible. + ### Microseconds `Microseconds(value)` @@ -790,6 +1254,20 @@ Examples: - `Milliseconds(Duration("1h"))` +### Minute + +`Minute(value)` + +The `Minute` Converter returns the minute component from the specified time using the Go stdlib [`time.Minute` function](https://pkg.go.dev/time#Time.Minute). + +`value` is a `time.Time`. If `value` is another type, an error is returned. + +The returned type is `int64`. + +Examples: + +- `Minute(Now())` + ### Minutes `Minutes(value)` @@ -804,6 +1282,34 @@ Examples: - `Minutes(Duration("1h"))` +### Month + +`Month(value)` + +The `Month` Converter returns the month component from the specified time using the Go stdlib [`time.Month` function](https://pkg.go.dev/time#Time.Month). + +`value` is a `time.Time`. If `value` is another type, an error is returned. + +The returned type is `int64`. + +Examples: + +- `Month(Now())` + +### Nanosecond + +`Nanosecond(value)` + +The `Nanosecond` Converter returns the nanosecond component from the specified time using the Go stdlib [`time.Nanosecond` function](https://pkg.go.dev/time#Time.Nanosecond). + +`value` is a `time.Time`. If `value` is another type, an error is returned. + +The returned type is `int64`. + +Examples: + +- `Nanosecond(Now())` + ### Nanoseconds `Nanoseconds(value)` @@ -867,12 +1373,12 @@ Examples: `ParseJSON(target)` -The `ParseJSON` Converter returns a `pcommon.Map` struct that is a result of parsing the target string as JSON +The `ParseJSON` Converter returns a `pcommon.Map` or `pcommon.Slice` struct that is a result of parsing the target string as JSON `target` is a Getter that returns a string. This string should be in json format. If `target` is not a string, nil, or cannot be parsed as JSON, `ParseJSON` will return an error. -Unmarshalling is done using [jsoniter](https://github.com/json-iterator/go). +Unmarshalling is done using [goccy/go-json](https://github.com/goccy/go-json). Each JSON type is converted into a `pdata.Value` using the following map: ``` @@ -889,6 +1395,9 @@ Examples: - `ParseJSON("{\"attr\":true}")` +- `ParseJSON("[\"attr1\",\"attr2\"]")` + + - `ParseJSON(attributes["kubernetes"])` @@ -913,6 +1422,132 @@ Examples: - `ParseKeyValue("k1!v1_k2!v2_k3!v3", "!", "_")` - `ParseKeyValue(attributes["pairs"])` +### ParseSimplifiedXML + +`ParseSimplifiedXML(target)` + +The `ParseSimplifiedXML` Converter returns a `pcommon.Map` struct that is the result of parsing the target string without preservation of attributes or extraneous text content. + +The goal of this Converter is to produce a more user-friendly representation of XML data than the [`ParseXML`](#parsexml) Converter. 
+This Converter should be preferred over `ParseXML` when minor semantic details (e.g. order of elements) are not critically important, when subsequent processing or querying of the result is expected, or when human-readability is a concern.
+
+This Converter disregards certain aspects of XML, specifically attributes and extraneous text content, in order to produce
+a direct representation of XML data. Users are encouraged to simplify their XML documents prior to using `ParseSimplifiedXML`.
+
+See other functions which may be useful for preparing XML documents:
+
+- [`ConvertAttributesToElementsXML`](#convertattributestoelementsxml)
+- [`ConvertTextToElementsXML`](#converttexttoelementsxml)
+- [`RemoveXML`](#removexml)
+- [`InsertXML`](#insertxml)
+- [`GetXML`](#getxml)
+
+#### Formal Definitions
+
+A "Simplified XML" document contains no attributes and no extraneous text content.
+
+An element has "extraneous text content" when it contains both text and element content, e.g.
+
+```xml
+<foo>
+    bar
+    <hello>world</hello>
+</foo>
+```
+
+#### Parsing logic
+
+1. Declaration elements, attributes, comments, and extraneous text content are ignored.
+2. Elements which contain a value are converted into key/value pairs.
+   e.g. `<foo>bar</foo>` becomes `"foo": "bar"`
+3. Elements which contain child elements are converted into a key/value pair where the value is a map.
+   e.g. `<foo> <bar>baz</bar> </foo>` becomes `"foo": { "bar": "baz" }`
+4. Sibling elements that share the same tag will be combined into a slice.
+   e.g. `<a> <b>1</b> <c>2</c> <c>3</c> </a>` becomes `"a": { "b": "1", "c": [ "2", "3" ] }`.
+5. Empty elements are dropped, but they can determine whether a value should be a slice or map.
+   e.g. `<a> <b/> <b>1</b> </a>` becomes `"a": { "b": [ "1" ] }` instead of `"a": { "b": "1" }`
+
+#### Examples
+
+Parse a Simplified XML document from the body:
+
+```xml
+<event>
+    <id>1</id>
+    <user>jane</user>
+    <details>
+        <time>2021-10-01T12:00:00Z</time>
+        <description>Something happened</description>
+        <cause>unknown</cause>
+    </details>
+</event>
+```
+
+```json
+{
+  "event": {
+    "id": 1,
+    "user": "jane",
+    "details": {
+      "time": "2021-10-01T12:00:00Z",
+      "description": "Something happened",
+      "cause": "unknown"
+    }
+  }
+}
+```
+
+Parse a Simplified XML document with unique child elements:
+
+```xml
+<x>
+    <y>1</y>
+    <z>2</z>
+</x>
+```
+
+```json
+{
+  "x": {
+    "y": "1",
+    "z": "2"
+  }
+}
+```
+
+Parse a Simplified XML document with multiple elements of the same tag:
+
+```xml
+<a>
+    <b>1</b>
+    <b>2</b>
+</a>
+```
+
+```json
+{
+  "a": {
+    "b": ["1", "2"]
+  }
+}
+```
+
+Parse a Simplified XML document with a CDATA element:
+
+```xml
+<a>
+    <b>1</b>
+    <b><![CDATA[2]]></b>
+</a>
+```
+
+```json
+{
+  "a": {
+    "b": ["1", "2"]
+  }
+}
+```

### ParseXML

@@ -984,7 +1619,84 @@ Examples:

- `ParseXML("<HostInfo hostname=\"example.com\" zone=\"east-1\" cloudprovider=\"aws\" />")`

+### RemoveXML
+
+`RemoveXML(target, xpath)`
+
+The `RemoveXML` Converter returns an edited version of an XML string with selected elements removed.
+
+`target` is a Getter that returns a string. This string should be in XML format.
+If `target` is not a string, nil, or is not valid XML, `RemoveXML` will return an error.
+
+`xpath` is a string that specifies an [XPath](https://www.w3.org/TR/1999/REC-xpath-19991116/) expression that
+selects one or more elements to remove from the XML document.
+
+For example, the XPath `/Log/Record[./Name/@type="archive"]` applied to the following XML document:
+
+```xml
+<?xml version="1.0" encoding="UTF-8"?>
+<Log>
+  <Record>
+    <ID>00001</ID>
+    <Name type="archive"></Name>
+    <Data>Some data</Data>
+  </Record>
+  <Record>
+    <ID>00002</ID>
+    <Name type="user"></Name>
+    <Data>Some data</Data>
+  </Record>
+</Log>
+```
+
+will return:
+
+```xml
+<?xml version="1.0" encoding="UTF-8"?>
+<Log>
+  <Record>
+    <ID>00002</ID>
+    <Name type="user"></Name>
+    <Data>Some data</Data>
+  </Record>
+</Log>
+```
+
+Examples:
+
+Delete the attribute "foo" from the elements with tag "a"
+
+- `RemoveXML(body, "/a/@foo")`
+
+Delete all elements with tag "b" that are children of elements with tag "a"
+
+- `RemoveXML(body, "/a/b")`
+
+Delete all elements with tag "b" that are children of elements with tag "a" and have the attribute "foo" with value "bar"
+
+- `RemoveXML(body, "/a/b[@foo='bar']")`
+
+Delete all comments
+
+- `RemoveXML(body, "//comment()")`
+
+Delete text from nodes that contain the word "sensitive"
+
+- `RemoveXML(body, "//*[contains(text(), 'sensitive')]")`
+
+### Second
+
+`Second(value)`
+
+The `Second` Converter returns the second component from the specified time using the Go stdlib [`time.Second` function](https://pkg.go.dev/time#Time.Second).
+
+`value` is a `time.Time`. If `value` is another type, an error is returned.
+
+The returned type is `int64`.
+
+Examples:
+
+- `Second(Now())`

### Seconds

`Seconds(value)`

@@ -1019,7 +1731,7 @@ Examples:

- `SHA1("name")`

-**Note:** According to the National Institute of Standards and Technology (NIST), SHA1 is no longer a recommended hash function. It should be avoided except when required for compatibility. New uses should prefer FNV whenever possible.
+**Note:** [According to the National Institute of Standards and Technology (NIST)](https://csrc.nist.gov/projects/hash-functions), SHA1 is no longer a recommended hash function. It should be avoided except when required for compatibility. New uses should prefer a SHA-2 family function (such as SHA-256 or SHA-512) whenever possible.

### SHA256

@@ -1037,10 +1749,113 @@ Examples:

- `SHA256(attributes["device.name"])`
-
- `SHA256("name")`

-**Note:** According to the National Institute of Standards and Technology (NIST), SHA256 is no longer a recommended hash function. It should be avoided except when required for compatibility. New uses should prefer FNV whenever possible.
+### SHA512
+
+`SHA512(input)`
+
+The `SHA512` converter calculates the SHA-512 hash value/digest of the `input`.
+
+The returned type is string.
+
+`input` is either a path expression to a string telemetry field or a literal string.
+If `input` is another type, the converter raises an error.
+If an error occurs during hashing, the error will be returned.
+
+Examples:
+
+- `SHA512(attributes["device.name"])`
+
+- `SHA512("name")`
+
+### SliceToMap
+
+`SliceToMap(target, keyPath, Optional[valuePath])`
+
+The `SliceToMap` converter converts a slice of objects to a map. The arguments are as follows:
+
+- `target`: A list of maps containing the entries to be converted.
+- `keyPath`: A string array that determines the name of the keys for the map entries by pointing to the value of an attribute within each slice item. Note that
+the `keyPath` must resolve to a string value, otherwise the converter will not be able to convert the item
+to a map entry.
+- `valuePath`: This optional string array determines which attribute should be used as the value for the map entry. If no
+`valuePath` is defined, the value of the map entry will be the same as the original slice item.
+
+Examples:
+
+The examples below will convert the following input:
+
+```yaml
+attributes:
+  hello: world
+  things:
+    - name: foo
+      value: 2
+    - name: bar
+      value: 5
+```
+
+- `SliceToMap(attributes["things"], ["name"])`:
+
+This converts the input above to the following:
+
+```yaml
+attributes:
+  hello: world
+  things:
+    foo:
+      name: foo
+      value: 2
+    bar:
+      name: bar
+      value: 5
+```
+
+- `SliceToMap(attributes["things"], ["name"], ["value"])`:
+
+This converts the input above to the following:
+
+```yaml
+attributes:
+  hello: world
+  things:
+    foo: 2
+    bar: 5
+```
+
+Once the `SliceToMap` function has been applied to a value, the converted entries are addressable via their keys:
+
+- `set(attributes["thingsMap"], SliceToMap(attributes["things"], ["name"]))`
+- `set(attributes["element_1"], attributes["thingsMap"]["foo"])`
+- `set(attributes["element_2"], attributes["thingsMap"]["bar"])`
+
+### Sort
+
+`Sort(target, Optional[order])`
+
+The `Sort` Converter sorts the `target` array in either ascending or descending order.
+
+`target` is an array or `pcommon.Slice` typed field containing the elements to be sorted.
+
+`order` is a string specifying the sort order. Must be either `asc` or `desc`. The default value is `asc`.
+
+The Sort Converter preserves the data type of the original elements while sorting.
+The behavior varies based on the types of elements in the target slice:
+
+| Element Types | Sorting Behavior | Return Value |
+|---------------|-------------------------------------|--------------|
+| Integers | Sorts as integers | Sorted array of integers |
+| Doubles | Sorts as doubles | Sorted array of doubles |
+| Integers and doubles | Converts all to doubles, then sorts | Sorted array of integers and doubles |
+| Strings | Sorts as strings | Sorted array of strings |
+| Booleans | Converts all to strings, then sorts | Sorted array of booleans |
+| Mix of integers, doubles, booleans, and strings | Converts all to strings, then sorts | Sorted array of mixed types |
+| Any other types | N/A | Returns an error |
+
+Examples:
+
+- `Sort(attributes["device.tags"])`
+- `Sort(attributes["device.tags"], "desc")`

### SpanID

@@ -1064,9 +1879,48 @@ The `Split` Converter separates a string by the delimiter, and returns an array

If the `target` is not a string or does not exist, the `Split` Converter will return an error.

+### Trim
+
+```Trim(target, Optional[replacement])```
+
+The `Trim` Converter removes the leading and trailing character (default: a space character).
+
+If the `target` is not a string or does not exist, the `Trim` Converter will return an error.
+
+### Trim
+
+`Trim(target, Optional[replacement])`
+
+The `Trim` Converter removes the leading and trailing characters from the `target` string (default: a space character).
+
+If the `target` is not a string or does not exist, the `Trim` Converter will return an error.
+
+`target` is a string.
+`replacement` is an optional string representing the characters to trim (default: a space character).
+
+Examples:
+
+- `Trim(" this is a test ", " ")`
+- `Trim("!!this is a test!!", "!!")`
+
+### String
+
+`String(value)`
+
+The `String` Converter converts the `value` to string type.
+
+The returned type is `string`.
+
+- string. The function returns the `value` without changes.
+- []byte. The function returns the `value` as a string encoded in hexadecimal.
+- map. The function returns the `value` as a key-value-pair formatted string.
+- slice. The function returns the `value` as a list formatted string.
+- pcommon.Value. The function returns the `value` as a string type.
+
+If `value` is of another type, it gets marshalled to string type.
+If `value` is empty, or parsing fails, nil is always returned.
+
+The `value` is either a path expression to a telemetry field to retrieve, or a literal.
+
+Examples:
+
+- `String("test")`
+- `String(attributes["http.method"])`
+- `String(span_id)`
+- `String([1,2,3])`
+- `String(false)`
 
 ### Substring
 
@@ -1085,15 +1939,122 @@ Examples:
 
 ### Time
 
-The `Time` Converter takes a string representation of a time and converts it to a Golang `time.Time`.
+`Time(target, format, Optional[location], Optional[locale])`
 
-`time` is a string. `format` is a string.
+The `Time` Converter takes a string representation of a time and converts it to a Golang `time.Time`.
 
-If either `time` or `format` are nil, an error is returned. The parser used is the parser at [internal/coreinternal/parser](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/internal/coreinternal/timeutils). If the time and format do not follow the parsing rules used by this parser, an error is returned.
+`target` is a string. `format` is a string. `location` and `locale` are optional strings.
+
+If either `target` or `format` are nil, an error is returned. The parser used is the parser at [internal/coreinternal/parser](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/internal/coreinternal/timeutils). If the `target` and `format` do not follow the parsing rules used by this parser, an error is returned.
+
+`format` denotes a textual representation of the time value formatted according to a ctime-like format string. It follows [standard Go Layout formatting](https://pkg.go.dev/time#pkg-constants) with a few additional substitutes:
+
+| substitution | description | examples |
+|-----|-----|-----|
+|`%Y` | Year as a zero-padded number | 0001, 0002, ..., 2019, 2020, ..., 9999 |
+|`%y` | Year, last two digits as a zero-padded number | 01, ..., 99 |
+|`%m` | Month as a zero-padded number | 01, 02, ..., 12 |
+|`%o` | Month as a space-padded number | 1, 2, ..., 12 |
+|`%q` | Month as an unpadded number | 1, 2, ..., 12 |
+|`%b`, `%h` | Abbreviated month name | Jan, Feb, ... |
+|`%B` | Full month name | January, February, ... |
+|`%d` | Day of the month as a zero-padded number | 01, 02, ..., 31 |
+|`%e` | Day of the month as a space-padded number | 1, 2, ..., 31 |
+|`%g` | Day of the month as an unpadded number | 1, 2, ..., 31 |
+|`%a` | Abbreviated weekday name | Sun, Mon, ... |
+|`%A` | Full weekday name | Sunday, Monday, ... |
+|`%H` | Hour (24-hour clock) as a zero-padded number | 00, ..., 23 |
+|`%I` | Hour (12-hour clock) as a zero-padded number | 01, ..., 12 |
+|`%l` | Hour (12-hour clock) as an unpadded number | 1, ..., 12 |
+|`%p` | Locale’s equivalent of either AM or PM | AM, PM |
+|`%P` | Locale’s equivalent of either am or pm | am, pm |
+|`%M` | Minute as a zero-padded number | 00, 01, ..., 59 |
+|`%S` | Second as a zero-padded number | 00, 01, ..., 59 |
+|`%L` | Millisecond as a zero-padded number | 000, 001, ..., 999 |
+|`%f` | Microsecond as a zero-padded number | 000000, ..., 999999 |
+|`%s` | Nanosecond as a zero-padded number | 000000000, ..., 999999999 |
+|`%z` | UTC offset in the form ±HHMM[SS[.ffffff]] or empty | +0000, -0400 |
+|`%Z` | Timezone name or abbreviation or empty | UTC, EST, CST |
+|`%i` | Timezone as +/-HH | -07 |
+|`%j` | Timezone as +/-HH:MM | -07:00 |
+|`%k` | Timezone as +/-HH:MM:SS | -07:00:00 |
+|`%w` | Timezone as +/-HHMMSS | -070000 |
+|`%D`, `%x` | Short MM/DD/YYYY date, equivalent to %m/%d/%y | 01/21/2031 |
+|`%F` | Short YYYY-MM-DD date, equivalent to %Y-%m-%d | 2031-01-21 |
+|`%T`, `%X` | ISO 8601 time format (HH:MM:SS), equivalent to %H:%M:%S | 02:55:02 |
+|`%r` | 12-hour clock time | 02:55:02 pm |
+|`%R` | 24-hour HH:MM time, equivalent to %H:%M | 13:55 |
+|`%n` | New-line character ('\n') | |
+|`%t` | Horizontal-tab character ('\t') | |
+|`%%` | A % sign | |
+|`%c` | Date and time representation | Mon Jan 02 15:04:05 2006 |
+
+`location` specifies a default time zone canonical ID to be used for date parsing, in case it is not part of `format`.
+
+When loading `location`, this function will look for the IANA Time Zone database in the following locations, in order:
+
+- a directory or uncompressed zip file named by the ZONEINFO environment variable
+- on a Unix system, the system standard installation location
+- $GOROOT/lib/time/zoneinfo.zip
+- the `time/tzdata` package, if it was imported
+
+When building a Collector binary, importing `time/tzdata` in any Go source file will bundle the database into the binary, which guarantees the lookups will work regardless of the setup on the host (see the sketch at the end of this section). Note this will add roughly 500kB to the binary size.
+
+Examples:
+
+- `Time("02/04/2023", "%m/%d/%Y")`
+- `Time("Feb 15, 2023", "%b %d, %Y")`
+- `Time("2023-05-26 12:34:56 HST", "%Y-%m-%d %H:%M:%S %Z")`
+- `Time("1986-10-01T00:17:33 MST", "%Y-%m-%dT%H:%M:%S %Z")`
+- `Time("2012-11-01T22:08:41+0000 EST", "%Y-%m-%dT%H:%M:%S%z %Z")`
+- `Time("2023-05-26 12:34:56", "%Y-%m-%d %H:%M:%S", "America/New_York")`
+
+`locale` specifies the input language of the `target` value. It is used to interpret timestamp values written in a specific language,
+ensuring that the function can correctly parse the localized month names, day names, and periods of the day based on the provided language.
+
+The value must be a well-formed BCP 47 language tag, and a known [CLDR](https://cldr.unicode.org) v45 locale.
+If not supplied, English (`en`) is used.
+
+Examples:
+
+- `Time("mercoledì set 4 2024", "%A %h %e %Y", "", "it")`
+- `Time("Febrero 25 lunes, 2002, 02:03:04 p.m.", "%B %d %A, %Y, %r", "America/New_York", "es-ES")`
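+
+As a minimal sketch (the `main` package is illustrative), bundling the timezone database into a custom Collector build only requires a blank import:
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+	_ "time/tzdata" // embeds the IANA Time Zone database into the binary
+)
+
+func main() {
+	// With the embedded database, lookups succeed even on hosts without
+	// a local tz database (e.g. minimal container images).
+	loc, err := time.LoadLocation("America/New_York")
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(time.Date(2023, 5, 26, 12, 34, 56, 0, loc))
+}
+```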
+
+### ToKeyValueString
+
+`ToKeyValueString(target, Optional[delimiter], Optional[pair_delimiter], Optional[sort_output])`
+
+The `ToKeyValueString` Converter takes a `pcommon.Map` and converts it to a `string` of key value pairs.
+
+- `target` is a Getter that returns a `pcommon.Map`.
+- `delimiter` is an optional string that is used to join keys and values; the default is `=`.
+- `pair_delimiter` is an optional string that is used to join key value pairs; the default is a single space (` `).
+- `sort_output` is an optional bool that is used to deterministically sort the keys of the output string. It should only be used if the output is required to be in the same order each time, as it introduces some performance overhead.
+
+For example, the following map `{"k1":"v1","k2":"v2","k3":"v3"}` will use the default delimiters and be converted into the following string:
+
+```
+`k1=v1 k2=v2 k3=v3`
+```
+
+**Note:** Any nested arrays or maps will be represented as a JSON string. It is recommended to [flatten](#flatten) `target` before using this function.
+
+For example, `{"k1":"v1","k2":{"k3":"v3","k4":["v4","v5"]}}` will be converted to:
+
+```
+`k1=v1 k2={\"k3\":\"v3\",\"k4\":[\"v4\",\"v5\"]}`
+```
+
+**Note:** If any keys or values contain either delimiter, they will be double quoted. If any double quotes are present in the quoted value, they will be escaped.
+
+For example, `{"k1":"v1","k2":"v=2","k3":"\"v=3\""}` will be converted to:
+
+```
+`k1=v1 k2="v=2" k3="\"v=3\""`
+```
+
+Examples:
+
+- `ToKeyValueString(body)`
+- `ToKeyValueString(body, ":", ",", true)`
 
 ### TraceID
 
@@ -1121,6 +2082,21 @@ Examples:
 
 - `TruncateTime(start_time, Duration("1s"))`
 
+### Unix
+
+`Unix(seconds, Optional[nanoseconds])`
+
+The `Unix` Converter returns an epoch timestamp as a Unix time. Similar to [Golang's Unix function](https://pkg.go.dev/time#Unix).
+
+`seconds` is an `int64`. If `seconds` is another type, an error is returned.
+`nanoseconds` is an `int64`. It is optional and its default value is 0. If `nanoseconds` is another type, an error is returned.
+
+The returned type is `time.Time`.
+
+Examples:
+
+- `Unix(1672527600)`
+
 ### UnixMicro
 
 `UnixMicro(value)`
 
@@ -1177,19 +2153,88 @@ Examples:
 
 - `UnixSeconds(Time("02/04/2023", "%m/%d/%Y"))`
 
+### UserAgent
+
+`UserAgent(value)`
+
+The `UserAgent` Converter parses the string argument, trying to match it against well-known user-agent strings.
+
+`value` is a string or a path to a string. If `value` is not a string, an error is returned.
+
+The results of the parsing are returned as a map containing `user_agent.name`, `user_agent.version` and `user_agent.original`
+as defined in semconv v1.25.0.
+
+Parsing is done using the [uap-go package](https://github.com/ua-parser/uap-go). The specific formats it recognizes can be found [here](https://github.com/ua-parser/uap-core/blob/master/regexes.yaml).
+
+Examples:
+
+- `UserAgent("curl/7.81.0")`
+  ```yaml
+  "user_agent.name": "curl"
+  "user_agent.version": "7.81.0"
+  "user_agent.original": "curl/7.81.0"
+  ```
+- `UserAgent("Mozilla/5.0 (X11; Linux x86_64; rv:126.0) Gecko/20100101 Firefox/126.0")`
+  ```yaml
+  "user_agent.name": "Firefox"
+  "user_agent.version": "126.0"
+  "user_agent.original": "Mozilla/5.0 (X11; Linux x86_64; rv:126.0) Gecko/20100101 Firefox/126.0"
+  ```
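+
+A standalone sketch of the underlying library call (illustrative; the field-to-attribute mapping inside the converter may differ):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/ua-parser/uap-go/uaparser"
+)
+
+func main() {
+	// NewFromSaved uses the regex definitions compiled into the package.
+	parser := uaparser.NewFromSaved()
+	client := parser.Parse("curl/7.81.0")
+	fmt.Println(client.UserAgent.Family)            // "curl"   -> user_agent.name
+	fmt.Println(client.UserAgent.ToVersionString()) // "7.81.0" -> user_agent.version
+}
+```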
+
+### URL
+
+`URL(url_string)`
+
+Parses a Uniform Resource Locator (URL) string and extracts its components as an object.
+This URL object includes properties for the URL’s domain, path, fragment, port, query, scheme, user info, username, and password.
+
+`original`, `domain`, `scheme`, and `path` are always present. Other properties are present only if they have corresponding values.
+
+`url_string` is a `string`.
+
+- `URL("http://www.example.com")`
+
+results in:
+
+```
+  "url.original": "http://www.example.com",
+  "url.scheme": "http",
+  "url.domain": "www.example.com",
+  "url.path": "",
+```
+
+- `URL("http://myusername:mypassword@www.example.com:80/foo.gif?key1=val1&key2=val2#fragment")`
+
+results in:
+
+```
+  "url.path": "/foo.gif",
+  "url.fragment": "fragment",
+  "url.extension": "gif",
+  "url.password": "mypassword",
+  "url.original": "http://myusername:mypassword@www.example.com:80/foo.gif?key1=val1&key2=val2#fragment",
+  "url.scheme": "http",
+  "url.port": 80,
+  "url.user_info": "myusername:mypassword",
+  "url.domain": "www.example.com",
+  "url.query": "key1=val1&key2=val2",
+  "url.username": "myusername",
+```
+
 ### UUID
 
 `UUID()`
 
 The `UUID` function generates a v4 uuid string.
 
-## Function syntax
+### Year
 
-Functions should be named and formatted according to the following standards.
+`Year(value)`
+
+The `Year` Converter returns the year component from the specified time using the Go stdlib [`time.Time.Year` method](https://pkg.go.dev/time#Time.Year).
+
+`value` is a `time.Time`. If `value` is another type, an error is returned.
+
+The returned type is `int64`.
+
+Examples:
 
-- Function names MUST start with a verb unless it is a Factory that creates a new type.
-- Converters MUST be UpperCamelCase.
-- Function names that contain multiple words MUST separate those words with `_`.
-- Functions that interact with multiple items MUST have plurality in the name. Ex: `truncate_all`, `keep_keys`, `replace_all_matches`.
-- Functions that interact with a single item MUST NOT have plurality in the name. If a function would interact with multiple items due to a condition, like `where`, it is still considered singular. Ex: `set`, `delete`, `replace_match`.
-- Functions that change a specific target MUST set the target as the first parameter.
+- `Year(Now())`
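+
+The `Second` and `Year` Converters above are thin wrappers over the corresponding `time.Time` accessors; a quick standalone illustration (not the converters' source):
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+)
+
+func main() {
+	now := time.Now()
+	// Each converter returns the corresponding component as an int64.
+	fmt.Println(int64(now.Year()), int64(now.Second()))
+}
+```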
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_append.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_append.go
new file mode 100644
index 00000000000..3bf6cb2a902
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_append.go
@@ -0,0 +1,135 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs"
+
+import (
+	"context"
+	"fmt"
+
+	"go.opentelemetry.io/collector/pdata/pcommon"
+
+	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+)
+
+type AppendArguments[K any] struct {
+	Target ottl.GetSetter[K]
+	Value  ottl.Optional[ottl.Getter[K]]
+	Values ottl.Optional[[]ottl.Getter[K]]
+}
+
+func NewAppendFactory[K any]() ottl.Factory[K] {
+	return ottl.NewFactory("append", &AppendArguments[K]{}, createAppendFunction[K])
+}
+
+func createAppendFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) {
+	args, ok := oArgs.(*AppendArguments[K])
+	if !ok {
+		return nil, fmt.Errorf("AppendFactory args must be of type *AppendArguments[K]")
+	}
+
+	return appendTo(args.Target, args.Value, args.Values)
+}
+
+func appendTo[K any](target ottl.GetSetter[K], value ottl.Optional[ottl.Getter[K]], values ottl.Optional[[]ottl.Getter[K]]) (ottl.ExprFunc[K], error) {
+	if value.IsEmpty() && values.IsEmpty() {
+		return nil, fmt.Errorf("at least one of the optional arguments ('value' or 'values') must be provided")
+	}
+
+	return func(ctx context.Context, tCtx K) (any, error) {
+		t, err := target.Get(ctx, tCtx)
+		if err != nil {
+			return nil, err
+		}
+
+		// init res with target values
+		var res []any
+
+		if t != nil {
+			switch targetType := t.(type) {
+			case pcommon.Slice:
+				res = append(res, targetType.AsRaw()...)
+			case pcommon.Value:
+				switch targetType.Type() {
+				case pcommon.ValueTypeEmpty:
+					res = append(res, targetType.Str())
+				case pcommon.ValueTypeStr:
+					res = append(res, targetType.Str())
+				case pcommon.ValueTypeInt:
+					res = append(res, targetType.Int())
+				case pcommon.ValueTypeDouble:
+					res = append(res, targetType.Double())
+				case pcommon.ValueTypeBool:
+					res = append(res, targetType.Bool())
+				case pcommon.ValueTypeSlice:
+					res = append(res, targetType.Slice().AsRaw()...)
+				default:
+					return nil, fmt.Errorf("unsupported type of target field: %q", targetType.Type())
+				}
+
+			case []string:
+				res = appendMultiple(res, targetType)
+			case []any:
+				res = append(res, targetType...)
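+			// Typed slices ([]string above; []int64, []bool, []float64 below) cannot be
+			// appended to a []any wholesale, so appendMultiple copies them element by element.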
+ case []int64: + res = appendMultiple(res, targetType) + case []bool: + res = appendMultiple(res, targetType) + case []float64: + res = appendMultiple(res, targetType) + + case string: + res = append(res, targetType) + case int64: + res = append(res, targetType) + case bool: + res = append(res, targetType) + case float64: + res = append(res, targetType) + case any: + res = append(res, targetType) + default: + return nil, fmt.Errorf("unsupported type of target field: '%T'", t) + } + } + + appendGetterFn := func(g ottl.Getter[K]) error { + v, err := g.Get(ctx, tCtx) + if err != nil { + return err + } + res = append(res, v) + return nil + } + + if !value.IsEmpty() { + getter := value.Get() + if err := appendGetterFn(getter); err != nil { + return nil, err + } + } + if !values.IsEmpty() { + getters := values.Get() + for _, g := range getters { + if err := appendGetterFn(g); err != nil { + return nil, err + } + } + } + + // retype []any to Slice, having []any sometimes misbehaves and nils pcommon.Value + resSlice := pcommon.NewSlice() + if err := resSlice.FromRaw(res); err != nil { + return nil, err + } + + return nil, target.Set(ctx, tCtx, resSlice) + }, nil +} + +func appendMultiple[K any](target []any, values []K) []any { + for _, v := range values { + target = append(target, v) + } + return target +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_base64decode.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_base64decode.go index 42e401d7150..f626adc3b55 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_base64decode.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_base64decode.go @@ -30,7 +30,6 @@ func createBase64DecodeFunction[K any](_ ottl.FunctionContext, oArgs ottl.Argume } func Base64Decode[K any](target ottl.StringGetter[K]) (ottl.ExprFunc[K], error) { - return func(ctx context.Context, tCtx K) (any, error) { val, err := target.Get(ctx, tCtx) if err != nil { diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_convert_attributes_to_elements_xml.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_convert_attributes_to_elements_xml.go new file mode 100644 index 00000000000..64d4ecc5fde --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_convert_attributes_to_elements_xml.go @@ -0,0 +1,69 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + "fmt" + + "github.com/antchfx/xmlquery" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type ConvertAttributesToElementsXMLArguments[K any] struct { + Target ottl.StringGetter[K] + XPath ottl.Optional[string] +} + +func NewConvertAttributesToElementsXMLFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("ConvertAttributesToElementsXML", &ConvertAttributesToElementsXMLArguments[K]{}, createConvertAttributesToElementsXMLFunction[K]) +} + +func createConvertAttributesToElementsXMLFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*ConvertAttributesToElementsXMLArguments[K]) + + if !ok { + return nil, fmt.Errorf("ConvertAttributesToElementsXML args 
must be of type *ConvertAttributesToElementsXMLAguments[K]") + } + + xPath := args.XPath.Get() + if xPath == "" { + xPath = "//@*" // All attributes in the document + } + if err := validateXPath(xPath); err != nil { + return nil, err + } + + return convertAttributesToElementsXML(args.Target, xPath), nil +} + +// convertAttributesToElementsXML returns a string that is a result of converting all attributes of the +// target XML into child elements. These new elements are added as the last child elements of the parent. +// e.g. -> worldbar +func convertAttributesToElementsXML[K any](target ottl.StringGetter[K], xPath string) ottl.ExprFunc[K] { + return func(ctx context.Context, tCtx K) (any, error) { + var doc *xmlquery.Node + if targetVal, err := target.Get(ctx, tCtx); err != nil { + return nil, err + } else if doc, err = parseNodesXML(targetVal); err != nil { + return nil, err + } + for _, n := range xmlquery.Find(doc, xPath) { + if n.Type != xmlquery.AttributeNode { + continue + } + xmlquery.AddChild(n.Parent, &xmlquery.Node{ + Type: xmlquery.ElementNode, + Data: n.Data, + FirstChild: &xmlquery.Node{ + Type: xmlquery.TextNode, + Data: n.InnerText(), + }, + }) + n.Parent.RemoveAttr(n.Data) + } + return doc.OutputXML(false), nil + } +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_convert_text_to_elements_xml.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_convert_text_to_elements_xml.go new file mode 100644 index 00000000000..a0fb108c406 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_convert_text_to_elements_xml.go @@ -0,0 +1,107 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + "fmt" + + "github.com/antchfx/xmlquery" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type ConvertTextToElementsXMLArguments[K any] struct { + Target ottl.StringGetter[K] + XPath ottl.Optional[string] + ElementName ottl.Optional[string] +} + +func NewConvertTextToElementsXMLFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("ConvertTextToElementsXML", &ConvertTextToElementsXMLArguments[K]{}, createConvertTextToElementsXMLFunction[K]) +} + +func createConvertTextToElementsXMLFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*ConvertTextToElementsXMLArguments[K]) + + if !ok { + return nil, fmt.Errorf("ConvertTextToElementsXML args must be of type *ConvertTextToElementsXMLAguments[K]") + } + + xPath := args.XPath.Get() + if xPath == "" { + xPath = "/" + } else if err := validateXPath(xPath); err != nil { + return nil, err + } + + elementName := args.ElementName.Get() + if elementName == "" { + elementName = "value" + } + + return convertTextToElementsXML(args.Target, xPath, elementName), nil +} + +// convertTextToElementsXML returns a string that is a result of wrapping any extraneous text nodes with a dedicated element. 
+func convertTextToElementsXML[K any](target ottl.StringGetter[K], xPath string, elementName string) ottl.ExprFunc[K] { + return func(ctx context.Context, tCtx K) (any, error) { + var doc *xmlquery.Node + if targetVal, err := target.Get(ctx, tCtx); err != nil { + return nil, err + } else if doc, err = parseNodesXML(targetVal); err != nil { + return nil, err + } + for _, n := range xmlquery.Find(doc, xPath) { + convertTextToElementsForNode(n, elementName) + } + return doc.OutputXML(false), nil + } +} + +func convertTextToElementsForNode(parent *xmlquery.Node, elementName string) { + switch parent.Type { + case xmlquery.ElementNode: // ok + case xmlquery.DocumentNode: // ok + default: + return + } + + if parent.FirstChild == nil { + return + } + + // Convert any child nodes and count text and element nodes. + var valueCount, elementCount int + for child := parent.FirstChild; child != nil; child = child.NextSibling { + if child.Type == xmlquery.ElementNode { + convertTextToElementsForNode(child, elementName) + elementCount++ + } else if child.Type == xmlquery.TextNode { + valueCount++ + } + } + + // If there are no values to wrap, or if there is exactly one value OR one element, this node is all set. + if valueCount == 0 || elementCount+valueCount <= 1 { + return + } + + // At this point, we either have multiple values, or a mix of values and elements. + // Either way, we need to wrap the values. + for child := parent.FirstChild; child != nil; child = child.NextSibling { + if child.Type != xmlquery.TextNode { + continue + } + newTextNode := &xmlquery.Node{ + Type: xmlquery.TextNode, + Data: child.Data, + } + // Change this node into an element + child.Type = xmlquery.ElementNode + child.Data = elementName + child.FirstChild = newTextNode + child.LastChild = newTextNode + } +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_day.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_day.go new file mode 100644 index 00000000000..ecdd0ceb2d4 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_day.go @@ -0,0 +1,39 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + "fmt" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type DayArguments[K any] struct { + Time ottl.TimeGetter[K] +} + +func NewDayFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("Day", &DayArguments[K]{}, createDayFunction[K]) +} + +func createDayFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*DayArguments[K]) + + if !ok { + return nil, fmt.Errorf("DayFactory args must be of type *DayArguments[K]") + } + + return Day(args.Time) +} + +func Day[K any](time ottl.TimeGetter[K]) (ottl.ExprFunc[K], error) { + return func(ctx context.Context, tCtx K) (any, error) { + t, err := time.Get(ctx, tCtx) + if err != nil { + return nil, err + } + return int64(t.Day()), nil + }, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_decode.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_decode.go new file mode 100644 index 00000000000..d6dc5efc036 --- /dev/null +++ 
b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_decode.go @@ -0,0 +1,103 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + "encoding/base64" + "fmt" + "strings" + + "go.opentelemetry.io/collector/pdata/pcommon" + "golang.org/x/text/encoding" + "golang.org/x/text/encoding/ianaindex" + + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/textutils" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type DecodeArguments[K any] struct { + Target ottl.Getter[K] + Encoding string +} + +func NewDecodeFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("Decode", &DecodeArguments[K]{}, createDecodeFunction[K]) +} + +func createDecodeFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*DecodeArguments[K]) + if !ok { + return nil, fmt.Errorf("DecodeFactory args must be of type *DecodeArguments[K]") + } + + return Decode(args.Target, args.Encoding) +} + +func Decode[K any](target ottl.Getter[K], encoding string) (ottl.ExprFunc[K], error) { + return func(ctx context.Context, tCtx K) (any, error) { + val, err := target.Get(ctx, tCtx) + if err != nil { + return nil, err + } + var stringValue string + + switch v := val.(type) { + case []byte: + stringValue = string(v) + case *string: + stringValue = *v + case string: + stringValue = v + case pcommon.ByteSlice: + stringValue = string(v.AsRaw()) + case *pcommon.ByteSlice: + stringValue = string(v.AsRaw()) + case pcommon.Value: + stringValue = v.AsString() + case *pcommon.Value: + stringValue = v.AsString() + default: + return nil, fmt.Errorf("unsupported type provided to Decode function: %T", v) + } + + switch encoding { + case "base64": + // base64 is not in IANA index, so we have to deal with this encoding separately + decodedBytes, err := base64.StdEncoding.DecodeString(stringValue) + if err != nil { + return nil, fmt.Errorf("could not decode: %w", err) + } + return string(decodedBytes), nil + default: + e, err := getEncoding(encoding) + if err != nil { + return nil, err + } + + decodedString, err := e.NewDecoder().String(stringValue) + if err != nil { + return nil, fmt.Errorf("could not decode: %w", err) + } + + return decodedString, nil + } + }, nil +} + +func getEncoding(encoding string) (encoding.Encoding, error) { + if e, ok := textutils.EncodingOverridesMap.Get(strings.ToLower(encoding)); ok { + return e, nil + } + e, err := ianaindex.IANA.Encoding(encoding) + if err != nil { + return nil, fmt.Errorf("could not get encoding for %s: %w", encoding, err) + } + if e == nil { + // for some encodings a nil error and a nil encoding is returned, so we need to double check + // if the encoding is actually set here + return nil, fmt.Errorf("no decoder available for encoding: %s", encoding) + } + return e, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_duration.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_duration.go index e125ff2b90d..27138583fa3 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_duration.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_duration.go @@ -18,6 +18,7 @@ type DurationArguments[K any] struct { 
func NewDurationFactory[K any]() ottl.Factory[K] { return ottl.NewFactory("Duration", &DurationArguments[K]{}, createDurationFunction[K]) } + func createDurationFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { args, ok := oArgs.(*DurationArguments[K]) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_extract_grok_patterns.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_extract_grok_patterns.go new file mode 100644 index 00000000000..78f50866a0c --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_extract_grok_patterns.go @@ -0,0 +1,103 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + "fmt" + "strings" + + "github.com/elastic/go-grok" + "go.opentelemetry.io/collector/pdata/pcommon" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type ExtractGrokPatternsArguments[K any] struct { + Target ottl.StringGetter[K] + Pattern string + NamedCapturesOnly ottl.Optional[bool] + PatternDefinitions ottl.Optional[[]string] +} + +func NewExtractGrokPatternsFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("ExtractGrokPatterns", &ExtractGrokPatternsArguments[K]{}, createExtractGrokPatternsFunction[K]) +} + +func createExtractGrokPatternsFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*ExtractGrokPatternsArguments[K]) + + if !ok { + return nil, fmt.Errorf("ExtractGrokPatternsFactory args must be of type *ExtractGrokPatternsArguments[K]") + } + + return extractGrokPatterns(args.Target, args.Pattern, args.NamedCapturesOnly, args.PatternDefinitions) +} + +func extractGrokPatterns[K any](target ottl.StringGetter[K], pattern string, nco ottl.Optional[bool], patternDefinitions ottl.Optional[[]string]) (ottl.ExprFunc[K], error) { + g, err := grok.NewComplete() + if err != nil { + return nil, fmt.Errorf("failed to initialize grok parser: %w", err) + } + namedCapturesOnly := !nco.IsEmpty() && nco.Get() + + if !patternDefinitions.IsEmpty() { + for i, patternDefinition := range patternDefinitions.Get() { + // split pattern in format key=val + parts := strings.SplitN(patternDefinition, "=", 2) + if len(parts) == 1 { + trimmedPattern := patternDefinition + if len(patternDefinition) > 20 { + trimmedPattern = fmt.Sprintf("%s...", patternDefinition[:17]) // keep whole string 20 characters long including ... 
+ } + return nil, fmt.Errorf("pattern %q supplied to ExtractGrokPatterns at index %d has incorrect format, expecting PATTERNNAME=pattern definition", trimmedPattern, i) + } + + if strings.ContainsRune(parts[0], ':') { + return nil, fmt.Errorf("pattern ID %q should not contain ':'", parts[0]) + } + + err = g.AddPattern(parts[0], parts[1]) + if err != nil { + return nil, fmt.Errorf("failed to add pattern %q=%q: %w", parts[0], parts[1], err) + } + } + } + err = g.Compile(pattern, namedCapturesOnly) + if err != nil { + return nil, fmt.Errorf("the pattern supplied to ExtractGrokPatterns is not a valid pattern: %w", err) + } + + if namedCapturesOnly && !g.HasCaptureGroups() { + return nil, fmt.Errorf("at least 1 named capture group must be supplied in the given regex") + } + + return func(ctx context.Context, tCtx K) (any, error) { + val, err := target.Get(ctx, tCtx) + if err != nil { + return nil, err + } + + matches, err := g.ParseTypedString(val) + if err != nil { + return nil, err + } + + result := pcommon.NewMap() + for k, v := range matches { + switch val := v.(type) { + case bool: + result.PutBool(k, val) + case float64: + result.PutDouble(k, val) + case int: + result.PutInt(k, int64(val)) + case string: + result.PutStr(k, val) + } + } + + return result, err + }, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_flatten.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_flatten.go index 44a97b8095b..b651cce4ce5 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_flatten.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_flatten.go @@ -37,8 +37,8 @@ func flatten[K any](target ottl.PMapGetter[K], p ottl.Optional[string], d ottl.O depth := int64(math.MaxInt64) if !d.IsEmpty() { depth = d.Get() - if depth < 0 { - return nil, fmt.Errorf("invalid depth for flatten function, %d cannot be negative", depth) + if depth < 1 { + return nil, fmt.Errorf("invalid depth '%d' for flatten function, must be greater than 0", depth) } } @@ -54,28 +54,45 @@ func flatten[K any](target ottl.PMapGetter[K], p ottl.Optional[string], d ottl.O } result := pcommon.NewMap() - flattenHelper(m, result, prefix, 0, depth) - result.CopyTo(m) + flattenMap(m, result, prefix, 0, depth) + result.MoveTo(m) return nil, nil }, nil } -func flattenHelper(m pcommon.Map, result pcommon.Map, prefix string, currentDepth, maxDepth int64) { +func flattenMap(m pcommon.Map, result pcommon.Map, prefix string, currentDepth, maxDepth int64) { if len(prefix) > 0 { prefix += "." 
} m.Range(func(k string, v pcommon.Value) bool { - switch { - case v.Type() == pcommon.ValueTypeMap && currentDepth < maxDepth: - flattenHelper(v.Map(), result, prefix+k, currentDepth+1, maxDepth) - case v.Type() == pcommon.ValueTypeSlice: - for i := 0; i < v.Slice().Len(); i++ { + return flattenValue(k, v, currentDepth, maxDepth, result, prefix) + }) +} + +func flattenSlice(s pcommon.Slice, result pcommon.Map, prefix string, currentDepth int64, maxDepth int64) { + for i := 0; i < s.Len(); i++ { + flattenValue(fmt.Sprintf("%d", i), s.At(i), currentDepth+1, maxDepth, result, prefix) + } +} + +func flattenValue(k string, v pcommon.Value, currentDepth int64, maxDepth int64, result pcommon.Map, prefix string) bool { + switch { + case v.Type() == pcommon.ValueTypeMap && currentDepth < maxDepth: + flattenMap(v.Map(), result, prefix+k, currentDepth+1, maxDepth) + case v.Type() == pcommon.ValueTypeSlice && currentDepth < maxDepth: + for i := 0; i < v.Slice().Len(); i++ { + switch { + case v.Slice().At(i).Type() == pcommon.ValueTypeMap && currentDepth+1 < maxDepth: + flattenMap(v.Slice().At(i).Map(), result, fmt.Sprintf("%v.%v", prefix+k, i), currentDepth+2, maxDepth) + case v.Slice().At(i).Type() == pcommon.ValueTypeSlice && currentDepth+1 < maxDepth: + flattenSlice(v.Slice().At(i).Slice(), result, fmt.Sprintf("%v.%v", prefix+k, i), currentDepth+2, maxDepth) + default: v.Slice().At(i).CopyTo(result.PutEmpty(fmt.Sprintf("%v.%v", prefix+k, i))) } - default: - v.CopyTo(result.PutEmpty(prefix + k)) } - return true - }) + default: + v.CopyTo(result.PutEmpty(prefix + k)) + } + return true } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_fnv.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_fnv.go index 5df53a4737e..c9a29ca5739 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_fnv.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_fnv.go @@ -30,7 +30,6 @@ func createFnvFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ott } func FNVHashString[K any](target ottl.StringGetter[K]) (ottl.ExprFunc[K], error) { - return func(ctx context.Context, tCtx K) (any, error) { val, err := target.Get(ctx, tCtx) if err != nil { diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_format.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_format.go new file mode 100644 index 00000000000..86f3dc83046 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_format.go @@ -0,0 +1,45 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + "fmt" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type FormatArguments[K any] struct { + Format string + Vals []ottl.Getter[K] +} + +func NewFormatFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("Format", &FormatArguments[K]{}, createFormatFunction[K]) +} + +func createFormatFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*FormatArguments[K]) + if !ok { + return nil, fmt.Errorf("FormatFactory args must be of type *FormatArguments[K]") + } + + return format(args.Format, 
args.Vals), nil +} + +func format[K any](formatString string, vals []ottl.Getter[K]) ottl.ExprFunc[K] { + return func(ctx context.Context, tCtx K) (any, error) { + formatArgs := make([]any, 0, len(vals)) + for _, arg := range vals { + formatArg, err := arg.Get(ctx, tCtx) + if err != nil { + return nil, err + } + + formatArgs = append(formatArgs, formatArg) + } + + return fmt.Sprintf(formatString, formatArgs...), nil + } +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_formattime.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_formattime.go new file mode 100644 index 00000000000..e73628e76c3 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_formattime.go @@ -0,0 +1,51 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + "errors" + + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/timeutils" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type FormatTimeArguments[K any] struct { + Time ottl.TimeGetter[K] + Format string +} + +func NewFormatTimeFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("FormatTime", &FormatTimeArguments[K]{}, createFormatTimeFunction[K]) +} + +func createFormatTimeFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*FormatTimeArguments[K]) + + if !ok { + return nil, errors.New("FormatTimeFactory args must be of type *FormatTimeArguments[K]") + } + + return FormatTime(args.Time, args.Format) +} + +func FormatTime[K any](timeValue ottl.TimeGetter[K], format string) (ottl.ExprFunc[K], error) { + if format == "" { + return nil, errors.New("format cannot be nil") + } + + gotimeFormat, err := timeutils.StrptimeToGotime(format) + if err != nil { + return nil, err + } + + return func(ctx context.Context, tCtx K) (any, error) { + t, err := timeValue.Get(ctx, tCtx) + if err != nil { + return nil, err + } + + return t.Format(gotimeFormat), nil + }, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_get_xml.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_get_xml.go new file mode 100644 index 00000000000..c344dd5b8ef --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_get_xml.go @@ -0,0 +1,70 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + "fmt" + + "github.com/antchfx/xmlquery" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type GetXMLArguments[K any] struct { + Target ottl.StringGetter[K] + XPath string +} + +func NewGetXMLFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("GetXML", &GetXMLArguments[K]{}, createGetXMLFunction[K]) +} + +func createGetXMLFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*GetXMLArguments[K]) + + if !ok { + return nil, fmt.Errorf("GetXML args must be of type *GetXMLAguments[K]") + } + + if err := validateXPath(args.XPath); err != nil { + return nil, err + } + + return 
getXML(args.Target, args.XPath), nil +} + +// getXML returns a XML formatted string that is a result of matching elements from the target XML. +func getXML[K any](target ottl.StringGetter[K], xPath string) ottl.ExprFunc[K] { + return func(ctx context.Context, tCtx K) (any, error) { + var doc *xmlquery.Node + if targetVal, err := target.Get(ctx, tCtx); err != nil { + return nil, err + } else if doc, err = parseNodesXML(targetVal); err != nil { + return nil, err + } + + nodes, err := xmlquery.QueryAll(doc, xPath) + if err != nil { + return nil, err + } + + result := &xmlquery.Node{Type: xmlquery.DocumentNode} + for _, n := range nodes { + switch n.Type { + case xmlquery.ElementNode, xmlquery.TextNode: + xmlquery.AddChild(result, n) + case xmlquery.AttributeNode, xmlquery.CharDataNode: + // get the value + xmlquery.AddChild(result, &xmlquery.Node{ + Type: xmlquery.TextNode, + Data: n.InnerText(), + }) + default: + continue + } + } + return result.OutputXML(false), nil + } +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_hex.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_hex.go new file mode 100644 index 00000000000..dd87e188779 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_hex.go @@ -0,0 +1,40 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + "encoding/hex" + "fmt" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type HexArguments[K any] struct { + Target ottl.ByteSliceLikeGetter[K] +} + +func NewHexFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("Hex", &HexArguments[K]{}, createHexFunction[K]) +} + +func createHexFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*HexArguments[K]) + + if !ok { + return nil, fmt.Errorf("HexFactory args must be of type *HexArguments[K]") + } + + return Hex(args.Target) +} + +func Hex[K any](target ottl.ByteSliceLikeGetter[K]) (ottl.ExprFunc[K], error) { + return func(ctx context.Context, tCtx K) (any, error) { + value, err := target.Get(ctx, tCtx) + if err != nil { + return nil, err + } + return hex.EncodeToString(value), nil + }, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_hour.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_hour.go index 3e30fbd1087..5bc3e89600a 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_hour.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_hour.go @@ -17,6 +17,7 @@ type HourArguments[K any] struct { func NewHourFactory[K any]() ottl.Factory[K] { return ottl.NewFactory("Hour", &HourArguments[K]{}, createHourFunction[K]) } + func createHourFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { args, ok := oArgs.(*HourArguments[K]) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_hours.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_hours.go index 66ab2dbcad1..580091ae4be 100644 --- 
a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_hours.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_hours.go @@ -17,6 +17,7 @@ type HoursArguments[K any] struct { func NewHoursFactory[K any]() ottl.Factory[K] { return ottl.NewFactory("Hours", &HoursArguments[K]{}, createHoursFunction[K]) } + func createHoursFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { args, ok := oArgs.(*HoursArguments[K]) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_insert_xml.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_insert_xml.go new file mode 100644 index 00000000000..778b16938a0 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_insert_xml.go @@ -0,0 +1,75 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + "errors" + "fmt" + + "github.com/antchfx/xmlquery" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type InsertXMLArguments[K any] struct { + Target ottl.StringGetter[K] + XPath string + SubDocument ottl.StringGetter[K] +} + +func NewInsertXMLFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("InsertXML", &InsertXMLArguments[K]{}, createInsertXMLFunction[K]) +} + +func createInsertXMLFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*InsertXMLArguments[K]) + + if !ok { + return nil, fmt.Errorf("InsertXML args must be of type *InsertXMLAguments[K]") + } + + if err := validateXPath(args.XPath); err != nil { + return nil, err + } + + return insertXML(args.Target, args.XPath, args.SubDocument), nil +} + +// insertXML returns a XML formatted string that is a result of inserting another XML document into +// the content of each selected target element. +func insertXML[K any](target ottl.StringGetter[K], xPath string, subGetter ottl.StringGetter[K]) ottl.ExprFunc[K] { + return func(ctx context.Context, tCtx K) (any, error) { + var doc *xmlquery.Node + if targetVal, err := target.Get(ctx, tCtx); err != nil { + return nil, err + } else if doc, err = parseNodesXML(targetVal); err != nil { + return nil, err + } + + var subDoc *xmlquery.Node + if subDocVal, err := subGetter.Get(ctx, tCtx); err != nil { + return nil, err + } else if subDoc, err = parseNodesXML(subDocVal); err != nil { + return nil, err + } + + nodes, errs := xmlquery.QueryAll(doc, xPath) + for _, n := range nodes { + switch n.Type { + case xmlquery.ElementNode, xmlquery.DocumentNode: + var nextSibling *xmlquery.Node + for c := subDoc.FirstChild; c != nil; c = nextSibling { + // AddChild updates c.NextSibling but not subDoc.FirstChild + // so we need to get the handle to it prior to the update. 
+ nextSibling = c.NextSibling + xmlquery.AddChild(n, c) + } + default: + errs = errors.Join(errs, fmt.Errorf("InsertXML XPath selected non-element: %q", n.Data)) + } + } + return doc.OutputXML(false), errs + } +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_is_list.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_is_list.go new file mode 100644 index 00000000000..137ccb47070 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_is_list.go @@ -0,0 +1,53 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + "fmt" + + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type IsListArguments[K any] struct { + Target ottl.Getter[K] +} + +func NewIsListFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("IsList", &IsListArguments[K]{}, createIsListFunction[K]) +} + +func createIsListFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*IsListArguments[K]) + + if !ok { + return nil, fmt.Errorf("IsListFactory args must be of type *IsListArguments[K]") + } + + return isList(args.Target), nil +} + +func isList[K any](target ottl.Getter[K]) ottl.ExprFunc[K] { + return func(ctx context.Context, tCtx K) (any, error) { + val, err := target.Get(ctx, tCtx) + if err != nil { + return false, err + } + + switch valType := val.(type) { + case pcommon.Value: + return valType.Type() == pcommon.ValueTypeSlice, nil + + case pcommon.Slice, plog.LogRecordSlice, plog.ResourceLogsSlice, plog.ScopeLogsSlice, pmetric.ExemplarSlice, pmetric.ExponentialHistogramDataPointSlice, pmetric.HistogramDataPointSlice, pmetric.MetricSlice, pmetric.NumberDataPointSlice, pmetric.ResourceMetricsSlice, pmetric.ScopeMetricsSlice, pmetric.SummaryDataPointSlice, pmetric.SummaryDataPointValueAtQuantileSlice, ptrace.ResourceSpansSlice, ptrace.ScopeSpansSlice, ptrace.SpanEventSlice, ptrace.SpanLinkSlice, ptrace.SpanSlice, []string, []bool, []int64, []float64, [][]byte, []any: + return true, nil + } + + return false, nil + } +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_is_root_span.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_is_root_span.go new file mode 100644 index 00000000000..1aaba5a7bb4 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_is_root_span.go @@ -0,0 +1,25 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan" +) + +func NewIsRootSpanFactory() ottl.Factory[ottlspan.TransformContext] { + return ottl.NewFactory("IsRootSpan", nil, createIsRootSpanFunction) +} + +func createIsRootSpanFunction(_ ottl.FunctionContext, _ ottl.Arguments) 
(ottl.ExprFunc[ottlspan.TransformContext], error) { + return isRootSpan() +} + +func isRootSpan() (ottl.ExprFunc[ottlspan.TransformContext], error) { + return func(_ context.Context, tCtx ottlspan.TransformContext) (any, error) { + return tCtx.GetSpan().ParentSpanID().IsEmpty(), nil + }, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_keep_keys.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_keep_keys.go index 6cae0c4c379..8370414c6de 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_keep_keys.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_keep_keys.go @@ -42,7 +42,7 @@ func keepKeys[K any](target ottl.PMapGetter[K], keys []string) ottl.ExprFunc[K] if err != nil { return nil, err } - val.RemoveIf(func(key string, value pcommon.Value) bool { + val.RemoveIf(func(key string, _ pcommon.Value) bool { _, ok := keySet[key] return !ok }) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_keep_matching_keys.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_keep_matching_keys.go new file mode 100644 index 00000000000..c5878d46eff --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_keep_matching_keys.go @@ -0,0 +1,51 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" +import ( + "fmt" + "regexp" + + "go.opentelemetry.io/collector/pdata/pcommon" + "golang.org/x/net/context" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type KeepMatchingKeysArguments[K any] struct { + Target ottl.PMapGetter[K] + Pattern string +} + +func NewKeepMatchingKeysFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("keep_matching_keys", &KeepMatchingKeysArguments[K]{}, createKeepMatchingKeysFunction[K]) +} + +func createKeepMatchingKeysFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*KeepMatchingKeysArguments[K]) + + if !ok { + return nil, fmt.Errorf("KeepMatchingKeysFactory args must be of type *KeepMatchingKeysArguments[K") + } + + return keepMatchingKeys(args.Target, args.Pattern) +} + +func keepMatchingKeys[K any](target ottl.PMapGetter[K], pattern string) (ottl.ExprFunc[K], error) { + compiledPattern, err := regexp.Compile(pattern) + if err != nil { + return nil, fmt.Errorf("the regex pattern provided to keep_matching_keys is not a valid pattern: %w", err) + } + + return func(ctx context.Context, tCtx K) (any, error) { + val, err := target.Get(ctx, tCtx) + if err != nil { + return nil, err + } + + val.RemoveIf(func(key string, _ pcommon.Value) bool { + return !compiledPattern.MatchString(key) + }) + return nil, nil + }, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_len.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_len.go index 35a8aaf2214..d45947cfc4e 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_len.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_len.go @@ -5,7 +5,7 @@ package ottlfuncs // import 
"github.com/open-telemetry/opentelemetry-collector-c import ( "context" - "fmt" + "errors" "reflect" "go.opentelemetry.io/collector/pdata/pcommon" @@ -32,7 +32,7 @@ func createLenFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ott args, ok := oArgs.(*LenArguments[K]) if !ok { - return nil, fmt.Errorf("LenFactory args must be of type *LenArguments[K]") + return nil, errors.New("LenFactory args must be of type *LenArguments[K]") } return computeLen(args.Target), nil @@ -56,7 +56,7 @@ func computeLen[K any](target ottl.Getter[K]) ottl.ExprFunc[K] { case pcommon.ValueTypeMap: return int64(valType.Map().Len()), nil } - return nil, fmt.Errorf(typeError) + return nil, errors.New(typeError) case pcommon.Map: return int64(valType.Len()), nil case pcommon.Slice: @@ -106,6 +106,6 @@ func computeLen[K any](target ottl.Getter[K]) ottl.ExprFunc[K] { return int64(v.Len()), nil } - return nil, fmt.Errorf(typeError) + return nil, errors.New(typeError) } } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_limit.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_limit.go index 81c3a4a7f4b..0010c0e3d5e 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_limit.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_limit.go @@ -64,7 +64,7 @@ func limit[K any](target ottl.PMapGetter[K], limit int64, priorityKeys []string) } } - val.RemoveIf(func(key string, value pcommon.Value) bool { + val.RemoveIf(func(key string, _ pcommon.Value) bool { if _, ok := keep[key]; ok { return false } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_md5.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_md5.go new file mode 100644 index 00000000000..280ac5c8bcb --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_md5.go @@ -0,0 +1,46 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + "crypto/md5" // #nosec + "encoding/hex" + "fmt" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type MD5Arguments[K any] struct { + Target ottl.StringGetter[K] +} + +func NewMD5Factory[K any]() ottl.Factory[K] { + return ottl.NewFactory("MD5", &MD5Arguments[K]{}, createMD5Function[K]) +} + +func createMD5Function[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*MD5Arguments[K]) + + if !ok { + return nil, fmt.Errorf("MD5Factory args must be of type *MD5Arguments[K]") + } + + return MD5HashString(args.Target) +} + +func MD5HashString[K any](target ottl.StringGetter[K]) (ottl.ExprFunc[K], error) { + return func(ctx context.Context, tCtx K) (any, error) { + val, err := target.Get(ctx, tCtx) + if err != nil { + return nil, err + } + hash := md5.New() // #nosec + _, err = hash.Write([]byte(val)) + if err != nil { + return nil, err + } + return hex.EncodeToString(hash.Sum(nil)), nil + }, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_microseconds.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_microseconds.go index 0855efaca05..f76616782b9 100644 --- 
a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_microseconds.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_microseconds.go @@ -17,6 +17,7 @@ type MicrosecondsArguments[K any] struct { func NewMicrosecondsFactory[K any]() ottl.Factory[K] { return ottl.NewFactory("Microseconds", &MicrosecondsArguments[K]{}, createMicrosecondsFunction[K]) } + func createMicrosecondsFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { args, ok := oArgs.(*MicrosecondsArguments[K]) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_milliseconds.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_milliseconds.go index da787e0e157..f0d605d5ee5 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_milliseconds.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_milliseconds.go @@ -17,6 +17,7 @@ type MillisecondsArguments[K any] struct { func NewMillisecondsFactory[K any]() ottl.Factory[K] { return ottl.NewFactory("Milliseconds", &MillisecondsArguments[K]{}, createMillisecondsFunction[K]) } + func createMillisecondsFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { args, ok := oArgs.(*MillisecondsArguments[K]) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_minute.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_minute.go new file mode 100644 index 00000000000..f2ce05a179d --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_minute.go @@ -0,0 +1,39 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + "fmt" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type MinuteArguments[K any] struct { + Time ottl.TimeGetter[K] +} + +func NewMinuteFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("Minute", &MinuteArguments[K]{}, createMinuteFunction[K]) +} + +func createMinuteFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*MinuteArguments[K]) + + if !ok { + return nil, fmt.Errorf("MinuteFactory args must be of type *MinuteArguments[K]") + } + + return Minute(args.Time) +} + +func Minute[K any](time ottl.TimeGetter[K]) (ottl.ExprFunc[K], error) { + return func(ctx context.Context, tCtx K) (any, error) { + t, err := time.Get(ctx, tCtx) + if err != nil { + return nil, err + } + return int64(t.Minute()), nil + }, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_minutes.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_minutes.go index 5048befc572..557fa972ed4 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_minutes.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_minutes.go @@ -17,6 +17,7 @@ type MinutesArguments[K any] struct { func NewMinutesFactory[K any]() ottl.Factory[K] { return ottl.NewFactory("Minutes", &MinutesArguments[K]{}, createMinutesFunction[K]) } 
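// Illustrative aside (not part of the upstream change): the converters in this sync all
// share one three-part shape — an Arguments struct, a factory registered by name, and a
// create function that type-asserts ottl.Arguments before delegating. A hypothetical
// Weekday converter built on the same pattern, using only the ottl APIs visible in the
// surrounding diffs, might look like:
//
//	type WeekdayArguments[K any] struct {
//		Time ottl.TimeGetter[K]
//	}
//
//	func NewWeekdayFactory[K any]() ottl.Factory[K] {
//		return ottl.NewFactory("Weekday", &WeekdayArguments[K]{}, createWeekdayFunction[K])
//	}
//
//	func createWeekdayFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) {
//		args, ok := oArgs.(*WeekdayArguments[K])
//		if !ok {
//			return nil, errors.New("WeekdayFactory args must be of type *WeekdayArguments[K]")
//		}
//		return func(ctx context.Context, tCtx K) (any, error) {
//			t, err := args.Time.Get(ctx, tCtx)
//			if err != nil {
//				return nil, err
//			}
//			return int64(t.Weekday()), nil // time.Weekday: Sunday == 0
//		}, nil
//	}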
+ func createMinutesFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { args, ok := oArgs.(*MinutesArguments[K]) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_month.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_month.go new file mode 100644 index 00000000000..a4ab013d111 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_month.go @@ -0,0 +1,39 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + "fmt" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type MonthArguments[K any] struct { + Time ottl.TimeGetter[K] +} + +func NewMonthFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("Month", &MonthArguments[K]{}, createMonthFunction[K]) +} + +func createMonthFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*MonthArguments[K]) + + if !ok { + return nil, fmt.Errorf("MonthFactory args must be of type *MonthArguments[K]") + } + + return Month(args.Time) +} + +func Month[K any](time ottl.TimeGetter[K]) (ottl.ExprFunc[K], error) { + return func(ctx context.Context, tCtx K) (any, error) { + t, err := time.Get(ctx, tCtx) + if err != nil { + return nil, err + } + return int64(t.Month()), nil + }, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_nanosecond.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_nanosecond.go new file mode 100644 index 00000000000..f35a72e2cfd --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_nanosecond.go @@ -0,0 +1,39 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + "fmt" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type NanosecondArguments[K any] struct { + Time ottl.TimeGetter[K] +} + +func NewNanosecondFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("Nanosecond", &NanosecondArguments[K]{}, createNanosecondFunction[K]) +} + +func createNanosecondFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*NanosecondArguments[K]) + + if !ok { + return nil, fmt.Errorf("NanosecondFactory args must be of type *NanosecondArguments[K]") + } + + return Nanosecond(args.Time) +} + +func Nanosecond[K any](time ottl.TimeGetter[K]) (ottl.ExprFunc[K], error) { + return func(ctx context.Context, tCtx K) (any, error) { + t, err := time.Get(ctx, tCtx) + if err != nil { + return nil, err + } + return int64(t.Nanosecond()), nil + }, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_nanoseconds.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_nanoseconds.go index 2bb2cd7d1dd..32769626750 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_nanoseconds.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_nanoseconds.go 
@@ -17,6 +17,7 @@ type NanosecondsArguments[K any] struct { func NewNanosecondsFactory[K any]() ottl.Factory[K] { return ottl.NewFactory("Nanoseconds", &NanosecondsArguments[K]{}, createNanosecondsFunction[K]) } + func createNanosecondsFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { args, ok := oArgs.(*NanosecondsArguments[K]) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_now.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_now.go index 51815d3f686..698a274a373 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_now.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_now.go @@ -11,7 +11,7 @@ import ( ) func now[K any]() (ottl.ExprFunc[K], error) { - return func(ctx context.Context, tCtx K) (any, error) { + return func(_ context.Context, _ K) (any, error) { return time.Now(), nil }, nil } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_parse_json.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_parse_json.go index 8cca75f7160..011437424c1 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_parse_json.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_parse_json.go @@ -7,7 +7,7 @@ import ( "context" "fmt" - jsoniter "github.com/json-iterator/go" + "github.com/goccy/go-json" "go.opentelemetry.io/collector/pdata/pcommon" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" @@ -31,7 +31,7 @@ func createParseJSONFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments return parseJSON(args.Target), nil } -// parseJSON returns a `pcommon.Map` struct that is a result of parsing the target string as JSON +// parseJSON returns a `pcommon.Map` or `pcommon.Slice` struct that is a result of parsing the target string as JSON // Each JSON type is converted into a `pdata.Value` using the following map: // // JSON boolean -> bool @@ -46,13 +46,22 @@ func parseJSON[K any](target ottl.StringGetter[K]) ottl.ExprFunc[K] { if err != nil { return nil, err } - var parsedValue map[string]any - err = jsoniter.UnmarshalFromString(targetVal, &parsedValue) + var parsedValue any + err = json.Unmarshal([]byte(targetVal), &parsedValue) if err != nil { return nil, err } - result := pcommon.NewMap() - err = result.FromRaw(parsedValue) - return result, err + switch v := parsedValue.(type) { + case []any: + result := pcommon.NewSlice() + err = result.FromRaw(v) + return result, err + case map[string]any: + result := pcommon.NewMap() + err = result.FromRaw(v) + return result, err + default: + return nil, fmt.Errorf("could not convert parsed value of type %T to JSON object", v) + } } } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_parse_simplified_xml.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_parse_simplified_xml.go new file mode 100644 index 00000000000..7e4f1e2753f --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_parse_simplified_xml.go @@ -0,0 +1,134 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import 
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + "fmt" + + "github.com/antchfx/xmlquery" + "go.opentelemetry.io/collector/pdata/pcommon" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type ParseSimplifiedXMLArguments[K any] struct { + Target ottl.StringGetter[K] +} + +func NewParseSimplifiedXMLFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("ParseSimplifiedXML", &ParseSimplifiedXMLArguments[K]{}, createParseSimplifiedXMLFunction[K]) +} + +func createParseSimplifiedXMLFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*ParseSimplifiedXMLArguments[K]) + + if !ok { + return nil, fmt.Errorf("ParseSimplifiedXML args must be of type *ParseSimplifiedXMLAguments[K]") + } + + return parseSimplifiedXML(args.Target), nil +} + +// The `ParseSimplifiedXML` Converter returns a `pcommon.Map` struct that is the result of parsing the target +// string without preservation of attributes or extraneous text content. +func parseSimplifiedXML[K any](target ottl.StringGetter[K]) ottl.ExprFunc[K] { + return func(ctx context.Context, tCtx K) (any, error) { + var doc *xmlquery.Node + if targetVal, err := target.Get(ctx, tCtx); err != nil { + return nil, err + } else if doc, err = parseNodesXML(targetVal); err != nil { + return nil, err + } + + docMap := pcommon.NewMap() + parseElement(doc, &docMap) + return docMap, nil + } +} + +func parseElement(parent *xmlquery.Node, parentMap *pcommon.Map) { + // Count the number of each element tag so we know whether it will be a member of a slice or not + childTags := make(map[string]int) + for child := parent.FirstChild; child != nil; child = child.NextSibling { + if child.Type != xmlquery.ElementNode { + continue + } + childTags[child.Data]++ + } + if len(childTags) == 0 { + return + } + + // Convert the children, now knowing whether they will be a member of a slice or not + for child := parent.FirstChild; child != nil; child = child.NextSibling { + if child.Type != xmlquery.ElementNode || child.FirstChild == nil { + continue + } + + leafValue := leafValueFromElement(child) + + // Slice of the same element + if childTags[child.Data] > 1 { + // Get or create the slice of children + var childrenSlice pcommon.Slice + childrenValue, ok := parentMap.Get(child.Data) + if ok { + childrenSlice = childrenValue.Slice() + } else { + childrenSlice = parentMap.PutEmptySlice(child.Data) + } + + // Add the child's text content to the slice + if leafValue != "" { + childrenSlice.AppendEmpty().SetStr(leafValue) + continue + } + + // Parse the child to make sure there's something to add + childMap := pcommon.NewMap() + parseElement(child, &childMap) + if childMap.Len() == 0 { + continue + } + + sliceValue := childrenSlice.AppendEmpty() + sliceMap := sliceValue.SetEmptyMap() + childMap.CopyTo(sliceMap) + continue + } + + if leafValue != "" { + parentMap.PutStr(child.Data, leafValue) + continue + } + + // Child will be a map + childMap := pcommon.NewMap() + parseElement(child, &childMap) + if childMap.Len() == 0 { + continue + } + + childMap.CopyTo(parentMap.PutEmptyMap(child.Data)) + } +} + +func leafValueFromElement(node *xmlquery.Node) string { + // First check if there are any child elements. If there are, ignore any extraneous text. 
+ for child := node.FirstChild; child != nil; child = child.NextSibling { + if child.Type == xmlquery.ElementNode { + return "" + } + } + + // No child elements, so return the first text or CDATA content + for child := node.FirstChild; child != nil; child = child.NextSibling { + switch child.Type { + case xmlquery.TextNode, xmlquery.CharDataNode: + return child.Data + } + } + return "" +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_remove_xml.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_remove_xml.go new file mode 100644 index 00000000000..b45ee74fcd1 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_remove_xml.go @@ -0,0 +1,95 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + "fmt" + "strings" + + "github.com/antchfx/xmlquery" + "github.com/antchfx/xpath" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type RemoveXMLArguments[K any] struct { + Target ottl.StringGetter[K] + XPath string +} + +func NewRemoveXMLFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("RemoveXML", &RemoveXMLArguments[K]{}, createRemoveXMLFunction[K]) +} + +func createRemoveXMLFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*RemoveXMLArguments[K]) + + if !ok { + return nil, fmt.Errorf("RemoveXML args must be of type *RemoveXMLArguments[K]") + } + + if err := validateXPath(args.XPath); err != nil { + return nil, err + } + + return removeXML(args.Target, args.XPath), nil +} + +// removeXML returns an XML formatted string that is a result of removing all matching nodes from the target XML. +// This currently supports removal of elements, attributes, text values, comments, and CharData. +func removeXML[K any](target ottl.StringGetter[K], xPath string) ottl.ExprFunc[K] { + return func(ctx context.Context, tCtx K) (any, error) { + var doc *xmlquery.Node + if targetVal, err := target.Get(ctx, tCtx); err != nil { + return nil, err + } else if doc, err = parseNodesXML(targetVal); err != nil { + return nil, err + } + + nodes, err := xmlquery.QueryAll(doc, xPath) + if err != nil { + return nil, err + } + + for _, n := range nodes { + switch n.Type { + case xmlquery.ElementNode: + xmlquery.RemoveFromTree(n) + case xmlquery.AttributeNode: + n.Parent.RemoveAttr(n.Data) + case xmlquery.TextNode: + n.Data = "" + case xmlquery.CommentNode: + xmlquery.RemoveFromTree(n) + case xmlquery.CharDataNode: + xmlquery.RemoveFromTree(n) + } + } + return doc.OutputXML(false), nil + } +} + +func validateXPath(xPath string) error { + _, err := xpath.Compile(xPath) + if err != nil { + return fmt.Errorf("invalid xpath: %w", err) + } + return nil +} + +// Aside from parsing the XML document, this function also ensures that +// the XML declaration is included in the result only if it was present in
// the original document.
+func parseNodesXML(targetVal string) (*xmlquery.Node, error) { + preserveDeclearation := strings.HasPrefix(targetVal, "<?xml") […] > limit { value.SetStr(stringVal[:limit]) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_truncate_time.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_truncate_time.go index 3d122dfb874..16705353813 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_truncate_time.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_truncate_time.go @@ -18,6 +18,7 @@ type TruncateTimeArguments[K any] struct { func NewTruncateTimeFactory[K any]() ottl.Factory[K] { return ottl.NewFactory("TruncateTime", &TruncateTimeArguments[K]{}, createTruncateTimeFunction[K]) } + func createTruncateTimeFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { args, ok := oArgs.(*TruncateTimeArguments[K]) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_unix.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_unix.go new file mode 100644 index 00000000000..47c5065ff0e --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_unix.go @@ -0,0 +1,51 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + "fmt" + "time" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type UnixArguments[K any] struct { + Seconds ottl.IntGetter[K] + Nanoseconds ottl.Optional[ottl.IntGetter[K]] +} + +func NewUnixFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("Unix", &UnixArguments[K]{}, createUnixFunction[K]) +} + +func createUnixFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*UnixArguments[K]) + + if !ok { + return nil, fmt.Errorf("UnixFactory args must be of type *UnixArguments[K]") + } + + return Unix(args.Seconds, args.Nanoseconds) +} + +func Unix[K any](seconds ottl.IntGetter[K], nanoseconds ottl.Optional[ottl.IntGetter[K]]) (ottl.ExprFunc[K], error) { + return func(ctx context.Context, tCtx K) (any, error) { + sec, err := seconds.Get(ctx, tCtx) + if err != nil { + return nil, err + } + + var nsec int64 + + if !nanoseconds.IsEmpty() { + nsec, err = nanoseconds.Get().Get(ctx, tCtx) + if err != nil { + return nil, err + } + } + + return time.Unix(sec, nsec), nil + }, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_unix_micro.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_unix_micro.go index d55f717c750..939941b12fe 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_unix_micro.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_unix_micro.go @@ -17,6 +17,7 @@ type UnixMicroArguments[K any] struct { func NewUnixMicroFactory[K any]() ottl.Factory[K] { return ottl.NewFactory("UnixMicro", &UnixMicroArguments[K]{}, createUnixMicroFunction[K]) } + func createUnixMicroFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { args, ok := oArgs.(*UnixMicroArguments[K])
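// Illustrative note on the new Unix converter above: it is a thin wrapper over the
// standard library's time.Unix, so the optional nanoseconds argument defaults to zero
// when omitted. A runnable sketch of the equivalent behavior:
//
//	package main
//
//	import (
//		"fmt"
//		"time"
//	)
//
//	func main() {
//		fmt.Println(time.Unix(1700000000, 0))         // Unix(1700000000)
//		fmt.Println(time.Unix(1700000000, 500000000)) // Unix(1700000000, 500000000): +0.5s
//	}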
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_unix_milli.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_unix_milli.go index 8f5e587c73b..7aabfedb47a 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_unix_milli.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_unix_milli.go @@ -17,6 +17,7 @@ type UnixMilliArguments[K any] struct { func NewUnixMilliFactory[K any]() ottl.Factory[K] { return ottl.NewFactory("UnixMilli", &UnixMilliArguments[K]{}, createUnixMilliFunction[K]) } + func createUnixMilliFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { args, ok := oArgs.(*UnixMilliArguments[K]) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_unix_nano.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_unix_nano.go index 4c027d855fc..5ba82f5d607 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_unix_nano.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_unix_nano.go @@ -17,6 +17,7 @@ type UnixNanoArguments[K any] struct { func NewUnixNanoFactory[K any]() ottl.Factory[K] { return ottl.NewFactory("UnixNano", &UnixNanoArguments[K]{}, createUnixNanoFunction[K]) } + func createUnixNanoFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { args, ok := oArgs.(*UnixNanoArguments[K]) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_unix_seconds.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_unix_seconds.go index 612a87f8c50..55b7f0f130e 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_unix_seconds.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_unix_seconds.go @@ -17,6 +17,7 @@ type UnixSecondsArguments[K any] struct { func NewUnixSecondsFactory[K any]() ottl.Factory[K] { return ottl.NewFactory("UnixSeconds", &UnixSecondsArguments[K]{}, createUnixSecondsFunction[K]) } + func createUnixSecondsFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { args, ok := oArgs.(*UnixSecondsArguments[K]) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_url.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_url.go new file mode 100644 index 00000000000..371576bb024 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_url.go @@ -0,0 +1,44 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + "fmt" + + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/parseutils" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type URLArguments[K any] struct { + URI ottl.StringGetter[K] +} + +func NewURLFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("URL", &URLArguments[K]{}, createURIFunction[K]) +} + +func createURIFunction[K any](_ ottl.FunctionContext, oArgs 
ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*URLArguments[K]) + if !ok { + return nil, fmt.Errorf("URLFactory args must be of type *URLArguments[K]") + } + + return url(args.URI), nil //revive:disable-line:var-naming +} + +func url[K any](uriSource ottl.StringGetter[K]) ottl.ExprFunc[K] { //revive:disable-line:var-naming + return func(ctx context.Context, tCtx K) (any, error) { + urlString, err := uriSource.Get(ctx, tCtx) + if err != nil { + return nil, err + } + + if urlString == "" { + return nil, fmt.Errorf("url cannot be empty") + } + + return parseutils.ParseURI(urlString, true) + } +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_useragent.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_useragent.go new file mode 100644 index 00000000000..0ba1d1d9f34 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_useragent.go @@ -0,0 +1,47 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" +import ( + "context" + "fmt" + + "github.com/ua-parser/uap-go/uaparser" + semconv "go.opentelemetry.io/collector/semconv/v1.25.0" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type UserAgentArguments[K any] struct { + UserAgent ottl.StringGetter[K] +} + +func NewUserAgentFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("UserAgent", &UserAgentArguments[K]{}, createUserAgentFunction[K]) +} + +func createUserAgentFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*UserAgentArguments[K]) + if !ok { + return nil, fmt.Errorf("UserAgentFactory args must be of type *UserAgentArguments[K]") + } + + return userAgent[K](args.UserAgent), nil +} + +func userAgent[K any](userAgentSource ottl.StringGetter[K]) ottl.ExprFunc[K] { //revive:disable-line:var-naming + parser := uaparser.NewFromSaved() + + return func(ctx context.Context, tCtx K) (any, error) { + userAgentString, err := userAgentSource.Get(ctx, tCtx) + if err != nil { + return nil, err + } + parsedUserAgent := parser.ParseUserAgent(userAgentString) + return map[string]any{ + semconv.AttributeUserAgentName: parsedUserAgent.Family, + semconv.AttributeUserAgentOriginal: userAgentString, + semconv.AttributeUserAgentVersion: parsedUserAgent.ToVersionString(), + }, nil + } +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_uuid.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_uuid.go index 9c03835f503..fec985989d2 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_uuid.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_uuid.go @@ -12,7 +12,7 @@ import ( ) func uuid[K any]() (ottl.ExprFunc[K], error) { - return func(ctx context.Context, tCtx K) (any, error) { + return func(_ context.Context, _ K) (any, error) { u := guuid.New() return u.String(), nil }, nil diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_year.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_year.go new file mode 100644 index 00000000000..b64b35bcd8d --- /dev/null +++
b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_year.go @@ -0,0 +1,39 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + "fmt" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type YearArguments[K any] struct { + Time ottl.TimeGetter[K] +} + +func NewYearFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("Year", &YearArguments[K]{}, createYearFunction[K]) +} + +func createYearFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*YearArguments[K]) + + if !ok { + return nil, fmt.Errorf("YearFactory args must be of type *YearArguments[K]") + } + + return Year(args.Time) +} + +func Year[K any](time ottl.TimeGetter[K]) (ottl.ExprFunc[K], error) { + return func(ctx context.Context, tCtx K) (any, error) { + t, err := time.Get(ctx, tCtx) + if err != nil { + return nil, err + } + return int64(t.Year()), nil + }, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/functions.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/functions.go index 9bb33ff3230..4aa7ffee9e3 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/functions.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/functions.go @@ -12,6 +12,7 @@ func StandardFuncs[K any]() map[string]ottl.Factory[K] { // Editors NewDeleteKeyFactory[K](), NewDeleteMatchingKeysFactory[K](), + NewKeepMatchingKeysFactory[K](), NewFlattenFactory[K](), NewKeepKeysFactory[K](), NewLimitFactory[K](), @@ -36,45 +37,74 @@ func converters[K any]() []ottl.Factory[K] { return []ottl.Factory[K]{ // Converters NewBase64DecodeFactory[K](), + NewDecodeFactory[K](), NewConcatFactory[K](), NewConvertCaseFactory[K](), + NewConvertAttributesToElementsXMLFactory[K](), + NewConvertTextToElementsXMLFactory[K](), + NewDayFactory[K](), NewDoubleFactory[K](), NewDurationFactory[K](), NewExtractPatternsFactory[K](), + NewExtractGrokPatternsFactory[K](), NewFnvFactory[K](), + NewGetXMLFactory[K](), NewHourFactory[K](), NewHoursFactory[K](), + NewInsertXMLFactory[K](), NewIntFactory[K](), NewIsBoolFactory[K](), NewIsDoubleFactory[K](), + NewIsListFactory[K](), NewIsIntFactory[K](), NewIsMapFactory[K](), NewIsMatchFactory[K](), NewIsStringFactory[K](), NewLenFactory[K](), NewLogFactory[K](), + NewMD5Factory[K](), NewMicrosecondsFactory[K](), NewMillisecondsFactory[K](), + NewMinuteFactory[K](), NewMinutesFactory[K](), + NewMonthFactory[K](), + NewNanosecondFactory[K](), NewNanosecondsFactory[K](), NewNowFactory[K](), NewParseCSVFactory[K](), NewParseJSONFactory[K](), NewParseKeyValueFactory[K](), + NewParseSimplifiedXMLFactory[K](), NewParseXMLFactory[K](), + NewRemoveXMLFactory[K](), + NewSecondFactory[K](), NewSecondsFactory[K](), NewSHA1Factory[K](), NewSHA256Factory[K](), + NewSHA512Factory[K](), + NewSortFactory[K](), NewSpanIDFactory[K](), NewSplitFactory[K](), + NewFormatFactory[K](), + NewStringFactory[K](), NewSubstringFactory[K](), NewTimeFactory[K](), + NewFormatTimeFactory[K](), + NewTrimFactory[K](), + NewToKeyValueStringFactory[K](), NewTruncateTimeFactory[K](), NewTraceIDFactory[K](), + NewUnixFactory[K](), NewUnixMicroFactory[K](), NewUnixMilliFactory[K](), NewUnixNanoFactory[K](), 
NewUnixSecondsFactory[K](), NewUUIDFactory[K](), + NewURLFactory[K](), + NewUserAgentFactory[K](), + NewAppendFactory[K](), + NewYearFactory[K](), + NewHexFactory[K](), + NewSliceToMapFactory[K](), } } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/parser.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/parser.go index f5b0bf632be..ad42f9e0b32 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/parser.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/parser.go @@ -7,6 +7,8 @@ import ( "context" "errors" "fmt" + "sort" + "strings" "github.com/alecthomas/participle/v2" "go.opentelemetry.io/collector/component" @@ -16,9 +18,10 @@ import ( // Statement holds a top level Statement for processing telemetry data. A Statement is a combination of a function // invocation and the boolean expression to match telemetry for invoking the function. type Statement[K any] struct { - function Expr[K] - condition BoolExpr[K] - origText string + function Expr[K] + condition BoolExpr[K] + origText string + telemetrySettings component.TelemetrySettings } // Execute is a function that will execute the statement's function if the statement's condition is met. @@ -27,6 +30,11 @@ type Statement[K any] struct { // In addition, the function's return value is always returned. func (s *Statement[K]) Execute(ctx context.Context, tCtx K) (any, bool, error) { condition, err := s.condition.Eval(ctx, tCtx) + defer func() { + if s.telemetrySettings.Logger != nil { + s.telemetrySettings.Logger.Debug("TransformContext after statement execution", zap.String("statement", s.origText), zap.Bool("condition matched", condition), zap.Any("TransformContext", tCtx)) + } + }() if err != nil { return nil, false, err } @@ -58,6 +66,7 @@ type Parser[K any] struct { pathParser PathExpressionParser[K] enumParser EnumParser telemetrySettings component.TelemetrySettings + pathContextNames map[string]struct{} } func NewParser[K any]( @@ -91,6 +100,22 @@ func WithEnumParser[K any](parser EnumParser) Option[K] { } } +// WithPathContextNames sets the context names to be considered when parsing a Path value. +// When this option is empty or nil, all Path segments are considered fields, and the +// Path.Context value is always empty. +// When this option is configured, and the path's context is empty or is not present in +// this context names list, it results in an error. +func WithPathContextNames[K any](contexts []string) Option[K] { + return func(p *Parser[K]) { + pathContextNames := make(map[string]struct{}, len(contexts)) + for _, ctx := range contexts { + pathContextNames[ctx] = struct{}{} + } + + p.pathContextNames = pathContextNames + } +} + // ParseStatements parses string statements into ottl.Statement objects ready for execution. // Returns a slice of statements and a nil error on successful parsing. // If parsing fails, returns nil and a joined error containing each error per failed statement.
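// Illustrative sketch of the new WithPathContextNames option above (constructor
// arguments are abbreviated; ottlspan is assumed only for the example):
//
//	parser, err := ottl.NewParser[ottlspan.TransformContext](
//		/* functions, path parser, telemetry settings, ... */
//		ottl.WithPathContextNames[ottlspan.TransformContext]([]string{"span", "spanevent"}),
//	)
//
// With the option set, a path such as span.attributes["x"] parses with Path.Context
// == "span", while a context-less path like attributes["x"] is rejected; the
// prependContextToStatementPaths helper below repairs such statements by rewriting
// them to span.attributes["x"].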
@@ -131,9 +156,10 @@ func (p *Parser[K]) ParseStatement(statement string) (*Statement[K], error) { return nil, err } return &Statement[K]{ - function: function, - condition: expression, - origText: statement, + function: function, + condition: expression, + origText: statement, + telemetrySettings: p.telemetrySettings, }, nil } @@ -178,12 +204,41 @@ func (p *Parser[K]) ParseCondition(condition string) (*Condition[K], error) { }, nil } -var parser = newParser[parsedStatement]() -var conditionParser = newParser[booleanExpression]() +// prependContextToStatementPaths changes the given OTTL statement, adding the context name prefix +// to all context-less paths. No modifications are performed for paths whose [Path.Context] +// value matches any WithPathContextNames value. +// The context argument must be a valid WithPathContextNames value, otherwise an error is returned. +func (p *Parser[K]) prependContextToStatementPaths(context string, statement string) (string, error) { + if _, ok := p.pathContextNames[context]; !ok { + return statement, fmt.Errorf(`unknown context "%s" for parser %T, valid options are: %s`, context, p, p.buildPathContextNamesText("")) + } + parsed, err := parseStatement(statement) + if err != nil { + return "", err + } + paths := getParsedStatementPaths(parsed) + if len(paths) == 0 { + return statement, nil + } + + var missingContextOffsets []int + for _, it := range paths { + if _, ok := p.pathContextNames[it.Context]; !ok { + missingContextOffsets = append(missingContextOffsets, it.Pos.Offset) + } + } + + return insertContextIntoStatementOffsets(context, statement, missingContextOffsets) +} + +var ( + parser = newParser[parsedStatement]() + conditionParser = newParser[booleanExpression]() + valueExpressionParser = newParser[value]() +) func parseStatement(raw string) (*parsedStatement, error) { parsed, err := parser.ParseString("", raw) - if err != nil { return nil, fmt.Errorf("statement has invalid syntax: %w", err) } @@ -197,7 +252,6 @@ func parseStatement(raw string) (*parsedStatement, error) { func parseCondition(raw string) (*booleanExpression, error) { parsed, err := conditionParser.ParseString("", raw) - if err != nil { return nil, fmt.Errorf("condition has invalid syntax: %w", err) } @@ -209,6 +263,43 @@ func parseCondition(raw string) (*booleanExpression, error) { return parsed, nil } +func parseValueExpression(raw string) (*value, error) { + parsed, err := valueExpressionParser.ParseString("", raw) + if err != nil { + return nil, fmt.Errorf("expression has invalid syntax: %w", err) + } + err = parsed.checkForCustomError() + if err != nil { + return nil, err + } + + return parsed, nil +} + +func insertContextIntoStatementOffsets(context string, statement string, offsets []int) (string, error) { + if len(offsets) == 0 { + return statement, nil + } + + contextPrefix := context + "." + var sb strings.Builder + sb.Grow(len(statement) + (len(contextPrefix) * len(offsets))) + + sort.Ints(offsets) + left := 0 + for _, offset := range offsets { + if offset < 0 || offset > len(statement) { + return statement, fmt.Errorf(`failed to insert context "%s" into statement "%s": offset %d is out of range`, context, statement, offset) + } + sb.WriteString(statement[left:offset]) + sb.WriteString(contextPrefix) + left = offset + } + sb.WriteString(statement[left:]) + + return sb.String(), nil +} + // newParser returns a parser that can be used to read a string into a parsedStatement. An error will be returned if the string // is not formatted for the DSL.
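// Worked example for insertContextIntoStatementOffsets above (the statement is
// illustrative): given context "span" and
//
//	set(name, "x") where name == "y"
//
// the two context-less paths start at byte offsets 4 and 21, so the prefix "span."
// is spliced in at both positions, producing
//
//	set(span.name, "x") where span.name == "y"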
func newParser[G any]() *participle.Parser[G] { @@ -262,6 +353,7 @@ func NewStatementSequence[K any](statements []*Statement[K], telemetrySettings c // When the ErrorMode of the StatementSequence is `ignore`, errors are logged and execution continues to the next statement. // When the ErrorMode of the StatementSequence is `silent`, errors are not logged and execution continues to the next statement. func (s *StatementSequence[K]) Execute(ctx context.Context, tCtx K) error { + s.telemetrySettings.Logger.Debug("initial TransformContext before executing StatementSequence", zap.Any("TransformContext", tCtx)) for _, statement := range s.statements { _, _, err := statement.Execute(ctx, tCtx) if err != nil { @@ -333,6 +425,7 @@ func (c *ConditionSequence[K]) Eval(ctx context.Context, tCtx K) (bool, error) { var atLeastOneMatch bool for _, condition := range c.conditions { match, err := condition.Eval(ctx, tCtx) + c.telemetrySettings.Logger.Debug("condition evaluation result", zap.String("condition", condition.origText), zap.Bool("match", match), zap.Any("TransformContext", tCtx)) if err != nil { if c.errorMode == PropagateError { err = fmt.Errorf("failed to eval condition: %v, %w", condition.origText, err) @@ -360,3 +453,33 @@ func (c *ConditionSequence[K]) Eval(ctx context.Context, tCtx K) (bool, error) { // It is not possible to get here if any condition during an AND explicitly failed. return c.logicOp == And && atLeastOneMatch, nil } + +// ValueExpression represents an expression that resolves to a value. The returned value can be of any type, +// and the expression can be either a literal value, a path value within the context, or the result of a converter and/or +// a mathematical expression. +// This allows other components using this library to extract data from the context of the incoming signal using OTTL. +type ValueExpression[K any] struct { + getter Getter[K] +} + +// Eval evaluates the given expression and returns the value the expression resolves to. +func (e *ValueExpression[K]) Eval(ctx context.Context, tCtx K) (any, error) { + return e.getter.Get(ctx, tCtx) +} + +// ParseValueExpression parses an expression string into a ValueExpression. The ValueExpression's Eval +// method can then be used to extract the value from the context of the incoming signal. 
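// For example (usage sketch; the expression and variable names are illustrative):
//
//	expr, err := parser.ParseValueExpression(`attributes["service.name"]`)
//	if err != nil {
//		// the expression failed to parse
//	}
//	val, err := expr.Eval(ctx, tCtx) // val may be a literal, a path value, or a converter result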
+func (p *Parser[K]) ParseValueExpression(raw string) (*ValueExpression[K], error) { + parsed, err := parseValueExpression(raw) + if err != nil { + return nil, err + } + getter, err := p.newGetter(*parsed) + if err != nil { + return nil, err + } + + return &ValueExpression[K]{ + getter: getter, + }, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/parser_collection.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/parser_collection.go new file mode 100644 index 00000000000..c257e8eb198 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/parser_collection.go @@ -0,0 +1,374 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottl // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" + +import ( + "fmt" + "reflect" + + "go.opentelemetry.io/collector/component" + "go.uber.org/zap" +) + +// Safeguard to statically ensure the Parser.ParseStatements method can be reflectively +// invoked by the ottlParserWrapper.parseStatements +var _ interface { + ParseStatements(statements []string) ([]*Statement[any], error) +} = (*Parser[any])(nil) + +// Safeguard to statically ensure any ParsedStatementConverter method can be reflectively +// invoked by the statementsConverterWrapper.call +var _ ParsedStatementConverter[any, any] = func( + _ *ParserCollection[any], + _ *Parser[any], + _ string, + _ StatementsGetter, + _ []*Statement[any], +) (any, error) { + return nil, nil +} + +// StatementsGetter represents a set of statements to be parsed. +// +// Experimental: *NOTE* this API is subject to change or removal in the future. +type StatementsGetter interface { + // GetStatements retrieves the OTTL statements to be parsed + GetStatements() []string +} + +type defaultStatementsGetter []string + +func (d defaultStatementsGetter) GetStatements() []string { + return d +} + +// NewStatementsGetter creates a new StatementsGetter. +// +// Experimental: *NOTE* this API is subject to change or removal in the future. +func NewStatementsGetter(statements []string) StatementsGetter { + return defaultStatementsGetter(statements) +} + +// ottlParserWrapper wraps an ottl.Parser using reflection, so it can invoke exported +// methods without knowing its generic type (transform context). 
+type ottlParserWrapper struct { + parser reflect.Value + prependContextToStatementPaths func(context string, statement string) (string, error) +} + +func newParserWrapper[K any](parser *Parser[K]) *ottlParserWrapper { + return &ottlParserWrapper{ + parser: reflect.ValueOf(parser), + prependContextToStatementPaths: parser.prependContextToStatementPaths, + } +} + +func (g *ottlParserWrapper) parseStatements(statements []string) (reflect.Value, error) { + method := g.parser.MethodByName("ParseStatements") + parseStatementsRes := method.Call([]reflect.Value{reflect.ValueOf(statements)}) + err := parseStatementsRes[1] + if !err.IsNil() { + return reflect.Value{}, err.Interface().(error) + } + return parseStatementsRes[0], nil +} + +func (g *ottlParserWrapper) prependContextToStatementsPaths(context string, statements []string) ([]string, error) { + result := make([]string, 0, len(statements)) + for _, s := range statements { + prependedStatement, err := g.prependContextToStatementPaths(context, s) + if err != nil { + return nil, err + } + result = append(result, prependedStatement) + } + return result, nil +} + +// statementsConverterWrapper is a reflection-based wrapper to the ParsedStatementConverter function, +// which does not require knowing all generic parameters to be called. +type statementsConverterWrapper reflect.Value + +func newStatementsConverterWrapper[K any, R any](converter ParsedStatementConverter[K, R]) statementsConverterWrapper { + return statementsConverterWrapper(reflect.ValueOf(converter)) +} + +func (s statementsConverterWrapper) call( + parserCollection reflect.Value, + ottlParser *ottlParserWrapper, + context string, + statements StatementsGetter, + parsedStatements reflect.Value, +) (reflect.Value, error) { + result := reflect.Value(s).Call([]reflect.Value{ + parserCollection, + ottlParser.parser, + reflect.ValueOf(context), + reflect.ValueOf(statements), + parsedStatements, + }) + + resultValue := result[0] + resultError := result[1] + if !resultError.IsNil() { + return reflect.Value{}, resultError.Interface().(error) + } + + return resultValue, nil +} + +// parserCollectionParser holds an ottlParserWrapper and its respective +// statementsConverter function. +type parserCollectionParser struct { + ottlParser *ottlParserWrapper + statementsConverter statementsConverterWrapper +} + +// ParserCollection is a configurable set of ottl.Parser that can handle multiple OTTL context +// parsings, inferring the context, choosing the right parser for the given statements, and +// transforming the parsed ottl.Statement[K] slice into a common result of type R. +// +// Experimental: *NOTE* this API is subject to change or removal in the future. +type ParserCollection[R any] struct { + contextParsers map[string]*parserCollectionParser + contextInferrer contextInferrer + contextInferrerCandidates map[string]*priorityContextInferrerCandidate + candidatesLowerContexts map[string][]string + modifiedStatementLogging bool + Settings component.TelemetrySettings + ErrorMode ErrorMode +} + +// ParserCollectionOption is a configurable ParserCollection option. +// +// Experimental: *NOTE* this API is subject to change or removal in the future. +type ParserCollectionOption[R any] func(*ParserCollection[R]) error + +// NewParserCollection creates a new ParserCollection. +// +// Experimental: *NOTE* this API is subject to change or removal in the future.
+func NewParserCollection[R any]( + settings component.TelemetrySettings, + options ...ParserCollectionOption[R], +) (*ParserCollection[R], error) { + contextInferrerCandidates := map[string]*priorityContextInferrerCandidate{} + pc := &ParserCollection[R]{ + Settings: settings, + contextParsers: map[string]*parserCollectionParser{}, + contextInferrer: newPriorityContextInferrer(contextInferrerCandidates), + contextInferrerCandidates: contextInferrerCandidates, + candidatesLowerContexts: map[string][]string{}, + } + + for _, op := range options { + err := op(pc) + if err != nil { + return nil, err + } + } + + return pc, nil +} + +// ParsedStatementConverter is a function that converts the parsed ottl.Statement[K] into +// a common representation to all parser collection contexts passed through WithParserCollectionContext. +// Given each parser has its own transform context type, they must agree on a common type [R] +// so it can be returned by the ParserCollection.ParseStatements and ParserCollection.ParseStatementsWithContext +// functions. +// +// Experimental: *NOTE* this API is subject to change or removal in the future. +type ParsedStatementConverter[K any, R any] func( + collection *ParserCollection[R], + parser *Parser[K], + context string, + statements StatementsGetter, + parsedStatements []*Statement[K], +) (R, error) + +func newNopParsedStatementConverter[K any]() ParsedStatementConverter[K, any] { + return func( + _ *ParserCollection[any], + _ *Parser[K], + _ string, + _ StatementsGetter, + parsedStatements []*Statement[K], + ) (any, error) { + return parsedStatements, nil + } +} + +// WithParserCollectionContext configures an ottl.Parser for the given context. +// The provided ottl.Parser must be configured to support the provided context using +// the ottl.WithPathContextNames option. +// +// Experimental: *NOTE* this API is subject to change or removal in the future. +func WithParserCollectionContext[K any, R any]( + context string, + parser *Parser[K], + converter ParsedStatementConverter[K, R], +) ParserCollectionOption[R] { + return func(mp *ParserCollection[R]) error { + if _, ok := parser.pathContextNames[context]; !ok { + return fmt.Errorf(`context "%s" must be a valid "%T" path context name`, context, parser) + } + mp.contextParsers[context] = &parserCollectionParser{ + ottlParser: newParserWrapper[K](parser), + statementsConverter: newStatementsConverterWrapper(converter), + } + + for lowerContext := range parser.pathContextNames { + if lowerContext != context { + mp.candidatesLowerContexts[lowerContext] = append(mp.candidatesLowerContexts[lowerContext], context) + } + } + + mp.contextInferrerCandidates[context] = &priorityContextInferrerCandidate{ + hasEnumSymbol: func(enum *EnumSymbol) bool { + _, err := parser.enumParser(enum) + return err == nil + }, + hasFunctionName: func(name string) bool { + _, ok := parser.functions[name] + return ok + }, + getLowerContexts: mp.getLowerContexts, + } + return nil + } +} + +func (pc *ParserCollection[R]) getLowerContexts(context string) []string { + return pc.candidatesLowerContexts[context] +} + +// WithParserCollectionErrorMode has no effect on the ParserCollection, but might be used +// by the ParsedStatementConverter functions to handle/create StatementSequence. +// +// Experimental: *NOTE* this API is subject to change or removal in the future. 
+func WithParserCollectionErrorMode[R any](errorMode ErrorMode) ParserCollectionOption[R] { + return func(tp *ParserCollection[R]) error { + tp.ErrorMode = errorMode + return nil + } +} + +// EnableParserCollectionModifiedStatementLogging controls the statement modification logs. +// When enabled, it logs any statement modifications performed by the parsing operations, +// instructing users to rewrite the statements accordingly. +// +// Experimental: *NOTE* this API is subject to change or removal in the future. +func EnableParserCollectionModifiedStatementLogging[R any](enabled bool) ParserCollectionOption[R] { + return func(tp *ParserCollection[R]) error { + tp.modifiedStatementLogging = enabled + return nil + } +} + +// ParseStatements parses the given statements into [R] using the configured context's ottl.Parser +// and subsequently calling the ParsedStatementConverter function. +// The statement's context is automatically inferred from the [Path.Context] values, choosing the +// highest priority context found. +// If no contexts are present in the statements, or if the inferred value is not supported by +// the [ParserCollection], it returns an error. +// If parsing the statements fails, it returns the underlying [ottl.Parser.ParseStatements] error. +// +// Experimental: *NOTE* this API is subject to change or removal in the future. +func (pc *ParserCollection[R]) ParseStatements(statements StatementsGetter) (R, error) { + statementsValues := statements.GetStatements() + inferredContext, err := pc.contextInferrer.infer(statementsValues) + if err != nil { + return *new(R), err + } + + if inferredContext == "" { + return *new(R), fmt.Errorf("unable to infer context from statements %+q, path's first segment must be a valid context name: %+q", statementsValues, pc.supportedContextNames()) + } + + _, ok := pc.contextParsers[inferredContext] + if !ok { + return *new(R), fmt.Errorf(`context "%s" inferred from the statements %+q is not a supported context: %+q`, inferredContext, statementsValues, pc.supportedContextNames()) + } + + return pc.ParseStatementsWithContext(inferredContext, statements, false) +} + +// ParseStatementsWithContext parses the given statements into [R] using the configured +// context's ottl.Parser and subsequently calling the ParsedStatementConverter function. +// Unlike ParseStatements, it uses the provided context and does not infer it +// automatically. The context value must be supported by the [ParserCollection], +// otherwise an error is returned. +// If the statements' paths do not provide their Path.Context value, the prependPathsContext +// argument should be set to true, so it rewrites the statements prepending the missing path +// contexts. +// If parsing the statements fails, it returns the underlying [ottl.Parser.ParseStatements] error. +// +// Experimental: *NOTE* this API is subject to change or removal in the future.
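// For example (sketch; the statements are illustrative):
//
//	g := ottl.NewStatementsGetter([]string{`set(span.name, "renamed")`})
//	r, err := pc.ParseStatements(g) // context "span" is inferred from the paths
//
//	g = ottl.NewStatementsGetter([]string{`set(name, "renamed")`})
//	r, err = pc.ParseStatementsWithContext("span", g, true) // paths are first rewritten to span.name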
+func (pc *ParserCollection[R]) ParseStatementsWithContext(context string, statements StatementsGetter, prependPathsContext bool) (R, error) { + contextParser, ok := pc.contextParsers[context] + if !ok { + return *new(R), fmt.Errorf(`unknown context "%s" for statements: %v`, context, statements.GetStatements()) + } + + var err error + var parsingStatements []string + if prependPathsContext { + originalStatements := statements.GetStatements() + parsingStatements, err = contextParser.ottlParser.prependContextToStatementsPaths(context, originalStatements) + if err != nil { + return *new(R), err + } + if pc.modifiedStatementLogging { + pc.logModifiedStatements(originalStatements, parsingStatements) + } + } else { + parsingStatements = statements.GetStatements() + } + + parsedStatements, err := contextParser.ottlParser.parseStatements(parsingStatements) + if err != nil { + return *new(R), err + } + + convertedStatements, err := contextParser.statementsConverter.call( + reflect.ValueOf(pc), + contextParser.ottlParser, + context, + statements, + parsedStatements, + ) + if err != nil { + return *new(R), err + } + + if convertedStatements.IsNil() { + return *new(R), nil + } + + return convertedStatements.Interface().(R), nil +} + +func (pc *ParserCollection[R]) logModifiedStatements(originalStatements, modifiedStatements []string) { + var fields []zap.Field + for i, original := range originalStatements { + if modifiedStatements[i] != original { + statementKey := fmt.Sprintf("[%v]", i) + fields = append(fields, zap.Dict( + statementKey, + zap.String("original", original), + zap.String("modified", modifiedStatements[i])), + ) + } + } + if len(fields) > 0 { + pc.Settings.Logger.Info("one or more statements were modified to include their path context, please rewrite them accordingly", zap.Dict("statements", fields...)) + } +} + +func (pc *ParserCollection[R]) supportedContextNames() []string { + contextsNames := make([]string, 0, len(pc.contextParsers)) + for k := range pc.contextParsers { + contextsNames = append(contextsNames, k) + } + return contextsNames +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/paths.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/paths.go new file mode 100644 index 00000000000..8f6c2c15a72 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/paths.go @@ -0,0 +1,33 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottl // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" + +// grammarPathVisitor is used to extract all paths from a parsedStatement or booleanExpression +type grammarPathVisitor struct { + paths []path +} + +func (v *grammarPathVisitor) visitEditor(_ *editor) {} +func (v *grammarPathVisitor) visitConverter(_ *converter) {} +func (v *grammarPathVisitor) visitValue(_ *value) {} +func (v *grammarPathVisitor) visitMathExprLiteral(_ *mathExprLiteral) {} + +func (v *grammarPathVisitor) visitPath(value *path) { + v.paths = append(v.paths, *value) +} + +func getParsedStatementPaths(ps *parsedStatement) []path { + visitor := &grammarPathVisitor{} + ps.Editor.accept(visitor) + if ps.WhereClause != nil { + ps.WhereClause.accept(visitor) + } + return visitor.paths +} + +func getBooleanExpressionPaths(be *booleanExpression) []path { + visitor := &grammarPathVisitor{} + be.accept(visitor) + return visitor.paths +} diff --git
a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/hash.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/hash.go index 6826de769b8..172789c607b 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/hash.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/hash.go @@ -31,6 +31,31 @@ var ( emptyHash = [16]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} ) +// HashOption is a function that sets an option on the hash calculation. +type HashOption func(*hashWriter) + +// WithMap adds a map to the hash calculation. +func WithMap(m pcommon.Map) HashOption { + return func(hw *hashWriter) { + hw.writeMapHash(m) + } +} + +// WithValue adds a value to the hash calculation. +func WithValue(v pcommon.Value) HashOption { + return func(hw *hashWriter) { + hw.writeValueHash(v) + } +} + +// WithString adds a string to the hash calculation. +func WithString(s string) HashOption { + return func(hw *hashWriter) { + hw.byteBuf = append(hw.byteBuf, valStrPrefix...) + hw.byteBuf = append(hw.byteBuf, s...) + } +} + type hashWriter struct { byteBuf []byte keysBuf []string @@ -47,6 +72,29 @@ var hashWriterPool = &sync.Pool{ New: func() any { return newHashWriter() }, } +// Hash generates a hash for the provided options and returns the computed hash as a [16]byte. +func Hash(opts ...HashOption) [16]byte { + if len(opts) == 0 { + return emptyHash + } + + hw := hashWriterPool.Get().(*hashWriter) + defer hashWriterPool.Put(hw) + hw.byteBuf = hw.byteBuf[:0] + + for _, o := range opts { + o(hw) + } + + return hw.hashSum128() +} + +// Hash64 generates a hash for the provided options and returns the computed hash as a uint64. +func Hash64(opts ...HashOption) uint64 { + hash := Hash(opts...) + return xxhash.Sum64(hash[:]) +} + // MapHash return a hash for the provided map. // Maps with the same underlying key/value pairs in different order produce the same deterministic hash value. func MapHash(m pcommon.Map) [16]byte { @@ -105,8 +153,7 @@ func (hw *hashWriter) writeMapHash(m pcommon.Map) { func (hw *hashWriter) writeValueHash(v pcommon.Value) { switch v.Type() { case pcommon.ValueTypeStr: - hw.byteBuf = append(hw.byteBuf, valStrPrefix...) - hw.byteBuf = append(hw.byteBuf, v.Str()...) + hw.writeString(v.Str()) case pcommon.ValueTypeBool: if v.Bool() { hw.byteBuf = append(hw.byteBuf, valBoolTrue...) @@ -138,6 +185,11 @@ func (hw *hashWriter) writeValueHash(v pcommon.Value) { } } +func (hw *hashWriter) writeString(s string) { + hw.byteBuf = append(hw.byteBuf, valStrPrefix...) + hw.byteBuf = append(hw.byteBuf, s...) +} + // hashSum128 returns a [16]byte hash sum. 
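// Usage sketch for the option-based Hash/Hash64 API added above (the attribute
// values are illustrative):
//
//	m := pcommon.NewMap()
//	m.PutStr("service.name", "checkout")
//	key := pdatautil.Hash64(pdatautil.WithString("resource"), pdatautil.WithMap(m))
//
// Options are written into the hash buffer in argument order, so reordering them
// yields a different hash; calling Hash with no options returns emptyHash.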
func (hw *hashWriter) hashSum128() [16]byte { r := [16]byte{} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure/resourcelogs_to_logs.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure/resourcelogs_to_logs.go index b2c96d28f56..5b608570612 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure/resourcelogs_to_logs.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure/resourcelogs_to_logs.go @@ -8,6 +8,7 @@ import ( "encoding/json" "errors" "strconv" + "time" jsoniter "github.com/json-iterator/go" "github.com/relvacode/iso8601" @@ -20,7 +21,7 @@ import ( const ( // Constants for OpenTelemetry Specs - scopeName = "otelcol/azureresourcelogs" + scopeName = "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure" // Constants for Azure Log Records azureCategory = "azure.category" @@ -37,9 +38,7 @@ const ( azureTenantID = "azure.tenant.id" ) -var ( - errMissingTimestamp = errors.New("missing timestamp") -) +var errMissingTimestamp = errors.New("missing timestamp") // azureRecords represents an array of Azure log records // as exported via an Azure Event Hub @@ -73,8 +72,9 @@ type azureLogRecord struct { var _ plog.Unmarshaler = (*ResourceLogsUnmarshaler)(nil) type ResourceLogsUnmarshaler struct { - Version string - Logger *zap.Logger + Version string + Logger *zap.Logger + TimeFormats []string } func (r ResourceLogsUnmarshaler) UnmarshalLogs(buf []byte) (plog.Logs, error) { @@ -107,7 +107,7 @@ func (r ResourceLogsUnmarshaler) UnmarshalLogs(buf []byte) (plog.Logs, error) { for i := 0; i < len(logs); i++ { log := logs[i] - nanos, err := getTimestamp(log) + nanos, err := getTimestamp(log, r.TimeFormats...) if err != nil { r.Logger.Warn("Unable to convert timestamp from log", zap.String("timestamp", log.Time)) continue @@ -131,11 +131,11 @@ func (r ResourceLogsUnmarshaler) UnmarshalLogs(buf []byte) (plog.Logs, error) { return l, nil } -func getTimestamp(record azureLogRecord) (pcommon.Timestamp, error) { +func getTimestamp(record azureLogRecord, formats ...string) (pcommon.Timestamp, error) { if record.Time != "" { - return asTimestamp(record.Time) + return asTimestamp(record.Time, formats...) } else if record.Timestamp != "" { - return asTimestamp(record.Timestamp) + return asTimestamp(record.Timestamp, formats...) } return 0, errMissingTimestamp @@ -144,13 +144,21 @@ func getTimestamp(record azureLogRecord) (pcommon.Timestamp, error) { // asTimestamp will parse an ISO8601 string into an OpenTelemetry // nanosecond timestamp. If the string cannot be parsed, it will // return zero and the error. 
-func asTimestamp(s string) (pcommon.Timestamp, error) { - t, err := iso8601.ParseString(s) - if err != nil { - return 0, err +func asTimestamp(s string, formats ...string) (pcommon.Timestamp, error) { + var err error + var t time.Time + // Try parsing with provided formats first + for _, format := range formats { + if t, err = time.Parse(format, s); err == nil { + return pcommon.Timestamp(t.UnixNano()), nil + } } - return pcommon.Timestamp(t.UnixNano()), nil + // Fallback to ISO 8601 parsing if no format matches + if t, err = iso8601.ParseString(s); err == nil { + return pcommon.Timestamp(t.UnixNano()), nil + } + return 0, err } // asSeverity converts the Azure log level to equivalent @@ -167,7 +175,7 @@ func asSeverity(number json.Number) plog.SeverityNumber { case "Critical": return plog.SeverityNumberFatal default: - var levelNumber, _ = number.Int64() + levelNumber, _ := number.Int64() if levelNumber > 0 { return plog.SeverityNumber(levelNumber) } @@ -177,7 +185,7 @@ func asSeverity(number json.Number) plog.SeverityNumber { } func extractRawAttributes(log azureLogRecord) map[string]any { - var attrs = map[string]any{} + attrs := map[string]any{} attrs[azureCategory] = log.Category setIf(attrs, azureCorrelationID, log.CorrelationID) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure/resources_to_traces.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure/resources_to_traces.go new file mode 100644 index 00000000000..2f92d013248 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure/resources_to_traces.go @@ -0,0 +1,183 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package azure // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure" + +import ( + "bytes" + "encoding/hex" + "net/url" + + jsoniter "github.com/json-iterator/go" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" + conventions "go.opentelemetry.io/collector/semconv/v1.13.0" + "go.uber.org/zap" +) + +const ( + // Constants for OpenTelemetry Specs + traceAzureResourceID = "azure.resource.id" +) + +type azureTracesRecords struct { + Records []azureTracesRecord `json:"records"` +} + +// Azure Trace Records based on Azure AppRequests & AppDependencies table data +// the common record schema reference: +// https://learn.microsoft.com/en-us/azure/azure-monitor/reference/tables/apprequests +// https://learn.microsoft.com/en-us/azure/azure-monitor/reference/tables/appdependencies +type azureTracesRecord struct { + Time string `json:"time"` + ResourceID string `json:"resourceId"` + ResourceGUID string `json:"ResourceGUID"` + Type string `json:"Type"` + AppRoleInstance string `json:"AppRoleInstance"` + AppRoleName string `json:"AppRoleName"` + AppVersion string `json:"AppVersion"` + ClientCity string `json:"ClientCity"` + ClientCountryOrRegion string `json:"ClientCountryOrRegion"` + ClientIP string `json:"ClientIP"` + ClientStateOrProvince string `json:"ClientStateOrProvince"` + ClientType string `json:"ClientType"` + IKey string `json:"IKey"` + OperationName string `json:"OperationName"` + OperationID string `json:"OperationId"` + ParentID string `json:"ParentId"` + SDKVersion string `json:"SDKVersion"` + Properties map[string]string `json:"Properties"` + Measurements map[string]float64 `json:"Measurements"` + SpanID string `json:"Id"` + Name string `json:"Name"` + URL string 
`json:"Url"` + Source string `json:"Source"` + Success bool `json:"Success"` + ResultCode string `json:"ResultCode"` + DurationMs float64 `json:"DurationMs"` + PerformanceBucket string `json:"PerformanceBucket"` + ItemCount float64 `json:"ItemCount"` +} + +var _ ptrace.Unmarshaler = (*TracesUnmarshaler)(nil) + +type TracesUnmarshaler struct { + Version string + Logger *zap.Logger + TimeFormats []string +} + +func (r TracesUnmarshaler) UnmarshalTraces(buf []byte) (ptrace.Traces, error) { + t := ptrace.NewTraces() + + var azureTraces azureTracesRecords + decoder := jsoniter.NewDecoder(bytes.NewReader(buf)) + err := decoder.Decode(&azureTraces) + if err != nil { + return t, err + } + + resourceTraces := t.ResourceSpans().AppendEmpty() + resource := resourceTraces.Resource() + resource.Attributes().PutStr(conventions.AttributeTelemetrySDKName, scopeName) + resource.Attributes().PutStr(conventions.AttributeTelemetrySDKLanguage, conventions.AttributeTelemetrySDKLanguageGo) + resource.Attributes().PutStr(conventions.AttributeTelemetrySDKVersion, r.Version) + resource.Attributes().PutStr(conventions.AttributeCloudProvider, conventions.AttributeCloudProviderAzure) + + scopeSpans := resourceTraces.ScopeSpans().AppendEmpty() + + spans := scopeSpans.Spans() + + resourceID := "" + for _, azureTrace := range azureTraces.Records { + if resourceID == "" && azureTrace.ResourceID != "" { + resourceID = azureTrace.ResourceID + } + + resource.Attributes().PutStr("service.name", azureTrace.AppRoleName) + + nanos, err := asTimestamp(azureTrace.Time, r.TimeFormats...) + if err != nil { + r.Logger.Warn("Invalid Timestamp", zap.String("time", azureTrace.Time)) + continue + } + + traceID, traceErr := TraceIDFromHex(azureTrace.OperationID) + if traceErr != nil { + r.Logger.Warn("Invalid TraceID", zap.String("traceID", azureTrace.OperationID)) + return t, err + } + spanID, spanErr := SpanIDFromHex(azureTrace.SpanID) + if spanErr != nil { + r.Logger.Warn("Invalid SpanID", zap.String("spanID", azureTrace.SpanID)) + return t, err + } + parentID, parentErr := SpanIDFromHex(azureTrace.ParentID) + if parentErr != nil { + r.Logger.Warn("Invalid ParentID", zap.String("parentID", azureTrace.ParentID)) + return t, err + } + + span := spans.AppendEmpty() + span.SetTraceID(traceID) + span.SetSpanID(spanID) + span.SetParentSpanID(parentID) + + span.Attributes().PutStr("OperationName", azureTrace.OperationName) + span.Attributes().PutStr("AppRoleName", azureTrace.AppRoleName) + span.Attributes().PutStr("AppRoleInstance", azureTrace.AppRoleInstance) + span.Attributes().PutStr("Type", azureTrace.Type) + + span.Attributes().PutStr("http.url", azureTrace.URL) + + urlObj, _ := url.Parse(azureTrace.URL) + hostname := urlObj.Host + hostpath := urlObj.Path + scheme := urlObj.Scheme + + span.Attributes().PutStr("http.host", hostname) + span.Attributes().PutStr("http.path", hostpath) + span.Attributes().PutStr("http.response.status_code", azureTrace.ResultCode) + span.Attributes().PutStr("http.client_ip", azureTrace.ClientIP) + span.Attributes().PutStr("http.client_city", azureTrace.ClientCity) + span.Attributes().PutStr("http.client_type", azureTrace.ClientType) + span.Attributes().PutStr("http.client_state", azureTrace.ClientStateOrProvince) + span.Attributes().PutStr("http.client_type", azureTrace.ClientType) + span.Attributes().PutStr("http.client_country", azureTrace.ClientCountryOrRegion) + span.Attributes().PutStr("http.scheme", scheme) + span.Attributes().PutStr("http.method", azureTrace.Properties["HTTP Method"]) + + 
span.SetKind(ptrace.SpanKindServer) + span.SetName(azureTrace.Name) + span.SetStartTimestamp(nanos) + span.SetEndTimestamp(nanos + pcommon.Timestamp(azureTrace.DurationMs*1e6)) + } + + if resourceID != "" { + resourceTraces.Resource().Attributes().PutStr(traceAzureResourceID, resourceID) + } else { + r.Logger.Warn("No ResourceID Set on Traces!") + } + + return t, nil +} + +func TraceIDFromHex(hexStr string) (pcommon.TraceID, error) { + bytes, err := hex.DecodeString(hexStr) + if err != nil { + return pcommon.TraceID{}, err + } + var id pcommon.TraceID + copy(id[:], bytes) + return id, nil +} + +func SpanIDFromHex(hexStr string) (pcommon.SpanID, error) { + bytes, err := hex.DecodeString(hexStr) + if err != nil { + return pcommon.SpanID{}, err + } + var id pcommon.SpanID + copy(id[:], bytes) + return id, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger/constants.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger/constants.go index 31b8caed453..42881b0e19b 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger/constants.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger/constants.go @@ -18,7 +18,5 @@ const ( // https://github.com/open-telemetry/opentelemetry-specification/blob/34b907207f3dfe1635a35c4cdac6b6ab3a495e18/specification/trace/sdk_exporters/jaeger.md#events const eventNameAttr = "event" -var ( - // errType indicates that a value is not convertible to the target type. - errType = errors.New("invalid type") -) +// errType indicates that a value is not convertible to the target type. +var errType = errors.New("invalid type") diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger/jaegerproto_to_traces.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger/jaegerproto_to_traces.go index 099e13e5ec8..a0864b24cf1 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger/jaegerproto_to_traces.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger/jaegerproto_to_traces.go @@ -14,7 +14,7 @@ import ( "github.com/jaegertracing/jaeger/model" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/ptrace" - conventions "go.opentelemetry.io/collector/semconv/v1.9.0" + conventions "go.opentelemetry.io/collector/semconv/v1.16.0" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/idutils" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/occonventions" @@ -441,9 +441,9 @@ func getTraceStateFromAttrs(attrs pcommon.Map) string { func getScope(span *model.Span) scope { il := scope{} - if libraryName, ok := getAndDeleteTag(span, conventions.OtelLibraryName); ok { + if libraryName, ok := getAndDeleteTag(span, conventions.AttributeOtelScopeName); ok { il.name = libraryName - if libraryVersion, ok := getAndDeleteTag(span, conventions.OtelLibraryVersion); ok { + if libraryVersion, ok := getAndDeleteTag(span, conventions.AttributeOtelScopeVersion); ok { il.version = libraryVersion } } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger/traces_to_jaegerproto.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger/traces_to_jaegerproto.go index ecbd780f02b..2bfff448dcb 100644 --- 
a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger/traces_to_jaegerproto.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger/traces_to_jaegerproto.go @@ -7,7 +7,7 @@ import ( "github.com/jaegertracing/jaeger/model" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/ptrace" - conventions "go.opentelemetry.io/collector/semconv/v1.9.0" + conventions "go.opentelemetry.io/collector/semconv/v1.16.0" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/idutils" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/tracetranslator" @@ -15,11 +15,11 @@ import ( // ProtoFromTraces translates internal trace data into the Jaeger Proto for GRPC. // Returns slice of translated Jaeger batches and error if translation failed. -func ProtoFromTraces(td ptrace.Traces) ([]*model.Batch, error) { +func ProtoFromTraces(td ptrace.Traces) []*model.Batch { resourceSpans := td.ResourceSpans() if resourceSpans.Len() == 0 { - return nil, nil + return nil } batches := make([]*model.Batch, 0, resourceSpans.Len()) @@ -31,7 +31,7 @@ func ProtoFromTraces(td ptrace.Traces) ([]*model.Batch, error) { } } - return batches, nil + return batches } func resourceSpansToJaegerProto(rs ptrace.ResourceSpans) *model.Batch { @@ -90,7 +90,6 @@ func resourceToJaegerProtoProcess(resource pcommon.Resource) *model.Process { tags := make([]model.KeyValue, 0, attrsCount) process.Tags = appendTagsFromResourceAttributes(tags, attrs) return process - } func appendTagsFromResourceAttributes(dest []model.KeyValue, attrs pcommon.Map) []model.KeyValue { @@ -355,7 +354,6 @@ func getErrorTagFromStatusCode(statusCode ptrace.StatusCode) (model.KeyValue, bo }, true } return model.KeyValue{}, false - } func getTagFromStatusMsg(statusMsg string) (model.KeyValue, bool) { @@ -388,7 +386,7 @@ func getTagsFromInstrumentationLibrary(il pcommon.InstrumentationScope) ([]model var keyValues []model.KeyValue if ilName := il.Name(); ilName != "" { kv := model.KeyValue{ - Key: conventions.OtelLibraryName, + Key: conventions.AttributeOtelScopeName, VStr: ilName, VType: model.ValueType_STRING, } @@ -396,7 +394,7 @@ func getTagsFromInstrumentationLibrary(il pcommon.InstrumentationScope) ([]model } if ilVersion := il.Version(); ilVersion != "" { kv := model.KeyValue{ - Key: conventions.OtelLibraryVersion, + Key: conventions.AttributeOtelScopeVersion, VStr: ilVersion, VType: model.ValueType_STRING, } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus/oc_to_traces.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus/oc_to_traces.go index 92c230e6f8b..188dd2a0df9 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus/oc_to_traces.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus/oc_to_traces.go @@ -12,7 +12,7 @@ import ( "go.opencensus.io/trace" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/ptrace" - conventions "go.opentelemetry.io/collector/semconv/v1.6.1" + conventions "go.opentelemetry.io/collector/semconv/v1.12.0" "google.golang.org/protobuf/types/known/wrapperspb" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/occonventions" diff --git 
a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus/traces_to_oc.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus/traces_to_oc.go index 9d6dae3841d..0cea312203c 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus/traces_to_oc.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus/traces_to_oc.go @@ -13,7 +13,7 @@ import ( "go.opencensus.io/trace" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/ptrace" - conventions "go.opentelemetry.io/collector/semconv/v1.6.1" + conventions "go.opentelemetry.io/collector/semconv/v1.12.0" "google.golang.org/protobuf/types/known/wrapperspb" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/occonventions" @@ -159,7 +159,6 @@ func spanKindToOCAttribute(kind ptrace.SpanKind) *octrace.AttributeValue { case ptrace.SpanKindServer: // explicitly handled as SpanKind case ptrace.SpanKindClient: // explicitly handled as SpanKind default: - } if string(ocKind) == "" { diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/zipkinv1/json.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/zipkinv1/json.go index ee072550991..b45e7b18878 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/zipkinv1/json.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/zipkinv1/json.go @@ -167,7 +167,6 @@ func jsonBinAnnotationsToSpanAttributes(span ptrace.Span, binAnnotations []*bina sMapper := &statusMapper{} var localComponent string for _, binAnnotation := range binAnnotations { - if binAnnotation.Endpoint != nil && binAnnotation.Endpoint.ServiceName != "" { fallbackServiceName = binAnnotation.Endpoint.ServiceName } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/zipkinv2/from_translator.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/zipkinv2/from_translator.go index 5358aee8c38..29d3ca6fa80 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/zipkinv2/from_translator.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/zipkinv2/from_translator.go @@ -27,9 +27,7 @@ const ( spanLinkDataFormat = "%s|%s|%s|%s|%d" ) -var ( - sampled = true -) +var sampled = true // FromTranslator converts from pdata to Zipkin data model. 
type FromTranslator struct{} @@ -98,7 +96,6 @@ func spanToZipkinSpan( localServiceName string, zTags map[string]string, ) (*zipkinmodel.SpanModel, error) { - tags := aggregateSpanTags(span, zTags) zs := &zipkinmodel.SpanModel{} @@ -318,7 +315,6 @@ func zipkinEndpointFromTags( remoteEndpoint bool, redundantKeys map[string]bool, ) (endpoint *zipkinmodel.Endpoint) { - serviceName := localServiceName if peerSvc, ok := zTags[conventions.AttributePeerService]; ok && remoteEndpoint { serviceName = peerSvc diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/README.md index 0ccf62e5e79..ba1c9b575e1 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/README.md +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/README.md @@ -3,20 +3,15 @@ | Status | | | ------------- |-----------| | Stability | [alpha]: traces, metrics, logs | -| Distributions | [core], [contrib], [aws], [liatrio], [observiq], [redhat], [splunk], [sumo] | +| Distributions | [core], [contrib], [k8s] | | Warnings | [Orphaned Telemetry, Other](#warnings) | | Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Aprocessor%2Ffilter%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Aprocessor%2Ffilter) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Aprocessor%2Ffilter%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Aprocessor%2Ffilter) | | [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@TylerHelmuth](https://www.github.com/TylerHelmuth), [@boostchicken](https://www.github.com/boostchicken) | -[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha +[alpha]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#alpha [core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol [contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib -[aws]: https://github.com/aws-observability/aws-otel-collector -[liatrio]: https://github.com/liatrio/liatrio-otel-collector -[observiq]: https://github.com/observIQ/observiq-otel-collector -[redhat]: https://github.com/os-observability/redhat-opentelemetry-collector -[splunk]: https://github.com/signalfx/splunk-otel-collector -[sumo]: https://github.com/SumoLogic/sumologic-otel-collector +[k8s]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-k8s The filterprocessor allows dropping spans, span events, metrics, datapoints, and logs from the collector. @@ -38,7 +33,7 @@ See the table below for details on each context and the fields it exposes. | `logs.log_record` | [Log](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/contexts/ottllog/README.md) | The OTTL allows the use of `and`, `or`, and `()` in conditions. 
-See [OTTL Boolean Expressions](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/README.md#boolean-expressions) for more details.
+See [OTTL Boolean Expressions](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/LANGUAGE.md#boolean-expressions) for more details.
 
 For conditions that apply to the same signal, such as spans and span events, if the "higher" level telemetry matches a condition and is dropped, the "lower" level condition will not be checked.
 This means that if a span is dropped but a span event condition was defined, the span event condition will not be checked for that span.
@@ -111,7 +106,7 @@ processors:
     error_mode: ignore
     metrics:
       datapoint:
-        - metric.name == "k8s.pod.phase" && value_int == 4
+        - metric.name == "k8s.pod.phase" and value_int == 4
 ```
 
 #### Dropping non-HTTP spans
@@ -149,23 +144,83 @@ In addition, the processor defines a few of its own functions:
 
 `HasAttrKeyOnDatapoint(key)`
 
 Returns `true` if the given key appears in the attribute map of any datapoint on a metric.
-`key` must be a string.
+`key` must be a string. You must use the `metrics.metric` context.
 
 Examples:
 
 - `HasAttrKeyOnDatapoint("http.method")`
 
+```yaml
+# Drops metrics containing the 'bad.metric' attribute key
+filter/keep_good_metrics:
+  error_mode: ignore
+  metrics:
+    metric:
+      - 'HasAttrKeyOnDatapoint("bad.metric")'
+```
+
 #### HasAttrOnDatapoint
 
 `HasAttrOnDatapoint(key, value)`
 
 Returns `true` if the given key and value appear in the attribute map of any datapoint on a metric.
-`key` and `value` must both be strings.
+`key` and `value` must both be strings. If the value of the attribute on the datapoint is not a string, `value` will be compared to `""`. You must use the `metrics.metric` context.
 
 Examples:
 
 - `HasAttrOnDatapoint("http.method", "GET")`
 
+```yaml
+# Drops metrics containing the 'bad.metric' attribute key and 'true' value
+filter/keep_good_metrics:
+  error_mode: ignore
+  metrics:
+    metric:
+      - 'HasAttrOnDatapoint("bad.metric", "true")'
+```
+
+## Troubleshooting
+
+When using OTTL you can enable debug logging in the collector to print out useful information,
+such as whether the condition matched and the TransformContext used in the condition, to help you troubleshoot
+why a condition is not behaving as you expect. This feature is very verbose, but provides you with an accurate
+view into how OTTL views the underlying data.
+ +```yaml +receivers: + filelog: + start_at: beginning + include: [ /Users/tylerhelmuth/projects/opentelemetry-collector-contrib/local/test.log ] + + +processors: + filter: + error_mode: ignore + logs: + log_record: + - body == "test" + +exporters: + debug: + +service: + telemetry: + logs: + level: debug + pipelines: + logs: + receivers: + - filelog + processors: + - filter + exporters: + - debug +``` + +``` +2024-05-29T16:47:04.362-0600 debug ottl@v0.101.0/parser.go:338 condition evaluation result {"kind": "processor", "name": "filter", "pipeline": "logs", "condition": "body == \"test\"", "match": true, "TransformContext": {"resource": {"attributes": {}, "dropped_attribute_count": 0}, "scope": {"attributes": {}, "dropped_attribute_count": 0, "name": "", "version": ""}, "log_record": {"attributes": {"log.file.name": "test.log"}, "body": "test", "dropped_attribute_count": 0, "flags": 0, "observed_time_unix_nano": 1717022824262063000, "severity_number": 0, "severity_text": "", "span_id": "", "time_unix_nano": 0, "trace_id": ""}, "cache": {}}} +``` + ## Warnings In general, understand your data before using the filter processor. diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/documentation.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/documentation.md new file mode 100644 index 00000000000..d82c6d106bc --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/documentation.md @@ -0,0 +1,31 @@ +[comment]: <> (Code generated by mdatagen. DO NOT EDIT.) + +# filter + +## Internal Telemetry + +The following telemetry is emitted by this component. + +### otelcol_processor_filter_datapoints.filtered + +Number of metric data points dropped by the filter processor + +| Unit | Metric Type | Value Type | Monotonic | +| ---- | ----------- | ---------- | --------- | +| 1 | Sum | Int | true | + +### otelcol_processor_filter_logs.filtered + +Number of logs dropped by the filter processor + +| Unit | Metric Type | Value Type | Monotonic | +| ---- | ----------- | ---------- | --------- | +| 1 | Sum | Int | true | + +### otelcol_processor_filter_spans.filtered + +Number of spans dropped by the filter processor + +| Unit | Metric Type | Value Type | Monotonic | +| ---- | ----------- | ---------- | --------- | +| 1 | Sum | Int | true | diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/factory.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/factory.go index 894a190b2c5..22c3a443f4b 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/factory.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/factory.go @@ -36,7 +36,7 @@ func createDefaultConfig() component.Config { func createMetricsProcessor( ctx context.Context, - set processor.CreateSettings, + set processor.Settings, cfg component.Config, nextConsumer consumer.Metrics, ) (processor.Metrics, error) { @@ -44,7 +44,7 @@ func createMetricsProcessor( if err != nil { return nil, err } - return processorhelper.NewMetricsProcessor( + return processorhelper.NewMetrics( ctx, set, cfg, @@ -55,7 +55,7 @@ func createMetricsProcessor( func createLogsProcessor( ctx context.Context, - set processor.CreateSettings, + set processor.Settings, cfg component.Config, nextConsumer consumer.Logs, ) (processor.Logs, error) { @@ 
-63,7 +63,7 @@ func createLogsProcessor( if err != nil { return nil, err } - return processorhelper.NewLogsProcessor( + return processorhelper.NewLogs( ctx, set, cfg, @@ -74,7 +74,7 @@ func createLogsProcessor( func createTracesProcessor( ctx context.Context, - set processor.CreateSettings, + set processor.Settings, cfg component.Config, nextConsumer consumer.Traces, ) (processor.Traces, error) { @@ -82,7 +82,7 @@ func createTracesProcessor( if err != nil { return nil, err } - return processorhelper.NewTracesProcessor( + return processorhelper.NewTraces( ctx, set, cfg, diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/internal/metadata/generated_status.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/internal/metadata/generated_status.go index 93587e1c6d0..87b7737b797 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/internal/metadata/generated_status.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/internal/metadata/generated_status.go @@ -4,12 +4,11 @@ package metadata import ( "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/trace" ) var ( - Type = component.MustNewType("filter") + Type = component.MustNewType("filter") + ScopeName = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor" ) const ( @@ -17,11 +16,3 @@ const ( MetricsStability = component.StabilityLevelAlpha LogsStability = component.StabilityLevelAlpha ) - -func Meter(settings component.TelemetrySettings) metric.Meter { - return settings.MeterProvider.Meter("otelcol/filter") -} - -func Tracer(settings component.TelemetrySettings) trace.Tracer { - return settings.TracerProvider.Tracer("otelcol/filter") -} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/internal/metadata/generated_telemetry.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/internal/metadata/generated_telemetry.go new file mode 100644 index 00000000000..5f28c91665d --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/internal/metadata/generated_telemetry.go @@ -0,0 +1,79 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "errors" + + "go.opentelemetry.io/otel/metric" + noopmetric "go.opentelemetry.io/otel/metric/noop" + "go.opentelemetry.io/otel/trace" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configtelemetry" +) + +func Meter(settings component.TelemetrySettings) metric.Meter { + return settings.MeterProvider.Meter("github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor") +} + +func Tracer(settings component.TelemetrySettings) trace.Tracer { + return settings.TracerProvider.Tracer("github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor") +} + +// TelemetryBuilder provides an interface for components to report telemetry +// as defined in metadata and user config. +type TelemetryBuilder struct { + meter metric.Meter + ProcessorFilterDatapointsFiltered metric.Int64Counter + ProcessorFilterLogsFiltered metric.Int64Counter + ProcessorFilterSpansFiltered metric.Int64Counter +} + +// TelemetryBuilderOption applies changes to default builder. 
+type TelemetryBuilderOption interface { + apply(*TelemetryBuilder) +} + +type telemetryBuilderOptionFunc func(mb *TelemetryBuilder) + +func (tbof telemetryBuilderOptionFunc) apply(mb *TelemetryBuilder) { + tbof(mb) +} + +// NewTelemetryBuilder provides a struct with methods to update all internal telemetry +// for a component +func NewTelemetryBuilder(settings component.TelemetrySettings, options ...TelemetryBuilderOption) (*TelemetryBuilder, error) { + builder := TelemetryBuilder{} + for _, op := range options { + op.apply(&builder) + } + builder.meter = Meter(settings) + var err, errs error + builder.ProcessorFilterDatapointsFiltered, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_processor_filter_datapoints.filtered", + metric.WithDescription("Number of metric data points dropped by the filter processor"), + metric.WithUnit("1"), + ) + errs = errors.Join(errs, err) + builder.ProcessorFilterLogsFiltered, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_processor_filter_logs.filtered", + metric.WithDescription("Number of logs dropped by the filter processor"), + metric.WithUnit("1"), + ) + errs = errors.Join(errs, err) + builder.ProcessorFilterSpansFiltered, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_processor_filter_spans.filtered", + metric.WithDescription("Number of spans dropped by the filter processor"), + metric.WithUnit("1"), + ) + errs = errors.Join(errs, err) + return &builder, errs +} + +func getLeveledMeter(meter metric.Meter, cfgLevel, srvLevel configtelemetry.Level) metric.Meter { + if cfgLevel <= srvLevel { + return meter + } + return noopmetric.Meter{} +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/logs.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/logs.go index 421eea9c8b6..7cd8be05ec1 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/logs.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/logs.go @@ -8,6 +8,7 @@ import ( "fmt" "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pipeline" "go.opentelemetry.io/collector/processor" "go.opentelemetry.io/collector/processor/processorhelper" "go.uber.org/multierr" @@ -22,16 +23,16 @@ import ( type filterLogProcessor struct { skipExpr expr.BoolExpr[ottllog.TransformContext] - telemetry *filterProcessorTelemetry + telemetry *filterTelemetry logger *zap.Logger } -func newFilterLogsProcessor(set processor.CreateSettings, cfg *Config) (*filterLogProcessor, error) { +func newFilterLogsProcessor(set processor.Settings, cfg *Config) (*filterLogProcessor, error) { flp := &filterLogProcessor{ logger: set.Logger, } - fpt, err := newfilterProcessorTelemetry(set) + fpt, err := newFilterTelemetry(set, pipeline.SignalLogs) if err != nil { return nil, fmt.Errorf("error creating filter processor telemetry: %w", err) } @@ -78,7 +79,7 @@ func (flp *filterLogProcessor) processLogs(ctx context.Context, ld plog.Logs) (p scope := sl.Scope() lrs := sl.LogRecords() lrs.RemoveIf(func(lr plog.LogRecord) bool { - skip, err := flp.skipExpr.Eval(ctx, ottllog.NewTransformContext(lr, scope, resource)) + skip, err := flp.skipExpr.Eval(ctx, ottllog.NewTransformContext(lr, scope, resource, sl, rl)) if err != nil { errors = 
multierr.Append(errors, err) return false @@ -92,7 +93,7 @@ func (flp *filterLogProcessor) processLogs(ctx context.Context, ld plog.Logs) (p }) logCountAfterFilters := ld.LogRecordCount() - flp.telemetry.record(triggerLogsDropped, int64(logCountBeforeFilters-logCountAfterFilters)) + flp.telemetry.record(ctx, int64(logCountBeforeFilters-logCountAfterFilters)) if errors != nil { flp.logger.Error("failed processing logs", zap.Error(errors)) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/metadata.yaml index 29e2e7f792c..bfccf18b937 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/metadata.yaml +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/metadata.yaml @@ -1,13 +1,36 @@ type: filter -scope_name: otelcol/filter status: class: processor stability: alpha: [traces, metrics, logs] - distributions: [core, contrib, splunk, observiq, sumo, aws, redhat, liatrio] + distributions: [core, contrib, k8s] warnings: [Orphaned Telemetry, Other] codeowners: active: [TylerHelmuth, boostchicken] tests: config: + +telemetry: + metrics: + processor_filter_datapoints.filtered: + enabled: true + description: Number of metric data points dropped by the filter processor + unit: "1" + sum: + value_type: int + monotonic: true + processor_filter_logs.filtered: + enabled: true + description: Number of logs dropped by the filter processor + unit: "1" + sum: + value_type: int + monotonic: true + processor_filter_spans.filtered: + enabled: true + description: Number of spans dropped by the filter processor + unit: "1" + sum: + value_type: int + monotonic: true diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/metrics.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/metrics.go index 10551d3dd11..655f4ba0b3f 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/metrics.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/metrics.go @@ -9,6 +9,7 @@ import ( "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pipeline" "go.opentelemetry.io/collector/processor" "go.opentelemetry.io/collector/processor/processorhelper" "go.uber.org/multierr" @@ -29,17 +30,17 @@ type filterMetricProcessor struct { skipResourceExpr expr.BoolExpr[ottlresource.TransformContext] skipMetricExpr expr.BoolExpr[ottlmetric.TransformContext] skipDataPointExpr expr.BoolExpr[ottldatapoint.TransformContext] - telemetry *filterProcessorTelemetry + telemetry *filterTelemetry logger *zap.Logger } -func newFilterMetricProcessor(set processor.CreateSettings, cfg *Config) (*filterMetricProcessor, error) { +func newFilterMetricProcessor(set processor.Settings, cfg *Config) (*filterMetricProcessor, error) { var err error fsp := &filterMetricProcessor{ logger: set.Logger, } - fpt, err := newfilterProcessorTelemetry(set) + fpt, err := newFilterTelemetry(set, pipeline.SignalMetrics) if err != nil { return nil, fmt.Errorf("error creating filter processor telemetry: %w", err) } @@ -122,7 +123,7 @@ func (fmp *filterMetricProcessor) processMetrics(ctx context.Context, md pmetric md.ResourceMetrics().RemoveIf(func(rmetrics pmetric.ResourceMetrics) 
bool { resource := rmetrics.Resource() if fmp.skipResourceExpr != nil { - skip, err := fmp.skipResourceExpr.Eval(ctx, ottlresource.NewTransformContext(resource)) + skip, err := fmp.skipResourceExpr.Eval(ctx, ottlresource.NewTransformContext(resource, rmetrics)) if err != nil { errors = multierr.Append(errors, err) return false @@ -135,7 +136,7 @@ func (fmp *filterMetricProcessor) processMetrics(ctx context.Context, md pmetric scope := smetrics.Scope() smetrics.Metrics().RemoveIf(func(metric pmetric.Metric) bool { if fmp.skipMetricExpr != nil { - skip, err := fmp.skipMetricExpr.Eval(ctx, ottlmetric.NewTransformContext(metric, smetrics.Metrics(), scope, resource)) + skip, err := fmp.skipMetricExpr.Eval(ctx, ottlmetric.NewTransformContext(metric, smetrics.Metrics(), scope, resource, smetrics, rmetrics)) if err != nil { errors = multierr.Append(errors, err) } @@ -156,7 +157,7 @@ func (fmp *filterMetricProcessor) processMetrics(ctx context.Context, md pmetric errors = multierr.Append(errors, fmp.handleHistogramDataPoints(ctx, metric.Histogram().DataPoints(), metric, smetrics.Metrics(), scope, resource)) return metric.Histogram().DataPoints().Len() == 0 case pmetric.MetricTypeExponentialHistogram: - errors = multierr.Append(errors, fmp.handleExponetialHistogramDataPoints(ctx, metric.ExponentialHistogram().DataPoints(), metric, smetrics.Metrics(), scope, resource)) + errors = multierr.Append(errors, fmp.handleExponentialHistogramDataPoints(ctx, metric.ExponentialHistogram().DataPoints(), metric, smetrics.Metrics(), scope, resource)) return metric.ExponentialHistogram().DataPoints().Len() == 0 case pmetric.MetricTypeSummary: errors = multierr.Append(errors, fmp.handleSummaryDataPoints(ctx, metric.Summary().DataPoints(), metric, smetrics.Metrics(), scope, resource)) @@ -173,7 +174,7 @@ func (fmp *filterMetricProcessor) processMetrics(ctx context.Context, md pmetric }) metricDataPointCountAfterFilters := md.DataPointCount() - fmp.telemetry.record(triggerMetricDataPointsDropped, int64(metricDataPointCountBeforeFilters-metricDataPointCountAfterFilters)) + fmp.telemetry.record(ctx, int64(metricDataPointCountBeforeFilters-metricDataPointCountAfterFilters)) if errors != nil { fmp.logger.Error("failed processing metrics", zap.Error(errors)) @@ -259,7 +260,7 @@ func newResExpr(mp *filterconfig.MetricMatchProperties) (expr.BoolExpr[ottlresou func (fmp *filterMetricProcessor) handleNumberDataPoints(ctx context.Context, dps pmetric.NumberDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource) error { var errors error dps.RemoveIf(func(datapoint pmetric.NumberDataPoint) bool { - skip, err := fmp.skipDataPointExpr.Eval(ctx, ottldatapoint.NewTransformContext(datapoint, metric, metrics, is, resource)) + skip, err := fmp.skipDataPointExpr.Eval(ctx, ottldatapoint.NewTransformContext(datapoint, metric, metrics, is, resource, pmetric.NewScopeMetrics(), pmetric.NewResourceMetrics())) if err != nil { errors = multierr.Append(errors, err) return false @@ -272,7 +273,7 @@ func (fmp *filterMetricProcessor) handleNumberDataPoints(ctx context.Context, dp func (fmp *filterMetricProcessor) handleHistogramDataPoints(ctx context.Context, dps pmetric.HistogramDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource) error { var errors error dps.RemoveIf(func(datapoint pmetric.HistogramDataPoint) bool { - skip, err := fmp.skipDataPointExpr.Eval(ctx, 
ottldatapoint.NewTransformContext(datapoint, metric, metrics, is, resource)) + skip, err := fmp.skipDataPointExpr.Eval(ctx, ottldatapoint.NewTransformContext(datapoint, metric, metrics, is, resource, pmetric.NewScopeMetrics(), pmetric.NewResourceMetrics())) if err != nil { errors = multierr.Append(errors, err) return false @@ -282,10 +283,10 @@ func (fmp *filterMetricProcessor) handleHistogramDataPoints(ctx context.Context, return errors } -func (fmp *filterMetricProcessor) handleExponetialHistogramDataPoints(ctx context.Context, dps pmetric.ExponentialHistogramDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource) error { +func (fmp *filterMetricProcessor) handleExponentialHistogramDataPoints(ctx context.Context, dps pmetric.ExponentialHistogramDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource) error { var errors error dps.RemoveIf(func(datapoint pmetric.ExponentialHistogramDataPoint) bool { - skip, err := fmp.skipDataPointExpr.Eval(ctx, ottldatapoint.NewTransformContext(datapoint, metric, metrics, is, resource)) + skip, err := fmp.skipDataPointExpr.Eval(ctx, ottldatapoint.NewTransformContext(datapoint, metric, metrics, is, resource, pmetric.NewScopeMetrics(), pmetric.NewResourceMetrics())) if err != nil { errors = multierr.Append(errors, err) return false @@ -298,7 +299,7 @@ func (fmp *filterMetricProcessor) handleExponetialHistogramDataPoints(ctx contex func (fmp *filterMetricProcessor) handleSummaryDataPoints(ctx context.Context, dps pmetric.SummaryDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource) error { var errors error dps.RemoveIf(func(datapoint pmetric.SummaryDataPoint) bool { - skip, err := fmp.skipDataPointExpr.Eval(ctx, ottldatapoint.NewTransformContext(datapoint, metric, metrics, is, resource)) + skip, err := fmp.skipDataPointExpr.Eval(ctx, ottldatapoint.NewTransformContext(datapoint, metric, metrics, is, resource, pmetric.NewScopeMetrics(), pmetric.NewResourceMetrics())) if err != nil { errors = multierr.Append(errors, err) return false diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/telemetry.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/telemetry.go index 9267a142475..dd21cf783ce 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/telemetry.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/telemetry.go @@ -5,84 +5,45 @@ package filterprocessor // import "github.com/open-telemetry/opentelemetry-colle import ( "context" + "fmt" + "go.opentelemetry.io/collector/pipeline" "go.opentelemetry.io/collector/processor" - "go.opentelemetry.io/collector/processor/processorhelper" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/internal/metadata" ) -type trigger int - -const ( - triggerMetricDataPointsDropped trigger = iota - triggerLogsDropped - triggerSpansDropped -) - -type filterProcessorTelemetry struct { - exportCtx context.Context - - processorAttr []attribute.KeyValue - - datapointsFiltered metric.Int64Counter - logsFiltered metric.Int64Counter - spansFiltered metric.Int64Counter +type filterTelemetry struct { + attr metric.MeasurementOption + 
counter metric.Int64Counter } -func newfilterProcessorTelemetry(set processor.CreateSettings) (*filterProcessorTelemetry, error) { - processorID := set.ID.String() - - fpt := &filterProcessorTelemetry{ - processorAttr: []attribute.KeyValue{attribute.String(metadata.Type.String(), processorID)}, - exportCtx: context.Background(), - } - - counter, err := metadata.Meter(set.TelemetrySettings).Int64Counter( - processorhelper.BuildCustomMetricName(metadata.Type.String(), "datapoints.filtered"), - metric.WithDescription("Number of metric data points dropped by the filter processor"), - metric.WithUnit("1"), - ) +func newFilterTelemetry(set processor.Settings, signal pipeline.Signal) (*filterTelemetry, error) { + telemetryBuilder, err := metadata.NewTelemetryBuilder(set.TelemetrySettings) if err != nil { return nil, err } - fpt.datapointsFiltered = counter - counter, err = metadata.Meter(set.TelemetrySettings).Int64Counter( - processorhelper.BuildCustomMetricName(metadata.Type.String(), "logs.filtered"), - metric.WithDescription("Number of logs dropped by the filter processor"), - metric.WithUnit("1"), - ) - if err != nil { - return nil, err - } - fpt.logsFiltered = counter - - counter, err = metadata.Meter(set.TelemetrySettings).Int64Counter( - processorhelper.BuildCustomMetricName(metadata.Type.String(), "spans.filtered"), - metric.WithDescription("Number of spans dropped by the filter processor"), - metric.WithUnit("1"), - ) - if err != nil { - return nil, err + var counter metric.Int64Counter + switch signal { + case pipeline.SignalMetrics: + counter = telemetryBuilder.ProcessorFilterDatapointsFiltered + case pipeline.SignalLogs: + counter = telemetryBuilder.ProcessorFilterLogsFiltered + case pipeline.SignalTraces: + counter = telemetryBuilder.ProcessorFilterSpansFiltered + default: + return nil, fmt.Errorf("unsupported signal type: %v", signal) } - fpt.spansFiltered = counter - return fpt, nil + return &filterTelemetry{ + attr: metric.WithAttributeSet(attribute.NewSet(attribute.String(metadata.Type.String(), set.ID.String()))), + counter: counter, + }, nil } -func (fpt *filterProcessorTelemetry) record(trigger trigger, dropped int64) { - var triggerMeasure metric.Int64Counter - switch trigger { - case triggerMetricDataPointsDropped: - triggerMeasure = fpt.datapointsFiltered - case triggerLogsDropped: - triggerMeasure = fpt.logsFiltered - case triggerSpansDropped: - triggerMeasure = fpt.spansFiltered - } - - triggerMeasure.Add(fpt.exportCtx, dropped, metric.WithAttributes(fpt.processorAttr...)) +func (fpt *filterTelemetry) record(ctx context.Context, dropped int64) { + fpt.counter.Add(ctx, dropped, fpt.attr) } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/traces.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/traces.go index ff64e85470d..3b444cd5897 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/traces.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/traces.go @@ -8,6 +8,7 @@ import ( "fmt" "go.opentelemetry.io/collector/pdata/ptrace" + "go.opentelemetry.io/collector/pipeline" "go.opentelemetry.io/collector/processor" "go.opentelemetry.io/collector/processor/processorhelper" "go.uber.org/multierr" @@ -23,17 +24,17 @@ import ( type filterSpanProcessor struct { skipSpanExpr expr.BoolExpr[ottlspan.TransformContext] skipSpanEventExpr expr.BoolExpr[ottlspanevent.TransformContext] - 
telemetry *filterProcessorTelemetry + telemetry *filterTelemetry logger *zap.Logger } -func newFilterSpansProcessor(set processor.CreateSettings, cfg *Config) (*filterSpanProcessor, error) { +func newFilterSpansProcessor(set processor.Settings, cfg *Config) (*filterSpanProcessor, error) { var err error fsp := &filterSpanProcessor{ logger: set.Logger, } - fpt, err := newfilterProcessorTelemetry(set) + fpt, err := newFilterTelemetry(set, pipeline.SignalTraces) if err != nil { return nil, fmt.Errorf("error creating filter processor telemetry: %w", err) } @@ -93,7 +94,7 @@ func (fsp *filterSpanProcessor) processTraces(ctx context.Context, td ptrace.Tra scope := ss.Scope() ss.Spans().RemoveIf(func(span ptrace.Span) bool { if fsp.skipSpanExpr != nil { - skip, err := fsp.skipSpanExpr.Eval(ctx, ottlspan.NewTransformContext(span, scope, resource)) + skip, err := fsp.skipSpanExpr.Eval(ctx, ottlspan.NewTransformContext(span, scope, resource, ss, rs)) if err != nil { errors = multierr.Append(errors, err) return false @@ -104,7 +105,7 @@ func (fsp *filterSpanProcessor) processTraces(ctx context.Context, td ptrace.Tra } if fsp.skipSpanEventExpr != nil { span.Events().RemoveIf(func(spanEvent ptrace.SpanEvent) bool { - skip, err := fsp.skipSpanEventExpr.Eval(ctx, ottlspanevent.NewTransformContext(spanEvent, span, scope, resource)) + skip, err := fsp.skipSpanEventExpr.Eval(ctx, ottlspanevent.NewTransformContext(spanEvent, span, scope, resource, ss, rs)) if err != nil { errors = multierr.Append(errors, err) return false @@ -120,7 +121,7 @@ func (fsp *filterSpanProcessor) processTraces(ctx context.Context, td ptrace.Tra }) spanCountAfterFilters := td.SpanCount() - fsp.telemetry.record(triggerSpansDropped, int64(spanCountBeforeFilters-spanCountAfterFilters)) + fsp.telemetry.record(ctx, int64(spanCountBeforeFilters-spanCountAfterFilters)) if errors != nil { fsp.logger.Error("failed processing traces", zap.Error(errors)) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/README.md index f94995852db..6039fefa590 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/README.md +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/README.md @@ -4,13 +4,14 @@ | Status | | | ------------- |-----------| | Stability | [beta]: traces | -| Distributions | [core], [contrib] | +| Distributions | [core], [contrib], [k8s] | | Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Areceiver%2Fjaeger%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Areceiver%2Fjaeger) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Areceiver%2Fjaeger%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Areceiver%2Fjaeger) | | [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@yurishkuro](https://www.github.com/yurishkuro) | -[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta +[beta]: 
https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#beta
 [core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol
 [contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib
+[k8s]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-k8s
 
 Receives trace data in [Jaeger](https://www.jaegertracing.io/) format.
 
@@ -22,13 +23,12 @@ named under the `protocols` object for the jaeger receiver to start.
 The below protocols are supported, each of which supports an optional `endpoint` object configuration parameter.
 
-- `grpc` (default `endpoint` = 0.0.0.0:14250)
-- `thrift_binary` (default `endpoint` = 0.0.0.0:6832)
-- `thrift_compact` (default `endpoint` = 0.0.0.0:6831)
-- `thrift_http` (default `endpoint` = 0.0.0.0:14268)
+- `grpc` (default `endpoint` = localhost:14250)
+- `thrift_binary` (default `endpoint` = localhost:6832)
+- `thrift_compact` (default `endpoint` = localhost:6831)
+- `thrift_http` (default `endpoint` = localhost:14268)
 
-The `component.UseLocalHostAsDefaultHost` feature gate changes these endpoints to localhost:14250, localhost:6832,
-localhost:6831 and localhost:14268. This will become the default in a future release.
+See our [security best practices doc](https://opentelemetry.io/docs/security/config-best-practices/#protect-against-denial-of-service-attacks) to understand how to set the endpoint in different environments.
 
 Examples:
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/config.go
index 4ed9dda665c..adb5b087778 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/config.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/config.go
@@ -4,6 +4,7 @@
 package jaegerreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver"
 
 import (
+	"errors"
 	"fmt"
 	"net"
 	"strconv"
@@ -36,10 +37,10 @@ type RemoteSamplingConfig struct {
 
 // Protocols is the configuration for the supported protocols.
 type Protocols struct {
-	GRPC          *configgrpc.ServerConfig `mapstructure:"grpc"`
-	ThriftHTTP    *confighttp.ServerConfig `mapstructure:"thrift_http"`
-	ThriftBinary  *ProtocolUDP             `mapstructure:"thrift_binary"`
-	ThriftCompact *ProtocolUDP             `mapstructure:"thrift_compact"`
+	GRPC             *configgrpc.ServerConfig `mapstructure:"grpc"`
+	ThriftHTTP       *confighttp.ServerConfig `mapstructure:"thrift_http"`
+	ThriftBinaryUDP  *ProtocolUDP             `mapstructure:"thrift_binary"`
+	ThriftCompactUDP *ProtocolUDP             `mapstructure:"thrift_compact"`
 }
 
 // ProtocolUDP is the configuration for a UDP protocol.
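The rename above changes only the Go field names (`ThriftBinary` to `ThriftBinaryUDP`, `ThriftCompact` to `ThriftCompactUDP`); the `mapstructure` tags keep the original `thrift_binary` and `thrift_compact` keys, so user-facing collector YAML is unaffected. A minimal sketch of that tag-driven decoding, assuming the collector's `confmap` package and using stand-in types rather than the receiver's real ones:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/confmap"
)

// protocolUDP is a stand-in for the receiver's ProtocolUDP.
type protocolUDP struct {
	Endpoint string `mapstructure:"endpoint"`
}

// protocols mirrors the renamed fields in config.go above: the Go names
// changed, the mapstructure keys did not.
type protocols struct {
	ThriftBinaryUDP  *protocolUDP `mapstructure:"thrift_binary"`
	ThriftCompactUDP *protocolUDP `mapstructure:"thrift_compact"`
}

func main() {
	conf := confmap.NewFromStringMap(map[string]any{
		"thrift_binary":  map[string]any{"endpoint": "localhost:6832"},
		"thrift_compact": map[string]any{"endpoint": "localhost:6831"},
	})
	var p protocols
	if err := conf.Unmarshal(&p); err != nil {
		panic(err)
	}
	// Prints: localhost:6832 localhost:6831
	fmt.Println(p.ThriftBinaryUDP.Endpoint, p.ThriftCompactUDP.Endpoint)
}
```

Pinning the tags while renaming the fields is what lets the receiver mark these protocols as UDP in the API without a breaking configuration change.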
@@ -72,16 +73,18 @@ type Config struct { RemoteSampling *RemoteSamplingConfig `mapstructure:"remote_sampling"` } -var _ component.Config = (*Config)(nil) -var _ confmap.Unmarshaler = (*Config)(nil) +var ( + _ component.Config = (*Config)(nil) + _ confmap.Unmarshaler = (*Config)(nil) +) // Validate checks the receiver configuration is valid func (cfg *Config) Validate() error { if cfg.GRPC == nil && cfg.ThriftHTTP == nil && - cfg.ThriftBinary == nil && - cfg.ThriftCompact == nil { - return fmt.Errorf("must specify at least one protocol when using the Jaeger receiver") + cfg.ThriftBinaryUDP == nil && + cfg.ThriftCompactUDP == nil { + return errors.New("must specify at least one protocol when using the Jaeger receiver") } if cfg.GRPC != nil { @@ -96,21 +99,21 @@ func (cfg *Config) Validate() error { } } - if cfg.ThriftBinary != nil { - if err := checkPortFromEndpoint(cfg.ThriftBinary.Endpoint); err != nil { + if cfg.ThriftBinaryUDP != nil { + if err := checkPortFromEndpoint(cfg.ThriftBinaryUDP.Endpoint); err != nil { return fmt.Errorf("invalid port number for the Thrift UDP Binary endpoint: %w", err) } } - if cfg.ThriftCompact != nil { - if err := checkPortFromEndpoint(cfg.ThriftCompact.Endpoint); err != nil { + if cfg.ThriftCompactUDP != nil { + if err := checkPortFromEndpoint(cfg.ThriftCompactUDP.Endpoint); err != nil { return fmt.Errorf("invalid port number for the Thrift UDP Compact endpoint: %w", err) } } if cfg.RemoteSampling != nil { if disableJaegerReceiverRemoteSampling.IsEnabled() { - return fmt.Errorf("remote sampling config detected in the Jaeger receiver; use the `jaegerremotesampling` extension instead") + return errors.New("remote sampling config detected in the Jaeger receiver; use the `jaegerremotesampling` extension instead") } } @@ -120,7 +123,7 @@ func (cfg *Config) Validate() error { // Unmarshal a config.Parser into the config struct. 
func (cfg *Config) Unmarshal(componentParser *confmap.Conf) error { if componentParser == nil || len(componentParser.AllKeys()) == 0 { - return fmt.Errorf("empty config for Jaeger receiver") + return errors.New("empty config for Jaeger receiver") } // UnmarshalExact will not set struct properties to nil even if no key is provided, @@ -142,10 +145,10 @@ func (cfg *Config) Unmarshal(componentParser *confmap.Conf) error { cfg.ThriftHTTP = nil } if !protocols.IsSet(protoThriftBinary) { - cfg.ThriftBinary = nil + cfg.ThriftBinaryUDP = nil } if !protocols.IsSet(protoThriftCompact) { - cfg.ThriftCompact = nil + cfg.ThriftCompactUDP = nil } return nil @@ -163,7 +166,7 @@ func checkPortFromEndpoint(endpoint string) error { return fmt.Errorf("endpoint port is not a number: %w", err) } if port < 1 || port > 65535 { - return fmt.Errorf("port number must be between 1 and 65535") + return errors.New("port number must be between 1 and 65535") } return nil } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/factory.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/factory.go index 8009ed1b2d9..56e4b6084bd 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/factory.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/factory.go @@ -7,7 +7,6 @@ package jaegerreceiver // import "github.com/open-telemetry/opentelemetry-collec import ( "context" - "sync" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configgrpc" @@ -16,9 +15,7 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/featuregate" "go.opentelemetry.io/collector/receiver" - "go.uber.org/zap" - "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/localhostgate" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/internal/metadata" ) @@ -29,11 +26,11 @@ const ( protoThriftBinary = "thrift_binary" protoThriftCompact = "thrift_compact" - // Default ports to bind to. - defaultGRPCPort = 14250 - defaultHTTPPort = 14268 - defaultThriftCompactPort = 6831 - defaultThriftBinaryPort = 6832 + // Default endpoints to bind to. + defaultGRPCEndpoint = "localhost:14250" + defaultHTTPEndpoint = "localhost:14268" + defaultThriftCompactEndpoint = "localhost:6831" + defaultThriftBinaryEndpoint = "localhost:6832" ) var disableJaegerReceiverRemoteSampling = featuregate.GlobalRegistry().MustRegister( @@ -42,15 +39,6 @@ var disableJaegerReceiverRemoteSampling = featuregate.GlobalRegistry().MustRegis featuregate.WithRegisterDescription("When enabled, the Jaeger Receiver will fail to start when it is configured with remote_sampling config. When disabled, the receiver will start and the remote_sampling config will be no-op."), ) -var once sync.Once - -func logDeprecation(logger *zap.Logger) { - once.Do(func() { - logger.Warn("jaeger receiver will deprecate Thrift-gen and replace it with Proto-gen to be compatbible to jaeger 1.42.0 and higher. See https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/18485 for more details.") - - }) -} - // NewFactory creates a new Jaeger receiver factory. 
func NewFactory() receiver.Factory { return receiver.NewFactory( @@ -65,19 +53,19 @@ func createDefaultConfig() component.Config { Protocols: Protocols{ GRPC: &configgrpc.ServerConfig{ NetAddr: confignet.AddrConfig{ - Endpoint: localhostgate.EndpointForPort(defaultGRPCPort), + Endpoint: defaultGRPCEndpoint, Transport: confignet.TransportTypeTCP, }, }, ThriftHTTP: &confighttp.ServerConfig{ - Endpoint: localhostgate.EndpointForPort(defaultHTTPPort), + Endpoint: defaultHTTPEndpoint, }, - ThriftBinary: &ProtocolUDP{ - Endpoint: localhostgate.EndpointForPort(defaultThriftBinaryPort), + ThriftBinaryUDP: &ProtocolUDP{ + Endpoint: defaultThriftBinaryEndpoint, ServerConfigUDP: defaultServerConfigUDP(), }, - ThriftCompact: &ProtocolUDP{ - Endpoint: localhostgate.EndpointForPort(defaultThriftCompactPort), + ThriftCompactUDP: &ProtocolUDP{ + Endpoint: defaultThriftCompactEndpoint, ServerConfigUDP: defaultServerConfigUDP(), }, }, @@ -87,40 +75,20 @@ func createDefaultConfig() component.Config { // createTracesReceiver creates a trace receiver based on provided config. func createTracesReceiver( _ context.Context, - set receiver.CreateSettings, + set receiver.Settings, cfg component.Config, nextConsumer consumer.Traces, ) (receiver.Traces, error) { - logDeprecation(set.Logger) - // Convert settings in the source config to configuration struct // that Jaeger receiver understands. // Error handling for the conversion is done in the Validate function from the Config object itself. rCfg := cfg.(*Config) - var config configuration - // Set ports - if rCfg.Protocols.GRPC != nil { - config.GRPCServerConfig = *rCfg.Protocols.GRPC - } - - if rCfg.Protocols.ThriftHTTP != nil { - config.HTTPServerConfig = *rCfg.ThriftHTTP - } - - if rCfg.Protocols.ThriftBinary != nil { - config.AgentBinaryThrift = *rCfg.ThriftBinary - } - - if rCfg.Protocols.ThriftCompact != nil { - config.AgentCompactThrift = *rCfg.ThriftCompact - } - if rCfg.RemoteSampling != nil { set.Logger.Warn("You are using a deprecated no-op `remote_sampling` option which will be removed soon; use a `jaegerremotesampling` extension instead") } // Create the receiver. 
- return newJaegerReceiver(set.ID, &config, nextConsumer, set) + return newJaegerReceiver(set.ID, rCfg.Protocols, nextConsumer, set) } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/internal/metadata/generated_status.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/internal/metadata/generated_status.go index b9e247584ad..0d5b282e3d8 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/internal/metadata/generated_status.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/internal/metadata/generated_status.go @@ -7,7 +7,8 @@ import ( ) var ( - Type = component.MustNewType("jaeger") + Type = component.MustNewType("jaeger") + ScopeName = "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver" ) const ( diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/internal/metadata/generated_telemetry.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/internal/metadata/generated_telemetry.go deleted file mode 100644 index ec58059c781..00000000000 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/internal/metadata/generated_telemetry.go +++ /dev/null @@ -1,17 +0,0 @@ -// Code generated by mdatagen. DO NOT EDIT. - -package metadata - -import ( - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/trace" -) - -func Meter(settings component.TelemetrySettings) metric.Meter { - return settings.MeterProvider.Meter("otelcol/jaegerreceiver") -} - -func Tracer(settings component.TelemetrySettings) trace.Tracer { - return settings.TracerProvider.Tracer("otelcol/jaegerreceiver") -} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/metadata.yaml index 0fd95be9d3a..8ad64a435fd 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/metadata.yaml +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/metadata.yaml @@ -1,13 +1,10 @@ type: jaeger -scope_name: otelcol/jaegerreceiver status: class: receiver stability: beta: [traces] - distributions: - - core - - contrib + distributions: [core, contrib, k8s] codeowners: active: [yurishkuro] diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/trace_receiver.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/trace_receiver.go index 764b55312dd..af204faa68f 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/trace_receiver.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/trace_receiver.go @@ -15,8 +15,6 @@ import ( apacheThrift "github.com/apache/thrift/lib/go/thrift" "github.com/gorilla/mux" - "github.com/jaegertracing/jaeger/cmd/agent/app/configmanager" - "github.com/jaegertracing/jaeger/cmd/agent/app/httpserver" "github.com/jaegertracing/jaeger/cmd/agent/app/processors" "github.com/jaegertracing/jaeger/cmd/agent/app/servers" "github.com/jaegertracing/jaeger/cmd/agent/app/servers/thriftudp" @@ -24,12 +22,10 @@ import ( 
"github.com/jaegertracing/jaeger/pkg/metrics" "github.com/jaegertracing/jaeger/proto-gen/api_v2" "github.com/jaegertracing/jaeger/thrift-gen/agent" - "github.com/jaegertracing/jaeger/thrift-gen/baggage" "github.com/jaegertracing/jaeger/thrift-gen/jaeger" "github.com/jaegertracing/jaeger/thrift-gen/zipkincore" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/config/configgrpc" - "go.opentelemetry.io/collector/config/confighttp" + "go.opentelemetry.io/collector/component/componentstatus" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/receiver" "go.opentelemetry.io/collector/receiver/receiverhelper" @@ -39,24 +35,13 @@ import ( jaegertranslator "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger" ) -// configuration defines the behavior and the ports that -// the Jaeger receiver will use. -type configuration struct { - HTTPServerConfig confighttp.ServerConfig - GRPCServerConfig configgrpc.ServerConfig - - AgentCompactThrift ProtocolUDP - AgentBinaryThrift ProtocolUDP - AgentHTTPEndpoint string -} - // Receiver type is used to receive spans that were originally intended to be sent to Jaeger. // This receiver is basically a Jaeger collector. type jReceiver struct { nextConsumer consumer.Traces id component.ID - config *configuration + config Protocols grpc *grpc.Server collectorServer *http.Server @@ -66,7 +51,7 @@ type jReceiver struct { goroutines sync.WaitGroup - settings receiver.CreateSettings + settings receiver.Settings grpcObsrecv *receiverhelper.ObsReport httpObsrecv *receiverhelper.ObsReport @@ -82,20 +67,18 @@ const ( protobufFormat = "protobuf" ) -var ( - acceptedThriftFormats = map[string]struct{}{ - "application/x-thrift": {}, - "application/vnd.apache.thrift.binary": {}, - } -) +var acceptedThriftFormats = map[string]struct{}{ + "application/x-thrift": {}, + "application/vnd.apache.thrift.binary": {}, +} // newJaegerReceiver creates a TracesReceiver that receives traffic as a Jaeger collector, and // also as a Jaeger agent. 
func newJaegerReceiver( id component.ID, - config *configuration, + config Protocols, nextConsumer consumer.Traces, - set receiver.CreateSettings, + set receiver.Settings, ) (*jReceiver, error) { grpcObsrecv, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{ ReceiverID: id, @@ -168,21 +151,10 @@ func consumeTraces(ctx context.Context, batch *jaeger.Batch, consumer consumer.T return len(batch.Spans), consumer.ConsumeTraces(ctx, td) } -var _ agent.Agent = (*agentHandler)(nil) -var _ api_v2.CollectorServiceServer = (*jReceiver)(nil) -var _ configmanager.ClientConfigManager = (*notImplementedConfigManager)(nil) - -var errNotImplemented = fmt.Errorf("not implemented") - -type notImplementedConfigManager struct{} - -func (notImplementedConfigManager) GetSamplingStrategy(_ context.Context, _ string) (*api_v2.SamplingStrategyResponse, error) { - return nil, errNotImplemented -} - -func (notImplementedConfigManager) GetBaggageRestrictions(_ context.Context, _ string) ([]*baggage.BaggageRestriction, error) { - return nil, errNotImplemented -} +var ( + _ agent.Agent = (*agentHandler)(nil) + _ api_v2.CollectorServiceServer = (*jReceiver)(nil) +) type agentHandler struct { nextConsumer consumer.Traces @@ -223,11 +195,7 @@ func (jr *jReceiver) PostSpans(ctx context.Context, r *api_v2.PostSpansRequest) } func (jr *jReceiver) startAgent() error { - if jr.config == nil { - return nil - } - - if jr.config.AgentBinaryThrift.Endpoint != "" { + if jr.config.ThriftBinaryUDP != nil { obsrecv, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{ ReceiverID: jr.id, Transport: agentTransportBinary, @@ -241,14 +209,14 @@ func (jr *jReceiver) startAgent() error { nextConsumer: jr.nextConsumer, obsrecv: obsrecv, } - processor, err := jr.buildProcessor(jr.config.AgentBinaryThrift.Endpoint, jr.config.AgentBinaryThrift.ServerConfigUDP, apacheThrift.NewTBinaryProtocolFactoryConf(nil), h) + processor, err := jr.buildProcessor(jr.config.ThriftBinaryUDP.Endpoint, jr.config.ThriftBinaryUDP.ServerConfigUDP, apacheThrift.NewTBinaryProtocolFactoryConf(nil), h) if err != nil { return err } jr.agentProcessors = append(jr.agentProcessors, processor) } - if jr.config.AgentCompactThrift.Endpoint != "" { + if jr.config.ThriftCompactUDP != nil { obsrecv, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{ ReceiverID: jr.id, Transport: agentTransportCompact, @@ -261,7 +229,7 @@ func (jr *jReceiver) startAgent() error { nextConsumer: jr.nextConsumer, obsrecv: obsrecv, } - processor, err := jr.buildProcessor(jr.config.AgentCompactThrift.Endpoint, jr.config.AgentCompactThrift.ServerConfigUDP, apacheThrift.NewTCompactProtocolFactoryConf(nil), h) + processor, err := jr.buildProcessor(jr.config.ThriftCompactUDP.Endpoint, jr.config.ThriftCompactUDP.ServerConfigUDP, apacheThrift.NewTCompactProtocolFactoryConf(nil), h) if err != nil { return err } @@ -276,18 +244,6 @@ func (jr *jReceiver) startAgent() error { }(processor) } - if jr.config.AgentHTTPEndpoint != "" { - jr.agentServer = httpserver.NewHTTPServer(jr.config.AgentHTTPEndpoint, ¬ImplementedConfigManager{}, metrics.NullFactory, jr.settings.Logger) - - jr.goroutines.Add(1) - go func() { - defer jr.goroutines.Done() - if err := jr.agentServer.ListenAndServe(); !errors.Is(err, http.ErrServerClosed) && err != nil { - jr.settings.ReportStatus(component.NewFatalErrorEvent(fmt.Errorf("jaeger agent server error: %w", err))) - } - }() - } - return nil } @@ -369,20 +325,16 @@ func (jr *jReceiver) HandleThriftHTTPBatch(w http.ResponseWriter, r 
*http.Reques } func (jr *jReceiver) startCollector(ctx context.Context, host component.Host) error { - if jr.config == nil { - return nil - } - - if jr.config.HTTPServerConfig.Endpoint != "" { - cln, err := jr.config.HTTPServerConfig.ToListener(ctx) + if jr.config.ThriftHTTP != nil { + cln, err := jr.config.ThriftHTTP.ToListener(ctx) if err != nil { return fmt.Errorf("failed to bind to Collector address %q: %w", - jr.config.HTTPServerConfig.Endpoint, err) + jr.config.ThriftHTTP.Endpoint, err) } nr := mux.NewRouter() nr.HandleFunc("/api/traces", jr.HandleThriftHTTPBatch).Methods(http.MethodPost) - jr.collectorServer, err = jr.config.HTTPServerConfig.ToServer(ctx, host, jr.settings.TelemetrySettings, nr) + jr.collectorServer, err = jr.config.ThriftHTTP.ToServer(ctx, host, jr.settings.TelemetrySettings, nr) if err != nil { return err } @@ -391,21 +343,21 @@ func (jr *jReceiver) startCollector(ctx context.Context, host component.Host) er go func() { defer jr.goroutines.Done() if errHTTP := jr.collectorServer.Serve(cln); !errors.Is(errHTTP, http.ErrServerClosed) && errHTTP != nil { - jr.settings.ReportStatus(component.NewFatalErrorEvent(errHTTP)) + componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(errHTTP)) } }() } - if jr.config.GRPCServerConfig.NetAddr.Endpoint != "" { + if jr.config.GRPC != nil { var err error - jr.grpc, err = jr.config.GRPCServerConfig.ToServer(ctx, host, jr.settings.TelemetrySettings) + jr.grpc, err = jr.config.GRPC.ToServer(ctx, host, jr.settings.TelemetrySettings) if err != nil { return fmt.Errorf("failed to build the options for the Jaeger gRPC Collector: %w", err) } - ln, err := jr.config.GRPCServerConfig.NetAddr.Listen(ctx) + ln, err := jr.config.GRPC.NetAddr.Listen(ctx) if err != nil { - return fmt.Errorf("failed to bind to gRPC address %q: %w", jr.config.GRPCServerConfig.NetAddr, err) + return fmt.Errorf("failed to bind to gRPC address %q: %w", jr.config.GRPC.NetAddr, err) } api_v2.RegisterCollectorServiceServer(jr.grpc, jr) @@ -414,7 +366,7 @@ func (jr *jReceiver) startCollector(ctx context.Context, host component.Host) er go func() { defer jr.goroutines.Done() if errGrpc := jr.grpc.Serve(ln); !errors.Is(errGrpc, grpc.ErrServerStopped) && errGrpc != nil { - jr.settings.ReportStatus(component.NewFatalErrorEvent(errGrpc)) + componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(errGrpc)) } }() } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/README.md index 6432ee25658..b7b25c4eadb 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/README.md +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/README.md @@ -8,7 +8,7 @@ | Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Areceiver%2Fkafka%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Areceiver%2Fkafka) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Areceiver%2Fkafka%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Areceiver%2Fkafka) | | 
[Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@pavolloffay](https://www.github.com/pavolloffay), [@MovieStoreGuy](https://www.github.com/MovieStoreGuy) | -[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta +[beta]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#beta [core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol [contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib @@ -27,9 +27,11 @@ The following settings can be optionally configured: - `brokers` (default = localhost:9092): The list of kafka brokers - `resolve_canonical_bootstrap_servers_only` (default = false): Whether to resolve then reverse-lookup broker IPs during startup -- `topic` (default = otlp_spans for traces, otlp_metrics for metrics, otlp_logs for logs): The name of the kafka topic to read from -- `encoding` (default = otlp_proto): The encoding of the payload received from kafka. Available encodings: +- `topic` (default = otlp_spans for traces, otlp_metrics for metrics, otlp_logs for logs): The name of the kafka topic to read from. + Only one telemetry type may be used for a given topic. +- `encoding` (default = otlp_proto): The encoding of the payload received from kafka. Supports encoding extensions: the receiver first tries to load an encoding extension and falls back to the internal encodings below if no extension was loaded. Available internal encodings: - `otlp_proto`: the payload is deserialized to `ExportTraceServiceRequest`, `ExportLogsServiceRequest` or `ExportMetricsServiceRequest` respectively. + - `otlp_json`: the payload is deserialized to `ExportTraceServiceRequest`, `ExportLogsServiceRequest` or `ExportMetricsServiceRequest` respectively using JSON encoding. - `jaeger_proto`: the payload is deserialized to a single Jaeger proto `Span`. - `jaeger_json`: the payload is deserialized to a single Jaeger JSON Span using `jsonpb`. - `zipkin_proto`: the payload is deserialized into a list of Zipkin proto spans. @@ -42,6 +44,11 @@ The following settings can be optionally configured: - `group_id` (default = otel-collector): The consumer group that receiver will be consuming messages from - `client_id` (default = otel-collector): The consumer client ID that receiver will use - `initial_offset` (default = latest): The initial offset to use if no offset was previously committed. Must be `latest` or `earliest`. +- `session_timeout` (default = `10s`): The request timeout for detecting client failures when using Kafka’s group management facilities. +- `heartbeat_interval` (default = `3s`): The expected time between heartbeats to the consumer coordinator when using Kafka’s group management facilities. +- `min_fetch_size` (default = `1`): The minimum number of message bytes to fetch in a request. +- `default_fetch_size` (default = `1048576`): The default number of message bytes to fetch in a request (1MB). +- `max_fetch_size` (default = `0`): The maximum number of message bytes to fetch in a request (`0` means no limit). - `auth` - `plain_text` - `username`: The username to use. @@ -49,8 +56,8 @@ The following settings can be optionally configured: - `sasl` - `username`: The username to use.
- `password`: The password to use - - `mechanism`: The sasl mechanism to use (SCRAM-SHA-256, SCRAM-SHA-512, AWS_MSK_IAM or PLAIN) - - `aws_msk.region`: AWS Region in case of AWS_MSK_IAM mechanism + - `mechanism`: The sasl mechanism to use (SCRAM-SHA-256, SCRAM-SHA-512, AWS_MSK_IAM, AWS_MSK_IAM_OAUTHBEARER or PLAIN) + - `aws_msk.region`: AWS Region in case of AWS_MSK_IAM or AWS_MSK_IAM_OAUTHBEARER mechanism - `aws_msk.broker_addr`: MSK Broker address in case of AWS_MSK_IAM mechanism - `tls` - `ca_file`: path to the CA cert. For a client this verifies the server certificate. Should @@ -71,6 +78,7 @@ The following settings can be optionally configured: - `password`: The Kerberos password used for authenticate with KDC - `config_file`: Path to Kerberos configuration. i.e /etc/krb5.conf - `keytab_file`: Path to keytab file. i.e /etc/security/kafka.keytab + - `disable_fast_negotiation`: Disable PA-FX-FAST negotiation (Pre-Authentication Framework - Fast). Some common Kerberos implementations do not support PA-FX-FAST negotiation. This is set to `false` by default. - `metadata` - `full` (default = true): Whether to maintain a full set of metadata. When disabled, the client does not make the initial request to broker at the diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/config.go index 2f2eff8e6d5..5b25c7c7c5f 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/config.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/config.go @@ -48,6 +48,10 @@ type Config struct { ResolveCanonicalBootstrapServersOnly bool `mapstructure:"resolve_canonical_bootstrap_servers_only"` // Kafka protocol version ProtocolVersion string `mapstructure:"protocol_version"` + // Session interval for the Kafka consumer + SessionTimeout time.Duration `mapstructure:"session_timeout"` + // Heartbeat interval for the Kafka consumer + HeartbeatInterval time.Duration `mapstructure:"heartbeat_interval"` // The name of the kafka topic to consume from (default "otlp_spans" for traces, "otlp_metrics" for metrics, "otlp_logs" for logs) Topic string `mapstructure:"topic"` // Encoding of the messages (default "otlp_proto") @@ -74,6 +78,13 @@ type Config struct { // Extract headers from kafka records HeaderExtraction HeaderExtraction `mapstructure:"header_extraction"` + + // The minimum bytes per fetch from Kafka (default "1") + MinFetchSize int32 `mapstructure:"min_fetch_size"` + // The default bytes per fetch from Kafka (default "1048576") + DefaultFetchSize int32 `mapstructure:"default_fetch_size"` + // The maximum bytes per fetch from Kafka (default "0", no limit) + MaxFetchSize int32 `mapstructure:"max_fetch_size"` } const ( diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/documentation.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/documentation.md new file mode 100644 index 00000000000..7f65b8ef691 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/documentation.md @@ -0,0 +1,71 @@ +[comment]: <> (Code generated by mdatagen. DO NOT EDIT.) + +# kafka + +## Internal Telemetry + +The following telemetry is emitted by this component. 
+ +### otelcol_kafka_receiver_current_offset + +Current message offset + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +### otelcol_kafka_receiver_messages + +Number of received messages + +| Unit | Metric Type | Value Type | Monotonic | +| ---- | ----------- | ---------- | --------- | +| 1 | Sum | Int | true | + +### otelcol_kafka_receiver_offset_lag + +Current offset lag + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +### otelcol_kafka_receiver_partition_close + +Number of finished partitions + +| Unit | Metric Type | Value Type | Monotonic | +| ---- | ----------- | ---------- | --------- | +| 1 | Sum | Int | true | + +### otelcol_kafka_receiver_partition_start + +Number of started partitions + +| Unit | Metric Type | Value Type | Monotonic | +| ---- | ----------- | ---------- | --------- | +| 1 | Sum | Int | true | + +### otelcol_kafka_receiver_unmarshal_failed_log_records + +Number of log records failed to be unmarshaled + +| Unit | Metric Type | Value Type | Monotonic | +| ---- | ----------- | ---------- | --------- | +| 1 | Sum | Int | true | + +### otelcol_kafka_receiver_unmarshal_failed_metric_points + +Number of metric points failed to be unmarshaled + +| Unit | Metric Type | Value Type | Monotonic | +| ---- | ----------- | ---------- | --------- | +| 1 | Sum | Int | true | + +### otelcol_kafka_receiver_unmarshal_failed_spans + +Number of spans failed to be unmarshaled + +| Unit | Metric Type | Value Type | Monotonic | +| ---- | ----------- | ---------- | --------- | +| 1 | Sum | Int | true | diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/factory.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/factory.go index 7552f0c4653..be413ec507c 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/factory.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/factory.go @@ -5,11 +5,10 @@ package kafkareceiver // import "github.com/open-telemetry/opentelemetry-collect import ( "context" - "fmt" + "errors" "strings" "time" - "go.opencensus.io/stats/view" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/receiver" @@ -19,14 +18,16 @@ import ( ) const ( - defaultTracesTopic = "otlp_spans" - defaultMetricsTopic = "otlp_metrics" - defaultLogsTopic = "otlp_logs" - defaultEncoding = "otlp_proto" - defaultBroker = "localhost:9092" - defaultClientID = "otel-collector" - defaultGroupID = defaultClientID - defaultInitialOffset = offsetLatest + defaultTracesTopic = "otlp_spans" + defaultMetricsTopic = "otlp_metrics" + defaultLogsTopic = "otlp_logs" + defaultEncoding = "otlp_proto" + defaultBroker = "localhost:9092" + defaultClientID = "otel-collector" + defaultGroupID = defaultClientID + defaultInitialOffset = offsetLatest + defaultSessionTimeout = 10 * time.Second + defaultHeartbeatInterval = 3 * time.Second // default from sarama.NewConfig() defaultMetadataRetryMax = 3 @@ -39,49 +40,23 @@ const ( defaultAutoCommitEnable = true // default from sarama.NewConfig() defaultAutoCommitInterval = 1 * time.Second + + // default from sarama.NewConfig() + defaultMinFetchSize = int32(1) + // default from sarama.NewConfig() + defaultDefaultFetchSize = int32(1048576) + // default from sarama.NewConfig() + defaultMaxFetchSize = int32(0) ) -var errUnrecognizedEncoding 
= fmt.Errorf("unrecognized encoding") +var errUnrecognizedEncoding = errors.New("unrecognized encoding") // FactoryOption applies changes to kafkaExporterFactory. type FactoryOption func(factory *kafkaReceiverFactory) -// withTracesUnmarshalers adds Unmarshalers. -func withTracesUnmarshalers(tracesUnmarshalers ...TracesUnmarshaler) FactoryOption { - return func(factory *kafkaReceiverFactory) { - for _, unmarshaler := range tracesUnmarshalers { - factory.tracesUnmarshalers[unmarshaler.Encoding()] = unmarshaler - } - } -} - -// withMetricsUnmarshalers adds MetricsUnmarshalers. -func withMetricsUnmarshalers(metricsUnmarshalers ...MetricsUnmarshaler) FactoryOption { - return func(factory *kafkaReceiverFactory) { - for _, unmarshaler := range metricsUnmarshalers { - factory.metricsUnmarshalers[unmarshaler.Encoding()] = unmarshaler - } - } -} - -// withLogsUnmarshalers adds LogsUnmarshalers. -func withLogsUnmarshalers(logsUnmarshalers ...LogsUnmarshaler) FactoryOption { - return func(factory *kafkaReceiverFactory) { - for _, unmarshaler := range logsUnmarshalers { - factory.logsUnmarshalers[unmarshaler.Encoding()] = unmarshaler - } - } -} - // NewFactory creates Kafka receiver factory. func NewFactory(options ...FactoryOption) receiver.Factory { - _ = view.Register(metricViews()...) - - f := &kafkaReceiverFactory{ - tracesUnmarshalers: map[string]TracesUnmarshaler{}, - metricsUnmarshalers: map[string]MetricsUnmarshaler{}, - logsUnmarshalers: map[string]LogsUnmarshaler{}, - } + f := &kafkaReceiverFactory{} for _, o := range options { o(f) } @@ -96,11 +71,13 @@ func NewFactory(options ...FactoryOption) receiver.Factory { func createDefaultConfig() component.Config { return &Config{ - Encoding: defaultEncoding, - Brokers: []string{defaultBroker}, - ClientID: defaultClientID, - GroupID: defaultGroupID, - InitialOffset: defaultInitialOffset, + Encoding: defaultEncoding, + Brokers: []string{defaultBroker}, + ClientID: defaultClientID, + GroupID: defaultGroupID, + InitialOffset: defaultInitialOffset, + SessionTimeout: defaultSessionTimeout, + HeartbeatInterval: defaultHeartbeatInterval, Metadata: kafkaexporter.Metadata{ Full: defaultMetadataFull, Retry: kafkaexporter.MetadataRetry{ @@ -119,35 +96,26 @@ func createDefaultConfig() component.Config { HeaderExtraction: HeaderExtraction{ ExtractHeaders: false, }, + MinFetchSize: defaultMinFetchSize, + DefaultFetchSize: defaultDefaultFetchSize, + MaxFetchSize: defaultMaxFetchSize, } } -type kafkaReceiverFactory struct { - tracesUnmarshalers map[string]TracesUnmarshaler - metricsUnmarshalers map[string]MetricsUnmarshaler - logsUnmarshalers map[string]LogsUnmarshaler -} +type kafkaReceiverFactory struct{} func (f *kafkaReceiverFactory) createTracesReceiver( _ context.Context, - set receiver.CreateSettings, + set receiver.Settings, cfg component.Config, nextConsumer consumer.Traces, ) (receiver.Traces, error) { - for encoding, unmarshal := range defaultTracesUnmarshalers() { - f.tracesUnmarshalers[encoding] = unmarshal - } - oCfg := *(cfg.(*Config)) if oCfg.Topic == "" { oCfg.Topic = defaultTracesTopic } - unmarshaler := f.tracesUnmarshalers[oCfg.Encoding] - if unmarshaler == nil { - return nil, errUnrecognizedEncoding - } - r, err := newTracesReceiver(oCfg, set, unmarshaler, nextConsumer) + r, err := newTracesReceiver(oCfg, set, nextConsumer) if err != nil { return nil, err } @@ -156,24 +124,16 @@ func (f *kafkaReceiverFactory) createTracesReceiver( func (f *kafkaReceiverFactory) createMetricsReceiver( _ context.Context, - set receiver.CreateSettings, + set 
receiver.Settings, cfg component.Config, nextConsumer consumer.Metrics, ) (receiver.Metrics, error) { - for encoding, unmarshal := range defaultMetricsUnmarshalers() { - f.metricsUnmarshalers[encoding] = unmarshal - } - oCfg := *(cfg.(*Config)) if oCfg.Topic == "" { oCfg.Topic = defaultMetricsTopic } - unmarshaler := f.metricsUnmarshalers[oCfg.Encoding] - if unmarshaler == nil { - return nil, errUnrecognizedEncoding - } - r, err := newMetricsReceiver(oCfg, set, unmarshaler, nextConsumer) + r, err := newMetricsReceiver(oCfg, set, nextConsumer) if err != nil { return nil, err } @@ -182,24 +142,16 @@ func (f *kafkaReceiverFactory) createMetricsReceiver( func (f *kafkaReceiverFactory) createLogsReceiver( _ context.Context, - set receiver.CreateSettings, + set receiver.Settings, cfg component.Config, nextConsumer consumer.Logs, ) (receiver.Logs, error) { - for encoding, unmarshaler := range defaultLogsUnmarshalers(set.BuildInfo.Version, set.Logger) { - f.logsUnmarshalers[encoding] = unmarshaler - } - oCfg := *(cfg.(*Config)) if oCfg.Topic == "" { oCfg.Topic = defaultLogsTopic } - unmarshaler, err := getLogsUnmarshaler(oCfg.Encoding, f.logsUnmarshalers) - if err != nil { - return nil, err - } - r, err := newLogsReceiver(oCfg, set, unmarshaler, nextConsumer) + r, err := newLogsReceiver(oCfg, set, nextConsumer) if err != nil { return nil, err } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/header_extraction.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/header_extraction.go index 265c84fb33d..efae723c201 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/header_extraction.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/header_extraction.go @@ -4,8 +4,6 @@ package kafkareceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver" import ( - "fmt" - "github.com/IBM/sarama" "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" @@ -14,7 +12,7 @@ import ( ) func getAttribute(key string) string { - return fmt.Sprintf("kafka.header.%s", key) + return "kafka.header." 
+ key } type HeaderExtractor interface { diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/internal/metadata/generated_status.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/internal/metadata/generated_status.go index 39587e27617..2ed23395790 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/internal/metadata/generated_status.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/internal/metadata/generated_status.go @@ -7,7 +7,8 @@ import ( ) var ( - Type = component.MustNewType("kafka") + Type = component.MustNewType("kafka") + ScopeName = "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver" ) const ( diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/internal/metadata/generated_telemetry.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/internal/metadata/generated_telemetry.go index d60cbac9b5b..cc3fbe00e42 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/internal/metadata/generated_telemetry.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/internal/metadata/generated_telemetry.go @@ -3,15 +3,112 @@ package metadata import ( - "go.opentelemetry.io/collector/component" + "errors" + "go.opentelemetry.io/otel/metric" + noopmetric "go.opentelemetry.io/otel/metric/noop" "go.opentelemetry.io/otel/trace" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configtelemetry" ) func Meter(settings component.TelemetrySettings) metric.Meter { - return settings.MeterProvider.Meter("otelcol/kafkareceiver") + return settings.MeterProvider.Meter("github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver") } func Tracer(settings component.TelemetrySettings) trace.Tracer { - return settings.TracerProvider.Tracer("otelcol/kafkareceiver") + return settings.TracerProvider.Tracer("github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver") +} + +// TelemetryBuilder provides an interface for components to report telemetry +// as defined in metadata and user config. +type TelemetryBuilder struct { + meter metric.Meter + KafkaReceiverCurrentOffset metric.Int64Gauge + KafkaReceiverMessages metric.Int64Counter + KafkaReceiverOffsetLag metric.Int64Gauge + KafkaReceiverPartitionClose metric.Int64Counter + KafkaReceiverPartitionStart metric.Int64Counter + KafkaReceiverUnmarshalFailedLogRecords metric.Int64Counter + KafkaReceiverUnmarshalFailedMetricPoints metric.Int64Counter + KafkaReceiverUnmarshalFailedSpans metric.Int64Counter +} + +// TelemetryBuilderOption applies changes to default builder. 
+type TelemetryBuilderOption interface { + apply(*TelemetryBuilder) +} + +type telemetryBuilderOptionFunc func(mb *TelemetryBuilder) + +func (tbof telemetryBuilderOptionFunc) apply(mb *TelemetryBuilder) { + tbof(mb) +} + +// NewTelemetryBuilder provides a struct with methods to update all internal telemetry +// for a component +func NewTelemetryBuilder(settings component.TelemetrySettings, options ...TelemetryBuilderOption) (*TelemetryBuilder, error) { + builder := TelemetryBuilder{} + for _, op := range options { + op.apply(&builder) + } + builder.meter = Meter(settings) + var err, errs error + builder.KafkaReceiverCurrentOffset, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Gauge( + "otelcol_kafka_receiver_current_offset", + metric.WithDescription("Current message offset"), + metric.WithUnit("1"), + ) + errs = errors.Join(errs, err) + builder.KafkaReceiverMessages, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_kafka_receiver_messages", + metric.WithDescription("Number of received messages"), + metric.WithUnit("1"), + ) + errs = errors.Join(errs, err) + builder.KafkaReceiverOffsetLag, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Gauge( + "otelcol_kafka_receiver_offset_lag", + metric.WithDescription("Current offset lag"), + metric.WithUnit("1"), + ) + errs = errors.Join(errs, err) + builder.KafkaReceiverPartitionClose, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_kafka_receiver_partition_close", + metric.WithDescription("Number of finished partitions"), + metric.WithUnit("1"), + ) + errs = errors.Join(errs, err) + builder.KafkaReceiverPartitionStart, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_kafka_receiver_partition_start", + metric.WithDescription("Number of started partitions"), + metric.WithUnit("1"), + ) + errs = errors.Join(errs, err) + builder.KafkaReceiverUnmarshalFailedLogRecords, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_kafka_receiver_unmarshal_failed_log_records", + metric.WithDescription("Number of log records failed to be unmarshaled"), + metric.WithUnit("1"), + ) + errs = errors.Join(errs, err) + builder.KafkaReceiverUnmarshalFailedMetricPoints, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_kafka_receiver_unmarshal_failed_metric_points", + metric.WithDescription("Number of metric points failed to be unmarshaled"), + metric.WithUnit("1"), + ) + errs = errors.Join(errs, err) + builder.KafkaReceiverUnmarshalFailedSpans, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_kafka_receiver_unmarshal_failed_spans", + metric.WithDescription("Number of spans failed to be unmarshaled"), + metric.WithUnit("1"), + ) + errs = errors.Join(errs, err) + return &builder, errs +} + +func getLeveledMeter(meter metric.Meter, cfgLevel, srvLevel configtelemetry.Level) metric.Meter { + if cfgLevel <= srvLevel { + return meter + } + return noopmetric.Meter{} } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/jaeger_unmarshaler.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/jaeger_unmarshaler.go index 
48fad72042c..ed001e9d11f 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/jaeger_unmarshaler.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/jaeger_unmarshaler.go @@ -13,8 +13,7 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger" ) -type jaegerProtoSpanUnmarshaler struct { -} +type jaegerProtoSpanUnmarshaler struct{} var _ TracesUnmarshaler = (*jaegerProtoSpanUnmarshaler)(nil) @@ -31,8 +30,7 @@ func (j jaegerProtoSpanUnmarshaler) Encoding() string { return "jaeger_proto" } -type jaegerJSONSpanUnmarshaler struct { -} +type jaegerJSONSpanUnmarshaler struct{} var _ TracesUnmarshaler = (*jaegerJSONSpanUnmarshaler)(nil) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/json_unmarshaler.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/json_unmarshaler.go index 9ff02d03a6e..33750457e71 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/json_unmarshaler.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/json_unmarshaler.go @@ -10,8 +10,7 @@ import ( "go.opentelemetry.io/collector/pdata/plog" ) -type jsonLogsUnmarshaler struct { -} +type jsonLogsUnmarshaler struct{} func newJSONLogsUnmarshaler() LogsUnmarshaler { return &jsonLogsUnmarshaler{} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/kafka_receiver.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/kafka_receiver.go index 2b5b2954918..1ec2d5aca6e 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/kafka_receiver.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/kafka_receiver.go @@ -11,22 +11,29 @@ import ( "sync" "github.com/IBM/sarama" - "go.opencensus.io/stats" - "go.opencensus.io/tag" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" "go.opentelemetry.io/collector/receiver" "go.opentelemetry.io/collector/receiver/receiverhelper" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/internal/metadata" ) const ( transport = "kafka" + // TODO: update the following attributes to reflect semconv + attrInstanceName = "name" + attrPartition = "partition" ) -var errInvalidInitialOffset = fmt.Errorf("invalid initial offset") +var errInvalidInitialOffset = errors.New("invalid initial offset") // kafkaTracesConsumer uses sarama to consume and handle messages from kafka. 
type kafkaTracesConsumer struct { @@ -36,13 +43,18 @@ type kafkaTracesConsumer struct { topics []string cancelConsumeLoop context.CancelFunc unmarshaler TracesUnmarshaler + consumeLoopWG *sync.WaitGroup - settings receiver.CreateSettings + settings receiver.Settings + telemetryBuilder *metadata.TelemetryBuilder autocommitEnabled bool messageMarking MessageMarking headerExtraction bool headers []string + minFetchSize int32 + defaultFetchSize int32 + maxFetchSize int32 } // kafkaMetricsConsumer uses sarama to consume and handle messages from kafka. @@ -53,13 +65,18 @@ type kafkaMetricsConsumer struct { topics []string cancelConsumeLoop context.CancelFunc unmarshaler MetricsUnmarshaler + consumeLoopWG *sync.WaitGroup - settings receiver.CreateSettings + settings receiver.Settings + telemetryBuilder *metadata.TelemetryBuilder autocommitEnabled bool messageMarking MessageMarking headerExtraction bool headers []string + minFetchSize int32 + defaultFetchSize int32 + maxFetchSize int32 } // kafkaLogsConsumer uses sarama to consume and handle messages from kafka. @@ -70,38 +87,50 @@ type kafkaLogsConsumer struct { topics []string cancelConsumeLoop context.CancelFunc unmarshaler LogsUnmarshaler + consumeLoopWG *sync.WaitGroup - settings receiver.CreateSettings + settings receiver.Settings + telemetryBuilder *metadata.TelemetryBuilder autocommitEnabled bool messageMarking MessageMarking headerExtraction bool headers []string + minFetchSize int32 + defaultFetchSize int32 + maxFetchSize int32 } -var _ receiver.Traces = (*kafkaTracesConsumer)(nil) -var _ receiver.Metrics = (*kafkaMetricsConsumer)(nil) -var _ receiver.Logs = (*kafkaLogsConsumer)(nil) +var ( + _ receiver.Traces = (*kafkaTracesConsumer)(nil) + _ receiver.Metrics = (*kafkaMetricsConsumer)(nil) + _ receiver.Logs = (*kafkaLogsConsumer)(nil) +) -func newTracesReceiver(config Config, set receiver.CreateSettings, unmarshaler TracesUnmarshaler, nextConsumer consumer.Traces) (*kafkaTracesConsumer, error) { - if unmarshaler == nil { - return nil, errUnrecognizedEncoding +func newTracesReceiver(config Config, set receiver.Settings, nextConsumer consumer.Traces) (*kafkaTracesConsumer, error) { + telemetryBuilder, err := metadata.NewTelemetryBuilder(set.TelemetrySettings) + if err != nil { + return nil, err } return &kafkaTracesConsumer{ config: config, topics: []string{config.Topic}, nextConsumer: nextConsumer, - unmarshaler: unmarshaler, + consumeLoopWG: &sync.WaitGroup{}, settings: set, autocommitEnabled: config.AutoCommit.Enable, messageMarking: config.MessageMarking, headerExtraction: config.HeaderExtraction.ExtractHeaders, headers: config.HeaderExtraction.Headers, + telemetryBuilder: telemetryBuilder, + minFetchSize: config.MinFetchSize, + defaultFetchSize: config.DefaultFetchSize, + maxFetchSize: config.MaxFetchSize, }, nil } -func createKafkaClient(config Config) (sarama.ConsumerGroup, error) { +func createKafkaClient(ctx context.Context, config Config) (sarama.ConsumerGroup, error) { saramaConfig := sarama.NewConfig() saramaConfig.ClientID = config.ClientID saramaConfig.Metadata.Full = config.Metadata.Full @@ -109,6 +138,12 @@ func createKafkaClient(config Config) (sarama.ConsumerGroup, error) { saramaConfig.Metadata.Retry.Backoff = config.Metadata.Retry.Backoff saramaConfig.Consumer.Offsets.AutoCommit.Enable = config.AutoCommit.Enable saramaConfig.Consumer.Offsets.AutoCommit.Interval = config.AutoCommit.Interval + saramaConfig.Consumer.Group.Session.Timeout = config.SessionTimeout + saramaConfig.Consumer.Group.Heartbeat.Interval = 
config.HeartbeatInterval + saramaConfig.Consumer.Fetch.Min = config.MinFetchSize + saramaConfig.Consumer.Fetch.Default = config.DefaultFetchSize + saramaConfig.Consumer.Fetch.Max = config.MaxFetchSize + var err error if saramaConfig.Consumer.Offsets.Initial, err = toSaramaInitialOffset(config.InitialOffset); err != nil { return nil, err @@ -121,13 +156,13 @@ func createKafkaClient(config Config) (sarama.ConsumerGroup, error) { return nil, err } } - if err := kafka.ConfigureAuthentication(config.Authentication, saramaConfig); err != nil { + if err := kafka.ConfigureAuthentication(ctx, config.Authentication, saramaConfig); err != nil { return nil, err } return sarama.NewConsumerGroup(config.Brokers, config.GroupID, saramaConfig) } -func (c *kafkaTracesConsumer) Start(_ context.Context, _ component.Host) error { +func (c *kafkaTracesConsumer) Start(_ context.Context, host component.Host) error { ctx, cancel := context.WithCancel(context.Background()) c.cancelConsumeLoop = cancel obsrecv, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{ @@ -138,9 +173,25 @@ func (c *kafkaTracesConsumer) Start(_ context.Context, _ component.Host) error { if err != nil { return err } + // extensions take precedence over internal encodings + if unmarshaler, errExt := loadEncodingExtension[ptrace.Unmarshaler]( + host, + c.config.Encoding, + ); errExt == nil { + c.unmarshaler = &tracesEncodingUnmarshaler{ + unmarshaler: *unmarshaler, + encoding: c.config.Encoding, + } + } + if unmarshaler, ok := defaultTracesUnmarshalers()[c.config.Encoding]; c.unmarshaler == nil && ok { + c.unmarshaler = unmarshaler + } + if c.unmarshaler == nil { + return errUnrecognizedEncoding + } // consumerGroup may be set in tests to inject fake implementation. if c.consumerGroup == nil { - if c.consumerGroup, err = createKafkaClient(c.config); err != nil { + if c.consumerGroup, err = createKafkaClient(ctx, c.config); err != nil { return err } } @@ -153,6 +204,7 @@ func (c *kafkaTracesConsumer) Start(_ context.Context, _ component.Host) error { autocommitEnabled: c.autocommitEnabled, messageMarking: c.messageMarking, headerExtractor: &nopHeaderExtractor{}, + telemetryBuilder: c.telemetryBuilder, } if c.headerExtraction { consumerGroup.headerExtractor = &headerExtractor{ @@ -160,16 +212,14 @@ func (c *kafkaTracesConsumer) Start(_ context.Context, _ component.Host) error { headers: c.headers, } } - go func() { - if err := c.consumeLoop(ctx, consumerGroup); !errors.Is(err, context.Canceled) { - c.settings.ReportStatus(component.NewFatalErrorEvent(err)) - } - }() + c.consumeLoopWG.Add(1) + go c.consumeLoop(ctx, consumerGroup) <-consumerGroup.ready return nil } -func (c *kafkaTracesConsumer) consumeLoop(ctx context.Context, handler sarama.ConsumerGroupHandler) error { +func (c *kafkaTracesConsumer) consumeLoop(ctx context.Context, handler sarama.ConsumerGroupHandler) { + defer c.consumeLoopWG.Done() for { // `Consume` should be called inside an infinite loop, when a // server-side rebalance happens, the consumer session will need to be @@ -180,7 +230,7 @@ func (c *kafkaTracesConsumer) consumeLoop(ctx context.Context, handler sarama.Co // check if context was cancelled, signaling that the consumer should stop if ctx.Err() != nil { c.settings.Logger.Info("Consumer stopped", zap.Error(ctx.Err())) - return ctx.Err() + return } } } @@ -190,31 +240,37 @@ func (c *kafkaTracesConsumer) Shutdown(context.Context) error { return nil } c.cancelConsumeLoop() + c.consumeLoopWG.Wait() if c.consumerGroup == nil { return nil } return 
c.consumerGroup.Close() } -func newMetricsReceiver(config Config, set receiver.CreateSettings, unmarshaler MetricsUnmarshaler, nextConsumer consumer.Metrics) (*kafkaMetricsConsumer, error) { - if unmarshaler == nil { - return nil, errUnrecognizedEncoding +func newMetricsReceiver(config Config, set receiver.Settings, nextConsumer consumer.Metrics) (*kafkaMetricsConsumer, error) { + telemetryBuilder, err := metadata.NewTelemetryBuilder(set.TelemetrySettings) + if err != nil { + return nil, err } return &kafkaMetricsConsumer{ config: config, topics: []string{config.Topic}, nextConsumer: nextConsumer, - unmarshaler: unmarshaler, + consumeLoopWG: &sync.WaitGroup{}, settings: set, autocommitEnabled: config.AutoCommit.Enable, messageMarking: config.MessageMarking, headerExtraction: config.HeaderExtraction.ExtractHeaders, headers: config.HeaderExtraction.Headers, + telemetryBuilder: telemetryBuilder, + minFetchSize: config.MinFetchSize, + defaultFetchSize: config.DefaultFetchSize, + maxFetchSize: config.MaxFetchSize, }, nil } -func (c *kafkaMetricsConsumer) Start(_ context.Context, _ component.Host) error { +func (c *kafkaMetricsConsumer) Start(_ context.Context, host component.Host) error { ctx, cancel := context.WithCancel(context.Background()) c.cancelConsumeLoop = cancel obsrecv, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{ @@ -225,9 +281,25 @@ func (c *kafkaMetricsConsumer) Start(_ context.Context, _ component.Host) error if err != nil { return err } + // extensions take precedence over internal encodings + if unmarshaler, errExt := loadEncodingExtension[pmetric.Unmarshaler]( + host, + c.config.Encoding, + ); errExt == nil { + c.unmarshaler = &metricsEncodingUnmarshaler{ + unmarshaler: *unmarshaler, + encoding: c.config.Encoding, + } + } + if unmarshaler, ok := defaultMetricsUnmarshalers()[c.config.Encoding]; c.unmarshaler == nil && ok { + c.unmarshaler = unmarshaler + } + if c.unmarshaler == nil { + return errUnrecognizedEncoding + } // consumerGroup may be set in tests to inject fake implementation. 
if c.consumerGroup == nil { - if c.consumerGroup, err = createKafkaClient(c.config); err != nil { + if c.consumerGroup, err = createKafkaClient(ctx, c.config); err != nil { return err } } @@ -240,6 +312,7 @@ func (c *kafkaMetricsConsumer) Start(_ context.Context, _ component.Host) error autocommitEnabled: c.autocommitEnabled, messageMarking: c.messageMarking, headerExtractor: &nopHeaderExtractor{}, + telemetryBuilder: c.telemetryBuilder, } if c.headerExtraction { metricsConsumerGroup.headerExtractor = &headerExtractor{ @@ -247,16 +320,14 @@ func (c *kafkaMetricsConsumer) Start(_ context.Context, _ component.Host) error headers: c.headers, } } - go func() { - if err := c.consumeLoop(ctx, metricsConsumerGroup); err != nil { - c.settings.ReportStatus(component.NewFatalErrorEvent(err)) - } - }() + c.consumeLoopWG.Add(1) + go c.consumeLoop(ctx, metricsConsumerGroup) <-metricsConsumerGroup.ready return nil } -func (c *kafkaMetricsConsumer) consumeLoop(ctx context.Context, handler sarama.ConsumerGroupHandler) error { +func (c *kafkaMetricsConsumer) consumeLoop(ctx context.Context, handler sarama.ConsumerGroupHandler) { + defer c.consumeLoopWG.Done() for { // `Consume` should be called inside an infinite loop, when a // server-side rebalance happens, the consumer session will need to be @@ -267,7 +338,7 @@ func (c *kafkaMetricsConsumer) consumeLoop(ctx context.Context, handler sarama.C // check if context was cancelled, signaling that the consumer should stop if ctx.Err() != nil { c.settings.Logger.Info("Consumer stopped", zap.Error(ctx.Err())) - return ctx.Err() + return } } } @@ -277,31 +348,37 @@ func (c *kafkaMetricsConsumer) Shutdown(context.Context) error { return nil } c.cancelConsumeLoop() + c.consumeLoopWG.Wait() if c.consumerGroup == nil { return nil } return c.consumerGroup.Close() } -func newLogsReceiver(config Config, set receiver.CreateSettings, unmarshaler LogsUnmarshaler, nextConsumer consumer.Logs) (*kafkaLogsConsumer, error) { - if unmarshaler == nil { - return nil, errUnrecognizedEncoding +func newLogsReceiver(config Config, set receiver.Settings, nextConsumer consumer.Logs) (*kafkaLogsConsumer, error) { + telemetryBuilder, err := metadata.NewTelemetryBuilder(set.TelemetrySettings) + if err != nil { + return nil, err } return &kafkaLogsConsumer{ config: config, topics: []string{config.Topic}, nextConsumer: nextConsumer, - unmarshaler: unmarshaler, + consumeLoopWG: &sync.WaitGroup{}, settings: set, autocommitEnabled: config.AutoCommit.Enable, messageMarking: config.MessageMarking, headerExtraction: config.HeaderExtraction.ExtractHeaders, headers: config.HeaderExtraction.Headers, + telemetryBuilder: telemetryBuilder, + minFetchSize: config.MinFetchSize, + defaultFetchSize: config.DefaultFetchSize, + maxFetchSize: config.MaxFetchSize, }, nil } -func (c *kafkaLogsConsumer) Start(_ context.Context, _ component.Host) error { +func (c *kafkaLogsConsumer) Start(_ context.Context, host component.Host) error { ctx, cancel := context.WithCancel(context.Background()) c.cancelConsumeLoop = cancel obsrecv, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{ @@ -312,9 +389,28 @@ func (c *kafkaLogsConsumer) Start(_ context.Context, _ component.Host) error { if err != nil { return err } + // extensions take precedence over internal encodings + if unmarshaler, errExt := loadEncodingExtension[plog.Unmarshaler]( + host, + c.config.Encoding, + ); errExt == nil { + c.unmarshaler = &logsEncodingUnmarshaler{ + unmarshaler: *unmarshaler, + encoding: c.config.Encoding, + } + } + if 
unmarshaler, errInt := getLogsUnmarshaler( + c.config.Encoding, + defaultLogsUnmarshalers(c.settings.BuildInfo.Version, c.settings.Logger), + ); c.unmarshaler == nil && errInt == nil { + c.unmarshaler = unmarshaler + } + if c.unmarshaler == nil { + return errUnrecognizedEncoding + } // consumerGroup may be set in tests to inject fake implementation. if c.consumerGroup == nil { - if c.consumerGroup, err = createKafkaClient(c.config); err != nil { + if c.consumerGroup, err = createKafkaClient(ctx, c.config); err != nil { return err } } @@ -327,6 +423,7 @@ func (c *kafkaLogsConsumer) Start(_ context.Context, _ component.Host) error { autocommitEnabled: c.autocommitEnabled, messageMarking: c.messageMarking, headerExtractor: &nopHeaderExtractor{}, + telemetryBuilder: c.telemetryBuilder, } if c.headerExtraction { logsConsumerGroup.headerExtractor = &headerExtractor{ @@ -334,16 +431,14 @@ func (c *kafkaLogsConsumer) Start(_ context.Context, _ component.Host) error { headers: c.headers, } } - go func() { - if err := c.consumeLoop(ctx, logsConsumerGroup); err != nil { - c.settings.ReportStatus(component.NewFatalErrorEvent(err)) - } - }() + c.consumeLoopWG.Add(1) + go c.consumeLoop(ctx, logsConsumerGroup) <-logsConsumerGroup.ready return nil } -func (c *kafkaLogsConsumer) consumeLoop(ctx context.Context, handler sarama.ConsumerGroupHandler) error { +func (c *kafkaLogsConsumer) consumeLoop(ctx context.Context, handler sarama.ConsumerGroupHandler) { + defer c.consumeLoopWG.Done() for { // `Consume` should be called inside an infinite loop, when a // server-side rebalance happens, the consumer session will need to be @@ -354,7 +449,7 @@ func (c *kafkaLogsConsumer) consumeLoop(ctx context.Context, handler sarama.Cons // check if context was cancelled, signaling that the consumer should stop if ctx.Err() != nil { c.settings.Logger.Info("Consumer stopped", zap.Error(ctx.Err())) - return ctx.Err() + return } } } @@ -364,6 +459,7 @@ func (c *kafkaLogsConsumer) Shutdown(context.Context) error { return nil } c.cancelConsumeLoop() + c.consumeLoopWG.Wait() if c.consumerGroup == nil { return nil } @@ -379,7 +475,8 @@ type tracesConsumerGroupHandler struct { logger *zap.Logger - obsrecv *receiverhelper.ObsReport + obsrecv *receiverhelper.ObsReport + telemetryBuilder *metadata.TelemetryBuilder autocommitEnabled bool messageMarking MessageMarking @@ -395,7 +492,8 @@ type metricsConsumerGroupHandler struct { logger *zap.Logger - obsrecv *receiverhelper.ObsReport + obsrecv *receiverhelper.ObsReport + telemetryBuilder *metadata.TelemetryBuilder autocommitEnabled bool messageMarking MessageMarking @@ -411,29 +509,30 @@ type logsConsumerGroupHandler struct { logger *zap.Logger - obsrecv *receiverhelper.ObsReport + obsrecv *receiverhelper.ObsReport + telemetryBuilder *metadata.TelemetryBuilder autocommitEnabled bool messageMarking MessageMarking headerExtractor HeaderExtractor } -var _ sarama.ConsumerGroupHandler = (*tracesConsumerGroupHandler)(nil) -var _ sarama.ConsumerGroupHandler = (*metricsConsumerGroupHandler)(nil) -var _ sarama.ConsumerGroupHandler = (*logsConsumerGroupHandler)(nil) +var ( + _ sarama.ConsumerGroupHandler = (*tracesConsumerGroupHandler)(nil) + _ sarama.ConsumerGroupHandler = (*metricsConsumerGroupHandler)(nil) + _ sarama.ConsumerGroupHandler = (*logsConsumerGroupHandler)(nil) +) func (c *tracesConsumerGroupHandler) Setup(session sarama.ConsumerGroupSession) error { c.readyCloser.Do(func() { close(c.ready) }) - statsTags := []tag.Mutator{tag.Upsert(tagInstanceName, c.id.Name())} - _ = 
stats.RecordWithTags(session.Context(), statsTags, statPartitionStart.M(1)) + c.telemetryBuilder.KafkaReceiverPartitionStart.Add(session.Context(), 1, metric.WithAttributes(attribute.String(attrInstanceName, c.id.Name()))) return nil } func (c *tracesConsumerGroupHandler) Cleanup(session sarama.ConsumerGroupSession) error { - statsTags := []tag.Mutator{tag.Upsert(tagInstanceName, c.id.Name())} - _ = stats.RecordWithTags(session.Context(), statsTags, statPartitionClose.M(1)) + c.telemetryBuilder.KafkaReceiverPartitionClose.Add(session.Context(), 1, metric.WithAttributes(attribute.String(attrInstanceName, c.id.Name()))) return nil } @@ -457,22 +556,18 @@ func (c *tracesConsumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSe } ctx := c.obsrecv.StartTracesOp(session.Context()) - statsTags := []tag.Mutator{ - tag.Upsert(tagInstanceName, c.id.String()), - tag.Upsert(tagPartition, strconv.Itoa(int(claim.Partition()))), - } - _ = stats.RecordWithTags(ctx, statsTags, - statMessageCount.M(1), - statMessageOffset.M(message.Offset), - statMessageOffsetLag.M(claim.HighWaterMarkOffset()-message.Offset-1)) + attrs := attribute.NewSet( + attribute.String(attrInstanceName, c.id.String()), + attribute.String(attrPartition, strconv.Itoa(int(claim.Partition()))), + ) + c.telemetryBuilder.KafkaReceiverMessages.Add(ctx, 1, metric.WithAttributeSet(attrs)) + c.telemetryBuilder.KafkaReceiverCurrentOffset.Record(ctx, message.Offset, metric.WithAttributeSet(attrs)) + c.telemetryBuilder.KafkaReceiverOffsetLag.Record(ctx, claim.HighWaterMarkOffset()-message.Offset-1, metric.WithAttributeSet(attrs)) traces, err := c.unmarshaler.Unmarshal(message.Value) if err != nil { c.logger.Error("failed to unmarshal message", zap.Error(err)) - _ = stats.RecordWithTags( - ctx, - []tag.Mutator{tag.Upsert(tagInstanceName, c.id.String())}, - statUnmarshalFailedSpans.M(1)) + c.telemetryBuilder.KafkaReceiverUnmarshalFailedSpans.Add(session.Context(), 1, metric.WithAttributes(attribute.String(attrInstanceName, c.id.String()))) if c.messageMarking.After && c.messageMarking.OnError { session.MarkMessage(message, "") } @@ -509,14 +604,12 @@ func (c *metricsConsumerGroupHandler) Setup(session sarama.ConsumerGroupSession) c.readyCloser.Do(func() { close(c.ready) }) - statsTags := []tag.Mutator{tag.Upsert(tagInstanceName, c.id.Name())} - _ = stats.RecordWithTags(session.Context(), statsTags, statPartitionStart.M(1)) + c.telemetryBuilder.KafkaReceiverPartitionStart.Add(session.Context(), 1, metric.WithAttributes(attribute.String(attrInstanceName, c.id.Name()))) return nil } func (c *metricsConsumerGroupHandler) Cleanup(session sarama.ConsumerGroupSession) error { - statsTags := []tag.Mutator{tag.Upsert(tagInstanceName, c.id.Name())} - _ = stats.RecordWithTags(session.Context(), statsTags, statPartitionClose.M(1)) + c.telemetryBuilder.KafkaReceiverPartitionClose.Add(session.Context(), 1, metric.WithAttributes(attribute.String(attrInstanceName, c.id.Name()))) return nil } @@ -540,22 +633,18 @@ func (c *metricsConsumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupS } ctx := c.obsrecv.StartMetricsOp(session.Context()) - statsTags := []tag.Mutator{ - tag.Upsert(tagInstanceName, c.id.String()), - tag.Upsert(tagPartition, strconv.Itoa(int(claim.Partition()))), - } - _ = stats.RecordWithTags(ctx, statsTags, - statMessageCount.M(1), - statMessageOffset.M(message.Offset), - statMessageOffsetLag.M(claim.HighWaterMarkOffset()-message.Offset-1)) + attrs := attribute.NewSet( + attribute.String(attrInstanceName, c.id.String()), + 
attribute.String(attrPartition, strconv.Itoa(int(claim.Partition()))), + ) + c.telemetryBuilder.KafkaReceiverMessages.Add(ctx, 1, metric.WithAttributeSet(attrs)) + c.telemetryBuilder.KafkaReceiverCurrentOffset.Record(ctx, message.Offset, metric.WithAttributeSet(attrs)) + c.telemetryBuilder.KafkaReceiverOffsetLag.Record(ctx, claim.HighWaterMarkOffset()-message.Offset-1, metric.WithAttributeSet(attrs)) metrics, err := c.unmarshaler.Unmarshal(message.Value) if err != nil { c.logger.Error("failed to unmarshal message", zap.Error(err)) - _ = stats.RecordWithTags( - ctx, - []tag.Mutator{tag.Upsert(tagInstanceName, c.id.String())}, - statUnmarshalFailedMetricPoints.M(1)) + c.telemetryBuilder.KafkaReceiverUnmarshalFailedMetricPoints.Add(session.Context(), 1, metric.WithAttributes(attribute.String(attrInstanceName, c.id.String()))) if c.messageMarking.After && c.messageMarking.OnError { session.MarkMessage(message, "") } @@ -592,18 +681,12 @@ func (c *logsConsumerGroupHandler) Setup(session sarama.ConsumerGroupSession) er c.readyCloser.Do(func() { close(c.ready) }) - _ = stats.RecordWithTags( - session.Context(), - []tag.Mutator{tag.Upsert(tagInstanceName, c.id.String())}, - statPartitionStart.M(1)) + c.telemetryBuilder.KafkaReceiverPartitionStart.Add(session.Context(), 1, metric.WithAttributes(attribute.String(attrInstanceName, c.id.String()))) return nil } func (c *logsConsumerGroupHandler) Cleanup(session sarama.ConsumerGroupSession) error { - _ = stats.RecordWithTags( - session.Context(), - []tag.Mutator{tag.Upsert(tagInstanceName, c.id.String())}, - statPartitionClose.M(1)) + c.telemetryBuilder.KafkaReceiverPartitionClose.Add(session.Context(), 1, metric.WithAttributes(attribute.String(attrInstanceName, c.id.String()))) return nil } @@ -627,24 +710,18 @@ func (c *logsConsumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSess } ctx := c.obsrecv.StartLogsOp(session.Context()) - statsTags := []tag.Mutator{ - tag.Upsert(tagInstanceName, c.id.String()), - tag.Upsert(tagPartition, strconv.Itoa(int(claim.Partition()))), - } - _ = stats.RecordWithTags( - ctx, - statsTags, - statMessageCount.M(1), - statMessageOffset.M(message.Offset), - statMessageOffsetLag.M(claim.HighWaterMarkOffset()-message.Offset-1)) + attrs := attribute.NewSet( + attribute.String(attrInstanceName, c.id.String()), + attribute.String(attrPartition, strconv.Itoa(int(claim.Partition()))), + ) + c.telemetryBuilder.KafkaReceiverMessages.Add(ctx, 1, metric.WithAttributeSet(attrs)) + c.telemetryBuilder.KafkaReceiverCurrentOffset.Record(ctx, message.Offset, metric.WithAttributeSet(attrs)) + c.telemetryBuilder.KafkaReceiverOffsetLag.Record(ctx, claim.HighWaterMarkOffset()-message.Offset-1, metric.WithAttributeSet(attrs)) logs, err := c.unmarshaler.Unmarshal(message.Value) if err != nil { c.logger.Error("failed to unmarshal message", zap.Error(err)) - _ = stats.RecordWithTags( - ctx, - []tag.Mutator{tag.Upsert(tagInstanceName, c.id.String())}, - statUnmarshalFailedLogRecords.M(1)) + c.telemetryBuilder.KafkaReceiverUnmarshalFailedLogRecords.Add(ctx, 1, metric.WithAttributes(attribute.String(attrInstanceName, c.id.String()))) if c.messageMarking.After && c.messageMarking.OnError { session.MarkMessage(message, "") } @@ -688,3 +765,30 @@ func toSaramaInitialOffset(initialOffset string) (int64, error) { return 0, errInvalidInitialOffset } } + +// loadEncodingExtension tries to load an available extension for the given encoding. 
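The Start/Shutdown rewrite above trades the fatal-error goroutine for a WaitGroup-tracked loop: Start calls consumeLoopWG.Add(1) before launching the goroutine, consumeLoop defers Done and treats context cancellation as a normal exit rather than an error, and Shutdown cancels the context and waits before closing the consumer group. A minimal, sarama-free sketch of that lifecycle (all names here are illustrative, not from the diff):

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// consumer owns a background loop whose lifetime is bounded by Start/Shutdown.
type consumer struct {
	cancel context.CancelFunc
	wg     sync.WaitGroup
}

func (c *consumer) Start() {
	ctx, cancel := context.WithCancel(context.Background())
	c.cancel = cancel
	c.wg.Add(1) // account for the loop before it starts
	go c.consumeLoop(ctx)
}

func (c *consumer) consumeLoop(ctx context.Context) {
	defer c.wg.Done()
	for {
		// Simulates one blocking Consume call; after a rebalance the real
		// loop would simply call Consume again.
		select {
		case <-time.After(50 * time.Millisecond):
			fmt.Println("consumed a batch")
		case <-ctx.Done():
		}
		if ctx.Err() != nil {
			fmt.Println("consumer stopped:", ctx.Err())
			return // cancellation is a normal exit, not a fatal error
		}
	}
}

func (c *consumer) Shutdown() {
	c.cancel()  // ask the loop to stop
	c.wg.Wait() // block until it actually has
}

func main() {
	c := &consumer{}
	c.Start()
	time.Sleep(120 * time.Millisecond)
	c.Shutdown()
}
```

Waiting on the group before Close removes the race between an in-flight Consume call and the consumer group being torn down during shutdown.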
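The handler edits above and the metadata.yaml additions below replace OpenCensus stats/tag calls with instruments on an mdatagen-generated TelemetryBuilder. Under the hood these are plain OpenTelemetry metric instruments; a hedged sketch of the equivalent direct API usage (instrument names mirror metadata.yaml, the attribute values are made up, and the synchronous Int64Gauge assumes a reasonably recent otel-go release):

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	ctx := context.Background()
	// The global provider is a no-op unless an SDK is installed; fine for a sketch.
	meter := otel.GetMeterProvider().Meter(
		"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver",
	)

	// Monotonic counter, as declared for kafka_receiver_messages in metadata.yaml.
	messages, err := meter.Int64Counter(
		"kafka_receiver_messages",
		metric.WithDescription("Number of received messages"),
		metric.WithUnit("1"),
	)
	if err != nil {
		panic(err)
	}
	// Gauge for the current offset; Record replaces the last value.
	offset, err := meter.Int64Gauge(
		"kafka_receiver_current_offset",
		metric.WithDescription("Current message offset"),
		metric.WithUnit("1"),
	)
	if err != nil {
		panic(err)
	}

	// Building the attribute set once and reusing it avoids re-allocating per
	// measurement, which is what the handlers above do with attrs.
	attrs := attribute.NewSet(
		attribute.String("name", "kafka/receiver-0"), // illustrative values
		attribute.String("partition", "3"),
	)
	messages.Add(ctx, 1, metric.WithAttributeSet(attrs))
	offset.Record(ctx, 42, metric.WithAttributeSet(attrs))
}
```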
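Immediately below, the new loadEncodingExtension helper resolves an unmarshaler from a host extension: it builds a component ID from the encoding name, looks it up in host.GetExtensions(), and asserts the result to the requested interface via a type parameter. A self-contained sketch of that lookup-and-assert pattern (the registry map and interfaces here stand in for the collector types):

```go
package main

import "fmt"

// extension stands in for the component.Component values kept by the host.
type extension any

// logsUnmarshaler is the capability we want the extension to provide.
type logsUnmarshaler interface{ UnmarshalLogs([]byte) (string, error) }

// loadExtension finds an extension by name and asserts it to T, mirroring the
// generic helper in the diff; the map argument replaces host.GetExtensions().
func loadExtension[T any](exts map[string]extension, name string) (T, error) {
	var zero T
	ext, ok := exts[name]
	if !ok {
		return zero, fmt.Errorf("unknown encoding extension %q", name)
	}
	t, ok := ext.(T)
	if !ok {
		return zero, fmt.Errorf("extension %q is not the requested type", name)
	}
	return t, nil
}

type jsonLogs struct{}

func (jsonLogs) UnmarshalLogs(b []byte) (string, error) { return string(b), nil }

func main() {
	exts := map[string]extension{"json_logs": jsonLogs{}}
	u, err := loadExtension[logsUnmarshaler](exts, "json_logs")
	if err != nil {
		panic(err)
	}
	out, _ := u.UnmarshalLogs([]byte(`{"msg":"hi"}`))
	fmt.Println(out)
}
```

Asserting to the type parameter keeps one helper serving plog, pmetric, and ptrace unmarshalers, which is why the receiver can try the extension first and fall back to the built-in encodings only when no extension matches.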
+func loadEncodingExtension[T any](host component.Host, encoding string) (*T, error) { + extensionID, err := encodingToComponentID(encoding) + if err != nil { + return nil, err + } + encodingExtension, ok := host.GetExtensions()[*extensionID] + if !ok { + return nil, fmt.Errorf("unknown encoding extension %q", encoding) + } + unmarshaler, ok := encodingExtension.(T) + if !ok { + return nil, fmt.Errorf("extension %q is not an unmarshaler", encoding) + } + return &unmarshaler, nil +} + +// encodingToComponentID converts an encoding string to a component ID using the given encoding as type. +func encodingToComponentID(encoding string) (*component.ID, error) { + componentType, err := component.NewType(encoding) + if err != nil { + return nil, fmt.Errorf("invalid component type: %w", err) + } + id := component.NewID(componentType) + return &id, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/metadata.yaml index 8ccc15c2814..8e02192b17c 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/metadata.yaml +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/metadata.yaml @@ -1,5 +1,4 @@ type: kafka -scope_name: otelcol/kafkareceiver status: class: receiver @@ -14,8 +13,60 @@ status: # TODO: Update the receiver to pass the tests tests: skip_lifecycle: true - goleak: - ignore: - top: - # See https://github.com/census-instrumentation/opencensus-go/issues/1191 for more information. - - "go.opencensus.io/stats/view.(*worker).start" + +telemetry: + metrics: + kafka_receiver_messages: + enabled: true + description: Number of received messages + unit: "1" + sum: + value_type: int + monotonic: true + kafka_receiver_current_offset: + enabled: true + description: Current message offset + unit: "1" + gauge: + value_type: int + kafka_receiver_offset_lag: + enabled: true + description: Current offset lag + unit: "1" + gauge: + value_type: int + kafka_receiver_partition_start: + enabled: true + description: Number of started partitions + unit: "1" + sum: + value_type: int + monotonic: true + kafka_receiver_partition_close: + enabled: true + description: Number of finished partitions + unit: "1" + sum: + value_type: int + monotonic: true + kafka_receiver_unmarshal_failed_metric_points: + enabled: true + description: Number of metric points failed to be unmarshaled + unit: "1" + sum: + value_type: int + monotonic: true + kafka_receiver_unmarshal_failed_log_records: + enabled: true + description: Number of log records failed to be unmarshaled + unit: "1" + sum: + value_type: int + monotonic: true + kafka_receiver_unmarshal_failed_spans: + enabled: true + description: Number of spans failed to be unmarshaled + unit: "1" + sum: + value_type: int + monotonic: true diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/metrics.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/metrics.go deleted file mode 100644 index c1dc6d3e7c3..00000000000 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/metrics.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package kafkareceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver" - -import ( - 
"go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" -) - -var ( - tagInstanceName, _ = tag.NewKey("name") - tagPartition, _ = tag.NewKey("partition") - - statMessageCount = stats.Int64("kafka_receiver_messages", "Number of received messages", stats.UnitDimensionless) - statMessageOffset = stats.Int64("kafka_receiver_current_offset", "Current message offset", stats.UnitDimensionless) - statMessageOffsetLag = stats.Int64("kafka_receiver_offset_lag", "Current offset lag", stats.UnitDimensionless) - - statPartitionStart = stats.Int64("kafka_receiver_partition_start", "Number of started partitions", stats.UnitDimensionless) - statPartitionClose = stats.Int64("kafka_receiver_partition_close", "Number of finished partitions", stats.UnitDimensionless) - - statUnmarshalFailedMetricPoints = stats.Int64("kafka_receiver_unmarshal_failed_metric_points", "Number of metric points failed to be unmarshaled", stats.UnitDimensionless) - statUnmarshalFailedLogRecords = stats.Int64("kafka_receiver_unmarshal_failed_log_records", "Number of log records failed to be unmarshaled", stats.UnitDimensionless) - statUnmarshalFailedSpans = stats.Int64("kafka_receiver_unmarshal_failed_spans", "Number of spans failed to be unmarshaled", stats.UnitDimensionless) -) - -// metricViews return metric views for Kafka receiver. -func metricViews() []*view.View { - partitionAgnosticTagKeys := []tag.Key{tagInstanceName} - partitionSpecificTagKeys := []tag.Key{tagInstanceName, tagPartition} - - countMessages := &view.View{ - Name: statMessageCount.Name(), - Measure: statMessageCount, - Description: statMessageCount.Description(), - TagKeys: partitionSpecificTagKeys, - Aggregation: view.Sum(), - } - - lastValueOffset := &view.View{ - Name: statMessageOffset.Name(), - Measure: statMessageOffset, - Description: statMessageOffset.Description(), - TagKeys: partitionSpecificTagKeys, - Aggregation: view.LastValue(), - } - - lastValueOffsetLag := &view.View{ - Name: statMessageOffsetLag.Name(), - Measure: statMessageOffsetLag, - Description: statMessageOffsetLag.Description(), - TagKeys: partitionSpecificTagKeys, - Aggregation: view.LastValue(), - } - - countPartitionStart := &view.View{ - Name: statPartitionStart.Name(), - Measure: statPartitionStart, - Description: statPartitionStart.Description(), - TagKeys: partitionAgnosticTagKeys, - Aggregation: view.Sum(), - } - - countPartitionClose := &view.View{ - Name: statPartitionClose.Name(), - Measure: statPartitionClose, - Description: statPartitionClose.Description(), - TagKeys: partitionAgnosticTagKeys, - Aggregation: view.Sum(), - } - - countUnmarshalFailedMetricPoints := &view.View{ - Name: statUnmarshalFailedMetricPoints.Name(), - Measure: statUnmarshalFailedMetricPoints, - Description: statUnmarshalFailedMetricPoints.Description(), - TagKeys: partitionAgnosticTagKeys, - Aggregation: view.Sum(), - } - - countUnmarshalFailedLogRecords := &view.View{ - Name: statUnmarshalFailedLogRecords.Name(), - Measure: statUnmarshalFailedLogRecords, - Description: statUnmarshalFailedLogRecords.Description(), - TagKeys: partitionAgnosticTagKeys, - Aggregation: view.Sum(), - } - - countUnmarshalFailedSpans := &view.View{ - Name: statUnmarshalFailedSpans.Name(), - Measure: statUnmarshalFailedSpans, - Description: statUnmarshalFailedSpans.Description(), - TagKeys: partitionAgnosticTagKeys, - Aggregation: view.Sum(), - } - - return []*view.View{ - countMessages, - lastValueOffset, - lastValueOffsetLag, - countPartitionStart, - countPartitionClose, - 
countUnmarshalFailedMetricPoints, - countUnmarshalFailedLogRecords, - countUnmarshalFailedSpans, - } -} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/unmarshaler.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/unmarshaler.go index bf44be7b496..793848d94c8 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/unmarshaler.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/unmarshaler.go @@ -17,7 +17,6 @@ import ( type TracesUnmarshaler interface { // Unmarshal deserializes the message body into traces. Unmarshal([]byte) (ptrace.Traces, error) - // Encoding of the serialized messages. Encoding() string } @@ -26,7 +25,6 @@ type TracesUnmarshaler interface { type MetricsUnmarshaler interface { // Unmarshal deserializes the message body into metrics. Unmarshal([]byte) (pmetric.Metrics, error) - // Encoding of the serialized messages Encoding() string } @@ -35,14 +33,12 @@ type MetricsUnmarshaler interface { type LogsUnmarshaler interface { // Unmarshal deserializes the message body into logs. Unmarshal([]byte) (plog.Logs, error) - // Encoding of the serialized messages. Encoding() string } type LogsUnmarshalerWithEnc interface { LogsUnmarshaler - // WithEnc sets the character encoding (UTF-8, GBK, etc.) of the unmarshaler. WithEnc(string) (LogsUnmarshalerWithEnc, error) } @@ -50,6 +46,7 @@ type LogsUnmarshalerWithEnc interface { // defaultTracesUnmarshalers returns a map of supported encodings with TracesUnmarshaler. func defaultTracesUnmarshalers() map[string]TracesUnmarshaler { otlpPb := newPdataTracesUnmarshaler(&ptrace.ProtoUnmarshaler{}, defaultEncoding) + otlpJSON := newPdataTracesUnmarshaler(&ptrace.JSONUnmarshaler{}, "otlp_json") jaegerProto := jaegerProtoSpanUnmarshaler{} jaegerJSON := jaegerJSONSpanUnmarshaler{} zipkinProto := newPdataTracesUnmarshaler(zipkinv2.NewProtobufTracesUnmarshaler(false, false), "zipkin_proto") @@ -57,6 +54,7 @@ func defaultTracesUnmarshalers() map[string]TracesUnmarshaler { zipkinThrift := newPdataTracesUnmarshaler(zipkinv1.NewThriftTracesUnmarshaler(), "zipkin_thrift") return map[string]TracesUnmarshaler{ otlpPb.Encoding(): otlpPb, + otlpJSON.Encoding(): otlpJSON, jaegerProto.Encoding(): jaegerProto, jaegerJSON.Encoding(): jaegerJSON, zipkinProto.Encoding(): zipkinProto, @@ -67,22 +65,68 @@ func defaultMetricsUnmarshalers() map[string]MetricsUnmarshaler { otlpPb := newPdataMetricsUnmarshaler(&pmetric.ProtoUnmarshaler{}, defaultEncoding) + otlpJSON := newPdataMetricsUnmarshaler(&pmetric.JSONUnmarshaler{}, "otlp_json") return map[string]MetricsUnmarshaler{ - otlpPb.Encoding(): otlpPb, + otlpPb.Encoding(): otlpPb, + otlpJSON.Encoding(): otlpJSON, } } func defaultLogsUnmarshalers(version string, logger *zap.Logger) map[string]LogsUnmarshaler { azureResourceLogs := newAzureResourceLogsUnmarshaler(version, logger) otlpPb := newPdataLogsUnmarshaler(&plog.ProtoUnmarshaler{}, defaultEncoding) + otlpJSON := newPdataLogsUnmarshaler(&plog.JSONUnmarshaler{}, "otlp_json") raw := newRawLogsUnmarshaler() text := newTextLogsUnmarshaler() json := newJSONLogsUnmarshaler() return map[string]LogsUnmarshaler{ azureResourceLogs.Encoding(): azureResourceLogs, otlpPb.Encoding(): otlpPb, + otlpJSON.Encoding(): otlpJSON, raw.Encoding(): raw, text.Encoding(): text, json.Encoding(): json, } } + +// tracesEncodingUnmarshaler is a wrapper
around ptrace.Unmarshaler that implements TracesUnmarshaler. +type tracesEncodingUnmarshaler struct { + unmarshaler ptrace.Unmarshaler + encoding string +} + +func (t *tracesEncodingUnmarshaler) Unmarshal(data []byte) (ptrace.Traces, error) { + return t.unmarshaler.UnmarshalTraces(data) +} + +func (t *tracesEncodingUnmarshaler) Encoding() string { + return t.encoding +} + +// metricsEncodingUnmarshaler is a wrapper around pmetric.Unmarshaler that implements MetricsUnmarshaler. +type metricsEncodingUnmarshaler struct { + unmarshaler pmetric.Unmarshaler + encoding string +} + +func (m *metricsEncodingUnmarshaler) Unmarshal(data []byte) (pmetric.Metrics, error) { + return m.unmarshaler.UnmarshalMetrics(data) +} + +func (m *metricsEncodingUnmarshaler) Encoding() string { + return m.encoding +} + +// logsEncodingUnmarshaler is a wrapper around plog.Unmarshaler that implements LogsUnmarshaler. +type logsEncodingUnmarshaler struct { + unmarshaler plog.Unmarshaler + encoding string +} + +func (l *logsEncodingUnmarshaler) Unmarshal(data []byte) (plog.Logs, error) { + return l.unmarshaler.UnmarshalLogs(data) +} + +func (l *logsEncodingUnmarshaler) Encoding() string { + return l.encoding +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/README.md index c53d206fb0c..65aba34c54e 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/README.md +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/README.md @@ -4,13 +4,14 @@ | Status | | | ------------- |-----------| | Stability | [beta]: metrics, traces | -| Distributions | [core], [contrib] | +| Distributions | [core], [contrib], [k8s] | | Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Areceiver%2Fopencensus%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Areceiver%2Fopencensus) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Areceiver%2Fopencensus%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Areceiver%2Fopencensus) | | [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@open-telemetry/collector-approvers](https://github.com/orgs/open-telemetry/teams/collector-approvers) | -[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta +[beta]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#beta [core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol [contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib +[k8s]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-k8s Receives data via gRPC or HTTP using [OpenCensus]( https://opencensus.io/) @@ -28,10 +29,9 @@ receivers: The following settings are configurable: -- `endpoint` (default = 0.0.0.0:55678): host:port to which the receiver is +- 
`endpoint` (default = localhost:55678): host:port to which the receiver is going to receive data. The valid syntax is described at - https://github.com/grpc/grpc/blob/master/doc/naming.md. The - `component.UseLocalHostAsDefaultHost` feature gate changes this to localhost:55678. This will become the default in a future release. + https://github.com/grpc/grpc/blob/master/doc/naming.md. See our [security best practices doc](https://opentelemetry.io/docs/security/config-best-practices/#protect-against-denial-of-service-attacks) to understand how to set the endpoint in different environments. ## Advanced Configuration diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/factory.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/factory.go index f45c84eeb47..be1633df208 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/factory.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/factory.go @@ -12,12 +12,11 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/receiver" - "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/localhostgate" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/internal/metadata" ) -const grpcPort = 55678 +const grpcEndpoint = "localhost:55678" // NewFactory creates a new OpenCensus receiver factory. func NewFactory() receiver.Factory { @@ -32,7 +31,7 @@ func createDefaultConfig() component.Config { return &Config{ ServerConfig: configgrpc.ServerConfig{ NetAddr: confignet.AddrConfig{ - Endpoint: localhostgate.EndpointForPort(grpcPort), + Endpoint: grpcEndpoint, Transport: confignet.TransportTypeTCP, }, // We almost write 0 bytes, so no need to tune WriteBufferSize. @@ -43,7 +42,7 @@ func createDefaultConfig() component.Config { func createTracesReceiver( _ context.Context, - set receiver.CreateSettings, + set receiver.Settings, cfg component.Config, nextConsumer consumer.Traces, ) (receiver.Traces, error) { @@ -58,7 +57,7 @@ func createTracesReceiver( func createMetricsReceiver( _ context.Context, - set receiver.CreateSettings, + set receiver.Settings, cfg component.Config, nextConsumer consumer.Metrics, ) (receiver.Metrics, error) { @@ -73,6 +72,6 @@ func createMetricsReceiver( // This is the map of already created OpenCensus receivers for particular configurations. // We maintain this map because the Factory is asked trace and metric receivers separately -// when it gets CreateTracesReceiver() and CreateMetricsReceiver() but they must not +// when it gets CreateTraces() and CreateMetrics() but they must not // create separate objects, they must use one ocReceiver object per configuration. 
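The comment closing above states the factory contract: asking the factory for a traces receiver and a metrics receiver with the same configuration must yield one shared ocReceiver, not two. The sharedcomponent package referenced below implements this; a minimal illustrative cache with the same shape (keyed on the config pointer, which the real package generalizes):

```go
package main

import (
	"fmt"
	"sync"
)

// receiverInstance stands in for the component both signals share.
type receiverInstance struct{ name string }

// sharedComponents caches one instance per configuration key.
type sharedComponents struct {
	mu    sync.Mutex
	comps map[any]*receiverInstance
}

func newSharedComponents() *sharedComponents {
	return &sharedComponents{comps: map[any]*receiverInstance{}}
}

// getOrAdd returns the cached instance for key, creating it on first use.
func (s *sharedComponents) getOrAdd(key any, create func() *receiverInstance) *receiverInstance {
	s.mu.Lock()
	defer s.mu.Unlock()
	if comp, ok := s.comps[key]; ok {
		return comp
	}
	comp := create()
	s.comps[key] = comp
	return comp
}

func main() {
	cfg := &struct{}{} // stands in for the receiver's *Config
	shared := newSharedComponents()

	traces := shared.getOrAdd(cfg, func() *receiverInstance { return &receiverInstance{name: "oc"} })
	metrics := shared.getOrAdd(cfg, func() *receiverInstance { return &receiverInstance{name: "oc"} })

	fmt.Println(traces == metrics) // true: both signals share one receiver
}
```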
var receivers = sharedcomponent.NewSharedComponents() diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/internal/metadata/generated_status.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/internal/metadata/generated_status.go index 0e972cfaae3..931dac2ca32 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/internal/metadata/generated_status.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/internal/metadata/generated_status.go @@ -7,7 +7,8 @@ import ( ) var ( - Type = component.MustNewType("opencensus") + Type = component.MustNewType("opencensus") + ScopeName = "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver" ) const ( diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/internal/metadata/generated_telemetry.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/internal/metadata/generated_telemetry.go deleted file mode 100644 index 66aba2295e9..00000000000 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/internal/metadata/generated_telemetry.go +++ /dev/null @@ -1,17 +0,0 @@ -// Code generated by mdatagen. DO NOT EDIT. - -package metadata - -import ( - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/trace" -) - -func Meter(settings component.TelemetrySettings) metric.Meter { - return settings.MeterProvider.Meter("otelcol/opencensusreceiver") -} - -func Tracer(settings component.TelemetrySettings) trace.Tracer { - return settings.TracerProvider.Tracer("otelcol/opencensusreceiver") -} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/internal/ocmetrics/opencensus.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/internal/ocmetrics/opencensus.go index a99bf7b865d..a07bee4a797 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/internal/ocmetrics/opencensus.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/internal/ocmetrics/opencensus.go @@ -27,7 +27,7 @@ type Receiver struct { } // New creates a new ocmetrics.Receiver reference. -func New(nextConsumer consumer.Metrics, set receiver.CreateSettings) (*Receiver, error) { +func New(nextConsumer consumer.Metrics, set receiver.Settings) (*Receiver, error) { obsrecv, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{ ReceiverID: set.ID, Transport: receiverTransport, diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/internal/octrace/opencensus.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/internal/octrace/opencensus.go index 8edf43e8628..9977d9fa670 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/internal/octrace/opencensus.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/internal/octrace/opencensus.go @@ -32,8 +32,7 @@ type Receiver struct { } // New creates a new opencensus.Receiver reference. 
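The New constructors around this point all build a receiverhelper.ObsReport from the receiver ID and transport, and the handlers then bracket every batch with Start...Op and End...Op calls so accepted and refused counts are recorded uniformly. A hedged sketch of that bracket for traces (only ReceiverID and Transport are visible in this diff; the ReceiverCreateSettings field name, the "grpc" transport string, and the nil consume error are my assumptions for this collector version):

```go
package sketch

import (
	"context"

	"go.opentelemetry.io/collector/pdata/ptrace"
	"go.opentelemetry.io/collector/receiver"
	"go.opentelemetry.io/collector/receiver/receiverhelper"
)

// newObsWrappedHandler shows the ObsReport bracket used by the receivers above.
func newObsWrappedHandler(set receiver.Settings) (func(context.Context, ptrace.Traces) error, error) {
	obsrecv, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{
		ReceiverID:             set.ID,
		Transport:              "grpc", // placeholder; each receiver passes its own
		ReceiverCreateSettings: set,    // assumed field name for this version
	})
	if err != nil {
		return nil, err
	}
	handle := func(ctx context.Context, td ptrace.Traces) error {
		ctx = obsrecv.StartTracesOp(ctx) // marks the start of one receive operation
		var consumeErr error             // would be nextConsumer.ConsumeTraces(ctx, td)
		// EndTracesOp records accepted vs. refused spans based on consumeErr.
		obsrecv.EndTracesOp(ctx, "protobuf", td.SpanCount(), consumeErr)
		return consumeErr
	}
	return handle, nil
}
```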
-func New(nextConsumer consumer.Traces, set receiver.CreateSettings) (*Receiver, error) { - +func New(nextConsumer consumer.Traces, set receiver.Settings) (*Receiver, error) { obsrecv, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{ ReceiverID: set.ID, Transport: receiverTransport, diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/metadata.yaml index 7d996ec7ce0..9971ee24253 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/metadata.yaml +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/metadata.yaml @@ -1,12 +1,9 @@ type: opencensus -scope_name: otelcol/opencensusreceiver status: class: receiver stability: beta: [metrics, traces] - distributions: - - core - - contrib + distributions: [core, contrib, k8s] codeowners: active: [open-telemetry/collector-approvers] \ No newline at end of file diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/opencensus.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/opencensus.go index fa37d0b6cf4..e5047c45cd9 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/opencensus.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/opencensus.go @@ -18,6 +18,7 @@ import ( "github.com/rs/cors" "github.com/soheilhy/cmux" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componentstatus" "go.opentelemetry.io/collector/config/configgrpc" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/receiver" @@ -47,7 +48,7 @@ type ocReceiver struct { stopWG sync.WaitGroup - settings receiver.CreateSettings + settings receiver.Settings multiplexer cmux.CMux } @@ -58,7 +59,7 @@ func newOpenCensusReceiver( cfg *Config, tc consumer.Traces, mc consumer.Metrics, - settings receiver.CreateSettings, + settings receiver.Settings, opts ...ocOption, ) *ocReceiver { ocr := &ocReceiver{ @@ -113,10 +114,12 @@ func (ocr *ocReceiver) Start(ctx context.Context, host component.Host) error { if !hasConsumer { return errors.New("cannot start receiver: no consumers were specified") } + ocr.ln, err = net.Listen(string(ocr.cfg.NetAddr.Transport), ocr.cfg.NetAddr.Endpoint) if err != nil { return fmt.Errorf("failed to bind to address %q: %w", ocr.cfg.NetAddr.Endpoint, err) } + // Register the grpc-gateway on the HTTP server mux var c context.Context c, ocr.cancel = context.WithCancel(context.Background()) @@ -143,20 +146,20 @@ func (ocr *ocReceiver) Start(ctx context.Context, host component.Host) error { defer ocr.stopWG.Done() startWG.Done() // Check for cmux.ErrServerClosed, because during the shutdown this is not properly close before closing the cmux, - if err := ocr.serverGRPC.Serve(grpcL); !errors.Is(err, grpc.ErrServerStopped) && !errors.Is(err, cmux.ErrServerClosed) && err != nil { - ocr.settings.TelemetrySettings.ReportStatus(component.NewFatalErrorEvent(err)) + if err := ocr.serverGRPC.Serve(grpcL); err != nil && !errors.Is(err, grpc.ErrServerStopped) && !errors.Is(err, cmux.ErrServerClosed) { + componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(err)) } }() go func() { startWG.Done() - if err := ocr.serverHTTP.Serve(httpL); 
!errors.Is(err, http.ErrServerClosed) && !errors.Is(err, cmux.ErrServerClosed) && err != nil { - ocr.settings.TelemetrySettings.ReportStatus(component.NewFatalErrorEvent(err)) + if err := ocr.serverHTTP.Serve(httpL); err != nil && !errors.Is(err, http.ErrServerClosed) && !errors.Is(err, cmux.ErrServerClosed) { + componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(err)) } }() go func() { startWG.Done() - if err := ocr.multiplexer.Serve(); !errors.Is(err, cmux.ErrServerClosed) && err != nil { - ocr.settings.TelemetrySettings.ReportStatus(component.NewFatalErrorEvent(err)) + if err := ocr.multiplexer.Serve(); err != nil && !errors.Is(err, net.ErrClosed) { + componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(err)) } }() @@ -186,7 +189,6 @@ func (ocr *ocReceiver) Start(ctx context.Context, host component.Host) error { // Shutdown is a method to turn off receiving. func (ocr *ocReceiver) Shutdown(context.Context) error { - if ocr.cancel != nil { ocr.cancel() } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/options.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/options.go index 3851fca059f..893eee4b676 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/options.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/options.go @@ -36,6 +36,7 @@ func withGRPCServerSettings(settings configgrpc.ServerConfig) ocOption { gsvOpts := grpcServerSettings(settings) return gsvOpts } + func (gsvo grpcServerSettings) withReceiver(ocr *ocReceiver) { ocr.grpcServerSettings = configgrpc.ServerConfig(gsvo) } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/README.md index 7cbd83afb62..0fc0939be84 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/README.md +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/README.md @@ -4,13 +4,14 @@ | Status | | | ------------- |-----------| | Stability | [beta]: traces | -| Distributions | [core], [contrib] | +| Distributions | [core], [contrib], [k8s] | | Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Areceiver%2Fzipkin%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Areceiver%2Fzipkin) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Areceiver%2Fzipkin%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Areceiver%2Fzipkin) | | [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@MovieStoreGuy](https://www.github.com/MovieStoreGuy), [@andrzej-stencel](https://www.github.com/andrzej-stencel), [@crobert-1](https://www.github.com/crobert-1) | -[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta +[beta]: 
https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#beta [core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol [contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib +[k8s]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-k8s This receiver receives spans from [Zipkin](https://zipkin.io/) (V1 and V2). @@ -27,7 +28,7 @@ receivers: The following settings are configurable: -- `endpoint` (default = 0.0.0.0:9411): host:port on which the receiver is going to receive data. The `component.UseLocalHostAsDefaultHost` feature gate changes this to localhost:9411. This will become the default in a future release. For full list of `ServerConfig` refer [here](https://github.com/open-telemetry/opentelemetry-collector/tree/main/config/confighttp). +- `endpoint` (default = localhost:9411): host:port on which the receiver is going to receive data. See our [security best practices doc](https://opentelemetry.io/docs/security/config-best-practices/#protect-against-denial-of-service-attacks) to understand how to set the endpoint in different environments. For a full list of `ServerConfig` options, refer [here](https://github.com/open-telemetry/opentelemetry-collector/tree/main/config/confighttp). - `parse_string_tags` (default = false): if enabled, the receiver will attempt to parse string tags/binary annotations into int/bool/float. ## Advanced Configuration diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/factory.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/factory.go index 0675a1829e7..d4bdbf9342d 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/factory.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/factory.go @@ -11,15 +11,13 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/receiver" - "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/localhostgate" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/internal/metadata" ) // This file implements factory for Zipkin receiver. const ( - defaultHTTPPort = 9411 - defaultBindEndpoint = "0.0.0.0:9411" + defaultHTTPEndpoint = "localhost:9411" ) // NewFactory creates a new Zipkin receiver factory @@ -35,7 +33,7 @@ func NewFactory() receiver.Factory { func createDefaultConfig() component.Config { return &Config{ ServerConfig: confighttp.ServerConfig{ - Endpoint: localhostgate.EndpointForPort(defaultHTTPPort), + Endpoint: defaultHTTPEndpoint, }, ParseStringTags: false, } @@ -44,7 +42,7 @@ func createDefaultConfig() component.Config { // createTracesReceiver creates a trace receiver based on provided config.
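A second pattern repeated across these receivers is fatal-error reporting from background Serve goroutines: instead of calling ReportStatus on TelemetrySettings, errors now flow through componentstatus.ReportStatus with the Host captured in Start, and the guards are reordered to test err != nil before the errors.Is checks so the common nil case short-circuits. A condensed sketch (the HTTP server and listener wiring is illustrative):

```go
package sketch

import (
	"errors"
	"net"
	"net/http"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/component/componentstatus"
)

type httpReceiver struct {
	server *http.Server
	ln     net.Listener
}

// Start launches Serve in the background; only unexpected errors are fatal.
func (r *httpReceiver) Start(host component.Host) error {
	go func() {
		err := r.server.Serve(r.ln)
		// ErrServerClosed is the normal shutdown path, not a failure.
		if err != nil && !errors.Is(err, http.ErrServerClosed) {
			// Report through the host so the collector can react, e.g. halt.
			componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(err))
		}
	}()
	return nil
}
```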
func createTracesReceiver( _ context.Context, - set receiver.CreateSettings, + set receiver.Settings, cfg component.Config, nextConsumer consumer.Traces, ) (receiver.Traces, error) { diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/internal/metadata/generated_status.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/internal/metadata/generated_status.go index 9c236a37f60..d7bffd7a8f8 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/internal/metadata/generated_status.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/internal/metadata/generated_status.go @@ -7,7 +7,8 @@ import ( ) var ( - Type = component.MustNewType("zipkin") + Type = component.MustNewType("zipkin") + ScopeName = "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver" ) const ( diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/internal/metadata/generated_telemetry.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/internal/metadata/generated_telemetry.go deleted file mode 100644 index e3dd94b3129..00000000000 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/internal/metadata/generated_telemetry.go +++ /dev/null @@ -1,17 +0,0 @@ -// Code generated by mdatagen. DO NOT EDIT. - -package metadata - -import ( - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/trace" -) - -func Meter(settings component.TelemetrySettings) metric.Meter { - return settings.MeterProvider.Meter("otelcol/zipkinreceiver") -} - -func Tracer(settings component.TelemetrySettings) trace.Tracer { - return settings.TracerProvider.Tracer("otelcol/zipkinreceiver") -} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/metadata.yaml index 2ebc6c5df30..779d41098bb 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/metadata.yaml +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/metadata.yaml @@ -1,12 +1,9 @@ type: zipkin -scope_name: otelcol/zipkinreceiver status: class: receiver stability: beta: [traces] - distributions: - - core - - contrib + distributions: [core, contrib, k8s] codeowners: active: [MovieStoreGuy, andrzej-stencel, crobert-1] diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/trace_receiver.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/trace_receiver.go index 9a5a897a70f..c3f5ceffc54 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/trace_receiver.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/trace_receiver.go @@ -15,6 +15,7 @@ import ( "sync" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componentstatus" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/pdata/ptrace" @@ -32,8 +33,10 @@ const ( receiverTransportV2PROTO = "http_v2_proto" ) -var errNextConsumerRespBody = 
[]byte(`"Internal Server Error"`) -var errBadRequestRespBody = []byte(`"Bad Request"`) +var ( + errNextConsumerRespBody = []byte(`"Internal Server Error"`) + errBadRequestRespBody = []byte(`"Bad Request"`) +) // zipkinReceiver type is used to handle spans received in the Zipkin format. type zipkinReceiver struct { @@ -49,14 +52,14 @@ type zipkinReceiver struct { protobufUnmarshaler ptrace.Unmarshaler protobufDebugUnmarshaler ptrace.Unmarshaler - settings receiver.CreateSettings + settings receiver.Settings obsrecvrs map[string]*receiverhelper.ObsReport } var _ http.Handler = (*zipkinReceiver)(nil) // newReceiver creates a new zipkinReceiver reference. -func newReceiver(config *Config, nextConsumer consumer.Traces, settings receiver.CreateSettings) (*zipkinReceiver, error) { +func newReceiver(config *Config, nextConsumer consumer.Traces, settings receiver.Settings) (*zipkinReceiver, error) { transports := []string{receiverTransportV1Thrift, receiverTransportV1JSON, receiverTransportV2JSON, receiverTransportV2PROTO} obsrecvrs := make(map[string]*receiverhelper.ObsReport) for _, transport := range transports { @@ -107,7 +110,7 @@ func (zr *zipkinReceiver) Start(ctx context.Context, host component.Host) error defer zr.shutdownWG.Done() if errHTTP := zr.server.Serve(listener); !errors.Is(errHTTP, http.ErrServerClosed) && errHTTP != nil { - zr.settings.TelemetrySettings.ReportStatus(component.NewFatalErrorEvent(errHTTP)) + componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(errHTTP)) } }() @@ -253,7 +256,6 @@ func (zr *zipkinReceiver) ServeHTTP(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusInternalServerError) _, _ = w.Write(errNextConsumerRespBody) } - } func transportType(r *http.Request, asZipkinv1 bool) string { diff --git a/vendor/github.com/opentracing-contrib/go-grpc/.editorconfig b/vendor/github.com/opentracing-contrib/go-grpc/.editorconfig new file mode 100644 index 00000000000..302cfc42778 --- /dev/null +++ b/vendor/github.com/opentracing-contrib/go-grpc/.editorconfig @@ -0,0 +1,13 @@ +root = true + +[*] +charset = utf-8 +end_of_line = lf +insert_final_newline = true +trim_trailing_whitespace = true +indent_size = 4 +indent_style = tab + +[*.{md,yml,yaml}] +indent_size = 2 +indent_style = space diff --git a/vendor/github.com/opentracing-contrib/go-grpc/.golangci.yml b/vendor/github.com/opentracing-contrib/go-grpc/.golangci.yml new file mode 100644 index 00000000000..30c2b21b02b --- /dev/null +++ b/vendor/github.com/opentracing-contrib/go-grpc/.golangci.yml @@ -0,0 +1,91 @@ +linters-settings: + misspell: + locale: US + revive: + ignore-generated-header: true + rules: + - name: blank-imports + - name: context-as-argument + - name: context-keys-type + - name: dot-imports + - name: empty-block + - name: error-naming + - name: error-return + - name: error-strings + - name: errorf + - name: exported + - name: increment-decrement + - name: indent-error-flow + - name: package-comments + - name: range + - name: receiver-naming + - name: redefines-builtin-id + - name: superfluous-else + - name: time-naming + - name: unexported-return + - name: unreachable-code + - name: unused-parameter + - name: var-declaration + - name: var-naming + govet: + enable-all: true +linters: + enable: + - asasalint + - asciicheck + - bidichk + # - contextcheck + - copyloopvar + - dupword + - durationcheck + - errcheck + - errchkjson + - errname + - errorlint + - fatcontext + - forcetypeassert + - gocheckcompilerdirectives + - gochecksumtype + - gocritic + - godot + - gofmt + - 
gofumpt + - goimports + - gosec + - gosimple + - gosmopolitan + - govet + - ineffassign + - intrange + - makezero + - misspell + - musttag + - nilerr + - noctx + - nolintlint + - paralleltest + - perfsprint + - prealloc + - predeclared + - reassign + - revive + - staticcheck + - stylecheck + - tagalign + - tenv + - thelper + - tparallel + - typecheck + - unconvert + - unparam + - unused + - usestdlibvars + - wastedassign + - whitespace + # - wrapcheck + disable-all: true +issues: + max-issues-per-linter: 0 + max-same-issues: 0 +run: + timeout: 5m diff --git a/vendor/github.com/opentracing-contrib/go-grpc/README.md b/vendor/github.com/opentracing-contrib/go-grpc/README.md index e6b10658081..b6981f1f9b3 100644 --- a/vendor/github.com/opentracing-contrib/go-grpc/README.md +++ b/vendor/github.com/opentracing-contrib/go-grpc/README.md @@ -1,11 +1,16 @@ # OpenTracing support for gRPC in Go +[![CI](https://github.com/opentracing-contrib/go-grpc/actions/workflows/ci.yml/badge.svg)](https://github.com/opentracing-contrib/go-grpc/actions/workflows/ci.yml) +[![Go Report Card](https://goreportcard.com/badge/github.com/opentracing-contrib/go-grpc)](https://goreportcard.com/report/github.com/opentracing-contrib/go-grpc) +![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/opentracing-contrib/go-grpc) +[![GitHub release (latest SemVer)](https://img.shields.io/github/v/release/opentracing-contrib/go-grpc?logo=github&sort=semver)](https://github.com/opentracing-contrib/go-grpc/releases/latest) + The `otgrpc` package makes it easy to add OpenTracing support to gRPC-based systems in Go. ## Installation -``` +```shell go get github.com/opentracing-contrib/go-grpc ``` @@ -54,4 +59,3 @@ s := grpc.NewServer( // All future RPC activity involving `s` will be automatically traced. 
``` - diff --git a/vendor/github.com/opentracing-contrib/go-grpc/client.go b/vendor/github.com/opentracing-contrib/go-grpc/client.go index c9496c85aff..2f7cdf7611e 100644 --- a/vendor/github.com/opentracing-contrib/go-grpc/client.go +++ b/vendor/github.com/opentracing-contrib/go-grpc/client.go @@ -1,12 +1,13 @@ package otgrpc import ( + "context" + "errors" "io" "runtime" "sync/atomic" opentracing "github.com/opentracing/opentracing-go" - "context" "github.com/opentracing/opentracing-go/ext" "github.com/opentracing/opentracing-go/log" "google.golang.org/grpc" @@ -18,10 +19,10 @@ import ( // // For example: // -// conn, err := grpc.Dial( -// address, -// ..., // (existing DialOptions) -// grpc.WithUnaryInterceptor(otgrpc.OpenTracingClientInterceptor(tracer))) +// conn, err := grpc.Dial( +// address, +// ..., // (existing DialOptions) +// grpc.WithUnaryInterceptor(otgrpc.OpenTracingClientInterceptor(tracer))) // // All gRPC client spans will inject the OpenTracing SpanContext into the gRPC // metadata; they will also look in the context.Context for an active @@ -80,10 +81,10 @@ func OpenTracingClientInterceptor(tracer opentracing.Tracer, optFuncs ...Option) // // For example: // -// conn, err := grpc.Dial( -// address, -// ..., // (existing DialOptions) -// grpc.WithStreamInterceptor(otgrpc.OpenTracingStreamClientInterceptor(tracer))) +// conn, err := grpc.Dial( +// address, +// ..., // (existing DialOptions) +// grpc.WithStreamInterceptor(otgrpc.OpenTracingStreamClientInterceptor(tracer))) // // All gRPC client spans will inject the OpenTracing SpanContext into the gRPC // metadata; they will also look in the context.Context for an active @@ -206,7 +207,7 @@ func (cs *openTracingClientStream) SendMsg(m interface{}) error { func (cs *openTracingClientStream) RecvMsg(m interface{}) error { err := cs.ClientStream.RecvMsg(m) - if err == io.EOF { + if errors.Is(err, io.EOF) { cs.finishFunc(nil) return err } else if err != nil { diff --git a/vendor/github.com/opentracing-contrib/go-grpc/errors.go b/vendor/github.com/opentracing-contrib/go-grpc/errors.go index 2f202af726b..b8212b2ace2 100644 --- a/vendor/github.com/opentracing-contrib/go-grpc/errors.go +++ b/vendor/github.com/opentracing-contrib/go-grpc/errors.go @@ -21,7 +21,7 @@ const ( ServerError Class = "5xx" ) -// ErrorClass returns the class of the given error +// ErrorClass returns the class of the given error. func ErrorClass(err error) Class { if s, ok := status.FromError(err); ok { switch s.Code() { diff --git a/vendor/github.com/opentracing-contrib/go-grpc/options.go b/vendor/github.com/opentracing-contrib/go-grpc/options.go index 5fac41e7611..1fdbe24a811 100644 --- a/vendor/github.com/opentracing-contrib/go-grpc/options.go +++ b/vendor/github.com/opentracing-contrib/go-grpc/options.go @@ -32,7 +32,7 @@ type SpanInclusionFunc func( method string, req, resp interface{}) bool -// IncludingSpans binds a IncludeSpanFunc to the options +// IncludingSpans binds a IncludeSpanFunc to the options. func IncludingSpans(inclusionFunc SpanInclusionFunc) Option { return func(o *options) { o.inclusionFunc = inclusionFunc @@ -60,10 +60,9 @@ func SpanDecorator(decorator SpanDecoratorFunc) Option { // scale well as production use dictates other configuration and tuning // parameters. type options struct { - logPayloads bool - decorator SpanDecoratorFunc - // May be nil. + decorator SpanDecoratorFunc inclusionFunc SpanInclusionFunc + logPayloads bool } // newOptions returns the default options. 
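The client.go change above is a classic sentinel-error fix: err == io.EOF misses errors that arrive wrapped, while errors.Is walks the wrap chain. A tiny self-contained demonstration:

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

func main() {
	// A wrapped EOF, as a transport layer might return it.
	err := fmt.Errorf("recv failed: %w", io.EOF)

	fmt.Println(err == io.EOF)          // false: direct comparison misses the wrap
	fmt.Println(errors.Is(err, io.EOF)) // true: errors.Is unwraps via %w
}
```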
diff --git a/vendor/github.com/opentracing-contrib/go-grpc/renovate.json b/vendor/github.com/opentracing-contrib/go-grpc/renovate.json new file mode 100644 index 00000000000..321957144b5 --- /dev/null +++ b/vendor/github.com/opentracing-contrib/go-grpc/renovate.json @@ -0,0 +1,7 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "extends": [ + "github>opentracing-contrib/common", + "schedule:daily" + ] +} diff --git a/vendor/github.com/opentracing-contrib/go-grpc/server.go b/vendor/github.com/opentracing-contrib/go-grpc/server.go index c79d511bc50..71f7e1a8419 100644 --- a/vendor/github.com/opentracing-contrib/go-grpc/server.go +++ b/vendor/github.com/opentracing-contrib/go-grpc/server.go @@ -1,8 +1,9 @@ package otgrpc import ( - opentracing "github.com/opentracing/opentracing-go" "context" + + opentracing "github.com/opentracing/opentracing-go" "github.com/opentracing/opentracing-go/ext" "github.com/opentracing/opentracing-go/log" "google.golang.org/grpc" @@ -14,9 +15,9 @@ import ( // // For example: // -// s := grpc.NewServer( -// ..., // (existing ServerOptions) -// grpc.UnaryInterceptor(otgrpc.OpenTracingServerInterceptor(tracer))) +// s := grpc.NewServer( +// ..., // (existing ServerOptions) +// grpc.UnaryInterceptor(otgrpc.OpenTracingServerInterceptor(tracer))) // // All gRPC server spans will look for an OpenTracing SpanContext in the gRPC // metadata; if found, the server span will act as the ChildOf that RPC @@ -33,12 +34,12 @@ func OpenTracingServerInterceptor(tracer opentracing.Tracer, optFuncs ...Option) info *grpc.UnaryServerInfo, handler grpc.UnaryHandler, ) (resp interface{}, err error) { - spanContext, err := extractSpanContext(ctx, tracer) - if err != nil && err != opentracing.ErrSpanContextNotFound { - // TODO: establish some sort of error reporting mechanism here. We - // don't know where to put such an error and must rely on Tracer - // implementations to do something appropriate for the time being. - } + spanContext, _ := extractSpanContext(ctx, tracer) + // if err != nil && !errors.Is(err, opentracing.ErrSpanContextNotFound) { + // // TODO: establish some sort of error reporting mechanism here. We + // // don't know where to put such an error and must rely on Tracer + // // implementations to do something appropriate for the time being. + // } if otgrpcOpts.inclusionFunc != nil && !otgrpcOpts.inclusionFunc(spanContext, info.FullMethod, req, nil) { return handler(ctx, req) @@ -76,9 +77,9 @@ func OpenTracingServerInterceptor(tracer opentracing.Tracer, optFuncs ...Option) // // For example: // -// s := grpc.NewServer( -// ..., // (existing ServerOptions) -// grpc.StreamInterceptor(otgrpc.OpenTracingStreamServerInterceptor(tracer))) +// s := grpc.NewServer( +// ..., // (existing ServerOptions) +// grpc.StreamInterceptor(otgrpc.OpenTracingStreamServerInterceptor(tracer))) // // All gRPC server spans will look for an OpenTracing SpanContext in the gRPC // metadata; if found, the server span will act as the ChildOf that RPC @@ -90,12 +91,12 @@ func OpenTracingStreamServerInterceptor(tracer opentracing.Tracer, optFuncs ...O otgrpcOpts := newOptions() otgrpcOpts.apply(optFuncs...) return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - spanContext, err := extractSpanContext(ss.Context(), tracer) - if err != nil && err != opentracing.ErrSpanContextNotFound { - // TODO: establish some sort of error reporting mechanism here. 
We - // don't know where to put such an error and must rely on Tracer - // implementations to do something appropriate for the time being. - } + spanContext, _ := extractSpanContext(ss.Context(), tracer) + // if err != nil && !errors.Is(err, opentracing.ErrSpanContextNotFound) { + // // TODO: establish some sort of error reporting mechanism here. We + // // don't know where to put such an error and must rely on Tracer + // // implementations to do something appropriate for the time being. + // } if otgrpcOpts.inclusionFunc != nil && !otgrpcOpts.inclusionFunc(spanContext, info.FullMethod, nil, nil) { return handler(srv, ss) @@ -111,7 +112,7 @@ func OpenTracingStreamServerInterceptor(tracer opentracing.Tracer, optFuncs ...O ServerStream: ss, ctx: opentracing.ContextWithSpan(ss.Context(), serverSpan), } - err = handler(srv, ss) + err := handler(srv, ss) if err != nil { SetSpanTags(serverSpan, err, false) serverSpan.LogFields(log.String("event", "error"), log.String("message", err.Error())) diff --git a/vendor/github.com/opentracing-contrib/go-grpc/shared.go b/vendor/github.com/opentracing-contrib/go-grpc/shared.go index 9abd5eaa629..88f9cd13e6e 100644 --- a/vendor/github.com/opentracing-contrib/go-grpc/shared.go +++ b/vendor/github.com/opentracing-contrib/go-grpc/shared.go @@ -8,10 +8,8 @@ import ( "google.golang.org/grpc/metadata" ) -var ( - // Morally a const: - gRPCComponentTag = opentracing.Tag{string(ext.Component), "gRPC"} -) +// Morally a const:. +var gRPCComponentTag = opentracing.Tag{Key: string(ext.Component), Value: "gRPC"} // metadataReaderWriter satisfies both the opentracing.TextMapReader and // opentracing.TextMapWriter interfaces. diff --git a/vendor/github.com/parquet-go/parquet-go/README.md b/vendor/github.com/parquet-go/parquet-go/README.md index f0f0d8f5cf1..94bbdefad5b 100644 --- a/vendor/github.com/parquet-go/parquet-go/README.md +++ b/vendor/github.com/parquet-go/parquet-go/README.md @@ -1,10 +1,13 @@ -# parquet-go/parquet-go [![build status](https://github.com/parquet-go/parquet-go/actions/workflows/test.yml/badge.svg?branch=main)](https://github.com/parquet-go/parquet-go/actions) [![Go Report Card](https://goreportcard.com/badge/github.com/parquet-go/parquet-go)](https://goreportcard.com/report/github.com/parquet-go/parquet-go) [![Go Reference](https://pkg.go.dev/badge/github.com/parquet-go/parquet-go.svg)](https://pkg.go.dev/github.com/parquet-go/parquet-go) - -High-performance Go library to manipulate parquet files, initially developed at -[Twilio Segment](https://segment.com/engineering). - -![parquet-go-logo](https://github.com/parquet-go/parquet-go/assets/96151026/5b1f043b-2cee-4a64-a3c3-40d3353fecc0) - +
+# parquet-go/parquet-go
+
+High-performance Go library to manipulate parquet files, initially developed at
+[Twilio Segment](https://segment.com/engineering).
+
## Motivation @@ -35,7 +38,7 @@ using JSON or Protobuf. For more information, refer to the [Parquet Format Speci The package is distributed as a standard Go module that programs can take a dependency on and install with the following command: -``` +```bash go get github.com/parquet-go/parquet-go ``` @@ -289,11 +292,11 @@ defined by the sorting columns of the groups. There are a few constraints when merging row groups: -* The sorting columns of all the row groups must be the same, or the merge +- The sorting columns of all the row groups must be the same, or the merge operation must be explicitly configured a set of sorting columns which are a prefix of the sorting columns of all merged row groups. -* The schemas of row groups must all be equal, or the merge operation must +- The schemas of row groups must all be equal, or the merge operation must be explicitly configured with a schema that all row groups can be converted to, in which case the limitations of schema conversions apply. diff --git a/vendor/github.com/parquet-go/parquet-go/bloom.go b/vendor/github.com/parquet-go/parquet-go/bloom.go index 69d54c71712..30c64b84861 100644 --- a/vendor/github.com/parquet-go/parquet-go/bloom.go +++ b/vendor/github.com/parquet-go/parquet-go/bloom.go @@ -1,7 +1,6 @@ package parquet import ( - "encoding/binary" "io" "github.com/parquet-go/parquet-go/bloom" @@ -10,7 +9,6 @@ import ( "github.com/parquet-go/parquet-go/encoding" "github.com/parquet-go/parquet-go/format" "github.com/parquet-go/parquet-go/internal/unsafecast" - "golang.org/x/sys/cpu" ) // BloomFilter is an interface allowing applications to test whether a key @@ -174,18 +172,7 @@ func (splitBlockEncoding) EncodeInt64(dst []byte, src []int64) ([]byte, error) { } func (e splitBlockEncoding) EncodeInt96(dst []byte, src []deprecated.Int96) ([]byte, error) { - if cpu.IsBigEndian { - srcLen := len(src) - buf := make([]byte, srcLen*12) - for idx := range srcLen { - binary.LittleEndian.PutUint32(buf[(idx*12):4+(idx*12)], uint32(src[idx][0])) - binary.LittleEndian.PutUint32(buf[4+(idx*12):8+(idx*12)], uint32(src[idx][1])) - binary.LittleEndian.PutUint32(buf[8+(idx*12):12+(idx*12)], uint32(src[idx][2])) - } - splitBlockEncodeFixedLenByteArray(bloom.MakeSplitBlockFilter(dst), buf, 12) - } else { - splitBlockEncodeFixedLenByteArray(bloom.MakeSplitBlockFilter(dst), unsafecast.Slice[byte](src), 12) - } + splitBlockEncodeFixedLenByteArray(bloom.MakeSplitBlockFilter(dst), unsafecastInt96ToBytes(src), 12) return dst, nil } diff --git a/vendor/github.com/parquet-go/parquet-go/bloom_be.go b/vendor/github.com/parquet-go/parquet-go/bloom_be.go new file mode 100644 index 00000000000..f7800301a68 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/bloom_be.go @@ -0,0 +1,19 @@ +//go:build s390x + +package parquet + +import ( + "encoding/binary" + + "github.com/parquet-go/parquet-go/deprecated" +) + +func unsafecastInt96ToBytes(src []deprecated.Int96) []byte { + out := make([]byte, len(src)*12) + for i := range src { + binary.LittleEndian.PutUint32(out[(i*12):4+(i*12)], uint32(src[i][0])) + binary.LittleEndian.PutUint32(out[4+(i*12):8+(i*12)], uint32(src[i][1])) + binary.LittleEndian.PutUint32(out[8+(i*12):12+(i*12)], uint32(src[i][2])) + } + return out +} diff --git a/vendor/github.com/parquet-go/parquet-go/bloom_le.go b/vendor/github.com/parquet-go/parquet-go/bloom_le.go new file mode 100644 index 00000000000..5b93bf07177 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/bloom_le.go @@ -0,0 +1,12 @@ +//go:build !s390x + +package parquet + +import 
( + "github.com/parquet-go/parquet-go/deprecated" + "github.com/parquet-go/parquet-go/internal/unsafecast" +) + +func unsafecastInt96ToBytes(src []deprecated.Int96) []byte { + return unsafecast.Slice[byte](src) +} diff --git a/vendor/github.com/parquet-go/parquet-go/column_buffer.go b/vendor/github.com/parquet-go/parquet-go/column_buffer.go index 8435cb2977c..d1bc339862d 100644 --- a/vendor/github.com/parquet-go/parquet-go/column_buffer.go +++ b/vendor/github.com/parquet-go/parquet-go/column_buffer.go @@ -16,12 +16,8 @@ import ( "github.com/parquet-go/parquet-go/internal/bitpack" "github.com/parquet-go/parquet-go/internal/unsafecast" "github.com/parquet-go/parquet-go/sparse" - "golang.org/x/sys/cpu" ) -const offsetOfU64 = unsafe.Offsetof(Value{}.u64) -const offsetOfPtr = unsafe.Offsetof(Value{}.ptr) - // ColumnBuffer is an interface representing columns of a row group. // // ColumnBuffer implements sort.Interface as a way to support reordering the @@ -107,29 +103,6 @@ func columnIndexOfNullable(base ColumnBuffer, maxDefinitionLevel byte, definitio }, nil } -// On a big endian system, a boolean/byte value, which is in little endian byte format, is byte aligned -// to the 7th byte in a u64 (8 bytes) variable.. Hence the data will be available at 7th byte when -// interpreted as a little endian byte format. So, in order to access a boolean/byte value out of u64 variable, -// we need to add an offset of "7"... -// In the same way, an int32/uint32/float value, which is in little endian byte format, is byte aligned -// to the 4th byte in a u64 (8 bytes) variable.. Hence the data will be available at 4th byte when -// interpreted as a little endian byte format. So, in order to access an int32/uint32/float value out of u64 variable, -// we need to add an offset of "4" -func getOffset(colDict interface{}) uintptr { - var offset uintptr = 0 - - if cpu.IsBigEndian { - switch colDict.(type) { - case booleanColumnBuffer, booleanDictionary: - offset = 7 - - case int32ColumnBuffer, uint32ColumnBuffer, floatColumnBuffer, int32Dictionary, floatDictionary, uint32Dictionary: - offset = 4 - } - } - return offset -} - type nullableColumnIndex struct { ColumnIndex maxDefinitionLevel byte @@ -855,8 +828,7 @@ func (col *booleanColumnBuffer) WriteBooleans(values []bool) (int, error) { } func (col *booleanColumnBuffer) WriteValues(values []Value) (int, error) { - offset := getOffset(*col) - col.writeValues(makeArrayValue(values, offsetOfU64+offset), columnLevels{}) + col.writeValues(makeArrayValue(values, offsetOfBool), columnLevels{}) return len(values), nil } @@ -995,8 +967,7 @@ func (col *int32ColumnBuffer) WriteInt32s(values []int32) (int, error) { } func (col *int32ColumnBuffer) WriteValues(values []Value) (int, error) { - offset := getOffset(*col) - col.writeValues(makeArrayValue(values, offsetOfU64+offset), columnLevels{}) + col.writeValues(makeArrayValue(values, offsetOfU32), columnLevels{}) return len(values), nil } @@ -1288,8 +1259,7 @@ func (col *floatColumnBuffer) WriteFloats(values []float32) (int, error) { } func (col *floatColumnBuffer) WriteValues(values []Value) (int, error) { - offset := getOffset(*col) - col.writeValues(makeArrayValue(values, offsetOfU64+offset), columnLevels{}) + col.writeValues(makeArrayValue(values, offsetOfU32), columnLevels{}) return len(values), nil } @@ -1776,8 +1746,7 @@ func (col *uint32ColumnBuffer) WriteUint32s(values []uint32) (int, error) { } func (col *uint32ColumnBuffer) WriteValues(values []Value) (int, error) { - offset := getOffset(*col) - 
col.writeValues(makeArrayValue(values, offsetOfU64+offset), columnLevels{}) + col.writeValues(makeArrayValue(values, offsetOfU32), columnLevels{}) return len(values), nil } diff --git a/vendor/github.com/parquet-go/parquet-go/dictionary.go b/vendor/github.com/parquet-go/parquet-go/dictionary.go index 9dce0ff6514..5ff0417b0f7 100644 --- a/vendor/github.com/parquet-go/parquet-go/dictionary.go +++ b/vendor/github.com/parquet-go/parquet-go/dictionary.go @@ -140,8 +140,7 @@ func (d *booleanDictionary) Index(i int32) Value { return d.makeValue(d.index(i) func (d *booleanDictionary) index(i int32) bool { return d.valueAt(int(i)) } func (d *booleanDictionary) Insert(indexes []int32, values []Value) { - offset := getOffset(*d) - d.insert(indexes, makeArrayValue(values, offsetOfU64+offset)) + d.insert(indexes, makeArrayValue(values, offsetOfBool)) } func (d *booleanDictionary) insert(indexes []int32, rows sparse.Array) { @@ -238,8 +237,7 @@ func (d *int32Dictionary) Index(i int32) Value { return d.makeValue(d.index(i)) func (d *int32Dictionary) index(i int32) int32 { return d.values[i] } func (d *int32Dictionary) Insert(indexes []int32, values []Value) { - offset := getOffset(*d) - d.insert(indexes, makeArrayValue(values, offsetOfU64+offset)) + d.insert(indexes, makeArrayValue(values, offsetOfU32)) } func (d *int32Dictionary) init(indexes []int32) { @@ -291,8 +289,7 @@ func (d *int32Dictionary) insert(indexes []int32, rows sparse.Array) { func (d *int32Dictionary) Lookup(indexes []int32, values []Value) { model := d.makeValue(0) memsetValues(values, model) - offset := getOffset(*d) - d.lookup(indexes, makeArrayValue(values, offsetOfU64+offset)) + d.lookup(indexes, makeArrayValue(values, offsetOfU32)) } func (d *int32Dictionary) Bounds(indexes []int32) (min, max Value) { @@ -520,8 +517,7 @@ func (d *floatDictionary) Index(i int32) Value { return d.makeValue(d.index(i)) func (d *floatDictionary) index(i int32) float32 { return d.values[i] } func (d *floatDictionary) Insert(indexes []int32, values []Value) { - offset := getOffset(*d) - d.insert(indexes, makeArrayValue(values, offsetOfU64+offset)) + d.insert(indexes, makeArrayValue(values, offsetOfU32)) } func (d *floatDictionary) init(indexes []int32) { @@ -560,8 +556,7 @@ func (d *floatDictionary) insert(indexes []int32, rows sparse.Array) { func (d *floatDictionary) Lookup(indexes []int32, values []Value) { model := d.makeValue(0) memsetValues(values, model) - offset := getOffset(*d) - d.lookup(indexes, makeArrayValue(values, offsetOfU64+offset)) + d.lookup(indexes, makeArrayValue(values, offsetOfU32)) } func (d *floatDictionary) Bounds(indexes []int32) (min, max Value) { @@ -930,8 +925,7 @@ func (d *uint32Dictionary) Index(i int32) Value { return d.makeValue(d.index(i)) func (d *uint32Dictionary) index(i int32) uint32 { return d.values[i] } func (d *uint32Dictionary) Insert(indexes []int32, values []Value) { - offset := getOffset(*d) - d.insert(indexes, makeArrayValue(values, offsetOfU64+offset)) + d.insert(indexes, makeArrayValue(values, offsetOfU32)) } func (d *uint32Dictionary) init(indexes []int32) { @@ -970,8 +964,7 @@ func (d *uint32Dictionary) insert(indexes []int32, rows sparse.Array) { func (d *uint32Dictionary) Lookup(indexes []int32, values []Value) { model := d.makeValue(0) memsetValues(values, model) - offset := getOffset(*d) - d.lookup(indexes, makeArrayValue(values, offsetOfU64+offset)) + d.lookup(indexes, makeArrayValue(values, offsetOfU32)) } func (d *uint32Dictionary) Bounds(indexes []int32) (min, max Value) { diff --git 
a/vendor/github.com/parquet-go/parquet-go/encoding/plain/plain.go b/vendor/github.com/parquet-go/parquet-go/encoding/plain/plain.go index 8a58af5c39d..690bc815555 100644 --- a/vendor/github.com/parquet-go/parquet-go/encoding/plain/plain.go +++ b/vendor/github.com/parquet-go/parquet-go/encoding/plain/plain.go @@ -6,7 +6,6 @@ package plain import ( "encoding/binary" "fmt" - "golang.org/x/sys/cpu" "io" "math" @@ -37,70 +36,10 @@ func (e *Encoding) EncodeBoolean(dst []byte, src []byte) ([]byte, error) { return append(dst[:0], src...), nil } -func (e *Encoding) EncodeInt32(dst []byte, src []int32) ([]byte, error) { - if cpu.IsBigEndian { - srcLen := len(src) - byteEnc := make([]byte, (srcLen * 4)) - idx := 0 - for k := range srcLen { - binary.LittleEndian.PutUint32(byteEnc[idx:(4+idx)], uint32((src)[k])) - idx += 4 - } - return append(dst[:0], (byteEnc)...), nil - } else { - return append(dst[:0], unsafecast.Slice[byte](src)...), nil - } -} - -func (e *Encoding) EncodeInt64(dst []byte, src []int64) ([]byte, error) { - if cpu.IsBigEndian { - srcLen := len(src) - byteEnc := make([]byte, (srcLen * 8)) - idx := 0 - for k := range srcLen { - binary.LittleEndian.PutUint64(byteEnc[idx:(8+idx)], uint64((src)[k])) - idx += 8 - } - return append(dst[:0], (byteEnc)...), nil - } else { - return append(dst[:0], unsafecast.Slice[byte](src)...), nil - } -} - func (e *Encoding) EncodeInt96(dst []byte, src []deprecated.Int96) ([]byte, error) { return append(dst[:0], unsafecast.Slice[byte](src)...), nil } -func (e *Encoding) EncodeFloat(dst []byte, src []float32) ([]byte, error) { - if cpu.IsBigEndian { - srcLen := len(src) - byteEnc := make([]byte, (srcLen * 4)) - idx := 0 - for k := range srcLen { - binary.LittleEndian.PutUint32(byteEnc[idx:(4+idx)], math.Float32bits((src)[k])) - idx += 4 - } - return append(dst[:0], (byteEnc)...), nil - } else { - return append(dst[:0], unsafecast.Slice[byte](src)...), nil - } -} - -func (e *Encoding) EncodeDouble(dst []byte, src []float64) ([]byte, error) { - if cpu.IsBigEndian { - srcLen := len(src) - byteEnc := make([]byte, (srcLen * 8)) - idx := 0 - for k := range srcLen { - binary.LittleEndian.PutUint64(byteEnc[idx:(8+idx)], math.Float64bits((src)[k])) - idx += 8 - } - return append(dst[:0], (byteEnc)...), nil - } else { - return append(dst[:0], unsafecast.Slice[byte](src)...), nil - } -} - func (e *Encoding) EncodeByteArray(dst []byte, src []byte, offsets []uint32) ([]byte, error) { dst = dst[:0] @@ -127,45 +66,6 @@ func (e *Encoding) DecodeBoolean(dst []byte, src []byte) ([]byte, error) { return append(dst[:0], src...), nil } -func (e *Encoding) DecodeInt32(dst []int32, src []byte) ([]int32, error) { - if (len(src) % 4) != 0 { - return dst, encoding.ErrDecodeInvalidInputSize(e, "INT32", len(src)) - } - - if cpu.IsBigEndian { - srcLen := (len(src) / 4) - byteDec := make([]int32, srcLen) - idx := 0 - for k := range srcLen { - byteDec[k] = int32(binary.LittleEndian.Uint32((src)[idx:(4 + idx)])) - idx += 4 - } - return append(dst[:0], (byteDec)...), nil - } else { - return append(dst[:0], unsafecast.Slice[int32](src)...), nil - } -} - -func (e *Encoding) DecodeInt64(dst []int64, src []byte) ([]int64, error) { - if (len(src) % 8) != 0 { - return dst, encoding.ErrDecodeInvalidInputSize(e, "INT64", len(src)) - } - - if cpu.IsBigEndian { - srcLen := (len(src) / 8) - byteDec := make([]int64, srcLen) - idx := 0 - for k := range srcLen { - byteDec[k] = int64(binary.LittleEndian.Uint64((src)[idx:(8 + idx)])) - idx += 8 - } - - return append(dst[:0], (byteDec)...), nil - } else { - 
return append(dst[:0], unsafecast.Slice[int64](src)...), nil - } -} - func (e *Encoding) DecodeInt96(dst []deprecated.Int96, src []byte) ([]deprecated.Int96, error) { if (len(src) % 12) != 0 { return dst, encoding.ErrDecodeInvalidInputSize(e, "INT96", len(src)) @@ -173,44 +73,6 @@ func (e *Encoding) DecodeInt96(dst []deprecated.Int96, src []byte) ([]deprecated return append(dst[:0], unsafecast.Slice[deprecated.Int96](src)...), nil } -func (e *Encoding) DecodeFloat(dst []float32, src []byte) ([]float32, error) { - if (len(src) % 4) != 0 { - return dst, encoding.ErrDecodeInvalidInputSize(e, "FLOAT", len(src)) - } - if cpu.IsBigEndian { - srcLen := (len(src) / 4) - byteDec := make([]float32, srcLen) - idx := 0 - for k := range srcLen { - byteDec[k] = float32(math.Float32frombits(binary.LittleEndian.Uint32((src)[idx:(4 + idx)]))) - idx += 4 - } - - return append(dst[:0], (byteDec)...), nil - } else { - return append(dst[:0], unsafecast.Slice[float32](src)...), nil - } -} - -func (e *Encoding) DecodeDouble(dst []float64, src []byte) ([]float64, error) { - if (len(src) % 8) != 0 { - return dst, encoding.ErrDecodeInvalidInputSize(e, "DOUBLE", len(src)) - } - if cpu.IsBigEndian { - srcLen := (len(src) / 8) - byteDec := make([]float64, srcLen) - idx := 0 - for k := range srcLen { - byteDec[k] = float64(math.Float64frombits(binary.LittleEndian.Uint64((src)[idx:(8 + idx)]))) - idx += 8 - } - - return append(dst[:0], (byteDec)...), nil - } else { - return append(dst[:0], unsafecast.Slice[float64](src)...), nil - } -} - func (e *Encoding) DecodeByteArray(dst []byte, src []byte, offsets []uint32) ([]byte, []uint32, error) { dst, offsets = dst[:0], offsets[:0] diff --git a/vendor/github.com/parquet-go/parquet-go/encoding/plain/plain_be.go b/vendor/github.com/parquet-go/parquet-go/encoding/plain/plain_be.go new file mode 100644 index 00000000000..6c8c9000b52 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/encoding/plain/plain_be.go @@ -0,0 +1,113 @@ +//go:build s390x + +package plain + +import ( + "encoding/binary" + "math" + + "github.com/parquet-go/parquet-go/encoding" +) + +// TODO: optimize by doing the byte swap in the output slice instead of +// allocating a temporary buffer.
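// A hedged illustration of the invariant the functions below maintain (an
// editorial sketch, not part of the vendored file): Parquet PLAIN values are
// little-endian on the wire, so a big-endian host such as s390x has to swap
// bytes value by value instead of reinterpreting memory in place:
//
//	var buf [4]byte
//	binary.LittleEndian.PutUint32(buf[:], uint32(int32(1)))
//	// buf == {0x01, 0x00, 0x00, 0x00} on any host, whereas on s390x
//	// unsafecast.Slice[byte]([]int32{1}) would expose {0x00, 0x00, 0x00, 0x01}.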
+ +func (e *Encoding) EncodeInt32(dst []byte, src []int32) ([]byte, error) { + srcLen := len(src) + byteEnc := make([]byte, (srcLen * 4)) + idx := 0 + for k := range srcLen { + binary.LittleEndian.PutUint32(byteEnc[idx:(4+idx)], uint32((src)[k])) + idx += 4 + } + return append(dst[:0], (byteEnc)...), nil +} + +func (e *Encoding) EncodeInt64(dst []byte, src []int64) ([]byte, error) { + srcLen := len(src) + byteEnc := make([]byte, (srcLen * 8)) + idx := 0 + for k := range srcLen { + binary.LittleEndian.PutUint64(byteEnc[idx:(8+idx)], uint64((src)[k])) + idx += 8 + } + return append(dst[:0], (byteEnc)...), nil +} + +func (e *Encoding) EncodeFloat(dst []byte, src []float32) ([]byte, error) { + srcLen := len(src) + byteEnc := make([]byte, (srcLen * 4)) + idx := 0 + for k := range srcLen { + binary.LittleEndian.PutUint32(byteEnc[idx:(4+idx)], math.Float32bits((src)[k])) + idx += 4 + } + return append(dst[:0], (byteEnc)...), nil +} + +func (e *Encoding) EncodeDouble(dst []byte, src []float64) ([]byte, error) { + srcLen := len(src) + byteEnc := make([]byte, (srcLen * 8)) + idx := 0 + for k := range srcLen { + binary.LittleEndian.PutUint64(byteEnc[idx:(8+idx)], math.Float64bits((src)[k])) + idx += 8 + } + return append(dst[:0], (byteEnc)...), nil +} + +func (e *Encoding) DecodeInt32(dst []int32, src []byte) ([]int32, error) { + if (len(src) % 4) != 0 { + return dst, encoding.ErrDecodeInvalidInputSize(e, "INT32", len(src)) + } + srcLen := (len(src) / 4) + byteDec := make([]int32, srcLen) + idx := 0 + for k := range srcLen { + byteDec[k] = int32(binary.LittleEndian.Uint32((src)[idx:(4 + idx)])) + idx += 4 + } + return append(dst[:0], (byteDec)...), nil +} + +func (e *Encoding) DecodeInt64(dst []int64, src []byte) ([]int64, error) { + if (len(src) % 8) != 0 { + return dst, encoding.ErrDecodeInvalidInputSize(e, "INT64", len(src)) + } + srcLen := (len(src) / 8) + byteDec := make([]int64, srcLen) + idx := 0 + for k := range srcLen { + byteDec[k] = int64(binary.LittleEndian.Uint64((src)[idx:(8 + idx)])) + idx += 8 + } + return append(dst[:0], (byteDec)...), nil +} + +func (e *Encoding) DecodeFloat(dst []float32, src []byte) ([]float32, error) { + if (len(src) % 4) != 0 { + return dst, encoding.ErrDecodeInvalidInputSize(e, "FLOAT", len(src)) + } + srcLen := (len(src) / 4) + byteDec := make([]float32, srcLen) + idx := 0 + for k := range srcLen { + byteDec[k] = float32(math.Float32frombits(binary.LittleEndian.Uint32((src)[idx:(4 + idx)]))) + idx += 4 + } + return append(dst[:0], (byteDec)...), nil +} + +func (e *Encoding) DecodeDouble(dst []float64, src []byte) ([]float64, error) { + if (len(src) % 8) != 0 { + return dst, encoding.ErrDecodeInvalidInputSize(e, "DOUBLE", len(src)) + } + srcLen := (len(src) / 8) + byteDec := make([]float64, srcLen) + idx := 0 + for k := range srcLen { + byteDec[k] = float64(math.Float64frombits(binary.LittleEndian.Uint64((src)[idx:(8 + idx)]))) + idx += 8 + } + return append(dst[:0], (byteDec)...), nil +} diff --git a/vendor/github.com/parquet-go/parquet-go/encoding/plain/plain_le.go b/vendor/github.com/parquet-go/parquet-go/encoding/plain/plain_le.go new file mode 100644 index 00000000000..bd1eadf6a06 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/encoding/plain/plain_le.go @@ -0,0 +1,52 @@ +//go:build !s390x + +package plain + +import ( + "github.com/parquet-go/parquet-go/encoding" + "github.com/parquet-go/parquet-go/internal/unsafecast" +) + +func (e *Encoding) EncodeInt32(dst []byte, src []int32) ([]byte, error) { + return append(dst[:0], 
unsafecast.Slice[byte](src)...), nil +} + +func (e *Encoding) EncodeInt64(dst []byte, src []int64) ([]byte, error) { + return append(dst[:0], unsafecast.Slice[byte](src)...), nil +} + +func (e *Encoding) EncodeFloat(dst []byte, src []float32) ([]byte, error) { + return append(dst[:0], unsafecast.Slice[byte](src)...), nil +} + +func (e *Encoding) EncodeDouble(dst []byte, src []float64) ([]byte, error) { + return append(dst[:0], unsafecast.Slice[byte](src)...), nil +} + +func (e *Encoding) DecodeInt32(dst []int32, src []byte) ([]int32, error) { + if (len(src) % 4) != 0 { + return dst, encoding.ErrDecodeInvalidInputSize(e, "INT32", len(src)) + } + return append(dst[:0], unsafecast.Slice[int32](src)...), nil +} + +func (e *Encoding) DecodeInt64(dst []int64, src []byte) ([]int64, error) { + if (len(src) % 8) != 0 { + return dst, encoding.ErrDecodeInvalidInputSize(e, "INT64", len(src)) + } + return append(dst[:0], unsafecast.Slice[int64](src)...), nil +} + +func (e *Encoding) DecodeFloat(dst []float32, src []byte) ([]float32, error) { + if (len(src) % 4) != 0 { + return dst, encoding.ErrDecodeInvalidInputSize(e, "FLOAT", len(src)) + } + return append(dst[:0], unsafecast.Slice[float32](src)...), nil +} + +func (e *Encoding) DecodeDouble(dst []float64, src []byte) ([]float64, error) { + if (len(src) % 8) != 0 { + return dst, encoding.ErrDecodeInvalidInputSize(e, "DOUBLE", len(src)) + } + return append(dst[:0], unsafecast.Slice[float64](src)...), nil +} diff --git a/vendor/github.com/parquet-go/parquet-go/internal/bitpack/unpack_int32_be.go b/vendor/github.com/parquet-go/parquet-go/internal/bitpack/unpack_int32_be.go new file mode 100644 index 00000000000..0f4ba054c42 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/internal/bitpack/unpack_int32_be.go @@ -0,0 +1,15 @@ +//go:build s390x + +package bitpack + +import "encoding/binary" + +func unsafecastBytesToUint32(src []byte) []uint32 { + out := make([]uint32, len(src)/4) + idx := 0 + for k := range out { + out[k] = binary.LittleEndian.Uint32((src)[idx:(4 + idx)]) + idx += 4 + } + return out +} diff --git a/vendor/github.com/parquet-go/parquet-go/internal/bitpack/unpack_int32_le.go b/vendor/github.com/parquet-go/parquet-go/internal/bitpack/unpack_int32_le.go new file mode 100644 index 00000000000..f754e704ff1 --- /dev/null +++ b/vendor/github.com/parquet-go/parquet-go/internal/bitpack/unpack_int32_le.go @@ -0,0 +1,9 @@ +//go:build !s390x + +package bitpack + +import "github.com/parquet-go/parquet-go/internal/unsafecast" + +func unsafecastBytesToUint32(src []byte) []uint32 { + return unsafecast.Slice[uint32](src) +} diff --git a/vendor/github.com/parquet-go/parquet-go/internal/bitpack/unpack_int32_purego.go b/vendor/github.com/parquet-go/parquet-go/internal/bitpack/unpack_int32_purego.go index cddbd773a51..1e65d8c02b9 100644 --- a/vendor/github.com/parquet-go/parquet-go/internal/bitpack/unpack_int32_purego.go +++ b/vendor/github.com/parquet-go/parquet-go/internal/bitpack/unpack_int32_purego.go @@ -2,28 +2,8 @@ package bitpack -import ( - "encoding/binary" - - "golang.org/x/sys/cpu" - - "github.com/parquet-go/parquet-go/internal/unsafecast" -) - func unpackInt32(dst []int32, src []byte, bitWidth uint) { - var bits []uint32 - if cpu.IsBigEndian { - srcLen := (len(src) / 4) - bits = make([]uint32, srcLen) - idx := 0 - for k := range srcLen { - bits[k] = binary.LittleEndian.Uint32((src)[idx:(4 + idx)]) - idx += 4 - } - } else { - bits = unsafecast.Slice[uint32](src) - } - + bits := unsafecastBytesToUint32(src) bitMask := uint32(1<<bitWidth) - 1
-<tr><th>Benchmark</th><th>go-toml v1</th><th>BurntSushi/toml</th></tr>
-<tr><td>Marshal/HugoFrontMatter-2</td><td>1.9x</td><td>1.9x</td></tr>
-<tr><td>Marshal/ReferenceFile/map-2</td><td>1.7x</td><td>1.8x</td></tr>
-<tr><td>Marshal/ReferenceFile/struct-2</td><td>2.2x</td><td>2.5x</td></tr>
-<tr><td>Unmarshal/HugoFrontMatter-2</td><td>2.9x</td><td>2.9x</td></tr>
-<tr><td>Unmarshal/ReferenceFile/map-2</td><td>2.6x</td><td>2.9x</td></tr>
-<tr><td>Unmarshal/ReferenceFile/struct-2</td><td>4.4x</td><td>5.3x</td></tr>
+<tr><th>Benchmark</th><th>go-toml v1</th><th>BurntSushi/toml</th></tr>
+<tr><td>Marshal/HugoFrontMatter-2</td><td>1.9x</td><td>2.2x</td></tr>
+<tr><td>Marshal/ReferenceFile/map-2</td><td>1.7x</td><td>2.1x</td></tr>
+<tr><td>Marshal/ReferenceFile/struct-2</td><td>2.2x</td><td>3.0x</td></tr>
+<tr><td>Unmarshal/HugoFrontMatter-2</td><td>2.9x</td><td>2.7x</td></tr>
+<tr><td>Unmarshal/ReferenceFile/map-2</td><td>2.6x</td><td>2.7x</td></tr>
+<tr><td>Unmarshal/ReferenceFile/struct-2</td><td>4.6x</td><td>5.1x</td></tr>
<details><summary>See more</summary>

The table above has the results of the most common use-cases. The table below @@ -193,22 +193,22 @@ contains the results of all benchmarks, including unrealistic ones. It is provided for completeness.

-<tr><th>Benchmark</th><th>go-toml v1</th><th>BurntSushi/toml</th></tr>
-<tr><td>Marshal/SimpleDocument/map-2</td><td>1.8x</td><td>2.9x</td></tr>
-<tr><td>Marshal/SimpleDocument/struct-2</td><td>2.7x</td><td>4.2x</td></tr>
-<tr><td>Unmarshal/SimpleDocument/map-2</td><td>4.5x</td><td>3.1x</td></tr>
-<tr><td>Unmarshal/SimpleDocument/struct-2</td><td>6.2x</td><td>3.9x</td></tr>
-<tr><td>UnmarshalDataset/example-2</td><td>3.1x</td><td>3.5x</td></tr>
-<tr><td>UnmarshalDataset/code-2</td><td>2.3x</td><td>3.1x</td></tr>
-<tr><td>UnmarshalDataset/twitter-2</td><td>2.5x</td><td>2.6x</td></tr>
-<tr><td>UnmarshalDataset/citm_catalog-2</td><td>2.1x</td><td>2.2x</td></tr>
-<tr><td>UnmarshalDataset/canada-2</td><td>1.6x</td><td>1.3x</td></tr>
-<tr><td>UnmarshalDataset/config-2</td><td>4.3x</td><td>3.2x</td></tr>
-<tr><td>[Geo mean]</td><td>2.7x</td><td>2.8x</td></tr>
+<tr><th>Benchmark</th><th>go-toml v1</th><th>BurntSushi/toml</th></tr>
+<tr><td>Marshal/SimpleDocument/map-2</td><td>1.8x</td><td>2.7x</td></tr>
+<tr><td>Marshal/SimpleDocument/struct-2</td><td>2.7x</td><td>3.8x</td></tr>
+<tr><td>Unmarshal/SimpleDocument/map-2</td><td>3.8x</td><td>3.0x</td></tr>
+<tr><td>Unmarshal/SimpleDocument/struct-2</td><td>5.6x</td><td>4.1x</td></tr>
+<tr><td>UnmarshalDataset/example-2</td><td>3.0x</td><td>3.2x</td></tr>
+<tr><td>UnmarshalDataset/code-2</td><td>2.3x</td><td>2.9x</td></tr>
+<tr><td>UnmarshalDataset/twitter-2</td><td>2.6x</td><td>2.7x</td></tr>
+<tr><td>UnmarshalDataset/citm_catalog-2</td><td>2.2x</td><td>2.3x</td></tr>
+<tr><td>UnmarshalDataset/canada-2</td><td>1.8x</td><td>1.5x</td></tr>
+<tr><td>UnmarshalDataset/config-2</td><td>4.1x</td><td>2.9x</td></tr>
+<tr><td>geomean</td><td>2.7x</td><td>2.8x</td></tr>

This table can be generated with ./ci.sh benchmark -a -html.
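Assuming a full checkout of the repository with Go and benchstat on PATH (which the script's own benchstat calls imply), the comparison behind both tables can be rebuilt in one step:

```bash
./ci.sh benchmark -a -html
```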
</details>
@@ -233,24 +233,24 @@ Go-toml provides three handy command line tools: * `tomljson`: Reads a TOML file and outputs its JSON representation. - ``` - $ go install github.com/pelletier/go-toml/v2/cmd/tomljson@latest - $ tomljson --help - ``` + ``` + $ go install github.com/pelletier/go-toml/v2/cmd/tomljson@latest + $ tomljson --help + ``` * `jsontoml`: Reads a JSON file and outputs a TOML representation. - ``` - $ go install github.com/pelletier/go-toml/v2/cmd/jsontoml@latest - $ jsontoml --help - ``` + ``` + $ go install github.com/pelletier/go-toml/v2/cmd/jsontoml@latest + $ jsontoml --help + ``` * `tomll`: Lints and reformats a TOML file. - ``` - $ go install github.com/pelletier/go-toml/v2/cmd/tomll@latest - $ tomll --help - ``` + ``` + $ go install github.com/pelletier/go-toml/v2/cmd/tomll@latest + $ tomll --help + ``` ### Docker image @@ -261,7 +261,7 @@ Those tools are also available as a [Docker image][docker]. For example, to use docker run -i ghcr.io/pelletier/go-toml:v2 tomljson < example.toml ``` -Multiple versions are availble on [ghcr.io][docker]. +Multiple versions are available on [ghcr.io][docker]. [docker]: https://github.com/pelletier/go-toml/pkgs/container/go-toml @@ -293,16 +293,16 @@ element in the interface to decode the object. For example: ```go type inner struct { - B interface{} + B interface{} } type doc struct { - A interface{} + A interface{} } d := doc{ - A: inner{ - B: "Before", - }, + A: inner{ + B: "Before", + }, } data := ` @@ -341,7 +341,7 @@ contained in the doc is superior to the capacity of the array. For example: ```go type doc struct { - A [2]string + A [2]string } d := doc{} err := toml.Unmarshal([]byte(`A = ["one", "two", "many"]`), &d) @@ -565,10 +565,11 @@ complete solutions exist out there. ## Versioning -Go-toml follows [Semantic Versioning](https://semver.org). The supported version -of [TOML](https://github.com/toml-lang/toml) is indicated at the beginning of -this document. The last two major versions of Go are supported -(see [Go Release Policy](https://golang.org/doc/devel/release.html#policy)). +Expect for parts explicitly marked otherwise, go-toml follows [Semantic +Versioning](https://semver.org). The supported version of +[TOML](https://github.com/toml-lang/toml) is indicated at the beginning of this +document. The last two major versions of Go are supported (see [Go Release +Policy](https://golang.org/doc/devel/release.html#policy)). ## License diff --git a/vendor/github.com/pelletier/go-toml/v2/SECURITY.md b/vendor/github.com/pelletier/go-toml/v2/SECURITY.md index b2f21cfc92c..d4d554fda9d 100644 --- a/vendor/github.com/pelletier/go-toml/v2/SECURITY.md +++ b/vendor/github.com/pelletier/go-toml/v2/SECURITY.md @@ -2,9 +2,6 @@ ## Supported Versions -Use this section to tell people about which versions of your project are -currently being supported with security updates. - | Version | Supported | | ---------- | ------------------ | | Latest 2.x | :white_check_mark: | diff --git a/vendor/github.com/pelletier/go-toml/v2/ci.sh b/vendor/github.com/pelletier/go-toml/v2/ci.sh index 9ae8b753759..86217a9b097 100644 --- a/vendor/github.com/pelletier/go-toml/v2/ci.sh +++ b/vendor/github.com/pelletier/go-toml/v2/ci.sh @@ -77,7 +77,7 @@ cover() { pushd "$dir" go test -covermode=atomic -coverpkg=./... -coverprofile=coverage.out.tmp ./... 
- cat coverage.out.tmp | grep -v fuzz | grep -v testsuite | grep -v tomltestgen | grep -v gotoml-test-decoder > coverage.out + grep -Ev '(fuzz|testsuite|tomltestgen|gotoml-test-decoder|gotoml-test-encoder)' coverage.out.tmp > coverage.out go tool cover -func=coverage.out echo "Coverage profile for ${branch}: ${dir}/coverage.out" >&2 popd @@ -152,7 +152,7 @@ bench() { fi export GOMAXPROCS=2 - nice -n -19 taskset --cpu-list 0,1 go test '-bench=^Benchmark(Un)?[mM]arshal' -count=5 -run=Nothing ./... | tee "${out}" + go test '-bench=^Benchmark(Un)?[mM]arshal' -count=10 -run=Nothing ./... | tee "${out}" popd if [ "${branch}" != "HEAD" ]; then @@ -161,10 +161,12 @@ bench() { } fmktemp() { - if mktemp --version|grep GNU >/dev/null; then - mktemp --suffix=-$1; + if mktemp --version &> /dev/null; then + # GNU + mktemp --suffix=-$1 else - mktemp -t $1; + # BSD + mktemp -t $1 fi } @@ -184,12 +186,14 @@ with open(sys.argv[1]) as f: lines.append(line.split(',')) results = [] -for line in reversed(lines[1:]): +for line in reversed(lines[2:]): + if len(line) < 8 or line[0] == "": + continue v2 = float(line[1]) results.append([ line[0].replace("-32", ""), "%.1fx" % (float(line[3])/v2), # v1 - "%.1fx" % (float(line[5])/v2), # bs + "%.1fx" % (float(line[7])/v2), # bs ]) # move geomean to the end results.append(results[0]) @@ -260,10 +264,10 @@ benchmark() { if [ "$1" = "-html" ]; then tmpcsv=`fmktemp csv` - benchstat -csv -geomean go-toml-v2.txt go-toml-v1.txt bs-toml.txt > $tmpcsv + benchstat -format csv go-toml-v2.txt go-toml-v1.txt bs-toml.txt > $tmpcsv benchstathtml $tmpcsv else - benchstat -geomean go-toml-v2.txt go-toml-v1.txt bs-toml.txt + benchstat go-toml-v2.txt go-toml-v1.txt bs-toml.txt fi rm -f go-toml-v2.txt go-toml-v1.txt bs-toml.txt diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go index 40e23f8304a..76df2d5b6a9 100644 --- a/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go +++ b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go @@ -57,7 +57,11 @@ type SeenTracker struct { currentIdx int } -var pool sync.Pool +var pool = sync.Pool{ + New: func() interface{} { + return &SeenTracker{} + }, +} func (s *SeenTracker) reset() { // Always contains a root element at index 0. @@ -149,8 +153,9 @@ func (s *SeenTracker) setExplicitFlag(parentIdx int) { // CheckExpression takes a top-level node and checks that it does not contain // keys that have been seen in previous calls, and validates that types are -// consistent. -func (s *SeenTracker) CheckExpression(node *unstable.Node) error { +// consistent. It returns true if it is the first time this node's key is seen. +// Useful to clear array tables on first use. 
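//
// A hedged example of the new return value (an illustration, not part of the
// vendored file): for the document
//
//	[[fruit]]
//	[[fruit]]
//
// the first [[fruit]] expression reports first == true and the second reports
// first == false, which is what lets the decoder truncate a pre-populated Go
// slice exactly once before appending the decoded tables.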
+func (s *SeenTracker) CheckExpression(node *unstable.Node) (bool, error) { if s.entries == nil { s.reset() } @@ -166,7 +171,7 @@ func (s *SeenTracker) CheckExpression(node *unstable.Node) error { } } -func (s *SeenTracker) checkTable(node *unstable.Node) error { +func (s *SeenTracker) checkTable(node *unstable.Node) (bool, error) { if s.currentIdx >= 0 { s.setExplicitFlag(s.currentIdx) } @@ -192,7 +197,7 @@ func (s *SeenTracker) checkTable(node *unstable.Node) error { } else { entry := s.entries[idx] if entry.kind == valueKind { - return fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) + return false, fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) } } parentIdx = idx @@ -201,25 +206,27 @@ func (s *SeenTracker) checkTable(node *unstable.Node) error { k := it.Node().Data idx := s.find(parentIdx, k) + first := false if idx >= 0 { kind := s.entries[idx].kind if kind != tableKind { - return fmt.Errorf("toml: key %s should be a table, not a %s", string(k), kind) + return false, fmt.Errorf("toml: key %s should be a table, not a %s", string(k), kind) } if s.entries[idx].explicit { - return fmt.Errorf("toml: table %s already exists", string(k)) + return false, fmt.Errorf("toml: table %s already exists", string(k)) } s.entries[idx].explicit = true } else { idx = s.create(parentIdx, k, tableKind, true, false) + first = true } s.currentIdx = idx - return nil + return first, nil } -func (s *SeenTracker) checkArrayTable(node *unstable.Node) error { +func (s *SeenTracker) checkArrayTable(node *unstable.Node) (bool, error) { if s.currentIdx >= 0 { s.setExplicitFlag(s.currentIdx) } @@ -242,7 +249,7 @@ func (s *SeenTracker) checkArrayTable(node *unstable.Node) error { } else { entry := s.entries[idx] if entry.kind == valueKind { - return fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) + return false, fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) } } @@ -252,22 +259,23 @@ func (s *SeenTracker) checkArrayTable(node *unstable.Node) error { k := it.Node().Data idx := s.find(parentIdx, k) - if idx >= 0 { + firstTime := idx < 0 + if firstTime { + idx = s.create(parentIdx, k, arrayTableKind, true, false) + } else { kind := s.entries[idx].kind if kind != arrayTableKind { - return fmt.Errorf("toml: key %s already exists as a %s, but should be an array table", kind, string(k)) + return false, fmt.Errorf("toml: key %s already exists as a %s, but should be an array table", kind, string(k)) } s.clear(idx) - } else { - idx = s.create(parentIdx, k, arrayTableKind, true, false) } s.currentIdx = idx - return nil + return firstTime, nil } -func (s *SeenTracker) checkKeyValue(node *unstable.Node) error { +func (s *SeenTracker) checkKeyValue(node *unstable.Node) (bool, error) { parentIdx := s.currentIdx it := node.Key() @@ -281,11 +289,11 @@ func (s *SeenTracker) checkKeyValue(node *unstable.Node) error { } else { entry := s.entries[idx] if it.IsLast() { - return fmt.Errorf("toml: key %s is already defined", string(k)) + return false, fmt.Errorf("toml: key %s is already defined", string(k)) } else if entry.kind != tableKind { - return fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) + return false, fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) } else if entry.explicit { - return fmt.Errorf("toml: cannot redefine table %s that has already been explicitly defined", string(k)) + return false, fmt.Errorf("toml: cannot redefine table %s that has 
already been explicitly defined", string(k)) } } @@ -303,45 +311,39 @@ func (s *SeenTracker) checkKeyValue(node *unstable.Node) error { return s.checkArray(value) } - return nil + return false, nil } -func (s *SeenTracker) checkArray(node *unstable.Node) error { +func (s *SeenTracker) checkArray(node *unstable.Node) (first bool, err error) { it := node.Children() for it.Next() { n := it.Node() switch n.Kind { case unstable.InlineTable: - err := s.checkInlineTable(n) + first, err = s.checkInlineTable(n) if err != nil { - return err + return false, err } case unstable.Array: - err := s.checkArray(n) + first, err = s.checkArray(n) if err != nil { - return err + return false, err } } } - return nil + return first, nil } -func (s *SeenTracker) checkInlineTable(node *unstable.Node) error { - if pool.New == nil { - pool.New = func() interface{} { - return &SeenTracker{} - } - } - +func (s *SeenTracker) checkInlineTable(node *unstable.Node) (first bool, err error) { s = pool.Get().(*SeenTracker) s.reset() it := node.Children() for it.Next() { n := it.Node() - err := s.checkKeyValue(n) + first, err = s.checkKeyValue(n) if err != nil { - return err + return false, err } } @@ -352,5 +354,5 @@ func (s *SeenTracker) checkInlineTable(node *unstable.Node) error { // redefinition of its keys: check* functions cannot walk into // a value. pool.Put(s) - return nil + return first, nil } diff --git a/vendor/github.com/pelletier/go-toml/v2/marshaler.go b/vendor/github.com/pelletier/go-toml/v2/marshaler.go index 6fe78533c1c..161acd93439 100644 --- a/vendor/github.com/pelletier/go-toml/v2/marshaler.go +++ b/vendor/github.com/pelletier/go-toml/v2/marshaler.go @@ -3,11 +3,12 @@ package toml import ( "bytes" "encoding" + "encoding/json" "fmt" "io" "math" "reflect" - "sort" + "slices" "strconv" "strings" "time" @@ -37,10 +38,11 @@ type Encoder struct { w io.Writer // global settings - tablesInline bool - arraysMultiline bool - indentSymbol string - indentTables bool + tablesInline bool + arraysMultiline bool + indentSymbol string + indentTables bool + marshalJsonNumbers bool } // NewEncoder returns a new Encoder that writes to w. @@ -87,6 +89,17 @@ func (enc *Encoder) SetIndentTables(indent bool) *Encoder { return enc } +// SetMarshalJsonNumbers forces the encoder to serialize `json.Number` as a +// float or integer instead of relying on TextMarshaler to emit a string. +// +// *Unstable:* This method does not follow the compatibility guarantees of +// semver. It can be changed or removed without a new major version being +// issued. +func (enc *Encoder) SetMarshalJsonNumbers(indent bool) *Encoder { + enc.marshalJsonNumbers = indent + return enc +} + // Encode writes a TOML representation of v to the stream. // // If v cannot be represented to TOML it returns an error. @@ -252,10 +265,22 @@ func (enc *Encoder) encode(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, e return append(b, x.String()...), nil case LocalDateTime: return append(b, x.String()...), nil + case json.Number: + if enc.marshalJsonNumbers { + if x == "" { /// Useful zero value. 
+ return append(b, "0"...), nil + } else if v, err := x.Int64(); err == nil { + return enc.encode(b, ctx, reflect.ValueOf(v)) + } else if f, err := x.Float64(); err == nil { + return enc.encode(b, ctx, reflect.ValueOf(f)) + } else { + return nil, fmt.Errorf("toml: unable to convert %q to int64 or float64", x) + } + } } hasTextMarshaler := v.Type().Implements(textMarshalerType) - if hasTextMarshaler || (v.CanAddr() && reflect.PtrTo(v.Type()).Implements(textMarshalerType)) { + if hasTextMarshaler || (v.CanAddr() && reflect.PointerTo(v.Type()).Implements(textMarshalerType)) { if !hasTextMarshaler { v = v.Addr() } @@ -606,6 +631,18 @@ func (enc *Encoder) keyToString(k reflect.Value) (string, error) { return "", fmt.Errorf("toml: error marshalling key %v from text: %w", k, err) } return string(keyB), nil + + case keyType.Kind() == reflect.Int || keyType.Kind() == reflect.Int8 || keyType.Kind() == reflect.Int16 || keyType.Kind() == reflect.Int32 || keyType.Kind() == reflect.Int64: + return strconv.FormatInt(k.Int(), 10), nil + + case keyType.Kind() == reflect.Uint || keyType.Kind() == reflect.Uint8 || keyType.Kind() == reflect.Uint16 || keyType.Kind() == reflect.Uint32 || keyType.Kind() == reflect.Uint64: + return strconv.FormatUint(k.Uint(), 10), nil + + case keyType.Kind() == reflect.Float32: + return strconv.FormatFloat(k.Float(), 'f', -1, 32), nil + + case keyType.Kind() == reflect.Float64: + return strconv.FormatFloat(k.Float(), 'f', -1, 64), nil } return "", fmt.Errorf("toml: type %s is not supported as a map key", keyType.Kind()) } @@ -643,8 +680,8 @@ func (enc *Encoder) encodeMap(b []byte, ctx encoderCtx, v reflect.Value) ([]byte } func sortEntriesByKey(e []entry) { - sort.Slice(e, func(i, j int) bool { - return e[i].Key < e[j].Key + slices.SortFunc(e, func(a, b entry) int { + return strings.Compare(a.Key, b.Key) }) } @@ -707,6 +744,8 @@ func walkStruct(ctx encoderCtx, t *table, v reflect.Value) { if fieldType.Anonymous { if fieldType.Type.Kind() == reflect.Struct { walkStruct(ctx, t, f) + } else if fieldType.Type.Kind() == reflect.Ptr && !f.IsNil() && f.Elem().Kind() == reflect.Struct { + walkStruct(ctx, t, f.Elem()) } continue } else { @@ -924,7 +963,7 @@ func willConvertToTable(ctx encoderCtx, v reflect.Value) bool { if !v.IsValid() { return false } - if v.Type() == timeType || v.Type().Implements(textMarshalerType) || (v.Kind() != reflect.Ptr && v.CanAddr() && reflect.PtrTo(v.Type()).Implements(textMarshalerType)) { + if v.Type() == timeType || v.Type().Implements(textMarshalerType) || (v.Kind() != reflect.Ptr && v.CanAddr() && reflect.PointerTo(v.Type()).Implements(textMarshalerType)) { return false } @@ -998,6 +1037,10 @@ func (enc *Encoder) encodeSliceAsArrayTable(b []byte, ctx encoderCtx, v reflect. scratch = enc.commented(ctx.commented, scratch) + if enc.indentTables { + scratch = enc.indent(ctx.indent, scratch) + } + scratch = append(scratch, "[["...) for i, k := range ctx.parentKey { diff --git a/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go b/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go index 868c74c1577..c3df8bee1cf 100644 --- a/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go +++ b/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go @@ -5,9 +5,9 @@ import ( "errors" "fmt" "io" - "io/ioutil" "math" "reflect" + "strconv" "strings" "sync/atomic" "time" @@ -21,10 +21,8 @@ import ( // // It is a shortcut for Decoder.Decode() with the default options. 
func Unmarshal(data []byte, v interface{}) error { - p := unstable.Parser{} - p.Reset(data) - d := decoder{p: &p} - + d := decoder{} + d.p.Reset(data) return d.FromParser(v) } @@ -35,6 +33,9 @@ type Decoder struct { // global settings strict bool + + // toggles unmarshaler interface + unmarshalerInterface bool } // NewDecoder creates a new Decoder that will read from r. @@ -54,6 +55,24 @@ func (d *Decoder) DisallowUnknownFields() *Decoder { return d } +// EnableUnmarshalerInterface enables the unmarshaler interface. +// +// With this feature enabled, types implementing the unstable/Unmarshaler +// interface can be decoded from any structure of the document. It allows types +// that don't have a straightforward TOML representation to provide their own +// decoding logic. +// +// Currently, types can only decode from a single value. Tables and array tables +// are not supported. +// +// *Unstable:* This method does not follow the compatibility guarantees of +// semver. It can be changed or removed without a new major version being +// issued. +func (d *Decoder) EnableUnmarshalerInterface() *Decoder { + d.unmarshalerInterface = true + return d +} + // Decode the whole content of r into v. // // By default, values in the document that don't exist in the target Go value @@ -96,26 +115,25 @@ func (d *Decoder) DisallowUnknownFields() *Decoder { // Inline Table -> same as Table // Array of Tables -> same as Array and Table func (d *Decoder) Decode(v interface{}) error { - b, err := ioutil.ReadAll(d.r) + b, err := io.ReadAll(d.r) if err != nil { return fmt.Errorf("toml: %w", err) } - p := unstable.Parser{} - p.Reset(b) dec := decoder{ - p: &p, strict: strict{ Enabled: d.strict, }, + unmarshalerInterface: d.unmarshalerInterface, } + dec.p.Reset(b) return dec.FromParser(v) } type decoder struct { // Which parser instance in use for this decoding session. - p *unstable.Parser + p unstable.Parser // Flag indicating that the current expression is stashed. // If set to true, calling nextExpr will not actually pull a new expression @@ -127,6 +145,10 @@ type decoder struct { // need to be skipped. skipUntilTable bool + // Flag indicating that the current array/slice table should be cleared because + // it is the first encounter of an array table. + clearArrayTable bool + // Tracks position in Go arrays. // This is used when decoding [[array tables]] into Go arrays. Given array // tables are separate TOML expression, we need to keep track of where we @@ -139,6 +161,9 @@ type decoder struct { // Strict mode strict strict + // Flag that enables/disables unmarshaler interface. + unmarshalerInterface bool + // Current context for the error.
errorContext *errorContext } @@ -246,9 +271,10 @@ Rules for the unmarshal code: func (d *decoder) handleRootExpression(expr *unstable.Node, v reflect.Value) error { var x reflect.Value var err error + var first bool // used for to clear array tables on first use if !(d.skipUntilTable && expr.Kind == unstable.KeyValue) { - err = d.seen.CheckExpression(expr) + first, err = d.seen.CheckExpression(expr) if err != nil { return err } @@ -267,6 +293,7 @@ func (d *decoder) handleRootExpression(expr *unstable.Node, v reflect.Value) err case unstable.ArrayTable: d.skipUntilTable = false d.strict.EnterArrayTable(expr) + d.clearArrayTable = first x, err = d.handleArrayTable(expr.Key(), v) default: panic(fmt.Errorf("parser should not permit expression of kind %s at document root", expr.Kind)) @@ -307,6 +334,10 @@ func (d *decoder) handleArrayTableCollectionLast(key unstable.Iterator, v reflec reflect.Copy(nelem, elem) elem = nelem } + if d.clearArrayTable && elem.Len() > 0 { + elem.SetLen(0) + d.clearArrayTable = false + } } return d.handleArrayTableCollectionLast(key, elem) case reflect.Ptr: @@ -325,6 +356,10 @@ func (d *decoder) handleArrayTableCollectionLast(key unstable.Iterator, v reflec return v, nil case reflect.Slice: + if d.clearArrayTable && v.Len() > 0 { + v.SetLen(0) + d.clearArrayTable = false + } elemType := v.Type().Elem() var elem reflect.Value if elemType.Kind() == reflect.Interface { @@ -576,7 +611,7 @@ func (d *decoder) handleKeyValues(v reflect.Value) (reflect.Value, error) { break } - err := d.seen.CheckExpression(expr) + _, err := d.seen.CheckExpression(expr) if err != nil { return reflect.Value{}, err } @@ -634,6 +669,14 @@ func (d *decoder) handleValue(value *unstable.Node, v reflect.Value) error { v = initAndDereferencePointer(v) } + if d.unmarshalerInterface { + if v.CanAddr() && v.Addr().CanInterface() { + if outi, ok := v.Addr().Interface().(unstable.Unmarshaler); ok { + return outi.UnmarshalTOML(value) + } + } + } + ok, err := d.tryTextUnmarshaler(value, v) if ok || err != nil { return err @@ -1031,12 +1074,39 @@ func (d *decoder) keyFromData(keyType reflect.Type, data []byte) (reflect.Value, } return mk, nil - case reflect.PtrTo(keyType).Implements(textUnmarshalerType): + case reflect.PointerTo(keyType).Implements(textUnmarshalerType): mk := reflect.New(keyType) if err := mk.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { return reflect.Value{}, fmt.Errorf("toml: error unmarshalling key type %s from text: %w", stringType, err) } return mk.Elem(), nil + + case keyType.Kind() == reflect.Int || keyType.Kind() == reflect.Int8 || keyType.Kind() == reflect.Int16 || keyType.Kind() == reflect.Int32 || keyType.Kind() == reflect.Int64: + key, err := strconv.ParseInt(string(data), 10, 64) + if err != nil { + return reflect.Value{}, fmt.Errorf("toml: error parsing key of type %s from integer: %w", stringType, err) + } + return reflect.ValueOf(key).Convert(keyType), nil + case keyType.Kind() == reflect.Uint || keyType.Kind() == reflect.Uint8 || keyType.Kind() == reflect.Uint16 || keyType.Kind() == reflect.Uint32 || keyType.Kind() == reflect.Uint64: + key, err := strconv.ParseUint(string(data), 10, 64) + if err != nil { + return reflect.Value{}, fmt.Errorf("toml: error parsing key of type %s from unsigned integer: %w", stringType, err) + } + return reflect.ValueOf(key).Convert(keyType), nil + + case keyType.Kind() == reflect.Float32: + key, err := strconv.ParseFloat(string(data), 32) + if err != nil { + return reflect.Value{}, fmt.Errorf("toml: error parsing key 
of type %s from float: %w", stringType, err) + } + return reflect.ValueOf(float32(key)), nil + + case keyType.Kind() == reflect.Float64: + key, err := strconv.ParseFloat(string(data), 64) + if err != nil { + return reflect.Value{}, fmt.Errorf("toml: error parsing key of type %s from float: %w", stringType, err) + } + return reflect.ValueOf(float64(key)), nil } return reflect.Value{}, fmt.Errorf("toml: cannot convert map key of type %s to expected type %s", stringType, keyType) } @@ -1097,9 +1167,9 @@ func (d *decoder) handleKeyValuePart(key unstable.Iterator, value *unstable.Node f := fieldByIndex(v, path) - if !f.CanSet() { - // If the field is not settable, need to take a slower path and make a copy of - // the struct itself to a new location. + if !f.CanAddr() { + // If the field is not addressable, need to take a slower path and + // make a copy of the struct itself to a new location. nvp := reflect.New(v.Type()) nvp.Elem().Set(v) v = nvp.Elem() diff --git a/vendor/github.com/pelletier/go-toml/v2/unstable/unmarshaler.go b/vendor/github.com/pelletier/go-toml/v2/unstable/unmarshaler.go new file mode 100644 index 00000000000..00cfd6de458 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/unstable/unmarshaler.go @@ -0,0 +1,7 @@ +package unstable + +// The Unmarshaler interface may be implemented by types to customize their +// behavior when being unmarshaled from a TOML document. +type Unmarshaler interface { + UnmarshalTOML(value *Node) error +} diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go index e96465460c5..04aaca8480d 100644 --- a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go @@ -246,7 +246,7 @@ func (b *FrameDataBlock) Compress(f *Frame, src []byte, level lz4block.Compressi b.src = src // keep track of the source for content checksum if f.Descriptor.Flags.BlockChecksum() { - b.Checksum = xxh32.ChecksumZero(src) + b.Checksum = xxh32.ChecksumZero(b.Data) } return b } @@ -328,7 +328,7 @@ func (b *FrameDataBlock) Uncompress(f *Frame, dst, dict []byte, sum bool) ([]byt dst = dst[:n] } if f.Descriptor.Flags.BlockChecksum() { - if c := xxh32.ChecksumZero(dst); c != b.Checksum { + if c := xxh32.ChecksumZero(b.data); c != b.Checksum { err := fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidBlockChecksum, c, b.Checksum) return nil, err } diff --git a/vendor/github.com/power-devops/perfstat/config.go b/vendor/github.com/power-devops/perfstat/config.go index de7230d28c0..a6df39c6720 100644 --- a/vendor/github.com/power-devops/perfstat/config.go +++ b/vendor/github.com/power-devops/perfstat/config.go @@ -1,3 +1,4 @@ +//go:build aix // +build aix package perfstat diff --git a/vendor/github.com/power-devops/perfstat/cpustat.go b/vendor/github.com/power-devops/perfstat/cpustat.go index 846daafba94..10f543fa4d5 100644 --- a/vendor/github.com/power-devops/perfstat/cpustat.go +++ b/vendor/github.com/power-devops/perfstat/cpustat.go @@ -1,3 +1,4 @@ +//go:build aix // +build aix package perfstat @@ -135,4 +136,3 @@ func CpuUtilTotalStat() (*CPUUtil, error) { u := perfstatcpuutil2cpuutil(cpuutil) return &u, nil } - diff --git a/vendor/github.com/power-devops/perfstat/diskstat.go b/vendor/github.com/power-devops/perfstat/diskstat.go index fc70dfaa4e1..06763b4bc9f 100644 --- a/vendor/github.com/power-devops/perfstat/diskstat.go +++ b/vendor/github.com/power-devops/perfstat/diskstat.go @@ -1,3 +1,4 @@ +//go:build aix // 
+build aix package perfstat diff --git a/vendor/github.com/power-devops/perfstat/doc.go b/vendor/github.com/power-devops/perfstat/doc.go index 85eaf3e7eda..9730a61c2c0 100644 --- a/vendor/github.com/power-devops/perfstat/doc.go +++ b/vendor/github.com/power-devops/perfstat/doc.go @@ -1,3 +1,4 @@ +//go:build !aix // +build !aix // Copyright 2020 Power-Devops.com. All rights reserved. @@ -36,24 +37,24 @@ func DisableLVMStat() {} // CpuStat() returns array of CPU structures with information about // logical CPUs on the system. // IBM documentation: -// * https://www.ibm.com/support/knowledgecenter/ssw_aix_72/performancetools/idprftools_perfstat_int_cpu.html -// * https://www.ibm.com/support/knowledgecenter/en/ssw_aix_72/p_bostechref/perfstat_cpu.html +// - https://www.ibm.com/support/knowledgecenter/ssw_aix_72/performancetools/idprftools_perfstat_int_cpu.html +// - https://www.ibm.com/support/knowledgecenter/en/ssw_aix_72/p_bostechref/perfstat_cpu.html func CpuStat() ([]CPU, error) { return nil, fmt.Errorf("not implemented") } // CpuTotalStat() returns general information about CPUs on the system. // IBM documentation: -// * https://www.ibm.com/support/knowledgecenter/ssw_aix_72/performancetools/idprftools_perfstat_glob_cpu.html -// * https://www.ibm.com/support/knowledgecenter/en/ssw_aix_72/p_bostechref/perfstat_cputot.html +// - https://www.ibm.com/support/knowledgecenter/ssw_aix_72/performancetools/idprftools_perfstat_glob_cpu.html +// - https://www.ibm.com/support/knowledgecenter/en/ssw_aix_72/p_bostechref/perfstat_cputot.html func CpuTotalStat() (*CPUTotal, error) { return nil, fmt.Errorf("not implemented") } // CpuUtilStat() calculates CPU utilization. // IBM documentation: -// * https://www.ibm.com/support/knowledgecenter/ssw_aix_72/performancetools/idprftools_perfstat_cpu_util.html -// * https://www.ibm.com/support/knowledgecenter/en/ssw_aix_72/p_bostechref/perfstat_cpu_util.html +// - https://www.ibm.com/support/knowledgecenter/ssw_aix_72/performancetools/idprftools_perfstat_cpu_util.html +// - https://www.ibm.com/support/knowledgecenter/en/ssw_aix_72/p_bostechref/perfstat_cpu_util.html func CpuUtilStat(intvl time.Duration) (*CPUUtil, error) { return nil, fmt.Errorf("not implemented") } diff --git a/vendor/github.com/power-devops/perfstat/fsstat.go b/vendor/github.com/power-devops/perfstat/fsstat.go index 27f4c06c158..d3913197acb 100644 --- a/vendor/github.com/power-devops/perfstat/fsstat.go +++ b/vendor/github.com/power-devops/perfstat/fsstat.go @@ -1,3 +1,4 @@ +//go:build aix // +build aix package perfstat diff --git a/vendor/github.com/power-devops/perfstat/helpers.go b/vendor/github.com/power-devops/perfstat/helpers.go index 1b13eb561e2..d5268ab53ac 100644 --- a/vendor/github.com/power-devops/perfstat/helpers.go +++ b/vendor/github.com/power-devops/perfstat/helpers.go @@ -1,3 +1,4 @@ +//go:build aix // +build aix package perfstat @@ -7,6 +8,7 @@ package perfstat #include #include +#include #include "c_helpers.h" */ @@ -762,3 +764,56 @@ func fsinfo2filesystem(n *C.struct_fsinfo) FileSystem { return i } + +func lparinfo2partinfo(n C.lpar_info_format2_t) PartitionInfo { + var i PartitionInfo + + i.Version = int(n.version) + i.OnlineMemory = uint64(n.online_memory) + i.TotalDispatchTime = uint64(n.tot_dispatch_time) + i.PoolIdleTime = uint64(n.pool_idle_time) + i.DispatchLatency = uint64(n.dispatch_latency) + i.LparFlags = uint(n.lpar_flags) + i.PCpusInSys = uint(n.pcpus_in_sys) + i.OnlineVCpus = uint(n.online_vcpus) + i.OnlineLCpus = uint(n.online_lcpus) + i.PCpusInPool = 
uint(n.pcpus_in_pool) + i.UnallocCapacity = uint(n.unalloc_capacity) + i.EntitledCapacity = uint(n.entitled_capacity) + i.VariableWeight = uint(n.variable_weight) + i.UnallocWeight = uint(n.unalloc_weight) + i.MinReqVCpuCapacity = uint(n.min_req_vcpu_capacity) + i.GroupId = uint8(n.group_id) + i.PoolId = uint8(n.pool_id) + i.ShCpusInSys = uint(n.shcpus_in_sys) + i.MaxPoolCapacity = uint(n.max_pool_capacity) + i.EntitledPoolCapacity = uint(n.entitled_pool_capacity) + i.PoolMaxTime = uint64(n.pool_max_time) + i.PoolBusyTime = uint64(n.pool_busy_time) + i.PoolScaledBusyTime = uint64(n.pool_scaled_busy_time) + i.ShCpuTotalTime = uint64(n.shcpu_tot_time) + i.ShCpuBusyTime = uint64(n.shcpu_busy_time) + i.ShCpuScaledBusyTime = uint64(n.shcpu_scaled_busy_time) + i.EntMemCapacity = uint64(n.ent_mem_capacity) + i.PhysMem = uint64(n.phys_mem) + i.VrmPoolPhysMem = uint64(n.vrm_pool_physmem) + i.HypPageSize = uint(n.hyp_pagesize) + i.VrmPoolId = int(n.vrm_pool_id) + i.VrmGroupId = int(n.vrm_group_id) + i.VarMemWeight = int(n.var_mem_weight) + i.UnallocVarMemWeight = int(n.unalloc_var_mem_weight) + i.UnallocEntMemCapacity = uint64(n.unalloc_ent_mem_capacity) + i.TrueOnlineMemory = uint64(n.true_online_memory) + i.AmeOnlineMemory = uint64(n.ame_online_memory) + i.AmeType = uint8(n.ame_type) + i.SpecExecMode = uint8(n.spec_exec_mode) + i.AmeFactor = uint(n.ame_factor) + i.EmPartMajorCode = uint(n.em_part_major_code) + i.EmPartMinorCode = uint(n.em_part_minor_code) + i.BytesCoalesced = uint64(n.bytes_coalesced) + i.BytesCoalescedMemPool = uint64(n.bytes_coalesced_mempool) + i.PurrCoalescing = uint64(n.purr_coalescing) + i.SpurrCoalescing = uint64(n.spurr_coalescing) + + return i +} diff --git a/vendor/github.com/power-devops/perfstat/lparstat.go b/vendor/github.com/power-devops/perfstat/lparstat.go index 0ce35e3c562..470a1af2f91 100644 --- a/vendor/github.com/power-devops/perfstat/lparstat.go +++ b/vendor/github.com/power-devops/perfstat/lparstat.go @@ -1,3 +1,4 @@ +//go:build aix // +build aix package perfstat @@ -6,11 +7,13 @@ package perfstat #cgo LDFLAGS: -lperfstat #include +#include */ import "C" import ( "fmt" + "unsafe" ) func PartitionStat() (*PartitionConfig, error) { @@ -24,3 +27,14 @@ func PartitionStat() (*PartitionConfig, error) { return &p, nil } + +func LparInfo() (*PartitionInfo, error) { + var pinfo C.lpar_info_format2_t + + rc := C.lpar_get_info(C.LPAR_INFO_FORMAT2, unsafe.Pointer(&pinfo), C.sizeof_lpar_info_format2_t) + if rc != 0 { + return nil, fmt.Errorf("lpar_get_info() error") + } + p := lparinfo2partinfo(pinfo) + return &p, nil +} diff --git a/vendor/github.com/power-devops/perfstat/lvmstat.go b/vendor/github.com/power-devops/perfstat/lvmstat.go index eb2064c8046..2ce99086ad0 100644 --- a/vendor/github.com/power-devops/perfstat/lvmstat.go +++ b/vendor/github.com/power-devops/perfstat/lvmstat.go @@ -1,3 +1,4 @@ +//go:build aix // +build aix package perfstat diff --git a/vendor/github.com/power-devops/perfstat/memstat.go b/vendor/github.com/power-devops/perfstat/memstat.go index d211a73aac8..52133f0a848 100644 --- a/vendor/github.com/power-devops/perfstat/memstat.go +++ b/vendor/github.com/power-devops/perfstat/memstat.go @@ -1,3 +1,4 @@ +//go:build aix // +build aix package perfstat diff --git a/vendor/github.com/power-devops/perfstat/netstat.go b/vendor/github.com/power-devops/perfstat/netstat.go index 4070da211bc..847d2946eec 100644 --- a/vendor/github.com/power-devops/perfstat/netstat.go +++ b/vendor/github.com/power-devops/perfstat/netstat.go @@ -1,3 +1,4 @@ +//go:build aix // 
+build aix package perfstat diff --git a/vendor/github.com/power-devops/perfstat/procstat.go b/vendor/github.com/power-devops/perfstat/procstat.go index ecafebd8db2..957ec2b33ac 100644 --- a/vendor/github.com/power-devops/perfstat/procstat.go +++ b/vendor/github.com/power-devops/perfstat/procstat.go @@ -1,3 +1,4 @@ +//go:build aix // +build aix package perfstat diff --git a/vendor/github.com/power-devops/perfstat/sysconf.go b/vendor/github.com/power-devops/perfstat/sysconf.go index c7454d03d49..b557da0deaa 100644 --- a/vendor/github.com/power-devops/perfstat/sysconf.go +++ b/vendor/github.com/power-devops/perfstat/sysconf.go @@ -1,3 +1,4 @@ +//go:build aix // +build aix package perfstat diff --git a/vendor/github.com/power-devops/perfstat/systemcfg.go b/vendor/github.com/power-devops/perfstat/systemcfg.go index 6287eb46ab8..b7c7b72592e 100644 --- a/vendor/github.com/power-devops/perfstat/systemcfg.go +++ b/vendor/github.com/power-devops/perfstat/systemcfg.go @@ -1,3 +1,4 @@ +//go:build aix // +build aix package perfstat @@ -70,6 +71,7 @@ const ( SC_TM_VER = 59 /* Transaction Memory version, 0 - not capable */ SC_NX_CAP = 60 /* NX GZIP capable */ SC_PKS_STATE = 61 /* Platform KeyStore */ + SC_MMA_VER = 62 ) /* kernel attributes */ @@ -119,6 +121,7 @@ const ( IMPL_POWER7 = 0x8000 /* 7 class CPU */ IMPL_POWER8 = 0x10000 /* 8 class CPU */ IMPL_POWER9 = 0x20000 /* 9 class CPU */ + IMPL_POWER10 = 0x40000 /* 10 class CPU */ ) // Values for implementation field for IA64 Architectures @@ -151,11 +154,13 @@ const ( PV_7 = 0x200000 /* Power PC 7 */ PV_8 = 0x300000 /* Power PC 8 */ PV_9 = 0x400000 /* Power PC 9 */ + PV_10 = 0x500000 /* Power PC 10 */ PV_5_Compat = 0x0F8000 /* Power PC 5 */ PV_6_Compat = 0x108000 /* Power PC 6 */ PV_7_Compat = 0x208000 /* Power PC 7 */ PV_8_Compat = 0x308000 /* Power PC 8 */ PV_9_Compat = 0x408000 /* Power PC 9 */ + PV_10_Compat = 0x508000 /* Power PC 10 */ PV_RESERVED_2 = 0x0A0000 /* source compatability */ PV_RESERVED_3 = 0x0B0000 /* source compatability */ PV_RS2 = 0x040000 /* Power RS2 */ @@ -181,19 +186,21 @@ ) // Macros for identifying physical processor const ( - PPI4_1 = 0x35 - PPI4_2 = 0x38 - PPI4_3 = 0x39 - PPI4_4 = 0x3C - PPI4_5 = 0x44 - PPI5_1 = 0x3A - PPI5_2 = 0x3B - PPI6_1 = 0x3E - PPI7_1 = 0x3F - PPI7_2 = 0x4A - PPI8_1 = 0x4B - PPI8_2 = 0x4D - PPI9 = 0x4E + PPI4_1 = 0x35 + PPI4_2 = 0x38 + PPI4_3 = 0x39 + PPI4_4 = 0x3C + PPI4_5 = 0x44 + PPI5_1 = 0x3A + PPI5_2 = 0x3B + PPI6_1 = 0x3E + PPI7_1 = 0x3F + PPI7_2 = 0x4A + PPI8_1 = 0x4B + PPI8_2 = 0x4D + PPI9 = 0x4E + PPI9_1 = 0x4E + PPI10_1 = 0x80 ) // Macros for kernel attributes @@ -291,14 +298,32 @@ func GetCPUImplementation() string { return "POWER8" case impl&IMPL_POWER9 != 0: return "POWER9" + case impl&IMPL_POWER10 != 0: + return "Power10" default: return "Unknown" } } +func POWER10OrNewer() bool { + impl := unix.Getsystemcfg(SC_IMPL) + if impl&IMPL_POWER10 != 0 { + return true + } + return false +} + +func POWER10() bool { + impl := unix.Getsystemcfg(SC_IMPL) + if impl&IMPL_POWER10 != 0 { + return true + } + return false +} + func POWER9OrNewer() bool { impl := unix.Getsystemcfg(SC_IMPL) - if impl&IMPL_POWER9 != 0 { + if impl&IMPL_POWER10 != 0 || impl&IMPL_POWER9 != 0 { return true } return false @@ -314,7 +339,7 @@ func POWER9() bool { func POWER8OrNewer() bool { impl := unix.Getsystemcfg(SC_IMPL) - if impl&IMPL_POWER9 != 0 || impl&IMPL_POWER8 != 0 { + if impl&IMPL_POWER10 != 0 || impl&IMPL_POWER9 != 0 || impl&IMPL_POWER8 != 0 { return true } return false @@ -330,7 +355,7 @@ func POWER8() bool { 
func POWER7OrNewer() bool { impl := unix.Getsystemcfg(SC_IMPL) - if impl&IMPL_POWER9 != 0 || impl&IMPL_POWER8 != 0 || impl&IMPL_POWER7 != 0 { + if impl&IMPL_POWER10 != 0 || impl&IMPL_POWER9 != 0 || impl&IMPL_POWER8 != 0 || impl&IMPL_POWER7 != 0 { return true } return false @@ -419,6 +444,8 @@ func PksEnabled() bool { func CPUMode() string { impl := unix.Getsystemcfg(SC_VERS) switch impl { + case PV_10, PV_10_Compat: + return "Power10" case PV_9, PV_9_Compat: return "POWER9" case PV_8, PV_8_Compat: diff --git a/vendor/github.com/power-devops/perfstat/types_disk.go b/vendor/github.com/power-devops/perfstat/types_disk.go index ca1493d8726..50e323dbe0b 100644 --- a/vendor/github.com/power-devops/perfstat/types_disk.go +++ b/vendor/github.com/power-devops/perfstat/types_disk.go @@ -29,8 +29,8 @@ type DiskTotal struct { // Disk Adapter Types const ( DA_SCSI = 0 /* 0 ==> SCSI, SAS, other legacy adapter types */ - DA_VSCSI /* 1 ==> Virtual SCSI/SAS Adapter */ - DA_FCA /* 2 ==> Fiber Channel Adapter */ + DA_VSCSI = 1 /* 1 ==> Virtual SCSI/SAS Adapter */ + DA_FCA = 2 /* 2 ==> Fiber Channel Adapter */ ) type DiskAdapter struct { diff --git a/vendor/github.com/power-devops/perfstat/types_lpar.go b/vendor/github.com/power-devops/perfstat/types_lpar.go index 2d3c32fa8cb..f95f8c300c8 100644 --- a/vendor/github.com/power-devops/perfstat/types_lpar.go +++ b/vendor/github.com/power-devops/perfstat/types_lpar.go @@ -66,3 +66,64 @@ type PartitionConfig struct { TargetMemExpSize int64 /* Expanded Memory Size in MB */ SubProcessorMode int32 /* Split core mode, its value can be 0,1,2 or 4. 0 for unsupported, 1 for capable but not enabled, 2 or 4 for enabled*/ } + +const ( + AME_TYPE_V1 = 0x1 + AME_TYPE_V2 = 0x2 + LPAR_INFO_CAPPED = 0x01 /* Partition Capped */ + LPAR_INFO_AUTH_PIC = 0x02 /* Authority granted for poolidle*/ + LPAR_INFO_SMT_ENABLED = 0x04 /* SMT Enabled */ + LPAR_INFO_WPAR_ACTIVE = 0x08 /* Process Running Within a WPAR */ + LPAR_INFO_EXTENDED = 0x10 /* Extended shared processor pool information */ + LPAR_INFO_AME_ENABLED = 0x20 /* Active Mem. Expansion (AME) enabled*/ + LPAR_INFO_SEM_ENABLED = 0x40 /* Speculative Execution Mode enabled */ +) + +type PartitionInfo struct { + Version int /* version for this structure */ + OnlineMemory uint64 /* MB of currently online memory */ + TotalDispatchTime uint64 /* Total lpar dispatch time in nsecs */ + PoolIdleTime uint64 /* Idle time of shared CPU pool nsecs*/ + DispatchLatency uint64 /* Max latency in between dispatches of this LPAR on physCPUS in nsecs */ + LparFlags uint /* LPAR flags */ + PCpusInSys uint /* # of active licensed physical CPUs in system */ + OnlineVCpus uint /* # of current online virtual CPUs */ + OnlineLCpus uint /* # of current online logical CPUs */ + PCpusInPool uint /* # physical CPUs in shared pool */ + UnallocCapacity uint /* Unallocated Capacity available in shared pool */ + EntitledCapacity uint /* Entitled Processor Capacity for this partition */ + VariableWeight uint /* Variable Processor Capacity Weight */ + UnallocWeight uint /* Unallocated Variable Weight available for this partition */ + MinReqVCpuCapacity uint /* OS minimum required virtual processor capacity.
*/ + GroupId uint8 /* ID of a LPAR group/aggregation */ + PoolId uint8 /* ID of a shared pool */ + ShCpusInSys uint /* # of physical processors allocated for shared processor use */ + MaxPoolCapacity uint /* Maximum processor capacity of partition's pool */ + EntitledPoolCapacity uint /* Entitled processor capacity of partition's pool */ + PoolMaxTime uint64 /* Summation of maximum time that could be consumed by the pool, in nanoseconds */ + PoolBusyTime uint64 /* Summation of busy time accumulated across all partitions in the pool, in nanoseconds */ + PoolScaledBusyTime uint64 /* Scaled summation of busy time accumulated across all partitions in the pool, in nanoseconds */ + ShCpuTotalTime uint64 /* Summation of total time across all physical processors allocated for shared processor use, in nanoseconds */ + ShCpuBusyTime uint64 /* Summation of busy time accumulated across all shared processor partitions, in nanoseconds */ + ShCpuScaledBusyTime uint64 /* Scaled summation of busy time accumulated across all shared processor partitions, in nanoseconds */ + EntMemCapacity uint64 /* Partition's current entitlement memory capacity setting */ + PhysMem uint64 /* Amount of physical memory, in bytes, currently backing the partition's logical memory */ + VrmPoolPhysMem uint64 /* Total amount of physical memory in the VRM pool */ + HypPageSize uint /* Page size hypervisor is using to virtualize partition's memory */ + VrmPoolId int /* ID of VRM pool */ + VrmGroupId int /* eWLM VRM group to which partition belongs */ + VarMemWeight int /* Partition's current variable memory capacity weighting setting */ + UnallocVarMemWeight int /* Amount of unallocated variable memory capacity weight available to LPAR's group */ + UnallocEntMemCapacity uint64 /* Amount of unallocated I/O memory entitlement available to LPAR's group */ + TrueOnlineMemory uint64 /* true MB of currently online memory */ + AmeOnlineMemory uint64 /* AME MB of currently online memory */ + AmeType uint8 + SpecExecMode uint8 /* Speculative Execution Mode */ + AmeFactor uint /* memory expansion factor for LPAR */ + EmPartMajorCode uint /* Major and minor codes for our */ + EmPartMinorCode uint /* current energy management mode */ + BytesCoalesced uint64 /* The number of bytes of the calling partition's logical real memory coalesced because they contained duplicated data */ + BytesCoalescedMemPool uint64 /* If the calling partition is authorized to see pool wide statistics then the number of bytes of logical real memory coalesced because they contained duplicated data in the calling partition's memory pool else set to zero.*/ + PurrCoalescing uint64 /* If the calling partition is authorized to see pool wide statistics then PURR cycles consumed to coalesce data else set to zero.*/ + SpurrCoalescing uint64 /* If the calling partition is authorized to see pool wide statistics then SPURR cycles consumed to coalesce data else set to zero.*/ +} diff --git a/vendor/github.com/power-devops/perfstat/uptime.go b/vendor/github.com/power-devops/perfstat/uptime.go index 2bd3e568d2d..86087874796 100644 --- a/vendor/github.com/power-devops/perfstat/uptime.go +++ b/vendor/github.com/power-devops/perfstat/uptime.go @@ -1,3 +1,4 @@ +//go:build aix // +build aix package perfstat diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE index dd878a30ee9..b9cc55abbb0 100644 --- a/vendor/github.com/prometheus/client_golang/NOTICE +++ b/vendor/github.com/prometheus/client_golang/NOTICE @@ -16,8 +16,3 @@ Go
support for Protocol Buffers - Google's data interchange format http://github.com/golang/protobuf/ Copyright 2010 The Go Authors See source code for license details. - -Support for streaming Protocol Buffer messages for the Go language (golang). -https://github.com/matttproud/golang_protobuf_extensions -Copyright 2013 Matt T. Proud -Licensed under the Apache License, Version 2.0 diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE new file mode 100644 index 00000000000..65d761bc9f2 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go new file mode 100644 index 00000000000..8547c8dfd18 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go @@ -0,0 +1,145 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd. + +// Package header provides functions for parsing HTTP headers. +package header + +import ( + "net/http" + "strings" +) + +// Octet types from RFC 2616. +var octetTypes [256]octetType + +type octetType byte + +const ( + isToken octetType = 1 << iota + isSpace +) + +func init() { + // OCTET = <any 8-bit sequence of data> + // CHAR = <any US-ASCII character (octets 0 - 127)> + // CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)> + // CR = <US-ASCII CR, carriage return (13)> + // LF = <US-ASCII LF, linefeed (10)> + // SP = <US-ASCII SP, space (32)> + // HT = <US-ASCII HT, horizontal-tab (9)> + // <"> = <US-ASCII double-quote mark (34)> + // CRLF = CR LF + // LWS = [CRLF] 1*( SP | HT ) + // TEXT = <any OCTET except CTLs, but including LWS> + // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> + // | "/" | "[" | "]" | "?"
| "=" | "{" | "}" | SP | HT + // token = 1*<any CHAR except CTLs or separators> + // qdtext = <any TEXT except <">> + + for c := 0; c < 256; c++ { + var t octetType + isCtl := c <= 31 || c == 127 + isChar := 0 <= c && c <= 127 + isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) + if strings.ContainsRune(" \t\r\n", rune(c)) { + t |= isSpace + } + if isChar && !isCtl && !isSeparator { + t |= isToken + } + octetTypes[c] = t + } +} + +// AcceptSpec describes an Accept* header. +type AcceptSpec struct { + Value string + Q float64 +} + +// ParseAccept parses Accept* headers. +func ParseAccept(header http.Header, key string) (specs []AcceptSpec) { +loop: + for _, s := range header[key] { + for { + var spec AcceptSpec + spec.Value, s = expectTokenSlash(s) + if spec.Value == "" { + continue loop + } + spec.Q = 1.0 + s = skipSpace(s) + if strings.HasPrefix(s, ";") { + s = skipSpace(s[1:]) + if !strings.HasPrefix(s, "q=") { + continue loop + } + spec.Q, s = expectQuality(s[2:]) + if spec.Q < 0.0 { + continue loop + } + } + specs = append(specs, spec) + s = skipSpace(s) + if !strings.HasPrefix(s, ",") { + continue loop + } + s = skipSpace(s[1:]) + } + } + return +} + +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isSpace == 0 { + break + } + } + return s[i:] +} + +func expectTokenSlash(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + b := s[i] + if (octetTypes[b]&isToken == 0) && b != '/' { + break + } + } + return s[:i], s[i:] +} + +func expectQuality(s string) (q float64, rest string) { + switch { + case len(s) == 0: + return -1, "" + case s[0] == '0': + q = 0 + case s[0] == '1': + q = 1 + default: + return -1, "" + } + s = s[1:] + if !strings.HasPrefix(s, ".") { + return q, s + } + s = s[1:] + i := 0 + n := 0 + d := 1 + for ; i < len(s); i++ { + b := s[i] + if b < '0' || b > '9' { + break + } + n = n*10 + int(b) - '0' + d *= 10 + } + return q + float64(n)/float64(d), s[i:] +} diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go new file mode 100644 index 00000000000..2e45780b74b --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go @@ -0,0 +1,36 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd. + +package httputil + +import ( + "net/http" + + "github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header" +) + +// NegotiateContentEncoding returns the best offered content encoding for the +// request's Accept-Encoding header. If two offers match with equal weight, +// then the offer earlier in the list is preferred. If no offers are +// acceptable, then "" is returned.
+func NegotiateContentEncoding(r *http.Request, offers []string) string { + bestOffer := "identity" + bestQ := -1.0 + specs := header.ParseAccept(r.Header, "Accept-Encoding") + for _, offer := range offers { + for _, spec := range specs { + if spec.Q > bestQ && + (spec.Value == "*" || spec.Value == offer) { + bestQ = spec.Q + bestOffer = offer + } + } + } + if bestQ == 0 { + bestOffer = "" + } + return bestOffer +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go index bcfa4fa10e0..cc4ef1077e8 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go @@ -37,6 +37,9 @@ var ( // MetricsScheduler allows only scheduler metrics to be collected from Go runtime. // e.g. go_sched_goroutines_goroutines MetricsScheduler = GoRuntimeMetricsRule{regexp.MustCompile(`^/sched/.*`)} + // MetricsDebug allows only debug metrics to be collected from Go runtime. + // e.g. go_godebug_non_default_behavior_gocachetest_events_total + MetricsDebug = GoRuntimeMetricsRule{regexp.MustCompile(`^/godebug/.*`)} ) // WithGoCollectorMemStatsMetricsDisabled disables metrics that is gathered in runtime.MemStats structure such as: @@ -44,7 +47,6 @@ var ( // go_memstats_alloc_bytes // go_memstats_alloc_bytes_total // go_memstats_sys_bytes -// go_memstats_lookups_total // go_memstats_mallocs_total // go_memstats_frees_total // go_memstats_heap_alloc_bytes diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go index ad9a71a5e0d..520cbd7d418 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -22,13 +22,13 @@ import ( // goRuntimeMemStats provides the metrics initially provided by runtime.ReadMemStats. // From Go 1.17 those similar (and better) statistics are provided by runtime/metrics, so // while eval closure works on runtime.MemStats, the struct from Go 1.17+ is -// populated using runtime/metrics. +// populated using runtime/metrics. Those are the defaults we can't alter. func goRuntimeMemStats() memStatsMetrics { return memStatsMetrics{ { desc: NewDesc( memstatNamespace("alloc_bytes"), - "Number of bytes allocated and still in use.", + "Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) }, @@ -36,7 +36,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("alloc_bytes_total"), - "Total number of bytes allocated, even if freed.", + "Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) }, @@ -44,23 +44,16 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("sys_bytes"), - "Number of bytes obtained from system.", + "Number of bytes obtained from system. 
Equals to /memory/classes/total:byte.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("lookups_total"), - "Total number of pointer lookups.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) }, - valType: CounterValue, }, { desc: NewDesc( memstatNamespace("mallocs_total"), - "Total number of mallocs.", + // TODO(bwplotka): We could add go_memstats_heap_objects, probably useful for discovery. Let's gather more feedback, kind of a waste of bytes for everybody for compatibility reasons to keep both, and we can't really rename/remove useful metric. + "Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) }, @@ -68,7 +61,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("frees_total"), - "Total number of frees.", + "Total number of heap objects frees. Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) }, @@ -76,7 +69,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_alloc_bytes"), - "Number of heap bytes allocated and still in use.", + "Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) }, @@ -84,7 +77,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_sys_bytes"), - "Number of heap bytes obtained from system.", + "Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) }, @@ -92,7 +85,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_idle_bytes"), - "Number of heap bytes waiting to be used.", + "Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) }, @@ -100,7 +93,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_inuse_bytes"), - "Number of heap bytes that are in use.", + "Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) }, @@ -108,7 +101,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_released_bytes"), - "Number of heap bytes released to OS.", + "Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, @@ -116,7 +109,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_objects"), - "Number of allocated objects.", + "Number of currently allocated objects. 
Equals to /gc/heap/objects:objects.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) }, @@ -124,7 +117,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("stack_inuse_bytes"), - "Number of bytes in use by the stack allocator.", + "Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) }, @@ -132,7 +125,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("stack_sys_bytes"), - "Number of bytes obtained from system for stack allocator.", + "Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) }, @@ -140,7 +133,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mspan_inuse_bytes"), - "Number of bytes in use by mspan structures.", + "Number of bytes in use by mspan structures. Equals to /memory/classes/metadata/mspan/inuse:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) }, @@ -148,7 +141,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mspan_sys_bytes"), - "Number of bytes used for mspan structures obtained from system.", + "Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) }, @@ -156,7 +149,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mcache_inuse_bytes"), - "Number of bytes in use by mcache structures.", + "Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) }, @@ -164,7 +157,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mcache_sys_bytes"), - "Number of bytes used for mcache structures obtained from system.", + "Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) }, @@ -172,7 +165,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("buck_hash_sys_bytes"), - "Number of bytes used by the profiling bucket hash table.", + "Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) }, @@ -180,7 +173,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("gc_sys_bytes"), - "Number of bytes used for garbage collection system metadata.", + "Number of bytes used for garbage collection system metadata. 
Equals to /memory/classes/metadata/other:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) }, @@ -188,7 +181,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("other_sys_bytes"), - "Number of bytes used for other system allocations.", + "Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) }, @@ -196,7 +189,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("next_gc_bytes"), - "Number of heap bytes when next garbage collection will take place.", + "Number of heap bytes when next garbage collection will take place. Equals to /gc/heap/goal:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, @@ -225,7 +218,7 @@ func newBaseGoCollector() baseGoCollector { nil, nil), gcDesc: NewDesc( "go_gc_duration_seconds", - "A summary of the pause duration of garbage collection cycles.", + "A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.", nil, nil), gcLastTimeDesc: NewDesc( "go_memstats_last_gc_time_seconds", diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go index 2d8d9f64f43..51174641729 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go @@ -17,6 +17,7 @@ package prometheus import ( + "fmt" "math" "runtime" "runtime/metrics" @@ -153,7 +154,8 @@ func defaultGoCollectorOptions() internal.GoCollectorOptions { "/gc/heap/frees-by-size:bytes": goGCHeapFreesBytes, }, RuntimeMetricRules: []internal.GoCollectorRule{ - //{Matcher: regexp.MustCompile("")}, + // Recommended metrics we want by default from runtime/metrics. + {Matcher: internal.GoCollectorDefaultRuntimeMetrics}, }, } } @@ -203,6 +205,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { // to fail here. This condition is tested in TestExpectedRuntimeMetrics. continue } + help := attachOriginalName(d.Description.Description, d.Name) sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name}) sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1] @@ -214,7 +217,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { m = newBatchHistogram( NewDesc( BuildFQName(namespace, subsystem, name), - d.Description.Description, + help, nil, nil, ), @@ -226,7 +229,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { Namespace: namespace, Subsystem: subsystem, Name: name, - Help: d.Description.Description, + Help: help, }, ) } else { @@ -234,7 +237,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { Namespace: namespace, Subsystem: subsystem, Name: name, - Help: d.Description.Description, + Help: help, }) } metricSet = append(metricSet, m) @@ -284,6 +287,10 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { } } +func attachOriginalName(desc, origName string) string { + return fmt.Sprintf("%s Sourced from %s", desc, origName) +} + // Describe returns all descriptions of the collector. 
func (c *goCollector) Describe(ch chan<- *Desc) { c.base.Describe(ch) @@ -376,13 +383,13 @@ func unwrapScalarRMValue(v metrics.Value) float64 { // // This should never happen because we always populate our metric // set from the runtime/metrics package. - panic("unexpected unsupported metric") + panic("unexpected bad kind metric") default: // Unsupported metric kind. // // This should never happen because we check for this during initialization // and flag and filter metrics whose kinds we don't understand. - panic("unexpected unsupported metric kind") + panic(fmt.Sprintf("unexpected unsupported metric: %v", v.Kind())) } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index b5c8bcb395a..519db348a74 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -440,7 +440,7 @@ type HistogramOpts struct { // constant (or any negative float value). NativeHistogramZeroThreshold float64 - // The remaining fields define a strategy to limit the number of + // The next three fields define a strategy to limit the number of // populated sparse buckets. If NativeHistogramMaxBucketNumber is left // at zero, the number of buckets is not limited. (Note that this might // lead to unbounded memory consumption if the values observed by the @@ -473,6 +473,22 @@ type HistogramOpts struct { NativeHistogramMinResetDuration time.Duration NativeHistogramMaxZeroThreshold float64 + // NativeHistogramMaxExemplars limits the number of exemplars + // that are kept in memory for each native histogram. If you leave it at + // zero, a default value of 10 is used. If no exemplars should be kept specifically + // for native histograms, set it to a negative value. (Scrapers can + // still use the exemplars exposed for classic buckets, which are managed + // independently.) + NativeHistogramMaxExemplars int + // NativeHistogramExemplarTTL is only checked once + // NativeHistogramMaxExemplars is exceeded. In that case, the + // oldest exemplar is removed if it is older than NativeHistogramExemplarTTL. + // Otherwise, the older exemplar in the pair of exemplars that are closest + // together (on an exponential scale) is removed. + // If NativeHistogramExemplarTTL is left at its zero value, a default value of + // 5m is used. To always delete the oldest exemplar, set it to a negative value. + NativeHistogramExemplarTTL time.Duration + // now is for testing purposes, by default it's time.Now. now func() time.Time @@ -532,6 +548,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr if opts.afterFunc == nil { opts.afterFunc = time.AfterFunc } + h := &histogram{ desc: desc, upperBounds: opts.Buckets, @@ -556,6 +573,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr h.nativeHistogramZeroThreshold = DefNativeHistogramZeroThreshold } // Leave h.nativeHistogramZeroThreshold at 0 otherwise. h.nativeHistogramSchema = pickSchema(opts.NativeHistogramBucketFactor) + h.nativeExemplars = makeNativeExemplars(opts.NativeHistogramExemplarTTL, opts.NativeHistogramMaxExemplars) } for i, upperBound := range h.upperBounds { if i < len(h.upperBounds)-1 { @@ -725,7 +743,8 @@ type histogram struct { // resetScheduled is protected by mtx. It is true if a reset is // scheduled for a later time (when nativeHistogramMinResetDuration has // passed). 
- resetScheduled bool + resetScheduled bool + nativeExemplars nativeExemplars // now is for testing purposes, by default it's time.Now. now func() time.Time @@ -742,6 +761,9 @@ func (h *histogram) Observe(v float64) { h.observe(v, h.findBucket(v)) } +// ObserveWithExemplar should not be called in a high-frequency setting +// for a native histogram with configured exemplars. For this case, +// the implementation isn't lock-free and might suffer from lock contention. func (h *histogram) ObserveWithExemplar(v float64, e Labels) { i := h.findBucket(v) h.observe(v, i) @@ -821,6 +843,13 @@ func (h *histogram) Write(out *dto.Metric) error { Length: proto.Uint32(0), }} } + + if h.nativeExemplars.isEnabled() { + h.nativeExemplars.Lock() + his.Exemplars = append(his.Exemplars, h.nativeExemplars.exemplars...) + h.nativeExemplars.Unlock() + } + } addAndResetCounts(hotCounts, coldCounts) return nil @@ -1091,8 +1120,10 @@ func (h *histogram) resetCounts(counts *histogramCounts) { deleteSyncMap(&counts.nativeHistogramBucketsPositive) } -// updateExemplar replaces the exemplar for the provided bucket. With empty -// labels, it's a no-op. It panics if any of the labels is invalid. +// updateExemplar replaces the exemplar for the provided classic bucket. +// With empty labels, it's a no-op. It panics if any of the labels is invalid. +// If histogram is native, the exemplar will be cached into nativeExemplars, +// which has a limit, and will remove one exemplar when limit is reached. func (h *histogram) updateExemplar(v float64, bucket int, l Labels) { if l == nil { return @@ -1102,6 +1133,10 @@ func (h *histogram) updateExemplar(v float64, bucket int, l Labels) { panic(err) } h.exemplars[bucket].Store(e) + doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v) + if doSparse { + h.nativeExemplars.addExemplar(e) + } } // HistogramVec is a Collector that bundles a set of Histograms that all share the @@ -1336,6 +1371,48 @@ func MustNewConstHistogram( return m } +// NewConstHistogramWithCreatedTimestamp does the same thing as NewConstHistogram but sets the created timestamp. +func NewConstHistogramWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + ct time.Time, + labelValues ...string, +) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { + return nil, err + } + return &constHistogram{ + desc: desc, + count: count, + sum: sum, + buckets: buckets, + labelPairs: MakeLabelPairs(desc, labelValues), + createdTs: timestamppb.New(ct), + }, nil +} + +// MustNewConstHistogramWithCreatedTimestamp is a version of NewConstHistogramWithCreatedTimestamp that panics where +// NewConstHistogramWithCreatedTimestamp would have returned an error. +func MustNewConstHistogramWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + ct time.Time, + labelValues ...string, +) Metric { + m, err := NewConstHistogramWithCreatedTimestamp(desc, count, sum, buckets, ct, labelValues...) 
+ if err != nil { + panic(err) + } + return m +} + type buckSort []*dto.Bucket func (s buckSort) Len() int { @@ -1575,3 +1652,186 @@ func addAndResetCounts(hot, cold *histogramCounts) { atomic.AddUint64(&hot.nativeHistogramZeroBucket, atomic.LoadUint64(&cold.nativeHistogramZeroBucket)) atomic.StoreUint64(&cold.nativeHistogramZeroBucket, 0) } + +type nativeExemplars struct { + sync.Mutex + + // Time-to-live for exemplars; it is set to -1 if exemplars are disabled, that is, NativeHistogramMaxExemplars is below 0. + // The ttl is used on insertion to remove an exemplar that is older than ttl, if present. + ttl time.Duration + + exemplars []*dto.Exemplar +} + +func (n *nativeExemplars) isEnabled() bool { + return n.ttl != -1 +} + +func makeNativeExemplars(ttl time.Duration, maxCount int) nativeExemplars { + if ttl == 0 { + ttl = 5 * time.Minute + } + + if maxCount == 0 { + maxCount = 10 + } + + if maxCount < 0 { + maxCount = 0 + ttl = -1 + } + + return nativeExemplars{ + ttl: ttl, + exemplars: make([]*dto.Exemplar, 0, maxCount), + } +} + +func (n *nativeExemplars) addExemplar(e *dto.Exemplar) { + if !n.isEnabled() { + return + } + + n.Lock() + defer n.Unlock() + + // While the number of exemplars is still below cap(n.exemplars), + // insert the new exemplar directly. + if len(n.exemplars) < cap(n.exemplars) { + var nIdx int + for nIdx = 0; nIdx < len(n.exemplars); nIdx++ { + if *e.Value < *n.exemplars[nIdx].Value { + break + } + } + n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...) + return + } + + if len(n.exemplars) == 1 { + // When the number of exemplars is 1, then + // replace the existing exemplar with the new exemplar. + n.exemplars[0] = e + return + } + // From this point on, the number of exemplars is greater than 1. + + // When the number of exemplars exceeds the limit, remove one exemplar. + var ( + ot = time.Time{} // Oldest timestamp seen. Initial value doesn't matter as we replace it due to otIdx == -1 in the loop. + otIdx = -1 // Index of the exemplar with the oldest timestamp. + + md = -1.0 // Logarithm of the delta of the closest pair of exemplars. + + // The insertion point of the new exemplar in the exemplars slice after insertion. + // This is calculated purely based on the order of the exemplars by value. + // nIdx == len(n.exemplars) means the new exemplar is to be inserted after the end. + nIdx = -1 + + // rIdx is ultimately the index for the exemplar that we are replacing with the new exemplar. + // The aim is to keep a good spread of exemplars by value and not let them bunch up too much. + // It is calculated in 3 steps: + // 1. First we set rIdx to the index of the older exemplar within the closest pair by value. + // That is the following will be true (on log scale): + // either the exemplar pair on index (rIdx-1, rIdx) or (rIdx, rIdx+1) will have + // the closest values to each other from all pairs. + // For example, suppose the values are distributed like this: + // |-----------x-------------x----------------x----x-----| + // ^--rIdx as this is older. + // Or like this: + // |-----------x-------------x----------------x----x-----| + // ^--rIdx as this is older. + // 2. If there is an exemplar that expired, then we simply reset rIdx to that index. + // 3. We check if by inserting the new exemplar we would create a closer pair at + // (nIdx-1, nIdx) or (nIdx, nIdx+1) and set rIdx to nIdx-1 or nIdx accordingly to + // keep the spread of exemplars by value; otherwise we keep rIdx as it is.
+ rIdx = -1 + cLog float64 // Logarithm of the current exemplar. + pLog float64 // Logarithm of the previous exemplar. + ) + + for i, exemplar := range n.exemplars { + // Find the exemplar with the oldest timestamp. + if otIdx == -1 || exemplar.Timestamp.AsTime().Before(ot) { + ot = exemplar.Timestamp.AsTime() + otIdx = i + } + + // Find the index at which to insert the new exemplar. + if nIdx == -1 && *e.Value <= *exemplar.Value { + nIdx = i + } + + // Find the two closest exemplars and pick the one with the older timestamp. + pLog = cLog + cLog = math.Log(exemplar.GetValue()) + if i == 0 { + continue + } + diff := math.Abs(cLog - pLog) + if md == -1 || diff < md { + // The closest exemplar pair is at index: i-1, i. + // Choose the exemplar with the older timestamp for replacement. + md = diff + if n.exemplars[i].Timestamp.AsTime().Before(n.exemplars[i-1].Timestamp.AsTime()) { + rIdx = i + } else { + rIdx = i - 1 + } + } + + } + + // If all existing exemplars are smaller than the new exemplar, + // then the new exemplar should be inserted at the end. + if nIdx == -1 { + nIdx = len(n.exemplars) + } + // Here, we have the following relationships: + // n.exemplars[nIdx-1].Value < e.Value (if nIdx > 0) + // e.Value <= n.exemplars[nIdx].Value (if nIdx < len(n.exemplars)) + + if otIdx != -1 && e.Timestamp.AsTime().Sub(ot) > n.ttl { + // If the oldest exemplar has expired, then replace it with the new exemplar. + rIdx = otIdx + } else { + // In the previous for loop, when calculating the closest pair of exemplars, + // we did not take into account the newly inserted exemplar. + // So we need to calculate with the newly inserted exemplar again. + elog := math.Log(e.GetValue()) + if nIdx > 0 { + diff := math.Abs(elog - math.Log(n.exemplars[nIdx-1].GetValue())) + if diff < md { + // The value we are about to insert is closer to the previous exemplar at the insertion point than what we calculated before in rIdx. + // v--rIdx + // |-----------x-n-----------x----------------x----x-----| + // nIdx-1--^ ^--new exemplar value + // Do not make the spread worse, replace nIdx-1 and not rIdx. + md = diff + rIdx = nIdx - 1 + } + } + if nIdx < len(n.exemplars) { + diff := math.Abs(math.Log(n.exemplars[nIdx].GetValue()) - elog) + if diff < md { + // The value we are about to insert is closer to the next exemplar at the insertion point than what we calculated before in rIdx. + // v--rIdx + // |-----------x-----------n-x----------------x----x-----| + // new exemplar value--^ ^--nIdx + // Do not make the spread worse, replace nIdx and not rIdx. + rIdx = nIdx + } + } + } + + // Adjust the slice according to rIdx and nIdx. + switch { + case rIdx == nIdx: + n.exemplars[nIdx] = e + case rIdx < nIdx: + n.exemplars = append(n.exemplars[:rIdx], append(n.exemplars[rIdx+1:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...)...) + case rIdx > nIdx: + n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, append(n.exemplars[nIdx:rIdx], n.exemplars[rIdx+1:]...)...)...)
+ } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go index 723b45d6444..a4fa6eabd78 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go @@ -30,3 +30,5 @@ type GoCollectorOptions struct { RuntimeMetricSumForHist map[string]string RuntimeMetricRules []GoCollectorRule } + +var GoCollectorDefaultRuntimeMetrics = regexp.MustCompile(`/gc/gogc:percent|/gc/gomemlimit:bytes|/sched/gomaxprocs:threads`) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index f018e57237d..9d9b81ab448 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -234,7 +234,7 @@ func NewMetricWithExemplars(m Metric, exemplars ...Exemplar) (Metric, error) { ) for i, e := range exemplars { ts := e.Timestamp - if ts == (time.Time{}) { + if ts.IsZero() { ts = now } exs[i], err = newExemplar(e.Value, ts, e.Labels) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go index 8548dd18ed5..62a4e7ad9a0 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -22,14 +22,15 @@ import ( ) type processCollector struct { - collectFn func(chan<- Metric) - pidFn func() (int, error) - reportErrors bool - cpuTotal *Desc - openFDs, maxFDs *Desc - vsize, maxVsize *Desc - rss *Desc - startTime *Desc + collectFn func(chan<- Metric) + pidFn func() (int, error) + reportErrors bool + cpuTotal *Desc + openFDs, maxFDs *Desc + vsize, maxVsize *Desc + rss *Desc + startTime *Desc + inBytes, outBytes *Desc } // ProcessCollectorOpts defines the behavior of a process metrics collector @@ -100,6 +101,16 @@ func NewProcessCollector(opts ProcessCollectorOpts) Collector { "Start time of the process since unix epoch in seconds.", nil, nil, ), + inBytes: NewDesc( + ns+"process_network_receive_bytes_total", + "Number of bytes received by the process over the network.", + nil, nil, + ), + outBytes: NewDesc( + ns+"process_network_transmit_bytes_total", + "Number of bytes sent by the process over the network.", + nil, nil, + ), } if opts.PidFn == nil { @@ -129,6 +140,8 @@ func (c *processCollector) Describe(ch chan<- *Desc) { ch <- c.maxVsize ch <- c.rss ch <- c.startTime + ch <- c.inBytes + ch <- c.outBytes } // Collect returns the current state of all metrics of the collector. 
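As a side note on the exemplar plumbing above: the two HistogramOpts fields introduced in this change (NativeHistogramMaxExemplars, NativeHistogramExemplarTTL) feed makeNativeExemplars, and exemplars recorded via ObserveWithExemplar are then retained under that policy. A minimal sketch of how a caller would exercise them; the metric name, label, and values are illustrative assumptions, not taken from this diff:

```go
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// A bucket factor > 1 enables the native histogram; the exemplar
	// fields added in this change then bound exemplar retention.
	h := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:                        "example_request_duration_seconds", // illustrative
		Help:                        "Example request latency.",
		NativeHistogramBucketFactor: 1.1,
		NativeHistogramMaxExemplars: 5,               // 0 means the default of 10; negative disables
		NativeHistogramExemplarTTL:  2 * time.Minute, // 0 means the default of 5m
	})
	prometheus.MustRegister(h)

	// Exemplars recorded here are also kept for the native histogram,
	// subject to the TTL and spread-preserving eviction in addExemplar.
	h.(prometheus.ExemplarObserver).ObserveWithExemplar(
		0.042, prometheus.Labels{"trace_id": "abc123"},
	)
}
```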
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go index 8c1136ceea3..14d56d2d068 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go @@ -63,4 +63,18 @@ func (c *processCollector) processCollect(ch chan<- Metric) { } else { c.reportError(ch, nil, err) } + + if netstat, err := p.Netstat(); err == nil { + var inOctets, outOctets float64 + if netstat.IpExt.InOctets != nil { + inOctets = *netstat.IpExt.InOctets + } + if netstat.IpExt.OutOctets != nil { + outOctets = *netstat.IpExt.OutOctets + } + ch <- MustNewConstMetric(c.inBytes, CounterValue, inOctets) + ch <- MustNewConstMetric(c.outBytes, CounterValue, outOctets) + } else { + c.reportError(ch, nil, err) + } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go index 9819917b83b..315eab5f179 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go @@ -76,6 +76,12 @@ func (r *responseWriterDelegator) Write(b []byte) (int, error) { return n, err } +// Unwrap lets http.ResponseController get the underlying http.ResponseWriter, +// by implementing the [rwUnwrapper](https://cs.opensource.google/go/go/+/refs/tags/go1.21.4:src/net/http/responsecontroller.go;l=42-44) interface. +func (r *responseWriterDelegator) Unwrap() http.ResponseWriter { + return r.ResponseWriter +} + type ( closeNotifierDelegator struct{ *responseWriterDelegator } flusherDelegator struct{ *responseWriterDelegator } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go index 09b8d2fbead..e598e66e688 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -38,12 +38,13 @@ import ( "io" "net/http" "strconv" - "strings" "sync" "time" + "github.com/klauspost/compress/zstd" "github.com/prometheus/common/expfmt" + "github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil" "github.com/prometheus/client_golang/prometheus" ) @@ -54,6 +55,18 @@ const ( processStartTimeHeader = "Process-Start-Time-Unix" ) +// Compression represents the content encodings handlers support for the HTTP +// responses. +type Compression string + +const ( + Identity Compression = "identity" + Gzip Compression = "gzip" + Zstd Compression = "zstd" +) + +var defaultCompressionFormats = []Compression{Identity, Gzip, Zstd} + var gzipPool = sync.Pool{ New: func() interface{} { return gzip.NewWriter(nil) @@ -122,6 +135,18 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO } } + // Select compression formats to offer based on default or user choice. 
+ var compressions []string + if !opts.DisableCompression { + offers := defaultCompressionFormats + if len(opts.OfferedCompressions) > 0 { + offers = opts.OfferedCompressions + } + for _, comp := range offers { + compressions = append(compressions, string(comp)) + } + } + h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) { if !opts.ProcessStartTime.IsZero() { rsp.Header().Set(processStartTimeHeader, strconv.FormatInt(opts.ProcessStartTime.Unix(), 10)) @@ -165,21 +190,23 @@ } else { contentType = expfmt.Negotiate(req.Header) } - header := rsp.Header() - header.Set(contentTypeHeader, string(contentType)) + rsp.Header().Set(contentTypeHeader, string(contentType)) - w := io.Writer(rsp) - if !opts.DisableCompression && gzipAccepted(req.Header) { - header.Set(contentEncodingHeader, "gzip") - gz := gzipPool.Get().(*gzip.Writer) - defer gzipPool.Put(gz) + w, encodingHeader, closeWriter, err := negotiateEncodingWriter(req, rsp, compressions) + if err != nil { + if opts.ErrorLog != nil { + opts.ErrorLog.Println("error getting writer", err) + } + w = io.Writer(rsp) + encodingHeader = string(Identity) + } - gz.Reset(w) - defer gz.Close() + defer closeWriter() - w = gz + // Set Content-Encoding only when data is compressed + if encodingHeader != string(Identity) { + rsp.Header().Set(contentEncodingHeader, encodingHeader) } - enc := expfmt.NewEncoder(w, contentType) // handleError handles the error according to opts.ErrorHandling @@ -343,9 +370,19 @@ type HandlerOpts struct { // no effect on the HTTP status code because ErrorHandling is set to // ContinueOnError. Registry prometheus.Registerer - // If DisableCompression is true, the handler will never compress the - // response, even if requested by the client. + // DisableCompression disables the response encoding (compression) and + // encoding negotiation. If true, the handler will + // never compress the response, even if requested + // by the client and the OfferedCompressions field is set. DisableCompression bool + // OfferedCompressions is a set of encodings (compressions) the handler will + // try to offer when negotiating with the client. This defaults to identity, gzip + // and zstd. + // NOTE: If the handler can't agree with the client on the encodings or + // unsupported or empty encodings are set in OfferedCompressions, + // the handler always falls back to no compression (identity), for + // compatibility reasons. In such cases ErrorLog will be used if set. + OfferedCompressions []Compression // The number of concurrent HTTP requests is limited to // MaxRequestsInFlight. Additional requests are responded to with 503 // Service Unavailable and a suitable message in the body. If @@ -381,19 +418,6 @@ type HandlerOpts struct { ProcessStartTime time.Time } -// gzipAccepted returns whether the client will accept gzip-encoded content. -func gzipAccepted(header http.Header) bool { - a := header.Get(acceptEncodingHeader) - parts := strings.Split(a, ",") - for _, part := range parts { - part = strings.TrimSpace(part) - if part == "gzip" || strings.HasPrefix(part, "gzip;") { - return true - } - } - return false -} - // httpError removes any content-encoding header and then calls http.Error with // the provided error and http.StatusInternalServerError. Error contents is // supposed to be uncompressed plain text.
Same as with a plain http.Error, this @@ -406,3 +430,38 @@ func httpError(rsp http.ResponseWriter, err error) { http.StatusInternalServerError, ) } + +// negotiateEncodingWriter reads the Accept-Encoding header from a request and +// selects the right compression based on an allow-list of supported +// compressions. It returns a writer implementing the compression and the +// correct value that the caller can set in the response header. +func negotiateEncodingWriter(r *http.Request, rw io.Writer, compressions []string) (_ io.Writer, encodingHeaderValue string, closeWriter func(), _ error) { + if len(compressions) == 0 { + return rw, string(Identity), func() {}, nil + } + + // TODO(mrueg): Replace internal/github.com/gddo once https://github.com/golang/go/issues/19307 is implemented. + selected := httputil.NegotiateContentEncoding(r, compressions) + + switch selected { + case "zstd": + // TODO(mrueg): Replace klauspost/compress with stdlib implementation once https://github.com/golang/go/issues/62513 is implemented. + z, err := zstd.NewWriter(rw, zstd.WithEncoderLevel(zstd.SpeedFastest)) + if err != nil { + return nil, "", func() {}, err + } + + z.Reset(rw) + return z, selected, func() { _ = z.Close() }, nil + case "gzip": + gz := gzipPool.Get().(*gzip.Writer) + gz.Reset(rw) + return gz, selected, func() { _ = gz.Close(); gzipPool.Put(gz) }, nil + case "identity": + // This means the content is not compressed. + return rw, selected, func() {}, nil + default: + // The content encoding was not implemented yet. + return nil, "", func() {}, fmt.Errorf("content compression format not recognized: %s. Valid formats are: %s", selected, defaultCompressionFormats) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index 5e2ced25a02..c6fd2f58b74 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -314,16 +314,17 @@ func (r *Registry) Register(c Collector) error { if dimHash != desc.dimHash { return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc) } - } else { - // ...then check the new descriptors already seen. - if dimHash, exists := newDimHashesByName[desc.fqName]; exists { - if dimHash != desc.dimHash { - return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) - } - } else { - newDimHashesByName[desc.fqName] = desc.dimHash + continue + } + + // ...then check the new descriptors already seen. + if dimHash, exists := newDimHashesByName[desc.fqName]; exists { + if dimHash != desc.dimHash { + return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) } + continue } + newDimHashesByName[desc.fqName] = desc.dimHash } // A Collector yielding no Desc at all is considered unchecked.
if len(newDescIDs) == 0 { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index 1462704446c..1ab0e479655 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -783,3 +783,45 @@ func MustNewConstSummary( } return m } + +// NewConstSummaryWithCreatedTimestamp does the same thing as NewConstSummary but sets the created timestamp. +func NewConstSummaryWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + quantiles map[float64]float64, + ct time.Time, + labelValues ...string, +) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { + return nil, err + } + return &constSummary{ + desc: desc, + count: count, + sum: sum, + quantiles: quantiles, + labelPairs: MakeLabelPairs(desc, labelValues), + createdTs: timestamppb.New(ct), + }, nil +} + +// MustNewConstSummaryWithCreatedTimestamp is a version of NewConstSummaryWithCreatedTimestamp that panics where +// NewConstSummaryWithCreatedTimestamp would have returned an error. +func MustNewConstSummaryWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + quantiles map[float64]float64, + ct time.Time, + labelValues ...string, +) Metric { + m, err := NewConstSummaryWithCreatedTimestamp(desc, count, sum, quantiles, ct, labelValues...) + if err != nil { + panic(err) + } + return m +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go index f52ad9eab67..e1441598da8 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go @@ -30,4 +30,5 @@ var defaultValidations = []Validation{ validations.LintReservedChars, validations.LintCamelCase, validations.LintUnitAbbreviations, + validations.LintDuplicateMetric, } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go new file mode 100644 index 00000000000..fdc1e623948 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go @@ -0,0 +1,37 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package validations + +import ( + "fmt" + "reflect" + + dto "github.com/prometheus/client_model/go" +) + +// LintDuplicateMetric detects duplicate metric. 
+func LintDuplicateMetric(mf *dto.MetricFamily) []error { + var problems []error + + for i, m := range mf.Metric { + for _, k := range mf.Metric[i+1:] { + if reflect.DeepEqual(m.Label, k.Label) { + problems = append(problems, fmt.Errorf("metric not unique")) + break + } + } + } + + return problems +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go index bc8dbd1e16b..de52cfee443 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go @@ -44,21 +44,21 @@ func LintMetricUnits(mf *dto.MetricFamily) []error { return problems } -// LintMetricTypeInName detects when metric types are included in the metric name. +// LintMetricTypeInName detects when the metric type is included in the metric name. func LintMetricTypeInName(mf *dto.MetricFamily) []error { + if mf.GetType() == dto.MetricType_UNTYPED { + return nil + } + var problems []error - n := strings.ToLower(mf.GetName()) - for i, t := range dto.MetricType_name { - if i == int32(dto.MetricType_UNTYPED) { - continue - } + n := strings.ToLower(mf.GetName()) + typename := strings.ToLower(mf.GetType().String()) - typename := strings.ToLower(t) - if strings.Contains(n, "_"+typename+"_") || strings.HasSuffix(n, "_"+typename) { - problems = append(problems, fmt.Errorf(`metric name should not include type '%s'`, typename)) - } + if strings.Contains(n, "_"+typename+"_") || strings.HasSuffix(n, "_"+typename) { + problems = append(problems, fmt.Errorf(`metric name should not include type '%s'`, typename)) } + return problems } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go index 9dce15eafa2..6f1200180a7 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go @@ -42,9 +42,8 @@ import ( "fmt" "io" "net/http" - "reflect" - "github.com/davecgh/go-spew/spew" + "github.com/kylelemons/godebug/diff" dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/expfmt" "google.golang.org/protobuf/proto" @@ -159,6 +158,9 @@ func GatherAndCount(g prometheus.Gatherer, metricNames ...string) (int, error) { // ScrapeAndCompare calls a remote exporter's endpoint which is expected to return some metrics in // plain text format. Then it compares it with the results that the `expected` would return. // If the `metricNames` is not empty it would filter the comparison only to the given metric names. +// +// NOTE: Be mindful of accidental discrepancies between expected and metricNames; metricNames filter +// both expected and scraped metrics. See https://github.com/prometheus/client_golang/issues/1351. func ScrapeAndCompare(url string, expected io.Reader, metricNames ...string) error { resp, err := http.Get(url) if err != nil { @@ -184,9 +186,11 @@ func ScrapeAndCompare(url string, expected io.Reader, metricNames ...string) err return compareMetricFamilies(scraped, wanted, metricNames...) } -// CollectAndCompare registers the provided Collector with a newly created -// pedantic Registry. 
It then calls GatherAndCompare with that Registry and with -// the provided metricNames. +// CollectAndCompare collects the metrics identified by `metricNames` and compares them in the Prometheus text +// exposition format to the data read from expected. +// +// NOTE: Be mindful of accidental discrepancies between expected and metricNames; metricNames filter +// both expected and collected metrics. See https://github.com/prometheus/client_golang/issues/1351. func CollectAndCompare(c prometheus.Collector, expected io.Reader, metricNames ...string) error { reg := prometheus.NewPedanticRegistry() if err := reg.Register(c); err != nil { @@ -199,6 +203,9 @@ func CollectAndCompare(c prometheus.Collector, expected io.Reader, metricNames . // it to an expected output read from the provided Reader in the Prometheus text // exposition format. If any metricNames are provided, only metrics with those // names are compared. +// +// NOTE: Be mindful of accidental discrepancies between expected and metricNames; metricNames filter +// both expected and gathered metrics. See https://github.com/prometheus/client_golang/issues/1351. func GatherAndCompare(g prometheus.Gatherer, expected io.Reader, metricNames ...string) error { return TransactionalGatherAndCompare(prometheus.ToTransactionalGatherer(g), expected, metricNames...) } @@ -207,6 +214,9 @@ func GatherAndCompare(g prometheus.Gatherer, expected io.Reader, metricNames ... // it to an expected output read from the provided Reader in the Prometheus text // exposition format. If any metricNames are provided, only metrics with those // names are compared. +// +// NOTE: Be mindful of accidental discrepancies between expected and metricNames; metricNames filter +// both expected and gathered metrics. See https://github.com/prometheus/client_golang/issues/1351. func TransactionalGatherAndCompare(g prometheus.TransactionalGatherer, expected io.Reader, metricNames ...string) error { got, done, err := g.Gather() defer done() @@ -222,6 +232,31 @@ func TransactionalGatherAndCompare(g prometheus.TransactionalGatherer, expected return compareMetricFamilies(got, wanted, metricNames...) } +// CollectAndFormat collects the metrics identified by `metricNames` and returns them in the given format. +func CollectAndFormat(c prometheus.Collector, format expfmt.FormatType, metricNames ...string) ([]byte, error) { + reg := prometheus.NewPedanticRegistry() + if err := reg.Register(c); err != nil { + return nil, fmt.Errorf("registering collector failed: %w", err) + } + + gotFiltered, err := reg.Gather() + if err != nil { + return nil, fmt.Errorf("gathering metrics failed: %w", err) + } + + gotFiltered = filterMetrics(gotFiltered, metricNames) + + var gotFormatted bytes.Buffer + enc := expfmt.NewEncoder(&gotFormatted, expfmt.NewFormat(format)) + for _, mf := range gotFiltered { + if err := enc.Encode(mf); err != nil { + return nil, fmt.Errorf("encoding gathered metrics failed: %w", err) + } + } + + return gotFormatted.Bytes(), nil +} + // convertReaderToMetricFamily would read from a io.Reader object and convert it to a slice of // dto.MetricFamily. 
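// Editor's note, a hedged usage sketch (not from the diff) for the new
// CollectAndFormat helper added above; the collector and metric name are
// hypothetical:
//
//	raw, err := testutil.CollectAndFormat(requestsCounter, expfmt.TypeTextPlain, "http_requests_total")
//	if err != nil {
//		t.Fatal(err)
//	}
//	t.Logf("exposition:\n%s", raw)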
func convertReaderToMetricFamily(reader io.Reader) ([]*dto.MetricFamily, error) { @@ -277,73 +312,12 @@ func compare(got, want []*dto.MetricFamily) error { return fmt.Errorf("encoding expected metrics failed: %w", err) } } - if diffErr := diff(wantBuf, gotBuf); diffErr != "" { + if diffErr := diff.Diff(gotBuf.String(), wantBuf.String()); diffErr != "" { return fmt.Errorf(diffErr) } return nil } -// diff returns a diff of both values as long as both are of the same type and -// are a struct, map, slice, array or string. Otherwise it returns an empty string. -func diff(expected, actual interface{}) string { - if expected == nil || actual == nil { - return "" - } - - et, ek := typeAndKind(expected) - at, _ := typeAndKind(actual) - if et != at { - return "" - } - - if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array && ek != reflect.String { - return "" - } - - var e, a string - c := spew.ConfigState{ - Indent: " ", - DisablePointerAddresses: true, - DisableCapacities: true, - SortKeys: true, - } - if et != reflect.TypeOf("") { - e = c.Sdump(expected) - a = c.Sdump(actual) - } else { - e = reflect.ValueOf(expected).String() - a = reflect.ValueOf(actual).String() - } - - diff, _ := internal.GetUnifiedDiffString(internal.UnifiedDiff{ - A: internal.SplitLines(e), - B: internal.SplitLines(a), - FromFile: "metric output does not match expectation; want", - FromDate: "", - ToFile: "got:", - ToDate: "", - Context: 1, - }) - - if diff == "" { - return "" - } - - return "\n\nDiff:\n" + diff -} - -// typeAndKind returns the type and kind of the given interface{} -func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { - t := reflect.TypeOf(v) - k := t.Kind() - - if k == reflect.Ptr { - t = t.Elem() - k = t.Kind() - } - return t, k -} - func filterMetrics(metrics []*dto.MetricFamily, names []string) []*dto.MetricFamily { var filtered []*dto.MetricFamily for _, m := range metrics { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go index 955cfd59f83..2c808eece0a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -507,7 +507,7 @@ func (m *metricMap) getOrCreateMetricWithLabelValues( return metric } -// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value +// getOrCreateMetricWithLabels retrieves the metric by hash and label value // or creates it and returns the new one. // // This function holds the mutex. diff --git a/vendor/github.com/prometheus/common/config/headers.go b/vendor/github.com/prometheus/common/config/headers.go index 4a0be4a10e9..7276742ec90 100644 --- a/vendor/github.com/prometheus/common/config/headers.go +++ b/vendor/github.com/prometheus/common/config/headers.go @@ -52,14 +52,6 @@ var reservedHeaders = map[string]struct{}{ // Headers represents the configuration for HTTP headers. type Headers struct { Headers map[string]Header `yaml:",inline"` - dir string -} - -// Header represents the configuration for a single HTTP header. 
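// Editor's note on the headers.go change around this point: Header now lives
// below Headers, and relative file paths are resolved once in SetDirectory
// rather than on every read. A hedged sketch of the YAML these types accept
// (the http_headers key is assumed from the surrounding package; field names
// follow the yaml tags):
//
//	http_headers:
//	  X-Org-Token:
//	    files:
//	      - tokens/service.txt   # joined with the config directory by SetDirectory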
-type Header struct { - Values []string `yaml:"values,omitempty" json:"values,omitempty"` - Secrets []Secret `yaml:"secrets,omitempty" json:"secrets,omitempty"` - Files []string `yaml:"files,omitempty" json:"files,omitempty"` } func (h Headers) MarshalJSON() ([]byte, error) { @@ -67,32 +59,40 @@ func (h Headers) MarshalJSON() ([]byte, error) { return json.Marshal(h.Headers) } -// SetDirectory records the directory to make headers file relative to the -// configuration file. +// SetDirectory makes header files relative to the configuration file. func (h *Headers) SetDirectory(dir string) { if h == nil { return } - h.dir = dir + for _, h := range h.Headers { + h.SetDirectory(dir) + } } // Validate validates the Headers config. func (h *Headers) Validate() error { - for n, header := range h.Headers { + for n := range h.Headers { if _, ok := reservedHeaders[http.CanonicalHeaderKey(n)]; ok { return fmt.Errorf("setting header %q is not allowed", http.CanonicalHeaderKey(n)) } - for _, v := range header.Files { - f := JoinDir(h.dir, v) - _, err := os.ReadFile(f) - if err != nil { - return fmt.Errorf("unable to read header %q from file %s: %w", http.CanonicalHeaderKey(n), f, err) - } - } } return nil } +// Header represents the configuration for a single HTTP header. +type Header struct { + Values []string `yaml:"values,omitempty" json:"values,omitempty"` + Secrets []Secret `yaml:"secrets,omitempty" json:"secrets,omitempty"` + Files []string `yaml:"files,omitempty" json:"files,omitempty"` +} + +// SetDirectory makes header files relative to the configuration file. +func (h *Header) SetDirectory(dir string) { + for i := range h.Files { + h.Files[i] = JoinDir(dir, h.Files[i]) + } +} + // NewHeadersRoundTripper returns a RoundTripper that sets HTTP headers on // requests as configured. func NewHeadersRoundTripper(config *Headers, next http.RoundTripper) http.RoundTripper { @@ -121,10 +121,9 @@ func (rt *headersRoundTripper) RoundTrip(req *http.Request) (*http.Response, err req.Header.Add(n, string(v)) } for _, v := range h.Files { - f := JoinDir(rt.config.dir, v) - b, err := os.ReadFile(f) + b, err := os.ReadFile(v) if err != nil { - return nil, fmt.Errorf("unable to read headers file %s: %w", f, err) + return nil, fmt.Errorf("unable to read headers file %s: %w", v, err) } req.Header.Add(n, strings.TrimSpace(string(b))) } diff --git a/vendor/github.com/prometheus/common/config/http_config.go b/vendor/github.com/prometheus/common/config/http_config.go index 3e320134776..63809083aca 100644 --- a/vendor/github.com/prometheus/common/config/http_config.go +++ b/vendor/github.com/prometheus/common/config/http_config.go @@ -52,7 +52,8 @@ var ( http2Enabled: true, // 5 minutes is typically above the maximum sane scrape interval. So we can // use keepalive for all configurations. - idleConnTimeout: 5 * time.Minute, + idleConnTimeout: 5 * time.Minute, + newTLSConfigFunc: NewTLSConfigWithContext, } ) @@ -357,33 +358,33 @@ func nonZeroCount[T comparable](values ...T) int { func (c *HTTPClientConfig) Validate() error { // Backwards compatibility with the bearer_token field.
if len(c.BearerToken) > 0 && len(c.BearerTokenFile) > 0 { - return fmt.Errorf("at most one of bearer_token & bearer_token_file must be configured") + return errors.New("at most one of bearer_token & bearer_token_file must be configured") } if (c.BasicAuth != nil || c.OAuth2 != nil) && (len(c.BearerToken) > 0 || len(c.BearerTokenFile) > 0) { - return fmt.Errorf("at most one of basic_auth, oauth2, bearer_token & bearer_token_file must be configured") + return errors.New("at most one of basic_auth, oauth2, bearer_token & bearer_token_file must be configured") } if c.BasicAuth != nil && nonZeroCount(string(c.BasicAuth.Username) != "", c.BasicAuth.UsernameFile != "", c.BasicAuth.UsernameRef != "") > 1 { - return fmt.Errorf("at most one of basic_auth username, username_file & username_ref must be configured") + return errors.New("at most one of basic_auth username, username_file & username_ref must be configured") } if c.BasicAuth != nil && nonZeroCount(string(c.BasicAuth.Password) != "", c.BasicAuth.PasswordFile != "", c.BasicAuth.PasswordRef != "") > 1 { - return fmt.Errorf("at most one of basic_auth password, password_file & password_ref must be configured") + return errors.New("at most one of basic_auth password, password_file & password_ref must be configured") } if c.Authorization != nil { if len(c.BearerToken) > 0 || len(c.BearerTokenFile) > 0 { - return fmt.Errorf("authorization is not compatible with bearer_token & bearer_token_file") + return errors.New("authorization is not compatible with bearer_token & bearer_token_file") } if nonZeroCount(string(c.Authorization.Credentials) != "", c.Authorization.CredentialsFile != "", c.Authorization.CredentialsRef != "") > 1 { - return fmt.Errorf("at most one of authorization credentials & credentials_file must be configured") + return errors.New("at most one of authorization credentials & credentials_file must be configured") } c.Authorization.Type = strings.TrimSpace(c.Authorization.Type) if len(c.Authorization.Type) == 0 { c.Authorization.Type = "Bearer" } if strings.ToLower(c.Authorization.Type) == "basic" { - return fmt.Errorf(`authorization type cannot be set to "basic", use "basic_auth" instead`) + return errors.New(`authorization type cannot be set to "basic", use "basic_auth" instead`) } if c.BasicAuth != nil || c.OAuth2 != nil { - return fmt.Errorf("at most one of basic_auth, oauth2 & authorization must be configured") + return errors.New("at most one of basic_auth, oauth2 & authorization must be configured") } } else { if len(c.BearerToken) > 0 { @@ -399,16 +400,16 @@ func (c *HTTPClientConfig) Validate() error { } if c.OAuth2 != nil { if c.BasicAuth != nil { - return fmt.Errorf("at most one of basic_auth, oauth2 & authorization must be configured") + return errors.New("at most one of basic_auth, oauth2 & authorization must be configured") } if len(c.OAuth2.ClientID) == 0 { - return fmt.Errorf("oauth2 client_id must be configured") + return errors.New("oauth2 client_id must be configured") } if len(c.OAuth2.TokenURL) == 0 { - return fmt.Errorf("oauth2 token_url must be configured") + return errors.New("oauth2 token_url must be configured") } if nonZeroCount(len(c.OAuth2.ClientSecret) > 0, len(c.OAuth2.ClientSecretFile) > 0, len(c.OAuth2.ClientSecretRef) > 0) > 1 { - return fmt.Errorf("at most one of oauth2 client_secret, client_secret_file & client_secret_ref must be configured") + return errors.New("at most one of oauth2 client_secret, client_secret_file & client_secret_ref must be configured") } } if err := c.ProxyConfig.Validate(); 
err != nil { @@ -452,8 +453,12 @@ func (a *BasicAuth) UnmarshalYAML(unmarshal func(interface{}) error) error { // by net.Dialer. type DialContextFunc func(context.Context, string, string) (net.Conn, error) +// NewTLSConfigFunc returns a tls.Config. +type NewTLSConfigFunc func(context.Context, *TLSConfig, ...TLSConfigOption) (*tls.Config, error) + type httpClientOptions struct { dialContextFunc DialContextFunc + newTLSConfigFunc NewTLSConfigFunc keepAlivesEnabled bool http2Enabled bool idleConnTimeout time.Duration @@ -473,13 +478,23 @@ func (f httpClientOptionFunc) applyToHTTPClientOptions(options *httpClientOption f(options) } -// WithDialContextFunc allows you to override func gets used for the actual dialing. The default is `net.Dialer.DialContext`. +// WithDialContextFunc allows you to override the func that gets used for dialing. +// The default is `net.Dialer.DialContext`. func WithDialContextFunc(fn DialContextFunc) HTTPClientOption { return httpClientOptionFunc(func(opts *httpClientOptions) { opts.dialContextFunc = fn }) } +// WithNewTLSConfigFunc allows you to override the func that creates the TLS config +// from the prometheus http config. +// The default is `NewTLSConfigWithContext`. +func WithNewTLSConfigFunc(newTLSConfigFunc NewTLSConfigFunc) HTTPClientOption { + return httpClientOptionFunc(func(opts *httpClientOptions) { + opts.newTLSConfigFunc = newTLSConfigFunc + }) +} + // WithKeepAlivesDisabled allows to disable HTTP keepalive. func WithKeepAlivesDisabled() HTTPClientOption { return httpClientOptionFunc(func(opts *httpClientOptions) { @@ -670,7 +685,7 @@ func NewRoundTripperFromConfigWithContext(ctx context.Context, cfg HTTPClientCon return rt, nil } - tlsConfig, err := NewTLSConfig(&cfg.TLSConfig, WithSecretManager(opts.secretManager)) + tlsConfig, err := opts.newTLSConfigFunc(ctx, &cfg.TLSConfig, WithSecretManager(opts.secretManager)) if err != nil { return nil, err } @@ -679,8 +694,9 @@ func NewRoundTripperFromConfigWithContext(ctx context.Context, cfg HTTPClientCon if err != nil { return nil, err } - if tlsSettings.CA == nil || tlsSettings.CA.Immutable() { - // No need for a RoundTripper that reloads the CA file automatically. + + if tlsSettings.immutable() { + // No need for a RoundTripper that reloads the files automatically. return newRT(tlsConfig) } return NewTLSRoundTripperWithContext(ctx, tlsConfig, tlsSettings, newRT) @@ -735,7 +751,7 @@ func (s *FileSecret) Fetch(ctx context.Context) (string, error) { } func (s *FileSecret) Description() string { - return fmt.Sprintf("file %s", s.file) + return "file " + s.file } func (s *FileSecret) Immutable() bool { @@ -753,7 +769,7 @@ func (s *refSecret) Fetch(ctx context.Context) (string, error) { } func (s *refSecret) Description() string { - return fmt.Sprintf("ref %s", s.ref) + return "ref " + s.ref } func (s *refSecret) Immutable() bool { @@ -828,7 +844,7 @@ type basicAuthRoundTripper struct { // NewBasicAuthRoundTripper will apply a BASIC auth authorization header to a request unless it has // already been set.
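// Editor's note, a hedged sketch (not part of the diff) of wiring the
// WithNewTLSConfigFunc option defined above into a client round tripper;
// everything except the option itself is hypothetical:
//
//	rt, err := config.NewRoundTripperFromConfig(cfg, "scrape",
//		config.WithNewTLSConfigFunc(func(ctx context.Context, c *config.TLSConfig, opts ...config.TLSConfigOption) (*tls.Config, error) {
//			tc, err := config.NewTLSConfigWithContext(ctx, c, opts...)
//			if err != nil {
//				return nil, err
//			}
//			tc.MinVersion = tls.VersionTLS13 // e.g. enforce a stricter floor than the YAML allows
//			return tc, nil
//		}))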
-func NewBasicAuthRoundTripper(username SecretReader, password SecretReader, rt http.RoundTripper) http.RoundTripper { +func NewBasicAuthRoundTripper(username, password SecretReader, rt http.RoundTripper) http.RoundTripper { return &basicAuthRoundTripper{username, password, rt} } @@ -914,7 +930,7 @@ func (rt *oauth2RoundTripper) newOauth2TokenSource(req *http.Request, secret str if err != nil { return nil, nil, err } - if tlsSettings.CA == nil || tlsSettings.CA.Immutable() { + if tlsSettings.immutable() { t, _ = tlsTransport(tlsConfig) } else { t, err = NewTLSRoundTripperWithContext(req.Context(), tlsConfig, tlsSettings, tlsTransport) @@ -964,7 +980,7 @@ func (rt *oauth2RoundTripper) RoundTrip(req *http.Request) (*http.Response, erro } rt.mtx.Lock() - rt.lastSecret = secret + rt.lastSecret = newSecret rt.lastRT.Source = source if rt.client != nil { rt.client.CloseIdleConnections() @@ -1045,7 +1061,7 @@ func NewTLSConfigWithContext(ctx context.Context, cfg *TLSConfig, optFuncs ...TL if cfg.MaxVersion != 0 && cfg.MinVersion != 0 { if cfg.MaxVersion < cfg.MinVersion { - return nil, fmt.Errorf("tls_config.max_version must be greater than or equal to tls_config.min_version if both are specified") + return nil, errors.New("tls_config.max_version must be greater than or equal to tls_config.min_version if both are specified") } } @@ -1144,19 +1160,19 @@ func (c *TLSConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { // used. func (c *TLSConfig) Validate() error { if nonZeroCount(len(c.CA) > 0, len(c.CAFile) > 0, len(c.CARef) > 0) > 1 { - return fmt.Errorf("at most one of ca, ca_file & ca_ref must be configured") + return errors.New("at most one of ca, ca_file & ca_ref must be configured") } if nonZeroCount(len(c.Cert) > 0, len(c.CertFile) > 0, len(c.CertRef) > 0) > 1 { - return fmt.Errorf("at most one of cert, cert_file & cert_ref must be configured") + return errors.New("at most one of cert, cert_file & cert_ref must be configured") } if nonZeroCount(len(c.Key) > 0, len(c.KeyFile) > 0, len(c.KeyRef) > 0) > 1 { - return fmt.Errorf("at most one of key and key_file must be configured") + return errors.New("at most one of key and key_file must be configured") } if c.usingClientCert() && !c.usingClientKey() { - return fmt.Errorf("exactly one of key or key_file must be configured when a client certificate is configured") + return errors.New("exactly one of key or key_file must be configured when a client certificate is configured") } else if c.usingClientKey() && !c.usingClientCert() { - return fmt.Errorf("exactly one of cert or cert_file must be configured when a client key is configured") + return errors.New("exactly one of cert or cert_file must be configured when a client key is configured") } return nil @@ -1259,6 +1275,10 @@ type TLSRoundTripperSettings struct { Key SecretReader } +func (t *TLSRoundTripperSettings) immutable() bool { + return (t.CA == nil || t.CA.Immutable()) && (t.Cert == nil || t.Cert.Immutable()) && (t.Key == nil || t.Key.Immutable()) +} + func NewTLSRoundTripper( cfg *tls.Config, settings TLSRoundTripperSettings, @@ -1456,16 +1476,16 @@ type ProxyConfig struct { // UnmarshalYAML implements the yaml.Unmarshaler interface. 
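// Editor's note on the oauth2 hunk above: storing newSecret, rather than the
// value read at the top of RoundTrip, appears to be the point of the fix; the
// rotation check then compares against the secret the token source was
// actually rebuilt with, instead of rebuilding the source on every request
// after a single rotation.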
func (c *ProxyConfig) Validate() error { if len(c.ProxyConnectHeader) > 0 && (!c.ProxyFromEnvironment && (c.ProxyURL.URL == nil || c.ProxyURL.String() == "")) { - return fmt.Errorf("if proxy_connect_header is configured, proxy_url or proxy_from_environment must also be configured") + return errors.New("if proxy_connect_header is configured, proxy_url or proxy_from_environment must also be configured") } if c.ProxyFromEnvironment && c.ProxyURL.URL != nil && c.ProxyURL.String() != "" { - return fmt.Errorf("if proxy_from_environment is configured, proxy_url must not be configured") + return errors.New("if proxy_from_environment is configured, proxy_url must not be configured") } if c.ProxyFromEnvironment && c.NoProxy != "" { - return fmt.Errorf("if proxy_from_environment is configured, no_proxy must not be configured") + return errors.New("if proxy_from_environment is configured, no_proxy must not be configured") } if c.ProxyURL.URL == nil && c.NoProxy != "" { - return fmt.Errorf("if no_proxy is configured, proxy_url must also be configured") + return errors.New("if no_proxy is configured, proxy_url must also be configured") } return nil } diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index 25cfaa21643..1448439b7f7 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -45,7 +45,7 @@ func ResponseFormat(h http.Header) Format { mediatype, params, err := mime.ParseMediaType(ct) if err != nil { - return fmtUnknown + return FmtUnknown } const textType = "text/plain" @@ -53,21 +53,21 @@ func ResponseFormat(h http.Header) Format { switch mediatype { case ProtoType: if p, ok := params["proto"]; ok && p != ProtoProtocol { - return fmtUnknown + return FmtUnknown } if e, ok := params["encoding"]; ok && e != "delimited" { - return fmtUnknown + return FmtUnknown } - return fmtProtoDelim + return FmtProtoDelim case textType: if v, ok := params["version"]; ok && v != TextVersion { - return fmtUnknown + return FmtUnknown } - return fmtText + return FmtText } - return fmtUnknown + return FmtUnknown } // NewDecoder returns a new decoder based on the given input format. diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index ff5ef7a9d92..d7f3d76f55d 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -68,7 +68,7 @@ func Negotiate(h http.Header) Format { if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" { switch Format(escapeParam) { case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues: - escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam)) + escapingScheme = Format("; escaping=" + escapeParam) default: // If the escaping parameter is unknown, ignore it. 
} @@ -77,18 +77,18 @@ func Negotiate(h http.Header) Format { if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { switch ac.Params["encoding"] { case "delimited": - return fmtProtoDelim + escapingScheme + return FmtProtoDelim + escapingScheme case "text": - return fmtProtoText + escapingScheme + return FmtProtoText + escapingScheme case "compact-text": - return fmtProtoCompact + escapingScheme + return FmtProtoCompact + escapingScheme } } if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return fmtText + escapingScheme + return FmtText + escapingScheme } } - return fmtText + escapingScheme + return FmtText + escapingScheme } // NegotiateIncludingOpenMetrics works like Negotiate but includes @@ -101,7 +101,7 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format { if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" { switch Format(escapeParam) { case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues: - escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam)) + escapingScheme = Format("; escaping=" + escapeParam) default: // If the escaping parameter is unknown, ignore it. } @@ -110,26 +110,26 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format { if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { switch ac.Params["encoding"] { case "delimited": - return fmtProtoDelim + escapingScheme + return FmtProtoDelim + escapingScheme case "text": - return fmtProtoText + escapingScheme + return FmtProtoText + escapingScheme case "compact-text": - return fmtProtoCompact + escapingScheme + return FmtProtoCompact + escapingScheme } } if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return fmtText + escapingScheme + return FmtText + escapingScheme } if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") { switch ver { case OpenMetricsVersion_1_0_0: - return fmtOpenMetrics_1_0_0 + escapingScheme + return FmtOpenMetrics_1_0_0 + escapingScheme default: - return fmtOpenMetrics_0_0_1 + escapingScheme + return FmtOpenMetrics_0_0_1 + escapingScheme } } } - return fmtText + escapingScheme + return FmtText + escapingScheme } // NewEncoder returns a new encoder based on content type negotiation. All diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go index 051b38cd178..b26886560d7 100644 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -15,7 +15,7 @@ package expfmt import ( - "fmt" + "errors" "strings" "github.com/prometheus/common/model" @@ -32,24 +32,31 @@ type Format string // it on the wire, new content-type strings will have to be agreed upon and // added here. const ( - TextVersion = "0.0.4" - ProtoType = `application/vnd.google.protobuf` - ProtoProtocol = `io.prometheus.client.MetricFamily` - protoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + TextVersion = "0.0.4" + ProtoType = `application/vnd.google.protobuf` + ProtoProtocol = `io.prometheus.client.MetricFamily` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. + ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" OpenMetricsType = `application/openmetrics-text` OpenMetricsVersion_0_0_1 = "0.0.1" OpenMetricsVersion_1_0_0 = "1.0.0" - // The Content-Type values for the different wire protocols. 
Note that these - // values are now unexported. If code was relying on comparisons to these - // constants, instead use FormatType(). - fmtUnknown Format = `` - fmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` - fmtProtoDelim Format = protoFmt + ` encoding=delimited` - fmtProtoText Format = protoFmt + ` encoding=text` - fmtProtoCompact Format = protoFmt + ` encoding=compact-text` - fmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` - fmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` + // The Content-Type values for the different wire protocols. Do not do direct + // comparisons to these constants, instead use the comparison functions. + // Deprecated: Use expfmt.NewFormat(expfmt.TypeUnknown) instead. + FmtUnknown Format = `` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeTextPlain) instead. + FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoDelim) instead. + FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoText) instead. + FmtProtoText Format = ProtoFmt + ` encoding=text` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. + FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. + FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. + FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` ) const ( @@ -79,17 +86,17 @@ const ( func NewFormat(t FormatType) Format { switch t { case TypeProtoCompact: - return fmtProtoCompact + return FmtProtoCompact case TypeProtoDelim: - return fmtProtoDelim + return FmtProtoDelim case TypeProtoText: - return fmtProtoText + return FmtProtoText case TypeTextPlain: - return fmtText + return FmtText case TypeOpenMetrics: - return fmtOpenMetrics_1_0_0 + return FmtOpenMetrics_1_0_0 default: - return fmtUnknown + return FmtUnknown } } @@ -97,12 +104,35 @@ func NewFormat(t FormatType) Format { // specified version number. func NewOpenMetricsFormat(version string) (Format, error) { if version == OpenMetricsVersion_0_0_1 { - return fmtOpenMetrics_0_0_1, nil + return FmtOpenMetrics_0_0_1, nil } if version == OpenMetricsVersion_1_0_0 { - return fmtOpenMetrics_1_0_0, nil + return FmtOpenMetrics_1_0_0, nil } - return fmtUnknown, fmt.Errorf("unknown open metrics version string") + return FmtUnknown, errors.New("unknown open metrics version string") +} + +// WithEscapingScheme returns a copy of Format with the specified escaping +// scheme appended to the end. If an escaping scheme already exists it is +// removed. +func (f Format) WithEscapingScheme(s model.EscapingScheme) Format { + var terms []string + for _, p := range strings.Split(string(f), ";") { + toks := strings.Split(p, "=") + if len(toks) != 2 { + trimmed := strings.TrimSpace(p) + if len(trimmed) > 0 { + terms = append(terms, trimmed) + } + continue + } + key := strings.TrimSpace(toks[0]) + if key != model.EscapingKey { + terms = append(terms, strings.TrimSpace(p)) + } + } + terms = append(terms, model.EscapingKey+"="+s.String()) + return Format(strings.Join(terms, "; ")) } // FormatType deduces an overall FormatType for the given format. 
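// Editor's note: a self-contained, hedged sketch (not part of the diff)
// exercising the re-exported Format constants and the new WithEscapingScheme
// helper above; NewFormat is preferred over the deprecated Fmt* names.
package main

import (
	"fmt"

	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func main() {
	f := expfmt.NewFormat(expfmt.TypeTextPlain)
	// WithEscapingScheme drops any existing escaping term and appends its own.
	f = f.WithEscapingScheme(model.UnderscoreEscaping)
	fmt.Println(f) // text/plain; version=0.0.4; charset=utf-8; escaping=underscores
}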
diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index 353c5e93f92..a21ed4ec1f8 100644 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -38,7 +38,7 @@ type EncoderOption func(*encoderOption) // WithCreatedLines is an EncoderOption that configures the OpenMetrics encoder // to include _created lines (See -// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#counter-1). +// https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#counter-1). // Created timestamps can improve the accuracy of series reset detection, but // come with a bandwidth cost. // @@ -102,7 +102,7 @@ func WithUnit() EncoderOption { // // - According to the OM specs, the `# UNIT` line is optional, but if populated, // the unit has to be present in the metric name as its suffix: -// (see https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#unit). +// (see https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#unit). // However, in order to accommodate any potential scenario where such a change in the // metric name is not desirable, the users are here given the choice of either explicitly // opt in, in case they wish for the unit to be included in the output AND in the metric name @@ -152,8 +152,8 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E if metricType == dto.MetricType_COUNTER && strings.HasSuffix(compliantName, "_total") { compliantName = name[:len(name)-6] } - if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, fmt.Sprintf("_%s", *in.Unit)) { - compliantName = compliantName + fmt.Sprintf("_%s", *in.Unit) + if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, "_"+*in.Unit) { + compliantName = compliantName + "_" + *in.Unit } // Comments, first HELP, then TYPE. @@ -477,7 +477,7 @@ func writeOpenMetricsNameAndLabelPairs( if name != "" { // If the name does not pass the legacy validity check, we must put the // metric name inside the braces, quoted. - if !model.IsValidLegacyMetricName(model.LabelValue(name)) { + if !model.IsValidLegacyMetricName(name) { metricInsideBraces = true err := w.WriteByte(separator) written++ diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go index f9b8265a9ec..4b86434b332 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -354,7 +354,7 @@ func writeNameAndLabelPairs( if name != "" { // If the name does not pass the legacy validity check, we must put the // metric name inside the braces. - if !model.IsValidLegacyMetricName(model.LabelValue(name)) { + if !model.IsValidLegacyMetricName(name) { metricInsideBraces = true err := w.WriteByte(separator) written++ @@ -498,7 +498,7 @@ func writeInt(w enhancedWriter, i int64) (int, error) { // writeName writes a string as-is if it complies with the legacy naming // scheme, or escapes it in double quotes if not. 
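// Editor's note on the recurring signature change above: IsValidLegacyMetricName
// now takes a plain string, so call sites drop their model.LabelValue
// conversions. A hedged sketch of the behaviour, which is independent of
// NameValidationScheme:
//
//	model.IsValidLegacyMetricName("http_requests_total") // true
//	model.IsValidLegacyMetricName("http.requests")       // false: '.' is outside the legacy charset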
func writeName(w enhancedWriter, name string) (int, error) { - if model.IsValidLegacyMetricName(model.LabelValue(name)) { + if model.IsValidLegacyMetricName(name) { return w.WriteString(name) } var written int diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index 26490211af2..b4607fe4d27 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -22,9 +22,9 @@ import ( "math" "strconv" "strings" + "unicode/utf8" dto "github.com/prometheus/client_model/go" - "google.golang.org/protobuf/proto" "github.com/prometheus/common/model" @@ -60,6 +60,7 @@ type TextParser struct { currentMF *dto.MetricFamily currentMetric *dto.Metric currentLabelPair *dto.LabelPair + currentLabelPairs []*dto.LabelPair // Temporarily stores label pairs while parsing a metric line. // The remaining member variables are only used for summaries/histograms. currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le' @@ -74,6 +75,9 @@ type TextParser struct { // count and sum of that summary/histogram. currentIsSummaryCount, currentIsSummarySum bool currentIsHistogramCount, currentIsHistogramSum bool + // These indicate if the metric name from the current line being parsed is inside + // braces and if that metric name was found respectively. + currentMetricIsInsideBraces, currentMetricInsideBracesIsPresent bool } // TextToMetricFamilies reads 'in' as the simple and flat text-based exchange @@ -137,12 +141,15 @@ func (p *TextParser) reset(in io.Reader) { } p.currentQuantile = math.NaN() p.currentBucket = math.NaN() + p.currentMF = nil } // startOfLine represents the state where the next byte read from p.buf is the // start of a line (or whitespace leading up to it). func (p *TextParser) startOfLine() stateFn { p.lineCount++ + p.currentMetricIsInsideBraces = false + p.currentMetricInsideBracesIsPresent = false if p.skipBlankTab(); p.err != nil { // This is the only place that we expect to see io.EOF, // which is not an error but the signal that we are done. @@ -158,6 +165,9 @@ func (p *TextParser) startOfLine() stateFn { return p.startComment case '\n': return p.startOfLine // Empty line, start the next one. + case '{': + p.currentMetricIsInsideBraces = true + return p.readingLabels } return p.readingMetricName } @@ -275,6 +285,8 @@ func (p *TextParser) startLabelName() stateFn { return nil // Unexpected end of input. } if p.currentByte == '}' { + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) + p.currentLabelPairs = nil if p.skipBlankTab(); p.err != nil { return nil // Unexpected end of input. } @@ -287,6 +299,45 @@ func (p *TextParser) startLabelName() stateFn { p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) return nil } + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. 
+ } + if p.currentByte != '=' { + if p.currentMetricIsInsideBraces { + if p.currentMetricInsideBracesIsPresent { + p.parseError(fmt.Sprintf("multiple metric names for metric %q", p.currentMF.GetName())) + return nil + } + switch p.currentByte { + case ',': + p.setOrCreateCurrentMF() + if p.currentMF.Type == nil { + p.currentMF.Type = dto.MetricType_UNTYPED.Enum() + } + p.currentMetric = &dto.Metric{} + p.currentMetricInsideBracesIsPresent = true + return p.startLabelName + case '}': + p.setOrCreateCurrentMF() + if p.currentMF.Type == nil { + p.currentMF.Type = dto.MetricType_UNTYPED.Enum() + } + p.currentMetric = &dto.Metric{} + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) + p.currentLabelPairs = nil + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingValue + default: + p.parseError(fmt.Sprintf("unexpected end of metric name %q", p.currentByte)) + return nil + } + } + p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) + p.currentLabelPairs = nil + return nil + } p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) @@ -296,23 +347,17 @@ func (p *TextParser) startLabelName() stateFn { // labels to 'real' labels. if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { - p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair) - } - if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte != '=' { - p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) - return nil + p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair) } // Check for duplicate label names. labels := make(map[string]struct{}) - for _, l := range p.currentMetric.Label { + for _, l := range p.currentLabelPairs { lName := l.GetName() if _, exists := labels[lName]; !exists { labels[lName] = struct{}{} } else { p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName())) + p.currentLabelPairs = nil return nil } } @@ -345,6 +390,7 @@ func (p *TextParser) startLabelValue() stateFn { if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { // Create a more helpful error message. p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) + p.currentLabelPairs = nil return nil } } else { @@ -371,12 +417,19 @@ func (p *TextParser) startLabelValue() stateFn { return p.startLabelName case '}': + if p.currentMF == nil { + p.parseError("invalid metric name") + return nil + } + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) + p.currentLabelPairs = nil if p.skipBlankTab(); p.err != nil { return nil // Unexpected end of input. 
} return p.readingValue default: p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue())) + p.currentLabelPairs = nil return nil } } @@ -585,6 +638,8 @@ func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { p.currentToken.WriteByte(p.currentByte) case 'n': p.currentToken.WriteByte('\n') + case '"': + p.currentToken.WriteByte('"') default: p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) return @@ -610,13 +665,45 @@ func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { // but not into p.currentToken. func (p *TextParser) readTokenAsMetricName() { p.currentToken.Reset() + // A UTF-8 metric name must be quoted and may have escaped characters. + quoted := false + escaped := false if !isValidMetricNameStart(p.currentByte) { return } - for { - p.currentToken.WriteByte(p.currentByte) + for p.err == nil { + if escaped { + switch p.currentByte { + case '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + case '"': + p.currentToken.WriteByte('"') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + } else { + switch p.currentByte { + case '"': + quoted = !quoted + if !quoted { + p.currentByte, p.err = p.buf.ReadByte() + return + } + case '\n': + p.parseError(fmt.Sprintf("metric name %q contains unescaped new-line", p.currentToken.String())) + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } p.currentByte, p.err = p.buf.ReadByte() - if p.err != nil || !isValidMetricNameContinuation(p.currentByte) { + if !isValidMetricNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == ' ') { return } } @@ -628,13 +715,45 @@ func (p *TextParser) readTokenAsMetricName() { // but not into p.currentToken. func (p *TextParser) readTokenAsLabelName() { p.currentToken.Reset() + // A UTF-8 label name must be quoted and may have escaped characters. 
+ quoted := false + escaped := false if !isValidLabelNameStart(p.currentByte) { return } - for { - p.currentToken.WriteByte(p.currentByte) + for p.err == nil { + if escaped { + switch p.currentByte { + case '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + case '"': + p.currentToken.WriteByte('"') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + } else { + switch p.currentByte { + case '"': + quoted = !quoted + if !quoted { + p.currentByte, p.err = p.buf.ReadByte() + return + } + case '\n': + p.parseError(fmt.Sprintf("label name %q contains unescaped new-line", p.currentToken.String())) + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } p.currentByte, p.err = p.buf.ReadByte() - if p.err != nil || !isValidLabelNameContinuation(p.currentByte) { + if !isValidLabelNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == '=') { return } } @@ -660,6 +779,7 @@ func (p *TextParser) readTokenAsLabelValue() { p.currentToken.WriteByte('\n') default: p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + p.currentLabelPairs = nil return } escaped = false @@ -718,19 +838,19 @@ func (p *TextParser) setOrCreateCurrentMF() { } func isValidLabelNameStart(b byte) bool { - return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == '"' } -func isValidLabelNameContinuation(b byte) bool { - return isValidLabelNameStart(b) || (b >= '0' && b <= '9') +func isValidLabelNameContinuation(b byte, quoted bool) bool { + return isValidLabelNameStart(b) || (b >= '0' && b <= '9') || (quoted && utf8.ValidString(string(b))) } func isValidMetricNameStart(b byte) bool { return isValidLabelNameStart(b) || b == ':' } -func isValidMetricNameContinuation(b byte) bool { - return isValidLabelNameContinuation(b) || b == ':' +func isValidMetricNameContinuation(b byte, quoted bool) bool { + return isValidLabelNameContinuation(b, quoted) || b == ':' } func isBlankOrTab(b byte) bool { @@ -775,7 +895,7 @@ func histogramMetricName(name string) string { func parseFloat(s string) (float64, error) { if strings.ContainsAny(s, "pP_") { - return 0, fmt.Errorf("unsupported character in float") + return 0, errors.New("unsupported character in float") } return strconv.ParseFloat(s, 64) } diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go index 80d1fe944ea..bd3a39e3e14 100644 --- a/vendor/github.com/prometheus/common/model/alert.go +++ b/vendor/github.com/prometheus/common/model/alert.go @@ -14,6 +14,7 @@ package model import ( + "errors" "fmt" "time" ) @@ -89,16 +90,16 @@ func (a *Alert) StatusAt(ts time.Time) AlertStatus { // Validate checks whether the alert data is inconsistent. 
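// Editor's note: the text_parse.go hunks above teach the parser the quoted
// UTF-8 syntax. A hedged sample of an exposition line that now parses (and
// previously errored), with the metric name itself quoted inside the braces:
//
//	{"my.metric","my.label"="value"} 3.14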
func (a *Alert) Validate() error { if a.StartsAt.IsZero() { - return fmt.Errorf("start time missing") + return errors.New("start time missing") } if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) { - return fmt.Errorf("start time must be before end time") + return errors.New("start time must be before end time") } if err := a.Labels.Validate(); err != nil { return fmt.Errorf("invalid label set: %w", err) } if len(a.Labels) == 0 { - return fmt.Errorf("at least one label pair required") + return errors.New("at least one label pair required") } if err := a.Annotations.Validate(); err != nil { return fmt.Errorf("invalid annotations: %w", err) diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go index 3317ce22ff7..73b7aa3e60b 100644 --- a/vendor/github.com/prometheus/common/model/labels.go +++ b/vendor/github.com/prometheus/common/model/labels.go @@ -97,26 +97,35 @@ var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") // therewith. type LabelName string -// IsValid returns true iff name matches the pattern of LabelNameRE for legacy -// names, and iff it's valid UTF-8 if NameValidationScheme is set to -// UTF8Validation. For the legacy matching, it does not use LabelNameRE for the -// check but a much faster hardcoded implementation. +// IsValid returns true iff the name matches the pattern of LabelNameRE when +// NameValidationScheme is set to LegacyValidation, or valid UTF-8 if +// NameValidationScheme is set to UTF8Validation. func (ln LabelName) IsValid() bool { if len(ln) == 0 { return false } switch NameValidationScheme { case LegacyValidation: - for i, b := range ln { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { - return false - } - } + return ln.IsValidLegacy() case UTF8Validation: return utf8.ValidString(string(ln)) default: panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) } +} + +// IsValidLegacy returns true iff name matches the pattern of LabelNameRE for +// legacy names. It does not use LabelNameRE for the check but a much faster +// hardcoded implementation. +func (ln LabelName) IsValidLegacy() bool { + if len(ln) == 0 { + return false + } + for i, b := range ln { + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { + return false + } + } return true } diff --git a/vendor/github.com/prometheus/common/model/labelset_string.go b/vendor/github.com/prometheus/common/model/labelset_string.go index 481c47b46e5..abb2c900183 100644 --- a/vendor/github.com/prometheus/common/model/labelset_string.go +++ b/vendor/github.com/prometheus/common/model/labelset_string.go @@ -11,8 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build go1.21 - package model import ( diff --git a/vendor/github.com/prometheus/common/model/labelset_string_go120.go b/vendor/github.com/prometheus/common/model/labelset_string_go120.go deleted file mode 100644 index c4212685e71..00000000000 --- a/vendor/github.com/prometheus/common/model/labelset_string_go120.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2024 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !go1.21 - -package model - -import ( - "fmt" - "sort" - "strings" -) - -// String was optimized using functions not available for go 1.20 -// or lower. We keep the old implementation for compatibility with client_golang. -// Once client golang drops support for go 1.20 (scheduled for August 2024), this -// file can be removed. -func (l LabelSet) String() string { - labelNames := make([]string, 0, len(l)) - for name := range l { - labelNames = append(labelNames, string(name)) - } - sort.Strings(labelNames) - lstrs := make([]string, 0, len(l)) - for _, name := range labelNames { - lstrs = append(lstrs, fmt.Sprintf("%s=%q", name, l[LabelName(name)])) - } - return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) -} diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go index eb865e5a59c..5766107cf95 100644 --- a/vendor/github.com/prometheus/common/model/metric.go +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -14,9 +14,11 @@ package model import ( + "errors" "fmt" "regexp" "sort" + "strconv" "strings" "unicode/utf8" @@ -26,18 +28,21 @@ import ( var ( // NameValidationScheme determines the method of name validation to be used by - // all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8 mode - // in isolation from other components that don't support UTF-8 may result in - // bugs or other undefined behavior. This value is intended to be set by - // UTF-8-aware binaries as part of their startup. To avoid need for locking, - // this value should be set once, ideally in an init(), before multiple - // goroutines are started. - NameValidationScheme = LegacyValidation - - // NameEscapingScheme defines the default way that names will be - // escaped when presented to systems that do not support UTF-8 names. If the - // Content-Type "escaping" term is specified, that will override this value. - NameEscapingScheme = ValueEncodingEscaping + // all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8 + // mode in isolation from other components that don't support UTF-8 may result + // in bugs or other undefined behavior. This value can be set to + // LegacyValidation during startup if a binary is not UTF-8-aware. To + // avoid need for locking, this value should be set once, ideally in an + // init(), before multiple goroutines are started. + NameValidationScheme = UTF8Validation + + // NameEscapingScheme defines the default way that names will be escaped when + // presented to systems that do not support UTF-8 names. If the Content-Type + // "escaping" term is specified, that will override this value. + // NameEscapingScheme should not be set to the NoEscaping value. That string + // is used in content negotiation to indicate that a system supports UTF-8 and + // has that feature enabled.
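// Editor's note: this hunk flips two package defaults, NameValidationScheme to
// UTF8Validation (above) and NameEscapingScheme to UnderscoreEscaping (just
// below). Per the new comment, binaries that need the old behaviour opt out
// early, along these lines:
//
//	func init() {
//		model.NameValidationScheme = model.LegacyValidation
//	}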
+ NameEscapingScheme = UnderscoreEscaping ) // ValidationScheme is a Go enum for determining how metric and label names will @@ -161,7 +166,7 @@ func (m Metric) FastFingerprint() Fingerprint { func IsValidMetricName(n LabelValue) bool { switch NameValidationScheme { case LegacyValidation: - return IsValidLegacyMetricName(n) + return IsValidLegacyMetricName(string(n)) case UTF8Validation: if len(n) == 0 { return false @@ -176,7 +181,7 @@ func IsValidMetricName(n LabelValue) bool { // legacy validation scheme regardless of the value of NameValidationScheme. // This function, however, does not use MetricNameRE for the check but a much // faster hardcoded implementation. -func IsValidLegacyMetricName(n LabelValue) bool { +func IsValidLegacyMetricName(n string) bool { if len(n) == 0 { return false } @@ -208,7 +213,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF } // If the name is nil, copy as-is, don't try to escape. - if v.Name == nil || IsValidLegacyMetricName(LabelValue(v.GetName())) { + if v.Name == nil || IsValidLegacyMetricName(v.GetName()) { out.Name = v.Name } else { out.Name = proto.String(EscapeName(v.GetName(), scheme)) @@ -230,7 +235,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF for _, l := range m.Label { if l.GetName() == MetricNameLabel { - if l.Value == nil || IsValidLegacyMetricName(LabelValue(l.GetValue())) { + if l.Value == nil || IsValidLegacyMetricName(l.GetValue()) { escaped.Label = append(escaped.Label, l) continue } @@ -240,7 +245,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF }) continue } - if l.Name == nil || IsValidLegacyMetricName(LabelValue(l.GetName())) { + if l.Name == nil || IsValidLegacyMetricName(l.GetName()) { escaped.Label = append(escaped.Label, l) continue } @@ -256,20 +261,16 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF func metricNeedsEscaping(m *dto.Metric) bool { for _, l := range m.Label { - if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(LabelValue(l.GetValue())) { + if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(l.GetValue()) { return true } - if !IsValidLegacyMetricName(LabelValue(l.GetName())) { + if !IsValidLegacyMetricName(l.GetName()) { return true } } return false } -const ( - lowerhex = "0123456789abcdef" -) - // EscapeName escapes the incoming name according to the provided escaping // scheme. Depending on the rules of escaping, this may cause no change in the // string that is returned. 
(Especially NoEscaping, which by definition is a @@ -283,7 +284,7 @@ func EscapeName(name string, scheme EscapingScheme) string { case NoEscaping: return name case UnderscoreEscaping: - if IsValidLegacyMetricName(LabelValue(name)) { + if IsValidLegacyMetricName(name) { return name } for i, b := range name { @@ -304,31 +305,25 @@ func EscapeName(name string, scheme EscapingScheme) string { } else if isValidLegacyRune(b, i) { escaped.WriteRune(b) } else { - escaped.WriteRune('_') + escaped.WriteString("__") } } return escaped.String() case ValueEncodingEscaping: - if IsValidLegacyMetricName(LabelValue(name)) { + if IsValidLegacyMetricName(name) { return name } escaped.WriteString("U__") for i, b := range name { - if isValidLegacyRune(b, i) { + if b == '_' { + escaped.WriteString("__") + } else if isValidLegacyRune(b, i) { escaped.WriteRune(b) } else if !utf8.ValidRune(b) { escaped.WriteString("_FFFD_") - } else if b < 0x100 { - escaped.WriteRune('_') - for s := 4; s >= 0; s -= 4 { - escaped.WriteByte(lowerhex[b>>uint(s)&0xF]) - } - escaped.WriteRune('_') - } else if b < 0x10000 { + } else { escaped.WriteRune('_') - for s := 12; s >= 0; s -= 4 { - escaped.WriteByte(lowerhex[b>>uint(s)&0xF]) - } + escaped.WriteString(strconv.FormatInt(int64(b), 16)) escaped.WriteRune('_') } } @@ -386,8 +381,9 @@ func UnescapeName(name string, scheme EscapingScheme) string { // We think we are in a UTF-8 code, process it. var utf8Val uint for j := 0; i < len(escapedName); j++ { - // This is too many characters for a utf8 value. - if j > 4 { + // This is too many characters for a utf8 value based on the MaxRune + // value of '\U0010FFFF'. + if j >= 6 { return name } // Found a closing underscore, convert to a rune, check validity, and append. @@ -440,7 +436,7 @@ func (e EscapingScheme) String() string { func ToEscapingScheme(s string) (EscapingScheme, error) { if s == "" { - return NoEscaping, fmt.Errorf("got empty string instead of escaping scheme") + return NoEscaping, errors.New("got empty string instead of escaping scheme") } switch s { case AllowUTF8: @@ -452,6 +448,6 @@ func ToEscapingScheme(s string) (EscapingScheme, error) { case EscapeValues: return ValueEncodingEscaping, nil default: - return NoEscaping, fmt.Errorf("unknown format scheme " + s) + return NoEscaping, fmt.Errorf("unknown format scheme %s", s) } } diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go index 910b0b71fcc..8f91a9702e0 100644 --- a/vendor/github.com/prometheus/common/model/silence.go +++ b/vendor/github.com/prometheus/common/model/silence.go @@ -15,6 +15,7 @@ package model import ( "encoding/json" + "errors" "fmt" "regexp" "time" @@ -34,7 +35,7 @@ func (m *Matcher) UnmarshalJSON(b []byte) error { } if len(m.Name) == 0 { - return fmt.Errorf("label name in matcher must not be empty") + return errors.New("label name in matcher must not be empty") } if m.IsRegex { if _, err := regexp.Compile(m.Value); err != nil { @@ -77,7 +78,7 @@ type Silence struct { // Validate returns true iff all fields of the silence have valid values. 
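// Editor's note, a hedged sketch of the value-encoding rules as changed in
// metric.go above: underscores now escape to "__" and other invalid runes to
// _hex_, so the round trip below is assumed to hold:
//
//	model.EscapeName("foo.bar_baz", model.ValueEncodingEscaping)           // "U__foo_2e_bar__baz"
//	model.UnescapeName("U__foo_2e_bar__baz", model.ValueEncodingEscaping)  // "foo.bar_baz"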
func (s *Silence) Validate() error { if len(s.Matchers) == 0 { - return fmt.Errorf("at least one matcher required") + return errors.New("at least one matcher required") } for _, m := range s.Matchers { if err := m.Validate(); err != nil { @@ -85,22 +86,22 @@ func (s *Silence) Validate() error { } } if s.StartsAt.IsZero() { - return fmt.Errorf("start time missing") + return errors.New("start time missing") } if s.EndsAt.IsZero() { - return fmt.Errorf("end time missing") + return errors.New("end time missing") } if s.EndsAt.Before(s.StartsAt) { - return fmt.Errorf("start time must be before end time") + return errors.New("start time must be before end time") } if s.CreatedBy == "" { - return fmt.Errorf("creator information missing") + return errors.New("creator information missing") } if s.Comment == "" { - return fmt.Errorf("comment missing") + return errors.New("comment missing") } if s.CreatedAt.IsZero() { - return fmt.Errorf("creation timestamp missing") + return errors.New("creation timestamp missing") } return nil } diff --git a/vendor/github.com/prometheus/common/model/value_float.go b/vendor/github.com/prometheus/common/model/value_float.go index ae35cc2ab4b..6bfc757d18b 100644 --- a/vendor/github.com/prometheus/common/model/value_float.go +++ b/vendor/github.com/prometheus/common/model/value_float.go @@ -15,6 +15,7 @@ package model import ( "encoding/json" + "errors" "fmt" "math" "strconv" @@ -39,7 +40,7 @@ func (v SampleValue) MarshalJSON() ([]byte, error) { // UnmarshalJSON implements json.Unmarshaler. func (v *SampleValue) UnmarshalJSON(b []byte) error { if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { - return fmt.Errorf("sample value must be a quoted string") + return errors.New("sample value must be a quoted string") } f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) if err != nil { diff --git a/vendor/github.com/prometheus/common/model/value_histogram.go b/vendor/github.com/prometheus/common/model/value_histogram.go index 54bb038cfff..895e6a3e839 100644 --- a/vendor/github.com/prometheus/common/model/value_histogram.go +++ b/vendor/github.com/prometheus/common/model/value_histogram.go @@ -15,6 +15,7 @@ package model import ( "encoding/json" + "errors" "fmt" "strconv" "strings" @@ -32,7 +33,7 @@ func (v FloatString) MarshalJSON() ([]byte, error) { func (v *FloatString) UnmarshalJSON(b []byte) error { if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { - return fmt.Errorf("float value must be a quoted string") + return errors.New("float value must be a quoted string") } f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) if err != nil { @@ -141,7 +142,7 @@ type SampleHistogramPair struct { func (s SampleHistogramPair) MarshalJSON() ([]byte, error) { if s.Histogram == nil { - return nil, fmt.Errorf("histogram is nil") + return nil, errors.New("histogram is nil") } t, err := json.Marshal(s.Timestamp) if err != nil { @@ -164,7 +165,7 @@ func (s *SampleHistogramPair) UnmarshalJSON(buf []byte) error { return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen) } if s.Histogram == nil { - return fmt.Errorf("histogram is null") + return errors.New("histogram is null") } return nil } diff --git a/vendor/github.com/prometheus/common/version/info.go b/vendor/github.com/prometheus/common/version/info.go index 197d95e5c8b..61ed1ba314b 100644 --- a/vendor/github.com/prometheus/common/version/info.go +++ b/vendor/github.com/prometheus/common/version/info.go @@ -90,6 +90,14 @@ func GetTags() string { return computedTags } +func PrometheusUserAgent() string { + 
return ComponentUserAgent("Prometheus") +} + +func ComponentUserAgent(component string) string { + return component + "/" + Version +} + func init() { computedRevision, computedTags = computeRevision() } diff --git a/vendor/github.com/prometheus/prometheus/promql/engine.go b/vendor/github.com/prometheus/prometheus/promql/engine.go index 25e67db6330..f6b79f3a466 100644 --- a/vendor/github.com/prometheus/prometheus/promql/engine.go +++ b/vendor/github.com/prometheus/prometheus/promql/engine.go @@ -2356,6 +2356,11 @@ loop: } else { histograms = append(histograms, HPoint{H: &histogram.FloatHistogram{}}) } + if histograms[n].H == nil { + // Make sure to pass non zero H to AtFloatHistogram so that it does a deep-copy. + // Not an issue in the loop above since that uses an intermediate buffer. + histograms[n].H = &histogram.FloatHistogram{} + } histograms[n].T, histograms[n].H = it.AtFloatHistogram(histograms[n].H) if value.IsStaleNaN(histograms[n].H.Sum) { histograms = histograms[:n] diff --git a/vendor/github.com/prometheus/prometheus/promql/histogram_stats_iterator.go b/vendor/github.com/prometheus/prometheus/promql/histogram_stats_iterator.go index dfafea5f8ca..459d5924aec 100644 --- a/vendor/github.com/prometheus/prometheus/promql/histogram_stats_iterator.go +++ b/vendor/github.com/prometheus/prometheus/promql/histogram_stats_iterator.go @@ -48,7 +48,6 @@ func (f *histogramStatsIterator) AtHistogram(h *histogram.Histogram) (int64, *hi var t int64 t, f.currentH = f.Iterator.AtHistogram(f.currentH) if value.IsStaleNaN(f.currentH.Sum) { - f.setLastH(f.currentH) h = &histogram.Histogram{Sum: f.currentH.Sum} return t, h } @@ -63,9 +62,13 @@ func (f *histogramStatsIterator) AtHistogram(h *histogram.Histogram) (int64, *hi return t, h } - h.CounterResetHint = f.getResetHint(f.currentH) - h.Count = f.currentH.Count - h.Sum = f.currentH.Sum + returnValue := histogram.Histogram{ + CounterResetHint: f.getResetHint(f.currentH), + Count: f.currentH.Count, + Sum: f.currentH.Sum, + } + returnValue.CopyTo(h) + f.setLastH(f.currentH) return t, h } @@ -77,7 +80,6 @@ func (f *histogramStatsIterator) AtFloatHistogram(fh *histogram.FloatHistogram) var t int64 t, f.currentFH = f.Iterator.AtFloatHistogram(f.currentFH) if value.IsStaleNaN(f.currentFH.Sum) { - f.setLastFH(f.currentFH) return t, &histogram.FloatHistogram{Sum: f.currentFH.Sum} } @@ -91,9 +93,13 @@ func (f *histogramStatsIterator) AtFloatHistogram(fh *histogram.FloatHistogram) return t, fh } - fh.CounterResetHint = f.getFloatResetHint(f.currentFH.CounterResetHint) - fh.Count = f.currentFH.Count - fh.Sum = f.currentFH.Sum + returnValue := histogram.FloatHistogram{ + CounterResetHint: f.getFloatResetHint(f.currentFH.CounterResetHint), + Count: f.currentFH.Count, + Sum: f.currentFH.Sum, + } + returnValue.CopyTo(fh) + f.setLastFH(f.currentFH) return t, fh } diff --git a/vendor/github.com/prometheus/prometheus/scrape/scrape.go b/vendor/github.com/prometheus/prometheus/scrape/scrape.go index 68411a62e01..ccb068b6805 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/scrape.go +++ b/vendor/github.com/prometheus/prometheus/scrape/scrape.go @@ -1631,7 +1631,7 @@ loop: updateMetadata(lset, true) } - if seriesAlreadyScraped { + if seriesAlreadyScraped && parsedTimestamp == nil { err = storage.ErrDuplicateSampleForTimestamp } else { if ctMs := p.CreatedTimestamp(); sl.enableCTZeroIngestion && ctMs != nil { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/histogram.go b/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/histogram.go 
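The engine.go and histogram_stats_iterator.go hunks above defend against the same aliasing hazard: AtFloatHistogram reuses a caller-supplied histogram, but when handed nil (or the iterator's own buffer) the returned pointer can alias internal state that a later iteration mutates. The engine now guarantees a non-nil target so a deep copy happens, and the stats iterator assembles its result and copies it via CopyTo before recording lastH. A self-contained analogy of the bug, using a hypothetical mini-iterator rather than the real Prometheus types:

```go
// Illustrative sketch only: a toy iterator that returns its internal value
// when given a nil target, versus deep-copying into caller-owned memory.
package main

import "fmt"

type hist struct{ sum float64 }

type iter struct{ current hist }

// at mirrors AtFloatHistogram's reuse contract: with a nil target it hands
// back a pointer to internal state; with a non-nil target it deep-copies.
func (it *iter) at(h *hist) *hist {
	if h == nil {
		return &it.current // caller now aliases iterator state
	}
	*h = it.current // deep copy into caller-owned memory
	return h
}

func main() {
	it := &iter{current: hist{sum: 1}}

	aliased := it.at(nil)
	owned := it.at(&hist{})

	it.current.sum = 2 // the iterator advances and mutates its buffer

	fmt.Println(aliased.sum) // 2: silently changed under the caller
	fmt.Println(owned.sum)   // 1: unaffected, as the engine now ensures
}
```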
index 3c7349cf729..ce934a638d9 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/histogram.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/histogram.go @@ -30,12 +30,10 @@ func GenerateTestHistograms(n int) (r []*histogram.Histogram) { return r } -func GenerateTestHistogramsWithUnknownResetHint(n int) []*histogram.Histogram { - hs := GenerateTestHistograms(n) - for i := range hs { - hs[i].CounterResetHint = histogram.UnknownCounterReset - } - return hs +func GenerateTestHistogramWithHint(n int, hint histogram.CounterResetHint) *histogram.Histogram { + h := GenerateTestHistogram(n) + h.CounterResetHint = hint + return h } // GenerateTestHistogram but it is up to the user to set any known counter reset hint. diff --git a/vendor/github.com/relvacode/iso8601/.gitignore b/vendor/github.com/relvacode/iso8601/.gitignore index daf913b1b34..11b90db8d96 100644 --- a/vendor/github.com/relvacode/iso8601/.gitignore +++ b/vendor/github.com/relvacode/iso8601/.gitignore @@ -22,3 +22,5 @@ _testmain.go *.exe *.test *.prof + +.idea diff --git a/vendor/github.com/relvacode/iso8601/README.md b/vendor/github.com/relvacode/iso8601/README.md index f52d571352d..5c2c1df7772 100644 --- a/vendor/github.com/relvacode/iso8601/README.md +++ b/vendor/github.com/relvacode/iso8601/README.md @@ -33,21 +33,9 @@ func main() { ## Benchmark ``` -BenchmarkParse-16 13364954 77.7 ns/op 0 B/op 0 allocs/op +goos: linux +goarch: amd64 +pkg: github.com/relvacode/iso8601 +cpu: AMD Ryzen 7 7840U w/ Radeon 780M Graphics +BenchmarkParse-16 35880919 30.89 ns/op 0 B/op 0 allocs/op ``` - -## Release History - - - `1.3.0` - - Allow a leading `+` sign in the year component [#11](https://github.com/relvacode/iso8601/issues/11) - - `1.2.0` - - Time range validity checking equivalent to the standard library. - Note that previous versions would not validate that a given date string was in the expected range. Additionally, this version no longer accepts `0000-00-00T00:00:00` as a valid input which can be the zero time representation in other languages nor does it support leap seconds (such that the seconds field is `60`) as is the case in the [standard library](https://github.com/golang/go/issues/15247) - - `1.1.0` - - Check for `-0` time zone - - `1.0.0` - - Initial release diff --git a/vendor/github.com/relvacode/iso8601/iso8601.go b/vendor/github.com/relvacode/iso8601/iso8601.go index fd4c57756d6..266d871c73b 100644 --- a/vendor/github.com/relvacode/iso8601/iso8601.go +++ b/vendor/github.com/relvacode/iso8601/iso8601.go @@ -27,6 +27,7 @@ const ( // ParseISOZone parses the 5 character zone information in an ISO8601 date string. // This function expects input that matches: // +// Z, z (UTC) // -0100 // +0100 // +01:00 @@ -35,11 +36,13 @@ const ( // +01:45 // +0145 func ParseISOZone(inp []byte) (*time.Location, error) { - if len(inp) < 3 || len(inp) > 6 { + if len(inp) != 1 && (len(inp) < 3 || len(inp) > 6) { return nil, ErrZoneCharacters } var neg bool switch inp[0] { + case 'Z', 'z': + return time.UTC, nil case '+': case '-': neg = true @@ -87,6 +90,12 @@ func ParseISOZone(inp []byte) (*time.Location, error) { // Parse parses an ISO8601 compliant date-time byte slice into a time.Time object. // If any component of an input date-time is not within the expected range then an *iso8601.RangeError is returned. func Parse(inp []byte) (time.Time, error) { + return ParseInLocation(inp, time.UTC) +} + +// ParseInLocation parses an ISO8601 compliant date-time byte slice into a time.Time object. 
+// If the input does not have timezone information, it will use the given location. +func ParseInLocation(inp []byte, loc *time.Location) (time.Time, error) { var ( Y uint M uint @@ -98,9 +107,6 @@ func Parse(inp []byte) (time.Time, error) { nfraction = 1 //counts amount of precision for the second fraction ) - // Always assume UTC by default - var loc = time.UTC - var c uint var p = year @@ -131,7 +137,7 @@ parse: continue } fallthrough - case '+': + case '+', 'Z': if i == 0 { // The ISO8601 technically allows signed year components. // Go does not allow negative years, but let's allow a positive sign to be more compatible with the spec. @@ -185,23 +191,6 @@ parse: s = c c = 0 p++ - case 'Z': - switch p { - case hour: - h = c - case minute: - m = c - case second: - s = c - case millisecond: - fraction = int(c) - default: - return time.Time{}, newUnexpectedCharacterError(inp[i]) - } - c = 0 - if len(inp) != i+1 { - return time.Time{}, ErrRemainingData - } default: return time.Time{}, newUnexpectedCharacterError(inp[i]) } @@ -215,6 +204,9 @@ parse: Y = c M = 1 d = 1 + case month: + M = c + d = 1 case day: d = c case hour: @@ -287,3 +279,9 @@ parse: func ParseString(inp string) (time.Time, error) { return Parse([]byte(inp)) } + +// ParseStringInLocation parses an ISO8601 compliant date-time string into a time.Time object. +// If the input does not have timezone information, it will use the given location. +func ParseStringInLocation(inp string, loc *time.Location) (time.Time, error) { + return ParseInLocation([]byte(inp), loc) +} diff --git a/vendor/github.com/rs/cors/cors.go b/vendor/github.com/rs/cors/cors.go index da80d343b3d..724f242ac6f 100644 --- a/vendor/github.com/rs/cors/cors.go +++ b/vendor/github.com/rs/cors/cors.go @@ -364,9 +364,11 @@ func (c *Cors) handlePreflight(w http.ResponseWriter, r *http.Request) { // Note: the Fetch standard guarantees that at most one // Access-Control-Request-Headers header is present in the preflight request; // see step 5.2 in https://fetch.spec.whatwg.org/#cors-preflight-fetch-0. - reqHeaders, found := first(r.Header, "Access-Control-Request-Headers") - if found && !c.allowedHeadersAll && !c.allowedHeaders.Subsumes(reqHeaders[0]) { - c.logf(" Preflight aborted: headers '%v' not allowed", reqHeaders[0]) + // However, some gateways split that header into multiple headers of the same name; + // see https://github.com/rs/cors/issues/184. + reqHeaders, found := r.Header["Access-Control-Request-Headers"] + if found && !c.allowedHeadersAll && !c.allowedHeaders.Accepts(reqHeaders) { + c.logf(" Preflight aborted: headers '%v' not allowed", reqHeaders) return } if c.allowedOriginsAll { @@ -391,9 +393,7 @@ func (c *Cors) handlePreflight(w http.ResponseWriter, r *http.Request) { if len(c.maxAge) > 0 { headers["Access-Control-Max-Age"] = c.maxAge } - if c.Log != nil { - c.logf(" Preflight response headers: %v", headers) - } + c.logf(" Preflight response headers: %v", headers) } // handleActualRequest handles simple cross-origin requests, actual request or redirects @@ -440,9 +440,7 @@ func (c *Cors) handleActualRequest(w http.ResponseWriter, r *http.Request) { if c.allowCredentials { headers["Access-Control-Allow-Credentials"] = headerTrue } - if c.Log != nil { - c.logf(" Actual response added headers: %v", headers) - } + c.logf(" Actual response added headers: %v", headers) } // convenience method. checks if a logger is set. 
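The cors.go hunk above stops assuming that a preflight request carries exactly one Access-Control-Request-Headers header: the Fetch standard guarantees at most one, but some gateways split the list across several same-named header lines, so the handler now passes every value to the new SortedSet.Accepts (implemented in the sortedset.go diff that follows). A stdlib-only sketch of the request shape this tolerates; the header values are hypothetical:

```go
// Demonstrates a preflight whose Access-Control-Request-Headers list was
// split across two header lines by an intermediary.
package main

import (
	"fmt"
	"net/http"
)

func main() {
	h := http.Header{}
	h.Add("Access-Control-Request-Headers", "authorization")
	h.Add("Access-Control-Request-Headers", "content-type,x-trace-id")

	// The removed first() helper inspected values[0] only; the new code
	// hands the whole slice to Accepts, which walks all values as one
	// comma-separated list.
	values := h["Access-Control-Request-Headers"]
	fmt.Println(len(values), values) // 2 [authorization content-type,x-trace-id]
}
```

Accepts can verify membership in a single pass because it requires the combined list to be lexicographically sorted and duplicate-free, which is how browsers emit the header.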
diff --git a/vendor/github.com/rs/cors/internal/sortedset.go b/vendor/github.com/rs/cors/internal/sortedset.go index 513da20f7d0..844f3f9e035 100644 --- a/vendor/github.com/rs/cors/internal/sortedset.go +++ b/vendor/github.com/rs/cors/internal/sortedset.go @@ -52,46 +52,134 @@ func (set SortedSet) String() string { return strings.Join(elems, ",") } -// Subsumes reports whether csv is a sequence of comma-separated names that are -// - all elements of set, -// - sorted in lexicographically order, +// Accepts reports whether values is a sequence of list-based field values +// whose elements are +// - all members of set, +// - sorted in lexicographical order, // - unique. -func (set SortedSet) Subsumes(csv string) bool { - if csv == "" { - return true +func (set SortedSet) Accepts(values []string) bool { + var ( // effectively constant + maxLen = maxOWSBytes + set.maxLen + maxOWSBytes + 1 // +1 for comma + ) + var ( + posOfLastNameSeen = -1 + name string + commaFound bool + emptyElements int + ok bool + ) + for _, s := range values { + for { + // As a defense against maliciously long names in s, + // we process only a small number of s's leading bytes per iteration. + name, s, commaFound = cutAtComma(s, maxLen) + name, ok = trimOWS(name, maxOWSBytes) + if !ok { + return false + } + if name == "" { + // RFC 9110 requires recipients to tolerate + // "a reasonable number of empty list elements"; see + // https://httpwg.org/specs/rfc9110.html#abnf.extension.recipient. + emptyElements++ + if emptyElements > maxEmptyElements { + return false + } + if !commaFound { // We have now exhausted the names in s. + break + } + continue + } + pos, ok := set.m[name] + if !ok { + return false + } + // The names in s are expected to be sorted in lexicographical order + // and to each appear at most once. + // Therefore, the positions (in set) of the names that + // appear in s should form a strictly increasing sequence. + // If that's not actually the case, bail out. + if pos <= posOfLastNameSeen { + return false + } + posOfLastNameSeen = pos + if !commaFound { // We have now exhausted the names in s. + break + } + } + } + return true +} + +const ( + maxOWSBytes = 1 // number of leading/trailing OWS bytes tolerated + maxEmptyElements = 16 // number of empty list elements tolerated +) + +func cutAtComma(s string, n int) (before, after string, found bool) { + // Note: this implementation draws inspiration from strings.Cut's. + end := min(len(s), n) + if i := strings.IndexByte(s[:end], ','); i >= 0 { + after = s[i+1:] // deal with this first to save one bounds check + return s[:i], after, true + } + return s, "", false +} + +// TrimOWS trims up to n bytes of [optional whitespace (OWS)] +// from the start of and/or the end of s. +// If no more than n bytes of OWS are found at the start of s +// and no more than n bytes of OWS are found at the end of s, +// it returns the trimmed result and true. +// Otherwise, it returns the original string and false. +// +// [optional whitespace (OWS)]: https://httpwg.org/specs/rfc9110.html#whitespace +func trimOWS(s string, n int) (trimmed string, ok bool) { + if s == "" { + return s, true + } + trimmed, ok = trimRightOWS(s, n) + if !ok { + return s, false } - posOfLastNameSeen := -1 - chunkSize := set.maxLen + 1 // (to accommodate for at least one comma) - for { - // As a defense against maliciously long names in csv, - // we only process at most chunkSize bytes per iteration. 
- end := min(len(csv), chunkSize) - comma := strings.IndexByte(csv[:end], ',') - var name string - if comma == -1 { - name = csv - } else { - name = csv[:comma] + trimmed, ok = trimLeftOWS(trimmed, n) + if !ok { + return s, false + } + return trimmed, true +} + +func trimLeftOWS(s string, n int) (string, bool) { + sCopy := s + var i int + for len(s) > 0 { + if i > n { + return sCopy, false } - pos, found := set.m[name] - if !found { - return false + if !(s[0] == ' ' || s[0] == '\t') { + break } - // The names in csv are expected to be sorted in lexicographical order - // and appear at most once in csv. - // Therefore, the positions (in set) of the names that - // appear in csv should form a strictly increasing sequence. - // If that's not actually the case, bail out. - if pos <= posOfLastNameSeen { - return false + s = s[1:] + i++ + } + return s, true +} + +func trimRightOWS(s string, n int) (string, bool) { + sCopy := s + var i int + for len(s) > 0 { + if i > n { + return sCopy, false } - posOfLastNameSeen = pos - if comma < 0 { // We've now processed all the names in csv. + last := len(s) - 1 + if !(s[last] == ' ' || s[last] == '\t') { break } - csv = csv[comma+1:] + s = s[:last] + i++ } - return true + return s, true } // TODO: when updating go directive to 1.21 or later, diff --git a/vendor/github.com/rs/cors/utils.go b/vendor/github.com/rs/cors/utils.go index 7019f45cd9c..41b0c2836a3 100644 --- a/vendor/github.com/rs/cors/utils.go +++ b/vendor/github.com/rs/cors/utils.go @@ -1,7 +1,6 @@ package cors import ( - "net/http" "strings" ) @@ -24,11 +23,3 @@ func convert(s []string, f func(string) string) []string { } return out } - -func first(hdrs http.Header, k string) ([]string, bool) { - v, found := hdrs[k] - if !found || len(v) == 0 { - return nil, false - } - return v[:1], true -} diff --git a/vendor/github.com/shirou/gopsutil/v3/common/env.go b/vendor/github.com/shirou/gopsutil/v3/common/env.go deleted file mode 100644 index 4b5f4980c21..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/common/env.go +++ /dev/null @@ -1,23 +0,0 @@ -package common - -type EnvKeyType string - -// EnvKey is a context key that can be used to set programmatically the environment -// gopsutil relies on to perform calls against the OS. -// Example of use: -// -// ctx := context.WithValue(context.Background(), common.EnvKey, EnvMap{common.HostProcEnvKey: "/myproc"}) -// avg, err := load.AvgWithContext(ctx) -var EnvKey = EnvKeyType("env") - -const ( - HostProcEnvKey EnvKeyType = "HOST_PROC" - HostSysEnvKey EnvKeyType = "HOST_SYS" - HostEtcEnvKey EnvKeyType = "HOST_ETC" - HostVarEnvKey EnvKeyType = "HOST_VAR" - HostRunEnvKey EnvKeyType = "HOST_RUN" - HostDevEnvKey EnvKeyType = "HOST_DEV" - HostRootEnvKey EnvKeyType = "HOST_ROOT" -) - -type EnvMap map[EnvKeyType]string diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu.go deleted file mode 100644 index 83bc23d45ed..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu.go +++ /dev/null @@ -1,200 +0,0 @@ -package cpu - -import ( - "context" - "encoding/json" - "fmt" - "math" - "runtime" - "strconv" - "strings" - "sync" - "time" - - "github.com/shirou/gopsutil/v3/internal/common" -) - -// TimesStat contains the amounts of time the CPU has spent performing different -// kinds of work. Time units are in seconds. It is based on linux /proc/stat file. 
-type TimesStat struct { - CPU string `json:"cpu"` - User float64 `json:"user"` - System float64 `json:"system"` - Idle float64 `json:"idle"` - Nice float64 `json:"nice"` - Iowait float64 `json:"iowait"` - Irq float64 `json:"irq"` - Softirq float64 `json:"softirq"` - Steal float64 `json:"steal"` - Guest float64 `json:"guest"` - GuestNice float64 `json:"guestNice"` -} - -type InfoStat struct { - CPU int32 `json:"cpu"` - VendorID string `json:"vendorId"` - Family string `json:"family"` - Model string `json:"model"` - Stepping int32 `json:"stepping"` - PhysicalID string `json:"physicalId"` - CoreID string `json:"coreId"` - Cores int32 `json:"cores"` - ModelName string `json:"modelName"` - Mhz float64 `json:"mhz"` - CacheSize int32 `json:"cacheSize"` - Flags []string `json:"flags"` - Microcode string `json:"microcode"` -} - -type lastPercent struct { - sync.Mutex - lastCPUTimes []TimesStat - lastPerCPUTimes []TimesStat -} - -var ( - lastCPUPercent lastPercent - invoke common.Invoker = common.Invoke{} -) - -func init() { - lastCPUPercent.Lock() - lastCPUPercent.lastCPUTimes, _ = Times(false) - lastCPUPercent.lastPerCPUTimes, _ = Times(true) - lastCPUPercent.Unlock() -} - -// Counts returns the number of physical or logical cores in the system -func Counts(logical bool) (int, error) { - return CountsWithContext(context.Background(), logical) -} - -func (c TimesStat) String() string { - v := []string{ - `"cpu":"` + c.CPU + `"`, - `"user":` + strconv.FormatFloat(c.User, 'f', 1, 64), - `"system":` + strconv.FormatFloat(c.System, 'f', 1, 64), - `"idle":` + strconv.FormatFloat(c.Idle, 'f', 1, 64), - `"nice":` + strconv.FormatFloat(c.Nice, 'f', 1, 64), - `"iowait":` + strconv.FormatFloat(c.Iowait, 'f', 1, 64), - `"irq":` + strconv.FormatFloat(c.Irq, 'f', 1, 64), - `"softirq":` + strconv.FormatFloat(c.Softirq, 'f', 1, 64), - `"steal":` + strconv.FormatFloat(c.Steal, 'f', 1, 64), - `"guest":` + strconv.FormatFloat(c.Guest, 'f', 1, 64), - `"guestNice":` + strconv.FormatFloat(c.GuestNice, 'f', 1, 64), - } - - return `{` + strings.Join(v, ",") + `}` -} - -// Deprecated: Total returns the total number of seconds in a CPUTimesStat -// Please do not use this internal function. -func (c TimesStat) Total() float64 { - total := c.User + c.System + c.Idle + c.Nice + c.Iowait + c.Irq + - c.Softirq + c.Steal + c.Guest + c.GuestNice - - return total -} - -func (c InfoStat) String() string { - s, _ := json.Marshal(c) - return string(s) -} - -func getAllBusy(t TimesStat) (float64, float64) { - tot := t.Total() - if runtime.GOOS == "linux" { - tot -= t.Guest // Linux 2.6.24+ - tot -= t.GuestNice // Linux 3.2.0+ - } - - busy := tot - t.Idle - t.Iowait - - return tot, busy -} - -func calculateBusy(t1, t2 TimesStat) float64 { - t1All, t1Busy := getAllBusy(t1) - t2All, t2Busy := getAllBusy(t2) - - if t2Busy <= t1Busy { - return 0 - } - if t2All <= t1All { - return 100 - } - return math.Min(100, math.Max(0, (t2Busy-t1Busy)/(t2All-t1All)*100)) -} - -func calculateAllBusy(t1, t2 []TimesStat) ([]float64, error) { - // Make sure the CPU measurements have the same length. - if len(t1) != len(t2) { - return nil, fmt.Errorf( - "received two CPU counts: %d != %d", - len(t1), len(t2), - ) - } - - ret := make([]float64, len(t1)) - for i, t := range t2 { - ret[i] = calculateBusy(t1[i], t) - } - return ret, nil -} - -// Percent calculates the percentage of cpu used either per CPU or combined. -// If an interval of 0 is given it will compare the current cpu times against the last call. 
-// Returns one value per cpu, or a single value if percpu is set to false. -func Percent(interval time.Duration, percpu bool) ([]float64, error) { - return PercentWithContext(context.Background(), interval, percpu) -} - -func PercentWithContext(ctx context.Context, interval time.Duration, percpu bool) ([]float64, error) { - if interval <= 0 { - return percentUsedFromLastCallWithContext(ctx, percpu) - } - - // Get CPU usage at the start of the interval. - cpuTimes1, err := TimesWithContext(ctx, percpu) - if err != nil { - return nil, err - } - - if err := common.Sleep(ctx, interval); err != nil { - return nil, err - } - - // And at the end of the interval. - cpuTimes2, err := TimesWithContext(ctx, percpu) - if err != nil { - return nil, err - } - - return calculateAllBusy(cpuTimes1, cpuTimes2) -} - -func percentUsedFromLastCall(percpu bool) ([]float64, error) { - return percentUsedFromLastCallWithContext(context.Background(), percpu) -} - -func percentUsedFromLastCallWithContext(ctx context.Context, percpu bool) ([]float64, error) { - cpuTimes, err := TimesWithContext(ctx, percpu) - if err != nil { - return nil, err - } - lastCPUPercent.Lock() - defer lastCPUPercent.Unlock() - var lastTimes []TimesStat - if percpu { - lastTimes = lastCPUPercent.lastPerCPUTimes - lastCPUPercent.lastPerCPUTimes = cpuTimes - } else { - lastTimes = lastCPUPercent.lastCPUTimes - lastCPUPercent.lastCPUTimes = cpuTimes - } - - if lastTimes == nil { - return nil, fmt.Errorf("error getting times for cpu percent. lastTimes was nil") - } - return calculateAllBusy(lastTimes, cpuTimes) -} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_nocgo.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_nocgo.go deleted file mode 100644 index a77b4dbb762..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_nocgo.go +++ /dev/null @@ -1,92 +0,0 @@ -//go:build aix && !cgo -// +build aix,!cgo - -package cpu - -import ( - "context" - "strconv" - "strings" - - "github.com/shirou/gopsutil/v3/internal/common" -) - -func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { - if percpu { - return []TimesStat{}, common.ErrNotImplementedError - } else { - out, err := invoke.CommandWithContext(ctx, "sar", "-u", "10", "1") - if err != nil { - return nil, err - } - lines := strings.Split(string(out), "\n") - if len(lines) < 5 { - return []TimesStat{}, common.ErrNotImplementedError - } - - ret := TimesStat{CPU: "cpu-total"} - h := strings.Fields(lines[len(lines)-3]) // headers - v := strings.Fields(lines[len(lines)-2]) // values - for i, header := range h { - if t, err := strconv.ParseFloat(v[i], 64); err == nil { - switch header { - case `%usr`: - ret.User = t - case `%sys`: - ret.System = t - case `%wio`: - ret.Iowait = t - case `%idle`: - ret.Idle = t - } - } - } - - return []TimesStat{ret}, nil - } -} - -func InfoWithContext(ctx context.Context) ([]InfoStat, error) { - out, err := invoke.CommandWithContext(ctx, "prtconf") - if err != nil { - return nil, err - } - - ret := InfoStat{} - for _, line := range strings.Split(string(out), "\n") { - if strings.HasPrefix(line, "Number Of Processors:") { - p := strings.Fields(line) - if len(p) > 3 { - if t, err := strconv.ParseUint(p[3], 10, 64); err == nil { - ret.Cores = int32(t) - } - } - } else if strings.HasPrefix(line, "Processor Clock Speed:") { - p := strings.Fields(line) - if len(p) > 4 { - if t, err := strconv.ParseFloat(p[3], 64); err == nil { - switch strings.ToUpper(p[4]) { - case "MHZ": - ret.Mhz = t - case "GHZ": - ret.Mhz = t * 1000.0 
- case "KHZ": - ret.Mhz = t / 1000.0 - default: - ret.Mhz = t - } - } - } - break - } - } - return []InfoStat{ret}, nil -} - -func CountsWithContext(ctx context.Context, logical bool) (int, error) { - info, err := InfoWithContext(ctx) - if err == nil { - return int(info[0].Cores), nil - } - return 0, err -} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin.go deleted file mode 100644 index 41f395e5e05..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin.go +++ /dev/null @@ -1,117 +0,0 @@ -//go:build darwin -// +build darwin - -package cpu - -import ( - "context" - "strconv" - "strings" - - "github.com/shoenig/go-m1cpu" - "github.com/tklauser/go-sysconf" - "golang.org/x/sys/unix" -) - -// sys/resource.h -const ( - CPUser = 0 - cpNice = 1 - cpSys = 2 - cpIntr = 3 - cpIdle = 4 - cpUStates = 5 -) - -// default value. from time.h -var ClocksPerSec = float64(128) - -func init() { - clkTck, err := sysconf.Sysconf(sysconf.SC_CLK_TCK) - // ignore errors - if err == nil { - ClocksPerSec = float64(clkTck) - } -} - -func Times(percpu bool) ([]TimesStat, error) { - return TimesWithContext(context.Background(), percpu) -} - -func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { - if percpu { - return perCPUTimes() - } - - return allCPUTimes() -} - -// Returns only one CPUInfoStat on FreeBSD -func Info() ([]InfoStat, error) { - return InfoWithContext(context.Background()) -} - -func InfoWithContext(ctx context.Context) ([]InfoStat, error) { - var ret []InfoStat - - c := InfoStat{} - c.ModelName, _ = unix.Sysctl("machdep.cpu.brand_string") - family, _ := unix.SysctlUint32("machdep.cpu.family") - c.Family = strconv.FormatUint(uint64(family), 10) - model, _ := unix.SysctlUint32("machdep.cpu.model") - c.Model = strconv.FormatUint(uint64(model), 10) - stepping, _ := unix.SysctlUint32("machdep.cpu.stepping") - c.Stepping = int32(stepping) - features, err := unix.Sysctl("machdep.cpu.features") - if err == nil { - for _, v := range strings.Fields(features) { - c.Flags = append(c.Flags, strings.ToLower(v)) - } - } - leaf7Features, err := unix.Sysctl("machdep.cpu.leaf7_features") - if err == nil { - for _, v := range strings.Fields(leaf7Features) { - c.Flags = append(c.Flags, strings.ToLower(v)) - } - } - extfeatures, err := unix.Sysctl("machdep.cpu.extfeatures") - if err == nil { - for _, v := range strings.Fields(extfeatures) { - c.Flags = append(c.Flags, strings.ToLower(v)) - } - } - cores, _ := unix.SysctlUint32("machdep.cpu.core_count") - c.Cores = int32(cores) - cacheSize, _ := unix.SysctlUint32("machdep.cpu.cache.size") - c.CacheSize = int32(cacheSize) - c.VendorID, _ = unix.Sysctl("machdep.cpu.vendor") - - if m1cpu.IsAppleSilicon() { - c.Mhz = float64(m1cpu.PCoreHz() / 1_000_000) - } else { - // Use the rated frequency of the CPU. This is a static value and does not - // account for low power or Turbo Boost modes. 
- cpuFrequency, err := unix.SysctlUint64("hw.cpufrequency") - if err == nil { - c.Mhz = float64(cpuFrequency) / 1000000.0 - } - } - - return append(ret, c), nil -} - -func CountsWithContext(ctx context.Context, logical bool) (int, error) { - var cpuArgument string - if logical { - cpuArgument = "hw.logicalcpu" - } else { - cpuArgument = "hw.physicalcpu" - } - - count, err := unix.SysctlUint32(cpuArgument) - if err != nil { - return 0, err - } - - return int(count), nil -} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin_cgo.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin_cgo.go deleted file mode 100644 index 1d5f0772ed7..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin_cgo.go +++ /dev/null @@ -1,111 +0,0 @@ -//go:build darwin && cgo -// +build darwin,cgo - -package cpu - -/* -#include -#include -#include -#include -#include -#include -#include -#if TARGET_OS_MAC -#include -#endif -#include -#include -*/ -import "C" - -import ( - "bytes" - "encoding/binary" - "fmt" - "unsafe" -) - -// these CPU times for darwin is borrowed from influxdb/telegraf. - -func perCPUTimes() ([]TimesStat, error) { - var ( - count C.mach_msg_type_number_t - cpuload *C.processor_cpu_load_info_data_t - ncpu C.natural_t - ) - - status := C.host_processor_info(C.host_t(C.mach_host_self()), - C.PROCESSOR_CPU_LOAD_INFO, - &ncpu, - (*C.processor_info_array_t)(unsafe.Pointer(&cpuload)), - &count) - - if status != C.KERN_SUCCESS { - return nil, fmt.Errorf("host_processor_info error=%d", status) - } - - // jump through some cgo casting hoops and ensure we properly free - // the memory that cpuload points to - target := C.vm_map_t(C.mach_task_self_) - address := C.vm_address_t(uintptr(unsafe.Pointer(cpuload))) - defer C.vm_deallocate(target, address, C.vm_size_t(ncpu)) - - // the body of struct processor_cpu_load_info - // aka processor_cpu_load_info_data_t - var cpu_ticks [C.CPU_STATE_MAX]uint32 - - // copy the cpuload array to a []byte buffer - // where we can binary.Read the data - size := int(ncpu) * binary.Size(cpu_ticks) - buf := (*[1 << 30]byte)(unsafe.Pointer(cpuload))[:size:size] - - bbuf := bytes.NewBuffer(buf) - - var ret []TimesStat - - for i := 0; i < int(ncpu); i++ { - err := binary.Read(bbuf, binary.LittleEndian, &cpu_ticks) - if err != nil { - return nil, err - } - - c := TimesStat{ - CPU: fmt.Sprintf("cpu%d", i), - User: float64(cpu_ticks[C.CPU_STATE_USER]) / ClocksPerSec, - System: float64(cpu_ticks[C.CPU_STATE_SYSTEM]) / ClocksPerSec, - Nice: float64(cpu_ticks[C.CPU_STATE_NICE]) / ClocksPerSec, - Idle: float64(cpu_ticks[C.CPU_STATE_IDLE]) / ClocksPerSec, - } - - ret = append(ret, c) - } - - return ret, nil -} - -func allCPUTimes() ([]TimesStat, error) { - var count C.mach_msg_type_number_t - var cpuload C.host_cpu_load_info_data_t - - count = C.HOST_CPU_LOAD_INFO_COUNT - - status := C.host_statistics(C.host_t(C.mach_host_self()), - C.HOST_CPU_LOAD_INFO, - C.host_info_t(unsafe.Pointer(&cpuload)), - &count) - - if status != C.KERN_SUCCESS { - return nil, fmt.Errorf("host_statistics error=%d", status) - } - - c := TimesStat{ - CPU: "cpu-total", - User: float64(cpuload.cpu_ticks[C.CPU_STATE_USER]) / ClocksPerSec, - System: float64(cpuload.cpu_ticks[C.CPU_STATE_SYSTEM]) / ClocksPerSec, - Nice: float64(cpuload.cpu_ticks[C.CPU_STATE_NICE]) / ClocksPerSec, - Idle: float64(cpuload.cpu_ticks[C.CPU_STATE_IDLE]) / ClocksPerSec, - } - - return []TimesStat{c}, nil -} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin_nocgo.go 
b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin_nocgo.go deleted file mode 100644 index e067e99f980..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin_nocgo.go +++ /dev/null @@ -1,14 +0,0 @@ -//go:build darwin && !cgo -// +build darwin,!cgo - -package cpu - -import "github.com/shirou/gopsutil/v3/internal/common" - -func perCPUTimes() ([]TimesStat, error) { - return []TimesStat{}, common.ErrNotImplementedError -} - -func allCPUTimes() ([]TimesStat, error) { - return []TimesStat{}, common.ErrNotImplementedError -} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_dragonfly_amd64.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_dragonfly_amd64.go deleted file mode 100644 index 57e14528db1..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_dragonfly_amd64.go +++ /dev/null @@ -1,9 +0,0 @@ -package cpu - -type cpuTimes struct { - User uint64 - Nice uint64 - Sys uint64 - Intr uint64 - Idle uint64 -} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_386.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_386.go deleted file mode 100644 index 8b7f4c321eb..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_386.go +++ /dev/null @@ -1,9 +0,0 @@ -package cpu - -type cpuTimes struct { - User uint32 - Nice uint32 - Sys uint32 - Intr uint32 - Idle uint32 -} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_amd64.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_amd64.go deleted file mode 100644 index 57e14528db1..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_amd64.go +++ /dev/null @@ -1,9 +0,0 @@ -package cpu - -type cpuTimes struct { - User uint64 - Nice uint64 - Sys uint64 - Intr uint64 - Idle uint64 -} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_arm.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_arm.go deleted file mode 100644 index 8b7f4c321eb..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_arm.go +++ /dev/null @@ -1,9 +0,0 @@ -package cpu - -type cpuTimes struct { - User uint32 - Nice uint32 - Sys uint32 - Intr uint32 - Idle uint32 -} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_arm64.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_arm64.go deleted file mode 100644 index 57e14528db1..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_arm64.go +++ /dev/null @@ -1,9 +0,0 @@ -package cpu - -type cpuTimes struct { - User uint64 - Nice uint64 - Sys uint64 - Intr uint64 - Idle uint64 -} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd_amd64.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd_amd64.go deleted file mode 100644 index 57e14528db1..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd_amd64.go +++ /dev/null @@ -1,9 +0,0 @@ -package cpu - -type cpuTimes struct { - User uint64 - Nice uint64 - Sys uint64 - Intr uint64 - Idle uint64 -} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd_arm64.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd_arm64.go deleted file mode 100644 index 57e14528db1..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd_arm64.go +++ /dev/null @@ -1,9 +0,0 @@ -package cpu - -type cpuTimes struct { - User uint64 - Nice uint64 - Sys uint64 - Intr uint64 - Idle uint64 -} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_386.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_386.go deleted file mode 100644 index 5e878399a38..00000000000 --- 
a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_386.go +++ /dev/null @@ -1,10 +0,0 @@ -package cpu - -type cpuTimes struct { - User uint32 - Nice uint32 - Sys uint32 - Spin uint32 - Intr uint32 - Idle uint32 -} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_amd64.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_amd64.go deleted file mode 100644 index d659058cd5e..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_amd64.go +++ /dev/null @@ -1,10 +0,0 @@ -package cpu - -type cpuTimes struct { - User uint64 - Nice uint64 - Sys uint64 - Spin uint64 - Intr uint64 - Idle uint64 -} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_arm.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_arm.go deleted file mode 100644 index 5e878399a38..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_arm.go +++ /dev/null @@ -1,10 +0,0 @@ -package cpu - -type cpuTimes struct { - User uint32 - Nice uint32 - Sys uint32 - Spin uint32 - Intr uint32 - Idle uint32 -} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_arm64.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_arm64.go deleted file mode 100644 index d659058cd5e..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_arm64.go +++ /dev/null @@ -1,10 +0,0 @@ -package cpu - -type cpuTimes struct { - User uint64 - Nice uint64 - Sys uint64 - Spin uint64 - Intr uint64 - Idle uint64 -} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_riscv64.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_riscv64.go deleted file mode 100644 index d659058cd5e..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_riscv64.go +++ /dev/null @@ -1,10 +0,0 @@ -package cpu - -type cpuTimes struct { - User uint64 - Nice uint64 - Sys uint64 - Spin uint64 - Intr uint64 - Idle uint64 -} diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common.go b/vendor/github.com/shirou/gopsutil/v3/internal/common/common.go deleted file mode 100644 index 5e25e507b48..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common.go +++ /dev/null @@ -1,464 +0,0 @@ -package common - -// -// gopsutil is a port of psutil(http://pythonhosted.org/psutil/). -// This covers these architectures. -// - linux (amd64, arm) -// - freebsd (amd64) -// - windows (amd64) - -import ( - "bufio" - "bytes" - "context" - "errors" - "fmt" - "io" - "net/url" - "os" - "os/exec" - "path" - "path/filepath" - "reflect" - "runtime" - "strconv" - "strings" - "time" - - "github.com/shirou/gopsutil/v3/common" -) - -var ( - Timeout = 3 * time.Second - ErrTimeout = errors.New("command timed out") -) - -type Invoker interface { - Command(string, ...string) ([]byte, error) - CommandWithContext(context.Context, string, ...string) ([]byte, error) -} - -type Invoke struct{} - -func (i Invoke) Command(name string, arg ...string) ([]byte, error) { - ctx, cancel := context.WithTimeout(context.Background(), Timeout) - defer cancel() - return i.CommandWithContext(ctx, name, arg...) -} - -func (i Invoke) CommandWithContext(ctx context.Context, name string, arg ...string) ([]byte, error) { - cmd := exec.CommandContext(ctx, name, arg...) 
- - var buf bytes.Buffer - cmd.Stdout = &buf - cmd.Stderr = &buf - - if err := cmd.Start(); err != nil { - return buf.Bytes(), err - } - - if err := cmd.Wait(); err != nil { - return buf.Bytes(), err - } - - return buf.Bytes(), nil -} - -type FakeInvoke struct { - Suffix string // Suffix species expected file name suffix such as "fail" - Error error // If Error specified, return the error. -} - -// Command in FakeInvoke returns from expected file if exists. -func (i FakeInvoke) Command(name string, arg ...string) ([]byte, error) { - if i.Error != nil { - return []byte{}, i.Error - } - - arch := runtime.GOOS - - commandName := filepath.Base(name) - - fname := strings.Join(append([]string{commandName}, arg...), "") - fname = url.QueryEscape(fname) - fpath := path.Join("testdata", arch, fname) - if i.Suffix != "" { - fpath += "_" + i.Suffix - } - if PathExists(fpath) { - return os.ReadFile(fpath) - } - return []byte{}, fmt.Errorf("could not find testdata: %s", fpath) -} - -func (i FakeInvoke) CommandWithContext(ctx context.Context, name string, arg ...string) ([]byte, error) { - return i.Command(name, arg...) -} - -var ErrNotImplementedError = errors.New("not implemented yet") - -// ReadFile reads contents from a file -func ReadFile(filename string) (string, error) { - content, err := os.ReadFile(filename) - if err != nil { - return "", err - } - - return string(content), nil -} - -// ReadLines reads contents from a file and splits them by new lines. -// A convenience wrapper to ReadLinesOffsetN(filename, 0, -1). -func ReadLines(filename string) ([]string, error) { - return ReadLinesOffsetN(filename, 0, -1) -} - -// ReadLine reads a file and returns the first occurrence of a line that is prefixed with prefix. -func ReadLine(filename string, prefix string) (string, error) { - f, err := os.Open(filename) - if err != nil { - return "", err - } - defer f.Close() - r := bufio.NewReader(f) - for { - line, err := r.ReadString('\n') - if err != nil { - if err == io.EOF { - break - } - return "", err - } - if strings.HasPrefix(line, prefix) { - return line, nil - } - } - - return "", nil -} - -// ReadLinesOffsetN reads contents from file and splits them by new line. -// The offset tells at which line number to start. 
-// The count determines the number of lines to read (starting from offset): -// n >= 0: at most n lines -// n < 0: whole file -func ReadLinesOffsetN(filename string, offset uint, n int) ([]string, error) { - f, err := os.Open(filename) - if err != nil { - return []string{""}, err - } - defer f.Close() - - var ret []string - - r := bufio.NewReader(f) - for i := 0; i < n+int(offset) || n < 0; i++ { - line, err := r.ReadString('\n') - if err != nil { - if err == io.EOF && len(line) > 0 { - ret = append(ret, strings.Trim(line, "\n")) - } - break - } - if i < int(offset) { - continue - } - ret = append(ret, strings.Trim(line, "\n")) - } - - return ret, nil -} - -func IntToString(orig []int8) string { - ret := make([]byte, len(orig)) - size := -1 - for i, o := range orig { - if o == 0 { - size = i - break - } - ret[i] = byte(o) - } - if size == -1 { - size = len(orig) - } - - return string(ret[0:size]) -} - -func UintToString(orig []uint8) string { - ret := make([]byte, len(orig)) - size := -1 - for i, o := range orig { - if o == 0 { - size = i - break - } - ret[i] = byte(o) - } - if size == -1 { - size = len(orig) - } - - return string(ret[0:size]) -} - -func ByteToString(orig []byte) string { - n := -1 - l := -1 - for i, b := range orig { - // skip left side null - if l == -1 && b == 0 { - continue - } - if l == -1 { - l = i - } - - if b == 0 { - break - } - n = i + 1 - } - if n == -1 { - return string(orig) - } - return string(orig[l:n]) -} - -// ReadInts reads contents from single line file and returns them as []int32. -func ReadInts(filename string) ([]int64, error) { - f, err := os.Open(filename) - if err != nil { - return []int64{}, err - } - defer f.Close() - - var ret []int64 - - r := bufio.NewReader(f) - - // The int files that this is concerned with should only be one liners. - line, err := r.ReadString('\n') - if err != nil { - return []int64{}, err - } - - i, err := strconv.ParseInt(strings.Trim(line, "\n"), 10, 32) - if err != nil { - return []int64{}, err - } - ret = append(ret, i) - - return ret, nil -} - -// Parse Hex to uint32 without error -func HexToUint32(hex string) uint32 { - vv, _ := strconv.ParseUint(hex, 16, 32) - return uint32(vv) -} - -// Parse to int32 without error -func mustParseInt32(val string) int32 { - vv, _ := strconv.ParseInt(val, 10, 32) - return int32(vv) -} - -// Parse to uint64 without error -func mustParseUint64(val string) uint64 { - vv, _ := strconv.ParseInt(val, 10, 64) - return uint64(vv) -} - -// Parse to Float64 without error -func mustParseFloat64(val string) float64 { - vv, _ := strconv.ParseFloat(val, 64) - return vv -} - -// StringsHas checks the target string slice contains src or not -func StringsHas(target []string, src string) bool { - for _, t := range target { - if strings.TrimSpace(t) == src { - return true - } - } - return false -} - -// StringsContains checks the src in any string of the target string slice -func StringsContains(target []string, src string) bool { - for _, t := range target { - if strings.Contains(t, src) { - return true - } - } - return false -} - -// IntContains checks the src in any int of the target int slice. -func IntContains(target []int, src int) bool { - for _, t := range target { - if src == t { - return true - } - } - return false -} - -// get struct attributes. -// This method is used only for debugging platform dependent code. 
-func attributes(m interface{}) map[string]reflect.Type { - typ := reflect.TypeOf(m) - if typ.Kind() == reflect.Ptr { - typ = typ.Elem() - } - - attrs := make(map[string]reflect.Type) - if typ.Kind() != reflect.Struct { - return nil - } - - for i := 0; i < typ.NumField(); i++ { - p := typ.Field(i) - if !p.Anonymous { - attrs[p.Name] = p.Type - } - } - - return attrs -} - -func PathExists(filename string) bool { - if _, err := os.Stat(filename); err == nil { - return true - } - return false -} - -// PathExistsWithContents returns the filename exists and it is not empty -func PathExistsWithContents(filename string) bool { - info, err := os.Stat(filename) - if err != nil { - return false - } - return info.Size() > 4 && !info.IsDir() // at least 4 bytes -} - -// GetEnvWithContext retrieves the environment variable key. If it does not exist it returns the default. -// The context may optionally contain a map superseding os.EnvKey. -func GetEnvWithContext(ctx context.Context, key string, dfault string, combineWith ...string) string { - var value string - if env, ok := ctx.Value(common.EnvKey).(common.EnvMap); ok { - value = env[common.EnvKeyType(key)] - } - if value == "" { - value = os.Getenv(key) - } - if value == "" { - value = dfault - } - - return combine(value, combineWith) -} - -// GetEnv retrieves the environment variable key. If it does not exist it returns the default. -func GetEnv(key string, dfault string, combineWith ...string) string { - value := os.Getenv(key) - if value == "" { - value = dfault - } - - return combine(value, combineWith) -} - -func combine(value string, combineWith []string) string { - switch len(combineWith) { - case 0: - return value - case 1: - return filepath.Join(value, combineWith[0]) - default: - all := make([]string, len(combineWith)+1) - all[0] = value - copy(all[1:], combineWith) - return filepath.Join(all...) - } -} - -func HostProc(combineWith ...string) string { - return GetEnv("HOST_PROC", "/proc", combineWith...) -} - -func HostSys(combineWith ...string) string { - return GetEnv("HOST_SYS", "/sys", combineWith...) -} - -func HostEtc(combineWith ...string) string { - return GetEnv("HOST_ETC", "/etc", combineWith...) -} - -func HostVar(combineWith ...string) string { - return GetEnv("HOST_VAR", "/var", combineWith...) -} - -func HostRun(combineWith ...string) string { - return GetEnv("HOST_RUN", "/run", combineWith...) -} - -func HostDev(combineWith ...string) string { - return GetEnv("HOST_DEV", "/dev", combineWith...) -} - -func HostRoot(combineWith ...string) string { - return GetEnv("HOST_ROOT", "/", combineWith...) -} - -func HostProcWithContext(ctx context.Context, combineWith ...string) string { - return GetEnvWithContext(ctx, "HOST_PROC", "/proc", combineWith...) -} - -func HostProcMountInfoWithContext(ctx context.Context, combineWith ...string) string { - return GetEnvWithContext(ctx, "HOST_PROC_MOUNTINFO", "", combineWith...) -} - -func HostSysWithContext(ctx context.Context, combineWith ...string) string { - return GetEnvWithContext(ctx, "HOST_SYS", "/sys", combineWith...) -} - -func HostEtcWithContext(ctx context.Context, combineWith ...string) string { - return GetEnvWithContext(ctx, "HOST_ETC", "/etc", combineWith...) -} - -func HostVarWithContext(ctx context.Context, combineWith ...string) string { - return GetEnvWithContext(ctx, "HOST_VAR", "/var", combineWith...) -} - -func HostRunWithContext(ctx context.Context, combineWith ...string) string { - return GetEnvWithContext(ctx, "HOST_RUN", "/run", combineWith...) 
-} - -func HostDevWithContext(ctx context.Context, combineWith ...string) string { - return GetEnvWithContext(ctx, "HOST_DEV", "/dev", combineWith...) -} - -func HostRootWithContext(ctx context.Context, combineWith ...string) string { - return GetEnvWithContext(ctx, "HOST_ROOT", "/", combineWith...) -} - -// getSysctrlEnv sets LC_ALL=C in a list of env vars for use when running -// sysctl commands (see DoSysctrl). -func getSysctrlEnv(env []string) []string { - foundLC := false - for i, line := range env { - if strings.HasPrefix(line, "LC_ALL") { - env[i] = "LC_ALL=C" - foundLC = true - } - } - if !foundLC { - env = append(env, "LC_ALL=C") - } - return env -} diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_darwin.go b/vendor/github.com/shirou/gopsutil/v3/internal/common/common_darwin.go deleted file mode 100644 index f1a78459777..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_darwin.go +++ /dev/null @@ -1,66 +0,0 @@ -//go:build darwin -// +build darwin - -package common - -import ( - "context" - "os" - "os/exec" - "strings" - "unsafe" - - "golang.org/x/sys/unix" -) - -func DoSysctrlWithContext(ctx context.Context, mib string) ([]string, error) { - cmd := exec.CommandContext(ctx, "sysctl", "-n", mib) - cmd.Env = getSysctrlEnv(os.Environ()) - out, err := cmd.Output() - if err != nil { - return []string{}, err - } - v := strings.Replace(string(out), "{ ", "", 1) - v = strings.Replace(string(v), " }", "", 1) - values := strings.Fields(string(v)) - - return values, nil -} - -func CallSyscall(mib []int32) ([]byte, uint64, error) { - miblen := uint64(len(mib)) - - // get required buffer size - length := uint64(0) - _, _, err := unix.Syscall6( - 202, // unix.SYS___SYSCTL https://github.com/golang/sys/blob/76b94024e4b621e672466e8db3d7f084e7ddcad2/unix/zsysnum_darwin_amd64.go#L146 - uintptr(unsafe.Pointer(&mib[0])), - uintptr(miblen), - 0, - uintptr(unsafe.Pointer(&length)), - 0, - 0) - if err != 0 { - var b []byte - return b, length, err - } - if length == 0 { - var b []byte - return b, length, err - } - // get proc info itself - buf := make([]byte, length) - _, _, err = unix.Syscall6( - 202, // unix.SYS___SYSCTL https://github.com/golang/sys/blob/76b94024e4b621e672466e8db3d7f084e7ddcad2/unix/zsysnum_darwin_amd64.go#L146 - uintptr(unsafe.Pointer(&mib[0])), - uintptr(miblen), - uintptr(unsafe.Pointer(&buf[0])), - uintptr(unsafe.Pointer(&length)), - 0, - 0) - if err != 0 { - return buf, length, err - } - - return buf, length, nil -} diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_unix.go b/vendor/github.com/shirou/gopsutil/v3/internal/common/common_unix.go deleted file mode 100644 index 4af7e5c2aa8..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_unix.go +++ /dev/null @@ -1,62 +0,0 @@ -//go:build linux || freebsd || darwin || openbsd -// +build linux freebsd darwin openbsd - -package common - -import ( - "context" - "errors" - "os/exec" - "strconv" - "strings" -) - -func CallLsofWithContext(ctx context.Context, invoke Invoker, pid int32, args ...string) ([]string, error) { - var cmd []string - if pid == 0 { // will get from all processes. - cmd = []string{"-a", "-n", "-P"} - } else { - cmd = []string{"-a", "-n", "-P", "-p", strconv.Itoa(int(pid))} - } - cmd = append(cmd, args...) - out, err := invoke.CommandWithContext(ctx, "lsof", cmd...) - if err != nil { - if errors.Is(err, exec.ErrNotFound) { - return []string{}, err - } - // if no pid found, lsof returns code 1. 
- if err.Error() == "exit status 1" && len(out) == 0 { - return []string{}, nil - } - } - lines := strings.Split(string(out), "\n") - - var ret []string - for _, l := range lines[1:] { - if len(l) == 0 { - continue - } - ret = append(ret, l) - } - return ret, nil -} - -func CallPgrepWithContext(ctx context.Context, invoke Invoker, pid int32) ([]int32, error) { - out, err := invoke.CommandWithContext(ctx, "pgrep", "-P", strconv.Itoa(int(pid))) - if err != nil { - return []int32{}, err - } - lines := strings.Split(string(out), "\n") - ret := make([]int32, 0, len(lines)) - for _, l := range lines { - if len(l) == 0 { - continue - } - i, err := strconv.ParseInt(l, 10, 32) - if err != nil { - continue - } - ret = append(ret, int32(i)) - } - return ret, nil -} diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix.go b/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix.go deleted file mode 100644 index 22a6a4e9265..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build aix -// +build aix - -package mem - -import ( - "context" -) - -func VirtualMemory() (*VirtualMemoryStat, error) { - return VirtualMemoryWithContext(context.Background()) -} - -func SwapMemory() (*SwapMemoryStat, error) { - return SwapMemoryWithContext(context.Background()) -} diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin.go b/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin.go deleted file mode 100644 index a05a0faba0b..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin.go +++ /dev/null @@ -1,72 +0,0 @@ -//go:build darwin -// +build darwin - -package mem - -import ( - "context" - "fmt" - "unsafe" - - "golang.org/x/sys/unix" - - "github.com/shirou/gopsutil/v3/internal/common" -) - -func getHwMemsize() (uint64, error) { - total, err := unix.SysctlUint64("hw.memsize") - if err != nil { - return 0, err - } - return total, nil -} - -// xsw_usage in sys/sysctl.h -type swapUsage struct { - Total uint64 - Avail uint64 - Used uint64 - Pagesize int32 - Encrypted bool -} - -// SwapMemory returns swapinfo. 
-func SwapMemory() (*SwapMemoryStat, error) { - return SwapMemoryWithContext(context.Background()) -} - -func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { - // https://github.com/yanllearnn/go-osstat/blob/ae8a279d26f52ec946a03698c7f50a26cfb427e3/memory/memory_darwin.go - var ret *SwapMemoryStat - - value, err := unix.SysctlRaw("vm.swapusage") - if err != nil { - return ret, err - } - if len(value) != 32 { - return ret, fmt.Errorf("unexpected output of sysctl vm.swapusage: %v (len: %d)", value, len(value)) - } - swap := (*swapUsage)(unsafe.Pointer(&value[0])) - - u := float64(0) - if swap.Total != 0 { - u = ((float64(swap.Total) - float64(swap.Avail)) / float64(swap.Total)) * 100.0 - } - - ret = &SwapMemoryStat{ - Total: swap.Total, - Used: swap.Used, - Free: swap.Avail, - UsedPercent: u, - } - - return ret, nil -} - -func SwapDevices() ([]*SwapDevice, error) { - return SwapDevicesWithContext(context.Background()) -} - -func SwapDevicesWithContext(ctx context.Context) ([]*SwapDevice, error) { - return nil, common.ErrNotImplementedError -} diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_cgo.go b/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_cgo.go deleted file mode 100644 index e5da7dcdb23..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_cgo.go +++ /dev/null @@ -1,58 +0,0 @@ -//go:build darwin && cgo -// +build darwin,cgo - -package mem - -/* -#include -#include -*/ -import "C" - -import ( - "context" - "fmt" - "unsafe" -) - -// VirtualMemory returns VirtualmemoryStat. -func VirtualMemory() (*VirtualMemoryStat, error) { - return VirtualMemoryWithContext(context.Background()) -} - -func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { - count := C.mach_msg_type_number_t(C.HOST_VM_INFO_COUNT) - var vmstat C.vm_statistics_data_t - - status := C.host_statistics(C.host_t(C.mach_host_self()), - C.HOST_VM_INFO, - C.host_info_t(unsafe.Pointer(&vmstat)), - &count) - - if status != C.KERN_SUCCESS { - return nil, fmt.Errorf("host_statistics error=%d", status) - } - - pageSize := uint64(C.vm_kernel_page_size) - total, err := getHwMemsize() - if err != nil { - return nil, err - } - totalCount := C.natural_t(total / pageSize) - - availableCount := vmstat.inactive_count + vmstat.free_count - usedPercent := 100 * float64(totalCount-availableCount) / float64(totalCount) - - usedCount := totalCount - availableCount - - return &VirtualMemoryStat{ - Total: total, - Available: pageSize * uint64(availableCount), - Used: pageSize * uint64(usedCount), - UsedPercent: usedPercent, - Free: pageSize * uint64(vmstat.free_count), - Active: pageSize * uint64(vmstat.active_count), - Inactive: pageSize * uint64(vmstat.inactive_count), - Wired: pageSize * uint64(vmstat.wire_count), - }, nil -} diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_nocgo.go b/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_nocgo.go deleted file mode 100644 index c9393168032..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_nocgo.go +++ /dev/null @@ -1,89 +0,0 @@ -//go:build darwin && !cgo -// +build darwin,!cgo - -package mem - -import ( - "context" - "strconv" - "strings" - - "golang.org/x/sys/unix" -) - -// Runs vm_stat and returns Free and inactive pages -func getVMStat(vms *VirtualMemoryStat) error { - out, err := invoke.Command("vm_stat") - if err != nil { - return err - } - return parseVMStat(string(out), vms) -} - -func parseVMStat(out string, vms *VirtualMemoryStat) error { - var err error - - lines := 
strings.Split(out, "\n") - pagesize := uint64(unix.Getpagesize()) - for _, line := range lines { - fields := strings.Split(line, ":") - if len(fields) < 2 { - continue - } - key := strings.TrimSpace(fields[0]) - value := strings.Trim(fields[1], " .") - switch key { - case "Pages free": - free, e := strconv.ParseUint(value, 10, 64) - if e != nil { - err = e - } - vms.Free = free * pagesize - case "Pages inactive": - inactive, e := strconv.ParseUint(value, 10, 64) - if e != nil { - err = e - } - vms.Inactive = inactive * pagesize - case "Pages active": - active, e := strconv.ParseUint(value, 10, 64) - if e != nil { - err = e - } - vms.Active = active * pagesize - case "Pages wired down": - wired, e := strconv.ParseUint(value, 10, 64) - if e != nil { - err = e - } - vms.Wired = wired * pagesize - } - } - return err -} - -// VirtualMemory returns VirtualmemoryStat. -func VirtualMemory() (*VirtualMemoryStat, error) { - return VirtualMemoryWithContext(context.Background()) -} - -func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { - ret := &VirtualMemoryStat{} - - total, err := getHwMemsize() - if err != nil { - return nil, err - } - err = getVMStat(ret) - if err != nil { - return nil, err - } - - ret.Available = ret.Free + ret.Inactive - ret.Total = total - - ret.Used = ret.Total - ret.Available - ret.UsedPercent = 100 * float64(ret.Used) / float64(ret.Total) - - return ret, nil -} diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net.go b/vendor/github.com/shirou/gopsutil/v3/net/net.go deleted file mode 100644 index 0f3a62f39c5..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/net/net.go +++ /dev/null @@ -1,273 +0,0 @@ -package net - -import ( - "context" - "encoding/json" - "net" - - "github.com/shirou/gopsutil/v3/internal/common" -) - -var invoke common.Invoker = common.Invoke{} - -type IOCountersStat struct { - Name string `json:"name"` // interface name - BytesSent uint64 `json:"bytesSent"` // number of bytes sent - BytesRecv uint64 `json:"bytesRecv"` // number of bytes received - PacketsSent uint64 `json:"packetsSent"` // number of packets sent - PacketsRecv uint64 `json:"packetsRecv"` // number of packets received - Errin uint64 `json:"errin"` // total number of errors while receiving - Errout uint64 `json:"errout"` // total number of errors while sending - Dropin uint64 `json:"dropin"` // total number of incoming packets which were dropped - Dropout uint64 `json:"dropout"` // total number of outgoing packets which were dropped (always 0 on OSX and BSD) - Fifoin uint64 `json:"fifoin"` // total number of FIFO buffers errors while receiving - Fifoout uint64 `json:"fifoout"` // total number of FIFO buffers errors while sending -} - -// Addr is implemented compatibility to psutil -type Addr struct { - IP string `json:"ip"` - Port uint32 `json:"port"` -} - -type ConnectionStat struct { - Fd uint32 `json:"fd"` - Family uint32 `json:"family"` - Type uint32 `json:"type"` - Laddr Addr `json:"localaddr"` - Raddr Addr `json:"remoteaddr"` - Status string `json:"status"` - Uids []int32 `json:"uids"` - Pid int32 `json:"pid"` -} - -// System wide stats about different network protocols -type ProtoCountersStat struct { - Protocol string `json:"protocol"` - Stats map[string]int64 `json:"stats"` -} - -// NetInterfaceAddr is designed for represent interface addresses -type InterfaceAddr struct { - Addr string `json:"addr"` -} - -// InterfaceAddrList is a list of InterfaceAddr -type InterfaceAddrList []InterfaceAddr - -type InterfaceStat struct { - Index int 
`json:"index"` - MTU int `json:"mtu"` // maximum transmission unit - Name string `json:"name"` // e.g., "en0", "lo0", "eth0.100" - HardwareAddr string `json:"hardwareAddr"` // IEEE MAC-48, EUI-48 and EUI-64 form - Flags []string `json:"flags"` // e.g., FlagUp, FlagLoopback, FlagMulticast - Addrs InterfaceAddrList `json:"addrs"` -} - -// InterfaceStatList is a list of InterfaceStat -type InterfaceStatList []InterfaceStat - -type FilterStat struct { - ConnTrackCount int64 `json:"connTrackCount"` - ConnTrackMax int64 `json:"connTrackMax"` -} - -// ConntrackStat has conntrack summary info -type ConntrackStat struct { - Entries uint32 `json:"entries"` // Number of entries in the conntrack table - Searched uint32 `json:"searched"` // Number of conntrack table lookups performed - Found uint32 `json:"found"` // Number of searched entries which were successful - New uint32 `json:"new"` // Number of entries added which were not expected before - Invalid uint32 `json:"invalid"` // Number of packets seen which can not be tracked - Ignore uint32 `json:"ignore"` // Packets seen which are already connected to an entry - Delete uint32 `json:"delete"` // Number of entries which were removed - DeleteList uint32 `json:"deleteList"` // Number of entries which were put to dying list - Insert uint32 `json:"insert"` // Number of entries inserted into the list - InsertFailed uint32 `json:"insertFailed"` // # insertion attempted but failed (same entry exists) - Drop uint32 `json:"drop"` // Number of packets dropped due to conntrack failure. - EarlyDrop uint32 `json:"earlyDrop"` // Dropped entries to make room for new ones, if maxsize reached - IcmpError uint32 `json:"icmpError"` // Subset of invalid. Packets that can't be tracked d/t error - ExpectNew uint32 `json:"expectNew"` // Entries added after an expectation was already present - ExpectCreate uint32 `json:"expectCreate"` // Expectations added - ExpectDelete uint32 `json:"expectDelete"` // Expectations deleted - SearchRestart uint32 `json:"searchRestart"` // Conntrack table lookups restarted due to hashtable resizes -} - -func NewConntrackStat(e uint32, s uint32, f uint32, n uint32, inv uint32, ign uint32, del uint32, dlst uint32, ins uint32, insfail uint32, drop uint32, edrop uint32, ie uint32, en uint32, ec uint32, ed uint32, sr uint32) *ConntrackStat { - return &ConntrackStat{ - Entries: e, - Searched: s, - Found: f, - New: n, - Invalid: inv, - Ignore: ign, - Delete: del, - DeleteList: dlst, - Insert: ins, - InsertFailed: insfail, - Drop: drop, - EarlyDrop: edrop, - IcmpError: ie, - ExpectNew: en, - ExpectCreate: ec, - ExpectDelete: ed, - SearchRestart: sr, - } -} - -type ConntrackStatList struct { - items []*ConntrackStat -} - -func NewConntrackStatList() *ConntrackStatList { - return &ConntrackStatList{ - items: []*ConntrackStat{}, - } -} - -func (l *ConntrackStatList) Append(c *ConntrackStat) { - l.items = append(l.items, c) -} - -func (l *ConntrackStatList) Items() []ConntrackStat { - items := make([]ConntrackStat, len(l.items)) - for i, el := range l.items { - items[i] = *el - } - return items -} - -// Summary returns a single-element list with totals from all list items. 
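-// A minimal usage sketch (hypothetical values; each appended item is
-// typically one per-CPU row parsed from /proc/net/stat/nf_conntrack):
-//
-//	l := NewConntrackStatList()
-//	l.Append(NewConntrackStat(10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
-//	l.Append(NewConntrackStat(5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
-//	total := l.Summary()[0] // total.Entries == 15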
-func (l *ConntrackStatList) Summary() []ConntrackStat { - summary := NewConntrackStat(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) - for _, cs := range l.items { - summary.Entries += cs.Entries - summary.Searched += cs.Searched - summary.Found += cs.Found - summary.New += cs.New - summary.Invalid += cs.Invalid - summary.Ignore += cs.Ignore - summary.Delete += cs.Delete - summary.DeleteList += cs.DeleteList - summary.Insert += cs.Insert - summary.InsertFailed += cs.InsertFailed - summary.Drop += cs.Drop - summary.EarlyDrop += cs.EarlyDrop - summary.IcmpError += cs.IcmpError - summary.ExpectNew += cs.ExpectNew - summary.ExpectCreate += cs.ExpectCreate - summary.ExpectDelete += cs.ExpectDelete - summary.SearchRestart += cs.SearchRestart - } - return []ConntrackStat{*summary} -} - -func (n IOCountersStat) String() string { - s, _ := json.Marshal(n) - return string(s) -} - -func (n ConnectionStat) String() string { - s, _ := json.Marshal(n) - return string(s) -} - -func (n ProtoCountersStat) String() string { - s, _ := json.Marshal(n) - return string(s) -} - -func (a Addr) String() string { - s, _ := json.Marshal(a) - return string(s) -} - -func (n InterfaceStat) String() string { - s, _ := json.Marshal(n) - return string(s) -} - -func (l InterfaceStatList) String() string { - s, _ := json.Marshal(l) - return string(s) -} - -func (n InterfaceAddr) String() string { - s, _ := json.Marshal(n) - return string(s) -} - -func (n ConntrackStat) String() string { - s, _ := json.Marshal(n) - return string(s) -} - -func Interfaces() (InterfaceStatList, error) { - return InterfacesWithContext(context.Background()) -} - -func InterfacesWithContext(ctx context.Context) (InterfaceStatList, error) { - is, err := net.Interfaces() - if err != nil { - return nil, err - } - ret := make(InterfaceStatList, 0, len(is)) - for _, ifi := range is { - - var flags []string - if ifi.Flags&net.FlagUp != 0 { - flags = append(flags, "up") - } - if ifi.Flags&net.FlagBroadcast != 0 { - flags = append(flags, "broadcast") - } - if ifi.Flags&net.FlagLoopback != 0 { - flags = append(flags, "loopback") - } - if ifi.Flags&net.FlagPointToPoint != 0 { - flags = append(flags, "pointtopoint") - } - if ifi.Flags&net.FlagMulticast != 0 { - flags = append(flags, "multicast") - } - - r := InterfaceStat{ - Index: ifi.Index, - Name: ifi.Name, - MTU: ifi.MTU, - HardwareAddr: ifi.HardwareAddr.String(), - Flags: flags, - } - addrs, err := ifi.Addrs() - if err == nil { - r.Addrs = make(InterfaceAddrList, 0, len(addrs)) - for _, addr := range addrs { - r.Addrs = append(r.Addrs, InterfaceAddr{ - Addr: addr.String(), - }) - } - - } - ret = append(ret, r) - } - - return ret, nil -} - -func getIOCountersAll(n []IOCountersStat) ([]IOCountersStat, error) { - r := IOCountersStat{ - Name: "all", - } - for _, nic := range n { - r.BytesRecv += nic.BytesRecv - r.PacketsRecv += nic.PacketsRecv - r.Errin += nic.Errin - r.Dropin += nic.Dropin - r.BytesSent += nic.BytesSent - r.PacketsSent += nic.PacketsSent - r.Errout += nic.Errout - r.Dropout += nic.Dropout - } - - return []IOCountersStat{r}, nil -} diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_fallback.go b/vendor/github.com/shirou/gopsutil/v3/net/net_fallback.go deleted file mode 100644 index e136be1bace..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_fallback.go +++ /dev/null @@ -1,93 +0,0 @@ -//go:build !aix && !darwin && !linux && !freebsd && !openbsd && !windows && !solaris -// +build !aix,!darwin,!linux,!freebsd,!openbsd,!windows,!solaris - -package net - -import ( - 
"context" - - "github.com/shirou/gopsutil/v3/internal/common" -) - -func IOCounters(pernic bool) ([]IOCountersStat, error) { - return IOCountersWithContext(context.Background(), pernic) -} - -func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { - return []IOCountersStat{}, common.ErrNotImplementedError -} - -func FilterCounters() ([]FilterStat, error) { - return FilterCountersWithContext(context.Background()) -} - -func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { - return []FilterStat{}, common.ErrNotImplementedError -} - -func ConntrackStats(percpu bool) ([]ConntrackStat, error) { - return ConntrackStatsWithContext(context.Background(), percpu) -} - -func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackStat, error) { - return nil, common.ErrNotImplementedError -} - -func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { - return ProtoCountersWithContext(context.Background(), protocols) -} - -func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { - return []ProtoCountersStat{}, common.ErrNotImplementedError -} - -func Connections(kind string) ([]ConnectionStat, error) { - return ConnectionsWithContext(context.Background(), kind) -} - -func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { - return []ConnectionStat{}, common.ErrNotImplementedError -} - -func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) { - return ConnectionsMaxWithContext(context.Background(), kind, max) -} - -func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { - return []ConnectionStat{}, common.ErrNotImplementedError -} - -// Return a list of network connections opened, omitting `Uids`. -// WithoutUids functions are reliant on implementation details. They may be altered to be an alias for Connections or be -// removed from the API in the future. 
-func ConnectionsWithoutUids(kind string) ([]ConnectionStat, error) { - return ConnectionsWithoutUidsWithContext(context.Background(), kind) -} - -func ConnectionsWithoutUidsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { - return ConnectionsMaxWithoutUidsWithContext(ctx, kind, 0) -} - -func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, max) -} - -func ConnectionsPidWithoutUids(kind string, pid int32) ([]ConnectionStat, error) { - return ConnectionsPidWithoutUidsWithContext(context.Background(), kind, pid) -} - -func ConnectionsPidWithoutUidsWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, 0) -} - -func ConnectionsPidMaxWithoutUids(kind string, pid int32, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, max) -} - -func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { - return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, max) -} - -func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { - return []ConnectionStat{}, common.ErrNotImplementedError -} diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_freebsd.go b/vendor/github.com/shirou/gopsutil/v3/net/net_freebsd.go deleted file mode 100644 index bf8baf09495..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_freebsd.go +++ /dev/null @@ -1,128 +0,0 @@ -//go:build freebsd -// +build freebsd - -package net - -import ( - "context" - "strconv" - "strings" - - "github.com/shirou/gopsutil/v3/internal/common" -) - -func IOCounters(pernic bool) ([]IOCountersStat, error) { - return IOCountersWithContext(context.Background(), pernic) -} - -func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { - out, err := invoke.CommandWithContext(ctx, "netstat", "-ibdnW") - if err != nil { - return nil, err - } - - lines := strings.Split(string(out), "\n") - ret := make([]IOCountersStat, 0, len(lines)-1) - exists := make([]string, 0, len(ret)) - - for _, line := range lines { - values := strings.Fields(line) - if len(values) < 1 || values[0] == "Name" { - continue - } - if common.StringsHas(exists, values[0]) { - // skip if already get - continue - } - exists = append(exists, values[0]) - - if len(values) < 12 { - continue - } - base := 1 - // sometimes Address is omitted - if len(values) < 13 { - base = 0 - } - - parsed := make([]uint64, 0, 8) - vv := []string{ - values[base+3], // PacketsRecv - values[base+4], // Errin - values[base+5], // Dropin - values[base+6], // BytesRecvn - values[base+7], // PacketSent - values[base+8], // Errout - values[base+9], // BytesSent - values[base+11], // Dropout - } - for _, target := range vv { - if target == "-" { - parsed = append(parsed, 0) - continue - } - - t, err := strconv.ParseUint(target, 10, 64) - if err != nil { - return nil, err - } - parsed = append(parsed, t) - } - - n := IOCountersStat{ - Name: values[0], - PacketsRecv: parsed[0], - Errin: parsed[1], - Dropin: parsed[2], - BytesRecv: parsed[3], - PacketsSent: parsed[4], - Errout: parsed[5], - BytesSent: parsed[6], - Dropout: parsed[7], - } - ret = append(ret, n) - } - - if pernic == false { - return getIOCountersAll(ret) - } - - return ret, 
nil
-}
-
-// IOCountersByFile exists just for compatibility with Linux.
-func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) {
-	return IOCountersByFileWithContext(context.Background(), pernic, filename)
-}
-
-func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) {
-	return IOCounters(pernic)
-}
-
-func FilterCounters() ([]FilterStat, error) {
-	return FilterCountersWithContext(context.Background())
-}
-
-func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) {
-	return nil, common.ErrNotImplementedError
-}
-
-func ConntrackStats(percpu bool) ([]ConntrackStat, error) {
-	return ConntrackStatsWithContext(context.Background(), percpu)
-}
-
-func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackStat, error) {
-	return nil, common.ErrNotImplementedError
-}
-
-// ProtoCounters returns network statistics for the entire system.
-// If protocols is empty then all protocols are returned, otherwise
-// just the protocols in the list are returned.
-// Not implemented for FreeBSD.
-func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) {
-	return ProtoCountersWithContext(context.Background(), protocols)
-}
-
-func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) {
-	return nil, common.ErrNotImplementedError
-}
diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_linux_111.go b/vendor/github.com/shirou/gopsutil/v3/net/net_linux_111.go
deleted file mode 100644
index bd5c9587137..00000000000
--- a/vendor/github.com/shirou/gopsutil/v3/net/net_linux_111.go
+++ /dev/null
@@ -1,12 +0,0 @@
-//go:build !go1.16
-// +build !go1.16
-
-package net
-
-import (
-	"os"
-)
-
-func readDir(f *os.File, max int) ([]os.FileInfo, error) {
-	return f.Readdir(max)
-}
diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_linux_116.go b/vendor/github.com/shirou/gopsutil/v3/net/net_linux_116.go
deleted file mode 100644
index a45072e924a..00000000000
--- a/vendor/github.com/shirou/gopsutil/v3/net/net_linux_116.go
+++ /dev/null
@@ -1,12 +0,0 @@
-//go:build go1.16
-// +build go1.16
-
-package net
-
-import (
-	"os"
-)
-
-func readDir(f *os.File, max int) ([]os.DirEntry, error) {
-	return f.ReadDir(max)
-}
diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_solaris.go b/vendor/github.com/shirou/gopsutil/v3/net/net_solaris.go
deleted file mode 100644
index 79d8ac30e24..00000000000
--- a/vendor/github.com/shirou/gopsutil/v3/net/net_solaris.go
+++ /dev/null
@@ -1,144 +0,0 @@
-//go:build solaris
-// +build solaris
-
-package net
-
-import (
-	"context"
-	"fmt"
-	"regexp"
-	"runtime"
-	"strconv"
-	"strings"
-
-	"github.com/shirou/gopsutil/v3/internal/common"
-)
-
-// IOCounters returns network I/O statistics for every network
-// interface installed on the system. If the pernic argument is false,
-// only the sum of all counters is returned (under the name 'all').
-// If true, every network interface installed on the system is returned
-// separately.
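-// A short usage sketch (counter values depend on the host):
-//
-//	counters, err := net.IOCounters(false)
-//	if err == nil {
-//		// one aggregated entry, Name == "all"
-//		fmt.Println(counters[0].Name, counters[0].BytesRecv)
-//	}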
-func IOCounters(pernic bool) ([]IOCountersStat, error) { - return IOCountersWithContext(context.Background(), pernic) -} - -var kstatSplit = regexp.MustCompile(`[:\s]+`) - -func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { - // collect all the net class's links with below statistics - filterstr := "/^(?!vnic)/::phys:/^rbytes64$|^ipackets64$|^idrops64$|^ierrors$|^obytes64$|^opackets64$|^odrops64$|^oerrors$/" - if runtime.GOOS == "illumos" { - filterstr = "/[^vnic]/::mac:/^rbytes64$|^ipackets64$|^idrops64$|^ierrors$|^obytes64$|^opackets64$|^odrops64$|^oerrors$/" - } - kstatSysOut, err := invoke.CommandWithContext(ctx, "kstat", "-c", "net", "-p", filterstr) - if err != nil { - return nil, fmt.Errorf("cannot execute kstat: %w", err) - } - - lines := strings.Split(strings.TrimSpace(string(kstatSysOut)), "\n") - if len(lines) == 0 { - return nil, fmt.Errorf("no interface found") - } - rbytes64arr := make(map[string]uint64) - ipackets64arr := make(map[string]uint64) - idrops64arr := make(map[string]uint64) - ierrorsarr := make(map[string]uint64) - obytes64arr := make(map[string]uint64) - opackets64arr := make(map[string]uint64) - odrops64arr := make(map[string]uint64) - oerrorsarr := make(map[string]uint64) - - for _, line := range lines { - fields := kstatSplit.Split(line, -1) - interfaceName := fields[0] - instance := fields[1] - switch fields[3] { - case "rbytes64": - rbytes64arr[interfaceName+instance], err = strconv.ParseUint(fields[4], 10, 64) - if err != nil { - return nil, fmt.Errorf("cannot parse rbytes64: %w", err) - } - case "ipackets64": - ipackets64arr[interfaceName+instance], err = strconv.ParseUint(fields[4], 10, 64) - if err != nil { - return nil, fmt.Errorf("cannot parse ipackets64: %w", err) - } - case "idrops64": - idrops64arr[interfaceName+instance], err = strconv.ParseUint(fields[4], 10, 64) - if err != nil { - return nil, fmt.Errorf("cannot parse idrops64: %w", err) - } - case "ierrors": - ierrorsarr[interfaceName+instance], err = strconv.ParseUint(fields[4], 10, 64) - if err != nil { - return nil, fmt.Errorf("cannot parse ierrors: %w", err) - } - case "obytes64": - obytes64arr[interfaceName+instance], err = strconv.ParseUint(fields[4], 10, 64) - if err != nil { - return nil, fmt.Errorf("cannot parse obytes64: %w", err) - } - case "opackets64": - opackets64arr[interfaceName+instance], err = strconv.ParseUint(fields[4], 10, 64) - if err != nil { - return nil, fmt.Errorf("cannot parse opackets64: %w", err) - } - case "odrops64": - odrops64arr[interfaceName+instance], err = strconv.ParseUint(fields[4], 10, 64) - if err != nil { - return nil, fmt.Errorf("cannot parse odrops64: %w", err) - } - case "oerrors": - oerrorsarr[interfaceName+instance], err = strconv.ParseUint(fields[4], 10, 64) - if err != nil { - return nil, fmt.Errorf("cannot parse oerrors: %w", err) - } - } - } - ret := make([]IOCountersStat, 0) - for k := range rbytes64arr { - nic := IOCountersStat{ - Name: k, - BytesRecv: rbytes64arr[k], - PacketsRecv: ipackets64arr[k], - Errin: ierrorsarr[k], - Dropin: idrops64arr[k], - BytesSent: obytes64arr[k], - PacketsSent: opackets64arr[k], - Errout: oerrorsarr[k], - Dropout: odrops64arr[k], - } - ret = append(ret, nic) - } - - if !pernic { - return getIOCountersAll(ret) - } - - return ret, nil -} - -func Connections(kind string) ([]ConnectionStat, error) { - return ConnectionsWithContext(context.Background(), kind) -} - -func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { - return 
[]ConnectionStat{}, common.ErrNotImplementedError -} - -func FilterCounters() ([]FilterStat, error) { - return FilterCountersWithContext(context.Background()) -} - -func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { - return []FilterStat{}, common.ErrNotImplementedError -} - -func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { - return ProtoCountersWithContext(context.Background(), protocols) -} - -func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { - return []ProtoCountersStat{}, common.ErrNotImplementedError -} diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_unix.go b/vendor/github.com/shirou/gopsutil/v3/net/net_unix.go deleted file mode 100644 index cb846e28a6d..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_unix.go +++ /dev/null @@ -1,224 +0,0 @@ -//go:build freebsd || darwin -// +build freebsd darwin - -package net - -import ( - "context" - "fmt" - "net" - "strconv" - "strings" - "syscall" - - "github.com/shirou/gopsutil/v3/internal/common" -) - -// Return a list of network connections opened. -func Connections(kind string) ([]ConnectionStat, error) { - return ConnectionsWithContext(context.Background(), kind) -} - -func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { - return ConnectionsPidWithContext(ctx, kind, 0) -} - -// Return a list of network connections opened returning at most `max` -// connections for each running process. -func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) { - return ConnectionsMaxWithContext(context.Background(), kind, max) -} - -func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { - return []ConnectionStat{}, common.ErrNotImplementedError -} - -// Return a list of network connections opened by a process. -func ConnectionsPid(kind string, pid int32) ([]ConnectionStat, error) { - return ConnectionsPidWithContext(context.Background(), kind, pid) -} - -func ConnectionsPidWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { - var ret []ConnectionStat - - args := []string{"-i"} - switch strings.ToLower(kind) { - default: - fallthrough - case "": - fallthrough - case "all": - fallthrough - case "inet": - args = append(args, "tcp", "-i", "udp") - case "inet4": - args = append(args, "4") - case "inet6": - args = append(args, "6") - case "tcp": - args = append(args, "tcp") - case "tcp4": - args = append(args, "4tcp") - case "tcp6": - args = append(args, "6tcp") - case "udp": - args = append(args, "udp") - case "udp4": - args = append(args, "4udp") - case "udp6": - args = append(args, "6udp") - case "unix": - args = []string{"-U"} - } - - r, err := common.CallLsofWithContext(ctx, invoke, pid, args...) 
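-	// r holds one lsof output line per open descriptor, including a
-	// header line beginning with "COMMAND" that the loop below skips.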
- if err != nil { - return nil, err - } - for _, rr := range r { - if strings.HasPrefix(rr, "COMMAND") { - continue - } - n, err := parseNetLine(rr) - if err != nil { - continue - } - - ret = append(ret, n) - } - - return ret, nil -} - -var constMap = map[string]int{ - "unix": syscall.AF_UNIX, - "TCP": syscall.SOCK_STREAM, - "UDP": syscall.SOCK_DGRAM, - "IPv4": syscall.AF_INET, - "IPv6": syscall.AF_INET6, -} - -func parseNetLine(line string) (ConnectionStat, error) { - f := strings.Fields(line) - if len(f) < 8 { - return ConnectionStat{}, fmt.Errorf("wrong line,%s", line) - } - - if len(f) == 8 { - f = append(f, f[7]) - f[7] = "unix" - } - - pid, err := strconv.Atoi(f[1]) - if err != nil { - return ConnectionStat{}, err - } - fd, err := strconv.Atoi(strings.Trim(f[3], "u")) - if err != nil { - return ConnectionStat{}, fmt.Errorf("unknown fd, %s", f[3]) - } - netFamily, ok := constMap[f[4]] - if !ok { - return ConnectionStat{}, fmt.Errorf("unknown family, %s", f[4]) - } - netType, ok := constMap[f[7]] - if !ok { - return ConnectionStat{}, fmt.Errorf("unknown type, %s", f[7]) - } - - var laddr, raddr Addr - if f[7] == "unix" { - laddr.IP = f[8] - } else { - laddr, raddr, err = parseNetAddr(f[8]) - if err != nil { - return ConnectionStat{}, fmt.Errorf("failed to parse netaddr, %s", f[8]) - } - } - - n := ConnectionStat{ - Fd: uint32(fd), - Family: uint32(netFamily), - Type: uint32(netType), - Laddr: laddr, - Raddr: raddr, - Pid: int32(pid), - } - if len(f) == 10 { - n.Status = strings.Trim(f[9], "()") - } - - return n, nil -} - -func parseNetAddr(line string) (laddr Addr, raddr Addr, err error) { - parse := func(l string) (Addr, error) { - host, port, err := net.SplitHostPort(l) - if err != nil { - return Addr{}, fmt.Errorf("wrong addr, %s", l) - } - lport, err := strconv.Atoi(port) - if err != nil { - return Addr{}, err - } - return Addr{IP: host, Port: uint32(lport)}, nil - } - - addrs := strings.Split(line, "->") - if len(addrs) == 0 { - return laddr, raddr, fmt.Errorf("wrong netaddr, %s", line) - } - laddr, err = parse(addrs[0]) - if len(addrs) == 2 { // remote addr exists - raddr, err = parse(addrs[1]) - if err != nil { - return laddr, raddr, err - } - } - - return laddr, raddr, err -} - -// Return up to `max` network connections opened by a process. -func ConnectionsPidMax(kind string, pid int32, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithContext(context.Background(), kind, pid, max) -} - -func ConnectionsPidMaxWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { - return []ConnectionStat{}, common.ErrNotImplementedError -} - -// Return a list of network connections opened, omitting `Uids`. -// WithoutUids functions are reliant on implementation details. They may be altered to be an alias for Connections or be -// removed from the API in the future. 
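-// On freebsd and darwin the WithoutUids chain below bottoms out in
-// connectionsPidMaxWithoutUidsWithContext, which reports
-// common.ErrNotImplementedError; only the lsof-backed Connections and
-// ConnectionsPid variants above return data.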
-func ConnectionsWithoutUids(kind string) ([]ConnectionStat, error) { - return ConnectionsWithoutUidsWithContext(context.Background(), kind) -} - -func ConnectionsWithoutUidsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { - return ConnectionsMaxWithoutUidsWithContext(ctx, kind, 0) -} - -func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, max) -} - -func ConnectionsPidWithoutUids(kind string, pid int32) ([]ConnectionStat, error) { - return ConnectionsPidWithoutUidsWithContext(context.Background(), kind, pid) -} - -func ConnectionsPidWithoutUidsWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, 0) -} - -func ConnectionsPidMaxWithoutUids(kind string, pid int32, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, max) -} - -func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { - return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, max) -} - -func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { - return []ConnectionStat{}, common.ErrNotImplementedError -} diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process.go b/vendor/github.com/shirou/gopsutil/v3/process/process.go deleted file mode 100644 index 1bb27abf8e8..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/process/process.go +++ /dev/null @@ -1,627 +0,0 @@ -package process - -import ( - "context" - "encoding/json" - "errors" - "runtime" - "sort" - "sync" - "time" - - "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" - "github.com/shirou/gopsutil/v3/mem" - "github.com/shirou/gopsutil/v3/net" -) - -var ( - invoke common.Invoker = common.Invoke{} - ErrorNoChildren = errors.New("process does not have children") - ErrorProcessNotRunning = errors.New("process does not exist") - ErrorNotPermitted = errors.New("operation not permitted") -) - -type Process struct { - Pid int32 `json:"pid"` - name string - status string - parent int32 - parentMutex sync.RWMutex // for windows ppid cache - numCtxSwitches *NumCtxSwitchesStat - uids []int32 - gids []int32 - groups []int32 - numThreads int32 - memInfo *MemoryInfoStat - sigInfo *SignalInfoStat - createTime int64 - - lastCPUTimes *cpu.TimesStat - lastCPUTime time.Time - - tgid int32 -} - -// Process status -const ( - // Running marks a task a running or runnable (on the run queue) - Running = "running" - // Blocked marks a task waiting on a short, uninterruptible operation (usually I/O) - Blocked = "blocked" - // Idle marks a task sleeping for more than about 20 seconds - Idle = "idle" - // Lock marks a task waiting to acquire a lock - Lock = "lock" - // Sleep marks task waiting for short, interruptible operation - Sleep = "sleep" - // Stop marks a stopped process - Stop = "stop" - // Wait marks an idle interrupt thread (or paging in pre 2.6.xx Linux) - Wait = "wait" - // Zombie marks a defunct process, terminated but not reaped by its parent - Zombie = "zombie" - - // Solaris states. 
See https://github.com/collectd/collectd/blob/1da3305c10c8ff9a63081284cf3d4bb0f6daffd8/src/processes.c#L2115 - Daemon = "daemon" - Detached = "detached" - System = "system" - Orphan = "orphan" - - UnknownState = "" -) - -type OpenFilesStat struct { - Path string `json:"path"` - Fd uint64 `json:"fd"` -} - -type MemoryInfoStat struct { - RSS uint64 `json:"rss"` // bytes - VMS uint64 `json:"vms"` // bytes - HWM uint64 `json:"hwm"` // bytes - Data uint64 `json:"data"` // bytes - Stack uint64 `json:"stack"` // bytes - Locked uint64 `json:"locked"` // bytes - Swap uint64 `json:"swap"` // bytes -} - -type SignalInfoStat struct { - PendingProcess uint64 `json:"pending_process"` - PendingThread uint64 `json:"pending_thread"` - Blocked uint64 `json:"blocked"` - Ignored uint64 `json:"ignored"` - Caught uint64 `json:"caught"` -} - -type RlimitStat struct { - Resource int32 `json:"resource"` - Soft uint64 `json:"soft"` - Hard uint64 `json:"hard"` - Used uint64 `json:"used"` -} - -type IOCountersStat struct { - ReadCount uint64 `json:"readCount"` - WriteCount uint64 `json:"writeCount"` - ReadBytes uint64 `json:"readBytes"` - WriteBytes uint64 `json:"writeBytes"` -} - -type NumCtxSwitchesStat struct { - Voluntary int64 `json:"voluntary"` - Involuntary int64 `json:"involuntary"` -} - -type PageFaultsStat struct { - MinorFaults uint64 `json:"minorFaults"` - MajorFaults uint64 `json:"majorFaults"` - ChildMinorFaults uint64 `json:"childMinorFaults"` - ChildMajorFaults uint64 `json:"childMajorFaults"` -} - -// Resource limit constants are from /usr/include/x86_64-linux-gnu/bits/resource.h -// from libc6-dev package in Ubuntu 16.10 -const ( - RLIMIT_CPU int32 = 0 - RLIMIT_FSIZE int32 = 1 - RLIMIT_DATA int32 = 2 - RLIMIT_STACK int32 = 3 - RLIMIT_CORE int32 = 4 - RLIMIT_RSS int32 = 5 - RLIMIT_NPROC int32 = 6 - RLIMIT_NOFILE int32 = 7 - RLIMIT_MEMLOCK int32 = 8 - RLIMIT_AS int32 = 9 - RLIMIT_LOCKS int32 = 10 - RLIMIT_SIGPENDING int32 = 11 - RLIMIT_MSGQUEUE int32 = 12 - RLIMIT_NICE int32 = 13 - RLIMIT_RTPRIO int32 = 14 - RLIMIT_RTTIME int32 = 15 -) - -func (p Process) String() string { - s, _ := json.Marshal(p) - return string(s) -} - -func (o OpenFilesStat) String() string { - s, _ := json.Marshal(o) - return string(s) -} - -func (m MemoryInfoStat) String() string { - s, _ := json.Marshal(m) - return string(s) -} - -func (r RlimitStat) String() string { - s, _ := json.Marshal(r) - return string(s) -} - -func (i IOCountersStat) String() string { - s, _ := json.Marshal(i) - return string(s) -} - -func (p NumCtxSwitchesStat) String() string { - s, _ := json.Marshal(p) - return string(s) -} - -var enableBootTimeCache bool - -// EnableBootTimeCache change cache behavior of BootTime. If true, cache BootTime value. Default is false. -func EnableBootTimeCache(enable bool) { - enableBootTimeCache = enable -} - -// Pids returns a slice of process ID list which are running now. -func Pids() ([]int32, error) { - return PidsWithContext(context.Background()) -} - -func PidsWithContext(ctx context.Context) ([]int32, error) { - pids, err := pidsWithContext(ctx) - sort.Slice(pids, func(i, j int) bool { return pids[i] < pids[j] }) - return pids, err -} - -// Processes returns a slice of pointers to Process structs for all -// currently running processes. -func Processes() ([]*Process, error) { - return ProcessesWithContext(context.Background()) -} - -// NewProcess creates a new Process instance, it only stores the pid and -// checks that the process exists. 
Other method on Process can be used -// to get more information about the process. An error will be returned -// if the process does not exist. -func NewProcess(pid int32) (*Process, error) { - return NewProcessWithContext(context.Background(), pid) -} - -func NewProcessWithContext(ctx context.Context, pid int32) (*Process, error) { - p := &Process{ - Pid: pid, - } - - exists, err := PidExistsWithContext(ctx, pid) - if err != nil { - return p, err - } - if !exists { - return p, ErrorProcessNotRunning - } - p.CreateTimeWithContext(ctx) - return p, nil -} - -func PidExists(pid int32) (bool, error) { - return PidExistsWithContext(context.Background(), pid) -} - -// Background returns true if the process is in background, false otherwise. -func (p *Process) Background() (bool, error) { - return p.BackgroundWithContext(context.Background()) -} - -func (p *Process) BackgroundWithContext(ctx context.Context) (bool, error) { - fg, err := p.ForegroundWithContext(ctx) - if err != nil { - return false, err - } - return !fg, err -} - -// If interval is 0, return difference from last call(non-blocking). -// If interval > 0, wait interval sec and return difference between start and end. -func (p *Process) Percent(interval time.Duration) (float64, error) { - return p.PercentWithContext(context.Background(), interval) -} - -func (p *Process) PercentWithContext(ctx context.Context, interval time.Duration) (float64, error) { - cpuTimes, err := p.TimesWithContext(ctx) - if err != nil { - return 0, err - } - now := time.Now() - - if interval > 0 { - p.lastCPUTimes = cpuTimes - p.lastCPUTime = now - if err := common.Sleep(ctx, interval); err != nil { - return 0, err - } - cpuTimes, err = p.TimesWithContext(ctx) - now = time.Now() - if err != nil { - return 0, err - } - } else { - if p.lastCPUTimes == nil { - // invoked first time - p.lastCPUTimes = cpuTimes - p.lastCPUTime = now - return 0, nil - } - } - - numcpu := runtime.NumCPU() - delta := (now.Sub(p.lastCPUTime).Seconds()) * float64(numcpu) - ret := calculatePercent(p.lastCPUTimes, cpuTimes, delta, numcpu) - p.lastCPUTimes = cpuTimes - p.lastCPUTime = now - return ret, nil -} - -// IsRunning returns whether the process is still running or not. -func (p *Process) IsRunning() (bool, error) { - return p.IsRunningWithContext(context.Background()) -} - -func (p *Process) IsRunningWithContext(ctx context.Context) (bool, error) { - createTime, err := p.CreateTimeWithContext(ctx) - if err != nil { - return false, err - } - p2, err := NewProcessWithContext(ctx, p.Pid) - if errors.Is(err, ErrorProcessNotRunning) { - return false, nil - } - createTime2, err := p2.CreateTimeWithContext(ctx) - if err != nil { - return false, err - } - return createTime == createTime2, nil -} - -// CreateTime returns created time of the process in milliseconds since the epoch, in UTC. 
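-// A small usage sketch, converting the value back to a time.Time:
-//
-//	ms, err := p.CreateTime()
-//	if err == nil {
-//		started := time.Unix(0, ms*int64(time.Millisecond))
-//		fmt.Println("started:", started.UTC())
-//	}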
-func (p *Process) CreateTime() (int64, error) { - return p.CreateTimeWithContext(context.Background()) -} - -func (p *Process) CreateTimeWithContext(ctx context.Context) (int64, error) { - if p.createTime != 0 { - return p.createTime, nil - } - createTime, err := p.createTimeWithContext(ctx) - p.createTime = createTime - return p.createTime, err -} - -func calculatePercent(t1, t2 *cpu.TimesStat, delta float64, numcpu int) float64 { - if delta == 0 { - return 0 - } - delta_proc := t2.Total() - t1.Total() - overall_percent := ((delta_proc / delta) * 100) * float64(numcpu) - return overall_percent -} - -// MemoryPercent returns how many percent of the total RAM this process uses -func (p *Process) MemoryPercent() (float32, error) { - return p.MemoryPercentWithContext(context.Background()) -} - -func (p *Process) MemoryPercentWithContext(ctx context.Context) (float32, error) { - machineMemory, err := mem.VirtualMemoryWithContext(ctx) - if err != nil { - return 0, err - } - total := machineMemory.Total - - processMemory, err := p.MemoryInfoWithContext(ctx) - if err != nil { - return 0, err - } - used := processMemory.RSS - - return (100 * float32(used) / float32(total)), nil -} - -// CPUPercent returns how many percent of the CPU time this process uses -func (p *Process) CPUPercent() (float64, error) { - return p.CPUPercentWithContext(context.Background()) -} - -func (p *Process) CPUPercentWithContext(ctx context.Context) (float64, error) { - crt_time, err := p.createTimeWithContext(ctx) - if err != nil { - return 0, err - } - - cput, err := p.TimesWithContext(ctx) - if err != nil { - return 0, err - } - - created := time.Unix(0, crt_time*int64(time.Millisecond)) - totalTime := time.Since(created).Seconds() - if totalTime <= 0 { - return 0, nil - } - - return 100 * cput.Total() / totalTime, nil -} - -// Groups returns all group IDs(include supplementary groups) of the process as a slice of the int -func (p *Process) Groups() ([]int32, error) { - return p.GroupsWithContext(context.Background()) -} - -// Ppid returns Parent Process ID of the process. -func (p *Process) Ppid() (int32, error) { - return p.PpidWithContext(context.Background()) -} - -// Name returns name of the process. -func (p *Process) Name() (string, error) { - return p.NameWithContext(context.Background()) -} - -// Exe returns executable path of the process. -func (p *Process) Exe() (string, error) { - return p.ExeWithContext(context.Background()) -} - -// Cmdline returns the command line arguments of the process as a string with -// each argument separated by 0x20 ascii character. -func (p *Process) Cmdline() (string, error) { - return p.CmdlineWithContext(context.Background()) -} - -// CmdlineSlice returns the command line arguments of the process as a slice with each -// element being an argument. -func (p *Process) CmdlineSlice() ([]string, error) { - return p.CmdlineSliceWithContext(context.Background()) -} - -// Cwd returns current working directory of the process. -func (p *Process) Cwd() (string, error) { - return p.CwdWithContext(context.Background()) -} - -// Parent returns parent Process of the process. -func (p *Process) Parent() (*Process, error) { - return p.ParentWithContext(context.Background()) -} - -// ParentWithContext returns parent Process of the process. -func (p *Process) ParentWithContext(ctx context.Context) (*Process, error) { - ppid, err := p.PpidWithContext(ctx) - if err != nil { - return nil, err - } - return NewProcessWithContext(ctx, ppid) -} - -// Status returns the process status. 
-// The return value is one of the following letters:
-// R: Running  S: Sleep  T: Stop  I: Idle
-// Z: Zombie  W: Wait  L: Lock
-// The letters are the same across all supported platforms.
-func (p *Process) Status() ([]string, error) {
-	return p.StatusWithContext(context.Background())
-}
-
-// Foreground returns true if the process is in foreground, false otherwise.
-func (p *Process) Foreground() (bool, error) {
-	return p.ForegroundWithContext(context.Background())
-}
-
-// Uids returns the user IDs of the process as a slice of int32.
-func (p *Process) Uids() ([]int32, error) {
-	return p.UidsWithContext(context.Background())
-}
-
-// Gids returns the group IDs of the process as a slice of int32.
-func (p *Process) Gids() ([]int32, error) {
-	return p.GidsWithContext(context.Background())
-}
-
-// Terminal returns the terminal associated with the process.
-func (p *Process) Terminal() (string, error) {
-	return p.TerminalWithContext(context.Background())
-}
-
-// Nice returns the nice value (priority) of the process.
-func (p *Process) Nice() (int32, error) {
-	return p.NiceWithContext(context.Background())
-}
-
-// IOnice returns the process I/O nice value (priority).
-func (p *Process) IOnice() (int32, error) {
-	return p.IOniceWithContext(context.Background())
-}
-
-// Rlimit returns resource limits.
-func (p *Process) Rlimit() ([]RlimitStat, error) {
-	return p.RlimitWithContext(context.Background())
-}
-
-// RlimitUsage returns resource limits.
-// If gatherUsed is true, the currently used value will be gathered and added
-// to the resulting RlimitStat.
-func (p *Process) RlimitUsage(gatherUsed bool) ([]RlimitStat, error) {
-	return p.RlimitUsageWithContext(context.Background(), gatherUsed)
-}
-
-// IOCounters returns I/O counters.
-func (p *Process) IOCounters() (*IOCountersStat, error) {
-	return p.IOCountersWithContext(context.Background())
-}
-
-// NumCtxSwitches returns the number of context switches of the process.
-func (p *Process) NumCtxSwitches() (*NumCtxSwitchesStat, error) {
-	return p.NumCtxSwitchesWithContext(context.Background())
-}
-
-// NumFDs returns the number of file descriptors used by the process.
-func (p *Process) NumFDs() (int32, error) {
-	return p.NumFDsWithContext(context.Background())
-}
-
-// NumThreads returns the number of threads used by the process.
-func (p *Process) NumThreads() (int32, error) {
-	return p.NumThreadsWithContext(context.Background())
-}
-
-func (p *Process) Threads() (map[int32]*cpu.TimesStat, error) {
-	return p.ThreadsWithContext(context.Background())
-}
-
-// Times returns CPU times of the process.
-func (p *Process) Times() (*cpu.TimesStat, error) {
-	return p.TimesWithContext(context.Background())
-}
-
-// CPUAffinity returns the CPU affinity of the process.
-func (p *Process) CPUAffinity() ([]int32, error) {
-	return p.CPUAffinityWithContext(context.Background())
-}
-
-// MemoryInfo returns generic process memory information,
-// such as RSS and VMS.
-func (p *Process) MemoryInfo() (*MemoryInfoStat, error) {
-	return p.MemoryInfoWithContext(context.Background())
-}
-
-// MemoryInfoEx returns platform-specific process memory information.
-func (p *Process) MemoryInfoEx() (*MemoryInfoExStat, error) {
-	return p.MemoryInfoExWithContext(context.Background())
-}
-
-// PageFaults returns the process's page fault counters.
-func (p *Process) PageFaults() (*PageFaultsStat, error) {
-	return p.PageFaultsWithContext(context.Background())
-}
-
-// Children returns the children of the process, represented as a slice
-// of pointers to Process.
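-// A short usage sketch:
-//
-//	kids, err := p.Children()
-//	if err == nil {
-//		for _, child := range kids {
-//			fmt.Println(child.Pid)
-//		}
-//	}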
-func (p *Process) Children() ([]*Process, error) {
-	return p.ChildrenWithContext(context.Background())
}
-
-// OpenFiles returns a slice of OpenFilesStat opened by the process.
-// OpenFilesStat includes a file path and file descriptor.
-func (p *Process) OpenFiles() ([]OpenFilesStat, error) {
-	return p.OpenFilesWithContext(context.Background())
-}
-
-// Connections returns a slice of net.ConnectionStat used by the process.
-// This returns all kinds of connections: TCP, UDP, and UNIX.
-func (p *Process) Connections() ([]net.ConnectionStat, error) {
-	return p.ConnectionsWithContext(context.Background())
-}
-
-// ConnectionsMax returns a slice of at most `max` net.ConnectionStat used by the process.
-func (p *Process) ConnectionsMax(max int) ([]net.ConnectionStat, error) {
-	return p.ConnectionsMaxWithContext(context.Background(), max)
-}
-
-// MemoryMaps gets memory maps from /proc/(pid)/smaps.
-func (p *Process) MemoryMaps(grouped bool) (*[]MemoryMapsStat, error) {
-	return p.MemoryMapsWithContext(context.Background(), grouped)
-}
-
-// Tgid returns the thread group ID of the process.
-func (p *Process) Tgid() (int32, error) {
-	return p.TgidWithContext(context.Background())
-}
-
-// SendSignal sends a unix.Signal to the process.
-func (p *Process) SendSignal(sig Signal) error {
-	return p.SendSignalWithContext(context.Background(), sig)
-}
-
-// Suspend sends SIGSTOP to the process.
-func (p *Process) Suspend() error {
-	return p.SuspendWithContext(context.Background())
-}
-
-// Resume sends SIGCONT to the process.
-func (p *Process) Resume() error {
-	return p.ResumeWithContext(context.Background())
-}
-
-// Terminate sends SIGTERM to the process.
-func (p *Process) Terminate() error {
-	return p.TerminateWithContext(context.Background())
-}
-
-// Kill sends SIGKILL to the process.
-func (p *Process) Kill() error {
-	return p.KillWithContext(context.Background())
-}
-
-// Username returns the username of the process.
-func (p *Process) Username() (string, error) {
-	return p.UsernameWithContext(context.Background())
-}
-
-// Environ returns the environment variables of the process.
-func (p *Process) Environ() ([]string, error) {
-	return p.EnvironWithContext(context.Background())
-}
-
-// convertStatusChar maps a single status letter, as reported by the ps
-// command on different platforms, to a platform-independent status string.
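-// For example, "R" maps to Running, "Z" to Zombie, and "t" (stopped by the
-// debugger during tracing on Linux) to Stop; any unrecognized letter yields
-// UnknownState.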
-func convertStatusChar(letter string) string { - // Sources - // Darwin: http://www.mywebuniversity.com/Man_Pages/Darwin/man_ps.html - // FreeBSD: https://www.freebsd.org/cgi/man.cgi?ps - // Linux https://man7.org/linux/man-pages/man1/ps.1.html - // OpenBSD: https://man.openbsd.org/ps.1#state - // Solaris: https://github.com/collectd/collectd/blob/1da3305c10c8ff9a63081284cf3d4bb0f6daffd8/src/processes.c#L2115 - switch letter { - case "A": - return Daemon - case "D", "U": - return Blocked - case "E": - return Detached - case "I": - return Idle - case "L": - return Lock - case "O": - return Orphan - case "R": - return Running - case "S": - return Sleep - case "T", "t": - // "t" is used by Linux to signal stopped by the debugger during tracing - return Stop - case "W": - return Wait - case "Y": - return System - case "Z": - return Zombie - default: - return UnknownState - } -} diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin.go b/vendor/github.com/shirou/gopsutil/v3/process/process_darwin.go deleted file mode 100644 index 176661cbd6b..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin.go +++ /dev/null @@ -1,325 +0,0 @@ -//go:build darwin -// +build darwin - -package process - -import ( - "context" - "fmt" - "path/filepath" - "strconv" - "strings" - - "github.com/tklauser/go-sysconf" - "golang.org/x/sys/unix" - - "github.com/shirou/gopsutil/v3/internal/common" - "github.com/shirou/gopsutil/v3/net" -) - -// copied from sys/sysctl.h -const ( - CTLKern = 1 // "high kernel": proc, limits - KernProc = 14 // struct: process entries - KernProcPID = 1 // by process id - KernProcProc = 8 // only return procs - KernProcAll = 0 // everything - KernProcPathname = 12 // path to executable -) - -var clockTicks = 100 // default value - -func init() { - clkTck, err := sysconf.Sysconf(sysconf.SC_CLK_TCK) - // ignore errors - if err == nil { - clockTicks = int(clkTck) - } -} - -type _Ctype_struct___0 struct { - Pad uint64 -} - -func pidsWithContext(ctx context.Context) ([]int32, error) { - var ret []int32 - - kprocs, err := unix.SysctlKinfoProcSlice("kern.proc.all") - if err != nil { - return ret, err - } - - for _, proc := range kprocs { - ret = append(ret, int32(proc.Proc.P_pid)) - } - - return ret, nil -} - -func (p *Process) PpidWithContext(ctx context.Context) (int32, error) { - k, err := p.getKProc() - if err != nil { - return 0, err - } - - return k.Eproc.Ppid, nil -} - -func (p *Process) NameWithContext(ctx context.Context) (string, error) { - k, err := p.getKProc() - if err != nil { - return "", err - } - - name := common.ByteToString(k.Proc.P_comm[:]) - - if len(name) >= 15 { - cmdName, err := p.cmdNameWithContext(ctx) - if err != nil { - return "", err - } - if len(cmdName) > 0 { - extendedName := filepath.Base(cmdName) - if strings.HasPrefix(extendedName, p.name) { - name = extendedName - } - } - } - - return name, nil -} - -func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) { - k, err := p.getKProc() - if err != nil { - return 0, err - } - - return k.Proc.P_starttime.Sec*1000 + int64(k.Proc.P_starttime.Usec)/1000, nil -} - -func (p *Process) StatusWithContext(ctx context.Context) ([]string, error) { - r, err := callPsWithContext(ctx, "state", p.Pid, false, false) - if err != nil { - return []string{""}, err - } - status := convertStatusChar(r[0][0][0:1]) - return []string{status}, err -} - -func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { - // see 
https://github.com/shirou/gopsutil/issues/596#issuecomment-432707831 for implementation details - pid := p.Pid - out, err := invoke.CommandWithContext(ctx, "ps", "-o", "stat=", "-p", strconv.Itoa(int(pid))) - if err != nil { - return false, err - } - return strings.IndexByte(string(out), '+') != -1, nil -} - -func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { - k, err := p.getKProc() - if err != nil { - return nil, err - } - - // See: http://unix.superglobalmegacorp.com/Net2/newsrc/sys/ucred.h.html - userEffectiveUID := int32(k.Eproc.Ucred.Uid) - - return []int32{userEffectiveUID}, nil -} - -func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { - k, err := p.getKProc() - if err != nil { - return nil, err - } - - gids := make([]int32, 0, 3) - gids = append(gids, int32(k.Eproc.Pcred.P_rgid), int32(k.Eproc.Pcred.P_rgid), int32(k.Eproc.Pcred.P_svgid)) - - return gids, nil -} - -func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) { - return nil, common.ErrNotImplementedError - // k, err := p.getKProc() - // if err != nil { - // return nil, err - // } - - // groups := make([]int32, k.Eproc.Ucred.Ngroups) - // for i := int16(0); i < k.Eproc.Ucred.Ngroups; i++ { - // groups[i] = int32(k.Eproc.Ucred.Groups[i]) - // } - - // return groups, nil -} - -func (p *Process) TerminalWithContext(ctx context.Context) (string, error) { - return "", common.ErrNotImplementedError - /* - k, err := p.getKProc() - if err != nil { - return "", err - } - - ttyNr := uint64(k.Eproc.Tdev) - termmap, err := getTerminalMap() - if err != nil { - return "", err - } - - return termmap[ttyNr], nil - */ -} - -func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { - k, err := p.getKProc() - if err != nil { - return 0, err - } - return int32(k.Proc.P_nice), nil -} - -func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) { - return nil, common.ErrNotImplementedError -} - -func convertCPUTimes(s string) (ret float64, err error) { - var t int - var _tmp string - if strings.Contains(s, ":") { - _t := strings.Split(s, ":") - switch len(_t) { - case 3: - hour, err := strconv.Atoi(_t[0]) - if err != nil { - return ret, err - } - t += hour * 60 * 60 * clockTicks - - mins, err := strconv.Atoi(_t[1]) - if err != nil { - return ret, err - } - t += mins * 60 * clockTicks - _tmp = _t[2] - case 2: - mins, err := strconv.Atoi(_t[0]) - if err != nil { - return ret, err - } - t += mins * 60 * clockTicks - _tmp = _t[1] - case 1, 0: - _tmp = s - default: - return ret, fmt.Errorf("wrong cpu time string") - } - } else { - _tmp = s - } - - _t := strings.Split(_tmp, ".") - if err != nil { - return ret, err - } - h, err := strconv.Atoi(_t[0]) - t += h * clockTicks - h, err = strconv.Atoi(_t[1]) - t += h - return float64(t) / float64(clockTicks), nil -} - -func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { - pids, err := common.CallPgrepWithContext(ctx, invoke, p.Pid) - if err != nil { - return nil, err - } - ret := make([]*Process, 0, len(pids)) - for _, pid := range pids { - np, err := NewProcessWithContext(ctx, pid) - if err != nil { - return nil, err - } - ret = append(ret, np) - } - return ret, nil -} - -func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) { - return net.ConnectionsPidWithContext(ctx, "all", p.Pid) -} - -func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { - return 
net.ConnectionsPidMaxWithContext(ctx, "all", p.Pid, max)
-}
-
-func ProcessesWithContext(ctx context.Context) ([]*Process, error) {
-	out := []*Process{}
-
-	pids, err := PidsWithContext(ctx)
-	if err != nil {
-		return out, err
-	}
-
-	for _, pid := range pids {
-		p, err := NewProcessWithContext(ctx, pid)
-		if err != nil {
-			continue
-		}
-		out = append(out, p)
-	}
-
-	return out, nil
-}
-
-// getKProc returns a proc as defined here:
-// http://unix.superglobalmegacorp.com/Net2/newsrc/sys/kinfo_proc.h.html
-func (p *Process) getKProc() (*unix.KinfoProc, error) {
-	return unix.SysctlKinfoProc("kern.proc.pid", int(p.Pid))
-}
-
-// callPsWithContext invokes the ps command. The header line is removed from
-// the returned value, and each remaining line is split on spaces; the caller
-// is responsible for further processing (and must not pass invalid args).
-// If the pid argument is 0, information for all processes is returned.
-func callPsWithContext(ctx context.Context, arg string, pid int32, threadOption bool, nameOption bool) ([][]string, error) {
-	var cmd []string
-	if pid == 0 { // will get from all processes.
-		cmd = []string{"-ax", "-o", arg}
-	} else if threadOption {
-		cmd = []string{"-x", "-o", arg, "-M", "-p", strconv.Itoa(int(pid))}
-	} else {
-		cmd = []string{"-x", "-o", arg, "-p", strconv.Itoa(int(pid))}
-	}
-	if nameOption {
-		cmd = append(cmd, "-c")
-	}
-	out, err := invoke.CommandWithContext(ctx, "ps", cmd...)
-	if err != nil {
-		return [][]string{}, err
-	}
-	lines := strings.Split(string(out), "\n")
-
-	var ret [][]string
-	for _, l := range lines[1:] {
-		var lr []string
-		if nameOption {
-			lr = append(lr, l)
-		} else {
-			for _, r := range strings.Split(l, " ") {
-				if r == "" {
-					continue
-				}
-				lr = append(lr, strings.TrimSpace(r))
-			}
-		}
-		if len(lr) != 0 {
-			ret = append(ret, lr)
-		}
-	}
-
-	return ret, nil
-}
diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_cgo.go b/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_cgo.go
deleted file mode 100644
index 858f08e7a41..00000000000
--- a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_cgo.go
+++ /dev/null
@@ -1,222 +0,0 @@
-//go:build darwin && cgo
-// +build darwin,cgo
-
-package process
-
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-// #include
-import "C"
-
-import (
-	"bytes"
-	"context"
-	"fmt"
-	"strings"
-	"syscall"
-	"unsafe"
-
-	"github.com/shirou/gopsutil/v3/cpu"
-)
-
-var (
-	argMax                 int
-	timescaleToNanoSeconds float64
-)
-
-func init() {
-	argMax = getArgMax()
-	timescaleToNanoSeconds = getTimeScaleToNanoSeconds()
-}
-
-func getArgMax() int {
-	var (
-		mib    = [...]C.int{C.CTL_KERN, C.KERN_ARGMAX}
-		argmax C.int
-		size   C.size_t = C.ulong(unsafe.Sizeof(argmax))
-	)
-	retval := C.sysctl(&mib[0], 2, unsafe.Pointer(&argmax), &size, C.NULL, 0)
-	if retval == 0 {
-		return int(argmax)
-	}
-	return 0
-}
-
-func getTimeScaleToNanoSeconds() float64 {
-	var timeBaseInfo C.struct_mach_timebase_info
-
-	C.mach_timebase_info(&timeBaseInfo)
-
-	return float64(timeBaseInfo.numer) / float64(timeBaseInfo.denom)
-}
-
-func (p *Process) ExeWithContext(ctx context.Context) (string, error) {
-	var c C.char // unsafe.Sizeof needs a variable
-	const bufsize = C.PROC_PIDPATHINFO_MAXSIZE * unsafe.Sizeof(c)
-	buffer := (*C.char)(C.malloc(C.size_t(bufsize)))
-	defer C.free(unsafe.Pointer(buffer))
-
-	ret, err := C.proc_pidpath(C.int(p.Pid), unsafe.Pointer(buffer), C.uint32_t(bufsize))
-	if err != nil {
-		return "", err
-	}
-	if ret <= 0 {
-		return "", fmt.Errorf("unknown error: proc_pidpath returned %d", ret)
-	}
-
-	return C.GoString(buffer), nil
-}
-
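-// A minimal Exe usage sketch (on darwin cgo builds the path is resolved via
-// proc_pidpath, as above):
-//
-//	p, err := process.NewProcess(int32(os.Getpid()))
-//	if err == nil {
-//		exe, _ := p.Exe()
-//		fmt.Println(exe)
-//	}
-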
-// CwdWithContext retrieves the Current Working Directory for the given process. -// It uses the proc_pidinfo from libproc and will only work for processes the -// EUID can access. Otherwise "operation not permitted" will be returned as the -// error. -// Note: This might also work for other *BSD OSs. -func (p *Process) CwdWithContext(ctx context.Context) (string, error) { - const vpiSize = C.sizeof_struct_proc_vnodepathinfo - vpi := (*C.struct_proc_vnodepathinfo)(C.malloc(vpiSize)) - defer C.free(unsafe.Pointer(vpi)) - ret, err := C.proc_pidinfo(C.int(p.Pid), C.PROC_PIDVNODEPATHINFO, 0, unsafe.Pointer(vpi), vpiSize) - if err != nil { - // fmt.Printf("ret: %d %T\n", ret, err) - if err == syscall.EPERM { - return "", ErrorNotPermitted - } - return "", err - } - if ret <= 0 { - return "", fmt.Errorf("unknown error: proc_pidinfo returned %d", ret) - } - if ret != C.sizeof_struct_proc_vnodepathinfo { - return "", fmt.Errorf("too few bytes; expected %d, got %d", vpiSize, ret) - } - return C.GoString(&vpi.pvi_cdir.vip_path[0]), err -} - -func procArgs(pid int32) ([]byte, int, error) { - var ( - mib = [...]C.int{C.CTL_KERN, C.KERN_PROCARGS2, C.int(pid)} - size C.size_t = C.ulong(argMax) - nargs C.int - result []byte - ) - procargs := (*C.char)(C.malloc(C.ulong(argMax))) - defer C.free(unsafe.Pointer(procargs)) - retval, err := C.sysctl(&mib[0], 3, unsafe.Pointer(procargs), &size, C.NULL, 0) - if retval == 0 { - C.memcpy(unsafe.Pointer(&nargs), unsafe.Pointer(procargs), C.sizeof_int) - result = C.GoBytes(unsafe.Pointer(procargs), C.int(size)) - // fmt.Printf("size: %d %d\n%s\n", size, nargs, hex.Dump(result)) - return result, int(nargs), nil - } - return nil, 0, err -} - -func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) { - return p.cmdlineSliceWithContext(ctx, true) -} - -func (p *Process) cmdlineSliceWithContext(ctx context.Context, fallback bool) ([]string, error) { - pargs, nargs, err := procArgs(p.Pid) - if err != nil { - return nil, err - } - // The first bytes hold the nargs int, skip it. - args := bytes.Split((pargs)[C.sizeof_int:], []byte{0}) - var argStr string - // The first element is the actual binary/command path. - // command := args[0] - var argSlice []string - // var envSlice []string - // All other, non-zero elements are arguments. The first "nargs" elements - // are the arguments. Everything else in the slice is then the environment - // of the process. 
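-	// Collect exactly nargs non-empty entries as arguments; the first
-	// non-empty entry after that belongs to the environment, so stop there.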
- for _, arg := range args[1:] { - argStr = string(arg[:]) - if len(argStr) > 0 { - if nargs > 0 { - argSlice = append(argSlice, argStr) - nargs-- - continue - } - break - // envSlice = append(envSlice, argStr) - } - } - return argSlice, err -} - -// cmdNameWithContext returns the command name (including spaces) without any arguments -func (p *Process) cmdNameWithContext(ctx context.Context) (string, error) { - r, err := p.cmdlineSliceWithContext(ctx, false) - if err != nil { - return "", err - } - - if len(r) == 0 { - return "", nil - } - - return r[0], err -} - -func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) { - r, err := p.CmdlineSliceWithContext(ctx) - if err != nil { - return "", err - } - return strings.Join(r, " "), err -} - -func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { - const tiSize = C.sizeof_struct_proc_taskinfo - ti := (*C.struct_proc_taskinfo)(C.malloc(tiSize)) - defer C.free(unsafe.Pointer(ti)) - - _, err := C.proc_pidinfo(C.int(p.Pid), C.PROC_PIDTASKINFO, 0, unsafe.Pointer(ti), tiSize) - if err != nil { - return 0, err - } - - return int32(ti.pti_threadnum), nil -} - -func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) { - const tiSize = C.sizeof_struct_proc_taskinfo - ti := (*C.struct_proc_taskinfo)(C.malloc(tiSize)) - defer C.free(unsafe.Pointer(ti)) - - _, err := C.proc_pidinfo(C.int(p.Pid), C.PROC_PIDTASKINFO, 0, unsafe.Pointer(ti), tiSize) - if err != nil { - return nil, err - } - - ret := &cpu.TimesStat{ - CPU: "cpu", - User: float64(ti.pti_total_user) * timescaleToNanoSeconds / 1e9, - System: float64(ti.pti_total_system) * timescaleToNanoSeconds / 1e9, - } - return ret, nil -} - -func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) { - const tiSize = C.sizeof_struct_proc_taskinfo - ti := (*C.struct_proc_taskinfo)(C.malloc(tiSize)) - defer C.free(unsafe.Pointer(ti)) - - _, err := C.proc_pidinfo(C.int(p.Pid), C.PROC_PIDTASKINFO, 0, unsafe.Pointer(ti), tiSize) - if err != nil { - return nil, err - } - - ret := &MemoryInfoStat{ - RSS: uint64(ti.pti_resident_size), - VMS: uint64(ti.pti_virtual_size), - Swap: uint64(ti.pti_pageins), - } - return ret, nil -} diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_nocgo.go b/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_nocgo.go deleted file mode 100644 index bc1d357df8c..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_nocgo.go +++ /dev/null @@ -1,127 +0,0 @@ -//go:build darwin && !cgo -// +build darwin,!cgo - -package process - -import ( - "context" - "fmt" - "strconv" - "strings" - - "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" -) - -func (p *Process) CwdWithContext(ctx context.Context) (string, error) { - return "", common.ErrNotImplementedError -} - -func (p *Process) ExeWithContext(ctx context.Context) (string, error) { - out, err := invoke.CommandWithContext(ctx, "lsof", "-p", strconv.Itoa(int(p.Pid)), "-Fpfn") - if err != nil { - return "", fmt.Errorf("bad call to lsof: %s", err) - } - txtFound := 0 - lines := strings.Split(string(out), "\n") - for i := 1; i < len(lines); i++ { - if lines[i] == "ftxt" { - txtFound++ - if txtFound == 2 { - return lines[i-1][1:], nil - } - } - } - return "", fmt.Errorf("missing txt data returned by lsof") -} - -func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) { - r, err := callPsWithContext(ctx, "command", p.Pid, false, false) - if err != nil { 
- return "", err - } - return strings.Join(r[0], " "), err -} - -func (p *Process) cmdNameWithContext(ctx context.Context) (string, error) { - r, err := callPsWithContext(ctx, "command", p.Pid, false, true) - if err != nil { - return "", err - } - if len(r) > 0 && len(r[0]) > 0 { - return r[0][0], err - } - - return "", err -} - -// CmdlineSliceWithContext returns the command line arguments of the process as a slice with each -// element being an argument. Because of current deficiencies in the way that the command -// line arguments are found, single arguments that have spaces in the will actually be -// reported as two separate items. In order to do something better CGO would be needed -// to use the native darwin functions. -func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) { - r, err := callPsWithContext(ctx, "command", p.Pid, false, false) - if err != nil { - return nil, err - } - return r[0], err -} - -func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { - r, err := callPsWithContext(ctx, "utime,stime", p.Pid, true, false) - if err != nil { - return 0, err - } - return int32(len(r)), nil -} - -func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) { - r, err := callPsWithContext(ctx, "utime,stime", p.Pid, false, false) - if err != nil { - return nil, err - } - - utime, err := convertCPUTimes(r[0][0]) - if err != nil { - return nil, err - } - stime, err := convertCPUTimes(r[0][1]) - if err != nil { - return nil, err - } - - ret := &cpu.TimesStat{ - CPU: "cpu", - User: utime, - System: stime, - } - return ret, nil -} - -func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) { - r, err := callPsWithContext(ctx, "rss,vsize,pagein", p.Pid, false, false) - if err != nil { - return nil, err - } - rss, err := strconv.Atoi(r[0][0]) - if err != nil { - return nil, err - } - vms, err := strconv.Atoi(r[0][1]) - if err != nil { - return nil, err - } - pagein, err := strconv.Atoi(r[0][2]) - if err != nil { - return nil, err - } - - ret := &MemoryInfoStat{ - RSS: uint64(rss) * 1024, - VMS: uint64(vms) * 1024, - Swap: uint64(pagein), - } - - return ret, nil -} diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_amd64.go b/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_amd64.go deleted file mode 100644 index 560e627d249..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_amd64.go +++ /dev/null @@ -1,192 +0,0 @@ -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs types_freebsd.go - -package process - -const ( - CTLKern = 1 - KernProc = 14 - KernProcPID = 1 - KernProcProc = 8 - KernProcPathname = 12 - KernProcArgs = 7 -) - -const ( - sizeofPtr = 0x8 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x8 - sizeofLongLong = 0x8 -) - -const ( - sizeOfKinfoVmentry = 0x488 - sizeOfKinfoProc = 0x440 -) - -const ( - SIDL = 1 - SRUN = 2 - SSLEEP = 3 - SSTOP = 4 - SZOMB = 5 - SWAIT = 6 - SLOCK = 7 -) - -type ( - _C_short int16 - _C_int int32 - _C_long int64 - _C_long_long int64 -) - -type Timespec struct { - Sec int64 - Nsec int64 -} - -type Timeval struct { - Sec int64 - Usec int64 -} - -type Rusage struct { - Utime Timeval - Stime Timeval - Maxrss int64 - Ixrss int64 - Idrss int64 - Isrss int64 - Minflt int64 - Majflt int64 - Nswap int64 - Inblock int64 - Oublock int64 - Msgsnd int64 - Msgrcv int64 - Nsignals int64 - Nvcsw int64 - Nivcsw int64 -} - -type Rlimit struct { - Cur int64 - Max int64 -} - -type KinfoProc struct { - 
Structsize int32 - Layout int32 - Args int64 /* pargs */ - Paddr int64 /* proc */ - Addr int64 /* user */ - Tracep int64 /* vnode */ - Textvp int64 /* vnode */ - Fd int64 /* filedesc */ - Vmspace int64 /* vmspace */ - Wchan int64 - Pid int32 - Ppid int32 - Pgid int32 - Tpgid int32 - Sid int32 - Tsid int32 - Jobc int16 - Spare_short1 int16 - Tdev uint32 - Siglist [16]byte /* sigset */ - Sigmask [16]byte /* sigset */ - Sigignore [16]byte /* sigset */ - Sigcatch [16]byte /* sigset */ - Uid uint32 - Ruid uint32 - Svuid uint32 - Rgid uint32 - Svgid uint32 - Ngroups int16 - Spare_short2 int16 - Groups [16]uint32 - Size uint64 - Rssize int64 - Swrss int64 - Tsize int64 - Dsize int64 - Ssize int64 - Xstat uint16 - Acflag uint16 - Pctcpu uint32 - Estcpu uint32 - Slptime uint32 - Swtime uint32 - Cow uint32 - Runtime uint64 - Start Timeval - Childtime Timeval - Flag int64 - Kiflag int64 - Traceflag int32 - Stat int8 - Nice int8 - Lock int8 - Rqindex int8 - Oncpu uint8 - Lastcpu uint8 - Tdname [17]int8 - Wmesg [9]int8 - Login [18]int8 - Lockname [9]int8 - Comm [20]int8 - Emul [17]int8 - Loginclass [18]int8 - Sparestrings [50]int8 - Spareints [7]int32 - Flag2 int32 - Fibnum int32 - Cr_flags uint32 - Jid int32 - Numthreads int32 - Tid int32 - Pri Priority - Rusage Rusage - Rusage_ch Rusage - Pcb int64 /* pcb */ - Kstack int64 - Udata int64 - Tdaddr int64 /* thread */ - Spareptrs [6]int64 - Sparelongs [12]int64 - Sflag int64 - Tdflags int64 -} - -type Priority struct { - Class uint8 - Level uint8 - Native uint8 - User uint8 -} - -type KinfoVmentry struct { - Structsize int32 - Type int32 - Start uint64 - End uint64 - Offset uint64 - Vn_fileid uint64 - Vn_fsid uint32 - Flags int32 - Resident int32 - Private_resident int32 - Protection int32 - Ref_count int32 - Shadow_count int32 - Vn_type int32 - Vn_size uint64 - Vn_rdev uint32 - Vn_mode uint16 - Status uint16 - X_kve_ispare [12]int32 - Path [1024]int8 -} diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_posix.go b/vendor/github.com/shirou/gopsutil/v3/process/process_posix.go deleted file mode 100644 index a01f9ecfc0d..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_posix.go +++ /dev/null @@ -1,185 +0,0 @@ -//go:build linux || freebsd || openbsd || darwin || solaris -// +build linux freebsd openbsd darwin solaris - -package process - -import ( - "context" - "errors" - "fmt" - "os" - "os/user" - "path/filepath" - "strconv" - "strings" - "syscall" - - "golang.org/x/sys/unix" - - "github.com/shirou/gopsutil/v3/internal/common" -) - -type Signal = syscall.Signal - -// POSIX -func getTerminalMap() (map[uint64]string, error) { - ret := make(map[uint64]string) - var termfiles []string - - d, err := os.Open("/dev") - if err != nil { - return nil, err - } - defer d.Close() - - devnames, err := d.Readdirnames(-1) - if err != nil { - return nil, err - } - for _, devname := range devnames { - if strings.HasPrefix(devname, "/dev/tty") { - termfiles = append(termfiles, "/dev/tty/"+devname) - } - } - - var ptsnames []string - ptsd, err := os.Open("/dev/pts") - if err != nil { - ptsnames, _ = filepath.Glob("/dev/ttyp*") - if ptsnames == nil { - return nil, err - } - } - defer ptsd.Close() - - if ptsnames == nil { - defer ptsd.Close() - ptsnames, err = ptsd.Readdirnames(-1) - if err != nil { - return nil, err - } - for _, ptsname := range ptsnames { - termfiles = append(termfiles, "/dev/pts/"+ptsname) - } - } else { - termfiles = ptsnames - } - - for _, name := range termfiles { - stat := unix.Stat_t{} - if err = unix.Stat(name, 
&stat); err != nil { - return nil, err - } - rdev := uint64(stat.Rdev) - ret[rdev] = strings.Replace(name, "/dev", "", -1) - } - return ret, nil -} - -// isMount is a port of python's os.path.ismount() -// https://github.com/python/cpython/blob/08ff4369afca84587b1c82034af4e9f64caddbf2/Lib/posixpath.py#L186-L216 -// https://docs.python.org/3/library/os.path.html#os.path.ismount -func isMount(path string) bool { - // Check symlinkness with os.Lstat; unix.DT_LNK is not portable - fileInfo, err := os.Lstat(path) - if err != nil { - return false - } - if fileInfo.Mode()&os.ModeSymlink != 0 { - return false - } - var stat1 unix.Stat_t - if err := unix.Lstat(path, &stat1); err != nil { - return false - } - parent := filepath.Join(path, "..") - var stat2 unix.Stat_t - if err := unix.Lstat(parent, &stat2); err != nil { - return false - } - return stat1.Dev != stat2.Dev || stat1.Ino == stat2.Ino -} - -func PidExistsWithContext(ctx context.Context, pid int32) (bool, error) { - if pid <= 0 { - return false, fmt.Errorf("invalid pid %v", pid) - } - proc, err := os.FindProcess(int(pid)) - if err != nil { - return false, err - } - - if isMount(common.HostProcWithContext(ctx)) { // if //proc exists and is mounted, check if //proc/ folder exists - _, err := os.Stat(common.HostProcWithContext(ctx, strconv.Itoa(int(pid)))) - if os.IsNotExist(err) { - return false, nil - } - return err == nil, err - } - - // procfs does not exist or is not mounted, check PID existence by signalling the pid - err = proc.Signal(syscall.Signal(0)) - if err == nil { - return true, nil - } - if errors.Is(err, os.ErrProcessDone) { - return false, nil - } - var errno syscall.Errno - if !errors.As(err, &errno) { - return false, err - } - switch errno { - case syscall.ESRCH: - return false, nil - case syscall.EPERM: - return true, nil - } - - return false, err -} - -func (p *Process) SendSignalWithContext(ctx context.Context, sig syscall.Signal) error { - process, err := os.FindProcess(int(p.Pid)) - if err != nil { - return err - } - - err = process.Signal(sig) - if err != nil { - return err - } - - return nil -} - -func (p *Process) SuspendWithContext(ctx context.Context) error { - return p.SendSignalWithContext(ctx, unix.SIGSTOP) -} - -func (p *Process) ResumeWithContext(ctx context.Context) error { - return p.SendSignalWithContext(ctx, unix.SIGCONT) -} - -func (p *Process) TerminateWithContext(ctx context.Context) error { - return p.SendSignalWithContext(ctx, unix.SIGTERM) -} - -func (p *Process) KillWithContext(ctx context.Context) error { - return p.SendSignalWithContext(ctx, unix.SIGKILL) -} - -func (p *Process) UsernameWithContext(ctx context.Context) (string, error) { - uids, err := p.UidsWithContext(ctx) - if err != nil { - return "", err - } - if len(uids) > 0 { - u, err := user.LookupId(strconv.Itoa(int(uids[0]))) - if err != nil { - return "", err - } - return u.Username, nil - } - return "", nil -} diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_windows.go b/vendor/github.com/shirou/gopsutil/v3/process/process_windows.go deleted file mode 100644 index f2053d98553..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_windows.go +++ /dev/null @@ -1,1165 +0,0 @@ -//go:build windows -// +build windows - -package process - -import ( - "bufio" - "context" - "errors" - "fmt" - "io" - "os" - "path/filepath" - "reflect" - "strings" - "syscall" - "time" - "unicode/utf16" - "unsafe" - - "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" - 
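As an aside on the signal-based fallback in PidExistsWithContext above: when procfs is unavailable, sending signal 0 exercises all of kill(2)'s permission and existence checks without delivering anything, so ESRCH means the pid is gone while EPERM means it exists but belongs to another user. A minimal standalone sketch of that probe (illustrative only, not part of the vendored code; assumes a POSIX target and the x/sys/unix module):

package main

import (
	"errors"
	"fmt"

	"golang.org/x/sys/unix"
)

// pidAlive reports whether a process exists using the kill(pid, 0) probe:
// signal 0 validates the target without delivering a signal.
func pidAlive(pid int) (bool, error) {
	err := unix.Kill(pid, 0)
	switch {
	case err == nil:
		return true, nil // we are allowed to signal it, so it exists
	case errors.Is(err, unix.ESRCH):
		return false, nil // no such process
	case errors.Is(err, unix.EPERM):
		return true, nil // exists, but owned by someone else
	default:
		return false, err
	}
}

func main() {
	alive, err := pidAlive(1) // pid 1 exists on any running POSIX system
	fmt.Println(alive, err)
}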
"github.com/shirou/gopsutil/v3/net" - "golang.org/x/sys/windows" -) - -type Signal = syscall.Signal - -var ( - modntdll = windows.NewLazySystemDLL("ntdll.dll") - procNtResumeProcess = modntdll.NewProc("NtResumeProcess") - procNtSuspendProcess = modntdll.NewProc("NtSuspendProcess") - - modpsapi = windows.NewLazySystemDLL("psapi.dll") - procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo") - procGetProcessImageFileNameW = modpsapi.NewProc("GetProcessImageFileNameW") - - advapi32 = windows.NewLazySystemDLL("advapi32.dll") - procLookupPrivilegeValue = advapi32.NewProc("LookupPrivilegeValueW") - procAdjustTokenPrivileges = advapi32.NewProc("AdjustTokenPrivileges") - - procQueryFullProcessImageNameW = common.Modkernel32.NewProc("QueryFullProcessImageNameW") - procGetPriorityClass = common.Modkernel32.NewProc("GetPriorityClass") - procGetProcessIoCounters = common.Modkernel32.NewProc("GetProcessIoCounters") - procGetNativeSystemInfo = common.Modkernel32.NewProc("GetNativeSystemInfo") - - processorArchitecture uint -) - -const processQueryInformation = windows.PROCESS_QUERY_LIMITED_INFORMATION - -type systemProcessorInformation struct { - ProcessorArchitecture uint16 - ProcessorLevel uint16 - ProcessorRevision uint16 - Reserved uint16 - ProcessorFeatureBits uint16 -} - -type systemInfo struct { - wProcessorArchitecture uint16 - wReserved uint16 - dwpageSize uint32 - lpMinimumApplicationAddress uintptr - lpMaximumApplicationAddress uintptr - dwActiveProcessorMask uintptr - dwNumberOfProcessors uint32 - dwProcessorType uint32 - dwAllocationGranularity uint32 - wProcessorLevel uint16 - wProcessorRevision uint16 -} - -// Memory_info_ex is different between OSes -type MemoryInfoExStat struct{} - -type MemoryMapsStat struct{} - -// ioCounters is an equivalent representation of IO_COUNTERS in the Windows API. 
-// https://docs.microsoft.com/windows/win32/api/winnt/ns-winnt-io_counters -type ioCounters struct { - ReadOperationCount uint64 - WriteOperationCount uint64 - OtherOperationCount uint64 - ReadTransferCount uint64 - WriteTransferCount uint64 - OtherTransferCount uint64 -} - -type processBasicInformation32 struct { - Reserved1 uint32 - PebBaseAddress uint32 - Reserved2 uint32 - Reserved3 uint32 - UniqueProcessId uint32 - Reserved4 uint32 -} - -type processBasicInformation64 struct { - Reserved1 uint64 - PebBaseAddress uint64 - Reserved2 uint64 - Reserved3 uint64 - UniqueProcessId uint64 - Reserved4 uint64 -} - -type processEnvironmentBlock32 struct { - Reserved1 [2]uint8 - BeingDebugged uint8 - Reserved2 uint8 - Reserved3 [2]uint32 - Ldr uint32 - ProcessParameters uint32 - // More fields which we don't use so far -} - -type processEnvironmentBlock64 struct { - Reserved1 [2]uint8 - BeingDebugged uint8 - Reserved2 uint8 - _ [4]uint8 // padding, since we are 64 bit, the next pointer is 64 bit aligned (when compiling for 32 bit, this is not the case without manual padding) - Reserved3 [2]uint64 - Ldr uint64 - ProcessParameters uint64 - // More fields which we don't use so far -} - -type rtlUserProcessParameters32 struct { - Reserved1 [16]uint8 - ConsoleHandle uint32 - ConsoleFlags uint32 - StdInputHandle uint32 - StdOutputHandle uint32 - StdErrorHandle uint32 - CurrentDirectoryPathNameLength uint16 - _ uint16 // Max Length - CurrentDirectoryPathAddress uint32 - CurrentDirectoryHandle uint32 - DllPathNameLength uint16 - _ uint16 // Max Length - DllPathAddress uint32 - ImagePathNameLength uint16 - _ uint16 // Max Length - ImagePathAddress uint32 - CommandLineLength uint16 - _ uint16 // Max Length - CommandLineAddress uint32 - EnvironmentAddress uint32 - // More fields which we don't use so far -} - -type rtlUserProcessParameters64 struct { - Reserved1 [16]uint8 - ConsoleHandle uint64 - ConsoleFlags uint64 - StdInputHandle uint64 - StdOutputHandle uint64 - StdErrorHandle uint64 - CurrentDirectoryPathNameLength uint16 - _ uint16 // Max Length - _ uint32 // Padding - CurrentDirectoryPathAddress uint64 - CurrentDirectoryHandle uint64 - DllPathNameLength uint16 - _ uint16 // Max Length - _ uint32 // Padding - DllPathAddress uint64 - ImagePathNameLength uint16 - _ uint16 // Max Length - _ uint32 // Padding - ImagePathAddress uint64 - CommandLineLength uint16 - _ uint16 // Max Length - _ uint32 // Padding - CommandLineAddress uint64 - EnvironmentAddress uint64 - // More fields which we don't use so far -} - -type winLUID struct { - LowPart winDWord - HighPart winLong -} - -// LUID_AND_ATTRIBUTES -type winLUIDAndAttributes struct { - Luid winLUID - Attributes winDWord -} - -// TOKEN_PRIVILEGES -type winTokenPrivileges struct { - PrivilegeCount winDWord - Privileges [1]winLUIDAndAttributes -} - -type ( - winLong int32 - winDWord uint32 -) - -func init() { - var systemInfo systemInfo - - procGetNativeSystemInfo.Call(uintptr(unsafe.Pointer(&systemInfo))) - processorArchitecture = uint(systemInfo.wProcessorArchitecture) - - // enable SeDebugPrivilege https://github.com/midstar/proci/blob/6ec79f57b90ba3d9efa2a7b16ef9c9369d4be875/proci_windows.go#L80-L119 - handle, err := syscall.GetCurrentProcess() - if err != nil { - return - } - - var token syscall.Token - err = syscall.OpenProcessToken(handle, 0x0028, &token) - if err != nil { - return - } - defer token.Close() - - tokenPrivileges := winTokenPrivileges{PrivilegeCount: 1} - lpName := syscall.StringToUTF16("SeDebugPrivilege") - ret, _, _ := 
procLookupPrivilegeValue.Call( - 0, - uintptr(unsafe.Pointer(&lpName[0])), - uintptr(unsafe.Pointer(&tokenPrivileges.Privileges[0].Luid))) - if ret == 0 { - return - } - - tokenPrivileges.Privileges[0].Attributes = 0x00000002 // SE_PRIVILEGE_ENABLED - - procAdjustTokenPrivileges.Call( - uintptr(token), - 0, - uintptr(unsafe.Pointer(&tokenPrivileges)), - uintptr(unsafe.Sizeof(tokenPrivileges)), - 0, - 0) -} - -func pidsWithContext(ctx context.Context) ([]int32, error) { - // inspired by https://gist.github.com/henkman/3083408 - // and https://github.com/giampaolo/psutil/blob/1c3a15f637521ba5c0031283da39c733fda53e4c/psutil/arch/windows/process_info.c#L315-L329 - var ret []int32 - var read uint32 = 0 - var psSize uint32 = 1024 - const dwordSize uint32 = 4 - - for { - ps := make([]uint32, psSize) - if err := windows.EnumProcesses(ps, &read); err != nil { - return nil, err - } - if uint32(len(ps)) == read/dwordSize { // ps buffer was too small to host every results, retry with a bigger one - psSize += 1024 - continue - } - for _, pid := range ps[:read/dwordSize] { - ret = append(ret, int32(pid)) - } - return ret, nil - - } -} - -func PidExistsWithContext(ctx context.Context, pid int32) (bool, error) { - if pid == 0 { // special case for pid 0 System Idle Process - return true, nil - } - if pid < 0 { - return false, fmt.Errorf("invalid pid %v", pid) - } - if pid%4 != 0 { - // OpenProcess will succeed even on non-existing pid here https://devblogs.microsoft.com/oldnewthing/20080606-00/?p=22043 - // so we list every pid just to be sure and be future-proof - pids, err := PidsWithContext(ctx) - if err != nil { - return false, err - } - for _, i := range pids { - if i == pid { - return true, err - } - } - return false, err - } - h, err := windows.OpenProcess(windows.SYNCHRONIZE, false, uint32(pid)) - if err == windows.ERROR_ACCESS_DENIED { - return true, nil - } - if err == windows.ERROR_INVALID_PARAMETER { - return false, nil - } - if err != nil { - return false, err - } - defer windows.CloseHandle(h) - event, err := windows.WaitForSingleObject(h, 0) - return event == uint32(windows.WAIT_TIMEOUT), err -} - -func (p *Process) PpidWithContext(ctx context.Context) (int32, error) { - // if cached already, return from cache - cachedPpid := p.getPpid() - if cachedPpid != 0 { - return cachedPpid, nil - } - - ppid, _, _, err := getFromSnapProcess(p.Pid) - if err != nil { - return 0, err - } - - // no errors and not cached already, so cache it - p.setPpid(ppid) - - return ppid, nil -} - -func (p *Process) NameWithContext(ctx context.Context) (string, error) { - if p.Pid == 0 { - return "System Idle Process", nil - } - if p.Pid == 4 { - return "System", nil - } - - exe, err := p.ExeWithContext(ctx) - if err != nil { - return "", fmt.Errorf("could not get Name: %s", err) - } - - return filepath.Base(exe), nil -} - -func (p *Process) TgidWithContext(ctx context.Context) (int32, error) { - return 0, common.ErrNotImplementedError -} - -func (p *Process) ExeWithContext(ctx context.Context) (string, error) { - c, err := windows.OpenProcess(processQueryInformation, false, uint32(p.Pid)) - if err != nil { - return "", err - } - defer windows.CloseHandle(c) - buf := make([]uint16, syscall.MAX_LONG_PATH) - size := uint32(syscall.MAX_LONG_PATH) - if err := procQueryFullProcessImageNameW.Find(); err == nil { // Vista+ - ret, _, err := procQueryFullProcessImageNameW.Call( - uintptr(c), - uintptr(0), - uintptr(unsafe.Pointer(&buf[0])), - uintptr(unsafe.Pointer(&size))) - if ret == 0 { - return "", err - } - return 
windows.UTF16ToString(buf[:]), nil - } - // XP fallback - ret, _, err := procGetProcessImageFileNameW.Call(uintptr(c), uintptr(unsafe.Pointer(&buf[0])), uintptr(size)) - if ret == 0 { - return "", err - } - return common.ConvertDOSPath(windows.UTF16ToString(buf[:])), nil -} - -func (p *Process) CmdlineWithContext(_ context.Context) (string, error) { - cmdline, err := getProcessCommandLine(p.Pid) - if err != nil { - return "", fmt.Errorf("could not get CommandLine: %s", err) - } - return cmdline, nil -} - -func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) { - cmdline, err := p.CmdlineWithContext(ctx) - if err != nil { - return nil, err - } - return strings.Split(cmdline, " "), nil -} - -func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) { - ru, err := getRusage(p.Pid) - if err != nil { - return 0, fmt.Errorf("could not get CreationDate: %s", err) - } - - return ru.CreationTime.Nanoseconds() / 1000000, nil -} - -func (p *Process) CwdWithContext(_ context.Context) (string, error) { - h, err := windows.OpenProcess(processQueryInformation|windows.PROCESS_VM_READ, false, uint32(p.Pid)) - if err == windows.ERROR_ACCESS_DENIED || err == windows.ERROR_INVALID_PARAMETER { - return "", nil - } - if err != nil { - return "", err - } - defer syscall.CloseHandle(syscall.Handle(h)) - - procIs32Bits := is32BitProcess(h) - - if procIs32Bits { - userProcParams, err := getUserProcessParams32(h) - if err != nil { - return "", err - } - if userProcParams.CurrentDirectoryPathNameLength > 0 { - cwd := readProcessMemory(syscall.Handle(h), procIs32Bits, uint64(userProcParams.CurrentDirectoryPathAddress), uint(userProcParams.CurrentDirectoryPathNameLength)) - if len(cwd) != int(userProcParams.CurrentDirectoryPathNameLength) { - return "", errors.New("cannot read current working directory") - } - - return convertUTF16ToString(cwd), nil - } - } else { - userProcParams, err := getUserProcessParams64(h) - if err != nil { - return "", err - } - if userProcParams.CurrentDirectoryPathNameLength > 0 { - cwd := readProcessMemory(syscall.Handle(h), procIs32Bits, userProcParams.CurrentDirectoryPathAddress, uint(userProcParams.CurrentDirectoryPathNameLength)) - if len(cwd) != int(userProcParams.CurrentDirectoryPathNameLength) { - return "", errors.New("cannot read current working directory") - } - - return convertUTF16ToString(cwd), nil - } - } - - // if we reach here, we have no cwd - return "", nil -} - -func (p *Process) StatusWithContext(ctx context.Context) ([]string, error) { - return []string{""}, common.ErrNotImplementedError -} - -func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { - return false, common.ErrNotImplementedError -} - -func (p *Process) UsernameWithContext(ctx context.Context) (string, error) { - pid := p.Pid - c, err := windows.OpenProcess(processQueryInformation, false, uint32(pid)) - if err != nil { - return "", err - } - defer windows.CloseHandle(c) - - var token syscall.Token - err = syscall.OpenProcessToken(syscall.Handle(c), syscall.TOKEN_QUERY, &token) - if err != nil { - return "", err - } - defer token.Close() - tokenUser, err := token.GetTokenUser() - if err != nil { - return "", err - } - - user, domain, _, err := tokenUser.User.Sid.LookupAccount("") - return domain + "\\" + user, err -} - -func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { - return nil, 
common.ErrNotImplementedError -} - -func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) TerminalWithContext(ctx context.Context) (string, error) { - return "", common.ErrNotImplementedError -} - -// priorityClasses maps a win32 priority class to its WMI equivalent Win32_Process.Priority -// https://docs.microsoft.com/en-us/windows/desktop/api/processthreadsapi/nf-processthreadsapi-getpriorityclass -// https://docs.microsoft.com/en-us/windows/desktop/cimwin32prov/win32-process -var priorityClasses = map[int]int32{ - 0x00008000: 10, // ABOVE_NORMAL_PRIORITY_CLASS - 0x00004000: 6, // BELOW_NORMAL_PRIORITY_CLASS - 0x00000080: 13, // HIGH_PRIORITY_CLASS - 0x00000040: 4, // IDLE_PRIORITY_CLASS - 0x00000020: 8, // NORMAL_PRIORITY_CLASS - 0x00000100: 24, // REALTIME_PRIORITY_CLASS -} - -func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { - c, err := windows.OpenProcess(processQueryInformation, false, uint32(p.Pid)) - if err != nil { - return 0, err - } - defer windows.CloseHandle(c) - ret, _, err := procGetPriorityClass.Call(uintptr(c)) - if ret == 0 { - return 0, err - } - priority, ok := priorityClasses[int(ret)] - if !ok { - return 0, fmt.Errorf("unknown priority class %v", ret) - } - return priority, nil -} - -func (p *Process) IOniceWithContext(ctx context.Context) (int32, error) { - return 0, common.ErrNotImplementedError -} - -func (p *Process) RlimitWithContext(ctx context.Context) ([]RlimitStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) RlimitUsageWithContext(ctx context.Context, gatherUsed bool) ([]RlimitStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) { - c, err := windows.OpenProcess(processQueryInformation, false, uint32(p.Pid)) - if err != nil { - return nil, err - } - defer windows.CloseHandle(c) - var ioCounters ioCounters - ret, _, err := procGetProcessIoCounters.Call(uintptr(c), uintptr(unsafe.Pointer(&ioCounters))) - if ret == 0 { - return nil, err - } - stats := &IOCountersStat{ - ReadCount: ioCounters.ReadOperationCount, - ReadBytes: ioCounters.ReadTransferCount, - WriteCount: ioCounters.WriteOperationCount, - WriteBytes: ioCounters.WriteTransferCount, - } - - return stats, nil -} - -func (p *Process) NumCtxSwitchesWithContext(ctx context.Context) (*NumCtxSwitchesStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) NumFDsWithContext(ctx context.Context) (int32, error) { - return 0, common.ErrNotImplementedError -} - -func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { - ppid, ret, _, err := getFromSnapProcess(p.Pid) - if err != nil { - return 0, err - } - - // if no errors and not cached already, cache ppid - p.parent = ppid - if 0 == p.getPpid() { - p.setPpid(ppid) - } - - return ret, nil -} - -func (p *Process) ThreadsWithContext(ctx context.Context) (map[int32]*cpu.TimesStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) { - sysTimes, err := getProcessCPUTimes(p.Pid) - if err != nil { - return nil, err - } - - // User and kernel times are represented as a FILETIME structure - // which contains a 64-bit value representing the number of - // 100-nanosecond intervals since January 1, 1601 (UTC): - // http://msdn.microsoft.com/en-us/library/ms724284(VS.85).aspx - // To convert it into a 
float representing the seconds that the - // process has executed in user/kernel mode I borrowed the code - // below from psutil's _psutil_windows.c, and in turn from Python's - // Modules/posixmodule.c - - user := float64(sysTimes.UserTime.HighDateTime)*429.4967296 + float64(sysTimes.UserTime.LowDateTime)*1e-7 - kernel := float64(sysTimes.KernelTime.HighDateTime)*429.4967296 + float64(sysTimes.KernelTime.LowDateTime)*1e-7 - - return &cpu.TimesStat{ - User: user, - System: kernel, - }, nil -} - -func (p *Process) CPUAffinityWithContext(ctx context.Context) ([]int32, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) { - mem, err := getMemoryInfo(p.Pid) - if err != nil { - return nil, err - } - - ret := &MemoryInfoStat{ - RSS: uint64(mem.WorkingSetSize), - VMS: uint64(mem.PagefileUsage), - } - - return ret, nil -} - -func (p *Process) MemoryInfoExWithContext(ctx context.Context) (*MemoryInfoExStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) PageFaultsWithContext(ctx context.Context) (*PageFaultsStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { - out := []*Process{} - snap, err := windows.CreateToolhelp32Snapshot(windows.TH32CS_SNAPPROCESS, uint32(0)) - if err != nil { - return out, err - } - defer windows.CloseHandle(snap) - var pe32 windows.ProcessEntry32 - pe32.Size = uint32(unsafe.Sizeof(pe32)) - if err := windows.Process32First(snap, &pe32); err != nil { - return out, err - } - for { - if pe32.ParentProcessID == uint32(p.Pid) { - p, err := NewProcessWithContext(ctx, int32(pe32.ProcessID)) - if err == nil { - out = append(out, p) - } - } - if err = windows.Process32Next(snap, &pe32); err != nil { - break - } - } - return out, nil -} - -func (p *Process) OpenFilesWithContext(ctx context.Context) ([]OpenFilesStat, error) { - files := make([]OpenFilesStat, 0) - fileExists := make(map[string]bool) - - process, err := windows.OpenProcess(common.ProcessQueryInformation, false, uint32(p.Pid)) - if err != nil { - return nil, err - } - - buffer := make([]byte, 1024) - var size uint32 - - st := common.CallWithExpandingBuffer( - func() common.NtStatus { - return common.NtQuerySystemInformation( - common.SystemExtendedHandleInformationClass, - &buffer[0], - uint32(len(buffer)), - &size, - ) - }, - &buffer, - &size, - ) - if st.IsError() { - return nil, st.Error() - } - - handlesList := (*common.SystemExtendedHandleInformation)(unsafe.Pointer(&buffer[0])) - handles := make([]common.SystemExtendedHandleTableEntryInformation, int(handlesList.NumberOfHandles)) - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&handles)) - hdr.Data = uintptr(unsafe.Pointer(&handlesList.Handles[0])) - - currentProcess, err := windows.GetCurrentProcess() - if err != nil { - return nil, err - } - - for _, handle := range handles { - var file uintptr - if int32(handle.UniqueProcessId) != p.Pid { - continue - } - if windows.DuplicateHandle(process, windows.Handle(handle.HandleValue), currentProcess, (*windows.Handle)(&file), - 0, true, windows.DUPLICATE_SAME_ACCESS) != nil { - continue - } - // release the new handle - defer windows.CloseHandle(windows.Handle(file)) - - fileType, err := windows.GetFileType(windows.Handle(file)) - if err != nil || fileType != windows.FILE_TYPE_DISK { - continue - } - - var fileName string - ch := make(chan struct{}) - - go func() { - var buf [syscall.MAX_LONG_PATH]uint16 - n, 
err := windows.GetFinalPathNameByHandle(windows.Handle(file), &buf[0], syscall.MAX_LONG_PATH, 0) - if err != nil { - return - } - - fileName = string(utf16.Decode(buf[:n])) - ch <- struct{}{} - }() - - select { - case <-time.NewTimer(100 * time.Millisecond).C: - continue - case <-ch: - fileInfo, err := os.Stat(fileName) - if err != nil || fileInfo.IsDir() { - continue - } - - if _, exists := fileExists[fileName]; !exists { - files = append(files, OpenFilesStat{ - Path: fileName, - Fd: uint64(file), - }) - fileExists[fileName] = true - } - case <-ctx.Done(): - return files, ctx.Err() - } - } - - return files, nil -} - -func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) { - return net.ConnectionsPidWithContext(ctx, "all", p.Pid) -} - -func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]MemoryMapsStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) SendSignalWithContext(ctx context.Context, sig syscall.Signal) error { - return common.ErrNotImplementedError -} - -func (p *Process) SuspendWithContext(ctx context.Context) error { - c, err := windows.OpenProcess(windows.PROCESS_SUSPEND_RESUME, false, uint32(p.Pid)) - if err != nil { - return err - } - defer windows.CloseHandle(c) - - r1, _, _ := procNtSuspendProcess.Call(uintptr(c)) - if r1 != 0 { - // See https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-erref/596a1078-e883-4972-9bbc-49e60bebca55 - return fmt.Errorf("NtStatus='0x%.8X'", r1) - } - - return nil -} - -func (p *Process) ResumeWithContext(ctx context.Context) error { - c, err := windows.OpenProcess(windows.PROCESS_SUSPEND_RESUME, false, uint32(p.Pid)) - if err != nil { - return err - } - defer windows.CloseHandle(c) - - r1, _, _ := procNtResumeProcess.Call(uintptr(c)) - if r1 != 0 { - // See https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-erref/596a1078-e883-4972-9bbc-49e60bebca55 - return fmt.Errorf("NtStatus='0x%.8X'", r1) - } - - return nil -} - -func (p *Process) TerminateWithContext(ctx context.Context) error { - proc, err := windows.OpenProcess(windows.PROCESS_TERMINATE, false, uint32(p.Pid)) - if err != nil { - return err - } - err = windows.TerminateProcess(proc, 0) - windows.CloseHandle(proc) - return err -} - -func (p *Process) KillWithContext(ctx context.Context) error { - process, err := os.FindProcess(int(p.Pid)) - if err != nil { - return err - } - return process.Kill() -} - -func (p *Process) EnvironWithContext(ctx context.Context) ([]string, error) { - envVars, err := getProcessEnvironmentVariables(p.Pid, ctx) - if err != nil { - return nil, fmt.Errorf("could not get environment variables: %s", err) - } - return envVars, nil -} - -// retrieve Ppid in a thread-safe manner -func (p *Process) getPpid() int32 { - p.parentMutex.RLock() - defer p.parentMutex.RUnlock() - return p.parent -} - -// cache Ppid in a thread-safe manner (WINDOWS ONLY) -// see https://psutil.readthedocs.io/en/latest/#psutil.Process.ppid -func (p *Process) setPpid(ppid int32) { - p.parentMutex.Lock() - defer p.parentMutex.Unlock() - p.parent = ppid -} - -func getFromSnapProcess(pid int32) (int32, int32, string, error) { - snap, err := windows.CreateToolhelp32Snapshot(windows.TH32CS_SNAPPROCESS, uint32(pid)) - if err != nil { - return 0, 0, "", err - } - defer windows.CloseHandle(snap) - var pe32 windows.ProcessEntry32 - 
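A quick check of the magic constant in the FILETIME comment in TimesWithContext above: the kernel reports user and kernel time as HighDateTime·2^32 + LowDateTime ticks of 100 ns each, and 2^32 × 10^-7 s = 429.4967296 s, which is exactly the factor applied to the high word. A tiny portable sketch of the same arithmetic (illustrative only):

package main

import "fmt"

// filetimeSeconds converts a FILETIME high/low pair of 100 ns ticks to seconds.
func filetimeSeconds(high, low uint32) float64 {
	ticks := uint64(high)<<32 | uint64(low) // 100-nanosecond units
	return float64(ticks) * 1e-7
}

func main() {
	// One increment of the high word is 2^32 ticks.
	fmt.Println(filetimeSeconds(1, 0)) // 429.4967296
	// Matches the split formula used in TimesWithContext:
	fmt.Println(float64(1)*429.4967296 + float64(0)*1e-7)
}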
pe32.Size = uint32(unsafe.Sizeof(pe32)) - if err = windows.Process32First(snap, &pe32); err != nil { - return 0, 0, "", err - } - for { - if pe32.ProcessID == uint32(pid) { - szexe := windows.UTF16ToString(pe32.ExeFile[:]) - return int32(pe32.ParentProcessID), int32(pe32.Threads), szexe, nil - } - if err = windows.Process32Next(snap, &pe32); err != nil { - break - } - } - return 0, 0, "", fmt.Errorf("couldn't find pid: %d", pid) -} - -func ProcessesWithContext(ctx context.Context) ([]*Process, error) { - out := []*Process{} - - pids, err := PidsWithContext(ctx) - if err != nil { - return out, fmt.Errorf("could not get Processes %s", err) - } - - for _, pid := range pids { - p, err := NewProcessWithContext(ctx, pid) - if err != nil { - continue - } - out = append(out, p) - } - - return out, nil -} - -func getRusage(pid int32) (*windows.Rusage, error) { - var CPU windows.Rusage - - c, err := windows.OpenProcess(processQueryInformation, false, uint32(pid)) - if err != nil { - return nil, err - } - defer windows.CloseHandle(c) - - if err := windows.GetProcessTimes(c, &CPU.CreationTime, &CPU.ExitTime, &CPU.KernelTime, &CPU.UserTime); err != nil { - return nil, err - } - - return &CPU, nil -} - -func getMemoryInfo(pid int32) (PROCESS_MEMORY_COUNTERS, error) { - var mem PROCESS_MEMORY_COUNTERS - c, err := windows.OpenProcess(processQueryInformation, false, uint32(pid)) - if err != nil { - return mem, err - } - defer windows.CloseHandle(c) - if err := getProcessMemoryInfo(c, &mem); err != nil { - return mem, err - } - - return mem, err -} - -func getProcessMemoryInfo(h windows.Handle, mem *PROCESS_MEMORY_COUNTERS) (err error) { - r1, _, e1 := syscall.Syscall(procGetProcessMemoryInfo.Addr(), 3, uintptr(h), uintptr(unsafe.Pointer(mem)), uintptr(unsafe.Sizeof(*mem))) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -type SYSTEM_TIMES struct { - CreateTime syscall.Filetime - ExitTime syscall.Filetime - KernelTime syscall.Filetime - UserTime syscall.Filetime -} - -func getProcessCPUTimes(pid int32) (SYSTEM_TIMES, error) { - var times SYSTEM_TIMES - - h, err := windows.OpenProcess(processQueryInformation, false, uint32(pid)) - if err != nil { - return times, err - } - defer windows.CloseHandle(h) - - err = syscall.GetProcessTimes( - syscall.Handle(h), - &times.CreateTime, - &times.ExitTime, - &times.KernelTime, - &times.UserTime, - ) - - return times, err -} - -func getUserProcessParams32(handle windows.Handle) (rtlUserProcessParameters32, error) { - pebAddress, err := queryPebAddress(syscall.Handle(handle), true) - if err != nil { - return rtlUserProcessParameters32{}, fmt.Errorf("cannot locate process PEB: %w", err) - } - - buf := readProcessMemory(syscall.Handle(handle), true, pebAddress, uint(unsafe.Sizeof(processEnvironmentBlock32{}))) - if len(buf) != int(unsafe.Sizeof(processEnvironmentBlock32{})) { - return rtlUserProcessParameters32{}, fmt.Errorf("cannot read process PEB") - } - peb := (*processEnvironmentBlock32)(unsafe.Pointer(&buf[0])) - userProcessAddress := uint64(peb.ProcessParameters) - buf = readProcessMemory(syscall.Handle(handle), true, userProcessAddress, uint(unsafe.Sizeof(rtlUserProcessParameters32{}))) - if len(buf) != int(unsafe.Sizeof(rtlUserProcessParameters32{})) { - return rtlUserProcessParameters32{}, fmt.Errorf("cannot read user process parameters") - } - return *(*rtlUserProcessParameters32)(unsafe.Pointer(&buf[0])), nil -} - -func getUserProcessParams64(handle windows.Handle) (rtlUserProcessParameters64, error) { - pebAddress, err :=
queryPebAddress(syscall.Handle(handle), false) - if err != nil { - return rtlUserProcessParameters64{}, fmt.Errorf("cannot locate process PEB: %w", err) - } - - buf := readProcessMemory(syscall.Handle(handle), false, pebAddress, uint(unsafe.Sizeof(processEnvironmentBlock64{}))) - if len(buf) != int(unsafe.Sizeof(processEnvironmentBlock64{})) { - return rtlUserProcessParameters64{}, fmt.Errorf("cannot read process PEB") - } - peb := (*processEnvironmentBlock64)(unsafe.Pointer(&buf[0])) - userProcessAddress := peb.ProcessParameters - buf = readProcessMemory(syscall.Handle(handle), false, userProcessAddress, uint(unsafe.Sizeof(rtlUserProcessParameters64{}))) - if len(buf) != int(unsafe.Sizeof(rtlUserProcessParameters64{})) { - return rtlUserProcessParameters64{}, fmt.Errorf("cannot read user process parameters") - } - return *(*rtlUserProcessParameters64)(unsafe.Pointer(&buf[0])), nil -} - -func is32BitProcess(h windows.Handle) bool { - const ( - PROCESSOR_ARCHITECTURE_INTEL = 0 - PROCESSOR_ARCHITECTURE_ARM = 5 - PROCESSOR_ARCHITECTURE_ARM64 = 12 - PROCESSOR_ARCHITECTURE_IA64 = 6 - PROCESSOR_ARCHITECTURE_AMD64 = 9 - ) - - var procIs32Bits bool - switch processorArchitecture { - case PROCESSOR_ARCHITECTURE_INTEL, PROCESSOR_ARCHITECTURE_ARM: - procIs32Bits = true - case PROCESSOR_ARCHITECTURE_ARM64, PROCESSOR_ARCHITECTURE_IA64, PROCESSOR_ARCHITECTURE_AMD64: - var wow64 uint - - ret, _, _ := common.ProcNtQueryInformationProcess.Call( - uintptr(h), - uintptr(common.ProcessWow64Information), - uintptr(unsafe.Pointer(&wow64)), - uintptr(unsafe.Sizeof(wow64)), - uintptr(0), - ) - if int(ret) >= 0 { - if wow64 != 0 { - procIs32Bits = true - } - } else { - // if the OS does not support the call, we fallback into the bitness of the app - if unsafe.Sizeof(wow64) == 4 { - procIs32Bits = true - } - } - - default: - // for other unknown platforms, we rely on process platform - if unsafe.Sizeof(processorArchitecture) == 8 { - procIs32Bits = false - } else { - procIs32Bits = true - } - } - return procIs32Bits -} - -func getProcessEnvironmentVariables(pid int32, ctx context.Context) ([]string, error) { - h, err := windows.OpenProcess(processQueryInformation|windows.PROCESS_VM_READ, false, uint32(pid)) - if err == windows.ERROR_ACCESS_DENIED || err == windows.ERROR_INVALID_PARAMETER { - return nil, nil - } - if err != nil { - return nil, err - } - defer syscall.CloseHandle(syscall.Handle(h)) - - procIs32Bits := is32BitProcess(h) - - var processParameterBlockAddress uint64 - - if procIs32Bits { - peb, err := getUserProcessParams32(h) - if err != nil { - return nil, err - } - processParameterBlockAddress = uint64(peb.EnvironmentAddress) - } else { - peb, err := getUserProcessParams64(h) - if err != nil { - return nil, err - } - processParameterBlockAddress = peb.EnvironmentAddress - } - envvarScanner := bufio.NewScanner(&processReader{ - processHandle: h, - is32BitProcess: procIs32Bits, - offset: processParameterBlockAddress, - }) - envvarScanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) { - if atEOF && len(data) == 0 { - return 0, nil, nil - } - // Check for UTF-16 zero character - for i := 0; i < len(data)-1; i += 2 { - if data[i] == 0 && data[i+1] == 0 { - return i + 2, data[0:i], nil - } - } - if atEOF { - return len(data), data, nil - } - // Request more data - return 0, nil, nil - }) - var envVars []string - for envvarScanner.Scan() { - entry := envvarScanner.Bytes() - if len(entry) == 0 { - break // Block is finished - } - envVars = append(envVars, 
convertUTF16ToString(entry)) - select { - case <-ctx.Done(): - break - default: - continue - } - } - if err := envvarScanner.Err(); err != nil { - return nil, err - } - return envVars, nil -} - -type processReader struct { - processHandle windows.Handle - is32BitProcess bool - offset uint64 -} - -func (p *processReader) Read(buf []byte) (int, error) { - processMemory := readProcessMemory(syscall.Handle(p.processHandle), p.is32BitProcess, p.offset, uint(len(buf))) - if len(processMemory) == 0 { - return 0, io.EOF - } - copy(buf, processMemory) - p.offset += uint64(len(processMemory)) - return len(processMemory), nil -} - -func getProcessCommandLine(pid int32) (string, error) { - h, err := windows.OpenProcess(processQueryInformation|windows.PROCESS_VM_READ, false, uint32(pid)) - if err == windows.ERROR_ACCESS_DENIED || err == windows.ERROR_INVALID_PARAMETER { - return "", nil - } - if err != nil { - return "", err - } - defer syscall.CloseHandle(syscall.Handle(h)) - - procIs32Bits := is32BitProcess(h) - - if procIs32Bits { - userProcParams, err := getUserProcessParams32(h) - if err != nil { - return "", err - } - if userProcParams.CommandLineLength > 0 { - cmdLine := readProcessMemory(syscall.Handle(h), procIs32Bits, uint64(userProcParams.CommandLineAddress), uint(userProcParams.CommandLineLength)) - if len(cmdLine) != int(userProcParams.CommandLineLength) { - return "", errors.New("cannot read cmdline") - } - - return convertUTF16ToString(cmdLine), nil - } - } else { - userProcParams, err := getUserProcessParams64(h) - if err != nil { - return "", err - } - if userProcParams.CommandLineLength > 0 { - cmdLine := readProcessMemory(syscall.Handle(h), procIs32Bits, userProcParams.CommandLineAddress, uint(userProcParams.CommandLineLength)) - if len(cmdLine) != int(userProcParams.CommandLineLength) { - return "", errors.New("cannot read cmdline") - } - - return convertUTF16ToString(cmdLine), nil - } - } - - // if we reach here, we have no command line - return "", nil -} - -func convertUTF16ToString(src []byte) string { - srcLen := len(src) / 2 - - codePoints := make([]uint16, srcLen) - - srcIdx := 0 - for i := 0; i < srcLen; i++ { - codePoints[i] = uint16(src[srcIdx]) | uint16(src[srcIdx+1])<<8 - srcIdx += 2 - } - return syscall.UTF16ToString(codePoints) -} diff --git a/vendor/github.com/shirou/gopsutil/v3/LICENSE b/vendor/github.com/shirou/gopsutil/v4/LICENSE similarity index 100% rename from vendor/github.com/shirou/gopsutil/v3/LICENSE rename to vendor/github.com/shirou/gopsutil/v4/LICENSE diff --git a/vendor/github.com/shirou/gopsutil/v4/common/env.go b/vendor/github.com/shirou/gopsutil/v4/common/env.go new file mode 100644 index 00000000000..47e471c402f --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/common/env.go @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: BSD-3-Clause +package common + +type EnvKeyType string + +// EnvKey is a context key that can be used to set programmatically the environment +// gopsutil relies on to perform calls against the OS. 
+// Example of use: +// +// ctx := context.WithValue(context.Background(), common.EnvKey, EnvMap{common.HostProcEnvKey: "/myproc"}) +// avg, err := load.AvgWithContext(ctx) +var EnvKey = EnvKeyType("env") + +const ( + HostProcEnvKey EnvKeyType = "HOST_PROC" + HostSysEnvKey EnvKeyType = "HOST_SYS" + HostEtcEnvKey EnvKeyType = "HOST_ETC" + HostVarEnvKey EnvKeyType = "HOST_VAR" + HostRunEnvKey EnvKeyType = "HOST_RUN" + HostDevEnvKey EnvKeyType = "HOST_DEV" + HostRootEnvKey EnvKeyType = "HOST_ROOT" + HostProcMountinfo EnvKeyType = "HOST_PROC_MOUNTINFO" +) + +type EnvMap map[EnvKeyType]string diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu.go new file mode 100644 index 00000000000..9bc3dfb51af --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu.go @@ -0,0 +1,202 @@ +// SPDX-License-Identifier: BSD-3-Clause +package cpu + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "math" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +// TimesStat contains the amounts of time the CPU has spent performing different +// kinds of work. Time units are in seconds. It is based on linux /proc/stat file. +type TimesStat struct { + CPU string `json:"cpu"` + User float64 `json:"user"` + System float64 `json:"system"` + Idle float64 `json:"idle"` + Nice float64 `json:"nice"` + Iowait float64 `json:"iowait"` + Irq float64 `json:"irq"` + Softirq float64 `json:"softirq"` + Steal float64 `json:"steal"` + Guest float64 `json:"guest"` + GuestNice float64 `json:"guestNice"` +} + +type InfoStat struct { + CPU int32 `json:"cpu"` + VendorID string `json:"vendorId"` + Family string `json:"family"` + Model string `json:"model"` + Stepping int32 `json:"stepping"` + PhysicalID string `json:"physicalId"` + CoreID string `json:"coreId"` + Cores int32 `json:"cores"` + ModelName string `json:"modelName"` + Mhz float64 `json:"mhz"` + CacheSize int32 `json:"cacheSize"` + Flags []string `json:"flags"` + Microcode string `json:"microcode"` +} + +type lastPercent struct { + sync.Mutex + lastCPUTimes []TimesStat + lastPerCPUTimes []TimesStat +} + +var ( + lastCPUPercent lastPercent + invoke common.Invoker = common.Invoke{} +) + +func init() { + lastCPUPercent.Lock() + lastCPUPercent.lastCPUTimes, _ = Times(false) + lastCPUPercent.lastPerCPUTimes, _ = Times(true) + lastCPUPercent.Unlock() +} + +// Counts returns the number of physical or logical cores in the system +func Counts(logical bool) (int, error) { + return CountsWithContext(context.Background(), logical) +} + +func (c TimesStat) String() string { + v := []string{ + `"cpu":"` + c.CPU + `"`, + `"user":` + strconv.FormatFloat(c.User, 'f', 1, 64), + `"system":` + strconv.FormatFloat(c.System, 'f', 1, 64), + `"idle":` + strconv.FormatFloat(c.Idle, 'f', 1, 64), + `"nice":` + strconv.FormatFloat(c.Nice, 'f', 1, 64), + `"iowait":` + strconv.FormatFloat(c.Iowait, 'f', 1, 64), + `"irq":` + strconv.FormatFloat(c.Irq, 'f', 1, 64), + `"softirq":` + strconv.FormatFloat(c.Softirq, 'f', 1, 64), + `"steal":` + strconv.FormatFloat(c.Steal, 'f', 1, 64), + `"guest":` + strconv.FormatFloat(c.Guest, 'f', 1, 64), + `"guestNice":` + strconv.FormatFloat(c.GuestNice, 'f', 1, 64), + } + + return `{` + strings.Join(v, ",") + `}` +} + +// Deprecated: Total returns the total number of seconds in a CPUTimesStat +// Please do not use this internal function. 
+func (c TimesStat) Total() float64 { + total := c.User + c.System + c.Idle + c.Nice + c.Iowait + c.Irq + + c.Softirq + c.Steal + c.Guest + c.GuestNice + + return total +} + +func (c InfoStat) String() string { + s, _ := json.Marshal(c) + return string(s) +} + +func getAllBusy(t TimesStat) (float64, float64) { + tot := t.Total() + if runtime.GOOS == "linux" { + tot -= t.Guest // Linux 2.6.24+ + tot -= t.GuestNice // Linux 3.2.0+ + } + + busy := tot - t.Idle - t.Iowait + + return tot, busy +} + +func calculateBusy(t1, t2 TimesStat) float64 { + t1All, t1Busy := getAllBusy(t1) + t2All, t2Busy := getAllBusy(t2) + + if t2Busy <= t1Busy { + return 0 + } + if t2All <= t1All { + return 100 + } + return math.Min(100, math.Max(0, (t2Busy-t1Busy)/(t2All-t1All)*100)) +} + +func calculateAllBusy(t1, t2 []TimesStat) ([]float64, error) { + // Make sure the CPU measurements have the same length. + if len(t1) != len(t2) { + return nil, fmt.Errorf( + "received two CPU counts: %d != %d", + len(t1), len(t2), + ) + } + + ret := make([]float64, len(t1)) + for i, t := range t2 { + ret[i] = calculateBusy(t1[i], t) + } + return ret, nil +} + +// Percent calculates the percentage of cpu used either per CPU or combined. +// If an interval of 0 is given it will compare the current cpu times against the last call. +// Returns one value per cpu, or a single value if percpu is set to false. +func Percent(interval time.Duration, percpu bool) ([]float64, error) { + return PercentWithContext(context.Background(), interval, percpu) +} + +func PercentWithContext(ctx context.Context, interval time.Duration, percpu bool) ([]float64, error) { + if interval <= 0 { + return percentUsedFromLastCallWithContext(ctx, percpu) + } + + // Get CPU usage at the start of the interval. + cpuTimes1, err := TimesWithContext(ctx, percpu) + if err != nil { + return nil, err + } + + if err := common.Sleep(ctx, interval); err != nil { + return nil, err + } + + // And at the end of the interval. + cpuTimes2, err := TimesWithContext(ctx, percpu) + if err != nil { + return nil, err + } + + return calculateAllBusy(cpuTimes1, cpuTimes2) +} + +func percentUsedFromLastCall(percpu bool) ([]float64, error) { + return percentUsedFromLastCallWithContext(context.Background(), percpu) +} + +func percentUsedFromLastCallWithContext(ctx context.Context, percpu bool) ([]float64, error) { + cpuTimes, err := TimesWithContext(ctx, percpu) + if err != nil { + return nil, err + } + lastCPUPercent.Lock() + defer lastCPUPercent.Unlock() + var lastTimes []TimesStat + if percpu { + lastTimes = lastCPUPercent.lastPerCPUTimes + lastCPUPercent.lastPerCPUTimes = cpuTimes + } else { + lastTimes = lastCPUPercent.lastCPUTimes + lastCPUPercent.lastCPUTimes = cpuTimes + } + + if lastTimes == nil { + return nil, errors.New("error getting times for cpu percent. 
lastTimes was nil") + } + return calculateAllBusy(lastTimes, cpuTimes) +} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix.go similarity index 85% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix.go index 1439d1d7939..bc766bd4fe9 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build aix -// +build aix package cpu diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_cgo.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_cgo.go similarity index 96% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_cgo.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_cgo.go index 9c1e70b1730..559dc5feafd 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_cgo.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_cgo.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build aix && cgo -// +build aix,cgo package cpu diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_nocgo.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_nocgo.go new file mode 100644 index 00000000000..329ef833666 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_nocgo.go @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build aix && !cgo + +package cpu + +import ( + "context" + "strconv" + "strings" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { + var ret []TimesStat + if percpu { + per_out, err := invoke.CommandWithContext(ctx, "sar", "-u", "-P", "ALL", "10", "1") + if err != nil { + return nil, err + } + lines := strings.Split(string(per_out), "\n") + if len(lines) < 6 { + return []TimesStat{}, common.ErrNotImplementedError + } + + hp := strings.Fields(lines[5]) // headers + for l := 6; l < len(lines)-1; l++ { + ct := &TimesStat{} + v := strings.Fields(lines[l]) // values + for i, header := range hp { + // We're done in any of these use cases + if i >= len(v) || v[0] == "-" { + break + } + + // Position variable for v + pos := i + // There is a missing field at the beginning of all but the first line + // so adjust the position + if l > 6 { + pos = i - 1 + } + // We don't want invalid positions + if pos < 0 { + continue + } + + if t, err := strconv.ParseFloat(v[pos], 64); err == nil { + switch header { + case `cpu`: + ct.CPU = strconv.FormatFloat(t, 'f', -1, 64) + case `%usr`: + ct.User = t + case `%sys`: + ct.System = t + case `%wio`: + ct.Iowait = t + case `%idle`: + ct.Idle = t + } + } + } + // Valid CPU data, so append it + ret = append(ret, *ct) + } + } else { + out, err := invoke.CommandWithContext(ctx, "sar", "-u", "10", "1") + if err != nil { + return nil, err + } + lines := strings.Split(string(out), "\n") + if len(lines) < 5 { + return []TimesStat{}, common.ErrNotImplementedError + } + + ct := &TimesStat{CPU: "cpu-total"} + h := strings.Fields(lines[len(lines)-3]) // headers + v := strings.Fields(lines[len(lines)-2]) // values + for i, header := range h { + if t, err := strconv.ParseFloat(v[i], 64); err == nil { + switch header { + case `%usr`: + ct.User = t + case `%sys`: + ct.System = t + case `%wio`: + ct.Iowait = t + case `%idle`: + ct.Idle = t + } + } + } + + ret = append(ret, *ct) + } + + return ret, nil +} + +func InfoWithContext(ctx context.Context) 
([]InfoStat, error) { + out, err := invoke.CommandWithContext(ctx, "prtconf") + if err != nil { + return nil, err + } + + ret := InfoStat{} + for _, line := range strings.Split(string(out), "\n") { + if strings.HasPrefix(line, "Number Of Processors:") { + p := strings.Fields(line) + if len(p) > 3 { + if t, err := strconv.ParseUint(p[3], 10, 64); err == nil { + ret.Cores = int32(t) + } + } + } else if strings.HasPrefix(line, "Processor Clock Speed:") { + p := strings.Fields(line) + if len(p) > 4 { + if t, err := strconv.ParseFloat(p[3], 64); err == nil { + switch strings.ToUpper(p[4]) { + case "MHZ": + ret.Mhz = t + case "GHZ": + ret.Mhz = t * 1000.0 + case "KHZ": + ret.Mhz = t / 1000.0 + default: + ret.Mhz = t + } + } + } + break + } else if strings.HasPrefix(line, "System Model:") { + p := strings.Split(string(line), ":") + if p != nil { + ret.VendorID = strings.TrimSpace(p[1]) + } + } else if strings.HasPrefix(line, "Processor Type:") { + p := strings.Split(string(line), ":") + if p != nil { + c := strings.Split(string(p[1]), "_") + if c != nil { + ret.Family = strings.TrimSpace(c[0]) + ret.Model = strings.TrimSpace(c[1]) + } + } + } + } + return []InfoStat{ret}, nil +} + +func CountsWithContext(ctx context.Context, logical bool) (int, error) { + info, err := InfoWithContext(ctx) + if err == nil { + return int(info[0].Cores), nil + } + return 0, err +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin.go new file mode 100644 index 00000000000..b3e3a668de1 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin.go @@ -0,0 +1,198 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build darwin + +package cpu + +import ( + "context" + "fmt" + "strconv" + "strings" + "unsafe" + + "github.com/tklauser/go-sysconf" + "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +// sys/resource.h +const ( + CPUser = 0 + cpNice = 1 + cpSys = 2 + cpIntr = 3 + cpIdle = 4 + cpUStates = 5 +) + +// mach/machine.h +const ( + cpuStateUser = 0 + cpuStateSystem = 1 + cpuStateIdle = 2 + cpuStateNice = 3 + cpuStateMax = 4 +) + +// mach/processor_info.h +const ( + processorCpuLoadInfo = 2 +) + +type hostCpuLoadInfoData struct { + cpuTicks [cpuStateMax]uint32 +} + +// default value. 
from time.h +var ClocksPerSec = float64(128) + +func init() { + clkTck, err := sysconf.Sysconf(sysconf.SC_CLK_TCK) + // ignore errors + if err == nil { + ClocksPerSec = float64(clkTck) + } +} + +func Times(percpu bool) ([]TimesStat, error) { + return TimesWithContext(context.Background(), percpu) +} + +func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { + lib, err := common.NewLibrary(common.System) + if err != nil { + return nil, err + } + defer lib.Close() + + if percpu { + return perCPUTimes(lib) + } + + return allCPUTimes(lib) +} + +// Returns only one CPUInfoStat on FreeBSD +func Info() ([]InfoStat, error) { + return InfoWithContext(context.Background()) +} + +func InfoWithContext(ctx context.Context) ([]InfoStat, error) { + var ret []InfoStat + + c := InfoStat{} + c.ModelName, _ = unix.Sysctl("machdep.cpu.brand_string") + family, _ := unix.SysctlUint32("machdep.cpu.family") + c.Family = strconv.FormatUint(uint64(family), 10) + model, _ := unix.SysctlUint32("machdep.cpu.model") + c.Model = strconv.FormatUint(uint64(model), 10) + stepping, _ := unix.SysctlUint32("machdep.cpu.stepping") + c.Stepping = int32(stepping) + features, err := unix.Sysctl("machdep.cpu.features") + if err == nil { + for _, v := range strings.Fields(features) { + c.Flags = append(c.Flags, strings.ToLower(v)) + } + } + leaf7Features, err := unix.Sysctl("machdep.cpu.leaf7_features") + if err == nil { + for _, v := range strings.Fields(leaf7Features) { + c.Flags = append(c.Flags, strings.ToLower(v)) + } + } + extfeatures, err := unix.Sysctl("machdep.cpu.extfeatures") + if err == nil { + for _, v := range strings.Fields(extfeatures) { + c.Flags = append(c.Flags, strings.ToLower(v)) + } + } + cores, _ := unix.SysctlUint32("machdep.cpu.core_count") + c.Cores = int32(cores) + cacheSize, _ := unix.SysctlUint32("machdep.cpu.cache.size") + c.CacheSize = int32(cacheSize) + c.VendorID, _ = unix.Sysctl("machdep.cpu.vendor") + + v, err := getFrequency() + if err == nil { + c.Mhz = v + } + + return append(ret, c), nil +} + +func CountsWithContext(ctx context.Context, logical bool) (int, error) { + var cpuArgument string + if logical { + cpuArgument = "hw.logicalcpu" + } else { + cpuArgument = "hw.physicalcpu" + } + + count, err := unix.SysctlUint32(cpuArgument) + if err != nil { + return 0, err + } + + return int(count), nil +} + +func perCPUTimes(machLib *common.Library) ([]TimesStat, error) { + machHostSelf := common.GetFunc[common.MachHostSelfFunc](machLib, common.MachHostSelfSym) + machTaskSelf := common.GetFunc[common.MachTaskSelfFunc](machLib, common.MachTaskSelfSym) + hostProcessorInfo := common.GetFunc[common.HostProcessorInfoFunc](machLib, common.HostProcessorInfoSym) + vmDeallocate := common.GetFunc[common.VMDeallocateFunc](machLib, common.VMDeallocateSym) + + var count, ncpu uint32 + var cpuload *hostCpuLoadInfoData + + status := hostProcessorInfo(machHostSelf(), processorCpuLoadInfo, &ncpu, uintptr(unsafe.Pointer(&cpuload)), &count) + + if status != common.KERN_SUCCESS { + return nil, fmt.Errorf("host_processor_info error=%d", status) + } + + defer vmDeallocate(machTaskSelf(), uintptr(unsafe.Pointer(cpuload)), uintptr(ncpu)) + + ret := []TimesStat{} + loads := unsafe.Slice(cpuload, ncpu) + + for i := 0; i < int(ncpu); i++ { + c := TimesStat{ + CPU: fmt.Sprintf("cpu%d", i), + User: float64(loads[i].cpuTicks[cpuStateUser]) / ClocksPerSec, + System: float64(loads[i].cpuTicks[cpuStateSystem]) / ClocksPerSec, + Nice: float64(loads[i].cpuTicks[cpuStateNice]) / ClocksPerSec, + Idle: 
float64(loads[i].cpuTicks[cpuStateIdle]) / ClocksPerSec, + } + + ret = append(ret, c) + } + + return ret, nil +} + +func allCPUTimes(machLib *common.Library) ([]TimesStat, error) { + machHostSelf := common.GetFunc[common.MachHostSelfFunc](machLib, common.MachHostSelfSym) + hostStatistics := common.GetFunc[common.HostStatisticsFunc](machLib, common.HostStatisticsSym) + + var cpuload hostCpuLoadInfoData + count := uint32(cpuStateMax) + + status := hostStatistics(machHostSelf(), common.HOST_CPU_LOAD_INFO, + uintptr(unsafe.Pointer(&cpuload)), &count) + + if status != common.KERN_SUCCESS { + return nil, fmt.Errorf("host_statistics error=%d", status) + } + + c := TimesStat{ + CPU: "cpu-total", + User: float64(cpuload.cpuTicks[cpuStateUser]) / ClocksPerSec, + System: float64(cpuload.cpuTicks[cpuStateSystem]) / ClocksPerSec, + Nice: float64(cpuload.cpuTicks[cpuStateNice]) / ClocksPerSec, + Idle: float64(cpuload.cpuTicks[cpuStateIdle]) / ClocksPerSec, + } + + return []TimesStat{c}, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_arm64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_arm64.go new file mode 100644 index 00000000000..09427003403 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_arm64.go @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build darwin && arm64 + +package cpu + +import ( + "encoding/binary" + "fmt" + "unsafe" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +// https://github.com/shoenig/go-m1cpu/blob/v0.1.6/cpu.go +func getFrequency() (float64, error) { + ioKit, err := common.NewLibrary(common.IOKit) + if err != nil { + return 0, err + } + defer ioKit.Close() + + coreFoundation, err := common.NewLibrary(common.CoreFoundation) + if err != nil { + return 0, err + } + defer coreFoundation.Close() + + ioServiceMatching := common.GetFunc[common.IOServiceMatchingFunc](ioKit, common.IOServiceMatchingSym) + ioServiceGetMatchingServices := common.GetFunc[common.IOServiceGetMatchingServicesFunc](ioKit, common.IOServiceGetMatchingServicesSym) + ioIteratorNext := common.GetFunc[common.IOIteratorNextFunc](ioKit, common.IOIteratorNextSym) + ioRegistryEntryGetName := common.GetFunc[common.IORegistryEntryGetNameFunc](ioKit, common.IORegistryEntryGetNameSym) + ioRegistryEntryCreateCFProperty := common.GetFunc[common.IORegistryEntryCreateCFPropertyFunc](ioKit, common.IORegistryEntryCreateCFPropertySym) + ioObjectRelease := common.GetFunc[common.IOObjectReleaseFunc](ioKit, common.IOObjectReleaseSym) + + cfStringCreateWithCString := common.GetFunc[common.CFStringCreateWithCStringFunc](coreFoundation, common.CFStringCreateWithCStringSym) + cfDataGetLength := common.GetFunc[common.CFDataGetLengthFunc](coreFoundation, common.CFDataGetLengthSym) + cfDataGetBytePtr := common.GetFunc[common.CFDataGetBytePtrFunc](coreFoundation, common.CFDataGetBytePtrSym) + cfRelease := common.GetFunc[common.CFReleaseFunc](coreFoundation, common.CFReleaseSym) + + matching := ioServiceMatching("AppleARMIODevice") + + var iterator uint32 + if status := ioServiceGetMatchingServices(common.KIOMainPortDefault, uintptr(matching), &iterator); status != common.KERN_SUCCESS { + return 0.0, fmt.Errorf("IOServiceGetMatchingServices error=%d", status) + } + defer ioObjectRelease(iterator) + + pCorekey := cfStringCreateWithCString(common.KCFAllocatorDefault, "voltage-states5-sram", common.KCFStringEncodingUTF8) + defer cfRelease(uintptr(pCorekey)) + + var pCoreHz uint32 + for { + service := ioIteratorNext(iterator) + if !(service > 0) { + break + } + 
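+ // Check each AppleARMIODevice service by name; the frequency table keyed
+ // by "voltage-states5-sram" hangs off the registry entry named "pmgr".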
+ buf := common.NewCStr(512) + ioRegistryEntryGetName(service, buf) + + if buf.GoString() == "pmgr" { + pCoreRef := ioRegistryEntryCreateCFProperty(service, uintptr(pCorekey), common.KCFAllocatorDefault, common.KNilOptions) + length := cfDataGetLength(uintptr(pCoreRef)) + data := cfDataGetBytePtr(uintptr(pCoreRef)) + + // composite uint32 from the byte array + buf := unsafe.Slice((*byte)(data), length) + + // combine the bytes into a uint32 value + b := buf[length-8 : length-4] + pCoreHz = binary.LittleEndian.Uint32(b) + ioObjectRelease(service) + break + } + + ioObjectRelease(service) + } + + return float64(pCoreHz / 1_000_000), nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_fallback.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_fallback.go new file mode 100644 index 00000000000..b9e52aba176 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_fallback.go @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build darwin && !arm64 + +package cpu + +import "golang.org/x/sys/unix" + +func getFrequency() (float64, error) { + // Use the rated frequency of the CPU. This is a static value and does not + // account for low power or Turbo Boost modes. + cpuFrequency, err := unix.SysctlUint64("hw.cpufrequency") + return float64(cpuFrequency) / 1000000.0, err +} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_dragonfly.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_dragonfly.go similarity index 97% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_dragonfly.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_dragonfly.go index fef53e5dcc1..19b1e9dd3ef 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_dragonfly.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_dragonfly.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu import ( @@ -10,7 +11,7 @@ import ( "strings" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" "github.com/tklauser/go-sysconf" "golang.org/x/sys/unix" ) diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_dragonfly_amd64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_dragonfly_amd64.go new file mode 100644 index 00000000000..25ececa680e --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_dragonfly_amd64.go @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: BSD-3-Clause +package cpu + +type cpuTimes struct { + User uint64 + Nice uint64 + Sys uint64 + Intr uint64 + Idle uint64 +} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_fallback.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_fallback.go similarity index 83% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_fallback.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_fallback.go index 089f603c8fc..245c1ec98b8 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_fallback.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_fallback.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build !darwin && !linux && !freebsd && !openbsd && !netbsd && !solaris && !windows && !dragonfly && !plan9 && !aix -// +build !darwin,!linux,!freebsd,!openbsd,!netbsd,!solaris,!windows,!dragonfly,!plan9,!aix package cpu @@ -7,7 +7,7 @@ import ( "context" "runtime" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) func Times(percpu bool) ([]TimesStat, error) { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd.go 
b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd.go similarity index 96% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd.go index d3f47353cfc..5d17c7e977e 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu import ( @@ -10,9 +11,10 @@ import ( "strings" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" "github.com/tklauser/go-sysconf" "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/internal/common" ) var ( @@ -135,7 +137,7 @@ func parseDmesgBoot(fileName string) (InfoStat, int, error) { c.Model = matches[4] t, err := strconv.ParseInt(matches[5], 10, 32) if err != nil { - return c, 0, fmt.Errorf("unable to parse FreeBSD CPU stepping information from %q: %v", line, err) + return c, 0, fmt.Errorf("unable to parse FreeBSD CPU stepping information from %q: %w", line, err) } c.Stepping = int32(t) } else if matches := featuresMatch.FindStringSubmatch(line); matches != nil { @@ -149,12 +151,12 @@ func parseDmesgBoot(fileName string) (InfoStat, int, error) { } else if matches := cpuCores.FindStringSubmatch(line); matches != nil { t, err := strconv.ParseInt(matches[1], 10, 32) if err != nil { - return c, 0, fmt.Errorf("unable to parse FreeBSD CPU Nums from %q: %v", line, err) + return c, 0, fmt.Errorf("unable to parse FreeBSD CPU Nums from %q: %w", line, err) } cpuNum = int(t) t2, err := strconv.ParseInt(matches[2], 10, 32) if err != nil { - return c, 0, fmt.Errorf("unable to parse FreeBSD CPU cores from %q: %v", line, err) + return c, 0, fmt.Errorf("unable to parse FreeBSD CPU cores from %q: %w", line, err) } c.Cores = int32(t2) } diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_386.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_386.go new file mode 100644 index 00000000000..e4799bcf5c4 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_386.go @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: BSD-3-Clause +package cpu + +type cpuTimes struct { + User uint32 + Nice uint32 + Sys uint32 + Intr uint32 + Idle uint32 +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_amd64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_amd64.go new file mode 100644 index 00000000000..25ececa680e --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_amd64.go @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: BSD-3-Clause +package cpu + +type cpuTimes struct { + User uint64 + Nice uint64 + Sys uint64 + Intr uint64 + Idle uint64 +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_arm.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_arm.go new file mode 100644 index 00000000000..e4799bcf5c4 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_arm.go @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: BSD-3-Clause +package cpu + +type cpuTimes struct { + User uint32 + Nice uint32 + Sys uint32 + Intr uint32 + Idle uint32 +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_arm64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_arm64.go new file mode 100644 index 00000000000..25ececa680e --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_arm64.go @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: BSD-3-Clause +package cpu + +type cpuTimes struct { + User uint64 + Nice uint64 + Sys uint64 + Intr uint64 + 
Idle uint64 +} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_linux.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_linux.go similarity index 97% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_linux.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_linux.go index da467e2dd20..5f595e7b3ef 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_linux.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_linux.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build linux -// +build linux package cpu @@ -13,7 +13,7 @@ import ( "github.com/tklauser/go-sysconf" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) var ClocksPerSec = float64(100) @@ -395,7 +395,7 @@ func CountsWithContext(ctx context.Context, logical bool) (int, error) { for _, line := range lines { line = strings.ToLower(line) if strings.HasPrefix(line, "processor") { - _, err = strconv.Atoi(strings.TrimSpace(line[strings.IndexByte(line, ':')+1:])) + _, err = strconv.ParseInt(strings.TrimSpace(line[strings.IndexByte(line, ':')+1:]), 10, 32) if err == nil { ret++ } @@ -464,11 +464,11 @@ func CountsWithContext(ctx context.Context, logical bool) (int, error) { } fields[0] = strings.TrimSpace(fields[0]) if fields[0] == "physical id" || fields[0] == "cpu cores" { - val, err := strconv.Atoi(strings.TrimSpace(fields[1])) + val, err := strconv.ParseInt(strings.TrimSpace(fields[1]), 10, 32) if err != nil { continue } - currentInfo[fields[0]] = val + currentInfo[fields[0]] = int(val) } } ret := 0 diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd.go similarity index 96% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd.go index 1f66be34255..198be5e644d 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build netbsd -// +build netbsd package cpu @@ -9,9 +9,10 @@ import ( "runtime" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" "github.com/tklauser/go-sysconf" "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/internal/common" ) const ( diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_amd64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_amd64.go new file mode 100644 index 00000000000..25ececa680e --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_amd64.go @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: BSD-3-Clause +package cpu + +type cpuTimes struct { + User uint64 + Nice uint64 + Sys uint64 + Intr uint64 + Idle uint64 +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_arm.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_arm.go new file mode 100644 index 00000000000..e4799bcf5c4 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_arm.go @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: BSD-3-Clause +package cpu + +type cpuTimes struct { + User uint32 + Nice uint32 + Sys uint32 + Intr uint32 + Idle uint32 +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_arm64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_arm64.go new file mode 100644 index 00000000000..25ececa680e --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_arm64.go @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: BSD-3-Clause +package cpu + +type 
cpuTimes struct { + User uint64 + Nice uint64 + Sys uint64 + Intr uint64 + Idle uint64 +} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd.go similarity index 96% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd.go index fe33290300e..33233d3c74a 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd -// +build openbsd package cpu @@ -9,7 +9,7 @@ import ( "runtime" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" "github.com/tklauser/go-sysconf" "golang.org/x/sys/unix" ) diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_386.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_386.go new file mode 100644 index 00000000000..40a6f43e498 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_386.go @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: BSD-3-Clause +package cpu + +type cpuTimes struct { + User uint32 + Nice uint32 + Sys uint32 + Spin uint32 + Intr uint32 + Idle uint32 +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_amd64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_amd64.go new file mode 100644 index 00000000000..464156d5402 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_amd64.go @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: BSD-3-Clause +package cpu + +type cpuTimes struct { + User uint64 + Nice uint64 + Sys uint64 + Spin uint64 + Intr uint64 + Idle uint64 +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_arm.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_arm.go new file mode 100644 index 00000000000..40a6f43e498 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_arm.go @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: BSD-3-Clause +package cpu + +type cpuTimes struct { + User uint32 + Nice uint32 + Sys uint32 + Spin uint32 + Intr uint32 + Idle uint32 +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_arm64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_arm64.go new file mode 100644 index 00000000000..464156d5402 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_arm64.go @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: BSD-3-Clause +package cpu + +type cpuTimes struct { + User uint64 + Nice uint64 + Sys uint64 + Spin uint64 + Intr uint64 + Idle uint64 +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_riscv64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_riscv64.go new file mode 100644 index 00000000000..464156d5402 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_riscv64.go @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: BSD-3-Clause +package cpu + +type cpuTimes struct { + User uint64 + Nice uint64 + Sys uint64 + Spin uint64 + Intr uint64 + Idle uint64 +} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_plan9.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_plan9.go similarity index 91% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_plan9.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_plan9.go index a2e99d8c05e..bff2e0c7584 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_plan9.go +++ 
b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_plan9.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build plan9 -// +build plan9 package cpu @@ -9,7 +9,7 @@ import ( "runtime" stats "github.com/lufia/plan9stats" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) func Times(percpu bool) ([]TimesStat, error) { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_solaris.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_solaris.go similarity index 99% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_solaris.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_solaris.go index 4231ad16819..d8ba1d3242e 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_solaris.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_solaris.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu import ( diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_windows.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_windows.go similarity index 97% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_windows.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_windows.go index e10612fd19d..4476b91cb5f 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_windows.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_windows.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build windows -// +build windows package cpu @@ -8,14 +8,12 @@ import ( "fmt" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" "github.com/yusufpapurcu/wmi" "golang.org/x/sys/windows" ) -var ( - procGetNativeSystemInfo = common.Modkernel32.NewProc("GetNativeSystemInfo") -) +var procGetNativeSystemInfo = common.Modkernel32.NewProc("GetNativeSystemInfo") type win32_Processor struct { Family uint16 diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/binary.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/binary.go similarity index 99% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/binary.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/binary.go index 5e8d43db835..6e75e74b018 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/binary.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/binary.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package common // Copyright 2009 The Go Authors. All rights reserved. diff --git a/vendor/github.com/shirou/gopsutil/v4/internal/common/common.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common.go new file mode 100644 index 00000000000..868ea4daeed --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common.go @@ -0,0 +1,474 @@ +// SPDX-License-Identifier: BSD-3-Clause +package common + +// +// gopsutil is a port of psutil(http://pythonhosted.org/psutil/). +// This covers these architectures. 
+// - linux (amd64, arm) +// - freebsd (amd64) +// - windows (amd64) + +import ( + "bufio" + "bytes" + "context" + "errors" + "fmt" + "io" + "math" + "net/url" + "os" + "os/exec" + "path" + "path/filepath" + "reflect" + "runtime" + "strconv" + "strings" + "time" + + "github.com/shirou/gopsutil/v4/common" +) + +var ( + Timeout = 3 * time.Second + ErrTimeout = errors.New("command timed out") +) + +type Invoker interface { + Command(string, ...string) ([]byte, error) + CommandWithContext(context.Context, string, ...string) ([]byte, error) +} + +type Invoke struct{} + +func (i Invoke) Command(name string, arg ...string) ([]byte, error) { + ctx, cancel := context.WithTimeout(context.Background(), Timeout) + defer cancel() + return i.CommandWithContext(ctx, name, arg...) +} + +func (i Invoke) CommandWithContext(ctx context.Context, name string, arg ...string) ([]byte, error) { + cmd := exec.CommandContext(ctx, name, arg...) + + var buf bytes.Buffer + cmd.Stdout = &buf + cmd.Stderr = &buf + + if err := cmd.Start(); err != nil { + return buf.Bytes(), err + } + + if err := cmd.Wait(); err != nil { + return buf.Bytes(), err + } + + return buf.Bytes(), nil +} + +type FakeInvoke struct { + Suffix string // Suffix species expected file name suffix such as "fail" + Error error // If Error specified, return the error. +} + +// Command in FakeInvoke returns from expected file if exists. +func (i FakeInvoke) Command(name string, arg ...string) ([]byte, error) { + if i.Error != nil { + return []byte{}, i.Error + } + + arch := runtime.GOOS + + commandName := filepath.Base(name) + + fname := strings.Join(append([]string{commandName}, arg...), "") + fname = url.QueryEscape(fname) + fpath := path.Join("testdata", arch, fname) + if i.Suffix != "" { + fpath += "_" + i.Suffix + } + if PathExists(fpath) { + return os.ReadFile(fpath) + } + return []byte{}, fmt.Errorf("could not find testdata: %s", fpath) +} + +func (i FakeInvoke) CommandWithContext(ctx context.Context, name string, arg ...string) ([]byte, error) { + return i.Command(name, arg...) +} + +var ErrNotImplementedError = errors.New("not implemented yet") + +// ReadFile reads contents from a file +func ReadFile(filename string) (string, error) { + content, err := os.ReadFile(filename) + if err != nil { + return "", err + } + + return string(content), nil +} + +// ReadLines reads contents from a file and splits them by new lines. +// A convenience wrapper to ReadLinesOffsetN(filename, 0, -1). +func ReadLines(filename string) ([]string, error) { + return ReadLinesOffsetN(filename, 0, -1) +} + +// ReadLine reads a file and returns the first occurrence of a line that is prefixed with prefix. +func ReadLine(filename string, prefix string) (string, error) { + f, err := os.Open(filename) + if err != nil { + return "", err + } + defer f.Close() + r := bufio.NewReader(f) + for { + line, err := r.ReadString('\n') + if err != nil { + if err == io.EOF { + break + } + return "", err + } + if strings.HasPrefix(line, prefix) { + return line, nil + } + } + + return "", nil +} + +// ReadLinesOffsetN reads contents from file and splits them by new line. +// The offset tells at which line number to start. 
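+// Lines before the offset are still read from the file, just not returned.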
+// The count determines the number of lines to read (starting from offset): +// n >= 0: at most n lines +// n < 0: whole file +func ReadLinesOffsetN(filename string, offset uint, n int) ([]string, error) { + f, err := os.Open(filename) + if err != nil { + return []string{""}, err + } + defer f.Close() + + var ret []string + + r := bufio.NewReader(f) + for i := uint(0); i < uint(n)+offset || n < 0; i++ { + line, err := r.ReadString('\n') + if err != nil { + if err == io.EOF && len(line) > 0 { + ret = append(ret, strings.Trim(line, "\n")) + } + break + } + if i < offset { + continue + } + ret = append(ret, strings.Trim(line, "\n")) + } + + return ret, nil +} + +func IntToString(orig []int8) string { + ret := make([]byte, len(orig)) + size := -1 + for i, o := range orig { + if o == 0 { + size = i + break + } + ret[i] = byte(o) + } + if size == -1 { + size = len(orig) + } + + return string(ret[0:size]) +} + +func UintToString(orig []uint8) string { + ret := make([]byte, len(orig)) + size := -1 + for i, o := range orig { + if o == 0 { + size = i + break + } + ret[i] = byte(o) + } + if size == -1 { + size = len(orig) + } + + return string(ret[0:size]) +} + +func ByteToString(orig []byte) string { + n := -1 + l := -1 + for i, b := range orig { + // skip left side null + if l == -1 && b == 0 { + continue + } + if l == -1 { + l = i + } + + if b == 0 { + break + } + n = i + 1 + } + if n == -1 { + return string(orig) + } + return string(orig[l:n]) +} + +// ReadInts reads contents from single line file and returns them as []int32. +func ReadInts(filename string) ([]int64, error) { + f, err := os.Open(filename) + if err != nil { + return []int64{}, err + } + defer f.Close() + + var ret []int64 + + r := bufio.NewReader(f) + + // The int files that this is concerned with should only be one liners. + line, err := r.ReadString('\n') + if err != nil { + return []int64{}, err + } + + i, err := strconv.ParseInt(strings.Trim(line, "\n"), 10, 32) + if err != nil { + return []int64{}, err + } + ret = append(ret, i) + + return ret, nil +} + +// Parse Hex to uint32 without error +func HexToUint32(hex string) uint32 { + vv, _ := strconv.ParseUint(hex, 16, 32) + return uint32(vv) +} + +// Parse to int32 without error +func mustParseInt32(val string) int32 { + vv, _ := strconv.ParseInt(val, 10, 32) + return int32(vv) +} + +// Parse to uint64 without error +func mustParseUint64(val string) uint64 { + vv, _ := strconv.ParseInt(val, 10, 64) + return uint64(vv) +} + +// Parse to Float64 without error +func mustParseFloat64(val string) float64 { + vv, _ := strconv.ParseFloat(val, 64) + return vv +} + +// StringsHas checks the target string slice contains src or not +func StringsHas(target []string, src string) bool { + for _, t := range target { + if strings.TrimSpace(t) == src { + return true + } + } + return false +} + +// StringsContains checks the src in any string of the target string slice +func StringsContains(target []string, src string) bool { + for _, t := range target { + if strings.Contains(t, src) { + return true + } + } + return false +} + +// IntContains checks the src in any int of the target int slice. +func IntContains(target []int, src int) bool { + for _, t := range target { + if src == t { + return true + } + } + return false +} + +// get struct attributes. +// This method is used only for debugging platform dependent code. 
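+// It returns nil when m, after any pointer indirection, is not a struct.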
+func attributes(m interface{}) map[string]reflect.Type { + typ := reflect.TypeOf(m) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + } + + attrs := make(map[string]reflect.Type) + if typ.Kind() != reflect.Struct { + return nil + } + + for i := 0; i < typ.NumField(); i++ { + p := typ.Field(i) + if !p.Anonymous { + attrs[p.Name] = p.Type + } + } + + return attrs +} + +func PathExists(filename string) bool { + if _, err := os.Stat(filename); err == nil { + return true + } + return false +} + +// PathExistsWithContents returns the filename exists and it is not empty +func PathExistsWithContents(filename string) bool { + info, err := os.Stat(filename) + if err != nil { + return false + } + return info.Size() > 4 && !info.IsDir() // at least 4 bytes +} + +// GetEnvWithContext retrieves the environment variable key. If it does not exist it returns the default. +// The context may optionally contain a map superseding os.EnvKey. +func GetEnvWithContext(ctx context.Context, key string, dfault string, combineWith ...string) string { + var value string + if env, ok := ctx.Value(common.EnvKey).(common.EnvMap); ok { + value = env[common.EnvKeyType(key)] + } + if value == "" { + value = os.Getenv(key) + } + if value == "" { + value = dfault + } + + return combine(value, combineWith) +} + +// GetEnv retrieves the environment variable key. If it does not exist it returns the default. +func GetEnv(key string, dfault string, combineWith ...string) string { + value := os.Getenv(key) + if value == "" { + value = dfault + } + + return combine(value, combineWith) +} + +func combine(value string, combineWith []string) string { + switch len(combineWith) { + case 0: + return value + case 1: + return filepath.Join(value, combineWith[0]) + default: + all := make([]string, len(combineWith)+1) + all[0] = value + copy(all[1:], combineWith) + return filepath.Join(all...) + } +} + +func HostProc(combineWith ...string) string { + return GetEnv("HOST_PROC", "/proc", combineWith...) +} + +func HostSys(combineWith ...string) string { + return GetEnv("HOST_SYS", "/sys", combineWith...) +} + +func HostEtc(combineWith ...string) string { + return GetEnv("HOST_ETC", "/etc", combineWith...) +} + +func HostVar(combineWith ...string) string { + return GetEnv("HOST_VAR", "/var", combineWith...) +} + +func HostRun(combineWith ...string) string { + return GetEnv("HOST_RUN", "/run", combineWith...) +} + +func HostDev(combineWith ...string) string { + return GetEnv("HOST_DEV", "/dev", combineWith...) +} + +func HostRoot(combineWith ...string) string { + return GetEnv("HOST_ROOT", "/", combineWith...) +} + +func HostProcWithContext(ctx context.Context, combineWith ...string) string { + return GetEnvWithContext(ctx, "HOST_PROC", "/proc", combineWith...) +} + +func HostProcMountInfoWithContext(ctx context.Context, combineWith ...string) string { + return GetEnvWithContext(ctx, "HOST_PROC_MOUNTINFO", "", combineWith...) +} + +func HostSysWithContext(ctx context.Context, combineWith ...string) string { + return GetEnvWithContext(ctx, "HOST_SYS", "/sys", combineWith...) +} + +func HostEtcWithContext(ctx context.Context, combineWith ...string) string { + return GetEnvWithContext(ctx, "HOST_ETC", "/etc", combineWith...) +} + +func HostVarWithContext(ctx context.Context, combineWith ...string) string { + return GetEnvWithContext(ctx, "HOST_VAR", "/var", combineWith...) +} + +func HostRunWithContext(ctx context.Context, combineWith ...string) string { + return GetEnvWithContext(ctx, "HOST_RUN", "/run", combineWith...) 
+} + +func HostDevWithContext(ctx context.Context, combineWith ...string) string { + return GetEnvWithContext(ctx, "HOST_DEV", "/dev", combineWith...) +} + +func HostRootWithContext(ctx context.Context, combineWith ...string) string { + return GetEnvWithContext(ctx, "HOST_ROOT", "/", combineWith...) +} + +// getSysctrlEnv sets LC_ALL=C in a list of env vars for use when running +// sysctl commands (see DoSysctrl). +func getSysctrlEnv(env []string) []string { + foundLC := false + for i, line := range env { + if strings.HasPrefix(line, "LC_ALL") { + env[i] = "LC_ALL=C" + foundLC = true + } + } + if !foundLC { + env = append(env, "LC_ALL=C") + } + return env +} + +// Round places rounds the number 'val' to 'n' decimal places +func Round(val float64, n int) float64 { + // Calculate the power of 10 to the n + pow10 := math.Pow(10, float64(n)) + // Multiply the value by pow10, round it, then divide it by pow10 + return math.Round(val*pow10) / pow10 +} diff --git a/vendor/github.com/shirou/gopsutil/v4/internal/common/common_darwin.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_darwin.go new file mode 100644 index 00000000000..2de3bb14568 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_darwin.go @@ -0,0 +1,402 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build darwin + +package common + +import ( + "context" + "fmt" + "os" + "os/exec" + "strings" + "unsafe" + + "github.com/ebitengine/purego" + "golang.org/x/sys/unix" +) + +func DoSysctrlWithContext(ctx context.Context, mib string) ([]string, error) { + cmd := exec.CommandContext(ctx, "sysctl", "-n", mib) + cmd.Env = getSysctrlEnv(os.Environ()) + out, err := cmd.Output() + if err != nil { + return []string{}, err + } + v := strings.Replace(string(out), "{ ", "", 1) + v = strings.Replace(string(v), " }", "", 1) + values := strings.Fields(string(v)) + + return values, nil +} + +func CallSyscall(mib []int32) ([]byte, uint64, error) { + miblen := uint64(len(mib)) + + // get required buffer size + length := uint64(0) + _, _, err := unix.Syscall6( + 202, // unix.SYS___SYSCTL https://github.com/golang/sys/blob/76b94024e4b621e672466e8db3d7f084e7ddcad2/unix/zsysnum_darwin_amd64.go#L146 + uintptr(unsafe.Pointer(&mib[0])), + uintptr(miblen), + 0, + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if err != 0 { + var b []byte + return b, length, err + } + if length == 0 { + var b []byte + return b, length, err + } + // get proc info itself + buf := make([]byte, length) + _, _, err = unix.Syscall6( + 202, // unix.SYS___SYSCTL https://github.com/golang/sys/blob/76b94024e4b621e672466e8db3d7f084e7ddcad2/unix/zsysnum_darwin_amd64.go#L146 + uintptr(unsafe.Pointer(&mib[0])), + uintptr(miblen), + uintptr(unsafe.Pointer(&buf[0])), + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if err != 0 { + return buf, length, err + } + + return buf, length, nil +} + +// Library represents a dynamic library loaded by purego. 
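+// Close must be called once the functions resolved from the library are no
+// longer needed. A minimal usage sketch (illustrative; error handling elided):
+//
+//	lib, _ := NewLibrary(System)
+//	defer lib.Close()
+//	machHostSelf := GetFunc[MachHostSelfFunc](lib, MachHostSelfSym)
+//	hostPort := machHostSelf() // mach host self port for later mach calls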
+type Library struct { + addr uintptr + path string + close func() +} + +// library paths +const ( + IOKit = "/System/Library/Frameworks/IOKit.framework/IOKit" + CoreFoundation = "/System/Library/Frameworks/CoreFoundation.framework/CoreFoundation" + System = "/usr/lib/libSystem.B.dylib" +) + +func NewLibrary(path string) (*Library, error) { + lib, err := purego.Dlopen(path, purego.RTLD_LAZY|purego.RTLD_GLOBAL) + if err != nil { + return nil, err + } + + closeFunc := func() { + purego.Dlclose(lib) + } + + return &Library{ + addr: lib, + path: path, + close: closeFunc, + }, nil +} + +func (lib *Library) Dlsym(symbol string) (uintptr, error) { + return purego.Dlsym(lib.addr, symbol) +} + +func GetFunc[T any](lib *Library, symbol string) T { + var fptr T + purego.RegisterLibFunc(&fptr, lib.addr, symbol) + return fptr +} + +func (lib *Library) Close() { + lib.close() +} + +// status codes +const ( + KERN_SUCCESS = 0 +) + +// IOKit functions and symbols. +type ( + IOServiceGetMatchingServiceFunc func(mainPort uint32, matching uintptr) uint32 + IOServiceGetMatchingServicesFunc func(mainPort uint32, matching uintptr, existing *uint32) int + IOServiceMatchingFunc func(name string) unsafe.Pointer + IOServiceOpenFunc func(service, owningTask, connType uint32, connect *uint32) int + IOServiceCloseFunc func(connect uint32) int + IOIteratorNextFunc func(iterator uint32) uint32 + IORegistryEntryGetNameFunc func(entry uint32, name CStr) int + IORegistryEntryGetParentEntryFunc func(entry uint32, plane string, parent *uint32) int + IORegistryEntryCreateCFPropertyFunc func(entry uint32, key, allocator uintptr, options uint32) unsafe.Pointer + IORegistryEntryCreateCFPropertiesFunc func(entry uint32, properties unsafe.Pointer, allocator uintptr, options uint32) int + IOObjectConformsToFunc func(object uint32, className string) bool + IOObjectReleaseFunc func(object uint32) int + IOConnectCallStructMethodFunc func(connection, selector uint32, inputStruct, inputStructCnt, outputStruct uintptr, outputStructCnt *uintptr) int + + IOHIDEventSystemClientCreateFunc func(allocator uintptr) unsafe.Pointer + IOHIDEventSystemClientSetMatchingFunc func(client, match uintptr) int + IOHIDServiceClientCopyEventFunc func(service uintptr, eventType int64, + options int32, timeout int64) unsafe.Pointer + IOHIDServiceClientCopyPropertyFunc func(service, property uintptr) unsafe.Pointer + IOHIDEventGetFloatValueFunc func(event uintptr, field int32) float64 + IOHIDEventSystemClientCopyServicesFunc func(client uintptr) unsafe.Pointer +) + +const ( + IOServiceGetMatchingServiceSym = "IOServiceGetMatchingService" + IOServiceGetMatchingServicesSym = "IOServiceGetMatchingServices" + IOServiceMatchingSym = "IOServiceMatching" + IOServiceOpenSym = "IOServiceOpen" + IOServiceCloseSym = "IOServiceClose" + IOIteratorNextSym = "IOIteratorNext" + IORegistryEntryGetNameSym = "IORegistryEntryGetName" + IORegistryEntryGetParentEntrySym = "IORegistryEntryGetParentEntry" + IORegistryEntryCreateCFPropertySym = "IORegistryEntryCreateCFProperty" + IORegistryEntryCreateCFPropertiesSym = "IORegistryEntryCreateCFProperties" + IOObjectConformsToSym = "IOObjectConformsTo" + IOObjectReleaseSym = "IOObjectRelease" + IOConnectCallStructMethodSym = "IOConnectCallStructMethod" + + IOHIDEventSystemClientCreateSym = "IOHIDEventSystemClientCreate" + IOHIDEventSystemClientSetMatchingSym = "IOHIDEventSystemClientSetMatching" + IOHIDServiceClientCopyEventSym = "IOHIDServiceClientCopyEvent" + IOHIDServiceClientCopyPropertySym = "IOHIDServiceClientCopyProperty" + 
IOHIDEventGetFloatValueSym = "IOHIDEventGetFloatValue" + IOHIDEventSystemClientCopyServicesSym = "IOHIDEventSystemClientCopyServices" +) + +const ( + KIOMainPortDefault = 0 + + KIOHIDEventTypeTemperature = 15 + + KNilOptions = 0 +) + +const ( + KIOMediaWholeKey = "Media" + KIOServicePlane = "IOService" +) + +// CoreFoundation functions and symbols. +type ( + CFGetTypeIDFunc func(cf uintptr) int32 + CFNumberCreateFunc func(allocator uintptr, theType int32, valuePtr uintptr) unsafe.Pointer + CFNumberGetValueFunc func(num uintptr, theType int32, valuePtr uintptr) bool + CFDictionaryCreateFunc func(allocator uintptr, keys, values *unsafe.Pointer, numValues int32, + keyCallBacks, valueCallBacks uintptr) unsafe.Pointer + CFDictionaryAddValueFunc func(theDict, key, value uintptr) + CFDictionaryGetValueFunc func(theDict, key uintptr) unsafe.Pointer + CFArrayGetCountFunc func(theArray uintptr) int32 + CFArrayGetValueAtIndexFunc func(theArray uintptr, index int32) unsafe.Pointer + CFStringCreateMutableFunc func(alloc uintptr, maxLength int32) unsafe.Pointer + CFStringGetLengthFunc func(theString uintptr) int32 + CFStringGetCStringFunc func(theString uintptr, buffer CStr, bufferSize int32, encoding uint32) + CFStringCreateWithCStringFunc func(alloc uintptr, cStr string, encoding uint32) unsafe.Pointer + CFDataGetLengthFunc func(theData uintptr) int32 + CFDataGetBytePtrFunc func(theData uintptr) unsafe.Pointer + CFReleaseFunc func(cf uintptr) +) + +const ( + CFGetTypeIDSym = "CFGetTypeID" + CFNumberCreateSym = "CFNumberCreate" + CFNumberGetValueSym = "CFNumberGetValue" + CFDictionaryCreateSym = "CFDictionaryCreate" + CFDictionaryAddValueSym = "CFDictionaryAddValue" + CFDictionaryGetValueSym = "CFDictionaryGetValue" + CFArrayGetCountSym = "CFArrayGetCount" + CFArrayGetValueAtIndexSym = "CFArrayGetValueAtIndex" + CFStringCreateMutableSym = "CFStringCreateMutable" + CFStringGetLengthSym = "CFStringGetLength" + CFStringGetCStringSym = "CFStringGetCString" + CFStringCreateWithCStringSym = "CFStringCreateWithCString" + CFDataGetLengthSym = "CFDataGetLength" + CFDataGetBytePtrSym = "CFDataGetBytePtr" + CFReleaseSym = "CFRelease" +) + +const ( + KCFStringEncodingUTF8 = 0x08000100 + KCFNumberSInt64Type = 4 + KCFNumberIntType = 9 + KCFAllocatorDefault = 0 +) + +// Kernel functions and symbols. +type MachTimeBaseInfo struct { + Numer uint32 + Denom uint32 +} + +type ( + HostProcessorInfoFunc func(host uint32, flavor int32, outProcessorCount *uint32, outProcessorInfo uintptr, + outProcessorInfoCnt *uint32) int + HostStatisticsFunc func(host uint32, flavor int32, hostInfoOut uintptr, hostInfoOutCnt *uint32) int + MachHostSelfFunc func() uint32 + MachTaskSelfFunc func() uint32 + MachTimeBaseInfoFunc func(info uintptr) int + VMDeallocateFunc func(targetTask uint32, vmAddress, vmSize uintptr) int +) + +const ( + HostProcessorInfoSym = "host_processor_info" + HostStatisticsSym = "host_statistics" + MachHostSelfSym = "mach_host_self" + MachTaskSelfSym = "mach_task_self" + MachTimeBaseInfoSym = "mach_timebase_info" + VMDeallocateSym = "vm_deallocate" +) + +const ( + CTL_KERN = 1 + KERN_ARGMAX = 8 + KERN_PROCARGS2 = 49 + + HOST_VM_INFO = 2 + HOST_CPU_LOAD_INFO = 3 + + HOST_VM_INFO_COUNT = 0xf +) + +// System functions and symbols. 
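+// These are resolved from the libSystem image at the System path above.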
+type ( + ProcPidPathFunc func(pid int32, buffer uintptr, bufferSize uint32) int32 + ProcPidInfoFunc func(pid, flavor int32, arg uint64, buffer uintptr, bufferSize int32) int32 +) + +const ( + SysctlSym = "sysctl" + ProcPidPathSym = "proc_pidpath" + ProcPidInfoSym = "proc_pidinfo" +) + +const ( + MAXPATHLEN = 1024 + PROC_PIDPATHINFO_MAXSIZE = 4 * MAXPATHLEN + PROC_PIDTASKINFO = 4 + PROC_PIDVNODEPATHINFO = 9 +) + +// SMC represents a SMC instance. +type SMC struct { + lib *Library + conn uint32 + callStruct IOConnectCallStructMethodFunc +} + +const ioServiceSMC = "AppleSMC" + +const ( + KSMCUserClientOpen = 0 + KSMCUserClientClose = 1 + KSMCHandleYPCEvent = 2 + KSMCReadKey = 5 + KSMCWriteKey = 6 + KSMCGetKeyCount = 7 + KSMCGetKeyFromIndex = 8 + KSMCGetKeyInfo = 9 +) + +const ( + KSMCSuccess = 0 + KSMCError = 1 + KSMCKeyNotFound = 132 +) + +func NewSMC(ioKit *Library) (*SMC, error) { + if ioKit.path != IOKit { + return nil, fmt.Errorf("library is not IOKit") + } + + ioServiceGetMatchingService := GetFunc[IOServiceGetMatchingServiceFunc](ioKit, IOServiceGetMatchingServiceSym) + ioServiceMatching := GetFunc[IOServiceMatchingFunc](ioKit, IOServiceMatchingSym) + ioServiceOpen := GetFunc[IOServiceOpenFunc](ioKit, IOServiceOpenSym) + ioObjectRelease := GetFunc[IOObjectReleaseFunc](ioKit, IOObjectReleaseSym) + machTaskSelf := GetFunc[MachTaskSelfFunc](ioKit, MachTaskSelfSym) + + ioConnectCallStructMethod := GetFunc[IOConnectCallStructMethodFunc](ioKit, IOConnectCallStructMethodSym) + + service := ioServiceGetMatchingService(0, uintptr(ioServiceMatching(ioServiceSMC))) + if service == 0 { + return nil, fmt.Errorf("ERROR: %s NOT FOUND", ioServiceSMC) + } + + var conn uint32 + if result := ioServiceOpen(service, machTaskSelf(), 0, &conn); result != 0 { + return nil, fmt.Errorf("ERROR: IOServiceOpen failed") + } + + ioObjectRelease(service) + return &SMC{ + lib: ioKit, + conn: conn, + callStruct: ioConnectCallStructMethod, + }, nil +} + +func (s *SMC) CallStruct(selector uint32, inputStruct, inputStructCnt, outputStruct uintptr, outputStructCnt *uintptr) int { + return s.callStruct(s.conn, selector, inputStruct, inputStructCnt, outputStruct, outputStructCnt) +} + +func (s *SMC) Close() error { + ioServiceClose := GetFunc[IOServiceCloseFunc](s.lib, IOServiceCloseSym) + + if result := ioServiceClose(s.conn); result != 0 { + return fmt.Errorf("ERROR: IOServiceClose failed") + } + return nil +} + +type CStr []byte + +func NewCStr(length int32) CStr { + return make(CStr, length) +} + +func (s CStr) Length() int32 { + // Include null terminator to make CFStringGetCString properly functions + return int32(len(s)) + 1 +} + +func (s CStr) Ptr() *byte { + if len(s) < 1 { + return nil + } + + return &s[0] +} + +func (c CStr) Addr() uintptr { + return uintptr(unsafe.Pointer(c.Ptr())) +} + +func (s CStr) GoString() string { + if s == nil { + return "" + } + + var length int + for _, char := range s { + if char == '\x00' { + break + } + length++ + } + return string(s[:length]) +} + +// https://github.com/ebitengine/purego/blob/main/internal/strings/strings.go#L26 +func GoString(cStr *byte) string { + if cStr == nil { + return "" + } + var length int + for { + if *(*byte)(unsafe.Add(unsafe.Pointer(cStr), uintptr(length))) == '\x00' { + break + } + length++ + } + return string(unsafe.Slice(cStr, length)) +} diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_freebsd.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_freebsd.go similarity index 97% rename from 
vendor/github.com/shirou/gopsutil/v3/internal/common/common_freebsd.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/common_freebsd.go index f590e2e67e6..53cdceeb6d4 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_freebsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_freebsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build freebsd || openbsd -// +build freebsd openbsd package common diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_linux.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_linux.go similarity index 97% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/common_linux.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/common_linux.go index a429e16a2cc..277034f3728 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_linux.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_linux.go @@ -1,11 +1,11 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build linux -// +build linux package common import ( "context" - "fmt" + "errors" "os" "os/exec" "path/filepath" @@ -90,6 +90,8 @@ func BootTimeWithContext(ctx context.Context, enableCache bool) (uint64, error) if enableCache { atomic.StoreUint64(&cachedBootTime, t) } + + return t, nil } filename := HostProcWithContext(ctx, "uptime") @@ -97,15 +99,16 @@ func BootTimeWithContext(ctx context.Context, enableCache bool) (uint64, error) if err != nil { return handleBootTimeFileReadErr(err) } + currentTime := float64(time.Now().UnixNano()) / float64(time.Second) + if len(lines) != 1 { - return 0, fmt.Errorf("wrong uptime format") + return 0, errors.New("wrong uptime format") } f := strings.Fields(lines[0]) b, err := strconv.ParseFloat(f[0], 64) if err != nil { return 0, err } - currentTime := float64(time.Now().UnixNano()) / float64(time.Second) t := currentTime - b if enableCache { @@ -139,7 +142,7 @@ func readBootTimeStat(ctx context.Context) (uint64, error) { if strings.HasPrefix(line, "btime") { f := strings.Fields(line) if len(f) != 2 { - return 0, fmt.Errorf("wrong btime format") + return 0, errors.New("wrong btime format") } b, err := strconv.ParseInt(f[1], 10, 64) if err != nil { @@ -148,7 +151,7 @@ func readBootTimeStat(ctx context.Context) (uint64, error) { t := uint64(b) return t, nil } - return 0, fmt.Errorf("could not find btime") + return 0, errors.New("could not find btime") } func Virtualization() (string, string, error) { diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_netbsd.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_netbsd.go similarity index 96% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/common_netbsd.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/common_netbsd.go index efbc710a5ef..206532126c9 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_netbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_netbsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build netbsd -// +build netbsd package common diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_openbsd.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_openbsd.go similarity index 96% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/common_openbsd.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/common_openbsd.go index 58d76f334e2..00fa19a2fb4 100644 --- 
a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_openbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_openbsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd -// +build openbsd package common diff --git a/vendor/github.com/shirou/gopsutil/v4/internal/common/common_unix.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_unix.go new file mode 100644 index 00000000000..c9f91b1698a --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_unix.go @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build linux || freebsd || darwin || openbsd + +package common + +import ( + "context" + "errors" + "os/exec" + "strconv" + "strings" +) + +func CallLsofWithContext(ctx context.Context, invoke Invoker, pid int32, args ...string) ([]string, error) { + var cmd []string + if pid == 0 { // will get from all processes. + cmd = []string{"-a", "-n", "-P"} + } else { + cmd = []string{"-a", "-n", "-P", "-p", strconv.Itoa(int(pid))} + } + cmd = append(cmd, args...) + out, err := invoke.CommandWithContext(ctx, "lsof", cmd...) + if err != nil { + if errors.Is(err, exec.ErrNotFound) { + return []string{}, err + } + // if no pid found, lsof returns code 1. + if err.Error() == "exit status 1" && len(out) == 0 { + return []string{}, nil + } + } + lines := strings.Split(string(out), "\n") + + var ret []string + for _, l := range lines[1:] { + if len(l) == 0 { + continue + } + ret = append(ret, l) + } + return ret, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_windows.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_windows.go similarity index 99% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/common_windows.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/common_windows.go index 301b2315bb4..766ed2fcba3 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_windows.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_windows.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build windows -// +build windows package common diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/endian.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/endian.go similarity index 88% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/endian.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/endian.go index 147cfdc4b71..113ff2e9f42 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/endian.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/endian.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package common import "unsafe" diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/sleep.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/sleep.go similarity index 89% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/sleep.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/sleep.go index 94cedfd3438..504f13ffd98 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/sleep.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/sleep.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package common import ( diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/warnings.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/warnings.go similarity index 92% rename from 
vendor/github.com/shirou/gopsutil/v3/internal/common/warnings.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/warnings.go index a4aaadaf54d..888cc57faee 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/warnings.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/warnings.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package common import "fmt" diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/ex_linux.go b/vendor/github.com/shirou/gopsutil/v4/mem/ex_linux.go new file mode 100644 index 00000000000..0a12fe2fe34 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/mem/ex_linux.go @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build linux + +package mem + +import ( + "context" + "encoding/json" +) + +type ExVirtualMemory struct { + ActiveFile uint64 `json:"activefile"` + InactiveFile uint64 `json:"inactivefile"` + ActiveAnon uint64 `json:"activeanon"` + InactiveAnon uint64 `json:"inactiveanon"` + Unevictable uint64 `json:"unevictable"` +} + +func (v ExVirtualMemory) String() string { + s, _ := json.Marshal(v) + return string(s) +} + +type ExLinux struct{} + +func NewExLinux() *ExLinux { + return &ExLinux{} +} + +func (ex *ExLinux) VirtualMemory() (*ExVirtualMemory, error) { + return ex.VirtualMemoryWithContext(context.Background()) +} + +func (ex *ExLinux) VirtualMemoryWithContext(ctx context.Context) (*ExVirtualMemory, error) { + _, vmEx, err := fillFromMeminfoWithContext(ctx) + if err != nil { + return nil, err + } + return vmEx, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/ex_windows.go b/vendor/github.com/shirou/gopsutil/v4/mem/ex_windows.go new file mode 100644 index 00000000000..5c49a478ce5 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/mem/ex_windows.go @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build windows + +package mem + +import ( + "unsafe" + + "golang.org/x/sys/windows" +) + +// ExVirtualMemory represents Windows specific information +// https://learn.microsoft.com/en-us/windows/win32/api/sysinfoapi/ns-sysinfoapi-memorystatusex +// https://learn.microsoft.com/en-us/windows/win32/api/psapi/ns-psapi-performance_information +type ExVirtualMemory struct { + CommitLimit uint64 `json:"commitLimit"` + CommitTotal uint64 `json:"commitTotal"` + VirtualTotal uint64 `json:"virtualTotal"` + VirtualAvail uint64 `json:"virtualAvail"` +} + +type ExWindows struct{} + +func NewExWindows() *ExWindows { + return &ExWindows{} +} + +func (e *ExWindows) VirtualMemory() (*ExVirtualMemory, error) { + var memInfo memoryStatusEx + memInfo.cbSize = uint32(unsafe.Sizeof(memInfo)) + mem, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(&memInfo))) + if mem == 0 { + return nil, windows.GetLastError() + } + + var perfInfo performanceInformation + perfInfo.cb = uint32(unsafe.Sizeof(perfInfo)) + perf, _, _ := procGetPerformanceInfo.Call(uintptr(unsafe.Pointer(&perfInfo)), uintptr(perfInfo.cb)) + if perf == 0 { + return nil, windows.GetLastError() + } + + ret := &ExVirtualMemory{ + CommitLimit: perfInfo.commitLimit * perfInfo.pageSize, + CommitTotal: perfInfo.commitTotal * perfInfo.pageSize, + VirtualTotal: memInfo.ullTotalVirtual, + VirtualAvail: memInfo.ullAvailVirtual, + } + + return ret, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem.go similarity index 97% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem.go index 
edaf268bbf4..0da71a98863 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem.go @@ -1,9 +1,10 @@ +// SPDX-License-Identifier: BSD-3-Clause package mem import ( "encoding/json" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) var invoke common.Invoker = common.Invoke{} diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix.go new file mode 100644 index 00000000000..ac2c39dd382 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix.go @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build aix + +package mem + +import ( + "context" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +func VirtualMemory() (*VirtualMemoryStat, error) { + return VirtualMemoryWithContext(context.Background()) +} + +func SwapMemory() (*SwapMemoryStat, error) { + return SwapMemoryWithContext(context.Background()) +} + +func SwapDevices() ([]*SwapDevice, error) { + return nil, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_cgo.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix_cgo.go similarity index 97% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_cgo.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_aix_cgo.go index 67e11dff886..2d03dd0c3f7 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_cgo.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix_cgo.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build aix && cgo -// +build aix,cgo package mem diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_nocgo.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix_nocgo.go similarity index 83% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_nocgo.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_aix_nocgo.go index cc6a76d2f0d..bc3c0ed3b4a 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_nocgo.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix_nocgo.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build aix && !cgo -// +build aix,!cgo package mem @@ -8,11 +8,11 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { - vmem, swap, err := callSVMon(ctx) + vmem, swap, err := callSVMon(ctx, true) if err != nil { return nil, err } @@ -25,7 +25,7 @@ func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { } func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { - _, swap, err := callSVMon(ctx) + _, swap, err := callSVMon(ctx, false) if err != nil { return nil, err } @@ -35,7 +35,7 @@ func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { return swap, nil } -func callSVMon(ctx context.Context) (*VirtualMemoryStat, *SwapMemoryStat, error) { +func callSVMon(ctx context.Context, virt bool) (*VirtualMemoryStat, *SwapMemoryStat, error) { out, err := invoke.CommandWithContext(ctx, "svmon", "-G") if err != nil { return nil, nil, err @@ -45,7 +45,7 @@ func callSVMon(ctx context.Context) (*VirtualMemoryStat, *SwapMemoryStat, error) vmem := &VirtualMemoryStat{} swap := &SwapMemoryStat{} for _, line := range strings.Split(string(out), "\n") { - if strings.HasPrefix(line, "memory") { + if virt && strings.HasPrefix(line, "memory") { p := 
strings.Fields(line) if len(p) > 2 { if t, err := strconv.ParseUint(p[1], 10, 64); err == nil { diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_bsd.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_bsd.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_bsd.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_bsd.go index ef867d74223..4f3e57c038a 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_bsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_bsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build freebsd || openbsd || netbsd -// +build freebsd openbsd netbsd package mem diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin.go new file mode 100644 index 00000000000..a4c15f6915d --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin.go @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build darwin + +package mem + +import ( + "context" + "fmt" + "unsafe" + + "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +func getHwMemsize() (uint64, error) { + total, err := unix.SysctlUint64("hw.memsize") + if err != nil { + return 0, err + } + return total, nil +} + +// xsw_usage in sys/sysctl.h +type swapUsage struct { + Total uint64 + Avail uint64 + Used uint64 + Pagesize int32 + Encrypted bool +} + +// SwapMemory returns swapinfo. +func SwapMemory() (*SwapMemoryStat, error) { + return SwapMemoryWithContext(context.Background()) +} + +func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { + // https://github.com/yanllearnn/go-osstat/blob/ae8a279d26f52ec946a03698c7f50a26cfb427e3/memory/memory_darwin.go + var ret *SwapMemoryStat + + value, err := unix.SysctlRaw("vm.swapusage") + if err != nil { + return ret, err + } + if len(value) != 32 { + return ret, fmt.Errorf("unexpected output of sysctl vm.swapusage: %v (len: %d)", value, len(value)) + } + swap := (*swapUsage)(unsafe.Pointer(&value[0])) + + u := float64(0) + if swap.Total != 0 { + u = ((float64(swap.Total) - float64(swap.Avail)) / float64(swap.Total)) * 100.0 + } + + ret = &SwapMemoryStat{ + Total: swap.Total, + Used: swap.Used, + Free: swap.Avail, + UsedPercent: u, + } + + return ret, nil +} + +func SwapDevices() ([]*SwapDevice, error) { + return SwapDevicesWithContext(context.Background()) +} + +func SwapDevicesWithContext(ctx context.Context) ([]*SwapDevice, error) { + return nil, common.ErrNotImplementedError +} + +type vmStatisticsData struct { + freeCount uint32 + activeCount uint32 + inactiveCount uint32 + wireCount uint32 + _ [44]byte // Not used here +} + +// VirtualMemory returns VirtualmemoryStat. 
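Worth pausing on the API break recorded above: v4 drops the Linux-only VirtualMemoryEx entry points in favor of the per-platform Ex types added in ex_linux.go and ex_windows.go. A minimal migration sketch, assuming the vendored github.com/shirou/gopsutil/v4/mem package and a Linux build (ex_linux.go carries a //go:build linux constraint); the darwin VirtualMemory implementation that the doc comment above introduces continues right after.

package main

import (
	"fmt"

	"github.com/shirou/gopsutil/v4/mem"
)

func main() {
	// v3: vmEx, err := mem.VirtualMemoryEx()   // removed in v4
	// v4: the extended Linux counters hang off an explicit Ex type.
	vmEx, err := mem.NewExLinux().VirtualMemory()
	if err != nil {
		panic(err)
	}
	// Same five fields the old VirtualMemoryExStat carried.
	fmt.Println(vmEx.ActiveFile, vmEx.InactiveFile, vmEx.Unevictable)
}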
+func VirtualMemory() (*VirtualMemoryStat, error) { + return VirtualMemoryWithContext(context.Background()) +} + +func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { + machLib, err := common.NewLibrary(common.System) + if err != nil { + return nil, err + } + defer machLib.Close() + + hostStatistics := common.GetFunc[common.HostStatisticsFunc](machLib, common.HostStatisticsSym) + machHostSelf := common.GetFunc[common.MachHostSelfFunc](machLib, common.MachHostSelfSym) + + count := uint32(common.HOST_VM_INFO_COUNT) + var vmstat vmStatisticsData + + status := hostStatistics(machHostSelf(), common.HOST_VM_INFO, + uintptr(unsafe.Pointer(&vmstat)), &count) + + if status != common.KERN_SUCCESS { + return nil, fmt.Errorf("host_statistics error=%d", status) + } + + pageSizeAddr, _ := machLib.Dlsym("vm_kernel_page_size") + pageSize := **(**uint64)(unsafe.Pointer(&pageSizeAddr)) + total, err := getHwMemsize() + if err != nil { + return nil, err + } + totalCount := uint32(total / pageSize) + + availableCount := vmstat.inactiveCount + vmstat.freeCount + usedPercent := 100 * float64(totalCount-availableCount) / float64(totalCount) + + usedCount := totalCount - availableCount + + return &VirtualMemoryStat{ + Total: total, + Available: pageSize * uint64(availableCount), + Used: pageSize * uint64(usedCount), + UsedPercent: usedPercent, + Free: pageSize * uint64(vmstat.freeCount), + Active: pageSize * uint64(vmstat.activeCount), + Inactive: pageSize * uint64(vmstat.inactiveCount), + Wired: pageSize * uint64(vmstat.wireCount), + }, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_fallback.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_fallback.go similarity index 86% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_fallback.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_fallback.go index 697fd870963..ba882c8bee9 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_fallback.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_fallback.go @@ -1,12 +1,12 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build !darwin && !linux && !freebsd && !openbsd && !solaris && !windows && !plan9 && !aix && !netbsd -// +build !darwin,!linux,!freebsd,!openbsd,!solaris,!windows,!plan9,!aix,!netbsd package mem import ( "context" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) func VirtualMemory() (*VirtualMemoryStat, error) { diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_freebsd.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_freebsd.go similarity index 97% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_freebsd.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_freebsd.go index 9a56785b319..a6deddebdd8 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_freebsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_freebsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build freebsd -// +build freebsd package mem @@ -8,8 +8,9 @@ import ( "errors" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/internal/common" ) func VirtualMemory() (*VirtualMemoryStat, error) { @@ -85,7 +86,6 @@ func SwapMemory() (*SwapMemoryStat, error) { } // Constants from vm/vm_param.h -// nolint: golint const ( XSWDEV_VERSION11 = 1 XSWDEV_VERSION = 2 diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_linux.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_linux.go similarity index 93% rename from 
vendor/github.com/shirou/gopsutil/v3/mem/mem_linux.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_linux.go index 214a91e47f6..05bfdaf4e1a 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_linux.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_linux.go @@ -1,12 +1,11 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build linux -// +build linux package mem import ( "bufio" "context" - "encoding/json" "fmt" "io" "math" @@ -16,22 +15,9 @@ import ( "golang.org/x/sys/unix" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) -type VirtualMemoryExStat struct { - ActiveFile uint64 `json:"activefile"` - InactiveFile uint64 `json:"inactivefile"` - ActiveAnon uint64 `json:"activeanon"` - InactiveAnon uint64 `json:"inactiveanon"` - Unevictable uint64 `json:"unevictable"` -} - -func (v VirtualMemoryExStat) String() string { - s, _ := json.Marshal(v) - return string(s) -} - func VirtualMemory() (*VirtualMemoryStat, error) { return VirtualMemoryWithContext(context.Background()) } @@ -44,19 +30,7 @@ func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { return vm, nil } -func VirtualMemoryEx() (*VirtualMemoryExStat, error) { - return VirtualMemoryExWithContext(context.Background()) -} - -func VirtualMemoryExWithContext(ctx context.Context) (*VirtualMemoryExStat, error) { - _, vmEx, err := fillFromMeminfoWithContext(ctx) - if err != nil { - return nil, err - } - return vmEx, nil -} - -func fillFromMeminfoWithContext(ctx context.Context) (*VirtualMemoryStat, *VirtualMemoryExStat, error) { +func fillFromMeminfoWithContext(ctx context.Context) (*VirtualMemoryStat, *ExVirtualMemory, error) { filename := common.HostProcWithContext(ctx, "meminfo") lines, _ := common.ReadLines(filename) @@ -67,7 +41,7 @@ func fillFromMeminfoWithContext(ctx context.Context) (*VirtualMemoryStat, *Virtu sReclaimable := false // "Sreclaimable:" not available: 2.6.19 / Nov 2006 ret := &VirtualMemoryStat{} - retEx := &VirtualMemoryExStat{} + retEx := &ExVirtualMemory{} for _, line := range lines { fields := strings.Split(line, ":") @@ -409,7 +383,7 @@ func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { // calculateAvailVmem is a fallback under kernel 3.14 where /proc/meminfo does not provide // "MemAvailable:" column. 
It reimplements an algorithm from the link below // https://github.com/giampaolo/psutil/pull/890 -func calculateAvailVmem(ctx context.Context, ret *VirtualMemoryStat, retEx *VirtualMemoryExStat) uint64 { +func calculateAvailVmem(ctx context.Context, ret *VirtualMemoryStat, retEx *ExVirtualMemory) uint64 { var watermarkLow uint64 fn := common.HostProcWithContext(ctx, "zoneinfo") diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_netbsd.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_netbsd.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_netbsd.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_netbsd.go index d1f54ecaf3f..0a41b3e340e 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_netbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_netbsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build netbsd -// +build netbsd package mem diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd.go similarity index 96% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd.go index e37d5abe0dd..2510bb0d3aa 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd -// +build openbsd package mem @@ -10,7 +10,7 @@ import ( "errors" "fmt" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" "golang.org/x/sys/unix" ) diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_386.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_386.go similarity index 93% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_386.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_386.go index de2b26ca40a..552e93f4a28 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_386.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_386.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd && 386 -// +build openbsd,386 // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs mem/types_openbsd.go diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_amd64.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_amd64.go similarity index 92% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_amd64.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_amd64.go index d187abf01fa..73e5b72aa67 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_amd64.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_amd64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_openbsd.go diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_arm.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_arm.go similarity index 93% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_arm.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_arm.go index 2488f18517c..57b5861de5b 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_arm.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_arm.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd && arm -// +build openbsd,arm // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs mem/types_openbsd.go diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_arm64.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_arm64.go similarity index 93% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_arm64.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_arm64.go index 3661b16fb1b..f39a6456b73 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_arm64.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_arm64.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd && arm64 -// +build openbsd,arm64 // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs mem/types_openbsd.go diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_riscv64.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_riscv64.go similarity index 94% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_riscv64.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_riscv64.go index 7a7b480384e..f9f838f54ed 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_riscv64.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_riscv64.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd && riscv64 -// +build openbsd,riscv64 // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs mem/types_openbsd.go diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_plan9.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_plan9.go similarity index 95% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_plan9.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_plan9.go index b5259f8446e..c17a102ee62 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_plan9.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_plan9.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build plan9 -// +build plan9 package mem @@ -8,7 +8,7 @@ import ( "os" stats "github.com/lufia/plan9stats" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) func SwapMemory() (*SwapMemoryStat, error) { diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_solaris.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_solaris.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_solaris.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_solaris.go index c911267e1ef..06d0d9a006b 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_solaris.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_solaris.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build solaris -// +build solaris package mem @@ -11,7 +11,7 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" "github.com/tklauser/go-sysconf" ) diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_windows.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_windows.go similarity index 84% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_windows.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_windows.go index 8c7fb1a1353..a94b61f4bb5 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_windows.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_windows.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build windows -// +build windows package mem @@ -9,7 +9,7 @@ import ( "syscall" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" + 
"github.com/shirou/gopsutil/v4/internal/common" "golang.org/x/sys/windows" ) @@ -77,26 +77,42 @@ func SwapMemory() (*SwapMemoryStat, error) { } func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { + // Use the performance counter to get the swap usage percentage + counter, err := common.NewWin32PerformanceCounter("swap_percentage", `\Paging File(_Total)\% Usage`) + if err != nil { + return nil, err + } + defer common.PdhCloseQuery.Call(uintptr(counter.Query)) + + usedPercent, err := counter.GetValue() + if err != nil { + return nil, err + } + + // Get total memory from performance information var perfInfo performanceInformation perfInfo.cb = uint32(unsafe.Sizeof(perfInfo)) mem, _, _ := procGetPerformanceInfo.Call(uintptr(unsafe.Pointer(&perfInfo)), uintptr(perfInfo.cb)) if mem == 0 { return nil, windows.GetLastError() } - tot := perfInfo.commitLimit * perfInfo.pageSize - used := perfInfo.commitTotal * perfInfo.pageSize - free := tot - used - var usedPercent float64 - if tot == 0 { - usedPercent = 0 + totalPhys := perfInfo.physicalTotal * perfInfo.pageSize + totalSys := perfInfo.commitLimit * perfInfo.pageSize + total := totalSys - totalPhys + + var used uint64 + if total > 0 { + used = uint64(0.01 * usedPercent * float64(total)) } else { - usedPercent = float64(used) / float64(tot) * 100 + usedPercent = 0.0 + used = 0 } + ret := &SwapMemoryStat{ - Total: tot, + Total: total, Used: used, - Free: free, - UsedPercent: usedPercent, + Free: total - used, + UsedPercent: common.Round(usedPercent, 1), } return ret, nil diff --git a/vendor/github.com/shirou/gopsutil/v4/net/net.go b/vendor/github.com/shirou/gopsutil/v4/net/net.go new file mode 100644 index 00000000000..74af54a45d8 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/net/net.go @@ -0,0 +1,356 @@ +// SPDX-License-Identifier: BSD-3-Clause +package net + +import ( + "context" + "encoding/json" + "net" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +var invoke common.Invoker = common.Invoke{} + +type IOCountersStat struct { + Name string `json:"name"` // interface name + BytesSent uint64 `json:"bytesSent"` // number of bytes sent + BytesRecv uint64 `json:"bytesRecv"` // number of bytes received + PacketsSent uint64 `json:"packetsSent"` // number of packets sent + PacketsRecv uint64 `json:"packetsRecv"` // number of packets received + Errin uint64 `json:"errin"` // total number of errors while receiving + Errout uint64 `json:"errout"` // total number of errors while sending + Dropin uint64 `json:"dropin"` // total number of incoming packets which were dropped + Dropout uint64 `json:"dropout"` // total number of outgoing packets which were dropped (always 0 on OSX and BSD) + Fifoin uint64 `json:"fifoin"` // total number of FIFO buffers errors while receiving + Fifoout uint64 `json:"fifoout"` // total number of FIFO buffers errors while sending +} + +// Addr is implemented compatibility to psutil +type Addr struct { + IP string `json:"ip"` + Port uint32 `json:"port"` +} + +type ConnectionStat struct { + Fd uint32 `json:"fd"` + Family uint32 `json:"family"` + Type uint32 `json:"type"` + Laddr Addr `json:"localaddr"` + Raddr Addr `json:"remoteaddr"` + Status string `json:"status"` + Uids []int32 `json:"uids"` + Pid int32 `json:"pid"` +} + +// System wide stats about different network protocols +type ProtoCountersStat struct { + Protocol string `json:"protocol"` + Stats map[string]int64 `json:"stats"` +} + +// NetInterfaceAddr is designed for represent interface addresses +type InterfaceAddr struct { + Addr 
string `json:"addr"` +} + +// InterfaceAddrList is a list of InterfaceAddr +type InterfaceAddrList []InterfaceAddr + +type InterfaceStat struct { + Index int `json:"index"` + MTU int `json:"mtu"` // maximum transmission unit + Name string `json:"name"` // e.g., "en0", "lo0", "eth0.100" + HardwareAddr string `json:"hardwareAddr"` // IEEE MAC-48, EUI-48 and EUI-64 form + Flags []string `json:"flags"` // e.g., FlagUp, FlagLoopback, FlagMulticast + Addrs InterfaceAddrList `json:"addrs"` +} + +// InterfaceStatList is a list of InterfaceStat +type InterfaceStatList []InterfaceStat + +type FilterStat struct { + ConnTrackCount int64 `json:"connTrackCount"` + ConnTrackMax int64 `json:"connTrackMax"` +} + +// ConntrackStat has conntrack summary info +type ConntrackStat struct { + Entries uint32 `json:"entries"` // Number of entries in the conntrack table + Searched uint32 `json:"searched"` // Number of conntrack table lookups performed + Found uint32 `json:"found"` // Number of searched entries which were successful + New uint32 `json:"new"` // Number of entries added which were not expected before + Invalid uint32 `json:"invalid"` // Number of packets seen which can not be tracked + Ignore uint32 `json:"ignore"` // Packets seen which are already connected to an entry + Delete uint32 `json:"delete"` // Number of entries which were removed + DeleteList uint32 `json:"deleteList"` // Number of entries which were put to dying list + Insert uint32 `json:"insert"` // Number of entries inserted into the list + InsertFailed uint32 `json:"insertFailed"` // # insertion attempted but failed (same entry exists) + Drop uint32 `json:"drop"` // Number of packets dropped due to conntrack failure. + EarlyDrop uint32 `json:"earlyDrop"` // Dropped entries to make room for new ones, if maxsize reached + IcmpError uint32 `json:"icmpError"` // Subset of invalid. Packets that can't be tracked d/t error + ExpectNew uint32 `json:"expectNew"` // Entries added after an expectation was already present + ExpectCreate uint32 `json:"expectCreate"` // Expectations added + ExpectDelete uint32 `json:"expectDelete"` // Expectations deleted + SearchRestart uint32 `json:"searchRestart"` // Conntrack table lookups restarted due to hashtable resizes +} + +func NewConntrackStat(e uint32, s uint32, f uint32, n uint32, inv uint32, ign uint32, del uint32, dlst uint32, ins uint32, insfail uint32, drop uint32, edrop uint32, ie uint32, en uint32, ec uint32, ed uint32, sr uint32) *ConntrackStat { + return &ConntrackStat{ + Entries: e, + Searched: s, + Found: f, + New: n, + Invalid: inv, + Ignore: ign, + Delete: del, + DeleteList: dlst, + Insert: ins, + InsertFailed: insfail, + Drop: drop, + EarlyDrop: edrop, + IcmpError: ie, + ExpectNew: en, + ExpectCreate: ec, + ExpectDelete: ed, + SearchRestart: sr, + } +} + +type ConntrackStatList struct { + items []*ConntrackStat +} + +func NewConntrackStatList() *ConntrackStatList { + return &ConntrackStatList{ + items: []*ConntrackStat{}, + } +} + +func (l *ConntrackStatList) Append(c *ConntrackStat) { + l.items = append(l.items, c) +} + +func (l *ConntrackStatList) Items() []ConntrackStat { + items := make([]ConntrackStat, len(l.items)) + for i, el := range l.items { + items[i] = *el + } + return items +} + +// Summary returns a single-element list with totals from all list items. 
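A quick sketch of how these accumulator helpers fit together (the Summary implementation itself follows immediately below); the counter values here are invented purely for illustration:

package main

import (
	"fmt"

	"github.com/shirou/gopsutil/v4/net"
)

func main() {
	l := net.NewConntrackStatList()
	// Two hypothetical per-CPU rows; only Entries is non-zero here.
	l.Append(net.NewConntrackStat(10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
	l.Append(net.NewConntrackStat(5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
	fmt.Println(len(l.Items()))         // 2: one row per CPU
	fmt.Println(l.Summary()[0].Entries) // 15: single aggregated row
}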
+func (l *ConntrackStatList) Summary() []ConntrackStat { + summary := NewConntrackStat(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) + for _, cs := range l.items { + summary.Entries += cs.Entries + summary.Searched += cs.Searched + summary.Found += cs.Found + summary.New += cs.New + summary.Invalid += cs.Invalid + summary.Ignore += cs.Ignore + summary.Delete += cs.Delete + summary.DeleteList += cs.DeleteList + summary.Insert += cs.Insert + summary.InsertFailed += cs.InsertFailed + summary.Drop += cs.Drop + summary.EarlyDrop += cs.EarlyDrop + summary.IcmpError += cs.IcmpError + summary.ExpectNew += cs.ExpectNew + summary.ExpectCreate += cs.ExpectCreate + summary.ExpectDelete += cs.ExpectDelete + summary.SearchRestart += cs.SearchRestart + } + return []ConntrackStat{*summary} +} + +func (n IOCountersStat) String() string { + s, _ := json.Marshal(n) + return string(s) +} + +func (n ConnectionStat) String() string { + s, _ := json.Marshal(n) + return string(s) +} + +func (n ProtoCountersStat) String() string { + s, _ := json.Marshal(n) + return string(s) +} + +func (a Addr) String() string { + s, _ := json.Marshal(a) + return string(s) +} + +func (n InterfaceStat) String() string { + s, _ := json.Marshal(n) + return string(s) +} + +func (l InterfaceStatList) String() string { + s, _ := json.Marshal(l) + return string(s) +} + +func (n InterfaceAddr) String() string { + s, _ := json.Marshal(n) + return string(s) +} + +func (n ConntrackStat) String() string { + s, _ := json.Marshal(n) + return string(s) +} + +func Interfaces() (InterfaceStatList, error) { + return InterfacesWithContext(context.Background()) +} + +func InterfacesWithContext(ctx context.Context) (InterfaceStatList, error) { + is, err := net.Interfaces() + if err != nil { + return nil, err + } + ret := make(InterfaceStatList, 0, len(is)) + for _, ifi := range is { + + var flags []string + if ifi.Flags&net.FlagUp != 0 { + flags = append(flags, "up") + } + if ifi.Flags&net.FlagBroadcast != 0 { + flags = append(flags, "broadcast") + } + if ifi.Flags&net.FlagLoopback != 0 { + flags = append(flags, "loopback") + } + if ifi.Flags&net.FlagPointToPoint != 0 { + flags = append(flags, "pointtopoint") + } + if ifi.Flags&net.FlagMulticast != 0 { + flags = append(flags, "multicast") + } + + r := InterfaceStat{ + Index: ifi.Index, + Name: ifi.Name, + MTU: ifi.MTU, + HardwareAddr: ifi.HardwareAddr.String(), + Flags: flags, + } + addrs, err := ifi.Addrs() + if err == nil { + r.Addrs = make(InterfaceAddrList, 0, len(addrs)) + for _, addr := range addrs { + r.Addrs = append(r.Addrs, InterfaceAddr{ + Addr: addr.String(), + }) + } + + } + ret = append(ret, r) + } + + return ret, nil +} + +func getIOCountersAll(n []IOCountersStat) ([]IOCountersStat, error) { + r := IOCountersStat{ + Name: "all", + } + for _, nic := range n { + r.BytesRecv += nic.BytesRecv + r.PacketsRecv += nic.PacketsRecv + r.Errin += nic.Errin + r.Dropin += nic.Dropin + r.BytesSent += nic.BytesSent + r.PacketsSent += nic.PacketsSent + r.Errout += nic.Errout + r.Dropout += nic.Dropout + } + + return []IOCountersStat{r}, nil +} + +// NetIOCounters returns network I/O statistics for every network +// interface installed on the system. If pernic argument is false, +// return only sum of all information (which name is 'all'). If true, +// every network interface installed on the system is returned +// separately. 
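The doc comment above spells out the pernic switch; a short usage sketch against the vendored package (the IOCounters declaration it documents follows immediately):

package main

import (
	"fmt"

	"github.com/shirou/gopsutil/v4/net"
)

func main() {
	// pernic=false folds every NIC into one pseudo-interface named "all"
	// (see getIOCountersAll above).
	totals, err := net.IOCounters(false)
	if err != nil {
		panic(err)
	}
	fmt.Println(totals[0].Name, totals[0].BytesRecv, totals[0].BytesSent)

	// pernic=true yields one IOCountersStat per interface.
	perNic, err := net.IOCounters(true)
	if err != nil {
		panic(err)
	}
	for _, nic := range perNic {
		fmt.Println(nic.Name, nic.PacketsRecv, nic.PacketsSent)
	}
}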
+func IOCounters(pernic bool) ([]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), pernic) +} + +func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) { + return IOCountersByFileWithContext(context.Background(), pernic, filename) +} + +// ProtoCounters returns network statistics for the entire system +// If protocols is empty then all protocols are returned, otherwise +// just the protocols in the list are returned. +// Available protocols: +// [ip,icmp,icmpmsg,tcp,udp,udplite] +// Not Implemented for FreeBSD, Windows, OpenBSD, Darwin +func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { + return ProtoCountersWithContext(context.Background(), protocols) +} + +// NetFilterCounters returns iptables conntrack statistics +// the currently in use conntrack count and the max. +// If the file does not exist or is invalid it will return nil. +func FilterCounters() ([]FilterStat, error) { + return FilterCountersWithContext(context.Background()) +} + +// ConntrackStats returns more detailed info about the conntrack table +func ConntrackStats(percpu bool) ([]ConntrackStat, error) { + return ConntrackStatsWithContext(context.Background(), percpu) +} + +// Return a list of network connections opened. +func Connections(kind string) ([]ConnectionStat, error) { + return ConnectionsWithContext(context.Background(), kind) +} + +// Return a list of network connections opened returning at most `maxConn` +// connections for each running process. +func ConnectionsMax(kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsMaxWithContext(context.Background(), kind, maxConn) +} + +// Return a list of network connections opened, omitting `Uids`. +// WithoutUids functions are reliant on implementation details. They may be altered to be an alias for Connections or be +// removed from the API in the future. +func ConnectionsWithoutUids(kind string) ([]ConnectionStat, error) { + return ConnectionsWithoutUidsWithContext(context.Background(), kind) +} + +// Return a list of network connections opened by a process. +func ConnectionsPid(kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidWithContext(context.Background(), kind, pid) +} + +// Return a list of network connections opened, omitting `Uids`. +// WithoutUids functions are reliant on implementation details. They may be altered to be an alias for Connections or be +// removed from the API in the future. +func ConnectionsPidWithoutUids(kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidWithoutUidsWithContext(context.Background(), kind, pid) +} + +func ConnectionsPidMaxWithoutUids(kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, maxConn) +} + +// Return up to `maxConn` network connections opened by a process. +func ConnectionsPidMax(kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithContext(context.Background(), kind, pid, maxConn) +} + +// Pids returns all pids. +// Note: this is a copy of process_linux.Pids() +// FIXME: Importing process causes an import cycle. +// Moving it to common would break other platforms; needs consideration. 
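Since the connection enumerators above all share one shape, a single sketch covers them; "tcp" is one of the kind selectors the per-platform implementations accept (the Pids helper documented just above follows right after):

package main

import (
	"fmt"

	"github.com/shirou/gopsutil/v4/net"
)

func main() {
	// System-wide open TCP sockets; per-process variants take a pid,
	// and the Max variants cap how much of /proc is scanned.
	conns, err := net.Connections("tcp")
	if err != nil {
		panic(err)
	}
	for _, c := range conns {
		fmt.Printf("pid=%d %s:%d -> %s:%d %s\n",
			c.Pid, c.Laddr.IP, c.Laddr.Port, c.Raddr.IP, c.Raddr.Port, c.Status)
	}
}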
+func Pids() ([]int32, error) { + return PidsWithContext(context.Background()) +} diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_aix.go b/vendor/github.com/shirou/gopsutil/v4/net/net_aix.go similarity index 76% rename from vendor/github.com/shirou/gopsutil/v3/net/net_aix.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_aix.go index 81feaa8d7a0..9df01e31800 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_aix.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_aix.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build aix -// +build aix package net @@ -11,42 +11,26 @@ import ( "strings" "syscall" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) -func IOCounters(pernic bool) ([]IOCountersStat, error) { - return IOCountersWithContext(context.Background(), pernic) -} - -// IOCountersByFile exists just for compatibility with Linux. -func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) { - return IOCountersByFileWithContext(context.Background(), pernic, filename) +// Deprecated: use process.PidsWithContext instead +func PidsWithContext(ctx context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError } func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) { return IOCounters(pernic) } -func FilterCounters() ([]FilterStat, error) { - return FilterCountersWithContext(context.Background()) -} - func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { return nil, common.ErrNotImplementedError } -func ConntrackStats(percpu bool) ([]ConntrackStat, error) { - return ConntrackStatsWithContext(context.Background(), percpu) -} - func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackStat, error) { return nil, common.ErrNotImplementedError } -func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { - return ProtoCountersWithContext(context.Background(), protocols) -} - func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { return nil, common.ErrNotImplementedError } @@ -117,7 +101,7 @@ func parseNetstatAddr(local string, remote string, family uint32) (laddr Addr, r return Addr{}, fmt.Errorf("unknown family, %d", family) } } - lport, err := strconv.Atoi(port) + lport, err := strconv.ParseInt(port, 10, 32) if err != nil { return Addr{}, err } @@ -250,10 +234,6 @@ func parseNetstatA(output string, kind string) ([]ConnectionStat, error) { return ret, nil } -func Connections(kind string) ([]ConnectionStat, error) { - return ConnectionsWithContext(context.Background(), kind) -} - func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { args := []string{"-na"} switch strings.ToLower(kind) { @@ -286,45 +266,34 @@ func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, return ret, nil } -func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) { - return ConnectionsMaxWithContext(context.Background(), kind, max) -} - -func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { - return []ConnectionStat{}, common.ErrNotImplementedError -} - -// Return a list of network connections opened, omitting `Uids`. -// WithoutUids functions are reliant on implementation details. They may be altered to be an alias for Connections or be -// removed from the API in the future. 
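The AIX hunk above is representative of a second v4 pattern: the context-free wrappers move into net.go, the per-platform files keep only the *WithContext variants, and PidsWithContext is stubbed here and marked "Deprecated: use process.PidsWithContext instead". A sketch of that suggested replacement, assuming the gopsutil/v4 process package is vendored alongside:

package main

import (
	"context"
	"fmt"

	"github.com/shirou/gopsutil/v4/process"
)

func main() {
	// Stands in for the deprecated net.Pids / net.PidsWithContext.
	pids, err := process.PidsWithContext(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println(len(pids), "processes")
}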
-func ConnectionsWithoutUids(kind string) ([]ConnectionStat, error) { - return ConnectionsWithoutUidsWithContext(context.Background(), kind) +func ConnectionsMaxWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithContext(ctx, kind, 0, maxConn) } func ConnectionsWithoutUidsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { return ConnectionsMaxWithoutUidsWithContext(ctx, kind, 0) } -func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, max) +func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, maxConn) } -func ConnectionsPidWithoutUids(kind string, pid int32) ([]ConnectionStat, error) { - return ConnectionsPidWithoutUidsWithContext(context.Background(), kind, pid) +func ConnectionsPidWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithContext(ctx, kind, pid, 0) } func ConnectionsPidWithoutUidsWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, 0) } -func ConnectionsPidMaxWithoutUids(kind string, pid int32, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, max) +func ConnectionsPidMaxWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn, false) } -func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { - return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, max) +func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn, true) } -func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { +func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int, skipUids bool) ([]ConnectionStat, error) { return []ConnectionStat{}, common.ErrNotImplementedError } diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_aix_cgo.go b/vendor/github.com/shirou/gopsutil/v4/net/net_aix_cgo.go similarity index 95% rename from vendor/github.com/shirou/gopsutil/v3/net/net_aix_cgo.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_aix_cgo.go index 8c34f881c0b..a45a5b75cce 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_aix_cgo.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_aix_cgo.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build aix && cgo -// +build aix,cgo package net diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_aix_nocgo.go b/vendor/github.com/shirou/gopsutil/v4/net/net_aix_nocgo.go similarity index 95% rename from vendor/github.com/shirou/gopsutil/v3/net/net_aix_nocgo.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_aix_nocgo.go index e3fce9021ce..f63a21e73bc 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_aix_nocgo.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_aix_nocgo.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: 
BSD-3-Clause //go:build aix && !cgo -// +build aix,!cgo package net @@ -9,7 +9,7 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) func parseNetstatI(output string) ([]IOCountersStat, error) { diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_darwin.go b/vendor/github.com/shirou/gopsutil/v4/net/net_darwin.go similarity index 84% rename from vendor/github.com/shirou/gopsutil/v3/net/net_darwin.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_darwin.go index 8a7b6374438..86c541e93e0 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_darwin.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_darwin.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build darwin -// +build darwin package net @@ -12,7 +12,7 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) var ( @@ -143,8 +143,8 @@ func newMapInterfaceNameUsage(ifaces []netstatInterface) mapInterfaceNameUsage { return output } -func (min mapInterfaceNameUsage) isTruncated() bool { - for _, usage := range min { +func (mapi mapInterfaceNameUsage) isTruncated() bool { + for _, usage := range mapi { if usage > 1 { return true } @@ -152,9 +152,9 @@ func (min mapInterfaceNameUsage) isTruncated() bool { return false } -func (min mapInterfaceNameUsage) notTruncated() []string { +func (mapi mapInterfaceNameUsage) notTruncated() []string { output := make([]string, 0) - for ifaceName, usage := range min { + for ifaceName, usage := range mapi { if usage == 1 { output = append(output, ifaceName) } @@ -162,15 +162,16 @@ func (min mapInterfaceNameUsage) notTruncated() []string { return output } +// Deprecated: use process.PidsWithContext instead +func PidsWithContext(ctx context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError +} + // example of `netstat -ibdnW` output on yosemite // Name Mtu Network Address Ipkts Ierrs Ibytes Opkts Oerrs Obytes Coll Drop // lo0 16384 869107 0 169411755 869107 0 169411755 0 0 // lo0 16384 ::1/128 ::1 869107 - 169411755 869107 - 169411755 - - // lo0 16384 127 127.0.0.1 869107 - 169411755 869107 - 169411755 - - -func IOCounters(pernic bool) ([]IOCountersStat, error) { - return IOCountersWithContext(context.Background(), pernic) -} - func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { var ( ret []IOCountersStat @@ -247,45 +248,24 @@ func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, } } - if pernic == false { + if !pernic { return getIOCountersAll(ret) } return ret, nil } -// IOCountersByFile exists just for compatibility with Linux. 
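That deleted compatibility note survives in spirit: on Linux IOCountersByFile parses a /proc/net/dev-formatted file (see the net_linux.go hunk further down), while darwin and the BSDs ignore the filename and fall through to IOCounters. A sketch, assuming a host with procfs:

package main

import (
	"fmt"

	"github.com/shirou/gopsutil/v4/net"
)

func main() {
	// On Linux this parses the given /proc/net/dev-formatted file; on
	// darwin/BSD the filename is ignored and IOCounters is used instead.
	stats, err := net.IOCountersByFile(true, "/proc/net/dev")
	if err != nil {
		panic(err)
	}
	for _, s := range stats {
		fmt.Println(s.Name, s.BytesRecv, s.BytesSent)
	}
}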
-func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) { - return IOCountersByFileWithContext(context.Background(), pernic, filename) -} - func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) { return IOCountersWithContext(ctx, pernic) } -func FilterCounters() ([]FilterStat, error) { - return FilterCountersWithContext(context.Background()) -} - func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { return nil, common.ErrNotImplementedError } -func ConntrackStats(percpu bool) ([]ConntrackStat, error) { - return ConntrackStatsWithContext(context.Background(), percpu) -} - func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackStat, error) { return nil, common.ErrNotImplementedError } -// ProtoCounters returns network statistics for the entire system -// If protocols is empty then all protocols are returned, otherwise -// just the protocols in the list are returned. -// Not Implemented for Darwin -func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { - return ProtoCountersWithContext(context.Background(), protocols) -} - func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { return nil, common.ErrNotImplementedError } diff --git a/vendor/github.com/shirou/gopsutil/v4/net/net_fallback.go b/vendor/github.com/shirou/gopsutil/v4/net/net_fallback.go new file mode 100644 index 00000000000..7d7596a86c3 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_fallback.go @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build !aix && !darwin && !linux && !freebsd && !openbsd && !windows && !solaris + +package net + +import ( + "context" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { + return []IOCountersStat{}, common.ErrNotImplementedError +} + +func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) { + return IOCountersWithContext(ctx, pernic) +} + +func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { + return nil, common.ErrNotImplementedError +} + +func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackStat, error) { + return nil, common.ErrNotImplementedError +} + +func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { + return nil, common.ErrNotImplementedError +} + +// Deprecated: use process.PidsWithContext instead +func PidsWithContext(ctx context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError +} + +func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { + return []ConnectionStat{}, common.ErrNotImplementedError +} + +func ConnectionsMaxWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithContext(ctx, kind, 0, maxConn) +} + +func ConnectionsWithoutUidsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { + return ConnectionsMaxWithoutUidsWithContext(ctx, kind, 0) +} + +func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, maxConn) +} + +func ConnectionsPidWithoutUidsWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { + return 
ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, 0) +} + +func ConnectionsPidWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithContext(ctx, kind, pid, 0) +} + +func ConnectionsPidMaxWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn, false) +} + +func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn, true) +} + +func connectionsPidMaxWithoutUidsWithContext(_ context.Context, _ string, _ int32, _ int, _ bool) ([]ConnectionStat, error) { + return []ConnectionStat{}, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/v4/net/net_freebsd.go b/vendor/github.com/shirou/gopsutil/v4/net/net_freebsd.go new file mode 100644 index 00000000000..655e13373d9 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_freebsd.go @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build freebsd + +package net + +import ( + "context" + "strconv" + "strings" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +// Deprecated: use process.PidsWithContext instead +func PidsWithContext(ctx context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError +} + +func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { + out, err := invoke.CommandWithContext(ctx, "netstat", "-ibdnW") + if err != nil { + return nil, err + } + + lines := strings.Split(string(out), "\n") + ret := make([]IOCountersStat, 0, len(lines)-1) + exists := make([]string, 0, len(ret)) + + for _, line := range lines { + values := strings.Fields(line) + if len(values) < 1 || values[0] == "Name" { + continue + } + if common.StringsHas(exists, values[0]) { + // skip if already get + continue + } + exists = append(exists, values[0]) + + if len(values) < 12 { + continue + } + base := 1 + // sometimes Address is omitted + if len(values) < 13 { + base = 0 + } + + parsed := make([]uint64, 0, 8) + vv := []string{ + values[base+3], // PacketsRecv + values[base+4], // Errin + values[base+5], // Dropin + values[base+6], // BytesRecvn + values[base+7], // PacketSent + values[base+8], // Errout + values[base+9], // BytesSent + values[base+11], // Dropout + } + for _, target := range vv { + if target == "-" { + parsed = append(parsed, 0) + continue + } + + t, err := strconv.ParseUint(target, 10, 64) + if err != nil { + return nil, err + } + parsed = append(parsed, t) + } + + n := IOCountersStat{ + Name: values[0], + PacketsRecv: parsed[0], + Errin: parsed[1], + Dropin: parsed[2], + BytesRecv: parsed[3], + PacketsSent: parsed[4], + Errout: parsed[5], + BytesSent: parsed[6], + Dropout: parsed[7], + } + ret = append(ret, n) + } + + if !pernic { + return getIOCountersAll(ret) + } + + return ret, nil +} + +func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) { + return IOCountersWithContext(ctx, pernic) +} + +func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { + return nil, common.ErrNotImplementedError +} + +func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackStat, error) { + return nil, common.ErrNotImplementedError +} + +func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { + 
return nil, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_linux.go b/vendor/github.com/shirou/gopsutil/v4/net/net_linux.go similarity index 76% rename from vendor/github.com/shirou/gopsutil/v3/net/net_linux.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_linux.go index 20ca5470a49..23113fea774 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_linux.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_linux.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build linux -// +build linux package net @@ -16,7 +16,7 @@ import ( "strings" "syscall" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) const ( // Conntrack Column numbers @@ -39,24 +39,11 @@ const ( // Conntrack Column numbers ctSEARCH_RESTART ) -// NetIOCounters returns network I/O statistics for every network -// interface installed on the system. If pernic argument is false, -// return only sum of all information (which name is 'all'). If true, -// every network interface installed on the system is returned -// separately. -func IOCounters(pernic bool) ([]IOCountersStat, error) { - return IOCountersWithContext(context.Background(), pernic) -} - func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { filename := common.HostProcWithContext(ctx, "net/dev") return IOCountersByFileWithContext(ctx, pernic, filename) } -func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) { - return IOCountersByFileWithContext(context.Background(), pernic, filename) -} - func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) { lines, err := common.ReadLines(filename) if err != nil { @@ -156,15 +143,6 @@ var netProtocols = []string{ "udplite", } -// ProtoCounters returns network statistics for the entire system -// If protocols is empty then all protocols are returned, otherwise -// just the protocols in the list are returned. -// Available protocols: -// [ip,icmp,icmpmsg,tcp,udp,udplite] -func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { - return ProtoCountersWithContext(context.Background(), protocols) -} - func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { if len(protocols) == 0 { protocols = netProtocols @@ -221,13 +199,6 @@ func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoC return stats, nil } -// NetFilterCounters returns iptables conntrack statistics -// the currently in use conntrack count and the max. -// If the file does not exist or is invalid it will return nil. 
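For reference, the Linux implementation behind that deleted wrapper reads two procfs knobs, as FilterCountersWithContext below shows. A sketch, assuming a Linux host with the nf_conntrack module loaded:

package main

import (
	"fmt"

	"github.com/shirou/gopsutil/v4/net"
)

func main() {
	// Backed by /proc/sys/net/netfilter/nf_conntrack_count and
	// nf_conntrack_max, per FilterCountersWithContext below.
	stats, err := net.FilterCounters()
	if err != nil {
		panic(err) // e.g. nf_conntrack not loaded
	}
	fmt.Println(stats[0].ConnTrackCount, "of", stats[0].ConnTrackMax, "conntrack entries")
}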
-func FilterCounters() ([]FilterStat, error) { - return FilterCountersWithContext(context.Background()) -} - func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { countfile := common.HostProcWithContext(ctx, "sys/net/netfilter/nf_conntrack_count") maxfile := common.HostProcWithContext(ctx, "sys/net/netfilter/nf_conntrack_max") @@ -238,25 +209,20 @@ func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { } stats := make([]FilterStat, 0, 1) - max, err := common.ReadInts(maxfile) + maxConn, err := common.ReadInts(maxfile) if err != nil { return nil, err } payload := FilterStat{ ConnTrackCount: count[0], - ConnTrackMax: max[0], + ConnTrackMax: maxConn[0], } stats = append(stats, payload) return stats, nil } -// ConntrackStats returns more detailed info about the conntrack table -func ConntrackStats(percpu bool) ([]ConntrackStat, error) { - return ConntrackStatsWithContext(context.Background(), percpu) -} - // ConntrackStatsWithContext returns more detailed info about the conntrack table func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackStat, error) { return conntrackStatsFromFile(common.HostProcWithContext(ctx, "net/stat/nf_conntrack"), percpu) @@ -385,47 +351,20 @@ type connTmp struct { path string } -// Return a list of network connections opened. -func Connections(kind string) ([]ConnectionStat, error) { - return ConnectionsWithContext(context.Background(), kind) -} - func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { return ConnectionsPidWithContext(ctx, kind, 0) } -// Return a list of network connections opened returning at most `max` -// connections for each running process. -func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) { - return ConnectionsMaxWithContext(context.Background(), kind, max) -} - -func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithContext(ctx, kind, 0, max) -} - -// Return a list of network connections opened, omitting `Uids`. -// WithoutUids functions are reliant on implementation details. They may be altered to be an alias for Connections or be -// removed from the API in the future. -func ConnectionsWithoutUids(kind string) ([]ConnectionStat, error) { - return ConnectionsWithoutUidsWithContext(context.Background(), kind) +func ConnectionsMaxWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithContext(ctx, kind, 0, maxConn) } func ConnectionsWithoutUidsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { return ConnectionsMaxWithoutUidsWithContext(ctx, kind, 0) } -func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, max) -} - -// Return a list of network connections opened by a process. 
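One mechanical change runs through this whole hunk: every `max` parameter becomes `maxConn`, most likely to avoid shadowing the min/max builtins added in Go 1.21; behavior is unchanged. A sketch of the per-process variant with a scan limit:

package main

import (
	"fmt"

	"github.com/shirou/gopsutil/v4/net"
)

func main() {
	pid := int32(1) // hypothetical pid, for illustration only
	// maxConn bounds how many fd entries are read per process; 0 = no limit.
	conns, err := net.ConnectionsPidMax("tcp", pid, 16)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(conns), "tcp sockets for pid", pid)
}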
-func ConnectionsPid(kind string, pid int32) ([]ConnectionStat, error) { - return ConnectionsPidWithContext(context.Background(), kind, pid) -} - -func ConnectionsPidWithoutUids(kind string, pid int32) ([]ConnectionStat, error) { - return ConnectionsPidWithoutUidsWithContext(context.Background(), kind, pid) +func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, maxConn) } func ConnectionsPidWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { @@ -436,24 +375,15 @@ func ConnectionsPidWithoutUidsWithContext(ctx context.Context, kind string, pid return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, 0) } -// Return up to `max` network connections opened by a process. -func ConnectionsPidMax(kind string, pid int32, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithContext(context.Background(), kind, pid, max) +func ConnectionsPidMaxWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn, false) } -func ConnectionsPidMaxWithoutUids(kind string, pid int32, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, max) +func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn, true) } -func ConnectionsPidMaxWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { - return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, max, false) -} - -func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { - return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, max, true) -} - -func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int, skipUids bool) ([]ConnectionStat, error) { +func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int, skipUids bool) ([]ConnectionStat, error) { tmap, ok := netConnectionKindMap[kind] if !ok { return nil, fmt.Errorf("invalid kind, %s", kind) @@ -462,9 +392,9 @@ func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, p var err error var inodes map[string][]inodeMap if pid == 0 { - inodes, err = getProcInodesAllWithContext(ctx, root, max) + inodes, err = getProcInodesAllWithContext(ctx, root, maxConn) } else { - inodes, err = getProcInodes(root, pid, max) + inodes, err = getProcInodes(root, pid, maxConn) if len(inodes) == 0 { // no connection for the pid return []ConnectionStat{}, nil @@ -476,10 +406,6 @@ func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, p return statsFromInodesWithContext(ctx, root, pid, tmap, inodes, skipUids) } -func statsFromInodes(root string, pid int32, tmap []netConnectionKindType, inodes map[string][]inodeMap, skipUids bool) ([]ConnectionStat, error) { - return statsFromInodesWithContext(context.Background(), root, pid, tmap, inodes, skipUids) -} - func statsFromInodesWithContext(ctx context.Context, root string, pid int32, tmap []netConnectionKindType, inodes map[string][]inodeMap, skipUids bool) ([]ConnectionStat, error) { dupCheckMap := make(map[string]struct{}) var ret []ConnectionStat @@ -496,7 
+422,7 @@ func statsFromInodesWithContext(ctx context.Context, root string, pid int32, tma } switch t.family { case syscall.AF_INET, syscall.AF_INET6: - ls, err = processInetWithContext(ctx, path, t, inodes, pid) + ls, err = processInet(path, t, inodes, pid) case syscall.AF_UNIX: ls, err = processUnix(path, t, inodes, pid) } @@ -543,7 +469,7 @@ func statsFromInodesWithContext(ctx context.Context, root string, pid int32, tma } // getProcInodes returns fd of the pid. -func getProcInodes(root string, pid int32, max int) (map[string][]inodeMap, error) { +func getProcInodes(root string, pid int32, maxConn int) (map[string][]inodeMap, error) { ret := make(map[string][]inodeMap) dir := fmt.Sprintf("%s/%d/fd", root, pid) @@ -552,7 +478,7 @@ func getProcInodes(root string, pid int32, max int) (map[string][]inodeMap, erro return ret, err } defer f.Close() - dirEntries, err := readDir(f, max) + dirEntries, err := f.ReadDir(maxConn) if err != nil { return ret, err } @@ -573,7 +499,7 @@ func getProcInodes(root string, pid int32, max int) (map[string][]inodeMap, erro if !ok { ret[inode] = make([]inodeMap, 0) } - fd, err := strconv.Atoi(dirEntry.Name()) + fd, err := strconv.ParseInt(dirEntry.Name(), 10, 32) if err != nil { continue } @@ -587,14 +513,6 @@ func getProcInodes(root string, pid int32, max int) (map[string][]inodeMap, erro return ret, nil } -// Pids retunres all pids. -// Note: this is a copy of process_linux.Pids() -// FIXME: Import process occures import cycle. -// move to common made other platform breaking. Need consider. -func Pids() ([]int32, error) { - return PidsWithContext(context.Background()) -} - func PidsWithContext(ctx context.Context) ([]int32, error) { var ret []int32 @@ -668,11 +586,7 @@ func (p *process) fillFromStatus(ctx context.Context) error { return nil } -func getProcInodesAll(root string, max int) (map[string][]inodeMap, error) { - return getProcInodesAllWithContext(context.Background(), root, max) -} - -func getProcInodesAllWithContext(ctx context.Context, root string, max int) (map[string][]inodeMap, error) { +func getProcInodesAllWithContext(ctx context.Context, root string, maxConn int) (map[string][]inodeMap, error) { pids, err := PidsWithContext(ctx) if err != nil { return nil, err @@ -680,7 +594,7 @@ func getProcInodesAllWithContext(ctx context.Context, root string, max int) (map ret := make(map[string][]inodeMap) for _, pid := range pids { - t, err := getProcInodes(root, pid, max) + t, err := getProcInodes(root, pid, maxConn) if err != nil { // skip if permission error or no longer exists if os.IsPermission(err) || os.IsNotExist(err) || errors.Is(err, io.EOF) { @@ -702,10 +616,6 @@ func getProcInodesAllWithContext(ctx context.Context, root string, max int) (map // "0500000A:0016" -> "10.0.0.5", 22 // "0085002452100113070057A13F025401:0035" -> "2400:8500:1301:1052:a157:7:154:23f", 53 func decodeAddress(family uint32, src string) (Addr, error) { - return decodeAddressWithContext(context.Background(), family, src) -} - -func decodeAddressWithContext(ctx context.Context, family uint32, src string) (Addr, error) { t := strings.Split(src, ":") if len(t) != 2 { return Addr{}, fmt.Errorf("does not contain port, %s", src) @@ -723,12 +633,12 @@ func decodeAddressWithContext(ctx context.Context, family uint32, src string) (A if family == syscall.AF_INET { if common.IsLittleEndian() { - ip = net.IP(ReverseWithContext(ctx, decoded)) + ip = net.IP(Reverse(decoded)) } else { ip = net.IP(decoded) } } else { // IPv6 - ip, err = parseIPv6HexStringWithContext(ctx, decoded) + ip, 
err = parseIPv6HexString(decoded) if err != nil { return Addr{}, err } @@ -739,12 +649,7 @@ func decodeAddressWithContext(ctx context.Context, family uint32, src string) (A }, nil } -// Reverse reverses array of bytes. func Reverse(s []byte) []byte { - return ReverseWithContext(context.Background(), s) -} - -func ReverseWithContext(ctx context.Context, s []byte) []byte { for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { s[i], s[j] = s[j], s[i] } @@ -753,27 +658,19 @@ func ReverseWithContext(ctx context.Context, s []byte) []byte { // parseIPv6HexString parse array of bytes to IPv6 string func parseIPv6HexString(src []byte) (net.IP, error) { - return parseIPv6HexStringWithContext(context.Background(), src) -} - -func parseIPv6HexStringWithContext(ctx context.Context, src []byte) (net.IP, error) { if len(src) != 16 { - return nil, fmt.Errorf("invalid IPv6 string") + return nil, errors.New("invalid IPv6 string") } buf := make([]byte, 0, 16) for i := 0; i < len(src); i += 4 { - r := ReverseWithContext(ctx, src[i:i+4]) + r := Reverse(src[i : i+4]) buf = append(buf, r...) } return net.IP(buf), nil } func processInet(file string, kind netConnectionKindType, inodes map[string][]inodeMap, filterPid int32) ([]connTmp, error) { - return processInetWithContext(context.Background(), file, kind, inodes, filterPid) -} - -func processInetWithContext(ctx context.Context, file string, kind netConnectionKindType, inodes map[string][]inodeMap, filterPid int32) ([]connTmp, error) { if strings.HasSuffix(file, "6") && !common.PathExists(file) { // IPv6 not supported, return empty. return []connTmp{}, nil @@ -816,11 +713,11 @@ func processInetWithContext(ctx context.Context, file string, kind netConnection } else { status = "NONE" } - la, err := decodeAddressWithContext(ctx, kind.family, laddr) + la, err := decodeAddress(kind.family, laddr) if err != nil { continue } - ra, err := decodeAddressWithContext(ctx, kind.family, raddr) + ra, err := decodeAddress(kind.family, raddr) if err != nil { continue } @@ -858,7 +755,7 @@ func processUnix(file string, kind netConnectionKindType, inodes map[string][]in if len(tokens) < 6 { continue } - st, err := strconv.Atoi(tokens[4]) + st, err := strconv.ParseInt(tokens[4], 10, 32) if err != nil { return nil, err } diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_openbsd.go b/vendor/github.com/shirou/gopsutil/v4/net/net_openbsd.go similarity index 78% rename from vendor/github.com/shirou/gopsutil/v3/net/net_openbsd.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_openbsd.go index 25bbe49ca1f..a90d5904ab2 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_openbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_openbsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd -// +build openbsd package net @@ -12,13 +12,14 @@ import ( "strings" "syscall" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) var portMatch = regexp.MustCompile(`(.*)\.(\d+)$`) func ParseNetstat(output string, mode string, - iocs map[string]IOCountersStat) error { + iocs map[string]IOCountersStat, +) error { lines := strings.Split(output, "\n") exists := make([]string, 0, len(lines)-1) @@ -96,7 +97,7 @@ func ParseNetstat(output string, mode string, n.PacketsSent = parsed[2] n.Dropout = parsed[3] case "ine": - n.Errin = parsed[0] + n.Errin = parsed[0] n.Errout = parsed[1] } @@ -105,8 +106,9 @@ func ParseNetstat(output string, mode string, return nil } -func IOCounters(pernic bool) ([]IOCountersStat, 
error) { - return IOCountersWithContext(context.Background(), pernic) +// Deprecated: use process.PidsWithContext instead +func PidsWithContext(ctx context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError } func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { @@ -155,39 +157,18 @@ func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, return ret, nil } -// IOCountersByFile exists just for compatibility with Linux. -func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) { - return IOCountersByFileWithContext(context.Background(), pernic, filename) -} - func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) { return IOCounters(pernic) } -func FilterCounters() ([]FilterStat, error) { - return FilterCountersWithContext(context.Background()) -} - func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { return nil, common.ErrNotImplementedError } -func ConntrackStats(percpu bool) ([]ConntrackStat, error) { - return ConntrackStatsWithContext(context.Background(), percpu) -} - func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackStat, error) { return nil, common.ErrNotImplementedError } -// ProtoCounters returns network statistics for the entire system -// If protocols is empty then all protocols are returned, otherwise -// just the protocols in the list are returned. -// Not Implemented for OpenBSD -func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { - return ProtoCountersWithContext(context.Background(), protocols) -} - func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { return nil, common.ErrNotImplementedError } @@ -254,7 +235,7 @@ func parseNetstatAddr(local string, remote string, family uint32) (laddr Addr, r return Addr{}, fmt.Errorf("unknown family, %d", family) } } - lport, err := strconv.Atoi(port) + lport, err := strconv.ParseInt(port, 10, 32) if err != nil { return Addr{}, err } @@ -272,11 +253,6 @@ func parseNetstatAddr(local string, remote string, family uint32) (laddr Addr, r return laddr, raddr, err } -// Return a list of network connections opened. 
-func Connections(kind string) ([]ConnectionStat, error) { - return ConnectionsWithContext(context.Background(), kind) -} - func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { var ret []ConnectionStat @@ -333,3 +309,35 @@ func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, return ret, nil } + +func ConnectionsPidWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { + return nil, common.ErrNotImplementedError +} + +func ConnectionsMaxWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return nil, common.ErrNotImplementedError +} + +func ConnectionsPidMaxWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return nil, common.ErrNotImplementedError +} + +func ConnectionsWithoutUidsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { + return ConnectionsMaxWithoutUidsWithContext(ctx, kind, 0) +} + +func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, maxConn) +} + +func ConnectionsPidWithoutUidsWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, 0) +} + +func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn) +} + +func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return nil, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/v4/net/net_solaris.go b/vendor/github.com/shirou/gopsutil/v4/net/net_solaris.go new file mode 100644 index 00000000000..83eb1d0f771 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_solaris.go @@ -0,0 +1,168 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build solaris + +package net + +import ( + "context" + "fmt" + "regexp" + "runtime" + "strconv" + "strings" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +var kstatSplit = regexp.MustCompile(`[:\s]+`) + +func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { + // collect all the net class's links with below statistics + filterstr := "/^(?!vnic)/::phys:/^rbytes64$|^ipackets64$|^idrops64$|^ierrors$|^obytes64$|^opackets64$|^odrops64$|^oerrors$/" + if runtime.GOOS == "illumos" { + filterstr = "/[^vnic]/::mac:/^rbytes64$|^ipackets64$|^idrops64$|^ierrors$|^obytes64$|^opackets64$|^odrops64$|^oerrors$/" + } + kstatSysOut, err := invoke.CommandWithContext(ctx, "kstat", "-c", "net", "-p", filterstr) + if err != nil { + return nil, fmt.Errorf("cannot execute kstat: %w", err) + } + + lines := strings.Split(strings.TrimSpace(string(kstatSysOut)), "\n") + if len(lines) == 0 { + return nil, fmt.Errorf("no interface found") + } + rbytes64arr := make(map[string]uint64) + ipackets64arr := make(map[string]uint64) + idrops64arr := make(map[string]uint64) + ierrorsarr := make(map[string]uint64) + obytes64arr := make(map[string]uint64) + opackets64arr := make(map[string]uint64) + odrops64arr := make(map[string]uint64) + oerrorsarr := make(map[string]uint64) + + for _, line := range lines { + fields := kstatSplit.Split(line, -1) + interfaceName := fields[0] + instance := fields[1] + switch fields[3] { + 
case "rbytes64": + rbytes64arr[interfaceName+instance], err = strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, fmt.Errorf("cannot parse rbytes64: %w", err) + } + case "ipackets64": + ipackets64arr[interfaceName+instance], err = strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, fmt.Errorf("cannot parse ipackets64: %w", err) + } + case "idrops64": + idrops64arr[interfaceName+instance], err = strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, fmt.Errorf("cannot parse idrops64: %w", err) + } + case "ierrors": + ierrorsarr[interfaceName+instance], err = strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, fmt.Errorf("cannot parse ierrors: %w", err) + } + case "obytes64": + obytes64arr[interfaceName+instance], err = strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, fmt.Errorf("cannot parse obytes64: %w", err) + } + case "opackets64": + opackets64arr[interfaceName+instance], err = strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, fmt.Errorf("cannot parse opackets64: %w", err) + } + case "odrops64": + odrops64arr[interfaceName+instance], err = strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, fmt.Errorf("cannot parse odrops64: %w", err) + } + case "oerrors": + oerrorsarr[interfaceName+instance], err = strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, fmt.Errorf("cannot parse oerrors: %w", err) + } + } + } + ret := make([]IOCountersStat, 0) + for k := range rbytes64arr { + nic := IOCountersStat{ + Name: k, + BytesRecv: rbytes64arr[k], + PacketsRecv: ipackets64arr[k], + Errin: ierrorsarr[k], + Dropin: idrops64arr[k], + BytesSent: obytes64arr[k], + PacketsSent: opackets64arr[k], + Errout: oerrorsarr[k], + Dropout: odrops64arr[k], + } + ret = append(ret, nic) + } + + if !pernic { + return getIOCountersAll(ret) + } + + return ret, nil +} + +func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) { + return IOCountersWithContext(ctx, pernic) +} + +func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { + return nil, common.ErrNotImplementedError +} + +func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackStat, error) { + return nil, common.ErrNotImplementedError +} + +func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { + return nil, common.ErrNotImplementedError +} + +// Deprecated: use process.PidsWithContext instead +func PidsWithContext(ctx context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError +} + +func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { + return []ConnectionStat{}, common.ErrNotImplementedError +} + +func ConnectionsMaxWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithContext(ctx, kind, 0, maxConn) +} + +func ConnectionsWithoutUidsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { + return ConnectionsMaxWithoutUidsWithContext(ctx, kind, 0) +} + +func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, maxConn) +} + +func ConnectionsPidWithoutUidsWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, 0) +} + +func 
ConnectionsPidWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithContext(ctx, kind, pid, 0) +} + +func ConnectionsPidMaxWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn, false) +} + +func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn, true) +} + +func connectionsPidMaxWithoutUidsWithContext(_ context.Context, _ string, _ int32, _ int, _ bool) ([]ConnectionStat, error) { + return []ConnectionStat{}, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/v4/net/net_unix.go b/vendor/github.com/shirou/gopsutil/v4/net/net_unix.go new file mode 100644 index 00000000000..6eb7e2fc521 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_unix.go @@ -0,0 +1,188 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build freebsd || darwin + +package net + +import ( + "context" + "fmt" + "net" + "strconv" + "strings" + "syscall" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { + return ConnectionsPidWithContext(ctx, kind, 0) +} + +func ConnectionsMaxWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return []ConnectionStat{}, common.ErrNotImplementedError +} + +func ConnectionsPidWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { + var ret []ConnectionStat + + args := []string{"-i"} + switch strings.ToLower(kind) { + default: + fallthrough + case "": + fallthrough + case "all": + fallthrough + case "inet": + args = append(args, "tcp", "-i", "udp") + case "inet4": + args = append(args, "4") + case "inet6": + args = append(args, "6") + case "tcp": + args = append(args, "tcp") + case "tcp4": + args = append(args, "4tcp") + case "tcp6": + args = append(args, "6tcp") + case "udp": + args = append(args, "udp") + case "udp4": + args = append(args, "4udp") + case "udp6": + args = append(args, "6udp") + case "unix": + args = []string{"-U"} + } + + r, err := common.CallLsofWithContext(ctx, invoke, pid, args...) 
+ if err != nil { + return nil, err + } + for _, rr := range r { + if strings.HasPrefix(rr, "COMMAND") { + continue + } + n, err := parseNetLine(rr) + if err != nil { + continue + } + + ret = append(ret, n) + } + + return ret, nil +} + +var constMap = map[string]int{ + "unix": syscall.AF_UNIX, + "TCP": syscall.SOCK_STREAM, + "UDP": syscall.SOCK_DGRAM, + "IPv4": syscall.AF_INET, + "IPv6": syscall.AF_INET6, +} + +func parseNetLine(line string) (ConnectionStat, error) { + f := strings.Fields(line) + if len(f) < 8 { + return ConnectionStat{}, fmt.Errorf("wrong line,%s", line) + } + + if len(f) == 8 { + f = append(f, f[7]) + f[7] = "unix" + } + + pid, err := strconv.ParseInt(f[1], 10, 32) + if err != nil { + return ConnectionStat{}, err + } + fd, err := strconv.ParseInt(strings.Trim(f[3], "u"), 10, 32) + if err != nil { + return ConnectionStat{}, fmt.Errorf("unknown fd, %s", f[3]) + } + netFamily, ok := constMap[f[4]] + if !ok { + return ConnectionStat{}, fmt.Errorf("unknown family, %s", f[4]) + } + netType, ok := constMap[f[7]] + if !ok { + return ConnectionStat{}, fmt.Errorf("unknown type, %s", f[7]) + } + + var laddr, raddr Addr + if f[7] == "unix" { + laddr.IP = f[8] + } else { + laddr, raddr, err = parseNetAddr(f[8]) + if err != nil { + return ConnectionStat{}, fmt.Errorf("failed to parse netaddr, %s", f[8]) + } + } + + n := ConnectionStat{ + Fd: uint32(fd), + Family: uint32(netFamily), + Type: uint32(netType), + Laddr: laddr, + Raddr: raddr, + Pid: int32(pid), + } + if len(f) == 10 { + n.Status = strings.Trim(f[9], "()") + } + + return n, nil +} + +func parseNetAddr(line string) (laddr Addr, raddr Addr, err error) { + parse := func(l string) (Addr, error) { + host, port, err := net.SplitHostPort(l) + if err != nil { + return Addr{}, fmt.Errorf("wrong addr, %s", l) + } + lport, err := strconv.ParseInt(port, 10, 32) + if err != nil { + return Addr{}, err + } + return Addr{IP: host, Port: uint32(lport)}, nil + } + + addrs := strings.Split(line, "->") + if len(addrs) == 0 { + return laddr, raddr, fmt.Errorf("wrong netaddr, %s", line) + } + laddr, err = parse(addrs[0]) + if len(addrs) == 2 { // remote addr exists + raddr, err = parse(addrs[1]) + if err != nil { + return laddr, raddr, err + } + } + + return laddr, raddr, err +} + +func ConnectionsPidMaxWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return []ConnectionStat{}, common.ErrNotImplementedError +} + +func ConnectionsWithoutUidsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { + return ConnectionsMaxWithoutUidsWithContext(ctx, kind, 0) +} + +func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, maxConn) +} + +func ConnectionsPidWithoutUidsWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, 0) +} + +func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn) +} + +func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return []ConnectionStat{}, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_windows.go b/vendor/github.com/shirou/gopsutil/v4/net/net_windows.go similarity index 86% 
rename from vendor/github.com/shirou/gopsutil/v3/net/net_windows.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_windows.go index 5d384342f87..00ebf89ef1a 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_windows.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_windows.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build windows -// +build windows package net @@ -11,7 +11,7 @@ import ( "syscall" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" "golang.org/x/sys/windows" ) @@ -136,10 +136,6 @@ type mibIfRow2 struct { OutQLen uint64 } -func IOCounters(pernic bool) ([]IOCountersStat, error) { - return IOCountersWithContext(context.Background(), pernic) -} - func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { ifs, err := net.Interfaces() if err != nil { @@ -200,32 +196,14 @@ func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, return counters, nil } -// IOCountersByFile exists just for compatibility with Linux. -func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) { - return IOCountersByFileWithContext(context.Background(), pernic, filename) -} - func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) { return IOCounters(pernic) } -// Return a list of network connections -// Available kind: -// -// reference to netConnectionKindMap -func Connections(kind string) ([]ConnectionStat, error) { - return ConnectionsWithContext(context.Background(), kind) -} - func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { return ConnectionsPidWithContext(ctx, kind, 0) } -// ConnectionsPid Return a list of network connections opened by a process -func ConnectionsPid(kind string, pid int32) ([]ConnectionStat, error) { - return ConnectionsPidWithContext(context.Background(), kind, pid) -} - func ConnectionsPidWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { tmap, ok := netConnectionKindMap[kind] if !ok { @@ -277,75 +255,47 @@ func getNetStatWithKind(kindType netConnectionKindType) ([]ConnectionStat, error return nil, fmt.Errorf("invalid kind filename, %s", kindType.filename) } -// Return a list of network connections opened returning at most `max` -// connections for each running process. -func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) { - return ConnectionsMaxWithContext(context.Background(), kind, max) -} - -func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { - return []ConnectionStat{}, common.ErrNotImplementedError +// Deprecated: use process.PidsWithContext instead +func PidsWithContext(ctx context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError } -// Return a list of network connections opened, omitting `Uids`. -// WithoutUids functions are reliant on implementation details. They may be altered to be an alias for Connections or be -// removed from the API in the future. 
-func ConnectionsWithoutUids(kind string) ([]ConnectionStat, error) { - return ConnectionsWithoutUidsWithContext(context.Background(), kind) +func ConnectionsMaxWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithContext(ctx, kind, 0, maxConn) } func ConnectionsWithoutUidsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { return ConnectionsMaxWithoutUidsWithContext(ctx, kind, 0) } -func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, max) -} - -func ConnectionsPidWithoutUids(kind string, pid int32) ([]ConnectionStat, error) { - return ConnectionsPidWithoutUidsWithContext(context.Background(), kind, pid) +func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, maxConn) } func ConnectionsPidWithoutUidsWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, 0) } -func ConnectionsPidMaxWithoutUids(kind string, pid int32, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, max) +func ConnectionsPidMaxWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn, false) } -func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { - return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, max) +func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn, true) } -func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { +func connectionsPidMaxWithoutUidsWithContext(_ context.Context, _ string, _ int32, _ int, _ bool) ([]ConnectionStat, error) { return []ConnectionStat{}, common.ErrNotImplementedError } -func FilterCounters() ([]FilterStat, error) { - return FilterCountersWithContext(context.Background()) -} - func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { return nil, common.ErrNotImplementedError } -func ConntrackStats(percpu bool) ([]ConntrackStat, error) { - return ConntrackStatsWithContext(context.Background(), percpu) -} - func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackStat, error) { return nil, common.ErrNotImplementedError } -// ProtoCounters returns network statistics for the entire system -// If protocols is empty then all protocols are returned, otherwise -// just the protocols in the list are returned. 
-// Not Implemented for Windows -func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { - return ProtoCountersWithContext(context.Background(), protocols) -} - func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { return nil, common.ErrNotImplementedError } diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process.go b/vendor/github.com/shirou/gopsutil/v4/process/process.go new file mode 100644 index 00000000000..70411c61644 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/process/process.go @@ -0,0 +1,640 @@ +// SPDX-License-Identifier: BSD-3-Clause +package process + +import ( + "context" + "encoding/json" + "errors" + "runtime" + "sort" + "sync" + "time" + + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/mem" + "github.com/shirou/gopsutil/v4/net" +) + +var ( + invoke common.Invoker = common.Invoke{} + ErrorNoChildren = errors.New("process does not have children") // Deprecated: ErrorNoChildren is never returned by process.Children(), check its returned []*Process slice length instead + ErrorProcessNotRunning = errors.New("process does not exist") + ErrorNotPermitted = errors.New("operation not permitted") +) + +type Process struct { + Pid int32 `json:"pid"` + name string + status string + parent int32 + parentMutex sync.RWMutex // for windows ppid cache + numCtxSwitches *NumCtxSwitchesStat + uids []uint32 + gids []uint32 + groups []uint32 + numThreads int32 + memInfo *MemoryInfoStat + sigInfo *SignalInfoStat + createTime int64 + + lastCPUTimes *cpu.TimesStat + lastCPUTime time.Time + + tgid int32 +} + +// Process status +const ( + // Running marks a task a running or runnable (on the run queue) + Running = "running" + // Blocked marks a task waiting on a short, uninterruptible operation (usually I/O) + Blocked = "blocked" + // Idle marks a task sleeping for more than about 20 seconds + Idle = "idle" + // Lock marks a task waiting to acquire a lock + Lock = "lock" + // Sleep marks task waiting for short, interruptible operation + Sleep = "sleep" + // Stop marks a stopped process + Stop = "stop" + // Wait marks an idle interrupt thread (or paging in pre 2.6.xx Linux) + Wait = "wait" + // Zombie marks a defunct process, terminated but not reaped by its parent + Zombie = "zombie" + + // Solaris states. See https://github.com/collectd/collectd/blob/1da3305c10c8ff9a63081284cf3d4bb0f6daffd8/src/processes.c#L2115 + Daemon = "daemon" + Detached = "detached" + System = "system" + Orphan = "orphan" + + UnknownState = "" +) + +type OpenFilesStat struct { + Path string `json:"path"` + Fd uint64 `json:"fd"` +} + +type MemoryInfoStat struct { + RSS uint64 `json:"rss"` // bytes + VMS uint64 `json:"vms"` // bytes + HWM uint64 `json:"hwm"` // bytes + Data uint64 `json:"data"` // bytes + Stack uint64 `json:"stack"` // bytes + Locked uint64 `json:"locked"` // bytes + Swap uint64 `json:"swap"` // bytes +} + +type SignalInfoStat struct { + PendingProcess uint64 `json:"pending_process"` + PendingThread uint64 `json:"pending_thread"` + Blocked uint64 `json:"blocked"` + Ignored uint64 `json:"ignored"` + Caught uint64 `json:"caught"` +} + +type RlimitStat struct { + Resource int32 `json:"resource"` + Soft uint64 `json:"soft"` + Hard uint64 `json:"hard"` + Used uint64 `json:"used"` +} + +type IOCountersStat struct { + // ReadCount is a number of read I/O operations such as syscalls. 
+ ReadCount uint64 `json:"readCount"` + // WriteCount is a number of read I/O operations such as syscalls. + WriteCount uint64 `json:"writeCount"` + // ReadBytes is a number of all I/O read in bytes. This includes disk I/O on Linux and Windows. + ReadBytes uint64 `json:"readBytes"` + // WriteBytes is a number of all I/O write in bytes. This includes disk I/O on Linux and Windows. + WriteBytes uint64 `json:"writeBytes"` + // DiskReadBytes is a number of disk I/O write in bytes. Currently only Linux has this value. + DiskReadBytes uint64 `json:"diskReadBytes"` + // DiskWriteBytes is a number of disk I/O read in bytes. Currently only Linux has this value. + DiskWriteBytes uint64 `json:"diskWriteBytes"` +} + +type NumCtxSwitchesStat struct { + Voluntary int64 `json:"voluntary"` + Involuntary int64 `json:"involuntary"` +} + +type PageFaultsStat struct { + MinorFaults uint64 `json:"minorFaults"` + MajorFaults uint64 `json:"majorFaults"` + ChildMinorFaults uint64 `json:"childMinorFaults"` + ChildMajorFaults uint64 `json:"childMajorFaults"` +} + +// Resource limit constants are from /usr/include/x86_64-linux-gnu/bits/resource.h +// from libc6-dev package in Ubuntu 16.10 +const ( + RLIMIT_CPU int32 = 0 + RLIMIT_FSIZE int32 = 1 + RLIMIT_DATA int32 = 2 + RLIMIT_STACK int32 = 3 + RLIMIT_CORE int32 = 4 + RLIMIT_RSS int32 = 5 + RLIMIT_NPROC int32 = 6 + RLIMIT_NOFILE int32 = 7 + RLIMIT_MEMLOCK int32 = 8 + RLIMIT_AS int32 = 9 + RLIMIT_LOCKS int32 = 10 + RLIMIT_SIGPENDING int32 = 11 + RLIMIT_MSGQUEUE int32 = 12 + RLIMIT_NICE int32 = 13 + RLIMIT_RTPRIO int32 = 14 + RLIMIT_RTTIME int32 = 15 +) + +func (p Process) String() string { + s, _ := json.Marshal(p) + return string(s) +} + +func (o OpenFilesStat) String() string { + s, _ := json.Marshal(o) + return string(s) +} + +func (m MemoryInfoStat) String() string { + s, _ := json.Marshal(m) + return string(s) +} + +func (r RlimitStat) String() string { + s, _ := json.Marshal(r) + return string(s) +} + +func (i IOCountersStat) String() string { + s, _ := json.Marshal(i) + return string(s) +} + +func (p NumCtxSwitchesStat) String() string { + s, _ := json.Marshal(p) + return string(s) +} + +var enableBootTimeCache bool + +// EnableBootTimeCache change cache behavior of BootTime. If true, cache BootTime value. Default is false. +func EnableBootTimeCache(enable bool) { + enableBootTimeCache = enable +} + +// Pids returns a slice of process ID list which are running now. +func Pids() ([]int32, error) { + return PidsWithContext(context.Background()) +} + +func PidsWithContext(ctx context.Context) ([]int32, error) { + pids, err := pidsWithContext(ctx) + sort.Slice(pids, func(i, j int) bool { return pids[i] < pids[j] }) + return pids, err +} + +// Processes returns a slice of pointers to Process structs for all +// currently running processes. +func Processes() ([]*Process, error) { + return ProcessesWithContext(context.Background()) +} + +// NewProcess creates a new Process instance, it only stores the pid and +// checks that the process exists. Other method on Process can be used +// to get more information about the process. An error will be returned +// if the process does not exist. 
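+
+// exampleListPids is an editor's illustrative sketch (not upstream code)
+// showing the intended call pattern of the Pids and NewProcess API in this
+// file: enumerate PIDs, open a handle per PID, and tolerate processes that
+// exit between enumeration and lookup.
+func exampleListPids(ctx context.Context) ([]*Process, error) {
+ pids, err := PidsWithContext(ctx) // sorted ascending by the wrapper above
+ if err != nil {
+ return nil, err
+ }
+ procs := make([]*Process, 0, len(pids))
+ for _, pid := range pids {
+ p, err := NewProcessWithContext(ctx, pid)
+ if errors.Is(err, ErrorProcessNotRunning) {
+ continue // the process exited while we were scanning
+ }
+ if err != nil {
+ return nil, err
+ }
+ procs = append(procs, p)
+ }
+ return procs, nil
+}
+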
+// NewProcess creates a new Process instance; it only stores the pid and
+// checks that the process exists. Other methods on Process can be used
+// to get more information about the process. An error will be returned
+// if the process does not exist.
+func NewProcess(pid int32) (*Process, error) {
+ return NewProcessWithContext(context.Background(), pid)
+}
+
+func NewProcessWithContext(ctx context.Context, pid int32) (*Process, error) {
+ p := &Process{
+ Pid: pid,
+ }
+
+ exists, err := PidExistsWithContext(ctx, pid)
+ if err != nil {
+ return p, err
+ }
+ if !exists {
+ return p, ErrorProcessNotRunning
+ }
+ p.CreateTimeWithContext(ctx)
+ return p, nil
+}
+
+func PidExists(pid int32) (bool, error) {
+ return PidExistsWithContext(context.Background(), pid)
+}
+
+// Background returns true if the process is in the background, false otherwise.
+func (p *Process) Background() (bool, error) {
+ return p.BackgroundWithContext(context.Background())
+}
+
+func (p *Process) BackgroundWithContext(ctx context.Context) (bool, error) {
+ fg, err := p.ForegroundWithContext(ctx)
+ if err != nil {
+ return false, err
+ }
+ return !fg, err
+}
+
+// If interval is 0, return the difference from the last call (non-blocking).
+// If interval > 0, wait for interval seconds and return the difference between start and end.
+func (p *Process) Percent(interval time.Duration) (float64, error) {
+ return p.PercentWithContext(context.Background(), interval)
+}
+
+func (p *Process) PercentWithContext(ctx context.Context, interval time.Duration) (float64, error) {
+ cpuTimes, err := p.TimesWithContext(ctx)
+ if err != nil {
+ return 0, err
+ }
+ now := time.Now()
+
+ if interval > 0 {
+ p.lastCPUTimes = cpuTimes
+ p.lastCPUTime = now
+ if err := common.Sleep(ctx, interval); err != nil {
+ return 0, err
+ }
+ cpuTimes, err = p.TimesWithContext(ctx)
+ now = time.Now()
+ if err != nil {
+ return 0, err
+ }
+ } else {
+ if p.lastCPUTimes == nil {
+ // invoked first time
+ p.lastCPUTimes = cpuTimes
+ p.lastCPUTime = now
+ return 0, nil
+ }
+ }
+
+ numcpu := runtime.NumCPU()
+ delta := (now.Sub(p.lastCPUTime).Seconds()) * float64(numcpu)
+ ret := calculatePercent(p.lastCPUTimes, cpuTimes, delta, numcpu)
+ p.lastCPUTimes = cpuTimes
+ p.lastCPUTime = now
+ return ret, nil
+}
+
+// IsRunning returns whether the process is still running or not.
+func (p *Process) IsRunning() (bool, error) {
+ return p.IsRunningWithContext(context.Background())
+}
+
+func (p *Process) IsRunningWithContext(ctx context.Context) (bool, error) {
+ createTime, err := p.CreateTimeWithContext(ctx)
+ if err != nil {
+ return false, err
+ }
+ p2, err := NewProcessWithContext(ctx, p.Pid)
+ if errors.Is(err, ErrorProcessNotRunning) {
+ return false, nil
+ }
+ createTime2, err := p2.CreateTimeWithContext(ctx)
+ if err != nil {
+ return false, err
+ }
+ return createTime == createTime2, nil
+}
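+
+// examplePercent is an editor's illustrative sketch (not upstream code) of
+// the two Percent modes documented above: interval == 0 primes the internal
+// state and returns the delta since the previous call, while interval > 0
+// blocks for the full measurement window before reporting.
+func examplePercent(ctx context.Context, p *Process) (float64, error) {
+ if _, err := p.PercentWithContext(ctx, 0); err != nil { // primes lastCPUTimes, returns 0
+ return 0, err
+ }
+ return p.PercentWithContext(ctx, time.Second) // blocking one-second window
+}
+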
+// CreateTime returns the creation time of the process in milliseconds since the epoch, in UTC.
+func (p *Process) CreateTime() (int64, error) {
+ return p.CreateTimeWithContext(context.Background())
+}
+
+func (p *Process) CreateTimeWithContext(ctx context.Context) (int64, error) {
+ if p.createTime != 0 {
+ return p.createTime, nil
+ }
+ createTime, err := p.createTimeWithContext(ctx)
+ p.createTime = createTime
+ return p.createTime, err
+}
+
+func calculatePercent(t1, t2 *cpu.TimesStat, delta float64, numcpu int) float64 {
+ if delta == 0 {
+ return 0
+ }
+ // https://github.com/giampaolo/psutil/blob/c034e6692cf736b5e87d14418a8153bb03f6cf42/psutil/__init__.py#L1064
+ delta_proc := (t2.User - t1.User) + (t2.System - t1.System)
+ if delta_proc <= 0 {
+ return 0
+ }
+ overall_percent := ((delta_proc / delta) * 100) * float64(numcpu)
+ return overall_percent
+}
+
+// MemoryPercent returns the percentage of total RAM used by this process.
+func (p *Process) MemoryPercent() (float32, error) {
+ return p.MemoryPercentWithContext(context.Background())
+}
+
+func (p *Process) MemoryPercentWithContext(ctx context.Context) (float32, error) {
+ machineMemory, err := mem.VirtualMemoryWithContext(ctx)
+ if err != nil {
+ return 0, err
+ }
+ total := machineMemory.Total
+
+ processMemory, err := p.MemoryInfoWithContext(ctx)
+ if err != nil {
+ return 0, err
+ }
+ used := processMemory.RSS
+
+ return (100 * float32(used) / float32(total)), nil
+}
+
+// CPUPercent returns the percentage of CPU time used by this process.
+func (p *Process) CPUPercent() (float64, error) {
+ return p.CPUPercentWithContext(context.Background())
+}
+
+func (p *Process) CPUPercentWithContext(ctx context.Context) (float64, error) {
+ crt_time, err := p.createTimeWithContext(ctx)
+ if err != nil {
+ return 0, err
+ }
+
+ cput, err := p.TimesWithContext(ctx)
+ if err != nil {
+ return 0, err
+ }
+
+ created := time.Unix(0, crt_time*int64(time.Millisecond))
+ totalTime := time.Since(created).Seconds()
+ if totalTime <= 0 {
+ return 0, nil
+ }
+
+ return 100 * cput.Total() / totalTime, nil
+}
+
+// Groups returns all group IDs (including supplementary groups) of the process as a slice of uint32.
+func (p *Process) Groups() ([]uint32, error) {
+ return p.GroupsWithContext(context.Background())
+}
+
+// Ppid returns the parent process ID of the process.
+func (p *Process) Ppid() (int32, error) {
+ return p.PpidWithContext(context.Background())
+}
+
+// Name returns the name of the process.
+func (p *Process) Name() (string, error) {
+ return p.NameWithContext(context.Background())
+}
+
+// Exe returns the executable path of the process.
+func (p *Process) Exe() (string, error) {
+ return p.ExeWithContext(context.Background())
+}
+
+// Cmdline returns the command line arguments of the process as a string with
+// each argument separated by a 0x20 (space) ASCII character.
+func (p *Process) Cmdline() (string, error) {
+ return p.CmdlineWithContext(context.Background())
+}
+
+// CmdlineSlice returns the command line arguments of the process as a slice with each
+// element being an argument.
+func (p *Process) CmdlineSlice() ([]string, error) {
+ return p.CmdlineSliceWithContext(context.Background())
+}
+
+// Cwd returns the current working directory of the process.
+func (p *Process) Cwd() (string, error) {
+ return p.CwdWithContext(context.Background())
+}
+
+// Parent returns the parent Process of the process.
+func (p *Process) Parent() (*Process, error) {
+ return p.ParentWithContext(context.Background())
+}
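+
+// Editor's worked example for calculatePercent above (numbers assumed): the
+// numcpu factor cancels, so the result is process CPU seconds per wall-clock
+// second times 100, and it can exceed 100 on multicore machines.
+//
+//	wall = 1.0s, numcpu = 8
+//	delta      = 1.0 * 8                 = 8.0
+//	delta_proc = 2.0 user + 0.0 system   = 2.0
+//	percent    = (2.0 / 8.0) * 100 * 8   = 200  (two cores fully busy)
+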
+// ParentWithContext returns the parent Process of the process.
+func (p *Process) ParentWithContext(ctx context.Context) (*Process, error) {
+ ppid, err := p.PpidWithContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return NewProcessWithContext(ctx, ppid)
+}
+
+// Status returns the process status.
+// The return value is one of:
+// R: Running S: Sleep T: Stop I: Idle
+// Z: Zombie W: Wait L: Lock
+// The characters are the same across all supported platforms.
+func (p *Process) Status() ([]string, error) {
+ return p.StatusWithContext(context.Background())
+}
+
+// Foreground returns true if the process is in the foreground, false otherwise.
+func (p *Process) Foreground() (bool, error) {
+ return p.ForegroundWithContext(context.Background())
+}
+
+// Uids returns the user IDs of the process as a slice of uint32.
+func (p *Process) Uids() ([]uint32, error) {
+ return p.UidsWithContext(context.Background())
+}
+
+// Gids returns the group IDs of the process as a slice of uint32.
+func (p *Process) Gids() ([]uint32, error) {
+ return p.GidsWithContext(context.Background())
+}
+
+// Terminal returns the terminal associated with the process.
+func (p *Process) Terminal() (string, error) {
+ return p.TerminalWithContext(context.Background())
+}
+
+// Nice returns the nice value (priority) of the process.
+func (p *Process) Nice() (int32, error) {
+ return p.NiceWithContext(context.Background())
+}
+
+// IOnice returns the I/O nice value (priority) of the process.
+func (p *Process) IOnice() (int32, error) {
+ return p.IOniceWithContext(context.Background())
+}
+
+// Rlimit returns resource limits.
+func (p *Process) Rlimit() ([]RlimitStat, error) {
+ return p.RlimitWithContext(context.Background())
+}
+
+// RlimitUsage returns resource limits.
+// If gatherUsed is true, the currently used value will be gathered and added
+// to the resulting RlimitStat.
+func (p *Process) RlimitUsage(gatherUsed bool) ([]RlimitStat, error) {
+ return p.RlimitUsageWithContext(context.Background(), gatherUsed)
+}
+
+// IOCounters returns I/O counters.
+func (p *Process) IOCounters() (*IOCountersStat, error) {
+ return p.IOCountersWithContext(context.Background())
+}
+
+// NumCtxSwitches returns the number of context switches of the process.
+func (p *Process) NumCtxSwitches() (*NumCtxSwitchesStat, error) {
+ return p.NumCtxSwitchesWithContext(context.Background())
+}
+
+// NumFDs returns the number of file descriptors used by the process.
+func (p *Process) NumFDs() (int32, error) {
+ return p.NumFDsWithContext(context.Background())
+}
+
+// NumThreads returns the number of threads used by the process.
+func (p *Process) NumThreads() (int32, error) {
+ return p.NumThreadsWithContext(context.Background())
+}
+
+func (p *Process) Threads() (map[int32]*cpu.TimesStat, error) {
+ return p.ThreadsWithContext(context.Background())
+}
+
+// Times returns the CPU times of the process.
+func (p *Process) Times() (*cpu.TimesStat, error) {
+ return p.TimesWithContext(context.Background())
+}
+
+// CPUAffinity returns the CPU affinity of the process.
+func (p *Process) CPUAffinity() ([]int32, error) {
+ return p.CPUAffinityWithContext(context.Background())
+}
+
+// MemoryInfo returns generic process memory information,
+// such as RSS and VMS.
+func (p *Process) MemoryInfo() (*MemoryInfoStat, error) {
+ return p.MemoryInfoWithContext(context.Background())
+}
+
+// MemoryInfoEx returns platform-specific process memory information.
+func (p *Process) MemoryInfoEx() (*MemoryInfoExStat, error) {
+ return p.MemoryInfoExWithContext(context.Background())
+}
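+
+// exampleAncestry is an editor's illustrative sketch (not upstream code) of
+// ParentWithContext above in use: walk up the process tree until the parent
+// can no longer be resolved or PID 1 is reached.
+func exampleAncestry(ctx context.Context, p *Process) []int32 {
+ var chain []int32
+ for cur := p; ; {
+ parent, err := cur.ParentWithContext(ctx)
+ if err != nil {
+ return chain // the parent exited, or the top of the tree was reached
+ }
+ chain = append(chain, parent.Pid)
+ if parent.Pid <= 1 {
+ return chain
+ }
+ cur = parent
+ }
+}
+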
+// PageFaults returns the process's page fault counters.
+func (p *Process) PageFaults() (*PageFaultsStat, error) {
+ return p.PageFaultsWithContext(context.Background())
+}
+
+// Children returns the children of the process, represented as a slice
+// of pointers to Process.
+func (p *Process) Children() ([]*Process, error) {
+ return p.ChildrenWithContext(context.Background())
+}
+
+// OpenFiles returns a slice of OpenFilesStat opened by the process.
+// OpenFilesStat includes a file path and file descriptor.
+func (p *Process) OpenFiles() ([]OpenFilesStat, error) {
+ return p.OpenFilesWithContext(context.Background())
+}
+
+// Connections returns a slice of net.ConnectionStat used by the process.
+// This returns all kinds of connections: TCP, UDP, and UNIX.
+func (p *Process) Connections() ([]net.ConnectionStat, error) {
+ return p.ConnectionsWithContext(context.Background())
+}
+
+// ConnectionsMax returns a slice of net.ConnectionStat used by the process,
+// limited to at most maxConn entries.
+func (p *Process) ConnectionsMax(maxConn int) ([]net.ConnectionStat, error) {
+ return p.ConnectionsMaxWithContext(context.Background(), maxConn)
+}
+
+// MemoryMaps gets memory maps from /proc/(pid)/smaps
+func (p *Process) MemoryMaps(grouped bool) (*[]MemoryMapsStat, error) {
+ return p.MemoryMapsWithContext(context.Background(), grouped)
+}
+
+// Tgid returns the thread group ID of the process.
+func (p *Process) Tgid() (int32, error) {
+ return p.TgidWithContext(context.Background())
+}
+
+// SendSignal sends a unix.Signal to the process.
+func (p *Process) SendSignal(sig Signal) error {
+ return p.SendSignalWithContext(context.Background(), sig)
+}
+
+// Suspend sends SIGSTOP to the process.
+func (p *Process) Suspend() error {
+ return p.SuspendWithContext(context.Background())
+}
+
+// Resume sends SIGCONT to the process.
+func (p *Process) Resume() error {
+ return p.ResumeWithContext(context.Background())
+}
+
+// Terminate sends SIGTERM to the process.
+func (p *Process) Terminate() error {
+ return p.TerminateWithContext(context.Background())
+}
+
+// Kill sends SIGKILL to the process.
+func (p *Process) Kill() error {
+ return p.KillWithContext(context.Background())
+}
+
+// Username returns the username of the process.
+func (p *Process) Username() (string, error) {
+ return p.UsernameWithContext(context.Background())
+}
+
+// Environ returns the environment variables of the process.
+func (p *Process) Environ() ([]string, error) {
+ return p.EnvironWithContext(context.Background())
+}
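+
+// exampleStop is an editor's illustrative sketch (not upstream code) tying
+// together Terminate, IsRunningWithContext and Kill above: request a
+// graceful exit with SIGTERM, then escalate to SIGKILL once the grace
+// period expires.
+func exampleStop(ctx context.Context, p *Process, grace time.Duration) error {
+ if err := p.Terminate(); err != nil {
+ return err
+ }
+ deadline := time.Now().Add(grace)
+ for time.Now().Before(deadline) {
+ running, err := p.IsRunningWithContext(ctx)
+ if err != nil || !running {
+ return err
+ }
+ if err := common.Sleep(ctx, 100*time.Millisecond); err != nil {
+ return err
+ }
+ }
+ return p.Kill()
+}
+
+// convertStatusChar maps a status character, as reported by the ps command
+// on the various platforms, to one of the status strings defined above.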
+func convertStatusChar(letter string) string { + // Sources + // Darwin: http://www.mywebuniversity.com/Man_Pages/Darwin/man_ps.html + // FreeBSD: https://www.freebsd.org/cgi/man.cgi?ps + // Linux https://man7.org/linux/man-pages/man1/ps.1.html + // OpenBSD: https://man.openbsd.org/ps.1#state + // Solaris: https://github.com/collectd/collectd/blob/1da3305c10c8ff9a63081284cf3d4bb0f6daffd8/src/processes.c#L2115 + switch letter { + case "A": + return Daemon + case "D", "U": + return Blocked + case "E": + return Detached + case "I": + return Idle + case "L": + return Lock + case "O": + return Orphan + case "R": + return Running + case "S": + return Sleep + case "T", "t": + // "t" is used by Linux to signal stopped by the debugger during tracing + return Stop + case "W": + return Wait + case "Y": + return System + case "Z": + return Zombie + default: + return UnknownState + } +} diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_bsd.go b/vendor/github.com/shirou/gopsutil/v4/process/process_bsd.go similarity index 94% rename from vendor/github.com/shirou/gopsutil/v3/process/process_bsd.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_bsd.go index 263829ffae5..dcc056101a0 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_bsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_bsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build darwin || freebsd || openbsd -// +build darwin freebsd openbsd package process @@ -8,8 +8,8 @@ import ( "context" "encoding/binary" - "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" ) type MemoryInfoExStat struct{} diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_darwin.go b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin.go new file mode 100644 index 00000000000..33abc10ac35 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin.go @@ -0,0 +1,480 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build darwin + +package process + +import ( + "bytes" + "context" + "encoding/binary" + "fmt" + "path/filepath" + "runtime" + "sort" + "strconv" + "strings" + "unsafe" + + "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/net" +) + +// copied from sys/sysctl.h +const ( + CTLKern = 1 // "high kernel": proc, limits + KernProc = 14 // struct: process entries + KernProcPID = 1 // by process id + KernProcProc = 8 // only return procs + KernProcAll = 0 // everything + KernProcPathname = 12 // path to executable +) + +type _Ctype_struct___0 struct { + Pad uint64 +} + +func pidsWithContext(ctx context.Context) ([]int32, error) { + var ret []int32 + + kprocs, err := unix.SysctlKinfoProcSlice("kern.proc.all") + if err != nil { + return ret, err + } + + for _, proc := range kprocs { + ret = append(ret, int32(proc.Proc.P_pid)) + } + + return ret, nil +} + +func (p *Process) PpidWithContext(ctx context.Context) (int32, error) { + k, err := p.getKProc() + if err != nil { + return 0, err + } + + return k.Eproc.Ppid, nil +} + +func (p *Process) NameWithContext(ctx context.Context) (string, error) { + k, err := p.getKProc() + if err != nil { + return "", err + } + + name := common.ByteToString(k.Proc.P_comm[:]) + + if len(name) >= 15 { + cmdName, err := p.cmdNameWithContext(ctx) + if err != nil { + return "", err + } + if len(cmdName) > 0 { + 
extendedName := filepath.Base(cmdName) + if strings.HasPrefix(extendedName, p.name) { + name = extendedName + } + } + } + + return name, nil +} + +func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) { + k, err := p.getKProc() + if err != nil { + return 0, err + } + + return k.Proc.P_starttime.Sec*1000 + int64(k.Proc.P_starttime.Usec)/1000, nil +} + +func (p *Process) StatusWithContext(ctx context.Context) ([]string, error) { + r, err := callPsWithContext(ctx, "state", p.Pid, false, false) + if err != nil { + return []string{""}, err + } + status := convertStatusChar(r[0][0][0:1]) + return []string{status}, err +} + +func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { + // see https://github.com/shirou/gopsutil/issues/596#issuecomment-432707831 for implementation details + pid := p.Pid + out, err := invoke.CommandWithContext(ctx, "ps", "-o", "stat=", "-p", strconv.Itoa(int(pid))) + if err != nil { + return false, err + } + return strings.IndexByte(string(out), '+') != -1, nil +} + +func (p *Process) UidsWithContext(ctx context.Context) ([]uint32, error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + + // See: http://unix.superglobalmegacorp.com/Net2/newsrc/sys/ucred.h.html + userEffectiveUID := uint32(k.Eproc.Ucred.Uid) + + return []uint32{userEffectiveUID}, nil +} + +func (p *Process) GidsWithContext(ctx context.Context) ([]uint32, error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + + gids := make([]uint32, 0, 3) + gids = append(gids, uint32(k.Eproc.Pcred.P_rgid), uint32(k.Eproc.Pcred.P_rgid), uint32(k.Eproc.Pcred.P_svgid)) + + return gids, nil +} + +func (p *Process) GroupsWithContext(ctx context.Context) ([]uint32, error) { + return nil, common.ErrNotImplementedError + // k, err := p.getKProc() + // if err != nil { + // return nil, err + // } + + // groups := make([]int32, k.Eproc.Ucred.Ngroups) + // for i := int16(0); i < k.Eproc.Ucred.Ngroups; i++ { + // groups[i] = int32(k.Eproc.Ucred.Groups[i]) + // } + + // return groups, nil +} + +func (p *Process) TerminalWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError + /* + k, err := p.getKProc() + if err != nil { + return "", err + } + + ttyNr := uint64(k.Eproc.Tdev) + termmap, err := getTerminalMap() + if err != nil { + return "", err + } + + return termmap[ttyNr], nil + */ +} + +func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { + k, err := p.getKProc() + if err != nil { + return 0, err + } + return int32(k.Proc.P_nice), nil +} + +func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { + procs, err := ProcessesWithContext(ctx) + if err != nil { + return nil, nil + } + ret := make([]*Process, 0, len(procs)) + for _, proc := range procs { + ppid, err := proc.PpidWithContext(ctx) + if err != nil { + continue + } + if ppid == p.Pid { + ret = append(ret, proc) + } + } + sort.Slice(ret, func(i, j int) bool { return ret[i].Pid < ret[j].Pid }) + return ret, nil +} + +func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) { + return net.ConnectionsPidWithContext(ctx, "all", p.Pid) +} + +func (p *Process) ConnectionsMaxWithContext(ctx context.Context, maxConn int) ([]net.ConnectionStat, error) { + return net.ConnectionsPidMaxWithContext(ctx, "all", p.Pid, maxConn) +} + +func 
ProcessesWithContext(ctx context.Context) ([]*Process, error) {
+ out := []*Process{}
+
+ pids, err := PidsWithContext(ctx)
+ if err != nil {
+ return out, err
+ }
+
+ for _, pid := range pids {
+ p, err := NewProcessWithContext(ctx, pid)
+ if err != nil {
+ continue
+ }
+ out = append(out, p)
+ }
+
+ return out, nil
+}
+
+// getKProc returns a proc as defined here:
+// http://unix.superglobalmegacorp.com/Net2/newsrc/sys/kinfo_proc.h.html
+func (p *Process) getKProc() (*unix.KinfoProc, error) {
+ return unix.SysctlKinfoProc("kern.proc.pid", int(p.Pid))
+}
+
+// callPsWithContext invokes the ps command.
+// The returned rows omit the header line and are split on spaces; the caller
+// is responsible for validating the arg and interpreting the fields.
+// If the passed pid is 0, information for all processes is returned.
+func callPsWithContext(ctx context.Context, arg string, pid int32, threadOption bool, nameOption bool) ([][]string, error) {
+ var cmd []string
+ if pid == 0 { // will get from all processes.
+ cmd = []string{"-ax", "-o", arg}
+ } else if threadOption {
+ cmd = []string{"-x", "-o", arg, "-M", "-p", strconv.Itoa(int(pid))}
+ } else {
+ cmd = []string{"-x", "-o", arg, "-p", strconv.Itoa(int(pid))}
+ }
+ if nameOption {
+ cmd = append(cmd, "-c")
+ }
+ out, err := invoke.CommandWithContext(ctx, "ps", cmd...)
+ if err != nil {
+ return [][]string{}, err
+ }
+ lines := strings.Split(string(out), "\n")
+
+ var ret [][]string
+ for _, l := range lines[1:] {
+ var lr []string
+ if nameOption {
+ lr = append(lr, l)
+ } else {
+ for _, r := range strings.Split(l, " ") {
+ if r == "" {
+ continue
+ }
+ lr = append(lr, strings.TrimSpace(r))
+ }
+ }
+ if len(lr) != 0 {
+ ret = append(ret, lr)
+ }
+ }
+
+ return ret, nil
+}
+
+var (
+ procPidPath common.ProcPidPathFunc
+ procPidInfo common.ProcPidInfoFunc
+ machTimeBaseInfo common.MachTimeBaseInfoFunc
+)
+
+func registerFuncs() (*common.Library, error) {
+ lib, err := common.NewLibrary(common.System)
+ if err != nil {
+ return nil, err
+ }
+
+ procPidPath = common.GetFunc[common.ProcPidPathFunc](lib, common.ProcPidPathSym)
+ procPidInfo = common.GetFunc[common.ProcPidInfoFunc](lib, common.ProcPidInfoSym)
+ machTimeBaseInfo = common.GetFunc[common.MachTimeBaseInfoFunc](lib, common.MachTimeBaseInfoSym)
+
+ return lib, nil
+}
+
+func getTimeScaleToNanoSeconds() float64 {
+ var timeBaseInfo common.MachTimeBaseInfo
+
+ machTimeBaseInfo(uintptr(unsafe.Pointer(&timeBaseInfo)))
+
+ return float64(timeBaseInfo.Numer) / float64(timeBaseInfo.Denom)
+}
+
+func (p *Process) ExeWithContext(ctx context.Context) (string, error) {
+ lib, err := registerFuncs()
+ if err != nil {
+ return "", err
+ }
+ defer lib.Close()
+
+ buf := common.NewCStr(common.PROC_PIDPATHINFO_MAXSIZE)
+ ret := procPidPath(p.Pid, buf.Addr(), common.PROC_PIDPATHINFO_MAXSIZE)
+
+ if ret <= 0 {
+ return "", fmt.Errorf("unknown error: proc_pidpath returned %d", ret)
+ }
+
+ return buf.GoString(), nil
+}
+
+// sys/proc_info.h
+type vnodePathInfo struct {
+ _ [152]byte
+ vipPath [common.MAXPATHLEN]byte
+ _ [1176]byte
+}
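+
+// Editor's illustration (pid 42 assumed) of the argv that callPsWithContext
+// above hands to invoke.CommandWithContext:
+//
+//	pid == 0             -> ps -ax -o <arg>
+//	threadOption == true -> ps -x -o <arg> -M -p 42
+//	otherwise            -> ps -x -o <arg> -p 42
+//	nameOption == true   -> appends -c
+
+// CwdWithContext retrieves the current working directory for the given
+// process. It uses proc_pidinfo from libproc and will only work for
+// processes the EUID can access. Otherwise "operation not permitted" will be
+// returned as the error.
+// Note: This might also work for other *BSD OSs.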
+func (p *Process) CwdWithContext(ctx context.Context) (string, error) { + lib, err := registerFuncs() + if err != nil { + return "", err + } + defer lib.Close() + + // Lock OS thread to ensure the errno does not change + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + var vpi vnodePathInfo + const vpiSize = int32(unsafe.Sizeof(vpi)) + ret := procPidInfo(p.Pid, common.PROC_PIDVNODEPATHINFO, 0, uintptr(unsafe.Pointer(&vpi)), vpiSize) + errno, _ := lib.Dlsym("errno") + err = *(**unix.Errno)(unsafe.Pointer(&errno)) + if err == unix.EPERM { + return "", ErrorNotPermitted + } + + if ret <= 0 { + return "", fmt.Errorf("unknown error: proc_pidinfo returned %d", ret) + } + + if ret != vpiSize { + return "", fmt.Errorf("too few bytes; expected %d, got %d", vpiSize, ret) + } + return common.GoString(&vpi.vipPath[0]), nil +} + +func procArgs(pid int32) ([]byte, int, error) { + procargs, _, err := common.CallSyscall([]int32{common.CTL_KERN, common.KERN_PROCARGS2, pid}) + if err != nil { + return nil, 0, err + } + + // The first 4 bytes indicate the number of arguments. + nargs := procargs[:4] + return procargs, int(binary.LittleEndian.Uint32(nargs)), nil +} + +func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) { + return p.cmdlineSliceWithContext(ctx, true) +} + +func (p *Process) cmdlineSliceWithContext(ctx context.Context, fallback bool) ([]string, error) { + pargs, nargs, err := procArgs(p.Pid) + if err != nil { + return nil, err + } + // The first bytes hold the nargs int, skip it. + args := bytes.Split((pargs)[unsafe.Sizeof(int(0)):], []byte{0}) + var argStr string + // The first element is the actual binary/command path. + // command := args[0] + var argSlice []string + // var envSlice []string + // All other, non-zero elements are arguments. The first "nargs" elements + // are the arguments. Everything else in the slice is then the environment + // of the process. 
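+ // Editor's illustration (layout assumed from the comments above) of the
+ // KERN_PROCARGS2 buffer being walked here:
+ //
+ //	[nargs][exec path]\x00...\x00[argv0]\x00[argv1]\x00...[env0]\x00...
+ //
+ // args[0] is the executable path; the next nargs non-empty entries are the
+ // argv values collected below; the remainder is the process environment.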
+ for _, arg := range args[1:] { + argStr = string(arg[:]) + if len(argStr) > 0 { + if nargs > 0 { + argSlice = append(argSlice, argStr) + nargs-- + continue + } + break + // envSlice = append(envSlice, argStr) + } + } + return argSlice, err +} + +// cmdNameWithContext returns the command name (including spaces) without any arguments +func (p *Process) cmdNameWithContext(ctx context.Context) (string, error) { + r, err := p.cmdlineSliceWithContext(ctx, false) + if err != nil { + return "", err + } + + if len(r) == 0 { + return "", nil + } + + return r[0], err +} + +func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) { + r, err := p.CmdlineSliceWithContext(ctx) + if err != nil { + return "", err + } + return strings.Join(r, " "), err +} + +func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { + lib, err := registerFuncs() + if err != nil { + return 0, err + } + defer lib.Close() + + var ti ProcTaskInfo + procPidInfo(p.Pid, common.PROC_PIDTASKINFO, 0, uintptr(unsafe.Pointer(&ti)), int32(unsafe.Sizeof(ti))) + + return int32(ti.Threadnum), nil +} + +func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) { + lib, err := registerFuncs() + if err != nil { + return nil, err + } + defer lib.Close() + + var ti ProcTaskInfo + procPidInfo(p.Pid, common.PROC_PIDTASKINFO, 0, uintptr(unsafe.Pointer(&ti)), int32(unsafe.Sizeof(ti))) + + timescaleToNanoSeconds := getTimeScaleToNanoSeconds() + ret := &cpu.TimesStat{ + CPU: "cpu", + User: float64(ti.Total_user) * timescaleToNanoSeconds / 1e9, + System: float64(ti.Total_system) * timescaleToNanoSeconds / 1e9, + } + return ret, nil +} + +func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) { + lib, err := registerFuncs() + if err != nil { + return nil, err + } + defer lib.Close() + + var ti ProcTaskInfo + procPidInfo(p.Pid, common.PROC_PIDTASKINFO, 0, uintptr(unsafe.Pointer(&ti)), int32(unsafe.Sizeof(ti))) + + ret := &MemoryInfoStat{ + RSS: uint64(ti.Resident_size), + VMS: uint64(ti.Virtual_size), + Swap: uint64(ti.Pageins), + } + return ret, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_amd64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_amd64.go similarity index 87% rename from vendor/github.com/shirou/gopsutil/v3/process/process_darwin_amd64.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_darwin_amd64.go index b353e5eac7b..890a5d5331a 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_amd64.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_amd64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_darwin.go @@ -211,6 +212,27 @@ type Posix_cred struct { type Label struct{} +type ProcTaskInfo struct { + Virtual_size uint64 + Resident_size uint64 + Total_user uint64 + Total_system uint64 + Threads_user uint64 + Threads_system uint64 + Policy int32 + Faults int32 + Pageins int32 + Cow_faults int32 + Messages_sent int32 + Messages_received int32 + Syscalls_mach int32 + Syscalls_unix int32 + Csw int32 + Threadnum int32 + Numrunning int32 + Priority int32 +} + type AuditinfoAddr struct { Auid uint32 Mask AuMask diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_arm64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_arm64.go similarity index 85% rename from vendor/github.com/shirou/gopsutil/v3/process/process_darwin_arm64.go rename to 
vendor/github.com/shirou/gopsutil/v4/process/process_darwin_arm64.go index cbd6bdc793c..8075cf227d1 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_arm64.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_arm64.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build darwin && arm64 -// +build darwin,arm64 // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs process/types_darwin.go @@ -190,6 +190,27 @@ type Posix_cred struct{} type Label struct{} +type ProcTaskInfo struct { + Virtual_size uint64 + Resident_size uint64 + Total_user uint64 + Total_system uint64 + Threads_user uint64 + Threads_system uint64 + Policy int32 + Faults int32 + Pageins int32 + Cow_faults int32 + Messages_sent int32 + Messages_received int32 + Syscalls_mach int32 + Syscalls_unix int32 + Csw int32 + Threadnum int32 + Numrunning int32 + Priority int32 +} + type AuditinfoAddr struct { Auid uint32 Mask AuMask diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_fallback.go b/vendor/github.com/shirou/gopsutil/v4/process/process_fallback.go similarity index 92% rename from vendor/github.com/shirou/gopsutil/v3/process/process_fallback.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_fallback.go index 1a5d0c4b4a7..e5410ea049c 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_fallback.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_fallback.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build !darwin && !linux && !freebsd && !openbsd && !windows && !solaris && !plan9 -// +build !darwin,!linux,!freebsd,!openbsd,!windows,!solaris,!plan9 package process @@ -7,9 +7,9 @@ import ( "context" "syscall" - "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" - "github.com/shirou/gopsutil/v3/net" + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/net" ) type Signal = syscall.Signal @@ -82,15 +82,15 @@ func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { return false, common.ErrNotImplementedError } -func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) UidsWithContext(ctx context.Context) ([]uint32, error) { return nil, common.ErrNotImplementedError } -func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GidsWithContext(ctx context.Context) ([]uint32, error) { return nil, common.ErrNotImplementedError } -func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GroupsWithContext(ctx context.Context) ([]uint32, error) { return nil, common.ErrNotImplementedError } @@ -166,7 +166,7 @@ func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionS return nil, common.ErrNotImplementedError } -func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { +func (p *Process) ConnectionsMaxWithContext(ctx context.Context, maxConn int) ([]net.ConnectionStat, error) { return nil, common.ErrNotImplementedError } diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd.go b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd.go similarity index 79% rename from vendor/github.com/shirou/gopsutil/v3/process/process_freebsd.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_freebsd.go index 40b10e14fcd..a67ac0ef0c0 100644 --- 
a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd.go @@ -1,19 +1,23 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build freebsd -// +build freebsd package process import ( "bytes" "context" + "encoding/binary" + "errors" "path/filepath" + "sort" "strconv" "strings" - cpu "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" - net "github.com/shirou/gopsutil/v3/net" "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/net" ) func pidsWithContext(ctx context.Context) ([]int32, error) { @@ -63,7 +67,24 @@ func (p *Process) NameWithContext(ctx context.Context) (string, error) { } func (p *Process) CwdWithContext(ctx context.Context) (string, error) { - return "", common.ErrNotImplementedError + mib := []int32{CTLKern, KernProc, KernProcCwd, p.Pid} + buf, length, err := common.CallSyscall(mib) + if err != nil { + return "", err + } + + if length != sizeOfKinfoFile { + return "", errors.New("unexpected size of KinfoFile") + } + + var k kinfoFile + br := bytes.NewReader(buf) + if err := common.Read(br, binary.LittleEndian, &k); err != nil { + return "", err + } + cwd := common.IntToString(k.Path[:]) + + return cwd, nil } func (p *Process) ExeWithContext(ctx context.Context) (string, error) { @@ -83,10 +104,7 @@ func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) { return "", err } ret := strings.FieldsFunc(string(buf), func(r rune) bool { - if r == '\u0000' { - return true - } - return false + return r == '\u0000' }) return strings.Join(ret, " "), nil @@ -157,40 +175,40 @@ func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { return strings.IndexByte(string(out), '+') != -1, nil } -func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) UidsWithContext(ctx context.Context) ([]uint32, error) { k, err := p.getKProc() if err != nil { return nil, err } - uids := make([]int32, 0, 3) + uids := make([]uint32, 0, 3) - uids = append(uids, int32(k.Ruid), int32(k.Uid), int32(k.Svuid)) + uids = append(uids, uint32(k.Ruid), uint32(k.Uid), uint32(k.Svuid)) return uids, nil } -func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GidsWithContext(ctx context.Context) ([]uint32, error) { k, err := p.getKProc() if err != nil { return nil, err } - gids := make([]int32, 0, 3) - gids = append(gids, int32(k.Rgid), int32(k.Ngroups), int32(k.Svgid)) + gids := make([]uint32, 0, 3) + gids = append(gids, uint32(k.Rgid), uint32(k.Ngroups), uint32(k.Svgid)) return gids, nil } -func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GroupsWithContext(ctx context.Context) ([]uint32, error) { k, err := p.getKProc() if err != nil { return nil, err } - groups := make([]int32, k.Ngroups) + groups := make([]uint32, k.Ngroups) for i := int16(0); i < k.Ngroups; i++ { - groups[i] = int32(k.Groups[i]) + groups[i] = uint32(k.Groups[i]) } return groups, nil @@ -270,18 +288,21 @@ func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, e } func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { - pids, err := common.CallPgrepWithContext(ctx, invoke, p.Pid) + procs, err := ProcessesWithContext(ctx) if err != nil { - return nil, err + return nil, nil } - ret := make([]*Process, 0, len(pids)) - for _, pid := range pids { - np, 
err := NewProcessWithContext(ctx, pid) + ret := make([]*Process, 0, len(procs)) + for _, proc := range procs { + ppid, err := proc.PpidWithContext(ctx) if err != nil { - return nil, err + continue + } + if ppid == p.Pid { + ret = append(ret, proc) } - ret = append(ret, np) } + sort.Slice(ret, func(i, j int) bool { return ret[i].Pid < ret[j].Pid }) return ret, nil } @@ -289,8 +310,8 @@ func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionS return net.ConnectionsPidWithContext(ctx, "all", p.Pid) } -func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { - return net.ConnectionsPidMaxWithContext(ctx, "all", p.Pid, max) +func (p *Process) ConnectionsMaxWithContext(ctx context.Context, maxConn int) ([]net.ConnectionStat, error) { + return net.ConnectionsPidMaxWithContext(ctx, "all", p.Pid, maxConn) } func ProcessesWithContext(ctx context.Context) ([]*Process, error) { @@ -331,7 +352,7 @@ func (p *Process) getKProc() (*KinfoProc, error) { return nil, err } if length != sizeOfKinfoProc { - return nil, err + return nil, errors.New("unexpected size of KinfoProc") } k, err := parseKinfoProc(buf) diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_386.go b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_386.go similarity index 83% rename from vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_386.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_386.go index 08ab333b435..0193ba25b0b 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_386.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_386.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_freebsd.go @@ -10,6 +11,7 @@ const ( KernProcProc = 8 KernProcPathname = 12 KernProcArgs = 7 + KernProcCwd = 42 ) const ( @@ -23,6 +25,7 @@ const ( const ( sizeOfKinfoVmentry = 0x488 sizeOfKinfoProc = 0x300 + sizeOfKinfoFile = 0x570 // TODO: should be changed by running on the target machine ) const ( @@ -190,3 +193,26 @@ type KinfoVmentry struct { X_kve_ispare [12]int32 Path [1024]int8 } + +// TODO: should be changed by running on the target machine +type kinfoFile struct { + Structsize int32 + Type int32 + Fd int32 + Ref_count int32 + Flags int32 + Pad0 int32 + Offset int64 + Anon0 [304]byte + Status uint16 + Pad1 uint16 + X_kf_ispare0 int32 + Cap_rights capRights + X_kf_cap_spare uint64 + Path [1024]int8 // changed from uint8 by hand +} + +// TODO: should be changed by running on the target machine +type capRights struct { + Rights [2]uint64 +} diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_amd64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_amd64.go new file mode 100644 index 00000000000..67970f64f55 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_amd64.go @@ -0,0 +1,224 @@ +// SPDX-License-Identifier: BSD-3-Clause +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
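A side note on the new FreeBSD CwdWithContext above: outside the vendored tree, the same kern.proc.cwd sysctl is reachable directly through golang.org/x/sys/unix. A minimal sketch, FreeBSD-only and with the kinfo_file decoding elided:

//go:build freebsd

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// "kern.proc.cwd" resolves to the MIB {CTL_KERN, KERN_PROC, KERN_PROC_CWD};
	// SysctlRaw appends the pid as the final MIB element and returns a single
	// kinfo_file record whose Path member holds the working directory.
	buf, err := unix.SysctlRaw("kern.proc.cwd", os.Getpid())
	if err != nil {
		fmt.Fprintln(os.Stderr, "sysctl kern.proc.cwd:", err)
		os.Exit(1)
	}
	fmt.Printf("kinfo_file record: %d bytes\n", len(buf))
}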
+// cgo -godefs types_freebsd.go + +package process + +const ( + CTLKern = 1 + KernProc = 14 + KernProcPID = 1 + KernProcProc = 8 + KernProcPathname = 12 + KernProcArgs = 7 + KernProcCwd = 42 +) + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 +) + +const ( + sizeOfKinfoVmentry = 0x488 + sizeOfKinfoProc = 0x440 + sizeOfKinfoFile = 0x570 +) + +const ( + SIDL = 1 + SRUN = 2 + SSLEEP = 3 + SSTOP = 4 + SZOMB = 5 + SWAIT = 6 + SLOCK = 7 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur int64 + Max int64 +} + +type KinfoProc struct { + Structsize int32 + Layout int32 + Args int64 /* pargs */ + Paddr int64 /* proc */ + Addr int64 /* user */ + Tracep int64 /* vnode */ + Textvp int64 /* vnode */ + Fd int64 /* filedesc */ + Vmspace int64 /* vmspace */ + Wchan int64 + Pid int32 + Ppid int32 + Pgid int32 + Tpgid int32 + Sid int32 + Tsid int32 + Jobc int16 + Spare_short1 int16 + Tdev_freebsd11 uint32 + Siglist [16]byte /* sigset */ + Sigmask [16]byte /* sigset */ + Sigignore [16]byte /* sigset */ + Sigcatch [16]byte /* sigset */ + Uid uint32 + Ruid uint32 + Svuid uint32 + Rgid uint32 + Svgid uint32 + Ngroups int16 + Spare_short2 int16 + Groups [16]uint32 + Size uint64 + Rssize int64 + Swrss int64 + Tsize int64 + Dsize int64 + Ssize int64 + Xstat uint16 + Acflag uint16 + Pctcpu uint32 + Estcpu uint32 + Slptime uint32 + Swtime uint32 + Cow uint32 + Runtime uint64 + Start Timeval + Childtime Timeval + Flag int64 + Kiflag int64 + Traceflag int32 + Stat int8 + Nice int8 + Lock int8 + Rqindex int8 + Oncpu_old uint8 + Lastcpu_old uint8 + Tdname [17]int8 + Wmesg [9]int8 + Login [18]int8 + Lockname [9]int8 + Comm [20]int8 + Emul [17]int8 + Loginclass [18]int8 + Moretdname [4]int8 + Sparestrings [46]int8 + Spareints [2]int32 + Tdev uint64 + Oncpu int32 + Lastcpu int32 + Tracer int32 + Flag2 int32 + Fibnum int32 + Cr_flags uint32 + Jid int32 + Numthreads int32 + Tid int32 + Pri Priority + Rusage Rusage + Rusage_ch Rusage + Pcb int64 /* pcb */ + Kstack int64 + Udata int64 + Tdaddr int64 /* thread */ + Pd int64 /* pwddesc, not accurate */ + Spareptrs [5]int64 + Sparelongs [12]int64 + Sflag int64 + Tdflags int64 +} + +type Priority struct { + Class uint8 + Level uint8 + Native uint8 + User uint8 +} + +type KinfoVmentry struct { + Structsize int32 + Type int32 + Start uint64 + End uint64 + Offset uint64 + Vn_fileid uint64 + Vn_fsid_freebsd11 uint32 + Flags int32 + Resident int32 + Private_resident int32 + Protection int32 + Ref_count int32 + Shadow_count int32 + Vn_type int32 + Vn_size uint64 + Vn_rdev_freebsd11 uint32 + Vn_mode uint16 + Status uint16 + Type_spec [8]byte + Vn_rdev uint64 + X_kve_ispare [8]int32 + Path [1024]int8 +} + +type kinfoFile struct { + Structsize int32 + Type int32 + Fd int32 + Ref_count int32 + Flags int32 + Pad0 int32 + Offset int64 + Anon0 [304]byte + Status uint16 + Pad1 uint16 + X_kf_ispare0 int32 + Cap_rights capRights + X_kf_cap_spare uint64 + Path [1024]int8 +} + +type capRights struct { + Rights [2]uint64 +} diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_arm.go 
b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_arm.go similarity index 83% rename from vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_arm.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_arm.go index 81ae0b9a8d4..6c4fbf69889 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_arm.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_arm.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_freebsd.go @@ -10,6 +11,7 @@ const ( KernProcProc = 8 KernProcPathname = 12 KernProcArgs = 7 + KernProcCwd = 42 ) const ( @@ -23,6 +25,7 @@ const ( const ( sizeOfKinfoVmentry = 0x488 sizeOfKinfoProc = 0x440 + sizeOfKinfoFile = 0x570 // TODO: should be changed by running on the target machine ) const ( @@ -190,3 +193,26 @@ type KinfoVmentry struct { X_kve_ispare [12]int32 Path [1024]int8 } + +// TODO: should be changed by running on the target machine +type kinfoFile struct { + Structsize int32 + Type int32 + Fd int32 + Ref_count int32 + Flags int32 + Pad0 int32 + Offset int64 + Anon0 [304]byte + Status uint16 + Pad1 uint16 + X_kf_ispare0 int32 + Cap_rights capRights + X_kf_cap_spare uint64 + Path [1024]int8 // changed from uint8 by hand +} + +// TODO: should be changed by running on the target machine +type capRights struct { + Rights [2]uint64 +} diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_arm64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_arm64.go similarity index 88% rename from vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_arm64.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_arm64.go index 73ac08201a0..dabdc3e3098 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_arm64.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_arm64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build freebsd && arm64 // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
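The 386 and arm kinfoFile mirrors above are marked "TODO: should be changed by running on the target machine", and CwdWithContext hard-fails whenever the sysctl result length differs from sizeOfKinfoFile. A cheap guard when regenerating these files is a size assertion in a test. A sketch, assuming it sits in the vendored package so it can see the unexported kinfoFile type and sizeOfKinfoFile constant:

//go:build freebsd

package process

import (
	"testing"
	"unsafe"
)

// Fails fast if the hand-maintained Go mirror drifts from the size that the
// runtime length check in CwdWithContext expects.
func TestKinfoFileMirrorSize(t *testing.T) {
	if got := int32(unsafe.Sizeof(kinfoFile{})); got != sizeOfKinfoFile {
		t.Fatalf("kinfoFile is %d bytes, want sizeOfKinfoFile=%d", got, sizeOfKinfoFile)
	}
}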
@@ -12,6 +13,7 @@ const ( KernProcProc = 8 KernProcPathname = 12 KernProcArgs = 7 + KernProcCwd = 42 ) const ( @@ -25,6 +27,7 @@ const ( const ( sizeOfKinfoVmentry = 0x488 sizeOfKinfoProc = 0x440 + sizeOfKinfoFile = 0x570 ) const ( @@ -200,3 +203,24 @@ type KinfoVmentry struct { X_kve_ispare [8]int32 Path [1024]uint8 } + +type kinfoFile struct { + Structsize int32 + Type int32 + Fd int32 + Ref_count int32 + Flags int32 + Pad0 int32 + Offset int64 + Anon0 [304]byte + Status uint16 + Pad1 uint16 + X_kf_ispare0 int32 + Cap_rights capRights + X_kf_cap_spare uint64 + Path [1024]int8 // changed from uint8 by hand +} + +type capRights struct { + Rights [2]uint64 +} diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_linux.go b/vendor/github.com/shirou/gopsutil/v4/process/process_linux.go similarity index 94% rename from vendor/github.com/shirou/gopsutil/v3/process/process_linux.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_linux.go index 557435b345d..68a8c88c4a9 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_linux.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_linux.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build linux -// +build linux package process @@ -12,15 +12,16 @@ import ( "math" "os" "path/filepath" + "sort" "strconv" "strings" "github.com/tklauser/go-sysconf" "golang.org/x/sys/unix" - "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" - "github.com/shirou/gopsutil/v3/net" + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/net" ) var pageSize = uint64(os.Getpagesize()) @@ -148,26 +149,26 @@ func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { return pgid == tpgid, nil } -func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) UidsWithContext(ctx context.Context) ([]uint32, error) { err := p.fillFromStatusWithContext(ctx) if err != nil { - return []int32{}, err + return []uint32{}, err } return p.uids, nil } -func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GidsWithContext(ctx context.Context) ([]uint32, error) { err := p.fillFromStatusWithContext(ctx) if err != nil { - return []int32{}, err + return []uint32{}, err } return p.gids, nil } -func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GroupsWithContext(ctx context.Context) ([]uint32, error) { err := p.fillFromStatusWithContext(ctx) if err != nil { - return []int32{}, err + return []uint32{}, err } return p.groups, nil } @@ -338,21 +339,34 @@ func (p *Process) PageFaultsWithContext(ctx context.Context) (*PageFaultsStat, e } func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { - pids, err := common.CallPgrepWithContext(ctx, invoke, p.Pid) + statFiles, err := filepath.Glob(common.HostProcWithContext(ctx, "[0-9]*/stat")) if err != nil { return nil, err } - if len(pids) == 0 { - return nil, ErrorNoChildren - } - ret := make([]*Process, 0, len(pids)) - for _, pid := range pids { - np, err := NewProcessWithContext(ctx, pid) + ret := make([]*Process, 0, len(statFiles)) + for _, statFile := range statFiles { + statContents, err := os.ReadFile(statFile) if err != nil { - return nil, err + continue + } + fields := splitProcStat(statContents) + pid, err := strconv.ParseInt(fields[1], 10, 32) + if err != nil { + continue + } + ppid, err := strconv.ParseInt(fields[4], 10, 32) + if 
err != nil { + continue + } + if int32(ppid) == p.Pid { + np, err := NewProcessWithContext(ctx, int32(pid)) + if err != nil { + continue + } + ret = append(ret, np) } - ret = append(ret, np) } + sort.Slice(ret, func(i, j int) bool { return ret[i].Pid < ret[j].Pid }) return ret, nil } @@ -373,8 +387,8 @@ func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionS return net.ConnectionsPidWithContext(ctx, "all", p.Pid) } -func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { - return net.ConnectionsPidMaxWithContext(ctx, "all", p.Pid, max) +func (p *Process) ConnectionsMaxWithContext(ctx context.Context, maxConn int) ([]net.ConnectionStat, error) { + return net.ConnectionsPidMaxWithContext(ctx, "all", p.Pid, maxConn) } func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]MemoryMapsStat, error) { @@ -399,7 +413,9 @@ func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]M // function of parsing a block getBlock := func(firstLine []string, block []string) (MemoryMapsStat, error) { m := MemoryMapsStat{} - m.Path = firstLine[len(firstLine)-1] + if len(firstLine) >= 6 { + m.Path = strings.Join(firstLine[5:], " ") + } for _, line := range block { if strings.Contains(line, "VmFlags") { @@ -727,8 +743,12 @@ func (p *Process) fillFromIOWithContext(ctx context.Context) (*IOCountersStat, e case "syscw": ret.WriteCount = t case "read_bytes": - ret.ReadBytes = t + ret.DiskReadBytes = t case "write_bytes": + ret.DiskWriteBytes = t + case "rchar": + ret.ReadBytes = t + case "wchar": ret.WriteBytes = t } } @@ -866,32 +886,32 @@ func (p *Process) fillFromStatusWithContext(ctx context.Context) error { } p.tgid = int32(pval) case "Uid": - p.uids = make([]int32, 0, 4) + p.uids = make([]uint32, 0, 4) for _, i := range strings.Split(value, "\t") { v, err := strconv.ParseInt(i, 10, 32) if err != nil { return err } - p.uids = append(p.uids, int32(v)) + p.uids = append(p.uids, uint32(v)) } case "Gid": - p.gids = make([]int32, 0, 4) + p.gids = make([]uint32, 0, 4) for _, i := range strings.Split(value, "\t") { v, err := strconv.ParseInt(i, 10, 32) if err != nil { return err } - p.gids = append(p.gids, int32(v)) + p.gids = append(p.gids, uint32(v)) } case "Groups": groups := strings.Fields(value) - p.groups = make([]int32, 0, len(groups)) + p.groups = make([]uint32, 0, len(groups)) for _, i := range groups { - v, err := strconv.ParseInt(i, 10, 32) + v, err := strconv.ParseUint(i, 10, 32) if err != nil { return err } - p.groups = append(p.groups, int32(v)) + p.groups = append(p.groups, uint32(v)) } case "Threads": v, err := strconv.ParseInt(value, 10, 32) @@ -1076,8 +1096,7 @@ func (p *Process) fillFromTIDStatWithContext(ctx context.Context, tid int32) (ui if err != nil { return 0, 0, nil, 0, 0, 0, nil, err } - ctime := (t / uint64(clockTicks)) + uint64(bootTime) - createTime := int64(ctime * 1000) + createTime := int64((t * 1000 / uint64(clockTicks)) + uint64(bootTime*1000)) rtpriority, err := strconv.ParseInt(fields[18], 10, 32) if err != nil { diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd.go b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd.go similarity index 85% rename from vendor/github.com/shirou/gopsutil/v3/process/process_openbsd.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_openbsd.go index a58c5eb113c..5e8a9e0b45e 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd.go +++ 
b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd -// +build openbsd package process @@ -7,17 +7,19 @@ import ( "bytes" "context" "encoding/binary" + "errors" "fmt" "io" "path/filepath" + "sort" "strconv" "strings" "unsafe" - cpu "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" - mem "github.com/shirou/gopsutil/v3/mem" - net "github.com/shirou/gopsutil/v3/net" + cpu "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + mem "github.com/shirou/gopsutil/v4/mem" + net "github.com/shirou/gopsutil/v4/net" "golang.org/x/sys/unix" ) @@ -68,7 +70,12 @@ func (p *Process) NameWithContext(ctx context.Context) (string, error) { } func (p *Process) CwdWithContext(ctx context.Context) (string, error) { - return "", common.ErrNotImplementedError + mib := []int32{CTLKern, KernProcCwd, p.Pid} + buf, _, err := common.CallSyscall(mib) + if err != nil { + return "", err + } + return common.ByteToString(buf), nil } func (p *Process) ExeWithContext(ctx context.Context) (string, error) { @@ -171,40 +178,40 @@ func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { return strings.IndexByte(string(out), '+') != -1, nil } -func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) UidsWithContext(ctx context.Context) ([]uint32, error) { k, err := p.getKProc() if err != nil { return nil, err } - uids := make([]int32, 0, 3) + uids := make([]uint32, 0, 3) - uids = append(uids, int32(k.Ruid), int32(k.Uid), int32(k.Svuid)) + uids = append(uids, uint32(k.Ruid), uint32(k.Uid), uint32(k.Svuid)) return uids, nil } -func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GidsWithContext(ctx context.Context) ([]uint32, error) { k, err := p.getKProc() if err != nil { return nil, err } - gids := make([]int32, 0, 3) - gids = append(gids, int32(k.Rgid), int32(k.Ngroups), int32(k.Svgid)) + gids := make([]uint32, 0, 3) + gids = append(gids, uint32(k.Rgid), uint32(k.Ngroups), uint32(k.Svgid)) return gids, nil } -func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GroupsWithContext(ctx context.Context) ([]uint32, error) { k, err := p.getKProc() if err != nil { return nil, err } - groups := make([]int32, k.Ngroups) + groups := make([]uint32, k.Ngroups) for i := int16(0); i < k.Ngroups; i++ { - groups[i] = int32(k.Groups[i]) + groups[i] = uint32(k.Groups[i]) } return groups, nil @@ -280,18 +287,21 @@ func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, e } func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { - pids, err := common.CallPgrepWithContext(ctx, invoke, p.Pid) + procs, err := ProcessesWithContext(ctx) if err != nil { - return nil, err + return nil, nil } - ret := make([]*Process, 0, len(pids)) - for _, pid := range pids { - np, err := NewProcessWithContext(ctx, pid) + ret := make([]*Process, 0, len(procs)) + for _, proc := range procs { + ppid, err := proc.PpidWithContext(ctx) if err != nil { - return nil, err + continue + } + if ppid == p.Pid { + ret = append(ret, proc) } - ret = append(ret, np) } + sort.Slice(ret, func(i, j int) bool { return ret[i].Pid < ret[j].Pid }) return ret, nil } @@ -299,7 +309,7 @@ func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionS return nil, common.ErrNotImplementedError } -func (p *Process) ConnectionsMaxWithContext(ctx 
context.Context, max int) ([]net.ConnectionStat, error) { +func (p *Process) ConnectionsMaxWithContext(ctx context.Context, maxConn int) ([]net.ConnectionStat, error) { return nil, common.ErrNotImplementedError } @@ -338,7 +348,7 @@ func (p *Process) getKProc() (*KinfoProc, error) { return nil, err } if length != sizeOfKinfoProc { - return nil, err + return nil, errors.New("unexpected size of KinfoProc") } k, err := parseKinfoProc(buf) diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_386.go b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_386.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_386.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_386.go index f4ed0249172..5b84706a7cf 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_386.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_386.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd && 386 -// +build openbsd,386 // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs process/types_openbsd.go @@ -14,6 +14,7 @@ const ( KernProcProc = 8 KernProcPathname = 12 KernProcArgs = 55 + KernProcCwd = 78 KernProcArgv = 1 KernProcEnv = 3 ) diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_amd64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_amd64.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_amd64.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_amd64.go index 8607422b5f5..3229bb32c28 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_amd64.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_amd64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_openbsd.go @@ -11,6 +12,7 @@ const ( KernProcProc = 8 KernProcPathname = 12 KernProcArgs = 55 + KernProcCwd = 78 KernProcArgv = 1 KernProcEnv = 3 ) diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_arm.go b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_arm.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm.go index b94429f2e96..6f74ce75637 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_arm.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd && arm -// +build openbsd,arm // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
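A cross-cutting note on this diff: on every platform, Uids, Gids, and Groups now return []uint32 instead of []int32, and the max parameter of ConnectionsMaxWithContext is renamed maxConn. Callers moving from v3 to v4 only need the element type updated; a minimal sketch against the public v4 API:

package main

import (
	"fmt"

	"github.com/shirou/gopsutil/v4/process"
)

func main() {
	p, err := process.NewProcess(1) // pid 1 as an example
	if err != nil {
		fmt.Println(err)
		return
	}
	uids, err := p.Uids() // []uint32 in v4; was []int32 in v3
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("uids:", uids)
}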
// cgo -godefs process/types_openbsd.go @@ -14,6 +14,7 @@ const ( KernProcProc = 8 KernProcPathname = 12 KernProcArgs = 55 + KernProcCwd = 78 KernProcArgv = 1 KernProcEnv = 3 ) diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_arm64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm64.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_arm64.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm64.go index a3291b8caf1..91045456258 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_arm64.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm64.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd && arm64 -// +build openbsd,arm64 // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs process/types_openbsd.go @@ -14,6 +14,7 @@ const ( KernProcProc = 8 KernProcPathname = 12 KernProcArgs = 55 + KernProcCwd = 78 KernProcArgv = 1 KernProcEnv = 3 ) diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_riscv64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_riscv64.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_riscv64.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_riscv64.go index 076f095eaae..e3e0d36a09e 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_riscv64.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_riscv64.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd && riscv64 -// +build openbsd,riscv64 // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs process/types_openbsd.go @@ -14,6 +14,7 @@ const ( KernProcProc = 8 KernProcPathname = 12 KernProcArgs = 55 + KernProcCwd = 78 KernProcArgv = 1 KernProcEnv = 3 ) diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_plan9.go b/vendor/github.com/shirou/gopsutil/v4/process/process_plan9.go similarity index 92% rename from vendor/github.com/shirou/gopsutil/v3/process/process_plan9.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_plan9.go index bc4bc062a99..c82e54a75bc 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_plan9.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_plan9.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build plan9 -// +build plan9 package process @@ -7,9 +7,9 @@ import ( "context" "syscall" - "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" - "github.com/shirou/gopsutil/v3/net" + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/net" ) type Signal = syscall.Note @@ -82,15 +82,15 @@ func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { return false, common.ErrNotImplementedError } -func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) UidsWithContext(ctx context.Context) ([]uint32, error) { return nil, common.ErrNotImplementedError } -func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GidsWithContext(ctx context.Context) ([]uint32, error) { return nil, common.ErrNotImplementedError } -func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GroupsWithContext(ctx context.Context) ([]uint32, error) { return nil, 
common.ErrNotImplementedError } @@ -166,7 +166,7 @@ func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionS return nil, common.ErrNotImplementedError } -func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { +func (p *Process) ConnectionsMaxWithContext(ctx context.Context, maxConn int) ([]net.ConnectionStat, error) { return nil, common.ErrNotImplementedError } diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_posix.go b/vendor/github.com/shirou/gopsutil/v4/process/process_posix.go new file mode 100644 index 00000000000..96c5e065c08 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_posix.go @@ -0,0 +1,187 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build linux || freebsd || openbsd || darwin || solaris + +package process + +import ( + "context" + "errors" + "fmt" + "os" + "os/user" + "path/filepath" + "strconv" + "strings" + "syscall" + + "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +type Signal = syscall.Signal + +// POSIX +func getTerminalMap() (map[uint64]string, error) { + ret := make(map[uint64]string) + var termfiles []string + + d, err := os.Open("/dev") + if err != nil { + return nil, err + } + defer d.Close() + + devnames, err := d.Readdirnames(-1) + if err != nil { + return nil, err + } + for _, devname := range devnames { + if strings.HasPrefix(devname, "/dev/tty") { + termfiles = append(termfiles, "/dev/tty/"+devname) + } + } + + var ptsnames []string + ptsd, err := os.Open("/dev/pts") + if err != nil { + ptsnames, _ = filepath.Glob("/dev/ttyp*") + if ptsnames == nil { + return nil, err + } + } + defer ptsd.Close() + + if ptsnames == nil { + defer ptsd.Close() + ptsnames, err = ptsd.Readdirnames(-1) + if err != nil { + return nil, err + } + for _, ptsname := range ptsnames { + termfiles = append(termfiles, "/dev/pts/"+ptsname) + } + } else { + termfiles = ptsnames + } + + for _, name := range termfiles { + stat := unix.Stat_t{} + if err = unix.Stat(name, &stat); err != nil { + return nil, err + } + rdev := uint64(stat.Rdev) + ret[rdev] = strings.Replace(name, "/dev", "", -1) + } + return ret, nil +} + +// isMount is a port of python's os.path.ismount() +// https://github.com/python/cpython/blob/08ff4369afca84587b1c82034af4e9f64caddbf2/Lib/posixpath.py#L186-L216 +// https://docs.python.org/3/library/os.path.html#os.path.ismount +func isMount(path string) bool { + // Check symlinkness with os.Lstat; unix.DT_LNK is not portable + fileInfo, err := os.Lstat(path) + if err != nil { + return false + } + if fileInfo.Mode()&os.ModeSymlink != 0 { + return false + } + var stat1 unix.Stat_t + if err := unix.Lstat(path, &stat1); err != nil { + return false + } + parent := filepath.Join(path, "..") + var stat2 unix.Stat_t + if err := unix.Lstat(parent, &stat2); err != nil { + return false + } + return stat1.Dev != stat2.Dev || stat1.Ino == stat2.Ino +} + +func PidExistsWithContext(ctx context.Context, pid int32) (bool, error) { + if pid <= 0 { + return false, fmt.Errorf("invalid pid %v", pid) + } + proc, err := os.FindProcess(int(pid)) + if err != nil { + return false, err + } + defer proc.Release() + + if isMount(common.HostProcWithContext(ctx)) { // if //proc exists and is mounted, check if //proc/ folder exists + _, err := os.Stat(common.HostProcWithContext(ctx, strconv.Itoa(int(pid)))) + if os.IsNotExist(err) { + return false, nil + } + return err == nil, err + } + + // procfs does not exist or is not mounted, check PID existence by 
signalling the pid + err = proc.Signal(syscall.Signal(0)) + if err == nil { + return true, nil + } + if errors.Is(err, os.ErrProcessDone) { + return false, nil + } + var errno syscall.Errno + if !errors.As(err, &errno) { + return false, err + } + switch errno { + case syscall.ESRCH: + return false, nil + case syscall.EPERM: + return true, nil + } + + return false, err +} + +func (p *Process) SendSignalWithContext(ctx context.Context, sig syscall.Signal) error { + process, err := os.FindProcess(int(p.Pid)) + if err != nil { + return err + } + defer process.Release() + + err = process.Signal(sig) + if err != nil { + return err + } + + return nil +} + +func (p *Process) SuspendWithContext(ctx context.Context) error { + return p.SendSignalWithContext(ctx, unix.SIGSTOP) +} + +func (p *Process) ResumeWithContext(ctx context.Context) error { + return p.SendSignalWithContext(ctx, unix.SIGCONT) +} + +func (p *Process) TerminateWithContext(ctx context.Context) error { + return p.SendSignalWithContext(ctx, unix.SIGTERM) +} + +func (p *Process) KillWithContext(ctx context.Context) error { + return p.SendSignalWithContext(ctx, unix.SIGKILL) +} + +func (p *Process) UsernameWithContext(ctx context.Context) (string, error) { + uids, err := p.UidsWithContext(ctx) + if err != nil { + return "", err + } + if len(uids) > 0 { + u, err := user.LookupId(strconv.Itoa(int(uids[0]))) + if err != nil { + return "", err + } + return u.Username, nil + } + return "", nil +} diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_solaris.go b/vendor/github.com/shirou/gopsutil/v4/process/process_solaris.go similarity index 94% rename from vendor/github.com/shirou/gopsutil/v3/process/process_solaris.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_solaris.go index dd4bd4760bd..5c8d4d3b1ef 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_solaris.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_solaris.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package process import ( @@ -7,9 +8,9 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" - "github.com/shirou/gopsutil/v3/net" + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/net" ) type MemoryMapsStat struct { @@ -95,15 +96,15 @@ func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { return false, common.ErrNotImplementedError } -func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) UidsWithContext(ctx context.Context) ([]uint32, error) { return nil, common.ErrNotImplementedError } -func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GidsWithContext(ctx context.Context) ([]uint32, error) { return nil, common.ErrNotImplementedError } -func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GroupsWithContext(ctx context.Context) ([]uint32, error) { return nil, common.ErrNotImplementedError } @@ -180,7 +181,7 @@ func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionS return nil, common.ErrNotImplementedError } -func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { +func (p *Process) ConnectionsMaxWithContext(ctx context.Context, maxConn int) ([]net.ConnectionStat, error) { return nil, common.ErrNotImplementedError } diff --git 
a/vendor/github.com/shirou/gopsutil/v4/process/process_windows.go b/vendor/github.com/shirou/gopsutil/v4/process/process_windows.go new file mode 100644 index 00000000000..012886d6cdb --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_windows.go @@ -0,0 +1,1180 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build windows + +package process + +import ( + "bufio" + "context" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "reflect" + "strings" + "syscall" + "time" + "unicode/utf16" + "unsafe" + + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/net" + "golang.org/x/sys/windows" +) + +type Signal = syscall.Signal + +var ( + modntdll = windows.NewLazySystemDLL("ntdll.dll") + procNtResumeProcess = modntdll.NewProc("NtResumeProcess") + procNtSuspendProcess = modntdll.NewProc("NtSuspendProcess") + + modpsapi = windows.NewLazySystemDLL("psapi.dll") + procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo") + procGetProcessImageFileNameW = modpsapi.NewProc("GetProcessImageFileNameW") + + advapi32 = windows.NewLazySystemDLL("advapi32.dll") + procLookupPrivilegeValue = advapi32.NewProc("LookupPrivilegeValueW") + procAdjustTokenPrivileges = advapi32.NewProc("AdjustTokenPrivileges") + + procQueryFullProcessImageNameW = common.Modkernel32.NewProc("QueryFullProcessImageNameW") + procGetPriorityClass = common.Modkernel32.NewProc("GetPriorityClass") + procGetProcessIoCounters = common.Modkernel32.NewProc("GetProcessIoCounters") + procGetNativeSystemInfo = common.Modkernel32.NewProc("GetNativeSystemInfo") + procGetProcessHandleCount = common.Modkernel32.NewProc("GetProcessHandleCount") + + processorArchitecture uint +) + +const processQueryInformation = windows.PROCESS_QUERY_LIMITED_INFORMATION + +type systemProcessorInformation struct { + ProcessorArchitecture uint16 + ProcessorLevel uint16 + ProcessorRevision uint16 + Reserved uint16 + ProcessorFeatureBits uint16 +} + +type systemInfo struct { + wProcessorArchitecture uint16 + wReserved uint16 + dwpageSize uint32 + lpMinimumApplicationAddress uintptr + lpMaximumApplicationAddress uintptr + dwActiveProcessorMask uintptr + dwNumberOfProcessors uint32 + dwProcessorType uint32 + dwAllocationGranularity uint32 + wProcessorLevel uint16 + wProcessorRevision uint16 +} + +// Memory_info_ex is different between OSes +type MemoryInfoExStat struct{} + +type MemoryMapsStat struct{} + +// ioCounters is an equivalent representation of IO_COUNTERS in the Windows API. 
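+// Editorial note: IOCountersWithContext below surfaces ReadOperationCount /
+// WriteOperationCount as ReadCount/WriteCount and ReadTransferCount /
+// WriteTransferCount as ReadBytes/WriteBytes; the Other* members are dropped.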
+// https://docs.microsoft.com/windows/win32/api/winnt/ns-winnt-io_counters +type ioCounters struct { + ReadOperationCount uint64 + WriteOperationCount uint64 + OtherOperationCount uint64 + ReadTransferCount uint64 + WriteTransferCount uint64 + OtherTransferCount uint64 +} + +type processBasicInformation32 struct { + Reserved1 uint32 + PebBaseAddress uint32 + Reserved2 uint32 + Reserved3 uint32 + UniqueProcessId uint32 + Reserved4 uint32 +} + +type processBasicInformation64 struct { + Reserved1 uint64 + PebBaseAddress uint64 + Reserved2 uint64 + Reserved3 uint64 + UniqueProcessId uint64 + Reserved4 uint64 +} + +type processEnvironmentBlock32 struct { + Reserved1 [2]uint8 + BeingDebugged uint8 + Reserved2 uint8 + Reserved3 [2]uint32 + Ldr uint32 + ProcessParameters uint32 + // More fields which we don't use so far +} + +type processEnvironmentBlock64 struct { + Reserved1 [2]uint8 + BeingDebugged uint8 + Reserved2 uint8 + _ [4]uint8 // padding, since we are 64 bit, the next pointer is 64 bit aligned (when compiling for 32 bit, this is not the case without manual padding) + Reserved3 [2]uint64 + Ldr uint64 + ProcessParameters uint64 + // More fields which we don't use so far +} + +type rtlUserProcessParameters32 struct { + Reserved1 [16]uint8 + ConsoleHandle uint32 + ConsoleFlags uint32 + StdInputHandle uint32 + StdOutputHandle uint32 + StdErrorHandle uint32 + CurrentDirectoryPathNameLength uint16 + _ uint16 // Max Length + CurrentDirectoryPathAddress uint32 + CurrentDirectoryHandle uint32 + DllPathNameLength uint16 + _ uint16 // Max Length + DllPathAddress uint32 + ImagePathNameLength uint16 + _ uint16 // Max Length + ImagePathAddress uint32 + CommandLineLength uint16 + _ uint16 // Max Length + CommandLineAddress uint32 + EnvironmentAddress uint32 + // More fields which we don't use so far +} + +type rtlUserProcessParameters64 struct { + Reserved1 [16]uint8 + ConsoleHandle uint64 + ConsoleFlags uint64 + StdInputHandle uint64 + StdOutputHandle uint64 + StdErrorHandle uint64 + CurrentDirectoryPathNameLength uint16 + _ uint16 // Max Length + _ uint32 // Padding + CurrentDirectoryPathAddress uint64 + CurrentDirectoryHandle uint64 + DllPathNameLength uint16 + _ uint16 // Max Length + _ uint32 // Padding + DllPathAddress uint64 + ImagePathNameLength uint16 + _ uint16 // Max Length + _ uint32 // Padding + ImagePathAddress uint64 + CommandLineLength uint16 + _ uint16 // Max Length + _ uint32 // Padding + CommandLineAddress uint64 + EnvironmentAddress uint64 + // More fields which we don't use so far +} + +type winLUID struct { + LowPart winDWord + HighPart winLong +} + +// LUID_AND_ATTRIBUTES +type winLUIDAndAttributes struct { + Luid winLUID + Attributes winDWord +} + +// TOKEN_PRIVILEGES +type winTokenPrivileges struct { + PrivilegeCount winDWord + Privileges [1]winLUIDAndAttributes +} + +type ( + winLong int32 + winDWord uint32 +) + +func init() { + var systemInfo systemInfo + + procGetNativeSystemInfo.Call(uintptr(unsafe.Pointer(&systemInfo))) + processorArchitecture = uint(systemInfo.wProcessorArchitecture) + + // enable SeDebugPrivilege https://github.com/midstar/proci/blob/6ec79f57b90ba3d9efa2a7b16ef9c9369d4be875/proci_windows.go#L80-L119 + handle, err := syscall.GetCurrentProcess() + if err != nil { + return + } + + var token syscall.Token + err = syscall.OpenProcessToken(handle, 0x0028, &token) + if err != nil { + return + } + defer token.Close() + + tokenPrivileges := winTokenPrivileges{PrivilegeCount: 1} + lpName := syscall.StringToUTF16("SeDebugPrivilege") + ret, _, _ := 
procLookupPrivilegeValue.Call( + 0, + uintptr(unsafe.Pointer(&lpName[0])), + uintptr(unsafe.Pointer(&tokenPrivileges.Privileges[0].Luid))) + if ret == 0 { + return + } + + tokenPrivileges.Privileges[0].Attributes = 0x00000002 // SE_PRIVILEGE_ENABLED + + procAdjustTokenPrivileges.Call( + uintptr(token), + 0, + uintptr(unsafe.Pointer(&tokenPrivileges)), + uintptr(unsafe.Sizeof(tokenPrivileges)), + 0, + 0) +} + +func pidsWithContext(ctx context.Context) ([]int32, error) { + // inspired by https://gist.github.com/henkman/3083408 + // and https://github.com/giampaolo/psutil/blob/1c3a15f637521ba5c0031283da39c733fda53e4c/psutil/arch/windows/process_info.c#L315-L329 + var ret []int32 + var read uint32 = 0 + var psSize uint32 = 1024 + const dwordSize uint32 = 4 + + for { + ps := make([]uint32, psSize) + if err := windows.EnumProcesses(ps, &read); err != nil { + return nil, err + } + if uint32(len(ps)) == read/dwordSize { // ps buffer was too small to host every results, retry with a bigger one + psSize += 1024 + continue + } + for _, pid := range ps[:read/dwordSize] { + ret = append(ret, int32(pid)) + } + return ret, nil + + } +} + +func PidExistsWithContext(ctx context.Context, pid int32) (bool, error) { + if pid == 0 { // special case for pid 0 System Idle Process + return true, nil + } + if pid < 0 { + return false, fmt.Errorf("invalid pid %v", pid) + } + if pid%4 != 0 { + // OpenProcess will succeed even on non-existing pid here https://devblogs.microsoft.com/oldnewthing/20080606-00/?p=22043 + // so we list every pid just to be sure and be future-proof + pids, err := PidsWithContext(ctx) + if err != nil { + return false, err + } + for _, i := range pids { + if i == pid { + return true, err + } + } + return false, err + } + h, err := windows.OpenProcess(windows.SYNCHRONIZE, false, uint32(pid)) + if err == windows.ERROR_ACCESS_DENIED { + return true, nil + } + if err == windows.ERROR_INVALID_PARAMETER { + return false, nil + } + if err != nil { + return false, err + } + defer windows.CloseHandle(h) + event, err := windows.WaitForSingleObject(h, 0) + return event == uint32(windows.WAIT_TIMEOUT), err +} + +func (p *Process) PpidWithContext(ctx context.Context) (int32, error) { + // if cached already, return from cache + cachedPpid := p.getPpid() + if cachedPpid != 0 { + return cachedPpid, nil + } + + ppid, _, _, err := getFromSnapProcess(p.Pid) + if err != nil { + return 0, err + } + + // no errors and not cached already, so cache it + p.setPpid(ppid) + + return ppid, nil +} + +func (p *Process) NameWithContext(ctx context.Context) (string, error) { + if p.Pid == 0 { + return "System Idle Process", nil + } + if p.Pid == 4 { + return "System", nil + } + + exe, err := p.ExeWithContext(ctx) + if err != nil { + return "", fmt.Errorf("could not get Name: %s", err) + } + + return filepath.Base(exe), nil +} + +func (p *Process) TgidWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) ExeWithContext(ctx context.Context) (string, error) { + c, err := windows.OpenProcess(processQueryInformation, false, uint32(p.Pid)) + if err != nil { + return "", err + } + defer windows.CloseHandle(c) + buf := make([]uint16, syscall.MAX_LONG_PATH) + size := uint32(syscall.MAX_LONG_PATH) + if err := procQueryFullProcessImageNameW.Find(); err == nil { // Vista+ + ret, _, err := procQueryFullProcessImageNameW.Call( + uintptr(c), + uintptr(0), + uintptr(unsafe.Pointer(&buf[0])), + uintptr(unsafe.Pointer(&size))) + if ret == 0 { + return "", err + } + return 
windows.UTF16ToString(buf[:]), nil + } + // XP fallback + ret, _, err := procGetProcessImageFileNameW.Call(uintptr(c), uintptr(unsafe.Pointer(&buf[0])), uintptr(size)) + if ret == 0 { + return "", err + } + return common.ConvertDOSPath(windows.UTF16ToString(buf[:])), nil +} + +func (p *Process) CmdlineWithContext(_ context.Context) (string, error) { + cmdline, err := getProcessCommandLine(p.Pid) + if err != nil { + return "", fmt.Errorf("could not get CommandLine: %s", err) + } + return cmdline, nil +} + +func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) { + cmdline, err := p.CmdlineWithContext(ctx) + if err != nil { + return nil, err + } + return strings.Split(cmdline, " "), nil +} + +func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) { + ru, err := getRusage(p.Pid) + if err != nil { + return 0, fmt.Errorf("could not get CreationDate: %s", err) + } + + return ru.CreationTime.Nanoseconds() / 1000000, nil +} + +func (p *Process) CwdWithContext(_ context.Context) (string, error) { + h, err := windows.OpenProcess(processQueryInformation|windows.PROCESS_VM_READ, false, uint32(p.Pid)) + if err == windows.ERROR_ACCESS_DENIED || err == windows.ERROR_INVALID_PARAMETER { + return "", nil + } + if err != nil { + return "", err + } + defer syscall.CloseHandle(syscall.Handle(h)) + + procIs32Bits := is32BitProcess(h) + + if procIs32Bits { + userProcParams, err := getUserProcessParams32(h) + if err != nil { + return "", err + } + if userProcParams.CurrentDirectoryPathNameLength > 0 { + cwd := readProcessMemory(syscall.Handle(h), procIs32Bits, uint64(userProcParams.CurrentDirectoryPathAddress), uint(userProcParams.CurrentDirectoryPathNameLength)) + if len(cwd) != int(userProcParams.CurrentDirectoryPathNameLength) { + return "", errors.New("cannot read current working directory") + } + + return convertUTF16ToString(cwd), nil + } + } else { + userProcParams, err := getUserProcessParams64(h) + if err != nil { + return "", err + } + if userProcParams.CurrentDirectoryPathNameLength > 0 { + cwd := readProcessMemory(syscall.Handle(h), procIs32Bits, userProcParams.CurrentDirectoryPathAddress, uint(userProcParams.CurrentDirectoryPathNameLength)) + if len(cwd) != int(userProcParams.CurrentDirectoryPathNameLength) { + return "", errors.New("cannot read current working directory") + } + + return convertUTF16ToString(cwd), nil + } + } + + // if we reach here, we have no cwd + return "", nil +} + +func (p *Process) StatusWithContext(ctx context.Context) ([]string, error) { + return []string{""}, common.ErrNotImplementedError +} + +func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { + return false, common.ErrNotImplementedError +} + +func (p *Process) UsernameWithContext(ctx context.Context) (string, error) { + pid := p.Pid + c, err := windows.OpenProcess(processQueryInformation, false, uint32(pid)) + if err != nil { + return "", err + } + defer windows.CloseHandle(c) + + var token syscall.Token + err = syscall.OpenProcessToken(syscall.Handle(c), syscall.TOKEN_QUERY, &token) + if err != nil { + return "", err + } + defer token.Close() + tokenUser, err := token.GetTokenUser() + if err != nil { + return "", err + } + + user, domain, _, err := tokenUser.User.Sid.LookupAccount("") + return domain + "\\" + user, err +} + +func (p *Process) UidsWithContext(ctx context.Context) ([]uint32, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) GidsWithContext(ctx context.Context) ([]uint32, error) { + return nil, 
common.ErrNotImplementedError +} + +func (p *Process) GroupsWithContext(ctx context.Context) ([]uint32, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) TerminalWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +// priorityClasses maps a win32 priority class to its WMI equivalent Win32_Process.Priority +// https://docs.microsoft.com/en-us/windows/desktop/api/processthreadsapi/nf-processthreadsapi-getpriorityclass +// https://docs.microsoft.com/en-us/windows/desktop/cimwin32prov/win32-process +var priorityClasses = map[int]int32{ + 0x00008000: 10, // ABOVE_NORMAL_PRIORITY_CLASS + 0x00004000: 6, // BELOW_NORMAL_PRIORITY_CLASS + 0x00000080: 13, // HIGH_PRIORITY_CLASS + 0x00000040: 4, // IDLE_PRIORITY_CLASS + 0x00000020: 8, // NORMAL_PRIORITY_CLASS + 0x00000100: 24, // REALTIME_PRIORITY_CLASS +} + +func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { + c, err := windows.OpenProcess(processQueryInformation, false, uint32(p.Pid)) + if err != nil { + return 0, err + } + defer windows.CloseHandle(c) + ret, _, err := procGetPriorityClass.Call(uintptr(c)) + if ret == 0 { + return 0, err + } + priority, ok := priorityClasses[int(ret)] + if !ok { + return 0, fmt.Errorf("unknown priority class %v", ret) + } + return priority, nil +} + +func (p *Process) IOniceWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) RlimitWithContext(ctx context.Context) ([]RlimitStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) RlimitUsageWithContext(ctx context.Context, gatherUsed bool) ([]RlimitStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) { + c, err := windows.OpenProcess(processQueryInformation, false, uint32(p.Pid)) + if err != nil { + return nil, err + } + defer windows.CloseHandle(c) + var ioCounters ioCounters + ret, _, err := procGetProcessIoCounters.Call(uintptr(c), uintptr(unsafe.Pointer(&ioCounters))) + if ret == 0 { + return nil, err + } + stats := &IOCountersStat{ + ReadCount: ioCounters.ReadOperationCount, + ReadBytes: ioCounters.ReadTransferCount, + WriteCount: ioCounters.WriteOperationCount, + WriteBytes: ioCounters.WriteTransferCount, + } + + return stats, nil +} + +func (p *Process) NumCtxSwitchesWithContext(ctx context.Context) (*NumCtxSwitchesStat, error) { + return nil, common.ErrNotImplementedError +} + +// NumFDsWithContext returns the number of handles for a process on Windows, +// not the number of file descriptors (FDs). 
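+// Editorial note: the count comes from GetProcessHandleCount, which tallies
+// open kernel handles of every type (files, events, registry keys, threads,
+// ...), so it is only loosely comparable to a Unix file-descriptor count.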
+func (p *Process) NumFDsWithContext(ctx context.Context) (int32, error) { + handle, err := windows.OpenProcess(processQueryInformation, false, uint32(p.Pid)) + if err != nil { + return 0, err + } + defer windows.CloseHandle(handle) + + var handleCount uint32 + ret, _, err := procGetProcessHandleCount.Call(uintptr(handle), uintptr(unsafe.Pointer(&handleCount))) + if ret == 0 { + return 0, err + } + return int32(handleCount), nil +} + +func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { + ppid, ret, _, err := getFromSnapProcess(p.Pid) + if err != nil { + return 0, err + } + + // if no errors and not cached already, cache ppid + p.parent = ppid + if 0 == p.getPpid() { + p.setPpid(ppid) + } + + return ret, nil +} + +func (p *Process) ThreadsWithContext(ctx context.Context) (map[int32]*cpu.TimesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) { + sysTimes, err := getProcessCPUTimes(p.Pid) + if err != nil { + return nil, err + } + + // User and kernel times are represented as a FILETIME structure + // which contains a 64-bit value representing the number of + // 100-nanosecond intervals since January 1, 1601 (UTC): + // http://msdn.microsoft.com/en-us/library/ms724284(VS.85).aspx + // To convert it into a float representing the seconds that the + // process has executed in user/kernel mode I borrowed the code + // below from psutil's _psutil_windows.c, and in turn from Python's + // Modules/posixmodule.c + + user := float64(sysTimes.UserTime.HighDateTime)*429.4967296 + float64(sysTimes.UserTime.LowDateTime)*1e-7 + kernel := float64(sysTimes.KernelTime.HighDateTime)*429.4967296 + float64(sysTimes.KernelTime.LowDateTime)*1e-7 + + return &cpu.TimesStat{ + User: user, + System: kernel, + }, nil +} + +func (p *Process) CPUAffinityWithContext(ctx context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) { + mem, err := getMemoryInfo(p.Pid) + if err != nil { + return nil, err + } + + ret := &MemoryInfoStat{ + RSS: uint64(mem.WorkingSetSize), + VMS: uint64(mem.PagefileUsage), + } + + return ret, nil +} + +func (p *Process) MemoryInfoExWithContext(ctx context.Context) (*MemoryInfoExStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) PageFaultsWithContext(ctx context.Context) (*PageFaultsStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { + out := []*Process{} + snap, err := windows.CreateToolhelp32Snapshot(windows.TH32CS_SNAPPROCESS, uint32(0)) + if err != nil { + return out, err + } + defer windows.CloseHandle(snap) + var pe32 windows.ProcessEntry32 + pe32.Size = uint32(unsafe.Sizeof(pe32)) + if err := windows.Process32First(snap, &pe32); err != nil { + return out, err + } + for { + if pe32.ParentProcessID == uint32(p.Pid) { + p, err := NewProcessWithContext(ctx, int32(pe32.ProcessID)) + if err == nil { + out = append(out, p) + } + } + if err = windows.Process32Next(snap, &pe32); err != nil { + break + } + } + return out, nil +} + +func (p *Process) OpenFilesWithContext(ctx context.Context) ([]OpenFilesStat, error) { + files := make([]OpenFilesStat, 0) + fileExists := make(map[string]bool) + + process, err := windows.OpenProcess(common.ProcessQueryInformation, false, uint32(p.Pid)) + if err != nil { + return nil, err + } + + buffer := make([]byte, 
1024) + var size uint32 + + st := common.CallWithExpandingBuffer( + func() common.NtStatus { + return common.NtQuerySystemInformation( + common.SystemExtendedHandleInformationClass, + &buffer[0], + uint32(len(buffer)), + &size, + ) + }, + &buffer, + &size, + ) + if st.IsError() { + return nil, st.Error() + } + + handlesList := (*common.SystemExtendedHandleInformation)(unsafe.Pointer(&buffer[0])) + handles := make([]common.SystemExtendedHandleTableEntryInformation, int(handlesList.NumberOfHandles)) + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&handles)) + hdr.Data = uintptr(unsafe.Pointer(&handlesList.Handles[0])) + + currentProcess, err := windows.GetCurrentProcess() + if err != nil { + return nil, err + } + + for _, handle := range handles { + var file uintptr + if int32(handle.UniqueProcessId) != p.Pid { + continue + } + if windows.DuplicateHandle(process, windows.Handle(handle.HandleValue), currentProcess, (*windows.Handle)(&file), + 0, true, windows.DUPLICATE_SAME_ACCESS) != nil { + continue + } + // release the new handle + defer windows.CloseHandle(windows.Handle(file)) + + fileType, err := windows.GetFileType(windows.Handle(file)) + if err != nil || fileType != windows.FILE_TYPE_DISK { + continue + } + + var fileName string + ch := make(chan struct{}) + + go func() { + var buf [syscall.MAX_LONG_PATH]uint16 + n, err := windows.GetFinalPathNameByHandle(windows.Handle(file), &buf[0], syscall.MAX_LONG_PATH, 0) + if err != nil { + return + } + + fileName = string(utf16.Decode(buf[:n])) + ch <- struct{}{} + }() + + select { + case <-time.NewTimer(100 * time.Millisecond).C: + continue + case <-ch: + fileInfo, err := os.Stat(fileName) + if err != nil || fileInfo.IsDir() { + continue + } + + if _, exists := fileExists[fileName]; !exists { + files = append(files, OpenFilesStat{ + Path: fileName, + Fd: uint64(file), + }) + fileExists[fileName] = true + } + case <-ctx.Done(): + return files, ctx.Err() + } + } + + return files, nil +} + +func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) { + return net.ConnectionsPidWithContext(ctx, "all", p.Pid) +} + +func (p *Process) ConnectionsMaxWithContext(ctx context.Context, maxConn int) ([]net.ConnectionStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]MemoryMapsStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) SendSignalWithContext(ctx context.Context, sig syscall.Signal) error { + return common.ErrNotImplementedError +} + +func (p *Process) SuspendWithContext(ctx context.Context) error { + c, err := windows.OpenProcess(windows.PROCESS_SUSPEND_RESUME, false, uint32(p.Pid)) + if err != nil { + return err + } + defer windows.CloseHandle(c) + + r1, _, _ := procNtSuspendProcess.Call(uintptr(c)) + if r1 != 0 { + // See https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-erref/596a1078-e883-4972-9bbc-49e60bebca55 + return fmt.Errorf("NtStatus='0x%.8X'", r1) + } + + return nil +} + +func (p *Process) ResumeWithContext(ctx context.Context) error { + c, err := windows.OpenProcess(windows.PROCESS_SUSPEND_RESUME, false, uint32(p.Pid)) + if err != nil { + return err + } + defer windows.CloseHandle(c) + + r1, _, _ := procNtResumeProcess.Call(uintptr(c)) + if r1 != 0 { + // See https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-erref/596a1078-e883-4972-9bbc-49e60bebca55 + return fmt.Errorf("NtStatus='0x%.8X'", r1) + } + + return nil +} + +func (p *Process) 
TerminateWithContext(ctx context.Context) error { + proc, err := windows.OpenProcess(windows.PROCESS_TERMINATE, false, uint32(p.Pid)) + if err != nil { + return err + } + err = windows.TerminateProcess(proc, 0) + windows.CloseHandle(proc) + return err +} + +func (p *Process) KillWithContext(ctx context.Context) error { + process, err := os.FindProcess(int(p.Pid)) + if err != nil { + return err + } + defer process.Release() + return process.Kill() +} + +func (p *Process) EnvironWithContext(ctx context.Context) ([]string, error) { + envVars, err := getProcessEnvironmentVariables(p.Pid, ctx) + if err != nil { + return nil, fmt.Errorf("could not get environment variables: %s", err) + } + return envVars, nil +} + +// retrieve Ppid in a thread-safe manner +func (p *Process) getPpid() int32 { + p.parentMutex.RLock() + defer p.parentMutex.RUnlock() + return p.parent +} + +// cache Ppid in a thread-safe manner (WINDOWS ONLY) +// see https://psutil.readthedocs.io/en/latest/#psutil.Process.ppid +func (p *Process) setPpid(ppid int32) { + p.parentMutex.Lock() + defer p.parentMutex.Unlock() + p.parent = ppid +} + +func getFromSnapProcess(pid int32) (int32, int32, string, error) { + snap, err := windows.CreateToolhelp32Snapshot(windows.TH32CS_SNAPPROCESS, uint32(pid)) + if err != nil { + return 0, 0, "", err + } + defer windows.CloseHandle(snap) + var pe32 windows.ProcessEntry32 + pe32.Size = uint32(unsafe.Sizeof(pe32)) + if err = windows.Process32First(snap, &pe32); err != nil { + return 0, 0, "", err + } + for { + if pe32.ProcessID == uint32(pid) { + szexe := windows.UTF16ToString(pe32.ExeFile[:]) + return int32(pe32.ParentProcessID), int32(pe32.Threads), szexe, nil + } + if err = windows.Process32Next(snap, &pe32); err != nil { + break + } + } + return 0, 0, "", fmt.Errorf("couldn't find pid: %d", pid) +} + +func ProcessesWithContext(ctx context.Context) ([]*Process, error) { + out := []*Process{} + + pids, err := PidsWithContext(ctx) + if err != nil { + return out, fmt.Errorf("could not get Processes %s", err) + } + + for _, pid := range pids { + p, err := NewProcessWithContext(ctx, pid) + if err != nil { + continue + } + out = append(out, p) + } + + return out, nil +} + +func getRusage(pid int32) (*windows.Rusage, error) { + var CPU windows.Rusage + + c, err := windows.OpenProcess(processQueryInformation, false, uint32(pid)) + if err != nil { + return nil, err + } + defer windows.CloseHandle(c) + + if err := windows.GetProcessTimes(c, &CPU.CreationTime, &CPU.ExitTime, &CPU.KernelTime, &CPU.UserTime); err != nil { + return nil, err + } + + return &CPU, nil +} + +func getMemoryInfo(pid int32) (PROCESS_MEMORY_COUNTERS, error) { + var mem PROCESS_MEMORY_COUNTERS + c, err := windows.OpenProcess(processQueryInformation, false, uint32(pid)) + if err != nil { + return mem, err + } + defer windows.CloseHandle(c) + if err := getProcessMemoryInfo(c, &mem); err != nil { + return mem, err + } + + return mem, err +} + +func getProcessMemoryInfo(h windows.Handle, mem *PROCESS_MEMORY_COUNTERS) (err error) { + r1, _, e1 := syscall.Syscall(procGetProcessMemoryInfo.Addr(), 3, uintptr(h), uintptr(unsafe.Pointer(mem)), uintptr(unsafe.Sizeof(*mem))) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +type SYSTEM_TIMES struct { + CreateTime syscall.Filetime + ExitTime syscall.Filetime + KernelTime syscall.Filetime + UserTime syscall.Filetime +} + +func getProcessCPUTimes(pid int32) (SYSTEM_TIMES, error) { + var times SYSTEM_TIMES + + h, err := 
windows.OpenProcess(processQueryInformation, false, uint32(pid)) + if err != nil { + return times, err + } + defer windows.CloseHandle(h) + + err = syscall.GetProcessTimes( + syscall.Handle(h), + &times.CreateTime, + &times.ExitTime, + &times.KernelTime, + &times.UserTime, + ) + + return times, err +} + +func getUserProcessParams32(handle windows.Handle) (rtlUserProcessParameters32, error) { + pebAddress, err := queryPebAddress(syscall.Handle(handle), true) + if err != nil { + return rtlUserProcessParameters32{}, fmt.Errorf("cannot locate process PEB: %w", err) + } + + buf := readProcessMemory(syscall.Handle(handle), true, pebAddress, uint(unsafe.Sizeof(processEnvironmentBlock32{}))) + if len(buf) != int(unsafe.Sizeof(processEnvironmentBlock32{})) { + return rtlUserProcessParameters32{}, fmt.Errorf("cannot read process PEB") + } + peb := (*processEnvironmentBlock32)(unsafe.Pointer(&buf[0])) + userProcessAddress := uint64(peb.ProcessParameters) + buf = readProcessMemory(syscall.Handle(handle), true, userProcessAddress, uint(unsafe.Sizeof(rtlUserProcessParameters32{}))) + if len(buf) != int(unsafe.Sizeof(rtlUserProcessParameters32{})) { + return rtlUserProcessParameters32{}, fmt.Errorf("cannot read user process parameters") + } + return *(*rtlUserProcessParameters32)(unsafe.Pointer(&buf[0])), nil +} + +func getUserProcessParams64(handle windows.Handle) (rtlUserProcessParameters64, error) { + pebAddress, err := queryPebAddress(syscall.Handle(handle), false) + if err != nil { + return rtlUserProcessParameters64{}, fmt.Errorf("cannot locate process PEB: %w", err) + } + + buf := readProcessMemory(syscall.Handle(handle), false, pebAddress, uint(unsafe.Sizeof(processEnvironmentBlock64{}))) + if len(buf) != int(unsafe.Sizeof(processEnvironmentBlock64{})) { + return rtlUserProcessParameters64{}, fmt.Errorf("cannot read process PEB") + } + peb := (*processEnvironmentBlock64)(unsafe.Pointer(&buf[0])) + userProcessAddress := peb.ProcessParameters + buf = readProcessMemory(syscall.Handle(handle), false, userProcessAddress, uint(unsafe.Sizeof(rtlUserProcessParameters64{}))) + if len(buf) != int(unsafe.Sizeof(rtlUserProcessParameters64{})) { + return rtlUserProcessParameters64{}, fmt.Errorf("cannot read user process parameters") + } + return *(*rtlUserProcessParameters64)(unsafe.Pointer(&buf[0])), nil +} + +func is32BitProcess(h windows.Handle) bool { + const ( + PROCESSOR_ARCHITECTURE_INTEL = 0 + PROCESSOR_ARCHITECTURE_ARM = 5 + PROCESSOR_ARCHITECTURE_ARM64 = 12 + PROCESSOR_ARCHITECTURE_IA64 = 6 + PROCESSOR_ARCHITECTURE_AMD64 = 9 + ) + + var procIs32Bits bool + switch processorArchitecture { + case PROCESSOR_ARCHITECTURE_INTEL, PROCESSOR_ARCHITECTURE_ARM: + procIs32Bits = true + case PROCESSOR_ARCHITECTURE_ARM64, PROCESSOR_ARCHITECTURE_IA64, PROCESSOR_ARCHITECTURE_AMD64: + var wow64 uint + + ret, _, _ := common.ProcNtQueryInformationProcess.Call( + uintptr(h), + uintptr(common.ProcessWow64Information), + uintptr(unsafe.Pointer(&wow64)), + uintptr(unsafe.Sizeof(wow64)), + uintptr(0), + ) + if int(ret) >= 0 { + if wow64 != 0 { + procIs32Bits = true + } + } else { + // if the OS does not support the call, we fallback into the bitness of the app + if unsafe.Sizeof(wow64) == 4 { + procIs32Bits = true + } + } + + default: + // for other unknown platforms, we rely on process platform + if unsafe.Sizeof(processorArchitecture) == 8 { + procIs32Bits = false + } else { + procIs32Bits = true + } + } + return procIs32Bits +} + +func getProcessEnvironmentVariables(pid int32, ctx context.Context) ([]string, error) { + h, err := 
windows.OpenProcess(processQueryInformation|windows.PROCESS_VM_READ, false, uint32(pid)) + if err == windows.ERROR_ACCESS_DENIED || err == windows.ERROR_INVALID_PARAMETER { + return nil, nil + } + if err != nil { + return nil, err + } + defer syscall.CloseHandle(syscall.Handle(h)) + + procIs32Bits := is32BitProcess(h) + + var processParameterBlockAddress uint64 + + if procIs32Bits { + peb, err := getUserProcessParams32(h) + if err != nil { + return nil, err + } + processParameterBlockAddress = uint64(peb.EnvironmentAddress) + } else { + peb, err := getUserProcessParams64(h) + if err != nil { + return nil, err + } + processParameterBlockAddress = peb.EnvironmentAddress + } + envvarScanner := bufio.NewScanner(&processReader{ + processHandle: h, + is32BitProcess: procIs32Bits, + offset: processParameterBlockAddress, + }) + envvarScanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) { + if atEOF && len(data) == 0 { + return 0, nil, nil + } + // Check for UTF-16 zero character + for i := 0; i < len(data)-1; i += 2 { + if data[i] == 0 && data[i+1] == 0 { + return i + 2, data[0:i], nil + } + } + if atEOF { + return len(data), data, nil + } + // Request more data + return 0, nil, nil + }) + var envVars []string + for envvarScanner.Scan() { + entry := envvarScanner.Bytes() + if len(entry) == 0 { + break // Block is finished + } + envVars = append(envVars, convertUTF16ToString(entry)) + select { + case <-ctx.Done(): + break + default: + continue + } + } + if err := envvarScanner.Err(); err != nil { + return nil, err + } + return envVars, nil +} + +type processReader struct { + processHandle windows.Handle + is32BitProcess bool + offset uint64 +} + +func (p *processReader) Read(buf []byte) (int, error) { + processMemory := readProcessMemory(syscall.Handle(p.processHandle), p.is32BitProcess, p.offset, uint(len(buf))) + if len(processMemory) == 0 { + return 0, io.EOF + } + copy(buf, processMemory) + p.offset += uint64(len(processMemory)) + return len(processMemory), nil +} + +func getProcessCommandLine(pid int32) (string, error) { + h, err := windows.OpenProcess(processQueryInformation|windows.PROCESS_VM_READ, false, uint32(pid)) + if err == windows.ERROR_ACCESS_DENIED || err == windows.ERROR_INVALID_PARAMETER { + return "", nil + } + if err != nil { + return "", err + } + defer syscall.CloseHandle(syscall.Handle(h)) + + procIs32Bits := is32BitProcess(h) + + if procIs32Bits { + userProcParams, err := getUserProcessParams32(h) + if err != nil { + return "", err + } + if userProcParams.CommandLineLength > 0 { + cmdLine := readProcessMemory(syscall.Handle(h), procIs32Bits, uint64(userProcParams.CommandLineAddress), uint(userProcParams.CommandLineLength)) + if len(cmdLine) != int(userProcParams.CommandLineLength) { + return "", errors.New("cannot read cmdline") + } + + return convertUTF16ToString(cmdLine), nil + } + } else { + userProcParams, err := getUserProcessParams64(h) + if err != nil { + return "", err + } + if userProcParams.CommandLineLength > 0 { + cmdLine := readProcessMemory(syscall.Handle(h), procIs32Bits, userProcParams.CommandLineAddress, uint(userProcParams.CommandLineLength)) + if len(cmdLine) != int(userProcParams.CommandLineLength) { + return "", errors.New("cannot read cmdline") + } + + return convertUTF16ToString(cmdLine), nil + } + } + + // if we reach here, we have no command line + return "", nil +} + +func convertUTF16ToString(src []byte) string { + srcLen := len(src) / 2 + + codePoints := make([]uint16, srcLen) + + srcIdx := 0 + for i := 0; i < srcLen; 
i++ { + codePoints[i] = uint16(src[srcIdx]) | uint16(src[srcIdx+1])<<8 + srcIdx += 2 + } + return syscall.UTF16ToString(codePoints) +} diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_windows_32bit.go b/vendor/github.com/shirou/gopsutil/v4/process/process_windows_32bit.go similarity index 97% rename from vendor/github.com/shirou/gopsutil/v3/process/process_windows_32bit.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_windows_32bit.go index db4d453349c..2b231c79d04 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_windows_32bit.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_windows_32bit.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build (windows && 386) || (windows && arm) -// +build windows,386 windows,arm package process @@ -8,7 +8,7 @@ import ( "syscall" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" "golang.org/x/sys/windows" ) diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_windows_64bit.go b/vendor/github.com/shirou/gopsutil/v4/process/process_windows_64bit.go similarity index 95% rename from vendor/github.com/shirou/gopsutil/v3/process/process_windows_64bit.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_windows_64bit.go index 74c6212cfde..befe5213900 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_windows_64bit.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_windows_64bit.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build (windows && amd64) || (windows && arm64) -// +build windows,amd64 windows,arm64 package process @@ -7,7 +7,7 @@ import ( "syscall" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" "golang.org/x/sys/windows" ) diff --git a/vendor/github.com/shoenig/go-m1cpu/.golangci.yaml b/vendor/github.com/shoenig/go-m1cpu/.golangci.yaml deleted file mode 100644 index dc6fefb979e..00000000000 --- a/vendor/github.com/shoenig/go-m1cpu/.golangci.yaml +++ /dev/null @@ -1,12 +0,0 @@ -run: - timeout: 5m -linters: - enable: - - gofmt - - errcheck - - errname - - errorlint - - bodyclose - - durationcheck - - whitespace - diff --git a/vendor/github.com/shoenig/go-m1cpu/LICENSE b/vendor/github.com/shoenig/go-m1cpu/LICENSE deleted file mode 100644 index e87a115e462..00000000000 --- a/vendor/github.com/shoenig/go-m1cpu/LICENSE +++ /dev/null @@ -1,363 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. 
"Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. 
under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. 
Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. 
This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. 
- -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/shoenig/go-m1cpu/Makefile b/vendor/github.com/shoenig/go-m1cpu/Makefile deleted file mode 100644 index 28d786397d4..00000000000 --- a/vendor/github.com/shoenig/go-m1cpu/Makefile +++ /dev/null @@ -1,12 +0,0 @@ -SHELL = bash - -default: test - -.PHONY: test -test: - @echo "--> Running Tests ..." - @go test -v -race ./... - -vet: - @echo "--> Vet Go sources ..." - @go vet ./... diff --git a/vendor/github.com/shoenig/go-m1cpu/README.md b/vendor/github.com/shoenig/go-m1cpu/README.md deleted file mode 100644 index 399657acf86..00000000000 --- a/vendor/github.com/shoenig/go-m1cpu/README.md +++ /dev/null @@ -1,66 +0,0 @@ -# m1cpu - -[![Go Reference](https://pkg.go.dev/badge/github.com/shoenig/go-m1cpu.svg)](https://pkg.go.dev/github.com/shoenig/go-m1cpu) -[![MPL License](https://img.shields.io/github/license/shoenig/go-m1cpu?color=g&style=flat-square)](https://github.com/shoenig/go-m1cpu/blob/main/LICENSE) -[![Run CI Tests](https://github.com/shoenig/go-m1cpu/actions/workflows/ci.yaml/badge.svg)](https://github.com/shoenig/go-m1cpu/actions/workflows/ci.yaml) - -The `go-m1cpu` module is a library for inspecting Apple Silicon CPUs in Go. - -Use the `m1cpu` Go package for looking up the CPU frequency for Apple M1 and M2 CPUs. - -# Install - -```shell -go get github.com/shoenig/go-m1cpu@latest -``` - -# CGO - -This package requires the use of [CGO](https://go.dev/blog/cgo). - -Extracting the CPU properties is done via Apple's [IOKit](https://developer.apple.com/documentation/iokit?language=objc) -framework, which is accessible only through system C libraries. - -# Example - -Simple Go program to print Apple Silicon M1/M2 CPU speeds. - -```go -package main - -import ( - "fmt" - - "github.com/shoenig/go-m1cpu" -) - -func main() { - fmt.Println("Apple Silicon", m1cpu.IsAppleSilicon()) - - fmt.Println("pCore GHz", m1cpu.PCoreGHz()) - fmt.Println("eCore GHz", m1cpu.ECoreGHz()) - - fmt.Println("pCore Hz", m1cpu.PCoreHz()) - fmt.Println("eCore Hz", m1cpu.ECoreHz()) -} -``` - -Using `go test` to print out available information. 
- -``` -➜ go test -v -run Show -=== RUN Test_Show - cpu_test.go:42: pCore Hz 3504000000 - cpu_test.go:43: eCore Hz 2424000000 - cpu_test.go:44: pCore GHz 3.504 - cpu_test.go:45: eCore GHz 2.424 - cpu_test.go:46: pCore count 8 - cpu_test.go:47: eCoreCount 4 - cpu_test.go:50: pCore Caches 196608 131072 16777216 - cpu_test.go:53: eCore Caches 131072 65536 4194304 ---- PASS: Test_Show (0.00s) -``` - -# License - -Open source under the [MPL](LICENSE) diff --git a/vendor/github.com/shoenig/go-m1cpu/cpu.go b/vendor/github.com/shoenig/go-m1cpu/cpu.go deleted file mode 100644 index 502a8cce92e..00000000000 --- a/vendor/github.com/shoenig/go-m1cpu/cpu.go +++ /dev/null @@ -1,213 +0,0 @@ -//go:build darwin && arm64 && cgo - -package m1cpu - -// #cgo LDFLAGS: -framework CoreFoundation -framework IOKit -// #include -// #include -// #include -// #include -// -// #if !defined(MAC_OS_VERSION_12_0) || MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_VERSION_12_0 -// #define kIOMainPortDefault kIOMasterPortDefault -// #endif -// -// #define HzToGHz(hz) ((hz) / 1000000000.0) -// -// UInt64 global_pCoreHz; -// UInt64 global_eCoreHz; -// int global_pCoreCount; -// int global_eCoreCount; -// int global_pCoreL1InstCacheSize; -// int global_eCoreL1InstCacheSize; -// int global_pCoreL1DataCacheSize; -// int global_eCoreL1DataCacheSize; -// int global_pCoreL2CacheSize; -// int global_eCoreL2CacheSize; -// char global_brand[32]; -// -// UInt64 getFrequency(CFTypeRef typeRef) { -// CFDataRef cfData = typeRef; -// -// CFIndex size = CFDataGetLength(cfData); -// UInt8 buf[size]; -// CFDataGetBytes(cfData, CFRangeMake(0, size), buf); -// -// UInt8 b1 = buf[size-5]; -// UInt8 b2 = buf[size-6]; -// UInt8 b3 = buf[size-7]; -// UInt8 b4 = buf[size-8]; -// -// UInt64 pCoreHz = 0x00000000FFFFFFFF & ((b1<<24) | (b2 << 16) | (b3 << 8) | (b4)); -// return pCoreHz; -// } -// -// int sysctl_int(const char * name) { -// int value = -1; -// size_t size = 8; -// sysctlbyname(name, &value, &size, NULL, 0); -// return value; -// } -// -// void sysctl_string(const char * name, char * dest) { -// size_t size = 32; -// sysctlbyname(name, dest, &size, NULL, 0); -// } -// -// void initialize() { -// global_pCoreCount = sysctl_int("hw.perflevel0.physicalcpu"); -// global_eCoreCount = sysctl_int("hw.perflevel1.physicalcpu"); -// global_pCoreL1InstCacheSize = sysctl_int("hw.perflevel0.l1icachesize"); -// global_eCoreL1InstCacheSize = sysctl_int("hw.perflevel1.l1icachesize"); -// global_pCoreL1DataCacheSize = sysctl_int("hw.perflevel0.l1dcachesize"); -// global_eCoreL1DataCacheSize = sysctl_int("hw.perflevel1.l1dcachesize"); -// global_pCoreL2CacheSize = sysctl_int("hw.perflevel0.l2cachesize"); -// global_eCoreL2CacheSize = sysctl_int("hw.perflevel1.l2cachesize"); -// sysctl_string("machdep.cpu.brand_string", global_brand); -// -// CFMutableDictionaryRef matching = IOServiceMatching("AppleARMIODevice"); -// io_iterator_t iter; -// IOServiceGetMatchingServices(kIOMainPortDefault, matching, &iter); -// -// const size_t bufsize = 512; -// io_object_t obj; -// while ((obj = IOIteratorNext(iter))) { -// char class[bufsize]; -// IOObjectGetClass(obj, class); -// char name[bufsize]; -// IORegistryEntryGetName(obj, name); -// -// if (strncmp(name, "pmgr", bufsize) == 0) { -// CFTypeRef pCoreRef = IORegistryEntryCreateCFProperty(obj, CFSTR("voltage-states5-sram"), kCFAllocatorDefault, 0); -// CFTypeRef eCoreRef = IORegistryEntryCreateCFProperty(obj, CFSTR("voltage-states1-sram"), kCFAllocatorDefault, 0); -// -// long long pCoreHz = getFrequency(pCoreRef); -// long 
long eCoreHz = getFrequency(eCoreRef); -// -// global_pCoreHz = pCoreHz; -// global_eCoreHz = eCoreHz; -// return; -// } -// } -// } -// -// UInt64 eCoreHz() { -// return global_eCoreHz; -// } -// -// UInt64 pCoreHz() { -// return global_pCoreHz; -// } -// -// Float64 eCoreGHz() { -// return HzToGHz(global_eCoreHz); -// } -// -// Float64 pCoreGHz() { -// return HzToGHz(global_pCoreHz); -// } -// -// int pCoreCount() { -// return global_pCoreCount; -// } -// -// int eCoreCount() { -// return global_eCoreCount; -// } -// -// int pCoreL1InstCacheSize() { -// return global_pCoreL1InstCacheSize; -// } -// -// int pCoreL1DataCacheSize() { -// return global_pCoreL1DataCacheSize; -// } -// -// int pCoreL2CacheSize() { -// return global_pCoreL2CacheSize; -// } -// -// int eCoreL1InstCacheSize() { -// return global_eCoreL1InstCacheSize; -// } -// -// int eCoreL1DataCacheSize() { -// return global_eCoreL1DataCacheSize; -// } -// -// int eCoreL2CacheSize() { -// return global_eCoreL2CacheSize; -// } -// -// char * modelName() { -// return global_brand; -// } -import "C" - -func init() { - C.initialize() -} - -// IsAppleSilicon returns true on this platform. -func IsAppleSilicon() bool { - return true -} - -// PCoreHZ returns the max frequency in Hertz of the P-Core of an Apple Silicon CPU. -func PCoreHz() uint64 { - return uint64(C.pCoreHz()) -} - -// ECoreHZ returns the max frequency in Hertz of the E-Core of an Apple Silicon CPU. -func ECoreHz() uint64 { - return uint64(C.eCoreHz()) -} - -// PCoreGHz returns the max frequency in Gigahertz of the P-Core of an Apple Silicon CPU. -func PCoreGHz() float64 { - return float64(C.pCoreGHz()) -} - -// ECoreGHz returns the max frequency in Gigahertz of the E-Core of an Apple Silicon CPU. -func ECoreGHz() float64 { - return float64(C.eCoreGHz()) -} - -// PCoreCount returns the number of physical P (performance) cores. -func PCoreCount() int { - return int(C.pCoreCount()) -} - -// ECoreCount returns the number of physical E (efficiency) cores. -func ECoreCount() int { - return int(C.eCoreCount()) -} - -// PCoreCacheSize returns the sizes of the P (performance) core cache sizes -// in the order of -// -// - L1 instruction cache -// - L1 data cache -// - L2 cache -func PCoreCache() (int, int, int) { - return int(C.pCoreL1InstCacheSize()), - int(C.pCoreL1DataCacheSize()), - int(C.pCoreL2CacheSize()) -} - -// ECoreCacheSize returns the sizes of the E (efficiency) core cache sizes -// in the order of -// -// - L1 instruction cache -// - L1 data cache -// - L2 cache -func ECoreCache() (int, int, int) { - return int(C.eCoreL1InstCacheSize()), - int(C.eCoreL1DataCacheSize()), - int(C.eCoreL2CacheSize()) -} - -// ModelName returns the model name of the CPU. -func ModelName() string { - return C.GoString(C.modelName()) -} diff --git a/vendor/github.com/shoenig/go-m1cpu/incompatible.go b/vendor/github.com/shoenig/go-m1cpu/incompatible.go deleted file mode 100644 index d425025aa84..00000000000 --- a/vendor/github.com/shoenig/go-m1cpu/incompatible.go +++ /dev/null @@ -1,53 +0,0 @@ -//go:build !darwin || !arm64 || !cgo - -package m1cpu - -// IsAppleSilicon return false on this platform. 
-func IsAppleSilicon() bool { - return false -} - -// PCoreHZ requires darwin/arm64 -func PCoreHz() uint64 { - panic("m1cpu: not a darwin/arm64 system") -} - -// ECoreHZ requires darwin/arm64 -func ECoreHz() uint64 { - panic("m1cpu: not a darwin/arm64 system") -} - -// PCoreGHz requires darwin/arm64 -func PCoreGHz() float64 { - panic("m1cpu: not a darwin/arm64 system") -} - -// ECoreGHz requires darwin/arm64 -func ECoreGHz() float64 { - panic("m1cpu: not a darwin/arm64 system") -} - -// PCoreCount requires darwin/arm64 -func PCoreCount() int { - panic("m1cpu: not a darwin/arm64 system") -} - -// ECoreCount requires darwin/arm64 -func ECoreCount() int { - panic("m1cpu: not a darwin/arm64 system") -} - -// PCoreCacheSize requires darwin/arm64 -func PCoreCache() (int, int, int) { - panic("m1cpu: not a darwin/arm64 system") -} - -// ECoreCacheSize requires darwin/arm64 -func ECoreCache() (int, int, int) { - panic("m1cpu: not a darwin/arm64 system") -} - -// ModelName requires darwin/arm64 -func ModelName() string { - panic("m1cpu: not a darwin/arm64 system") -} diff --git a/vendor/github.com/spf13/cobra/.golangci.yml b/vendor/github.com/spf13/cobra/.golangci.yml index a618ec24d84..2c8f4808c1a 100644 --- a/vendor/github.com/spf13/cobra/.golangci.yml +++ b/vendor/github.com/spf13/cobra/.golangci.yml @@ -26,33 +26,28 @@ linters: - errcheck #- exhaustive #- funlen - - gas #- gochecknoinits - goconst - #- gocritic + - gocritic #- gocyclo - #- gofmt + - gofmt - goimports - - golint #- gomnd #- goprintffuncname - #- gosec - #- gosimple + - gosec + - gosimple - govet - ineffassign - - interfacer #- lll - - maligned - - megacheck - #- misspell + - misspell #- nakedret #- noctx - #- nolintlint + - nolintlint #- rowserrcheck #- scopelint - #- staticcheck + - staticcheck #- structcheck ! deprecated since v1.49.0; replaced by 'unused' - #- stylecheck + - stylecheck #- typecheck - unconvert #- unparam diff --git a/vendor/github.com/spf13/cobra/active_help.go b/vendor/github.com/spf13/cobra/active_help.go index 5f965e057f2..25c30e3ccc3 100644 --- a/vendor/github.com/spf13/cobra/active_help.go +++ b/vendor/github.com/spf13/cobra/active_help.go @@ -17,21 +17,17 @@ package cobra import ( "fmt" "os" - "regexp" - "strings" ) const ( activeHelpMarker = "_activeHelp_ " // The below values should not be changed: programs will be using them explicitly // in their user documentation, and users will be using them explicitly. - activeHelpEnvVarSuffix = "_ACTIVE_HELP" - activeHelpGlobalEnvVar = "COBRA_ACTIVE_HELP" + activeHelpEnvVarSuffix = "ACTIVE_HELP" + activeHelpGlobalEnvVar = configEnvVarGlobalPrefix + "_" + activeHelpEnvVarSuffix activeHelpGlobalDisable = "0" ) -var activeHelpEnvVarPrefixSubstRegexp = regexp.MustCompile(`[^A-Z0-9_]`) - // AppendActiveHelp adds the specified string to the specified array to be used as ActiveHelp. // Such strings will be processed by the completion script and will be shown as ActiveHelp // to the user. @@ -60,8 +56,5 @@ func GetActiveHelpConfig(cmd *Command) string { // variable. It has the format <PROGRAM>_ACTIVE_HELP where <PROGRAM> is the name of the // root command in upper case, with all non-ASCII-alphanumeric characters replaced by `_`. func activeHelpEnvVar(name string) string { - // This format should not be changed: users will be using it explicitly. 
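Aside (editorial, not part of the diff): the comment above documents the <PROGRAM>_ACTIVE_HELP naming scheme that the shared configEnvVar helper now implements. A minimal standalone sketch of that scheme, assuming only the behaviour the comment documents; envVarFor is a hypothetical name.

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// envVarFor upper-cases the program name, joins it to the suffix with '_',
// and replaces every non-[A-Z0-9_] rune with '_', as documented above.
func envVarFor(program, suffix string) string {
	v := strings.ToUpper(program) + "_" + suffix
	return regexp.MustCompile(`[^A-Z0-9_]`).ReplaceAllString(v, "_")
}

func main() {
	fmt.Println(envVarFor("my-app", "ACTIVE_HELP")) // MY_APP_ACTIVE_HELP
}
```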
- activeHelpEnvVar := strings.ToUpper(fmt.Sprintf("%s%s", name, activeHelpEnvVarSuffix)) - activeHelpEnvVar = activeHelpEnvVarPrefixSubstRegexp.ReplaceAllString(activeHelpEnvVar, "_") - return activeHelpEnvVar + return configEnvVar(name, activeHelpEnvVarSuffix) } diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go index e79ec33a81d..ed1e70ceaa4 100644 --- a/vendor/github.com/spf13/cobra/args.go +++ b/vendor/github.com/spf13/cobra/args.go @@ -52,9 +52,9 @@ func OnlyValidArgs(cmd *Command, args []string) error { if len(cmd.ValidArgs) > 0 { // Remove any description that may be included in ValidArgs. // A description is following a tab character. - var validArgs []string + validArgs := make([]string, 0, len(cmd.ValidArgs)) for _, v := range cmd.ValidArgs { - validArgs = append(validArgs, strings.Split(v, "\t")[0]) + validArgs = append(validArgs, strings.SplitN(v, "\t", 2)[0]) } for _, v := range args { if !stringInSlice(v, validArgs) { diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go index 8a531518409..f4d198cbcbd 100644 --- a/vendor/github.com/spf13/cobra/bash_completions.go +++ b/vendor/github.com/spf13/cobra/bash_completions.go @@ -597,19 +597,16 @@ func writeRequiredFlag(buf io.StringWriter, cmd *Command) { if nonCompletableFlag(flag) { return } - for key := range flag.Annotations { - switch key { - case BashCompOneRequiredFlag: - format := " must_have_one_flag+=(\"--%s" - if flag.Value.Type() != "bool" { - format += "=" - } - format += cbn - WriteStringAndCheck(buf, fmt.Sprintf(format, flag.Name)) - - if len(flag.Shorthand) > 0 { - WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_flag+=(\"-%s"+cbn, flag.Shorthand)) - } + if _, ok := flag.Annotations[BashCompOneRequiredFlag]; ok { + format := " must_have_one_flag+=(\"--%s" + if flag.Value.Type() != "bool" { + format += "=" + } + format += cbn + WriteStringAndCheck(buf, fmt.Sprintf(format, flag.Name)) + + if len(flag.Shorthand) > 0 { + WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_flag+=(\"-%s"+cbn, flag.Shorthand)) } } }) @@ -621,7 +618,7 @@ func writeRequiredNouns(buf io.StringWriter, cmd *Command) { for _, value := range cmd.ValidArgs { // Remove any description that may be included following a tab character. // Descriptions are not supported by bash completion. - value = strings.Split(value, "\t")[0] + value = strings.SplitN(value, "\t", 2)[0] WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_noun+=(%q)\n", value)) } if cmd.ValidArgsFunction != nil { diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go index a6b160ce53c..e0b0947b04c 100644 --- a/vendor/github.com/spf13/cobra/cobra.go +++ b/vendor/github.com/spf13/cobra/cobra.go @@ -193,8 +193,6 @@ func ld(s, t string, ignoreCase bool) int { d := make([][]int, len(s)+1) for i := range d { d[i] = make([]int, len(t)+1) - } - for i := range d { d[i][0] = i } for j := range d[0] { diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go index 2fbe6c131a7..54748fc67eb 100644 --- a/vendor/github.com/spf13/cobra/command.go +++ b/vendor/github.com/spf13/cobra/command.go @@ -154,8 +154,10 @@ type Command struct { // pflags contains persistent flags. pflags *flag.FlagSet // lflags contains local flags. + // This field does not represent internal state, it's used as a cache to optimise LocalFlags function call lflags *flag.FlagSet // iflags contains inherited flags. 
+ // This field does not represent internal state, it's used as a cache to optimise InheritedFlags function call iflags *flag.FlagSet // parentsPflags is all persistent flags of cmd's parents. parentsPflags *flag.FlagSet @@ -706,7 +708,7 @@ Loop: // This is not a flag or a flag value. Check to see if it matches what we're looking for, and if so, // return the args, excluding the one at this position. if s == x { - ret := []string{} + ret := make([]string, 0, len(args)-1) ret = append(ret, args[:pos]...) ret = append(ret, args[pos+1:]...) return ret @@ -754,14 +756,14 @@ func (c *Command) findSuggestions(arg string) string { if c.SuggestionsMinimumDistance <= 0 { c.SuggestionsMinimumDistance = 2 } - suggestionsString := "" + var sb strings.Builder if suggestions := c.SuggestionsFor(arg); len(suggestions) > 0 { - suggestionsString += "\n\nDid you mean this?\n" + sb.WriteString("\n\nDid you mean this?\n") for _, s := range suggestions { - suggestionsString += fmt.Sprintf("\t%v\n", s) + _, _ = fmt.Fprintf(&sb, "\t%v\n", s) } } - return suggestionsString + return sb.String() } func (c *Command) findNext(next string) *Command { @@ -873,7 +875,7 @@ func (c *Command) ArgsLenAtDash() int { func (c *Command) execute(a []string) (err error) { if c == nil { - return fmt.Errorf("Called Execute() on a nil Command") + return fmt.Errorf("called Execute() on a nil Command") } if len(c.Deprecated) > 0 { @@ -1187,10 +1189,11 @@ func (c *Command) InitDefaultHelpFlag() { c.mergePersistentFlags() if c.Flags().Lookup("help") == nil { usage := "help for " - if c.Name() == "" { + name := c.displayName() + if name == "" { usage += "this command" } else { - usage += c.Name() + usage += name } c.Flags().BoolP("help", "h", false, usage) _ = c.Flags().SetAnnotation("help", FlagSetByCobraAnnotation, []string{"true"}) @@ -1236,7 +1239,7 @@ func (c *Command) InitDefaultHelpCmd() { Use: "help [command]", Short: "Help about any command", Long: `Help provides help for any command in the application. -Simply type ` + c.Name() + ` help [path to command] for full details.`, +Simply type ` + c.displayName() + ` help [path to command] for full details.`, ValidArgsFunction: func(c *Command, args []string, toComplete string) ([]string, ShellCompDirective) { var completions []string cmd, _, e := c.Root().Find(args) @@ -1427,6 +1430,10 @@ func (c *Command) CommandPath() string { if c.HasParent() { return c.Parent().CommandPath() + " " + c.Name() } + return c.displayName() +} + +func (c *Command) displayName() string { if displayName, ok := c.Annotations[CommandDisplayNameAnnotation]; ok { return displayName } @@ -1436,10 +1443,11 @@ func (c *Command) CommandPath() string { // UseLine puts out the full usage for a given command (including parents). func (c *Command) UseLine() string { var useline string + use := strings.Replace(c.Use, c.Name(), c.displayName(), 1) if c.HasParent() { - useline = c.parent.CommandPath() + " " + c.Use + useline = c.parent.CommandPath() + " " + use } else { - useline = c.Use + useline = use } if c.DisableFlagsInUseLine { return useline @@ -1452,7 +1460,6 @@ func (c *Command) UseLine() string { // DebugFlags used to determine which flags have been assigned to which commands // and which persist. -// nolint:goconst func (c *Command) DebugFlags() { c.Println("DebugFlags called on", c.Name()) var debugflags func(*Command) @@ -1642,7 +1649,7 @@ func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) f // to this command (local and persistent declared here and by all parents). 
func (c *Command) Flags() *flag.FlagSet { if c.flags == nil { - c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.flags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1653,10 +1660,11 @@ func (c *Command) Flags() *flag.FlagSet { } // LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands. +// This function does not modify the flags of the current command, it's purpose is to return the current state. func (c *Command) LocalNonPersistentFlags() *flag.FlagSet { persistentFlags := c.PersistentFlags() - out := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + out := flag.NewFlagSet(c.displayName(), flag.ContinueOnError) c.LocalFlags().VisitAll(func(f *flag.Flag) { if persistentFlags.Lookup(f.Name) == nil { out.AddFlag(f) @@ -1666,11 +1674,12 @@ func (c *Command) LocalNonPersistentFlags() *flag.FlagSet { } // LocalFlags returns the local FlagSet specifically set in the current command. +// This function does not modify the flags of the current command, it's purpose is to return the current state. func (c *Command) LocalFlags() *flag.FlagSet { c.mergePersistentFlags() if c.lflags == nil { - c.lflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.lflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1693,11 +1702,12 @@ func (c *Command) LocalFlags() *flag.FlagSet { } // InheritedFlags returns all flags which were inherited from parent commands. +// This function does not modify the flags of the current command, it's purpose is to return the current state. func (c *Command) InheritedFlags() *flag.FlagSet { c.mergePersistentFlags() if c.iflags == nil { - c.iflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.iflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1718,6 +1728,7 @@ func (c *Command) InheritedFlags() *flag.FlagSet { } // NonInheritedFlags returns all flags which were not inherited from parent commands. +// This function does not modify the flags of the current command, it's purpose is to return the current state. func (c *Command) NonInheritedFlags() *flag.FlagSet { return c.LocalFlags() } @@ -1725,7 +1736,7 @@ func (c *Command) NonInheritedFlags() *flag.FlagSet { // PersistentFlags returns the persistent FlagSet specifically set in the current command. func (c *Command) PersistentFlags() *flag.FlagSet { if c.pflags == nil { - c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.pflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1738,9 +1749,9 @@ func (c *Command) PersistentFlags() *flag.FlagSet { func (c *Command) ResetFlags() { c.flagErrorBuf = new(bytes.Buffer) c.flagErrorBuf.Reset() - c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.flags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) c.flags.SetOutput(c.flagErrorBuf) - c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.pflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) c.pflags.SetOutput(c.flagErrorBuf) c.lflags = nil @@ -1857,7 +1868,7 @@ func (c *Command) mergePersistentFlags() { // If c.parentsPflags == nil, it makes new. 
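Aside (editorial, not part of the diff): the displayName() helper introduced above falls back to Name() unless the existing CommandDisplayNameAnnotation is set. A minimal sketch, under that assumption, of how an application sets the annotation so help output and the flag sets created above report the display name; the command names are illustrative.

```go
package main

import "github.com/spf13/cobra"

func main() {
	// With the annotation set, help text, the default help flag's usage
	// string, and UseLine show "kubectl foo" instead of the bare "foo".
	root := &cobra.Command{
		Use: "foo",
		Annotations: map[string]string{
			cobra.CommandDisplayNameAnnotation: "kubectl foo",
		},
		Run: func(cmd *cobra.Command, args []string) {},
	}
	_ = root.Execute()
}
```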
func (c *Command) updateParentsPflags() { if c.parentsPflags == nil { - c.parentsPflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.parentsPflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) c.parentsPflags.SetOutput(c.flagErrorBuf) c.parentsPflags.SortFlags = false } diff --git a/vendor/github.com/spf13/cobra/completions.go b/vendor/github.com/spf13/cobra/completions.go index b60f6b20007..c0c08b05721 100644 --- a/vendor/github.com/spf13/cobra/completions.go +++ b/vendor/github.com/spf13/cobra/completions.go @@ -17,6 +17,8 @@ package cobra import ( "fmt" "os" + "regexp" + "strconv" "strings" "sync" @@ -211,24 +213,29 @@ func (c *Command) initCompleteCmd(args []string) { // 2- Even without completions, we need to print the directive } - noDescriptions := (cmd.CalledAs() == ShellCompNoDescRequestCmd) + noDescriptions := cmd.CalledAs() == ShellCompNoDescRequestCmd + if !noDescriptions { + if doDescriptions, err := strconv.ParseBool(getEnvConfig(cmd, configEnvVarSuffixDescriptions)); err == nil { + noDescriptions = !doDescriptions + } + } + noActiveHelp := GetActiveHelpConfig(finalCmd) == activeHelpGlobalDisable + out := finalCmd.OutOrStdout() for _, comp := range completions { - if GetActiveHelpConfig(finalCmd) == activeHelpGlobalDisable { - // Remove all activeHelp entries in this case - if strings.HasPrefix(comp, activeHelpMarker) { - continue - } + if noActiveHelp && strings.HasPrefix(comp, activeHelpMarker) { + // Remove all activeHelp entries if it's disabled. + continue } if noDescriptions { // Remove any description that may be included following a tab character. - comp = strings.Split(comp, "\t")[0] + comp = strings.SplitN(comp, "\t", 2)[0] } // Make sure we only write the first line to the output. // This is needed if a description contains a linebreak. // Otherwise the shell scripts will interpret the other lines as new flags // and could therefore provide a wrong completion. - comp = strings.Split(comp, "\n")[0] + comp = strings.SplitN(comp, "\n", 2)[0] // Finally trim the completion. This is especially important to get rid // of a trailing tab when there are no description following it. @@ -237,14 +244,14 @@ func (c *Command) initCompleteCmd(args []string) { // although there is no description). comp = strings.TrimSpace(comp) - // Print each possible completion to stdout for the completion script to consume. - fmt.Fprintln(finalCmd.OutOrStdout(), comp) + // Print each possible completion to the output for the completion script to consume. + fmt.Fprintln(out, comp) } // As the last printout, print the completion directive for the completion script to parse. // The directive integer must be that last character following a single colon (:). // The completion script expects :<directive> - fmt.Fprintf(finalCmd.OutOrStdout(), ":%d\n", directive) + fmt.Fprintf(out, ":%d\n", directive) // Print some helpful info to stderr for the user to understand. // Output from stderr must be ignored by the completion script. @@ -291,7 +298,7 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi } if err != nil { // Unable to find the real command. 
E.g., <program> someInvalidCmd - return c, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Unable to find a command for arguments: %v", trimmedArgs) + return c, []string{}, ShellCompDirectiveDefault, fmt.Errorf("unable to find a command for arguments: %v", trimmedArgs) } finalCmd.ctx = c.ctx @@ -899,3 +906,34 @@ func CompError(msg string) { func CompErrorln(msg string) { CompError(fmt.Sprintf("%s\n", msg)) } + +// These values should not be changed: users will be using them explicitly. +const ( + configEnvVarGlobalPrefix = "COBRA" + configEnvVarSuffixDescriptions = "COMPLETION_DESCRIPTIONS" +) + +var configEnvVarPrefixSubstRegexp = regexp.MustCompile(`[^A-Z0-9_]`) + +// configEnvVar returns the name of the program-specific configuration environment +// variable. It has the format <PROGRAM>_<SUFFIX> where <PROGRAM> is the name of the +// root command in upper case, with all non-ASCII-alphanumeric characters replaced by `_`. +func configEnvVar(name, suffix string) string { + // This format should not be changed: users will be using it explicitly. + v := strings.ToUpper(fmt.Sprintf("%s_%s", name, suffix)) + v = configEnvVarPrefixSubstRegexp.ReplaceAllString(v, "_") + return v +} + +// getEnvConfig returns the value of the configuration environment variable +// <PROGRAM>_<SUFFIX> where <PROGRAM> is the name of the root command in upper +// case, with all non-ASCII-alphanumeric characters replaced by `_`. +// If the value is empty or not set, the value of the environment variable +// COBRA_<SUFFIX> is returned instead. +func getEnvConfig(cmd *Command, suffix string) string { + v := os.Getenv(configEnvVar(cmd.Root().Name(), suffix)) + if v == "" { + v = os.Getenv(configEnvVar(configEnvVarGlobalPrefix, suffix)) + } + return v +} diff --git a/vendor/github.com/spf13/cobra/flag_groups.go b/vendor/github.com/spf13/cobra/flag_groups.go index 0671ec5f202..560612fd338 100644 --- a/vendor/github.com/spf13/cobra/flag_groups.go +++ b/vendor/github.com/spf13/cobra/flag_groups.go @@ -23,9 +23,9 @@ import ( ) const ( - requiredAsGroup = "cobra_annotation_required_if_others_set" - oneRequired = "cobra_annotation_one_required" - mutuallyExclusive = "cobra_annotation_mutually_exclusive" + requiredAsGroupAnnotation = "cobra_annotation_required_if_others_set" + oneRequiredAnnotation = "cobra_annotation_one_required" + mutuallyExclusiveAnnotation = "cobra_annotation_mutually_exclusive" ) // MarkFlagsRequiredTogether marks the given flags with annotations so that Cobra errors // @@ -37,7 +37,7 @@ func (c *Command) MarkFlagsRequiredTogether(flagNames ...string) { if f == nil { panic(fmt.Sprintf("Failed to find flag %q and mark it as being required in a flag group", v)) } - if err := c.Flags().SetAnnotation(v, requiredAsGroup, append(f.Annotations[requiredAsGroup], strings.Join(flagNames, " "))); err != nil { + if err := c.Flags().SetAnnotation(v, requiredAsGroupAnnotation, append(f.Annotations[requiredAsGroupAnnotation], strings.Join(flagNames, " "))); err != nil { // Only errs if the flag isn't found. panic(err) } @@ -53,7 +53,7 @@ func (c *Command) MarkFlagsOneRequired(flagNames ...string) { if f == nil { panic(fmt.Sprintf("Failed to find flag %q and mark it as being in a one-required flag group", v)) } - if err := c.Flags().SetAnnotation(v, oneRequired, append(f.Annotations[oneRequired], strings.Join(flagNames, " "))); err != nil { + if err := c.Flags().SetAnnotation(v, oneRequiredAnnotation, append(f.Annotations[oneRequiredAnnotation], strings.Join(flagNames, " "))); err != nil { // Only errs if the flag isn't found.
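The two helpers above define the lookup order for completion configuration: the program-specific variable first, then the global COBRA_ prefix as a fallback. The same name derivation can be reproduced in isolation (the command name my-app:v2 is just an illustration):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

var nonAlnum = regexp.MustCompile(`[^A-Z0-9_]`)

// configEnvVarName mirrors cobra's scheme: upper-case <PROGRAM>_<SUFFIX> with
// every character outside [A-Z0-9_] replaced by an underscore.
func configEnvVarName(name, suffix string) string {
	v := strings.ToUpper(fmt.Sprintf("%s_%s", name, suffix))
	return nonAlnum.ReplaceAllString(v, "_")
}

func main() {
	fmt.Println(configEnvVarName("my-app:v2", "COMPLETION_DESCRIPTIONS"))
	// MY_APP_V2_COMPLETION_DESCRIPTIONS
	fmt.Println(configEnvVarName("COBRA", "COMPLETION_DESCRIPTIONS"))
	// COBRA_COMPLETION_DESCRIPTIONS (the global fallback)
}
```

So `MY_APP_V2_COMPLETION_DESCRIPTIONS=false` (or the `COBRA_` fallback) suppresses descriptions even when the shell asked for them. The oneRequiredAnnotation being renamed here is the one written by the public MarkFlagsOneRequired API; from user code the group looks like this (flag names are illustrative):

```go
package main

import "github.com/spf13/cobra"

func main() {
	cmd := &cobra.Command{
		Use:  "backup",
		RunE: func(cmd *cobra.Command, args []string) error { return nil },
	}
	cmd.Flags().String("file", "", "back up to a local file")
	cmd.Flags().String("s3", "", "back up to an S3 bucket")

	// Writes the (renamed) oneRequiredAnnotation onto both flags: running
	// with neither --file nor --s3 fails, and during shell completion both
	// flags are marked required so they get suggested first.
	cmd.MarkFlagsOneRequired("file", "s3")

	_ = cmd.Execute() // errors unless at least one of the two flags is set
}
```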
panic(err) } @@ -70,7 +70,7 @@ func (c *Command) MarkFlagsMutuallyExclusive(flagNames ...string) { panic(fmt.Sprintf("Failed to find flag %q and mark it as being in a mutually exclusive flag group", v)) } // Each time this is called is a single new entry; this allows it to be a member of multiple groups if needed. - if err := c.Flags().SetAnnotation(v, mutuallyExclusive, append(f.Annotations[mutuallyExclusive], strings.Join(flagNames, " "))); err != nil { + if err := c.Flags().SetAnnotation(v, mutuallyExclusiveAnnotation, append(f.Annotations[mutuallyExclusiveAnnotation], strings.Join(flagNames, " "))); err != nil { panic(err) } } @@ -91,9 +91,9 @@ func (c *Command) ValidateFlagGroups() error { oneRequiredGroupStatus := map[string]map[string]bool{} mutuallyExclusiveGroupStatus := map[string]map[string]bool{} flags.VisitAll(func(pflag *flag.Flag) { - processFlagForGroupAnnotation(flags, pflag, requiredAsGroup, groupStatus) - processFlagForGroupAnnotation(flags, pflag, oneRequired, oneRequiredGroupStatus) - processFlagForGroupAnnotation(flags, pflag, mutuallyExclusive, mutuallyExclusiveGroupStatus) + processFlagForGroupAnnotation(flags, pflag, requiredAsGroupAnnotation, groupStatus) + processFlagForGroupAnnotation(flags, pflag, oneRequiredAnnotation, oneRequiredGroupStatus) + processFlagForGroupAnnotation(flags, pflag, mutuallyExclusiveAnnotation, mutuallyExclusiveGroupStatus) }) if err := validateRequiredFlagGroups(groupStatus); err != nil { @@ -130,7 +130,7 @@ func processFlagForGroupAnnotation(flags *flag.FlagSet, pflag *flag.Flag, annota continue } - groupStatus[group] = map[string]bool{} + groupStatus[group] = make(map[string]bool, len(flagnames)) for _, name := range flagnames { groupStatus[group][name] = false } @@ -232,9 +232,9 @@ func (c *Command) enforceFlagGroupsForCompletion() { oneRequiredGroupStatus := map[string]map[string]bool{} mutuallyExclusiveGroupStatus := map[string]map[string]bool{} c.Flags().VisitAll(func(pflag *flag.Flag) { - processFlagForGroupAnnotation(flags, pflag, requiredAsGroup, groupStatus) - processFlagForGroupAnnotation(flags, pflag, oneRequired, oneRequiredGroupStatus) - processFlagForGroupAnnotation(flags, pflag, mutuallyExclusive, mutuallyExclusiveGroupStatus) + processFlagForGroupAnnotation(flags, pflag, requiredAsGroupAnnotation, groupStatus) + processFlagForGroupAnnotation(flags, pflag, oneRequiredAnnotation, oneRequiredGroupStatus) + processFlagForGroupAnnotation(flags, pflag, mutuallyExclusiveAnnotation, mutuallyExclusiveGroupStatus) }) // If a flag that is part of a group is present, we make all the other flags @@ -253,17 +253,17 @@ func (c *Command) enforceFlagGroupsForCompletion() { // If none of the flags of a one-required group are present, we make all the flags // of that group required so that the shell completion suggests them automatically for flagList, flagnameAndStatus := range oneRequiredGroupStatus { - set := 0 + isSet := false - for _, isSet := range flagnameAndStatus { + for _, isSet = range flagnameAndStatus { if isSet { - set++ + break } } // None of the flags of the group are set, mark all flags in the group // as required - if set == 0 { + if !isSet { for _, fName := range strings.Split(flagList, " ") { _ = c.MarkFlagRequired(fName) } diff --git a/vendor/github.com/spf13/cobra/powershell_completions.go b/vendor/github.com/spf13/cobra/powershell_completions.go index 55195193944..a830b7bcad2 100644 --- a/vendor/github.com/spf13/cobra/powershell_completions.go +++ b/vendor/github.com/spf13/cobra/powershell_completions.go @@ -28,8 
+28,8 @@ import ( func genPowerShellComp(buf io.StringWriter, name string, includeDesc bool) { // Variables should not contain a '-' or ':' character nameForVar := name - nameForVar = strings.Replace(nameForVar, "-", "_", -1) - nameForVar = strings.Replace(nameForVar, ":", "_", -1) + nameForVar = strings.ReplaceAll(nameForVar, "-", "_") + nameForVar = strings.ReplaceAll(nameForVar, ":", "_") compCmd := ShellCompRequestCmd if !includeDesc { diff --git a/vendor/github.com/spf13/viper/README.md b/vendor/github.com/spf13/viper/README.md index b96180b3b9d..3fc7d84f16c 100644 --- a/vendor/github.com/spf13/viper/README.md +++ b/vendor/github.com/spf13/viper/README.md @@ -11,7 +11,7 @@ [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/spf13/viper/ci.yaml?branch=master&style=flat-square)](https://github.com/spf13/viper/actions?query=workflow%3ACI) [![Join the chat at https://gitter.im/spf13/viper](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/spf13/viper?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![Go Report Card](https://goreportcard.com/badge/github.com/spf13/viper?style=flat-square)](https://goreportcard.com/report/github.com/spf13/viper) -![Go Version](https://img.shields.io/badge/go%20version-%3E=1.19-61CFDD.svg?style=flat-square) +![Go Version](https://img.shields.io/badge/go%20version-%3E=1.20-61CFDD.svg?style=flat-square) [![PkgGoDev](https://pkg.go.dev/badge/mod/github.com/spf13/viper)](https://pkg.go.dev/mod/github.com/spf13/viper) **Go configuration with fangs!** @@ -39,7 +39,7 @@ Many Go projects are built using Viper including: go get github.com/spf13/viper ``` -**Note:** Viper uses [Go Modules](https://github.com/golang/go/wiki/Modules) to manage dependencies. +**Note:** Viper uses [Go Modules](https://go.dev/wiki/Modules) to manage dependencies. ## What is Viper? @@ -420,7 +420,7 @@ flags, or environment variables. Viper supports multiple hosts. To use, pass a list of endpoints separated by `;`. For example `http://127.0.0.1:4001;http://127.0.0.1:4002`. -Viper uses [crypt](https://github.com/bketelsen/crypt) to retrieve +Viper uses [crypt](https://github.com/sagikazarmark/crypt) to retrieve configuration from the K/V store, which means that you can store your configuration values encrypted and have them automatically decrypted if you have the correct gpg keyring. Encryption is optional. @@ -432,7 +432,7 @@ independently of it. K/V store. `crypt` defaults to etcd on http://127.0.0.1:4001. ```bash -$ go get github.com/bketelsen/crypt/bin/crypt +$ go get github.com/sagikazarmark/crypt/bin/crypt $ crypt set -plaintext /config/hugo.json /Users/hugo/settings/config.json ``` diff --git a/vendor/github.com/spf13/viper/TROUBLESHOOTING.md b/vendor/github.com/spf13/viper/TROUBLESHOOTING.md index c4e36c68603..b68993d4123 100644 --- a/vendor/github.com/spf13/viper/TROUBLESHOOTING.md +++ b/vendor/github.com/spf13/viper/TROUBLESHOOTING.md @@ -15,10 +15,10 @@ cannot find package "github.com/hashicorp/hcl/tree/hcl1" in any of: ``` As the error message suggests, Go tries to look up dependencies in `GOPATH` mode (as it's commonly called) from the `GOPATH`. -Viper opted to use [Go Modules](https://github.com/golang/go/wiki/Modules) to manage its dependencies. While in many cases the two methods are interchangeable, once a dependency releases new (major) versions, `GOPATH` mode is no longer able to decide which version to use, so it'll either use one that's already present or pick a version (usually the `master` branch). 
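The powershell change just above is behaviour-preserving: strings.ReplaceAll(s, old, new) is defined in the standard library as strings.Replace(s, old, new, -1). A quick check:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	name := "my-app:v2"
	// ReplaceAll is exactly Replace with n = -1 (replace every occurrence).
	a := strings.Replace(name, "-", "_", -1)
	b := strings.ReplaceAll(name, "-", "_")
	fmt.Println(a == b) // true
}
```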
+Viper opted to use [Go Modules](https://go.dev/wiki/Modules) to manage its dependencies. While in many cases the two methods are interchangeable, once a dependency releases new (major) versions, `GOPATH` mode is no longer able to decide which version to use, so it'll either use one that's already present or pick a version (usually the `master` branch). The solution is easy: switch to using Go Modules. -Please refer to the [wiki](https://github.com/golang/go/wiki/Modules) on how to do that. +Please refer to the [wiki](https://go.dev/wiki/Modules) on how to do that. **tl;dr* `export GO111MODULE=on` diff --git a/vendor/github.com/spf13/viper/flake.lock b/vendor/github.com/spf13/viper/flake.lock index 78da51090b5..3840614fa20 100644 --- a/vendor/github.com/spf13/viper/flake.lock +++ b/vendor/github.com/spf13/viper/flake.lock @@ -8,11 +8,11 @@ "pre-commit-hooks": "pre-commit-hooks" }, "locked": { - "lastModified": 1687972261, - "narHash": "sha256-+mxvZfwMVoaZYETmuQWqTi/7T9UKoAE+WpdSQkOVJ2g=", + "lastModified": 1707817777, + "narHash": "sha256-vHyIs1OULQ3/91wD6xOiuayfI71JXALGA5KLnDKAcy0=", "owner": "cachix", "repo": "devenv", - "rev": "e85df562088573305e55906eaa964341f8cb0d9f", + "rev": "5a30b9e5ac7c6167e61b1f4193d5130bb9f8defa", "type": "github" }, "original": { @@ -42,11 +42,11 @@ "nixpkgs-lib": "nixpkgs-lib" }, "locked": { - "lastModified": 1687762428, - "narHash": "sha256-DIf7mi45PKo+s8dOYF+UlXHzE0Wl/+k3tXUyAoAnoGE=", + "lastModified": 1706830856, + "narHash": "sha256-a0NYyp+h9hlb7ddVz4LUn1vT/PLwqfrWYcHMvFB1xYg=", "owner": "hercules-ci", "repo": "flake-parts", - "rev": "37dd7bb15791c86d55c5121740a1887ab55ee836", + "rev": "b253292d9c0a5ead9bc98c4e9a26c6312e27d69f", "type": "github" }, "original": { @@ -56,12 +56,15 @@ } }, "flake-utils": { + "inputs": { + "systems": "systems" + }, "locked": { - "lastModified": 1667395993, - "narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=", + "lastModified": 1685518550, + "narHash": "sha256-o2d0KcvaXzTrPRIo0kOLV0/QXHhDQ5DTi+OxcjO8xqY=", "owner": "numtide", "repo": "flake-utils", - "rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f", + "rev": "a1720a10a6cfe8234c0e93907ffe81be440f4cef", "type": "github" }, "original": { @@ -151,11 +154,11 @@ "nixpkgs-lib": { "locked": { "dir": "lib", - "lastModified": 1685564631, - "narHash": "sha256-8ywr3AkblY4++3lIVxmrWZFzac7+f32ZEhH/A8pNscI=", + "lastModified": 1706550542, + "narHash": "sha256-UcsnCG6wx++23yeER4Hg18CXWbgNpqNXcHIo5/1Y+hc=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "4f53efe34b3a8877ac923b9350c874e3dcd5dc0a", + "rev": "97b17f32362e475016f942bbdfda4a4a72a8a652", "type": "github" }, "original": { @@ -184,27 +187,27 @@ }, "nixpkgs-stable": { "locked": { - "lastModified": 1678872516, - "narHash": "sha256-/E1YwtMtFAu2KUQKV/1+KFuReYPANM2Rzehk84VxVoc=", + "lastModified": 1685801374, + "narHash": "sha256-otaSUoFEMM+LjBI1XL/xGB5ao6IwnZOXc47qhIgJe8U=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "9b8e5abb18324c7fe9f07cb100c3cd4a29cda8b8", + "rev": "c37ca420157f4abc31e26f436c1145f8951ff373", "type": "github" }, "original": { "owner": "NixOS", - "ref": "nixos-22.11", + "ref": "nixos-23.05", "repo": "nixpkgs", "type": "github" } }, "nixpkgs_2": { "locked": { - "lastModified": 1687886075, - "narHash": "sha256-PeayJDDDy+uw1Ats4moZnRdL1OFuZm1Tj+KiHlD67+o=", + "lastModified": 1707939175, + "narHash": "sha256-D1xan0lgxbmXDyzVqXTiSYHLmAMrMRdD+alKzEO/p3w=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "a565059a348422af5af9026b5174dc5c0dcefdae", + "rev": "f7e8132daca31b1e3859ac0fb49741754375ac3d", "type": 
"github" }, "original": { @@ -229,11 +232,11 @@ "nixpkgs-stable": "nixpkgs-stable" }, "locked": { - "lastModified": 1686050334, - "narHash": "sha256-R0mczWjDzBpIvM3XXhO908X5e2CQqjyh/gFbwZk/7/Q=", + "lastModified": 1704725188, + "narHash": "sha256-qq8NbkhRZF1vVYQFt1s8Mbgo8knj+83+QlL5LBnYGpI=", "owner": "cachix", "repo": "pre-commit-hooks.nix", - "rev": "6881eb2ae5d8a3516e34714e7a90d9d95914c4dc", + "rev": "ea96f0c05924341c551a797aaba8126334c505d2", "type": "github" }, "original": { @@ -248,6 +251,21 @@ "flake-parts": "flake-parts", "nixpkgs": "nixpkgs_2" } + }, + "systems": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } } }, "root": "root", diff --git a/vendor/github.com/spf13/viper/flake.nix b/vendor/github.com/spf13/viper/flake.nix index 9b26c3fcf5a..0230668cff6 100644 --- a/vendor/github.com/spf13/viper/flake.nix +++ b/vendor/github.com/spf13/viper/flake.nix @@ -20,6 +20,7 @@ default = { languages = { go.enable = true; + go.package = pkgs.go_1_22; }; pre-commit.hooks = { diff --git a/vendor/github.com/spf13/viper/viper.go b/vendor/github.com/spf13/viper/viper.go index 20eb4da177e..da68d9944cc 100644 --- a/vendor/github.com/spf13/viper/viper.go +++ b/vendor/github.com/spf13/viper/viper.go @@ -624,7 +624,7 @@ func (v *Viper) AddRemoteProvider(provider, endpoint, path string) error { // To retrieve a config file called myapp.json from /configs/myapp.json // you should set path to /configs and set config name (SetConfigName()) to // "myapp". -// Secure Remote Providers are implemented with github.com/bketelsen/crypt. +// Secure Remote Providers are implemented with github.com/sagikazarmark/crypt. func AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error { return v.AddSecureRemoteProvider(provider, endpoint, path, secretkeyring) } @@ -1791,12 +1791,6 @@ func (v *Viper) writeConfig(filename string, force bool) error { return f.Sync() } -// Unmarshal a Reader into a map. -// Should probably be an unexported function. -func unmarshalReader(in io.Reader, c map[string]any) error { - return v.unmarshalReader(in, c) -} - func (v *Viper) unmarshalReader(in io.Reader, c map[string]any) error { buf := new(bytes.Buffer) buf.ReadFrom(in) diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go index 4d4b4aad6fe..7e19eba0904 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -7,10 +7,13 @@ import ( "time" ) -type CompareType int +// Deprecated: CompareType has only ever been for internal use and has accidentally been published since v1.6.0. Do not use it. 
+type CompareType = compareResult + +type compareResult int const ( - compareLess CompareType = iota - 1 + compareLess compareResult = iota - 1 compareEqual compareGreater ) @@ -39,7 +42,7 @@ var ( bytesType = reflect.TypeOf([]byte{}) ) -func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { +func compare(obj1, obj2 interface{}, kind reflect.Kind) (compareResult, bool) { obj1Value := reflect.ValueOf(obj1) obj2Value := reflect.ValueOf(obj2) @@ -325,7 +328,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { timeObj2 = obj2Value.Convert(timeType).Interface().(time.Time) } - return compare(timeObj1.UnixNano(), timeObj2.UnixNano(), reflect.Int64) + if timeObj1.Before(timeObj2) { + return compareLess, true + } + if timeObj1.Equal(timeObj2) { + return compareEqual, true + } + return compareGreater, true } case reflect.Slice: { @@ -345,7 +354,7 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { bytesObj2 = obj2Value.Convert(bytesType).Interface().([]byte) } - return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true + return compareResult(bytes.Compare(bytesObj1, bytesObj2)), true } case reflect.Uintptr: { @@ -381,7 +390,7 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) + return compareTwoValues(t, e1, e2, []compareResult{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) } // GreaterOrEqual asserts that the first element is greater than or equal to the second @@ -394,7 +403,7 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) + return compareTwoValues(t, e1, e2, []compareResult{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) } // Less asserts that the first element is less than the second @@ -406,7 +415,7 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) + return compareTwoValues(t, e1, e2, []compareResult{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) } // LessOrEqual asserts that the first element is less than or equal to the second @@ -419,7 +428,7 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) + return compareTwoValues(t, e1, e2, []compareResult{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) } // Positive asserts that the specified element is positive @@ -431,7 +440,7 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { h.Helper() } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs...) + return compareTwoValues(t, e, zero.Interface(), []compareResult{compareGreater}, "\"%v\" is not positive", msgAndArgs...) 
} // Negative asserts that the specified element is negative @@ -443,10 +452,10 @@ func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { h.Helper() } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs...) + return compareTwoValues(t, e, zero.Interface(), []compareResult{compareLess}, "\"%v\" is not negative", msgAndArgs...) } -func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool { +func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() } @@ -469,7 +478,7 @@ func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedCompare return true } -func containsValue(values []CompareType, value CompareType) bool { +func containsValue(values []compareResult, value compareResult) bool { for _, v := range values { if v == value { return true diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go index 3ddab109ad9..19063416577 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -104,8 +104,8 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, return EqualExportedValues(t, expected, actual, append([]interface{}{msg}, args...)...) } -// EqualValuesf asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValuesf asserts that two objects are equal or convertible to the larger +// type and equal. // // assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { @@ -186,7 +186,7 @@ func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick // assert.EventuallyWithTf(t, func(c *assert.CollectT, "error message %s", "formatted") { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func EventuallyWithTf(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -568,6 +568,23 @@ func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, a return NotContains(t, s, contains, append([]interface{}{msg}, args...)...) } +// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. 
+// +// assert.NotElementsMatchf(t, [1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false +// +// assert.NotElementsMatchf(t, [1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true +// +// assert.NotElementsMatchf(t, [1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true +func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) +} + // NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. // @@ -604,7 +621,16 @@ func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg s return NotEqualValues(t, expected, actual, append([]interface{}{msg}, args...)...) } -// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// NotErrorAsf asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func NotErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotErrorAs(t, err, target, append([]interface{}{msg}, args...)...) +} + +// NotErrorIsf asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go index a84e09bd409..21629087baf 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -186,8 +186,8 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface return EqualExportedValuesf(a.t, expected, actual, msg, args...) } -// EqualValues asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValues asserts that two objects are equal or convertible to the larger +// type and equal. // // a.EqualValues(uint32(123), int32(123)) func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { @@ -197,8 +197,8 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn return EqualValues(a.t, expected, actual, msgAndArgs...) } -// EqualValuesf asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValuesf asserts that two objects are equal or convertible to the larger +// type and equal. 
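Usage of the new NotElementsMatch family is symmetric with ElementsMatch; a small sketch, assuming a testify version that ships these assertions:

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestNotElementsMatch(t *testing.T) {
	a := []int{1, 1, 2, 3}

	// Passes: the second list is missing one of the duplicated 1s,
	// so the multisets differ even though the sets are equal.
	assert.NotElementsMatch(t, a, []int{1, 2, 3})

	// Passes: one element differs entirely.
	assert.NotElementsMatch(t, a, []int{1, 1, 2, 4})
}
```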
// // a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { @@ -336,7 +336,7 @@ func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, ti // a.EventuallyWithT(func(c *assert.CollectT) { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func (a *Assertions) EventuallyWithT(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -361,7 +361,7 @@ func (a *Assertions) EventuallyWithT(condition func(collect *CollectT), waitFor // a.EventuallyWithTf(func(c *assert.CollectT, "error message %s", "formatted") { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func (a *Assertions) EventuallyWithTf(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1128,6 +1128,40 @@ func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg strin return NotContainsf(a.t, s, contains, msg, args...) } +// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// a.NotElementsMatch([1, 1, 2, 3], [1, 1, 2, 3]) -> false +// +// a.NotElementsMatch([1, 1, 2, 3], [1, 2, 3]) -> true +// +// a.NotElementsMatch([1, 2, 3], [1, 2, 4]) -> true +func (a *Assertions) NotElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotElementsMatch(a.t, listA, listB, msgAndArgs...) +} + +// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// a.NotElementsMatchf([1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false +// +// a.NotElementsMatchf([1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true +// +// a.NotElementsMatchf([1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true +func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotElementsMatchf(a.t, listA, listB, msg, args...) +} + // NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. 
// @@ -1200,7 +1234,25 @@ func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg str return NotEqualf(a.t, expected, actual, msg, args...) } -// NotErrorIs asserts that at none of the errors in err's chain matches target. +// NotErrorAs asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func (a *Assertions) NotErrorAs(err error, target interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotErrorAs(a.t, err, target, msgAndArgs...) +} + +// NotErrorAsf asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func (a *Assertions) NotErrorAsf(err error, target interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotErrorAsf(a.t, err, target, msg, args...) +} + +// NotErrorIs asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { @@ -1209,7 +1261,7 @@ func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface return NotErrorIs(a.t, err, target, msgAndArgs...) } -// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// NotErrorIsf asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go index 00df62a0599..1d2f71824aa 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_order.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go @@ -6,7 +6,7 @@ import ( ) // isOrdered checks that collection contains orderable elements. -func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool { +func isOrdered(t TestingT, object interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool { objKind := reflect.TypeOf(object).Kind() if objKind != reflect.Slice && objKind != reflect.Array { return false @@ -50,7 +50,7 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareT // assert.IsIncreasing(t, []float{1, 2}) // assert.IsIncreasing(t, []string{"a", "b"}) func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) + return isOrdered(t, object, []compareResult{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) } // IsNonIncreasing asserts that the collection is not increasing @@ -59,7 +59,7 @@ func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo // assert.IsNonIncreasing(t, []float{2, 1}) // assert.IsNonIncreasing(t, []string{"b", "a"}) func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) + return isOrdered(t, object, []compareResult{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) 
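NotErrorAs mirrors ErrorAs with the sense inverted; note the caveat already in its doc comment that a match still writes into target. A usage sketch with a hypothetical timeoutError type:

```go
package example

import (
	"errors"
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
)

type timeoutError struct{ op string }

func (e *timeoutError) Error() string { return fmt.Sprintf("%s timed out", e.op) }

func TestNoTimeoutInChain(t *testing.T) {
	err := fmt.Errorf("fetch failed: %w", errors.New("connection refused"))

	var target *timeoutError
	// Passes: nothing in err's chain is a *timeoutError.
	assert.NotErrorAs(t, err, &target)
}
```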
} // IsDecreasing asserts that the collection is decreasing @@ -68,7 +68,7 @@ func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) // assert.IsDecreasing(t, []float{2, 1}) // assert.IsDecreasing(t, []string{"b", "a"}) func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) + return isOrdered(t, object, []compareResult{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) } // IsNonDecreasing asserts that the collection is not decreasing @@ -77,5 +77,5 @@ func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo // assert.IsNonDecreasing(t, []float{1, 2}) // assert.IsNonDecreasing(t, []string{"a", "b"}) func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) + return isOrdered(t, object, []compareResult{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) } diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index 0b7570f21c6..4e91332bb51 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -19,7 +19,9 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/pmezard/go-difflib/difflib" - "gopkg.in/yaml.v3" + + // Wrapper around gopkg.in/yaml.v3 + "github.com/stretchr/testify/assert/yaml" ) //go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl" @@ -45,6 +47,10 @@ type BoolAssertionFunc func(TestingT, bool, ...interface{}) bool // for table driven tests. type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool +// PanicAssertionFunc is a common function prototype when validating a panic value. Can be useful +// for table driven tests. +type PanicAssertionFunc = func(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool + // Comparison is a custom function that returns true on success and false on failure type Comparison func() (success bool) @@ -496,7 +502,13 @@ func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) b h.Helper() } - if !samePointers(expected, actual) { + same, ok := samePointers(expected, actual) + if !ok { + return Fail(t, "Both arguments must be pointers", msgAndArgs...) + } + + if !same { + // both are pointers but not the same type & pointing to the same address return Fail(t, fmt.Sprintf("Not same: \n"+ "expected: %p %#v\n"+ "actual : %p %#v", expected, expected, actual, actual), msgAndArgs...) @@ -516,7 +528,13 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} h.Helper() } - if samePointers(expected, actual) { + same, ok := samePointers(expected, actual) + if !ok { + //fails when the arguments are not pointers + return !(Fail(t, "Both arguments must be pointers", msgAndArgs...)) + } + + if same { return Fail(t, fmt.Sprintf( "Expected and actual point to the same object: %p %#v", expected, expected), msgAndArgs...) 
@@ -524,21 +542,23 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} return true } -// samePointers compares two generic interface objects and returns whether -// they point to the same object -func samePointers(first, second interface{}) bool { +// samePointers checks if two generic interface objects are pointers of the same +// type pointing to the same object. It returns two values: same indicating if +// they are the same type and point to the same object, and ok indicating that +// both inputs are pointers. +func samePointers(first, second interface{}) (same bool, ok bool) { firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second) if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr { - return false + return false, false //not both are pointers } firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second) if firstType != secondType { - return false + return false, true // both are pointers, but of different types } // compare pointer addresses - return first == second + return first == second, true } // formatUnequalValues takes two values of arbitrary types and returns string @@ -572,8 +592,8 @@ func truncatingFormat(data interface{}) string { return value } -// EqualValues asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValues asserts that two objects are equal or convertible to the larger +// type and equal. // // assert.EqualValues(t, uint32(123), int32(123)) func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { @@ -615,21 +635,6 @@ func EqualExportedValues(t TestingT, expected, actual interface{}, msgAndArgs .. return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...) } - if aType.Kind() == reflect.Ptr { - aType = aType.Elem() - } - if bType.Kind() == reflect.Ptr { - bType = bType.Elem() - } - - if aType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...) - } - - if bType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...) - } - expected = copyExportedFields(expected) actual = copyExportedFields(actual) @@ -1170,6 +1175,39 @@ func formatListDiff(listA, listB interface{}, extraA, extraB []interface{}) stri return msg.String() } +// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// assert.NotElementsMatch(t, [1, 1, 2, 3], [1, 1, 2, 3]) -> false +// +// assert.NotElementsMatch(t, [1, 1, 2, 3], [1, 2, 3]) -> true +// +// assert.NotElementsMatch(t, [1, 2, 3], [1, 2, 4]) -> true +func NotElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface{}) (ok bool) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if isEmpty(listA) && isEmpty(listB) { + return Fail(t, "listA and listB contain the same elements", msgAndArgs) + } + + if !isList(t, listA, msgAndArgs...) { + return Fail(t, "listA is not a list type", msgAndArgs...) + } + if !isList(t, listB, msgAndArgs...) { + return Fail(t, "listB is not a list type", msgAndArgs...) 
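With samePointers now returning (same, ok), non-pointer inputs fail both Same and NotSame outright instead of NotSame passing vacuously. A behaviour sketch:

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestSameSemantics(t *testing.T) {
	x, y := 1, 1
	p, q := &x, &x

	assert.Same(t, p, q)     // passes: same type, same address
	assert.NotSame(t, p, &y) // passes: distinct allocations

	// Previously NotSame(t, 1, 1) passed trivially because ints are not
	// pointers; with the two-value samePointers both Same and NotSame now
	// fail fast with "Both arguments must be pointers".
}
```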
+ } + + extraA, extraB := diffLists(listA, listB) + if len(extraA) == 0 && len(extraB) == 0 { + return Fail(t, "listA and listB contain the same elements", msgAndArgs) + } + + return true +} + // Condition uses a Comparison to assert a complex condition. func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { @@ -1488,6 +1526,9 @@ func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAnd if err != nil { return Fail(t, err.Error(), msgAndArgs...) } + if math.IsNaN(actualEpsilon) { + return Fail(t, "relative error is NaN", msgAndArgs...) + } if actualEpsilon > epsilon { return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+ " < %#v (actual)", epsilon, actualEpsilon), msgAndArgs...) @@ -1611,7 +1652,6 @@ func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...in // matchRegexp return true if a specified regexp matches a string. func matchRegexp(rx interface{}, str interface{}) bool { - var r *regexp.Regexp if rr, ok := rx.(*regexp.Regexp); ok { r = rr @@ -1619,7 +1659,14 @@ func matchRegexp(rx interface{}, str interface{}) bool { r = regexp.MustCompile(fmt.Sprint(rx)) } - return (r.FindStringIndex(fmt.Sprint(str)) != nil) + switch v := str.(type) { + case []byte: + return r.Match(v) + case string: + return r.MatchString(v) + default: + return r.MatchString(fmt.Sprint(v)) + } } @@ -1872,7 +1919,7 @@ var spewConfigStringerEnabled = spew.ConfigState{ MaxDepth: 10, } -type tHelper interface { +type tHelper = interface { Helper() } @@ -1911,6 +1958,9 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t // CollectT implements the TestingT interface and collects all errors. type CollectT struct { + // A slice of errors. Non-nil slice denotes a failure. + // If it's non-nil but len(c.errors) == 0, this is also a failure + // obtained by direct c.FailNow() call. errors []error } @@ -1919,9 +1969,10 @@ func (c *CollectT) Errorf(format string, args ...interface{}) { c.errors = append(c.errors, fmt.Errorf(format, args...)) } -// FailNow panics. -func (*CollectT) FailNow() { - panic("Assertion failed") +// FailNow stops execution by calling runtime.Goexit. +func (c *CollectT) FailNow() { + c.fail() + runtime.Goexit() } // Deprecated: That was a method for internal usage that should not have been published. Now just panics. @@ -1934,6 +1985,16 @@ func (*CollectT) Copy(TestingT) { panic("Copy() is deprecated") } +func (c *CollectT) fail() { + if !c.failed() { + c.errors = []error{} // Make it non-nil to mark a failure. + } +} + +func (c *CollectT) failed() bool { + return c.errors != nil +} + // EventuallyWithT asserts that given condition will be met in waitFor time, // periodically checking target function each tick. 
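Two fixes meet in this region: CollectT.FailNow now ends the condition goroutine via runtime.Goexit instead of panicking, so aborting a tick early is safe, and the corrected doc examples pass waitFor before tick, matching the signature. A usage sketch:

```go
package example

import (
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestBecomesReady(t *testing.T) {
	var ready atomic.Bool
	go func() { time.Sleep(50 * time.Millisecond); ready.Store(true) }()

	// Signature: (t, condition, waitFor, tick, ...). The old doc examples
	// had the 1s/10s values swapped; waitFor must be the larger one.
	assert.EventuallyWithT(t, func(c *assert.CollectT) {
		// Each tick runs in its own goroutine, so a FailNow inside the
		// condition ends only that tick via runtime.Goexit.
		assert.True(c, ready.Load())
	}, 2*time.Second, 10*time.Millisecond)
}
```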
In contrast to Eventually, // it supplies a CollectT to the condition function, so that the condition @@ -1951,14 +2012,14 @@ func (*CollectT) Copy(TestingT) { // assert.EventuallyWithT(t, func(c *assert.CollectT) { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() } var lastFinishedTickErrs []error - ch := make(chan []error, 1) + ch := make(chan *CollectT, 1) timer := time.NewTimer(waitFor) defer timer.Stop() @@ -1978,16 +2039,16 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time go func() { collect := new(CollectT) defer func() { - ch <- collect.errors + ch <- collect }() condition(collect) }() - case errs := <-ch: - if len(errs) == 0 { + case collect := <-ch: + if !collect.failed() { return true } // Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached. - lastFinishedTickErrs = errs + lastFinishedTickErrs = collect.errors tick = ticker.C } } @@ -2049,7 +2110,7 @@ func ErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { ), msgAndArgs...) } -// NotErrorIs asserts that at none of the errors in err's chain matches target. +// NotErrorIs asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func NotErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { @@ -2090,6 +2151,24 @@ func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{ ), msgAndArgs...) } +// NotErrorAs asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func NotErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if !errors.As(err, target) { + return true + } + + chain := buildErrorChainString(err) + + return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+ + "found: %q\n"+ + "in chain: %s", target, chain, + ), msgAndArgs...) +} + func buildErrorChainString(err error) string { if err == nil { return "" diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go new file mode 100644 index 00000000000..baa0cc7d7fc --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go @@ -0,0 +1,25 @@ +//go:build testify_yaml_custom && !testify_yaml_fail && !testify_yaml_default +// +build testify_yaml_custom,!testify_yaml_fail,!testify_yaml_default + +// Package yaml is an implementation of YAML functions that calls a pluggable implementation. +// +// This implementation is selected with the testify_yaml_custom build tag. +// +// go test -tags testify_yaml_custom +// +// This implementation can be used at build time to replace the default implementation +// to avoid linking with [gopkg.in/yaml.v3]. +// +// In your test package: +// +// import assertYaml "github.com/stretchr/testify/assert/yaml" +// +// func init() { +// assertYaml.Unmarshal = func (in []byte, out interface{}) error { +// // ... 
+// return nil +// } +// } +package yaml + +var Unmarshal func(in []byte, out interface{}) error diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go new file mode 100644 index 00000000000..b83c6cf64c2 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go @@ -0,0 +1,37 @@ +//go:build !testify_yaml_fail && !testify_yaml_custom +// +build !testify_yaml_fail,!testify_yaml_custom + +// Package yaml is just an indirection to handle YAML deserialization. +// +// This package is just an indirection that allows the builder to override the +// indirection with an alternative implementation of this package that uses +// another implementation of YAML deserialization. This allows to not either not +// use YAML deserialization at all, or to use another implementation than +// [gopkg.in/yaml.v3] (for example for license compatibility reasons, see [PR #1120]). +// +// Alternative implementations are selected using build tags: +// +// - testify_yaml_fail: [Unmarshal] always fails with an error +// - testify_yaml_custom: [Unmarshal] is a variable. Caller must initialize it +// before calling any of [github.com/stretchr/testify/assert.YAMLEq] or +// [github.com/stretchr/testify/assert.YAMLEqf]. +// +// Usage: +// +// go test -tags testify_yaml_fail +// +// You can check with "go list" which implementation is linked: +// +// go list -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml +// go list -tags testify_yaml_fail -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml +// go list -tags testify_yaml_custom -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml +// +// [PR #1120]: https://github.com/stretchr/testify/pull/1120 +package yaml + +import goyaml "gopkg.in/yaml.v3" + +// Unmarshal is just a wrapper of [gopkg.in/yaml.v3.Unmarshal]. +func Unmarshal(in []byte, out interface{}) error { + return goyaml.Unmarshal(in, out) +} diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go new file mode 100644 index 00000000000..e78f7dfe69a --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go @@ -0,0 +1,18 @@ +//go:build testify_yaml_fail && !testify_yaml_custom && !testify_yaml_default +// +build testify_yaml_fail,!testify_yaml_custom,!testify_yaml_default + +// Package yaml is an implementation of YAML functions that always fail. 
+// +// This implementation can be used at build time to replace the default implementation +// to avoid linking with [gopkg.in/yaml.v3]: +// +// go test -tags testify_yaml_fail +package yaml + +import "errors" + +var errNotImplemented = errors.New("YAML functions are not available (see https://pkg.go.dev/github.com/stretchr/testify/assert/yaml)") + +func Unmarshal([]byte, interface{}) error { + return errNotImplemented +} diff --git a/vendor/github.com/stretchr/testify/mock/mock.go b/vendor/github.com/stretchr/testify/mock/mock.go index 213bde2ea63..eb5682df978 100644 --- a/vendor/github.com/stretchr/testify/mock/mock.go +++ b/vendor/github.com/stretchr/testify/mock/mock.go @@ -80,12 +80,12 @@ type Call struct { requires []*Call } -func newCall(parent *Mock, methodName string, callerInfo []string, methodArguments ...interface{}) *Call { +func newCall(parent *Mock, methodName string, callerInfo []string, methodArguments Arguments, returnArguments Arguments) *Call { return &Call{ Parent: parent, Method: methodName, Arguments: methodArguments, - ReturnArguments: make([]interface{}, 0), + ReturnArguments: returnArguments, callerInfo: callerInfo, Repeatability: 0, WaitFor: nil, @@ -256,7 +256,7 @@ func (c *Call) Unset() *Call { // calls have been called as expected. The referenced calls may be from the // same mock instance and/or other mock instances. // -// Mock.On("Do").Return(nil).Notbefore( +// Mock.On("Do").Return(nil).NotBefore( // Mock.On("Init").Return(nil) // ) func (c *Call) NotBefore(calls ...*Call) *Call { @@ -273,6 +273,20 @@ func (c *Call) NotBefore(calls ...*Call) *Call { return c } +// InOrder defines the order in which the calls should be made +// +// For example: +// +// InOrder( +// Mock.On("init").Return(nil), +// Mock.On("Do").Return(nil), +// ) +func InOrder(calls ...*Call) { + for i := 1; i < len(calls); i++ { + calls[i].NotBefore(calls[i-1]) + } +} + // Mock is the workhorse used to track activity on another object. // For an example of its usage, refer to the "Example Usage" section at the top // of this document. @@ -351,7 +365,8 @@ func (m *Mock) On(methodName string, arguments ...interface{}) *Call { m.mutex.Lock() defer m.mutex.Unlock() - c := newCall(m, methodName, assert.CallerInfo(), arguments...) + + c := newCall(m, methodName, assert.CallerInfo(), arguments, make([]interface{}, 0)) m.ExpectedCalls = append(m.ExpectedCalls, c) return c } @@ -491,11 +506,12 @@ func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Argumen m.mutex.Unlock() if closestCall != nil { - m.fail("\n\nmock: Unexpected Method Call\n-----------------------------\n\n%s\n\nThe closest call I have is: \n\n%s\n\n%s\nDiff: %s", + m.fail("\n\nmock: Unexpected Method Call\n-----------------------------\n\n%s\n\nThe closest call I have is: \n\n%s\n\n%s\nDiff: %s\nat: %s\n", callString(methodName, arguments, true), callString(methodName, closestCall.Arguments, true), diffArguments(closestCall.Arguments, arguments), strings.TrimSpace(mismatch), + assert.CallerInfo(), ) } else { m.fail("\nassert: mock: I don't know what to return because the method call was unexpected.\n\tEither do Mock.On(\"%s\").Return(...) 
first, or remove the %s() call.\n\tThis method was unexpected:\n\t\t%s\n\tat: %s", methodName, methodName, callString(methodName, arguments, true), assert.CallerInfo()) @@ -529,7 +545,7 @@ func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Argumen call.totalCalls++ // add the call - m.Calls = append(m.Calls, *newCall(m, methodName, assert.CallerInfo(), arguments...)) + m.Calls = append(m.Calls, *newCall(m, methodName, assert.CallerInfo(), arguments, call.ReturnArguments)) m.mutex.Unlock() // block if specified @@ -764,9 +780,17 @@ const ( ) // AnythingOfTypeArgument contains the type of an argument -// for use when type checking. Used in Diff and Assert. +// for use when type checking. Used in [Arguments.Diff] and [Arguments.Assert]. // -// Deprecated: this is an implementation detail that must not be used. Use [AnythingOfType] instead. +// Deprecated: this is an implementation detail that must not be used. Use the [AnythingOfType] constructor instead, example: +// +// m.On("Do", mock.AnythingOfType("string")) +// +// All explicit type declarations can be replaced with interface{} as is expected by [Mock.On], example: +// +// func anyString interface{} { +// return mock.AnythingOfType("string") +// } type AnythingOfTypeArgument = anythingOfTypeArgument // anythingOfTypeArgument is a string that contains the type of an argument @@ -780,53 +804,54 @@ type anythingOfTypeArgument string // // For example: // -// Assert(t, AnythingOfType("string"), AnythingOfType("int")) +// args.Assert(t, AnythingOfType("string"), AnythingOfType("int")) func AnythingOfType(t string) AnythingOfTypeArgument { return anythingOfTypeArgument(t) } // IsTypeArgument is a struct that contains the type of an argument -// for use when type checking. This is an alternative to AnythingOfType. -// Used in Diff and Assert. +// for use when type checking. This is an alternative to [AnythingOfType]. +// Used in [Arguments.Diff] and [Arguments.Assert]. type IsTypeArgument struct { t reflect.Type } // IsType returns an IsTypeArgument object containing the type to check for. // You can provide a zero-value of the type to check. This is an -// alternative to AnythingOfType. Used in Diff and Assert. +// alternative to [AnythingOfType]. Used in [Arguments.Diff] and [Arguments.Assert]. // // For example: -// Assert(t, IsType(""), IsType(0)) +// +// args.Assert(t, IsType(""), IsType(0)) func IsType(t interface{}) *IsTypeArgument { return &IsTypeArgument{t: reflect.TypeOf(t)} } -// FunctionalOptionsArgument is a struct that contains the type and value of an functional option argument -// for use when type checking. +// FunctionalOptionsArgument contains a list of functional options arguments +// expected for use when matching a list of arguments. 
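Returning to the InOrder helper added above: it simply chains NotBefore across consecutive expectations. A sketch with a hypothetical store mock (the functional-options matcher continues below):

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/mock"
)

type store struct{ mock.Mock }

func (s *store) Init() error       { return s.Called().Error(0) }
func (s *store) Do(k string) error { return s.Called(k).Error(0) }

func TestOrdered(t *testing.T) {
	s := new(store)
	// Equivalent to calling NotBefore on each call with its predecessor.
	mock.InOrder(
		s.On("Init").Return(nil),
		s.On("Do", "key").Return(nil),
	)

	_ = s.Init()    // must come first
	_ = s.Do("key") // fails the ordering check if called before Init
	s.AssertExpectations(t)
}
```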
type FunctionalOptionsArgument struct { - value interface{} + values []interface{} } // String returns the string representation of FunctionalOptionsArgument func (f *FunctionalOptionsArgument) String() string { var name string - tValue := reflect.ValueOf(f.value) - if tValue.Len() > 0 { - name = "[]" + reflect.TypeOf(tValue.Index(0).Interface()).String() + if len(f.values) > 0 { + name = "[]" + reflect.TypeOf(f.values[0]).String() } - return strings.Replace(fmt.Sprintf("%#v", f.value), "[]interface {}", name, 1) + return strings.Replace(fmt.Sprintf("%#v", f.values), "[]interface {}", name, 1) } -// FunctionalOptions returns an FunctionalOptionsArgument object containing the functional option type -// and the values to check of +// FunctionalOptions returns an [FunctionalOptionsArgument] object containing +// the expected functional-options to check for. // // For example: -// Assert(t, FunctionalOptions("[]foo.FunctionalOption", foo.Opt1(), foo.Opt2())) -func FunctionalOptions(value ...interface{}) *FunctionalOptionsArgument { +// +// args.Assert(t, FunctionalOptions(foo.Opt1("strValue"), foo.Opt2(613))) +func FunctionalOptions(values ...interface{}) *FunctionalOptionsArgument { return &FunctionalOptionsArgument{ - value: value, + values: values, } } @@ -873,10 +898,11 @@ func (f argumentMatcher) String() string { // and false otherwise. // // Example: -// m.On("Do", MatchedBy(func(req *http.Request) bool { return req.Host == "example.com" })) // -// |fn|, must be a function accepting a single argument (of the expected type) -// which returns a bool. If |fn| doesn't match the required signature, +// m.On("Do", MatchedBy(func(req *http.Request) bool { return req.Host == "example.com" })) +// +// fn must be a function accepting a single argument (of the expected type) +// which returns a bool. If fn doesn't match the required signature, // MatchedBy() panics. 
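With FunctionalOptionsArgument now holding a plain values slice, expecting a variadic-options call reads like this; the client type and Option API are hypothetical, and options are compared both by identity and by the state they produce when applied:

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/mock"
)

// Hypothetical functional-options API under test.
type config struct{ retries int }

type Option func(*config)

func WithRetries(n int) Option { return func(c *config) { c.retries = n } }

type client struct{ mock.Mock }

func (c *client) Fetch(url string, opts ...Option) error {
	// The variadic options are passed to Called as a single slice so the
	// FunctionalOptions matcher can inspect them.
	return c.Called(url, opts).Error(0)
}

func TestFetchOptions(t *testing.T) {
	c := new(client)
	c.On("Fetch", "https://example.com", mock.FunctionalOptions(
		WithRetries(3),
	)).Return(nil)

	_ = c.Fetch("https://example.com", WithRetries(3))
	c.AssertExpectations(t)
}
```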
func MatchedBy(fn interface{}) argumentMatcher { fnType := reflect.TypeOf(fn) @@ -979,20 +1005,17 @@ func (args Arguments) Diff(objects []interface{}) (string, int) { output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, expected.t.Name(), actualT.Name(), actualFmt) } case *FunctionalOptionsArgument: - t := expected.value - var name string - tValue := reflect.ValueOf(t) - if tValue.Len() > 0 { - name = "[]" + reflect.TypeOf(tValue.Index(0).Interface()).String() + if len(expected.values) > 0 { + name = "[]" + reflect.TypeOf(expected.values[0]).String() } - tName := reflect.TypeOf(t).Name() - if name != reflect.TypeOf(actual).String() && tValue.Len() != 0 { + const tName = "[]interface{}" + if name != reflect.TypeOf(actual).String() && len(expected.values) != 0 { differences++ output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, tName, reflect.TypeOf(actual).Name(), actualFmt) } else { - if ef, af := assertOpts(t, actual); ef == "" && af == "" { + if ef, af := assertOpts(expected.values, actual); ef == "" && af == "" { // match output = fmt.Sprintf("%s\t%d: PASS: %s == %s\n", output, i, tName, tName) } else { @@ -1092,7 +1115,7 @@ func (args Arguments) Error(index int) error { return nil } if s, ok = obj.(error); !ok { - panic(fmt.Sprintf("assert: arguments: Error(%d) failed because object wasn't correct type: %v", index, args.Get(index))) + panic(fmt.Sprintf("assert: arguments: Error(%d) failed because object wasn't correct type: %v", index, obj)) } return s } @@ -1181,32 +1204,38 @@ type tHelper interface { func assertOpts(expected, actual interface{}) (expectedFmt, actualFmt string) { expectedOpts := reflect.ValueOf(expected) actualOpts := reflect.ValueOf(actual) + + var expectedFuncs []*runtime.Func var expectedNames []string for i := 0; i < expectedOpts.Len(); i++ { - expectedNames = append(expectedNames, funcName(expectedOpts.Index(i).Interface())) + f := runtimeFunc(expectedOpts.Index(i).Interface()) + expectedFuncs = append(expectedFuncs, f) + expectedNames = append(expectedNames, funcName(f)) } + var actualFuncs []*runtime.Func var actualNames []string for i := 0; i < actualOpts.Len(); i++ { - actualNames = append(actualNames, funcName(actualOpts.Index(i).Interface())) + f := runtimeFunc(actualOpts.Index(i).Interface()) + actualFuncs = append(actualFuncs, f) + actualNames = append(actualNames, funcName(f)) } - if !assert.ObjectsAreEqual(expectedNames, actualNames) { + + if expectedOpts.Len() != actualOpts.Len() { expectedFmt = fmt.Sprintf("%v", expectedNames) actualFmt = fmt.Sprintf("%v", actualNames) return } for i := 0; i < expectedOpts.Len(); i++ { - expectedOpt := expectedOpts.Index(i).Interface() - actualOpt := actualOpts.Index(i).Interface() - - expectedFunc := expectedNames[i] - actualFunc := actualNames[i] - if expectedFunc != actualFunc { - expectedFmt = expectedFunc - actualFmt = actualFunc + if !isFuncSame(expectedFuncs[i], actualFuncs[i]) { + expectedFmt = expectedNames[i] + actualFmt = actualNames[i] return } + expectedOpt := expectedOpts.Index(i).Interface() + actualOpt := actualOpts.Index(i).Interface() + ot := reflect.TypeOf(expectedOpt) var expectedValues []reflect.Value var actualValues []reflect.Value @@ -1224,9 +1253,9 @@ func assertOpts(expected, actual interface{}) (expectedFmt, actualFmt string) { reflect.ValueOf(actualOpt).Call(actualValues) for i := 0; i < ot.NumIn(); i++ { - if !assert.ObjectsAreEqual(expectedValues[i].Interface(), actualValues[i].Interface()) { - expectedFmt = fmt.Sprintf("%s %+v", 
expectedNames[i], expectedValues[i].Interface()) - actualFmt = fmt.Sprintf("%s %+v", expectedNames[i], actualValues[i].Interface()) + if expectedArg, actualArg := expectedValues[i].Interface(), actualValues[i].Interface(); !assert.ObjectsAreEqual(expectedArg, actualArg) { + expectedFmt = fmt.Sprintf("%s(%T) -> %#v", expectedNames[i], expectedArg, expectedArg) + actualFmt = fmt.Sprintf("%s(%T) -> %#v", expectedNames[i], actualArg, actualArg) return } } @@ -1235,7 +1264,25 @@ func assertOpts(expected, actual interface{}) (expectedFmt, actualFmt string) { return "", "" } -func funcName(opt interface{}) string { - n := runtime.FuncForPC(reflect.ValueOf(opt).Pointer()).Name() - return strings.TrimSuffix(path.Base(n), path.Ext(n)) +func runtimeFunc(opt interface{}) *runtime.Func { + return runtime.FuncForPC(reflect.ValueOf(opt).Pointer()) +} + +func funcName(f *runtime.Func) string { + name := f.Name() + trimmed := strings.TrimSuffix(path.Base(name), path.Ext(name)) + splitted := strings.Split(trimmed, ".") + + if len(splitted) == 0 { + return trimmed + } + + return splitted[len(splitted)-1] +} + +func isFuncSame(f1, f2 *runtime.Func) bool { + f1File, f1Loc := f1.FileLine(f1.Entry()) + f2File, f2Loc := f2.FileLine(f2.Entry()) + + return f1File == f2File && f1Loc == f2Loc } diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go index 506a82f8077..d8921950d7b 100644 --- a/vendor/github.com/stretchr/testify/require/require.go +++ b/vendor/github.com/stretchr/testify/require/require.go @@ -34,9 +34,9 @@ func Conditionf(t TestingT, comp assert.Comparison, msg string, args ...interfac // Contains asserts that the specified string, list(array, slice...) or map contains the // specified substring or element. // -// assert.Contains(t, "Hello World", "World") -// assert.Contains(t, ["Hello", "World"], "World") -// assert.Contains(t, {"Hello": "World"}, "Hello") +// require.Contains(t, "Hello World", "World") +// require.Contains(t, ["Hello", "World"], "World") +// require.Contains(t, {"Hello": "World"}, "Hello") func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -50,9 +50,9 @@ func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...int // Containsf asserts that the specified string, list(array, slice...) or map contains the // specified substring or element. // -// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted") -// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") -// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") +// require.Containsf(t, "Hello World", "World", "error message %s", "formatted") +// require.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") +// require.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -91,7 +91,7 @@ func DirExistsf(t TestingT, path string, msg string, args ...interface{}) { // listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, // the number of appearances of each of them in both lists should match. 
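The runtimeFunc/funcName/isFuncSame helpers introduced above stop matching options on symbol names, which funcName now trims to the last dot-separated segment, and compare each function's entry file and line instead. The trimming matters because the closures returned by different option constructors all reduce to the same bare name "func1". A self-contained illustration of the idea (plain Go, not testify code; the constructor names are made up):

package main

import (
	"fmt"
	"reflect"
	"runtime"
	"strings"
)

// Two different option constructors; both return anonymous closures.
func withRetries(n int) func() { return func() {} }
func withTimeout(d int) func() { return func() {} }

// bareName mimics funcName's trimming: keep only the last dot-separated
// segment of the runtime symbol name.
func bareName(fn interface{}) string {
	full := runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name()
	parts := strings.Split(full, ".")
	return parts[len(parts)-1]
}

// entry returns the code location that isFuncSame compares instead.
func entry(fn interface{}) (string, int) {
	f := runtime.FuncForPC(reflect.ValueOf(fn).Pointer())
	return f.FileLine(f.Entry())
}

func main() {
	a, b := withRetries(3), withTimeout(5)
	fmt.Println(bareName(a), bareName(b)) // func1 func1 - names are indistinguishable
	aFile, aLine := entry(a)
	bFile, bLine := entry(b)
	fmt.Println(aFile == bFile, aLine == bLine) // true false - locations tell them apart
}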
// -// assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2]) +// require.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2]) func ElementsMatch(t TestingT, listA interface{}, listB interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -106,7 +106,7 @@ func ElementsMatch(t TestingT, listA interface{}, listB interface{}, msgAndArgs // listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, // the number of appearances of each of them in both lists should match. // -// assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") +// require.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -120,7 +120,7 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string // Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either // a slice or a channel with len == 0. // -// assert.Empty(t, obj) +// require.Empty(t, obj) func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -134,7 +134,7 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { // Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either // a slice or a channel with len == 0. // -// assert.Emptyf(t, obj, "error message %s", "formatted") +// require.Emptyf(t, obj, "error message %s", "formatted") func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -147,7 +147,7 @@ func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) { // Equal asserts that two objects are equal. // -// assert.Equal(t, 123, 123) +// require.Equal(t, 123, 123) // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). Function equality @@ -166,7 +166,7 @@ func Equal(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...i // and that it is equal to the provided error. // // actualObj, err := SomeFunction() -// assert.EqualError(t, err, expectedErrorString) +// require.EqualError(t, err, expectedErrorString) func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -181,7 +181,7 @@ func EqualError(t TestingT, theError error, errString string, msgAndArgs ...inte // and that it is equal to the provided error. 
// // actualObj, err := SomeFunction() -// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") +// require.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -200,8 +200,8 @@ func EqualErrorf(t TestingT, theError error, errString string, msg string, args // Exported int // notExported int // } -// assert.EqualExportedValues(t, S{1, 2}, S{1, 3}) => true -// assert.EqualExportedValues(t, S{1, 2}, S{2, 3}) => false +// require.EqualExportedValues(t, S{1, 2}, S{1, 3}) => true +// require.EqualExportedValues(t, S{1, 2}, S{2, 3}) => false func EqualExportedValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -220,8 +220,8 @@ func EqualExportedValues(t TestingT, expected interface{}, actual interface{}, m // Exported int // notExported int // } -// assert.EqualExportedValuesf(t, S{1, 2}, S{1, 3}, "error message %s", "formatted") => true -// assert.EqualExportedValuesf(t, S{1, 2}, S{2, 3}, "error message %s", "formatted") => false +// require.EqualExportedValuesf(t, S{1, 2}, S{1, 3}, "error message %s", "formatted") => true +// require.EqualExportedValuesf(t, S{1, 2}, S{2, 3}, "error message %s", "formatted") => false func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -232,10 +232,10 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, t.FailNow() } -// EqualValues asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValues asserts that two objects are equal or convertible to the larger +// type and equal. // -// assert.EqualValues(t, uint32(123), int32(123)) +// require.EqualValues(t, uint32(123), int32(123)) func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -246,10 +246,10 @@ func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArg t.FailNow() } -// EqualValuesf asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValuesf asserts that two objects are equal or convertible to the larger +// type and equal. // -// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") +// require.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -262,7 +262,7 @@ func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg stri // Equalf asserts that two objects are equal. // -// assert.Equalf(t, 123, 123, "error message %s", "formatted") +// require.Equalf(t, 123, 123, "error message %s", "formatted") // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). Function equality @@ -280,8 +280,8 @@ func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, ar // Error asserts that a function returned an error (i.e. not `nil`). 
// // actualObj, err := SomeFunction() -// if assert.Error(t, err) { -// assert.Equal(t, expectedError, err) +// if require.Error(t, err) { +// require.Equal(t, expectedError, err) // } func Error(t TestingT, err error, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { @@ -321,7 +321,7 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int // and that the error contains the specified substring. // // actualObj, err := SomeFunction() -// assert.ErrorContains(t, err, expectedErrorSubString) +// require.ErrorContains(t, err, expectedErrorSubString) func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -336,7 +336,7 @@ func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...in // and that the error contains the specified substring. // // actualObj, err := SomeFunction() -// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") +// require.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -374,8 +374,8 @@ func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface // Errorf asserts that a function returned an error (i.e. not `nil`). // // actualObj, err := SomeFunction() -// if assert.Errorf(t, err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) +// if require.Errorf(t, err, "error message %s", "formatted") { +// require.Equal(t, expectedErrorf, err) // } func Errorf(t TestingT, err error, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { @@ -390,7 +390,7 @@ func Errorf(t TestingT, err error, msg string, args ...interface{}) { // Eventually asserts that given condition will be met in waitFor time, // periodically checking target function each tick. 
// -// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) +// require.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -415,10 +415,10 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t // time.Sleep(8*time.Second) // externalValue = true // }() -// assert.EventuallyWithT(t, func(c *assert.CollectT) { +// require.EventuallyWithT(t, func(c *require.CollectT) { // // add assertions as needed; any assertion failure will fail the current tick -// assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// require.True(c, externalValue, "expected 'externalValue' to be true") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func EventuallyWithT(t TestingT, condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -443,10 +443,10 @@ func EventuallyWithT(t TestingT, condition func(collect *assert.CollectT), waitF // time.Sleep(8*time.Second) // externalValue = true // }() -// assert.EventuallyWithTf(t, func(c *assert.CollectT, "error message %s", "formatted") { +// require.EventuallyWithTf(t, func(c *require.CollectT, "error message %s", "formatted") { // // add assertions as needed; any assertion failure will fail the current tick -// assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// require.True(c, externalValue, "expected 'externalValue' to be true") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func EventuallyWithTf(t TestingT, condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -460,7 +460,7 @@ func EventuallyWithTf(t TestingT, condition func(collect *assert.CollectT), wait // Eventuallyf asserts that given condition will be met in waitFor time, // periodically checking target function each tick. // -// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +// require.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -473,7 +473,7 @@ func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick // Exactly asserts that two objects are equal in value and type. // -// assert.Exactly(t, int32(123), int64(123)) +// require.Exactly(t, int32(123), int64(123)) func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -486,7 +486,7 @@ func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs .. // Exactlyf asserts that two objects are equal in value and type. 
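Beyond the assert-to-require prefix swap, the EventuallyWithT examples above also correct the argument order: waitFor (10s, the overall budget) must cover the 8s the goroutine sleeps, and tick (1s) is the polling interval. A compilable sketch of that usage against the func(collect *assert.CollectT) signature shown in the hunk; the atomic.Bool is an assumption added here to keep the sample race-free, and the message-and-args always trail the tick parameter:

package example

import (
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestEventuallyWithT(t *testing.T) {
	var externalValue atomic.Bool
	go func() {
		time.Sleep(200 * time.Millisecond)
		externalValue.Store(true)
	}()
	// waitFor (1s) bounds the whole wait, tick (50ms) is the poll interval.
	// An assertion failure inside the callback only fails that tick; if the
	// deadline expires, the last tick's failures are reported on t.
	require.EventuallyWithT(t, func(c *assert.CollectT) {
		assert.True(c, externalValue.Load(), "expected 'externalValue' to be true")
	}, time.Second, 50*time.Millisecond,
		"external state has not changed to 'true'; still false")
}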
// -// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted") +// require.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted") func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -543,7 +543,7 @@ func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) { // False asserts that the specified value is false. // -// assert.False(t, myBool) +// require.False(t, myBool) func False(t TestingT, value bool, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -556,7 +556,7 @@ func False(t TestingT, value bool, msgAndArgs ...interface{}) { // Falsef asserts that the specified value is false. // -// assert.Falsef(t, myBool, "error message %s", "formatted") +// require.Falsef(t, myBool, "error message %s", "formatted") func Falsef(t TestingT, value bool, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -593,9 +593,9 @@ func FileExistsf(t TestingT, path string, msg string, args ...interface{}) { // Greater asserts that the first element is greater than the second // -// assert.Greater(t, 2, 1) -// assert.Greater(t, float64(2), float64(1)) -// assert.Greater(t, "b", "a") +// require.Greater(t, 2, 1) +// require.Greater(t, float64(2), float64(1)) +// require.Greater(t, "b", "a") func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -608,10 +608,10 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface // GreaterOrEqual asserts that the first element is greater than or equal to the second // -// assert.GreaterOrEqual(t, 2, 1) -// assert.GreaterOrEqual(t, 2, 2) -// assert.GreaterOrEqual(t, "b", "a") -// assert.GreaterOrEqual(t, "b", "b") +// require.GreaterOrEqual(t, 2, 1) +// require.GreaterOrEqual(t, 2, 2) +// require.GreaterOrEqual(t, "b", "a") +// require.GreaterOrEqual(t, "b", "b") func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -624,10 +624,10 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in // GreaterOrEqualf asserts that the first element is greater than or equal to the second // -// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted") -// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted") -// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted") -// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted") +// require.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted") +// require.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted") +// require.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted") +// require.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted") func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -640,9 +640,9 @@ func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, arg // Greaterf asserts that the first element is greater than the second // -// assert.Greaterf(t, 2, 1, "error message %s", "formatted") -// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted") -// assert.Greaterf(t, "b", "a", "error message %s", "formatted") +// require.Greaterf(t, 2, 1, "error message %s", "formatted") +// require.Greaterf(t, float64(2), float64(1), "error 
message %s", "formatted") +// require.Greaterf(t, "b", "a", "error message %s", "formatted") func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -656,7 +656,7 @@ func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...in // HTTPBodyContains asserts that a specified handler returns a // body that contains a string. // -// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// require.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { @@ -672,7 +672,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url s // HTTPBodyContainsf asserts that a specified handler returns a // body that contains a string. // -// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// require.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { @@ -688,7 +688,7 @@ func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url // HTTPBodyNotContains asserts that a specified handler returns a // body that does not contain a string. // -// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// require.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { @@ -704,7 +704,7 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, ur // HTTPBodyNotContainsf asserts that a specified handler returns a // body that does not contain a string. // -// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// require.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { @@ -719,7 +719,7 @@ func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, u // HTTPError asserts that a specified handler returns an error status code. // -// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// require.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). 
func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { @@ -734,7 +734,7 @@ // HTTPErrorf asserts that a specified handler returns an error status code. // -// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// require.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}) // // Returns whether the assertion was successful (true) or not (false). func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { @@ -749,7 +749,7 @@ // HTTPRedirect asserts that a specified handler returns a redirect status code. // -// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// require.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}) // // Returns whether the assertion was successful (true) or not (false). func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { @@ -764,7 +764,7 @@ // HTTPRedirectf asserts that a specified handler returns a redirect status code. // -// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// require.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}) // // Returns whether the assertion was successful (true) or not (false). func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { @@ -779,7 +779,7 @@ // HTTPStatusCode asserts that a specified handler returns a specified status code. // -// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501) +// require.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501) // // Returns whether the assertion was successful (true) or not (false). func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) { @@ -794,7 +794,7 @@ // HTTPStatusCodef asserts that a specified handler returns a specified status code. // -// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") +// require.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) { @@ -809,7 +809,7 @@ // HTTPSuccess asserts that a specified handler returns a success status code. // -// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) +// require.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) // // Returns whether the assertion was successful (true) or not (false).
func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { @@ -824,7 +824,7 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string // HTTPSuccessf asserts that a specified handler returns a success status code. // -// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") +// require.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { @@ -839,7 +839,7 @@ func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url strin // Implements asserts that an object is implemented by the specified interface. // -// assert.Implements(t, (*MyInterface)(nil), new(MyObject)) +// require.Implements(t, (*MyInterface)(nil), new(MyObject)) func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -852,7 +852,7 @@ func Implements(t TestingT, interfaceObject interface{}, object interface{}, msg // Implementsf asserts that an object is implemented by the specified interface. // -// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +// require.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -865,7 +865,7 @@ func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, ms // InDelta asserts that the two numerals are within delta of each other. // -// assert.InDelta(t, math.Pi, 22/7.0, 0.01) +// require.InDelta(t, math.Pi, 22/7.0, 0.01) func InDelta(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -922,7 +922,7 @@ func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta f // InDeltaf asserts that the two numerals are within delta of each other. 
// -// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted") +// require.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted") func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -979,9 +979,9 @@ func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon fl // IsDecreasing asserts that the collection is decreasing // -// assert.IsDecreasing(t, []int{2, 1, 0}) -// assert.IsDecreasing(t, []float{2, 1}) -// assert.IsDecreasing(t, []string{"b", "a"}) +// require.IsDecreasing(t, []int{2, 1, 0}) +// require.IsDecreasing(t, []float{2, 1}) +// require.IsDecreasing(t, []string{"b", "a"}) func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -994,9 +994,9 @@ func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { // IsDecreasingf asserts that the collection is decreasing // -// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted") -// assert.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted") -// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted") +// require.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted") +// require.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted") +// require.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted") func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1009,9 +1009,9 @@ func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface // IsIncreasing asserts that the collection is increasing // -// assert.IsIncreasing(t, []int{1, 2, 3}) -// assert.IsIncreasing(t, []float{1, 2}) -// assert.IsIncreasing(t, []string{"a", "b"}) +// require.IsIncreasing(t, []int{1, 2, 3}) +// require.IsIncreasing(t, []float{1, 2}) +// require.IsIncreasing(t, []string{"a", "b"}) func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1024,9 +1024,9 @@ func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { // IsIncreasingf asserts that the collection is increasing // -// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted") -// assert.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted") -// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted") +// require.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted") +// require.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted") +// require.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted") func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1039,9 +1039,9 @@ func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface // IsNonDecreasing asserts that the collection is not decreasing // -// assert.IsNonDecreasing(t, []int{1, 1, 2}) -// assert.IsNonDecreasing(t, []float{1, 2}) -// assert.IsNonDecreasing(t, []string{"a", "b"}) +// require.IsNonDecreasing(t, []int{1, 1, 2}) +// require.IsNonDecreasing(t, []float{1, 2}) +// require.IsNonDecreasing(t, []string{"a", "b"}) func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1054,9 +1054,9 @@ func 
IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) // IsNonDecreasingf asserts that the collection is not decreasing // -// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted") -// assert.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted") -// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted") +// require.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted") +// require.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted") +// require.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted") func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1069,9 +1069,9 @@ func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interf // IsNonIncreasing asserts that the collection is not increasing // -// assert.IsNonIncreasing(t, []int{2, 1, 1}) -// assert.IsNonIncreasing(t, []float{2, 1}) -// assert.IsNonIncreasing(t, []string{"b", "a"}) +// require.IsNonIncreasing(t, []int{2, 1, 1}) +// require.IsNonIncreasing(t, []float{2, 1}) +// require.IsNonIncreasing(t, []string{"b", "a"}) func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1084,9 +1084,9 @@ func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) // IsNonIncreasingf asserts that the collection is not increasing // -// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted") -// assert.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted") -// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted") +// require.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted") +// require.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted") +// require.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted") func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1121,7 +1121,7 @@ func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg strin // JSONEq asserts that two JSON strings are equivalent. // -// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +// require.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1134,7 +1134,7 @@ func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{ // JSONEqf asserts that two JSON strings are equivalent. // -// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") +// require.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1148,7 +1148,7 @@ func JSONEqf(t TestingT, expected string, actual string, msg string, args ...int // Len asserts that the specified object has specific length. // Len also fails if the object has a type that len() not accept. 
// -// assert.Len(t, mySlice, 3) +// require.Len(t, mySlice, 3) func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1162,7 +1162,7 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) // Lenf asserts that the specified object has specific length. // Lenf also fails if the object has a type that len() not accept. // -// assert.Lenf(t, mySlice, 3, "error message %s", "formatted") +// require.Lenf(t, mySlice, 3, "error message %s", "formatted") func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1175,9 +1175,9 @@ func Lenf(t TestingT, object interface{}, length int, msg string, args ...interf // Less asserts that the first element is less than the second // -// assert.Less(t, 1, 2) -// assert.Less(t, float64(1), float64(2)) -// assert.Less(t, "a", "b") +// require.Less(t, 1, 2) +// require.Less(t, float64(1), float64(2)) +// require.Less(t, "a", "b") func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1190,10 +1190,10 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) // LessOrEqual asserts that the first element is less than or equal to the second // -// assert.LessOrEqual(t, 1, 2) -// assert.LessOrEqual(t, 2, 2) -// assert.LessOrEqual(t, "a", "b") -// assert.LessOrEqual(t, "b", "b") +// require.LessOrEqual(t, 1, 2) +// require.LessOrEqual(t, 2, 2) +// require.LessOrEqual(t, "a", "b") +// require.LessOrEqual(t, "b", "b") func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1206,10 +1206,10 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter // LessOrEqualf asserts that the first element is less than or equal to the second // -// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted") -// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted") -// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted") -// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted") +// require.LessOrEqualf(t, 1, 2, "error message %s", "formatted") +// require.LessOrEqualf(t, 2, 2, "error message %s", "formatted") +// require.LessOrEqualf(t, "a", "b", "error message %s", "formatted") +// require.LessOrEqualf(t, "b", "b", "error message %s", "formatted") func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1222,9 +1222,9 @@ func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args . 
// Lessf asserts that the first element is less than the second // -// assert.Lessf(t, 1, 2, "error message %s", "formatted") -// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted") -// assert.Lessf(t, "a", "b", "error message %s", "formatted") +// require.Lessf(t, 1, 2, "error message %s", "formatted") +// require.Lessf(t, float64(1), float64(2), "error message %s", "formatted") +// require.Lessf(t, "a", "b", "error message %s", "formatted") func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1237,8 +1237,8 @@ func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...inter // Negative asserts that the specified element is negative // -// assert.Negative(t, -1) -// assert.Negative(t, -1.23) +// require.Negative(t, -1) +// require.Negative(t, -1.23) func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1251,8 +1251,8 @@ func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) { // Negativef asserts that the specified element is negative // -// assert.Negativef(t, -1, "error message %s", "formatted") -// assert.Negativef(t, -1.23, "error message %s", "formatted") +// require.Negativef(t, -1, "error message %s", "formatted") +// require.Negativef(t, -1.23, "error message %s", "formatted") func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1266,7 +1266,7 @@ func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) { // Never asserts that the given condition doesn't satisfy in waitFor time, // periodically checking the target function each tick. // -// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond) +// require.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond) func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1280,7 +1280,7 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D // Neverf asserts that the given condition doesn't satisfy in waitFor time, // periodically checking the target function each tick. // -// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +// require.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1293,7 +1293,7 @@ func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time. // Nil asserts that the specified object is nil. // -// assert.Nil(t, err) +// require.Nil(t, err) func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1306,7 +1306,7 @@ func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) { // Nilf asserts that the specified object is nil. 
// -// assert.Nilf(t, err, "error message %s", "formatted") +// require.Nilf(t, err, "error message %s", "formatted") func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1344,8 +1344,8 @@ func NoDirExistsf(t TestingT, path string, msg string, args ...interface{}) { // NoError asserts that a function returned no error (i.e. `nil`). // // actualObj, err := SomeFunction() -// if assert.NoError(t, err) { -// assert.Equal(t, expectedObj, actualObj) +// if require.NoError(t, err) { +// require.Equal(t, expectedObj, actualObj) // } func NoError(t TestingT, err error, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1360,8 +1360,8 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) { // NoErrorf asserts that a function returned no error (i.e. `nil`). // // actualObj, err := SomeFunction() -// if assert.NoErrorf(t, err, "error message %s", "formatted") { -// assert.Equal(t, expectedObj, actualObj) +// if require.NoErrorf(t, err, "error message %s", "formatted") { +// require.Equal(t, expectedObj, actualObj) // } func NoErrorf(t TestingT, err error, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1400,9 +1400,9 @@ func NoFileExistsf(t TestingT, path string, msg string, args ...interface{}) { // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the // specified substring or element. // -// assert.NotContains(t, "Hello World", "Earth") -// assert.NotContains(t, ["Hello", "World"], "Earth") -// assert.NotContains(t, {"Hello": "World"}, "Earth") +// require.NotContains(t, "Hello World", "Earth") +// require.NotContains(t, ["Hello", "World"], "Earth") +// require.NotContains(t, {"Hello": "World"}, "Earth") func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1416,9 +1416,9 @@ func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ... // NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the // specified substring or element. // -// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") -// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") -// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") +// require.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") +// require.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") +// require.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1429,11 +1429,51 @@ func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, a t.FailNow() } +// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. 
+// +// require.NotElementsMatch(t, [1, 1, 2, 3], [1, 1, 2, 3]) -> false +// +// require.NotElementsMatch(t, [1, 1, 2, 3], [1, 2, 3]) -> true +// +// require.NotElementsMatch(t, [1, 2, 3], [1, 2, 4]) -> true +func NotElementsMatch(t TestingT, listA interface{}, listB interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotElementsMatch(t, listA, listB, msgAndArgs...) { + return + } + t.FailNow() +} + +// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// require.NotElementsMatchf(t, [1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false +// +// require.NotElementsMatchf(t, [1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true +// +// require.NotElementsMatchf(t, [1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true +func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotElementsMatchf(t, listA, listB, msg, args...) { + return + } + t.FailNow() +} + // NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. // -// if assert.NotEmpty(t, obj) { -// assert.Equal(t, "two", obj[1]) +// if require.NotEmpty(t, obj) { +// require.Equal(t, "two", obj[1]) // } func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1448,8 +1488,8 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { // NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. // -// if assert.NotEmptyf(t, obj, "error message %s", "formatted") { -// assert.Equal(t, "two", obj[1]) +// if require.NotEmptyf(t, obj, "error message %s", "formatted") { +// require.Equal(t, "two", obj[1]) // } func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1463,7 +1503,7 @@ func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) // NotEqual asserts that the specified values are NOT equal. // -// assert.NotEqual(t, obj1, obj2) +// require.NotEqual(t, obj1, obj2) // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). @@ -1479,7 +1519,7 @@ func NotEqual(t TestingT, expected interface{}, actual interface{}, msgAndArgs . 
// NotEqualValues asserts that two objects are not equal even when converted to the same type // -// assert.NotEqualValues(t, obj1, obj2) +// require.NotEqualValues(t, obj1, obj2) func NotEqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1492,7 +1532,7 @@ func NotEqualValues(t TestingT, expected interface{}, actual interface{}, msgAnd // NotEqualValuesf asserts that two objects are not equal even when converted to the same type // -// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted") +// require.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted") func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1505,7 +1545,7 @@ func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg s // NotEqualf asserts that the specified values are NOT equal. // -// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted") +// require.NotEqualf(t, obj1, obj2, "error message %s", "formatted") // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). @@ -1519,7 +1559,31 @@ func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, t.FailNow() } -// NotErrorIs asserts that at none of the errors in err's chain matches target. +// NotErrorAs asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func NotErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotErrorAs(t, err, target, msgAndArgs...) { + return + } + t.FailNow() +} + +// NotErrorAsf asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func NotErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotErrorAsf(t, err, target, msg, args...) { + return + } + t.FailNow() +} + +// NotErrorIs asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func NotErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1531,7 +1595,7 @@ func NotErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) t.FailNow() } -// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// NotErrorIsf asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1545,7 +1609,7 @@ func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interf // NotImplements asserts that an object does not implement the specified interface. // -// assert.NotImplements(t, (*MyInterface)(nil), new(MyObject)) +// require.NotImplements(t, (*MyInterface)(nil), new(MyObject)) func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1558,7 +1622,7 @@ func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, // NotImplementsf asserts that an object does not implement the specified interface. 
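NotElementsMatch/NotElementsMatchf and NotErrorAs/NotErrorAsf are the only helpers newly added to require in the hunks above; most of the surrounding require.go changes merely swap the assert. prefix for require. in doc examples. A short sketch of both additions; the notFoundError type is a made-up stand-in:

package example

import (
	"errors"
	"fmt"
	"testing"

	"github.com/stretchr/testify/require"
)

// notFoundError is a hypothetical error type for the NotErrorAs demo.
type notFoundError struct{ key string }

func (e *notFoundError) Error() string { return "not found: " + e.key }

func TestNewRequireHelpers(t *testing.T) {
	// Passes: the multisets differ (the left list has an extra 1).
	require.NotElementsMatch(t, []int{1, 1, 2, 3}, []int{1, 2, 3})

	// Passes: nothing in the chain is a *notFoundError, so errors.As-style
	// matching fails and target stays nil.
	err := fmt.Errorf("query failed: %w", errors.New("timeout"))
	var target *notFoundError
	require.NotErrorAs(t, err, &target)
}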
// -// assert.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +// require.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1571,7 +1635,7 @@ func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, // NotNil asserts that the specified object is not nil. // -// assert.NotNil(t, err) +// require.NotNil(t, err) func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1584,7 +1648,7 @@ func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) { // NotNilf asserts that the specified object is not nil. // -// assert.NotNilf(t, err, "error message %s", "formatted") +// require.NotNilf(t, err, "error message %s", "formatted") func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1597,7 +1661,7 @@ func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) { // NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. // -// assert.NotPanics(t, func(){ RemainCalm() }) +// require.NotPanics(t, func(){ RemainCalm() }) func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1610,7 +1674,7 @@ func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { // NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. // -// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") +// require.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") func NotPanicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1623,8 +1687,8 @@ func NotPanicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interfac // NotRegexp asserts that a specified regexp does not match a string. // -// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") -// assert.NotRegexp(t, "^start", "it's not starting") +// require.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") +// require.NotRegexp(t, "^start", "it's not starting") func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1637,8 +1701,8 @@ func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interf // NotRegexpf asserts that a specified regexp does not match a string. // -// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") -// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") +// require.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") +// require.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1651,7 +1715,7 @@ func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args .. // NotSame asserts that two pointers do not reference the same object. // -// assert.NotSame(t, ptr1, ptr2) +// require.NotSame(t, ptr1, ptr2) // // Both arguments must be pointer variables. 
Pointer variable sameness is // determined based on the equality of both type and value. @@ -1667,7 +1731,7 @@ func NotSame(t TestingT, expected interface{}, actual interface{}, msgAndArgs .. // NotSamef asserts that two pointers do not reference the same object. // -// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted") +// require.NotSamef(t, ptr1, ptr2, "error message %s", "formatted") // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -1685,8 +1749,8 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, // contain all elements given in the specified subset list(array, slice...) or // map. // -// assert.NotSubset(t, [1, 3, 4], [1, 2]) -// assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) +// require.NotSubset(t, [1, 3, 4], [1, 2]) +// require.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1701,8 +1765,8 @@ func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...i // contain all elements given in the specified subset list(array, slice...) or // map. // -// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") -// assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") +// require.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") +// require.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1737,7 +1801,7 @@ func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) { // Panics asserts that the code inside the specified PanicTestFunc panics. // -// assert.Panics(t, func(){ GoCrazy() }) +// require.Panics(t, func(){ GoCrazy() }) func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1752,7 +1816,7 @@ func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { // panics, and that the recovered panic value is an error that satisfies the // EqualError comparison. // -// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() }) +// require.PanicsWithError(t, "crazy error", func(){ GoCrazy() }) func PanicsWithError(t TestingT, errString string, f assert.PanicTestFunc, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1767,7 +1831,7 @@ func PanicsWithError(t TestingT, errString string, f assert.PanicTestFunc, msgAn // panics, and that the recovered panic value is an error that satisfies the // EqualError comparison. // -// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +// require.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") func PanicsWithErrorf(t TestingT, errString string, f assert.PanicTestFunc, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1781,7 +1845,7 @@ func PanicsWithErrorf(t TestingT, errString string, f assert.PanicTestFunc, msg // PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that // the recovered panic value equals the expected panic value. 
// -// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) +// require.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) func PanicsWithValue(t TestingT, expected interface{}, f assert.PanicTestFunc, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1795,7 +1859,7 @@ func PanicsWithValue(t TestingT, expected interface{}, f assert.PanicTestFunc, m // PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that // the recovered panic value equals the expected panic value. // -// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +// require.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") func PanicsWithValuef(t TestingT, expected interface{}, f assert.PanicTestFunc, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1808,7 +1872,7 @@ func PanicsWithValuef(t TestingT, expected interface{}, f assert.PanicTestFunc, // Panicsf asserts that the code inside the specified PanicTestFunc panics. // -// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") +// require.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") func Panicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1821,8 +1885,8 @@ func Panicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{} // Positive asserts that the specified element is positive // -// assert.Positive(t, 1) -// assert.Positive(t, 1.23) +// require.Positive(t, 1) +// require.Positive(t, 1.23) func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1835,8 +1899,8 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) { // Positivef asserts that the specified element is positive // -// assert.Positivef(t, 1, "error message %s", "formatted") -// assert.Positivef(t, 1.23, "error message %s", "formatted") +// require.Positivef(t, 1, "error message %s", "formatted") +// require.Positivef(t, 1.23, "error message %s", "formatted") func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1849,8 +1913,8 @@ func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) { // Regexp asserts that a specified regexp matches a string. // -// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") -// assert.Regexp(t, "start...$", "it's not starting") +// require.Regexp(t, regexp.MustCompile("start"), "it's starting") +// require.Regexp(t, "start...$", "it's not starting") func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1863,8 +1927,8 @@ func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface // Regexpf asserts that a specified regexp matches a string. 
// -// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") -// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") +// require.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") +// require.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1877,7 +1941,7 @@ func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...in // Same asserts that two pointers reference the same object. // -// assert.Same(t, ptr1, ptr2) +// require.Same(t, ptr1, ptr2) // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -1893,7 +1957,7 @@ func Same(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...in // Samef asserts that two pointers reference the same object. // -// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted") +// require.Samef(t, ptr1, ptr2, "error message %s", "formatted") // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -1910,8 +1974,8 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg // Subset asserts that the specified list(array, slice...) or map contains all // elements given in the specified subset list(array, slice...) or map. // -// assert.Subset(t, [1, 2, 3], [1, 2]) -// assert.Subset(t, {"x": 1, "y": 2}, {"x": 1}) +// require.Subset(t, [1, 2, 3], [1, 2]) +// require.Subset(t, {"x": 1, "y": 2}, {"x": 1}) func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1925,8 +1989,8 @@ func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...inte // Subsetf asserts that the specified list(array, slice...) or map contains all // elements given in the specified subset list(array, slice...) or map. // -// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") -// assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") +// require.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") +// require.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1939,7 +2003,7 @@ func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args // True asserts that the specified value is true. // -// assert.True(t, myBool) +// require.True(t, myBool) func True(t TestingT, value bool, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1952,7 +2016,7 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) { // Truef asserts that the specified value is true. // -// assert.Truef(t, myBool, "error message %s", "formatted") +// require.Truef(t, myBool, "error message %s", "formatted") func Truef(t TestingT, value bool, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1965,7 +2029,7 @@ func Truef(t TestingT, value bool, msg string, args ...interface{}) { // WithinDuration asserts that the two times are within duration delta of each other. 
// -// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) +// require.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1978,7 +2042,7 @@ func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time // WithinDurationf asserts that the two times are within duration delta of each other. // -// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") +// require.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1991,7 +2055,7 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim // WithinRange asserts that a time is within a time range (inclusive). // -// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) +// require.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) func WithinRange(t TestingT, actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -2004,7 +2068,7 @@ func WithinRange(t TestingT, actual time.Time, start time.Time, end time.Time, m // WithinRangef asserts that a time is within a time range (inclusive). // -// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") +// require.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/require/require.go.tmpl b/vendor/github.com/stretchr/testify/require/require.go.tmpl index 55e42ddebdc..8b328368509 100644 --- a/vendor/github.com/stretchr/testify/require/require.go.tmpl +++ b/vendor/github.com/stretchr/testify/require/require.go.tmpl @@ -1,4 +1,4 @@ -{{.Comment}} +{{ replace .Comment "assert." "require."}} func {{.DocInfo.Name}}(t TestingT, {{.Params}}) { if h, ok := t.(tHelper); ok { h.Helper() } if assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) { return } diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go index eee8310a5fa..1bd87304f43 100644 --- a/vendor/github.com/stretchr/testify/require/require_forward.go +++ b/vendor/github.com/stretchr/testify/require/require_forward.go @@ -187,8 +187,8 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface EqualExportedValuesf(a.t, expected, actual, msg, args...) } -// EqualValues asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValues asserts that two objects are equal or convertible to the larger +// type and equal. // // a.EqualValues(uint32(123), int32(123)) func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { @@ -198,8 +198,8 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn EqualValues(a.t, expected, actual, msgAndArgs...) 
} -// EqualValuesf asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValuesf asserts that two objects are equal or convertible to the larger +// type and equal. // // a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) { @@ -337,7 +337,7 @@ func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, ti // a.EventuallyWithT(func(c *assert.CollectT) { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func (a *Assertions) EventuallyWithT(condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -362,7 +362,7 @@ func (a *Assertions) EventuallyWithT(condition func(collect *assert.CollectT), w // a.EventuallyWithTf(func(c *assert.CollectT, "error message %s", "formatted") { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func (a *Assertions) EventuallyWithTf(condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1129,6 +1129,40 @@ func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg strin NotContainsf(a.t, s, contains, msg, args...) } +// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to the specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// a.NotElementsMatch([1, 1, 2, 3], [1, 1, 2, 3]) -> false +// +// a.NotElementsMatch([1, 1, 2, 3], [1, 2, 3]) -> true +// +// a.NotElementsMatch([1, 2, 3], [1, 2, 4]) -> true +func (a *Assertions) NotElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotElementsMatch(a.t, listA, listB, msgAndArgs...) +} + +// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to the specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// a.NotElementsMatchf([1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false +// +// a.NotElementsMatchf([1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true +// +// a.NotElementsMatchf([1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true +func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotElementsMatchf(a.t, listA, listB, msg, args...) +} + +// NotEmpty asserts that the specified object is NOT empty. I.e.
not nil, "", false, 0 or either // a slice or a channel with len == 0. // @@ -1201,7 +1235,25 @@ func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg str NotEqualf(a.t, expected, actual, msg, args...) } -// NotErrorIs asserts that at none of the errors in err's chain matches target. +// NotErrorAs asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func (a *Assertions) NotErrorAs(err error, target interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotErrorAs(a.t, err, target, msgAndArgs...) +} + +// NotErrorAsf asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func (a *Assertions) NotErrorAsf(err error, target interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotErrorAsf(a.t, err, target, msg, args...) +} + +// NotErrorIs asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { @@ -1210,7 +1262,7 @@ func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface NotErrorIs(a.t, err, target, msgAndArgs...) } -// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// NotErrorIsf asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/require/requirements.go b/vendor/github.com/stretchr/testify/require/requirements.go index 91772dfeb91..6b7ce929eb1 100644 --- a/vendor/github.com/stretchr/testify/require/requirements.go +++ b/vendor/github.com/stretchr/testify/require/requirements.go @@ -6,7 +6,7 @@ type TestingT interface { FailNow() } -type tHelper interface { +type tHelper = interface { Helper() } diff --git a/vendor/github.com/tklauser/go-sysconf/.cirrus.yml b/vendor/github.com/tklauser/go-sysconf/.cirrus.yml index 1b27f196286..33e6595cca3 100644 --- a/vendor/github.com/tklauser/go-sysconf/.cirrus.yml +++ b/vendor/github.com/tklauser/go-sysconf/.cirrus.yml @@ -1,10 +1,10 @@ env: CIRRUS_CLONE_DEPTH: 1 - GO_VERSION: go1.20 + GO_VERSION: go1.22.2 -freebsd_12_task: +freebsd_13_task: freebsd_instance: - image_family: freebsd-12-3 + image_family: freebsd-13-2 install_script: | pkg install -y go GOBIN=$PWD/bin go install golang.org/dl/${GO_VERSION}@latest @@ -12,9 +12,9 @@ freebsd_12_task: build_script: bin/${GO_VERSION} build -v ./... test_script: bin/${GO_VERSION} test -race ./... -freebsd_13_task: +freebsd_14_task: freebsd_instance: - image_family: freebsd-13-0 + image_family: freebsd-14-0 install_script: | pkg install -y go GOBIN=$PWD/bin go install golang.org/dl/${GO_VERSION}@latest diff --git a/vendor/github.com/tklauser/go-sysconf/sysconf_bsd.go b/vendor/github.com/tklauser/go-sysconf/sysconf_bsd.go index 7c96157bb79..ec81c02ac88 100644 --- a/vendor/github.com/tklauser/go-sysconf/sysconf_bsd.go +++ b/vendor/github.com/tklauser/go-sysconf/sysconf_bsd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build darwin || dragonfly || freebsd || netbsd || openbsd -// +build darwin dragonfly freebsd netbsd openbsd package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/sysconf_darwin.go b/vendor/github.com/tklauser/go-sysconf/sysconf_darwin.go index 3f5d83f6920..b471ec10443 100644 --- a/vendor/github.com/tklauser/go-sysconf/sysconf_darwin.go +++ b/vendor/github.com/tklauser/go-sysconf/sysconf_darwin.go @@ -16,6 +16,10 @@ const ( _HOST_NAME_MAX = _MAXHOSTNAMELEN - 1 _LOGIN_NAME_MAX = _MAXLOGNAME _SYMLOOP_MAX = _MAXSYMLINKS + + // _PTHREAD_STACK_MIN changed in macOS 14 + _PTHREAD_STACK_MIN_LT_MACOS14 = 0x2000 + _PTHREAD_STACK_MIN_GE_MACOS14 = 0x4000 ) var uname struct { @@ -23,6 +27,21 @@ var uname struct { macOSMajor int } +func getMacOSMajor() int { + uname.Once.Do(func() { + var u unix.Utsname + err := unix.Uname(&u) + if err != nil { + return + } + rel := unix.ByteSliceToString(u.Release[:]) + ver := strings.Split(rel, ".") + maj, _ := strconv.Atoi(ver[0]) + uname.macOSMajor = maj + }) + return uname.macOSMajor +} + // sysconf implements sysconf(4) as in the Darwin libc (derived from the FreeBSD // libc), version 1534.81.1. // See https://github.com/apple-oss-distributions/Libc/tree/Libc-1534.81.1. @@ -91,7 +110,10 @@ func sysconf(name int) (int64, error) { case SC_THREAD_PRIO_PROTECT: return _POSIX_THREAD_PRIO_PROTECT, nil case SC_THREAD_STACK_MIN: - return _PTHREAD_STACK_MIN, nil + if getMacOSMajor() < 23 { + return _PTHREAD_STACK_MIN_LT_MACOS14, nil + } + return _PTHREAD_STACK_MIN_GE_MACOS14, nil case SC_THREAD_THREADS_MAX: return -1, nil case SC_TIMER_MAX: @@ -140,18 +162,7 @@ func sysconf(name int) (int64, error) { } return _POSIX_SEMAPHORES, nil case SC_SPAWN: - uname.Once.Do(func() { - var u unix.Utsname - err := unix.Uname(&u) - if err != nil { - return - } - rel := unix.ByteSliceToString(u.Release[:]) - ver := strings.Split(rel, ".") - maj, _ := strconv.Atoi(ver[0]) - uname.macOSMajor = maj - }) - if uname.macOSMajor < 22 { + if getMacOSMajor() < 22 { return -1, nil } // macOS 13 (Ventura) and later diff --git a/vendor/github.com/tklauser/go-sysconf/sysconf_generic.go b/vendor/github.com/tklauser/go-sysconf/sysconf_generic.go index 248bdc99cda..7dcc6f4cabf 100644 --- a/vendor/github.com/tklauser/go-sysconf/sysconf_generic.go +++ b/vendor/github.com/tklauser/go-sysconf/sysconf_generic.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd -// +build darwin dragonfly freebsd linux netbsd openbsd package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/sysconf_linux.go b/vendor/github.com/tklauser/go-sysconf/sysconf_linux.go index 5fb49ac7b6a..9af70070e2b 100644 --- a/vendor/github.com/tklauser/go-sysconf/sysconf_linux.go +++ b/vendor/github.com/tklauser/go-sysconf/sysconf_linux.go @@ -6,7 +6,6 @@ package sysconf import ( "bufio" - "io/ioutil" "os" "runtime" "strconv" @@ -26,7 +25,7 @@ const ( ) func readProcFsInt64(path string, fallback int64) int64 { - data, err := ioutil.ReadFile(path) + data, err := os.ReadFile(path) if err != nil { return fallback } @@ -86,10 +85,16 @@ func getNprocsProcStat() (int64, error) { s := bufio.NewScanner(f) for s.Scan() { if line := strings.TrimSpace(s.Text()); strings.HasPrefix(line, "cpu") { - l := strings.SplitN(line, " ", 2) - _, err := strconv.ParseInt(l[0][3:], 10, 64) - if err == nil { - count++ + cpu, _, found := strings.Cut(line, " ") + if found { + // skip first line with accumulated values + if cpu == "cpu" { + continue + } + _, err := strconv.ParseInt(cpu[len("cpu"):], 10, 64) + if err == nil { + count++ + } } } else { // The current format of /proc/stat has all the @@ -98,6 +103,9 @@ func getNprocsProcStat() (int64, error) { break } } + if err := s.Err(); err != nil { + return -1, err + } return count, nil } diff --git a/vendor/github.com/tklauser/go-sysconf/sysconf_netbsd.go b/vendor/github.com/tklauser/go-sysconf/sysconf_netbsd.go index 325d4a6a83f..40f6c345fcd 100644 --- a/vendor/github.com/tklauser/go-sysconf/sysconf_netbsd.go +++ b/vendor/github.com/tklauser/go-sysconf/sysconf_netbsd.go @@ -25,10 +25,10 @@ const ( _POSIX2_UPE = -1 ) -var ( - clktck int64 - clktckOnce sync.Once -) +var clktck struct { + sync.Once + v int64 +} func sysconfPOSIX(name int) (int64, error) { // NetBSD does not define all _POSIX_* values used in sysconf_posix.go @@ -42,7 +42,6 @@ func sysconf(name int) (int64, error) { // Duplicate the relevant values here. switch name { - // 1003.1 case SC_ARG_MAX: return sysctl32("kern.argmax"), nil @@ -55,13 +54,14 @@ func sysconf(name int) (int64, error) { } return -1, nil case SC_CLK_TCK: - clktckOnce.Do(func() { - clktck = -1 + // TODO: use sync.OnceValue once Go 1.21 is the minimal supported version + clktck.Do(func() { + clktck.v = -1 if ci, err := unix.SysctlClockinfo("kern.clockrate"); err == nil { - clktck = int64(ci.Hz) + clktck.v = int64(ci.Hz) } }) - return clktck, nil + return clktck.v, nil case SC_NGROUPS_MAX: return sysctl32("kern.ngroups"), nil case SC_JOB_CONTROL: diff --git a/vendor/github.com/tklauser/go-sysconf/sysconf_posix.go b/vendor/github.com/tklauser/go-sysconf/sysconf_posix.go index e61c0bc73e4..830d8220b51 100644 --- a/vendor/github.com/tklauser/go-sysconf/sysconf_posix.go +++ b/vendor/github.com/tklauser/go-sysconf/sysconf_posix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin || dragonfly || freebsd || linux || openbsd -// +build darwin dragonfly freebsd linux openbsd package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/sysconf_unsupported.go b/vendor/github.com/tklauser/go-sysconf/sysconf_unsupported.go index 478d692005e..5aa9119db73 100644 --- a/vendor/github.com/tklauser/go-sysconf/sysconf_unsupported.go +++ b/vendor/github.com/tklauser/go-sysconf/sysconf_unsupported.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_darwin.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_darwin.go index 6fadf3db1fa..80b64393bc6 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_darwin.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_darwin.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_defs_darwin.go //go:build darwin -// +build darwin package sysconf @@ -235,7 +234,6 @@ const ( _PTHREAD_DESTRUCTOR_ITERATIONS = 0x4 _PTHREAD_KEYS_MAX = 0x200 - _PTHREAD_STACK_MIN = 0x2000 ) const ( diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_dragonfly.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_dragonfly.go index 0864cd44827..dae56570c02 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_dragonfly.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_dragonfly.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_defs_dragonfly.go //go:build dragonfly -// +build dragonfly package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_freebsd.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_freebsd.go index 9885411acbd..068f8a7edaf 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_freebsd.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_freebsd.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_defs_freebsd.go //go:build freebsd -// +build freebsd package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_linux.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_linux.go index 8545a342b90..12f289d76fa 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_linux.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_linux.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_defs_linux.go //go:build linux -// +build linux package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_netbsd.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_netbsd.go index d2aaf077704..772af475a46 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_netbsd.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_netbsd.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_defs_netbsd.go //go:build netbsd -// +build netbsd package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_openbsd.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_openbsd.go index badc66cbda8..625b098f913 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_openbsd.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_openbsd.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_defs_openbsd.go //go:build openbsd -// +build openbsd package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_solaris.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_solaris.go index 29b6f8746a1..c155cf57966 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_solaris.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_solaris.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_defs_solaris.go //go:build solaris -// +build solaris package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_386.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_386.go index 478fe63a98e..b5d4807482c 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_386.go +++ 
b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_386.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_values_freebsd.go //go:build freebsd && 386 -// +build freebsd,386 package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_amd64.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_amd64.go index 7f58a4d8bac..89c880aae27 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_amd64.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_amd64.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_values_freebsd.go //go:build freebsd && amd64 -// +build freebsd,amd64 package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_arm.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_arm.go index deb47595ba4..7b65fdd6fb3 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_arm.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_arm.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_values_freebsd.go //go:build freebsd && arm -// +build freebsd,arm package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_arm64.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_arm64.go index 556ba3da212..a86cb32bdfa 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_arm64.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_arm64.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_values_freebsd.go //go:build freebsd && arm64 -// +build freebsd,arm64 package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_riscv64.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_riscv64.go index b7cff760b16..6c847aeeaa7 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_riscv64.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_riscv64.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_values_freebsd.go //go:build freebsd && riscv64 -// +build freebsd,riscv64 package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_386.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_386.go index 16ee7ea64cf..90963eb422c 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_386.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_386.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_values_linux.go //go:build linux && 386 -// +build linux,386 package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_amd64.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_amd64.go index 39aee349f21..28ad6f18383 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_amd64.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_amd64.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_values_linux.go //go:build linux && amd64 -// +build linux,amd64 package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_arm.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_arm.go index 2e401164e3f..ffbcf37d403 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_arm.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_arm.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_values_linux.go //go:build linux && arm -// +build linux,arm package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_arm64.go 
b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_arm64.go index 362403abccd..cc9f4d88d43 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_arm64.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_arm64.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_values_linux.go //go:build linux && arm64 -// +build linux,arm64 package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_loong64.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_loong64.go index 95a71f4a2c7..f62b15a6978 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_loong64.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_loong64.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_values_linux.go //go:build linux && loong64 -// +build linux,loong64 package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mips.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mips.go index 868b0ffb336..37f492a81f1 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mips.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mips.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_values_linux.go //go:build linux && mips -// +build linux,mips package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mips64.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mips64.go index 5949f3d71fd..ae7b7f9c238 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mips64.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mips64.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_values_linux.go //go:build linux && mips64 -// +build linux,mips64 package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mips64le.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mips64le.go index 1853419a32d..fe14670f2ab 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mips64le.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mips64le.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_values_linux.go //go:build linux && mips64le -// +build linux,mips64le package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mipsle.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mipsle.go index ff41b3469bb..d204585be95 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mipsle.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mipsle.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_values_linux.go //go:build linux && mipsle -// +build linux,mipsle package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_ppc64.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_ppc64.go index 388743728ac..9ec78d335ee 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_ppc64.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_ppc64.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_values_linux.go //go:build linux && ppc64 -// +build linux,ppc64 package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_ppc64le.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_ppc64le.go index 6d76929a64c..a5420672984 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_ppc64le.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_ppc64le.go @@ -2,7 +2,6 @@ // cgo 
-godefs sysconf_values_linux.go //go:build linux && ppc64le -// +build linux,ppc64le package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_riscv64.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_riscv64.go index 3d7d71b3221..bfb923920a1 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_riscv64.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_riscv64.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_values_linux.go //go:build linux && riscv64 -// +build linux,riscv64 package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_s390x.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_s390x.go index 9cf8529f53a..6e935c87359 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_s390x.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_s390x.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_values_linux.go //go:build linux && s390x -// +build linux,s390x package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_386.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_386.go index 3cd64dd6626..ea0b24a822e 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_386.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_386.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_values_netbsd.go //go:build netbsd && 386 -// +build netbsd,386 package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_amd64.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_amd64.go index 02fc1d0ef93..2d377e253ce 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_amd64.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_amd64.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_values_netbsd.go //go:build netbsd && amd64 -// +build netbsd,amd64 package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_arm.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_arm.go index 16f9b6e71eb..4a6d83670a5 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_arm.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_arm.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_values_netbsd.go //go:build netbsd && arm -// +build netbsd,arm package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_arm64.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_arm64.go index e530339ca7e..49fb6725ef4 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_arm64.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_arm64.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_values_netbsd.go //go:build netbsd && arm64 -// +build netbsd,arm64 package sysconf diff --git a/vendor/github.com/tklauser/numcpus/.cirrus.yml b/vendor/github.com/tklauser/numcpus/.cirrus.yml index 69c6ced5c75..33e6595cca3 100644 --- a/vendor/github.com/tklauser/numcpus/.cirrus.yml +++ b/vendor/github.com/tklauser/numcpus/.cirrus.yml @@ -1,13 +1,23 @@ env: CIRRUS_CLONE_DEPTH: 1 - GO_VERSION: go1.20 + GO_VERSION: go1.22.2 -freebsd_12_task: +freebsd_13_task: freebsd_instance: - image_family: freebsd-12-3 + image_family: freebsd-13-2 install_script: | pkg install -y go GOBIN=$PWD/bin go install golang.org/dl/${GO_VERSION}@latest bin/${GO_VERSION} download - build_script: bin/${GO_VERSION} build -buildvcs=false -v ./... 
- test_script: bin/${GO_VERSION} test -buildvcs=false -race ./... + build_script: bin/${GO_VERSION} build -v ./... + test_script: bin/${GO_VERSION} test -race ./... + +freebsd_14_task: + freebsd_instance: + image_family: freebsd-14-0 + install_script: | + pkg install -y go + GOBIN=$PWD/bin go install golang.org/dl/${GO_VERSION}@latest + bin/${GO_VERSION} download + build_script: bin/${GO_VERSION} build -v ./... + test_script: bin/${GO_VERSION} test -race ./... diff --git a/vendor/github.com/tklauser/numcpus/numcpus_bsd.go b/vendor/github.com/tklauser/numcpus/numcpus_bsd.go index 9e77e38e6e0..efd8db0f1c8 100644 --- a/vendor/github.com/tklauser/numcpus/numcpus_bsd.go +++ b/vendor/github.com/tklauser/numcpus/numcpus_bsd.go @@ -13,7 +13,6 @@ // limitations under the License. //go:build darwin || dragonfly || freebsd || netbsd || openbsd -// +build darwin dragonfly freebsd netbsd openbsd package numcpus diff --git a/vendor/github.com/tklauser/numcpus/numcpus_linux.go b/vendor/github.com/tklauser/numcpus/numcpus_linux.go index 1a30525b873..7e75cb06164 100644 --- a/vendor/github.com/tklauser/numcpus/numcpus_linux.go +++ b/vendor/github.com/tklauser/numcpus/numcpus_linux.go @@ -15,7 +15,6 @@ package numcpus import ( - "io/ioutil" "os" "path/filepath" "strconv" @@ -35,7 +34,7 @@ func getFromCPUAffinity() (int, error) { } func readCPURange(file string) (int, error) { - buf, err := ioutil.ReadFile(filepath.Join(sysfsCPUBasePath, file)) + buf, err := os.ReadFile(filepath.Join(sysfsCPUBasePath, file)) if err != nil { return 0, err } @@ -48,16 +47,16 @@ func parseCPURange(cpus string) (int, error) { if len(cpuRange) == 0 { continue } - rangeOp := strings.SplitN(cpuRange, "-", 2) - first, err := strconv.ParseUint(rangeOp[0], 10, 32) + from, to, found := strings.Cut(cpuRange, "-") + first, err := strconv.ParseUint(from, 10, 32) if err != nil { return 0, err } - if len(rangeOp) == 1 { + if !found { n++ continue } - last, err := strconv.ParseUint(rangeOp[1], 10, 32) + last, err := strconv.ParseUint(to, 10, 32) if err != nil { return 0, err } @@ -89,7 +88,7 @@ func getConfigured() (int, error) { } func getKernelMax() (int, error) { - buf, err := ioutil.ReadFile(filepath.Join(sysfsCPUBasePath, "kernel_max")) + buf, err := os.ReadFile(filepath.Join(sysfsCPUBasePath, "kernel_max")) if err != nil { return 0, err } diff --git a/vendor/github.com/tklauser/numcpus/numcpus_solaris.go b/vendor/github.com/tklauser/numcpus/numcpus_solaris.go index a264323781b..f3b632fe748 100644 --- a/vendor/github.com/tklauser/numcpus/numcpus_solaris.go +++ b/vendor/github.com/tklauser/numcpus/numcpus_solaris.go @@ -13,7 +13,6 @@ // limitations under the License. //go:build solaris -// +build solaris package numcpus diff --git a/vendor/github.com/tklauser/numcpus/numcpus_unsupported.go b/vendor/github.com/tklauser/numcpus/numcpus_unsupported.go index 4a0b7c43d21..e72355eca5f 100644 --- a/vendor/github.com/tklauser/numcpus/numcpus_unsupported.go +++ b/vendor/github.com/tklauser/numcpus/numcpus_unsupported.go @@ -13,7 +13,6 @@ // limitations under the License. //go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package numcpus diff --git a/vendor/github.com/twmb/franz-go/LICENSE b/vendor/github.com/twmb/franz-go/LICENSE new file mode 100644 index 00000000000..36e18034325 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/LICENSE @@ -0,0 +1,24 @@ +Copyright 2020, Travis Bischel. +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the library nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/twmb/franz-go/pkg/kadm/LICENSE b/vendor/github.com/twmb/franz-go/pkg/kadm/LICENSE new file mode 100644 index 00000000000..36e18034325 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kadm/LICENSE @@ -0,0 +1,24 @@ +Copyright 2020, Travis Bischel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the library nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
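The vendored files below add franz-go's kadm package, the admin-client layer for ACL, topic, and group administration; its acls.go defines the ACLBuilder documented at length in the diff that follows. As a minimal wiring sketch, assuming franz-go's published kgo.NewClient / kadm.NewClient / DescribeACLs API (those entry points live outside this excerpt) and a hypothetical localhost broker:

package main

import (
	"context"
	"fmt"

	"github.com/twmb/franz-go/pkg/kadm"
	"github.com/twmb/franz-go/pkg/kgo"
)

func main() {
	// kadm wraps an existing kgo.Client and issues admin requests over
	// its connections; the broker address here is a placeholder.
	cl, err := kgo.NewClient(kgo.SeedBrokers("localhost:9092"))
	if err != nil {
		panic(err)
	}
	defer cl.Close()
	adm := kadm.NewClient(cl)

	// A maximally wide filter: every field is opted into "any".
	// DescribeACLs and its signature are assumed from franz-go's docs;
	// only the builder itself appears in the vendored file below.
	filter := kadm.NewACLs().
		AnyResource().
		Allow().AllowHosts().
		Deny().DenyHosts().
		Operations(kadm.OpAny).
		ResourcePatternType(kadm.ACLPatternAny)
	described, err := adm.DescribeACLs(context.Background(), filter)
	fmt.Println(described, err)
}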
diff --git a/vendor/github.com/twmb/franz-go/pkg/kadm/acls.go b/vendor/github.com/twmb/franz-go/pkg/kadm/acls.go new file mode 100644 index 00000000000..62676b5b8c0 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kadm/acls.go @@ -0,0 +1,1117 @@ +package kadm + +import ( + "context" + "fmt" + "strings" + "sync" + + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" +) + +// ACLBuilder is a builder that is used for batch creating / listing / deleting +// ACLs. +// +// An ACL consists of five components: +// +// - the user (principal) +// - the host the user runs on +// - what resource to access (topic name, group id, etc.) +// - the operation (read, write) +// - whether to allow or deny the above +// +// This builder allows for adding the above five components in batches and then +// creating, listing, or deleting a batch of ACLs in one go. This builder +// merges the fifth component (allowing or denying) into allowing principals +// and hosts and denying principals and hosts. The builder must always have an +// Allow or Deny. For creating, the host is optional and defaults to the +// wildcard * that allows or denies all hosts. For listing / deleting, the host +// is also required (specifying no hosts matches all hosts, but you must +// specify this). +// +// Building works on a multiplying factor: every user, every host, every +// resource, and every operation is combined (principals * hosts * resources * +// operations). +// +// With the Kafka simple authorizer (and most reimplementations), all +// principals are required to have the "User:" prefix. The PrefixUserExcept +// function can be used to easily add the "User:" prefix if missing. +// +// The full set of operations and which requests require what operations is +// described in a large doc comment on the ACLOperation type. +// +// Lastly, resources to access / deny access to can be created / matched based +// on literal (exact) names, or on prefix names, or more. See the ACLPattern +// docs for more information. +type ACLBuilder struct { + any []string + anyResource bool + topics []string + anyTopic bool + groups []string + anyGroup bool + anyCluster bool + txnIDs []string + anyTxn bool + tokens []string + anyToken bool + + allow []string + anyAllow bool + allowHosts []string + anyAllowHosts bool + deny []string + anyDeny bool + denyHosts []string + anyDenyHosts bool + + ops []ACLOperation + + pattern ACLPattern +} + +// PrefixUser prefixes all allowed and denied principals with "User:". +func (b *ACLBuilder) PrefixUser() { + b.PrefixUserExcept() +} + +// PrefixUserExcept prefixes all allowed and denied principals with "User:", +// unless they have any of the given except prefixes. +func (b *ACLBuilder) PrefixUserExcept(except ...string) { + replace := func(u string) string { + if !strings.HasPrefix(u, "User:") { + for _, e := range except { + if strings.HasPrefix(u, e) { + return u + } + } + return "User:" + u + } + return u + } + + for i, u := range b.allow { + b.allow[i] = replace(u) + } + for i, u := range b.deny { + b.deny[i] = replace(u) + } +} + +// NewACLs returns a new ACL builder. +func NewACLs() *ACLBuilder { + return new(ACLBuilder) +} + +// AnyResource lists & deletes ACLs of any type matching the given names +// (pending other filters). If no names are given, this matches all names. +// +// This returns the input pointer. +// +// This function does nothing for creating.
+func (b *ACLBuilder) AnyResource(name ...string) *ACLBuilder { + b.any = name + if len(name) == 0 { + b.anyResource = true + } + return b +} + +// Topics lists/deletes/creates ACLs of resource type "topic" for the given +// topics. +// +// This returns the input pointer. +// +// For listing or deleting, if this is provided no topics, all "topic" resource +// type ACLs are matched. For creating, if no topics are provided, this +// function does nothing. +func (b *ACLBuilder) Topics(t ...string) *ACLBuilder { + b.topics = t + if len(t) == 0 { + b.anyTopic = true + } + return b +} + +// MaybeTopics is the same as Topics, but does not match all topics if none are +// provided. +func (b *ACLBuilder) MaybeTopics(t ...string) *ACLBuilder { b.topics = t; return b } + +// Groups lists/deletes/creates ACLs of resource type "group" for the given +// groups. +// +// This returns the input pointer. +// +// For listing or deleting, if this is provided no groups, all "group" resource +// type ACLs are matched. For creating, if no groups are provided, this +// function does nothing. +func (b *ACLBuilder) Groups(g ...string) *ACLBuilder { + b.groups = g + if len(g) == 0 { + b.anyGroup = true + } + return b +} + +// MaybeGroups is the same as Groups, but does not match all groups if none are +// provided. +func (b *ACLBuilder) MaybeGroups(g ...string) *ACLBuilder { b.groups = g; return b } + +// Clusters lists/deletes/creates ACLs of resource type "cluster". +// +// This returns the input pointer. +// +// There is only one type of cluster in Kafka, "kafka-cluster". Opting in to +// listing or deleting by cluster inherently matches all ACLs of resource type +// cluster. For creating, this function allows for creating cluster ACLs. +func (b *ACLBuilder) Clusters() *ACLBuilder { + b.anyCluster = true + return b +} + +// MaybeClusters is the same as Clusters, but only matches clusters if c is +// true. +func (b *ACLBuilder) MaybeClusters(c bool) *ACLBuilder { b.anyCluster = c; return b } + +// TransactionalIDs lists/deletes/creates ACLs of resource type +// "transactional_id" for the given transactional IDs. +// +// This returns the input pointer. +// +// For listing or deleting, if this is provided no IDs, all "transactional_id" +// resource type ACLs are matched. For creating, if no IDs are provided, this +// function does nothing. +func (b *ACLBuilder) TransactionalIDs(x ...string) *ACLBuilder { + b.txnIDs = x + if len(x) == 0 { + b.anyTxn = true + } + return b +} + +// MaybeTransactionalIDs is the same as TransactionalIDs, but does not match +// all transactional IDs if none are provided. +func (b *ACLBuilder) MaybeTransactionalIDs(x ...string) *ACLBuilder { b.txnIDs = x; return b } + +// DelegationTokens lists/deletes/creates ACLs of resource type +// "delegation_token" for the given delegation tokens. +// +// This returns the input pointer. +// +// For listing or deleting, if this is provided no tokens, all +// "delegation_token" resource type ACLs are matched. For creating, if no +// tokens are provided, this function does nothing. +func (b *ACLBuilder) DelegationTokens(t ...string) *ACLBuilder { + b.tokens = t + if len(t) == 0 { + b.anyToken = true + } + return b +} + +// MaybeDelegationTokens is the same as DelegationTokens, but does not match +// all tokens if none are provided. +func (b *ACLBuilder) MaybeDelegationTokens(t ...string) *ACLBuilder { b.tokens = t; return b } + +// Allow sets the principals to add allow permissions for. For listing and +// deleting, you must also use AllowHosts.
+// +// This returns the input pointer. +// +// For creating, if this is not paired with AllowHosts, the user will have +// access to all hosts (the wildcard *). +// +// For listing & deleting, if the principals are empty, this matches any user. +func (b *ACLBuilder) Allow(principals ...string) *ACLBuilder { + b.allow = principals + if len(principals) == 0 { + b.anyAllow = true + } + return b +} + +// MaybeAllow is the same as Allow, but does not match all allowed principals +// if none are provided. +func (b *ACLBuilder) MaybeAllow(principals ...string) *ACLBuilder { b.allow = principals; return b } + +// AllowHosts sets the hosts to add allow permissions for. If using this, you +// must also use Allow. +// +// This returns the input pointer. +// +// For creating, if this is empty, the user will have access to all hosts (the +// wildcard *) and this function is actually not necessary. +// +// For listing & deleting, if the hosts are empty, this matches any host. +func (b *ACLBuilder) AllowHosts(hosts ...string) *ACLBuilder { + b.allowHosts = hosts + if len(hosts) == 0 { + b.anyAllowHosts = true + } + return b +} + +// MaybeAllowHosts is the same as AllowHosts, but does not match all allowed +// hosts if none are provided. +func (b *ACLBuilder) MaybeAllowHosts(hosts ...string) *ACLBuilder { b.allowHosts = hosts; return b } + +// Deny sets the principals to add deny permissions for. For listing and +// deleting, you must also use DenyHosts. +// +// This returns the input pointer. +// +// For creating, if this is not paired with DenyHosts, the user will be denied +// access to all hosts (the wildcard *). +// +// For listing & deleting, if the principals are empty, this matches any user. +func (b *ACLBuilder) Deny(principals ...string) *ACLBuilder { + b.deny = principals + if len(principals) == 0 { + b.anyDeny = true + } + return b +} + +// MaybeDeny is the same as Deny, but does not match all denied principals if +// none are provided. +func (b *ACLBuilder) MaybeDeny(principals ...string) *ACLBuilder { b.deny = principals; return b } + +// DenyHosts sets the hosts to add deny permissions for. If using this, you +// must also use Deny. +// +// This returns the input pointer. +// +// For creating, if this is empty, the user will be denied access to all hosts +// (the wildcard *) and this function is actually not necessary. +// +// For listing & deleting, if the hosts are empty, this matches any host. +func (b *ACLBuilder) DenyHosts(hosts ...string) *ACLBuilder { + b.denyHosts = hosts + if len(hosts) == 0 { + b.anyDenyHosts = true + } + return b +} + +// MaybeDenyHosts is the same as DenyHosts, but does not match all denied +// hosts if none are provided. +func (b *ACLBuilder) MaybeDenyHosts(hosts ...string) *ACLBuilder { b.denyHosts = hosts; return b } + +// ACLOperation is a type alias for kmsg.ACLOperation, which is an enum +// containing all Kafka ACL operations and has helper functions. 
// +// Kafka requests require the following operations (broker <=> broker ACLs +// elided): +// +// PRODUCING/CONSUMING +// =================== +// Produce WRITE on TOPIC for topics +// WRITE on TRANSACTIONAL_ID for txn id (if transactionally producing) +// +// Fetch READ on TOPIC for topics +// +// ListOffsets DESCRIBE on TOPIC for topics +// +// Metadata DESCRIBE on TOPIC for topics +// CREATE on CLUSTER for kafka-cluster (if automatically creating new topics) +// CREATE on TOPIC for topics (if automatically creating new topics) +// +// OffsetForLeaderEpoch DESCRIBE on TOPIC for topics +// +// GROUPS +// ====== +// FindCoordinator DESCRIBE on GROUP for group (if finding group coordinator) +// DESCRIBE on TRANSACTIONAL_ID for id (if finding transactional coordinator) +// +// OffsetCommit READ on GROUP for group +// READ on TOPIC for topics +// +// OffsetFetch DESCRIBE on GROUP for group +// DESCRIBE on TOPIC for topics +// +// OffsetDelete DELETE on GROUP for group +// READ on TOPIC for topics +// +// JoinGroup READ on GROUP for group +// Heartbeat READ on GROUP for group +// LeaveGroup READ on GROUP for group +// SyncGroup READ on GROUP for group +// +// DescribeGroup DESCRIBE on GROUP for groups +// +// ListGroups DESCRIBE on GROUP for groups +// or, DESCRIBE on CLUSTER for kafka-cluster +// +// DeleteGroups DELETE on GROUP for groups +// +// TRANSACTIONS (including FindCoordinator above) +// ============ +// InitProducerID WRITE on TRANSACTIONAL_ID for id, if using transactions +// or, IDEMPOTENT_WRITE on CLUSTER for kafka-cluster, if pre Kafka 3.0 +// or, WRITE on TOPIC for any topic, if Kafka 3.0+ +// +// AddPartitionsToTxn WRITE on TRANSACTIONAL_ID for id +// WRITE on TOPIC for topics +// +// AddOffsetsToTxn WRITE on TRANSACTIONAL_ID for id +// READ on GROUP for group +// +// EndTxn WRITE on TRANSACTIONAL_ID for id +// +// TxnOffsetCommit WRITE on TRANSACTIONAL_ID for id +// READ on GROUP for group +// READ on TOPIC for topics +// +// TOPIC ADMIN +// =========== +// CreateTopics CREATE on CLUSTER for kafka-cluster +// CREATE on TOPIC for topics +// DESCRIBE_CONFIGS on TOPIC for topics, for returning topic configs on create +// +// CreatePartitions ALTER on TOPIC for topics +// +// DeleteTopics DELETE on TOPIC for topics +// DESCRIBE on TOPIC for topics, if deleting by topic id (in addition to prior ACL) +// +// DeleteRecords DELETE on TOPIC for topics +// +// CONFIG ADMIN +// ============ +// DescribeConfigs DESCRIBE_CONFIGS on CLUSTER for kafka-cluster, for broker or broker-logger describing +// DESCRIBE_CONFIGS on TOPIC for topics, for topic describing +// +// AlterConfigs ALTER_CONFIGS on CLUSTER for kafka-cluster, for broker altering +// ALTER_CONFIGS on TOPIC for topics, for topic altering +// +// IncrementalAlterConfigs ALTER_CONFIGS on CLUSTER for kafka-cluster, for broker or broker-logger altering +// ALTER_CONFIGS on TOPIC for topics, for topic altering +// +// +// MISC ADMIN +// ========== +// AlterReplicaLogDirs ALTER on CLUSTER for kafka-cluster +// DescribeLogDirs DESCRIBE on CLUSTER for kafka-cluster +// +// AlterPartitionAssignments ALTER on CLUSTER for kafka-cluster +// ListPartitionReassignments DESCRIBE on CLUSTER for kafka-cluster +// +// DescribeDelegationTokens DESCRIBE on DELEGATION_TOKEN for id +// +// ElectLeaders ALTER on CLUSTER for kafka-cluster +// +// DescribeClientQuotas DESCRIBE_CONFIGS on CLUSTER for kafka-cluster +// AlterClientQuotas ALTER_CONFIGS on CLUSTER for kafka-cluster +// +// DescribeUserScramCredentials DESCRIBE on CLUSTER for kafka-cluster
+// AlterUserScramCredentials ALTER on CLUSTER for kafka-cluster +// +// UpdateFeatures ALTER on CLUSTER for kafka-cluster +// +// DescribeCluster DESCRIBE on CLUSTER for kafka-cluster +// +// DescribeProducerIDs READ on TOPIC for topics +// DescribeTransactions DESCRIBE on TRANSACTIONAL_ID for ids +// DESCRIBE on TOPIC for topics +// ListTransactions DESCRIBE on TRANSACTIONAL_ID for ids +type ACLOperation = kmsg.ACLOperation + +const ( + // OpUnknown is returned for unknown operations. + OpUnknown ACLOperation = kmsg.ACLOperationUnknown + + // OpAny, used for listing and deleting, matches any operation. + OpAny ACLOperation = kmsg.ACLOperationAny + + // OpAll is a shortcut for allowing / denying all operations. + OpAll ACLOperation = kmsg.ACLOperationAll + + // OpRead is the READ operation. + OpRead ACLOperation = kmsg.ACLOperationRead + + // OpWrite is the WRITE operation. + OpWrite ACLOperation = kmsg.ACLOperationWrite + + // OpCreate is the CREATE operation. + OpCreate ACLOperation = kmsg.ACLOperationCreate + + // OpDelete is the DELETE operation. + OpDelete ACLOperation = kmsg.ACLOperationDelete + + // OpAlter is the ALTER operation. + OpAlter ACLOperation = kmsg.ACLOperationAlter + + // OpDescribe is the DESCRIBE operation. + OpDescribe ACLOperation = kmsg.ACLOperationDescribe + + // OpClusterAction is the CLUSTER_ACTION operation. This operation is + // used for any broker<=>broker communication and is not needed by + // clients. + OpClusterAction ACLOperation = kmsg.ACLOperationClusterAction + + // OpDescribeConfigs is the DESCRIBE_CONFIGS operation. + OpDescribeConfigs ACLOperation = kmsg.ACLOperationDescribeConfigs + + // OpAlterConfigs is the ALTER_CONFIGS operation. + OpAlterConfigs ACLOperation = kmsg.ACLOperationAlterConfigs + + // OpIdempotentWrite is the IDEMPOTENT_WRITE operation. As of Kafka + // 3.0+, this has been deprecated and replaced by the ability to WRITE + // on any topic. + OpIdempotentWrite ACLOperation = kmsg.ACLOperationIdempotentWrite +) + +// Operations sets operations to allow or deny. Passing no operations defaults +// to OpAny. +// +// This returns the input pointer. +// +// For creating, OpAny returns an error, for it is strictly used for filters +// (listing & deleting). +func (b *ACLBuilder) Operations(operations ...ACLOperation) *ACLBuilder { + b.ops = operations + if len(operations) == 0 { + b.ops = []ACLOperation{OpAny} + } + return b +} + +// MaybeOperations is the same as Operations, but does not match all operations +// if none are provided. +func (b *ACLBuilder) MaybeOperations(operations ...ACLOperation) *ACLBuilder { + if len(operations) > 0 { + b.Operations(operations...) + } + return b +} + +// ACLPattern is a type alias for kmsg.ACLResourcePatternType, which is an enum +// containing all Kafka ACL resource pattern options. +// +// Creating/listing/deleting ACLs works on a resource name basis: every ACL +// created has a name, and every ACL filtered for listing / deleting matches by +// name. The name by default is "literal", meaning created ACLs will have the +// exact name, and matched ACLs must match completely. +// +// Prefixed names allow for creating an ACL that matches any prefix: principals +// foo-bar and foo-baz both have the prefix "foo-", meaning a READ on TOPIC for +// User:foo- with prefix pattern will allow both of those principals to read +// the topic. +// +// Any and match are used for listing and deleting. Any will match any name, be +// it literal or prefix or a wildcard name. 
There is no need for specifying +// topics, groups, etc. when using any resource pattern. +// +// Alternatively, match requires a name, but it matches any literal name (exact +// match), any prefix, and any wildcard. +type ACLPattern = kmsg.ACLResourcePatternType + +const ( + // ACLPatternUnknown is returned for unknown patterns. + ACLPatternUnknown ACLPattern = kmsg.ACLResourcePatternTypeUnknown + + // ACLPatternAny is the ANY resource pattern. + ACLPatternAny ACLPattern = kmsg.ACLResourcePatternTypeAny + + // ACLPatternMatch is the MATCH resource pattern. + ACLPatternMatch ACLPattern = kmsg.ACLResourcePatternTypeMatch + + // ACLPatternLiteral is the LITERAL resource pattern, the default. + ACLPatternLiteral ACLPattern = kmsg.ACLResourcePatternTypeLiteral + + // ACLPatternPrefixed is the PREFIXED resource pattern. + ACLPatternPrefixed ACLPattern = kmsg.ACLResourcePatternTypePrefixed +) + +// ResourcePatternType sets the pattern type to use when creating or filtering +// ACL resource names, overriding the default of LITERAL. +// +// This returns the input pointer. +// +// For creating, only LITERAL and PREFIXED are supported. +func (b *ACLBuilder) ResourcePatternType(pattern ACLPattern) *ACLBuilder { + b.pattern = pattern + return b +} + +// ValidateCreate returns an error if the builder is invalid for creating ACLs. +func (b *ACLBuilder) ValidateCreate() error { + for _, op := range b.ops { + switch op { + case OpAny, OpUnknown: + return fmt.Errorf("invalid operation %s for creating ACLs", op) + } + } + + switch b.pattern { + case ACLPatternLiteral, ACLPatternPrefixed: + default: + return fmt.Errorf("invalid acl resource pattern %s for creating ACLs", b.pattern) + } + + if len(b.allowHosts) != 0 && len(b.allow) == 0 { + return fmt.Errorf("invalid allow hosts with no allow principals") + } + if len(b.denyHosts) != 0 && len(b.deny) == 0 { + return fmt.Errorf("invalid deny hosts with no deny principals") + } + return nil +} + +// ValidateDelete is an alias for ValidateFilter. +func (b *ACLBuilder) ValidateDelete() error { return b.ValidateFilter() } + +// ValidateDescribe is an alias for ValidateFilter. +func (b *ACLBuilder) ValidateDescribe() error { return b.ValidateFilter() } + +// ValidateFilter returns an error if the builder is invalid for deleting or +// describing ACLs (which both operate on a filter basis). +func (b *ACLBuilder) ValidateFilter() error { + if len(b.allowHosts) != 0 && len(b.allow) == 0 && !b.anyAllow { + return fmt.Errorf("invalid allow hosts with no allow principals") + } + if len(b.allow) != 0 && len(b.allowHosts) == 0 && !b.anyAllowHosts { + return fmt.Errorf("invalid allow principals with no allow hosts") + } + if len(b.denyHosts) != 0 && len(b.deny) == 0 && !b.anyDeny { + return fmt.Errorf("invalid deny hosts with no deny principals") + } + if len(b.deny) != 0 && len(b.denyHosts) == 0 && !b.anyDenyHosts { + return fmt.Errorf("invalid deny principals with no deny hosts") + } + return nil +} + +// HasAnyFilter returns whether any field in this builder is opted into "any", +// meaning a wide glob. This would be if you used Topics with no topics, and so +// on. This function can be used to detect if you accidentally opted into a +// non-specific ACL. +// +// The evaluated fields are: resources, principals/hosts, a single OpAny +// operation, and an Any pattern. 
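+//
+// As a hedged safety-check sketch before deleting, assuming the NewACLs
+// constructor and the no-argument "any" behavior of the Topics and Allow
+// builder methods defined earlier in this package:
+//
+//	b := NewACLs().Topics().Allow().Operations(OpAny)
+//	if b.HasAnyFilter() {
+//		// This filter can match far more than one literal ACL;
+//		// require explicit confirmation before DeleteACLs.
+//	}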
+func (b *ACLBuilder) HasAnyFilter() bool {
+	return b.anyResource ||
+		b.anyTopic ||
+		b.anyGroup ||
+		b.anyTxn ||
+		b.anyToken ||
+		b.anyAllow ||
+		b.anyAllowHosts ||
+		b.anyDeny ||
+		b.anyDenyHosts ||
+		b.hasOpAny() ||
+		b.pattern == ACLPatternAny
+}
+
+func (b *ACLBuilder) hasOpAny() bool {
+	for _, op := range b.ops {
+		if op == OpAny {
+			return true
+		}
+	}
+	return false
+}
+
+// HasResource returns true if the builder has a non-empty resource (topic,
+// group, ...), or if any resource has "any" set to true.
+func (b *ACLBuilder) HasResource() bool {
+	l := len(b.any) +
+		len(b.topics) +
+		len(b.groups) +
+		len(b.txnIDs) +
+		len(b.tokens)
+	return l > 0 ||
+		b.anyResource ||
+		b.anyTopic ||
+		b.anyGroup ||
+		b.anyCluster ||
+		b.anyTxn ||
+		b.anyToken
+}
+
+// HasPrincipals returns whether any allow or deny principals have been set,
+// or whether their "any" field is true.
+func (b *ACLBuilder) HasPrincipals() bool {
+	return len(b.allow) > 0 ||
+		b.anyAllow ||
+		len(b.deny) > 0 ||
+		b.anyDeny
+}
+
+// HasHosts returns whether any allow or deny hosts have been set, or whether
+// their "any" field is true.
+func (b *ACLBuilder) HasHosts() bool {
+	return len(b.allowHosts) > 0 ||
+		b.anyAllowHosts ||
+		len(b.denyHosts) > 0 ||
+		b.anyDenyHosts
+}
+
+func (b *ACLBuilder) dup() *ACLBuilder { // shallow copy
+	d := *b
+	return &d
+}
+
+// CreateACLsResult is a result for an individual ACL creation.
+type CreateACLsResult struct {
+	Principal string
+	Host      string
+
+	Type       kmsg.ACLResourceType   // Type is the type of resource this is.
+	Name       string                 // Name is the name of the resource allowed / denied.
+	Pattern    ACLPattern             // Pattern is the name pattern.
+	Operation  ACLOperation           // Operation is the operation allowed / denied.
+	Permission kmsg.ACLPermissionType // Permission is whether this is allowed / denied.
+
+	Err        error  // Err is the error for this ACL creation.
+	ErrMessage string // ErrMessage is a potential extra message describing any error.
+}
+
+// CreateACLsResults contains all results for created ACLs.
+type CreateACLsResults []CreateACLsResult
+
+// CreateACLs creates a batch of ACLs using the ACL builder, validating the
+// input before issuing the CreateACLs request.
+//
+// If the input is invalid, or if the response fails, or if the response does
+// not contain as many ACLs as we issued in our create request, this returns an
+// error.
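+//
+// A minimal usage sketch, assuming the NewACLs constructor and the Topics,
+// Allow, and AllowHosts builder methods defined earlier in this package,
+// plus an already-initialized *Client named adm:
+//
+//	b := NewACLs().
+//		Topics("orders").
+//		Allow("User:svc-orders").AllowHosts("*").
+//		Operations(OpRead, OpWrite).
+//		ResourcePatternType(ACLPatternLiteral)
+//	results, err := adm.CreateACLs(ctx, b)
+//	if err != nil {
+//		// invalid builder input, or the request failed entirely
+//	}
+//	for _, r := range results {
+//		_ = r.Err // per-ACL creation error; check each individually
+//	}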
+func (cl *Client) CreateACLs(ctx context.Context, b *ACLBuilder) (CreateACLsResults, error) {
+	if err := b.ValidateCreate(); err != nil {
+		return nil, err
+	}
+	if len(b.allow) != 0 && len(b.allowHosts) == 0 {
+		b.allowHosts = []string{"*"}
+	}
+	if len(b.deny) != 0 && len(b.denyHosts) == 0 {
+		b.denyHosts = []string{"*"}
+	}
+
+	var clusters []string
+	if b.anyCluster {
+		clusters = []string{"kafka-cluster"}
+	}
+
+	req := kmsg.NewPtrCreateACLsRequest()
+	for _, typeNames := range []struct {
+		t     kmsg.ACLResourceType
+		names []string
+	}{
+		{kmsg.ACLResourceTypeTopic, b.topics},
+		{kmsg.ACLResourceTypeGroup, b.groups},
+		{kmsg.ACLResourceTypeCluster, clusters},
+		{kmsg.ACLResourceTypeTransactionalId, b.txnIDs},
+		{kmsg.ACLResourceTypeDelegationToken, b.tokens},
+	} {
+		for _, name := range typeNames.names {
+			for _, op := range b.ops {
+				for _, perm := range []struct {
+					principals []string
+					hosts      []string
+					permType   kmsg.ACLPermissionType
+				}{
+					{b.allow, b.allowHosts, kmsg.ACLPermissionTypeAllow},
+					{b.deny, b.denyHosts, kmsg.ACLPermissionTypeDeny},
+				} {
+					for _, principal := range perm.principals {
+						for _, host := range perm.hosts {
+							c := kmsg.NewCreateACLsRequestCreation()
+							c.ResourceType = typeNames.t
+							c.ResourceName = name
+							c.ResourcePatternType = b.pattern
+							c.Operation = op
+							c.Principal = principal
+							c.Host = host
+							c.PermissionType = perm.permType
+							req.Creations = append(req.Creations, c)
+						}
+					}
+				}
+			}
+		}
+	}
+
+	resp, err := req.RequestWith(ctx, cl.cl)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(resp.Results) != len(req.Creations) {
+		return nil, fmt.Errorf("received %d results to %d creations", len(resp.Results), len(req.Creations))
+	}
+
+	var rs CreateACLsResults
+	for i, r := range resp.Results {
+		c := &req.Creations[i]
+		rs = append(rs, CreateACLsResult{
+			Principal: c.Principal,
+			Host:      c.Host,
+
+			Type:       c.ResourceType,
+			Name:       c.ResourceName,
+			Pattern:    c.ResourcePatternType,
+			Operation:  c.Operation,
+			Permission: c.PermissionType,
+
+			Err:        kerr.ErrorForCode(r.ErrorCode),
+			ErrMessage: unptrStr(r.ErrorMessage),
+		})
+	}
+
+	return rs, nil
+}
+
+// DeletedACL is an ACL that was deleted.
+type DeletedACL struct {
+	Principal string // Principal is this deleted ACL's principal.
+	Host      string // Host is this deleted ACL's host.
+
+	Type       kmsg.ACLResourceType   // Type is this deleted ACL's resource type.
+	Name       string                 // Name is this deleted ACL's resource name.
+	Pattern    ACLPattern             // Pattern is this deleted ACL's resource name pattern.
+	Operation  ACLOperation           // Operation is this deleted ACL's operation.
+	Permission kmsg.ACLPermissionType // Permission is this deleted ACL's permission.
+
+	Err        error  // Err is non-nil if this match has an error.
+	ErrMessage string // ErrMessage is a potential extra message describing any error.
+}
+
+// DeletedACLs contains ACLs that were deleted from a single delete filter.
+type DeletedACLs []DeletedACL
+
+// DeleteACLsResult contains the input used for a delete ACL filter, and the
+// deletes that the filter matched or the error for this filter.
+//
+// All fields but Deleted and Err are set from the request input. The response
+// sets either Deleted (potentially to nothing if the filter matched nothing)
+// or Err.
+type DeleteACLsResult struct {
+	Principal *string // Principal is the optional user that was used in this filter.
+	Host      *string // Host is the optional host that was used in this filter.
+
+	Type       kmsg.ACLResourceType   // Type is the type of resource used for this filter.
+	Name       *string                // Name is the name of the resource used for this filter.
+	Pattern    ACLPattern             // Pattern is the name pattern used for this filter.
+	Operation  ACLOperation           // Operation is the operation used for this filter.
+	Permission kmsg.ACLPermissionType // Permission is the permission used for this filter.
+
+	Deleted DeletedACLs // Deleted contains all ACLs this delete filter matched.
+
+	Err        error  // Err is non-nil if this filter has an error.
+	ErrMessage string // ErrMessage is a potential extra message describing any error.
+}
+
+// DeleteACLsResults contains all results for deleted ACLs.
+type DeleteACLsResults []DeleteACLsResult
+
+// DeleteACLs deletes a batch of ACLs using the ACL builder, validating the
+// input before issuing the DeleteACLs request.
+//
+// If the input is invalid, or if the response fails, or if the response does
+// not contain as many ACL results as we issued in our delete request, this
+// returns an error.
+//
+// Deleting ACLs works on a filter basis: a single filter can match many ACLs.
+// For example, deleting with operation ANY matches any operation. For safety /
+// verification purposes, you can DescribeACLs with the same builder first to
+// see what would be deleted.
+func (cl *Client) DeleteACLs(ctx context.Context, b *ACLBuilder) (DeleteACLsResults, error) {
+	dels, _, err := createDelDescACL(b)
+	if err != nil {
+		return nil, err
+	}
+
+	req := kmsg.NewPtrDeleteACLsRequest()
+	req.Filters = dels
+	resp, err := req.RequestWith(ctx, cl.cl)
+	if err != nil {
+		return nil, err
+	}
+	if len(resp.Results) != len(req.Filters) {
+		return nil, fmt.Errorf("received %d results to %d filters", len(resp.Results), len(req.Filters))
+	}
+
+	var rs DeleteACLsResults
+	for i, r := range resp.Results {
+		f := &req.Filters[i]
+		var ms DeletedACLs
+		for _, m := range r.MatchingACLs {
+			ms = append(ms, DeletedACL{
+				Principal:  m.Principal,
+				Host:       m.Host,
+				Type:       m.ResourceType,
+				Name:       m.ResourceName,
+				Pattern:    m.ResourcePatternType,
+				Operation:  m.Operation,
+				Permission: m.PermissionType,
+				Err:        kerr.ErrorForCode(m.ErrorCode),
+				ErrMessage: unptrStr(m.ErrorMessage),
+			})
+		}
+		rs = append(rs, DeleteACLsResult{
+			Principal:  f.Principal,
+			Host:       f.Host,
+			Type:       f.ResourceType,
+			Name:       f.ResourceName,
+			Pattern:    f.ResourcePatternType,
+			Operation:  f.Operation,
+			Permission: f.PermissionType,
+			Deleted:    ms,
+			Err:        kerr.ErrorForCode(r.ErrorCode),
+			ErrMessage: unptrStr(r.ErrorMessage),
+		})
+	}
+	return rs, nil
+}
+
+// DescribedACL is an ACL that was described.
+type DescribedACL struct {
+	Principal string // Principal is this described ACL's principal.
+	Host      string // Host is this described ACL's host.
+
+	Type       kmsg.ACLResourceType   // Type is this described ACL's resource type.
+	Name       string                 // Name is this described ACL's resource name.
+	Pattern    ACLPattern             // Pattern is this described ACL's resource name pattern.
+	Operation  ACLOperation           // Operation is this described ACL's operation.
+	Permission kmsg.ACLPermissionType // Permission is this described ACL's permission.
+}
+
+// DescribedACLs contains ACLs that were described from a single describe
+// filter.
+type DescribedACLs []DescribedACL
+
+// DescribeACLsResult contains the input used for a describe ACL filter, and
+// the describes that the filter matched or the error for this filter.
+//
+// All fields but Described and Err are set from the request input. The
+// response sets either Described (potentially to nothing if the filter matched
+// nothing) or Err.
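+//
+// Because describing and deleting share filter semantics, one hedged sketch
+// is to describe first with the same builder b and inspect what matched
+// before deleting ("adm" is an assumed, already-initialized *Client):
+//
+//	described, err := adm.DescribeACLs(ctx, b)
+//	if err == nil {
+//		for _, d := range described {
+//			_ = d.Described // what DeleteACLs(ctx, b) would match
+//		}
+//	}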
+type DescribeACLsResult struct {
+	Principal *string // Principal is the optional user that was used in this filter.
+	Host      *string // Host is the optional host that was used in this filter.
+
+	Type       kmsg.ACLResourceType   // Type is the type of resource used for this filter.
+	Name       *string                // Name is the name of the resource used for this filter.
+	Pattern    ACLPattern             // Pattern is the name pattern used for this filter.
+	Operation  ACLOperation           // Operation is the operation used for this filter.
+	Permission kmsg.ACLPermissionType // Permission is the permission used for this filter.
+
+	Described DescribedACLs // Described contains all ACLs this describe filter matched.
+
+	Err        error  // Err is non-nil if this filter has an error.
+	ErrMessage string // ErrMessage is a potential extra message describing any error.
+}
+
+// DescribeACLsResults contains all results for described ACLs.
+type DescribeACLsResults []DescribeACLsResult
+
+// DescribeACLs describes a batch of ACLs using the ACL builder, validating the
+// input before issuing DescribeACLs requests.
+//
+// If the input is invalid, or if any response fails, this returns an error.
+//
+// Listing ACLs works on a filter basis: a single filter can match many ACLs.
+// For example, describing with operation ANY matches any operation. Under the
+// hood, this method issues one describe request per filter, because describing
+// ACLs does not work on a batch basis (unlike creating & deleting). The return
+// of this function can be used to see what would be deleted given the same
+// builder input.
+func (cl *Client) DescribeACLs(ctx context.Context, b *ACLBuilder) (DescribeACLsResults, error) {
+	_, descs, err := createDelDescACL(b)
+	if err != nil {
+		return nil, err
+	}
+
+	var (
+		ictx, cancel = context.WithCancel(ctx)
+		mu           sync.Mutex
+		wg           sync.WaitGroup
+		firstErr     error
+		resps        = make([]*kmsg.DescribeACLsResponse, len(descs))
+	)
+	defer cancel()
+	for i := range descs {
+		req := descs[i] // each req is unique per loop, we are not reusing req, this is safe
+		myIdx := i
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			resp, err := req.RequestWith(ictx, cl.cl)
+			resps[myIdx] = resp
+			if err == nil {
+				return
+			}
+			cancel()
+			mu.Lock()
+			defer mu.Unlock()
+			if firstErr == nil { // keep the first err
+				firstErr = err
+			}
+		}()
+	}
+	wg.Wait()
+	if firstErr != nil {
+		return nil, firstErr
+	}
+
+	var rs DescribeACLsResults
+	for i, r := range resps {
+		f := descs[i]
+		var ds DescribedACLs
+		for _, resource := range r.Resources {
+			for _, acl := range resource.ACLs {
+				ds = append(ds, DescribedACL{
+					Principal:  acl.Principal,
+					Host:       acl.Host,
+					Type:       resource.ResourceType,
+					Name:       resource.ResourceName,
+					Pattern:    resource.ResourcePatternType,
+					Operation:  acl.Operation,
+					Permission: acl.PermissionType,
+				})
+			}
+		}
+		rs = append(rs, DescribeACLsResult{
+			Principal:  f.Principal,
+			Host:       f.Host,
+			Type:       f.ResourceType,
+			Name:       f.ResourceName,
+			Pattern:    f.ResourcePatternType,
+			Operation:  f.Operation,
+			Permission: f.PermissionType,
+			Described:  ds,
+			Err:        kerr.ErrorForCode(r.ErrorCode),
+			ErrMessage: unptrStr(r.ErrorMessage),
+		})
+	}
+	return rs, nil
+}
+
+var sliceAny = []string{"any"}
+
+func createDelDescACL(b *ACLBuilder) ([]kmsg.DeleteACLsRequestFilter, []*kmsg.DescribeACLsRequest, error) {
+	if err := b.ValidateFilter(); err != nil {
+		return nil, nil, err
+	}
+
+	// As a special shortcut, if we have any allow and deny principals and
+	// hosts, we collapse these into one "any" group.
The anyAny and + // anyAnyHosts vars are used in our looping below, and if we do this, + // we dup and set all the relevant fields to false to not expand them + // in our loops. + var anyAny, anyAnyHosts bool + if b.anyAllow && b.anyDeny && b.anyAllowHosts && b.anyDenyHosts { + anyAny = true + anyAnyHosts = true + + b = b.dup() + b.allow = nil + b.allowHosts = nil + b.deny = nil + b.denyHosts = nil + b.anyAllow = false + b.anyAllowHosts = false + b.anyDeny = false + b.anyDenyHosts = false + } + + var clusters []string + if b.anyCluster { + clusters = []string{"kafka-cluster"} + } + var deletions []kmsg.DeleteACLsRequestFilter + var describes []*kmsg.DescribeACLsRequest + for _, typeNames := range []struct { + t kmsg.ACLResourceType + names []string + any bool + }{ + {kmsg.ACLResourceTypeAny, b.any, b.anyResource}, + {kmsg.ACLResourceTypeTopic, b.topics, b.anyTopic}, + {kmsg.ACLResourceTypeGroup, b.groups, b.anyGroup}, + {kmsg.ACLResourceTypeCluster, clusters, b.anyCluster}, + {kmsg.ACLResourceTypeTransactionalId, b.txnIDs, b.anyTxn}, + {kmsg.ACLResourceTypeDelegationToken, b.tokens, b.anyToken}, + } { + if typeNames.any { + typeNames.names = sliceAny + } + for _, name := range typeNames.names { + for _, op := range b.ops { + for _, perm := range []struct { + principals []string + anyPrincipal bool + hosts []string + anyHost bool + permType kmsg.ACLPermissionType + }{ + { + b.allow, + b.anyAllow, + b.allowHosts, + b.anyAllowHosts, + kmsg.ACLPermissionTypeAllow, + }, + { + b.deny, + b.anyDeny, + b.denyHosts, + b.anyDenyHosts, + kmsg.ACLPermissionTypeDeny, + }, + { + nil, + anyAny, + nil, + anyAnyHosts, + kmsg.ACLPermissionTypeAny, + }, + } { + if perm.anyPrincipal { + perm.principals = sliceAny + } + if perm.anyHost { + perm.hosts = sliceAny + } + for _, principal := range perm.principals { + for _, host := range perm.hosts { + deletion := kmsg.NewDeleteACLsRequestFilter() + describe := kmsg.NewPtrDescribeACLsRequest() + + deletion.ResourceType = typeNames.t + describe.ResourceType = typeNames.t + + if !typeNames.any { + deletion.ResourceName = kmsg.StringPtr(name) + describe.ResourceName = kmsg.StringPtr(name) + } + + deletion.ResourcePatternType = b.pattern + describe.ResourcePatternType = b.pattern + + deletion.Operation = op + describe.Operation = op + + if !perm.anyPrincipal { + deletion.Principal = kmsg.StringPtr(principal) + describe.Principal = kmsg.StringPtr(principal) + } + + if !perm.anyHost { + deletion.Host = kmsg.StringPtr(host) + describe.Host = kmsg.StringPtr(host) + } + + deletion.PermissionType = perm.permType + describe.PermissionType = perm.permType + + deletions = append(deletions, deletion) + describes = append(describes, describe) + } + } + } + } + } + } + return deletions, describes, nil +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kadm/configs.go b/vendor/github.com/twmb/franz-go/pkg/kadm/configs.go new file mode 100644 index 00000000000..e6e37245f7a --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kadm/configs.go @@ -0,0 +1,417 @@ +package kadm + +import ( + "context" + "strconv" + + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" +) + +// ConfigSynonym is a fallback value for a config. +type ConfigSynonym struct { + Key string // Key is the fallback config name. + Value *string // Value is the fallback config value, if any (sensitive is elided). + Source kmsg.ConfigSource // Source is where this config synonym is defined from. 
+}
+
+// Config is a configuration for a resource (topic, broker).
+type Config struct {
+	Key       string            // Key is the config name.
+	Value     *string           // Value is the config value, if any.
+	Sensitive bool              // Sensitive is if this config is sensitive (if so, Value is nil).
+	Source    kmsg.ConfigSource // Source is where this config is defined from.
+
+	// Synonyms contains fallback key/value pairs for this same
+	// configuration key in order of preference. That is, if a config entry
+	// is both dynamically defined and has a default value as well, the top
+	// level config will be the dynamic value, while the synonym will be
+	// the default.
+	Synonyms []ConfigSynonym
+}
+
+// MaybeValue returns the config's value if it is non-nil, otherwise an empty
+// string.
+func (c *Config) MaybeValue() string {
+	if c.Value != nil {
+		return *c.Value
+	}
+	return ""
+}
+
+// ResourceConfig contains the configuration values for a resource (topic,
+// broker, broker logger).
+type ResourceConfig struct {
+	Name       string   // Name is the name of this resource.
+	Configs    []Config // Configs are the configs for this resource.
+	Err        error    // Err is any error preventing configs from loading (likely, an unknown topic).
+	ErrMessage string   // ErrMessage is a potential extra message describing any error.
+}
+
+// ResourceConfigs contains the configuration values for many resources.
+type ResourceConfigs []ResourceConfig
+
+// On calls fn for the response config if it exists, returning the config and
+// the error returned from fn. If fn is nil, this simply returns the config.
+//
+// The fn is given a copy of the config. This function returns the copy as
+// well; any modifications within fn are modifications on the returned copy.
+//
+// If the resource does not exist, this returns kerr.UnknownTopicOrPartition.
+func (rs ResourceConfigs) On(name string, fn func(*ResourceConfig) error) (ResourceConfig, error) {
+	for _, r := range rs {
+		if r.Name == name {
+			if fn == nil {
+				return r, nil
+			}
+			return r, fn(&r)
+		}
+	}
+	return ResourceConfig{}, kerr.UnknownTopicOrPartition
+}
+
+// DescribeTopicConfigs returns the configuration for the requested topics.
+//
+// This may return *ShardErrors.
+func (cl *Client) DescribeTopicConfigs(
+	ctx context.Context,
+	topics ...string,
+) (ResourceConfigs, error) {
+	if len(topics) == 0 {
+		return nil, nil
+	}
+	return cl.describeConfigs(ctx, kmsg.ConfigResourceTypeTopic, topics)
+}
+
+// DescribeBrokerConfigs returns configuration for the requested brokers. If no
+// brokers are requested, a single request is issued and any broker in the
+// cluster replies with the cluster-level dynamic config values.
+//
+// This may return *ShardErrors.
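+//
+// A minimal sketch, assuming an already-initialized *Client named adm; note
+// that broker resource names are the stringified broker IDs:
+//
+//	rcs, err := adm.DescribeBrokerConfigs(ctx, 1)
+//	var se *ShardErrors
+//	if errors.As(err, &se) && se.AllFailed {
+//		// no shard succeeded; nothing in rcs is usable
+//	}
+//	if rc, err := rcs.On("1", nil); err == nil {
+//		for _, c := range rc.Configs {
+//			_ = c.MaybeValue() // "" when the value is nil or sensitive
+//		}
+//	}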
+func (cl *Client) DescribeBrokerConfigs(
+	ctx context.Context,
+	brokers ...int32,
+) (ResourceConfigs, error) {
+	var names []string
+	if len(brokers) == 0 {
+		names = append(names, "")
+	}
+	for _, b := range brokers {
+		names = append(names, strconv.Itoa(int(b)))
+	}
+	return cl.describeConfigs(ctx, kmsg.ConfigResourceTypeBroker, names)
+}
+
+func (cl *Client) describeConfigs(
+	ctx context.Context,
+	kind kmsg.ConfigResourceType,
+	names []string,
+) (ResourceConfigs, error) {
+	req := kmsg.NewPtrDescribeConfigsRequest()
+	req.IncludeSynonyms = true
+	for _, name := range names {
+		rr := kmsg.NewDescribeConfigsRequestResource()
+		rr.ResourceName = name
+		rr.ResourceType = kind
+		req.Resources = append(req.Resources, rr)
+	}
+	shards := cl.cl.RequestSharded(ctx, req)
+
+	var configs []ResourceConfig
+	return configs, shardErrEach(req, shards, func(kr kmsg.Response) error {
+		resp := kr.(*kmsg.DescribeConfigsResponse)
+		for _, r := range resp.Resources {
+			if err := maybeAuthErr(r.ErrorCode); err != nil {
+				return err
+			}
+			rc := ResourceConfig{
+				Name:       r.ResourceName,
+				Err:        kerr.ErrorForCode(r.ErrorCode),
+				ErrMessage: unptrStr(r.ErrorMessage),
+			}
+			for _, c := range r.Configs {
+				rcv := Config{
+					Key:       c.Name,
+					Value:     c.Value,
+					Sensitive: c.IsSensitive,
+					Source:    c.Source,
+				}
+				for _, syn := range c.ConfigSynonyms {
+					rcv.Synonyms = append(rcv.Synonyms, ConfigSynonym{
+						Key:    syn.Name,
+						Value:  syn.Value,
+						Source: syn.Source,
+					})
+				}
+				rc.Configs = append(rc.Configs, rcv)
+			}
+			configs = append(configs, rc) // we are not storing in a map, no existence-check possible
+		}
+		return nil
+	})
+}
+
+// IncrementalOp is a typed int8 that is used for incrementally updating
+// configuration keys for topics and brokers.
+type IncrementalOp int8
+
+const (
+	// SetConfig is an incremental operation to set an individual config
+	// key.
+	SetConfig IncrementalOp = iota
+
+	// DeleteConfig is an incremental operation to delete an individual
+	// config key.
+	DeleteConfig
+
+	// AppendConfig is an incremental operation to append a value to a
+	// config key that is a list type.
+	AppendConfig
+
+	// SubtractConfig is an incremental operation to remove a value from a
+	// config key that is a list type.
+	SubtractConfig
+)
+
+// AlterConfig is an individual key/value operation to perform when altering
+// configs.
+//
+// This package includes a StringPtr function to aid in building config values.
+type AlterConfig struct {
+	Op    IncrementalOp // Op is the incremental alter operation to perform. This is ignored for State alter functions.
+	Name  string        // Name is the name of the config to alter.
+	Value *string       // Value is the value to use when altering, if any.
+}
+
+// AlterConfigsResponse contains the response for an individual alteration.
+type AlterConfigsResponse struct {
+	Name       string // Name is the name of this resource (topic name or broker number).
+	Err        error  // Err is non-nil if the config could not be altered.
+	ErrMessage string // ErrMessage is a potential extra message describing any error.
+}
+
+// AlterConfigsResponses contains responses for many alterations.
+type AlterConfigsResponses []AlterConfigsResponse
+
+// On calls fn for the response name if it exists, returning the response and
+// the error returned from fn. If fn is nil, this simply returns the response.
+//
+// The fn is given a copy of the response. This function returns the copy as
+// well; any modifications within fn are modifications on the returned copy.
+// +// If the resource does not exist, this returns kerr.UnknownTopicOrPartition. +func (rs AlterConfigsResponses) On(name string, fn func(*AlterConfigsResponse) error) (AlterConfigsResponse, error) { + for _, r := range rs { + if r.Name == name { + if fn == nil { + return r, nil + } + return r, fn(&r) + } + } + return AlterConfigsResponse{}, kerr.UnknownTopicOrPartition +} + +// AlterTopicConfigs incrementally alters topic configuration values. +// +// This method requires talking to a cluster that supports +// IncrementalAlterConfigs (officially introduced in Kafka v2.3, but many +// broker reimplementations support this request even if they do not support +// all other requests from Kafka v2.3). +// +// If you want to alter the entire configs state using the older AlterConfigs +// request, use AlterTopicConfigsState. +// +// This may return *ShardErrors. You may consider checking +// ValidateAlterTopicConfigs before using this method. +func (cl *Client) AlterTopicConfigs(ctx context.Context, configs []AlterConfig, topics ...string) (AlterConfigsResponses, error) { + return cl.alterConfigs(ctx, false, configs, kmsg.ConfigResourceTypeTopic, topics) +} + +// ValidateAlterTopicConfigs validates an incremental alter config for the given +// topics. +// +// This returns exactly what AlterTopicConfigs returns, but does not actually +// alter configurations. +func (cl *Client) ValidateAlterTopicConfigs(ctx context.Context, configs []AlterConfig, topics ...string) (AlterConfigsResponses, error) { + return cl.alterConfigs(ctx, true, configs, kmsg.ConfigResourceTypeTopic, topics) +} + +// AlterBrokerConfigs incrementally alters broker configuration values. If +// brokers are specified, this updates each specific broker. If no brokers are +// specified, this updates whole-cluster broker configuration values. +// +// This method requires talking to a cluster that supports +// IncrementalAlterConfigs (officially introduced in Kafka v2.3, but many +// broker reimplementations support this request even if they do not support +// all other requests from Kafka v2.3). +// +// If you want to alter the entire configs state using the older AlterConfigs +// request, use AlterBrokerConfigsState. +// +// This may return *ShardErrors. You may consider checking +// ValidateAlterBrokerConfigs before using this method. +func (cl *Client) AlterBrokerConfigs(ctx context.Context, configs []AlterConfig, brokers ...int32) (AlterConfigsResponses, error) { + var names []string + if len(brokers) == 0 { + names = append(names, "") + } + for _, broker := range brokers { + names = append(names, strconv.Itoa(int(broker))) + } + return cl.alterConfigs(ctx, false, configs, kmsg.ConfigResourceTypeBroker, names) +} + +// ValidateAlterBrokerConfigs validates an incremental alter config for the given +// brokers. +// +// This returns exactly what AlterBrokerConfigs returns, but does not actually +// alter configurations. 
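+//
+// A validate-then-alter sketch for the incremental path, assuming an
+// already-initialized *Client named adm and the package's StringPtr helper:
+//
+//	alter := []AlterConfig{{Op: SetConfig, Name: "log.cleaner.threads", Value: StringPtr("2")}}
+//	if _, err := adm.ValidateAlterBrokerConfigs(ctx, alter, 1); err == nil {
+//		resps, _ := adm.AlterBrokerConfigs(ctx, alter, 1)
+//		for _, r := range resps {
+//			_ = r.Err // per-resource error; check each individually
+//		}
+//	}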
+func (cl *Client) ValidateAlterBrokerConfigs(ctx context.Context, configs []AlterConfig, brokers ...int32) (AlterConfigsResponses, error) {
+	var names []string
+	if len(brokers) == 0 {
+		names = append(names, "")
+	}
+	for _, broker := range brokers {
+		names = append(names, strconv.Itoa(int(broker)))
+	}
+	return cl.alterConfigs(ctx, true, configs, kmsg.ConfigResourceTypeBroker, names)
+}
+
+func (cl *Client) alterConfigs(
+	ctx context.Context,
+	dry bool,
+	configs []AlterConfig,
+	kind kmsg.ConfigResourceType,
+	names []string,
+) (AlterConfigsResponses, error) {
+	req := kmsg.NewPtrIncrementalAlterConfigsRequest()
+	req.ValidateOnly = dry
+	for _, name := range names {
+		rr := kmsg.NewIncrementalAlterConfigsRequestResource()
+		rr.ResourceType = kind
+		rr.ResourceName = name
+		for _, config := range configs {
+			rc := kmsg.NewIncrementalAlterConfigsRequestResourceConfig()
+			rc.Name = config.Name
+			rc.Value = config.Value
+			switch config.Op {
+			case SetConfig:
+				rc.Op = kmsg.IncrementalAlterConfigOpSet
+			case DeleteConfig:
+				rc.Op = kmsg.IncrementalAlterConfigOpDelete
+			case AppendConfig:
+				rc.Op = kmsg.IncrementalAlterConfigOpAppend
+			case SubtractConfig:
+				rc.Op = kmsg.IncrementalAlterConfigOpSubtract
+			}
+			rr.Configs = append(rr.Configs, rc)
+		}
+		req.Resources = append(req.Resources, rr)
+	}
+
+	shards := cl.cl.RequestSharded(ctx, req)
+
+	var rs []AlterConfigsResponse
+	return rs, shardErrEach(req, shards, func(kr kmsg.Response) error {
+		resp := kr.(*kmsg.IncrementalAlterConfigsResponse)
+		for _, r := range resp.Resources {
+			rs = append(rs, AlterConfigsResponse{ // we are not storing in a map, no existence check possible
+				Name:       r.ResourceName,
+				Err:        kerr.ErrorForCode(r.ErrorCode),
+				ErrMessage: unptrStr(r.ErrorMessage),
+			})
+		}
+		return nil
+	})
+}
+
+// AlterTopicConfigsState alters the full state of topic configurations.
+// All prior configuration is lost.
+//
+// This may return *ShardErrors. You may consider checking
+// ValidateAlterTopicConfigsState before using this method.
+func (cl *Client) AlterTopicConfigsState(ctx context.Context, configs []AlterConfig, topics ...string) (AlterConfigsResponses, error) {
+	return cl.alterConfigsState(ctx, false, configs, kmsg.ConfigResourceTypeTopic, topics)
+}
+
+// ValidateAlterTopicConfigsState validates an AlterTopicConfigsState for the
+// given topics.
+//
+// This returns exactly what AlterTopicConfigsState returns, but does not
+// actually alter configurations.
+func (cl *Client) ValidateAlterTopicConfigsState(ctx context.Context, configs []AlterConfig, topics ...string) (AlterConfigsResponses, error) {
+	return cl.alterConfigsState(ctx, true, configs, kmsg.ConfigResourceTypeTopic, topics)
+}
+
+// AlterBrokerConfigsState alters the full state of broker configurations. If
+// brokers are specified, this updates each specific broker. If no brokers are
+// specified, this updates whole-cluster broker configuration values.
+// All prior configuration is lost.
+//
+// This may return *ShardErrors. You may consider checking
+// ValidateAlterBrokerConfigsState before using this method.
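+//
+// A hedged sketch of this non-incremental path, assuming an
+// already-initialized *Client named adm; every key not named in configs is
+// reset, so send the full desired state:
+//
+//	alter := []AlterConfig{{Name: "background.threads", Value: StringPtr("10")}} // Op is ignored here
+//	resps, err := adm.AlterBrokerConfigsState(ctx, alter, 1)
+//	_, _ = resps, err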
+func (cl *Client) AlterBrokerConfigsState(ctx context.Context, configs []AlterConfig, brokers ...int32) (AlterConfigsResponses, error) {
+	var names []string
+	if len(brokers) == 0 {
+		names = append(names, "")
+	}
+	for _, broker := range brokers {
+		names = append(names, strconv.Itoa(int(broker)))
+	}
+	return cl.alterConfigsState(ctx, false, configs, kmsg.ConfigResourceTypeBroker, names)
+}
+
+// ValidateAlterBrokerConfigsState validates an AlterBrokerConfigsState for the
+// given brokers.
+//
+// This returns exactly what AlterBrokerConfigsState returns, but does not
+// actually alter configurations.
+func (cl *Client) ValidateAlterBrokerConfigsState(ctx context.Context, configs []AlterConfig, brokers ...int32) (AlterConfigsResponses, error) {
+	var names []string
+	if len(brokers) == 0 {
+		names = append(names, "")
+	}
+	for _, broker := range brokers {
+		names = append(names, strconv.Itoa(int(broker)))
+	}
+	return cl.alterConfigsState(ctx, true, configs, kmsg.ConfigResourceTypeBroker, names)
+}
+
+func (cl *Client) alterConfigsState(
+	ctx context.Context,
+	dry bool,
+	configs []AlterConfig,
+	kind kmsg.ConfigResourceType,
+	names []string,
+) (AlterConfigsResponses, error) {
+	req := kmsg.NewPtrAlterConfigsRequest()
+	req.ValidateOnly = dry
+	for _, name := range names {
+		rr := kmsg.NewAlterConfigsRequestResource()
+		rr.ResourceType = kind
+		rr.ResourceName = name
+		for _, config := range configs {
+			rc := kmsg.NewAlterConfigsRequestResourceConfig()
+			rc.Name = config.Name
+			rc.Value = config.Value
+			rr.Configs = append(rr.Configs, rc)
+		}
+		req.Resources = append(req.Resources, rr)
+	}
+
+	shards := cl.cl.RequestSharded(ctx, req)
+
+	var rs []AlterConfigsResponse
+	return rs, shardErrEach(req, shards, func(kr kmsg.Response) error {
+		resp := kr.(*kmsg.AlterConfigsResponse)
+		for _, r := range resp.Resources {
+			rs = append(rs, AlterConfigsResponse{ // we are not storing in a map, no existence check possible
+				Name:       r.ResourceName,
+				Err:        kerr.ErrorForCode(r.ErrorCode),
+				ErrMessage: unptrStr(r.ErrorMessage),
+			})
+		}
+		return nil
+	})
+}
diff --git a/vendor/github.com/twmb/franz-go/pkg/kadm/dtoken.go b/vendor/github.com/twmb/franz-go/pkg/kadm/dtoken.go
new file mode 100644
index 00000000000..7591cf43a32
--- /dev/null
+++ b/vendor/github.com/twmb/franz-go/pkg/kadm/dtoken.go
@@ -0,0 +1,229 @@
+package kadm
+
+import (
+	"context"
+	"time"
+
+	"github.com/twmb/franz-go/pkg/kerr"
+	"github.com/twmb/franz-go/pkg/kmsg"
+)
+
+// Principal is a principal that owns or renews a delegation token. This is the
+// same as an ACL's principal, but rather than being a single string, the type
+// and name are split into two fields.
+type Principal struct {
+	Type string // Type is the type of a principal owner or renewer. If empty, this defaults to "User".
+	Name string // Name is the name of a principal owner or renewer.
+}
+
+// DelegationToken contains information about a delegation token.
+type DelegationToken struct {
+	// Owner is the owner of the delegation token.
+	Owner Principal
+	// TokenRequesterPrincipal is the principal of the creator of the
+	// token. This exists for v3+, where you can override the owner.
+	// For versions prior to v3, this is just the Owner.
+	TokenRequesterPrincipal Principal
+	// IssueTimestamp is the timestamp at which the delegation token
+	// creation request was received by the broker.
+	IssueTimestamp time.Time
+	// ExpiryTimestamp is the timestamp the delegation token will expire.
+	// This field is:
+	// min(MaxTimestamp, IssueTimestamp+delegation.token.expiry.time.ms)
+	// where the default expiry is 24hr.
+	ExpiryTimestamp time.Time
+	// MaxTimestamp is the timestamp past which the delegation token cannot
+	// be renewed. This is either the requested MaxLifetime, or the
+	// broker's delegation.token.max.lifetime.ms which is 7d by default.
+	MaxTimestamp time.Time
+	// TokenID is the username of this token for use in authorization.
+	TokenID string
+	// HMAC is the password of this token for use in authorization.
+	HMAC []byte
+	// Renewers is the list of principals that can renew this token in
+	// addition to the owner (which always can).
+	Renewers []Principal
+}
+
+// DelegationTokens contains a list of delegation tokens.
+type DelegationTokens []DelegationToken
+
+// CreateDelegationToken is a create delegation token request, allowing you to
+// create scoped tokens with the same ACLs as the creator. This allows you to
+// more easily manage authorization for a wide array of clients. All delegation
+// tokens use SCRAM-SHA-256 SASL for authorization.
+type CreateDelegationToken struct {
+	// Owner overrides the owner of the token from the principal issuing
+	// the request to the principal in this field. This allows a superuser
+	// to create tokens without requiring individual user credentials, and
+	// for a superuser to run clients on behalf of another user. These
+	// fields require Kafka 3.3+; see KIP-373 for more details.
+	Owner *Principal
+	// Renewers is a list of principals that can renew the delegation
+	// token in addition to the owner of the token. This list does not
+	// include the owner.
+	Renewers []Principal
+	// MaxLifetime is how long the delegation token is valid for.
+	// If -1, the default is the server's delegation.token.max.lifetime.ms,
+	// which is by default 7d.
+	MaxLifetime time.Duration
+}
+
+// CreateDelegationToken creates a delegation token, which is a scoped
+// SCRAM-SHA-256 username and password.
+//
+// Creating delegation tokens allows for an (ideally) quicker and easier method
+// of enabling authorization for a wide array of clients. Rather than having to
+// manage many passwords external to Kafka, you only need to manage a few
+// accounts and use those to create delegation tokens per client.
+//
+// Note that delegation tokens inherit the same ACLs as the user creating the
+// token. Thus, if you want to properly scope ACLs, you should not create
+// delegation tokens with admin accounts.
+//
+// This can return *AuthError.
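+//
+// A minimal sketch, assuming an already-initialized *Client named adm; the
+// returned TokenID and HMAC act as the SCRAM-SHA-256 username and password:
+//
+//	tok, err := adm.CreateDelegationToken(ctx, CreateDelegationToken{
+//		Renewers:    []Principal{{Name: "svc-renewer"}}, // empty Type defaults to "User"
+//		MaxLifetime: 24 * time.Hour,
+//	})
+//	if err == nil {
+//		user, pass := tok.TokenID, tok.HMAC
+//		_, _ = user, pass // hand these to a SCRAM-SHA-256 authenticating client
+//	}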
+func (cl *Client) CreateDelegationToken(ctx context.Context, d CreateDelegationToken) (DelegationToken, error) { + req := kmsg.NewPtrCreateDelegationTokenRequest() + if d.Owner != nil { + req.OwnerPrincipalType = &d.Owner.Type + req.OwnerPrincipalName = &d.Owner.Name + } + for _, renewer := range d.Renewers { + rr := kmsg.NewCreateDelegationTokenRequestRenewer() + rr.PrincipalType = renewer.Type + rr.PrincipalName = renewer.Name + req.Renewers = append(req.Renewers, rr) + } + req.MaxLifetimeMillis = d.MaxLifetime.Milliseconds() + resp, err := req.RequestWith(ctx, cl.cl) + if err != nil { + return DelegationToken{}, err + } + if err := maybeAuthErr(resp.ErrorCode); err != nil { + return DelegationToken{}, err + } + if err := kerr.ErrorForCode(resp.ErrorCode); err != nil { + return DelegationToken{}, err + } + + t := DelegationToken{ + Owner: Principal{ + Type: resp.PrincipalType, + Name: resp.PrincipalName, + }, + TokenRequesterPrincipal: Principal{ + Type: resp.TokenRequesterPrincipalType, + Name: resp.TokenRequesterPrincipalName, + }, + IssueTimestamp: time.UnixMilli(resp.IssueTimestamp).UTC(), + ExpiryTimestamp: time.UnixMilli(resp.ExpiryTimestamp).UTC(), + MaxTimestamp: time.UnixMilli(resp.MaxTimestamp).UTC(), + TokenID: resp.TokenID, + HMAC: resp.HMAC, + Renewers: append([]Principal(nil), d.Renewers...), + } + if resp.Version < 3 { + t.TokenRequesterPrincipal = t.Owner + } + return t, nil +} + +// RenewDelegationToken renews a delegation token that has not yet hit its max +// timestamp and returns the new expiry timestamp. +// +// This can return *AuthError. +func (cl *Client) RenewDelegationToken(ctx context.Context, hmac []byte, renewTime time.Duration) (expiryTimestamp time.Time, err error) { + req := kmsg.NewPtrRenewDelegationTokenRequest() + req.HMAC = hmac + req.RenewTimeMillis = renewTime.Milliseconds() + resp, err := req.RequestWith(ctx, cl.cl) + if err != nil { + return time.Time{}, err + } + if err := maybeAuthErr(resp.ErrorCode); err != nil { + return time.Time{}, err + } + if err := kerr.ErrorForCode(resp.ErrorCode); err != nil { + return time.Time{}, err + } + return time.UnixMilli(resp.ExpiryTimestamp).UTC(), nil +} + +// ExpireDelegationToken changes a delegation token's expiry timestamp and +// returns the new expiry timestamp, which is min(now+expiry, maxTimestamp). +// This request can be used to force tokens to expire quickly, or to give +// tokens a grace period before expiry. Using an expiry of -1 expires the token +// immediately. +// +// This can return *AuthError. +func (cl *Client) ExpireDelegationToken(ctx context.Context, hmac []byte, expiry time.Duration) (expiryTimestamp time.Time, err error) { + req := kmsg.NewPtrExpireDelegationTokenRequest() + req.HMAC = hmac + req.ExpiryPeriodMillis = expiry.Milliseconds() + resp, err := req.RequestWith(ctx, cl.cl) + if err != nil { + return time.Time{}, err + } + if err := maybeAuthErr(resp.ErrorCode); err != nil { + return time.Time{}, err + } + if err := kerr.ErrorForCode(resp.ErrorCode); err != nil { + return time.Time{}, err + } + return time.UnixMilli(resp.ExpiryTimestamp).UTC(), nil +} + +// DescribeDelegationTokens describes delegation tokens. This returns either +// all delegation tokens, or returns only tokens with owners in the requested +// owners list. +// +// This can return *AuthError. 
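+//
+// For example, a sketch that renews every token owned by one principal,
+// assuming an already-initialized *Client named adm:
+//
+//	toks, err := adm.DescribeDelegationTokens(ctx, Principal{Name: "svc-orders"})
+//	if err == nil {
+//		for _, t := range toks {
+//			_, _ = adm.RenewDelegationToken(ctx, t.HMAC, 24*time.Hour)
+//		}
+//	}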
+func (cl *Client) DescribeDelegationTokens(ctx context.Context, owners ...Principal) (DelegationTokens, error) { + req := kmsg.NewPtrDescribeDelegationTokenRequest() + for _, owner := range owners { + ro := kmsg.NewDescribeDelegationTokenRequestOwner() + ro.PrincipalType = owner.Type + ro.PrincipalName = owner.Name + req.Owners = append(req.Owners, ro) + } + resp, err := req.RequestWith(ctx, cl.cl) + if err != nil { + return nil, err + } + if err := maybeAuthErr(resp.ErrorCode); err != nil { + return nil, err + } + if err := kerr.ErrorForCode(resp.ErrorCode); err != nil { + return nil, err + } + + var ts DelegationTokens + for _, d := range resp.TokenDetails { + t := DelegationToken{ + Owner: Principal{ + Type: d.PrincipalType, + Name: d.PrincipalName, + }, + TokenRequesterPrincipal: Principal{ + Type: d.TokenRequesterPrincipalType, + Name: d.TokenRequesterPrincipalName, + }, + IssueTimestamp: time.UnixMilli(d.IssueTimestamp).UTC(), + ExpiryTimestamp: time.UnixMilli(d.ExpiryTimestamp).UTC(), + MaxTimestamp: time.UnixMilli(d.MaxTimestamp).UTC(), + TokenID: d.TokenID, + HMAC: d.HMAC, + } + if resp.Version < 3 { + t.TokenRequesterPrincipal = t.Owner + } + for _, r := range d.Renewers { + t.Renewers = append(t.Renewers, Principal{ + Type: r.PrincipalType, + Name: r.PrincipalName, + }) + } + ts = append(ts, t) + } + return ts, nil +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kadm/errors.go b/vendor/github.com/twmb/franz-go/pkg/kadm/errors.go new file mode 100644 index 00000000000..878e62af93b --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kadm/errors.go @@ -0,0 +1,134 @@ +package kadm + +import ( + "errors" + "fmt" + + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kgo" + "github.com/twmb/franz-go/pkg/kmsg" +) + +// AuthError can be returned from requests for resources that you are not +// authorized for. +type AuthError struct { + Err error // Err is the inner *kerr.Error authorization error. +} + +func (a *AuthError) Error() string { return a.Err.Error() } +func (a *AuthError) Unwrap() error { return a.Err } +func (a *AuthError) Is(err error) bool { return a.Err == err } + +func maybeAuthErr(code int16) error { + switch err := kerr.ErrorForCode(code); err { + case kerr.ClusterAuthorizationFailed, + kerr.TopicAuthorizationFailed, + kerr.GroupAuthorizationFailed, + kerr.TransactionalIDAuthorizationFailed, + kerr.DelegationTokenAuthorizationFailed: + return &AuthError{err} + } + return nil +} + +// ShardError is a piece of a request that failed. See ShardErrors for more +// detail. +type ShardError struct { + Req kmsg.Request // Req is a piece of the original request. + Err error // Err is the error that resulted in this request failing. + + // Broker, if non-nil, is the broker this request was meant to be + // issued to. If the NodeID is -1, then this piece of the request + // failed before being mapped to a broker. + Broker BrokerDetail +} + +// ShardErrors contains each individual error shard of a request. +// +// Under the hood, some requests to Kafka need to be mapped to brokers, split, +// and sent to many brokers. The kgo.Client handles this all internally, but +// returns the individual pieces that were requested as "shards". Internally, +// each of these pieces can also fail, and they can all fail uniquely. +// +// The kadm package takes one further step and hides the failing pieces into +// one meta error, the ShardErrors. 
Methods in this package that can return +// this meta error are documented; if desired, you can use errors.As to check +// and unwrap any ShardErrors return. +// +// If a request returns ShardErrors, it is possible that some aspects of the +// request were still successful. You can check ShardErrors.AllFailed as a +// shortcut for whether any of the response is usable or not. +type ShardErrors struct { + Name string // Name is the name of the request these shard errors are for. + AllFailed bool // AllFailed indicates if the original request was entirely unsuccessful. + Errs []ShardError // Errs contains all individual shard errors. +} + +func shardErrEach(req kmsg.Request, shards []kgo.ResponseShard, fn func(kmsg.Response) error) error { + return shardErrEachBroker(req, shards, func(_ BrokerDetail, resp kmsg.Response) error { + return fn(resp) + }) +} + +func shardErrEachBroker(req kmsg.Request, shards []kgo.ResponseShard, fn func(BrokerDetail, kmsg.Response) error) error { + se := ShardErrors{ + Name: kmsg.NameForKey(req.Key()), + } + var ae *AuthError + for _, shard := range shards { + if shard.Err != nil { + se.Errs = append(se.Errs, ShardError{ + Req: shard.Req, + Err: shard.Err, + Broker: shard.Meta, + }) + continue + } + if err := fn(shard.Meta, shard.Resp); errors.As(err, &ae) { + return ae + } + } + se.AllFailed = len(shards) == len(se.Errs) + return se.into() +} + +func (se *ShardErrors) into() error { + if se == nil || len(se.Errs) == 0 { + return nil + } + return se +} + +// Merges two shard errors; the input errors should come from the same request. +func mergeShardErrs(e1, e2 error) error { + var se1, se2 *ShardErrors + if !errors.As(e1, &se1) { + return e2 + } + if !errors.As(e2, &se2) { + return e1 + } + se1.Errs = append(se1.Errs, se2.Errs...) + se1.AllFailed = se1.AllFailed && se2.AllFailed + return se1 +} + +// Error returns an error indicating the name of the request that failed, the +// number of separate errors, and the first error. +func (e *ShardErrors) Error() string { + if len(e.Errs) == 0 { + return "INVALID: ShardErrors contains no errors!" + } + return fmt.Sprintf("request %s has %d separate shard errors, first: %s", e.Name, len(e.Errs), e.Errs[0].Err) +} + +// Unwrap returns the underlying errors. +func (e *ShardErrors) Unwrap() []error { + unwrapped := make([]error, 0, len(e.Errs)) + + for _, shardErr := range e.Errs { + unwrapped = append(unwrapped, shardErr.Err) + } + + return unwrapped +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kadm/groups.go b/vendor/github.com/twmb/franz-go/pkg/kadm/groups.go new file mode 100644 index 00000000000..9f1fc60b5a5 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kadm/groups.go @@ -0,0 +1,1841 @@ +package kadm + +import ( + "context" + "errors" + "fmt" + "sort" + + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kgo" + "github.com/twmb/franz-go/pkg/kmsg" +) + +// GroupMemberMetadata is the metadata that a client sent in a JoinGroup request. +// This can have one of three types: +// +// *kmsg.ConsumerMemberMetadata, if the group's ProtocolType is "consumer" +// *kmsg.ConnectMemberMetadata, if the group's ProtocolType is "connect" +// []byte, if the group's ProtocolType is unknown +type GroupMemberMetadata struct{ i any } + +// AsConsumer returns the metadata as a ConsumerMemberMetadata if possible. 
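+//
+// For example, when inspecting the Join metadata of a DescribedGroupMember
+// m from a DescribeGroups response (a sketch):
+//
+//	if meta, ok := m.Join.AsConsumer(); ok {
+//		_ = meta.Topics // the topics this member requested
+//	}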
+func (m GroupMemberMetadata) AsConsumer() (*kmsg.ConsumerMemberMetadata, bool) { + c, ok := m.i.(*kmsg.ConsumerMemberMetadata) + return c, ok +} + +// AsConnect returns the metadata as ConnectMemberMetadata if possible. +func (m GroupMemberMetadata) AsConnect() (*kmsg.ConnectMemberMetadata, bool) { + c, ok := m.i.(*kmsg.ConnectMemberMetadata) + return c, ok +} + +// Raw returns the metadata as a raw byte slice, if it is neither of consumer +// type nor connect type. +func (m GroupMemberMetadata) Raw() ([]byte, bool) { + c, ok := m.i.([]byte) + return c, ok +} + +// GroupMemberAssignment is the assignment that a leader sent / a member +// received in a SyncGroup request. This can have one of three types: +// +// *kmsg.ConsumerMemberAssignment, if the group's ProtocolType is "consumer" +// *kmsg.ConnectMemberAssignment, if the group's ProtocolType is "connect" +// []byte, if the group's ProtocolType is unknown +type GroupMemberAssignment struct{ i any } + +// AsConsumer returns the assignment as a ConsumerMemberAssignment if possible. +func (m GroupMemberAssignment) AsConsumer() (*kmsg.ConsumerMemberAssignment, bool) { + c, ok := m.i.(*kmsg.ConsumerMemberAssignment) + return c, ok +} + +// AsConnect returns the assignment as ConnectMemberAssignment if possible. +func (m GroupMemberAssignment) AsConnect() (*kmsg.ConnectMemberAssignment, bool) { + c, ok := m.i.(*kmsg.ConnectMemberAssignment) + return c, ok +} + +// Raw returns the assignment as a raw byte slice, if it is neither of consumer +// type nor connect type. +func (m GroupMemberAssignment) Raw() ([]byte, bool) { + c, ok := m.i.([]byte) + return c, ok +} + +// DescribedGroupMember is the detail of an individual group member as returned +// by a describe groups response. +type DescribedGroupMember struct { + MemberID string // MemberID is the Kafka assigned member ID of this group member. + InstanceID *string // InstanceID is a potential user assigned instance ID of this group member (KIP-345). + ClientID string // ClientID is the Kafka client given ClientID of this group member. + ClientHost string // ClientHost is the host this member is running on. + + Join GroupMemberMetadata // Join is what this member sent in its join group request; what it wants to consume. + Assigned GroupMemberAssignment // Assigned is what this member was assigned to consume by the leader. +} + +// AssignedPartitions returns the set of unique topics and partitions that are +// assigned across all members in this group. +// +// This function is only relevant if the group is of type "consumer". +func (d *DescribedGroup) AssignedPartitions() TopicsSet { + s := make(TopicsSet) + for _, m := range d.Members { + if c, ok := m.Assigned.AsConsumer(); ok { + for _, t := range c.Topics { + s.Add(t.Topic, t.Partitions...) + } + } + } + return s +} + +// DescribedGroup contains data from a describe groups response for a single +// group. +type DescribedGroup struct { + Group string // Group is the name of the described group. + + Coordinator BrokerDetail // Coordinator is the coordinator broker for this group. + State string // State is the state this group is in (Empty, Dead, Stable, etc.). + ProtocolType string // ProtocolType is the type of protocol the group is using, "consumer" for normal consumers, "connect" for Kafka connect. + Protocol string // Protocol is the partition assignor strategy this group is using. + Members []DescribedGroupMember // Members contains the members of this group sorted first by InstanceID, or if nil, by MemberID. 
+
+	Err error // Err is non-nil if the group could not be described.
+}
+
+// DescribedGroups contains data for multiple groups from a describe groups
+// response.
+type DescribedGroups map[string]DescribedGroup
+
+// AssignedPartitions returns the set of unique topics and partitions that are
+// assigned across all members in all groups. This is the all-group analogue to
+// DescribedGroup.AssignedPartitions.
+//
+// This function is only relevant for groups of type "consumer".
+func (ds DescribedGroups) AssignedPartitions() TopicsSet {
+	s := make(TopicsSet)
+	for _, g := range ds {
+		for _, m := range g.Members {
+			if c, ok := m.Assigned.AsConsumer(); ok {
+				for _, t := range c.Topics {
+					s.Add(t.Topic, t.Partitions...)
+				}
+			}
+		}
+	}
+	return s
+}
+
+// Sorted returns all groups sorted by group name.
+func (ds DescribedGroups) Sorted() []DescribedGroup {
+	s := make([]DescribedGroup, 0, len(ds))
+	for _, d := range ds {
+		s = append(s, d)
+	}
+	sort.Slice(s, func(i, j int) bool { return s[i].Group < s[j].Group })
+	return s
+}
+
+// On calls fn for the group if it exists, returning the group and the error
+// returned from fn. If fn is nil, this simply returns the group.
+//
+// The fn is given a shallow copy of the group. This function returns the copy
+// as well; any modifications within fn are modifications on the returned copy.
+// Modifications on a described group's inner fields are persisted to the
+// original map (because slices are pointers).
+//
+// If the group does not exist, this returns kerr.GroupIDNotFound.
+func (rs DescribedGroups) On(group string, fn func(*DescribedGroup) error) (DescribedGroup, error) {
+	if len(rs) > 0 {
+		r, ok := rs[group]
+		if ok {
+			if fn == nil {
+				return r, nil
+			}
+			return r, fn(&r)
+		}
+	}
+	return DescribedGroup{}, kerr.GroupIDNotFound
+}
+
+// Error iterates over all groups and returns the first error encountered, if
+// any.
+func (ds DescribedGroups) Error() error {
+	for _, d := range ds {
+		if d.Err != nil {
+			return d.Err
+		}
+	}
+	return nil
+}
+
+// Names returns a sorted list of all group names.
+func (ds DescribedGroups) Names() []string {
+	all := make([]string, 0, len(ds))
+	for g := range ds {
+		all = append(all, g)
+	}
+	sort.Strings(all)
+	return all
+}
+
+// ListedGroup contains data from a list groups response for a single group.
+type ListedGroup struct {
+	Coordinator  int32  // Coordinator is the node ID of the coordinator for this group.
+	Group        string // Group is the name of this group.
+	ProtocolType string // ProtocolType is the type of protocol the group is using, "consumer" for normal consumers, "connect" for Kafka connect.
+	State        string // State is the state this group is in (Empty, Dead, Stable, etc.; only if talking to Kafka 2.6+).
+}
+
+// ListedGroups contains information from a list groups response.
+type ListedGroups map[string]ListedGroup
+
+// Sorted returns all groups sorted by group name.
+func (ls ListedGroups) Sorted() []ListedGroup {
+	s := make([]ListedGroup, 0, len(ls))
+	for _, l := range ls {
+		s = append(s, l)
+	}
+	sort.Slice(s, func(i, j int) bool { return s[i].Group < s[j].Group })
+	return s
+}
+
+// Groups returns a sorted list of all group names.
+func (ls ListedGroups) Groups() []string {
+	all := make([]string, 0, len(ls))
+	for g := range ls {
+		all = append(all, g)
+	}
+	sort.Strings(all)
+	return all
+}
+
+// ListGroups returns all groups in the cluster. If you are talking to Kafka
+// 2.6+, filter states can be used to return groups only in the requested
+// states.
By default, this returns all groups. In almost all cases, +// DescribeGroups is more useful. +// +// This may return *ShardErrors or *AuthError. +func (cl *Client) ListGroups(ctx context.Context, filterStates ...string) (ListedGroups, error) { + req := kmsg.NewPtrListGroupsRequest() + req.StatesFilter = append(req.StatesFilter, filterStates...) + shards := cl.cl.RequestSharded(ctx, req) + list := make(ListedGroups) + return list, shardErrEachBroker(req, shards, func(b BrokerDetail, kr kmsg.Response) error { + resp := kr.(*kmsg.ListGroupsResponse) + if err := maybeAuthErr(resp.ErrorCode); err != nil { + return err + } + if err := kerr.ErrorForCode(resp.ErrorCode); err != nil { + return err + } + for _, g := range resp.Groups { + list[g.Group] = ListedGroup{ // group only lives on one broker, no need to exist-check + Coordinator: b.NodeID, + Group: g.Group, + ProtocolType: g.ProtocolType, + State: g.GroupState, + } + } + return nil + }) +} + +// DescribeGroups describes either all groups specified, or all groups in the +// cluster if none are specified. +// +// This may return *ShardErrors or *AuthError. +// +// If no groups are specified and this method first lists groups, and list +// groups returns a *ShardErrors, this function describes all successfully +// listed groups and appends the list shard errors to any describe shard +// errors. +// +// If only one group is described, there will be at most one request issued, +// and there is no need to deeply inspect the error. +func (cl *Client) DescribeGroups(ctx context.Context, groups ...string) (DescribedGroups, error) { + var seList *ShardErrors + if len(groups) == 0 { + listed, err := cl.ListGroups(ctx) + switch { + case err == nil: + case errors.As(err, &seList): + default: + return nil, err + } + groups = listed.Groups() + if len(groups) == 0 { + return nil, err + } + } + + req := kmsg.NewPtrDescribeGroupsRequest() + req.Groups = groups + + shards := cl.cl.RequestSharded(ctx, req) + described := make(DescribedGroups) + err := shardErrEachBroker(req, shards, func(b BrokerDetail, kr kmsg.Response) error { + resp := kr.(*kmsg.DescribeGroupsResponse) + for _, rg := range resp.Groups { + if err := maybeAuthErr(rg.ErrorCode); err != nil { + return err + } + g := DescribedGroup{ + Group: rg.Group, + Coordinator: b, + State: rg.State, + ProtocolType: rg.ProtocolType, + Protocol: rg.Protocol, + Err: kerr.ErrorForCode(rg.ErrorCode), + } + for _, rm := range rg.Members { + gm := DescribedGroupMember{ + MemberID: rm.MemberID, + InstanceID: rm.InstanceID, + ClientID: rm.ClientID, + ClientHost: rm.ClientHost, + } + + var mi, ai any + switch g.ProtocolType { + case "consumer": + m := new(kmsg.ConsumerMemberMetadata) + a := new(kmsg.ConsumerMemberAssignment) + + m.ReadFrom(rm.ProtocolMetadata) + a.ReadFrom(rm.MemberAssignment) + + mi, ai = m, a + case "connect": + m := new(kmsg.ConnectMemberMetadata) + a := new(kmsg.ConnectMemberAssignment) + + m.ReadFrom(rm.ProtocolMetadata) + a.ReadFrom(rm.MemberAssignment) + + mi, ai = m, a + default: + mi, ai = rm.ProtocolMetadata, rm.MemberAssignment + } + + gm.Join = GroupMemberMetadata{mi} + gm.Assigned = GroupMemberAssignment{ai} + g.Members = append(g.Members, gm) + } + sort.Slice(g.Members, func(i, j int) bool { + if g.Members[i].InstanceID != nil { + if g.Members[j].InstanceID == nil { + return true + } + return *g.Members[i].InstanceID < *g.Members[j].InstanceID + } + if g.Members[j].InstanceID != nil { + return false + } + return g.Members[i].MemberID < g.Members[j].MemberID + }) + described[g.Group] = g 
// group only lives on one broker, no need to exist-check + } + return nil + }) + + var seDesc *ShardErrors + switch { + case err == nil: + return described, seList.into() + case errors.As(err, &seDesc): + if seList != nil { + seDesc.Errs = append(seList.Errs, seDesc.Errs...) + } + return described, seDesc.into() + default: + return nil, err + } +} + +// DeleteGroupResponse contains the response for an individual deleted group. +type DeleteGroupResponse struct { + Group string // Group is the group this response is for. + Err error // Err is non-nil if the group failed to be deleted. +} + +// DeleteGroupResponses contains per-group responses to deleted groups. +type DeleteGroupResponses map[string]DeleteGroupResponse + +// Sorted returns all deleted group responses sorted by group name. +func (ds DeleteGroupResponses) Sorted() []DeleteGroupResponse { + s := make([]DeleteGroupResponse, 0, len(ds)) + for _, d := range ds { + s = append(s, d) + } + sort.Slice(s, func(i, j int) bool { return s[i].Group < s[j].Group }) + return s +} + +// On calls fn for the response group if it exists, returning the response and +// the error returned from fn. If fn is nil, this simply returns the group. +// +// The fn is given a copy of the response. This function returns the copy as +// well; any modifications within fn are modifications on the returned copy. +// +// If the group does not exist, this returns kerr.GroupIDNotFound. +func (rs DeleteGroupResponses) On(group string, fn func(*DeleteGroupResponse) error) (DeleteGroupResponse, error) { + if len(rs) > 0 { + r, ok := rs[group] + if ok { + if fn == nil { + return r, nil + } + return r, fn(&r) + } + } + return DeleteGroupResponse{}, kerr.GroupIDNotFound +} + +// Error iterates over all groups and returns the first error encountered, if +// any. +func (rs DeleteGroupResponses) Error() error { + for _, r := range rs { + if r.Err != nil { + return r.Err + } + } + return nil +} + +// DeleteGroup deletes the specified group. This is similar to DeleteGroups, +// but returns the kerr.ErrorForCode(response.ErrorCode) if the request/response +// is successful. +func (cl *Client) DeleteGroup(ctx context.Context, group string) (DeleteGroupResponse, error) { + rs, err := cl.DeleteGroups(ctx, group) + if err != nil { + return DeleteGroupResponse{}, err + } + g, exists := rs[group] + if !exists { + return DeleteGroupResponse{}, errors.New("requested group was not part of the delete group response") + } + return g, g.Err +} + +// DeleteGroups deletes all groups specified. +// +// The purpose of this request is to allow operators a way to delete groups +// after Kafka 1.1, which removed RetentionTimeMillis from offset commits. See +// KIP-229 for more details. +// +// This may return *ShardErrors. This does not return on authorization +// failures, instead, authorization failures are included in the responses. +func (cl *Client) DeleteGroups(ctx context.Context, groups ...string) (DeleteGroupResponses, error) { + if len(groups) == 0 { + return nil, nil + } + req := kmsg.NewPtrDeleteGroupsRequest() + req.Groups = append(req.Groups, groups...) 
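+	// The request is sharded so that each group goes to its coordinator;
+	// the per-group results from every broker are merged into rs below.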
+	shards := cl.cl.RequestSharded(ctx, req)
+
+	rs := make(map[string]DeleteGroupResponse)
+	return rs, shardErrEach(req, shards, func(kr kmsg.Response) error {
+		resp := kr.(*kmsg.DeleteGroupsResponse)
+		for _, g := range resp.Groups {
+			rs[g.Group] = DeleteGroupResponse{ // group is always on one broker, no need to exist-check
+				Group: g.Group,
+				Err:   kerr.ErrorForCode(g.ErrorCode),
+			}
+		}
+		return nil
+	})
+}
+
+// LeaveGroupBuilder helps build a leave group request, rather than having
+// a function signature (string, string, ...string).
+//
+// All functions on this type accept and return the same pointer, allowing
+// for easy build-and-use usage.
+type LeaveGroupBuilder struct {
+	group       string
+	reason      *string
+	instanceIDs []*string
+}
+
+// LeaveGroup returns a LeaveGroupBuilder for the input group.
+func LeaveGroup(group string) *LeaveGroupBuilder {
+	return &LeaveGroupBuilder{
+		group: group,
+	}
+}
+
+// Reason attaches a reason to all members in the leave group request.
+// This requires Kafka 3.2+.
+func (b *LeaveGroupBuilder) Reason(reason string) *LeaveGroupBuilder {
+	b.reason = StringPtr(reason)
+	return b
+}
+
+// InstanceIDs are members to remove from a group.
+func (b *LeaveGroupBuilder) InstanceIDs(ids ...string) *LeaveGroupBuilder {
+	for _, id := range ids {
+		if id != "" {
+			b.instanceIDs = append(b.instanceIDs, StringPtr(id))
+		}
+	}
+	return b
+}
+
+// LeaveGroupResponse contains the response for an individual instance ID that
+// left a group.
+type LeaveGroupResponse struct {
+	Group      string // Group is the group that was left.
+	InstanceID string // InstanceID is the instance ID that left the group.
+	MemberID   string // MemberID is the member ID that left the group.
+	Err        error  // Err is non-nil if this member did not exist.
+}
+
+// LeaveGroupResponses contains responses for each member of a leave group
+// request. The map key is the instance ID that was removed from the group.
+type LeaveGroupResponses map[string]LeaveGroupResponse
+
+// Sorted returns all removed group members sorted by instance ID.
+func (ls LeaveGroupResponses) Sorted() []LeaveGroupResponse {
+	s := make([]LeaveGroupResponse, 0, len(ls))
+	for _, l := range ls {
+		s = append(s, l)
+	}
+	sort.Slice(s, func(i, j int) bool { return s[i].InstanceID < s[j].InstanceID })
+	return s
+}
+
+// EachError calls fn for every removed member that has a non-nil error.
+func (ls LeaveGroupResponses) EachError(fn func(l LeaveGroupResponse)) {
+	for _, l := range ls {
+		if l.Err != nil {
+			fn(l)
+		}
+	}
+}
+
+// Each calls fn for every removed member.
+func (ls LeaveGroupResponses) Each(fn func(l LeaveGroupResponse)) {
+	for _, l := range ls {
+		fn(l)
+	}
+}
+
+// Error iterates over all removed members and returns the first error
+// encountered, if any.
+func (ls LeaveGroupResponses) Error() error {
+	for _, l := range ls {
+		if l.Err != nil {
+			return l.Err
+		}
+	}
+	return nil
+}
+
+// Ok returns true if there are no errors. This is a shortcut for ls.Error() ==
+// nil.
+func (ls LeaveGroupResponses) Ok() bool {
+	return ls.Error() == nil
+}
+
+// LeaveGroup causes instance IDs to leave a group.
+//
+// This function allows manually removing members using instance IDs from a
+// group, which allows for fast scale down / host replacement (see KIP-345 for
+// more detail). This returns an *AuthError if the user is not authorized to
+// remove members from groups.
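+//
+// A hypothetical usage sketch (adm is a *kadm.Client, ctx a context.Context):
+//
+//	resps, err := adm.LeaveGroup(ctx,
+//		kadm.LeaveGroup("my-group").
+//			Reason("scaling down").
+//			InstanceIDs("instance-1", "instance-2"),
+//	)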
+func (cl *Client) LeaveGroup(ctx context.Context, b *LeaveGroupBuilder) (LeaveGroupResponses, error) {
+	if b == nil || len(b.instanceIDs) == 0 {
+		return nil, nil
+	}
+	req := kmsg.NewPtrLeaveGroupRequest()
+	req.Group = b.group
+	for _, id := range b.instanceIDs {
+		m := kmsg.NewLeaveGroupRequestMember()
+		id := id
+		m.InstanceID = id
+		m.Reason = b.reason
+		req.Members = append(req.Members, m)
+	}
+
+	resp, err := req.RequestWith(ctx, cl.cl)
+	if err != nil {
+		return nil, err
+	}
+	if err := maybeAuthErr(resp.ErrorCode); err != nil {
+		return nil, err
+	}
+	if err := kerr.ErrorForCode(resp.ErrorCode); err != nil {
+		return nil, err
+	}
+
+	resps := make(LeaveGroupResponses)
+	for _, m := range resp.Members {
+		if m.InstanceID == nil {
+			continue // highly unexpected, buggy kafka
+		}
+		resps[*m.InstanceID] = LeaveGroupResponse{
+			Group:      b.group,
+			MemberID:   m.MemberID,
+			InstanceID: *m.InstanceID,
+			Err:        kerr.ErrorForCode(m.ErrorCode),
+		}
+	}
+	return resps, err
+}
+
+// OffsetResponse contains the response for an individual offset for offset
+// methods.
+type OffsetResponse struct {
+	Offset
+	Err error // Err is non-nil if the offset operation failed.
+}
+
+// OffsetResponses contains per-partition responses to offset methods.
+type OffsetResponses map[string]map[int32]OffsetResponse
+
+// Lookup returns the offset at t and p and whether it exists.
+func (os OffsetResponses) Lookup(t string, p int32) (OffsetResponse, bool) {
+	if len(os) == 0 {
+		return OffsetResponse{}, false
+	}
+	ps := os[t]
+	if len(ps) == 0 {
+		return OffsetResponse{}, false
+	}
+	o, exists := ps[p]
+	return o, exists
+}
+
+// Keep filters the responses to only keep the input offsets.
+func (os OffsetResponses) Keep(o Offsets) {
+	os.DeleteFunc(func(r OffsetResponse) bool {
+		if len(o) == 0 {
+			return true // keep nothing, delete
+		}
+		ot := o[r.Topic]
+		if ot == nil {
+			return true // topic missing, delete
+		}
+		_, ok := ot[r.Partition]
+		return !ok // does not exist, delete
+	})
+}
+
+// Offsets returns these offset responses as offsets.
+func (os OffsetResponses) Offsets() Offsets {
+	i := make(Offsets)
+	os.Each(func(o OffsetResponse) {
+		i.Add(o.Offset)
+	})
+	return i
+}
+
+// KOffsets returns these offset responses as a kgo offset map.
+func (os OffsetResponses) KOffsets() map[string]map[int32]kgo.Offset {
+	return os.Offsets().KOffsets()
+}
+
+// KeepFunc keeps only the offsets for which fn returns true.
+func (os OffsetResponses) KeepFunc(fn func(OffsetResponse) bool) {
+	for t, ps := range os {
+		for p, o := range ps {
+			if !fn(o) {
+				delete(ps, p)
+			}
+		}
+		if len(ps) == 0 {
+			delete(os, t)
+		}
+	}
+}
+
+// DeleteFunc deletes any offset for which fn returns true.
+func (os OffsetResponses) DeleteFunc(fn func(OffsetResponse) bool) {
+	os.KeepFunc(func(o OffsetResponse) bool { return !fn(o) })
+}
+
+// Add adds an offset for a given topic/partition to this OffsetResponses map
+// (even if it exists).
+func (os *OffsetResponses) Add(o OffsetResponse) {
+	if *os == nil {
+		*os = make(map[string]map[int32]OffsetResponse)
+	}
+	ot := (*os)[o.Topic]
+	if ot == nil {
+		ot = make(map[int32]OffsetResponse)
+		(*os)[o.Topic] = ot
+	}
+	ot[o.Partition] = o
+}
+
+// EachError calls fn for every offset that has a non-nil error.
+func (os OffsetResponses) EachError(fn func(o OffsetResponse)) {
+	for _, ps := range os {
+		for _, o := range ps {
+			if o.Err != nil {
+				fn(o)
+			}
+		}
+	}
+}
+
+// Sorted returns the responses sorted by topic and partition.
+func (os OffsetResponses) Sorted() []OffsetResponse {
+	var s []OffsetResponse
+	os.Each(func(o OffsetResponse) { s = append(s, o) })
+	sort.Slice(s, func(i, j int) bool {
+		return s[i].Topic < s[j].Topic ||
+			s[i].Topic == s[j].Topic && s[i].Partition < s[j].Partition
+	})
+	return s
+}
+
+// Each calls fn for every offset.
+func (os OffsetResponses) Each(fn func(OffsetResponse)) {
+	for _, ps := range os {
+		for _, o := range ps {
+			fn(o)
+		}
+	}
+}
+
+// Partitions returns the set of unique topics and partitions in these offsets.
+func (os OffsetResponses) Partitions() TopicsSet {
+	s := make(TopicsSet)
+	os.Each(func(o OffsetResponse) {
+		s.Add(o.Topic, o.Partition)
+	})
+	return s
+}
+
+// Error iterates over all offsets and returns the first error encountered, if
+// any. This can be used to check if an operation was entirely successful or
+// not.
+//
+// Note that offset operations can be partially successful. For example, some
+// offsets could succeed in an offset commit while others fail (maybe one topic
+// does not exist for some reason, or you are not authorized for one topic). If
+// this is something you need to worry about, you may need to check all offsets
+// manually.
+func (os OffsetResponses) Error() error {
+	for _, ps := range os {
+		for _, o := range ps {
+			if o.Err != nil {
+				return o.Err
+			}
+		}
+	}
+	return nil
+}
+
+// Ok returns true if there are no errors. This is a shortcut for os.Error() ==
+// nil.
+func (os OffsetResponses) Ok() bool {
+	return os.Error() == nil
+}
+
+// CommitOffsets issues an offset commit request for the input offsets.
+//
+// This function can be used to manually commit offsets when directly consuming
+// partitions outside of an actual consumer group. For example, if you assign
+// partitions manually, but still want to use Kafka to checkpoint what you have
+// consumed, you can manually issue an offset commit request with this method.
+//
+// This does not return on authorization failures, instead, authorization
+// failures are included in the responses.
+func (cl *Client) CommitOffsets(ctx context.Context, group string, os Offsets) (OffsetResponses, error) {
+	req := kmsg.NewPtrOffsetCommitRequest()
+	req.Group = group
+	for t, ps := range os {
+		rt := kmsg.NewOffsetCommitRequestTopic()
+		rt.Topic = t
+		for p, o := range ps {
+			rp := kmsg.NewOffsetCommitRequestTopicPartition()
+			rp.Partition = p
+			rp.Offset = o.At
+			rp.LeaderEpoch = o.LeaderEpoch
+			if len(o.Metadata) > 0 {
+				rp.Metadata = kmsg.StringPtr(o.Metadata)
+			}
+			rt.Partitions = append(rt.Partitions, rp)
+		}
+		req.Topics = append(req.Topics, rt)
+	}
+
+	resp, err := req.RequestWith(ctx, cl.cl)
+	if err != nil {
+		return nil, err
+	}
+
+	rs := make(OffsetResponses)
+	for _, t := range resp.Topics {
+		rt := make(map[int32]OffsetResponse)
+		rs[t.Topic] = rt
+		for _, p := range t.Partitions {
+			rt[p.Partition] = OffsetResponse{
+				Offset: os[t.Topic][p.Partition],
+				Err:    kerr.ErrorForCode(p.ErrorCode),
+			}
+		}
+	}
+
+	for t, ps := range os {
+		respt := rs[t]
+		if respt == nil {
+			respt = make(map[int32]OffsetResponse)
+			rs[t] = respt
+		}
+		for p, o := range ps {
+			if _, exists := respt[p]; exists {
+				continue
+			}
+			respt[p] = OffsetResponse{
+				Offset: o,
+				Err:    errOffsetCommitMissing,
+			}
+		}
+	}
+
+	return rs, nil
+}
+
+var errOffsetCommitMissing = errors.New("partition missing in commit response")
+
+// CommitAllOffsets is identical to CommitOffsets, but also returns an error if
+// the commit request itself was successful yet some offset within the commit
+// failed to be committed.
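+//
+// For example (an illustrative sketch; assumes adm is a *kadm.Client and os
+// is an Offsets map built elsewhere):
+//
+//	if err := adm.CommitAllOffsets(ctx, "my-group", os); err != nil {
+//		// err is either the request error or the first per-offset error
+//	}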
+//
+// This is a shortcut function provided to avoid checking two errors, but you
+// must be careful with this if partially successful commits can be a problem
+// for you.
+func (cl *Client) CommitAllOffsets(ctx context.Context, group string, os Offsets) error {
+	commits, err := cl.CommitOffsets(ctx, group, os)
+	if err != nil {
+		return err
+	}
+	return commits.Error()
+}
+
+// FetchOffsets issues an offset fetch request for all topics and partitions
+// in the group. Because Kafka returns only partitions you are authorized to
+// fetch, this only returns an auth error if you are not authorized to describe
+// the group at all.
+//
+// This method requires talking to Kafka v0.11+.
+func (cl *Client) FetchOffsets(ctx context.Context, group string) (OffsetResponses, error) {
+	req := kmsg.NewPtrOffsetFetchRequest()
+	req.Group = group
+	resp, err := req.RequestWith(ctx, cl.cl)
+	if err != nil {
+		return nil, err
+	}
+	if err := maybeAuthErr(resp.ErrorCode); err != nil {
+		return nil, err
+	}
+	if err := kerr.ErrorForCode(resp.ErrorCode); err != nil {
+		return nil, err
+	}
+	rs := make(OffsetResponses)
+	for _, t := range resp.Topics {
+		rt := make(map[int32]OffsetResponse)
+		rs[t.Topic] = rt
+		for _, p := range t.Partitions {
+			if err := maybeAuthErr(p.ErrorCode); err != nil {
+				return nil, err
+			}
+			var meta string
+			if p.Metadata != nil {
+				meta = *p.Metadata
+			}
+			rt[p.Partition] = OffsetResponse{
+				Offset: Offset{
+					Topic:       t.Topic,
+					Partition:   p.Partition,
+					At:          p.Offset,
+					LeaderEpoch: p.LeaderEpoch,
+					Metadata:    meta,
+				},
+				Err: kerr.ErrorForCode(p.ErrorCode),
+			}
+		}
+	}
+	return rs, nil
+}
+
+// FetchAllGroupTopics is a kadm "internal" topic name that can be used in
+// [FetchOffsetsForTopics]. By default, [FetchOffsetsForTopics] only returns
+// topics that are explicitly requested. Other topics that may be committed to
+// in the group are not returned. Using FetchAllGroupTopics switches the
+// behavior to return the union of all committed topics and all requested
+// topics.
+const FetchAllGroupTopics = "|fetch-all-group-topics|"
+
+// FetchOffsetsForTopics is a helper function that returns the currently
+// committed offsets for the given group, as well as default -1 offsets for any
+// topic/partition that does not yet have a commit.
+//
+// If any partition fetched or listed has an error, this function returns an
+// error. The returned offset responses are ready to be used or converted
+// directly to pure offsets with `Offsets`, and from there into kgo offsets
+// with `KOffsets`.
+//
+// By default, this function returns offsets for only the requested topics. You
+// can use the special "topic" [FetchAllGroupTopics] to return all committed-to
+// topics in addition to all requested topics.
+func (cl *Client) FetchOffsetsForTopics(ctx context.Context, group string, topics ...string) (OffsetResponses, error) {
+	os := make(Offsets)
+
+	var all bool
+	keept := topics[:0]
+	for _, topic := range topics {
+		if topic == FetchAllGroupTopics {
+			all = true
+			continue
+		}
+		keept = append(keept, topic)
+	}
+	topics = keept
+
+	if !all && len(topics) == 0 {
+		return make(OffsetResponses), nil
+	}
+
+	// We have to request metadata to learn all partitions in all the
+	// topics. The default returned offset for all partitions is filled in
+	// to be -1.
+	if len(topics) > 0 {
+		listed, err := cl.ListTopics(ctx, topics...)
+		if err != nil {
+			return nil, fmt.Errorf("unable to list topics: %w", err)
+		}
+
+		for _, topic := range topics {
+			t := listed[topic]
+			if t.Err != nil {
+				return nil, fmt.Errorf("unable to describe topics, topic err: %w", t.Err)
+			}
+			for _, p := range t.Partitions {
+				os.AddOffset(topic, p.Partition, -1, -1)
+			}
+		}
+	}
+
+	resps, err := cl.FetchOffsets(ctx, group)
+	if err != nil {
+		return nil, fmt.Errorf("unable to fetch offsets: %w", err)
+	}
+	if err := resps.Error(); err != nil {
+		return nil, fmt.Errorf("offset fetches had a load error, first error: %w", err)
+	}
+
+	// For any topic (and any partition) we explicitly asked for, if the
+	// partition does not exist in the response, we fill the default -1
+	// from above.
+	os.Each(func(o Offset) {
+		if _, ok := resps.Lookup(o.Topic, o.Partition); !ok {
+			resps.Add(OffsetResponse{Offset: o})
+		}
+	})
+
+	// If we are not requesting all group offsets, then we strip any topic
+	// that was not explicitly requested.
+	if !all {
+		tset := make(map[string]struct{})
+		for _, t := range topics {
+			tset[t] = struct{}{}
+		}
+		for t := range resps {
+			if _, ok := tset[t]; !ok {
+				delete(resps, t)
+			}
+		}
+	}
+	return resps, nil
+}
+
+// FetchOffsetsResponse contains a fetch offsets response for a single group.
+type FetchOffsetsResponse struct {
+	Group   string          // Group is the offsets these fetches correspond to.
+	Fetched OffsetResponses // Fetched contains offsets fetched for this group, if any.
+	Err     error           // Err contains any error preventing offsets from being fetched.
+}
+
+// CommittedPartitions returns the set of unique topics and partitions that
+// have been committed to in this group.
+func (r FetchOffsetsResponse) CommittedPartitions() TopicsSet {
+	return r.Fetched.Partitions()
+}
+
+// FetchOffsetsResponses contains responses for many fetch offsets requests.
+type FetchOffsetsResponses map[string]FetchOffsetsResponse
+
+// EachError calls fn for every response that has a non-nil error.
+func (rs FetchOffsetsResponses) EachError(fn func(FetchOffsetsResponse)) {
+	for _, r := range rs {
+		if r.Err != nil {
+			fn(r)
+		}
+	}
+}
+
+// AllFailed returns whether all fetch offsets requests failed.
+func (rs FetchOffsetsResponses) AllFailed() bool {
+	var n int
+	rs.EachError(func(FetchOffsetsResponse) { n++ })
+	return len(rs) > 0 && n == len(rs)
+}
+
+// CommittedPartitions returns the set of unique topics and partitions that
+// have been committed to across all members in all responses. This is the
+// all-group analogue to FetchOffsetsResponse.CommittedPartitions.
+func (rs FetchOffsetsResponses) CommittedPartitions() TopicsSet {
+	s := make(TopicsSet)
+	for _, r := range rs {
+		s.Merge(r.CommittedPartitions())
+	}
+	return s
+}
+
+// On calls fn for the response group if it exists, returning the response and
+// the error returned from fn. If fn is nil, this simply returns the group.
+//
+// The fn is given a copy of the response. This function returns the copy as
+// well; any modifications within fn are modifications on the returned copy.
+//
+// If the group does not exist, this returns kerr.GroupIDNotFound.
+func (rs FetchOffsetsResponses) On(group string, fn func(*FetchOffsetsResponse) error) (FetchOffsetsResponse, error) {
+	if len(rs) > 0 {
+		r, ok := rs[group]
+		if ok {
+			if fn == nil {
+				return r, nil
+			}
+			return r, fn(&r)
+		}
+	}
+	return FetchOffsetsResponse{}, kerr.GroupIDNotFound
+}
+
+// Error iterates over all responses and returns the first error encountered,
+// if any.
+func (rs FetchOffsetsResponses) Error() error {
+	for _, r := range rs {
+		if r.Err != nil {
+			return r.Err
+		}
+	}
+	return nil
+}
+
+// FetchManyOffsets issues a fetch offsets request for each group specified.
+//
+// This function is a batch version of FetchOffsets. FetchOffsets and
+// CommitOffsets are important to provide as simple APIs for users that manage
+// group offsets outside of a consumer group. Each individual group may have an
+// auth error.
+func (cl *Client) FetchManyOffsets(ctx context.Context, groups ...string) FetchOffsetsResponses {
+	fetched := make(FetchOffsetsResponses)
+	if len(groups) == 0 {
+		return fetched
+	}
+
+	req := kmsg.NewPtrOffsetFetchRequest()
+	for _, group := range groups {
+		rg := kmsg.NewOffsetFetchRequestGroup()
+		rg.Group = group
+		req.Groups = append(req.Groups, rg)
+	}
+
+	groupErr := func(g string, err error) {
+		fetched[g] = FetchOffsetsResponse{
+			Group: g,
+			Err:   err,
+		}
+	}
+	allGroupsErr := func(req *kmsg.OffsetFetchRequest, err error) {
+		for _, g := range req.Groups {
+			groupErr(g.Group, err)
+		}
+	}
+
+	shards := cl.cl.RequestSharded(ctx, req)
+	for _, shard := range shards {
+		req := shard.Req.(*kmsg.OffsetFetchRequest)
+		if shard.Err != nil {
+			allGroupsErr(req, shard.Err)
+			continue
+		}
+		resp := shard.Resp.(*kmsg.OffsetFetchResponse)
+		if err := maybeAuthErr(resp.ErrorCode); err != nil {
+			allGroupsErr(req, err)
+			continue
+		}
+		for _, g := range resp.Groups {
+			if err := maybeAuthErr(g.ErrorCode); err != nil {
+				groupErr(g.Group, err)
+				continue
+			}
+			rs := make(OffsetResponses)
+			fg := FetchOffsetsResponse{
+				Group:   g.Group,
+				Fetched: rs,
+				Err:     kerr.ErrorForCode(g.ErrorCode),
+			}
+			fetched[g.Group] = fg // group coordinator owns all of a group, no need to check existence
+			for _, t := range g.Topics {
+				rt := make(map[int32]OffsetResponse)
+				rs[t.Topic] = rt
+				for _, p := range t.Partitions {
+					var meta string
+					if p.Metadata != nil {
+						meta = *p.Metadata
+					}
+					rt[p.Partition] = OffsetResponse{
+						Offset: Offset{
+							Topic:       t.Topic,
+							Partition:   p.Partition,
+							At:          p.Offset,
+							LeaderEpoch: p.LeaderEpoch,
+							Metadata:    meta,
+						},
+						Err: kerr.ErrorForCode(p.ErrorCode),
+					}
+				}
+			}
+		}
+	}
+	return fetched
+}
+
+// DeleteOffsetsResponses contains the per-topic, per-partition errors. If an
+// offset deletion for a partition was successful, the error will be nil.
+type DeleteOffsetsResponses map[string]map[int32]error
+
+// Lookup returns the response at t and p and whether it exists.
+func (ds DeleteOffsetsResponses) Lookup(t string, p int32) (error, bool) {
+	if len(ds) == 0 {
+		return nil, false
+	}
+	ps := ds[t]
+	if len(ps) == 0 {
+		return nil, false
+	}
+	r, exists := ps[p]
+	return r, exists
+}
+
+// EachError calls fn for every partition that has a non-nil deletion error.
+func (ds DeleteOffsetsResponses) EachError(fn func(string, int32, error)) {
+	for t, ps := range ds {
+		for p, err := range ps {
+			if err != nil {
+				fn(t, p, err)
+			}
+		}
+	}
+}
+
+// Error iterates over all responses and returns the first error encountered,
+// if any.
+func (ds DeleteOffsetsResponses) Error() error {
+	for _, ps := range ds {
+		for _, err := range ps {
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// DeleteOffsets deletes offsets for the given group.
+//
+// Originally, offset commits were persisted in Kafka for some retention time.
+// This proved problematic for infrequently committing consumers, so the
+// retention time concept was removed in Kafka v2.1 in favor of deleting
+// offsets for a group only when the group became empty. However, if a group
+// stops consuming from a topic, then the offsets will persist and lag
+// monitoring for the group will notice an ever-increasing amount of lag for
+// these no-longer-consumed topics. Thus, Kafka v2.4 introduced an OffsetDelete
+// request to allow admins to manually delete offsets for no longer consumed
+// topics.
+//
+// This method requires talking to Kafka v2.4+. This returns an *AuthError if
+// the user is not authorized to delete offsets in the group at all. This does
+// not return on per-topic authorization failures, instead, per-topic
+// authorization failures are included in the responses.
+func (cl *Client) DeleteOffsets(ctx context.Context, group string, s TopicsSet) (DeleteOffsetsResponses, error) {
+	if len(s) == 0 {
+		return nil, nil
+	}
+
+	req := kmsg.NewPtrOffsetDeleteRequest()
+	req.Group = group
+	for t, ps := range s {
+		rt := kmsg.NewOffsetDeleteRequestTopic()
+		rt.Topic = t
+		for p := range ps {
+			rp := kmsg.NewOffsetDeleteRequestTopicPartition()
+			rp.Partition = p
+			rt.Partitions = append(rt.Partitions, rp)
+		}
+		req.Topics = append(req.Topics, rt)
+	}
+
+	resp, err := req.RequestWith(ctx, cl.cl)
+	if err != nil {
+		return nil, err
+	}
+	if err := maybeAuthErr(resp.ErrorCode); err != nil {
+		return nil, err
+	}
+	if err := kerr.ErrorForCode(resp.ErrorCode); err != nil {
+		return nil, err
+	}
+
+	r := make(DeleteOffsetsResponses)
+	for _, t := range resp.Topics {
+		rt := make(map[int32]error)
+		r[t.Topic] = rt
+		for _, p := range t.Partitions {
+			rt[p.Partition] = kerr.ErrorForCode(p.ErrorCode)
+		}
+	}
+	return r, nil
+}
+
+// GroupMemberLag is the lag between a group member's current offset commit and
+// the current end offset.
+//
+// If either the offset commits have load errors, or the listed end offsets
+// have load errors, the Lag field will be -1 and the Err field will be set (to
+// the first of either the commit error, or else the list error).
+//
+// If the group is in the Empty state, lag is calculated for all partitions in
+// a topic, but the member is nil. The calculate function assumes that any
+// assigned topic is meant to be entirely consumed. If the group is Empty and
+// topics could not be listed, some partitions may be missing.
+type GroupMemberLag struct {
+	// Member is a reference to the group member consuming this partition.
+	// If the group is in state Empty, the member will be nil.
+	Member    *DescribedGroupMember
+	Topic     string // Topic is the topic this lag is for.
+	Partition int32  // Partition is the partition this lag is for.
+
+	Commit Offset       // Commit is this member's current offset commit.
+	Start  ListedOffset // Start is a reference to the start of this partition, if provided. Start offsets are optional; if not provided, Start.Err is a non-nil error saying this partition is missing from list offsets. This is always present if lag is calculated via Client.Lag.
+	End    ListedOffset // End is a reference to the end offset of this partition.
+	Lag    int64        // Lag is how far behind this member is, or -1 if there is a commit error or list offset error.
+
+	Err error // Err is either the commit error, or the list end offsets error, or nil.
+}
+
+// IsEmpty returns if this lag is for a group in the Empty state.
+func (g *GroupMemberLag) IsEmpty() bool { return g.Member == nil }
+
+// GroupLag is the per-topic, per-partition lag of members in a group.
+type GroupLag map[string]map[int32]GroupMemberLag
+
+// Lookup returns the lag at t and p and whether it exists.
+func (l GroupLag) Lookup(t string, p int32) (GroupMemberLag, bool) {
+	if len(l) == 0 {
+		return GroupMemberLag{}, false
+	}
+	ps := l[t]
+	if len(ps) == 0 {
+		return GroupMemberLag{}, false
+	}
+	m, exists := ps[p]
+	return m, exists
+}
+
+// Sorted returns the per-topic, per-partition lag by member sorted in order by
+// topic then partition.
+func (l GroupLag) Sorted() []GroupMemberLag {
+	var all []GroupMemberLag
+	for _, ps := range l {
+		for _, l := range ps {
+			all = append(all, l)
+		}
+	}
+	sort.Slice(all, func(i, j int) bool {
+		l, r := all[i], all[j]
+		if l.Topic < r.Topic {
+			return true
+		}
+		if l.Topic > r.Topic {
+			return false
+		}
+		return l.Partition < r.Partition
+	})
+	return all
+}
+
+// IsEmpty returns if the group is empty.
+func (l GroupLag) IsEmpty() bool {
+	for _, ps := range l {
+		for _, m := range ps {
+			return m.IsEmpty()
+		}
+	}
+	return false
+}
+
+// Total returns the total lag across all topics.
+func (l GroupLag) Total() int64 {
+	var tot int64
+	for _, tl := range l.TotalByTopic() {
+		tot += tl.Lag
+	}
+	return tot
+}
+
+// TotalByTopic returns the total lag for each topic.
+func (l GroupLag) TotalByTopic() GroupTopicsLag {
+	m := make(map[string]TopicLag)
+	for t, ps := range l {
+		mt := TopicLag{
+			Topic: t,
+		}
+		for _, l := range ps {
+			if l.Lag > 0 {
+				mt.Lag += l.Lag
+			}
+		}
+		m[t] = mt
+	}
+	return m
+}
+
+// GroupTopicsLag is the total lag per topic within a group.
+type GroupTopicsLag map[string]TopicLag
+
+// TopicLag is the lag for an individual topic within a group.
+type TopicLag struct {
+	Topic string
+	Lag   int64
+}
+
+// Sorted returns the per-topic lag, sorted by topic.
+func (l GroupTopicsLag) Sorted() []TopicLag {
+	var all []TopicLag
+	for _, tl := range l {
+		all = append(all, tl)
+	}
+	sort.Slice(all, func(i, j int) bool {
+		return all[i].Topic < all[j].Topic
+	})
+	return all
+}
+
+// DescribedGroupLag contains a described group and its lag, or the errors that
+// prevent the lag from being calculated.
+type DescribedGroupLag struct {
+	Group string // Group is the group name.
+
+	Coordinator  BrokerDetail           // Coordinator is the coordinator broker for this group.
+	State        string                 // State is the state this group is in (Empty, Dead, Stable, etc.).
+	ProtocolType string                 // ProtocolType is the type of protocol the group is using, "consumer" for normal consumers, "connect" for Kafka connect.
+	Protocol     string                 // Protocol is the partition assignor strategy this group is using.
+	Members      []DescribedGroupMember // Members contains the members of this group sorted first by InstanceID, or if nil, by MemberID.
+	Lag          GroupLag               // Lag is the lag for the group.
+
+	DescribeErr error // DescribeErr is the error returned from describing the group, if any.
+	FetchErr    error // FetchErr is the error returned from fetching offsets, if any.
+}
+
+// Error returns the first of DescribeErr or FetchErr that is non-nil.
+func (l *DescribedGroupLag) Error() error {
+	if l.DescribeErr != nil {
+		return l.DescribeErr
+	}
+	return l.FetchErr
+}
+
+// DescribedGroupLags is a map of group names to the described group with its
+// lag, or error for those groups.
+type DescribedGroupLags map[string]DescribedGroupLag
+
+// Sorted returns all lags sorted by group name.
+func (ls DescribedGroupLags) Sorted() []DescribedGroupLag {
+	s := make([]DescribedGroupLag, 0, len(ls))
+	for _, l := range ls {
+		s = append(s, l)
+	}
+	sort.Slice(s, func(i, j int) bool { return s[i].Group < s[j].Group })
+	return s
+}
+
+// EachError calls fn for every group that has a non-nil error.
+func (ls DescribedGroupLags) EachError(fn func(l DescribedGroupLag)) {
+	for _, l := range ls {
+		if l.Error() != nil {
+			fn(l)
+		}
+	}
+}
+
+// Each calls fn for every group.
+func (ls DescribedGroupLags) Each(fn func(l DescribedGroupLag)) {
+	for _, l := range ls {
+		fn(l)
+	}
+}
+
+// Error iterates over all groups and returns the first error encountered, if
+// any.
+func (ls DescribedGroupLags) Error() error {
+	for _, l := range ls {
+		if l.Error() != nil {
+			return l.Error()
+		}
+	}
+	return nil
+}
+
+// Ok returns true if there are no errors. This is a shortcut for ls.Error() ==
+// nil.
+func (ls DescribedGroupLags) Ok() bool {
+	return ls.Error() == nil
+}
+
+// Lag returns the lag for all input groups. This function is a shortcut for
+// the steps required to use CalculateGroupLagWithStartOffsets properly, with
+// some opinionated choices for error handling since calculating lag is a
+// multi-request process. If a group cannot be described or the offsets cannot
+// be fetched, an error is returned for the group. If any topic cannot have its
+// end offsets listed, the lag for the partition has a corresponding error. If
+// any request fails with an auth error, this returns *AuthError.
+func (cl *Client) Lag(ctx context.Context, groups ...string) (DescribedGroupLags, error) {
+	set := make(map[string]struct{}, len(groups))
+	for _, g := range groups {
+		set[g] = struct{}{}
+	}
+	rem := func() []string {
+		groups = groups[:0]
+		for g := range set {
+			groups = append(groups, g)
+		}
+		return groups
+	}
+	lags := make(DescribedGroupLags)
+
+	described, err := cl.DescribeGroups(ctx, rem()...)
+	// For auth err: always return.
+	// For shard errors, if we had some partial success, then we continue
+	// to the rest of the logic in this function.
+	// If every shard failed, or on all other errors, we return.
+	var ae *AuthError
+	var se *ShardErrors
+	switch {
+	case errors.As(err, &ae):
+		return nil, err
+	case errors.As(err, &se) && !se.AllFailed:
+		for _, se := range se.Errs {
+			for _, g := range se.Req.(*kmsg.DescribeGroupsRequest).Groups {
+				lags[g] = DescribedGroupLag{
+					Group:       g,
+					Coordinator: se.Broker,
+					DescribeErr: se.Err,
+				}
+				delete(set, g)
+			}
+		}
+	case err != nil:
+		return nil, err
+	}
+	for _, g := range described {
+		lags[g.Group] = DescribedGroupLag{
+			Group:        g.Group,
+			Coordinator:  g.Coordinator,
+			State:        g.State,
+			ProtocolType: g.ProtocolType,
+			Protocol:     g.Protocol,
+			Members:      g.Members,
+			DescribeErr:  g.Err,
+		}
+		if g.Err != nil {
+			delete(set, g.Group)
+			continue
+		}
+
+		// If the input set of groups is empty, DescribeGroups returns all groups.
+		// We add to `set` here so that the Lag function itself can calculate
+		// lag for all groups.
+		set[g.Group] = struct{}{}
+	}
+	if len(set) == 0 {
+		return lags, nil
+	}
+
+	// Same thought here. For auth errors, we always return.
+	// If a group offset fetch failed, we delete it from described
+	// because we cannot calculate lag for it.
+	fetched := cl.FetchManyOffsets(ctx, rem()...)
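+	// Note that FetchManyOffsets returns no top-level error: any failure is
+	// recorded per group and handled in the loop below.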
+	for _, r := range fetched {
+		switch {
+		case errors.As(r.Err, &ae):
+			return nil, r.Err
+		case r.Err != nil:
+			l := lags[r.Group]
+			l.FetchErr = r.Err
+			lags[r.Group] = l
+			delete(set, r.Group)
+			delete(described, r.Group)
+		}
+	}
+	if len(set) == 0 {
+		return lags, nil
+	}
+
+	// We have to list the start & end offset for all assigned and
+	// committed partitions.
+	var startOffsets, endOffsets ListedOffsets
+	listPartitions := described.AssignedPartitions()
+	listPartitions.Merge(fetched.CommittedPartitions())
+	if topics := listPartitions.Topics(); len(topics) > 0 {
+		for _, list := range []struct {
+			fn  func(context.Context, ...string) (ListedOffsets, error)
+			dst *ListedOffsets
+		}{
+			{cl.ListStartOffsets, &startOffsets},
+			{cl.ListEndOffsets, &endOffsets},
+		} {
+			listed, err := list.fn(ctx, topics...)
+			*list.dst = listed
+			// As above: return on auth error. If there are shard errors,
+			// the topics will be missing in the response and then
+			// CalculateGroupLag will return UnknownTopicOrPartition.
+			switch {
+			case errors.As(err, &ae):
+				return nil, err
+			case errors.As(err, &se):
+				// do nothing: these show up as errListMissing
+			case err != nil:
+				return nil, err
+			}
+			// For anything that lists with a single -1 partition, the
+			// topic does not exist. We add an UnknownTopicOrPartition
+			// error for all partitions that were committed to, so that
+			// this shows up in the lag output as UnknownTopicOrPartition
+			// rather than errListMissing.
+			for t, ps := range listed {
+				if len(ps) != 1 {
+					continue
+				}
+				if _, ok := ps[-1]; !ok {
+					continue
+				}
+				delete(ps, -1)
+				for p := range listPartitions[t] {
+					ps[p] = ListedOffset{
+						Topic:     t,
+						Partition: p,
+						Err:       kerr.UnknownTopicOrPartition,
+					}
+				}
+			}
+		}
+	}
+
+	for _, g := range described {
+		l := lags[g.Group]
+		l.Lag = CalculateGroupLagWithStartOffsets(g, fetched[g.Group].Fetched, startOffsets, endOffsets)
+		lags[g.Group] = l
+	}
+	return lags, nil
+}
+
+var noOffsets = make(ListedOffsets)
+
+// CalculateGroupLagWithStartOffsets returns the per-partition lag of all
+// members in a group. This function slightly expands on [CalculateGroupLag] to
+// handle calculating lag for partitions that (1) have no commits AND (2) have
+// some segments deleted (cleanup.policy=delete) such that the log start offset
+// is non-zero.
+//
+// As an example, if a group is consuming a partition with log end offset 30
+// and log start offset 10 and has not yet committed to the group, this
+// function can correctly tell you that the lag is 20, whereas
+// CalculateGroupLag would tell you the lag is 30.
+//
+// This function accepts 'nil' for startOffsets, which will result in the same
+// behavior as CalculateGroupLag. This function is useful if you have
+// infrequently committing groups against topics that have segments being
+// deleted.
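+//
+// A usage sketch (assuming group, commits, and the listed start and end
+// offsets were gathered as in the CalculateGroupLag example below):
+//
+//	lag := kadm.CalculateGroupLagWithStartOffsets(group, commits, startOffsets, endOffsets)
+//	for _, l := range lag.Sorted() {
+//		fmt.Println(l.Topic, l.Partition, l.Lag)
+//	}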
+func CalculateGroupLagWithStartOffsets(
+	group DescribedGroup,
+	commit OffsetResponses,
+	startOffsets ListedOffsets,
+	endOffsets ListedOffsets,
+) GroupLag {
+	if commit == nil { // avoid panics below
+		commit = make(OffsetResponses)
+	}
+	if startOffsets == nil {
+		startOffsets = noOffsets
+	}
+	if endOffsets == nil {
+		endOffsets = noOffsets
+	}
+	if group.State == "Empty" {
+		return calculateEmptyLag(commit, startOffsets, endOffsets)
+	}
+
+	l := make(map[string]map[int32]GroupMemberLag)
+	for mi, m := range group.Members {
+		c, ok := m.Assigned.AsConsumer()
+		if !ok {
+			continue
+		}
+		for _, t := range c.Topics {
+			lt := l[t.Topic]
+			if lt == nil {
+				lt = make(map[int32]GroupMemberLag)
+				l[t.Topic] = lt
+			}
+
+			tcommit := commit[t.Topic]
+			tstart := startOffsets[t.Topic]
+			tend := endOffsets[t.Topic]
+			for _, p := range t.Partitions {
+				var (
+					pcommit = OffsetResponse{Offset: Offset{
+						Topic:     t.Topic,
+						Partition: p,
+						At:        -1,
+					}}
+					pend = ListedOffset{
+						Topic:     t.Topic,
+						Partition: p,
+						Err:       errListMissing,
+					}
+					pstart = pend
+					perr   error
+				)
+
+				if tcommit != nil {
+					if pcommitActual, ok := tcommit[p]; ok {
+						pcommit = pcommitActual
+					}
+				}
+				perr = errListMissing
+				if tend != nil {
+					if pendActual, ok := tend[p]; ok {
+						pend = pendActual
+						perr = nil
+					}
+				}
+				if perr == nil {
+					if perr = pcommit.Err; perr == nil {
+						perr = pend.Err
+					}
+				}
+				if tstart != nil {
+					if pstartActual, ok := tstart[p]; ok {
+						pstart = pstartActual
+					}
+				}
+
+				lag := int64(-1)
+				if perr == nil {
+					lag = pend.Offset
+					if pstart.Err == nil {
+						lag = pend.Offset - pstart.Offset
+					}
+					if pcommit.At >= 0 {
+						lag = pend.Offset - pcommit.At
+					}
+					// It is possible for a commit to be after the
+					// end, in which case we will round to 0. We do
+					// this check here to also handle a potential non-commit
+					// weird pend < pstart scenario where a segment
+					// was deleted between listing offsets.
+					if lag < 0 {
+						lag = 0
+					}
+				}
+
+				lt[p] = GroupMemberLag{
+					Member:    &group.Members[mi],
+					Topic:     t.Topic,
+					Partition: p,
+					Commit:    pcommit.Offset,
+					Start:     pstart,
+					End:       pend,
+					Lag:       lag,
+					Err:       perr,
+				}
+
+			}
+		}
+	}
+
+	return l
+}
+
+// CalculateGroupLag returns the per-partition lag of all members in a group.
+// The inputs to this method are the returns from the following methods (make
+// sure to check shard errors):
+//
+//	// Note that FetchOffsets exists to fetch only one group's offsets,
+//	// but some of the code below slightly changes.
+//	groups := DescribeGroups(ctx, group)
+//	commits := FetchManyOffsets(ctx, group)
+//	var endOffsets ListedOffsets
+//	listPartitions := groups.AssignedPartitions()
+//	listPartitions.Merge(commits.CommittedPartitions())
+//	if topics := listPartitions.Topics(); len(topics) > 0 {
+//		endOffsets = ListEndOffsets(ctx, listPartitions.Topics())
+//	}
+//	for _, group := range groups {
+//		lag := CalculateGroupLag(group, commits[group.Group].Fetched, endOffsets)
+//	}
+//
+// If assigned partitions are missing in the listed end offsets, the partition
+// will have an error indicating it is missing. A missing topic or partition in
+// the commits is assumed to be nothing committing yet.
+func CalculateGroupLag( + group DescribedGroup, + commit OffsetResponses, + endOffsets ListedOffsets, +) GroupLag { + return CalculateGroupLagWithStartOffsets(group, commit, nil, endOffsets) +} + +func calculateEmptyLag(commit OffsetResponses, startOffsets, endOffsets ListedOffsets) GroupLag { + l := make(map[string]map[int32]GroupMemberLag) + for t, ps := range commit { + lt := l[t] + if lt == nil { + lt = make(map[int32]GroupMemberLag) + l[t] = lt + } + tstart := startOffsets[t] + tend := endOffsets[t] + for p, pcommit := range ps { + var ( + pend = ListedOffset{ + Topic: t, + Partition: p, + Err: errListMissing, + } + pstart = pend + perr error + ) + + // In order of priority, perr (the error on the Lag + // calculation) is non-nil if: + // + // * The topic is missing from end ListOffsets + // * The partition is missing from end ListOffsets + // * OffsetFetch has an error on the partition + // * ListOffsets has an error on the partition + // + // If we have no error, then we can calculate lag. + // We *do* allow an error on start ListedOffsets; + // if there are no start offsets or the start offset + // has an error, it is not used for lag calculation. + perr = errListMissing + if tend != nil { + if pendActual, ok := tend[p]; ok { + pend = pendActual + perr = nil + } + } + if perr == nil { + if perr = pcommit.Err; perr == nil { + perr = pend.Err + } + } + if tstart != nil { + if pstartActual, ok := tstart[p]; ok { + pstart = pstartActual + } + } + + lag := int64(-1) + if perr == nil { + lag = pend.Offset + if pstart.Err == nil { + lag = pend.Offset - pstart.Offset + } + if pcommit.At >= 0 { + lag = pend.Offset - pcommit.At + } + if lag < 0 { + lag = 0 + } + } + + lt[p] = GroupMemberLag{ + Topic: t, + Partition: p, + Commit: pcommit.Offset, + Start: pstart, + End: pend, + Lag: lag, + Err: perr, + } + } + } + + // Now we look at all topics that we calculated lag for, and check out + // the partitions we listed. If those partitions are missing from the + // lag calculations above, the partitions were not committed to and we + // count that as entirely lagging. + for t, lt := range l { + tstart := startOffsets[t] + tend := endOffsets[t] + for p, pend := range tend { + if _, ok := lt[p]; ok { + continue + } + pcommit := Offset{ + Topic: t, + Partition: p, + At: -1, + LeaderEpoch: -1, + } + perr := pend.Err + lag := int64(-1) + if perr == nil { + lag = pend.Offset + } + pstart := ListedOffset{ + Topic: t, + Partition: p, + Err: errListMissing, + } + if tstart != nil { + if pstartActual, ok := tstart[p]; ok { + pstart = pstartActual + if pstart.Err == nil { + lag = pend.Offset - pstart.Offset + if lag < 0 { + lag = 0 + } + } + } + } + lt[p] = GroupMemberLag{ + Topic: t, + Partition: p, + Commit: pcommit, + Start: pstart, + End: pend, + Lag: lag, + Err: perr, + } + } + } + + return l +} + +var errListMissing = errors.New("missing from list offsets") diff --git a/vendor/github.com/twmb/franz-go/pkg/kadm/kadm.go b/vendor/github.com/twmb/franz-go/pkg/kadm/kadm.go new file mode 100644 index 00000000000..93df0868d1c --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kadm/kadm.go @@ -0,0 +1,665 @@ +// Package kadm provides a helper Kafka admin client around a *kgo.Client. +// +// This package is meant to cover the common use cases for dropping into an +// "admin" like interface for Kafka. As with any admin client, this package +// must make opinionated decisions on what to provide and what to hide. 
The
+// underlying Kafka protocol gives more detailed information in responses, or
+// allows more fine tuning in requests, but most of the time, these details are
+// unnecessary.
+//
+// By virtue of making opinionated decisions, this package cannot satisfy every
+// need for requests and responses. If you need more control than this admin
+// client provides, you can use the kmsg package directly.
+//
+// This package contains a lot of types, but the main two types to know are
+// Client and ShardErrors. Every other type is used for inputs or outputs
+// to methods on the client.
+//
+// The Client type is a simple small wrapper around a *kgo.Client that exists
+// solely to namespace methods. The ShardErrors type is a bit more complicated.
+// When issuing requests, under the hood some of these requests actually need
+// to be mapped to brokers and split, issuing different pieces of the input
+// request to different brokers. The *kgo.Client handles this all internally,
+// but (if using RequestSharded as directed), returns each response to each of
+// these split requests individually. Each response can fail or be successful.
+// This package goes one step further and merges these failures into one meta
+// failure, ShardErrors. Any function that returns ShardErrors is documented as
+// such, and if a function returns a non-nil ShardErrors, it is possible that
+// the returned data is actually valid and usable. If you care to, you can log
+// / react to the partial failures and continue using the partially successful
+// result. This is in contrast to other clients, which either require you to
+// request individual brokers directly, completely hide individual failures,
+// or completely fail on any individual failure.
+//
+// For methods that list or describe things, this package often completely
+// fails responses on auth failures. If you use a method that accepts two
+// topics, one that you are authorized to and one that you are not, you will
+// not receive a partially successful response. Instead, you will receive an
+// AuthError. Methods that do *not* fail on auth errors are explicitly
+// documented as such.
+//
+// Users may often find it easy to work with lists of topics or partitions.
+// Rather than needing to build deeply nested maps directly, this package has a
+// few helper types that are worth knowing:
+//
+//	TopicsList  - a slice of topics and their partitions
+//	TopicsSet   - a set of topics, each containing a set of partitions
+//	Partitions  - a slice of partitions
+//	OffsetsList - a slice of offsets
+//	Offsets     - a map of offsets
+//
+// These types are meant to be easy to build and use, and can be used as the
+// starting point for other types.
+//
+// Many functions in this package are variadic and return either a map or a
+// list of responses, and you may only use one element as input and are only
+// interested in one element of output. This package provides the following
+// functions to help:
+//
+//	Any(map)
+//	AnyE(map, err)
+//	First(slice)
+//	FirstE(slice, err)
+//
+// The intended use case of these is something like `kadm.AnyE(kadm.CreateTopics(..., "my-one-topic"))`,
+// such that you can immediately get the response for the one topic you are
+// creating.
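+//
+// A minimal end-to-end sketch (the seed broker address is only an
+// illustrative assumption):
+//
+//	cl, err := kgo.NewClient(kgo.SeedBrokers("localhost:9092"))
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer cl.Close()
+//	adm := kadm.NewClient(cl)
+//	groups, err := adm.ListGroups(context.Background())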
+package kadm
+
+import (
+	"errors"
+	"regexp"
+	"runtime/debug"
+	"sort"
+	"sync"
+
+	"github.com/twmb/franz-go/pkg/kgo"
+)
+
+func unptrStr(s *string) string {
+	if s == nil {
+		return ""
+	}
+	return *s
+}
+
+var (
+	reVersion     *regexp.Regexp
+	reVersionOnce sync.Once
+)
+
+// Copied from kgo, but we use the kadm package version.
+func softwareVersion() string {
+	info, ok := debug.ReadBuildInfo()
+	if ok {
+		reVersionOnce.Do(func() { reVersion = regexp.MustCompile(`^[a-zA-Z0-9](?:[a-zA-Z0-9.-]*[a-zA-Z0-9])?$`) })
+		for _, dep := range info.Deps {
+			if dep.Path == "github.com/twmb/franz-go/pkg/kadm" {
+				if reVersion.MatchString(dep.Version) {
+					return dep.Version
+				}
+			}
+		}
+	}
+	return "unknown"
+}
+
+// Client is an admin client.
+//
+// This is a simple wrapper around a *kgo.Client to provide helper admin methods.
+type Client struct {
+	cl *kgo.Client
+
+	timeoutMillis int32
+}
+
+// NewClient returns an admin client.
+func NewClient(cl *kgo.Client) *Client {
+	return &Client{cl, 15000} // 15s timeout default, matching kmsg
+}
+
+// NewOptClient returns a new client directly from kgo options. This is a
+// wrapper around creating a new *kgo.Client and then creating an admin client.
+func NewOptClient(opts ...kgo.Opt) (*Client, error) {
+	cl, err := kgo.NewClient(opts...)
+	if err != nil {
+		return nil, err
+	}
+	return NewClient(cl), nil
+}
+
+// Close closes the underlying *kgo.Client.
+func (cl *Client) Close() {
+	cl.cl.Close()
+}
+
+// SetTimeoutMillis sets the timeout to use for requests that have a timeout,
+// overriding the default of 15,000 (15s).
+//
+// Not all requests have timeouts. Most requests are expected to return
+// immediately or are expected to deliberately hang. The following requests
+// have timeout fields:
+//
+//	Produce
+//	CreateTopics
+//	DeleteTopics
+//	DeleteRecords
+//	CreatePartitions
+//	ElectLeaders
+//	AlterPartitionAssignments
+//	ListPartitionReassignments
+//	UpdateFeatures
+//
+// Not all requests above are supported in the admin API.
+func (cl *Client) SetTimeoutMillis(millis int32) {
+	cl.timeoutMillis = millis
+}
+
+// StringPtr is a shortcut function to aid building configs for creating or
+// altering topics.
+func StringPtr(s string) *string {
+	return &s
+}
+
+// BrokerDetail is a type alias for kgo.BrokerMetadata.
+type BrokerDetail = kgo.BrokerMetadata
+
+// BrokerDetails contains the details for many brokers.
+type BrokerDetails []BrokerDetail
+
+// NodeIDs returns the IDs of all nodes.
+func (ds BrokerDetails) NodeIDs() []int32 {
+	var all []int32
+	for _, d := range ds {
+		all = append(all, d.NodeID)
+	}
+	return int32s(all)
+}
+
+// Partition is a partition for a topic.
+type Partition struct {
+	Topic     string // Topic is the topic for this partition.
+	Partition int32  // Partition is this partition's number.
+}
+
+// Offset is an offset for a topic.
+type Offset struct {
+	Topic       string
+	Partition   int32
+	At          int64  // At is the offset to set.
+	LeaderEpoch int32  // LeaderEpoch is the broker leader epoch of the record at this offset.
+	Metadata    string // Metadata, if non-empty, is used for offset commits.
+}
+
+// NewOffsetFromRecord is a helper to create an Offset for a given Record.
+func NewOffsetFromRecord(record *kgo.Record) Offset {
+	return Offset{
+		Topic:       record.Topic,
+		Partition:   record.Partition,
+		At:          record.Offset + 1,
+		LeaderEpoch: record.LeaderEpoch,
+	}
+}
+
+// Partitions wraps many partitions.
+type Partitions []Partition
+
+// TopicsSet returns these partitions as TopicsSet.
+func (ps Partitions) TopicsSet() TopicsSet {
+	s := make(TopicsSet)
+	for _, p := range ps {
+		s.Add(p.Topic, p.Partition)
+	}
+	return s
+}
+
+// TopicsList returns these partitions as sorted TopicsList.
+func (ps Partitions) TopicsList() TopicsList {
+	return ps.TopicsSet().Sorted()
+}
+
+// OffsetsList wraps many offsets and is a helper for building Offsets.
+type OffsetsList []Offset
+
+// Offsets returns this list as the non-list Offsets. All fields in each
+// Offset must be set properly.
+func (l OffsetsList) Offsets() Offsets {
+	os := make(Offsets)
+	for _, o := range l {
+		os.Add(o)
+	}
+	return os
+}
+
+// KOffsets returns this list as a kgo offset map.
+func (l OffsetsList) KOffsets() map[string]map[int32]kgo.Offset {
+	return l.Offsets().KOffsets()
+}
+
+// Offsets wraps many offsets and is the type used for offset functions.
+type Offsets map[string]map[int32]Offset
+
+// Lookup returns the offset at t and p and whether it exists.
+func (os Offsets) Lookup(t string, p int32) (Offset, bool) {
+	if len(os) == 0 {
+		return Offset{}, false
+	}
+	ps := os[t]
+	if len(ps) == 0 {
+		return Offset{}, false
+	}
+	o, exists := ps[p]
+	return o, exists
+}
+
+// Add adds an offset for a given topic/partition to this Offsets map.
+//
+// If the partition already exists, the offset is only added if:
+//
+//   - the new leader epoch is higher than the old, or
+//   - the leader epochs equal, and the new offset is higher than the old
+//
+// If you would like to add offsets forcefully no matter what, use the Delete
+// method before this.
+func (os *Offsets) Add(o Offset) {
+	if *os == nil {
+		*os = make(map[string]map[int32]Offset)
+	}
+	ot := (*os)[o.Topic]
+	if ot == nil {
+		ot = make(map[int32]Offset)
+		(*os)[o.Topic] = ot
+	}
+
+	prior, exists := ot[o.Partition]
+	if !exists || prior.LeaderEpoch < o.LeaderEpoch ||
+		prior.LeaderEpoch == o.LeaderEpoch && prior.At < o.At {
+		ot[o.Partition] = o
+	}
+}
+
+// Delete removes any offset at topic t and partition p.
+func (os Offsets) Delete(t string, p int32) {
+	if os == nil {
+		return
+	}
+	ot := os[t]
+	if ot == nil {
+		return
+	}
+	delete(ot, p)
+	if len(ot) == 0 {
+		delete(os, t)
+	}
+}
+
+// AddOffset is a helper to add an offset for a given topic and partition. The
+// leader epoch field must be -1 if you do not know the leader epoch or if
+// you do not have an offset yet.
+func (os *Offsets) AddOffset(t string, p int32, o int64, leaderEpoch int32) {
+	os.Add(Offset{
+		Topic:       t,
+		Partition:   p,
+		At:          o,
+		LeaderEpoch: leaderEpoch,
+	})
+}
+
+// KeepFunc calls fn for every offset, keeping the offset if fn returns true.
+func (os Offsets) KeepFunc(fn func(o Offset) bool) {
+	for t, ps := range os {
+		for p, o := range ps {
+			if !fn(o) {
+				delete(ps, p)
+			}
+		}
+		if len(ps) == 0 {
+			delete(os, t)
+		}
+	}
+}
+
+// DeleteFunc calls fn for every offset, deleting the offset if fn returns
+// true.
+func (os Offsets) DeleteFunc(fn func(o Offset) bool) {
+	os.KeepFunc(func(o Offset) bool { return !fn(o) })
+}
+
+// TopicsSet returns the set of topics and partitions currently used in these
+// offsets.
+func (os Offsets) TopicsSet() TopicsSet {
+	s := make(TopicsSet)
+	os.Each(func(o Offset) { s.Add(o.Topic, o.Partition) })
+	return s
+}
+
+// Each calls fn for each offset in these offsets.
+func (os Offsets) Each(fn func(Offset)) {
+	for _, ps := range os {
+		for _, o := range ps {
+			fn(o)
+		}
+	}
+}
+
+// KOffsets returns these offsets as a kgo offset map.
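+//
+// For example (a sketch, assuming os is an Offsets value), the converted map
+// can seed a kgo consumer:
+//
+//	cl, err := kgo.NewClient(kgo.ConsumePartitions(os.KOffsets()))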
+func (os Offsets) KOffsets() map[string]map[int32]kgo.Offset { + tskgo := make(map[string]map[int32]kgo.Offset) + for t, ps := range os { + pskgo := make(map[int32]kgo.Offset) + for p, o := range ps { + pskgo[p] = kgo.NewOffset(). + At(o.At). + WithEpoch(o.LeaderEpoch) + } + tskgo[t] = pskgo + } + return tskgo +} + +// Sorted returns the offsets sorted by topic and partition. +func (os Offsets) Sorted() []Offset { + var s []Offset + os.Each(func(o Offset) { s = append(s, o) }) + sort.Slice(s, func(i, j int) bool { + return s[i].Topic < s[j].Topic || + s[i].Topic == s[j].Topic && s[i].Partition < s[j].Partition + }) + return s +} + +// OffsetsFromFetches returns Offsets for the final record in any partition in +// the fetches. This is a helper to enable committing an entire returned batch. +// +// This function looks at only the last record per partition, assuming that the +// last record is the highest offset (which is the behavior returned by kgo's +// Poll functions). The returned offsets are one past the offset contained in +// the records. +func OffsetsFromFetches(fs kgo.Fetches) Offsets { + os := make(Offsets) + fs.EachPartition(func(p kgo.FetchTopicPartition) { + if len(p.Records) == 0 { + return + } + r := p.Records[len(p.Records)-1] + os.Add(NewOffsetFromRecord(r)) + }) + return os +} + +// OffsetsFromRecords returns offsets for all given records, using the highest +// offset per partition. The returned offsets are one past the offset contained +// in the records. +func OffsetsFromRecords(rs ...kgo.Record) Offsets { + os := make(Offsets) + for _, r := range rs { + os.Add(NewOffsetFromRecord(&r)) + } + return os +} + +// TopicsSet is a set of topics and, per topic, a set of partitions. +// +// All methods provided for TopicsSet are safe to use on a nil (default) set. +type TopicsSet map[string]map[int32]struct{} + +// Lookup returns whether the topic and partition exists. +func (s TopicsSet) Lookup(t string, p int32) bool { + if len(s) == 0 { + return false + } + ps := s[t] + if len(ps) == 0 { + return false + } + _, exists := ps[p] + return exists +} + +// Each calls fn for each topic / partition in the topics set. +func (s TopicsSet) Each(fn func(t string, p int32)) { + for t, ps := range s { + for p := range ps { + fn(t, p) + } + } +} + +// EachPartitions calls fn for each topic and its partitions in the topics set. +func (s TopicsSet) EachPartitions(fn func(t string, ps []int32)) { + for t, ps := range s { + sliced := make([]int32, 0, len(ps)) + for p := range ps { + sliced = append(sliced, p) + } + fn(t, sliced) + } +} + +// EmptyTopics returns all topics with no partitions. +func (s TopicsSet) EmptyTopics() []string { + var e []string + for t, ps := range s { + if len(ps) == 0 { + e = append(e, t) + } + } + return e +} + +// Add adds partitions for a topic to the topics set. If no partitions are +// added, this still creates the topic. +func (s *TopicsSet) Add(t string, ps ...int32) { + if *s == nil { + *s = make(map[string]map[int32]struct{}) + } + existing := (*s)[t] + if existing == nil { + existing = make(map[int32]struct{}, len(ps)) + (*s)[t] = existing + } + for _, p := range ps { + existing[p] = struct{}{} + } +} + +// Delete removes partitions from a topic from the topics set. If the topic +// ends up with no partitions, the topic is removed from the set. 
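+//
+// A minimal sketch (hypothetical topic): deleting the last partition also
+// drops the topic:
+//
+//	var s TopicsSet
+//	s.Add("foo", 0)
+//	s.Delete("foo", 0)
+//	// len(s) == 0: "foo" was removed along with its last partition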
+func (s TopicsSet) Delete(t string, ps ...int32) {
+	if s == nil || len(ps) == 0 {
+		return
+	}
+	existing := s[t]
+	if existing == nil {
+		return
+	}
+	for _, p := range ps {
+		delete(existing, p)
+	}
+	if len(existing) == 0 {
+		delete(s, t)
+	}
+}
+
+// Topics returns all topics in this set in sorted order.
+func (s TopicsSet) Topics() []string {
+	ts := make([]string, 0, len(s))
+	for t := range s {
+		ts = append(ts, t)
+	}
+	sort.Strings(ts)
+	return ts
+}
+
+// Merge merges another topic set into this one.
+func (s TopicsSet) Merge(other TopicsSet) {
+	for t, ps := range other {
+		for p := range ps {
+			s.Add(t, p)
+		}
+	}
+}
+
+// IntoList returns this set as a list.
+func (s TopicsSet) IntoList() TopicsList {
+	l := make(TopicsList, 0, len(s))
+	for t, ps := range s {
+		lps := make([]int32, 0, len(ps))
+		for p := range ps {
+			lps = append(lps, p)
+		}
+		l = append(l, TopicPartitions{
+			Topic:      t,
+			Partitions: lps,
+		})
+	}
+	return l
+}
+
+// Sorted returns this set as a list in topic-sorted order, with each topic
+// having sorted partitions.
+func (s TopicsSet) Sorted() TopicsList {
+	l := make(TopicsList, 0, len(s))
+	for t, ps := range s {
+		tps := TopicPartitions{
+			Topic:      t,
+			Partitions: make([]int32, 0, len(ps)),
+		}
+		for p := range ps {
+			tps.Partitions = append(tps.Partitions, p)
+		}
+		tps.Partitions = int32s(tps.Partitions)
+		l = append(l, tps)
+	}
+	sort.Slice(l, func(i, j int) bool { return l[i].Topic < l[j].Topic })
+	return l
+}
+
+// TopicPartitions is a topic and partitions.
+type TopicPartitions struct {
+	Topic      string
+	Partitions []int32
+}
+
+// TopicsList is a list of topics and partitions.
+type TopicsList []TopicPartitions
+
+// Each calls fn for each topic / partition in the topics list.
+func (l TopicsList) Each(fn func(t string, p int32)) {
+	for _, t := range l {
+		for _, p := range t.Partitions {
+			fn(t.Topic, p)
+		}
+	}
+}
+
+// EachPartitions calls fn for each topic and its partitions in the topics
+// list.
+func (l TopicsList) EachPartitions(fn func(t string, ps []int32)) {
+	for _, t := range l {
+		fn(t.Topic, t.Partitions)
+	}
+}
+
+// EmptyTopics returns all topics with no partitions.
+func (l TopicsList) EmptyTopics() []string {
+	var e []string
+	for _, t := range l {
+		if len(t.Partitions) == 0 {
+			e = append(e, t.Topic)
+		}
+	}
+	return e
+}
+
+// Topics returns all topics in this list in sorted order.
+func (l TopicsList) Topics() []string {
+	ts := make([]string, 0, len(l))
+	for _, t := range l {
+		ts = append(ts, t.Topic)
+	}
+	sort.Strings(ts)
+	return ts
+}
+
+// IntoSet returns this list as a set.
+func (l TopicsList) IntoSet() TopicsSet {
+	s := make(TopicsSet)
+	for _, t := range l {
+		s.Add(t.Topic, t.Partitions...)
+	}
+	return s
+}
+
+// First returns the first element of the input slice and whether it exists.
+// This is the non-error-accepting equivalent of FirstE.
+//
+// Many client methods in kadm accept a variadic amount of input arguments and
+// return either a slice or a map of responses, but you often use the method
+// with only one argument. This function can help extract the one response you
+// are interested in.
+func First[S ~[]T, T any](s S) (T, bool) {
+	if len(s) == 0 {
+		var t T
+		return t, false
+	}
+	return s[0], true
+}
+
+// Any returns the first range element of the input map and whether it exists.
+// This is the non-error-accepting equivalent of AnyE.
+// +// Many client methods in kadm accept a variadic amount of input arguments and +// return either a slice or a map of responses, but you often use the method +// with only one argument. This function can help extract the one response you +// are interested in. +func Any[M ~map[K]V, K comparable, V any](m M) (V, bool) { + for _, v := range m { + return v, true + } + var v V + return v, false +} + +// ErrEmpty is returned from FirstE or AnyE if the input is empty. +var ErrEmpty = errors.New("empty") + +// FirstE returns the first element of the input slice, or the input error +// if it is non-nil. If the error is nil but the slice is empty, this returns +// ErrEmpty. This is the error-accepting equivalent of First. +// +// Many client methods in kadm accept a variadic amount of input arguments and +// return either a slice or a map of responses, but you often use the method +// with only one argument. This function can help extract the one response you +// are interested in. +func FirstE[S ~[]T, T any](s S, err error) (T, error) { + if err != nil { + var t T + return t, err + } + if len(s) == 0 { + var t T + return t, ErrEmpty + } + return s[0], err +} + +// AnyE returns the first range element of the input map, or the input error if +// it is non-nil. If the error is nil but the map is empty, this returns +// ErrEmpty. This is the error-accepting equivalent of Any. +// +// Many client methods in kadm accept a variadic amount of input arguments and +// return either a slice or a map of responses, but you often use the method +// with only one argument. This function can help extract the one response you +// are interested in. +func AnyE[M ~map[K]V, K comparable, V any](m M, err error) (V, error) { + if err != nil { + var v V + return v, err + } + for _, v := range m { + return v, nil + } + var v V + return v, ErrEmpty +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kadm/logdirs.go b/vendor/github.com/twmb/franz-go/pkg/kadm/logdirs.go new file mode 100644 index 00000000000..c1487cbcce7 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kadm/logdirs.go @@ -0,0 +1,592 @@ +package kadm + +import ( + "context" + "sort" + + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" +) + +// AlterReplicaLogDirsReq is the input for a request to alter replica log +// directories. The key is the directory that all topics and partitions in +// the topic set will move to. +type AlterReplicaLogDirsReq map[string]TopicsSet + +// Add merges the input topic set into the given directory. +func (r *AlterReplicaLogDirsReq) Add(d string, s TopicsSet) { + if *r == nil { + *r = make(map[string]TopicsSet) + } + existing := (*r)[d] + if existing == nil { + existing = make(TopicsSet) + (*r)[d] = existing + } + existing.Merge(s) +} + +func (r AlterReplicaLogDirsReq) req() *kmsg.AlterReplicaLogDirsRequest { + req := kmsg.NewPtrAlterReplicaLogDirsRequest() + for dir, ts := range r { + rd := kmsg.NewAlterReplicaLogDirsRequestDir() + rd.Dir = dir + for t, ps := range ts { + rt := kmsg.NewAlterReplicaLogDirsRequestDirTopic() + rt.Topic = t + for p := range ps { + rt.Partitions = append(rt.Partitions, p) + } + rd.Topics = append(rd.Topics, rt) + } + req.Dirs = append(req.Dirs, rd) + } + return req +} + +func (r AlterReplicaLogDirsReq) dirfor(t string, p int32) string { + for d, dts := range r { + if dts == nil { + continue + } + dtps, ok := dts[t] // does this dir contain this topic? + if !ok { + continue + } + if _, ok = dtps[p]; !ok { // does this topic in this dir contain this partition? 
+			continue
+		}
+		return d // yes
+	}
+	return ""
+}
+
+// AlterAllReplicaLogDirsResponses contains per-broker responses to altered
+// partition directories.
+type AlterAllReplicaLogDirsResponses map[int32]AlterReplicaLogDirsResponses
+
+// Sorted returns the responses sorted by broker, topic, and partition.
+func (rs AlterAllReplicaLogDirsResponses) Sorted() []AlterReplicaLogDirsResponse {
+	var all []AlterReplicaLogDirsResponse
+	rs.Each(func(r AlterReplicaLogDirsResponse) {
+		all = append(all, r)
+	})
+	sort.Slice(all, func(i, j int) bool { return all[i].Less(all[j]) })
+	return all
+}
+
+// Each calls fn for every response.
+func (rs AlterAllReplicaLogDirsResponses) Each(fn func(AlterReplicaLogDirsResponse)) {
+	for _, ts := range rs {
+		ts.Each(fn)
+	}
+}
+
+// AlterReplicaLogDirsResponses contains responses to altered partition
+// directories for a single broker.
+type AlterReplicaLogDirsResponses map[string]map[int32]AlterReplicaLogDirsResponse
+
+// Sorted returns the responses sorted by topic and partition.
+func (rs AlterReplicaLogDirsResponses) Sorted() []AlterReplicaLogDirsResponse {
+	var all []AlterReplicaLogDirsResponse
+	rs.Each(func(r AlterReplicaLogDirsResponse) {
+		all = append(all, r)
+	})
+	sort.Slice(all, func(i, j int) bool { return all[i].Less(all[j]) })
+	return all
+}
+
+// Each calls fn for every response.
+func (rs AlterReplicaLogDirsResponses) Each(fn func(AlterReplicaLogDirsResponse)) {
+	for _, ps := range rs {
+		for _, r := range ps {
+			fn(r)
+		}
+	}
+}
+
+// AlterReplicaLogDirsResponse contains the response for an individual
+// altered partition directory.
+type AlterReplicaLogDirsResponse struct {
+	Broker    int32  // Broker is the broker this response came from.
+	Dir       string // Dir is the directory this partition was requested to be moved to.
+	Topic     string // Topic is the topic for this partition.
+	Partition int32  // Partition is the partition that was moved.
+	Err       error  // Err is non-nil if this move had an error.
+}
+
+// Less returns if the response is less than the other by broker, dir, topic,
+// and partition.
+func (a AlterReplicaLogDirsResponse) Less(other AlterReplicaLogDirsResponse) bool {
+	if a.Broker < other.Broker {
+		return true
+	}
+	if a.Broker > other.Broker {
+		return false
+	}
+	if a.Dir < other.Dir {
+		return true
+	}
+	if a.Dir > other.Dir {
+		return false
+	}
+	if a.Topic < other.Topic {
+		return true
+	}
+	if a.Topic > other.Topic {
+		return false
+	}
+	return a.Partition < other.Partition
+}
+
+func newAlterLogDirsResp(node int32, req AlterReplicaLogDirsReq, resp *kmsg.AlterReplicaLogDirsResponse) AlterReplicaLogDirsResponses {
+	a := make(AlterReplicaLogDirsResponses)
+	for _, kt := range resp.Topics {
+		ps := make(map[int32]AlterReplicaLogDirsResponse)
+		a[kt.Topic] = ps
+		for _, kp := range kt.Partitions {
+			ps[kp.Partition] = AlterReplicaLogDirsResponse{
+				Broker:    node,
+				Dir:       req.dirfor(kt.Topic, kp.Partition),
+				Topic:     kt.Topic,
+				Partition: kp.Partition,
+				Err:       kerr.ErrorForCode(kp.ErrorCode),
+			}
+		}
+	}
+	return a
+}
+
+// AlterAllReplicaLogDirs alters the log directories for the input topic
+// partitions, moving each partition to the requested directory. This function
+// moves all replicas on any broker.
+//
+// This may return *ShardErrors.
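+//
+// A minimal sketch of moving one partition on every broker that hosts it
+// (the directory, topic, and admin client "adm" are hypothetical):
+//
+//	var req AlterReplicaLogDirsReq
+//	req.Add("/var/lib/kafka-data-2", TopicsSet{"foo": {0: {}}})
+//	resps, err := adm.AlterAllReplicaLogDirs(ctx, req)
+//	if err != nil {
+//		// handle the request-level error (possibly *ShardErrors)
+//	}
+//	for _, r := range resps.Sorted() {
+//		// inspect r.Broker, r.Dir, r.Topic, r.Partition, r.Err
+//	}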
+func (cl *Client) AlterAllReplicaLogDirs(ctx context.Context, alter AlterReplicaLogDirsReq) (AlterAllReplicaLogDirsResponses, error) {
+	if len(alter) == 0 {
+		return make(AlterAllReplicaLogDirsResponses), nil
+	}
+	req := alter.req()
+	shards := cl.cl.RequestSharded(ctx, req)
+	resps := make(AlterAllReplicaLogDirsResponses)
+	return resps, shardErrEachBroker(req, shards, func(b BrokerDetail, kr kmsg.Response) error {
+		resp := kr.(*kmsg.AlterReplicaLogDirsResponse)
+		resps[b.NodeID] = newAlterLogDirsResp(b.NodeID, alter, resp) // one node ID, no need to unique-check
+		return nil
+	})
+}
+
+// AlterBrokerReplicaLogDirs alters the log directories for the input topic
+// partitions on the given broker, moving each partition to the requested
+// directory.
+func (cl *Client) AlterBrokerReplicaLogDirs(ctx context.Context, broker int32, alter AlterReplicaLogDirsReq) (AlterReplicaLogDirsResponses, error) {
+	if len(alter) == 0 {
+		return make(AlterReplicaLogDirsResponses), nil
+	}
+	b := cl.cl.Broker(int(broker))
+	kresp, err := b.RetriableRequest(ctx, alter.req())
+	if err != nil {
+		return nil, err
+	}
+	resp := kresp.(*kmsg.AlterReplicaLogDirsResponse)
+	return newAlterLogDirsResp(broker, alter, resp), nil
+}
+
+func describeLogDirsReq(s TopicsSet) *kmsg.DescribeLogDirsRequest {
+	req := kmsg.NewPtrDescribeLogDirsRequest()
+	for t, ps := range s {
+		rt := kmsg.NewDescribeLogDirsRequestTopic()
+		rt.Topic = t
+		for p := range ps {
+			rt.Partitions = append(rt.Partitions, p)
+		}
+		req.Topics = append(req.Topics, rt)
+	}
+	return req
+}
+
+// DescribedAllLogDirs contains per-broker responses to described log
+// directories.
+type DescribedAllLogDirs map[int32]DescribedLogDirs
+
+// Sorted returns each log directory sorted by broker, then by directory.
+func (ds DescribedAllLogDirs) Sorted() []DescribedLogDir {
+	var all []DescribedLogDir
+	ds.Each(func(d DescribedLogDir) {
+		all = append(all, d)
+	})
+	sort.Slice(all, func(i, j int) bool {
+		l, r := all[i], all[j]
+		return l.Broker < r.Broker || l.Broker == r.Broker && l.Dir < r.Dir
+	})
+	return all
+}
+
+// Each calls fn for every described log dir in all responses.
+func (ds DescribedAllLogDirs) Each(fn func(DescribedLogDir)) {
+	for _, bds := range ds {
+		bds.Each(fn)
+	}
+}
+
+// DescribedLogDirs contains per-directory responses to described log
+// directories for a single broker.
+type DescribedLogDirs map[string]DescribedLogDir
+
+// Lookup returns the described partition if it exists.
+func (ds DescribedLogDirs) Lookup(d, t string, p int32) (DescribedLogDirPartition, bool) {
+	dir, exists := ds[d]
+	if !exists {
+		return DescribedLogDirPartition{}, false
+	}
+	ps, exists := dir.Topics[t]
+	if !exists {
+		return DescribedLogDirPartition{}, false
+	}
+	dp, exists := ps[p]
+	if !exists {
+		return DescribedLogDirPartition{}, false
+	}
+	return dp, true
+}
+
+// LookupPartition returns the described partition if it exists in any
+// directory. Brokers should only have one replica of a partition, so this
+// should always find at most one partition.
+func (ds DescribedLogDirs) LookupPartition(t string, p int32) (DescribedLogDirPartition, bool) {
+	for _, dir := range ds {
+		ps, exists := dir.Topics[t]
+		if !exists {
+			continue
+		}
+		dp, exists := ps[p]
+		if !exists {
+			continue
+		}
+		return dp, true
+	}
+	return DescribedLogDirPartition{}, false
+}
+
+// Size returns the total size of all directories.
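+//
+// For example, summing what one broker currently stores (a sketch; the
+// admin client "adm" and broker ID are hypothetical):
+//
+//	dirs, err := adm.DescribeBrokerLogDirs(ctx, 1, nil) // nil set: all dirs
+//	if err == nil && dirs.Ok() {
+//		total := dirs.Size() // total bytes across all described directories
+//		_ = total
+//	}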
+func (ds DescribedLogDirs) Size() int64 {
+	var tot int64
+	ds.EachPartition(func(d DescribedLogDirPartition) {
+		tot += d.Size
+	})
+	return tot
+}
+
+// Error iterates over all directories and returns the first error
+// encountered, if any. This can be used to check if describing was entirely
+// successful or not.
+func (ds DescribedLogDirs) Error() error {
+	for _, d := range ds {
+		if d.Err != nil {
+			return d.Err
+		}
+	}
+	return nil
+}
+
+// Ok returns true if there are no errors. This is a shortcut for ds.Error() ==
+// nil.
+func (ds DescribedLogDirs) Ok() bool {
+	return ds.Error() == nil
+}
+
+// Sorted returns all directories sorted by dir.
+func (ds DescribedLogDirs) Sorted() []DescribedLogDir {
+	var all []DescribedLogDir
+	ds.Each(func(d DescribedLogDir) {
+		all = append(all, d)
+	})
+	sort.Slice(all, func(i, j int) bool {
+		l, r := all[i], all[j]
+		return l.Broker < r.Broker || l.Broker == r.Broker && l.Dir < r.Dir
+	})
+	return all
+}
+
+// SortedPartitions returns all partitions sorted by dir, then topic, then
+// partition.
+func (ds DescribedLogDirs) SortedPartitions() []DescribedLogDirPartition {
+	var all []DescribedLogDirPartition
+	ds.EachPartition(func(d DescribedLogDirPartition) {
+		all = append(all, d)
+	})
+	sort.Slice(all, func(i, j int) bool { return all[i].Less(all[j]) })
+	return all
+}
+
+// SortedBySize returns all directories sorted from smallest total directory
+// size to largest.
+func (ds DescribedLogDirs) SortedBySize() []DescribedLogDir {
+	var all []DescribedLogDir
+	ds.Each(func(d DescribedLogDir) {
+		all = append(all, d)
+	})
+	sort.Slice(all, func(i, j int) bool {
+		l, r := all[i], all[j]
+		ls, rs := l.Size(), r.Size()
+		return ls < rs || ls == rs &&
+			(l.Broker < r.Broker || l.Broker == r.Broker &&
+				l.Dir < r.Dir)
+	})
+	return all
+}
+
+// SortedPartitionsBySize returns all partitions across all directories sorted
+// by smallest to largest, falling back to by broker, dir, topic, and
+// partition.
+func (ds DescribedLogDirs) SortedPartitionsBySize() []DescribedLogDirPartition {
+	var all []DescribedLogDirPartition
+	ds.EachPartition(func(d DescribedLogDirPartition) {
+		all = append(all, d)
+	})
+	sort.Slice(all, func(i, j int) bool { return all[i].LessBySize(all[j]) })
+	return all
+}
+
+// SmallestPartitionBySize returns the smallest partition by directory size, or
+// no partition if there are no partitions.
+func (ds DescribedLogDirs) SmallestPartitionBySize() (DescribedLogDirPartition, bool) {
+	sorted := ds.SortedPartitionsBySize()
+	if len(sorted) == 0 {
+		return DescribedLogDirPartition{}, false
+	}
+	return sorted[0], true
+}
+
+// LargestPartitionBySize returns the largest partition by directory size, or
+// no partition if there are no partitions.
+func (ds DescribedLogDirs) LargestPartitionBySize() (DescribedLogDirPartition, bool) {
+	sorted := ds.SortedPartitionsBySize()
+	if len(sorted) == 0 {
+		return DescribedLogDirPartition{}, false
+	}
+	return sorted[len(sorted)-1], true
+}
+
+// Each calls fn for each log directory.
+func (ds DescribedLogDirs) Each(fn func(DescribedLogDir)) {
+	for _, d := range ds {
+		fn(d)
+	}
+}
+
+// EachPartition calls fn for each partition in any directory.
+func (ds DescribedLogDirs) EachPartition(fn func(d DescribedLogDirPartition)) {
+	for _, d := range ds {
+		d.Topics.Each(fn)
+	}
+}
+
+// EachError calls fn for every directory that has a non-nil error.
+func (ds DescribedLogDirs) EachError(fn func(DescribedLogDir)) {
+	for _, d := range ds {
+		if d.Err != nil {
+			fn(d)
+		}
+	}
+}
+
+// DescribedLogDir is a described log directory.
+type DescribedLogDir struct {
+	Broker int32                 // Broker is the broker being described.
+	Dir    string                // Dir is the described directory.
+	Topics DescribedLogDirTopics // Topics contains the partitions in this directory.
+	Err    error                 // Err is non-nil if this directory could not be described.
+}
+
+// Size returns the total size of all partitions in this directory. This is
+// a shortcut for .Topics.Size().
+func (ds DescribedLogDir) Size() int64 {
+	return ds.Topics.Size()
+}
+
+// DescribedLogDirTopics contains per-partition described log directories.
+type DescribedLogDirTopics map[string]map[int32]DescribedLogDirPartition
+
+// Lookup returns the described partition if it exists.
+func (ds DescribedLogDirTopics) Lookup(t string, p int32) (DescribedLogDirPartition, bool) {
+	ps, exists := ds[t]
+	if !exists {
+		return DescribedLogDirPartition{}, false
+	}
+	d, exists := ps[p]
+	return d, exists
+}
+
+// Size returns the total size of all partitions in this directory.
+func (ds DescribedLogDirTopics) Size() int64 {
+	var tot int64
+	ds.Each(func(d DescribedLogDirPartition) {
+		tot += d.Size
+	})
+	return tot
+}
+
+// Sorted returns all partitions sorted by topic then partition.
+func (ds DescribedLogDirTopics) Sorted() []DescribedLogDirPartition {
+	var all []DescribedLogDirPartition
+	ds.Each(func(d DescribedLogDirPartition) {
+		all = append(all, d)
+	})
+	sort.Slice(all, func(i, j int) bool { return all[i].Less(all[j]) })
+	return all
+}
+
+// SortedBySize returns all partitions sorted by smallest size to largest. If
+// partitions are of equal size, the sorting is topic then partition.
+func (ds DescribedLogDirTopics) SortedBySize() []DescribedLogDirPartition {
+	var all []DescribedLogDirPartition
+	ds.Each(func(d DescribedLogDirPartition) {
+		all = append(all, d)
+	})
+	sort.Slice(all, func(i, j int) bool { return all[i].LessBySize(all[j]) })
+	return all
+}
+
+// Each calls fn for every partition.
+func (ds DescribedLogDirTopics) Each(fn func(p DescribedLogDirPartition)) {
+	for _, ps := range ds {
+		for _, d := range ps {
+			fn(d)
+		}
+	}
+}
+
+// DescribedLogDirPartition is the information for a single partition's
+// described log directory.
+type DescribedLogDirPartition struct {
+	Broker    int32  // Broker is the broker this partition is on.
+	Dir       string // Dir is the directory this partition lives in.
+	Topic     string // Topic is the topic for this partition.
+	Partition int32  // Partition is this partition.
+	Size      int64  // Size is the total size of the log segments of this partition, in bytes.
+
+	// OffsetLag is how far behind the log end offset this partition is.
+	// The math is:
+	//
+	//	if IsFuture {
+	//		logEndOffset - futureLogEndOffset
+	//	} else {
+	//		max(highWaterMark - logEndOffset)
+	//	}
+	//
+	OffsetLag int64
+	// IsFuture is true if this replica was created by an
+	// AlterReplicaLogDirsRequest and will replace the current log of the
+	// replica in the future.
+	IsFuture bool
+}
+
+// Less returns if one dir partition is less than the other, by broker, dir,
+// topic, partition, and size.
+func (p DescribedLogDirPartition) Less(other DescribedLogDirPartition) bool {
+	if p.Broker < other.Broker {
+		return true
+	}
+	if p.Broker > other.Broker {
+		return false
+	}
+	if p.Dir < other.Dir {
+		return true
+	}
+	if p.Dir > other.Dir {
+		return false
+	}
+	if p.Topic < other.Topic {
+		return true
+	}
+	if p.Topic > other.Topic {
+		return false
+	}
+	if p.Partition < other.Partition {
+		return true
+	}
+	if p.Partition > other.Partition {
+		return false
+	}
+	return p.Size < other.Size
+}
+
+// LessBySize returns if one dir partition is less than the other by size,
+// otherwise by normal Less semantics.
+func (p DescribedLogDirPartition) LessBySize(other DescribedLogDirPartition) bool {
+	if p.Size < other.Size {
+		return true
+	}
+	return p.Less(other)
+}
+
+func newDescribeLogDirsResp(node int32, resp *kmsg.DescribeLogDirsResponse) DescribedLogDirs {
+	ds := make(DescribedLogDirs)
+	for _, rd := range resp.Dirs {
+		d := DescribedLogDir{
+			Broker: node,
+			Dir:    rd.Dir,
+			Topics: make(DescribedLogDirTopics),
+			Err:    kerr.ErrorForCode(rd.ErrorCode),
+		}
+		for _, rt := range rd.Topics {
+			t := make(map[int32]DescribedLogDirPartition)
+			d.Topics[rt.Topic] = t
+			for _, rp := range rt.Partitions {
+				t[rp.Partition] = DescribedLogDirPartition{
+					Broker:    node,
+					Dir:       rd.Dir,
+					Topic:     rt.Topic,
+					Partition: rp.Partition,
+					Size:      rp.Size,
+					OffsetLag: rp.OffsetLag,
+					IsFuture:  rp.IsFuture,
+				}
+			}
+		}
+		ds[rd.Dir] = d
+	}
+	return ds
+}
+
+// DescribeAllLogDirs describes the log directories for every input topic
+// partition on every broker. If the input set is nil, this describes all log
+// directories.
+//
+// This may return *ShardErrors.
+func (cl *Client) DescribeAllLogDirs(ctx context.Context, s TopicsSet) (DescribedAllLogDirs, error) {
+	req := describeLogDirsReq(s)
+	shards := cl.cl.RequestSharded(ctx, req)
+	resps := make(DescribedAllLogDirs)
+	return resps, shardErrEachBroker(req, shards, func(b BrokerDetail, kr kmsg.Response) error {
+		resp := kr.(*kmsg.DescribeLogDirsResponse)
+		if err := maybeAuthErr(resp.ErrorCode); err != nil {
+			return err
+		}
+		if err := kerr.ErrorForCode(resp.ErrorCode); err != nil {
+			return err
+		}
+		resps[b.NodeID] = newDescribeLogDirsResp(b.NodeID, resp) // one node ID, no need to unique-check
+		return nil
+	})
+}
+
+// DescribeBrokerLogDirs describes the log directories for the input topic
+// partitions on the given broker. If the input set is nil, this describes all
+// log directories.
+func (cl *Client) DescribeBrokerLogDirs(ctx context.Context, broker int32, s TopicsSet) (DescribedLogDirs, error) {
+	req := describeLogDirsReq(s)
+	b := cl.cl.Broker(int(broker))
+	kresp, err := b.RetriableRequest(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	resp := kresp.(*kmsg.DescribeLogDirsResponse)
+	if err := maybeAuthErr(resp.ErrorCode); err != nil {
+		return nil, err
+	}
+	if err := kerr.ErrorForCode(resp.ErrorCode); err != nil {
+		return nil, err
+	}
+	return newDescribeLogDirsResp(broker, resp), nil
+}
diff --git a/vendor/github.com/twmb/franz-go/pkg/kadm/metadata.go b/vendor/github.com/twmb/franz-go/pkg/kadm/metadata.go
new file mode 100644
index 00000000000..f2797186ee6
--- /dev/null
+++ b/vendor/github.com/twmb/franz-go/pkg/kadm/metadata.go
@@ -0,0 +1,518 @@
+package kadm
+
+import (
+	"bytes"
+	"context"
+	"encoding/base64"
+	"fmt"
+	"sort"
+
+	"github.com/twmb/franz-go/pkg/kerr"
+	"github.com/twmb/franz-go/pkg/kgo"
+	"github.com/twmb/franz-go/pkg/kmsg"
+)
+
+// TopicID is the 16 byte underlying topic ID.
+type TopicID [16]byte
+
+// String returns the topic ID encoded as base64.
+func (t TopicID) String() string { return base64.StdEncoding.EncodeToString(t[:]) }
+
+// MarshalJSON returns the topic ID encoded as quoted base64.
+func (t TopicID) MarshalJSON() ([]byte, error) { return []byte(`"` + t.String() + `"`), nil }
+
+// Less returns if this ID is less than the other, byte by byte.
+func (t TopicID) Less(other TopicID) bool {
+	return bytes.Compare(t[:], other[:]) == -1
+}
+
+// PartitionDetail is the detail of a partition as returned by a metadata
+// response. If the partition fails to load / has an error, then only the
+// partition number itself and the Err fields will be set.
+type PartitionDetail struct {
+	Topic     string // Topic is the topic this partition belongs to.
+	Partition int32  // Partition is the partition number these details are for.
+
+	Leader          int32   // Leader is the broker leader, if there is one, otherwise -1.
+	LeaderEpoch     int32   // LeaderEpoch is the leader's current epoch.
+	Replicas        []int32 // Replicas is the list of replicas.
+	ISR             []int32 // ISR is the list of in sync replicas.
+	OfflineReplicas []int32 // OfflineReplicas is the list of offline replicas.
+
+	Err error // Err is non-nil if the partition currently has a load error.
+}
+
+// PartitionDetails contains details for partitions as returned by a metadata
+// response.
+type PartitionDetails map[int32]PartitionDetail
+
+// Sorted returns the partitions in sorted order.
+func (ds PartitionDetails) Sorted() []PartitionDetail {
+	s := make([]PartitionDetail, 0, len(ds))
+	for _, d := range ds {
+		s = append(s, d)
+	}
+	sort.Slice(s, func(i, j int) bool { return s[i].Partition < s[j].Partition })
+	return s
+}
+
+// Numbers returns a sorted list of all partition numbers.
+func (ds PartitionDetails) Numbers() []int32 {
+	all := make([]int32, 0, len(ds))
+	for p := range ds {
+		all = append(all, p)
+	}
+	return int32s(all)
+}
+
+// NumReplicas returns the number of replicas for these partitions.
+//
+// It is assumed that all partitions have the same number of replicas, so this
+// simply returns the number of replicas in the first encountered partition.
+func (ds PartitionDetails) NumReplicas() int {
+	for _, p := range ds {
+		return len(p.Replicas)
+	}
+	return 0
+}
+
+// TopicDetail is the detail of a topic as returned by a metadata response. If
+// the topic fails to load / has an error, then there will be no partitions.
+type TopicDetail struct {
+	Topic string // Topic is the topic these details are for.
+
+	ID         TopicID          // ID is the topic's ID, or all 0 if the broker does not support IDs.
+	IsInternal bool             // IsInternal is whether the topic is an internal topic.
+	Partitions PartitionDetails // Partitions contains details about the topic's partitions.
+
+	Err error // Err is non-nil if the topic could not be loaded.
+}
+
+// TopicDetails contains details for topics as returned by a metadata response.
+type TopicDetails map[string]TopicDetail
+
+// Names returns a sorted list of all topic names.
+func (ds TopicDetails) Names() []string {
+	all := make([]string, 0, len(ds))
+	for t := range ds {
+		all = append(all, t)
+	}
+	sort.Strings(all)
+	return all
+}
+
+// Sorted returns all topics in sorted order.
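+//
+// A sketch of iterating topics in order (the admin client "adm" is
+// hypothetical; ListTopics is the topic-listing helper defined elsewhere in
+// this package):
+//
+//	tds, err := adm.ListTopics(ctx)
+//	if err == nil {
+//		for _, td := range tds.Sorted() {
+//			// td.Topic, len(td.Partitions), td.Err
+//		}
+//	}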
+func (ds TopicDetails) Sorted() []TopicDetail { + s := make([]TopicDetail, 0, len(ds)) + for _, d := range ds { + s = append(s, d) + } + sort.Slice(s, func(i, j int) bool { + if s[i].Topic == "" { + if s[j].Topic == "" { + return bytes.Compare(s[i].ID[:], s[j].ID[:]) == -1 + } + return true + } + if s[j].Topic == "" { + return false + } + return s[i].Topic < s[j].Topic + }) + return s +} + +// Has returns whether the topic details has the given topic and, if so, that +// the topic's load error is not an unknown topic error. +func (ds TopicDetails) Has(topic string) bool { + d, ok := ds[topic] + return ok && d.Err != kerr.UnknownTopicOrPartition +} + +// FilterInternal deletes any internal topics from this set of topic details. +func (ds TopicDetails) FilterInternal() { + for t, d := range ds { + if d.IsInternal { + delete(ds, t) + } + } +} + +// EachPartition calls fn for every partition in all topics. +func (ds TopicDetails) EachPartition(fn func(PartitionDetail)) { + for _, td := range ds { + for _, d := range td.Partitions { + fn(d) + } + } +} + +// EachError calls fn for each topic that could not be loaded. +func (ds TopicDetails) EachError(fn func(TopicDetail)) { + for _, td := range ds { + if td.Err != nil { + fn(td) + } + } +} + +// Error iterates over all topic details and returns the first error +// encountered, if any. +func (ds TopicDetails) Error() error { + for _, t := range ds { + if t.Err != nil { + return t.Err + } + } + return nil +} + +// TopicsSet returns the topics and partitions as a set. +func (ds TopicDetails) TopicsSet() TopicsSet { + var s TopicsSet + ds.EachPartition(func(d PartitionDetail) { + s.Add(d.Topic, d.Partition) + }) + return s +} + +// TopicsList returns the topics and partitions as a list. +func (ds TopicDetails) TopicsList() TopicsList { + return ds.TopicsSet().Sorted() +} + +// Metadata is the data from a metadata response. +type Metadata struct { + Cluster string // Cluster is the cluster name, if any. + Controller int32 // Controller is the node ID of the controller broker, if available, otherwise -1. + Brokers BrokerDetails // Brokers contains broker details, sorted by default. + Topics TopicDetails // Topics contains topic details. +} + +func int32s(is []int32) []int32 { + sort.Slice(is, func(i, j int) bool { return is[i] < is[j] }) + return is +} + +// ListBrokers issues a metadata request and returns BrokerDetails. This +// returns an error if the request fails to be issued, or an *AuthError. +func (cl *Client) ListBrokers(ctx context.Context) (BrokerDetails, error) { + m, err := cl.Metadata(ctx) + if err != nil { + return nil, err + } + return m.Brokers, nil +} + +// BrokerMetadata issues a metadata request and returns it, and does not ask +// for any topics. +// +// This returns an error if the request fails to be issued, or an *AuthErr. +func (cl *Client) BrokerMetadata(ctx context.Context) (Metadata, error) { + return cl.metadata(ctx, true, nil) +} + +// Metadata issues a metadata request and returns it. Specific topics to +// describe can be passed as additional arguments. If no topics are specified, +// all topics are requested. +// +// This returns an error if the request fails to be issued, or an *AuthErr. 
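+//
+// For example (a sketch; the topics and admin client "adm" are
+// hypothetical):
+//
+//	m, err := adm.Metadata(ctx, "foo", "bar")
+//	if err == nil {
+//		// m.Cluster, m.Controller, m.Brokers, and m.Topics are now set
+//	}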
+func (cl *Client) Metadata(
+	ctx context.Context,
+	topics ...string,
+) (Metadata, error) {
+	return cl.metadata(ctx, false, topics)
+}
+
+func (cl *Client) metadata(ctx context.Context, noTopics bool, topics []string) (Metadata, error) {
+	req := kmsg.NewPtrMetadataRequest()
+	for _, t := range topics {
+		rt := kmsg.NewMetadataRequestTopic()
+		rt.Topic = kmsg.StringPtr(t)
+		req.Topics = append(req.Topics, rt)
+	}
+	if noTopics {
+		req.Topics = []kmsg.MetadataRequestTopic{}
+	}
+	resp, err := req.RequestWith(ctx, cl.cl)
+	if err != nil {
+		return Metadata{}, err
+	}
+
+	tds := make(map[string]TopicDetail, len(resp.Topics))
+	for _, t := range resp.Topics {
+		if err := maybeAuthErr(t.ErrorCode); err != nil {
+			return Metadata{}, err
+		}
+		td := TopicDetail{
+			ID:         t.TopicID,
+			Partitions: make(map[int32]PartitionDetail),
+			IsInternal: t.IsInternal,
+			Err:        kerr.ErrorForCode(t.ErrorCode),
+		}
+		if t.Topic != nil {
+			td.Topic = *t.Topic
+		}
+		for _, p := range t.Partitions {
+			td.Partitions[p.Partition] = PartitionDetail{
+				Topic:     td.Topic,
+				Partition: p.Partition,
+
+				Leader:          p.Leader,
+				LeaderEpoch:     p.LeaderEpoch,
+				Replicas:        p.Replicas,
+				ISR:             p.ISR,
+				OfflineReplicas: p.OfflineReplicas,
+
+				Err: kerr.ErrorForCode(p.ErrorCode),
+			}
+		}
+		tds[*t.Topic] = td
+	}
+
+	m := Metadata{
+		Controller: resp.ControllerID,
+		Topics:     tds,
+	}
+	if resp.ClusterID != nil {
+		m.Cluster = *resp.ClusterID
+	}
+
+	for _, b := range resp.Brokers {
+		m.Brokers = append(m.Brokers, kgo.BrokerMetadata{
+			NodeID: b.NodeID,
+			Host:   b.Host,
+			Port:   b.Port,
+			Rack:   b.Rack,
+		})
+	}
+	sort.Slice(m.Brokers, func(i, j int) bool { return m.Brokers[i].NodeID < m.Brokers[j].NodeID })
+
+	if len(topics) > 0 && len(m.Topics) != len(topics) {
+		return Metadata{}, fmt.Errorf("metadata returned only %d topics of %d requested", len(m.Topics), len(topics))
+	}
+
+	return m, nil
+}
+
+// ListedOffset contains record offset information.
+type ListedOffset struct {
+	Topic     string // Topic is the topic this offset is for.
+	Partition int32  // Partition is the partition this offset is for.
+
+	Timestamp   int64 // Timestamp is the millisecond of the offset if listing after a time, otherwise -1.
+	Offset      int64 // Offset is the record offset, or -1 if one could not be found.
+	LeaderEpoch int32 // LeaderEpoch is the leader epoch at this offset, if any, otherwise -1.
+
+	Err error // Err is non-nil if the partition has a load error.
+}
+
+// ListedOffsets contains per-partition record offset information that is
+// returned from any of the List.*Offsets functions.
+type ListedOffsets map[string]map[int32]ListedOffset
+
+// Lookup returns the offset at t and p and whether it exists.
+func (l ListedOffsets) Lookup(t string, p int32) (ListedOffset, bool) {
+	if len(l) == 0 {
+		return ListedOffset{}, false
+	}
+	ps := l[t]
+	if len(ps) == 0 {
+		return ListedOffset{}, false
+	}
+	o, exists := ps[p]
+	return o, exists
+}
+
+// Each calls fn for each listed offset.
+func (l ListedOffsets) Each(fn func(ListedOffset)) {
+	for _, ps := range l {
+		for _, o := range ps {
+			fn(o)
+		}
+	}
+}
+
+// Error iterates over all offsets and returns the first error encountered, if
+// any. This can be used to check if a listing was entirely successful or not.
+//
+// Note that offset listing can be partially successful. For example, some
+// offsets could succeed to be listed, while others could fail (maybe one
+// partition is offline). If this is something you need to worry about, you may
+// need to check all offsets manually.
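+//
+// A sketch of separating request-level failure from per-partition failure
+// (the topic and admin client "adm" are hypothetical):
+//
+//	listed, err := adm.ListEndOffsets(ctx, "foo")
+//	if err != nil {
+//		// the request itself failed (possibly *ShardErrors)
+//	} else if err = listed.Error(); err != nil {
+//		// the request succeeded, but some partition failed to list
+//	}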
+func (l ListedOffsets) Error() error { + for _, ps := range l { + for _, o := range ps { + if o.Err != nil { + return o.Err + } + } + } + return nil +} + +// Offsets returns these listed offsets as offsets. +func (l ListedOffsets) Offsets() Offsets { + o := make(Offsets) + l.Each(func(l ListedOffset) { + o.Add(Offset{ + Topic: l.Topic, + Partition: l.Partition, + At: l.Offset, + LeaderEpoch: l.LeaderEpoch, + }) + }) + return o +} + +// KOffsets returns these listed offsets as a kgo offset map. +func (l ListedOffsets) KOffsets() map[string]map[int32]kgo.Offset { + return l.Offsets().KOffsets() +} + +// ListStartOffsets returns the start (oldest) offsets for each partition in +// each requested topic. In Kafka terms, this returns the log start offset. If +// no topics are specified, all topics are listed. If a requested topic does +// not exist, no offsets for it are listed and it is not present in the +// response. +// +// If any topics being listed do not exist, a special -1 partition is added +// to the response with the expected error code kerr.UnknownTopicOrPartition. +// +// This may return *ShardErrors. +func (cl *Client) ListStartOffsets(ctx context.Context, topics ...string) (ListedOffsets, error) { + return cl.listOffsets(ctx, 0, -2, topics) +} + +// ListEndOffsets returns the end (newest) offsets for each partition in each +// requested topic. In Kafka terms, this returns high watermarks. If no topics +// are specified, all topics are listed. If a requested topic does not exist, +// no offsets for it are listed and it is not present in the response. +// +// If any topics being listed do not exist, a special -1 partition is added +// to the response with the expected error code kerr.UnknownTopicOrPartition. +// +// This may return *ShardErrors. +func (cl *Client) ListEndOffsets(ctx context.Context, topics ...string) (ListedOffsets, error) { + return cl.listOffsets(ctx, 0, -1, topics) +} + +// ListCommittedOffsets returns newest committed offsets for each partition in +// each requested topic. A committed offset may be slightly less than the +// latest offset. In Kafka terms, committed means the last stable offset, and +// newest means the high watermark. Record offsets in active, uncommitted +// transactions will not be returned. If no topics are specified, all topics +// are listed. If a requested topic does not exist, no offsets for it are +// listed and it is not present in the response. +// +// If any topics being listed do not exist, a special -1 partition is added +// to the response with the expected error code kerr.UnknownTopicOrPartition. +// +// This may return *ShardErrors. +func (cl *Client) ListCommittedOffsets(ctx context.Context, topics ...string) (ListedOffsets, error) { + return cl.listOffsets(ctx, 1, -1, topics) +} + +// ListOffsetsAfterMilli returns the first offsets after the requested +// millisecond timestamp. Unlike listing start/end/committed offsets, offsets +// returned from this function also include the timestamp of the offset. If no +// topics are specified, all topics are listed. If a partition has no offsets +// after the requested millisecond, the offset will be the current end offset. +// If a requested topic does not exist, no offsets for it are listed and it is +// not present in the response. +// +// If any topics being listed do not exist, a special -1 partition is added +// to the response with the expected error code kerr.UnknownTopicOrPartition. +// +// This may return *ShardErrors. 
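+//
+// For example, to list the first offsets at or after one hour ago (a
+// sketch; "adm" is a hypothetical admin client and "time" is the standard
+// library package):
+//
+//	millis := time.Now().Add(-time.Hour).UnixMilli()
+//	listed, err := adm.ListOffsetsAfterMilli(ctx, millis, "foo")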
+func (cl *Client) ListOffsetsAfterMilli(ctx context.Context, millisecond int64, topics ...string) (ListedOffsets, error) {
+	return cl.listOffsets(ctx, 0, millisecond, topics)
+}
+
+func (cl *Client) listOffsets(ctx context.Context, isolation int8, timestamp int64, topics []string) (ListedOffsets, error) {
+	tds, err := cl.ListTopics(ctx, topics...)
+	if err != nil {
+		return nil, err
+	}
+
+	// If we request with timestamps, we may request twice: once for after
+	// timestamps, and once for any -1 (and no error) offsets where the
+	// timestamp is in the future.
+	list := make(ListedOffsets)
+
+	for _, td := range tds {
+		if td.Err != nil {
+			list[td.Topic] = map[int32]ListedOffset{
+				-1: {
+					Topic:     td.Topic,
+					Partition: -1,
+					Err:       td.Err,
+				},
+			}
+		}
+	}
+	rerequest := make(map[string][]int32)
+	shardfn := func(kr kmsg.Response) error {
+		resp := kr.(*kmsg.ListOffsetsResponse)
+		for _, t := range resp.Topics {
+			lt, ok := list[t.Topic]
+			if !ok {
+				lt = make(map[int32]ListedOffset)
+				list[t.Topic] = lt
+			}
+			for _, p := range t.Partitions {
+				if err := maybeAuthErr(p.ErrorCode); err != nil {
+					return err
+				}
+				lt[p.Partition] = ListedOffset{
+					Topic:       t.Topic,
+					Partition:   p.Partition,
+					Timestamp:   p.Timestamp,
+					Offset:      p.Offset,
+					LeaderEpoch: p.LeaderEpoch,
+					Err:         kerr.ErrorForCode(p.ErrorCode),
+				}
+				if timestamp != -1 && p.Offset == -1 && p.ErrorCode == 0 {
+					rerequest[t.Topic] = append(rerequest[t.Topic], p.Partition)
+				}
+			}
+		}
+		return nil
+	}
+
+	req := kmsg.NewPtrListOffsetsRequest()
+	req.IsolationLevel = isolation
+	for t, td := range tds {
+		rt := kmsg.NewListOffsetsRequestTopic()
+		if td.Err != nil {
+			continue
+		}
+		rt.Topic = t
+		for p := range td.Partitions {
+			rp := kmsg.NewListOffsetsRequestTopicPartition()
+			rp.Partition = p
+			rp.Timestamp = timestamp
+			rt.Partitions = append(rt.Partitions, rp)
+		}
+		req.Topics = append(req.Topics, rt)
+	}
+	shards := cl.cl.RequestSharded(ctx, req)
+	err = shardErrEach(req, shards, shardfn)
+	if len(rerequest) > 0 {
+		req.Topics = req.Topics[:0]
+		for t, ps := range rerequest {
+			rt := kmsg.NewListOffsetsRequestTopic()
+			rt.Topic = t
+			for _, p := range ps {
+				rp := kmsg.NewListOffsetsRequestTopicPartition()
+				rp.Partition = p
+				rp.Timestamp = -1 // we always list end offsets when rerequesting
+				rt.Partitions = append(rt.Partitions, rp)
+			}
+			req.Topics = append(req.Topics, rt)
+		}
+		shards = cl.cl.RequestSharded(ctx, req)
+		err = mergeShardErrs(err, shardErrEach(req, shards, shardfn))
+	}
+	return list, err
+}
diff --git a/vendor/github.com/twmb/franz-go/pkg/kadm/misc.go b/vendor/github.com/twmb/franz-go/pkg/kadm/misc.go
new file mode 100644
index 00000000000..05add541535
--- /dev/null
+++ b/vendor/github.com/twmb/franz-go/pkg/kadm/misc.go
@@ -0,0 +1,959 @@
+package kadm
+
+import (
+	"context"
+	"crypto/rand"
+	"crypto/sha256"
+	"crypto/sha512"
+	"errors"
+	"fmt"
+	"sort"
+	"strings"
+	"sync"
+
+	"golang.org/x/crypto/pbkdf2"
+
+	"github.com/twmb/franz-go/pkg/kerr"
+	"github.com/twmb/franz-go/pkg/kmsg"
+	"github.com/twmb/franz-go/pkg/kversion"
+)
+
+// ErrAndMessage is returned as the error from requests that were successfully
+// responded to, but the response indicates failure with a message.
+type ErrAndMessage struct {
+	Err        error  // Err is the response ErrorCode.
+	ErrMessage string // ErrMessage is the response ErrorMessage.
+} + +func (e *ErrAndMessage) Error() string { + var ke *kerr.Error + if errors.As(e.Err, &ke) && e.ErrMessage != "" { + return ke.Message + ": " + e.ErrMessage + } + return e.Err.Error() +} + +func (e *ErrAndMessage) Unwrap() error { + return e.Err +} + +// FindCoordinatorResponse contains information for the coordinator for a group +// or transactional ID. +type FindCoordinatorResponse struct { + Name string // Name is the coordinator key this response is for. + NodeID int32 // NodeID is the node ID of the coordinator for this key. + Host string // Host is the host of the coordinator for this key. + Port int32 // Port is the port of the coordinator for this key. + Err error // Err is any error encountered when requesting the coordinator. + ErrMessage string // ErrMessage a potential extra message describing any error. +} + +// FindCoordinatorResponses contains responses to finding coordinators for +// groups or transactions. +type FindCoordinatorResponses map[string]FindCoordinatorResponse + +// AllFailed returns whether all responses are errored. +func (rs FindCoordinatorResponses) AllFailed() bool { + var n int + rs.EachError(func(FindCoordinatorResponse) { n++ }) + return len(rs) > 0 && n == len(rs) +} + +// Sorted returns all coordinator responses sorted by name. +func (rs FindCoordinatorResponses) Sorted() []FindCoordinatorResponse { + s := make([]FindCoordinatorResponse, 0, len(rs)) + for _, r := range rs { + s = append(s, r) + } + sort.Slice(s, func(i, j int) bool { return s[i].Name < s[j].Name }) + return s +} + +// EachError calls fn for every response that has a non-nil error. +func (rs FindCoordinatorResponses) EachError(fn func(FindCoordinatorResponse)) { + for _, r := range rs { + if r.Err != nil { + fn(r) + } + } +} + +// Each calls fn for every response. +func (rs FindCoordinatorResponses) Each(fn func(FindCoordinatorResponse)) { + for _, r := range rs { + fn(r) + } +} + +// Error iterates over all responses and returns the first error encountered, +// if any. +func (rs FindCoordinatorResponses) Error() error { + for _, r := range rs { + if r.Err != nil { + return r.Err + } + } + return nil +} + +// Ok returns true if there are no errors. This is a shortcut for rs.Error() == +// nil. +func (rs FindCoordinatorResponses) Ok() bool { + return rs.Error() == nil +} + +// FindGroupCoordinators returns the coordinator for all requested group names. +// +// This may return *ShardErrors or *AuthError. +func (cl *Client) FindGroupCoordinators(ctx context.Context, groups ...string) FindCoordinatorResponses { + return cl.findCoordinators(ctx, 0, groups...) +} + +// FindTxnCoordinators returns the coordinator for all requested transactional +// IDs. +// +// This may return *ShardErrors or *AuthError. +func (cl *Client) FindTxnCoordinators(ctx context.Context, txnIDs ...string) FindCoordinatorResponses { + return cl.findCoordinators(ctx, 1, txnIDs...) 
+}
+
+func (cl *Client) findCoordinators(ctx context.Context, kind int8, names ...string) FindCoordinatorResponses {
+	resps := make(FindCoordinatorResponses)
+	if len(names) == 0 {
+		return resps
+	}
+
+	req := kmsg.NewPtrFindCoordinatorRequest()
+	req.CoordinatorType = kind
+	req.CoordinatorKeys = names
+
+	keyErr := func(k string, err error) {
+		resps[k] = FindCoordinatorResponse{
+			Name: k,
+			Err:  err,
+		}
+	}
+	allKeysErr := func(req *kmsg.FindCoordinatorRequest, err error) {
+		for _, k := range req.CoordinatorKeys {
+			keyErr(k, err)
+		}
+	}
+
+	shards := cl.cl.RequestSharded(ctx, req)
+	for _, shard := range shards {
+		req := shard.Req.(*kmsg.FindCoordinatorRequest)
+		if shard.Err != nil {
+			allKeysErr(req, shard.Err)
+			continue
+		}
+		resp := shard.Resp.(*kmsg.FindCoordinatorResponse)
+		if err := maybeAuthErr(resp.ErrorCode); err != nil {
+			allKeysErr(req, err)
+			continue
+		}
+		for _, c := range resp.Coordinators {
+			if err := maybeAuthErr(c.ErrorCode); err != nil {
+				keyErr(c.Key, err)
+				continue
+			}
+			resps[c.Key] = FindCoordinatorResponse{ // key is always on one broker, no need to check existence
+				Name:       c.Key,
+				NodeID:     c.NodeID,
+				Host:       c.Host,
+				Port:       c.Port,
+				Err:        kerr.ErrorForCode(c.ErrorCode),
+				ErrMessage: unptrStr(c.ErrorMessage),
+			}
+		}
+	}
+	return resps
+}
+
+type minmax struct {
+	min, max int16
+}
+
+// BrokerApiVersions contains the API versions for a single broker.
+type BrokerApiVersions struct {
+	NodeID int32 // NodeID is the node this API versions response is for.
+
+	raw         *kmsg.ApiVersionsResponse
+	keyVersions map[int16]minmax
+
+	Err error // Err is non-nil if the API versions request failed.
+}
+
+// Raw returns the raw API versions response.
+func (v *BrokerApiVersions) Raw() *kmsg.ApiVersionsResponse {
+	return v.raw
+}
+
+// KeyVersions returns the broker's min and max version for an API key and
+// whether this broker supports the request.
+func (v *BrokerApiVersions) KeyVersions(key int16) (min, max int16, exists bool) {
+	vs, exists := v.keyVersions[key]
+	return vs.min, vs.max, exists
+}
+
+// KeyMinVersion returns the broker's min version for an API key and whether
+// this broker supports the request.
+func (v *BrokerApiVersions) KeyMinVersion(key int16) (min int16, exists bool) {
+	min, _, exists = v.KeyVersions(key)
+	return min, exists
+}
+
+// KeyMaxVersion returns the broker's max version for an API key and whether
+// this broker supports the request.
+func (v *BrokerApiVersions) KeyMaxVersion(key int16) (max int16, exists bool) {
+	_, max, exists = v.KeyVersions(key)
+	return max, exists
+}
+
+// EachKeySorted calls fn for every API key in the broker response, from the
+// smallest API key to the largest.
+func (v *BrokerApiVersions) EachKeySorted(fn func(key, min, max int16)) {
+	type kmm struct {
+		k, min, max int16
+	}
+	kmms := make([]kmm, 0, len(v.keyVersions))
+	for key, minmax := range v.keyVersions {
+		kmms = append(kmms, kmm{key, minmax.min, minmax.max})
+	}
+	sort.Slice(kmms, func(i, j int) bool { return kmms[i].k < kmms[j].k })
+	for _, kmm := range kmms {
+		fn(kmm.k, kmm.min, kmm.max)
+	}
+}
+
+// VersionGuess returns the best guess of Kafka that this broker is. This is a
+// shortcut for:
+//
+//	kversion.FromApiVersionsResponse(v.Raw()).VersionGuess(opt...)
+//
+// Check the kversion.VersionGuess API docs for more details.
+func (v *BrokerApiVersions) VersionGuess(opt ...kversion.VersionGuessOpt) string {
+	return kversion.FromApiVersionsResponse(v.raw).VersionGuess(opt...)
+}
+
+// BrokersApiVersions contains API versions for all brokers that are reachable
+// from a metadata response.
+type BrokersApiVersions map[int32]BrokerApiVersions
+
+// Sorted returns all broker responses sorted by node ID.
+func (vs BrokersApiVersions) Sorted() []BrokerApiVersions {
+	s := make([]BrokerApiVersions, 0, len(vs))
+	for _, v := range vs {
+		s = append(s, v)
+	}
+	sort.Slice(s, func(i, j int) bool { return s[i].NodeID < s[j].NodeID })
+	return s
+}
+
+// Each calls fn for every broker response.
+func (vs BrokersApiVersions) Each(fn func(BrokerApiVersions)) {
+	for _, v := range vs {
+		fn(v)
+	}
+}
+
+// ApiVersions queries every broker in a metadata response for their API
+// versions. This returns an error only if the metadata request fails.
+func (cl *Client) ApiVersions(ctx context.Context) (BrokersApiVersions, error) {
+	m, err := cl.BrokerMetadata(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	var mu sync.Mutex
+	var wg sync.WaitGroup
+	vs := make(BrokersApiVersions, len(m.Brokers))
+	for _, n := range m.Brokers.NodeIDs() {
+		n := n
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			req := kmsg.NewPtrApiVersionsRequest()
+			req.ClientSoftwareName = "kadm"
+			req.ClientSoftwareVersion = softwareVersion()
+			v := BrokerApiVersions{NodeID: n, keyVersions: make(map[int16]minmax)}
+			v.raw, v.Err = req.RequestWith(ctx, cl.cl.Broker(int(n)))
+
+			mu.Lock()
+			defer mu.Unlock()
+			defer func() { vs[n] = v }()
+			if v.Err != nil {
+				return
+			}
+
+			v.Err = kerr.ErrorForCode(v.raw.ErrorCode)
+			for _, k := range v.raw.ApiKeys {
+				v.keyVersions[k.ApiKey] = minmax{
+					min: k.MinVersion,
+					max: k.MaxVersion,
+				}
+			}
+		}()
+	}
+	wg.Wait()
+
+	return vs, nil
+}
+
+// ClientQuotaEntityComponent is a quota entity component.
+type ClientQuotaEntityComponent struct {
+	Type string  // Type is the entity type ("user", "client-id", "ip").
+	Name *string // Name is the entity name, or null if the default.
+}
+
+// String returns key=value, or key= if value is nil.
+func (d ClientQuotaEntityComponent) String() string {
+	if d.Name == nil {
+		return d.Type + "="
+	}
+	return fmt.Sprintf("%s=%s", d.Type, *d.Name)
+}
+
+// ClientQuotaEntity contains the components that make up a single entity.
+type ClientQuotaEntity []ClientQuotaEntityComponent
+
+// String returns {key=value, key=value}, joining all entities with a ", " and
+// wrapping in braces.
+func (ds ClientQuotaEntity) String() string {
+	var ss []string
+	for _, d := range ds {
+		ss = append(ss, d.String())
+	}
+	return "{" + strings.Join(ss, ", ") + "}"
+}
+
+// ClientQuotaValue is a quota name and value.
+type ClientQuotaValue struct {
+	Key   string  // Key is the quota configuration key.
+	Value float64 // Value is the quota configuration value.
+}
+
+// String returns key=value.
+func (d ClientQuotaValue) String() string {
+	return fmt.Sprintf("%s=%f", d.Key, d.Value)
+}
+
+// ClientQuotaValues contains all client quota values.
+type ClientQuotaValues []ClientQuotaValue
+
+// QuotasMatchType specifies how to match a described client quota entity.
+//
+// 0 means to match the name exactly: user=foo will only match components of
+// entity type "user" and entity name "foo".
+//
+// 1 means to match the default of the name: entity type "user" with a default
+// match will return the default quotas for user entities.
+//
+// 2 means to match any name: entity type "user" with any matching will return
+// both names and defaults.
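+//
+// For example, to describe quotas for every "user" entity, defaults
+// included (a sketch; the admin client "adm" is hypothetical and 2 is the
+// match-any value described above):
+//
+//	quotas, err := adm.DescribeClientQuotas(ctx, false, []DescribeClientQuotaComponent{
+//		{Type: "user", MatchType: 2}, // 2: match any name
+//	})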
+type QuotasMatchType = kmsg.QuotasMatchType

+// DescribeClientQuotaComponent is an input entity component to describing
+// client quotas: we define the type of quota ("client-id", "user"), how to
+// match, and the match name if needed.
+type DescribeClientQuotaComponent struct {
+	Type      string          // Type is the type of entity component to describe ("user", "client-id", "ip").
+	MatchName *string         // MatchName is the name to match against; this is only needed when MatchType is 0 (exact).
+	MatchType QuotasMatchType // MatchType is how to match an entity.
+}
+
+// DescribedClientQuota contains a described quota. A single quota is made up
+// of multiple entities and multiple values, for example, "user=foo" is one
+// component of the entity, and "client-id=bar" is another.
+type DescribedClientQuota struct {
+	Entity ClientQuotaEntity // Entity is the entity of this described client quota.
+	Values ClientQuotaValues // Values contains the quota values for this entity.
+}
+
+// DescribedClientQuotas contains client quotas that were described.
+type DescribedClientQuotas []DescribedClientQuota
+
+// DescribeClientQuotas describes client quotas. If strict is true, the
+// response includes only the requested components.
+func (cl *Client) DescribeClientQuotas(ctx context.Context, strict bool, entityComponents []DescribeClientQuotaComponent) (DescribedClientQuotas, error) {
+	req := kmsg.NewPtrDescribeClientQuotasRequest()
+	req.Strict = strict
+	for _, entity := range entityComponents {
+		rc := kmsg.NewDescribeClientQuotasRequestComponent()
+		rc.EntityType = entity.Type
+		rc.Match = entity.MatchName
+		rc.MatchType = entity.MatchType
+		req.Components = append(req.Components, rc)
+	}
+	resp, err := req.RequestWith(ctx, cl.cl)
+	if err != nil {
+		return nil, err
+	}
+	if err := maybeAuthErr(resp.ErrorCode); err != nil {
+		return nil, err
+	}
+	if err := kerr.ErrorForCode(resp.ErrorCode); err != nil {
+		return nil, &ErrAndMessage{err, unptrStr(resp.ErrorMessage)}
+	}
+	var qs DescribedClientQuotas
+	for _, entry := range resp.Entries {
+		var q DescribedClientQuota
+		for _, e := range entry.Entity {
+			q.Entity = append(q.Entity, ClientQuotaEntityComponent{
+				Type: e.Type,
+				Name: e.Name,
+			})
+		}
+		for _, v := range entry.Values {
+			q.Values = append(q.Values, ClientQuotaValue{
+				Key:   v.Key,
+				Value: v.Value,
+			})
+		}
+		qs = append(qs, q)
+	}
+	return qs, nil
+}
+
+// AlterClientQuotaOp sets or removes a client quota.
+type AlterClientQuotaOp struct {
+	Key    string  // Key is the quota configuration key to set or remove.
+	Value  float64 // Value is the quota configuration value to set or remove.
+	Remove bool    // Remove, if true, removes this quota rather than sets it.
+}
+
+// AlterClientQuotaEntry pairs an entity with quotas to set or remove.
+type AlterClientQuotaEntry struct {
+	Entity ClientQuotaEntity    // Entity is the entity to alter quotas for.
+	Ops    []AlterClientQuotaOp // Ops are quotas to set or remove.
+}
+
+// AlteredClientQuota is the result for a single entity that was altered.
+type AlteredClientQuota struct {
+	Entity     ClientQuotaEntity // Entity is the entity this result is for.
+	Err        error             // Err is non-nil if the alter operation on this entity failed.
+	ErrMessage string            // ErrMessage is an optional additional message on error.
+}
+
+// AlteredClientQuotas contains results for all altered entities.
+type AlteredClientQuotas []AlteredClientQuota
+
+// AlterClientQuotas alters quotas for the input entries. You may consider
+// checking ValidateAlterClientQuotas before using this method.
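+//
+// A sketch of setting one quota for a single user (the user and value are
+// hypothetical; producer_byte_rate is a standard Kafka quota key):
+//
+//	altered, err := adm.AlterClientQuotas(ctx, []AlterClientQuotaEntry{{
+//		Entity: ClientQuotaEntity{{Type: "user", Name: StringPtr("eve")}},
+//		Ops:    []AlterClientQuotaOp{{Key: "producer_byte_rate", Value: 1 << 20}},
+//	}})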
+func (cl *Client) AlterClientQuotas(ctx context.Context, entries []AlterClientQuotaEntry) (AlteredClientQuotas, error) { + return cl.alterClientQuotas(ctx, false, entries) +} + +// ValidateAlterClientQuotas validates an alter client quota request. This +// returns exactly what AlterClientQuotas returns, but does not actually alter +// quotas. +func (cl *Client) ValidateAlterClientQuotas(ctx context.Context, entries []AlterClientQuotaEntry) (AlteredClientQuotas, error) { + return cl.alterClientQuotas(ctx, true, entries) +} + +func (cl *Client) alterClientQuotas(ctx context.Context, validate bool, entries []AlterClientQuotaEntry) (AlteredClientQuotas, error) { + req := kmsg.NewPtrAlterClientQuotasRequest() + req.ValidateOnly = validate + for _, entry := range entries { + re := kmsg.NewAlterClientQuotasRequestEntry() + for _, c := range entry.Entity { + rec := kmsg.NewAlterClientQuotasRequestEntryEntity() + rec.Type = c.Type + rec.Name = c.Name + re.Entity = append(re.Entity, rec) + } + for _, op := range entry.Ops { + reo := kmsg.NewAlterClientQuotasRequestEntryOp() + reo.Key = op.Key + reo.Value = op.Value + reo.Remove = op.Remove + re.Ops = append(re.Ops, reo) + } + req.Entries = append(req.Entries, re) + } + resp, err := req.RequestWith(ctx, cl.cl) + if err != nil { + return nil, err + } + var as AlteredClientQuotas + for _, entry := range resp.Entries { + var e ClientQuotaEntity + for _, c := range entry.Entity { + e = append(e, ClientQuotaEntityComponent{ + Type: c.Type, + Name: c.Name, + }) + } + a := AlteredClientQuota{ + Entity: e, + Err: kerr.ErrorForCode(entry.ErrorCode), + ErrMessage: unptrStr(entry.ErrorMessage), + } + as = append(as, a) + } + return as, nil +} + +// ScramMechanism is a SCRAM mechanism. +type ScramMechanism int8 + +const ( + // ScramSha256 represents the SCRAM-SHA-256 mechanism. + ScramSha256 ScramMechanism = 1 + // ScramSha512 represents the SCRAM-SHA-512 mechanism. + ScramSha512 ScramMechanism = 2 +) + +// String returns either SCRAM-SHA-256, SCRAM-SHA-512, or UNKNOWN. +func (s ScramMechanism) String() string { + switch s { + case ScramSha256: + return "SCRAM-SHA-256" + case ScramSha512: + return "SCRAM-SHA-512" + default: + return "UNKNOWN" + } +} + +// CredInfo contains the SCRAM mechanism and iterations for a password. +type CredInfo struct { + // Mechanism is the SCRAM mechanism a password exists for. This is 0 + // for UNKNOWN, 1 for SCRAM-SHA-256, and 2 for SCRAM-SHA-512. + Mechanism ScramMechanism + // Iterations is the number of SCRAM iterations for this password. + Iterations int32 +} + +// String returns MECHANISM=iterations={c.Iterations}. +func (c CredInfo) String() string { + return fmt.Sprintf("%s=iterations=%d", c.Mechanism, c.Iterations) +} + +// DescribedUserSCRAM contains a user, the SCRAM mechanisms that the user has +// passwords for, and if describing the user SCRAM credentials errored. +type DescribedUserSCRAM struct { + User string // User is the user this described user credential is for. + CredInfos []CredInfo // CredInfos contains SCRAM mechanisms the user has passwords for. + Err error // Err is any error encountered when describing the user. + ErrMessage string // ErrMessage a potential extra message describing any error. +} + +// DescribedUserSCRAMs contains described user SCRAM credentials keyed by user. +type DescribedUserSCRAMs map[string]DescribedUserSCRAM + +// Sorted returns the described user credentials ordered by user. 
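+//
+// For example (an illustrative sketch; it assumes a configured *kadm.Client
+// named adm and an in-scope ctx, with error handling elided):
+//
+//	scrams, _ := adm.DescribeUserSCRAMs(ctx)
+//	for _, d := range scrams.Sorted() {
+//		fmt.Println(d.User, d.CredInfos)
+//	}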
+func (ds DescribedUserSCRAMs) Sorted() []DescribedUserSCRAM { + s := make([]DescribedUserSCRAM, 0, len(ds)) + for _, d := range ds { + s = append(s, d) + } + sort.Slice(s, func(i, j int) bool { return s[i].User < s[j].User }) + return s +} + +// AllFailed returns whether all described user credentials are errored. +func (ds DescribedUserSCRAMs) AllFailed() bool { + var n int + ds.EachError(func(DescribedUserSCRAM) { n++ }) + return len(ds) > 0 && n == len(ds) +} + +// EachError calls fn for every described user that has a non-nil error. +func (ds DescribedUserSCRAMs) EachError(fn func(DescribedUserSCRAM)) { + for _, d := range ds { + if d.Err != nil { + fn(d) + } + } +} + +// Each calls fn for every described user. +func (ds DescribedUserSCRAMs) Each(fn func(DescribedUserSCRAM)) { + for _, d := range ds { + fn(d) + } +} + +// Error iterates over all described users and returns the first error +// encountered, if any. +func (ds DescribedUserSCRAMs) Error() error { + for _, d := range ds { + if d.Err != nil { + return d.Err + } + } + return nil +} + +// Ok returns true if there are no errors. This is a shortcut for rs.Error() == +// nil. +func (ds DescribedUserSCRAMs) Ok() bool { + return ds.Error() == nil +} + +// DescribeUserSCRAMs returns a small bit of information about all users in the +// input request that have SCRAM passwords configured. No users requests all +// users. +func (cl *Client) DescribeUserSCRAMs(ctx context.Context, users ...string) (DescribedUserSCRAMs, error) { + req := kmsg.NewPtrDescribeUserSCRAMCredentialsRequest() + for _, u := range users { + ru := kmsg.NewDescribeUserSCRAMCredentialsRequestUser() + ru.Name = u + req.Users = append(req.Users, ru) + } + resp, err := req.RequestWith(ctx, cl.cl) + if err != nil { + return nil, err + } + if err := maybeAuthErr(resp.ErrorCode); err != nil { + return nil, err + } + if err := kerr.ErrorForCode(resp.ErrorCode); err != nil { + return nil, err + } + rs := make(DescribedUserSCRAMs) + for _, res := range resp.Results { + r := DescribedUserSCRAM{ + User: res.User, + Err: kerr.ErrorForCode(res.ErrorCode), + ErrMessage: unptrStr(res.ErrorMessage), + } + for _, i := range res.CredentialInfos { + r.CredInfos = append(r.CredInfos, CredInfo{ + Mechanism: ScramMechanism(i.Mechanism), + Iterations: i.Iterations, + }) + } + rs[r.User] = r + } + return rs, nil +} + +// DeleteSCRAM deletes a password with the given mechanism for the user. +type DeleteSCRAM struct { + User string // User is the username to match for deletion. + Mechanism ScramMechanism // Mechanism is the mechanism to match to delete a password for. +} + +// UpsertSCRAM either updates or creates (inserts) a new password for a user. +// There are two ways to specify a password: either with the Password field +// directly, or by specifying both Salt and SaltedPassword. If you specify just +// a password, this package generates a 24 byte salt and uses pbkdf2 to create +// the salted password. +type UpsertSCRAM struct { + User string // User is the username to use. + Mechanism ScramMechanism // Mechanism is the mechanism to use. + Iterations int32 // Iterations is the SCRAM iterations to use; must be between 4096 and 16384. + Password string // Password is the password to salt and convert to a salted password. Requires Salt and SaltedPassword to be empty. + Salt []byte // Salt must be paired with SaltedPassword and requires Password to be empty. + SaltedPassword []byte // SaltedPassword must be paired with Salt and requires Password to be empty. 
+} + +// AlteredUserSCRAM is the result of an alter operation. +type AlteredUserSCRAM struct { + User string // User is the username that was altered. + Err error // Err is any error encountered when altering the user. + ErrMessage string // ErrMessage a potential extra message describing any error. +} + +// AlteredUserSCRAMs contains altered user SCRAM credentials keyed by user. +type AlteredUserSCRAMs map[string]AlteredUserSCRAM + +// Sorted returns the altered user credentials ordered by user. +func (as AlteredUserSCRAMs) Sorted() []AlteredUserSCRAM { + s := make([]AlteredUserSCRAM, 0, len(as)) + for _, a := range as { + s = append(s, a) + } + sort.Slice(s, func(i, j int) bool { return s[i].User < s[j].User }) + return s +} + +// AllFailed returns whether all altered user credentials are errored. +func (as AlteredUserSCRAMs) AllFailed() bool { + var n int + as.EachError(func(AlteredUserSCRAM) { n++ }) + return len(as) > 0 && n == len(as) +} + +// EachError calls fn for every altered user that has a non-nil error. +func (as AlteredUserSCRAMs) EachError(fn func(AlteredUserSCRAM)) { + for _, a := range as { + if a.Err != nil { + fn(a) + } + } +} + +// Each calls fn for every altered user. +func (as AlteredUserSCRAMs) Each(fn func(AlteredUserSCRAM)) { + for _, a := range as { + fn(a) + } +} + +// Error iterates over all altered users and returns the first error +// encountered, if any. +func (as AlteredUserSCRAMs) Error() error { + for _, a := range as { + if a.Err != nil { + return a.Err + } + } + return nil +} + +// Ok returns true if there are no errors. This is a shortcut for rs.Error() == +// nil. +func (as AlteredUserSCRAMs) Ok() bool { + return as.Error() == nil +} + +// AlterUserSCRAMs deletes, updates, or creates (inserts) user SCRAM +// credentials. Note that a username can only appear once across both upserts +// and deletes. This modifies elements of the upsert slice that need to have a +// salted password generated. 
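+//
+// A minimal sketch of one upsert (illustrative only: it assumes a configured
+// *kadm.Client named adm and an in-scope ctx; the username and password are
+// placeholders):
+//
+//	altered, err := adm.AlterUserSCRAMs(ctx, nil, []kadm.UpsertSCRAM{{
+//		User:       "example-user",
+//		Mechanism:  kadm.ScramSha256,
+//		Iterations: 8192, // within the required 4096 to 16384 range
+//		Password:   "example-password",
+//	}})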
+func (cl *Client) AlterUserSCRAMs(ctx context.Context, del []DeleteSCRAM, upsert []UpsertSCRAM) (AlteredUserSCRAMs, error) {
+	for i, u := range upsert {
+		if u.Password != "" {
+			if len(u.Salt) > 0 || len(u.SaltedPassword) > 0 {
+				return nil, fmt.Errorf("user %s: cannot specify both a password and a salt / salted password", u.User)
+			}
+			u.Salt = make([]byte, 24)
+			if _, err := rand.Read(u.Salt); err != nil {
+				return nil, fmt.Errorf("user %s: unable to generate salt: %v", u.User, err)
+			}
+			switch u.Mechanism {
+			case ScramSha256:
+				u.SaltedPassword = pbkdf2.Key([]byte(u.Password), u.Salt, int(u.Iterations), sha256.Size, sha256.New)
+			case ScramSha512:
+				u.SaltedPassword = pbkdf2.Key([]byte(u.Password), u.Salt, int(u.Iterations), sha512.Size, sha512.New)
+			default:
+				return nil, fmt.Errorf("user %s: unknown mechanism, unable to generate password", u.User)
+			}
+			upsert[i] = u
+		} else {
+			if len(u.Salt) == 0 || len(u.SaltedPassword) == 0 {
+				return nil, fmt.Errorf("user %s: must specify either a password or a salt and salted password", u.User)
+			}
+		}
+	}
+
+	req := kmsg.NewPtrAlterUserSCRAMCredentialsRequest()
+	for _, d := range del {
+		rd := kmsg.NewAlterUserSCRAMCredentialsRequestDeletion()
+		rd.Name = d.User
+		rd.Mechanism = int8(d.Mechanism)
+		req.Deletions = append(req.Deletions, rd)
+	}
+	for _, u := range upsert {
+		ru := kmsg.NewAlterUserSCRAMCredentialsRequestUpsertion()
+		ru.Name = u.User
+		ru.Mechanism = int8(u.Mechanism)
+		ru.Iterations = u.Iterations
+		ru.Salt = u.Salt
+		ru.SaltedPassword = u.SaltedPassword
+		req.Upsertions = append(req.Upsertions, ru)
+	}
+	resp, err := req.RequestWith(ctx, cl.cl)
+	if err != nil {
+		return nil, err
+	}
+	rs := make(AlteredUserSCRAMs)
+	for _, res := range resp.Results {
+		if err := maybeAuthErr(res.ErrorCode); err != nil {
+			return nil, err
+		}
+		r := AlteredUserSCRAM{
+			User:       res.User,
+			Err:        kerr.ErrorForCode(res.ErrorCode),
+			ErrMessage: unptrStr(res.ErrorMessage),
+		}
+		rs[r.User] = r
+	}
+	return rs, nil
+}
+
+// ElectLeadersHow is how partition leaders should be elected.
+type ElectLeadersHow int8
+
+const (
+	// ElectPreferredReplica elects the preferred replica for a partition.
+	ElectPreferredReplica ElectLeadersHow = 0
+	// ElectLiveReplica elects the first live replica if there are no
+	// in-sync replicas (i.e., this is unclean leader election).
+	ElectLiveReplica ElectLeadersHow = 1
+)
+
+// ElectLeadersResult is the result for a single partition in an elect leaders
+// request.
+type ElectLeadersResult struct {
+	Topic      string          // Topic is the topic this result is for.
+	Partition  int32           // Partition is the partition this result is for.
+	How        ElectLeadersHow // How is the type of election that was performed.
+	Err        error           // Err is non-nil if electing this partition's leader failed, such as the partition not existing, or the preferred leader not being available when you used ElectPreferredReplica.
+	ErrMessage string          // ErrMessage a potential extra message describing any error.
+}
+
+// ElectLeadersResults contains per-topic, per-partition results for an elect
+// leaders request.
+type ElectLeadersResults map[string]map[int32]ElectLeadersResult
+
+// ElectLeaders elects leaders for partitions. This request was added in Kafka
+// 2.2 to replace the previously-ZooKeeper-only option of triggering leader
+// elections. See KIP-183 for more details.
+//
+// Kafka 2.4 introduced the ability to use unclean leader election.
If you use +// unclean leader election on a Kafka 2.2 or 2.3 cluster, the client will +// instead fall back to preferred replica (clean) leader election. You can +// check the result's How function (or field) to see. +// +// If s is nil, this will elect leaders for all partitions. +// +// This will return *AuthError if you do not have ALTER on CLUSTER for +// kafka-cluster. +func (cl *Client) ElectLeaders(ctx context.Context, how ElectLeadersHow, s TopicsSet) (ElectLeadersResults, error) { + req := kmsg.NewPtrElectLeadersRequest() + req.ElectionType = int8(how) + for _, t := range s.IntoList() { + rt := kmsg.NewElectLeadersRequestTopic() + rt.Topic = t.Topic + rt.Partitions = t.Partitions + req.Topics = append(req.Topics, rt) + } + resp, err := req.RequestWith(ctx, cl.cl) + if err != nil { + return nil, err + } + if err := maybeAuthErr(resp.ErrorCode); err != nil { + return nil, err + } + if resp.Version == 0 { // v0 does not have the election type field + how = ElectPreferredReplica + } + rs := make(ElectLeadersResults) + for _, t := range resp.Topics { + rt := make(map[int32]ElectLeadersResult) + rs[t.Topic] = rt + for _, p := range t.Partitions { + if err := maybeAuthErr(p.ErrorCode); err != nil { + return nil, err // v0 has no top-level err + } + rt[p.Partition] = ElectLeadersResult{ + Topic: t.Topic, + Partition: p.Partition, + How: how, + Err: kerr.ErrorForCode(p.ErrorCode), + ErrMessage: unptrStr(p.ErrorMessage), + } + } + } + return rs, nil +} + +// OffsetForLeaderEpochRequest contains topics, partitions, and leader epochs +// to request offsets for in an OffsetForLeaderEpoch. +type OffsetForLeaderEpochRequest map[string]map[int32]int32 + +// Add adds a topic, partition, and leader epoch to the request. +func (l *OffsetForLeaderEpochRequest) Add(topic string, partition, leaderEpoch int32) { + if *l == nil { + *l = make(map[string]map[int32]int32) + } + t := (*l)[topic] + if t == nil { + t = make(map[int32]int32) + (*l)[topic] = t + } + t[partition] = leaderEpoch +} + +// OffsetForLeaderEpoch contains a response for a single partition in an +// OffsetForLeaderEpoch request. +type OffsetForLeaderEpoch struct { + NodeID int32 // NodeID is the node that is the leader of this topic / partition. + Topic string // Topic is the topic this leader epoch response is for. + Partition int32 // Partition is the partition this leader epoch response is for. + + // LeaderEpoch is either + // + // 1) -1, if the requested LeaderEpoch is unknown. + // + // 2) Less than the requested LeaderEpoch, if the requested LeaderEpoch + // exists but has no records in it. For example, epoch 1 had end offset + // 37, then epoch 2 and 3 had no records: if you request LeaderEpoch 3, + // this will return LeaderEpoch 1 with EndOffset 37. + // + // 3) Equal to the requested LeaderEpoch, if the requested LeaderEpoch + // is equal to or less than the current epoch for the partition. + LeaderEpoch int32 + + // EndOffset is either + // + // 1) The LogEndOffset, if the broker has the same LeaderEpoch as the + // request. + // + // 2) the beginning offset of the next LeaderEpoch, if the broker has a + // higher LeaderEpoch. + // + // The second option allows the user to detect data loss: if the + // consumer consumed past the EndOffset that is returned, then the + // consumer should reset to the returned offset and the consumer knows + // that everything from the returned offset to the requested offset was + // lost. + EndOffset int64 + + // Err is non-nil if this partition had a response error. 
+	Err error
+}
+
+// OffsetsForLeaderEpochs contains responses for partitions in an
+// OffsetForLeaderEpochRequest.
+type OffsetsForLeaderEpochs map[string]map[int32]OffsetForLeaderEpoch
+
+// OffsetForLeaderEpoch requests end offsets for the requested leader epoch in
+// partitions in the request. This is a relatively advanced and client-internal
+// request; for more details, see the doc comments on the OffsetForLeaderEpoch
+// type.
+//
+// This may return *ShardErrors or *AuthError.
+func (cl *Client) OffetForLeaderEpoch(ctx context.Context, r OffsetForLeaderEpochRequest) (OffsetsForLeaderEpochs, error) {
+	req := kmsg.NewPtrOffsetForLeaderEpochRequest()
+	for t, ps := range r {
+		rt := kmsg.NewOffsetForLeaderEpochRequestTopic()
+		rt.Topic = t
+		for p, e := range ps {
+			rp := kmsg.NewOffsetForLeaderEpochRequestTopicPartition()
+			rp.Partition = p
+			rp.LeaderEpoch = e
+			rt.Partitions = append(rt.Partitions, rp)
+		}
+		req.Topics = append(req.Topics, rt)
+	}
+	shards := cl.cl.RequestSharded(ctx, req)
+	ls := make(OffsetsForLeaderEpochs)
+	return ls, shardErrEachBroker(req, shards, func(b BrokerDetail, kr kmsg.Response) error {
+		resp := kr.(*kmsg.OffsetForLeaderEpochResponse)
+		for _, rt := range resp.Topics {
+			lps, exists := ls[rt.Topic]
+			if !exists { // topic partitions could be spread around brokers, need to check existence
+				lps = make(map[int32]OffsetForLeaderEpoch)
+				ls[rt.Topic] = lps
+			}
+			for _, rp := range rt.Partitions {
+				if err := maybeAuthErr(rp.ErrorCode); err != nil {
+					return err
+				}
+				lps[rp.Partition] = OffsetForLeaderEpoch{ // one partition globally, no need to exist check
+					NodeID:      b.NodeID,
+					Topic:       rt.Topic,
+					Partition:   rp.Partition,
+					LeaderEpoch: rp.LeaderEpoch,
+					EndOffset:   rp.EndOffset,
+					Err:         kerr.ErrorForCode(rp.ErrorCode),
+				}
+			}
+		}
+		return nil
+	})
+}
diff --git a/vendor/github.com/twmb/franz-go/pkg/kadm/partas.go b/vendor/github.com/twmb/franz-go/pkg/kadm/partas.go
new file mode 100644
index 00000000000..84b67790d88
--- /dev/null
+++ b/vendor/github.com/twmb/franz-go/pkg/kadm/partas.go
@@ -0,0 +1,208 @@
+package kadm
+
+import (
+	"context"
+	"sort"
+
+	"github.com/twmb/franz-go/pkg/kerr"
+	"github.com/twmb/franz-go/pkg/kmsg"
+)
+
+// AlterPartitionAssignmentsReq is the input for a request to alter partition
+// assignments. The keys are topics and partitions, and the final slice
+// corresponds to brokers that replicas will be assigned to. If the brokers
+// for a given partition are null, the request will *cancel* any active
+// reassignment for that partition.
+type AlterPartitionAssignmentsReq map[string]map[int32][]int32
+
+// Assign specifies brokers that a partition should be placed on. Using null
+// for the brokers cancels a pending reassignment of the partition.
+func (r *AlterPartitionAssignmentsReq) Assign(t string, p int32, brokers []int32) {
+	if *r == nil {
+		*r = make(map[string]map[int32][]int32)
+	}
+	ps := (*r)[t]
+	if ps == nil {
+		ps = make(map[int32][]int32)
+		(*r)[t] = ps
+	}
+	ps[p] = brokers
+}
+
+// CancelAssign cancels a reassignment of the given partition.
+func (r *AlterPartitionAssignmentsReq) CancelAssign(t string, p int32) {
+	r.Assign(t, p, nil)
+}
+
+// AlterPartitionAssignmentsResponse contains a response for an individual
+// partition that was assigned.
+type AlterPartitionAssignmentsResponse struct {
+	Topic     string // Topic is the topic that was assigned.
+	Partition int32  // Partition is the partition that was assigned.
+	Err       error  // Err is non-nil if this assignment errored.
+ ErrMessage string // ErrMessage is an optional additional message on error. +} + +// AlterPartitionAssignmentsResponses contains responses to all partitions in an +// alter assignment request. +type AlterPartitionAssignmentsResponses map[string]map[int32]AlterPartitionAssignmentsResponse + +// Sorted returns the responses sorted by topic and partition. +func (rs AlterPartitionAssignmentsResponses) Sorted() []AlterPartitionAssignmentsResponse { + var all []AlterPartitionAssignmentsResponse + rs.Each(func(r AlterPartitionAssignmentsResponse) { + all = append(all, r) + }) + sort.Slice(all, func(i, j int) bool { + l, r := all[i], all[j] + return l.Topic < r.Topic || l.Topic == r.Topic && l.Partition < r.Partition + }) + return all +} + +// Each calls fn for every response. +func (rs AlterPartitionAssignmentsResponses) Each(fn func(AlterPartitionAssignmentsResponse)) { + for _, ps := range rs { + for _, r := range ps { + fn(r) + } + } +} + +// Error returns the first error in the responses, if any. +func (rs AlterPartitionAssignmentsResponses) Error() error { + for _, ps := range rs { + for _, r := range ps { + if r.Err != nil { + return r.Err + } + } + } + return nil +} + +// AlterPartitionAssignments alters partition assignments for the requested +// partitions, returning an error if the response could not be issued or if +// you do not have permissions. +func (cl *Client) AlterPartitionAssignments(ctx context.Context, req AlterPartitionAssignmentsReq) (AlterPartitionAssignmentsResponses, error) { + if len(req) == 0 { + return make(AlterPartitionAssignmentsResponses), nil + } + + kreq := kmsg.NewPtrAlterPartitionAssignmentsRequest() + kreq.TimeoutMillis = cl.timeoutMillis + for t, ps := range req { + rt := kmsg.NewAlterPartitionAssignmentsRequestTopic() + rt.Topic = t + for p, bs := range ps { + rp := kmsg.NewAlterPartitionAssignmentsRequestTopicPartition() + rp.Partition = p + rp.Replicas = bs + rt.Partitions = append(rt.Partitions, rp) + } + kreq.Topics = append(kreq.Topics, rt) + } + + kresp, err := kreq.RequestWith(ctx, cl.cl) + if err != nil { + return nil, err + } + if err = kerr.ErrorForCode(kresp.ErrorCode); err != nil { + return nil, &ErrAndMessage{err, unptrStr(kresp.ErrorMessage)} + } + + a := make(AlterPartitionAssignmentsResponses) + for _, kt := range kresp.Topics { + ps := make(map[int32]AlterPartitionAssignmentsResponse) + a[kt.Topic] = ps + for _, kp := range kt.Partitions { + ps[kp.Partition] = AlterPartitionAssignmentsResponse{ + Topic: kt.Topic, + Partition: kp.Partition, + Err: kerr.ErrorForCode(kp.ErrorCode), + ErrMessage: unptrStr(kp.ErrorMessage), + } + } + } + return a, nil +} + +// ListPartitionReassignmentsResponse contains a response for an individual +// partition that was listed. +type ListPartitionReassignmentsResponse struct { + Topic string // Topic is the topic that was listed. + Partition int32 // Partition is the partition that was listed. + Replicas []int32 // Replicas are the partition's current replicas. + AddingReplicas []int32 // AddingReplicas are replicas currently being added to the partition. + RemovingReplicas []int32 // RemovingReplicas are replicas currently being removed from the partition. +} + +// ListPartitionReassignmentsResponses contains responses to all partitions in +// a list reassignment request. +type ListPartitionReassignmentsResponses map[string]map[int32]ListPartitionReassignmentsResponse + +// Sorted returns the responses sorted by topic and partition. 
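+//
+// For example (an illustrative sketch; it assumes a configured *kadm.Client
+// named adm, an in-scope ctx, and a kadm.TopicsSet named s, with error
+// handling elided):
+//
+//	reassigning, _ := adm.ListPartitionReassignments(ctx, s)
+//	for _, r := range reassigning.Sorted() {
+//		fmt.Println(r.Topic, r.Partition, r.AddingReplicas, r.RemovingReplicas)
+//	}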
+func (rs ListPartitionReassignmentsResponses) Sorted() []ListPartitionReassignmentsResponse { + var all []ListPartitionReassignmentsResponse + rs.Each(func(r ListPartitionReassignmentsResponse) { + all = append(all, r) + }) + sort.Slice(all, func(i, j int) bool { + l, r := all[i], all[j] + return l.Topic < r.Topic || l.Topic == r.Topic && l.Partition < r.Partition + }) + return all +} + +// Each calls fn for every response. +func (rs ListPartitionReassignmentsResponses) Each(fn func(ListPartitionReassignmentsResponse)) { + for _, ps := range rs { + for _, r := range ps { + fn(r) + } + } +} + +// ListPartitionReassignments lists the state of any active reassignments for +// all requested partitions, returning an error if the response could not be +// issued or if you do not have permissions. +func (cl *Client) ListPartitionReassignments(ctx context.Context, s TopicsSet) (ListPartitionReassignmentsResponses, error) { + if len(s) == 0 { + return make(ListPartitionReassignmentsResponses), nil + } + + kreq := kmsg.NewPtrListPartitionReassignmentsRequest() + kreq.TimeoutMillis = cl.timeoutMillis + for t, ps := range s { + rt := kmsg.NewListPartitionReassignmentsRequestTopic() + rt.Topic = t + for p := range ps { + rt.Partitions = append(rt.Partitions, p) + } + kreq.Topics = append(kreq.Topics, rt) + } + + kresp, err := kreq.RequestWith(ctx, cl.cl) + if err != nil { + return nil, err + } + if err = kerr.ErrorForCode(kresp.ErrorCode); err != nil { + return nil, &ErrAndMessage{err, unptrStr(kresp.ErrorMessage)} + } + + a := make(ListPartitionReassignmentsResponses) + for _, kt := range kresp.Topics { + ps := make(map[int32]ListPartitionReassignmentsResponse) + a[kt.Topic] = ps + for _, kp := range kt.Partitions { + ps[kp.Partition] = ListPartitionReassignmentsResponse{ + Topic: kt.Topic, + Partition: kp.Partition, + Replicas: kp.Replicas, + AddingReplicas: kp.AddingReplicas, + RemovingReplicas: kp.RemovingReplicas, + } + } + } + return a, nil +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kadm/topics.go b/vendor/github.com/twmb/franz-go/pkg/kadm/topics.go new file mode 100644 index 00000000000..408506e5df4 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kadm/topics.go @@ -0,0 +1,645 @@ +package kadm + +import ( + "context" + "errors" + "sort" + + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" +) + +// ListTopics issues a metadata request and returns TopicDetails. Specific +// topics to describe can be passed as additional arguments. If no topics are +// specified, all topics are requested. Internal topics are not returned unless +// specifically requested. To see all topics including internal topics, use +// ListTopicsWithInternal. +// +// This returns an error if the request fails to be issued, or an *AuthError. +func (cl *Client) ListTopics( + ctx context.Context, + topics ...string, +) (TopicDetails, error) { + t, err := cl.ListTopicsWithInternal(ctx, topics...) + if err != nil { + return nil, err + } + t.FilterInternal() + return t, nil +} + +// ListTopicsWithInternal is the same as ListTopics, but does not filter +// internal topics before returning. +func (cl *Client) ListTopicsWithInternal( + ctx context.Context, + topics ...string, +) (TopicDetails, error) { + m, err := cl.Metadata(ctx, topics...) + if err != nil { + return nil, err + } + return m.Topics, nil +} + +// CreateTopicResponse contains the response for an individual created topic. +type CreateTopicResponse struct { + Topic string // Topic is the topic that was created. 
+	ID                TopicID           // ID is the topic ID for this topic, if talking to Kafka v2.8+.
+	Err               error             // Err is any error preventing this topic from being created.
+	ErrMessage        string            // ErrMessage a potential extra message describing any error.
+	NumPartitions     int32             // NumPartitions is the number of partitions in the response, if talking to Kafka v2.4+.
+	ReplicationFactor int16             // ReplicationFactor is how many replicas every partition has for this topic, if talking to Kafka 2.4+.
+	Configs           map[string]Config // Configs contains the topic configuration (minus config synonyms), if talking to Kafka 2.4+.
+}
+
+// CreateTopicResponses contains per-topic responses for created topics.
+type CreateTopicResponses map[string]CreateTopicResponse
+
+// Sorted returns all create topic responses sorted first by topic ID, then by
+// topic name.
+func (rs CreateTopicResponses) Sorted() []CreateTopicResponse {
+	s := make([]CreateTopicResponse, 0, len(rs))
+	for _, d := range rs {
+		s = append(s, d)
+	}
+	sort.Slice(s, func(i, j int) bool {
+		l, r := s[i], s[j]
+		if l.ID.Less(r.ID) {
+			return true
+		}
+		return l.Topic < r.Topic
+	})
+	return s
+}
+
+// On calls fn for the response topic if it exists, returning the response and
+// the error returned from fn. If fn is nil, this simply returns the response.
+//
+// The fn is given a copy of the response. This function returns the copy as
+// well; any modifications within fn are modifications on the returned copy.
+//
+// If the topic does not exist, this returns kerr.UnknownTopicOrPartition.
+func (rs CreateTopicResponses) On(topic string, fn func(*CreateTopicResponse) error) (CreateTopicResponse, error) {
+	if len(rs) > 0 {
+		r, ok := rs[topic]
+		if ok {
+			if fn == nil {
+				return r, nil
+			}
+			return r, fn(&r)
+		}
+	}
+	return CreateTopicResponse{}, kerr.UnknownTopicOrPartition
+}
+
+// Error iterates over all responses and returns the first error
+// encountered, if any.
+func (rs CreateTopicResponses) Error() error {
+	for _, r := range rs {
+		if r.Err != nil {
+			return r.Err
+		}
+	}
+	return nil
+}
+
+// CreateTopic issues a create topics request with the given partitions,
+// replication factor, and (optional) configs for the given topic name.
+// This is similar to CreateTopics, but returns the kerr.ErrorForCode(response.ErrorCode)
+// if the request/response is successful.
+func (cl *Client) CreateTopic(
+	ctx context.Context,
+	partitions int32,
+	replicationFactor int16,
+	configs map[string]*string,
+	topic string,
+) (CreateTopicResponse, error) {
+	createTopicResponse, err := cl.CreateTopics(
+		ctx,
+		partitions,
+		replicationFactor,
+		configs,
+		topic,
+	)
+	if err != nil {
+		return CreateTopicResponse{}, err
+	}
+
+	response, exists := createTopicResponse[topic]
+	if !exists {
+		return CreateTopicResponse{}, errors.New("requested topic was not part of create topic response")
+	}
+
+	return response, response.Err
+}
+
+// CreateTopics issues a create topics request with the given partitions,
+// replication factor, and (optional) configs for every topic. Under the hood,
+// this uses the default 15s request timeout and lets Kafka choose where to
+// place partitions.
+//
+// Version 4 of the underlying create topic request was introduced in Kafka 2.4
+// and brought client support for creation defaults. If talking to a 2.4+
+// cluster, you can use -1 for partitions and replicationFactor to use broker
+// defaults.
+//
+// This package includes a StringPtr function to aid in building config values.
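+//
+// As an illustrative sketch only (assuming a configured *kadm.Client named
+// adm and an in-scope ctx; the topic name and config are placeholders):
+//
+//	configs := map[string]*string{"retention.ms": kadm.StringPtr("86400000")}
+//	resps, err := adm.CreateTopics(ctx, 6, 3, configs, "example-topic")
+//	if err == nil {
+//		err = resps.Error() // surface the first per-topic error, if any
+//	}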
+// +// This does not return an error on authorization failures, instead, +// authorization failures are included in the responses. This only returns an +// error if the request fails to be issued. You may consider checking +// ValidateCreateTopics before using this method. +func (cl *Client) CreateTopics( + ctx context.Context, + partitions int32, + replicationFactor int16, + configs map[string]*string, + topics ...string, +) (CreateTopicResponses, error) { + return cl.createTopics(ctx, false, partitions, replicationFactor, configs, topics) +} + +// ValidateCreateTopics validates a create topics request with the given +// partitions, replication factor, and (optional) configs for every topic. +// +// This package includes a StringPtr function to aid in building config values. +// +// This uses the same logic as CreateTopics, but with the request's +// ValidateOnly field set to true. The response is the same response you would +// receive from CreateTopics, but no topics are actually created. +func (cl *Client) ValidateCreateTopics( + ctx context.Context, + partitions int32, + replicationFactor int16, + configs map[string]*string, + topics ...string, +) (CreateTopicResponses, error) { + return cl.createTopics(ctx, true, partitions, replicationFactor, configs, topics) +} + +func (cl *Client) createTopics(ctx context.Context, dry bool, p int32, rf int16, configs map[string]*string, topics []string) (CreateTopicResponses, error) { + if len(topics) == 0 { + return make(CreateTopicResponses), nil + } + + req := kmsg.NewCreateTopicsRequest() + req.TimeoutMillis = cl.timeoutMillis + req.ValidateOnly = dry + for _, t := range topics { + rt := kmsg.NewCreateTopicsRequestTopic() + rt.Topic = t + rt.NumPartitions = p + rt.ReplicationFactor = rf + for k, v := range configs { + rc := kmsg.NewCreateTopicsRequestTopicConfig() + rc.Name = k + rc.Value = v + rt.Configs = append(rt.Configs, rc) + } + req.Topics = append(req.Topics, rt) + } + + resp, err := req.RequestWith(ctx, cl.cl) + if err != nil { + return nil, err + } + + rs := make(CreateTopicResponses) + for _, t := range resp.Topics { + rt := CreateTopicResponse{ + Topic: t.Topic, + ID: t.TopicID, + Err: kerr.ErrorForCode(t.ErrorCode), + ErrMessage: unptrStr(t.ErrorMessage), + NumPartitions: t.NumPartitions, + ReplicationFactor: t.ReplicationFactor, + Configs: make(map[string]Config), + } + for _, c := range t.Configs { + rt.Configs[c.Name] = Config{ + Key: c.Name, + Value: c.Value, + Source: kmsg.ConfigSource(c.Source), + Sensitive: c.IsSensitive, + } + } + rs[t.Topic] = rt + } + return rs, nil +} + +// DeleteTopicResponse contains the response for an individual deleted topic. +type DeleteTopicResponse struct { + Topic string // Topic is the topic that was deleted, if not using topic IDs. + ID TopicID // ID is the topic ID for this topic, if talking to Kafka v2.8+ and using topic IDs. + Err error // Err is any error preventing this topic from being deleted. + ErrMessage string // ErrMessage a potential extra message describing any error. +} + +// DeleteTopicResponses contains per-topic responses for deleted topics. +type DeleteTopicResponses map[string]DeleteTopicResponse + +// Sorted returns all delete topic responses sorted first by topic ID, then by +// topic name. 
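+//
+// For example (an illustrative sketch; it assumes a configured *kadm.Client
+// named adm and an in-scope ctx, with error handling elided):
+//
+//	resps, _ := adm.DeleteTopics(ctx, "example-topic-a", "example-topic-b")
+//	for _, r := range resps.Sorted() {
+//		fmt.Println(r.Topic, r.Err)
+//	}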
+func (rs DeleteTopicResponses) Sorted() []DeleteTopicResponse { + s := make([]DeleteTopicResponse, 0, len(rs)) + for _, d := range rs { + s = append(s, d) + } + sort.Slice(s, func(i, j int) bool { + l, r := s[i], s[j] + if l.ID.Less(r.ID) { + return true + } + return l.Topic < r.Topic + }) + return s +} + +// On calls fn for the response topic if it exists, returning the response and +// the error returned from fn. If fn is nil, this simply returns the response. +// +// The fn is given a copy of the response. This function returns the copy as +// well; any modifications within fn are modifications on the returned copy. +// +// If the topic does not exist, this returns kerr.UnknownTopicOrPartition. +func (rs DeleteTopicResponses) On(topic string, fn func(*DeleteTopicResponse) error) (DeleteTopicResponse, error) { + if len(rs) > 0 { + r, ok := rs[topic] + if ok { + if fn == nil { + return r, nil + } + return r, fn(&r) + } + } + return DeleteTopicResponse{}, kerr.UnknownTopicOrPartition +} + +// Error iterates over all responses and returns the first error +// encountered, if any. +func (rs DeleteTopicResponses) Error() error { + for _, r := range rs { + if r.Err != nil { + return r.Err + } + } + return nil +} + +// DeleteTopic issues a delete topic request for the given topic name with a +// (by default) 15s timeout. This is similar to DeleteTopics, but returns the +// kerr.ErrorForCode(response.ErrorCode) if the request/response is successful. +func (cl *Client) DeleteTopic(ctx context.Context, topic string) (DeleteTopicResponse, error) { + rs, err := cl.DeleteTopics(ctx, topic) + if err != nil { + return DeleteTopicResponse{}, err + } + r, exists := rs[topic] + if !exists { + return DeleteTopicResponse{}, errors.New("requested topic was not part of delete topic response") + } + return r, r.Err +} + +// DeleteTopics issues a delete topics request for the given topic names with a +// (by default) 15s timeout. +// +// This does not return an error on authorization failures, instead, +// authorization failures are included in the responses. This only returns an +// error if the request fails to be issued. +func (cl *Client) DeleteTopics(ctx context.Context, topics ...string) (DeleteTopicResponses, error) { + if len(topics) == 0 { + return make(DeleteTopicResponses), nil + } + + req := kmsg.NewDeleteTopicsRequest() + req.TimeoutMillis = cl.timeoutMillis + req.TopicNames = topics + for _, t := range topics { + rt := kmsg.NewDeleteTopicsRequestTopic() + rt.Topic = kmsg.StringPtr(t) + req.Topics = append(req.Topics, rt) + } + + resp, err := req.RequestWith(ctx, cl.cl) + if err != nil { + return nil, err + } + + rs := make(DeleteTopicResponses) + for _, t := range resp.Topics { + // A valid Kafka will return non-nil topics here, because we + // are deleting by topic name, not ID. We still check to be + // sure, but multiple invalid (nil) topics will collide. + var topic string + if t.Topic != nil { + topic = *t.Topic + } + rs[topic] = DeleteTopicResponse{ + Topic: topic, + ID: t.TopicID, + Err: kerr.ErrorForCode(t.ErrorCode), + ErrMessage: unptrStr(t.ErrorMessage), + } + } + return rs, nil +} + +// DeleteRecordsResponse contains the response for an individual partition from +// a delete records request. +type DeleteRecordsResponse struct { + Topic string // Topic is the topic this response is for. + Partition int32 // Partition is the partition this response is for. + LowWatermark int64 // LowWatermark is the new earliest / start offset for this partition if the request was successful. 
+	Err          error // Err is any error preventing the delete records request from being successful for this partition.
+}
+
+// DeleteRecordsResponses contains per-partition responses to a delete records request.
+type DeleteRecordsResponses map[string]map[int32]DeleteRecordsResponse
+
+// Lookup returns the response at t and p and whether it exists.
+func (ds DeleteRecordsResponses) Lookup(t string, p int32) (DeleteRecordsResponse, bool) {
+	if len(ds) == 0 {
+		return DeleteRecordsResponse{}, false
+	}
+	ps := ds[t]
+	if len(ps) == 0 {
+		return DeleteRecordsResponse{}, false
+	}
+	r, exists := ps[p]
+	return r, exists
+}
+
+// Each calls fn for every delete records response.
+func (ds DeleteRecordsResponses) Each(fn func(DeleteRecordsResponse)) {
+	for _, ps := range ds {
+		for _, d := range ps {
+			fn(d)
+		}
+	}
+}
+
+// Sorted returns all delete records responses sorted first by topic, then by
+// partition.
+func (rs DeleteRecordsResponses) Sorted() []DeleteRecordsResponse {
+	var s []DeleteRecordsResponse
+	for _, ps := range rs {
+		for _, d := range ps {
+			s = append(s, d)
+		}
+	}
+	sort.Slice(s, func(i, j int) bool {
+		l, r := s[i], s[j]
+		if l.Topic < r.Topic {
+			return true
+		}
+		if l.Topic > r.Topic {
+			return false
+		}
+		return l.Partition < r.Partition
+	})
+	return s
+}
+
+// On calls fn for the response topic/partition if it exists, returning the
+// response and the error returned from fn. If fn is nil, this simply returns
+// the response.
+//
+// The fn is given a copy of the response. This function returns the copy as
+// well; any modifications within fn are modifications on the returned copy.
+//
+// If the topic or partition does not exist, this returns
+// kerr.UnknownTopicOrPartition.
+func (rs DeleteRecordsResponses) On(topic string, partition int32, fn func(*DeleteRecordsResponse) error) (DeleteRecordsResponse, error) {
+	if len(rs) > 0 {
+		t, ok := rs[topic]
+		if ok {
+			p, ok := t[partition]
+			if ok {
+				if fn == nil {
+					return p, nil
+				}
+				return p, fn(&p)
+			}
+		}
+	}
+	return DeleteRecordsResponse{}, kerr.UnknownTopicOrPartition
+}
+
+// Error iterates over all responses and returns the first error
+// encountered, if any.
+func (rs DeleteRecordsResponses) Error() error {
+	for _, ps := range rs {
+		for _, r := range ps {
+			if r.Err != nil {
+				return r.Err
+			}
+		}
+	}
+	return nil
+}
+
+// DeleteRecords issues a delete records request for the given offsets. Per
+// offset, only the Offset field needs to be set.
+//
+// To delete records, Kafka sets the LogStartOffset for partitions to the
+// requested offset. All segments whose max offset is before the requested
+// offset are deleted, and any records within the segment before the requested
+// offset can no longer be read.
+//
+// This does not return an error on authorization failures, instead,
+// authorization failures are included in the responses.
+//
+// This may return *ShardErrors.
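+//
+// A minimal sketch (illustrative only: it assumes a configured *kadm.Client
+// named adm and an in-scope ctx, and it assumes this package's AddOffset
+// helper, which is defined alongside the Offsets type):
+//
+//	os := make(kadm.Offsets)
+//	os.AddOffset("example-topic", 0, 100, -1) // delete records before offset 100 on partition 0
+//	resps, err := adm.DeleteRecords(ctx, os)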
+func (cl *Client) DeleteRecords(ctx context.Context, os Offsets) (DeleteRecordsResponses, error) { + if len(os) == 0 { + return make(DeleteRecordsResponses), nil + } + + req := kmsg.NewPtrDeleteRecordsRequest() + req.TimeoutMillis = cl.timeoutMillis + for t, ps := range os { + rt := kmsg.NewDeleteRecordsRequestTopic() + rt.Topic = t + for p, o := range ps { + rp := kmsg.NewDeleteRecordsRequestTopicPartition() + rp.Partition = p + rp.Offset = o.At + rt.Partitions = append(rt.Partitions, rp) + } + req.Topics = append(req.Topics, rt) + } + + shards := cl.cl.RequestSharded(ctx, req) + rs := make(DeleteRecordsResponses) + return rs, shardErrEach(req, shards, func(kr kmsg.Response) error { + resp := kr.(*kmsg.DeleteRecordsResponse) + for _, t := range resp.Topics { + rt, exists := rs[t.Topic] + if !exists { // topic could be spread around brokers, we need to check existence + rt = make(map[int32]DeleteRecordsResponse) + rs[t.Topic] = rt + } + for _, p := range t.Partitions { + rt[p.Partition] = DeleteRecordsResponse{ + Topic: t.Topic, + Partition: p.Partition, + LowWatermark: p.LowWatermark, + Err: kerr.ErrorForCode(p.ErrorCode), + } + } + } + return nil + }) +} + +// CreatePartitionsResponse contains the response for an individual topic from +// a create partitions request. +type CreatePartitionsResponse struct { + Topic string // Topic is the topic this response is for. + Err error // Err is non-nil if partitions were unable to be added to this topic. + ErrMessage string // ErrMessage a potential extra message describing any error. +} + +// CreatePartitionsResponses contains per-topic responses for a create +// partitions request. +type CreatePartitionsResponses map[string]CreatePartitionsResponse + +// Sorted returns all create partitions responses sorted by topic. +func (rs CreatePartitionsResponses) Sorted() []CreatePartitionsResponse { + var s []CreatePartitionsResponse + for _, r := range rs { + s = append(s, r) + } + sort.Slice(s, func(i, j int) bool { return s[i].Topic < s[j].Topic }) + return s +} + +// On calls fn for the response topic if it exists, returning the response and +// the error returned from fn. If fn is nil, this simply returns the response. +// +// The fn is given a copy of the response. This function returns the copy as +// well; any modifications within fn are modifications on the returned copy. +// +// If the topic does not exist, this returns kerr.UnknownTopicOrPartition. +func (rs CreatePartitionsResponses) On(topic string, fn func(*CreatePartitionsResponse) error) (CreatePartitionsResponse, error) { + if len(rs) > 0 { + r, ok := rs[topic] + if ok { + if fn == nil { + return r, nil + } + return r, fn(&r) + } + } + return CreatePartitionsResponse{}, kerr.UnknownTopicOrPartition +} + +// Error iterates over all responses and returns the first error +// encountered, if any. +func (rs CreatePartitionsResponses) Error() error { + for _, r := range rs { + if r.Err != nil { + return r.Err + } + } + return nil +} + +// CreatePartitions issues a create partitions request for the given topics, +// adding "add" partitions to each topic. This request lets Kafka choose where +// the new partitions should be. +// +// This does not return an error on authorization failures for the create +// partitions request itself, instead, authorization failures are included in +// the responses. Before adding partitions, this request must issue a metadata +// request to learn the current count of partitions. If that fails, this +// returns the metadata request error. 
If you already know the final amount of +// partitions you want, you can use UpdatePartitions to set the count directly +// (rather than adding to the current count). You may consider checking +// ValidateCreatePartitions before using this method. +func (cl *Client) CreatePartitions(ctx context.Context, add int, topics ...string) (CreatePartitionsResponses, error) { + return cl.createPartitions(ctx, false, add, -1, topics) +} + +// UpdatePartitions issues a create partitions request for the given topics, +// setting the final partition count to "set" for each topic. This request lets +// Kafka choose where the new partitions should be. +// +// This does not return an error on authorization failures for the create +// partitions request itself, instead, authorization failures are included in +// the responses. Unlike CreatePartitions, this request uses your "set" value +// to set the new final count of partitions. "set" must be equal to or larger +// than the current count of partitions in the topic. All topics will have the +// same final count of partitions (unlike CreatePartitions, which allows you to +// add a specific count of partitions to topics that have a different amount of +// current partitions). You may consider checking ValidateUpdatePartitions +// before using this method. +func (cl *Client) UpdatePartitions(ctx context.Context, set int, topics ...string) (CreatePartitionsResponses, error) { + return cl.createPartitions(ctx, false, -1, set, topics) +} + +// ValidateCreatePartitions validates a create partitions request for adding +// "add" partitions to the given topics. +// +// This uses the same logic as CreatePartitions, but with the request's +// ValidateOnly field set to true. The response is the same response you would +// receive from CreatePartitions, but no partitions are actually added. +func (cl *Client) ValidateCreatePartitions(ctx context.Context, add int, topics ...string) (CreatePartitionsResponses, error) { + return cl.createPartitions(ctx, true, add, -1, topics) +} + +// ValidateUpdatePartitions validates a create partitions request for setting +// the partition count on the given topics to "set". +// +// This uses the same logic as UpdatePartitions, but with the request's +// ValidateOnly field set to true. The response is the same response you would +// receive from UpdatePartitions, but no partitions are actually added. +func (cl *Client) ValidateUpdatePartitions(ctx context.Context, set int, topics ...string) (CreatePartitionsResponses, error) { + return cl.createPartitions(ctx, true, -1, set, topics) +} + +func (cl *Client) createPartitions(ctx context.Context, dry bool, add, set int, topics []string) (CreatePartitionsResponses, error) { + if len(topics) == 0 { + return make(CreatePartitionsResponses), nil + } + + var td TopicDetails + var err error + if add != -1 { + td, err = cl.ListTopics(ctx, topics...) 
+ if err != nil { + return nil, err + } + } + + req := kmsg.NewCreatePartitionsRequest() + req.TimeoutMillis = cl.timeoutMillis + req.ValidateOnly = dry + for _, t := range topics { + rt := kmsg.NewCreatePartitionsRequestTopic() + rt.Topic = t + if add == -1 { + rt.Count = int32(set) + } else { + rt.Count = int32(len(td[t].Partitions) + add) + } + req.Topics = append(req.Topics, rt) + } + + resp, err := req.RequestWith(ctx, cl.cl) + if err != nil { + return nil, err + } + + rs := make(CreatePartitionsResponses) + for _, t := range resp.Topics { + rs[t.Topic] = CreatePartitionsResponse{ + Topic: t.Topic, + Err: kerr.ErrorForCode(t.ErrorCode), + ErrMessage: unptrStr(t.ErrorMessage), + } + } + return rs, nil +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kadm/txn.go b/vendor/github.com/twmb/franz-go/pkg/kadm/txn.go new file mode 100644 index 00000000000..2b8ccbe2e7a --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kadm/txn.go @@ -0,0 +1,761 @@ +package kadm + +import ( + "context" + "errors" + "sort" + + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" +) + +// DescribedProducer contains the state of a transactional producer's last +// produce. +type DescribedProducer struct { + Leader int32 // Leader is the leader broker for this topic / partition. + Topic string // Topic is the topic being produced to. + Partition int32 // Partition is the partition being produced to. + ProducerID int64 // ProducerID is the producer ID that produced. + ProducerEpoch int16 // ProducerEpoch is the epoch that produced. + LastSequence int32 // LastSequence is the last sequence number the producer produced. + LastTimestamp int64 // LastTimestamp is the last time this producer produced. + CoordinatorEpoch int32 // CoordinatorEpoch is the epoch of the transactional coordinator for the last produce. + CurrentTxnStartOffset int64 // CurrentTxnStartOffset is the first offset in the transaction. +} + +// Less returns whether the left described producer is less than the right, +// in order of: +// +// - Topic +// - Partition +// - ProducerID +// - ProducerEpoch +// - LastTimestamp +// - LastSequence +func (l *DescribedProducer) Less(r *DescribedProducer) bool { + if l.Topic < r.Topic { + return true + } + if l.Topic > r.Topic { + return false + } + if l.Partition < r.Partition { + return true + } + if l.Partition > r.Partition { + return false + } + if l.ProducerID < r.ProducerID { + return true + } + if l.ProducerID > r.ProducerID { + return false + } + if l.ProducerEpoch < r.ProducerEpoch { + return true + } + if l.ProducerEpoch > r.ProducerEpoch { + return false + } + if l.LastTimestamp < r.LastTimestamp { + return true + } + if l.LastTimestamp > r.LastTimestamp { + return false + } + return l.LastSequence < r.LastSequence +} + +// DescribedProducers maps producer IDs to the full described producer. +type DescribedProducers map[int64]DescribedProducer + +// Sorted returns the described producers sorted by topic, partition, and +// producer ID. +func (ds DescribedProducers) Sorted() []DescribedProducer { + var all []DescribedProducer + for _, d := range ds { + all = append(all, d) + } + sort.Slice(all, func(i, j int) bool { + l, r := all[i], all[j] + return l.Topic < r.Topic || l.Topic == r.Topic && (l.Partition < r.Partition || l.Partition == r.Partition && l.ProducerID < r.ProducerID) + }) + return all +} + +// Each calls fn for each described producer. 
+func (ds DescribedProducers) Each(fn func(DescribedProducer)) {
+	for _, d := range ds {
+		fn(d)
+	}
+}
+
+// DescribedProducersPartition is a partition whose producers were described.
+type DescribedProducersPartition struct {
+	Leader          int32              // Leader is the leader broker for this topic / partition.
+	Topic           string             // Topic is the topic whose producers were described.
+	Partition       int32              // Partition is the partition whose producers were described.
+	ActiveProducers DescribedProducers // ActiveProducers are producers actively transactionally producing to this partition.
+	Err             error              // Err is non-nil if describing this partition failed.
+	ErrMessage      string             // ErrMessage a potential extra message describing any error.
+}
+
+// DescribedProducersPartitions contains partitions whose producers were described.
+type DescribedProducersPartitions map[int32]DescribedProducersPartition
+
+// Sorted returns the described partitions sorted by topic and partition.
+func (ds DescribedProducersPartitions) Sorted() []DescribedProducersPartition {
+	var all []DescribedProducersPartition
+	for _, d := range ds {
+		all = append(all, d)
+	}
+	sort.Slice(all, func(i, j int) bool {
+		l, r := all[i], all[j]
+		return l.Topic < r.Topic || l.Topic == r.Topic && l.Partition < r.Partition
+	})
+	return all
+}
+
+// SortedProducers returns all producers sorted first by topic, then by
+// partition, then by producer ID.
+func (ds DescribedProducersPartitions) SortedProducers() []DescribedProducer {
+	var all []DescribedProducer
+	ds.EachProducer(func(d DescribedProducer) {
+		all = append(all, d)
+	})
+	sort.Slice(all, func(i, j int) bool {
+		l, r := all[i], all[j]
+		return l.Topic < r.Topic || l.Topic == r.Topic && (l.Partition < r.Partition || l.Partition == r.Partition && l.ProducerID < r.ProducerID)
+	})
+	return all
+}
+
+// Each calls fn for each partition.
+func (ds DescribedProducersPartitions) Each(fn func(DescribedProducersPartition)) {
+	for _, d := range ds {
+		fn(d)
+	}
+}
+
+// EachProducer calls fn for each producer in all partitions.
+func (ds DescribedProducersPartitions) EachProducer(fn func(DescribedProducer)) {
+	for _, d := range ds {
+		for _, p := range d.ActiveProducers {
+			fn(p)
+		}
+	}
+}
+
+// DescribedProducersTopic contains topic partitions whose producers were described.
+type DescribedProducersTopic struct {
+	Topic      string                       // Topic is the topic whose producers were described.
+	Partitions DescribedProducersPartitions // Partitions are partitions whose producers were described.
+}
+
+// DescribedProducersTopics contains topics whose producers were described.
+type DescribedProducersTopics map[string]DescribedProducersTopic
+
+// Sorted returns the described topics sorted by topic.
+func (ds DescribedProducersTopics) Sorted() []DescribedProducersTopic {
+	var all []DescribedProducersTopic
+	ds.Each(func(d DescribedProducersTopic) {
+		all = append(all, d)
+	})
+	sort.Slice(all, func(i, j int) bool {
+		l, r := all[i], all[j]
+		return l.Topic < r.Topic
+	})
+	return all
+}
+
+// SortedPartitions returns the described partitions sorted by topic and
+// partition.
+func (ds DescribedProducersTopics) SortedPartitions() []DescribedProducersPartition {
+	var all []DescribedProducersPartition
+	ds.EachPartition(func(d DescribedProducersPartition) {
+		all = append(all, d)
+	})
+	sort.Slice(all, func(i, j int) bool {
+		l, r := all[i], all[j]
+		return l.Topic < r.Topic || l.Topic == r.Topic && l.Partition < r.Partition
+	})
+	return all
+}
+
+// SortedProducers returns all producers sorted first by topic, then by
+// partition, then by producer ID.
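+//
+// For example (an illustrative sketch; it assumes a configured *kadm.Client
+// named adm and an in-scope ctx, with error handling elided):
+//
+//	producing, _ := adm.DescribeProducers(ctx, nil) // a nil topic set requests all partitions
+//	for _, p := range producing.SortedProducers() {
+//		fmt.Println(p.Topic, p.Partition, p.ProducerID, p.CurrentTxnStartOffset)
+//	}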
+func (ds DescribedProducersTopics) SortedProducers() []DescribedProducer {
+	var all []DescribedProducer
+	ds.EachProducer(func(d DescribedProducer) {
+		all = append(all, d)
+	})
+	sort.Slice(all, func(i, j int) bool {
+		l, r := all[i], all[j]
+		return l.Topic < r.Topic || l.Topic == r.Topic && (l.Partition < r.Partition || l.Partition == r.Partition && l.ProducerID < r.ProducerID)
+	})
+	return all
+}
+
+// Each calls fn for every topic.
+func (ds DescribedProducersTopics) Each(fn func(DescribedProducersTopic)) {
+	for _, d := range ds {
+		fn(d)
+	}
+}
+
+// EachPartition calls fn for all topic partitions.
+func (ds DescribedProducersTopics) EachPartition(fn func(DescribedProducersPartition)) {
+	for _, d := range ds {
+		for _, p := range d.Partitions {
+			fn(p)
+		}
+	}
+}
+
+// EachProducer calls fn for each producer in all topics and partitions.
+func (ds DescribedProducersTopics) EachProducer(fn func(DescribedProducer)) {
+	for _, d := range ds {
+		for _, p := range d.Partitions {
+			for _, b := range p.ActiveProducers {
+				fn(b)
+			}
+		}
+	}
+}
+
+// DescribeProducers describes all producers that are transactionally producing
+// to the requested topic set. This request can be used to detect hanging
+// transactions or other transaction-related problems. If the input set is
+// empty, this requests data for all partitions.
+//
+// This may return *ShardErrors or *AuthError.
+func (cl *Client) DescribeProducers(ctx context.Context, s TopicsSet) (DescribedProducersTopics, error) {
+	if len(s) == 0 {
+		m, err := cl.Metadata(ctx)
+		if err != nil {
+			return nil, err
+		}
+		s = m.Topics.TopicsSet()
+	} else if e := s.EmptyTopics(); len(e) > 0 {
+		m, err := cl.Metadata(ctx, e...)
+		if err != nil {
+			return nil, err
+		}
+		for t, ps := range m.Topics.TopicsSet() {
+			s[t] = ps
+		}
+	}
+
+	req := kmsg.NewPtrDescribeProducersRequest()
+	for _, t := range s.IntoList() {
+		rt := kmsg.NewDescribeProducersRequestTopic()
+		rt.Topic = t.Topic
+		rt.Partitions = t.Partitions
+		req.Topics = append(req.Topics, rt)
+	}
+	shards := cl.cl.RequestSharded(ctx, req)
+	dts := make(DescribedProducersTopics)
+	return dts, shardErrEachBroker(req, shards, func(b BrokerDetail, kr kmsg.Response) error {
+		resp := kr.(*kmsg.DescribeProducersResponse)
+		for _, rt := range resp.Topics {
+			dt, exists := dts[rt.Topic]
+			if !exists { // topic could be spread around brokers, we need to check existence
+				dt = DescribedProducersTopic{
+					Topic:      rt.Topic,
+					Partitions: make(DescribedProducersPartitions),
+				}
+				dts[rt.Topic] = dt
+			}
+			dps := dt.Partitions
+			for _, rp := range rt.Partitions {
+				if err := maybeAuthErr(rp.ErrorCode); err != nil {
+					return err
+				}
+				drs := make(DescribedProducers)
+				dp := DescribedProducersPartition{
+					Leader:          b.NodeID,
+					Topic:           rt.Topic,
+					Partition:       rp.Partition,
+					ActiveProducers: drs,
+					Err:             kerr.ErrorForCode(rp.ErrorCode),
+					ErrMessage:      unptrStr(rp.ErrorMessage),
+				}
+				dps[rp.Partition] = dp // one partition globally, no need to exist-check
+				for _, rr := range rp.ActiveProducers {
+					dr := DescribedProducer{
+						Leader:                b.NodeID,
+						Topic:                 rt.Topic,
+						Partition:             rp.Partition,
+						ProducerID:            rr.ProducerID,
+						ProducerEpoch:         int16(rr.ProducerEpoch),
+						LastSequence:          rr.LastSequence,
+						LastTimestamp:         rr.LastTimestamp,
+						CoordinatorEpoch:      rr.CoordinatorEpoch,
+						CurrentTxnStartOffset: rr.CurrentTxnStartOffset,
+					}
+					drs[dr.ProducerID] = dr
+				}
+			}
+		}
+		return nil
+	})
+}
+
+// DescribedTransaction contains data from a describe transactions response for
+// a single transactional ID.
+type DescribedTransaction struct {
+	Coordinator    int32  // Coordinator is the coordinator broker for this transactional ID.
+	TxnID          string // TxnID is the name of this transactional ID.
+	State          string // State is the state this transaction is in (Empty, Ongoing, PrepareCommit, PrepareAbort, CompleteCommit, CompleteAbort, Dead, PrepareEpochFence).
+	TimeoutMillis  int32  // TimeoutMillis is the timeout of this transaction in milliseconds.
+	StartTimestamp int64  // StartTimestamp is the millisecond timestamp when this transaction started.
+	ProducerID     int64  // ProducerID is the ID in use by the transactional ID.
+	ProducerEpoch  int16  // ProducerEpoch is the epoch associated with the producer ID.
+
+	// Topics is the set of partitions in the transaction, if active. When
+	// preparing to commit or abort, this includes only partitions which do
+	// not have markers. This does not include topics the user is not
+	// authorized to describe.
+	Topics TopicsSet
+
+	Err error // Err is non-nil if the transaction could not be described.
+}
+
+// DescribedTransactions contains information from a describe transactions
+// response.
+type DescribedTransactions map[string]DescribedTransaction
+
+// Sorted returns all described transactions sorted by transactional ID.
+func (ds DescribedTransactions) Sorted() []DescribedTransaction {
+	s := make([]DescribedTransaction, 0, len(ds))
+	for _, d := range ds {
+		s = append(s, d)
+	}
+	sort.Slice(s, func(i, j int) bool { return s[i].TxnID < s[j].TxnID })
+	return s
+}
+
+// Each calls fn for each described transaction.
+func (ds DescribedTransactions) Each(fn func(DescribedTransaction)) {
+	for _, d := range ds {
+		fn(d)
+	}
+}
+
+// On calls fn for the transactional ID if it exists, returning the transaction
+// and the error returned from fn. If fn is nil, this simply returns the
+// transaction.
+//
+// The fn is given a shallow copy of the transaction. This function returns the
+// copy as well; any modifications within fn are modifications on the returned
+// copy. Modifications on a described transaction's inner fields are persisted
+// to the original map (because slices are pointers).
+//
+// If the transaction does not exist, this returns
+// kerr.TransactionalIDNotFound.
+func (rs DescribedTransactions) On(txnID string, fn func(*DescribedTransaction) error) (DescribedTransaction, error) {
+	if len(rs) > 0 {
+		r, ok := rs[txnID]
+		if ok {
+			if fn == nil {
+				return r, nil
+			}
+			return r, fn(&r)
+		}
+	}
+	return DescribedTransaction{}, kerr.TransactionalIDNotFound
+}
+
+// TransactionalIDs returns a sorted list of all transactional IDs.
+func (ds DescribedTransactions) TransactionalIDs() []string {
+	all := make([]string, 0, len(ds))
+	for t := range ds {
+		all = append(all, t)
+	}
+	sort.Strings(all)
+	return all
+}
+
+// DescribeTransactions describes either all transactional IDs specified, or
+// all transactional IDs in the cluster if none are specified.
+//
+// This may return *ShardErrors or *AuthError.
+//
+// If no transactional IDs are specified and this method first lists
+// transactional IDs, and listing IDs returns a *ShardErrors, this function
+// describes all successfully listed IDs and appends the list shard errors to
+// any describe shard errors.
+//
+// If only one ID is described, there will be at most one request issued and
+// there is no need to deeply inspect the error.
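+//
+// A minimal usage sketch (illustrative only; it assumes a configured
+// *kadm.Client named adm and an in-scope ctx, with error handling elided):
+//
+//	txns, _ := adm.DescribeTransactions(ctx) // no IDs: lists, then describes all
+//	for _, t := range txns.Sorted() {
+//		fmt.Println(t.TxnID, t.State, t.ProducerID)
+//	}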
+func (cl *Client) DescribeTransactions(ctx context.Context, txnIDs ...string) (DescribedTransactions, error) {
+	var seList *ShardErrors
+	if len(txnIDs) == 0 {
+		listed, err := cl.ListTransactions(ctx, nil, nil)
+		switch {
+		case err == nil:
+		case errors.As(err, &seList):
+		default:
+			return nil, err
+		}
+		txnIDs = listed.TransactionalIDs()
+		if len(txnIDs) == 0 {
+			return nil, err
+		}
+	}
+
+	req := kmsg.NewPtrDescribeTransactionsRequest()
+	req.TransactionalIDs = txnIDs
+
+	shards := cl.cl.RequestSharded(ctx, req)
+	described := make(DescribedTransactions)
+	err := shardErrEachBroker(req, shards, func(b BrokerDetail, kr kmsg.Response) error {
+		resp := kr.(*kmsg.DescribeTransactionsResponse)
+		for _, rt := range resp.TransactionStates {
+			if err := maybeAuthErr(rt.ErrorCode); err != nil {
+				return err
+			}
+			t := DescribedTransaction{
+				Coordinator:    b.NodeID,
+				TxnID:          rt.TransactionalID,
+				State:          rt.State,
+				TimeoutMillis:  rt.TimeoutMillis,
+				StartTimestamp: rt.StartTimestamp,
+				ProducerID:     rt.ProducerID,
+				ProducerEpoch:  rt.ProducerEpoch,
+				Err:            kerr.ErrorForCode(rt.ErrorCode),
+			}
+			for _, rtt := range rt.Topics {
+				t.Topics.Add(rtt.Topic, rtt.Partitions...)
+			}
+			described[t.TxnID] = t // txnID lives on one coordinator, no need to exist-check
+		}
+		return nil
+	})
+
+	var seDesc *ShardErrors
+	switch {
+	case err == nil:
+		return described, seList.into()
+	case errors.As(err, &seDesc):
+		if seList != nil {
+			seDesc.Errs = append(seList.Errs, seDesc.Errs...)
+		}
+		return described, seDesc.into()
+	default:
+		return nil, err
+	}
+}
+
+// ListedTransaction contains data from a list transactions response for a
+// single transactional ID.
+type ListedTransaction struct {
+	Coordinator int32  // Coordinator is the coordinator broker for this transactional ID.
+	TxnID       string // TxnID is the name of this transactional ID.
+	ProducerID  int64  // ProducerID is the producer ID for this transaction.
+	State       string // State is the state this transaction is in (Empty, Ongoing, PrepareCommit, PrepareAbort, CompleteCommit, CompleteAbort, Dead, PrepareEpochFence).
+}
+
+// ListedTransactions contains information from a list transactions response.
+type ListedTransactions map[string]ListedTransaction
+
+// Sorted returns all transactions sorted by transactional ID.
+func (ls ListedTransactions) Sorted() []ListedTransaction {
+	s := make([]ListedTransaction, 0, len(ls))
+	for _, l := range ls {
+		s = append(s, l)
+	}
+	sort.Slice(s, func(i, j int) bool { return s[i].TxnID < s[j].TxnID })
+	return s
+}
+
+// Each calls fn for each listed transaction.
+func (ls ListedTransactions) Each(fn func(ListedTransaction)) {
+	for _, l := range ls {
+		fn(l)
+	}
+}
+
+// TransactionalIDs returns a sorted list of all transactional IDs.
+func (ls ListedTransactions) TransactionalIDs() []string {
+	all := make([]string, 0, len(ls))
+	for t := range ls {
+		all = append(all, t)
+	}
+	sort.Strings(all)
+	return all
+}
+
+// ListTransactions returns all transactions and their states in the cluster.
+// Filter states can be used to return transactions only in the requested
+// states. By default, this returns all transactions you have DESCRIBE access
+// to. Producer IDs can be specified to filter for transactions from the given
+// producer.
+//
+// This may return *ShardErrors or *AuthError.
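+//
+// A minimal usage sketch, assuming a configured *Client named cl (hypothetical
+// name) and filtering to transactions that are currently ongoing:
+//
+//	listed, err := cl.ListTransactions(ctx, nil, []string{"Ongoing"})
+//	if err != nil {
+//		// handle *ShardErrors or *AuthError
+//	}
+//	for _, t := range listed.Sorted() {
+//		fmt.Println(t.TxnID, t.State, t.Coordinator)
+//	}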
+func (cl *Client) ListTransactions(ctx context.Context, producerIDs []int64, filterStates []string) (ListedTransactions, error) { + req := kmsg.NewPtrListTransactionsRequest() + req.ProducerIDFilters = producerIDs + req.StateFilters = filterStates + shards := cl.cl.RequestSharded(ctx, req) + list := make(ListedTransactions) + return list, shardErrEachBroker(req, shards, func(b BrokerDetail, kr kmsg.Response) error { + resp := kr.(*kmsg.ListTransactionsResponse) + if err := maybeAuthErr(resp.ErrorCode); err != nil { + return err + } + if err := kerr.ErrorForCode(resp.ErrorCode); err != nil { + return err + } + for _, t := range resp.TransactionStates { + list[t.TransactionalID] = ListedTransaction{ // txnID lives on one coordinator, no need to exist-check + Coordinator: b.NodeID, + TxnID: t.TransactionalID, + ProducerID: t.ProducerID, + State: t.TransactionState, + } + } + return nil + }) +} + +// TxnMarkers marks the end of a partition: the producer ID / epoch doing the +// writing, whether this is a commit, the coordinator epoch of the broker we +// are writing to (for fencing), and the topics and partitions that we are +// writing this abort or commit for. +// +// This is a very low level admin request and should likely be built from data +// in a DescribeProducers response. See KIP-664 if you are trying to use this. +type TxnMarkers struct { + ProducerID int64 // ProducerID is the ID to write markers for. + ProducerEpoch int16 // ProducerEpoch is the epoch to write markers for. + Commit bool // Commit is true if we are committing, false if we are aborting. + CoordinatorEpoch int32 // CoordinatorEpoch is the epoch of the transactional coordinator we are writing to; this is used for fencing. + Topics TopicsSet // Topics are topics and partitions to write markers for. +} + +// TxnMarkersPartitionResponse is a response to a topic's partition within a +// single marker written. +type TxnMarkersPartitionResponse struct { + NodeID int32 // NodeID is the node that this marker was written to. + ProducerID int64 // ProducerID corresponds to the PID in the write marker request. + Topic string // Topic is the topic being responded to. + Partition int32 // Partition is the partition being responded to. + Err error // Err is non-nil if the WriteTxnMarkers request for this pid/topic/partition failed. +} + +// TxnMarkersPartitionResponses contains per-partition responses to a +// WriteTxnMarkers request. +type TxnMarkersPartitionResponses map[int32]TxnMarkersPartitionResponse + +// Sorted returns all partitions sorted by partition. +func (ps TxnMarkersPartitionResponses) Sorted() []TxnMarkersPartitionResponse { + var all []TxnMarkersPartitionResponse + ps.Each(func(p TxnMarkersPartitionResponse) { + all = append(all, p) + }) + sort.Slice(all, func(i, j int) bool { + l, r := all[i], all[j] + return l.Partition < r.Partition + }) + return all +} + +// Each calls fn for each partition. +func (ps TxnMarkersPartitionResponses) Each(fn func(TxnMarkersPartitionResponse)) { + for _, p := range ps { + fn(p) + } +} + +// TxnMarkersTopicResponse is a response to a topic within a single marker +// written. +type TxnMarkersTopicResponse struct { + ProducerID int64 // ProducerID corresponds to the PID in the write marker request. + Topic string // Topic is the topic being responded to. + Partitions TxnMarkersPartitionResponses // Partitions are the responses for partitions in this marker. +} + +// TxnMarkersTopicResponses contains per-topic responses to a WriteTxnMarkers +// request. 
+type TxnMarkersTopicResponses map[string]TxnMarkersTopicResponse
+
+// Sorted returns all topics sorted by topic.
+func (ts TxnMarkersTopicResponses) Sorted() []TxnMarkersTopicResponse {
+	var all []TxnMarkersTopicResponse
+	ts.Each(func(t TxnMarkersTopicResponse) {
+		all = append(all, t)
+	})
+	sort.Slice(all, func(i, j int) bool {
+		l, r := all[i], all[j]
+		return l.Topic < r.Topic
+	})
+	return all
+}
+
+// SortedPartitions returns all topics sorted by topic then partition.
+func (ts TxnMarkersTopicResponses) SortedPartitions() []TxnMarkersPartitionResponse {
+	var all []TxnMarkersPartitionResponse
+	ts.EachPartition(func(p TxnMarkersPartitionResponse) {
+		all = append(all, p)
+	})
+	sort.Slice(all, func(i, j int) bool {
+		l, r := all[i], all[j]
+		return l.Topic < r.Topic || l.Topic == r.Topic && l.Partition < r.Partition
+	})
+	return all
+}
+
+// Each calls fn for each topic.
+func (ts TxnMarkersTopicResponses) Each(fn func(TxnMarkersTopicResponse)) {
+	for _, t := range ts {
+		fn(t)
+	}
+}
+
+// EachPartition calls fn for every partition in all topics.
+func (ts TxnMarkersTopicResponses) EachPartition(fn func(TxnMarkersPartitionResponse)) {
+	for _, t := range ts {
+		for _, p := range t.Partitions {
+			fn(p)
+		}
+	}
+}
+
+// TxnMarkersResponse is a response for a single marker written.
+type TxnMarkersResponse struct {
+	ProducerID int64                    // ProducerID corresponds to the PID in the write marker request.
+	Topics     TxnMarkersTopicResponses // Topics contains the topics that markers were written for, for this ProducerID.
+}
+
+// TxnMarkersResponses contains per-producer-ID responses to a WriteTxnMarkers
+// request.
+type TxnMarkersResponses map[int64]TxnMarkersResponse
+
+// Sorted returns all markers sorted by producer ID.
+func (ms TxnMarkersResponses) Sorted() []TxnMarkersResponse {
+	var all []TxnMarkersResponse
+	ms.Each(func(m TxnMarkersResponse) {
+		all = append(all, m)
+	})
+	sort.Slice(all, func(i, j int) bool {
+		l, r := all[i], all[j]
+		return l.ProducerID < r.ProducerID
+	})
+	return all
+}
+
+// SortedTopics returns all marker topics sorted by producer ID then topic.
+func (ms TxnMarkersResponses) SortedTopics() []TxnMarkersTopicResponse {
+	var all []TxnMarkersTopicResponse
+	ms.EachTopic(func(t TxnMarkersTopicResponse) {
+		all = append(all, t)
+	})
+	sort.Slice(all, func(i, j int) bool {
+		l, r := all[i], all[j]
+		return l.ProducerID < r.ProducerID || l.ProducerID == r.ProducerID && l.Topic < r.Topic
+	})
+	return all
+}
+
+// SortedPartitions returns all marker topic partitions sorted by producer ID
+// then topic then partition.
+func (ms TxnMarkersResponses) SortedPartitions() []TxnMarkersPartitionResponse {
+	var all []TxnMarkersPartitionResponse
+	ms.EachPartition(func(p TxnMarkersPartitionResponse) {
+		all = append(all, p)
+	})
+	sort.Slice(all, func(i, j int) bool {
+		l, r := all[i], all[j]
+		// Parenthesized so topic and partition are only compared when the
+		// producer IDs are equal; without the parentheses, && binds tighter
+		// than || and the partition comparison would ignore the producer ID.
+		return l.ProducerID < r.ProducerID || l.ProducerID == r.ProducerID && (l.Topic < r.Topic || l.Topic == r.Topic && l.Partition < r.Partition)
+	})
+	return all
+}
+
+// Each calls fn for each marker response.
+func (ms TxnMarkersResponses) Each(fn func(TxnMarkersResponse)) {
+	for _, m := range ms {
+		fn(m)
+	}
+}
+
+// EachTopic calls fn for every topic in all marker responses.
+func (ms TxnMarkersResponses) EachTopic(fn func(TxnMarkersTopicResponse)) {
+	for _, m := range ms {
+		for _, t := range m.Topics {
+			fn(t)
+		}
+	}
+}
+
+// EachPartition calls fn for every partition in all topics in all marker
+// responses.
+func (ms TxnMarkersResponses) EachPartition(fn func(TxnMarkersPartitionResponse)) { + for _, m := range ms { + for _, t := range m.Topics { + for _, p := range t.Partitions { + fn(p) + } + } + } +} + +// WriteTxnMarkers writes transaction markers to brokers. This is an advanced +// admin way to close out open transactions. See KIP-664 for more details. +// +// This may return *ShardErrors or *AuthError. +func (cl *Client) WriteTxnMarkers(ctx context.Context, markers ...TxnMarkers) (TxnMarkersResponses, error) { + req := kmsg.NewPtrWriteTxnMarkersRequest() + for _, m := range markers { + rm := kmsg.NewWriteTxnMarkersRequestMarker() + rm.ProducerID = m.ProducerID + rm.ProducerEpoch = m.ProducerEpoch + rm.Committed = m.Commit + rm.CoordinatorEpoch = m.CoordinatorEpoch + for t, ps := range m.Topics { + rt := kmsg.NewWriteTxnMarkersRequestMarkerTopic() + rt.Topic = t + for p := range ps { + rt.Partitions = append(rt.Partitions, p) + } + rm.Topics = append(rm.Topics, rt) + } + req.Markers = append(req.Markers, rm) + } + shards := cl.cl.RequestSharded(ctx, req) + rs := make(TxnMarkersResponses) + return rs, shardErrEachBroker(req, shards, func(b BrokerDetail, kr kmsg.Response) error { + resp := kr.(*kmsg.WriteTxnMarkersResponse) + for _, rm := range resp.Markers { + m, exists := rs[rm.ProducerID] // partitions are spread around, our marker could be split: we need to check existence + if !exists { + m = TxnMarkersResponse{ + ProducerID: rm.ProducerID, + Topics: make(TxnMarkersTopicResponses), + } + rs[rm.ProducerID] = m + } + for _, rt := range rm.Topics { + t, exists := m.Topics[rt.Topic] + if !exists { // same thought + t = TxnMarkersTopicResponse{ + ProducerID: rm.ProducerID, + Topic: rt.Topic, + Partitions: make(TxnMarkersPartitionResponses), + } + m.Topics[rt.Topic] = t + } + for _, rp := range rt.Partitions { + if err := maybeAuthErr(rp.ErrorCode); err != nil { + return err + } + t.Partitions[rp.Partition] = TxnMarkersPartitionResponse{ // one partition globally, no need to exist-check + NodeID: b.NodeID, + ProducerID: rm.ProducerID, + Topic: rt.Topic, + Partition: rp.Partition, + Err: kerr.ErrorForCode(rp.ErrorCode), + } + } + } + } + return nil + }) +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kbin/primitives.go b/vendor/github.com/twmb/franz-go/pkg/kbin/primitives.go new file mode 100644 index 00000000000..487e7f6c2a3 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kbin/primitives.go @@ -0,0 +1,856 @@ +// Package kbin contains Kafka primitive reading and writing functions. +package kbin + +import ( + "encoding/binary" + "errors" + "math" + "math/bits" + "reflect" + "unsafe" +) + +// This file contains primitive type encoding and decoding. +// +// The Reader helper can be used even when content runs out +// or an error is hit; all other number requests will return +// zero so a decode will basically no-op. + +// ErrNotEnoughData is returned when a type could not fully decode +// from a slice because the slice did not have enough data. +var ErrNotEnoughData = errors.New("response did not contain enough data to be valid") + +// AppendBool appends 1 for true or 0 for false to dst. +func AppendBool(dst []byte, v bool) []byte { + if v { + return append(dst, 1) + } + return append(dst, 0) +} + +// AppendInt8 appends an int8 to dst. +func AppendInt8(dst []byte, i int8) []byte { + return append(dst, byte(i)) +} + +// AppendInt16 appends a big endian int16 to dst. 
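+//
+// For example, AppendInt16(nil, 1) yields []byte{0x00, 0x01}: the most
+// significant byte is appended first.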
+func AppendInt16(dst []byte, i int16) []byte { + return AppendUint16(dst, uint16(i)) +} + +// AppendUint16 appends a big endian uint16 to dst. +func AppendUint16(dst []byte, u uint16) []byte { + return append(dst, byte(u>>8), byte(u)) +} + +// AppendInt32 appends a big endian int32 to dst. +func AppendInt32(dst []byte, i int32) []byte { + return AppendUint32(dst, uint32(i)) +} + +// AppendInt64 appends a big endian int64 to dst. +func AppendInt64(dst []byte, i int64) []byte { + return appendUint64(dst, uint64(i)) +} + +// AppendFloat64 appends a big endian float64 to dst. +func AppendFloat64(dst []byte, f float64) []byte { + return appendUint64(dst, math.Float64bits(f)) +} + +// AppendUuid appends the 16 uuid bytes to dst. +func AppendUuid(dst []byte, uuid [16]byte) []byte { + return append(dst, uuid[:]...) +} + +func appendUint64(dst []byte, u uint64) []byte { + return append(dst, byte(u>>56), byte(u>>48), byte(u>>40), byte(u>>32), + byte(u>>24), byte(u>>16), byte(u>>8), byte(u)) +} + +// AppendUint32 appends a big endian uint32 to dst. +func AppendUint32(dst []byte, u uint32) []byte { + return append(dst, byte(u>>24), byte(u>>16), byte(u>>8), byte(u)) +} + +// uvarintLens could only be length 65, but using 256 allows bounds check +// elimination on lookup. +const uvarintLens = "\x01\x01\x01\x01\x01\x01\x01\x01\x02\x02\x02\x02\x02\x02\x02\x03\x03\x03\x03\x03\x03\x03\x04\x04\x04\x04\x04\x04\x04\x05\x05\x05\x05\x05\x05\x05\x06\x06\x06\x06\x06\x06\x06\x07\x07\x07\x07\x07\x07\x07\x08\x08\x08\x08\x08\x08\x08\x09\x09\x09\x09\x09\x09\x09\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + +// VarintLen returns how long i would be if it were varint encoded. +func VarintLen(i int32) int { + u := uint32(i)<<1 ^ uint32(i>>31) + return UvarintLen(u) +} + +// UvarintLen returns how long u would be if it were uvarint encoded. +func UvarintLen(u uint32) int { + return int(uvarintLens[byte(bits.Len32(u))]) +} + +// VarlongLen returns how long i would be if it were varlong encoded. +func VarlongLen(i int64) int { + u := uint64(i)<<1 ^ uint64(i>>63) + return uvarlongLen(u) +} + +func uvarlongLen(u uint64) int { + return int(uvarintLens[byte(bits.Len64(u))]) +} + +// Varint is a loop unrolled 32 bit varint decoder. The return semantics +// are the same as binary.Varint, with the added benefit that overflows +// in 5 byte encodings are handled rather than left to the user. +func Varint(in []byte) (int32, int) { + x, n := Uvarint(in) + return int32((x >> 1) ^ -(x & 1)), n +} + +// Uvarint is a loop unrolled 32 bit uvarint decoder. The return semantics +// are the same as binary.Uvarint, with the added benefit that overflows +// in 5 byte encodings are handled rather than left to the user. 
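+//
+// For example, Uvarint([]byte{0xac, 0x02}) returns (300, 2): the low seven
+// bits of each byte carry the value and the high bit marks continuation,
+// exactly as in binary.Uvarint.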
+func Uvarint(in []byte) (uint32, int) { + var x uint32 + var overflow int + + if len(in) < 1 { + goto fail + } + + x = uint32(in[0] & 0x7f) + if in[0]&0x80 == 0 { + return x, 1 + } else if len(in) < 2 { + goto fail + } + + x |= uint32(in[1]&0x7f) << 7 + if in[1]&0x80 == 0 { + return x, 2 + } else if len(in) < 3 { + goto fail + } + + x |= uint32(in[2]&0x7f) << 14 + if in[2]&0x80 == 0 { + return x, 3 + } else if len(in) < 4 { + goto fail + } + + x |= uint32(in[3]&0x7f) << 21 + if in[3]&0x80 == 0 { + return x, 4 + } else if len(in) < 5 { + goto fail + } + + x |= uint32(in[4]) << 28 + if in[4] <= 0x0f { + return x, 5 + } + + overflow = -5 + +fail: + return 0, overflow +} + +// Varlong is a loop unrolled 64 bit varint decoder. The return semantics +// are the same as binary.Varint, with the added benefit that overflows +// in 10 byte encodings are handled rather than left to the user. +func Varlong(in []byte) (int64, int) { + x, n := uvarlong(in) + return int64((x >> 1) ^ -(x & 1)), n +} + +func uvarlong(in []byte) (uint64, int) { + var x uint64 + var overflow int + + if len(in) < 1 { + goto fail + } + + x = uint64(in[0] & 0x7f) + if in[0]&0x80 == 0 { + return x, 1 + } else if len(in) < 2 { + goto fail + } + + x |= uint64(in[1]&0x7f) << 7 + if in[1]&0x80 == 0 { + return x, 2 + } else if len(in) < 3 { + goto fail + } + + x |= uint64(in[2]&0x7f) << 14 + if in[2]&0x80 == 0 { + return x, 3 + } else if len(in) < 4 { + goto fail + } + + x |= uint64(in[3]&0x7f) << 21 + if in[3]&0x80 == 0 { + return x, 4 + } else if len(in) < 5 { + goto fail + } + + x |= uint64(in[4]&0x7f) << 28 + if in[4]&0x80 == 0 { + return x, 5 + } else if len(in) < 6 { + goto fail + } + + x |= uint64(in[5]&0x7f) << 35 + if in[5]&0x80 == 0 { + return x, 6 + } else if len(in) < 7 { + goto fail + } + + x |= uint64(in[6]&0x7f) << 42 + if in[6]&0x80 == 0 { + return x, 7 + } else if len(in) < 8 { + goto fail + } + + x |= uint64(in[7]&0x7f) << 49 + if in[7]&0x80 == 0 { + return x, 8 + } else if len(in) < 9 { + goto fail + } + + x |= uint64(in[8]&0x7f) << 56 + if in[8]&0x80 == 0 { + return x, 9 + } else if len(in) < 10 { + goto fail + } + + x |= uint64(in[9]) << 63 + if in[9] <= 0x01 { + return x, 10 + } + + overflow = -10 + +fail: + return 0, overflow +} + +// AppendVarint appends a varint encoded i to dst. +func AppendVarint(dst []byte, i int32) []byte { + return AppendUvarint(dst, uint32(i)<<1^uint32(i>>31)) +} + +// AppendUvarint appends a uvarint encoded u to dst. +func AppendUvarint(dst []byte, u uint32) []byte { + switch UvarintLen(u) { + case 5: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte((u>>14)&0x7f|0x80), + byte((u>>21)&0x7f|0x80), + byte(u>>28)) + case 4: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte((u>>14)&0x7f|0x80), + byte(u>>21)) + case 3: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte(u>>14)) + case 2: + return append(dst, + byte(u&0x7f|0x80), + byte(u>>7)) + case 1: + return append(dst, byte(u)) + } + return dst +} + +// AppendVarlong appends a varint encoded i to dst. 
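+//
+// Varlongs zig-zag encode the sign, so values near zero stay small: for
+// example, AppendVarlong(nil, 1) yields []byte{0x02} and
+// AppendVarlong(nil, -1) yields []byte{0x01}.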
+func AppendVarlong(dst []byte, i int64) []byte {
+	return appendUvarlong(dst, uint64(i)<<1^uint64(i>>63))
+}
+
+func appendUvarlong(dst []byte, u uint64) []byte {
+	switch uvarlongLen(u) {
+	case 10:
+		return append(dst,
+			byte(u&0x7f|0x80),
+			byte((u>>7)&0x7f|0x80),
+			byte((u>>14)&0x7f|0x80),
+			byte((u>>21)&0x7f|0x80),
+			byte((u>>28)&0x7f|0x80),
+			byte((u>>35)&0x7f|0x80),
+			byte((u>>42)&0x7f|0x80),
+			byte((u>>49)&0x7f|0x80),
+			byte((u>>56)&0x7f|0x80),
+			byte(u>>63))
+	case 9:
+		return append(dst,
+			byte(u&0x7f|0x80),
+			byte((u>>7)&0x7f|0x80),
+			byte((u>>14)&0x7f|0x80),
+			byte((u>>21)&0x7f|0x80),
+			byte((u>>28)&0x7f|0x80),
+			byte((u>>35)&0x7f|0x80),
+			byte((u>>42)&0x7f|0x80),
+			byte((u>>49)&0x7f|0x80),
+			byte(u>>56))
+	case 8:
+		return append(dst,
+			byte(u&0x7f|0x80),
+			byte((u>>7)&0x7f|0x80),
+			byte((u>>14)&0x7f|0x80),
+			byte((u>>21)&0x7f|0x80),
+			byte((u>>28)&0x7f|0x80),
+			byte((u>>35)&0x7f|0x80),
+			byte((u>>42)&0x7f|0x80),
+			byte(u>>49))
+	case 7:
+		return append(dst,
+			byte(u&0x7f|0x80),
+			byte((u>>7)&0x7f|0x80),
+			byte((u>>14)&0x7f|0x80),
+			byte((u>>21)&0x7f|0x80),
+			byte((u>>28)&0x7f|0x80),
+			byte((u>>35)&0x7f|0x80),
+			byte(u>>42))
+	case 6:
+		return append(dst,
+			byte(u&0x7f|0x80),
+			byte((u>>7)&0x7f|0x80),
+			byte((u>>14)&0x7f|0x80),
+			byte((u>>21)&0x7f|0x80),
+			byte((u>>28)&0x7f|0x80),
+			byte(u>>35))
+	case 5:
+		return append(dst,
+			byte(u&0x7f|0x80),
+			byte((u>>7)&0x7f|0x80),
+			byte((u>>14)&0x7f|0x80),
+			byte((u>>21)&0x7f|0x80),
+			byte(u>>28))
+	case 4:
+		return append(dst,
+			byte(u&0x7f|0x80),
+			byte((u>>7)&0x7f|0x80),
+			byte((u>>14)&0x7f|0x80),
+			byte(u>>21))
+	case 3:
+		return append(dst,
+			byte(u&0x7f|0x80),
+			byte((u>>7)&0x7f|0x80),
+			byte(u>>14))
+	case 2:
+		return append(dst,
+			byte(u&0x7f|0x80),
+			byte(u>>7))
+	case 1:
+		return append(dst, byte(u))
+	}
+	return dst
+}
+
+// AppendString appends a string to dst prefixed with its int16 length.
+func AppendString(dst []byte, s string) []byte {
+	dst = AppendInt16(dst, int16(len(s)))
+	return append(dst, s...)
+}
+
+// AppendCompactString appends a string to dst prefixed with its uvarint length
+// starting at 1; 0 is reserved for null, which compact strings are not
+// (nullable compact ones are!). Thus, the length is the decoded uvarint - 1.
+//
+// For KIP-482.
+func AppendCompactString(dst []byte, s string) []byte {
+	dst = AppendUvarint(dst, 1+uint32(len(s)))
+	return append(dst, s...)
+}
+
+// AppendNullableString appends a potentially nil string to dst prefixed with
+// its int16 length or int16(-1) if nil.
+func AppendNullableString(dst []byte, s *string) []byte {
+	if s == nil {
+		return AppendInt16(dst, -1)
+	}
+	return AppendString(dst, *s)
+}
+
+// AppendCompactNullableString appends a potentially nil string to dst with its
+// uvarint length starting at 1, with 0 indicating null. Thus, the length is
+// the decoded uvarint - 1.
+//
+// For KIP-482.
+func AppendCompactNullableString(dst []byte, s *string) []byte {
+	if s == nil {
+		return AppendUvarint(dst, 0)
+	}
+	return AppendCompactString(dst, *s)
+}
+
+// AppendBytes appends bytes to dst prefixed with its int32 length.
+func AppendBytes(dst, b []byte) []byte {
+	dst = AppendInt32(dst, int32(len(b)))
+	return append(dst, b...)
+}
+
+// AppendCompactBytes appends bytes to dst prefixed with its uvarint length
+// starting at 1; 0 is reserved for null, which compact bytes are not (nullable
+// compact ones are!). Thus, the length is the decoded uvarint - 1.
+//
+// For KIP-482.
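+//
+// For example, AppendCompactBytes(nil, []byte("ab")) yields
+// []byte{0x03, 'a', 'b'}: the uvarint prefix is len("ab")+1 = 3.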
+func AppendCompactBytes(dst, b []byte) []byte {
+	dst = AppendUvarint(dst, 1+uint32(len(b)))
+	return append(dst, b...)
+}
+
+// AppendNullableBytes appends a potentially nil slice to dst prefixed with its
+// int32 length or int32(-1) if nil.
+func AppendNullableBytes(dst, b []byte) []byte {
+	if b == nil {
+		return AppendInt32(dst, -1)
+	}
+	return AppendBytes(dst, b)
+}
+
+// AppendCompactNullableBytes appends a potentially nil slice to dst with its
+// uvarint length starting at 1, with 0 indicating null. Thus, the length is
+// the decoded uvarint - 1.
+//
+// For KIP-482.
+func AppendCompactNullableBytes(dst, b []byte) []byte {
+	if b == nil {
+		return AppendUvarint(dst, 0)
+	}
+	return AppendCompactBytes(dst, b)
+}
+
+// AppendVarintString appends a string to dst prefixed with its length encoded
+// as a varint.
+func AppendVarintString(dst []byte, s string) []byte {
+	dst = AppendVarint(dst, int32(len(s)))
+	return append(dst, s...)
+}
+
+// AppendVarintBytes appends a slice to dst prefixed with its length encoded as
+// a varint.
+func AppendVarintBytes(dst, b []byte) []byte {
+	if b == nil {
+		return AppendVarint(dst, -1)
+	}
+	dst = AppendVarint(dst, int32(len(b)))
+	return append(dst, b...)
+}
+
+// AppendArrayLen appends the length of an array as an int32 to dst.
+func AppendArrayLen(dst []byte, l int) []byte {
+	return AppendInt32(dst, int32(l))
+}
+
+// AppendCompactArrayLen appends the length of an array as a uvarint to dst
+// as the length + 1.
+//
+// For KIP-482.
+func AppendCompactArrayLen(dst []byte, l int) []byte {
+	return AppendUvarint(dst, 1+uint32(l))
+}
+
+// AppendNullableArrayLen appends the length of an array as an int32 to dst,
+// or -1 if isNil is true.
+func AppendNullableArrayLen(dst []byte, l int, isNil bool) []byte {
+	if isNil {
+		return AppendInt32(dst, -1)
+	}
+	return AppendInt32(dst, int32(l))
+}
+
+// AppendCompactNullableArrayLen appends the length of an array as a uvarint to
+// dst as the length + 1; if isNil is true, this appends 0 as a uvarint.
+//
+// For KIP-482.
+func AppendCompactNullableArrayLen(dst []byte, l int, isNil bool) []byte {
+	if isNil {
+		return AppendUvarint(dst, 0)
+	}
+	return AppendUvarint(dst, 1+uint32(l))
+}
+
+// Reader is used to decode Kafka messages.
+//
+// For all functions on Reader, if the reader has been invalidated, functions
+// return defaults (false, 0, nil, ""). Use Complete to detect if the reader
+// was invalidated or if the reader has remaining data.
+type Reader struct {
+	Src []byte
+	bad bool
+}
+
+// Bool returns a bool from the reader.
+func (b *Reader) Bool() bool {
+	if len(b.Src) < 1 {
+		b.bad = true
+		b.Src = nil
+		return false
+	}
+	t := b.Src[0] != 0 // if '0', false
+	b.Src = b.Src[1:]
+	return t
+}
+
+// Int8 returns an int8 from the reader.
+func (b *Reader) Int8() int8 {
+	if len(b.Src) < 1 {
+		b.bad = true
+		b.Src = nil
+		return 0
+	}
+	r := b.Src[0]
+	b.Src = b.Src[1:]
+	return int8(r)
+}
+
+// Int16 returns an int16 from the reader.
+func (b *Reader) Int16() int16 {
+	if len(b.Src) < 2 {
+		b.bad = true
+		b.Src = nil
+		return 0
+	}
+	r := int16(binary.BigEndian.Uint16(b.Src))
+	b.Src = b.Src[2:]
+	return r
+}
+
+// Uint16 returns a uint16 from the reader.
+func (b *Reader) Uint16() uint16 {
+	if len(b.Src) < 2 {
+		b.bad = true
+		b.Src = nil
+		return 0
+	}
+	r := binary.BigEndian.Uint16(b.Src)
+	b.Src = b.Src[2:]
+	return r
+}
+
+// Int32 returns an int32 from the reader.
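+//
+// For example, a Reader with Src []byte{0x00, 0x00, 0x00, 0x07} returns 7
+// and advances past the four bytes read.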
+func (b *Reader) Int32() int32 { + if len(b.Src) < 4 { + b.bad = true + b.Src = nil + return 0 + } + r := int32(binary.BigEndian.Uint32(b.Src)) + b.Src = b.Src[4:] + return r +} + +// Int64 returns an int64 from the reader. +func (b *Reader) Int64() int64 { + return int64(b.readUint64()) +} + +// Uuid returns a uuid from the reader. +func (b *Reader) Uuid() [16]byte { + var r [16]byte + copy(r[:], b.Span(16)) + return r +} + +// Float64 returns a float64 from the reader. +func (b *Reader) Float64() float64 { + return math.Float64frombits(b.readUint64()) +} + +func (b *Reader) readUint64() uint64 { + if len(b.Src) < 8 { + b.bad = true + b.Src = nil + return 0 + } + r := binary.BigEndian.Uint64(b.Src) + b.Src = b.Src[8:] + return r +} + +// Uint32 returns a uint32 from the reader. +func (b *Reader) Uint32() uint32 { + if len(b.Src) < 4 { + b.bad = true + b.Src = nil + return 0 + } + r := binary.BigEndian.Uint32(b.Src) + b.Src = b.Src[4:] + return r +} + +// Varint returns a varint int32 from the reader. +func (b *Reader) Varint() int32 { + val, n := Varint(b.Src) + if n <= 0 { + b.bad = true + b.Src = nil + return 0 + } + b.Src = b.Src[n:] + return val +} + +// Varlong returns a varlong int64 from the reader. +func (b *Reader) Varlong() int64 { + val, n := Varlong(b.Src) + if n <= 0 { + b.bad = true + b.Src = nil + return 0 + } + b.Src = b.Src[n:] + return val +} + +// Uvarint returns a uvarint encoded uint32 from the reader. +func (b *Reader) Uvarint() uint32 { + val, n := Uvarint(b.Src) + if n <= 0 { + b.bad = true + b.Src = nil + return 0 + } + b.Src = b.Src[n:] + return val +} + +// Span returns l bytes from the reader. +func (b *Reader) Span(l int) []byte { + if len(b.Src) < l || l < 0 { + b.bad = true + b.Src = nil + return nil + } + r := b.Src[:l:l] + b.Src = b.Src[l:] + return r +} + +// UnsafeString returns a Kafka string from the reader without allocating using +// the unsafe package. This must be used with care; note the string holds a +// reference to the original slice. +func (b *Reader) UnsafeString() string { + l := b.Int16() + return UnsafeString(b.Span(int(l))) +} + +// String returns a Kafka string from the reader. +func (b *Reader) String() string { + l := b.Int16() + return string(b.Span(int(l))) +} + +// UnsafeCompactString returns a Kafka compact string from the reader without +// allocating using the unsafe package. This must be used with care; note the +// string holds a reference to the original slice. +func (b *Reader) UnsafeCompactString() string { + l := int(b.Uvarint()) - 1 + return UnsafeString(b.Span(l)) +} + +// CompactString returns a Kafka compact string from the reader. +func (b *Reader) CompactString() string { + l := int(b.Uvarint()) - 1 + return string(b.Span(l)) +} + +// UnsafeNullableString returns a Kafka nullable string from the reader without +// allocating using the unsafe package. This must be used with care; note the +// string holds a reference to the original slice. +func (b *Reader) UnsafeNullableString() *string { + l := b.Int16() + if l < 0 { + return nil + } + s := UnsafeString(b.Span(int(l))) + return &s +} + +// NullableString returns a Kafka nullable string from the reader. +func (b *Reader) NullableString() *string { + l := b.Int16() + if l < 0 { + return nil + } + s := string(b.Span(int(l))) + return &s +} + +// UnsafeCompactNullableString returns a Kafka compact nullable string from the +// reader without allocating using the unsafe package. This must be used with +// care; note the string holds a reference to the original slice. 
+func (b *Reader) UnsafeCompactNullableString() *string { + l := int(b.Uvarint()) - 1 + if l < 0 { + return nil + } + s := UnsafeString(b.Span(l)) + return &s +} + +// CompactNullableString returns a Kafka compact nullable string from the +// reader. +func (b *Reader) CompactNullableString() *string { + l := int(b.Uvarint()) - 1 + if l < 0 { + return nil + } + s := string(b.Span(l)) + return &s +} + +// Bytes returns a Kafka byte array from the reader. +// +// This never returns nil. +func (b *Reader) Bytes() []byte { + l := b.Int32() + // This is not to spec, but it is not clearly documented and Microsoft + // EventHubs fails here. -1 means null, which should throw an + // exception. EventHubs uses -1 to mean "does not exist" on some + // non-nullable fields. + // + // Until EventHubs is fixed, we return an empty byte slice for null. + if l == -1 { + return []byte{} + } + return b.Span(int(l)) +} + +// CompactBytes returns a Kafka compact byte array from the reader. +// +// This never returns nil. +func (b *Reader) CompactBytes() []byte { + l := int(b.Uvarint()) - 1 + if l == -1 { // same as above: -1 should not be allowed here + return []byte{} + } + return b.Span(l) +} + +// NullableBytes returns a Kafka nullable byte array from the reader, returning +// nil as appropriate. +func (b *Reader) NullableBytes() []byte { + l := b.Int32() + if l < 0 { + return nil + } + r := b.Span(int(l)) + return r +} + +// CompactNullableBytes returns a Kafka compact nullable byte array from the +// reader, returning nil as appropriate. +func (b *Reader) CompactNullableBytes() []byte { + l := int(b.Uvarint()) - 1 + if l < 0 { + return nil + } + r := b.Span(l) + return r +} + +// ArrayLen returns a Kafka array length from the reader. +func (b *Reader) ArrayLen() int32 { + r := b.Int32() + // The min size of a Kafka type is a byte, so if we do not have + // at least the array length of bytes left, it is bad. + if len(b.Src) < int(r) { + b.bad = true + b.Src = nil + return 0 + } + return r +} + +// VarintArrayLen returns a Kafka array length from the reader. +func (b *Reader) VarintArrayLen() int32 { + r := b.Varint() + // The min size of a Kafka type is a byte, so if we do not have + // at least the array length of bytes left, it is bad. + if len(b.Src) < int(r) { + b.bad = true + b.Src = nil + return 0 + } + return r +} + +// CompactArrayLen returns a Kafka compact array length from the reader. +func (b *Reader) CompactArrayLen() int32 { + r := int32(b.Uvarint()) - 1 + // The min size of a Kafka type is a byte, so if we do not have + // at least the array length of bytes left, it is bad. + if len(b.Src) < int(r) { + b.bad = true + b.Src = nil + return 0 + } + return r +} + +// VarintBytes returns a Kafka encoded varint array from the reader, returning +// nil as appropriate. +func (b *Reader) VarintBytes() []byte { + l := b.Varint() + if l < 0 { + return nil + } + return b.Span(int(l)) +} + +// UnsafeVarintString returns a Kafka encoded varint string from the reader +// without allocating using the unsafe package. This must be used with care; +// note the string holds a reference to the original slice. +func (b *Reader) UnsafeVarintString() string { + return UnsafeString(b.VarintBytes()) +} + +// VarintString returns a Kafka encoded varint string from the reader. +func (b *Reader) VarintString() string { + return string(b.VarintBytes()) +} + +// Complete returns ErrNotEnoughData if the source ran out while decoding. 
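+//
+// A minimal sketch of the decode-then-check pattern, assuming raw is a
+// hypothetical byte slice containing an int16-prefixed string followed by an
+// int32:
+//
+//	r := Reader{Src: raw}
+//	s := r.String()
+//	n := r.Int32()
+//	if err := r.Complete(); err != nil {
+//		// raw ran out mid-decode; discard s and n
+//	}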
+func (b *Reader) Complete() error {
+	if b.bad {
+		return ErrNotEnoughData
+	}
+	return nil
+}
+
+// Ok returns true if the reader is still ok.
+func (b *Reader) Ok() bool {
+	return !b.bad
+}
+
+// UnsafeString returns the slice as a string using unsafe rule (6).
+func UnsafeString(slice []byte) string {
+	var str string
+	strhdr := (*reflect.StringHeader)(unsafe.Pointer(&str))             //nolint:gosec // known way to convert slice to string
+	strhdr.Data = ((*reflect.SliceHeader)(unsafe.Pointer(&slice))).Data //nolint:gosec // known way to convert slice to string
+	strhdr.Len = len(slice)
+	return str
+}
diff --git a/vendor/github.com/twmb/franz-go/pkg/kerr/kerr.go b/vendor/github.com/twmb/franz-go/pkg/kerr/kerr.go
new file mode 100644
index 00000000000..9cd71ea4f9a
--- /dev/null
+++ b/vendor/github.com/twmb/franz-go/pkg/kerr/kerr.go
@@ -0,0 +1,335 @@
+// Package kerr contains Kafka errors.
+//
+// The errors are undocumented to avoid duplicating the official descriptions
+// that can be found at https://kafka.apache.org/protocol.html#protocol_error_codes (although
+// this code does duplicate the descriptions into the errors themselves, so the
+// descriptions can be seen as the documentation).
+//
+// Since this package is dedicated to errors and the package is named "kerr",
+// all errors elide the standard "Err" prefix.
+package kerr
+
+import (
+	"errors"
+	"fmt"
+)
+
+// Error is a Kafka error.
+type Error struct {
+	// Message is the string form of a Kafka error code
+	// (UNKNOWN_SERVER_ERROR, etc).
+	Message string
+	// Code is a Kafka error code.
+	Code int16
+	// Retriable is whether the error is considered retriable by Kafka.
+	Retriable bool
+	// Description is a succinct description of what this error means.
+	Description string
+}
+
+func (e *Error) Error() string {
+	return fmt.Sprintf("%s: %s", e.Message, e.Description)
+}
+
+// ErrorForCode returns the error corresponding to the given error code.
+//
+// If the code is unknown, this returns UnknownServerError.
+// If the code is 0, this returns nil.
+func ErrorForCode(code int16) error {
+	err, exists := code2err[code]
+	if !exists {
+		return UnknownServerError
+	}
+	return err
+}
+
+// TypedErrorForCode returns the kerr.Error corresponding to the given error
+// code.
+//
+// If the code is unknown, this returns UnknownServerError.
+// If the code is 0, this returns nil.
+//
+// Note that this function is provided as a convenience for code that needs to
+// work with the *Error type directly, but it comes with a caveat: because this
+// can return a typed nil, passing its return value to a function that accepts
+// an error (the Go error interface) will never yield a nil error. Instead, the
+// result will be a non-nil error holding a nil internal value.
+func TypedErrorForCode(code int16) *Error {
+	err, exists := code2err[code]
+	if !exists {
+		return UnknownServerError
+	}
+	if err == nil {
+		return nil
+	}
+	return err.(*Error)
+}
+
+// IsRetriable returns whether a Kafka error is considered retriable.
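+//
+// A minimal sketch of the intended pattern, assuming code is a hypothetical
+// int16 error code taken from a broker response:
+//
+//	if err := ErrorForCode(code); err != nil {
+//		if IsRetriable(err) {
+//			// back off and retry the request
+//		}
+//		// otherwise, surface the error to the caller
+//	}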
+func IsRetriable(err error) bool { + var kerr *Error + return errors.As(err, &kerr) && kerr.Retriable +} + +var ( + UnknownServerError = &Error{"UNKNOWN_SERVER_ERROR", -1, false, "The server experienced an unexpected error when processing the request."} + OffsetOutOfRange = &Error{"OFFSET_OUT_OF_RANGE", 1, false, "The requested offset is not within the range of offsets maintained by the server."} + CorruptMessage = &Error{"CORRUPT_MESSAGE", 2, true, "This message has failed its CRC checksum, exceeds the valid size, has a null key for a compacted topic, or is otherwise corrupt."} + UnknownTopicOrPartition = &Error{"UNKNOWN_TOPIC_OR_PARTITION", 3, true, "This server does not host this topic-partition."} + InvalidFetchSize = &Error{"INVALID_FETCH_SIZE", 4, false, "The requested fetch size is invalid."} + LeaderNotAvailable = &Error{"LEADER_NOT_AVAILABLE", 5, true, "There is no leader for this topic-partition as we are in the middle of a leadership election."} + NotLeaderForPartition = &Error{"NOT_LEADER_FOR_PARTITION", 6, true, "This server is not the leader for that topic-partition."} + RequestTimedOut = &Error{"REQUEST_TIMED_OUT", 7, true, "The request timed out."} + BrokerNotAvailable = &Error{"BROKER_NOT_AVAILABLE", 8, true, "The broker is not available."} + ReplicaNotAvailable = &Error{"REPLICA_NOT_AVAILABLE", 9, true, "The replica is not available for the requested topic-partition."} + MessageTooLarge = &Error{"MESSAGE_TOO_LARGE", 10, false, "The request included a message larger than the max message size the server will accept."} + StaleControllerEpoch = &Error{"STALE_CONTROLLER_EPOCH", 11, false, "The controller moved to another broker."} + OffsetMetadataTooLarge = &Error{"OFFSET_METADATA_TOO_LARGE", 12, false, "The metadata field of the offset request was too large."} + NetworkException = &Error{"NETWORK_EXCEPTION", 13, true, "The server disconnected before a response was received."} + CoordinatorLoadInProgress = &Error{"COORDINATOR_LOAD_IN_PROGRESS", 14, true, "The coordinator is loading and hence can't process requests."} + CoordinatorNotAvailable = &Error{"COORDINATOR_NOT_AVAILABLE", 15, true, "The coordinator is not available."} + NotCoordinator = &Error{"NOT_COORDINATOR", 16, true, "This is not the correct coordinator."} + InvalidTopicException = &Error{"INVALID_TOPIC_EXCEPTION", 17, false, "The request attempted to perform an operation on an invalid topic."} + RecordListTooLarge = &Error{"RECORD_LIST_TOO_LARGE", 18, false, "The request included message batch larger than the configured segment size on the server."} + NotEnoughReplicas = &Error{"NOT_ENOUGH_REPLICAS", 19, true, "Messages are rejected since there are fewer in-sync replicas than required."} + NotEnoughReplicasAfterAppend = &Error{"NOT_ENOUGH_REPLICAS_AFTER_APPEND", 20, true, "Messages are written to the log, but to fewer in-sync replicas than required."} + InvalidRequiredAcks = &Error{"INVALID_REQUIRED_ACKS", 21, false, "Produce request specified an invalid value for required acks."} + IllegalGeneration = &Error{"ILLEGAL_GENERATION", 22, false, "Specified group generation id is not valid."} + InconsistentGroupProtocol = &Error{"INCONSISTENT_GROUP_PROTOCOL", 23, false, "The group member's supported protocols are incompatible with those of existing members or first group member tried to join with empty protocol type or empty protocol list."} + InvalidGroupID = &Error{"INVALID_GROUP_ID", 24, false, "The configured groupID is invalid."} + UnknownMemberID = &Error{"UNKNOWN_MEMBER_ID", 25, false, "The coordinator is not 
aware of this member."} + InvalidSessionTimeout = &Error{"INVALID_SESSION_TIMEOUT", 26, false, "The session timeout is not within the range allowed by the broker (as configured by group.min.session.timeout.ms and group.max.session.timeout.ms)."} + RebalanceInProgress = &Error{"REBALANCE_IN_PROGRESS", 27, false, "The group is rebalancing, so a rejoin is needed."} + InvalidCommitOffsetSize = &Error{"INVALID_COMMIT_OFFSET_SIZE", 28, false, "The committing offset data size is not valid."} + TopicAuthorizationFailed = &Error{"TOPIC_AUTHORIZATION_FAILED", 29, false, "Not authorized to access topics: [Topic authorization failed.]"} + GroupAuthorizationFailed = &Error{"GROUP_AUTHORIZATION_FAILED", 30, false, "Not authorized to access group: Group authorization failed."} + ClusterAuthorizationFailed = &Error{"CLUSTER_AUTHORIZATION_FAILED", 31, false, "Cluster authorization failed."} + InvalidTimestamp = &Error{"INVALID_TIMESTAMP", 32, false, "The timestamp of the message is out of acceptable range."} + UnsupportedSaslMechanism = &Error{"UNSUPPORTED_SASL_MECHANISM", 33, false, "The broker does not support the requested SASL mechanism."} + IllegalSaslState = &Error{"ILLEGAL_SASL_STATE", 34, false, "Request is not valid given the current SASL state."} + UnsupportedVersion = &Error{"UNSUPPORTED_VERSION", 35, false, "The version of API is not supported."} + TopicAlreadyExists = &Error{"TOPIC_ALREADY_EXISTS", 36, false, "Topic with this name already exists."} + InvalidPartitions = &Error{"INVALID_PARTITIONS", 37, false, "Number of partitions is below 1."} + InvalidReplicationFactor = &Error{"INVALID_REPLICATION_FACTOR", 38, false, "Replication factor is below 1 or larger than the number of available brokers."} + InvalidReplicaAssignment = &Error{"INVALID_REPLICA_ASSIGNMENT", 39, false, "Replica assignment is invalid."} + InvalidConfig = &Error{"INVALID_CONFIG", 40, false, "Configuration is invalid."} + NotController = &Error{"NOT_CONTROLLER", 41, true, "This is not the correct controller for this cluster."} + InvalidRequest = &Error{"INVALID_REQUEST", 42, false, "This most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker. 
See the broker logs for more details."} + UnsupportedForMessageFormat = &Error{"UNSUPPORTED_FOR_MESSAGE_FORMAT", 43, false, "The message format version on the broker does not support the request."} + PolicyViolation = &Error{"POLICY_VIOLATION", 44, false, "Request parameters do not satisfy the configured policy."} + OutOfOrderSequenceNumber = &Error{"OUT_OF_ORDER_SEQUENCE_NUMBER", 45, false, "The broker received an out of order sequence number."} + DuplicateSequenceNumber = &Error{"DUPLICATE_SEQUENCE_NUMBER", 46, false, "The broker received a duplicate sequence number."} + InvalidProducerEpoch = &Error{"INVALID_PRODUCER_EPOCH", 47, false, "Producer attempted an operation with an old epoch."} + InvalidTxnState = &Error{"INVALID_TXN_STATE", 48, false, "The producer attempted a transactional operation in an invalid state."} + InvalidProducerIDMapping = &Error{"INVALID_PRODUCER_ID_MAPPING", 49, false, "The producer attempted to use a producer id which is not currently assigned to its transactional id."} + InvalidTransactionTimeout = &Error{"INVALID_TRANSACTION_TIMEOUT", 50, false, "The transaction timeout is larger than the maximum value allowed by the broker (as configured by transaction.max.timeout.ms)."} + ConcurrentTransactions = &Error{"CONCURRENT_TRANSACTIONS", 51, false, "The producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing."} + TransactionCoordinatorFenced = &Error{"TRANSACTION_COORDINATOR_FENCED", 52, false, "Indicates that the transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer."} + TransactionalIDAuthorizationFailed = &Error{"TRANSACTIONAL_ID_AUTHORIZATION_FAILED", 53, false, "Transactional ID authorization failed."} + SecurityDisabled = &Error{"SECURITY_DISABLED", 54, false, "Security features are disabled."} + OperationNotAttempted = &Error{"OPERATION_NOT_ATTEMPTED", 55, false, "The broker did not attempt to execute this operation. This may happen for batched RPCs where some operations in the batch failed, causing the broker to respond without trying the rest."} + KafkaStorageError = &Error{"KAFKA_STORAGE_ERROR", 56, true, "Disk error when trying to access log file on the disk."} + LogDirNotFound = &Error{"LOG_DIR_NOT_FOUND", 57, false, "The user-specified log directory is not found in the broker config."} + SaslAuthenticationFailed = &Error{"SASL_AUTHENTICATION_FAILED", 58, false, "SASL Authentication failed."} + UnknownProducerID = &Error{"UNKNOWN_PRODUCER_ID", 59, false, "This exception is raised by the broker if it could not locate the producer metadata associated with the producerID in question. This could happen if, for instance, the producer's records were deleted because their retention time had elapsed. 
Once the last records of the producerID are removed, the producer's metadata is removed from the broker, and future appends by the producer will return this exception."} + ReassignmentInProgress = &Error{"REASSIGNMENT_IN_PROGRESS", 60, false, "A partition reassignment is in progress."} + DelegationTokenAuthDisabled = &Error{"DELEGATION_TOKEN_AUTH_DISABLED", 61, false, "Delegation Token feature is not enabled."} + DelegationTokenNotFound = &Error{"DELEGATION_TOKEN_NOT_FOUND", 62, false, "Delegation Token is not found on server."} + DelegationTokenOwnerMismatch = &Error{"DELEGATION_TOKEN_OWNER_MISMATCH", 63, false, "Specified Principal is not valid Owner/Renewer."} + DelegationTokenRequestNotAllowed = &Error{"DELEGATION_TOKEN_REQUEST_NOT_ALLOWED", 64, false, "Delegation Token requests are not allowed on PLAINTEXT/1-way SSL channels and on delegation token authenticated channels."} + DelegationTokenAuthorizationFailed = &Error{"DELEGATION_TOKEN_AUTHORIZATION_FAILED", 65, false, "Delegation Token authorization failed."} + DelegationTokenExpired = &Error{"DELEGATION_TOKEN_EXPIRED", 66, false, "Delegation Token is expired."} + InvalidPrincipalType = &Error{"INVALID_PRINCIPAL_TYPE", 67, false, "Supplied principalType is not supported."} + NonEmptyGroup = &Error{"NON_EMPTY_GROUP", 68, false, "The group is not empty."} + GroupIDNotFound = &Error{"GROUP_ID_NOT_FOUND", 69, false, "The group id does not exist."} + FetchSessionIDNotFound = &Error{"FETCH_SESSION_ID_NOT_FOUND", 70, true, "The fetch session ID was not found."} + InvalidFetchSessionEpoch = &Error{"INVALID_FETCH_SESSION_EPOCH", 71, true, "The fetch session epoch is invalid."} + ListenerNotFound = &Error{"LISTENER_NOT_FOUND", 72, true, "There is no listener on the leader broker that matches the listener on which metadata request was processed."} + TopicDeletionDisabled = &Error{"TOPIC_DELETION_DISABLED", 73, false, "Topic deletion is disabled."} + FencedLeaderEpoch = &Error{"FENCED_LEADER_EPOCH", 74, true, "The leader epoch in the request is older than the epoch on the broker"} + UnknownLeaderEpoch = &Error{"UNKNOWN_LEADER_EPOCH", 75, true, "The leader epoch in the request is newer than the epoch on the broker"} + UnsupportedCompressionType = &Error{"UNSUPPORTED_COMPRESSION_TYPE", 76, false, "The requesting client does not support the compression type of given partition."} + StaleBrokerEpoch = &Error{"STALE_BROKER_EPOCH", 77, false, "Broker epoch has changed"} + OffsetNotAvailable = &Error{"OFFSET_NOT_AVAILABLE", 78, true, "The leader high watermark has not caught up from a recent leader election so the offsets cannot be guaranteed to be monotonically increasing"} + MemberIDRequired = &Error{"MEMBER_ID_REQUIRED", 79, false, "The group member needs to have a valid member id before actually entering a consumer group"} + PreferredLeaderNotAvailable = &Error{"PREFERRED_LEADER_NOT_AVAILABLE", 80, true, "The preferred leader was not available"} + GroupMaxSizeReached = &Error{"GROUP_MAX_SIZE_REACHED", 81, false, "The consumer group has reached its max size"} + FencedInstanceID = &Error{"FENCED_INSTANCE_ID", 82, false, "The broker rejected this static consumer since another consumer with the same group.instance.id has registered with a different member.id."} + EligibleLeadersNotAvailable = &Error{"ELIGIBLE_LEADERS_NOT_AVAILABLE", 83, true, "Eligible topic partition leaders are not available"} + ElectionNotNeeded = &Error{"ELECTION_NOT_NEEDED", 84, true, "Leader election not needed for topic partition"} + NoReassignmentInProgress = 
&Error{"NO_REASSIGNMENT_IN_PROGRESS", 85, false, "No partition reassignment is in progress."} + GroupSubscribedToTopic = &Error{"GROUP_SUBSCRIBED_TO_TOPIC", 86, false, "Deleting offsets of a topic is forbidden while the consumer group is actively subscribed to it."} + InvalidRecord = &Error{"INVALID_RECORD", 87, false, "This record has failed the validation on the broker and hence been rejected."} + UnstableOffsetCommit = &Error{"UNSTABLE_OFFSET_COMMIT", 88, true, "There are unstable offsets that need to be cleared."} + ThrottlingQuotaExceeded = &Error{"THROTTLING_QUOTA_EXCEEDED", 89, true, "The throttling quota has been exceeded."} + ProducerFenced = &Error{"PRODUCER_FENCED", 90, false, "There is a newer producer with the same transactionalId which fences the current one."} + ResourceNotFound = &Error{"RESOURCE_NOT_FOUND", 91, false, "A request illegally referred to a resource that does not exist."} + DuplicateResource = &Error{"DUPLICATE_RESOURCE", 92, false, "A request illegally referred to the same resource twice."} + UnacceptableCredential = &Error{"UNACCEPTABLE_CREDENTIAL", 93, false, "Requested credential would not meet criteria for acceptability."} + InconsistentVoterSet = &Error{"INCONSISTENT_VOTER_SET", 94, false, "Indicates that either the sender or recipient of a voter-only request is not one of the expected voters."} + InvalidUpdateVersion = &Error{"INVALID_UPDATE_VERSION", 95, false, "The given update version was invalid."} + FeatureUpdateFailed = &Error{"FEATURE_UPDATE_FAILED", 96, false, "Unable to update finalized features due to an unexpected server error."} + PrincipalDeserializationFailure = &Error{"PRINCIPAL_DESERIALIZATION_FAILURE", 97, false, "Request principal deserialization failed during forwarding. This indicates an internal error on the broker cluster security setup."} + SnapshotNotFound = &Error{"SNAPSHOT_NOT_FOUND", 98, false, "Requested snapshot was not found."} + PositionOutOfRange = &Error{"POSITION_OUT_OF_RANGE", 99, false, "Requested position is not greater than or equal to zero, and less than the size of the snapshot."} + UnknownTopicID = &Error{"UNKNOWN_TOPIC_ID", 100, true, "This server does not host this topic ID."} + DuplicateBrokerRegistration = &Error{"DUPLICATE_BROKER_REGISTRATION", 101, false, "This broker ID is already in use."} + BrokerIDNotRegistered = &Error{"BROKER_ID_NOT_REGISTERED", 102, false, "The given broker ID was not registered."} + InconsistentTopicID = &Error{"INCONSISTENT_TOPIC_ID", 103, true, "The log's topic ID did not match the topic ID in the request."} + InconsistentClusterID = &Error{"INCONSISTENT_CLUSTER_ID", 104, false, "The clusterId in the request does not match that found on the server."} + TransactionalIDNotFound = &Error{"TRANSACTIONAL_ID_NOT_FOUND", 105, false, "The transactionalId could not be found."} + FetchSessionTopicIDError = &Error{"FETCH_SESSION_TOPIC_ID_ERROR", 106, true, "The fetch session encountered inconsistent topic ID usage."} + IneligibleReplica = &Error{"INELIGIBLE_REPLICA", 107, false, "The new ISR contains at least one ineligible replica."} + NewLeaderElected = &Error{"NEW_LEADER_ELECTED", 108, false, "The AlterPartition request successfully updated the partition state but the leader has changed."} + OffsetMovedToTieredStorage = &Error{"OFFSET_MOVED_TO_TIERED_STORAGE", 109, false, "The requested offset is moved to tiered storage."} + FencedMemberEpoch = &Error{"FENCED_MEMBER_EPOCH", 110, false, "The member epoch is fenced by the group coordinator. 
The member must abandon all its partitions and rejoin."} + UnreleasedInstanceID = &Error{"UNRELEASED_INSTANCE_ID", 111, false, "The instance ID is still used by another member in the consumer group. That member must leave first."} + UnsupportedAssignor = &Error{"UNSUPPORTED_ASSIGNOR", 112, false, "The assignor or its version range is not supported by the consumer group."} + StaleMemberEpoch = &Error{"STALE_MEMBER_EPOCH", 113, false, "The member epoch is stale. The member must retry after receiving its updated member epoch via the ConsumerGroupHeartbeat API."} + MismatchedEndpointType = &Error{"MISMATCHED_ENDPOINT_TYPE", 114, false, "The request was sent to an endpoint of the wrong type."} + UnsupportedEndpointType = &Error{"UNSUPPORTED_ENDPOINT_TYPE", 115, false, "This endpoint type is not supported yet."} + UnknownControllerID = &Error{"UNKNOWN_CONTROLLER_ID", 116, false, "This controller ID is not known"} + + // UnknownSubscriptionID = &Error{"UNKNOWN_SUBSCRIPTION_ID", 117, false, "Client sent a push telemetry request with an invalid or outdated subscription ID."} + // TelemetryTooLarge = &Error{"TELEMETRY_TOO_LARGE", 118, false, "Client sent a push telemetry request larger than the maximum size the broker will accept."} + // InvalidRegistration = &Error{"INVALID_REGISTRATION", 119, false, "The controller has considered the broker registration to be invalid."} + + TransactionAbortable = &Error{"TRANSACTION_ABORTABLE", 120, false, "The server encountered an error with the transaction. The client can abort the transaction to continue using this transactional ID."} + + // InvalidRecordState = &Error{"INVALID_RECORD_STATE", 121, false, "The record state is invalid. The acknowledgement of delivery could not be completed."} + // ShareSessionNowFound = &Error{"SHARE_SESSION_NOT_FOUND", 122, false, "The share session was not found."} + // InvalidShareSessionEpoch = &Error{"INVALID_SHARE_SESSION_EPOCH", 123, false, "The share session epoch is invalid."} + // FencedStateEpoch = &Error{"FENCED_STATE_EPOCH", 124, false, "The share coordinator rejected the request because the share-group state epoch did not match."} + // InvalidVoterKey = &Error{"INVALID_VOTER_KEY", 125, false, "The voter key doesn't match the receiving replica's key."} + // DuplicateVoter = &Error{"DUPLICATE_VOTER", 126, false, "The voter is already part of the set of voters."} + // VoterNotFound = &Error{"VOTER_NOT_FOUND", 127, false, "The voter is not part of the set of voters."} + // InvalidRegularExpression = &Error{"INVALID_REGULAR_EXPRESSION", 128, false, "The regular expression is not valid."} +) + +var code2err = map[int16]error{ + -1: UnknownServerError, + 0: nil, + 1: OffsetOutOfRange, + 2: CorruptMessage, + 3: UnknownTopicOrPartition, + 4: InvalidFetchSize, + 5: LeaderNotAvailable, + 6: NotLeaderForPartition, + 7: RequestTimedOut, + 8: BrokerNotAvailable, + 9: ReplicaNotAvailable, + 10: MessageTooLarge, + 11: StaleControllerEpoch, + 12: OffsetMetadataTooLarge, + 13: NetworkException, + 14: CoordinatorLoadInProgress, + 15: CoordinatorNotAvailable, + 16: NotCoordinator, + 17: InvalidTopicException, + 18: RecordListTooLarge, + 19: NotEnoughReplicas, + 20: NotEnoughReplicasAfterAppend, + 21: InvalidRequiredAcks, + 22: IllegalGeneration, + 23: InconsistentGroupProtocol, + 24: InvalidGroupID, + 25: UnknownMemberID, + 26: InvalidSessionTimeout, + 27: RebalanceInProgress, + 28: InvalidCommitOffsetSize, + 29: TopicAuthorizationFailed, + 30: GroupAuthorizationFailed, + 31: ClusterAuthorizationFailed, + 32: InvalidTimestamp, + 33: 
UnsupportedSaslMechanism, + 34: IllegalSaslState, + 35: UnsupportedVersion, + 36: TopicAlreadyExists, + 37: InvalidPartitions, + 38: InvalidReplicationFactor, + 39: InvalidReplicaAssignment, + 40: InvalidConfig, + 41: NotController, + 42: InvalidRequest, + 43: UnsupportedForMessageFormat, + 44: PolicyViolation, + 45: OutOfOrderSequenceNumber, + 46: DuplicateSequenceNumber, + 47: InvalidProducerEpoch, + 48: InvalidTxnState, + 49: InvalidProducerIDMapping, + 50: InvalidTransactionTimeout, + 51: ConcurrentTransactions, + 52: TransactionCoordinatorFenced, + 53: TransactionalIDAuthorizationFailed, + 54: SecurityDisabled, + 55: OperationNotAttempted, + 56: KafkaStorageError, + 57: LogDirNotFound, + 58: SaslAuthenticationFailed, + 59: UnknownProducerID, + 60: ReassignmentInProgress, + 61: DelegationTokenAuthDisabled, + 62: DelegationTokenNotFound, + 63: DelegationTokenOwnerMismatch, + 64: DelegationTokenRequestNotAllowed, + 65: DelegationTokenAuthorizationFailed, + 66: DelegationTokenExpired, + 67: InvalidPrincipalType, + 68: NonEmptyGroup, + 69: GroupIDNotFound, + 70: FetchSessionIDNotFound, + 71: InvalidFetchSessionEpoch, + 72: ListenerNotFound, + 73: TopicDeletionDisabled, + 74: FencedLeaderEpoch, + 75: UnknownLeaderEpoch, + 76: UnsupportedCompressionType, + 77: StaleBrokerEpoch, + 78: OffsetNotAvailable, + 79: MemberIDRequired, + 80: PreferredLeaderNotAvailable, + 81: GroupMaxSizeReached, + 82: FencedInstanceID, + 83: EligibleLeadersNotAvailable, + 84: ElectionNotNeeded, + 85: NoReassignmentInProgress, + 86: GroupSubscribedToTopic, + 87: InvalidRecord, + 88: UnstableOffsetCommit, + 89: ThrottlingQuotaExceeded, + 90: ProducerFenced, + 91: ResourceNotFound, + 92: DuplicateResource, + 93: UnacceptableCredential, + 94: InconsistentVoterSet, + 95: InvalidUpdateVersion, + 96: FeatureUpdateFailed, + 97: PrincipalDeserializationFailure, + 98: SnapshotNotFound, + 99: PositionOutOfRange, + 100: UnknownTopicID, + 101: DuplicateBrokerRegistration, + 102: BrokerIDNotRegistered, + 103: InconsistentTopicID, + 104: InconsistentClusterID, + 105: TransactionalIDNotFound, + 106: FetchSessionTopicIDError, + 107: IneligibleReplica, + 108: NewLeaderElected, + 109: OffsetMovedToTieredStorage, // KIP-405, v3.5 + 110: FencedMemberEpoch, // KIP-848, released unstable in v3.6, stable in 3.7 + 111: UnreleasedInstanceID, // "" + 112: UnsupportedAssignor, // "" + 113: StaleMemberEpoch, // "" + 114: MismatchedEndpointType, // KIP-919, v3.7 + 115: UnsupportedEndpointType, // "" + 116: UnknownControllerID, // "" + + // 117: UnknownSubscriptionID, // KIP-714 f1819f448 KAFKA-15778 & KAFKA-15779 + // 118: TelemetryTooLarge, // "" + // 119: InvalidRegistration, // KIP-858 f467f6bb4 KAFKA-15361 + + 120: TransactionAbortable, // KIP-890 2e8d69b78 KAFKA-16314 +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/00_produce.go b/vendor/github.com/twmb/franz-go/pkg/kfake/00_produce.go new file mode 100644 index 00000000000..9aeb7668130 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kfake/00_produce.go @@ -0,0 +1,180 @@ +package kfake + +import ( + "hash/crc32" + "net" + "strconv" + "time" + + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" +) + +// TODO +// * Leaders +// * Support txns +// * Multiple batches in one produce +// * Compact + +func init() { regKey(0, 3, 10) } + +func (c *Cluster) handleProduce(b *broker, kreq kmsg.Request) (kmsg.Response, error) { + var ( + req = kreq.(*kmsg.ProduceRequest) + resp = req.ResponseKind().(*kmsg.ProduceResponse) + tdone = 
make(map[string][]kmsg.ProduceResponseTopicPartition) + ) + + if err := checkReqVersion(req.Key(), req.Version); err != nil { + return nil, err + } + + donep := func(t string, p kmsg.ProduceRequestTopicPartition, errCode int16) *kmsg.ProduceResponseTopicPartition { + sp := kmsg.NewProduceResponseTopicPartition() + sp.Partition = p.Partition + sp.ErrorCode = errCode + ps := tdone[t] + ps = append(ps, sp) + tdone[t] = ps + return &ps[len(ps)-1] + } + donet := func(t kmsg.ProduceRequestTopic, errCode int16) { + for _, p := range t.Partitions { + donep(t.Topic, p, errCode) + } + } + donets := func(errCode int16) { + for _, t := range req.Topics { + donet(t, errCode) + } + } + var includeBrokers bool + toresp := func() kmsg.Response { + for topic, partitions := range tdone { + st := kmsg.NewProduceResponseTopic() + st.Topic = topic + st.Partitions = partitions + resp.Topics = append(resp.Topics, st) + } + if includeBrokers { + for _, b := range c.bs { + sb := kmsg.NewProduceResponseBroker() + h, p, _ := net.SplitHostPort(b.ln.Addr().String()) + p32, _ := strconv.Atoi(p) + sb.NodeID = b.node + sb.Host = h + sb.Port = int32(p32) + resp.Brokers = append(resp.Brokers, sb) + } + } + return resp + } + + if req.TransactionID != nil { + donets(kerr.TransactionalIDAuthorizationFailed.Code) + return toresp(), nil + } + switch req.Acks { + case -1, 0, 1: + default: + donets(kerr.InvalidRequiredAcks.Code) + return toresp(), nil + } + + now := time.Now().UnixMilli() + for _, rt := range req.Topics { + for _, rp := range rt.Partitions { + pd, ok := c.data.tps.getp(rt.Topic, rp.Partition) + if !ok { + donep(rt.Topic, rp, kerr.UnknownTopicOrPartition.Code) + continue + } + if pd.leader != b { + p := donep(rt.Topic, rp, kerr.NotLeaderForPartition.Code) + p.CurrentLeader.LeaderID = pd.leader.node + p.CurrentLeader.LeaderEpoch = pd.epoch + includeBrokers = true + continue + } + + var b kmsg.RecordBatch + if err := b.ReadFrom(rp.Records); err != nil { + donep(rt.Topic, rp, kerr.CorruptMessage.Code) + continue + } + if b.FirstOffset != 0 { + donep(rt.Topic, rp, kerr.CorruptMessage.Code) + continue + } + if int(b.Length) != len(rp.Records)-12 { + donep(rt.Topic, rp, kerr.CorruptMessage.Code) + continue + } + if b.PartitionLeaderEpoch != -1 { + donep(rt.Topic, rp, kerr.CorruptMessage.Code) + continue + } + if b.Magic != 2 { + donep(rt.Topic, rp, kerr.CorruptMessage.Code) + continue + } + if b.CRC != int32(crc32.Checksum(rp.Records[21:], crc32c)) { // crc starts at byte 21 + donep(rt.Topic, rp, kerr.CorruptMessage.Code) + continue + } + attrs := uint16(b.Attributes) + if attrs&0x0007 > 4 { + donep(rt.Topic, rp, kerr.CorruptMessage.Code) + continue + } + logAppendTime := int64(-1) + if attrs&0x0008 > 0 { + b.FirstTimestamp = now + b.MaxTimestamp = now + logAppendTime = now + } + if attrs&0xfff0 != 0 { // TODO txn bit + donep(rt.Topic, rp, kerr.CorruptMessage.Code) + continue + } + if b.LastOffsetDelta != b.NumRecords-1 { + donep(rt.Topic, rp, kerr.CorruptMessage.Code) + continue + } + + seqs, epoch := c.pids.get(b.ProducerID, b.ProducerEpoch, rt.Topic, rp.Partition) + if be := b.ProducerEpoch; be != -1 { + if be < epoch { + donep(rt.Topic, rp, kerr.FencedLeaderEpoch.Code) + continue + } else if be > epoch { + donep(rt.Topic, rp, kerr.UnknownLeaderEpoch.Code) + continue + } + } + ok, dup := seqs.pushAndValidate(b.FirstSequence, b.NumRecords) + if !ok { + donep(rt.Topic, rp, kerr.OutOfOrderSequenceNumber.Code) + continue + } + if dup { + donep(rt.Topic, rp, 0) + continue + } + baseOffset := pd.highWatermark + lso := 
pd.logStartOffset + pd.pushBatch(len(rp.Records), b) + sp := donep(rt.Topic, rp, 0) + sp.BaseOffset = baseOffset + sp.LogAppendTime = logAppendTime + sp.LogStartOffset = lso + } + } + + if req.Acks == 0 { + return nil, nil + } + return toresp(), nil +} + +var crc32c = crc32.MakeTable(crc32.Castagnoli) diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/01_fetch.go b/vendor/github.com/twmb/franz-go/pkg/kfake/01_fetch.go new file mode 100644 index 00000000000..53f9a8e6ee9 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kfake/01_fetch.go @@ -0,0 +1,236 @@ +package kfake + +import ( + "net" + "strconv" + "sync" + "time" + + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" +) + +// Behavior: +// +// * If topic does not exist, we hang +// * Topic created while waiting is not returned in final response +// * If any partition is on a different broker, we return immediately +// * Out of range fetch causes early return +// * Raw bytes of batch counts against wait bytes + +func init() { regKey(1, 4, 16) } + +func (c *Cluster) handleFetch(creq *clientReq, w *watchFetch) (kmsg.Response, error) { + var ( + req = creq.kreq.(*kmsg.FetchRequest) + resp = req.ResponseKind().(*kmsg.FetchResponse) + ) + + if err := checkReqVersion(req.Key(), req.Version); err != nil { + return nil, err + } + + var ( + nbytes int + returnEarly bool + needp tps[int] + ) + if w == nil { + out: + for i, rt := range req.Topics { + if req.Version >= 13 { + rt.Topic = c.data.id2t[rt.TopicID] + req.Topics[i].Topic = rt.Topic + } + t, ok := c.data.tps.gett(rt.Topic) + if !ok { + continue + } + for _, rp := range rt.Partitions { + pd, ok := t[rp.Partition] + if !ok || pd.createdAt.After(creq.at) { + continue + } + if pd.leader != creq.cc.b { + returnEarly = true // NotLeaderForPartition + break out + } + i, ok, atEnd := pd.searchOffset(rp.FetchOffset) + if atEnd { + continue + } + if !ok { + returnEarly = true // OffsetOutOfRange + break out + } + pbytes := 0 + for _, b := range pd.batches[i:] { + nbytes += b.nbytes + pbytes += b.nbytes + if pbytes >= int(rp.PartitionMaxBytes) { + returnEarly = true + break out + } + } + needp.set(rt.Topic, rp.Partition, int(rp.PartitionMaxBytes)-pbytes) + } + } + } + + wait := time.Duration(req.MaxWaitMillis) * time.Millisecond + deadline := creq.at.Add(wait) + if w == nil && !returnEarly && nbytes < int(req.MinBytes) && time.Now().Before(deadline) { + w := &watchFetch{ + need: int(req.MinBytes) - nbytes, + needp: needp, + deadline: deadline, + creq: creq, + } + w.cb = func() { + select { + case c.watchFetchCh <- w: + case <-c.die: + } + } + for _, rt := range req.Topics { + t, ok := c.data.tps.gett(rt.Topic) + if !ok { + continue + } + for _, rp := range rt.Partitions { + pd, ok := t[rp.Partition] + if !ok || pd.createdAt.After(creq.at) { + continue + } + pd.watch[w] = struct{}{} + w.in = append(w.in, pd) + } + } + w.t = time.AfterFunc(wait, w.cb) + return nil, nil + } + + id2t := make(map[uuid]string) + tidx := make(map[string]int) + + donet := func(t string, id uuid, errCode int16) *kmsg.FetchResponseTopic { + if i, ok := tidx[t]; ok { + return &resp.Topics[i] + } + id2t[id] = t + tidx[t] = len(resp.Topics) + st := kmsg.NewFetchResponseTopic() + st.Topic = t + st.TopicID = id + resp.Topics = append(resp.Topics, st) + return &resp.Topics[len(resp.Topics)-1] + } + donep := func(t string, id uuid, p int32, errCode int16) *kmsg.FetchResponseTopicPartition { + sp := kmsg.NewFetchResponseTopicPartition() + sp.Partition = p + sp.ErrorCode = errCode + st := donet(t, id, 
0) + st.Partitions = append(st.Partitions, sp) + return &st.Partitions[len(st.Partitions)-1] + } + + var includeBrokers bool + defer func() { + if includeBrokers { + for _, b := range c.bs { + sb := kmsg.NewFetchResponseBroker() + h, p, _ := net.SplitHostPort(b.ln.Addr().String()) + p32, _ := strconv.Atoi(p) + sb.NodeID = b.node + sb.Host = h + sb.Port = int32(p32) + resp.Brokers = append(resp.Brokers, sb) + } + } + }() + + var batchesAdded int +full: + for _, rt := range req.Topics { + for _, rp := range rt.Partitions { + pd, ok := c.data.tps.getp(rt.Topic, rp.Partition) + if !ok { + if req.Version >= 13 { + donep(rt.Topic, rt.TopicID, rp.Partition, kerr.UnknownTopicID.Code) + } else { + donep(rt.Topic, rt.TopicID, rp.Partition, kerr.UnknownTopicOrPartition.Code) + } + continue + } + if pd.leader != creq.cc.b { + p := donep(rt.Topic, rt.TopicID, rp.Partition, kerr.NotLeaderForPartition.Code) + p.CurrentLeader.LeaderID = pd.leader.node + p.CurrentLeader.LeaderEpoch = pd.epoch + includeBrokers = true + continue + } + sp := donep(rt.Topic, rt.TopicID, rp.Partition, 0) + sp.HighWatermark = pd.highWatermark + sp.LastStableOffset = pd.lastStableOffset + sp.LogStartOffset = pd.logStartOffset + i, ok, atEnd := pd.searchOffset(rp.FetchOffset) + if atEnd { + continue + } + if !ok { + sp.ErrorCode = kerr.OffsetOutOfRange.Code + continue + } + var pbytes int + for _, b := range pd.batches[i:] { + if nbytes = nbytes + b.nbytes; nbytes > int(req.MaxBytes) && batchesAdded > 1 { + break full + } + if pbytes = pbytes + b.nbytes; pbytes > int(rp.PartitionMaxBytes) && batchesAdded > 1 { + break + } + batchesAdded++ + sp.RecordBatches = b.AppendTo(sp.RecordBatches) + } + } + } + + return resp, nil +} + +type watchFetch struct { + need int + needp tps[int] + deadline time.Time + creq *clientReq + + in []*partData + cb func() + t *time.Timer + + once sync.Once + cleaned bool +} + +func (w *watchFetch) push(nbytes int) { + w.need -= nbytes + if w.need <= 0 { + w.once.Do(func() { + go w.cb() + }) + } +} + +func (w *watchFetch) deleted() { + w.once.Do(func() { + go w.cb() + }) +} + +func (w *watchFetch) cleanup(c *Cluster) { + w.cleaned = true + for _, in := range w.in { + delete(in.watch, w) + } + w.t.Stop() +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/02_list_offsets.go b/vendor/github.com/twmb/franz-go/pkg/kfake/02_list_offsets.go new file mode 100644 index 00000000000..587c41a6c77 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kfake/02_list_offsets.go @@ -0,0 +1,99 @@ +package kfake + +import ( + "sort" + + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" +) + +func init() { regKey(2, 0, 7) } + +func (c *Cluster) handleListOffsets(b *broker, kreq kmsg.Request) (kmsg.Response, error) { + req := kreq.(*kmsg.ListOffsetsRequest) + resp := req.ResponseKind().(*kmsg.ListOffsetsResponse) + + if err := checkReqVersion(req.Key(), req.Version); err != nil { + return nil, err + } + + tidx := make(map[string]int) + donet := func(t string, errCode int16) *kmsg.ListOffsetsResponseTopic { + if i, ok := tidx[t]; ok { + return &resp.Topics[i] + } + tidx[t] = len(resp.Topics) + st := kmsg.NewListOffsetsResponseTopic() + st.Topic = t + resp.Topics = append(resp.Topics, st) + return &resp.Topics[len(resp.Topics)-1] + } + donep := func(t string, p int32, errCode int16) *kmsg.ListOffsetsResponseTopicPartition { + sp := kmsg.NewListOffsetsResponseTopicPartition() + sp.Partition = p + sp.ErrorCode = errCode + st := donet(t, 0) + st.Partitions = append(st.Partitions, sp) + return 
&st.Partitions[len(st.Partitions)-1] + } + + for _, rt := range req.Topics { + ps, ok := c.data.tps.gett(rt.Topic) + for _, rp := range rt.Partitions { + if !ok { + donep(rt.Topic, rp.Partition, kerr.UnknownTopicOrPartition.Code) + continue + } + pd, ok := ps[rp.Partition] + if !ok { + donep(rt.Topic, rp.Partition, kerr.UnknownTopicOrPartition.Code) + continue + } + if pd.leader != b { + donep(rt.Topic, rp.Partition, kerr.NotLeaderForPartition.Code) + continue + } + if le := rp.CurrentLeaderEpoch; le != -1 { + if le < pd.epoch { + donep(rt.Topic, rp.Partition, kerr.FencedLeaderEpoch.Code) + continue + } else if le > pd.epoch { + donep(rt.Topic, rp.Partition, kerr.UnknownLeaderEpoch.Code) + continue + } + } + + sp := donep(rt.Topic, rp.Partition, 0) + sp.LeaderEpoch = pd.epoch + switch rp.Timestamp { + case -2: + sp.Offset = pd.logStartOffset + case -1: + if req.IsolationLevel == 1 { + sp.Offset = pd.lastStableOffset + } else { + sp.Offset = pd.highWatermark + } + default: + // returns the index of the first batch _after_ the requested timestamp + idx, _ := sort.Find(len(pd.batches), func(idx int) int { + maxEarlier := pd.batches[idx].maxEarlierTimestamp + switch { + case maxEarlier > rp.Timestamp: + return -1 + case maxEarlier == rp.Timestamp: + return 0 + default: + return 1 + } + }) + if idx == len(pd.batches) { + sp.Offset = -1 + } else { + sp.Offset = pd.batches[idx].FirstOffset + } + } + } + } + return resp, nil +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/03_metadata.go b/vendor/github.com/twmb/franz-go/pkg/kfake/03_metadata.go new file mode 100644 index 00000000000..a4f81276725 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kfake/03_metadata.go @@ -0,0 +1,120 @@ +package kfake + +import ( + "net" + "strconv" + + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" +) + +func init() { regKey(3, 0, 12) } + +func (c *Cluster) handleMetadata(kreq kmsg.Request) (kmsg.Response, error) { + req := kreq.(*kmsg.MetadataRequest) + resp := req.ResponseKind().(*kmsg.MetadataResponse) + + if err := checkReqVersion(req.Key(), req.Version); err != nil { + return nil, err + } + + for _, b := range c.bs { + sb := kmsg.NewMetadataResponseBroker() + h, p, _ := net.SplitHostPort(b.ln.Addr().String()) + p32, _ := strconv.Atoi(p) + sb.NodeID = b.node + sb.Host = h + sb.Port = int32(p32) + resp.Brokers = append(resp.Brokers, sb) + } + + resp.ClusterID = &c.cfg.clusterID + resp.ControllerID = c.controller.node + + id2t := make(map[uuid]string) + tidx := make(map[string]int) + + donet := func(t string, id uuid, errCode int16) *kmsg.MetadataResponseTopic { + if i, ok := tidx[t]; ok { + return &resp.Topics[i] + } + id2t[id] = t + tidx[t] = len(resp.Topics) + st := kmsg.NewMetadataResponseTopic() + if t != "" { + st.Topic = kmsg.StringPtr(t) + } + st.TopicID = id + st.ErrorCode = errCode + resp.Topics = append(resp.Topics, st) + return &resp.Topics[len(resp.Topics)-1] + } + donep := func(t string, id uuid, p int32, errCode int16) *kmsg.MetadataResponseTopicPartition { + sp := kmsg.NewMetadataResponseTopicPartition() + sp.Partition = p + sp.ErrorCode = errCode + st := donet(t, id, 0) + st.Partitions = append(st.Partitions, sp) + return &st.Partitions[len(st.Partitions)-1] + } + okp := func(t string, id uuid, p int32, pd *partData) { + nreplicas := c.data.treplicas[t] + if nreplicas > len(c.bs) { + nreplicas = len(c.bs) + } + + sp := donep(t, id, p, 0) + sp.Leader = pd.leader.node + sp.LeaderEpoch = pd.epoch + + for i := 0; i < nreplicas; i++ { + idx := (pd.leader.bsIdx 
+ i) % len(c.bs) + sp.Replicas = append(sp.Replicas, c.bs[idx].node) + } + sp.ISR = sp.Replicas + } + + allowAuto := req.AllowAutoTopicCreation && c.cfg.allowAutoTopic + for _, rt := range req.Topics { + var topic string + var ok bool + // If topic ID is present, we ignore any provided topic. + // Duplicate topics are merged into one response topic. + // Topics with no topic and no ID are ignored. + if rt.TopicID != noID { + if topic, ok = c.data.id2t[rt.TopicID]; !ok { + donet("", rt.TopicID, kerr.UnknownTopicID.Code) + continue + } + } else if rt.Topic == nil { + continue + } else { + topic = *rt.Topic + } + + ps, ok := c.data.tps.gett(topic) + if !ok { + if !allowAuto { + donet(topic, rt.TopicID, kerr.UnknownTopicOrPartition.Code) + continue + } + c.data.mkt(topic, -1, -1, nil) + ps, _ = c.data.tps.gett(topic) + } + + id := c.data.t2id[topic] + for p, pd := range ps { + okp(topic, id, p, pd) + } + } + if req.Topics == nil && c.data.tps != nil { + for topic, ps := range c.data.tps { + id := c.data.t2id[topic] + for p, pd := range ps { + okp(topic, id, p, pd) + } + } + } + + return resp, nil +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/08_offset_commit.go b/vendor/github.com/twmb/franz-go/pkg/kfake/08_offset_commit.go new file mode 100644 index 00000000000..b82400e0f4f --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kfake/08_offset_commit.go @@ -0,0 +1,18 @@ +package kfake + +import ( + "github.com/twmb/franz-go/pkg/kmsg" +) + +func init() { regKey(8, 0, 8) } + +func (c *Cluster) handleOffsetCommit(creq *clientReq) (kmsg.Response, error) { + req := creq.kreq.(*kmsg.OffsetCommitRequest) + + if err := checkReqVersion(req.Key(), req.Version); err != nil { + return nil, err + } + + c.groups.handleOffsetCommit(creq) + return nil, nil +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/09_offset_fetch.go b/vendor/github.com/twmb/franz-go/pkg/kfake/09_offset_fetch.go new file mode 100644 index 00000000000..204339ecff7 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kfake/09_offset_fetch.go @@ -0,0 +1,17 @@ +package kfake + +import ( + "github.com/twmb/franz-go/pkg/kmsg" +) + +func init() { regKey(9, 0, 8) } + +func (c *Cluster) handleOffsetFetch(creq *clientReq) (kmsg.Response, error) { + req := creq.kreq.(*kmsg.OffsetFetchRequest) + + if err := checkReqVersion(req.Key(), req.Version); err != nil { + return nil, err + } + + return c.groups.handleOffsetFetch(creq), nil +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/10_find_coordinator.go b/vendor/github.com/twmb/franz-go/pkg/kfake/10_find_coordinator.go new file mode 100644 index 00000000000..2873a3320bc --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kfake/10_find_coordinator.go @@ -0,0 +1,61 @@ +package kfake + +import ( + "net" + "strconv" + + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" +) + +func init() { regKey(10, 0, 4) } + +func (c *Cluster) handleFindCoordinator(kreq kmsg.Request) (kmsg.Response, error) { + req := kreq.(*kmsg.FindCoordinatorRequest) + resp := req.ResponseKind().(*kmsg.FindCoordinatorResponse) + + if err := checkReqVersion(req.Key(), req.Version); err != nil { + return nil, err + } + + var unknown bool + if req.CoordinatorType != 0 && req.CoordinatorType != 1 { + unknown = true + } + + if req.Version <= 3 { + req.CoordinatorKeys = append(req.CoordinatorKeys, req.CoordinatorKey) + defer func() { + resp.ErrorCode = resp.Coordinators[0].ErrorCode + resp.ErrorMessage = resp.Coordinators[0].ErrorMessage + resp.NodeID = 
resp.Coordinators[0].NodeID + resp.Host = resp.Coordinators[0].Host + resp.Port = resp.Coordinators[0].Port + }() + } + + addc := func(key string) *kmsg.FindCoordinatorResponseCoordinator { + sc := kmsg.NewFindCoordinatorResponseCoordinator() + sc.Key = key + resp.Coordinators = append(resp.Coordinators, sc) + return &resp.Coordinators[len(resp.Coordinators)-1] + } + + for _, key := range req.CoordinatorKeys { + sc := addc(key) + if unknown { + sc.ErrorCode = kerr.InvalidRequest.Code + continue + } + + b := c.coordinator(key) + host, port, _ := net.SplitHostPort(b.ln.Addr().String()) + iport, _ := strconv.Atoi(port) + + sc.NodeID = b.node + sc.Host = host + sc.Port = int32(iport) + } + + return resp, nil +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/11_join_group.go b/vendor/github.com/twmb/franz-go/pkg/kfake/11_join_group.go new file mode 100644 index 00000000000..70ed1d896e2 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kfake/11_join_group.go @@ -0,0 +1,18 @@ +package kfake + +import ( + "github.com/twmb/franz-go/pkg/kmsg" +) + +func init() { regKey(11, 0, 9) } + +func (c *Cluster) handleJoinGroup(creq *clientReq) (kmsg.Response, error) { + req := creq.kreq.(*kmsg.JoinGroupRequest) + + if err := checkReqVersion(req.Key(), req.Version); err != nil { + return nil, err + } + + c.groups.handleJoin(creq) + return nil, nil +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/12_heartbeat.go b/vendor/github.com/twmb/franz-go/pkg/kfake/12_heartbeat.go new file mode 100644 index 00000000000..59f12712cc0 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kfake/12_heartbeat.go @@ -0,0 +1,23 @@ +package kfake + +import ( + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" +) + +func init() { regKey(12, 0, 4) } + +func (c *Cluster) handleHeartbeat(creq *clientReq) (kmsg.Response, error) { + req := creq.kreq.(*kmsg.HeartbeatRequest) + resp := req.ResponseKind().(*kmsg.HeartbeatResponse) + + if err := checkReqVersion(req.Key(), req.Version); err != nil { + return nil, err + } + + if c.groups.handleHeartbeat(creq) { + return nil, nil + } + resp.ErrorCode = kerr.GroupIDNotFound.Code + return resp, nil +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/13_leave_group.go b/vendor/github.com/twmb/franz-go/pkg/kfake/13_leave_group.go new file mode 100644 index 00000000000..e941f19ede2 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kfake/13_leave_group.go @@ -0,0 +1,23 @@ +package kfake + +import ( + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" +) + +func init() { regKey(13, 0, 5) } + +func (c *Cluster) handleLeaveGroup(creq *clientReq) (kmsg.Response, error) { + req := creq.kreq.(*kmsg.LeaveGroupRequest) + resp := req.ResponseKind().(*kmsg.LeaveGroupResponse) + + if err := checkReqVersion(req.Key(), req.Version); err != nil { + return nil, err + } + + if c.groups.handleLeave(creq) { + return nil, nil + } + resp.ErrorCode = kerr.GroupIDNotFound.Code + return resp, nil +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/14_sync_group.go b/vendor/github.com/twmb/franz-go/pkg/kfake/14_sync_group.go new file mode 100644 index 00000000000..9944b11204a --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kfake/14_sync_group.go @@ -0,0 +1,23 @@ +package kfake + +import ( + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" +) + +func init() { regKey(14, 0, 5) } + +func (c *Cluster) handleSyncGroup(creq *clientReq) (kmsg.Response, error) { + req := creq.kreq.(*kmsg.SyncGroupRequest) + 
resp := req.ResponseKind().(*kmsg.SyncGroupResponse) + + if err := checkReqVersion(req.Key(), req.Version); err != nil { + return nil, err + } + + if c.groups.handleSync(creq) { + return nil, nil + } + resp.ErrorCode = kerr.GroupIDNotFound.Code + return resp, nil +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/15_describe_groups.go b/vendor/github.com/twmb/franz-go/pkg/kfake/15_describe_groups.go new file mode 100644 index 00000000000..8791759becf --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kfake/15_describe_groups.go @@ -0,0 +1,17 @@ +package kfake + +import ( + "github.com/twmb/franz-go/pkg/kmsg" +) + +func init() { regKey(15, 0, 5) } + +func (c *Cluster) handleDescribeGroups(creq *clientReq) (kmsg.Response, error) { + req := creq.kreq.(*kmsg.DescribeGroupsRequest) + + if err := checkReqVersion(req.Key(), req.Version); err != nil { + return nil, err + } + + return c.groups.handleDescribe(creq), nil +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/16_list_groups.go b/vendor/github.com/twmb/franz-go/pkg/kfake/16_list_groups.go new file mode 100644 index 00000000000..6d0189c4b89 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kfake/16_list_groups.go @@ -0,0 +1,17 @@ +package kfake + +import ( + "github.com/twmb/franz-go/pkg/kmsg" +) + +func init() { regKey(16, 0, 4) } + +func (c *Cluster) handleListGroups(creq *clientReq) (kmsg.Response, error) { + req := creq.kreq.(*kmsg.ListGroupsRequest) + + if err := checkReqVersion(req.Key(), req.Version); err != nil { + return nil, err + } + + return c.groups.handleList(creq), nil +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/17_sasl_handshake.go b/vendor/github.com/twmb/franz-go/pkg/kfake/17_sasl_handshake.go new file mode 100644 index 00000000000..8a80cbf1c82 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kfake/17_sasl_handshake.go @@ -0,0 +1,35 @@ +package kfake + +import ( + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" +) + +func init() { regKey(17, 1, 1) } + +func (c *Cluster) handleSASLHandshake(creq *clientReq) (kmsg.Response, error) { + req := creq.kreq.(*kmsg.SASLHandshakeRequest) + resp := req.ResponseKind().(*kmsg.SASLHandshakeResponse) + + if err := checkReqVersion(req.Key(), req.Version); err != nil { + return nil, err + } + + if creq.cc.saslStage != saslStageBegin { + resp.ErrorCode = kerr.IllegalSaslState.Code + return resp, nil + } + + switch req.Mechanism { + case saslPlain: + creq.cc.saslStage = saslStageAuthPlain + case saslScram256: + creq.cc.saslStage = saslStageAuthScram0_256 + case saslScram512: + creq.cc.saslStage = saslStageAuthScram0_512 + default: + resp.ErrorCode = kerr.UnsupportedSaslMechanism.Code + resp.SupportedMechanisms = []string{saslPlain, saslScram256, saslScram512} + } + return resp, nil +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/18_api_versions.go b/vendor/github.com/twmb/franz-go/pkg/kfake/18_api_versions.go new file mode 100644 index 00000000000..c4378da4b8d --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kfake/18_api_versions.go @@ -0,0 +1,81 @@ +package kfake + +import ( + "fmt" + "sort" + "sync" + + "github.com/twmb/franz-go/pkg/kmsg" +) + +func init() { regKey(18, 0, 3) } + +func (c *Cluster) handleApiVersions(kreq kmsg.Request) (kmsg.Response, error) { + req := kreq.(*kmsg.ApiVersionsRequest) + resp := req.ResponseKind().(*kmsg.ApiVersionsResponse) + + if resp.Version > 3 { + resp.Version = 0 // downgrades to 0 if the version is unknown + } + + if err := checkReqVersion(req.Key(), req.Version); 
err != nil { + return nil, err + } + + // If we are handling ApiVersions, our package is initialized and we + // build our response once. + apiVersionsOnce.Do(func() { + for _, v := range apiVersionsKeys { + apiVersionsSorted = append(apiVersionsSorted, v) + } + sort.Slice(apiVersionsSorted, func(i, j int) bool { + return apiVersionsSorted[i].ApiKey < apiVersionsSorted[j].ApiKey + }) + }) + resp.ApiKeys = apiVersionsSorted + + return resp, nil +} + +// Called at the beginning of every request, this validates that the client +// is sending requests within version ranges we can handle. +func checkReqVersion(key, version int16) error { + v, exists := apiVersionsKeys[key] + if !exists { + return fmt.Errorf("unsupported request key %d", key) + } + if version < v.MinVersion { + return fmt.Errorf("%s version %d below min supported version %d", kmsg.NameForKey(key), version, v.MinVersion) + } + if version > v.MaxVersion { + return fmt.Errorf("%s version %d above max supported version %d", kmsg.NameForKey(key), version, v.MaxVersion) + } + return nil +} + +var ( + apiVersionsMu sync.Mutex + apiVersionsKeys = make(map[int16]kmsg.ApiVersionsResponseApiKey) + + apiVersionsOnce sync.Once + apiVersionsSorted []kmsg.ApiVersionsResponseApiKey +) + +// Every request we implement calls regKey in an init function, allowing us to +// fully correctly build our ApiVersions response. +func regKey(key, min, max int16) { + apiVersionsMu.Lock() + defer apiVersionsMu.Unlock() + + if key < 0 || min < 0 || max < 0 || max < min { + panic(fmt.Sprintf("invalid registration, key: %d, min: %d, max: %d", key, min, max)) + } + if _, exists := apiVersionsKeys[key]; exists { + panic(fmt.Sprintf("doubly registered key %d", key)) + } + apiVersionsKeys[key] = kmsg.ApiVersionsResponseApiKey{ + ApiKey: key, + MinVersion: min, + MaxVersion: max, + } +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/19_create_topics.go b/vendor/github.com/twmb/franz-go/pkg/kfake/19_create_topics.go new file mode 100644 index 00000000000..1f9d00e620b --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kfake/19_create_topics.go @@ -0,0 +1,85 @@ +package kfake + +import ( + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" +) + +// TODO +// +// * Return InvalidTopicException when names collide + +func init() { regKey(19, 0, 7) } + +func (c *Cluster) handleCreateTopics(b *broker, kreq kmsg.Request) (kmsg.Response, error) { + req := kreq.(*kmsg.CreateTopicsRequest) + resp := req.ResponseKind().(*kmsg.CreateTopicsResponse) + + if err := checkReqVersion(req.Key(), req.Version); err != nil { + return nil, err + } + + donet := func(t string, errCode int16) *kmsg.CreateTopicsResponseTopic { + st := kmsg.NewCreateTopicsResponseTopic() + st.Topic = t + st.ErrorCode = errCode + resp.Topics = append(resp.Topics, st) + return &resp.Topics[len(resp.Topics)-1] + } + donets := func(errCode int16) { + for _, rt := range req.Topics { + donet(rt.Topic, errCode) + } + } + + if b != c.controller { + donets(kerr.NotController.Code) + return resp, nil + } + + uniq := make(map[string]struct{}) + for _, rt := range req.Topics { + if _, ok := uniq[rt.Topic]; ok { + donets(kerr.InvalidRequest.Code) + return resp, nil + } + uniq[rt.Topic] = struct{}{} + } + + for _, rt := range req.Topics { + if _, ok := c.data.tps.gett(rt.Topic); ok { + donet(rt.Topic, kerr.TopicAlreadyExists.Code) + continue + } + if len(rt.ReplicaAssignment) > 0 { + donet(rt.Topic, kerr.InvalidReplicaAssignment.Code) + continue + } + if int(rt.ReplicationFactor) > len(c.bs) 
{ + donet(rt.Topic, kerr.InvalidReplicationFactor.Code) + continue + } + if rt.NumPartitions == 0 { + donet(rt.Topic, kerr.InvalidPartitions.Code) + continue + } + configs := make(map[string]*string) + for _, c := range rt.Configs { + configs[c.Name] = c.Value + } + c.data.mkt(rt.Topic, int(rt.NumPartitions), int(rt.ReplicationFactor), configs) + st := donet(rt.Topic, 0) + st.TopicID = c.data.t2id[rt.Topic] + st.NumPartitions = int32(len(c.data.tps[rt.Topic])) + st.ReplicationFactor = int16(c.data.treplicas[rt.Topic]) + for k, v := range configs { + c := kmsg.NewCreateTopicsResponseTopicConfig() + c.Name = k + c.Value = v + // Source? + st.Configs = append(st.Configs, c) + } + } + + return resp, nil +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/20_delete_topics.go b/vendor/github.com/twmb/franz-go/pkg/kfake/20_delete_topics.go new file mode 100644 index 00000000000..eab80235e6d --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kfake/20_delete_topics.go @@ -0,0 +1,94 @@ +package kfake + +import ( + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" +) + +func init() { regKey(20, 0, 6) } + +func (c *Cluster) handleDeleteTopics(b *broker, kreq kmsg.Request) (kmsg.Response, error) { + req := kreq.(*kmsg.DeleteTopicsRequest) + resp := req.ResponseKind().(*kmsg.DeleteTopicsResponse) + + if err := checkReqVersion(req.Key(), req.Version); err != nil { + return nil, err + } + + donet := func(t *string, id uuid, errCode int16) *kmsg.DeleteTopicsResponseTopic { + st := kmsg.NewDeleteTopicsResponseTopic() + st.Topic = t + st.TopicID = id + st.ErrorCode = errCode + resp.Topics = append(resp.Topics, st) + return &resp.Topics[len(resp.Topics)-1] + } + donets := func(errCode int16) { + for _, rt := range req.Topics { + donet(rt.Topic, rt.TopicID, errCode) + } + } + + if req.Version <= 5 { + for _, topic := range req.TopicNames { + rt := kmsg.NewDeleteTopicsRequestTopic() + rt.Topic = kmsg.StringPtr(topic) + req.Topics = append(req.Topics, rt) + } + } + + if b != c.controller { + donets(kerr.NotController.Code) + return resp, nil + } + for _, rt := range req.Topics { + if rt.TopicID != noID && rt.Topic != nil { + donets(kerr.InvalidRequest.Code) + return resp, nil + } + } + + type toDelete struct { + topic string + id uuid + } + var toDeletes []toDelete + defer func() { + for _, td := range toDeletes { + delete(c.data.tps, td.topic) + delete(c.data.id2t, td.id) + delete(c.data.t2id, td.topic) + + } + }() + for _, rt := range req.Topics { + var topic string + var id uuid + if rt.Topic != nil { + topic = *rt.Topic + id = c.data.t2id[topic] + } else { + topic = c.data.id2t[rt.TopicID] + id = rt.TopicID + } + t, ok := c.data.tps.gett(topic) + if !ok { + if rt.Topic != nil { + donet(&topic, id, kerr.UnknownTopicOrPartition.Code) + } else { + donet(&topic, id, kerr.UnknownTopicID.Code) + } + continue + } + + donet(&topic, id, 0) + toDeletes = append(toDeletes, toDelete{topic, id}) + for _, pd := range t { + for watch := range pd.watch { + watch.deleted() + } + } + } + + return resp, nil +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/21_delete_records.go b/vendor/github.com/twmb/franz-go/pkg/kfake/21_delete_records.go new file mode 100644 index 00000000000..e175045e4cf --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kfake/21_delete_records.go @@ -0,0 +1,74 @@ +package kfake + +import ( + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" +) + +// TODO +// +// * Return InvalidTopicException when names collide + +func init() { 
regKey(21, 0, 2) } + +func (c *Cluster) handleDeleteRecords(b *broker, kreq kmsg.Request) (kmsg.Response, error) { + req := kreq.(*kmsg.DeleteRecordsRequest) + resp := req.ResponseKind().(*kmsg.DeleteRecordsResponse) + + if err := checkReqVersion(req.Key(), req.Version); err != nil { + return nil, err + } + + tidx := make(map[string]int) + donet := func(t string, errCode int16) *kmsg.DeleteRecordsResponseTopic { + if i, ok := tidx[t]; ok { + return &resp.Topics[i] + } + tidx[t] = len(resp.Topics) + st := kmsg.NewDeleteRecordsResponseTopic() + st.Topic = t + resp.Topics = append(resp.Topics, st) + return &resp.Topics[len(resp.Topics)-1] + } + donep := func(t string, p int32, errCode int16) *kmsg.DeleteRecordsResponseTopicPartition { + sp := kmsg.NewDeleteRecordsResponseTopicPartition() + sp.Partition = p + sp.ErrorCode = errCode + st := donet(t, 0) + st.Partitions = append(st.Partitions, sp) + return &st.Partitions[len(st.Partitions)-1] + } + + for _, rt := range req.Topics { + ps, ok := c.data.tps.gett(rt.Topic) + for _, rp := range rt.Partitions { + if !ok { + donep(rt.Topic, rp.Partition, kerr.UnknownTopicOrPartition.Code) + continue + } + pd, ok := ps[rp.Partition] + if !ok { + donep(rt.Topic, rp.Partition, kerr.UnknownTopicOrPartition.Code) + continue + } + if pd.leader != b { + donep(rt.Topic, rp.Partition, kerr.NotLeaderForPartition.Code) + continue + } + to := rp.Offset + if to == -1 { + to = pd.highWatermark + } + if to < pd.logStartOffset || to > pd.highWatermark { + donep(rt.Topic, rp.Partition, kerr.OffsetOutOfRange.Code) + continue + } + pd.logStartOffset = to + pd.trimLeft() + sp := donep(rt.Topic, rp.Partition, 0) + sp.LowWatermark = to + } + } + + return resp, nil +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/22_init_producer_id.go b/vendor/github.com/twmb/franz-go/pkg/kfake/22_init_producer_id.go new file mode 100644 index 00000000000..a0485905a42 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kfake/22_init_producer_id.go @@ -0,0 +1,34 @@ +package kfake + +import ( + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" +) + +// TODO +// +// * Transactional IDs +// * v3+ + +func init() { regKey(22, 0, 4) } + +func (c *Cluster) handleInitProducerID(kreq kmsg.Request) (kmsg.Response, error) { + var ( + req = kreq.(*kmsg.InitProducerIDRequest) + resp = req.ResponseKind().(*kmsg.InitProducerIDResponse) + ) + + if err := checkReqVersion(req.Key(), req.Version); err != nil { + return nil, err + } + + if req.TransactionalID != nil { + resp.ErrorCode = kerr.UnknownServerError.Code + return resp, nil + } + + pid := c.pids.create(nil) + resp.ProducerID = pid.id + resp.ProducerEpoch = pid.epoch + return resp, nil +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/23_offset_for_leader_epoch.go b/vendor/github.com/twmb/franz-go/pkg/kfake/23_offset_for_leader_epoch.go new file mode 100644 index 00000000000..f531ecf89cf --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kfake/23_offset_for_leader_epoch.go @@ -0,0 +1,121 @@ +package kfake + +import ( + "sort" + + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" +) + +func init() { regKey(23, 3, 4) } + +func (c *Cluster) handleOffsetForLeaderEpoch(b *broker, kreq kmsg.Request) (kmsg.Response, error) { + req := kreq.(*kmsg.OffsetForLeaderEpochRequest) + resp := req.ResponseKind().(*kmsg.OffsetForLeaderEpochResponse) + + if err := checkReqVersion(req.Key(), req.Version); err != nil { + return nil, err + } + + tidx := make(map[string]int) + donet := func(t string, 
errCode int16) *kmsg.OffsetForLeaderEpochResponseTopic { + if i, ok := tidx[t]; ok { + return &resp.Topics[i] + } + tidx[t] = len(resp.Topics) + st := kmsg.NewOffsetForLeaderEpochResponseTopic() + st.Topic = t + resp.Topics = append(resp.Topics, st) + return &resp.Topics[len(resp.Topics)-1] + } + donep := func(t string, p int32, errCode int16) *kmsg.OffsetForLeaderEpochResponseTopicPartition { + sp := kmsg.NewOffsetForLeaderEpochResponseTopicPartition() + sp.Partition = p + sp.ErrorCode = errCode + st := donet(t, 0) + st.Partitions = append(st.Partitions, sp) + return &st.Partitions[len(st.Partitions)-1] + } + + for _, rt := range req.Topics { + ps, ok := c.data.tps.gett(rt.Topic) + for _, rp := range rt.Partitions { + if req.ReplicaID != -1 { + donep(rt.Topic, rp.Partition, kerr.UnknownServerError.Code) + continue + } + if !ok { + donep(rt.Topic, rp.Partition, kerr.UnknownTopicOrPartition.Code) + continue + } + pd, ok := ps[rp.Partition] + if !ok { + donep(rt.Topic, rp.Partition, kerr.UnknownTopicOrPartition.Code) + continue + } + if pd.leader != b { + donep(rt.Topic, rp.Partition, kerr.NotLeaderForPartition.Code) + continue + } + if rp.CurrentLeaderEpoch < pd.epoch { + donep(rt.Topic, rp.Partition, kerr.FencedLeaderEpoch.Code) + continue + } else if rp.CurrentLeaderEpoch > pd.epoch { + donep(rt.Topic, rp.Partition, kerr.UnknownLeaderEpoch.Code) + continue + } + + sp := donep(rt.Topic, rp.Partition, 0) + + // If the user is requesting our current epoch, we return the HWM. + if rp.LeaderEpoch == pd.epoch { + sp.LeaderEpoch = pd.epoch + sp.EndOffset = pd.highWatermark + continue + } + + // If our epoch was bumped before anything was + // produced, return the epoch and a start offset of 0. + if len(pd.batches) == 0 { + sp.LeaderEpoch = pd.epoch + sp.EndOffset = 0 + if rp.LeaderEpoch > pd.epoch { + sp.LeaderEpoch = -1 + sp.EndOffset = -1 + } + continue + } + + // What is the largest epoch after the requested epoch? + nextEpoch := rp.LeaderEpoch + 1 + idx, _ := sort.Find(len(pd.batches), func(idx int) int { + batchEpoch := pd.batches[idx].epoch + switch { + case nextEpoch <= batchEpoch: + return -1 + default: + return 1 + } + }) + + // Requested epoch is not yet known: keep -1 returns. + if idx == len(pd.batches) { + sp.LeaderEpoch = -1 + sp.EndOffset = -1 + continue + } + + // Next epoch is actually the first epoch: return the + // requested epoch and the LSO. 
+ if idx == 0 { + sp.LeaderEpoch = rp.LeaderEpoch + sp.EndOffset = pd.logStartOffset + continue + } + + sp.LeaderEpoch = pd.batches[idx-1].epoch + sp.EndOffset = pd.batches[idx].FirstOffset + } + } + return resp, nil +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/32_describe_configs.go b/vendor/github.com/twmb/franz-go/pkg/kfake/32_describe_configs.go new file mode 100644 index 00000000000..02662cc5c85 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kfake/32_describe_configs.go @@ -0,0 +1,108 @@ +package kfake + +import ( + "strconv" + + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" +) + +func init() { regKey(32, 0, 4) } + +func (c *Cluster) handleDescribeConfigs(b *broker, kreq kmsg.Request) (kmsg.Response, error) { + req := kreq.(*kmsg.DescribeConfigsRequest) + resp := req.ResponseKind().(*kmsg.DescribeConfigsResponse) + + if err := checkReqVersion(req.Key(), req.Version); err != nil { + return nil, err + } + + doner := func(n string, t kmsg.ConfigResourceType, errCode int16) *kmsg.DescribeConfigsResponseResource { + st := kmsg.NewDescribeConfigsResponseResource() + st.ResourceName = n + st.ResourceType = t + st.ErrorCode = errCode + resp.Resources = append(resp.Resources, st) + return &resp.Resources[len(resp.Resources)-1] + } + + rfn := func(r *kmsg.DescribeConfigsResponseResource) func(k string, v *string, src kmsg.ConfigSource, sensitive bool) { + nameIdxs := make(map[string]int) + return func(k string, v *string, src kmsg.ConfigSource, sensitive bool) { + rc := kmsg.NewDescribeConfigsResponseResourceConfig() + rc.Name = k + rc.Value = v + rc.Source = src + rc.ReadOnly = rc.Source == kmsg.ConfigSourceStaticBrokerConfig + rc.IsDefault = rc.Source == kmsg.ConfigSourceDefaultConfig || rc.Source == kmsg.ConfigSourceStaticBrokerConfig + rc.IsSensitive = sensitive + + // We walk configs from static to default to dynamic, + // if this config already exists previously, we move + // the previous config to a synonym and update the + // previous config. + if idx, ok := nameIdxs[k]; ok { + prior := r.Configs[idx] + syn := kmsg.NewDescribeConfigsResponseResourceConfigConfigSynonym() + syn.Name = prior.Name + syn.Value = prior.Value + syn.Source = prior.Source + rc.ConfigSynonyms = append([]kmsg.DescribeConfigsResponseResourceConfigConfigSynonym{syn}, prior.ConfigSynonyms...) 
+ r.Configs[idx] = rc + return + } + nameIdxs[k] = len(r.Configs) + r.Configs = append(r.Configs, rc) + } + } + filter := func(rr *kmsg.DescribeConfigsRequestResource, r *kmsg.DescribeConfigsResponseResource) { + if rr.ConfigNames == nil { + return + } + names := make(map[string]struct{}) + for _, name := range rr.ConfigNames { + names[name] = struct{}{} + } + keep := r.Configs[:0] + for _, rc := range r.Configs { + if _, ok := names[rc.Name]; ok { + keep = append(keep, rc) + } + } + r.Configs = keep + } + +outer: + for i := range req.Resources { + rr := &req.Resources[i] + switch rr.ResourceType { + case kmsg.ConfigResourceTypeBroker: + id := int32(-1) + if rr.ResourceName != "" { + iid, err := strconv.Atoi(rr.ResourceName) + id = int32(iid) + if err != nil || id != b.node { + doner(rr.ResourceName, rr.ResourceType, kerr.InvalidRequest.Code) + continue outer + } + } + r := doner(rr.ResourceName, rr.ResourceType, 0) + c.brokerConfigs(id, rfn(r)) + filter(rr, r) + + case kmsg.ConfigResourceTypeTopic: + if _, ok := c.data.tps.gett(rr.ResourceName); !ok { + doner(rr.ResourceName, rr.ResourceType, kerr.UnknownTopicOrPartition.Code) + continue + } + r := doner(rr.ResourceName, rr.ResourceType, 0) + c.data.configs(rr.ResourceName, rfn(r)) + filter(rr, r) + + default: + doner(rr.ResourceName, rr.ResourceType, kerr.InvalidRequest.Code) + } + } + + return resp, nil +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/33_alter_configs.go b/vendor/github.com/twmb/franz-go/pkg/kfake/33_alter_configs.go new file mode 100644 index 00000000000..76e1e5fb576 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kfake/33_alter_configs.go @@ -0,0 +1,92 @@ +package kfake + +import ( + "strconv" + + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" +) + +func init() { regKey(33, 0, 2) } + +func (c *Cluster) handleAlterConfigs(b *broker, kreq kmsg.Request) (kmsg.Response, error) { + req := kreq.(*kmsg.AlterConfigsRequest) + resp := req.ResponseKind().(*kmsg.AlterConfigsResponse) + + if err := checkReqVersion(req.Key(), req.Version); err != nil { + return nil, err + } + + doner := func(n string, t kmsg.ConfigResourceType, errCode int16) *kmsg.AlterConfigsResponseResource { + st := kmsg.NewAlterConfigsResponseResource() + st.ResourceName = n + st.ResourceType = t + st.ErrorCode = errCode + resp.Resources = append(resp.Resources, st) + return &resp.Resources[len(resp.Resources)-1] + } + +outer: + for i := range req.Resources { + rr := &req.Resources[i] + switch rr.ResourceType { + case kmsg.ConfigResourceTypeBroker: + id := int32(-1) + if rr.ResourceName != "" { + iid, err := strconv.Atoi(rr.ResourceName) + id = int32(iid) + if err != nil || id != b.node { + doner(rr.ResourceName, rr.ResourceType, kerr.InvalidRequest.Code) + continue outer + } + } + var invalid bool + for i := range rr.Configs { + rc := &rr.Configs[i] + invalid = invalid || !c.setBrokerConfig(rc.Name, rc.Value, true) + } + if invalid { + doner(rr.ResourceName, rr.ResourceType, kerr.InvalidRequest.Code) + continue + } + doner(rr.ResourceName, rr.ResourceType, 0) + if req.ValidateOnly { + continue + } + c.bcfgs = make(map[string]*string) + for i := range rr.Configs { + rc := &rr.Configs[i] + c.setBrokerConfig(rc.Name, rc.Value, false) + } + + case kmsg.ConfigResourceTypeTopic: + if _, ok := c.data.tps.gett(rr.ResourceName); !ok { + doner(rr.ResourceName, rr.ResourceType, kerr.UnknownTopicOrPartition.Code) + continue + } + var invalid bool + for i := range rr.Configs { + rc := &rr.Configs[i] + invalid = invalid || 
!c.data.setTopicConfig(rr.ResourceName, rc.Name, rc.Value, true) + } + if invalid { + doner(rr.ResourceName, rr.ResourceType, kerr.InvalidRequest.Code) + continue + } + doner(rr.ResourceName, rr.ResourceType, 0) + if req.ValidateOnly { + continue + } + delete(c.data.tcfgs, rr.ResourceName) + for i := range rr.Configs { + rc := &rr.Configs[i] + c.data.setTopicConfig(rr.ResourceName, rc.Name, rc.Value, false) + } + + default: + doner(rr.ResourceName, rr.ResourceType, kerr.InvalidRequest.Code) + } + } + + return resp, nil +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/34_alter_replica_log_dirs.go b/vendor/github.com/twmb/franz-go/pkg/kfake/34_alter_replica_log_dirs.go new file mode 100644 index 00000000000..8031447c327 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kfake/34_alter_replica_log_dirs.go @@ -0,0 +1,53 @@ +package kfake + +import ( + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" +) + +func init() { regKey(34, 0, 2) } + +func (c *Cluster) handleAlterReplicaLogDirs(b *broker, kreq kmsg.Request) (kmsg.Response, error) { + req := kreq.(*kmsg.AlterReplicaLogDirsRequest) + resp := req.ResponseKind().(*kmsg.AlterReplicaLogDirsResponse) + + if err := checkReqVersion(req.Key(), req.Version); err != nil { + return nil, err + } + + tidx := make(map[string]int) + donet := func(t string, errCode int16) *kmsg.AlterReplicaLogDirsResponseTopic { + if i, ok := tidx[t]; ok { + return &resp.Topics[i] + } + tidx[t] = len(resp.Topics) + st := kmsg.NewAlterReplicaLogDirsResponseTopic() + st.Topic = t + resp.Topics = append(resp.Topics, st) + return &resp.Topics[len(resp.Topics)-1] + } + donep := func(t string, p int32, errCode int16) *kmsg.AlterReplicaLogDirsResponseTopicPartition { + sp := kmsg.NewAlterReplicaLogDirsResponseTopicPartition() + sp.Partition = p + sp.ErrorCode = errCode + st := donet(t, 0) + st.Partitions = append(st.Partitions, sp) + return &st.Partitions[len(st.Partitions)-1] + } + + for _, rd := range req.Dirs { + for _, t := range rd.Topics { + for _, p := range t.Partitions { + d, ok := c.data.tps.getp(t.Topic, p) + if !ok { + donep(t.Topic, p, kerr.UnknownTopicOrPartition.Code) + continue + } + d.dir = rd.Dir + donep(t.Topic, p, 0) + } + } + } + + return resp, nil +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/35_describe_log_dirs.go b/vendor/github.com/twmb/franz-go/pkg/kfake/35_describe_log_dirs.go new file mode 100644 index 00000000000..e1ea9b8a9f0 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kfake/35_describe_log_dirs.go @@ -0,0 +1,70 @@ +package kfake + +import ( + "github.com/twmb/franz-go/pkg/kmsg" +) + +func init() { regKey(35, 0, 4) } + +func (c *Cluster) handleDescribeLogDirs(b *broker, kreq kmsg.Request) (kmsg.Response, error) { + req := kreq.(*kmsg.DescribeLogDirsRequest) + resp := req.ResponseKind().(*kmsg.DescribeLogDirsResponse) + + if err := checkReqVersion(req.Key(), req.Version); err != nil { + return nil, err + } + + totalSpace := make(map[string]int64) + individual := make(map[string]map[string]map[int32]int64) + + add := func(d string, t string, p int32, s int64) { + totalSpace[d] += s + ts, ok := individual[d] + if !ok { + ts = make(map[string]map[int32]int64) + individual[d] = ts + } + ps, ok := ts[t] + if !ok { + ps = make(map[int32]int64) + ts[t] = ps + } + ps[p] += s + } + + if req.Topics == nil { + c.data.tps.each(func(t string, p int32, d *partData) { + add(d.dir, t, p, d.nbytes) + }) + } else { + for _, t := range req.Topics { + for _, p := range t.Partitions { + d, ok := 
c.data.tps.getp(t.Topic, p) + if ok { + add(d.dir, t.Topic, p, d.nbytes) + } + } + } + } + + for dir, ts := range individual { + rd := kmsg.NewDescribeLogDirsResponseDir() + rd.Dir = dir + rd.TotalBytes = totalSpace[dir] + rd.UsableBytes = 32 << 30 + for t, ps := range ts { + rt := kmsg.NewDescribeLogDirsResponseDirTopic() + rt.Topic = t + for p, s := range ps { + rp := kmsg.NewDescribeLogDirsResponseDirTopicPartition() + rp.Partition = p + rp.Size = s + rt.Partitions = append(rt.Partitions, rp) + } + rd.Topics = append(rd.Topics, rt) + } + resp.Dirs = append(resp.Dirs, rd) + } + + return resp, nil +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/36_sasl_authenticate.go b/vendor/github.com/twmb/franz-go/pkg/kfake/36_sasl_authenticate.go new file mode 100644 index 00000000000..b94d2f0118f --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kfake/36_sasl_authenticate.go @@ -0,0 +1,83 @@ +package kfake + +import ( + "errors" + + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" +) + +func init() { regKey(36, 0, 2) } + +func (c *Cluster) handleSASLAuthenticate(creq *clientReq) (kmsg.Response, error) { + req := creq.kreq.(*kmsg.SASLAuthenticateRequest) + resp := req.ResponseKind().(*kmsg.SASLAuthenticateResponse) + + if err := checkReqVersion(req.Key(), req.Version); err != nil { + return nil, err + } + + switch creq.cc.saslStage { + default: + resp.ErrorCode = kerr.IllegalSaslState.Code + return resp, nil + + case saslStageAuthPlain: + u, p, err := saslSplitPlain(req.SASLAuthBytes) + if err != nil { + return nil, err + } + if c.sasls.plain == nil { + return nil, errors.New("invalid sasl") + } + if p != c.sasls.plain[u] { + return nil, errors.New("invalid sasl") + } + creq.cc.saslStage = saslStageComplete + + case saslStageAuthScram0_256: + c0, err := scramParseClient0(req.SASLAuthBytes) + if err != nil { + return nil, err + } + if c.sasls.scram256 == nil { + return nil, errors.New("invalid sasl") + } + a, ok := c.sasls.scram256[c0.user] + if !ok { + return nil, errors.New("invalid sasl") + } + s0, serverFirst := scramServerFirst(c0, a) + resp.SASLAuthBytes = serverFirst + creq.cc.saslStage = saslStageAuthScram1 + creq.cc.s0 = &s0 + + case saslStageAuthScram0_512: + c0, err := scramParseClient0(req.SASLAuthBytes) + if err != nil { + return nil, err + } + if c.sasls.scram512 == nil { + return nil, errors.New("invalid sasl") + } + a, ok := c.sasls.scram512[c0.user] + if !ok { + return nil, errors.New("invalid sasl") + } + s0, serverFirst := scramServerFirst(c0, a) + resp.SASLAuthBytes = serverFirst + creq.cc.saslStage = saslStageAuthScram1 + creq.cc.s0 = &s0 + + case saslStageAuthScram1: + serverFinal, err := creq.cc.s0.serverFinal(req.SASLAuthBytes) + if err != nil { + return nil, err + } + resp.SASLAuthBytes = serverFinal + creq.cc.saslStage = saslStageComplete + creq.cc.s0 = nil + } + + return resp, nil +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/37_create_partitions.go b/vendor/github.com/twmb/franz-go/pkg/kfake/37_create_partitions.go new file mode 100644 index 00000000000..bd4954ef587 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kfake/37_create_partitions.go @@ -0,0 +1,66 @@ +package kfake + +import ( + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" +) + +func init() { regKey(37, 0, 3) } + +func (c *Cluster) handleCreatePartitions(b *broker, kreq kmsg.Request) (kmsg.Response, error) { + req := kreq.(*kmsg.CreatePartitionsRequest) + resp := req.ResponseKind().(*kmsg.CreatePartitionsResponse) + + if err 
:= checkReqVersion(req.Key(), req.Version); err != nil { + return nil, err + } + + donet := func(t string, errCode int16) *kmsg.CreatePartitionsResponseTopic { + st := kmsg.NewCreatePartitionsResponseTopic() + st.Topic = t + st.ErrorCode = errCode + resp.Topics = append(resp.Topics, st) + return &resp.Topics[len(resp.Topics)-1] + } + donets := func(errCode int16) { + for _, rt := range req.Topics { + donet(rt.Topic, errCode) + } + } + + if b != c.controller { + donets(kerr.NotController.Code) + return resp, nil + } + + uniq := make(map[string]struct{}) + for _, rt := range req.Topics { + if _, ok := uniq[rt.Topic]; ok { + donets(kerr.InvalidRequest.Code) + return resp, nil + } + uniq[rt.Topic] = struct{}{} + } + + for _, rt := range req.Topics { + t, ok := c.data.tps.gett(rt.Topic) + if !ok { + donet(rt.Topic, kerr.UnknownTopicOrPartition.Code) + continue + } + if len(rt.Assignment) > 0 { + donet(rt.Topic, kerr.InvalidReplicaAssignment.Code) + continue + } + if rt.Count < int32(len(t)) { + donet(rt.Topic, kerr.InvalidPartitions.Code) + continue + } + for i := int32(len(t)); i < rt.Count; i++ { + c.data.tps.mkp(rt.Topic, i, c.newPartData) + } + donet(rt.Topic, 0) + } + + return resp, nil +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/42_delete_groups.go b/vendor/github.com/twmb/franz-go/pkg/kfake/42_delete_groups.go new file mode 100644 index 00000000000..682574157d6 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kfake/42_delete_groups.go @@ -0,0 +1,17 @@ +package kfake + +import ( + "github.com/twmb/franz-go/pkg/kmsg" +) + +func init() { regKey(42, 0, 2) } + +func (c *Cluster) handleDeleteGroups(creq *clientReq) (kmsg.Response, error) { + req := creq.kreq.(*kmsg.DeleteGroupsRequest) + + if err := checkReqVersion(req.Key(), req.Version); err != nil { + return nil, err + } + + return c.groups.handleDelete(creq), nil +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/44_incremental_alter_configs.go b/vendor/github.com/twmb/franz-go/pkg/kfake/44_incremental_alter_configs.go new file mode 100644 index 00000000000..a8096bcfc9f --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kfake/44_incremental_alter_configs.go @@ -0,0 +1,112 @@ +package kfake + +import ( + "strconv" + + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" +) + +func init() { regKey(44, 0, 1) } + +func (c *Cluster) handleIncrementalAlterConfigs(b *broker, kreq kmsg.Request) (kmsg.Response, error) { + req := kreq.(*kmsg.IncrementalAlterConfigsRequest) + resp := req.ResponseKind().(*kmsg.IncrementalAlterConfigsResponse) + + if err := checkReqVersion(req.Key(), req.Version); err != nil { + return nil, err + } + + doner := func(n string, t kmsg.ConfigResourceType, errCode int16) *kmsg.IncrementalAlterConfigsResponseResource { + st := kmsg.NewIncrementalAlterConfigsResponseResource() + st.ResourceName = n + st.ResourceType = t + st.ErrorCode = errCode + resp.Resources = append(resp.Resources, st) + return &resp.Resources[len(resp.Resources)-1] + } + +outer: + for i := range req.Resources { + rr := &req.Resources[i] + switch rr.ResourceType { + case kmsg.ConfigResourceTypeBroker: + id := int32(-1) + if rr.ResourceName != "" { + iid, err := strconv.Atoi(rr.ResourceName) + id = int32(iid) + if err != nil || id != b.node { + doner(rr.ResourceName, rr.ResourceType, kerr.InvalidRequest.Code) + continue outer + } + } + var invalid bool + for i := range rr.Configs { + rc := &rr.Configs[i] + switch rc.Op { + case kmsg.IncrementalAlterConfigOpSet: + invalid = invalid || 
!c.setBrokerConfig(rr.Configs[i].Name, rr.Configs[i].Value, true) + case kmsg.IncrementalAlterConfigOpDelete: + default: + invalid = true + } + } + if invalid { + doner(rr.ResourceName, rr.ResourceType, kerr.InvalidRequest.Code) + continue + } + doner(rr.ResourceName, rr.ResourceType, 0) + if req.ValidateOnly { + continue + } + for i := range rr.Configs { + rc := &rr.Configs[i] + switch rc.Op { + case kmsg.IncrementalAlterConfigOpSet: + c.setBrokerConfig(rr.Configs[i].Name, rr.Configs[i].Value, false) + case kmsg.IncrementalAlterConfigOpDelete: + delete(c.bcfgs, rc.Name) + } + } + + case kmsg.ConfigResourceTypeTopic: + if _, ok := c.data.tps.gett(rr.ResourceName); !ok { + doner(rr.ResourceName, rr.ResourceType, kerr.UnknownTopicOrPartition.Code) + continue + } + var invalid bool + for i := range rr.Configs { + rc := &rr.Configs[i] + switch rc.Op { + case kmsg.IncrementalAlterConfigOpSet: + invalid = invalid || !c.data.setTopicConfig(rr.ResourceName, rc.Name, rc.Value, true) + case kmsg.IncrementalAlterConfigOpDelete: + default: + invalid = true + } + } + if invalid { + doner(rr.ResourceName, rr.ResourceType, kerr.InvalidRequest.Code) + continue + } + doner(rr.ResourceName, rr.ResourceType, 0) + if req.ValidateOnly { + continue + } + for i := range rr.Configs { + rc := &rr.Configs[i] + switch rc.Op { + case kmsg.IncrementalAlterConfigOpSet: + c.data.setTopicConfig(rr.ResourceName, rc.Name, rc.Value, false) + case kmsg.IncrementalAlterConfigOpDelete: + delete(c.data.tcfgs[rr.ResourceName], rc.Name) + } + } + + default: + doner(rr.ResourceName, rr.ResourceType, kerr.InvalidRequest.Code) + } + } + + return resp, nil +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/47_offset_delete.go b/vendor/github.com/twmb/franz-go/pkg/kfake/47_offset_delete.go new file mode 100644 index 00000000000..878e83b45a0 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kfake/47_offset_delete.go @@ -0,0 +1,23 @@ +package kfake + +import ( + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" +) + +func init() { regKey(47, 0, 0) } + +func (c *Cluster) handleOffsetDelete(creq *clientReq) (kmsg.Response, error) { + req := creq.kreq.(*kmsg.OffsetDeleteRequest) + resp := req.ResponseKind().(*kmsg.OffsetDeleteResponse) + + if err := checkReqVersion(req.Key(), req.Version); err != nil { + return nil, err + } + + if c.groups.handleOffsetDelete(creq) { + return nil, nil + } + resp.ErrorCode = kerr.GroupIDNotFound.Code + return resp, nil +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/50_describe_user_scram_credentials.go b/vendor/github.com/twmb/franz-go/pkg/kfake/50_describe_user_scram_credentials.go new file mode 100644 index 00000000000..0cb107d2103 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kfake/50_describe_user_scram_credentials.go @@ -0,0 +1,68 @@ +package kfake + +import ( + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" +) + +func init() { regKey(50, 0, 0) } + +func (c *Cluster) handleDescribeUserSCRAMCredentials(kreq kmsg.Request) (kmsg.Response, error) { + var ( + req = kreq.(*kmsg.DescribeUserSCRAMCredentialsRequest) + resp = req.ResponseKind().(*kmsg.DescribeUserSCRAMCredentialsResponse) + ) + + if err := checkReqVersion(req.Key(), req.Version); err != nil { + return nil, err + } + + describe := make(map[string]bool) // if true, user was duplicated + for _, u := range req.Users { + if _, ok := describe[u.Name]; ok { + describe[u.Name] = true + } else { + describe[u.Name] = false + } + } + if req.Users == nil { // null returns
all + for u := range c.sasls.scram256 { + describe[u] = false + } + for u := range c.sasls.scram512 { + describe[u] = false + } + } + + addr := func(u string) *kmsg.DescribeUserSCRAMCredentialsResponseResult { + sr := kmsg.NewDescribeUserSCRAMCredentialsResponseResult() + sr.User = u + resp.Results = append(resp.Results, sr) + return &resp.Results[len(resp.Results)-1] + } + + for u, duplicated := range describe { + sr := addr(u) + if duplicated { + sr.ErrorCode = kerr.DuplicateResource.Code + continue + } + if a, ok := c.sasls.scram256[u]; ok { + ci := kmsg.NewDescribeUserSCRAMCredentialsResponseResultCredentialInfo() + ci.Mechanism = 1 + ci.Iterations = int32(a.iterations) + sr.CredentialInfos = append(sr.CredentialInfos, ci) + } + if a, ok := c.sasls.scram512[u]; ok { + ci := kmsg.NewDescribeUserSCRAMCredentialsResponseResultCredentialInfo() + ci.Mechanism = 2 + ci.Iterations = int32(a.iterations) + sr.CredentialInfos = append(sr.CredentialInfos, ci) + } + if len(sr.CredentialInfos) == 0 { + sr.ErrorCode = kerr.ResourceNotFound.Code + } + } + + return resp, nil +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/51_alter_user_scram_credentials.go b/vendor/github.com/twmb/franz-go/pkg/kfake/51_alter_user_scram_credentials.go new file mode 100644 index 00000000000..7f853b5618f --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kfake/51_alter_user_scram_credentials.go @@ -0,0 +1,124 @@ +package kfake + +import ( + "bytes" + + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" +) + +func init() { regKey(51, 0, 0) } + +func (c *Cluster) handleAlterUserSCRAMCredentials(b *broker, kreq kmsg.Request) (kmsg.Response, error) { + var ( + req = kreq.(*kmsg.AlterUserSCRAMCredentialsRequest) + resp = req.ResponseKind().(*kmsg.AlterUserSCRAMCredentialsResponse) + ) + + if err := checkReqVersion(req.Key(), req.Version); err != nil { + return nil, err + } + + addr := func(u string) *kmsg.AlterUserSCRAMCredentialsResponseResult { + sr := kmsg.NewAlterUserSCRAMCredentialsResponseResult() + sr.User = u + resp.Results = append(resp.Results, sr) + return &resp.Results[len(resp.Results)-1] + } + doneu := func(u string, code int16) *kmsg.AlterUserSCRAMCredentialsResponseResult { + sr := addr(u) + sr.ErrorCode = code + return sr + } + + users := make(map[string]int16) + + // Validate everything up front, keeping track of all (and duplicate) + // users. If we are not controller, we fail with our users map. + for _, d := range req.Deletions { + if d.Name == "" { + users[d.Name] = kerr.UnacceptableCredential.Code + continue + } + if d.Mechanism != 1 && d.Mechanism != 2 { + users[d.Name] = kerr.UnsupportedSaslMechanism.Code + continue + } + users[d.Name] = 0 + } + for _, u := range req.Upsertions { + if u.Name == "" || u.Iterations < 4096 || u.Iterations > 16384 { // Kafka min/max + users[u.Name] = kerr.UnacceptableCredential.Code + continue + } + if u.Mechanism != 1 && u.Mechanism != 2 { + users[u.Name] = kerr.UnsupportedSaslMechanism.Code + continue + } + if code, deleting := users[u.Name]; deleting && code == 0 { + users[u.Name] = kerr.DuplicateResource.Code + continue + } + users[u.Name] = 0 + } + + if b != c.controller { + for u := range users { + doneu(u, kerr.NotController.Code) + } + return resp, nil + } + + // Add anything that failed validation. + for u, code := range users { + if code != 0 { + doneu(u, code) + } + } + + // Process all deletions, adding ResourceNotFound as necessary. 
+ for _, d := range req.Deletions { + if users[d.Name] != 0 { + continue + } + m := c.sasls.scram256 + if d.Mechanism == 2 { + m = c.sasls.scram512 + } + if m == nil { + doneu(d.Name, kerr.ResourceNotFound.Code) + continue + } + if _, ok := m[d.Name]; !ok { + doneu(d.Name, kerr.ResourceNotFound.Code) + continue + } + delete(m, d.Name) + doneu(d.Name, 0) + } + + // Process all upsertions. + for _, u := range req.Upsertions { + if users[u.Name] != 0 { + continue + } + m := &c.sasls.scram256 + mech := saslScram256 + if u.Mechanism == 2 { + m = &c.sasls.scram512 + mech = saslScram512 + } + if *m == nil { + *m = make(map[string]scramAuth) + } + (*m)[u.Name] = scramAuth{ + mechanism: mech, + iterations: int(u.Iterations), + saltedPass: bytes.Clone(u.SaltedPassword), + salt: bytes.Clone(u.Salt), + } + doneu(u.Name, 0) + } + + return resp, nil +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/LICENSE b/vendor/github.com/twmb/franz-go/pkg/kfake/LICENSE new file mode 100644 index 00000000000..36e18034325 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kfake/LICENSE @@ -0,0 +1,24 @@ +Copyright 2020, Travis Bischel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the library nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
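The two user-SCRAM handlers above are easiest to exercise end to end with a franz-go client and kmsg types. A minimal sketch, assuming a kfake cluster is already listening on 127.0.0.1:9092; the user name, salt, and salted-password bytes are illustrative only:

	package main

	import (
		"context"
		"fmt"

		"github.com/twmb/franz-go/pkg/kgo"
		"github.com/twmb/franz-go/pkg/kmsg"
	)

	func main() {
		cl, err := kgo.NewClient(kgo.SeedBrokers("127.0.0.1:9092"))
		if err != nil {
			panic(err)
		}
		defer cl.Close()

		// Upsert a SCRAM-SHA-512 credential (mechanism 2, per the handler above).
		req := kmsg.NewPtrAlterUserSCRAMCredentialsRequest()
		up := kmsg.NewAlterUserSCRAMCredentialsRequestUpsertion()
		up.Name = "user"
		up.Mechanism = 2
		up.Iterations = 4096 // the handler rejects anything outside [4096, 16384]
		up.Salt = []byte("salt")
		up.SaltedPassword = []byte("salted-password")
		req.Upsertions = append(req.Upsertions, up)

		resp, err := req.RequestWith(context.Background(), cl)
		if err != nil {
			panic(err)
		}
		for _, r := range resp.Results {
			fmt.Println(r.User, r.ErrorCode) // 0 on success
		}
	}

If the upsert succeeds, a DescribeUserSCRAMCredentialsRequest for the same user reports one mechanism-2 credential with the stored iteration count, per the describe handler above.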
diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/NOTES b/vendor/github.com/twmb/franz-go/pkg/kfake/NOTES new file mode 100644 index 00000000000..fd7583da67e --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kfake/NOTES @@ -0,0 +1,62 @@ +ORDER + +BASIC +x Produce +x Metadata +x CreateTopics +x InitProducerID +x ListOffsets +x Fetch +x DeleteTopics +x CreatePartitions + +GROUPS +x OffsetCommit +x OffsetFetch +x FindCoordinator +x JoinGroup +x Heartbeat +x LeaveGroup +x SyncGroup +x DescribeGroups +x ListGroups +x DeleteGroups + +MISC +x OffsetForLeaderEpoch + +SASL +x SaslHandshake +x SaslAuthenticate +x DescribeUserScramCredentials +x AlterUserScramCredentials + +LOW-PRIO +x DeleteRecords +x DescribeConfigs +x AlterConfigs +x IncrementalAlterConfigs +x OffsetDelete +x AlterReplicaLogDirs +x DescribeLogDirs + +TXNS +* AddPartitionsToTxn +* AddOffsetsToTxn +* EndTxn +* TxnOffsetCommit + +ACLS +* DescribeACLs +* CreateACLs +* DeleteACLs + +LOWER-PRIO +* DescribeProducers +* DescribeTransactions +* ListTransactions +* AlterPartitionAssignments +* ListPartitionReassignments +* DescribeClientQuotas +* AlterClientQuotas +DTOKEN: ignore diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/client_conn.go b/vendor/github.com/twmb/franz-go/pkg/kfake/client_conn.go new file mode 100644 index 00000000000..a78b574f3f6 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kfake/client_conn.go @@ -0,0 +1,182 @@ +package kfake + +import ( + "encoding/binary" + "io" + "net" + "time" + + "github.com/twmb/franz-go/pkg/kbin" + "github.com/twmb/franz-go/pkg/kmsg" +) + +type ( + clientConn struct { + c *Cluster + b *broker + conn net.Conn + respCh chan clientResp + + saslStage saslStage + s0 *scramServer0 + } + + clientReq struct { + cc *clientConn + kreq kmsg.Request + at time.Time + cid string + corr int32 + seq uint32 + } + + clientResp struct { + kresp kmsg.Response + corr int32 + err error + seq uint32 + } +) + +func (creq *clientReq) empty() bool { return creq == nil || creq.cc == nil || creq.kreq == nil } + +func (cc *clientConn) read() { + defer cc.conn.Close() + + type read struct { + body []byte + err error + } + var ( + who = cc.conn.RemoteAddr() + size = make([]byte, 4) + readCh = make(chan read, 1) + seq uint32 + ) + for { + go func() { + if _, err := io.ReadFull(cc.conn, size); err != nil { + readCh <- read{err: err} + return + } + body := make([]byte, binary.BigEndian.Uint32(size)) + _, err := io.ReadFull(cc.conn, body) + readCh <- read{body: body, err: err} + }() + + var read read + select { + case <-cc.c.die: + return + case read = <-readCh: + } + + if err := read.err; err != nil { + cc.c.cfg.logger.Logf(LogLevelDebug, "client %s disconnected from read: %v", who, err) + return + } + + var ( + body = read.body + reader = kbin.Reader{Src: body} + key = reader.Int16() + version = reader.Int16() + corr = reader.Int32() + clientID = reader.NullableString() + kreq = kmsg.RequestForKey(key) + ) + kreq.SetVersion(version) + if kreq.IsFlexible() { + kmsg.SkipTags(&reader) + } + if err := kreq.ReadFrom(reader.Src); err != nil { + cc.c.cfg.logger.Logf(LogLevelDebug, "client %s unable to parse request: %v", who, err) + return + } + + // Within Kafka, a null client ID is treated as an empty string. 
+ var cid string + if clientID != nil { + cid = *clientID + } + + select { + case cc.c.reqCh <- &clientReq{cc, kreq, time.Now(), cid, corr, seq}: + seq++ + case <-cc.c.die: + return + } + } +} + +func (cc *clientConn) write() { + defer cc.conn.Close() + + var ( + who = cc.conn.RemoteAddr() + writeCh = make(chan error, 1) + buf []byte + seq uint32 + + // If a request is by necessity slow (join&sync), and the + // client sends another request down the same conn, we can + // actually handle them out of order because group state is + // managed independently in its own loop. To ensure + // serialization, we capture out of order responses and only + // send them once the prior requests are replied to. + // + // (this is also why there is a seq in the clientReq) + oooresp = make(map[uint32]clientResp) + ) + for { + resp, ok := oooresp[seq] + if !ok { + select { + case resp = <-cc.respCh: + if resp.seq != seq { + oooresp[resp.seq] = resp + continue + } + seq = resp.seq + 1 + case <-cc.c.die: + return + } + } else { + delete(oooresp, seq) + seq++ + } + if err := resp.err; err != nil { + cc.c.cfg.logger.Logf(LogLevelInfo, "client %s request unable to be handled: %v", who, err) + return + } + + // Size, corr, and empty tag section if flexible: 9 bytes max. + buf = append(buf[:0], 0, 0, 0, 0, 0, 0, 0, 0, 0) + buf = resp.kresp.AppendTo(buf) + + start := 0 + l := len(buf) - 4 + if !resp.kresp.IsFlexible() || resp.kresp.Key() == 18 { + l-- + start++ + } + binary.BigEndian.PutUint32(buf[start:], uint32(l)) + binary.BigEndian.PutUint32(buf[start+4:], uint32(resp.corr)) + + go func() { + _, err := cc.conn.Write(buf[start:]) + writeCh <- err + }() + + var err error + select { + case <-cc.c.die: + return + case err = <-writeCh: + } + if err != nil { + cc.c.cfg.logger.Logf(LogLevelDebug, "client %s disconnected from write: %v", who, err) + return + } + } +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/cluster.go b/vendor/github.com/twmb/franz-go/pkg/kfake/cluster.go new file mode 100644 index 00000000000..3720cf61e7d --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kfake/cluster.go @@ -0,0 +1,1086 @@ +package kfake + +import ( + "crypto/tls" + "errors" + "fmt" + "math/rand" + "net" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/twmb/franz-go/pkg/kmsg" +) + +// TODO +// +// * Add raft and make the brokers independent +// +// * Support multiple replicas -- we just pass this through + +type ( + + // Cluster is a mock Kafka broker cluster. + Cluster struct { + cfg cfg + + controller *broker + bs []*broker + + coordinatorGen atomic.Uint64 + + adminCh chan func() + reqCh chan *clientReq + wakeCh chan *slept + watchFetchCh chan *watchFetch + + controlMu sync.Mutex + control map[int16]map[*controlCtx]struct{} + currentBroker *broker + currentControl *controlCtx + sleeping map[*clientConn]*bsleep + controlSleep chan sleepChs + + data data + pids pids + groups groups + sasls sasls + bcfgs map[string]*string + + die chan struct{} + dead atomic.Bool + } + + broker struct { + c *Cluster + ln net.Listener + node int32 + bsIdx int + } + + controlFn func(kmsg.Request) (kmsg.Response, error, bool) + + controlCtx struct { + key int16 + fn controlFn + keep bool + drop bool + lastReq map[*clientConn]*clientReq // used to not re-run requests that slept, see doc comments below + } + + controlResp struct { + kresp kmsg.Response + err error + handled bool + } +) + +// MustCluster is like NewCluster, but panics on error. +func MustCluster(opts ...Opt) *Cluster { + c, err := NewCluster(opts...) 
+ if err != nil { + panic(err) + } + return c +} + +// NewCluster returns a new mocked Kafka cluster. +func NewCluster(opts ...Opt) (*Cluster, error) { + cfg := cfg{ + nbrokers: 3, + logger: new(nopLogger), + clusterID: "kfake", + defaultNumParts: 10, + + minSessionTimeout: 6 * time.Second, + maxSessionTimeout: 5 * time.Minute, + + sasls: make(map[struct{ m, u string }]string), + } + for _, opt := range opts { + opt.apply(&cfg) + } + if len(cfg.ports) > 0 { + cfg.nbrokers = len(cfg.ports) + } + + c := &Cluster{ + cfg: cfg, + + adminCh: make(chan func()), + reqCh: make(chan *clientReq, 20), + wakeCh: make(chan *slept, 10), + watchFetchCh: make(chan *watchFetch, 20), + control: make(map[int16]map[*controlCtx]struct{}), + controlSleep: make(chan sleepChs, 1), + + sleeping: make(map[*clientConn]*bsleep), + + data: data{ + id2t: make(map[uuid]string), + t2id: make(map[string]uuid), + treplicas: make(map[string]int), + tcfgs: make(map[string]map[string]*string), + }, + bcfgs: make(map[string]*string), + + die: make(chan struct{}), + } + c.data.c = c + c.groups.c = c + var err error + defer func() { + if err != nil { + c.Close() + } + }() + + for mu, p := range cfg.sasls { + switch mu.m { + case saslPlain: + if c.sasls.plain == nil { + c.sasls.plain = make(map[string]string) + } + c.sasls.plain[mu.u] = p + case saslScram256: + if c.sasls.scram256 == nil { + c.sasls.scram256 = make(map[string]scramAuth) + } + c.sasls.scram256[mu.u] = newScramAuth(saslScram256, p) + case saslScram512: + if c.sasls.scram512 == nil { + c.sasls.scram512 = make(map[string]scramAuth) + } + c.sasls.scram512[mu.u] = newScramAuth(saslScram512, p) + default: + return nil, fmt.Errorf("unknown SASL mechanism %v", mu.m) + } + } + cfg.sasls = nil + + if cfg.enableSASL && c.sasls.empty() { + c.sasls.scram256 = map[string]scramAuth{ + "admin": newScramAuth(saslScram256, "admin"), + } + } + + for i := 0; i < cfg.nbrokers; i++ { + var port int + if len(cfg.ports) > 0 { + port = cfg.ports[i] + } + var ln net.Listener + ln, err = newListener(port, c.cfg.tls) + if err != nil { + return nil, err + } + b := &broker{ + c: c, + ln: ln, + node: int32(i), + bsIdx: len(c.bs), + } + c.bs = append(c.bs, b) + defer func() { go b.listen() }() + } + c.controller = c.bs[len(c.bs)-1] + + seedTopics := make(map[string]int32) + for _, sts := range cfg.seedTopics { + p := sts.p + if p < 1 { + p = int32(cfg.defaultNumParts) + } + for _, t := range sts.ts { + seedTopics[t] = p + } + } + for t, p := range seedTopics { + c.data.mkt(t, int(p), -1, nil) + } + + go c.run() + + return c, nil +} + +// ListenAddrs returns the hostports that the cluster is listening on. +func (c *Cluster) ListenAddrs() []string { + var addrs []string + c.admin(func() { + for _, b := range c.bs { + addrs = append(addrs, b.ln.Addr().String()) + } + }) + return addrs +} + +// Close shuts down the cluster. 
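+// Close is safe to call multiple times: an atomic dead flag guards shutdown,
+// so only the first call closes the die channel and the broker listeners.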
+func (c *Cluster) Close() { + if c.dead.Swap(true) { + return + } + close(c.die) + for _, b := range c.bs { + b.ln.Close() + } +} + +func newListener(port int, tc *tls.Config) (net.Listener, error) { + l, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", port)) + if err != nil { + return nil, err + } + if tc != nil { + l = tls.NewListener(l, tc) + } + return l, nil +} + +func (b *broker) listen() { + defer b.ln.Close() + for { + conn, err := b.ln.Accept() + if err != nil { + return + } + + cc := &clientConn{ + c: b.c, + b: b, + conn: conn, + respCh: make(chan clientResp, 2), + } + go cc.read() + go cc.write() + } +} + +func (c *Cluster) run() { +outer: + for { + var ( + creq *clientReq + w *watchFetch + s *slept + kreq kmsg.Request + kresp kmsg.Response + err error + handled bool + ) + + select { + case <-c.die: + return + + case admin := <-c.adminCh: + admin() + continue + + case creq = <-c.reqCh: + if c.cfg.sleepOutOfOrder { + break + } + // If we have any sleeping request on this node, + // we enqueue the new live request to the end and + // wait for the sleeping request to finish. + bs := c.sleeping[creq.cc] + if bs.enqueue(&slept{ + creq: creq, + waiting: true, + }) { + continue + } + + case s = <-c.wakeCh: + // On wakeup, we know we are handling a control + // function that was slept, or a request that was + // waiting for a control function to finish sleeping. + creq = s.creq + if s.waiting { + break + } + + // We continue a previously sleeping request, and + // handle results similar to tryControl. + // + // Control flow is weird here, but is described more + // fully in the finish/resleep/etc methods. + c.continueSleptControl(s) + inner: + for { + select { + case admin := <-c.adminCh: + admin() + continue inner + case res := <-s.res: + c.finishSleptControl(s) + cctx := s.cctx + s = nil + kresp, err, handled = res.kresp, res.err, res.handled + c.maybePopControl(handled, cctx) + if handled { + goto afterControl + } + break inner + case sleepChs := <-c.controlSleep: + c.resleepSleptControl(s, sleepChs) + continue outer + } + } + + case w = <-c.watchFetchCh: + if w.cleaned { + continue // already cleaned up, this is an extraneous timer fire + } + w.cleanup(c) + creq = w.creq + } + + kresp, err, handled = c.tryControl(creq) + if handled { + goto afterControl + } + + if c.cfg.enableSASL { + if allow := c.handleSASL(creq); !allow { + err = errors.New("not allowed given SASL state") + goto afterControl + } + } + + kreq = creq.kreq + switch k := kmsg.Key(kreq.Key()); k { + case kmsg.Produce: + kresp, err = c.handleProduce(creq.cc.b, kreq) + case kmsg.Fetch: + kresp, err = c.handleFetch(creq, w) + case kmsg.ListOffsets: + kresp, err = c.handleListOffsets(creq.cc.b, kreq) + case kmsg.Metadata: + kresp, err = c.handleMetadata(kreq) + case kmsg.OffsetCommit: + kresp, err = c.handleOffsetCommit(creq) + case kmsg.OffsetFetch: + kresp, err = c.handleOffsetFetch(creq) + case kmsg.FindCoordinator: + kresp, err = c.handleFindCoordinator(kreq) + case kmsg.JoinGroup: + kresp, err = c.handleJoinGroup(creq) + case kmsg.Heartbeat: + kresp, err = c.handleHeartbeat(creq) + case kmsg.LeaveGroup: + kresp, err = c.handleLeaveGroup(creq) + case kmsg.SyncGroup: + kresp, err = c.handleSyncGroup(creq) + case kmsg.DescribeGroups: + kresp, err = c.handleDescribeGroups(creq) + case kmsg.ListGroups: + kresp, err = c.handleListGroups(creq) + case kmsg.SASLHandshake: + kresp, err = c.handleSASLHandshake(creq) + case kmsg.ApiVersions: + kresp, err = c.handleApiVersions(kreq) + case kmsg.CreateTopics: + kresp, err = 
c.handleCreateTopics(creq.cc.b, kreq) + case kmsg.DeleteTopics: + kresp, err = c.handleDeleteTopics(creq.cc.b, kreq) + case kmsg.DeleteRecords: + kresp, err = c.handleDeleteRecords(creq.cc.b, kreq) + case kmsg.InitProducerID: + kresp, err = c.handleInitProducerID(kreq) + case kmsg.OffsetForLeaderEpoch: + kresp, err = c.handleOffsetForLeaderEpoch(creq.cc.b, kreq) + case kmsg.DescribeConfigs: + kresp, err = c.handleDescribeConfigs(creq.cc.b, kreq) + case kmsg.AlterConfigs: + kresp, err = c.handleAlterConfigs(creq.cc.b, kreq) + case kmsg.AlterReplicaLogDirs: + kresp, err = c.handleAlterReplicaLogDirs(creq.cc.b, kreq) + case kmsg.DescribeLogDirs: + kresp, err = c.handleDescribeLogDirs(creq.cc.b, kreq) + case kmsg.SASLAuthenticate: + kresp, err = c.handleSASLAuthenticate(creq) + case kmsg.CreatePartitions: + kresp, err = c.handleCreatePartitions(creq.cc.b, kreq) + case kmsg.DeleteGroups: + kresp, err = c.handleDeleteGroups(creq) + case kmsg.IncrementalAlterConfigs: + kresp, err = c.handleIncrementalAlterConfigs(creq.cc.b, kreq) + case kmsg.OffsetDelete: + kresp, err = c.handleOffsetDelete(creq) + case kmsg.DescribeUserSCRAMCredentials: + kresp, err = c.handleDescribeUserSCRAMCredentials(kreq) + case kmsg.AlterUserSCRAMCredentials: + kresp, err = c.handleAlterUserSCRAMCredentials(creq.cc.b, kreq) + default: + err = fmt.Errorf("unhandled key %v", k) + } + + afterControl: + // If s is non-nil, this is either a previously slept control + // that finished but was not handled, or a previously slept + // waiting request. In either case, we need to signal to the + // sleep dequeue loop to continue. + if s != nil { + s.continueDequeue <- struct{}{} + } + if kresp == nil && err == nil { // produce request with no acks, or otherwise hijacked request (group, sleep) + continue + } + + select { + case creq.cc.respCh <- clientResp{kresp: kresp, corr: creq.corr, err: err, seq: creq.seq}: + case <-c.die: + return + } + } +} + +// Control is a function to call on any client request the cluster handles. +// +// If the control function returns true, then either the response is written +// back to the client or, if the control function returns an error, the +// client connection is closed. If both returns are nil, then the cluster will +// loop continuing to read from the client and the client will likely have a +// read timeout at some point. +// +// Controlling a request drops the control function from the cluster, meaning +// that a control function can only control *one* request. To keep the control +// function handling more requests, you can call KeepControl within your +// control function. Alternatively, if you want to just run some logic in your +// control function but then have the cluster handle the request as normal, +// you can call DropControl to drop a control function that was not handled. +// +// It is safe to add new control functions within a control function. +// +// Control functions are run serially unless you use SleepControl, in which +// case multiple control functions can be "in progress" at once. Closing a +// Cluster awakens all sleeping control functions. +func (c *Cluster) Control(fn func(kmsg.Request) (kmsg.Response, error, bool)) { + c.ControlKey(-1, fn) +} + +// ControlKey is a function to call on a specific request key that the cluster +// handles. +// +// If the control function returns true, then either the response is written +// back to the client or, if the control function returns an error, the +// client connection is closed.
If both returns are nil, then the cluster will +// loop continuing to read from the client and the client will likely have a +// read timeout at some point. +// +// Controlling a request drops the control function from the cluster, meaning +// that a control function can only control *one* request. To keep the control +// function handling more requests, you can call KeepControl within your +// control function. Alternatively, if you want to just run some logic in your +// control function but then have the cluster handle the request as normal, +// you can call DropControl to drop a control function that was not handled. +// +// It is safe to add new control functions within a control function. +// +// Control functions are run serially unless you use SleepControl, in which +// case multiple control functions can be "in progress" at once. Closing a +// Cluster awakens all sleeping control functions. +func (c *Cluster) ControlKey(key int16, fn func(kmsg.Request) (kmsg.Response, error, bool)) { + c.controlMu.Lock() + defer c.controlMu.Unlock() + m := c.control[key] + if m == nil { + m = make(map[*controlCtx]struct{}) + c.control[key] = m + } + m[&controlCtx{ + key: key, + fn: fn, + lastReq: make(map[*clientConn]*clientReq), + }] = struct{}{} +} + +// KeepControl marks the currently running control function to be kept even if +// you handle the request and return true. This can be used to continuously +// control requests without needing to re-add control functions manually. +func (c *Cluster) KeepControl() { + c.controlMu.Lock() + defer c.controlMu.Unlock() + if c.currentControl != nil { + c.currentControl.keep = true + } +} + +// DropControl allows you to drop the current control function. This takes +// precedence over KeepControl. The use of this function is that you can run +// custom control logic *once*, drop the control function, and return that the +// function was not handled -- thus allowing other control functions to run, or +// allowing the kfake cluster to process the request as normal. +func (c *Cluster) DropControl() { + c.controlMu.Lock() + defer c.controlMu.Unlock() + if c.currentControl != nil { + c.currentControl.drop = true + } +} + +// SleepControl sleeps the current control function until wakeup returns. This +// yields, allowing any other connection to run. +// +// Note that per protocol, requests on the same connection must be replied to +// in order. Many clients write multiple requests to the same connection, so +// if you sleep until a different request runs, you may sleep forever -- you +// must know the semantics of your client to know whether requests run on +// different connections (or, ensure you are writing to different brokers). +// +// For example, franz-go uses a dedicated connection for: +// - produce requests +// - fetch requests +// - join&sync requests +// - requests with a Timeout field +// - all other requests +// +// So, for franz-go, there are up to five separate connections depending +// on what you are doing. +// +// You can run SleepControl multiple times in the same control function. If you +// sleep a request you are controlling, and another request of the same key +// comes in, it will run the same control function and may also sleep (i.e., +// you must have logic if you want to avoid sleeping on the same request).
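+//
+// A minimal sketch of sleeping one request (the channel and the chosen key
+// are illustrative, not part of the package):
+//
+//	wake := make(chan struct{})
+//	c.ControlKey(int16(kmsg.Metadata), func(kmsg.Request) (kmsg.Response, error, bool) {
+//		c.SleepControl(func() { <-wake }) // block this request until wake is closed
+//		return nil, nil, false            // then let the cluster handle it as normal
+//	})
+//	// ...issue a Metadata request from a client, then later:
+//	close(wake)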
+func (c *Cluster) SleepControl(wakeup func()) { + c.controlMu.Lock() + if c.currentControl == nil { + c.controlMu.Unlock() + return + } + c.controlMu.Unlock() + + sleepChs := sleepChs{ + clientWait: make(chan struct{}, 1), + clientCont: make(chan struct{}, 1), + } + go func() { + wakeup() + sleepChs.clientWait <- struct{}{} + }() + + c.controlSleep <- sleepChs + select { + case <-sleepChs.clientCont: + case <-c.die: + } +} + +// CurrentNode is solely valid from within a control function; it returns +// the broker id that the request was received by. +// If there's no request currently inflight, this returns -1. +func (c *Cluster) CurrentNode() int32 { + c.controlMu.Lock() + defer c.controlMu.Unlock() + if b := c.currentBroker; b != nil { + return b.node + } + return -1 +} + +func (c *Cluster) tryControl(creq *clientReq) (kresp kmsg.Response, err error, handled bool) { + c.controlMu.Lock() + defer c.controlMu.Unlock() + if len(c.control) == 0 { + return nil, nil, false + } + kresp, err, handled = c.tryControlKey(creq.kreq.Key(), creq) + if !handled { + kresp, err, handled = c.tryControlKey(-1, creq) + } + return kresp, err, handled +} + +func (c *Cluster) tryControlKey(key int16, creq *clientReq) (kmsg.Response, error, bool) { + for cctx := range c.control[key] { + if cctx.lastReq[creq.cc] == creq { + continue + } + cctx.lastReq[creq.cc] = creq + res := c.runControl(cctx, creq) + for { + select { + case admin := <-c.adminCh: + admin() + continue + case res := <-res: + c.maybePopControl(res.handled, cctx) + return res.kresp, res.err, res.handled + case sleepChs := <-c.controlSleep: + c.beginSleptControl(&slept{ + cctx: cctx, + sleepChs: sleepChs, + res: res, + creq: creq, + }) + return nil, nil, true + } + } + } + return nil, nil, false +} + +func (c *Cluster) runControl(cctx *controlCtx, creq *clientReq) chan controlResp { + res := make(chan controlResp, 1) + c.currentBroker = creq.cc.b + c.currentControl = cctx + // We unlock before entering a control function so that the control + // function can modify / add more control. We re-lock when exiting the + // control function. This does pose some weird control flow issues + // w.r.t. sleeping requests. Here, we have to re-lock before sending + // down res, otherwise we risk unlocking an unlocked mu in + // finishSleptControl. + c.controlMu.Unlock() + go func() { + kresp, err, handled := cctx.fn(creq.kreq) + c.controlMu.Lock() + c.currentControl = nil + c.currentBroker = nil + res <- controlResp{kresp, err, handled} + }() + return res +} + +func (c *Cluster) beginSleptControl(s *slept) { + // Control flow gets really weird here. We unlocked when entering the + // control function, so we have to re-lock now so that tryControl can + // unlock us safely. + bs := c.sleeping[s.creq.cc] + if bs == nil { + bs = &bsleep{ + c: c, + set: make(map[*slept]struct{}), + setWake: make(chan *slept, 1), + } + c.sleeping[s.creq.cc] = bs + } + bs.enqueue(s) + c.controlMu.Lock() + c.currentControl = nil + c.currentBroker = nil +} + +func (c *Cluster) continueSleptControl(s *slept) { + // When continuing a slept control, we are in the main run loop and are + // not currently under the control mu. We need to re-set the current + // broker and current control before resuming. + c.controlMu.Lock() + c.currentBroker = s.creq.cc.b + c.currentControl = s.cctx + c.controlMu.Unlock() + s.sleepChs.clientCont <- struct{}{} +} + +func (c *Cluster) finishSleptControl(s *slept) { + // When finishing a slept control, the control function exited and + // grabbed the control mu.
We clear the control, unlock, and allow the + // slept control to be dequeued. + c.currentControl = nil + c.currentBroker = nil + c.controlMu.Unlock() + s.continueDequeue <- struct{}{} +} + +func (c *Cluster) resleepSleptControl(s *slept, sleepChs sleepChs) { + // A control function previously slept and is now again sleeping. We + // need to clear the control broker / etc, update the sleep channels, + // and allow the sleep dequeueing to continue. The control function + // will not be dequeued in the loop because we updated sleepChs with + // a non-nil clientWait. + c.controlMu.Lock() + c.currentBroker = nil + c.currentControl = nil + c.controlMu.Unlock() + s.sleepChs = sleepChs + s.continueDequeue <- struct{}{} + // For OOO requests, we need to manually trigger a goroutine to + // watch for the sleep to end. + s.bs.maybeWaitOOOWake(s) +} + +func (c *Cluster) maybePopControl(handled bool, cctx *controlCtx) { + if handled && !cctx.keep || cctx.drop { + delete(c.control[cctx.key], cctx) + } +} + +// bsleep manages sleeping requests on a connection to a broker, or +// non-sleeping requests that are waiting for sleeping requests to finish. +type bsleep struct { + c *Cluster + mu sync.Mutex + queue []*slept + set map[*slept]struct{} + setWake chan *slept +} + +type slept struct { + bs *bsleep + cctx *controlCtx + sleepChs sleepChs + res <-chan controlResp + creq *clientReq + waiting bool + + continueDequeue chan struct{} +} + +type sleepChs struct { + clientWait chan struct{} + clientCont chan struct{} +} + +// enqueue has a few potential behaviors. +// +// (1) If s is waiting, this is a new request enqueueing to the back of an +// existing queue, where we are waiting for the head request to finish +// sleeping. Easy case. +// +// (2) If s is not waiting, this is a sleeping request. If the queue is empty, +// this is the first sleeping request on a node. We enqueue and start our wait +// goroutine. Easy. +// +// (3) If s is not waiting, but our queue is non-empty, this must be from a +// convoluted scenario: +// +// (a) the user has SleepOutOfOrder configured, +// (b) or, there was a request in front of us that slept, we were waiting, +// and now we ourselves are sleeping +// (c) or, we are sleeping for the second time in a single control +func (bs *bsleep) enqueue(s *slept) bool { + if bs == nil { + return false // Do not enqueue, nothing sleeping + } + s.continueDequeue = make(chan struct{}, 1) + s.bs = bs + bs.mu.Lock() + defer bs.mu.Unlock() + if s.waiting { + if bs.c.cfg.sleepOutOfOrder { + panic("enqueueing a waiting request even though we are sleeping out of order") + } + if !bs.empty() { + bs.keep(s) // Case (1) + return true + } + return false // We do not enqueue, do not wait: nothing sleeping ahead of us + } + if bs.empty() { + bs.keep(s) + go bs.wait() // Case (2) + return true + } + var q0 *slept + if !bs.c.cfg.sleepOutOfOrder { + q0 = bs.queue[0] // Case (3b) or (3c) -- just update values below + } else { + // Case (3a), out of order sleep: we need to check the entire + // queue to see if this request was already sleeping and, if + // so, update the values. If it was not already sleeping, we + // "keep" the new sleeping item. + bs.keep(s) + return true + } + if q0.creq != s.creq { + panic("internal error: sleeping request not head request") + } + // We do not update continueDequeue because it is actively being read, + // we just reuse the old value.
q0.cctx = s.cctx + q0.sleepChs = s.sleepChs + q0.res = s.res + q0.waiting = s.waiting + return true +} + +// keep stores a sleeping request to be managed. For out of order control, the +// logic is a bit more complicated and we need to watch for the control sleep +// finishing here, and forward the "I'm done sleeping" notification to waitSet. +func (bs *bsleep) keep(s *slept) { + if !bs.c.cfg.sleepOutOfOrder { + bs.queue = append(bs.queue, s) + return + } + bs.set[s] = struct{}{} + bs.maybeWaitOOOWake(s) +} + +func (bs *bsleep) maybeWaitOOOWake(s *slept) { + if !bs.c.cfg.sleepOutOfOrder { + return + } + go func() { + select { + case <-bs.c.die: + case <-s.sleepChs.clientWait: + select { + case <-bs.c.die: + case bs.setWake <- s: + } + } + }() +} + +func (bs *bsleep) empty() bool { + return len(bs.queue) == 0 && len(bs.set) == 0 +} + +func (bs *bsleep) wait() { + if bs.c.cfg.sleepOutOfOrder { + bs.waitSet() + } else { + bs.waitQueue() + } +} + +// For out of order control, all control functions run concurrently, not +// serially. Whenever they wake up, they send themselves down setWake. waitSet +// manages handling the wake up and interacting with the serial manage +// goroutine to run everything properly. +func (bs *bsleep) waitSet() { + for { + bs.mu.Lock() + if len(bs.set) == 0 { + bs.mu.Unlock() + return + } + bs.mu.Unlock() + + // Wait for a control function to awaken. + var q *slept + select { + case <-bs.c.die: + return + case q = <-bs.setWake: + q.sleepChs.clientWait = nil + } + + // Now, schedule ourselves with the run loop. + select { + case <-bs.c.die: + return + case bs.c.wakeCh <- q: + } + + // Wait for this control function to finish its loop in the run + // function. Once it does, if clientWait is non-nil, the + // control function went back to sleep. If it is nil, the + // control function is done and we remove this from tracking. + select { + case <-bs.c.die: + return + case <-q.continueDequeue: + } + if q.sleepChs.clientWait == nil { + bs.mu.Lock() + delete(bs.set, q) + bs.mu.Unlock() + } + } +} + +// For in-order control functions, the concept is slightly simpler but the +// logic flow is the same. We wait for the head control function to wake up, +// try to run it, and then wait for it to finish. The logic of this function is +// the same as waitSet, minus the middle part where we wait for something to +// wake up. +func (bs *bsleep) waitQueue() { + for { + bs.mu.Lock() + if len(bs.queue) == 0 { + bs.mu.Unlock() + return + } + q0 := bs.queue[0] + bs.mu.Unlock() + + if q0.sleepChs.clientWait != nil { + select { + case <-bs.c.die: + return + case <-q0.sleepChs.clientWait: + q0.sleepChs.clientWait = nil + } + } + + select { + case <-bs.c.die: + return + case bs.c.wakeCh <- q0: + } + + select { + case <-bs.c.die: + return + case <-q0.continueDequeue: + } + if q0.sleepChs.clientWait == nil { + bs.mu.Lock() + bs.queue = bs.queue[1:] + bs.mu.Unlock() + } + } +} + +// Various administrative requests can be passed into the cluster to simulate +// real-world operations. These are performed synchronously in the goroutine +// that handles client requests. + +func (c *Cluster) admin(fn func()) { + ofn := fn + wait := make(chan struct{}) + fn = func() { ofn(); close(wait) } + c.adminCh <- fn + <-wait +} + +// MoveTopicPartition simulates the rebalancing of a partition to an alternative +// broker. This returns an error if the topic, partition, or node does not exist.
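+//
+// For example (topic name and IDs are illustrative):
+//
+//	if err := c.MoveTopicPartition("foo", 0, 2); err != nil {
+//		// the topic, partition, or node does not exist
+//	}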
+func (c *Cluster) MoveTopicPartition(topic string, partition int32, nodeID int32) error { + var err error + c.admin(func() { + var br *broker + for _, b := range c.bs { + if b.node == nodeID { + br = b + break + } + } + if br == nil { + err = fmt.Errorf("node %d not found", nodeID) + return + } + pd, ok := c.data.tps.getp(topic, partition) + if !ok { + err = errors.New("topic/partition not found") + return + } + pd.leader = br + pd.epoch++ + }) + return err +} + +// CoordinatorFor returns the node ID of the group or transaction coordinator +// for the given key. +func (c *Cluster) CoordinatorFor(key string) int32 { + var n int32 + c.admin(func() { + l := len(c.bs) + if l == 0 { + n = -1 + return + } + n = c.coordinator(key).node + }) + return n +} + +// RehashCoordinators simulates group and transactional ID coordinators moving +// around. All group and transactional IDs are rekeyed. This forces clients to +// reload coordinators. +func (c *Cluster) RehashCoordinators() { + c.coordinatorGen.Add(1) +} + +// AddNode adds a node to the cluster. If nodeID is -1, the next node ID is +// used. If port is 0 or negative, a random port is chosen. This returns the +// added node ID and the port used, or an error if the node already exists or +// the port cannot be listened to. +func (c *Cluster) AddNode(nodeID int32, port int) (int32, int, error) { + var err error + c.admin(func() { + if nodeID >= 0 { + for _, b := range c.bs { + if b.node == nodeID { + err = fmt.Errorf("node %d already exists", nodeID) + return + } + } + } else if len(c.bs) > 0 { + // We go one higher than the max current node ID. We + // need to search all nodes because a person may have + // added and removed a bunch, with manual ID overrides. + nodeID = c.bs[0].node + for _, b := range c.bs[1:] { + if b.node > nodeID { + nodeID = b.node + } + } + nodeID++ + } else { + nodeID = 0 + } + if port < 0 { + port = 0 + } + var ln net.Listener + if ln, err = newListener(port, c.cfg.tls); err != nil { + return + } + _, strPort, _ := net.SplitHostPort(ln.Addr().String()) + port, _ = strconv.Atoi(strPort) + b := &broker{ + c: c, + ln: ln, + node: nodeID, + bsIdx: len(c.bs), + } + c.bs = append(c.bs, b) + c.cfg.nbrokers++ + c.shufflePartitionsLocked() + go b.listen() + }) + return nodeID, port, err +} + +// RemoveNode removes a node from the cluster. This returns an error if the +// node does not exist. +func (c *Cluster) RemoveNode(nodeID int32) error { + var err error + c.admin(func() { + for i, b := range c.bs { + if b.node == nodeID { + if len(c.bs) == 1 { + err = errors.New("cannot remove all brokers") + return + } + b.ln.Close() + c.cfg.nbrokers-- + c.bs[i] = c.bs[len(c.bs)-1] + c.bs[i].bsIdx = i + c.bs = c.bs[:len(c.bs)-1] + c.shufflePartitionsLocked() + return + } + } + err = fmt.Errorf("node %d not found", nodeID) + }) + return err +} + +// ShufflePartitionLeaders simulates a leader election for all partitions: all +// partitions have a randomly selected new leader and their internal epochs are +// bumped.
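+//
+// For example:
+//
+//	c.ShufflePartitionLeaders()
+//	// subsequent Metadata responses reflect the new leaders and bumped epochs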
+func (c *Cluster) ShufflePartitionLeaders() { + c.admin(func() { + c.shufflePartitionsLocked() + }) +} + +func (c *Cluster) shufflePartitionsLocked() { + c.data.tps.each(func(_ string, _ int32, p *partData) { + var leader *broker + if len(c.bs) == 0 { + leader = c.noLeader() + } else { + leader = c.bs[rand.Intn(len(c.bs))] + } + p.leader = leader + p.epoch++ + }) +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/config.go b/vendor/github.com/twmb/franz-go/pkg/kfake/config.go new file mode 100644 index 00000000000..75b34fb21f6 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kfake/config.go @@ -0,0 +1,126 @@ +package kfake + +import ( + "crypto/tls" + "time" +) + +// Opt is an option to configure a client. +type Opt interface { + apply(*cfg) +} + +type opt struct{ fn func(*cfg) } + +func (opt opt) apply(cfg *cfg) { opt.fn(cfg) } + +type seedTopics struct { + p int32 + ts []string +} + +type cfg struct { + nbrokers int + ports []int + logger Logger + clusterID string + allowAutoTopic bool + defaultNumParts int + seedTopics []seedTopics + + minSessionTimeout time.Duration + maxSessionTimeout time.Duration + + enableSASL bool + sasls map[struct{ m, u string }]string // cleared after client initialization + tls *tls.Config + + sleepOutOfOrder bool +} + +// NumBrokers sets the number of brokers to start in the fake cluster. +func NumBrokers(n int) Opt { + return opt{func(cfg *cfg) { cfg.nbrokers = n }} +} + +// Ports sets the ports to listen on, overriding randomly choosing NumBrokers +// amount of ports. +func Ports(ports ...int) Opt { + return opt{func(cfg *cfg) { cfg.ports = ports }} +} + +// WithLogger sets the logger to use. +func WithLogger(logger Logger) Opt { + return opt{func(cfg *cfg) { cfg.logger = logger }} +} + +// ClusterID sets the cluster ID to return in metadata responses. +func ClusterID(clusterID string) Opt { + return opt{func(cfg *cfg) { cfg.clusterID = clusterID }} +} + +// AllowAutoTopicCreation allows metadata requests to create topics if the +// metadata request has its AllowAutoTopicCreation field set to true. +func AllowAutoTopicCreation() Opt { + return opt{func(cfg *cfg) { cfg.allowAutoTopic = true }} +} + +// DefaultNumPartitions sets the number of partitions to create by default for +// auto created topics / CreateTopics with -1 partitions, overriding the +// default of 10. +func DefaultNumPartitions(n int) Opt { + return opt{func(cfg *cfg) { cfg.defaultNumParts = n }} +} + +// GroupMinSessionTimeout sets the cluster's minimum session timeout allowed +// for groups, overriding the default 6 seconds. +func GroupMinSessionTimeout(d time.Duration) Opt { + return opt{func(cfg *cfg) { cfg.minSessionTimeout = d }} +} + +// GroupMaxSessionTimeout sets the cluster's maximum session timeout allowed +// for groups, overriding the default 5 minutes. +func GroupMaxSessionTimeout(d time.Duration) Opt { + return opt{func(cfg *cfg) { cfg.maxSessionTimeout = d }} +} + +// EnableSASL enables SASL authentication for the cluster. If you do not +// configure a bootstrap user / pass, the default superuser is "admin" / +// "admin" with the SCRAM-SHA-256 SASL mechanisms. +func EnableSASL() Opt { + return opt{func(cfg *cfg) { cfg.enableSASL = true }} +} + +// Superuser seeds the cluster with a superuser. The method must be either +// PLAIN, SCRAM-SHA-256, or SCRAM-SHA-512. +// Note that PLAIN superusers cannot be deleted. +// SCRAM superusers can be modified with AlterUserScramCredentials. +// If you delete all SASL users, the kfake cluster will be unusable. 
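+//
+// For example (user and pass are illustrative):
+//
+//	c, err := kfake.NewCluster(
+//		kfake.EnableSASL(),
+//		kfake.Superuser("SCRAM-SHA-256", "user", "pass"),
+//	)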
+func Superuser(method, user, pass string) Opt { + return opt{func(cfg *cfg) { cfg.sasls[struct{ m, u string }{method, user}] = pass }} +} + +// TLS enables TLS for the cluster, using the provided TLS config for +// listening. +func TLS(c *tls.Config) Opt { + return opt{func(cfg *cfg) { cfg.tls = c }} +} + +// SeedTopics provides topics to create by default in the cluster. Each topic +// will use the given partitions and use the default internal replication +// factor. If you use a non-positive number for partitions, [DefaultNumPartitions] +// is used. This option can be provided multiple times if you want to seed +// topics with different partition counts. If a topic is provided in multiple +// options, the last specification wins. +func SeedTopics(partitions int32, ts ...string) Opt { + return opt{func(cfg *cfg) { cfg.seedTopics = append(cfg.seedTopics, seedTopics{partitions, ts}) }} +} + +// SleepOutOfOrder allows functions to be handled out of order when control +// functions are sleeping. The functions are handled internally out of +// order, but responses still wait for the sleeping requests to finish. This +// can be used to set up complicated chains of control where functions only +// advance when you know another request is actively being handled. +func SleepOutOfOrder() Opt { + return opt{func(cfg *cfg) { cfg.sleepOutOfOrder = true }} +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/data.go b/vendor/github.com/twmb/franz-go/pkg/kfake/data.go new file mode 100644 index 00000000000..566d51b6956 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kfake/data.go @@ -0,0 +1,345 @@ +package kfake + +import ( + "crypto/sha256" + "math/rand" + "sort" + "strconv" + "time" + + "github.com/twmb/franz-go/pkg/kmsg" +) + +// TODO +// +// * Write to disk, if configured. +// * When transactional, wait to send out data until txn committed or aborted. + +var noID uuid + +type ( + uuid [16]byte + + data struct { + c *Cluster + tps tps[partData] + + id2t map[uuid]string // topic IDs => topic name + t2id map[string]uuid // topic name => topic IDs + treplicas map[string]int // topic name => # replicas + tcfgs map[string]map[string]*string // topic name => config name => config value + } + + partData struct { + batches []partBatch + dir string + + highWatermark int64 + lastStableOffset int64 + logStartOffset int64 + epoch int32 // current epoch + maxTimestamp int64 // current max timestamp in all batches + nbytes int64 + + // abortedTxns + rf int8 + leader *broker + + watch map[*watchFetch]struct{} + + createdAt time.Time + } + + partBatch struct { + kmsg.RecordBatch + nbytes int + epoch int32 // epoch when appended + + // For list offsets, we may need to return the first offset + // after a given requested timestamp. Client provided + // timestamps can go forwards and backwards. We answer list + // offsets with a binary search: even if this batch has a small + // timestamp, this is produced _after_ a potentially higher + // timestamp, so it is after it in the list offset response. + // + // When we drop the earlier timestamp, we update all following + // firstMaxTimestamps that match the dropped timestamp.
+ maxEarlierTimestamp int64 + } +) + +func (d *data) mkt(t string, nparts int, nreplicas int, configs map[string]*string) { + if d.tps != nil { + if _, exists := d.tps[t]; exists { + panic("should have checked existence already") + } + } + var id uuid + for { + sha := sha256.Sum256([]byte(strconv.Itoa(int(time.Now().UnixNano())))) + copy(id[:], sha[:]) + if _, exists := d.id2t[id]; !exists { + break + } + } + + if nparts < 0 { + nparts = d.c.cfg.defaultNumParts + } + if nreplicas < 0 { + nreplicas = 3 // cluster default + } + d.id2t[id] = t + d.t2id[t] = id + d.treplicas[t] = nreplicas + if configs != nil { + d.tcfgs[t] = configs + } + for i := 0; i < nparts; i++ { + d.tps.mkp(t, int32(i), d.c.newPartData) + } +} + +func (c *Cluster) noLeader() *broker { + return &broker{ + c: c, + node: -1, + } +} + +func (c *Cluster) newPartData() *partData { + return &partData{ + dir: defLogDir, + leader: c.bs[rand.Intn(len(c.bs))], + watch: make(map[*watchFetch]struct{}), + createdAt: time.Now(), + } +} + +func (pd *partData) pushBatch(nbytes int, b kmsg.RecordBatch) { + maxEarlierTimestamp := b.FirstTimestamp + if maxEarlierTimestamp < pd.maxTimestamp { + maxEarlierTimestamp = pd.maxTimestamp + } else { + pd.maxTimestamp = maxEarlierTimestamp + } + b.FirstOffset = pd.highWatermark + b.PartitionLeaderEpoch = pd.epoch + pd.batches = append(pd.batches, partBatch{b, nbytes, pd.epoch, maxEarlierTimestamp}) + pd.highWatermark += int64(b.NumRecords) + pd.lastStableOffset += int64(b.NumRecords) // TODO + pd.nbytes += int64(nbytes) + for w := range pd.watch { + w.push(nbytes) + } +} + +func (pd *partData) searchOffset(o int64) (index int, found bool, atEnd bool) { + if o < pd.logStartOffset || o > pd.highWatermark { + return 0, false, false + } + if len(pd.batches) == 0 { + if o == 0 { + return 0, false, true + } + } else { + lastBatch := pd.batches[len(pd.batches)-1] + if end := lastBatch.FirstOffset + int64(lastBatch.LastOffsetDelta) + 1; end == o { + return 0, false, true + } + } + + index, found = sort.Find(len(pd.batches), func(idx int) int { + b := &pd.batches[idx] + if o < b.FirstOffset { + return -1 + } + if o >= b.FirstOffset+int64(b.LastOffsetDelta)+1 { + return 1 + } + return 0 + }) + return index, found, false +} + +func (pd *partData) trimLeft() { + for len(pd.batches) > 0 { + b0 := pd.batches[0] + finRec := b0.FirstOffset + int64(b0.LastOffsetDelta) + if finRec >= pd.logStartOffset { + return + } + pd.batches = pd.batches[1:] + pd.nbytes -= int64(b0.nbytes) + } +} + +///////////// +// CONFIGS // +///////////// + +// TODO support modifying config values changing cluster behavior + +// brokerConfigs calls fn for all: +// - static broker configs (read only) +// - default configs +// - dynamic broker configs +func (c *Cluster) brokerConfigs(node int32, fn func(k string, v *string, src kmsg.ConfigSource, sensitive bool)) { + if node >= 0 { + for _, b := range c.bs { + if b.node == node { + id := strconv.Itoa(int(node)) + fn("broker.id", &id, kmsg.ConfigSourceStaticBrokerConfig, false) + break + } + } + } + for _, c := range []struct { + k string + v string + sens bool + }{ + {k: "broker.rack", v: "krack"}, + {k: "sasl.enabled.mechanisms", v: "PLAIN,SCRAM-SHA-256,SCRAM-SHA-512"}, + {k: "super.users", sens: true}, + } { + v := c.v + fn(c.k, &v, kmsg.ConfigSourceStaticBrokerConfig, c.sens) + } + + for k, v := range configDefaults { + if _, ok := validBrokerConfigs[k]; ok { + v := v + fn(k, &v, kmsg.ConfigSourceDefaultConfig, false) + } + } + + for k, v := range c.bcfgs { + fn(k, v, 
kmsg.ConfigSourceDynamicBrokerConfig, false) + } +} + +// configs calls fn for all +// - static broker configs (read only) +// - default configs +// - dynamic broker configs +// - dynamic topic configs +// +// This differs from brokerConfigs by also including dynamic topic configs. +func (d *data) configs(t string, fn func(k string, v *string, src kmsg.ConfigSource, sensitive bool)) { + for k, v := range configDefaults { + if _, ok := validTopicConfigs[k]; ok { + v := v + fn(k, &v, kmsg.ConfigSourceDefaultConfig, false) + } + } + for k, v := range d.c.bcfgs { + if topicEquiv, ok := validBrokerConfigs[k]; ok && topicEquiv != "" { + fn(k, v, kmsg.ConfigSourceDynamicBrokerConfig, false) + } + } + for k, v := range d.tcfgs[t] { + fn(k, v, kmsg.ConfigSourceDynamicTopicConfig, false) + } +} + +// Unlike Kafka, we validate the value before allowing it to be set. +func (c *Cluster) setBrokerConfig(k string, v *string, dry bool) bool { + if dry { + return true + } + c.bcfgs[k] = v + return true +} + +func (d *data) setTopicConfig(t string, k string, v *string, dry bool) bool { + if dry { + return true + } + if _, ok := d.tcfgs[t]; !ok { + d.tcfgs[t] = make(map[string]*string) + } + d.tcfgs[t][k] = v + return true +} + +// All valid topic configs we support, as well as the equivalent broker +// config if there is one. +var validTopicConfigs = map[string]string{ + "cleanup.policy": "", + "compression.type": "compression.type", + "max.message.bytes": "log.message.max.bytes", + "message.timestamp.type": "log.message.timestamp.type", + "min.insync.replicas": "min.insync.replicas", + "retention.bytes": "log.retention.bytes", + "retention.ms": "log.retention.ms", +} + +// All valid broker configs we support, as well as their equivalent +// topic config if there is one. +var validBrokerConfigs = map[string]string{ + "broker.id": "", + "broker.rack": "", + "compression.type": "compression.type", + "default.replication.factor": "", + "fetch.max.bytes": "", + "log.dir": "", + "log.message.timestamp.type": "message.timestamp.type", + "log.retention.bytes": "retention.bytes", + "log.retention.ms": "retention.ms", + "message.max.bytes": "max.message.bytes", + "min.insync.replicas": "min.insync.replicas", + "sasl.enabled.mechanisms": "", + "super.users": "", +} + +// Default topic and broker configs. 
+var configDefaults = map[string]string{ + "cleanup.policy": "delete", + "compression.type": "producer", + "max.message.bytes": "1048588", + "message.timestamp.type": "CreateTime", + "min.insync.replicas": "1", + "retention.bytes": "-1", + "retention.ms": "604800000", + + "default.replication.factor": "3", + "fetch.max.bytes": "57671680", + "log.dir": defLogDir, + "log.message.timestamp.type": "CreateTime", + "log.retention.bytes": "-1", + "log.retention.ms": "604800000", + "message.max.bytes": "1048588", +} + +const defLogDir = "/mem/kfake" + +func staticConfig(s ...string) func(*string) bool { + return func(v *string) bool { + if v == nil { + return false + } + for _, ok := range s { + if *v == ok { + return true + } + } + return false + } +} + +func numberConfig(min int, hasMin bool, max int, hasMax bool) func(*string) bool { + return func(v *string) bool { + if v == nil { + return false + } + i, err := strconv.Atoi(*v) + if err != nil { + return false + } + if hasMin && i < min || hasMax && i > max { + return false + } + return true + } +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/groups.go b/vendor/github.com/twmb/franz-go/pkg/kfake/groups.go new file mode 100644 index 00000000000..bc6934d6dcb --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kfake/groups.go @@ -0,0 +1,1195 @@ +package kfake + +import ( + "bytes" + "fmt" + "sync" + "time" + + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" +) + +// TODO instance IDs +// TODO persisting groups so commits can happen to client-managed groups +// we need lastCommit, and need to better prune empty groups + +type ( + groups struct { + c *Cluster + gs map[string]*group + } + + group struct { + c *Cluster + gs *groups + name string + + state groupState + + leader string + members map[string]*groupMember + pending map[string]*groupMember + + commits tps[offsetCommit] + + generation int32 + protocolType string + protocols map[string]int + protocol string + + reqCh chan *clientReq + controlCh chan func() + + nJoining int + + tRebalance *time.Timer + + quit sync.Once + quitCh chan struct{} + } + + groupMember struct { + memberID string + clientID string + clientHost string + + join *kmsg.JoinGroupRequest // the latest join request + + // waitingReply is non-nil if a client is waiting for a reply + // from us for a JoinGroupRequest or a SyncGroupRequest. 
+ waitingReply *clientReq + + assignment []byte + + t *time.Timer + last time.Time + } + + offsetCommit struct { + offset int64 + leaderEpoch int32 + metadata *string + } + + groupState int8 +) + +const ( + groupEmpty groupState = iota + groupStable + groupPreparingRebalance + groupCompletingRebalance + groupDead +) + +func (gs groupState) String() string { + switch gs { + case groupEmpty: + return "Empty" + case groupStable: + return "Stable" + case groupPreparingRebalance: + return "PreparingRebalance" + case groupCompletingRebalance: + return "CompletingRebalance" + case groupDead: + return "Dead" + default: + return "Unknown" + } +} + +func (c *Cluster) coordinator(id string) *broker { + gen := c.coordinatorGen.Load() + n := hashString(fmt.Sprintf("%d", gen)+"\x00\x00"+id) % uint64(len(c.bs)) + return c.bs[n] +} + +func (c *Cluster) validateGroup(creq *clientReq, group string) *kerr.Error { + switch key := kmsg.Key(creq.kreq.Key()); key { + case kmsg.OffsetCommit, kmsg.OffsetFetch, kmsg.DescribeGroups, kmsg.DeleteGroups: + default: + if group == "" { + return kerr.InvalidGroupID + } + } + coordinator := c.coordinator(group).node + if coordinator != creq.cc.b.node { + return kerr.NotCoordinator + } + return nil +} + +func generateMemberID(clientID string, instanceID *string) string { + if instanceID == nil { + return clientID + "-" + randStrUUID() + } + return *instanceID + "-" + randStrUUID() +} + +//////////// +// GROUPS // +//////////// + +func (gs *groups) newGroup(name string) *group { + return &group{ + c: gs.c, + gs: gs, + name: name, + members: make(map[string]*groupMember), + pending: make(map[string]*groupMember), + protocols: make(map[string]int), + reqCh: make(chan *clientReq), + controlCh: make(chan func()), + quitCh: make(chan struct{}), + } +} + +// handleJoin completely hijacks the incoming request. +func (gs *groups) handleJoin(creq *clientReq) { + if gs.gs == nil { + gs.gs = make(map[string]*group) + } + req := creq.kreq.(*kmsg.JoinGroupRequest) +start: + g := gs.gs[req.Group] + if g == nil { + g = gs.newGroup(req.Group) + waitJoin := make(chan struct{}) + gs.gs[req.Group] = g + go g.manage(func() { close(waitJoin) }) + defer func() { <-waitJoin }() + } + select { + case g.reqCh <- creq: + case <-g.quitCh: + goto start + } +} + +// Returns true if the request is hijacked and handled, otherwise false if the +// group does not exist. 
+func (gs *groups) handleHijack(group string, creq *clientReq) bool { + if gs.gs == nil { + return false + } + g := gs.gs[group] + if g == nil { + return false + } + select { + case g.reqCh <- creq: + return true + case <-g.quitCh: + return false + } +} + +func (gs *groups) handleSync(creq *clientReq) bool { + return gs.handleHijack(creq.kreq.(*kmsg.SyncGroupRequest).Group, creq) +} + +func (gs *groups) handleHeartbeat(creq *clientReq) bool { + return gs.handleHijack(creq.kreq.(*kmsg.HeartbeatRequest).Group, creq) +} + +func (gs *groups) handleLeave(creq *clientReq) bool { + return gs.handleHijack(creq.kreq.(*kmsg.LeaveGroupRequest).Group, creq) +} + +func (gs *groups) handleOffsetCommit(creq *clientReq) { + if gs.gs == nil { + gs.gs = make(map[string]*group) + } + req := creq.kreq.(*kmsg.OffsetCommitRequest) +start: + g := gs.gs[req.Group] + if g == nil { + g = gs.newGroup(req.Group) + waitCommit := make(chan struct{}) + gs.gs[req.Group] = g + go g.manage(func() { close(waitCommit) }) + defer func() { <-waitCommit }() + } + select { + case g.reqCh <- creq: + case <-g.quitCh: + goto start + } +} + +func (gs *groups) handleOffsetDelete(creq *clientReq) bool { + return gs.handleHijack(creq.kreq.(*kmsg.OffsetDeleteRequest).Group, creq) +} + +func (gs *groups) handleList(creq *clientReq) *kmsg.ListGroupsResponse { + req := creq.kreq.(*kmsg.ListGroupsRequest) + resp := req.ResponseKind().(*kmsg.ListGroupsResponse) + + var states map[string]struct{} + if len(req.StatesFilter) > 0 { + states = make(map[string]struct{}) + for _, state := range req.StatesFilter { + states[state] = struct{}{} + } + } + + for _, g := range gs.gs { + if g.c.coordinator(g.name).node != creq.cc.b.node { + continue + } + g.waitControl(func() { + if states != nil { + if _, ok := states[g.state.String()]; !ok { + return + } + } + sg := kmsg.NewListGroupsResponseGroup() + sg.Group = g.name + sg.ProtocolType = g.protocolType + sg.GroupState = g.state.String() + resp.Groups = append(resp.Groups, sg) + }) + } + return resp +} + +func (gs *groups) handleDescribe(creq *clientReq) *kmsg.DescribeGroupsResponse { + req := creq.kreq.(*kmsg.DescribeGroupsRequest) + resp := req.ResponseKind().(*kmsg.DescribeGroupsResponse) + + doneg := func(name string) *kmsg.DescribeGroupsResponseGroup { + sg := kmsg.NewDescribeGroupsResponseGroup() + sg.Group = name + resp.Groups = append(resp.Groups, sg) + return &resp.Groups[len(resp.Groups)-1] + } + + for _, rg := range req.Groups { + sg := doneg(rg) + if kerr := gs.c.validateGroup(creq, rg); kerr != nil { + sg.ErrorCode = kerr.Code + continue + } + g, ok := gs.gs[rg] + if !ok { + sg.State = groupDead.String() + continue + } + if !g.waitControl(func() { + sg.State = g.state.String() + sg.ProtocolType = g.protocolType + if g.state == groupStable { + sg.Protocol = g.protocol + } + for _, m := range g.members { + sm := kmsg.NewDescribeGroupsResponseGroupMember() + sm.MemberID = m.memberID + sm.ClientID = m.clientID + sm.ClientHost = m.clientHost + if g.state == groupStable { + for _, p := range m.join.Protocols { + if p.Name == g.protocol { + sm.ProtocolMetadata = p.Metadata + break + } + } + sm.MemberAssignment = m.assignment + } + sg.Members = append(sg.Members, sm) + + } + }) { + sg.State = groupDead.String() + } + } + return resp +} + +func (gs *groups) handleDelete(creq *clientReq) *kmsg.DeleteGroupsResponse { + req := creq.kreq.(*kmsg.DeleteGroupsRequest) + resp := req.ResponseKind().(*kmsg.DeleteGroupsResponse) + + doneg := func(name string) *kmsg.DeleteGroupsResponseGroup { + sg := 
kmsg.NewDeleteGroupsResponseGroup() + sg.Group = name + resp.Groups = append(resp.Groups, sg) + return &resp.Groups[len(resp.Groups)-1] + } + + for _, rg := range req.Groups { + sg := doneg(rg) + if kerr := gs.c.validateGroup(creq, rg); kerr != nil { + sg.ErrorCode = kerr.Code + continue + } + g, ok := gs.gs[rg] + if !ok { + sg.ErrorCode = kerr.GroupIDNotFound.Code + continue + } + if !g.waitControl(func() { + switch g.state { + case groupDead: + sg.ErrorCode = kerr.GroupIDNotFound.Code + case groupEmpty: + g.quitOnce() + delete(gs.gs, rg) + case groupPreparingRebalance, groupCompletingRebalance, groupStable: + sg.ErrorCode = kerr.NonEmptyGroup.Code + } + }) { + sg.ErrorCode = kerr.GroupIDNotFound.Code + } + } + return resp +} + +func (gs *groups) handleOffsetFetch(creq *clientReq) *kmsg.OffsetFetchResponse { + req := creq.kreq.(*kmsg.OffsetFetchRequest) + resp := req.ResponseKind().(*kmsg.OffsetFetchResponse) + + if req.Version <= 7 { + rg := kmsg.NewOffsetFetchRequestGroup() + rg.Group = req.Group + if req.Topics != nil { + rg.Topics = make([]kmsg.OffsetFetchRequestGroupTopic, len(req.Topics)) + } + for _, t := range req.Topics { + rt := kmsg.NewOffsetFetchRequestGroupTopic() + rt.Topic = t.Topic + rt.Partitions = t.Partitions + rg.Topics = append(rg.Topics, rt) + } + req.Groups = append(req.Groups, rg) + + defer func() { + g0 := resp.Groups[0] + resp.ErrorCode = g0.ErrorCode + for _, t := range g0.Topics { + st := kmsg.NewOffsetFetchResponseTopic() + st.Topic = t.Topic + for _, p := range t.Partitions { + sp := kmsg.NewOffsetFetchResponseTopicPartition() + sp.Partition = p.Partition + sp.Offset = p.Offset + sp.LeaderEpoch = p.LeaderEpoch + sp.Metadata = p.Metadata + sp.ErrorCode = p.ErrorCode + st.Partitions = append(st.Partitions, sp) + } + resp.Topics = append(resp.Topics, st) + } + }() + } + + doneg := func(name string) *kmsg.OffsetFetchResponseGroup { + sg := kmsg.NewOffsetFetchResponseGroup() + sg.Group = name + resp.Groups = append(resp.Groups, sg) + return &resp.Groups[len(resp.Groups)-1] + } + + for _, rg := range req.Groups { + sg := doneg(rg.Group) + if kerr := gs.c.validateGroup(creq, rg.Group); kerr != nil { + sg.ErrorCode = kerr.Code + continue + } + g, ok := gs.gs[rg.Group] + if !ok { + sg.ErrorCode = kerr.GroupIDNotFound.Code + continue + } + if !g.waitControl(func() { + if rg.Topics == nil { + for t, ps := range g.commits { + st := kmsg.NewOffsetFetchResponseGroupTopic() + st.Topic = t + for p, c := range ps { + sp := kmsg.NewOffsetFetchResponseGroupTopicPartition() + sp.Partition = p + sp.Offset = c.offset + sp.LeaderEpoch = c.leaderEpoch + sp.Metadata = c.metadata + st.Partitions = append(st.Partitions, sp) + } + sg.Topics = append(sg.Topics, st) + } + } else { + for _, t := range rg.Topics { + st := kmsg.NewOffsetFetchResponseGroupTopic() + st.Topic = t.Topic + for _, p := range t.Partitions { + sp := kmsg.NewOffsetFetchResponseGroupTopicPartition() + sp.Partition = p + c, ok := g.commits.getp(t.Topic, p) + if !ok { + sp.Offset = -1 + sp.LeaderEpoch = -1 + } else { + sp.Offset = c.offset + sp.LeaderEpoch = c.leaderEpoch + sp.Metadata = c.metadata + } + st.Partitions = append(st.Partitions, sp) + } + sg.Topics = append(sg.Topics, st) + } + } + }) { + sg.ErrorCode = kerr.GroupIDNotFound.Code + } + } + return resp +} + +func (g *group) handleOffsetDelete(creq *clientReq) *kmsg.OffsetDeleteResponse { + req := creq.kreq.(*kmsg.OffsetDeleteRequest) + resp := req.ResponseKind().(*kmsg.OffsetDeleteResponse) + + if kerr := g.c.validateGroup(creq, req.Group); kerr != nil { + 
resp.ErrorCode = kerr.Code + return resp + } + + tidx := make(map[string]int) + donet := func(t string, errCode int16) *kmsg.OffsetDeleteResponseTopic { + if i, ok := tidx[t]; ok { + return &resp.Topics[i] + } + tidx[t] = len(resp.Topics) + st := kmsg.NewOffsetDeleteResponseTopic() + st.Topic = t + resp.Topics = append(resp.Topics, st) + return &resp.Topics[len(resp.Topics)-1] + } + donep := func(t string, p int32, errCode int16) *kmsg.OffsetDeleteResponseTopicPartition { + sp := kmsg.NewOffsetDeleteResponseTopicPartition() + sp.Partition = p + sp.ErrorCode = errCode + st := donet(t, 0) + st.Partitions = append(st.Partitions, sp) + return &st.Partitions[len(st.Partitions)-1] + } + + // empty: delete everything in request + // preparingRebalance, completingRebalance, stable: + // * if consumer, delete everything not subscribed to + // * if not consumer, delete nothing, error with non_empty_group + subTopics := make(map[string]struct{}) + switch g.state { + default: + resp.ErrorCode = kerr.GroupIDNotFound.Code + return resp + case groupEmpty: + case groupPreparingRebalance, groupCompletingRebalance, groupStable: + if g.protocolType != "consumer" { + resp.ErrorCode = kerr.NonEmptyGroup.Code + return resp + } + for _, m := range []map[string]*groupMember{ + g.members, + g.pending, + } { + for _, m := range m { + if m.join == nil { + continue + } + for _, proto := range m.join.Protocols { + var m kmsg.ConsumerMemberMetadata + if err := m.ReadFrom(proto.Metadata); err == nil { + for _, topic := range m.Topics { + subTopics[topic] = struct{}{} + } + } + } + } + } + } + + for _, t := range req.Topics { + for _, p := range t.Partitions { + if _, ok := subTopics[t.Topic]; ok { + donep(t.Topic, p.Partition, kerr.GroupSubscribedToTopic.Code) + continue + } + g.commits.delp(t.Topic, p.Partition) + donep(t.Topic, p.Partition, 0) + } + } + + return resp +} + +//////////////////// +// GROUP HANDLING // +//////////////////// + +func (g *group) manage(detachNew func()) { + // On the first join only, we want to ensure that if the join is + // invalid, we clean the group up before we detach from the cluster + // serialization loop that is initializing us. + var firstJoin func(bool) + firstJoin = func(ok bool) { + firstJoin = func(bool) {} + if !ok { + delete(g.gs.gs, g.name) + g.quitOnce() + } + detachNew() + } + + defer func() { + for _, m := range g.members { + if m.t != nil { + m.t.Stop() + } + } + for _, m := range g.pending { + if m.t != nil { + m.t.Stop() + } + } + }() + + for { + select { + case <-g.quitCh: + return + case creq := <-g.reqCh: + var kresp kmsg.Response + switch creq.kreq.(type) { + case *kmsg.JoinGroupRequest: + var ok bool + kresp, ok = g.handleJoin(creq) + firstJoin(ok) + case *kmsg.SyncGroupRequest: + kresp = g.handleSync(creq) + case *kmsg.HeartbeatRequest: + kresp = g.handleHeartbeat(creq) + case *kmsg.LeaveGroupRequest: + kresp = g.handleLeave(creq) + case *kmsg.OffsetCommitRequest: + var ok bool + kresp, ok = g.handleOffsetCommit(creq) + firstJoin(ok) + case *kmsg.OffsetDeleteRequest: + kresp = g.handleOffsetDelete(creq) + } + if kresp != nil { + g.reply(creq, kresp, nil) + } + + case fn := <-g.controlCh: + fn() + } + } +} + +func (g *group) waitControl(fn func()) bool { + wait := make(chan struct{}) + wfn := func() { fn(); close(wait) } + select { + case <-g.quitCh: + return false + case g.controlCh <- wfn: + <-wait + return true + } +} + +// Called in the manage loop. 
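+// quitOnce is idempotent: the once guard (g.quit) guarantees the transition
+// to groupDead and the close of quitCh happen exactly once, which is what
+// lets request producers race safely against group death:
+//
+//	select {
+//	case g.reqCh <- creq: // accepted; the manage loop replies later
+//	case <-g.quitCh:      // group died; caller retries against a fresh group
+//	}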
+func (g *group) quitOnce() {
+	g.quit.Do(func() {
+		g.state = groupDead
+		close(g.quitCh)
+	})
+}
+
+// Handles a join. We do not do the delayed join aspects in Kafka; we just punt
+// to the client to immediately rejoin if a new client enters the group.
+//
+// If this returns nil, the request will be replied to later.
+func (g *group) handleJoin(creq *clientReq) (kmsg.Response, bool) {
+	req := creq.kreq.(*kmsg.JoinGroupRequest)
+	resp := req.ResponseKind().(*kmsg.JoinGroupResponse)
+
+	if kerr := g.c.validateGroup(creq, req.Group); kerr != nil {
+		resp.ErrorCode = kerr.Code
+		return resp, false
+	}
+	if req.InstanceID != nil {
+		resp.ErrorCode = kerr.InvalidGroupID.Code
+		return resp, false
+	}
+	if st := int64(req.SessionTimeoutMillis); st < g.c.cfg.minSessionTimeout.Milliseconds() || st > g.c.cfg.maxSessionTimeout.Milliseconds() {
+		resp.ErrorCode = kerr.InvalidSessionTimeout.Code
+		return resp, false
+	}
+	if !g.protocolsMatch(req.ProtocolType, req.Protocols) {
+		resp.ErrorCode = kerr.InconsistentGroupProtocol.Code
+		return resp, false
+	}
+
+	// Clients first join with no member ID. For join v4+, we generate
+	// the member ID and add the member to pending. For v3 and below,
+	// we immediately enter rebalance.
+	if req.MemberID == "" {
+		memberID := generateMemberID(creq.cid, req.InstanceID)
+		resp.MemberID = memberID
+		m := &groupMember{
+			memberID:   memberID,
+			clientID:   creq.cid,
+			clientHost: creq.cc.conn.RemoteAddr().String(),
+			join:       req,
+		}
+		if req.Version >= 4 {
+			g.addPendingRebalance(m)
+			resp.ErrorCode = kerr.MemberIDRequired.Code
+			return resp, true
+		}
+		g.addMemberAndRebalance(m, creq, req)
+		return nil, true
+	}
+
+	// Pending members that rejoin immediately enter rebalance.
+	if m, ok := g.pending[req.MemberID]; ok {
+		g.addMemberAndRebalance(m, creq, req)
+		return nil, true
+	}
+	m, ok := g.members[req.MemberID]
+	if !ok {
+		resp.ErrorCode = kerr.UnknownMemberID.Code
+		return resp, false
+	}
+
+	switch g.state {
+	default:
+		resp.ErrorCode = kerr.UnknownMemberID.Code
+		return resp, false
+	case groupPreparingRebalance:
+		g.updateMemberAndRebalance(m, creq, req)
+	case groupCompletingRebalance:
+		if m.sameJoin(req) {
+			g.fillJoinResp(req, resp)
+			return resp, true
+		}
+		g.updateMemberAndRebalance(m, creq, req)
+	case groupStable:
+		if g.leader != req.MemberID || m.sameJoin(req) {
+			g.fillJoinResp(req, resp)
+			return resp, true
+		}
+		g.updateMemberAndRebalance(m, creq, req)
+	}
+	return nil, true
+}
+
+// Handles a sync, which can transition us to stable.
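+//
+// The expected client dance, roughly (a sketch; error handling elided):
+//
+//	JoinGroup v4+, empty member ID -> MEMBER_ID_REQUIRED plus an assigned ID
+//	JoinGroup again with that ID   -> generation bumps; leader and protocol chosen
+//	SyncGroup from the leader      -> carries assignments for the whole group
+//	SyncGroup from followers       -> parked until the leader's sync arrives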
+func (g *group) handleSync(creq *clientReq) kmsg.Response { + req := creq.kreq.(*kmsg.SyncGroupRequest) + resp := req.ResponseKind().(*kmsg.SyncGroupResponse) + + if kerr := g.c.validateGroup(creq, req.Group); kerr != nil { + resp.ErrorCode = kerr.Code + return resp + } + if req.InstanceID != nil { + resp.ErrorCode = kerr.InvalidGroupID.Code + return resp + } + m, ok := g.members[req.MemberID] + if !ok { + resp.ErrorCode = kerr.UnknownMemberID.Code + return resp + } + if req.Generation != g.generation { + resp.ErrorCode = kerr.IllegalGeneration.Code + return resp + } + if req.ProtocolType != nil && *req.ProtocolType != g.protocolType { + resp.ErrorCode = kerr.InconsistentGroupProtocol.Code + return resp + } + if req.Protocol != nil && *req.Protocol != g.protocol { + resp.ErrorCode = kerr.InconsistentGroupProtocol.Code + return resp + } + + switch g.state { + default: + resp.ErrorCode = kerr.UnknownMemberID.Code + case groupPreparingRebalance: + resp.ErrorCode = kerr.RebalanceInProgress.Code + case groupCompletingRebalance: + m.waitingReply = creq + if req.MemberID == g.leader { + g.completeLeaderSync(req) + } + return nil + case groupStable: // member saw join and is now finally calling sync + resp.ProtocolType = kmsg.StringPtr(g.protocolType) + resp.Protocol = kmsg.StringPtr(g.protocol) + resp.MemberAssignment = m.assignment + } + return resp +} + +// Handles a heartbeat, a relatively simple request that just delays our +// session timeout timer. +func (g *group) handleHeartbeat(creq *clientReq) kmsg.Response { + req := creq.kreq.(*kmsg.HeartbeatRequest) + resp := req.ResponseKind().(*kmsg.HeartbeatResponse) + + if kerr := g.c.validateGroup(creq, req.Group); kerr != nil { + resp.ErrorCode = kerr.Code + return resp + } + if req.InstanceID != nil { + resp.ErrorCode = kerr.InvalidGroupID.Code + return resp + } + m, ok := g.members[req.MemberID] + if !ok { + resp.ErrorCode = kerr.UnknownMemberID.Code + return resp + } + if req.Generation != g.generation { + resp.ErrorCode = kerr.IllegalGeneration.Code + return resp + } + + switch g.state { + default: + resp.ErrorCode = kerr.UnknownMemberID.Code + case groupPreparingRebalance: + resp.ErrorCode = kerr.RebalanceInProgress.Code + g.updateHeartbeat(m) + case groupCompletingRebalance, groupStable: + g.updateHeartbeat(m) + } + return resp +} + +// Handles a leave. We trigger a rebalance for every member leaving in a batch +// request, but that's fine because of our manage serialization. 
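+//
+// LeaveGroup v2 and below carry a single top-level MemberID; the handler
+// shims that into a one-element Members batch and mirrors the first member's
+// error code back to the top level, as in:
+//
+//	req.Members = append(req.Members, kmsg.LeaveGroupRequestMember{MemberID: req.MemberID})
+//	defer func() { resp.ErrorCode = resp.Members[0].ErrorCode }()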
+func (g *group) handleLeave(creq *clientReq) kmsg.Response { + req := creq.kreq.(*kmsg.LeaveGroupRequest) + resp := req.ResponseKind().(*kmsg.LeaveGroupResponse) + + if kerr := g.c.validateGroup(creq, req.Group); kerr != nil { + resp.ErrorCode = kerr.Code + return resp + } + if req.Version < 3 { + req.Members = append(req.Members, kmsg.LeaveGroupRequestMember{ + MemberID: req.MemberID, + }) + defer func() { resp.ErrorCode = resp.Members[0].ErrorCode }() + } + + for _, rm := range req.Members { + mresp := kmsg.NewLeaveGroupResponseMember() + mresp.MemberID = rm.MemberID + mresp.InstanceID = rm.InstanceID + resp.Members = append(resp.Members, mresp) + + r := &resp.Members[len(resp.Members)-1] + if rm.InstanceID != nil { + r.ErrorCode = kerr.UnknownMemberID.Code + continue + } + if m, ok := g.members[rm.MemberID]; !ok { + if p, ok := g.pending[rm.MemberID]; !ok { + r.ErrorCode = kerr.UnknownMemberID.Code + } else { + g.stopPending(p) + } + } else { + g.updateMemberAndRebalance(m, nil, nil) + } + } + + return resp +} + +func fillOffsetCommit(req *kmsg.OffsetCommitRequest, resp *kmsg.OffsetCommitResponse, code int16) { + for _, t := range req.Topics { + st := kmsg.NewOffsetCommitResponseTopic() + st.Topic = t.Topic + for _, p := range t.Partitions { + sp := kmsg.NewOffsetCommitResponseTopicPartition() + sp.Partition = p.Partition + sp.ErrorCode = code + st.Partitions = append(st.Partitions, sp) + } + resp.Topics = append(resp.Topics, st) + } +} + +// Handles a commit. +func (g *group) handleOffsetCommit(creq *clientReq) (*kmsg.OffsetCommitResponse, bool) { + req := creq.kreq.(*kmsg.OffsetCommitRequest) + resp := req.ResponseKind().(*kmsg.OffsetCommitResponse) + + if kerr := g.c.validateGroup(creq, req.Group); kerr != nil { + fillOffsetCommit(req, resp, kerr.Code) + return resp, false + } + if req.InstanceID != nil { + fillOffsetCommit(req, resp, kerr.InvalidGroupID.Code) + return resp, false + } + + var m *groupMember + if len(g.members) > 0 { + var ok bool + m, ok = g.members[req.MemberID] + if !ok { + fillOffsetCommit(req, resp, kerr.UnknownMemberID.Code) + return resp, false + } + if req.Generation != g.generation { + fillOffsetCommit(req, resp, kerr.IllegalGeneration.Code) + return resp, false + } + } else { + if req.MemberID != "" { + fillOffsetCommit(req, resp, kerr.UnknownMemberID.Code) + return resp, false + } + if req.Generation != -1 { + fillOffsetCommit(req, resp, kerr.IllegalGeneration.Code) + return resp, false + } + if g.state != groupEmpty { + panic("invalid state: no members, but group not empty") + } + } + + switch g.state { + default: + fillOffsetCommit(req, resp, kerr.GroupIDNotFound.Code) + return resp, true + case groupEmpty: + for _, t := range req.Topics { + for _, p := range t.Partitions { + g.commits.set(t.Topic, p.Partition, offsetCommit{ + offset: p.Offset, + leaderEpoch: p.LeaderEpoch, + metadata: p.Metadata, + }) + } + } + fillOffsetCommit(req, resp, 0) + case groupPreparingRebalance, groupStable: + for _, t := range req.Topics { + for _, p := range t.Partitions { + g.commits.set(t.Topic, p.Partition, offsetCommit{ + offset: p.Offset, + leaderEpoch: p.LeaderEpoch, + metadata: p.Metadata, + }) + } + } + fillOffsetCommit(req, resp, 0) + g.updateHeartbeat(m) + case groupCompletingRebalance: + fillOffsetCommit(req, resp, kerr.RebalanceInProgress.Code) + g.updateHeartbeat(m) + } + return resp, true +} + +// Transitions the group to the preparing rebalance state. We first need to +// clear any member that is currently sitting in sync. 
If enough members have +// entered join, we immediately proceed to completeRebalance, otherwise we +// begin a wait timer. +func (g *group) rebalance() { + if g.state == groupCompletingRebalance { + for _, m := range g.members { + m.assignment = nil + if m.waitingReply.empty() { + continue + } + sync, ok := m.waitingReply.kreq.(*kmsg.SyncGroupRequest) + if !ok { + continue + } + resp := sync.ResponseKind().(*kmsg.SyncGroupResponse) + resp.ErrorCode = kerr.RebalanceInProgress.Code + g.reply(m.waitingReply, resp, m) + } + } + + g.state = groupPreparingRebalance + + if g.nJoining >= len(g.members) { + g.completeRebalance() + return + } + + var rebalanceTimeoutMs int32 + for _, m := range g.members { + if m.join.RebalanceTimeoutMillis > rebalanceTimeoutMs { + rebalanceTimeoutMs = m.join.RebalanceTimeoutMillis + } + } + if g.tRebalance == nil { + g.tRebalance = time.AfterFunc(time.Duration(rebalanceTimeoutMs)*time.Millisecond, func() { + select { + case <-g.quitCh: + case g.controlCh <- func() { + g.completeRebalance() + }: + } + }) + } +} + +// Transitions the group to either dead or stable, depending on if any members +// remain by the time we clear those that are not waiting in join. +func (g *group) completeRebalance() { + if g.tRebalance != nil { + g.tRebalance.Stop() + g.tRebalance = nil + } + g.nJoining = 0 + + var foundLeader bool + for _, m := range g.members { + if m.waitingReply.empty() { + for _, p := range m.join.Protocols { + g.protocols[p.Name]-- + } + delete(g.members, m.memberID) + if m.t != nil { + m.t.Stop() + } + continue + } + if m.memberID == g.leader { + foundLeader = true + } + } + + g.generation++ + if g.generation < 0 { + g.generation = 1 + } + if len(g.members) == 0 { + g.state = groupEmpty + return + } + g.state = groupCompletingRebalance + + var foundProto bool + for proto, nsupport := range g.protocols { + if nsupport == len(g.members) { + g.protocol = proto + foundProto = true + break + } + } + if !foundProto { + panic(fmt.Sprint("unable to find commonly supported protocol!", g.protocols, len(g.members))) + } + + for _, m := range g.members { + if !foundLeader { + g.leader = m.memberID + } + req := m.join + resp := req.ResponseKind().(*kmsg.JoinGroupResponse) + g.fillJoinResp(req, resp) + g.reply(m.waitingReply, resp, m) + } +} + +// Transitions the group to stable, the final step of a rebalance. 
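+//
+// Only the leader's sync carries assignments; a sketch of what the leader
+// sends (member ID and assignment bytes are illustrative):
+//
+//	a := kmsg.NewSyncGroupRequestGroupAssignment()
+//	a.MemberID = "clientA-<uuid>"
+//	a.MemberAssignment = assignmentBytes // e.g. encoded kmsg.ConsumerMemberAssignment
+//	req.GroupAssignment = append(req.GroupAssignment, a)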
+func (g *group) completeLeaderSync(req *kmsg.SyncGroupRequest) { + for _, m := range g.members { + m.assignment = nil + } + for _, a := range req.GroupAssignment { + m, ok := g.members[a.MemberID] + if !ok { + continue + } + m.assignment = a.MemberAssignment + } + for _, m := range g.members { + if m.waitingReply.empty() { + continue // this member saw join but has not yet called sync + } + resp := m.waitingReply.kreq.ResponseKind().(*kmsg.SyncGroupResponse) + resp.ProtocolType = kmsg.StringPtr(g.protocolType) + resp.Protocol = kmsg.StringPtr(g.protocol) + resp.MemberAssignment = m.assignment + g.reply(m.waitingReply, resp, m) + } + g.state = groupStable +} + +func (g *group) updateHeartbeat(m *groupMember) { + g.atSessionTimeout(m, func() { + g.updateMemberAndRebalance(m, nil, nil) + }) +} + +func (g *group) addPendingRebalance(m *groupMember) { + g.pending[m.memberID] = m + g.atSessionTimeout(m, func() { + delete(g.pending, m.memberID) + }) +} + +func (g *group) stopPending(m *groupMember) { + delete(g.pending, m.memberID) + if m.t != nil { + m.t.Stop() + } +} + +func (g *group) atSessionTimeout(m *groupMember, fn func()) { + if m.t != nil { + m.t.Stop() + } + timeout := time.Millisecond * time.Duration(m.join.SessionTimeoutMillis) + m.last = time.Now() + tfn := func() { + select { + case <-g.quitCh: + case g.controlCh <- func() { + if time.Since(m.last) >= timeout { + fn() + } + }: + } + } + m.t = time.AfterFunc(timeout, tfn) +} + +// This is used to update a member from a new join request, or to clear a +// member from failed heartbeats. +func (g *group) updateMemberAndRebalance(m *groupMember, waitingReply *clientReq, newJoin *kmsg.JoinGroupRequest) { + for _, p := range m.join.Protocols { + g.protocols[p.Name]-- + } + m.join = newJoin + if m.join != nil { + for _, p := range m.join.Protocols { + g.protocols[p.Name]++ + } + if m.waitingReply.empty() && !waitingReply.empty() { + g.nJoining++ + } + m.waitingReply = waitingReply + } else { + delete(g.members, m.memberID) + if m.t != nil { + m.t.Stop() + } + if !m.waitingReply.empty() { + g.nJoining-- + } + } + g.rebalance() +} + +// Adds a new member to the group and rebalances. +func (g *group) addMemberAndRebalance(m *groupMember, waitingReply *clientReq, join *kmsg.JoinGroupRequest) { + g.stopPending(m) + m.join = join + for _, p := range m.join.Protocols { + g.protocols[p.Name]++ + } + g.members[m.memberID] = m + g.nJoining++ + m.waitingReply = waitingReply + g.rebalance() +} + +// Returns if a new join can even join the group based on the join's supported +// protocols. +func (g *group) protocolsMatch(protocolType string, protocols []kmsg.JoinGroupRequestProtocol) bool { + if g.protocolType == "" { + if protocolType == "" || len(protocols) == 0 { + return false + } + g.protocolType = protocolType + return true + } + if protocolType != g.protocolType { + return false + } + if len(g.protocols) == 0 { + return true + } + for _, p := range protocols { + if _, ok := g.protocols[p.Name]; ok { + return true + } + } + return false +} + +// Returns if a new join request is the same as an old request; if so, for +// non-leaders, we just return the old join response. 
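+// "Same" is strict: identical protocol names and metadata bytes, in the same
+// order. Any drift (say, a consumer changing its topic subscription, which
+// changes the encoded metadata) fails the comparison and forces a rebalance.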
+func (m *groupMember) sameJoin(req *kmsg.JoinGroupRequest) bool { + if len(m.join.Protocols) != len(req.Protocols) { + return false + } + for i := range m.join.Protocols { + if m.join.Protocols[i].Name != req.Protocols[i].Name { + return false + } + if !bytes.Equal(m.join.Protocols[i].Metadata, req.Protocols[i].Metadata) { + return false + } + } + return true +} + +func (g *group) fillJoinResp(req *kmsg.JoinGroupRequest, resp *kmsg.JoinGroupResponse) { + resp.Generation = g.generation + resp.ProtocolType = kmsg.StringPtr(g.protocolType) + resp.Protocol = kmsg.StringPtr(g.protocol) + resp.LeaderID = g.leader + resp.MemberID = req.MemberID + if g.leader == req.MemberID { + resp.Members = g.joinResponseMetadata() + } +} + +func (g *group) joinResponseMetadata() []kmsg.JoinGroupResponseMember { + metadata := make([]kmsg.JoinGroupResponseMember, 0, len(g.members)) +members: + for _, m := range g.members { + for _, p := range m.join.Protocols { + if p.Name == g.protocol { + metadata = append(metadata, kmsg.JoinGroupResponseMember{ + MemberID: m.memberID, + ProtocolMetadata: p.Metadata, + }) + continue members + } + } + panic("inconsistent group protocol within saved members") + } + return metadata +} + +func (g *group) reply(creq *clientReq, kresp kmsg.Response, m *groupMember) { + select { + case creq.cc.respCh <- clientResp{kresp: kresp, corr: creq.corr, seq: creq.seq}: + case <-g.c.die: + return + } + if m != nil { + m.waitingReply = nil + g.updateHeartbeat(m) + } +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/logger.go b/vendor/github.com/twmb/franz-go/pkg/kfake/logger.go new file mode 100644 index 00000000000..cc674bc8b7c --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kfake/logger.go @@ -0,0 +1,65 @@ +package kfake + +import ( + "fmt" + "io" +) + +// LogLevel designates which level the logger should log at. +type LogLevel int8 + +const ( + // LogLevelNone disables logging. + LogLevelNone LogLevel = iota + // LogLevelError logs all errors. Generally, these should not happen. + LogLevelError + // LogLevelWarn logs all warnings, such as request failures. + LogLevelWarn + // LogLevelInfo logs informational messages, such as requests. This is + // usually the default log level. + LogLevelInfo + // LogLevelDebug logs verbose information, and is usually not used in + // production. + LogLevelDebug +) + +func (l LogLevel) String() string { + switch l { + case LogLevelError: + return "ERR" + case LogLevelWarn: + return "WRN" + case LogLevelInfo: + return "INF" + case LogLevelDebug: + return "DBG" + default: + return "NON" + } +} + +// Logger can be provided to hook into the fake cluster's logs. +type Logger interface { + Logf(LogLevel, string, ...any) +} + +type nopLogger struct{} + +func (*nopLogger) Logf(LogLevel, string, ...any) {} + +// BasicLogger returns a logger that writes newline delimited messages to dst. +func BasicLogger(dst io.Writer, level LogLevel) Logger { + return &basicLogger{dst, level} +} + +type basicLogger struct { + dst io.Writer + level LogLevel +} + +func (b *basicLogger) Logf(level LogLevel, msg string, args ...any) { + if b.level < level { + return + } + fmt.Fprintf(b.dst, "[%s] "+msg+"\n", append([]any{level}, args...)...) 
+} diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/main.go b/vendor/github.com/twmb/franz-go/pkg/kfake/main.go new file mode 100644 index 00000000000..6d6596c9d55 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kfake/main.go @@ -0,0 +1,31 @@ +//go:build none + +package main + +import ( + "fmt" + "os" + "os/signal" + + "github.com/twmb/franz-go/pkg/kfake" +) + +func main() { + c, err := kfake.NewCluster( + kfake.Ports(9092, 9093, 9094), + kfake.SeedTopics(-1, "foo"), + ) + if err != nil { + panic(err) + } + defer c.Close() + + addrs := c.ListenAddrs() + for _, addr := range addrs { + fmt.Println(addr) + } + + sigs := make(chan os.Signal, 2) + signal.Notify(sigs, os.Interrupt) + <-sigs +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/misc.go b/vendor/github.com/twmb/franz-go/pkg/kfake/misc.go new file mode 100644 index 00000000000..75f2f244994 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kfake/misc.go @@ -0,0 +1,63 @@ +package kfake + +import ( + "crypto/rand" + "crypto/sha256" + "encoding/binary" + "fmt" + "io" + "sync" +) + +func randFill(slice []byte) { + randPoolFill(slice) +} + +func randBytes(n int) []byte { + r := make([]byte, n) + randPoolFill(r) + return r +} + +func randUUID() [16]byte { + var uuid [16]byte + randPoolFill(uuid[:]) + return uuid +} + +func randStrUUID() string { + uuid := randUUID() + return fmt.Sprintf("%x", uuid[:]) +} + +func hashString(s string) uint64 { + sum := sha256.Sum256([]byte(s)) + var n uint64 + for i := 0; i < 4; i++ { + v := binary.BigEndian.Uint64(sum[i*8:]) + n ^= v + } + return n +} + +var ( + mu sync.Mutex + randPool = make([]byte, 4<<10) + randPoolAt = len(randPool) +) + +func randPoolFill(into []byte) { + mu.Lock() + defer mu.Unlock() + for len(into) != 0 { + n := copy(into, randPool[randPoolAt:]) + into = into[n:] + randPoolAt += n + if randPoolAt == cap(randPool) { + if _, err := io.ReadFull(rand.Reader, randPool); err != nil { + panic(fmt.Sprintf("unable to read %d bytes from crypto/rand: %v", len(randPool), err)) + } + randPoolAt = 0 + } + } +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/pid.go b/vendor/github.com/twmb/franz-go/pkg/kfake/pid.go new file mode 100644 index 00000000000..eacf2cf87bd --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kfake/pid.go @@ -0,0 +1,96 @@ +package kfake + +import ( + "hash/fnv" + "math" + "math/rand" +) + +// TODO +// +// * Convert pids to struct, add heap of last use, add index to pidseqs, and +// remove pidseqs as they exhaust max # of pids configured. 
+// +// * Wrap epochs + +type ( + pids map[int64]*pidMap + + pidMap struct { + id int64 + epoch int16 + tps tps[pidseqs] + } + + pid struct { + id int64 + epoch int16 + } + + pidseqs struct { + seqs [5]int32 + at uint8 + } +) + +func (pids *pids) get(id int64, epoch int16, t string, p int32) (*pidseqs, int16) { + if *pids == nil { + return nil, 0 + } + pm := (*pids)[id] + if pm == nil { + return nil, 0 + } + return pm.tps.mkpDefault(t, p), pm.epoch +} + +func (pids *pids) create(txnalID *string) pid { + if *pids == nil { + *pids = make(map[int64]*pidMap) + } + var id int64 + if txnalID != nil { + hasher := fnv.New64() + hasher.Write([]byte(*txnalID)) + id = int64(hasher.Sum64()) & math.MaxInt64 + } else { + for { + id = int64(rand.Uint64()) & math.MaxInt64 + if _, exists := (*pids)[id]; !exists { + break + } + } + } + pm, exists := (*pids)[id] + if exists { + pm.epoch++ + return pid{id, pm.epoch} + } + pm = &pidMap{id: id} + (*pids)[id] = pm + return pid{id, 0} +} + +func (seqs *pidseqs) pushAndValidate(firstSeq, numRecs int32) (ok, dup bool) { + // If there is no pid, we do not do duplicate detection. + if seqs == nil { + return true, false + } + var ( + seq = firstSeq + seq64 = int64(seq) + next64 = (seq64 + int64(numRecs)) % math.MaxInt32 + next = int32(next64) + ) + for i := 0; i < 5; i++ { + if seqs.seqs[i] == seq && seqs.seqs[(i+1)%5] == next { + return true, true + } + } + if seqs.seqs[seqs.at] != seq { + return false, false + } + seqs.at = (seqs.at + 1) % 5 + seqs.seqs[seqs.at] = next + return true, false +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/sasl.go b/vendor/github.com/twmb/franz-go/pkg/kfake/sasl.go new file mode 100644 index 00000000000..413bb3bd1c7 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kfake/sasl.go @@ -0,0 +1,296 @@ +package kfake + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "crypto/sha512" + "encoding/base64" + "errors" + "fmt" + "regexp" + "strings" + + "github.com/twmb/franz-go/pkg/kmsg" + "golang.org/x/crypto/pbkdf2" +) + +// TODO server-error-value in serverFinal + +const ( + saslPlain = "PLAIN" + saslScram256 = "SCRAM-SHA-256" + saslScram512 = "SCRAM-SHA-512" + scramIterations = 4096 +) + +type ( + sasls struct { + plain map[string]string // user => pass + scram256 map[string]scramAuth // user => scram auth + scram512 map[string]scramAuth // user => scram auth + } + + saslStage uint8 +) + +func (s sasls) empty() bool { + return len(s.plain) == 0 && len(s.scram256) == 0 && len(s.scram512) == 0 +} + +const ( + saslStageBegin saslStage = iota + saslStageAuthPlain + saslStageAuthScram0_256 + saslStageAuthScram0_512 + saslStageAuthScram1 + saslStageComplete +) + +func (c *Cluster) handleSASL(creq *clientReq) (allow bool) { + switch creq.cc.saslStage { + case saslStageBegin: + switch creq.kreq.(type) { + case *kmsg.ApiVersionsRequest, + *kmsg.SASLHandshakeRequest: + return true + default: + return false + } + case saslStageAuthPlain, + saslStageAuthScram0_256, + saslStageAuthScram0_512, + saslStageAuthScram1: + switch creq.kreq.(type) { + case *kmsg.ApiVersionsRequest, + *kmsg.SASLAuthenticateRequest: + return true + default: + return false + } + case saslStageComplete: + return true + default: + panic("unreachable") + } +} + +/////////// +// PLAIN // +/////////// + +func saslSplitPlain(auth []byte) (user, pass string, err error) { + parts := strings.SplitN(string(auth), "\x00", 3) + if len(parts) != 3 { + return "", "", errors.New("invalid plain auth") + } + if len(parts[0]) != 0 && parts[0] != parts[1] { + return "", "", 
errors.New("authzid is not equal to username") // see below + } + return parts[1], parts[2], nil +} + +/////////// +// SCRAM // +/////////// + +func newScramAuth(mechanism, pass string) scramAuth { + var saltedPass []byte + salt := randBytes(10) + switch mechanism { + case saslScram256: + saltedPass = pbkdf2.Key([]byte(pass), salt, scramIterations, sha256.Size, sha256.New) + case saslScram512: + saltedPass = pbkdf2.Key([]byte(pass), salt, scramIterations, sha512.Size, sha512.New) + default: + panic("unreachable") + } + return scramAuth{ + mechanism: mechanism, + iterations: scramIterations, + saltedPass: saltedPass, + salt: salt, + } +} + +type scramAuth struct { + mechanism string // scram 256 or 512 + iterations int + saltedPass []byte + salt []byte +} + +// client-first-message +type scramClient0 struct { + user string + bare []byte // client-first-message-bare + nonce []byte // nonce in client0 +} + +var scramUnescaper = strings.NewReplacer("=3D", "=", "=2C", ",") + +func scramParseClient0(client0 []byte) (scramClient0, error) { + m := reClient0.FindSubmatch(client0) + if len(m) == 0 { + return scramClient0{}, errors.New("invalid client0") + } + var ( + zid = string(m[1]) + bare = bytes.Clone(m[2]) + user = string(m[3]) + nonce = bytes.Clone(m[4]) + ext = string(m[5]) + ) + if len(ext) != 0 { + return scramClient0{}, errors.New("invalid extensions") + } + if zid != "" && zid != user { + return scramClient0{}, errors.New("authzid is not equal to username") // Kafka & Redpanda enforce that a present zid == username + } + return scramClient0{ + user: scramUnescaper.Replace(user), + bare: bare, + nonce: nonce, + }, nil +} + +func scramServerFirst(client0 scramClient0, auth scramAuth) (scramServer0, []byte) { + nonce := append(client0.nonce, base64.RawStdEncoding.EncodeToString(randBytes(16))...) + serverFirst := []byte(fmt.Sprintf("r=%s,s=%s,i=%d", + nonce, + base64.StdEncoding.EncodeToString(auth.salt), + scramIterations, + )) + return scramServer0{ + a: auth, + c0bare: client0.bare, + s0: serverFirst, + }, serverFirst +} + +// server-first-message +type scramServer0 struct { + a scramAuth + c0bare []byte + s0 []byte +} + +// validates client-final-message and replies with server-final-message +func (s *scramServer0) serverFinal(clientFinal []byte) ([]byte, error) { + m := reClientFinal.FindSubmatch(clientFinal) + if len(m) == 0 { + return nil, errors.New("invalid client-final-message") + } + var ( + finalWithoutProof = m[1] + channel = m[2] + clientProof64 = m[3] + h = sha256.New + ) + if s.a.mechanism == saslScram512 { + h = sha512.New + } + if !bytes.Equal(channel, []byte("biws")) { // "biws" == base64("n,,") + return nil, errors.New("invalid channel binding") + } + clientProof, err := base64.StdEncoding.DecodeString(string(clientProof64)) + if err != nil { + return nil, errors.New("client proof is not std-base64") + } + if len(clientProof) != h().Size() { + return nil, fmt.Errorf("len(client proof) %d != expected %d", len(clientProof), h().Size()) + } + + var clientKey []byte // := HMAC(SaltedPass, "Client Key") + { + mac := hmac.New(h, s.a.saltedPass) + mac.Write([]byte("Client Key")) + clientKey = mac.Sum(nil) + } + + var storedKey []byte // := H(ClientKey) + { + h := h() + h.Write(clientKey) + storedKey = h.Sum(nil) + } + + var authMessage []byte // := client-first-bare-message + "," + server-first-message + "," + client-final-message-without-proof + { + authMessage = append(s.c0bare, ',') + authMessage = append(authMessage, s.s0...) 
+ authMessage = append(authMessage, ',') + authMessage = append(authMessage, finalWithoutProof...) + } + + var clientSignature []byte // := HMAC(StoredKey, AuthMessage) + { + mac := hmac.New(h, storedKey) + mac.Write(authMessage) + clientSignature = mac.Sum(nil) + } + + usedKey := clientProof // := ClientKey XOR ClientSignature + { + for i, b := range clientSignature { + usedKey[i] ^= b + } + h := h() + h.Write(usedKey) + usedKey = h.Sum(nil) + } + if !bytes.Equal(usedKey, storedKey) { + return nil, errors.New("invalid password") + } + + var serverKey []byte // := HMAC(SaltedPass, "Server Key") + { + mac := hmac.New(h, s.a.saltedPass) + mac.Write([]byte("Server Key")) + serverKey = mac.Sum(nil) + } + var serverSignature []byte // := HMAC(ServerKey, AuthMessage) + { + mac := hmac.New(h, serverKey) + mac.Write(authMessage) + serverSignature = mac.Sum(nil) + } + + serverFinal := []byte(fmt.Sprintf("v=%s", base64.StdEncoding.EncodeToString(serverSignature))) + return serverFinal, nil +} + +var reClient0, reClientFinal *regexp.Regexp + +func init() { + // https://datatracker.ietf.org/doc/html/rfc5802#section-7 + const ( + valueSafe = "[\x01-\x2b\x2d-\x3c\x3e-\x7f]+" // all except \0 - , + value = "[\x01-\x2b\x2d-\x7f]+" // all except \0 , + printable = "[\x21-\x2b\x2d-\x7e]+" // all except , (and DEL, unnoted) + saslName = "(?:[\x01-\x2b\x2d-\x3c\x3e-\x7f]|=2C|=3D)+" // valueSafe | others; kafka is lazy here + b64 = `[a-zA-Z0-9/+]+={0,3}` // we are lazy here matching up to 3 = + ext = "(?:,[a-zA-Z]+=[\x01-\x2b\x2d-\x7f]+)*" + ) + + // 0: entire match + // 1: authzid + // 2: client-first-message-bare + // 3: username + // 4: nonce + // 5: ext + client0 := fmt.Sprintf("^n,(?:a=(%s))?,((?:m=%s,)?n=(%s),r=(%s)(%s))$", saslName, value, saslName, printable, ext) + + // We reject extensions in client0. Kafka does not validate the nonce + // and some clients may generate it incorrectly (i.e. old franz-go), so + // we do not validate it. 
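+	//
+	// For reference, a client-final-message looks like (RFC 5802, section 7):
+	//
+	//	c=biws,r=<client nonce + server nonce>,p=<base64 ClientProof>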
+ // + // 0: entire match + // 1: channel-final-message-without-proof + // 2: channel binding + // 3: proof + clientFinal := fmt.Sprintf("^(c=(%s),r=%s),p=(%s)$", b64, printable, b64) + + reClient0 = regexp.MustCompile(client0) + reClientFinal = regexp.MustCompile(clientFinal) +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kfake/topic_partition.go b/vendor/github.com/twmb/franz-go/pkg/kfake/topic_partition.go new file mode 100644 index 00000000000..ac409c53e7d --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kfake/topic_partition.go @@ -0,0 +1,75 @@ +package kfake + +type tps[V any] map[string]map[int32]*V + +func (tps *tps[V]) getp(t string, p int32) (*V, bool) { + if *tps == nil { + return nil, false + } + ps := (*tps)[t] + if ps == nil { + return nil, false + } + v, ok := ps[p] + return v, ok +} + +func (tps *tps[V]) gett(t string) (map[int32]*V, bool) { + if tps == nil { + return nil, false + } + ps, ok := (*tps)[t] + return ps, ok +} + +func (tps *tps[V]) mkt(t string) map[int32]*V { + if *tps == nil { + *tps = make(map[string]map[int32]*V) + } + ps := (*tps)[t] + if ps == nil { + ps = make(map[int32]*V) + (*tps)[t] = ps + } + return ps +} + +func (tps *tps[V]) mkp(t string, p int32, newFn func() *V) *V { + ps := tps.mkt(t) + v, ok := ps[p] + if !ok { + v = newFn() + ps[p] = v + } + return v +} + +func (tps *tps[V]) mkpDefault(t string, p int32) *V { + return tps.mkp(t, p, func() *V { return new(V) }) +} + +func (tps *tps[V]) set(t string, p int32, v V) { + *tps.mkpDefault(t, p) = v +} + +func (tps *tps[V]) each(fn func(t string, p int32, v *V)) { + for t, ps := range *tps { + for p, v := range ps { + fn(t, p, v) + } + } +} + +func (tps *tps[V]) delp(t string, p int32) { + if *tps == nil { + return + } + ps := (*tps)[t] + if ps == nil { + return + } + delete(ps, p) + if len(ps) == 0 { + delete(*tps, t) + } +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/atomic_maybe_work.go b/vendor/github.com/twmb/franz-go/pkg/kgo/atomic_maybe_work.go new file mode 100644 index 00000000000..bfdd3c1deb7 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/atomic_maybe_work.go @@ -0,0 +1,76 @@ +package kgo + +import "sync/atomic" + +const ( + stateUnstarted = iota + stateWorking + stateContinueWorking +) + +type workLoop struct{ state atomicU32 } + +// maybeBegin returns whether a work loop should begin. +func (l *workLoop) maybeBegin() bool { + var state uint32 + var done bool + for !done { + switch state = l.state.Load(); state { + case stateUnstarted: + done = l.state.CompareAndSwap(state, stateWorking) + state = stateWorking + case stateWorking: + done = l.state.CompareAndSwap(state, stateContinueWorking) + state = stateContinueWorking + case stateContinueWorking: + done = true + } + } + + return state == stateWorking +} + +// maybeFinish demotes loop's internal state and returns whether work should +// keep going. This function should be called before looping to continue +// work. +// +// If again is true, this will avoid demoting from working to not +// working. Again would be true if the loop knows it should continue working; +// calling this function is necessary even in this case to update loop's +// internal state. +// +// This function is a no-op if the loop is already finished, but generally, +// since the loop itself calls MaybeFinish after it has been started, this +// should never be called if the loop is unstarted. 
+func (l *workLoop) maybeFinish(again bool) bool { + switch state := l.state.Load(); state { + // Working: + // If again, we know we should continue; keep our state. + // If not again, we try to downgrade state and stop. + // If we cannot, then something slipped in to say keep going. + case stateWorking: + if !again { + again = !l.state.CompareAndSwap(state, stateUnstarted) + } + // Continue: demote ourself and run again no matter what. + case stateContinueWorking: + l.state.Store(stateWorking) + again = true + } + + return again +} + +func (l *workLoop) hardFinish() { + l.state.Store(stateUnstarted) +} + +// lazyI32 is used in a few places where we want atomics _sometimes_. Some +// uses do not need to be atomic (notably, setup), and we do not want the +// noCopy guard. +// +// Specifically, this is used for a few int32 settings in the config. +type lazyI32 int32 + +func (v *lazyI32) store(s int32) { atomic.StoreInt32((*int32)(v), s) } +func (v *lazyI32) load() int32 { return atomic.LoadInt32((*int32)(v)) } diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/broker.go b/vendor/github.com/twmb/franz-go/pkg/kgo/broker.go new file mode 100644 index 00000000000..c3d5a9a7508 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/broker.go @@ -0,0 +1,1507 @@ +package kgo + +import ( + "context" + "crypto/tls" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "math/rand" + "net" + "os" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/twmb/franz-go/pkg/kbin" + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" + "github.com/twmb/franz-go/pkg/sasl" +) + +type pinReq struct { + kmsg.Request + min int16 + max int16 + pinMin bool + pinMax bool +} + +func (p *pinReq) SetVersion(v int16) { + if p.pinMin && v < p.min { + v = p.min + } + if p.pinMax && v > p.max { + v = p.max + } + p.Request.SetVersion(v) +} + +type promisedReq struct { + ctx context.Context + req kmsg.Request + promise func(kmsg.Response, error) + enqueue time.Time // used to calculate writeWait +} + +type promisedResp struct { + ctx context.Context + + corrID int32 + // With flexible headers, we skip tags at the end of the response + // header for now because they're currently unused. However, the + // ApiVersions response uses v0 response header (no tags) even if the + // response body has flexible versions. This is done in support of the + // v0 fallback logic that allows for indexing into an exact offset. + // Thus, for ApiVersions specifically, this is false even if the + // request is flexible. + // + // As a side note, this note was not mentioned in KIP-482 which + // introduced flexible versions, and was mentioned in passing in + // KIP-511 which made ApiVersion flexible, so discovering what was + // wrong was not too fun ("Note that ApiVersionsResponse is flexible + // version but the response header is not flexible" is *it* in the + // entire KIP.) + // + // To see the version pinning, look at the code generator function + // generateHeaderVersion in + // generator/src/main/java/org/apache/kafka/message/ApiMessageTypeGenerator.java + flexibleHeader bool + + resp kmsg.Response + promise func(kmsg.Response, error) + readTimeout time.Duration + + // The following block is used for the read / e2e hooks. + bytesWritten int + writeWait time.Duration + timeToWrite time.Duration + readEnqueue time.Time +} + +// NodeName returns the name of a node, given the kgo internal node ID. 
+//
+// Internally, seed brokers are stored with very negative node IDs, and these
+// node IDs are visible in the BrokerMetadata struct. You can use NodeName to
+// convert the negative node ID into "seed_#". Brokers discovered through
+// metadata responses have standard non-negative numbers and this function just
+// returns the number as a string.
+func NodeName(nodeID int32) string {
+	return logID(nodeID)
+}
+
+func logID(id int32) string {
+	if id >= -10 {
+		return strconv.FormatInt(int64(id), 10)
+	}
+	return "seed_" + strconv.FormatInt(int64(id)-math.MinInt32, 10)
+}
+
+// BrokerMetadata is metadata for a broker.
+//
+// This struct mirrors kmsg.MetadataResponseBroker.
+type BrokerMetadata struct {
+	// NodeID is the broker node ID.
+	//
+	// Seed brokers will have very negative IDs; kgo does not try to map
+	// seed brokers to loaded brokers. You can use NodeName to convert
+	// the seed node ID into a formatted string.
+	NodeID int32
+
+	// Port is the port of the broker.
+	Port int32
+
+	// Host is the hostname of the broker.
+	Host string
+
+	// Rack is an optional rack of the broker. It is invalid to modify this
+	// field.
+	//
+	// Seed brokers will not have a rack.
+	Rack *string
+
+	_ struct{} // allow us to add fields later
+}
+
+func (me BrokerMetadata) equals(other kmsg.MetadataResponseBroker) bool {
+	return me.NodeID == other.NodeID &&
+		me.Port == other.Port &&
+		me.Host == other.Host &&
+		(me.Rack == nil && other.Rack == nil ||
+			me.Rack != nil && other.Rack != nil && *me.Rack == *other.Rack)
+}
+
+// broker manages the concept of how a client would interact with a broker.
+type broker struct {
+	cl *Client
+
+	addr string // net.JoinHostPort(meta.Host, meta.Port)
+	meta BrokerMetadata
+
+	// versions tracks the first load of an ApiVersions. We store this
+	// after the first connect, which helps speed things up on future
+	// reconnects (across any of the five broker connections) because we
+	// will never look up API versions for this broker again.
+	versions atomic.Value // *brokerVersions
+
+	// The cxn fields each manage a single TCP connection to one broker.
+	// Each field is managed serially in handleReqs. This means that only
+	// one write can happen at a time, regardless of which connection the
+	// write goes to, but the write is expected to be fast whereas the wait
+	// for the response is expected to be slow.
+	//
+	// Produce requests go to cxnProduce, fetch to cxnFetch, join/sync go
+	// to cxnGroup, anything with TimeoutMillis goes to cxnSlow, and
+	// everything else goes to cxnNormal.
+	cxnNormal  *brokerCxn
+	cxnProduce *brokerCxn
+	cxnFetch   *brokerCxn
+	cxnGroup   *brokerCxn
+	cxnSlow    *brokerCxn
+
+	reapMu sync.Mutex // held when modifying a brokerCxn
+
+	// reqs manages incoming message requests.
+	reqs ringReq
+	// dead is an atomic so a backed up reqs cannot block broker stoppage.
+	dead atomicBool
+}
+
+// brokerVersions is loaded once (and potentially a few times concurrently if
+// multiple connections are opening at once) and then forever stored for a
+// broker.
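+//
+// Lookups are then a constant-time index by request key, as handleReq does:
+//
+//	if v := b.loadVersions(); v != nil && v.versions[req.Key()] >= 0 {
+//		// the broker advertised support for this request key
+//	}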
+type brokerVersions struct {
+	versions [kmsg.MaxKey + 1]int16
+}
+
+func newBrokerVersions() *brokerVersions {
+	var v brokerVersions
+	for i := range &v.versions {
+		v.versions[i] = -1
+	}
+	return &v
+}
+
+func (*brokerVersions) len() int { return kmsg.MaxKey + 1 }
+
+func (b *broker) loadVersions() *brokerVersions {
+	loaded := b.versions.Load()
+	if loaded == nil {
+		return nil
+	}
+	return loaded.(*brokerVersions)
+}
+
+func (b *broker) storeVersions(v *brokerVersions) { b.versions.Store(v) }
+
+const unknownControllerID = -1
+
+var unknownBrokerMetadata = BrokerMetadata{
+	NodeID: -1,
+}
+
+// broker IDs are all positive, but Kafka uses -1 to signify unknown
+// controllers. To avoid issues where a client broker ID map knows of
+// a -1 ID controller, we start unknown seeds at MinInt32.
+func unknownSeedID(seedNum int) int32 {
+	return int32(math.MinInt32 + seedNum)
+}
+
+func (cl *Client) newBroker(nodeID int32, host string, port int32, rack *string) *broker {
+	return &broker{
+		cl: cl,
+
+		addr: net.JoinHostPort(host, strconv.Itoa(int(port))),
+		meta: BrokerMetadata{
+			NodeID: nodeID,
+			Host:   host,
+			Port:   port,
+			Rack:   rack,
+		},
+	}
+}
+
+// stopForever permanently disables this broker.
+func (b *broker) stopForever() {
+	if b.dead.Swap(true) {
+		return
+	}
+
+	b.reqs.die() // no more pushing
+
+	b.reapMu.Lock()
+	defer b.reapMu.Unlock()
+
+	b.cxnNormal.die()
+	b.cxnProduce.die()
+	b.cxnFetch.die()
+	b.cxnGroup.die()
+	b.cxnSlow.die()
+}
+
+// do issues a request to the broker, eventually calling the promise
+// once the request either fails or is responded to (with failure or not).
+//
+// The promise will block broker processing.
+func (b *broker) do(
+	ctx context.Context,
+	req kmsg.Request,
+	promise func(kmsg.Response, error),
+) {
+	pr := promisedReq{ctx, req, promise, time.Now()}
+
+	first, dead := b.reqs.push(pr)
+
+	if first {
+		go b.handleReqs(pr)
+	} else if dead {
+		promise(nil, errChosenBrokerDead)
+	}
+}
+
+// waitResp runs a req, waits for the resp and returns the resp and err.
+func (b *broker) waitResp(ctx context.Context, req kmsg.Request) (kmsg.Response, error) {
+	var resp kmsg.Response
+	var err error
+	done := make(chan struct{})
+	wait := func(kresp kmsg.Response, kerr error) {
+		resp, err = kresp, kerr
+		close(done)
+	}
+	b.do(ctx, req, wait)
+	<-done
+	return resp, err
+}
+
+func (b *broker) handleReqs(pr promisedReq) {
+	var more, dead bool
+start:
+	if dead {
+		pr.promise(nil, errChosenBrokerDead)
+	} else {
+		b.handleReq(pr)
+	}
+
+	pr, more, dead = b.reqs.dropPeek()
+	if more {
+		goto start
+	}
+}
+
+func (b *broker) handleReq(pr promisedReq) {
+	req := pr.req
+	var cxn *brokerCxn
+	var retriedOnNewConnection bool
+start:
+	{
+		var err error
+		if cxn, err = b.loadConnection(pr.ctx, req); err != nil {
+			// It is rare, but it is possible that the broker has
+			// an immediate issue on a new connection. We retry
+			// once.
+			if isRetryableBrokerErr(err) && !retriedOnNewConnection {
+				retriedOnNewConnection = true
+				goto start
+			}
+			pr.promise(nil, err)
+			return
+		}
+	}
+
+	v := b.loadVersions()
+
+	if int(req.Key()) > v.len() || b.cl.cfg.maxVersions != nil && !b.cl.cfg.maxVersions.HasKey(req.Key()) {
+		pr.promise(nil, errUnknownRequestKey)
+		return
+	}
+
+	// If v.versions[0] is non-negative, then we loaded API
+	// versions. If the version for this request is negative, we
+	// know the broker cannot handle this request.
+ if v.versions[0] >= 0 && v.versions[req.Key()] < 0 { + pr.promise(nil, errBrokerTooOld) + return + } + + ourMax := req.MaxVersion() + if b.cl.cfg.maxVersions != nil { + userMax, _ := b.cl.cfg.maxVersions.LookupMaxKeyVersion(req.Key()) // we validated HasKey above + if userMax < ourMax { + ourMax = userMax + } + } + + // If brokerMax is negative at this point, we have no api + // versions because the client is pinned pre 0.10.0 and we + // stick with our max. + version := ourMax + if brokerMax := v.versions[req.Key()]; brokerMax >= 0 && brokerMax < ourMax { + version = brokerMax + } + + minVersion := int16(-1) + + // If the version now (after potential broker downgrading) is + // lower than we desire, we fail the request for the broker is + // too old. + if b.cl.cfg.minVersions != nil { + minVersion, _ = b.cl.cfg.minVersions.LookupMaxKeyVersion(req.Key()) + if minVersion > -1 && version < minVersion { + pr.promise(nil, errBrokerTooOld) + return + } + } + + req.SetVersion(version) // always go for highest version + setVersion := req.GetVersion() + if minVersion > -1 && setVersion < minVersion { + pr.promise(nil, fmt.Errorf("request key %d version returned %d below the user defined min of %d", req.Key(), setVersion, minVersion)) + return + } + if version < setVersion { + // If we want to set an old version, but the request is pinned + // high, we need to fail with errBrokerTooOld. The broker wants + // an old version, we want a high version. We rely on this + // error in backcompat request sharding. + pr.promise(nil, errBrokerTooOld) + return + } + + if !cxn.expiry.IsZero() && time.Now().After(cxn.expiry) { + // If we are after the reauth time, try to reauth. We + // can only have an expiry if we went the authenticate + // flow, so we know we are authenticating again. + // + // Some implementations (AWS) occasionally fail for + // unclear reasons (principals change, somehow). If + // we receive SASL_AUTHENTICATION_FAILED, we retry + // once on a new connection. See #249. + // + // For KIP-368. + cxn.cl.cfg.logger.Log(LogLevelDebug, "sasl expiry limit reached, reauthenticating", "broker", logID(cxn.b.meta.NodeID)) + if err := cxn.sasl(); err != nil { + cxn.die() + if errors.Is(err, kerr.SaslAuthenticationFailed) && !retriedOnNewConnection { + cxn.cl.cfg.logger.Log(LogLevelDebug, "sasl reauth failed, retrying once on new connection", "broker", logID(cxn.b.meta.NodeID), "err", err) + retriedOnNewConnection = true + goto start + } + pr.promise(nil, err) + return + } + } + + // Juuuust before we issue the request, we check if it was + // canceled. We could have previously tried this request, which + // then failed and retried. + // + // Checking the context was canceled here ensures we do not + // loop. We could be more precise with error tracking, though. + select { + case <-pr.ctx.Done(): + pr.promise(nil, pr.ctx.Err()) + return + default: + } + + // Produce requests (and only produce requests) can be written + // without receiving a reply. If we see required acks is 0, + // then we immediately call the promise with no response. + // + // We provide a non-nil *kmsg.ProduceResponse for + // *kmsg.ProduceRequest just to ensure we do not return with no + // error and no kmsg.Response, per the client contract. + // + // As documented on the client's Request function, if this is a + // *kmsg.ProduceRequest, we rewrite the acks to match the + // client configured acks, and we rewrite the timeout millis if + // acks is 0. 
We do this to ensure that our discard goroutine
+	// is used correctly, and so that we do not write a request
+	// with 0 acks and then send it to handleResps where it will
+	// not get a response.
+	var isNoResp bool
+	var noResp *kmsg.ProduceResponse
+	switch r := req.(type) {
+	case *produceRequest:
+		isNoResp = r.acks == 0
+	case *kmsg.ProduceRequest:
+		r.Acks = b.cl.cfg.acks.val
+		if r.Acks == 0 {
+			isNoResp = true
+			r.TimeoutMillis = int32(b.cl.cfg.produceTimeout.Milliseconds())
+		}
+		noResp = kmsg.NewPtrProduceResponse()
+		noResp.Version = req.GetVersion()
+	}
+
+	corrID, bytesWritten, writeWait, timeToWrite, readEnqueue, writeErr := cxn.writeRequest(pr.ctx, pr.enqueue, req)
+
+	if writeErr != nil {
+		pr.promise(nil, writeErr)
+		cxn.die()
+		cxn.hookWriteE2E(req.Key(), bytesWritten, writeWait, timeToWrite, writeErr)
+		return
+	}
+
+	if isNoResp {
+		pr.promise(noResp, nil)
+		cxn.hookWriteE2E(req.Key(), bytesWritten, writeWait, timeToWrite, writeErr)
+		return
+	}
+
+	rt, _ := cxn.cl.connTimeouter.timeouts(req)
+
+	cxn.waitResp(promisedResp{
+		pr.ctx,
+		corrID,
+		req.IsFlexible() && req.Key() != 18, // response header not flexible if ApiVersions; see promisedResp doc
+		req.ResponseKind(),
+		pr.promise,
+		rt,
+		bytesWritten,
+		writeWait,
+		timeToWrite,
+		readEnqueue,
+	})
+}
+
+func (cxn *brokerCxn) hookWriteE2E(key int16, bytesWritten int, writeWait, timeToWrite time.Duration, writeErr error) {
+	cxn.cl.cfg.hooks.each(func(h Hook) {
+		if h, ok := h.(HookBrokerE2E); ok {
+			h.OnBrokerE2E(cxn.b.meta, key, BrokerE2E{
+				BytesWritten: bytesWritten,
+				WriteWait:    writeWait,
+				TimeToWrite:  timeToWrite,
+				WriteErr:     writeErr,
+			})
+		}
+	})
+}
+
+// bufPool is used to reuse issued-request buffers across writes to brokers.
+type bufPool struct{ p *sync.Pool }
+
+func newBufPool() bufPool {
+	return bufPool{
+		p: &sync.Pool{New: func() any { r := make([]byte, 1<<10); return &r }},
+	}
+}
+
+func (p bufPool) get() []byte { return (*p.p.Get().(*[]byte))[:0] }
+func (p bufPool) put(b []byte) { p.p.Put(&b) }
+
+// loadConnection returns the broker's connection, creating it if necessary
+// and returning an error if that fails.
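+//
+// The request-to-connection mapping mirrors the cxn fields on broker: produce
+// (key 0) and fetch (key 1) get dedicated connections, join/sync (keys 11 and
+// 14) share cxnGroup, TimeoutRequest implementors go to cxnSlow, and
+// everything else (e.g. Metadata, key 3) lands on cxnNormal.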
+func (b *broker) loadConnection(ctx context.Context, req kmsg.Request) (*brokerCxn, error) { + var ( + pcxn = &b.cxnNormal + isProduceCxn bool // see docs on brokerCxn.discard for why we do this + reqKey = req.Key() + _, isTimeout = req.(kmsg.TimeoutRequest) + ) + switch { + case reqKey == 0: + pcxn = &b.cxnProduce + isProduceCxn = true + case reqKey == 1: + pcxn = &b.cxnFetch + case reqKey == 11 || reqKey == 14: // join || sync + pcxn = &b.cxnGroup + case isTimeout: + pcxn = &b.cxnSlow + } + + if *pcxn != nil && !(*pcxn).dead.Load() { + return *pcxn, nil + } + + conn, err := b.connect(ctx) + if err != nil { + return nil, err + } + + cxn := &brokerCxn{ + cl: b.cl, + b: b, + + addr: b.addr, + conn: conn, + deadCh: make(chan struct{}), + } + if err = cxn.init(isProduceCxn); err != nil { + b.cl.cfg.logger.Log(LogLevelDebug, "connection initialization failed", "addr", b.addr, "broker", logID(b.meta.NodeID), "err", err) + cxn.closeConn() + return nil, err + } + b.cl.cfg.logger.Log(LogLevelDebug, "connection initialized successfully", "addr", b.addr, "broker", logID(b.meta.NodeID)) + + b.reapMu.Lock() + defer b.reapMu.Unlock() + *pcxn = cxn + return cxn, nil +} + +func (cl *Client) reapConnectionsLoop() { + idleTimeout := cl.cfg.connIdleTimeout + if idleTimeout < 0 { // impossible due to cfg.validate, but just in case + return + } + + ticker := time.NewTicker(idleTimeout) + defer ticker.Stop() + last := time.Now() + for { + select { + case <-cl.ctx.Done(): + return + case tick := <-ticker.C: + start := time.Now() + reaped := cl.reapConnections(idleTimeout) + dur := time.Since(start) + if reaped > 0 { + cl.cfg.logger.Log(LogLevelDebug, "reaped connections", "time_since_last_reap", tick.Sub(last), "reap_dur", dur, "num_reaped", reaped) + } + last = tick + } + } +} + +func (cl *Client) reapConnections(idleTimeout time.Duration) (total int) { + cl.brokersMu.Lock() + seeds := cl.loadSeeds() + brokers := make([]*broker, 0, len(cl.brokers)+len(seeds)) + brokers = append(brokers, cl.brokers...) + brokers = append(brokers, seeds...) + cl.brokersMu.Unlock() + + for _, broker := range brokers { + total += broker.reapConnections(idleTimeout) + } + return total +} + +func (b *broker) reapConnections(idleTimeout time.Duration) (total int) { + b.reapMu.Lock() + defer b.reapMu.Unlock() + + for _, cxn := range []*brokerCxn{ + b.cxnNormal, + b.cxnProduce, + b.cxnFetch, + b.cxnGroup, + b.cxnSlow, + } { + if cxn == nil || cxn.dead.Load() { + continue + } + + // If we have not written nor read in a long time, the + // connection can be reaped. If only one is idle, the other may + // be busy (or may not happen): + // + // - produce can write but never read + // - fetch can hang for a while reading (infrequent writes) + + lastWrite := time.Unix(0, cxn.lastWrite.Load()) + lastRead := time.Unix(0, cxn.lastRead.Load()) + + writeIdle := time.Since(lastWrite) > idleTimeout && !cxn.writing.Load() + readIdle := time.Since(lastRead) > idleTimeout && !cxn.reading.Load() + + if writeIdle && readIdle { + cxn.die() + total++ + } + } + return total +} + +// connect connects to the broker's addr, returning the new connection. 
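+//
+// Dialing always goes through cfg.dialFn, so anything matching
+// func(ctx context.Context, network, host string) (net.Conn, error) can be
+// plugged in; a hedged sketch using the public kgo.Dialer option:
+//
+//	cl, err := kgo.NewClient(
+//		kgo.SeedBrokers("localhost:9092"),
+//		kgo.Dialer((&net.Dialer{Timeout: 5 * time.Second}).DialContext),
+//	)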
+func (b *broker) connect(ctx context.Context) (net.Conn, error) { + b.cl.cfg.logger.Log(LogLevelDebug, "opening connection to broker", "addr", b.addr, "broker", logID(b.meta.NodeID)) + start := time.Now() + conn, err := b.cl.cfg.dialFn(ctx, "tcp", b.addr) + since := time.Since(start) + b.cl.cfg.hooks.each(func(h Hook) { + if h, ok := h.(HookBrokerConnect); ok { + h.OnBrokerConnect(b.meta, since, conn, err) + } + }) + if err != nil { + if !errors.Is(err, ErrClientClosed) && !errors.Is(err, context.Canceled) && !strings.Contains(err.Error(), "operation was canceled") { + if errors.Is(err, io.EOF) { + b.cl.cfg.logger.Log(LogLevelWarn, "unable to open connection to broker due to an immediate EOF, which often means the client is using TLS when the broker is not expecting it (is TLS misconfigured?)", "addr", b.addr, "broker", logID(b.meta.NodeID), "err", err) + return nil, &ErrFirstReadEOF{kind: firstReadTLS, err: err} + } + b.cl.cfg.logger.Log(LogLevelWarn, "unable to open connection to broker", "addr", b.addr, "broker", logID(b.meta.NodeID), "err", err) + } + return nil, fmt.Errorf("unable to dial: %w", err) + } + b.cl.cfg.logger.Log(LogLevelDebug, "connection opened to broker", "addr", b.addr, "broker", logID(b.meta.NodeID)) + return conn, nil +} + +// brokerCxn manages an actual connection to a Kafka broker. This is separate +// from the broker struct to allow lazy connection (re)creation. +type brokerCxn struct { + throttleUntil atomicI64 // atomic nanosec + + conn net.Conn + + cl *Client + b *broker + + addr string + + mechanism sasl.Mechanism + expiry time.Time + + corrID int32 + + // The following four fields are used for connection reaping. + // Write is only updated in one location; read is updated in three + // due to readConn, readConnAsync, and discard. + lastWrite atomicI64 + lastRead atomicI64 + writing atomicBool + reading atomicBool + + successes uint64 + + // resps manages reading kafka responses. + resps ringResp + // dead is an atomic so that a backed up resps cannot block cxn death. + dead atomicBool + // closed in closeConn; allows throttle waiting to quit + deadCh chan struct{} +} + +func (cxn *brokerCxn) init(isProduceCxn bool) error { + hasVersions := cxn.b.loadVersions() != nil + if !hasVersions { + if cxn.b.cl.cfg.maxVersions == nil || cxn.b.cl.cfg.maxVersions.HasKey(18) { + if err := cxn.requestAPIVersions(); err != nil { + if !errors.Is(err, ErrClientClosed) && !isRetryableBrokerErr(err) { + cxn.cl.cfg.logger.Log(LogLevelError, "unable to request api versions", "broker", logID(cxn.b.meta.NodeID), "err", err) + } + return err + } + } else { + // We have a max versions, and it indicates no support + // for ApiVersions. We just store a default -1 set. + cxn.b.storeVersions(newBrokerVersions()) + } + } + + if err := cxn.sasl(); err != nil { + if !errors.Is(err, ErrClientClosed) && !isRetryableBrokerErr(err) { + cxn.cl.cfg.logger.Log(LogLevelError, "unable to initialize sasl", "broker", logID(cxn.b.meta.NodeID), "err", err) + } + return err + } + + if isProduceCxn && cxn.cl.cfg.acks.val == 0 { + go cxn.discard() // see docs on discard for why we do this + } + return nil +} + +func (cxn *brokerCxn) requestAPIVersions() error { + maxVersion := int16(3) + + // If the user configured a max versions, we check that the key exists + // before entering this function. Thus, we expect exists to be true, + // but we still doubly check it for sanity (as well as userMax, which + // can only be non-negative based off of LookupMaxKeyVersion's API).
+ if cxn.cl.cfg.maxVersions != nil { + userMax, exists := cxn.cl.cfg.maxVersions.LookupMaxKeyVersion(18) // 18 == api versions + if exists && userMax >= 0 { + maxVersion = userMax + } + } + +start: + req := kmsg.NewPtrApiVersionsRequest() + req.Version = maxVersion + req.ClientSoftwareName = cxn.cl.cfg.softwareName + req.ClientSoftwareVersion = cxn.cl.cfg.softwareVersion + cxn.cl.cfg.logger.Log(LogLevelDebug, "issuing api versions request", "broker", logID(cxn.b.meta.NodeID), "version", maxVersion) + corrID, bytesWritten, writeWait, timeToWrite, readEnqueue, writeErr := cxn.writeRequest(nil, time.Now(), req) + if writeErr != nil { + cxn.hookWriteE2E(req.Key(), bytesWritten, writeWait, timeToWrite, writeErr) + return writeErr + } + + rt, _ := cxn.cl.connTimeouter.timeouts(req) + // api versions does *not* use flexible response headers; see comment in promisedResp + rawResp, err := cxn.readResponse(nil, req.Key(), req.GetVersion(), corrID, false, rt, bytesWritten, writeWait, timeToWrite, readEnqueue) + if err != nil { + return err + } + if len(rawResp) < 2 { + return fmt.Errorf("invalid length %d short response from ApiVersions request", len(rawResp)) + } + + resp := req.ResponseKind().(*kmsg.ApiVersionsResponse) + + // If we used a version larger than Kafka supports, Kafka replies with + // Version 0 and an UNSUPPORTED_VERSION error. + // + // Pre Kafka 2.4, we have to retry the request with version 0. + // Post, Kafka replies with all versions. + if rawResp[1] == 35 { + if maxVersion == 0 { + return errors.New("broker replied with UNSUPPORTED_VERSION to an ApiVersions request of version 0") + } + srawResp := string(rawResp) + if srawResp == "\x00\x23\x00\x00\x00\x00" || + // EventHubs erroneously replies with v1, so we check + // for that as well. + srawResp == "\x00\x23\x00\x00\x00\x00\x00\x00\x00\x00" { + cxn.cl.cfg.logger.Log(LogLevelDebug, "broker does not know our ApiVersions version, downgrading to version 0 and retrying", "broker", logID(cxn.b.meta.NodeID)) + maxVersion = 0 + goto start + } + resp.Version = 0 + } + + if err = resp.ReadFrom(rawResp); err != nil { + return fmt.Errorf("unable to read ApiVersions response: %w", err) + } + if len(resp.ApiKeys) == 0 { + return errors.New("ApiVersions response invalidly contained no ApiKeys") + } + + v := newBrokerVersions() + for _, key := range resp.ApiKeys { + if key.ApiKey > kmsg.MaxKey || key.ApiKey < 0 { + continue + } + v.versions[key.ApiKey] = key.MaxVersion + } + cxn.b.storeVersions(v) + return nil +} + +func (cxn *brokerCxn) sasl() error { + if len(cxn.cl.cfg.sasls) == 0 { + return nil + } + mechanism := cxn.cl.cfg.sasls[0] + retried := false + authenticate := false + + v := cxn.b.loadVersions() + req := kmsg.NewPtrSASLHandshakeRequest() + +start: + if mechanism.Name() != "GSSAPI" && v.versions[req.Key()] >= 0 { + req.Mechanism = mechanism.Name() + req.Version = v.versions[req.Key()] + cxn.cl.cfg.logger.Log(LogLevelDebug, "issuing SASLHandshakeRequest", "broker", logID(cxn.b.meta.NodeID)) + corrID, bytesWritten, writeWait, timeToWrite, readEnqueue, writeErr := cxn.writeRequest(nil, time.Now(), req) + if writeErr != nil { + cxn.hookWriteE2E(req.Key(), bytesWritten, writeWait, timeToWrite, writeErr) + return writeErr + } + + rt, _ := cxn.cl.connTimeouter.timeouts(req) + rawResp, err := cxn.readResponse(nil, req.Key(), req.GetVersion(), corrID, req.IsFlexible(), rt, bytesWritten, writeWait, timeToWrite, readEnqueue) + if err != nil { + return err + } + resp := req.ResponseKind().(*kmsg.SASLHandshakeResponse) + if err = 
resp.ReadFrom(rawResp); err != nil { + return err + } + + err = kerr.ErrorForCode(resp.ErrorCode) + if err != nil { + if !retried && err == kerr.UnsupportedSaslMechanism { + for _, ours := range cxn.cl.cfg.sasls[1:] { + for _, supported := range resp.SupportedMechanisms { + if supported == ours.Name() { + mechanism = ours + retried = true + goto start + } + } + } + } + return err + } + authenticate = req.Version == 1 + } + cxn.cl.cfg.logger.Log(LogLevelDebug, "beginning sasl authentication", "broker", logID(cxn.b.meta.NodeID), "addr", cxn.addr, "mechanism", mechanism.Name(), "authenticate", authenticate) + cxn.mechanism = mechanism + return cxn.doSasl(authenticate) +} + +func (cxn *brokerCxn) doSasl(authenticate bool) error { + session, clientWrite, err := cxn.mechanism.Authenticate(cxn.cl.ctx, cxn.addr) + if err != nil { + return err + } + if len(clientWrite) == 0 { + return fmt.Errorf("unexpected server-write sasl with mechanism %s", cxn.mechanism.Name()) + } + + prereq := time.Now() // used below for sasl lifetime calculation + var lifetimeMillis int64 + + // Even if we do not wrap our reads/writes in SASLAuthenticate, we + // still use the SASLAuthenticate timeouts. + rt, wt := cxn.cl.connTimeouter.timeouts(kmsg.NewPtrSASLAuthenticateRequest()) + + // We continue writing until both the challenging is done AND the + // responses are done. We can have an additional response once we + // are done with challenges. + step := -1 + for done := false; !done || len(clientWrite) > 0; { + step++ + var challenge []byte + + if !authenticate { + buf := cxn.cl.bufPool.get() + + buf = append(buf[:0], 0, 0, 0, 0) + binary.BigEndian.PutUint32(buf, uint32(len(clientWrite))) + buf = append(buf, clientWrite...) + + cxn.cl.cfg.logger.Log(LogLevelDebug, "issuing raw sasl authenticate", "broker", logID(cxn.b.meta.NodeID), "addr", cxn.addr, "step", step) + _, _, _, _, err = cxn.writeConn(context.Background(), buf, wt, time.Now()) + + cxn.cl.bufPool.put(buf) + + if err != nil { + return err + } + if !done { + if _, challenge, _, _, err = cxn.readConn(context.Background(), rt, time.Now()); err != nil { + return err + } + } + } else { + req := kmsg.NewPtrSASLAuthenticateRequest() + req.SASLAuthBytes = clientWrite + req.Version = cxn.b.loadVersions().versions[req.Key()] + cxn.cl.cfg.logger.Log(LogLevelDebug, "issuing SASLAuthenticate", "broker", logID(cxn.b.meta.NodeID), "version", req.Version, "step", step) + + // Lifetime: we take the timestamp before we write our + // request; see usage below for why. + prereq = time.Now() + corrID, bytesWritten, writeWait, timeToWrite, readEnqueue, writeErr := cxn.writeRequest(nil, time.Now(), req) + + // As mentioned above, we could have one final write + // without reading a response back (kerberos). If this + // is the case, we need to e2e. 
+ if writeErr != nil || done { + cxn.hookWriteE2E(req.Key(), bytesWritten, writeWait, timeToWrite, writeErr) + if writeErr != nil { + return writeErr + } + } + if !done { + rawResp, err := cxn.readResponse(nil, req.Key(), req.GetVersion(), corrID, req.IsFlexible(), rt, bytesWritten, writeWait, timeToWrite, readEnqueue) + if err != nil { + return err + } + resp := req.ResponseKind().(*kmsg.SASLAuthenticateResponse) + if err = resp.ReadFrom(rawResp); err != nil { + return err + } + + if err = kerr.ErrorForCode(resp.ErrorCode); err != nil { + if resp.ErrorMessage != nil { + return fmt.Errorf("%s: %w", *resp.ErrorMessage, err) + } + return err + } + challenge = resp.SASLAuthBytes + lifetimeMillis = resp.SessionLifetimeMillis + } + } + + clientWrite = nil + + if !done { + if done, clientWrite, err = session.Challenge(challenge); err != nil { + return err + } + } + } + + if lifetimeMillis > 0 { + // Lifetime is problematic. We need to be a bit pessimistic. + // + // We want a lower bound: we use 1s (arbitrary), but if 1.1x our + // e2e sasl latency is more than 1s, we use the latency. + // + // We do not want to reauthenticate too close to the lifetime + // especially for larger lifetimes due to clock issues (#205). + // We take 95% to 98% of the lifetime. + minPessimismMillis := float64(time.Second.Milliseconds()) + latencyMillis := 1.1 * float64(time.Since(prereq).Milliseconds()) + if latencyMillis > minPessimismMillis { + minPessimismMillis = latencyMillis + } + var random float64 + cxn.b.cl.rng(func(r *rand.Rand) { random = r.Float64() }) + maxPessimismMillis := float64(lifetimeMillis) * (0.05 - 0.03*random) // 95 to 98% of lifetime (pessimism 2% to 5%) + + // Our minimum lifetime is always 1s (or latency, if larger). + // When our max pessimism becomes more than min pessimism, + // every second after, we add between 0.02s and 0.05s to our + // backoff. At 12hr, we reauth ~14 to 36min before the + // lifetime. + usePessimismMillis := maxPessimismMillis + if minPessimismMillis > maxPessimismMillis { + usePessimismMillis = minPessimismMillis + } + useLifetimeMillis := lifetimeMillis - int64(usePessimismMillis) + + // Subtracting our min pessimism may result in our connection + // immediately expiring. We always accept this one reauth to + // issue our one request, and our next request will again + // reauth. Brokers should give us longer lifetimes, but that + // may not always happen (see #136, #249). + now := time.Now() + cxn.expiry = now.Add(time.Duration(useLifetimeMillis) * time.Millisecond) + cxn.cl.cfg.logger.Log(LogLevelDebug, "sasl has a limited lifetime", + "broker", logID(cxn.b.meta.NodeID), + "session_lifetime", time.Duration(lifetimeMillis)*time.Millisecond, + "lifetime_pessimism", time.Duration(usePessimismMillis)*time.Millisecond, + "reauthenticate_in", cxn.expiry.Sub(now), + ) + } + return nil +} + +// Some internal requests use the client context to issue requests, so if the +// client is closed, this select case can be selected. We want to return the +// proper error. +// +// This function is used in this file anywhere the client context can cause +// ErrClientClosed. +func maybeUpdateCtxErr(clientCtx, reqCtx context.Context, err *error) { + if clientCtx == reqCtx { + *err = ErrClientClosed + } +} + +// writeRequest writes a message request to the broker connection, bumping the +// connection's correlation ID as appropriate for the next write.
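+// +// For orientation, the framed bytes produced by the request formatter follow +// standard Kafka framing; a sketch of the (non-flexible, v1) request header +// layout is: +// +// int32 size (big endian; number of bytes that follow) +// int16 api key +// int16 api version +// int32 correlation id +// nullable string client id +// ... request body (flexible versions add tag buffers)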
+func (cxn *brokerCxn) writeRequest(ctx context.Context, enqueuedForWritingAt time.Time, req kmsg.Request) (corrID int32, bytesWritten int, writeWait, timeToWrite time.Duration, readEnqueue time.Time, writeErr error) { + // A nil ctx means we cannot be throttled. + if ctx != nil { + throttleUntil := time.Unix(0, cxn.throttleUntil.Load()) + if sleep := time.Until(throttleUntil); sleep > 0 { + after := time.NewTimer(sleep) + select { + case <-after.C: + case <-ctx.Done(): + writeErr = ctx.Err() + maybeUpdateCtxErr(cxn.cl.ctx, ctx, &writeErr) + case <-cxn.cl.ctx.Done(): + writeErr = ErrClientClosed + case <-cxn.deadCh: + writeErr = errChosenBrokerDead + } + if writeErr != nil { + after.Stop() + writeWait = time.Since(enqueuedForWritingAt) + return + } + } + } + + buf := cxn.cl.reqFormatter.AppendRequest( + cxn.cl.bufPool.get()[:0], + req, + cxn.corrID, + ) + + _, wt := cxn.cl.connTimeouter.timeouts(req) + bytesWritten, writeWait, timeToWrite, readEnqueue, writeErr = cxn.writeConn(ctx, buf, wt, enqueuedForWritingAt) + + cxn.cl.bufPool.put(buf) + + cxn.cl.cfg.hooks.each(func(h Hook) { + if h, ok := h.(HookBrokerWrite); ok { + h.OnBrokerWrite(cxn.b.meta, req.Key(), bytesWritten, writeWait, timeToWrite, writeErr) + } + }) + if logger := cxn.cl.cfg.logger; logger.Level() >= LogLevelDebug { + logger.Log(LogLevelDebug, fmt.Sprintf("wrote %s v%d", kmsg.NameForKey(req.Key()), req.GetVersion()), "broker", logID(cxn.b.meta.NodeID), "bytes_written", bytesWritten, "write_wait", writeWait, "time_to_write", timeToWrite, "err", writeErr) + } + + if writeErr != nil { + return + } + corrID = cxn.corrID + cxn.corrID++ + if cxn.corrID < 0 { + cxn.corrID = 0 + } + return +} + +func (cxn *brokerCxn) writeConn( + ctx context.Context, + buf []byte, + timeout time.Duration, + enqueuedForWritingAt time.Time, +) (bytesWritten int, writeWait, timeToWrite time.Duration, readEnqueue time.Time, writeErr error) { + cxn.writing.Store(true) + defer func() { + cxn.lastWrite.Store(time.Now().UnixNano()) + cxn.writing.Store(false) + }() + + if ctx == nil { + ctx = context.Background() + } + if timeout > 0 { + cxn.conn.SetWriteDeadline(time.Now().Add(timeout)) + } + defer cxn.conn.SetWriteDeadline(time.Time{}) + writeDone := make(chan struct{}) + go func() { + defer close(writeDone) + writeStart := time.Now() + bytesWritten, writeErr = cxn.conn.Write(buf) + // As soon as we are done writing, we track that we have now + // enqueued this request for reading. 
+ readEnqueue = time.Now() + writeWait = writeStart.Sub(enqueuedForWritingAt) + timeToWrite = readEnqueue.Sub(writeStart) + }() + select { + case <-writeDone: + case <-cxn.cl.ctx.Done(): + cxn.conn.SetWriteDeadline(time.Now()) + <-writeDone + if writeErr != nil { + writeErr = ErrClientClosed + } + case <-ctx.Done(): + cxn.conn.SetWriteDeadline(time.Now()) + <-writeDone + if writeErr != nil && ctx.Err() != nil { + writeErr = ctx.Err() + maybeUpdateCtxErr(cxn.cl.ctx, ctx, &writeErr) + } + } + return +} + +func (cxn *brokerCxn) readConn( + ctx context.Context, + timeout time.Duration, + enqueuedForReadingAt time.Time, +) (nread int, buf []byte, readWait, timeToRead time.Duration, err error) { + cxn.reading.Store(true) + defer func() { + cxn.lastRead.Store(time.Now().UnixNano()) + cxn.reading.Store(false) + }() + + if ctx == nil { + ctx = context.Background() + } + if timeout > 0 { + cxn.conn.SetReadDeadline(time.Now().Add(timeout)) + } + defer cxn.conn.SetReadDeadline(time.Time{}) + readDone := make(chan struct{}) + go func() { + defer close(readDone) + sizeBuf := make([]byte, 4) + readStart := time.Now() + defer func() { + timeToRead = time.Since(readStart) + readWait = readStart.Sub(enqueuedForReadingAt) + }() + if nread, err = io.ReadFull(cxn.conn, sizeBuf); err != nil { + return + } + var size int32 + if size, err = cxn.parseReadSize(sizeBuf); err != nil { + return + } + buf = make([]byte, size) + var nread2 int + nread2, err = io.ReadFull(cxn.conn, buf) + nread += nread2 + buf = buf[:nread2] + if err != nil { + return + } + }() + select { + case <-readDone: + case <-cxn.cl.ctx.Done(): + cxn.conn.SetReadDeadline(time.Now()) + <-readDone + if err != nil { + err = ErrClientClosed + } + case <-ctx.Done(): + cxn.conn.SetReadDeadline(time.Now()) + <-readDone + if err != nil && ctx.Err() != nil { + err = ctx.Err() + maybeUpdateCtxErr(cxn.cl.ctx, ctx, &err) + } + } + return +} + +// Parses a length 4 slice and enforces the min / max read size based off the +// client configuration. +func (cxn *brokerCxn) parseReadSize(sizeBuf []byte) (int32, error) { + size := int32(binary.BigEndian.Uint32(sizeBuf)) + if size < 0 { + return 0, fmt.Errorf("invalid negative response size %d", size) + } + if maxSize := cxn.b.cl.cfg.maxBrokerReadBytes; size > maxSize { + if size == 0x48545450 { // "HTTP" + return 0, fmt.Errorf("invalid large response size %d > limit %d; the four size bytes are 'HTTP' in ascii, the beginning of an HTTP response; is your broker port correct?", size, maxSize) + } + // A TLS alert is 21, and a TLS alert has the version + // following, where all major versions are 03xx. We + // look for an alert and major version byte to suspect + // whether we received a TLS alert.
+ tlsVersion := uint16(sizeBuf[1])<<8 | uint16(sizeBuf[2]) + if sizeBuf[0] == 21 && tlsVersion&0x0300 != 0 { + versionGuess := fmt.Sprintf("unknown TLS version (hex %x)", tlsVersion) + for _, guess := range []struct { + num uint16 + text string + }{ + {tls.VersionSSL30, "SSL v3"}, + {tls.VersionTLS10, "TLS v1.0"}, + {tls.VersionTLS11, "TLS v1.1"}, + {tls.VersionTLS12, "TLS v1.2"}, + {tls.VersionTLS13, "TLS v1.3"}, + } { + if tlsVersion == guess.num { + versionGuess = guess.text + } + } + return 0, fmt.Errorf("invalid large response size %d > limit %d; the first three bytes received appear to be a tls alert record for %s; is this a plaintext connection speaking to a tls endpoint?", size, maxSize, versionGuess) + } + return 0, fmt.Errorf("invalid large response size %d > limit %d", size, maxSize) + } + return size, nil +} + +// readResponse reads a response from conn, ensures the correlation ID is +// correct, and returns a newly allocated slice on success. +// +// This takes a bunch of extra arguments in support of HookBrokerE2E; overall, +// this function takes 11 arguments. +func (cxn *brokerCxn) readResponse( + ctx context.Context, + key int16, + version int16, + corrID int32, + flexibleHeader bool, + timeout time.Duration, + bytesWritten int, + writeWait time.Duration, + timeToWrite time.Duration, + readEnqueue time.Time, +) ([]byte, error) { + bytesRead, buf, readWait, timeToRead, readErr := cxn.readConn(ctx, timeout, readEnqueue) + + cxn.cl.cfg.hooks.each(func(h Hook) { + if h, ok := h.(HookBrokerRead); ok { + h.OnBrokerRead(cxn.b.meta, key, bytesRead, readWait, timeToRead, readErr) + } + if h, ok := h.(HookBrokerE2E); ok { + h.OnBrokerE2E(cxn.b.meta, key, BrokerE2E{ + BytesWritten: bytesWritten, + BytesRead: bytesRead, + WriteWait: writeWait, + TimeToWrite: timeToWrite, + ReadWait: readWait, + TimeToRead: timeToRead, + ReadErr: readErr, + }) + } + }) + if logger := cxn.cl.cfg.logger; logger.Level() >= LogLevelDebug { + logger.Log(LogLevelDebug, fmt.Sprintf("read %s v%d", kmsg.NameForKey(key), version), "broker", logID(cxn.b.meta.NodeID), "bytes_read", bytesRead, "read_wait", readWait, "time_to_read", timeToRead, "err", readErr) + } + + if readErr != nil { + return nil, readErr + } + if len(buf) < 4 { + return nil, kbin.ErrNotEnoughData + } + gotID := int32(binary.BigEndian.Uint32(buf)) + if gotID != corrID { + return nil, errCorrelationIDMismatch + } + // If the response header is flexible, we skip the tags at the end of + // it. They are currently unused. + if flexibleHeader { + b := kbin.Reader{Src: buf[4:]} + kmsg.SkipTags(&b) + return b.Src, b.Complete() + } + return buf[4:], nil +} + +// closeConn is the one place we close broker connections. This is always done +// in either die, which is called when handleResps returns, or if init fails, +// which means we did not succeed enough to start handleResps. +func (cxn *brokerCxn) closeConn() { + cxn.cl.cfg.hooks.each(func(h Hook) { + if h, ok := h.(HookBrokerDisconnect); ok { + h.OnBrokerDisconnect(cxn.b.meta, cxn.conn) + } + }) + cxn.conn.Close() + close(cxn.deadCh) +} + +// die kills a broker connection (which could be dead already) and replies to +// all requests awaiting responses appropriately. +func (cxn *brokerCxn) die() { + if cxn == nil || cxn.dead.Swap(true) { + return + } + cxn.closeConn() + cxn.resps.die() +} + +// waitResp, called serially by a broker's handleReqs, manages handling a +// message request's response.
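+// +// Responses on a connection are strictly ordered: pushing the first pending +// response starts the lone handleResps goroutine, and every later push is +// drained FIFO by that same goroutine, which is what allows correlation IDs +// to be validated sequentially in readResponse.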
+func (cxn *brokerCxn) waitResp(pr promisedResp) { + first, dead := cxn.resps.push(pr) + if first { + go cxn.handleResps(pr) + } else if dead { + pr.promise(nil, errChosenBrokerDead) + cxn.hookWriteE2E(pr.resp.Key(), pr.bytesWritten, pr.writeWait, pr.timeToWrite, errChosenBrokerDead) + } +} + +// If acks are zero, then a real Kafka installation never replies to produce +// requests. Unfortunately, Microsoft EventHubs rolled their own implementation +// and _does_ reply to ack-0 produce requests. We need to process these +// responses, because otherwise kernel buffers will fill up, Microsoft will be +// unable to reply, and then they will stop taking our produce requests. +// +// Thus, we just simply discard everything. +// +// Since we still want to support hooks, we still read the size of a response +// and then read that entire size before calling a hook. There are a few +// differences: +// +// (1) we do not know what version we produced, so we cannot validate the read, +// we just have to trust that the size is valid (and the data follows +// correctly). +// +// (2) rather than creating a slice for the response, we discard the entire +// response into a reusable small slice. The small size is because produce +// responses are relatively small to begin with, so we expect only a few reads +// per response. +// +// (3) we have no time for when the read was enqueued, so we miss that in the +// hook. +// +// (4) we start the time-to-read duration *after* the size bytes are read, +// since we have no idea when a read actually should start, since we should not +// receive responses to begin with. +// +// (5) we set a read deadline *after* the size bytes are read, and only if the +// client has not yet closed. +func (cxn *brokerCxn) discard() { + var firstTimeout bool + defer func() { + if !firstTimeout { // see below + cxn.die() + } else { + cxn.b.cl.cfg.logger.Log(LogLevelDebug, "produce acks==0 discard goroutine exiting; this broker looks to correctly not reply to ack==0 produce requests", "addr", cxn.b.addr, "broker", logID(cxn.b.meta.NodeID)) + } + }() + + discardBuf := make([]byte, 256) + for i := 0; ; i++ { + var ( + nread int + err error + timeToRead time.Duration + + deadlineMu sync.Mutex + deadlineSet bool + + readDone = make(chan struct{}) + ) + + // On all but the first request, we use no deadline. We could + // be hanging reading while we wait for more produce requests. + // We know we are talking to azure when i > 0 and we should not + // quit this goroutine. + // + // However, on the *first* produce request, we know that we are + // writing *right now*. We can deadline our read side with + // ample overhead, and if this first read hits the deadline, + // then we can quit this discard / read goroutine with no + // problems. 
+ // + // We choose 3x our timeouts: + // - first we cover the write, connTimeoutOverhead + produceTimeout + // - then we cover the read, connTimeoutOverhead + // - then we throw in another connTimeoutOverhead just to be sure + // + deadline := time.Time{} + if i == 0 { + deadline = time.Now().Add(3*cxn.cl.cfg.requestTimeoutOverhead + cxn.cl.cfg.produceTimeout) + } + cxn.conn.SetReadDeadline(deadline) + + go func() { + defer close(readDone) + if nread, err = io.ReadFull(cxn.conn, discardBuf[:4]); err != nil { + if i == 0 && errors.Is(err, os.ErrDeadlineExceeded) { + firstTimeout = true + } + return + } + deadlineMu.Lock() + if !deadlineSet { + cxn.conn.SetReadDeadline(time.Now().Add(cxn.cl.cfg.produceTimeout)) + } + deadlineMu.Unlock() + + cxn.reading.Store(true) + defer func() { + cxn.lastRead.Store(time.Now().UnixNano()) + cxn.reading.Store(false) + }() + + readStart := time.Now() + defer func() { timeToRead = time.Since(readStart) }() + var size int32 + if size, err = cxn.parseReadSize(discardBuf[:4]); err != nil { + return + } + + var nread2 int + for size > 0 && err == nil { + discard := discardBuf + if int(size) < len(discard) { + discard = discard[:size] + } + nread2, err = cxn.conn.Read(discard) + nread += nread2 + size -= int32(nread2) // nread2 max is 256, the len of discardBuf + } + }() + + select { + case <-readDone: + case <-cxn.cl.ctx.Done(): + deadlineMu.Lock() + deadlineSet = true + deadlineMu.Unlock() + cxn.conn.SetReadDeadline(time.Now()) + <-readDone + return + } + + cxn.cl.cfg.hooks.each(func(h Hook) { + if h, ok := h.(HookBrokerRead); ok { + h.OnBrokerRead(cxn.b.meta, 0, nread, 0, timeToRead, err) + } + }) + if err != nil { + return + } + } +} + +// handleResps serially handles all broker responses for a single connection. +func (cxn *brokerCxn) handleResps(pr promisedResp) { + var more, dead bool +start: + if dead { + pr.promise(nil, errChosenBrokerDead) + cxn.hookWriteE2E(pr.resp.Key(), pr.bytesWritten, pr.writeWait, pr.timeToWrite, errChosenBrokerDead) + } else { + cxn.handleResp(pr) + } + + pr, more, dead = cxn.resps.dropPeek() + if more { + goto start + } +} + +func (cxn *brokerCxn) handleResp(pr promisedResp) { + rawResp, err := cxn.readResponse( + pr.ctx, + pr.resp.Key(), + pr.resp.GetVersion(), + pr.corrID, + pr.flexibleHeader, + pr.readTimeout, + pr.bytesWritten, + pr.writeWait, + pr.timeToWrite, + pr.readEnqueue, + ) + if err != nil { + if !errors.Is(err, ErrClientClosed) && !errors.Is(err, context.Canceled) { + if cxn.successes > 0 || len(cxn.b.cl.cfg.sasls) > 0 { + cxn.b.cl.cfg.logger.Log(LogLevelDebug, "read from broker errored, killing connection", "req", kmsg.Key(pr.resp.Key()).Name(), "addr", cxn.b.addr, "broker", logID(cxn.b.meta.NodeID), "successful_reads", cxn.successes, "err", err) + } else { + cxn.b.cl.cfg.logger.Log(LogLevelWarn, "read from broker errored, killing connection after 0 successful responses (is SASL missing?)", "req", kmsg.Key(pr.resp.Key()).Name(), "addr", cxn.b.addr, "broker", logID(cxn.b.meta.NodeID), "err", err) + if err == io.EOF { // specifically avoid checking errors.Is to ensure this is not already wrapped + err = &ErrFirstReadEOF{kind: firstReadSASL, err: err} + } + } + } + pr.promise(nil, err) + cxn.die() + return + } + + cxn.successes++ + readErr := pr.resp.ReadFrom(rawResp) + + // If we had no error, we read the response successfully. + // + // Any response that can cause throttling satisfies the + // kmsg.ThrottleResponse interface. We check that here.
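+ // + // (As a concrete illustration with hypothetical numbers: a response + // carrying ThrottleMillis=500 that throttles after the response pushes + // throttleUntil ~500ms into the future, and the next writeRequest on + // this connection sleeps until that deadline before writing.)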
+ if readErr == nil { + if throttleResponse, ok := pr.resp.(kmsg.ThrottleResponse); ok { + millis, throttlesAfterResp := throttleResponse.Throttle() + if millis > 0 { + cxn.b.cl.cfg.logger.Log(LogLevelInfo, "broker is throttling us in response", "broker", logID(cxn.b.meta.NodeID), "req", kmsg.Key(pr.resp.Key()).Name(), "throttle_millis", millis, "throttles_after_resp", throttlesAfterResp) + if throttlesAfterResp { + throttleUntil := time.Now().Add(time.Millisecond * time.Duration(millis)).UnixNano() + if throttleUntil > cxn.throttleUntil.Load() { + cxn.throttleUntil.Store(throttleUntil) + } + } + cxn.cl.cfg.hooks.each(func(h Hook) { + if h, ok := h.(HookBrokerThrottle); ok { + h.OnBrokerThrottle(cxn.b.meta, time.Duration(millis)*time.Millisecond, throttlesAfterResp) + } + }) + } + } + } + + pr.promise(pr.resp, readErr) +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/client.go b/vendor/github.com/twmb/franz-go/pkg/kgo/client.go new file mode 100644 index 00000000000..775a22e6ee2 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/client.go @@ -0,0 +1,4553 @@ +// Package kgo provides a pure Go efficient Kafka client for Kafka 0.8+ with +// support for transactions, regex topic consuming, the latest partition +// strategies, and more. This client supports all client related KIPs. +// +// This client aims to be simple to use while still interacting with Kafka in a +// near ideal way. For more overview of the entire client itself, please see +// the README on the project's Github page. +package kgo + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "hash/crc32" + "math/rand" + "net" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" + "github.com/twmb/franz-go/pkg/sasl" +) + +var crc32c = crc32.MakeTable(crc32.Castagnoli) // record crc's use Castagnoli table; for consuming/producing + +// Client issues requests and handles responses to a Kafka cluster. +type Client struct { + cfg cfg + opts []Opt + + ctx context.Context + ctxCancel func() + + rng func(func(*rand.Rand)) + + brokersMu sync.RWMutex + brokers []*broker // ordered by broker ID + seeds atomic.Value // []*broker, seed brokers, also ordered by ID + anyBrokerOrd []int32 // shuffled brokers, for random ordering + anySeedIdx int32 + stopBrokers bool // set to true on close to stop updateBrokers + + // A sink and a source are created once per node ID and persist + // forever. We expect the list to be small. + // + // The mutex only exists to allow consumer session stopping to read + // sources to notify when starting a session; all writes happen in the + // metadata loop. + sinksAndSourcesMu sync.Mutex + sinksAndSources map[int32]sinkAndSource + + reqFormatter *kmsg.RequestFormatter + connTimeouter connTimeouter + + bufPool bufPool // for brokers to share underlying reusable request buffers + prsPool prsPool // for sinks to reuse []promisedNumberedRecord + + controllerIDMu sync.Mutex + controllerID int32 + + // The following two ensure that we only have one fetchBrokerMetadata + // at once. This avoids unnecessary broker metadata requests and + // metadata trampling.
+ fetchingBrokersMu sync.Mutex + fetchingBrokers *struct { + done chan struct{} + err error + } + + producer producer + consumer consumer + + compressor *compressor + decompressor *decompressor + + coordinatorsMu sync.Mutex + coordinators map[coordinatorKey]*coordinatorLoad + + updateMetadataCh chan string + updateMetadataNowCh chan string // like above, but with high priority + blockingMetadataFnCh chan func() + metawait metawait + metadone chan struct{} + + mappedMetaMu sync.Mutex + mappedMeta map[string]mappedMetadataTopic +} + +func (cl *Client) idempotent() bool { return !cl.cfg.disableIdempotency } + +type sinkAndSource struct { + sink *sink + source *source +} + +func (cl *Client) allSinksAndSources(fn func(sns sinkAndSource)) { + cl.sinksAndSourcesMu.Lock() + defer cl.sinksAndSourcesMu.Unlock() + + for _, sns := range cl.sinksAndSources { + fn(sns) + } +} + +type hostport struct { + host string + port int32 +} + +// ValidateOpts returns an error if the options are invalid. +func ValidateOpts(opts ...Opt) error { + _, _, _, err := validateCfg(opts...) + return err +} + +func parseSeeds(addrs []string) ([]hostport, error) { + seeds := make([]hostport, 0, len(addrs)) + for _, seedBroker := range addrs { + hp, err := parseBrokerAddr(seedBroker) + if err != nil { + return nil, err + } + seeds = append(seeds, hp) + } + return seeds, nil +} + +// This function validates the configuration and returns a few things that we +// initialize while validating. The difference between this and NewClient +// initialization is that all NewClient initialization is infallible. +func validateCfg(opts ...Opt) (cfg, []hostport, *compressor, error) { + cfg := defaultCfg() + for _, opt := range opts { + opt.apply(&cfg) + } + if err := cfg.validate(); err != nil { + return cfg, nil, nil, err + } + seeds, err := parseSeeds(cfg.seedBrokers) + if err != nil { + return cfg, nil, nil, err + } + compressor, err := newCompressor(cfg.compression...) + if err != nil { + return cfg, nil, nil, err + } + return cfg, seeds, compressor, nil +} + +func namefn(fn any) string { + v := reflect.ValueOf(fn) + if v.Type().Kind() != reflect.Func { + return "" + } + name := runtime.FuncForPC(v.Pointer()).Name() + dot := strings.LastIndexByte(name, '.') + if dot >= 0 { + return name[dot+1:] + } + return name +} + +// OptValue returns the value for the given configuration option. If the +// given option does not exist, this returns nil. This function takes either a +// raw Opt, or an Opt function name. +// +// If a configuration option has multiple inputs, this function returns only +// the first input. If the function is a boolean function (such as +// BlockRebalanceOnPoll), this function returns the value of the internal bool. +// Variadic option inputs are returned as a single slice. Options that are +// internally stored as a pointer (ClientID, TransactionalID, and InstanceID) +// are returned as their string input; you can see if the option is internally +// nil by looking at the second value returned from OptValues.
+// +// var ( +// cl, _ = NewClient( +// InstanceID("foo"), +// ConsumeTopics("foo", "bar"), +// ) +// iid = cl.OptValue(InstanceID) // iid is "foo" +// gid = cl.OptValue(ConsumerGroup) // gid is "" since groups are not used +// topics = cl.OptValue("ConsumeTopics") // topics is []string{"foo", "bar"}; string lookup for the option works +// bpoll = cl.OptValue(BlockRebalanceOnPoll) // bpoll is false +// t = cl.OptValue(SessionTimeout) // t is 45s, the internal default +// td = t.(time.Duration) // safe conversion since SessionTimeout's input is a time.Duration +// unk = cl.OptValue("Unknown") // unk is nil +// ) +func (cl *Client) OptValue(opt any) any { + vs := cl.OptValues(opt) + if len(vs) > 0 { + return vs[0] + } + return nil +} + +// OptValues returns all values for options. This method is useful for +// options that have multiple inputs (notably, SoftwareNameAndVersion). This is +// also useful for options that are internally stored as a pointer (ClientID, +// TransactionalID, and InstanceID) -- this function will return the string +// value of the option but also whether the option is non-nil. Boolean options +// are returned as a single-element slice with the bool value. Variadic inputs +// are returned as a single slice. If the input option does not exist, this +// returns nil. +// +// var ( +// cl, _ = NewClient( +// InstanceID("foo"), +// ConsumeTopics("foo", "bar"), +// ) +// idValues = cl.OptValues(InstanceID) // idValues is []any{"foo", true} +// tValues = cl.OptValues(SessionTimeout) // tValues is []any{45 * time.Second} +// topics = cl.OptValues(ConsumeTopics) // topics is []any{[]string{"foo", "bar"}} +// bpoll = cl.OptValues(BlockRebalanceOnPoll) // bpoll is []any{false} +// unknown = cl.OptValues("Unknown") // unknown is nil +// ) +func (cl *Client) OptValues(opt any) []any { + name := namefn(opt) + if s, ok := opt.(string); ok { + name = s + } + cfg := &cl.cfg + + switch name { + case namefn(ClientID): + if cfg.id != nil { + return []any{*cfg.id, true} + } + return []any{"", false} + case namefn(SoftwareNameAndVersion): + return []any{cfg.softwareName, cfg.softwareVersion} + case namefn(WithLogger): + if _, wrapped := cfg.logger.(*wrappedLogger); wrapped { + return []any{cfg.logger.(*wrappedLogger).inner} + } + return []any{nil} + case namefn(RequestTimeoutOverhead): + return []any{cfg.requestTimeoutOverhead} + case namefn(ConnIdleTimeout): + return []any{cfg.connIdleTimeout} + case namefn(Dialer): + return []any{cfg.dialFn} + case namefn(DialTLSConfig): + return []any{cfg.dialTLS} + case namefn(DialTLS): + return []any{cfg.dialTLS != nil} + case namefn(SeedBrokers): + return []any{cfg.seedBrokers} + case namefn(MaxVersions): + return []any{cfg.maxVersions} + case namefn(MinVersions): + return []any{cfg.minVersions} + case namefn(RetryBackoffFn): + return []any{cfg.retryBackoff} + case namefn(RequestRetries): + return []any{cfg.retries} + case namefn(RetryTimeout): + return []any{cfg.retryTimeout(0)} + case namefn(RetryTimeoutFn): + return []any{cfg.retryTimeout} + case namefn(AllowAutoTopicCreation): + return []any{cfg.allowAutoTopicCreation} + case namefn(BrokerMaxWriteBytes): + return []any{cfg.maxBrokerWriteBytes} + case namefn(BrokerMaxReadBytes): + return []any{cfg.maxBrokerReadBytes} + case namefn(MetadataMaxAge): + return []any{cfg.metadataMaxAge} + case namefn(MetadataMinAge): + return []any{cfg.metadataMinAge} + case namefn(SASL): + return []any{cfg.sasls} + case namefn(WithHooks): + return []any{cfg.hooks} + case namefn(ConcurrentTransactionsBackoff): +
return []any{cfg.txnBackoff} + case namefn(ConsiderMissingTopicDeletedAfter): + return []any{cfg.missingTopicDelete} + + case namefn(DefaultProduceTopic): + return []any{cfg.defaultProduceTopic} + case namefn(RequiredAcks): + return []any{cfg.acks} + case namefn(DisableIdempotentWrite): + return []any{cfg.disableIdempotency} + case namefn(MaxProduceRequestsInflightPerBroker): + return []any{cfg.maxProduceInflight} + case namefn(ProducerBatchCompression): + return []any{cfg.compression} + case namefn(ProducerBatchMaxBytes): + return []any{cfg.maxRecordBatchBytes} + case namefn(MaxBufferedRecords): + return []any{cfg.maxBufferedRecords} + case namefn(MaxBufferedBytes): + return []any{cfg.maxBufferedBytes} + case namefn(RecordPartitioner): + return []any{cfg.partitioner} + case namefn(ProduceRequestTimeout): + return []any{cfg.produceTimeout} + case namefn(RecordRetries): + return []any{cfg.recordRetries} + case namefn(UnknownTopicRetries): + return []any{cfg.maxUnknownFailures} + case namefn(StopProducerOnDataLossDetected): + return []any{cfg.stopOnDataLoss} + case namefn(ProducerOnDataLossDetected): + return []any{cfg.onDataLoss} + case namefn(ProducerLinger): + return []any{cfg.linger} + case namefn(ManualFlushing): + return []any{cfg.manualFlushing} + case namefn(RecordDeliveryTimeout): + return []any{cfg.recordTimeout} + case namefn(TransactionalID): + if cfg.txnID != nil { + return []any{cfg.txnID, true} + } + return []any{"", false} + case namefn(TransactionTimeout): + return []any{cfg.txnTimeout} + + case namefn(ConsumePartitions): + return []any{cfg.partitions} + case namefn(ConsumePreferringLagFn): + return []any{cfg.preferLagFn} + case namefn(ConsumeRegex): + return []any{cfg.regex} + case namefn(ConsumeResetOffset): + return []any{cfg.resetOffset} + case namefn(ConsumeTopics): + return []any{cfg.topics} + case namefn(DisableFetchSessions): + return []any{cfg.disableFetchSessions} + case namefn(FetchIsolationLevel): + return []any{cfg.isolationLevel} + case namefn(FetchMaxBytes): + return []any{int32(cfg.maxBytes)} + case namefn(FetchMaxPartitionBytes): + return []any{int32(cfg.maxPartBytes)} + case namefn(FetchMaxWait): + return []any{time.Duration(cfg.maxWait) * time.Millisecond} + case namefn(FetchMinBytes): + return []any{cfg.minBytes} + case namefn(KeepControlRecords): + return []any{cfg.keepControl} + case namefn(MaxConcurrentFetches): + return []any{cfg.maxConcurrentFetches} + case namefn(Rack): + return []any{cfg.rack} + case namefn(KeepRetryableFetchErrors): + return []any{cfg.keepRetryableFetchErrors} + + case namefn(AdjustFetchOffsetsFn): + return []any{cfg.adjustOffsetsBeforeAssign} + case namefn(AutoCommitCallback): + return []any{cfg.commitCallback} + case namefn(AutoCommitInterval): + return []any{cfg.autocommitInterval} + case namefn(AutoCommitMarks): + return []any{cfg.autocommitMarks} + case namefn(Balancers): + return []any{cfg.balancers} + case namefn(BlockRebalanceOnPoll): + return []any{cfg.blockRebalanceOnPoll} + case namefn(ConsumerGroup): + return []any{cfg.group} + case namefn(DisableAutoCommit): + return []any{cfg.autocommitDisable} + case namefn(GreedyAutoCommit): + return []any{cfg.autocommitGreedy} + case namefn(GroupProtocol): + return []any{cfg.protocol} + case namefn(HeartbeatInterval): + return []any{cfg.heartbeatInterval} + case namefn(InstanceID): + if cfg.instanceID != nil { + return []any{*cfg.instanceID, true} + } + return []any{"", false} + case namefn(OnOffsetsFetched): + return []any{cfg.onFetched} + case namefn(OnPartitionsAssigned): + 
return []any{cfg.onAssigned} + case namefn(OnPartitionsLost): + return []any{cfg.onLost} + case namefn(OnPartitionsRevoked): + return []any{cfg.onRevoked} + case namefn(RebalanceTimeout): + return []any{cfg.rebalanceTimeout} + case namefn(RequireStableFetchOffsets): + return []any{cfg.requireStable} + case namefn(SessionTimeout): + return []any{cfg.sessionTimeout} + default: + return nil + } +} + +// NewClient returns a new Kafka client with the given options or an error if +// the options are invalid. Connections to brokers are lazily created only when +// requests are written to them. +// +// By default, the client uses the latest stable request versions when talking +// to Kafka. If you use a broker older than 0.10.0, then you need to manually +// set a MaxVersions option. Otherwise, there is usually no harm in defaulting +// to the latest API versions, although occasionally Kafka introduces new +// required parameters that do not have zero value defaults. +// +// NewClient also launches a goroutine which periodically updates the cached +// topic metadata. +func NewClient(opts ...Opt) (*Client, error) { + cfg, seeds, compressor, err := validateCfg(opts...) + if err != nil { + return nil, err + } + + if cfg.retryTimeout == nil { + cfg.retryTimeout = func(key int16) time.Duration { + switch key { + case ((*kmsg.JoinGroupRequest)(nil)).Key(), + ((*kmsg.SyncGroupRequest)(nil)).Key(), + ((*kmsg.HeartbeatRequest)(nil)).Key(): + return cfg.sessionTimeout + } + return 30 * time.Second + } + } + + if cfg.dialFn == nil { + dialer := &net.Dialer{Timeout: cfg.dialTimeout} + cfg.dialFn = dialer.DialContext + if cfg.dialTLS != nil { + cfg.dialFn = func(ctx context.Context, network, host string) (net.Conn, error) { + c := cfg.dialTLS.Clone() + if c.ServerName == "" { + server, _, err := net.SplitHostPort(host) + if err != nil { + return nil, fmt.Errorf("unable to split host:port for dialing: %w", err) + } + c.ServerName = server + } + return (&tls.Dialer{ + NetDialer: dialer, + Config: c, + }).DialContext(ctx, network, host) + } + } + } + + ctx, cancel := context.WithCancel(context.Background()) + + cl := &Client{ + cfg: cfg, + opts: opts, + ctx: ctx, + ctxCancel: cancel, + + rng: func() func(func(*rand.Rand)) { + var mu sync.Mutex + rng := rand.New(rand.NewSource(time.Now().UnixNano())) + return func(fn func(*rand.Rand)) { + mu.Lock() + defer mu.Unlock() + fn(rng) + } + }(), + + controllerID: unknownControllerID, + + sinksAndSources: make(map[int32]sinkAndSource), + + reqFormatter: kmsg.NewRequestFormatter(), + connTimeouter: connTimeouter{def: cfg.requestTimeoutOverhead}, + + bufPool: newBufPool(), + prsPool: newPrsPool(), + + compressor: compressor, + decompressor: newDecompressor(), + + coordinators: make(map[coordinatorKey]*coordinatorLoad), + + updateMetadataCh: make(chan string, 1), + updateMetadataNowCh: make(chan string, 1), + blockingMetadataFnCh: make(chan func()), + metadone: make(chan struct{}), + } + + // Before we start any goroutines below, we must notify any interested + // hooks of our existence. 
+ cl.cfg.hooks.each(func(h Hook) { + if h, ok := h.(HookNewClient); ok { + h.OnNewClient(cl) + } + }) + + cl.producer.init(cl) + cl.consumer.init(cl) + cl.metawait.init() + + if cfg.id != nil { + cl.reqFormatter = kmsg.NewRequestFormatter(kmsg.FormatterClientID(*cfg.id)) + } + + seedBrokers := make([]*broker, 0, len(seeds)) + for i, seed := range seeds { + b := cl.newBroker(unknownSeedID(i), seed.host, seed.port, nil) + seedBrokers = append(seedBrokers, b) + } + cl.seeds.Store(seedBrokers) + go cl.updateMetadataLoop() + go cl.reapConnectionsLoop() + + return cl, nil +} + +// Opts returns the options that were used to create this client. This can be +// used as a base to generate a new client, where you can add override options +// to the end of the original input list. If you want to know a specific option +// value, you can use OptValue or OptValues. +func (cl *Client) Opts() []Opt { + return cl.opts +} + +func (cl *Client) loadSeeds() []*broker { + return cl.seeds.Load().([]*broker) +} + +// Ping returns whether any broker is reachable, iterating over any discovered +// broker or seed broker until one returns a successful response to an +// ApiVersions request. No discovered broker nor seed broker is attempted more +// than once. If all requests fail, this returns the final error. +func (cl *Client) Ping(ctx context.Context) error { + req := kmsg.NewPtrApiVersionsRequest() + req.ClientSoftwareName = cl.cfg.softwareName + req.ClientSoftwareVersion = cl.cfg.softwareVersion + + cl.brokersMu.RLock() + brokers := append([]*broker(nil), cl.brokers...) + cl.brokersMu.RUnlock() + + var lastErr error + for _, brs := range [2][]*broker{ + brokers, + cl.loadSeeds(), + } { + for _, br := range brs { + _, err := br.waitResp(ctx, req) + if lastErr = err; lastErr == nil { + return nil + } + } + } + return lastErr +} + +// PurgeTopicsFromClient internally removes all internal information about the +// input topics. If you want to purge information for only consuming or +// only producing, see the related functions [PurgeTopicsFromConsuming] and +// [PurgeTopicsFromProducing]. +// +// For producing, this clears all knowledge that these topics have ever been +// produced to. Producing to the topic again may result in out of order +// sequence number errors, or, if idempotency is disabled and the sequence +// numbers align, may result in invisibly discarded records at the broker. +// Purging a topic that was previously produced to may be useful to free up +// resources if you are producing to many disparate and short lived topics in +// the lifetime of this client and you do not plan to produce to them +// anymore. You may want to flush buffered records before purging if records +// for a topic you are purging are currently in flight. +// +// For consuming, this removes all concept of the topic from being consumed. +// This is different from PauseFetchTopics, which literally pauses the fetching +// of topics but keeps the topic information around for resuming fetching +// later. Purging a topic that was being consumed can be useful if you know the +// topic no longer exists, or if you are consuming via regex and know that some +// previously consumed topics no longer exist, or if you simply do not want to +// ever consume from a topic again. If you are group consuming, this function +// will likely cause a rebalance. +// +// For admin requests, this deletes the topic from the cached metadata map for +// sharded requests.
Metadata for sharded admin requests is only cached for +// MetadataMinAge anyway, but the map is not cleaned up once the metadata +// expires. This function ensures the map is purged. +func (cl *Client) PurgeTopicsFromClient(topics ...string) { + if len(topics) == 0 { + return + } + sort.Strings(topics) // for logging in the functions + cl.blockingMetadataFn(func() { // make reasoning about concurrency easier + var wg sync.WaitGroup + wg.Add(2) + go func() { + defer wg.Done() + cl.producer.purgeTopics(topics) + }() + go func() { + defer wg.Done() + cl.consumer.purgeTopics(topics) + }() + wg.Wait() + }) + cl.mappedMetaMu.Lock() + for _, t := range topics { + delete(cl.mappedMeta, t) + } + cl.mappedMetaMu.Unlock() +} + +// PurgeTopicsFromProducing internally removes all internal information for +// producing about the input topics. This runs the producer bit of logic that +// is documented in [PurgeTopicsFromClient]; see that function for more +// details. +func (cl *Client) PurgeTopicsFromProducing(topics ...string) { + if len(topics) == 0 { + return + } + sort.Strings(topics) + cl.blockingMetadataFn(func() { + cl.producer.purgeTopics(topics) + }) +} + +// PurgeTopicsFromConsuming internally removes all internal information for +// consuming about the input topics. This runs the consumer bit of logic that +// is documented in [PurgeTopicsFromClient]; see that function for more +// details. +func (cl *Client) PurgeTopicsFromConsuming(topics ...string) { + if len(topics) == 0 { + return + } + sort.Strings(topics) + cl.blockingMetadataFn(func() { + cl.consumer.purgeTopics(topics) + }) +} + +// Parse broker IP/host and port from a string, using the default Kafka port if +// unspecified. Supported address formats: +// +// - IPv4 host/IP without port: "127.0.0.1", "localhost" +// - IPv4 host/IP with port: "127.0.0.1:1234", "localhost:1234" +// - IPv6 IP without port: "[2001:1000:2000::1]", "::1" +// - IPv6 IP with port: "[2001:1000:2000::1]:1234" +func parseBrokerAddr(addr string) (hostport, error) { + const defaultKafkaPort = 9092 + + // Bracketed IPv6 + if strings.IndexByte(addr, '[') == 0 { + parts := strings.Split(addr[1:], "]") + if len(parts) != 2 { + return hostport{}, fmt.Errorf("invalid addr: %s", addr) + } + // No port specified -> use default + if len(parts[1]) == 0 { + return hostport{parts[0], defaultKafkaPort}, nil + } + port, err := strconv.ParseInt(parts[1][1:], 10, 32) + if err != nil { + return hostport{}, fmt.Errorf("unable to parse port from addr: %w", err) + } + return hostport{parts[0], int32(port)}, nil + } + + // IPv4 with no port + if strings.IndexByte(addr, ':') == -1 { + return hostport{addr, defaultKafkaPort}, nil + } + + // Either an IPv6 literal ("::1"), IP:port or host:port + // Try to parse as IP:port or host:port + h, p, err := net.SplitHostPort(addr) + if err != nil { + return hostport{addr, defaultKafkaPort}, nil //nolint:nilerr // ipv6 literal -- use default kafka port + } + port, err := strconv.ParseInt(p, 10, 32) + if err != nil { + return hostport{}, fmt.Errorf("unable to parse port from addr: %w", err) + } + return hostport{h, int32(port)}, nil +} + +type connTimeouter struct { + def time.Duration + joinMu sync.Mutex + lastRebalanceTimeout time.Duration +} + +func (c *connTimeouter) timeouts(req kmsg.Request) (r, w time.Duration) { + def := c.def + millis := func(m int32) time.Duration { return time.Duration(m) * time.Millisecond } + switch t := req.(type) { + default: + if timeoutRequest, ok := req.(kmsg.TimeoutRequest); ok { + timeoutMillis :=
timeoutRequest.Timeout() + return def + millis(timeoutMillis), def + } + return def, def + + case *produceRequest: + return def + millis(t.timeout), def + case *fetchRequest: + return def + millis(t.maxWait), def + case *kmsg.FetchRequest: + return def + millis(t.MaxWaitMillis), def + + // Join and sync can take a long time. Sync has no notion of + // timeouts, but since the flow of requests should be first + // join, then sync, we can stash the timeout from the join. + + case *kmsg.JoinGroupRequest: + c.joinMu.Lock() + c.lastRebalanceTimeout = millis(t.RebalanceTimeoutMillis) + c.joinMu.Unlock() + + return def + millis(t.RebalanceTimeoutMillis), def + case *kmsg.SyncGroupRequest: + read := def + c.joinMu.Lock() + if c.lastRebalanceTimeout != 0 { + read = c.lastRebalanceTimeout + } + c.joinMu.Unlock() + + return read, def + } +} + +func (cl *Client) reinitAnyBrokerOrd() { + cl.anyBrokerOrd = append(cl.anyBrokerOrd[:0], make([]int32, len(cl.brokers))...) + for i := range cl.anyBrokerOrd { + cl.anyBrokerOrd[i] = int32(i) + } + cl.rng(func(r *rand.Rand) { + r.Shuffle(len(cl.anyBrokerOrd), func(i, j int) { + cl.anyBrokerOrd[i], cl.anyBrokerOrd[j] = cl.anyBrokerOrd[j], cl.anyBrokerOrd[i] + }) + }) +} + +// broker returns a random broker from all brokers ever known. +func (cl *Client) broker() *broker { + cl.brokersMu.Lock() + defer cl.brokersMu.Unlock() + + // Every time we loop through all discovered brokers, we issue one + // request to the next seed. This ensures that if all discovered + // brokers are down, we will *eventually* loop through seeds and + // hopefully have a reachable seed. + var b *broker + + if len(cl.anyBrokerOrd) > 0 { + b = cl.brokers[cl.anyBrokerOrd[0]] + cl.anyBrokerOrd = cl.anyBrokerOrd[1:] + return b + } + + seeds := cl.loadSeeds() + cl.anySeedIdx %= int32(len(seeds)) + b = seeds[cl.anySeedIdx] + cl.anySeedIdx++ + + // If we have brokers, we ranged past discovered brokers. + // We now reset the anyBrokerOrd to begin ranging through + // discovered brokers again. If there are still no brokers, + // this reinit will do nothing and we will keep looping seeds. + cl.reinitAnyBrokerOrd() + return b +} + +func (cl *Client) waitTries(ctx context.Context, backoff time.Duration) bool { + after := time.NewTimer(backoff) + defer after.Stop() + select { + case <-ctx.Done(): + return false + case <-cl.ctx.Done(): + return false + case <-after.C: + return true + } +} + +// A broker may sometimes indicate it supports offset for leader epoch v2+ when +// it does not. We need to catch that and avoid issuing offset for leader +// epoch, because we will just loop continuously failing. We do not catch every +// case, such as when a person explicitly assigns offsets with epochs, but we +// catch a few areas that would be returned from a broker itself. +// +// This function is always used *after* at least one request has been issued. +// +// NOTE: This is a weak check; we check if any broker in the cluster supports +// the request. We use this function in three locations: +// +// 1. When using the LeaderEpoch returned in a metadata response. This guards +// against buggy brokers that return 0 rather than -1 even if they do not +// support OffsetForLeaderEpoch. If any support, the cluster is in the +// middle of an upgrade and we can start using the epoch. +// 2. When deciding whether to keep LeaderEpoch from fetched offsets. +// Realistically, clients should only commit epochs if the cluster supports +// them. +// 3. 
When receiving OffsetOutOfRange when follower fetching and we fetched +// past the end. +// +// In any of these cases, if we OffsetForLeaderEpoch against a broker that does +// not support (even though one in the cluster does), we will loop fail until +// the rest of the cluster is upgraded and supports the request. +func (cl *Client) supportsOffsetForLeaderEpoch() bool { + return cl.supportsKeyVersion(int16(kmsg.OffsetForLeaderEpoch), 2) +} + +// A broker may not support some requests we want to make. This function checks +// support. This should only be used *after* at least one successful response. +func (cl *Client) supportsKeyVersion(key, version int16) bool { + cl.brokersMu.RLock() + defer cl.brokersMu.RUnlock() + + for _, brokers := range [][]*broker{ + cl.brokers, + cl.loadSeeds(), + } { + for _, b := range brokers { + if v := b.loadVersions(); v != nil && v.versions[key] >= version { + return true + } + } + } + return false +} + +// fetchBrokerMetadata issues a metadata request solely for broker information. +func (cl *Client) fetchBrokerMetadata(ctx context.Context) error { + cl.fetchingBrokersMu.Lock() + wait := cl.fetchingBrokers + if wait != nil { + cl.fetchingBrokersMu.Unlock() + <-wait.done + return wait.err + } + wait = &struct { + done chan struct{} + err error + }{done: make(chan struct{})} + cl.fetchingBrokers = wait + cl.fetchingBrokersMu.Unlock() + + defer func() { + cl.fetchingBrokersMu.Lock() + defer cl.fetchingBrokersMu.Unlock() + cl.fetchingBrokers = nil + close(wait.done) + }() + + _, _, wait.err = cl.fetchMetadata(ctx, kmsg.NewPtrMetadataRequest(), true) + return wait.err +} + +func (cl *Client) fetchMetadataForTopics(ctx context.Context, all bool, topics []string) (*broker, *kmsg.MetadataResponse, error) { + req := kmsg.NewPtrMetadataRequest() + req.AllowAutoTopicCreation = cl.cfg.allowAutoTopicCreation + if all { + req.Topics = nil + } else if len(topics) == 0 { + req.Topics = []kmsg.MetadataRequestTopic{} + } else { + for _, topic := range topics { + reqTopic := kmsg.NewMetadataRequestTopic() + reqTopic.Topic = kmsg.StringPtr(topic) + req.Topics = append(req.Topics, reqTopic) + } + } + return cl.fetchMetadata(ctx, req, true) +} + +func (cl *Client) fetchMetadata(ctx context.Context, req *kmsg.MetadataRequest, limitRetries bool) (*broker, *kmsg.MetadataResponse, error) { + r := cl.retryable() + + // We limit retries for internal metadata refreshes, because these do + // not need to retry forever and are usually blocking *other* requests. + // e.g., producing bumps load errors when metadata returns, so 3 + // failures here will correspond to 1 bumped error count. To make the + // number more accurate, we should *never* retry here, but this is + // pretty intolerant of immediately-temporary network issues. Rather, + // we use a small count of 3 retries, which with the default backoff, + // will be <2s of retrying. This is still intolerant of temporary + // failures, but it does allow recovery from a dns issue / bad path. + if limitRetries { + r.limitRetries = 3 + } + + meta, err := req.RequestWith(ctx, r) + if err == nil { + if meta.ControllerID >= 0 { + cl.controllerIDMu.Lock() + cl.controllerID = meta.ControllerID + cl.controllerIDMu.Unlock() + } + cl.updateBrokers(meta.Brokers) + } + return r.last, meta, err +} + +// updateBrokers is called with the broker portion of every metadata response. +// All metadata responses contain all known live brokers, so we can always +// use the response. 
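+// +// A small worked example with hypothetical node IDs: merging old brokers +// {1, 2, 4} with a response containing {2, 3, 4} stops broker 1 forever, +// keeps broker 2 (replacing it only if its host, port, or rack changed), +// creates broker 3, and keeps broker 4.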
+func (cl *Client) updateBrokers(brokers []kmsg.MetadataResponseBroker) {
+	sort.Slice(brokers, func(i, j int) bool { return brokers[i].NodeID < brokers[j].NodeID })
+	newBrokers := make([]*broker, 0, len(brokers))
+
+	cl.brokersMu.Lock()
+	defer cl.brokersMu.Unlock()
+
+	if cl.stopBrokers {
+		return
+	}
+
+	for len(brokers) > 0 && len(cl.brokers) > 0 {
+		ob := cl.brokers[0]
+		nb := brokers[0]
+
+		switch {
+		case ob.meta.NodeID < nb.NodeID:
+			ob.stopForever()
+			cl.brokers = cl.brokers[1:]
+
+		case ob.meta.NodeID == nb.NodeID:
+			if !ob.meta.equals(nb) {
+				ob.stopForever()
+				ob = cl.newBroker(nb.NodeID, nb.Host, nb.Port, nb.Rack)
+			}
+			newBrokers = append(newBrokers, ob)
+			cl.brokers = cl.brokers[1:]
+			brokers = brokers[1:]
+
+		case ob.meta.NodeID > nb.NodeID:
+			newBrokers = append(newBrokers, cl.newBroker(nb.NodeID, nb.Host, nb.Port, nb.Rack))
+			brokers = brokers[1:]
+		}
+	}
+
+	for len(cl.brokers) > 0 {
+		ob := cl.brokers[0]
+		ob.stopForever()
+		cl.brokers = cl.brokers[1:]
+	}
+
+	for len(brokers) > 0 {
+		nb := brokers[0]
+		newBrokers = append(newBrokers, cl.newBroker(nb.NodeID, nb.Host, nb.Port, nb.Rack))
+		brokers = brokers[1:]
+	}
+
+	cl.brokers = newBrokers
+	cl.reinitAnyBrokerOrd()
+}
+
+// CloseAllowingRebalance allows rebalances, leaves any group, and closes all
+// connections and goroutines. This function is only useful if you are using
+// the BlockRebalanceOnPoll option. Close itself does not allow rebalances and
+// will hang if you polled, did not allow rebalances, and want to close. Close
+// does not automatically allow rebalances because leaving a group causes a
+// revoke, and the client does not assume that the final revoke is concurrency
+// safe. The CloseAllowingRebalance function exists as a shortcut to opt into
+// allowing rebalance while closing.
+func (cl *Client) CloseAllowingRebalance() {
+	cl.AllowRebalance()
+	cl.Close()
+}
+
+// Close leaves any group and closes all connections and goroutines. This
+// function waits for the group to be left. If you want to force leave a group
+// immediately and ensure a speedy shutdown, you can use LeaveGroupContext
+// first (and then Close will be immediate).
+//
+// If you are group consuming and have overridden the default
+// OnPartitionsRevoked, you must manually commit offsets before closing the
+// client.
+//
+// If you are using the BlockRebalanceOnPoll option and have polled, this
+// function does not automatically allow rebalancing. You must AllowRebalance
+// before calling this function. Internally, this function leaves the group,
+// and leaving a group causes a rebalance so that you can get one final
+// notification of revoked partitions. If you want to automatically allow
+// rebalancing, use CloseAllowingRebalance.
+func (cl *Client) Close() {
+	cl.close(cl.ctx)
+}
+
+func (cl *Client) close(ctx context.Context) (rerr error) {
+	defer cl.cfg.hooks.each(func(h Hook) {
+		if h, ok := h.(HookClientClosed); ok {
+			h.OnClientClosed(cl)
+		}
+	})
+
+	c := &cl.consumer
+	c.kill.Store(true)
+	if c.g != nil {
+		rerr = cl.LeaveGroupContext(ctx)
+	} else if c.d != nil {
+		c.mu.Lock()                                           // lock for assign
+		c.assignPartitions(nil, assignInvalidateAll, nil, "") // we do not use a log message when not in a group
+		c.mu.Unlock()
+	}
+
+	// After the above, consumers cannot consume anymore. LeaveGroup
+	// internally assigns nil, which uses noConsumerSession, which prevents
+	// loopFetch from starting. Assigning also waits for the prior session
+	// to be complete, meaning loopFetch cannot be running.
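+	//
+	// The one second context below bounds the graceful kill of any live
+	// fetch sessions, so that closing cannot hang on unreachable brokers.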
+ + sessCloseCtx, sessCloseCancel := context.WithTimeout(ctx, time.Second) + var wg sync.WaitGroup + cl.allSinksAndSources(func(sns sinkAndSource) { + if sns.source.session.id != 0 { + sns := sns + wg.Add(1) + go func() { + defer wg.Done() + sns.source.killSessionOnClose(sessCloseCtx) + }() + } + }) + wg.Wait() + sessCloseCancel() + + // Now we kill the client context and all brokers, ensuring all + // requests fail. This will finish all producer callbacks and + // stop the metadata loop. + cl.ctxCancel() + cl.brokersMu.Lock() + cl.stopBrokers = true + for _, broker := range cl.brokers { + broker.stopForever() + } + cl.brokersMu.Unlock() + for _, broker := range cl.loadSeeds() { + broker.stopForever() + } + + // Wait for metadata to quit so we know no more erroring topic + // partitions will be created. After metadata has quit, we can + // safely stop sinks and sources, as no more will be made. + <-cl.metadone + + for _, sns := range cl.sinksAndSources { + sns.sink.maybeDrain() // awaken anything in backoff + sns.source.maybeConsume() // same + } + + cl.failBufferedRecords(ErrClientClosed) + + // We need one final poll: if any sources buffered a fetch, then the + // manageFetchConcurrency loop only exits when all fetches have been + // drained, because draining a fetch is what decrements an "active" + // fetch. PollFetches with `nil` is instant. + cl.PollFetches(nil) + + for _, s := range cl.cfg.sasls { + if closing, ok := s.(sasl.ClosingMechanism); ok { + closing.Close() + } + } + + return rerr +} + +// Request issues a request to Kafka, waiting for and returning the response. +// If a retryable network error occurs, or if a retryable group / transaction +// coordinator error occurs, the request is retried. All other errors are +// returned. +// +// If the request is an admin request, this will issue it to the Kafka +// controller. If the controller ID is unknown, this will attempt to fetch it. +// If the fetch errors, this will return an unknown controller error. +// +// If the request is a group or transaction coordinator request, this will +// issue the request to the appropriate group or transaction coordinator. +// +// For transaction requests, the request is issued to the transaction +// coordinator. However, if the request is an init producer ID request and the +// request has no transactional ID, the request goes to any broker. +// +// Some requests need to be split and sent to many brokers. For these requests, +// it is *highly* recommended to use RequestSharded. Not all responses from +// many brokers can be cleanly merged. However, for the requests that are +// split, this does attempt to merge them in a sane way. +// +// The following requests are split: +// +// ListOffsets +// OffsetFetch (if using v8+ for Kafka 3.0+) +// FindCoordinator (if using v4+ for Kafka 3.0+) +// DescribeGroups +// ListGroups +// DeleteRecords +// OffsetForLeaderEpoch +// DescribeConfigs +// AlterConfigs +// AlterReplicaLogDirs +// DescribeLogDirs +// DeleteGroups +// IncrementalAlterConfigs +// DescribeProducers +// DescribeTransactions +// ListTransactions +// +// Kafka 3.0 introduced batch OffsetFetch and batch FindCoordinator requests. +// This function is forward and backward compatible: old requests will be +// batched as necessary, and batched requests will be split as necessary. It is +// recommended to always use batch requests for simplicity. +// +// In short, this method tries to do the correct thing depending on what type +// of request is being issued. 
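+//
+// As a sketch of typical usage (assuming a configured client cl and a
+// context ctx; the group names are placeholders), a batched OffsetFetch for
+// two groups can be issued directly, and the client splits it per group
+// coordinator and merges the shards:
+//
+//	req := kmsg.NewPtrOffsetFetchRequest()
+//	for _, group := range []string{"group-a", "group-b"} {
+//		g := kmsg.NewOffsetFetchRequestGroup()
+//		g.Group = group
+//		req.Groups = append(req.Groups, g)
+//	}
+//	resp, err := cl.Request(ctx, req)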
+// +// The passed context can be used to cancel a request and return early. Note +// that if the request was written to Kafka but the context canceled before a +// response is received, Kafka may still operate on the received request. +// +// If using this function to issue kmsg.ProduceRequest's, you must configure +// the client with the same RequiredAcks option that you use in the request. +// If you are issuing produce requests with 0 acks, you must configure the +// client with the same timeout you use in the request. The client will +// internally rewrite the incoming request's acks to match the client's +// configuration, and it will rewrite the timeout millis if the acks is 0. It +// is strongly recommended to not issue raw kmsg.ProduceRequest's. +func (cl *Client) Request(ctx context.Context, req kmsg.Request) (kmsg.Response, error) { + resps, merge := cl.shardedRequest(ctx, req) + // If there is no merge function, only one request was issued directly + // to a broker. Return the resp and err directly. + if merge == nil { + return resps[0].Resp, resps[0].Err + } + return merge(resps) +} + +func (cl *Client) retryable() *retryable { + return cl.retryableBrokerFn(func() (*broker, error) { return cl.broker(), nil }) +} + +func (cl *Client) retryableBrokerFn(fn func() (*broker, error)) *retryable { + return &retryable{cl: cl, br: fn} +} + +func (cl *Client) shouldRetry(tries int, err error) bool { + return (kerr.IsRetriable(err) || isRetryableBrokerErr(err)) && int64(tries) < cl.cfg.retries +} + +func (cl *Client) shouldRetryNext(tries int, err error) bool { + return isSkippableBrokerErr(err) && int64(tries) < cl.cfg.retries +} + +type retryable struct { + cl *Client + br func() (*broker, error) + last *broker + + // If non-zero, limitRetries may specify a smaller # of retries than + // the client RequestRetries number. This is used for internal requests + // that can fail / do not need to retry forever. + limitRetries int + + // parseRetryErr, if non-nil, can delete stale cached brokers. We do + // *not* return the error from this function to the caller, but we do + // use it to potentially retry. It is not necessary, but also not + // harmful, to return the input error. + parseRetryErr func(kmsg.Response, error) error +} + +type failDial struct{ fails int8 } + +// The controller and group/txn coordinators are cached. If dialing the broker +// repeatedly fails, we need to forget our cache to force a re-load: the broker +// may have completely died. +func (d *failDial) isRepeatedDialFail(err error) bool { + if isAnyDialErr(err) { + d.fails++ + if d.fails == 3 { + d.fails = 0 + return true + } + } + return false +} + +func (r *retryable) Request(ctx context.Context, req kmsg.Request) (kmsg.Response, error) { + tries := 0 + tryStart := time.Now() + retryTimeout := r.cl.cfg.retryTimeout(req.Key()) + + next, nextErr := r.br() +start: + tries++ + br, err := next, nextErr + r.last = br + var resp kmsg.Response + var retryErr error + if err == nil { + resp, err = r.last.waitResp(ctx, req) + if r.parseRetryErr != nil { + retryErr = r.parseRetryErr(resp, err) + } + } + + if err != nil || retryErr != nil { + if r.limitRetries == 0 || tries < r.limitRetries { + backoff := r.cl.cfg.retryBackoff(tries) + if retryTimeout == 0 || time.Now().Add(backoff).Sub(tryStart) <= retryTimeout { + // If this broker / request had a retryable error, we can + // just retry now. 
If the error is *not* retryable but
+				// is a broker-specific network error, and the next
+				// broker is different from the current, we also retry.
+				if r.cl.shouldRetry(tries, err) || r.cl.shouldRetry(tries, retryErr) {
+					r.cl.cfg.logger.Log(LogLevelDebug, "retrying request",
+						"tries", tries,
+						"backoff", backoff,
+						"request_error", err,
+						"response_error", retryErr,
+					)
+					if r.cl.waitTries(ctx, backoff) {
+						next, nextErr = r.br()
+						goto start
+					}
+				} else if r.cl.shouldRetryNext(tries, err) {
+					next, nextErr = r.br()
+					if next != br && r.cl.waitTries(ctx, backoff) {
+						goto start
+					}
+				}
+			}
+		}
+	}
+	return resp, err
+}
+
+// ResponseShard ties together a request with either the response it received
+// or an error that prevented a response from being received.
+type ResponseShard struct {
+	// Meta contains the broker that this request was issued to, or an
+	// unknown (node ID -1) metadata if the request could not be issued.
+	//
+	// Requests can fail to even be issued if an appropriate broker cannot
+	// be loaded or if the client cannot understand the request.
+	Meta BrokerMetadata
+
+	// Req is the request that was issued to this broker.
+	Req kmsg.Request
+
+	// Resp is the response received from the broker, if any.
+	Resp kmsg.Response
+
+	// Err, if non-nil, is the error that prevented a response from being
+	// received or the request from being issued.
+	Err error
+}
+
+// RequestSharded performs the same logic as Request, but returns all responses
+// from any broker that the request was split to. This always returns at least
+// one shard. If the request does not need to be issued (describing no groups),
+// this issues the request to a random broker just to ensure that one shard
+// exists.
+//
+// There are only a few requests that are strongly recommended to explicitly
+// use RequestSharded; the rest can by default use Request. These few requests
+// are mentioned in the documentation for Request.
+//
+// If, in the process of splitting a request, some topics or partitions are
+// found to not exist, or Kafka replies that a request should go to a broker
+// that does not exist, all those non-existent pieces are grouped into one
+// request to the first seed broker. This will show up as a seed broker node ID
+// (min int32) and the response will likely contain purely errors.
+//
+// The response shards are ordered by broker metadata.
+func (cl *Client) RequestSharded(ctx context.Context, req kmsg.Request) []ResponseShard {
+	resps, _ := cl.shardedRequest(ctx, req)
+	sort.Slice(resps, func(i, j int) bool {
+		l := &resps[i].Meta
+		r := &resps[j].Meta
+
+		if l.NodeID < r.NodeID {
+			return true
+		}
+		if r.NodeID < l.NodeID {
+			return false
+		}
+		if l.Host < r.Host {
+			return true
+		}
+		if r.Host < l.Host {
+			return false
+		}
+		if l.Port < r.Port {
+			return true
+		}
+		if r.Port < l.Port {
+			return false
+		}
+		if l.Rack == nil {
+			return true
+		}
+		if r.Rack == nil {
+			return false
+		}
+		return *l.Rack < *r.Rack
+	})
+	return resps
+}
+
+type shardMerge func([]ResponseShard) (kmsg.Response, error)
+
+func (cl *Client) shardedRequest(ctx context.Context, req kmsg.Request) ([]ResponseShard, shardMerge) {
+	ctx, cancel := context.WithCancel(ctx)
+	done := make(chan struct{})
+	defer close(done)
+	go func() {
+		defer cancel()
+		select {
+		case <-done:
+		case <-ctx.Done():
+		case <-cl.ctx.Done():
+		}
+	}()
+
+	// First, handle any sharded request. 
This comes before the conditional + // below because this handles two group requests, which we do not want + // to fall into the handleCoordinatorReq logic. + switch t := req.(type) { + case *kmsg.ListOffsetsRequest, // key 2 + *kmsg.OffsetFetchRequest, // key 9 + *kmsg.FindCoordinatorRequest, // key 10 + *kmsg.DescribeGroupsRequest, // key 15 + *kmsg.ListGroupsRequest, // key 16 + *kmsg.DeleteRecordsRequest, // key 21 + *kmsg.OffsetForLeaderEpochRequest, // key 23 + *kmsg.AddPartitionsToTxnRequest, // key 24 + *kmsg.WriteTxnMarkersRequest, // key 27 + *kmsg.DescribeConfigsRequest, // key 32 + *kmsg.AlterConfigsRequest, // key 33 + *kmsg.AlterReplicaLogDirsRequest, // key 34 + *kmsg.DescribeLogDirsRequest, // key 35 + *kmsg.DeleteGroupsRequest, // key 42 + *kmsg.IncrementalAlterConfigsRequest, // key 44 + *kmsg.DescribeProducersRequest, // key 61 + *kmsg.DescribeTransactionsRequest, // key 65 + *kmsg.ListTransactionsRequest: // key 66 + return cl.handleShardedReq(ctx, req) + + case *kmsg.MetadataRequest: + // We hijack any metadata request so as to populate our + // own brokers and controller ID. + br, resp, err := cl.fetchMetadata(ctx, t, false) + return shards(shard(br, req, resp, err)), nil + + case kmsg.AdminRequest: + return shards(cl.handleAdminReq(ctx, t)), nil + + case kmsg.GroupCoordinatorRequest, + kmsg.TxnCoordinatorRequest: + return shards(cl.handleCoordinatorReq(ctx, t)), nil + + case *kmsg.ApiVersionsRequest: + // As of v3, software name and version are required. + // If they are missing, we use the config options. + if t.ClientSoftwareName == "" && t.ClientSoftwareVersion == "" { + dup := *t + dup.ClientSoftwareName = cl.cfg.softwareName + dup.ClientSoftwareVersion = cl.cfg.softwareVersion + req = &dup + } + } + + // All other requests not handled above can be issued to any broker + // with the default retryable logic. + r := cl.retryable() + resp, err := r.Request(ctx, req) + return shards(shard(r.last, req, resp, err)), nil +} + +func shard(br *broker, req kmsg.Request, resp kmsg.Response, err error) ResponseShard { + if br == nil { // the broker could be nil if loading the broker failed. + return ResponseShard{unknownBrokerMetadata, req, resp, err} + } + return ResponseShard{br.meta, req, resp, err} +} + +func shards(shard ...ResponseShard) []ResponseShard { + return shard +} + +func findBroker(candidates []*broker, node int32) *broker { + n := sort.Search(len(candidates), func(n int) bool { return candidates[n].meta.NodeID >= node }) + var b *broker + if n < len(candidates) { + c := candidates[n] + if c.meta.NodeID == node { + b = c + } + } + return b +} + +// brokerOrErr returns the broker for ID or the error if the broker does not +// exist. +// +// If tryLoad is true and the broker does not exist, this attempts a broker +// metadata load once before failing. If the metadata load fails, this returns +// that error. 
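+//
+// For example, controller below passes a nil context on purpose: it has
+// already fetched broker metadata itself, so a second load attempt here
+// would be wasted work.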
+func (cl *Client) brokerOrErr(ctx context.Context, id int32, err error) (*broker, error) {
+	if id < 0 {
+		return nil, err
+	}
+
+	tryLoad := ctx != nil
+	tries := 0
+start:
+	var broker *broker
+	if id < 0 {
+		broker = findBroker(cl.loadSeeds(), id)
+	} else {
+		cl.brokersMu.RLock()
+		broker = findBroker(cl.brokers, id)
+		cl.brokersMu.RUnlock()
+	}
+
+	if broker == nil {
+		if tryLoad {
+			if loadErr := cl.fetchBrokerMetadata(ctx); loadErr != nil {
+				return nil, loadErr
+			}
+			// We will retry loading up to two times; if we load broker
+			// metadata twice successfully but neither load has the broker
+			// we are looking for, then we say our broker does not exist.
+			tries++
+			if tries < 2 {
+				goto start
+			}
+		}
+		return nil, err
+	}
+	return broker, nil
+}
+
+// controller returns the controller broker, forcing a broker load if
+// necessary.
+func (cl *Client) controller(ctx context.Context) (b *broker, err error) {
+	get := func() int32 {
+		cl.controllerIDMu.Lock()
+		defer cl.controllerIDMu.Unlock()
+		return cl.controllerID
+	}
+
+	defer func() {
+		if ec := (*errUnknownController)(nil); errors.As(err, &ec) {
+			cl.forgetControllerID(ec.id)
+		}
+	}()
+
+	var id int32
+	if id = get(); id < 0 {
+		if err := cl.fetchBrokerMetadata(ctx); err != nil {
+			return nil, err
+		}
+		if id = get(); id < 0 {
+			return nil, &errUnknownController{id}
+		}
+	}
+
+	return cl.brokerOrErr(nil, id, &errUnknownController{id})
+}
+
+// forgetControllerID is called once an admin request sees NOT_CONTROLLER.
+func (cl *Client) forgetControllerID(id int32) {
+	cl.controllerIDMu.Lock()
+	defer cl.controllerIDMu.Unlock()
+	if cl.controllerID == id {
+		cl.controllerID = unknownControllerID
+	}
+}
+
+const (
+	coordinatorTypeGroup int8 = 0
+	coordinatorTypeTxn   int8 = 1
+)
+
+type coordinatorKey struct {
+	name string
+	typ  int8
+}
+
+type coordinatorLoad struct {
+	loadWait chan struct{}
+	node     int32
+	err      error
+}
+
+func (cl *Client) loadCoordinator(ctx context.Context, typ int8, key string) (*broker, error) {
+	berr := cl.loadCoordinators(ctx, typ, key)[key]
+	return berr.b, berr.err
+}
+
+func (cl *Client) loadCoordinators(ctx context.Context, typ int8, keys ...string) map[string]brokerOrErr {
+	mch := make(chan map[string]brokerOrErr, 1)
+	go func() { mch <- cl.doLoadCoordinators(ctx, typ, keys...) }()
+	select {
+	case m := <-mch:
+		return m
+	case <-ctx.Done():
+		m := make(map[string]brokerOrErr, len(keys))
+		for _, k := range keys {
+			m[k] = brokerOrErr{nil, ctx.Err()}
+		}
+		return m
+	}
+}
+
+// doLoadCoordinators uses the caller context to cancel loading metadata
+// (brokerOrErr), but we use the client context to actually issue the request.
+// There should be only one direct call to doLoadCoordinators, just above in
+// loadCoordinator. It is possible for two requests to be loading the same
+// coordinator (in fact, that's the point of this function -- collapse these
+// requests). We do not want the first request canceling its context to cause
+// errors for the second request.
+//
+// It is ok to leave FindCoordinator running even if the caller quits. Worst
+// case, we just cache things for some time in the future; yay.
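+//
+// A rough sketch of the collapsing behavior (the group name "g" is a
+// placeholder):
+//
+//	ctx1, cancel1 := context.WithCancel(context.Background())
+//	go cl.loadCoordinator(ctx1, coordinatorTypeGroup, "g")                 // starts the load
+//	go cl.loadCoordinator(context.Background(), coordinatorTypeGroup, "g") // joins the same load
+//	cancel1() // only the first caller observes a context error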
+func (cl *Client) doLoadCoordinators(ctx context.Context, typ int8, keys ...string) map[string]brokerOrErr { + m := make(map[string]brokerOrErr, len(keys)) + if len(keys) == 0 { + return m + } + + toRequest := make(map[string]bool, len(keys)) // true == bypass the cache + for _, key := range keys { + toRequest[key] = false + } + + // For each of these keys, we have two cases: + // + // 1) The key is cached. It is either loading or loaded. We do not + // request the key ourselves; we wait for the load to finish. + // + // 2) The key is not cached, and we request it. + // + // If a key is cached but the coordinator no longer exists for us, we + // re-request to refresh the coordinator by setting toRequest[key] to + // true (bypass cache). + // + // If we ever request a key ourselves, we do not request it again. We + // ensure this by deleting from toRequest. We also delete if the key + // was cached with no error. + // + // We could have some keys cached and some that need to be requested. + // We issue a request but do not request what is cached. + // + // Lastly, we only ever trigger one metadata update, which happens if + // we have an unknown coordinator after we load coordinators. + var hasLoadedBrokers bool + for len(toRequest) > 0 { + var loadWait chan struct{} + load2key := make(map[*coordinatorLoad][]string) + + cl.coordinatorsMu.Lock() + for key, bypassCache := range toRequest { + c, ok := cl.coordinators[coordinatorKey{key, typ}] + if !ok || bypassCache { + if loadWait == nil { + loadWait = make(chan struct{}) + } + c = &coordinatorLoad{ + loadWait: loadWait, + err: errors.New("coordinator was not returned in broker response"), + } + cl.coordinators[coordinatorKey{key, typ}] = c + } + load2key[c] = append(load2key[c], key) + } + cl.coordinatorsMu.Unlock() + + if loadWait == nil { // all coordinators were cached + hasLoadedBrokers = cl.waitCoordinatorLoad(ctx, typ, load2key, !hasLoadedBrokers, toRequest, m) + continue + } + + key2load := make(map[string]*coordinatorLoad) + req := kmsg.NewPtrFindCoordinatorRequest() + req.CoordinatorType = typ + for c, keys := range load2key { + if c.loadWait == loadWait { // if this is our wait, this is ours to request + req.CoordinatorKeys = append(req.CoordinatorKeys, keys...) + for _, key := range keys { + key2load[key] = c + delete(toRequest, key) + } + } + } + + cl.cfg.logger.Log(LogLevelDebug, "prepared to issue find coordinator request", + "coordinator_type", typ, + "coordinator_keys", req.CoordinatorKeys, + ) + + shards := cl.RequestSharded(cl.ctx, req) + + for _, shard := range shards { + if shard.Err != nil { + req := shard.Req.(*kmsg.FindCoordinatorRequest) + for _, key := range req.CoordinatorKeys { + c, ok := key2load[key] + if ok { + c.err = shard.Err + } + } + } else { + resp := shard.Resp.(*kmsg.FindCoordinatorResponse) + for _, rc := range resp.Coordinators { + c, ok := key2load[rc.Key] + if ok { + c.err = kerr.ErrorForCode(rc.ErrorCode) + c.node = rc.NodeID + } + } + } + } + + // For anything we loaded, if it has a load failure (including + // not being replied to), we remove the key from the cache. We + // do not want to cache erroring values. + // + // We range key2load, which contains only coordinators we are + // responsible for loading. 
+ cl.coordinatorsMu.Lock() + for key, c := range key2load { + if c.err != nil { + ck := coordinatorKey{key, typ} + if loading, ok := cl.coordinators[ck]; ok && loading == c { + delete(cl.coordinators, ck) + } + } + } + cl.coordinatorsMu.Unlock() + + close(loadWait) + hasLoadedBrokers = cl.waitCoordinatorLoad(ctx, typ, load2key, !hasLoadedBrokers, toRequest, m) + } + return m +} + +// After some prep work, we wait for coordinators to load. We update toRequest +// values with true if the caller should bypass cache and re-load these +// coordinators. +// +// This returns if we load brokers, and populates m with results. +func (cl *Client) waitCoordinatorLoad(ctx context.Context, typ int8, load2key map[*coordinatorLoad][]string, shouldLoadBrokers bool, toRequest map[string]bool, m map[string]brokerOrErr) bool { + var loadedBrokers bool + for c, keys := range load2key { + <-c.loadWait + for _, key := range keys { + if c.err != nil { + delete(toRequest, key) + m[key] = brokerOrErr{nil, c.err} + continue + } + + var brokerCtx context.Context + if shouldLoadBrokers && !loadedBrokers { + brokerCtx = ctx + loadedBrokers = true + } + + b, err := cl.brokerOrErr(brokerCtx, c.node, &errUnknownCoordinator{c.node, coordinatorKey{key, typ}}) + if err != nil { + if _, exists := toRequest[key]; exists { + toRequest[key] = true + continue + } + // If the key does not exist, we just loaded this + // coordinator and also the brokers. We do not + // re-request. + } + delete(toRequest, key) + m[key] = brokerOrErr{b, err} + } + } + return loadedBrokers +} + +func (cl *Client) maybeDeleteStaleCoordinator(name string, typ int8, err error) bool { + switch { + case errors.Is(err, kerr.CoordinatorNotAvailable), + errors.Is(err, kerr.CoordinatorLoadInProgress), + errors.Is(err, kerr.NotCoordinator): + cl.deleteStaleCoordinator(name, typ) + return true + } + return false +} + +func (cl *Client) deleteStaleCoordinator(name string, typ int8) { + cl.coordinatorsMu.Lock() + defer cl.coordinatorsMu.Unlock() + k := coordinatorKey{name, typ} + v := cl.coordinators[k] + if v == nil { + return + } + select { + case <-v.loadWait: + delete(cl.coordinators, k) + default: + // We are actively reloading this coordinator. + } +} + +type brokerOrErr struct { + b *broker + err error +} + +func (cl *Client) handleAdminReq(ctx context.Context, req kmsg.Request) ResponseShard { + // Loading a controller can perform some wait; we accept that and do + // not account for the retries or the time to load the controller as + // part of the retries / time to issue the req. + r := cl.retryableBrokerFn(func() (*broker, error) { + return cl.controller(ctx) + }) + + // The only request that can break mapped metadata is CreatePartitions, + // because our mapping will still be "valid" but behind the scenes, + // more partitions exist. If CreatePartitions is going through this + // client, we preemptively delete any mapping for these topics. + if t, ok := req.(*kmsg.CreatePartitionsRequest); ok { + var topics []string + for i := range t.Topics { + topics = append(topics, t.Topics[i].Topic) + } + cl.maybeDeleteMappedMetadata(false, topics...) 
+ } + + var d failDial + r.parseRetryErr = func(resp kmsg.Response, err error) error { + if err != nil { + if d.isRepeatedDialFail(err) { + cl.forgetControllerID(r.last.meta.NodeID) + } + return err + } + var code int16 + switch t := resp.(type) { + case *kmsg.CreateTopicsResponse: + if len(t.Topics) > 0 { + code = t.Topics[0].ErrorCode + } + case *kmsg.DeleteTopicsResponse: + if len(t.Topics) > 0 { + code = t.Topics[0].ErrorCode + } + case *kmsg.CreatePartitionsResponse: + if len(t.Topics) > 0 { + code = t.Topics[0].ErrorCode + } + case *kmsg.ElectLeadersResponse: + if len(t.Topics) > 0 && len(t.Topics[0].Partitions) > 0 { + code = t.Topics[0].Partitions[0].ErrorCode + } + case *kmsg.AlterPartitionAssignmentsResponse: + code = t.ErrorCode + case *kmsg.ListPartitionReassignmentsResponse: + code = t.ErrorCode + case *kmsg.AlterUserSCRAMCredentialsResponse: + if len(t.Results) > 0 { + code = t.Results[0].ErrorCode + } + case *kmsg.VoteResponse: + code = t.ErrorCode + case *kmsg.BeginQuorumEpochResponse: + code = t.ErrorCode + case *kmsg.EndQuorumEpochResponse: + code = t.ErrorCode + case *kmsg.DescribeQuorumResponse: + code = t.ErrorCode + case *kmsg.AlterPartitionResponse: + code = t.ErrorCode + case *kmsg.UpdateFeaturesResponse: + code = t.ErrorCode + case *kmsg.EnvelopeResponse: + code = t.ErrorCode + } + if err := kerr.ErrorForCode(code); errors.Is(err, kerr.NotController) { + // There must be a last broker if we were able to issue + // the request and get a response. + cl.forgetControllerID(r.last.meta.NodeID) + return err + } + return nil + } + + resp, err := r.Request(ctx, req) + return shard(r.last, req, resp, err) +} + +// handleCoordinatorReq issues simple (non-shardable) group or txn requests. +func (cl *Client) handleCoordinatorReq(ctx context.Context, req kmsg.Request) ResponseShard { + switch t := req.(type) { + default: + // All group requests should be listed below, so if it isn't, + // then we do not know what this request is. + return shard(nil, req, nil, errors.New("client is too old; this client does not know what to do with this request")) + + ///////// + // TXN // -- all txn reqs are simple + ///////// + + case *kmsg.InitProducerIDRequest: + if t.TransactionalID != nil { + return cl.handleCoordinatorReqSimple(ctx, coordinatorTypeTxn, *t.TransactionalID, req) + } + // InitProducerID can go to any broker if the transactional ID + // is nil. By using handleReqWithCoordinator, we get the + // retryable-error parsing, even though we are not actually + // using a defined txn coordinator. This is fine; by passing no + // names, we delete no coordinator. 
+ coordinator, resp, err := cl.handleReqWithCoordinator(ctx, func() (*broker, error) { return cl.broker(), nil }, coordinatorTypeTxn, "", req) + return shard(coordinator, req, resp, err) + case *kmsg.AddOffsetsToTxnRequest: + return cl.handleCoordinatorReqSimple(ctx, coordinatorTypeTxn, t.TransactionalID, req) + case *kmsg.EndTxnRequest: + return cl.handleCoordinatorReqSimple(ctx, coordinatorTypeTxn, t.TransactionalID, req) + + /////////// + // GROUP // -- most group reqs are simple + /////////// + + case *kmsg.OffsetCommitRequest: + return cl.handleCoordinatorReqSimple(ctx, coordinatorTypeGroup, t.Group, req) + case *kmsg.TxnOffsetCommitRequest: + return cl.handleCoordinatorReqSimple(ctx, coordinatorTypeGroup, t.Group, req) + case *kmsg.JoinGroupRequest: + return cl.handleCoordinatorReqSimple(ctx, coordinatorTypeGroup, t.Group, req) + case *kmsg.HeartbeatRequest: + return cl.handleCoordinatorReqSimple(ctx, coordinatorTypeGroup, t.Group, req) + case *kmsg.LeaveGroupRequest: + return cl.handleCoordinatorReqSimple(ctx, coordinatorTypeGroup, t.Group, req) + case *kmsg.SyncGroupRequest: + return cl.handleCoordinatorReqSimple(ctx, coordinatorTypeGroup, t.Group, req) + case *kmsg.OffsetDeleteRequest: + return cl.handleCoordinatorReqSimple(ctx, coordinatorTypeGroup, t.Group, req) + } +} + +// handleCoordinatorReqSimple issues a request that contains a single group or +// txn to its coordinator. +// +// The error is inspected to see if it is a retryable error and, if so, the +// coordinator is deleted. +func (cl *Client) handleCoordinatorReqSimple(ctx context.Context, typ int8, name string, req kmsg.Request) ResponseShard { + coordinator, resp, err := cl.handleReqWithCoordinator(ctx, func() (*broker, error) { + return cl.loadCoordinator(ctx, typ, name) + }, typ, name, req) + return shard(coordinator, req, resp, err) +} + +// handleReqWithCoordinator actually issues a request to a coordinator and +// does retry handling. +// +// This avoids retries on the two group requests that need to be sharded. +func (cl *Client) handleReqWithCoordinator( + ctx context.Context, + coordinator func() (*broker, error), + typ int8, + name string, // group ID or the transactional id + req kmsg.Request, +) (*broker, kmsg.Response, error) { + r := cl.retryableBrokerFn(coordinator) + var d failDial + r.parseRetryErr = func(resp kmsg.Response, err error) error { + if err != nil { + if d.isRepeatedDialFail(err) { + cl.deleteStaleCoordinator(name, typ) + } + return err + } + var code int16 + switch t := resp.(type) { + // TXN + case *kmsg.InitProducerIDResponse: + code = t.ErrorCode + case *kmsg.AddOffsetsToTxnResponse: + code = t.ErrorCode + case *kmsg.EndTxnResponse: + code = t.ErrorCode + + // GROUP + case *kmsg.OffsetCommitResponse: + if len(t.Topics) > 0 && len(t.Topics[0].Partitions) > 0 { + code = t.Topics[0].Partitions[0].ErrorCode + } + case *kmsg.TxnOffsetCommitResponse: + if len(t.Topics) > 0 && len(t.Topics[0].Partitions) > 0 { + code = t.Topics[0].Partitions[0].ErrorCode + } + case *kmsg.JoinGroupResponse: + code = t.ErrorCode + case *kmsg.HeartbeatResponse: + code = t.ErrorCode + case *kmsg.LeaveGroupResponse: + code = t.ErrorCode + case *kmsg.SyncGroupResponse: + code = t.ErrorCode + } + + // ListGroups, OffsetFetch, DeleteGroups, DescribeGroups, and + // DescribeTransactions handled in sharding. 
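+		//
+		// If the extracted code maps to a retryable coordinator error
+		// (not coordinator, coordinator loading, or coordinator not
+		// available), the cached coordinator is evicted below so the
+		// retry re-discovers the coordinator instead of reusing a
+		// stale mapping.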
+
+		if err := kerr.ErrorForCode(code); cl.maybeDeleteStaleCoordinator(name, typ, err) {
+			return err
+		}
+		return nil
+	}
+
+	resp, err := r.Request(ctx, req)
+	return r.last, resp, err
+}
+
+// Broker returns a handle to a specific broker to directly issue requests to.
+// Note that there is no guarantee that this broker exists; if it does not,
+// requests will fail with an unknown broker error.
+func (cl *Client) Broker(id int) *Broker {
+	return &Broker{
+		id: int32(id),
+		cl: cl,
+	}
+}
+
+// DiscoveredBrokers returns all brokers that were discovered from prior
+// metadata responses. This does not actually issue a metadata request to load
+// brokers; if you wish to ensure this returns all brokers, be sure to manually
+// issue a metadata request before this. This also does not include seed
+// brokers, which are internally saved under special internal broker IDs (but,
+// it does include those brokers under their normal IDs as returned from a
+// metadata response).
+func (cl *Client) DiscoveredBrokers() []*Broker {
+	cl.brokersMu.RLock()
+	defer cl.brokersMu.RUnlock()
+
+	var bs []*Broker
+	for _, broker := range cl.brokers {
+		bs = append(bs, &Broker{id: broker.meta.NodeID, cl: cl})
+	}
+	return bs
+}
+
+// SeedBrokers returns all seed brokers.
+func (cl *Client) SeedBrokers() []*Broker {
+	var bs []*Broker
+	for _, broker := range cl.loadSeeds() {
+		bs = append(bs, &Broker{id: broker.meta.NodeID, cl: cl})
+	}
+	return bs
+}
+
+// UpdateSeedBrokers updates the client's list of seed brokers. Over the course
+// of a long period of time, you might replace all brokers that you originally
+// specified as seeds. This command allows you to replace the client's list of
+// seeds.
+//
+// This returns an error if any of the input addrs is not a host:port. If the
+// input list is empty, the function returns without replacing the seeds.
+func (cl *Client) UpdateSeedBrokers(addrs ...string) error {
+	if len(addrs) == 0 {
+		return nil
+	}
+	seeds, err := parseSeeds(addrs)
+	if err != nil {
+		return err
+	}
+
+	seedBrokers := make([]*broker, 0, len(seeds))
+	for i, seed := range seeds {
+		b := cl.newBroker(unknownSeedID(i), seed.host, seed.port, nil)
+		seedBrokers = append(seedBrokers, b)
+	}
+
+	// We lock to guard against concurrently updating seeds; we do not need
+	// the lock for what this usually guards.
+	cl.brokersMu.Lock()
+	old := cl.loadSeeds()
+	cl.seeds.Store(seedBrokers)
+	cl.brokersMu.Unlock()
+
+	for _, b := range old {
+		b.stopForever()
+	}
+
+	return nil
+}
+
+// Broker pairs a broker ID with a client to directly issue requests to a
+// specific broker.
+type Broker struct {
+	id int32
+	cl *Client
+}
+
+// Request issues a request to a broker. If the broker does not exist in the
+// client, this returns an unknown broker error. Requests are not retried.
+//
+// The passed context can be used to cancel a request and return early.
+// Note that if the request is not canceled before it is written to Kafka,
+// you may just end up canceling and not receiving the response to what Kafka
+// inevitably does.
+//
+// It is more beneficial to always use RetriableRequest.
+func (b *Broker) Request(ctx context.Context, req kmsg.Request) (kmsg.Response, error) {
+	return b.request(ctx, false, req)
+}
+
+// RetriableRequest issues a request to a broker the same as Request, but
+// retries in the face of retryable broker connection errors. This does not
+// retry on response internal errors.
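+//
+// A small usage sketch (broker ID 1 is a placeholder and is not guaranteed to
+// exist in any given cluster):
+//
+//	b := cl.Broker(1)
+//	resp, err := b.RetriableRequest(ctx, kmsg.NewPtrApiVersionsRequest())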
+func (b *Broker) RetriableRequest(ctx context.Context, req kmsg.Request) (kmsg.Response, error) { + return b.request(ctx, true, req) +} + +func (b *Broker) request(ctx context.Context, retry bool, req kmsg.Request) (kmsg.Response, error) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + var resp kmsg.Response + var err error + done := make(chan struct{}) + + go func() { + defer close(done) + + if !retry { + var br *broker + br, err = b.cl.brokerOrErr(ctx, b.id, errUnknownBroker) + if err == nil { + resp, err = br.waitResp(ctx, req) + } + } else { + resp, err = b.cl.retryableBrokerFn(func() (*broker, error) { + return b.cl.brokerOrErr(ctx, b.id, errUnknownBroker) + }).Request(ctx, req) + } + }() + + select { + case <-done: + return resp, err + case <-ctx.Done(): + return nil, ctx.Err() + case <-b.cl.ctx.Done(): + return nil, b.cl.ctx.Err() + } +} + +////////////////////// +// REQUEST SHARDING // +////////////////////// + +// Below here lies all logic to handle requests that need to be split and sent +// to many brokers. A lot of the logic for each sharding function is very +// similar, but each sharding function uses slightly different types. + +// issueShard is a request that has been split and is ready to be sent to the +// given broker ID. +type issueShard struct { + req kmsg.Request + broker int32 + any bool + + // if non-nil, we could not map this request shard to any broker, and + // this error is the reason. + err error +} + +// sharder splits a request. +type sharder interface { + // shard splits a request and returns the requests to issue tied to the + // brokers to issue the requests to. This can return an error if there + // is some pre-loading that needs to happen. If an error is returned, + // the request that was intended for splitting is failed wholesale. + // + // Due to sharded requests not being retryable if a response is + // received, to avoid stale coordinator errors, this function should + // not use any previously cached metadata. + // + // This takes the last error if the request is being retried, which is + // currently only useful for errBrokerTooOld. + shard(context.Context, kmsg.Request, error) ([]issueShard, bool, error) + + // onResp is called on a successful response to investigate the + // response and potentially perform cleanup, and potentially returns an + // error signifying to retry. See onShardRespErr below for more + // details. + onResp(kmsg.Request, kmsg.Response) error + + // merge is a function that can be used to merge sharded responses into + // one response. This is used by the client.Request method. + merge([]ResponseShard) (kmsg.Response, error) +} + +// handleShardedReq splits and issues requests to brokers, recursively +// splitting as necessary if requests fail and need remapping. +func (cl *Client) handleShardedReq(ctx context.Context, req kmsg.Request) ([]ResponseShard, shardMerge) { + // First, determine our sharder. 
+ var sharder sharder + switch req.(type) { + case *kmsg.ListOffsetsRequest: + sharder = &listOffsetsSharder{cl} + case *kmsg.OffsetFetchRequest: + sharder = &offsetFetchSharder{cl} + case *kmsg.FindCoordinatorRequest: + sharder = &findCoordinatorSharder{cl} + case *kmsg.DescribeGroupsRequest: + sharder = &describeGroupsSharder{cl} + case *kmsg.ListGroupsRequest: + sharder = &listGroupsSharder{cl} + case *kmsg.DeleteRecordsRequest: + sharder = &deleteRecordsSharder{cl} + case *kmsg.OffsetForLeaderEpochRequest: + sharder = &offsetForLeaderEpochSharder{cl} + case *kmsg.AddPartitionsToTxnRequest: + sharder = &addPartitionsToTxnSharder{cl} + case *kmsg.WriteTxnMarkersRequest: + sharder = &writeTxnMarkersSharder{cl} + case *kmsg.DescribeConfigsRequest: + sharder = &describeConfigsSharder{cl} + case *kmsg.AlterConfigsRequest: + sharder = &alterConfigsSharder{cl} + case *kmsg.AlterReplicaLogDirsRequest: + sharder = &alterReplicaLogDirsSharder{cl} + case *kmsg.DescribeLogDirsRequest: + sharder = &describeLogDirsSharder{cl} + case *kmsg.DeleteGroupsRequest: + sharder = &deleteGroupsSharder{cl} + case *kmsg.IncrementalAlterConfigsRequest: + sharder = &incrementalAlterConfigsSharder{cl} + case *kmsg.DescribeProducersRequest: + sharder = &describeProducersSharder{cl} + case *kmsg.DescribeTransactionsRequest: + sharder = &describeTransactionsSharder{cl} + case *kmsg.ListTransactionsRequest: + sharder = &listTransactionsSharder{cl} + } + + // If a request fails, we re-shard it (in case it needs to be split + // again). reqTry tracks how many total tries a request piece has had; + // we quit at either the max configured tries or max configured time. + type reqTry struct { + tries int + req kmsg.Request + lastErr error + } + + var ( + shardsMu sync.Mutex + shards []ResponseShard + + addShard = func(shard ResponseShard) { + shardsMu.Lock() + defer shardsMu.Unlock() + shards = append(shards, shard) + } + + start = time.Now() + retryTimeout = cl.cfg.retryTimeout(req.Key()) + + wg sync.WaitGroup + issue func(reqTry) + ) + + l := cl.cfg.logger + debug := l.Level() >= LogLevelDebug + + // issue is called to progressively split and issue requests. + // + // This recursively calls itself if a request fails and can be retried. + // We avoid stack problems because this calls itself in a goroutine. + issue = func(try reqTry) { + issues, reshardable, err := sharder.shard(ctx, try.req, try.lastErr) + if err != nil { + l.Log(LogLevelDebug, "unable to shard request", "req", kmsg.Key(try.req.Key()).Name(), "previous_tries", try.tries, "err", err) + addShard(shard(nil, try.req, nil, err)) // failure to shard means data loading failed; this request is failed + return + } + + // If the request actually does not need to be issued, we issue + // it to a random broker. There is no benefit to this, but at + // least we will return one shard. 
+ if len(issues) == 0 { + issues = []issueShard{{ + req: try.req, + any: true, + }} + reshardable = true + } + + if debug { + var key int16 + var brokerAnys []string + for _, issue := range issues { + key = issue.req.Key() + if issue.err != nil { + brokerAnys = append(brokerAnys, "err") + } else if issue.any { + brokerAnys = append(brokerAnys, "any") + } else { + brokerAnys = append(brokerAnys, fmt.Sprintf("%d", issue.broker)) + } + } + l.Log(LogLevelDebug, "sharded request", "req", kmsg.Key(key).Name(), "destinations", brokerAnys) + } + + for i := range issues { + myIssue := issues[i] + myUnderlyingReq := myIssue.req + var isPinned bool + if pinned, ok := myIssue.req.(*pinReq); ok { + myUnderlyingReq = pinned.Request + isPinned = true + } + + if myIssue.err != nil { + addShard(shard(nil, myUnderlyingReq, nil, myIssue.err)) + continue + } + + tries := try.tries + wg.Add(1) + go func() { + defer wg.Done() + start: + tries++ + + broker := cl.broker() + var err error + if !myIssue.any { + broker, err = cl.brokerOrErr(ctx, myIssue.broker, errUnknownBroker) + } + if err != nil { + addShard(shard(nil, myUnderlyingReq, nil, err)) // failure to load a broker is a failure to issue a request + return + } + + resp, err := broker.waitResp(ctx, myIssue.req) + var errIsFromResp bool + if err == nil { + err = sharder.onResp(myUnderlyingReq, resp) // perform some potential cleanup, and potentially receive an error to retry + if ke := (*kerr.Error)(nil); errors.As(err, &ke) { + errIsFromResp = true + } + } + + // If we failed to issue the request, we *maybe* will retry. + // We could have failed to even issue the request or receive + // a response, which is retryable. + // + // If a pinned req fails with errBrokerTooOld, we always retry + // immediately. The request was not even issued. However, as a + // safety, we only do this 3 times to avoid some super weird + // pathological spin loop. + backoff := cl.cfg.retryBackoff(tries) + if err != nil && + (reshardable && isPinned && errors.Is(err, errBrokerTooOld) && tries <= 3) || + (retryTimeout == 0 || time.Now().Add(backoff).Sub(start) <= retryTimeout) && cl.shouldRetry(tries, err) && cl.waitTries(ctx, backoff) { + // Non-reshardable re-requests just jump back to the + // top where the broker is loaded. This is the case on + // requests where the original request is split to + // dedicated brokers; we do not want to re-shard that. + if !reshardable { + l.Log(LogLevelDebug, "sharded request failed, reissuing without resharding", "req", kmsg.Key(myIssue.req.Key()).Name(), "time_since_start", time.Since(start), "tries", try.tries, "err", err) + goto start + } + l.Log(LogLevelDebug, "sharded request failed, resharding and reissuing", "req", kmsg.Key(myIssue.req.Key()).Name(), "time_since_start", time.Since(start), "tries", try.tries, "err", err) + issue(reqTry{tries, myUnderlyingReq, err}) + return + } + + // If we pulled an error out of the response body in an attempt + // to possibly retry, the request was NOT an error that we want + // to bubble as a shard error. The request was successful, we + // have a response. Before we add the shard, strip the error. + // The end user can parse the response ErrorCode. + if errIsFromResp { + err = nil + } + addShard(shard(broker, myUnderlyingReq, resp, err)) // the error was not retryable + }() + } + } + + issue(reqTry{0, req, nil}) + wg.Wait() + + return shards, sharder.merge +} + +// For sharded errors, we prefer to keep retryable errors rather than +// non-retryable errors. 
We keep a non-retryable error only if everything is
+// non-retryable.
+//
+// We favor retryable because retryable means we used a stale cache value; we
+// clear the stale entries on failure and the retry uses fresh data. The
+// request will be split and remapped, and the non-retryable errors will be
+// encountered again.
+func onRespShardErr(err *error, newKerr error) {
+	if newKerr == nil || *err != nil && kerr.IsRetriable(*err) {
+		return
+	}
+	*err = newKerr
+}
+
+// a convenience function for when a request needs to be issued identically to
+// all brokers.
+func (cl *Client) allBrokersShardedReq(ctx context.Context, fn func() kmsg.Request) ([]issueShard, bool, error) {
+	if err := cl.fetchBrokerMetadata(ctx); err != nil {
+		return nil, false, err
+	}
+
+	var issues []issueShard
+	cl.brokersMu.RLock()
+	for _, broker := range cl.brokers {
+		issues = append(issues, issueShard{
+			req:    fn(),
+			broker: broker.meta.NodeID,
+		})
+	}
+	cl.brokersMu.RUnlock()
+
+	return issues, false, nil // we do NOT re-shard these requests
+}
+
+// a convenience function for saving the first ResponseShard error.
+func firstErrMerger(sresps []ResponseShard, merge func(kresp kmsg.Response)) error {
+	var firstErr error
+	for _, sresp := range sresps {
+		if sresp.Err != nil {
+			if firstErr == nil {
+				firstErr = sresp.Err
+			}
+			continue
+		}
+		merge(sresp.Resp)
+	}
+	return firstErr
+}
+
+type mappedMetadataTopic struct {
+	t    kmsg.MetadataResponseTopic
+	ps   map[int32]kmsg.MetadataResponseTopicPartition
+	when time.Time
+}
+
+// For NOT_LEADER_FOR_PARTITION:
+// We always delete stale metadata. It's possible that a leader rebalance
+// happened immediately after we requested metadata; we should not pin to
+// the stale metadata for 1s.
+//
+// For UNKNOWN_TOPIC_OR_PARTITION:
+// We only delete stale metadata if it is older than the min age or 1s,
+// whichever is smaller. We use 1s even if min age is larger, because we want
+// to encourage larger min age for caching purposes. More obvious would be to
+// *always* evict the cache here, but if we *just* requested metadata, then
+// evicting the cache would cause churn for a topic that genuinely does not
+// exist.
+func (cl *Client) maybeDeleteMappedMetadata(unknownTopic bool, ts ...string) (shouldRetry bool) {
+	if len(ts) == 0 {
+		return
+	}
+
+	var min time.Duration
+	if unknownTopic {
+		min = time.Second
+		if cl.cfg.metadataMinAge < min {
+			min = cl.cfg.metadataMinAge
+		}
+	}
+
+	cl.mappedMetaMu.Lock()
+	defer cl.mappedMetaMu.Unlock()
+	for _, t := range ts {
+		tcached, exists := cl.mappedMeta[t]
+		if exists && (min == 0 || time.Since(tcached.when) > min) {
+			shouldRetry = true
+			delete(cl.mappedMeta, t)
+		}
+	}
+	return shouldRetry
+}
+
+// We only cache for metadata min age. We could theoretically cache forever,
+// but an out-of-band CreatePartitions can result in our metadata being stale
+// and us never knowing. So, we choose metadata min age. There are only a few
+// requests that are sharded and use metadata, and the one this benefits most
+// is ListOffsets. Likely, ListOffsets for the same topic will be issued back
+// to back, so not caching for so long is ok.
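+//
+// A condensed sketch of the eviction rules above (the topic name "t" is a
+// placeholder):
+//
+//	cl.maybeDeleteMappedMetadata(false, "t") // NOT_LEADER...: always evict "t"
+//	cl.maybeDeleteMappedMetadata(true, "t")  // UNKNOWN...: evict only if older than min(1s, metadata min age)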
+func (cl *Client) fetchCachedMappedMetadata(ts ...string) (map[string]mappedMetadataTopic, []string) {
+	cl.mappedMetaMu.Lock()
+	defer cl.mappedMetaMu.Unlock()
+	if cl.mappedMeta == nil {
+		return nil, ts
+	}
+	cached := make(map[string]mappedMetadataTopic)
+	needed := ts[:0]
+
+	for _, t := range ts {
+		tcached, exists := cl.mappedMeta[t]
+		if exists && time.Since(tcached.when) < cl.cfg.metadataMinAge {
+			cached[t] = tcached
+		} else {
+			needed = append(needed, t)
+			delete(cl.mappedMeta, t)
+		}
+	}
+	return cached, needed
+}
+
+// fetchMappedMetadata provides a convenient way of working with metadata;
+// this is garbage heavy, so it is only used in one-off requests in this
+// package.
+func (cl *Client) fetchMappedMetadata(ctx context.Context, topics []string, useCache bool) (map[string]mappedMetadataTopic, error) {
+	var r map[string]mappedMetadataTopic
+	needed := topics
+	if useCache {
+		r, needed = cl.fetchCachedMappedMetadata(topics...)
+		if len(needed) == 0 {
+			return r, nil
+		}
+	}
+	if r == nil {
+		r = make(map[string]mappedMetadataTopic)
+	}
+
+	_, meta, err := cl.fetchMetadataForTopics(ctx, false, needed)
+	if err != nil {
+		return nil, err
+	}
+
+	// Cache the mapped metadata, and also store each topic in the results.
+	cl.storeCachedMappedMetadata(meta, func(entry mappedMetadataTopic) {
+		r[*entry.t.Topic] = entry
+	})
+
+	return r, nil
+}
+
+// storeCachedMappedMetadata caches the fetched metadata in the Client, and calls the onEachTopic callback
+// function for each topic in the MetadataResponse.
+func (cl *Client) storeCachedMappedMetadata(meta *kmsg.MetadataResponse, onEachTopic func(_ mappedMetadataTopic)) {
+	cl.mappedMetaMu.Lock()
+	defer cl.mappedMetaMu.Unlock()
+	if cl.mappedMeta == nil {
+		cl.mappedMeta = make(map[string]mappedMetadataTopic)
+	}
+	when := time.Now()
+	for _, topic := range meta.Topics {
+		if topic.Topic == nil {
+			// We do not request with topic IDs, so we should not
+			// receive topic IDs in the response.
+			continue
+		}
+		t := mappedMetadataTopic{
+			t:    topic,
+			ps:   make(map[int32]kmsg.MetadataResponseTopicPartition),
+			when: when,
+		}
+		cl.mappedMeta[*topic.Topic] = t
+		for _, partition := range topic.Partitions {
+			t.ps[partition.Partition] = partition
+		}
+
+		if onEachTopic != nil {
+			onEachTopic(t)
+		}
+	}
+	if len(meta.Topics) != len(cl.mappedMeta) {
+		for topic, mapped := range cl.mappedMeta {
+			if mapped.when.Equal(when) {
+				continue
+			}
+			if time.Since(mapped.when) > cl.cfg.metadataMinAge {
+				delete(cl.mappedMeta, topic)
+			}
+		}
+	}
+}
+
+func unknownOrCode(exists bool, code int16) error {
+	if !exists {
+		return kerr.UnknownTopicOrPartition
+	}
+	return kerr.ErrorForCode(code)
+}
+
+func noLeader(l int32) error {
+	if l < 0 {
+		return kerr.LeaderNotAvailable
+	}
+	return nil
+}
+
+// This is a helper for the sharded requests below; if mapping metadata fails
+// to load topics or partitions, we group the failures by error.
+//
+// We use a lot of reflect magic to make the actual usage much nicer.
+type unknownErrShards struct {
+	// load err => topic => mystery slice type
+	//
+	// The mystery type is basically just []Partition, where Partition can
+	// be any kmsg type.
+	mapped map[error]map[string]reflect.Value
+}
+
+// err stores a new failing partition with its failing error.
+//
+// partition's type must match the partition type that collect's per-topic
+// merge function accepts.
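+//
+// A rough sketch of the overall flow, mirroring the ListOffsets usage below:
+//
+//	var u unknownErrShards
+//	u.err(kerr.UnknownTopicOrPartition, "t", kmsg.NewListOffsetsRequestTopicPartition())
+//	shards := u.collect(kmsg.NewPtrListOffsetsRequest, func(r *kmsg.ListOffsetsRequest, topic string, ps []kmsg.ListOffsetsRequestTopicPartition) {
+//		t := kmsg.NewListOffsetsRequestTopic()
+//		t.Topic, t.Partitions = topic, ps
+//		r.Topics = append(r.Topics, t)
+//	})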
+func (l *unknownErrShards) err(err error, topic string, partition any) {
+	if l.mapped == nil {
+		l.mapped = make(map[error]map[string]reflect.Value)
+	}
+	t := l.mapped[err]
+	if t == nil {
+		t = make(map[string]reflect.Value)
+		l.mapped[err] = t
+	}
+	slice, ok := t[topic]
+	if !ok {
+		// We make a slice of the input partition type.
+		slice = reflect.MakeSlice(reflect.SliceOf(reflect.TypeOf(partition)), 0, 1)
+	}
+
+	t[topic] = reflect.Append(slice, reflect.ValueOf(partition))
+}
+
+// errs takes an input slice of partitions and stores each with its failing
+// error.
+//
+// partitions is a slice whose element type matches the partition type that
+// collect's per-topic merge function accepts.
+func (l *unknownErrShards) errs(err error, topic string, partitions any) {
+	v := reflect.ValueOf(partitions)
+	for i := 0; i < v.Len(); i++ {
+		l.err(err, topic, v.Index(i).Interface())
+	}
+}
+
+// Returns issueShards for each error stored in l.
+//
+// This takes two functions: a factory that returns a new kmsg.Request, and a
+// function that adds a topic and its partitions to that request.
+//
+// Thus, for a concrete request type R, mkreq is of type func() R and
+// mergeParts is of type func(R, string, []P).
+func (l *unknownErrShards) collect(mkreq, mergeParts any) []issueShard {
+	if len(l.mapped) == 0 {
+		return nil
+	}
+
+	var shards []issueShard
+
+	factory := reflect.ValueOf(mkreq)
+	perTopic := reflect.ValueOf(mergeParts)
+	for err, topics := range l.mapped {
+		req := factory.Call(nil)[0]
+
+		var ntopics, npartitions int
+		for topic, partitions := range topics {
+			ntopics++
+			npartitions += partitions.Len()
+			perTopic.Call([]reflect.Value{req, reflect.ValueOf(topic), partitions})
+		}
+
+		shards = append(shards, issueShard{
+			req: req.Interface().(kmsg.Request),
+			err: err,
+		})
+	}
+
+	return shards
+}
+
+// handles sharding ListOffsetsRequest
+type listOffsetsSharder struct{ *Client }
+
+func (cl *listOffsetsSharder) shard(ctx context.Context, kreq kmsg.Request, _ error) ([]issueShard, bool, error) {
+	req := kreq.(*kmsg.ListOffsetsRequest)
+
+	// For listing offsets, we need the broker leader for each partition we
+	// are listing. Thus, we first load metadata for the topics.
+	//
+	// Metadata loading performs retries; if we fail here, then we do not
+	// issue sharded requests.
+	var need []string
+	for _, topic := range req.Topics {
+		need = append(need, topic.Topic)
+	}
+	mapping, err := cl.fetchMappedMetadata(ctx, need, true)
+	if err != nil {
+		return nil, false, err
+	}
+
+	brokerReqs := make(map[int32]map[string][]kmsg.ListOffsetsRequestTopicPartition)
+	var unknowns unknownErrShards
+
+	// For any topic or partition that failed to load, we blindly issue
+	// the list to the first seed broker. We expect the list to fail, but
+	// it is the best we can do.
+ for _, topic := range req.Topics { + t := topic.Topic + tmapping, exists := mapping[t] + if err := unknownOrCode(exists, tmapping.t.ErrorCode); err != nil { + unknowns.errs(err, t, topic.Partitions) + continue + } + for _, partition := range topic.Partitions { + p, exists := tmapping.ps[partition.Partition] + if err := unknownOrCode(exists, p.ErrorCode); err != nil { + unknowns.err(err, t, partition) + continue + } + if err := noLeader(p.Leader); err != nil { + unknowns.err(err, t, partition) + continue + } + + brokerReq := brokerReqs[p.Leader] + if brokerReq == nil { + brokerReq = make(map[string][]kmsg.ListOffsetsRequestTopicPartition) + brokerReqs[p.Leader] = brokerReq + } + brokerReq[t] = append(brokerReq[t], partition) + } + } + + mkreq := func() *kmsg.ListOffsetsRequest { + r := kmsg.NewPtrListOffsetsRequest() + r.ReplicaID = req.ReplicaID + r.IsolationLevel = req.IsolationLevel + return r + } + + var issues []issueShard + for brokerID, brokerReq := range brokerReqs { + req := mkreq() + for topic, parts := range brokerReq { + reqTopic := kmsg.NewListOffsetsRequestTopic() + reqTopic.Topic = topic + reqTopic.Partitions = parts + req.Topics = append(req.Topics, reqTopic) + } + issues = append(issues, issueShard{ + req: req, + broker: brokerID, + }) + } + + return append(issues, unknowns.collect(mkreq, func(r *kmsg.ListOffsetsRequest, topic string, parts []kmsg.ListOffsetsRequestTopicPartition) { + reqTopic := kmsg.NewListOffsetsRequestTopic() + reqTopic.Topic = topic + reqTopic.Partitions = parts + r.Topics = append(r.Topics, reqTopic) + })...), true, nil // this is reshardable +} + +func (cl *listOffsetsSharder) onResp(_ kmsg.Request, kresp kmsg.Response) error { + var ( + resp = kresp.(*kmsg.ListOffsetsResponse) + del []string + retErr error + unknownTopic bool + ) + + for i := range resp.Topics { + t := &resp.Topics[i] + for j := range t.Partitions { + p := &t.Partitions[j] + err := kerr.ErrorForCode(p.ErrorCode) + if err == kerr.UnknownTopicOrPartition || err == kerr.NotLeaderForPartition { + del = append(del, t.Topic) + unknownTopic = unknownTopic || err == kerr.UnknownTopicOrPartition + } + onRespShardErr(&retErr, err) + } + } + if cl.maybeDeleteMappedMetadata(unknownTopic, del...) { + return retErr + } + return nil +} + +func (*listOffsetsSharder) merge(sresps []ResponseShard) (kmsg.Response, error) { + merged := kmsg.NewPtrListOffsetsResponse() + topics := make(map[string][]kmsg.ListOffsetsResponseTopicPartition) + + firstErr := firstErrMerger(sresps, func(kresp kmsg.Response) { + resp := kresp.(*kmsg.ListOffsetsResponse) + merged.Version = resp.Version + merged.ThrottleMillis = resp.ThrottleMillis + + for _, topic := range resp.Topics { + topics[topic.Topic] = append(topics[topic.Topic], topic.Partitions...) 
+		}
+	})
+	for topic, partitions := range topics {
+		respTopic := kmsg.NewListOffsetsResponseTopic()
+		respTopic.Topic = topic
+		respTopic.Partitions = partitions
+		merged.Topics = append(merged.Topics, respTopic)
+	}
+	return merged, firstErr
+}
+
+// handles sharding OffsetFetchRequest
+type offsetFetchSharder struct{ *Client }
+
+func offsetFetchReqToGroup(req *kmsg.OffsetFetchRequest) kmsg.OffsetFetchRequestGroup {
+	g := kmsg.NewOffsetFetchRequestGroup()
+	g.Group = req.Group
+	for _, topic := range req.Topics {
+		reqTopic := kmsg.NewOffsetFetchRequestGroupTopic()
+		reqTopic.Topic = topic.Topic
+		reqTopic.Partitions = topic.Partitions
+		g.Topics = append(g.Topics, reqTopic)
+	}
+	return g
+}
+
+func offsetFetchGroupToReq(requireStable bool, group kmsg.OffsetFetchRequestGroup) *kmsg.OffsetFetchRequest {
+	req := kmsg.NewPtrOffsetFetchRequest()
+	req.RequireStable = requireStable
+	req.Group = group.Group
+	for _, topic := range group.Topics {
+		reqTopic := kmsg.NewOffsetFetchRequestTopic()
+		reqTopic.Topic = topic.Topic
+		reqTopic.Partitions = topic.Partitions
+		req.Topics = append(req.Topics, reqTopic)
+	}
+	return req
+}
+
+func offsetFetchRespToGroup(req *kmsg.OffsetFetchRequest, resp *kmsg.OffsetFetchResponse) kmsg.OffsetFetchResponseGroup {
+	g := kmsg.NewOffsetFetchResponseGroup()
+	g.Group = req.Group
+	g.ErrorCode = resp.ErrorCode
+	for _, topic := range resp.Topics {
+		t := kmsg.NewOffsetFetchResponseGroupTopic()
+		t.Topic = topic.Topic
+		for _, partition := range topic.Partitions {
+			p := kmsg.NewOffsetFetchResponseGroupTopicPartition()
+			p.Partition = partition.Partition
+			p.Offset = partition.Offset
+			p.LeaderEpoch = partition.LeaderEpoch
+			p.Metadata = partition.Metadata
+			p.ErrorCode = partition.ErrorCode
+			t.Partitions = append(t.Partitions, p)
+		}
+		g.Topics = append(g.Topics, t)
+	}
+	return g
+}
+
+func offsetFetchRespGroupIntoResp(g kmsg.OffsetFetchResponseGroup, into *kmsg.OffsetFetchResponse) {
+	into.ErrorCode = g.ErrorCode
+	into.Topics = into.Topics[:0]
+	for _, topic := range g.Topics {
+		t := kmsg.NewOffsetFetchResponseTopic()
+		t.Topic = topic.Topic
+		for _, partition := range topic.Partitions {
+			p := kmsg.NewOffsetFetchResponseTopicPartition()
+			p.Partition = partition.Partition
+			p.Offset = partition.Offset
+			p.LeaderEpoch = partition.LeaderEpoch
+			p.Metadata = partition.Metadata
+			p.ErrorCode = partition.ErrorCode
+			t.Partitions = append(t.Partitions, p)
+		}
+		into.Topics = append(into.Topics, t)
+	}
+}
+
+func (cl *offsetFetchSharder) shard(ctx context.Context, kreq kmsg.Request, lastErr error) ([]issueShard, bool, error) {
+	req := kreq.(*kmsg.OffsetFetchRequest)
+
+	// We always try batching and only split at the end if lastErr
+	// indicates the broker is too old. We convert to the batch format
+	// immediately.
+	dup := *req
+	req = &dup
+
+	if len(req.Groups) == 0 {
+		req.Groups = append(req.Groups, offsetFetchReqToGroup(req))
+	}
+	groups := make([]string, 0, len(req.Groups))
+	for i := range req.Groups {
+		groups = append(groups, req.Groups[i].Group)
+	}
+
+	coordinators := cl.loadCoordinators(ctx, coordinatorTypeGroup, groups...)
+
+	// Loading coordinators can have each group fail with its unique error,
+	// or with a kerr.Error that can be merged. Unique errors get their own
+	// failure shard, while kerr.Errors get merged.
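+	//
+	// kerr.Error values are singletons keyed by error code, so two groups
+	// that fail with the same code (for example, COORDINATOR_NOT_AVAILABLE)
+	// share a *kerr.Error key below and collapse into one failure shard.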
+ type unkerr struct { + err error + group kmsg.OffsetFetchRequestGroup + } + var ( + brokerReqs = make(map[int32]*kmsg.OffsetFetchRequest) + kerrs = make(map[*kerr.Error][]kmsg.OffsetFetchRequestGroup) + unkerrs []unkerr + ) + + newReq := func(groups ...kmsg.OffsetFetchRequestGroup) *kmsg.OffsetFetchRequest { + newReq := kmsg.NewPtrOffsetFetchRequest() + newReq.RequireStable = req.RequireStable + newReq.Groups = groups + return newReq + } + + for _, group := range req.Groups { + berr := coordinators[group.Group] + var ke *kerr.Error + switch { + case berr.err == nil: + brokerReq := brokerReqs[berr.b.meta.NodeID] + if brokerReq == nil { + brokerReq = newReq() + brokerReqs[berr.b.meta.NodeID] = brokerReq + } + brokerReq.Groups = append(brokerReq.Groups, group) + case errors.As(berr.err, &ke): + kerrs[ke] = append(kerrs[ke], group) + default: + unkerrs = append(unkerrs, unkerr{berr.err, group}) + } + } + + splitReq := errors.Is(lastErr, errBrokerTooOld) + + var issues []issueShard + for id, req := range brokerReqs { + if splitReq { + for _, group := range req.Groups { + req := offsetFetchGroupToReq(req.RequireStable, group) + issues = append(issues, issueShard{ + req: &pinReq{Request: req, pinMax: true, max: 7}, + broker: id, + }) + } + } else if len(req.Groups) == 1 { + single := offsetFetchGroupToReq(req.RequireStable, req.Groups[0]) + single.Groups = req.Groups + issues = append(issues, issueShard{ + req: single, + broker: id, + }) + } else { + issues = append(issues, issueShard{ + req: &pinReq{Request: req, pinMin: len(req.Groups) > 1, min: 8}, + broker: id, + }) + } + } + for _, unkerr := range unkerrs { + issues = append(issues, issueShard{ + req: newReq(unkerr.group), + err: unkerr.err, + }) + } + for kerr, groups := range kerrs { + issues = append(issues, issueShard{ + req: newReq(groups...), + err: kerr, + }) + } + + return issues, true, nil // reshardable to load correct coordinators +} + +func (cl *offsetFetchSharder) onResp(kreq kmsg.Request, kresp kmsg.Response) error { + req := kreq.(*kmsg.OffsetFetchRequest) + resp := kresp.(*kmsg.OffsetFetchResponse) + + switch len(resp.Groups) { + case 0: + // Requested no groups: move top level into batch for v0-v7 to + // v8 forward compat. + resp.Groups = append(resp.Groups, offsetFetchRespToGroup(req, resp)) + case 1: + // Requested 1 group v8+: set top level for v0-v7 back-compat. + offsetFetchRespGroupIntoResp(resp.Groups[0], resp) + default: + } + + var retErr error + for i := range resp.Groups { + group := &resp.Groups[i] + err := kerr.ErrorForCode(group.ErrorCode) + cl.maybeDeleteStaleCoordinator(group.Group, coordinatorTypeGroup, err) + onRespShardErr(&retErr, err) + } + + // For a final bit of extra fun, v0 and v1 do not have a top level + // error code but instead a per-partition error code. If the + // coordinator is loading &c, then all per-partition error codes are + // the same so we only need to look at the first partition. 
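+	// (For reference: OffsetFetch gained a top-level error code in v2 and
+	// batched groups in v8, which is what the conversions above and the
+	// version check below account for.)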
+ if resp.Version < 2 && len(resp.Topics) > 0 && len(resp.Topics[0].Partitions) > 0 { + code := resp.Topics[0].Partitions[0].ErrorCode + err := kerr.ErrorForCode(code) + cl.maybeDeleteStaleCoordinator(req.Group, coordinatorTypeGroup, err) + onRespShardErr(&retErr, err) + } + + return retErr +} + +func (*offsetFetchSharder) merge(sresps []ResponseShard) (kmsg.Response, error) { + merged := kmsg.NewPtrOffsetFetchResponse() + return merged, firstErrMerger(sresps, func(kresp kmsg.Response) { + resp := kresp.(*kmsg.OffsetFetchResponse) + merged.Version = resp.Version + merged.ThrottleMillis = resp.ThrottleMillis + merged.Groups = append(merged.Groups, resp.Groups...) + + // Old requests only support one group; *either* the commit + // used multiple groups and they are expecting the batch + // response, *or* the commit used one group and we always merge + // that one group into the old format. + if len(resp.Groups) == 1 { + offsetFetchRespGroupIntoResp(resp.Groups[0], merged) + } + }) +} + +// handles sharding FindCoordinatorRequest +type findCoordinatorSharder struct{ *Client } + +func findCoordinatorRespCoordinatorIntoResp(c kmsg.FindCoordinatorResponseCoordinator, into *kmsg.FindCoordinatorResponse) { + into.NodeID = c.NodeID + into.Host = c.Host + into.Port = c.Port + into.ErrorCode = c.ErrorCode + into.ErrorMessage = c.ErrorMessage +} + +func (*findCoordinatorSharder) shard(_ context.Context, kreq kmsg.Request, lastErr error) ([]issueShard, bool, error) { + req := kreq.(*kmsg.FindCoordinatorRequest) + + // We always try batching and only split at the end if lastErr + // indicates too old. We convert to batching immediately. + dup := *req + req = &dup + + uniq := make(map[string]struct{}, len(req.CoordinatorKeys)) + if len(req.CoordinatorKeys) == 0 { + uniq[req.CoordinatorKey] = struct{}{} + } else { + for _, key := range req.CoordinatorKeys { + uniq[key] = struct{}{} + } + } + req.CoordinatorKeys = req.CoordinatorKeys[:0] + for key := range uniq { + req.CoordinatorKeys = append(req.CoordinatorKeys, key) + } + if len(req.CoordinatorKeys) == 1 { + req.CoordinatorKey = req.CoordinatorKeys[0] + } + + splitReq := errors.Is(lastErr, errBrokerTooOld) + if !splitReq { + // With only one key, we do not need to split nor pin this. 
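+		// Illustrative example: a request for keys {"g1", "g2"} stays
+		// one batched request pinned to v4+ below; only if the broker
+		// is too old do we reshard into single-key requests, each
+		// pinned to at most v3.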
+ if len(req.CoordinatorKeys) <= 1 { + return []issueShard{{req: req, any: true}}, false, nil + } + return []issueShard{{ + req: &pinReq{Request: req, pinMin: true, min: 4}, + any: true, + }}, true, nil // this is "reshardable", in that we will split the request next + } + + var issues []issueShard + for _, key := range req.CoordinatorKeys { + sreq := kmsg.NewPtrFindCoordinatorRequest() + sreq.CoordinatorType = req.CoordinatorType + sreq.CoordinatorKey = key + issues = append(issues, issueShard{ + req: &pinReq{Request: sreq, pinMax: true, max: 3}, + any: true, + }) + } + return issues, false, nil // not reshardable +} + +func (*findCoordinatorSharder) onResp(kreq kmsg.Request, kresp kmsg.Response) error { + req := kreq.(*kmsg.FindCoordinatorRequest) + resp := kresp.(*kmsg.FindCoordinatorResponse) + + switch len(resp.Coordinators) { + case 0: + // Convert v3 and prior to v4+ + rc := kmsg.NewFindCoordinatorResponseCoordinator() + rc.Key = req.CoordinatorKey + rc.NodeID = resp.NodeID + rc.Host = resp.Host + rc.Port = resp.Port + rc.ErrorCode = resp.ErrorCode + rc.ErrorMessage = resp.ErrorMessage + resp.Coordinators = append(resp.Coordinators, rc) + case 1: + // Convert v4 to v3 and prior + findCoordinatorRespCoordinatorIntoResp(resp.Coordinators[0], resp) + } + + return nil +} + +func (*findCoordinatorSharder) merge(sresps []ResponseShard) (kmsg.Response, error) { + merged := kmsg.NewPtrFindCoordinatorResponse() + return merged, firstErrMerger(sresps, func(kresp kmsg.Response) { + resp := kresp.(*kmsg.FindCoordinatorResponse) + merged.Version = resp.Version + merged.ThrottleMillis = resp.ThrottleMillis + merged.Coordinators = append(merged.Coordinators, resp.Coordinators...) + + if len(resp.Coordinators) == 1 { + findCoordinatorRespCoordinatorIntoResp(resp.Coordinators[0], merged) + } + }) +} + +// handles sharding DescribeGroupsRequest +type describeGroupsSharder struct{ *Client } + +func (cl *describeGroupsSharder) shard(ctx context.Context, kreq kmsg.Request, _ error) ([]issueShard, bool, error) { + req := kreq.(*kmsg.DescribeGroupsRequest) + + coordinators := cl.loadCoordinators(ctx, coordinatorTypeGroup, req.Groups...) 
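+
+	// The split below mirrors offsetFetchSharder.shard above: groups are
+	// bucketed by coordinator broker, mergeable kerr.Error load failures
+	// share one error shard, and any other error gets its own shard.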
+ type unkerr struct { + err error + group string + } + var ( + brokerReqs = make(map[int32]*kmsg.DescribeGroupsRequest) + kerrs = make(map[*kerr.Error][]string) + unkerrs []unkerr + ) + + newReq := func(groups ...string) *kmsg.DescribeGroupsRequest { + newReq := kmsg.NewPtrDescribeGroupsRequest() + newReq.IncludeAuthorizedOperations = req.IncludeAuthorizedOperations + newReq.Groups = groups + return newReq + } + + for _, group := range req.Groups { + berr := coordinators[group] + var ke *kerr.Error + switch { + case berr.err == nil: + brokerReq := brokerReqs[berr.b.meta.NodeID] + if brokerReq == nil { + brokerReq = newReq() + brokerReqs[berr.b.meta.NodeID] = brokerReq + } + brokerReq.Groups = append(brokerReq.Groups, group) + case errors.As(berr.err, &ke): + kerrs[ke] = append(kerrs[ke], group) + default: + unkerrs = append(unkerrs, unkerr{berr.err, group}) + } + } + + var issues []issueShard + for id, req := range brokerReqs { + issues = append(issues, issueShard{ + req: req, + broker: id, + }) + } + for _, unkerr := range unkerrs { + issues = append(issues, issueShard{ + req: newReq(unkerr.group), + err: unkerr.err, + }) + } + for kerr, groups := range kerrs { + issues = append(issues, issueShard{ + req: newReq(groups...), + err: kerr, + }) + } + + return issues, true, nil // reshardable to load correct coordinators +} + +func (cl *describeGroupsSharder) onResp(_ kmsg.Request, kresp kmsg.Response) error { // cleanup any stale groups + resp := kresp.(*kmsg.DescribeGroupsResponse) + var retErr error + for i := range resp.Groups { + group := &resp.Groups[i] + err := kerr.ErrorForCode(group.ErrorCode) + cl.maybeDeleteStaleCoordinator(group.Group, coordinatorTypeGroup, err) + onRespShardErr(&retErr, err) + } + return retErr +} + +func (*describeGroupsSharder) merge(sresps []ResponseShard) (kmsg.Response, error) { + merged := kmsg.NewPtrDescribeGroupsResponse() + return merged, firstErrMerger(sresps, func(kresp kmsg.Response) { + resp := kresp.(*kmsg.DescribeGroupsResponse) + merged.Version = resp.Version + merged.ThrottleMillis = resp.ThrottleMillis + merged.Groups = append(merged.Groups, resp.Groups...) + }) +} + +// handles sharding ListGroupsRequest +type listGroupsSharder struct{ *Client } + +func (cl *listGroupsSharder) shard(ctx context.Context, kreq kmsg.Request, _ error) ([]issueShard, bool, error) { + req := kreq.(*kmsg.ListGroupsRequest) + return cl.allBrokersShardedReq(ctx, func() kmsg.Request { + dup := *req + return &dup + }) +} + +func (*listGroupsSharder) onResp(_ kmsg.Request, kresp kmsg.Response) error { + resp := kresp.(*kmsg.ListGroupsResponse) + return kerr.ErrorForCode(resp.ErrorCode) +} + +func (*listGroupsSharder) merge(sresps []ResponseShard) (kmsg.Response, error) { + merged := kmsg.NewPtrListGroupsResponse() + return merged, firstErrMerger(sresps, func(kresp kmsg.Response) { + resp := kresp.(*kmsg.ListGroupsResponse) + merged.Version = resp.Version + merged.ThrottleMillis = resp.ThrottleMillis + if merged.ErrorCode == 0 { + merged.ErrorCode = resp.ErrorCode + } + merged.Groups = append(merged.Groups, resp.Groups...) 
+ }) +} + +// handle sharding DeleteRecordsRequest +type deleteRecordsSharder struct{ *Client } + +func (cl *deleteRecordsSharder) shard(ctx context.Context, kreq kmsg.Request, _ error) ([]issueShard, bool, error) { + req := kreq.(*kmsg.DeleteRecordsRequest) + + var need []string + for _, topic := range req.Topics { + need = append(need, topic.Topic) + } + mapping, err := cl.fetchMappedMetadata(ctx, need, true) + if err != nil { + return nil, false, err + } + + brokerReqs := make(map[int32]map[string][]kmsg.DeleteRecordsRequestTopicPartition) + var unknowns unknownErrShards + + for _, topic := range req.Topics { + t := topic.Topic + tmapping, exists := mapping[t] + if err := unknownOrCode(exists, tmapping.t.ErrorCode); err != nil { + unknowns.errs(err, t, topic.Partitions) + continue + } + for _, partition := range topic.Partitions { + p, exists := tmapping.ps[partition.Partition] + if err := unknownOrCode(exists, p.ErrorCode); err != nil { + unknowns.err(err, t, partition) + continue + } + if err := noLeader(p.Leader); err != nil { + unknowns.err(err, t, partition) + continue + } + + brokerReq := brokerReqs[p.Leader] + if brokerReq == nil { + brokerReq = make(map[string][]kmsg.DeleteRecordsRequestTopicPartition) + brokerReqs[p.Leader] = brokerReq + } + brokerReq[t] = append(brokerReq[t], partition) + } + } + + mkreq := func() *kmsg.DeleteRecordsRequest { + r := kmsg.NewPtrDeleteRecordsRequest() + r.TimeoutMillis = req.TimeoutMillis + return r + } + + var issues []issueShard + for brokerID, brokerReq := range brokerReqs { + req := mkreq() + for topic, parts := range brokerReq { + reqTopic := kmsg.NewDeleteRecordsRequestTopic() + reqTopic.Topic = topic + reqTopic.Partitions = parts + req.Topics = append(req.Topics, reqTopic) + } + issues = append(issues, issueShard{ + req: req, + broker: brokerID, + }) + } + + return append(issues, unknowns.collect(mkreq, func(r *kmsg.DeleteRecordsRequest, topic string, parts []kmsg.DeleteRecordsRequestTopicPartition) { + reqTopic := kmsg.NewDeleteRecordsRequestTopic() + reqTopic.Topic = topic + reqTopic.Partitions = parts + r.Topics = append(r.Topics, reqTopic) + })...), true, nil // this is reshardable +} + +func (cl *deleteRecordsSharder) onResp(_ kmsg.Request, kresp kmsg.Response) error { + var ( + resp = kresp.(*kmsg.DeleteRecordsResponse) + del []string + retErr error + unknownTopic bool + ) + for i := range resp.Topics { + t := &resp.Topics[i] + for j := range t.Partitions { + p := &t.Partitions[j] + err := kerr.ErrorForCode(p.ErrorCode) + if err == kerr.UnknownTopicOrPartition || err == kerr.NotLeaderForPartition { + del = append(del, t.Topic) + unknownTopic = unknownTopic || err == kerr.UnknownTopicOrPartition + } + onRespShardErr(&retErr, err) + } + } + if cl.maybeDeleteMappedMetadata(unknownTopic, del...) { + return retErr + } + return nil +} + +func (*deleteRecordsSharder) merge(sresps []ResponseShard) (kmsg.Response, error) { + merged := kmsg.NewPtrDeleteRecordsResponse() + topics := make(map[string][]kmsg.DeleteRecordsResponseTopicPartition) + + firstErr := firstErrMerger(sresps, func(kresp kmsg.Response) { + resp := kresp.(*kmsg.DeleteRecordsResponse) + merged.Version = resp.Version + merged.ThrottleMillis = resp.ThrottleMillis + + for _, topic := range resp.Topics { + topics[topic.Topic] = append(topics[topic.Topic], topic.Partitions...) 
+ } + }) + for topic, partitions := range topics { + respTopic := kmsg.NewDeleteRecordsResponseTopic() + respTopic.Topic = topic + respTopic.Partitions = partitions + merged.Topics = append(merged.Topics, respTopic) + } + return merged, firstErr +} + +// handle sharding OffsetForLeaderEpochRequest +type offsetForLeaderEpochSharder struct{ *Client } + +func (cl *offsetForLeaderEpochSharder) shard(ctx context.Context, kreq kmsg.Request, _ error) ([]issueShard, bool, error) { + req := kreq.(*kmsg.OffsetForLeaderEpochRequest) + + var need []string + for _, topic := range req.Topics { + need = append(need, topic.Topic) + } + mapping, err := cl.fetchMappedMetadata(ctx, need, true) + if err != nil { + return nil, false, err + } + + brokerReqs := make(map[int32]map[string][]kmsg.OffsetForLeaderEpochRequestTopicPartition) + var unknowns unknownErrShards + + for _, topic := range req.Topics { + t := topic.Topic + tmapping, exists := mapping[t] + if err := unknownOrCode(exists, tmapping.t.ErrorCode); err != nil { + unknowns.errs(err, t, topic.Partitions) + continue + } + for _, partition := range topic.Partitions { + p, exists := tmapping.ps[partition.Partition] + if err := unknownOrCode(exists, p.ErrorCode); err != nil { + unknowns.err(err, t, partition) + continue + } + if err := noLeader(p.Leader); err != nil { + unknowns.err(err, t, partition) + continue + } + + brokerReq := brokerReqs[p.Leader] + if brokerReq == nil { + brokerReq = make(map[string][]kmsg.OffsetForLeaderEpochRequestTopicPartition) + brokerReqs[p.Leader] = brokerReq + } + brokerReq[topic.Topic] = append(brokerReq[topic.Topic], partition) + } + } + + mkreq := func() *kmsg.OffsetForLeaderEpochRequest { + r := kmsg.NewPtrOffsetForLeaderEpochRequest() + r.ReplicaID = req.ReplicaID + return r + } + + var issues []issueShard + for brokerID, brokerReq := range brokerReqs { + req := mkreq() + for topic, parts := range brokerReq { + reqTopic := kmsg.NewOffsetForLeaderEpochRequestTopic() + reqTopic.Topic = topic + reqTopic.Partitions = parts + req.Topics = append(req.Topics, reqTopic) + } + issues = append(issues, issueShard{ + req: req, + broker: brokerID, + }) + } + + return append(issues, unknowns.collect(mkreq, func(r *kmsg.OffsetForLeaderEpochRequest, topic string, parts []kmsg.OffsetForLeaderEpochRequestTopicPartition) { + reqTopic := kmsg.NewOffsetForLeaderEpochRequestTopic() + reqTopic.Topic = topic + reqTopic.Partitions = parts + r.Topics = append(r.Topics, reqTopic) + })...), true, nil // this is reshardable +} + +func (cl *offsetForLeaderEpochSharder) onResp(_ kmsg.Request, kresp kmsg.Response) error { + var ( + resp = kresp.(*kmsg.OffsetForLeaderEpochResponse) + del []string + retErr error + unknownTopic bool + ) + for i := range resp.Topics { + t := &resp.Topics[i] + for j := range t.Partitions { + p := &t.Partitions[j] + err := kerr.ErrorForCode(p.ErrorCode) + if err == kerr.UnknownTopicOrPartition || err == kerr.NotLeaderForPartition { + del = append(del, t.Topic) + unknownTopic = unknownTopic || err == kerr.UnknownTopicOrPartition + } + onRespShardErr(&retErr, err) + } + } + if cl.maybeDeleteMappedMetadata(unknownTopic, del...) 
{
+		return retErr
+	}
+	return nil
+}
+
+func (*offsetForLeaderEpochSharder) merge(sresps []ResponseShard) (kmsg.Response, error) {
+	merged := kmsg.NewPtrOffsetForLeaderEpochResponse()
+	topics := make(map[string][]kmsg.OffsetForLeaderEpochResponseTopicPartition)
+
+	firstErr := firstErrMerger(sresps, func(kresp kmsg.Response) {
+		resp := kresp.(*kmsg.OffsetForLeaderEpochResponse)
+		merged.Version = resp.Version
+		merged.ThrottleMillis = resp.ThrottleMillis
+
+		for _, topic := range resp.Topics {
+			topics[topic.Topic] = append(topics[topic.Topic], topic.Partitions...)
+		}
+	})
+	for topic, partitions := range topics {
+		respTopic := kmsg.NewOffsetForLeaderEpochResponseTopic()
+		respTopic.Topic = topic
+		respTopic.Partitions = partitions
+		merged.Topics = append(merged.Topics, respTopic)
+	}
+	return merged, firstErr
+}
+
+// handle sharding AddPartitionsToTxn, where v4+ switched to batch requests
+type addPartitionsToTxnSharder struct{ *Client }
+
+func addPartitionsReqToTxn(req *kmsg.AddPartitionsToTxnRequest) {
+	t := kmsg.NewAddPartitionsToTxnRequestTransaction()
+	t.TransactionalID = req.TransactionalID
+	t.ProducerID = req.ProducerID
+	t.ProducerEpoch = req.ProducerEpoch
+	for i := range req.Topics {
+		rt := &req.Topics[i]
+		tt := kmsg.NewAddPartitionsToTxnRequestTransactionTopic()
+		tt.Topic = rt.Topic
+		tt.Partitions = rt.Partitions
+		t.Topics = append(t.Topics, tt)
+	}
+	req.Transactions = append(req.Transactions, t)
+}
+
+func addPartitionsTxnToReq(req *kmsg.AddPartitionsToTxnRequest) {
+	if len(req.Transactions) != 1 {
+		return
+	}
+	t0 := &req.Transactions[0]
+	req.TransactionalID = t0.TransactionalID
+	req.ProducerID = t0.ProducerID
+	req.ProducerEpoch = t0.ProducerEpoch
+	for _, tt := range t0.Topics {
+		rt := kmsg.NewAddPartitionsToTxnRequestTopic()
+		rt.Topic = tt.Topic
+		rt.Partitions = tt.Partitions
+		req.Topics = append(req.Topics, rt)
+	}
+}
+
+func addPartitionsTxnToResp(resp *kmsg.AddPartitionsToTxnResponse) {
+	if len(resp.Transactions) == 0 {
+		return
+	}
+	t0 := &resp.Transactions[0]
+	for _, tt := range t0.Topics {
+		rt := kmsg.NewAddPartitionsToTxnResponseTopic()
+		rt.Topic = tt.Topic
+		for _, tp := range tt.Partitions {
+			rp := kmsg.NewAddPartitionsToTxnResponseTopicPartition()
+			rp.Partition = tp.Partition
+			rp.ErrorCode = tp.ErrorCode
+			rt.Partitions = append(rt.Partitions, rp)
+		}
+		resp.Topics = append(resp.Topics, rt)
+	}
+}
+
+func (cl *addPartitionsToTxnSharder) shard(ctx context.Context, kreq kmsg.Request, _ error) ([]issueShard, bool, error) {
+	req := kreq.(*kmsg.AddPartitionsToTxnRequest)
+
+	if len(req.Transactions) == 0 {
+		addPartitionsReqToTxn(req)
+	}
+	txnIDs := make([]string, 0, len(req.Transactions))
+	for i := range req.Transactions {
+		txnIDs = append(txnIDs, req.Transactions[i].TransactionalID)
+	}
+	coordinators := cl.loadCoordinators(ctx, coordinatorTypeTxn, txnIDs...)
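+
+	// For reference: v4+ AddPartitionsToTxn batches transactions (KIP-890),
+	// while v3 and prior carry a single transactional ID at the top level;
+	// the helpers above convert between the flat and batched shapes.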
+
+	type unkerr struct {
+		err error
+		txn kmsg.AddPartitionsToTxnRequestTransaction
+	}
+	var (
+		brokerReqs = make(map[int32]*kmsg.AddPartitionsToTxnRequest)
+		kerrs      = make(map[*kerr.Error][]kmsg.AddPartitionsToTxnRequestTransaction)
+		unkerrs    []unkerr
+	)
+
+	newReq := func(txns ...kmsg.AddPartitionsToTxnRequestTransaction) *kmsg.AddPartitionsToTxnRequest {
+		req := kmsg.NewPtrAddPartitionsToTxnRequest()
+		req.Transactions = txns
+		addPartitionsTxnToReq(req)
+		return req
+	}
+
+	for _, txn := range req.Transactions {
+		berr := coordinators[txn.TransactionalID]
+		var ke *kerr.Error
+		switch {
+		case berr.err == nil:
+			brokerReq := brokerReqs[berr.b.meta.NodeID]
+			if brokerReq == nil {
+				brokerReq = newReq(txn)
+				brokerReqs[berr.b.meta.NodeID] = brokerReq
+			} else {
+				brokerReq.Transactions = append(brokerReq.Transactions, txn)
+			}
+		case errors.As(berr.err, &ke):
+			kerrs[ke] = append(kerrs[ke], txn)
+		default:
+			unkerrs = append(unkerrs, unkerr{berr.err, txn})
+		}
+	}
+
+	var issues []issueShard
+	for id, req := range brokerReqs {
+		// v3 and prior can only express a single, non-verify-only
+		// transaction, so only such requests may be pinned to at most
+		// v3; anything else requires the v4+ batch format.
+		if len(req.Transactions) == 1 && !req.Transactions[0].VerifyOnly {
+			issues = append(issues, issueShard{
+				req:    &pinReq{Request: req, pinMax: true, max: 3},
+				broker: id,
+			})
+		} else {
+			issues = append(issues, issueShard{
+				req:    req,
+				broker: id,
+			})
+		}
+	}
+	for _, unkerr := range unkerrs {
+		issues = append(issues, issueShard{
+			req: newReq(unkerr.txn),
+			err: unkerr.err,
+		})
+	}
+	for kerr, txns := range kerrs {
+		issues = append(issues, issueShard{
+			req: newReq(txns...),
+			err: kerr,
+		})
+	}
+
+	return issues, true, nil // reshardable to load correct coordinators
+}
+
+func (cl *addPartitionsToTxnSharder) onResp(kreq kmsg.Request, kresp kmsg.Response) error {
+	req := kreq.(*kmsg.AddPartitionsToTxnRequest)
+	resp := kresp.(*kmsg.AddPartitionsToTxnResponse)
+
+	// We default to the top level error, which is used in v4+. For v3
+	// (case 0), we use the per-partition error, which is the same for
+	// every partition on not_coordinator errors.
+	code := resp.ErrorCode
+	if code == 0 && len(resp.Transactions) == 0 {
+		// Convert v3 and prior to v4+
+		resptxn := kmsg.NewAddPartitionsToTxnResponseTransaction()
+		resptxn.TransactionalID = req.TransactionalID
+		for _, rt := range resp.Topics {
+			respt := kmsg.NewAddPartitionsToTxnResponseTransactionTopic()
+			respt.Topic = rt.Topic
+			for _, rp := range rt.Partitions {
+				respp := kmsg.NewAddPartitionsToTxnResponseTransactionTopicPartition()
+				respp.Partition = rp.Partition
+				respp.ErrorCode = rp.ErrorCode
+				code = rp.ErrorCode // v3 and prior have per-partition errors, not top level
+				respt.Partitions = append(respt.Partitions, respp)
+			}
+			resptxn.Topics = append(resptxn.Topics, respt)
+		}
+		resp.Transactions = append(resp.Transactions, resptxn)
+	} else {
+		// Convert v4 to v3 and prior: either we have a top level error
+		// code or we have at least one transaction.
+		//
+		// If the code is non-zero, we convert it to per-partition error
+		// codes; v3 does not have a top level error.
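+		//
+		// That is, a non-zero top-level code (for example, from a
+		// not_coordinator failure) is echoed onto every requested
+		// partition below so v3-and-prior callers observe the same
+		// failure per partition.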
+ addPartitionsTxnToResp(resp) + if code != 0 { + for _, reqt := range req.Topics { + respt := kmsg.NewAddPartitionsToTxnResponseTopic() + respt.Topic = reqt.Topic + for _, reqp := range reqt.Partitions { + respp := kmsg.NewAddPartitionsToTxnResponseTopicPartition() + respp.Partition = reqp + respp.ErrorCode = resp.ErrorCode + respt.Partitions = append(respt.Partitions, respp) + } + resp.Topics = append(resp.Topics, respt) + } + } + } + if err := kerr.ErrorForCode(code); cl.maybeDeleteStaleCoordinator(req.TransactionalID, coordinatorTypeTxn, err) { + return err + } + return nil +} + +func (*addPartitionsToTxnSharder) merge(sresps []ResponseShard) (kmsg.Response, error) { + merged := kmsg.NewPtrAddPartitionsToTxnResponse() + + firstErr := firstErrMerger(sresps, func(kresp kmsg.Response) { + resp := kresp.(*kmsg.AddPartitionsToTxnResponse) + merged.Version = resp.Version + merged.ThrottleMillis = resp.ThrottleMillis + merged.ErrorCode = resp.ErrorCode + merged.Transactions = append(merged.Transactions, resp.Transactions...) + }) + addPartitionsTxnToResp(merged) + return merged, firstErr +} + +// handle sharding WriteTxnMarkersRequest +type writeTxnMarkersSharder struct{ *Client } + +func (cl *writeTxnMarkersSharder) shard(ctx context.Context, kreq kmsg.Request, _ error) ([]issueShard, bool, error) { + req := kreq.(*kmsg.WriteTxnMarkersRequest) + + var need []string + for _, marker := range req.Markers { + for _, topic := range marker.Topics { + need = append(need, topic.Topic) + } + } + mapping, err := cl.fetchMappedMetadata(ctx, need, true) + if err != nil { + return nil, false, err + } + + type pidEpochCommit struct { + pid int64 + epoch int16 + commit bool + } + + brokerReqs := make(map[int32]map[pidEpochCommit]map[string][]int32) + unknown := make(map[error]map[pidEpochCommit]map[string][]int32) // err => pec => topic => partitions + + addreq := func(b int32, pec pidEpochCommit, t string, p int32) { + pecs := brokerReqs[b] + if pecs == nil { + pecs = make(map[pidEpochCommit]map[string][]int32) + brokerReqs[b] = pecs + } + ts := pecs[pec] + if ts == nil { + ts = make(map[string][]int32) + pecs[pec] = ts + } + ts[t] = append(ts[t], p) + } + addunk := func(err error, pec pidEpochCommit, t string, p int32) { + pecs := unknown[err] + if pecs == nil { + pecs = make(map[pidEpochCommit]map[string][]int32) + unknown[err] = pecs + } + ts := pecs[pec] + if ts == nil { + ts = make(map[string][]int32) + pecs[pec] = ts + } + ts[t] = append(ts[t], p) + } + + for _, marker := range req.Markers { + pec := pidEpochCommit{ + marker.ProducerID, + marker.ProducerEpoch, + marker.Committed, + } + for _, topic := range marker.Topics { + t := topic.Topic + tmapping, exists := mapping[t] + if err := unknownOrCode(exists, tmapping.t.ErrorCode); err != nil { + for _, partition := range topic.Partitions { + addunk(err, pec, t, partition) + } + continue + } + for _, partition := range topic.Partitions { + p, exists := tmapping.ps[partition] + if err := unknownOrCode(exists, p.ErrorCode); err != nil { + addunk(err, pec, t, partition) + continue + } + if err := noLeader(p.Leader); err != nil { + addunk(err, pec, t, partition) + continue + } + addreq(p.Leader, pec, t, partition) + } + } + } + + mkreq := kmsg.NewPtrWriteTxnMarkersRequest + + var issues []issueShard + for brokerID, brokerReq := range brokerReqs { + req := mkreq() + for pec, topics := range brokerReq { + rm := kmsg.NewWriteTxnMarkersRequestMarker() + rm.ProducerID = pec.pid + rm.ProducerEpoch = pec.epoch + rm.Committed = pec.commit + for topic, parts := 
range topics { + rt := kmsg.NewWriteTxnMarkersRequestMarkerTopic() + rt.Topic = topic + rt.Partitions = parts + rm.Topics = append(rm.Topics, rt) + } + req.Markers = append(req.Markers, rm) + } + issues = append(issues, issueShard{ + req: req, + broker: brokerID, + }) + } + + for err, errReq := range unknown { + req := mkreq() + for pec, topics := range errReq { + rm := kmsg.NewWriteTxnMarkersRequestMarker() + rm.ProducerID = pec.pid + rm.ProducerEpoch = pec.epoch + rm.Committed = pec.commit + for topic, parts := range topics { + rt := kmsg.NewWriteTxnMarkersRequestMarkerTopic() + rt.Topic = topic + rt.Partitions = parts + rm.Topics = append(rm.Topics, rt) + } + req.Markers = append(req.Markers, rm) + } + issues = append(issues, issueShard{ + req: req, + err: err, + }) + } + return issues, true, nil // this is reshardable +} + +func (cl *writeTxnMarkersSharder) onResp(_ kmsg.Request, kresp kmsg.Response) error { + var ( + resp = kresp.(*kmsg.WriteTxnMarkersResponse) + del []string + retErr error + unknownTopic bool + ) + for i := range resp.Markers { + m := &resp.Markers[i] + for j := range m.Topics { + t := &m.Topics[j] + for k := range t.Partitions { + p := &t.Partitions[k] + err := kerr.ErrorForCode(p.ErrorCode) + if err == kerr.UnknownTopicOrPartition || err == kerr.NotLeaderForPartition { + del = append(del, t.Topic) + unknownTopic = unknownTopic || err == kerr.UnknownTopicOrPartition + } + onRespShardErr(&retErr, err) + } + } + } + if cl.maybeDeleteMappedMetadata(unknownTopic, del...) { + return retErr + } + return nil +} + +func (*writeTxnMarkersSharder) merge(sresps []ResponseShard) (kmsg.Response, error) { + merged := kmsg.NewPtrWriteTxnMarkersResponse() + markers := make(map[int64]map[string][]kmsg.WriteTxnMarkersResponseMarkerTopicPartition) + + firstErr := firstErrMerger(sresps, func(kresp kmsg.Response) { + resp := kresp.(*kmsg.WriteTxnMarkersResponse) + merged.Version = resp.Version + for _, marker := range resp.Markers { + topics := markers[marker.ProducerID] + if topics == nil { + topics = make(map[string][]kmsg.WriteTxnMarkersResponseMarkerTopicPartition) + markers[marker.ProducerID] = topics + } + for _, topic := range marker.Topics { + topics[topic.Topic] = append(topics[topic.Topic], topic.Partitions...) + } + } + }) + for pid, topics := range markers { + respMarker := kmsg.NewWriteTxnMarkersResponseMarker() + respMarker.ProducerID = pid + for topic, partitions := range topics { + respTopic := kmsg.NewWriteTxnMarkersResponseMarkerTopic() + respTopic.Topic = topic + respTopic.Partitions = append(respTopic.Partitions, partitions...) 
+ respMarker.Topics = append(respMarker.Topics, respTopic) + } + merged.Markers = append(merged.Markers, respMarker) + } + return merged, firstErr +} + +// handle sharding DescribeConfigsRequest +type describeConfigsSharder struct{ *Client } + +func (*describeConfigsSharder) shard(_ context.Context, kreq kmsg.Request, _ error) ([]issueShard, bool, error) { + req := kreq.(*kmsg.DescribeConfigsRequest) + + brokerReqs := make(map[int32][]kmsg.DescribeConfigsRequestResource) + var any []kmsg.DescribeConfigsRequestResource + + for i := range req.Resources { + resource := req.Resources[i] + switch resource.ResourceType { + case kmsg.ConfigResourceTypeBroker: + case kmsg.ConfigResourceTypeBrokerLogger: + default: + any = append(any, resource) + continue + } + id, err := strconv.ParseInt(resource.ResourceName, 10, 32) + if err != nil || id < 0 { + any = append(any, resource) + continue + } + brokerReqs[int32(id)] = append(brokerReqs[int32(id)], resource) + } + + var issues []issueShard + for brokerID, brokerReq := range brokerReqs { + newReq := kmsg.NewPtrDescribeConfigsRequest() + newReq.Resources = brokerReq + newReq.IncludeSynonyms = req.IncludeSynonyms + newReq.IncludeDocumentation = req.IncludeDocumentation + + issues = append(issues, issueShard{ + req: newReq, + broker: brokerID, + }) + } + + if len(any) > 0 { + newReq := kmsg.NewPtrDescribeConfigsRequest() + newReq.Resources = any + newReq.IncludeSynonyms = req.IncludeSynonyms + newReq.IncludeDocumentation = req.IncludeDocumentation + issues = append(issues, issueShard{ + req: newReq, + any: true, + }) + } + + return issues, false, nil // this is not reshardable, but the any block can go anywhere +} + +func (*describeConfigsSharder) onResp(kmsg.Request, kmsg.Response) error { return nil } // configs: topics not mapped, nothing retryable + +func (*describeConfigsSharder) merge(sresps []ResponseShard) (kmsg.Response, error) { + merged := kmsg.NewPtrDescribeConfigsResponse() + return merged, firstErrMerger(sresps, func(kresp kmsg.Response) { + resp := kresp.(*kmsg.DescribeConfigsResponse) + merged.Version = resp.Version + merged.ThrottleMillis = resp.ThrottleMillis + merged.Resources = append(merged.Resources, resp.Resources...) 
+ }) +} + +// handle sharding AlterConfigsRequest +type alterConfigsSharder struct{ *Client } + +func (*alterConfigsSharder) shard(_ context.Context, kreq kmsg.Request, _ error) ([]issueShard, bool, error) { + req := kreq.(*kmsg.AlterConfigsRequest) + + brokerReqs := make(map[int32][]kmsg.AlterConfigsRequestResource) + var any []kmsg.AlterConfigsRequestResource + + for i := range req.Resources { + resource := req.Resources[i] + switch resource.ResourceType { + case kmsg.ConfigResourceTypeBroker: + case kmsg.ConfigResourceTypeBrokerLogger: + default: + any = append(any, resource) + continue + } + id, err := strconv.ParseInt(resource.ResourceName, 10, 32) + if err != nil || id < 0 { + any = append(any, resource) + continue + } + brokerReqs[int32(id)] = append(brokerReqs[int32(id)], resource) + } + + var issues []issueShard + for brokerID, brokerReq := range brokerReqs { + newReq := kmsg.NewPtrAlterConfigsRequest() + newReq.Resources = brokerReq + newReq.ValidateOnly = req.ValidateOnly + + issues = append(issues, issueShard{ + req: newReq, + broker: brokerID, + }) + } + + if len(any) > 0 { + newReq := kmsg.NewPtrAlterConfigsRequest() + newReq.Resources = any + newReq.ValidateOnly = req.ValidateOnly + issues = append(issues, issueShard{ + req: newReq, + any: true, + }) + } + + return issues, false, nil // this is not reshardable, but the any block can go anywhere +} + +func (*alterConfigsSharder) onResp(kmsg.Request, kmsg.Response) error { return nil } // configs: topics not mapped, nothing retryable + +func (*alterConfigsSharder) merge(sresps []ResponseShard) (kmsg.Response, error) { + merged := kmsg.NewPtrAlterConfigsResponse() + return merged, firstErrMerger(sresps, func(kresp kmsg.Response) { + resp := kresp.(*kmsg.AlterConfigsResponse) + merged.Version = resp.Version + merged.ThrottleMillis = resp.ThrottleMillis + merged.Resources = append(merged.Resources, resp.Resources...) 
+ }) +} + +// handles sharding AlterReplicaLogDirsRequest +type alterReplicaLogDirsSharder struct{ *Client } + +func (cl *alterReplicaLogDirsSharder) shard(ctx context.Context, kreq kmsg.Request, _ error) ([]issueShard, bool, error) { + req := kreq.(*kmsg.AlterReplicaLogDirsRequest) + + needMap := make(map[string]struct{}) + for _, dir := range req.Dirs { + for _, topic := range dir.Topics { + needMap[topic.Topic] = struct{}{} + } + } + var need []string + for topic := range needMap { + need = append(need, topic) + } + mapping, err := cl.fetchMappedMetadata(ctx, need, false) // bypass cache, tricky to manage response + if err != nil { + return nil, false, err + } + + brokerReqs := make(map[int32]map[string]map[string][]int32) // broker => dir => topic => partitions + unknowns := make(map[error]map[string]map[string][]int32) // err => dir => topic => partitions + + addBroker := func(broker int32, dir, topic string, partition int32) { + brokerDirs := brokerReqs[broker] + if brokerDirs == nil { + brokerDirs = make(map[string]map[string][]int32) + brokerReqs[broker] = brokerDirs + } + dirTopics := brokerDirs[dir] + if dirTopics == nil { + dirTopics = make(map[string][]int32) + brokerDirs[dir] = dirTopics + } + dirTopics[topic] = append(dirTopics[topic], partition) + } + + addUnknown := func(err error, dir, topic string, partition int32) { + dirs := unknowns[err] + if dirs == nil { + dirs = make(map[string]map[string][]int32) + unknowns[err] = dirs + } + dirTopics := dirs[dir] + if dirTopics == nil { + dirTopics = make(map[string][]int32) + dirs[dir] = dirTopics + } + dirTopics[topic] = append(dirTopics[topic], partition) + } + + for _, dir := range req.Dirs { + for _, topic := range dir.Topics { + t := topic.Topic + tmapping, exists := mapping[t] + if err := unknownOrCode(exists, tmapping.t.ErrorCode); err != nil { + for _, partition := range topic.Partitions { + addUnknown(err, dir.Dir, t, partition) + } + continue + } + for _, partition := range topic.Partitions { + p, exists := tmapping.ps[partition] + if err := unknownOrCode(exists, p.ErrorCode); err != nil { + addUnknown(err, dir.Dir, t, partition) + continue + } + + for _, replica := range p.Replicas { + addBroker(replica, dir.Dir, t, partition) + } + } + } + } + + var issues []issueShard + for brokerID, brokerReq := range brokerReqs { + req := kmsg.NewPtrAlterReplicaLogDirsRequest() + for dir, topics := range brokerReq { + rd := kmsg.NewAlterReplicaLogDirsRequestDir() + rd.Dir = dir + for topic, partitions := range topics { + rdTopic := kmsg.NewAlterReplicaLogDirsRequestDirTopic() + rdTopic.Topic = topic + rdTopic.Partitions = partitions + rd.Topics = append(rd.Topics, rdTopic) + } + req.Dirs = append(req.Dirs, rd) + } + + issues = append(issues, issueShard{ + req: req, + broker: brokerID, + }) + } + + for err, dirs := range unknowns { + req := kmsg.NewPtrAlterReplicaLogDirsRequest() + for dir, topics := range dirs { + rd := kmsg.NewAlterReplicaLogDirsRequestDir() + rd.Dir = dir + for topic, partitions := range topics { + rdTopic := kmsg.NewAlterReplicaLogDirsRequestDirTopic() + rdTopic.Topic = topic + rdTopic.Partitions = partitions + rd.Topics = append(rd.Topics, rdTopic) + } + req.Dirs = append(req.Dirs, rd) + } + + issues = append(issues, issueShard{ + req: req, + err: err, + }) + } + + return issues, true, nil // this is reshardable +} + +func (*alterReplicaLogDirsSharder) onResp(kmsg.Request, kmsg.Response) error { return nil } // topic / partitions: not retried + +// merge does not make sense for this function, but we provide a 
lossy one anyway.
+func (*alterReplicaLogDirsSharder) merge(sresps []ResponseShard) (kmsg.Response, error) {
+	merged := kmsg.NewPtrAlterReplicaLogDirsResponse()
+	topics := make(map[string][]kmsg.AlterReplicaLogDirsResponseTopicPartition)
+
+	firstErr := firstErrMerger(sresps, func(kresp kmsg.Response) {
+		resp := kresp.(*kmsg.AlterReplicaLogDirsResponse)
+		merged.Version = resp.Version
+		merged.ThrottleMillis = resp.ThrottleMillis
+
+		for _, topic := range resp.Topics {
+			topics[topic.Topic] = append(topics[topic.Topic], topic.Partitions...)
+		}
+	})
+	for topic, partitions := range topics {
+		respTopic := kmsg.NewAlterReplicaLogDirsResponseTopic()
+		respTopic.Topic = topic
+		respTopic.Partitions = partitions
+		merged.Topics = append(merged.Topics, respTopic)
+	}
+	return merged, firstErr
+}
+
+// handles sharding DescribeLogDirsRequest
+type describeLogDirsSharder struct{ *Client }
+
+func (cl *describeLogDirsSharder) shard(ctx context.Context, kreq kmsg.Request, _ error) ([]issueShard, bool, error) {
+	req := kreq.(*kmsg.DescribeLogDirsRequest)
+
+	// If req.Topics is nil, the request is to describe all logdirs. Thus,
+	// we will issue the request to all brokers (similar to ListGroups).
+	if req.Topics == nil {
+		return cl.allBrokersShardedReq(ctx, func() kmsg.Request {
+			dup := *req
+			return &dup
+		})
+	}
+
+	var need []string
+	for _, topic := range req.Topics {
+		need = append(need, topic.Topic)
+	}
+	mapping, err := cl.fetchMappedMetadata(ctx, need, false) // bypass cache, tricky to manage response
+	if err != nil {
+		return nil, false, err
+	}
+
+	brokerReqs := make(map[int32]map[string][]int32)
+	var unknowns unknownErrShards
+
+	for _, topic := range req.Topics {
+		t := topic.Topic
+		tmapping, exists := mapping[t]
+		if err := unknownOrCode(exists, tmapping.t.ErrorCode); err != nil {
+			unknowns.errs(err, t, topic.Partitions)
+			continue
+		}
+		for _, partition := range topic.Partitions {
+			p, exists := tmapping.ps[partition]
+			if err := unknownOrCode(exists, p.ErrorCode); err != nil {
+				unknowns.err(err, t, partition)
+				continue
+			}
+
+			for _, replica := range p.Replicas {
+				brokerReq := brokerReqs[replica]
+				if brokerReq == nil {
+					brokerReq = make(map[string][]int32)
+					brokerReqs[replica] = brokerReq
+				}
+				brokerReq[topic.Topic] = append(brokerReq[topic.Topic], partition)
+			}
+		}
+	}
+
+	mkreq := kmsg.NewPtrDescribeLogDirsRequest
+
+	var issues []issueShard
+	for brokerID, brokerReq := range brokerReqs {
+		req := mkreq()
+		for topic, parts := range brokerReq {
+			reqTopic := kmsg.NewDescribeLogDirsRequestTopic()
+			reqTopic.Topic = topic
+			reqTopic.Partitions = parts
+			req.Topics = append(req.Topics, reqTopic)
+		}
+		issues = append(issues, issueShard{
+			req:    req,
+			broker: brokerID,
+		})
+	}
+
+	return append(issues, unknowns.collect(mkreq, func(r *kmsg.DescribeLogDirsRequest, topic string, parts []int32) {
+		reqTopic := kmsg.NewDescribeLogDirsRequestTopic()
+		reqTopic.Topic = topic
+		reqTopic.Partitions = parts
+		r.Topics = append(r.Topics, reqTopic)
+	})...), true, nil // this is reshardable
+}
+
+func (*describeLogDirsSharder) onResp(kmsg.Request, kmsg.Response) error { return nil } // topic / partitions: not retried
+
+// merge does not make sense for this function, but we provide one anyway.
+// We lose the error code for directories.
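+// Results are re-keyed by dir and topic across all broker responses, so only
+// partition-level data survives and each broker's per-directory ErrorCode is
+// dropped in the process.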
+func (*describeLogDirsSharder) merge(sresps []ResponseShard) (kmsg.Response, error) { + merged := kmsg.NewPtrDescribeLogDirsResponse() + dirs := make(map[string]map[string][]kmsg.DescribeLogDirsResponseDirTopicPartition) + + firstErr := firstErrMerger(sresps, func(kresp kmsg.Response) { + resp := kresp.(*kmsg.DescribeLogDirsResponse) + merged.Version = resp.Version + merged.ThrottleMillis = resp.ThrottleMillis + + for _, dir := range resp.Dirs { + mergeDir := dirs[dir.Dir] + if mergeDir == nil { + mergeDir = make(map[string][]kmsg.DescribeLogDirsResponseDirTopicPartition) + dirs[dir.Dir] = mergeDir + } + for _, topic := range dir.Topics { + mergeDir[topic.Topic] = append(mergeDir[topic.Topic], topic.Partitions...) + } + } + }) + for dir, topics := range dirs { + md := kmsg.NewDescribeLogDirsResponseDir() + md.Dir = dir + for topic, partitions := range topics { + mdTopic := kmsg.NewDescribeLogDirsResponseDirTopic() + mdTopic.Topic = topic + mdTopic.Partitions = partitions + md.Topics = append(md.Topics, mdTopic) + } + merged.Dirs = append(merged.Dirs, md) + } + return merged, firstErr +} + +// handles sharding DeleteGroupsRequest +type deleteGroupsSharder struct{ *Client } + +func (cl *deleteGroupsSharder) shard(ctx context.Context, kreq kmsg.Request, _ error) ([]issueShard, bool, error) { + req := kreq.(*kmsg.DeleteGroupsRequest) + + coordinators := cl.loadCoordinators(ctx, coordinatorTypeGroup, req.Groups...) + type unkerr struct { + err error + group string + } + var ( + brokerReqs = make(map[int32]*kmsg.DeleteGroupsRequest) + kerrs = make(map[*kerr.Error][]string) + unkerrs []unkerr + ) + + newReq := func(groups ...string) *kmsg.DeleteGroupsRequest { + newReq := kmsg.NewPtrDeleteGroupsRequest() + newReq.Groups = groups + return newReq + } + + for _, group := range req.Groups { + berr := coordinators[group] + var ke *kerr.Error + switch { + case berr.err == nil: + brokerReq := brokerReqs[berr.b.meta.NodeID] + if brokerReq == nil { + brokerReq = newReq() + brokerReqs[berr.b.meta.NodeID] = brokerReq + } + brokerReq.Groups = append(brokerReq.Groups, group) + case errors.As(berr.err, &ke): + kerrs[ke] = append(kerrs[ke], group) + default: + unkerrs = append(unkerrs, unkerr{berr.err, group}) + } + } + + var issues []issueShard + for id, req := range brokerReqs { + issues = append(issues, issueShard{ + req: req, + broker: id, + }) + } + for _, unkerr := range unkerrs { + issues = append(issues, issueShard{ + req: newReq(unkerr.group), + err: unkerr.err, + }) + } + for kerr, groups := range kerrs { + issues = append(issues, issueShard{ + req: newReq(groups...), + err: kerr, + }) + } + + return issues, true, nil // reshardable to load correct coordinators +} + +func (cl *deleteGroupsSharder) onResp(_ kmsg.Request, kresp kmsg.Response) error { + resp := kresp.(*kmsg.DeleteGroupsResponse) + var retErr error + for i := range resp.Groups { + group := &resp.Groups[i] + err := kerr.ErrorForCode(group.ErrorCode) + cl.maybeDeleteStaleCoordinator(group.Group, coordinatorTypeGroup, err) + onRespShardErr(&retErr, err) + } + return retErr +} + +func (*deleteGroupsSharder) merge(sresps []ResponseShard) (kmsg.Response, error) { + merged := kmsg.NewPtrDeleteGroupsResponse() + return merged, firstErrMerger(sresps, func(kresp kmsg.Response) { + resp := kresp.(*kmsg.DeleteGroupsResponse) + merged.Version = resp.Version + merged.ThrottleMillis = resp.ThrottleMillis + merged.Groups = append(merged.Groups, resp.Groups...) 
+ }) +} + +// handle sharding IncrementalAlterConfigsRequest +type incrementalAlterConfigsSharder struct{ *Client } + +func (*incrementalAlterConfigsSharder) shard(_ context.Context, kreq kmsg.Request, _ error) ([]issueShard, bool, error) { + req := kreq.(*kmsg.IncrementalAlterConfigsRequest) + + brokerReqs := make(map[int32][]kmsg.IncrementalAlterConfigsRequestResource) + var any []kmsg.IncrementalAlterConfigsRequestResource + + for i := range req.Resources { + resource := req.Resources[i] + switch resource.ResourceType { + case kmsg.ConfigResourceTypeBroker: + case kmsg.ConfigResourceTypeBrokerLogger: + default: + any = append(any, resource) + continue + } + id, err := strconv.ParseInt(resource.ResourceName, 10, 32) + if err != nil || id < 0 { + any = append(any, resource) + continue + } + brokerReqs[int32(id)] = append(brokerReqs[int32(id)], resource) + } + + var issues []issueShard + for brokerID, brokerReq := range brokerReqs { + newReq := kmsg.NewPtrIncrementalAlterConfigsRequest() + newReq.Resources = brokerReq + newReq.ValidateOnly = req.ValidateOnly + + issues = append(issues, issueShard{ + req: newReq, + broker: brokerID, + }) + } + + if len(any) > 0 { + newReq := kmsg.NewPtrIncrementalAlterConfigsRequest() + newReq.Resources = any + newReq.ValidateOnly = req.ValidateOnly + issues = append(issues, issueShard{ + req: newReq, + any: true, + }) + } + + return issues, false, nil // this is not reshardable, but the any block can go anywhere +} + +func (*incrementalAlterConfigsSharder) onResp(kmsg.Request, kmsg.Response) error { return nil } // configs: topics not mapped, nothing retryable + +func (*incrementalAlterConfigsSharder) merge(sresps []ResponseShard) (kmsg.Response, error) { + merged := kmsg.NewPtrIncrementalAlterConfigsResponse() + return merged, firstErrMerger(sresps, func(kresp kmsg.Response) { + resp := kresp.(*kmsg.IncrementalAlterConfigsResponse) + merged.Version = resp.Version + merged.ThrottleMillis = resp.ThrottleMillis + merged.Resources = append(merged.Resources, resp.Resources...) 
+ }) +} + +// handle sharding DescribeProducersRequest +type describeProducersSharder struct{ *Client } + +func (cl *describeProducersSharder) shard(ctx context.Context, kreq kmsg.Request, _ error) ([]issueShard, bool, error) { + req := kreq.(*kmsg.DescribeProducersRequest) + + var need []string + for _, topic := range req.Topics { + need = append(need, topic.Topic) + } + mapping, err := cl.fetchMappedMetadata(ctx, need, true) + if err != nil { + return nil, false, err + } + + brokerReqs := make(map[int32]map[string][]int32) // broker => topic => partitions + var unknowns unknownErrShards + + for _, topic := range req.Topics { + t := topic.Topic + tmapping, exists := mapping[t] + if err := unknownOrCode(exists, tmapping.t.ErrorCode); err != nil { + unknowns.errs(err, t, topic.Partitions) + continue + } + for _, partition := range topic.Partitions { + p, exists := tmapping.ps[partition] + if err := unknownOrCode(exists, p.ErrorCode); err != nil { + unknowns.err(err, t, partition) + continue + } + + brokerReq := brokerReqs[p.Leader] + if brokerReq == nil { + brokerReq = make(map[string][]int32) + brokerReqs[p.Leader] = brokerReq + } + brokerReq[topic.Topic] = append(brokerReq[topic.Topic], partition) + } + } + + mkreq := kmsg.NewPtrDescribeProducersRequest + + var issues []issueShard + for brokerID, brokerReq := range brokerReqs { + req := mkreq() + for topic, parts := range brokerReq { + reqTopic := kmsg.NewDescribeProducersRequestTopic() + reqTopic.Topic = topic + reqTopic.Partitions = parts + req.Topics = append(req.Topics, reqTopic) + } + issues = append(issues, issueShard{ + req: req, + broker: brokerID, + }) + } + + return append(issues, unknowns.collect(mkreq, func(r *kmsg.DescribeProducersRequest, topic string, parts []int32) { + reqTopic := kmsg.NewDescribeProducersRequestTopic() + reqTopic.Topic = topic + reqTopic.Partitions = parts + r.Topics = append(r.Topics, reqTopic) + })...), true, nil // this is reshardable +} + +func (cl *describeProducersSharder) onResp(_ kmsg.Request, kresp kmsg.Response) error { + var ( + resp = kresp.(*kmsg.DescribeProducersResponse) + del []string + retErr error + unknownTopic bool + ) + for i := range resp.Topics { + t := &resp.Topics[i] + for j := range t.Partitions { + p := &t.Partitions[j] + err := kerr.ErrorForCode(p.ErrorCode) + if err == kerr.UnknownTopicOrPartition || err == kerr.NotLeaderForPartition { + del = append(del, t.Topic) + unknownTopic = unknownTopic || err == kerr.UnknownTopicOrPartition + } + onRespShardErr(&retErr, err) + } + } + if cl.maybeDeleteMappedMetadata(unknownTopic, del...) { + return retErr + } + return nil +} + +func (*describeProducersSharder) merge(sresps []ResponseShard) (kmsg.Response, error) { + merged := kmsg.NewPtrDescribeProducersResponse() + topics := make(map[string][]kmsg.DescribeProducersResponseTopicPartition) + firstErr := firstErrMerger(sresps, func(kresp kmsg.Response) { + resp := kresp.(*kmsg.DescribeProducersResponse) + merged.Version = resp.Version + merged.ThrottleMillis = resp.ThrottleMillis + + for _, topic := range resp.Topics { + topics[topic.Topic] = append(topics[topic.Topic], topic.Partitions...) 
+ } + }) + for topic, partitions := range topics { + respTopic := kmsg.NewDescribeProducersResponseTopic() + respTopic.Topic = topic + respTopic.Partitions = partitions + merged.Topics = append(merged.Topics, respTopic) + } + return merged, firstErr +} + +// handles sharding DescribeTransactionsRequest +type describeTransactionsSharder struct{ *Client } + +func (cl *describeTransactionsSharder) shard(ctx context.Context, kreq kmsg.Request, _ error) ([]issueShard, bool, error) { + req := kreq.(*kmsg.DescribeTransactionsRequest) + + coordinators := cl.loadCoordinators(ctx, coordinatorTypeTxn, req.TransactionalIDs...) + type unkerr struct { + err error + txnID string + } + var ( + brokerReqs = make(map[int32]*kmsg.DescribeTransactionsRequest) + kerrs = make(map[*kerr.Error][]string) + unkerrs []unkerr + ) + + newReq := func(txnIDs ...string) *kmsg.DescribeTransactionsRequest { + r := kmsg.NewPtrDescribeTransactionsRequest() + r.TransactionalIDs = txnIDs + return r + } + + for _, txnID := range req.TransactionalIDs { + berr := coordinators[txnID] + var ke *kerr.Error + switch { + case berr.err == nil: + brokerReq := brokerReqs[berr.b.meta.NodeID] + if brokerReq == nil { + brokerReq = newReq() + brokerReqs[berr.b.meta.NodeID] = brokerReq + } + brokerReq.TransactionalIDs = append(brokerReq.TransactionalIDs, txnID) + case errors.As(berr.err, &ke): + kerrs[ke] = append(kerrs[ke], txnID) + default: + unkerrs = append(unkerrs, unkerr{berr.err, txnID}) + } + } + + var issues []issueShard + for id, req := range brokerReqs { + issues = append(issues, issueShard{ + req: req, + broker: id, + }) + } + for _, unkerr := range unkerrs { + issues = append(issues, issueShard{ + req: newReq(unkerr.txnID), + err: unkerr.err, + }) + } + for kerr, txnIDs := range kerrs { + issues = append(issues, issueShard{ + req: newReq(txnIDs...), + err: kerr, + }) + } + + return issues, true, nil // reshardable to load correct coordinators +} + +func (cl *describeTransactionsSharder) onResp(_ kmsg.Request, kresp kmsg.Response) error { // cleanup any stale coordinators + resp := kresp.(*kmsg.DescribeTransactionsResponse) + var retErr error + for i := range resp.TransactionStates { + txnState := &resp.TransactionStates[i] + err := kerr.ErrorForCode(txnState.ErrorCode) + cl.maybeDeleteStaleCoordinator(txnState.TransactionalID, coordinatorTypeTxn, err) + onRespShardErr(&retErr, err) + } + return retErr +} + +func (*describeTransactionsSharder) merge(sresps []ResponseShard) (kmsg.Response, error) { + merged := kmsg.NewPtrDescribeTransactionsResponse() + return merged, firstErrMerger(sresps, func(kresp kmsg.Response) { + resp := kresp.(*kmsg.DescribeTransactionsResponse) + merged.Version = resp.Version + merged.ThrottleMillis = resp.ThrottleMillis + merged.TransactionStates = append(merged.TransactionStates, resp.TransactionStates...) 
+ }) +} + +// handles sharding ListTransactionsRequest +type listTransactionsSharder struct{ *Client } + +func (cl *listTransactionsSharder) shard(ctx context.Context, kreq kmsg.Request, _ error) ([]issueShard, bool, error) { + req := kreq.(*kmsg.ListTransactionsRequest) + return cl.allBrokersShardedReq(ctx, func() kmsg.Request { + dup := *req + return &dup + }) +} + +func (*listTransactionsSharder) onResp(_ kmsg.Request, kresp kmsg.Response) error { + resp := kresp.(*kmsg.ListTransactionsResponse) + return kerr.ErrorForCode(resp.ErrorCode) +} + +func (*listTransactionsSharder) merge(sresps []ResponseShard) (kmsg.Response, error) { + merged := kmsg.NewPtrListTransactionsResponse() + + unknownStates := make(map[string]struct{}) + + firstErr := firstErrMerger(sresps, func(kresp kmsg.Response) { + resp := kresp.(*kmsg.ListTransactionsResponse) + merged.Version = resp.Version + merged.ThrottleMillis = resp.ThrottleMillis + if merged.ErrorCode == 0 { + merged.ErrorCode = resp.ErrorCode + } + for _, state := range resp.UnknownStateFilters { + unknownStates[state] = struct{}{} + } + merged.TransactionStates = append(merged.TransactionStates, resp.TransactionStates...) + }) + for unknownState := range unknownStates { + merged.UnknownStateFilters = append(merged.UnknownStateFilters, unknownState) + } + + return merged, firstErr +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/compression.go b/vendor/github.com/twmb/franz-go/pkg/kgo/compression.go new file mode 100644 index 00000000000..fe8ad645bbd --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/compression.go @@ -0,0 +1,346 @@ +package kgo + +import ( + "bytes" + "compress/gzip" + "encoding/binary" + "errors" + "io" + "runtime" + "sync" + + "github.com/klauspost/compress/s2" + "github.com/klauspost/compress/zstd" + "github.com/pierrec/lz4/v4" +) + +var byteBuffers = sync.Pool{New: func() any { return bytes.NewBuffer(make([]byte, 8<<10)) }} + +type codecType int8 + +const ( + codecNone codecType = iota + codecGzip + codecSnappy + codecLZ4 + codecZstd +) + +// CompressionCodec configures how records are compressed before being sent. +// +// Records are compressed within individual topics and partitions, inside of a +// RecordBatch. All records in a RecordBatch are compressed into one record +// for that batch. +type CompressionCodec struct { + codec codecType + level int +} + +// NoCompression is a compression option that avoids compression. This can +// always be used as a fallback compression. +func NoCompression() CompressionCodec { return CompressionCodec{codecNone, 0} } + +// GzipCompression enables gzip compression with the default compression level. +func GzipCompression() CompressionCodec { return CompressionCodec{codecGzip, gzip.DefaultCompression} } + +// SnappyCompression enables snappy compression. +func SnappyCompression() CompressionCodec { return CompressionCodec{codecSnappy, 0} } + +// Lz4Compression enables lz4 compression with the fastest compression level. +func Lz4Compression() CompressionCodec { return CompressionCodec{codecLZ4, 0} } + +// ZstdCompression enables zstd compression with the default compression level. +func ZstdCompression() CompressionCodec { return CompressionCodec{codecZstd, 0} } + +// WithLevel changes the compression codec's "level", effectively allowing for +// higher or lower compression ratios at the expense of CPU speed. +// +// For the zstd package, the level is a typed int; simply convert the type back +// to an int for this function. 
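+//
+// Illustrative usage (the levels shown are examples, not recommendations):
+//
+//	gz := GzipCompression().WithLevel(gzip.BestCompression)
+//	zs := ZstdCompression().WithLevel(int(zstd.SpeedBestCompression))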
+// +// If the level is invalid, compressors just use a default level. +func (c CompressionCodec) WithLevel(level int) CompressionCodec { + c.level = level + return c +} + +type compressor struct { + options []codecType + gzPool sync.Pool + lz4Pool sync.Pool + zstdPool sync.Pool +} + +func newCompressor(codecs ...CompressionCodec) (*compressor, error) { + if len(codecs) == 0 { + return nil, nil + } + + used := make(map[codecType]bool) // we keep one type of codec per CompressionCodec + var keepIdx int + for _, codec := range codecs { + if _, exists := used[codec.codec]; exists { + continue + } + used[codec.codec] = true + codecs[keepIdx] = codec + keepIdx++ + } + codecs = codecs[:keepIdx] + + for _, codec := range codecs { + if codec.codec < 0 || codec.codec > 4 { + return nil, errors.New("unknown compression codec") + } + } + + c := new(compressor) + +out: + for _, codec := range codecs { + c.options = append(c.options, codec.codec) + switch codec.codec { + case codecNone: + break out + case codecGzip: + level := gzip.DefaultCompression + if codec.level != 0 { + // adopt the requested level only if gzip accepts it + if _, err := gzip.NewWriterLevel(nil, codec.level); err == nil { + level = codec.level + } + } + c.gzPool = sync.Pool{New: func() any { c, _ := gzip.NewWriterLevel(nil, level); return c }} + case codecSnappy: // (no pool needed for snappy) + case codecLZ4: + level := codec.level + if level < 0 { + level = 0 // 0 == lz4.Fast + } + fn := func() any { return lz4.NewWriter(new(bytes.Buffer)) } + w := lz4.NewWriter(new(bytes.Buffer)) + if err := w.Apply(lz4.CompressionLevelOption(lz4.CompressionLevel(level))); err == nil { + fn = func() any { + w := lz4.NewWriter(new(bytes.Buffer)) + w.Apply(lz4.CompressionLevelOption(lz4.CompressionLevel(level))) + return w + } + } + w.Close() + c.lz4Pool = sync.Pool{New: fn} + case codecZstd: + opts := []zstd.EOption{ + zstd.WithWindowSize(64 << 10), + zstd.WithEncoderConcurrency(1), + zstd.WithZeroFrames(true), + } + fn := func() any { + zstdEnc, _ := zstd.NewWriter(nil, opts...) + r := &zstdEncoder{zstdEnc} + runtime.SetFinalizer(r, func(r *zstdEncoder) { r.inner.Close() }) + return r + } + zstdEnc, err := zstd.NewWriter(nil, append(opts, zstd.WithEncoderLevel(zstd.EncoderLevel(codec.level)))...) + if err == nil { + zstdEnc.Close() + opts = append(opts, zstd.WithEncoderLevel(zstd.EncoderLevel(codec.level))) + } + c.zstdPool = sync.Pool{New: fn} + } + } + + if c.options[0] == codecNone { + return nil, nil // first codec was passthrough + } + + return c, nil +} + +type zstdEncoder struct { + inner *zstd.Encoder +} + +// Compress compresses src to buf, returning buf's inner slice once done or nil +// if an error is encountered. +// +// The writer should be put back to its pool after the returned slice is done +// being used.
+func (c *compressor) compress(dst *bytes.Buffer, src []byte, produceRequestVersion int16) ([]byte, codecType) { + var use codecType + for _, option := range c.options { + if option == codecZstd && produceRequestVersion < 7 { + continue + } + use = option + break + } + + var out []byte + switch use { + case codecNone: + return src, 0 + case codecGzip: + gz := c.gzPool.Get().(*gzip.Writer) + defer c.gzPool.Put(gz) + gz.Reset(dst) + if _, err := gz.Write(src); err != nil { + return nil, -1 + } + if err := gz.Close(); err != nil { + return nil, -1 + } + out = dst.Bytes() + case codecLZ4: + lz := c.lz4Pool.Get().(*lz4.Writer) + defer c.lz4Pool.Put(lz) + lz.Reset(dst) + if _, err := lz.Write(src); err != nil { + return nil, -1 + } + if err := lz.Close(); err != nil { + return nil, -1 + } + out = dst.Bytes() + case codecSnappy: + // Because the Snappy and Zstd codecs do not accept an io.Writer interface + // and directly take a []byte slice, here, the underlying []byte slice (`dst`) + // obtained from the bytes.Buffer{} from the pool is passed. + // As the `Write()` method on the buffer isn't used, its internal + // book-keeping goes out of sync, making the buffer unusable for further + // reading and writing via its usual methods (eg: accessing via `Bytes()`). For subsequent + // reads, the underlying slice has to be used directly. + // + // In this particular context, it is acceptable as there are no subsequent + // operations performed on the buffer and it is immediately returned to the + // pool and `Reset()` the next time it is obtained and used where `compress()` + // is called. + if l := s2.MaxEncodedLen(len(src)); l > dst.Cap() { + dst.Grow(l) + } + out = s2.EncodeSnappy(dst.Bytes(), src) + case codecZstd: + zstdEnc := c.zstdPool.Get().(*zstdEncoder) + defer c.zstdPool.Put(zstdEnc) + if l := zstdEnc.inner.MaxEncodedSize(len(src)); l > dst.Cap() { + dst.Grow(l) + } + out = zstdEnc.inner.EncodeAll(src, dst.Bytes()) + } + + return out, use +} + +type decompressor struct { + ungzPool sync.Pool + unlz4Pool sync.Pool + unzstdPool sync.Pool +} + +func newDecompressor() *decompressor { + d := &decompressor{ + ungzPool: sync.Pool{ + New: func() any { return new(gzip.Reader) }, + }, + unlz4Pool: sync.Pool{ + New: func() any { return lz4.NewReader(nil) }, + }, + unzstdPool: sync.Pool{ + New: func() any { + zstdDec, _ := zstd.NewReader(nil, + zstd.WithDecoderLowmem(true), + zstd.WithDecoderConcurrency(1), + ) + r := &zstdDecoder{zstdDec} + runtime.SetFinalizer(r, func(r *zstdDecoder) { + r.inner.Close() + }) + return r + }, + }, + } + return d +} + +type zstdDecoder struct { + inner *zstd.Decoder +} + +func (d *decompressor) decompress(src []byte, codec byte) ([]byte, error) { + // Early return in case there is no compression + compCodec := codecType(codec) + if compCodec == codecNone { + return src, nil + } + out := byteBuffers.Get().(*bytes.Buffer) + out.Reset() + defer byteBuffers.Put(out) + + switch compCodec { + case codecGzip: + ungz := d.ungzPool.Get().(*gzip.Reader) + defer d.ungzPool.Put(ungz) + if err := ungz.Reset(bytes.NewReader(src)); err != nil { + return nil, err + } + if _, err := io.Copy(out, ungz); err != nil { + return nil, err + } + return append([]byte(nil), out.Bytes()...), nil + case codecSnappy: + if len(src) > 16 && bytes.HasPrefix(src, xerialPfx) { + return xerialDecode(src) + } + decoded, err := s2.Decode(out.Bytes(), src) + if err != nil { + return nil, err + } + return append([]byte(nil), decoded...), nil + case codecLZ4: + unlz4 := d.unlz4Pool.Get().(*lz4.Reader) + defer
d.unlz4Pool.Put(unlz4) + unlz4.Reset(bytes.NewReader(src)) + if _, err := io.Copy(out, unlz4); err != nil { + return nil, err + } + return append([]byte(nil), out.Bytes()...), nil + case codecZstd: + unzstd := d.unzstdPool.Get().(*zstdDecoder) + defer d.unzstdPool.Put(unzstd) + decoded, err := unzstd.inner.DecodeAll(src, out.Bytes()) + if err != nil { + return nil, err + } + return append([]byte(nil), decoded...), nil + default: + return nil, errors.New("unknown compression codec") + } +} + +var xerialPfx = []byte{130, 83, 78, 65, 80, 80, 89, 0} + +var errMalformedXerial = errors.New("malformed xerial framing") + +func xerialDecode(src []byte) ([]byte, error) { + // bytes 0-8: xerial header + // bytes 8-16: xerial version + // everything after: uint32 chunk size, snappy chunk + // we come into this function knowing src is at least 16 + src = src[16:] + var dst, chunk []byte + var err error + for len(src) > 0 { + if len(src) < 4 { + return nil, errMalformedXerial + } + size := int32(binary.BigEndian.Uint32(src)) + src = src[4:] + if size < 0 || len(src) < int(size) { + return nil, errMalformedXerial + } + if chunk, err = s2.Decode(chunk[:cap(chunk)], src[:size]); err != nil { + return nil, err + } + src = src[size:] + dst = append(dst, chunk...) + } + return dst, nil +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/config.go b/vendor/github.com/twmb/franz-go/pkg/kgo/config.go new file mode 100644 index 00000000000..c7bf6963ef2 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/config.go @@ -0,0 +1,1758 @@ +package kgo + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "math" + "math/rand" + "net" + "regexp" + "runtime/debug" + "sync" + "time" + + "github.com/twmb/franz-go/pkg/kmsg" + "github.com/twmb/franz-go/pkg/kversion" + "github.com/twmb/franz-go/pkg/sasl" +) + +// Opt is an option to configure a client. +type Opt interface { + apply(*cfg) +} + +// ProducerOpt is a producer specific option to configure a client. +// This is simply a namespaced Opt. +type ProducerOpt interface { + Opt + producerOpt() +} + +// ConsumerOpt is a consumer specific option to configure a client. +// This is simply a namespaced Opt. +type ConsumerOpt interface { + Opt + consumerOpt() +} + +// GroupOpt is a consumer group specific option to configure a client. +// This is simply a namespaced Opt. +type GroupOpt interface { + Opt + groupOpt() +} + +type ( + clientOpt struct{ fn func(*cfg) } + producerOpt struct{ fn func(*cfg) } + consumerOpt struct{ fn func(*cfg) } + groupOpt struct{ fn func(*cfg) } +) + +func (opt clientOpt) apply(cfg *cfg) { opt.fn(cfg) } +func (opt producerOpt) apply(cfg *cfg) { opt.fn(cfg) } +func (opt consumerOpt) apply(cfg *cfg) { opt.fn(cfg) } +func (opt groupOpt) apply(cfg *cfg) { opt.fn(cfg) } +func (producerOpt) producerOpt() {} +func (consumerOpt) consumerOpt() {} +func (groupOpt) groupOpt() {} + +// A cfg can be written to while initializing a client, and after that it is +// (mostly) only ever read from. Some areas can continue to be modified -- +// particularly reconfiguring what to consume from -- but most areas are +// static. 
+type cfg struct { + ///////////////////// + // GENERAL SECTION // + ///////////////////// + + id *string // client ID + dialFn func(context.Context, string, string) (net.Conn, error) + dialTimeout time.Duration + dialTLS *tls.Config + requestTimeoutOverhead time.Duration + connIdleTimeout time.Duration + + softwareName string // KIP-511 + softwareVersion string // KIP-511 + + logger Logger + + seedBrokers []string + maxVersions *kversion.Versions + minVersions *kversion.Versions + + retryBackoff func(int) time.Duration + retries int64 + retryTimeout func(int16) time.Duration + + maxBrokerWriteBytes int32 + maxBrokerReadBytes int32 + + allowAutoTopicCreation bool + + metadataMaxAge time.Duration + metadataMinAge time.Duration + + sasls []sasl.Mechanism + + hooks hooks + + ////////////////////// + // PRODUCER SECTION // + ////////////////////// + + txnID *string + txnTimeout time.Duration + acks Acks + disableIdempotency bool + maxProduceInflight int // if idempotency is disabled, we allow a configurable max inflight + compression []CompressionCodec // order of preference + + defaultProduceTopic string + maxRecordBatchBytes int32 + maxBufferedRecords int64 + maxBufferedBytes int64 + produceTimeout time.Duration + recordRetries int64 + maxUnknownFailures int64 + linger time.Duration + recordTimeout time.Duration + manualFlushing bool + txnBackoff time.Duration + missingTopicDelete time.Duration + + partitioner Partitioner + + stopOnDataLoss bool + onDataLoss func(string, int32) + + ////////////////////// + // CONSUMER SECTION // + ////////////////////// + + maxWait int32 + minBytes int32 + maxBytes lazyI32 + maxPartBytes lazyI32 + resetOffset Offset + isolationLevel int8 + keepControl bool + rack string + preferLagFn PreferLagFn + + maxConcurrentFetches int + disableFetchSessions bool + keepRetryableFetchErrors bool + + topics map[string]*regexp.Regexp // topics to consume; if regex is true, values are compiled regular expressions + partitions map[string]map[int32]Offset // partitions to directly consume from + regex bool + + //////////////////////////// + // CONSUMER GROUP SECTION // + //////////////////////////// + + group string // group we are in + instanceID *string // optional group instance ID + balancers []GroupBalancer // balancers we can use + protocol string // "consumer" by default, expected to never be overridden + + sessionTimeout time.Duration + rebalanceTimeout time.Duration + heartbeatInterval time.Duration + requireStable bool + + onAssigned func(context.Context, *Client, map[string][]int32) + onRevoked func(context.Context, *Client, map[string][]int32) + onLost func(context.Context, *Client, map[string][]int32) + onFetched func(context.Context, *Client, *kmsg.OffsetFetchResponse) error + + adjustOffsetsBeforeAssign func(ctx context.Context, offsets map[string]map[int32]Offset) (map[string]map[int32]Offset, error) + + blockRebalanceOnPoll bool + + setAssigned bool + setRevoked bool + setLost bool + setCommitCallback bool + + autocommitDisable bool // true if autocommit was disabled or we are transactional + autocommitGreedy bool + autocommitMarks bool + autocommitInterval time.Duration + commitCallback func(*Client, *kmsg.OffsetCommitRequest, *kmsg.OffsetCommitResponse, error) +} + +func (cfg *cfg) validate() error { + if len(cfg.seedBrokers) == 0 { + return errors.New("config erroneously has no seed brokers") + } + + // We clamp maxPartBytes to maxBytes because some fake Kafka endpoints + // (Oracle) cannot handle the mismatch correctly. 
+ if cfg.maxPartBytes > cfg.maxBytes { + cfg.maxPartBytes = cfg.maxBytes + } + + if cfg.disableIdempotency { + if cfg.txnID != nil { + return errors.New("cannot both disable idempotent writes and use transactional IDs") + } + if cfg.maxProduceInflight <= 0 { + return fmt.Errorf("invalid max produce inflight %d with idempotency disabled", cfg.maxProduceInflight) + } + } else { + if cfg.acks.val != -1 { + return errors.New("idempotency requires acks=all") + } + if cfg.maxProduceInflight != 1 { + return fmt.Errorf("invalid usage of MaxProduceRequestsInflightPerBroker with idempotency enabled") + } + } + + for _, limit := range []struct { + name string + sp **string // if field is a *string, we take addr to it + s string + allowed int + }{ + // A 256 byte ID / software name & version is good enough and + // fits with our max broker write byte min of 1K. + {name: "client id", sp: &cfg.id, allowed: 256}, + {name: "software name", s: cfg.softwareName, allowed: 256}, + {name: "software version", s: cfg.softwareVersion, allowed: 256}, + + // The following is the limit transitioning from two byte + // prefix for flexible stuff to three bytes; as with above, it + // is more than reasonable. + {name: "transactional id", sp: &cfg.txnID, allowed: 16382}, + + {name: "rack", s: cfg.rack, allowed: 512}, + } { + s := limit.s + if limit.sp != nil && *limit.sp != nil { + s = **limit.sp + } + if len(s) > limit.allowed { + return fmt.Errorf("%s length %d is larger than max allowed %d", limit.name, len(s), limit.allowed) + } + } + + i64lt := func(l, r int64) (bool, string) { return l < r, "less" } + i64gt := func(l, r int64) (bool, string) { return l > r, "larger" } + for _, limit := range []struct { + name string + v int64 + allowed int64 + badcmp func(int64, int64) (bool, string) + + fmt string + durs bool + }{ + // Min write of 1K and max of 1G is reasonable. + {name: "max broker write bytes", v: int64(cfg.maxBrokerWriteBytes), allowed: 1 << 10, badcmp: i64lt}, + {name: "max broker write bytes", v: int64(cfg.maxBrokerWriteBytes), allowed: 1 << 30, badcmp: i64gt}, + + // Same for read bytes. + {name: "max broker read bytes", v: int64(cfg.maxBrokerReadBytes), allowed: 1 << 10, badcmp: i64lt}, + {name: "max broker read bytes", v: int64(cfg.maxBrokerReadBytes), allowed: 1 << 30, badcmp: i64gt}, + + // For batches, we want at least 512 (reasonable), and the + // upper limit is the max num when a uvarint transitions from 4 + // to 5 bytes. The upper limit is also more than reasonable + // (256MiB). + {name: "max record batch bytes", v: int64(cfg.maxRecordBatchBytes), allowed: 512, badcmp: i64lt}, + {name: "max record batch bytes", v: int64(cfg.maxRecordBatchBytes), allowed: 256 << 20, badcmp: i64gt}, + + // We do not want the broker write bytes to be less than the + // record batch bytes, nor the read bytes to be less than what + // we indicate to fetch. + // + // We cannot enforce if a single batch is larger than the max + // fetch bytes limit, but hopefully we do not run into that. 
+ {v: int64(cfg.maxBrokerWriteBytes), allowed: int64(cfg.maxRecordBatchBytes), badcmp: i64lt, fmt: "max broker write bytes %v is erroneously less than max record batch bytes %v"}, + {v: int64(cfg.maxBrokerReadBytes), allowed: int64(cfg.maxBytes), badcmp: i64lt, fmt: "max broker read bytes %v is erroneously less than max fetch bytes %v"}, + + // 0 <= allowed concurrency + {name: "max concurrent fetches", v: int64(cfg.maxConcurrentFetches), allowed: 0, badcmp: i64lt}, + + // 1s <= request timeout overhead <= 15m + {name: "request timeout max overhead", v: int64(cfg.requestTimeoutOverhead), allowed: int64(15 * time.Minute), badcmp: i64gt, durs: true}, + {name: "request timeout min overhead", v: int64(cfg.requestTimeoutOverhead), allowed: int64(time.Second), badcmp: i64lt, durs: true}, + + // 1s <= conn idle <= 15m + {name: "conn min idle timeout", v: int64(cfg.connIdleTimeout), allowed: int64(time.Second), badcmp: i64lt, durs: true}, + {name: "conn max idle timeout", v: int64(cfg.connIdleTimeout), allowed: int64(15 * time.Minute), badcmp: i64gt, durs: true}, + + // 10ms <= metadata <= 1hr + {name: "metadata max age", v: int64(cfg.metadataMaxAge), allowed: int64(time.Hour), badcmp: i64gt, durs: true}, + {name: "metadata min age", v: int64(cfg.metadataMinAge), allowed: int64(10 * time.Millisecond), badcmp: i64lt, durs: true}, + {v: int64(cfg.metadataMaxAge), allowed: int64(cfg.metadataMinAge), badcmp: i64lt, fmt: "metadata max age %v is erroneously less than metadata min age %v", durs: true}, + + // Some random producer settings. + {name: "max buffered records", v: cfg.maxBufferedRecords, allowed: 1, badcmp: i64lt}, + {name: "max buffered bytes", v: cfg.maxBufferedBytes, allowed: 0, badcmp: i64lt}, + {name: "linger", v: int64(cfg.linger), allowed: int64(time.Minute), badcmp: i64gt, durs: true}, + {name: "produce timeout", v: int64(cfg.produceTimeout), allowed: int64(100 * time.Millisecond), badcmp: i64lt, durs: true}, + {name: "record timeout", v: int64(cfg.recordTimeout), allowed: int64(time.Second), badcmp: func(l, r int64) (bool, string) { + if l == 0 { + return false, "" // we print nothing when things are good + } + return l < r, "less" + }, durs: true}, + + // Consumer settings. maxWait is stored as int32 milliseconds, + // but we want the error message to be in the nice + // time.Duration string format. + {name: "max fetch wait", v: int64(cfg.maxWait) * int64(time.Millisecond), allowed: int64(10 * time.Millisecond), badcmp: i64lt, durs: true}, + + // Group settings. 
+ {name: "number of balancers", v: int64(len(cfg.balancers)), allowed: 1, badcmp: i64lt}, + {name: "consumer protocol length", v: int64(len(cfg.protocol)), allowed: 1, badcmp: i64lt}, + + {name: "session timeout", v: int64(cfg.sessionTimeout), allowed: int64(100 * time.Millisecond), badcmp: i64lt, durs: true}, + {name: "rebalance timeout", v: int64(cfg.rebalanceTimeout), allowed: int64(100 * time.Millisecond), badcmp: i64lt, durs: true}, + {name: "autocommit interval", v: int64(cfg.autocommitInterval), allowed: int64(100 * time.Millisecond), badcmp: i64lt, durs: true}, + + {v: int64(cfg.heartbeatInterval), allowed: int64(cfg.rebalanceTimeout) * int64(time.Millisecond), badcmp: i64gt, durs: true, fmt: "heartbeat interval %v is erroneously larger than the session timeout %v"}, + } { + bad, cmp := limit.badcmp(limit.v, limit.allowed) + if bad { + if limit.fmt != "" { + if limit.durs { + return fmt.Errorf(limit.fmt, time.Duration(limit.v), time.Duration(limit.allowed)) + } + return fmt.Errorf(limit.fmt, limit.v, limit.allowed) + } + if limit.durs { + return fmt.Errorf("%s %v is %s than allowed %v", limit.name, time.Duration(limit.v), cmp, time.Duration(limit.allowed)) + } + return fmt.Errorf("%s %v is %s than allowed %v", limit.name, limit.v, cmp, limit.allowed) + } + } + + if cfg.dialFn != nil { + if cfg.dialTLS != nil { + return errors.New("cannot set both Dialer and DialTLSConfig") + } + } + + if len(cfg.group) > 0 { + if len(cfg.partitions) != 0 { + return errors.New("invalid direct-partition consuming option when consuming as a group") + } + } + + if cfg.regex { + if len(cfg.partitions) != 0 { + return errors.New("invalid direct-partition consuming option when consuming as regex") + } + for re := range cfg.topics { + compiled, err := regexp.Compile(re) + if err != nil { + return fmt.Errorf("invalid regular expression %q", re) + } + cfg.topics[re] = compiled + } + } + + if cfg.topics != nil && cfg.partitions != nil { + for topic := range cfg.partitions { + if _, exists := cfg.topics[topic]; exists { + return fmt.Errorf("topic %q seen in both ConsumePartitions and ConsumeTopics; these options are a union, it is invalid to specify specific partitions for a topic while also consuming the entire topic", topic) + } + } + } + + if cfg.autocommitDisable && cfg.autocommitGreedy { + return errors.New("cannot both disable autocommitting and enable greedy autocommitting") + } + if cfg.autocommitDisable && cfg.autocommitMarks { + return errors.New("cannot both disable autocommitting and enable marked autocommitting") + } + if cfg.autocommitGreedy && cfg.autocommitMarks { + return errors.New("cannot enable both greedy autocommitting and marked autocommitting") + } + if (cfg.autocommitGreedy || cfg.autocommitDisable || cfg.autocommitMarks || cfg.setCommitCallback) && len(cfg.group) == 0 { + return errors.New("invalid autocommit options specified when a group was not specified") + } + if (cfg.setLost || cfg.setRevoked || cfg.setAssigned) && len(cfg.group) == 0 { + return errors.New("invalid group partition assigned/revoked/lost functions set when a group was not specified") + } + + processedHooks, err := processHooks(cfg.hooks) + if err != nil { + return err + } + cfg.hooks = processedHooks + + return nil +} + +// processHooks will inspect and recursively unpack slices of hooks stopping +// if the instance implements any hook interface. 
It will return an error on +// the first instance that implements no hook interface +func processHooks(hooks []Hook) ([]Hook, error) { + var processedHooks []Hook + for _, hook := range hooks { + if implementsAnyHook(hook) { + processedHooks = append(processedHooks, hook) + } else if moreHooks, ok := hook.([]Hook); ok { + more, err := processHooks(moreHooks) + if err != nil { + return nil, err + } + processedHooks = append(processedHooks, more...) + } else { + return nil, errors.New("found an argument that implements no hook interfaces") + } + } + return processedHooks, nil +} + +var reVersion = regexp.MustCompile(`^[a-zA-Z0-9](?:[a-zA-Z0-9.-]*[a-zA-Z0-9])?$`) + +func softwareVersion() string { + info, ok := debug.ReadBuildInfo() + if ok { + for _, dep := range info.Deps { + if dep.Path == "github.com/twmb/franz-go" { + if reVersion.MatchString(dep.Version) { + return dep.Version + } + } + } + } + return "unknown" +} + +func defaultCfg() cfg { + defaultID := "kgo" + return cfg{ + ///////////// + // general // + ///////////// + id: &defaultID, + + dialTimeout: 10 * time.Second, + requestTimeoutOverhead: 10 * time.Second, + connIdleTimeout: 20 * time.Second, + + softwareName: "kgo", + softwareVersion: softwareVersion(), + + logger: new(nopLogger), + + seedBrokers: []string{"127.0.0.1"}, + maxVersions: kversion.Stable(), + + retryBackoff: func() func(int) time.Duration { + var rngMu sync.Mutex + rng := rand.New(rand.NewSource(time.Now().UnixNano())) + return func(fails int) time.Duration { + const ( + min = 250 * time.Millisecond + max = 5 * time.Second / 2 + ) + if fails <= 0 { + return min + } + if fails > 10 { + return max + } + + backoff := min * time.Duration(1<<(fails-1)) + + rngMu.Lock() + jitter := 0.8 + 0.4*rng.Float64() + rngMu.Unlock() + + backoff = time.Duration(float64(backoff) * jitter) + + if backoff > max { + return max + } + return backoff + } + }(), + retries: 20, + + maxBrokerWriteBytes: 100 << 20, // Kafka socket.request.max.bytes default is 100<<20 + maxBrokerReadBytes: 100 << 20, + + metadataMaxAge: 5 * time.Minute, + metadataMinAge: 5 * time.Second, + missingTopicDelete: 15 * time.Second, + + ////////////// + // producer // + ////////////// + + txnTimeout: 40 * time.Second, + acks: AllISRAcks(), + maxProduceInflight: 1, + compression: []CompressionCodec{SnappyCompression(), NoCompression()}, + maxRecordBatchBytes: 1000012, // Kafka max.message.bytes default is 1000012 + maxBufferedRecords: 10000, + produceTimeout: 10 * time.Second, + recordRetries: math.MaxInt64, // effectively unbounded + maxUnknownFailures: 4, + partitioner: UniformBytesPartitioner(64<<10, true, true, nil), + txnBackoff: 20 * time.Millisecond, + + ////////////// + // consumer // + ////////////// + + maxWait: 5000, + minBytes: 1, + maxBytes: 50 << 20, + maxPartBytes: 1 << 20, + resetOffset: NewOffset().AtStart(), + isolationLevel: 0, + + maxConcurrentFetches: 0, // unbounded default + + /////////// + // group // + /////////// + + balancers: []GroupBalancer{ + CooperativeStickyBalancer(), + }, + protocol: "consumer", + + sessionTimeout: 45000 * time.Millisecond, + rebalanceTimeout: 60000 * time.Millisecond, + heartbeatInterval: 3000 * time.Millisecond, + + autocommitInterval: 5 * time.Second, + } +} + +////////////////////////// +// CLIENT CONFIGURATION // +////////////////////////// + +// ClientID uses id for all requests sent to Kafka brokers, overriding the +// default "kgo". 
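+// +// A minimal construction sketch (editor's illustration; the broker address and client ID shown are hypothetical): +// +// cl, err := kgo.NewClient( +// kgo.SeedBrokers("localhost:9092"), +// kgo.ClientID("my-service"), +// ) +// if err != nil { /* handle */ } +// defer cl.Close()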
+func ClientID(id string) Opt { + return clientOpt{func(cfg *cfg) { cfg.id = &id }} +} + +// SoftwareNameAndVersion sets the client software name and version that will +// be sent to Kafka as part of the ApiVersions request as of Kafka 2.4, +// overriding the default "kgo" and internal version number. +// +// Kafka exposes this through metrics to help operators understand the impact +// of clients. +// +// It is generally not recommended to set this. As well, if you do, the name +// and version must match the following regular expression: +// +// [a-zA-Z0-9](?:[a-zA-Z0-9\.-]*[a-zA-Z0-9])? +// +// Note this means neither the name nor version can be empty. +func SoftwareNameAndVersion(name, version string) Opt { + return clientOpt{func(cfg *cfg) { cfg.softwareName = name; cfg.softwareVersion = version }} +} + +// WithLogger sets the client to use the given logger, overriding the default +// to not use a logger. +// +// It is invalid to use a nil logger; doing so will cause panics. +func WithLogger(l Logger) Opt { + return clientOpt{func(cfg *cfg) { cfg.logger = &wrappedLogger{l} }} +} + +// RequestTimeoutOverhead uses the given time as overhead while deadlining +// requests, overriding the default overhead of 10s. +// +// For most requests, the timeout is set to the overhead. However, for +// any request with a TimeoutMillis field, the overhead is added on top of the +// request's TimeoutMillis. This ensures that we give Kafka enough time to +// actually process the request given the timeout, while still having a +// deadline on the connection as a whole to ensure it does not hang. +// +// For writes, the timeout is always the overhead. We buffer writes in our +// client before one quick flush, so we always expect the write to be fast. +// +// Note that hitting the timeout kills a connection, which will fail any other +// active writes or reads on the connection. +// +// This option is roughly equivalent to request.timeout.ms, but grants +// additional time to requests that have timeout fields. +func RequestTimeoutOverhead(overhead time.Duration) Opt { + return clientOpt{func(cfg *cfg) { cfg.requestTimeoutOverhead = overhead }} +} + +// ConnIdleTimeout is a rough amount of time to allow connections to idle +// before they are closed, overriding the default 20s. +// +// In the worst case, a connection can be allowed to idle for up to 2x this +// time, while the average is expected to be 1.5x (essentially, a uniform +// distribution from this interval to 2x the interval). +// +// It is possible that a connection can be reaped just as it is about to be +// written to, but the client internally retries in these cases. +// +// Connections are not reaped if they are actively being written to or read +// from; thus, a request can take a really long time itself and not be reaped +// (however, this may lead to the RequestTimeoutOverhead). +func ConnIdleTimeout(timeout time.Duration) Opt { + return clientOpt{func(cfg *cfg) { cfg.connIdleTimeout = timeout }} +} + +// Dialer uses fn to dial addresses, overriding the default dialer that uses a +// 10s dial timeout and no TLS. +// +// The context passed to the dial function is the context used in the request +// that caused the dial. If the request is a client-internal request, the +// context is the context on the client itself (which is canceled when the +// client is closed).
+// +// This function has the same signature as net.Dialer's DialContext and +// tls.Dialer's DialContext, meaning you can use this function like so: +// +// kgo.Dialer((&net.Dialer{Timeout: 10*time.Second}).DialContext) +// +// or +// +// kgo.Dialer((&tls.Dialer{...}).DialContext) +func Dialer(fn func(ctx context.Context, network, host string) (net.Conn, error)) Opt { + return clientOpt{func(cfg *cfg) { cfg.dialFn = fn }} +} + +// DialTimeout sets the dial timeout, overriding the default of 10s. This +// option is useful if you do not want to set a custom dialer, and is useful in +// tandem with DialTLSConfig. +func DialTimeout(timeout time.Duration) Opt { + return clientOpt{func(cfg *cfg) { cfg.dialTimeout = timeout }} +} + +// DialTLSConfig opts into dialing brokers with the given TLS config with a +// 10s dial timeout. This is a shortcut for manually specifying a tls dialer +// using the Dialer option. You can also change the default 10s timeout with +// DialTimeout. +// +// Every dial, the input config is cloned. If the config's ServerName is not +// specified, this function uses net.SplitHostPort to extract the host from the +// broker being dialed and sets the ServerName. In short, it is not necessary +// to set the ServerName. +func DialTLSConfig(c *tls.Config) Opt { + return clientOpt{func(cfg *cfg) { cfg.dialTLS = c }} +} + +// DialTLS opts into dialing brokers with TLS. This is a shortcut for +// DialTLSConfig with an empty config. See DialTLSConfig for more details. +func DialTLS() Opt { + return DialTLSConfig(new(tls.Config)) +} + +// SeedBrokers sets the seed brokers for the client to use, overriding the +// default 127.0.0.1:9092. +// +// Any seeds that are missing a port use the default Kafka port 9092. +func SeedBrokers(seeds ...string) Opt { + return clientOpt{func(cfg *cfg) { cfg.seedBrokers = append(cfg.seedBrokers[:0], seeds...) }} +} + +// MaxVersions sets the maximum Kafka version to try, overriding the +// internal unbounded (latest stable) versions. +// +// Note that specific max version pinning is required if trying to interact +// with versions pre 0.10.0. Otherwise, unless using more complicated requests +// that this client itself does not natively use, it is generally safe to opt +// for the latest version. If using the kmsg package directly to issue +// requests, it is recommended to pin versions so that new fields on requests +// do not get invalid default zero values before you update your usage. +func MaxVersions(versions *kversion.Versions) Opt { + return clientOpt{func(cfg *cfg) { cfg.maxVersions = versions }} +} + +// MinVersions sets the minimum Kafka version a request can be downgraded to, +// overriding the default of the lowest version. +// +// This option is useful if you are issuing requests that you absolutely do not +// want to be downgraded; that is, if you are relying on features in newer +// requests, and you are not sure if your brokers can handle those features. +// By setting a min version, if the client detects it needs to downgrade past +// the version, it will instead avoid issuing the request. +// +// Unlike MaxVersions, if a request is issued that is unknown to the min +// versions, the request is allowed. It is assumed that there is no lower bound +// for that request. 
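+// +// For example (editor's sketch; the pins shown are arbitrary): +// +// kgo.MaxVersions(kversion.V2_4_0()) // never speak newer than Kafka 2.4 +// kgo.MinVersions(kversion.V0_11_0()) // refuse to downgrade below 0.11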
+func MinVersions(versions *kversion.Versions) Opt { + return clientOpt{func(cfg *cfg) { cfg.minVersions = versions }} +} + +// RetryBackoffFn sets the backoff strategy for how long to backoff for a given +// amount of retries, overriding the default jittery exponential backoff that +// ranges from 250ms min to 2.5s max. +// +// This (roughly) corresponds to Kafka's retry.backoff.ms setting and +// retry.backoff.max.ms (which is being introduced with KIP-580). +func RetryBackoffFn(backoff func(int) time.Duration) Opt { + return clientOpt{func(cfg *cfg) { cfg.retryBackoff = backoff }} +} + +// RequestRetries sets the number of tries that retryable requests are allowed, +// overriding the default of 20. +// +// This option does not apply to produce requests; to limit produce request +// retries / record retries, see RecordRetries. +func RequestRetries(n int) Opt { + return clientOpt{func(cfg *cfg) { cfg.retries = int64(n) }} +} + +// RetryTimeout sets the upper limit on how long we allow a request to be +// issued and then reissued on failure. That is, this controls the total +// end-to-end maximum time we allow for trying a request. This overrides the +// default of: +// +// JoinGroup: cfg.SessionTimeout (default 45s) +// SyncGroup: cfg.SessionTimeout (default 45s) +// Heartbeat: cfg.SessionTimeout (default 45s) +// others: 30s +// +// This timeout applies to any request issued through a client's Request +// function. It does not apply to fetches nor produces. +// +// A value of zero indicates no request timeout. +// +// The timeout is evaluated after a request errors. If the time since the start +// of the first request plus any backoff for the latest failure is less than +// the retry timeout, the request will be issued again. +func RetryTimeout(t time.Duration) Opt { + return RetryTimeoutFn(func(int16) time.Duration { return t }) +} + +// RetryTimeoutFn sets the upper limit on how long we allow a request to be +// issued and then reissued on failure. That is, this controls the total +// end-to-end maximum time we allow for trying a request. This overrides the +// default of: +// +// JoinGroup: cfg.SessionTimeout (default 45s) +// SyncGroup: cfg.SessionTimeout (default 45s) +// Heartbeat: cfg.SessionTimeout (default 45s) +// others: 30s +// +// This timeout applies to any request issued through a client's Request +// function. It does not apply to fetches nor produces. +// +// The function is called with the request key that is being retried. While it +// is not expected that the request key will be used, including it gives users +// the opportunity to have different retry timeouts for different keys. +// +// If the function returns zero, there is no retry timeout. +// +// The timeout is evaluated after a request errors. If the time since the start +// of the first request plus any backoff for the latest failure is less than +// the retry timeout, the request will be issued again. +func RetryTimeoutFn(t func(int16) time.Duration) Opt { + return clientOpt{func(cfg *cfg) { cfg.retryTimeout = t }} +} + +// AllowAutoTopicCreation enables topics to be auto created if they do +// not exist when fetching their metadata. +func AllowAutoTopicCreation() Opt { + return clientOpt{func(cfg *cfg) { cfg.allowAutoTopicCreation = true }} +} + +// BrokerMaxWriteBytes upper bounds the number of bytes written to a broker +// connection in a single write, overriding the default 100MiB. +// +// This number corresponds to a broker's socket.request.max.bytes, which +// defaults to 100MiB.
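+// +// For example (editor's sketch; the values shown are arbitrary), raising both directions in tandem when very large batches are expected: +// +// kgo.BrokerMaxWriteBytes(200 << 20) +// kgo.BrokerMaxReadBytes(200 << 20)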
+// +// The only Kafka request that could come reasonably close to hitting this +// limit should be produce requests, and thus this limit is only enforced for +// produce requests. +func BrokerMaxWriteBytes(v int32) Opt { + return clientOpt{func(cfg *cfg) { cfg.maxBrokerWriteBytes = v }} +} + +// BrokerMaxReadBytes sets the maximum response size that can be read from +// Kafka, overriding the default 100MiB. +// +// This is a safety measure to avoid OOMing on invalid responses. This is +// slightly double FetchMaxBytes; if bumping that, consider bumping this. No other +// response should run the risk of hitting this limit. +func BrokerMaxReadBytes(v int32) Opt { + return clientOpt{func(cfg *cfg) { cfg.maxBrokerReadBytes = v }} +} + +// MetadataMaxAge sets the maximum age for the client's cached metadata, +// overriding the default 5m, to allow detection of new topics, partitions, +// etc. +// +// This corresponds to Kafka's metadata.max.age.ms. +func MetadataMaxAge(age time.Duration) Opt { + return clientOpt{func(cfg *cfg) { cfg.metadataMaxAge = age }} +} + +// MetadataMinAge sets the minimum time between metadata queries, overriding +// the default 5s. You may want to raise or lower this to reduce the number of +// metadata queries the client will make. Notably, if metadata detects an error +// in any topic or partition, it triggers itself to update as soon as allowed. +func MetadataMinAge(age time.Duration) Opt { + return clientOpt{func(cfg *cfg) { cfg.metadataMinAge = age }} +} + +// SASL appends sasl authentication options to use for all connections. +// +// SASL is tried in order; if the broker supports the first mechanism, all +// connections will use that mechanism. If the first mechanism fails, the +// client will pick the first supported mechanism. If the broker does not +// support any client mechanisms, connections will fail. +func SASL(sasls ...sasl.Mechanism) Opt { + return clientOpt{func(cfg *cfg) { cfg.sasls = append(cfg.sasls, sasls...) }} +} + +// WithHooks sets hooks to call whenever relevant. +// +// Hooks can be used to layer in metrics (such as Prometheus hooks) or anything +// else. The client will call all hooks in order. See the Hooks interface for +// more information, as well as any interface that contains "Hook" in the name +// to know the available hooks. A single hook can implement zero or all hook +// interfaces, and only the hooks that it implements will be called. +func WithHooks(hooks ...Hook) Opt { + return clientOpt{func(cfg *cfg) { cfg.hooks = append(cfg.hooks, hooks...) }} +} + +// ConcurrentTransactionsBackoff sets the backoff interval to use during +// transactional requests in case we encounter a CONCURRENT_TRANSACTIONS error, +// overriding the default 20ms. +// +// Sometimes, when a client begins a transaction quickly enough after finishing +// a previous one, Kafka will return a CONCURRENT_TRANSACTIONS error. Clients +// are expected to backoff slightly and retry the operation. Lower backoffs may +// increase load on the brokers, while higher backoffs may increase transaction +// latency in clients. +// +// Note that if brokers are hanging in this concurrent transactions state for +// too long, the client progressively increases the backoff.
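+// +// For context, a rough sketch (editor's illustration; error handling elided) of the transactional loop this backoff applies to: +// +// cl.BeginTransaction() +// cl.ProduceSync(ctx, rec) +// cl.EndTransaction(ctx, kgo.TryCommit)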
+func ConcurrentTransactionsBackoff(backoff time.Duration) Opt { + return clientOpt{func(cfg *cfg) { cfg.txnBackoff = backoff }} +} + +// ConsiderMissingTopicDeletedAfter sets the amount of time a topic can be +// missing from metadata responses _after_ loading it at least once before it +// is considered deleted, overriding the default of 15s. Note that for newer +// versions of Kafka, it may take a bit of time (~15s) for the cluster to fully +// recognize a newly created topic. If this option is set too low, there is +// some risk that the client will internally purge and re-see a topic a few +// times until the cluster fully broadcasts the topic creation. +func ConsiderMissingTopicDeletedAfter(t time.Duration) Opt { + return clientOpt{func(cfg *cfg) { cfg.missingTopicDelete = t }} +} + +//////////////////////////// +// PRODUCER CONFIGURATION // +//////////////////////////// + +// DefaultProduceTopic sets the default topic to produce to if the topic field +// is empty in a Record. +// +// If this option is not used, if a record has an empty topic, the record +// cannot be produced and will be failed immediately. +func DefaultProduceTopic(t string) ProducerOpt { + return producerOpt{func(cfg *cfg) { cfg.defaultProduceTopic = t }} +} + +// Acks represents the number of acks a broker leader must have before +// a produce request is considered complete. +// +// This controls the durability of written records and corresponds to "acks" in +// Kafka's Producer Configuration documentation. +// +// The default is AllISRAcks. +type Acks struct { + val int16 +} + +// NoAck considers records sent as soon as they are written on the wire. +// The leader does not reply to records. +func NoAck() Acks { return Acks{0} } + +// LeaderAck causes Kafka to reply that a record is written after only +// the leader has written a message. The leader does not wait for in-sync +// replica replies. +func LeaderAck() Acks { return Acks{1} } + +// AllISRAcks ensures that all in-sync replicas have acknowledged they +// wrote a record before the leader replies success. +func AllISRAcks() Acks { return Acks{-1} } + +// RequiredAcks sets the required acks for produced records, +// overriding the default AllISRAcks. +func RequiredAcks(acks Acks) ProducerOpt { + return producerOpt{func(cfg *cfg) { cfg.acks = acks }} +} + +// DisableIdempotentWrite disables idempotent produce requests, opting out of +// Kafka server-side deduplication in the face of reissued requests due to +// transient network problems. Disabling idempotent write by default +// upper-bounds the number of in-flight produce requests per broker to 1, vs. +// the default of 5 when using idempotency. +// +// Idempotent production is strictly a win, but does require the +// IDEMPOTENT_WRITE permission on CLUSTER (pre Kafka 3.0), and not all clients +// can have that permission. +// +// This option is incompatible with specifying a transactional id. +func DisableIdempotentWrite() ProducerOpt { + return producerOpt{func(cfg *cfg) { cfg.disableIdempotency = true }} +} + +// MaxProduceRequestsInflightPerBroker changes the number of allowed produce +// requests in flight per broker if you disable idempotency, overriding the +// default of 1. If using idempotency, this option has no effect: the maximum +// in flight for Kafka v0.11 is 1, and from v1 onward is 5. +// +// Using more than 1 may result in out of order records and may result in +// duplicates if there are connection issues.
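+// +// For example (editor's sketch), trading strict ordering for throughput: +// +// kgo.DisableIdempotentWrite() +// kgo.MaxProduceRequestsInflightPerBroker(5)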
+func MaxProduceRequestsInflightPerBroker(n int) ProducerOpt { + return producerOpt{func(cfg *cfg) { cfg.maxProduceInflight = n }} +} + +// ProducerBatchCompression sets the compression codec to use for producing +// records. +// +// Compression is chosen in the order preferred based on broker support. For +// example, zstd compression was introduced in Kafka 2.1, so the preference +// can be first zstd, fallback snappy, fallback none. +// +// The default preference is [snappy, none], which should be fine for all old +// consumers since snappy compression has existed since Kafka 0.8.0. To use +// zstd, your brokers must be at least 2.1 and all consumers must be upgraded +// to support decoding zstd records. +func ProducerBatchCompression(preference ...CompressionCodec) ProducerOpt { + return producerOpt{func(cfg *cfg) { cfg.compression = preference }} +} + +// ProducerBatchMaxBytes upper bounds the size of a record batch, overriding +// the default 1,000,012 bytes. This mirrors Kafka's max.message.bytes. +// +// Record batches are independent of a ProduceRequest: a record batch is +// specific to a topic and partition, whereas the produce request can contain +// many record batches for many topics. +// +// If a single record encodes larger than this number (before compression), it +// will not be written and a callback will have the appropriate error. +// +// Note that this is the maximum size of a record batch before compression. If +// a batch compresses poorly and actually grows the batch, the uncompressed +// form will be used. +func ProducerBatchMaxBytes(v int32) ProducerOpt { + return producerOpt{func(cfg *cfg) { cfg.maxRecordBatchBytes = v }} +} + +// MaxBufferedRecords sets the max amount of records the client will buffer, +// blocking produces until records are finished if this limit is reached. +// This overrides the default of 10,000. +func MaxBufferedRecords(n int) ProducerOpt { + return producerOpt{func(cfg *cfg) { cfg.maxBufferedRecords = int64(n) }} +} + +// MaxBufferedBytes sets the max amount of bytes that the client will buffer +// while producing, blocking produces until records are finished if this limit +// is reached. This overrides the unlimited default. +// +// Note that this option does _not_ apply for consuming: the client cannot +// limit bytes buffered for consuming because of decompression. You can roughly +// control consuming memory by using [MaxConcurrentFetches], [FetchMaxBytes], +// and [FetchMaxPartitionBytes]. +// +// If you produce a record that is larger than n, the record is immediately +// failed with kerr.MessageTooLarge. +// +// Note that this limit applies after [MaxBufferedRecords]. +func MaxBufferedBytes(n int) ProducerOpt { + return producerOpt{func(cfg *cfg) { cfg.maxBufferedBytes = int64(n) }} +} + +// RecordPartitioner uses the given partitioner to partition records, overriding +// the default UniformBytesPartitioner(64KiB, true, true, nil). +func RecordPartitioner(partitioner Partitioner) ProducerOpt { + return producerOpt{func(cfg *cfg) { cfg.partitioner = partitioner }} +} + +// ProduceRequestTimeout sets how long Kafka brokers are allowed to respond to +// produce requests, overriding the default 10s. If a broker exceeds this +// duration, it will reply with a request timeout error. +// +// This somewhat corresponds to Kafka's request.timeout.ms setting, but only +// applies to produce requests. This setting sets the TimeoutMillis field in +// the produce request itself.
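+// (Editor's note: with the defaults, that is a 10s TimeoutMillis plus the 10s overhead, i.e. roughly a 20s connection deadline.)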
The RequestTimeoutOverhead is applied as a write +// limit and read limit in addition to this. +func ProduceRequestTimeout(limit time.Duration) ProducerOpt { + return producerOpt{func(cfg *cfg) { cfg.produceTimeout = limit }} +} + +// RecordRetries sets the number of tries for producing records, overriding the +// unlimited default. +// +// If idempotency is enabled (as it is by default), this option is only +// enforced if it is safe to do so without creating invalid sequence numbers. +// It is safe to enforce if a record was never issued in a request to Kafka, or +// if it was requested and received a response. +// +// If a record fails due to retries, all records buffered in the same partition +// are failed as well. This ensures gapless ordering: the client will not fail +// one record only to produce a later one successfully. This also allows for +// easier sequence number ordering internally. +// +// If a topic repeatedly fails to load with UNKNOWN_TOPIC_OR_PARTITION, it has +// a different limit (the UnknownTopicRetries option). All records for a topic +// that repeatedly cannot be loaded are failed when that limit is hit. +// +// This option is different from RequestRetries to allow finer grained control +// of when to fail when producing records. +func RecordRetries(n int) ProducerOpt { + return producerOpt{func(cfg *cfg) { cfg.recordRetries = int64(n) }} +} + +// UnknownTopicRetries sets the number of times a record can fail with +// UNKNOWN_TOPIC_OR_PARTITION, overriding the default 4. +// +// This is a separate limit from RecordRetries because unknown topic or +// partition errors should only happen if the topic does not exist. It is +// pointless for the client to continue producing to a topic that does not +// exist, and if we repeatedly see that the topic does not exist across +// multiple metadata queries (which are going to different brokers), then we +// may as well stop trying and fail the records. +// +// If this is -1, the client never fails records with this error. +func UnknownTopicRetries(n int) ProducerOpt { + return producerOpt{func(cfg *cfg) { cfg.maxUnknownFailures = int64(n) }} +} + +// StopProducerOnDataLossDetected sets the client to stop producing if data +// loss is detected, overriding the default false. +// +// Note that if using this option, it is strongly recommended to not have a +// retry limit. Doing so may lead to errors where the client fails a batch on a +// recoverable error, which internally bumps the idempotent sequence number +// used for producing, which may then later cause an inadvertent out of order +// sequence number and false "data loss" detection. +func StopProducerOnDataLossDetected() ProducerOpt { + return producerOpt{func(cfg *cfg) { cfg.stopOnDataLoss = true }} +} + +// ProducerOnDataLossDetected sets a function to call if data loss is detected +// when producing records if the client is configured to continue on data loss. +// Thus, this option is mutually exclusive with StopProducerOnDataLossDetected. +// +// The passed function will be called with the topic and partition that data +// loss was detected on. +func ProducerOnDataLossDetected(fn func(string, int32)) ProducerOpt { + return producerOpt{func(cfg *cfg) { cfg.onDataLoss = fn }} +} + +// ProducerLinger sets how long individual topic partitions will linger waiting +// for more records before triggering a request to be built. +// +// Note that this option should only be used in low volume producers. 
The only +// benefit of lingering is to potentially build a larger batch to reduce cpu +// usage on the brokers if you have many producers all producing small amounts. +// +// If a produce request is triggered by any topic partition, all partitions +// with a possible batch to be sent are used and all lingers are reset. +// +// As mentioned, the linger is specific to topic partition. A high volume +// producer will likely be producing to many partitions; it is both unnecessary +// to linger in this case and inefficient because the client will have many +// timers running (and stopping and restarting) unnecessarily. +func ProducerLinger(linger time.Duration) ProducerOpt { + return producerOpt{func(cfg *cfg) { cfg.linger = linger }} +} + +// ManualFlushing disables auto-flushing when producing. While you can still +// set lingering, it would be useless to do so. +// +// With manual flushing, producing while MaxBufferedRecords or MaxBufferedBytes +// worth of records are already buffered and not yet flushed will return +// ErrMaxBuffered. +func ManualFlushing() ProducerOpt { + return producerOpt{func(cfg *cfg) { cfg.manualFlushing = true }} +} + +// RecordDeliveryTimeout sets a rough time of how long a record can sit around +// in a batch before timing out, overriding the unlimited default. +// +// If idempotency is enabled (as it is by default), this option is only +// enforced if it is safe to do so without creating invalid sequence numbers. +// It is safe to enforce if a record was never issued in a request to Kafka, or +// if it was requested and received a response. +// +// All records in a batch inherit the timeout of the first record in that +// batch. That is, once the first record's timeout expires, all +// records in the batch are expired. This generally is a non-issue unless using +// this option with lingering. In that case, simply add the linger to the +// record timeout to avoid problems. +// +// If a record times out, all records buffered in the same partition are failed +// as well. This ensures gapless ordering: the client will not fail one record +// only to produce a later one successfully. This also allows for easier +// sequence number ordering internally. +// +// The timeout is only evaluated before writing a request or after a +// produce response. Thus, a sink backoff may delay record timeout slightly. +// +// This option is roughly equivalent to delivery.timeout.ms. +func RecordDeliveryTimeout(timeout time.Duration) ProducerOpt { + return producerOpt{func(cfg *cfg) { cfg.recordTimeout = timeout }} +} + +// TransactionalID sets a transactional ID for the client, ensuring that +// records are produced transactionally under this ID (exactly once semantics). +// +// For Kafka-to-Kafka transactions, the transactional ID is only one half of +// the equation. You must also assign a group to consume from. +// +// To produce transactionally, you first [BeginTransaction], then produce records +// consumed from a group, then you [EndTransaction]. All records produced outside +// of a transaction will fail immediately with an error. +// +// After producing a batch, you must commit what you consumed. Auto committing +// offsets is disabled during transactional consuming / producing. +// +// Note that unless using Kafka 2.5, a consumer group rebalance may be +// problematic. Production should finish and be committed before the client +// rejoins the group. It may be safer to use an eager group balancer and just +// abort the transaction.
Alternatively, any time a partition is revoked, you +// could abort the transaction and reset offsets being consumed. +// +// If the client detects an unrecoverable error, all records produced +// thereafter will fail. +// +// Lastly, the default read level is READ_UNCOMMITTED. Be sure to use the +// ReadIsolationLevel option if you want to only read committed. +func TransactionalID(id string) ProducerOpt { + return producerOpt{func(cfg *cfg) { cfg.txnID = &id }} +} + +// TransactionTimeout sets the allowed for a transaction, overriding the +// default 40s. It is a good idea to keep this less than a group's session +// timeout, so that a group member will always be alive for the duration of a +// transaction even if connectivity dies. This helps prevent a transaction +// finishing after a rebalance, which is problematic pre-Kafka 2.5. If you +// are on Kafka 2.5+, then you can use the RequireStableFetchOffsets option +// when assigning the group, and you can set this to whatever you would like. +// +// Transaction timeouts begin when the first record is produced within a +// transaction, not when a transaction begins. +func TransactionTimeout(timeout time.Duration) ProducerOpt { + return producerOpt{func(cfg *cfg) { cfg.txnTimeout = timeout }} +} + +//////////////////////////// +// CONSUMER CONFIGURATION // +//////////////////////////// + +// FetchMaxWait sets the maximum amount of time a broker will wait for a +// fetch response to hit the minimum number of required bytes before returning, +// overriding the default 5s. +// +// This corresponds to the Java fetch.max.wait.ms setting. +func FetchMaxWait(wait time.Duration) ConsumerOpt { + return consumerOpt{func(cfg *cfg) { cfg.maxWait = int32(wait.Milliseconds()) }} +} + +// FetchMaxBytes sets the maximum amount of bytes a broker will try to send +// during a fetch, overriding the default 50MiB. Note that brokers may not obey +// this limit if it has records larger than this limit. Also note that this +// client sends a fetch to each broker concurrently, meaning the client will +// buffer up to worth of memory. +// +// This corresponds to the Java fetch.max.bytes setting. +// +// If bumping this, consider bumping BrokerMaxReadBytes. +// +// If what you are consuming is compressed, and compressed well, it is strongly +// recommended to set this option so that decompression does not eat all of +// your RAM. +func FetchMaxBytes(b int32) ConsumerOpt { + return consumerOpt{func(cfg *cfg) { cfg.maxBytes = lazyI32(b) }} +} + +// FetchMinBytes sets the minimum amount of bytes a broker will try to send +// during a fetch, overriding the default 1 byte. +// +// With the default of 1, data is sent as soon as it is available. By bumping +// this, the broker will try to wait for more data, which may improve server +// throughput at the expense of added latency. +// +// This corresponds to the Java fetch.min.bytes setting. +func FetchMinBytes(b int32) ConsumerOpt { + return consumerOpt{func(cfg *cfg) { cfg.minBytes = b }} +} + +// FetchMaxPartitionBytes sets the maximum amount of bytes that will be +// consumed for a single partition in a fetch request, overriding the default +// 1MiB. Note that if a single batch is larger than this number, that batch +// will still be returned so the client can make progress. +// +// This corresponds to the Java max.partition.fetch.bytes setting. 
+func FetchMaxPartitionBytes(b int32) ConsumerOpt { + return consumerOpt{func(cfg *cfg) { cfg.maxPartBytes = lazyI32(b) }} +} + +// MaxConcurrentFetches sets the maximum number of fetch requests to allow in +// flight or buffered at once, overriding the unbounded (i.e. number of +// brokers) default. +// +// This setting, paired with FetchMaxBytes, can upper bound the maximum amount +// of memory that the client can use for consuming. +// +// Requests are issued to brokers in a FIFO order: once the client is ready to +// issue a request to a broker, it registers that request and issues it in +// order with other registrations. +// +// If Kafka replies with any data, the client does not track the fetch as +// completed until the user has polled the buffered fetch. Thus, a concurrent +// fetch is not considered complete until all data from it is done being +// processed and out of the client itself. +// +// Note that brokers are allowed to hang for up to FetchMaxWait before replying +// to a request, so if this option is too constrained and you are consuming a +// low throughput topic, the client may take a long time before requesting a +// broker that has new data. For high throughput topics, or if the allowed +// concurrent fetches is large enough, this should not be a concern. +// +// A value of 0 implies the allowed concurrency is unbounded and will be +// limited only by the number of brokers in the cluster. +func MaxConcurrentFetches(n int) ConsumerOpt { + return consumerOpt{func(cfg *cfg) { cfg.maxConcurrentFetches = n }} +} + +// ConsumeResetOffset sets the offset to start consuming from, or if +// OffsetOutOfRange is seen while fetching, to restart consuming from. The +// default is NewOffset().AtStart(), i.e., the earliest offset. +// +// For direct consumers, this is the offset that partitions begin to consume +// from. For group consumers, this is the offset that partitions begin to +// consume from if a partition has no commits. If partitions have commits, the +// commit offset is used. While fetching, if OffsetOutOfRange is encountered, +// the partition resets to ConsumeResetOffset. Using [NoResetOffset] stops +// consuming a partition if the client encounters OffsetOutOfRange. Using +// [Offset.AtCommitted] prevents consuming a partition in a group if the +// partition has no prior commits. +// +// If you use an exact offset or relative offsets and the offset ends up out of +// range, the client chooses the nearest of either the log start offset or the +// high watermark: using At(3) when the partition starts at 8 results in the +// partition being consumed from offset 8. +// +// In short form, the following determines the offset for when a partition is +// seen for the first time, or reset while fetching: +// +// reset at start? => log start offset +// reset at end? => high watermark +// reset at exact? => this exact offset (3 means offset 3) +// reset relative? => the above, + / - the relative amount +// reset exact or relative out of bounds? => nearest boundary (start or end) +// reset after millisec? => high watermark, or first offset after millisec if one exists +// +// To match Kafka's auto.offset.reset, +// +// NewOffset().AtStart() == auto.offset.reset "earliest" +// NewOffset().AtEnd() == auto.offset.reset "latest" +// NewOffset().AtCommitted() == auto.offset.reset "none" +// +// With the above, make sure to use NoResetOffset() if you want to stop +// consuming when you encounter OffsetOutOfRange. 
It is highly recommended +// to read the docs for all Offset methods to see a few other alternatives. +func ConsumeResetOffset(offset Offset) ConsumerOpt { + return consumerOpt{func(cfg *cfg) { cfg.resetOffset = offset }} +} + +// Rack specifies where the client is physically located and changes fetch +// requests to consume from the closest replica as opposed to the leader +// replica. +// +// Consuming from a preferred replica can increase latency but can decrease +// cross datacenter costs. See KIP-392 for more information. +func Rack(rack string) ConsumerOpt { + return consumerOpt{func(cfg *cfg) { cfg.rack = rack }} +} + +// IsolationLevel controls whether uncommitted or only committed records are +// returned from fetch requests. +type IsolationLevel struct { + level int8 +} + +// ReadUncommitted (the default) is an isolation level that returns the latest +// produced records, be they committed or not. +func ReadUncommitted() IsolationLevel { return IsolationLevel{0} } + +// ReadCommitted is an isolation level to only fetch committed records. +func ReadCommitted() IsolationLevel { return IsolationLevel{1} } + +// FetchIsolationLevel sets the "isolation level" used for fetching +// records, overriding the default ReadUncommitted. +func FetchIsolationLevel(level IsolationLevel) ConsumerOpt { + return consumerOpt{func(cfg *cfg) { cfg.isolationLevel = level.level }} +} + +// KeepControlRecords sets the client to keep control messages and return +// them with fetches, overriding the default that discards them. +// +// Generally, control messages are not useful. +func KeepControlRecords() ConsumerOpt { + return consumerOpt{func(cfg *cfg) { cfg.keepControl = true }} +} + +// ConsumeTopics adds topics to use for consuming. +// +// By default, consuming will start at the beginning of partitions. To change +// this, use the ConsumeResetOffset option. +func ConsumeTopics(topics ...string) ConsumerOpt { + return consumerOpt{func(cfg *cfg) { + cfg.topics = make(map[string]*regexp.Regexp, len(topics)) + for _, topic := range topics { + cfg.topics[topic] = nil + } + }} +} + +// ConsumePartitions sets partitions to consume from directly and the offsets +// to start consuming those partitions from. +// +// This option is basically a way to explicitly consume from subsets of +// partitions in topics, or to consume at exact offsets. Offsets from this +// option have higher precedence than the ConsumeResetOffset. +// +// This option is not compatible with group consuming and regex consuming. If +// you want to assign partitions directly, but still use Kafka to commit +// offsets, check out the kadm package's FetchOffsets and CommitOffsets +// methods. These will allow you to commit as a group outside the context of a +// Kafka group. +func ConsumePartitions(partitions map[string]map[int32]Offset) ConsumerOpt { + return consumerOpt{func(cfg *cfg) { cfg.partitions = partitions }} +} + +// ConsumeRegex sets the client to parse all topics passed to ConsumeTopics as +// regular expressions. +// +// When consuming via regex, every metadata request loads *all* topics, so that +// all topics can be passed to any regular expressions. Every topic is +// evaluated only once ever across all regular expressions; either it +// permanently is known to match, or is permanently known to not match. +func ConsumeRegex() ConsumerOpt { + return consumerOpt{func(cfg *cfg) { cfg.regex = true }} +} + +// DisableFetchSessions sets the client to not use fetch sessions (Kafka 1.0+). 
+// +// A "fetch session" is is a way to reduce bandwidth for fetch requests & +// responses, and to potentially reduce the amount of work that brokers have to +// do to handle fetch requests. A fetch session opts into the broker tracking +// some state of what the client is interested in. For example, say that you +// are interested in thousands of topics, and most of these topics are +// receiving data only rarely. A fetch session allows the client to register +// that it is interested in those thousands of topics on the first request. On +// future requests, if the offsets for these topics have not changed, those +// topics will be elided from the request. The broker knows to reply with the +// extra topics if any new data is available, otherwise the topics are also +// elided from the response. This massively reduces the amount of information +// that needs to be included in requests or responses. +// +// Using fetch sessions means more state is stored on brokers. Maintaining this +// state eats some memory. If you have thousands of consumers, you may not want +// fetch sessions to be used for everything. Brokers intelligently handle this +// by not creating sessions if they are at their configured limit, but you may +// consider disabling sessions if they are generally not useful to you. Brokers +// have metrics for the number of fetch sessions active, so you can monitor +// that to determine whether enabling or disabling sessions is beneficial or +// not. +// +// For more details on fetch sessions, see KIP-227. +func DisableFetchSessions() ConsumerOpt { + return consumerOpt{func(cfg *cfg) { cfg.disableFetchSessions = true }} +} + +// ConsumePreferringLagFn allows you to re-order partitions before they are +// fetched, given each partition's current lag. +// +// By default, the client rotates partitions fetched by one after every fetch +// request. Kafka answers fetch requests in the order that partitions are +// requested, filling the fetch response until FetchMaxBytes and +// FetchMaxPartitionBytes are hit. All partitions eventually rotate to the +// front, ensuring no partition is starved. +// +// With this option, you can return topic order and per-topic partition +// ordering. These orders will sort to the front (first by topic, then by +// partition). Any topic or partitions that you do not return are added to the +// end, preserving their original ordering. +// +// For a simple lag preference that sorts the laggiest topics and partitions +// first, use `kgo.ConsumePreferringLagFn(kgo.PreferLagAt(50))` (or some other +// similar lag number). +func ConsumePreferringLagFn(fn PreferLagFn) ConsumerOpt { + return consumerOpt{func(cfg *cfg) { cfg.preferLagFn = fn }} +} + +// KeepRetryableFetchErrors switches the client to always return any retryable +// broker error when fetching, rather than stripping them. By default, the +// client strips retryable errors from fetch responses; these are usually +// signals that a client needs to update its metadata to learn of where a +// partition has moved to (from one broker to another), or they are signals +// that one broker is temporarily unhealthy (broker not available). You can opt +// into keeping these errors if you want to specifically react to certain +// events. For example, if you want to react to you yourself deleting a topic, +// you can watch for either UNKNOWN_TOPIC_OR_PARTITION or UNKNOWN_TOPIC_ID +// errors being returned in fetches (and ignore the other errors). 
+func KeepRetryableFetchErrors() ConsumerOpt { + return consumerOpt{func(cfg *cfg) { cfg.keepRetryableFetchErrors = true }} +} + +////////////////////////////////// +// CONSUMER GROUP CONFIGURATION // +////////////////////////////////// + +// ConsumerGroup sets the consumer group for the client to join and consume in. +// This option is required if using any other group options. +// +// Note that when group consuming, the default is to autocommit every 5s. To be +// safe, autocommitting only commits what is *previously* polled. If you poll +// once, nothing will be committed. If you poll again, the first poll is +// available to be committed. This ensures at-least-once processing, but does +// mean there is likely some duplicate processing during rebalances. When your +// client shuts down, you should issue one final synchronous commit before +// leaving the group (because you will not be polling again, and you are not +// waiting for an autocommit). +func ConsumerGroup(group string) GroupOpt { + return groupOpt{func(cfg *cfg) { cfg.group = group }} +} + +// Balancers sets the group balancers to use for dividing topic partitions +// among group members, overriding the current default [cooperative-sticky]. +// This option is equivalent to Kafka's partition.assignment.strategies option. +// +// For balancing, Kafka chooses the first protocol that all group members agree +// to support. +// +// Note that if you opt into cooperative-sticky rebalancing, cooperative group +// balancing is incompatible with eager (classical) rebalancing and requires a +// careful rollout strategy (see KIP-429). +func Balancers(balancers ...GroupBalancer) GroupOpt { + return groupOpt{func(cfg *cfg) { cfg.balancers = balancers }} +} + +// SessionTimeout sets how long a member in the group can go between +// heartbeats, overriding the default 45,000ms. If a member does not heartbeat +// in this timeout, the broker will remove the member from the group and +// initiate a rebalance. +// +// If you are using a [GroupTransactSession] for EOS, wish to lower this, and are +// talking to a Kafka cluster pre 2.5, consider lowering the +// TransactionTimeout. If you do not, you risk a transaction finishing after a +// group has rebalanced, which could lead to duplicate processing. If you are +// talking to a Kafka 2.5+ cluster, you can safely use the +// RequireStableFetchOffsets group option and prevent any problems. +// +// This option corresponds to Kafka's session.timeout.ms setting and must be +// within the broker's group.min.session.timeout.ms and +// group.max.session.timeout.ms. +func SessionTimeout(timeout time.Duration) GroupOpt { + return groupOpt{func(cfg *cfg) { cfg.sessionTimeout = timeout }} +} + +// RebalanceTimeout sets how long group members are allowed to take when a +// rebalance has begun, overriding the default 60,000ms. This timeout is how +// long all members are allowed to complete work and commit offsets, minus the +// time it took to detect the rebalance (from a heartbeat). +// +// Kafka uses the largest rebalance timeout of all members in the group. If a +// member does not rejoin within this timeout, Kafka will kick that member from +// the group. +// +// This corresponds to Kafka's rebalance.timeout.ms. +func RebalanceTimeout(timeout time.Duration) GroupOpt { + return groupOpt{func(cfg *cfg) { cfg.rebalanceTimeout = timeout }} +} + +// HeartbeatInterval sets how long a group member goes between heartbeats to +// Kafka, overriding the default 3,000ms.
+// +// Kafka uses heartbeats to ensure that a group member's session stays active. +// This value can be any value lower than the session timeout, but should be no +// higher than 1/3rd the session timeout. +// +// This corresponds to Kafka's heartbeat.interval.ms. +func HeartbeatInterval(interval time.Duration) GroupOpt { + return groupOpt{func(cfg *cfg) { cfg.heartbeatInterval = interval }} +} + +// RequireStableFetchOffsets sets the group consumer to require "stable" fetch +// offsets before consuming from the group. Proposed in KIP-447 and introduced +// in Kafka 2.5, stable offsets are important when consuming from partitions +// that a transactional producer could be committing to. +// +// With this option, Kafka will block group consumers from fetching offsets for +// partitions that are in an active transaction. This option is **strongly** +// recommended to help prevent duplication problems. See this repo's KIP-447 +// doc to learn more. +// +// Because this can block consumption, it is strongly recommended to set +// transactional timeouts to a small value (10s) rather than the default 40s. +// Lowering the transactional timeout will reduce the chance that consumers are +// entirely blocked. +func RequireStableFetchOffsets() GroupOpt { + return groupOpt{func(cfg *cfg) { cfg.requireStable = true }} +} + +// BlockRebalanceOnPoll switches the client to block rebalances whenever you +// poll until you explicitly call AllowRebalance. This option also ensures that +// any OnPartitions{Assigned,Revoked,Lost} callbacks are only called when you +// allow rebalances; they cannot be called if you have polled and are +// processing records. +// +// By default, a consumer group is managed completely independently of +// consuming. A rebalance may occur at any moment. If you poll records, and +// then a rebalance happens, and then you commit, you may be committing to +// partitions you no longer own. This will result in duplicates. In the worst +// case, you could rewind commits that a different member has already made +// (risking duplicates if another rebalance were to happen before that other +// member commits again). +// +// By blocking rebalancing after you poll until you call AllowRebalance, you +// can be sure that you commit records that your member currently owns. +// However, the big tradeoff is that by blocking rebalances, you put your group +// member at risk of waiting so long that the group member is kicked from the +// group because it exceeded the rebalance timeout. To compare clients, Sarama +// takes the default choice of blocking rebalancing; this option makes kgo more +// similar to Sarama. +// +// If you use this option, you should ensure that you always process records +// quickly, and that your OnPartitions{Assigned,Revoked,Lost} callbacks are +// fast. It is recommended you also use PollRecords rather than PollFetches so +// that you can bound how many records you process at once. You must always +// call AllowRebalance when you are done processing the records you received. Only +// rebalances that lose partitions are blocked; rebalances that are strictly +// net additions or non-modifications do not block (the On callbacks are always +// blocked so that you can ensure their serialization). +// +// This function can largely replace any commit logic you may want to do in +// OnPartitionsRevoked. +// +// Lastly, note that this actually blocks any rebalance from calling +// OnPartitions{Assigned,Revoked,Lost}.
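As a sketch of the poll/process/allow cycle this option implies: the broker, topic, and group are placeholders, process is a hypothetical handler, and the snippet assumes the context import and the Fetches.IsClientClosed helper.

```go
cl, err := kgo.NewClient(
	kgo.SeedBrokers("localhost:9092"), // placeholder
	kgo.ConsumeTopics("events"),       // placeholder
	kgo.ConsumerGroup("my-group"),     // placeholder
	kgo.BlockRebalanceOnPoll(),
)
if err != nil {
	panic(err)
}
for {
	// Bound the work per iteration so blocked rebalances are not blocked long.
	fetches := cl.PollRecords(context.Background(), 1000)
	if fetches.IsClientClosed() {
		return
	}
	fetches.EachRecord(func(r *kgo.Record) { process(r) }) // process is hypothetical
	cl.AllowRebalance() // rebalances blocked since the poll may now proceed
}
```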
If you are using a cooperative +// rebalancer such as CooperativeSticky, a rebalance can begin right before you +// poll, and you will still receive records because no partitions are lost yet. +// The in-progress rebalance only blocks if you are assigned new partitions or +// if any of your partitions are revoked. +func BlockRebalanceOnPoll() GroupOpt { + return groupOpt{func(cfg *cfg) { cfg.blockRebalanceOnPoll = true }} +} + +// AdjustFetchOffsetsFn sets the function to be called when a group is joined +// after offsets are fetched so that a user can adjust offsets before +// consumption begins. +// +// This function should not exceed the rebalance interval. It is possible +// for the group, immediately after finishing a balance, to re-enter a new balancing +// session. This function is passed a context that is canceled if the current group +// session finishes (i.e., after revoking). +// +// If you are resetting the position of the offset, you may want to clear any existing +// "epoch" with WithEpoch(-1). If the epoch is non-negative, the client performs +// data loss detection, which may result in errors and unexpected behavior. +// +// This function is called after OnPartitionsAssigned and may be called before +// or after OnPartitionsRevoked. +func AdjustFetchOffsetsFn(adjustOffsetsBeforeAssign func(context.Context, map[string]map[int32]Offset) (map[string]map[int32]Offset, error)) GroupOpt { + return groupOpt{func(cfg *cfg) { cfg.adjustOffsetsBeforeAssign = adjustOffsetsBeforeAssign }} +} + +// OnPartitionsAssigned sets the function to be called when a group is joined +// after partitions are assigned before fetches for those partitions begin. +// +// This function, combined with OnPartitionsRevoked, should not exceed the +// rebalance interval. It is possible for the group to re-enter a new balancing +// session immediately after finishing a balance. +// +// This function is passed the client's context, which is only canceled if the +// client is closed. +// +// This function is not called concurrent with any other OnPartitions callback, +// and this function is given a new map that the user is free to modify. +// +// This function can be called at any time you are polling or processing +// records. If you want to ensure this function is called serially with +// processing, consider the BlockRebalanceOnPoll option. +func OnPartitionsAssigned(onAssigned func(context.Context, *Client, map[string][]int32)) GroupOpt { + return groupOpt{func(cfg *cfg) { cfg.onAssigned, cfg.setAssigned = onAssigned, true }} +} + +// OnPartitionsRevoked sets the function to be called once this group member +// has partitions revoked. +// +// This function, combined with [OnPartitionsAssigned], should not exceed the +// rebalance interval. It is possible for the group to re-enter a new balancing +// session immediately after finishing a balance. +// +// If autocommit is enabled, the default OnPartitionsRevoked is a blocking +// commit of all non-dirty offsets (where "dirty" is the most recent poll). +// +// The OnPartitionsRevoked function is passed the client's context, which is +// only canceled if the client is closed. OnPartitionsRevoked function is +// called at the end of a group session even if there are no partitions being +// revoked. If you are committing offsets manually (have disabled +// autocommitting), it is highly recommended to do a proper blocking commit in +// OnPartitionsRevoked. 
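For example, a manual-commit setup along the lines recommended above might look like this sketch; the broker, topic, and group are placeholders, the context and fmt imports are assumed, and CommitUncommittedOffsets is the client method that blocks while committing all offsets from previously polled fetches.

```go
cl, err := kgo.NewClient(
	kgo.SeedBrokers("localhost:9092"), // placeholder
	kgo.ConsumeTopics("events"),       // placeholder
	kgo.ConsumerGroup("my-group"),     // placeholder
	kgo.DisableAutoCommit(),
	kgo.OnPartitionsRevoked(func(ctx context.Context, cl *kgo.Client, _ map[string][]int32) {
		// Blocking commit so another member starts where we left off.
		if err := cl.CommitUncommittedOffsets(ctx); err != nil {
			fmt.Printf("revoke commit failed: %v\n", err)
		}
	}),
)
if err != nil {
	panic(err)
}
defer cl.Close()
```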
+// +// This function is not called concurrent with any other OnPartitions callback, +// and this function is given a new map that the user is free to modify. +// +// This function can be called at any time you are polling or processing +// records. If you want to ensure this function is called serially with +// processing, consider the BlockRebalanceOnPoll option. +// +// This function is called if a "fatal" group error is encountered and you have +// not set [OnPartitionsLost]. See OnPartitionsLost for more details. +func OnPartitionsRevoked(onRevoked func(context.Context, *Client, map[string][]int32)) GroupOpt { + return groupOpt{func(cfg *cfg) { cfg.onRevoked, cfg.setRevoked = onRevoked, true }} +} + +// OnPartitionsLost sets the function to be called on "fatal" group errors, +// such as IllegalGeneration, UnknownMemberID, and authentication failures. +// This function differs from [OnPartitionsRevoked] in that it is unlikely that +// commits will succeed when partitions are outright lost, whereas commits +// likely will succeed when revoking partitions. +// +// Because this function is called on any fatal group error, it is possible for +// this function to be called without the group ever being joined. +// +// This function is not called concurrent with any other OnPartitions callback, +// and this function is given a new map that the user is free to modify. +// +// This function can be called at any time you are polling or processing +// records. If you want to ensure this function is called serially with +// processing, consider the BlockRebalanceOnPoll option. +func OnPartitionsLost(onLost func(context.Context, *Client, map[string][]int32)) GroupOpt { + return groupOpt{func(cfg *cfg) { cfg.onLost, cfg.setLost = onLost, true }} +} + +// OnOffsetsFetched sets a function to be called after offsets have been +// fetched after a group has been balanced. This function is meant to allow +// users to inspect offset commit metadata. An error can be returned to exit +// this group session and exit back to join group. +// +// This function should not exceed the rebalance interval. It is possible for +// the group, immediately after finishing a balance, to re-enter a new +// balancing session. This function is passed a context that is canceled if the +// current group session finishes (i.e., after revoking). +// +// This function is called after OnPartitionsAssigned and may be called before +// or after OnPartitionsRevoked. +func OnOffsetsFetched(onFetched func(context.Context, *Client, *kmsg.OffsetFetchResponse) error) GroupOpt { + return groupOpt{func(cfg *cfg) { cfg.onFetched = onFetched }} +} + +// DisableAutoCommit disables auto committing. +// +// If you disable autocommitting, you may want to use a custom +// OnPartitionsRevoked, otherwise you may end up doubly processing records +// (which is fine, just leads to duplicate processing). Consider the scenario: +// you, member A, are processing partition 0, and previously committed offset 4 +// and have now locally processed through offset 30. A rebalance happens, and +// partition 0 moves to member B. If you use OnPartitionsRevoked, you can +// detect that you are losing this partition and commit your work through +// offset 30, so that member B can start processing at offset 30. If you do not +// commit (i.e. you do not use a custom OnPartitionsRevoked), the other member +// will start processing at offset 4. It may process through offset 50, leading +// to double processing of offsets 4 through 29.
Worse, you, member A, can +// rewind member B's commit, because member B may commit offset 50 and you may +// finally eventually commit offset 30. If a rebalance happens, then even more +// duplicate processing will occur of offsets 30 through 49. +// +// Again, OnPartitionsRevoked is not necessary, and not using it just means +// double processing, which for most workloads is fine since a simple group +// consumer is not EOS / transactional, only at-least-once. But, this is +// something to be aware of. +func DisableAutoCommit() GroupOpt { + return groupOpt{func(cfg *cfg) { cfg.autocommitDisable = true }} +} + +// GreedyAutoCommit opts into committing everything that has been polled when +// autocommitting (the dirty offsets), rather than committing what has +// previously been polled. This option may result in message loss if your +// application crashes. +func GreedyAutoCommit() GroupOpt { + return groupOpt{func(cfg *cfg) { cfg.autocommitGreedy = true }} +} + +// AutoCommitInterval sets how long to go between autocommits, overriding the +// default 5s. +func AutoCommitInterval(interval time.Duration) GroupOpt { + return groupOpt{func(cfg *cfg) { cfg.autocommitInterval = interval }} +} + +// AutoCommitMarks switches the autocommitting behavior to only commit "marked" +// records, which can be done with the MarkCommitRecords method. +// +// This option is basically a halfway point between autocommitting and manually +// committing. If you have slow batch processing of polls, then you can +// manually mark records to be autocommitted before you poll again. This way, +// if you usually take a long time between polls, your partial work can still +// be automatically checkpointed through autocommitting. +func AutoCommitMarks() GroupOpt { + return groupOpt{func(cfg *cfg) { cfg.autocommitMarks = true }} +} + +// InstanceID sets the group consumer's instance ID, switching the group member +// from "dynamic" to "static". +// +// Prior to Kafka 2.3, joining a group gave a group member a new member ID. +// The group leader could not tell if this was a rejoining member. Thus, any +// join caused the group to rebalance. +// +// Kafka 2.3 introduced the concept of an instance ID, which can persist across +// restarts. This allows for avoiding many costly rebalances and allows for +// stickier rebalancing for rejoining members (since the ID for balancing stays +// the same). The main downsides are that you, the user of a client, have to +// manage instance IDs properly, and that it may take longer to rebalance in +// the event that a client legitimately dies. +// +// When using an instance ID, the client does NOT send a leave group request +// when closing. This allows for the client to restart with the same instance +// ID and rejoin the group to avoid a rebalance. It is strongly recommended to +// increase the session timeout enough to allow time for the restart (remember +// that the default session timeout is 45s). +// +// To actually leave the group, you must use an external admin command that +// issues a leave group request on behalf of this instance ID (see kcl), or you +// can manually use the kmsg package with a proper LeaveGroupRequest. +// +// NOTE: Leaving a group with an instance ID is only supported in Kafka 2.4+. +// +// NOTE: If you restart a consumer group leader that is using an instance ID, +// it will not cause a rebalance even if you change which topics the leader is +// consuming.
If your cluster is 3.2+, this client internally works around this +// limitation and you do not need to trigger a rebalance manually. +func InstanceID(id string) GroupOpt { + return groupOpt{func(cfg *cfg) { cfg.instanceID = &id }} +} + +// GroupProtocol sets the group's join protocol, overriding the default value +// "consumer". The only reason to override this is if you are implementing +// custom join and sync group logic. +func GroupProtocol(protocol string) GroupOpt { + return groupOpt{func(cfg *cfg) { cfg.protocol = protocol }} +} + +// AutoCommitCallback sets the callback to use if autocommitting is enabled. +// This overrides the default callback that logs errors and continues. +func AutoCommitCallback(fn func(*Client, *kmsg.OffsetCommitRequest, *kmsg.OffsetCommitResponse, error)) GroupOpt { + return groupOpt{func(cfg *cfg) { + if fn != nil { + cfg.commitCallback, cfg.setCommitCallback = fn, true + } + }} +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/consumer.go b/vendor/github.com/twmb/franz-go/pkg/kgo/consumer.go new file mode 100644 index 00000000000..60222497524 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/consumer.go @@ -0,0 +1,2389 @@ +package kgo + +import ( + "context" + "errors" + "fmt" + "math" + "sort" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" +) + +// Offset is a message offset in a partition. +type Offset struct { + at int64 + relative int64 + epoch int32 + + currentEpoch int32 // set by us when mapping offsets to brokers + + noReset bool + afterMilli bool +} + +// Random negative, only significant within this package. +const atCommitted = -999 + +// MarshalJSON implements json.Marshaler. +func (o Offset) MarshalJSON() ([]byte, error) { + if o.relative == 0 { + return []byte(fmt.Sprintf(`{"At":%d,"Epoch":%d,"CurrentEpoch":%d}`, o.at, o.epoch, o.currentEpoch)), nil + } + return []byte(fmt.Sprintf(`{"At":%d,"Relative":%d,"Epoch":%d,"CurrentEpoch":%d}`, o.at, o.relative, o.epoch, o.currentEpoch)), nil +} + +// String returns the offset as a string; the purpose of this is for logs. +func (o Offset) String() string { + if o.relative == 0 { + return fmt.Sprintf("{%d e%d ce%d}", o.at, o.epoch, o.currentEpoch) + } else if o.relative > 0 { + return fmt.Sprintf("{%d+%d e%d ce%d}", o.at, o.relative, o.epoch, o.currentEpoch) + } + return fmt.Sprintf("{%d-%d e%d ce%d}", o.at, -o.relative, o.epoch, o.currentEpoch) +} + +// EpochOffset returns this offset as an EpochOffset, allowing visibility into +// what this offset actually currently is. +func (o Offset) EpochOffset() EpochOffset { + return EpochOffset{ + Epoch: o.epoch, + Offset: o.at, + } +} + +// NewOffset creates and returns an offset to use in [ConsumePartitions] or +// [ConsumeResetOffset]. +// +// The default offset begins at the end. +func NewOffset() Offset { + return Offset{ + at: -1, + epoch: -1, + } +} + +// NoResetOffset returns an offset that can be used as a "none" option for the +// [ConsumeResetOffset] option. By default, NoResetOffset starts consuming from +// the beginning of partitions (similar to NewOffset().AtStart()). This can be +// changed with AtEnd, Relative, etc. +// +// Using this offset will make it such that if OffsetOutOfRange is ever +// encountered while consuming, rather than trying to recover, the client will +// return the error to the user and enter a fatal state (for the affected +// partition). 
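A sketch contrasting the reset behaviors just described; AfterMilli is documented below, and the time and kgo imports are assumed.

```go
// Enter a fatal state (per partition) on OffsetOutOfRange instead of resetting:
opt := kgo.ConsumeResetOffset(kgo.NoResetOffset())

// Consume from the end of partitions that exist now, but from the start of
// partitions created after this moment:
opt = kgo.ConsumeResetOffset(kgo.NewOffset().AfterMilli(time.Now().UnixMilli()))
_ = opt
```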
+func NoResetOffset() Offset { + return Offset{ + at: -1, + epoch: -1, + noReset: true, + } +} + +// AfterMilli returns an offset that consumes from the first offset after a +// given timestamp. This option is *not* compatible with any At options (nor +// Relative nor WithEpoch); using any of those will clear the special +// millisecond state. +// +// This option can be used to consume at the end of existing partitions, but at +// the start of any new partitions that are created later: +// +// AfterMilli(time.Now().UnixMilli()) +// +// By default when using this offset, if consuming encounters an +// OffsetOutOfRange error, consuming will reset to the first offset after this +// timestamp. You can use NoResetOffset().AfterMilli(...) to instead switch the +// client to a fatal state (for the affected partition). +func (o Offset) AfterMilli(millisec int64) Offset { + o.at = millisec + o.relative = 0 + o.epoch = -1 + o.afterMilli = true + return o +} + +// AtStart copies 'o' and returns an offset starting at the beginning of a +// partition. +func (o Offset) AtStart() Offset { + o.afterMilli = false + o.at = -2 + return o +} + +// AtEnd copies 'o' and returns an offset starting at the end of a partition. +// If you want to consume at the end of the topic as it exists right now, but +// at the beginning of new partitions as they are added to the topic later, +// check out AfterMilli. +func (o Offset) AtEnd() Offset { + o.afterMilli = false + o.at = -1 + return o +} + +// AtCommitted copies 'o' and returns an offset that is used *only if* +// there is an existing commit. This is only useful for group consumers. +// If a partition being consumed does not have a commit, the partition will +// enter a fatal state and return an error from PollFetches. +// +// Using this function automatically opts into [NoResetOffset]. +func (o Offset) AtCommitted() Offset { + o.noReset = true + o.afterMilli = false + o.at = atCommitted + return o +} + +// Relative copies 'o' and returns an offset that starts 'n' relative to what +// 'o' currently is. If 'o' is at the end (from [AtEnd]), Relative(-100) will +// begin 100 before the end. +func (o Offset) Relative(n int64) Offset { + o.afterMilli = false + o.relative = n + return o +} + +// WithEpoch copies 'o' and returns an offset that uses the +// given epoch. This epoch is used for truncation detection; the default of -1 +// implies no truncation detection. +func (o Offset) WithEpoch(e int32) Offset { + o.afterMilli = false + if e < 0 { + e = -1 + } + o.epoch = e + return o +} + +// At returns a copy of the calling offset, changing the returned offset to +// begin at exactly the requested offset. +// +// There are two potential special offsets to use: -2 allows for consuming at +// the start, and -1 allows for consuming at the end. These two offsets are +// equivalent to calling AtStart or AtEnd. +// +// If the offset is less than -2, the client bounds it to -2 to consume at the +// start.
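A few equivalences implied by the special values above (sketch; kgo import assumed):

```go
offsets := []kgo.Offset{
	kgo.NewOffset().At(-2),   // same as kgo.NewOffset().AtStart()
	kgo.NewOffset().At(-1),   // same as kgo.NewOffset().AtEnd()
	kgo.NewOffset().At(42),   // begin at exactly offset 42
	kgo.NewOffset().At(-100), // bounded to -2, i.e. the start
}
_ = offsets
```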
+func (o Offset) At(at int64) Offset { + o.afterMilli = false + if at < -2 { + at = -2 + } + o.at = at + return o +} + +type consumer struct { + bufferedRecords atomicI64 + bufferedBytes atomicI64 + + cl *Client + + pausedMu sync.Mutex // grabbed when updating paused + paused atomic.Value // loaded when issuing fetches + + // mu is grabbed when + // - polling fetches, for quickly draining sources / updating group uncommitted + // - calling assignPartitions (group / direct updates) + mu sync.Mutex + d *directConsumer // if non-nil, we are consuming partitions directly + g *groupConsumer // if non-nil, we are consuming as a group member + + // On metadata update, if the consumer is set (direct or group), the + // client begins a goroutine that updates the consumer kind's + // assignments. + // + // This is done in a goroutine to not block the metadata loop, because + // the update **could** wait on a group consumer leaving if a + // concurrent LeaveGroup is called, or if restarting a session takes + // just a little bit of time. + // + // The update realistically should be instantaneous, but if it is slow, + // some metadata updates could pile up. We loop with our atomic work + // loop, which collapses repeated updates into one extra update, so we + // loop as little as necessary. + outstandingMetadataUpdates workLoop + + // sessionChangeMu is grabbed when a session is stopped and held through + // when a session can be started again. The sole purpose is to block an + // assignment change running concurrently with a metadata update. + sessionChangeMu sync.Mutex + + session atomic.Value // *consumerSession + kill atomic.Bool + + usingCursors usedCursors + + sourcesReadyMu sync.Mutex + sourcesReadyCond *sync.Cond + sourcesReadyForDraining []*source + fakeReadyForDraining []Fetch + + pollWaitMu sync.Mutex + pollWaitC *sync.Cond + pollWaitState uint64 // 0 == nothing, low 32 bits: # pollers, high 32: # waiting rebalances +} + +func (c *consumer) loadPaused() pausedTopics { return c.paused.Load().(pausedTopics) } +func (c *consumer) clonePaused() pausedTopics { return c.paused.Load().(pausedTopics).clone() } +func (c *consumer) storePaused(p pausedTopics) { c.paused.Store(p) } + +func (c *consumer) waitAndAddPoller() { + if !c.cl.cfg.blockRebalanceOnPoll { + return + } + c.pollWaitMu.Lock() + defer c.pollWaitMu.Unlock() + for c.pollWaitState>>32 != 0 { + c.pollWaitC.Wait() + } + // Rebalance always takes priority, but if there are no active + // rebalances, our poll blocks rebalances. + c.pollWaitState++ +} + +func (c *consumer) unaddPoller() { + if !c.cl.cfg.blockRebalanceOnPoll { + return + } + c.pollWaitMu.Lock() + defer c.pollWaitMu.Unlock() + c.pollWaitState-- + c.pollWaitC.Broadcast() +} + +func (c *consumer) allowRebalance() { + if !c.cl.cfg.blockRebalanceOnPoll { + return + } + c.pollWaitMu.Lock() + defer c.pollWaitMu.Unlock() + // When allowing rebalances, the user is explicitly saying all pollers + // are done. We mask them out. 
+ c.pollWaitState &= math.MaxUint32 << 32 + c.pollWaitC.Broadcast() +} + +func (c *consumer) waitAndAddRebalance() { + if !c.cl.cfg.blockRebalanceOnPoll { + return + } + c.pollWaitMu.Lock() + defer c.pollWaitMu.Unlock() + c.pollWaitState += 1 << 32 + for c.pollWaitState&math.MaxUint32 != 0 { + c.pollWaitC.Wait() + } +} + +func (c *consumer) unaddRebalance() { + if !c.cl.cfg.blockRebalanceOnPoll { + return + } + c.pollWaitMu.Lock() + defer c.pollWaitMu.Unlock() + c.pollWaitState -= 1 << 32 + c.pollWaitC.Broadcast() +} + +// BufferedFetchRecords returns the number of records currently buffered from +// fetching within the client. +// +// This can be used as a gauge to determine how behind your application is for +// processing records the client has fetched. Note that it is perfectly normal +// to see a spike of buffered records, which would correspond to a fetch +// response being processed just before a call to this function. It is only +// problematic for you if this function is consistently returning large +// values. +func (cl *Client) BufferedFetchRecords() int64 { + return cl.consumer.bufferedRecords.Load() +} + +// BufferedFetchBytes returns the number of bytes currently buffered from +// fetching within the client. This is the sum of all keys, values, and header +// keys/values. See the related [BufferedFetchRecords] for more information. +func (cl *Client) BufferedFetchBytes() int64 { + return cl.consumer.bufferedBytes.Load() +} + +type usedCursors map[*cursor]struct{} + +func (u *usedCursors) use(c *cursor) { + if *u == nil { + *u = make(map[*cursor]struct{}) + } + (*u)[c] = struct{}{} +} + +func (c *consumer) init(cl *Client) { + c.cl = cl + c.paused.Store(make(pausedTopics)) + c.sourcesReadyCond = sync.NewCond(&c.sourcesReadyMu) + c.pollWaitC = sync.NewCond(&c.pollWaitMu) + + if len(cl.cfg.topics) > 0 || len(cl.cfg.partitions) > 0 { + defer cl.triggerUpdateMetadataNow("querying metadata for consumer initialization") // we definitely want to trigger a metadata update + } + + if len(cl.cfg.group) == 0 { + c.initDirect() + } else { + c.initGroup() + } +} + +func (c *consumer) consuming() bool { + return c.g != nil || c.d != nil +} + +// addSourceReadyForDraining tracks that a source needs its buffered fetch +// consumed. +func (c *consumer) addSourceReadyForDraining(source *source) { + c.sourcesReadyMu.Lock() + c.sourcesReadyForDraining = append(c.sourcesReadyForDraining, source) + c.sourcesReadyMu.Unlock() + c.sourcesReadyCond.Broadcast() +} + +// addFakeReadyForDraining saves a fake fetch that has important partition +// errors--data loss or auth failures. +func (c *consumer) addFakeReadyForDraining(topic string, partition int32, err error, why string) { + c.cl.cfg.logger.Log(LogLevelInfo, "injecting fake fetch with an error", "err", err, "why", why) + c.sourcesReadyMu.Lock() + c.fakeReadyForDraining = append(c.fakeReadyForDraining, Fetch{Topics: []FetchTopic{{ + Topic: topic, + Partitions: []FetchPartition{{ + Partition: partition, + Err: err, + }}, + }}}) + c.sourcesReadyMu.Unlock() + c.sourcesReadyCond.Broadcast() +} + +// NewErrFetch returns a fake fetch containing a single empty topic with a +// single partition of -1 with the given error. +func NewErrFetch(err error) Fetches { + return []Fetch{{ + Topics: []FetchTopic{{ + Topic: "", + Partitions: []FetchPartition{{ + Partition: -1, + Err: err, + }}, + }}, + }} +} + +// PollFetches waits for fetches to be available, returning as soon as any +// broker returns a fetch.
If the context is nil, this function will return +// immediately with any currently buffered records. +// +// If the client is closed, a fake fetch will be injected that has no topic, a +// partition of -1, and a partition error of ErrClientClosed. If the context is +// canceled, a fake fetch will be injected with ctx.Err. These injected errors +// can be used to break out of a poll loop. +// +// It is important to check all partition errors in the returned fetches. If +// any partition has a fatal error and actually had no records, a fake fetch +// will be injected with the error. +// +// If you are group consuming, a rebalance can happen under the hood while you +// process the returned fetches. This can result in duplicate work, and you may +// accidentally commit to partitions that you no longer own. You can prevent +// this by using BlockRebalanceOnPoll, but this comes with different tradeoffs. +// See the documentation on BlockRebalanceOnPoll for more information. +func (cl *Client) PollFetches(ctx context.Context) Fetches { + return cl.PollRecords(ctx, 0) +} + +// PollRecords waits for records to be available, returning as soon as any +// broker returns records in a fetch. If the context is nil, this function will +// return immediately with any currently buffered records. +// +// If the client is closed, a fake fetch will be injected that has no topic, a +// partition of -1, and a partition error of ErrClientClosed. If the context is +// canceled, a fake fetch will be injected with ctx.Err. These injected errors +// can be used to break out of a poll loop. +// +// This returns a maximum of maxPollRecords total across all fetches, or +// returns all buffered records if maxPollRecords is <= 0. +// +// It is important to check all partition errors in the returned fetches. If +// any partition has a fatal error and actually had no records, a fake fetch +// will be injected with the error. +// +// If you are group consuming, a rebalance can happen under the hood while you +// process the returned fetches. This can result in duplicate work, and you may +// accidentally commit to partitions that you no longer own. You can prevent +// this by using BlockRebalanceOnPoll, but this comes with different tradeoffs. +// See the documentation on BlockRebalanceOnPoll for more information. +func (cl *Client) PollRecords(ctx context.Context, maxPollRecords int) Fetches { + if maxPollRecords == 0 { + maxPollRecords = -1 + } + c := &cl.consumer + + c.g.undirtyUncommitted() + + // If the user gave us a canceled context, we bail immediately after + // un-dirty-ing marked records. + if ctx != nil { + select { + case <-ctx.Done(): + return NewErrFetch(ctx.Err()) + default: + } + } + + var fetches Fetches + fill := func() { + if c.cl.cfg.blockRebalanceOnPoll { + c.waitAndAddPoller() + defer func() { + if len(fetches) == 0 { + c.unaddPoller() + } + }() + } + + paused := c.loadPaused() + + // A group can grab the consumer lock then the group mu and + // assign partitions. The group mu is grabbed to update its + // uncommitted map. Assigning partitions clears sources ready + // for draining. + // + // We need to grab the consumer mu to ensure proper lock + // ordering and prevent lock inversion.
Polling fetches also + // updates the group's uncommitted map; if we do not grab the + // consumer mu at the top, we have a problem: without the lock, + // we could have grabbed some sources, then a group assigned, + // and after the assign, we update uncommitted with fetches + // from the old assignment + c.mu.Lock() + defer c.mu.Unlock() + + c.sourcesReadyMu.Lock() + if maxPollRecords < 0 { + for _, ready := range c.sourcesReadyForDraining { + fetches = append(fetches, ready.takeBuffered(paused)) + } + c.sourcesReadyForDraining = nil + } else { + for len(c.sourcesReadyForDraining) > 0 && maxPollRecords > 0 { + source := c.sourcesReadyForDraining[0] + fetch, taken, drained := source.takeNBuffered(paused, maxPollRecords) + if drained { + c.sourcesReadyForDraining = c.sourcesReadyForDraining[1:] + } + maxPollRecords -= taken + fetches = append(fetches, fetch) + } + } + + realFetches := fetches + + fetches = append(fetches, c.fakeReadyForDraining...) + c.fakeReadyForDraining = nil + + c.sourcesReadyMu.Unlock() + + if len(realFetches) == 0 { + return + } + + // Before returning, we want to update our uncommitted. If we + // updated after, then we could end up with weird interactions + // with group invalidations where we return a stale fetch after + // committing in onRevoke. + // + // A blocking onRevoke commit, on finish, allows a new group + // session to start. If we returned stale fetches that did not + // have their uncommitted offset tracked, then we would allow + // duplicates. + if c.g != nil { + c.g.updateUncommitted(realFetches) + } + } + + // We try filling fetches once before waiting. If we have no context, + // we guarantee that we just drain anything available and return. + fill() + if len(fetches) > 0 || ctx == nil { + return fetches + } + + done := make(chan struct{}) + quit := false + go func() { + c.sourcesReadyMu.Lock() + defer c.sourcesReadyMu.Unlock() + defer close(done) + + for !quit && len(c.sourcesReadyForDraining) == 0 && len(c.fakeReadyForDraining) == 0 { + c.sourcesReadyCond.Wait() + } + }() + + exit := func() { + c.sourcesReadyMu.Lock() + quit = true + c.sourcesReadyMu.Unlock() + c.sourcesReadyCond.Broadcast() + } + + select { + case <-cl.ctx.Done(): + exit() + return NewErrFetch(ErrClientClosed) + case <-ctx.Done(): + exit() + return NewErrFetch(ctx.Err()) + case <-done: + } + + fill() + return fetches +} + +// AllowRebalance allows a consumer group to rebalance if it was blocked by you +// polling records in tandem with the BlockRebalanceOnPoll option. +// +// You can poll many times before calling this function; this function +// internally resets the poll count and allows any blocked rebalances to +// continue. Rebalances take priority: if a rebalance is blocked, and you allow +// rebalances and then immediately poll, your poll will be blocked until the +// rebalance completes. Internally, this function simply waits for lost +// partitions to stop being fetched before allowing you to poll again. +func (cl *Client) AllowRebalance() { + cl.consumer.allowRebalance() +} + +// UpdateFetchMaxBytes updates the max bytes that a fetch request will ask for +// and the max partition bytes that a fetch request will ask for each +// partition. +func (cl *Client) UpdateFetchMaxBytes(maxBytes, maxPartBytes int32) { + cl.cfg.maxBytes.store(maxBytes) + cl.cfg.maxPartBytes.store(maxPartBytes) +} + +// PauseFetchTopics sets the client to no longer fetch the given topics and +// returns all currently paused topics. Paused topics persist until resumed. 
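A sketch of the pause/resume cycle (the Resume counterparts are defined below; the topic and partition values are placeholders):

```go
// Stop fetching a whole topic, and separately a single partition:
cl.PauseFetchTopics("events")                            // placeholder topic
cl.PauseFetchPartitions(map[string][]int32{"logs": {0}}) // placeholder topic/partition

// ... later, resume both:
cl.ResumeFetchTopics("events")
cl.ResumeFetchPartitions(map[string][]int32{"logs": {0}})
```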
+// You can call this function with no topics to simply receive the list of +// currently paused topics. +// +// Pausing topics is independent from pausing individual partitions with the +// PauseFetchPartitions method. If you pause partitions for a topic with +// PauseFetchPartitions, and then pause that same topic with PauseFetchTopics, +// the individually paused partitions will not be unpaused if you only call +// ResumeFetchTopics. +func (cl *Client) PauseFetchTopics(topics ...string) []string { + c := &cl.consumer + if len(topics) == 0 { + return c.loadPaused().pausedTopics() + } + c.pausedMu.Lock() + defer c.pausedMu.Unlock() + paused := c.clonePaused() + paused.addTopics(topics...) + c.storePaused(paused) + return paused.pausedTopics() +} + +// PauseFetchPartitions sets the client to no longer fetch the given partitions +// and returns all currently paused partitions. Paused partitions persist until +// resumed. You can call this function with no partitions to simply receive the +// list of currently paused partitions. +// +// Pausing individual partitions is independent from pausing topics with the +// PauseFetchTopics method. If you pause partitions for a topic with +// PauseFetchPartitions, and then pause that same topic with PauseFetchTopics, +// the individually paused partitions will not be unpaused if you only call +// ResumeFetchTopics. +func (cl *Client) PauseFetchPartitions(topicPartitions map[string][]int32) map[string][]int32 { + c := &cl.consumer + if len(topicPartitions) == 0 { + return c.loadPaused().pausedPartitions() + } + c.pausedMu.Lock() + defer c.pausedMu.Unlock() + paused := c.clonePaused() + paused.addPartitions(topicPartitions) + c.storePaused(paused) + return paused.pausedPartitions() +} + +// ResumeFetchTopics resumes fetching the input topics if they were previously +// paused. Resuming topics that are not currently paused is a per-topic no-op. +// See the documentation on PauseFetchTopics for more details. +func (cl *Client) ResumeFetchTopics(topics ...string) { + defer cl.allSinksAndSources(func(sns sinkAndSource) { + sns.source.maybeConsume() + }) + + c := &cl.consumer + c.pausedMu.Lock() + defer c.pausedMu.Unlock() + + paused := c.clonePaused() + paused.delTopics(topics...) + c.storePaused(paused) +} + +// ResumeFetchPartitions resumes fetching the input partitions if they were +// previously paused. Resuming partitions that are not currently paused is a +// per-partition no-op. See the documentation on PauseFetchPartitions for more +// details. +func (cl *Client) ResumeFetchPartitions(topicPartitions map[string][]int32) { + defer cl.allSinksAndSources(func(sns sinkAndSource) { + sns.source.maybeConsume() + }) + + c := &cl.consumer + c.pausedMu.Lock() + defer c.pausedMu.Unlock() + + paused := c.clonePaused() + paused.delPartitions(topicPartitions) + c.storePaused(paused) +} + +// SetOffsets sets any matching offsets in setOffsets to the given +// epoch/offset. Partitions that are not specified are not set. It is invalid +// to set topics that were not yet returned from a PollFetches: this function +// sets only partitions that were previously consumed, any extra partitions are +// skipped. +// +// If directly consuming, this function operates as expected given the caveats +// of the prior paragraph. +// +// If using transactions, it is advised to just use a GroupTransactSession and +// avoid this function entirely.
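A sketch of rewinding a previously consumed partition; the topic, partition, and offset are placeholders, and an epoch of -1 skips truncation detection:

```go
cl.SetOffsets(map[string]map[int32]kgo.EpochOffset{
	"events": { // must have been previously returned from PollFetches
		0: {Epoch: -1, Offset: 500}, // rewind partition 0 to offset 500
	},
})
```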
+// +// If using group consuming, it is strongly recommended to use this function +// outside of the context of a PollFetches loop and only when you know the +// group is not revoked (i.e., block any concurrent revoke while issuing this +// call) and to not use this concurrent with committing. Any other usage is +// prone to odd interactions. +func (cl *Client) SetOffsets(setOffsets map[string]map[int32]EpochOffset) { + cl.setOffsets(setOffsets, true) +} + +func (cl *Client) setOffsets(setOffsets map[string]map[int32]EpochOffset, log bool) { + if len(setOffsets) == 0 { + return + } + + // We assignPartitions before returning, so we grab the consumer lock + // first to preserve consumer mu => group mu ordering, or to ensure + // no concurrent metadata assign for direct consuming. + c := &cl.consumer + c.mu.Lock() + defer c.mu.Unlock() + + var assigns map[string]map[int32]Offset + var tps *topicsPartitions + switch { + case c.d != nil: + assigns = c.d.getSetAssigns(setOffsets) + tps = c.d.tps + case c.g != nil: + assigns = c.g.getSetAssigns(setOffsets) + tps = c.g.tps + } + if len(assigns) == 0 { + return + } + if log { + c.assignPartitions(assigns, assignSetMatching, tps, "from manual SetOffsets") + } else { + c.assignPartitions(assigns, assignSetMatching, tps, "") + } +} + +// This is guaranteed to be called in a blocking metadata fn, which ensures +// that metadata does not load the tps we are changing. Basically, we ensure +// everything w.r.t. consuming is at a stand still. +func (c *consumer) purgeTopics(topics []string) { + if c.g == nil && c.d == nil { + return + } + + purgeAssignments := make(map[string]map[int32]Offset, len(topics)) + for _, topic := range topics { + purgeAssignments[topic] = nil + } + + c.waitAndAddRebalance() + defer c.unaddRebalance() + + c.mu.Lock() + defer c.mu.Unlock() + + // The difference for groups is we need to lock the group and there is + // a slight type difference in g.using vs d.using. + if c.g != nil { + c.g.mu.Lock() + defer c.g.mu.Unlock() + c.assignPartitions(purgeAssignments, assignPurgeMatching, c.g.tps, fmt.Sprintf("purge of %v requested", topics)) + for _, topic := range topics { + delete(c.g.using, topic) + delete(c.g.reSeen, topic) + } + c.g.rejoin("rejoin from PurgeFetchTopics") + } else { + c.assignPartitions(purgeAssignments, assignPurgeMatching, c.d.tps, fmt.Sprintf("purge of %v requested", topics)) + for _, topic := range topics { + delete(c.d.using, topic) + delete(c.d.reSeen, topic) + delete(c.d.m, topic) + } + } +} + +// AddConsumeTopics adds new topics to be consumed. This function is a no-op if +// the client is configured to consume via regex. +// +// Note that if you are directly consuming and specified ConsumePartitions, +// this function will not add the rest of the partitions for a topic unless the +// topic has been previously purged. That is, if you directly consumed only one +// of five partitions originally, this will not add the other four until the +// entire topic is purged. +func (cl *Client) AddConsumeTopics(topics ...string) { + c := &cl.consumer + if len(topics) == 0 || c.g == nil && c.d == nil || cl.cfg.regex { + return + } + + // We can do this outside of the metadata loop because we are strictly + // adding new topics and forbid regex consuming.
+ c.mu.Lock() + defer c.mu.Unlock() + + if c.g != nil { + c.g.tps.storeTopics(topics) + } else { + c.d.tps.storeTopics(topics) + for _, topic := range topics { + c.d.m.addt(topic) + } + } + cl.triggerUpdateMetadataNow("from AddConsumeTopics") +} + +// GetConsumeTopics retrieves a list of current topics being consumed. +func (cl *Client) GetConsumeTopics() []string { + c := &cl.consumer + if c.g == nil && c.d == nil { + return nil + } + var m map[string]*topicPartitions + var ok bool + if c.g != nil { + m, ok = c.g.tps.v.Load().(topicsPartitionsData) + } else { + m, ok = c.d.tps.v.Load().(topicsPartitionsData) + } + if !ok { + return nil + } + topics := make([]string, 0, len(m)) + for k := range m { + topics = append(topics, k) + } + return topics +} + +// AddConsumePartitions adds new partitions to be consumed at the given +// offsets. This function works only for direct, non-regex consumers. +func (cl *Client) AddConsumePartitions(partitions map[string]map[int32]Offset) { + c := &cl.consumer + if c.d == nil || cl.cfg.regex { + return + } + var topics []string + for t, ps := range partitions { + if len(ps) == 0 { + delete(partitions, t) + continue + } + topics = append(topics, t) + } + if len(partitions) == 0 { + return + } + + c.mu.Lock() + defer c.mu.Unlock() + + c.d.tps.storeTopics(topics) + for t, ps := range partitions { + if c.d.ps[t] == nil { + c.d.ps[t] = make(map[int32]Offset) + } + for p, o := range ps { + c.d.m.add(t, p) + c.d.ps[t][p] = o + } + } + cl.triggerUpdateMetadataNow("from AddConsumePartitions") +} + +// RemoveConsumePartitions removes partitions from being consumed. This +// function works only for direct, non-regex consumers. +// +// This method does not purge the concept of any topics from the client -- if +// you remove all partitions from a topic that was being consumed, metadata +// fetches will still occur for the topic. If you want to remove the topic +// entirely, use PurgeTopicsFromClient. +// +// If you specified ConsumeTopics and this function removes all partitions for +// a topic, the topic will no longer be consumed. +func (cl *Client) RemoveConsumePartitions(partitions map[string][]int32) { + c := &cl.consumer + if c.d == nil || cl.cfg.regex { + return + } + for t, ps := range partitions { + if len(ps) == 0 { + delete(partitions, t) + continue + } + } + if len(partitions) == 0 { + return + } + + c.mu.Lock() + defer c.mu.Unlock() + + removeOffsets := make(map[string]map[int32]Offset, len(partitions)) + for t, ps := range partitions { + removePartitionOffsets := make(map[int32]Offset, len(ps)) + for _, p := range ps { + removePartitionOffsets[p] = Offset{} + } + removeOffsets[t] = removePartitionOffsets + } + + c.assignPartitions(removeOffsets, assignInvalidateMatching, c.d.tps, fmt.Sprintf("remove of %v requested", partitions)) + for t, ps := range partitions { + for _, p := range ps { + c.d.using.remove(t, p) + c.d.m.remove(t, p) + delete(c.d.ps[t], p) + } + if len(c.d.ps[t]) == 0 { + delete(c.d.ps, t) + } + } +} + +// assignHow controls how assignPartitions operates. +type assignHow int8 + +const ( + // This option simply assigns new offsets, doing nothing with existing + // offsets / active fetches / buffered fetches. + assignWithoutInvalidating assignHow = iota + + // This option invalidates active fetches so they will not buffer and + // drops all buffered fetches, and then continues to assign the new + // assignments.
+ assignInvalidateAll + + // This option does not assign, but instead invalidates any active + // fetches for "assigned" (actually lost) partitions. This additionally + // drops all buffered fetches, because they could contain partitions we + // lost. Thus, with this option, the actual offset in the map is + // meaningless / a dummy offset. + assignInvalidateMatching + + assignPurgeMatching + + // The counterpart to assignInvalidateMatching, assignSetMatching + // resets all matching partitions to the specified offset / epoch. + assignSetMatching +) + +func (h assignHow) String() string { + switch h { + case assignWithoutInvalidating: + return "assigning everything new, keeping current assignment" + case assignInvalidateAll: + return "unassigning everything" + case assignInvalidateMatching: + return "unassigning any currently assigned matching partition that is in the input" + case assignPurgeMatching: + return "unassigning and purging any partition matching the input topics" + case assignSetMatching: + return "reassigning any currently assigned matching partition to the input" + } + return "" +} + +type fmtAssignment map[string]map[int32]Offset + +func (f fmtAssignment) String() string { + var sb strings.Builder + + var topicsWritten int + for topic, partitions := range f { + topicsWritten++ + sb.WriteString(topic) + sb.WriteString("[") + + var partitionsWritten int + for partition, offset := range partitions { + fmt.Fprintf(&sb, "%d%s", partition, offset) + partitionsWritten++ + if partitionsWritten < len(partitions) { + sb.WriteString(" ") + } + } + + sb.WriteString("]") + if topicsWritten < len(f) { + sb.WriteString(", ") + } + } + + return sb.String() +} + +// assignPartitions, called under the consumer's mu, is used to set new cursors +// or add to the existing cursors. +// +// We do not need to pass tps when we are bumping the session or when we are +// invalidating all. All other cases, we want the tps -- the logic below does +// not fully differentiate needing to start a new session vs. just reusing the +// old (third if case below) +func (c *consumer) assignPartitions(assignments map[string]map[int32]Offset, how assignHow, tps *topicsPartitions, why string) { + if c.mu.TryLock() { + c.mu.Unlock() + panic("assignPartitions called without holding the consumer lock, this is a bug in franz-go, please open an issue at github.com/twmb/franz-go") + } + + // The internal code can avoid giving an assign reason in cases where + // the caller logs itself immediately before assigning. We only log if + // there is a reason. + if len(why) > 0 { + c.cl.cfg.logger.Log(LogLevelInfo, "assigning partitions", + "why", why, + "how", how, + "input", fmtAssignment(assignments), + ) + } + var session *consumerSession + var loadOffsets listOrEpochLoads + + defer func() { + if session == nil { // if nil, we stopped the session + session = c.startNewSession(tps) + } else { // else we guarded it + c.unguardSessionChange(session) + } + loadOffsets.loadWithSession(session, "loading offsets in new session from assign") // odds are this assign came from a metadata update, so no reason to force a refresh with loadWithSessionNow + + // If we started a new session or if we unguarded, we have one + // worker. This one worker allowed us to safely add our load + // offsets before the session could be concurrently stopped + // again. Now that we have added the load offsets, we allow the + // session to be stopped. 
+ session.decWorker() + }() + + if how == assignWithoutInvalidating { + // Guarding a session change can actually create a new session + // if we had no session before, which is why we need to pass in + // our topicPartitions. + session = c.guardSessionChange(tps) + } else { + loadOffsets, _ = c.stopSession() + + // First, over all cursors currently in use, we unset them or set them + // directly as appropriate. Anything we do not unset, we keep. + + var keep usedCursors + for usedCursor := range c.usingCursors { + shouldKeep := true + if how == assignInvalidateAll { + usedCursor.unset() + shouldKeep = false + } else { // invalidateMatching or setMatching + if assignTopic, ok := assignments[usedCursor.topic]; ok { + if how == assignPurgeMatching { // topic level + usedCursor.source.removeCursor(usedCursor) + shouldKeep = false + } else if assignPart, ok := assignTopic[usedCursor.partition]; ok { + if how == assignInvalidateMatching { + usedCursor.unset() + shouldKeep = false + } else { // how == assignSetMatching + usedCursor.setOffset(cursorOffset{ + offset: assignPart.at, + lastConsumedEpoch: assignPart.epoch, + }) + } + } + } + } + if shouldKeep { + keep.use(usedCursor) + } + } + c.usingCursors = keep + + // For any partition that was listing offsets or loading + // epochs, we want to ensure that if we are keeping those + // partitions, we re-start the list/load. + // + // Note that we do not need to unset cursors here; anything + // that actually resulted in a cursor is forever tracked in + // usedCursors. We only do not have used cursors if an + // assignment went straight to listing / epoch loading, and + // that list/epoch never finished. + switch how { + case assignWithoutInvalidating: + // Nothing to do -- this is handled above. + case assignInvalidateAll: + loadOffsets = listOrEpochLoads{} + case assignSetMatching: + // We had not yet loaded this partition, so there is + // nothing to set, and we keep everything. + case assignInvalidateMatching: + loadOffsets.keepFilter(func(t string, p int32) bool { + if assignTopic, ok := assignments[t]; ok { + if _, ok := assignTopic[p]; ok { + return false + } + } + return true + }) + case assignPurgeMatching: + // This is slightly different than invalidate in that + // we invalidate whole topics. + loadOffsets.keepFilter(func(t string, _ int32) bool { + _, ok := assignments[t] + return !ok // assignments are topics to purge -- do NOT keep the topic if it is being purged + }) + // We have to purge from tps _after_ the session is + // stopped. If we purge early while the session is + // ongoing, then another goroutine could be loading and + // using tps and expecting topics not yet removed from + // assignPartitions to still be there. Specifically, + // mapLoadsToBrokers could be expecting topic foo to be + // there (from the session!), so if we purge foo before + // stopping the session, we will panic. + topics := make([]string, 0, len(assignments)) + for t := range assignments { + topics = append(topics, t) + } + tps.purgeTopics(topics) + } + } + + // This assignment could contain nothing (for the purposes of + // invalidating active fetches), so we only do this if needed. + if len(assignments) == 0 || how != assignWithoutInvalidating { + return + } + + c.cl.cfg.logger.Log(LogLevelDebug, "assign requires loading offsets") + + topics := tps.load() + for topic, partitions := range assignments { + topicPartitions := topics.loadTopic(topic) // should be non-nil + if topicPartitions == nil { + c.cl.cfg.logger.Log(LogLevelError, "BUG! 
consumer was assigned topic that we did not ask for in ConsumeTopics nor ConsumePartitions, skipping!", "topic", topic)
+			continue
+		}
+
+		for partition, offset := range partitions {
+			// If we are loading the first record after a millisec,
+			// we go directly to listing offsets. Epoch validation
+			// does not ever set afterMilli.
+			if offset.afterMilli {
+				loadOffsets.addLoad(topic, partition, loadTypeList, offsetLoad{
+					replica: -1,
+					Offset:  offset,
+				})
+				continue
+			}
+
+			// First, if the request is exact, get rid of the relative
+			// portion. We are modifying a copy of the offset, i.e. we
+			// are appropriately not modifying 'assignments' itself.
+			if offset.at >= 0 {
+				offset.at += offset.relative
+				if offset.at < 0 {
+					offset.at = 0
+				}
+				offset.relative = 0
+			}
+
+			// If we are requesting an exact offset with an epoch,
+			// we do truncation detection and then use the offset.
+			//
+			// Otherwise, an epoch is specified without an exact
+			// request which is useless for us, or a request is
+			// specified without a known epoch.
+			//
+			// The client ensures the epoch is non-negative from
+			// fetch offsets only if the broker supports KIP-320,
+			// but we do not override the user manually specifying
+			// an epoch.
+			if offset.at >= 0 && offset.epoch >= 0 {
+				loadOffsets.addLoad(topic, partition, loadTypeEpoch, offsetLoad{
+					replica: -1,
+					Offset:  offset,
+				})
+				continue
+			}
+
+			// If an exact offset is specified and we have loaded
+			// the partition, we use it. We have to use epoch -1
+			// rather than the latest loaded epoch on the partition
+			// because the offset being requested to use could be
+			// from an epoch after OUR loaded epoch. Otherwise, we
+			// could update the metadata, see the later epoch,
+			// request the end offset for our prior epoch, and then
+			// think data loss occurred.
+			//
+			// If an offset is unspecified or we have not loaded
+			// the partition, we list offsets to find out what to
+			// use.
+			if offset.at >= 0 && partition >= 0 && partition < int32(len(topicPartitions.partitions)) {
+				part := topicPartitions.partitions[partition]
+				cursor := part.cursor
+				cursor.setOffset(cursorOffset{
+					offset:            offset.at,
+					lastConsumedEpoch: -1,
+				})
+				cursor.allowUsable()
+				c.usingCursors.use(cursor)
+				continue
+			}
+
+			// If the offset is atCommitted, then no offset was
+			// loaded from FetchOffsets. We inject an error and
+			// avoid using this partition.
+			if offset.at == atCommitted {
+				c.addFakeReadyForDraining(topic, partition, errNoCommittedOffset, "notification of uncommitted partition")
+				continue
+			}
+
+			loadOffsets.addLoad(topic, partition, loadTypeList, offsetLoad{
+				replica: -1,
+				Offset:  offset,
+			})
+		}
+	}
+}
+
+// filterMetadataAllTopics, called BEFORE doOnMetadataUpdate, evaluates
+// all topics received against the user provided regex.
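+//
+// A hedged usage sketch (not from the upstream source): with the ConsumeRegex
+// option, the strings given to ConsumeTopics are compiled as regular
+// expressions and evaluated against metadata here. The topic pattern below is
+// hypothetical:
+//
+//	cl, _ := kgo.NewClient(
+//		kgo.SeedBrokers("localhost:9092"),
+//		kgo.ConsumeRegex(),
+//		kgo.ConsumeTopics("^orders-.*"),
+//	)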
+func (c *consumer) filterMetadataAllTopics(topics []string) []string { + c.mu.Lock() + defer c.mu.Unlock() + + var rns reNews + defer rns.log(&c.cl.cfg) + + var reSeen map[string]bool + if c.d != nil { + reSeen = c.d.reSeen + } else { + reSeen = c.g.reSeen + } + + keep := topics[:0] + for _, topic := range topics { + want, seen := reSeen[topic] + if !seen { + for rawRe, re := range c.cl.cfg.topics { + if want = re.MatchString(topic); want { + rns.add(rawRe, topic) + break + } + } + if !want { + rns.skip(topic) + } + reSeen[topic] = want + } + if want { + keep = append(keep, topic) + } + } + return keep +} + +func (c *consumer) doOnMetadataUpdate() { + if !c.consuming() { + return + } + + // See the comment on the outstandingMetadataUpdates field for why this + // block below. + if c.outstandingMetadataUpdates.maybeBegin() { + doUpdate := func() { + // We forbid reassignments while we do a quick check for + // new assignments--for the direct consumer particularly, + // this prevents TOCTOU, and guards against a concurrent + // assignment from SetOffsets. + c.mu.Lock() + defer c.mu.Unlock() + + switch { + case c.d != nil: + if new := c.d.findNewAssignments(); len(new) > 0 { + c.assignPartitions(new, assignWithoutInvalidating, c.d.tps, "new assignments from direct consumer") + } + case c.g != nil: + c.g.findNewAssignments() + } + + go c.loadSession().doOnMetadataUpdate() + } + + go func() { + again := true + for again { + doUpdate() + again = c.outstandingMetadataUpdates.maybeFinish(false) + } + }() + } +} + +func (s *consumerSession) doOnMetadataUpdate() { + if s == nil || s == noConsumerSession { // no session started yet + return + } + + s.listOrEpochMu.Lock() + defer s.listOrEpochMu.Unlock() + + if s.listOrEpochMetaCh == nil { + return // nothing waiting to load epochs / offsets + } + select { + case s.listOrEpochMetaCh <- struct{}{}: + default: + } +} + +type offsetLoadMap map[string]map[int32]offsetLoad + +// offsetLoad is effectively an Offset, but also includes a potential replica +// to directly use if a cursor had a preferred replica. +type offsetLoad struct { + replica int32 // -1 means leader + Offset +} + +func (o offsetLoad) MarshalJSON() ([]byte, error) { + if o.replica == -1 { + return o.Offset.MarshalJSON() + } + if o.relative == 0 { + return []byte(fmt.Sprintf(`{"Replica":%d,"At":%d,"Epoch":%d,"CurrentEpoch":%d}`, o.replica, o.at, o.epoch, o.currentEpoch)), nil + } + return []byte(fmt.Sprintf(`{"Replica":%d,"At":%d,"Relative":%d,"Epoch":%d,"CurrentEpoch":%d}`, o.replica, o.at, o.relative, o.epoch, o.currentEpoch)), nil +} + +func (o offsetLoadMap) errToLoaded(err error) []loadedOffset { + var loaded []loadedOffset + for t, ps := range o { + for p, o := range ps { + loaded = append(loaded, loadedOffset{ + topic: t, + partition: p, + err: err, + request: o, + }) + } + } + return loaded +} + +// Combines list and epoch loads into one type for simplicity. +type listOrEpochLoads struct { + // List and Epoch are public so that anything marshaling through + // reflect (i.e. json) can see the fields. + List offsetLoadMap + Epoch offsetLoadMap +} + +type listOrEpochLoadType uint8 + +const ( + loadTypeList listOrEpochLoadType = iota + loadTypeEpoch +) + +func (l listOrEpochLoadType) String() string { + switch l { + case loadTypeList: + return "list" + default: + return "epoch" + } +} + +// adds an offset to be loaded, ensuring it exists only in the final loadType. 
+func (l *listOrEpochLoads) addLoad(t string, p int32, loadType listOrEpochLoadType, load offsetLoad) { + l.removeLoad(t, p) + dst := &l.List + if loadType == loadTypeEpoch { + dst = &l.Epoch + } + + if *dst == nil { + *dst = make(offsetLoadMap) + } + ps := (*dst)[t] + if ps == nil { + ps = make(map[int32]offsetLoad) + (*dst)[t] = ps + } + ps[p] = load +} + +func (l *listOrEpochLoads) removeLoad(t string, p int32) { + for _, m := range []offsetLoadMap{ + l.List, + l.Epoch, + } { + if m == nil { + continue + } + ps := m[t] + if ps == nil { + continue + } + delete(ps, p) + if len(ps) == 0 { + delete(m, t) + } + } +} + +func (l listOrEpochLoads) each(fn func(string, int32)) { + for _, m := range []offsetLoadMap{ + l.List, + l.Epoch, + } { + for topic, partitions := range m { + for partition := range partitions { + fn(topic, partition) + } + } + } +} + +func (l *listOrEpochLoads) keepFilter(keep func(string, int32) bool) { + for _, m := range []offsetLoadMap{ + l.List, + l.Epoch, + } { + for t, ps := range m { + for p := range ps { + if !keep(t, p) { + delete(ps, p) + if len(ps) == 0 { + delete(m, t) + } + } + } + } + } +} + +// Merges loads into the caller; used to coalesce loads while a metadata update +// is happening (see the only use below). +func (l *listOrEpochLoads) mergeFrom(src listOrEpochLoads) { + for _, srcs := range []struct { + m offsetLoadMap + loadType listOrEpochLoadType + }{ + {src.List, loadTypeList}, + {src.Epoch, loadTypeEpoch}, + } { + for t, ps := range srcs.m { + for p, load := range ps { + l.addLoad(t, p, srcs.loadType, load) + } + } + } +} + +func (l listOrEpochLoads) isEmpty() bool { return len(l.List) == 0 && len(l.Epoch) == 0 } + +func (l listOrEpochLoads) loadWithSession(s *consumerSession, why string) { + if !l.isEmpty() { + s.incWorker() + go s.listOrEpoch(l, false, why) + } +} + +func (l listOrEpochLoads) loadWithSessionNow(s *consumerSession, why string) bool { + if !l.isEmpty() { + s.incWorker() + go s.listOrEpoch(l, true, why) + return true + } + return false +} + +// A consumer session is responsible for an era of fetching records for a set +// of cursors. The set can be added to without killing an active session, but +// it cannot be removed from. Removing any cursor from being consumed kills the +// current consumer session and begins a new one. +type consumerSession struct { + c *consumer + + ctx context.Context + cancel func() + + // tps tracks the topics that were assigned in this session. We use + // this field to build and handle list offset / load epoch requests. + tps *topicsPartitions + + // desireFetchCh is sized to the number of concurrent fetches we are + // configured to be able to send. + // + // We receive desires from sources, we reply when they can fetch, and + // they send back when they are done. Thus, three level chan. + desireFetchCh chan chan chan struct{} + cancelFetchCh chan chan chan struct{} + allowedFetches int + fetchManagerStarted atomicBool // atomic, once true, we start the fetch manager + + // Workers signify the number of fetch and list / epoch goroutines that + // are currently running within the context of this consumer session. + // Stopping a session only returns once workers hits zero. 
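+	//
+	// A hedged aside (not from the upstream source): a sync.Cond is used
+	// rather than a sync.WaitGroup presumably because workers can be added
+	// while a stop is already waiting, which WaitGroup disallows. The wait
+	// side of the pattern, as used by stopSession below, is:
+	//
+	//	s.workersMu.Lock()
+	//	for s.workers > 0 {
+	//		s.workersCond.Wait() // releases workersMu while blocked
+	//	}
+	//	s.workersMu.Unlock()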
+	workersMu   sync.Mutex
+	workersCond *sync.Cond
+	workers     int
+
+	listOrEpochMu           sync.Mutex
+	listOrEpochLoadsWaiting listOrEpochLoads
+	listOrEpochMetaCh       chan struct{} // non-nil if Loads is non-nil, signalled on meta update
+	listOrEpochLoadsLoading listOrEpochLoads
+}
+
+func (c *consumer) newConsumerSession(tps *topicsPartitions) *consumerSession {
+	if tps == nil || len(tps.load()) == 0 {
+		return noConsumerSession
+	}
+	ctx, cancel := context.WithCancel(c.cl.ctx)
+	session := &consumerSession{
+		c: c,
+
+		ctx:    ctx,
+		cancel: cancel,
+
+		tps: tps,
+
+		// NOTE: This channel must be unbuffered. If it is buffered,
+		// then we can exit manageFetchConcurrency when we should not
+		// and have a deadlock:
+		//
+		// * source sends to desireFetchCh, is buffered
+		// * source sees context canceled, tries sending to cancelFetchCh
+		// * session concurrently sees context canceled
+		// * session has not drained desireFetchCh, sees activeFetches is 0
+		// * session exits
+		// * source permanently hangs sending to desireFetchCh
+		//
+		// By having desireFetchCh unbuffered, we *ensure* that if the
+		// source indicates it wants a fetch, the session knows it and
+		// tracks it in wantFetch.
+		//
+		// See #198.
+		desireFetchCh: make(chan chan chan struct{}),
+
+		cancelFetchCh:  make(chan chan chan struct{}, 4),
+		allowedFetches: c.cl.cfg.maxConcurrentFetches,
+	}
+	session.workersCond = sync.NewCond(&session.workersMu)
+	return session
+}
+
+func (s *consumerSession) desireFetch() chan chan chan struct{} {
+	if !s.fetchManagerStarted.Swap(true) {
+		go s.manageFetchConcurrency()
+	}
+	return s.desireFetchCh
+}
+
+func (s *consumerSession) manageFetchConcurrency() {
+	var (
+		activeFetches int
+		doneFetch     = make(chan struct{}, 20)
+		wantFetch     []chan chan struct{}
+
+		ctxCh    = s.ctx.Done()
+		wantQuit bool
+	)
+	for {
+		select {
+		case register := <-s.desireFetchCh:
+			wantFetch = append(wantFetch, register)
+		case cancel := <-s.cancelFetchCh:
+			var found bool
+			for i, want := range wantFetch {
+				if want == cancel {
+					_ = append(wantFetch[:i], wantFetch[i+1:]...)
+					wantFetch = wantFetch[:len(wantFetch)-1]
+					found = true
+				}
+			}
+			// If we did not find the channel, then we have already
+			// sent to it, removed it from our wantFetch list, and
+			// bumped activeFetches.
+			if !found {
+				activeFetches--
+			}
+
+		case <-doneFetch:
+			activeFetches--
+		case <-ctxCh:
+			wantQuit = true
+			ctxCh = nil
+		}
+
+		if len(wantFetch) > 0 && (activeFetches < s.allowedFetches || s.allowedFetches == 0) { // 0 means unbounded
+			wantFetch[0] <- doneFetch
+			wantFetch = wantFetch[1:]
+			activeFetches++
+			continue
+		}
+
+		if wantQuit && activeFetches == 0 {
+			return
+		}
+	}
+}
+
+func (s *consumerSession) incWorker() {
+	if s == noConsumerSession { // from startNewSession
+		return
+	}
+	s.workersMu.Lock()
+	defer s.workersMu.Unlock()
+	s.workers++
+}
+
+func (s *consumerSession) decWorker() {
+	if s == noConsumerSession { // from followup to startNewSession
+		return
+	}
+	s.workersMu.Lock()
+	defer s.workersMu.Unlock()
+	s.workers--
+	if s.workers == 0 {
+		s.workersCond.Broadcast()
+	}
+}
+
+// noConsumerSession exists because we cannot store nil into an atomic.Value.
+var noConsumerSession = new(consumerSession)
+
+func (c *consumer) loadSession() *consumerSession {
+	if session := c.session.Load(); session != nil {
+		return session.(*consumerSession)
+	}
+	return noConsumerSession
+}
+
+// Guards against a session being stopped, and must be paired with an unguard.
+// This returns a new session if there was no session.
+// +// The purpose of this function is when performing additive-only changes to an +// existing session, because additive-only changes can avoid killing a running +// session. +func (c *consumer) guardSessionChange(tps *topicsPartitions) *consumerSession { + c.sessionChangeMu.Lock() + + session := c.loadSession() + if session == noConsumerSession { + // If there is no session, we simply store one. This is fine; + // sources will be able to begin a fetch loop, but they will + // have no cursors to consume yet. + session = c.newConsumerSession(tps) + c.session.Store(session) + } + + return session +} + +// For the same reason below as in startNewSession, we inc a worker before +// unguarding. This allows the unguarding to execute a bit of logic if +// necessary before the session can be stopped. +func (c *consumer) unguardSessionChange(session *consumerSession) { + session.incWorker() + c.sessionChangeMu.Unlock() +} + +// Stops an active consumer session if there is one, and does not return until +// all fetching, listing, offset for leader epoching is complete. This +// invalidates any buffered fetches for the previous session and returns any +// partitions that were listing offsets or loading epochs. +func (c *consumer) stopSession() (listOrEpochLoads, *topicsPartitions) { + c.sessionChangeMu.Lock() + + session := c.loadSession() + + if session == noConsumerSession { + return listOrEpochLoads{}, nil // we had no session + } + + // Before storing noConsumerSession, cancel our old. This pairs + // with the reverse ordering in source, which checks noConsumerSession + // then checks the session context. + session.cancel() + + // At this point, any in progress fetches, offset lists, or epoch loads + // will quickly die. + + c.session.Store(noConsumerSession) + + // At this point, no source can be started, because the session is + // noConsumerSession. + + session.workersMu.Lock() + for session.workers > 0 { + session.workersCond.Wait() + } + session.workersMu.Unlock() + + // At this point, all fetches, lists, and loads are dead. We can close + // our num-fetches manager without worrying about a source trying to + // register itself. + + c.cl.allSinksAndSources(func(sns sinkAndSource) { + sns.source.session.reset() + }) + + // At this point, if we begin fetching anew, then the sources will not + // be using stale fetch sessions. + + c.sourcesReadyMu.Lock() + defer c.sourcesReadyMu.Unlock() + for _, ready := range c.sourcesReadyForDraining { + ready.discardBuffered() + } + c.sourcesReadyForDraining = nil + + // At this point, we have invalidated any buffered data from the prior + // session. We leave any fake things that were ready so that the user + // can act on errors. The session is dead. + + session.listOrEpochLoadsWaiting.mergeFrom(session.listOrEpochLoadsLoading) + return session.listOrEpochLoadsWaiting, session.tps +} + +// Starts a new consumer session, allowing fetches to happen. +// +// If there are no topic partitions to start with, this returns noConsumerSession. +// +// This is returned with 1 worker; decWorker must be called after return. The +// 1 worker allows for initialization work to prevent the session from being +// immediately stopped. +func (c *consumer) startNewSession(tps *topicsPartitions) *consumerSession { + if c.kill.Load() { + tps = nil + } + session := c.newConsumerSession(tps) + c.session.Store(session) + + // Ensure that this session is usable before being stopped immediately. + // The caller must dec workers. 
+	session.incWorker()
+
+	// At this point, sources can start consuming.
+
+	c.sessionChangeMu.Unlock()
+
+	c.cl.allSinksAndSources(func(sns sinkAndSource) {
+		sns.source.maybeConsume()
+	})
+
+	// At this point, any source that was not consuming because it saw the
+	// session was stopped has been notified to potentially start consuming
+	// again. The session is alive.
+
+	return session
+}
+
+// This function is responsible for issuing ListOffsets or
+// OffsetForLeaderEpoch. These requests' responses are only handled within
+// the context of a consumer session.
+func (s *consumerSession) listOrEpoch(waiting listOrEpochLoads, immediate bool, why string) {
+	defer s.decWorker()
+
+	// It is possible for a metadata update to try to migrate partition
+	// loads if the update moves partitions between brokers. If we are
+	// closing the client, the consumer session could already be stopped,
+	// but this stops before the metadata goroutine is killed. So, if we
+	// are in this function but actually have no session, we return.
+	if s == noConsumerSession {
+		return
+	}
+
+	wait := true
+	if immediate {
+		s.c.cl.triggerUpdateMetadataNow(why)
+	} else {
+		wait = s.c.cl.triggerUpdateMetadata(false, why) // avoid trigger if within refresh interval
+	}
+
+	s.listOrEpochMu.Lock() // collapse any listOrEpochs that occur during meta update into one
+	if !s.listOrEpochLoadsWaiting.isEmpty() {
+		s.listOrEpochLoadsWaiting.mergeFrom(waiting)
+		s.listOrEpochMu.Unlock()
+		return
+	}
+	s.listOrEpochLoadsWaiting = waiting
+	s.listOrEpochMetaCh = make(chan struct{}, 1)
+	s.listOrEpochMu.Unlock()
+
+	if wait {
+		select {
+		case <-s.ctx.Done():
+			return
+		case <-s.listOrEpochMetaCh:
+		}
+	}
+
+	s.listOrEpochMu.Lock()
+	loading := s.listOrEpochLoadsWaiting
+	s.listOrEpochLoadsLoading.mergeFrom(loading)
+	s.listOrEpochLoadsWaiting = listOrEpochLoads{}
+	s.listOrEpochMetaCh = nil
+	s.listOrEpochMu.Unlock()
+
+	brokerLoads := s.mapLoadsToBrokers(loading)
+
+	results := make(chan loadedOffsets, 2*len(brokerLoads)) // each broker can receive up to two requests
+
+	var issued, received int
+	for broker, brokerLoad := range brokerLoads {
+		s.c.cl.cfg.logger.Log(LogLevelDebug, "offsets to load broker", "broker", broker.meta.NodeID, "load", brokerLoad)
+		if len(brokerLoad.List) > 0 {
+			issued++
+			go s.c.cl.listOffsetsForBrokerLoad(s.ctx, broker, brokerLoad.List, s.tps, results)
+		}
+		if len(brokerLoad.Epoch) > 0 {
+			issued++
+			go s.c.cl.loadEpochsForBrokerLoad(s.ctx, broker, brokerLoad.Epoch, s.tps, results)
+		}
+	}
+
+	var reloads listOrEpochLoads
+	defer func() {
+		if !reloads.isEmpty() {
+			s.incWorker()
+			go func() {
+				// Before we dec our worker, we must add the
+				// reloads back into the session's waiting loads.
+				// Doing so allows a concurrent stopSession to
+				// track the waiting loads, whereas if we did not
+				// add things back to the session, we could abandon
+				// loading these offsets and have a stuck cursor.
+				defer s.decWorker()
+				defer reloads.loadWithSession(s, "reload offsets from load failure")
+				after := time.NewTimer(time.Second)
+				defer after.Stop()
+				select {
+				case <-after.C:
+				case <-s.ctx.Done():
+					return
+				}
+			}()
+		}
+	}()
+
+	for received != issued {
+		select {
+		case <-s.ctx.Done():
+			// If we return early, our session was canceled. We do
+			// not move loading list or epoch loads back to
+			// waiting; the session stopping manages that.
+ return + case loaded := <-results: + received++ + reloads.mergeFrom(s.handleListOrEpochResults(loaded)) + } + } +} + +// Called within a consumer session, this function handles results from list +// offsets or epoch loads and returns any loads that should be retried. +// +// To us, all errors are reloadable. We either have request level retryable +// errors (unknown partition, etc) or non-retryable errors (auth), or we have +// request issuing errors (no dial, connection cut repeatedly). +// +// For retryable request errors, we may as well back off a little bit to allow +// Kafka to harmonize if the topic exists / etc. +// +// For non-retryable request errors, we may as well retry to both (a) allow the +// user more signals about a problem that they can maybe fix within Kafka (i.e. +// the auth), and (b) force the user to notice errors. +// +// For request issuing errors, we may as well continue to retry because there +// is not much else we can do. RequestWith already retries, but returns when +// the retry limit is hit. We will backoff 1s and then allow RequestWith to +// continue requesting and backing off. +func (s *consumerSession) handleListOrEpochResults(loaded loadedOffsets) (reloads listOrEpochLoads) { + // This function can be running twice concurrently, so we need to guard + // listOrEpochLoadsLoading and usingCursors. For simplicity, we just + // guard this entire function. + + debug := s.c.cl.cfg.logger.Level() >= LogLevelDebug + + var using map[string]map[int32]EpochOffset + type epochOffsetWhy struct { + EpochOffset + error + } + var reloading map[string]map[int32]epochOffsetWhy + if debug { + using = make(map[string]map[int32]EpochOffset) + reloading = make(map[string]map[int32]epochOffsetWhy) + defer func() { + t := "list" + if loaded.loadType == loadTypeEpoch { + t = "epoch" + } + s.c.cl.cfg.logger.Log(LogLevelDebug, fmt.Sprintf("handled %s results", t), "broker", logID(loaded.broker), "using", using, "reloading", reloading) + }() + } + + s.listOrEpochMu.Lock() + defer s.listOrEpochMu.Unlock() + + for _, load := range loaded.loaded { + s.listOrEpochLoadsLoading.removeLoad(load.topic, load.partition) // remove the tracking of this load from our session + + use := func() { + if debug { + tusing := using[load.topic] + if tusing == nil { + tusing = make(map[int32]EpochOffset) + using[load.topic] = tusing + } + tusing[load.partition] = EpochOffset{load.leaderEpoch, load.offset} + } + + load.cursor.setOffset(cursorOffset{ + offset: load.offset, + lastConsumedEpoch: load.leaderEpoch, + }) + load.cursor.allowUsable() + s.c.usingCursors.use(load.cursor) + } + + var edl *ErrDataLoss + switch { + case errors.As(load.err, &edl): + s.c.addFakeReadyForDraining(load.topic, load.partition, load.err, "notification of data loss") // signal we lost data, but set the cursor to what we can + use() + + case load.err == nil: + use() + + default: // from ErrorCode in a response, or broker request err, or request is canceled as our session is ending + reloads.addLoad(load.topic, load.partition, loaded.loadType, load.request) + if !kerr.IsRetriable(load.err) && !isRetryableBrokerErr(load.err) && !isDialNonTimeoutErr(load.err) && !isContextErr(load.err) { // non-retryable response error; signal such in a response + s.c.addFakeReadyForDraining(load.topic, load.partition, load.err, fmt.Sprintf("notification of non-retryable error from %s request", loaded.loadType)) + } + + if debug { + treloading := reloading[load.topic] + if treloading == nil { + treloading = make(map[int32]epochOffsetWhy) + 
reloading[load.topic] = treloading
+				}
+				treloading[load.partition] = epochOffsetWhy{EpochOffset{load.leaderEpoch, load.offset}, load.err}
+			}
+		}
+	}
+
+	return reloads
+}
+
+// Splits the loads into per-broker loads, mapping each partition to the broker
+// that leads that partition.
+func (s *consumerSession) mapLoadsToBrokers(loads listOrEpochLoads) map[*broker]listOrEpochLoads {
+	brokerLoads := make(map[*broker]listOrEpochLoads)
+
+	s.c.cl.brokersMu.RLock() // hold mu so we can check if partition leaders exist
+	defer s.c.cl.brokersMu.RUnlock()
+
+	brokers := s.c.cl.brokers
+	seed := s.c.cl.loadSeeds()[0]
+
+	topics := s.tps.load()
+	for _, loads := range []struct {
+		m        offsetLoadMap
+		loadType listOrEpochLoadType
+	}{
+		{loads.List, loadTypeList},
+		{loads.Epoch, loadTypeEpoch},
+	} {
+		for topic, partitions := range loads.m {
+			topicPartitions := topics.loadTopic(topic) // this must exist, it not existing would be a bug
+			for partition, offset := range partitions {
+				// We default to the first seed broker if we have not loaded
+				// the broker leader for this partition (we should have).
+				// Worst case, we get an error for the partition and retry.
+				broker := seed
+				if partition >= 0 && partition < int32(len(topicPartitions.partitions)) {
+					topicPartition := topicPartitions.partitions[partition]
+					brokerID := topicPartition.leader
+					if offset.replica != -1 {
+						// If we are fetching from a follower, we can list
+						// offsets against the follower itself. The replica
+						// being non-negative signals that.
+						//
+						// Note this is not actually true (i.e. KIP-392 lies),
+						// but we keep this logic in case we can revert
+						// to using non-leaders someday.
+						brokerID = offset.replica
+					}
+					if tryBroker := findBroker(brokers, brokerID); tryBroker != nil {
+						broker = tryBroker
+					}
+					offset.currentEpoch = topicPartition.leaderEpoch // ensure we set our latest epoch for the partition
+				}
+
+				brokerLoad := brokerLoads[broker]
+				brokerLoad.addLoad(topic, partition, loads.loadType, offset)
+				brokerLoads[broker] = brokerLoad
+			}
+		}
+	}
+
+	return brokerLoads
+}
+
+// The result of ListOffsets or OffsetForLeaderEpoch for an individual
+// partition.
+type loadedOffset struct {
+	topic     string
+	partition int32
+
+	// The following three are potentially unset if the error is non-nil
+	// and not ErrDataLoss; these are what we loaded.
+	cursor      *cursor
+	offset      int64
+	leaderEpoch int32
+
+	// Any error encountered for loading this partition, or for epoch
+	// loading, potentially ErrDataLoss. If this error is not retryable, we
+	// avoid reloading the offset and instead inject a fake partition for
+	// PollFetches containing this error.
+	err error
+
+	// The original request.
+	request offsetLoad
+}
+
+// The results of ListOffsets or OffsetForLeaderEpoch for an individual broker.
+type loadedOffsets struct {
+	broker   int32
+	loaded   []loadedOffset
+	loadType listOrEpochLoadType
+}
+
+func (l *loadedOffsets) add(a loadedOffset) { l.loaded = append(l.loaded, a) }
+func (l *loadedOffsets) addAll(as []loadedOffset) loadedOffsets {
+	l.loaded = append(l.loaded, as...)
+ return *l +} + +func (cl *Client) listOffsetsForBrokerLoad(ctx context.Context, broker *broker, load offsetLoadMap, tps *topicsPartitions, results chan<- loadedOffsets) { + loaded := loadedOffsets{broker: broker.meta.NodeID, loadType: loadTypeList} + + req1, req2 := load.buildListReq(cl.cfg.isolationLevel) + var ( + wg sync.WaitGroup + kresp2 kmsg.Response + err2 error + ) + if req2 != nil { + wg.Add(1) + go func() { + defer wg.Done() + kresp2, err2 = broker.waitResp(ctx, req2) + }() + } + kresp, err := broker.waitResp(ctx, req1) + wg.Wait() + if err != nil || err2 != nil { + results <- loaded.addAll(load.errToLoaded(err)) + return + } + + topics := tps.load() + resp := kresp.(*kmsg.ListOffsetsResponse) + + // If we issued a second req to check that an exact offset is in + // bounds, then regrettably for safety, we have to ensure that the + // shapes of both responses match, and the topic & partition at each + // index matches. Anything that does not match is skipped (and would be + // a bug from Kafka), and we at the end return UnknownTopicOrPartition. + var resp2 *kmsg.ListOffsetsResponse + if req2 != nil { + resp2 = kresp2.(*kmsg.ListOffsetsResponse) + for _, r := range []*kmsg.ListOffsetsResponse{ + resp, + resp2, + } { + ts := r.Topics + sort.Slice(ts, func(i, j int) bool { + return ts[i].Topic < ts[j].Topic + }) + for i := range ts { + ps := ts[i].Partitions + sort.Slice(ps, func(i, j int) bool { + return ps[i].Partition < ps[j].Partition + }) + } + } + + lt := resp.Topics + rt := resp2.Topics + lkeept := lt[:0] + rkeept := rt[:0] + // Over each response, we only keep the topic if the topics match. + for len(lt) > 0 && len(rt) > 0 { + if lt[0].Topic < rt[0].Topic { + lt = lt[1:] + continue + } + if rt[0].Topic < lt[0].Topic { + rt = rt[1:] + continue + } + // As well, for topics that match, we only keep + // partitions that match. In this case, we also want + // both partitions to be error free, otherwise we keep + // an error on both. If one has old style offsets, + // both must. + lp := lt[0].Partitions + rp := rt[0].Partitions + lkeepp := lp[:0] + rkeepp := rp[:0] + for len(lp) > 0 && len(rp) > 0 { + if lp[0].Partition < rp[0].Partition { + lp = lp[1:] + continue + } + if rp[0].Partition < lp[0].Partition { + rp = rp[1:] + continue + } + if len(lp[0].OldStyleOffsets) > 0 && len(rp[0].OldStyleOffsets) == 0 || + len(lp[0].OldStyleOffsets) == 0 && len(rp[0].OldStyleOffsets) > 0 { + lp = lp[1:] + rp = rp[1:] + continue + } + if lp[0].ErrorCode != 0 { + rp[0].ErrorCode = lp[0].ErrorCode + } else if rp[0].ErrorCode != 0 { + lp[0].ErrorCode = rp[0].ErrorCode + } + lkeepp = append(lkeepp, lp[0]) + rkeepp = append(rkeepp, rp[0]) + lp = lp[1:] + rp = rp[1:] + } + // Now we update the partitions in the topic we are + // keeping, and keep our topic. + lt[0].Partitions = lkeepp + rt[0].Partitions = rkeepp + lkeept = append(lkeept, lt[0]) + rkeept = append(rkeept, rt[0]) + lt = lt[1:] + rt = rt[1:] + } + // Finally, update each response with the topics we kept. The + // shapes and indices are the same. 
+ resp.Topics = lkeept + resp2.Topics = rkeept + } + + poffset := func(p *kmsg.ListOffsetsResponseTopicPartition) int64 { + offset := p.Offset + if len(p.OldStyleOffsets) > 0 { + offset = p.OldStyleOffsets[0] // list offsets v0 + } + return offset + } + + for i, rTopic := range resp.Topics { + topic := rTopic.Topic + loadParts, ok := load[topic] + if !ok { + continue // should not happen: kafka replied with something we did not ask for + } + + topicPartitions := topics.loadTopic(topic) // must be non-nil at this point + for j, rPartition := range rTopic.Partitions { + partition := rPartition.Partition + loadPart, ok := loadParts[partition] + if !ok { + continue // should not happen: kafka replied with something we did not ask for + } + + if err := kerr.ErrorForCode(rPartition.ErrorCode); err != nil { + loaded.add(loadedOffset{ + topic: topic, + partition: partition, + err: err, + request: loadPart, + }) + continue // partition err: handled in results + } + + if partition < 0 || partition >= int32(len(topicPartitions.partitions)) { + continue // should not happen: we have not seen this partition from a metadata response + } + topicPartition := topicPartitions.partitions[partition] + + delete(loadParts, partition) + if len(loadParts) == 0 { + delete(load, topic) + } + + offset := poffset(&rPartition) + end := func() int64 { return poffset(&resp2.Topics[i].Partitions[j]) } + + // We ensured the resp2 shape is as we want and has no + // error, so resp2 lookups are safe. + if loadPart.afterMilli { + // If after a milli, if the milli is after the + // end of a partition, the offset is -1. We use + // our end offset request: anything after the + // end offset *now* is after our milli. + if offset == -1 { + offset = end() + } + } else if loadPart.at >= 0 { + // If an exact offset, we listed start and end. + // We validate the offset is within bounds. + end := end() + want := loadPart.at + loadPart.relative + if want >= offset { + offset = want + } + if want >= end { + offset = end + } + } else if loadPart.at == -2 && loadPart.relative > 0 { + // Relative to the start: both start & end were + // issued, and we bound to the end. + offset += loadPart.relative + if end := end(); offset >= end { + offset = end + } + } else if loadPart.at == -1 && loadPart.relative < 0 { + // Relative to the end: both start & end were + // issued, offset is currently the start, so we + // set to the end and then bound to the start. + start := offset + offset = end() + offset += loadPart.relative + if offset <= start { + offset = start + } + } + if offset < 0 { + offset = 0 // sanity + } + + loaded.add(loadedOffset{ + topic: topic, + partition: partition, + cursor: topicPartition.cursor, + offset: offset, + leaderEpoch: rPartition.LeaderEpoch, + request: loadPart, + }) + } + } + + results <- loaded.addAll(load.errToLoaded(kerr.UnknownTopicOrPartition)) +} + +func (*Client) loadEpochsForBrokerLoad(ctx context.Context, broker *broker, load offsetLoadMap, tps *topicsPartitions, results chan<- loadedOffsets) { + loaded := loadedOffsets{broker: broker.meta.NodeID, loadType: loadTypeEpoch} + + kresp, err := broker.waitResp(ctx, load.buildEpochReq()) + if err != nil { + results <- loaded.addAll(load.errToLoaded(err)) + return + } + + // If the version is < 2, we are speaking to an old broker. We should + // not have an old version, but we could have spoken to a new broker + // first then an old broker in the middle of a broker roll. For now, we + // will just loop retrying until the broker is upgraded. 
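+	// A worked example (not from the upstream source) of the KIP-320
+	// truncation check applied below: if we planned to resume at offset 100
+	// for our last-consumed epoch but the broker reports EndOffset 90 for
+	// that epoch, the log was truncated under us; we surface ErrDataLoss
+	// and reset the cursor to 90.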
+ + topics := tps.load() + resp := kresp.(*kmsg.OffsetForLeaderEpochResponse) + for _, rTopic := range resp.Topics { + topic := rTopic.Topic + loadParts, ok := load[topic] + if !ok { + continue // should not happen: kafka replied with something we did not ask for + } + + topicPartitions := topics.loadTopic(topic) // must be non-nil at this point + for _, rPartition := range rTopic.Partitions { + partition := rPartition.Partition + loadPart, ok := loadParts[partition] + if !ok { + continue // should not happen: kafka replied with something we did not ask for + } + + if err := kerr.ErrorForCode(rPartition.ErrorCode); err != nil { + loaded.add(loadedOffset{ + topic: topic, + partition: partition, + err: err, + request: loadPart, + }) + continue // partition err: handled in results + } + + if partition < 0 || partition >= int32(len(topicPartitions.partitions)) { + continue // should not happen: we have not seen this partition from a metadata response + } + topicPartition := topicPartitions.partitions[partition] + + delete(loadParts, partition) + if len(loadParts) == 0 { + delete(load, topic) + } + + // Epoch loading never uses noReset nor afterMilli; + // this at is the offset we wanted to consume and are + // validating. + offset := loadPart.at + var err error + if rPartition.EndOffset < offset { + err = &ErrDataLoss{topic, partition, offset, rPartition.EndOffset} + offset = rPartition.EndOffset + } + + loaded.add(loadedOffset{ + topic: topic, + partition: partition, + cursor: topicPartition.cursor, + offset: offset, + leaderEpoch: rPartition.LeaderEpoch, + err: err, + request: loadPart, + }) + } + } + + results <- loaded.addAll(load.errToLoaded(kerr.UnknownTopicOrPartition)) +} + +// In general this returns one request, but if the user is using exact offsets +// rather than start/end, then we issue both the start and end requests to +// ensure the user's requested offset is within bounds. +func (o offsetLoadMap) buildListReq(isolationLevel int8) (r1, r2 *kmsg.ListOffsetsRequest) { + r1 = kmsg.NewPtrListOffsetsRequest() + r1.ReplicaID = -1 + r1.IsolationLevel = isolationLevel + r1.Topics = make([]kmsg.ListOffsetsRequestTopic, 0, len(o)) + var createEnd bool + for topic, partitions := range o { + parts := make([]kmsg.ListOffsetsRequestTopicPartition, 0, len(partitions)) + for partition, offset := range partitions { + // If this is a milli request, we issue two lists: if + // our milli is after the end of a partition, we get no + // offset back and we want to know the start offset + // (since it will be after our milli). + // + // If we are using an exact offset request, we issue + // the start and end so that we can bound the exact + // offset to being within that range. + // + // If we are using a relative offset, we potentially + // issue the end request because relative may shift us + // too far in the other direction. 
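+			//
+			// A hedged reminder (not from the upstream source) of
+			// the ListOffsets sentinels used below: Timestamp -2
+			// requests a partition's start offset and -1 requests
+			// its end offset, i.e.
+			//
+			//	p.Timestamp = -2 // list earliest offset
+			//	p.Timestamp = -1 // list latest offset
+			//
+			// Exact and boundary-relative requests are rewritten to
+			// -2 here, and the cloned r2 below lists -1 so the exact
+			// offset can be bounded by the current end.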
+ timestamp := offset.at + if offset.afterMilli { + createEnd = true + } else if timestamp >= 0 || timestamp == -2 && offset.relative > 0 || timestamp == -1 && offset.relative < 0 { + timestamp = -2 + createEnd = true + } + p := kmsg.NewListOffsetsRequestTopicPartition() + p.Partition = partition + p.CurrentLeaderEpoch = offset.currentEpoch // KIP-320 + p.Timestamp = timestamp + p.MaxNumOffsets = 1 + + parts = append(parts, p) + } + t := kmsg.NewListOffsetsRequestTopic() + t.Topic = topic + t.Partitions = parts + r1.Topics = append(r1.Topics, t) + } + + if createEnd { + r2 = kmsg.NewPtrListOffsetsRequest() + *r2 = *r1 + r2.Topics = append([]kmsg.ListOffsetsRequestTopic(nil), r1.Topics...) + for i := range r1.Topics { + l := &r2.Topics[i] + r := &r1.Topics[i] + *l = *r + l.Partitions = append([]kmsg.ListOffsetsRequestTopicPartition(nil), r.Partitions...) + for i := range l.Partitions { + l.Partitions[i].Timestamp = -1 + } + } + } + + return r1, r2 +} + +func (o offsetLoadMap) buildEpochReq() *kmsg.OffsetForLeaderEpochRequest { + req := kmsg.NewPtrOffsetForLeaderEpochRequest() + req.ReplicaID = -1 + req.Topics = make([]kmsg.OffsetForLeaderEpochRequestTopic, 0, len(o)) + for topic, partitions := range o { + parts := make([]kmsg.OffsetForLeaderEpochRequestTopicPartition, 0, len(partitions)) + for partition, offset := range partitions { + p := kmsg.NewOffsetForLeaderEpochRequestTopicPartition() + p.Partition = partition + p.CurrentLeaderEpoch = offset.currentEpoch + p.LeaderEpoch = offset.epoch + parts = append(parts, p) + } + t := kmsg.NewOffsetForLeaderEpochRequestTopic() + t.Topic = topic + t.Partitions = parts + req.Topics = append(req.Topics, t) + } + return req +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/consumer_direct.go b/vendor/github.com/twmb/franz-go/pkg/kgo/consumer_direct.go new file mode 100644 index 00000000000..0dcbf9897d8 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/consumer_direct.go @@ -0,0 +1,141 @@ +package kgo + +type directConsumer struct { + cfg *cfg + tps *topicsPartitions // data for topics that the user assigned + using mtmps // topics we are currently using + m mtmps // mirrors cfg.topics and cfg.partitions, but can change with Purge or Add + ps map[string]map[int32]Offset // mirrors cfg.partitions, changed in Purge or Add + reSeen map[string]bool // topics we evaluated against regex, and whether we want them or not +} + +func (c *consumer) initDirect() { + d := &directConsumer{ + cfg: &c.cl.cfg, + tps: newTopicsPartitions(), + reSeen: make(map[string]bool), + using: make(mtmps), + m: make(mtmps), + ps: make(map[string]map[int32]Offset), + } + c.d = d + + if d.cfg.regex { + return + } + + var topics []string + for topic, partitions := range d.cfg.partitions { + topics = append(topics, topic) + for partition := range partitions { + d.m.add(topic, partition) + } + p := make(map[int32]Offset, len(partitions)) + for partition, offset := range partitions { + p[partition] = offset + } + d.ps[topic] = p + } + for topic := range d.cfg.topics { + topics = append(topics, topic) + d.m.addt(topic) + } + d.tps.storeTopics(topics) // prime topics to load if non-regex (this is of no benefit if regex) +} + +// For SetOffsets, unlike the group consumer, we just blindly translate the +// input EpochOffsets into Offsets, and those will be set directly. 
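+//
+// For context, a hedged sketch (not from the upstream source) of configuring
+// a direct consumer, which populates the cfg.topics and cfg.partitions maps
+// consumed by initDirect above; broker and topic names are hypothetical:
+//
+//	cl, _ := kgo.NewClient(
+//		kgo.SeedBrokers("localhost:9092"),
+//		kgo.ConsumeTopics("events"),
+//		kgo.ConsumePartitions(map[string]map[int32]kgo.Offset{
+//			"audit": {0: kgo.NewOffset().At(42)},
+//		}),
+//	)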
+func (*directConsumer) getSetAssigns(setOffsets map[string]map[int32]EpochOffset) (assigns map[string]map[int32]Offset) { + assigns = make(map[string]map[int32]Offset) + for topic, partitions := range setOffsets { + set := make(map[int32]Offset) + for partition, eo := range partitions { + set[partition] = Offset{ + at: eo.Offset, + epoch: eo.Epoch, + } + } + assigns[topic] = set + } + return assigns +} + +// findNewAssignments returns new partitions to consume at given offsets +// based off the current topics. +func (d *directConsumer) findNewAssignments() map[string]map[int32]Offset { + topics := d.tps.load() + + toUse := make(map[string]map[int32]Offset, 10) + for topic, topicPartitions := range topics { + var useTopic bool + if d.cfg.regex { + useTopic = d.reSeen[topic] + } else { + useTopic = d.m.onlyt(topic) + } + + // If the above detected that we want to keep this topic, we + // set all partitions as usable. + // + // For internal partitions, we only allow consuming them if + // the topic is explicitly specified. + if useTopic { + partitions := topicPartitions.load() + if d.cfg.regex && partitions.isInternal || len(partitions.partitions) == 0 { + continue + } + toUseTopic := make(map[int32]Offset, len(partitions.partitions)) + for partition := range partitions.partitions { + toUseTopic[int32(partition)] = d.cfg.resetOffset + } + toUse[topic] = toUseTopic + } + + // Lastly, if this topic has some specific partitions pinned, + // we set those. We only use partitions from topics that have + // not been purged. + for topic := range d.m { + for partition, offset := range d.ps[topic] { + toUseTopic, exists := toUse[topic] + if !exists { + toUseTopic = make(map[int32]Offset, 10) + toUse[topic] = toUseTopic + } + toUseTopic[partition] = offset + } + } + } + + // With everything we want to consume, remove what we are already. + for topic, partitions := range d.using { + toUseTopic, exists := toUse[topic] + if !exists { + continue // metadata update did not return this topic (regex or failing load) + } + for partition := range partitions { + delete(toUseTopic, partition) + } + if len(toUseTopic) == 0 { + delete(toUse, topic) + } + } + + if len(toUse) == 0 { + return nil + } + + // Finally, toUse contains new partitions that we must consume. + // Add them to our using map and assign them. + for topic, partitions := range toUse { + topicUsing, exists := d.using[topic] + if !exists { + topicUsing = make(map[int32]struct{}) + d.using[topic] = topicUsing + } + for partition := range partitions { + topicUsing[partition] = struct{}{} + } + } + + return toUse +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/consumer_group.go b/vendor/github.com/twmb/franz-go/pkg/kgo/consumer_group.go new file mode 100644 index 00000000000..dee54bd743a --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/consumer_group.go @@ -0,0 +1,2889 @@ +package kgo + +import ( + "bytes" + "context" + "errors" + "fmt" + "sort" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" +) + +type groupConsumer struct { + c *consumer // used to change consumer state; generally c.mu is grabbed on access + cl *Client // used for running requests / adding to topics map + cfg *cfg + + ctx context.Context + cancel func() + manageDone chan struct{} // closed once when the manage goroutine quits + + cooperative atomicBool // true if the group balancer chosen during Join is cooperative + + // The data for topics that the user assigned. 
Metadata updates the + // atomic.Value in each pointer atomically. + tps *topicsPartitions + + reSeen map[string]bool // topics we evaluated against regex, and whether we want them or not + + // Full lock grabbed in CommitOffsetsSync, read lock grabbed in + // CommitOffsets, this lock ensures that only one sync commit can + // happen at once, and if it is happening, no other commit can be + // happening. + syncCommitMu sync.RWMutex + + rejoinCh chan string // cap 1; sent to if subscription changes (regex) + + // For EOS, before we commit, we force a heartbeat. If the client and + // group member are both configured properly, then the transactional + // timeout will be less than the session timeout. By forcing a + // heartbeat before the commit, if the heartbeat was successful, then + // we ensure that we will complete the transaction within the group + // session, meaning we will not commit after the group has rebalanced. + heartbeatForceCh chan func(error) + + // The following two are only updated in the manager / join&sync loop + // The nowAssigned map is read when commits fail: if the commit fails + // with ILLEGAL_GENERATION and it contains only partitions that are in + // nowAssigned, we re-issue. + lastAssigned map[string][]int32 + nowAssigned amtps + + // Fetching ensures we continue fetching offsets across cooperative + // rebalance if an offset fetch returns early due to an immediate + // rebalance. See the large comment on adjustCooperativeFetchOffsets + // for more details. + // + // This is modified only in that function, or in the manage loop on a + // hard error once the heartbeat/fetch has returned. + fetching map[string]map[int32]struct{} + + // onFetchedMu ensures we do not call onFetched nor adjustOffsets + // concurrent with onRevoked. + // + // The group session itself ensures that OnPartitions functions are + // serial, but offset fetching is concurrent with heartbeating and can + // finish before or after heartbeating has already detected a revoke. + // To make user lives easier, we guarantee that offset fetch callbacks + // cannot be concurrent with onRevoked with this mu. If fetch callbacks + // are present, we hook this mu into onRevoked, and we grab it in the + // locations fetch callbacks are called. We only have to worry about + // onRevoked because fetching offsets occurs after onAssigned, and + // onLost happens after fetching offsets is done. + onFetchedMu sync.Mutex + + // leader is whether we are the leader right now. This is set to false + // + // - set to false at the beginning of a join group session + // - set to true if join group response indicates we are leader + // - read on metadata updates in findNewAssignments + leader atomicBool + + // Set to true when ending a transaction committing transaction + // offsets, and then set to false immediately after before calling + // EndTransaction. + offsetsAddedToTxn bool + + // If we are leader, then other members may express interest to consume + // topics that we are not interested in consuming. We track the entire + // group's topics in external, and our fetchMetadata loop uses this. + // We store this as a pointer for address comparisons. + external atomic.Value // *groupExternal + + // See the big comment on `commit`. If we allow committing between + // join&sync, we occasionally see RebalanceInProgress or + // IllegalGeneration errors while cooperative consuming. 
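+	//
+	// A hedged sketch (not from the upstream source) of the user-facing
+	// commit flow that the commit machinery below serializes, assuming
+	// autocommit is disabled:
+	//
+	//	fetches := cl.PollFetches(ctx)
+	//	// ... process fetches.Records() ...
+	//	if err := cl.CommitUncommittedOffsets(ctx); err != nil {
+	//		// not committed; records may be seen again after a rebalance
+	//	}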
+	noCommitDuringJoinAndSync sync.RWMutex
+
+	//////////////
+	// mu block //
+	//////////////
+	mu sync.Mutex
+
+	// using is updated when finding new assignments, we always add to this
+	// if we want to consume a topic (or see there are more potential
+	// partitions). Only the leader can trigger a new group session if there
+	// are simply more partitions for existing topics.
+	//
+	// This is read when joining a group or leaving a group.
+	using map[string]int // topics *we* are currently using => # partitions known in that topic
+
+	// uncommitted is read and updated all over:
+	// - updated before PollFetches returns
+	// - updated when directly setting offsets (to rewind, for transactions)
+	// - emptied when leaving a group
+	// - updated when revoking
+	// - updated after fetching offsets once we receive our group assignment
+	// - updated after we commit
+	// - read when getting uncommitted or committed
+	uncommitted uncommitted
+
+	// memberID and generation are written to in the join and sync loop,
+	// and mostly read within that loop. This can be read during commits,
+	// which can happen at any time. It is **recommended** to be done within
+	// the context of a group session, but (a) users may have some unique
+	// use cases, and (b) the onRevoke hook may take longer than a user
+	// expects, which would rotate a session.
+	memberGen groupMemberGen
+
+	// commitCancel and commitDone are set under mu before firing off an
+	// async commit request. If another commit happens, it cancels the
+	// prior commit, waits for the prior to be done, and then starts its
+	// own.
+	commitCancel func()
+	commitDone   chan struct{}
+
+	// blockAuto is set and cleared in CommitOffsets{,Sync} to block
+	// autocommitting if autocommitting is active. This ensures that an
+	// autocommit does not cancel the user's manual commit.
+	blockAuto bool
+
+	// We set this once to manage the group lifecycle once.
+	managing bool
+
+	dying    bool // set when closing, read in findNewAssignments
+	left     chan struct{}
+	leaveErr error // set before left is closed
+}
+
+type groupMemberGen struct {
+	v atomic.Value // *groupMemberGenT
+}
+
+type groupMemberGenT struct {
+	memberID   string
+	generation int32
+}
+
+func (g *groupMemberGen) memberID() string {
+	memberID, _ := g.load()
+	return memberID
+}
+
+func (g *groupMemberGen) generation() int32 {
+	_, generation := g.load()
+	return generation
+}
+
+func (g *groupMemberGen) load() (memberID string, generation int32) {
+	v := g.v.Load()
+	if v == nil {
+		return "", -1
+	}
+	t := v.(*groupMemberGenT)
+	return t.memberID, t.generation
+}
+
+func (g *groupMemberGen) store(memberID string, generation int32) {
+	g.v.Store(&groupMemberGenT{memberID, generation})
+}
+
+func (g *groupMemberGen) storeMember(memberID string) {
+	g.store(memberID, g.generation())
+}
+
+// LeaveGroup leaves a group. Close automatically leaves the group, so this is
+// only necessary to call if you plan to leave the group but continue to use
+// the client. If a rebalance is in progress, this function waits for the
+// rebalance to complete before the group can be left. This is necessary to
+// allow you to safely issue one final offset commit in OnPartitionsRevoked. If
+// you have overridden the default revoke, you must manually commit offsets
+// before leaving the group.
+//
+// If you have configured the group with an InstanceID, this does not leave the
+// group. With instance IDs, it is expected that clients will restart and
+// re-use the same instance ID. To leave a group using an instance ID, you must
+// manually issue a kmsg.LeaveGroupRequest or use an external tool (kafka
+// scripts or kcl).
+//
+// It is recommended to use LeaveGroupContext to see if the leave was
+// successful.
+func (cl *Client) LeaveGroup() {
+	cl.LeaveGroupContext(cl.ctx)
+}
+
+// LeaveGroupContext leaves a group. Close automatically leaves the group, so
+// this is only necessary to call if you plan to leave the group but continue
+// to use the client. If a rebalance is in progress, this function waits for
+// the rebalance to complete before the group can be left. This is necessary to
+// allow you to safely issue one final offset commit in OnPartitionsRevoked. If
+// you have overridden the default revoke, you must manually commit offsets
+// before leaving the group.
+//
+// The context can be used to avoid waiting for the client to leave the group.
+// Not waiting may result in your client being stuck in the group and the
+// partitions this client was consuming being stuck until the session timeout.
+// This function returns any leave group error or context cancel error. If the
+// context is nil, this immediately leaves the group and does not wait and does
+// not return an error.
+//
+// If you have configured the group with an InstanceID, this does not leave the
+// group. With instance IDs, it is expected that clients will restart and
+// re-use the same instance ID. To leave a group using an instance ID, you must
+// manually issue a kmsg.LeaveGroupRequest or use an external tool (kafka
+// scripts or kcl).
+func (cl *Client) LeaveGroupContext(ctx context.Context) error {
+	c := &cl.consumer
+	if c.g == nil {
+		return nil
+	}
+	var immediate bool
+	if ctx == nil {
+		var cancel func()
+		ctx, cancel = context.WithCancel(context.Background())
+		cancel()
+		immediate = true
+	}
+
+	go func() {
+		c.waitAndAddRebalance()
+		c.mu.Lock() // lock for assign
+		c.assignPartitions(nil, assignInvalidateAll, nil, "invalidating all assignments in LeaveGroup")
+		c.g.leave(ctx)
+		c.mu.Unlock()
+		c.unaddRebalance()
+	}()
+
+	select {
+	case <-ctx.Done():
+		if immediate {
+			return nil
+		}
+		return ctx.Err()
+	case <-c.g.left:
+		return c.g.leaveErr
+	}
+}
+
+// GroupMetadata returns the current group member ID and generation, or an
+// empty string and -1 if not in the group.
+func (cl *Client) GroupMetadata() (string, int32) {
+	g := cl.consumer.g
+	if g == nil {
+		return "", -1
+	}
+	return g.memberGen.load()
+}
+
+func (c *consumer) initGroup() {
+	ctx, cancel := context.WithCancel(c.cl.ctx)
+	g := &groupConsumer{
+		c:   c,
+		cl:  c.cl,
+		cfg: &c.cl.cfg,
+
+		ctx:    ctx,
+		cancel: cancel,
+
+		reSeen: make(map[string]bool),
+
+		manageDone:       make(chan struct{}),
+		tps:              newTopicsPartitions(),
+		rejoinCh:         make(chan string, 1),
+		heartbeatForceCh: make(chan func(error)),
+		using:            make(map[string]int),
+
+		left: make(chan struct{}),
+	}
+	c.g = g
+	if !g.cfg.setCommitCallback {
+		g.cfg.commitCallback = g.defaultCommitCallback
+	}
+
+	if g.cfg.txnID == nil {
+		// We only override revoked / lost if they were not explicitly
+		// set by options.
+		if !g.cfg.setRevoked {
+			g.cfg.onRevoked = g.defaultRevoke
+		}
+		// For onLost, we do not want to commit in onLost, so we
+		// explicitly set onLost to an empty function to avoid the
+		// fallback to onRevoked.
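+		//
+		// Illustrative of "explicitly set by options" (not from the
+		// upstream source): a user-supplied revoke hook that commits
+		// synchronously before partitions are lost would be installed
+		// via
+		//
+		//	kgo.OnPartitionsRevoked(func(ctx context.Context, cl *kgo.Client, _ map[string][]int32) {
+		//		cl.CommitOffsetsSync(ctx, cl.UncommittedOffsets(), nil)
+		//	})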
+ if !g.cfg.setLost { + g.cfg.onLost = func(context.Context, *Client, map[string][]int32) {} + } + } else { + g.cfg.autocommitDisable = true + } + + for _, logOn := range []struct { + name string + set *func(context.Context, *Client, map[string][]int32) + }{ + {"OnPartitionsAssigned", &g.cfg.onAssigned}, + {"OnPartitionsRevoked", &g.cfg.onRevoked}, + {"OnPartitionsLost", &g.cfg.onLost}, + } { + user := *logOn.set + name := logOn.name + *logOn.set = func(ctx context.Context, cl *Client, m map[string][]int32) { + var ctxExpired bool + select { + case <-ctx.Done(): + ctxExpired = true + default: + } + if ctxExpired { + cl.cfg.logger.Log(LogLevelDebug, "entering "+name, "with", m, "context_expired", ctxExpired) + } else { + cl.cfg.logger.Log(LogLevelDebug, "entering "+name, "with", m) + } + if user != nil { + dup := make(map[string][]int32) + for k, vs := range m { + dup[k] = append([]int32(nil), vs...) + } + user(ctx, cl, dup) + } + } + } + + if g.cfg.onFetched != nil || g.cfg.adjustOffsetsBeforeAssign != nil { + revoked := g.cfg.onRevoked + g.cfg.onRevoked = func(ctx context.Context, cl *Client, m map[string][]int32) { + g.onFetchedMu.Lock() + defer g.onFetchedMu.Unlock() + revoked(ctx, cl, m) + } + } + + // For non-regex topics, we explicitly ensure they exist for loading + // metadata. This is of no impact if we are *also* consuming via regex, + // but that is no problem. + if len(g.cfg.topics) > 0 && !g.cfg.regex { + topics := make([]string, 0, len(g.cfg.topics)) + for topic := range g.cfg.topics { + topics = append(topics, topic) + } + g.tps.storeTopics(topics) + } +} + +// Manages the group consumer's join / sync / heartbeat / fetch offset flow. +// +// Once a group is assigned, we fire a metadata request for all topics the +// assignment specified interest in. Only after we finally have some topic +// metadata do we join the group, and once joined, this management runs in a +// dedicated goroutine until the group is left. +func (g *groupConsumer) manage() { + defer close(g.manageDone) + g.cfg.logger.Log(LogLevelInfo, "beginning to manage the group lifecycle", "group", g.cfg.group) + if !g.cfg.autocommitDisable && g.cfg.autocommitInterval > 0 { + g.cfg.logger.Log(LogLevelInfo, "beginning autocommit loop", "group", g.cfg.group) + go g.loopCommit() + } + + var consecutiveErrors int + joinWhy := "beginning to manage the group lifecycle" + for { + if joinWhy == "" { + joinWhy = "rejoining from normal rebalance" + } + err := g.joinAndSync(joinWhy) + if err == nil { + if joinWhy, err = g.setupAssignedAndHeartbeat(); err != nil { + if errors.Is(err, kerr.RebalanceInProgress) { + err = nil + } + } + } + if err == nil { + consecutiveErrors = 0 + continue + } + joinWhy = "rejoining after we previously errored and backed off" + + // If the user has BlockPollOnRebalance enabled, we have to + // block around the onLost and assigning. + g.c.waitAndAddRebalance() + + if errors.Is(err, context.Canceled) && g.cfg.onRevoked != nil { + // The cooperative consumer does not revoke everything + // while rebalancing, meaning if our context is + // canceled, we may have uncommitted data. Rather than + // diving into onLost, we should go into onRevoked, + // because for the most part, a context cancelation + // means we are leaving the group. Going into onRevoked + // gives us an opportunity to commit outstanding + // offsets. 
For the eager consumer, since we always
+			// revoke before exiting the heartbeat loop, we do not
+			// really care so much about *needing* to call
+			// onRevoked, but since we are handling this case for
+			// the cooperative consumer we may as well just also
+			// include the eager consumer.
+			g.cfg.onRevoked(g.cl.ctx, g.cl, g.nowAssigned.read())
+		} else {
+			// Any other error is perceived as a fatal error,
+			// and we go into onLost as appropriate.
+			if g.cfg.onLost != nil {
+				g.cfg.onLost(g.cl.ctx, g.cl, g.nowAssigned.read())
+			}
+			g.cfg.hooks.each(func(h Hook) {
+				if h, ok := h.(HookGroupManageError); ok {
+					h.OnGroupManageError(err)
+				}
+			})
+			g.c.addFakeReadyForDraining("", 0, &ErrGroupSession{err}, "notification of group management loop error")
+		}
+
+		// If we are eager, we should have invalidated everything
+		// before getting here, but we do so doubly just in case.
+		//
+		// If we are cooperative, the join and sync could have failed
+		// during the cooperative rebalance where we were still
+		// consuming. We need to invalidate everything. Waiting to
+		// resume from poll is necessary, but the user will likely be
+		// unable to commit.
+		{
+			g.c.mu.Lock()
+			g.c.assignPartitions(nil, assignInvalidateAll, nil, "clearing assignment at end of group management session")
+			g.mu.Lock()     // before allowing poll to touch uncommitted, lock the group
+			g.c.mu.Unlock() // now part of poll can continue
+			g.uncommitted = nil
+			g.mu.Unlock()
+
+			g.nowAssigned.store(nil)
+			g.lastAssigned = nil
+			g.fetching = nil
+
+			g.leader.Store(false)
+			g.resetExternal()
+		}
+
+		// Unblock polling now that we have called onLost and
+		// re-assigned.
+		g.c.unaddRebalance()
+
+		if errors.Is(err, context.Canceled) { // context was canceled, quit now
+			return
+		}
+
+		// Waiting for the backoff is a good time to update our
+		// metadata; maybe the error is from stale metadata.
+		consecutiveErrors++
+		backoff := g.cfg.retryBackoff(consecutiveErrors)
+		g.cfg.logger.Log(LogLevelError, "join and sync loop errored",
+			"group", g.cfg.group,
+			"err", err,
+			"consecutive_errors", consecutiveErrors,
+			"backoff", backoff,
+		)
+		deadline := time.Now().Add(backoff)
+		g.cl.waitmeta(g.ctx, backoff, "waitmeta during join & sync error backoff")
+		after := time.NewTimer(time.Until(deadline))
+		select {
+		case <-g.ctx.Done():
+			after.Stop()
+			return
+		case <-after.C:
+		}
+	}
+}
+
+func (g *groupConsumer) leave(ctx context.Context) {
+	// If g.using is nonzero before this check, then a manage goroutine has
+	// started. If not, it will never start because we set dying.
+	g.mu.Lock()
+	wasDead := g.dying
+	g.dying = true
+	wasManaging := g.managing
+	g.cancel()
+	g.mu.Unlock()
+
+	go func() {
+		if wasManaging {
+			// We want to wait for the manage goroutine to be done
+			// so that we call the user's on{Assigned,Revoked,Lost}.
+			<-g.manageDone
+		}
+		if wasDead {
+			// If we already called leave(), then we just wait for
+			// the prior leave to finish and we avoid re-issuing a
+			// LeaveGroup request.
+			return
+		}
+
+		defer close(g.left)
+
+		if g.cfg.instanceID != nil {
+			return
+		}
+
+		memberID := g.memberGen.memberID()
+		g.cfg.logger.Log(LogLevelInfo, "leaving group",
+			"group", g.cfg.group,
+			"member_id", memberID,
+		)
+		// If we error when leaving, there is not much
+		// we can do. We may as well just return.
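+		// (For static members, which return above without leaving, a
+		// manual leave is a sketch like the following -- InstanceID per
+		// kmsg's LeaveGroupRequestMember, values hypothetical:
+		//
+		//	req := kmsg.NewPtrLeaveGroupRequest()
+		//	req.Group = "my-group"
+		//	member := kmsg.NewLeaveGroupRequestMember()
+		//	member.MemberID = memberID
+		//	member.InstanceID = kmsg.StringPtr("my-instance-id")
+		//	req.Members = append(req.Members, member)
+		//	resp, err := req.RequestWith(ctx, cl)
+		//
+		// mirroring the non-static request built below.)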
+		req := kmsg.NewPtrLeaveGroupRequest()
+		req.Group = g.cfg.group
+		req.MemberID = memberID
+		member := kmsg.NewLeaveGroupRequestMember()
+		member.MemberID = memberID
+		member.Reason = kmsg.StringPtr("client leaving group per normal operation")
+		req.Members = append(req.Members, member)
+
+		resp, err := req.RequestWith(ctx, g.cl)
+		if err != nil {
+			g.leaveErr = err
+			return
+		}
+		g.leaveErr = kerr.ErrorForCode(resp.ErrorCode)
+	}()
+}
+
+// returns the difference of g.nowAssigned and g.lastAssigned.
+func (g *groupConsumer) diffAssigned() (added, lost map[string][]int32) {
+	nowAssigned := g.nowAssigned.clone()
+	if !g.cooperative.Load() {
+		return nowAssigned, nil
+	}
+
+	added = make(map[string][]int32, len(nowAssigned))
+	lost = make(map[string][]int32, len(nowAssigned))
+
+	// First, we diff lasts: any topic in last but not now is lost,
+	// otherwise, (1) new partitions are added, (2) common partitions are
+	// ignored, and (3) partitions no longer in now are lost.
+	lasts := make(map[int32]struct{}, 100)
+	for topic, lastPartitions := range g.lastAssigned {
+		nowPartitions, exists := nowAssigned[topic]
+		if !exists {
+			lost[topic] = lastPartitions
+			continue
+		}
+
+		for _, lastPartition := range lastPartitions {
+			lasts[lastPartition] = struct{}{}
+		}
+
+		// Anything now that does not exist in last is new,
+		// otherwise it is in common and we ignore it.
+		for _, nowPartition := range nowPartitions {
+			if _, exists := lasts[nowPartition]; !exists {
+				added[topic] = append(added[topic], nowPartition)
+			} else {
+				delete(lasts, nowPartition)
+			}
+		}
+
+		// Anything remaining in last does not exist now
+		// and is thus lost.
+		for last := range lasts {
+			lost[topic] = append(lost[topic], last)
+			delete(lasts, last) // reuse lasts
+		}
+	}
+
+	// Finally, any new topics in now assigned are strictly added.
+	for topic, nowPartitions := range nowAssigned {
+		if _, exists := g.lastAssigned[topic]; !exists {
+			added[topic] = nowPartitions
+		}
+	}
+
+	return added, lost
+}
+
+type revokeStage int8
+
+const (
+	revokeLastSession = iota
+	revokeThisSession
+)
+
+// revoke calls onRevoked for partitions that this group member is losing and
+// updates the uncommitted map after the revoke.
+//
+// For eager consumers, this simply revokes g.assigned. This will only be
+// called at the end of a group session.
+//
+// For cooperative consumers, this either
+//
+// (1) if revoking lost partitions from a prior session (i.e., after sync),
+// this revokes the passed in lost
+// (2) if revoking at the end of a session, this revokes topics that the
+// consumer is no longer interested in consuming
+//
+// Lastly, for cooperative consumers, this must selectively delete what was
+// lost from the uncommitted map.
+func (g *groupConsumer) revoke(stage revokeStage, lost map[string][]int32, leaving bool) {
+	g.c.waitAndAddRebalance()
+	defer g.c.unaddRebalance()
+
+	if !g.cooperative.Load() || leaving { // stage == revokeThisSession if not cooperative
+		// If we are an eager consumer, we stop fetching all of our
+		// current partitions as we will be revoking them.
+ g.c.mu.Lock() + if leaving { + g.c.assignPartitions(nil, assignInvalidateAll, nil, "revoking all assignments because we are leaving the group") + } else { + g.c.assignPartitions(nil, assignInvalidateAll, nil, "revoking all assignments because we are not cooperative") + } + g.c.mu.Unlock() + + if !g.cooperative.Load() { + g.cfg.logger.Log(LogLevelInfo, "eager consumer revoking prior assigned partitions", "group", g.cfg.group, "revoking", g.nowAssigned.read()) + } else { + g.cfg.logger.Log(LogLevelInfo, "cooperative consumer revoking prior assigned partitions because leaving group", "group", g.cfg.group, "revoking", g.nowAssigned.read()) + } + if g.cfg.onRevoked != nil { + g.cfg.onRevoked(g.cl.ctx, g.cl, g.nowAssigned.read()) + } + g.nowAssigned.store(nil) + g.lastAssigned = nil + + // After nilling uncommitted here, nothing should recreate + // uncommitted until a future fetch after the group is + // rejoined. This _can_ be broken with a manual SetOffsets or + // with CommitOffsets{,Sync} but we explicitly document not + // to do that outside the context of a live group session. + g.mu.Lock() + g.uncommitted = nil + g.mu.Unlock() + return + } + + switch stage { + case revokeLastSession: + // we use lost in this case + + case revokeThisSession: + // lost is nil for cooperative assigning. Instead, we determine + // lost by finding subscriptions we are no longer interested + // in. This would be from a user's PurgeConsumeTopics call. + // + // We just paused metadata, but purging triggers a rebalance + // which causes a new metadata request -- in short, this could + // be concurrent with a metadata findNewAssignments, so we + // lock. + g.nowAssigned.write(func(nowAssigned map[string][]int32) { + g.mu.Lock() + for topic, partitions := range nowAssigned { + if _, exists := g.using[topic]; !exists { + if lost == nil { + lost = make(map[string][]int32) + } + lost[topic] = partitions + delete(nowAssigned, topic) + } + } + g.mu.Unlock() + }) + } + + if len(lost) > 0 { + // We must now stop fetching anything we lost and invalidate + // any buffered fetches before falling into onRevoked. + // + // We want to invalidate buffered fetches since they may + // contain partitions that we lost, and we do not want a future + // poll to return those fetches. + lostOffsets := make(map[string]map[int32]Offset, len(lost)) + + for lostTopic, lostPartitions := range lost { + lostPartitionOffsets := make(map[int32]Offset, len(lostPartitions)) + for _, lostPartition := range lostPartitions { + lostPartitionOffsets[lostPartition] = Offset{} + } + lostOffsets[lostTopic] = lostPartitionOffsets + } + + // We must invalidate before revoking and before updating + // uncommitted, because we want any commits in onRevoke to be + // for the final polled offsets. We do not want to allow the + // logical race of allowing fetches for revoked partitions + // after a revoke but before an invalidation. 
+		g.c.mu.Lock()
+		g.c.assignPartitions(lostOffsets, assignInvalidateMatching, g.tps, "revoking assignments from cooperative consuming")
+		g.c.mu.Unlock()
+	}
+
+	if len(lost) > 0 || stage == revokeThisSession {
+		if len(lost) == 0 {
+			g.cfg.logger.Log(LogLevelInfo, "cooperative consumer calling onRevoke at the end of a session even though no partitions were lost", "group", g.cfg.group)
+		} else {
+			g.cfg.logger.Log(LogLevelInfo, "cooperative consumer calling onRevoke", "group", g.cfg.group, "lost", lost, "stage", stage)
+		}
+		if g.cfg.onRevoked != nil {
+			g.cfg.onRevoked(g.cl.ctx, g.cl, lost)
+		}
+	}
+
+	if len(lost) == 0 { // if we lost nothing, do nothing
+		return
+	}
+
+	if stage != revokeThisSession { // cooperative consumers rejoin after revoking what they lost
+		defer g.rejoin("cooperative rejoin after revoking what we lost from a rebalance")
+	}
+
+	// The block below deletes everything lost from our uncommitted map.
+	// All commits should be **completed** by the time this runs. An async
+	// commit can undo what we do below. The default revoke runs a sync
+	// commit.
+	g.mu.Lock()
+	defer g.mu.Unlock()
+	if g.uncommitted == nil {
+		return
+	}
+	for lostTopic, lostPartitions := range lost {
+		uncommittedPartitions := g.uncommitted[lostTopic]
+		if uncommittedPartitions == nil {
+			continue
+		}
+		for _, lostPartition := range lostPartitions {
+			delete(uncommittedPartitions, lostPartition)
+		}
+		if len(uncommittedPartitions) == 0 {
+			delete(g.uncommitted, lostTopic)
+		}
+	}
+	if len(g.uncommitted) == 0 {
+		g.uncommitted = nil
+	}
+}
+
+// assignRevokeSession aids in sequencing prerevoke/assign/revoke.
+type assignRevokeSession struct {
+	prerevokeDone chan struct{}
+	assignDone    chan struct{}
+	revokeDone    chan struct{}
+}
+
+func newAssignRevokeSession() *assignRevokeSession {
+	return &assignRevokeSession{
+		prerevokeDone: make(chan struct{}),
+		assignDone:    make(chan struct{}),
+		revokeDone:    make(chan struct{}),
+	}
+}
+
+// For cooperative consumers, the first thing a consumer does is to diff its
+// last assignment and its new assignment and revoke anything lost.
+// We call this a "prerevoke".
+func (s *assignRevokeSession) prerevoke(g *groupConsumer, lost map[string][]int32) <-chan struct{} {
+	go func() {
+		defer close(s.prerevokeDone)
+		if g.cooperative.Load() && len(lost) > 0 {
+			g.revoke(revokeLastSession, lost, false)
+		}
+	}()
+	return s.prerevokeDone
+}
+
+func (s *assignRevokeSession) assign(g *groupConsumer, newAssigned map[string][]int32) <-chan struct{} {
+	go func() {
+		defer close(s.assignDone)
+		<-s.prerevokeDone
+		if g.cfg.onAssigned != nil {
+			// We always call on assigned, even if nothing new is
+			// assigned. This allows consumers to know that
+			// assignment is done and do setup logic.
+			//
+			// If configured, we have to block polling.
+			g.c.waitAndAddRebalance()
+			defer g.c.unaddRebalance()
+			g.cfg.onAssigned(g.cl.ctx, g.cl, newAssigned)
+		}
+	}()
+	return s.assignDone
+}
+
+// At the end of a group session, before we leave the heartbeat loop, we call
+// revoke. For non-cooperative consumers, this revokes everything in the
+// current session, and before revoking, we invalidate all partitions. For the
+// cooperative consumer, this does nothing but does notify the client that a
+// revoke has begun / the group session is ending.
+//
+// This may not run before returning from the heartbeat loop: if we encounter a
+// fatal error, we return before revoking so that we can instead call onLost in
+// the manage loop.
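+//
+// The ordering the session enforces, roughly sketched:
+//
+//	prerevoke (cooperative: revoke lost) -> assign (onAssigned) -> revoke (session end)
+//
+// where each later stage waits on the prior stage's done channel before
+// running.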
+func (s *assignRevokeSession) revoke(g *groupConsumer, leaving bool) <-chan struct{} {
+	go func() {
+		defer close(s.revokeDone)
+		<-s.assignDone
+		g.revoke(revokeThisSession, nil, leaving)
+	}()
+	return s.revokeDone
+}
+
+// This chunk of code "pre" revokes lost partitions for the cooperative
+// consumer and then begins heartbeating while fetching offsets. This returns
+// when heartbeating errors (or if fetch offsets errors).
+//
+// Before returning, this function ensures that
+// - onAssigned is complete
+// - which ensures that pre revoking is complete
+// - fetching is complete
+// - heartbeating is complete
+func (g *groupConsumer) setupAssignedAndHeartbeat() (string, error) {
+	type hbquit struct {
+		rejoinWhy string
+		err       error
+	}
+	hbErrCh := make(chan hbquit, 1)
+	fetchErrCh := make(chan error, 1)
+
+	s := newAssignRevokeSession()
+	added, lost := g.diffAssigned()
+	g.lastAssigned = g.nowAssigned.clone() // now that we are done with our last assignment, update it per the new assignment
+
+	g.cfg.logger.Log(LogLevelInfo, "new group session begun", "group", g.cfg.group, "added", mtps(added), "lost", mtps(lost))
+	s.prerevoke(g, lost) // for cooperative consumers
+
+	// Since we have joined the group, we immediately begin heartbeating.
+	// This will continue until the heartbeat errors, the group is killed,
+	// or the fetch offsets below errors.
+	ctx, cancel := context.WithCancel(g.ctx)
+	go func() {
+		defer cancel() // potentially kill offset fetching
+		g.cfg.logger.Log(LogLevelInfo, "beginning heartbeat loop", "group", g.cfg.group)
+		rejoinWhy, err := g.heartbeat(fetchErrCh, s)
+		hbErrCh <- hbquit{rejoinWhy, err}
+	}()
+
+	// We immediately begin fetching offsets. We want to wait until the
+	// fetch function returns, since it assumes within it that another
+	// assign cannot happen (it assigns partitions itself). Returning
+	// before the fetch completes would not be good.
+	//
+	// The difference between fetchDone and fetchErrCh is that fetchErrCh
+	// can kill heartbeating, or signal it to continue, while fetchDone
+	// is specifically used for this function's return.
+	fetchDone := make(chan struct{})
+	defer func() { <-fetchDone }()
+
+	// Before we fetch offsets, we wait for the user's onAssign callback to
+	// be done. This ensures a few things:
+	//
+	// * that we wait for prerevoking to be done, which updates the
+	// uncommitted field. Waiting for that ensures that a rejoin and poll
+	// does not have weird concurrent interaction.
+	//
+	// * that our onLost will not be concurrent with onAssign
+	//
+	// * that the user can start up any per-partition processors necessary
+	// before we begin consuming that partition.
+	//
+	// We especially need to wait here because heartbeating may not
+	// necessarily run onRevoke before returning (because of a fatal
+	// error).
+	s.assign(g, added)
+
+	// If cooperative consuming, we may have to resume fetches. See the
+	// comment on adjustCooperativeFetchOffsets.
+	//
+	// We do this AFTER the user's callback. If we add more partitions
+	// to `added` that are from a previously canceled fetch, we do NOT
+	// want to pass those fetch-resumed partitions to the user callback
+	// again. See #705.
+ if g.cooperative.Load() { + added = g.adjustCooperativeFetchOffsets(added, lost) + } + + <-s.assignDone + + if len(added) > 0 { + go func() { + defer close(fetchDone) + defer close(fetchErrCh) + fetchErrCh <- g.fetchOffsets(ctx, added) + }() + } else { + close(fetchDone) + close(fetchErrCh) + } + + // Finally, we simply return whatever the heartbeat error is. This will + // be the fetch offset error if that function is what killed this. + + done := <-hbErrCh + return done.rejoinWhy, done.err +} + +// heartbeat issues heartbeat requests to Kafka for the duration of a group +// session. +// +// This function begins before fetching offsets to allow the consumer's +// onAssigned to be called before fetching. If the eventual offset fetch +// errors, we continue heartbeating until onRevoked finishes and our metadata +// is updated. If the error is not RebalanceInProgress, we return immediately. +// +// If the offset fetch is successful, then we basically sit in this function +// until a heartbeat errors or we, being the leader, decide to re-join. +func (g *groupConsumer) heartbeat(fetchErrCh <-chan error, s *assignRevokeSession) (string, error) { + ticker := time.NewTicker(g.cfg.heartbeatInterval) + defer ticker.Stop() + + // We issue one heartbeat quickly if we are cooperative because + // cooperative consumers rejoin the group immediately, and we want to + // detect that in 500ms rather than 3s. + var cooperativeFastCheck <-chan time.Time + if g.cooperative.Load() { + cooperativeFastCheck = time.After(500 * time.Millisecond) + } + + var metadone, revoked <-chan struct{} + var heartbeat, didMetadone, didRevoke bool + var rejoinWhy string + var lastErr error + + ctxCh := g.ctx.Done() + + for { + var err error + var force func(error) + heartbeat = false + select { + case <-cooperativeFastCheck: + heartbeat = true + case <-ticker.C: + heartbeat = true + case force = <-g.heartbeatForceCh: + heartbeat = true + case rejoinWhy = <-g.rejoinCh: + // If a metadata update changes our subscription, + // we just pretend we are rebalancing. + g.cfg.logger.Log(LogLevelInfo, "forced rejoin quitting heartbeat loop", "why", rejoinWhy) + err = kerr.RebalanceInProgress + case err = <-fetchErrCh: + fetchErrCh = nil + case <-metadone: + metadone = nil + didMetadone = true + case <-revoked: + revoked = nil + didRevoke = true + case <-ctxCh: + // Even if the group is left, we need to wait for our + // revoke to finish before returning, otherwise the + // manage goroutine will race with us setting + // nowAssigned. + ctxCh = nil + err = context.Canceled + } + + if heartbeat { + g.cfg.logger.Log(LogLevelDebug, "heartbeating", "group", g.cfg.group) + req := kmsg.NewPtrHeartbeatRequest() + req.Group = g.cfg.group + memberID, generation := g.memberGen.load() + req.Generation = generation + req.MemberID = memberID + req.InstanceID = g.cfg.instanceID + var resp *kmsg.HeartbeatResponse + if resp, err = req.RequestWith(g.ctx, g.cl); err == nil { + err = kerr.ErrorForCode(resp.ErrorCode) + } + g.cfg.logger.Log(LogLevelDebug, "heartbeat complete", "group", g.cfg.group, "err", err) + if force != nil { + force(err) + } + } + + // The first error either triggers a clean revoke and metadata + // update or it returns immediately. If we triggered the + // revoke, we wait for it to complete regardless of any future + // error. 
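+		// In short: the first error kicks off a revoke and a metadata
+		// wait below, and the loop keeps selecting (still heartbeating)
+		// until both have completed, at which point we return the last
+		// error observed.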
+		if didMetadone && didRevoke {
+			return rejoinWhy, lastErr
+		}
+
+		if err == nil {
+			continue
+		}
+
+		if lastErr == nil {
+			g.cfg.logger.Log(LogLevelInfo, "heartbeat errored", "group", g.cfg.group, "err", err)
+		} else {
+			g.cfg.logger.Log(LogLevelInfo, "heartbeat errored again while waiting for user revoke to finish", "group", g.cfg.group, "err", err)
+		}
+
+		// Since we errored, we must revoke.
+		if !didRevoke && revoked == nil {
+			// If our error is not from rebalancing, then we
+			// encountered IllegalGeneration or UnknownMemberID or
+			// our context closed, all of which are unexpected and
+			// unrecoverable.
+			//
+			// We return early rather than revoking and updating
+			// metadata; the groupConsumer's manage function will
+			// call onLost with all partitions.
+			//
+			// setupAssignedAndHeartbeat still waits for onAssigned
+			// to be done so that we avoid calling onLost
+			// concurrently.
+			if !errors.Is(err, kerr.RebalanceInProgress) && revoked == nil {
+				return "", err
+			}
+
+			// Now we call the user provided revoke callback, even
+			// if cooperative: if cooperative, this only revokes
+			// partitions we no longer want to consume.
+			//
+			// If the err is context.Canceled, the group is being
+			// left and we revoke everything.
+			revoked = s.revoke(g, errors.Is(err, context.Canceled))
+		}
+		// Since we errored, while waiting for the revoke to finish, we
+		// update our metadata. A leader may have re-joined with new
+		// metadata, and we want the update.
+		if !didMetadone && metadone == nil {
+			waited := make(chan struct{})
+			metadone = waited
+			go func() {
+				g.cl.waitmeta(g.ctx, g.cfg.sessionTimeout, "waitmeta after heartbeat error")
+				close(waited)
+			}()
+		}
+
+		// We always save the latest error; generally this should be
+		// REBALANCE_IN_PROGRESS, but if the revoke takes too long,
+		// Kafka may boot us and we will get a different error.
+		lastErr = err
+	}
+}
+
+// ForceRebalance quits a group member's heartbeat loop so that the member
+// rejoins with a JoinGroupRequest.
+//
+// This function is only useful if you either (a) know that the group member is
+// a leader, and want to force a rebalance for any particular reason, or (b)
+// are using a custom group balancer, and have changed the metadata that will
+// be returned from its JoinGroupMetadata method. This function has no other
+// use; see KIP-568 for more details around this function's motivation.
+//
+// If neither of the cases above is true (this member is not a leader, and the
+// join group metadata has not changed), then Kafka will not actually trigger a
+// rebalance and will instead reply to the member with its current assignment.
+func (cl *Client) ForceRebalance() {
+	if g := cl.consumer.g; g != nil {
+		g.rejoin("rejoin from ForceRebalance")
+	}
+}
+
+// rejoin is called after a cooperative member revokes what it lost at the
+// beginning of a session, or if we are leader and detect new partitions to
+// consume.
+func (g *groupConsumer) rejoin(why string) {
+	select {
+	case g.rejoinCh <- why:
+	default:
+	}
+}
+
+// Joins and then syncs, issuing the two slow requests in goroutines to allow
+// group cancelation to return early.
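+//
+// The flow below, roughly sketched:
+//
+//	start: JoinGroup -> handleJoinResp (restarts on MemberIDRequired / UnknownMemberID)
+//	       SyncGroup -> handleSyncResp (restarts on RebalanceInProgress)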
+func (g *groupConsumer) joinAndSync(joinWhy string) error {
+	g.noCommitDuringJoinAndSync.Lock()
+	g.cfg.logger.Log(LogLevelDebug, "blocking commits from join&sync")
+	defer g.noCommitDuringJoinAndSync.Unlock()
+	defer g.cfg.logger.Log(LogLevelDebug, "unblocking commits from join&sync")
+
+	g.cfg.logger.Log(LogLevelInfo, "joining group", "group", g.cfg.group)
+	g.leader.Store(false)
+	g.getAndResetExternalRejoin()
+	defer func() {
+		// If we are not leader, we clear any tracking of external
+		// topics from when we were previously leader, since tracking
+		// these is just a waste.
+		if !g.leader.Load() {
+			g.resetExternal()
+		}
+	}()
+
+start:
+	select {
+	case <-g.rejoinCh: // drain to avoid unnecessary rejoins
+	default:
+	}
+
+	joinReq := kmsg.NewPtrJoinGroupRequest()
+	joinReq.Group = g.cfg.group
+	joinReq.SessionTimeoutMillis = int32(g.cfg.sessionTimeout.Milliseconds())
+	joinReq.RebalanceTimeoutMillis = int32(g.cfg.rebalanceTimeout.Milliseconds())
+	joinReq.ProtocolType = g.cfg.protocol
+	joinReq.MemberID = g.memberGen.memberID()
+	joinReq.InstanceID = g.cfg.instanceID
+	joinReq.Protocols = g.joinGroupProtocols()
+	if joinWhy != "" {
+		joinReq.Reason = kmsg.StringPtr(joinWhy)
+	}
+	var (
+		joinResp *kmsg.JoinGroupResponse
+		err      error
+		joined   = make(chan struct{})
+	)
+
+	// NOTE: For this function, we have to use the client context, not the
+	// group context. We want to allow people to issue one final commit in
+	// OnPartitionsRevoked before leaving a group, so we need to block
+	// commits during join&sync. If we used the group context, we would be
+	// canceled immediately when leaving while a join or sync is inflight,
+	// and then our final commit would receive either REBALANCE_IN_PROGRESS
+	// or ILLEGAL_GENERATION.
+
+	go func() {
+		defer close(joined)
+		joinResp, err = joinReq.RequestWith(g.cl.ctx, g.cl)
+	}()
+
+	select {
+	case <-joined:
+	case <-g.cl.ctx.Done():
+		return g.cl.ctx.Err() // client closed
+	}
+	if err != nil {
+		return err
+	}
+
+	restart, protocol, plan, err := g.handleJoinResp(joinResp)
+	if restart {
+		goto start
+	}
+	if err != nil {
+		g.cfg.logger.Log(LogLevelWarn, "join group failed", "group", g.cfg.group, "err", err)
+		return err
+	}
+
+	syncReq := kmsg.NewPtrSyncGroupRequest()
+	syncReq.Group = g.cfg.group
+	memberID, generation := g.memberGen.load()
+	syncReq.Generation = generation
+	syncReq.MemberID = memberID
+	syncReq.InstanceID = g.cfg.instanceID
+	syncReq.ProtocolType = &g.cfg.protocol
+	syncReq.Protocol = &protocol
+	if !joinResp.SkipAssignment {
+		syncReq.GroupAssignment = plan // nil unless we are the leader
+	}
+	var (
+		syncResp *kmsg.SyncGroupResponse
+		synced   = make(chan struct{})
+	)
+
+	g.cfg.logger.Log(LogLevelInfo, "syncing", "group", g.cfg.group, "protocol_type", g.cfg.protocol, "protocol", protocol)
+	go func() {
+		defer close(synced)
+		syncResp, err = syncReq.RequestWith(g.cl.ctx, g.cl)
+	}()
+
+	select {
+	case <-synced:
+	case <-g.cl.ctx.Done():
+		return g.cl.ctx.Err()
+	}
+	if err != nil {
+		return err
+	}
+
+	if err = g.handleSyncResp(protocol, syncResp); err != nil {
+		if errors.Is(err, kerr.RebalanceInProgress) {
+			g.cfg.logger.Log(LogLevelInfo, "sync failed with RebalanceInProgress, rejoining", "group", g.cfg.group)
+			goto start
+		}
+		g.cfg.logger.Log(LogLevelWarn, "sync group failed", "group", g.cfg.group, "err", err)
+		return err
+	}
+
+	// KIP-814 fixes one limitation with KIP-345, but has another
+	// fundamental limitation.
When an instance ID leader restarts, its
+	// first join always gets its old assignment *even if* the member's
+	// topic interests have changed. The broker tells us to skip doing
+	// assignment ourselves, but we ignore that for our well known
+	// balancers. Instead, we balance (but avoid sending it while syncing,
+	// as we are supposed to), and if our sync assignment differs from our
+	// own calculated assignment, we know we have a stale broker assignment
+	// and must trigger a rebalance.
+	if plan != nil && joinResp.SkipAssignment {
+		for _, assign := range plan {
+			if assign.MemberID == memberID {
+				if !bytes.Equal(assign.MemberAssignment, syncResp.MemberAssignment) {
+					g.rejoin("instance group leader restarted and was reassigned old plan, our topic interests changed and we must rejoin to force a rebalance")
+				}
+				break
+			}
+		}
+	}
+
+	return nil
+}
+
+func (g *groupConsumer) handleJoinResp(resp *kmsg.JoinGroupResponse) (restart bool, protocol string, plan []kmsg.SyncGroupRequestGroupAssignment, err error) {
+	if err = kerr.ErrorForCode(resp.ErrorCode); err != nil {
+		switch err {
+		case kerr.MemberIDRequired:
+			g.memberGen.storeMember(resp.MemberID) // KIP-394
+			g.cfg.logger.Log(LogLevelInfo, "join returned MemberIDRequired, rejoining with response's MemberID", "group", g.cfg.group, "member_id", resp.MemberID)
+			return true, "", nil, nil
+		case kerr.UnknownMemberID:
+			g.memberGen.storeMember("")
+			g.cfg.logger.Log(LogLevelInfo, "join returned UnknownMemberID, rejoining without a member id", "group", g.cfg.group)
+			return true, "", nil, nil
+		}
+		return // Request retries as necessary, so this must be a failure
+	}
+	g.memberGen.store(resp.MemberID, resp.Generation)
+
+	if resp.Protocol != nil {
+		protocol = *resp.Protocol
+	}
+
+	for _, balancer := range g.cfg.balancers {
+		if protocol == balancer.ProtocolName() {
+			cooperative := balancer.IsCooperative()
+			if !cooperative && g.cooperative.Load() {
+				g.cfg.logger.Log(LogLevelWarn, "downgrading from cooperative group to eager group, this is not supported per KIP-429!")
+			}
+			g.cooperative.Store(cooperative)
+			break
+		}
+	}
+
+	// KIP-345 has a fundamental limitation that KIP-814 also does not
+	// solve.
+	//
+	// When using instance IDs, if a leader restarts, its first join
+	// receives its old assignment no matter what. KIP-345 resulted in
+	// leaderless consumer groups; KIP-814 fixes this by notifying the
+	// restarted leader that it is still leader but that it should not
+	// balance.
+	//
+	// If the join response is <= v8, we hackily work around the leaderless
+	// situation by checking if the LeaderID is prefixed with our
+	// InstanceID. This is how Kafka and Redpanda are both implemented. At
+	// worst, if we mis-predict the leader, then we may accidentally try to
+	// cause a rebalance later and it will do nothing. That's fine. At
+	// least we can cause rebalances now, rather than having a leaderless,
+	// not-ever-rebalancing client.
+	//
+	// KIP-814 does not solve our problem fully: if we restart and rejoin,
+	// we always get our old assignment even if we changed what topics we
+	// were interested in. Because we have our old assignment, we think
+	// that the plan is fine *even with* our new interests, and we wait for
+	// some external rebalance trigger. We work around this limitation
+	// above (see "KIP-814") only for well known balancers; we cannot work
+	// around this limitation for not well known balancers because they may
+	// do weird things we cannot control or reason about.
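+	// (Concretely: brokers construct static member IDs as
+	// "<instance id>-<uuid>", which is why the HasPrefix check below is a
+	// reasonable, though as noted not guaranteed, guess at leadership.)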
+ leader := resp.LeaderID == resp.MemberID + leaderNoPlan := !leader && resp.Version <= 8 && g.cfg.instanceID != nil && strings.HasPrefix(resp.LeaderID, *g.cfg.instanceID+"-") + if leader { + g.leader.Store(true) + g.cfg.logger.Log(LogLevelInfo, "joined, balancing group", + "group", g.cfg.group, + "member_id", resp.MemberID, + "instance_id", strptr{g.cfg.instanceID}, + "generation", resp.Generation, + "balance_protocol", protocol, + "leader", true, + ) + plan, err = g.balanceGroup(protocol, resp.Members, resp.SkipAssignment) + } else if leaderNoPlan { + g.leader.Store(true) + g.cfg.logger.Log(LogLevelInfo, "joined as leader but unable to balance group due to KIP-345 limitations", + "group", g.cfg.group, + "member_id", resp.MemberID, + "instance_id", strptr{g.cfg.instanceID}, + "generation", resp.Generation, + "balance_protocol", protocol, + "leader", true, + ) + } else { + g.cfg.logger.Log(LogLevelInfo, "joined", + "group", g.cfg.group, + "member_id", resp.MemberID, + "instance_id", strptr{g.cfg.instanceID}, + "generation", resp.Generation, + "leader", false, + ) + } + return +} + +type strptr struct { + s *string +} + +func (s strptr) String() string { + if s.s == nil { + return "" + } + return *s.s +} + +// If other group members consume topics we are not interested in, we track the +// entire group's topics in this groupExternal type. On metadata update, we see +// if any partitions for any of these topics have changed, and if so, we as +// leader rejoin the group. +// +// Our external topics are cleared whenever we join and are not leader. We keep +// our previous external topics if we are leader: on the first balance as +// leader, we request metadata for all topics, then on followup balances, we +// already have that metadata and do not need to reload it when balancing. +// +// Whenever metadata updates, we detect if a rejoin is needed and always reset +// the rejoin status. +type groupExternal struct { + tps atomic.Value // map[string]int32 + rejoin atomicBool +} + +func (g *groupConsumer) loadExternal() *groupExternal { + e := g.external.Load() + if e != nil { + return e.(*groupExternal) + } + return nil +} + +// We reset our external topics whenever join&sync loop errors, or when we join +// and are not leader. +func (g *groupConsumer) resetExternal() { + g.external.Store((*groupExternal)(nil)) +} + +// If this is our first join as leader, or if a new member joined with new +// topics we were not tracking, we re-initialize external with the all-topics +// metadata refresh. +func (g *groupConsumer) initExternal(current map[string]int32) { + var e groupExternal + e.tps.Store(dupmsi32(current)) + g.external.Store(&e) +} + +// Reset whenever we join, & potentially used to rejoin when finding new +// assignments (i.e., end of metadata). +func (g *groupConsumer) getAndResetExternalRejoin() bool { + e := g.loadExternal() + if e == nil { + return false + } + defer e.rejoin.Store(false) + return e.rejoin.Load() +} + +// Runs fn over a load, not copy, of our map. +func (g *groupExternal) fn(fn func(map[string]int32)) { + if g == nil { + return + } + v := g.tps.Load() + if v == nil { + return + } + tps := v.(map[string]int32) + fn(tps) +} + +// Runs fn over a clone of our external map and updates the map. 
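+// This is copy-on-write: loads observe immutable maps, so writers mutate a
+// duplicate and atomically store it back.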
+func (g *groupExternal) cloned(fn func(map[string]int32)) { + g.fn(func(tps map[string]int32) { + dup := dupmsi32(tps) + fn(dup) + g.tps.Store(dup) + }) +} + +func (g *groupExternal) eachTopic(fn func(string)) { + g.fn(func(tps map[string]int32) { + for t := range tps { + fn(t) + } + }) +} + +func (g *groupExternal) updateLatest(meta map[string]*metadataTopic) { + g.cloned(func(tps map[string]int32) { + var rejoin bool + for t, ps := range tps { + latest, exists := meta[t] + if !exists || latest.loadErr != nil { + continue + } + if psLatest := int32(len(latest.partitions)); psLatest != ps { + rejoin = true + tps[t] = psLatest + } + } + if rejoin { + g.rejoin.Store(true) + } + }) +} + +func (g *groupConsumer) handleSyncResp(protocol string, resp *kmsg.SyncGroupResponse) error { + if err := kerr.ErrorForCode(resp.ErrorCode); err != nil { + return err + } + + b, err := g.findBalancer("sync assignment", protocol) + if err != nil { + return err + } + + assigned, err := b.ParseSyncAssignment(resp.MemberAssignment) + if err != nil { + g.cfg.logger.Log(LogLevelError, "sync assignment parse failed", "group", g.cfg.group, "err", err) + return err + } + + g.cfg.logger.Log(LogLevelInfo, "synced", "group", g.cfg.group, "assigned", mtps(assigned)) + + // Past this point, we will fall into the setupAssigned prerevoke code, + // meaning for cooperative, we will revoke what we need to. + g.nowAssigned.store(assigned) + return nil +} + +func (g *groupConsumer) joinGroupProtocols() []kmsg.JoinGroupRequestProtocol { + g.mu.Lock() + + topics := make([]string, 0, len(g.using)) + for topic := range g.using { + topics = append(topics, topic) + } + lastDup := make(map[string][]int32, len(g.lastAssigned)) + for t, ps := range g.lastAssigned { + lastDup[t] = append([]int32(nil), ps...) // deep copy to allow modifications + } + + g.mu.Unlock() + + sort.Strings(topics) // we guarantee to JoinGroupMetadata that the input strings are sorted + for _, partitions := range lastDup { + sort.Slice(partitions, func(i, j int) bool { return partitions[i] < partitions[j] }) // same for partitions + } + + gen := g.memberGen.generation() + var protos []kmsg.JoinGroupRequestProtocol + for _, balancer := range g.cfg.balancers { + proto := kmsg.NewJoinGroupRequestProtocol() + proto.Name = balancer.ProtocolName() + proto.Metadata = balancer.JoinGroupMetadata(topics, lastDup, gen) + protos = append(protos, proto) + } + return protos +} + +// If we are cooperatively consuming, we have a potential problem: if fetch +// offsets is canceled due to an immediate rebalance, when we resume, we will +// not re-fetch offsets for partitions we were previously assigned and are +// still assigned. We will only fetch offsets for new assignments. +// +// To work around that issue, we track everything we are fetching in g.fetching +// and only clear g.fetching if fetchOffsets returns with no error. +// +// Now, if fetching returns early due to an error, when we rejoin and re-fetch, +// we will resume fetching what we were previously: +// +// - first we remove what was lost +// - then we add anything new +// - then we translate our total set into the "added" list to be fetched on return +// +// Any time a group is completely lost, the manage loop clears fetching. When +// cooperative consuming, a hard error is basically losing the entire state and +// rejoining from scratch. 
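+//
+// A worked example with illustrative values: if a prior errored session was
+// fetching partitions 1, 2, and 3 of topic t, and this rebalance loses t/2
+// and adds t/4, then g.fetching becomes {t: 1, 3, 4} and that full set is
+// returned as the new "added" to fetch.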
+func (g *groupConsumer) adjustCooperativeFetchOffsets(added, lost map[string][]int32) map[string][]int32 { + if g.fetching != nil { + // We were fetching previously: remove anything lost. + for topic, partitions := range lost { + ft := g.fetching[topic] + if ft == nil { + continue // we were not fetching this topic + } + for _, partition := range partitions { + delete(ft, partition) + } + if len(ft) == 0 { + delete(g.fetching, topic) + } + } + } else { + // We were not fetching previously: start a new map for what we + // are adding. + g.fetching = make(map[string]map[int32]struct{}) + } + + // Merge everything we are newly fetching to our fetching map. + for topic, partitions := range added { + ft := g.fetching[topic] + if ft == nil { + ft = make(map[int32]struct{}, len(partitions)) + g.fetching[topic] = ft + } + for _, partition := range partitions { + ft[partition] = struct{}{} + } + } + + // Now translate our full set (previously fetching ++ newly fetching -- + // lost) into a new "added" map to be fetched. + added = make(map[string][]int32, len(g.fetching)) + for topic, partitions := range g.fetching { + ps := make([]int32, 0, len(partitions)) + for partition := range partitions { + ps = append(ps, partition) + } + added[topic] = ps + } + return added +} + +// fetchOffsets is issued once we join a group to see what the prior commits +// were for the partitions we were assigned. +func (g *groupConsumer) fetchOffsets(ctx context.Context, added map[string][]int32) (rerr error) { // we must use "rerr"! see introducing commit + // If we fetch successfully, we can clear the cross-group-cycle + // fetching tracking. + defer func() { + if rerr == nil { + g.fetching = nil + } + }() + + // Our client maps the v0 to v7 format to v8+ when sharding this + // request, if we are only requesting one group, as well as maps the + // response back, so we do not need to worry about v8+ here. +start: + req := kmsg.NewPtrOffsetFetchRequest() + req.Group = g.cfg.group + req.RequireStable = g.cfg.requireStable + for topic, partitions := range added { + reqTopic := kmsg.NewOffsetFetchRequestTopic() + reqTopic.Topic = topic + reqTopic.Partitions = partitions + req.Topics = append(req.Topics, reqTopic) + } + + var resp *kmsg.OffsetFetchResponse + var err error + + fetchDone := make(chan struct{}) + go func() { + defer close(fetchDone) + resp, err = req.RequestWith(ctx, g.cl) + }() + select { + case <-fetchDone: + case <-ctx.Done(): + g.cfg.logger.Log(LogLevelInfo, "fetch offsets failed due to context cancelation", "group", g.cfg.group) + return ctx.Err() + } + if err != nil { + g.cfg.logger.Log(LogLevelError, "fetch offsets failed with non-retryable error", "group", g.cfg.group, "err", err) + return err + } + + // Even if a leader epoch is returned, if brokers do not support + // OffsetForLeaderEpoch for some reason (odd set of supported reqs), we + // cannot use the returned leader epoch. + kip320 := g.cl.supportsOffsetForLeaderEpoch() + + offsets := make(map[string]map[int32]Offset) + for _, rTopic := range resp.Topics { + topicOffsets := make(map[int32]Offset) + offsets[rTopic.Topic] = topicOffsets + for _, rPartition := range rTopic.Partitions { + if err = kerr.ErrorForCode(rPartition.ErrorCode); err != nil { + // KIP-447: Unstable offset commit means there is a + // pending transaction that should be committing soon. + // We sleep for 1s and retry fetching offsets. 
+ if errors.Is(err, kerr.UnstableOffsetCommit) { + g.cfg.logger.Log(LogLevelInfo, "fetch offsets failed with UnstableOffsetCommit, waiting 1s and retrying", + "group", g.cfg.group, + "topic", rTopic.Topic, + "partition", rPartition.Partition, + ) + select { + case <-ctx.Done(): + case <-time.After(time.Second): + goto start + } + } + g.cfg.logger.Log(LogLevelError, "fetch offsets failed", + "group", g.cfg.group, + "topic", rTopic.Topic, + "partition", rPartition.Partition, + "err", err, + ) + return err + } + offset := Offset{ + at: rPartition.Offset, + epoch: -1, + } + if resp.Version >= 5 && kip320 { // KIP-320 + offset.epoch = rPartition.LeaderEpoch + } + if rPartition.Offset == -1 { + offset = g.cfg.resetOffset + } + topicOffsets[rPartition.Partition] = offset + } + } + + groupTopics := g.tps.load() + for fetchedTopic := range offsets { + if !groupTopics.hasTopic(fetchedTopic) { + delete(offsets, fetchedTopic) + g.cfg.logger.Log(LogLevelWarn, "member was assigned topic that we did not ask for in ConsumeTopics! skipping assigning this topic!", "group", g.cfg.group, "topic", fetchedTopic) + } + } + + if g.cfg.onFetched != nil { + g.onFetchedMu.Lock() + err = g.cfg.onFetched(ctx, g.cl, resp) + g.onFetchedMu.Unlock() + if err != nil { + return err + } + } + if g.cfg.adjustOffsetsBeforeAssign != nil { + g.onFetchedMu.Lock() + offsets, err = g.cfg.adjustOffsetsBeforeAssign(ctx, offsets) + g.onFetchedMu.Unlock() + if err != nil { + return err + } + } + + // Lock for assign and then updating uncommitted. + g.c.mu.Lock() + defer g.c.mu.Unlock() + g.mu.Lock() + defer g.mu.Unlock() + + // Eager: we already invalidated everything; nothing to re-invalidate. + // Cooperative: assign without invalidating what we are consuming. + g.c.assignPartitions(offsets, assignWithoutInvalidating, g.tps, fmt.Sprintf("newly fetched offsets for group %s", g.cfg.group)) + + // We need to update the uncommitted map so that SetOffsets(Committed) + // does not rewind before the committed offsets we just fetched. + if g.uncommitted == nil { + g.uncommitted = make(uncommitted, 10) + } + for topic, partitions := range offsets { + topicUncommitted := g.uncommitted[topic] + if topicUncommitted == nil { + topicUncommitted = make(map[int32]uncommit, 20) + g.uncommitted[topic] = topicUncommitted + } + for partition, offset := range partitions { + if offset.at < 0 { + continue // not yet committed + } + committed := EpochOffset{ + Epoch: offset.epoch, + Offset: offset.at, + } + topicUncommitted[partition] = uncommit{ + dirty: committed, + head: committed, + committed: committed, + } + } + } + return nil +} + +// findNewAssignments updates topics the group wants to use and other metadata. +// We only grab the group mu at the end if we need to. +// +// This joins the group if +// - the group has never been joined +// - new topics are found for consuming (changing this consumer's join metadata) +// +// Additionally, if the member is the leader, this rejoins the group if the +// leader notices new partitions in an existing topic. +// +// This does not rejoin if the leader notices a partition is lost, which is +// finicky. 
+func (g *groupConsumer) findNewAssignments() {
+	topics := g.tps.load()
+
+	type change struct {
+		isNew bool
+		delta int
+	}
+
+	var numNewTopics int
+	toChange := make(map[string]change, len(topics))
+	for topic, topicPartitions := range topics {
+		parts := topicPartitions.load()
+		numPartitions := len(parts.partitions)
+		// If we are already using this topic, add that it changed if
+		// there are more partitions than we were using prior.
+		if used, exists := g.using[topic]; exists {
+			if added := numPartitions - used; added > 0 {
+				toChange[topic] = change{delta: added}
+			}
+			continue
+		}
+
+		// We are iterating over g.tps, which is initialized in the
+		// group.init from the config's topics, but can also be added
+		// to in AddConsumeTopics. By default, we use the topic. If
+		// this is regex based, the config's topics are regular
+		// expressions that we need to evaluate against (and we do not
+		// support adding new regex).
+		useTopic := true
+		if g.cfg.regex {
+			useTopic = g.reSeen[topic]
+		}
+
+		// We only track using the topic if there are partitions for
+		// it; if there are none, then the topic was set by _us_ as "we
+		// want to load the metadata", but the topic was not returned
+		// in the metadata (or it was returned with an error).
+		if useTopic && numPartitions > 0 {
+			if g.cfg.regex && parts.isInternal {
+				continue
+			}
+			toChange[topic] = change{isNew: true, delta: numPartitions}
+			numNewTopics++
+		}
+	}
+
+	externalRejoin := g.leader.Load() && g.getAndResetExternalRejoin()
+
+	if len(toChange) == 0 && !externalRejoin {
+		return
+	}
+
+	g.mu.Lock()
+	defer g.mu.Unlock()
+
+	if g.dying {
+		return
+	}
+
+	for topic, change := range toChange {
+		g.using[topic] += change.delta
+	}
+
+	if !g.managing {
+		g.managing = true
+		go g.manage()
+		return
+	}
+
+	if numNewTopics > 0 {
+		g.rejoin("rejoining because there are more topics to consume, our interests have changed")
+	} else if g.leader.Load() {
+		if len(toChange) > 0 {
+			g.rejoin("rejoining because we are the leader and noticed some topics have new partitions")
+		} else if externalRejoin {
+			g.rejoin("leader detected that partitions on topics another member is consuming have changed, rejoining to trigger rebalance")
+		}
+	}
+}
+
+// uncommit tracks the latest offset polled (+1) and the latest commit.
+// The reason head is just past the latest offset is because we want
+// to commit TO an offset, not BEFORE an offset.
+type uncommit struct {
+	dirty     EpochOffset // if autocommitting, what will move to head on next Poll
+	head      EpochOffset // ready to commit
+	committed EpochOffset // what is committed
+}
+
+// EpochOffset combines a record offset with the leader epoch the broker
+// was at when the record was written.
+type EpochOffset struct {
+	// Epoch is the leader epoch of the record being committed. Truncation
+	// detection relies on the epoch of the CURRENT record. For truncation
+	// detection, the client asks "what is the end of this epoch?",
+	// which returns one after the end offset (see the next field, and
+	// check the docs on kmsg.OffsetForLeaderEpochRequest).
+	Epoch int32
+
+	// Offset is the offset of a record. If committing, this should be one
+	// AFTER a record's offset. Clients start consuming at the offset that
+	// is committed.
+	Offset int64
+}
+
+// Less returns whether this EpochOffset is less than another. This is less
+// than the other if this one's epoch is less, or the epochs are equal and
+// this one's offset is less.
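+//
+// For example, with illustrative values:
+//
+//	{Epoch: 3, Offset: 9}.Less({Epoch: 4, Offset: 1})  // true: lower epoch
+//	{Epoch: 3, Offset: 9}.Less({Epoch: 3, Offset: 10}) // true: same epoch, lower offset
+//	{Epoch: 3, Offset: 9}.Less({Epoch: 3, Offset: 9})  // false: equal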
+func (e EpochOffset) Less(o EpochOffset) bool { + return e.Epoch < o.Epoch || e.Epoch == o.Epoch && e.Offset < o.Offset +} + +type uncommitted map[string]map[int32]uncommit + +// updateUncommitted sets the latest uncommitted offset. +func (g *groupConsumer) updateUncommitted(fetches Fetches) { + var b bytes.Buffer + debug := g.cfg.logger.Level() >= LogLevelDebug + + // We set the head offset if autocommitting is disabled (because we + // only use head / committed in that case), or if we are greedily + // autocommitting (so that the latest head is available to autocommit). + setHead := g.cfg.autocommitDisable || g.cfg.autocommitGreedy + + g.mu.Lock() + defer g.mu.Unlock() + + for _, fetch := range fetches { + for _, topic := range fetch.Topics { + if debug { + fmt.Fprintf(&b, "%s[", topic.Topic) + } + var topicOffsets map[int32]uncommit + for _, partition := range topic.Partitions { + if len(partition.Records) == 0 { + continue + } + final := partition.Records[len(partition.Records)-1] + + if topicOffsets == nil { + if g.uncommitted == nil { + g.uncommitted = make(uncommitted, 10) + } + topicOffsets = g.uncommitted[topic.Topic] + if topicOffsets == nil { + topicOffsets = make(map[int32]uncommit, 20) + g.uncommitted[topic.Topic] = topicOffsets + } + } + + // Our new head points just past the final consumed offset, + // that is, if we rejoin, this is the offset to begin at. + set := EpochOffset{ + final.LeaderEpoch, // -1 if old message / unknown + final.Offset + 1, + } + prior := topicOffsets[partition.Partition] + + if debug { + if setHead { + fmt.Fprintf(&b, "%d{%d=>%d r%d}, ", partition.Partition, prior.head.Offset, set.Offset, len(partition.Records)) + } else { + fmt.Fprintf(&b, "%d{%d=>%d=>%d r%d}, ", partition.Partition, prior.head.Offset, prior.dirty.Offset, set.Offset, len(partition.Records)) + } + } + + prior.dirty = set + if setHead { + prior.head = set + } + topicOffsets[partition.Partition] = prior + } + + if debug { + if bytes.HasSuffix(b.Bytes(), []byte(", ")) { + b.Truncate(b.Len() - 2) + } + b.WriteString("], ") + } + } + } + + if debug { + update := b.String() + update = strings.TrimSuffix(update, ", ") // trim trailing comma and space after final topic + g.cfg.logger.Log(LogLevelDebug, "updated uncommitted", "group", g.cfg.group, "to", update) + } +} + +// Called at the start of PollXyz only if autocommitting is enabled and we are +// not committing greedily, this ensures that when we enter poll, everything +// previously consumed is a candidate for autocommitting. +func (g *groupConsumer) undirtyUncommitted() { + if g == nil { + return + } + // Disabling autocommit means we do not use the dirty offset: we always + // update head, and then manual commits use that. + if g.cfg.autocommitDisable { + return + } + // Greedy autocommitting does not use dirty offsets, because we always + // just set head to the latest. + if g.cfg.autocommitGreedy { + return + } + // If we are autocommitting marked records only, then we do not + // automatically un-dirty our offsets. + if g.cfg.autocommitMarks { + return + } + + g.mu.Lock() + defer g.mu.Unlock() + + for _, partitions := range g.uncommitted { + for partition, uncommit := range partitions { + if uncommit.dirty != uncommit.head { + uncommit.head = uncommit.dirty + partitions[partition] = uncommit + } + } + } +} + +// updateCommitted updates the group's uncommitted map. This function triply +// verifies that the resp matches the req as it should and that the req does +// not somehow contain more than what is in our uncommitted map. 
+func (g *groupConsumer) updateCommitted( + req *kmsg.OffsetCommitRequest, + resp *kmsg.OffsetCommitResponse, +) { + g.mu.Lock() + defer g.mu.Unlock() + + if req.Generation != g.memberGen.generation() { + return + } + if g.uncommitted == nil { + g.cfg.logger.Log(LogLevelWarn, "received an OffsetCommitResponse after our group session has ended, unable to handle this (were we kicked from the group?)") + return + } + if len(req.Topics) != len(resp.Topics) { // bad kafka + g.cfg.logger.Log(LogLevelError, fmt.Sprintf("broker replied to our OffsetCommitRequest incorrectly! Num topics in request: %d, in reply: %d, we cannot handle this!", len(req.Topics), len(resp.Topics)), "group", g.cfg.group) + return + } + + sort.Slice(req.Topics, func(i, j int) bool { + return req.Topics[i].Topic < req.Topics[j].Topic + }) + sort.Slice(resp.Topics, func(i, j int) bool { + return resp.Topics[i].Topic < resp.Topics[j].Topic + }) + + var b bytes.Buffer + debug := g.cfg.logger.Level() >= LogLevelDebug + + for i := range resp.Topics { + reqTopic := &req.Topics[i] + respTopic := &resp.Topics[i] + topic := g.uncommitted[respTopic.Topic] + if topic == nil || // just in case + reqTopic.Topic != respTopic.Topic || // bad kafka + len(reqTopic.Partitions) != len(respTopic.Partitions) { // same + g.cfg.logger.Log(LogLevelError, fmt.Sprintf("broker replied to our OffsetCommitRequest incorrectly! Topic at request index %d: %s, reply at index: %s; num partitions on request topic: %d, in reply: %d, we cannot handle this!", i, reqTopic.Topic, respTopic.Topic, len(reqTopic.Partitions), len(respTopic.Partitions)), "group", g.cfg.group) + continue + } + + sort.Slice(reqTopic.Partitions, func(i, j int) bool { + return reqTopic.Partitions[i].Partition < reqTopic.Partitions[j].Partition + }) + sort.Slice(respTopic.Partitions, func(i, j int) bool { + return respTopic.Partitions[i].Partition < respTopic.Partitions[j].Partition + }) + + if debug { + fmt.Fprintf(&b, "%s[", respTopic.Topic) + } + for i := range respTopic.Partitions { + reqPart := &reqTopic.Partitions[i] + respPart := &respTopic.Partitions[i] + uncommit, exists := topic[respPart.Partition] + if !exists { // just in case + continue + } + if reqPart.Partition != respPart.Partition { // bad kafka + g.cfg.logger.Log(LogLevelError, fmt.Sprintf("broker replied to our OffsetCommitRequest incorrectly! Topic %s partition %d != resp partition %d", reqTopic.Topic, reqPart.Partition, respPart.Partition), "group", g.cfg.group) + continue + } + if respPart.ErrorCode != 0 { + g.cfg.logger.Log(LogLevelWarn, "unable to commit offset for topic partition", + "group", g.cfg.group, + "topic", reqTopic.Topic, + "partition", reqPart.Partition, + "commit_from", uncommit.committed.Offset, + "commit_to", reqPart.Offset, + "commit_epoch", reqPart.LeaderEpoch, + "error_code", respPart.ErrorCode, + ) + continue + } + + if debug { + fmt.Fprintf(&b, "%d{%d=>%d}, ", reqPart.Partition, uncommit.committed.Offset, reqPart.Offset) + } + + set := EpochOffset{ + reqPart.LeaderEpoch, + reqPart.Offset, + } + uncommit.committed = set + + // head is set in four places: + // (1) if manually committing or greedily autocommitting, + // then head is bumped on poll + // (2) if autocommitting normally, then head is bumped + // to the prior poll on poll + // (3) if using marks, head is bumped on mark + // (4) here, and we can be here on autocommit or on + // manual commit (usually manual in an onRevoke) + // + // head is usually at or past the commit: usually, head + // is used to build the commit itself. 
However, in case 4,
+			// when the user manually commits in onRevoke, the user
+			// is likely committing with UncommittedOffsets, i.e.,
+			// the dirty offsets that are past the current head.
+			// We want to ensure we forward the head so that using
+			// it later does not rewind the manual commit.
+			//
+			// This does not affect the first case, because dirty == head,
+			// and manually committing dirty changes nothing.
+			//
+			// This does not affect the second case, because effectively,
+			// this is just bumping head early (dirty == head, no change).
+			//
+			// This *could* affect the third case, because an
+			// autocommit could begin, followed by a mark rewind,
+			// followed by autocommit completion. We document that
+			// using marks to rewind is not recommended.
+			//
+			// The user could also muck the offsets with SetOffsets.
+			// We document that concurrent committing is not encouraged;
+			// we do not attempt to guard past that.
+			//
+			// w.r.t. leader epochs, we document that modifying
+			// them is not recommended.
+			if uncommit.head.Less(set) {
+				uncommit.head = set
+			}
+
+			topic[respPart.Partition] = uncommit
+		}
+
+		if debug {
+			if bytes.HasSuffix(b.Bytes(), []byte(", ")) {
+				b.Truncate(b.Len() - 2)
+			}
+			b.WriteString("], ")
+		}
+	}
+
+	if debug {
+		update := b.String()
+		update = strings.TrimSuffix(update, ", ") // trim trailing comma and space after final topic
+		g.cfg.logger.Log(LogLevelDebug, "updated committed", "group", g.cfg.group, "to", update)
+	}
+}
+
+func (g *groupConsumer) defaultCommitCallback(_ *Client, _ *kmsg.OffsetCommitRequest, resp *kmsg.OffsetCommitResponse, err error) {
+	if err != nil {
+		if !errors.Is(err, context.Canceled) {
+			g.cfg.logger.Log(LogLevelError, "default commit failed", "group", g.cfg.group, "err", err)
+		} else {
+			g.cfg.logger.Log(LogLevelDebug, "default commit canceled", "group", g.cfg.group)
+		}
+		return
+	}
+	for _, topic := range resp.Topics {
+		for _, partition := range topic.Partitions {
+			if err := kerr.ErrorForCode(partition.ErrorCode); err != nil {
+				g.cfg.logger.Log(LogLevelError, "in default commit: unable to commit offsets for topic partition",
+					"group", g.cfg.group,
+					"topic", topic.Topic,
+					"partition", partition.Partition,
+					"error", err)
+			}
+		}
+	}
+}
+
+func (g *groupConsumer) loopCommit() {
+	ticker := time.NewTicker(g.cfg.autocommitInterval)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-ticker.C:
+		case <-g.ctx.Done():
+			return
+		}
+
+		// We use the group context for the default autocommit; revokes
+		// use the client context so that we can be sure we commit even
+		// after the group context is canceled (which is the first
+		// thing that happens so as to quit the manage loop before
+		// leaving a group).
+		//
+		// We always commit only the head. If we are autocommitting
+		// dirty, then updateUncommitted updates the head to dirty
+		// offsets.
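+		//
+		// Note the lock sequencing below: the join&sync read lock is
+		// held across the async commit and released only in the commit
+		// callback (or on the paths that do not commit), so a join or
+		// sync cannot begin while an autocommit is in flight.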
+ g.noCommitDuringJoinAndSync.RLock() + g.mu.Lock() + if !g.blockAuto { + uncommitted := g.getUncommittedLocked(true, false) + if len(uncommitted) == 0 { + g.cfg.logger.Log(LogLevelDebug, "skipping autocommit due to no offsets to commit", "group", g.cfg.group) + g.noCommitDuringJoinAndSync.RUnlock() + } else { + g.cfg.logger.Log(LogLevelDebug, "autocommitting", "group", g.cfg.group) + g.commit(g.ctx, uncommitted, func(cl *Client, req *kmsg.OffsetCommitRequest, resp *kmsg.OffsetCommitResponse, err error) { + g.noCommitDuringJoinAndSync.RUnlock() + g.cfg.commitCallback(cl, req, resp, err) + }) + } + } else { + g.noCommitDuringJoinAndSync.RUnlock() + } + g.mu.Unlock() + } +} + +// For SetOffsets, the gist of what follows: +// +// We need to set uncommitted.committed; that is the guarantee of this +// function. However, if, for everything we are setting, the head equals the +// commit, then we do not need to actually invalidate our current assignments. +// This is a great optimization for transactions that are resetting their state +// on abort. +func (g *groupConsumer) getSetAssigns(setOffsets map[string]map[int32]EpochOffset) (assigns map[string]map[int32]Offset) { + g.mu.Lock() + defer g.mu.Unlock() + + groupTopics := g.tps.load() + + if g.uncommitted == nil { + g.uncommitted = make(uncommitted) + } + for topic, partitions := range setOffsets { + if !groupTopics.hasTopic(topic) { + continue // trying to set a topic that was not assigned... + } + topicUncommitted := g.uncommitted[topic] + if topicUncommitted == nil { + topicUncommitted = make(map[int32]uncommit) + g.uncommitted[topic] = topicUncommitted + } + var topicAssigns map[int32]Offset + for partition, epochOffset := range partitions { + current, exists := topicUncommitted[partition] + topicUncommitted[partition] = uncommit{ + dirty: epochOffset, + head: epochOffset, + committed: epochOffset, + } + if exists && current.dirty == epochOffset { + continue + } else if topicAssigns == nil { + topicAssigns = make(map[int32]Offset, len(partitions)) + } + topicAssigns[partition] = Offset{ + at: epochOffset.Offset, + epoch: epochOffset.Epoch, + } + } + if len(topicAssigns) > 0 { + if assigns == nil { + assigns = make(map[string]map[int32]Offset, 10) + } + assigns[topic] = topicAssigns + } + } + + return assigns +} + +// UncommittedOffsets returns the latest uncommitted offsets. Uncommitted +// offsets are always updated on calls to PollFetches. +// +// If there are no uncommitted offsets, this returns nil. +func (cl *Client) UncommittedOffsets() map[string]map[int32]EpochOffset { + if g := cl.consumer.g; g != nil { + return g.getUncommitted(true) + } + return nil +} + +// MarkedOffsets returns the latest marked offsets. When autocommitting, a +// marked offset is an offset that can be committed, in comparison to a dirty +// offset that cannot yet be committed. MarkedOffsets returns nil if you are +// not using AutoCommitMarks. +func (cl *Client) MarkedOffsets() map[string]map[int32]EpochOffset { + g := cl.consumer.g + if g == nil || !cl.cfg.autocommitMarks { + return nil + } + return g.getUncommitted(false) +} + +// CommittedOffsets returns the latest committed offsets. Committed offsets are +// updated from commits or from joining a group and fetching offsets. +// +// If there are no committed offsets, this returns nil. 
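+//
+// A minimal usage sketch (topics and partitions are whatever the group has
+// committed):
+//
+//	for topic, partitions := range cl.CommittedOffsets() {
+//		for partition, eo := range partitions {
+//			fmt.Printf("%s/%d @ %d (epoch %d)\n", topic, partition, eo.Offset, eo.Epoch)
+//		}
+//	}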
+func (cl *Client) CommittedOffsets() map[string]map[int32]EpochOffset { + g := cl.consumer.g + if g == nil { + return nil + } + g.mu.Lock() + defer g.mu.Unlock() + + return g.getUncommittedLocked(false, false) +} + +func (g *groupConsumer) getUncommitted(dirty bool) map[string]map[int32]EpochOffset { + g.mu.Lock() + defer g.mu.Unlock() + return g.getUncommittedLocked(true, dirty) +} + +func (g *groupConsumer) getUncommittedLocked(head, dirty bool) map[string]map[int32]EpochOffset { + if g.uncommitted == nil { + return nil + } + + var uncommitted map[string]map[int32]EpochOffset + for topic, partitions := range g.uncommitted { + var topicUncommitted map[int32]EpochOffset + for partition, uncommit := range partitions { + if head && (dirty && uncommit.dirty == uncommit.committed || !dirty && uncommit.head == uncommit.committed) { + continue + } + if topicUncommitted == nil { + if uncommitted == nil { + uncommitted = make(map[string]map[int32]EpochOffset, len(g.uncommitted)) + } + topicUncommitted = uncommitted[topic] + if topicUncommitted == nil { + topicUncommitted = make(map[int32]EpochOffset, len(partitions)) + uncommitted[topic] = topicUncommitted + } + } + if head { + if dirty { + topicUncommitted[partition] = uncommit.dirty + } else { + topicUncommitted[partition] = uncommit.head + } + } else { + topicUncommitted[partition] = uncommit.committed + } + } + } + return uncommitted +} + +type commitContextFnT struct{} + +var commitContextFn commitContextFnT + +// PreCommitFnContext attaches fn to the context through WithValue. Using the +// context while committing allows fn to be called just before the commit is +// issued. This can be used to modify the actual commit, such as by associating +// metadata with partitions. If fn returns an error, the commit is not +// attempted. +func PreCommitFnContext(ctx context.Context, fn func(*kmsg.OffsetCommitRequest) error) context.Context { + return context.WithValue(ctx, commitContextFn, fn) +} + +type txnCommitContextFnT struct{} + +var txnCommitContextFn txnCommitContextFnT + +// PreTxnCommitFnContext attaches fn to the context through WithValue. Using +// the context while committing a transaction allows fn to be called just +// before the commit is issued. This can be used to modify the actual commit, +// such as by associating metadata with partitions (for transactions, the +// default internal metadata is the client's current member ID). If fn returns +// an error, the commit is not attempted. This context can be used in either +// GroupTransactSession.End or in Client.EndTransaction. +func PreTxnCommitFnContext(ctx context.Context, fn func(*kmsg.TxnOffsetCommitRequest) error) context.Context { + return context.WithValue(ctx, txnCommitContextFn, fn) +} + +// CommitRecords issues a synchronous offset commit for the offsets contained +// within rs. Retryable errors are retried up to the configured retry limit, +// and any unretryable error is returned. +// +// This function is useful as a simple way to commit offsets if you have +// disabled autocommitting. As an alternative if you always want to commit +// everything, see CommitUncommittedOffsets. +// +// Simple usage of this function may lead to duplicate records if a consumer +// group rebalance occurs before or while this function is being executed. You +// can avoid this scenario by calling CommitRecords in a custom +// OnPartitionsRevoked, but for most workloads, a small bit of potential +// duplicate processing is fine. See the documentation on DisableAutoCommit +// for more details. 
You can also avoid this problem by using +// BlockRebalanceOnPoll, but that option comes with its own tradeoffs (refer to +// its documentation). +// +// It is recommended to always commit records in order (per partition). If you +// call this function twice with a record for partition 0 at offset 999 +// initially, and then with a record for partition 0 at offset 4, you will rewind +// your commit. +// +// A use case for this function may be to partially process a batch of records, +// commit, and then continue to process the rest of the records. It is not +// recommended to call this for every record processed in a high throughput +// scenario, because you do not want to unnecessarily increase load on Kafka. +// +// If you do not want to wait for this function to complete before continuing +// processing records, you can call this function in a goroutine. +func (cl *Client) CommitRecords(ctx context.Context, rs ...*Record) error { + // First build the offset commit map. We favor the latest epoch, then + // offset, if any records map to the same topic / partition. + offsets := make(map[string]map[int32]EpochOffset) + for _, r := range rs { + toffsets := offsets[r.Topic] + if toffsets == nil { + toffsets = make(map[int32]EpochOffset) + offsets[r.Topic] = toffsets + } + + if at, exists := toffsets[r.Partition]; exists { + if at.Epoch > r.LeaderEpoch || at.Epoch == r.LeaderEpoch && at.Offset > r.Offset { + continue + } + } + toffsets[r.Partition] = EpochOffset{ + r.LeaderEpoch, + r.Offset + 1, // need to advance to the next offset to move forward + } + } + + var rerr error // return error + + // Our client retries an OffsetCommitRequest as necessary if the first + // response partition has a retryable group error (group coordinator + // loading, etc), so any partition error is fatal. + cl.CommitOffsetsSync(ctx, offsets, func(_ *Client, _ *kmsg.OffsetCommitRequest, resp *kmsg.OffsetCommitResponse, err error) { + if err != nil { + rerr = err + return + } + + for _, topic := range resp.Topics { + for _, partition := range topic.Partitions { + if err := kerr.ErrorForCode(partition.ErrorCode); err != nil { + rerr = err + return + } + } + } + }) + + return rerr +} + +// MarkCommitRecords marks records to be available for autocommitting. This +// function is only useful if you use the AutoCommitMarks config option; see +// the documentation on that option for more details. This function does not +// allow rewinds. +func (cl *Client) MarkCommitRecords(rs ...*Record) { + g := cl.consumer.g + if g == nil || !cl.cfg.autocommitMarks { + return + } + + sort.Slice(rs, func(i, j int) bool { + return rs[i].Topic < rs[j].Topic || + rs[i].Topic == rs[j].Topic && rs[i].Partition < rs[j].Partition + }) + + // protect g.uncommitted map + g.mu.Lock() + defer g.mu.Unlock() + + if g.uncommitted == nil { + g.uncommitted = make(uncommitted) + } + var curTopic string + var curPartitions map[int32]uncommit + for _, r := range rs { + if curPartitions == nil || r.Topic != curTopic { + curPartitions = g.uncommitted[r.Topic] + if curPartitions == nil { + curPartitions = make(map[int32]uncommit) + g.uncommitted[r.Topic] = curPartitions + } + curTopic = r.Topic + } + + current := curPartitions[r.Partition] + if newHead := (EpochOffset{ + r.LeaderEpoch, + r.Offset + 1, + }); current.head.Less(newHead) { + curPartitions[r.Partition] = uncommit{ + dirty: current.dirty, + committed: current.committed, + head: newHead, + } + } + } +} + +// MarkCommitOffsets marks offsets to be available for autocommitting.
This +// function is only useful if you use the AutoCommitMarks config option; see +// the documentation on that option for more details. This function does not +// allow rewinds. +func (cl *Client) MarkCommitOffsets(unmarked map[string]map[int32]EpochOffset) { + g := cl.consumer.g + if g == nil || !cl.cfg.autocommitMarks { + return + } + + // protect g.uncommitted map + g.mu.Lock() + defer g.mu.Unlock() + + if g.uncommitted == nil { + g.uncommitted = make(uncommitted) + } + + for topic, partitions := range unmarked { + curPartitions := g.uncommitted[topic] + if curPartitions == nil { + curPartitions = make(map[int32]uncommit) + g.uncommitted[topic] = curPartitions + } + + for partition, newHead := range partitions { + current := curPartitions[partition] + if current.head.Less(newHead) { + curPartitions[partition] = uncommit{ + dirty: current.dirty, + committed: current.committed, + head: newHead, + } + } + } + } +} + +// CommitUncommittedOffsets issues a synchronous offset commit for any +// partition that has been consumed from and that has uncommitted offsets. +// Retryable errors are retried up to the configured retry limit, and any +// unretryable error is returned. +// +// The recommended pattern for using this function is to have a poll / process +// / commit loop. First PollFetches, then process every record, then call +// CommitUncommittedOffsets. +// +// As an alternative if you want to commit specific records, see CommitRecords. +func (cl *Client) CommitUncommittedOffsets(ctx context.Context) error { + // This function is just the tail end of CommitRecords just above. + return cl.commitOffsets(ctx, cl.UncommittedOffsets()) +} + +// CommitMarkedOffsets issues a synchronous offset commit for any partition +// that has been consumed from and that has marked offsets. Retryable errors are +// retried up to the configured retry limit, and any unretryable error is +// returned. +// +// This function is only useful if you have marked offsets with +// MarkCommitRecords when using AutoCommitMarks; otherwise this is a no-op. +// +// The recommended pattern for using this function is to have a poll / process +// / commit loop. First PollFetches, then process every record, +// call MarkCommitRecords for the records you wish to commit, and then call +// CommitMarkedOffsets. +// +// As an alternative if you want to commit specific records, see CommitRecords. +func (cl *Client) CommitMarkedOffsets(ctx context.Context) error { + // This function is just the tail end of CommitRecords just above. + marked := cl.MarkedOffsets() + if len(marked) == 0 { + return nil + } + return cl.commitOffsets(ctx, marked) +} + +func (cl *Client) commitOffsets(ctx context.Context, offsets map[string]map[int32]EpochOffset) error { + var rerr error + cl.CommitOffsetsSync(ctx, offsets, func(_ *Client, _ *kmsg.OffsetCommitRequest, resp *kmsg.OffsetCommitResponse, err error) { + if err != nil { + rerr = err + return + } + + for _, topic := range resp.Topics { + for _, partition := range topic.Partitions { + if err := kerr.ErrorForCode(partition.ErrorCode); err != nil { + rerr = err + return + } + } + } + }) + return rerr +} + +// CommitOffsetsSync cancels any active CommitOffsets, begins a commit that +// cannot be canceled, and waits for that commit to complete. This function +// will not return until the commit is done and the onDone callback is +// complete.
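+//
+// For illustration only (editor's sketch; assumes autocommit is disabled and
+// that committing everything polled is acceptable on revoke):
+//
+//	kgo.OnPartitionsRevoked(func(ctx context.Context, cl *kgo.Client, _ map[string][]int32) {
+//		cl.CommitOffsetsSync(ctx, cl.UncommittedOffsets(), nil)
+//	})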
+// +// The purpose of this function is for use in OnPartitionsRevoked or committing +// before leaving a group, because you do not want to have a commit issued in +// OnPartitionsRevoked canceled. +// +// This is an advanced function, and for simpler, more easily understandable +// committing, see CommitRecords and CommitUncommittedOffsets. +// +// For more information about committing and committing asynchronously, see +// CommitOffsets. +func (cl *Client) CommitOffsetsSync( + ctx context.Context, + uncommitted map[string]map[int32]EpochOffset, + onDone func(*Client, *kmsg.OffsetCommitRequest, *kmsg.OffsetCommitResponse, error), +) { + if onDone == nil { + onDone = func(*Client, *kmsg.OffsetCommitRequest, *kmsg.OffsetCommitResponse, error) {} + } + + g := cl.consumer.g + if g == nil { + onDone(cl, kmsg.NewPtrOffsetCommitRequest(), kmsg.NewPtrOffsetCommitResponse(), errNotGroup) + return + } + if len(uncommitted) == 0 { + onDone(cl, kmsg.NewPtrOffsetCommitRequest(), kmsg.NewPtrOffsetCommitResponse(), nil) + return + } + g.commitOffsetsSync(ctx, uncommitted, onDone) +} + +// waitJoinSyncMu is a rather insane way to try to grab a lock, but also return +// early if we have to wait and the context is canceled. +func (g *groupConsumer) waitJoinSyncMu(ctx context.Context) error { + if g.noCommitDuringJoinAndSync.TryRLock() { + g.cfg.logger.Log(LogLevelDebug, "grabbed join/sync mu on first try") + return nil + } + + var ( + blockJoinSyncCh = make(chan struct{}) + mu sync.Mutex + returned bool + maybeRUnlock = func() { + mu.Lock() + defer mu.Unlock() + if returned { + g.noCommitDuringJoinAndSync.RUnlock() + } + returned = true + } + ) + + go func() { + g.noCommitDuringJoinAndSync.RLock() + close(blockJoinSyncCh) + maybeRUnlock() + }() + + select { + case <-blockJoinSyncCh: + g.cfg.logger.Log(LogLevelDebug, "grabbed join/sync mu after waiting") + return nil + case <-ctx.Done(): + g.cfg.logger.Log(LogLevelDebug, "not grabbing mu because context canceled") + maybeRUnlock() + return ctx.Err() + } +} + +func (g *groupConsumer) commitOffsetsSync( + ctx context.Context, + uncommitted map[string]map[int32]EpochOffset, + onDone func(*Client, *kmsg.OffsetCommitRequest, *kmsg.OffsetCommitResponse, error), +) { + g.cfg.logger.Log(LogLevelDebug, "in CommitOffsetsSync", "group", g.cfg.group, "with", uncommitted) + defer g.cfg.logger.Log(LogLevelDebug, "left CommitOffsetsSync", "group", g.cfg.group) + + done := make(chan struct{}) + defer func() { <-done }() + + if onDone == nil { + onDone = func(*Client, *kmsg.OffsetCommitRequest, *kmsg.OffsetCommitResponse, error) {} + } + + if err := g.waitJoinSyncMu(ctx); err != nil { + onDone(g.cl, kmsg.NewPtrOffsetCommitRequest(), kmsg.NewPtrOffsetCommitResponse(), err) + close(done) + return + } + + g.syncCommitMu.Lock() // block all other concurrent commits until our OnDone is done. 
+ unblockCommits := func(cl *Client, req *kmsg.OffsetCommitRequest, resp *kmsg.OffsetCommitResponse, err error) { + g.noCommitDuringJoinAndSync.RUnlock() + defer close(done) + defer g.syncCommitMu.Unlock() + onDone(cl, req, resp, err) + } + + g.mu.Lock() + defer g.mu.Unlock() + + g.blockAuto = true + unblockAuto := func(cl *Client, req *kmsg.OffsetCommitRequest, resp *kmsg.OffsetCommitResponse, err error) { + unblockCommits(cl, req, resp, err) + g.mu.Lock() + defer g.mu.Unlock() + g.blockAuto = false + } + + g.commit(ctx, uncommitted, unblockAuto) +} + +// CommitOffsets commits the given offsets for a group, calling onDone with the +// commit request and either the response or an error if the response was not +// issued. If uncommitted is empty or the client is not consuming as a group, +// onDone is called immediately with empty request and response structs (and, +// when not consuming as a group, errNotGroup) and this function returns. +// It is OK if onDone is nil, but you will not know if your commit succeeded. +// +// This is an advanced function and is difficult to use correctly. For simpler, +// more easily understandable committing, see CommitRecords and +// CommitUncommittedOffsets. +// +// This function itself does not wait for the commit to finish. By default, +// this function is an asynchronous commit. You can use onDone to make it sync. +// If autocommitting is enabled, this function blocks autocommitting until this +// function is complete and the onDone has returned. +// +// It is invalid to use this function to commit offsets for a transaction. +// +// Note that this function ensures absolute ordering of commit requests by +// canceling prior requests and ensuring they are done before executing a new +// one. This means, for absolute control, you can use this function to +// periodically commit async and then issue a final sync commit before quitting +// (this is the behavior of autocommitting and using the default revoke). This +// differs from the Java async commit, which does not retry requests to avoid +// trampling on future commits. +// +// It is highly recommended to check the response's partition's error codes if +// the response is non-nil. While unlikely, individual partitions can error. +// This is most likely to happen if a commit occurs too late in a rebalance +// event. +// +// Do not use this async CommitOffsets in OnPartitionsRevoked; instead use +// CommitOffsetsSync. If you commit async, the rebalance will proceed before +// this function executes, and you will commit offsets for partitions that have +// moved to a different consumer.
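+//
+// A minimal async-commit sketch (editor's addition):
+//
+//	cl.CommitOffsets(ctx, cl.UncommittedOffsets(), func(_ *kgo.Client, _ *kmsg.OffsetCommitRequest, resp *kmsg.OffsetCommitResponse, err error) {
+//		if err != nil {
+//			return // request failed or was canceled by a newer commit
+//		}
+//		// per the note above, also check each partition's ErrorCode in resp
+//	})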
+func (cl *Client) CommitOffsets( + ctx context.Context, + uncommitted map[string]map[int32]EpochOffset, + onDone func(*Client, *kmsg.OffsetCommitRequest, *kmsg.OffsetCommitResponse, error), +) { + cl.cfg.logger.Log(LogLevelDebug, "in CommitOffsets", "with", uncommitted) + defer cl.cfg.logger.Log(LogLevelDebug, "left CommitOffsets") + if onDone == nil { + onDone = func(*Client, *kmsg.OffsetCommitRequest, *kmsg.OffsetCommitResponse, error) {} + } + + g := cl.consumer.g + if g == nil { + onDone(cl, kmsg.NewPtrOffsetCommitRequest(), kmsg.NewPtrOffsetCommitResponse(), errNotGroup) + return + } + if len(uncommitted) == 0 { + onDone(cl, kmsg.NewPtrOffsetCommitRequest(), kmsg.NewPtrOffsetCommitResponse(), nil) + return + } + + if err := g.waitJoinSyncMu(ctx); err != nil { + onDone(g.cl, kmsg.NewPtrOffsetCommitRequest(), kmsg.NewPtrOffsetCommitResponse(), err) + return + } + + g.syncCommitMu.RLock() // block sync commit, but allow other concurrent Commit to cancel us + unblockJoinSync := func(cl *Client, req *kmsg.OffsetCommitRequest, resp *kmsg.OffsetCommitResponse, err error) { + g.noCommitDuringJoinAndSync.RUnlock() + defer g.syncCommitMu.RUnlock() + onDone(cl, req, resp, err) + } + + g.mu.Lock() + defer g.mu.Unlock() + + g.blockAuto = true + unblockAuto := func(cl *Client, req *kmsg.OffsetCommitRequest, resp *kmsg.OffsetCommitResponse, err error) { + unblockJoinSync(cl, req, resp, err) + g.mu.Lock() + defer g.mu.Unlock() + g.blockAuto = false + } + + g.commit(ctx, uncommitted, unblockAuto) +} + +// defaultRevoke commits the last fetched offsets and waits for the commit to +// finish. This is the default onRevoked function which, when combined with the +// default autocommit, ensures we never miss committing everything. +// +// Note that the heartbeat loop invalidates all buffered, unpolled fetches +// before revoking, meaning this truly will commit all polled fetches. +func (g *groupConsumer) defaultRevoke(context.Context, *Client, map[string][]int32) { + if !g.cfg.autocommitDisable { + // We use the client's context rather than the group context, + // because this could come from the group being left. The group + // context will already be canceled. + g.commitOffsetsSync(g.cl.ctx, g.getUncommitted(false), g.cfg.commitCallback) + } +} + +// The actual logic to commit. This is called under two locks: +// - g.noCommitDuringJoinAndSync.RLock() +// - g.mu.Lock() +// +// By blocking the JoinGroup from being issued, or blocking the commit on join +// & sync finishing, we avoid RebalanceInProgress and IllegalGeneration. The +// former error happens if a commit arrives to the broker between the two; the +// latter error happens when a commit arrives to the broker with the old +// generation (it was in flight before sync finished). +// +// Practically, what this means is that a user's commits will be blocked if +// they try to commit between join and sync. +// +// For eager consuming, the user should not have any partitions to commit +// anyway. For cooperative consuming, a rebalance can happen at any +// moment. We block only the revocation aspects of rebalances with +// BlockRebalanceOnPoll; we want to allow the cooperative part of rebalancing +// to occur. +func (g *groupConsumer) commit( + ctx context.Context, + uncommitted map[string]map[int32]EpochOffset, + onDone func(*Client, *kmsg.OffsetCommitRequest, *kmsg.OffsetCommitResponse, error), +) { + // The user could theoretically give us topics that have no partitions + // to commit.
We strip those: Kafka does not reply to them, and we + // expect all partitions in our request to be replied to in + // updateCommitted. If any topic is empty, we deeply clone and then + // strip everything empty. See #186. + var clone bool + for _, ps := range uncommitted { + if len(ps) == 0 { + clone = true + break + } + } + if clone { + dup := make(map[string]map[int32]EpochOffset, len(uncommitted)) + for t, ps := range uncommitted { + if len(ps) == 0 { + continue + } + dupPs := make(map[int32]EpochOffset, len(ps)) + dup[t] = dupPs + for p, eo := range ps { + dupPs[p] = eo + } + } + uncommitted = dup + } + + if len(uncommitted) == 0 { // only empty if called thru autocommit / default revoke + // We have to do this concurrently because the expectation is + // that commit itself does not block. + go onDone(g.cl, kmsg.NewPtrOffsetCommitRequest(), kmsg.NewPtrOffsetCommitResponse(), nil) + return + } + + priorCancel := g.commitCancel + priorDone := g.commitDone + + commitCtx, commitCancel := context.WithCancel(ctx) // enable ours to be canceled and waited for + commitDone := make(chan struct{}) + + g.commitCancel = commitCancel + g.commitDone = commitDone + + req := kmsg.NewPtrOffsetCommitRequest() + req.Group = g.cfg.group + memberID, generation := g.memberGen.load() + req.Generation = generation + req.MemberID = memberID + req.InstanceID = g.cfg.instanceID + + if ctx.Done() != nil { + go func() { + select { + case <-ctx.Done(): + commitCancel() + case <-commitCtx.Done(): + } + }() + } + + go func() { + defer close(commitDone) // allow future commits to continue when we are done + defer commitCancel() + if priorDone != nil { // wait for any prior request to finish + select { + case <-priorDone: + default: + g.cfg.logger.Log(LogLevelDebug, "canceling prior commit to issue another", "group", g.cfg.group) + priorCancel() + <-priorDone + } + } + g.cfg.logger.Log(LogLevelDebug, "issuing commit", "group", g.cfg.group, "uncommitted", uncommitted) + + for topic, partitions := range uncommitted { + reqTopic := kmsg.NewOffsetCommitRequestTopic() + reqTopic.Topic = topic + for partition, eo := range partitions { + reqPartition := kmsg.NewOffsetCommitRequestTopicPartition() + reqPartition.Partition = partition + reqPartition.Offset = eo.Offset + reqPartition.LeaderEpoch = eo.Epoch // KIP-320 + reqPartition.Metadata = &req.MemberID + reqTopic.Partitions = append(reqTopic.Partitions, reqPartition) + } + req.Topics = append(req.Topics, reqTopic) + } + + if fn, ok := ctx.Value(commitContextFn).(func(*kmsg.OffsetCommitRequest) error); ok { + if err := fn(req); err != nil { + onDone(g.cl, req, nil, err) + return + } + } + + resp, err := req.RequestWith(commitCtx, g.cl) + if err != nil { + onDone(g.cl, req, nil, err) + return + } + g.updateCommitted(req, resp) + onDone(g.cl, req, resp, nil) + }() +} + +type reNews struct { + added map[string][]string + skipped []string +} + +func (r *reNews) add(re, match string) { + if r.added == nil { + r.added = make(map[string][]string) + } + r.added[re] = append(r.added[re], match) +} + +func (r *reNews) skip(topic string) { + r.skipped = append(r.skipped, topic) +} + +func (r *reNews) log(cfg *cfg) { + if len(r.added) == 0 && len(r.skipped) == 0 { + return + } + var addeds []string + for re, matches := range r.added { + sort.Strings(matches) + addeds = append(addeds, fmt.Sprintf("%s[%s]", re, strings.Join(matches, " "))) + } + added := strings.Join(addeds, " ") + sort.Strings(r.skipped) + cfg.logger.Log(LogLevelInfo, "consumer regular expressions evaluated on new 
topics", "added", added, "evaluated_and_skipped", r.skipped) +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/errors.go b/vendor/github.com/twmb/franz-go/pkg/kgo/errors.go new file mode 100644 index 00000000000..749132545ae --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/errors.go @@ -0,0 +1,336 @@ +package kgo + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "os" +) + +func isRetryableBrokerErr(err error) bool { + // The error could be nil if we are evaluating multiple errors at once, + // and only one is non-nil. The intent of this function is to evaluate + // whether an **error** is retryable, not a non-error. We return that + // nil is not retryable -- the calling code evaluating multiple errors + // at once would not call into this function if all errors were nil. + if err == nil { + return false + } + // https://github.com/golang/go/issues/45729 + // + // Temporary is relatively useless. We will still check for the + // temporary interface, and in all cases, even with timeouts, we want + // to retry. + // + // More generally, we will retry for any error that unwraps into an + // os.SyscallError. Looking at Go's net package, the error we care + // about is net.OpError. Looking into that further, any error that + // reaches into the operating system return a syscall error, which is + // then put in net.OpError's Err field as an os.SyscallError. There are + // a few non-os.SyscallError errors, these are where Go itself detects + // a hard failure. We do not retry those. + // + // We blanket retry os.SyscallError because a lot of the times, what + // appears as a hard failure can actually be retried. For example, a + // failed dial can be retried, maybe the resolver temporarily had a + // problem. + // + // We favor testing os.SyscallError first, because net.OpError _always_ + // implements Temporary, so if we test that first, it'll return false + // in many cases when we want to return true from os.SyscallError. + if se := (*os.SyscallError)(nil); errors.As(err, &se) { + // If a dial fails, potentially we could retry if the resolver + // had a temporary hiccup, but we will err on the side of this + // being a slightly less temporary error. + return !isDialNonTimeoutErr(err) + } + // EOF can be returned if a broker kills a connection unexpectedly, and + // we can retry that. Same for ErrClosed. + if errors.Is(err, net.ErrClosed) || errors.Is(err, io.EOF) { + return true + } + // We could have a retryable producer ID failure, which then bubbled up + // as errProducerIDLoadFail so as to be retried later. + if pe := (*errProducerIDLoadFail)(nil); errors.As(err, &pe) { + return true + } + // We could have chosen a broker, and then a concurrent metadata update + // could have removed it. + if errors.Is(err, errChosenBrokerDead) { + return true + } + // A broker kept giving us short sasl lifetimes, so we killed the + // connection ourselves. We can retry on a new connection. + if errors.Is(err, errSaslReauthLoop) { + return true + } + // We really should not get correlation mismatch, but if we do, we can + // retry. + if errors.Is(err, errCorrelationIDMismatch) { + return true + } + // We sometimes load the controller before issuing requests, and the + // cluster may not yet be ready and will return -1 for the controller. + // We can backoff and retry and hope the cluster has stabilized. + if ce := (*errUnknownController)(nil); errors.As(err, &ce) { + return true + } + // Same thought for a non-existing coordinator. 
+ if ce := (*errUnknownCoordinator)(nil); errors.As(err, &ce) { + return true + } + var tempErr interface{ Temporary() bool } + if errors.As(err, &tempErr) { + return tempErr.Temporary() + } + return false +} + +func isDialNonTimeoutErr(err error) bool { + var ne *net.OpError + return errors.As(err, &ne) && ne.Op == "dial" && !ne.Timeout() +} + +func isAnyDialErr(err error) bool { + var ne *net.OpError + return errors.As(err, &ne) && ne.Op == "dial" +} + +func isContextErr(err error) bool { + return errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) +} + +func isSkippableBrokerErr(err error) bool { + // Some broker errors are not retryable for the given broker itself, + // but we *could* skip the broker and try again on the next broker. For + // example, if the user input an invalid address and a valid address + // for seeds, when we fail dialing the first seed, we cannot retry that + // broker, but we can skip to the next. + // + // We take anything that returns an OpError that *is not* a context + // error deep inside. + if errors.Is(err, errUnknownBroker) { + return true + } + var ne *net.OpError + if errors.As(err, &ne) && !isContextErr(err) { + return true + } + return false +} + +var ( + ////////////// + // INTERNAL // -- when used multiple times or checked in different areas of the client + ////////////// + + // Returned when issuing a request to a broker that the client does not + // know about (maybe missing from metadata responses now). + errUnknownBroker = errors.New("unknown broker") + + // A temporary error returned when a broker chosen for a request is + // stopped due to a concurrent metadata response. + errChosenBrokerDead = errors.New("the internal broker struct chosen to issue this request has died--either the broker id is migrating or no longer exists") + + // If a broker repeatedly gives us tiny sasl lifetimes, we fail a + // request after a few tries to forcefully kill the connection and + // restart a new connection ourselves. + errSaslReauthLoop = errors.New("the broker is repeatedly giving us sasl lifetimes that are too short to write a request") + + // A temporary error returned when Kafka replies with a different + // correlation ID than we were expecting for the request the client + // issued. + // + // If this error happens, the client closes the broker connection. + errCorrelationIDMismatch = errors.New("correlation ID mismatch") + + // Returned when using a kmsg.Request with a key larger than kmsg.MaxKey. + errUnknownRequestKey = errors.New("request key is unknown") + + // Returned if a connection has loaded broker ApiVersions and knows + // that the broker cannot handle the to-be-issued request. + errBrokerTooOld = errors.New("broker is too old; the broker has already indicated it will not know how to handle the request") + + // Returned when trying to call group functions when the client is not + // assigned a group. + errNotGroup = errors.New("invalid group function call when not assigned a group") + + // Returned when trying to begin a transaction with a client that does + // not have a transactional ID. + errNotTransactional = errors.New("invalid attempt to begin a transaction with a non-transactional client") + + // Returned when trying to produce a record outside of a transaction.
+ errNotInTransaction = errors.New("cannot produce record transactionally if not in a transaction") + + errNoTopic = errors.New("cannot produce record with no topic and no default topic") + + // Returned for all buffered produce records when a user purges topics. + errPurged = errors.New("topic purged while buffered") + + errMissingMetadataPartition = errors.New("metadata update is missing a partition that we were previously using") + + errNoCommittedOffset = errors.New("partition has no prior committed offset") + + ////////////// + // EXTERNAL // + ////////////// + + // ErrRecordTimeout is passed to produce promises when records are + // unable to be produced within the RecordDeliveryTimeout. + ErrRecordTimeout = errors.New("records have timed out before they were able to be produced") + + // ErrRecordRetries is passed to produce promises when records are + // unable to be produced after RecordRetries attempts. + ErrRecordRetries = errors.New("record failed after being retried too many times") + + // ErrMaxBuffered is returned when the maximum number of records is + // buffered and either manual flushing is enabled or you are using + // TryProduce. + ErrMaxBuffered = errors.New("the maximum amount of records are buffered, cannot buffer more") + + // ErrAborting is returned for all buffered records while + // AbortBufferedRecords is being called. + ErrAborting = errors.New("client is aborting buffered records") + + // ErrClientClosed is returned in various places when the client's + // Close function has been called. + // + // For producing, records are failed with this error. + // + // For consuming, a fake partition is injected into a poll response + // that has this error. + // + // For any request, the request is failed with this error. + ErrClientClosed = errors.New("client closed") +) + +// ErrFirstReadEOF is returned for responses that immediately error with +// io.EOF. This is the client's guess as to why a read from a broker is +// failing with io.EOF. Two cases are currently handled: +// +// - When the client is using TLS but brokers are not, brokers close +// connections immediately because the incoming request looks wrong. +// - When SASL is required but missing, brokers close connections immediately. +// +// There may be other reasons that an immediate io.EOF is encountered (perhaps +// the connection truly was severed before a response was received), but this +// error can help you quickly check common problems. +type ErrFirstReadEOF struct { + kind uint8 + err error +} + +type errProducerIDLoadFail struct { + err error +} + +func (e *errProducerIDLoadFail) Error() string { + if e.err == nil { + return "unable to initialize a producer ID due to request failures" + } + return fmt.Sprintf("unable to initialize a producer ID due to request failures: %v", e.err) +} + +func (e *errProducerIDLoadFail) Unwrap() error { return e.err } + +const ( + firstReadSASL uint8 = iota + firstReadTLS +) + +func (e *ErrFirstReadEOF) Error() string { + switch e.kind { + case firstReadTLS: + return "broker closed the connection immediately after a dial, which happens if the client is using TLS when the broker is not expecting it: is TLS misconfigured on the client or the broker?" + default: // firstReadSASL + return "broker closed the connection immediately after a request was issued, which happens when SASL is required but not provided: is SASL missing?" + } +} + +// Unwrap returns io.EOF (or, if a custom dialer returned a wrapped io.EOF, +// this returns the custom dialer's wrapped error).
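+//
+// Because of this Unwrap, the standard errors helpers work (editor's
+// illustration):
+//
+//	var fr *kgo.ErrFirstReadEOF
+//	if errors.As(err, &fr) {
+//		// likely TLS or SASL misconfiguration; errors.Is(err, io.EOF) also holds
+//	}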
+func (e *ErrFirstReadEOF) Unwrap() error { return e.err } + +// ErrDataLoss is returned for Kafka >=2.1 when data loss is detected and the +// client is able to reset to the last valid offset. +type ErrDataLoss struct { + // Topic is the topic data loss was detected on. + Topic string + // Partition is the partition data loss was detected on. + Partition int32 + // ConsumedTo is what the client had consumed to for this partition before + // data loss was detected. + ConsumedTo int64 + // ResetTo is what the client reset the partition to; everything from + // ResetTo to ConsumedTo was lost. + ResetTo int64 +} + +func (e *ErrDataLoss) Error() string { + return fmt.Sprintf("topic %s partition %d lost records;"+ + " the client consumed to offset %d but was reset to offset %d", + e.Topic, e.Partition, e.ConsumedTo, e.ResetTo) +} + +type errUnknownController struct { + id int32 +} + +func (e *errUnknownController) Error() string { + if e.id == -1 { + return "broker replied that the controller broker is not available" + } + return fmt.Sprintf("broker replied that the controller broker is %d,"+ + " but did not reply with that broker in the broker list", e.id) +} + +type errUnknownCoordinator struct { + coordinator int32 + key coordinatorKey +} + +func (e *errUnknownCoordinator) Error() string { + switch e.key.typ { + case coordinatorTypeGroup: + return fmt.Sprintf("broker replied that group %s has broker coordinator %d,"+ + " but did not reply with that broker in the broker list", + e.key.name, e.coordinator) + case coordinatorTypeTxn: + return fmt.Sprintf("broker replied that txn id %s has broker coordinator %d,"+ + " but did not reply with that broker in the broker list", + e.key.name, e.coordinator) + default: + return fmt.Sprintf("broker replied to an unknown coordinator key %s (type %d) that it has a broker coordinator %d,"+ + " but did not reply with that broker in the broker list", e.key.name, e.key.typ, e.coordinator) + } +} + +// ErrGroupSession is injected into a poll if an error occurred such that your +// consumer group member was kicked from the group or was never able to join +// the group. 
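+//
+// A hedged sketch (editor's addition) of spotting this injected error while
+// polling:
+//
+//	fetches := cl.PollFetches(ctx)
+//	fetches.EachError(func(topic string, partition int32, err error) {
+//		var se *kgo.ErrGroupSession
+//		if errors.As(err, &se) {
+//			// the group session failed; inspect se.Err
+//		}
+//	})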
+type ErrGroupSession struct { + Err error +} + +func (e *ErrGroupSession) Error() string { + return fmt.Sprintf("unable to join group session: %v", e.Err) +} + +func (e *ErrGroupSession) Unwrap() error { return e.Err } + +type errDecompress struct { + err error +} + +func (e *errDecompress) Error() string { + return fmt.Sprintf("unable to decompress batch: %v", e.err) +} + +func (e *errDecompress) Unwrap() error { return e.err } + +func isDecompressErr(err error) bool { + var ed *errDecompress + return errors.As(err, &ed) +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/go118.go b/vendor/github.com/twmb/franz-go/pkg/kgo/go118.go new file mode 100644 index 00000000000..483c3e91277 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/go118.go @@ -0,0 +1,57 @@ +//go:build !go1.19 +// +build !go1.19 + +package kgo + +import "sync/atomic" + +type atomicBool uint32 + +func (b *atomicBool) Store(v bool) { + if v { + atomic.StoreUint32((*uint32)(b), 1) + } else { + atomic.StoreUint32((*uint32)(b), 0) + } +} + +func (b *atomicBool) Load() bool { return atomic.LoadUint32((*uint32)(b)) == 1 } + +func (b *atomicBool) Swap(v bool) bool { + var swap uint32 + if v { + swap = 1 + } + return atomic.SwapUint32((*uint32)(b), swap) == 1 +} + +type atomicI32 int32 + +func (v *atomicI32) Add(s int32) int32 { return atomic.AddInt32((*int32)(v), s) } +func (v *atomicI32) Store(s int32) { atomic.StoreInt32((*int32)(v), s) } +func (v *atomicI32) Load() int32 { return atomic.LoadInt32((*int32)(v)) } +func (v *atomicI32) Swap(s int32) int32 { return atomic.SwapInt32((*int32)(v), s) } + +type atomicU32 uint32 + +func (v *atomicU32) Add(s uint32) uint32 { return atomic.AddUint32((*uint32)(v), s) } +func (v *atomicU32) Store(s uint32) { atomic.StoreUint32((*uint32)(v), s) } +func (v *atomicU32) Load() uint32 { return atomic.LoadUint32((*uint32)(v)) } +func (v *atomicU32) Swap(s uint32) uint32 { return atomic.SwapUint32((*uint32)(v), s) } +func (v *atomicU32) CompareAndSwap(old, new uint32) bool { + return atomic.CompareAndSwapUint32((*uint32)(v), old, new) +} + +type atomicI64 int64 + +func (v *atomicI64) Add(s int64) int64 { return atomic.AddInt64((*int64)(v), s) } +func (v *atomicI64) Store(s int64) { atomic.StoreInt64((*int64)(v), s) } +func (v *atomicI64) Load() int64 { return atomic.LoadInt64((*int64)(v)) } +func (v *atomicI64) Swap(s int64) int64 { return atomic.SwapInt64((*int64)(v), s) } + +type atomicU64 uint64 + +func (v *atomicU64) Add(s uint64) uint64 { return atomic.AddUint64((*uint64)(v), s) } +func (v *atomicU64) Store(s uint64) { atomic.StoreUint64((*uint64)(v), s) } +func (v *atomicU64) Load() uint64 { return atomic.LoadUint64((*uint64)(v)) } +func (v *atomicU64) Swap(s uint64) uint64 { return atomic.SwapUint64((*uint64)(v), s) } diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/go119.go b/vendor/github.com/twmb/franz-go/pkg/kgo/go119.go new file mode 100644 index 00000000000..7c8ade5e139 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/go119.go @@ -0,0 +1,14 @@ +//go:build go1.19 +// +build go1.19 + +package kgo + +import "sync/atomic" + +type ( + atomicBool struct{ atomic.Bool } + atomicI32 struct{ atomic.Int32 } + atomicU32 struct{ atomic.Uint32 } + atomicI64 struct{ atomic.Int64 } + atomicU64 struct{ atomic.Uint64 } +) diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/group_balancer.go b/vendor/github.com/twmb/franz-go/pkg/kgo/group_balancer.go new file mode 100644 index 00000000000..85f31a5342a --- /dev/null +++ 
b/vendor/github.com/twmb/franz-go/pkg/kgo/group_balancer.go @@ -0,0 +1,959 @@ +package kgo + +import ( + "bytes" + "fmt" + "sort" + "strings" + + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kgo/internal/sticky" + "github.com/twmb/franz-go/pkg/kmsg" +) + +// GroupBalancer balances topics and partitions among group members. +// +// A GroupBalancer is roughly equivalent to Kafka's PartitionAssignor. +type GroupBalancer interface { + // ProtocolName returns the name of the protocol, e.g. roundrobin, + // range, sticky. + ProtocolName() string + + // JoinGroupMetadata returns the metadata to use in JoinGroup, given + // the topic interests and the current assignment and group generation. + // + // It is safe to modify the input topics and currentAssignment. The + // input topics are guaranteed to be sorted, as are the partitions for + // each topic in currentAssignment. It is recommended for your output + // to be ordered by topic and partitions. Since Kafka uses the output + // from this function to determine whether a rebalance is needed, a + // deterministic output will avoid accidental rebalances. + JoinGroupMetadata( + topicInterests []string, + currentAssignment map[string][]int32, + generation int32, + ) []byte + + // ParseSyncAssignment returns assigned topics and partitions from an + // encoded SyncGroupResponse's MemberAssignment. + ParseSyncAssignment(assignment []byte) (map[string][]int32, error) + + // MemberBalancer returns a GroupMemberBalancer for the given group + // members, as well as the topics that all the members are interested + // in. If the client does not have some topics in the returned topics, + // the client issues a metadata request to load the number of + // partitions in those topics before calling the GroupMemberBalancer's + // Balance function. + // + // The input group members are guaranteed to be sorted first by + // instance ID, if non-nil, and then by member ID. + // + // It is up to the user to decide how to decode each member's + // ProtocolMetadata field. The default client group protocol, + // "consumer", uses join group metadata of type + // kmsg.ConsumerMemberMetadata. If this is the case for you, it may be + // useful to use the ConsumerBalancer type to help parse the metadata + // and balance. + // + // If the member metadata cannot be deserialized correctly, this should + // return a relevant error. + MemberBalancer(members []kmsg.JoinGroupResponseMember) (b GroupMemberBalancer, topics map[string]struct{}, err error) + + // IsCooperative returns if this is a cooperative balance strategy. + IsCooperative() bool +} + +// GroupMemberBalancer balances topics amongst group members. If your balancing +// can fail, you can implement GroupMemberBalancerOrError. +type GroupMemberBalancer interface { + // Balance balances topics and partitions among group members, where + // the int32 in the topics map corresponds to the number of partitions + // known to be in each topic. + Balance(topics map[string]int32) IntoSyncAssignment +} + +// GroupMemberBalancerOrError is an optional extension interface for +// GroupMemberBalancer. This can be implemented if your balance function can +// fail. +// +// For interface purposes, it is required to implement GroupMemberBalancer, but +// Balance will never be called.
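+//
+// A minimal implementation sketch (editor's addition; myBalancer is
+// hypothetical):
+//
+//	type myBalancer struct{}
+//
+//	func (myBalancer) Balance(map[string]int32) IntoSyncAssignment { panic("unreachable") }
+//	func (myBalancer) BalanceOrError(topics map[string]int32) (IntoSyncAssignment, error) {
+//		return nil, errors.New("not implemented")
+//	}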
+type GroupMemberBalancerOrError interface { + GroupMemberBalancer + BalanceOrError(topics map[string]int32) (IntoSyncAssignment, error) +} + +// IntoSyncAssignment takes a balance plan and returns a list of assignments to +// use in a kmsg.SyncGroupRequest. +// +// It is recommended to ensure the output is deterministic and ordered by +// member / topic / partitions. +type IntoSyncAssignment interface { + IntoSyncAssignment() []kmsg.SyncGroupRequestGroupAssignment +} + +// ConsumerBalancer is a helper type for writing balance plans that use the +// "consumer" protocol, such that each member uses a kmsg.ConsumerMemberMetadata +// in its join group request. +type ConsumerBalancer struct { + b ConsumerBalancerBalance + members []kmsg.JoinGroupResponseMember + metadatas []kmsg.ConsumerMemberMetadata + topics map[string]struct{} + + err error +} + +// Balance satisfies the GroupMemberBalancer interface, but is never called +// because GroupMemberBalancerOrError exists. +func (*ConsumerBalancer) Balance(map[string]int32) IntoSyncAssignment { + panic("unreachable") +} + +// BalanceOrError satisfies the GroupMemberBalancerOrError interface. +func (b *ConsumerBalancer) BalanceOrError(topics map[string]int32) (IntoSyncAssignment, error) { + return b.b.Balance(b, topics), b.err +} + +// Members returns the list of input members for this group balancer. +func (b *ConsumerBalancer) Members() []kmsg.JoinGroupResponseMember { + return b.members +} + +// EachMember calls fn for each member and its corresponding metadata in the +// consumer group being balanced. +func (b *ConsumerBalancer) EachMember(fn func(member *kmsg.JoinGroupResponseMember, meta *kmsg.ConsumerMemberMetadata)) { + for i := range b.members { + fn(&b.members[i], &b.metadatas[i]) + } +} + +// MemberAt returns the nth member and its corresponding metadata. +func (b *ConsumerBalancer) MemberAt(n int) (*kmsg.JoinGroupResponseMember, *kmsg.ConsumerMemberMetadata) { + return &b.members[n], &b.metadatas[n] +} + +// SetError allows you to set any error that occurred while balancing. This +// allows you to fail balancing and return nil from Balance. +func (b *ConsumerBalancer) SetError(err error) { + b.err = err +} + +// MemberTopics returns the unique set of topics that all members are +// interested in. +// +// This can safely be called if the balancer is nil; if so, this will return +// nil. +func (b *ConsumerBalancer) MemberTopics() map[string]struct{} { + if b == nil { + return nil + } + return b.topics +} + +// NewPlan returns a type that can be used to build a balance plan. The return +// satisfies the IntoSyncAssignment interface. +func (b *ConsumerBalancer) NewPlan() *BalancePlan { + plan := make(map[string]map[string][]int32, len(b.members)) + for i := range b.members { + plan[b.members[i].MemberID] = make(map[string][]int32) + } + return &BalancePlan{plan} +} + +// ConsumerBalancerBalance is what the ConsumerBalancer invokes to balance a +// group. +// +// This is a complicated interface, but in short, this interface has one +// function that implements the actual balancing logic: using the input +// balancer, balance the input topics and partitions. If your balancing can +// fail, you can use ConsumerBalancer.SetError(...) to return an error from +// balancing, and then you can simply return nil from Balance. 
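+//
+// Sketch (editor's addition) of failing a balance with SetError; the names
+// are hypothetical:
+//
+//	type myBalance struct{}
+//
+//	func (myBalance) Balance(b *ConsumerBalancer, topics map[string]int32) IntoSyncAssignment {
+//		if len(topics) == 0 {
+//			b.SetError(errors.New("no topics to balance"))
+//			return nil // BalanceOrError surfaces the error
+//		}
+//		return b.NewPlan()
+//	}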
+type ConsumerBalancerBalance interface { + Balance(*ConsumerBalancer, map[string]int32) IntoSyncAssignment +} + +// ParseConsumerSyncAssignment returns an assignment as specified by a +// kmsg.ConsumerMemberAssignment, that is, the type encoded in metadata for the +// consumer protocol. +func ParseConsumerSyncAssignment(assignment []byte) (map[string][]int32, error) { + var kassignment kmsg.ConsumerMemberAssignment + if err := kassignment.ReadFrom(assignment); err != nil { + return nil, fmt.Errorf("sync assignment parse failed: %v", err) + } + + m := make(map[string][]int32, len(kassignment.Topics)) + for _, topic := range kassignment.Topics { + m[topic.Topic] = topic.Partitions + } + return m, nil +} + +// NewConsumerBalancer parses each member's metadata as a +// kmsg.ConsumerMemberMetadata and returns a ConsumerBalancer to use in balancing. +// +// If any metadata parsing fails, this returns an error. +func NewConsumerBalancer(balance ConsumerBalancerBalance, members []kmsg.JoinGroupResponseMember) (*ConsumerBalancer, error) { + b := &ConsumerBalancer{ + b: balance, + members: members, + metadatas: make([]kmsg.ConsumerMemberMetadata, len(members)), + topics: make(map[string]struct{}), + } + + for i, member := range members { + meta := &b.metadatas[i] + meta.Default() + memberMeta := member.ProtocolMetadata + if err := meta.ReadFrom(memberMeta); err != nil { + // Some buggy clients claimed support for v1 but then + // did not add OwnedPartitions, resulting in a short + // metadata. If we fail at reading and the version is + // v1, we retry as v0. We do not support other + // versions, in the hope that clients stop claiming + // ever-higher version support that they do not + // actually implement. Sarama has a similar + // workaround. See #493. + if bytes.HasPrefix(memberMeta, []byte{0, 1}) { + memberMeta[0] = 0 + memberMeta[1] = 0 + if err = meta.ReadFrom(memberMeta); err != nil { + return nil, fmt.Errorf("unable to read member metadata: %v", err) + } + } + } + for _, topic := range meta.Topics { + b.topics[topic] = struct{}{} + } + sort.Strings(meta.Topics) + } + + return b, nil +} + +// BalancePlan is a helper type to build the result of balancing topics +// and partitions among group members. +type BalancePlan struct { + plan map[string]map[string][]int32 // member => topic => partitions +} + +// AsMemberIDMap returns the plan as a map of member IDs to their topic & +// partition assignments. +// +// Internally, a BalancePlan is currently represented as this map. Any +// modification to the map modifies the plan. The internal representation of a +// plan may change in the future to include more metadata. If this happens, the +// map returned from this function may not represent all aspects of a plan. +// The client will attempt to mirror modifications to the map directly back +// into the underlying plan as best as possible.
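+//
+// Illustrative only (editor's addition): building a plan inside a custom
+// balance function and reading it back; "events" is a hypothetical topic:
+//
+//	plan := b.NewPlan()
+//	member, _ := b.MemberAt(0)
+//	plan.AddPartition(member, "events", 0)
+//	byMember := plan.AsMemberIDMap() // member ID => topic => partitions
+//	_ = byMember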
+func (p *BalancePlan) AsMemberIDMap() map[string]map[string][]int32 { + return p.plan +} + +func (p *BalancePlan) String() string { + var sb strings.Builder + + var membersWritten int + for member, topics := range p.plan { + membersWritten++ + sb.WriteString(member) + sb.WriteString("{") + + var topicsWritten int + for topic, partitions := range topics { + fmt.Fprintf(&sb, "%s%v", topic, partitions) + topicsWritten++ + if topicsWritten < len(topics) { + sb.WriteString(", ") + } + } + + sb.WriteString("}") + if membersWritten < len(p.plan) { + sb.WriteString(", ") + } + } + + return sb.String() +} + +// AddPartition assigns a partition for the topic to a given member. +func (p *BalancePlan) AddPartition(member *kmsg.JoinGroupResponseMember, topic string, partition int32) { + memberPlan := p.plan[member.MemberID] + memberPlan[topic] = append(memberPlan[topic], partition) +} + +// AddPartitions assigns many partitions for a topic to a given member. +func (p *BalancePlan) AddPartitions(member *kmsg.JoinGroupResponseMember, topic string, partitions []int32) { + memberPlan := p.plan[member.MemberID] + memberPlan[topic] = append(memberPlan[topic], partitions...) +} + +// IntoSyncAssignment satisfies the IntoSyncAssignment interface. +func (p *BalancePlan) IntoSyncAssignment() []kmsg.SyncGroupRequestGroupAssignment { + kassignments := make([]kmsg.SyncGroupRequestGroupAssignment, 0, len(p.plan)) + for member, assignment := range p.plan { + var kassignment kmsg.ConsumerMemberAssignment + for topic, partitions := range assignment { + sort.Slice(partitions, func(i, j int) bool { return partitions[i] < partitions[j] }) + assnTopic := kmsg.NewConsumerMemberAssignmentTopic() + assnTopic.Topic = topic + assnTopic.Partitions = partitions + kassignment.Topics = append(kassignment.Topics, assnTopic) + } + sort.Slice(kassignment.Topics, func(i, j int) bool { return kassignment.Topics[i].Topic < kassignment.Topics[j].Topic }) + syncAssn := kmsg.NewSyncGroupRequestGroupAssignment() + syncAssn.MemberID = member + syncAssn.MemberAssignment = kassignment.AppendTo(nil) + kassignments = append(kassignments, syncAssn) + } + sort.Slice(kassignments, func(i, j int) bool { return kassignments[i].MemberID < kassignments[j].MemberID }) + return kassignments +} + +func joinMemberLess(l, r *kmsg.JoinGroupResponseMember) bool { + if l.InstanceID != nil { + if r.InstanceID == nil { + return true + } + return *l.InstanceID < *r.InstanceID + } + if r.InstanceID != nil { + return false + } + return l.MemberID < r.MemberID +} + +func sortJoinMembers(members []kmsg.JoinGroupResponseMember) { + sort.Slice(members, func(i, j int) bool { return joinMemberLess(&members[i], &members[j]) }) +} + +func sortJoinMemberPtrs(members []*kmsg.JoinGroupResponseMember) { + sort.Slice(members, func(i, j int) bool { return joinMemberLess(members[i], members[j]) }) +} + +func (g *groupConsumer) findBalancer(from, proto string) (GroupBalancer, error) { + for _, b := range g.cfg.balancers { + if b.ProtocolName() == proto { + return b, nil + } + } + var ours []string + for _, b := range g.cfg.balancers { + ours = append(ours, b.ProtocolName()) + } + g.cl.cfg.logger.Log(LogLevelError, fmt.Sprintf("%s could not find broker-chosen balancer", from), "kafka_choice", proto, "our_set", strings.Join(ours, ", ")) + return nil, fmt.Errorf("unable to balance: none of our balancers have a name equal to the balancer chosen for balancing (%s)", proto) +} + +// balanceGroup returns a balancePlan from a join group response. 
+// +// If the group has topics this leader does not want to consume, this also +// returns all topics and partitions; the leader will then periodically do its +// own metadata update to see if partition counts have changed for these random +// topics. +func (g *groupConsumer) balanceGroup(proto string, members []kmsg.JoinGroupResponseMember, skipBalance bool) ([]kmsg.SyncGroupRequestGroupAssignment, error) { + g.cl.cfg.logger.Log(LogLevelInfo, "balancing group as leader") + + b, err := g.findBalancer("balance group", proto) + if err != nil { + return nil, err + } + + sortJoinMembers(members) + + memberBalancer, topics, err := b.MemberBalancer(members) + if err != nil { + return nil, fmt.Errorf("unable to create group member balancer: %v", err) + } + + myTopics := g.tps.load() + var needMeta bool + topicPartitionCount := make(map[string]int32, len(topics)) + for topic := range topics { + data, exists := myTopics[topic] + if !exists { + needMeta = true + continue + } + topicPartitionCount[topic] = int32(len(data.load().partitions)) + } + + // If our consumer metadata does not contain all topics, the group is + // expressing interests in topics we are not consuming. Perhaps we have + // those topics saved in our external topics map. + if needMeta { + g.loadExternal().fn(func(m map[string]int32) { + needMeta = false + for topic := range topics { + partitions, exists := m[topic] + if !exists { + needMeta = true + continue + } + topicPartitionCount[topic] = partitions + } + }) + } + + if needMeta { + g.cl.cfg.logger.Log(LogLevelInfo, "group members indicated interest in topics the leader is not assigned, fetching metadata for all group topics") + var metaTopics []string + for topic := range topics { + metaTopics = append(metaTopics, topic) + } + + _, resp, err := g.cl.fetchMetadataForTopics(g.ctx, false, metaTopics) + if err != nil { + return nil, fmt.Errorf("unable to fetch metadata for group topics: %v", err) + } + for i := range resp.Topics { + t := &resp.Topics[i] + if t.Topic == nil { + g.cl.cfg.logger.Log(LogLevelWarn, "metadata resp in balance for topic has nil topic, skipping...", "err", kerr.ErrorForCode(t.ErrorCode)) + continue + } + if t.ErrorCode != 0 { + g.cl.cfg.logger.Log(LogLevelWarn, "metadata resp in balance for topic has error, skipping...", "topic", t.Topic, "err", kerr.ErrorForCode(t.ErrorCode)) + continue + } + topicPartitionCount[*t.Topic] = int32(len(t.Partitions)) + } + + g.initExternal(topicPartitionCount) + } + + // If the returned balancer is a ConsumerBalancer (which it likely + // always will be), then we can print some useful debugging information + // about what member interests are. 
+ if b, ok := memberBalancer.(*ConsumerBalancer); ok { + interests := new(bytes.Buffer) + b.EachMember(func(member *kmsg.JoinGroupResponseMember, meta *kmsg.ConsumerMemberMetadata) { + interests.Reset() + fmt.Fprintf(interests, "interested topics: %v, previously owned: ", meta.Topics) + for _, owned := range meta.OwnedPartitions { + sort.Slice(owned.Partitions, func(i, j int) bool { return owned.Partitions[i] < owned.Partitions[j] }) + fmt.Fprintf(interests, "%s%v, ", owned.Topic, owned.Partitions) + } + strInterests := interests.String() + strInterests = strings.TrimSuffix(strInterests, ", ") + + if member.InstanceID == nil { + g.cl.cfg.logger.Log(LogLevelInfo, "balance group member", "id", member.MemberID, "interests", strInterests) + } else { + g.cl.cfg.logger.Log(LogLevelInfo, "balance group member", "id", member.MemberID, "instance_id", *member.InstanceID, "interests", strInterests) + } + }) + } else { + g.cl.cfg.logger.Log(LogLevelInfo, "unable to log information about group member interests: the user has defined a custom balancer (not a *ConsumerBalancer)") + } + + // KIP-814: we are leader and we know what the entire group is + // consuming. Crucially, we parsed topics that we are potentially not + // interested in and are now tracking them for metadata updates. We + // have logged the current interests, we do not need to actually + // balance. + if skipBalance { + switch proto := b.ProtocolName(); proto { + case RangeBalancer().ProtocolName(), + RoundRobinBalancer().ProtocolName(), + StickyBalancer().ProtocolName(), + CooperativeStickyBalancer().ProtocolName(): + default: + return nil, nil + } + } + + // If the returned IntoSyncAssignment is a BalancePlan, which it likely + // is if the balancer is a ConsumerBalancer, then we can again print + // more useful debugging information. + var into IntoSyncAssignment + if memberBalancerOrErr, ok := memberBalancer.(GroupMemberBalancerOrError); ok { + if into, err = memberBalancerOrErr.BalanceOrError(topicPartitionCount); err != nil { + g.cl.cfg.logger.Log(LogLevelError, "balance failed", "err", err) + return nil, err + } + } else { + into = memberBalancer.Balance(topicPartitionCount) + } + + if p, ok := into.(*BalancePlan); ok { + g.cl.cfg.logger.Log(LogLevelInfo, "balanced", "plan", p.String()) + } else { + g.cl.cfg.logger.Log(LogLevelInfo, "unable to log balance plan: the user has returned a custom IntoSyncAssignment (not a *BalancePlan)") + } + + return into.IntoSyncAssignment(), nil +} + +// helper func; range and roundrobin use v0 +func simpleMemberMetadata(interests []string, generation int32) []byte { + meta := kmsg.NewConsumerMemberMetadata() + meta.Version = 3 // BUMP ME WHEN NEW FIELDS ARE ADDED, AND BUMP BELOW + meta.Topics = interests // input interests are already sorted + // meta.OwnedPartitions is nil, since simple protocols are not cooperative + meta.Generation = generation + return meta.AppendTo(nil) +} + +/////////////////// +// Balance Plans // +/////////////////// + +// RoundRobinBalancer returns a group balancer that evenly maps topics and +// partitions to group members. +// +// Suppose there are two members M0 and M1, two topics t0 and t1, and each +// topic has three partitions p0, p1, and p2. The partition balancing will be +// +// M0: [t0p0, t0p2, t1p1] +// M1: [t0p1, t1p0, t1p2] +// +// If all members subscribe to all topics equally, the roundrobin balancer +// will give a perfect balance. However, if topic subscriptions are quite +// unequal, the roundrobin balancer may lead to a bad balance. 
See KIP-49 +// for one example (note that the fair strategy mentioned in KIP-49 does +// not exist). +// +// This is equivalent to the Java roundrobin balancer. +func RoundRobinBalancer() GroupBalancer { + return new(roundRobinBalancer) +} + +type roundRobinBalancer struct{} + +func (*roundRobinBalancer) ProtocolName() string { return "roundrobin" } +func (*roundRobinBalancer) IsCooperative() bool { return false } +func (*roundRobinBalancer) JoinGroupMetadata(interests []string, _ map[string][]int32, generation int32) []byte { + return simpleMemberMetadata(interests, generation) +} + +func (*roundRobinBalancer) ParseSyncAssignment(assignment []byte) (map[string][]int32, error) { + return ParseConsumerSyncAssignment(assignment) +} + +func (r *roundRobinBalancer) MemberBalancer(members []kmsg.JoinGroupResponseMember) (GroupMemberBalancer, map[string]struct{}, error) { + b, err := NewConsumerBalancer(r, members) + return b, b.MemberTopics(), err +} + +func (*roundRobinBalancer) Balance(b *ConsumerBalancer, topics map[string]int32) IntoSyncAssignment { + type topicPartition struct { + topic string + partition int32 + } + var nparts int + for _, partitions := range topics { + nparts += int(partitions) + } + // Order all partitions available to balance, filtering out those that + // no members are subscribed to. + allParts := make([]topicPartition, 0, nparts) + for topic := range b.MemberTopics() { + for partition := int32(0); partition < topics[topic]; partition++ { + allParts = append(allParts, topicPartition{ + topic, + partition, + }) + } + } + sort.Slice(allParts, func(i, j int) bool { + l, r := allParts[i], allParts[j] + return l.topic < r.topic || l.topic == r.topic && l.partition < r.partition + }) + + plan := b.NewPlan() + // While parts are unassigned, assign them. + var memberIdx int + for len(allParts) > 0 { + next := allParts[0] + allParts = allParts[1:] + + // The Java roundrobin strategy walks members circularly until + // a member can take this partition, and then starts the next + // partition where the circular iterator left off. + assigned: + for { + member, meta := b.MemberAt(memberIdx) + memberIdx = (memberIdx + 1) % len(b.Members()) + for _, topic := range meta.Topics { + if topic == next.topic { + plan.AddPartition(member, next.topic, next.partition) + break assigned + } + } + } + } + + return plan +} + +// RangeBalancer returns a group balancer that, per topic, maps partitions to +// group members. Since this works on a topic level, uneven partitions per +// topic to the number of members can lead to slight partition consumption +// disparities. +// +// Suppose there are two members M0 and M1, two topics t0 and t1, and each +// topic has three partitions p0, p1, and p2. The partition balancing will be +// +// M0: [t0p0, t0p1, t1p0, t1p1] +// M1: [t0p2, t1p2] +// +// This is equivalent to the Java range balancer. 
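+//
+// For a further sketch of the remainder handling (numbers invented for this
+// illustration, not taken from the source): a single topic with seven
+// partitions split across three members divides as 7/3 = 2 remainder 1, so
+// the first member in sorted order takes one extra partition:
+//
+//	M0: [t0p0, t0p1, t0p2]
+//	M1: [t0p3, t0p4]
+//	M2: [t0p5, t0p6]
+//
+// This mirrors the div/rem loop in the Balance implementation that follows.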
+func RangeBalancer() GroupBalancer {
+	return new(rangeBalancer)
+}
+
+type rangeBalancer struct{}
+
+func (*rangeBalancer) ProtocolName() string { return "range" }
+func (*rangeBalancer) IsCooperative() bool  { return false }
+func (*rangeBalancer) JoinGroupMetadata(interests []string, _ map[string][]int32, generation int32) []byte {
+	return simpleMemberMetadata(interests, generation)
+}
+
+func (*rangeBalancer) ParseSyncAssignment(assignment []byte) (map[string][]int32, error) {
+	return ParseConsumerSyncAssignment(assignment)
+}
+
+func (r *rangeBalancer) MemberBalancer(members []kmsg.JoinGroupResponseMember) (GroupMemberBalancer, map[string]struct{}, error) {
+	b, err := NewConsumerBalancer(r, members)
+	return b, b.MemberTopics(), err
+}
+
+func (*rangeBalancer) Balance(b *ConsumerBalancer, topics map[string]int32) IntoSyncAssignment {
+	topics2PotentialConsumers := make(map[string][]*kmsg.JoinGroupResponseMember)
+	b.EachMember(func(member *kmsg.JoinGroupResponseMember, meta *kmsg.ConsumerMemberMetadata) {
+		for _, topic := range meta.Topics {
+			topics2PotentialConsumers[topic] = append(topics2PotentialConsumers[topic], member)
+		}
+	})
+
+	plan := b.NewPlan()
+	for topic, potentialConsumers := range topics2PotentialConsumers {
+		sortJoinMemberPtrs(potentialConsumers)
+
+		numPartitions := topics[topic]
+		partitions := make([]int32, numPartitions)
+		for i := range partitions {
+			partitions[i] = int32(i)
+		}
+		numParts := len(partitions)
+		div, rem := numParts/len(potentialConsumers), numParts%len(potentialConsumers)
+
+		var consumerIdx int
+		for len(partitions) > 0 {
+			num := div
+			if rem > 0 {
+				num++
+				rem--
+			}
+
+			member := potentialConsumers[consumerIdx]
+			plan.AddPartitions(member, topic, partitions[:num])
+
+			consumerIdx++
+			partitions = partitions[num:]
+		}
+	}
+
+	return plan
+}
+
+// StickyBalancer returns a group balancer that ensures minimal partition
+// movement on group changes while also ensuring optimal balancing.
+//
+// Suppose there are three members M0, M1, and M2, and three topics t0, t1,
+// and t2, each with three partitions p0, p1, and p2. If the initial balance
+// plan looks like
+//
+//	M0: [t0p0, t0p1, t0p2]
+//	M1: [t1p0, t1p1, t1p2]
+//	M2: [t2p0, t2p1, t2p2]
+//
+// and M2 disappears, both roundrobin and range would have mostly destructive
+// reassignments.
+//
+// Range would result in
+//
+//	M0: [t0p0, t0p1, t1p0, t1p1, t2p0, t2p1]
+//	M1: [t0p2, t1p2, t2p2]
+//
+// which is imbalanced and has 3 partitions move from members that did not need
+// to move (t0p2, t1p0, t1p1).
+//
+// RoundRobin would result in
+//
+//	M0: [t0p0, t0p2, t1p1, t2p0, t2p2]
+//	M1: [t0p1, t1p0, t1p2, t2p1]
+//
+// which is balanced, but has 2 partitions move when they do not need to
+// (t0p1, t1p1).
+//
+// Sticky balancing results in
+//
+//	M0: [t0p0, t0p1, t0p2, t2p0, t2p2]
+//	M1: [t1p0, t1p1, t1p2, t2p1]
+//
+// which is balanced and does not cause any unnecessary partition movement.
+// The actual t2 partitions may not be in that exact combination, but they
+// will be balanced.
+//
+// An advantage of the sticky consumer is that it allows API users to
+// potentially avoid some cleanup until after the consumer knows which
+// partitions it is losing when it gets its new assignment. Users can
+// then only cleanup state for partitions that changed, which will be
+// minimal (see KIP-54; this client also includes the KIP-351 bugfix).
+//
+// Note that this API implements the sticky partitioning quite differently from
+// the Java implementation. The Java implementation is difficult to reason
+// about and has many edge cases that result in non-optimal balancing (albeit,
+// you likely have to be trying to hit those edge cases). This API uses a
+// different algorithm to ensure optimal balancing while being an order of
+// magnitude faster.
+//
+// Since the new strategy is a strict improvement over the Java strategy, it is
+// entirely compatible. Any Go client sharing a group with a Java client will
+// not have its decisions undone on leadership change from a Go consumer to a
+// Java one. The Java balancer does not apply the strategy it comes up with if
+// it deems the balance score equal to or worse than the original score (the
+// score being effectively equal to the standard deviation of the mean number
+// of assigned partitions). This Go sticky balancer is optimal and extra
+// sticky. Thus, the Java balancer will never back out of a strategy from this
+// balancer.
+func StickyBalancer() GroupBalancer {
+	return &stickyBalancer{cooperative: false}
+}
+
+type stickyBalancer struct {
+	cooperative bool
+}
+
+func (s *stickyBalancer) ProtocolName() string {
+	if s.cooperative {
+		return "cooperative-sticky"
+	}
+	return "sticky"
+}
+func (s *stickyBalancer) IsCooperative() bool { return s.cooperative }
+func (s *stickyBalancer) JoinGroupMetadata(interests []string, currentAssignment map[string][]int32, generation int32) []byte {
+	meta := kmsg.NewConsumerMemberMetadata()
+	meta.Version = 3 // BUMP ME WHEN NEW FIELDS ARE ADDED, AND BUMP ABOVE
+	meta.Topics = interests
+	meta.Generation = generation
+	stickyMeta := kmsg.NewStickyMemberMetadata()
+	stickyMeta.Generation = generation
+	for topic, partitions := range currentAssignment {
+		if s.cooperative {
+			metaPart := kmsg.NewConsumerMemberMetadataOwnedPartition()
+			metaPart.Topic = topic
+			metaPart.Partitions = partitions
+			meta.OwnedPartitions = append(meta.OwnedPartitions, metaPart)
+		}
+		stickyAssn := kmsg.NewStickyMemberMetadataCurrentAssignment()
+		stickyAssn.Topic = topic
+		stickyAssn.Partitions = partitions
+		stickyMeta.CurrentAssignment = append(stickyMeta.CurrentAssignment, stickyAssn)
+	}
+
+	// KAFKA-12898: ensure our topics are sorted
+	metaOwned := meta.OwnedPartitions
+	stickyCurrent := stickyMeta.CurrentAssignment
+	sort.Slice(metaOwned, func(i, j int) bool { return metaOwned[i].Topic < metaOwned[j].Topic })
+	sort.Slice(stickyCurrent, func(i, j int) bool { return stickyCurrent[i].Topic < stickyCurrent[j].Topic })
+
+	meta.UserData = stickyMeta.AppendTo(nil)
+	return meta.AppendTo(nil)
+}
+
+func (*stickyBalancer) ParseSyncAssignment(assignment []byte) (map[string][]int32, error) {
+	return ParseConsumerSyncAssignment(assignment)
+}
+
+func (s *stickyBalancer) MemberBalancer(members []kmsg.JoinGroupResponseMember) (GroupMemberBalancer, map[string]struct{}, error) {
+	b, err := NewConsumerBalancer(s, members)
+	return b, b.MemberTopics(), err
+}
+
+func (s *stickyBalancer) Balance(b *ConsumerBalancer, topics map[string]int32) IntoSyncAssignment {
+	// Since our input into balancing is already sorted by instance ID,
+	// the sticky strategy does not need to worry about instance IDs at all.
+	// See my (slightly rambling) comment on KAFKA-8432.
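+	//
+	// The conversion below flattens each join-group member into the
+	// internal sticky.GroupMember form; for cooperative members, Owned
+	// and Generation come from the KIP-792 join metadata, while eager
+	// members carry their history in UserData (parsed during balancing).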
+ stickyMembers := make([]sticky.GroupMember, 0, len(b.Members())) + b.EachMember(func(member *kmsg.JoinGroupResponseMember, meta *kmsg.ConsumerMemberMetadata) { + stickyMembers = append(stickyMembers, sticky.GroupMember{ + ID: member.MemberID, + Topics: meta.Topics, + UserData: meta.UserData, + Owned: meta.OwnedPartitions, + Generation: meta.Generation, + Cooperative: s.cooperative, + }) + }) + + p := &BalancePlan{sticky.Balance(stickyMembers, topics)} + if s.cooperative { + p.AdjustCooperative(b) + } + return p +} + +// CooperativeStickyBalancer performs the sticky balancing strategy, but +// additionally opts the consumer group into "cooperative" rebalancing. +// +// Cooperative rebalancing differs from "eager" (the original) rebalancing in +// that group members do not stop processing partitions during the rebalance. +// Instead, once they receive their new assignment, each member determines +// which partitions it needs to revoke. If any, they send a new join request +// (before syncing), and the process starts over. This should ultimately end up +// in only two join rounds, with the major benefit being that processing never +// needs to stop. +// +// NOTE once a group is collectively using cooperative balancing, it is unsafe +// to have a member join the group that does not support cooperative balancing. +// If the only-eager member is elected leader, it will not know of the new +// multiple join strategy and things will go awry. Thus, once a group is +// entirely on cooperative rebalancing, it cannot go back. +// +// Migrating an eager group to cooperative balancing requires two rolling +// bounce deploys. The first deploy should add the cooperative-sticky strategy +// as an option (that is, each member goes from using one balance strategy to +// two). During this deploy, Kafka will tell leaders to continue using the old +// eager strategy, since the old eager strategy is the only one in common among +// all members. The second rolling deploy removes the old eager strategy. At +// this point, Kafka will tell the leader to use cooperative-sticky balancing. +// During this roll, all members in the group that still have both strategies +// continue to be eager and give up all of their partitions every rebalance. +// However, once a member only has cooperative-sticky, it can begin using this +// new strategy and things will work correctly. See KIP-429 for more details. +func CooperativeStickyBalancer() GroupBalancer { + return &stickyBalancer{cooperative: true} +} + +// AdjustCooperative performs the final adjustment to a plan for cooperative +// balancing. +// +// Over the plan, we remove all partitions that migrated from one member (where +// it was assigned) to a new member (where it is now planned). +// +// This allows members that had partitions removed to revoke and rejoin, which +// will then do another rebalance, and in that new rebalance, the planned +// partitions are now on the free list to be assigned. 
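+//
+// A small worked example (assignments invented for illustration): if the
+// prior assignment was
+//
+//	A: t0[0 1]
+//	B: t0[2]
+//
+// and the new plan is
+//
+//	A: t0[0]
+//	B: t0[1 2]
+//
+// then t0p1 migrated from A to B, so it is deleted from B's plan here. A
+// revokes t0p1, rejoins, and the follow-up rebalance hands t0p1 to B.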
+func (p *BalancePlan) AdjustCooperative(b *ConsumerBalancer) { + allAdded := make(map[string]map[int32]string, 100) // topic => partition => member + allRevoked := make(map[string]map[int32]struct{}, 100) + + addT := func(t string) map[int32]string { + addT := allAdded[t] + if addT == nil { + addT = make(map[int32]string, 20) + allAdded[t] = addT + } + return addT + } + revokeT := func(t string) map[int32]struct{} { + revokeT := allRevoked[t] + if revokeT == nil { + revokeT = make(map[int32]struct{}, 20) + allRevoked[t] = revokeT + } + return revokeT + } + + tmap := make(map[string]struct{}) // reusable topic existence map + pmap := make(map[int32]struct{}) // reusable partitions existence map + + plan := p.plan + + // First, on all members, we find what was added and what was removed + // to and from that member. + b.EachMember(func(member *kmsg.JoinGroupResponseMember, meta *kmsg.ConsumerMemberMetadata) { + planned := plan[member.MemberID] + + // added := planned - current + // revoked := current - planned + + for ptopic := range planned { // set existence for all planned topics + tmap[ptopic] = struct{}{} + } + for _, otopic := range meta.OwnedPartitions { // over all prior owned topics, + topic := otopic.Topic + delete(tmap, topic) + ppartitions, exists := planned[topic] + if !exists { // any topic that is no longer planned was entirely revoked, + allRevokedT := revokeT(topic) + for _, opartition := range otopic.Partitions { + allRevokedT[opartition] = struct{}{} + } + continue + } + // calculate what was added by creating a planned existence map, + // then removing what was owned, and anything that remains is new, + for _, ppartition := range ppartitions { + pmap[ppartition] = struct{}{} + } + for _, opartition := range otopic.Partitions { + delete(pmap, opartition) + } + if len(pmap) > 0 { + allAddedT := addT(topic) + for ppartition := range pmap { + delete(pmap, ppartition) + allAddedT[ppartition] = member.MemberID + } + } + // then calculate removal by creating owned existence map, + // then removing what was planned, anything remaining was revoked. + for _, opartition := range otopic.Partitions { + pmap[opartition] = struct{}{} + } + for _, ppartition := range ppartitions { + delete(pmap, ppartition) + } + if len(pmap) > 0 { + allRevokedT := revokeT(topic) + for opartition := range pmap { + delete(pmap, opartition) + allRevokedT[opartition] = struct{}{} + } + } + } + for ptopic := range tmap { // finally, anything remaining in tmap is a new planned topic. + delete(tmap, ptopic) + allAddedT := addT(ptopic) + for _, ppartition := range planned[ptopic] { + allAddedT[ppartition] = member.MemberID + } + } + }) + + // Over all revoked, if the revoked partition was added to a different + // member, we remove that partition from the new member. 
+	for topic, rpartitions := range allRevoked {
+		atopic, exists := allAdded[topic]
+		if !exists {
+			continue
+		}
+		for rpartition := range rpartitions {
+			amember, exists := atopic[rpartition]
+			if !exists {
+				continue
+			}
+
+			ptopics := plan[amember]
+			ppartitions := ptopics[topic]
+			for i, ppartition := range ppartitions {
+				if ppartition == rpartition {
+					ppartitions[i] = ppartitions[len(ppartitions)-1]
+					ppartitions = ppartitions[:len(ppartitions)-1]
+					break
+				}
+			}
+			if len(ppartitions) > 0 {
+				ptopics[topic] = ppartitions
+			} else {
+				delete(ptopics, topic)
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/hooks.go b/vendor/github.com/twmb/franz-go/pkg/kgo/hooks.go
new file mode 100644
index 00000000000..0150bab3316
--- /dev/null
+++ b/vendor/github.com/twmb/franz-go/pkg/kgo/hooks.go
@@ -0,0 +1,422 @@
+package kgo
+
+import (
+	"net"
+	"time"
+)
+
+////////////////////////////////////////////////////////////////
+// NOTE:                                                      //
+// NOTE: Make sure new hooks are checked in implementsAnyHook //
+// NOTE:                                                      //
+////////////////////////////////////////////////////////////////
+
+// Hook is a hook to be called when something happens in kgo.
+//
+// The base Hook interface is useless, but wherever a hook can occur in kgo,
+// the client checks if your hook implements an appropriate interface. If so,
+// your hook is called.
+//
+// This allows you to only hook in to behavior you care about, and it allows
+// the client to add more hooks in the future.
+//
+// All hook interfaces in this package have Hook in the name. Hooks must be
+// safe for concurrent use. It is expected that hooks are fast; if a hook needs
+// to take time, then copy what you need and ensure the hook is async.
+type Hook any
+
+type hooks []Hook
+
+func (hs hooks) each(fn func(Hook)) {
+	for _, h := range hs {
+		fn(h)
+	}
+}
+
+// HookNewClient is called in NewClient after a client is initialized. This
+// hook can be used to perform final setup work in your hooks.
+type HookNewClient interface {
+	// OnNewClient is passed the newly initialized client, before any
+	// client goroutines are started.
+	OnNewClient(*Client)
+}
+
+// HookClientClosed is called in Close or CloseAllowingRebalance after a client
+// has been closed. This hook can be used to perform final cleanup work.
+type HookClientClosed interface {
+	// OnClientClosed is passed the client that has been closed, after
+	// all client-internal close cleanup has happened.
+	OnClientClosed(*Client)
+}
+
+//////////////////
+// BROKER HOOKS //
+//////////////////
+
+// HookBrokerConnect is called after a connection to a broker is opened.
+type HookBrokerConnect interface {
+	// OnBrokerConnect is passed the broker metadata, how long it took to
+	// dial, and either the dial's resulting net.Conn or error.
+	OnBrokerConnect(meta BrokerMetadata, dialDur time.Duration, conn net.Conn, err error)
+}
+
+// HookBrokerDisconnect is called when a connection to a broker is closed.
+type HookBrokerDisconnect interface {
+	// OnBrokerDisconnect is passed the broker metadata and the connection
+	// that is closing.
+	OnBrokerDisconnect(meta BrokerMetadata, conn net.Conn)
+}
+
+// HookBrokerWrite is called after a write to a broker.
+//
+// Kerberos SASL does not cause write hooks, since it directly writes to the
+// connection.
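+//
+// As a usage sketch (type and field names invented for illustration), a hook
+// that sums successfully written bytes could look like:
+//
+//	type writeBytes struct{ n atomic.Int64 } // sync/atomic
+//
+//	func (w *writeBytes) OnBrokerWrite(_ BrokerMetadata, _ int16, bytesWritten int, _, _ time.Duration, err error) {
+//		if err == nil {
+//			w.n.Add(int64(bytesWritten))
+//		}
+//	}
+//
+// and would be installed at client creation via the WithHooks option.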
+type HookBrokerWrite interface {
+	// OnBrokerWrite is passed the broker metadata, the key for the request
+	// that was written, the number of bytes that were written (may not be
+	// the whole request if there was an error), how long the request
+	// waited before being written (including throttling waiting), how long
+	// it took to write the request, and any error.
+	//
+	// The bytes written does not count any tls overhead.
+	OnBrokerWrite(meta BrokerMetadata, key int16, bytesWritten int, writeWait, timeToWrite time.Duration, err error)
+}
+
+// HookBrokerRead is called after a read from a broker.
+//
+// Kerberos SASL does not cause read hooks, since it directly reads from the
+// connection.
+type HookBrokerRead interface {
+	// OnBrokerRead is passed the broker metadata, the key for the response
+	// that was read, the number of bytes read (may not be the whole read
+	// if there was an error), how long the client waited before reading
+	// the response, how long it took to read the response, and any error.
+	//
+	// The bytes read does not count any tls overhead.
+	OnBrokerRead(meta BrokerMetadata, key int16, bytesRead int, readWait, timeToRead time.Duration, err error)
+}
+
+// BrokerE2E tracks complete information for a write of a request followed by a
+// read of that request's response.
+//
+// Note that if this is for a produce request with no acks, there will be no
+// read wait / time to read.
+type BrokerE2E struct {
+	// BytesWritten is the number of bytes written for this request.
+	//
+	// This may not be the whole request if there was an error while writing.
+	BytesWritten int
+
+	// BytesRead is the number of bytes read for this request's response.
+	//
+	// This may not be the whole response if there was an error while
+	// reading, and this will be zero if there was a write error.
+	BytesRead int
+
+	// WriteWait is the time spent waiting from when this request was
+	// generated internally in the client to just before the request is
+	// written to the connection. This number is not included in the
+	// DurationE2E method.
+	WriteWait time.Duration
+	// TimeToWrite is how long a request took to be written on the wire.
+	// This specifically tracks only how long conn.Write takes.
+	TimeToWrite time.Duration
+	// ReadWait tracks the span of time immediately following conn.Write
+	// until conn.Read begins.
+	ReadWait time.Duration
+	// TimeToRead tracks how long conn.Read takes for this request to be
+	// entirely read. This includes the time it takes to allocate a buffer
+	// for the response after the initial four size bytes are read.
+	TimeToRead time.Duration
+
+	// WriteErr is any error encountered during writing. If a write error is
+	// encountered, no read will be attempted.
+	WriteErr error
+	// ReadErr is any error encountered during reading.
+	ReadErr error
+}
+
+// DurationE2E returns the e2e time from the start of when a request is written
+// to the end of when the response for that request was fully read.
+func (e *BrokerE2E) DurationE2E() time.Duration {
+	return e.TimeToWrite + e.ReadWait + e.TimeToRead
+}
+
+// Err returns the first of either the write err or the read err. If this
+// return is non-nil, the request/response had an error.
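+//
+// For example (a sketch; the hook type and observe method are invented), an
+// e2e hook can pair Err with DurationE2E to record only successful round
+// trips:
+//
+//	func (h *e2eLatency) OnBrokerE2E(_ BrokerMetadata, _ int16, e2e BrokerE2E) {
+//		if e2e.Err() == nil {
+//			h.observe(e2e.DurationE2E())
+//		}
+//	}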
+func (e *BrokerE2E) Err() error {
+	if e.WriteErr != nil {
+		return e.WriteErr
+	}
+	return e.ReadErr
+}
+
+// HookBrokerE2E is called after a write to a broker that errors, or after a
+// read to a broker.
+//
+// This differs from HookBrokerRead and HookBrokerWrite by tracking all E2E
+// info for a write and a read, which allows for easier e2e metrics. This hook
+// can replace both the read and write hook.
+//
+// If a write or read error occurs, this hook is called with all information
+// possible at the time (e.g., if a write error occurs, all write info is
+// specified).
+//
+// Kerberos SASL does not cause this hook, since it directly reads from the
+// connection.
+type HookBrokerE2E interface {
+	// OnBrokerE2E is passed the broker metadata, the key for the
+	// request/response that was written/read, and the e2e info for the
+	// request and response.
+	OnBrokerE2E(meta BrokerMetadata, key int16, e2e BrokerE2E)
+}
+
+// HookBrokerThrottle is called after a response to a request is read
+// from a broker, and the response identifies throttling in effect.
+type HookBrokerThrottle interface {
+	// OnBrokerThrottle is passed the broker metadata, the imposed
+	// throttling interval, and whether the throttle was applied before
+	// Kafka responded to the request or after.
+	//
+	// For Kafka < 2.0, the throttle is applied before issuing a response.
+	// For Kafka >= 2.0, the throttle is applied after issuing a response.
+	//
+	// If throttledAfterResponse is false, then Kafka already applied the
+	// throttle. If it is true, the client internally will not send another
+	// request until the throttle deadline has passed.
+	OnBrokerThrottle(meta BrokerMetadata, throttleInterval time.Duration, throttledAfterResponse bool)
+}
+
+//////////
+// MISC //
+//////////
+
+// HookGroupManageError is called after every error that causes the client,
+// operating as a group member, to break out of the group managing loop and
+// backoff temporarily.
+//
+// Specifically, any error that would result in OnPartitionsLost being called
+// will result in this hook being called.
+type HookGroupManageError interface {
+	// OnGroupManageError is passed the error that killed a group session.
+	// This can be used to detect potentially fatal errors and act on them
+	// at runtime to recover (such as group auth errors, or group max size
+	// reached).
+	OnGroupManageError(error)
+}
+
+///////////////////////////////
+// PRODUCE & CONSUME BATCHES //
+///////////////////////////////
+
+// ProduceBatchMetrics tracks information about successful produces to
+// partitions.
+type ProduceBatchMetrics struct {
+	// NumRecords is the number of records that were produced in this
+	// batch.
+	NumRecords int
+
+	// UncompressedBytes is the number of bytes the records serialized as
+	// before compression.
+	//
+	// For record batches (Kafka v0.11.0+), this is the size of the records
+	// in a batch, and does not include record batch overhead.
+	//
+	// For message sets, this size includes message set overhead.
+	UncompressedBytes int
+
+	// CompressedBytes is the number of bytes actually written for this
+	// batch, after compression. If compression is not used, this will be
+	// equal to UncompressedBytes.
+	//
+	// For record batches, this is the size of the compressed records, and
+	// does not include record batch overhead.
+	//
+	// For message sets, this is the size of the compressed message set.
+	CompressedBytes int
+
+	// CompressionType signifies which algorithm the batch was compressed
+	// with.
+	//
+	// 0 is no compression, 1 is gzip, 2 is snappy, 3 is lz4, and 4 is
+	// zstd.
+	CompressionType uint8
+}
+
+// HookProduceBatchWritten is called whenever a batch is known to be
+// successfully produced.
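+//
+// For example (a sketch; the receiver and recordRatio are invented), the
+// metrics can be used to derive the achieved compression ratio per topic:
+//
+//	func (m *batchMetrics) OnProduceBatchWritten(_ BrokerMetadata, topic string, _ int32, metrics ProduceBatchMetrics) {
+//		if metrics.CompressedBytes > 0 {
+//			m.recordRatio(topic, float64(metrics.UncompressedBytes)/float64(metrics.CompressedBytes))
+//		}
+//	}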
+type HookProduceBatchWritten interface {
+	// OnProduceBatchWritten is called per successful batch written to a
+	// topic partition.
+	OnProduceBatchWritten(meta BrokerMetadata, topic string, partition int32, metrics ProduceBatchMetrics)
+}
+
+// FetchBatchMetrics tracks information about fetches of batches.
+type FetchBatchMetrics struct {
+	// NumRecords is the number of records that were fetched in this batch.
+	//
+	// Note that this number includes transaction markers, which are not
+	// actually returned to the user.
+	//
+	// If the batch has an encoding error, this will be 0.
+	NumRecords int
+
+	// UncompressedBytes is the number of bytes the records deserialized
+	// into after decompression.
+	//
+	// For record batches (Kafka v0.11.0+), this is the size of the records
+	// in a batch, and does not include record batch overhead.
+	//
+	// For message sets, this size includes message set overhead.
+	//
+	// Note that this number may be higher than the corresponding number
+	// when producing, because as an "optimization", Kafka can return
+	// partial batches when fetching.
+	UncompressedBytes int
+
+	// CompressedBytes is the number of bytes actually read for this batch,
+	// before decompression. If the batch was not compressed, this will be
+	// equal to UncompressedBytes.
+	//
+	// For record batches, this is the size of the compressed records, and
+	// does not include record batch overhead.
+	//
+	// For message sets, this is the size of the compressed message set.
+	CompressedBytes int
+
+	// CompressionType signifies which algorithm the batch was compressed
+	// with.
+	//
+	// 0 is no compression, 1 is gzip, 2 is snappy, 3 is lz4, and 4 is
+	// zstd.
+	CompressionType uint8
+}
+
+// HookFetchBatchRead is called whenever a batch is read within the client.
+//
+// Note that this hook is called when processing, but a batch may be internally
+// discarded after processing in some uncommon specific circumstances.
+//
+// If the client reads v0 or v1 message sets, and they are not compressed, then
+// this hook will be called per record.
+type HookFetchBatchRead interface {
+	// OnFetchBatchRead is called per batch read from a topic partition.
+	OnFetchBatchRead(meta BrokerMetadata, topic string, partition int32, metrics FetchBatchMetrics)
+}
+
+///////////////////////////////
+// PRODUCE & CONSUME RECORDS //
+///////////////////////////////
+
+// HookProduceRecordBuffered is called when a record is buffered internally in
+// the client from a call to Produce.
+//
+// This hook can be used to write metrics that gather the number of records or
+// bytes buffered, or the hook can be used to write interceptors that modify a
+// record's key / value / headers before being produced. If you just want a
+// metric for the number of records buffered, use the client's
+// BufferedProduceRecords method, as it is faster.
+//
+// Note that this hook may slow down high-volume producing a bit.
+type HookProduceRecordBuffered interface {
+	// OnProduceRecordBuffered is passed a record that is buffered.
+	//
+	// This hook is called immediately after Produce is called, after the
+	// function potentially sets the default topic.
+	OnProduceRecordBuffered(*Record)
+}
+
+// HookProduceRecordPartitioned is called when a record is partitioned and
+// internally ready to be flushed.
+//
+// This hook can be used to create metrics of buffered records per partition,
+// and then you can correlate that to partition leaders and determine which
+// brokers are having problems.
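+//
+// A sketch of such a metric (names invented; a mutex guards the maps):
+//
+//	func (c *partitionCounts) OnProduceRecordPartitioned(r *Record, leader int32) {
+//		c.mu.Lock()
+//		defer c.mu.Unlock()
+//		c.buffered[r.Partition]++
+//		c.leaderFor[r.Partition] = leader
+//	}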
+//
+// Note that this hook will slow down high-volume producing and it is
+// recommended to only use this temporarily or if you are ok with the
+// performance hit.
+type HookProduceRecordPartitioned interface {
+	// OnProduceRecordPartitioned is passed a record that has been
+	// partitioned and the current broker leader for the partition
+	// (note that the leader may change if the partition is moved).
+	//
+	// This hook is called once a record is queued to be flushed. The
+	// record's Partition and Timestamp fields are safe to read.
+	OnProduceRecordPartitioned(*Record, int32)
+}
+
+// HookProduceRecordUnbuffered is called just before a record's promise is
+// finished; this is effectively a mirror of a record promise.
+//
+// As an example, if using HookProduceRecordBuffered for a gauge of how many
+// record bytes are buffered, this hook can be used to decrement the gauge.
+//
+// Note that this hook will slow down high-volume producing a bit. As well,
+// records that were buffered but are paused (and stripped internally before
+// being returned to the user) will still be passed to this hook.
+type HookProduceRecordUnbuffered interface {
+	// OnProduceRecordUnbuffered is passed a record that is just about to
+	// have its produce promise called, as well as the error that the
+	// promise will be called with.
+	OnProduceRecordUnbuffered(*Record, error)
+}
+
+// HookFetchRecordBuffered is called when a record is internally buffered after
+// fetching, ready to be polled.
+//
+// This hook can be used to write gauge metrics regarding the number of records
+// or bytes buffered, or to write interceptors that modify a record before
+// being returned from polling. If you just want a metric for the number of
+// records buffered, use the client's BufferedFetchRecords method, as it is
+// faster.
+//
+// Note that this hook will slow down high-volume consuming a bit.
+type HookFetchRecordBuffered interface {
+	// OnFetchRecordBuffered is passed a record that is now buffered, ready
+	// to be polled.
+	OnFetchRecordBuffered(*Record)
+}
+
+// HookFetchRecordUnbuffered is called when a fetched record is unbuffered.
+//
+// A record can be internally discarded in some scenarios without being
+// polled, such as when the internal assignment changes.
+//
+// As an example, if using HookFetchRecordBuffered for a gauge of how many
+// record bytes are buffered ready to be polled, this hook can be used to
+// decrement the gauge.
+//
+// Note that this hook may slow down high-volume consuming a bit.
+type HookFetchRecordUnbuffered interface {
+	// OnFetchRecordUnbuffered is passed a record that is being
+	// "unbuffered" within the client, and whether the record is being
+	// returned from polling.
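+	//
+	// For example (a sketch; the gauge type is invented), a buffered-bytes
+	// gauge can add in OnFetchRecordBuffered and subtract here:
+	//
+	//	func (g *fetchGauge) OnFetchRecordUnbuffered(r *Record, _ bool) {
+	//		g.bytes.Add(-int64(len(r.Value)))
+	//	}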
+	OnFetchRecordUnbuffered(r *Record, polled bool)
+}
+
+/////////////
+// HELPERS //
+/////////////
+
+// implementsAnyHook will check the incoming Hook for any Hook implementation
+func implementsAnyHook(h Hook) bool {
+	switch h.(type) {
+	case HookNewClient,
+		HookClientClosed,
+		HookBrokerConnect,
+		HookBrokerDisconnect,
+		HookBrokerWrite,
+		HookBrokerRead,
+		HookBrokerE2E,
+		HookBrokerThrottle,
+		HookGroupManageError,
+		HookProduceBatchWritten,
+		HookFetchBatchRead,
+		HookProduceRecordBuffered,
+		HookProduceRecordPartitioned,
+		HookProduceRecordUnbuffered,
+		HookFetchRecordBuffered,
+		HookFetchRecordUnbuffered:
+		return true
+	}
+	return false
+}
diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/internal/sticky/go121.go b/vendor/github.com/twmb/franz-go/pkg/kgo/internal/sticky/go121.go
new file mode 100644
index 00000000000..3cf972b6edb
--- /dev/null
+++ b/vendor/github.com/twmb/franz-go/pkg/kgo/internal/sticky/go121.go
@@ -0,0 +1,28 @@
+//go:build go1.21
+// +build go1.21
+
+package sticky
+
+import "slices"
+
+func sortPartNums(ps memberPartitions) {
+	slices.Sort(ps)
+}
+
+func (b *balancer) sortMemberByLiteralPartNum(memberNum int) {
+	partNums := b.plan[memberNum]
+	slices.SortFunc(partNums, func(lpNum, rpNum int32) int {
+		ltNum, rtNum := b.partOwners[lpNum], b.partOwners[rpNum]
+		li, ri := b.topicInfos[ltNum], b.topicInfos[rtNum]
+		lt, rt := li.topic, ri.topic
+		lp, rp := lpNum-li.partNum, rpNum-ri.partNum
+		if lp < rp {
+			return -1
+		} else if lp > rp {
+			return 1
+		} else if lt < rt {
+			return -1
+		}
+		return 1
+	})
+}
diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/internal/sticky/goold.go b/vendor/github.com/twmb/franz-go/pkg/kgo/internal/sticky/goold.go
new file mode 100644
index 00000000000..addd2bbc19c
--- /dev/null
+++ b/vendor/github.com/twmb/franz-go/pkg/kgo/internal/sticky/goold.go
@@ -0,0 +1,22 @@
+//go:build !go1.21
+// +build !go1.21
+
+package sticky
+
+import "sort"
+
+func sortPartNums(partNums memberPartitions) {
+	sort.Slice(partNums, func(i, j int) bool { return partNums[i] < partNums[j] })
+}
+
+func (b *balancer) sortMemberByLiteralPartNum(memberNum int) {
+	partNums := b.plan[memberNum]
+	sort.Slice(partNums, func(i, j int) bool {
+		lpNum, rpNum := partNums[i], partNums[j]
+		ltNum, rtNum := b.partOwners[lpNum], b.partOwners[rpNum]
+		li, ri := b.topicInfos[ltNum], b.topicInfos[rtNum]
+		lt, rt := li.topic, ri.topic
+		lp, rp := lpNum-li.partNum, rpNum-ri.partNum
+		return lp < rp || (lp == rp && lt < rt)
+	})
+}
diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/internal/sticky/graph.go b/vendor/github.com/twmb/franz-go/pkg/kgo/internal/sticky/graph.go
new file mode 100644
index 00000000000..d6bbb587ed2
--- /dev/null
+++ b/vendor/github.com/twmb/franz-go/pkg/kgo/internal/sticky/graph.go
@@ -0,0 +1,226 @@
+package sticky
+
+import "container/heap"
+
+// Graph maps members to partitions they want to steal.
+//
+// The representation was chosen so as to avoid updating all members on any
+// partition move; move updates are one map update.
+type graph struct {
+	b *balancer
+
+	// node => edges out
+	// "from a node (member), which topicNum could we steal?"
+	out [][]uint32
+
+	// edge => who owns this edge; built in balancer's assignUnassigned
+	cxns []partitionConsumer
+
+	// scores are all node scores from a search node. The distance field
+	// is reset on findSteal to infinityScore.
+ scores pathScores + + // heapBuf and pathBuf are backing buffers that are reused every + // findSteal; note that pathBuf must be done being used before + // the next find steal, but it always is. + heapBuf pathHeap + pathBuf []stealSegment +} + +func (b *balancer) newGraph( + partitionConsumers []partitionConsumer, + topicPotentials [][]uint16, +) graph { + g := graph{ + b: b, + out: make([][]uint32, len(b.members)), + cxns: partitionConsumers, + scores: make([]pathScore, len(b.members)), + heapBuf: make([]*pathScore, len(b.members)), + } + outBufs := make([]uint32, len(b.members)*len(topicPotentials)) + for memberNum := range b.plan { + out := outBufs[:0:len(topicPotentials)] + outBufs = outBufs[len(topicPotentials):] + // In the worst case, if every node is linked to each other, + // each node will have nparts edges. We preallocate the worst + // case. It is common for the graph to be highly connected. + g.out[memberNum] = out + } + for topicNum, potentials := range topicPotentials { + for _, potential := range potentials { + g.out[potential] = append(g.out[potential], uint32(topicNum)) + } + } + return g +} + +func (g *graph) changeOwnership(edge int32, newDst uint16) { + g.cxns[edge].memberNum = newDst +} + +// findSteal uses Dijkstra search to find a path from the best node it can reach. +func (g *graph) findSteal(from uint16) ([]stealSegment, bool) { + // First, we must reset our scores from any prior run. This is O(M), + // but is fast and faster than making a map and extending it a lot. + for i := range g.scores { + g.scores[i].distance = infinityScore + g.scores[i].done = false + } + + first, _ := g.getScore(from) + + first.distance = 0 + first.done = true + + g.heapBuf = append(g.heapBuf[:0], first) + rem := &g.heapBuf + for rem.Len() > 0 { + current := heap.Pop(rem).(*pathScore) + if current.level > first.level+1 { + path := g.pathBuf[:0] + for current.parent != nil { + path = append(path, stealSegment{ + current.node, + current.parent.node, + current.srcEdge, + }) + current = current.parent + } + g.pathBuf = path + return path, true + } + + current.done = true + + for _, topicNum := range g.out[current.node] { + info := g.b.topicInfos[topicNum] + firstPartNum, lastPartNum := info.partNum, info.partNum+info.partitions + for edge := firstPartNum; edge < lastPartNum; edge++ { + neighborNode := g.cxns[edge].memberNum + neighbor, isNew := g.getScore(neighborNode) + if neighbor.done { + continue + } + + distance := current.distance + 1 + + // The neighbor is the current node that owns this edge. + // If our node originally owned this partition, then it + // would be preferable to steal edge back. + srcIsOriginal := g.cxns[edge].originalNum == current.node + + // If this is a new neighbor (our first time seeing the neighbor + // in our search), this is also the shortest path to reach them, + // where shortest defers preference to original sources THEN distance. + if isNew { + neighbor.parent = current + neighbor.srcIsOriginal = srcIsOriginal + neighbor.srcEdge = edge + neighbor.distance = distance + neighbor.heapIdx = len(*rem) + heap.Push(rem, neighbor) + } else if !neighbor.srcIsOriginal && srcIsOriginal { + // If the search path has seen this neighbor before, but + // we now are evaluating a partition that would increase + // stickiness if stolen, then fixup the neighbor's parent + // and srcEdge. 
+				neighbor.parent = current
+				neighbor.srcIsOriginal = true
+				neighbor.srcEdge = edge
+				neighbor.distance = distance
+				heap.Fix(rem, neighbor.heapIdx)
+			}
+		}
+	}
+	}
+
+	return nil, false
+}
+
+type stealSegment struct {
+	src  uint16 // member num
+	dst  uint16 // member num
+	part int32  // partNum
+}
+
+// As we traverse a graph, we assign each node a path score, which tracks a few
+// numbers for what it would take to reach this node from our first node.
+type pathScore struct {
+	// Done is set to true when we pop a node off of the graph. Once we
+	// pop a node, it means we have found a best path to that node and
+	// we do not want to revisit it for processing if any other future
+	// nodes reach back to this one.
+	done bool
+
+	// srcIsOriginal is true if, were our parent to steal srcEdge, would
+	// that put srcEdge back on the original member. That is, if we are B
+	// and our parent is A, does our srcEdge originally belong to A?
+	//
+	// This field exists to work around a very slim edge case where a
+	// partition is stolen by B and then needs to be stolen back by A
+	// later.
+	srcIsOriginal bool
+
+	node     uint16 // our member num
+	distance int32  // how many steals it would take to get here
+	srcEdge  int32  // the partition used to reach us
+	level    int32  // partitions owned on this segment
+	parent   *pathScore
+	heapIdx  int
+}
+
+type pathScores []pathScore
+
+const infinityScore = 1<<31 - 1
+
+func (g *graph) getScore(node uint16) (*pathScore, bool) {
+	r := &g.scores[node]
+	exists := r.distance != infinityScore
+	if !exists {
+		*r = pathScore{
+			node:     node,
+			level:    int32(len(g.b.plan[node])),
+			distance: infinityScore,
+		}
+	}
+	return r, !exists
+}
+
+type pathHeap []*pathScore
+
+func (p *pathHeap) Len() int { return len(*p) }
+func (p *pathHeap) Swap(i, j int) {
+	h := *p
+	l, r := h[i], h[j]
+	l.heapIdx, r.heapIdx = r.heapIdx, l.heapIdx
+	h[i], h[j] = r, l
+}
+
+// For our path, we always want to prioritize stealing a partition we
+// originally owned. This may result in a longer steal path, but it will
+// increase stickiness.
+//
+// Next, our real goal, which is to find a node we can steal from. Because of
+// this, we always want to sort by the highest level. The pathHeap stores
+// reachable paths, so by sorting by the highest level, we terminate quicker:
+// we always check the most likely candidates to quit our search.
+//
+// Finally, we simply prefer searching through shorter paths and, barring that,
+// just sort by node.
+func (p *pathHeap) Less(i, j int) bool {
+	l, r := (*p)[i], (*p)[j]
+	return l.srcIsOriginal && !r.srcIsOriginal || !l.srcIsOriginal && !r.srcIsOriginal &&
+		(l.level > r.level || l.level == r.level &&
+			(l.distance < r.distance || l.distance == r.distance &&
+				l.node < r.node))
+}
+
+func (p *pathHeap) Push(x any) { *p = append(*p, x.(*pathScore)) }
+func (p *pathHeap) Pop() any {
+	h := *p
+	l := len(h)
+	r := h[l-1]
+	*p = h[:l-1]
+	return r
+}
diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/internal/sticky/rbtree.go b/vendor/github.com/twmb/franz-go/pkg/kgo/internal/sticky/rbtree.go
new file mode 100644
index 00000000000..8d563b7873f
--- /dev/null
+++ b/vendor/github.com/twmb/franz-go/pkg/kgo/internal/sticky/rbtree.go
@@ -0,0 +1,392 @@
+package sticky
+
+// This file contains a vendoring of github.com/twmb/go-rbtree, with interface
+// types replaced with *partitionLevel. We do this to simplify (and slightly
+// speed up) the rbtree, get rid of a bunch of code we do not need, and to
+// drop a dep.
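+//
+// For reference when reading the insert and delete repair cases below: a
+// red-black tree maintains three invariants. The root is black, a red node
+// never has a red child, and every root-to-leaf path crosses the same number
+// of black nodes. Each repair case restores one of these invariants after a
+// local violation, which is why the cases reason about recoloring versus
+// lifting (rotating).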
+ +type color bool + +const red, black color = true, false + +// Tree is a red-black tree. +type treePlan struct { + root *treePlanNode + size int +} + +type treePlanNode struct { + left *treePlanNode + right *treePlanNode + parent *treePlanNode + color color + item *partitionLevel +} + +// liftRightSideOf is rotateLeft. +// +// Graphically speaking, this takes the node on the right and lifts it above +// ourselves. IMO trying to visualize a "rotation" is confusing. +func (t *treePlan) liftRightSideOf(n *treePlanNode) { + r := n.right + t.relinkParenting(n, r) + + // lift the right + n.right = r.left + n.parent = r + + // fix the lifted right's left + if r.left != nil { + r.left.parent = n + } + r.left = n +} + +// liftLeftSideOf is rotateRight, renamed to aid my visualization. +func (t *treePlan) liftLeftSideOf(n *treePlanNode) { + l := n.left + t.relinkParenting(n, l) + + n.left = l.right + n.parent = l + + if l.right != nil { + l.right.parent = n + } + l.right = n +} + +// relinkParenting is called to fix a former child c of node n's parent +// relationship to the parent of n. +// +// After this, the n node can be considered to have no parent. +func (t *treePlan) relinkParenting(n, c *treePlanNode) { + p := n.parent + if c != nil { + c.parent = p + } + if p == nil { + t.root = c + return + } + if n == p.left { + p.left = c + } else { + p.right = c + } +} + +func (n *treePlanNode) sibling() *treePlanNode { + if n.parent == nil { + return nil + } + if n == n.parent.left { + return n.parent.right + } + return n.parent.left +} + +func (n *treePlanNode) uncle() *treePlanNode { + p := n.parent + if p.parent == nil { + return nil + } + return p.sibling() +} + +func (n *treePlanNode) grandparent() *treePlanNode { + return n.parent.parent +} + +func (n *treePlanNode) isBlack() bool { + return n == nil || n.color == black +} + +func (t *treePlan) insert(i *partitionLevel) *treePlanNode { + r := &treePlanNode{item: i} + t.reinsert(r) + return r +} + +func (t *treePlan) reinsert(n *treePlanNode) { + *n = treePlanNode{ + color: red, + item: n.item, + } + t.size++ + if t.root == nil { + n.color = black + t.root = n + return + } + + on := t.root + var set **treePlanNode + for { + if n.item.less(on.item) { + if on.left == nil { + set = &on.left + break + } + on = on.left + } else { + if on.right == nil { + set = &on.right + break + } + on = on.right + } + } + + n.parent = on + *set = n + +repair: + // Case 1: we have jumped back to the root. Paint it black. + if n.parent == nil { + n.color = black + return + } + + // Case 2: if our parent is black, us being red does not add a new black + // to the chain and cannot increase the maximum number of blacks from + // root, so we are done. + if n.parent.color == black { + return + } + + // Case 3: if we have an uncle and it is red, then we flip our + // parent's, uncle's, and grandparent's color. + // + // This stops the red-red from parent to us, but may introduce + // a red-red from grandparent to its parent, so we set ourselves + // to the grandparent and go back to the repair beginning. + if uncle := n.uncle(); uncle != nil && uncle.color == red { + n.parent.color = black + uncle.color = black + n = n.grandparent() + n.color = red + goto repair + } + + // Case 4 step 1: our parent is red but uncle is black. Step 2 relies + // on the node being on the "outside". If we are on the inside, our + // parent lifts ourselves above itself, thus making the parent the + // outside, and then we become that parent. 
+	p := n.parent
+	g := p.parent
+	if n == p.right && p == g.left {
+		t.liftRightSideOf(p)
+		n = n.left
+	} else if n == p.left && p == g.right {
+		t.liftLeftSideOf(p)
+		n = n.right
+	}
+
+	// Case 4 step 2: we are on the outside, and we and our parent are red.
+	// If we are on the left, our grandparent lifts its left and then swaps
+	// its and our parent's colors.
+	//
+	// This fixes the red-red situation while preserving the number of
+	// blacks from root to leaf.
+	p = n.parent
+	g = p.parent
+
+	if n == p.left {
+		t.liftLeftSideOf(g)
+	} else {
+		t.liftRightSideOf(g)
+	}
+	p.color = black
+	g.color = red
+}
+
+func (t *treePlan) delete(n *treePlanNode) {
+	t.size--
+
+	// We only want to delete nodes with at most one child. If this has
+	// two, we find the max node on the left, set this node's item to that
+	// node's item, and then delete that max node.
+	if n.left != nil && n.right != nil {
+		remove := n.left.max()
+		n.item, remove.item = remove.item, n.item
+		n = remove
+	}
+
+	// Determine which child to elevate into our position now that we know
+	// we have at most one child.
+	c := n.right
+	if n.right == nil {
+		c = n.left
+	}
+
+	t.doDelete(n, c)
+	t.relinkParenting(n, c)
+}
+
+// Since we do not represent leaf nodes with objects, we relink the parent
+// after deleting. See the Wikipedia note. Most of our deletion operates
+// on n (the dubbed "shadow" node) rather than c.
+func (t *treePlan) doDelete(n, c *treePlanNode) {
+	// If the node was red, we deleted a red node; the number of black
+	// nodes along any path is the same and we can quit.
+	if n.color != black {
+		return
+	}
+
+	// If the node was black, then, if we have a child and it is red,
+	// we switch the child to black to preserve the path number.
+	if c != nil && c.color == red {
+		c.color = black
+		return
+	}
+
+	// We either do not have a child (nil is black), or we do and it
+	// is black. We must preserve the number of blacks.
+
+case1:
+	// Case 1: if the child is the new root, then the tree must have only
+	// had up to two elements and now has one or zero. We are done.
+	if n.parent == nil {
+		return
+	}
+
+	// Note that if we are here, we must have a sibling.
+	//
+	// The first time through, from the deleted node, the deleted node was
+	// black and the child was black. This being two blacks meant that the
+	// original node's parent required two blacks on the other side.
+	//
+	// The second time through, through case 3, the sibling was repainted
+	// red... so it must still exist.
+
+	// Case 2: if the child's sibling is red, we recolor the parent and
+	// sibling and lift the sibling, ensuring we have a black sibling.
+	s := n.sibling()
+	if s.color == red {
+		n.parent.color = red
+		s.color = black
+		if n == n.parent.left {
+			t.liftRightSideOf(n.parent)
+		} else {
+			t.liftLeftSideOf(n.parent)
+		}
+		s = n.sibling()
+	}
+
+	// Right here, we know the sibling is black. If both sibling children
+	// are black or nil leaves (black), we enter cases 3 and 4.
+	if s.left.isBlack() && s.right.isBlack() {
+		// Case 3: if the parent, sibling, sibling's children are
+		// black, we can paint the sibling red to fix the imbalance.
+		// However, the same black imbalance can exist on the other
+		// side of the parent, so we go back to case 1 on the parent.
+		s.color = red
+		if n.parent.color == black {
+			n = n.parent
+			goto case1
+		}
+
+		// Case 4: if the sibling and sibling's children are black, but
+		// the parent is red, we can swap parent and sibling colors to
+		// fix our imbalance. We have no worry of further imbalances up
+		// the tree since we deleted a black node, replaced it with a
+		// red node, and then painted that red node black.
+		n.parent.color = black
+		return
+	}
+
+	// Now we know the sibling is black and one of its children is red.
+
+	// Case 5: in preparation for 6, if we are on the left, we want our
+	// sibling, if it has a right child, for that child's color to be red.
+	// We swap the sibling and sibling's left's color (since we know the
+	// sibling has a red child and that the right is black) and we lift the
+	// left child.
+	//
+	// This keeps the same number of black nodes under the sibling.
+	if n == n.parent.left && s.right.isBlack() {
+		s.color = red
+		s.left.color = black
+		t.liftLeftSideOf(s)
+	} else if n == n.parent.right && s.left.isBlack() {
+		s.color = red
+		s.right.color = black
+		t.liftRightSideOf(s)
+	}
+	s = n.sibling() // can change from the above case
+
+	// At this point, we know we have a black sibling and, if we are on
+	// the left, it has a red child on its right.
+
+	// Case 6: we lift the sibling above the parent, swap the sibling's and
+	// parent's color, and change the sibling's right's color from red to
+	// black.
+	//
+	// This brings in a black above our node to replace the one we deleted,
+	// while preserving the number of blacks on the other side of the path.
+	s.color = n.parent.color
+	n.parent.color = black
+	if n == n.parent.left {
+		s.right.color = black
+		t.liftRightSideOf(n.parent)
+	} else {
+		s.left.color = black
+		t.liftLeftSideOf(n.parent)
+	}
+}
+
+func (t *treePlan) findWith(cmp func(*partitionLevel) int) *treePlanNode {
+	on := t.root
+	for on != nil {
+		way := cmp(on.item)
+		switch {
+		case way < 0:
+			on = on.left
+		case way == 0:
+			return on
+		case way > 0:
+			on = on.right
+		}
+	}
+	return nil
+}
+
+func (t *treePlan) findWithOrInsertWith(
+	find func(*partitionLevel) int,
+	insert func() *partitionLevel,
+) *treePlanNode {
+	found := t.findWith(find)
+	if found == nil {
+		return t.insert(insert())
+	}
+	return found
+}
+
+func (t *treePlan) min() *treePlanNode {
+	if t.root == nil {
+		return nil
+	}
+	return t.root.min()
+}
+
+func (n *treePlanNode) min() *treePlanNode {
+	for n.left != nil {
+		n = n.left
+	}
+	return n
+}
+
+func (t *treePlan) max() *treePlanNode {
+	if t.root == nil {
+		return nil
+	}
+	return t.root.max()
+}
+
+func (n *treePlanNode) max() *treePlanNode {
+	for n.right != nil {
+		n = n.right
+	}
+	return n
+}
diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/internal/sticky/sticky.go b/vendor/github.com/twmb/franz-go/pkg/kgo/internal/sticky/sticky.go
new file mode 100644
index 00000000000..a502a2e5613
--- /dev/null
+++ b/vendor/github.com/twmb/franz-go/pkg/kgo/internal/sticky/sticky.go
@@ -0,0 +1,733 @@
+// Package sticky provides a sticky partitioning strategy for Kafka, with a
+// complete overhaul to be faster, more understandable, and optimal.
+//
+// For some points on how Java's strategy is flawed, see
+// https://github.com/IBM/sarama/pull/1416/files/b29086bdaae0da7ce71eae3f854d50685fd6b631#r315005878
+package sticky
+
+import (
+	"math"
+
+	"github.com/twmb/franz-go/pkg/kbin"
+	"github.com/twmb/franz-go/pkg/kmsg"
+)
+
+// Sticky partitioning has two versions, the latter from KIP-341 preventing a
+// bug. The second version introduced generations; consumers balancing with
+// the first version default to generation -1.
+
+// We can support up to 65533 members; two slots are reserved.
+// We can support up to 2,147,483,647 partitions.
+// I expect a server to fall over before reaching either of these numbers. + +// GroupMember is a Kafka group member. +type GroupMember struct { + ID string + Topics []string + UserData []byte + Owned []kmsg.ConsumerMemberMetadataOwnedPartition + Generation int32 + Cooperative bool +} + +// Plan is the plan this package came up with (member => topic => partitions). +type Plan map[string]map[string][]int32 + +type balancer struct { + // members are the members in play for this balance. + // This is built in newBalancer mapping member IDs to the GroupMember. + members []GroupMember + + memberNums map[string]uint16 // member id => index into members + + topicNums map[string]uint32 // topic name => index into topicInfos + topicInfos []topicInfo + partOwners []uint32 // partition => owning topicNum + + // Stales tracks partNums that are doubly subscribed in this join + // where one of the subscribers is on an old generation. + // + // The newer generation goes into plan directly, the older gets + // stuffed here. + stales map[int32]uint16 // partNum => stale memberNum + + plan membersPartitions // what we are building and balancing + + // planByNumPartitions orders plan members into partition count levels. + // + // The nodes in the tree reference values in plan, meaning updates in + // this field are visible in plan. + planByNumPartitions treePlan + + // if the subscriptions are complex (all members do _not_ consume the + // same partitions), then we build a graph and use that for assigning. + isComplex bool + + // stealGraph is a graphical representation of members and partitions + // they want to steal. + stealGraph graph +} + +type topicInfo struct { + partNum int32 // base part num + partitions int32 // number of partitions in the topic + topic string +} + +func newBalancer(members []GroupMember, topics map[string]int32) *balancer { + var ( + nparts int + topicNums = make(map[string]uint32, len(topics)) + topicInfos = make([]topicInfo, len(topics)) + ) + for topic, partitions := range topics { + topicNum := uint32(len(topicNums)) + topicNums[topic] = topicNum + topicInfos[topicNum] = topicInfo{ + partNum: int32(nparts), + partitions: partitions, + topic: topic, + } + nparts += int(partitions) + } + partOwners := make([]uint32, 0, nparts) + for topicNum, info := range topicInfos { + for i := int32(0); i < info.partitions; i++ { + partOwners = append(partOwners, uint32(topicNum)) + } + } + memberNums := make(map[string]uint16, len(members)) + for num, member := range members { + memberNums[member.ID] = uint16(num) + } + + b := &balancer{ + members: members, + memberNums: memberNums, + topicNums: topicNums, + topicInfos: topicInfos, + + partOwners: partOwners, + stales: make(map[int32]uint16), + plan: make(membersPartitions, len(members)), + } + + evenDivvy := nparts/len(members) + 1 + planBuf := make(memberPartitions, evenDivvy*len(members)) + for num := range members { + b.plan[num] = planBuf[:0:evenDivvy] + planBuf = planBuf[evenDivvy:] + } + return b +} + +func (b *balancer) into() Plan { + plan := make(Plan, len(b.plan)) + ntopics := 5 * len(b.topicNums) / 4 + + for memberNum, partNums := range b.plan { + member := b.members[memberNum].ID + if len(partNums) == 0 { + plan[member] = make(map[string][]int32, 0) + continue + } + topics := make(map[string][]int32, ntopics) + plan[member] = topics + + // partOwners is created by topic, and partNums refers to + // indices in partOwners. If we sort by partNum, we have sorted + // topics and partitions. 
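+		//
+		// For example (counts invented): topics {a: 2, b: 3} flatten
+		// to partNums 0..4, where 0 and 1 are a's partitions 0 and 1,
+		// and 2, 3, and 4 are b's partitions 0, 1, and 2 (topic
+		// numbering follows the order topics were walked in
+		// newBalancer).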
+ sortPartNums(partNums) + + // We can reuse partNums for our topic partitions. + topicParts := partNums[:0] + + lastTopicNum := b.partOwners[partNums[0]] + lastTopicInfo := b.topicInfos[lastTopicNum] + for _, partNum := range partNums { + topicNum := b.partOwners[partNum] + + if topicNum != lastTopicNum { + topics[lastTopicInfo.topic] = topicParts[:len(topicParts):len(topicParts)] + topicParts = topicParts[len(topicParts):] + + lastTopicNum = topicNum + lastTopicInfo = b.topicInfos[topicNum] + } + + partition := partNum - lastTopicInfo.partNum + topicParts = append(topicParts, partition) + } + topics[lastTopicInfo.topic] = topicParts[:len(topicParts):len(topicParts)] + } + return plan +} + +func (b *balancer) partNumByTopic(topic string, partition int32) (int32, bool) { + topicNum, exists := b.topicNums[topic] + if !exists { + return 0, false + } + topicInfo := b.topicInfos[topicNum] + if partition >= topicInfo.partitions { + return 0, false + } + return topicInfo.partNum + partition, true +} + +// memberPartitions contains partitions for a member. +type memberPartitions []int32 + +func (m *memberPartitions) remove(needle int32) { + s := *m + var d int + for i, check := range s { + if check == needle { + d = i + break + } + } + s[d] = s[len(s)-1] + *m = s[:len(s)-1] +} + +func (m *memberPartitions) takeEnd() int32 { + s := *m + r := s[len(s)-1] + *m = s[:len(s)-1] + return r +} + +func (m *memberPartitions) add(partNum int32) { + *m = append(*m, partNum) +} + +// membersPartitions maps members to their partitions. +type membersPartitions []memberPartitions + +type partitionLevel struct { + level int + members []uint16 +} + +// partitionLevel's members field used to be a map, but removing it gains a +// slight perf boost at the cost of removing members being O(M). +// Even with the worse complexity, scanning a short list can be faster +// than managing a map, and we expect groups to not be _too_ large. +func (l *partitionLevel) removeMember(memberNum uint16) { + for i, v := range l.members { + if v == memberNum { + l.members[i] = l.members[len(l.members)-1] + l.members = l.members[:len(l.members)-1] + return + } + } +} + +func (b *balancer) findLevel(level int) *partitionLevel { + return b.planByNumPartitions.findWithOrInsertWith( + func(n *partitionLevel) int { return level - n.level }, + func() *partitionLevel { return newPartitionLevel(level) }, + ).item +} + +func (b *balancer) fixMemberLevel( + src *treePlanNode, + memberNum uint16, + partNums memberPartitions, +) { + b.removeLevelingMember(src, memberNum) + newLevel := len(partNums) + partLevel := b.findLevel(newLevel) + partLevel.members = append(partLevel.members, memberNum) +} + +func (b *balancer) removeLevelingMember( + src *treePlanNode, + memberNum uint16, +) { + src.item.removeMember(memberNum) + if len(src.item.members) == 0 { + b.planByNumPartitions.delete(src) + } +} + +func (l *partitionLevel) less(r *partitionLevel) bool { + return l.level < r.level +} + +func newPartitionLevel(level int) *partitionLevel { + return &partitionLevel{level: level} +} + +func (b *balancer) initPlanByNumPartitions() { + for memberNum, partNums := range b.plan { + partLevel := b.findLevel(len(partNums)) + partLevel.members = append(partLevel.members, uint16(memberNum)) + } +} + +// Balance performs sticky partitioning for the given group members and topics, +// returning the determined plan. 
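+//
+// A minimal in-package sketch (members and counts invented):
+//
+//	plan := Balance([]GroupMember{
+//		{ID: "a", Topics: []string{"t"}},
+//		{ID: "b", Topics: []string{"t"}},
+//	}, map[string]int32{"t": 4})
+//
+// With no prior member history, each member ends up with two of t's four
+// partitions.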
+func Balance(members []GroupMember, topics map[string]int32) Plan { + if len(members) == 0 { + return make(Plan) + } + b := newBalancer(members, topics) + if cap(b.partOwners) == 0 { + return b.into() + } + b.parseMemberMetadata() + b.assignUnassignedAndInitGraph() + b.initPlanByNumPartitions() + b.balance() + return b.into() +} + +// parseMemberMetadata parses all member userdata to initialize the prior plan. +func (b *balancer) parseMemberMetadata() { + // all partitions => members that are consuming those partitions + // Each partition should only have one consumer, but a flaky member + // could rejoin with an old generation (stale user data) and say it + // is consuming something a different member is. See KIP-341. + partitionConsumersByGeneration := make([]memberGeneration, cap(b.partOwners)) + + const highBit uint32 = 1 << 31 + var memberPlan []topicPartition + var gen uint32 + + for _, member := range b.members { + // KAFKA-13715 / KIP-792: cooperative-sticky now includes a + // generation directly with the currently-owned partitions, and + // we can avoid deserializing UserData. This guards against + // some zombie issues (see KIP). + // + // The eager (sticky) balancer revokes all partitions before + // rejoining, so we cannot use Owned. + if member.Cooperative && member.Generation >= 0 { + memberPlan = memberPlan[:0] + for _, t := range member.Owned { + for _, p := range t.Partitions { + memberPlan = append(memberPlan, topicPartition{t.Topic, p}) + } + } + gen = uint32(member.Generation) + } else { + memberPlan, gen = deserializeUserData(member.UserData, memberPlan[:0]) + } + gen |= highBit + memberNum := b.memberNums[member.ID] + for _, topicPartition := range memberPlan { + partNum, exists := b.partNumByTopic(topicPartition.topic, topicPartition.partition) + if !exists { + continue + } + + // We keep the highest generation, and at most two generations. + // If something is doubly consumed, we skip it. + pcs := &partitionConsumersByGeneration[partNum] + switch { + case gen > pcs.genNew: // one consumer already, but new member has higher generation + pcs.memberOld, pcs.genOld = pcs.memberNew, pcs.genNew + pcs.memberNew, pcs.genNew = memberNum, gen + + case gen > pcs.genOld: // one consumer already, we could be second, or if there is a second, we have a high generation + pcs.memberOld, pcs.genOld = memberNum, gen + } + } + } + + for partNum, pcs := range partitionConsumersByGeneration { + if pcs.genNew&highBit != 0 { + b.plan[pcs.memberNew].add(int32(partNum)) + if pcs.genOld&highBit != 0 { + b.stales[int32(partNum)] = pcs.memberOld + } + } + } +} + +type memberGeneration struct { + memberNew uint16 + memberOld uint16 + genNew uint32 + genOld uint32 +} + +type topicPartition struct { + topic string + partition int32 +} + +// deserializeUserData returns the topic partitions a member was consuming and +// the join generation it was consuming from. +// +// If anything fails or we do not understand the userdata parsing generation, +// we return empty defaults. The member will just be assumed to have no +// history. 
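+//
+// For reference, the layout this parser expects (editor's note, reconstructed
+// from the decoding below) is:
+//
+//	ArrayLen(assignments)
+//	  per assignment: String(topic), ArrayLen(parts), Int32 per partition
+//	Int32(generation) // optional trailing field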
+func deserializeUserData(userdata []byte, base []topicPartition) (memberPlan []topicPartition, generation uint32) { + memberPlan = base[:0] + b := kbin.Reader{Src: userdata} + for numAssignments := b.ArrayLen(); numAssignments > 0; numAssignments-- { + topic := b.UnsafeString() + for numPartitions := b.ArrayLen(); numPartitions > 0; numPartitions-- { + memberPlan = append(memberPlan, topicPartition{ + topic, + b.Int32(), + }) + } + } + if len(b.Src) > 0 { + // A generation of -1 is just as good of a generation as 0, so we use 0 + // and then use the high bit to signify this generation has been set. + if generationI32 := b.Int32(); generationI32 > 0 { + generation = uint32(generationI32) + } + } + if b.Complete() != nil { + memberPlan = memberPlan[:0] + } + return +} + +// assignUnassignedAndInitGraph is a long function that assigns unassigned +// partitions to the least loaded members and initializes our steal graph. +// +// Doing so requires a bunch of metadata, and in the process we want to remove +// partitions from the plan that no longer exist in the client. +func (b *balancer) assignUnassignedAndInitGraph() { + // First, over all members in this assignment, map each partition to + // the members that can consume it. We will use this for assigning. + // + // To do this mapping efficiently, we first map each topic to the + // memberNums that can consume those topics, and then use the results + // below in the partition mapping. Doing this two step process allows + // for a 10x speed boost rather than ranging over all partitions many + // times. + topicPotentialsBuf := make([]uint16, len(b.topicNums)*len(b.members)) + topicPotentials := make([][]uint16, len(b.topicNums)) + for memberNum, member := range b.members { + for _, topic := range member.Topics { + topicNum, exists := b.topicNums[topic] + if !exists { + continue + } + memberNums := topicPotentials[topicNum] + if cap(memberNums) == 0 { + memberNums = topicPotentialsBuf[:0:len(b.members)] + topicPotentialsBuf = topicPotentialsBuf[len(b.members):] + } + topicPotentials[topicNum] = append(memberNums, uint16(memberNum)) + } + } + + for _, topicMembers := range topicPotentials { + // If the number of members interested in this topic is not the + // same as the number of members in this group, then **other** + // members are interested in other topics and not this one, and + // we must go to complex balancing. + // + // We could accidentally fall into isComplex if any member is + // not interested in anything, but realistically we do not + // expect members to join with no interests. + if len(topicMembers) != len(b.members) { + b.isComplex = true + } + } + + // Next, over the prior plan, un-map deleted topics or topics that + // members no longer want. This is where we determine what is now + // unassigned. 
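+	// (Editor's note, summarizing the loop below: every partition starts
+	// as {unassignedPart, unassignedPart}; a partition keeps its prior
+	// owner only if its topic still has potential consumers and that
+	// owner still subscribes to the topic, otherwise it is left
+	// unassigned for the assignment passes that follow.)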
+	partitionConsumers := make([]partitionConsumer, cap(b.partOwners)) // partNum => consuming member
+	for i := range partitionConsumers {
+		partitionConsumers[i] = partitionConsumer{unassignedPart, unassignedPart}
+	}
+	for memberNum := range b.plan {
+		partNums := &b.plan[memberNum]
+		for _, partNum := range *partNums {
+			topicNum := b.partOwners[partNum]
+			if len(topicPotentials[topicNum]) == 0 { // all prior subscriptions stopped wanting this partition
+				partNums.remove(partNum)
+				continue
+			}
+			memberTopics := b.members[memberNum].Topics
+			var memberStillWantsTopic bool
+			for _, memberTopic := range memberTopics {
+				if memberTopic == b.topicInfos[topicNum].topic {
+					memberStillWantsTopic = true
+					break
+				}
+			}
+			if !memberStillWantsTopic {
+				partNums.remove(partNum)
+				continue
+			}
+			partitionConsumers[partNum] = partitionConsumer{uint16(memberNum), uint16(memberNum)}
+		}
+	}
+
+	b.tryRestickyStales(topicPotentials, partitionConsumers)
+
+	// For each member, we now sort their current partitions by partition,
+	// then topic. Sorting the lowest numbers first means that once we
+	// steal from the end (when adding a member), we steal equally across
+	// all topics. This benefits the standard case the most, where all
+	// members consume equally.
+	for memberNum := range b.plan {
+		b.sortMemberByLiteralPartNum(memberNum)
+	}
+
+	if !b.isComplex && len(topicPotentials) > 0 {
+		potentials := topicPotentials[0]
+		(&membersByPartitions{potentials, b.plan}).init()
+		for partNum, owner := range partitionConsumers {
+			if owner.memberNum != unassignedPart {
+				continue
+			}
+			assigned := potentials[0]
+			b.plan[assigned].add(int32(partNum))
+			(&membersByPartitions{potentials, b.plan}).fix0()
+			partitionConsumers[partNum].memberNum = assigned
+		}
+	} else {
+		for partNum, owner := range partitionConsumers {
+			if owner.memberNum != unassignedPart {
+				continue
+			}
+			potentials := topicPotentials[b.partOwners[partNum]]
+			if len(potentials) == 0 {
+				continue
+			}
+			leastConsumingPotential := potentials[0]
+			leastConsuming := len(b.plan[leastConsumingPotential])
+			for _, potential := range potentials[1:] {
+				potentialConsuming := len(b.plan[potential])
+				if potentialConsuming < leastConsuming {
+					leastConsumingPotential = potential
+					leastConsuming = potentialConsuming
+				}
+			}
+			b.plan[leastConsumingPotential].add(int32(partNum))
+			partitionConsumers[partNum].memberNum = leastConsumingPotential
+		}
+	}
+
+	// Lastly, with everything assigned, we build our steal graph for
+	// balancing if needed.
+	if b.isComplex {
+		b.stealGraph = b.newGraph(
+			partitionConsumers,
+			topicPotentials,
+		)
+	}
+}
+
+// unassignedPart is a fake member number that we use to track if a partition
+// is deleted or unassigned.
+const unassignedPart = math.MaxUint16 - 1
+
+// tryRestickyStales is a pre-assigning step where, for all stale members,
+// we give partitions back to them if the partition is currently on an
+// overloaded member or unassigned.
+//
+// This effectively re-stickies members before we balance further.
+func (b *balancer) tryRestickyStales(
+	topicPotentials [][]uint16,
+	partitionConsumers []partitionConsumer,
+) {
+	for staleNum, lastOwnerNum := range b.stales {
+		potentials := topicPotentials[b.partOwners[staleNum]] // there must be a potential consumer if we are here
+		var canTake bool
+		for _, potentialNum := range potentials {
+			if potentialNum == lastOwnerNum {
+				canTake = true
+			}
+		}
+		if !canTake {
+			return
+		}
+
+		// The part cannot be unassigned here; a stale member
+		// would just have it.
The part also cannot be deleted; + // if it is, there are no potential consumers and the + // logic above continues before getting here. The part + // must be on a different owner (cannot be lastOwner), + // otherwise it would not be a lastOwner in the stales + // map; it would just be the current owner. + currentOwner := partitionConsumers[staleNum].memberNum + lastOwnerPartitions := &b.plan[lastOwnerNum] + currentOwnerPartitions := &b.plan[currentOwner] + if len(*lastOwnerPartitions)+1 < len(*currentOwnerPartitions) { + currentOwnerPartitions.remove(staleNum) + lastOwnerPartitions.add(staleNum) + } + } +} + +type partitionConsumer struct { + memberNum uint16 + originalNum uint16 +} + +// While assigning, we keep members per topic heap sorted by the number of +// partitions they are currently consuming. This allows us to have quick +// assignment vs. always scanning to see the min loaded member. +// +// Our process is to init the heap and then always fix the 0th index after +// making it larger, so we only ever need to sift down. +type membersByPartitions struct { + members []uint16 + plan membersPartitions +} + +func (m *membersByPartitions) init() { + n := len(m.members) + for i := n/2 - 1; i >= 0; i-- { + m.down(i, n) + } +} + +func (m *membersByPartitions) fix0() { + m.down(0, len(m.members)) +} + +func (m *membersByPartitions) down(i0, n int) { + node := i0 + for { + left := 2*node + 1 + if left >= n || left < 0 { // left < 0 after int overflow + break + } + swap := left // left child + swapLen := len(m.plan[m.members[left]]) + if right := left + 1; right < n { + if rightLen := len(m.plan[m.members[right]]); rightLen < swapLen { + swapLen = rightLen + swap = right + } + } + nodeLen := len(m.plan[m.members[node]]) + if nodeLen <= swapLen { + break + } + m.members[node], m.members[swap] = m.members[swap], m.members[node] + node = swap + } +} + +// balance loops trying to move partitions until the plan is as balanced +// as it can be. +func (b *balancer) balance() { + if b.isComplex { + b.balanceComplex() + return + } + + // If all partitions are consumed equally, we have a very easy + // algorithm to balance: while the min and max levels are separated + // by over two, take from the top and give to the bottom. + min := b.planByNumPartitions.min().item + max := b.planByNumPartitions.max().item + for { + if max.level <= min.level+1 { + return + } + + minMems := min.members + maxMems := max.members + for len(minMems) > 0 && len(maxMems) > 0 { + dst := minMems[0] + src := maxMems[0] + + minMems = minMems[1:] + maxMems = maxMems[1:] + + srcPartitions := &b.plan[src] + dstPartitions := &b.plan[dst] + + dstPartitions.add(srcPartitions.takeEnd()) + } + + nextUp := b.findLevel(min.level + 1) + nextDown := b.findLevel(max.level - 1) + + endOfUps := len(min.members) - len(minMems) + endOfDowns := len(max.members) - len(maxMems) + + nextUp.members = append(nextUp.members, min.members[:endOfUps]...) + nextDown.members = append(nextDown.members, max.members[:endOfDowns]...) 
+
+		min.members = min.members[endOfUps:]
+		max.members = max.members[endOfDowns:]
+
+		if len(min.members) == 0 {
+			b.planByNumPartitions.delete(b.planByNumPartitions.min())
+			min = b.planByNumPartitions.min().item
+		}
+		if len(max.members) == 0 {
+			b.planByNumPartitions.delete(b.planByNumPartitions.max())
+			max = b.planByNumPartitions.max().item
+		}
+	}
+}
+
+func (b *balancer) balanceComplex() {
+	for min := b.planByNumPartitions.min(); b.planByNumPartitions.size > 1; min = b.planByNumPartitions.min() {
+		level := min.item
+		// If this max level is within one of this level, then nothing
+		// can steal down so we return early.
+		max := b.planByNumPartitions.max().item
+		if max.level <= level.level+1 {
+			return
+		}
+		// We continually loop over this level until every member is
+		// static (deleted) or bumped up a level.
+		for len(level.members) > 0 {
+			memberNum := level.members[0]
+			if stealPath, found := b.stealGraph.findSteal(memberNum); found {
+				for _, segment := range stealPath {
+					b.reassignPartition(segment.src, segment.dst, segment.part)
+				}
+				if len(max.members) == 0 {
+					break
+				}
+				continue
+			}
+
+			// If we could not find a steal path, this
+			// member is static (it will never grow).
+			level.removeMember(memberNum)
+			if len(level.members) == 0 {
+				b.planByNumPartitions.delete(b.planByNumPartitions.min())
+			}
+		}
+	}
+}
+
+func (b *balancer) reassignPartition(src, dst uint16, partNum int32) {
+	srcPartitions := &b.plan[src]
+	dstPartitions := &b.plan[dst]
+
+	oldSrcLevel := len(*srcPartitions)
+	oldDstLevel := len(*dstPartitions)
+
+	srcPartitions.remove(partNum)
+	dstPartitions.add(partNum)
+
+	b.fixMemberLevel(
+		b.planByNumPartitions.findWith(func(n *partitionLevel) int {
+			return oldSrcLevel - n.level
+		}),
+		src,
+		*srcPartitions,
+	)
+	b.fixMemberLevel(
+		b.planByNumPartitions.findWith(func(n *partitionLevel) int {
+			return oldDstLevel - n.level
+		}),
+		dst,
+		*dstPartitions,
+	)
+
+	b.stealGraph.changeOwnership(partNum, dst)
+}
diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/logger.go b/vendor/github.com/twmb/franz-go/pkg/kgo/logger.go
new file mode 100644
index 00000000000..bfc5dc0dd6b
--- /dev/null
+++ b/vendor/github.com/twmb/franz-go/pkg/kgo/logger.go
@@ -0,0 +1,124 @@
+package kgo
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"strings"
+)
+
+// LogLevel designates which level the logger should log at.
+type LogLevel int8
+
+const (
+	// LogLevelNone disables logging.
+	LogLevelNone LogLevel = iota
+	// LogLevelError logs all errors. Generally, these should not happen.
+	LogLevelError
+	// LogLevelWarn logs all warnings, such as request failures.
+	LogLevelWarn
+	// LogLevelInfo logs informational messages, such as requests. This is
+	// usually the default log level.
+	LogLevelInfo
+	// LogLevelDebug logs verbose information, and is usually not used in
+	// production.
+	LogLevelDebug
+)
+
+func (l LogLevel) String() string {
+	switch l {
+	case LogLevelError:
+		return "ERROR"
+	case LogLevelWarn:
+		return "WARN"
+	case LogLevelInfo:
+		return "INFO"
+	case LogLevelDebug:
+		return "DEBUG"
+	default:
+		return "NONE"
+	}
+}
+
+// Logger is used to log informational messages.
+type Logger interface {
+	// Level returns the log level to log at.
+	//
+	// Implementations can change their log level on the fly, but this
+	// function must be safe to call concurrently.
+	Level() LogLevel
+
+	// Log logs a message with key, value pair arguments for the given log
+	// level. Keys are always strings, while values can be any type.
+	//
+	// This must be safe to call concurrently.
+ Log(level LogLevel, msg string, keyvals ...any) +} + +// BasicLogger returns a logger that will print to dst in the following format: +// +// prefix [LEVEL] message; key: val, key: val +// +// prefixFn is optional; if non-nil, it is called for a per-message prefix. +// +// Writes to dst are not checked for errors. +func BasicLogger(dst io.Writer, level LogLevel, prefixFn func() string) Logger { + return &basicLogger{dst, level, prefixFn} +} + +type basicLogger struct { + dst io.Writer + level LogLevel + pfxFn func() string +} + +func (b *basicLogger) Level() LogLevel { return b.level } +func (b *basicLogger) Log(level LogLevel, msg string, keyvals ...any) { + buf := byteBuffers.Get().(*bytes.Buffer) + defer byteBuffers.Put(buf) + + buf.Reset() + if b.pfxFn != nil { + buf.WriteString(b.pfxFn()) + } + buf.WriteByte('[') + buf.WriteString(level.String()) + buf.WriteString("] ") + buf.WriteString(msg) + + if len(keyvals) > 0 { + buf.WriteString("; ") + format := strings.Repeat("%v: %v, ", len(keyvals)/2) + format = format[:len(format)-2] // trim trailing comma and space + fmt.Fprintf(buf, format, keyvals...) + } + + buf.WriteByte('\n') + b.dst.Write(buf.Bytes()) +} + +// nopLogger, the default logger, drops everything. +type nopLogger struct{} + +func (*nopLogger) Level() LogLevel { return LogLevelNone } +func (*nopLogger) Log(LogLevel, string, ...any) { +} + +// wrappedLogger wraps the config logger for convenience at logging callsites. +type wrappedLogger struct { + inner Logger +} + +func (w *wrappedLogger) Level() LogLevel { + if w.inner == nil { + return LogLevelNone + } + return w.inner.Level() +} + +func (w *wrappedLogger) Log(level LogLevel, msg string, keyvals ...any) { + if w.Level() < level { + return + } + w.inner.Log(level, msg, keyvals...) +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/metadata.go b/vendor/github.com/twmb/franz-go/pkg/kgo/metadata.go new file mode 100644 index 00000000000..cbe4b5adc5a --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/metadata.go @@ -0,0 +1,975 @@ +package kgo + +import ( + "context" + "errors" + "fmt" + "sort" + "strings" + "sync" + "time" + + "github.com/twmb/franz-go/pkg/kerr" +) + +type metawait struct { + mu sync.Mutex + c *sync.Cond + lastUpdate time.Time +} + +func (m *metawait) init() { m.c = sync.NewCond(&m.mu) } +func (m *metawait) signal() { + m.mu.Lock() + m.lastUpdate = time.Now() + m.mu.Unlock() + m.c.Broadcast() +} + +// ForceMetadataRefresh triggers the client to update the metadata that is +// currently used for producing & consuming. +// +// Internally, the client already properly triggers metadata updates whenever a +// partition is discovered to be out of date (leader moved, epoch is old, etc). +// However, when partitions are added to a topic through a CreatePartitions +// request, it may take up to MetadataMaxAge for the new partitions to be +// discovered. In this case, you may want to forcefully refresh metadata +// manually to discover these new partitions sooner. +func (cl *Client) ForceMetadataRefresh() { + cl.triggerUpdateMetadataNow("from user ForceMetadataRefresh") +} + +// PartitionLeader returns the given topic partition's leader, leader epoch and +// load error. This returns -1, -1, nil if the partition has not been loaded. 
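+//
+// For example (editor's sketch; the topic name is hypothetical):
+//
+//	leader, epoch, err := cl.PartitionLeader("events", 0)
+//	switch {
+//	case leader == -1 && epoch == -1 && err == nil:
+//		// partition metadata has not been loaded yet
+//	case err != nil:
+//		// the topic or partition has a load error
+//	default:
+//		// leader and epoch are usable
+//	}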
+func (cl *Client) PartitionLeader(topic string, partition int32) (leader, leaderEpoch int32, err error) { + if partition < 0 { + return -1, -1, errors.New("invalid negative partition") + } + + var t *topicPartitions + + m := cl.producer.topics.load() + if len(m) > 0 { + t = m[topic] + } + if t == nil { + if cl.consumer.g != nil { + if m = cl.consumer.g.tps.load(); len(m) > 0 { + t = m[topic] + } + } else if cl.consumer.d != nil { + if m = cl.consumer.d.tps.load(); len(m) > 0 { + t = m[topic] + } + } + if t == nil { + return -1, -1, nil + } + } + + tv := t.load() + if len(tv.partitions) <= int(partition) { + return -1, -1, tv.loadErr + } + p := tv.partitions[partition] + return p.leader, p.leaderEpoch, p.loadErr +} + +// waitmeta returns immediately if metadata was updated within the last second, +// otherwise this waits for up to wait for a metadata update to complete. +func (cl *Client) waitmeta(ctx context.Context, wait time.Duration, why string) { + now := time.Now() + + cl.metawait.mu.Lock() + if now.Sub(cl.metawait.lastUpdate) < cl.cfg.metadataMinAge { + cl.metawait.mu.Unlock() + return + } + cl.metawait.mu.Unlock() + + cl.triggerUpdateMetadataNow(why) + + quit := false + done := make(chan struct{}) + timeout := time.NewTimer(wait) + defer timeout.Stop() + + go func() { + defer close(done) + cl.metawait.mu.Lock() + defer cl.metawait.mu.Unlock() + + for !quit { + if now.Sub(cl.metawait.lastUpdate) < cl.cfg.metadataMinAge { + return + } + cl.metawait.c.Wait() + } + }() + + select { + case <-done: + return + case <-timeout.C: + case <-ctx.Done(): + case <-cl.ctx.Done(): + } + + cl.metawait.mu.Lock() + quit = true + cl.metawait.mu.Unlock() + cl.metawait.c.Broadcast() +} + +func (cl *Client) triggerUpdateMetadata(must bool, why string) bool { + if !must { + cl.metawait.mu.Lock() + defer cl.metawait.mu.Unlock() + if time.Since(cl.metawait.lastUpdate) < cl.cfg.metadataMinAge { + return false + } + } + + select { + case cl.updateMetadataCh <- why: + default: + } + return true +} + +func (cl *Client) triggerUpdateMetadataNow(why string) { + select { + case cl.updateMetadataNowCh <- why: + default: + } +} + +func (cl *Client) blockingMetadataFn(fn func()) { + var wg sync.WaitGroup + wg.Add(1) + waitfn := func() { + defer wg.Done() + fn() + } + select { + case cl.blockingMetadataFnCh <- waitfn: + wg.Wait() + case <-cl.ctx.Done(): + } +} + +// updateMetadataLoop updates metadata whenever the update ticker ticks, +// or whenever deliberately triggered. +func (cl *Client) updateMetadataLoop() { + defer close(cl.metadone) + var consecutiveErrors int + var lastAt time.Time + + ticker := time.NewTicker(cl.cfg.metadataMaxAge) + defer ticker.Stop() +loop: + for { + var now bool + select { + case <-cl.ctx.Done(): + return + case <-ticker.C: + // We do not log on the standard update case. 
+		case why := <-cl.updateMetadataCh:
+			cl.cfg.logger.Log(LogLevelInfo, "metadata update triggered", "why", why)
+		case why := <-cl.updateMetadataNowCh:
+			cl.cfg.logger.Log(LogLevelInfo, "immediate metadata update triggered", "why", why)
+			now = true
+		case fn := <-cl.blockingMetadataFnCh:
+			fn()
+			continue loop
+		}
+
+		var nowTries int
+	start:
+		nowTries++
+		if !now {
+			if wait := cl.cfg.metadataMinAge - time.Since(lastAt); wait > 0 {
+				timer := time.NewTimer(wait)
+			prewait:
+				select {
+				case <-cl.ctx.Done():
+					timer.Stop()
+					return
+				case why := <-cl.updateMetadataNowCh:
+					timer.Stop()
+					cl.cfg.logger.Log(LogLevelInfo, "immediate metadata update triggered, bypassing normal wait", "why", why)
+				case <-timer.C:
+				case fn := <-cl.blockingMetadataFnCh:
+					fn()
+					goto prewait
+				}
+			}
+		}
+
+		// Even with an "update now", we sleep just a bit to allow some
+		// potential pile-on now triggers.
+		time.Sleep(time.Until(lastAt.Add(10 * time.Millisecond)))
+
+		// Drain any refires that occurred during our waiting.
+	out:
+		for {
+			select {
+			case <-cl.updateMetadataCh:
+			case <-cl.updateMetadataNowCh:
+			case fn := <-cl.blockingMetadataFnCh:
+				fn()
+			default:
+				break out
+			}
+		}
+
+		retryWhy, err := cl.updateMetadata()
+		if retryWhy != nil || err != nil {
+			// If err is non-nil, the metadata request failed
+			// itself and already retried 3x; we do not loop more.
+			//
+			// If err is nil, a topic or partition had a load
+			// error and is perhaps still being created. We retry a
+			// few more times to give Kafka a chance to figure
+			// things out. By default this will put us at 2s of
+			// looping+waiting (250ms per wait, 8x), and if things
+			// still fail we will fall into the slower update below
+			// which waits (default) 5s between tries.
+			if now && err == nil && nowTries < 8 {
+				wait := 250 * time.Millisecond
+				if cl.cfg.metadataMinAge < wait {
+					wait = cl.cfg.metadataMinAge
+				}
+				cl.cfg.logger.Log(LogLevelDebug, "immediate metadata update had inner errors, re-updating",
+					"errors", retryWhy.reason(""),
+					"update_after", wait,
+				)
+				timer := time.NewTimer(wait)
+			quickbackoff:
+				select {
+				case <-cl.ctx.Done():
+					timer.Stop()
+					return
+				case <-timer.C:
+				case fn := <-cl.blockingMetadataFnCh:
+					fn()
+					goto quickbackoff
+				}
+				goto start
+			}
+			if err != nil {
+				cl.triggerUpdateMetadata(true, fmt.Sprintf("re-updating metadata due to err: %s", err))
+			} else {
+				cl.triggerUpdateMetadata(true, retryWhy.reason("re-updating due to inner errors"))
+			}
+		}
+		if err == nil {
+			cl.metawait.signal()
+			cl.consumer.doOnMetadataUpdate()
+			lastAt = time.Now()
+			consecutiveErrors = 0
+			continue
+		}
+
+		consecutiveErrors++
+		after := time.NewTimer(cl.cfg.retryBackoff(consecutiveErrors))
+	backoff:
+		select {
+		case <-cl.ctx.Done():
+			after.Stop()
+			return
+		case <-after.C:
+		case fn := <-cl.blockingMetadataFnCh:
+			fn()
+			goto backoff
+		}
+	}
+}
+
+var errMissingTopic = errors.New("topic_missing")
+
+// Updates all producer and consumer partition data, returning whether a new
+// update needs scheduling or if an error occurred.
+//
+// The producer and consumer use different topic maps and underlying
+// topicPartitionsData pointers, but we update those underlying pointers
+// equally.
+func (cl *Client) updateMetadata() (retryWhy multiUpdateWhy, err error) {
+	var (
+		tpsProducerLoad = cl.producer.topics.load()
+		tpsConsumer     *topicsPartitions
+		groupExternal   *groupExternal
+		all             = cl.cfg.regex
+		reqTopics       []string
+	)
+	c := &cl.consumer
+	switch {
+	case c.g != nil:
+		tpsConsumer = c.g.tps
+		groupExternal = c.g.loadExternal()
+	case c.d != nil:
+		tpsConsumer = c.d.tps
+	}
+
+	if !all {
+		reqTopicsSet := make(map[string]struct{})
+		for _, m := range []map[string]*topicPartitions{
+			tpsProducerLoad,
+			tpsConsumer.load(),
+		} {
+			for topic := range m {
+				reqTopicsSet[topic] = struct{}{}
+			}
+		}
+		groupExternal.eachTopic(func(t string) {
+			reqTopicsSet[t] = struct{}{}
+		})
+		reqTopics = make([]string, 0, len(reqTopicsSet))
+		for topic := range reqTopicsSet {
+			reqTopics = append(reqTopics, topic)
+		}
+	}
+
+	latest, err := cl.fetchTopicMetadata(all, reqTopics)
+	if err != nil {
+		cl.bumpMetadataFailForTopics( // bump load failures for all topics
+			tpsProducerLoad,
+			err,
+		)
+		return nil, err
+	}
+	groupExternal.updateLatest(latest)
+
+	// If we are consuming with regex and fetched all topics, the metadata
+	// may have returned topics the consumer is not yet tracking. We ensure
+	// that we will store the topics at the end of our metadata update.
+	tpsConsumerLoad := tpsConsumer.load()
+	if all {
+		allTopics := make([]string, 0, len(latest))
+		for topic := range latest {
+			allTopics = append(allTopics, topic)
+		}
+
+		// We filter out topics that will not match any of our regexes.
+		// This ensures that the `tps` field does not contain topics
+		// we will never use (the client works with misc. topics in
+		// there, but it's better to avoid it -- and allows us to use
+		// `tps` in GetConsumeTopics).
+		allTopics = c.filterMetadataAllTopics(allTopics)
+
+		tpsConsumerLoad = tpsConsumer.ensureTopics(allTopics)
+		defer tpsConsumer.storeData(tpsConsumerLoad)
+
+		// For regex consuming, if a topic is not returned in the
+		// response for at least missingTopicDelete from when we
+		// first discovered it, we assume the topic has been deleted
+		// and purge it. We allow for missingTopicDelete because (in
+		// testing locally) Kafka can originally broadcast a newly
+		// created topic exists and then fail to broadcast that info
+		// again for a while.
+		var purgeTopics []string
+		for topic, tps := range tpsConsumerLoad {
+			if _, ok := latest[topic]; !ok {
+				if td := tps.load(); td.when != 0 && time.Since(time.Unix(td.when, 0)) > cl.cfg.missingTopicDelete {
+					purgeTopics = append(purgeTopics, td.topic)
+				} else {
+					retryWhy.add(topic, -1, errMissingTopic)
+				}
+			}
+		}
+		if len(purgeTopics) > 0 {
+			// We have to `go` because Purge issues a blocking
+			// metadata fn; this will wait for our current
+			// execution to finish then purge.
+			cl.cfg.logger.Log(LogLevelInfo, "regex consumer purging topics that were previously consumed because they are missing in a metadata response, we are assuming they are deleted", "topics", purgeTopics)
+			go cl.PurgeTopicsFromClient(purgeTopics...)
+		}
+	}
+
+	css := &consumerSessionStopper{cl: cl}
+	defer css.maybeRestart()
+
+	var missingProduceTopics []*topicPartitions
+	for _, m := range []struct {
+		priors    map[string]*topicPartitions
+		isProduce bool
+	}{
+		{tpsProducerLoad, true},
+		{tpsConsumerLoad, false},
+	} {
+		for topic, priorParts := range m.priors {
+			newParts, exists := latest[topic]
+			if !exists {
+				if m.isProduce {
+					missingProduceTopics = append(missingProduceTopics, priorParts)
+				}
+				continue
+			}
+			cl.mergeTopicPartitions(
+				topic,
+				priorParts,
+				newParts,
+				m.isProduce,
+				css,
+				&retryWhy,
+			)
+		}
+	}
+
+	// For all produce topics that were missing, we want to bump their
+	// retries to note that a failure happened. However, if we are regex
+	// consuming, then it is possible in a rare scenario for the broker to
+	// not return a topic that actually does exist and that we previously
+	// received a metadata response for. This is handled above for
+	// consuming; we now handle it the same way for producing.
+	if len(missingProduceTopics) > 0 {
+		var bumpFail []string
+		for _, tps := range missingProduceTopics {
+			if all {
+				if td := tps.load(); td.when != 0 && time.Since(time.Unix(td.when, 0)) > cl.cfg.missingTopicDelete {
+					bumpFail = append(bumpFail, td.topic)
+				} else {
+					retryWhy.add(td.topic, -1, errMissingTopic)
+				}
+			} else {
+				bumpFail = append(bumpFail, tps.load().topic)
+			}
+		}
+		if len(bumpFail) > 0 {
+			cl.bumpMetadataFailForTopics(
+				tpsProducerLoad,
+				fmt.Errorf("metadata request did not return topics: %v", bumpFail),
+				bumpFail...,
+			)
+		}
+	}
+
+	return retryWhy, nil
+}
+
+// We use a special structure to represent metadata before we *actually*
+// convert it to topicPartitionsData. This helps avoid any pointer reuse
+// problems because we want to keep the client's producer and consumer maps
+// completely independent. If we just returned map[string]*topicPartitionsData,
+// we could end up in some really weird pointer reuse scenario that ultimately
+// results in a bug.
+//
+// See #190 for more details, as well as the commit message introducing this.
+type metadataTopic struct { + loadErr error + isInternal bool + topic string + partitions []metadataPartition +} + +func (mt *metadataTopic) newPartitions(cl *Client, isProduce bool) *topicPartitionsData { + n := len(mt.partitions) + ps := &topicPartitionsData{ + loadErr: mt.loadErr, + isInternal: mt.isInternal, + partitions: make([]*topicPartition, 0, n), + writablePartitions: make([]*topicPartition, 0, n), + topic: mt.topic, + when: time.Now().Unix(), + } + for i := range mt.partitions { + p := mt.partitions[i].newPartition(cl, isProduce) + ps.partitions = append(ps.partitions, p) + if p.loadErr == nil { + ps.writablePartitions = append(ps.writablePartitions, p) + } + } + return ps +} + +type metadataPartition struct { + topic string + topicID [16]byte + partition int32 + loadErr int16 + leader int32 + leaderEpoch int32 + sns sinkAndSource +} + +func (mp metadataPartition) newPartition(cl *Client, isProduce bool) *topicPartition { + td := topicPartitionData{ + leader: mp.leader, + leaderEpoch: mp.leaderEpoch, + } + p := &topicPartition{ + loadErr: kerr.ErrorForCode(mp.loadErr), + topicPartitionData: td, + } + if isProduce { + p.records = &recBuf{ + cl: cl, + topic: mp.topic, + partition: mp.partition, + maxRecordBatchBytes: cl.maxRecordBatchBytesForTopic(mp.topic), + recBufsIdx: -1, + failing: mp.loadErr != 0, + sink: mp.sns.sink, + topicPartitionData: td, + lastAckedOffset: -1, + } + } else { + p.cursor = &cursor{ + topic: mp.topic, + topicID: mp.topicID, + partition: mp.partition, + keepControl: cl.cfg.keepControl, + cursorsIdx: -1, + source: mp.sns.source, + topicPartitionData: td, + cursorOffset: cursorOffset{ + offset: -1, // required to not consume until needed + lastConsumedEpoch: -1, // required sentinel + }, + } + } + return p +} + +// fetchTopicMetadata fetches metadata for all reqTopics and returns new +// topicPartitionsData for each topic. +func (cl *Client) fetchTopicMetadata(all bool, reqTopics []string) (map[string]*metadataTopic, error) { + _, meta, err := cl.fetchMetadataForTopics(cl.ctx, all, reqTopics) + if err != nil { + return nil, err + } + + // Since we've fetched the metadata for some topics we can optimistically cache it + // for mapped metadata too. This may reduce the number of Metadata requests issued + // by the client. + cl.storeCachedMappedMetadata(meta, nil) + + topics := make(map[string]*metadataTopic, len(meta.Topics)) + + // Even if metadata returns a leader epoch, we do not use it unless we + // can validate it per OffsetForLeaderEpoch. Some brokers may have an + // odd set of support. + useLeaderEpoch := cl.supportsOffsetForLeaderEpoch() + + for i := range meta.Topics { + topicMeta := &meta.Topics[i] + if topicMeta.Topic == nil { + cl.cfg.logger.Log(LogLevelWarn, "metadata response contained nil topic name even though we did not request with topic IDs, skipping") + continue + } + topic := *topicMeta.Topic + + mt := &metadataTopic{ + loadErr: kerr.ErrorForCode(topicMeta.ErrorCode), + isInternal: topicMeta.IsInternal, + topic: topic, + partitions: make([]metadataPartition, 0, len(topicMeta.Partitions)), + } + + topics[topic] = mt + + if mt.loadErr != nil { + continue + } + + // This 249 limit is in Kafka itself, we copy it here to rely on it while producing. + if len(topic) > 249 { + mt.loadErr = fmt.Errorf("invalid long topic name of (len %d) greater than max allowed 249", len(topic)) + continue + } + + // Kafka partitions are strictly increasing from 0. 
We enforce
+		// that here; if any partition is missing, we consider this
+		// topic a load failure.
+		sort.Slice(topicMeta.Partitions, func(i, j int) bool {
+			return topicMeta.Partitions[i].Partition < topicMeta.Partitions[j].Partition
+		})
+		for i := range topicMeta.Partitions {
+			if got := topicMeta.Partitions[i].Partition; got != int32(i) {
+				mt.loadErr = fmt.Errorf("kafka did not reply with a comprehensive set of partitions for a topic; we expected partition %d but saw %d", i, got)
+				break
+			}
+		}
+
+		if mt.loadErr != nil {
+			continue
+		}
+
+		for i := range topicMeta.Partitions {
+			partMeta := &topicMeta.Partitions[i]
+			leaderEpoch := partMeta.LeaderEpoch
+			if meta.Version < 7 || !useLeaderEpoch {
+				leaderEpoch = -1
+			}
+			mp := metadataPartition{
+				topic:       topic,
+				topicID:     topicMeta.TopicID,
+				partition:   partMeta.Partition,
+				loadErr:     partMeta.ErrorCode,
+				leader:      partMeta.Leader,
+				leaderEpoch: leaderEpoch,
+			}
+			if mp.loadErr != 0 {
+				mp.leader = unknownSeedID(0) // ensure every recBuf & cursor can use a sink or source
+			}
+			cl.sinksAndSourcesMu.Lock()
+			sns, exists := cl.sinksAndSources[mp.leader]
+			if !exists {
+				sns = sinkAndSource{
+					sink:   cl.newSink(mp.leader),
+					source: cl.newSource(mp.leader),
+				}
+				cl.sinksAndSources[mp.leader] = sns
+			}
+			for _, replica := range partMeta.Replicas {
+				if replica < 0 {
+					continue
+				}
+				if _, exists = cl.sinksAndSources[replica]; !exists {
+					cl.sinksAndSources[replica] = sinkAndSource{
+						sink:   cl.newSink(replica),
+						source: cl.newSource(replica),
+					}
+				}
+			}
+			cl.sinksAndSourcesMu.Unlock()
+			mp.sns = sns
+			mt.partitions = append(mt.partitions, mp)
+		}
+	}
+
+	return topics, nil
+}
+
+// mergeTopicPartitions merges a new topicPartition into an old and returns
+// whether the metadata update that caused this merge needs to be retried.
+//
+// Retries are necessary if the topic or any partition has a retryable error.
+func (cl *Client) mergeTopicPartitions(
+	topic string,
+	l *topicPartitions,
+	mt *metadataTopic,
+	isProduce bool,
+	css *consumerSessionStopper,
+	retryWhy *multiUpdateWhy,
+) {
+	lv := *l.load() // copy so our field writes do not collide with reads
+
+	r := mt.newPartitions(cl, isProduce)
+
+	// Producers must store the update through a special function that
+	// manages unknown topic waiting, whereas consumers can simply
+	// store the update.
+	if isProduce {
+		hadPartitions := len(lv.partitions) != 0
+		defer func() { cl.storePartitionsUpdate(topic, l, &lv, hadPartitions) }()
+	} else {
+		defer l.v.Store(&lv)
+	}
+
+	lv.loadErr = r.loadErr
+	lv.isInternal = r.isInternal
+	lv.topic = r.topic
+	if lv.when == 0 {
+		lv.when = r.when
+	}
+
+	// If the load had an error for the entire topic, we set the load error
+	// but keep our stale partition information. For anything being
+	// produced, we bump the respective error or fail everything. There is
+	// nothing to be done in a consumer.
+	if r.loadErr != nil {
+		if isProduce {
+			for _, topicPartition := range lv.partitions {
+				topicPartition.records.bumpRepeatedLoadErr(lv.loadErr)
+			}
+		} else if !kerr.IsRetriable(r.loadErr) || cl.cfg.keepRetryableFetchErrors {
+			cl.consumer.addFakeReadyForDraining(topic, -1, r.loadErr, "metadata refresh has a load error on this entire topic")
+		}
+		retryWhy.add(topic, -1, r.loadErr)
+		return
+	}
+
+	// Before the atomic update, we keep the latest partitions / writable
+	// partitions. All updates happen in r's slices, and we keep the
+	// results and store them in lv.
+ defer func() { + lv.partitions = r.partitions + lv.writablePartitions = r.writablePartitions + }() + + // We should have no deleted partitions, but there are two cases where + // we could. + // + // 1) an admin added partitions, we saw, then we re-fetched metadata + // from an out of date broker that did not have the new partitions + // + // 2) a topic was deleted and recreated with fewer partitions + // + // Both of these scenarios should be rare to non-existent, and we do + // nothing if we encounter them. + + // Migrating topicPartitions is a little tricky because we have to + // worry about underlying pointers that may currently be loaded. + for part, oldTP := range lv.partitions { + exists := part < len(r.partitions) + if !exists { + // This is the "deleted" case; see the comment above. + // + // We need to keep the partition around. For producing, + // the partition could be loaded and a record could be + // added to it after we bump the load error. For + // consuming, the partition is part of a group or part + // of what was loaded for direct consuming. + // + // We only clear a partition if it is purged from the + // client (which can happen automatically for consumers + // if the user opted into ConsumeRecreatedTopics). + dup := *oldTP + newTP := &dup + newTP.loadErr = errMissingMetadataPartition + + r.partitions = append(r.partitions, newTP) + + cl.cfg.logger.Log(LogLevelDebug, "metadata update is missing partition in topic, we are keeping the partition around for safety -- use PurgeTopicsFromClient if you wish to remove the topic", + "topic", topic, + "partition", part, + ) + if isProduce { + oldTP.records.bumpRepeatedLoadErr(errMissingMetadataPartition) + } + retryWhy.add(topic, int32(part), errMissingMetadataPartition) + continue + } + newTP := r.partitions[part] + + // Like above for the entire topic, an individual partition + // can have a load error. Unlike for the topic, individual + // partition errors are always retryable. + // + // If the load errored, we keep all old information minus the + // load error itself (the new load will have no information). + if newTP.loadErr != nil { + err := newTP.loadErr + *newTP = *oldTP + newTP.loadErr = err + if isProduce { + newTP.records.bumpRepeatedLoadErr(newTP.loadErr) + } else if !kerr.IsRetriable(newTP.loadErr) || cl.cfg.keepRetryableFetchErrors { + cl.consumer.addFakeReadyForDraining(topic, int32(part), newTP.loadErr, "metadata refresh has a load error on this partition") + } + retryWhy.add(topic, int32(part), newTP.loadErr) + continue + } + + // If the new partition has an older leader epoch, then we + // fetched from an out of date broker. We just keep the old + // information. + if newTP.leaderEpoch < oldTP.leaderEpoch { + // If we repeatedly rewind, then perhaps the cluster + // entered some bad state and lost forward progress. + // We will log & allow the rewind to allow the client + // to continue; other requests may encounter fenced + // epoch errors (and respectively recover). + // + // Five is a pretty low amount of retries, but since + // we iterate through known brokers, this basically + // means we keep stale metadata if five brokers all + // agree things rewound. 
+ const maxEpochRewinds = 5 + if oldTP.epochRewinds < maxEpochRewinds { + cl.cfg.logger.Log(LogLevelDebug, "metadata leader epoch went backwards, ignoring update", + "topic", topic, + "partition", part, + "old_leader_epoch", oldTP.leaderEpoch, + "new_leader_epoch", newTP.leaderEpoch, + "current_num_rewinds", oldTP.epochRewinds+1, + ) + *newTP = *oldTP + newTP.epochRewinds++ + retryWhy.add(topic, int32(part), errEpochRewind) + continue + } + + cl.cfg.logger.Log(LogLevelInfo, "metadata leader epoch went backwards repeatedly, we are now keeping the metadata to allow forward progress", + "topic", topic, + "partition", part, + "old_leader_epoch", oldTP.leaderEpoch, + "new_leader_epoch", newTP.leaderEpoch, + ) + } + + if !isProduce { + var noID [16]byte + if newTP.cursor.topicID == noID && oldTP.cursor.topicID != noID { + cl.cfg.logger.Log(LogLevelWarn, "metadata update is missing the topic ID when we previously had one, ignoring update", + "topic", topic, + "partition", part, + ) + retryWhy.add(topic, int32(part), errMissingTopicID) + continue + } + } + + // If the tp data is the same, we simply copy over the records + // and cursor pointers. + // + // If the tp data equals the old, then the sink / source is the + // same, because the sink/source is from the tp leader. + if newTP.topicPartitionData == oldTP.topicPartitionData { + cl.cfg.logger.Log(LogLevelDebug, "metadata refresh has identical topic partition data", + "topic", topic, + "partition", part, + "leader", newTP.leader, + "leader_epoch", newTP.leaderEpoch, + ) + if isProduce { + newTP.records = oldTP.records + newTP.records.clearFailing() // always clear failing state for producing after meta update + } else { + newTP.cursor = oldTP.cursor // unlike records, there is no failing state for a cursor + } + } else { + cl.cfg.logger.Log(LogLevelDebug, "metadata refresh topic partition data changed", + "topic", topic, + "partition", part, + "new_leader", newTP.leader, + "new_leader_epoch", newTP.leaderEpoch, + "old_leader", oldTP.leader, + "old_leader_epoch", oldTP.leaderEpoch, + ) + if isProduce { + oldTP.migrateProductionTo(newTP) // migration clears failing state + } else { + oldTP.migrateCursorTo(newTP, css) + } + } + } + + // For any partitions **not currently in use**, we need to add them to + // the sink or source. If they are in use, they could be getting + // managed or moved by the sink or source itself, so we should not + // check the index field (which may be concurrently modified). + if len(lv.partitions) > len(r.partitions) { + return + } + newPartitions := r.partitions[len(lv.partitions):] + + // Anything left with a negative recBufsIdx / cursorsIdx is a new topic + // partition and must be added to the sink / source. 
+	for _, newTP := range newPartitions {
+		if isProduce && newTP.records.recBufsIdx == -1 {
+			newTP.records.sink.addRecBuf(newTP.records)
+		} else if !isProduce && newTP.cursor.cursorsIdx == -1 {
+			newTP.cursor.source.addCursor(newTP.cursor)
+		}
+	}
+}
+
+var (
+	errEpochRewind    = errors.New("epoch rewind")
+	errMissingTopicID = errors.New("missing topic ID")
+)
+
+type multiUpdateWhy map[kerrOrString]map[string]map[int32]struct{}
+
+type kerrOrString struct {
+	k *kerr.Error
+	s string
+}
+
+func (m *multiUpdateWhy) isOnly(err error) bool {
+	if m == nil {
+		return false
+	}
+	for e := range *m {
+		if !errors.Is(err, e.k) {
+			return false
+		}
+	}
+	return true
+}
+
+func (m *multiUpdateWhy) add(t string, p int32, err error) {
+	if err == nil {
+		return
+	}
+
+	if *m == nil {
+		*m = make(map[kerrOrString]map[string]map[int32]struct{})
+	}
+	var ks kerrOrString
+	if ke := (*kerr.Error)(nil); errors.As(err, &ke) {
+		ks = kerrOrString{k: ke}
+	} else {
+		ks = kerrOrString{s: err.Error()}
+	}
+
+	ts := (*m)[ks]
+	if ts == nil {
+		ts = make(map[string]map[int32]struct{})
+		(*m)[ks] = ts
+	}
+
+	ps := ts[t]
+	if ps == nil {
+		ps = make(map[int32]struct{})
+		ts[t] = ps
+	}
+	// -1 signals that the entire topic had an error.
+	if p != -1 {
+		ps[p] = struct{}{}
+	}
+}
+
+// err{topic[1 2 3] topic2[4 5 6]} err2{...}
+func (m multiUpdateWhy) reason(reason string) string {
+	if len(m) == 0 {
+		return ""
+	}
+
+	ksSorted := make([]kerrOrString, 0, len(m))
+	for err := range m {
+		ksSorted = append(ksSorted, err)
+	}
+	sort.Slice(ksSorted, func(i, j int) bool { // order by non-nil kerr's code, otherwise the string
+		l, r := ksSorted[i], ksSorted[j]
+		return l.k != nil && (r.k == nil || l.k.Code < r.k.Code) || r.k == nil && l.s < r.s
+	})
+
+	var errorStrings []string
+	for _, ks := range ksSorted {
+		ts := m[ks]
+		tsSorted := make([]string, 0, len(ts))
+		for t := range ts {
+			tsSorted = append(tsSorted, t)
+		}
+		sort.Strings(tsSorted)
+
+		var topicStrings []string
+		for _, t := range tsSorted {
+			ps := ts[t]
+			if len(ps) == 0 {
+				topicStrings = append(topicStrings, t)
+			} else {
+				psSorted := make([]int32, 0, len(ps))
+				for p := range ps {
+					psSorted = append(psSorted, p)
+				}
+				sort.Slice(psSorted, func(i, j int) bool { return psSorted[i] < psSorted[j] })
+				topicStrings = append(topicStrings, fmt.Sprintf("%s%v", t, psSorted))
+			}
+		}
+
+		if ks.k != nil {
+			errorStrings = append(errorStrings, fmt.Sprintf("%s{%s}", ks.k.Message, strings.Join(topicStrings, " ")))
+		} else {
+			errorStrings = append(errorStrings, fmt.Sprintf("%s{%s}", ks.s, strings.Join(topicStrings, " ")))
+		}
+	}
+	if reason == "" {
+		return strings.Join(errorStrings, " ")
+	}
+	return reason + ": " + strings.Join(errorStrings, " ")
+}
diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/partitioner.go b/vendor/github.com/twmb/franz-go/pkg/kgo/partitioner.go
new file mode 100644
index 00000000000..46e7d11d124
--- /dev/null
+++ b/vendor/github.com/twmb/franz-go/pkg/kgo/partitioner.go
@@ -0,0 +1,614 @@
+package kgo
+
+import (
+	"math"
+	"math/rand"
+	"time"
+
+	"github.com/twmb/franz-go/pkg/kbin"
+)
+
+// Partitioner creates topic partitioners to determine which partition messages
+// should be sent to.
+//
+// Note that a record struct is unmodified (minus a potential default topic)
+// from producing through partitioning, so you can set fields in the record
+// struct before producing to aid in partitioning with a custom partitioner.
+type Partitioner interface {
+	// ForTopic returns a partitioner for an individual topic.
It is
+	// guaranteed that only one record will use an individual topic's
+	// topicPartitioner at a time, meaning partitioning within a topic does
+	// not require locks.
+	ForTopic(string) TopicPartitioner
+}
+
+// TopicPartitioner partitions records in an individual topic.
+type TopicPartitioner interface {
+	// RequiresConsistency returns true if a record must hash to the same
+	// partition even if a partition is down.
+	// If true, a record may hash to a partition that cannot be written to
+	// and will error until the partition comes back.
+	RequiresConsistency(*Record) bool
+	// Partition determines, among a set of n partitions, which index
+	// should be chosen as the partition for r.
+	Partition(r *Record, n int) int
+}
+
+// TopicPartitionerOnNewBatch is an optional extension interface to
+// TopicPartitioner that calls OnNewBatch before any new batch is created. If
+// buffering a record would cause a new batch, OnNewBatch is called.
+//
+// This interface allows for partitioner implementations that effectively pin
+// to a partition until a new batch is created, after which the partitioner can
+// choose which next partition to use.
+type TopicPartitionerOnNewBatch interface {
+	// OnNewBatch is called when producing a record if that record would
+	// trigger a new batch on its current partition.
+	OnNewBatch()
+}
+
+// TopicBackupPartitioner is an optional extension interface to
+// TopicPartitioner that can partition by the number of records buffered.
+//
+// If a partitioner implements this interface, the Partition function will
+// never be called.
+type TopicBackupPartitioner interface {
+	TopicPartitioner
+
+	// PartitionByBackup is similar to Partition, but has an additional
+	// backupIter. This iterator will return the number of buffered records
+	// per partition index. The iterator's Next function can only be called
+	// up to n times; calling it any more will panic.
+	PartitionByBackup(r *Record, n int, backupIter TopicBackupIter) int
+}
+
+// TopicBackupIter iterates through partition indices.
+type TopicBackupIter interface {
+	// Next returns the next partition index and the total buffered records
+	// for the partition. If Rem returns 0, calling this function again
+	// will panic.
+	Next() (int, int64)
+	// Rem returns the number of elements left to iterate through.
+	Rem() int
+}
+
+////////////
+// SIMPLE // - BasicConsistent, Manual, RoundRobin
+////////////
+
+// BasicConsistentPartitioner wraps a single function to provide a Partitioner
+// and TopicPartitioner (that function is essentially a combination of
+// Partitioner.ForTopic and TopicPartitioner.Partition).
+//
+// As a minimal example, if you do not care about the topic and you set the
+// partition before producing:
+//
+//	kgo.BasicConsistentPartitioner(func(topic string) func(*Record, int) int {
+//		return func(r *Record, n int) int {
+//			return int(r.Partition)
+//		}
+//	})
+func BasicConsistentPartitioner(partition func(string) func(r *Record, n int) int) Partitioner {
+	return &basicPartitioner{partition}
+}
+
+type (
+	basicPartitioner struct {
+		fn func(string) func(*Record, int) int
+	}
+
+	basicTopicPartitioner struct {
+		fn func(*Record, int) int
+	}
+)
+
+func (b *basicPartitioner) ForTopic(t string) TopicPartitioner {
+	return &basicTopicPartitioner{b.fn(t)}
+}
+
+func (*basicTopicPartitioner) RequiresConsistency(*Record) bool { return true }
+func (b *basicTopicPartitioner) Partition(r *Record, n int) int { return b.fn(r, n) }
+
+// ManualPartitioner is a partitioner that simply returns the Partition field
+// that is already set on any record.
+//
+// Any record with an invalid partition will be immediately failed. This
+// partitioner is simply the partitioner that is demonstrated in the
+// BasicConsistentPartitioner documentation.
+func ManualPartitioner() Partitioner {
+	return BasicConsistentPartitioner(func(string) func(*Record, int) int {
+		return func(r *Record, _ int) int {
+			return int(r.Partition)
+		}
+	})
+}
+
+// RoundRobinPartitioner is a partitioner that round-robins through all
+// available partitions. This algorithm has lower throughput and causes higher
+// CPU load on brokers, but can be useful if you want to ensure an even
+// distribution of records to partitions.
+func RoundRobinPartitioner() Partitioner {
+	return new(roundRobinPartitioner)
+}
+
+type (
+	roundRobinPartitioner struct{}
+
+	roundRobinTopicPartitioner struct {
+		on int
+	}
+)
+
+func (*roundRobinPartitioner) ForTopic(string) TopicPartitioner {
+	return new(roundRobinTopicPartitioner)
+}
+
+func (*roundRobinTopicPartitioner) RequiresConsistency(*Record) bool { return false }
+func (r *roundRobinTopicPartitioner) Partition(_ *Record, n int) int {
+	if r.on >= n {
+		r.on = 0
+	}
+	ret := r.on
+	r.on++
+	return ret
+}
+
+//////////////////
+// LEAST BACKUP //
+//////////////////
+
+// LeastBackupPartitioner prioritizes partitioning by three factors, in order:
+//
+//  1. pin to the current pick until there is a new batch
+//  2. on new batch, choose the least backed up partition (the partition with
+//     the fewest buffered records)
+//  3. if multiple partitions are equally least-backed-up, choose one at random
+//
+// This algorithm prioritizes least-backed-up throughput, which may result in
+// unequal partitioning. It is likely that this algorithm will talk most to the
+// broker that it has the best connection to.
+//
+// This algorithm is resilient to brokers going down: if a few brokers die, it
+// is possible your throughput will be so high that the maximum buffered
+// records will be reached in the now-offline partitions before metadata
+// responds that the broker is offline. With the standard partitioning
+// algorithms, the only recovery is if the partition is remapped or if the
+// broker comes back online. With the least backup partitioner, downed
+// partitions will see slight backup, but then the other partitions that are
+// still accepting writes will get all of the writes and your client will not
+// be blocked.
+//
+// Under ideal scenarios (no broker / connection issues), StickyPartitioner is
+// equivalent to LeastBackupPartitioner.
This partitioner is only recommended
+// if you are a producer consistently dealing with flaky connections or
+// problematic brokers and do not mind uneven load on your brokers.
+func LeastBackupPartitioner() Partitioner {
+	return new(leastBackupPartitioner)
+}
+
+type (
+	leastBackupInput struct{ mapping []*topicPartition }
+
+	leastBackupPartitioner struct{}
+
+	leastBackupTopicPartitioner struct {
+		onPart int
+		rng    *rand.Rand
+	}
+)
+
+func (i *leastBackupInput) Next() (int, int64) {
+	last := len(i.mapping) - 1
+	buffered := i.mapping[last].records.buffered.Load()
+	i.mapping = i.mapping[:last]
+	return last, buffered
+}
+
+func (i *leastBackupInput) Rem() int {
+	return len(i.mapping)
+}
+
+func (*leastBackupPartitioner) ForTopic(string) TopicPartitioner {
+	return &leastBackupTopicPartitioner{
+		onPart: -1,
+		rng:    rand.New(rand.NewSource(time.Now().UnixNano())),
+	}
+}
+
+func (p *leastBackupTopicPartitioner) OnNewBatch()                    { p.onPart = -1 }
+func (*leastBackupTopicPartitioner) RequiresConsistency(*Record) bool { return false }
+func (*leastBackupTopicPartitioner) Partition(*Record, int) int       { panic("unreachable") }
+
+func (p *leastBackupTopicPartitioner) PartitionByBackup(_ *Record, n int, backup TopicBackupIter) int {
+	if p.onPart == -1 || p.onPart >= n {
+		leastBackup := int64(math.MaxInt64)
+		npicked := 0
+		for ; n > 0; n-- {
+			pick, backup := backup.Next()
+			if backup < leastBackup {
+				leastBackup = backup
+				p.onPart = pick
+				npicked = 1
+			} else {
+				npicked++ // reservoir sampling with k = 1
+				if p.rng.Intn(npicked) == 0 {
+					p.onPart = pick
+				}
+			}
+		}
+	}
+	return p.onPart
+}
+
+///////////////////
+// UNIFORM BYTES //
+///////////////////
+
+// UniformBytesPartitioner is a redux of the StickyPartitioner, proposed in
+// KIP-794 and released with the Java client in Kafka 3.3. This partitioner
+// returns the same partition until 'bytes' is hit. At that point, a
+// re-partitioning happens. If adaptive is false, this chooses a new random
+// partition, otherwise this chooses a broker based on the inverse of the
+// backlog currently buffered for that broker. If keys is true, this uses
+// standard hashing based on record key for records with non-nil keys. hasher
+// is optional; if nil, the default is murmur2 (Kafka's default).
+//
+// The point of this partitioner is to create larger batches while producing
+// the same amount to all partitions over the long run. Adaptive opts in to a
+// slight imbalance so that this can produce more to brokers that are less
+// loaded.
+//
+// This implementation differs slightly from Kafka's because this does not
+// account for the compressed size of a batch, nor batch overhead. For
+// overhead, in practice, the overhead is relatively constant so it would
+// affect all batches equally. For compression, this client does not compress
+// until after a batch is created and frozen, so it is not possible to track
+// compression. This client also uses the number of records for backup
+// calculation rather than number of bytes, but the heuristic should be
+// similar. Lastly, this client does not have a timeout for partition
+// availability. Realistically, these will be the most backed up partitions so
+// they should be chosen the least.
+//
+// NOTE: This implementation may create sub-optimal batches if lingering is
+// enabled. This client's default is to disable lingering. The patch used to
+// address this in Kafka is KAFKA-14156 (which itself is not perfect in the
+// context of disabling lingering).
For more details, read KAFKA-14156. +func UniformBytesPartitioner(bytes int, adaptive, keys bool, hasher PartitionerHasher) Partitioner { + if hasher == nil { + hasher = KafkaHasher(murmur2) + } + return &uniformBytesPartitioner{ + bytes, + adaptive, + keys, + hasher, + } +} + +type ( + uniformBytesPartitioner struct { + bytes int + adaptive bool + keys bool + hasher PartitionerHasher + } + + uniformBytesTopicPartitioner struct { + u uniformBytesPartitioner + bytes int + onPart int + rng *rand.Rand + + calc []struct { + f float64 + n int + } + } +) + +func (u *uniformBytesPartitioner) ForTopic(string) TopicPartitioner { + return &uniformBytesTopicPartitioner{ + u: *u, + onPart: -1, + rng: rand.New(rand.NewSource(time.Now().UnixNano())), + } +} + +func (p *uniformBytesTopicPartitioner) RequiresConsistency(r *Record) bool { + return p.u.keys && r.Key != nil +} +func (*uniformBytesTopicPartitioner) Partition(*Record, int) int { panic("unreachable") } + +func (p *uniformBytesTopicPartitioner) PartitionByBackup(r *Record, n int, backup TopicBackupIter) int { + if p.u.keys && r.Key != nil { + return p.u.hasher(r.Key, n) + } + + l := 1 + // attributes, int8 unused + 1 + // ts delta, 1 minimum (likely 2 or 3) + 1 + // offset delta, likely 1 + kbin.VarintLen(int32(len(r.Key))) + + len(r.Key) + + kbin.VarintLen(int32(len(r.Value))) + + len(r.Value) + + kbin.VarintLen(int32(len(r.Headers))) // varint array len headers + + for _, h := range r.Headers { + l += kbin.VarintLen(int32(len(h.Key))) + + len(h.Key) + + kbin.VarintLen(int32(len(h.Value))) + + len(h.Value) + } + + p.bytes += l + if p.bytes >= p.u.bytes { + p.bytes = l + p.onPart = -1 + } + + if p.onPart >= 0 && p.onPart < n { + return p.onPart + } + + if !p.u.adaptive { + p.onPart = p.rng.Intn(n) + } else { + p.calc = p.calc[:0] + + // For adaptive, the logic is that we pick by broker according + // to the inverse of the queue size. Presumably this means + // bytes, but we use records for simplicity. + // + // We calculate 1/recs for all brokers and choose the first one + // in this ordering that takes us negative. + // + // e.g., 1/1 + 1/3; pick is 0.2; 0.2*1.3333 = 0.26666; minus 1 + // is negative, meaning our pick is the first. If rng was 0.9, + // scaled is 1.2, meaning our pick is the second (-1, still + // positive, second pick takes us negative). + // + // To guard floating rounding problems, if we pick nothing, + // then this means we pick our last. + var t float64 + for ; n > 0; n-- { + n, backup := backup.Next() + backup++ // ensure non-zero + f := 1 / float64(backup) + t += f + p.calc = append(p.calc, struct { + f float64 + n int + }{f, n}) + } + r := p.rng.Float64() + pick := r * t + for _, c := range p.calc { + pick -= c.f + if pick <= 0 { + p.onPart = c.n + break + } + } + if p.onPart == -1 { + p.onPart = p.calc[len(p.calc)-1].n + } + } + return p.onPart +} + +///////////////////// +// STICKY & COMPAT // - Sticky, Kafka (custom hash), Sarama (custom hash) +///////////////////// + +// StickyPartitioner is the same as StickyKeyPartitioner, but with no logic to +// consistently hash keys. That is, this only partitions according to the +// sticky partition strategy. 
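+//
+// A sketch of wiring this into a client (editor's addition; assumes this
+// package's RecordPartitioner client option):
+//
+//	cl, err := kgo.NewClient(
+//		kgo.SeedBrokers("localhost:9092"),
+//		kgo.RecordPartitioner(kgo.StickyPartitioner()),
+//	)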
+func StickyPartitioner() Partitioner {
+	return new(stickyPartitioner)
+}
+
+type (
+	stickyPartitioner struct{}
+
+	stickyTopicPartitioner struct {
+		lastPart int
+		onPart   int
+		rng      *rand.Rand
+	}
+)
+
+func (*stickyPartitioner) ForTopic(string) TopicPartitioner {
+	p := newStickyTopicPartitioner()
+	return &p
+}
+
+func newStickyTopicPartitioner() stickyTopicPartitioner {
+	return stickyTopicPartitioner{
+		lastPart: -1,
+		onPart:   -1,
+		rng:      rand.New(rand.NewSource(time.Now().UnixNano())),
+	}
+}
+
+func (p *stickyTopicPartitioner) OnNewBatch() { p.lastPart, p.onPart = p.onPart, -1 }
+func (*stickyTopicPartitioner) RequiresConsistency(*Record) bool { return false }
+func (p *stickyTopicPartitioner) Partition(_ *Record, n int) int {
+	if p.onPart == -1 || p.onPart >= n {
+		p.onPart = p.rng.Intn(n)
+		if p.onPart == p.lastPart {
+			p.onPart = (p.onPart + 1) % n
+		}
+	}
+	return p.onPart
+}
+
+// StickyKeyPartitioner mirrors the default Java partitioner from Kafka's 2.4
+// release (see KIP-480 and KAFKA-8601) until their 3.3 release. This was
+// replaced in 3.3 with the uniform sticky partitioner (KIP-794), which is
+// reimplemented in this client as the UniformBytesPartitioner.
+//
+// This is the same "hash the key consistently, if no key, choose random
+// partition" strategy that the Java partitioner has always used, but rather
+// than always choosing a random partition, the partitioner pins a partition to
+// produce to until that partition rolls over to a new batch. Only when rolling
+// to new batches does this partitioner switch partitions.
+//
+// The benefit with this pinning is less CPU utilization on Kafka brokers.
+// Over time, the random distribution is the same, but the brokers are handling
+// on average larger batches.
+//
+// hasher is optional; if nil, this will return a partitioner that partitions
+// exactly how Kafka does. Specifically, the partitioner will use murmur2 to
+// hash keys, will mask out the 32nd bit, and then will mod by the number of
+// potential partitions.
+func StickyKeyPartitioner(hasher PartitionerHasher) Partitioner {
+	if hasher == nil {
+		hasher = KafkaHasher(murmur2)
+	}
+	return &keyPartitioner{hasher}
+}
+
+// PartitionerHasher returns a partition to use given the input data and number
+// of partitions.
+type PartitionerHasher func([]byte, int) int
+
+// KafkaHasher returns a PartitionerHasher using hashFn that mirrors how Kafka
+// partitions after hashing data. In Kafka, after hashing into a uint32, the
+// hash is converted to an int32 and the high bit is stripped. Kafka by default
+// uses murmur2 hashing, and the StickyKeyPartitioner uses this by default.
+// Using this KafkaHasher function is only necessary if you want to change the
+// underlying hashing algorithm.
+func KafkaHasher(hashFn func([]byte) uint32) PartitionerHasher {
+	return func(key []byte, n int) int {
+		// https://github.com/apache/kafka/blob/d91a94e/clients/src/main/java/org/apache/kafka/clients/producer/internals/DefaultPartitioner.java#L59
+		// https://github.com/apache/kafka/blob/d91a94e/clients/src/main/java/org/apache/kafka/common/utils/Utils.java#L865-L867
+		// Masking before or after the int conversion makes no difference.
+		return int(hashFn(key)&0x7fffffff) % n
+	}
+}
+
+// SaramaHasher is a historically misnamed partitioner hasher. This library's
+// original implementation of the SaramaHasher was incorrect; if you want an
+// exact match for the Sarama partitioner, use the [SaramaCompatHasher].
+// +// This partitioner remains because as it turns out, other ecosystems provide +// a similar partitioner and this partitioner is useful for compatibility. +// +// In particular, using this function with a crc32.ChecksumIEEE hasher makes +// this partitioner match librdkafka's consistent partitioner, or the +// zendesk/ruby-kafka partitioner. +func SaramaHasher(hashFn func([]byte) uint32) PartitionerHasher { + return func(key []byte, n int) int { + p := int(hashFn(key)) % n + if p < 0 { + p = -p + } + return p + } +} + +// SaramaCompatHasher returns a PartitionerHasher using hashFn that mirrors how +// Sarama partitions after hashing data. +// +// Sarama has two differences from Kafka when partitioning: +// +// 1) In Kafka, when converting the uint32 hash to an int32, Kafka masks the +// high bit. In Sarama, if the high bit is 1 (i.e., the number as an int32 is +// negative), Sarama negates the number. +// +// 2) Kafka by default uses the murmur2 hashing algorithm. Sarama by default +// uses fnv-1a. +// +// Sarama added a NewReferenceHashPartitioner function that attempted to align +// with Kafka, but the reference partitioner only fixed the first difference, +// not the second. Further customization options were added later that made it +// possible to exactly match Kafka when hashing. +// +// In short, to *exactly* match the Sarama defaults, use the following: +// +// kgo.StickyKeyPartitioner(kgo.SaramaCompatHasher(fnv32a)) +// +// Where fnv32a is a function returning a new 32 bit fnv-1a hasher. +// +// func fnv32a(b []byte) uint32 { +// h := fnv.New32a() +// h.Reset() +// h.Write(b) +// return h.Sum32() +// } +func SaramaCompatHasher(hashFn func([]byte) uint32) PartitionerHasher { + return func(key []byte, n int) int { + p := int32(hashFn(key)) % int32(n) + if p < 0 { + p = -p + } + return int(p) + } +} + +type ( + keyPartitioner struct { + hasher PartitionerHasher + } + + stickyKeyTopicPartitioner struct { + hasher PartitionerHasher + stickyTopicPartitioner + } +) + +func (k *keyPartitioner) ForTopic(string) TopicPartitioner { + return &stickyKeyTopicPartitioner{k.hasher, newStickyTopicPartitioner()} +} + +func (*stickyKeyTopicPartitioner) RequiresConsistency(r *Record) bool { return r.Key != nil } +func (p *stickyKeyTopicPartitioner) Partition(r *Record, n int) int { + if r.Key != nil { + return p.hasher(r.Key, n) + } + return p.stickyTopicPartitioner.Partition(r, n) +} + +///////////// +// MURMUR2 // +///////////// + +// Straight from the C++ code and from the Java code duplicating it. +// https://github.com/apache/kafka/blob/d91a94e/clients/src/main/java/org/apache/kafka/common/utils/Utils.java#L383-L421 +// https://github.com/aappleby/smhasher/blob/61a0530f/src/MurmurHash2.cpp#L37-L86 +// +// The Java code uses ints but with unsigned shifts; we do not need to. 
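+//
+// As a sketch tying the hashers above together (crc32 is the standard
+// library's hash/crc32; per the SaramaHasher docs above, this pairing
+// matches librdkafka's consistent partitioner):
+//
+//	part := kgo.StickyKeyPartitioner(kgo.SaramaHasher(crc32.ChecksumIEEE))
+//	cl, err := kgo.NewClient(kgo.RecordPartitioner(part))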
+func murmur2(b []byte) uint32 { + const ( + seed uint32 = 0x9747b28c + m uint32 = 0x5bd1e995 + r = 24 + ) + h := seed ^ uint32(len(b)) + for len(b) >= 4 { + k := uint32(b[3])<<24 + uint32(b[2])<<16 + uint32(b[1])<<8 + uint32(b[0]) + b = b[4:] + k *= m + k ^= k >> r + k *= m + + h *= m + h ^= k + } + switch len(b) { + case 3: + h ^= uint32(b[2]) << 16 + fallthrough + case 2: + h ^= uint32(b[1]) << 8 + fallthrough + case 1: + h ^= uint32(b[0]) + h *= m + } + + h ^= h >> 13 + h *= m + h ^= h >> 15 + return h +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/producer.go b/vendor/github.com/twmb/franz-go/pkg/kgo/producer.go new file mode 100644 index 00000000000..5cf4fc775d5 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/producer.go @@ -0,0 +1,1249 @@ +package kgo + +import ( + "context" + "errors" + "fmt" + "math" + "sync" + "sync/atomic" + "time" + + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" +) + +type producer struct { + inflight atomicI64 // high 16: # waiters, low 48: # inflight + + // mu and c are used for flush and drain notifications; mu is used for + // a few other tight locks. + mu sync.Mutex + c *sync.Cond + + bufferedRecords int64 + bufferedBytes int64 + + cl *Client + + topicsMu sync.Mutex // locked to prevent concurrent updates; reads are always atomic + topics *topicsPartitions + + // Hooks exist behind a pointer because likely they are not used. + // We only take up one byte vs. 6. + hooks *struct { + buffered []HookProduceRecordBuffered + partitioned []HookProduceRecordPartitioned + unbuffered []HookProduceRecordUnbuffered + } + + hasHookBatchWritten bool + + // unknownTopics buffers all records for topics that are not loaded. + // The map is to a pointer to a slice for reasons documented in + // waitUnknownTopic. + unknownTopicsMu sync.Mutex + unknownTopics map[string]*unknownTopicProduces + + id atomic.Value + producingTxn atomicBool + + // We must have a producer field for flushing; we cannot just have a + // field on recBufs that is toggled on flush. If we did, then a new + // recBuf could be created and records sent to while we are flushing. + flushing atomicI32 // >0 if flushing, can Flush many times concurrently + blocked atomicI32 // >0 if over max recs or bytes + blockedBytes int64 + + aborting atomicI32 // >0 if aborting, can abort many times concurrently + + idMu sync.Mutex + idVersion int16 + + batchPromises ringBatchPromise + promisesMu sync.Mutex + + txnMu sync.Mutex + inTxn bool + + // If using EndBeginTxnUnsafe, and any partitions are actually produced + // to, we issue an AddPartitionsToTxn at the end to re-add them to a + // new transaction. We have to due to logic races: the broker may not + // have handled the produce requests yet, and we want to ensure a new + // transaction is started. + // + // If the user stops producing, we want to ensure that our restarted + // transaction is actually ended. Thus, we set readded whenever we have + // partitions we actually restart. We issue EndTxn and reset readded in + // EndAndBegin; if nothing more was produced to, we ensure we finish + // the started txn. + readded bool +} + +// BufferedProduceRecords returns the number of records currently buffered for +// producing within the client. +// +// This can be used as a gauge to determine how far behind the client is for +// flushing records produced by your client (which can help determine network / +// cluster health). 
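+//
+// For example, a gauge-polling sketch (reportGauge is a hypothetical
+// metrics helper):
+//
+//	go func() {
+//		for range time.Tick(10 * time.Second) {
+//			reportGauge("buffered_produce_records", cl.BufferedProduceRecords())
+//		}
+//	}()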
+func (cl *Client) BufferedProduceRecords() int64 {
+	cl.producer.mu.Lock()
+	defer cl.producer.mu.Unlock()
+	return cl.producer.bufferedRecords + int64(cl.producer.blocked.Load())
+}
+
+// BufferedProduceBytes returns the number of bytes currently buffered for
+// producing within the client. This is the sum of all keys, values, and header
+// keys/values. See the related [BufferedProduceRecords] for more information.
+func (cl *Client) BufferedProduceBytes() int64 {
+	cl.producer.mu.Lock()
+	defer cl.producer.mu.Unlock()
+	return cl.producer.bufferedBytes + cl.producer.blockedBytes
+}
+
+type unknownTopicProduces struct {
+	buffered []promisedRec
+	wait     chan error // retryable errors
+	fatal    chan error // must-signal quit errors; capacity 1
+}
+
+func (p *producer) init(cl *Client) {
+	p.cl = cl
+	p.topics = newTopicsPartitions()
+	p.unknownTopics = make(map[string]*unknownTopicProduces)
+	p.idVersion = -1
+	p.id.Store(&producerID{
+		id:    -1,
+		epoch: -1,
+		err:   errReloadProducerID,
+	})
+	p.c = sync.NewCond(&p.mu)
+
+	inithooks := func() {
+		if p.hooks == nil {
+			p.hooks = &struct {
+				buffered    []HookProduceRecordBuffered
+				partitioned []HookProduceRecordPartitioned
+				unbuffered  []HookProduceRecordUnbuffered
+			}{}
+		}
+	}
+
+	cl.cfg.hooks.each(func(h Hook) {
+		if h, ok := h.(HookProduceRecordBuffered); ok {
+			inithooks()
+			p.hooks.buffered = append(p.hooks.buffered, h)
+		}
+		if h, ok := h.(HookProduceRecordPartitioned); ok {
+			inithooks()
+			p.hooks.partitioned = append(p.hooks.partitioned, h)
+		}
+		if h, ok := h.(HookProduceRecordUnbuffered); ok {
+			inithooks()
+			p.hooks.unbuffered = append(p.hooks.unbuffered, h)
+		}
+		if _, ok := h.(HookProduceBatchWritten); ok {
+			p.hasHookBatchWritten = true
+		}
+	})
+}
+
+func (p *producer) purgeTopics(topics []string) {
+	p.topicsMu.Lock()
+	defer p.topicsMu.Unlock()
+
+	p.unknownTopicsMu.Lock()
+	for _, topic := range topics {
+		if unknown, exists := p.unknownTopics[topic]; exists {
+			delete(p.unknownTopics, topic)
+			close(unknown.wait)
+			p.promiseBatch(batchPromise{
+				recs: unknown.buffered,
+				err:  errPurged,
+			})
+		}
+	}
+	p.unknownTopicsMu.Unlock()
+
+	toStore := p.topics.clone()
+	defer p.topics.storeData(toStore)
+
+	for _, topic := range topics {
+		d := toStore.loadTopic(topic)
+		if d == nil {
+			continue
+		}
+		delete(toStore, topic)
+		for _, p := range d.partitions {
+			r := p.records
+
+			// First we set purged, so that anything in the process
+			// of being buffered will immediately fail when it goes
+			// to buffer.
+			r.mu.Lock()
+			r.purged = true
+			r.mu.Unlock()
+
+			// Now we remove from the sink. When we do, the recBuf
+			// is effectively abandoned. Any active produces may
+			// finish before we fail the records; if they finish
+			// after, they will no longer belong in the batch, but
+			// they may have been produced. This is the duplicate
+			// risk a user runs when purging.
+			r.sink.removeRecBuf(r)
+
+			// Once abandoned, we now need to fail anything that
+			// was buffered.
+			go func() {
+				r.mu.Lock()
+				defer r.mu.Unlock()
+				r.failAllRecords(errPurged)
+			}()
+		}
+	}
+}
+
+func (p *producer) isAborting() bool { return p.aborting.Load() > 0 }
+
+func noPromise(*Record, error) {}
+
+// ProduceResult is the result of producing a record in a synchronous manner.
+type ProduceResult struct {
+	// Record is the produced record. It is always non-nil.
+	//
+	// If this record was produced successfully, its attrs / offset / id /
+	// epoch / etc. fields are filled in on return if possible (i.e. when
+	// producing with acks required).
+	Record *Record
+
+	// Err is a potential produce error. If this is non-nil, the record was
+	// not produced successfully.
+	Err error
+}
+
+// ProduceResults is a collection of produce results.
+type ProduceResults []ProduceResult
+
+// FirstErr returns the first erroring result, if any.
+func (rs ProduceResults) FirstErr() error {
+	for _, r := range rs {
+		if r.Err != nil {
+			return r.Err
+		}
+	}
+	return nil
+}
+
+// First returns the first record and error in the produce results.
+//
+// This function is useful if you only passed one record to ProduceSync.
+func (rs ProduceResults) First() (*Record, error) {
+	return rs[0].Record, rs[0].Err
+}
+
+// ProduceSync is a synchronous produce. See the Produce documentation for an
+// in depth description of how producing works.
+//
+// This function produces all records in one range loop and waits for them all
+// to be produced before returning.
+func (cl *Client) ProduceSync(ctx context.Context, rs ...*Record) ProduceResults {
+	var (
+		wg      sync.WaitGroup
+		results = make(ProduceResults, 0, len(rs))
+		promise = func(r *Record, err error) {
+			results = append(results, ProduceResult{r, err})
+			wg.Done()
+		}
+	)
+
+	wg.Add(len(rs))
+	for _, r := range rs {
+		cl.Produce(ctx, r, promise)
+	}
+	wg.Wait()
+
+	return results
+}
+
+// FirstErrPromise is a helper type to capture only the first failing error
+// when producing a batch of records with this type's Promise function.
+//
+// This is useful for when you only care about any record failing, and can use
+// that as a signal (i.e., to abort a batch). The AbortingFirstErrPromise
+// function can be used to abort all records as soon as the first error is
+// encountered. If you do not need to abort, you can use this type with no
+// constructor.
+//
+// This is similar to using ProduceResult's FirstErr function.
+type FirstErrPromise struct {
+	wg   sync.WaitGroup
+	once atomicBool
+	err  error
+	cl   *Client
+}
+
+// AbortingFirstErrPromise returns a FirstErrPromise that will call the
+// client's AbortBufferedRecords function if an error is encountered.
+//
+// This can be used to quickly exit when any error is encountered, rather than
+// waiting while flushing only to discover things errored.
+func AbortingFirstErrPromise(cl *Client) *FirstErrPromise {
+	return &FirstErrPromise{
+		cl: cl,
+	}
+}
+
+// promise is the underlying promise implementation; it stores the first
+// error encountered and, if configured, aborts buffered records.
+func (f *FirstErrPromise) promise(_ *Record, err error) {
+	defer f.wg.Done()
+	if err != nil && !f.once.Swap(true) {
+		f.err = err
+		if f.cl != nil {
+			f.wg.Add(1)
+			go func() {
+				defer f.wg.Done()
+				f.cl.AbortBufferedRecords(context.Background())
+			}()
+		}
+	}
+}
+
+// Promise returns a promise for producing that will store the first error
+// encountered.
+//
+// The returned promise must eventually be called, because a FirstErrPromise
+// does not return from 'Err' until all promises are completed.
+func (f *FirstErrPromise) Promise() func(*Record, error) {
+	f.wg.Add(1)
+	return f.promise
+}
+
+// Err waits for all promises to complete and then returns any stored error.
+func (f *FirstErrPromise) Err() error {
+	f.wg.Wait()
+	return f.err
+}
+
+// TryProduce is similar to Produce, but rather than blocking if the client
+// currently has MaxBufferedRecords or MaxBufferedBytes buffered, this fails
+// immediately with ErrMaxBuffered. See the Produce documentation for more
+// details.
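+//
+// For example (sketch): dropping records instead of blocking when the buffer
+// is full, counting drops with a hypothetical counter:
+//
+//	cl.TryProduce(ctx, rec, func(_ *kgo.Record, err error) {
+//		if errors.Is(err, kgo.ErrMaxBuffered) {
+//			droppedRecords.Inc() // hypothetical metric
+//		}
+//	})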
+func (cl *Client) TryProduce(
+	ctx context.Context,
+	r *Record,
+	promise func(*Record, error),
+) {
+	cl.produce(ctx, r, promise, false)
+}
+
+// Produce sends a Kafka record to the topic in the record's Topic field,
+// calling an optional `promise` with the record and a potential error when
+// Kafka replies. For a synchronous produce, see ProduceSync. Records are
+// produced in order per partition if the record is produced successfully.
+// Successfully produced records will have their attributes, offset, and
+// partition set before the promise is called. All promises are called serially
+// (and should be relatively fast). If a record's timestamp is unset, this
+// sets the timestamp to time.Now.
+//
+// If the topic field is empty, the client will use the DefaultProduceTopic; if
+// that is also empty, the record is failed immediately. If the record is too
+// large to fit in a batch on its own in a produce request, the record will be
+// failed immediately with kerr.MessageTooLarge.
+//
+// If the client is configured to automatically flush and currently has the
+// configured maximum amount of records or bytes buffered, Produce will block.
+// The context can be used to cancel waiting while records flush to make space.
+// In contrast, if manual flushing is configured, the record will be failed
+// immediately with ErrMaxBuffered (this same behavior can be had with
+// TryProduce).
+//
+// Once a record is buffered into a batch, it can be canceled in three ways:
+// canceling the context, the record timing out, or hitting the maximum
+// retries. If any of these conditions are hit and it is currently safe to fail
+// records, all buffered records for the relevant partition are failed. Only
+// the first record's context in a batch is considered when determining whether
+// the batch should be canceled. A record is not safe to fail if the client
+// is idempotently producing and a request has been sent; in this case, the
+// client cannot know if the broker actually processed the request (if so, then
+// removing the records from the client will create errors the next time you
+// produce).
+//
+// If the client is transactional and a transaction has not been begun, the
+// promise is immediately called with an error corresponding to not being in a
+// transaction.
+func (cl *Client) Produce(
+	ctx context.Context,
+	r *Record,
+	promise func(*Record, error),
+) {
+	cl.produce(ctx, r, promise, true)
+}
+
+func (cl *Client) produce(
+	ctx context.Context,
+	r *Record,
+	promise func(*Record, error),
+	block bool,
+) {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+	if r.Context == nil {
+		r.Context = ctx
+	}
+	if promise == nil {
+		promise = noPromise
+	}
+	if r.Topic == "" {
+		r.Topic = cl.cfg.defaultProduceTopic
+	}
+
+	p := &cl.producer
+	if p.hooks != nil && len(p.hooks.buffered) > 0 {
+		for _, h := range p.hooks.buffered {
+			h.OnProduceRecordBuffered(r)
+		}
+	}
+
+	// We can now fail the rec after the buffered hook.
+	if r.Topic == "" {
+		p.promiseRecordBeforeBuf(promisedRec{ctx, promise, r}, errNoTopic)
+		return
+	}
+	if cl.cfg.txnID != nil && !p.producingTxn.Load() {
+		p.promiseRecordBeforeBuf(promisedRec{ctx, promise, r}, errNotInTransaction)
+		return
+	}
+
+	userSize := r.userSize()
+	if cl.cfg.maxBufferedBytes > 0 && userSize > cl.cfg.maxBufferedBytes {
+		p.promiseRecordBeforeBuf(promisedRec{ctx, promise, r}, kerr.MessageTooLarge)
+		return
+	}
+
+	// We have to grab the produce lock to check if this record will exceed
+	// configured limits. We try to keep the logic tight since this is
+	// effectively a global lock around producing.
+	var (
+		nextBufRecs, nextBufBytes int64
+		overMaxRecs, overMaxBytes bool
+
+		calcNums = func() {
+			nextBufRecs = p.bufferedRecords + 1
+			nextBufBytes = p.bufferedBytes + userSize
+			overMaxRecs = nextBufRecs > cl.cfg.maxBufferedRecords
+			overMaxBytes = cl.cfg.maxBufferedBytes > 0 && nextBufBytes > cl.cfg.maxBufferedBytes
+		}
+	)
+	p.mu.Lock()
+	calcNums()
+	if overMaxRecs || overMaxBytes {
+		if !block || cl.cfg.manualFlushing {
+			p.mu.Unlock()
+			p.promiseRecordBeforeBuf(promisedRec{ctx, promise, r}, ErrMaxBuffered)
+			return
+		}
+
+		// Before we potentially unlinger, add that we are blocked to
+		// ensure we do NOT start a linger anymore. We THEN wake up
+		// anything that is actively lingering. Note that blocked is
+		// also used when finishing promises to see if we need to be
+		// notified.
+		p.blocked.Add(1)
+		p.blockedBytes += userSize
+		p.mu.Unlock()
+
+		cl.cfg.logger.Log(LogLevelDebug, "blocking Produce because we are either over max buffered records or max buffered bytes",
+			"over_max_records", overMaxRecs,
+			"over_max_bytes", overMaxBytes,
+		)
+
+		cl.unlingerDueToMaxRecsBuffered()
+
+		// We keep the lock when we exit. If we are flushing, we want
+		// this blocked record to be produced before we return from
+		// flushing. This blocked record will be accounted for in the
+		// bufferedRecords addition below, after being removed from
+		// blocked in the goroutine.
+		wait := make(chan struct{})
+		var quit bool
+		go func() {
+			defer close(wait)
+			p.mu.Lock()
+			calcNums()
+			for !quit && (overMaxRecs || overMaxBytes) {
+				p.c.Wait()
+				calcNums()
+			}
+			p.blocked.Add(-1)
+			p.blockedBytes -= userSize
+		}()
+
+		drainBuffered := func(err error) {
+			// The expected case here is that a context was
+			// canceled while we were waiting for space, so we are
+			// exiting and need to kill the goro above.
+			//
+			// However, it is possible that the goro above has
+			// already exited AND the context was canceled, and
+			// `select` chose the context-canceled case.
+			//
+			// So, to avoid a deadlock, we need to wake up the
+			// goro above in another goroutine.
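+			//
+			// (Said differently: set quit under the lock so the
+			// waiting goroutine cannot miss it, then broadcast so
+			// it re-checks its loop condition, exits, and closes
+			// `wait`, which we receive from below.)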
+ go func() { + p.mu.Lock() + quit = true + p.mu.Unlock() + p.c.Broadcast() + }() + <-wait // we wait for the goroutine to exit, then unlock again (since the goroutine leaves the mutex locked) + p.mu.Unlock() + p.promiseRecordBeforeBuf(promisedRec{ctx, promise, r}, err) + } + + select { + case <-wait: + cl.cfg.logger.Log(LogLevelDebug, "Produce block awoken, we now have space to produce, continuing to partition and produce") + case <-cl.ctx.Done(): + drainBuffered(ErrClientClosed) + cl.cfg.logger.Log(LogLevelDebug, "client ctx canceled while blocked in Produce, returning") + return + case <-ctx.Done(): + drainBuffered(ctx.Err()) + cl.cfg.logger.Log(LogLevelDebug, "produce ctx canceled while blocked in Produce, returning") + return + } + } + p.bufferedRecords = nextBufRecs + p.bufferedBytes = nextBufBytes + p.mu.Unlock() + + cl.partitionRecord(promisedRec{ctx, promise, r}) +} + +type batchPromise struct { + baseOffset int64 + pid int64 + epoch int16 + attrs RecordAttrs + beforeBuf bool + partition int32 + recs []promisedRec + err error +} + +func (p *producer) promiseBatch(b batchPromise) { + if first := p.batchPromises.push(b); first { + go p.finishPromises(b) + } +} + +func (p *producer) promiseRecord(pr promisedRec, err error) { + p.promiseBatch(batchPromise{recs: []promisedRec{pr}, err: err}) +} + +func (p *producer) promiseRecordBeforeBuf(pr promisedRec, err error) { + p.promiseBatch(batchPromise{recs: []promisedRec{pr}, beforeBuf: true, err: err}) +} + +func (p *producer) finishPromises(b batchPromise) { + cl := p.cl + var more bool + var broadcast bool + defer func() { + if broadcast { + p.c.Broadcast() + } + }() +start: + p.promisesMu.Lock() + for i, pr := range b.recs { + pr.LeaderEpoch = 0 + if b.baseOffset == -1 { + // if the base offset is invalid/unknown (-1), all record offsets should + // be treated as unknown + pr.Offset = -1 + } else { + pr.Offset = b.baseOffset + int64(i) + } + pr.Partition = b.partition + pr.ProducerID = b.pid + pr.ProducerEpoch = b.epoch + pr.Attrs = b.attrs + recBroadcast := cl.finishRecordPromise(pr, b.err, b.beforeBuf) + broadcast = broadcast || recBroadcast + b.recs[i] = promisedRec{} + } + p.promisesMu.Unlock() + if cap(b.recs) > 4 { + cl.prsPool.put(b.recs) + } + + b, more = p.batchPromises.dropPeek() + if more { + goto start + } +} + +func (cl *Client) finishRecordPromise(pr promisedRec, err error, beforeBuffering bool) (broadcast bool) { + p := &cl.producer + + if p.hooks != nil && len(p.hooks.unbuffered) > 0 { + for _, h := range p.hooks.unbuffered { + h.OnProduceRecordUnbuffered(pr.Record, err) + } + } + + // Capture user size before potential modification by the promise. + // + // We call the promise before finishing the flush notification, + // allowing users of Flush to know all buf recs are done by the + // time we notify flush below. + userSize := pr.userSize() + pr.promise(pr.Record, err) + + // If this record was never buffered, it's size was never accounted + // for on any p field: return early. + if beforeBuffering { + return + } + + // Keep the lock as tight as possible: the broadcast can come after. + p.mu.Lock() + p.bufferedBytes -= userSize + p.bufferedRecords-- + broadcast = p.blocked.Load() > 0 || p.bufferedRecords == 0 && p.flushing.Load() > 0 + p.mu.Unlock() + + return broadcast +} + +// partitionRecord loads the partitions for a topic and produce to them. If +// the topic does not currently exist, the record is buffered in unknownTopics +// for a metadata update to deal with. 
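+//
+// A related usage sketch: this partitioning step can be bypassed entirely
+// with the ManualPartitioner, which trusts the Partition field already set
+// on each record:
+//
+//	cl, err := kgo.NewClient(kgo.RecordPartitioner(kgo.ManualPartitioner()))
+//	rec := &kgo.Record{Topic: "t", Partition: 3, Value: []byte("v")}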
+func (cl *Client) partitionRecord(pr promisedRec) { + parts, partsData := cl.partitionsForTopicProduce(pr) + if parts == nil { // saved in unknownTopics + return + } + cl.doPartitionRecord(parts, partsData, pr) +} + +// doPartitionRecord is separate so that metadata updates that load unknown +// partitions can call this directly. +func (cl *Client) doPartitionRecord(parts *topicPartitions, partsData *topicPartitionsData, pr promisedRec) { + if partsData.loadErr != nil && !kerr.IsRetriable(partsData.loadErr) { + cl.producer.promiseRecord(pr, partsData.loadErr) + return + } + + parts.partsMu.Lock() + defer parts.partsMu.Unlock() + if parts.partitioner == nil { + parts.partitioner = cl.cfg.partitioner.ForTopic(pr.Topic) + } + + mapping := partsData.writablePartitions + if parts.partitioner.RequiresConsistency(pr.Record) { + mapping = partsData.partitions + } + if len(mapping) == 0 { + cl.producer.promiseRecord(pr, errors.New("unable to partition record due to no usable partitions")) + return + } + + var pick int + tlp, _ := parts.partitioner.(TopicBackupPartitioner) + if tlp != nil { + if parts.lb == nil { + parts.lb = new(leastBackupInput) + } + parts.lb.mapping = mapping + pick = tlp.PartitionByBackup(pr.Record, len(mapping), parts.lb) + } else { + pick = parts.partitioner.Partition(pr.Record, len(mapping)) + } + if pick < 0 || pick >= len(mapping) { + cl.producer.promiseRecord(pr, fmt.Errorf("invalid record partitioning choice of %d from %d available", pick, len(mapping))) + return + } + + partition := mapping[pick] + + onNewBatch, _ := parts.partitioner.(TopicPartitionerOnNewBatch) + abortOnNewBatch := onNewBatch != nil + processed := partition.records.bufferRecord(pr, abortOnNewBatch) // KIP-480 + if !processed { + onNewBatch.OnNewBatch() + + if tlp != nil { + parts.lb.mapping = mapping + pick = tlp.PartitionByBackup(pr.Record, len(mapping), parts.lb) + } else { + pick = parts.partitioner.Partition(pr.Record, len(mapping)) + } + + if pick < 0 || pick >= len(mapping) { + cl.producer.promiseRecord(pr, fmt.Errorf("invalid record partitioning choice of %d from %d available", pick, len(mapping))) + return + } + partition = mapping[pick] + partition.records.bufferRecord(pr, false) // KIP-480 + } +} + +// ProducerID returns, loading if necessary, the current producer ID and epoch. +// This returns an error if the producer ID could not be loaded, if the +// producer ID has fatally errored, or if the context is canceled. +func (cl *Client) ProducerID(ctx context.Context) (int64, int16, error) { + var ( + id int64 + epoch int16 + err error + + done = make(chan struct{}) + ) + + go func() { + defer close(done) + id, epoch, err = cl.producerID(ctx2fn(ctx)) + }() + + select { + case <-ctx.Done(): + return 0, 0, ctx.Err() + case <-done: + return id, epoch, err + } +} + +type producerID struct { + id int64 + epoch int16 + err error +} + +var errReloadProducerID = errors.New("producer id needs reloading") + +// initProducerID initializes the client's producer ID for idempotent +// producing only (no transactions, which are more special). After the first +// load, this clears all buffered unknown topics. 
+func (cl *Client) producerID(ctxFn func() context.Context) (int64, int16, error) {
+	p := &cl.producer
+
+	id := p.id.Load().(*producerID)
+	if errors.Is(id.err, errReloadProducerID) {
+		p.idMu.Lock()
+		defer p.idMu.Unlock()
+
+		if id = p.id.Load().(*producerID); errors.Is(id.err, errReloadProducerID) {
+			if cl.cfg.disableIdempotency {
+				cl.cfg.logger.Log(LogLevelInfo, "skipping producer id initialization because the client was configured to disable idempotent writes")
+				id = &producerID{
+					id:    -1,
+					epoch: -1,
+					err:   nil,
+				}
+				p.id.Store(id)
+			} else if cl.cfg.txnID == nil && id.id >= 0 && id.epoch < math.MaxInt16-1 {
+				// For the idempotent producer, as specified in KIP-360,
+				// if we had an ID, we can bump the epoch locally.
+				// If we are at the max epoch, we will ask for a new ID.
+				cl.resetAllProducerSequences()
+				id = &producerID{
+					id:    id.id,
+					epoch: id.epoch + 1,
+					err:   nil,
+				}
+				p.id.Store(id)
+			} else {
+				newID, keep := cl.doInitProducerID(ctxFn, id.id, id.epoch)
+				if keep {
+					id = newID
+					// Whenever we have a new producer ID, we need
+					// our sequence numbers to be 0. On the first
+					// record produced, this will be true, but if
+					// we were signaled to reset the producer ID,
+					// then we definitely still need to reset here.
+					cl.resetAllProducerSequences()
+					p.id.Store(id)
+				} else {
+					// If we are not keeping the producer ID,
+					// we will return our old ID but with a
+					// static error that we can check or bubble
+					// up where needed.
+					id = &producerID{
+						id:    id.id,
+						epoch: id.epoch,
+						err:   &errProducerIDLoadFail{newID.err},
+					}
+				}
+			}
+		}
+	}
+
+	return id.id, id.epoch, id.err
+}
+
+// As seen in KAFKA-12152, if we bump an epoch, we have to reset sequence nums
+// for every partition. Otherwise, we will use a new id/epoch for a partition
+// and trigger OOOSN errors.
+//
+// Pre 2.5, this function is only called if it is acceptable to continue
+// on data loss (idempotent producer with no StopOnDataLoss option).
+//
+// 2.5+, it is safe to call this if the producer ID can be reset (KIP-360),
+// in EndTransaction.
+func (cl *Client) resetAllProducerSequences() {
+	for _, tp := range cl.producer.topics.load() {
+		for _, p := range tp.load().partitions {
+			p.records.mu.Lock()
+			p.records.needSeqReset = true
+			p.records.mu.Unlock()
+		}
+	}
+}
+
+func (cl *Client) failProducerID(id int64, epoch int16, err error) {
+	p := &cl.producer
+
+	// We do not lock the idMu when failing a producer ID, for two reasons.
+	//
+	// 1) With how we store below, we do not need to. We only fail if the
+	// ID we are failing has not changed and if the ID we are failing has
+	// not failed already. Failing outside the lock is the same as failing
+	// within the lock.
+	//
+	// 2) Locking would cause a deadlock, because producerID locks
+	// idMu=>recBuf.Mu, whereas we fail while locked within a recBuf in
+	// sink.go.
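+	//
+	// The loop below is a load / compare-and-swap retry: re-read the
+	// current ID each iteration and only store the failed ID if neither
+	// a different ID nor an earlier failure raced in first.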
+ new := &producerID{ + id: id, + epoch: epoch, + err: err, + } + for { + current := p.id.Load().(*producerID) + if current.id != id || current.epoch != epoch { + cl.cfg.logger.Log(LogLevelInfo, "ignoring a fail producer id request due to current id being different", + "current_id", current.id, + "current_epoch", current.epoch, + "current_err", current.err, + "fail_id", id, + "fail_epoch", epoch, + "fail_err", err, + ) + return + } + if current.err != nil { + cl.cfg.logger.Log(LogLevelInfo, "ignoring a fail producer id because our producer id has already been failed", + "current_id", current.id, + "current_epoch", current.epoch, + "current_err", current.err, + "fail_err", err, + ) + return + } + if p.id.CompareAndSwap(current, new) { + return + } + } +} + +// doInitProducerID inits the idempotent ID and potentially the transactional +// producer epoch, returning whether to keep the result. +func (cl *Client) doInitProducerID(ctxFn func() context.Context, lastID int64, lastEpoch int16) (*producerID, bool) { + cl.cfg.logger.Log(LogLevelInfo, "initializing producer id") + req := kmsg.NewPtrInitProducerIDRequest() + req.TransactionalID = cl.cfg.txnID + req.ProducerID = lastID + req.ProducerEpoch = lastEpoch + if cl.cfg.txnID != nil { + req.TransactionTimeoutMillis = int32(cl.cfg.txnTimeout.Milliseconds()) + } + + ctx := ctxFn() + resp, err := req.RequestWith(ctx, cl) + if err != nil { + if errors.Is(err, errUnknownRequestKey) || errors.Is(err, errBrokerTooOld) { + cl.cfg.logger.Log(LogLevelInfo, "unable to initialize a producer id because the broker is too old or the client is pinned to an old version, continuing without a producer id") + return &producerID{-1, -1, nil}, true + } + if errors.Is(err, errChosenBrokerDead) { + select { + case <-cl.ctx.Done(): + cl.cfg.logger.Log(LogLevelInfo, "producer id initialization failure due to dying client", "err", err) + return &producerID{lastID, lastEpoch, ErrClientClosed}, true + default: + } + } + cl.cfg.logger.Log(LogLevelInfo, "producer id initialization failure, discarding initialization attempt", "err", err) + return &producerID{lastID, lastEpoch, err}, false + } + + if err = kerr.ErrorForCode(resp.ErrorCode); err != nil { + // We could receive concurrent transactions; this is ignorable + // and we just want to re-init. + if kerr.IsRetriable(err) || errors.Is(err, kerr.ConcurrentTransactions) { + cl.cfg.logger.Log(LogLevelInfo, "producer id initialization resulted in retryable error, discarding initialization attempt", "err", err) + return &producerID{lastID, lastEpoch, err}, false + } + cl.cfg.logger.Log(LogLevelInfo, "producer id initialization errored", "err", err) + return &producerID{lastID, lastEpoch, err}, true + } + + cl.cfg.logger.Log(LogLevelInfo, "producer id initialization success", "id", resp.ProducerID, "epoch", resp.ProducerEpoch) + + // We track if this was v3. We do not need to gate this behind a mutex, + // because the only other use is EndTransaction's read, which is + // documented to only be called sequentially after producing. + if cl.producer.idVersion == -1 { + cl.producer.idVersion = req.Version + } + + return &producerID{resp.ProducerID, resp.ProducerEpoch, nil}, true +} + +// partitionsForTopicProduce returns the topic partitions for a record. +// If the topic is not loaded yet, this buffers the record and returns +// nil, nil. 
+func (cl *Client) partitionsForTopicProduce(pr promisedRec) (*topicPartitions, *topicPartitionsData) { + p := &cl.producer + topic := pr.Topic + + topics := p.topics.load() + parts, exists := topics[topic] + if exists { + if v := parts.load(); len(v.partitions) > 0 { + return parts, v + } + } + + if !exists { // topic did not exist: check again under mu and potentially create it + p.topicsMu.Lock() + defer p.topicsMu.Unlock() + + if parts, exists = p.topics.load()[topic]; !exists { // update parts for below + // Before we store the new topic, we lock unknown + // topics to prevent a concurrent metadata update + // seeing our new topic before we are waiting from the + // addUnknownTopicRecord fn. Otherwise, we would wait + // and never be re-notified. + p.unknownTopicsMu.Lock() + defer p.unknownTopicsMu.Unlock() + + p.topics.storeTopics([]string{topic}) + cl.addUnknownTopicRecord(pr) + cl.triggerUpdateMetadataNow("forced load because we are producing to a topic for the first time") + return nil, nil + } + } + + // Here, the topic existed, but maybe has not loaded partitions yet. We + // have to lock unknown topics first to ensure ordering just in case a + // load has not happened. + p.unknownTopicsMu.Lock() + defer p.unknownTopicsMu.Unlock() + + if v := parts.load(); len(v.partitions) > 0 { + return parts, v + } + cl.addUnknownTopicRecord(pr) + cl.triggerUpdateMetadata(false, "reload trigger due to produce topic still not known") + + return nil, nil // our record is buffered waiting for metadata update; nothing to return +} + +// addUnknownTopicRecord adds a record to a topic whose partitions are +// currently unknown. This is always called with the unknownTopicsMu held. +func (cl *Client) addUnknownTopicRecord(pr promisedRec) { + unknown := cl.producer.unknownTopics[pr.Topic] + if unknown == nil { + unknown = &unknownTopicProduces{ + buffered: make([]promisedRec, 0, 100), + wait: make(chan error, 5), + fatal: make(chan error, 1), + } + cl.producer.unknownTopics[pr.Topic] = unknown + } + unknown.buffered = append(unknown.buffered, pr) + if len(unknown.buffered) == 1 { + go cl.waitUnknownTopic(pr.ctx, pr.Record.Context, pr.Topic, unknown) + } +} + +// waitUnknownTopic waits for a notification +func (cl *Client) waitUnknownTopic( + pctx context.Context, // context passed to Produce + rctx context.Context, // context on the record itself + topic string, + unknown *unknownTopicProduces, +) { + cl.cfg.logger.Log(LogLevelInfo, "producing to a new topic for the first time, fetching metadata to learn its partitions", "topic", topic) + + var ( + tries int + unknownTries int64 + err error + after <-chan time.Time + ) + + if timeout := cl.cfg.recordTimeout; timeout > 0 { + timer := time.NewTimer(cl.cfg.recordTimeout) + defer timer.Stop() + after = timer.C + } + + // Ordering: aborting is set first, then unknown topics are manually + // canceled in a lock. New unknown topics after that lock will see + // aborting here and immediately cancel themselves. + if cl.producer.isAborting() { + err = ErrAborting + } + + for err == nil { + select { + case <-pctx.Done(): + err = pctx.Err() + case <-rctx.Done(): + err = rctx.Err() + case <-cl.ctx.Done(): + err = ErrClientClosed + case <-after: + err = ErrRecordTimeout + case err = <-unknown.fatal: + case retryableErr, ok := <-unknown.wait: + if !ok { + cl.cfg.logger.Log(LogLevelInfo, "done waiting for metadata for new topic", "topic", topic) + return // metadata was successful! 
+ } + cl.cfg.logger.Log(LogLevelInfo, "new topic metadata wait failed, retrying wait", "topic", topic, "err", retryableErr) + tries++ + if int64(tries) >= cl.cfg.recordRetries { + err = fmt.Errorf("no partitions available after attempting to refresh metadata %d times, last err: %w", tries, retryableErr) + } + if cl.cfg.maxUnknownFailures >= 0 && errors.Is(retryableErr, kerr.UnknownTopicOrPartition) { + unknownTries++ + if unknownTries > cl.cfg.maxUnknownFailures { + err = retryableErr + } + } + } + } + + // If we errored above, we come down here to potentially clear the + // topic wait and fail all buffered records. However, under some + // extreme conditions, a quickly following metadata update could delete + // our unknown topic, and then a produce could recreate a new unknown + // topic. We only delete and finish promises if the pointer in the + // unknown topic map is still the same. + p := &cl.producer + + p.unknownTopicsMu.Lock() + defer p.unknownTopicsMu.Unlock() + + nowUnknown := p.unknownTopics[topic] + if nowUnknown != unknown { + return + } + cl.cfg.logger.Log(LogLevelInfo, "new topic metadata wait failed, done retrying, failing all records", "topic", topic, "err", err) + + delete(p.unknownTopics, topic) + p.promiseBatch(batchPromise{ + recs: unknown.buffered, + err: err, + }) +} + +func (cl *Client) unlingerDueToMaxRecsBuffered() { + if cl.cfg.linger <= 0 { + return + } + for _, parts := range cl.producer.topics.load() { + for _, part := range parts.load().partitions { + part.records.unlingerAndManuallyDrain() + } + } + cl.cfg.logger.Log(LogLevelDebug, "unlingered all partitions due to hitting max buffered") +} + +// Flush hangs waiting for all buffered records to be flushed, stopping all +// lingers if necessary. +// +// If the context finishes (Done), this returns the context's error. +// +// This function is safe to call multiple times concurrently, and safe to call +// concurrent with Flush. +func (cl *Client) Flush(ctx context.Context) error { + p := &cl.producer + + // Signal to finishRecord that we want to be notified once buffered hits 0. + // Also forbid any new producing to start a linger. + p.flushing.Add(1) + defer p.flushing.Add(-1) + + cl.cfg.logger.Log(LogLevelInfo, "flushing") + defer cl.cfg.logger.Log(LogLevelDebug, "flushed") + + // At this point, if lingering is configured, nothing will _start_ a + // linger because the producer's flushing atomic int32 is nonzero. We + // must wake anything that could be lingering up, after which all sinks + // will loop draining. 
+ if cl.cfg.linger > 0 || cl.cfg.manualFlushing { + for _, parts := range p.topics.load() { + for _, part := range parts.load().partitions { + part.records.unlingerAndManuallyDrain() + } + } + } + + quit := false + done := make(chan struct{}) + go func() { + p.mu.Lock() + defer p.mu.Unlock() + defer close(done) + + for !quit && p.bufferedRecords+int64(p.blocked.Load()) > 0 { + p.c.Wait() + } + }() + + select { + case <-done: + return nil + case <-ctx.Done(): + p.mu.Lock() + quit = true + p.mu.Unlock() + p.c.Broadcast() + return ctx.Err() + } +} + +func (p *producer) pause(ctx context.Context) error { + p.inflight.Add(1 << 48) + + quit := false + done := make(chan struct{}) + go func() { + p.mu.Lock() + defer p.mu.Unlock() + defer close(done) + for !quit && p.inflight.Load()&((1<<48)-1) != 0 { + p.c.Wait() + } + }() + + select { + case <-done: + return nil + case <-ctx.Done(): + p.mu.Lock() + quit = true + p.mu.Unlock() + p.c.Broadcast() + p.resume() // dec our inflight + return ctx.Err() + } +} + +func (p *producer) resume() { + if p.inflight.Add(-1<<48) == 0 { + p.cl.allSinksAndSources(func(sns sinkAndSource) { + sns.sink.maybeDrain() + }) + } +} + +func (p *producer) maybeAddInflight() bool { + if p.inflight.Load()>>48 > 0 { + return false + } + if p.inflight.Add(1)>>48 > 0 { + p.decInflight() + return false + } + return true +} + +func (p *producer) decInflight() { + if p.inflight.Add(-1)>>48 > 0 { + p.mu.Lock() + p.mu.Unlock() //nolint:gocritic,staticcheck // We use the lock as a barrier, unlocking immediately is safe. + p.c.Broadcast() + } +} + +// Bumps the tries for all buffered records in the client. +// +// This is called whenever there is a problematic error that would affect the +// state of all buffered records as a whole: +// +// - if we cannot init a producer ID due to RequestWith errors, producing is useless +// - if we cannot add partitions to a txn due to RequestWith errors, producing is useless +// +// Note that these are specifically due to RequestWith errors, not due to +// receiving a response that has a retryable error code. That is, if our +// request keeps dying. +func (cl *Client) bumpRepeatedLoadErr(err error) { + p := &cl.producer + + for _, partitions := range p.topics.load() { + for _, partition := range partitions.load().partitions { + partition.records.bumpRepeatedLoadErr(err) + } + } + p.unknownTopicsMu.Lock() + defer p.unknownTopicsMu.Unlock() + for _, unknown := range p.unknownTopics { + select { + case unknown.wait <- err: + default: + } + } +} + +// Clears all buffered records in the client with the given error. +// +// - closing client +// - aborting transaction +// - fatal AddPartitionsToTxn +// +// Because the error fails everything, we also empty our unknown topics and +// delete any topics that were still unknown from the producer's topics. 
+func (cl *Client) failBufferedRecords(err error) { + p := &cl.producer + + for _, partitions := range p.topics.load() { + for _, partition := range partitions.load().partitions { + recBuf := partition.records + recBuf.mu.Lock() + recBuf.failAllRecords(err) + recBuf.mu.Unlock() + } + } + + p.topicsMu.Lock() + defer p.topicsMu.Unlock() + p.unknownTopicsMu.Lock() + defer p.unknownTopicsMu.Unlock() + + toStore := p.topics.clone() + defer p.topics.storeData(toStore) + + var toFail [][]promisedRec + for topic, unknown := range p.unknownTopics { + delete(toStore, topic) + delete(p.unknownTopics, topic) + close(unknown.wait) + toFail = append(toFail, unknown.buffered) + } + + for _, fail := range toFail { + p.promiseBatch(batchPromise{ + recs: fail, + err: err, + }) + } +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/record_and_fetch.go b/vendor/github.com/twmb/franz-go/pkg/kgo/record_and_fetch.go new file mode 100644 index 00000000000..648db4ecbc5 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/record_and_fetch.go @@ -0,0 +1,632 @@ +package kgo + +import ( + "context" + "errors" + "reflect" + "time" + "unsafe" +) + +// RecordHeader contains extra information that can be sent with Records. +type RecordHeader struct { + Key string + Value []byte +} + +// RecordAttrs contains additional meta information about a record, such as its +// compression or timestamp type. +type RecordAttrs struct { + // 6 bits are used right now for record batches, and we use the high + // bit to signify no timestamp due to v0 message set. + // + // bits 1 thru 3: + // 000 no compression + // 001 gzip + // 010 snappy + // 011 lz4 + // 100 zstd + // bit 4: timestamp type + // bit 5: is transactional + // bit 6: is control + // bit 8: no timestamp type + attrs uint8 +} + +// TimestampType specifies how Timestamp was determined. +// +// The default, 0, means that the timestamp was determined in a client +// when the record was produced. +// +// An alternative is 1, which is when the Timestamp is set in Kafka. +// +// Records pre 0.10.0 did not have timestamps and have value -1. +func (a RecordAttrs) TimestampType() int8 { + if a.attrs&0b1000_0000 != 0 { + return -1 + } + return int8(a.attrs&0b0000_1000) >> 3 +} + +// CompressionType signifies with which algorithm this record was compressed. +// +// 0 is no compression, 1 is gzip, 2 is snappy, 3 is lz4, and 4 is zstd. +func (a RecordAttrs) CompressionType() uint8 { + return a.attrs & 0b0000_0111 +} + +// IsTransactional returns whether a record is a part of a transaction. +func (a RecordAttrs) IsTransactional() bool { + return a.attrs&0b0001_0000 != 0 +} + +// IsControl returns whether a record is a "control" record (ABORT or COMMIT). +// These are generally not visible unless explicitly opted into. +func (a RecordAttrs) IsControl() bool { + return a.attrs&0b0010_0000 != 0 +} + +// Record is a record to write to Kafka. +type Record struct { + // Key is an optional field that can be used for partition assignment. + // + // This is generally used with a hash partitioner to cause all records + // with the same key to go to the same partition. + Key []byte + // Value is blob of data to write to Kafka. + Value []byte + + // Headers are optional key/value pairs that are passed along with + // records. + // + // These are purely for producers and consumers; Kafka does not look at + // this field and only writes it to disk. 
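+	//
+	// For example (sketch), propagating a trace ID as a header:
+	//
+	//	r.Headers = append(r.Headers, kgo.RecordHeader{
+	//		Key:   "trace-id",
+	//		Value: []byte(traceID), // traceID: hypothetical variable
+	//	})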
+ Headers []RecordHeader + + // NOTE: if logAppendTime, timestamp is MaxTimestamp, not first + delta + // zendesk/ruby-kafka#706 + + // Timestamp is the timestamp that will be used for this record. + // + // Record batches are always written with "CreateTime", meaning that + // timestamps are generated by clients rather than brokers. + // + // When producing, if this field is not yet set, it is set to time.Now. + Timestamp time.Time + + // Topic is the topic that a record is written to. + // + // This must be set for producing. + Topic string + + // Partition is the partition that a record is written to. + // + // For producing, this is left unset. This will be set by the client + // before the record is unbuffered. If you use the ManualPartitioner, + // the value of this field is always the partition chosen when + // producing (i.e., you partition manually ahead of time). + Partition int32 + + // Attrs specifies what attributes were on this record. + // + // For producing, this is left unset. This will be set by the client + // before the record is unbuffered. + Attrs RecordAttrs + + // ProducerEpoch is the producer epoch of this message if it was + // produced with a producer ID. An epoch and ID of 0 means it was not. + // + // For producing, this is left unset. This will be set by the client + // before the record is unbuffered. + ProducerEpoch int16 + + // ProducerID is the producer ID of this message if it was produced + // with a producer ID. An epoch and ID of 0 means it was not. + // + // For producing, this is left unset. This will be set by the client + // before the record is unbuffered. + ProducerID int64 + + // LeaderEpoch is the leader epoch of the broker at the time this + // record was written, or -1 if on message sets. + // + // For committing records, it is not recommended to modify the + // LeaderEpoch. Clients use the LeaderEpoch for data loss detection. + LeaderEpoch int32 + + // Offset is the offset that a record is written as. + // + // For producing, this is left unset. This will be set by the client + // before the record is unbuffered. If you are producing with no acks, + // this will just be the offset used in the produce request and does + // not mirror the offset actually stored within Kafka. + Offset int64 + + // Context is an optional field that is used for enriching records. + // + // If this field is nil when producing, it is set to the Produce ctx + // arg. This field can be used to propagate record enrichment across + // producer hooks. It can also be set in a consumer hook to propagate + // enrichment to consumer clients. + Context context.Context +} + +func (r *Record) userSize() int64 { + s := len(r.Key) + len(r.Value) + for _, h := range r.Headers { + s += len(h.Key) + len(h.Value) + } + return int64(s) +} + +// When buffering records, we calculate the length and tsDelta ahead of time +// (also because number width affects encoding length). We repurpose the Offset +// field to save space. +func (r *Record) setLengthAndTimestampDelta(length int32, tsDelta int64) { + r.LeaderEpoch = length + r.Offset = tsDelta +} + +func (r *Record) lengthAndTimestampDelta() (length int32, tsDelta int64) { + return r.LeaderEpoch, r.Offset +} + +// AppendFormat appends a record to b given the layout or returns an error if +// the layout is invalid. This is a one-off shortcut for using +// NewRecordFormatter. See that function's documentation for the layout +// specification. 
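+//
+// For example (a sketch; the %t/%p/%o/%v verbs are described in the
+// NewRecordFormatter documentation):
+//
+//	line, err := r.AppendFormat(nil, "%t[%p]@%o %v\n")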
+func (r *Record) AppendFormat(b []byte, layout string) ([]byte, error) { + f, err := NewRecordFormatter(layout) + if err != nil { + return b, err + } + return f.AppendRecord(b, r), nil +} + +// StringRecord returns a Record with the Value field set to the input value +// string. For producing, this function is useful in tandem with the +// client-level DefaultProduceTopic option. +// +// This function uses the 'unsafe' package to avoid copying value into a slice. +// +// NOTE: It is NOT SAFE to modify the record's value. This function should only +// be used if you only ever read record fields. This function can safely be used +// for producing; the client never modifies a record's key nor value fields. +func StringRecord(value string) *Record { + var slice []byte + slicehdr := (*reflect.SliceHeader)(unsafe.Pointer(&slice)) //nolint:gosec // known way to convert string to slice + slicehdr.Data = ((*reflect.StringHeader)(unsafe.Pointer(&value))).Data //nolint:gosec // known way to convert string to slice + slicehdr.Len = len(value) + slicehdr.Cap = len(value) + + return &Record{Value: slice} +} + +// KeyStringRecord returns a Record with the Key and Value fields set to the +// input key and value strings. For producing, this function is useful in +// tandem with the client-level DefaultProduceTopic option. +// +// This function uses the 'unsafe' package to avoid copying value into a slice. +// +// NOTE: It is NOT SAFE to modify the record's value. This function should only +// be used if you only ever read record fields. This function can safely be used +// for producing; the client never modifies a record's key nor value fields. +func KeyStringRecord(key, value string) *Record { + r := StringRecord(value) + + keyhdr := (*reflect.SliceHeader)(unsafe.Pointer(&r.Key)) //nolint:gosec // known way to convert string to slice + keyhdr.Data = ((*reflect.StringHeader)(unsafe.Pointer(&key))).Data //nolint:gosec // known way to convert string to slice + keyhdr.Len = len(key) + keyhdr.Cap = len(key) + + return r +} + +// SliceRecord returns a Record with the Value field set to the input value +// slice. For producing, this function is useful in tandem with the +// client-level DefaultProduceTopic option. +func SliceRecord(value []byte) *Record { + return &Record{Value: value} +} + +// KeySliceRecord returns a Record with the Key and Value fields set to the +// input key and value slices. For producing, this function is useful in +// tandem with the client-level DefaultProduceTopic option. +func KeySliceRecord(key, value []byte) *Record { + return &Record{Key: key, Value: value} +} + +// FetchPartition is a response for a partition in a fetched topic from a +// broker. +type FetchPartition struct { + // Partition is the partition this is for. + Partition int32 + // Err is an error for this partition in the fetch. + // + // Note that if this is a fatal error, such as data loss or non + // retryable errors, this partition will never be fetched again. + Err error + // HighWatermark is the current high watermark for this partition, that + // is, the current offset that is on all in sync replicas. + HighWatermark int64 + // LastStableOffset is the offset at which all prior offsets have been + // "decided". Non transactional records are always decided immediately, + // but transactional records are only decided once they are committed + // or aborted. + // + // The LastStableOffset will always be at or under the HighWatermark. 
+	LastStableOffset int64
+	// LogStartOffset is the low watermark of this partition, otherwise
+	// known as the earliest offset in the partition.
+	LogStartOffset int64
+	// Records contains fetched records for this partition.
+	Records []*Record
+}
+
+// EachRecord calls fn for each record in the partition.
+func (p *FetchPartition) EachRecord(fn func(*Record)) {
+	for _, r := range p.Records {
+		fn(r)
+	}
+}
+
+// FetchTopic is a response for a fetched topic from a broker.
+type FetchTopic struct {
+	// Topic is the topic this is for.
+	Topic string
+	// TopicID is the ID of the topic, if your cluster supports returning
+	// topic IDs in fetch responses (Kafka 3.1+).
+	TopicID [16]byte
+	// Partitions contains individual partitions in the topic that were
+	// fetched.
+	Partitions []FetchPartition
+}
+
+// EachPartition calls fn for each partition in the topic.
+func (t *FetchTopic) EachPartition(fn func(FetchPartition)) {
+	for i := range t.Partitions {
+		fn(t.Partitions[i])
+	}
+}
+
+// EachRecord calls fn for each record in the topic, in any partition order.
+func (t *FetchTopic) EachRecord(fn func(*Record)) {
+	for i := range t.Partitions {
+		for _, r := range t.Partitions[i].Records {
+			fn(r)
+		}
+	}
+}
+
+// Records returns all records in all partitions in this topic.
+//
+// This is a convenience function that does a single slice allocation. If you
+// can process records individually, it is far more efficient to use the Each
+// functions.
+func (t *FetchTopic) Records() []*Record {
+	var n int
+	t.EachPartition(func(p FetchPartition) {
+		n += len(p.Records)
+	})
+	rs := make([]*Record, 0, n)
+	t.EachPartition(func(p FetchPartition) {
+		rs = append(rs, p.Records...)
+	})
+	return rs
+}
+
+// Fetch is an individual response from a broker.
+type Fetch struct {
+	// Topics are all topics being responded to from a fetch to a broker.
+	Topics []FetchTopic
+}
+
+// Fetches is a group of fetches from brokers.
+type Fetches []Fetch
+
+// FetchError is an error in a fetch along with the topic and partition that
+// the error was on.
+type FetchError struct {
+	Topic     string
+	Partition int32
+	Err       error
+}
+
+// Errors returns all errors in a fetch with the topic and partition that
+// errored.
+//
+// There are a few classes of errors possible:
+//
+// 1. a normal kerr.Error; these are usually the non-retryable kerr.Errors,
+// but theoretically a non-retryable error can be fixed at runtime (auth
+// error? fix auth). It is worth restarting the client for these errors if
+// you do not intend to fix this problem at runtime.
+//
+// 2. an injected *ErrDataLoss; these are informational; the client
+// automatically resets consuming to where it should and resumes. This
+// error is worth logging and investigating, but not worth restarting the
+// client for.
+//
+// 3. an untyped batch parse failure; these are usually unrecoverable by
+// restarts, and it may be best to just let the client continue.
+// Restarting is an option, but you may need to manually repair your
+// partition.
+//
+// 4. an injected ErrClientClosed; this is a fatal informational error that
+// is returned from every Poll call if the client has been closed.
+// A corresponding helper function IsClientClosed can be used to detect
+// this error.
+//
+// 5. an injected context error; this can be present if the context you were
+// using for polling timed out or was canceled.
+//
+// 6.
an injected ErrGroupSession; this is an informational error that is +// injected once a group session is lost in a way that is not the standard +// rebalance. This error can signify that your consumer member is not able +// to connect to the group (ACL problems, unreachable broker), or you +// blocked rebalancing for too long, or your callbacks took too long. +// +// This list may grow over time. +func (fs Fetches) Errors() []FetchError { + var errs []FetchError + fs.EachError(func(t string, p int32, err error) { + errs = append(errs, FetchError{t, p, err}) + }) + return errs +} + +// When we fetch, it is possible for Kafka to reply with topics / partitions +// that have no records and no errors. This will definitely happen outside of +// fetch sessions, but may also happen at other times (for some reason). +// When that happens we want to ignore the fetch. +func (f Fetch) hasErrorsOrRecords() bool { + for i := range f.Topics { + t := &f.Topics[i] + for j := range t.Partitions { + p := &t.Partitions[j] + if p.Err != nil || len(p.Records) > 0 { + return true + } + } + } + return false +} + +// IsClientClosed returns whether the fetches include an error indicating that +// the client is closed. +// +// This function is useful to break out of a poll loop; you likely want to call +// this function before calling Errors. If you may cancel the context to poll, +// you may want to use Err0 and manually check errors.Is(ErrClientClosed) or +// errors.Is(context.Canceled). +func (fs Fetches) IsClientClosed() bool { + // An injected ErrClientClosed is a single fetch with one topic and + // one partition. We can use this to make IsClientClosed do less work. + return len(fs) == 1 && len(fs[0].Topics) == 1 && len(fs[0].Topics[0].Partitions) == 1 && errors.Is(fs[0].Topics[0].Partitions[0].Err, ErrClientClosed) +} + +// Err0 returns the error at the 0th index fetch, topic, and partition. This +// can be used to quickly check if polling returned early because the client +// was closed or the context was canceled and is faster than performing a +// linear scan over all partitions with Err. When the client is closed or the +// context is canceled, fetches will contain only one partition whose Err field +// indicates the close / cancel. Note that this returns whatever the first +// error is, nil or non-nil, and does not check for a specific error value. +func (fs Fetches) Err0() error { + if len(fs) > 0 && len(fs[0].Topics) > 0 && len(fs[0].Topics[0].Partitions) > 0 { + return fs[0].Topics[0].Partitions[0].Err + } + return nil +} + +// Err returns the first error in all fetches, if any. This can be used to +// quickly check if the client is closed or your poll context was canceled, or +// to check if there's some other error that requires deeper investigation with +// EachError. This function performs a linear scan over all fetched partitions. +// It is recommended to always check all errors. If you would like to more +// quickly check ahead of time if a poll was canceled because of closing the +// client or canceling the context, you can use Err0. +func (fs Fetches) Err() error { + for _, f := range fs { + for i := range f.Topics { + ft := &f.Topics[i] + for j := range ft.Partitions { + fp := &ft.Partitions[j] + if fp.Err != nil { + return fp.Err + } + } + } + } + return nil +} + +// EachError calls fn for every partition that had a fetch error with the +// topic, partition, and error. 
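+//
+// As an illustrative sketch only (cl and ctx are assumed to be an existing
+// client and context; the error handling shown is not prescriptive), a poll
+// loop can drain errors with this function:
+//
+//	for {
+//		fetches := cl.PollFetches(ctx)
+//		if fetches.IsClientClosed() {
+//			return
+//		}
+//		fetches.EachError(func(t string, p int32, err error) {
+//			fmt.Printf("fetch error on topic %s partition %d: %v\n", t, p, err)
+//		})
+//		fetches.EachRecord(func(r *Record) {
+//			// process r
+//		})
+//	}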
+// +// This function has the same semantics as the Errors function; refer to the +// documentation on that function for what types of errors are possible. +func (fs Fetches) EachError(fn func(string, int32, error)) { + for _, f := range fs { + for i := range f.Topics { + ft := &f.Topics[i] + for j := range ft.Partitions { + fp := &ft.Partitions[j] + if fp.Err != nil { + fn(ft.Topic, fp.Partition, fp.Err) + } + } + } + } +} + +// RecordIter returns an iterator over all records in a fetch. +// +// Note that errors should be inspected as well. +func (fs Fetches) RecordIter() *FetchesRecordIter { + iter := &FetchesRecordIter{fetches: fs} + iter.prepareNext() + return iter +} + +// FetchesRecordIter iterates over records in a fetch. +type FetchesRecordIter struct { + fetches []Fetch + ti int // index to current topic in fetches[0] + pi int // index to current partition in current topic + ri int // index to current record in current partition +} + +// Done returns whether there are any more records to iterate over. +func (i *FetchesRecordIter) Done() bool { + return len(i.fetches) == 0 +} + +// Next returns the next record from a fetch. +func (i *FetchesRecordIter) Next() *Record { + next := i.fetches[0].Topics[i.ti].Partitions[i.pi].Records[i.ri] + i.ri++ + i.prepareNext() + return next +} + +func (i *FetchesRecordIter) prepareNext() { +beforeFetch0: + if len(i.fetches) == 0 { + return + } + + fetch0 := &i.fetches[0] +beforeTopic: + if i.ti >= len(fetch0.Topics) { + i.fetches = i.fetches[1:] + i.ti = 0 + goto beforeFetch0 + } + + topic := &fetch0.Topics[i.ti] +beforePartition: + if i.pi >= len(topic.Partitions) { + i.ti++ + i.pi = 0 + goto beforeTopic + } + + partition := &topic.Partitions[i.pi] + if i.ri >= len(partition.Records) { + i.pi++ + i.ri = 0 + goto beforePartition + } +} + +// EachPartition calls fn for each partition in Fetches. +// +// Partitions are not visited in any specific order, and a topic may be visited +// multiple times if it is spread across fetches. +func (fs Fetches) EachPartition(fn func(FetchTopicPartition)) { + for _, fetch := range fs { + for _, topic := range fetch.Topics { + for i := range topic.Partitions { + fn(FetchTopicPartition{ + Topic: topic.Topic, + FetchPartition: topic.Partitions[i], + }) + } + } + } +} + +// EachTopic calls fn for each topic in Fetches. +// +// This is a convenience function that groups all partitions for the same topic +// from many fetches into one FetchTopic. A map is internally allocated to +// group partitions per topic before calling fn. +func (fs Fetches) EachTopic(fn func(FetchTopic)) { + switch len(fs) { + case 0: + return + case 1: + for _, topic := range fs[0].Topics { + fn(topic) + } + return + } + + topics := make(map[string][]FetchPartition) + for _, fetch := range fs { + for _, topic := range fetch.Topics { + topics[topic.Topic] = append(topics[topic.Topic], topic.Partitions...) + } + } + + for topic, partitions := range topics { + fn(FetchTopic{ + topic, + [16]byte{}, + partitions, + }) + } +} + +// EachRecord calls fn for each record in Fetches. +// +// This is very similar to using a record iter, and is solely a convenience +// function depending on which style you prefer. +func (fs Fetches) EachRecord(fn func(*Record)) { + for iter := fs.RecordIter(); !iter.Done(); { + fn(iter.Next()) + } +} + +// Records returns all records in all fetches. +// +// This is a convenience function that does a single slice allocation. 
If you
+// can process records individually, it is far more efficient to use the Each
+// functions or the RecordIter.
+func (fs Fetches) Records() []*Record {
+ rs := make([]*Record, 0, fs.NumRecords())
+ fs.EachPartition(func(p FetchTopicPartition) {
+ rs = append(rs, p.Records...)
+ })
+ return rs
+}
+
+// NumRecords returns the total number of records across all fetched partitions.
+func (fs Fetches) NumRecords() (n int) {
+ fs.EachPartition(func(p FetchTopicPartition) {
+ n += len(p.Records)
+ })
+ return n
+}
+
+// Empty checks whether the fetch result is empty. This method is faster than NumRecords() == 0.
+func (fs Fetches) Empty() bool {
+ for i := range fs {
+ for j := range fs[i].Topics {
+ for k := range fs[i].Topics[j].Partitions {
+ if len(fs[i].Topics[j].Partitions[k].Records) > 0 {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// FetchTopicPartition is similar to FetchTopic, but for an individual
+// partition.
+type FetchTopicPartition struct {
+ // Topic is the topic this is for.
+ Topic string
+ // FetchPartition is an individual partition within this topic.
+ FetchPartition
+}
+
+// EachRecord calls fn for each record in the topic's partition.
+func (r *FetchTopicPartition) EachRecord(fn func(*Record)) {
+ for _, r := range r.Records {
+ fn(r)
+ }
+}
diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/record_formatter.go b/vendor/github.com/twmb/franz-go/pkg/kgo/record_formatter.go
new file mode 100644
index 00000000000..2f5d2ce3c33
--- /dev/null
+++ b/vendor/github.com/twmb/franz-go/pkg/kgo/record_formatter.go
@@ -0,0 +1,2246 @@
+package kgo
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/base64"
+ "encoding/binary"
+ "encoding/hex"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/twmb/franz-go/pkg/kbin"
+)
+
+////////////
+// WRITER //
+////////////
+
+// RecordFormatter formats records.
+type RecordFormatter struct {
+ calls atomicI64
+ fns []func([]byte, *FetchPartition, *Record) []byte
+}
+
+// AppendRecord appends a record to b given the parsed format and returns the
+// updated slice.
+func (f *RecordFormatter) AppendRecord(b []byte, r *Record) []byte {
+ for _, fn := range f.fns {
+ b = fn(b, nil, r)
+ }
+ return b
+}
+
+// AppendPartitionRecord appends a record and partition to b given the parsed
+// format and returns the updated slice.
+func (f *RecordFormatter) AppendPartitionRecord(b []byte, p *FetchPartition, r *Record) []byte {
+ for _, fn := range f.fns {
+ b = fn(b, p, r)
+ }
+ return b
+}
+
+// NewRecordFormatter returns a formatter for the given layout, or an error if
+// the layout is invalid.
+//
+// The formatter is very powerful; as such, there is a lot to describe. This
+// documentation attempts to be as succinct as possible.
+//
+// Similar to the fmt package, record formatting is based off of slash escapes
+// and percent "verbs" (copying fmt package lingo). Slashes are used for common
+// escapes,
+//
+// \t \n \r \\ \xNN
+//
+// printing tabs, newlines, carriage returns, slashes, and hex encoded
+// characters. 
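+//
+// As a minimal, illustrative sketch (the layout here is only an example; the
+// %-verbs are described below), a formatter can render records one per line:
+//
+//	f, err := NewRecordFormatter("%t %p %o %v\n")
+//	if err != nil {
+//		// handle the invalid layout
+//	}
+//	line := f.AppendRecord(nil, r) // topic, partition, offset, value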
+//
+// Percent encoding opts in to printing aspects of either a record or a fetch
+// partition:
+//
+// %t topic
+// %T topic length
+// %k key
+// %K key length
+// %v value
+// %V value length
+// %h begin the header specification
+// %H number of headers
+// %p partition
+// %o offset
+// %e leader epoch
+// %d timestamp (date, formatting described below)
+// %a record attributes (formatting required, described below)
+// %x producer id
+// %y producer epoch
+//
+// For AppendPartitionRecord, the formatter also understands the following three
+// formatting options:
+//
+// %[ partition log start offset
+// %| partition last stable offset
+// %] partition high watermark
+//
+// The formatter internally tracks the number of times AppendRecord or
+// AppendPartitionRecord have been called. The special option %i prints the
+// iteration / call count:
+//
+// %i format iteration number (starts at 1)
+//
+// Lastly, there are three escapes to print raw characters that are usually
+// used for formatting options:
+//
+// %% percent sign
+// %{ left brace (required if a brace is after another format option)
+// %} right brace
+//
+// # Header specification
+//
+// Specifying headers is essentially a primitive nested format option,
+// accepting the key and value escapes above:
+//
+// %K header key length
+// %k header key
+// %V header value length
+// %v header value
+//
+// For example, "%H %h{%k %v }" will print the number of headers, and then each
+// header key and value with a space after each.
+//
+// # Verb modifiers
+//
+// Most of the previous verb specifications can be modified by adding braces
+// with a given modifier, e.g., "%V{ascii}". All modifiers are described below.
+//
+// # Numbers
+//
+// All number verbs accept braces that control how the number is printed:
+//
+// %v{ascii} the default, print the number as ascii
+// %v{number} alias for ascii
+//
+// %v{hex64} print 16 hex characters for the number
+// %v{hex32} print 8 hex characters for the number
+// %v{hex16} print 4 hex characters for the number
+// %v{hex8} print 2 hex characters for the number
+// %v{hex4} print 1 hex character for the number
+// %v{hex} print as many hex characters as necessary for the number
+//
+// %v{big64} print the number in big endian uint64 format
+// %v{big32} print the number in big endian uint32 format
+// %v{big16} print the number in big endian uint16 format
+// %v{big8} alias for byte
+//
+// %v{little64} print the number in little endian uint64 format
+// %v{little32} print the number in little endian uint32 format
+// %v{little16} print the number in little endian uint16 format
+// %v{little8} alias for byte
+//
+// %v{byte} print the number as a single byte
+// %v{bool} print "true" if the number is non-zero, otherwise "false"
+//
+// All numbers are truncated as necessary per each given format.
+//
+// # Timestamps
+//
+// Timestamps can be specified in three formats: plain number formatting,
+// native Go timestamp formatting, or strftime formatting. Number formatting
+// follows the rules above using the millisecond timestamp value. Go and
+// strftime have further internal format options:
+//
+// %d{go##2006-01-02T15:04:05Z07:00##}
+// %d{strftime[%F]}
+//
+// An arbitrary number of pounds, braces, and brackets are understood before
+// beginning the actual timestamp formatting. For Go formatting, the format is
+// simply passed to the time package's AppendFormat function. For strftime, all
+// "man strftime" options are supported. Time is always in UTC. 
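+//
+// As an illustrative example, a layout of
+//
+//	%d{strftime[%F %T]} %v\n
+//
+// would print each record's UTC date and time followed by its value.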
+//
+// # Attributes
+//
+// Record attributes require formatting, where each formatting option selects
+// which attribute to print and how to print it.
+//
+// %a{compression}
+// %a{compression;number}
+// %a{compression;big64}
+// %a{compression;hex8}
+//
+// By default, prints the compression as text ("none", "gzip", ...).
+// Compression can be printed as a number with ";number", where number is any
+// number formatting option described above.
+//
+// %a{timestamp-type}
+// %a{timestamp-type;big64}
+//
+// Prints -1 for pre-0.10 records, 0 for client generated timestamps, and 1 for
+// broker generated. Number formatting can be controlled with ";number".
+//
+// %a{transactional-bit}
+// %a{transactional-bit;bool}
+//
+// Prints 1 if the record is a part of a transaction or 0 if it is not. Number
+// formatting can be controlled with ";number".
+//
+// %a{control-bit}
+// %a{control-bit;bool}
+//
+// Prints 1 if the record is a commit marker or 0 if it is not. Number
+// formatting can be controlled with ";number".
+//
+// # Text
+//
+// Topics, keys, and values have "base64", "base64raw", "hex", and "unpack"
+// formatting options:
+//
+// %t{hex}
+// %k{unpack{iIqQc.$}}
+// %v{base64}
+// %v{base64raw}
+//
+// Unpack formatting is inside of enclosing pounds, braces, or brackets, the
+// same way that timestamp formatting is understood. The syntax roughly follows
+// Python's struct packing/unpacking rules:
+//
+// x pad character (does not parse input)
+// < parse what follows as little endian
+// > parse what follows as big endian
+//
+// b signed byte
+// B unsigned byte
+// h int16 ("half word")
+// H uint16 ("half word")
+// i int32
+// I uint32
+// q int64 ("quad word")
+// Q uint64 ("quad word")
+//
+// c any character
+// . alias for c
+// s consume the rest of the input as a string
+// $ match the end of the line (append error string if anything remains)
+//
+// Unlike Python, a '<' or '>' can appear anywhere in the format string and
+// affects everything that follows. It is possible to switch endianness
+// multiple times. If the parser needs more data than is available, or if more
+// input remains after '$', an error message will be appended.
+func NewRecordFormatter(layout string) (*RecordFormatter, error) {
+ var f RecordFormatter
+
+ var literal []byte // non-formatted raw text to output
+ var i int
+ for len(layout) > 0 {
+ i++
+ c, size := utf8.DecodeRuneInString(layout)
+ rawc := layout[:size]
+ layout = layout[size:]
+ switch c {
+ default:
+ literal = append(literal, rawc...)
+ continue
+
+ case '\\':
+ c, n, err := parseLayoutSlash(layout)
+ if err != nil {
+ return nil, err
+ }
+ layout = layout[n:]
+ literal = append(literal, c)
+ continue
+
+ case '%':
+ }
+
+ if len(layout) == 0 {
+ return nil, errors.New("invalid escape sequence at end of layout string")
+ }
+
+ cNext, size := utf8.DecodeRuneInString(layout)
+ if cNext == '%' || cNext == '{' || cNext == '}' {
+ literal = append(literal, byte(cNext))
+ layout = layout[size:]
+ continue
+ }
+
+ var (
+ isOpenBrace = len(layout) > 2 && layout[1] == '{'
+ handledBrace bool
+ escaped = layout[0]
+ )
+ layout = layout[1:]
+
+ // We are entering a format string. If we have any built
+ // literal before, this is now raw text that we will append.
+ if len(literal) > 0 {
+ l := literal
+ literal = nil
+ f.fns = append(f.fns, func(b []byte, _ *FetchPartition, _ *Record) []byte { return append(b, l...) 
}) + } + + if isOpenBrace { // opening a brace: layout continues after + layout = layout[1:] + } + + switch escaped { + default: + return nil, fmt.Errorf("unknown escape sequence %%%s", string(escaped)) + + case 'T', 'K', 'V', 'H', 'p', 'o', 'e', 'i', 'x', 'y', '[', '|', ']': + // Numbers default to ascii, but we support a bunch of + // formatting options. We parse the format here, and + // then below is switching on which field to print. + var numfn func([]byte, int64) []byte + if handledBrace = isOpenBrace; handledBrace { + numfn2, n, err := parseNumWriteLayout(layout) + if err != nil { + return nil, err + } + layout = layout[n:] + numfn = numfn2 + } else { + numfn = writeNumASCII + } + switch escaped { + case 'T': + f.fns = append(f.fns, func(b []byte, _ *FetchPartition, r *Record) []byte { + return writeR(b, r, func(b []byte, r *Record) []byte { return numfn(b, int64(len(r.Topic))) }) + }) + case 'K': + f.fns = append(f.fns, func(b []byte, _ *FetchPartition, r *Record) []byte { + return writeR(b, r, func(b []byte, r *Record) []byte { return numfn(b, int64(len(r.Key))) }) + }) + case 'V': + f.fns = append(f.fns, func(b []byte, _ *FetchPartition, r *Record) []byte { + return writeR(b, r, func(b []byte, r *Record) []byte { return numfn(b, int64(len(r.Value))) }) + }) + case 'H': + f.fns = append(f.fns, func(b []byte, _ *FetchPartition, r *Record) []byte { + return writeR(b, r, func(b []byte, r *Record) []byte { return numfn(b, int64(len(r.Headers))) }) + }) + case 'p': + f.fns = append(f.fns, func(b []byte, _ *FetchPartition, r *Record) []byte { + return writeR(b, r, func(b []byte, r *Record) []byte { return numfn(b, int64(r.Partition)) }) + }) + case 'o': + f.fns = append(f.fns, func(b []byte, _ *FetchPartition, r *Record) []byte { + return writeR(b, r, func(b []byte, r *Record) []byte { return numfn(b, r.Offset) }) + }) + case 'e': + f.fns = append(f.fns, func(b []byte, _ *FetchPartition, r *Record) []byte { + return writeR(b, r, func(b []byte, r *Record) []byte { return numfn(b, int64(r.LeaderEpoch)) }) + }) + case 'i': + f.fns = append(f.fns, func(b []byte, _ *FetchPartition, _ *Record) []byte { + return numfn(b, f.calls.Add(1)) + }) + case 'x': + f.fns = append(f.fns, func(b []byte, _ *FetchPartition, r *Record) []byte { + return writeR(b, r, func(b []byte, r *Record) []byte { return numfn(b, r.ProducerID) }) + }) + case 'y': + f.fns = append(f.fns, func(b []byte, _ *FetchPartition, r *Record) []byte { + return writeR(b, r, func(b []byte, r *Record) []byte { return numfn(b, int64(r.ProducerEpoch)) }) + }) + case '[': + f.fns = append(f.fns, func(b []byte, p *FetchPartition, _ *Record) []byte { + return writeP(b, p, func(b []byte, p *FetchPartition) []byte { return numfn(b, p.LogStartOffset) }) + }) + case '|': + f.fns = append(f.fns, func(b []byte, p *FetchPartition, _ *Record) []byte { + return writeP(b, p, func(b []byte, p *FetchPartition) []byte { return numfn(b, p.LastStableOffset) }) + }) + case ']': + f.fns = append(f.fns, func(b []byte, p *FetchPartition, _ *Record) []byte { + return writeP(b, p, func(b []byte, p *FetchPartition) []byte { return numfn(b, p.HighWatermark) }) + }) + } + + case 't', 'k', 'v': + var appendFn func([]byte, []byte) []byte + if handledBrace = isOpenBrace; handledBrace { + switch { + case strings.HasPrefix(layout, "}"): + layout = layout[len("}"):] + appendFn = appendPlain + case strings.HasPrefix(layout, "base64}"): + appendFn = appendBase64 + layout = layout[len("base64}"):] + case strings.HasPrefix(layout, "base64raw}"): + appendFn = appendBase64raw 
+ layout = layout[len("base64raw}"):] + case strings.HasPrefix(layout, "hex}"): + appendFn = appendHex + layout = layout[len("hex}"):] + case strings.HasPrefix(layout, "unpack"): + unpack, rem, err := nomOpenClose(layout[len("unpack"):]) + if err != nil { + return nil, fmt.Errorf("unpack parse err: %v", err) + } + if len(rem) == 0 || rem[0] != '}' { + return nil, fmt.Errorf("unpack missing closing } in %q", layout) + } + layout = rem[1:] + appendFn, err = parseUnpack(unpack) + if err != nil { + return nil, fmt.Errorf("unpack formatting parse err: %v", err) + } + + default: + return nil, fmt.Errorf("unknown %%%s{ escape", string(escaped)) + } + } else { + appendFn = appendPlain + } + switch escaped { + case 't': + f.fns = append(f.fns, func(b []byte, _ *FetchPartition, r *Record) []byte { + return writeR(b, r, func(b []byte, r *Record) []byte { return appendFn(b, []byte(r.Topic)) }) + }) + case 'k': + f.fns = append(f.fns, func(b []byte, _ *FetchPartition, r *Record) []byte { + return writeR(b, r, func(b []byte, r *Record) []byte { return appendFn(b, r.Key) }) + }) + case 'v': + f.fns = append(f.fns, func(b []byte, _ *FetchPartition, r *Record) []byte { + return writeR(b, r, func(b []byte, r *Record) []byte { return appendFn(b, r.Value) }) + }) + } + + case 'a': + if !isOpenBrace { + return nil, errors.New("missing open brace sequence on %a signifying how attributes should be written") + } + handledBrace = true + + num := func(skipText string, rfn func(*Record) int64) error { + layout = layout[len(skipText):] + numfn, n, err := parseNumWriteLayout(layout) + if err != nil { + return err + } + layout = layout[n:] + f.fns = append(f.fns, func(b []byte, _ *FetchPartition, r *Record) []byte { + return writeR(b, r, func(b []byte, r *Record) []byte { return numfn(b, rfn(r)) }) + }) + return nil + } + bi64 := func(b bool) int64 { + if b { + return 1 + } + return 0 + } + + switch { + case strings.HasPrefix(layout, "compression}"): + layout = layout[len("compression}"):] + f.fns = append(f.fns, func(b []byte, _ *FetchPartition, r *Record) []byte { + return writeR(b, r, func(b []byte, r *Record) []byte { + switch codecType(r.Attrs.CompressionType()) { + case codecNone: + return append(b, "none"...) + case codecGzip: + return append(b, "gzip"...) + case codecSnappy: + return append(b, "snappy"...) + case codecLZ4: + return append(b, "lz4"...) + case codecZstd: + return append(b, "zstd"...) + default: + return append(b, "unknown"...) 
+ } + }) + }) + case strings.HasPrefix(layout, "compression;"): + if err := num("compression;", func(r *Record) int64 { return int64(r.Attrs.CompressionType()) }); err != nil { + return nil, err + } + + case strings.HasPrefix(layout, "timestamp-type}"): + layout = layout[len("timestamp-type}"):] + f.fns = append(f.fns, func(b []byte, _ *FetchPartition, r *Record) []byte { + return writeR(b, r, func(b []byte, r *Record) []byte { + return strconv.AppendInt(b, int64(r.Attrs.TimestampType()), 10) + }) + }) + case strings.HasPrefix(layout, "timestamp-type;"): + if err := num("timestamp-type;", func(r *Record) int64 { return int64(r.Attrs.TimestampType()) }); err != nil { + return nil, err + } + + case strings.HasPrefix(layout, "transactional-bit}"): + layout = layout[len("transactional-bit}"):] + f.fns = append(f.fns, func(b []byte, _ *FetchPartition, r *Record) []byte { + return writeR(b, r, func(b []byte, r *Record) []byte { + if r.Attrs.IsTransactional() { + return append(b, '1') + } + return append(b, '0') + }) + }) + case strings.HasPrefix(layout, "transactional-bit;"): + if err := num("transactional-bit;", func(r *Record) int64 { return bi64(r.Attrs.IsTransactional()) }); err != nil { + return nil, err + } + + case strings.HasPrefix(layout, "control-bit}"): + layout = layout[len("control-bit}"):] + f.fns = append(f.fns, func(b []byte, _ *FetchPartition, r *Record) []byte { + return writeR(b, r, func(b []byte, r *Record) []byte { + if r.Attrs.IsControl() { + return append(b, '1') + } + return append(b, '0') + }) + }) + case strings.HasPrefix(layout, "control-bit;"): + if err := num("control-bit;", func(r *Record) int64 { return bi64(r.Attrs.IsControl()) }); err != nil { + return nil, err + } + + default: + return nil, errors.New("unknown %a formatting") + } + + case 'h': + if !isOpenBrace { + return nil, errors.New("missing open brace sequence on %h signifying how headers are written") + } + handledBrace = true + // Headers can have their own internal braces, so we + // must look for a matching end brace. + braces := 1 + at := 0 + for braces != 0 && len(layout[at:]) > 0 { + switch layout[at] { + case '{': + if at > 0 && layout[at-1] != '%' { + braces++ + } + case '}': + if at > 0 && layout[at-1] != '%' { + braces-- + } + } + at++ + } + if braces > 0 { + return nil, fmt.Errorf("invalid header specification: missing closing brace in %q", layout) + } + + spec := layout[:at-1] + layout = layout[at:] + inf, err := NewRecordFormatter(spec) + if err != nil { + return nil, fmt.Errorf("invalid header specification %q: %v", spec, err) + } + + f.fns = append(f.fns, func(b []byte, _ *FetchPartition, r *Record) []byte { + reuse := new(Record) + for _, header := range r.Headers { + reuse.Key = []byte(header.Key) + reuse.Value = header.Value + b = inf.AppendRecord(b, reuse) + } + return b + }) + + case 'd': + // For datetime parsing, we support plain millis in any + // number format, strftime, or go formatting. We + // default to plain ascii millis. 
+ handledBrace = isOpenBrace
+ if !handledBrace {
+ f.fns = append(f.fns, func(b []byte, _ *FetchPartition, r *Record) []byte {
+ return writeR(b, r, func(b []byte, r *Record) []byte { return strconv.AppendInt(b, r.Timestamp.UnixNano()/1e6, 10) })
+ })
+ continue
+ }
+
+ switch {
+ case strings.HasPrefix(layout, "strftime"):
+ tfmt, rem, err := nomOpenClose(layout[len("strftime"):])
+ if err != nil {
+ return nil, fmt.Errorf("strftime parse err: %v", err)
+ }
+ if len(rem) == 0 || rem[0] != '}' {
+ return nil, fmt.Errorf("%%d{strftime missing closing } in %q", layout)
+ }
+ layout = rem[1:]
+ f.fns = append(f.fns, func(b []byte, _ *FetchPartition, r *Record) []byte {
+ return writeR(b, r, func(b []byte, r *Record) []byte { return strftimeAppendFormat(b, tfmt, r.Timestamp.UTC()) })
+ })
+
+ case strings.HasPrefix(layout, "go"):
+ tfmt, rem, err := nomOpenClose(layout[len("go"):])
+ if err != nil {
+ return nil, fmt.Errorf("go parse err: %v", err)
+ }
+ if len(rem) == 0 || rem[0] != '}' {
+ return nil, fmt.Errorf("%%d{go missing closing } in %q", layout)
+ }
+ layout = rem[1:]
+ f.fns = append(f.fns, func(b []byte, _ *FetchPartition, r *Record) []byte {
+ return writeR(b, r, func(b []byte, r *Record) []byte { return r.Timestamp.UTC().AppendFormat(b, tfmt) })
+ })
+
+ default:
+ numfn, n, err := parseNumWriteLayout(layout)
+ if err != nil {
+ return nil, fmt.Errorf("unknown %%d{ time specification in %q", layout)
+ }
+ layout = layout[n:]
+
+ f.fns = append(f.fns, func(b []byte, _ *FetchPartition, r *Record) []byte {
+ return writeR(b, r, func(b []byte, r *Record) []byte { return numfn(b, r.Timestamp.UnixNano()/1e6) })
+ })
+ }
+ }
+
+ // If we opened a brace, we require a closing brace.
+ if isOpenBrace && !handledBrace {
+ return nil, fmt.Errorf("unhandled open brace %q", layout)
+ }
+ }
+
+ // Ensure we print any trailing text.
+ if len(literal) > 0 {
+ f.fns = append(f.fns, func(b []byte, _ *FetchPartition, _ *Record) []byte { return append(b, literal...) })
+ }
+
+ return &f, nil
+}
+
+func appendPlain(dst, src []byte) []byte {
+ return append(dst, src...)
+}
+
+func appendBase64(dst, src []byte) []byte {
+ fin := append(dst, make([]byte, base64.StdEncoding.EncodedLen(len(src)))...)
+ base64.StdEncoding.Encode(fin[len(dst):], src)
+ return fin
+}
+
+func appendBase64raw(dst, src []byte) []byte {
+ fin := append(dst, make([]byte, base64.RawStdEncoding.EncodedLen(len(src)))...)
+ base64.RawStdEncoding.Encode(fin[len(dst):], src)
+ return fin
+}
+
+func appendHex(dst, src []byte) []byte {
+ fin := append(dst, make([]byte, hex.EncodedLen(len(src)))...)
+ hex.Encode(fin[len(dst):], src)
+ return fin
+}
+
+// nomOpenClose extracts a middle section from a string beginning with repeated
+// delimiters and returns it along with the remaining (past end delimiters) string. 
+func nomOpenClose(src string) (middle, remaining string, err error) { + if len(src) == 0 { + return "", "", errors.New("empty layout") + } + delim := src[0] + openers := 1 + for openers < len(src) && src[openers] == delim { + openers++ + } + switch delim { + case '{': + delim = '}' + case '[': + delim = ']' + case '(': + delim = ')' + } + src = src[openers:] + end := strings.Repeat(string(delim), openers) + idx := strings.Index(src, end) + if idx < 0 { + return "", "", fmt.Errorf("missing end delim %q", end) + } + middle = src[:idx] + return middle, src[idx+len(end):], nil +} + +func parseUnpack(layout string) (func([]byte, []byte) []byte, error) { + // take dst, src; return dst + // %!q(eof) + // take 8 bytes, decode it, print decoded + var fns []func([]byte, []byte) ([]byte, int) + little := true + var sawEnd bool + for i := range layout { + if sawEnd { + return nil, errors.New("already saw end-of-input parsing character") + } + + var need int + var signed bool + cs := layout[i : i+1] + switch cs[0] { + case 'x': + continue + + case '<': + little = true + continue + case '>': + little = false + continue + + case 'b': + need = 1 + signed = true + case 'B': + need = 1 + case 'h': + need = 2 + signed = true + case 'H': + need = 2 + case 'i': + need = 4 + signed = true + case 'I': + need = 4 + case 'q': + need = 8 + signed = true + case 'Q': + need = 8 + + case 'c', '.': + fns = append(fns, func(dst, src []byte) ([]byte, int) { + if len(src) < 1 { + return append(dst, "%!c(no bytes available)"...), 0 + } + return append(dst, src[0]), 1 + }) + continue + + case 's': + sawEnd = true + fns = append(fns, func(dst, src []byte) ([]byte, int) { + return append(dst, src...), len(src) + }) + continue + + case '$': + fns = append(fns, func(dst, src []byte) ([]byte, int) { + if len(src) != 0 { + dst = append(dst, "%!$(not end-of-input)"...) 
+ } + return dst, len(src) + }) + sawEnd = true + continue + + default: + return nil, fmt.Errorf("invalid unpack parsing character %s", cs) + } + + islittle := little + fns = append(fns, func(dst, src []byte) ([]byte, int) { + if len(src) < need { + return append(dst, fmt.Sprintf("%%!%%s(have %d bytes, need %d)", len(src), need)...), len(src) + } + + var ul, ub uint64 + var il, ib int64 + switch need { + case 1: + ul = uint64(src[0]) + ub = ul + il = int64(byte(ul)) + ib = int64(byte(ub)) + case 2: + ul = uint64(binary.LittleEndian.Uint16(src)) + ub = uint64(binary.BigEndian.Uint16(src)) + il = int64(int16(ul)) + ib = int64(int16(ub)) + case 4: + ul = uint64(binary.LittleEndian.Uint32(src)) + ub = uint64(binary.BigEndian.Uint32(src)) + il = int64(int32(ul)) + ib = int64(int32(ub)) + case 8: + ul = binary.LittleEndian.Uint64(src) + ub = binary.BigEndian.Uint64(src) + il = int64(ul) + ib = int64(ub) + } + u := ub + i := ib + if islittle { + u = ul + i = il + } + + if signed { + return strconv.AppendInt(dst, i, 10), need + } + return strconv.AppendUint(dst, u, 10), need + }) + } + + return func(dst, src []byte) []byte { + for _, fn := range fns { + var n int + dst, n = fn(dst, src) + src = src[n:] + } + return dst + }, nil +} + +func parseNumWriteLayout(layout string) (func([]byte, int64) []byte, int, error) { + braceEnd := strings.IndexByte(layout, '}') + if braceEnd == -1 { + return nil, 0, errors.New("missing brace end } to close number format specification") + } + end := braceEnd + 1 + switch layout = layout[:braceEnd]; layout { + case "ascii", "number": + return writeNumASCII, end, nil + case "hex64": + return writeNumHex64, end, nil + case "hex32": + return writeNumHex32, end, nil + case "hex16": + return writeNumHex16, end, nil + case "hex8": + return writeNumHex8, end, nil + case "hex4": + return writeNumHex4, end, nil + case "hex": + return writeNumHex, end, nil + case "big64": + return writeNumBig64, end, nil + case "big32": + return writeNumBig32, end, nil + case "big16": + return writeNumBig16, end, nil + case "byte", "big8", "little8": + return writeNumByte, end, nil + case "little64": + return writeNumLittle64, end, nil + case "little32": + return writeNumLittle32, end, nil + case "little16": + return writeNumLittle16, end, nil + case "bool": + return writeNumBool, end, nil + default: + return nil, 0, fmt.Errorf("invalid output number layout %q", layout) + } +} + +func writeR(b []byte, r *Record, fn func([]byte, *Record) []byte) []byte { + if r == nil { + return append(b, ""...) + } + return fn(b, r) +} + +func writeP(b []byte, p *FetchPartition, fn func([]byte, *FetchPartition) []byte) []byte { + if p == nil { + return append(b, ""...) 
+ } + return fn(b, p) +} +func writeNumASCII(b []byte, n int64) []byte { return strconv.AppendInt(b, n, 10) } + +const hexc = "0123456789abcdef" + +func writeNumHex64(b []byte, n int64) []byte { + u := uint64(n) + return append(b, + hexc[(u>>60)&0xf], + hexc[(u>>56)&0xf], + hexc[(u>>52)&0xf], + hexc[(u>>48)&0xf], + hexc[(u>>44)&0xf], + hexc[(u>>40)&0xf], + hexc[(u>>36)&0xf], + hexc[(u>>32)&0xf], + hexc[(u>>28)&0xf], + hexc[(u>>24)&0xf], + hexc[(u>>20)&0xf], + hexc[(u>>16)&0xf], + hexc[(u>>12)&0xf], + hexc[(u>>8)&0xf], + hexc[(u>>4)&0xf], + hexc[u&0xf], + ) +} + +func writeNumHex32(b []byte, n int64) []byte { + u := uint64(n) + return append(b, + hexc[(u>>28)&0xf], + hexc[(u>>24)&0xf], + hexc[(u>>20)&0xf], + hexc[(u>>16)&0xf], + hexc[(u>>12)&0xf], + hexc[(u>>8)&0xf], + hexc[(u>>4)&0xf], + hexc[u&0xf], + ) +} + +func writeNumHex16(b []byte, n int64) []byte { + u := uint64(n) + return append(b, + hexc[(u>>12)&0xf], + hexc[(u>>8)&0xf], + hexc[(u>>4)&0xf], + hexc[u&0xf], + ) +} + +func writeNumHex8(b []byte, n int64) []byte { + u := uint64(n) + return append(b, + hexc[(u>>4)&0xf], + hexc[u&0xf], + ) +} + +func writeNumHex4(b []byte, n int64) []byte { + u := uint64(n) + return append(b, + hexc[u&0xf], + ) +} + +func writeNumHex(b []byte, n int64) []byte { + return strconv.AppendUint(b, uint64(n), 16) +} + +func writeNumBig64(b []byte, n int64) []byte { + u := uint64(n) + return append(b, byte(u>>56), byte(u>>48), byte(u>>40), byte(u>>32), byte(u>>24), byte(u>>16), byte(u>>8), byte(u)) +} + +func writeNumLittle64(b []byte, n int64) []byte { + u := uint64(n) + return append(b, byte(u), byte(u>>8), byte(u>>16), byte(u>>24), byte(u>>32), byte(u>>40), byte(u>>48), byte(u>>56)) +} + +func writeNumBig32(b []byte, n int64) []byte { + u := uint64(n) + return append(b, byte(u>>24), byte(u>>16), byte(u>>8), byte(u)) +} + +func writeNumLittle32(b []byte, n int64) []byte { + u := uint64(n) + return append(b, byte(u), byte(u>>8), byte(u>>16), byte(u>>24)) +} +func writeNumBig16(b []byte, n int64) []byte { u := uint64(n); return append(b, byte(u>>8), byte(u)) } +func writeNumLittle16(b []byte, n int64) []byte { + u := uint64(n) + return append(b, byte(u), byte(u>>8)) +} +func writeNumByte(b []byte, n int64) []byte { u := uint64(n); return append(b, byte(u)) } + +func writeNumBool(b []byte, n int64) []byte { + if n == 0 { + return append(b, "false"...) + } + return append(b, "true"...) +} + +//////////// +// READER // +//////////// + +// RecordReader reads records from an io.Reader. +type RecordReader struct { + r *bufio.Reader + + buf []byte + fns []readParse + + done bool +} + +// NewRecordReader returns a record reader for the given layout, or an error if +// the layout is invalid. +// +// Similar to the RecordFormatter, the RecordReader parsing is quite powerful. +// There is a bit less to describe in comparison to RecordFormatter, but still, +// this documentation attempts to be as succinct as possible. +// +// Similar to the fmt package, record parsing is based off of slash escapes and +// percent "verbs" (copying fmt package lingo). Slashes are used for common +// escapes, +// +// \t \n \r \\ \xNN +// +// reading tabs, newlines, carriage returns, slashes, and hex encoded +// characters. 
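+//
+// As a minimal, illustrative sketch (the input source and layout are only
+// examples; the %-verbs are described below), newline-delimited values can be
+// read back into records:
+//
+//	rr, err := NewRecordReader(os.Stdin, "%v\n")
+//	if err != nil {
+//		// handle the invalid layout
+//	}
+//	for {
+//		rec, err := rr.ReadRecord()
+//		if err != nil {
+//			break // io.EOF once the input is exhausted
+//		}
+//		// use rec.Value
+//	}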
+//
+// Percent encoding reads into specific values of a Record:
+//
+// %t topic
+// %T topic length
+// %k key
+// %K key length
+// %v value
+// %V value length
+// %h begin the header specification
+// %H number of headers
+// %p partition
+// %o offset
+// %e leader epoch
+// %d timestamp
+// %x producer id
+// %y producer epoch
+//
+// If using length / number verbs (i.e., "sized" verbs), they must occur before
+// what they are sizing.
+//
+// There are three escapes to parse raw characters, rather than opting into
+// some formatting option:
+//
+// %% percent sign
+// %{ left brace
+// %} right brace
+//
+// Unlike record formatting, timestamps can only be read as numbers because Go
+// or strftime formatting can both be variable length and do not play too well
+// with delimiters. Timestamp numbers are read as milliseconds.
+//
+// # Numbers
+//
+// All size numbers can be parsed in the following ways:
+//
+// %v{ascii} parse numeric digits until a non-numeric
+// %v{number} alias for ascii
+//
+// %v{hex64} read 16 hex characters for the number
+// %v{hex32} read 8 hex characters for the number
+// %v{hex16} read 4 hex characters for the number
+// %v{hex8} read 2 hex characters for the number
+// %v{hex4} read 1 hex character for the number
+//
+// %v{big64} read the number as big endian uint64 format
+// %v{big32} read the number as big endian uint32 format
+// %v{big16} read the number as big endian uint16 format
+// %v{big8} alias for byte
+//
+// %v{little64} read the number as little endian uint64 format
+// %v{little32} read the number as little endian uint32 format
+// %v{little16} read the number as little endian uint16 format
+// %v{little8} read the number as a byte
+//
+// %v{byte} read the number as a byte
+// %v{bool} read "true" as 1, "false" as 0
+// %v{3} read 3 characters (any number)
+//
+// # Header specification
+//
+// Similar to number formatting, headers are parsed using a nested primitive
+// format option, accepting the key and value escapes previously mentioned.
+//
+// # Text
+//
+// Topics, keys, and values can be decoded using "base64", "hex", and "json"
+// formatting options. Any size specification is the size of the encoded value
+// actually being read (i.e., size as seen, not size when decoded). JSON values
+// are compacted after being read.
+//
+// %T%t{hex} - 4abcd reads four hex characters "abcd"
+// %V%v{base64} - 2z9 reads two base64 characters "z9"
+// %v{json} %k - {"foo" : "bar"} foo reads a JSON object and then "foo"
+//
+// As well, these text options can be parsed with regular expressions:
+//
+// %k{re[\d*]}%v{re[\s+]}
+func NewRecordReader(reader io.Reader, layout string) (*RecordReader, error) {
+ r := &RecordReader{r: bufio.NewReader(reader)}
+ if err := r.parseReadLayout(layout); err != nil {
+ return nil, err
+ }
+ return r, nil
+}
+
+// ReadRecord reads the next record in the reader and returns it, or returns a
+// parsing error.
+//
+// This will return io.EOF only if the underlying reader returns io.EOF at the
+// start of a new record. If an io.EOF is returned mid record, this returns
+// io.ErrUnexpectedEOF. It is expected for this function to be called until it
+// returns io.EOF.
+func (r *RecordReader) ReadRecord() (*Record, error) {
+ rec := new(Record)
+ return rec, r.ReadRecordInto(rec)
+}
+
+// ReadRecordInto reads the next record into the given record and returns any
+// parsing error.
+//
+// This will return io.EOF only if the underlying reader returns io.EOF at the
+// start of a new record. 
If an io.EOF is returned mid record, this returns +// io.ErrUnexpectedEOF. It is expected for this function to be called until it +// returns io.EOF. +func (r *RecordReader) ReadRecordInto(rec *Record) error { + if r.done { + return io.EOF + } + return r.next(rec) +} + +// SetReader replaces the underlying reader with the given reader. +func (r *RecordReader) SetReader(reader io.Reader) { + r.r = bufio.NewReader(reader) + r.done = false +} + +const ( + parsesTopic parseRecordBits = 1 << iota + parsesTopicSize + parsesKey + parsesKeySize + parsesValue + parsesValueSize + parsesHeaders + parsesHeadersNum +) + +// The record reading format must be either entirely sized or entirely unsized. +// This type helps us track what's what. +type parseRecordBits uint8 + +func (p *parseRecordBits) set(r parseRecordBits) { *p |= r } +func (p parseRecordBits) has(r parseRecordBits) bool { return p&r != 0 } + +func (r *RecordReader) parseReadLayout(layout string) error { + if len(layout) == 0 { + return errors.New("RecordReader: invalid empty format") + } + + var ( + // If we are reading by size, we parse the layout size into one + // of these variables. When reading, we use the captured + // variable's value. + topicSize = new(uint64) + keySize = new(uint64) + valueSize = new(uint64) + headersNum = new(uint64) + + bits parseRecordBits + + literal []byte // raw literal we are currently working on + addLiteral = func() { + if len(r.fns) > 0 && r.fns[len(r.fns)-1].read.empty() { + r.fns[len(r.fns)-1].read.delim = literal + } else if len(literal) > 0 { + r.fns = append(r.fns, readParse{ + read: readKind{exact: literal}, + }) + } + literal = nil + } + ) + + for len(layout) > 0 { + c, size := utf8.DecodeRuneInString(layout) + rawc := layout[:size] + layout = layout[size:] + switch c { + default: + literal = append(literal, rawc...) + continue + + case '\\': + c, n, err := parseLayoutSlash(layout) + if err != nil { + return err + } + layout = layout[n:] + literal = append(literal, c) + continue + + case '%': + } + + if len(layout) == 0 { + literal = append(literal, rawc...) 
+ continue + } + + cNext, size := utf8.DecodeRuneInString(layout) + if cNext == '%' || cNext == '{' || cNext == '}' { + literal = append(literal, byte(cNext)) + layout = layout[size:] + continue + } + + var ( + isOpenBrace = len(layout) > 2 && layout[1] == '{' + handledBrace bool + escaped = layout[0] + ) + layout = layout[1:] + addLiteral() + + if isOpenBrace { // opening a brace: layout continues after + layout = layout[1:] + } + + switch escaped { + default: + return fmt.Errorf("unknown percent escape sequence %q", layout[:1]) + + case 'T', 'K', 'V', 'H': + var dst *uint64 + var bit parseRecordBits + switch escaped { + case 'T': + dst, bit = topicSize, parsesTopicSize + case 'K': + dst, bit = keySize, parsesKeySize + case 'V': + dst, bit = valueSize, parsesValueSize + case 'H': + dst, bit = headersNum, parsesHeadersNum + } + if bits.has(bit) { + return fmt.Errorf("%%%s is doubly specified", string(escaped)) + } + if bits.has(bit >> 1) { + return fmt.Errorf("size specification %%%s cannot come after value specification %%%s", string(escaped), strings.ToLower(string(escaped))) + } + bits.set(bit) + fn, n, err := r.parseReadSize("ascii", dst, false) + if handledBrace = isOpenBrace; handledBrace { + fn, n, err = r.parseReadSize(layout, dst, true) + } + if err != nil { + return fmt.Errorf("unable to parse %%%s: %s", string(escaped), err) + } + layout = layout[n:] + r.fns = append(r.fns, fn) + + case 'p', 'o', 'e', 'd', 'x', 'y': + dst := new(uint64) + fn, n, err := r.parseReadSize("ascii", dst, false) + if handledBrace = isOpenBrace; handledBrace { + fn, n, err = r.parseReadSize(layout, dst, true) + } + if err != nil { + return fmt.Errorf("unable to parse %%%s: %s", string(escaped), err) + } + layout = layout[n:] + numParse := fn.parse + switch escaped { + case 'p': + fn.parse = func(b []byte, rec *Record) error { + if err := numParse(b, nil); err != nil { + return err + } + rec.Partition = int32(*dst) + return nil + } + case 'o': + fn.parse = func(b []byte, rec *Record) error { + if err := numParse(b, nil); err != nil { + return err + } + rec.Offset = int64(*dst) + return nil + } + case 'e': + fn.parse = func(b []byte, rec *Record) error { + if err := numParse(b, nil); err != nil { + return err + } + rec.LeaderEpoch = int32(*dst) + return nil + } + case 'd': + fn.parse = func(b []byte, rec *Record) error { + if err := numParse(b, nil); err != nil { + return err + } + rec.Timestamp = time.Unix(0, int64(*dst)*1e6) + return nil + } + case 'x': + fn.parse = func(b []byte, rec *Record) error { + if err := numParse(b, nil); err != nil { + return err + } + rec.ProducerID = int64(*dst) + return nil + } + case 'y': + fn.parse = func(b []byte, rec *Record) error { + if err := numParse(b, nil); err != nil { + return err + } + rec.ProducerEpoch = int16(*dst) + return nil + } + } + r.fns = append(r.fns, fn) + + case 't', 'k', 'v': + var decodeFn func([]byte) ([]byte, error) + var re *regexp.Regexp + var isJson bool + if handledBrace = isOpenBrace; handledBrace { + switch { + case strings.HasPrefix(layout, "}"): + layout = layout[len("}"):] + case strings.HasPrefix(layout, "base64}"): + decodeFn = decodeBase64 + layout = layout[len("base64}"):] + case strings.HasPrefix(layout, "hex}"): + decodeFn = decodeHex + layout = layout[len("hex}"):] + case strings.HasPrefix(layout, "json}"): + isJson = true + decodeFn = func(b []byte) ([]byte, error) { + var buf bytes.Buffer + err := json.Compact(&buf, b) + return buf.Bytes(), err + } + layout = layout[len("json}"):] + case strings.HasPrefix(layout, "re"): + 
restr, rem, err := nomOpenClose(layout[len("re"):])
+ if err != nil {
+ return fmt.Errorf("re parse err: %v", err)
+ }
+ if len(rem) == 0 || rem[0] != '}' {
+ return fmt.Errorf("re missing closing } in %q", layout)
+ }
+ layout = rem[1:]
+ if !strings.HasPrefix(restr, "^") {
+ restr = "^" + restr
+ }
+ re, err = regexp.Compile(restr)
+ if err != nil {
+ return fmt.Errorf("re parse err: %v", err)
+ }
+
+ default:
+ return fmt.Errorf("unknown %%%s{ escape", string(escaped))
+ }
+ }
+
+ var bit, bitSize parseRecordBits
+ var inner func([]byte, *Record)
+ var size *uint64
+ switch escaped {
+ case 't':
+ bit, bitSize, size = parsesTopic, parsesTopicSize, topicSize
+ inner = func(b []byte, r *Record) { r.Topic = string(b) }
+ case 'k':
+ bit, bitSize, size = parsesKey, parsesKeySize, keySize
+ inner = func(b []byte, r *Record) { r.Key = dupslice(b) }
+ case 'v':
+ bit, bitSize, size = parsesValue, parsesValueSize, valueSize
+ inner = func(b []byte, r *Record) { r.Value = dupslice(b) }
+ }
+
+ fn := readParse{parse: func(b []byte, r *Record) error {
+ if decodeFn != nil {
+ dec, err := decodeFn(b)
+ if err != nil {
+ return err
+ }
+ b = dec
+ }
+ inner(b, r)
+ return nil
+ }}
+ bits.set(bit)
+ if bits.has(bitSize) {
+ if re != nil {
+ return errors.New("cannot specify exact size and regular expression")
+ }
+ if isJson {
+ return errors.New("cannot specify exact size and json")
+ }
+ fn.read = readKind{sizefn: func() int { return int(*size) }}
+ } else if re != nil {
+ fn.read = readKind{re: re}
+ } else if isJson {
+ fn.read = readKind{condition: new(jsonReader).read}
+ }
+ r.fns = append(r.fns, fn)
+
+ case 'h':
+ bits.set(parsesHeaders)
+ if !bits.has(parsesHeadersNum) {
+ return errors.New("missing header count specification %H before header specification %h")
+ }
+ if !isOpenBrace {
+ return errors.New("missing open brace sequence on %h signifying how headers are encoded")
+ }
+ handledBrace = true
+ // Similar to above, headers can have their own
+ // internal braces, so we look for a matching end.
+ braces := 1
+ at := 0
+ for braces != 0 && len(layout[at:]) > 0 {
+ switch layout[at] {
+ case '{':
+ if at > 0 && layout[at-1] != '%' {
+ braces++
+ }
+ case '}':
+ if at > 0 && layout[at-1] != '%' {
+ braces--
+ }
+ }
+ at++
+ }
+ if braces > 0 {
+ return fmt.Errorf("invalid header specification: missing closing brace in %q", layout)
+ }
+
+ // We parse the header specification recursively, but
+ // we require that it is sized and contains only keys
+ // and values. Checking the delimiter checks sizing.
+ var inr RecordReader
+ if err := inr.parseReadLayout(layout[:at-1]); err != nil {
+ return fmt.Errorf("invalid header specification: %v", err)
+ }
+ layout = layout[at:]
+
+ // To parse headers, we save the inner reader's parsing
+ // function, stash the current record's key/value before
+ // parsing, and then capture the key/value as a header. 
+ r.fns = append(r.fns, readParse{read: readKind{handoff: func(r *RecordReader, rec *Record) error { + k, v := rec.Key, rec.Value + defer func() { rec.Key, rec.Value = k, v }() + inr.r = r.r + for i := uint64(0); i < *headersNum; i++ { + rec.Key, rec.Value = nil, nil + if err := inr.next(rec); err != nil { + return err + } + rec.Headers = append(rec.Headers, RecordHeader{Key: string(rec.Key), Value: rec.Value}) + } + return nil + }}}) + } + + if isOpenBrace && !handledBrace { + return fmt.Errorf("unhandled open brace %q", layout) + } + } + + addLiteral() + + // We must sort noreads to the front, we use this guarantee when + // reading to handle EOF properly. + var noreads, reads []readParse + for _, fn := range r.fns { + if fn.read.noread { + noreads = append(noreads, fn) + } else { + reads = append(reads, fn) + } + } + r.fns = make([]readParse, 0, len(noreads)+len(reads)) + r.fns = append(r.fns, noreads...) + r.fns = append(r.fns, reads...) + + return nil +} + +// Returns a function that parses a number from the internal reader into dst. +// +// If needBrace is true, the user is specifying how to read the number, +// otherwise we default to ascii. Reading ascii requires us to peek at bytes +// until we get to a non-number byte. +func (*RecordReader) parseReadSize(layout string, dst *uint64, needBrace bool) (readParse, int, error) { + var end int + if needBrace { + braceEnd := strings.IndexByte(layout, '}') + if braceEnd == -1 { + return readParse{}, 0, errors.New("missing brace end } to close number size specification") + } + layout = layout[:braceEnd] + end = braceEnd + 1 + } + + switch layout { + default: + num, err := strconv.Atoi(layout) + if err != nil { + return readParse{}, 0, fmt.Errorf("unrecognized number reading layout %q: %v", layout, err) + } + if num <= 0 { + return readParse{}, 0, fmt.Errorf("invalid zero or negative number %q when parsing read size", layout) + } + return readParse{ + readKind{noread: true}, + func([]byte, *Record) error { *dst = uint64(num); return nil }, + }, end, nil + + case "ascii", "number": + return readParse{ + readKind{condition: func(b byte) int8 { + if b < '0' || b > '9' { + return -1 + } + return 2 // ignore EOF if we hit it after this + }}, + func(b []byte, _ *Record) (err error) { + *dst, err = strconv.ParseUint(kbin.UnsafeString(b), 10, 64) + return err + }, + }, end, nil + + case "big64": + return readParse{ + readKind{size: 8}, + func(b []byte, _ *Record) error { *dst = binary.BigEndian.Uint64(b); return nil }, + }, end, nil + case "big32": + return readParse{ + readKind{size: 4}, + func(b []byte, _ *Record) error { *dst = uint64(binary.BigEndian.Uint32(b)); return nil }, + }, end, nil + case "big16": + return readParse{ + readKind{size: 2}, + func(b []byte, _ *Record) error { *dst = uint64(binary.BigEndian.Uint16(b)); return nil }, + }, end, nil + + case "little64": + return readParse{ + readKind{size: 8}, + func(b []byte, _ *Record) error { *dst = binary.LittleEndian.Uint64(b); return nil }, + }, end, nil + case "little32": + return readParse{ + readKind{size: 4}, + func(b []byte, _ *Record) error { *dst = uint64(binary.LittleEndian.Uint32(b)); return nil }, + }, end, nil + case "little16": + return readParse{ + readKind{size: 2}, + func(b []byte, _ *Record) error { *dst = uint64(binary.LittleEndian.Uint16(b)); return nil }, + }, end, nil + + case "byte", "big8", "little8": + return readParse{ + readKind{size: 1}, + func(b []byte, _ *Record) error { *dst = uint64(b[0]); return nil }, + }, end, nil + + case "hex64": + return readParse{ + 
readKind{size: 16}, + func(b []byte, _ *Record) (err error) { + *dst, err = strconv.ParseUint(kbin.UnsafeString(b), 16, 64) + return err + }, + }, end, nil + case "hex32": + return readParse{ + readKind{size: 8}, + func(b []byte, _ *Record) (err error) { + *dst, err = strconv.ParseUint(kbin.UnsafeString(b), 16, 64) + return err + }, + }, end, nil + case "hex16": + return readParse{ + readKind{size: 4}, + func(b []byte, _ *Record) (err error) { + *dst, err = strconv.ParseUint(kbin.UnsafeString(b), 16, 64) + return err + }, + }, end, nil + case "hex8": + return readParse{ + readKind{size: 2}, + func(b []byte, _ *Record) (err error) { + *dst, err = strconv.ParseUint(kbin.UnsafeString(b), 16, 64) + return err + }, + }, end, nil + case "hex4": + return readParse{ + readKind{size: 1}, + func(b []byte, _ *Record) (err error) { + *dst, err = strconv.ParseUint(kbin.UnsafeString(b), 16, 64) + return err + }, + }, end, nil + + case "bool": + const ( + stateUnknown uint8 = iota + stateTrue + stateFalse + ) + var state uint8 + var last byte + return readParse{ + readKind{condition: func(b byte) (done int8) { + defer func() { + if done <= 0 { + state = stateUnknown + last = 0 + } + }() + + switch state { + default: // stateUnknown + if b == 't' { + state = stateTrue + last = b + return 1 + } else if b == 'f' { + state = stateFalse + last = b + return 1 + } + return -1 + + case stateTrue: + if last == 't' && b == 'r' || last == 'r' && b == 'u' { + last = b + return 1 + } else if last == 'u' && b == 'e' { + return 0 + } + return -1 + + case stateFalse: + if last == 'f' && b == 'a' || last == 'a' && b == 'l' || last == 'l' && b == 's' { + last = b + return 1 + } else if last == 's' && b == 'e' { + return 0 + } + return -1 + } + }}, + func(b []byte, _ *Record) error { + switch string(b) { + case "true": + *dst = 1 + case "false": + *dst = 0 + default: + return fmt.Errorf("invalid bool %s", b) + } + return nil + }, + }, end, nil + } +} + +func decodeBase64(b []byte) ([]byte, error) { + n, err := base64.StdEncoding.Decode(b[:base64.StdEncoding.DecodedLen(len(b))], b) + return b[:n], err +} + +func decodeHex(b []byte) ([]byte, error) { + n, err := hex.Decode(b[:hex.DecodedLen(len(b))], b) + return b[:n], err +} + +type readKind struct { + noread bool + exact []byte + condition func(byte) int8 // -2: error, -1: stop, do not consume input; 0: stop, consume input; 1: keep going, consume input, 2: keep going, consume input, can EOF + size int + sizefn func() int + handoff func(*RecordReader, *Record) error + delim []byte + re *regexp.Regexp +} + +func (r *readKind) empty() bool { + return !r.noread && + r.exact == nil && + r.condition == nil && + r.size == 0 && + r.sizefn == nil && + r.handoff == nil && + r.delim == nil && + r.re == nil +} + +type readParse struct { + read readKind + parse func([]byte, *Record) error +} + +func dupslice(b []byte) []byte { + if len(b) == 0 { + return nil + } + dup := make([]byte, len(b)) + copy(dup, b) + return dup +} + +func (r *RecordReader) next(rec *Record) error { + for i, fn := range r.fns { + r.buf = r.buf[:0] + + var err error + switch { + case fn.read.noread: + // do nothing + case fn.read.exact != nil: + err = r.readExact(fn.read.exact) + case fn.read.condition != nil: + err = r.readCondition(fn.read.condition) + case fn.read.size > 0: + err = r.readSize(fn.read.size) + case fn.read.sizefn != nil: + err = r.readSize(fn.read.sizefn()) + case fn.read.handoff != nil: + err = fn.read.handoff(r, rec) + case fn.read.re != nil: + err = r.readRe(fn.read.re) + default: + err = 
r.readDelim(fn.read.delim) // we *always* fall back to delim parsing + } + + switch err { + default: + return err + case nil: + case io.EOF, io.ErrUnexpectedEOF: + r.done = true + // We guarantee that all noread parses are at + // the front, so if we io.EOF on the first + // non-noread, then we bubble it up. + if len(r.buf) == 0 && (i == 0 || r.fns[i-1].read.noread) { + return io.EOF + } + if i != len(r.fns)-1 || err == io.ErrUnexpectedEOF { + return io.ErrUnexpectedEOF + } + } + + if fn.parse == nil { + continue + } + + if err := fn.parse(r.buf, rec); err != nil { + return err + } + } + return nil +} + +func (r *RecordReader) readCondition(fn func(byte) int8) error { + var ignoreEOF bool + for { + peek, err := r.r.Peek(1) + if err != nil { + if err == io.EOF && ignoreEOF { + err = nil + } + return err + } + ignoreEOF = false + c := peek[0] + switch fn(c) { + case -2: + return fmt.Errorf("invalid input %q", c) + case -1: + return nil + case 0: + r.r.Discard(1) + r.buf = append(r.buf, c) + return nil + case 1: + case 2: + ignoreEOF = true + } + r.r.Discard(1) + r.buf = append(r.buf, c) + } +} + +type reReader struct { + r *RecordReader + peek []byte + err error +} + +func (re *reReader) ReadRune() (r rune, size int, err error) { + re.peek, re.err = re.r.r.Peek(len(re.peek) + 1) + if re.err != nil { + return 0, 0, re.err + } + return rune(re.peek[len(re.peek)-1]), 1, nil +} + +func (r *RecordReader) readRe(re *regexp.Regexp) error { + reader := reReader{r: r} + loc := re.FindReaderIndex(&reader) + if loc == nil { + if reader.err == io.EOF && len(reader.peek) > 0 { + return fmt.Errorf("regexp text mismatch, saw %q", reader.peek) + } + return reader.err + } + n := loc[1] // we ensure the regexp begins with ^, so we only need the end + r.buf = append(r.buf, reader.peek[:n]...) + r.r.Discard(n) + if n == len(reader.peek) { + return reader.err + } + return nil +} + +func (r *RecordReader) readSize(n int) error { + r.buf = append(r.buf, make([]byte, n)...) + n, err := io.ReadFull(r.r, r.buf) + r.buf = r.buf[:n] + return err +} + +func (r *RecordReader) readExact(d []byte) error { + if err := r.readSize(len(d)); err != nil { + return err + } + if !bytes.Equal(d, r.buf) { + return fmt.Errorf("exact text mismatch, read %q when expecting %q", r.buf, d) + } + return nil +} + +func (r *RecordReader) readDelim(d []byte) error { + // Empty delimiters opt in to reading the rest of the text. + if len(d) == 0 { + b, err := io.ReadAll(r.r) + r.buf = b + // ReadAll stops at io.EOF, but we need to bubble that up. + if err == nil { + return io.EOF + } + return err + } + + // We use the simple inefficient search algorithm, which can be O(nm), + // but we aren't expecting huge search spaces. Long term we could + // convert to a two-way search. + for { + peek, err := r.r.Peek(len(d)) + if err != nil { + // If we peek an io.EOF, we were looking for our delim + // and hit the end. This is unexpected. + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + if !bytes.Equal(peek, d) { + // We did not find our delim. Skip the first char + // then continue again. + r.buf = append(r.buf, peek[0]) + r.r.Discard(1) + continue + } + // We found our delim. We discard it and return. + r.r.Discard(len(d)) + return nil + } +} + +type jsonReader struct { + state int8 + n int8 // misc. 
+ nexts []int8 +} + +func (*jsonReader) isHex(c byte) bool { + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', + 'a', 'b', 'c', 'd', 'e', 'f', + 'A', 'B', 'C', 'D', 'E', 'F': + return true + default: + return false + } +} + +func (*jsonReader) isNum(c byte) bool { + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return true + } + return false +} + +func (*jsonReader) isNat(c byte) bool { + switch c { + case '1', '2', '3', '4', '5', '6', '7', '8', '9': + return true + } + return false +} + +func (*jsonReader) isE(c byte) bool { + return c == 'e' || c == 'E' +} + +const ( + jrstAny int8 = iota + jrstObj + jrstObjSep + jrstObjFin + jrstArr + jrstArrFin + jrstStrBegin + jrstStr + jrstStrEsc + jrstStrEscU + jrstTrue + jrstFalse + jrstNull + jrstNeg + jrstOne + jrstDotOrE + jrstDot + jrstE +) + +func (r *jsonReader) read(c byte) (rr int8) { +start: + switch r.state { + case jrstAny: + switch c { + case ' ', '\t', '\n', '\r': + return 1 // skip whitespace, need more + case '{': + r.state = jrstObj + return 1 // object open, need more + case '[': + r.state = jrstArr + return 1 // array open, need more + case '"': + r.state = jrstStr + return 1 // string open, need more + case 't': + r.state = jrstTrue + r.n = 0 + return 1 // beginning of true, need more + case 'f': + r.state = jrstFalse + r.n = 0 + return 1 // beginning of false, need more + case 'n': + r.state = jrstNull + r.n = 0 + return 1 // beginning of null, need more + case '-': + r.state = jrstNeg + return 1 // beginning of negative number, need more + case '0': + r.state = jrstDotOrE + return 1 // beginning of 0e or 0., need more + case '1', '2', '3', '4', '5', '6', '7', '8', '9': + r.state = jrstOne + return 1 // beginning of number, need more + default: + return -2 // invalid json + } + + case jrstObj: + switch c { + case ' ', '\t', '\n', '\r': + return 1 // skip whitespace in json object, need more + case '"': + r.pushState(jrstStr, jrstObjSep) + return 1 // beginning of object key, need to finish, transition to obj sep + case '}': + return r.popState() // end of object, this is valid json end, pop state + default: + return -2 // invalid json: expected object key + } + case jrstObjSep: + switch c { + case ' ', '\t', '\n', '\r': + return 1 // skip whitespace in json object, need more + case ':': + r.pushState(jrstAny, jrstObjFin) + return 1 // beginning of object value, need to finish, transition to obj fin + default: + return -2 // invalid json: expected object separator + } + case jrstObjFin: + switch c { + case ' ', '\r', '\t', '\n': + return 1 // skip whitespace in json object, need more + case ',': + r.pushState(jrstStrBegin, jrstObjSep) + return 1 // beginning of new object key, need to finish, transition to obj sep + case '}': + return r.popState() // end of object, this is valid json end, pop state + default: + return -2 // invalid json + } + + case jrstArr: + switch c { + case ' ', '\r', '\t', '\n': + return 1 // skip whitespace in json array, need more + case ']': + return r.popState() // end of array, this is valid json end, pop state + default: + r.pushState(jrstAny, jrstArrFin) + goto start // array value began: immediately transition to it + } + case jrstArrFin: + switch c { + case ' ', '\r', '\t', '\n': + return 1 // skip whitespace in json array, need more + case ',': + r.state = jrstArr + return 1 // beginning of new array value, need more + case ']': + return r.popState() // end of array, this is valid json end, pop state + default: + return -2 // invalid json + } + + case 
jrstStrBegin: + switch c { + case ' ', '\r', '\t', '\n': + return 1 // skip whitespace in json object (before beginning of key), need more + case '"': + r.state = jrstStr + return 1 // beginning of object key, need more + default: + return -2 // invalid json + } + + case jrstStr: + switch c { + case 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31: + return -2 // invalid json: control characters not allowed in string + case '"': + return r.popState() // end of string, this is valid json end, pop state + case '\\': + r.state = jrstStrEsc + return 1 // beginning of escape sequence, need more + default: + return 1 // continue string, need more + } + case jrstStrEsc: + switch c { + case 'b', 'f', 'n', 'r', 't', '\\', '/', '"': + r.state = jrstStr + return 1 // end of escape sequence, still need to finish string + case 'u': + r.state = jrstStrEscU + r.n = 0 + return 1 // beginning of unicode escape sequence, need more + default: + return -2 // invalid json: invalid escape sequence + } + case jrstStrEscU: + if !r.isHex(c) { + return -2 // invalid json: invalid unicode escape sequence + } + r.n++ + if r.n == 4 { + r.state = jrstStr + } + return 1 // end of unicode escape sequence, still need to finish string + + case jrstTrue: + switch { + case r.n == 0 && c == 'r': + r.n++ + return 1 + case r.n == 1 && c == 'u': + r.n++ + return 1 + case r.n == 2 && c == 'e': + return r.popState() // end of true, this is valid json end, pop state + } + case jrstFalse: + switch { + case r.n == 0 && c == 'a': + r.n++ + return 1 + case r.n == 1 && c == 'l': + r.n++ + return 1 + case r.n == 2 && c == 's': + r.n++ + return 1 + case r.n == 3 && c == 'e': + return r.popState() // end of false, this is valid json end, pop state + } + case jrstNull: + switch { + case r.n == 0 && c == 'u': + r.n++ + return 1 + case r.n == 1 && c == 'l': + r.n++ + return 1 + case r.n == 2 && c == 'l': + return r.popState() // end of null, this is valid json end, pop state + } + + case jrstNeg: + if c == '0' { + r.state = jrstDotOrE + return r.oneOrTwo() // beginning of -0, need to see if there is more (potentially end) + } else if r.isNat(c) { + r.state = jrstOne + return r.oneOrTwo() // beginning of -1 (or 2,3,..9), need to see if there is more (potentially end) + } + return -2 // invalid, -a or something + case jrstOne: + if r.isNum(c) { + return r.oneOrTwo() // continue the number (potentially end) + } + fallthrough // not a number, check if e or . + case jrstDotOrE: + if r.isE(c) { + r.state = jrstE + return 1 // beginning of exponent, need more + } + if c == '.' 
{ + r.state = jrstDot + r.n = 0 + return 1 // beginning of dot, need more + } + if r.popStateToStart() { + goto start + } + return -1 // done with number, no more state to bubble to: we are done + + case jrstDot: + switch r.n { + case 0: + if !r.isNum(c) { + return -2 // first char after dot must be a number + } + r.n = 1 + return r.oneOrTwo() // saw number, keep and continue (potentially end) + case 1: + if r.isNum(c) { + return r.oneOrTwo() // more number, keep and continue (potentially end) + } + if r.isE(c) { + r.state = jrstE + r.n = 0 + return 1 // beginning of exponent (-0.1e), need more + } + if r.popStateToStart() { + goto start + } + return -1 // done with number, no more state to bubble to: we are done + } + case jrstE: + switch r.n { + case 0: + if c == '+' || c == '-' { + r.n = 1 + return 1 // beginning of exponent sign, need more + } + fallthrough + case 1: + if !r.isNum(c) { + return -2 // first char after exponent must be sign or number + } + r.n = 2 + return r.oneOrTwo() // saw number, keep and continue (potentially end) + case 2: + if r.isNum(c) { + return r.oneOrTwo() // more number, keep and continue (potentially end) + } + if r.popStateToStart() { + goto start + } + return -1 // done with number, no more state to bubble to: we are done + } + } + return -2 // unknown state +} + +func (r *jsonReader) pushState(next, next2 int8) { + r.nexts = append(r.nexts, next2) + r.state = next +} + +func (r *jsonReader) popState() int8 { + if len(r.nexts) == 0 { + r.state = jrstAny + return 0 + } + r.state = r.nexts[len(r.nexts)-1] + r.nexts = r.nexts[:len(r.nexts)-1] + return 1 +} + +func (r *jsonReader) popStateToStart() bool { + if len(r.nexts) == 0 { + r.state = jrstAny + return false + } + r.state = r.nexts[len(r.nexts)-1] + r.nexts = r.nexts[:len(r.nexts)-1] + return true +} + +func (r *jsonReader) oneOrTwo() int8 { + if len(r.nexts) > 0 { + return 1 + } + return 2 +} + +//////////// +// COMMON // +//////////// + +func parseLayoutSlash(layout string) (byte, int, error) { + if len(layout) == 0 { + return 0, 0, errors.New("invalid slash escape at end of delim string") + } + switch layout[0] { + case 't': + return '\t', 1, nil + case 'n': + return '\n', 1, nil + case 'r': + return '\r', 1, nil + case '\\': + return '\\', 1, nil + case 'x': + if len(layout) < 3 { // on x, need two more + return 0, 0, errors.New("invalid non-terminated hex escape sequence at end of delim string") + } + hex := layout[1:3] + n, err := strconv.ParseInt(hex, 16, 8) + if err != nil { + return 0, 0, fmt.Errorf("unable to parse hex escape sequence %q: %v", hex, err) + } + return byte(n), 3, nil + default: + return 0, 0, fmt.Errorf("unknown slash escape sequence %q", layout[:1]) + } +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/ring.go b/vendor/github.com/twmb/franz-go/pkg/kgo/ring.go new file mode 100644 index 00000000000..3ef989f49bf --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/ring.go @@ -0,0 +1,269 @@ +package kgo + +import "sync" + +// The ring types below are fixed sized blocking MPSC ringbuffers. These +// replace channels in a few places in this client. The *main* advantage they +// provide is to allow loops that terminate. +// +// With channels, we always have to have a goroutine draining the channel. We +// cannot start the goroutine when we add the first element, because the +// goroutine will immediately drain the first and if something produces right +// away, it will start a second concurrent draining goroutine. 
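+//
+ // A hypothetical interleaving with a plain channel (illustrative names
+ // only, not code in this client) that produces the duplicate drainer:
+ //
+ //	ch <- e1     // pusher: buffers the first element
+ //	go drain(ch) // pusher: starts the draining goroutine
+ //	             // drain: consumes e1 and is about to exit
+ //	ch <- e2     // another pusher: channel looks idle again...
+ //	go drain(ch) // ...so it starts a second, concurrent drainer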
+// +// We cannot fix that by adding a "working" field, because we would need a lock +// around checking if the goroutine still has elements *and* around setting the +// working field to false. If a push was blocked, it would be holding the lock, +// which would block the worker from grabbing the lock. Any other lock ordering +// has TOCTOU problems as well. +// +// We could use a slice that we always push to and pop the front of. This is a +// bit easier to reason about, but constantly reallocates and has no bounded +// capacity. The second we think about adding bounded capacity, we get this +// ringbuffer below. +// +// The key insight is that we only pop the front *after* we are done with it. +// If there are still more elements, the worker goroutine can continue working. +// If there are no more elements, it can quit. When pushing, if the pusher +// pushed the first element, it starts the worker. +// +// Pushes fail if the ring is dead, allowing the pusher to fail any promise. +// If a die happens while a worker is running, all future pops will see the +// ring is dead and can fail promises immediately. If a worker is not running, +// then there are no promises that need to be called. +// +// We use size 8 buffers because eh why not. This gives us a small optimization +// of masking to increment and decrement, rather than modulo arithmetic. + +const ( + mask7 = 0b0000_0111 + eight = mask7 + 1 +) + +type ringReq struct { + mu sync.Mutex + c *sync.Cond + + elems [eight]promisedReq + + head uint8 + tail uint8 + l uint8 + dead bool +} + +func (r *ringReq) die() { + r.mu.Lock() + defer r.mu.Unlock() + + r.dead = true + if r.c != nil { + r.c.Broadcast() + } +} + +func (r *ringReq) push(pr promisedReq) (first, dead bool) { + r.mu.Lock() + defer r.mu.Unlock() + + for r.l == eight && !r.dead { + if r.c == nil { + r.c = sync.NewCond(&r.mu) + } + r.c.Wait() + } + + if r.dead { + return false, true + } + + r.elems[r.tail] = pr + r.tail = (r.tail + 1) & mask7 + r.l++ + + return r.l == 1, false +} + +func (r *ringReq) dropPeek() (next promisedReq, more, dead bool) { + r.mu.Lock() + defer r.mu.Unlock() + + r.elems[r.head] = promisedReq{} + r.head = (r.head + 1) & mask7 + r.l-- + + // If the cond has been initialized, there could potentially be waiters + // and we must always signal. + if r.c != nil { + r.c.Signal() + } + + return r.elems[r.head], r.l > 0, r.dead +} + +// ringResp duplicates the code above, but for promisedResp +type ringResp struct { + mu sync.Mutex + c *sync.Cond + + elems [eight]promisedResp + + head uint8 + tail uint8 + l uint8 + dead bool +} + +func (r *ringResp) die() { + r.mu.Lock() + defer r.mu.Unlock() + + r.dead = true + if r.c != nil { + r.c.Broadcast() + } +} + +func (r *ringResp) push(pr promisedResp) (first, dead bool) { + r.mu.Lock() + defer r.mu.Unlock() + + for r.l == eight && !r.dead { + if r.c == nil { + r.c = sync.NewCond(&r.mu) + } + r.c.Wait() + } + + if r.dead { + return false, true + } + + r.elems[r.tail] = pr + r.tail = (r.tail + 1) & mask7 + r.l++ + + return r.l == 1, false +} + +func (r *ringResp) dropPeek() (next promisedResp, more, dead bool) { + r.mu.Lock() + defer r.mu.Unlock() + + r.elems[r.head] = promisedResp{} + r.head = (r.head + 1) & mask7 + r.l-- + + if r.c != nil { + r.c.Signal() + } + + return r.elems[r.head], r.l > 0, r.dead +} + +// ringSeqResp duplicates the code above, but for *seqResp. We leave off die +// because we do not use it, but we keep `c` for testing lowering eight/mask7. 
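+//
+ // The push/dropPeek pattern shared by all of these rings, as a minimal
+ // sketch (hypothetical names, not part of this package):
+ //
+ //	if first := r.push(elem); first {
+ //		go work(elem) // only the pusher of the first element starts a worker
+ //	}
+ //
+ //	func work(elem T) {
+ //		for more := true; more; elem, more = r.dropPeek() {
+ //			handle(elem) // pop happens only after handling
+ //		}
+ //	}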
+type ringSeqResp struct { + mu sync.Mutex + c *sync.Cond + + elems [eight]*seqResp + + head uint8 + tail uint8 + l uint8 +}
+
+func (r *ringSeqResp) push(sr *seqResp) (first bool) { + r.mu.Lock() + defer r.mu.Unlock() + + for r.l == eight { + if r.c == nil { + r.c = sync.NewCond(&r.mu) + } + r.c.Wait() + } + + r.elems[r.tail] = sr + r.tail = (r.tail + 1) & mask7 + r.l++ + + return r.l == 1 +}
+
+func (r *ringSeqResp) dropPeek() (next *seqResp, more bool) { + r.mu.Lock() + defer r.mu.Unlock() + + r.elems[r.head] = nil + r.head = (r.head + 1) & mask7 + r.l-- + + if r.c != nil { + r.c.Signal() + } + + return r.elems[r.head], r.l > 0 +}
+
+// Also no die; this type is slightly different because we can have overflow.
+// If we have overflow, we add to overflow until overflow is drained -- we
+// always want strict ordering.
+type ringBatchPromise struct { + mu sync.Mutex + + elems [eight]batchPromise + + head uint8 + tail uint8 + l uint8 + + overflow []batchPromise +}
+
+func (r *ringBatchPromise) push(b batchPromise) (first bool) { + r.mu.Lock() + defer r.mu.Unlock() + + // If the ring is full, we go into overflow; if overflow is non-empty, + // for ordering purposes, we add to the end of overflow. We only go + // back to using the ring once overflow is finally empty. + if r.l == eight || len(r.overflow) > 0 { + r.overflow = append(r.overflow, b) + return false + } + + r.elems[r.tail] = b + r.tail = (r.tail + 1) & mask7 + r.l++ + + return r.l == 1 +}
+
+func (r *ringBatchPromise) dropPeek() (next batchPromise, more bool) { + r.mu.Lock() + defer r.mu.Unlock() + + // We always drain the ring first. If the ring is empty here, there + // must be overflow: we only reach dropPeek after handling an element, + // so an empty ring means that element came from overflow. + if r.l > 1 { + r.elems[r.head] = batchPromise{} + r.head = (r.head + 1) & mask7 + r.l-- + return r.elems[r.head], true + } else if r.l == 1 { + r.elems[r.head] = batchPromise{} + r.head = (r.head + 1) & mask7 + r.l-- + if len(r.overflow) == 0 { + return next, false + } + return r.overflow[0], true + } + r.overflow = r.overflow[1:] + if len(r.overflow) > 0 { + return r.overflow[0], true + } + return next, false +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/sink.go b/vendor/github.com/twmb/franz-go/pkg/kgo/sink.go new file mode 100644 index 00000000000..2b96d916987 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/sink.go @@ -0,0 +1,2418 @@ +package kgo
+
+import ( + "bytes" + "context" + "errors" + "fmt" + "hash/crc32" + "math" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/twmb/franz-go/pkg/kbin" + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" +)
+
+type sink struct { + cl *Client // our owning client, for cfg, metadata triggering, context, etc. + nodeID int32 // the node ID of the broker this sink belongs to + + // inflightSem controls the number of concurrent produce requests. We + // start with a limit of 1, which covers Kafka v0.11.0. On the first + // response, we check what version was set in the request. If it is at + // least 4, which 1.0 introduced, we upgrade the sem size. + inflightSem atomic.Value + produceVersion atomicI32 // negative is unset, positive is version + + drainState workLoop + + // seqResps contains responses that must be handled sequentially + // (the ring guards itself with its own mutex). These responses are + // handled asynchronously, but sequentially.
+ seqResps ringSeqResp + + backoffMu sync.Mutex // guards the following + needBackoff bool + backoffSeq uint32 // prevents pile on failures + + // consecutiveFailures is incremented every backoff and cleared every + // successful response. For simplicity, if we have a good response + // following an error response before the error response's backoff + // occurs, the backoff is not cleared. + consecutiveFailures atomicU32 + + recBufsMu sync.Mutex // guards the following + recBufs []*recBuf // contains all partition records for batch building + recBufsStart int // incremented every req to avoid large batch starvation +} + +type seqResp struct { + resp kmsg.Response + err error + done chan struct{} + br *broker + promise func(*broker, kmsg.Response, error) +} + +func (cl *Client) newSink(nodeID int32) *sink { + s := &sink{ + cl: cl, + nodeID: nodeID, + } + s.produceVersion.Store(-1) + maxInflight := 1 + if cl.cfg.disableIdempotency { + maxInflight = cl.cfg.maxProduceInflight + } + s.inflightSem.Store(make(chan struct{}, maxInflight)) + return s +} + +// createReq returns a produceRequest from currently buffered records +// and whether there are more records to create more requests immediately. +func (s *sink) createReq(id int64, epoch int16) (*produceRequest, *kmsg.AddPartitionsToTxnRequest, bool) { + req := &produceRequest{ + txnID: s.cl.cfg.txnID, + acks: s.cl.cfg.acks.val, + timeout: int32(s.cl.cfg.produceTimeout.Milliseconds()), + batches: make(seqRecBatches, 5), + + producerID: id, + producerEpoch: epoch, + + hasHook: s.cl.producer.hasHookBatchWritten, + compressor: s.cl.compressor, + + wireLength: s.cl.baseProduceRequestLength(), // start length with no topics + wireLengthLimit: s.cl.cfg.maxBrokerWriteBytes, + } + txnBuilder := txnReqBuilder{ + txnID: req.txnID, + id: id, + epoch: epoch, + } + + var moreToDrain bool + + s.recBufsMu.Lock() + defer s.recBufsMu.Unlock() + + recBufsIdx := s.recBufsStart + for i := 0; i < len(s.recBufs); i++ { + recBuf := s.recBufs[recBufsIdx] + recBufsIdx = (recBufsIdx + 1) % len(s.recBufs) + + recBuf.mu.Lock() + if recBuf.failing || len(recBuf.batches) == recBuf.batchDrainIdx || recBuf.inflightOnSink != nil && recBuf.inflightOnSink != s || recBuf.inflight != 0 && !recBuf.okOnSink { + recBuf.mu.Unlock() + continue + } + + batch := recBuf.batches[recBuf.batchDrainIdx] + if added := req.tryAddBatch(s.produceVersion.Load(), recBuf, batch); !added { + recBuf.mu.Unlock() + moreToDrain = true + continue + } + + recBuf.inflightOnSink = s + recBuf.inflight++ + + recBuf.batchDrainIdx++ + recBuf.seq = incrementSequence(recBuf.seq, int32(len(batch.records))) + moreToDrain = moreToDrain || recBuf.tryStopLingerForDraining() + recBuf.mu.Unlock() + + txnBuilder.add(recBuf) + } + + // We could have lost our only record buffer just before we grabbed the + // lock above, so we have to check there are recBufs. 
+ if len(s.recBufs) > 0 { + s.recBufsStart = (s.recBufsStart + 1) % len(s.recBufs) + } + return req, txnBuilder.req, moreToDrain +} + +func incrementSequence(sequence, increment int32) int32 { + if sequence > math.MaxInt32-increment { + return increment - (math.MaxInt32 - sequence) - 1 + } + + return sequence + increment +} + +type txnReqBuilder struct { + txnID *string + req *kmsg.AddPartitionsToTxnRequest + id int64 + epoch int16 + addedTopics map[string]int // topic => index into req +} + +func (t *txnReqBuilder) add(rb *recBuf) { + if t.txnID == nil { + return + } + if rb.addedToTxn.Swap(true) { + return + } + if t.req == nil { + req := kmsg.NewPtrAddPartitionsToTxnRequest() + req.TransactionalID = *t.txnID + req.ProducerID = t.id + req.ProducerEpoch = t.epoch + t.req = req + t.addedTopics = make(map[string]int, 10) + } + idx, exists := t.addedTopics[rb.topic] + if !exists { + idx = len(t.req.Topics) + t.addedTopics[rb.topic] = idx + reqTopic := kmsg.NewAddPartitionsToTxnRequestTopic() + reqTopic.Topic = rb.topic + t.req.Topics = append(t.req.Topics, reqTopic) + } + t.req.Topics[idx].Partitions = append(t.req.Topics[idx].Partitions, rb.partition) +} + +func (s *sink) maybeDrain() { + if s.cl.cfg.manualFlushing && s.cl.producer.flushing.Load() == 0 { + return + } + if s.drainState.maybeBegin() { + go s.drain() + } +} + +func (s *sink) maybeBackoff() { + s.backoffMu.Lock() + backoff := s.needBackoff + s.backoffMu.Unlock() + + if !backoff { + return + } + defer s.clearBackoff() + + s.cl.triggerUpdateMetadata(false, "opportunistic load during sink backoff") // as good a time as any + + tries := int(s.consecutiveFailures.Add(1)) + after := time.NewTimer(s.cl.cfg.retryBackoff(tries)) + defer after.Stop() + + select { + case <-after.C: + case <-s.cl.ctx.Done(): + case <-s.anyCtx().Done(): + } +} + +func (s *sink) maybeTriggerBackoff(seq uint32) { + s.backoffMu.Lock() + defer s.backoffMu.Unlock() + if seq == s.backoffSeq { + s.needBackoff = true + } +} + +func (s *sink) clearBackoff() { + s.backoffMu.Lock() + defer s.backoffMu.Unlock() + s.backoffSeq++ + s.needBackoff = false +} + +// drain drains buffered records and issues produce requests. +// +// This function is harmless if there are no records that need draining. +// We rely on that to not worry about accidental triggers of this function. +func (s *sink) drain() { + again := true + for again { + s.maybeBackoff() + + sem := s.inflightSem.Load().(chan struct{}) + select { + case sem <- struct{}{}: + case <-s.cl.ctx.Done(): + s.drainState.hardFinish() + return + } + + again = s.drainState.maybeFinish(s.produce(sem)) + } +} + +// Returns the first context encountered ranging across all records. +// This does not use defers to make it clear at the return that all +// unlocks are called in proper order. Ideally, do not call this func +// due to lock intensity. 
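+//
+// The lock order below is recBufsMu, then each recBuf.mu, then batch0.mu;
+// the early return releases all three in the reverse order they were taken.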
+func (s *sink) anyCtx() context.Context { + s.recBufsMu.Lock() + for _, recBuf := range s.recBufs { + recBuf.mu.Lock() + if len(recBuf.batches) > 0 { + batch0 := recBuf.batches[0] + batch0.mu.Lock() + if batch0.canFailFromLoadErrs && len(batch0.records) > 0 { + r0 := batch0.records[0] + if rctx := r0.cancelingCtx(); rctx != nil { + batch0.mu.Unlock() + recBuf.mu.Unlock() + s.recBufsMu.Unlock() + return rctx + } + } + batch0.mu.Unlock() + } + recBuf.mu.Unlock() + } + s.recBufsMu.Unlock() + return context.Background() +} + +func (s *sink) produce(sem <-chan struct{}) bool { + var produced bool + defer func() { + if !produced { + <-sem + } + }() + + // We could have been triggered from a metadata update even though the + // user is not producing at all. If we have no buffered records, let's + // avoid potentially creating a producer ID. + if s.cl.BufferedProduceRecords() == 0 { + return false + } + + // producerID can fail from: + // - retry failure + // - auth failure + // - transactional: a produce failure that failed the producer ID + // - AddPartitionsToTxn failure (see just below) + // - some head-of-line context failure + // + // All but the first error is fatal. Recovery may be possible with + // EndTransaction in specific cases, but regardless, all buffered + // records must fail. + // + // NOTE: we init the producer ID before creating a request to ensure we + // are always using the latest id/epoch with the proper sequence + // numbers. (i.e., resetAllSequenceNumbers && producerID logic combo). + // + // For the first-discovered-record-head-of-line context, we want to + // avoid looking it up if possible (which is why producerID takes a + // ctxFn). If we do use one, we want to be sure that the + // context.Canceled error is from *that* context rather than the client + // context or something else. So, we go through some special care to + // track setting the ctx / looking up if it is canceled. + var holCtxMu sync.Mutex + var holCtx context.Context + ctxFn := func() context.Context { + holCtxMu.Lock() + defer holCtxMu.Unlock() + holCtx = s.anyCtx() + return holCtx + } + isHolCtxDone := func() bool { + holCtxMu.Lock() + defer holCtxMu.Unlock() + if holCtx == nil { + return false + } + select { + case <-holCtx.Done(): + return true + default: + } + return false + } + + id, epoch, err := s.cl.producerID(ctxFn) + if err != nil { + var pe *errProducerIDLoadFail + switch { + case errors.As(err, &pe): + if errors.Is(pe.err, context.Canceled) && isHolCtxDone() { + // Some head-of-line record in a partition had a context cancelation. + // We look for any partition with HOL cancelations and fail them all. + s.cl.cfg.logger.Log(LogLevelInfo, "the first record in some partition(s) had a context cancelation; failing all relevant partitions", "broker", logID(s.nodeID)) + s.recBufsMu.Lock() + defer s.recBufsMu.Unlock() + for _, recBuf := range s.recBufs { + recBuf.mu.Lock() + var failAll bool + if len(recBuf.batches) > 0 { + batch0 := recBuf.batches[0] + batch0.mu.Lock() + if batch0.canFailFromLoadErrs && len(batch0.records) > 0 { + r0 := batch0.records[0] + if rctx := r0.cancelingCtx(); rctx != nil { + select { + case <-rctx.Done(): + failAll = true // we must not call failAllRecords here, because failAllRecords locks batches! 
+ default: + } + } + } + batch0.mu.Unlock() + } + if failAll { + recBuf.failAllRecords(err) + } + recBuf.mu.Unlock() + } + return true + } + s.cl.bumpRepeatedLoadErr(err) + s.cl.cfg.logger.Log(LogLevelWarn, "unable to load producer ID, bumping client's buffered record load errors by 1 and retrying") + return true // whatever caused our produce, we did nothing, so keep going + case errors.Is(err, ErrClientClosed): + s.cl.failBufferedRecords(err) + default: + s.cl.cfg.logger.Log(LogLevelError, "fatal InitProducerID error, failing all buffered records", "broker", logID(s.nodeID), "err", err) + s.cl.failBufferedRecords(err) + } + return false + }
+
+ if !s.cl.producer.maybeAddInflight() { // must do before marking recBufs on a txn + return false + } + defer func() { + if !produced { + s.cl.producer.decInflight() + } + }()
+
+ // NOTE: we create the req AFTER getting our producer ID! + // + // If a prior response caused errReloadProducerID, then calling + // producerID() sets needSeqReset, and creating the request resets + // sequence numbers. We need to have that logic occur before we create + // the request, otherwise we will create a request with the old + // sequence numbers using our new producer ID, which will then again + // fail with OOOSN. + req, txnReq, moreToDrain := s.createReq(id, epoch) + if len(req.batches) == 0 { // everything was failing or lingering + return moreToDrain + }
+
+ if txnReq != nil { + // txnReq can fail from: + // - TransactionAbortable + // - retry failure + // - auth failure + // - producer id mapping / epoch errors + // The latter case can potentially recover with the KIP logic + // we have defined in EndTransaction. Regardless, on failure + // here, all buffered records must fail. + // We do not need to clear the addedToTxn flag for any recBuf + // it was set on, since producer id recovery resets the flag. + batchesStripped, err := s.doTxnReq(req, txnReq) + if err != nil { + switch { + case errors.Is(err, kerr.TransactionAbortable): + // If we get TransactionAbortable, we continue into producing. + // The produce will fail with the same error, and this is the + // only way to notify the user to abort the txn. + case isRetryableBrokerErr(err) || isDialNonTimeoutErr(err): + s.cl.bumpRepeatedLoadErr(err) + s.cl.cfg.logger.Log(LogLevelWarn, "unable to AddPartitionsToTxn due to retryable broker err, bumping client's buffered record load errors by 1 and retrying", "err", err) + s.cl.triggerUpdateMetadata(false, "attempting to refresh broker list due to failed AddPartitionsToTxn requests") + return moreToDrain || len(req.batches) > 0 // nothing stripped if request-issuing error + default: + // Note that err can be InvalidProducerEpoch, which is + // potentially recoverable in EndTransaction. + // + // We do not fail all buffered records here, + // because that can lead to undesirable behavior + // with produce request vs. end txn (KAFKA-12671) + s.cl.failProducerID(id, epoch, err) + s.cl.cfg.logger.Log(LogLevelError, "fatal AddPartitionsToTxn error, failing all buffered records (it is possible the client can recover after EndTransaction)", "broker", logID(s.nodeID), "err", err) + return false + } + }
+
+ // If we stripped everything, ensure we back off to force a + // metadata load. If not everything was stripped, we issue our + // request and ensure we will retry producing until + // everything is stripped (and we eventually back off).
+ if batchesStripped { + moreToDrain = true + if len(req.batches) == 0 { + s.maybeTriggerBackoff(s.backoffSeq) + } + } + }
+
+ if len(req.batches) == 0 { // txn req could have removed some partitions to retry later (unknown topic, etc.) + return moreToDrain + }
+
+ req.backoffSeq = s.backoffSeq // safe to read outside mu since we are in drain loop
+
+ produced = true
+
+ batches := req.batches.sliced() + s.doSequenced(req, func(br *broker, resp kmsg.Response, err error) { + s.handleReqResp(br, req, resp, err) + s.cl.producer.decInflight() + batches.eachOwnerLocked((*recBatch).decInflight) + <-sem + }) + return moreToDrain +}
+
+// With handleSeqResps below, this function ensures that all request responses
+// are handled in order. We use this guarantee while in handleReqResp below.
+func (s *sink) doSequenced( + req kmsg.Request, + promise func(*broker, kmsg.Response, error), +) { + wait := &seqResp{ + done: make(chan struct{}), + promise: promise, + }
+
+ // We can NOT use any record context. If we do, we force the request to + // fail while also forcing the batch to be unfailable (due to no + // response). + br, err := s.cl.brokerOrErr(s.cl.ctx, s.nodeID, errUnknownBroker) + if err != nil { + wait.err = err + close(wait.done) + } else { + br.do(s.cl.ctx, req, func(resp kmsg.Response, err error) { + wait.resp = resp + wait.err = err + close(wait.done) + }) + wait.br = br + }
+
+ if first := s.seqResps.push(wait); first { + go s.handleSeqResps(wait) + } +}
+
+// Ensures that all request responses are processed in order.
+func (s *sink) handleSeqResps(wait *seqResp) { + var more bool +start: + <-wait.done + wait.promise(wait.br, wait.resp, wait.err) + + wait, more = s.seqResps.dropPeek() + if more { + goto start + } +}
+
+// Issues an AddPartitionsToTxnRequest before a produce request for all
+// partitions that need to be added to a transaction.
+func (s *sink) doTxnReq( + req *produceRequest, + txnReq *kmsg.AddPartitionsToTxnRequest, +) (stripped bool, err error) { + // If we return an unretryable error, then we have to reset everything + // to not be in the transaction and begin draining at the start. + // + // These batches must be the first in their recBuf, because we would + // not be trying to add them to a partition if they were not. + defer func() { + if err != nil { + req.batches.eachOwnerLocked(seqRecBatch.removeFromTxn) + } + }() + // We do NOT let record context cancelations fail this request: doing + // so would put the transactional ID in an unknown state. This is + // similar to the warning we give in the txn.go file, but the + // difference there is the user knows explicitly at the function call + // that canceling the context will opt them into invalid state. + err = s.cl.doWithConcurrentTransactions(s.cl.ctx, "AddPartitionsToTxn", func() error { + stripped, err = s.issueTxnReq(req, txnReq) + return err + }) + return stripped, err +}
+
+// Removing a batch from the transaction means we will not be issuing it
+// inflight, that it is no longer added to the txn, and that we need to reset
+// the drain index.
+func (b *recBatch) removeFromTxn() { + b.owner.addedToTxn.Store(false) + b.owner.resetBatchDrainIdx() + b.decInflight() +} + +func (s *sink) issueTxnReq( + req *produceRequest, + txnReq *kmsg.AddPartitionsToTxnRequest, +) (stripped bool, fatalErr error) { + resp, err := txnReq.RequestWith(s.cl.ctx, s.cl) + if err != nil { + return false, err + } + + for _, topic := range resp.Topics { + topicBatches, ok := req.batches[topic.Topic] + if !ok { + s.cl.cfg.logger.Log(LogLevelError, "broker replied with topic in AddPartitionsToTxnResponse that was not in request", "topic", topic.Topic) + continue + } + for _, partition := range topic.Partitions { + if err := kerr.ErrorForCode(partition.ErrorCode); err != nil && err != kerr.TransactionAbortable { // see below for txn abortable + // OperationNotAttempted is set for all partitions that are authorized + // if any partition is unauthorized _or_ does not exist. We simply remove + // unattempted partitions and treat them as retryable. + if !kerr.IsRetriable(err) && !errors.Is(err, kerr.OperationNotAttempted) { + fatalErr = err // auth err, etc + continue + } + + batch, ok := topicBatches[partition.Partition] + if !ok { + s.cl.cfg.logger.Log(LogLevelError, "broker replied with partition in AddPartitionsToTxnResponse that was not in request", "topic", topic.Topic, "partition", partition.Partition) + continue + } + + // We are stripping this retryable-err batch from the request, + // so we must reset that it has been added to the txn. + batch.owner.mu.Lock() + batch.removeFromTxn() + batch.owner.mu.Unlock() + + stripped = true + + delete(topicBatches, partition.Partition) + } + if len(topicBatches) == 0 { + delete(req.batches, topic.Topic) + } + } + } + return stripped, fatalErr +} + +// firstRespCheck is effectively a sink.Once. On the first response, if the +// used request version is at least 4, we upgrade our inflight sem. +// +// Starting on version 4, Kafka allowed five inflight requests while +// maintaining idempotency. Before, only one was allowed. +// +// We go through an atomic because drain can be waiting on the sem (with +// capacity one). We store four here, meaning new drain loops will load the +// higher capacity sem without read/write pointer racing a current loop. +// +// This logic does mean that we will never use the full potential 5 in flight +// outside of a small window during the store, but some pages in the Kafka +// confluence basically show that more than two in flight has marginal benefit +// anyway (although that may be due to their Java API). +// +// https://cwiki.apache.org/confluence/display/KAFKA/An+analysis+of+the+impact+of+max.in.flight.requests.per.connection+and+acks+on+Producer+performance +// https://issues.apache.org/jira/browse/KAFKA-5494 +func (s *sink) firstRespCheck(idempotent bool, version int16) { + if s.produceVersion.Load() < 0 { + s.produceVersion.Store(int32(version)) + if idempotent && version >= 4 { + s.inflightSem.Store(make(chan struct{}, 4)) + } + } +} + +// handleReqClientErr is called when the client errors before receiving a +// produce response. 
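+//
+// Unknown-broker, dial, and retryable broker errors requeue the request's
+// batches, triggering a metadata update unless the error was just a retryable
+// broker error; ErrClientClosed fails all buffered records; any other error is
+// logged and the batches are requeued the same way.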
+func (s *sink) handleReqClientErr(req *produceRequest, err error) { + switch { + default: + s.cl.cfg.logger.Log(LogLevelWarn, "random error while producing, requeueing unattempted request", "broker", logID(s.nodeID), "err", err) + fallthrough + + case errors.Is(err, errUnknownBroker), + isDialNonTimeoutErr(err), + isRetryableBrokerErr(err): + updateMeta := !isRetryableBrokerErr(err) + if updateMeta { + s.cl.cfg.logger.Log(LogLevelInfo, "produce request failed, triggering metadata update", "broker", logID(s.nodeID), "err", err) + } + s.handleRetryBatches(req.batches, nil, req.backoffSeq, updateMeta, false, "failed produce request triggered metadata update") + + case errors.Is(err, ErrClientClosed): + s.cl.failBufferedRecords(ErrClientClosed) + } +} + +// No acks mean no response. The following block is basically an extremely +// condensed version of the logic in handleReqResp. +func (s *sink) handleReqRespNoack(b *bytes.Buffer, debug bool, req *produceRequest) { + if debug { + fmt.Fprintf(b, "noack ") + } + for topic, partitions := range req.batches { + if debug { + fmt.Fprintf(b, "%s[", topic) + } + for partition, batch := range partitions { + batch.owner.mu.Lock() + if batch.isOwnersFirstBatch() { + if debug { + fmt.Fprintf(b, "%d{0=>%d}, ", partition, len(batch.records)) + } + s.cl.finishBatch(batch.recBatch, req.producerID, req.producerEpoch, partition, 0, nil) + } else if debug { + fmt.Fprintf(b, "%d{skipped}, ", partition) + } + batch.owner.mu.Unlock() + } + if debug { + if bytes.HasSuffix(b.Bytes(), []byte(", ")) { + b.Truncate(b.Len() - 2) + } + b.WriteString("], ") + } + } +} + +func (s *sink) handleReqResp(br *broker, req *produceRequest, resp kmsg.Response, err error) { + if err != nil { + s.handleReqClientErr(req, err) + return + } + s.firstRespCheck(req.idempotent(), req.version) + s.consecutiveFailures.Store(0) + defer req.metrics.hook(&s.cl.cfg, br) // defer to end so that non-written batches are removed + + var b *bytes.Buffer + debug := s.cl.cfg.logger.Level() >= LogLevelDebug + if debug { + b = bytes.NewBuffer(make([]byte, 0, 128)) + defer func() { + update := b.String() + update = strings.TrimSuffix(update, ", ") + s.cl.cfg.logger.Log(LogLevelDebug, "produced", "broker", logID(s.nodeID), "to", update) + }() + } + + if req.acks == 0 { + s.handleReqRespNoack(b, debug, req) + return + } + + var kmove kip951move + var reqRetry seqRecBatches // handled at the end + + kresp := resp.(*kmsg.ProduceResponse) + for i := range kresp.Topics { + rt := &kresp.Topics[i] + topic := rt.Topic + partitions, ok := req.batches[topic] + if !ok { + s.cl.cfg.logger.Log(LogLevelError, "broker erroneously replied with topic in produce request that we did not produce to", "broker", logID(s.nodeID), "topic", topic) + delete(req.metrics, topic) + continue // should not hit this + } + + if debug { + fmt.Fprintf(b, "%s[", topic) + } + + tmetrics := req.metrics[topic] + for j := range rt.Partitions { + rp := &rt.Partitions[j] + partition := rp.Partition + batch, ok := partitions[partition] + if !ok { + s.cl.cfg.logger.Log(LogLevelError, "broker erroneously replied with partition in produce request that we did not produce to", "broker", logID(s.nodeID), "topic", rt.Topic, "partition", partition) + delete(tmetrics, partition) + continue // should not hit this + } + delete(partitions, partition) + + retry, didProduce := s.handleReqRespBatch( + b, + &kmove, + kresp, + topic, + rp, + batch, + req.producerID, + req.producerEpoch, + ) + if retry { + reqRetry.addSeqBatch(topic, partition, batch) + } + if 
!didProduce { + delete(tmetrics, partition) + } + } + + if debug { + if bytes.HasSuffix(b.Bytes(), []byte(", ")) { + b.Truncate(b.Len() - 2) + } + b.WriteString("], ") + } + + if len(partitions) == 0 { + delete(req.batches, topic) + } + } + + if len(req.batches) > 0 { + s.cl.cfg.logger.Log(LogLevelError, "broker did not reply to all topics / partitions in the produce request! reenqueuing missing partitions", "broker", logID(s.nodeID)) + s.handleRetryBatches(req.batches, nil, 0, true, false, "broker did not reply to all topics in produce request") + } + if len(reqRetry) > 0 { + s.handleRetryBatches(reqRetry, &kmove, 0, true, true, "produce request had retry batches") + } +} + +func (s *sink) handleReqRespBatch( + b *bytes.Buffer, + kmove *kip951move, + resp *kmsg.ProduceResponse, + topic string, + rp *kmsg.ProduceResponseTopicPartition, + batch seqRecBatch, + producerID int64, + producerEpoch int16, +) (retry, didProduce bool) { + batch.owner.mu.Lock() + defer batch.owner.mu.Unlock() + + nrec := len(batch.records) + + debug := b != nil + if debug { + fmt.Fprintf(b, "%d{", rp.Partition) + } + + // We only ever operate on the first batch in a record buf. Batches + // work sequentially; if this is not the first batch then an error + // happened and this later batch is no longer a part of a seq chain. + if !batch.isOwnersFirstBatch() { + if debug { + if err := kerr.ErrorForCode(rp.ErrorCode); err == nil { + if nrec > 0 { + fmt.Fprintf(b, "skipped@%d=>%d}, ", rp.BaseOffset, rp.BaseOffset+int64(nrec)) + } else { + fmt.Fprintf(b, "skipped@%d}, ", rp.BaseOffset) + } + } else { + if nrec > 0 { + fmt.Fprintf(b, "skipped@%d,%d(%s)}, ", rp.BaseOffset, nrec, err) + } else { + fmt.Fprintf(b, "skipped@%d(%s)}, ", rp.BaseOffset, err) + } + } + } + return false, false + } + + // Since we have received a response and we are the first batch, we can + // at this point re-enable failing from load errors. + // + // We do not need a lock since the owner is locked. + batch.canFailFromLoadErrs = true + + // By default, we assume we errored. Non-error updates this back + // to true. + batch.owner.okOnSink = false + + if moving := kmove.maybeAddProducePartition(resp, rp, batch.owner); moving { + if debug { + fmt.Fprintf(b, "move:%d:%d@%d,%d}, ", rp.CurrentLeader.LeaderID, rp.CurrentLeader.LeaderEpoch, rp.BaseOffset, nrec) + } + batch.owner.failing = true + return true, false + } + + err := kerr.ErrorForCode(rp.ErrorCode) + failUnknown := batch.owner.checkUnknownFailLimit(err) + switch { + case kerr.IsRetriable(err) && + !failUnknown && + err != kerr.CorruptMessage && + batch.tries < s.cl.cfg.recordRetries: + + if debug { + fmt.Fprintf(b, "retrying@%d,%d(%s)}, ", rp.BaseOffset, nrec, err) + } + return true, false + + case err == kerr.OutOfOrderSequenceNumber, + err == kerr.UnknownProducerID, + err == kerr.InvalidProducerIDMapping, + err == kerr.InvalidProducerEpoch: + + // OOOSN always means data loss 1.0+ and is ambiguous prior. + // We assume the worst and only continue if requested. + // + // UnknownProducerID was introduced to allow some form of safe + // handling, but KIP-360 demonstrated that resetting sequence + // numbers is fundamentally unsafe, so we treat it like OOOSN. + // + // KAFKA-5793 specifically mentions for OOOSN "when you get it, + // it should always mean data loss". Sometime after KIP-360, + // Kafka changed the client to remove all places + // UnknownProducerID was returned, and then started referring + // to OOOSN as retryable. KIP-890 definitively says OOOSN is + // retryable. 
However, the Kafka source as of 24-10-10 still + // only retries OOOSN for batches that are NOT the expected + // next batch (i.e., it's next + 1, for when there are multiple + // in flight). With KIP-890, we still just disregard whatever + // supposedly non-retryable / actually-is-retryable error is + // returned if the LogStartOffset is _after_ what we previously + // produced. Specifically, this is step (4) in the wiki link + // within KAFKA-5793. + // + // InvalidMapping is similar to UnknownProducerID, but occurs + // when the transactional coordinator timed out our transaction. + // + // 2.5 + // ===== + // 2.5 introduced some behavior to potentially safely reset + // the sequence numbers by bumping an epoch (see KIP-360). + // + // For the idempotent producer, the solution is to fail all + // buffered records and then let the client user reset things + // with the understanding that they cannot guard against + // potential dups / reordering at that point. Realistically, + // that's no better than a config knob that allows the user + // to continue (our stopOnDataLoss flag), so for the idempotent + // producer, if stopOnDataLoss is false, we just continue. + // + // For the transactional producer, we always fail the producerID. + // EndTransaction will trigger recovery if possible. + // + // 2.7 + // ===== + // InvalidProducerEpoch became retryable in 2.7. Prior, it + // was ambiguous (timeout? fenced?). Now, InvalidProducerEpoch + // is only returned on produce, and then we can recover on other + // txn coordinator requests, which have PRODUCER_FENCED vs + // TRANSACTION_TIMED_OUT.
+
+ if batch.owner.lastAckedOffset >= 0 && rp.LogStartOffset > batch.owner.lastAckedOffset { + s.cl.cfg.logger.Log(LogLevelInfo, "partition prefix truncation to after our last produce caused the broker to forget us; no loss occurred, bumping producer epoch and resetting sequence numbers", + "broker", logID(s.nodeID), + "topic", topic, + "partition", rp.Partition, + "producer_id", producerID, + "producer_epoch", producerEpoch, + "err", err, + ) + s.cl.failProducerID(producerID, producerEpoch, errReloadProducerID) + if debug { + fmt.Fprintf(b, "resetting@%d,%d(%s)}, ", rp.BaseOffset, nrec, err) + } + return true, false + }
+
+ if s.cl.cfg.txnID != nil || s.cl.cfg.stopOnDataLoss { + s.cl.cfg.logger.Log(LogLevelInfo, "batch errored, failing the producer ID", + "broker", logID(s.nodeID), + "topic", topic, + "partition", rp.Partition, + "producer_id", producerID, + "producer_epoch", producerEpoch, + "err", err, + ) + s.cl.failProducerID(producerID, producerEpoch, err) + + s.cl.finishBatch(batch.recBatch, producerID, producerEpoch, rp.Partition, rp.BaseOffset, err) + if debug { + fmt.Fprintf(b, "fatal@%d,%d(%s)}, ", rp.BaseOffset, nrec, err) + } + return false, false + } + if s.cl.cfg.onDataLoss != nil { + s.cl.cfg.onDataLoss(topic, rp.Partition) + }
+
+ // For OOOSN and UnknownProducerID, + // + // The only recovery is to fail the producer ID, which ensures + // that all batches reset sequence numbers and use a new producer + // ID on the next batch. + // + // For InvalidProducerIDMapping && InvalidProducerEpoch, + // + // We should not be here, since this error occurs in the + // context of transactions, which are caught above.
+ s.cl.cfg.logger.Log(LogLevelInfo, fmt.Sprintf("batch errored with %s, failing the producer ID and resetting all sequence numbers", err.(*kerr.Error).Message), + "broker", logID(s.nodeID), + "topic", topic, + "partition", rp.Partition, + "producer_id", producerID, + "producer_epoch", producerEpoch, + "err", err, + ) + + // After we fail here, any new produce (even new ones + // happening concurrent with this function) will load + // a new epoch-bumped producer ID and all first-batches + // will reset sequence numbers appropriately. + s.cl.failProducerID(producerID, producerEpoch, errReloadProducerID) + if debug { + fmt.Fprintf(b, "resetting@%d,%d(%s)}, ", rp.BaseOffset, nrec, err) + } + return true, false + + case err == kerr.DuplicateSequenceNumber: // ignorable, but we should not get + s.cl.cfg.logger.Log(LogLevelInfo, "received unexpected duplicate sequence number, ignoring and treating batch as successful", + "broker", logID(s.nodeID), + "topic", topic, + "partition", rp.Partition, + ) + err = nil + fallthrough + default: + if err != nil { + s.cl.cfg.logger.Log(LogLevelInfo, "batch in a produce request failed", + "broker", logID(s.nodeID), + "topic", topic, + "partition", rp.Partition, + "err", err, + "err_is_retryable", kerr.IsRetriable(err), + "max_retries_reached", !failUnknown && batch.tries >= s.cl.cfg.recordRetries, + ) + } else { + batch.owner.okOnSink = true + batch.owner.lastAckedOffset = rp.BaseOffset + int64(len(batch.records)) + } + s.cl.finishBatch(batch.recBatch, producerID, producerEpoch, rp.Partition, rp.BaseOffset, err) + didProduce = err == nil + if debug { + if err != nil { + fmt.Fprintf(b, "err@%d,%d(%s)}, ", rp.BaseOffset, nrec, err) + } else { + fmt.Fprintf(b, "%d=>%d}, ", rp.BaseOffset, rp.BaseOffset+int64(nrec)) + } + } + } + return false, didProduce // no retry +} + +// finishBatch removes a batch from its owning record buffer and finishes all +// records in the batch. +// +// This is safe even if the owning recBuf migrated sinks, since we are +// finishing based off the status of an inflight req from the original sink. +func (cl *Client) finishBatch(batch *recBatch, producerID int64, producerEpoch int16, partition int32, baseOffset int64, err error) { + recBuf := batch.owner + + if err != nil { + // We know that Kafka replied this batch is a failure. We can + // fail this batch and all batches in this partition. + // This will keep sequence numbers correct. + recBuf.failAllRecords(err) + return + } + + // We know the batch made it to Kafka successfully without error. + // We remove this batch and finish all records appropriately. + finished := len(batch.records) + recBuf.batch0Seq = incrementSequence(recBuf.batch0Seq, int32(finished)) + recBuf.buffered.Add(-int64(finished)) + recBuf.batches[0] = nil + recBuf.batches = recBuf.batches[1:] + recBuf.batchDrainIdx-- + + batch.mu.Lock() + records, attrs := batch.records, batch.attrs + batch.records = nil + batch.mu.Unlock() + + cl.producer.promiseBatch(batchPromise{ + baseOffset: baseOffset, + pid: producerID, + epoch: producerEpoch, + // A recBuf.attrs is updated when appending to be written. For + // v0 && v1 produce requests, we set bit 8 in the attrs + // corresponding to our own RecordAttr's bit 8 being no + // timestamp type. Thus, we can directly convert the batch + // attrs to our own RecordAttrs. 
+ attrs: RecordAttrs{uint8(attrs)}, + partition: partition, + recs: records, + }) +}
+
+// handleRetryBatches sets any first-buf-batch to failing and triggers a
+// metadata update that will eventually clear the failing state and re-drain.
+//
+// If idempotency is disabled and a batch is timed out or hit the retry limit,
+// we fail it and anything after it.
+func (s *sink) handleRetryBatches( + retry seqRecBatches, + kmove *kip951move, + backoffSeq uint32, + updateMeta bool, // if we should maybe update the metadata + canFail bool, // if records can fail if they are at limits + why string, +) { + logger := s.cl.cfg.logger + debug := logger.Level() >= LogLevelDebug + var needsMetaUpdate bool + var shouldBackoff bool + if kmove != nil { + defer kmove.maybeBeginMove(s.cl) + } + var numRetryBatches, numMoveBatches int + retry.eachOwnerLocked(func(batch seqRecBatch) { + numRetryBatches++ + if !batch.isOwnersFirstBatch() { + if debug { + logger.Log(LogLevelDebug, "retry batch is not the first batch in the owner, skipping result", + "topic", batch.owner.topic, + "partition", batch.owner.partition, + ) + } + return + }
+
+ // If the request failed due to a concurrent metadata update + // moving partitions to a different sink (or killing the sink + // this partition was on), we can just reset the drain index + // and trigger draining now on the new sink. There is no reason + // to back off on this sink nor trigger a metadata update. + if batch.owner.sink != s { + if debug { + logger.Log(LogLevelDebug, "transitioned sinks while a request was inflight, retrying immediately on new sink without backoff", + "topic", batch.owner.topic, + "partition", batch.owner.partition, + "old_sink", s.nodeID, + "new_sink", batch.owner.sink.nodeID, + ) + } + batch.owner.resetBatchDrainIdx() + return + }
+
+ if canFail || s.cl.cfg.disableIdempotency { + if err := batch.maybeFailErr(&s.cl.cfg); err != nil { + batch.owner.failAllRecords(err) + return + } + }
+
+ batch.owner.resetBatchDrainIdx()
+
+ // Now that the batch drain index is reset, if this retry is + // caused by a moving batch, return early. We do not need + // to back off nor do we need to trigger a metadata update. + if kmove.hasRecBuf(batch.owner) { + numMoveBatches++ + return + }
+
+ // If our first batch (seq == 0) fails with unknown topic, we + // retry immediately. Kafka can reply with valid metadata + // immediately after a topic was created, before the leaders + // actually know they are leader. + unknownAndFirstBatch := batch.owner.unknownFailures == 1 && batch.owner.seq == 0
+
+ if unknownAndFirstBatch { + shouldBackoff = true + return + } + if updateMeta { + batch.owner.failing = true + needsMetaUpdate = true + } + })
+
+ if debug { + logger.Log(LogLevelDebug, "retry batches processed", + "wanted_metadata_update", updateMeta, + "triggering_metadata_update", needsMetaUpdate, + "should_backoff", shouldBackoff, + ) + }
+
+ // If we do want to update metadata, we only do so if any batch was the + // first batch in its buf / not concurrently failed. + if needsMetaUpdate { + s.cl.triggerUpdateMetadata(true, why) + return + }
+
+ // We might not need a metadata update, for two reasons: + // + // * our request died when being issued + // + // * we would update metadata, but what failed was the first batch + // produced and the error was unknown topic / partition. + // + // In either of these cases, we should back off a little bit to avoid + // spin looping.
+ // + // If neither of these cases is true, then we entered wanting a + // metadata update, but the batches either were not the first batch, or + // the batches were concurrently failed. + // + // If all partitions are moving, we do not need to back off nor drain. + if shouldBackoff || (!updateMeta && numRetryBatches != numMoveBatches) { + s.maybeTriggerBackoff(backoffSeq) + s.maybeDrain() + } +}
+
+// addRecBuf adds a new record buffer to be drained to a sink and clears the
+// buffer's failing state.
+func (s *sink) addRecBuf(add *recBuf) { + s.recBufsMu.Lock() + add.recBufsIdx = len(s.recBufs) + s.recBufs = append(s.recBufs, add) + s.recBufsMu.Unlock() + + add.clearFailing() +}
+
+// removeRecBuf removes a record buffer from a sink.
+func (s *sink) removeRecBuf(rm *recBuf) { + s.recBufsMu.Lock() + defer s.recBufsMu.Unlock() + + if rm.recBufsIdx != len(s.recBufs)-1 { + s.recBufs[rm.recBufsIdx], s.recBufs[len(s.recBufs)-1] = s.recBufs[len(s.recBufs)-1], nil + s.recBufs[rm.recBufsIdx].recBufsIdx = rm.recBufsIdx + } else { + s.recBufs[rm.recBufsIdx] = nil // do not let this removal hang around + } + + s.recBufs = s.recBufs[:len(s.recBufs)-1] + if s.recBufsStart == len(s.recBufs) { + s.recBufsStart = 0 + } +}
+
+// recBuf is a buffer of records being produced to a partition and being
+// drained by a sink. This is only not drained if the partition has a load
+// error and thus does not have a sink to be drained into.
+type recBuf struct { + cl *Client // for cfg, record finishing + + topic string + partition int32 + + // The number of bytes we can buffer in a batch for this particular + // topic/partition. This may be less than the configured + // maxRecordBatchBytes because of produce request overhead. + maxRecordBatchBytes int32 + + // addedToTxn, for transactions only, signifies whether this partition + // has been added to the transaction yet or not. + addedToTxn atomicBool + + // For LoadTopicPartitioner partitioning; atomically tracks the number + // of records buffered in total on this recBuf. + buffered atomicI64 + + mu sync.Mutex // guards r/w access to all fields below + + // sink is who is currently draining us. This can be modified + // concurrently during a metadata update. + // + // The first set to a non-nil sink is done without a mutex. + // + // Since only metadata updates can change the sink, metadata updates + // also read this without a mutex. + sink *sink + // recBufsIdx is our index into our current sink's recBufs field. + // This exists to aid in removing the buffer from the sink. + recBufsIdx int + + // A concurrent metadata update can move a recBuf from one sink to + // another while requests are inflight on the original sink. We do not + // want to allow new requests to start on the new sink until they all + // finish on the old, because with some pathological request order + // finishing, we would allow requests to finish out of order: + // handleSeqResps works per sink, not across sinks. + inflightOnSink *sink + // We only want to allow more than 1 inflight on a sink *if* we are + // currently receiving successful responses. Unimportantly, this allows + // us to save resources if the broker is having a problem or just + // recovered from one. Importantly, we work around an edge case in + // Kafka. Kafka will accept the first produce request for a pid/epoch + // with *any* sequence number. Say we sent two requests inflight. Kafka + // replies to the first with NOT_LEADER_FOR_PARTITION; by the second, + // the broker has finished setting up and accepts it. The broker now
The broker now + // has the second request but not the first, we will retry both + // requests and receive OOOSN, and the broker has logs out of order. + // By only allowing more than one inflight if we have seen an ok + // response, we largely eliminate risk of this problem. See #223 for + // more details. + okOnSink bool + // Inflight tracks the number of requests inflight using batches from + // this recBuf. Every time this hits zero, if the batchDrainIdx is not + // at the end, we clear inflightOnSink and trigger the *current* sink + // to drain. + inflight uint8 + + lastAckedOffset int64 // last ProduceResponse's BaseOffset + how many records we produced + + topicPartitionData // updated in metadata migrateProductionTo (same spot sink is updated) + + // seq is used for the seq in each record batch. It is incremented when + // produce requests are made and can be reset on errors to batch0Seq. + // + // If idempotency is disabled, we just use "0" for the first sequence + // when encoding our payload. + // + // This is also used to check the first batch produced (disregarding + // seq resets) -- see handleRetryBatches. + seq int32 + // batch0Seq is the seq of the batch at batchDrainIdx 0. If we reset + // the drain index, we reset seq with this number. If we successfully + // finish batch 0, we bump this. + batch0Seq int32 + // If we need to reset sequence numbers, we set needSeqReset, and then + // when we use the **first** batch, we reset sequences to 0. + needSeqReset bool + + // batches is our list of buffered records. Batches are appended as the + // final batch crosses size thresholds or as drain freezes batches from + // further modification. + // + // Most functions in a sink only operate on a batch if the batch is the + // first batch in a buffer. This is necessary to ensure that all + // records are truly finished without error in order. + batches []*recBatch + // batchDrainIdx is where the next batch will drain from. We only + // remove from the head of batches when a batch is finished. + // This is read while buffering and modified in a few places. + batchDrainIdx int + + // If we fail with UNKNOWN_TOPIC_OR_PARTITION, we bump this and fail + // all records once this exceeds the config's unknown topic fail limit. + // If we ever see a different error (or no error), this is reset. + unknownFailures int64 + + // lingering is a timer that avoids starting maybeDrain until expiry, + // allowing for more records to be buffered in a single batch. + // + // Note that if something else starts a drain, if the first batch of + // this buffer fits into the request, it will be used. + // + // This is on recBuf rather than Sink to avoid some complicated + // interactions of triggering the sink to loop or not. Ideally, with + // the sticky partition hashers, we will only have a few partitions + // lingering and that this is on a RecBuf should not matter. + lingering *time.Timer + + // failing is set when we encounter a temporary partition error during + // producing, such as UnknownTopicOrPartition (signifying the partition + // moved to a different broker). + // + // It is always cleared on metadata update. + failing bool + + // Only possibly set in PurgeTopics, this is used to fail anything that + // was in the process of being buffered. + purged bool +} + +// bufferRecord usually buffers a record, but does not if abortOnNewBatch is +// true and if this function would create a new batch. 
+//
+// This returns whether the promised record was processed or not (buffered or
+// immediately errored).
+func (recBuf *recBuf) bufferRecord(pr promisedRec, abortOnNewBatch bool) bool {
+	recBuf.mu.Lock()
+	defer recBuf.mu.Unlock()
+
+	// We truncate to milliseconds to avoid some accumulated rounding error
+	// problems (see IBM/sarama#1455)
+	if pr.Timestamp.IsZero() {
+		pr.Timestamp = time.Now()
+	}
+	pr.Timestamp = pr.Timestamp.Truncate(time.Millisecond)
+	pr.Partition = recBuf.partition // set now, for the hook below
+
+	if recBuf.purged {
+		recBuf.cl.producer.promiseRecord(pr, errPurged)
+		return true
+	}
+
+	var (
+		newBatch       = true
+		onDrainBatch   = recBuf.batchDrainIdx == len(recBuf.batches)
+		produceVersion = recBuf.sink.produceVersion.Load()
+	)
+
+	if !onDrainBatch {
+		batch := recBuf.batches[len(recBuf.batches)-1]
+		appended, _ := batch.tryBuffer(pr, produceVersion, recBuf.maxRecordBatchBytes, false)
+		newBatch = !appended
+	}
+
+	if newBatch {
+		newBatch := recBuf.newRecordBatch()
+		appended, aborted := newBatch.tryBuffer(pr, produceVersion, recBuf.maxRecordBatchBytes, abortOnNewBatch)
+
+		switch {
+		case aborted: // not processed
+			return false
+		case appended: // we return true below
+		default: // processed as failure
+			recBuf.cl.producer.promiseRecord(pr, kerr.MessageTooLarge)
+			return true
+		}
+
+		recBuf.batches = append(recBuf.batches, newBatch)
+	}
+
+	if recBuf.cl.cfg.linger == 0 {
+		if onDrainBatch {
+			recBuf.sink.maybeDrain()
+		}
+	} else {
+		// With linger, if this is a new batch but not the first, we
+		// stop lingering and begin draining. The drain loop will
+		// restart our linger once this buffer has one batch left.
+		if newBatch && !onDrainBatch ||
+			// If this is the first batch, try lingering; if
+			// we cannot, we are being flushed and must drain.
+			onDrainBatch && !recBuf.lockedMaybeStartLinger() {
+			recBuf.lockedStopLinger()
+			recBuf.sink.maybeDrain()
+		}
+	}
+
+	recBuf.buffered.Add(1)
+
+	if recBuf.cl.producer.hooks != nil && len(recBuf.cl.producer.hooks.partitioned) > 0 {
+		for _, h := range recBuf.cl.producer.hooks.partitioned {
+			h.OnProduceRecordPartitioned(pr.Record, recBuf.sink.nodeID)
+		}
+	}
+
+	return true
+}
+
+// Stops lingering, potentially restarting it, and returns whether there is
+// more to drain.
+//
+// If lingering, and there is more than one batch ready, there is definitely
+// more to drain and we should not linger. Otherwise, if we cannot restart
+// lingering, then we are flushing and also indicate there is more to drain.
+func (recBuf *recBuf) tryStopLingerForDraining() bool {
+	recBuf.lockedStopLinger()
+	canLinger := recBuf.cl.cfg.linger == 0
+	moreToDrain := !canLinger && len(recBuf.batches) > recBuf.batchDrainIdx ||
+		canLinger && (len(recBuf.batches) > recBuf.batchDrainIdx+1 ||
+			len(recBuf.batches) == recBuf.batchDrainIdx+1 && !recBuf.lockedMaybeStartLinger())
+	return moreToDrain
+}
+
+// Begins a linger timer unless the producer is being flushed.
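+//
+// As a rough usage sketch (the option name is the public kgo option; the
+// value is illustrative): with a client built via
+//
+//	cl, _ := kgo.NewClient(kgo.ProducerLinger(50 * time.Millisecond))
+//
+// the first buffered batch waits up to 50ms for more records before the sink
+// drains it, and an active Flush bypasses the wait (the flushing check
+// below).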
+func (recBuf *recBuf) lockedMaybeStartLinger() bool { + if recBuf.cl.producer.flushing.Load() > 0 || recBuf.cl.producer.blocked.Load() > 0 { + return false + } + recBuf.lingering = time.AfterFunc(recBuf.cl.cfg.linger, recBuf.sink.maybeDrain) + return true +} + +func (recBuf *recBuf) lockedStopLinger() { + if recBuf.lingering != nil { + recBuf.lingering.Stop() + recBuf.lingering = nil + } +} + +func (recBuf *recBuf) unlingerAndManuallyDrain() { + recBuf.mu.Lock() + defer recBuf.mu.Unlock() + recBuf.lockedStopLinger() + recBuf.sink.maybeDrain() +} + +// bumpRepeatedLoadErr is provided to bump a buffer's number of consecutive +// load errors during metadata updates. +// +// Partition load errors are generally temporary (leader/listener/replica not +// available), and this try bump is not expected to do much. If for some reason +// a partition errors for a long time and we are not idempotent, this function +// drops all buffered records. +func (recBuf *recBuf) bumpRepeatedLoadErr(err error) { + recBuf.mu.Lock() + defer recBuf.mu.Unlock() + if len(recBuf.batches) == 0 { + return + } + batch0 := recBuf.batches[0] + batch0.tries++ + + // We need to lock the batch as well because there could be a buffered + // request about to be written. Writing requests only grabs the batch + // mu, not the recBuf mu. + batch0.mu.Lock() + var ( + canFail = !recBuf.cl.idempotent() || batch0.canFailFromLoadErrs // we can only fail if we are not idempotent or if we have no outstanding requests + batch0Fail = batch0.maybeFailErr(&recBuf.cl.cfg) != nil // timeout, retries, or aborting + netErr = isRetryableBrokerErr(err) || isDialNonTimeoutErr(err) // we can fail if this is *not* a network error + retryableKerr = kerr.IsRetriable(err) // we fail if this is not a retryable kerr, + isUnknownLimit = recBuf.checkUnknownFailLimit(err) // or if it is, but it is UnknownTopicOrPartition and we are at our limit + + willFail = canFail && (batch0Fail || !netErr && (!retryableKerr || retryableKerr && isUnknownLimit)) + ) + batch0.isFailingFromLoadErr = willFail + batch0.mu.Unlock() + + recBuf.cl.cfg.logger.Log(LogLevelWarn, "produce partition load error, bumping error count on first stored batch", + "broker", logID(recBuf.sink.nodeID), + "topic", recBuf.topic, + "partition", recBuf.partition, + "err", err, + "can_fail", canFail, + "batch0_should_fail", batch0Fail, + "is_network_err", netErr, + "is_retryable_kerr", retryableKerr, + "is_unknown_limit", isUnknownLimit, + "will_fail", willFail, + ) + + if willFail { + recBuf.failAllRecords(err) + } +} + +// Called locked, if err is an unknown error, bumps our limit, otherwise resets +// it. This returns if we have reached or exceeded the limit. +func (recBuf *recBuf) checkUnknownFailLimit(err error) bool { + if errors.Is(err, kerr.UnknownTopicOrPartition) { + recBuf.unknownFailures++ + } else { + recBuf.unknownFailures = 0 + } + return recBuf.cl.cfg.maxUnknownFailures >= 0 && recBuf.unknownFailures > recBuf.cl.cfg.maxUnknownFailures +} + +// failAllRecords fails all buffered records in this recBuf. +// This is used anywhere where we have to fail and remove an entire batch, +// if we just removed the one batch, the seq num chain would be broken. 
+//
+// - from fatal InitProducerID or AddPartitionsToTxn
+// - from client closing
+// - if not idempotent && hit retry / timeout limit
+// - if batch fails fatally when producing
+func (recBuf *recBuf) failAllRecords(err error) {
+	recBuf.lockedStopLinger()
+	for _, batch := range recBuf.batches {
+		// We need to guard our clearing of records against a
+		// concurrent produceRequest's write, which can have this batch
+		// buffered while we are failing.
+		//
+		// We do not need to worry about concurrent recBuf
+		// modifications to this batch because the recBuf is already
+		// locked.
+		batch.mu.Lock()
+		records := batch.records
+		batch.records = nil
+		batch.mu.Unlock()
+
+		recBuf.cl.producer.promiseBatch(batchPromise{
+			recs: records,
+			err:  err,
+		})
+	}
+	recBuf.resetBatchDrainIdx()
+	recBuf.buffered.Store(0)
+	recBuf.batches = nil
+}
+
+// clearFailing clears a buffer's failing state if it is failing.
+//
+// This is called when a buffer is added to a sink (to clear a failing state
+// from migrating buffers between sinks) or when a metadata update sees the
+// sink is still on the same source.
+func (recBuf *recBuf) clearFailing() {
+	recBuf.mu.Lock()
+	defer recBuf.mu.Unlock()
+
+	recBuf.failing = false
+	if len(recBuf.batches) != recBuf.batchDrainIdx {
+		recBuf.sink.maybeDrain()
+	}
+}
+
+func (recBuf *recBuf) resetBatchDrainIdx() {
+	recBuf.seq = recBuf.batch0Seq
+	recBuf.batchDrainIdx = 0
+}
+
+// promisedRec ties a record with the callback that will be called once
+// a batch is finally written and receives a response.
+type promisedRec struct {
+	ctx     context.Context
+	promise func(*Record, error)
+	*Record
+}
+
+func (pr promisedRec) cancelingCtx() context.Context {
+	if pr.ctx.Done() != nil {
+		return pr.ctx
+	}
+	if pr.Context.Done() != nil {
+		return pr.Context
+	}
+	return nil
+}
+
+// recBatch is the type used for buffering records before they are written.
+type recBatch struct {
+	owner *recBuf // who owns us
+
+	tries int64 // if this was sent before and is thus now immutable
+
+	// We can only fail a batch if we have never issued it, or we have
+	// issued it and have received a response. If we do not receive a
+	// response, we cannot know whether we actually wrote bytes that Kafka
+	// processed or not. So, we set this to false every time we issue a
+	// request with this batch, and then reset it to true whenever we
+	// process a response.
+	canFailFromLoadErrs bool
+	// If we are going to fail the batch in bumpRepeatedLoadErr, we need to
+	// set this bool to true. There could be a concurrent request about to
+	// be written. See more comments below where this is used.
+	isFailingFromLoadErr bool
+
+	wireLength   int32 // tracks total size this batch would currently encode as, including length prefix
+	v1wireLength int32 // same as wireLength, but for message set v1
+
+	attrs             int16 // updated during appending; read and converted to RecordAttrs on success
+	firstTimestamp    int64 // since unix epoch, in millis
+	maxTimestampDelta int64
+
+	mu      sync.Mutex    // guards appendTo's reading of records against failAllRecords emptying it
+	records []promisedRec // record w/ length, ts calculated
+}
+
+// Returns an error if the batch should fail.
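+//
+// The checks below pair with client configuration roughly as follows (the
+// option names are the public kgo options; the pairing is a sketch):
+//
+//	record ctx canceled        -> the caller's produce context was canceled
+//	isTimedOut(recordTimeout)  -> kgo.RecordDeliveryTimeout
+//	tries >= recordRetries     -> kgo.RecordRetries
+//	producer.isAborting()      -> kgo.(*Client).AbortBufferedRecords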
+func (b *recBatch) maybeFailErr(cfg *cfg) error {
+	if len(b.records) > 0 {
+		r0 := &b.records[0]
+		select {
+		case <-r0.ctx.Done():
+			return r0.ctx.Err()
+		case <-r0.Context.Done():
+			return r0.Context.Err()
+		default:
+		}
+	}
+	switch {
+	case b.isTimedOut(cfg.recordTimeout):
+		return ErrRecordTimeout
+	case b.tries >= cfg.recordRetries:
+		return ErrRecordRetries
+	case b.owner.cl.producer.isAborting():
+		return ErrAborting
+	}
+	return nil
+}
+
+func (b *recBatch) v0wireLength() int32 { return b.v1wireLength - 8 } // no timestamp
+func (b *recBatch) batchLength() int32  { return b.wireLength - 4 }  // no length prefix
+func (b *recBatch) flexibleWireLength() int32 { // uvarint length prefix
+	batchLength := b.batchLength()
+	return int32(kbin.UvarintLen(uvar32(batchLength))) + batchLength
+}
+
+// appendRecord saves a new record to a batch.
+//
+// This is called under the owning recBuf's mu, meaning records cannot be
+// concurrently modified by failing. This batch cannot actively be used
+// in a request, so we do not need to worry about a concurrent read.
+func (b *recBatch) appendRecord(pr promisedRec, nums recordNumbers) {
+	b.wireLength += nums.wireLength()
+	b.v1wireLength += messageSet1Length(pr.Record)
+	if len(b.records) == 0 {
+		b.firstTimestamp = pr.Timestamp.UnixNano() / 1e6
+	} else if nums.tsDelta > b.maxTimestampDelta {
+		b.maxTimestampDelta = nums.tsDelta
+	}
+	b.records = append(b.records, pr)
+}
+
+// newRecordBatch returns a new record batch for a topic and partition.
+func (recBuf *recBuf) newRecordBatch() *recBatch {
+	const recordBatchOverhead = 4 + // array len
+		8 + // firstOffset
+		4 + // batchLength
+		4 + // partitionLeaderEpoch
+		1 + // magic
+		4 + // crc
+		2 + // attributes
+		4 + // lastOffsetDelta
+		8 + // firstTimestamp
+		8 + // maxTimestamp
+		8 + // producerID
+		2 + // producerEpoch
+		4 + // seq
+		4 // record array length
+	return &recBatch{
+		owner:      recBuf,
+		records:    recBuf.cl.prsPool.get()[:0],
+		wireLength: recordBatchOverhead,
+
+		canFailFromLoadErrs: true, // until we send this batch, we can fail it
+	}
+}
+
+type prsPool struct{ p *sync.Pool }
+
+func newPrsPool() prsPool {
+	return prsPool{
+		p: &sync.Pool{New: func() any { r := make([]promisedRec, 10); return &r }},
+	}
+}
+
+func (p prsPool) get() []promisedRec  { return (*p.p.Get().(*[]promisedRec))[:0] }
+func (p prsPool) put(s []promisedRec) { p.p.Put(&s) }
+
+// isOwnersFirstBatch returns whether this batch is the first batch in its
+// owner's buffer. We only ever want to update batch / buffer logic if the
+// batch is the first in the buffer.
+func (b *recBatch) isOwnersFirstBatch() bool {
+	return len(b.owner.batches) > 0 && b.owner.batches[0] == b
+}
+
+// Returns whether the first record in a batch is past the limit.
+func (b *recBatch) isTimedOut(limit time.Duration) bool {
+	if limit == 0 {
+		return false
+	}
+	return time.Since(b.records[0].Timestamp) > limit
+}
+
+// Decrements the inflight count for this batch.
+//
+// If the inflight count hits zero, this potentially re-triggers a drain on the
+// *current* sink. A concurrent metadata update could have moved the recBuf to
+// a different sink; that sink will not drain this recBuf until all requests on
+// the old sink are finished.
+//
+// This is always called in the produce request path, not anywhere else (i.e.
+// not failAllRecords). We want inflight decrementing to be the last thing that
+// happens always for every request. 
It does not matter if the records were
+// independently failed: from the request issuing perspective, the batch is
+// still inflight.
+func (b *recBatch) decInflight() {
+	recBuf := b.owner
+	recBuf.inflight--
+	if recBuf.inflight != 0 {
+		return
+	}
+	recBuf.inflightOnSink = nil
+	if recBuf.batchDrainIdx != len(recBuf.batches) {
+		recBuf.sink.maybeDrain()
+	}
+}
+
+////////////////////
+// produceRequest //
+////////////////////
+
+// produceRequest is a kmsg.Request that is used when we want to
+// flush our buffered records.
+//
+// It is the same as kmsg.ProduceRequest, but with a custom AppendTo.
+type produceRequest struct {
+	version int16
+
+	backoffSeq uint32
+
+	txnID   *string
+	acks    int16
+	timeout int32
+	batches seqRecBatches
+
+	producerID    int64
+	producerEpoch int16
+
+	// Initialized in AppendTo, metrics tracks uncompressed & compressed
+	// sizes (in bytes) of each batch.
+	//
+	// We use this in handleReqResp for the OnProduceHook.
+	metrics produceMetrics
+	hasHook bool
+
+	compressor *compressor
+
+	// wireLength is initially the size of sending a produce request,
+	// including the request header, with no topics. We start with the
+	// non-flexible size because it is strictly larger than flexible, but
+	// we use the proper flexible numbers when calculating.
+	wireLength      int32
+	wireLengthLimit int32
+}
+
+type produceMetrics map[string]map[int32]ProduceBatchMetrics
+
+func (p produceMetrics) hook(cfg *cfg, br *broker) {
+	if len(p) == 0 {
+		return
+	}
+	var hooks []HookProduceBatchWritten
+	cfg.hooks.each(func(h Hook) {
+		if h, ok := h.(HookProduceBatchWritten); ok {
+			hooks = append(hooks, h)
+		}
+	})
+	if len(hooks) == 0 {
+		return
+	}
+	go func() {
+		for _, h := range hooks {
+			for topic, partitions := range p {
+				for partition, metrics := range partitions {
+					h.OnProduceBatchWritten(br.meta, topic, partition, metrics)
+				}
+			}
+		}
+	}()
+}
+
+func (p *produceRequest) idempotent() bool { return p.producerID >= 0 }
+
+func (p *produceRequest) tryAddBatch(produceVersion int32, recBuf *recBuf, batch *recBatch) bool {
+	batchWireLength, flexible := batch.wireLengthForProduceVersion(produceVersion)
+	batchWireLength += 4 // int32 partition prefix
+
+	if partitions, exists := p.batches[recBuf.topic]; !exists {
+		lt := int32(len(recBuf.topic))
+		if flexible {
+			batchWireLength += uvarlen(len(recBuf.topic)) + lt + 1 // compact string len, topic, compact array len for 1 item
+		} else {
+			batchWireLength += 2 + lt + 4 // string len, topic, partition array len
+		}
+	} else if flexible {
+		// If the topic exists and we are flexible, adding this
+		// partition may increase the length of our size prefix.
+		lastPartitionsLen := uvarlen(len(partitions))
+		newPartitionsLen := uvarlen(len(partitions) + 1)
+		batchWireLength += (newPartitionsLen - lastPartitionsLen)
+	}
+	// If we are flexible but do not know it yet, adding partitions may
+	// increase our length prefix. Since we are pessimistically assuming
+	// non-flexible, we have 200mil partitions to add before we have to
+	// worry about hitting 5 bytes vs. the non-flexible 4. We do not worry.
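	//
	// To put numbers on the uvarint sizing (illustrative): a compact
	// array length of 100 partitions encodes uvarint(100+1) in one byte,
	// and the prefix only grows to five bytes once the encoded value
	// reaches 2^28 (~268 million) -- the roughly 200mil of headroom
	// mentioned above.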
+ + if p.wireLength+batchWireLength > p.wireLengthLimit { + return false + } + + if recBuf.batches[0] == batch { + if !p.idempotent() || batch.canFailFromLoadErrs { + if err := batch.maybeFailErr(&batch.owner.cl.cfg); err != nil { + recBuf.failAllRecords(err) + return false + } + } + if recBuf.needSeqReset { + recBuf.needSeqReset = false + recBuf.seq = 0 + recBuf.batch0Seq = 0 + } + } + + batch.tries++ + p.wireLength += batchWireLength + p.batches.addBatch( + recBuf.topic, + recBuf.partition, + recBuf.seq, + batch, + ) + return true +} + +// seqRecBatch: a recBatch with a sequence number. +type seqRecBatch struct { + seq int32 + *recBatch +} + +type seqRecBatches map[string]map[int32]seqRecBatch + +func (rbs *seqRecBatches) addBatch(topic string, part, seq int32, batch *recBatch) { + if *rbs == nil { + *rbs = make(seqRecBatches, 5) + } + topicBatches, exists := (*rbs)[topic] + if !exists { + topicBatches = make(map[int32]seqRecBatch, 1) + (*rbs)[topic] = topicBatches + } + topicBatches[part] = seqRecBatch{seq, batch} +} + +func (rbs *seqRecBatches) addSeqBatch(topic string, part int32, batch seqRecBatch) { + if *rbs == nil { + *rbs = make(seqRecBatches, 5) + } + topicBatches, exists := (*rbs)[topic] + if !exists { + topicBatches = make(map[int32]seqRecBatch, 1) + (*rbs)[topic] = topicBatches + } + topicBatches[part] = batch +} + +func (rbs seqRecBatches) each(fn func(seqRecBatch)) { + for _, partitions := range rbs { + for _, batch := range partitions { + fn(batch) + } + } +} + +func (rbs seqRecBatches) eachOwnerLocked(fn func(seqRecBatch)) { + rbs.each(func(batch seqRecBatch) { + batch.owner.mu.Lock() + defer batch.owner.mu.Unlock() + fn(batch) + }) +} + +func (rbs seqRecBatches) sliced() recBatches { + var batches []*recBatch + for _, partitions := range rbs { + for _, batch := range partitions { + batches = append(batches, batch.recBatch) + } + } + return batches +} + +type recBatches []*recBatch + +func (bs recBatches) eachOwnerLocked(fn func(*recBatch)) { + for _, b := range bs { + b.owner.mu.Lock() + fn(b) + b.owner.mu.Unlock() + } +} + +////////////// +// COUNTING // - this section is all about counting how bytes lay out on the wire +////////////// + +// Returns the non-flexible base produce request length (the request header and +// the request itself with no topics). +// +// See the large comment on maxRecordBatchBytesForTopic for why we always use +// non-flexible (in short: it is strictly larger). +func (cl *Client) baseProduceRequestLength() int32 { + const messageRequestOverhead int32 = 4 + // int32 length prefix + 2 + // int16 key + 2 + // int16 version + 4 + // int32 correlation ID + 2 // int16 client ID len (always non flexible) + // empty tag section skipped; see below + + const produceRequestBaseOverhead int32 = 2 + // int16 transactional ID len (flexible or not, since we cap at 16382) + 2 + // int16 acks + 4 + // int32 timeout + 4 // int32 topics non-flexible array length + // empty tag section skipped; see below + + baseLength := messageRequestOverhead + produceRequestBaseOverhead + if cl.cfg.id != nil { + baseLength += int32(len(*cl.cfg.id)) + } + if cl.cfg.txnID != nil { + baseLength += int32(len(*cl.cfg.txnID)) + } + return baseLength +} + +// Returns the maximum size a record batch can be for this given topic, such +// that if just a **single partition** is fully stuffed with records and we +// only encode that one partition, we will not overflow our configured limits. 
+//
+// The maximum topic length is 249, which has a 2 byte prefix for flexible or
+// non-flexible.
+//
+// Non-flexible versions will have a 4 byte length topic array prefix, a 4 byte
+// length partition array prefix, and a 4 byte records array length prefix.
+//
+// Flexible versions would have a 1 byte length topic array prefix, a 1 byte
+// length partition array prefix, up to 5 bytes for the records array length
+// prefix, and three empty tag sections resulting in 3 bytes (produce request
+// struct, topic struct, partition struct). As well, for the request header
+// itself, we have an additional 1 byte tag section (that we currently keep
+// empty).
+//
+// Thus in the worst case, we have 14 bytes of prefixes for non-flexible vs.
+// 11 bytes for flexible. We default to the more limiting size: non-flexible.
+func (cl *Client) maxRecordBatchBytesForTopic(topic string) int32 {
+	minOnePartitionBatchLength := cl.baseProduceRequestLength() +
+		2 + // int16 topic string length prefix length
+		int32(len(topic)) +
+		4 + // int32 partitions array length
+		4 + // partition int32 encoding length
+		4 // int32 record bytes array length
+
+	wireLengthLimit := cl.cfg.maxBrokerWriteBytes
+
+	recordBatchLimit := wireLengthLimit - minOnePartitionBatchLength
+	if cfgLimit := cl.cfg.maxRecordBatchBytes; cfgLimit < recordBatchLimit {
+		recordBatchLimit = cfgLimit
+	}
+	return recordBatchLimit
+}
+
+func messageSet0Length(r *Record) int32 {
+	const length = 4 + // array len
+		8 + // offset
+		4 + // size
+		4 + // crc
+		1 + // magic
+		1 + // attributes
+		4 + // key array bytes len
+		4 // value array bytes len
+	return length + int32(len(r.Key)) + int32(len(r.Value))
+}
+
+func messageSet1Length(r *Record) int32 {
+	return messageSet0Length(r) + 8 // timestamp
+}
+
+// Returns the numbers for a record if it were added to the record batch.
+func (b *recBatch) calculateRecordNumbers(r *Record) recordNumbers {
+	tsMillis := r.Timestamp.UnixNano() / 1e6
+	tsDelta := tsMillis - b.firstTimestamp
+
+	// If this is to be the first record in the batch, then our timestamp
+	// delta is actually 0.
+	if len(b.records) == 0 {
+		tsDelta = 0
+	}
+
+	offsetDelta := int32(len(b.records)) // since called before adding record, delta is the current end
+
+	l := 1 + // attributes, int8 unused
+		kbin.VarlongLen(tsDelta) +
+		kbin.VarintLen(offsetDelta) +
+		kbin.VarintLen(int32(len(r.Key))) +
+		len(r.Key) +
+		kbin.VarintLen(int32(len(r.Value))) +
+		len(r.Value) +
+		kbin.VarintLen(int32(len(r.Headers))) // varint array len headers
+
+	for _, h := range r.Headers {
+		l += kbin.VarintLen(int32(len(h.Key))) +
+			len(h.Key) +
+			kbin.VarintLen(int32(len(h.Value))) +
+			len(h.Value)
+	}
+
+	return recordNumbers{
+		lengthField: int32(l),
+		tsDelta:     tsDelta,
+	}
+}
+
+func uvar32(l int32) uint32 { return 1 + uint32(l) }
+func uvarlen(l int) int32   { return int32(kbin.UvarintLen(uvar32(int32(l)))) }
+
+// recordNumbers tracks a few numbers for a record that is buffered.
+type recordNumbers struct {
+	lengthField int32 // the length field prefix of a record encoded on the wire
+	tsDelta     int64 // the ms delta of when the record was added against the first timestamp
+}
+
+// wireLength is the wire length of a record including its length field prefix.
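+//
+// Record length prefixes are zigzag varints, so (illustrative numbers) a
+// record body that encodes to 60 bytes pays a one byte prefix (wireLength
+// 61), while a 300 byte body pays two bytes (302): one byte only covers
+// values up to 63 after zigzag encoding.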
+func (n recordNumbers) wireLength() int32 { + return int32(kbin.VarintLen(n.lengthField)) + n.lengthField +} + +func (b *recBatch) wireLengthForProduceVersion(v int32) (batchWireLength int32, flexible bool) { + batchWireLength = b.wireLength + + // If we do not yet know the produce version, we default to the largest + // size. Our request building sizes will always be an overestimate. + if v < 0 { + v1BatchWireLength := b.v1wireLength + if v1BatchWireLength > batchWireLength { + batchWireLength = v1BatchWireLength + } + flexibleBatchWireLength := b.flexibleWireLength() + if flexibleBatchWireLength > batchWireLength { + batchWireLength = flexibleBatchWireLength + } + } else { + switch v { + case 0, 1: + batchWireLength = b.v0wireLength() + case 2: + batchWireLength = b.v1wireLength + case 3, 4, 5, 6, 7, 8: + batchWireLength = b.wireLength + default: + batchWireLength = b.flexibleWireLength() + flexible = true + } + } + + return +} + +func (b *recBatch) tryBuffer(pr promisedRec, produceVersion, maxBatchBytes int32, abortOnNewBatch bool) (appended, aborted bool) { + nums := b.calculateRecordNumbers(pr.Record) + + batchWireLength, _ := b.wireLengthForProduceVersion(produceVersion) + newBatchLength := batchWireLength + nums.wireLength() + + if b.tries != 0 || newBatchLength > maxBatchBytes { + return false, false + } + if abortOnNewBatch { + return false, true + } + b.appendRecord(pr, nums) + pr.setLengthAndTimestampDelta( + nums.lengthField, + nums.tsDelta, + ) + return true, false +} + +////////////// +// ENCODING // - this section is all about actually writing a produce request +////////////// + +func (*produceRequest) Key() int16 { return 0 } +func (*produceRequest) MaxVersion() int16 { return 11 } +func (p *produceRequest) SetVersion(v int16) { p.version = v } +func (p *produceRequest) GetVersion() int16 { return p.version } +func (p *produceRequest) IsFlexible() bool { return p.version >= 9 } +func (p *produceRequest) AppendTo(dst []byte) []byte { + flexible := p.IsFlexible() + + if p.hasHook { + p.metrics = make(map[string]map[int32]ProduceBatchMetrics) + } + + if p.version >= 3 { + if flexible { + dst = kbin.AppendCompactNullableString(dst, p.txnID) + } else { + dst = kbin.AppendNullableString(dst, p.txnID) + } + } + + dst = kbin.AppendInt16(dst, p.acks) + dst = kbin.AppendInt32(dst, p.timeout) + if flexible { + dst = kbin.AppendCompactArrayLen(dst, len(p.batches)) + } else { + dst = kbin.AppendArrayLen(dst, len(p.batches)) + } + + for topic, partitions := range p.batches { + if flexible { + dst = kbin.AppendCompactString(dst, topic) + dst = kbin.AppendCompactArrayLen(dst, len(partitions)) + } else { + dst = kbin.AppendString(dst, topic) + dst = kbin.AppendArrayLen(dst, len(partitions)) + } + + var tmetrics map[int32]ProduceBatchMetrics + if p.hasHook { + tmetrics = make(map[int32]ProduceBatchMetrics) + p.metrics[topic] = tmetrics + } + + for partition, batch := range partitions { + dst = kbin.AppendInt32(dst, partition) + batch.mu.Lock() + if batch.records == nil || batch.isFailingFromLoadErr { // concurrent failAllRecords OR concurrent bumpRepeatedLoadErr + if flexible { + dst = kbin.AppendCompactNullableBytes(dst, nil) + } else { + dst = kbin.AppendNullableBytes(dst, nil) + } + batch.mu.Unlock() + continue + } + batch.canFailFromLoadErrs = false // we are going to write this batch: the response status is now unknown + var pmetrics ProduceBatchMetrics + if p.version < 3 { + dst, pmetrics = batch.appendToAsMessageSet(dst, uint8(p.version), p.compressor) + } else { + dst, pmetrics = 
batch.appendTo(dst, p.version, p.producerID, p.producerEpoch, p.txnID != nil, p.compressor) + } + batch.mu.Unlock() + if p.hasHook { + tmetrics[partition] = pmetrics + } + if flexible { + dst = append(dst, 0) + } + } + if flexible { + dst = append(dst, 0) + } + } + if flexible { + dst = append(dst, 0) + } + + return dst +} + +func (*produceRequest) ReadFrom([]byte) error { + panic("unreachable -- the client never uses ReadFrom on its internal produceRequest") +} + +func (p *produceRequest) ResponseKind() kmsg.Response { + r := kmsg.NewPtrProduceResponse() + r.Version = p.version + return r +} + +func (b seqRecBatch) appendTo( + in []byte, + version int16, + producerID int64, + producerEpoch int16, + transactional bool, + compressor *compressor, +) (dst []byte, m ProduceBatchMetrics) { // named return so that our defer for flexible versions can modify it + flexible := version >= 9 + dst = in + nullableBytesLen := b.wireLength - 4 // NULLABLE_BYTES leading length, minus itself + nullableBytesLenAt := len(dst) // in case compression adjusting + dst = kbin.AppendInt32(dst, nullableBytesLen) + + // With flexible versions, the array length prefix can be anywhere from + // 1 byte long to 5 bytes long (covering up to 268MB). + // + // We have to add our initial understanding of the array length as a + // uvarint, but if compressing shrinks what that length would encode + // as, we have to shift everything down. + if flexible { + dst = dst[:nullableBytesLenAt] + batchLength := b.batchLength() + dst = kbin.AppendUvarint(dst, uvar32(batchLength)) // compact array non-null prefix + batchAt := len(dst) + defer func() { + batch := dst[batchAt:] + if int32(len(batch)) == batchLength { // we did not compress: simply return + return + } + + // We *only* could have shrunk the batch bytes, so our + // append here will not overwrite anything we need to + // keep. + newDst := kbin.AppendUvarint(dst[:nullableBytesLenAt], uvar32(int32(len(batch)))) + + // If our append did not shorten the length prefix, we + // can just return the prior dst, otherwise we have to + // shift the batch itself down on newDst. + if len(newDst) != batchAt { + dst = append(newDst, batch...) + } + }() + } + + // Below here, we append the actual record batch, which cannot be + // flexible. Everything encodes properly; flexible adjusting is done in + // the defer just above. 
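	//
	// For reference, a sketch of the v2 batch header the appends below
	// produce (field sizes in bytes):
	//
	//	firstOffset(8) batchLength(4) leaderEpoch(4) magic(1) crc(4)
	//	attrs(2) lastOffsetDelta(4) firstTimestamp(8) maxTimestamp(8)
	//	producerID(8) producerEpoch(2) baseSequence(4) numRecords(4)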
+ + dst = kbin.AppendInt64(dst, 0) // firstOffset, defined as zero for producing + + batchLen := nullableBytesLen - 8 - 4 // length of what follows this field (so, minus what came before and ourself) + batchLenAt := len(dst) // in case compression adjusting + dst = kbin.AppendInt32(dst, batchLen) + + dst = kbin.AppendInt32(dst, -1) // partitionLeaderEpoch, unused in clients + dst = kbin.AppendInt8(dst, 2) // magic, defined as 2 for records v0.11.0+ + + crcStart := len(dst) // fill at end + dst = kbin.AppendInt32(dst, 0) // reserved crc + + attrsAt := len(dst) // in case compression adjusting + b.attrs = 0 + if transactional { + b.attrs |= 0x0010 // bit 5 is the "is transactional" bit + } + dst = kbin.AppendInt16(dst, b.attrs) + dst = kbin.AppendInt32(dst, int32(len(b.records)-1)) // lastOffsetDelta + dst = kbin.AppendInt64(dst, b.firstTimestamp) + dst = kbin.AppendInt64(dst, b.firstTimestamp+b.maxTimestampDelta) + + seq := b.seq + if producerID < 0 { // a negative producer ID means we are not using idempotence + seq = 0 + } + dst = kbin.AppendInt64(dst, producerID) + dst = kbin.AppendInt16(dst, producerEpoch) + dst = kbin.AppendInt32(dst, seq) + + dst = kbin.AppendArrayLen(dst, len(b.records)) + recordsAt := len(dst) + for i, pr := range b.records { + dst = pr.appendTo(dst, int32(i)) + } + + toCompress := dst[recordsAt:] + m.NumRecords = len(b.records) + m.UncompressedBytes = len(toCompress) + m.CompressedBytes = m.UncompressedBytes + + if compressor != nil { + w := byteBuffers.Get().(*bytes.Buffer) + defer byteBuffers.Put(w) + w.Reset() + + compressed, codec := compressor.compress(w, toCompress, version) + if compressed != nil && // nil would be from an error + len(compressed) < len(toCompress) { + // our compressed was shorter: copy over + copy(dst[recordsAt:], compressed) + dst = dst[:recordsAt+len(compressed)] + m.CompressedBytes = len(compressed) + m.CompressionType = uint8(codec) + + // update the few record batch fields we already wrote + savings := int32(len(toCompress) - len(compressed)) + nullableBytesLen -= savings + batchLen -= savings + b.attrs |= int16(codec) + if !flexible { + kbin.AppendInt32(dst[:nullableBytesLenAt], nullableBytesLen) + } + kbin.AppendInt32(dst[:batchLenAt], batchLen) + kbin.AppendInt16(dst[:attrsAt], b.attrs) + } + } + + kbin.AppendInt32(dst[:crcStart], int32(crc32.Checksum(dst[crcStart+4:], crc32c))) + + return dst, m +} + +func (pr promisedRec) appendTo(dst []byte, offsetDelta int32) []byte { + length, tsDelta := pr.lengthAndTimestampDelta() + dst = kbin.AppendVarint(dst, length) + dst = kbin.AppendInt8(dst, 0) // attributes, currently unused + dst = kbin.AppendVarlong(dst, tsDelta) + dst = kbin.AppendVarint(dst, offsetDelta) + dst = kbin.AppendVarintBytes(dst, pr.Key) + dst = kbin.AppendVarintBytes(dst, pr.Value) + dst = kbin.AppendVarint(dst, int32(len(pr.Headers))) + for _, h := range pr.Headers { + dst = kbin.AppendVarintString(dst, h.Key) + dst = kbin.AppendVarintBytes(dst, h.Value) + } + return dst +} + +func (b seqRecBatch) appendToAsMessageSet(dst []byte, version uint8, compressor *compressor) ([]byte, ProduceBatchMetrics) { + var m ProduceBatchMetrics + + nullableBytesLenAt := len(dst) + dst = append(dst, 0, 0, 0, 0) // nullable bytes len + for i, pr := range b.records { + _, tsDelta := pr.lengthAndTimestampDelta() + dst = appendMessageTo( + dst, + version, + 0, + int64(i), + b.firstTimestamp+tsDelta, + pr.Record, + ) + } + + b.attrs = 0 + + // Produce request v0 and v1 uses message set v0, which does not have + // timestamps. 
We set bit 8 in our attrs which corresponds with our own
+	// kgo.RecordAttrs's bit. The attrs field is unused in a sink / recBuf
+	// outside of the appending functions or finishing records; if we use
+	// more bits in our internal RecordAttrs, the below will need to
+	// change.
+	if version == 0 || version == 1 {
+		b.attrs |= 0b1000_0000
+	}
+
+	toCompress := dst[nullableBytesLenAt+4:] // skip nullable bytes leading prefix
+	m.NumRecords = len(b.records)
+	m.UncompressedBytes = len(toCompress)
+	m.CompressedBytes = m.UncompressedBytes
+
+	if compressor != nil {
+		w := byteBuffers.Get().(*bytes.Buffer)
+		defer byteBuffers.Put(w)
+		w.Reset()
+
+		compressed, codec := compressor.compress(w, toCompress, int16(version))
+		inner := &Record{Value: compressed}
+		wrappedLength := messageSet0Length(inner)
+		if version == 2 {
+			wrappedLength += 8 // timestamp
+		}
+
+		if compressed != nil && int(wrappedLength) < len(toCompress) {
+			m.CompressedBytes = int(wrappedLength)
+			m.CompressionType = uint8(codec)
+
+			b.attrs |= int16(codec)
+
+			dst = appendMessageTo(
+				dst[:nullableBytesLenAt+4],
+				version,
+				int8(codec),
+				int64(len(b.records)-1),
+				b.firstTimestamp,
+				inner,
+			)
+		}
+	}
+
+	kbin.AppendInt32(dst[:nullableBytesLenAt], int32(len(dst[nullableBytesLenAt+4:])))
+	return dst, m
+}
+
+func appendMessageTo(
+	dst []byte,
+	version uint8,
+	attributes int8,
+	offset int64,
+	timestamp int64,
+	r *Record,
+) []byte {
+	magic := version >> 1
+	dst = kbin.AppendInt64(dst, offset)
+	msgSizeStart := len(dst)
+	dst = append(dst, 0, 0, 0, 0)
+	crc32Start := len(dst)
+	dst = append(dst,
+		0, 0, 0, 0,
+		magic,
+		byte(attributes))
+	if magic == 1 {
+		dst = kbin.AppendInt64(dst, timestamp)
+	}
+	dst = kbin.AppendNullableBytes(dst, r.Key)
+	dst = kbin.AppendNullableBytes(dst, r.Value)
+	kbin.AppendInt32(dst[:crc32Start], int32(crc32.ChecksumIEEE(dst[crc32Start+4:])))
+	kbin.AppendInt32(dst[:msgSizeStart], int32(len(dst[msgSizeStart+4:])))
+	return dst
+}
diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/source.go b/vendor/github.com/twmb/franz-go/pkg/kgo/source.go
new file mode 100644
index 00000000000..a5aa849cfec
--- /dev/null
+++ b/vendor/github.com/twmb/franz-go/pkg/kgo/source.go
@@ -0,0 +1,2394 @@
+package kgo
+
+import (
+	"context"
+	"encoding/binary"
+	"fmt"
+	"hash/crc32"
+	"slices"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/twmb/franz-go/pkg/kbin"
+	"github.com/twmb/franz-go/pkg/kerr"
+	"github.com/twmb/franz-go/pkg/kmsg"
+)
+
+type readerFrom interface {
+	ReadFrom([]byte) error
+}
+
+// A source consumes from an individual broker.
+//
+// As long as there is at least one active cursor, a source aims to have *one*
+// buffered fetch at all times. As soon as the fetch is taken, a source issues
+// another fetch in the background.
+type source struct {
+	cl     *Client // our owning client, for cfg, metadata triggering, context, etc.
+	nodeID int32   // the node ID of the broker this source consumes from
+
+	// Tracks how many _failed_ fetch requests we have in a row (unable to
+	// receive a response). Any response, even a response with an ErrorCode
+	// set, counts as successful. This field is used for backoff purposes. 
+	consecutiveFailures int
+
+	fetchState workLoop
+	sem        chan struct{} // closed when fetchable, recreated when a buffered fetch exists
+	buffered   bufferedFetch // contains a fetch the source has buffered for polling
+
+	session fetchSession // supports fetch sessions as per KIP-227
+
+	cursorsMu    sync.Mutex
+	cursors      []*cursor // contains all partitions being consumed on this source
+	cursorsStart int       // incremented every fetch req to ensure all partitions are fetched
+}
+
+func (cl *Client) newSource(nodeID int32) *source {
+	s := &source{
+		cl:     cl,
+		nodeID: nodeID,
+		sem:    make(chan struct{}),
+	}
+	if cl.cfg.disableFetchSessions {
+		s.session.kill()
+	}
+	close(s.sem)
+	return s
+}
+
+func (s *source) addCursor(add *cursor) {
+	s.cursorsMu.Lock()
+	add.cursorsIdx = len(s.cursors)
+	s.cursors = append(s.cursors, add)
+	s.cursorsMu.Unlock()
+
+	// Adding a new cursor may allow a new partition to be fetched.
+	// We do not need to cancel any current fetch nor kill the session,
+	// since adding a cursor is non-destructive to work in progress.
+	// If the session is currently stopped, this is a no-op.
+	s.maybeConsume()
+}
+
+// Removes a cursor from the source.
+//
+// The caller should do this with a stopped session if necessary, which
+// should clear any buffered fetch and reset the source's session.
+func (s *source) removeCursor(rm *cursor) {
+	s.cursorsMu.Lock()
+	defer s.cursorsMu.Unlock()
+
+	if rm.cursorsIdx != len(s.cursors)-1 {
+		s.cursors[rm.cursorsIdx], s.cursors[len(s.cursors)-1] = s.cursors[len(s.cursors)-1], nil
+		s.cursors[rm.cursorsIdx].cursorsIdx = rm.cursorsIdx
+	} else {
+		s.cursors[rm.cursorsIdx] = nil // do not let the memory hang around
+	}
+
+	s.cursors = s.cursors[:len(s.cursors)-1]
+	if s.cursorsStart == len(s.cursors) {
+		s.cursorsStart = 0
+	}
+}
+
+// cursor is where we are consuming from for an individual partition.
+type cursor struct {
+	topic     string
+	topicID   [16]byte
+	partition int32
+
+	unknownIDFails atomicI32
+
+	keepControl bool // whether to keep control records
+
+	cursorsIdx int // updated under source mutex
+
+	// The source we are currently on. This is modified in two scenarios:
+	//
+	// * by metadata when the consumer session is completely stopped
+	//
+	// * by a fetch when handling a fetch response that returned preferred
+	// replicas
+	//
+	// This is additionally read within a session when cursor is
+	// transitioning from used to usable.
+	source *source
+
+	// useState is an atomic that has two states: unusable and usable. A
+	// cursor can be used in a fetch request if it is in the usable state.
+	// Once used, the cursor is unusable, and will be set back to usable
+	// once the request lifecycle is complete (a usable fetch response, or
+	// once listing offsets or loading epochs completes).
+	//
+	// A cursor can be set back to unusable when sources are stopped. This
+	// can be done if a group loses a partition, for example.
+	//
+	// The used state is exclusively updated by either building a fetch
+	// request or when the source is stopped.
+	useState atomicBool
+
+	topicPartitionData // updated in metadata when session is stopped
+
+	// cursorOffset is our epoch/offset that we are consuming. When a fetch
+	// request is issued, we "freeze" a view of the offset and of the
+	// leader epoch (see cursorOffsetNext for why the leader epoch). When a
+	// buffered fetch is taken, we update the cursor.
+	cursorOffset
+}
+
+// cursorOffset tracks offsets/epochs for a cursor.
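+//
+// A sketch with illustrative numbers: after consuming a record at offset 41
+// with leader epoch 3, the cursor holds offset=42 and lastConsumedEpoch=3;
+// the next fetch asks for offset 42, and on a KIP-320 error, epoch 3 is what
+// we hand to OffsetForLeaderEpoch to find a safe reset point.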
+type cursorOffset struct { + // What the cursor is at: we request this offset next. + offset int64 + + // The epoch of the last record we consumed. Also used for KIP-320, if + // we are fenced or we have an offset out of range error, we go into + // the OffsetForLeaderEpoch recovery. The last consumed epoch tells the + // broker which offset we want: either (a) the next offset if the last + // consumed epoch is the current epoch, or (b) the offset of the first + // record in the next epoch. This allows for exact offset resetting and + // data loss detection. + // + // See kmsg.OffsetForLeaderEpochResponseTopicPartition for more + // details. + lastConsumedEpoch int32 + + // If we receive OFFSET_OUT_OF_RANGE, and we previously *know* we + // consumed an offset, we reset to the nearest offset after our prior + // known valid consumed offset. + lastConsumedTime time.Time + + // The current high watermark of the partition. Uninitialized (0) means + // we do not know the HWM, or there is no lag. + hwm int64 +} + +// use, for fetch requests, freezes a view of the cursorOffset. +func (c *cursor) use() *cursorOffsetNext { + // A source using a cursor has exclusive access to the use field by + // virtue of that source building a request during a live session, + // or by virtue of the session being stopped. + c.useState.Store(false) + return &cursorOffsetNext{ + cursorOffset: c.cursorOffset, + from: c, + currentLeaderEpoch: c.leaderEpoch, + } +} + +// unset transitions a cursor to an unusable state when the cursor is no longer +// to be consumed. This is called exclusively after sources are stopped. +// This also unsets the cursor offset, which is assumed to be unused now. +func (c *cursor) unset() { + c.useState.Store(false) + c.setOffset(cursorOffset{ + offset: -1, + lastConsumedEpoch: -1, + hwm: 0, + }) +} + +// usable returns whether a cursor can be used for building a fetch request. +func (c *cursor) usable() bool { + return c.useState.Load() +} + +// allowUsable allows a cursor to be fetched, and is called either in assigning +// offsets, or when a buffered fetch is taken or discarded, or when listing / +// epoch loading finishes. +func (c *cursor) allowUsable() { + c.useState.Swap(true) + c.source.maybeConsume() +} + +// setOffset sets the cursors offset which will be used the next time a fetch +// request is built. This function is called under the source mutex while the +// source is stopped, and the caller is responsible for calling maybeConsume +// after. +func (c *cursor) setOffset(o cursorOffset) { + c.cursorOffset = o +} + +// cursorOffsetNext is updated while processing a fetch response. +// +// When a buffered fetch is taken, we update a cursor with the final values in +// the modified cursor offset. +type cursorOffsetNext struct { + cursorOffset + from *cursor + + // The leader epoch at the time we took this cursor offset snapshot. We + // need to copy this rather than accessing it through `from` because a + // fetch request can be canceled while it is being written (and reading + // the epoch). + // + // The leader field itself is only read within the context of a session + // while the session is alive, thus it needs no such guard. + // + // Basically, any field read in AppendTo needs to be copied into + // cursorOffsetNext. + currentLeaderEpoch int32 +} + +type cursorOffsetPreferred struct { + cursorOffsetNext + preferredReplica int32 + ooor bool +} + +// Moves a cursor from one source to another. 
This is done while handling
+// a fetch response, which means within the context of a live session.
+func (p *cursorOffsetPreferred) move() {
+	c := p.from
+	defer c.allowUsable()
+
+	// Before we migrate the cursor, we check if the destination source
+	// exists. If not, we do not migrate and instead force a metadata
+	// update.
+
+	c.source.cl.sinksAndSourcesMu.Lock()
+	sns, exists := c.source.cl.sinksAndSources[p.preferredReplica]
+	c.source.cl.sinksAndSourcesMu.Unlock()
+
+	if !exists {
+		c.source.cl.triggerUpdateMetadataNow("cursor moving to a different broker that is not yet known")
+		return
+	}
+
+	// This remove clears the source's session and buffered fetch, although
+	// we will not have a buffered fetch since moving replicas is called
+	// before buffering a fetch.
+	c.source.removeCursor(c)
+	c.source = sns.source
+	c.source.addCursor(c)
+}
+
+type cursorPreferreds []cursorOffsetPreferred
+
+func (cs cursorPreferreds) String() string {
+	type pnext struct {
+		p    int32
+		next int32
+		ooor bool
+	}
+	ts := make(map[string][]pnext)
+	for _, c := range cs {
+		t := c.from.topic
+		p := c.from.partition
+		ts[t] = append(ts[t], pnext{p, c.preferredReplica, c.ooor})
+	}
+	tsorted := make([]string, 0, len(ts))
+	for t, ps := range ts {
+		tsorted = append(tsorted, t)
+		slices.SortFunc(ps, func(l, r pnext) int {
+			if l.p < r.p {
+				return -1
+			}
+			if l.p > r.p {
+				return 1
+			}
+			if l.next < r.next {
+				return -1
+			}
+			if l.next > r.next {
+				return 1
+			}
+			return 0
+		})
+	}
+	slices.Sort(tsorted)
+
+	sb := new(strings.Builder)
+	for i, t := range tsorted {
+		ps := ts[t]
+		fmt.Fprintf(sb, "%s{", t)
+
+		for j, p := range ps {
+			if j < len(ps)-1 {
+				if p.ooor {
+					fmt.Fprintf(sb, "%d=>%d[ooor], ", p.p, p.next)
+				} else {
+					fmt.Fprintf(sb, "%d=>%d, ", p.p, p.next)
+				}
+			} else {
+				if p.ooor {
+					fmt.Fprintf(sb, "%d=>%d[ooor]", p.p, p.next)
+				} else {
+					fmt.Fprintf(sb, "%d=>%d", p.p, p.next)
+				}
+			}
+		}
+
+		if i < len(tsorted)-1 {
+			fmt.Fprint(sb, "}, ")
+		} else {
+			fmt.Fprint(sb, "}")
+		}
+	}
+	return sb.String()
+}
+
+func (cs cursorPreferreds) eachPreferred(fn func(cursorOffsetPreferred)) {
+	for _, c := range cs {
+		fn(c)
+	}
+}
+
+type usedOffsets map[string]map[int32]*cursorOffsetNext
+
+func (os usedOffsets) eachOffset(fn func(*cursorOffsetNext)) {
+	for _, ps := range os {
+		for _, o := range ps {
+			fn(o)
+		}
+	}
+}
+
+func (os usedOffsets) finishUsingAllWithSet() {
+	os.eachOffset(func(o *cursorOffsetNext) { o.from.setOffset(o.cursorOffset); o.from.allowUsable() })
+}
+
+func (os usedOffsets) finishUsingAll() {
+	os.eachOffset(func(o *cursorOffsetNext) { o.from.allowUsable() })
+}
+
+// bufferedFetch is a fetch response waiting to be consumed by the client.
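+//
+// Lifecycle sketch: a source holds at most one of these at a time; taking it
+// (via takeBuffered / takeNBuffered) updates cursor offsets, signals
+// doneFetch so the consumer session can admit another inflight fetch, and
+// closes sem so the fetch loop may issue the next request.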
+type bufferedFetch struct { + fetch Fetch + + doneFetch chan<- struct{} // when unbuffered, we send down this + usedOffsets usedOffsets // what the offsets will be next if this fetch is used +} + +func (s *source) hook(f *Fetch, buffered, polled bool) { + s.cl.cfg.hooks.each(func(h Hook) { + if buffered { + h, ok := h.(HookFetchRecordBuffered) + if !ok { + return + } + for i := range f.Topics { + t := &f.Topics[i] + for j := range t.Partitions { + p := &t.Partitions[j] + for _, r := range p.Records { + h.OnFetchRecordBuffered(r) + } + } + } + } else { + h, ok := h.(HookFetchRecordUnbuffered) + if !ok { + return + } + for i := range f.Topics { + t := &f.Topics[i] + for j := range t.Partitions { + p := &t.Partitions[j] + for _, r := range p.Records { + h.OnFetchRecordUnbuffered(r, polled) + } + } + } + } + }) + + var nrecs int + var nbytes int64 + for i := range f.Topics { + t := &f.Topics[i] + for j := range t.Partitions { + p := &t.Partitions[j] + nrecs += len(p.Records) + for k := range p.Records { + nbytes += p.Records[k].userSize() + } + } + } + if buffered { + s.cl.consumer.bufferedRecords.Add(int64(nrecs)) + s.cl.consumer.bufferedBytes.Add(nbytes) + } else { + s.cl.consumer.bufferedRecords.Add(-int64(nrecs)) + s.cl.consumer.bufferedBytes.Add(-nbytes) + } +} + +// takeBuffered drains a buffered fetch and updates offsets. +func (s *source) takeBuffered(paused pausedTopics) Fetch { + if len(paused) == 0 { + return s.takeBufferedFn(true, usedOffsets.finishUsingAllWithSet) + } + var strip map[string]map[int32]struct{} + f := s.takeBufferedFn(true, func(os usedOffsets) { + for t, ps := range os { + // If the entire topic is paused, we allowUsable all + // and strip the topic entirely. + pps, ok := paused.t(t) + if !ok { + for _, o := range ps { + o.from.setOffset(o.cursorOffset) + o.from.allowUsable() + } + continue + } + if strip == nil { + strip = make(map[string]map[int32]struct{}) + } + if pps.all { + for _, o := range ps { + o.from.allowUsable() + } + strip[t] = nil // initialize key, for existence-but-len-0 check below + continue + } + stript := make(map[int32]struct{}) + for _, o := range ps { + if _, ok := pps.m[o.from.partition]; ok { + o.from.allowUsable() + stript[o.from.partition] = struct{}{} + continue + } + o.from.setOffset(o.cursorOffset) + o.from.allowUsable() + } + // We only add stript to strip if there are any + // stripped partitions. We could have a paused + // partition that is on another broker, while this + // broker has no paused partitions -- if we add stript + // here, our logic below (stripping this entire topic) + // is more confusing (present nil vs. non-present nil). + if len(stript) > 0 { + strip[t] = stript + } + } + }) + if strip != nil { + keep := f.Topics[:0] + for _, t := range f.Topics { + stript, ok := strip[t.Topic] + if ok { + if len(stript) == 0 { + continue // stripping this entire topic + } + keepp := t.Partitions[:0] + for _, p := range t.Partitions { + if _, ok := stript[p.Partition]; ok { + continue + } + keepp = append(keepp, p) + } + t.Partitions = keepp + } + keep = append(keep, t) + } + f.Topics = keep + } + return f +} + +func (s *source) discardBuffered() { + s.takeBufferedFn(false, usedOffsets.finishUsingAll) +} + +// takeNBuffered takes a limited amount of records from a buffered fetch, +// updating offsets in each partition per records taken. +// +// This only allows a new fetch once every buffered record has been taken. +// +// This returns the number of records taken and whether the source has been +// completely drained. 
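+//
+// Shape of a call (hypothetical caller, illustrative count):
+//
+//	f, n, done := s.takeNBuffered(s.cl.consumer.loadPaused(), 100)
+//	// f holds up to 100 records, n is how many were actually taken,
+//	// and done reports whether the buffered fetch is now empty.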
+func (s *source) takeNBuffered(paused pausedTopics, n int) (Fetch, int, bool) {
+	var (
+		r      Fetch
+		rstrip Fetch
+		taken  int
+	)
+
+	b := &s.buffered
+	bf := &b.fetch
+	for len(bf.Topics) > 0 && n > 0 {
+		t := &bf.Topics[0]
+
+		// If the topic is outright paused, we allowUsable all
+		// partitions in the topic and skip the topic entirely.
+		if paused.has(t.Topic, -1) {
+			rstrip.Topics = append(rstrip.Topics, *t)
+			bf.Topics = bf.Topics[1:]
+			for _, pCursor := range b.usedOffsets[t.Topic] {
+				pCursor.from.allowUsable()
+			}
+			delete(b.usedOffsets, t.Topic)
+			continue
+		}
+
+		var rt *FetchTopic
+		ensureTopicAdded := func() {
+			if rt != nil {
+				return
+			}
+			r.Topics = append(r.Topics, *t)
+			rt = &r.Topics[len(r.Topics)-1]
+			rt.Partitions = nil
+		}
+		var rtstrip *FetchTopic
+		ensureTopicStripped := func() {
+			if rtstrip != nil {
+				return
+			}
+			rstrip.Topics = append(rstrip.Topics, *t)
+			rtstrip = &rstrip.Topics[len(rstrip.Topics)-1]
+			rtstrip.Partitions = nil
+		}
+
+		tCursors := b.usedOffsets[t.Topic]
+
+		for len(t.Partitions) > 0 && n > 0 {
+			p := &t.Partitions[0]
+
+			if paused.has(t.Topic, p.Partition) {
+				ensureTopicStripped()
+				rtstrip.Partitions = append(rtstrip.Partitions, *p)
+				t.Partitions = t.Partitions[1:]
+				pCursor := tCursors[p.Partition]
+				pCursor.from.allowUsable()
+				delete(tCursors, p.Partition)
+				if len(tCursors) == 0 {
+					delete(b.usedOffsets, t.Topic)
+				}
+				continue
+			}
+
+			ensureTopicAdded()
+			rt.Partitions = append(rt.Partitions, *p)
+			rp := &rt.Partitions[len(rt.Partitions)-1]
+
+			take := n
+			if take > len(p.Records) {
+				take = len(p.Records)
+			}
+
+			rp.Records = p.Records[:take:take]
+			p.Records = p.Records[take:]
+
+			n -= take
+			taken += take
+
+			pCursor := tCursors[p.Partition]
+
+			if len(p.Records) == 0 {
+				t.Partitions = t.Partitions[1:]
+
+				pCursor.from.setOffset(pCursor.cursorOffset)
+				pCursor.from.allowUsable()
+				delete(tCursors, p.Partition)
+				if len(tCursors) == 0 {
+					delete(b.usedOffsets, t.Topic)
+				}
+				continue
+			}
+
+			lastReturnedRecord := rp.Records[len(rp.Records)-1]
+			pCursor.from.setOffset(cursorOffset{
+				offset:            lastReturnedRecord.Offset + 1,
+				lastConsumedEpoch: lastReturnedRecord.LeaderEpoch,
+				lastConsumedTime:  lastReturnedRecord.Timestamp,
+				hwm:               p.HighWatermark,
+			})
+		}
+
+		if len(t.Partitions) == 0 {
+			bf.Topics = bf.Topics[1:]
+		}
+	}
+
+	if len(rstrip.Topics) > 0 {
+		s.hook(&rstrip, false, true)
+	}
+	s.hook(&r, false, true) // unbuffered, polled
+
+	drained := len(bf.Topics) == 0
+	if drained {
+		s.takeBuffered(nil)
+	}
+	return r, taken, drained
+}
+
+func (s *source) takeBufferedFn(polled bool, offsetFn func(usedOffsets)) Fetch {
+	r := s.buffered
+	s.buffered = bufferedFetch{}
+	offsetFn(r.usedOffsets)
+	r.doneFetch <- struct{}{}
+	close(s.sem)
+
+	s.hook(&r.fetch, false, polled) // unbuffered, potentially polled
+
+	return r.fetch
+}
+
+// createReq actually creates a fetch request.
+func (s *source) createReq() *fetchRequest {
+	req := &fetchRequest{
+		maxWait:        s.cl.cfg.maxWait,
+		minBytes:       s.cl.cfg.minBytes,
+		maxBytes:       s.cl.cfg.maxBytes.load(),
+		maxPartBytes:   s.cl.cfg.maxPartBytes.load(),
+		rack:           s.cl.cfg.rack,
+		isolationLevel: s.cl.cfg.isolationLevel,
+		preferLagFn:    s.cl.cfg.preferLagFn,
+
+		// We copy a view of the session for the request, which allows
+		// modifying the source while the request may be reading its copy. 
+ session: s.session, + } + + paused := s.cl.consumer.loadPaused() + + s.cursorsMu.Lock() + defer s.cursorsMu.Unlock() + + cursorIdx := s.cursorsStart + for i := 0; i < len(s.cursors); i++ { + c := s.cursors[cursorIdx] + cursorIdx = (cursorIdx + 1) % len(s.cursors) + if !c.usable() || paused.has(c.topic, c.partition) { + continue + } + req.addCursor(c) + } + + // We could have lost our only record buffer just before we grabbed the + // source lock above. + if len(s.cursors) > 0 { + s.cursorsStart = (s.cursorsStart + 1) % len(s.cursors) + } + + return req +} + +func (s *source) maybeConsume() { + if s.fetchState.maybeBegin() { + go s.loopFetch() + } +} + +func (s *source) loopFetch() { + consumer := &s.cl.consumer + session := consumer.loadSession() + + if session == noConsumerSession { + s.fetchState.hardFinish() + // It is possible that we were triggered to consume while we + // had no consumer session, and then *after* loopFetch loaded + // noConsumerSession, the session was saved and triggered to + // consume again. If this function is slow the first time + // around, it could still be running and about to hardFinish. + // The second trigger will do nothing, and then we hardFinish + // and block a new session from actually starting consuming. + // + // To guard against this, after we hard finish, we load the + // session again: if it is *not* noConsumerSession, we trigger + // attempting to consume again. Worst case, the trigger is + // useless and it will exit below when it builds an empty + // request. + sessionNow := consumer.loadSession() + if session != sessionNow { + s.maybeConsume() + } + return + } + + session.incWorker() + defer session.decWorker() + + // After our add, check quickly **without** another select case to + // determine if this context was truly canceled. Any other select that + // has another select case could theoretically race with the other case + // also being selected. + select { + case <-session.ctx.Done(): + s.fetchState.hardFinish() + return + default: + } + + // We receive on canFetch when we can fetch, and we send back when we + // are done fetching. + canFetch := make(chan chan struct{}, 1) + + again := true + for again { + select { + case <-session.ctx.Done(): + s.fetchState.hardFinish() + return + case <-s.sem: + } + + select { + case <-session.ctx.Done(): + s.fetchState.hardFinish() + return + case session.desireFetch() <- canFetch: + } + + select { + case <-session.ctx.Done(): + session.cancelFetchCh <- canFetch + s.fetchState.hardFinish() + return + case doneFetch := <-canFetch: + again = s.fetchState.maybeFinish(s.fetch(session, doneFetch)) + } + } +} + +func (s *source) killSessionOnClose(ctx context.Context) { + br, err := s.cl.brokerOrErr(nil, s.nodeID, errUnknownBroker) + if err != nil { + return + } + s.session.kill() + req := &fetchRequest{ + maxWait: 1, + minBytes: 1, + maxBytes: 1, + maxPartBytes: 1, + rack: s.cl.cfg.rack, + isolationLevel: s.cl.cfg.isolationLevel, + session: s.session, + } + ch := make(chan struct{}) + br.do(ctx, req, func(kmsg.Response, error) { close(ch) }) + <-ch +} + +// fetch is the main logic center of fetching messages. +// +// This is a long function, made much longer by winded documentation, that +// contains a lot of the side effects of fetching and updating. The function +// consists of two main bulks of logic: +// +// - First, issue a request that can be killed if the source needs to be +// stopped. Processing the response modifies no state on the source. 
+// +// - Second, we keep the fetch response and update everything relevant +// (session, trigger some list or epoch updates, buffer the fetch). +// +// One small part between the first and second step is to update preferred +// replicas. We always keep the preferred replicas from the fetch response +// *even if* the source needs to be stopped. The knowledge of which preferred +// replica to use would not be out of date even if the consumer session is +// changing. +func (s *source) fetch(consumerSession *consumerSession, doneFetch chan<- struct{}) (fetched bool) { + req := s.createReq() + + // For all returns, if we do not buffer our fetch, then we want to + // ensure our used offsets are usable again. + var ( + alreadySentToDoneFetch bool + setOffsets bool + buffered bool + ) + defer func() { + if !buffered { + if req.numOffsets > 0 { + if setOffsets { + req.usedOffsets.finishUsingAllWithSet() + } else { + req.usedOffsets.finishUsingAll() + } + } + if !alreadySentToDoneFetch { + doneFetch <- struct{}{} + } + } + }() + + if req.numOffsets == 0 { // cursors could have been set unusable + return + } + + // If our fetch is killed, we want to cancel waiting for the response. + var ( + kresp kmsg.Response + requested = make(chan struct{}) + ctx, cancel = context.WithCancel(consumerSession.ctx) + ) + defer cancel() + + br, err := s.cl.brokerOrErr(ctx, s.nodeID, errUnknownBroker) + if err != nil { + close(requested) + } else { + br.do(ctx, req, func(k kmsg.Response, e error) { + kresp, err = k, e + close(requested) + }) + } + + select { + case <-requested: + fetched = true + case <-ctx.Done(): + return + } + + var didBackoff bool + backoff := func(why interface{}) { + // We preemptively allow more fetches (since we are not buffering) + // and reset our session because of the error (who knows if kafka + // processed the request but the client failed to receive it). + doneFetch <- struct{}{} + alreadySentToDoneFetch = true + s.session.reset() + didBackoff = true + + s.cl.triggerUpdateMetadata(false, fmt.Sprintf("opportunistic load during source backoff: %v", why)) // as good a time as any + s.consecutiveFailures++ + after := time.NewTimer(s.cl.cfg.retryBackoff(s.consecutiveFailures)) + defer after.Stop() + select { + case <-after.C: + case <-ctx.Done(): + } + } + defer func() { + if !didBackoff { + s.consecutiveFailures = 0 + } + }() + + // If we had an error, we backoff. Killing a fetch quits the backoff, + // but that is fine; we may just re-request too early and fall into + // another backoff. + if err != nil { + backoff(err) + return + } + + resp := kresp.(*kmsg.FetchResponse) + + var ( + fetch Fetch + reloadOffsets listOrEpochLoads + preferreds cursorPreferreds + allErrsStripped bool + updateWhy multiUpdateWhy + handled = make(chan struct{}) + ) + + // Theoretically, handleReqResp could take a bit of CPU time due to + // decompressing and processing the response. We do this in a goroutine + // to allow the session to be canceled at any moment. + // + // Processing the response only needs the source's nodeID and client. + go func() { + defer close(handled) + fetch, reloadOffsets, preferreds, allErrsStripped, updateWhy = s.handleReqResp(br, req, resp) + }() + + select { + case <-handled: + case <-ctx.Done(): + return + } + + // The logic below here should be relatively quick. + // + // Note that fetch runs entirely in the context of a consumer session. 
+ // loopFetch does not return until this function does, meaning we + // cannot concurrently issue a second fetch for partitions that are + // being processed below. + + deleteReqUsedOffset := func(topic string, partition int32) { + t := req.usedOffsets[topic] + delete(t, partition) + if len(t) == 0 { + delete(req.usedOffsets, topic) + } + } + + // Before updating the source, we move all cursors that have new + // preferred replicas and remove them from being tracked in our req + // offsets. We also remove the reload offsets from our req offsets. + // + // These two removals transition responsibility for finishing using the + // cursor from the request's used offsets to the new source or the + // reloading. + if len(preferreds) > 0 { + s.cl.cfg.logger.Log(LogLevelInfo, "fetch partitions returned preferred replicas", + "from_broker", s.nodeID, + "moves", preferreds.String(), + ) + } + preferreds.eachPreferred(func(c cursorOffsetPreferred) { + c.move() + deleteReqUsedOffset(c.from.topic, c.from.partition) + }) + reloadOffsets.each(deleteReqUsedOffset) + + // The session on the request was updated; we keep those updates. + s.session = req.session + + // handleReqResp only parses the body of the response, not the top + // level error code. + // + // The top level error code is related to fetch sessions only, and if + // there was an error, the body was empty (so processing is basically a + // no-op). We process the fetch session error now. + switch err := kerr.ErrorForCode(resp.ErrorCode); err { + case kerr.FetchSessionIDNotFound: + if s.session.epoch == 0 { + // If the epoch was zero, the broker did not even + // establish a session for us (and thus is maxed on + // sessions). We stop trying. + s.cl.cfg.logger.Log(LogLevelInfo, "session failed with SessionIDNotFound while trying to establish a session; broker likely maxed on sessions; continuing on without using sessions", "broker", logID(s.nodeID)) + s.session.kill() + } else { + s.cl.cfg.logger.Log(LogLevelInfo, "received SessionIDNotFound from our in use session, our session was likely evicted; resetting session", "broker", logID(s.nodeID)) + s.session.reset() + } + return + case kerr.InvalidFetchSessionEpoch: + s.cl.cfg.logger.Log(LogLevelInfo, "resetting fetch session", "broker", logID(s.nodeID), "err", err) + s.session.reset() + return + + case kerr.FetchSessionTopicIDError, kerr.InconsistentTopicID: + s.cl.cfg.logger.Log(LogLevelInfo, "topic id issues, resetting session and updating metadata", "broker", logID(s.nodeID), "err", err) + s.session.reset() + s.cl.triggerUpdateMetadataNow("topic id issues") + return + } + + // At this point, we have successfully processed the response. Even if + // the response contains no records, we want to keep any offset + // advancements (we could have consumed only control records, we must + // advance past them). + setOffsets = true + + if resp.Version < 7 || resp.SessionID <= 0 { + // If the version is less than 7, we cannot use fetch sessions, + // so we kill them on the first response. + s.session.kill() + } else { + s.session.bumpEpoch(resp.SessionID) + } + + // If we have a reason to update (per-partition fetch errors), and the + // reason is not just unknown topic or partition, then we immediately + // update metadata. We avoid updating for unknown because it _likely_ + // means the topic does not exist and reloading is wasteful. We only + // trigger a metadata update if we have no reload offsets. Having + // reload offsets *always* triggers a metadata update. 
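+	// As a sketch of the decision below (illustrative errors): a partition
+	// failing with NOT_LEADER_OR_FOLLOWER means our metadata is stale and we
+	// want a refresh immediately, while errors that are *only* unknown topic
+	// or unknown topic ID likely mean the topic was deleted, so a lazy,
+	// deduplicated update is enough.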
+ if updateWhy != nil { + why := updateWhy.reason(fmt.Sprintf("fetch had inner topic errors from broker %d", s.nodeID)) + // loadWithSessionNow triggers a metadata update IF there are + // offsets to reload. If there are no offsets to reload, we + // trigger one here. + if !reloadOffsets.loadWithSessionNow(consumerSession, why) { + if updateWhy.isOnly(kerr.UnknownTopicOrPartition) || updateWhy.isOnly(kerr.UnknownTopicID) { + s.cl.triggerUpdateMetadata(false, why) + } else { + s.cl.triggerUpdateMetadataNow(why) + } + } + } + + if fetch.hasErrorsOrRecords() { + buffered = true + s.buffered = bufferedFetch{ + fetch: fetch, + doneFetch: doneFetch, + usedOffsets: req.usedOffsets, + } + s.sem = make(chan struct{}) + s.hook(&fetch, true, false) // buffered, not polled + s.cl.consumer.addSourceReadyForDraining(s) + } else if allErrsStripped { + // If we stripped all errors from the response, we are likely + // fetching from topics that were deleted. We want to back off + // a bit rather than spin-loop immediately re-requesting + // deleted topics. + backoff("empty fetch response due to all partitions having retryable errors") + } + return +} + +// Parses a fetch response into a Fetch, offsets to reload, and whether +// metadata needs updating. +// +// This only uses a source's broker and client, and thus does not need +// the source mutex. +// +// This function, and everything it calls, is side effect free. +func (s *source) handleReqResp(br *broker, req *fetchRequest, resp *kmsg.FetchResponse) ( + f Fetch, + reloadOffsets listOrEpochLoads, + preferreds cursorPreferreds, + allErrsStripped bool, + updateWhy multiUpdateWhy, +) { + f = Fetch{Topics: make([]FetchTopic, 0, len(resp.Topics))} + var ( + debugWhyStripped multiUpdateWhy + numErrsStripped int + kip320 = s.cl.supportsOffsetForLeaderEpoch() + kmove kip951move + ) + defer kmove.maybeBeginMove(s.cl) + + strip := func(t string, p int32, err error) { + numErrsStripped++ + if s.cl.cfg.logger.Level() < LogLevelDebug { + return + } + debugWhyStripped.add(t, p, err) + } + + for _, rt := range resp.Topics { + topic := rt.Topic + // v13 only uses topic IDs, so we have to map the response + // uuid's to our string topics. + if resp.Version >= 13 { + topic = req.id2topic[rt.TopicID] + } + + // We always include all cursors on this source in the fetch; + // we should not receive any topics or partitions we do not + // expect. + topicOffsets, ok := req.usedOffsets[topic] + if !ok { + s.cl.cfg.logger.Log(LogLevelWarn, "broker returned topic from fetch that we did not ask for", + "broker", logID(s.nodeID), + "topic", topic, + ) + continue + } + + fetchTopic := FetchTopic{ + Topic: topic, + TopicID: rt.TopicID, + Partitions: make([]FetchPartition, 0, len(rt.Partitions)), + } + + for i := range rt.Partitions { + rp := &rt.Partitions[i] + partition := rp.Partition + partOffset, ok := topicOffsets[partition] + if !ok { + s.cl.cfg.logger.Log(LogLevelWarn, "broker returned partition from fetch that we did not ask for", + "broker", logID(s.nodeID), + "topic", topic, + "partition", partition, + ) + continue + } + + // If we are fetching from the replica already, Kafka replies with a -1 + // preferred read replica. If Kafka replies with a preferred replica, + // it sends no records. 
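+			// Illustrative KIP-392 flow: if we are fetching from broker 1 and
+			// the broker's replica selector picks broker 5 for our rack, the
+			// partition comes back with PreferredReadReplica=5 and no records;
+			// we then move the cursor so the next fetch goes to broker 5.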
+			if preferred := rp.PreferredReadReplica; resp.Version >= 11 && preferred >= 0 {
+				preferreds = append(preferreds, cursorOffsetPreferred{
+					*partOffset,
+					preferred,
+					false,
+				})
+				continue
+			}
+
+			fp := partOffset.processRespPartition(br, rp, s.cl.decompressor, s.cl.cfg.hooks)
+			if fp.Err != nil {
+				if moving := kmove.maybeAddFetchPartition(resp, rp, partOffset.from); moving {
+					strip(topic, partition, fp.Err)
+					continue
+				}
+				updateWhy.add(topic, partition, fp.Err)
+			}
+
+			// We only keep the partition if it has no error, or an
+			// error we do not internally retry.
+			var keep bool
+			switch fp.Err {
+			default:
+				if kerr.IsRetriable(fp.Err) && !s.cl.cfg.keepRetryableFetchErrors {
+					// UnknownLeaderEpoch: our meta is newer than the broker we fetched from
+					// OffsetNotAvailable: fetched from out of sync replica or a behind in-sync one (KIP-392 case 1 and case 2)
+					// UnknownTopicID: kafka has not synced the state on all brokers
+					// And other standard retryable errors.
+					strip(topic, partition, fp.Err)
+				} else {
+					// - bad auth
+					// - unsupported compression
+					// - unsupported message version
+					// - unknown error
+					// - or, no error
+					keep = true
+				}
+
+			case nil:
+				partOffset.from.unknownIDFails.Store(0)
+				keep = true
+
+			case kerr.UnknownTopicID:
+				// We need to keep UnknownTopicID even though it is
+				// retryable, because encountering this error means
+				// the topic has been recreated and we will never
+				// consume the topic again. This is an error
+				// worth bubbling up.
+				//
+				// Kafka will actually return this error for a brief
+				// window immediately after creating a topic for the
+				// first time, meaning the controller has not yet
+				// propagated to the leader that it is now the leader
+				// of a new partition. We need to ignore this error
+				// for a little bit.
+				if fails := partOffset.from.unknownIDFails.Add(1); fails > 5 {
+					partOffset.from.unknownIDFails.Add(-1)
+					keep = true
+				} else if s.cl.cfg.keepRetryableFetchErrors {
+					keep = true
+				} else {
+					strip(topic, partition, fp.Err)
+				}
+
+			case kerr.OffsetOutOfRange:
+				// If we are out of range, we reset to what we can.
+				// With Kafka >= 2.1, we should only get offset out
+				// of range if we fetch before the start, but a user
+				// could start past the end and want to reset to
+				// the end. We respect that.
+				//
+				// KIP-392 (case 3) specifies that if we are consuming
+				// from a follower, then if our offset request is before
+				// the low watermark, we list offsets from the follower.
+				// However, Kafka does not actually implement handling
+				// ListOffsets from anything but the leader, so we
+				// need to redirect ourselves back to the leader.
+				//
+				// KIP-392 (case 4) specifies that if we are consuming
+				// from a follower and our request is larger than the high
+				// watermark, then we should first check for truncation
+				// from the leader and then, if we still get out of
+				// range, reset with list offsets.
+				//
+				// It further goes on to say that "out of range errors
+				// due to ISR propagation delays should be extremely
+				// rare". Rather than falling back to listing offsets,
+				// we stay in a cycle of validating the leader epoch
+				// until the follower has caught up.
+				//
+				// In all cases except case 4, we also have to check if
+				// no reset offset was configured. If so, we ignore
+				// trying to reset and instead keep our failed partition.
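+				// Sketch of addList below: if we have already consumed
+				// something, we reset to the offset nearest our last
+				// consumed timestamp; on a first fetch we reset to the
+				// configured ConsumeResetOffset; and if resets are
+				// disabled (NoResetOffset), we keep the partition with
+				// its error so the user sees it.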
+				addList := func(replica int32, log bool) {
+					if s.cl.cfg.resetOffset.noReset {
+						keep = true
+					} else if !partOffset.from.lastConsumedTime.IsZero() {
+						reloadOffsets.addLoad(topic, partition, loadTypeList, offsetLoad{
+							replica: replica,
+							Offset:  NewOffset().AfterMilli(partOffset.from.lastConsumedTime.UnixMilli()),
+						})
+						if log {
+							s.cl.cfg.logger.Log(LogLevelWarn, "received OFFSET_OUT_OF_RANGE, resetting to the nearest offset; either you were consuming too slowly and the broker has deleted the segment you were in the middle of consuming, or the broker has lost data and has not yet transferred leadership",
+								"broker", logID(s.nodeID),
+								"topic", topic,
+								"partition", partition,
+								"prior_offset", partOffset.offset,
+							)
+						}
+					} else {
+						reloadOffsets.addLoad(topic, partition, loadTypeList, offsetLoad{
+							replica: replica,
+							Offset:  s.cl.cfg.resetOffset,
+						})
+						if log {
+							s.cl.cfg.logger.Log(LogLevelInfo, "received OFFSET_OUT_OF_RANGE on the first fetch, resetting to the configured ConsumeResetOffset",
+								"broker", logID(s.nodeID),
+								"topic", topic,
+								"partition", partition,
+								"prior_offset", partOffset.offset,
+							)
+						}
+					}
+				}
+
+				switch {
+				case s.nodeID == partOffset.from.leader: // non KIP-392 case
+					addList(-1, true)
+
+				case partOffset.offset < fp.LogStartOffset: // KIP-392 case 3
+					// KIP-392 specifies that we should list offsets against the follower,
+					// but that actually is not supported and the Java client redirects
+					// back to the leader. The leader then does *not* direct the client
+					// back to the follower because the follower is not an in sync
+					// replica. If we did not redirect back to the leader, we would spin
+					// loop receiving offset_out_of_range from the follower for Fetch, and
+					// then not_leader_or_follower from the follower for ListOffsets
+					// (even though it is a follower). So, we just set the preferred replica
+					// back to the leader. We go directly back to fetching with the
+					// hope that the offset is available on the leader, and if not, we'll
+					// just get an OOOR error again and fall into case 1 just above.
+					preferreds = append(preferreds, cursorOffsetPreferred{
+						*partOffset,
+						partOffset.from.leader,
+						true,
+					})
+
+				default: // partOffset.offset > fp.HighWatermark, KIP-392 case 4
+					if kip320 {
+						reloadOffsets.addLoad(topic, partition, loadTypeEpoch, offsetLoad{
+							replica: -1,
+							Offset: Offset{
+								at:    partOffset.offset,
+								epoch: partOffset.lastConsumedEpoch,
+							},
+						})
+					} else {
+						// If the broker does not support offset for leader epoch but
+						// does support follower fetching for some reason, we have to
+						// fall back to listing.
+						addList(-1, true)
+					}
+				}
+
+			case kerr.FencedLeaderEpoch:
+				// With fenced leader epoch, we notify an error only
+				// if necessary after we find out if loss occurred.
+				// If we have consumed nothing, then we got unlucky
+				// by being fenced right after we grabbed metadata.
+				// We just refresh metadata and try again.
+				//
+				// It would be odd for a broker to reply we are fenced
+				// but not support offset for leader epoch, so we do
+				// not check KIP-320 support here.
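+				// Illustrative KIP-320 flow: we consumed through offset 100 at
+				// leader epoch 3, the leader changed to epoch 4, and our next
+				// fetch is fenced. The epoch load below asks the leader for
+				// the end offset of epoch 3; if that end offset is below 100,
+				// what we consumed was truncated and the data loss is
+				// surfaced to the user.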
+ if partOffset.lastConsumedEpoch >= 0 { + reloadOffsets.addLoad(topic, partition, loadTypeEpoch, offsetLoad{ + replica: -1, + Offset: Offset{ + at: partOffset.offset, + epoch: partOffset.lastConsumedEpoch, + }, + }) + } + } + + if keep { + fetchTopic.Partitions = append(fetchTopic.Partitions, fp) + } + } + + if len(fetchTopic.Partitions) > 0 { + f.Topics = append(f.Topics, fetchTopic) + } + } + + if s.cl.cfg.logger.Level() >= LogLevelDebug && len(debugWhyStripped) > 0 { + s.cl.cfg.logger.Log(LogLevelDebug, "fetch stripped partitions", "why", debugWhyStripped.reason("")) + } + + return f, reloadOffsets, preferreds, req.numOffsets == numErrsStripped, updateWhy +} + +// processRespPartition processes all records in all potentially compressed +// batches (or message sets). +func (o *cursorOffsetNext) processRespPartition(br *broker, rp *kmsg.FetchResponseTopicPartition, decompressor *decompressor, hooks hooks) FetchPartition { + fp := FetchPartition{ + Partition: rp.Partition, + Err: kerr.ErrorForCode(rp.ErrorCode), + HighWatermark: rp.HighWatermark, + LastStableOffset: rp.LastStableOffset, + LogStartOffset: rp.LogStartOffset, + } + if rp.ErrorCode == 0 { + o.hwm = rp.HighWatermark + } + + var aborter aborter + if br.cl.cfg.isolationLevel == 1 { + aborter = buildAborter(rp) + } + + // A response could contain any of message v0, message v1, or record + // batches, and this is solely dictated by the magic byte (not the + // fetch response version). The magic byte is located at byte 17. + // + // 1 thru 8: int64 offset / first offset + // 9 thru 12: int32 length + // 13 thru 16: crc (magic 0 or 1), or partition leader epoch (magic 2) + // 17: magic + // + // We decode and validate similarly for messages and record batches, so + // we "abstract" away the high level stuff into a check function just + // below, and then switch based on the magic for how to process. + var ( + in = rp.RecordBatches + + r readerFrom + kind string + length int32 + lengthField *int32 + crcField *int32 + crcTable *crc32.Table + crcAt int + + check = func() bool { + // If we call into check, we know we have a valid + // length, so we should be at least able to parse our + // top level struct and validate the length and CRC. + if err := r.ReadFrom(in[:length]); err != nil { + fp.Err = fmt.Errorf("unable to read %s, not enough data", kind) + return false + } + if length := int32(len(in[12:length])); length != *lengthField { + fp.Err = fmt.Errorf("encoded length %d does not match read length %d", *lengthField, length) + return false + } + // We have already validated that the slice is at least + // 17 bytes, but our CRC may be later (i.e. RecordBatch + // starts at byte 21). Ensure there is at least space + // for a CRC. 
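+			// In the 1-indexed layout above: message sets store the CRC in
+			// bytes 13 thru 16 and checksum everything after it (crcAt = 16),
+			// while record batches store it in bytes 18 thru 21, right after
+			// the magic, and checksum from byte 22 on (crcAt = 21).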
+ if len(in) < crcAt { + fp.Err = fmt.Errorf("length %d is too short to allow for a crc", len(in)) + return false + } + if crcCalc := int32(crc32.Checksum(in[crcAt:length], crcTable)); crcCalc != *crcField { + fp.Err = fmt.Errorf("encoded crc %x does not match calculated crc %x", *crcField, crcCalc) + return false + } + return true + } + ) + + for len(in) > 17 && fp.Err == nil { + offset := int64(binary.BigEndian.Uint64(in)) + length = int32(binary.BigEndian.Uint32(in[8:])) + length += 12 // for the int64 offset we skipped and int32 length field itself + if len(in) < int(length) { + break + } + + switch magic := in[16]; magic { + case 0: + m := new(kmsg.MessageV0) + kind = "message v0" + lengthField = &m.MessageSize + crcField = &m.CRC + crcTable = crc32.IEEETable + crcAt = 16 + r = m + case 1: + m := new(kmsg.MessageV1) + kind = "message v1" + lengthField = &m.MessageSize + crcField = &m.CRC + crcTable = crc32.IEEETable + crcAt = 16 + r = m + case 2: + rb := new(kmsg.RecordBatch) + kind = "record batch" + lengthField = &rb.Length + crcField = &rb.CRC + crcTable = crc32c + crcAt = 21 + r = rb + + default: + fp.Err = fmt.Errorf("unknown magic %d; message offset is %d and length is %d, skipping and setting to next offset", magic, offset, length) + if next := offset + 1; next > o.offset { + o.offset = next + } + return fp + } + + if !check() { + break + } + + in = in[length:] + + var m FetchBatchMetrics + + switch t := r.(type) { + case *kmsg.MessageV0: + m.CompressedBytes = int(length) // for message sets, we include the message set overhead in length + m.CompressionType = uint8(t.Attributes) & 0b0000_0111 + m.NumRecords, m.UncompressedBytes = o.processV0OuterMessage(&fp, t, decompressor) + + case *kmsg.MessageV1: + m.CompressedBytes = int(length) + m.CompressionType = uint8(t.Attributes) & 0b0000_0111 + m.NumRecords, m.UncompressedBytes = o.processV1OuterMessage(&fp, t, decompressor) + + case *kmsg.RecordBatch: + m.CompressedBytes = len(t.Records) // for record batches, we only track the record batch length + m.CompressionType = uint8(t.Attributes) & 0b0000_0111 + m.NumRecords, m.UncompressedBytes = o.processRecordBatch(&fp, t, aborter, decompressor) + } + + if m.UncompressedBytes == 0 { + m.UncompressedBytes = m.CompressedBytes + } + hooks.each(func(h Hook) { + if h, ok := h.(HookFetchBatchRead); ok { + h.OnFetchBatchRead(br.meta, o.from.topic, o.from.partition, m) + } + }) + + // If we encounter a decompression error BUT we have successfully decompressed + // one batch, it is likely that we have received a partial batch. Kafka returns + // UP TO the requested max partition bytes, sometimes truncating data at the end. + // It returns at least one valid batch, but everything after is copied as is + // (i.e. a quick slab copy). We set the error to nil and return what we have. + // + // If we have a decompression error immediately, we keep it and bubble it up. + // The client cannot progress, and the end user needs visibility. 
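+		// Rough example: with a 1MiB partition max bytes, the broker may
+		// copy 1MiB of log that ends mid-batch; every complete batch parses
+		// and decompresses fine and only the truncated trailing batch fails,
+		// so we keep what we have and re-request from the next offset.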
+ if isDecompressErr(fp.Err) && len(fp.Records) > 0 { + fp.Err = nil + break + } + } + + return fp +} + +type aborter map[int64][]int64 + +func buildAborter(rp *kmsg.FetchResponseTopicPartition) aborter { + if len(rp.AbortedTransactions) == 0 { + return nil + } + a := make(aborter) + for _, abort := range rp.AbortedTransactions { + a[abort.ProducerID] = append(a[abort.ProducerID], abort.FirstOffset) + } + return a +} + +func (a aborter) shouldAbortBatch(b *kmsg.RecordBatch) bool { + if len(a) == 0 || b.Attributes&0b0001_0000 == 0 { + return false + } + pidAborts := a[b.ProducerID] + if len(pidAborts) == 0 { + return false + } + // If the first offset in this batch is less than the first offset + // aborted, then this batch is not aborted. + if b.FirstOffset < pidAborts[0] { + return false + } + return true +} + +func (a aborter) trackAbortedPID(producerID int64) { + remaining := a[producerID][1:] + if len(remaining) == 0 { + delete(a, producerID) + } else { + a[producerID] = remaining + } +} + +////////////////////////////////////// +// processing records to fetch part // +////////////////////////////////////// + +// readRawRecords reads n records from in and returns them, returning early if +// there were partial records. +func readRawRecords(n int, in []byte) []kmsg.Record { + rs := make([]kmsg.Record, n) + for i := 0; i < n; i++ { + length, used := kbin.Varint(in) + total := used + int(length) + if used == 0 || length < 0 || len(in) < total { + return rs[:i] + } + if err := (&rs[i]).ReadFrom(in[:total]); err != nil { + return rs[:i] + } + in = in[total:] + } + return rs +} + +func (o *cursorOffsetNext) processRecordBatch( + fp *FetchPartition, + batch *kmsg.RecordBatch, + aborter aborter, + decompressor *decompressor, +) (int, int) { + if batch.Magic != 2 { + fp.Err = fmt.Errorf("unknown batch magic %d", batch.Magic) + return 0, 0 + } + lastOffset := batch.FirstOffset + int64(batch.LastOffsetDelta) + if lastOffset < o.offset { + // If the last offset in this batch is less than what we asked + // for, we got a batch that we entirely do not need. We can + // avoid all work (although we should not get this batch). + return 0, 0 + } + + rawRecords := batch.Records + if compression := byte(batch.Attributes & 0x0007); compression != 0 { + var err error + if rawRecords, err = decompressor.decompress(rawRecords, compression); err != nil { + fp.Err = &errDecompress{err} + return 0, 0 // truncated batch + } + } + + uncompressedBytes := len(rawRecords) + + numRecords := int(batch.NumRecords) + krecords := readRawRecords(numRecords, rawRecords) + + // KAFKA-5443: compacted topics preserve the last offset in a batch, + // even if the last record is removed, meaning that using offsets from + // records alone may not get us to the next offset we need to ask for. + // + // We only perform this logic if we did not consume a truncated batch. + // If we consume a truncated batch, then what was truncated could have + // been an offset we are interested in consuming. Even if our fetch did + // not advance this partition at all, we will eventually fetch from the + // partition and not have a truncated response, at which point we will + // either advance offsets or will set to nextAskOffset. 
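+	// Compaction example: a batch with FirstOffset=10 and LastOffsetDelta=4
+	// originally spanned offsets 10..14. If compaction removed records 12..14,
+	// the batch metadata still ends at offset 14, so we must ask for 15 next
+	// even though the last record we actually decoded was offset 11.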
+ nextAskOffset := lastOffset + 1 + defer func() { + if numRecords == len(krecords) && o.offset < nextAskOffset { + o.offset = nextAskOffset + } + }() + + abortBatch := aborter.shouldAbortBatch(batch) + for i := range krecords { + record := recordToRecord( + o.from.topic, + fp.Partition, + batch, + &krecords[i], + ) + o.maybeKeepRecord(fp, record, abortBatch) + + if abortBatch && record.Attrs.IsControl() { + // A control record has a key and a value where the key + // is int16 version and int16 type. Aborted records + // have a type of 0. + if key := record.Key; len(key) >= 4 && key[2] == 0 && key[3] == 0 { + aborter.trackAbortedPID(batch.ProducerID) + } + } + } + + return len(krecords), uncompressedBytes +} + +// Processes an outer v1 message. There could be no inner message, which makes +// this easy, but if not, we decompress and process each inner message as +// either v0 or v1. We only expect the inner message to be v1, but technically +// a crazy pipeline could have v0 anywhere. +func (o *cursorOffsetNext) processV1OuterMessage( + fp *FetchPartition, + message *kmsg.MessageV1, + decompressor *decompressor, +) (int, int) { + compression := byte(message.Attributes & 0x0003) + if compression == 0 { + o.processV1Message(fp, message) + return 1, 0 + } + + rawInner, err := decompressor.decompress(message.Value, compression) + if err != nil { + fp.Err = &errDecompress{err} + return 0, 0 // truncated batch + } + + uncompressedBytes := len(rawInner) + + var innerMessages []readerFrom +out: + for len(rawInner) > 17 { // magic at byte 17 + length := int32(binary.BigEndian.Uint32(rawInner[8:])) + length += 12 // offset and length fields + if len(rawInner) < int(length) { + break + } + + var ( + magic = rawInner[16] + + msg readerFrom + lengthField *int32 + crcField *int32 + ) + + switch magic { + case 0: + m := new(kmsg.MessageV0) + msg = m + lengthField = &m.MessageSize + crcField = &m.CRC + case 1: + m := new(kmsg.MessageV1) + msg = m + lengthField = &m.MessageSize + crcField = &m.CRC + + default: + fp.Err = fmt.Errorf("message set v1 has inner message with invalid magic %d", magic) + break out + } + + if err := msg.ReadFrom(rawInner[:length]); err != nil { + fp.Err = fmt.Errorf("unable to read message v%d, not enough data", magic) + break + } + if length := int32(len(rawInner[12:length])); length != *lengthField { + fp.Err = fmt.Errorf("encoded length %d does not match read length %d", *lengthField, length) + break + } + if crcCalc := int32(crc32.ChecksumIEEE(rawInner[16:length])); crcCalc != *crcField { + fp.Err = fmt.Errorf("encoded crc %x does not match calculated crc %x", *crcField, crcCalc) + break + } + innerMessages = append(innerMessages, msg) + rawInner = rawInner[length:] + } + if len(innerMessages) == 0 { + return 0, uncompressedBytes + } + + firstOffset := message.Offset - int64(len(innerMessages)) + 1 + for i := range innerMessages { + innerMessage := innerMessages[i] + switch innerMessage := innerMessage.(type) { + case *kmsg.MessageV0: + innerMessage.Offset = firstOffset + int64(i) + innerMessage.Attributes |= int8(compression) + if !o.processV0Message(fp, innerMessage) { + return i, uncompressedBytes + } + case *kmsg.MessageV1: + innerMessage.Offset = firstOffset + int64(i) + innerMessage.Attributes |= int8(compression) + if !o.processV1Message(fp, innerMessage) { + return i, uncompressedBytes + } + } + } + return len(innerMessages), uncompressedBytes +} + +func (o *cursorOffsetNext) processV1Message( + fp *FetchPartition, + message *kmsg.MessageV1, +) bool { + if 
message.Magic != 1 { + fp.Err = fmt.Errorf("unknown message magic %d", message.Magic) + return false + } + if uint8(message.Attributes)&0b1111_0000 != 0 { + fp.Err = fmt.Errorf("unknown attributes on message %d", message.Attributes) + return false + } + record := v1MessageToRecord(o.from.topic, fp.Partition, message) + o.maybeKeepRecord(fp, record, false) + return true +} + +// Processes an outer v0 message. We expect inner messages to be entirely v0 as +// well, so this only tries v0 always. +func (o *cursorOffsetNext) processV0OuterMessage( + fp *FetchPartition, + message *kmsg.MessageV0, + decompressor *decompressor, +) (int, int) { + compression := byte(message.Attributes & 0x0003) + if compression == 0 { + o.processV0Message(fp, message) + return 1, 0 // uncompressed bytes is 0; set to compressed bytes on return + } + + rawInner, err := decompressor.decompress(message.Value, compression) + if err != nil { + fp.Err = &errDecompress{err} + return 0, 0 // truncated batch + } + + uncompressedBytes := len(rawInner) + + var innerMessages []kmsg.MessageV0 + for len(rawInner) > 17 { // magic at byte 17 + length := int32(binary.BigEndian.Uint32(rawInner[8:])) + length += 12 // offset and length fields + if len(rawInner) < int(length) { + break // truncated batch + } + var m kmsg.MessageV0 + if err := m.ReadFrom(rawInner[:length]); err != nil { + fp.Err = fmt.Errorf("unable to read message v0, not enough data") + break + } + if length := int32(len(rawInner[12:length])); length != m.MessageSize { + fp.Err = fmt.Errorf("encoded length %d does not match read length %d", m.MessageSize, length) + break + } + if crcCalc := int32(crc32.ChecksumIEEE(rawInner[16:length])); crcCalc != m.CRC { + fp.Err = fmt.Errorf("encoded crc %x does not match calculated crc %x", m.CRC, crcCalc) + break + } + innerMessages = append(innerMessages, m) + rawInner = rawInner[length:] + } + if len(innerMessages) == 0 { + return 0, uncompressedBytes + } + + firstOffset := message.Offset - int64(len(innerMessages)) + 1 + for i := range innerMessages { + innerMessage := &innerMessages[i] + innerMessage.Attributes |= int8(compression) + innerMessage.Offset = firstOffset + int64(i) + if !o.processV0Message(fp, innerMessage) { + return i, uncompressedBytes + } + } + return len(innerMessages), uncompressedBytes +} + +func (o *cursorOffsetNext) processV0Message( + fp *FetchPartition, + message *kmsg.MessageV0, +) bool { + if message.Magic != 0 { + fp.Err = fmt.Errorf("unknown message magic %d", message.Magic) + return false + } + if uint8(message.Attributes)&0b1111_1000 != 0 { + fp.Err = fmt.Errorf("unknown attributes on message %d", message.Attributes) + return false + } + record := v0MessageToRecord(o.from.topic, fp.Partition, message) + o.maybeKeepRecord(fp, record, false) + return true +} + +// maybeKeepRecord keeps a record if it is within our range of offsets to keep. +// +// If the record is being aborted or the record is a control record and the +// client does not want to keep control records, this does not keep the record. +func (o *cursorOffsetNext) maybeKeepRecord(fp *FetchPartition, record *Record, abort bool) { + if record.Offset < o.offset { + // We asked for offset 5, but that was in the middle of a + // batch; we got offsets 0 thru 4 that we need to skip. + return + } + + // We only keep control records if specifically requested. 
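+	// (Control records are transaction commit/abort markers; keepControl is
+	// presumably wired from the client's KeepControlRecords-style option,
+	// and when set it overrides the abort flag below so the markers
+	// themselves are returned to the user.)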
+ if record.Attrs.IsControl() { + abort = !o.from.keepControl + } + if !abort { + fp.Records = append(fp.Records, record) + } + + // The record offset may be much larger than our expected offset if the + // topic is compacted. + o.offset = record.Offset + 1 + o.lastConsumedEpoch = record.LeaderEpoch + o.lastConsumedTime = record.Timestamp +} + +/////////////////////////////// +// kmsg.Record to kgo.Record // +/////////////////////////////// + +func timeFromMillis(millis int64) time.Time { + return time.Unix(0, millis*1e6) +} + +// recordToRecord converts a kmsg.RecordBatch's Record to a kgo Record. +func recordToRecord( + topic string, + partition int32, + batch *kmsg.RecordBatch, + record *kmsg.Record, +) *Record { + h := make([]RecordHeader, 0, len(record.Headers)) + for _, kv := range record.Headers { + h = append(h, RecordHeader{ + Key: kv.Key, + Value: kv.Value, + }) + } + + r := &Record{ + Key: record.Key, + Value: record.Value, + Headers: h, + Topic: topic, + Partition: partition, + Attrs: RecordAttrs{uint8(batch.Attributes)}, + ProducerID: batch.ProducerID, + ProducerEpoch: batch.ProducerEpoch, + LeaderEpoch: batch.PartitionLeaderEpoch, + } + if batch.FirstOffset == -1 { + r.Offset = -1 + } else { + r.Offset = batch.FirstOffset + int64(record.OffsetDelta) + } + if r.Attrs.TimestampType() == 0 { + r.Timestamp = timeFromMillis(batch.FirstTimestamp + record.TimestampDelta64) + } else { + r.Timestamp = timeFromMillis(batch.MaxTimestamp) + } + return r +} + +func messageAttrsToRecordAttrs(attrs int8, v0 bool) RecordAttrs { + uattrs := uint8(attrs) + if v0 { + uattrs |= 0b1000_0000 + } + return RecordAttrs{uattrs} +} + +func v0MessageToRecord( + topic string, + partition int32, + message *kmsg.MessageV0, +) *Record { + return &Record{ + Key: message.Key, + Value: message.Value, + Topic: topic, + Partition: partition, + Attrs: messageAttrsToRecordAttrs(message.Attributes, true), + ProducerID: -1, + ProducerEpoch: -1, + LeaderEpoch: -1, + Offset: message.Offset, + } +} + +func v1MessageToRecord( + topic string, + partition int32, + message *kmsg.MessageV1, +) *Record { + return &Record{ + Key: message.Key, + Value: message.Value, + Timestamp: timeFromMillis(message.Timestamp), + Topic: topic, + Partition: partition, + Attrs: messageAttrsToRecordAttrs(message.Attributes, false), + ProducerID: -1, + ProducerEpoch: -1, + LeaderEpoch: -1, + Offset: message.Offset, + } +} + +////////////////// +// fetchRequest // +////////////////// + +type fetchRequest struct { + version int16 + maxWait int32 + minBytes int32 + maxBytes int32 + maxPartBytes int32 + rack string + + isolationLevel int8 + preferLagFn PreferLagFn + + numOffsets int + usedOffsets usedOffsets + + torder []string // order of topics to write + porder map[string][]int32 // per topic, order of partitions to write + + // topic2id and id2topic track bidirectional lookup of topics and IDs + // that are being added to *this* specific request. topic2id slightly + // duplicates the map t2id in the fetch session, but t2id is different + // in that t2id tracks IDs in use from all prior requests -- and, + // importantly, t2id is cleared of IDs that are no longer used (see + // ForgottenTopics). + // + // We need to have both a session t2id map and a request t2id map: + // + // * The session t2id is what we use when creating forgotten topics. + // If we are forgetting a topic, the ID is not in the req t2id. + // + // * The req topic2id is used for adding to the session t2id. 
When + // building a request, if the id is in req.topic2id but not + // session.t2id, we promote the ID into the session map. + // + // Lastly, id2topic is used when handling the response, as our reverse + // lookup from the ID back to the topic (and then we work with the + // topic name only). There is no equivalent in the session because + // there is no need for the id2topic lookup ever in the session. + topic2id map[string][16]byte + id2topic map[[16]byte]string + + disableIDs bool // #295: using an old IBP on new Kafka results in ApiVersions advertising 13+ while the broker does not return IDs + + // Session is a copy of the source session at the time a request is + // built. If the source is reset, the session it has is reset at the + // field level only. Our view of the original session is still valid. + session fetchSession +} + +func (f *fetchRequest) addCursor(c *cursor) { + if f.usedOffsets == nil { + f.usedOffsets = make(usedOffsets) + f.id2topic = make(map[[16]byte]string) + f.topic2id = make(map[string][16]byte) + f.porder = make(map[string][]int32) + } + partitions := f.usedOffsets[c.topic] + if partitions == nil { + partitions = make(map[int32]*cursorOffsetNext) + f.usedOffsets[c.topic] = partitions + f.id2topic[c.topicID] = c.topic + f.topic2id[c.topic] = c.topicID + var noID [16]byte + if c.topicID == noID { + f.disableIDs = true + } + f.torder = append(f.torder, c.topic) + } + partitions[c.partition] = c.use() + f.porder[c.topic] = append(f.porder[c.topic], c.partition) + f.numOffsets++ +} + +// PreferLagFn accepts topic and partition lag, the previously determined topic +// order, and the previously determined per-topic partition order, and returns +// a new topic and per-topic partition order. +// +// Most use cases will not need to look at the prior orders, but they exist if +// you want to get fancy. +// +// You can return partial results: if you only return topics, partitions within +// each topic keep their prior ordering. If you only return some topics but not +// all, the topics you do not return / the partitions you do not return will +// retain their original ordering *after* your given ordering. +// +// NOTE: torderPrior and porderPrior must not be modified. To avoid a bit of +// unnecessary allocations, these arguments are views into data that is used to +// build a fetch request. +type PreferLagFn func(lag map[string]map[int32]int64, torderPrior []string, porderPrior map[string][]int32) ([]string, map[string][]int32) + +// PreferLagAt is a simple PreferLagFn that orders the largest lag first, for +// any topic that is collectively lagging more than preferLagAt, and for any +// partition that is lagging more than preferLagAt. +// +// The function does not prescribe any ordering for topics that have the same +// lag. It is recommended to use a number more than 0 or 1: if you use 0, you +// may just always undo client ordering when there is no actual lag. +func PreferLagAt(preferLagAt int64) PreferLagFn { + if preferLagAt < 0 { + return nil + } + return func(lag map[string]map[int32]int64, _ []string, _ map[string][]int32) ([]string, map[string][]int32) { + type plag struct { + p int32 + lag int64 + } + type tlag struct { + t string + lag int64 + ps []plag + } + + // First, collect all partition lag into per-topic lag. 
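+		// Illustrative input: lag = {"foo": {0: 120, 1: 3}, "bar": {0: 40}}
+		// with preferLagAt=50 keeps only foo (total lag 123) and, within
+		// foo, only partition 0; bar and foo partition 1 are filtered out
+		// here and re-added by the caller in their prior order.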
+ tlags := make(map[string]tlag, len(lag)) + for t, ps := range lag { + for p, lag := range ps { + prior := tlags[t] + tlags[t] = tlag{ + t: t, + lag: prior.lag + lag, + ps: append(prior.ps, plag{p, lag}), + } + } + } + + // We now remove topics and partitions that are not lagging + // enough. Collectively, the topic could be lagging too much, + // but individually, no partition is lagging that much: we will + // sort the topic first and keep the old partition ordering. + for t, tlag := range tlags { + if tlag.lag < preferLagAt { + delete(tlags, t) + continue + } + for i := 0; i < len(tlag.ps); i++ { + plag := tlag.ps[i] + if plag.lag < preferLagAt { + tlag.ps[i] = tlag.ps[len(tlag.ps)-1] + tlag.ps = tlag.ps[:len(tlag.ps)-1] + i-- + } + } + } + if len(tlags) == 0 { + return nil, nil + } + + var sortedLags []tlag + for _, tlag := range tlags { + sort.Slice(tlag.ps, func(i, j int) bool { return tlag.ps[i].lag > tlag.ps[j].lag }) + sortedLags = append(sortedLags, tlag) + } + sort.Slice(sortedLags, func(i, j int) bool { return sortedLags[i].lag > sortedLags[j].lag }) + + // We now return our laggy topics and partitions, and let the + // caller add back any missing topics / partitions in their + // prior order. + torder := make([]string, 0, len(sortedLags)) + for _, t := range sortedLags { + torder = append(torder, t.t) + } + porder := make(map[string][]int32, len(sortedLags)) + for _, tlag := range sortedLags { + ps := make([]int32, 0, len(tlag.ps)) + for _, p := range tlag.ps { + ps = append(ps, p.p) + } + porder[tlag.t] = ps + } + return torder, porder + } +} + +// If the end user prefers to consume lag, we reorder our previously ordered +// partitions, preferring first the laggiest topics, and then within those, the +// laggiest partitions. +func (f *fetchRequest) adjustPreferringLag() { + if f.preferLagFn == nil { + return + } + + tall := make(map[string]struct{}, len(f.torder)) + for _, t := range f.torder { + tall[t] = struct{}{} + } + pall := make(map[string][]int32, len(f.porder)) + for t, ps := range f.porder { + pall[t] = append([]int32(nil), ps...) + } + + lag := make(map[string]map[int32]int64, len(f.torder)) + for t, ps := range f.usedOffsets { + plag := make(map[int32]int64, len(ps)) + lag[t] = plag + for p, c := range ps { + hwm := c.hwm + if c.hwm < 0 { + hwm = 0 + } + lag := hwm - c.offset + if c.offset <= 0 { + lag = hwm + } + if lag < 0 { + lag = 0 + } + plag[p] = lag + } + } + + torder, porder := f.preferLagFn(lag, f.torder, f.porder) + if torder == nil && porder == nil { + return + } + defer func() { f.torder, f.porder = torder, porder }() + + if len(torder) == 0 { + torder = f.torder // user did not modify topic order, keep old order + } else { + // Remove any extra topics the user returned that we were not + // consuming, and add all topics they did not give back. + for i := 0; i < len(torder); i++ { + t := torder[i] + if _, exists := tall[t]; !exists { + torder = append(torder[:i], torder[i+1:]...) 
// user gave topic we were not fetching
+				i--
+			}
+			delete(tall, t)
+		}
+		for _, t := range f.torder {
+			if _, exists := tall[t]; exists {
+				torder = append(torder, t) // user did not return topic we were fetching
+				delete(tall, t)
+			}
+		}
+	}
+
+	if len(porder) == 0 {
+		porder = f.porder // user did not modify partition order, keep old order
+		return
+	}
+
+	pused := make(map[int32]struct{})
+	for t, ps := range pall {
+		order, exists := porder[t]
+		if !exists {
+			porder[t] = ps // shortcut: user did not define this topic's partition order, keep old order
+			continue
+		}
+		for _, p := range ps {
+			pused[p] = struct{}{}
+		}
+		for i := 0; i < len(order); i++ {
+			p := order[i]
+			if _, exists := pused[p]; !exists {
+				order = append(order[:i], order[i+1:]...)
+				i--
+			}
+			delete(pused, p)
+		}
+		for _, p := range f.porder[t] {
+			if _, exists := pused[p]; exists {
+				order = append(order, p)
+				delete(pused, p)
+			}
+		}
+		porder[t] = order
+	}
+}
+
+func (*fetchRequest) Key() int16 { return 1 }
+func (f *fetchRequest) MaxVersion() int16 {
+	if f.disableIDs || f.session.disableIDs {
+		return 12
+	}
+	return 16
+}
+func (f *fetchRequest) SetVersion(v int16) { f.version = v }
+func (f *fetchRequest) GetVersion() int16  { return f.version }
+func (f *fetchRequest) IsFlexible() bool   { return f.version >= 12 } // version 12+ is flexible
+func (f *fetchRequest) AppendTo(dst []byte) []byte {
+	req := kmsg.NewFetchRequest()
+	req.Version = f.version
+	req.ReplicaID = -1
+	req.MaxWaitMillis = f.maxWait
+	req.MinBytes = f.minBytes
+	req.MaxBytes = f.maxBytes
+	req.IsolationLevel = f.isolationLevel
+	req.SessionID = f.session.id
+	req.SessionEpoch = f.session.epoch
+	req.Rack = f.rack
+
+	// We track which partitions we add in this request; any partitions
+	// missing that are already in the session get added to forgotten
+	// topics at the end.
+	var sessionUsed map[string]map[int32]struct{}
+	if !f.session.killed {
+		sessionUsed = make(map[string]map[int32]struct{}, len(f.usedOffsets))
+	}
+
+	f.adjustPreferringLag()
+
+	for _, topic := range f.torder {
+		partitions := f.usedOffsets[topic]
+
+		var reqTopic *kmsg.FetchRequestTopic
+		sessionTopic := f.session.lookupTopic(topic, f.topic2id)
+
+		var usedTopic map[int32]struct{}
+		if sessionUsed != nil {
+			usedTopic = make(map[int32]struct{}, len(partitions))
+		}
+
+		for _, partition := range f.porder[topic] {
+			cursorOffsetNext := partitions[partition]
+
+			if usedTopic != nil {
+				usedTopic[partition] = struct{}{}
+			}
+
+			if !sessionTopic.hasPartitionAt(
+				partition,
+				cursorOffsetNext.offset,
+				cursorOffsetNext.currentLeaderEpoch,
+			) {
+				if reqTopic == nil {
+					t := kmsg.NewFetchRequestTopic()
+					t.Topic = topic
+					t.TopicID = f.topic2id[topic]
+					req.Topics = append(req.Topics, t)
+					reqTopic = &req.Topics[len(req.Topics)-1]
+				}
+
+				reqPartition := kmsg.NewFetchRequestTopicPartition()
+				reqPartition.Partition = partition
+				reqPartition.CurrentLeaderEpoch = cursorOffsetNext.currentLeaderEpoch
+				reqPartition.FetchOffset = cursorOffsetNext.offset
+				reqPartition.LastFetchedEpoch = -1
+				reqPartition.LogStartOffset = -1
+				reqPartition.PartitionMaxBytes = f.maxPartBytes
+				reqTopic.Partitions = append(reqTopic.Partitions, reqPartition)
+			}
+		}
+
+		if sessionUsed != nil {
+			sessionUsed[topic] = usedTopic
+		}
+	}
+
+	// Now for everything that we did not use in our session, add it to
+	// forgotten topics and remove it from the session.
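+	// KIP-227 example: if the session previously tracked foo partitions 0
+	// and 1 but this request only includes partition 0 (say partition 1
+	// moved to another broker), partition 1 is added to ForgottenTopics so
+	// the broker evicts it from the session and stops returning it.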
+ if sessionUsed != nil { + for topic, partitions := range f.session.used { + var forgottenTopic *kmsg.FetchRequestForgottenTopic + topicUsed := sessionUsed[topic] + for partition := range partitions { + if topicUsed != nil { + if _, partitionUsed := topicUsed[partition]; partitionUsed { + continue + } + } + if forgottenTopic == nil { + t := kmsg.NewFetchRequestForgottenTopic() + t.Topic = topic + t.TopicID = f.session.t2id[topic] + req.ForgottenTopics = append(req.ForgottenTopics, t) + forgottenTopic = &req.ForgottenTopics[len(req.ForgottenTopics)-1] + } + forgottenTopic.Partitions = append(forgottenTopic.Partitions, partition) + delete(partitions, partition) + } + if len(partitions) == 0 { + delete(f.session.used, topic) + id := f.session.t2id[topic] + delete(f.session.t2id, topic) + // If we deleted a topic that was missing an ID, then we clear the + // previous disableIDs state. We potentially *reenable* disableIDs + // if any remaining topics in our session are also missing their ID. + var noID [16]byte + if id == noID { + f.session.disableIDs = false + for _, id := range f.session.t2id { + if id == noID { + f.session.disableIDs = true + break + } + } + } + } + } + } + + return req.AppendTo(dst) +} + +func (*fetchRequest) ReadFrom([]byte) error { + panic("unreachable -- the client never uses ReadFrom on its internal fetchRequest") +} + +func (f *fetchRequest) ResponseKind() kmsg.Response { + r := kmsg.NewPtrFetchResponse() + r.Version = f.version + return r +} + +// fetchSessions, introduced in KIP-227, allow us to send less information back +// and forth to a Kafka broker. +type fetchSession struct { + id int32 + epoch int32 + + used map[string]map[int32]fetchSessionOffsetEpoch // what we have in the session so far + t2id map[string][16]byte + + disableIDs bool // if anything in t2id has no ID + killed bool // if we cannot use a session anymore +} + +func (s *fetchSession) kill() { + s.epoch = -1 + s.used = nil + s.t2id = nil + s.disableIDs = false + s.killed = true +} + +// reset resets the session by setting the next request to use epoch 0. +// We do not reset the ID; using epoch 0 for an existing ID unregisters the +// prior session. +func (s *fetchSession) reset() { + if s.killed { + return + } + s.epoch = 0 + s.used = nil + s.t2id = nil + s.disableIDs = false +} + +// bumpEpoch bumps the epoch and saves the session id. +// +// Kafka replies with the session ID of the session to use. When it does, we +// start from epoch 1, wrapping back to 1 if we go negative. 
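+//
+// For context, epoch 0 on a request asks the broker to create a new session
+// and epoch -1 closes one (see reset and kill); an established session then
+// counts 1, 2, 3, ... and wraps back to 1 to skip those reserved values.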
+func (s *fetchSession) bumpEpoch(id int32) { + if s.killed { + return + } + if id != s.id { + s.epoch = 0 // new session: reset to 0 for the increment below + } + s.epoch++ + if s.epoch < 0 { + s.epoch = 1 // we wrapped: reset back to 1 to continue this session + } + s.id = id +} + +func (s *fetchSession) lookupTopic(topic string, t2id map[string][16]byte) fetchSessionTopic { + if s.killed { + return nil + } + if s.used == nil { + s.used = make(map[string]map[int32]fetchSessionOffsetEpoch) + s.t2id = make(map[string][16]byte) + } + t := s.used[topic] + if t == nil { + t = make(map[int32]fetchSessionOffsetEpoch) + s.used[topic] = t + id := t2id[topic] + s.t2id[topic] = id + if id == ([16]byte{}) { + s.disableIDs = true + } + } + return t +} + +type fetchSessionOffsetEpoch struct { + offset int64 + epoch int32 +} + +type fetchSessionTopic map[int32]fetchSessionOffsetEpoch + +func (s fetchSessionTopic) hasPartitionAt(partition int32, offset int64, epoch int32) bool { + if s == nil { // if we are nil, the session was killed + return false + } + at, exists := s[partition] + now := fetchSessionOffsetEpoch{offset, epoch} + s[partition] = now + return exists && at == now +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/strftime.go b/vendor/github.com/twmb/franz-go/pkg/kgo/strftime.go new file mode 100644 index 00000000000..6ff862fbf50 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/strftime.go @@ -0,0 +1,205 @@ +package kgo + +import ( + "strconv" + "time" +) + +// NOTE: this code is copied from github.com/twmb/go-strftime, with AppendFormat +// being unexported. + +// appendFormat appends t to dst according to the input strftime format. +// +// this does not take into account locale; some high level differences: +// +// %E and %O are stripped, as well as a single subsequent alpha char +// %x is DD/MM/YY +// %c is time.ANSIC +// +// In normal strftime, %a, %A, %b, %B, %c, %p, %P, %r, %x, and %X are all +// affected by locale. This package hardcodes the implementation to mirror +// LC_TIME=C (minus %x). Every strftime(3) formatter is accounted for. 
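+//
+// A rough illustrative call (the function is unexported, so usable only
+// within this package):
+//
+//	out := strftimeAppendFormat(nil, "%Y-%m-%dT%H:%M:%S", time.Unix(0, 0).UTC())
+//	// out == []byte("1970-01-01T00:00:00")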
+func strftimeAppendFormat(dst []byte, format string, t time.Time) []byte {
+	for i := 0; i < len(format); i++ {
+		c := format[i]
+		if c != '%' || i == len(format)-1 {
+			dst = append(dst, c)
+			continue
+		}
+
+		i++
+		c = format[i]
+		switch c {
+		default:
+			dst = append(dst, '%', c)
+		case 'a': // abbrev day
+			dst = t.AppendFormat(dst, "Mon")
+		case 'A': // full day
+			dst = t.AppendFormat(dst, "Monday")
+		case 'b', 'h': // abbrev month, h is equivalent to b
+			dst = t.AppendFormat(dst, "Jan")
+		case 'B': // full month
+			dst = t.AppendFormat(dst, "January")
+		case 'c': // preferred date and time representation
+			dst = t.AppendFormat(dst, time.ANSIC)
+		case 'C': // century (year/100) as two digit num
+			dst = append0Pad(dst, t.Year()/100, 2)
+		case 'd': // day of month as two digit num
+			dst = append0Pad(dst, t.Day(), 2)
+		case 'D': // %m/%d/%y
+			dst = append0Pad(dst, int(t.Month()), 2)
+			dst = append(dst, '/')
+			dst = append0Pad(dst, t.Day(), 2)
+			dst = append(dst, '/')
+			dst = append0Pad(dst, t.Year()%100, 2)
+		case 'e': // day of month as num like %d, but leading 0 is space instead
+			dst = appendSpacePad(dst, t.Day())
+		case 'E', 'O': // modifier, ignored and skip next (if ascii)
+			if i+1 < len(format) {
+				next := format[i+1]
+				if 'a' <= next && next <= 'z' || 'A' <= next && next <= 'Z' {
+					i++
+				}
+			}
+		case 'F': // %Y-%m-%d (iso8601)
+			dst = strconv.AppendInt(dst, int64(t.Year()), 10)
+			dst = append(dst, '-')
+			dst = append0Pad(dst, int(t.Month()), 2)
+			dst = append(dst, '-')
+			dst = append0Pad(dst, t.Day(), 2)
+		case 'G': // iso8601 week-based year
+			year, _ := t.ISOWeek()
+			dst = append0Pad(dst, year, 4)
+		case 'g': // like %G, but two digit year (no century)
+			year, _ := t.ISOWeek()
+			dst = append0Pad(dst, year%100, 2)
+		case 'H': // hour as number on 24hr clock
+			dst = append0Pad(dst, t.Hour(), 2)
+		case 'I': // hour as number on 12hr clock
+			dst = append0Pad(dst, t.Hour()%12, 2)
+		case 'j': // day of year as decimal number
+			dst = append0Pad(dst, t.YearDay(), 3)
+		case 'k': // 24hr as number, space padded
+			dst = appendSpacePad(dst, t.Hour())
+		case 'l': // 12hr as number, space padded
+			dst = appendSpacePad(dst, t.Hour()%12)
+		case 'm': // month as number
+			dst = append0Pad(dst, int(t.Month()), 2)
+		case 'M': // minute as number
+			dst = append0Pad(dst, t.Minute(), 2)
+		case 'n': // newline
+			dst = append(dst, '\n')
+		case 'p': // AM or PM
+			dst = appendAMPM(dst, t.Hour())
+		case 'P': // like %p but lowercase
+			dst = appendampm(dst, t.Hour())
+		case 'r': // %I:%M:%S %p
+			h := t.Hour()
+			dst = append0Pad(dst, h%12, 2)
+			dst = append(dst, ':')
+			dst = append0Pad(dst, t.Minute(), 2)
+			dst = append(dst, ':')
+			dst = append0Pad(dst, t.Second(), 2)
+			dst = append(dst, ' ')
+			dst = appendAMPM(dst, h)
+		case 'R': // %H:%M
+			dst = append0Pad(dst, t.Hour(), 2)
+			dst = append(dst, ':')
+			dst = append0Pad(dst, t.Minute(), 2)
+		case 's': // seconds since epoch
+			dst = strconv.AppendInt(dst, t.Unix(), 10)
+		case 'S': // second as number thru 60 for leap second
+			dst = append0Pad(dst, t.Second(), 2)
+		case 't': // tab
+			dst = append(dst, '\t')
+		case 'T': // %H:%M:%S
+			dst = append0Pad(dst, t.Hour(), 2)
+			dst = append(dst, ':')
+			dst = append0Pad(dst, t.Minute(), 2)
+			dst = append(dst, ':')
+			dst = append0Pad(dst, t.Second(), 2)
+		case 'u': // day of week as num; Monday is 1
+			day := byte(t.Weekday())
+			if day == 0 {
+				day = 7
+			}
+			dst = append(dst, '0'+day)
+		case 'U': // week number of year starting from first Sunday
+			dst = append0Pad(dst, (t.YearDay()-int(t.Weekday())+7)/7, 2)
+		case 'V': //
iso8601 week number + _, week := t.ISOWeek() + dst = append0Pad(dst, week, 2) + case 'w': // day of week, 0 to 6, Sunday 0 + dst = strconv.AppendInt(dst, int64(t.Weekday()), 10) + case 'W': // week number of year starting from first Monday + dst = append0Pad(dst, (t.YearDay()-(int(t.Weekday())+6)%7+7)/7, 2) + case 'x': // date representation for current locale; we go DD/MM/YY + dst = append0Pad(dst, t.Day(), 2) + dst = append(dst, '/') + dst = append0Pad(dst, int(t.Month()), 2) + dst = append(dst, '/') + dst = append0Pad(dst, t.Year()%100, 2) + case 'X': // time representation for current locale; we go HH:MM:SS + dst = append0Pad(dst, t.Hour(), 2) + dst = append(dst, ':') + dst = append0Pad(dst, t.Minute(), 2) + dst = append(dst, ':') + dst = append0Pad(dst, t.Second(), 2) + case 'y': // year as num without century + dst = append0Pad(dst, t.Year()%100, 2) + case 'Y': // year as a num + dst = append0Pad(dst, t.Year(), 4) + case 'z': // +hhmm or -hhmm offset from utc + dst = t.AppendFormat(dst, "-0700") + case 'Z': // timezone + dst = t.AppendFormat(dst, "MST") + case '+': // date and time in date(1) format + dst = t.AppendFormat(dst, "Mon Jan _2 15:04:05 MST 2006") + case '%': + dst = append(dst, '%') + } + } + return dst +} + +// all space padded numbers are two length +func appendSpacePad(p []byte, n int) []byte { + if n < 10 { + return append(p, ' ', '0'+byte(n)) + } + return strconv.AppendInt(p, int64(n), 10) +} + +func append0Pad(dst []byte, n, size int) []byte { + switch size { + case 4: + if n < 1000 { + dst = append(dst, '0') + } + fallthrough + case 3: + if n < 100 { + dst = append(dst, '0') + } + fallthrough + case 2: + if n < 10 { + dst = append(dst, '0') + } + } + return strconv.AppendInt(dst, int64(n), 10) +} + +func appendampm(p []byte, h int) []byte { + if h < 12 { + return append(p, 'a', 'm') + } + return append(p, 'p', 'm') +} + +func appendAMPM(p []byte, h int) []byte { + if h < 12 { + return append(p, 'A', 'M') + } + return append(p, 'P', 'M') +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/topics_and_partitions.go b/vendor/github.com/twmb/franz-go/pkg/kgo/topics_and_partitions.go new file mode 100644 index 00000000000..3c25284f31d --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/topics_and_partitions.go @@ -0,0 +1,922 @@ +package kgo + +import ( + "fmt" + "sort" + "strings" + "sync" + "sync/atomic" + + "github.com/twmb/franz-go/pkg/kerr" + "github.com/twmb/franz-go/pkg/kmsg" +) + +///////////// +// HELPERS // -- ugly types to eliminate the toil of nil maps and lookups +///////////// + +func dupmsi32(m map[string]int32) map[string]int32 { + d := make(map[string]int32, len(m)) + for t, ps := range m { + d[t] = ps + } + return d +} + +// "Atomic map of topic partitions", for lack of a better name at this point. +type amtps struct { + v atomic.Value +} + +func (a *amtps) read() map[string][]int32 { + v := a.v.Load() + if v == nil { + return nil + } + return v.(map[string][]int32) +} + +func (a *amtps) write(fn func(map[string][]int32)) { + dup := a.clone() + fn(dup) + a.store(dup) +} + +func (a *amtps) clone() map[string][]int32 { + orig := a.read() + dup := make(map[string][]int32, len(orig)) + for t, ps := range orig { + dup[t] = append(dup[t], ps...) 
+ } + return dup +} + +func (a *amtps) store(m map[string][]int32) { a.v.Store(m) } + +type mtps map[string][]int32 + +func (m mtps) String() string { + var sb strings.Builder + var topicsWritten int + ts := make([]string, 0, len(m)) + var ps []int32 + for t := range m { + ts = append(ts, t) + } + sort.Strings(ts) + for _, t := range ts { + ps = append(ps[:0], m[t]...) + sort.Slice(ps, func(i, j int) bool { return ps[i] < ps[j] }) + topicsWritten++ + fmt.Fprintf(&sb, "%s%v", t, ps) + if topicsWritten < len(m) { + sb.WriteString(", ") + } + } + return sb.String() +} + +type mtmps map[string]map[int32]struct{} // map of topics to map of partitions + +func (m *mtmps) add(t string, p int32) { + if *m == nil { + *m = make(mtmps) + } + mps := (*m)[t] + if mps == nil { + mps = make(map[int32]struct{}) + (*m)[t] = mps + } + mps[p] = struct{}{} +} + +func (m *mtmps) addt(t string) { + if *m == nil { + *m = make(mtmps) + } + mps := (*m)[t] + if mps == nil { + mps = make(map[int32]struct{}) + (*m)[t] = mps + } +} + +func (m mtmps) onlyt(t string) bool { + if m == nil { + return false + } + ps, exists := m[t] + return exists && len(ps) == 0 +} + +func (m mtmps) remove(t string, p int32) { + if m == nil { + return + } + mps, exists := m[t] + if !exists { + return + } + delete(mps, p) + if len(mps) == 0 { + delete(m, t) + } +} + +//////////// +// PAUSED // -- types for pausing topics and partitions +//////////// + +type pausedTopics map[string]pausedPartitions + +type pausedPartitions struct { + all bool + m map[int32]struct{} +} + +func (m pausedTopics) t(topic string) (pausedPartitions, bool) { + if len(m) == 0 { // potentially nil + return pausedPartitions{}, false + } + pps, exists := m[topic] + return pps, exists +} + +func (m pausedTopics) has(topic string, partition int32) (paused bool) { + if len(m) == 0 { + return false + } + pps, exists := m[topic] + if !exists { + return false + } + if pps.all { + return true + } + _, exists = pps.m[partition] + return exists +} + +func (m pausedTopics) addTopics(topics ...string) { + for _, topic := range topics { + pps, exists := m[topic] + if !exists { + pps = pausedPartitions{m: make(map[int32]struct{})} + } + pps.all = true + m[topic] = pps + } +} + +func (m pausedTopics) delTopics(topics ...string) { + for _, topic := range topics { + pps, exists := m[topic] + if !exists { + continue + } + pps.all = false + if !pps.all && len(pps.m) == 0 { + delete(m, topic) + } + } +} + +func (m pausedTopics) addPartitions(topicPartitions map[string][]int32) { + for topic, partitions := range topicPartitions { + pps, exists := m[topic] + if !exists { + pps = pausedPartitions{m: make(map[int32]struct{})} + } + for _, partition := range partitions { + pps.m[partition] = struct{}{} + } + m[topic] = pps + } +} + +func (m pausedTopics) delPartitions(topicPartitions map[string][]int32) { + for topic, partitions := range topicPartitions { + pps, exists := m[topic] + if !exists { + continue + } + for _, partition := range partitions { + delete(pps.m, partition) + } + if !pps.all && len(pps.m) == 0 { + delete(m, topic) + } + } +} + +func (m pausedTopics) pausedTopics() []string { + var r []string + for topic, pps := range m { + if pps.all { + r = append(r, topic) + } + } + return r +} + +func (m pausedTopics) pausedPartitions() map[string][]int32 { + r := make(map[string][]int32) + for topic, pps := range m { + ps := make([]int32, 0, len(pps.m)) + for partition := range pps.m { + ps = append(ps, partition) + } + r[topic] = ps + } + return r +} + +func (m pausedTopics) clone() 
pausedTopics { + dup := make(pausedTopics) + dup.addTopics(m.pausedTopics()...) + dup.addPartitions(m.pausedPartitions()) + return dup +} + +////////// +// GUTS // -- the key types for storing important metadata for topics & partitions +////////// + +func newTopicPartitions() *topicPartitions { + parts := new(topicPartitions) + parts.v.Store(new(topicPartitionsData)) + return parts +} + +// Contains all information about a topic's partitions. +type topicPartitions struct { + v atomic.Value // *topicPartitionsData + + partsMu sync.Mutex + partitioner TopicPartitioner + lb *leastBackupInput // for partitioning if the partitioner is a LoadTopicPartitioner +} + +func (t *topicPartitions) load() *topicPartitionsData { return t.v.Load().(*topicPartitionsData) } + +func newTopicsPartitions() *topicsPartitions { + var t topicsPartitions + t.v.Store(make(topicsPartitionsData)) + return &t +} + +// A helper type mapping topics to their partitions; +// this is the inner value of topicPartitions.v. +type topicsPartitionsData map[string]*topicPartitions + +func (d topicsPartitionsData) hasTopic(t string) bool { _, exists := d[t]; return exists } +func (d topicsPartitionsData) loadTopic(t string) *topicPartitionsData { + tp, exists := d[t] + if !exists { + return nil + } + return tp.load() +} + +// A helper type mapping topics to their partitions that can be updated +// atomically. +type topicsPartitions struct { + v atomic.Value // topicsPartitionsData (map[string]*topicPartitions) +} + +func (t *topicsPartitions) load() topicsPartitionsData { + if t == nil { + return nil + } + return t.v.Load().(topicsPartitionsData) +} +func (t *topicsPartitions) storeData(d topicsPartitionsData) { t.v.Store(d) } +func (t *topicsPartitions) storeTopics(topics []string) { t.v.Store(t.ensureTopics(topics)) } +func (t *topicsPartitions) clone() topicsPartitionsData { + current := t.load() + clone := make(map[string]*topicPartitions, len(current)) + for k, v := range current { + clone[k] = v + } + return clone +} + +// Ensures that the topics exist in the returned map, but does not store the +// update. This can be used to update the data and store later, rather than +// storing immediately. +func (t *topicsPartitions) ensureTopics(topics []string) topicsPartitionsData { + var cloned bool + current := t.load() + for _, topic := range topics { + if _, exists := current[topic]; !exists { + if !cloned { + current = t.clone() + cloned = true + } + current[topic] = newTopicPartitions() + } + } + return current +} + +// Opposite of ensureTopics, this purges the input topics and *does* store. +func (t *topicsPartitions) purgeTopics(topics []string) { + var cloned bool + current := t.load() + for _, topic := range topics { + if _, exists := current[topic]; exists { + if !cloned { + current = t.clone() + cloned = true + } + delete(current, topic) + } + } + if cloned { + t.storeData(current) + } +} + +// Updates the topic partitions data atomic value. +// +// If this is the first time seeing partitions, we do processing of unknown +// partitions that may be buffered for producing. +func (cl *Client) storePartitionsUpdate(topic string, l *topicPartitions, lv *topicPartitionsData, hadPartitions bool) { + // If the topic already had partitions, then there would be no + // unknown topic waiting and we do not need to notify anything. 
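+	// (The "unknown topic" bookkeeping referenced here lives on the
+	// producer: records produced to a topic whose partitions are not yet
+	// known are buffered under p.unknownTopics until a metadata load
+	// either succeeds or fails fatally, as handled below.)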
+	if hadPartitions {
+		l.v.Store(lv)
+		return
+	}
+
+	p := &cl.producer
+
+	p.unknownTopicsMu.Lock()
+	defer p.unknownTopicsMu.Unlock()
+
+	// If the topic did not have partitions, then we need to store the
+	// partition update BEFORE unlocking the mutex to guard against this
+	// sequence of events:
+	//
+	// - unlock waiters
+	// - delete waiter
+	// - new produce recreates waiter
+	// - we store update
+	// - we never notify the recreated waiter
+	//
+	// By storing before releasing the locks, we ensure that later
+	// partition loads for this topic under the mu will see our update.
+	defer l.v.Store(lv)
+
+	// If there are no unknown topics or this topic is not unknown, then we
+	// have nothing to do.
+	if len(p.unknownTopics) == 0 {
+		return
+	}
+	unknown, exists := p.unknownTopics[topic]
+	if !exists {
+		return
+	}
+
+	// If we loaded no partitions because of a retryable error, we signal
+	// the waiting goroutine that a try happened. It is possible the
+	// goroutine is quitting and will not be draining unknownWait, so we do
+	// not require the send.
+	if len(lv.partitions) == 0 && kerr.IsRetriable(lv.loadErr) {
+		select {
+		case unknown.wait <- lv.loadErr:
+		default:
+		}
+		return
+	}
+
+	// Either we have a fatal error or we can successfully partition.
+	//
+	// Even with a fatal error, if we loaded any partitions, we partition.
+	// If we only had a fatal error, we can finish promises in a goroutine.
+	// If we are partitioning, we have to do it under the unknownMu to
+	// ensure prior buffered records are produced in order before we
+	// release the mu.
+	delete(p.unknownTopics, topic)
+	close(unknown.wait) // allow waiting goroutine to quit
+
+	if len(lv.partitions) == 0 {
+		cl.producer.promiseBatch(batchPromise{
+			recs: unknown.buffered,
+			err:  lv.loadErr,
+		})
+	} else {
+		for _, pr := range unknown.buffered {
+			cl.doPartitionRecord(l, lv, pr)
+		}
+	}
+}
+
+// If a metadata request fails after retrying (internally retrying, so only a
+// few times), or the metadata request does not return topics that we requested
+// (which may also happen when additionally consuming via regex), then we need
+// to bump errors for topics that were previously loaded, and bump errors for
+// topics awaiting load.
+//
+// This has two modes of operation:
+//
+// 1. if no topics were missing, then the metadata request failed outright,
+// and we need to bump errors on all stored topics and unknown topics.
+//
+// 2. if topics were missing, then the metadata request was successful but
+// had missing data, and we need to bump errors on only what was missing.
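+//
+// For example (hypothetical call sites, shown only to illustrate the two
+// modes):
+//
+//	cl.bumpMetadataFailForTopics(requested, err)        // mode 1: request failed outright
+//	cl.bumpMetadataFailForTopics(requested, err, "foo") // mode 2: response was missing "foo"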
+func (cl *Client) bumpMetadataFailForTopics(requested map[string]*topicPartitions, err error, missingTopics ...string) { + p := &cl.producer + + // mode 1 + if len(missingTopics) == 0 { + for _, topic := range requested { + for _, topicPartition := range topic.load().partitions { + topicPartition.records.bumpRepeatedLoadErr(err) + } + } + } + + // mode 2 + var missing map[string]bool + for _, failTopic := range missingTopics { + if missing == nil { + missing = make(map[string]bool, len(missingTopics)) + } + missing[failTopic] = true + + if topic, exists := requested[failTopic]; exists { + for _, topicPartition := range topic.load().partitions { + topicPartition.records.bumpRepeatedLoadErr(err) + } + } + } + + p.unknownTopicsMu.Lock() + defer p.unknownTopicsMu.Unlock() + + for topic, unknown := range p.unknownTopics { + // if nil, mode 1 (req err), else mode 2 (missing resp) + if missing != nil && !missing[topic] { + continue + } + + select { + case unknown.wait <- err: + default: + } + } +} + +// topicPartitionsData is the data behind a topicPartitions' v. +// +// We keep this in an atomic because it is expected to be extremely read heavy, +// and if it were behind a lock, the lock would need to be held for a while. +type topicPartitionsData struct { + // NOTE if adding anything to this struct, be sure to fix meta merge. + loadErr error // could be auth, unknown, leader not avail, or creation err + isInternal bool + partitions []*topicPartition // partition num => partition + writablePartitions []*topicPartition // subset of above + topic string + when int64 +} + +// topicPartition contains all information from Kafka for a topic's partition, +// as well as what a client is producing to it or info about consuming from it. +type topicPartition struct { + // If we have a load error (leader/listener/replica not available), we + // keep the old topicPartition data and the new error. + loadErr error + + // If, on metadata refresh, the leader epoch for this partition goes + // backwards, we ignore the metadata refresh and signal the metadata + // should be reloaded: the broker we requested is stale. However, the + // broker could get into a bad state through some weird cluster failure + // scenarios. If we see the epoch rewind repeatedly, we eventually keep + // the metadata refresh. This is not detrimental and at worst will lead + // to the broker telling us to update our metadata. + epochRewinds uint8 + + // If we do not have a load error, we determine if the new + // topicPartition is the same or different from the old based on + // whether the data changed (leader or leader epoch, etc.). + topicPartitionData + + // If we do not have a load error, we copy the records and cursor + // pointers from the old after updating any necessary fields in them + // (see migrate functions below). + // + // Only one of records or cursor is non-nil. + records *recBuf + cursor *cursor +} + +func (tp *topicPartition) partition() int32 { + if tp.records != nil { + return tp.records.partition + } + return tp.cursor.partition +} + +// Contains stuff that changes on metadata update that we copy into a cursor or +// recBuf. +type topicPartitionData struct { + // Our leader; if metadata sees this change, the metadata update + // migrates the cursor to a different source with the session stopped, + // and the recBuf to a different sink under a tight mutex. + leader int32 + + // What we believe to be the epoch of the leader for this partition. 
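+	// (This is the KIP-320 leader epoch; metadata leaves it at -1 when the
+	// broker does not support epochs, which migrateCursorTo checks below.)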
+ // + // For cursors, for KIP-320, if a broker receives a fetch request where + // the current leader epoch does not match the brokers, either the + // broker is behind and returns UnknownLeaderEpoch, or we are behind + // and the broker returns FencedLeaderEpoch. For the former, we back + // off and retry. For the latter, we update our metadata. + leaderEpoch int32 +} + +// migrateProductionTo is called on metadata update if a topic partition's sink +// has changed. This moves record production from one sink to the other; this +// must be done such that records produced during migration follow those +// already buffered. +func (old *topicPartition) migrateProductionTo(new *topicPartition) { //nolint:revive // old/new naming makes this clearer + // First, remove our record buffer from the old sink. + old.records.sink.removeRecBuf(old.records) + + // Before this next lock, record producing will buffer to the + // in-migration-progress records and may trigger draining to + // the old sink. That is fine, the old sink no longer consumes + // from these records. We just have wasted drain triggers. + + old.records.mu.Lock() // guard setting sink and topic partition data + old.records.sink = new.records.sink + old.records.topicPartitionData = new.topicPartitionData + old.records.mu.Unlock() + + // After the unlock above, record buffering can trigger drains + // on the new sink, which is not yet consuming from these + // records. Again, just more wasted drain triggers. + + old.records.sink.addRecBuf(old.records) // add our record source to the new sink + + // At this point, the new sink will be draining our records. We lastly + // need to copy the records pointer to our new topicPartition. + new.records = old.records +} + +// migrateCursorTo is called on metadata update if a topic partition's leader +// or leader epoch has changed. +// +// This is a little bit different from above, in that we do this logic only +// after stopping a consumer session. With the consumer session stopped, we +// have fewer concurrency issues to worry about. +func (old *topicPartition) migrateCursorTo( //nolint:revive // old/new naming makes this clearer + new *topicPartition, + css *consumerSessionStopper, +) { + css.stop() + + old.cursor.source.removeCursor(old.cursor) + + // With the session stopped, we can update fields on the old cursor + // with no concurrency issue. + old.cursor.source = new.cursor.source + + // KIP-320: if we had consumed some messages, we need to validate the + // leader epoch on the new broker to see if we experienced data loss + // before we can use this cursor. + // + // Metadata ensures that leaderEpoch is non-negative only if the broker + // supports KIP-320. + if new.leaderEpoch != -1 && old.cursor.lastConsumedEpoch >= 0 { + // Since the cursor consumed messages, it is definitely usable. + // We use it so that the epoch load can finish using it + // properly. 
+ old.cursor.use() + css.reloadOffsets.addLoad(old.cursor.topic, old.cursor.partition, loadTypeEpoch, offsetLoad{ + replica: -1, + Offset: Offset{ + at: old.cursor.offset, + epoch: old.cursor.lastConsumedEpoch, + }, + }) + } + + old.cursor.topicPartitionData = new.topicPartitionData + + old.cursor.source.addCursor(old.cursor) + new.cursor = old.cursor +} + +type kip951move struct { + recBufs map[*recBuf]topicPartitionData + cursors map[*cursor]topicPartitionData + brokers []BrokerMetadata +} + +func (k *kip951move) empty() bool { + return len(k.brokers) == 0 +} + +func (k *kip951move) hasRecBuf(rb *recBuf) bool { + if k == nil || k.recBufs == nil { + return false + } + _, ok := k.recBufs[rb] + return ok +} + +func (k *kip951move) maybeAddProducePartition(resp *kmsg.ProduceResponse, p *kmsg.ProduceResponseTopicPartition, rb *recBuf) bool { + if resp.GetVersion() < 10 || + p.ErrorCode != kerr.NotLeaderForPartition.Code || + len(resp.Brokers) == 0 || + p.CurrentLeader.LeaderID < 0 || + p.CurrentLeader.LeaderEpoch < 0 { + return false + } + if len(k.brokers) == 0 { + for _, rb := range resp.Brokers { + b := BrokerMetadata{ + NodeID: rb.NodeID, + Host: rb.Host, + Port: rb.Port, + Rack: rb.Rack, + } + k.brokers = append(k.brokers, b) + } + } + if k.recBufs == nil { + k.recBufs = make(map[*recBuf]topicPartitionData) + } + k.recBufs[rb] = topicPartitionData{ + leader: p.CurrentLeader.LeaderID, + leaderEpoch: p.CurrentLeader.LeaderEpoch, + } + return true +} + +func (k *kip951move) maybeAddFetchPartition(resp *kmsg.FetchResponse, p *kmsg.FetchResponseTopicPartition, c *cursor) bool { + if resp.GetVersion() < 16 || + p.ErrorCode != kerr.NotLeaderForPartition.Code || + len(resp.Brokers) == 0 || + p.CurrentLeader.LeaderID < 0 || + p.CurrentLeader.LeaderEpoch < 0 { + return false + } + + if len(k.brokers) == 0 { + for _, rb := range resp.Brokers { + b := BrokerMetadata{ + NodeID: rb.NodeID, + Host: rb.Host, + Port: rb.Port, + Rack: rb.Rack, + } + k.brokers = append(k.brokers, b) + } + } + if k.cursors == nil { + k.cursors = make(map[*cursor]topicPartitionData) + } + k.cursors[c] = topicPartitionData{ + leader: p.CurrentLeader.LeaderID, + leaderEpoch: p.CurrentLeader.LeaderEpoch, + } + return true +} + +func (k *kip951move) ensureSinksAndSources(cl *Client) { + cl.sinksAndSourcesMu.Lock() + defer cl.sinksAndSourcesMu.Unlock() + + ensure := func(leader int32) { + if _, exists := cl.sinksAndSources[leader]; exists { + return + } + cl.sinksAndSources[leader] = sinkAndSource{ + sink: cl.newSink(leader), + source: cl.newSource(leader), + } + } + + for _, td := range k.recBufs { + ensure(td.leader) + } + for _, td := range k.cursors { + ensure(td.leader) + } +} + +func (k *kip951move) ensureBrokers(cl *Client) { + if len(k.brokers) == 0 { + return + } + + kbs := make([]kmsg.MetadataResponseBroker, 0, len(k.brokers)) + for _, b := range k.brokers { + kbs = append(kbs, kmsg.MetadataResponseBroker{ + NodeID: b.NodeID, + Host: b.Host, + Port: b.Port, + Rack: b.Rack, + }) + } + cl.updateBrokers(kbs) +} + +func (k *kip951move) maybeBeginMove(cl *Client) { + if k.empty() { + return + } + // We want to do the move independent of whatever is calling us, BUT we + // want to ensure it is not concurrent with a metadata request. + go cl.blockingMetadataFn(func() { + k.ensureBrokers(cl) + k.ensureSinksAndSources(cl) + k.doMove(cl) + }) +} + +func (k *kip951move) doMove(cl *Client) { + // Moving partitions is theoretically simple, but the client is written + // in a confusing way around concurrency. 
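+	// (Recall the KIP-951 idea: the produce or fetch response itself names
+	// the new leader, letting us move only the affected partitions without
+	// waiting for a full metadata refresh; the guards in the maybeAdd
+	// functions above only accept a move whose leader epoch is at least as
+	// new as what we already know.)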
+ // + // The problem is that topicPartitionsData is read-only after + // initialization. Updates are done via atomic stores of the containing + // topicPartitionsData struct. Moving a single partition requires some + // deep copying. + + // oldNew pairs what NEEDS to be atomically updated (old; left value) + // with the value that WILL be stored (new; right value). + type oldNew struct { + l *topicPartitions + r *topicPartitionsData + } + topics := make(map[string]oldNew) + + // getT returns the oldNew for the topic, performing a shallow clone of + // the old whole-topic struct. + getT := func(m topicsPartitionsData, topic string) (oldNew, bool) { + lr, ok := topics[topic] + if !ok { + l := m[topic] + if l == nil { + return oldNew{}, false + } + dup := *l.load() + r := &dup + r.writablePartitions = append([]*topicPartition{}, r.writablePartitions...) + r.partitions = append([]*topicPartition{}, r.partitions...) + lr = oldNew{l, r} + topics[topic] = lr + } + return lr, true + } + + // modifyP returns the old topicPartition and a new one that will be + // used in migrateTo. The new topicPartition only contains the sink + // and topicPartitionData that will be copied into old under old's + // mutex. The actual migration is done in the migrate function (see + // below). + // + // A migration is not needed if the old value has a higher leader + // epoch. If the leader epoch is equal, we check if the leader is the + // same (this allows easier injection of failures in local testing). A + // higher epoch can come from a concurrent metadata update that + // actually performed the move first. + modifyP := func(d *topicPartitionsData, partition int32, td topicPartitionData) (old, new *topicPartition, modified bool) { + old = d.partitions[partition] + if old.leaderEpoch > td.leaderEpoch { + return nil, nil, false + } + if old.leaderEpoch == td.leaderEpoch && old.leader == td.leader { + return nil, nil, false + } + + cl.sinksAndSourcesMu.Lock() + sns := cl.sinksAndSources[td.leader] + cl.sinksAndSourcesMu.Unlock() + + dup := *old + new = &dup + new.topicPartitionData = topicPartitionData{ + leader: td.leader, + leaderEpoch: td.leaderEpoch, + } + if new.records != nil { + new.records = &recBuf{ + sink: sns.sink, + topicPartitionData: new.topicPartitionData, + } + } else { + new.cursor = &cursor{ + source: sns.source, + topicPartitionData: new.topicPartitionData, + } + } + + // We now have to mirror the new partition back to the topic + // slice that will be atomically stored. 
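+		// (d.partitions and any matching entry in d.writablePartitions
+		// must reference the same *topicPartition, hence the binary
+		// search and the identity check below.)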
+ d.partitions[partition] = new + idxWritable := sort.Search(len(d.writablePartitions), func(i int) bool { return d.writablePartitions[i].partition() >= partition }) + if idxWritable < len(d.writablePartitions) && d.writablePartitions[idxWritable].partition() == partition { + if d.writablePartitions[idxWritable] != old { + panic("invalid invariant -- partition in writablePartitions != partition at expected index in partitions") + } + d.writablePartitions[idxWritable] = new + } + + return old, new, true + } + + if k.recBufs != nil { + tpsProducer := cl.producer.topics.load() // must be non-nil, since we have recBufs to move + for recBuf, td := range k.recBufs { + lr, ok := getT(tpsProducer, recBuf.topic) + if !ok { + continue // perhaps concurrently purged + } + old, new, modified := modifyP(lr.r, recBuf.partition, td) + if modified { + cl.cfg.logger.Log(LogLevelInfo, "moving producing partition due to kip-951 not_leader_for_partition", + "topic", recBuf.topic, + "partition", recBuf.partition, + "new_leader", new.leader, + "new_leader_epoch", new.leaderEpoch, + "old_leader", old.leader, + "old_leader_epoch", old.leaderEpoch, + ) + old.migrateProductionTo(new) + } else { + recBuf.clearFailing() + } + } + } else { + var tpsConsumer topicsPartitionsData + c := &cl.consumer + switch { + case c.g != nil: + tpsConsumer = c.g.tps.load() + case c.d != nil: + tpsConsumer = c.d.tps.load() + } + css := &consumerSessionStopper{cl: cl} + defer css.maybeRestart() + for cursor, td := range k.cursors { + lr, ok := getT(tpsConsumer, cursor.topic) + if !ok { + continue // perhaps concurrently purged + } + old, new, modified := modifyP(lr.r, cursor.partition, td) + if modified { + cl.cfg.logger.Log(LogLevelInfo, "moving consuming partition due to kip-951 not_leader_for_partition", + "topic", cursor.topic, + "partition", cursor.partition, + "new_leader", new.leader, + "new_leader_epoch", new.leaderEpoch, + "old_leader", old.leader, + "old_leader_epoch", old.leaderEpoch, + ) + old.migrateCursorTo(new, css) + } + } + } + + // We can always do a simple store. For producing, we *must* have + // had partitions, so this is not updating an unknown topic. + for _, lr := range topics { + lr.l.v.Store(lr.r) + } +} + +// Migrating a cursor requires stopping any consumer session. If we +// stop a session, we need to eventually re-start any offset listing or +// epoch loading that was stopped. Thus, we simply merge what we +// stopped into what we will reload. 
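+//
+// Typical use during a metadata update (a sketch; doMove above follows this
+// shape):
+//
+//	css := &consumerSessionStopper{cl: cl}
+//	defer css.maybeRestart()
+//	// for each partition whose leader moved:
+//	old.migrateCursorTo(new, css) // css.stop() is called only when needed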
+type consumerSessionStopper struct { + cl *Client + stopped bool + reloadOffsets listOrEpochLoads + tpsPrior *topicsPartitions +} + +func (css *consumerSessionStopper) stop() { + if css.stopped { + return + } + css.stopped = true + loads, tps := css.cl.consumer.stopSession() + css.reloadOffsets.mergeFrom(loads) + css.tpsPrior = tps +} + +func (css *consumerSessionStopper) maybeRestart() { + if !css.stopped { + return + } + session := css.cl.consumer.startNewSession(css.tpsPrior) + defer session.decWorker() + css.reloadOffsets.loadWithSession(session, "resuming reload offsets after session stopped for cursor migrating in metadata") +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kgo/txn.go b/vendor/github.com/twmb/franz-go/pkg/kgo/txn.go new file mode 100644 index 00000000000..304f28b40aa --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kgo/txn.go @@ -0,0 +1,1276 @@ +package kgo + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + "time" + + "github.com/twmb/franz-go/pkg/kmsg" + + "github.com/twmb/franz-go/pkg/kerr" +) + +func ctx2fn(ctx context.Context) func() context.Context { return func() context.Context { return ctx } } + +// TransactionEndTry is simply a named bool. +type TransactionEndTry bool + +const ( + // TryAbort attempts to end a transaction with an abort. + TryAbort TransactionEndTry = false + + // TryCommit attempts to end a transaction with a commit. + TryCommit TransactionEndTry = true +) + +// GroupTransactSession abstracts away the proper way to begin and end a +// transaction when consuming in a group, modifying records, and producing +// (EOS). +// +// If you are running Kafka 2.5+, it is strongly recommended that you also use +// RequireStableFetchOffsets. See that config option's documentation for more +// details. +type GroupTransactSession struct { + cl *Client + + failMu sync.Mutex + + revoked bool + revokedCh chan struct{} // closed once when revoked is set; reset after End + lost bool + lostCh chan struct{} // closed once when lost is set; reset after End +} + +// NewGroupTransactSession is exactly the same as NewClient, but wraps the +// client's OnPartitionsRevoked / OnPartitionsLost to ensure that transactions +// are correctly aborted whenever necessary so as to properly provide EOS. +// +// When ETLing in a group in a transaction, if a rebalance happens before the +// transaction is ended, you either (a) must block the rebalance from finishing +// until you are done producing, and then commit before unblocking, or (b) +// allow the rebalance to happen, but abort any work you did. +// +// The problem with (a) is that if your ETL work loop is slow, you run the risk +// of exceeding the rebalance timeout and being kicked from the group. You will +// try to commit, and depending on the Kafka version, the commit may even be +// erroneously successful (pre Kafka 2.5). This will lead to duplicates. +// +// Instead, for safety, a GroupTransactSession favors (b). If a rebalance +// occurs at any time before ending a transaction with a commit, this will +// abort the transaction. +// +// This leaves the risk that ending the transaction itself exceeds the +// rebalance timeout, but this is just one request with no cpu logic. With a +// proper rebalance timeout, this single request will not fail and the commit +// will succeed properly. 
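+//
+// A typical consume-modify-produce (EOS) loop looks roughly like the
+// following sketch; broker, topic, and group names are hypothetical and
+// error handling is elided:
+//
+//	sess, _ := NewGroupTransactSession(
+//		SeedBrokers("localhost:9092"),
+//		ConsumerGroup("my-group"),
+//		ConsumeTopics("in"),
+//		TransactionalID("my-txn-id"),
+//		RequireStableFetchOffsets(),
+//	)
+//	for {
+//		fetches := sess.PollFetches(ctx)
+//		_ = sess.Begin()
+//		fetches.EachRecord(func(r *Record) {
+//			sess.Produce(ctx, &Record{Topic: "out", Value: r.Value}, nil)
+//		})
+//		committed, _ := sess.End(ctx, TryCommit)
+//		_ = committed // false means the work was aborted and will be re-consumed
+//	}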
+// +// If this client detects you are talking to a pre-2.5 cluster, OR if you have +// not enabled RequireStableFetchOffsets, the client will sleep for 200ms after +// a successful commit to allow Kafka's txn markers to propagate. This is not +// foolproof in the event of some extremely unlikely communication patterns and +// **potentially** could allow duplicates. See this repo's transaction's doc +// for more details. +func NewGroupTransactSession(opts ...Opt) (*GroupTransactSession, error) { + s := &GroupTransactSession{ + revokedCh: make(chan struct{}), + lostCh: make(chan struct{}), + } + + var noGroup error + + // We append one option, which will get applied last. Because it is + // applied last, we can execute some logic and override some existing + // options. + opts = append(opts, groupOpt{func(cfg *cfg) { + if cfg.group == "" { + cfg.seedBrokers = nil // force a validation error + noGroup = errors.New("missing required group") + return + } + + userRevoked := cfg.onRevoked + cfg.onRevoked = func(ctx context.Context, cl *Client, rev map[string][]int32) { + s.failMu.Lock() + defer s.failMu.Unlock() + if s.revoked { + return + } + + if cl.consumer.g.cooperative.Load() && len(rev) == 0 && !s.revoked { + cl.cfg.logger.Log(LogLevelInfo, "transact session in on_revoke with nothing to revoke; allowing next commit") + } else { + cl.cfg.logger.Log(LogLevelInfo, "transact session in on_revoke; aborting next commit if we are currently in a transaction") + s.revoked = true + close(s.revokedCh) + } + + if userRevoked != nil { + userRevoked(ctx, cl, rev) + } + } + + userLost := cfg.onLost + cfg.onLost = func(ctx context.Context, cl *Client, lost map[string][]int32) { + s.failMu.Lock() + defer s.failMu.Unlock() + if s.lost { + return + } + + cl.cfg.logger.Log(LogLevelInfo, "transact session in on_lost; aborting next commit if we are currently in a transaction") + s.lost = true + close(s.lostCh) + + if userLost != nil { + userLost(ctx, cl, lost) + } else if userRevoked != nil { + userRevoked(ctx, cl, lost) + } + } + }}) + + cl, err := NewClient(opts...) + if err != nil { + if noGroup != nil { + err = noGroup + } + return nil, err + } + s.cl = cl + return s, nil +} + +// Client returns the underlying client that this transact session wraps. This +// can be useful for functions that require a client, such as raw requests. The +// returned client should not be used to manage transactions (leave that to the +// GroupTransactSession). +func (s *GroupTransactSession) Client() *Client { + return s.cl +} + +// Close is a wrapper around Client.Close, with the exact same semantics. +// Refer to that function's documentation. +// +// This function must be called to leave the group before shutting down. +func (s *GroupTransactSession) Close() { + s.cl.Close() +} + +// AllowRebalance is a wrapper around Client.AllowRebalance, with the exact +// same semantics. Refer to that function's documentation. +func (s *GroupTransactSession) AllowRebalance() { + s.cl.AllowRebalance() +} + +// CloseAllowingRebalance is a wrapper around Client.CloseAllowingRebalance, +// with the exact same semantics. Refer to that function's documentation. +func (s *GroupTransactSession) CloseAllowingRebalance() { + s.cl.CloseAllowingRebalance() +} + +// PollFetches is a wrapper around Client.PollFetches, with the exact same +// semantics. Refer to that function's documentation. +// +// It is invalid to call PollFetches concurrently with Begin or End. 
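+// (Rationale, roughly: End resets the session's revoked/lost state under
+// failMu, and polling can trigger the rebalance callbacks that take the same
+// mutex, so interleaving the two is not safe.)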
+func (s *GroupTransactSession) PollFetches(ctx context.Context) Fetches { + return s.cl.PollFetches(ctx) +} + +// PollRecords is a wrapper around Client.PollRecords, with the exact same +// semantics. Refer to that function's documentation. +// +// It is invalid to call PollRecords concurrently with Begin or End. +func (s *GroupTransactSession) PollRecords(ctx context.Context, maxPollRecords int) Fetches { + return s.cl.PollRecords(ctx, maxPollRecords) +} + +// ProduceSync is a wrapper around Client.ProduceSync, with the exact same +// semantics. Refer to that function's documentation. +// +// It is invalid to call ProduceSync concurrently with Begin or End. +func (s *GroupTransactSession) ProduceSync(ctx context.Context, rs ...*Record) ProduceResults { + return s.cl.ProduceSync(ctx, rs...) +} + +// Produce is a wrapper around Client.Produce, with the exact same semantics. +// Refer to that function's documentation. +// +// It is invalid to call Produce concurrently with Begin or End. +func (s *GroupTransactSession) Produce(ctx context.Context, r *Record, promise func(*Record, error)) { + s.cl.Produce(ctx, r, promise) +} + +// TryProduce is a wrapper around Client.TryProduce, with the exact same +// semantics. Refer to that function's documentation. +// +// It is invalid to call TryProduce concurrently with Begin or End. +func (s *GroupTransactSession) TryProduce(ctx context.Context, r *Record, promise func(*Record, error)) { + s.cl.TryProduce(ctx, r, promise) +} + +// Begin begins a transaction, returning an error if the client has no +// transactional id or is already in a transaction. Begin must be called +// before producing records in a transaction. +func (s *GroupTransactSession) Begin() error { + s.cl.cfg.logger.Log(LogLevelInfo, "beginning transact session") + return s.cl.BeginTransaction() +} + +func (s *GroupTransactSession) failed() bool { + return s.revoked || s.lost +} + +// End ends a transaction, committing if commit is true, if the group did not +// rebalance since the transaction began, and if committing offsets is +// successful. If any of these conditions are false, this aborts. This flushes +// or aborts depending on `commit`. +// +// This returns whether the transaction committed or any error that occurred. +// No returned error is retryable. Either the transactional ID has entered a +// failed state, or the client retried so much that the retry limit was hit, +// and odds are you should not continue. While a context is allowed, canceling +// it will likely leave the client in an invalid state. Canceling should only +// be done if you want to shut down. 
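+//
+// For example (a sketch, with error handling elided):
+//
+//	committed, err := sess.End(ctx, TryCommit)
+//	switch {
+//	case err != nil:
+//		// Not retryable: decide whether to shut down.
+//	case !committed:
+//		// A rebalance or an abortable commit error occurred; the work
+//		// was aborted and the records will be re-consumed.
+//	}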
+func (s *GroupTransactSession) End(ctx context.Context, commit TransactionEndTry) (committed bool, err error) {
+	defer func() {
+		s.failMu.Lock()
+		s.revoked = false
+		s.revokedCh = make(chan struct{})
+		s.lost = false
+		s.lostCh = make(chan struct{})
+		s.failMu.Unlock()
+	}()
+
+	switch commit {
+	case TryCommit:
+		if err := s.cl.Flush(ctx); err != nil {
+			return false, err // we do not abort below, because an error here is ctx closing
+		}
+	case TryAbort:
+		if err := s.cl.AbortBufferedRecords(ctx); err != nil {
+			return false, err // same
+		}
+	}
+
+	wantCommit := bool(commit)
+
+	s.failMu.Lock()
+	failed := s.failed()
+
+	precommit := s.cl.CommittedOffsets()
+	postcommit := s.cl.UncommittedOffsets()
+	s.failMu.Unlock()
+
+	var hasAbortableCommitErr bool
+	var commitErr error
+	var g *groupConsumer
+
+	kip447 := false
+	if wantCommit && !failed {
+		isAbortableCommitErr := func(err error) bool {
+			// ILLEGAL_GENERATION: rebalance began and completed
+			// before we committed.
+			//
+			// REBALANCE_IN_PROGRESS: rebalance began, abort.
+			//
+			// COORDINATOR_NOT_AVAILABLE,
+			// COORDINATOR_LOAD_IN_PROGRESS,
+			// NOT_COORDINATOR: request failed too many times
+			//
+			// CONCURRENT_TRANSACTIONS: Kafka not harmonized,
+			// we can just abort.
+			//
+			// UNKNOWN_SERVER_ERROR: technically should not happen,
+			// but we can just abort. Redpanda returns this in
+			// certain versions.
+			switch {
+			case errors.Is(err, kerr.IllegalGeneration),
+				errors.Is(err, kerr.RebalanceInProgress),
+				errors.Is(err, kerr.CoordinatorNotAvailable),
+				errors.Is(err, kerr.CoordinatorLoadInProgress),
+				errors.Is(err, kerr.NotCoordinator),
+				errors.Is(err, kerr.ConcurrentTransactions),
+				errors.Is(err, kerr.UnknownServerError),
+				errors.Is(err, kerr.TransactionAbortable):
+				return true
+			}
+			return false
+		}
+
+		var commitErrs []string
+
+		committed := make(chan struct{})
+		g = s.cl.commitTransactionOffsets(ctx, postcommit,
+			func(_ *kmsg.TxnOffsetCommitRequest, resp *kmsg.TxnOffsetCommitResponse, err error) {
+				defer close(committed)
+				if err != nil {
+					if isAbortableCommitErr(err) {
+						hasAbortableCommitErr = true
+						return
+					}
+					commitErrs = append(commitErrs, err.Error())
+					return
+				}
+				kip447 = resp.Version >= 3
+
+				for _, t := range resp.Topics {
+					for _, p := range t.Partitions {
+						if err := kerr.ErrorForCode(p.ErrorCode); err != nil {
+							if isAbortableCommitErr(err) {
+								hasAbortableCommitErr = true
+							} else {
+								commitErrs = append(commitErrs, fmt.Sprintf("topic %s partition %d: %v", t.Topic, p.Partition, err))
+							}
+						}
+					}
+				}
+			},
+		)
+		<-committed
+
+		if len(commitErrs) > 0 {
+			commitErr = fmt.Errorf("unable to commit transaction offsets: %s", strings.Join(commitErrs, ", "))
+		}
+	}
+
+	// Now that we have committed our offsets, before we allow them to be
+	// used, we force a heartbeat. By forcing a heartbeat, if there is no
+	// error, then we know we have up to RebalanceTimeout to write our
+	// EndTxnRequest without a problem.
+	//
+	// We should not be booted from the group if we receive an ok
+	// heartbeat, meaning that, as mentioned, we should be able to end the
+	// transaction safely.
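+	// (heartbeatForceCh is serviced by the group's heartbeat loop: sending
+	// a callback on it forces an immediate heartbeat and reports the
+	// heartbeat's error back to us in the select below.)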
+ var okHeartbeat bool + if g != nil && commitErr == nil { + waitHeartbeat := make(chan struct{}) + var heartbeatErr error + select { + case g.heartbeatForceCh <- func(err error) { + defer close(waitHeartbeat) + heartbeatErr = err + }: + select { + case <-waitHeartbeat: + okHeartbeat = heartbeatErr == nil + case <-s.revokedCh: + case <-s.lostCh: + } + case <-s.revokedCh: + case <-s.lostCh: + } + } + + s.failMu.Lock() + + // If we know we are KIP-447 and the user is requiring stable, we can + // unlock immediately because Kafka will itself block a rebalance + // fetching offsets from outstanding transactions. + // + // If either of these are false, we spin up a goroutine that sleeps for + // 200ms before unlocking to give Kafka a chance to avoid some odd race + // that would permit duplicates (i.e., what KIP-447 is preventing). + // + // This 200ms is not perfect but it should be well enough time on a + // stable cluster. On an unstable cluster, I still expect clients to be + // slower than intra-cluster communication, but there is a risk. + if kip447 && s.cl.cfg.requireStable { + defer s.failMu.Unlock() + } else { + defer func() { + if committed { + s.cl.cfg.logger.Log(LogLevelDebug, "sleeping 200ms before allowing a rebalance to continue to give the brokers a chance to write txn markers and avoid duplicates") + go func() { + time.Sleep(200 * time.Millisecond) + s.failMu.Unlock() + }() + } else { + s.failMu.Unlock() + } + }() + } + + tryCommit := !s.failed() && commitErr == nil && !hasAbortableCommitErr && okHeartbeat + willTryCommit := wantCommit && tryCommit + + s.cl.cfg.logger.Log(LogLevelInfo, "transaction session ending", + "was_failed", s.failed(), + "want_commit", wantCommit, + "can_try_commit", tryCommit, + "will_try_commit", willTryCommit, + ) + + // We have a few potential retryable errors from EndTransaction. + // OperationNotAttempted will be returned at most once. + // + // UnknownServerError should not be returned, but some brokers do: + // technically this is fatal, but there is no downside to retrying + // (even retrying a commit) and seeing if we are successful or if we + // get a better error. 
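+	// (The loop below tries at most 10 times: a commit downgrades to an
+	// abort on OperationNotAttempted or TransactionAbortable, and
+	// UnknownServerError is retried after a backoff.)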
+ var tries int +retry: + endTxnErr := s.cl.EndTransaction(ctx, TransactionEndTry(willTryCommit)) + tries++ + if endTxnErr != nil && tries < 10 { + switch { + case errors.Is(endTxnErr, kerr.OperationNotAttempted): + s.cl.cfg.logger.Log(LogLevelInfo, "end transaction with commit not attempted; retrying as abort") + willTryCommit = false + goto retry + + case errors.Is(endTxnErr, kerr.TransactionAbortable): + s.cl.cfg.logger.Log(LogLevelInfo, "end transaction returned TransactionAbortable; retrying as abort") + willTryCommit = false + goto retry + + case errors.Is(endTxnErr, kerr.UnknownServerError): + s.cl.cfg.logger.Log(LogLevelInfo, "end transaction with commit unknown server error; retrying") + after := time.NewTimer(s.cl.cfg.retryBackoff(tries)) + select { + case <-after.C: // context canceled; we will see when we retry + case <-s.cl.ctx.Done(): + after.Stop() + } + goto retry + } + } + + if !willTryCommit || endTxnErr != nil { + currentCommit := s.cl.CommittedOffsets() + s.cl.cfg.logger.Log(LogLevelInfo, "transact session resetting to current committed state (potentially after a rejoin)", + "tried_commit", willTryCommit, + "commit_err", endTxnErr, + "state_precommit", precommit, + "state_currently_committed", currentCommit, + ) + s.cl.setOffsets(currentCommit, false) + } else if willTryCommit && endTxnErr == nil { + s.cl.cfg.logger.Log(LogLevelInfo, "transact session successful, setting to newly committed state", + "tried_commit", willTryCommit, + "postcommit", postcommit, + ) + s.cl.setOffsets(postcommit, false) + } + + switch { + case commitErr != nil && endTxnErr == nil: + return false, commitErr + + case commitErr == nil && endTxnErr != nil: + return false, endTxnErr + + case commitErr != nil && endTxnErr != nil: + return false, endTxnErr + + default: // both errs nil + committed = willTryCommit + return willTryCommit, nil + } +} + +// BeginTransaction sets the client to a transactional state, erroring if there +// is no transactional ID, or if the producer is currently in a fatal +// (unrecoverable) state, or if the client is already in a transaction. +// +// This must not be called concurrently with other client functions. +func (cl *Client) BeginTransaction() error { + if cl.cfg.txnID == nil { + return errNotTransactional + } + + cl.producer.txnMu.Lock() + defer cl.producer.txnMu.Unlock() + + if cl.producer.inTxn { + return errors.New("invalid attempt to begin a transaction while already in a transaction") + } + + needRecover, didRecover, err := cl.maybeRecoverProducerID(context.Background()) + if needRecover && !didRecover { + cl.cfg.logger.Log(LogLevelInfo, "unable to begin transaction due to unrecoverable producer id error", "err", err) + return fmt.Errorf("producer ID has a fatal, unrecoverable error, err: %w", err) + } + + cl.producer.inTxn = true + cl.producer.producingTxn.Store(true) // allow produces for txns now + cl.cfg.logger.Log(LogLevelInfo, "beginning transaction", "transactional_id", *cl.cfg.txnID) + + return nil +} + +// EndBeginTxnHow controls the safety of how EndAndBeginTransaction executes. +type EndBeginTxnHow uint8 + +const ( + // EndBeginTxnSafe ensures a "safe" execution of EndAndBeginTransaction + // at the expense of speed. This option blocks all produce requests and + // only resumes produce requests when onEnd finishes. Note that some + // produce requests may have finished successfully and records that + // were a part of a transaction may have their promises waiting to be + // called: not all promises are guaranteed to be called. 
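+	// (In short: safe mode trades throughput for the guarantee that no
+	// produce request straddles the end/begin boundary.)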
+ EndBeginTxnSafe EndBeginTxnHow = iota + + // EndBeginTxnUnsafe opts for less safe EndAndBeginTransaction flow to + // achieve higher throughput. This option allows produce requests to + // continue while EndTxn actually commits. This is unsafe because a + // produce request itself only half begins a transaction. Internally, + // AddPartitionsToTxn actually begins a transaction. If your + // application dies before the client is able to successfully issue + // AddPartitionsToTxn, then a transaction will have partially begun + // within Kafka: the partial transaction will prevent the partition + // from being consumable past where the transaction begun, and the + // transaction will not timeout. You will have to restart your + // application with the SAME transactional ID and produce to all the + // same partitions to ensure to resume the transaction and unstick the + // partitions. + // + // Also note: this option does not work on all broker implementations. + // This relies on Kafka internals. Some brokers (notably Redpanda) are + // more strict with enforcing transaction correctness and this option + // cannot be used and will cause errors. + // + // Deprecated: Kafka 3.6 removed support for the hacky behavior that + // this option was abusing. Thus, as of Kafka 3.6, this option does not + // work against Kafka. This option also has never worked for Redpanda + // because Redpanda always strictly validated that partitions were a + // part of a transaction. Later versions of Kafka and Redpanda will + // remove the need for AddPartitionsToTxn at all and thus this option + // ultimately will be unnecessary anyway. + EndBeginTxnUnsafe +) + +// EndAndBeginTransaction is a combination of EndTransaction and +// BeginTransaction, and relaxes the restriction that the client must have no +// buffered records. This function does not flush nor abort any buffered +// records. It is ok to concurrently produce while this function executes. +// +// This function has different safety guarantees which are up to the user to +// decide. See the documentation on EndBeginTxnHow for which you would like to +// choose. +// +// The onEnd function is called with your input context and the result of +// EndTransaction. Promises are paused while onEnd executes. If onEnd returns +// an error, BeginTransaction is not called and this function returns the +// result of onEnd. Otherwise, this function returns the result of +// BeginTransaction. See the documentation on EndTransaction and +// BeginTransaction for further details. It is invalid to call this function +// more than once at a time, and it is invalid to call concurrent with +// EndTransaction or BeginTransaction. +func (cl *Client) EndAndBeginTransaction( + ctx context.Context, + how EndBeginTxnHow, + commit TransactionEndTry, + onEnd func(context.Context, error) error, +) (rerr error) { + if g := cl.consumer.g; g != nil { + return errors.New("cannot use EndAndBeginTransaction with EOS") + } + + cl.producer.txnMu.Lock() + defer cl.producer.txnMu.Unlock() + + // From BeginTransaction: if we return with no error, we begin. Unlike + // BeginTransaction, we do not error if in a transaction, because we + // expect to be in one. 
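+//
+// A sketch of committing a batch while production continues (hypothetical
+// usage, error handling elided):
+//
+//	err := cl.EndAndBeginTransaction(ctx, EndBeginTxnSafe, TryCommit,
+//		func(_ context.Context, endErr error) error {
+//			return endErr // do not begin a new txn if ending failed
+//		},
+//	)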
+ defer func() { + if rerr == nil { + needRecover, didRecover, err := cl.maybeRecoverProducerID(ctx) + if needRecover && !didRecover { + cl.cfg.logger.Log(LogLevelInfo, "unable to begin transaction due to unrecoverable producer id error", "err", err) + rerr = fmt.Errorf("producer ID has a fatal, unrecoverable error, err: %w", err) + return + } + cl.producer.inTxn = true + cl.cfg.logger.Log(LogLevelInfo, "beginning transaction", "transactional_id", *cl.cfg.txnID) + } + }() + + // If end/beginning safely, we have to pause AddPartitionsToTxn and + // ProduceRequest, and we only resume after the user's onEnd has been + // called. + if how == EndBeginTxnSafe { + if err := cl.producer.pause(ctx); err != nil { + return err + } + defer cl.producer.resume() + } + + // Before BeginTransaction, we block promises & call onEnd with whatever + // the return error is. + cl.producer.promisesMu.Lock() + var promisesUnblocked bool + unblockPromises := func() { + if promisesUnblocked { + return + } + promisesUnblocked = true + defer cl.producer.promisesMu.Unlock() + rerr = onEnd(ctx, rerr) + } + defer unblockPromises() + + if !cl.producer.inTxn { + return nil + } + + var anyAdded bool + var readd map[string][]int32 + for topic, parts := range cl.producer.topics.load() { + for i, part := range parts.load().partitions { + if part.records.addedToTxn.Swap(false) { + if how == EndBeginTxnUnsafe { + if readd == nil { + readd = make(map[string][]int32) + } + readd[topic] = append(readd[topic], int32(i)) + } + anyAdded = true + } + } + } + anyAdded = anyAdded || cl.producer.readded + + // EndTxn when no txn was started returns INVALID_TXN_STATE. + if !anyAdded { + cl.cfg.logger.Log(LogLevelDebug, "no records were produced during the commit; thus no transaction was began; ending without doing anything") + return nil + } + + // From EndTransaction: if the pid has an error, we may try to recover. + id, epoch, err := cl.producerID(ctx2fn(ctx)) + if err != nil { + if commit { + return kerr.OperationNotAttempted + } + if _, didRecover, _ := cl.maybeRecoverProducerID(ctx); didRecover { + return nil + } + } + cl.cfg.logger.Log(LogLevelInfo, "ending transaction", + "transactional_id", *cl.cfg.txnID, + "producer_id", id, + "epoch", epoch, + "commit", commit, + ) + cl.producer.readded = false + err = cl.doWithConcurrentTransactions(ctx, "EndTxn", func() error { + req := kmsg.NewPtrEndTxnRequest() + req.TransactionalID = *cl.cfg.txnID + req.ProducerID = id + req.ProducerEpoch = epoch + req.Commit = bool(commit) + resp, err := req.RequestWith(ctx, cl) + if err != nil { + return err + } + + // When ending a transaction, if the user is using unsafe mode, + // there is a logic race where the user can actually end before + // AddPartitionsToTxn is issued. This should be rare and is + // most likely only to happen whenever a new transaction is + // starting from a not-in-transaction state (i.e., the first + // transaction). If we see InvalidTxnState in unsafe mode, we + // assume that a transaction was not actually begun and we + // return success. + // + // In Kafka, InvalidTxnState is also returned when producing + // non-transactional records from a producer that is currently + // in a transaction. 
+ // + // All other cases it is returned is in EndTxn: + // * state == CompleteCommit and EndTxn != commit + // * state == CompleteAbort and EndTxn != abort + // * state == PrepareCommit and EndTxn != commit (otherwise, returns concurrent transactions) + // * state == PrepareAbort and EndTxn != abort (otherwise, returns concurrent transactions) + // * state == Empty + // + // This basically guards against the final case, all others are + // Kafka internal state transitioning and we should never hit + // them. + if how == EndBeginTxnUnsafe && resp.ErrorCode == kerr.InvalidTxnState.Code { + return nil + } + return kerr.ErrorForCode(resp.ErrorCode) + }) + var ke *kerr.Error + if errors.As(err, &ke) && !ke.Retriable { + cl.failProducerID(id, epoch, err) + } + if err != nil || how != EndBeginTxnUnsafe { + return err + } + unblockPromises() + + // If we are end/beginning unsafely, then we need to re-add all + // partitions to a new transaction immediately. Timing makes it + // impossible to know what was truly added before EndTxn, so we + // pessimistically assume that every partition must be re-added. + // + // We track readd before the txn and swap those to un-added, but we + // also need to track anything that is newly added that raced with our + // EndTxn. We swap before the txn to ensure that *eventually*, + // partitions will be tracked as not in a transaction if people stop + // producing. + // + // We do this before the user callback because we *need* to start a new + // transaction within Kafka to ensure there will be a timeout. Per the + // unsafe aspect, the client could die or this request could error and + // there could be a stranded txn within Kafka's ProducerStateManager, + // but ideally the user will reconnect with the same txnal id. + cl.producer.readded = true + return cl.doWithConcurrentTransactions(ctx, "AddPartitionsToTxn", func() error { + req := kmsg.NewPtrAddPartitionsToTxnRequest() + req.TransactionalID = *cl.cfg.txnID + req.ProducerID = id + req.ProducerEpoch = epoch + + for topic, parts := range cl.producer.topics.load() { + for i, part := range parts.load().partitions { + if part.records.addedToTxn.Load() { + readd[topic] = append(readd[topic], int32(i)) + } + } + } + + ps := make(map[int32]struct{}) + for topic, parts := range readd { + t := kmsg.NewAddPartitionsToTxnRequestTopic() + t.Topic = topic + for _, part := range parts { + ps[part] = struct{}{} + } + for p := range ps { + t.Partitions = append(t.Partitions, p) + delete(ps, p) + } + if len(t.Partitions) > 0 { + req.Topics = append(req.Topics, t) + } + } + + resp, err := req.RequestWith(ctx, cl) + if err != nil { + return err + } + + for i := range resp.Topics { + t := &resp.Topics[i] + for j := range t.Partitions { + p := &t.Partitions[j] + if err := kerr.ErrorForCode(p.ErrorCode); err != nil { + return err + } + } + } + return nil + }) +} + +// AbortBufferedRecords fails all unflushed records with ErrAborted and waits +// for there to be no buffered records. +// +// This accepts a context to quit the wait early, but quitting the wait may +// lead to an invalid state and should only be used if you are quitting your +// application. This function waits to abort records at safe points: if records +// are known to not be in flight. This function is safe to call multiple times +// concurrently, and safe to call concurrent with Flush. +// +// NOTE: This aborting record waits until all inflight requests have known +// responses. The client must wait to ensure no duplicate sequence number +// issues. 
For more details, and for an immediate alternative, check the +// documentation on UnsafeAbortBufferedRecords. +func (cl *Client) AbortBufferedRecords(ctx context.Context) error { + cl.producer.aborting.Add(1) + defer cl.producer.aborting.Add(-1) + + cl.cfg.logger.Log(LogLevelInfo, "producer state set to aborting; continuing to wait via flushing") + defer cl.cfg.logger.Log(LogLevelDebug, "aborted buffered records") + + // We must clear unknown topics ourselves, because flush just waits + // like normal. + p := &cl.producer + p.unknownTopicsMu.Lock() + for _, unknown := range p.unknownTopics { + select { + case unknown.fatal <- ErrAborting: + default: + } + } + p.unknownTopicsMu.Unlock() + + // Setting the aborting state allows records to fail before + // or after produce requests; thus, now we just flush. + return cl.Flush(ctx) +} + +// UnsafeAbortBufferedRecords fails all unflushed records with ErrAborted and +// waits for there to be no buffered records. This function does NOT wait for +// any inflight produce requests to finish, meaning topics in the client may be +// in an invalid state and producing to an invalid-state topic may cause the +// client to enter a fatal failed state. If you want to produce to topics that +// were unsafely aborted, it is recommended to use PurgeTopicsFromClient to +// forcefully reset the topics before producing to them again. +// +// When producing with idempotency enabled or with transactions, every record +// has a sequence number. The client must wait for inflight requests to have +// responses before failing a record, otherwise the client cannot know if a +// sequence number was seen by the broker and tracked or not seen by the broker +// and not tracked. By unsafely aborting, the client forcefully abandons all +// records, and producing to the topics again may re-use a sequence number and +// cause internal errors. +func (cl *Client) UnsafeAbortBufferedRecords() { + cl.failBufferedRecords(ErrAborting) +} + +// EndTransaction ends a transaction and resets the client's internal state to +// not be in a transaction. +// +// Flush and CommitOffsetsForTransaction must be called before this function; +// this function does not flush and does not itself ensure that all buffered +// records are flushed. If no record yet has caused a partition to be added to +// the transaction, this function does nothing and returns nil. Alternatively, +// AbortBufferedRecords should be called before aborting a transaction to +// ensure that any buffered records not yet flushed will not be a part of a new +// transaction. +// +// If the producer ID has an error and you are trying to commit, this will +// return with kerr.OperationNotAttempted. If this happened, retry +// EndTransaction with TryAbort. If this returns kerr.TransactionAbortable, you +// can retry with TryAbort. No other error is retryable, and you should not +// retry with TryAbort. +// +// If records failed with UnknownProducerID and your Kafka version is at least +// 2.5, then aborting here will potentially allow the client to recover for +// more production. +// +// Note that canceling the context will likely leave the client in an +// undesirable state, because canceling the context may cancel the in-flight +// EndTransaction request, making it impossible to know whether the commit or +// abort was successful. It is recommended to not cancel the context. 
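+//
+// Typical manual producer-only flow (a sketch; GroupTransactSession
+// automates the group variant of this):
+//
+//	_ = cl.BeginTransaction()
+//	// ... produce ...
+//	if err := cl.Flush(ctx); err == nil {
+//		err = cl.EndTransaction(ctx, TryCommit)
+//		if errors.Is(err, kerr.OperationNotAttempted) {
+//			err = cl.EndTransaction(ctx, TryAbort) // nothing was committed; abort instead
+//		}
+//	}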
+func (cl *Client) EndTransaction(ctx context.Context, commit TransactionEndTry) error { + cl.producer.txnMu.Lock() + defer cl.producer.txnMu.Unlock() + + if !cl.producer.inTxn { + return nil + } + cl.producer.inTxn = false + + cl.producer.producingTxn.Store(false) // forbid any new produces while ending txn + + // anyAdded tracks if any partitions were added to this txn, because + // any partitions written to triggers AddPartitionToTxn, which triggers + // the txn to actually begin within Kafka. + // + // If we consumed at all but did not produce, the transaction ending + // issues AddOffsetsToTxn, which internally adds a __consumer_offsets + // partition to the transaction. Thus, if we added offsets, then we + // also produced. + var anyAdded bool + if g := cl.consumer.g; g != nil { + // We do not lock because we expect commitTransactionOffsets to + // be called *before* ending a transaction. + if g.offsetsAddedToTxn { + g.offsetsAddedToTxn = false + anyAdded = true + } + } else { + cl.cfg.logger.Log(LogLevelDebug, "transaction ending, no group loaded; this must be a producer-only transaction, not consume-modify-produce EOS") + } + + // After the flush, no records are being produced to, and we can set + // addedToTxn to false outside of any mutex. + for _, parts := range cl.producer.topics.load() { + for _, part := range parts.load().partitions { + anyAdded = part.records.addedToTxn.Swap(false) || anyAdded + } + } + + // If the user previously used EndAndBeginTransaction with + // EndBeginTxnUnsafe, we may have to end a transaction even though + // nothing may be in it. + anyAdded = anyAdded || cl.producer.readded + + // If no partition was added to a transaction, then we have nothing to commit. + // + // Note that anyAdded is true if the producer ID was failed, meaning we will + // get to the potential recovery logic below if necessary. + if !anyAdded { + cl.cfg.logger.Log(LogLevelDebug, "no records were produced during the commit; thus no transaction was began; ending without doing anything") + return nil + } + + id, epoch, err := cl.producerID(ctx2fn(ctx)) + if err != nil { + if commit { + return kerr.OperationNotAttempted + } + + // If we recovered the producer ID, we return early, since + // there is no reason to issue an abort now that the id is + // different. Otherwise, we issue our EndTxn which will likely + // fail, but that is ok, we will just return error. + _, didRecover, _ := cl.maybeRecoverProducerID(ctx) + if didRecover { + return nil + } + } + + cl.cfg.logger.Log(LogLevelInfo, "ending transaction", + "transactional_id", *cl.cfg.txnID, + "producer_id", id, + "epoch", epoch, + "commit", commit, + ) + + cl.producer.readded = false + err = cl.doWithConcurrentTransactions(ctx, "EndTxn", func() error { + req := kmsg.NewPtrEndTxnRequest() + req.TransactionalID = *cl.cfg.txnID + req.ProducerID = id + req.ProducerEpoch = epoch + req.Commit = bool(commit) + resp, err := req.RequestWith(ctx, cl) + if err != nil { + return err + } + return kerr.ErrorForCode(resp.ErrorCode) + }) + + // If the returned error is still a Kafka error, this is fatal and we + // need to fail our producer ID we loaded above. + // + // UNKNOWN_SERVER_ERROR can theoretically be returned (not all brokers + // do). This technically is fatal, but we do not really know whether it + // is. We can just return this error and let the caller decide to + // continue, if the caller does continue, we will try something and + // eventually then receive our proper transactional error, if any. 
+ var ke *kerr.Error + if errors.As(err, &ke) && !ke.Retriable && ke.Code != kerr.UnknownServerError.Code { + cl.failProducerID(id, epoch, err) + } + + return err +} + +// This returns if it is necessary to recover the producer ID (it has an +// error), whether it is possible to recover, and, if not, the error. +// +// We call this when beginning a transaction or when ending with an abort. +func (cl *Client) maybeRecoverProducerID(ctx context.Context) (necessary, did bool, err error) { + cl.producer.mu.Lock() + defer cl.producer.mu.Unlock() + + id, epoch, err := cl.producerID(ctx2fn(ctx)) + if err == nil { + return false, false, nil + } + + var ke *kerr.Error + if ok := errors.As(err, &ke); !ok { + return true, false, err + } + + kip360 := cl.producer.idVersion >= 3 && (errors.Is(ke, kerr.UnknownProducerID) || errors.Is(ke, kerr.InvalidProducerIDMapping)) + kip588 := cl.producer.idVersion >= 4 && errors.Is(ke, kerr.InvalidProducerEpoch /* || err == kerr.TransactionTimedOut when implemented in Kafka */) + + recoverable := kip360 || kip588 + if !recoverable { + return true, false, err // fatal, unrecoverable + } + + // Storing errReloadProducerID will reset sequence numbers as appropriate + // when the producer ID is reloaded successfully. + cl.producer.id.Store(&producerID{ + id: id, + epoch: epoch, + err: errReloadProducerID, + }) + return true, true, nil +} + +// If a transaction is begun too quickly after finishing an old transaction, +// Kafka may still be finalizing its commit / abort and will return a +// concurrent transactions error. We handle that by retrying for a bit. +func (cl *Client) doWithConcurrentTransactions(ctx context.Context, name string, fn func() error) error { + start := time.Now() + tries := 0 + backoff := cl.cfg.txnBackoff + +start: + err := fn() + if errors.Is(err, kerr.ConcurrentTransactions) { + // The longer we are stalled, the more we enforce a minimum + // backoff. + since := time.Since(start) + switch { + case since > time.Second: + if backoff < 200*time.Millisecond { + backoff = 200 * time.Millisecond + } + case since > 5*time.Second/2: + if backoff < 500*time.Millisecond { + backoff = 500 * time.Millisecond + } + case since > 5*time.Second: + if backoff < time.Second { + backoff = time.Second + } + } + + tries++ + cl.cfg.logger.Log(LogLevelDebug, fmt.Sprintf("%s failed with CONCURRENT_TRANSACTIONS, which may be because we ended a txn and began producing in a new txn too quickly; backing off and retrying", name), + "backoff", backoff, + "since_request_tries_start", time.Since(start), + "tries", tries, + ) + select { + case <-time.After(backoff): + case <-ctx.Done(): + cl.cfg.logger.Log(LogLevelError, fmt.Sprintf("abandoning %s retry due to request ctx quitting", name)) + return err + case <-cl.ctx.Done(): + cl.cfg.logger.Log(LogLevelError, fmt.Sprintf("abandoning %s retry due to client ctx quitting", name)) + return err + } + goto start + } + return err +} + +//////////////////////////////////////////////////////////////////////////////////////////// +// TRANSACTIONAL COMMITTING // +// MOSTLY DUPLICATED CODE DUE TO NO GENERICS AND BECAUSE THE TYPES ARE SLIGHTLY DIFFERENT // +//////////////////////////////////////////////////////////////////////////////////////////// + +// commitTransactionOffsets is exactly like CommitOffsets, but specifically for +// use with transactional consuming and producing. +// +// Since this function is a gigantic footgun if not done properly, we hide this +// and only allow transaction sessions to commit. 
+// +// Unlike CommitOffsets, we do not update the group's uncommitted map. We leave +// that to the calling code to do properly with SetOffsets depending on whether +// an eventual abort happens or not. +func (cl *Client) commitTransactionOffsets( + ctx context.Context, + uncommitted map[string]map[int32]EpochOffset, + onDone func(*kmsg.TxnOffsetCommitRequest, *kmsg.TxnOffsetCommitResponse, error), +) *groupConsumer { + cl.cfg.logger.Log(LogLevelDebug, "in commitTransactionOffsets", "with", uncommitted) + defer cl.cfg.logger.Log(LogLevelDebug, "left commitTransactionOffsets") + + if cl.cfg.txnID == nil { + onDone(nil, nil, errNotTransactional) + return nil + } + + // Before committing, ensure we are at least in a transaction. We + // unlock the producer txnMu before committing to allow EndTransaction + // to go through, even though that could cut off our commit. + cl.producer.txnMu.Lock() + var unlockedTxn bool + unlockTxn := func() { + if !unlockedTxn { + cl.producer.txnMu.Unlock() + } + unlockedTxn = true + } + defer unlockTxn() + if !cl.producer.inTxn { + onDone(nil, nil, errNotInTransaction) + return nil + } + + g := cl.consumer.g + if g == nil { + onDone(kmsg.NewPtrTxnOffsetCommitRequest(), kmsg.NewPtrTxnOffsetCommitResponse(), errNotGroup) + return nil + } + + req, err := g.prepareTxnOffsetCommit(ctx, uncommitted) + if err != nil { + onDone(req, kmsg.NewPtrTxnOffsetCommitResponse(), err) + return g + } + if len(req.Topics) == 0 { + onDone(kmsg.NewPtrTxnOffsetCommitRequest(), kmsg.NewPtrTxnOffsetCommitResponse(), nil) + return g + } + + if !g.offsetsAddedToTxn { + if err := cl.addOffsetsToTxn(ctx, g.cfg.group); err != nil { + if onDone != nil { + onDone(nil, nil, err) + } + return g + } + g.offsetsAddedToTxn = true + } + + unlockTxn() + + if err := g.waitJoinSyncMu(ctx); err != nil { + onDone(kmsg.NewPtrTxnOffsetCommitRequest(), kmsg.NewPtrTxnOffsetCommitResponse(), err) + return nil + } + unblockJoinSync := func(req *kmsg.TxnOffsetCommitRequest, resp *kmsg.TxnOffsetCommitResponse, err error) { + g.noCommitDuringJoinAndSync.RUnlock() + onDone(req, resp, err) + } + g.mu.Lock() + defer g.mu.Unlock() + + g.commitTxn(ctx, req, unblockJoinSync) + return g +} + +// Ties a transactional producer to a group. Since this requires a producer ID, +// this initializes one if it is not yet initialized. This would only be the +// case if trying to commit before any records have been sent. +func (cl *Client) addOffsetsToTxn(ctx context.Context, group string) error { + id, epoch, err := cl.producerID(ctx2fn(ctx)) + if err != nil { + return err + } + + err = cl.doWithConcurrentTransactions(ctx, "AddOffsetsToTxn", func() error { // committing offsets without producing causes a transaction to begin within Kafka + cl.cfg.logger.Log(LogLevelInfo, "issuing AddOffsetsToTxn", + "txn", *cl.cfg.txnID, + "producerID", id, + "producerEpoch", epoch, + "group", group, + ) + req := kmsg.NewPtrAddOffsetsToTxnRequest() + req.TransactionalID = *cl.cfg.txnID + req.ProducerID = id + req.ProducerEpoch = epoch + req.Group = group + resp, err := req.RequestWith(ctx, cl) + if err != nil { + return err + } + return kerr.ErrorForCode(resp.ErrorCode) + }) + + // If the returned error is still a Kafka error, this is fatal and we + // need to fail our producer ID we created just above. + // + // We special case UNKNOWN_SERVER_ERROR, because we do not really know + // if this is fatal. If it is, we will catch it later on a better + // error. 
Some brokers send this when things fail internally; we can
+	// just abort our commit and see whether things are still bad in
+	// EndTransaction.
+	var ke *kerr.Error
+	if errors.As(err, &ke) && !ke.Retriable && ke.Code != kerr.UnknownServerError.Code {
+		cl.failProducerID(id, epoch, err)
+	}
+
+	return err
+}
+
+// commitTxn is ALMOST EXACTLY THE SAME as commit, but changed for txn types,
+// and we avoid updateCommitted. We avoid updating because we manually
+// SetOffsets when ending the transaction.
+func (g *groupConsumer) commitTxn(ctx context.Context, req *kmsg.TxnOffsetCommitRequest, onDone func(*kmsg.TxnOffsetCommitRequest, *kmsg.TxnOffsetCommitResponse, error)) {
+	if onDone == nil { // note we must always call onDone
+		onDone = func(_ *kmsg.TxnOffsetCommitRequest, _ *kmsg.TxnOffsetCommitResponse, _ error) {}
+	}
+
+	if g.commitCancel != nil {
+		g.commitCancel() // cancel any prior commit
+	}
+	priorCancel := g.commitCancel
+	priorDone := g.commitDone
+
+	// Unlike the non-txn consumer, we use the group context for
+	// transaction offset committing. We want to quit when the group is
+	// left, and we are not committing when leaving. We rely on proper
+	// usage of the GroupTransactSession API to issue commits, so there is
+	// no reason not to use the group context here.
+	commitCtx, commitCancel := context.WithCancel(g.ctx) // enable ours to be canceled and waited for
+	commitDone := make(chan struct{})
+
+	g.commitCancel = commitCancel
+	g.commitDone = commitDone
+
+	if ctx.Done() != nil {
+		go func() {
+			select {
+			case <-ctx.Done():
+				commitCancel()
+			case <-commitCtx.Done():
+			}
+		}()
+	}
+
+	go func() {
+		defer close(commitDone) // allow future commits to continue when we are done
+		defer commitCancel()
+		if priorDone != nil {
+			select {
+			case <-priorDone:
+			default:
+				g.cl.cfg.logger.Log(LogLevelDebug, "canceling prior txn offset commit to issue another")
+				priorCancel()
+				<-priorDone // wait for any prior request to finish
+			}
+		}
+		g.cl.cfg.logger.Log(LogLevelDebug, "issuing txn offset commit", "uncommitted", req)
+
+		var resp *kmsg.TxnOffsetCommitResponse
+		var err error
+		if len(req.Topics) > 0 {
+			resp, err = req.RequestWith(commitCtx, g.cl)
+		}
+		if err != nil {
+			onDone(req, nil, err)
+			return
+		}
+		onDone(req, resp, nil)
+	}()
+}
+
+func (g *groupConsumer) prepareTxnOffsetCommit(ctx context.Context, uncommitted map[string]map[int32]EpochOffset) (*kmsg.TxnOffsetCommitRequest, error) {
+	req := kmsg.NewPtrTxnOffsetCommitRequest()
+
+	// We're now generating the producerID before addOffsetsToTxn. We will
+	// not make this request until after addOffsetsToTxn, but it's possible
+	// to fail here due to a failed producerID.
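+	//
+	// The producer ID and epoch below, together with the group generation
+	// and member ID, are what let brokers fence offset commits from zombie
+	// producers and bounced group members (the group fields were added for
+	// this purpose in KIP-447).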
+ id, epoch, err := g.cl.producerID(ctx2fn(ctx)) + if err != nil { + return req, err + } + + req.TransactionalID = *g.cl.cfg.txnID + req.Group = g.cfg.group + req.ProducerID = id + req.ProducerEpoch = epoch + memberID, generation := g.memberGen.load() + req.Generation = generation + req.MemberID = memberID + req.InstanceID = g.cfg.instanceID + + for topic, partitions := range uncommitted { + reqTopic := kmsg.NewTxnOffsetCommitRequestTopic() + reqTopic.Topic = topic + for partition, eo := range partitions { + reqPartition := kmsg.NewTxnOffsetCommitRequestTopicPartition() + reqPartition.Partition = partition + reqPartition.Offset = eo.Offset + reqPartition.LeaderEpoch = eo.Epoch + reqPartition.Metadata = &req.MemberID + reqTopic.Partitions = append(reqTopic.Partitions, reqPartition) + } + req.Topics = append(req.Topics, reqTopic) + } + + if fn, ok := ctx.Value(txnCommitContextFn).(func(*kmsg.TxnOffsetCommitRequest) error); ok { + if err := fn(req); err != nil { + return req, err + } + } + return req, nil +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kmsg/LICENSE b/vendor/github.com/twmb/franz-go/pkg/kmsg/LICENSE new file mode 100644 index 00000000000..36e18034325 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kmsg/LICENSE @@ -0,0 +1,24 @@ +Copyright 2020, Travis Bischel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the library nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/twmb/franz-go/pkg/kmsg/api.go b/vendor/github.com/twmb/franz-go/pkg/kmsg/api.go new file mode 100644 index 00000000000..6bda2e61bd9 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kmsg/api.go @@ -0,0 +1,423 @@ +// Package kmsg contains Kafka request and response types and autogenerated +// serialization and deserialization functions. +// +// This package may bump major versions whenever Kafka makes a backwards +// incompatible protocol change, per the types chosen for this package. For +// example, Kafka can change a field from non-nullable to nullable, which would +// require changing a field from a non-pointer to a pointer. 
We could get
+// around this by making everything an opaque struct and having getters, but
+// that is more tedious than having a few rare major version bumps.
+//
+// If you are using this package directly with kgo, you should either always
+// use New functions, or Default functions after creating structs, or you
+// should pin the max supported version. If you use New functions, you will
+// have safe defaults as new fields are added. If you pin versions, you will
+// avoid new fields being used. If you do neither of these, you may opt in to
+// new fields that do not have safe zero value defaults, and this may lead to
+// errors or unexpected results.
+//
+// Thus, whenever you initialize a struct from this package, do the following:
+//
+//	struct := kmsg.NewFoo()
+//	struct.Field = "value I want to set"
+//
+// Most of this package is generated, but a few things are manual. What is
+// manual: all interfaces, the RequestFormatter, record / message / record
+// batch reading, and sticky member metadata serialization.
+package kmsg
+
+import (
+	"context"
+	"sort"
+
+	"github.com/twmb/franz-go/pkg/kmsg/internal/kbin"
+)
+
+//go:generate cp ../kbin/primitives.go internal/kbin/
+
+// Requestor issues requests. Notably, the kgo.Client and kgo.Broker implement
+// Requestor. All Requests in this package have a RequestWith function for
+// type-safe requests.
+type Requestor interface {
+	// Request issues a Request and returns either a Response or an error.
+	Request(context.Context, Request) (Response, error)
+}
+
+// Request represents a type that can be requested to Kafka.
+type Request interface {
+	// Key returns the protocol key for this message kind.
+	Key() int16
+	// MaxVersion returns the maximum protocol version this message
+	// supports.
+	//
+	// This function allows one to implement a client that chooses message
+	// versions based on the min of a message's max version in the
+	// client and the broker's max supported version.
+	MaxVersion() int16
+	// SetVersion sets the version to use for this request and response.
+	SetVersion(int16)
+	// GetVersion returns the version currently set to use for the request
+	// and response.
+	GetVersion() int16
+	// IsFlexible returns whether the request at its current version is
+	// "flexible" as per KIP-482.
+	IsFlexible() bool
+	// AppendTo appends this message in wire protocol form to a slice and
+	// returns the slice.
+	AppendTo([]byte) []byte
+	// ReadFrom parses all of the input slice into the request type.
+	//
+	// This should return an error if too little data is input.
+	ReadFrom([]byte) error
+	// ResponseKind returns an empty Response that is expected for
+	// this message request.
+	ResponseKind() Response
+}
+
+// AdminRequest represents a request that must be issued to Kafka controllers.
+type AdminRequest interface {
+	// IsAdminRequest is a method attached to requests that must be
+	// issued to Kafka controllers.
+	IsAdminRequest()
+	Request
+}
+
+// GroupCoordinatorRequest represents a request that must be issued to a
+// group coordinator.
+type GroupCoordinatorRequest interface {
+	// IsGroupCoordinatorRequest is a method attached to requests that
+	// must be issued to group coordinators.
+	IsGroupCoordinatorRequest()
+	Request
+}
+
+// TxnCoordinatorRequest represents a request that must be issued to a
+// transaction coordinator.
+type TxnCoordinatorRequest interface {
+	// IsTxnCoordinatorRequest is a method attached to requests that
+	// must be issued to transaction coordinators.
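+	//
+	// For example, EndTxnRequest implements this interface, and a
+	// Requestor that understands these marker interfaces (such as the
+	// kgo.Client) can use them to route the request to the transaction
+	// coordinator. As a sketch:
+	//
+	//	req := kmsg.NewPtrEndTxnRequest()
+	//	// ... fill in TransactionalID, ProducerID, ProducerEpoch, Commit ...
+	//	resp, err := req.RequestWith(ctx, cl) // cl is, e.g., a *kgo.Client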
+	IsTxnCoordinatorRequest()
+	Request
+}
+
+// Response represents a type that Kafka responds with.
+type Response interface {
+	// Key returns the protocol key for this message kind.
+	Key() int16
+	// MaxVersion returns the maximum protocol version this message
+	// supports.
+	MaxVersion() int16
+	// SetVersion sets the version to use for this request and response.
+	SetVersion(int16)
+	// GetVersion returns the version currently set to use for the request
+	// and response.
+	GetVersion() int16
+	// IsFlexible returns whether the response at its current version is
+	// "flexible" as per KIP-482.
+	IsFlexible() bool
+	// AppendTo appends this message in wire protocol form to a slice and
+	// returns the slice.
+	AppendTo([]byte) []byte
+	// ReadFrom parses all of the input slice into the response type.
+	//
+	// This should return an error if too little data is input.
+	ReadFrom([]byte) error
+	// RequestKind returns an empty Request that is expected for
+	// this message response.
+	RequestKind() Request
+}
+
+// UnsafeReadFrom, implemented by all requests and responses generated in this
+// package, switches to using unsafe slice-to-string conversions when reading.
+// This can be used to avoid a lot of garbage, but it means you have to be
+// careful when using any strings in structs: if you hold onto the string, the
+// underlying response slice will not be garbage collected.
+type UnsafeReadFrom interface {
+	UnsafeReadFrom([]byte) error
+}
+
+// ThrottleResponse represents a response that could have a throttle applied by
+// Kafka. Any response that implements ThrottleResponse also implements
+// SetThrottleResponse.
+//
+// Kafka 2.0.0 switched throttles from being applied before responses to being
+// applied after responses.
+type ThrottleResponse interface {
+	// Throttle returns the response's throttle millis value and
+	// whether Kafka applies the throttle after the response.
+	Throttle() (int32, bool)
+}
+
+// SetThrottleResponse sets the throttle in a response that can have a throttle
+// applied. Any kmsg interface that implements ThrottleResponse also implements
+// SetThrottleResponse.
+type SetThrottleResponse interface {
+	// SetThrottle sets the response's throttle millis value.
+	SetThrottle(int32)
+}
+
+// TimeoutRequest represents a request that has a TimeoutMillis field.
+// Any request that implements TimeoutRequest also implements SetTimeoutRequest.
+type TimeoutRequest interface {
+	// Timeout returns the request's timeout millis value.
+	Timeout() int32
+}
+
+// SetTimeoutRequest sets the timeout in a request that can have a timeout
+// applied. Any kmsg interface that implements TimeoutRequest also implements
+// SetTimeoutRequest.
+type SetTimeoutRequest interface {
+	// SetTimeout sets the request's timeout millis value.
+	SetTimeout(timeoutMillis int32)
+}
+
+// RequestFormatter formats requests.
+//
+// The default empty struct works correctly, but can be extended with the
+// NewRequestFormatter function.
+type RequestFormatter struct {
+	clientID *string
+}
+
+// RequestFormatterOpt applies options to a RequestFormatter.
+type RequestFormatterOpt interface {
+	apply(*RequestFormatter)
+}
+
+type formatterOpt struct{ fn func(*RequestFormatter) }
+
+func (opt formatterOpt) apply(f *RequestFormatter) { opt.fn(f) }
+
+// FormatterClientID attaches the given client ID to any issued request,
+// minus controlled shutdown v0, which uses its own special format.
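+//
+// A small usage sketch (the request type chosen here is arbitrary):
+//
+//	f := kmsg.NewRequestFormatter(kmsg.FormatterClientID("my-client"))
+//	req := kmsg.NewPtrApiVersionsRequest()
+//	wire := f.AppendRequest(nil, req, 1 /* correlation ID */)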
+func FormatterClientID(id string) RequestFormatterOpt {
+	return formatterOpt{func(f *RequestFormatter) { f.clientID = &id }}
+}
+
+// NewRequestFormatter returns a RequestFormatter with the opts applied.
+func NewRequestFormatter(opts ...RequestFormatterOpt) *RequestFormatter {
+	a := new(RequestFormatter)
+	for _, opt := range opts {
+		opt.apply(a)
+	}
+	return a
+}
+
+// AppendRequest appends a full message request to dst, returning the updated
+// slice. This message is the full body that needs to be written to issue a
+// Kafka request.
+func (f *RequestFormatter) AppendRequest(
+	dst []byte,
+	r Request,
+	correlationID int32,
+) []byte {
+	dst = append(dst, 0, 0, 0, 0) // reserve length
+	k := r.Key()
+	v := r.GetVersion()
+	dst = kbin.AppendInt16(dst, k)
+	dst = kbin.AppendInt16(dst, v)
+	dst = kbin.AppendInt32(dst, correlationID)
+	// Controlled shutdown v0 (key 7) is special cased: its header has no
+	// client ID field.
+	if k == 7 && v == 0 {
+		return dst
+	}
+
+	// Even with flexible versions, we do not use a compact client id.
+	// Clients issue ApiVersions immediately before knowing the broker
+	// version, and old brokers will not be able to understand a compact
+	// client id.
+	dst = kbin.AppendNullableString(dst, f.clientID)
+
+	// The flexible tags end the request header, and then the request body
+	// begins.
+	if r.IsFlexible() {
+		var numTags uint8
+		dst = append(dst, numTags)
+		if numTags != 0 {
+			// TODO when tags are added
+		}
+	}
+
+	// Now the request body.
+	dst = r.AppendTo(dst)
+
+	// Fill in the length we reserved at the start; the length does not
+	// include its own four bytes.
+	kbin.AppendInt32(dst[:0], int32(len(dst[4:])))
+	return dst
+}
+
+// StringPtr is a helper to return a pointer to a string.
+func StringPtr(in string) *string {
+	return &in
+}
+
+// ReadFrom provides decoding of various versions of sticky member metadata. A
+// key point of this type is that it does not contain a version number inside
+// it, but it is versioned: if decoding v1 fails, this falls back to v0.
+func (s *StickyMemberMetadata) ReadFrom(src []byte) error {
+	return s.readFrom(src, false)
+}
+
+// UnsafeReadFrom is the same as ReadFrom, but uses unsafe slice to string
+// conversions to reduce garbage.
+func (s *StickyMemberMetadata) UnsafeReadFrom(src []byte) error {
+	return s.readFrom(src, true)
+}
+
+func (s *StickyMemberMetadata) readFrom(src []byte, unsafe bool) error {
+	b := kbin.Reader{Src: src}
+	numAssignments := b.ArrayLen()
+	if numAssignments < 0 {
+		numAssignments = 0
+	}
+	need := numAssignments - int32(cap(s.CurrentAssignment))
+	if need > 0 {
+		s.CurrentAssignment = append(s.CurrentAssignment[:cap(s.CurrentAssignment)], make([]StickyMemberMetadataCurrentAssignment, need)...)
+	} else {
+		s.CurrentAssignment = s.CurrentAssignment[:numAssignments]
+	}
+	for i := int32(0); i < numAssignments; i++ {
+		var topic string
+		if unsafe {
+			topic = b.UnsafeString()
+		} else {
+			topic = b.String()
+		}
+		numPartitions := b.ArrayLen()
+		if numPartitions < 0 {
+			numPartitions = 0
+		}
+		a := &s.CurrentAssignment[i]
+		a.Topic = topic
+		need := numPartitions - int32(cap(a.Partitions))
+		if need > 0 {
+			a.Partitions = append(a.Partitions[:cap(a.Partitions)], make([]int32, need)...)
+		} else {
+			a.Partitions = a.Partitions[:numPartitions]
+		}
+		for i := range a.Partitions {
+			a.Partitions[i] = b.Int32()
+		}
+	}
+	if len(b.Src) > 0 {
+		s.Generation = b.Int32()
+	} else {
+		s.Generation = -1
+	}
+	return b.Complete()
+}
+
+// AppendTo provides appending of various versions of sticky member metadata to
+// dst. If generation is not -1 (default for v0), this appends as version 1.
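+//
+// A round-trip sketch (illustrative only):
+//
+//	var m kmsg.StickyMemberMetadata
+//	m.Default() // Generation = -1, so this encodes as v0
+//	raw := m.AppendTo(nil)
+//
+//	var back kmsg.StickyMemberMetadata
+//	err := back.ReadFrom(raw) // tries v1, falls back to v0 if needed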
+func (s *StickyMemberMetadata) AppendTo(dst []byte) []byte {
+	dst = kbin.AppendArrayLen(dst, len(s.CurrentAssignment))
+	for _, assignment := range s.CurrentAssignment {
+		dst = kbin.AppendString(dst, assignment.Topic)
+		dst = kbin.AppendArrayLen(dst, len(assignment.Partitions))
+		for _, partition := range assignment.Partitions {
+			dst = kbin.AppendInt32(dst, partition)
+		}
+	}
+	if s.Generation != -1 {
+		dst = kbin.AppendInt32(dst, s.Generation)
+	}
+	return dst
+}
+
+// TagReader is a type that has the ability to skip tags.
+//
+// This is effectively a trimmed version of the kbin.Reader, with the purpose
+// being that kmsg cannot depend on an external package.
+type TagReader interface {
+	// Uvarint returns a uint32. If the reader has read too much and has
+	// exhausted all bytes, this should set the reader's internal state
+	// to failed and return 0.
+	Uvarint() uint32
+
+	// Span returns n bytes from the reader. If the reader has read too
+	// much and exhausted all bytes, this should set the reader's internal
+	// state to failed and return nil.
+	Span(n int) []byte
+}
+
+// SkipTags skips tags in a TagReader.
+func SkipTags(b TagReader) {
+	for num := b.Uvarint(); num > 0; num-- {
+		_, size := b.Uvarint(), b.Uvarint()
+		b.Span(int(size))
+	}
+}
+
+// internalSkipTags skips tags in the duplicated inner kbin.Reader.
+func internalSkipTags(b *kbin.Reader) {
+	for num := b.Uvarint(); num > 0; num-- {
+		_, size := b.Uvarint(), b.Uvarint()
+		b.Span(int(size))
+	}
+}
+
+// ReadTags reads tags in a TagReader and returns the tags.
+func ReadTags(b TagReader) Tags {
+	var t Tags
+	for num := b.Uvarint(); num > 0; num-- {
+		key, size := b.Uvarint(), b.Uvarint()
+		t.Set(key, b.Span(int(size)))
+	}
+	return t
+}
+
+// internalReadTags reads tags in a reader and returns the tags from a
+// duplicated inner kbin.Reader.
+func internalReadTags(b *kbin.Reader) Tags {
+	var t Tags
+	for num := b.Uvarint(); num > 0; num-- {
+		key, size := b.Uvarint(), b.Uvarint()
+		t.Set(key, b.Span(int(size)))
+	}
+	return t
+}
+
+// Tags is an opaque structure capturing unparsed tags.
+type Tags struct {
+	keyvals map[uint32][]byte
+}
+
+// Len returns the number of keyvals in Tags.
+func (t *Tags) Len() int { return len(t.keyvals) }
+
+// Each calls fn for each key and val in the tags.
+func (t *Tags) Each(fn func(uint32, []byte)) {
+	if len(t.keyvals) == 0 {
+		return
+	}
+	// We must encode keys in order. We expect to have limited (no) unknown
+	// keys, so for now, we take a lazy approach and allocate an ordered
+	// slice.
+	ordered := make([]uint32, 0, len(t.keyvals))
+	for key := range t.keyvals {
+		ordered = append(ordered, key)
+	}
+	sort.Slice(ordered, func(i, j int) bool { return ordered[i] < ordered[j] })
+	for _, key := range ordered {
+		fn(key, t.keyvals[key])
+	}
+}
+
+// Set sets a tag's key and val.
+//
+// Note that serializing tags does NOT check if the set key overlaps with an
+// existing used key. It is invalid to set a key used by Kafka itself.
+func (t *Tags) Set(key uint32, val []byte) {
+	if t.keyvals == nil {
+		t.keyvals = make(map[uint32][]byte)
+	}
+	t.keyvals[key] = val
+}
+
+// AppendEach appends each keyval in tags to dst and returns the updated dst.
+func (t *Tags) AppendEach(dst []byte) []byte {
+	t.Each(func(key uint32, val []byte) {
+		dst = kbin.AppendUvarint(dst, key)
+		dst = kbin.AppendUvarint(dst, uint32(len(val)))
+		dst = append(dst, val...)
+	})
+	return dst
+}
diff --git a/vendor/github.com/twmb/franz-go/pkg/kmsg/generated.go b/vendor/github.com/twmb/franz-go/pkg/kmsg/generated.go
new file mode 100644
index 00000000000..584f6f37c12
--- /dev/null
+++ b/vendor/github.com/twmb/franz-go/pkg/kmsg/generated.go
@@ -0,0 +1,48224 @@
+package kmsg
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+	"strings"
+
+	"github.com/twmb/franz-go/pkg/kmsg/internal/kbin"
+)
+
+// Code generated by franz-go/generate. DO NOT EDIT.
+
+// MaxKey is the maximum key used for any messages in this package.
+// Note that this value will change as Kafka adds more messages.
+const MaxKey = 69
+
+type AssignmentTopicPartition struct {
+	TopicID [16]byte
+
+	Topic string
+
+	Partitions []int32
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+	UnknownTags Tags
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to AssignmentTopicPartition.
+func (v *AssignmentTopicPartition) Default() {
+}
+
+// NewAssignmentTopicPartition returns a default AssignmentTopicPartition
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewAssignmentTopicPartition() AssignmentTopicPartition {
+	var v AssignmentTopicPartition
+	v.Default()
+	return v
+}
+
+// MessageV0 is the message format Kafka used prior to 0.10.
+//
+// To produce or fetch messages, Kafka would write many messages contiguously
+// as an array without specifying the array length.
+type MessageV0 struct {
+	// Offset is the offset of this record.
+	//
+	// If this is the outer message of a recursive message set (i.e. a
+	// message set has been compressed and this is the outer message),
+	// then the offset should be the offset of the last inner value.
+	Offset int64
+
+	// MessageSize is the size of everything that follows in this message.
+	MessageSize int32
+
+	// CRC is the crc of everything that follows this field (NOT using the
+	// Castagnoli polynomial, as is the case in the 0.11+ RecordBatch).
+	CRC int32
+
+	// Magic is 0.
+	Magic int8
+
+	// Attributes describe the attributes of this message.
+	//
+	// The first three bits correspond to compression:
+	//   - 00 is no compression
+	//   - 01 is gzip compression
+	//   - 10 is snappy compression
+	//
+	// The remaining bits are unused and must be 0.
+	Attributes int8
+
+	// Key is a blob of data for a record.
+	//
+	// Keys are usually used for hashing the record to specific Kafka partitions.
+	Key []byte
+
+	// Value is a blob of data. This field is the main "message" portion of a
+	// record.
+	Value []byte
+}
+
+func (v *MessageV0) AppendTo(dst []byte) []byte {
+	{
+		v := v.Offset
+		dst = kbin.AppendInt64(dst, v)
+	}
+	{
+		v := v.MessageSize
+		dst = kbin.AppendInt32(dst, v)
+	}
+	{
+		v := v.CRC
+		dst = kbin.AppendInt32(dst, v)
+	}
+	{
+		v := v.Magic
+		dst = kbin.AppendInt8(dst, v)
+	}
+	{
+		v := v.Attributes
+		dst = kbin.AppendInt8(dst, v)
+	}
+	{
+		v := v.Key
+		dst = kbin.AppendNullableBytes(dst, v)
+	}
+	{
+		v := v.Value
+		dst = kbin.AppendNullableBytes(dst, v)
+	}
+	return dst
+}
+
+func (v *MessageV0) ReadFrom(src []byte) error {
+	return v.readFrom(src, false)
+}
+
+func (v *MessageV0) UnsafeReadFrom(src []byte) error {
+	return v.readFrom(src, true)
+}
+
+func (v *MessageV0) readFrom(src []byte, unsafe bool) error {
+	v.Default()
+	b := kbin.Reader{Src: src}
+	s := v
+	{
+		v := b.Int64()
+		s.Offset = v
+	}
+	{
+		v := b.Int32()
+		s.MessageSize = v
+	}
+	{
+		v := b.Int32()
+		s.CRC = v
+	}
+	{
+		v := b.Int8()
+		s.Magic = v
+	}
+	{
+		v := b.Int8()
+		s.Attributes = v
+	}
+	{
+		v := b.NullableBytes()
+		s.Key = v
+	}
+	{
+		v := b.NullableBytes()
+		s.Value = v
+	}
+	return b.Complete()
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to MessageV0.
+func (v *MessageV0) Default() {
+}
+
+// NewMessageV0 returns a default MessageV0
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewMessageV0() MessageV0 {
+	var v MessageV0
+	v.Default()
+	return v
+}
+
+// MessageV1 is the message format Kafka used prior to 0.11.
+//
+// To produce or fetch messages, Kafka would write many messages contiguously
+// as an array without specifying the array length.
+//
+// To support compression, an entire message set would be compressed and used
+// as the Value in another message set (thus being "recursive"). The key for
+// this outer message set must be null.
+type MessageV1 struct {
+	// Offset is the offset of this record.
+	//
+	// Different from v0, if this message set is a recursive message set
+	// (that is, compressed and inside another message set), the offset
+	// on the inner set is relative to the offset of the outer set.
+	Offset int64
+
+	// MessageSize is the size of everything that follows in this message.
+	MessageSize int32
+
+	// CRC is the crc of everything that follows this field (NOT using the
+	// Castagnoli polynomial, as is the case in the 0.11+ RecordBatch).
+	CRC int32
+
+	// Magic is 1.
+	Magic int8
+
+	// Attributes describe the attributes of this message.
+	//
+	// The first three bits correspond to compression:
+	//   - 00 is no compression
+	//   - 01 is gzip compression
+	//   - 10 is snappy compression
+	//
+	// Bit 4 is the timestamp type, with 0 meaning CreateTime corresponding
+	// to the timestamp being from the producer, and 1 meaning LogAppendTime
+	// corresponding to the timestamp being from the broker.
+	// Setting this to LogAppendTime will cause batches to be rejected.
+	//
+	// The remaining bits are unused and must be 0.
+	Attributes int8
+
+	// Timestamp is the millisecond timestamp of this message.
+	Timestamp int64
+
+	// Key is a blob of data for a record.
+	//
+	// Keys are usually used for hashing the record to specific Kafka partitions.
+	Key []byte
+
+	// Value is a blob of data. This field is the main "message" portion of a
+	// record.
+ Value []byte +} + +func (v *MessageV1) AppendTo(dst []byte) []byte { + { + v := v.Offset + dst = kbin.AppendInt64(dst, v) + } + { + v := v.MessageSize + dst = kbin.AppendInt32(dst, v) + } + { + v := v.CRC + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Magic + dst = kbin.AppendInt8(dst, v) + } + { + v := v.Attributes + dst = kbin.AppendInt8(dst, v) + } + { + v := v.Timestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.Key + dst = kbin.AppendNullableBytes(dst, v) + } + { + v := v.Value + dst = kbin.AppendNullableBytes(dst, v) + } + return dst +} + +func (v *MessageV1) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *MessageV1) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *MessageV1) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + s := v + { + v := b.Int64() + s.Offset = v + } + { + v := b.Int32() + s.MessageSize = v + } + { + v := b.Int32() + s.CRC = v + } + { + v := b.Int8() + s.Magic = v + } + { + v := b.Int8() + s.Attributes = v + } + { + v := b.Int64() + s.Timestamp = v + } + { + v := b.NullableBytes() + s.Key = v + } + { + v := b.NullableBytes() + s.Value = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to MessageV1. +func (v *MessageV1) Default() { +} + +// NewMessageV1 returns a default MessageV1 +// This is a shortcut for creating a struct and calling Default yourself. +func NewMessageV1() MessageV1 { + var v MessageV1 + v.Default() + return v +} + +// Header is user provided metadata for a record. Kafka does not look at +// headers at all; they are solely for producers and consumers. +type Header struct { + Key string + + Value []byte +} + +func (v *Header) AppendTo(dst []byte) []byte { + { + v := v.Key + dst = kbin.AppendVarintString(dst, v) + } + { + v := v.Value + dst = kbin.AppendVarintBytes(dst, v) + } + return dst +} + +func (v *Header) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *Header) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *Header) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + s := v + { + var v string + if unsafe { + v = b.UnsafeVarintString() + } else { + v = b.VarintString() + } + s.Key = v + } + { + v := b.VarintBytes() + s.Value = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to Header. +func (v *Header) Default() { +} + +// NewHeader returns a default Header +// This is a shortcut for creating a struct and calling Default yourself. +func NewHeader() Header { + var v Header + v.Default() + return v +} + +// RecordBatch is a Kafka concept that groups many individual records together +// in a more optimized format. +type RecordBatch struct { + // FirstOffset is the first offset in a record batch. + // + // For producing, this is usually 0. + FirstOffset int64 + + // Length is the wire length of everything that follows this field. + Length int32 + + // PartitionLeaderEpoch is the leader epoch of the broker at the time + // this batch was written. Kafka uses this for cluster communication, + // but clients can also use this to better aid truncation detection. + // See KIP-320. Producers should set this to -1. + PartitionLeaderEpoch int32 + + // Magic is the current "magic" number of this message format. + // The current magic number is 2. 
+	Magic int8
+
+	// CRC is the crc of everything that follows this field using the
+	// Castagnoli polynomial.
+	CRC int32
+
+	// Attributes describe the records array of this batch.
+	//
+	// The first three bits correspond to compression:
+	//   - 000 is no compression
+	//   - 001 is gzip compression
+	//   - 010 is snappy compression
+	//   - 011 is lz4 compression
+	//   - 100 is zstd compression (produce request version 7+)
+	//
+	// Bit 4 is the timestamp type, with 0 meaning CreateTime corresponding
+	// to the timestamp being from the producer, and 1 meaning LogAppendTime
+	// corresponding to the timestamp being from the broker.
+	// Setting this to LogAppendTime will cause batches to be rejected.
+	//
+	// Bit 5 indicates whether the batch is part of a transaction (1 is yes).
+	//
+	// Bit 6 indicates if the batch includes a control message (1 is yes).
+	// Control messages are used to enable transactions and are generated from
+	// the broker. Clients should not return control batches to applications.
+	Attributes int16
+
+	// LastOffsetDelta is the offset delta of the last message in a batch.
+	// This is used by the broker to ensure correct behavior even with
+	// batch compaction.
+	LastOffsetDelta int32
+
+	// FirstTimestamp is the timestamp (in milliseconds) of the first record
+	// in a batch.
+	FirstTimestamp int64
+
+	// MaxTimestamp is the timestamp (in milliseconds) of the last record
+	// in a batch. Similar to LastOffsetDelta, this is used to ensure correct
+	// behavior with compacting.
+	MaxTimestamp int64
+
+	// ProducerID is the broker assigned producerID from an InitProducerID
+	// request.
+	//
+	// Clients that wish to support idempotent messages and transactions must
+	// set this field.
+	//
+	// Note that when not using transactions, any producer here is always
+	// accepted (and the epoch is always zero). Outside transactions, the ID
+	// is used only to deduplicate requests (and there must be at most 5
+	// concurrent requests).
+	ProducerID int64
+
+	// ProducerEpoch is the broker assigned producerEpoch from an InitProducerID
+	// request.
+	//
+	// Clients that wish to support idempotent messages and transactions must
+	// set this field.
+	ProducerEpoch int16
+
+	// FirstSequence is the producer assigned sequence number used by the
+	// broker to deduplicate messages.
+	//
+	// Clients that wish to support idempotent messages and transactions must
+	// set this field.
+	//
+	// The sequence number for each record in a batch is OffsetDelta + FirstSequence.
+	FirstSequence int32
+
+	// NumRecords is the number of records in the array below.
+	//
+	// This is separate from Records due to the potential for records to be
+	// compressed.
+	NumRecords int32
+
+	// Records contains records, either compressed or uncompressed.
+	//
+	// For uncompressed records, this is an array of records ([Record]).
+	//
+	// For compressed records, the length of the uncompressed array is kept
+	// but everything that follows is compressed.
+	//
+	// The number of bytes is expected to be the Length field minus 49.
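+	// (49 is the size of the fixed fields between Length and Records:
+	// PartitionLeaderEpoch (4) + Magic (1) + CRC (4) + Attributes (2) +
+	// LastOffsetDelta (4) + FirstTimestamp (8) + MaxTimestamp (8) +
+	// ProducerID (8) + ProducerEpoch (2) + FirstSequence (4) +
+	// NumRecords (4) = 49.)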
+ Records []byte +} + +func (v *RecordBatch) AppendTo(dst []byte) []byte { + { + v := v.FirstOffset + dst = kbin.AppendInt64(dst, v) + } + { + v := v.Length + dst = kbin.AppendInt32(dst, v) + } + { + v := v.PartitionLeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Magic + dst = kbin.AppendInt8(dst, v) + } + { + v := v.CRC + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Attributes + dst = kbin.AppendInt16(dst, v) + } + { + v := v.LastOffsetDelta + dst = kbin.AppendInt32(dst, v) + } + { + v := v.FirstTimestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.MaxTimestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ProducerID + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ProducerEpoch + dst = kbin.AppendInt16(dst, v) + } + { + v := v.FirstSequence + dst = kbin.AppendInt32(dst, v) + } + { + v := v.NumRecords + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Records + dst = append(dst, v...) + } + return dst +} + +func (v *RecordBatch) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *RecordBatch) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *RecordBatch) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + s := v + { + v := b.Int64() + s.FirstOffset = v + } + { + v := b.Int32() + s.Length = v + } + { + v := b.Int32() + s.PartitionLeaderEpoch = v + } + { + v := b.Int8() + s.Magic = v + } + { + v := b.Int32() + s.CRC = v + } + { + v := b.Int16() + s.Attributes = v + } + { + v := b.Int32() + s.LastOffsetDelta = v + } + { + v := b.Int64() + s.FirstTimestamp = v + } + { + v := b.Int64() + s.MaxTimestamp = v + } + { + v := b.Int64() + s.ProducerID = v + } + { + v := b.Int16() + s.ProducerEpoch = v + } + { + v := b.Int32() + s.FirstSequence = v + } + { + v := b.Int32() + s.NumRecords = v + } + { + v := b.Span(int(s.Length) - 49) + s.Records = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to RecordBatch. +func (v *RecordBatch) Default() { +} + +// NewRecordBatch returns a default RecordBatch +// This is a shortcut for creating a struct and calling Default yourself. +func NewRecordBatch() RecordBatch { + var v RecordBatch + v.Default() + return v +} + +// OffsetCommitKey is the key for the Kafka internal __consumer_offsets topic +// if the key starts with an int16 with a value of 0 or 1. +// +// This type was introduced in KAFKA-1012 commit a670537aa3 with release 0.8.2 +// and has been in use ever since. +type OffsetCommitKey struct { + // Version is which encoding version this value is using. + Version int16 + + // Group is the group being committed. + Group string + + // Topic is the topic being committed. + Topic string + + // Partition is the partition being committed. 
+ Partition int32 +} + +func (v *OffsetCommitKey) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.Version + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Group + dst = kbin.AppendString(dst, v) + } + { + v := v.Topic + dst = kbin.AppendString(dst, v) + } + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + return dst +} + +func (v *OffsetCommitKey) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *OffsetCommitKey) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *OffsetCommitKey) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + v.Version = b.Int16() + version := v.Version + _ = version + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Group = v + } + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Topic = v + } + { + v := b.Int32() + s.Partition = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetCommitKey. +func (v *OffsetCommitKey) Default() { +} + +// NewOffsetCommitKey returns a default OffsetCommitKey +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetCommitKey() OffsetCommitKey { + var v OffsetCommitKey + v.Default() + return v +} + +// OffsetCommitValue is the value for the Kafka internal __consumer_offsets +// topic if the key is of OffsetCommitKey type. +// +// Version 0 was introduced with the key version 0. +// +// KAFKA-1634 commit c5df2a8e3a in 0.9.0 released version 1. +// +// KAFKA-4682 commit 418a91b5d4, proposed in KIP-211 and included in 2.1.0 +// released version 2. +// +// KAFKA-7437 commit 9f7267dd2f, proposed in KIP-320 and included in 2.1.0 +// released version 3. +type OffsetCommitValue struct { + // Version is which encoding version this value is using. + Version int16 + + // Offset is the committed offset. + Offset int64 + + // LeaderEpoch is the epoch of the leader committing this message. + LeaderEpoch int32 // v3+ + + // Metadata is the metadata included in the commit. + Metadata string + + // CommitTimestamp is when this commit occurred. + CommitTimestamp int64 + + // ExpireTimestamp, introduced in v1 and dropped in v2 with KIP-111, + // is when this commit expires. 
+ ExpireTimestamp int64 // v1-v1 +} + +func (v *OffsetCommitValue) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.Version + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Offset + dst = kbin.AppendInt64(dst, v) + } + if version >= 3 { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Metadata + dst = kbin.AppendString(dst, v) + } + { + v := v.CommitTimestamp + dst = kbin.AppendInt64(dst, v) + } + if version >= 1 && version <= 1 { + v := v.ExpireTimestamp + dst = kbin.AppendInt64(dst, v) + } + return dst +} + +func (v *OffsetCommitValue) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *OffsetCommitValue) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *OffsetCommitValue) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + v.Version = b.Int16() + version := v.Version + _ = version + s := v + { + v := b.Int64() + s.Offset = v + } + if version >= 3 { + v := b.Int32() + s.LeaderEpoch = v + } + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Metadata = v + } + { + v := b.Int64() + s.CommitTimestamp = v + } + if version >= 1 && version <= 1 { + v := b.Int64() + s.ExpireTimestamp = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetCommitValue. +func (v *OffsetCommitValue) Default() { +} + +// NewOffsetCommitValue returns a default OffsetCommitValue +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetCommitValue() OffsetCommitValue { + var v OffsetCommitValue + v.Default() + return v +} + +// GroupMetadataKey is the key for the Kafka internal __consumer_offsets topic +// if the key starts with an int16 with a value of 2. +// +// This type was introduced in KAFKA-2017 commit 7c33475274 with release 0.9.0 +// and has been in use ever since. +type GroupMetadataKey struct { + // Version is which encoding version this value is using. + Version int16 + + // Group is the group this metadata is for. + Group string +} + +func (v *GroupMetadataKey) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.Version + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Group + dst = kbin.AppendString(dst, v) + } + return dst +} + +func (v *GroupMetadataKey) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *GroupMetadataKey) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *GroupMetadataKey) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + v.Version = b.Int16() + version := v.Version + _ = version + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Group = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to GroupMetadataKey. +func (v *GroupMetadataKey) Default() { +} + +// NewGroupMetadataKey returns a default GroupMetadataKey +// This is a shortcut for creating a struct and calling Default yourself. +func NewGroupMetadataKey() GroupMetadataKey { + var v GroupMetadataKey + v.Default() + return v +} + +type GroupMetadataValueMember struct { + // MemberID is a group member. + MemberID string + + // InstanceID is the instance ID of this member in the group (KIP-345). 
+ InstanceID *string // v3+ + + // ClientID is the client ID of this group member. + ClientID string + + // ClientHost is the hostname of this group member. + ClientHost string + + // RebalanceTimeoutMillis is the rebalance timeout of this group member. + // + // This field has a default of -1. + RebalanceTimeoutMillis int32 // v1+ + + // SessionTimeoutMillis is the session timeout of this group member. + SessionTimeoutMillis int32 + + // Subscription is the subscription of this group member. + Subscription []byte + + // Assignment is what the leader assigned this group member. + Assignment []byte + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to GroupMetadataValueMember. +func (v *GroupMetadataValueMember) Default() { + v.RebalanceTimeoutMillis = -1 +} + +// NewGroupMetadataValueMember returns a default GroupMetadataValueMember +// This is a shortcut for creating a struct and calling Default yourself. +func NewGroupMetadataValueMember() GroupMetadataValueMember { + var v GroupMetadataValueMember + v.Default() + return v +} + +// GroupMetadataValue is the value for the Kafka internal __consumer_offsets +// topic if the key is of GroupMetadataKey type. +// +// Version 0 was introduced with the key version 0. +// +// KAFKA-3888 commit 40b1dd3f49, proposed in KIP-62 and included in 0.10.1 +// released version 1. +// +// KAFKA-4682 commit 418a91b5d4, proposed in KIP-211 and included in 2.1.0 +// released version 2. +// +// KAFKA-7862 commit 0f995ba6be, proposed in KIP-345 and included in 2.3.0 +// released version 3. +type GroupMetadataValue struct { + // Version is the version of this value. + Version int16 + + // ProtocolType is the type of protocol being used for the group + // (i.e., "consumer"). + ProtocolType string + + // Generation is the generation of this group. + Generation int32 + + // Protocol is the agreed upon protocol all members are using to partition + // (i.e., "sticky"). + Protocol *string + + // Leader is the group leader. + Leader *string + + // CurrentStateTimestamp is the timestamp for this state of the group + // (stable, etc.). + // + // This field has a default of -1. + CurrentStateTimestamp int64 // v2+ + + // Members are the group members. + Members []GroupMetadataValueMember + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
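+	// They can be enumerated with the Tags helpers from this package,
+	// for example:
+	//
+	//	v.UnknownTags.Each(func(key uint32, val []byte) {
+	//		// inspect key/val
+	//	})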
+ UnknownTags Tags // v4+ +} + +func (v *GroupMetadataValue) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + { + v := v.Version + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ProtocolType + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Generation + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Protocol + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Leader + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if version >= 2 { + v := v.CurrentStateTimestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.Members + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 3 { + v := v.InstanceID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.ClientID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ClientHost + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 1 { + v := v.RebalanceTimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.SessionTimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Subscription + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + { + v := v.Assignment + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *GroupMetadataValue) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *GroupMetadataValue) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *GroupMetadataValue) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + v.Version = b.Int16() + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ProtocolType = v + } + { + v := b.Int32() + s.Generation = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Protocol = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Leader = v + } + if version >= 2 { + v := b.Int64() + s.CurrentStateTimestamp = v 
+ } + { + v := s.Members + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]GroupMetadataValueMember, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.MemberID = v + } + if version >= 3 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.InstanceID = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ClientID = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ClientHost = v + } + if version >= 1 { + v := b.Int32() + s.RebalanceTimeoutMillis = v + } + { + v := b.Int32() + s.SessionTimeoutMillis = v + } + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.Subscription = v + } + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.Assignment = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Members = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} +func (v *GroupMetadataValue) IsFlexible() bool { return v.Version >= 4 } + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to GroupMetadataValue. +func (v *GroupMetadataValue) Default() { + v.CurrentStateTimestamp = -1 +} + +// NewGroupMetadataValue returns a default GroupMetadataValue +// This is a shortcut for creating a struct and calling Default yourself. +func NewGroupMetadataValue() GroupMetadataValue { + var v GroupMetadataValue + v.Default() + return v +} + +// TxnMetadataKey is the key for the Kafka internal __transaction_state topic +// if the key starts with an int16 with a value of 0. +type TxnMetadataKey struct { + // Version is the version of this type. + Version int16 + + // TransactionalID is the transactional ID this record is for. + TransactionalID string +} + +func (v *TxnMetadataKey) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.Version + dst = kbin.AppendInt16(dst, v) + } + { + v := v.TransactionalID + dst = kbin.AppendString(dst, v) + } + return dst +} + +func (v *TxnMetadataKey) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *TxnMetadataKey) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *TxnMetadataKey) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + v.Version = b.Int16() + version := v.Version + _ = version + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.TransactionalID = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to TxnMetadataKey. 
+func (v *TxnMetadataKey) Default() { +} + +// NewTxnMetadataKey returns a default TxnMetadataKey +// This is a shortcut for creating a struct and calling Default yourself. +func NewTxnMetadataKey() TxnMetadataKey { + var v TxnMetadataKey + v.Default() + return v +} + +type TxnMetadataValueTopic struct { + // Topic is a topic involved in this transaction. + Topic string + + // Partitions are partitions in this topic involved in the transaction. + Partitions []int32 +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to TxnMetadataValueTopic. +func (v *TxnMetadataValueTopic) Default() { +} + +// NewTxnMetadataValueTopic returns a default TxnMetadataValueTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewTxnMetadataValueTopic() TxnMetadataValueTopic { + var v TxnMetadataValueTopic + v.Default() + return v +} + +// TxnMetadataValue is the value for the Kafka internal __transaction_state +// topic if the key is of TxnMetadataKey type. +type TxnMetadataValue struct { + // Version is the version of this value. + Version int16 + + // ProducerID is the ID in use by the transactional ID. + ProducerID int64 + + // ProducerEpoch is the epoch associated with the producer ID. + ProducerEpoch int16 + + // TimeoutMillis is the timeout of this transaction in milliseconds. + TimeoutMillis int32 + + // State is the state this transaction is in, + // 0 is Empty, 1 is Ongoing, 2 is PrepareCommit, 3 is PrepareAbort, 4 is + // CompleteCommit, 5 is CompleteAbort, 6 is Dead, and 7 is PrepareEpochFence. + State TransactionState + + // Topics are topics that are involved in this transaction. + Topics []TxnMetadataValueTopic + + // LastUpdateTimestamp is the timestamp in millis of when this transaction + // was last updated. + LastUpdateTimestamp int64 + + // StartTimestamp is the timestamp in millis of when this transaction started. 
+ StartTimestamp int64 +} + +func (v *TxnMetadataValue) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.Version + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ProducerID + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ProducerEpoch + dst = kbin.AppendInt16(dst, v) + } + { + v := v.TimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.State + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.Topics + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Topic + dst = kbin.AppendString(dst, v) + } + { + v := v.Partitions + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + } + } + { + v := v.LastUpdateTimestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.StartTimestamp + dst = kbin.AppendInt64(dst, v) + } + return dst +} + +func (v *TxnMetadataValue) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *TxnMetadataValue) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *TxnMetadataValue) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + v.Version = b.Int16() + version := v.Version + _ = version + s := v + { + v := b.Int64() + s.ProducerID = v + } + { + v := b.Int16() + s.ProducerEpoch = v + } + { + v := b.Int32() + s.TimeoutMillis = v + } + { + var t TransactionState + { + v := b.Int8() + t = TransactionState(v) + } + v := t + s.State = v + } + { + v := s.Topics + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]TxnMetadataValueTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + } + v = a + s.Topics = v + } + { + v := b.Int64() + s.LastUpdateTimestamp = v + } + { + v := b.Int64() + s.StartTimestamp = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to TxnMetadataValue. +func (v *TxnMetadataValue) Default() { +} + +// NewTxnMetadataValue returns a default TxnMetadataValue +// This is a shortcut for creating a struct and calling Default yourself. +func NewTxnMetadataValue() TxnMetadataValue { + var v TxnMetadataValue + v.Default() + return v +} + +type StickyMemberMetadataCurrentAssignment struct { + // Topic is a topic the group member is currently assigned. + Topic string + + // Partitions are the partitions within a topic that a group member is + // currently assigned. + Partitions []int32 +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to StickyMemberMetadataCurrentAssignment. +func (v *StickyMemberMetadataCurrentAssignment) Default() { +} + +// NewStickyMemberMetadataCurrentAssignment returns a default StickyMemberMetadataCurrentAssignment +// This is a shortcut for creating a struct and calling Default yourself. 
+func NewStickyMemberMetadataCurrentAssignment() StickyMemberMetadataCurrentAssignment {
+	var v StickyMemberMetadataCurrentAssignment
+	v.Default()
+	return v
+}
+
+// StickyMemberMetadata is what is encoded in UserData for
+// ConsumerMemberMetadata in group join requests with the sticky partitioning
+// strategy.
+//
+// V1 added generation, which fixed a bug with flaky group members joining
+// repeatedly. See KIP-341 for more details.
+//
+// Note that clients should always try decoding as v1 and, if that fails,
+// fall back to v0. This is necessary due to there being no version number
+// anywhere in this type.
+type StickyMemberMetadata struct {
+	// CurrentAssignment is the assignment that a group member has when
+	// issuing a join.
+	CurrentAssignment []StickyMemberMetadataCurrentAssignment
+
+	// Generation is the generation of this join. This is incremented every join.
+	//
+	// This field has a default of -1.
+	Generation int32 // v1+
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to StickyMemberMetadata.
+func (v *StickyMemberMetadata) Default() {
+	v.Generation = -1
+}
+
+// NewStickyMemberMetadata returns a default StickyMemberMetadata
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewStickyMemberMetadata() StickyMemberMetadata {
+	var v StickyMemberMetadata
+	v.Default()
+	return v
+}
+
+type ConsumerMemberMetadataOwnedPartition struct {
+	Topic string
+
+	Partitions []int32
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to ConsumerMemberMetadataOwnedPartition.
+func (v *ConsumerMemberMetadataOwnedPartition) Default() {
+}
+
+// NewConsumerMemberMetadataOwnedPartition returns a default ConsumerMemberMetadataOwnedPartition
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewConsumerMemberMetadataOwnedPartition() ConsumerMemberMetadataOwnedPartition {
+	var v ConsumerMemberMetadataOwnedPartition
+	v.Default()
+	return v
+}
+
+// ConsumerMemberMetadata is the metadata that is usually sent with a join group
+// request with the "consumer" protocol (normal, non-connect consumers).
+type ConsumerMemberMetadata struct {
+	// Version is 0, 1, 2, or 3.
+	Version int16
+
+	// Topics is the list of topics in the group that this member is interested
+	// in consuming.
+	Topics []string
+
+	// UserData is arbitrary client data for a given client in the group.
+	// For sticky assignment, this is StickyMemberMetadata.
+	UserData []byte
+
+	// OwnedPartitions, introduced for KIP-429, are the partitions that this
+	// member currently owns.
+	OwnedPartitions []ConsumerMemberMetadataOwnedPartition // v1+
+
+	// Generation is the generation of the group.
+	//
+	// This field has a default of -1.
+	Generation int32 // v2+
+
+	// Rack, if non-nil, opts into rack-aware replica assignment.
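+	// Rack-aware consumer assignment came with KIP-881; this field is only
+	// serialized when Version is 3 or above.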
+ Rack *string // v3+ +} + +func (v *ConsumerMemberMetadata) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.Version + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Topics + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := v[i] + dst = kbin.AppendString(dst, v) + } + } + { + v := v.UserData + dst = kbin.AppendNullableBytes(dst, v) + } + if version >= 1 { + v := v.OwnedPartitions + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Topic + dst = kbin.AppendString(dst, v) + } + { + v := v.Partitions + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + } + } + if version >= 2 { + v := v.Generation + dst = kbin.AppendInt32(dst, v) + } + if version >= 3 { + v := v.Rack + dst = kbin.AppendNullableString(dst, v) + } + return dst +} + +func (v *ConsumerMemberMetadata) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ConsumerMemberMetadata) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ConsumerMemberMetadata) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + v.Version = b.Int16() + version := v.Version + _ = version + s := v + { + v := s.Topics + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]string, l)...) + } + for i := int32(0); i < l; i++ { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + a[i] = v + } + v = a + s.Topics = v + } + { + v := b.NullableBytes() + s.UserData = v + } + if version >= 1 { + v := s.OwnedPartitions + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ConsumerMemberMetadataOwnedPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + } + v = a + s.OwnedPartitions = v + } + if version >= 2 { + v := b.Int32() + s.Generation = v + } + if version >= 3 { + var v *string + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + s.Rack = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ConsumerMemberMetadata. +func (v *ConsumerMemberMetadata) Default() { + v.Generation = -1 +} + +// NewConsumerMemberMetadata returns a default ConsumerMemberMetadata +// This is a shortcut for creating a struct and calling Default yourself. +func NewConsumerMemberMetadata() ConsumerMemberMetadata { + var v ConsumerMemberMetadata + v.Default() + return v +} + +type ConsumerMemberAssignmentTopic struct { + // Topic is a topic in the assignment. + Topic string + + // Partitions contains partitions in the assignment. + Partitions []int32 +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ConsumerMemberAssignmentTopic. 
+func (v *ConsumerMemberAssignmentTopic) Default() {
+}
+
+// NewConsumerMemberAssignmentTopic returns a default ConsumerMemberAssignmentTopic
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewConsumerMemberAssignmentTopic() ConsumerMemberAssignmentTopic {
+	var v ConsumerMemberAssignmentTopic
+	v.Default()
+	return v
+}
+
+// ConsumerMemberAssignment is the assignment data that is usually sent with a
+// sync group request with the "consumer" protocol (normal, non-connect
+// consumers).
+type ConsumerMemberAssignment struct {
+	// Version is 0, 1, or 2.
+	Version int16
+
+	// Topics contains topics in the assignment.
+	Topics []ConsumerMemberAssignmentTopic
+
+	// UserData is arbitrary client data for a given client in the group.
+	UserData []byte
+}
+
+func (v *ConsumerMemberAssignment) AppendTo(dst []byte) []byte {
+	version := v.Version
+	_ = version
+	{
+		v := v.Version
+		dst = kbin.AppendInt16(dst, v)
+	}
+	{
+		v := v.Topics
+		dst = kbin.AppendArrayLen(dst, len(v))
+		for i := range v {
+			v := &v[i]
+			{
+				v := v.Topic
+				dst = kbin.AppendString(dst, v)
+			}
+			{
+				v := v.Partitions
+				dst = kbin.AppendArrayLen(dst, len(v))
+				for i := range v {
+					v := v[i]
+					dst = kbin.AppendInt32(dst, v)
+				}
+			}
+		}
+	}
+	{
+		v := v.UserData
+		dst = kbin.AppendNullableBytes(dst, v)
+	}
+	return dst
+}
+
+func (v *ConsumerMemberAssignment) ReadFrom(src []byte) error {
+	return v.readFrom(src, false)
+}
+
+func (v *ConsumerMemberAssignment) UnsafeReadFrom(src []byte) error {
+	return v.readFrom(src, true)
+}
+
+func (v *ConsumerMemberAssignment) readFrom(src []byte, unsafe bool) error {
+	v.Default()
+	b := kbin.Reader{Src: src}
+	v.Version = b.Int16()
+	version := v.Version
+	_ = version
+	s := v
+	{
+		v := s.Topics
+		a := v
+		var l int32
+		l = b.ArrayLen()
+		if !b.Ok() {
+			return b.Complete()
+		}
+		a = a[:0]
+		if l > 0 {
+			a = append(a, make([]ConsumerMemberAssignmentTopic, l)...)
+		}
+		for i := int32(0); i < l; i++ {
+			v := &a[i]
+			v.Default()
+			s := v
+			{
+				var v string
+				if unsafe {
+					v = b.UnsafeString()
+				} else {
+					v = b.String()
+				}
+				s.Topic = v
+			}
+			{
+				v := s.Partitions
+				a := v
+				var l int32
+				l = b.ArrayLen()
+				if !b.Ok() {
+					return b.Complete()
+				}
+				a = a[:0]
+				if l > 0 {
+					a = append(a, make([]int32, l)...)
+				}
+				for i := int32(0); i < l; i++ {
+					v := b.Int32()
+					a[i] = v
+				}
+				v = a
+				s.Partitions = v
+			}
+		}
+		v = a
+		s.Topics = v
+	}
+	{
+		v := b.NullableBytes()
+		s.UserData = v
+	}
+	return b.Complete()
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to ConsumerMemberAssignment.
+func (v *ConsumerMemberAssignment) Default() {
+}
+
+// NewConsumerMemberAssignment returns a default ConsumerMemberAssignment
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewConsumerMemberAssignment() ConsumerMemberAssignment {
+	var v ConsumerMemberAssignment
+	v.Default()
+	return v
+}
+
+// ConnectMemberMetadata is the metadata used in a join group request with the
+// "connect" protocol. v1 introduced incremental cooperative rebalancing (akin
+// to cooperative-sticky) per KIP-415.
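+//
+// Under cooperative rebalancing, workers give up only the connectors and
+// tasks that actually move to another worker, rather than stopping all work
+// on every rebalance.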
+// +// v0 defined in connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/ConnectProtocol.java +// v1+ defined in connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/IncrementalCooperativeConnectProtocol.java +type ConnectMemberMetadata struct { + Version int16 + + URL string + + ConfigOffset int64 + + CurrentAssignment []byte // v1+ +} + +func (v *ConnectMemberMetadata) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.Version + dst = kbin.AppendInt16(dst, v) + } + { + v := v.URL + dst = kbin.AppendString(dst, v) + } + { + v := v.ConfigOffset + dst = kbin.AppendInt64(dst, v) + } + if version >= 1 { + v := v.CurrentAssignment + dst = kbin.AppendNullableBytes(dst, v) + } + return dst +} + +func (v *ConnectMemberMetadata) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ConnectMemberMetadata) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ConnectMemberMetadata) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + v.Version = b.Int16() + version := v.Version + _ = version + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.URL = v + } + { + v := b.Int64() + s.ConfigOffset = v + } + if version >= 1 { + v := b.NullableBytes() + s.CurrentAssignment = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ConnectMemberMetadata. +func (v *ConnectMemberMetadata) Default() { +} + +// NewConnectMemberMetadata returns a default ConnectMemberMetadata +// This is a shortcut for creating a struct and calling Default yourself. +func NewConnectMemberMetadata() ConnectMemberMetadata { + var v ConnectMemberMetadata + v.Default() + return v +} + +type ConnectMemberAssignmentAssignment struct { + Connector string + + Tasks []int16 +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ConnectMemberAssignmentAssignment. +func (v *ConnectMemberAssignmentAssignment) Default() { +} + +// NewConnectMemberAssignmentAssignment returns a default ConnectMemberAssignmentAssignment +// This is a shortcut for creating a struct and calling Default yourself. +func NewConnectMemberAssignmentAssignment() ConnectMemberAssignmentAssignment { + var v ConnectMemberAssignmentAssignment + v.Default() + return v +} + +type ConnectMemberAssignmentRevoked struct { + Connector string + + Tasks []int16 +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ConnectMemberAssignmentRevoked. +func (v *ConnectMemberAssignmentRevoked) Default() { +} + +// NewConnectMemberAssignmentRevoked returns a default ConnectMemberAssignmentRevoked +// This is a shortcut for creating a struct and calling Default yourself. +func NewConnectMemberAssignmentRevoked() ConnectMemberAssignmentRevoked { + var v ConnectMemberAssignmentRevoked + v.Default() + return v +} + +// ConnectMemberAssignment is the assignment that is used in a sync group +// request with the "connect" protocol. See ConnectMemberMetadata for links to +// the Kafka code where these fields are defined. 
+type ConnectMemberAssignment struct { + Version int16 + + Error int16 + + Leader string + + LeaderURL string + + ConfigOffset int64 + + Assignment []ConnectMemberAssignmentAssignment + + Revoked []ConnectMemberAssignmentRevoked // v1+ + + ScheduledDelay int32 // v1+ +} + +func (v *ConnectMemberAssignment) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.Version + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Error + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Leader + dst = kbin.AppendString(dst, v) + } + { + v := v.LeaderURL + dst = kbin.AppendString(dst, v) + } + { + v := v.ConfigOffset + dst = kbin.AppendInt64(dst, v) + } + { + v := v.Assignment + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Connector + dst = kbin.AppendString(dst, v) + } + { + v := v.Tasks + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := v[i] + dst = kbin.AppendInt16(dst, v) + } + } + } + } + if version >= 1 { + v := v.Revoked + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Connector + dst = kbin.AppendString(dst, v) + } + { + v := v.Tasks + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := v[i] + dst = kbin.AppendInt16(dst, v) + } + } + } + } + if version >= 1 { + v := v.ScheduledDelay + dst = kbin.AppendInt32(dst, v) + } + return dst +} + +func (v *ConnectMemberAssignment) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ConnectMemberAssignment) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ConnectMemberAssignment) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + v.Version = b.Int16() + version := v.Version + _ = version + s := v + { + v := b.Int16() + s.Error = v + } + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Leader = v + } + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.LeaderURL = v + } + { + v := b.Int64() + s.ConfigOffset = v + } + { + v := s.Assignment + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ConnectMemberAssignmentAssignment, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Connector = v + } + { + v := s.Tasks + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int16, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int16() + a[i] = v + } + v = a + s.Tasks = v + } + } + v = a + s.Assignment = v + } + if version >= 1 { + v := s.Revoked + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ConnectMemberAssignmentRevoked, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Connector = v + } + { + v := s.Tasks + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int16, l)...) 
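+					// Note the a[:0] above: the generated decoders reuse any
+					// existing backing array and allocate only when growing, so
+					// decoding repeatedly into one struct avoids reallocation.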
+ } + for i := int32(0); i < l; i++ { + v := b.Int16() + a[i] = v + } + v = a + s.Tasks = v + } + } + v = a + s.Revoked = v + } + if version >= 1 { + v := b.Int32() + s.ScheduledDelay = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ConnectMemberAssignment. +func (v *ConnectMemberAssignment) Default() { +} + +// NewConnectMemberAssignment returns a default ConnectMemberAssignment +// This is a shortcut for creating a struct and calling Default yourself. +func NewConnectMemberAssignment() ConnectMemberAssignment { + var v ConnectMemberAssignment + v.Default() + return v +} + +// DefaultPrincipalData is the encoded principal data. This is used in an +// envelope request from broker to broker. +type DefaultPrincipalData struct { + Version int16 + + // The principal type. + Type string + + // The principal name. + Name string + + // Whether the principal was authenticated by a delegation token on the forwarding broker. + TokenAuthenticated bool + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (v *DefaultPrincipalData) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.Version + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Type + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.TokenAuthenticated + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DefaultPrincipalData) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DefaultPrincipalData) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DefaultPrincipalData) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + v.Version = b.Int16() + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Type = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + { + v := b.Bool() + s.TokenAuthenticated = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} +func (v *DefaultPrincipalData) IsFlexible() bool { return v.Version >= 0 } + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DefaultPrincipalData. +func (v *DefaultPrincipalData) Default() { +} + +// NewDefaultPrincipalData returns a default DefaultPrincipalData +// This is a shortcut for creating a struct and calling Default yourself. +func NewDefaultPrincipalData() DefaultPrincipalData { + var v DefaultPrincipalData + v.Default() + return v +} + +// ControlRecordKey is the key in a control record. 
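+// Control records are written by brokers themselves, not by producers: the
+// transaction coordinator writes abort and commit markers (key types 0 and 1)
+// into each partition a transaction touched, and READ_COMMITTED consumers use
+// them to filter out aborted records.
+//
+// As a rough sketch (recordKey and recordValue stand in for the raw key and
+// value bytes of a control record; they are not an API in this package),
+// decoding could look like:
+//
+//	var key ControlRecordKey
+//	if err := key.ReadFrom(recordKey); err == nil && key.Type <= 1 {
+//		var marker EndTxnMarker // key types 0 and 1 carry an EndTxnMarker value
+//		err = marker.ReadFrom(recordValue)
+//	}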
+type ControlRecordKey struct { + Version int16 + + Type ControlRecordKeyType +} + +func (v *ControlRecordKey) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.Version + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Type + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + return dst +} + +func (v *ControlRecordKey) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ControlRecordKey) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ControlRecordKey) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + v.Version = b.Int16() + version := v.Version + _ = version + s := v + { + var t ControlRecordKeyType + { + v := b.Int8() + t = ControlRecordKeyType(v) + } + v := t + s.Type = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ControlRecordKey. +func (v *ControlRecordKey) Default() { +} + +// NewControlRecordKey returns a default ControlRecordKey +// This is a shortcut for creating a struct and calling Default yourself. +func NewControlRecordKey() ControlRecordKey { + var v ControlRecordKey + v.Default() + return v +} + +// EndTxnMarker is the value for a control record when the key is type 0 or 1. +type EndTxnMarker struct { + Version int16 + + CoordinatorEpoch int32 +} + +func (v *EndTxnMarker) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.Version + dst = kbin.AppendInt16(dst, v) + } + { + v := v.CoordinatorEpoch + dst = kbin.AppendInt32(dst, v) + } + return dst +} + +func (v *EndTxnMarker) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *EndTxnMarker) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *EndTxnMarker) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + v.Version = b.Int16() + version := v.Version + _ = version + s := v + { + v := b.Int32() + s.CoordinatorEpoch = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to EndTxnMarker. +func (v *EndTxnMarker) Default() { +} + +// NewEndTxnMarker returns a default EndTxnMarker +// This is a shortcut for creating a struct and calling Default yourself. +func NewEndTxnMarker() EndTxnMarker { + var v EndTxnMarker + v.Default() + return v +} + +type LeaderChangeMessageVoter struct { + VoterID int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to LeaderChangeMessageVoter. +func (v *LeaderChangeMessageVoter) Default() { +} + +// NewLeaderChangeMessageVoter returns a default LeaderChangeMessageVoter +// This is a shortcut for creating a struct and calling Default yourself. +func NewLeaderChangeMessageVoter() LeaderChangeMessageVoter { + var v LeaderChangeMessageVoter + v.Default() + return v +} + +// LeaderChangeMessage is the value for a control record when the key is type 3. +type LeaderChangeMessage struct { + Version int16 + + // The ID of the newly elected leader. + LeaderID int32 + + // The set of voters in the quorum for this epoch. + Voters []LeaderChangeMessageVoter + + // The voters who voted for the leader at the time of election. 
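+	// This is generally a subset of Voters.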
+ GrantingVoters []LeaderChangeMessageVoter + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (v *LeaderChangeMessage) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.Version + dst = kbin.AppendInt16(dst, v) + } + { + v := v.LeaderID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Voters + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.VoterID + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.GrantingVoters + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.VoterID + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *LeaderChangeMessage) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *LeaderChangeMessage) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *LeaderChangeMessage) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + v.Version = b.Int16() + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.LeaderID = v + } + { + v := s.Voters + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]LeaderChangeMessageVoter, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.VoterID = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Voters = v + } + { + v := s.GrantingVoters + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]LeaderChangeMessageVoter, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.VoterID = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.GrantingVoters = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} +func (v *LeaderChangeMessage) IsFlexible() bool { return v.Version >= 0 } + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to LeaderChangeMessage. +func (v *LeaderChangeMessage) Default() { +} + +// NewLeaderChangeMessage returns a default LeaderChangeMessage +// This is a shortcut for creating a struct and calling Default yourself. +func NewLeaderChangeMessage() LeaderChangeMessage { + var v LeaderChangeMessage + v.Default() + return v +} + +type ProduceRequestTopicPartition struct { + // Partition is a partition to send a record batch to. + Partition int32 + + // Records is a batch of records to write to a topic's partition. + // + // For Kafka pre 0.11.0, the contents of the byte array is a serialized + // message set. 
At or after 0.11.0, the contents of the byte array is a + // serialized RecordBatch. + Records []byte + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v9+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ProduceRequestTopicPartition. +func (v *ProduceRequestTopicPartition) Default() { +} + +// NewProduceRequestTopicPartition returns a default ProduceRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewProduceRequestTopicPartition() ProduceRequestTopicPartition { + var v ProduceRequestTopicPartition + v.Default() + return v +} + +type ProduceRequestTopic struct { + // Topic is a topic to send record batches to. + Topic string + + // Partitions is an array of partitions to send record batches to. + Partitions []ProduceRequestTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v9+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ProduceRequestTopic. +func (v *ProduceRequestTopic) Default() { +} + +// NewProduceRequestTopic returns a default ProduceRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewProduceRequestTopic() ProduceRequestTopic { + var v ProduceRequestTopic + v.Default() + return v +} + +// ProduceRequest issues records to be created to Kafka. +// +// Kafka 0.10.0 (v2) changed Records from MessageSet v0 to MessageSet v1. +// Kafka 0.11.0 (v3) again changed Records to RecordBatch. +// +// Note that the special client ID "__admin_client" will allow you to produce +// records to internal topics. This is generally recommended if you want to +// break your Kafka cluster. +type ProduceRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // TransactionID is the transaction ID to use for this request, allowing for + // exactly once semantics. + TransactionID *string // v3+ + + // Acks specifies the number of acks that the partition leaders must receive + // from in sync replicas before considering a record batch fully written. + // + // Valid values are -1, 0, or 1 corresponding to all, none, or the leader only. + // + // Note that if no acks are requested, Kafka will close the connection + // if any topic or partition errors to trigger a client metadata refresh. + Acks int16 + + // TimeoutMillis is how long Kafka can wait before responding to this request. + // This field has no effect on Kafka's processing of the request; the request + // will continue to be processed if the timeout is reached. If the timeout is + // reached, Kafka will reply with a REQUEST_TIMED_OUT error. + // + // This field has a default of 15000. + TimeoutMillis int32 + + // Topics is an array of topics to send record batches to. + Topics []ProduceRequestTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
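+	// Tagged fields exist only in flexible versions of the protocol, which for
+	// produce requests means v9 and above (see IsFlexible below).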
+	UnknownTags Tags // v9+
+}
+
+func (*ProduceRequest) Key() int16                 { return 0 }
+func (*ProduceRequest) MaxVersion() int16          { return 11 }
+func (v *ProduceRequest) SetVersion(version int16) { v.Version = version }
+func (v *ProduceRequest) GetVersion() int16        { return v.Version }
+func (v *ProduceRequest) IsFlexible() bool         { return v.Version >= 9 }
+func (v *ProduceRequest) Timeout() int32           { return v.TimeoutMillis }
+func (v *ProduceRequest) SetTimeout(timeoutMillis int32) { v.TimeoutMillis = timeoutMillis }
+func (v *ProduceRequest) ResponseKind() Response {
+	r := &ProduceResponse{Version: v.Version}
+	r.Default()
+	return r
+}
+
+// RequestWith requests v on r and returns the response or an error.
+// For sharded requests, the response may be merged and still return an error.
+// It is better to rely on client.RequestSharded than to rely on proper merging behavior.
+func (v *ProduceRequest) RequestWith(ctx context.Context, r Requestor) (*ProduceResponse, error) {
+	kresp, err := r.Request(ctx, v)
+	resp, _ := kresp.(*ProduceResponse)
+	return resp, err
+}
+
+func (v *ProduceRequest) AppendTo(dst []byte) []byte {
+	version := v.Version
+	_ = version
+	isFlexible := version >= 9
+	_ = isFlexible
+	if version >= 3 {
+		v := v.TransactionID
+		if isFlexible {
+			dst = kbin.AppendCompactNullableString(dst, v)
+		} else {
+			dst = kbin.AppendNullableString(dst, v)
+		}
+	}
+	{
+		v := v.Acks
+		dst = kbin.AppendInt16(dst, v)
+	}
+	{
+		v := v.TimeoutMillis
+		dst = kbin.AppendInt32(dst, v)
+	}
+	{
+		v := v.Topics
+		if isFlexible {
+			dst = kbin.AppendCompactArrayLen(dst, len(v))
+		} else {
+			dst = kbin.AppendArrayLen(dst, len(v))
+		}
+		for i := range v {
+			v := &v[i]
+			{
+				v := v.Topic
+				if isFlexible {
+					dst = kbin.AppendCompactString(dst, v)
+				} else {
+					dst = kbin.AppendString(dst, v)
+				}
+			}
+			{
+				v := v.Partitions
+				if isFlexible {
+					dst = kbin.AppendCompactArrayLen(dst, len(v))
+				} else {
+					dst = kbin.AppendArrayLen(dst, len(v))
+				}
+				for i := range v {
+					v := &v[i]
+					{
+						v := v.Partition
+						dst = kbin.AppendInt32(dst, v)
+					}
+					{
+						v := v.Records
+						if isFlexible {
+							dst = kbin.AppendCompactNullableBytes(dst, v)
+						} else {
+							dst = kbin.AppendNullableBytes(dst, v)
+						}
+					}
+					if isFlexible {
+						dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len()))
+						dst = v.UnknownTags.AppendEach(dst)
+					}
+				}
+			}
+			if isFlexible {
+				dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len()))
+				dst = v.UnknownTags.AppendEach(dst)
+			}
+		}
+	}
+	if isFlexible {
+		dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len()))
+		dst = v.UnknownTags.AppendEach(dst)
+	}
+	return dst
+}
+
+func (v *ProduceRequest) ReadFrom(src []byte) error {
+	return v.readFrom(src, false)
+}
+
+func (v *ProduceRequest) UnsafeReadFrom(src []byte) error {
+	return v.readFrom(src, true)
+}
+
+func (v *ProduceRequest) readFrom(src []byte, unsafe bool) error {
+	v.Default()
+	b := kbin.Reader{Src: src}
+	version := v.Version
+	_ = version
+	isFlexible := version >= 9
+	_ = isFlexible
+	s := v
+	if version >= 3 {
+		var v *string
+		if isFlexible {
+			if unsafe {
+				v = b.UnsafeCompactNullableString()
+			} else {
+				v = b.CompactNullableString()
+			}
+		} else {
+			if unsafe {
+				v = b.UnsafeNullableString()
+			} else {
+				v = b.NullableString()
+			}
+		}
+		s.TransactionID = v
+	}
+	{
+		v := b.Int16()
+		s.Acks = v
+	}
+	{
+		v := b.Int32()
+		s.TimeoutMillis = v
+	}
+	{
+		v := s.Topics
+		a := v
+		var l int32
+		if isFlexible {
+			l = b.CompactArrayLen()
+		} else {
+			l = b.ArrayLen()
+		}
+		if !b.Ok() {
+			return b.Complete()
+		}
+		a = a[:0]
+		if l > 0 {
+			a = 
append(a, make([]ProduceRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ProduceRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + var v []byte + if isFlexible { + v = b.CompactNullableBytes() + } else { + v = b.NullableBytes() + } + s.Records = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrProduceRequest returns a pointer to a default ProduceRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrProduceRequest() *ProduceRequest { + var v ProduceRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ProduceRequest. +func (v *ProduceRequest) Default() { + v.TimeoutMillis = 15000 +} + +// NewProduceRequest returns a default ProduceRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewProduceRequest() ProduceRequest { + var v ProduceRequest + v.Default() + return v +} + +type ProduceResponseTopicPartitionErrorRecord struct { + // RelativeOffset is the offset of the record that caused problems. + RelativeOffset int32 + + // ErrorMessage is the error of this record. + ErrorMessage *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v9+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ProduceResponseTopicPartitionErrorRecord. +func (v *ProduceResponseTopicPartitionErrorRecord) Default() { +} + +// NewProduceResponseTopicPartitionErrorRecord returns a default ProduceResponseTopicPartitionErrorRecord +// This is a shortcut for creating a struct and calling Default yourself. +func NewProduceResponseTopicPartitionErrorRecord() ProduceResponseTopicPartitionErrorRecord { + var v ProduceResponseTopicPartitionErrorRecord + v.Default() + return v +} + +type ProduceResponseTopicPartitionCurrentLeader struct { + // The ID of the current leader, or -1 if unknown. + // + // This field has a default of -1. + LeaderID int32 + + // The latest known leader epoch. + // + // This field has a default of -1. + LeaderEpoch int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v9+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ProduceResponseTopicPartitionCurrentLeader. +func (v *ProduceResponseTopicPartitionCurrentLeader) Default() { + v.LeaderID = -1 + v.LeaderEpoch = -1 +} + +// NewProduceResponseTopicPartitionCurrentLeader returns a default ProduceResponseTopicPartitionCurrentLeader +// This is a shortcut for creating a struct and calling Default yourself. 
+func NewProduceResponseTopicPartitionCurrentLeader() ProduceResponseTopicPartitionCurrentLeader {
+	var v ProduceResponseTopicPartitionCurrentLeader
+	v.Default()
+	return v
+}
+
+type ProduceResponseTopicPartition struct {
+	// Partition is the partition this response pertains to.
+	Partition int32
+
+	// ErrorCode is any error for a topic/partition in the request.
+	// There are many error codes for produce requests.
+	//
+	// TRANSACTIONAL_ID_AUTHORIZATION_FAILED is returned for all topics and
+	// partitions if the request had a transactional ID but the client
+	// is not authorized for transactions.
+	//
+	// CLUSTER_AUTHORIZATION_FAILED is returned for all topics and partitions
+	// if the request was idempotent but the client is not authorized
+	// for idempotent requests.
+	//
+	// TOPIC_AUTHORIZATION_FAILED is returned for all topics the client
+	// is not authorized to talk to.
+	//
+	// INVALID_REQUIRED_ACKS is returned if the request contained an invalid
+	// number for "acks".
+	//
+	// CORRUPT_MESSAGE is returned for many reasons, generally related to
+	// problems with messages (invalid magic, size mismatch, etc.).
+	//
+	// MESSAGE_TOO_LARGE is returned if a record batch is larger than the
+	// broker's configured message.max.bytes.
+	//
+	// RECORD_LIST_TOO_LARGE is returned if the record batch is larger than
+	// the broker's segment.bytes.
+	//
+	// INVALID_TIMESTAMP is returned if the record batch uses LogAppendTime
+	// or if the timestamp delta from when the broker receives the message
+	// is more than the broker's log.message.timestamp.difference.max.ms.
+	//
+	// UNSUPPORTED_FOR_MESSAGE_FORMAT is returned if using a Kafka v2 message
+	// format (i.e. RecordBatch) feature (idempotence) while sending v1
+	// messages (i.e. a MessageSet).
+	//
+	// KAFKA_STORAGE_ERROR is returned if the log directory for a partition
+	// is offline.
+	//
+	// NOT_ENOUGH_REPLICAS is returned if all acks are required, but there
+	// are not enough in sync replicas yet.
+	//
+	// NOT_ENOUGH_REPLICAS_AFTER_APPEND is returned on old Kafka versions
+	// (pre 0.11.0.0) when a message was written to disk and then Kafka
+	// noticed not enough replicas existed to replicate the message.
+	//
+	// DUPLICATE_SEQUENCE_NUMBER is returned for Kafka <1.1.0 when a
+	// sequence number is detected as a duplicate. After, OUT_OF_ORDER_SEQUENCE_NUMBER
+	// is returned.
+	//
+	// UNKNOWN_TOPIC_OR_PARTITION is returned if the topic or partition
+	// is unknown.
+	//
+	// NOT_LEADER_FOR_PARTITION is returned if the broker is not a leader
+	// for this partition. This means that the client has stale metadata.
+	//
+	// INVALID_PRODUCER_EPOCH is returned if the produce request was
+	// attempted with an old epoch. Either there is a newer producer using
+	// the same transaction ID, or the transaction ID used has expired.
+	//
+	// UNKNOWN_PRODUCER_ID, added in Kafka 1.0.0 (message format v5+), is
+	// returned if the producer used an ID that Kafka does not know about or
+	// if the request has a larger sequence number than Kafka expects. The
+	// LogStartOffset must be checked in this case. If the offset is greater
+	// than the last acknowledged offset, then no data loss has occurred; the
+	// client just sent data so long ago that Kafka rotated the partition out
+	// of existence and no longer knows of this producer ID. In this case,
+	// reset your sequence numbers to 0. If the log start offset is equal to
+	// or less than what the client sent prior, then data loss has occurred.
+	// See KAFKA-5793 for more details. NOTE: Unfortunately, even UNKNOWN_PRODUCER_ID
+	// is unsafe to handle, so this error should likely be treated the same
+	// as OUT_OF_ORDER_SEQUENCE_NUMBER. See KIP-360 for more details.
+	//
+	// OUT_OF_ORDER_SEQUENCE_NUMBER is sent if the batch's FirstSequence was
+	// not what it should be (the last FirstSequence, plus the number of
+	// records in the last batch, plus one). After 1.0.0, this generally
+	// means data loss. Before, there could be confusion on if the broker
+	// actually rotated the partition out of existence (this is why
+	// UNKNOWN_PRODUCER_ID was introduced).
+	ErrorCode int16
+
+	// BaseOffset is the offset that the records in the produce request began
+	// at in the partition.
+	BaseOffset int64
+
+	// LogAppendTime is the millisecond that records were appended to the
+	// partition inside Kafka. This is only not -1 if records were written
+	// with the log append time flag (which producers cannot do).
+	//
+	// This field has a default of -1.
+	LogAppendTime int64 // v2+
+
+	// LogStartOffset, introduced in Kafka 1.0.0, can be used to see if an
+	// UNKNOWN_PRODUCER_ID means Kafka rotated records containing the used
+	// producer ID out of existence, or if Kafka lost data.
+	//
+	// This field has a default of -1.
+	LogStartOffset int64 // v5+
+
+	// ErrorRecords are indices of individual records that caused a batch
+	// to error. This was added for KIP-467.
+	ErrorRecords []ProduceResponseTopicPartitionErrorRecord // v8+
+
+	// ErrorMessage is the global error message of what caused this batch
+	// to error.
+	ErrorMessage *string // v8+
+
+	CurrentLeader ProduceResponseTopicPartitionCurrentLeader // tag 0
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+	UnknownTags Tags // v9+
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to ProduceResponseTopicPartition.
+func (v *ProduceResponseTopicPartition) Default() {
+	v.LogAppendTime = -1
+	v.LogStartOffset = -1
+	{
+		v := &v.CurrentLeader
+		_ = v
+		v.LeaderID = -1
+		v.LeaderEpoch = -1
+	}
+}
+
+// NewProduceResponseTopicPartition returns a default ProduceResponseTopicPartition
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewProduceResponseTopicPartition() ProduceResponseTopicPartition {
+	var v ProduceResponseTopicPartition
+	v.Default()
+	return v
+}
+
+type ProduceResponseTopic struct {
+	// Topic is the topic this response pertains to.
+	Topic string
+
+	// Partitions is an array of responses for the partitions that
+	// batches were sent to.
+	Partitions []ProduceResponseTopicPartition
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+	UnknownTags Tags // v9+
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to ProduceResponseTopic.
+func (v *ProduceResponseTopic) Default() {
+}
+
+// NewProduceResponseTopic returns a default ProduceResponseTopic
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewProduceResponseTopic() ProduceResponseTopic {
+	var v ProduceResponseTopic
+	v.Default()
+	return v
+}
+
+type ProduceResponseBroker struct {
+	// NodeID is the node ID of a Kafka broker.
+	NodeID int32
+
+	// Host is the hostname of a Kafka broker.
+	Host string
+
+	// Port is the port of a Kafka broker.
+	Port int32
+
+	// Rack is the rack this Kafka broker is in.
+	Rack *string
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+	UnknownTags Tags // v9+
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to ProduceResponseBroker.
+func (v *ProduceResponseBroker) Default() {
+}
+
+// NewProduceResponseBroker returns a default ProduceResponseBroker
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewProduceResponseBroker() ProduceResponseBroker {
+	var v ProduceResponseBroker
+	v.Default()
+	return v
+}
+
+// ProduceResponse is returned from a ProduceRequest.
+type ProduceResponse struct {
+	// Version is the version of this message used with a Kafka broker.
+	Version int16
+
+	// Topics is an array of responses for the topics that batches were sent
+	// to.
+	Topics []ProduceResponseTopic
+
+	// ThrottleMillis is how long of a throttle Kafka will apply to the client
+	// after this request.
+	// For Kafka < 2.0.0, the throttle is applied before issuing a response.
+	// For Kafka >= 2.0.0, the throttle is applied after issuing a response.
+	//
+	// This request switched at version 6.
+	ThrottleMillis int32 // v1+
+
+	// Brokers is present if any partition responses contain the error
+	// NOT_LEADER_OR_FOLLOWER.
+	Brokers []ProduceResponseBroker // tag 0
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+	UnknownTags Tags // v9+
+}
+
+func (*ProduceResponse) Key() int16                 { return 0 }
+func (*ProduceResponse) MaxVersion() int16          { return 11 }
+func (v *ProduceResponse) SetVersion(version int16) { v.Version = version }
+func (v *ProduceResponse) GetVersion() int16        { return v.Version }
+func (v *ProduceResponse) IsFlexible() bool         { return v.Version >= 9 }
+func (v *ProduceResponse) Throttle() (int32, bool)  { return v.ThrottleMillis, v.Version >= 6 }
+func (v *ProduceResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis }
+func (v *ProduceResponse) RequestKind() Request     { return &ProduceRequest{Version: v.Version} }
+
+func (v *ProduceResponse) AppendTo(dst []byte) []byte {
+	version := v.Version
+	_ = version
+	isFlexible := version >= 9
+	_ = isFlexible
+	{
+		v := v.Topics
+		if isFlexible {
+			dst = kbin.AppendCompactArrayLen(dst, len(v))
+		} else {
+			dst = kbin.AppendArrayLen(dst, len(v))
+		}
+		for i := range v {
+			v := &v[i]
+			{
+				v := v.Topic
+				if isFlexible {
+					dst = kbin.AppendCompactString(dst, v)
+				} else {
+					dst = kbin.AppendString(dst, v)
+				}
+			}
+			{
+				v := v.Partitions
+				if isFlexible {
+					dst = kbin.AppendCompactArrayLen(dst, len(v))
+				} else {
+					dst = kbin.AppendArrayLen(dst, len(v))
+				}
+				for i := range v {
+					v := &v[i]
+					{
+						v := v.Partition
+						dst = kbin.AppendInt32(dst, v)
+					}
+					{
+						v := v.ErrorCode
+						dst = kbin.AppendInt16(dst, v)
+					}
+					{
+						v := v.BaseOffset
+						dst = kbin.AppendInt64(dst, v)
+					}
+					if version >= 2 {
+						v := v.LogAppendTime
+						dst = kbin.AppendInt64(dst, v)
+					}
+					if version >= 5 {
+						v := v.LogStartOffset
+						dst = kbin.AppendInt64(dst, v)
+					}
+					if version >= 8 {
+						v := v.ErrorRecords
+						if isFlexible {
+							dst = kbin.AppendCompactArrayLen(dst, len(v))
+						} else {
+							dst = kbin.AppendArrayLen(dst, len(v))
+						}
+						for i := range v {
+							v := &v[i]
+							{
+								v := v.RelativeOffset
+								dst = kbin.AppendInt32(dst, v)
+							}
+							{
+								v := v.ErrorMessage
+								if isFlexible {
+									dst = kbin.AppendCompactNullableString(dst, v)
+								} else {
+									dst = kbin.AppendNullableString(dst, v)
+								}
+							}
+							if isFlexible {
+								dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len()))
+								dst = v.UnknownTags.AppendEach(dst)
+							}
+						}
+					}
+					if version >= 8 {
+						v := v.ErrorMessage
+						if isFlexible {
+							dst = 
kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + var toEncode []uint32 + if !reflect.DeepEqual(v.CurrentLeader, (func() ProduceResponseTopicPartitionCurrentLeader { + var v ProduceResponseTopicPartitionCurrentLeader + v.Default() + return v + })()) { + toEncode = append(toEncode, 0) + } + dst = kbin.AppendUvarint(dst, uint32(len(toEncode)+v.UnknownTags.Len())) + for _, tag := range toEncode { + switch tag { + case 0: + { + v := v.CurrentLeader + dst = kbin.AppendUvarint(dst, 0) + sized := false + lenAt := len(dst) + fCurrentLeader: + { + v := v.LeaderID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + if !sized { + dst = kbin.AppendUvarint(dst[:lenAt], uint32(len(dst[lenAt:]))) + sized = true + goto fCurrentLeader + } + } + } + } + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 1 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + var toEncode []uint32 + if len(v.Brokers) > 0 { + toEncode = append(toEncode, 0) + } + dst = kbin.AppendUvarint(dst, uint32(len(toEncode)+v.UnknownTags.Len())) + for _, tag := range toEncode { + switch tag { + case 0: + { + v := v.Brokers + dst = kbin.AppendUvarint(dst, 0) + sized := false + lenAt := len(dst) + fBrokers: + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.NodeID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Host + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Port + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Rack + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + if !sized { + dst = kbin.AppendUvarint(dst[:lenAt], uint32(len(dst[lenAt:]))) + sized = true + goto fBrokers + } + } + } + } + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ProduceResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ProduceResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ProduceResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 9 + _ = isFlexible + s := v + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ProduceResponseTopic, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ProduceResponseTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int64() + s.BaseOffset = v + } + if version >= 2 { + v := b.Int64() + s.LogAppendTime = v + } + if version >= 5 { + v := b.Int64() + s.LogStartOffset = v + } + if version >= 8 { + v := s.ErrorRecords + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ProduceResponseTopicPartitionErrorRecord, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.RelativeOffset = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.ErrorRecords = v + } + if version >= 8 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + if isFlexible { + for i := b.Uvarint(); i > 0; i-- { + switch key := b.Uvarint(); key { + default: + s.UnknownTags.Set(key, b.Span(int(b.Uvarint()))) + case 0: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + v := &s.CurrentLeader + v.Default() + s := v + { + v := b.Int32() + s.LeaderID = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + if err := b.Complete(); err != nil { + return err + } + } + } + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if version >= 1 { + v := b.Int32() + s.ThrottleMillis = v + } + if isFlexible { + for i := b.Uvarint(); i > 0; i-- { + switch key := b.Uvarint(); key { + default: + s.UnknownTags.Set(key, b.Span(int(b.Uvarint()))) + case 0: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + v := s.Brokers + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ProduceResponseBroker, l)...) 
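+					// The broker list arrives as tagged field 0 (added by KIP-951) so a
+					// client can redirect to a partition's new leader without waiting on
+					// a full metadata refresh.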
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.NodeID = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Host = v + } + { + v := b.Int32() + s.Port = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Rack = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Brokers = v + if err := b.Complete(); err != nil { + return err + } + } + } + } + return b.Complete() +} + +// NewPtrProduceResponse returns a pointer to a default ProduceResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrProduceResponse() *ProduceResponse { + var v ProduceResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ProduceResponse. +func (v *ProduceResponse) Default() { +} + +// NewProduceResponse returns a default ProduceResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewProduceResponse() ProduceResponse { + var v ProduceResponse + v.Default() + return v +} + +type FetchRequestReplicaState struct { + // The replica ID of the follower, or -1 if this request is from a consumer. + // + // This field has a default of -1. + ID int32 + + // The epoch of this follower, or -1 if not available. + // + // This field has a default of -1. + Epoch int64 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v12+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchRequestReplicaState. +func (v *FetchRequestReplicaState) Default() { + v.ID = -1 + v.Epoch = -1 +} + +// NewFetchRequestReplicaState returns a default FetchRequestReplicaState +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchRequestReplicaState() FetchRequestReplicaState { + var v FetchRequestReplicaState + v.Default() + return v +} + +type FetchRequestTopicPartition struct { + // Partition is a partition in a topic to try to fetch records for. + Partition int32 + + // CurrentLeaderEpoch, proposed in KIP-320 and introduced in Kafka 2.1.0, + // allows brokers to check if the client is fenced (has an out of date + // leader) or is using an unknown leader. + // + // The initial leader epoch can be determined from a MetadataResponse. + // To skip log truncation checking, use -1. + // + // This field has a default of -1. + CurrentLeaderEpoch int32 // v9+ + + // FetchOffset is the offset to begin the fetch from. Kafka will + // return records at and after this offset. + FetchOffset int64 + + // The epoch of the last fetched record, or -1 if there is none. + // + // This field has a default of -1. + LastFetchedEpoch int32 // v12+ + + // LogStartOffset is a broker-follower only field added for KIP-107. + // This is the start offset of the partition in a follower. + // + // This field has a default of -1. + LogStartOffset int64 // v5+ + + // PartitionMaxBytes is the maximum bytes to return for this partition. 
+ // This can be used to limit how many bytes an individual partition in + // a request is allotted so that it does not dominate all of MaxBytes. + PartitionMaxBytes int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v12+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchRequestTopicPartition. +func (v *FetchRequestTopicPartition) Default() { + v.CurrentLeaderEpoch = -1 + v.LastFetchedEpoch = -1 + v.LogStartOffset = -1 +} + +// NewFetchRequestTopicPartition returns a default FetchRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchRequestTopicPartition() FetchRequestTopicPartition { + var v FetchRequestTopicPartition + v.Default() + return v +} + +type FetchRequestTopic struct { + // Topic is a topic to try to fetch records for. + Topic string // v0-v12 + + // TopicID is the uuid of the topic to fetch records for. + TopicID [16]byte // v13+ + + // Partitions contains partitions in a topic to try to fetch records for. + Partitions []FetchRequestTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v12+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchRequestTopic. +func (v *FetchRequestTopic) Default() { +} + +// NewFetchRequestTopic returns a default FetchRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchRequestTopic() FetchRequestTopic { + var v FetchRequestTopic + v.Default() + return v +} + +type FetchRequestForgottenTopic struct { + // Topic is a topic to remove from being tracked (with the partitions below). + Topic string // v7-v12 + + // TopicID is the uuid of a topic to remove from being tracked (with the + // partitions below). + TopicID [16]byte // v13+ + + // Partitions are partitions to remove from tracking for a topic. + Partitions []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v12+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchRequestForgottenTopic. +func (v *FetchRequestForgottenTopic) Default() { +} + +// NewFetchRequestForgottenTopic returns a default FetchRequestForgottenTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchRequestForgottenTopic() FetchRequestForgottenTopic { + var v FetchRequestForgottenTopic + v.Default() + return v +} + +// FetchRequest is a long-poll request of records from Kafka. +// +// Kafka 0.11.0.0 released v4 and changed the returned RecordBatches to contain +// the RecordBatch type. Prior, Kafka used the MessageSet type (and, for v0 and +// v1, Kafka used a different type). +// +// Note that starting in v3, Kafka began processing partitions in order, +// meaning the order of partitions in the fetch request is important due to +// potential size constraints. +// +// Starting in v13, topics must use UUIDs rather than their string name +// identifiers. +// +// Version 15 adds the ReplicaState which includes new field ReplicaEpoch and +// the ReplicaID, and deprecates the old ReplicaID (KIP-903). +type FetchRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // The cluster ID, if known. 
This is used to validate metadata fetches + // prior to broker registration. + // + // This field has a default of null. + ClusterID *string // tag 0 + + // ReplicaID is the ID of the broker performing the fetch request. Standard + // clients should use -1. To be a "debug" replica, use -2. The debug + // replica can be used to fetch messages from non-leaders. + // + // This field has a default of -1. + ReplicaID int32 // v0-v14 + + // ReplicaState is a broker-only tag for v15+, see KIP-903 for more details. + ReplicaState FetchRequestReplicaState // tag 1 + + // MaxWaitMillis is how long to wait for MinBytes to be hit before a broker + // responds to a fetch request. + MaxWaitMillis int32 + + // MinBytes is the minimum amount of bytes to attempt to read before a broker + // responds to a fetch request. + MinBytes int32 + + // MaxBytes is the maximum amount of bytes to read in a fetch request. The + // response can exceed MaxBytes if the first record in the first non-empty + // partition is larger than MaxBytes. + // + // This field has a default of 0x7fffffff. + MaxBytes int32 // v3+ + + // IsolationLevel changes which messages are fetched. Follower replica IDs + // (non-negative, non-standard-client) fetch from the end. + // + // Standard clients fetch from the high watermark, which corresponds to + // IsolationLevel 0, READ_UNCOMMITTED. + // + // To only read committed records, use IsolationLevel 1, corresponding to + // READ_COMMITTED. + IsolationLevel int8 // v4+ + + // SessionID is used to potentially reduce the amount of back and forth + // data between a client and a broker. If opting in to sessions, the first + // ID used should be 0, and thereafter (until session resets) the ID should + // be the ID returned in the fetch response. + // + // Read KIP-227 for more details. Use -1 if you want to disable sessions. + SessionID int32 // v7+ + + // SessionEpoch is the session epoch for this request if using sessions. + // + // Read KIP-227 for more details. Use -1 if you are not using sessions. + // + // This field has a default of -1. + SessionEpoch int32 // v7+ + + // Topics contains topics to try to fetch records for. + Topics []FetchRequestTopic + + // ForgottenTopics contains topics and partitions that a fetch session + // wants to remove from its session. + // + // See KIP-227 for more details. + ForgottenTopics []FetchRequestForgottenTopic // v7+ + + // Rack of the consumer making this request (see KIP-392; introduced in + // Kafka 2.2.0). + Rack string // v11+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v12+ +} + +func (*FetchRequest) Key() int16 { return 1 } +func (*FetchRequest) MaxVersion() int16 { return 16 } +func (v *FetchRequest) SetVersion(version int16) { v.Version = version } +func (v *FetchRequest) GetVersion() int16 { return v.Version } +func (v *FetchRequest) IsFlexible() bool { return v.Version >= 12 } +func (v *FetchRequest) ResponseKind() Response { + r := &FetchResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior.
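As a rough sketch of how the Fetch types above compose (assuming a ctx and a client that implements Requestor, such as franz-go's *kgo.Client; the topic name "events" is made up), a basic single-partition fetch looks like:

req := NewPtrFetchRequest()
req.MaxWaitMillis = 500 // long-poll for up to 500ms
req.MinBytes = 1        // respond as soon as any data is available
req.SessionEpoch = -1   // not using fetch sessions (KIP-227)
topic := NewFetchRequestTopic()
topic.Topic = "events" // v0-v12; v13+ would set TopicID instead
part := NewFetchRequestTopicPartition()
part.Partition = 0
part.FetchOffset = 0 // start from the beginning of the log
part.PartitionMaxBytes = 1 << 20
topic.Partitions = append(topic.Partitions, part)
req.Topics = append(req.Topics, topic)
resp, err := req.RequestWith(ctx, client)

Using the New constructors matters here: they set the sentinel defaults (CurrentLeaderEpoch, LastFetchedEpoch, LogStartOffset, and so on start at -1 rather than 0).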
+func (v *FetchRequest) RequestWith(ctx context.Context, r Requestor) (*FetchResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*FetchResponse) + return resp, err +} + +func (v *FetchRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 12 + _ = isFlexible + if version >= 0 && version <= 14 { + v := v.ReplicaID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.MaxWaitMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.MinBytes + dst = kbin.AppendInt32(dst, v) + } + if version >= 3 { + v := v.MaxBytes + dst = kbin.AppendInt32(dst, v) + } + if version >= 4 { + v := v.IsolationLevel + dst = kbin.AppendInt8(dst, v) + } + if version >= 7 { + v := v.SessionID + dst = kbin.AppendInt32(dst, v) + } + if version >= 7 { + v := v.SessionEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + if version >= 0 && version <= 12 { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 13 { + v := v.TopicID + dst = kbin.AppendUuid(dst, v) + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + if version >= 9 { + v := v.CurrentLeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.FetchOffset + dst = kbin.AppendInt64(dst, v) + } + if version >= 12 { + v := v.LastFetchedEpoch + dst = kbin.AppendInt32(dst, v) + } + if version >= 5 { + v := v.LogStartOffset + dst = kbin.AppendInt64(dst, v) + } + { + v := v.PartitionMaxBytes + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 7 { + v := v.ForgottenTopics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + if version >= 7 && version <= 12 { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 13 { + v := v.TopicID + dst = kbin.AppendUuid(dst, v) + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 11 { + v := v.Rack + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if isFlexible { + var toEncode []uint32 + if v.ClusterID != nil { + toEncode = append(toEncode, 0) + } + if !reflect.DeepEqual(v.ReplicaState, (func() FetchRequestReplicaState { var v FetchRequestReplicaState; v.Default(); return v })()) { + toEncode = append(toEncode, 1) + } + dst = kbin.AppendUvarint(dst, uint32(len(toEncode)+v.UnknownTags.Len())) + for _, tag := range toEncode { + switch tag { + case 0: + { + v := v.ClusterID + dst = kbin.AppendUvarint(dst, 
0) + sized := false + lenAt := len(dst) + fClusterID: + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + if !sized { + dst = kbin.AppendUvarint(dst[:lenAt], uint32(len(dst[lenAt:]))) + sized = true + goto fClusterID + } + } + case 1: + { + v := v.ReplicaState + dst = kbin.AppendUvarint(dst, 1) + sized := false + lenAt := len(dst) + fReplicaState: + { + v := v.ID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Epoch + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + if !sized { + dst = kbin.AppendUvarint(dst[:lenAt], uint32(len(dst[lenAt:]))) + sized = true + goto fReplicaState + } + } + } + } + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *FetchRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *FetchRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *FetchRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 12 + _ = isFlexible + s := v + if version >= 0 && version <= 14 { + v := b.Int32() + s.ReplicaID = v + } + { + v := b.Int32() + s.MaxWaitMillis = v + } + { + v := b.Int32() + s.MinBytes = v + } + if version >= 3 { + v := b.Int32() + s.MaxBytes = v + } + if version >= 4 { + v := b.Int8() + s.IsolationLevel = v + } + if version >= 7 { + v := b.Int32() + s.SessionID = v + } + if version >= 7 { + v := b.Int32() + s.SessionEpoch = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]FetchRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + if version >= 0 && version <= 12 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + if version >= 13 { + v := b.Uuid() + s.TopicID = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]FetchRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + if version >= 9 { + v := b.Int32() + s.CurrentLeaderEpoch = v + } + { + v := b.Int64() + s.FetchOffset = v + } + if version >= 12 { + v := b.Int32() + s.LastFetchedEpoch = v + } + if version >= 5 { + v := b.Int64() + s.LogStartOffset = v + } + { + v := b.Int32() + s.PartitionMaxBytes = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if version >= 7 { + v := s.ForgottenTopics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]FetchRequestForgottenTopic, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + if version >= 7 && version <= 12 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + if version >= 13 { + v := b.Uuid() + s.TopicID = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.ForgottenTopics = v + } + if version >= 11 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Rack = v + } + if isFlexible { + for i := b.Uvarint(); i > 0; i-- { + switch key := b.Uvarint(); key { + default: + s.UnknownTags.Set(key, b.Span(int(b.Uvarint()))) + case 0: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ClusterID = v + if err := b.Complete(); err != nil { + return err + } + case 1: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + v := &s.ReplicaState + v.Default() + s := v + { + v := b.Int32() + s.ID = v + } + { + v := b.Int64() + s.Epoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + if err := b.Complete(); err != nil { + return err + } + } + } + } + return b.Complete() +} + +// NewPtrFetchRequest returns a pointer to a default FetchRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrFetchRequest() *FetchRequest { + var v FetchRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchRequest. +func (v *FetchRequest) Default() { + v.ClusterID = nil + v.ReplicaID = -1 + { + v := &v.ReplicaState + _ = v + v.ID = -1 + v.Epoch = -1 + } + v.MaxBytes = 2147483647 + v.SessionEpoch = -1 +} + +// NewFetchRequest returns a default FetchRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchRequest() FetchRequest { + var v FetchRequest + v.Default() + return v +} + +type FetchResponseTopicPartitionDivergingEpoch struct { + // This field has a default of -1. + Epoch int32 + + // This field has a default of -1. + EndOffset int64 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v12+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchResponseTopicPartitionDivergingEpoch. +func (v *FetchResponseTopicPartitionDivergingEpoch) Default() { + v.Epoch = -1 + v.EndOffset = -1 +} + +// NewFetchResponseTopicPartitionDivergingEpoch returns a default FetchResponseTopicPartitionDivergingEpoch +// This is a shortcut for creating a struct and calling Default yourself. 
+func NewFetchResponseTopicPartitionDivergingEpoch() FetchResponseTopicPartitionDivergingEpoch { + var v FetchResponseTopicPartitionDivergingEpoch + v.Default() + return v +} + +type FetchResponseTopicPartitionCurrentLeader struct { + // The ID of the current leader, or -1 if unknown. + // + // This field has a default of -1. + LeaderID int32 + + // The latest known leader epoch. + // + // This field has a default of -1. + LeaderEpoch int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v12+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchResponseTopicPartitionCurrentLeader. +func (v *FetchResponseTopicPartitionCurrentLeader) Default() { + v.LeaderID = -1 + v.LeaderEpoch = -1 +} + +// NewFetchResponseTopicPartitionCurrentLeader returns a default FetchResponseTopicPartitionCurrentLeader +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchResponseTopicPartitionCurrentLeader() FetchResponseTopicPartitionCurrentLeader { + var v FetchResponseTopicPartitionCurrentLeader + v.Default() + return v +} + +type FetchResponseTopicPartitionSnapshotID struct { + // This field has a default of -1. + EndOffset int64 + + // This field has a default of -1. + Epoch int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v12+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchResponseTopicPartitionSnapshotID. +func (v *FetchResponseTopicPartitionSnapshotID) Default() { + v.EndOffset = -1 + v.Epoch = -1 +} + +// NewFetchResponseTopicPartitionSnapshotID returns a default FetchResponseTopicPartitionSnapshotID +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchResponseTopicPartitionSnapshotID() FetchResponseTopicPartitionSnapshotID { + var v FetchResponseTopicPartitionSnapshotID + v.Default() + return v +} + +type FetchResponseTopicPartitionAbortedTransaction struct { + // ProducerID is the producer ID that caused this aborted transaction. + ProducerID int64 + + // FirstOffset is the offset where this aborted transaction began. + FirstOffset int64 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v12+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchResponseTopicPartitionAbortedTransaction. +func (v *FetchResponseTopicPartitionAbortedTransaction) Default() { +} + +// NewFetchResponseTopicPartitionAbortedTransaction returns a default FetchResponseTopicPartitionAbortedTransaction +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchResponseTopicPartitionAbortedTransaction() FetchResponseTopicPartitionAbortedTransaction { + var v FetchResponseTopicPartitionAbortedTransaction + v.Default() + return v +} + +type FetchResponseTopicPartition struct { + // Partition is a partition in a topic that records may have been + // received for. + Partition int32 + + // ErrorCode is an error returned for an individual partition in a + // fetch request. + // + // TOPIC_AUTHORIZATION_FAILED is returned if the client is not + // authorized to read the partition. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if the topic or partition + // does not exist on this broker. 
+ // + // UNSUPPORTED_COMPRESSION_TYPE is returned if the request version was + // under 10 and the batch is compressed with zstd. + // + // UNSUPPORTED_VERSION is returned if the broker has records newer than + // the client can support (magic value) and the broker has disabled + // message downconversion. + // + // NOT_LEADER_FOR_PARTITION is returned if requesting data for this + // partition as a follower (non-negative ReplicaID) and the broker + // is not the leader for this partition. + // + // REPLICA_NOT_AVAILABLE is returned if the partition exists but + // the requested broker is not the leader for it. + // + // KAFKA_STORAGE_EXCEPTION is returned if the requested partition is + // offline. + // + // UNKNOWN_LEADER_EPOCH is returned if the request used a larger leader + // epoch than the broker knows of. + // + // FENCED_LEADER_EPOCH is returned if the request used a smaller leader + // epoch than the broker is at (see KIP-320). + // + // OFFSET_OUT_OF_RANGE is returned if requesting an offset past the + // current end offset or before the beginning offset. + // + // UNKNOWN_TOPIC_ID is returned if using UUIDs and the UUID is unknown + // (v13+ / Kafka 3.1+). + // + // OFFSET_MOVED_TO_TIERED_STORAGE is returned if a follower is trying to + // fetch from an offset that is now in tiered storage. + ErrorCode int16 + + // HighWatermark is the current high watermark for this partition, + // that is, the current offset that is on all in sync replicas. + HighWatermark int64 + + // LastStableOffset is the offset at which all prior offsets have + // been "decided". Non-transactional records are always decided + // immediately, but transactional records are only decided once + // they are committed or aborted. + // + // The LastStableOffset will always be at or under the HighWatermark. + // + // This field has a default of -1. + LastStableOffset int64 // v4+ + + // LogStartOffset is the beginning offset for this partition. + // This field was added for KIP-107. + // + // This field has a default of -1. + LogStartOffset int64 // v5+ + + // In case divergence is detected based on the LastFetchedEpoch and + // FetchOffset in the request, this field indicates the largest epoch and + // its end offset such that subsequent records are known to diverge. + DivergingEpoch FetchResponseTopicPartitionDivergingEpoch // tag 0 + + // CurrentLeader is the currently known leader ID and epoch for this + // partition. + CurrentLeader FetchResponseTopicPartitionCurrentLeader // tag 1 + + // In the case of fetching an offset less than the LogStartOffset, this + // is the end offset and epoch that should be used in the FetchSnapshot + // request. + SnapshotID FetchResponseTopicPartitionSnapshotID // tag 2 + + // AbortedTransactions is an array of aborted transactions within the + // returned offset range. This is only returned if the requested + // isolation level was READ_COMMITTED. + AbortedTransactions []FetchResponseTopicPartitionAbortedTransaction // v4+ + + // PreferredReadReplica is the preferred replica for the consumer + // to use on its next fetch request. See KIP-392. + // + // This field has a default of -1. + PreferredReadReplica int32 // v11+ + + // RecordBatches is an array of record batches for a topic partition. + // + // This is encoded as a raw byte array, with the standard int32 size + // prefix. One important catch to note is that the final element of the + // array may be **partial**. This is an optimization in Kafka that + // clients must deal with by discarding a partial trailing batch
+ // + // Starting v2, this transitioned to the MessageSet v1 format (and this + // would contain many MessageV1 structs). + // + // Starting v4, this transitioned to the RecordBatch format (thus this + // contains many RecordBatch structs). + RecordBatches []byte + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v12+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchResponseTopicPartition. +func (v *FetchResponseTopicPartition) Default() { + v.LastStableOffset = -1 + v.LogStartOffset = -1 + { + v := &v.DivergingEpoch + _ = v + v.Epoch = -1 + v.EndOffset = -1 + } + { + v := &v.CurrentLeader + _ = v + v.LeaderID = -1 + v.LeaderEpoch = -1 + } + { + v := &v.SnapshotID + _ = v + v.EndOffset = -1 + v.Epoch = -1 + } + v.PreferredReadReplica = -1 +} + +// NewFetchResponseTopicPartition returns a default FetchResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchResponseTopicPartition() FetchResponseTopicPartition { + var v FetchResponseTopicPartition + v.Default() + return v +} + +type FetchResponseTopic struct { + // Topic is a topic that records may have been received for. + Topic string // v0-v12 + + // TopicID is the uuid of a topic that records may have been received for. + TopicID [16]byte // v13+ + + // Partitions contains partitions in a topic that records may have + // been received for. + Partitions []FetchResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v12+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchResponseTopic. +func (v *FetchResponseTopic) Default() { +} + +// NewFetchResponseTopic returns a default FetchResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchResponseTopic() FetchResponseTopic { + var v FetchResponseTopic + v.Default() + return v +} + +type FetchResponseBroker struct { + // NodeID is the node ID of a Kafka broker. + NodeID int32 + + // Host is the hostname of a Kafka broker. + Host string + + // Port is the port of a Kafka broker. + Port int32 + + // Rack is the rack this Kafka broker is in. + Rack *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v12+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchResponseBroker. +func (v *FetchResponseBroker) Default() { +} + +// NewFetchResponseBroker returns a default FetchResponseBroker +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchResponseBroker() FetchResponseBroker { + var v FetchResponseBroker + v.Default() + return v +} + +// FetchResponse is returned from a FetchRequest. +type FetchResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 8. + ThrottleMillis int32 // v1+ + + // ErrorCode is a full-response error code for a fetch request. This was + // added in support of KIP-227. This error is only non-zero if using fetch + // sessions. 
+ // + // FETCH_SESSION_ID_NOT_FOUND is returned if the request used a + // session ID that the broker does not know of. + // + // INVALID_FETCH_SESSION_EPOCH is returned if the request used an + // invalid session epoch. + ErrorCode int16 // v7+ + + // SessionID is the id for this session if using sessions. + // + // See KIP-227 for more details. + SessionID int32 // v7+ + + // Topics contains an array of topic partitions and the records received + // for them. + Topics []FetchResponseTopic + + // Brokers is present if any partition responses contain the error + // NOT_LEADER_OR_FOLLOWER. + Brokers []FetchResponseBroker // tag 0 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v12+ +} + +func (*FetchResponse) Key() int16 { return 1 } +func (*FetchResponse) MaxVersion() int16 { return 16 } +func (v *FetchResponse) SetVersion(version int16) { v.Version = version } +func (v *FetchResponse) GetVersion() int16 { return v.Version } +func (v *FetchResponse) IsFlexible() bool { return v.Version >= 12 } +func (v *FetchResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 8 } +func (v *FetchResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *FetchResponse) RequestKind() Request { return &FetchRequest{Version: v.Version} } + +func (v *FetchResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 12 + _ = isFlexible + if version >= 1 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + if version >= 7 { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if version >= 7 { + v := v.SessionID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + if version >= 0 && version <= 12 { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 13 { + v := v.TopicID + dst = kbin.AppendUuid(dst, v) + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.HighWatermark + dst = kbin.AppendInt64(dst, v) + } + if version >= 4 { + v := v.LastStableOffset + dst = kbin.AppendInt64(dst, v) + } + if version >= 5 { + v := v.LogStartOffset + dst = kbin.AppendInt64(dst, v) + } + if version >= 4 { + v := v.AbortedTransactions + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + for i := range v { + v := &v[i] + { + v := v.ProducerID + dst = kbin.AppendInt64(dst, v) + } + { + v := v.FirstOffset + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 11 { + v := v.PreferredReadReplica + dst = kbin.AppendInt32(dst, v) + } + { + v := v.RecordBatches + if isFlexible { + dst = kbin.AppendCompactNullableBytes(dst, v) + } else { + dst = kbin.AppendNullableBytes(dst, v) + } + } + if isFlexible { + var toEncode []uint32 + if !reflect.DeepEqual(v.DivergingEpoch, (func() FetchResponseTopicPartitionDivergingEpoch { + var v 
FetchResponseTopicPartitionDivergingEpoch + v.Default() + return v + })()) { + toEncode = append(toEncode, 0) + } + if !reflect.DeepEqual(v.CurrentLeader, (func() FetchResponseTopicPartitionCurrentLeader { + var v FetchResponseTopicPartitionCurrentLeader + v.Default() + return v + })()) { + toEncode = append(toEncode, 1) + } + if !reflect.DeepEqual(v.SnapshotID, (func() FetchResponseTopicPartitionSnapshotID { + var v FetchResponseTopicPartitionSnapshotID + v.Default() + return v + })()) { + toEncode = append(toEncode, 2) + } + dst = kbin.AppendUvarint(dst, uint32(len(toEncode)+v.UnknownTags.Len())) + for _, tag := range toEncode { + switch tag { + case 0: + { + v := v.DivergingEpoch + dst = kbin.AppendUvarint(dst, 0) + sized := false + lenAt := len(dst) + fDivergingEpoch: + { + v := v.Epoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.EndOffset + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + if !sized { + dst = kbin.AppendUvarint(dst[:lenAt], uint32(len(dst[lenAt:]))) + sized = true + goto fDivergingEpoch + } + } + case 1: + { + v := v.CurrentLeader + dst = kbin.AppendUvarint(dst, 1) + sized := false + lenAt := len(dst) + fCurrentLeader: + { + v := v.LeaderID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + if !sized { + dst = kbin.AppendUvarint(dst[:lenAt], uint32(len(dst[lenAt:]))) + sized = true + goto fCurrentLeader + } + } + case 2: + { + v := v.SnapshotID + dst = kbin.AppendUvarint(dst, 2) + sized := false + lenAt := len(dst) + fSnapshotID: + { + v := v.EndOffset + dst = kbin.AppendInt64(dst, v) + } + { + v := v.Epoch + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + if !sized { + dst = kbin.AppendUvarint(dst[:lenAt], uint32(len(dst[lenAt:]))) + sized = true + goto fSnapshotID + } + } + } + } + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + var toEncode []uint32 + if len(v.Brokers) > 0 { + toEncode = append(toEncode, 0) + } + dst = kbin.AppendUvarint(dst, uint32(len(toEncode)+v.UnknownTags.Len())) + for _, tag := range toEncode { + switch tag { + case 0: + { + v := v.Brokers + dst = kbin.AppendUvarint(dst, 0) + sized := false + lenAt := len(dst) + fBrokers: + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.NodeID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Host + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Port + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Rack + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + if !sized { + dst = kbin.AppendUvarint(dst[:lenAt], uint32(len(dst[lenAt:]))) + sized = true + goto fBrokers + } + } + } + } + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *FetchResponse) ReadFrom(src 
[]byte) error { + return v.readFrom(src, false) +} + +func (v *FetchResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *FetchResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 12 + _ = isFlexible + s := v + if version >= 1 { + v := b.Int32() + s.ThrottleMillis = v + } + if version >= 7 { + v := b.Int16() + s.ErrorCode = v + } + if version >= 7 { + v := b.Int32() + s.SessionID = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]FetchResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + if version >= 0 && version <= 12 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + if version >= 13 { + v := b.Uuid() + s.TopicID = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]FetchResponseTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int64() + s.HighWatermark = v + } + if version >= 4 { + v := b.Int64() + s.LastStableOffset = v + } + if version >= 5 { + v := b.Int64() + s.LogStartOffset = v + } + if version >= 4 { + v := s.AbortedTransactions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 0 || l == 0 { + a = []FetchResponseTopicPartitionAbortedTransaction{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]FetchResponseTopicPartitionAbortedTransaction, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int64() + s.ProducerID = v + } + { + v := b.Int64() + s.FirstOffset = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.AbortedTransactions = v + } + if version >= 11 { + v := b.Int32() + s.PreferredReadReplica = v + } + { + var v []byte + if isFlexible { + v = b.CompactNullableBytes() + } else { + v = b.NullableBytes() + } + s.RecordBatches = v + } + if isFlexible { + for i := b.Uvarint(); i > 0; i-- { + switch key := b.Uvarint(); key { + default: + s.UnknownTags.Set(key, b.Span(int(b.Uvarint()))) + case 0: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + v := &s.DivergingEpoch + v.Default() + s := v + { + v := b.Int32() + s.Epoch = v + } + { + v := b.Int64() + s.EndOffset = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + if err := b.Complete(); err != nil { + return err + } + case 1: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + v := &s.CurrentLeader + v.Default() + s := v + { + v := b.Int32() + s.LeaderID = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + if err := b.Complete(); err != nil { + return err + } + case 2: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + v := &s.SnapshotID + v.Default() + s := v + { + v := b.Int64() + s.EndOffset = v + } + { + v := b.Int32() + s.Epoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + if err := b.Complete(); err != nil { + return err + } + } + } + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + for i := b.Uvarint(); i > 0; i-- { + switch key := b.Uvarint(); key { + default: + s.UnknownTags.Set(key, b.Span(int(b.Uvarint()))) + case 0: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + v := s.Brokers + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]FetchResponseBroker, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.NodeID = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Host = v + } + { + v := b.Int32() + s.Port = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Rack = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Brokers = v + if err := b.Complete(); err != nil { + return err + } + } + } + } + return b.Complete() +} + +// NewPtrFetchResponse returns a pointer to a default FetchResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrFetchResponse() *FetchResponse { + var v FetchResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchResponse. +func (v *FetchResponse) Default() { +} + +// NewFetchResponse returns a default FetchResponse +// This is a shortcut for creating a struct and calling Default yourself. 
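Because the readFrom path above branches on v.Version throughout, decoding raw response bytes by hand requires setting Version to the version the corresponding request was sent with before calling ReadFrom. A minimal sketch, where raw is assumed to hold one complete Fetch response body:

var resp FetchResponse
resp.Version = 12 // must match the request version; v12+ selects the flexible encoding
if err := resp.ReadFrom(raw); err != nil {
	return err // truncated or malformed body
}
// UnsafeReadFrom decodes identically but aliases strings into raw,
// so raw must not be modified or reused while resp is still in use.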
+func NewFetchResponse() FetchResponse { + var v FetchResponse + v.Default() + return v +} + +type ListOffsetsRequestTopicPartition struct { + // Partition is a partition of a topic to get offsets for. + Partition int32 + + // CurrentLeaderEpoch, proposed in KIP-320 and introduced in Kafka 2.1.0, + // allows brokers to check if the client is fenced (has an out of date + // leader) or is using an unknown leader. + // + // The initial leader epoch can be determined from a MetadataResponse. + // To skip log truncation checking, use -1. + // + // This field has a default of -1. + CurrentLeaderEpoch int32 // v4+ + + // Timestamp controls which offset to return in a response for this + // partition. + // + // The offset returned will be the one of the message whose timestamp is + // the first timestamp greater than or equal to this requested timestamp. + // + // If no such message is found, then no offset is returned (-1). + // + // There exist two special timestamps: -2 corresponds to the earliest + // timestamp, and -1 corresponds to the latest. + // + // If you are talking to Kafka 3.0+, there exists an additional special + // timestamp -3 that returns the latest timestamp produced so far and its + // corresponding offset. This is subtly different from the latest offset, + // because timestamps are client-side generated. More importantly though, + // because this returns the latest produced timestamp, this can be used + // to determine topic "liveness" (when was the last produce?). + // Previously, this was not easy to determine. See KIP-734 for more + // detail. + // + // If you are talking to Kafka 3.4+ and using request version 8+ (for + // KIP-405), the new special timestamp -4 returns the local log start + // offset. In the context of tiered storage, the earliest local log start + // offset is the offset actually available on disk on the broker. + Timestamp int64 + + // MaxNumOffsets is the maximum number of offsets to report. + // This was removed after v0. + // + // This field has a default of 1. + MaxNumOffsets int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListOffsetsRequestTopicPartition. +func (v *ListOffsetsRequestTopicPartition) Default() { + v.CurrentLeaderEpoch = -1 + v.MaxNumOffsets = 1 +} + +// NewListOffsetsRequestTopicPartition returns a default ListOffsetsRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewListOffsetsRequestTopicPartition() ListOffsetsRequestTopicPartition { + var v ListOffsetsRequestTopicPartition + v.Default() + return v +} + +type ListOffsetsRequestTopic struct { + // Topic is a topic to get offsets for. + Topic string + + // Partitions is an array of partitions in a topic to get offsets for. + Partitions []ListOffsetsRequestTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListOffsetsRequestTopic. +func (v *ListOffsetsRequestTopic) Default() { +} + +// NewListOffsetsRequestTopic returns a default ListOffsetsRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. 
+func NewListOffsetsRequestTopic() ListOffsetsRequestTopic { + var v ListOffsetsRequestTopic + v.Default() + return v +} + +// ListOffsetsRequest requests partition offsets from Kafka for use in +// consuming records. +// +// Version 5, introduced in Kafka 2.2.0, is the same as version 4. Using +// version 5 implies you support Kafka's OffsetNotAvailableException. +// See KIP-207 for details. +// +// Version 7, introduced in Kafka 3.0, supports -3 as a timestamp to return +// the timestamp and offset for the record with the largest timestamp. +// +// Version 8, introduced in Kafka 3.4, supports -4 as a timestamp to return +// the local log start offset (in the context of tiered storage, see KIP-405). +type ListOffsetsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ReplicaID is the broker ID to get offsets from. As a Kafka client, use -1. + // The consumer replica ID (-1) causes requests to only succeed if issued + // against the leader broker. + // + // This field has a default of -1. + ReplicaID int32 + + // IsolationLevel configures which record offsets are visible in the + // response. READ_UNCOMMITTED (0) makes all records visible. READ_COMMITTED + // (1) makes non-transactional and committed transactional records visible. + // READ_COMMITTED returns all offsets smaller than the last stable offset and + // includes aborted transactions (allowing consumers to discard aborted + // records). + IsolationLevel int8 // v2+ + + // Topics is an array of topics to get offsets for. + Topics []ListOffsetsRequestTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +func (*ListOffsetsRequest) Key() int16 { return 2 } +func (*ListOffsetsRequest) MaxVersion() int16 { return 8 } +func (v *ListOffsetsRequest) SetVersion(version int16) { v.Version = version } +func (v *ListOffsetsRequest) GetVersion() int16 { return v.Version } +func (v *ListOffsetsRequest) IsFlexible() bool { return v.Version >= 6 } +func (v *ListOffsetsRequest) ResponseKind() Response { + r := &ListOffsetsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior.
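A sketch of listing the current end offset of one partition, under the same assumptions as the fetch example earlier (a ctx, a Requestor client, and a made-up topic name):

req := NewPtrListOffsetsRequest()
topic := NewListOffsetsRequestTopic()
topic.Topic = "events"
part := NewListOffsetsRequestTopicPartition()
part.Partition = 0
part.Timestamp = -1 // latest; -2 is earliest, -3 (v7+, KIP-734) is max timestamp, -4 (v8+, KIP-405) is local log start
topic.Partitions = append(topic.Partitions, part)
req.Topics = append(req.Topics, topic)
resp, err := req.RequestWith(ctx, client)
// On success, resp.Topics[0].Partitions[0].Offset holds the answer,
// after checking that partition's ErrorCode.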
+func (v *ListOffsetsRequest) RequestWith(ctx context.Context, r Requestor) (*ListOffsetsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*ListOffsetsResponse) + return resp, err +} + +func (v *ListOffsetsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + { + v := v.ReplicaID + dst = kbin.AppendInt32(dst, v) + } + if version >= 2 { + v := v.IsolationLevel + dst = kbin.AppendInt8(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + if version >= 4 { + v := v.CurrentLeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Timestamp + dst = kbin.AppendInt64(dst, v) + } + if version >= 0 && version <= 0 { + v := v.MaxNumOffsets + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ListOffsetsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ListOffsetsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ListOffsetsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + s := v + { + v := b.Int32() + s.ReplicaID = v + } + if version >= 2 { + v := b.Int8() + s.IsolationLevel = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ListOffsetsRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ListOffsetsRequestTopicPartition, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + if version >= 4 { + v := b.Int32() + s.CurrentLeaderEpoch = v + } + { + v := b.Int64() + s.Timestamp = v + } + if version >= 0 && version <= 0 { + v := b.Int32() + s.MaxNumOffsets = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrListOffsetsRequest returns a pointer to a default ListOffsetsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrListOffsetsRequest() *ListOffsetsRequest { + var v ListOffsetsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListOffsetsRequest. +func (v *ListOffsetsRequest) Default() { + v.ReplicaID = -1 +} + +// NewListOffsetsRequest returns a default ListOffsetsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewListOffsetsRequest() ListOffsetsRequest { + var v ListOffsetsRequest + v.Default() + return v +} + +type ListOffsetsResponseTopicPartition struct { + // Partition is the partition this array slot is for. + Partition int32 + + // ErrorCode is any error for a topic partition in a ListOffsets request. + // + // TOPIC_AUTHORIZATION_FAILED is returned if the client is not authorized + // to describe the topic. + // + // INVALID_REQUEST is returned if the requested topic partitions + // contained duplicates. + // + // KAFKA_STORAGE_EXCEPTION is returned if the topic / partition is in + // an offline log directory. + // + // UNSUPPORTED_FOR_MESSAGE_FORMAT is returned if the broker is using + // Kafka 0.10.0 messages and the requested timestamp was not -1 nor -2. + // + // NOT_LEADER_FOR_PARTITION is returned if the broker is not a leader + // for this partition. This means that the client has stale metadata. + // If the request used the debug replica ID, the returned error will + // be REPLICA_NOT_AVAILABLE. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if the broker does not know + // of the requested topic or partition. + // + // FENCED_LEADER_EPOCH is returned if the broker has a higher leader + // epoch than what the request sent. + // + // UNKNOWN_LEADER_EPOCH is returned if the request used a leader epoch + // that the broker does not know about. + // + // OFFSET_NOT_AVAILABLE, introduced in Kafka 2.2.0 with list offsets request + // v5+, is returned when talking to a broker that is a new leader while + // that broker's high water mark catches up. This avoids situations where + // the old broker returned higher offsets than the new broker would. Note + // that if unclean leader election is allowed, you could still run into + // the situation where offsets returned from list offsets requests are + // not monotonically increasing. This error is only returned if the + // request used the consumer replica ID (-1). If the client did not use + // a v5+ list offsets request, LEADER_NOT_AVAILABLE is returned. + // See KIP-207 for more details. + ErrorCode int16 + + // OldStyleOffsets is a list of offsets. This was removed after + // version 0 and, since it is so historic, is undocumented.
+ OldStyleOffsets []int64 + + // If the request was for the earliest or latest timestamp (-2 or -1), or + // if an offset could not be found after the requested one, this will be -1. + // + // This field has a default of -1. + Timestamp int64 // v1+ + + // Offset is the offset corresponding to the record on or after the + // requested timestamp. If one could not be found, this will be -1. + // + // This field has a default of -1. + Offset int64 // v1+ + + // LeaderEpoch is the leader epoch of the record at this offset, + // or -1 if there was no leader epoch. + // + // This field has a default of -1. + LeaderEpoch int32 // v4+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListOffsetsResponseTopicPartition. +func (v *ListOffsetsResponseTopicPartition) Default() { + v.Timestamp = -1 + v.Offset = -1 + v.LeaderEpoch = -1 +} + +// NewListOffsetsResponseTopicPartition returns a default ListOffsetsResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewListOffsetsResponseTopicPartition() ListOffsetsResponseTopicPartition { + var v ListOffsetsResponseTopicPartition + v.Default() + return v +} + +type ListOffsetsResponseTopic struct { + // Topic is the topic this array slot is for. + Topic string + + // Partitions is an array of partition responses corresponding to + // the requested partitions for a topic. + Partitions []ListOffsetsResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListOffsetsResponseTopic. +func (v *ListOffsetsResponseTopic) Default() { +} + +// NewListOffsetsResponseTopic returns a default ListOffsetsResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewListOffsetsResponseTopic() ListOffsetsResponseTopic { + var v ListOffsetsResponseTopic + v.Default() + return v +} + +// ListOffsetsResponse is returned from a ListOffsetsRequest. +type ListOffsetsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 3. + ThrottleMillis int32 // v2+ + + // Topics is an array of topic / partition responses corresponding to + // the requested topics and partitions. + Topics []ListOffsetsResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v6+ +} + +func (*ListOffsetsResponse) Key() int16 { return 2 } +func (*ListOffsetsResponse) MaxVersion() int16 { return 8 } +func (v *ListOffsetsResponse) SetVersion(version int16) { v.Version = version } +func (v *ListOffsetsResponse) GetVersion() int16 { return v.Version } +func (v *ListOffsetsResponse) IsFlexible() bool { return v.Version >= 6 } +func (v *ListOffsetsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 3 } +func (v *ListOffsetsResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *ListOffsetsResponse) RequestKind() Request { return &ListOffsetsRequest{Version: v.Version} } + +func (v *ListOffsetsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + if version >= 2 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if version >= 0 && version <= 0 { + v := v.OldStyleOffsets + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt64(dst, v) + } + } + if version >= 1 { + v := v.Timestamp + dst = kbin.AppendInt64(dst, v) + } + if version >= 1 { + v := v.Offset + dst = kbin.AppendInt64(dst, v) + } + if version >= 4 { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ListOffsetsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ListOffsetsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ListOffsetsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + s := v + if version >= 2 { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ListOffsetsResponseTopic, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ListOffsetsResponseTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if version >= 0 && version <= 0 { + v := s.OldStyleOffsets + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int64, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int64() + a[i] = v + } + v = a + s.OldStyleOffsets = v + } + if version >= 1 { + v := b.Int64() + s.Timestamp = v + } + if version >= 1 { + v := b.Int64() + s.Offset = v + } + if version >= 4 { + v := b.Int32() + s.LeaderEpoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrListOffsetsResponse returns a pointer to a default ListOffsetsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrListOffsetsResponse() *ListOffsetsResponse { + var v ListOffsetsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListOffsetsResponse. +func (v *ListOffsetsResponse) Default() { +} + +// NewListOffsetsResponse returns a default ListOffsetsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewListOffsetsResponse() ListOffsetsResponse { + var v ListOffsetsResponse + v.Default() + return v +} + +type MetadataRequestTopic struct { + // The topic ID. Only one of either topic ID or topic name should be used. + // If using the topic name, this should just be the default empty value. + TopicID [16]byte // v10+ + + // Topic is the topic to request metadata for. Version 10 switched this + // from a string to a nullable string; if using a topic ID, this field + // should be null. + Topic *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v9+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to MetadataRequestTopic. +func (v *MetadataRequestTopic) Default() { +} + +// NewMetadataRequestTopic returns a default MetadataRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewMetadataRequestTopic() MetadataRequestTopic { + var v MetadataRequestTopic + v.Default() + return v +} + +// MetadataRequest requests metadata from Kafka. +type MetadataRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Topics is a list of topics to return metadata about. If this is null + // in v1+, all topics are included. If this is empty, no topics are. 
+ // For v0 (<v1), if this is empty, all topics are included. + Topics []MetadataRequestTopic + + // AllowAutoTopicCreation, introduced in Kafka 0.11.0.0, allows topic + // auto creation of the topics in this request if they do not exist. + AllowAutoTopicCreation bool // v4+ + + // IncludeClusterAuthorizedOperations, introduced in Kafka 2.3.0 with + // KIP-430, specifies whether to return a bitfield of AclOperations that + // this client can perform on the cluster. + IncludeClusterAuthorizedOperations bool // v8-v10 + + // IncludeTopicAuthorizedOperations, introduced in Kafka 2.3.0 with + // KIP-430, specifies whether to return a bitfield of AclOperations that + // this client can perform on individual topics. + IncludeTopicAuthorizedOperations bool // v8+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v9+ +} + +func (*MetadataRequest) Key() int16 { return 3 } +func (*MetadataRequest) MaxVersion() int16 { return 12 } +func (v *MetadataRequest) SetVersion(version int16) { v.Version = version } +func (v *MetadataRequest) GetVersion() int16 { return v.Version } +func (v *MetadataRequest) IsFlexible() bool { return v.Version >= 9 } +func (v *MetadataRequest) ResponseKind() Response { + r := &MetadataResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith issues v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *MetadataRequest) RequestWith(ctx context.Context, r Requestor) (*MetadataResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*MetadataResponse) + return resp, err +} + +func (v *MetadataRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 9 + _ = isFlexible + { + v := v.Topics + if version >= 1 { + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + } else { + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + } + for i := range v { + v := &v[i] + if version >= 10 { + v := v.TopicID + dst = kbin.AppendUuid(dst, v) + } + { + v := v.Topic + if version < 10 { + var vv string + if v != nil { + vv = *v + } + { + v := vv + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + } else { + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 4 { + v := v.AllowAutoTopicCreation + dst = kbin.AppendBool(dst, v) + } + if version >= 8 && version <= 10 { + v := v.IncludeClusterAuthorizedOperations + dst = kbin.AppendBool(dst, v) + } + if version >= 8 { + v := v.IncludeTopicAuthorizedOperations + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *MetadataRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *MetadataRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *MetadataRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 9 + _ = isFlexible + s := v + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 1 || l == 0 { + a = []MetadataRequestTopic{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]MetadataRequestTopic, l)...)
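+ // On the request side, whether Topics is nil drives what is read here: + // for v1+, a nil slice encodes the null array meaning every topic, while + // a non-nil slice names only specific topics. A construction sketch + // (kmsg names this package; the topic name is illustrative): + // + //  all := kmsg.NewPtrMetadataRequest() // Topics left nil: all topics (v1+) + //  one := kmsg.NewPtrMetadataRequest() + //  name := "my-topic" + //  t := kmsg.NewMetadataRequestTopic() + //  t.Topic = &name + //  one.Topics = append(one.Topics, t) + //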
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + if version >= 10 { + v := b.Uuid() + s.TopicID = v + } + { + var v *string + if version < 10 { + var vv string + if isFlexible { + if unsafe { + vv = b.UnsafeCompactString() + } else { + vv = b.CompactString() + } + } else { + if unsafe { + vv = b.UnsafeString() + } else { + vv = b.String() + } + } + v = &vv + } else { + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + } + s.Topic = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if version >= 4 { + v := b.Bool() + s.AllowAutoTopicCreation = v + } + if version >= 8 && version <= 10 { + v := b.Bool() + s.IncludeClusterAuthorizedOperations = v + } + if version >= 8 { + v := b.Bool() + s.IncludeTopicAuthorizedOperations = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrMetadataRequest returns a pointer to a default MetadataRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrMetadataRequest() *MetadataRequest { + var v MetadataRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to MetadataRequest. +func (v *MetadataRequest) Default() { +} + +// NewMetadataRequest returns a default MetadataRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewMetadataRequest() MetadataRequest { + var v MetadataRequest + v.Default() + return v +} + +type MetadataResponseBroker struct { + // NodeID is the node ID of a Kafka broker. + NodeID int32 + + // Host is the hostname of a Kafka broker. + Host string + + // Port is the port of a Kafka broker. + Port int32 + + // Rack is the rack this Kafka broker is in. + Rack *string // v1+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v9+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to MetadataResponseBroker. +func (v *MetadataResponseBroker) Default() { +} + +// NewMetadataResponseBroker returns a default MetadataResponseBroker +// This is a shortcut for creating a struct and calling Default yourself. +func NewMetadataResponseBroker() MetadataResponseBroker { + var v MetadataResponseBroker + v.Default() + return v +} + +type MetadataResponseTopicPartition struct { + // ErrorCode is any error for a partition in topic metadata. + // + // LEADER_NOT_AVAILABLE is returned if a leader is unavailable for this + // partition. For v0 metadata responses, this is also returned if a + // partition leader's listener does not exist. + // + // LISTENER_NOT_FOUND is returned if a leader ID is known but the + // listener for it is not (v1+). + // + // REPLICA_NOT_AVAILABLE is returned in v0 responses if any replica is + // unavailable. + // + // UNKNOWN_TOPIC_ID is returned if using a topic ID and the ID does not + // exist. + ErrorCode int16 + + // Partition is a partition number for a topic. + Partition int32 + + // Leader is the broker leader for this partition. This will be -1 + // on leader / listener error. + Leader int32 + + // LeaderEpoch, proposed in KIP-320 and introduced in Kafka 2.1.0 is the + // epoch of the broker leader. + // + // This field has a default of -1. 
+ LeaderEpoch int32 // v7+ + + // Replicas returns all broker IDs containing replicas of this partition. + Replicas []int32 + + // ISR returns all broker IDs of in-sync replicas of this partition. + ISR []int32 + + // OfflineReplicas, proposed in KIP-112 and introduced in Kafka 1.0, + // returns all offline broker IDs that should be replicating this partition. + OfflineReplicas []int32 // v5+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v9+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to MetadataResponseTopicPartition. +func (v *MetadataResponseTopicPartition) Default() { + v.LeaderEpoch = -1 +} + +// NewMetadataResponseTopicPartition returns a default MetadataResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewMetadataResponseTopicPartition() MetadataResponseTopicPartition { + var v MetadataResponseTopicPartition + v.Default() + return v +} + +type MetadataResponseTopic struct { + // ErrorCode is any error for a topic in a metadata request. + // + // TOPIC_AUTHORIZATION_FAILED is returned if the client is not authorized + // to describe the topic, or if the metadata request specified topic auto + // creation, the topic did not exist, and the user lacks permission to create. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if a topic does not exist and + // the request did not specify autocreation. + // + // LEADER_NOT_AVAILABLE is returned if a new topic is created successfully + // (since there is no leader on an immediately new topic). + // + // There can be a myriad of other errors for unsuccessful topic creation. + ErrorCode int16 + + // Topic is the topic this metadata corresponds to. + Topic *string + + // The topic ID. + TopicID [16]byte // v10+ + + // IsInternal signifies whether this topic is a Kafka internal topic. + IsInternal bool // v1+ + + // Partitions contains metadata about partitions for a topic. + Partitions []MetadataResponseTopicPartition + + // AuthorizedOperations, proposed in KIP-430 and introduced in Kafka 2.3.0, + // is a bitfield (corresponding to AclOperation) containing which operations + // the client is allowed to perform on this topic. + // This is only returned if requested. + // + // This field has a default of -2147483648. + AuthorizedOperations int32 // v8+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v9+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to MetadataResponseTopic. +func (v *MetadataResponseTopic) Default() { + v.AuthorizedOperations = -2147483648 +} + +// NewMetadataResponseTopic returns a default MetadataResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewMetadataResponseTopic() MetadataResponseTopic { + var v MetadataResponseTopic + v.Default() + return v +} + +// MetadataResponse is returned from a MetadataRequest. +type MetadataResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 6. + ThrottleMillis int32 // v3+ + + // Brokers is a set of alive Kafka brokers.
+ Brokers []MetadataResponseBroker + + // ClusterID, proposed in KIP-78 and introduced in Kafka 0.10.1.0, is a + // unique string specifying the cluster that the replying Kafka belongs to. + ClusterID *string // v2+ + + // ControllerID is the ID of the controller broker (the admin broker). + // + // This field has a default of -1. + ControllerID int32 // v1+ + + // Topics contains metadata about each topic requested in the + // MetadataRequest. + Topics []MetadataResponseTopic + + // AuthorizedOperations is a bitfield containing which operations the client + // is allowed to perform on this cluster. + // + // This field has a default of -2147483648. + AuthorizedOperations int32 // v8-v10 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v9+ +} + +func (*MetadataResponse) Key() int16 { return 3 } +func (*MetadataResponse) MaxVersion() int16 { return 12 } +func (v *MetadataResponse) SetVersion(version int16) { v.Version = version } +func (v *MetadataResponse) GetVersion() int16 { return v.Version } +func (v *MetadataResponse) IsFlexible() bool { return v.Version >= 9 } +func (v *MetadataResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 6 } +func (v *MetadataResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *MetadataResponse) RequestKind() Request { return &MetadataRequest{Version: v.Version} } + +func (v *MetadataResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 9 + _ = isFlexible + if version >= 3 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Brokers + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.NodeID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Host + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Port + dst = kbin.AppendInt32(dst, v) + } + if version >= 1 { + v := v.Rack + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 2 { + v := v.ClusterID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if version >= 1 { + v := v.ControllerID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Topic + if version < 12 { + var vv string + if v != nil { + vv = *v + } + { + v := vv + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + } else { + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + } + if version >= 10 { + v := v.TopicID + dst = kbin.AppendUuid(dst, v) + } + if version >= 1 { + v := v.IsInternal + dst = kbin.AppendBool(dst, v) + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ErrorCode + 
dst = kbin.AppendInt16(dst, v) + } + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Leader + dst = kbin.AppendInt32(dst, v) + } + if version >= 7 { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Replicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + { + v := v.ISR + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if version >= 5 { + v := v.OfflineReplicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 8 { + v := v.AuthorizedOperations + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 8 && version <= 10 { + v := v.AuthorizedOperations + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *MetadataResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *MetadataResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *MetadataResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 9 + _ = isFlexible + s := v + if version >= 3 { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Brokers + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]MetadataResponseBroker, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.NodeID = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Host = v + } + { + v := b.Int32() + s.Port = v + } + if version >= 1 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Rack = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Brokers = v + } + if version >= 2 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ClusterID = v + } + if version >= 1 { + v := b.Int32() + s.ControllerID = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]MetadataResponseTopic, l)...) 
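+ // Once decoded, walking a response follows the nesting being rebuilt + // here; a sketch against the structs above, with resp a decoded + // *MetadataResponse and error handling elided: + // + //  for i := range resp.Topics { + //      t := &resp.Topics[i] + //      if t.ErrorCode != 0 { + //          continue // e.g. UNKNOWN_TOPIC_OR_PARTITION + //      } + //      for j := range t.Partitions { + //          p := &t.Partitions[j] + //          _ = p.Leader // broker ID currently leading this partition + //      } + //  } + //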
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if version < 12 { + var vv string + if isFlexible { + if unsafe { + vv = b.UnsafeCompactString() + } else { + vv = b.CompactString() + } + } else { + if unsafe { + vv = b.UnsafeString() + } else { + vv = b.String() + } + } + v = &vv + } else { + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + } + s.Topic = v + } + if version >= 10 { + v := b.Uuid() + s.TopicID = v + } + if version >= 1 { + v := b.Bool() + s.IsInternal = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]MetadataResponseTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int32() + s.Leader = v + } + if version >= 7 { + v := b.Int32() + s.LeaderEpoch = v + } + { + v := s.Replicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Replicas = v + } + { + v := s.ISR + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.ISR = v + } + if version >= 5 { + v := s.OfflineReplicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.OfflineReplicas = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if version >= 8 { + v := b.Int32() + s.AuthorizedOperations = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if version >= 8 && version <= 10 { + v := b.Int32() + s.AuthorizedOperations = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrMetadataResponse returns a pointer to a default MetadataResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrMetadataResponse() *MetadataResponse { + var v MetadataResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to MetadataResponse. +func (v *MetadataResponse) Default() { + v.ControllerID = -1 + v.AuthorizedOperations = -2147483648 +} + +// NewMetadataResponse returns a default MetadataResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewMetadataResponse() MetadataResponse { + var v MetadataResponse + v.Default() + return v +} + +// LeaderAndISRRequestTopicPartition is a common struct that is used across +// different versions of LeaderAndISRRequest. 
+type LeaderAndISRRequestTopicPartition struct { + Topic string // v0-v1 + + Partition int32 + + ControllerEpoch int32 + + Leader int32 + + LeaderEpoch int32 + + ISR []int32 + + ZKVersion int32 + + Replicas []int32 + + AddingReplicas []int32 // v3+ + + RemovingReplicas []int32 // v3+ + + IsNew bool // v1+ + + LeaderRecoveryState int8 // v6+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to LeaderAndISRRequestTopicPartition. +func (v *LeaderAndISRRequestTopicPartition) Default() { +} + +// NewLeaderAndISRRequestTopicPartition returns a default LeaderAndISRRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewLeaderAndISRRequestTopicPartition() LeaderAndISRRequestTopicPartition { + var v LeaderAndISRRequestTopicPartition + v.Default() + return v +} + +// LeaderAndISRResponseTopicPartition is a common struct that is used across +// different versions of LeaderAndISRResponse. +type LeaderAndISRResponseTopicPartition struct { + Topic string // v0-v4 + + Partition int32 + + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to LeaderAndISRResponseTopicPartition. +func (v *LeaderAndISRResponseTopicPartition) Default() { +} + +// NewLeaderAndISRResponseTopicPartition returns a default LeaderAndISRResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewLeaderAndISRResponseTopicPartition() LeaderAndISRResponseTopicPartition { + var v LeaderAndISRResponseTopicPartition + v.Default() + return v +} + +type LeaderAndISRRequestTopicState struct { + Topic string + + TopicID [16]byte // v5+ + + PartitionStates []LeaderAndISRRequestTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to LeaderAndISRRequestTopicState. +func (v *LeaderAndISRRequestTopicState) Default() { +} + +// NewLeaderAndISRRequestTopicState returns a default LeaderAndISRRequestTopicState +// This is a shortcut for creating a struct and calling Default yourself. +func NewLeaderAndISRRequestTopicState() LeaderAndISRRequestTopicState { + var v LeaderAndISRRequestTopicState + v.Default() + return v +} + +type LeaderAndISRRequestLiveLeader struct { + BrokerID int32 + + Host string + + Port int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to LeaderAndISRRequestLiveLeader. +func (v *LeaderAndISRRequestLiveLeader) Default() { +} + +// NewLeaderAndISRRequestLiveLeader returns a default LeaderAndISRRequestLiveLeader +// This is a shortcut for creating a struct and calling Default yourself. +func NewLeaderAndISRRequestLiveLeader() LeaderAndISRRequestLiveLeader { + var v LeaderAndISRRequestLiveLeader + v.Default() + return v +} + +// LeaderAndISRRequest is an advanced request that controller brokers use +// to broadcast state to other brokers. Manually using this request is a +// great way to break your cluster. 
+// + // As this is an advanced request and there is little reason to issue it as a +// client, this request is undocumented. +// +// Kafka 1.0 introduced version 1. Kafka 2.2 introduced version 2, proposed +// in KIP-380, which changed the layout of the struct to be more memory +// efficient. Kafka 2.4.0 introduced version 3 with KIP-455. +// Kafka 3.4 introduced version 7 with KIP-866. +type LeaderAndISRRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + ControllerID int32 + + // IsKRaftController indicates whether the controller is a KRaft controller, + // as used during migration. See KIP-866. + IsKRaftController bool // v7+ + + ControllerEpoch int32 + + // This field has a default of -1. + BrokerEpoch int64 // v2+ + + Type int8 // v5+ + + PartitionStates []LeaderAndISRRequestTopicPartition // v0-v1 + + TopicStates []LeaderAndISRRequestTopicState // v2+ + + LiveLeaders []LeaderAndISRRequestLiveLeader + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +func (*LeaderAndISRRequest) Key() int16 { return 4 } +func (*LeaderAndISRRequest) MaxVersion() int16 { return 7 } +func (v *LeaderAndISRRequest) SetVersion(version int16) { v.Version = version } +func (v *LeaderAndISRRequest) GetVersion() int16 { return v.Version } +func (v *LeaderAndISRRequest) IsFlexible() bool { return v.Version >= 4 } +func (v *LeaderAndISRRequest) ResponseKind() Response { + r := &LeaderAndISRResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith issues v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *LeaderAndISRRequest) RequestWith(ctx context.Context, r Requestor) (*LeaderAndISRResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*LeaderAndISRResponse) + return resp, err +} + +func (v *LeaderAndISRRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + { + v := v.ControllerID + dst = kbin.AppendInt32(dst, v) + } + if version >= 7 { + v := v.IsKRaftController + dst = kbin.AppendBool(dst, v) + } + { + v := v.ControllerEpoch + dst = kbin.AppendInt32(dst, v) + } + if version >= 2 { + v := v.BrokerEpoch + dst = kbin.AppendInt64(dst, v) + } + if version >= 5 { + v := v.Type + dst = kbin.AppendInt8(dst, v) + } + if version >= 0 && version <= 1 { + v := v.PartitionStates + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + if version >= 0 && version <= 1 { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ControllerEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Leader + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ISR + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + { + v := v.ZKVersion + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Replicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst =
kbin.AppendInt32(dst, v) + } + } + if version >= 3 { + v := v.AddingReplicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if version >= 3 { + v := v.RemovingReplicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if version >= 1 { + v := v.IsNew + dst = kbin.AppendBool(dst, v) + } + if version >= 6 { + v := v.LeaderRecoveryState + dst = kbin.AppendInt8(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 2 { + v := v.TopicStates + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 5 { + v := v.TopicID + dst = kbin.AppendUuid(dst, v) + } + { + v := v.PartitionStates + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + if version >= 0 && version <= 1 { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ControllerEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Leader + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ISR + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + { + v := v.ZKVersion + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Replicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if version >= 3 { + v := v.AddingReplicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if version >= 3 { + v := v.RemovingReplicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if version >= 1 { + v := v.IsNew + dst = kbin.AppendBool(dst, v) + } + if version >= 6 { + v := v.LeaderRecoveryState + dst = kbin.AppendInt8(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.LiveLeaders + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.BrokerID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Host + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + 
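+ // Every flexible (v4+) struct is terminated below by a tag section: a + // uvarint count of unknown tagged fields followed by each one re-encoded + // via UnknownTags.AppendEach, which lets tags this client does not + // understand survive a round trip.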
} + { + v := v.Port + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *LeaderAndISRRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *LeaderAndISRRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *LeaderAndISRRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + s := v + { + v := b.Int32() + s.ControllerID = v + } + if version >= 7 { + v := b.Bool() + s.IsKRaftController = v + } + { + v := b.Int32() + s.ControllerEpoch = v + } + if version >= 2 { + v := b.Int64() + s.BrokerEpoch = v + } + if version >= 5 { + v := b.Int8() + s.Type = v + } + if version >= 0 && version <= 1 { + v := s.PartitionStates + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]LeaderAndISRRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + if version >= 0 && version <= 1 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int32() + s.ControllerEpoch = v + } + { + v := b.Int32() + s.Leader = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + { + v := s.ISR + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.ISR = v + } + { + v := b.Int32() + s.ZKVersion = v + } + { + v := s.Replicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Replicas = v + } + if version >= 3 { + v := s.AddingReplicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.AddingReplicas = v + } + if version >= 3 { + v := s.RemovingReplicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) 
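+ // Each array decode follows the same shape: read the length prefix (a + // compact uvarint when flexible, an int32 otherwise), bail out through + // b.Complete() if the buffer was too short, then size the slice and + // fill it element by element.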
+ } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.RemovingReplicas = v + } + if version >= 1 { + v := b.Bool() + s.IsNew = v + } + if version >= 6 { + v := b.Int8() + s.LeaderRecoveryState = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.PartitionStates = v + } + if version >= 2 { + v := s.TopicStates + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]LeaderAndISRRequestTopicState, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + if version >= 5 { + v := b.Uuid() + s.TopicID = v + } + { + v := s.PartitionStates + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]LeaderAndISRRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + if version >= 0 && version <= 1 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int32() + s.ControllerEpoch = v + } + { + v := b.Int32() + s.Leader = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + { + v := s.ISR + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.ISR = v + } + { + v := b.Int32() + s.ZKVersion = v + } + { + v := s.Replicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Replicas = v + } + if version >= 3 { + v := s.AddingReplicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.AddingReplicas = v + } + if version >= 3 { + v := s.RemovingReplicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.RemovingReplicas = v + } + if version >= 1 { + v := b.Bool() + s.IsNew = v + } + if version >= 6 { + v := b.Int8() + s.LeaderRecoveryState = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.PartitionStates = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.TopicStates = v + } + { + v := s.LiveLeaders + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]LeaderAndISRRequestLiveLeader, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.BrokerID = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Host = v + } + { + v := b.Int32() + s.Port = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.LiveLeaders = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrLeaderAndISRRequest returns a pointer to a default LeaderAndISRRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrLeaderAndISRRequest() *LeaderAndISRRequest { + var v LeaderAndISRRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to LeaderAndISRRequest. +func (v *LeaderAndISRRequest) Default() { + v.BrokerEpoch = -1 +} + +// NewLeaderAndISRRequest returns a default LeaderAndISRRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewLeaderAndISRRequest() LeaderAndISRRequest { + var v LeaderAndISRRequest + v.Default() + return v +} + +type LeaderAndISRResponseTopic struct { + TopicID [16]byte + + Partitions []LeaderAndISRResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to LeaderAndISRResponseTopic. +func (v *LeaderAndISRResponseTopic) Default() { +} + +// NewLeaderAndISRResponseTopic returns a default LeaderAndISRResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewLeaderAndISRResponseTopic() LeaderAndISRResponseTopic { + var v LeaderAndISRResponseTopic + v.Default() + return v +} + +// LeaderAndISRResponse is returned from a LeaderAndISRRequest. +type LeaderAndISRResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + ErrorCode int16 + + Partitions []LeaderAndISRResponseTopicPartition // v0-v4 + + Topics []LeaderAndISRResponseTopic // v5+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v4+ +} + +func (*LeaderAndISRResponse) Key() int16 { return 4 } +func (*LeaderAndISRResponse) MaxVersion() int16 { return 7 } +func (v *LeaderAndISRResponse) SetVersion(version int16) { v.Version = version } +func (v *LeaderAndISRResponse) GetVersion() int16 { return v.Version } +func (v *LeaderAndISRResponse) IsFlexible() bool { return v.Version >= 4 } +func (v *LeaderAndISRResponse) RequestKind() Request { return &LeaderAndISRRequest{Version: v.Version} } + +func (v *LeaderAndISRResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if version >= 0 && version <= 4 { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + if version >= 0 && version <= 4 { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 5 { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.TopicID + dst = kbin.AppendUuid(dst, v) + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + if version >= 0 && version <= 4 { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *LeaderAndISRResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *LeaderAndISRResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *LeaderAndISRResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + s := v + { + v := b.Int16() + s.ErrorCode = v + } + if version >= 0 && version <= 4 { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]LeaderAndISRResponseTopicPartition, l)...) 
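+ // The response shape is version-split: v0-v4 fill the flat Partitions + // array being decoded here, repeating the topic name per entry, while + // v5+ group partitions under Topics keyed by topic ID.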
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + if version >= 0 && version <= 4 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if version >= 5 { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]LeaderAndISRResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Uuid() + s.TopicID = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]LeaderAndISRResponseTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + if version >= 0 && version <= 4 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrLeaderAndISRResponse returns a pointer to a default LeaderAndISRResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrLeaderAndISRResponse() *LeaderAndISRResponse { + var v LeaderAndISRResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to LeaderAndISRResponse. +func (v *LeaderAndISRResponse) Default() { +} + +// NewLeaderAndISRResponse returns a default LeaderAndISRResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewLeaderAndISRResponse() LeaderAndISRResponse { + var v LeaderAndISRResponse + v.Default() + return v +} + +type StopReplicaRequestTopicPartitionState struct { + Partition int32 + + // This field has a default of -1. + LeaderEpoch int32 + + Delete bool + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to StopReplicaRequestTopicPartitionState. +func (v *StopReplicaRequestTopicPartitionState) Default() { + v.LeaderEpoch = -1 +} + +// NewStopReplicaRequestTopicPartitionState returns a default StopReplicaRequestTopicPartitionState +// This is a shortcut for creating a struct and calling Default yourself. 
+func NewStopReplicaRequestTopicPartitionState() StopReplicaRequestTopicPartitionState { + var v StopReplicaRequestTopicPartitionState + v.Default() + return v +} + +type StopReplicaRequestTopic struct { + Topic string + + Partition int32 + + Partitions []int32 // v1-v2 + + PartitionStates []StopReplicaRequestTopicPartitionState // v3+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to StopReplicaRequestTopic. +func (v *StopReplicaRequestTopic) Default() { +} + +// NewStopReplicaRequestTopic returns a default StopReplicaRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewStopReplicaRequestTopic() StopReplicaRequestTopic { + var v StopReplicaRequestTopic + v.Default() + return v +} + +// StopReplicaRequest is an advanced request that brokers use to stop replicas. +// +// As this is an advanced request and there is little reason to issue it as a +// client, this request is undocumented. +// +// Kafka 2.2 introduced version 1, proposed in KIP-380, which changed the +// layout of the struct to be more memory efficient. +// +// Kafka 2.6 introduced version 3, proposed in KIP-570, which reorganizes how +// partitions are stored and adds per-partition leader epoch and delete fields. +// Kafka 3.4 introduced version 4 with KIP-866. +type StopReplicaRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + ControllerID int32 + + ControllerEpoch int32 + + // IsKRaftController indicates whether the controller is a KRaft controller, + // as used during migration. See KIP-866. + IsKRaftController bool // v4+ + + // This field has a default of -1. + BrokerEpoch int64 // v1+ + + DeletePartitions bool // v0-v2 + + Topics []StopReplicaRequestTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*StopReplicaRequest) Key() int16 { return 5 } +func (*StopReplicaRequest) MaxVersion() int16 { return 4 } +func (v *StopReplicaRequest) SetVersion(version int16) { v.Version = version } +func (v *StopReplicaRequest) GetVersion() int16 { return v.Version } +func (v *StopReplicaRequest) IsFlexible() bool { return v.Version >= 2 } +func (v *StopReplicaRequest) ResponseKind() Response { + r := &StopReplicaResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith issues v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior.
+func (v *StopReplicaRequest) RequestWith(ctx context.Context, r Requestor) (*StopReplicaResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*StopReplicaResponse) + return resp, err +} + +func (v *StopReplicaRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ControllerID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ControllerEpoch + dst = kbin.AppendInt32(dst, v) + } + if version >= 4 { + v := v.IsKRaftController + dst = kbin.AppendBool(dst, v) + } + if version >= 1 { + v := v.BrokerEpoch + dst = kbin.AppendInt64(dst, v) + } + if version >= 0 && version <= 2 { + v := v.DeletePartitions + dst = kbin.AppendBool(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 0 && version <= 0 { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + if version >= 1 && version <= 2 { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if version >= 3 { + v := v.PartitionStates + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Delete + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *StopReplicaRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *StopReplicaRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *StopReplicaRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int32() + s.ControllerID = v + } + { + v := b.Int32() + s.ControllerEpoch = v + } + if version >= 4 { + v := b.Bool() + s.IsKRaftController = v + } + if version >= 1 { + v := b.Int64() + s.BrokerEpoch = v + } + if version >= 0 && version <= 2 { + v := b.Bool() + s.DeletePartitions = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]StopReplicaRequestTopic, l)...) 
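+ // The per-topic partition data read below is version-gated: v0 carries a + // single Partition field, v1-v2 a Partitions array, and v3+ the richer + // PartitionStates with a leader epoch and delete flag per partition + // (KIP-570).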
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + if version >= 0 && version <= 0 { + v := b.Int32() + s.Partition = v + } + if version >= 1 && version <= 2 { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if version >= 3 { + v := s.PartitionStates + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]StopReplicaRequestTopicPartitionState, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + { + v := b.Bool() + s.Delete = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.PartitionStates = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrStopReplicaRequest returns a pointer to a default StopReplicaRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrStopReplicaRequest() *StopReplicaRequest { + var v StopReplicaRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to StopReplicaRequest. +func (v *StopReplicaRequest) Default() { + v.BrokerEpoch = -1 +} + +// NewStopReplicaRequest returns a default StopReplicaRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewStopReplicaRequest() StopReplicaRequest { + var v StopReplicaRequest + v.Default() + return v +} + +type StopReplicaResponsePartition struct { + Topic string + + Partition int32 + + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to StopReplicaResponsePartition. +func (v *StopReplicaResponsePartition) Default() { +} + +// NewStopReplicaResponsePartition returns a default StopReplicaResponsePartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewStopReplicaResponsePartition() StopReplicaResponsePartition { + var v StopReplicaResponsePartition + v.Default() + return v +} + +// StopReplicaResponse is returned from a StopReplicaRequest. +type StopReplicaResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Version 3 returns FENCED_LEADER_EPOCH if the leader is stale (KIP-570). + ErrorCode int16 + + Partitions []StopReplicaResponsePartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of.
+ UnknownTags Tags // v2+ +} + +func (*StopReplicaResponse) Key() int16 { return 5 } +func (*StopReplicaResponse) MaxVersion() int16 { return 4 } +func (v *StopReplicaResponse) SetVersion(version int16) { v.Version = version } +func (v *StopReplicaResponse) GetVersion() int16 { return v.Version } +func (v *StopReplicaResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *StopReplicaResponse) RequestKind() Request { return &StopReplicaRequest{Version: v.Version} } + +func (v *StopReplicaResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *StopReplicaResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *StopReplicaResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *StopReplicaResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]StopReplicaResponsePartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrStopReplicaResponse returns a pointer to a default StopReplicaResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrStopReplicaResponse() *StopReplicaResponse { + var v StopReplicaResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to StopReplicaResponse. +func (v *StopReplicaResponse) Default() { +} + +// NewStopReplicaResponse returns a default StopReplicaResponse +// This is a shortcut for creating a struct and calling Default yourself. 
+func NewStopReplicaResponse() StopReplicaResponse { + var v StopReplicaResponse + v.Default() + return v +} + +type UpdateMetadataRequestTopicPartition struct { + Topic string // v0-v4 + + Partition int32 + + ControllerEpoch int32 + + Leader int32 + + LeaderEpoch int32 + + ISR []int32 + + ZKVersion int32 + + Replicas []int32 + + OfflineReplicas []int32 // v4+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to UpdateMetadataRequestTopicPartition. +func (v *UpdateMetadataRequestTopicPartition) Default() { +} + +// NewUpdateMetadataRequestTopicPartition returns a default UpdateMetadataRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewUpdateMetadataRequestTopicPartition() UpdateMetadataRequestTopicPartition { + var v UpdateMetadataRequestTopicPartition + v.Default() + return v +} + +type UpdateMetadataRequestTopicState struct { + Topic string + + TopicID [16]byte // v7+ + + PartitionStates []UpdateMetadataRequestTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to UpdateMetadataRequestTopicState. +func (v *UpdateMetadataRequestTopicState) Default() { +} + +// NewUpdateMetadataRequestTopicState returns a default UpdateMetadataRequestTopicState +// This is a shortcut for creating a struct and calling Default yourself. +func NewUpdateMetadataRequestTopicState() UpdateMetadataRequestTopicState { + var v UpdateMetadataRequestTopicState + v.Default() + return v +} + +type UpdateMetadataRequestLiveBrokerEndpoint struct { + Port int32 + + Host string + + ListenerName string // v3+ + + SecurityProtocol int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to UpdateMetadataRequestLiveBrokerEndpoint. +func (v *UpdateMetadataRequestLiveBrokerEndpoint) Default() { +} + +// NewUpdateMetadataRequestLiveBrokerEndpoint returns a default UpdateMetadataRequestLiveBrokerEndpoint +// This is a shortcut for creating a struct and calling Default yourself. +func NewUpdateMetadataRequestLiveBrokerEndpoint() UpdateMetadataRequestLiveBrokerEndpoint { + var v UpdateMetadataRequestLiveBrokerEndpoint + v.Default() + return v +} + +type UpdateMetadataRequestLiveBroker struct { + ID int32 + + Host string + + Port int32 + + Endpoints []UpdateMetadataRequestLiveBrokerEndpoint // v1+ + + Rack *string // v2+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to UpdateMetadataRequestLiveBroker. +func (v *UpdateMetadataRequestLiveBroker) Default() { +} + +// NewUpdateMetadataRequestLiveBroker returns a default UpdateMetadataRequestLiveBroker +// This is a shortcut for creating a struct and calling Default yourself. +func NewUpdateMetadataRequestLiveBroker() UpdateMetadataRequestLiveBroker { + var v UpdateMetadataRequestLiveBroker + v.Default() + return v +} + +// UpdateMetadataRequest is an advanced request that brokers use to +// issue metadata updates to each other. 
+// +// As this is an advanced request and there is little reason to issue it as a +// client, this request is undocumented. +// +// Version 1 changed the layout of the live brokers. +// +// Kafka 2.2 introduced version 5, proposed in KIP-380, which changed the +// layout of the struct to be more memory efficient. +// Kafka 3.4 introduced version 8 with KIP-866. +type UpdateMetadataRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + ControllerID int32 + + // If KRaft controller id is used during migration. See KIP-866. + IsKRaftController bool // v8+ + + ControllerEpoch int32 + + // This field has a default of -1. + BrokerEpoch int64 // v5+ + + PartitionStates []UpdateMetadataRequestTopicPartition // v0-v4 + + TopicStates []UpdateMetadataRequestTopicState // v5+ + + LiveBrokers []UpdateMetadataRequestLiveBroker + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +func (*UpdateMetadataRequest) Key() int16 { return 6 } +func (*UpdateMetadataRequest) MaxVersion() int16 { return 8 } +func (v *UpdateMetadataRequest) SetVersion(version int16) { v.Version = version } +func (v *UpdateMetadataRequest) GetVersion() int16 { return v.Version } +func (v *UpdateMetadataRequest) IsFlexible() bool { return v.Version >= 6 } +func (v *UpdateMetadataRequest) ResponseKind() Response { + r := &UpdateMetadataResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *UpdateMetadataRequest) RequestWith(ctx context.Context, r Requestor) (*UpdateMetadataResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*UpdateMetadataResponse) + return resp, err +} + +func (v *UpdateMetadataRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + { + v := v.ControllerID + dst = kbin.AppendInt32(dst, v) + } + if version >= 8 { + v := v.IsKRaftController + dst = kbin.AppendBool(dst, v) + } + { + v := v.ControllerEpoch + dst = kbin.AppendInt32(dst, v) + } + if version >= 5 { + v := v.BrokerEpoch + dst = kbin.AppendInt64(dst, v) + } + if version >= 0 && version <= 4 { + v := v.PartitionStates + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + if version >= 0 && version <= 4 { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ControllerEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Leader + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ISR + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + { + v := v.ZKVersion + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Replicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if version >= 4 { + v := v.OfflineReplicas + if isFlexible { + 
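+					// Compact (flexible-version) arrays are length-prefixed with a
+					// uvarint of len+1, reserving 0 to mean a null array; the
+					// non-flexible branch below writes a four-byte big-endian length.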
dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 5 { + v := v.TopicStates + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 7 { + v := v.TopicID + dst = kbin.AppendUuid(dst, v) + } + { + v := v.PartitionStates + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + if version >= 0 && version <= 4 { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ControllerEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Leader + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ISR + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + { + v := v.ZKVersion + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Replicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if version >= 4 { + v := v.OfflineReplicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.LiveBrokers + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ID + dst = kbin.AppendInt32(dst, v) + } + if version >= 0 && version <= 0 { + v := v.Host + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 0 && version <= 0 { + v := v.Port + dst = kbin.AppendInt32(dst, v) + } + if version >= 1 { + v := v.Endpoints + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Port + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Host + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 3 { + v := v.ListenerName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.SecurityProtocol + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 2 { + v := v.Rack + if 
isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *UpdateMetadataRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *UpdateMetadataRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *UpdateMetadataRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + s := v + { + v := b.Int32() + s.ControllerID = v + } + if version >= 8 { + v := b.Bool() + s.IsKRaftController = v + } + { + v := b.Int32() + s.ControllerEpoch = v + } + if version >= 5 { + v := b.Int64() + s.BrokerEpoch = v + } + if version >= 0 && version <= 4 { + v := s.PartitionStates + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]UpdateMetadataRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + if version >= 0 && version <= 4 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int32() + s.ControllerEpoch = v + } + { + v := b.Int32() + s.Leader = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + { + v := s.ISR + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.ISR = v + } + { + v := b.Int32() + s.ZKVersion = v + } + { + v := s.Replicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Replicas = v + } + if version >= 4 { + v := s.OfflineReplicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.OfflineReplicas = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.PartitionStates = v + } + if version >= 5 { + v := s.TopicStates + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]UpdateMetadataRequestTopicState, l)...) 
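+		// The unsafe flag threads down from UnsafeReadFrom: strings decoded
+		// below alias the source buffer instead of copying, so src must stay
+		// alive and unmodified for as long as the decoded struct is used.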
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + if version >= 7 { + v := b.Uuid() + s.TopicID = v + } + { + v := s.PartitionStates + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]UpdateMetadataRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + if version >= 0 && version <= 4 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int32() + s.ControllerEpoch = v + } + { + v := b.Int32() + s.Leader = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + { + v := s.ISR + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.ISR = v + } + { + v := b.Int32() + s.ZKVersion = v + } + { + v := s.Replicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Replicas = v + } + if version >= 4 { + v := s.OfflineReplicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.OfflineReplicas = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.PartitionStates = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.TopicStates = v + } + { + v := s.LiveBrokers + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]UpdateMetadataRequestLiveBroker, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.ID = v + } + if version >= 0 && version <= 0 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Host = v + } + if version >= 0 && version <= 0 { + v := b.Int32() + s.Port = v + } + if version >= 1 { + v := s.Endpoints + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]UpdateMetadataRequestLiveBrokerEndpoint, l)...) 
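+				// Version 0 carried a bare Host/Port pair per broker (decoded
+				// earlier in this loop); v1+ moved them into this Endpoints
+				// array so that one broker can advertise multiple listeners.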
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Port = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Host = v + } + if version >= 3 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ListenerName = v + } + { + v := b.Int16() + s.SecurityProtocol = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Endpoints = v + } + if version >= 2 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Rack = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.LiveBrokers = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrUpdateMetadataRequest returns a pointer to a default UpdateMetadataRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrUpdateMetadataRequest() *UpdateMetadataRequest { + var v UpdateMetadataRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to UpdateMetadataRequest. +func (v *UpdateMetadataRequest) Default() { + v.BrokerEpoch = -1 +} + +// NewUpdateMetadataRequest returns a default UpdateMetadataRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewUpdateMetadataRequest() UpdateMetadataRequest { + var v UpdateMetadataRequest + v.Default() + return v +} + +// UpdateMetadataResponses is returned from an UpdateMetadataRequest. +type UpdateMetadataResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+	UnknownTags Tags // v6+
+}
+
+func (*UpdateMetadataResponse) Key() int16                 { return 6 }
+func (*UpdateMetadataResponse) MaxVersion() int16          { return 8 }
+func (v *UpdateMetadataResponse) SetVersion(version int16) { v.Version = version }
+func (v *UpdateMetadataResponse) GetVersion() int16        { return v.Version }
+func (v *UpdateMetadataResponse) IsFlexible() bool         { return v.Version >= 6 }
+func (v *UpdateMetadataResponse) RequestKind() Request {
+	return &UpdateMetadataRequest{Version: v.Version}
+}
+
+func (v *UpdateMetadataResponse) AppendTo(dst []byte) []byte {
+	version := v.Version
+	_ = version
+	isFlexible := version >= 6
+	_ = isFlexible
+	{
+		v := v.ErrorCode
+		dst = kbin.AppendInt16(dst, v)
+	}
+	if isFlexible {
+		dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len()))
+		dst = v.UnknownTags.AppendEach(dst)
+	}
+	return dst
+}
+
+func (v *UpdateMetadataResponse) ReadFrom(src []byte) error {
+	return v.readFrom(src, false)
+}
+
+func (v *UpdateMetadataResponse) UnsafeReadFrom(src []byte) error {
+	return v.readFrom(src, true)
+}
+
+func (v *UpdateMetadataResponse) readFrom(src []byte, unsafe bool) error {
+	v.Default()
+	b := kbin.Reader{Src: src}
+	version := v.Version
+	_ = version
+	isFlexible := version >= 6
+	_ = isFlexible
+	s := v
+	{
+		v := b.Int16()
+		s.ErrorCode = v
+	}
+	if isFlexible {
+		s.UnknownTags = internalReadTags(&b)
+	}
+	return b.Complete()
+}
+
+// NewPtrUpdateMetadataResponse returns a pointer to a default UpdateMetadataResponse
+// This is a shortcut for creating a new(struct) and calling Default yourself.
+func NewPtrUpdateMetadataResponse() *UpdateMetadataResponse {
+	var v UpdateMetadataResponse
+	v.Default()
+	return &v
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to UpdateMetadataResponse.
+func (v *UpdateMetadataResponse) Default() {
+}
+
+// NewUpdateMetadataResponse returns a default UpdateMetadataResponse
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewUpdateMetadataResponse() UpdateMetadataResponse {
+	var v UpdateMetadataResponse
+	v.Default()
+	return v
+}
+
+// ControlledShutdownRequest is an advanced request that can be used to
+// shut down a broker in a controlled manner.
+//
+// As this is an advanced request and there is little reason to issue it as a
+// client, this request is undocumented. However, the minimal set of fields
+// here makes the usage rather obvious.
+//
+// Kafka 2.2.0 introduced version 2, proposed in KIP-380.
+//
+// Note that version 0 of this request uses a special encoding format
+// where the request does not include the client ID.
+type ControlledShutdownRequest struct {
+	// Version is the version of this message used with a Kafka broker.
+	Version int16
+
+	BrokerID int32
+
+	// This field has a default of -1.
+	BrokerEpoch int64 // v2+
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+ UnknownTags Tags // v3+ +} + +func (*ControlledShutdownRequest) Key() int16 { return 7 } +func (*ControlledShutdownRequest) MaxVersion() int16 { return 3 } +func (v *ControlledShutdownRequest) SetVersion(version int16) { v.Version = version } +func (v *ControlledShutdownRequest) GetVersion() int16 { return v.Version } +func (v *ControlledShutdownRequest) IsFlexible() bool { return v.Version >= 3 } +func (v *ControlledShutdownRequest) ResponseKind() Response { + r := &ControlledShutdownResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *ControlledShutdownRequest) RequestWith(ctx context.Context, r Requestor) (*ControlledShutdownResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*ControlledShutdownResponse) + return resp, err +} + +func (v *ControlledShutdownRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + { + v := v.BrokerID + dst = kbin.AppendInt32(dst, v) + } + if version >= 2 { + v := v.BrokerEpoch + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ControlledShutdownRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ControlledShutdownRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ControlledShutdownRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + { + v := b.Int32() + s.BrokerID = v + } + if version >= 2 { + v := b.Int64() + s.BrokerEpoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrControlledShutdownRequest returns a pointer to a default ControlledShutdownRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrControlledShutdownRequest() *ControlledShutdownRequest { + var v ControlledShutdownRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ControlledShutdownRequest. +func (v *ControlledShutdownRequest) Default() { + v.BrokerEpoch = -1 +} + +// NewControlledShutdownRequest returns a default ControlledShutdownRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewControlledShutdownRequest() ControlledShutdownRequest { + var v ControlledShutdownRequest + v.Default() + return v +} + +type ControlledShutdownResponsePartitionsRemaining struct { + Topic string + + Partition int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ControlledShutdownResponsePartitionsRemaining. +func (v *ControlledShutdownResponsePartitionsRemaining) Default() { +} + +// NewControlledShutdownResponsePartitionsRemaining returns a default ControlledShutdownResponsePartitionsRemaining +// This is a shortcut for creating a struct and calling Default yourself. 
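+//
+// For illustration (an editor sketch, not generated code): a broker performing
+// a controlled shutdown reissues the request until PartitionsRemaining comes
+// back empty. With a placeholder Requestor named broker:
+//
+//	req := NewPtrControlledShutdownRequest()
+//	req.BrokerID = 1 // placeholder broker ID
+//	for {
+//		resp, err := req.RequestWith(ctx, broker)
+//		if err != nil {
+//			return err
+//		}
+//		if len(resp.PartitionsRemaining) == 0 {
+//			break // leadership fully migrated; safe to stop
+//		}
+//	}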
+func NewControlledShutdownResponsePartitionsRemaining() ControlledShutdownResponsePartitionsRemaining { + var v ControlledShutdownResponsePartitionsRemaining + v.Default() + return v +} + +// ControlledShutdownResponse is returned from a ControlledShutdownRequest. +type ControlledShutdownResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + ErrorCode int16 + + PartitionsRemaining []ControlledShutdownResponsePartitionsRemaining + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +func (*ControlledShutdownResponse) Key() int16 { return 7 } +func (*ControlledShutdownResponse) MaxVersion() int16 { return 3 } +func (v *ControlledShutdownResponse) SetVersion(version int16) { v.Version = version } +func (v *ControlledShutdownResponse) GetVersion() int16 { return v.Version } +func (v *ControlledShutdownResponse) IsFlexible() bool { return v.Version >= 3 } +func (v *ControlledShutdownResponse) RequestKind() Request { + return &ControlledShutdownRequest{Version: v.Version} +} + +func (v *ControlledShutdownResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.PartitionsRemaining + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ControlledShutdownResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ControlledShutdownResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ControlledShutdownResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + v := s.PartitionsRemaining + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ControlledShutdownResponsePartitionsRemaining, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := b.Int32() + s.Partition = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.PartitionsRemaining = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrControlledShutdownResponse returns a pointer to a default ControlledShutdownResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. 
+func NewPtrControlledShutdownResponse() *ControlledShutdownResponse {
+	var v ControlledShutdownResponse
+	v.Default()
+	return &v
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to ControlledShutdownResponse.
+func (v *ControlledShutdownResponse) Default() {
+}
+
+// NewControlledShutdownResponse returns a default ControlledShutdownResponse
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewControlledShutdownResponse() ControlledShutdownResponse {
+	var v ControlledShutdownResponse
+	v.Default()
+	return v
+}
+
+type OffsetCommitRequestTopicPartition struct {
+	// Partition is a partition to commit offsets for.
+	Partition int32
+
+	// Offset is an offset to commit.
+	Offset int64
+
+	// Timestamp is the first iteration of tracking how long offset commits
+	// should persist in Kafka. This field only existed for v1.
+	// The expiration would be timestamp + offset.retention.minutes, or, if
+	// timestamp was zero, current time + offset.retention.minutes.
+	//
+	// This field has a default of -1.
+	Timestamp int64 // v1-v1
+
+	// LeaderEpoch, proposed in KIP-320 and introduced in Kafka 2.1.0,
+	// is the leader epoch of the record this request is committing.
+	//
+	// The initial leader epoch can be determined from a MetadataResponse.
+	// To skip log truncation checking, use -1.
+	//
+	// This field has a default of -1.
+	LeaderEpoch int32 // v6+
+
+	// Metadata is optional data to include with committing the offset. This
+	// can contain information such as which node is doing the committing, etc.
+	Metadata *string
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+	UnknownTags Tags // v8+
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to OffsetCommitRequestTopicPartition.
+func (v *OffsetCommitRequestTopicPartition) Default() {
+	v.Timestamp = -1
+	v.LeaderEpoch = -1
+}
+
+// NewOffsetCommitRequestTopicPartition returns a default OffsetCommitRequestTopicPartition
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewOffsetCommitRequestTopicPartition() OffsetCommitRequestTopicPartition {
+	var v OffsetCommitRequestTopicPartition
+	v.Default()
+	return v
+}
+
+type OffsetCommitRequestTopic struct {
+	// Topic is a topic to commit offsets for.
+	Topic string
+
+	// Partitions contains partitions in a topic for which to commit offsets.
+	Partitions []OffsetCommitRequestTopicPartition
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+	UnknownTags Tags // v8+
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to OffsetCommitRequestTopic.
+func (v *OffsetCommitRequestTopic) Default() {
+}
+
+// NewOffsetCommitRequestTopic returns a default OffsetCommitRequestTopic
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewOffsetCommitRequestTopic() OffsetCommitRequestTopic {
+	var v OffsetCommitRequestTopic
+	v.Default()
+	return v
+}
+
+// OffsetCommitRequest commits offsets for consumed topics / partitions in
+// a group.
+type OffsetCommitRequest struct {
+	// Version is the version of this message used with a Kafka broker.
+	Version int16
+
+	// Group is the group this request is committing offsets to.
+	Group string
+
+	// Generation being -1 and group being empty means the group is being used
+	// to store offsets only. No generation validation, no rebalancing.
+	//
+	// This field has a default of -1.
+	Generation int32 // v1+
+
+	// MemberID is the ID of the client issuing this request in the group.
+	MemberID string // v1+
+
+	// InstanceID is the instance ID of this member in the group (KIP-345).
+	InstanceID *string // v7+
+
+	// RetentionTimeMillis is how long this commit will persist in Kafka.
+	//
+	// This was introduced in v2, replacing an individual topic/partition's
+	// Timestamp from v1, and was removed in v5 with Kafka 2.1.0.
+	//
+	// This was removed because rarely committing consumers could have their
+	// offsets expired before committing, even though the consumer was still
+	// active. After restarting or rebalancing, the consumer would then not know
+	// the last committed offset and would have to start at the beginning or end,
+	// leading to duplicates or data loss.
+	//
+	// Post 2.1.0, if this field is empty, offsets are only deleted once the
+	// group is empty. Read KIP-211 for more details.
+	//
+	// This field has a default of -1.
+	RetentionTimeMillis int64 // v2-v4
+
+	// Topics contains topics and partitions for which to commit offsets.
+	Topics []OffsetCommitRequestTopic
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+	UnknownTags Tags // v8+
+}
+
+func (*OffsetCommitRequest) Key() int16                 { return 8 }
+func (*OffsetCommitRequest) MaxVersion() int16          { return 9 }
+func (v *OffsetCommitRequest) SetVersion(version int16) { v.Version = version }
+func (v *OffsetCommitRequest) GetVersion() int16        { return v.Version }
+func (v *OffsetCommitRequest) IsFlexible() bool         { return v.Version >= 8 }
+func (v *OffsetCommitRequest) IsGroupCoordinatorRequest() {}
+func (v *OffsetCommitRequest) ResponseKind() Response {
+	r := &OffsetCommitResponse{Version: v.Version}
+	r.Default()
+	return r
+}
+
+// RequestWith requests v on r and returns the response or an error.
+// For sharded requests, the response may be merged and still return an error.
+// It is better to rely on client.RequestSharded than to rely on proper merging behavior.
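+//
+// For illustration (an editor sketch, not generated code), committing one
+// offset through this type directly, where r is any Requestor implementation
+// and the group and topic names are placeholders:
+//
+//	req := NewPtrOffsetCommitRequest()
+//	req.Group = "example-group"
+//	topic := NewOffsetCommitRequestTopic()
+//	topic.Topic = "example-topic"
+//	part := NewOffsetCommitRequestTopicPartition()
+//	part.Partition = 0
+//	part.Offset = 42 // the next offset to consume from
+//	topic.Partitions = append(topic.Partitions, part)
+//	req.Topics = append(req.Topics, topic)
+//
+//	resp, err := req.RequestWith(ctx, r)
+//	// err covers transport failures; each partition in resp still
+//	// carries its own ErrorCode.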
+func (v *OffsetCommitRequest) RequestWith(ctx context.Context, r Requestor) (*OffsetCommitResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*OffsetCommitResponse) + return resp, err +} + +func (v *OffsetCommitRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 8 + _ = isFlexible + { + v := v.Group + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 1 { + v := v.Generation + dst = kbin.AppendInt32(dst, v) + } + if version >= 1 { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 7 { + v := v.InstanceID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if version >= 2 && version <= 4 { + v := v.RetentionTimeMillis + dst = kbin.AppendInt64(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Offset + dst = kbin.AppendInt64(dst, v) + } + if version >= 1 && version <= 1 { + v := v.Timestamp + dst = kbin.AppendInt64(dst, v) + } + if version >= 6 { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Metadata + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *OffsetCommitRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *OffsetCommitRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *OffsetCommitRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 8 + _ = isFlexible + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Group = v + } + if version >= 1 { + v := b.Int32() + s.Generation = v + } + if version >= 1 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.MemberID = v + } + if version >= 7 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.InstanceID = v + } + if version >= 2 && version 
<= 4 { + v := b.Int64() + s.RetentionTimeMillis = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetCommitRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetCommitRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int64() + s.Offset = v + } + if version >= 1 && version <= 1 { + v := b.Int64() + s.Timestamp = v + } + if version >= 6 { + v := b.Int32() + s.LeaderEpoch = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Metadata = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrOffsetCommitRequest returns a pointer to a default OffsetCommitRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrOffsetCommitRequest() *OffsetCommitRequest { + var v OffsetCommitRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetCommitRequest. +func (v *OffsetCommitRequest) Default() { + v.Generation = -1 + v.RetentionTimeMillis = -1 +} + +// NewOffsetCommitRequest returns a default OffsetCommitRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetCommitRequest() OffsetCommitRequest { + var v OffsetCommitRequest + v.Default() + return v +} + +type OffsetCommitResponseTopicPartition struct { + // Partition is the partition in a topic this array slot corresponds to. + Partition int32 + + // ErrorCode is the error for this partition response. + // + // GROUP_AUTHORIZATION_FAILED is returned if the client is not authorized + // for the group. + // + // TOPIC_AUTHORIZATION_FAILED is returned if the client is not authorized + // for the topic / partition. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if the topic / partition does + // not exist. + // + // OFFSET_METADATA_TOO_LARGE is returned if the request metadata is + // larger than the brokers offset.metadata.max.bytes. + // + // INVALID_GROUP_ID is returned in the requested group ID is invalid. + // + // COORDINATOR_NOT_AVAILABLE is returned if the coordinator is not available + // (due to the requested broker shutting down or it has not completed startup). + // + // COORDINATOR_LOAD_IN_PROGRESS is returned if the group is loading. + // + // NOT_COORDINATOR is returned if the requested broker is not the coordinator + // for the requested group. 
+ // + // ILLEGAL_GENERATION is returned if the request's generation ID is invalid. + // + // UNKNOWN_MEMBER_ID is returned if the group is dead or the group does not + // know of the request's member ID. + // + // REBALANCE_IN_PROGRESS is returned if the group is finishing a rebalance. + // + // INVALID_COMMIT_OFFSET_SIZE is returned if the offset commit results in + // a record batch that is too large (likely due to large metadata). + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v8+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetCommitResponseTopicPartition. +func (v *OffsetCommitResponseTopicPartition) Default() { +} + +// NewOffsetCommitResponseTopicPartition returns a default OffsetCommitResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetCommitResponseTopicPartition() OffsetCommitResponseTopicPartition { + var v OffsetCommitResponseTopicPartition + v.Default() + return v +} + +type OffsetCommitResponseTopic struct { + // Topic is the topic this offset commit response corresponds to. + Topic string + + // Partitions contains responses for each requested partition in + // a topic. + Partitions []OffsetCommitResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v8+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetCommitResponseTopic. +func (v *OffsetCommitResponseTopic) Default() { +} + +// NewOffsetCommitResponseTopic returns a default OffsetCommitResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetCommitResponseTopic() OffsetCommitResponseTopic { + var v OffsetCommitResponseTopic + v.Default() + return v +} + +// OffsetCommitResponse is returned from an OffsetCommitRequest. +type OffsetCommitResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 4. + ThrottleMillis int32 // v3+ + + // Topics contains responses for each topic / partition in the commit request. + Topics []OffsetCommitResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v8+ +} + +func (*OffsetCommitResponse) Key() int16 { return 8 } +func (*OffsetCommitResponse) MaxVersion() int16 { return 9 } +func (v *OffsetCommitResponse) SetVersion(version int16) { v.Version = version } +func (v *OffsetCommitResponse) GetVersion() int16 { return v.Version } +func (v *OffsetCommitResponse) IsFlexible() bool { return v.Version >= 8 } +func (v *OffsetCommitResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 4 } +func (v *OffsetCommitResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *OffsetCommitResponse) RequestKind() Request { return &OffsetCommitRequest{Version: v.Version} } + +func (v *OffsetCommitResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 8 + _ = isFlexible + if version >= 3 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *OffsetCommitResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *OffsetCommitResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *OffsetCommitResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 8 + _ = isFlexible + s := v + if version >= 3 { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetCommitResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetCommitResponseTopicPartition, l)...) 
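+		// Each partition decoded below carries its own ErrorCode; callers
+		// typically map these int16 codes to errors (in franz-go, via the
+		// kerr package).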
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrOffsetCommitResponse returns a pointer to a default OffsetCommitResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrOffsetCommitResponse() *OffsetCommitResponse { + var v OffsetCommitResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetCommitResponse. +func (v *OffsetCommitResponse) Default() { +} + +// NewOffsetCommitResponse returns a default OffsetCommitResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetCommitResponse() OffsetCommitResponse { + var v OffsetCommitResponse + v.Default() + return v +} + +type OffsetFetchRequestTopic struct { + // Topic is a topic to fetch offsets for. + Topic string + + // Partitions in a list of partitions in a group to fetch offsets for. + Partitions []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetFetchRequestTopic. +func (v *OffsetFetchRequestTopic) Default() { +} + +// NewOffsetFetchRequestTopic returns a default OffsetFetchRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetFetchRequestTopic() OffsetFetchRequestTopic { + var v OffsetFetchRequestTopic + v.Default() + return v +} + +type OffsetFetchRequestGroupTopic struct { + Topic string + + Partitions []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetFetchRequestGroupTopic. +func (v *OffsetFetchRequestGroupTopic) Default() { +} + +// NewOffsetFetchRequestGroupTopic returns a default OffsetFetchRequestGroupTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetFetchRequestGroupTopic() OffsetFetchRequestGroupTopic { + var v OffsetFetchRequestGroupTopic + v.Default() + return v +} + +type OffsetFetchRequestGroup struct { + Group string + + // The member ID assigned by the group coordinator if using the new consumer protocol (KIP-848). + MemberID *string // v9+ + + // The member epoch if using the new consumer protocol (KIP-848). + // + // This field has a default of -1. + MemberEpoch int32 // v9+ + + Topics []OffsetFetchRequestGroupTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetFetchRequestGroup. +func (v *OffsetFetchRequestGroup) Default() { + v.MemberEpoch = -1 +} + +// NewOffsetFetchRequestGroup returns a default OffsetFetchRequestGroup +// This is a shortcut for creating a struct and calling Default yourself. 
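+//
+// For illustration (an editor sketch, not generated code), fetching all
+// committed offsets for one group via the v8+ Groups field, where r is any
+// Requestor implementation and the group name is a placeholder:
+//
+//	req := NewPtrOffsetFetchRequest()
+//	g := NewOffsetFetchRequestGroup()
+//	g.Group = "example-group"
+//	g.Topics = nil // a null Topics list requests every topic the group has committed
+//	req.Groups = append(req.Groups, g)
+//	resp, err := req.RequestWith(ctx, r)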
+func NewOffsetFetchRequestGroup() OffsetFetchRequestGroup {
+	var v OffsetFetchRequestGroup
+	v.Default()
+	return v
+}
+
+// OffsetFetchRequest requests the most recent committed offsets for topic
+// partitions in a group.
+type OffsetFetchRequest struct {
+	// Version is the version of this message used with a Kafka broker.
+	Version int16
+
+	// Group is the group to fetch offsets for.
+	Group string // v0-v7
+
+	// Topics contains topics to fetch offsets for. Version 2+ allows this to be
+	// null to return all topics the client is authorized to describe in the group.
+	Topics []OffsetFetchRequestTopic // v0-v7
+
+	// Groups, introduced in v8 (Kafka 3.0), allows for fetching offsets for
+	// multiple groups at a time.
+	//
+	// The fields here mirror the old top level fields on the request, thus they
+	// are left undocumented. Refer to the top level documentation if necessary.
+	Groups []OffsetFetchRequestGroup // v8+
+
+	// RequireStable signifies that the broker should not return unstable
+	// offsets, instead setting a retryable error (UNSTABLE_OFFSET_COMMIT) on
+	// the relevant unstable partitions. See KIP-447 for more details.
+	RequireStable bool // v7+
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+	UnknownTags Tags // v6+
+}
+
+func (*OffsetFetchRequest) Key() int16                 { return 9 }
+func (*OffsetFetchRequest) MaxVersion() int16          { return 9 }
+func (v *OffsetFetchRequest) SetVersion(version int16) { v.Version = version }
+func (v *OffsetFetchRequest) GetVersion() int16        { return v.Version }
+func (v *OffsetFetchRequest) IsFlexible() bool         { return v.Version >= 6 }
+func (v *OffsetFetchRequest) IsGroupCoordinatorRequest() {}
+func (v *OffsetFetchRequest) ResponseKind() Response {
+	r := &OffsetFetchResponse{Version: v.Version}
+	r.Default()
+	return r
+}
+
+// RequestWith requests v on r and returns the response or an error.
+// For sharded requests, the response may be merged and still return an error.
+// It is better to rely on client.RequestSharded than to rely on proper merging behavior.
+func (v *OffsetFetchRequest) RequestWith(ctx context.Context, r Requestor) (*OffsetFetchResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*OffsetFetchResponse) + return resp, err +} + +func (v *OffsetFetchRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + if version >= 0 && version <= 7 { + v := v.Group + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 0 && version <= 7 { + v := v.Topics + if version >= 2 { + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + } else { + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 8 { + v := v.Groups + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Group + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 9 { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if version >= 9 { + v := v.MemberEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 7 { + v := v.RequireStable + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *OffsetFetchRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *OffsetFetchRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *OffsetFetchRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + s := v + if version >= 0 && version <= 7 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = 
b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Group = v + } + if version >= 0 && version <= 7 { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 2 || l == 0 { + a = []OffsetFetchRequestTopic{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetFetchRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if version >= 8 { + v := s.Groups + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetFetchRequestGroup, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Group = v + } + if version >= 9 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.MemberID = v + } + if version >= 9 { + v := b.Int32() + s.MemberEpoch = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 0 || l == 0 { + a = []OffsetFetchRequestGroupTopic{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetFetchRequestGroupTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Groups = v + } + if version >= 7 { + v := b.Bool() + s.RequireStable = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrOffsetFetchRequest returns a pointer to a default OffsetFetchRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. 
+func NewPtrOffsetFetchRequest() *OffsetFetchRequest {
+	var v OffsetFetchRequest
+	v.Default()
+	return &v
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to OffsetFetchRequest.
+func (v *OffsetFetchRequest) Default() {
+}
+
+// NewOffsetFetchRequest returns a default OffsetFetchRequest
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewOffsetFetchRequest() OffsetFetchRequest {
+	var v OffsetFetchRequest
+	v.Default()
+	return v
+}
+
+type OffsetFetchResponseTopicPartition struct {
+	// Partition is the partition in a topic this array slot corresponds to.
+	Partition int32
+
+	// Offset is the most recently committed offset for this topic partition
+	// in a group.
+	Offset int64
+
+	// LeaderEpoch is the leader epoch of the last consumed record.
+	//
+	// This was proposed in KIP-320 and introduced in Kafka 2.1.0 and allows
+	// clients to detect log truncation. See the KIP for more details.
+	//
+	// This field has a default of -1.
+	LeaderEpoch int32 // v5+
+
+	// Metadata is client-provided metadata corresponding to the offset commit.
+	// This can be useful for adding who made the commit, etc.
+	Metadata *string
+
+	// ErrorCode is the error for this partition response.
+	//
+	// GROUP_AUTHORIZATION_FAILED is returned if the client is not authorized
+	// to the group.
+	//
+	// INVALID_GROUP_ID is returned if the requested group ID is invalid.
+	//
+	// COORDINATOR_NOT_AVAILABLE is returned if the coordinator is not available
+	// (due to the requested broker shutting down or it has not completed startup).
+	//
+	// COORDINATOR_LOAD_IN_PROGRESS is returned if the group is loading.
+	//
+	// NOT_COORDINATOR is returned if the requested broker is not the coordinator
+	// for the requested group.
+	//
+	// UNKNOWN_TOPIC_OR_PARTITION is returned if the requested topic or partition
+	// is unknown.
+	//
+	// UNSTABLE_OFFSET_COMMIT is returned for v7+ if the request set RequireStable.
+	// See KIP-447 for more details.
+	ErrorCode int16
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+	UnknownTags Tags // v6+
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to OffsetFetchResponseTopicPartition.
+func (v *OffsetFetchResponseTopicPartition) Default() {
+	v.LeaderEpoch = -1
+}
+
+// NewOffsetFetchResponseTopicPartition returns a default OffsetFetchResponseTopicPartition
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewOffsetFetchResponseTopicPartition() OffsetFetchResponseTopicPartition {
+	var v OffsetFetchResponseTopicPartition
+	v.Default()
+	return v
+}
+
+type OffsetFetchResponseTopic struct {
+	// Topic is the topic this offset fetch response corresponds to.
+	Topic string
+
+	// Partitions contains responses for each requested partition in
+	// a topic.
+	Partitions []OffsetFetchResponseTopicPartition
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+	UnknownTags Tags // v6+
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to OffsetFetchResponseTopic.
+func (v *OffsetFetchResponseTopic) Default() {
+}
+
+// NewOffsetFetchResponseTopic returns a default OffsetFetchResponseTopic
+// This is a shortcut for creating a struct and calling Default yourself.
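+//
+// A sketch of walking the v0-v7 response shape, mapping each partition's
+// ErrorCode through this module's kerr package (resp is assumed to come from
+// an OffsetFetchRequest issued elsewhere):
+//
+//	for _, t := range resp.Topics {
+//		for _, p := range t.Partitions {
+//			if err := kerr.ErrorForCode(p.ErrorCode); err != nil {
+//				fmt.Printf("%s[%d]: %v\n", t.Topic, p.Partition, err)
+//				continue
+//			}
+//			fmt.Printf("%s[%d]: offset %d, leader epoch %d\n",
+//				t.Topic, p.Partition, p.Offset, p.LeaderEpoch)
+//		}
+//	}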
+func NewOffsetFetchResponseTopic() OffsetFetchResponseTopic { + var v OffsetFetchResponseTopic + v.Default() + return v +} + +type OffsetFetchResponseGroupTopicPartition struct { + Partition int32 + + Offset int64 + + // This field has a default of -1. + LeaderEpoch int32 + + Metadata *string + + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetFetchResponseGroupTopicPartition. +func (v *OffsetFetchResponseGroupTopicPartition) Default() { + v.LeaderEpoch = -1 +} + +// NewOffsetFetchResponseGroupTopicPartition returns a default OffsetFetchResponseGroupTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetFetchResponseGroupTopicPartition() OffsetFetchResponseGroupTopicPartition { + var v OffsetFetchResponseGroupTopicPartition + v.Default() + return v +} + +type OffsetFetchResponseGroupTopic struct { + Topic string + + Partitions []OffsetFetchResponseGroupTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetFetchResponseGroupTopic. +func (v *OffsetFetchResponseGroupTopic) Default() { +} + +// NewOffsetFetchResponseGroupTopic returns a default OffsetFetchResponseGroupTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetFetchResponseGroupTopic() OffsetFetchResponseGroupTopic { + var v OffsetFetchResponseGroupTopic + v.Default() + return v +} + +type OffsetFetchResponseGroup struct { + Group string + + Topics []OffsetFetchResponseGroupTopic + + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetFetchResponseGroup. +func (v *OffsetFetchResponseGroup) Default() { +} + +// NewOffsetFetchResponseGroup returns a default OffsetFetchResponseGroup +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetFetchResponseGroup() OffsetFetchResponseGroup { + var v OffsetFetchResponseGroup + v.Default() + return v +} + +// OffsetFetchResponse is returned from an OffsetFetchRequest. +type OffsetFetchResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 4. + ThrottleMillis int32 // v3+ + + // Topics contains responses for each requested topic/partition. + Topics []OffsetFetchResponseTopic // v0-v7 + + // ErrorCode is a top level error code that applies to all topic/partitions. + // This will be any group error. + ErrorCode int16 // v2-v7 + + // Groups is the response for all groups. Each field mirrors the fields in the + // top level request, thus they are left undocumented. Refer to the top level + // documentation if necessary. + Groups []OffsetFetchResponseGroup // v8+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v6+ +} + +func (*OffsetFetchResponse) Key() int16 { return 9 } +func (*OffsetFetchResponse) MaxVersion() int16 { return 9 } +func (v *OffsetFetchResponse) SetVersion(version int16) { v.Version = version } +func (v *OffsetFetchResponse) GetVersion() int16 { return v.Version } +func (v *OffsetFetchResponse) IsFlexible() bool { return v.Version >= 6 } +func (v *OffsetFetchResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 4 } +func (v *OffsetFetchResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *OffsetFetchResponse) RequestKind() Request { return &OffsetFetchRequest{Version: v.Version} } + +func (v *OffsetFetchResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + if version >= 3 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + if version >= 0 && version <= 7 { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Offset + dst = kbin.AppendInt64(dst, v) + } + if version >= 5 { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Metadata + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 2 && version <= 7 { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if version >= 8 { + v := v.Groups + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Group + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Offset + dst = kbin.AppendInt64(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Metadata + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = 
kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *OffsetFetchResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *OffsetFetchResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *OffsetFetchResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + s := v + if version >= 3 { + v := b.Int32() + s.ThrottleMillis = v + } + if version >= 0 && version <= 7 { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetFetchResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetFetchResponseTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int64() + s.Offset = v + } + if version >= 5 { + v := b.Int32() + s.LeaderEpoch = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Metadata = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if version >= 2 && version <= 7 { + v := b.Int16() + s.ErrorCode = v + } + if version >= 8 { + v := s.Groups + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetFetchResponseGroup, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Group = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetFetchResponseGroupTopic, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetFetchResponseGroupTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int64() + s.Offset = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Metadata = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Groups = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrOffsetFetchResponse returns a pointer to a default OffsetFetchResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrOffsetFetchResponse() *OffsetFetchResponse { + var v OffsetFetchResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetFetchResponse. +func (v *OffsetFetchResponse) Default() { +} + +// NewOffsetFetchResponse returns a default OffsetFetchResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetFetchResponse() OffsetFetchResponse { + var v OffsetFetchResponse + v.Default() + return v +} + +// FindCoordinatorRequest requests the coordinator for a group or transaction. +// +// This coordinator is different from the broker leader coordinator. This +// coordinator is the partition leader for the partition that is storing +// the group or transaction ID. +type FindCoordinatorRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // CoordinatorKey is the ID to use for finding the coordinator. For groups, + // this is the group name, for transactional producer, this is the + // transactional ID. + CoordinatorKey string // v0-v3 + + // CoordinatorType is the type that key is. Groups are type 0, + // transactional IDs are type 1. + CoordinatorType int8 // v1+ + + // CoordinatorKeys contains all keys to find the coordinator for. + CoordinatorKeys []string // v4+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v3+ +} + +func (*FindCoordinatorRequest) Key() int16 { return 10 } +func (*FindCoordinatorRequest) MaxVersion() int16 { return 5 } +func (v *FindCoordinatorRequest) SetVersion(version int16) { v.Version = version } +func (v *FindCoordinatorRequest) GetVersion() int16 { return v.Version } +func (v *FindCoordinatorRequest) IsFlexible() bool { return v.Version >= 3 } +func (v *FindCoordinatorRequest) ResponseKind() Response { + r := &FindCoordinatorResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *FindCoordinatorRequest) RequestWith(ctx context.Context, r Requestor) (*FindCoordinatorResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*FindCoordinatorResponse) + return resp, err +} + +func (v *FindCoordinatorRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + if version >= 0 && version <= 3 { + v := v.CoordinatorKey + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 1 { + v := v.CoordinatorType + dst = kbin.AppendInt8(dst, v) + } + if version >= 4 { + v := v.CoordinatorKeys + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *FindCoordinatorRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *FindCoordinatorRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *FindCoordinatorRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + if version >= 0 && version <= 3 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.CoordinatorKey = v + } + if version >= 1 { + v := b.Int8() + s.CoordinatorType = v + } + if version >= 4 { + v := s.CoordinatorKeys + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]string, l)...) + } + for i := int32(0); i < l; i++ { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + a[i] = v + } + v = a + s.CoordinatorKeys = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrFindCoordinatorRequest returns a pointer to a default FindCoordinatorRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. 
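+//
+// A usage sketch for finding a group's coordinator; ctx, the group name, and
+// cl (a Requestor such as a *kgo.Client) are illustrative assumptions:
+//
+//	req := kmsg.NewPtrFindCoordinatorRequest()
+//	req.CoordinatorType = 0                    // 0 = group, 1 = transactional ID
+//	req.CoordinatorKey = "my-group"            // used through v3
+//	req.CoordinatorKeys = []string{"my-group"} // v4+ batch form (KIP-699)
+//	resp, err := req.RequestWith(ctx, cl)
+//	if err != nil {
+//		return err
+//	}
+//	// v0-v3 answer in NodeID/Host/Port; v4+ answers in resp.Coordinators.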
+func NewPtrFindCoordinatorRequest() *FindCoordinatorRequest { + var v FindCoordinatorRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FindCoordinatorRequest. +func (v *FindCoordinatorRequest) Default() { +} + +// NewFindCoordinatorRequest returns a default FindCoordinatorRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewFindCoordinatorRequest() FindCoordinatorRequest { + var v FindCoordinatorRequest + v.Default() + return v +} + +type FindCoordinatorResponseCoordinator struct { + Key string + + NodeID int32 + + Host string + + Port int32 + + ErrorCode int16 + + ErrorMessage *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FindCoordinatorResponseCoordinator. +func (v *FindCoordinatorResponseCoordinator) Default() { +} + +// NewFindCoordinatorResponseCoordinator returns a default FindCoordinatorResponseCoordinator +// This is a shortcut for creating a struct and calling Default yourself. +func NewFindCoordinatorResponseCoordinator() FindCoordinatorResponseCoordinator { + var v FindCoordinatorResponseCoordinator + v.Default() + return v +} + +// FindCoordinatorResponse is returned from a FindCoordinatorRequest. +type FindCoordinatorResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 2. + ThrottleMillis int32 // v1+ + + // ErrorCode is the error returned for the request. + // + // GROUP_AUTHORIZATION_FAILED is returned if for a group ID request and the + // client is not authorized to describe groups. + // + // TRANSACTIONAL_ID_AUTHORIZATION_FAILED is returned for a transactional ID + // request and the client is not authorized to describe transactional IDs. + // + // INVALID_REQUEST is returned if not asking for a known type (group, + // or transaction). + // + // COORDINATOR_NOT_AVAILABLE is returned if the coordinator is not available + // for the requested ID, which would be if the group or transactional topic + // does not exist or the partition the requested key maps to is not available. + ErrorCode int16 // v0-v3 + + // ErrorMessage is an informative message if the request errored. + ErrorMessage *string // v1-v3 + + // NodeID is the broker ID of the coordinator. + NodeID int32 // v0-v3 + + // Host is the host of the coordinator. + Host string // v0-v3 + + // Port is the port of the coordinator. + Port int32 // v0-v3 + + // Coordinators, introduced for KIP-699, is the bulk response for + // coordinators. The fields in the struct exactly match the original fields + // in the FindCoordinatorResponse, thus they are left undocumented. + Coordinators []FindCoordinatorResponseCoordinator // v4+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v3+ +} + +func (*FindCoordinatorResponse) Key() int16 { return 10 } +func (*FindCoordinatorResponse) MaxVersion() int16 { return 5 } +func (v *FindCoordinatorResponse) SetVersion(version int16) { v.Version = version } +func (v *FindCoordinatorResponse) GetVersion() int16 { return v.Version } +func (v *FindCoordinatorResponse) IsFlexible() bool { return v.Version >= 3 } +func (v *FindCoordinatorResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 2 } +func (v *FindCoordinatorResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *FindCoordinatorResponse) RequestKind() Request { + return &FindCoordinatorRequest{Version: v.Version} +} + +func (v *FindCoordinatorResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + if version >= 1 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + if version >= 0 && version <= 3 { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if version >= 1 && version <= 3 { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if version >= 0 && version <= 3 { + v := v.NodeID + dst = kbin.AppendInt32(dst, v) + } + if version >= 0 && version <= 3 { + v := v.Host + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 0 && version <= 3 { + v := v.Port + dst = kbin.AppendInt32(dst, v) + } + if version >= 4 { + v := v.Coordinators + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Key + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.NodeID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Host + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Port + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *FindCoordinatorResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *FindCoordinatorResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *FindCoordinatorResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + if version >= 1 { + v := b.Int32() + s.ThrottleMillis = v + } + if version >= 0 && version <= 3 { + v := b.Int16() + s.ErrorCode = v + } + if version >= 1 && version <= 3 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + if version >= 0 && version <= 3 { + v := b.Int32() + s.NodeID = v + } + 
if version >= 0 && version <= 3 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Host = v + } + if version >= 0 && version <= 3 { + v := b.Int32() + s.Port = v + } + if version >= 4 { + v := s.Coordinators + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]FindCoordinatorResponseCoordinator, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Key = v + } + { + v := b.Int32() + s.NodeID = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Host = v + } + { + v := b.Int32() + s.Port = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Coordinators = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrFindCoordinatorResponse returns a pointer to a default FindCoordinatorResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrFindCoordinatorResponse() *FindCoordinatorResponse { + var v FindCoordinatorResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FindCoordinatorResponse. +func (v *FindCoordinatorResponse) Default() { +} + +// NewFindCoordinatorResponse returns a default FindCoordinatorResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewFindCoordinatorResponse() FindCoordinatorResponse { + var v FindCoordinatorResponse + v.Default() + return v +} + +type JoinGroupRequestProtocol struct { + // Name is a name of a protocol. This is arbitrary, but is used + // in the official client to agree on a partition balancing strategy. + // + // The official client uses range, roundrobin, or sticky (which was + // introduced in KIP-54). + Name string + + // Metadata is arbitrary information to pass along with this + // protocol name for this member. + // + // Note that while this is not documented in any protocol page, + // this is usually a serialized GroupMemberMetadata as described in + // https://cwiki.apache.org/confluence/display/KAFKA/Kafka+Client-side+Assignment+Proposal. + // + // The protocol metadata is where group members will communicate which + // topics they collectively as a group want to consume. + Metadata []byte + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to JoinGroupRequestProtocol. 
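+//
+// For reference, a sketch of building the Metadata bytes above by serializing
+// a consumer metadata struct, assuming this package's ConsumerMemberMetadata
+// type is used (the topic names are illustrative):
+//
+//	meta := kmsg.NewConsumerMemberMetadata()
+//	meta.Topics = []string{"orders", "payments"}
+//
+//	proto := kmsg.NewJoinGroupRequestProtocol()
+//	proto.Name = "range"
+//	proto.Metadata = meta.AppendTo(nil)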
+func (v *JoinGroupRequestProtocol) Default() { +} + +// NewJoinGroupRequestProtocol returns a default JoinGroupRequestProtocol +// This is a shortcut for creating a struct and calling Default yourself. +func NewJoinGroupRequestProtocol() JoinGroupRequestProtocol { + var v JoinGroupRequestProtocol + v.Default() + return v +} + +// JoinGroupRequest issues a request to join a Kafka group. This will create a +// group if one does not exist. If joining an existing group, this may trigger +// a group rebalance. +// +// This will trigger a group rebalance if the request is from the group leader, +// or if the request is from a group member with different metadata, or if the +// request is with a new group member. +// +// Version 4 introduced replying to joins of existing groups with +// MEMBER_ID_REQUIRED, which requires re-issuing the join group with the +// returned member ID. See KIP-394 for more details. +// +// Version 5 introduced InstanceID, allowing for more "static" membership. +// See KIP-345 for more details. +type JoinGroupRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Group is the group to join. + Group string + + // SessionTimeoutMillis is how long a member in the group can go between + // heartbeats. If a member does not send a heartbeat within this timeout, + // the broker will remove the member from the group and initiate a rebalance. + SessionTimeoutMillis int32 + + // RebalanceTimeoutMillis is how long the broker waits for members to join a group + // once a rebalance begins. Kafka waits for the longest rebalance of all + // members in the group. Member sessions are still alive; heartbeats will be + // replied to with REBALANCE_IN_PROGRESS. Those members must transition to + // joining within this rebalance timeout. Members that do not rejoin within + // this timeout will be removed from the group. Members must commit offsets + // within this timeout. + // + // The first join for a new group has a 3 second grace period for other + // members to join; this grace period is extended until the RebalanceTimeoutMillis + // is up or until 3 seconds lapse with no new members. + // + // This field has a default of -1. + RebalanceTimeoutMillis int32 // v1+ + + // MemberID is the member ID to join the group with. When joining a group for + // the first time, use the empty string. The response will contain the member + // ID that should be used going forward. + MemberID string + + // InstanceID is a user configured ID that is used for making a group + // member "static", allowing many rebalances to be avoided. + InstanceID *string // v5+ + + // ProtocolType is the "type" of protocol being used for the join group. + // The initial group creation sets the type; all additional members must + // have the same type or they will be rejected. + // + // This is completely arbitrary, but the Java client and everything else + // uses "consumer" as the protocol type. + ProtocolType string + + // Protocols contains arbitrary information that group members use + // for rebalancing. All group members must agree on at least one protocol + // name. + Protocols []JoinGroupRequestProtocol + + // Reason is an optional reason the member is joining (or rejoining) the + // group (KIP-800, Kafka 3.2+). + Reason *string // v8+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v6+ +} + +func (*JoinGroupRequest) Key() int16 { return 11 } +func (*JoinGroupRequest) MaxVersion() int16 { return 9 } +func (v *JoinGroupRequest) SetVersion(version int16) { v.Version = version } +func (v *JoinGroupRequest) GetVersion() int16 { return v.Version } +func (v *JoinGroupRequest) IsFlexible() bool { return v.Version >= 6 } +func (v *JoinGroupRequest) IsGroupCoordinatorRequest() {} +func (v *JoinGroupRequest) ResponseKind() Response { + r := &JoinGroupResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *JoinGroupRequest) RequestWith(ctx context.Context, r Requestor) (*JoinGroupResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*JoinGroupResponse) + return resp, err +} + +func (v *JoinGroupRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + { + v := v.Group + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.SessionTimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + if version >= 1 { + v := v.RebalanceTimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 5 { + v := v.InstanceID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.ProtocolType + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Protocols + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Metadata + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 8 { + v := v.Reason + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *JoinGroupRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *JoinGroupRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *JoinGroupRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Group = v + } + { + v := b.Int32() + s.SessionTimeoutMillis = v + } + if version >= 1 { + v := b.Int32() + s.RebalanceTimeoutMillis = v + } + { + var v string + if unsafe { + if isFlexible { + v = 
b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.MemberID = v + } + if version >= 5 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.InstanceID = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ProtocolType = v + } + { + v := s.Protocols + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]JoinGroupRequestProtocol, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.Metadata = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Protocols = v + } + if version >= 8 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Reason = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrJoinGroupRequest returns a pointer to a default JoinGroupRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrJoinGroupRequest() *JoinGroupRequest { + var v JoinGroupRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to JoinGroupRequest. +func (v *JoinGroupRequest) Default() { + v.RebalanceTimeoutMillis = -1 +} + +// NewJoinGroupRequest returns a default JoinGroupRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewJoinGroupRequest() JoinGroupRequest { + var v JoinGroupRequest + v.Default() + return v +} + +type JoinGroupResponseMember struct { + // MemberID is a member in this group. + MemberID string + + // InstanceID is an instance ID of a member in this group (KIP-345). + InstanceID *string // v5+ + + // ProtocolMetadata is the metadata for this member for this protocol. + // This is usually of type GroupMemberMetadata. + ProtocolMetadata []byte + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v6+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to JoinGroupResponseMember. +func (v *JoinGroupResponseMember) Default() { +} + +// NewJoinGroupResponseMember returns a default JoinGroupResponseMember +// This is a shortcut for creating a struct and calling Default yourself. +func NewJoinGroupResponseMember() JoinGroupResponseMember { + var v JoinGroupResponseMember + v.Default() + return v +} + +// JoinGroupResponse is returned from a JoinGroupRequest. 
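+//
+// A sketch of the initial-join dance, rejoining on MEMBER_ID_REQUIRED
+// (KIP-394). Here cl, ctx, the group name, and proto are illustrative
+// assumptions, and the error code is matched via this module's kerr package
+// (assumed to expose MemberIDRequired with its Code):
+//
+//	req := kmsg.NewPtrJoinGroupRequest()
+//	req.Group = "my-group"
+//	req.SessionTimeoutMillis = 45000
+//	req.ProtocolType = "consumer"
+//	req.Protocols = append(req.Protocols, proto)
+//
+//	resp, err := req.RequestWith(ctx, cl)
+//	if err == nil && resp.ErrorCode == kerr.MemberIDRequired.Code {
+//		req.MemberID = resp.MemberID // rejoin with the broker-assigned member ID
+//		resp, err = req.RequestWith(ctx, cl)
+//	}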
+type JoinGroupResponse struct {
+	// Version is the version of this message used with a Kafka broker.
+	Version int16
+
+	// ThrottleMillis is how long of a throttle Kafka will apply to the client
+	// after this request.
+	// For Kafka < 2.0.0, the throttle is applied before issuing a response.
+	// For Kafka >= 2.0.0, the throttle is applied after issuing a response.
+	//
+	// This request switched at version 3.
+	ThrottleMillis int32 // v2+
+
+	// ErrorCode is the error for the join group request.
+	//
+	// GROUP_AUTHORIZATION_FAILED is returned if the client is not authorized
+	// to the group (no read perms).
+	//
+	// INVALID_GROUP_ID is returned if the requested group ID is invalid.
+	//
+	// COORDINATOR_NOT_AVAILABLE is returned if the coordinator is not available
+	// (due to the requested broker shutting down or it has not completed startup).
+	//
+	// COORDINATOR_LOAD_IN_PROGRESS is returned if the group is loading.
+	//
+	// NOT_COORDINATOR is returned if the requested broker is not the coordinator
+	// for the requested group.
+	//
+	// INVALID_SESSION_TIMEOUT is returned if the requested SessionTimeout is
+	// not within the broker's group.{min,max}.session.timeout.ms.
+	//
+	// INCONSISTENT_GROUP_PROTOCOL is returned if the requested protocols are
+	// incompatible with the existing group member's protocols, or if the join
+	// was for a new group but contained no protocols.
+	//
+	// UNKNOWN_MEMBER_ID is returned if the requested group is dead (likely
+	// just migrated to another coordinator or the group is temporarily unstable),
+	// or if the request was for a new group but contained a non-empty member ID,
+	// or if the group does not have the requested member ID (and the client must
+	// do the new-join-group dance).
+	//
+	// MEMBER_ID_REQUIRED is returned on the initial join of an existing group.
+	// This error was proposed in KIP-394 and introduced in Kafka 2.2.0 to
+	// prevent flaky clients from continually triggering rebalances and prevent
+	// these clients from consuming RAM with metadata. If a client sees
+	// this error, it should re-issue the join with the MemberID in the response.
+	// Non-flaky clients will join with this new member ID, but flaky clients
+	// will not join quickly enough before the pending member ID is rotated out
+	// due to hitting the session.timeout.ms.
+	//
+	// GROUP_MAX_SIZE_REACHED is returned as of Kafka 2.2.0 if the group has
+	// reached a broker's group.max.size.
+	ErrorCode int16
+
+	// Generation is the current "generation" of this group.
+	//
+	// This field has a default of -1.
+	Generation int32
+
+	// ProtocolType is the "type" of protocol being used for this group.
+	ProtocolType *string // v7+
+
+	// Protocol is the agreed upon protocol name (e.g. "sticky", "range").
+	//
+	// v7 of this response changed this field to be nullable.
+	Protocol *string
+
+	// LeaderID is the leader member.
+	LeaderID string
+
+	// True if the leader must skip running the assignment (KIP-814, Kafka 3.2+).
+	SkipAssignment bool // v9+
+
+	// MemberID is the member ID of the receiving client.
+	MemberID string
+
+	// Members contains all other members of this group. Only the group leader
+	// receives the members. The leader is responsible for balancing subscribed
+	// topic partitions and replying appropriately in a SyncGroup request.
+	Members []JoinGroupResponseMember
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+ UnknownTags Tags // v6+ +} + +func (*JoinGroupResponse) Key() int16 { return 11 } +func (*JoinGroupResponse) MaxVersion() int16 { return 9 } +func (v *JoinGroupResponse) SetVersion(version int16) { v.Version = version } +func (v *JoinGroupResponse) GetVersion() int16 { return v.Version } +func (v *JoinGroupResponse) IsFlexible() bool { return v.Version >= 6 } +func (v *JoinGroupResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 3 } +func (v *JoinGroupResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *JoinGroupResponse) RequestKind() Request { return &JoinGroupRequest{Version: v.Version} } + +func (v *JoinGroupResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + if version >= 2 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Generation + dst = kbin.AppendInt32(dst, v) + } + if version >= 7 { + v := v.ProtocolType + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Protocol + if version < 7 { + var vv string + if v != nil { + vv = *v + } + { + v := vv + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + } else { + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + } + { + v := v.LeaderID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 9 { + v := v.SkipAssignment + dst = kbin.AppendBool(dst, v) + } + { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Members + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 5 { + v := v.InstanceID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.ProtocolMetadata + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *JoinGroupResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *JoinGroupResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *JoinGroupResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 6 + _ = isFlexible + s := v + if version >= 2 { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int32() + s.Generation = v + } + if version >= 7 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = 
b.NullableString() + } + } + s.ProtocolType = v + } + { + var v *string + if version < 7 { + var vv string + if isFlexible { + if unsafe { + vv = b.UnsafeCompactString() + } else { + vv = b.CompactString() + } + } else { + if unsafe { + vv = b.UnsafeString() + } else { + vv = b.String() + } + } + v = &vv + } else { + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + } + s.Protocol = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.LeaderID = v + } + if version >= 9 { + v := b.Bool() + s.SkipAssignment = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.MemberID = v + } + { + v := s.Members + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]JoinGroupResponseMember, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.MemberID = v + } + if version >= 5 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.InstanceID = v + } + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.ProtocolMetadata = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Members = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrJoinGroupResponse returns a pointer to a default JoinGroupResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrJoinGroupResponse() *JoinGroupResponse { + var v JoinGroupResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to JoinGroupResponse. +func (v *JoinGroupResponse) Default() { + v.Generation = -1 +} + +// NewJoinGroupResponse returns a default JoinGroupResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewJoinGroupResponse() JoinGroupResponse { + var v JoinGroupResponse + v.Default() + return v +} + +// HeartbeatRequest issues a heartbeat for a member in a group, ensuring that +// Kafka does not expire the member from the group. +type HeartbeatRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Group is the group ID this heartbeat is for. + Group string + + // Generation is the group generation this heartbeat is for. + Generation int32 + + // MemberID is the member ID this member is for. + MemberID string + + // InstanceID is the instance ID of this member in the group (KIP-345). 
+ InstanceID *string // v3+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +func (*HeartbeatRequest) Key() int16 { return 12 } +func (*HeartbeatRequest) MaxVersion() int16 { return 4 } +func (v *HeartbeatRequest) SetVersion(version int16) { v.Version = version } +func (v *HeartbeatRequest) GetVersion() int16 { return v.Version } +func (v *HeartbeatRequest) IsFlexible() bool { return v.Version >= 4 } +func (v *HeartbeatRequest) IsGroupCoordinatorRequest() {} +func (v *HeartbeatRequest) ResponseKind() Response { + r := &HeartbeatResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *HeartbeatRequest) RequestWith(ctx context.Context, r Requestor) (*HeartbeatResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*HeartbeatResponse) + return resp, err +} + +func (v *HeartbeatRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + { + v := v.Group + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Generation + dst = kbin.AppendInt32(dst, v) + } + { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 3 { + v := v.InstanceID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *HeartbeatRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *HeartbeatRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *HeartbeatRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Group = v + } + { + v := b.Int32() + s.Generation = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.MemberID = v + } + if version >= 3 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.InstanceID = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrHeartbeatRequest returns a pointer to a default HeartbeatRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrHeartbeatRequest() *HeartbeatRequest { + var v HeartbeatRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to HeartbeatRequest. 
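+//
+// A sketch of a heartbeat loop between rebalances (cl, ctx, the interval, and
+// the joinResp values are illustrative assumptions):
+//
+//	req := kmsg.NewPtrHeartbeatRequest()
+//	req.Group = "my-group"
+//	req.Generation = joinResp.Generation
+//	req.MemberID = joinResp.MemberID
+//	for range time.Tick(3 * time.Second) {
+//		resp, err := req.RequestWith(ctx, cl)
+//		if err != nil {
+//			return err
+//		}
+//		if resp.ErrorCode != 0 {
+//			break // e.g. REBALANCE_IN_PROGRESS: stop heartbeating and rejoin
+//		}
+//	}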
+func (v *HeartbeatRequest) Default() {
+}
+
+// NewHeartbeatRequest returns a default HeartbeatRequest
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewHeartbeatRequest() HeartbeatRequest {
+	var v HeartbeatRequest
+	v.Default()
+	return v
+}
+
+// HeartbeatResponse is returned from a HeartbeatRequest.
+type HeartbeatResponse struct {
+	// Version is the version of this message used with a Kafka broker.
+	Version int16
+
+	// ThrottleMillis is how long of a throttle Kafka will apply to the client
+	// after this request.
+	// For Kafka < 2.0.0, the throttle is applied before issuing a response.
+	// For Kafka >= 2.0.0, the throttle is applied after issuing a response.
+	//
+	// This request switched at version 2.
+	ThrottleMillis int32 // v1+
+
+	// ErrorCode is the error for the heartbeat request.
+	//
+	// GROUP_AUTHORIZATION_FAILED is returned if the client is not authorized
+	// to the group (no read perms).
+	//
+	// INVALID_GROUP_ID is returned if the requested group ID is invalid.
+	//
+	// COORDINATOR_NOT_AVAILABLE is returned if the coordinator is not available
+	// (due to the requested broker shutting down or it has not completed startup).
+	//
+	// NOT_COORDINATOR is returned if the requested broker is not the coordinator
+	// for the requested group.
+	//
+	// UNKNOWN_MEMBER_ID is returned if the member ID is not a part of the group,
+	// or if the group is empty or dead.
+	//
+	// ILLEGAL_GENERATION is returned if the request's generation ID is invalid.
+	//
+	// REBALANCE_IN_PROGRESS is returned if the group is currently rebalancing.
+	ErrorCode int16
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+	UnknownTags Tags // v4+
+}
+
+func (*HeartbeatResponse) Key() int16 { return 12 }
+func (*HeartbeatResponse) MaxVersion() int16 { return 4 }
+func (v *HeartbeatResponse) SetVersion(version int16) { v.Version = version }
+func (v *HeartbeatResponse) GetVersion() int16 { return v.Version }
+func (v *HeartbeatResponse) IsFlexible() bool { return v.Version >= 4 }
+func (v *HeartbeatResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 2 }
+func (v *HeartbeatResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis }
+func (v *HeartbeatResponse) RequestKind() Request { return &HeartbeatRequest{Version: v.Version} }
+
+func (v *HeartbeatResponse) AppendTo(dst []byte) []byte {
+	version := v.Version
+	_ = version
+	isFlexible := version >= 4
+	_ = isFlexible
+	if version >= 1 {
+		v := v.ThrottleMillis
+		dst = kbin.AppendInt32(dst, v)
+	}
+	{
+		v := v.ErrorCode
+		dst = kbin.AppendInt16(dst, v)
+	}
+	if isFlexible {
+		dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len()))
+		dst = v.UnknownTags.AppendEach(dst)
+	}
+	return dst
+}
+
+func (v *HeartbeatResponse) ReadFrom(src []byte) error {
+	return v.readFrom(src, false)
+}
+
+func (v *HeartbeatResponse) UnsafeReadFrom(src []byte) error {
+	return v.readFrom(src, true)
+}
+
+func (v *HeartbeatResponse) readFrom(src []byte, unsafe bool) error {
+	v.Default()
+	b := kbin.Reader{Src: src}
+	version := v.Version
+	_ = version
+	isFlexible := version >= 4
+	_ = isFlexible
+	s := v
+	if version >= 1 {
+		v := b.Int32()
+		s.ThrottleMillis = v
+	}
+	{
+		v := b.Int16()
+		s.ErrorCode = v
+	}
+	if isFlexible {
+		s.UnknownTags = internalReadTags(&b)
+	}
+	return b.Complete()
+}
+
+// NewPtrHeartbeatResponse returns a pointer to a default HeartbeatResponse
+// This is a shortcut for creating a new(struct) and calling Default yourself.
+func NewPtrHeartbeatResponse() *HeartbeatResponse {
+	var v HeartbeatResponse
+	v.Default()
+	return &v
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to HeartbeatResponse.
+func (v *HeartbeatResponse) Default() {
+}
+
+// NewHeartbeatResponse returns a default HeartbeatResponse
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewHeartbeatResponse() HeartbeatResponse {
+	var v HeartbeatResponse
+	v.Default()
+	return v
+}
+
+type LeaveGroupRequestMember struct {
+	MemberID string
+
+	InstanceID *string
+
+	// Reason is an optional reason why this member is leaving the group
+	// (KIP-800, Kafka 3.2+).
+	Reason *string // v5+
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+	UnknownTags Tags // v4+
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to LeaveGroupRequestMember.
+func (v *LeaveGroupRequestMember) Default() {
+}
+
+// NewLeaveGroupRequestMember returns a default LeaveGroupRequestMember
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewLeaveGroupRequestMember() LeaveGroupRequestMember {
+	var v LeaveGroupRequestMember
+	v.Default()
+	return v
+}
+
+// LeaveGroupRequest issues a request for a group member to leave the group,
+// triggering a group rebalance.
+//
+// Version 3 removed MemberID and added a batch instance+member ID
+// way of leaving a group.
+type LeaveGroupRequest struct {
+	// Version is the version of this message used with a Kafka broker.
+	Version int16
+
+	// Group is the group to leave.
+	Group string
+
+	// MemberID is the member that is leaving.
+	MemberID string // v0-v2
+
+	// Members are member and group instance IDs to cause to leave a group.
+	Members []LeaveGroupRequestMember // v3+
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+	UnknownTags Tags // v4+
+}
+
+func (*LeaveGroupRequest) Key() int16 { return 13 }
+func (*LeaveGroupRequest) MaxVersion() int16 { return 5 }
+func (v *LeaveGroupRequest) SetVersion(version int16) { v.Version = version }
+func (v *LeaveGroupRequest) GetVersion() int16 { return v.Version }
+func (v *LeaveGroupRequest) IsFlexible() bool { return v.Version >= 4 }
+func (v *LeaveGroupRequest) IsGroupCoordinatorRequest() {}
+func (v *LeaveGroupRequest) ResponseKind() Response {
+	r := &LeaveGroupResponse{Version: v.Version}
+	r.Default()
+	return r
+}
+
+// RequestWith issues v on r and returns the response or an error.
+// For sharded requests, the response may be merged and still return an error.
+// It is better to rely on client.RequestSharded than to rely on proper merging behavior.
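+//
+// A minimal sketch (illustrative; the group and instance IDs are assumed
+// values) of a v3+ batch leave that removes a static member by instance ID:
+//
+//	req := kmsg.NewPtrLeaveGroupRequest()
+//	req.Group = "my-group"
+//	m := kmsg.NewLeaveGroupRequestMember()
+//	instance := "consumer-0"
+//	m.InstanceID = &instance
+//	req.Members = append(req.Members, m)
+//	resp, err := req.RequestWith(ctx, cl)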
+func (v *LeaveGroupRequest) RequestWith(ctx context.Context, r Requestor) (*LeaveGroupResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*LeaveGroupResponse) + return resp, err +} + +func (v *LeaveGroupRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + { + v := v.Group + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 0 && version <= 2 { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 3 { + v := v.Members + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.InstanceID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if version >= 5 { + v := v.Reason + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *LeaveGroupRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *LeaveGroupRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *LeaveGroupRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Group = v + } + if version >= 0 && version <= 2 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.MemberID = v + } + if version >= 3 { + v := s.Members + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]LeaveGroupRequestMember, l)...) 
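+			// Note: a[:0] above reuses any existing backing array, and this
+			// append only zero-extends it to the decoded length, so repeated
+			// decodes into the same struct avoid reallocating the slice.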
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.MemberID = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.InstanceID = v + } + if version >= 5 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Reason = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Members = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrLeaveGroupRequest returns a pointer to a default LeaveGroupRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrLeaveGroupRequest() *LeaveGroupRequest { + var v LeaveGroupRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to LeaveGroupRequest. +func (v *LeaveGroupRequest) Default() { +} + +// NewLeaveGroupRequest returns a default LeaveGroupRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewLeaveGroupRequest() LeaveGroupRequest { + var v LeaveGroupRequest + v.Default() + return v +} + +type LeaveGroupResponseMember struct { + MemberID string + + InstanceID *string + + // An individual member's leave error code. + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to LeaveGroupResponseMember. +func (v *LeaveGroupResponseMember) Default() { +} + +// NewLeaveGroupResponseMember returns a default LeaveGroupResponseMember +// This is a shortcut for creating a struct and calling Default yourself. +func NewLeaveGroupResponseMember() LeaveGroupResponseMember { + var v LeaveGroupResponseMember + v.Default() + return v +} + +// LeaveGroupResponse is returned from a LeaveGroupRequest. +type LeaveGroupResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 2. + ThrottleMillis int32 // v1+ + + // ErrorCode is the error for the leave group request. + // + // GROUP_AUTHORIZATION_FAILED is returned if the client is not authorized + // to the group (no read perms). + // + // INVALID_GROUP_ID is returned in the requested group ID is invalid. + // + // COORDINATOR_NOT_AVAILABLE is returned if the coordinator is not available + // (due to the requested broker shutting down or it has not completed startup). + // + // COORDINATOR_LOAD_IN_PROGRESS is returned if the group is loading. + // + // NOT_COORDINATOR is returned if the requested broker is not the coordinator + // for the requested group. 
+ // + // UNKNOWN_MEMBER_ID is returned if the member ID is not a part of the group, + // or if the group is empty or dead. + ErrorCode int16 + + // Members are the list of members and group instance IDs that left the group. + Members []LeaveGroupResponseMember // v3+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +func (*LeaveGroupResponse) Key() int16 { return 13 } +func (*LeaveGroupResponse) MaxVersion() int16 { return 5 } +func (v *LeaveGroupResponse) SetVersion(version int16) { v.Version = version } +func (v *LeaveGroupResponse) GetVersion() int16 { return v.Version } +func (v *LeaveGroupResponse) IsFlexible() bool { return v.Version >= 4 } +func (v *LeaveGroupResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 2 } +func (v *LeaveGroupResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *LeaveGroupResponse) RequestKind() Request { return &LeaveGroupRequest{Version: v.Version} } + +func (v *LeaveGroupResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + if version >= 1 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if version >= 3 { + v := v.Members + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.InstanceID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *LeaveGroupResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *LeaveGroupResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *LeaveGroupResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + s := v + if version >= 1 { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if version >= 3 { + v := s.Members + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]LeaveGroupResponseMember, l)...) 
+		}
+		for i := int32(0); i < l; i++ {
+			v := &a[i]
+			v.Default()
+			s := v
+			{
+				var v string
+				if unsafe {
+					if isFlexible {
+						v = b.UnsafeCompactString()
+					} else {
+						v = b.UnsafeString()
+					}
+				} else {
+					if isFlexible {
+						v = b.CompactString()
+					} else {
+						v = b.String()
+					}
+				}
+				s.MemberID = v
+			}
+			{
+				var v *string
+				if isFlexible {
+					if unsafe {
+						v = b.UnsafeCompactNullableString()
+					} else {
+						v = b.CompactNullableString()
+					}
+				} else {
+					if unsafe {
+						v = b.UnsafeNullableString()
+					} else {
+						v = b.NullableString()
+					}
+				}
+				s.InstanceID = v
+			}
+			{
+				v := b.Int16()
+				s.ErrorCode = v
+			}
+			if isFlexible {
+				s.UnknownTags = internalReadTags(&b)
+			}
+		}
+		v = a
+		s.Members = v
+	}
+	if isFlexible {
+		s.UnknownTags = internalReadTags(&b)
+	}
+	return b.Complete()
+}
+
+// NewPtrLeaveGroupResponse returns a pointer to a default LeaveGroupResponse
+// This is a shortcut for creating a new(struct) and calling Default yourself.
+func NewPtrLeaveGroupResponse() *LeaveGroupResponse {
+	var v LeaveGroupResponse
+	v.Default()
+	return &v
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to LeaveGroupResponse.
+func (v *LeaveGroupResponse) Default() {
+}
+
+// NewLeaveGroupResponse returns a default LeaveGroupResponse
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewLeaveGroupResponse() LeaveGroupResponse {
+	var v LeaveGroupResponse
+	v.Default()
+	return v
+}
+
+type SyncGroupRequestGroupAssignment struct {
+	// MemberID is the member this assignment is for.
+	MemberID string
+
+	// MemberAssignment is the assignment for this member. This is typically
+	// of type GroupMemberAssignment.
+	MemberAssignment []byte
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+	UnknownTags Tags // v4+
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to SyncGroupRequestGroupAssignment.
+func (v *SyncGroupRequestGroupAssignment) Default() {
+}
+
+// NewSyncGroupRequestGroupAssignment returns a default SyncGroupRequestGroupAssignment
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewSyncGroupRequestGroupAssignment() SyncGroupRequestGroupAssignment {
+	var v SyncGroupRequestGroupAssignment
+	v.Default()
+	return v
+}
+
+// SyncGroupRequest is issued by all group members after they receive a
+// response for JoinGroup. The group leader is responsible for sending member
+// assignments with the request; all other members do not.
+//
+// Once the leader sends the group assignment, all members will be replied to.
+type SyncGroupRequest struct {
+	// Version is the version of this message used with a Kafka broker.
+	Version int16
+
+	// Group is the group ID this sync group is for.
+	Group string
+
+	// Generation is the group generation this sync is for.
+	Generation int32
+
+	// MemberID is the member ID this member is.
+	MemberID string
+
+	// InstanceID is the instance ID of this member in the group (KIP-345).
+	InstanceID *string // v3+
+
+	// ProtocolType is the "type" of protocol being used for this group.
+	ProtocolType *string // v5+
+
+	// Protocol is the agreed upon protocol name (i.e. "sticky", "range").
+	Protocol *string // v5+
+
+	// GroupAssignment, sent only from the group leader, is the topic partition
+	// assignment it has decided on for all members.
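+	//
+	// A leader-side sketch (illustrative; memberID is an assumed value, and
+	// constructor names follow the New* pattern used throughout this file)
+	// serializing one member's partitions as a GroupMemberAssignment:
+	//
+	//	var ma kmsg.GroupMemberAssignment
+	//	ma.Version = 1
+	//	t := kmsg.NewGroupMemberAssignmentTopic()
+	//	t.Topic = "orders"
+	//	t.Partitions = []int32{0, 1, 2}
+	//	ma.Topics = append(ma.Topics, t)
+	//	ga := kmsg.NewSyncGroupRequestGroupAssignment()
+	//	ga.MemberID = memberID
+	//	ga.MemberAssignment = ma.AppendTo(nil)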
+ GroupAssignment []SyncGroupRequestGroupAssignment + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +func (*SyncGroupRequest) Key() int16 { return 14 } +func (*SyncGroupRequest) MaxVersion() int16 { return 5 } +func (v *SyncGroupRequest) SetVersion(version int16) { v.Version = version } +func (v *SyncGroupRequest) GetVersion() int16 { return v.Version } +func (v *SyncGroupRequest) IsFlexible() bool { return v.Version >= 4 } +func (v *SyncGroupRequest) IsGroupCoordinatorRequest() {} +func (v *SyncGroupRequest) ResponseKind() Response { + r := &SyncGroupResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *SyncGroupRequest) RequestWith(ctx context.Context, r Requestor) (*SyncGroupResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*SyncGroupResponse) + return resp, err +} + +func (v *SyncGroupRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + { + v := v.Group + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Generation + dst = kbin.AppendInt32(dst, v) + } + { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 3 { + v := v.InstanceID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if version >= 5 { + v := v.ProtocolType + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if version >= 5 { + v := v.Protocol + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.GroupAssignment + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.MemberAssignment + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *SyncGroupRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *SyncGroupRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *SyncGroupRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Group = v + } + { + v := b.Int32() + s.Generation = v + } + { + var v string + if unsafe { + if isFlexible { + v = 
b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.MemberID = v + } + if version >= 3 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.InstanceID = v + } + if version >= 5 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ProtocolType = v + } + if version >= 5 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Protocol = v + } + { + v := s.GroupAssignment + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]SyncGroupRequestGroupAssignment, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.MemberID = v + } + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.MemberAssignment = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.GroupAssignment = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrSyncGroupRequest returns a pointer to a default SyncGroupRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrSyncGroupRequest() *SyncGroupRequest { + var v SyncGroupRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to SyncGroupRequest. +func (v *SyncGroupRequest) Default() { +} + +// NewSyncGroupRequest returns a default SyncGroupRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewSyncGroupRequest() SyncGroupRequest { + var v SyncGroupRequest + v.Default() + return v +} + +// SyncGroupResponse is returned from a SyncGroupRequest. +type SyncGroupResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 2. + ThrottleMillis int32 // v1+ + + // ErrorCode is the error for the sync group request. + // + // GROUP_AUTHORIZATION_FAILED is returned if the client is not authorized + // to the group (no read perms). + // + // INVALID_GROUP_ID is returned in the requested group ID is invalid. + // + // COORDINATOR_NOT_AVAILABLE is returned if the coordinator is not available. + // + // NOT_COORDINATOR is returned if the requested broker is not the coordinator + // for the requested group. 
+ // + // UNKNOWN_MEMBER_ID is returned if the member ID is not a part of the group, + // or if the group is empty or dead. + // + // ILLEGAL_GENERATION is returned if the request's generation ID is invalid. + // + // REBALANCE_IN_PROGRESS is returned if the group switched back to rebalancing. + // + // UNKNOWN_SERVER_ERROR is returned if the store of the group assignment + // resulted in a too large message. + ErrorCode int16 + + // ProtocolType is the "type" of protocol being used for this group. + ProtocolType *string // v5+ + + // Protocol is the agreed upon protocol name (i.e. "sticky", "range"). + Protocol *string // v5+ + + // MemberAssignment is the assignment for this member that the leader + // determined. + MemberAssignment []byte + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +func (*SyncGroupResponse) Key() int16 { return 14 } +func (*SyncGroupResponse) MaxVersion() int16 { return 5 } +func (v *SyncGroupResponse) SetVersion(version int16) { v.Version = version } +func (v *SyncGroupResponse) GetVersion() int16 { return v.Version } +func (v *SyncGroupResponse) IsFlexible() bool { return v.Version >= 4 } +func (v *SyncGroupResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 2 } +func (v *SyncGroupResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *SyncGroupResponse) RequestKind() Request { return &SyncGroupRequest{Version: v.Version} } + +func (v *SyncGroupResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + if version >= 1 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if version >= 5 { + v := v.ProtocolType + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if version >= 5 { + v := v.Protocol + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.MemberAssignment + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *SyncGroupResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *SyncGroupResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *SyncGroupResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + s := v + if version >= 1 { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if version >= 5 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ProtocolType = v + } + if version >= 5 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Protocol = v + } + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + 
s.MemberAssignment = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrSyncGroupResponse returns a pointer to a default SyncGroupResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrSyncGroupResponse() *SyncGroupResponse { + var v SyncGroupResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to SyncGroupResponse. +func (v *SyncGroupResponse) Default() { +} + +// NewSyncGroupResponse returns a default SyncGroupResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewSyncGroupResponse() SyncGroupResponse { + var v SyncGroupResponse + v.Default() + return v +} + +// DescribeGroupsRequest requests metadata for group IDs. +type DescribeGroupsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Groups is an array of group IDs to request metadata for. + Groups []string + + // IncludeAuthorizedOperations, introduced in Kafka 2.3.0, specifies + // whether to include a bitfield of AclOperations this client can perform + // on the groups. See KIP-430 for more details. + IncludeAuthorizedOperations bool // v3+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v5+ +} + +func (*DescribeGroupsRequest) Key() int16 { return 15 } +func (*DescribeGroupsRequest) MaxVersion() int16 { return 5 } +func (v *DescribeGroupsRequest) SetVersion(version int16) { v.Version = version } +func (v *DescribeGroupsRequest) GetVersion() int16 { return v.Version } +func (v *DescribeGroupsRequest) IsFlexible() bool { return v.Version >= 5 } +func (v *DescribeGroupsRequest) IsGroupCoordinatorRequest() {} +func (v *DescribeGroupsRequest) ResponseKind() Response { + r := &DescribeGroupsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
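+//
+// A minimal sketch (illustrative; the group names are assumed values):
+//
+//	req := kmsg.NewPtrDescribeGroupsRequest()
+//	req.Groups = []string{"orders", "payments"}
+//	req.IncludeAuthorizedOperations = true // v3+
+//	resp, err := req.RequestWith(ctx, cl)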
+func (v *DescribeGroupsRequest) RequestWith(ctx context.Context, r Requestor) (*DescribeGroupsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*DescribeGroupsResponse) + return resp, err +} + +func (v *DescribeGroupsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 5 + _ = isFlexible + { + v := v.Groups + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + } + if version >= 3 { + v := v.IncludeAuthorizedOperations + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeGroupsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeGroupsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeGroupsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 5 + _ = isFlexible + s := v + { + v := s.Groups + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]string, l)...) + } + for i := int32(0); i < l; i++ { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + a[i] = v + } + v = a + s.Groups = v + } + if version >= 3 { + v := b.Bool() + s.IncludeAuthorizedOperations = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeGroupsRequest returns a pointer to a default DescribeGroupsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeGroupsRequest() *DescribeGroupsRequest { + var v DescribeGroupsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeGroupsRequest. +func (v *DescribeGroupsRequest) Default() { +} + +// NewDescribeGroupsRequest returns a default DescribeGroupsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeGroupsRequest() DescribeGroupsRequest { + var v DescribeGroupsRequest + v.Default() + return v +} + +type DescribeGroupsResponseGroupMember struct { + // MemberID is the member ID of a member in this group. + MemberID string + + // InstanceID is the instance ID of this member in the group (KIP-345). + InstanceID *string // v4+ + + // ClientID is the client ID used by this member. + ClientID string + + // ClientHost is the host this client is running on. + ClientHost string + + // ProtocolMetadata is the metadata this member included when joining + // the group. If using normal (Java-like) consumers, this will be of + // type GroupMemberMetadata. + ProtocolMetadata []byte + + // MemberAssignment is the assignment for this member in the group. + // If using normal (Java-like) consumers, this will be of type + // GroupMemberAssignment. + MemberAssignment []byte + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+	UnknownTags Tags // v5+
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to DescribeGroupsResponseGroupMember.
+func (v *DescribeGroupsResponseGroupMember) Default() {
+}
+
+// NewDescribeGroupsResponseGroupMember returns a default DescribeGroupsResponseGroupMember
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewDescribeGroupsResponseGroupMember() DescribeGroupsResponseGroupMember {
+	var v DescribeGroupsResponseGroupMember
+	v.Default()
+	return v
+}
+
+type DescribeGroupsResponseGroup struct {
+	// ErrorCode is the error code for an individual group in a request.
+	//
+	// GROUP_AUTHORIZATION_FAILED is returned if the client is not authorized
+	// to describe a group.
+	//
+	// INVALID_GROUP_ID is returned if the requested group ID is invalid.
+	//
+	// COORDINATOR_NOT_AVAILABLE is returned if the coordinator for this
+	// group is not yet active.
+	//
+	// COORDINATOR_LOAD_IN_PROGRESS is returned if the group is loading.
+	//
+	// NOT_COORDINATOR is returned if the requested broker is not the
+	// coordinator for this group.
+	ErrorCode int16
+
+	// Group is the id of this group.
+	Group string
+
+	// State is the state this group is in.
+	State string
+
+	// ProtocolType is the "type" of protocol being used for this group.
+	ProtocolType string
+
+	// Protocol is the agreed upon protocol for all members in this group.
+	Protocol string
+
+	// Members contains members in this group.
+	Members []DescribeGroupsResponseGroupMember
+
+	// AuthorizedOperations is a bitfield containing which operations the
+	// client is allowed to perform on this group.
+	// This is only returned if requested.
+	//
+	// This field has a default of -2147483648.
+	AuthorizedOperations int32 // v3+
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+	UnknownTags Tags // v5+
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to DescribeGroupsResponseGroup.
+func (v *DescribeGroupsResponseGroup) Default() {
+	v.AuthorizedOperations = -2147483648
+}
+
+// NewDescribeGroupsResponseGroup returns a default DescribeGroupsResponseGroup
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewDescribeGroupsResponseGroup() DescribeGroupsResponseGroup {
+	var v DescribeGroupsResponseGroup
+	v.Default()
+	return v
+}
+
+// DescribeGroupsResponse is returned from a DescribeGroupsRequest.
+type DescribeGroupsResponse struct {
+	// Version is the version of this message used with a Kafka broker.
+	Version int16
+
+	// ThrottleMillis is how long of a throttle Kafka will apply to the client
+	// after this request.
+	// For Kafka < 2.0.0, the throttle is applied before issuing a response.
+	// For Kafka >= 2.0.0, the throttle is applied after issuing a response.
+	//
+	// This request switched at version 2.
+	ThrottleMillis int32 // v1+
+
+	// Groups is an array of group metadata.
+	Groups []DescribeGroupsResponseGroup
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+ UnknownTags Tags // v5+ +} + +func (*DescribeGroupsResponse) Key() int16 { return 15 } +func (*DescribeGroupsResponse) MaxVersion() int16 { return 5 } +func (v *DescribeGroupsResponse) SetVersion(version int16) { v.Version = version } +func (v *DescribeGroupsResponse) GetVersion() int16 { return v.Version } +func (v *DescribeGroupsResponse) IsFlexible() bool { return v.Version >= 5 } +func (v *DescribeGroupsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 2 } +func (v *DescribeGroupsResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *DescribeGroupsResponse) RequestKind() Request { + return &DescribeGroupsRequest{Version: v.Version} +} + +func (v *DescribeGroupsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 5 + _ = isFlexible + if version >= 1 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Groups + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Group + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.State + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ProtocolType + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Protocol + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Members + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 4 { + v := v.InstanceID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.ClientID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ClientHost + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ProtocolMetadata + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + { + v := v.MemberAssignment + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 3 { + v := v.AuthorizedOperations + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeGroupsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeGroupsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeGroupsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} 
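+	// The reader consumes src in place; a failed read flips b.Ok() and the
+	// failure surfaces from b.Complete() at the end. For flexible versions
+	// (v5+ here), lengths use compact encodings: unsigned varints of
+	// length+1 rather than fixed-width int16/int32 sizes.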
+ version := v.Version + _ = version + isFlexible := version >= 5 + _ = isFlexible + s := v + if version >= 1 { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Groups + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeGroupsResponseGroup, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Group = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.State = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ProtocolType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Protocol = v + } + { + v := s.Members + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeGroupsResponseGroupMember, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.MemberID = v + } + if version >= 4 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.InstanceID = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ClientID = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ClientHost = v + } + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.ProtocolMetadata = v + } + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.MemberAssignment = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Members = v + } + if version >= 3 { + v := b.Int32() + s.AuthorizedOperations = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Groups = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeGroupsResponse returns a pointer to a default DescribeGroupsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. 
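+//
+// A follow-on sketch (illustrative; assumes a resp from a DescribeGroups
+// call): decoding a Java-style consumer member's assignment bytes, which
+// are typically a serialized GroupMemberAssignment:
+//
+//	for _, g := range resp.Groups {
+//		for _, m := range g.Members {
+//			var ma kmsg.GroupMemberAssignment
+//			if err := ma.ReadFrom(m.MemberAssignment); err != nil {
+//				continue // not a standard consumer assignment
+//			}
+//		}
+//	}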
+func NewPtrDescribeGroupsResponse() *DescribeGroupsResponse { + var v DescribeGroupsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeGroupsResponse. +func (v *DescribeGroupsResponse) Default() { +} + +// NewDescribeGroupsResponse returns a default DescribeGroupsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeGroupsResponse() DescribeGroupsResponse { + var v DescribeGroupsResponse + v.Default() + return v +} + +// ListGroupsRequest issues a request to list all groups. +// +// To list all groups in a cluster, this must be issued to every broker. +type ListGroupsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // StatesFilter, proposed in KIP-518 and introduced in Kafka 2.6.0, + // allows filtering groups by state, where a state is any of + // "Preparing", "PreparingRebalance", "CompletingRebalance", "Stable", + // "Dead", or "Empty". If empty, all groups are returned. + StatesFilter []string // v4+ + + // TypesFilter, part of KIP-848, filters the types of groups we want + // to list. If empty, all groups are returned. + TypesFilter []string // v5+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +func (*ListGroupsRequest) Key() int16 { return 16 } +func (*ListGroupsRequest) MaxVersion() int16 { return 5 } +func (v *ListGroupsRequest) SetVersion(version int16) { v.Version = version } +func (v *ListGroupsRequest) GetVersion() int16 { return v.Version } +func (v *ListGroupsRequest) IsFlexible() bool { return v.Version >= 3 } +func (v *ListGroupsRequest) ResponseKind() Response { + r := &ListGroupsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
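+//
+// A minimal sketch (illustrative) that lists only stable groups on one
+// broker; a cluster-wide listing repeats this against every broker:
+//
+//	req := kmsg.NewPtrListGroupsRequest()
+//	req.StatesFilter = []string{"Stable"} // v4+
+//	resp, err := req.RequestWith(ctx, cl)
+//	for _, g := range resp.Groups {
+//		fmt.Println(g.Group, g.GroupState)
+//	}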
+func (v *ListGroupsRequest) RequestWith(ctx context.Context, r Requestor) (*ListGroupsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*ListGroupsResponse) + return resp, err +} + +func (v *ListGroupsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + if version >= 4 { + v := v.StatesFilter + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + } + if version >= 5 { + v := v.TypesFilter + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ListGroupsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ListGroupsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ListGroupsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + if version >= 4 { + v := s.StatesFilter + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]string, l)...) + } + for i := int32(0); i < l; i++ { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + a[i] = v + } + v = a + s.StatesFilter = v + } + if version >= 5 { + v := s.TypesFilter + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]string, l)...) + } + for i := int32(0); i < l; i++ { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + a[i] = v + } + v = a + s.TypesFilter = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrListGroupsRequest returns a pointer to a default ListGroupsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrListGroupsRequest() *ListGroupsRequest { + var v ListGroupsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListGroupsRequest. +func (v *ListGroupsRequest) Default() { +} + +// NewListGroupsRequest returns a default ListGroupsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewListGroupsRequest() ListGroupsRequest { + var v ListGroupsRequest + v.Default() + return v +} + +type ListGroupsResponseGroup struct { + // Group is a Kafka group. + Group string + + // ProtocolType is the protocol type in use by the group. + ProtocolType string + + // The group state. 
+ GroupState string // v4+ + + // The group type. + GroupType string // v5+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListGroupsResponseGroup. +func (v *ListGroupsResponseGroup) Default() { +} + +// NewListGroupsResponseGroup returns a default ListGroupsResponseGroup +// This is a shortcut for creating a struct and calling Default yourself. +func NewListGroupsResponseGroup() ListGroupsResponseGroup { + var v ListGroupsResponseGroup + v.Default() + return v +} + +// ListGroupsResponse is returned from a ListGroupsRequest. +type ListGroupsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 2. + ThrottleMillis int32 // v1+ + + // ErrorCode is the error returned for the list groups request. + // + // COORDINATOR_NOT_AVAILABLE is returned if the coordinator is not yet active. + // + // COORDINATOR_LOAD_IN_PROGRESS is returned if the group manager is loading. + ErrorCode int16 + + // Groups is the list of groups Kafka knows of. + Groups []ListGroupsResponseGroup + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +func (*ListGroupsResponse) Key() int16 { return 16 } +func (*ListGroupsResponse) MaxVersion() int16 { return 5 } +func (v *ListGroupsResponse) SetVersion(version int16) { v.Version = version } +func (v *ListGroupsResponse) GetVersion() int16 { return v.Version } +func (v *ListGroupsResponse) IsFlexible() bool { return v.Version >= 3 } +func (v *ListGroupsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 2 } +func (v *ListGroupsResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *ListGroupsResponse) RequestKind() Request { return &ListGroupsRequest{Version: v.Version} } + +func (v *ListGroupsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + if version >= 1 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Groups + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Group + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ProtocolType + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 4 { + v := v.GroupState + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 5 { + v := v.GroupType + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + 
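+// A round-trip sketch (illustrative): every message in this file encodes
+// with AppendTo and decodes with ReadFrom, and readFrom branches on
+// v.Version, so the version must be set before decoding:
+//
+//	in := kmsg.NewListGroupsResponse()
+//	in.Version = 4
+//	raw := in.AppendTo(nil)
+//	out := kmsg.NewListGroupsResponse()
+//	out.Version = 4
+//	if err := out.ReadFrom(raw); err != nil {
+//		// handle decode error
+//	}
+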
+func (v *ListGroupsResponse) ReadFrom(src []byte) error {
+	return v.readFrom(src, false)
+}
+
+func (v *ListGroupsResponse) UnsafeReadFrom(src []byte) error {
+	return v.readFrom(src, true)
+}
+
+func (v *ListGroupsResponse) readFrom(src []byte, unsafe bool) error {
+	v.Default()
+	b := kbin.Reader{Src: src}
+	version := v.Version
+	_ = version
+	isFlexible := version >= 3
+	_ = isFlexible
+	s := v
+	if version >= 1 {
+		v := b.Int32()
+		s.ThrottleMillis = v
+	}
+	{
+		v := b.Int16()
+		s.ErrorCode = v
+	}
+	{
+		v := s.Groups
+		a := v
+		var l int32
+		if isFlexible {
+			l = b.CompactArrayLen()
+		} else {
+			l = b.ArrayLen()
+		}
+		if !b.Ok() {
+			return b.Complete()
+		}
+		a = a[:0]
+		if l > 0 {
+			a = append(a, make([]ListGroupsResponseGroup, l)...)
+		}
+		for i := int32(0); i < l; i++ {
+			v := &a[i]
+			v.Default()
+			s := v
+			{
+				var v string
+				if unsafe {
+					if isFlexible {
+						v = b.UnsafeCompactString()
+					} else {
+						v = b.UnsafeString()
+					}
+				} else {
+					if isFlexible {
+						v = b.CompactString()
+					} else {
+						v = b.String()
+					}
+				}
+				s.Group = v
+			}
+			{
+				var v string
+				if unsafe {
+					if isFlexible {
+						v = b.UnsafeCompactString()
+					} else {
+						v = b.UnsafeString()
+					}
+				} else {
+					if isFlexible {
+						v = b.CompactString()
+					} else {
+						v = b.String()
+					}
+				}
+				s.ProtocolType = v
+			}
+			if version >= 4 {
+				var v string
+				if unsafe {
+					if isFlexible {
+						v = b.UnsafeCompactString()
+					} else {
+						v = b.UnsafeString()
+					}
+				} else {
+					if isFlexible {
+						v = b.CompactString()
+					} else {
+						v = b.String()
+					}
+				}
+				s.GroupState = v
+			}
+			if version >= 5 {
+				var v string
+				if unsafe {
+					if isFlexible {
+						v = b.UnsafeCompactString()
+					} else {
+						v = b.UnsafeString()
+					}
+				} else {
+					if isFlexible {
+						v = b.CompactString()
+					} else {
+						v = b.String()
+					}
+				}
+				s.GroupType = v
+			}
+			if isFlexible {
+				s.UnknownTags = internalReadTags(&b)
+			}
+		}
+		v = a
+		s.Groups = v
+	}
+	if isFlexible {
+		s.UnknownTags = internalReadTags(&b)
+	}
+	return b.Complete()
+}
+
+// NewPtrListGroupsResponse returns a pointer to a default ListGroupsResponse
+// This is a shortcut for creating a new(struct) and calling Default yourself.
+func NewPtrListGroupsResponse() *ListGroupsResponse {
+	var v ListGroupsResponse
+	v.Default()
+	return &v
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to ListGroupsResponse.
+func (v *ListGroupsResponse) Default() {
+}
+
+// NewListGroupsResponse returns a default ListGroupsResponse
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewListGroupsResponse() ListGroupsResponse {
+	var v ListGroupsResponse
+	v.Default()
+	return v
+}
+
+// SASLHandshakeRequest begins the sasl authentication flow. Note that Kerberos
+// GSSAPI authentication has its own unique flow.
+type SASLHandshakeRequest struct {
+	// Version is the version of this message used with a Kafka broker.
+	Version int16
+
+	// Mechanism is the mechanism to use for the sasl handshake (e.g., "PLAIN").
+	//
+	// For version 0, if this mechanism is supported, it is expected that the
+	// client immediately authenticates using this mechanism. Note that the
+	// only mechanism exclusive to v0 is PLAIN.
+	//
+	// For version 1, if the mechanism is supported, the next request to issue
+	// is SASLAuthenticateRequest.
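+	//
+	// A flow sketch (illustrative; the mechanism is an assumed choice):
+	//
+	//	hs := kmsg.NewPtrSASLHandshakeRequest()
+	//	hs.Version = 1
+	//	hs.Mechanism = "SCRAM-SHA-256"
+	//	hsResp, err := hs.RequestWith(ctx, cl)
+	//	// on success, continue with SASLAuthenticateRequest (key 36),
+	//	// which carries the mechanism-specific authentication bytes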
+ Mechanism string +} + +func (*SASLHandshakeRequest) Key() int16 { return 17 } +func (*SASLHandshakeRequest) MaxVersion() int16 { return 1 } +func (v *SASLHandshakeRequest) SetVersion(version int16) { v.Version = version } +func (v *SASLHandshakeRequest) GetVersion() int16 { return v.Version } +func (v *SASLHandshakeRequest) IsFlexible() bool { return false } +func (v *SASLHandshakeRequest) ResponseKind() Response { + r := &SASLHandshakeResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith issues v to r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *SASLHandshakeRequest) RequestWith(ctx context.Context, r Requestor) (*SASLHandshakeResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*SASLHandshakeResponse) + return resp, err +} + +func (v *SASLHandshakeRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.Mechanism + dst = kbin.AppendString(dst, v) + } + return dst +} + +func (v *SASLHandshakeRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *SASLHandshakeRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *SASLHandshakeRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Mechanism = v + } + return b.Complete() +} + +// NewPtrSASLHandshakeRequest returns a pointer to a default SASLHandshakeRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrSASLHandshakeRequest() *SASLHandshakeRequest { + var v SASLHandshakeRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to SASLHandshakeRequest. +func (v *SASLHandshakeRequest) Default() { +} + +// NewSASLHandshakeRequest returns a default SASLHandshakeRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewSASLHandshakeRequest() SASLHandshakeRequest { + var v SASLHandshakeRequest + v.Default() + return v +} + +// SASLHandshakeResponse is returned for a SASLHandshakeRequest. +type SASLHandshakeResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ErrorCode is non-zero for ILLEGAL_SASL_STATE, meaning a sasl handshake + // is not expected at this point in the connection, or UNSUPPORTED_SASL_MECHANISM, + // meaning the requested mechanism is not supported. + ErrorCode int16 + + // SupportedMechanisms is the list of mechanisms supported if this request + // errored.
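+	//
+	// For example, a broker may advertise "PLAIN", "SCRAM-SHA-256", and
+	// "SCRAM-SHA-512" here (illustrative values).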
+ SupportedMechanisms []string +} + +func (*SASLHandshakeResponse) Key() int16 { return 17 } +func (*SASLHandshakeResponse) MaxVersion() int16 { return 1 } +func (v *SASLHandshakeResponse) SetVersion(version int16) { v.Version = version } +func (v *SASLHandshakeResponse) GetVersion() int16 { return v.Version } +func (v *SASLHandshakeResponse) IsFlexible() bool { return false } +func (v *SASLHandshakeResponse) RequestKind() Request { + return &SASLHandshakeRequest{Version: v.Version} +} + +func (v *SASLHandshakeResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.SupportedMechanisms + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := v[i] + dst = kbin.AppendString(dst, v) + } + } + return dst +} + +func (v *SASLHandshakeResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *SASLHandshakeResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *SASLHandshakeResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + v := s.SupportedMechanisms + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]string, l)...) + } + for i := int32(0); i < l; i++ { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + a[i] = v + } + v = a + s.SupportedMechanisms = v + } + return b.Complete() +} + +// NewPtrSASLHandshakeResponse returns a pointer to a default SASLHandshakeResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrSASLHandshakeResponse() *SASLHandshakeResponse { + var v SASLHandshakeResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to SASLHandshakeResponse. +func (v *SASLHandshakeResponse) Default() { +} + +// NewSASLHandshakeResponse returns a default SASLHandshakeResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewSASLHandshakeResponse() SASLHandshakeResponse { + var v SASLHandshakeResponse + v.Default() + return v +} + +// ApiVersionsRequest requests what API versions a Kafka broker supports. +// +// Note that the client does not know the version a broker supports before +// sending this request. +// +// Before Kafka 2.4.0, if the client used a version larger than the broker +// understands, the broker would reply with an UNSUPPORTED_VERSION error using +// the version 0 message format (i.e., 6 bytes long!). The client should retry +// with a lower version. +// +// After Kafka 2.4.0, if the client uses a version larger than the broker +// understands, the broker replies with UNSUPPORTED_VERSION using the version +// 0 message format but additionally includes the api versions the broker does +// support. +type ApiVersionsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ClientSoftwareName, added for KIP-511 with Kafka 2.4.0, is the name of the + // client issuing this request. The broker can use this to enrich its own + // debugging information of which version of what clients are connected. + // + // If using v3, this field is required and must match the following pattern: + // + // [a-zA-Z0-9](?:[a-zA-Z0-9\\-.]*[a-zA-Z0-9])?
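+	//
+	// For example, "kgo" and "franz-go" match this pattern; names containing
+	// spaces, or beginning or ending with '-' or '.', do not.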
+ ClientSoftwareName string // v3+ + + // ClientSoftwareVersion is the version of the software name in the prior + // field. It must match the same regex (thus, this is also required). + ClientSoftwareVersion string // v3+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +func (*ApiVersionsRequest) Key() int16 { return 18 } +func (*ApiVersionsRequest) MaxVersion() int16 { return 3 } +func (v *ApiVersionsRequest) SetVersion(version int16) { v.Version = version } +func (v *ApiVersionsRequest) GetVersion() int16 { return v.Version } +func (v *ApiVersionsRequest) IsFlexible() bool { return v.Version >= 3 } +func (v *ApiVersionsRequest) ResponseKind() Response { + r := &ApiVersionsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith issues v to r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *ApiVersionsRequest) RequestWith(ctx context.Context, r Requestor) (*ApiVersionsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*ApiVersionsResponse) + return resp, err +} + +func (v *ApiVersionsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + if version >= 3 { + v := v.ClientSoftwareName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 3 { + v := v.ClientSoftwareVersion + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ApiVersionsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ApiVersionsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ApiVersionsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + if version >= 3 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ClientSoftwareName = v + } + if version >= 3 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ClientSoftwareVersion = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrApiVersionsRequest returns a pointer to a default ApiVersionsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrApiVersionsRequest() *ApiVersionsRequest { + var v ApiVersionsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ApiVersionsRequest. +func (v *ApiVersionsRequest) Default() { +} + +// NewApiVersionsRequest returns a default ApiVersionsRequest +// This is a shortcut for creating a struct and calling Default yourself.
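+//
+// An illustrative use (editorial sketch, not upstream generated code), with
+// the v3+ fields populated as the pattern documented above requires:
+//
+//	req := NewApiVersionsRequest()
+//	req.ClientSoftwareName = "kgo"      // example value
+//	req.ClientSoftwareVersion = "1.0.0" // example value
+//	resp, err := req.RequestWith(ctx, r)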
+func NewApiVersionsRequest() ApiVersionsRequest { + var v ApiVersionsRequest + v.Default() + return v +} + +type ApiVersionsResponseApiKey struct { + // ApiKey is the key of a message request. + ApiKey int16 + + // MinVersion is the min version a broker supports for an API key. + MinVersion int16 + + // MaxVersion is the max version a broker supports for an API key. + MaxVersion int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ApiVersionsResponseApiKey. +func (v *ApiVersionsResponseApiKey) Default() { +} + +// NewApiVersionsResponseApiKey returns a default ApiVersionsResponseApiKey +// This is a shortcut for creating a struct and calling Default yourself. +func NewApiVersionsResponseApiKey() ApiVersionsResponseApiKey { + var v ApiVersionsResponseApiKey + v.Default() + return v +} + +type ApiVersionsResponseSupportedFeature struct { + // The name of the feature. + Name string + + // The minimum supported version for the feature. + MinVersion int16 + + // The maximum supported version for the feature. + MaxVersion int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ApiVersionsResponseSupportedFeature. +func (v *ApiVersionsResponseSupportedFeature) Default() { +} + +// NewApiVersionsResponseSupportedFeature returns a default ApiVersionsResponseSupportedFeature +// This is a shortcut for creating a struct and calling Default yourself. +func NewApiVersionsResponseSupportedFeature() ApiVersionsResponseSupportedFeature { + var v ApiVersionsResponseSupportedFeature + v.Default() + return v +} + +type ApiVersionsResponseFinalizedFeature struct { + // The name of the feature. + Name string + + // The cluster-wide finalized max version level for the feature. + MaxVersionLevel int16 + + // The cluster-wide finalized min version level for the feature. + MinVersionLevel int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ApiVersionsResponseFinalizedFeature. +func (v *ApiVersionsResponseFinalizedFeature) Default() { +} + +// NewApiVersionsResponseFinalizedFeature returns a default ApiVersionsResponseFinalizedFeature +// This is a shortcut for creating a struct and calling Default yourself. +func NewApiVersionsResponseFinalizedFeature() ApiVersionsResponseFinalizedFeature { + var v ApiVersionsResponseFinalizedFeature + v.Default() + return v +} + +// ApiVersionsResponse is returned from an ApiVersionsRequest. +type ApiVersionsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ErrorCode is UNSUPPORTED_VERSION if the request was issued with a higher + // version than the broker supports. Before Kafka 2.4.0, if this error is + // returned, the rest of this struct will be empty. + // + // Starting in Kafka 2.4.0 (with version 3), even with an UNSUPPORTED_VERSION + // error, the broker still replies with the ApiKeys it supports. + ErrorCode int16 + + // ApiKeys is an array corresponding to API keys the broker supports + // and the range of supported versions for each key. 
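+	//
+	// Clients typically clamp their request versions to these ranges; an
+	// illustrative sketch (editorial, not upstream code), for some request
+	// req and this response resp:
+	//
+	//	for _, k := range resp.ApiKeys {
+	//		if k.ApiKey == req.Key() && req.GetVersion() > k.MaxVersion {
+	//			req.SetVersion(k.MaxVersion)
+	//		}
+	//	}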
+ ApiKeys []ApiVersionsResponseApiKey + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 2. + ThrottleMillis int32 // v1+ + + // Features supported by the broker (see KIP-584). + SupportedFeatures []ApiVersionsResponseSupportedFeature // tag 0 + + // The monotonically increasing epoch for the finalized features information, + // where -1 indicates an unknown epoch. + // + // This field has a default of -1. + FinalizedFeaturesEpoch int64 // tag 1 + + // The list of cluster-wide finalized features (only valid if + // FinalizedFeaturesEpoch is >= 0). + FinalizedFeatures []ApiVersionsResponseFinalizedFeature // tag 2 + + // Set by a KRaft controller if the required configurations for ZK migration + // are present + ZkMigrationReady bool // tag 3 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +func (*ApiVersionsResponse) Key() int16 { return 18 } +func (*ApiVersionsResponse) MaxVersion() int16 { return 3 } +func (v *ApiVersionsResponse) SetVersion(version int16) { v.Version = version } +func (v *ApiVersionsResponse) GetVersion() int16 { return v.Version } +func (v *ApiVersionsResponse) IsFlexible() bool { return v.Version >= 3 } +func (v *ApiVersionsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 2 } +func (v *ApiVersionsResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *ApiVersionsResponse) RequestKind() Request { return &ApiVersionsRequest{Version: v.Version} } + +func (v *ApiVersionsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ApiKeys + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ApiKey + dst = kbin.AppendInt16(dst, v) + } + { + v := v.MinVersion + dst = kbin.AppendInt16(dst, v) + } + { + v := v.MaxVersion + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 1 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + var toEncode []uint32 + if len(v.SupportedFeatures) > 0 { + toEncode = append(toEncode, 0) + } + if v.FinalizedFeaturesEpoch != -1 { + toEncode = append(toEncode, 1) + } + if len(v.FinalizedFeatures) > 0 { + toEncode = append(toEncode, 2) + } + if v.ZkMigrationReady != false { + toEncode = append(toEncode, 3) + } + dst = kbin.AppendUvarint(dst, uint32(len(toEncode)+v.UnknownTags.Len())) + for _, tag := range toEncode { + switch tag { + case 0: + { + v := v.SupportedFeatures + dst = kbin.AppendUvarint(dst, 0) + sized := false + lenAt := len(dst) + fSupportedFeatures: + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.MinVersion + dst = kbin.AppendInt16(dst, v) + } + { + v := v.MaxVersion + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = 
kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + if !sized { + dst = kbin.AppendUvarint(dst[:lenAt], uint32(len(dst[lenAt:]))) + sized = true + goto fSupportedFeatures + } + } + case 1: + { + v := v.FinalizedFeaturesEpoch + dst = kbin.AppendUvarint(dst, 1) + dst = kbin.AppendUvarint(dst, 8) + dst = kbin.AppendInt64(dst, v) + } + case 2: + { + v := v.FinalizedFeatures + dst = kbin.AppendUvarint(dst, 2) + sized := false + lenAt := len(dst) + fFinalizedFeatures: + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.MaxVersionLevel + dst = kbin.AppendInt16(dst, v) + } + { + v := v.MinVersionLevel + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + if !sized { + dst = kbin.AppendUvarint(dst[:lenAt], uint32(len(dst[lenAt:]))) + sized = true + goto fFinalizedFeatures + } + } + case 3: + { + v := v.ZkMigrationReady + dst = kbin.AppendUvarint(dst, 3) + dst = kbin.AppendUvarint(dst, 1) + dst = kbin.AppendBool(dst, v) + } + } + } + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ApiVersionsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ApiVersionsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ApiVersionsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + v := s.ApiKeys + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ApiVersionsResponseApiKey, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int16() + s.ApiKey = v + } + { + v := b.Int16() + s.MinVersion = v + } + { + v := b.Int16() + s.MaxVersion = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.ApiKeys = v + } + if version >= 1 { + v := b.Int32() + s.ThrottleMillis = v + } + if isFlexible { + for i := b.Uvarint(); i > 0; i-- { + switch key := b.Uvarint(); key { + default: + s.UnknownTags.Set(key, b.Span(int(b.Uvarint()))) + case 0: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + v := s.SupportedFeatures + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ApiVersionsResponseSupportedFeature, l)...) 
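+					// editorial note: the backing slice was truncated to length
+					// 0 above and is grown to the decoded length l before each
+					// element is read below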
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + { + v := b.Int16() + s.MinVersion = v + } + { + v := b.Int16() + s.MaxVersion = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.SupportedFeatures = v + if err := b.Complete(); err != nil { + return err + } + case 1: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + v := b.Int64() + s.FinalizedFeaturesEpoch = v + if err := b.Complete(); err != nil { + return err + } + case 2: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + v := s.FinalizedFeatures + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ApiVersionsResponseFinalizedFeature, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + { + v := b.Int16() + s.MaxVersionLevel = v + } + { + v := b.Int16() + s.MinVersionLevel = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.FinalizedFeatures = v + if err := b.Complete(); err != nil { + return err + } + case 3: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + v := b.Bool() + s.ZkMigrationReady = v + if err := b.Complete(); err != nil { + return err + } + } + } + } + return b.Complete() +} + +// NewPtrApiVersionsResponse returns a pointer to a default ApiVersionsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrApiVersionsResponse() *ApiVersionsResponse { + var v ApiVersionsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ApiVersionsResponse. +func (v *ApiVersionsResponse) Default() { + v.FinalizedFeaturesEpoch = -1 +} + +// NewApiVersionsResponse returns a default ApiVersionsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewApiVersionsResponse() ApiVersionsResponse { + var v ApiVersionsResponse + v.Default() + return v +} + +type CreateTopicsRequestTopicReplicaAssignment struct { + // Partition is a partition to create. + Partition int32 + + // Replicas are broker IDs the partition must exist on. + Replicas []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v5+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreateTopicsRequestTopicReplicaAssignment. +func (v *CreateTopicsRequestTopicReplicaAssignment) Default() { +} + +// NewCreateTopicsRequestTopicReplicaAssignment returns a default CreateTopicsRequestTopicReplicaAssignment +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreateTopicsRequestTopicReplicaAssignment() CreateTopicsRequestTopicReplicaAssignment { + var v CreateTopicsRequestTopicReplicaAssignment + v.Default() + return v +} + +type CreateTopicsRequestTopicConfig struct { + // Name is a topic level config key (e.g. segment.bytes). 
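+	//
+	// An illustrative Name/Value pair: Name "retention.ms" with Value
+	// "86400000" (one day).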
+ Name string + + // Value is a topic level config value (e.g. 1073741824). + Value *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v5+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreateTopicsRequestTopicConfig. +func (v *CreateTopicsRequestTopicConfig) Default() { +} + +// NewCreateTopicsRequestTopicConfig returns a default CreateTopicsRequestTopicConfig +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreateTopicsRequestTopicConfig() CreateTopicsRequestTopicConfig { + var v CreateTopicsRequestTopicConfig + v.Default() + return v +} + +type CreateTopicsRequestTopic struct { + // Topic is a topic to create. + Topic string + + // NumPartitions is how many partitions to give a topic. This must + // be -1 if specifying partitions manually (see ReplicaAssignment) + // or, starting v4+, to use the broker default partitions. + NumPartitions int32 + + // ReplicationFactor is how many replicas every partition must have. + // This must be -1 if specifying partitions manually (see ReplicaAssignment) + // or, starting v4+, to use the broker default replication factor. + ReplicationFactor int16 + + // ReplicaAssignment is an array to manually dictate replicas and their + // partitions for a topic. If using this, both ReplicationFactor and + // NumPartitions must be -1. + ReplicaAssignment []CreateTopicsRequestTopicReplicaAssignment + + // Configs is an array of key value config pairs for a topic. + // These correspond to Kafka Topic-Level Configs: http://kafka.apache.org/documentation/#topicconfigs. + Configs []CreateTopicsRequestTopicConfig + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v5+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreateTopicsRequestTopic. +func (v *CreateTopicsRequestTopic) Default() { +} + +// NewCreateTopicsRequestTopic returns a default CreateTopicsRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreateTopicsRequestTopic() CreateTopicsRequestTopic { + var v CreateTopicsRequestTopic + v.Default() + return v +} + +// CreateTopicsRequest creates Kafka topics. +// +// Version 4, introduced in Kafka 2.4.0, implies client support for +// creation defaults. See KIP-464. +// +// Version 5, also in 2.4.0, returns topic configs in the response (KIP-525). +type CreateTopicsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Topics is an array of topics to attempt to create. + Topics []CreateTopicsRequestTopic + + // TimeoutMillis is how long Kafka can wait before responding to this request. + // This field has no effect on Kafka's processing of the request; the request + // will continue to be processed if the timeout is reached. If the timeout is + // reached, Kafka will reply with a REQUEST_TIMED_OUT error. + // + // This field has a default of 60000. + TimeoutMillis int32 + + // ValidateOnly makes this request a dry run; everything is validated but + // no topics are actually created. + ValidateOnly bool // v1+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of.
+ UnknownTags Tags // v5+ +} + +func (*CreateTopicsRequest) Key() int16 { return 19 } +func (*CreateTopicsRequest) MaxVersion() int16 { return 7 } +func (v *CreateTopicsRequest) SetVersion(version int16) { v.Version = version } +func (v *CreateTopicsRequest) GetVersion() int16 { return v.Version } +func (v *CreateTopicsRequest) IsFlexible() bool { return v.Version >= 5 } +func (v *CreateTopicsRequest) Timeout() int32 { return v.TimeoutMillis } +func (v *CreateTopicsRequest) SetTimeout(timeoutMillis int32) { v.TimeoutMillis = timeoutMillis } +func (v *CreateTopicsRequest) IsAdminRequest() {} +func (v *CreateTopicsRequest) ResponseKind() Response { + r := &CreateTopicsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith issues v to r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *CreateTopicsRequest) RequestWith(ctx context.Context, r Requestor) (*CreateTopicsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*CreateTopicsResponse) + return resp, err +} + +func (v *CreateTopicsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 5 + _ = isFlexible + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.NumPartitions + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ReplicationFactor + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ReplicaAssignment + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Replicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.Configs + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Value + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.TimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + if version >= 1 { + v := v.ValidateOnly + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *CreateTopicsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *CreateTopicsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src,
true) +} + +func (v *CreateTopicsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 5 + _ = isFlexible + s := v + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]CreateTopicsRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := b.Int32() + s.NumPartitions = v + } + { + v := b.Int16() + s.ReplicationFactor = v + } + { + v := s.ReplicaAssignment + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]CreateTopicsRequestTopicReplicaAssignment, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := s.Replicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Replicas = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.ReplicaAssignment = v + } + { + v := s.Configs + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]CreateTopicsRequestTopicConfig, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Value = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Configs = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + { + v := b.Int32() + s.TimeoutMillis = v + } + if version >= 1 { + v := b.Bool() + s.ValidateOnly = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrCreateTopicsRequest returns a pointer to a default CreateTopicsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrCreateTopicsRequest() *CreateTopicsRequest { + var v CreateTopicsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreateTopicsRequest. +func (v *CreateTopicsRequest) Default() { + v.TimeoutMillis = 60000 +} + +// NewCreateTopicsRequest returns a default CreateTopicsRequest +// This is a shortcut for creating a struct and calling Default yourself. 
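+//
+// An illustrative use (editorial sketch, not upstream generated code):
+// creating one topic with broker-default partitions and replication, per the
+// v4+ semantics documented on CreateTopicsRequestTopic:
+//
+//	req := NewPtrCreateTopicsRequest()
+//	topic := NewCreateTopicsRequestTopic()
+//	topic.Topic = "foo"
+//	topic.NumPartitions = -1
+//	topic.ReplicationFactor = -1
+//	req.Topics = append(req.Topics, topic)
+//	resp, err := req.RequestWith(ctx, r)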
+func NewCreateTopicsRequest() CreateTopicsRequest { + var v CreateTopicsRequest + v.Default() + return v +} + +type CreateTopicsResponseTopicConfig struct { + // Name is the configuration name (e.g. segment.bytes). + Name string + + // Value is the value for this config key. If the key is sensitive, + // the value will be null. + Value *string + + // ReadOnly signifies whether this is not a dynamic config option. + ReadOnly bool + + // Source is where this config entry is from. See the documentation + // on DescribeConfigsRequest's Source for more details. + // + // This field has a default of -1. + Source int8 + + // IsSensitive signifies whether this is a sensitive config key, which + // is either a password or an unknown type. + IsSensitive bool + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v5+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreateTopicsResponseTopicConfig. +func (v *CreateTopicsResponseTopicConfig) Default() { + v.Source = -1 +} + +// NewCreateTopicsResponseTopicConfig returns a default CreateTopicsResponseTopicConfig +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreateTopicsResponseTopicConfig() CreateTopicsResponseTopicConfig { + var v CreateTopicsResponseTopicConfig + v.Default() + return v +} + +type CreateTopicsResponseTopic struct { + // Topic is the topic this response corresponds to. + Topic string + + // The unique topic ID. + TopicID [16]byte // v7+ + + // ErrorCode is the error code for an individual topic creation. + // + // NOT_CONTROLLER is returned if the request was not issued to a Kafka + // controller. + // + // TOPIC_AUTHORIZATION_FAILED is returned if the client is not authorized + // to create a topic. + // + // INVALID_REQUEST is returned if the same topic occurred multiple times + // in the request. + // + // POLICY_VIOLATION is returned if the broker is using a + // create.topic.policy.class.name that returns a policy violation. + // + // INVALID_TOPIC_EXCEPTION is returned if the topic collides with another + // topic when periods in both topics' names are replaced with underscores + // (e.g. topic.foo and topic_foo collide). + // + // TOPIC_ALREADY_EXISTS is returned if the topic already exists. + // + // INVALID_PARTITIONS is returned if the requested number of partitions is + // <= 0. + // + // INVALID_REPLICATION_FACTOR is returned if the requested replication + // factor is <= 0. + // + // INVALID_REPLICA_ASSIGNMENT is returned if not all partitions have the same + // number of replicas, or duplicate replicas are assigned, or the partitions + // are not consecutive starting from 0. + // + // INVALID_CONFIG is returned if the requested topic config is invalid. + ErrorCode int16 + + // ErrorMessage is an informative message if the topic creation failed. + ErrorMessage *string // v1+ + + // ConfigErrorCode is non-zero if configs are unable to be returned. + // + // This is the first tagged field, introduced in version 5. As such, it is + // only possible to be present in v5+. + ConfigErrorCode int16 // tag 0 + + // NumPartitions is how many partitions were created for this topic. + // + // This field has a default of -1. + NumPartitions int32 // v5+ + + // ReplicationFactor is how many replicas every partition has for this topic. + // + // This field has a default of -1. + ReplicationFactor int16 // v5+ + + // Configs contains this topic's configuration.
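+	//
+	// Configs are only returned by v5+ requests (KIP-525); for sensitive
+	// config keys the Value is null, as documented on the config type above.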
+ Configs []CreateTopicsResponseTopicConfig // v5+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v5+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreateTopicsResponseTopic. +func (v *CreateTopicsResponseTopic) Default() { + v.NumPartitions = -1 + v.ReplicationFactor = -1 +} + +// NewCreateTopicsResponseTopic returns a default CreateTopicsResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreateTopicsResponseTopic() CreateTopicsResponseTopic { + var v CreateTopicsResponseTopic + v.Default() + return v +} + +// CreateTopicsResponse is returned from a CreateTopicsRequest. +type CreateTopicsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 3. + ThrottleMillis int32 // v2+ + + // Topics contains responses to the requested topic creations. + Topics []CreateTopicsResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v5+ +} + +func (*CreateTopicsResponse) Key() int16 { return 19 } +func (*CreateTopicsResponse) MaxVersion() int16 { return 7 } +func (v *CreateTopicsResponse) SetVersion(version int16) { v.Version = version } +func (v *CreateTopicsResponse) GetVersion() int16 { return v.Version } +func (v *CreateTopicsResponse) IsFlexible() bool { return v.Version >= 5 } +func (v *CreateTopicsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 3 } +func (v *CreateTopicsResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *CreateTopicsResponse) RequestKind() Request { return &CreateTopicsRequest{Version: v.Version} } + +func (v *CreateTopicsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 5 + _ = isFlexible + if version >= 2 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 7 { + v := v.TopicID + dst = kbin.AppendUuid(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if version >= 1 { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if version >= 5 { + v := v.NumPartitions + dst = kbin.AppendInt32(dst, v) + } + if version >= 5 { + v := v.ReplicationFactor + dst = kbin.AppendInt16(dst, v) + } + if version >= 5 { + v := v.Configs + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + for i := range v { + v := &v[i] + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Value + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = 
kbin.AppendNullableString(dst, v) + } + } + { + v := v.ReadOnly + dst = kbin.AppendBool(dst, v) + } + { + v := v.Source + dst = kbin.AppendInt8(dst, v) + } + { + v := v.IsSensitive + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + var toEncode []uint32 + if v.ConfigErrorCode != 0 { + toEncode = append(toEncode, 0) + } + dst = kbin.AppendUvarint(dst, uint32(len(toEncode)+v.UnknownTags.Len())) + for _, tag := range toEncode { + switch tag { + case 0: + { + v := v.ConfigErrorCode + dst = kbin.AppendUvarint(dst, 0) + dst = kbin.AppendUvarint(dst, 2) + dst = kbin.AppendInt16(dst, v) + } + } + } + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *CreateTopicsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *CreateTopicsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *CreateTopicsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 5 + _ = isFlexible + s := v + if version >= 2 { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]CreateTopicsResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + if version >= 7 { + v := b.Uuid() + s.TopicID = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if version >= 1 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + if version >= 5 { + v := b.Int32() + s.NumPartitions = v + } + if version >= 5 { + v := b.Int16() + s.ReplicationFactor = v + } + if version >= 5 { + v := s.Configs + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 0 || l == 0 { + a = []CreateTopicsResponseTopicConfig{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]CreateTopicsResponseTopicConfig, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Value = v + } + { + v := b.Bool() + s.ReadOnly = v + } + { + v := b.Int8() + s.Source = v + } + { + v := b.Bool() + s.IsSensitive = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Configs = v + } + if isFlexible { + for i := b.Uvarint(); i > 0; i-- { + switch key := b.Uvarint(); key { + default: + s.UnknownTags.Set(key, b.Span(int(b.Uvarint()))) + case 0: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + v := b.Int16() + s.ConfigErrorCode = v + if err := b.Complete(); err != nil { + return err + } + } + } + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrCreateTopicsResponse returns a pointer to a default CreateTopicsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrCreateTopicsResponse() *CreateTopicsResponse { + var v CreateTopicsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreateTopicsResponse. +func (v *CreateTopicsResponse) Default() { +} + +// NewCreateTopicsResponse returns a default CreateTopicsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreateTopicsResponse() CreateTopicsResponse { + var v CreateTopicsResponse + v.Default() + return v +} + +type DeleteTopicsRequestTopic struct { + Topic *string + + TopicID [16]byte + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteTopicsRequestTopic. +func (v *DeleteTopicsRequestTopic) Default() { +} + +// NewDeleteTopicsRequestTopic returns a default DeleteTopicsRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewDeleteTopicsRequestTopic() DeleteTopicsRequestTopic { + var v DeleteTopicsRequestTopic + v.Default() + return v +} + +// DeleteTopicsRequest deletes Kafka topics. +type DeleteTopicsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // TopicNames is an array of topic names to delete. + TopicNames []string // v0-v5 + + // The name or topic ID of topics to delete. + Topics []DeleteTopicsRequestTopic // v6+ + + // TimeoutMillis is how long Kafka can wait before responding to this request. + // This field has no effect on Kafka's processing of the request; the request + // will continue to be processed if the timeout is reached. If the timeout is + // reached, Kafka will reply with a REQUEST_TIMED_OUT error. + // + // This field has a default of 15000. + TimeoutMillis int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of.
+ UnknownTags Tags // v4+ +} + +func (*DeleteTopicsRequest) Key() int16 { return 20 } +func (*DeleteTopicsRequest) MaxVersion() int16 { return 6 } +func (v *DeleteTopicsRequest) SetVersion(version int16) { v.Version = version } +func (v *DeleteTopicsRequest) GetVersion() int16 { return v.Version } +func (v *DeleteTopicsRequest) IsFlexible() bool { return v.Version >= 4 } +func (v *DeleteTopicsRequest) Timeout() int32 { return v.TimeoutMillis } +func (v *DeleteTopicsRequest) SetTimeout(timeoutMillis int32) { v.TimeoutMillis = timeoutMillis } +func (v *DeleteTopicsRequest) IsAdminRequest() {} +func (v *DeleteTopicsRequest) ResponseKind() Response { + r := &DeleteTopicsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith issues v to r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *DeleteTopicsRequest) RequestWith(ctx context.Context, r Requestor) (*DeleteTopicsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*DeleteTopicsResponse) + return resp, err +} + +func (v *DeleteTopicsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + if version >= 0 && version <= 5 { + v := v.TopicNames + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + } + if version >= 6 { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.TopicID + dst = kbin.AppendUuid(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.TimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DeleteTopicsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DeleteTopicsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DeleteTopicsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + s := v + if version >= 0 && version <= 5 { + v := s.TopicNames + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]string, l)...)
+ } + for i := int32(0); i < l; i++ { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + a[i] = v + } + v = a + s.TopicNames = v + } + if version >= 6 { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DeleteTopicsRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Topic = v + } + { + v := b.Uuid() + s.TopicID = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + { + v := b.Int32() + s.TimeoutMillis = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDeleteTopicsRequest returns a pointer to a default DeleteTopicsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDeleteTopicsRequest() *DeleteTopicsRequest { + var v DeleteTopicsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteTopicsRequest. +func (v *DeleteTopicsRequest) Default() { + v.TimeoutMillis = 15000 +} + +// NewDeleteTopicsRequest returns a default DeleteTopicsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewDeleteTopicsRequest() DeleteTopicsRequest { + var v DeleteTopicsRequest + v.Default() + return v +} + +type DeleteTopicsResponseTopic struct { + // Topic is the topic requested for deletion. + Topic *string + + // The topic ID requested for deletion. + TopicID [16]byte // v6+ + + // ErrorCode is the error code returned for an individual topic in a + // deletion request. + // + // TOPIC_AUTHORIZATION_FAILED is returned if the client is not authorized + // to delete a topic. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if the broker does not know of + // the topic. + // + // NOT_CONTROLLER is returned if the request was not issued to a Kafka + // controller. + // + // TOPIC_DELETION_DISABLED is returned for deletion requests version 3+ + // and brokers >= 2.1.0. INVALID_REQUEST is issued for request versions + // 0-2 against brokers >= 2.1.0. Otherwise, the request hangs until it + // times out. + // + // UNSUPPORTED_VERSION is returned when using topic IDs with a cluster + // that is not yet Kafka v2.8+. + // + // UNKNOWN_TOPIC_ID is returned when using topic IDs with a Kafka cluster + // v2.8+ and the topic ID is not found. + ErrorCode int16 + + // ErrorMessage is a message for an error. + ErrorMessage *string // v5+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteTopicsResponseTopic. +func (v *DeleteTopicsResponseTopic) Default() { +} + +// NewDeleteTopicsResponseTopic returns a default DeleteTopicsResponseTopic +// This is a shortcut for creating a struct and calling Default yourself.
+func NewDeleteTopicsResponseTopic() DeleteTopicsResponseTopic { + var v DeleteTopicsResponseTopic + v.Default() + return v +} + +// DeleteTopicsResponse is returned from a DeleteTopicsRequest. +// Version 3 added the TOPIC_DELETION_DISABLED error proposed in KIP-322 +// and introduced in Kafka 2.1.0. Prior, the request timed out. +type DeleteTopicsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 2. + ThrottleMillis int32 // v1+ + + // Topics contains responses for each topic requested for deletion. + Topics []DeleteTopicsResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +func (*DeleteTopicsResponse) Key() int16 { return 20 } +func (*DeleteTopicsResponse) MaxVersion() int16 { return 6 } +func (v *DeleteTopicsResponse) SetVersion(version int16) { v.Version = version } +func (v *DeleteTopicsResponse) GetVersion() int16 { return v.Version } +func (v *DeleteTopicsResponse) IsFlexible() bool { return v.Version >= 4 } +func (v *DeleteTopicsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 2 } +func (v *DeleteTopicsResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *DeleteTopicsResponse) RequestKind() Request { return &DeleteTopicsRequest{Version: v.Version} } + +func (v *DeleteTopicsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + if version >= 1 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if version < 6 { + var vv string + if v != nil { + vv = *v + } + { + v := vv + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + } else { + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + } + if version >= 6 { + v := v.TopicID + dst = kbin.AppendUuid(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if version >= 5 { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DeleteTopicsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DeleteTopicsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DeleteTopicsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + s := v + if version >= 1 { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + 
} else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DeleteTopicsResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v *string + if version < 6 { + var vv string + if isFlexible { + if unsafe { + vv = b.UnsafeCompactString() + } else { + vv = b.CompactString() + } + } else { + if unsafe { + vv = b.UnsafeString() + } else { + vv = b.String() + } + } + v = &vv + } else { + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + } + s.Topic = v + } + if version >= 6 { + v := b.Uuid() + s.TopicID = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if version >= 5 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDeleteTopicsResponse returns a pointer to a default DeleteTopicsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDeleteTopicsResponse() *DeleteTopicsResponse { + var v DeleteTopicsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteTopicsResponse. +func (v *DeleteTopicsResponse) Default() { +} + +// NewDeleteTopicsResponse returns a default DeleteTopicsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewDeleteTopicsResponse() DeleteTopicsResponse { + var v DeleteTopicsResponse + v.Default() + return v +} + +type DeleteRecordsRequestTopicPartition struct { + // Partition is a partition to delete records from. + Partition int32 + + // Offset is the offset to set the partition's low watermark (start + // offset) to. After a successful response, all records before this + // offset are considered deleted and are no longer readable. + // + // To delete all records, use -1, which is mapped to the partition's + // current high watermark. + Offset int64 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteRecordsRequestTopicPartition. +func (v *DeleteRecordsRequestTopicPartition) Default() { +} + +// NewDeleteRecordsRequestTopicPartition returns a default DeleteRecordsRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewDeleteRecordsRequestTopicPartition() DeleteRecordsRequestTopicPartition { + var v DeleteRecordsRequestTopicPartition + v.Default() + return v +} + +type DeleteRecordsRequestTopic struct { + // Topic is a topic to delete records from. + Topic string + + // Partitions contains partitions to delete records from. + Partitions []DeleteRecordsRequestTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. 
Calling this allows for future compatibility
+// if new fields are added to DeleteRecordsRequestTopic.
+func (v *DeleteRecordsRequestTopic) Default() {
+}
+
+// NewDeleteRecordsRequestTopic returns a default DeleteRecordsRequestTopic
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewDeleteRecordsRequestTopic() DeleteRecordsRequestTopic {
+ var v DeleteRecordsRequestTopic
+ v.Default()
+ return v
+}
+
+// DeleteRecordsRequest is an admin request to delete records from Kafka.
+// This was added for KIP-107.
+//
+// To delete records, Kafka sets the LogStartOffset for partitions to
+// the requested offset. All segments whose max offset is before the
+// requested offset are deleted, and any records within the segment before
+// the requested offset can no longer be read.
+//
+// This request must be issued to the correct brokers that own the partitions
+// you intend to delete records for.
+type DeleteRecordsRequest struct {
+ // Version is the version of this message used with a Kafka broker.
+ Version int16
+
+ // Topics contains topics to delete records from.
+ Topics []DeleteRecordsRequestTopic
+
+ // TimeoutMillis is how long Kafka can wait before responding to this request.
+ // This field has no effect on Kafka's processing of the request; the request
+ // will continue to be processed if the timeout is reached. If the timeout is
+ // reached, Kafka will reply with a REQUEST_TIMED_OUT error.
+ //
+ // This field has a default of 15000.
+ TimeoutMillis int32
+
+ // UnknownTags are tags Kafka sent that we do not know the purpose of.
+ UnknownTags Tags // v2+
+}
+
+func (*DeleteRecordsRequest) Key() int16 { return 21 }
+func (*DeleteRecordsRequest) MaxVersion() int16 { return 2 }
+func (v *DeleteRecordsRequest) SetVersion(version int16) { v.Version = version }
+func (v *DeleteRecordsRequest) GetVersion() int16 { return v.Version }
+func (v *DeleteRecordsRequest) IsFlexible() bool { return v.Version >= 2 }
+func (v *DeleteRecordsRequest) Timeout() int32 { return v.TimeoutMillis }
+func (v *DeleteRecordsRequest) SetTimeout(timeoutMillis int32) { v.TimeoutMillis = timeoutMillis }
+func (v *DeleteRecordsRequest) ResponseKind() Response {
+ r := &DeleteRecordsResponse{Version: v.Version}
+ r.Default()
+ return r
+}
+
+// RequestWith issues the request v on r and returns the response or an error.
+// For sharded requests, the response may be merged and still return an error.
+// It is better to rely on client.RequestSharded than to rely on proper merging behavior.
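+//
+// A minimal usage sketch (ctx and client are hypothetical; client is anything
+// that implements Requestor):
+//
+//	req := NewPtrDeleteRecordsRequest()
+//	t := NewDeleteRecordsRequestTopic()
+//	t.Topic = "my-topic"
+//	p := NewDeleteRecordsRequestTopicPartition()
+//	p.Partition = 0
+//	p.Offset = 100 // delete everything before offset 100; -1 deletes all
+//	t.Partitions = append(t.Partitions, p)
+//	req.Topics = append(req.Topics, t)
+//	resp, err := req.RequestWith(ctx, client)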
+func (v *DeleteRecordsRequest) RequestWith(ctx context.Context, r Requestor) (*DeleteRecordsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*DeleteRecordsResponse) + return resp, err +} + +func (v *DeleteRecordsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Offset + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.TimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DeleteRecordsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DeleteRecordsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DeleteRecordsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DeleteRecordsRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DeleteRecordsRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int64() + s.Offset = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + { + v := b.Int32() + s.TimeoutMillis = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDeleteRecordsRequest returns a pointer to a default DeleteRecordsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDeleteRecordsRequest() *DeleteRecordsRequest { + var v DeleteRecordsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteRecordsRequest. 
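+//
+// For DeleteRecordsRequest, this sets TimeoutMillis to its documented default
+// of 15000.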
+func (v *DeleteRecordsRequest) Default() { + v.TimeoutMillis = 15000 +} + +// NewDeleteRecordsRequest returns a default DeleteRecordsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewDeleteRecordsRequest() DeleteRecordsRequest { + var v DeleteRecordsRequest + v.Default() + return v +} + +type DeleteRecordsResponseTopicPartition struct { + // Partition is the partition this response corresponds to. + Partition int32 + + // LowWatermark is the new earliest offset for this partition. + LowWatermark int64 + + // ErrorCode is the error code returned for a given partition in + // the delete request. + // + // TOPIC_AUTHORIZATION_FAILED is returned for all partitions if the + // client is not authorized to delete records. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned for all partitions that + // the requested broker does not know of. + // + // NOT_LEADER_FOR_PARTITION is returned for partitions that the + // requested broker is not a leader of. + // + // OFFSET_OUT_OF_RANGE is returned if the requested offset is + // negative or higher than the current high watermark. + // + // POLICY_VIOLATION is returned if records cannot be deleted due to + // broker configuration. + // + // KAFKA_STORAGE_EXCEPTION is returned if the partition is in an + // offline log directory. + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteRecordsResponseTopicPartition. +func (v *DeleteRecordsResponseTopicPartition) Default() { +} + +// NewDeleteRecordsResponseTopicPartition returns a default DeleteRecordsResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewDeleteRecordsResponseTopicPartition() DeleteRecordsResponseTopicPartition { + var v DeleteRecordsResponseTopicPartition + v.Default() + return v +} + +type DeleteRecordsResponseTopic struct { + // Topic is the topic this response corresponds to. + Topic string + + // Partitions contains responses for each partition in a requested topic + // in the delete records request. + Partitions []DeleteRecordsResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteRecordsResponseTopic. +func (v *DeleteRecordsResponseTopic) Default() { +} + +// NewDeleteRecordsResponseTopic returns a default DeleteRecordsResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewDeleteRecordsResponseTopic() DeleteRecordsResponseTopic { + var v DeleteRecordsResponseTopic + v.Default() + return v +} + +// DeleteRecordsResponse is returned from a DeleteRecordsRequest. +type DeleteRecordsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // Topics contains responses for each topic in the delete records request. 
+ Topics []DeleteRecordsResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*DeleteRecordsResponse) Key() int16 { return 21 } +func (*DeleteRecordsResponse) MaxVersion() int16 { return 2 } +func (v *DeleteRecordsResponse) SetVersion(version int16) { v.Version = version } +func (v *DeleteRecordsResponse) GetVersion() int16 { return v.Version } +func (v *DeleteRecordsResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *DeleteRecordsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 1 } +func (v *DeleteRecordsResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *DeleteRecordsResponse) RequestKind() Request { + return &DeleteRecordsRequest{Version: v.Version} +} + +func (v *DeleteRecordsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LowWatermark + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DeleteRecordsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DeleteRecordsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DeleteRecordsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DeleteRecordsResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DeleteRecordsResponseTopicPartition, l)...) 
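+ // a was truncated to length 0 above; this single append grows it to l zeroed elements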
+ }
+ for i := int32(0); i < l; i++ {
+ v := &a[i]
+ v.Default()
+ s := v
+ {
+ v := b.Int32()
+ s.Partition = v
+ }
+ {
+ v := b.Int64()
+ s.LowWatermark = v
+ }
+ {
+ v := b.Int16()
+ s.ErrorCode = v
+ }
+ if isFlexible {
+ s.UnknownTags = internalReadTags(&b)
+ }
+ }
+ v = a
+ s.Partitions = v
+ }
+ if isFlexible {
+ s.UnknownTags = internalReadTags(&b)
+ }
+ }
+ v = a
+ s.Topics = v
+ }
+ if isFlexible {
+ s.UnknownTags = internalReadTags(&b)
+ }
+ return b.Complete()
+}
+
+// NewPtrDeleteRecordsResponse returns a pointer to a default DeleteRecordsResponse
+// This is a shortcut for creating a new(struct) and calling Default yourself.
+func NewPtrDeleteRecordsResponse() *DeleteRecordsResponse {
+ var v DeleteRecordsResponse
+ v.Default()
+ return &v
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to DeleteRecordsResponse.
+func (v *DeleteRecordsResponse) Default() {
+}
+
+// NewDeleteRecordsResponse returns a default DeleteRecordsResponse
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewDeleteRecordsResponse() DeleteRecordsResponse {
+ var v DeleteRecordsResponse
+ v.Default()
+ return v
+}
+
+// InitProducerIDRequest initializes a producer ID for idempotent producing,
+// and if using transactions, a producer epoch. This is the first request
+// necessary to begin idempotent producing or transactions.
+//
+// Note that you do not need to go to a txn coordinator if you are initializing
+// a producer id without a transactional id.
+type InitProducerIDRequest struct {
+ // Version is the version of this message used with a Kafka broker.
+ Version int16
+
+ // TransactionalID is the ID to use for transactions if using transactions.
+ TransactionalID *string
+
+ // TransactionTimeoutMillis is how long a transaction is allowed before
+ // EndTxn is required.
+ //
+ // Note that this timeout only begins on the first AddPartitionsToTxn
+ // request.
+ TransactionTimeoutMillis int32
+
+ // ProducerID, added for KIP-360, is the current producer ID. This allows
+ // the client to potentially recover on UNKNOWN_PRODUCER_ID errors.
+ //
+ // This field has a default of -1.
+ ProducerID int64 // v3+
+
+ // The producer's current epoch. This will be checked against the producer
+ // epoch on the broker, and the request will return an error if they do not
+ // match. Also added for KIP-360.
+ //
+ // This field has a default of -1.
+ ProducerEpoch int16 // v3+
+
+ // UnknownTags are tags Kafka sent that we do not know the purpose of.
+ UnknownTags Tags // v2+
+}
+
+func (*InitProducerIDRequest) Key() int16 { return 22 }
+func (*InitProducerIDRequest) MaxVersion() int16 { return 5 }
+func (v *InitProducerIDRequest) SetVersion(version int16) { v.Version = version }
+func (v *InitProducerIDRequest) GetVersion() int16 { return v.Version }
+func (v *InitProducerIDRequest) IsFlexible() bool { return v.Version >= 2 }
+func (v *InitProducerIDRequest) IsTxnCoordinatorRequest() {}
+func (v *InitProducerIDRequest) ResponseKind() Response {
+ r := &InitProducerIDResponse{Version: v.Version}
+ r.Default()
+ return r
+}
+
+// RequestWith issues the request v on r and returns the response or an error.
+// For sharded requests, the response may be merged and still return an error.
+// It is better to rely on client.RequestSharded than to rely on proper merging behavior.
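+//
+// A minimal usage sketch for starting transactions (ctx and client are
+// hypothetical; client is anything that implements Requestor):
+//
+//	req := NewPtrInitProducerIDRequest()
+//	txnID := "my-txn-id" // leave TransactionalID nil for idempotent-only producing
+//	req.TransactionalID = &txnID
+//	req.TransactionTimeoutMillis = 60000
+//	resp, err := req.RequestWith(ctx, client)
+//	// on success, resp.ProducerID and resp.ProducerEpoch identify this producer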
+func (v *InitProducerIDRequest) RequestWith(ctx context.Context, r Requestor) (*InitProducerIDResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*InitProducerIDResponse) + return resp, err +} + +func (v *InitProducerIDRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.TransactionalID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.TransactionTimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + if version >= 3 { + v := v.ProducerID + dst = kbin.AppendInt64(dst, v) + } + if version >= 3 { + v := v.ProducerEpoch + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *InitProducerIDRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *InitProducerIDRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *InitProducerIDRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.TransactionalID = v + } + { + v := b.Int32() + s.TransactionTimeoutMillis = v + } + if version >= 3 { + v := b.Int64() + s.ProducerID = v + } + if version >= 3 { + v := b.Int16() + s.ProducerEpoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrInitProducerIDRequest returns a pointer to a default InitProducerIDRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrInitProducerIDRequest() *InitProducerIDRequest { + var v InitProducerIDRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to InitProducerIDRequest. +func (v *InitProducerIDRequest) Default() { + v.ProducerID = -1 + v.ProducerEpoch = -1 +} + +// NewInitProducerIDRequest returns a default InitProducerIDRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewInitProducerIDRequest() InitProducerIDRequest { + var v InitProducerIDRequest + v.Default() + return v +} + +// InitProducerIDResponse is returned for an InitProducerIDRequest. +type InitProducerIDResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // CLUSTER_AUTHORIZATION_FAILED is returned when not using transactions if + // the client is not authorized for idempotent_write on cluster. + // + // TRANSACTIONAL_ID_AUTHORIZATION_FAILED is returned when using transactions + // if the client is not authorized to write on transactional_id. 
+ //
+ // INVALID_REQUEST is returned if using transactions and the transactional id
+ // is an empty, non-null string.
+ //
+ // COORDINATOR_LOAD_IN_PROGRESS is returned if the coordinator for this
+ // transactional ID is still loading.
+ //
+ // NOT_COORDINATOR is returned if the broker is not the coordinator for
+ // this transactional ID.
+ //
+ // INVALID_TRANSACTION_TIMEOUT is returned if using transactions and the timeout
+ // is over transaction.max.timeout.ms or under 0.
+ //
+ // CONCURRENT_TRANSACTIONS is returned if there is an ongoing transaction
+ // that is completing at the time this init is called.
+ ErrorCode int16
+
+ // ProducerID is the next producer ID that Kafka generated. This ID is used
+ // to ensure repeated produce requests do not result in duplicate records.
+ //
+ // This field has a default of -1.
+ ProducerID int64
+
+ // ProducerEpoch is the producer epoch to use for transactions.
+ ProducerEpoch int16
+
+ // UnknownTags are tags Kafka sent that we do not know the purpose of.
+ UnknownTags Tags // v2+
+}
+
+func (*InitProducerIDResponse) Key() int16 { return 22 }
+func (*InitProducerIDResponse) MaxVersion() int16 { return 5 }
+func (v *InitProducerIDResponse) SetVersion(version int16) { v.Version = version }
+func (v *InitProducerIDResponse) GetVersion() int16 { return v.Version }
+func (v *InitProducerIDResponse) IsFlexible() bool { return v.Version >= 2 }
+func (v *InitProducerIDResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 1 }
+func (v *InitProducerIDResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis }
+func (v *InitProducerIDResponse) RequestKind() Request {
+ return &InitProducerIDRequest{Version: v.Version}
+}
+
+func (v *InitProducerIDResponse) AppendTo(dst []byte) []byte {
+ version := v.Version
+ _ = version
+ isFlexible := version >= 2
+ _ = isFlexible
+ {
+ v := v.ThrottleMillis
+ dst = kbin.AppendInt32(dst, v)
+ }
+ {
+ v := v.ErrorCode
+ dst = kbin.AppendInt16(dst, v)
+ }
+ {
+ v := v.ProducerID
+ dst = kbin.AppendInt64(dst, v)
+ }
+ {
+ v := v.ProducerEpoch
+ dst = kbin.AppendInt16(dst, v)
+ }
+ if isFlexible {
+ dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len()))
+ dst = v.UnknownTags.AppendEach(dst)
+ }
+ return dst
+}
+
+func (v *InitProducerIDResponse) ReadFrom(src []byte) error {
+ return v.readFrom(src, false)
+}
+
+func (v *InitProducerIDResponse) UnsafeReadFrom(src []byte) error {
+ return v.readFrom(src, true)
+}
+
+func (v *InitProducerIDResponse) readFrom(src []byte, unsafe bool) error {
+ v.Default()
+ b := kbin.Reader{Src: src}
+ version := v.Version
+ _ = version
+ isFlexible := version >= 2
+ _ = isFlexible
+ s := v
+ {
+ v := b.Int32()
+ s.ThrottleMillis = v
+ }
+ {
+ v := b.Int16()
+ s.ErrorCode = v
+ }
+ {
+ v := b.Int64()
+ s.ProducerID = v
+ }
+ {
+ v := b.Int16()
+ s.ProducerEpoch = v
+ }
+ if isFlexible {
+ s.UnknownTags = internalReadTags(&b)
+ }
+ return b.Complete()
+}
+
+// NewPtrInitProducerIDResponse returns a pointer to a default InitProducerIDResponse
+// This is a shortcut for creating a new(struct) and calling Default yourself.
+func NewPtrInitProducerIDResponse() *InitProducerIDResponse {
+ var v InitProducerIDResponse
+ v.Default()
+ return &v
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to InitProducerIDResponse.
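+//
+// For InitProducerIDResponse, the only non-zero default is ProducerID, which
+// is set to -1.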
+func (v *InitProducerIDResponse) Default() { + v.ProducerID = -1 +} + +// NewInitProducerIDResponse returns a default InitProducerIDResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewInitProducerIDResponse() InitProducerIDResponse { + var v InitProducerIDResponse + v.Default() + return v +} + +type OffsetForLeaderEpochRequestTopicPartition struct { + // Partition is the number of a partition. + Partition int32 + + // CurrentLeaderEpoch, proposed in KIP-320 and introduced in Kafka 2.1.0, + // allows brokers to check if the client is fenced (has an out of date + // leader) or if the client is ahead of the broker. + // + // The initial leader epoch can be determined from a MetadataResponse. + // + // This field has a default of -1. + CurrentLeaderEpoch int32 // v2+ + + // LeaderEpoch is the epoch to fetch the end offset for. + LeaderEpoch int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetForLeaderEpochRequestTopicPartition. +func (v *OffsetForLeaderEpochRequestTopicPartition) Default() { + v.CurrentLeaderEpoch = -1 +} + +// NewOffsetForLeaderEpochRequestTopicPartition returns a default OffsetForLeaderEpochRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetForLeaderEpochRequestTopicPartition() OffsetForLeaderEpochRequestTopicPartition { + var v OffsetForLeaderEpochRequestTopicPartition + v.Default() + return v +} + +type OffsetForLeaderEpochRequestTopic struct { + // Topic is the name of a topic. + Topic string + + // Partitions are partitions within a topic to fetch leader epoch offsets for. + Partitions []OffsetForLeaderEpochRequestTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetForLeaderEpochRequestTopic. +func (v *OffsetForLeaderEpochRequestTopic) Default() { +} + +// NewOffsetForLeaderEpochRequestTopic returns a default OffsetForLeaderEpochRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetForLeaderEpochRequestTopic() OffsetForLeaderEpochRequestTopic { + var v OffsetForLeaderEpochRequestTopic + v.Default() + return v +} + +// OffsetForLeaderEpochRequest requests log end offsets for partitions. +// +// Version 2, proposed in KIP-320 and introduced in Kafka 2.1.0, can be used by +// consumers to perform more accurate offset resetting in the case of data loss. +// +// In support of version 2, this requires DESCRIBE on TOPIC. +type OffsetForLeaderEpochRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ReplicaID, added in support of KIP-392, is the broker ID of the follower, + // or -1 if this request is from a consumer. + // + // This field has a default of -2. + ReplicaID int32 // v3+ + + // Topics are topics to fetch leader epoch offsets for. + Topics []OffsetForLeaderEpochRequestTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v4+
+}
+
+func (*OffsetForLeaderEpochRequest) Key() int16 { return 23 }
+func (*OffsetForLeaderEpochRequest) MaxVersion() int16 { return 4 }
+func (v *OffsetForLeaderEpochRequest) SetVersion(version int16) { v.Version = version }
+func (v *OffsetForLeaderEpochRequest) GetVersion() int16 { return v.Version }
+func (v *OffsetForLeaderEpochRequest) IsFlexible() bool { return v.Version >= 4 }
+func (v *OffsetForLeaderEpochRequest) ResponseKind() Response {
+ r := &OffsetForLeaderEpochResponse{Version: v.Version}
+ r.Default()
+ return r
+}
+
+// RequestWith issues the request v on r and returns the response or an error.
+// For sharded requests, the response may be merged and still return an error.
+// It is better to rely on client.RequestSharded than to rely on proper merging behavior.
+func (v *OffsetForLeaderEpochRequest) RequestWith(ctx context.Context, r Requestor) (*OffsetForLeaderEpochResponse, error) {
+ kresp, err := r.Request(ctx, v)
+ resp, _ := kresp.(*OffsetForLeaderEpochResponse)
+ return resp, err
+}
+
+func (v *OffsetForLeaderEpochRequest) AppendTo(dst []byte) []byte {
+ version := v.Version
+ _ = version
+ isFlexible := version >= 4
+ _ = isFlexible
+ if version >= 3 {
+ v := v.ReplicaID
+ dst = kbin.AppendInt32(dst, v)
+ }
+ {
+ v := v.Topics
+ if isFlexible {
+ dst = kbin.AppendCompactArrayLen(dst, len(v))
+ } else {
+ dst = kbin.AppendArrayLen(dst, len(v))
+ }
+ for i := range v {
+ v := &v[i]
+ {
+ v := v.Topic
+ if isFlexible {
+ dst = kbin.AppendCompactString(dst, v)
+ } else {
+ dst = kbin.AppendString(dst, v)
+ }
+ }
+ {
+ v := v.Partitions
+ if isFlexible {
+ dst = kbin.AppendCompactArrayLen(dst, len(v))
+ } else {
+ dst = kbin.AppendArrayLen(dst, len(v))
+ }
+ for i := range v {
+ v := &v[i]
+ {
+ v := v.Partition
+ dst = kbin.AppendInt32(dst, v)
+ }
+ if version >= 2 {
+ v := v.CurrentLeaderEpoch
+ dst = kbin.AppendInt32(dst, v)
+ }
+ {
+ v := v.LeaderEpoch
+ dst = kbin.AppendInt32(dst, v)
+ }
+ if isFlexible {
+ dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len()))
+ dst = v.UnknownTags.AppendEach(dst)
+ }
+ }
+ }
+ if isFlexible {
+ dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len()))
+ dst = v.UnknownTags.AppendEach(dst)
+ }
+ }
+ }
+ if isFlexible {
+ dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len()))
+ dst = v.UnknownTags.AppendEach(dst)
+ }
+ return dst
+}
+
+func (v *OffsetForLeaderEpochRequest) ReadFrom(src []byte) error {
+ return v.readFrom(src, false)
+}
+
+func (v *OffsetForLeaderEpochRequest) UnsafeReadFrom(src []byte) error {
+ return v.readFrom(src, true)
+}
+
+func (v *OffsetForLeaderEpochRequest) readFrom(src []byte, unsafe bool) error {
+ v.Default()
+ b := kbin.Reader{Src: src}
+ version := v.Version
+ _ = version
+ isFlexible := version >= 4
+ _ = isFlexible
+ s := v
+ if version >= 3 {
+ v := b.Int32()
+ s.ReplicaID = v
+ }
+ {
+ v := s.Topics
+ a := v
+ var l int32
+ if isFlexible {
+ l = b.CompactArrayLen()
+ } else {
+ l = b.ArrayLen()
+ }
+ if !b.Ok() {
+ return b.Complete()
+ }
+ a = a[:0]
+ if l > 0 {
+ a = append(a, make([]OffsetForLeaderEpochRequestTopic, l)...)
+ }
+ for i := int32(0); i < l; i++ {
+ v := &a[i]
+ v.Default()
+ s := v
+ {
+ var v string
+ if unsafe {
+ if isFlexible {
+ v = b.UnsafeCompactString()
+ } else {
+ v = b.UnsafeString()
+ }
+ } else {
+ if isFlexible {
+ v = b.CompactString()
+ } else {
+ v = b.String()
+ }
+ }
+ s.Topic = v
+ }
+ {
+ v := s.Partitions
+ a := v
+ var l int32
+ if isFlexible {
+ l = b.CompactArrayLen()
+ } else {
+ l = b.ArrayLen()
+ }
+ if !b.Ok() {
+ return b.Complete()
+ }
+ a = a[:0]
+ if l > 0 {
+ a = append(a, make([]OffsetForLeaderEpochRequestTopicPartition, l)...)
+ }
+ for i := int32(0); i < l; i++ {
+ v := &a[i]
+ v.Default()
+ s := v
+ {
+ v := b.Int32()
+ s.Partition = v
+ }
+ if version >= 2 {
+ v := b.Int32()
+ s.CurrentLeaderEpoch = v
+ }
+ {
+ v := b.Int32()
+ s.LeaderEpoch = v
+ }
+ if isFlexible {
+ s.UnknownTags = internalReadTags(&b)
+ }
+ }
+ v = a
+ s.Partitions = v
+ }
+ if isFlexible {
+ s.UnknownTags = internalReadTags(&b)
+ }
+ }
+ v = a
+ s.Topics = v
+ }
+ if isFlexible {
+ s.UnknownTags = internalReadTags(&b)
+ }
+ return b.Complete()
+}
+
+// NewPtrOffsetForLeaderEpochRequest returns a pointer to a default OffsetForLeaderEpochRequest
+// This is a shortcut for creating a new(struct) and calling Default yourself.
+func NewPtrOffsetForLeaderEpochRequest() *OffsetForLeaderEpochRequest {
+ var v OffsetForLeaderEpochRequest
+ v.Default()
+ return &v
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to OffsetForLeaderEpochRequest.
+func (v *OffsetForLeaderEpochRequest) Default() {
+ v.ReplicaID = -2
+}
+
+// NewOffsetForLeaderEpochRequest returns a default OffsetForLeaderEpochRequest
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewOffsetForLeaderEpochRequest() OffsetForLeaderEpochRequest {
+ var v OffsetForLeaderEpochRequest
+ v.Default()
+ return v
+}
+
+type OffsetForLeaderEpochResponseTopicPartition struct {
+ // ErrorCode is the error code returned on request failure.
+ //
+ // TOPIC_AUTHORIZATION_FAILED is returned if the client does not have
+ // the necessary permissions to issue this request.
+ //
+ // KAFKA_STORAGE_ERROR is returned if the partition is offline.
+ //
+ // NOT_LEADER_FOR_PARTITION is returned if the broker knows of the partition
+ // but does not own it.
+ //
+ // UNKNOWN_TOPIC_OR_PARTITION is returned if the broker does not know of the
+ // partition.
+ //
+ // FENCED_LEADER_EPOCH is returned if the client is using a current leader epoch
+ // older than the actual leader epoch.
+ //
+ // UNKNOWN_LEADER_EPOCH is returned if the client is using a current leader epoch
+ // that the actual leader does not know of. This could occur when the client
+ // has newer metadata than the broker when the broker just became the leader for
+ // a replica.
+ ErrorCode int16
+
+ // Partition is the partition this response is for.
+ Partition int32
+
+ // LeaderEpoch is similar to the requested leader epoch, but pairs with the
+ // next field. If the requested leader epoch is unknown, this is -1. If the
+ // requested epoch had no records produced during the requested epoch, this
+ // is the first prior epoch that had records.
+ //
+ // This field has a default of -1.
+ LeaderEpoch int32 // v1+
+
+ // EndOffset is either (1) just past the last recorded offset in the
+ // current partition if the broker leader has the same epoch as the
+ // leader epoch in the request, or (2) the beginning offset of the next
+ // epoch if the leader is past the requested epoch. 
The second scenario + // can be seen as equivalent to the first: the beginning offset of the + // next epoch is just past the final offset of the prior epoch. + // + // (2) allows consumers to detect data loss: if the consumer consumed + // past the end offset that is returned, then the consumer should reset + // to the returned offset and the consumer knows everything past the end + // offset was lost. + // + // With the prior field, consumers know that at this offset, the broker + // either has no more records (consumer is caught up), or the broker + // transitioned to a new epoch. + // + // This field has a default of -1. + EndOffset int64 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetForLeaderEpochResponseTopicPartition. +func (v *OffsetForLeaderEpochResponseTopicPartition) Default() { + v.LeaderEpoch = -1 + v.EndOffset = -1 +} + +// NewOffsetForLeaderEpochResponseTopicPartition returns a default OffsetForLeaderEpochResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetForLeaderEpochResponseTopicPartition() OffsetForLeaderEpochResponseTopicPartition { + var v OffsetForLeaderEpochResponseTopicPartition + v.Default() + return v +} + +type OffsetForLeaderEpochResponseTopic struct { + // Topic is the topic this response corresponds to. + Topic string + + // Partitions are responses to partitions in a topic in the request. + Partitions []OffsetForLeaderEpochResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetForLeaderEpochResponseTopic. +func (v *OffsetForLeaderEpochResponseTopic) Default() { +} + +// NewOffsetForLeaderEpochResponseTopic returns a default OffsetForLeaderEpochResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetForLeaderEpochResponseTopic() OffsetForLeaderEpochResponseTopic { + var v OffsetForLeaderEpochResponseTopic + v.Default() + return v +} + +// OffsetForLeaderEpochResponse is returned from an OffsetForLeaderEpochRequest. +type OffsetForLeaderEpochResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 // v2+ + + // Topics are responses to topics in the request. + Topics []OffsetForLeaderEpochResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v4+ +} + +func (*OffsetForLeaderEpochResponse) Key() int16 { return 23 } +func (*OffsetForLeaderEpochResponse) MaxVersion() int16 { return 4 } +func (v *OffsetForLeaderEpochResponse) SetVersion(version int16) { v.Version = version } +func (v *OffsetForLeaderEpochResponse) GetVersion() int16 { return v.Version } +func (v *OffsetForLeaderEpochResponse) IsFlexible() bool { return v.Version >= 4 } +func (v *OffsetForLeaderEpochResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 0 +} + +func (v *OffsetForLeaderEpochResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *OffsetForLeaderEpochResponse) RequestKind() Request { + return &OffsetForLeaderEpochRequest{Version: v.Version} +} + +func (v *OffsetForLeaderEpochResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + if version >= 2 { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + if version >= 1 { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.EndOffset + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *OffsetForLeaderEpochResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *OffsetForLeaderEpochResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *OffsetForLeaderEpochResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + s := v + if version >= 2 { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetForLeaderEpochResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetForLeaderEpochResponseTopicPartition, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int32() + s.Partition = v + } + if version >= 1 { + v := b.Int32() + s.LeaderEpoch = v + } + { + v := b.Int64() + s.EndOffset = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrOffsetForLeaderEpochResponse returns a pointer to a default OffsetForLeaderEpochResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrOffsetForLeaderEpochResponse() *OffsetForLeaderEpochResponse { + var v OffsetForLeaderEpochResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetForLeaderEpochResponse. +func (v *OffsetForLeaderEpochResponse) Default() { +} + +// NewOffsetForLeaderEpochResponse returns a default OffsetForLeaderEpochResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetForLeaderEpochResponse() OffsetForLeaderEpochResponse { + var v OffsetForLeaderEpochResponse + v.Default() + return v +} + +type AddPartitionsToTxnRequestTopic struct { + // Topic is a topic name. + Topic string + + // Partitions are partitions within a topic to add as part of the producer + // side of a transaction. + Partitions []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AddPartitionsToTxnRequestTopic. +func (v *AddPartitionsToTxnRequestTopic) Default() { +} + +// NewAddPartitionsToTxnRequestTopic returns a default AddPartitionsToTxnRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewAddPartitionsToTxnRequestTopic() AddPartitionsToTxnRequestTopic { + var v AddPartitionsToTxnRequestTopic + v.Default() + return v +} + +type AddPartitionsToTxnRequestTransactionTopic struct { + Topic string + + Partitions []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AddPartitionsToTxnRequestTransactionTopic. +func (v *AddPartitionsToTxnRequestTransactionTopic) Default() { +} + +// NewAddPartitionsToTxnRequestTransactionTopic returns a default AddPartitionsToTxnRequestTransactionTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewAddPartitionsToTxnRequestTransactionTopic() AddPartitionsToTxnRequestTransactionTopic { + var v AddPartitionsToTxnRequestTransactionTopic + v.Default() + return v +} + +type AddPartitionsToTxnRequestTransaction struct { + TransactionalID string + + ProducerID int64 + + ProducerEpoch int16 + + // VerifyOnly signifies if we want to check if the partition is in the + // transaction rather than add it. + VerifyOnly bool + + Topics []AddPartitionsToTxnRequestTransactionTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AddPartitionsToTxnRequestTransaction. 
+func (v *AddPartitionsToTxnRequestTransaction) Default() {
+}
+
+// NewAddPartitionsToTxnRequestTransaction returns a default AddPartitionsToTxnRequestTransaction
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewAddPartitionsToTxnRequestTransaction() AddPartitionsToTxnRequestTransaction {
+ var v AddPartitionsToTxnRequestTransaction
+ v.Default()
+ return v
+}
+
+// AddPartitionsToTxnRequest begins the producer side of a transaction for all
+// partitions in the request. Before producing any records to a partition in
+// the transaction, that partition must have been added to the transaction with
+// this request.
+//
+// Versions 3 and below are exclusively used by clients and versions 4 and
+// above are used by brokers.
+//
+// Version 4 adds the VerifyOnly field, used to check whether partitions are
+// already in a transaction, and adds support for batching multiple
+// transactions.
+type AddPartitionsToTxnRequest struct {
+ // Version is the version of this message used with a Kafka broker.
+ Version int16
+
+ // TransactionalID is the transactional ID to use for this request.
+ TransactionalID string // v0-v3
+
+ // ProducerID is the producer ID of the client for this transactional ID
+ // as received from InitProducerID.
+ ProducerID int64 // v0-v3
+
+ // ProducerEpoch is the producer epoch of the client for this transactional ID
+ // as received from InitProducerID.
+ ProducerEpoch int16 // v0-v3
+
+ // Topics are topics to add as part of the producer side of a transaction.
+ Topics []AddPartitionsToTxnRequestTopic // v0-v3
+
+ // The list of transactions to add partitions to, for v4+, for brokers only.
+ // The fields in this are batch broker requests that duplicate the above fields
+ // and thus are undocumented (except VerifyOnly, which is new).
+ Transactions []AddPartitionsToTxnRequestTransaction // v4+
+
+ // UnknownTags are tags Kafka sent that we do not know the purpose of.
+ UnknownTags Tags // v3+
+}
+
+func (*AddPartitionsToTxnRequest) Key() int16 { return 24 }
+func (*AddPartitionsToTxnRequest) MaxVersion() int16 { return 5 }
+func (v *AddPartitionsToTxnRequest) SetVersion(version int16) { v.Version = version }
+func (v *AddPartitionsToTxnRequest) GetVersion() int16 { return v.Version }
+func (v *AddPartitionsToTxnRequest) IsFlexible() bool { return v.Version >= 3 }
+func (v *AddPartitionsToTxnRequest) IsTxnCoordinatorRequest() {}
+func (v *AddPartitionsToTxnRequest) ResponseKind() Response {
+ r := &AddPartitionsToTxnResponse{Version: v.Version}
+ r.Default()
+ return r
+}
+
+// RequestWith issues the request v on r and returns the response or an error.
+// For sharded requests, the response may be merged and still return an error.
+// It is better to rely on client.RequestSharded than to rely on proper merging behavior.
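+//
+// A minimal v0-v3 style usage sketch (ctx and client are hypothetical; the
+// producer ID and epoch come from a prior InitProducerID response):
+//
+//	req := NewPtrAddPartitionsToTxnRequest()
+//	req.TransactionalID = "my-txn-id"
+//	req.ProducerID = producerID
+//	req.ProducerEpoch = producerEpoch
+//	t := NewAddPartitionsToTxnRequestTopic()
+//	t.Topic = "my-topic"
+//	t.Partitions = []int32{0, 1}
+//	req.Topics = append(req.Topics, t)
+//	resp, err := req.RequestWith(ctx, client)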
+func (v *AddPartitionsToTxnRequest) RequestWith(ctx context.Context, r Requestor) (*AddPartitionsToTxnResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*AddPartitionsToTxnResponse) + return resp, err +} + +func (v *AddPartitionsToTxnRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + if version >= 0 && version <= 3 { + v := v.TransactionalID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 0 && version <= 3 { + v := v.ProducerID + dst = kbin.AppendInt64(dst, v) + } + if version >= 0 && version <= 3 { + v := v.ProducerEpoch + dst = kbin.AppendInt16(dst, v) + } + if version >= 0 && version <= 3 { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 4 { + v := v.Transactions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.TransactionalID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ProducerID + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ProducerEpoch + dst = kbin.AppendInt16(dst, v) + } + { + v := v.VerifyOnly + dst = kbin.AppendBool(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AddPartitionsToTxnRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AddPartitionsToTxnRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AddPartitionsToTxnRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + if version >= 0 && version <= 3 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + 
s.TransactionalID = v + } + if version >= 0 && version <= 3 { + v := b.Int64() + s.ProducerID = v + } + if version >= 0 && version <= 3 { + v := b.Int16() + s.ProducerEpoch = v + } + if version >= 0 && version <= 3 { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AddPartitionsToTxnRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if version >= 4 { + v := s.Transactions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AddPartitionsToTxnRequestTransaction, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.TransactionalID = v + } + { + v := b.Int64() + s.ProducerID = v + } + { + v := b.Int16() + s.ProducerEpoch = v + } + { + v := b.Bool() + s.VerifyOnly = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AddPartitionsToTxnRequestTransactionTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Transactions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAddPartitionsToTxnRequest returns a pointer to a default AddPartitionsToTxnRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAddPartitionsToTxnRequest() *AddPartitionsToTxnRequest { + var v AddPartitionsToTxnRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AddPartitionsToTxnRequest. 
+func (v *AddPartitionsToTxnRequest) Default() { +} + +// NewAddPartitionsToTxnRequest returns a default AddPartitionsToTxnRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewAddPartitionsToTxnRequest() AddPartitionsToTxnRequest { + var v AddPartitionsToTxnRequest + v.Default() + return v +} + +type AddPartitionsToTxnResponseTransactionTopicPartition struct { + Partition int32 + + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AddPartitionsToTxnResponseTransactionTopicPartition. +func (v *AddPartitionsToTxnResponseTransactionTopicPartition) Default() { +} + +// NewAddPartitionsToTxnResponseTransactionTopicPartition returns a default AddPartitionsToTxnResponseTransactionTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewAddPartitionsToTxnResponseTransactionTopicPartition() AddPartitionsToTxnResponseTransactionTopicPartition { + var v AddPartitionsToTxnResponseTransactionTopicPartition + v.Default() + return v +} + +type AddPartitionsToTxnResponseTransactionTopic struct { + Topic string + + Partitions []AddPartitionsToTxnResponseTransactionTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AddPartitionsToTxnResponseTransactionTopic. +func (v *AddPartitionsToTxnResponseTransactionTopic) Default() { +} + +// NewAddPartitionsToTxnResponseTransactionTopic returns a default AddPartitionsToTxnResponseTransactionTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewAddPartitionsToTxnResponseTransactionTopic() AddPartitionsToTxnResponseTransactionTopic { + var v AddPartitionsToTxnResponseTransactionTopic + v.Default() + return v +} + +type AddPartitionsToTxnResponseTransaction struct { + // The transactional id corresponding to the transaction. + TransactionalID string + + Topics []AddPartitionsToTxnResponseTransactionTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AddPartitionsToTxnResponseTransaction. +func (v *AddPartitionsToTxnResponseTransaction) Default() { +} + +// NewAddPartitionsToTxnResponseTransaction returns a default AddPartitionsToTxnResponseTransaction +// This is a shortcut for creating a struct and calling Default yourself. +func NewAddPartitionsToTxnResponseTransaction() AddPartitionsToTxnResponseTransaction { + var v AddPartitionsToTxnResponseTransaction + v.Default() + return v +} + +type AddPartitionsToTxnResponseTopicPartition struct { + // Partition is a partition being responded to. + Partition int32 + + // ErrorCode is any error for this topic/partition commit. + // + // TRANSACTIONAL_ID_AUTHORIZATION_FAILED is returned if the client is + // not authorized for write with transactional IDs with the requested + // transactional ID. + // + // TOPIC_AUTHORIZATION_FAILED is returned for all topics that the client + // is not authorized to write to. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned for all topics or partitions + // that the broker does not know of. 
+ // + // OPERATION_NOT_ATTEMPTED is returned if any of the above errors occur + // for all partitions that did not have the above errors. + // + // INVALID_REQUEST is returned if the transactional ID is invalid. + // + // COORDINATOR_LOAD_IN_PROGRESS is returned if the coordinator for this + // transactional ID is still loading. + // + // NOT_COORDINATOR is returned if the broker is not the coordinator for + // this transactional ID. + // + // INVALID_PRODUCER_ID_MAPPING is returned if the produce request used + // a producer ID that is not tied to the transactional ID (i.e., mismatch + // from what was returned from InitProducerID). + // + // INVALID_PRODUCER_EPOCH is returned if the requested epoch does not match + // the broker epoch for this transactional ID. + // + // CONCURRENT_TRANSACTIONS is returned if there is an ongoing transaction for + // this transactional ID, if the producer ID and epoch matches the broker's. + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AddPartitionsToTxnResponseTopicPartition. +func (v *AddPartitionsToTxnResponseTopicPartition) Default() { +} + +// NewAddPartitionsToTxnResponseTopicPartition returns a default AddPartitionsToTxnResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewAddPartitionsToTxnResponseTopicPartition() AddPartitionsToTxnResponseTopicPartition { + var v AddPartitionsToTxnResponseTopicPartition + v.Default() + return v +} + +type AddPartitionsToTxnResponseTopic struct { + // Topic is a topic being responded to. + Topic string + + // Partitions are responses to partitions in the request. + Partitions []AddPartitionsToTxnResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AddPartitionsToTxnResponseTopic. +func (v *AddPartitionsToTxnResponseTopic) Default() { +} + +// NewAddPartitionsToTxnResponseTopic returns a default AddPartitionsToTxnResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewAddPartitionsToTxnResponseTopic() AddPartitionsToTxnResponseTopic { + var v AddPartitionsToTxnResponseTopic + v.Default() + return v +} + +// AddPartitionsToTxnResponse is a response to an AddPartitionsToTxnRequest. +type AddPartitionsToTxnResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // The response top level error code. + ErrorCode int16 // v4+ + + // Results categorized by transactional ID, v4+ only, for brokers only. + // The fields duplicate v3 and below fields (except TransactionalID) and + // are left undocumented. + Transactions []AddPartitionsToTxnResponseTransaction // v4+ + + // Topics are responses to topics in the request. + Topics []AddPartitionsToTxnResponseTopic // v0-v3 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
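+	//
+	// These come from tagged fields in flexible versions (v3+, KIP-482);
+	// keeping them allows unknown tags to round-trip through this struct.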
+ UnknownTags Tags // v3+ +} + +func (*AddPartitionsToTxnResponse) Key() int16 { return 24 } +func (*AddPartitionsToTxnResponse) MaxVersion() int16 { return 5 } +func (v *AddPartitionsToTxnResponse) SetVersion(version int16) { v.Version = version } +func (v *AddPartitionsToTxnResponse) GetVersion() int16 { return v.Version } +func (v *AddPartitionsToTxnResponse) IsFlexible() bool { return v.Version >= 3 } +func (v *AddPartitionsToTxnResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 1 +} + +func (v *AddPartitionsToTxnResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *AddPartitionsToTxnResponse) RequestKind() Request { + return &AddPartitionsToTxnRequest{Version: v.Version} +} + +func (v *AddPartitionsToTxnResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + if version >= 4 { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if version >= 4 { + v := v.Transactions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.TransactionalID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 0 && version <= 3 { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AddPartitionsToTxnResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v 
*AddPartitionsToTxnResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AddPartitionsToTxnResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + if version >= 4 { + v := b.Int16() + s.ErrorCode = v + } + if version >= 4 { + v := s.Transactions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AddPartitionsToTxnResponseTransaction, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.TransactionalID = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AddPartitionsToTxnResponseTransactionTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AddPartitionsToTxnResponseTransactionTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Transactions = v + } + if version >= 0 && version <= 3 { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AddPartitionsToTxnResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AddPartitionsToTxnResponseTopicPartition, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAddPartitionsToTxnResponse returns a pointer to a default AddPartitionsToTxnResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAddPartitionsToTxnResponse() *AddPartitionsToTxnResponse { + var v AddPartitionsToTxnResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AddPartitionsToTxnResponse. +func (v *AddPartitionsToTxnResponse) Default() { +} + +// NewAddPartitionsToTxnResponse returns a default AddPartitionsToTxnResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewAddPartitionsToTxnResponse() AddPartitionsToTxnResponse { + var v AddPartitionsToTxnResponse + v.Default() + return v +} + +// AddOffsetsToTxnRequest is a request that ties produced records to what group +// is being consumed for the transaction. +// +// This request must be called before TxnOffsetCommitRequest. +// +// Internally, this request simply adds the __consumer_offsets topic as a +// partition for this transaction with AddPartitionsToTxn for the partition +// in that topic that contains the group. +type AddOffsetsToTxnRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // TransactionalID is the transactional ID to use for this request. + TransactionalID string + + // ProducerID is the producer ID of the client for this transactional ID + // as received from InitProducerID. + ProducerID int64 + + // ProducerEpoch is the producer epoch of the client for this transactional ID + // as received from InitProducerID. + ProducerEpoch int16 + + // Group is the group to tie this transaction to. + Group string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +func (*AddOffsetsToTxnRequest) Key() int16 { return 25 } +func (*AddOffsetsToTxnRequest) MaxVersion() int16 { return 4 } +func (v *AddOffsetsToTxnRequest) SetVersion(version int16) { v.Version = version } +func (v *AddOffsetsToTxnRequest) GetVersion() int16 { return v.Version } +func (v *AddOffsetsToTxnRequest) IsFlexible() bool { return v.Version >= 3 } +func (v *AddOffsetsToTxnRequest) IsTxnCoordinatorRequest() {} +func (v *AddOffsetsToTxnRequest) ResponseKind() Response { + r := &AddOffsetsToTxnResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
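+//
+// A minimal usage sketch (ctx, r, pid, and epoch are hypothetical; r may be
+// any Requestor implementation):
+//
+//	req := NewPtrAddOffsetsToTxnRequest()
+//	req.TransactionalID = "example-txn"
+//	req.ProducerID = pid      // from InitProducerID
+//	req.ProducerEpoch = epoch // from InitProducerID
+//	req.Group = "example-group"
+//	resp, err := req.RequestWith(ctx, r)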
+func (v *AddOffsetsToTxnRequest) RequestWith(ctx context.Context, r Requestor) (*AddOffsetsToTxnResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*AddOffsetsToTxnResponse) + return resp, err +} + +func (v *AddOffsetsToTxnRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + { + v := v.TransactionalID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ProducerID + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ProducerEpoch + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Group + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AddOffsetsToTxnRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AddOffsetsToTxnRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AddOffsetsToTxnRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.TransactionalID = v + } + { + v := b.Int64() + s.ProducerID = v + } + { + v := b.Int16() + s.ProducerEpoch = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Group = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAddOffsetsToTxnRequest returns a pointer to a default AddOffsetsToTxnRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAddOffsetsToTxnRequest() *AddOffsetsToTxnRequest { + var v AddOffsetsToTxnRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AddOffsetsToTxnRequest. +func (v *AddOffsetsToTxnRequest) Default() { +} + +// NewAddOffsetsToTxnRequest returns a default AddOffsetsToTxnRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewAddOffsetsToTxnRequest() AddOffsetsToTxnRequest { + var v AddOffsetsToTxnRequest + v.Default() + return v +} + +// AddOffsetsToTxnResponse is a response to an AddOffsetsToTxnRequest. +type AddOffsetsToTxnResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // ErrorCode is any error for this topic/partition commit. + // + // TRANSACTIONAL_ID_AUTHORIZATION_FAILED is returned if the client is + // not authorized for write with transactional IDs with the requested + // transactional ID. 
+ // + // GROUP_AUTHORIZATION_FAILED is returned if the client is not authorized + // to read group with the requested group id. + // + // This also can return any error that AddPartitionsToTxn returns. + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +func (*AddOffsetsToTxnResponse) Key() int16 { return 25 } +func (*AddOffsetsToTxnResponse) MaxVersion() int16 { return 4 } +func (v *AddOffsetsToTxnResponse) SetVersion(version int16) { v.Version = version } +func (v *AddOffsetsToTxnResponse) GetVersion() int16 { return v.Version } +func (v *AddOffsetsToTxnResponse) IsFlexible() bool { return v.Version >= 3 } +func (v *AddOffsetsToTxnResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 1 } +func (v *AddOffsetsToTxnResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *AddOffsetsToTxnResponse) RequestKind() Request { + return &AddOffsetsToTxnRequest{Version: v.Version} +} + +func (v *AddOffsetsToTxnResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AddOffsetsToTxnResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AddOffsetsToTxnResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AddOffsetsToTxnResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAddOffsetsToTxnResponse returns a pointer to a default AddOffsetsToTxnResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAddOffsetsToTxnResponse() *AddOffsetsToTxnResponse { + var v AddOffsetsToTxnResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AddOffsetsToTxnResponse. +func (v *AddOffsetsToTxnResponse) Default() { +} + +// NewAddOffsetsToTxnResponse returns a default AddOffsetsToTxnResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewAddOffsetsToTxnResponse() AddOffsetsToTxnResponse { + var v AddOffsetsToTxnResponse + v.Default() + return v +} + +// EndTxnRequest ends a transaction. This should be called after +// TxnOffsetCommitRequest. +type EndTxnRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // TransactionalID is the transactional ID to use for this request. + TransactionalID string + + // ProducerID is the producer ID of the client for this transactional ID + // as received from InitProducerID. + ProducerID int64 + + // ProducerEpoch is the producer epoch of the client for this transactional ID + // as received from InitProducerID. + ProducerEpoch int16 + + // Commit is whether to commit this transaction: true for yes, false for abort. + Commit bool + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v3+ +} + +func (*EndTxnRequest) Key() int16 { return 26 } +func (*EndTxnRequest) MaxVersion() int16 { return 4 } +func (v *EndTxnRequest) SetVersion(version int16) { v.Version = version } +func (v *EndTxnRequest) GetVersion() int16 { return v.Version } +func (v *EndTxnRequest) IsFlexible() bool { return v.Version >= 3 } +func (v *EndTxnRequest) IsTxnCoordinatorRequest() {} +func (v *EndTxnRequest) ResponseKind() Response { + r := &EndTxnResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *EndTxnRequest) RequestWith(ctx context.Context, r Requestor) (*EndTxnResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*EndTxnResponse) + return resp, err +} + +func (v *EndTxnRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + { + v := v.TransactionalID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ProducerID + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ProducerEpoch + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Commit + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *EndTxnRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *EndTxnRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *EndTxnRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.TransactionalID = v + } + { + v := b.Int64() + s.ProducerID = v + } + { + v := b.Int16() + s.ProducerEpoch = v + } + { + v := b.Bool() + s.Commit = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrEndTxnRequest returns a pointer to a default EndTxnRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrEndTxnRequest() *EndTxnRequest { + var v EndTxnRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to EndTxnRequest. +func (v *EndTxnRequest) Default() { +} + +// NewEndTxnRequest returns a default EndTxnRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewEndTxnRequest() EndTxnRequest { + var v EndTxnRequest + v.Default() + return v +} + +// EndTxnResponse is a response for an EndTxnRequest. +type EndTxnResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. 
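+	//
+	// A minimal sketch of honoring the hint (assumes "time" is imported and
+	// resp is a received *EndTxnResponse):
+	//
+	//	if ms, ok := resp.Throttle(); ok && ms > 0 {
+	//		time.Sleep(time.Duration(ms) * time.Millisecond)
+	//	}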
+ ThrottleMillis int32 + + // ErrorCode is any error for this topic/partition commit. + // + // TRANSACTIONAL_ID_AUTHORIZATION_FAILED is returned if the client is + // not authorized for write with transactional IDs with the requested + // transactional ID. + // + // INVALID_REQUEST is returned if the transactional ID is invalid. + // + // INVALID_PRODUCER_ID_MAPPING is returned if the produce request used + // a producer ID that is not tied to the transactional ID (i.e., mismatch + // from what was returned from InitProducerID). + // + // INVALID_PRODUCER_EPOCH is returned if the requested epoch does not match + // the broker epoch for this transactional ID. + // + // CONCURRENT_TRANSACTIONS is returned if there is an ongoing transaction for + // this transactional ID, if the producer ID and epoch matches the broker's. + // + // INVALID_TXN_STATE is returned if this request is attempted at the wrong + // time (given the order of how transaction requests should go). + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +func (*EndTxnResponse) Key() int16 { return 26 } +func (*EndTxnResponse) MaxVersion() int16 { return 4 } +func (v *EndTxnResponse) SetVersion(version int16) { v.Version = version } +func (v *EndTxnResponse) GetVersion() int16 { return v.Version } +func (v *EndTxnResponse) IsFlexible() bool { return v.Version >= 3 } +func (v *EndTxnResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 1 } +func (v *EndTxnResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *EndTxnResponse) RequestKind() Request { return &EndTxnRequest{Version: v.Version} } + +func (v *EndTxnResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *EndTxnResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *EndTxnResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *EndTxnResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrEndTxnResponse returns a pointer to a default EndTxnResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrEndTxnResponse() *EndTxnResponse { + var v EndTxnResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to EndTxnResponse. +func (v *EndTxnResponse) Default() { +} + +// NewEndTxnResponse returns a default EndTxnResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewEndTxnResponse() EndTxnResponse { + var v EndTxnResponse + v.Default() + return v +} + +type WriteTxnMarkersRequestMarkerTopic struct { + // Topic is the name of the topic to write markers for. + Topic string + + // Partitions contains partitions to write markers for. 
+ Partitions []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to WriteTxnMarkersRequestMarkerTopic. +func (v *WriteTxnMarkersRequestMarkerTopic) Default() { +} + +// NewWriteTxnMarkersRequestMarkerTopic returns a default WriteTxnMarkersRequestMarkerTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewWriteTxnMarkersRequestMarkerTopic() WriteTxnMarkersRequestMarkerTopic { + var v WriteTxnMarkersRequestMarkerTopic + v.Default() + return v +} + +type WriteTxnMarkersRequestMarker struct { + // ProducerID is the current producer ID to use when writing a marker. + ProducerID int64 + + // ProducerEpoch is the current producer epoch to use when writing a + // marker. + ProducerEpoch int16 + + // Committed is true if this marker is for a committed transaction, + // otherwise false if this is for an aborted transaction. + Committed bool + + // Topics contains the topics we are writing markers for. + Topics []WriteTxnMarkersRequestMarkerTopic + + // CoordinatorEpoch is the current epoch of the transaction coordinator we + // are writing a marker to. This is used to detect fenced writers. + CoordinatorEpoch int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to WriteTxnMarkersRequestMarker. +func (v *WriteTxnMarkersRequestMarker) Default() { +} + +// NewWriteTxnMarkersRequestMarker returns a default WriteTxnMarkersRequestMarker +// This is a shortcut for creating a struct and calling Default yourself. +func NewWriteTxnMarkersRequestMarker() WriteTxnMarkersRequestMarker { + var v WriteTxnMarkersRequestMarker + v.Default() + return v +} + +// WriteTxnMarkersRequest is a broker-to-broker request that Kafka uses to +// finish transactions. +type WriteTxnMarkersRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Markers contains transactional markers to be written. + Markers []WriteTxnMarkersRequestMarker + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +func (*WriteTxnMarkersRequest) Key() int16 { return 27 } +func (*WriteTxnMarkersRequest) MaxVersion() int16 { return 1 } +func (v *WriteTxnMarkersRequest) SetVersion(version int16) { v.Version = version } +func (v *WriteTxnMarkersRequest) GetVersion() int16 { return v.Version } +func (v *WriteTxnMarkersRequest) IsFlexible() bool { return v.Version >= 1 } +func (v *WriteTxnMarkersRequest) ResponseKind() Response { + r := &WriteTxnMarkersResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
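+//
+// A minimal sketch of building one commit marker (pid, epoch, and
+// coordinatorEpoch are hypothetical; this request is normally
+// broker-to-broker):
+//
+//	marker := NewWriteTxnMarkersRequestMarker()
+//	marker.ProducerID, marker.ProducerEpoch = pid, epoch
+//	marker.Committed = true // commit rather than abort
+//	topic := NewWriteTxnMarkersRequestMarkerTopic()
+//	topic.Topic = "example-topic"
+//	topic.Partitions = []int32{0, 1}
+//	marker.Topics = append(marker.Topics, topic)
+//	marker.CoordinatorEpoch = coordinatorEpoch // fences stale coordinators
+//	req := NewPtrWriteTxnMarkersRequest()
+//	req.Markers = append(req.Markers, marker)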
+func (v *WriteTxnMarkersRequest) RequestWith(ctx context.Context, r Requestor) (*WriteTxnMarkersResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*WriteTxnMarkersResponse) + return resp, err +} + +func (v *WriteTxnMarkersRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + { + v := v.Markers + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ProducerID + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ProducerEpoch + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Committed + dst = kbin.AppendBool(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.CoordinatorEpoch + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *WriteTxnMarkersRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *WriteTxnMarkersRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *WriteTxnMarkersRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + s := v + { + v := s.Markers + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]WriteTxnMarkersRequestMarker, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int64() + s.ProducerID = v + } + { + v := b.Int16() + s.ProducerEpoch = v + } + { + v := b.Bool() + s.Committed = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]WriteTxnMarkersRequestMarkerTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + { + v := b.Int32() + s.CoordinatorEpoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Markers = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrWriteTxnMarkersRequest returns a pointer to a default WriteTxnMarkersRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrWriteTxnMarkersRequest() *WriteTxnMarkersRequest { + var v WriteTxnMarkersRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to WriteTxnMarkersRequest. +func (v *WriteTxnMarkersRequest) Default() { +} + +// NewWriteTxnMarkersRequest returns a default WriteTxnMarkersRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewWriteTxnMarkersRequest() WriteTxnMarkersRequest { + var v WriteTxnMarkersRequest + v.Default() + return v +} + +type WriteTxnMarkersResponseMarkerTopicPartition struct { + // Partition is the partition this result is for. + Partition int32 + + // ErrorCode is non-zero if writing the transactional marker for this + // partition errored. + // + // CLUSTER_AUTHORIZATION_FAILED is returned if the user does not have + // CLUSTER_ACTION on CLUSTER. + // + // NOT_LEADER_OR_FOLLOWER is returned if the broker receiving this + // request is not the leader of the partition. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if the topic or partition is + // not known to exist. + // + // INVALID_PRODUCER_EPOCH is returned if the cluster epoch is provided + // and the provided epoch does not match. + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to WriteTxnMarkersResponseMarkerTopicPartition. +func (v *WriteTxnMarkersResponseMarkerTopicPartition) Default() { +} + +// NewWriteTxnMarkersResponseMarkerTopicPartition returns a default WriteTxnMarkersResponseMarkerTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewWriteTxnMarkersResponseMarkerTopicPartition() WriteTxnMarkersResponseMarkerTopicPartition { + var v WriteTxnMarkersResponseMarkerTopicPartition + v.Default() + return v +} + +type WriteTxnMarkersResponseMarkerTopic struct { + // Topic is the topic these results are for. + Topic string + + // Partitions contains per-partition results for the write markers + // request. + Partitions []WriteTxnMarkersResponseMarkerTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to WriteTxnMarkersResponseMarkerTopic. +func (v *WriteTxnMarkersResponseMarkerTopic) Default() { +} + +// NewWriteTxnMarkersResponseMarkerTopic returns a default WriteTxnMarkersResponseMarkerTopic +// This is a shortcut for creating a struct and calling Default yourself.
+func NewWriteTxnMarkersResponseMarkerTopic() WriteTxnMarkersResponseMarkerTopic { + var v WriteTxnMarkersResponseMarkerTopic + v.Default() + return v +} + +type WriteTxnMarkersResponseMarker struct { + // ProducerID is the producer ID these results are for (from the input + // request). + ProducerID int64 + + // Topics contains the results for the write markers request. + Topics []WriteTxnMarkersResponseMarkerTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to WriteTxnMarkersResponseMarker. +func (v *WriteTxnMarkersResponseMarker) Default() { +} + +// NewWriteTxnMarkersResponseMarker returns a default WriteTxnMarkersResponseMarker +// This is a shortcut for creating a struct and calling Default yourself. +func NewWriteTxnMarkersResponseMarker() WriteTxnMarkersResponseMarker { + var v WriteTxnMarkersResponseMarker + v.Default() + return v +} + +// WriteTxnMarkersResponse is a response to a WriteTxnMarkersRequest. +type WriteTxnMarkersResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Markers contains results for writing transactional markers. + Markers []WriteTxnMarkersResponseMarker + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +func (*WriteTxnMarkersResponse) Key() int16 { return 27 } +func (*WriteTxnMarkersResponse) MaxVersion() int16 { return 1 } +func (v *WriteTxnMarkersResponse) SetVersion(version int16) { v.Version = version } +func (v *WriteTxnMarkersResponse) GetVersion() int16 { return v.Version } +func (v *WriteTxnMarkersResponse) IsFlexible() bool { return v.Version >= 1 } +func (v *WriteTxnMarkersResponse) RequestKind() Request { + return &WriteTxnMarkersRequest{Version: v.Version} +} + +func (v *WriteTxnMarkersResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + { + v := v.Markers + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ProducerID + dst = kbin.AppendInt64(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *WriteTxnMarkersResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func 
(v *WriteTxnMarkersResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *WriteTxnMarkersResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + s := v + { + v := s.Markers + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]WriteTxnMarkersResponseMarker, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int64() + s.ProducerID = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]WriteTxnMarkersResponseMarkerTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]WriteTxnMarkersResponseMarkerTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Markers = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrWriteTxnMarkersResponse returns a pointer to a default WriteTxnMarkersResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrWriteTxnMarkersResponse() *WriteTxnMarkersResponse { + var v WriteTxnMarkersResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to WriteTxnMarkersResponse. +func (v *WriteTxnMarkersResponse) Default() { +} + +// NewWriteTxnMarkersResponse returns a default WriteTxnMarkersResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewWriteTxnMarkersResponse() WriteTxnMarkersResponse { + var v WriteTxnMarkersResponse + v.Default() + return v +} + +type TxnOffsetCommitRequestTopicPartition struct { + // Partition is a partition to add for a pending commit. + Partition int32 + + // Offset is the offset within partition to commit once EndTxnRequest is + // called (with commit; abort obviously aborts). + Offset int64 + + // LeaderEpoch, proposed in KIP-320 and introduced in Kafka 2.1.0, + // allows brokers to check if the client is fenced (has an out of date + // leader) or is using an unknown leader. + // + // The initial leader epoch can be determined from a MetadataResponse. + // To skip log truncation checking, use -1. + // + // This field has a default of -1. + LeaderEpoch int32 // v2+ + + // Metadata is optional metadata the client wants to include with this + // commit. 
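+	// Kafka stores the string verbatim with the committed offset and
+	// returns it on offset fetches.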
+ Metadata *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to TxnOffsetCommitRequestTopicPartition. +func (v *TxnOffsetCommitRequestTopicPartition) Default() { + v.LeaderEpoch = -1 +} + +// NewTxnOffsetCommitRequestTopicPartition returns a default TxnOffsetCommitRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewTxnOffsetCommitRequestTopicPartition() TxnOffsetCommitRequestTopicPartition { + var v TxnOffsetCommitRequestTopicPartition + v.Default() + return v +} + +type TxnOffsetCommitRequestTopic struct { + // Topic is a topic to add for a pending commit. + Topic string + + // Partitions are partitions to add for pending commits. + Partitions []TxnOffsetCommitRequestTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to TxnOffsetCommitRequestTopic. +func (v *TxnOffsetCommitRequestTopic) Default() { +} + +// NewTxnOffsetCommitRequestTopic returns a default TxnOffsetCommitRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewTxnOffsetCommitRequestTopic() TxnOffsetCommitRequestTopic { + var v TxnOffsetCommitRequestTopic + v.Default() + return v +} + +// TxnOffsetCommitRequest sends offsets that are a part of this transaction +// to be committed once the transaction itself finishes. This effectively +// replaces OffsetCommitRequest for when using transactions. +type TxnOffsetCommitRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // TransactionalID is the transactional ID to use for this request. + TransactionalID string + + // Group is the group consumed in this transaction and to be used for + // committing. + Group string + + // ProducerID is the producer ID of the client for this transactional ID + // as received from InitProducerID. + ProducerID int64 + + // ProducerEpoch is the producer epoch of the client for this transactional ID + // as received from InitProducerID. + ProducerEpoch int16 + + // Generation is the group generation this transactional offset commit request is for. + // + // This field has a default of -1. + Generation int32 // v3+ + + // MemberID is the member ID this member is for. + MemberID string // v3+ + + // InstanceID is the instance ID of this member in the group (KIP-345, KIP-447). + InstanceID *string // v3+ + + // Topics are topics to add for pending commits. + Topics []TxnOffsetCommitRequestTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +func (*TxnOffsetCommitRequest) Key() int16 { return 28 } +func (*TxnOffsetCommitRequest) MaxVersion() int16 { return 4 } +func (v *TxnOffsetCommitRequest) SetVersion(version int16) { v.Version = version } +func (v *TxnOffsetCommitRequest) GetVersion() int16 { return v.Version } +func (v *TxnOffsetCommitRequest) IsFlexible() bool { return v.Version >= 3 } +func (v *TxnOffsetCommitRequest) IsGroupCoordinatorRequest() {} +func (v *TxnOffsetCommitRequest) ResponseKind() Response { + r := &TxnOffsetCommitResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. 
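+//
+// A minimal sketch of committing one offset inside a transaction (pid,
+// epoch, ctx, and r are hypothetical):
+//
+//	p := NewTxnOffsetCommitRequestTopicPartition()
+//	p.Partition, p.Offset = 0, 42
+//	t := NewTxnOffsetCommitRequestTopic()
+//	t.Topic = "example-topic"
+//	t.Partitions = append(t.Partitions, p)
+//	req := NewPtrTxnOffsetCommitRequest()
+//	req.TransactionalID = "example-txn"
+//	req.Group = "example-group"
+//	req.ProducerID, req.ProducerEpoch = pid, epoch // from InitProducerID
+//	req.Topics = append(req.Topics, t)
+//	resp, err := req.RequestWith(ctx, r)
+//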
+// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *TxnOffsetCommitRequest) RequestWith(ctx context.Context, r Requestor) (*TxnOffsetCommitResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*TxnOffsetCommitResponse) + return resp, err +} + +func (v *TxnOffsetCommitRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + { + v := v.TransactionalID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Group + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ProducerID + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ProducerEpoch + dst = kbin.AppendInt16(dst, v) + } + if version >= 3 { + v := v.Generation + dst = kbin.AppendInt32(dst, v) + } + if version >= 3 { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 3 { + v := v.InstanceID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Offset + dst = kbin.AppendInt64(dst, v) + } + if version >= 2 { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Metadata + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *TxnOffsetCommitRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *TxnOffsetCommitRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *TxnOffsetCommitRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.TransactionalID = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Group = v + } + { + v := b.Int64() + s.ProducerID = v + } + { + v := b.Int16() + s.ProducerEpoch = v + } + if version >= 3 { 
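+		// v3+ adds consumer-group fencing (KIP-447): Generation, MemberID,
+		// and InstanceID are decoded next.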
+ v := b.Int32() + s.Generation = v + } + if version >= 3 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.MemberID = v + } + if version >= 3 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.InstanceID = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]TxnOffsetCommitRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]TxnOffsetCommitRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int64() + s.Offset = v + } + if version >= 2 { + v := b.Int32() + s.LeaderEpoch = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Metadata = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrTxnOffsetCommitRequest returns a pointer to a default TxnOffsetCommitRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrTxnOffsetCommitRequest() *TxnOffsetCommitRequest { + var v TxnOffsetCommitRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to TxnOffsetCommitRequest. +func (v *TxnOffsetCommitRequest) Default() { + v.Generation = -1 +} + +// NewTxnOffsetCommitRequest returns a default TxnOffsetCommitRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewTxnOffsetCommitRequest() TxnOffsetCommitRequest { + var v TxnOffsetCommitRequest + v.Default() + return v +} + +type TxnOffsetCommitResponseTopicPartition struct { + // Partition is the partition this response is for. + Partition int32 + + // ErrorCode is any error for this topic/partition commit. + // + // TRANSACTIONAL_ID_AUTHORIZATION_FAILED is returned if the client is + // not authorized for write with transactional IDs with the requested + // transactional ID. + // + // GROUP_AUTHORIZATION_FAILED is returned if the client is not authorized + // to read group with the requested group id. + // + // TOPIC_AUTHORIZATION_FAILED is returned for all topics that the client + // is not authorized to read. 
+ // + // UNKNOWN_TOPIC_OR_PARTITION is returned for all topics or partitions + // that the broker does not know of. + // + // INVALID_GROUP_ID is returned if the requested group does not exist. + // + // COORDINATOR_NOT_AVAILABLE is returned if the broker is not yet fully + // started or is shutting down, or if the group was just deleted or is + // migrating to another broker. + // + // COORDINATOR_LOAD_IN_PROGRESS is returned if the group is still loading. + // + // NOT_COORDINATOR is returned if the broker is not the coordinator for + // the group. + // + // FENCED_INSTANCE_ID is returned if the member is fenced (another newer + // transactional member is using the same instance ID). + // + // UNKNOWN_MEMBER_ID is returned if the consumer group does not know of + // this member. + // + // ILLEGAL_GENERATION is returned if the consumer group's generation is + // different than the requested generation. + // + // OFFSET_METADATA_TOO_LARGE is returned if the commit metadata is too + // large. + // + // REBALANCE_IN_PROGRESS is returned if the group is completing a rebalance. + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to TxnOffsetCommitResponseTopicPartition. +func (v *TxnOffsetCommitResponseTopicPartition) Default() { +} + +// NewTxnOffsetCommitResponseTopicPartition returns a default TxnOffsetCommitResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewTxnOffsetCommitResponseTopicPartition() TxnOffsetCommitResponseTopicPartition { + var v TxnOffsetCommitResponseTopicPartition + v.Default() + return v +} + +type TxnOffsetCommitResponseTopic struct { + // Topic is the topic this response is for. + Topic string + + // Partitions contains responses to the partitions in this topic. + Partitions []TxnOffsetCommitResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v3+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to TxnOffsetCommitResponseTopic. +func (v *TxnOffsetCommitResponseTopic) Default() { +} + +// NewTxnOffsetCommitResponseTopic returns a default TxnOffsetCommitResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewTxnOffsetCommitResponseTopic() TxnOffsetCommitResponseTopic { + var v TxnOffsetCommitResponseTopic + v.Default() + return v +} + +// TxnOffsetCommitResponse is a response to a TxnOffsetCommitRequest. +type TxnOffsetCommitResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // Topics contains responses to the topics in the request. + Topics []TxnOffsetCommitResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v3+ +} + +func (*TxnOffsetCommitResponse) Key() int16 { return 28 } +func (*TxnOffsetCommitResponse) MaxVersion() int16 { return 4 } +func (v *TxnOffsetCommitResponse) SetVersion(version int16) { v.Version = version } +func (v *TxnOffsetCommitResponse) GetVersion() int16 { return v.Version } +func (v *TxnOffsetCommitResponse) IsFlexible() bool { return v.Version >= 3 } +func (v *TxnOffsetCommitResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 1 } +func (v *TxnOffsetCommitResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *TxnOffsetCommitResponse) RequestKind() Request { + return &TxnOffsetCommitRequest{Version: v.Version} +} + +func (v *TxnOffsetCommitResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *TxnOffsetCommitResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *TxnOffsetCommitResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *TxnOffsetCommitResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 3 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]TxnOffsetCommitResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]TxnOffsetCommitResponseTopicPartition, l)...) 
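+ // The destination slice is reused: a was truncated via a = a[:0] above and
+ // is grown once to the wire length l, so decoding repeatedly into the same
+ // struct avoids a fresh allocation per element when capacity suffices.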
+ }
+ for i := int32(0); i < l; i++ {
+ v := &a[i]
+ v.Default()
+ s := v
+ {
+ v := b.Int32()
+ s.Partition = v
+ }
+ {
+ v := b.Int16()
+ s.ErrorCode = v
+ }
+ if isFlexible {
+ s.UnknownTags = internalReadTags(&b)
+ }
+ }
+ v = a
+ s.Partitions = v
+ }
+ if isFlexible {
+ s.UnknownTags = internalReadTags(&b)
+ }
+ }
+ v = a
+ s.Topics = v
+ }
+ if isFlexible {
+ s.UnknownTags = internalReadTags(&b)
+ }
+ return b.Complete()
+}
+
+// NewPtrTxnOffsetCommitResponse returns a pointer to a default TxnOffsetCommitResponse
+// This is a shortcut for creating a new(struct) and calling Default yourself.
+func NewPtrTxnOffsetCommitResponse() *TxnOffsetCommitResponse {
+ var v TxnOffsetCommitResponse
+ v.Default()
+ return &v
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to TxnOffsetCommitResponse.
+func (v *TxnOffsetCommitResponse) Default() {
+}
+
+// NewTxnOffsetCommitResponse returns a default TxnOffsetCommitResponse
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewTxnOffsetCommitResponse() TxnOffsetCommitResponse {
+ var v TxnOffsetCommitResponse
+ v.Default()
+ return v
+}
+
+// DescribeACLsRequest describes ACLs. Describing ACLs works on a filter basis:
+// anything that matches the filter is described. Note that there are two
+// "types" of filters in this request: the resource filter and the entry
+// filter, with entries corresponding to users. The first three fields form the
+// resource filter, the last four the entry filter.
+type DescribeACLsRequest struct {
+ // Version is the version of this message used with a Kafka broker.
+ Version int16
+
+ // ResourceType is the type of resource to describe.
+ ResourceType ACLResourceType
+
+ // ResourceName is the name to filter out. For the CLUSTER resource type,
+ // this must be "kafka-cluster".
+ ResourceName *string
+
+ // ResourcePatternType is how ResourceName is understood.
+ //
+ // This field has a default of 3.
+ ResourcePatternType ACLResourcePatternType // v1+
+
+ // Principal is the user to filter for. In Kafka with the simple authorizer,
+ // all principals begin with "User:". Pluggable authorizers are allowed, but
+ // Kafka still expects principals to lead with a principal type ("User") and
+ // have a colon separating the principal name ("bob" in "User:bob").
+ Principal *string
+
+ // Host is a host to filter for.
+ Host *string
+
+ // Operation is an operation to filter for.
+ //
+ // Note that READ, WRITE, DELETE, and ALTER imply DESCRIBE, and ALTER_CONFIGS
+ // implies DESCRIBE_CONFIGS.
+ Operation ACLOperation
+
+ // PermissionType is the permission type to filter for. UNKNOWN is 0.
+ PermissionType ACLPermissionType
+
+ // UnknownTags are tags Kafka sent that we do not know the purpose of.
+ UnknownTags Tags // v2+
+}
+
+func (*DescribeACLsRequest) Key() int16 { return 29 }
+func (*DescribeACLsRequest) MaxVersion() int16 { return 3 }
+func (v *DescribeACLsRequest) SetVersion(version int16) { v.Version = version }
+func (v *DescribeACLsRequest) GetVersion() int16 { return v.Version }
+func (v *DescribeACLsRequest) IsFlexible() bool { return v.Version >= 2 }
+func (v *DescribeACLsRequest) ResponseKind() Response {
+ r := &DescribeACLsResponse{Version: v.Version}
+ r.Default()
+ return r
+}
+
+// RequestWith requests v on r and returns the response or an error.
+// For sharded requests, the response may be merged and still return an error.
+// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *DescribeACLsRequest) RequestWith(ctx context.Context, r Requestor) (*DescribeACLsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*DescribeACLsResponse) + return resp, err +} + +func (v *DescribeACLsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ResourceType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.ResourceName + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if version >= 1 { + v := v.ResourcePatternType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.Principal + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Host + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Operation + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.PermissionType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeACLsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeACLsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeACLsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + var t ACLResourceType + { + v := b.Int8() + t = ACLResourceType(v) + } + v := t + s.ResourceType = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ResourceName = v + } + if version >= 1 { + var t ACLResourcePatternType + { + v := b.Int8() + t = ACLResourcePatternType(v) + } + v := t + s.ResourcePatternType = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Principal = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Host = v + } + { + var t ACLOperation + { + v := b.Int8() + t = ACLOperation(v) + } + v := t + s.Operation = v + } + { + var t ACLPermissionType + { + v := b.Int8() + t = ACLPermissionType(v) + } + v := t + s.PermissionType = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeACLsRequest returns a pointer to a default DescribeACLsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeACLsRequest() *DescribeACLsRequest { + var v DescribeACLsRequest + v.Default() + return &v +} + +// Default sets any default fields. 
Calling this allows for future compatibility +// if new fields are added to DescribeACLsRequest. +func (v *DescribeACLsRequest) Default() { + v.ResourcePatternType = 3 +} + +// NewDescribeACLsRequest returns a default DescribeACLsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeACLsRequest() DescribeACLsRequest { + var v DescribeACLsRequest + v.Default() + return v +} + +type DescribeACLsResponseResourceACL struct { + // Principal is who this ACL applies to. + Principal string + + // Host is on which host this ACL applies. + Host string + + // Operation is the operation being described. + Operation ACLOperation + + // PermissionType is the permission being described. + PermissionType ACLPermissionType + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeACLsResponseResourceACL. +func (v *DescribeACLsResponseResourceACL) Default() { +} + +// NewDescribeACLsResponseResourceACL returns a default DescribeACLsResponseResourceACL +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeACLsResponseResourceACL() DescribeACLsResponseResourceACL { + var v DescribeACLsResponseResourceACL + v.Default() + return v +} + +type DescribeACLsResponseResource struct { + // ResourceType is the resource type being described. + ResourceType ACLResourceType + + // ResourceName is the resource name being described. + ResourceName string + + // ResourcePatternType is the pattern type being described. + // + // This field has a default of 3. + ResourcePatternType ACLResourcePatternType // v1+ + + // ACLs contains users / entries being described. + ACLs []DescribeACLsResponseResourceACL + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeACLsResponseResource. +func (v *DescribeACLsResponseResource) Default() { + v.ResourcePatternType = 3 +} + +// NewDescribeACLsResponseResource returns a default DescribeACLsResponseResource +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeACLsResponseResource() DescribeACLsResponseResource { + var v DescribeACLsResponseResource + v.Default() + return v +} + +// DescribeACLsResponse is a response to a describe acls request. +type DescribeACLsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // ErrorCode is the error code returned on request failure. + // + // SECURITY_DISABLED is returned if there is no authorizer configured on the + // broker. + // + // There can be other authorization failures. + ErrorCode int16 + + // ErrorMessage is a message for an error. + ErrorMessage *string + + // Resources are the describe resources. + Resources []DescribeACLsResponseResource + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v2+ +} + +func (*DescribeACLsResponse) Key() int16 { return 29 } +func (*DescribeACLsResponse) MaxVersion() int16 { return 3 } +func (v *DescribeACLsResponse) SetVersion(version int16) { v.Version = version } +func (v *DescribeACLsResponse) GetVersion() int16 { return v.Version } +func (v *DescribeACLsResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *DescribeACLsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 1 } +func (v *DescribeACLsResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *DescribeACLsResponse) RequestKind() Request { return &DescribeACLsRequest{Version: v.Version} } + +func (v *DescribeACLsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Resources + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ResourceType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.ResourceName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 1 { + v := v.ResourcePatternType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.ACLs + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Principal + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Host + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Operation + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.PermissionType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeACLsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeACLsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeACLsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + { + v := s.Resources + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return 
b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeACLsResponseResource, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var t ACLResourceType + { + v := b.Int8() + t = ACLResourceType(v) + } + v := t + s.ResourceType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ResourceName = v + } + if version >= 1 { + var t ACLResourcePatternType + { + v := b.Int8() + t = ACLResourcePatternType(v) + } + v := t + s.ResourcePatternType = v + } + { + v := s.ACLs + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeACLsResponseResourceACL, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Principal = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Host = v + } + { + var t ACLOperation + { + v := b.Int8() + t = ACLOperation(v) + } + v := t + s.Operation = v + } + { + var t ACLPermissionType + { + v := b.Int8() + t = ACLPermissionType(v) + } + v := t + s.PermissionType = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.ACLs = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Resources = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeACLsResponse returns a pointer to a default DescribeACLsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeACLsResponse() *DescribeACLsResponse { + var v DescribeACLsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeACLsResponse. +func (v *DescribeACLsResponse) Default() { +} + +// NewDescribeACLsResponse returns a default DescribeACLsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeACLsResponse() DescribeACLsResponse { + var v DescribeACLsResponse + v.Default() + return v +} + +type CreateACLsRequestCreation struct { + // ResourceType is the type of resource this acl entry will be on. + // It is invalid to use UNKNOWN or ANY. + ResourceType ACLResourceType + + // ResourceName is the name of the resource this acl entry will be on. + // For CLUSTER, this must be "kafka-cluster". + ResourceName string + + // ResourcePatternType is the pattern type to use for the resource name. + // This cannot be UNKNOWN or MATCH (i.e. this must be LITERAL or PREFIXED). + // The default for pre-Kafka 2.0.0 is effectively LITERAL. + // + // This field has a default of 3. + ResourcePatternType ACLResourcePatternType // v1+ + + // Principal is the user to apply this acl for. With the Kafka simple + // authorizer, this must begin with "User:". + Principal string + + // Host is the host address to use for this acl. 
Each host to allow
+ // the principal access from must be specified as a new creation. KIP-252
+ // might solve this someday. The special wildcard host "*" allows all hosts.
+ Host string
+
+ // Operation is the operation this acl is for. This must not be UNKNOWN or
+ // ANY.
+ Operation ACLOperation
+
+ // PermissionType is the permission of this acl. This must be either ALLOW
+ // or DENY.
+ PermissionType ACLPermissionType
+
+ // UnknownTags are tags Kafka sent that we do not know the purpose of.
+ UnknownTags Tags // v2+
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to CreateACLsRequestCreation.
+func (v *CreateACLsRequestCreation) Default() {
+ v.ResourcePatternType = 3
+}
+
+// NewCreateACLsRequestCreation returns a default CreateACLsRequestCreation
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewCreateACLsRequestCreation() CreateACLsRequestCreation {
+ var v CreateACLsRequestCreation
+ v.Default()
+ return v
+}
+
+// CreateACLsRequest creates acls. Creating acls can be done as a batch; each
+// "creation" will be an acl entry.
+//
+// See the DescribeACLsRequest documentation for more descriptions of what
+// valid values for the fields in this request are.
+type CreateACLsRequest struct {
+ // Version is the version of this message used with a Kafka broker.
+ Version int16
+
+ Creations []CreateACLsRequestCreation
+
+ // UnknownTags are tags Kafka sent that we do not know the purpose of.
+ UnknownTags Tags // v2+
+}
+
+func (*CreateACLsRequest) Key() int16 { return 30 }
+func (*CreateACLsRequest) MaxVersion() int16 { return 3 }
+func (v *CreateACLsRequest) SetVersion(version int16) { v.Version = version }
+func (v *CreateACLsRequest) GetVersion() int16 { return v.Version }
+func (v *CreateACLsRequest) IsFlexible() bool { return v.Version >= 2 }
+func (v *CreateACLsRequest) ResponseKind() Response {
+ r := &CreateACLsResponse{Version: v.Version}
+ r.Default()
+ return r
+}
+
+// RequestWith requests v on r and returns the response or an error.
+// For sharded requests, the response may be merged and still return an error.
+// It is better to rely on client.RequestSharded than to rely on proper merging behavior.
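+//
+// A minimal usage sketch, assuming any Requestor (for example a *kgo.Client),
+// an in-scope ctx, and illustrative values; the ACL enum constants shown are
+// assumed from this package's generated enum definitions:
+//
+//     req := NewPtrCreateACLsRequest()
+//     c := NewCreateACLsRequestCreation()
+//     c.ResourceType = ACLResourceTypeTopic
+//     c.ResourceName = "foo"
+//     c.Principal = "User:bob"
+//     c.Host = "*"
+//     c.Operation = ACLOperationWrite
+//     c.PermissionType = ACLPermissionTypeAllow
+//     req.Creations = append(req.Creations, c)
+//     resp, err := req.RequestWith(ctx, client)
+//
+// Each resp.Results[i].ErrorCode reports the outcome of creation i.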
+func (v *CreateACLsRequest) RequestWith(ctx context.Context, r Requestor) (*CreateACLsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*CreateACLsResponse) + return resp, err +} + +func (v *CreateACLsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.Creations + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ResourceType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.ResourceName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 1 { + v := v.ResourcePatternType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.Principal + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Host + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Operation + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.PermissionType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *CreateACLsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *CreateACLsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *CreateACLsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := s.Creations + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]CreateACLsRequestCreation, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var t ACLResourceType + { + v := b.Int8() + t = ACLResourceType(v) + } + v := t + s.ResourceType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ResourceName = v + } + if version >= 1 { + var t ACLResourcePatternType + { + v := b.Int8() + t = ACLResourcePatternType(v) + } + v := t + s.ResourcePatternType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Principal = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Host = v + } + { + var t ACLOperation + { + v := b.Int8() + t = ACLOperation(v) + } + v := t + s.Operation = v + } + { + var t ACLPermissionType + { + v := b.Int8() + t = ACLPermissionType(v) + } + v := t + s.PermissionType = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Creations = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrCreateACLsRequest returns a pointer to a default CreateACLsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrCreateACLsRequest() *CreateACLsRequest { + var v CreateACLsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreateACLsRequest. +func (v *CreateACLsRequest) Default() { +} + +// NewCreateACLsRequest returns a default CreateACLsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreateACLsRequest() CreateACLsRequest { + var v CreateACLsRequest + v.Default() + return v +} + +type CreateACLsResponseResult struct { + // ErrorCode is an error for this particular creation (index wise). + ErrorCode int16 + + // ErrorMessage is a message for this error. + ErrorMessage *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreateACLsResponseResult. +func (v *CreateACLsResponseResult) Default() { +} + +// NewCreateACLsResponseResult returns a default CreateACLsResponseResult +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreateACLsResponseResult() CreateACLsResponseResult { + var v CreateACLsResponseResult + v.Default() + return v +} + +// CreateACLsResponse is a response for a CreateACLsRequest. +type CreateACLsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // Results contains responses to each creation request. 
+ Results []CreateACLsResponseResult + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*CreateACLsResponse) Key() int16 { return 30 } +func (*CreateACLsResponse) MaxVersion() int16 { return 3 } +func (v *CreateACLsResponse) SetVersion(version int16) { v.Version = version } +func (v *CreateACLsResponse) GetVersion() int16 { return v.Version } +func (v *CreateACLsResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *CreateACLsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 1 } +func (v *CreateACLsResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *CreateACLsResponse) RequestKind() Request { return &CreateACLsRequest{Version: v.Version} } + +func (v *CreateACLsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Results + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *CreateACLsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *CreateACLsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *CreateACLsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Results + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]CreateACLsResponseResult, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Results = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrCreateACLsResponse returns a pointer to a default CreateACLsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrCreateACLsResponse() *CreateACLsResponse { + var v CreateACLsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreateACLsResponse. +func (v *CreateACLsResponse) Default() { +} + +// NewCreateACLsResponse returns a default CreateACLsResponse +// This is a shortcut for creating a struct and calling Default yourself. 
+func NewCreateACLsResponse() CreateACLsResponse {
+ var v CreateACLsResponse
+ v.Default()
+ return v
+}
+
+type DeleteACLsRequestFilter struct {
+ ResourceType ACLResourceType
+
+ ResourceName *string
+
+ // This field has a default of 3.
+ ResourcePatternType ACLResourcePatternType // v1+
+
+ Principal *string
+
+ Host *string
+
+ Operation ACLOperation
+
+ PermissionType ACLPermissionType
+
+ // UnknownTags are tags Kafka sent that we do not know the purpose of.
+ UnknownTags Tags // v2+
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to DeleteACLsRequestFilter.
+func (v *DeleteACLsRequestFilter) Default() {
+ v.ResourcePatternType = 3
+}
+
+// NewDeleteACLsRequestFilter returns a default DeleteACLsRequestFilter
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewDeleteACLsRequestFilter() DeleteACLsRequestFilter {
+ var v DeleteACLsRequestFilter
+ v.Default()
+ return v
+}
+
+// DeleteACLsRequest deletes acls. This request works on filters the same way
+// that DescribeACLsRequest does. See DescribeACLsRequest for documentation of
+// the fields.
+type DeleteACLsRequest struct {
+ // Version is the version of this message used with a Kafka broker.
+ Version int16
+
+ // Filters are filters for acls to delete.
+ Filters []DeleteACLsRequestFilter
+
+ // UnknownTags are tags Kafka sent that we do not know the purpose of.
+ UnknownTags Tags // v2+
+}
+
+func (*DeleteACLsRequest) Key() int16 { return 31 }
+func (*DeleteACLsRequest) MaxVersion() int16 { return 3 }
+func (v *DeleteACLsRequest) SetVersion(version int16) { v.Version = version }
+func (v *DeleteACLsRequest) GetVersion() int16 { return v.Version }
+func (v *DeleteACLsRequest) IsFlexible() bool { return v.Version >= 2 }
+func (v *DeleteACLsRequest) ResponseKind() Response {
+ r := &DeleteACLsResponse{Version: v.Version}
+ r.Default()
+ return r
+}
+
+// RequestWith requests v on r and returns the response or an error.
+// For sharded requests, the response may be merged and still return an error.
+// It is better to rely on client.RequestSharded than to rely on proper merging behavior.
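+//
+// A minimal usage sketch, assuming any Requestor (for example a *kgo.Client)
+// and an in-scope ctx; the enum constants shown are assumed from this
+// package's generated enum definitions. Leaving the nullable filter fields
+// (ResourceName, Principal, Host) nil matches anything:
+//
+//     req := NewPtrDeleteACLsRequest()
+//     f := NewDeleteACLsRequestFilter()
+//     f.ResourceType = ACLResourceTypeTopic
+//     f.Operation = ACLOperationAny
+//     f.PermissionType = ACLPermissionTypeAny
+//     req.Filters = append(req.Filters, f)
+//     resp, err := req.RequestWith(ctx, client)
+//
+// resp.Results[i].MatchingACLs lists the acls deleted by filter i.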
+func (v *DeleteACLsRequest) RequestWith(ctx context.Context, r Requestor) (*DeleteACLsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*DeleteACLsResponse) + return resp, err +} + +func (v *DeleteACLsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.Filters + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ResourceType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.ResourceName + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if version >= 1 { + v := v.ResourcePatternType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.Principal + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Host + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Operation + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.PermissionType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DeleteACLsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DeleteACLsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DeleteACLsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := s.Filters + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DeleteACLsRequestFilter, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var t ACLResourceType + { + v := b.Int8() + t = ACLResourceType(v) + } + v := t + s.ResourceType = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ResourceName = v + } + if version >= 1 { + var t ACLResourcePatternType + { + v := b.Int8() + t = ACLResourcePatternType(v) + } + v := t + s.ResourcePatternType = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Principal = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Host = v + } + { + var t ACLOperation + { + v := b.Int8() + t = ACLOperation(v) + } + v := t + s.Operation = v + } + { + var t ACLPermissionType + { + v := b.Int8() + t = ACLPermissionType(v) + } + v := t + s.PermissionType = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Filters = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDeleteACLsRequest returns a pointer to a default DeleteACLsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDeleteACLsRequest() *DeleteACLsRequest { + var v DeleteACLsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteACLsRequest. +func (v *DeleteACLsRequest) Default() { +} + +// NewDeleteACLsRequest returns a default DeleteACLsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewDeleteACLsRequest() DeleteACLsRequest { + var v DeleteACLsRequest + v.Default() + return v +} + +type DeleteACLsResponseResultMatchingACL struct { + // ErrorCode contains an error for this individual acl for this filter. + ErrorCode int16 + + // ErrorMessage is a message for this error. + ErrorMessage *string + + ResourceType ACLResourceType + + ResourceName string + + // This field has a default of 3. + ResourcePatternType ACLResourcePatternType // v1+ + + Principal string + + Host string + + Operation ACLOperation + + PermissionType ACLPermissionType + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteACLsResponseResultMatchingACL. +func (v *DeleteACLsResponseResultMatchingACL) Default() { + v.ResourcePatternType = 3 +} + +// NewDeleteACLsResponseResultMatchingACL returns a default DeleteACLsResponseResultMatchingACL +// This is a shortcut for creating a struct and calling Default yourself. +func NewDeleteACLsResponseResultMatchingACL() DeleteACLsResponseResultMatchingACL { + var v DeleteACLsResponseResultMatchingACL + v.Default() + return v +} + +type DeleteACLsResponseResult struct { + // ErrorCode is the overall error code for this individual filter. + ErrorCode int16 + + // ErrorMessage is a message for this error. 
+ ErrorMessage *string + + // MatchingACLs contains all acls that were matched for this filter. + MatchingACLs []DeleteACLsResponseResultMatchingACL + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteACLsResponseResult. +func (v *DeleteACLsResponseResult) Default() { +} + +// NewDeleteACLsResponseResult returns a default DeleteACLsResponseResult +// This is a shortcut for creating a struct and calling Default yourself. +func NewDeleteACLsResponseResult() DeleteACLsResponseResult { + var v DeleteACLsResponseResult + v.Default() + return v +} + +// DeleteACLsResponse is a response for a DeleteACLsRequest. +type DeleteACLsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // Results contains a response to each requested filter. + Results []DeleteACLsResponseResult + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*DeleteACLsResponse) Key() int16 { return 31 } +func (*DeleteACLsResponse) MaxVersion() int16 { return 3 } +func (v *DeleteACLsResponse) SetVersion(version int16) { v.Version = version } +func (v *DeleteACLsResponse) GetVersion() int16 { return v.Version } +func (v *DeleteACLsResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *DeleteACLsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 1 } +func (v *DeleteACLsResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *DeleteACLsResponse) RequestKind() Request { return &DeleteACLsRequest{Version: v.Version} } + +func (v *DeleteACLsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Results + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.MatchingACLs + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.ResourceType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.ResourceName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 1 { + v := v.ResourcePatternType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.Principal + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Host 
+ if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Operation + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.PermissionType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DeleteACLsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DeleteACLsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DeleteACLsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Results + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DeleteACLsResponseResult, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + { + v := s.MatchingACLs + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DeleteACLsResponseResultMatchingACL, l)...) 
+ }
+ for i := int32(0); i < l; i++ {
+ v := &a[i]
+ v.Default()
+ s := v
+ {
+ v := b.Int16()
+ s.ErrorCode = v
+ }
+ {
+ var v *string
+ if isFlexible {
+ if unsafe {
+ v = b.UnsafeCompactNullableString()
+ } else {
+ v = b.CompactNullableString()
+ }
+ } else {
+ if unsafe {
+ v = b.UnsafeNullableString()
+ } else {
+ v = b.NullableString()
+ }
+ }
+ s.ErrorMessage = v
+ }
+ {
+ var t ACLResourceType
+ {
+ v := b.Int8()
+ t = ACLResourceType(v)
+ }
+ v := t
+ s.ResourceType = v
+ }
+ {
+ var v string
+ if unsafe {
+ if isFlexible {
+ v = b.UnsafeCompactString()
+ } else {
+ v = b.UnsafeString()
+ }
+ } else {
+ if isFlexible {
+ v = b.CompactString()
+ } else {
+ v = b.String()
+ }
+ }
+ s.ResourceName = v
+ }
+ if version >= 1 {
+ var t ACLResourcePatternType
+ {
+ v := b.Int8()
+ t = ACLResourcePatternType(v)
+ }
+ v := t
+ s.ResourcePatternType = v
+ }
+ {
+ var v string
+ if unsafe {
+ if isFlexible {
+ v = b.UnsafeCompactString()
+ } else {
+ v = b.UnsafeString()
+ }
+ } else {
+ if isFlexible {
+ v = b.CompactString()
+ } else {
+ v = b.String()
+ }
+ }
+ s.Principal = v
+ }
+ {
+ var v string
+ if unsafe {
+ if isFlexible {
+ v = b.UnsafeCompactString()
+ } else {
+ v = b.UnsafeString()
+ }
+ } else {
+ if isFlexible {
+ v = b.CompactString()
+ } else {
+ v = b.String()
+ }
+ }
+ s.Host = v
+ }
+ {
+ var t ACLOperation
+ {
+ v := b.Int8()
+ t = ACLOperation(v)
+ }
+ v := t
+ s.Operation = v
+ }
+ {
+ var t ACLPermissionType
+ {
+ v := b.Int8()
+ t = ACLPermissionType(v)
+ }
+ v := t
+ s.PermissionType = v
+ }
+ if isFlexible {
+ s.UnknownTags = internalReadTags(&b)
+ }
+ }
+ v = a
+ s.MatchingACLs = v
+ }
+ if isFlexible {
+ s.UnknownTags = internalReadTags(&b)
+ }
+ }
+ v = a
+ s.Results = v
+ }
+ if isFlexible {
+ s.UnknownTags = internalReadTags(&b)
+ }
+ return b.Complete()
+}
+
+// NewPtrDeleteACLsResponse returns a pointer to a default DeleteACLsResponse
+// This is a shortcut for creating a new(struct) and calling Default yourself.
+func NewPtrDeleteACLsResponse() *DeleteACLsResponse {
+ var v DeleteACLsResponse
+ v.Default()
+ return &v
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to DeleteACLsResponse.
+func (v *DeleteACLsResponse) Default() {
+}
+
+// NewDeleteACLsResponse returns a default DeleteACLsResponse
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewDeleteACLsResponse() DeleteACLsResponse {
+ var v DeleteACLsResponse
+ v.Default()
+ return v
+}
+
+type DescribeConfigsRequestResource struct {
+ // ResourceType is an enum corresponding to the type of config to describe.
+ ResourceType ConfigResourceType
+
+ // ResourceName is the name of config to describe.
+ //
+ // If the requested type is a topic, this corresponds to a topic name.
+ //
+ // If the requested type is a broker, this should either be empty or be
+ // the ID of the broker this request is issued to. If it is empty, this
+ // returns all broker configs, but only the dynamic configuration values.
+ // If a specific ID, this returns all broker config values.
+ ResourceName string
+
+ // ConfigNames is a list of config entries to return. Null requests all.
+ ConfigNames []string
+
+ // UnknownTags are tags Kafka sent that we do not know the purpose of.
+ UnknownTags Tags // v4+
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to DescribeConfigsRequestResource.
+func (v *DescribeConfigsRequestResource) Default() {
+}
+
+// NewDescribeConfigsRequestResource returns a default DescribeConfigsRequestResource
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewDescribeConfigsRequestResource() DescribeConfigsRequestResource {
+ var v DescribeConfigsRequestResource
+ v.Default()
+ return v
+}
+
+// DescribeConfigsRequest issues a request to describe configs that Kafka
+// currently has. These are the key/value pairs that one uses to configure
+// brokers and topics.
+type DescribeConfigsRequest struct {
+ // Version is the version of this message used with a Kafka broker.
+ Version int16
+
+ // Resources is a list of resources to describe.
+ Resources []DescribeConfigsRequestResource
+
+ // IncludeSynonyms signifies whether to return config entry synonyms for
+ // all config entries.
+ IncludeSynonyms bool // v1+
+
+ // IncludeDocumentation signifies whether to return documentation for
+ // config entries.
+ IncludeDocumentation bool // v3+
+
+ // UnknownTags are tags Kafka sent that we do not know the purpose of.
+ UnknownTags Tags // v4+
+}
+
+func (*DescribeConfigsRequest) Key() int16 { return 32 }
+func (*DescribeConfigsRequest) MaxVersion() int16 { return 4 }
+func (v *DescribeConfigsRequest) SetVersion(version int16) { v.Version = version }
+func (v *DescribeConfigsRequest) GetVersion() int16 { return v.Version }
+func (v *DescribeConfigsRequest) IsFlexible() bool { return v.Version >= 4 }
+func (v *DescribeConfigsRequest) ResponseKind() Response {
+ r := &DescribeConfigsResponse{Version: v.Version}
+ r.Default()
+ return r
+}
+
+// RequestWith requests v on r and returns the response or an error.
+// For sharded requests, the response may be merged and still return an error.
+// It is better to rely on client.RequestSharded than to rely on proper merging behavior.
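+//
+// A minimal usage sketch, assuming any Requestor (for example a *kgo.Client)
+// and an in-scope ctx; the enum constant shown is assumed from this package's
+// generated enum definitions:
+//
+//     req := NewPtrDescribeConfigsRequest()
+//     res := NewDescribeConfigsRequestResource()
+//     res.ResourceType = ConfigResourceTypeTopic
+//     res.ResourceName = "foo"
+//     res.ConfigNames = nil // null requests every config entry
+//     req.Resources = append(req.Resources, res)
+//     req.IncludeSynonyms = true
+//     resp, err := req.RequestWith(ctx, client)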
+func (v *DescribeConfigsRequest) RequestWith(ctx context.Context, r Requestor) (*DescribeConfigsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*DescribeConfigsResponse) + return resp, err +} + +func (v *DescribeConfigsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + { + v := v.Resources + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ResourceType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.ResourceName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ConfigNames + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + for i := range v { + v := v[i] + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 1 { + v := v.IncludeSynonyms + dst = kbin.AppendBool(dst, v) + } + if version >= 3 { + v := v.IncludeDocumentation + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeConfigsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeConfigsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeConfigsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + s := v + { + v := s.Resources + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeConfigsRequestResource, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var t ConfigResourceType + { + v := b.Int8() + t = ConfigResourceType(v) + } + v := t + s.ResourceType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ResourceName = v + } + { + v := s.ConfigNames + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 0 || l == 0 { + a = []string{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]string, l)...) 
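+ // ConfigNames is nullable on the wire: a negative (null) length leaves a
+ // fresh slice nil, while an explicit zero length was reset to an empty,
+ // non-nil slice above, preserving the null-versus-empty distinction.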
+ } + for i := int32(0); i < l; i++ { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + a[i] = v + } + v = a + s.ConfigNames = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Resources = v + } + if version >= 1 { + v := b.Bool() + s.IncludeSynonyms = v + } + if version >= 3 { + v := b.Bool() + s.IncludeDocumentation = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeConfigsRequest returns a pointer to a default DescribeConfigsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeConfigsRequest() *DescribeConfigsRequest { + var v DescribeConfigsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeConfigsRequest. +func (v *DescribeConfigsRequest) Default() { +} + +// NewDescribeConfigsRequest returns a default DescribeConfigsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeConfigsRequest() DescribeConfigsRequest { + var v DescribeConfigsRequest + v.Default() + return v +} + +type DescribeConfigsResponseResourceConfigConfigSynonym struct { + Name string + + Value *string + + Source ConfigSource + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeConfigsResponseResourceConfigConfigSynonym. +func (v *DescribeConfigsResponseResourceConfigConfigSynonym) Default() { +} + +// NewDescribeConfigsResponseResourceConfigConfigSynonym returns a default DescribeConfigsResponseResourceConfigConfigSynonym +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeConfigsResponseResourceConfigConfigSynonym() DescribeConfigsResponseResourceConfigConfigSynonym { + var v DescribeConfigsResponseResourceConfigConfigSynonym + v.Default() + return v +} + +type DescribeConfigsResponseResourceConfig struct { + // Name is a key this entry corresponds to (e.g. segment.bytes). + Name string + + // Value is the value for this config key. If the key is sensitive, + // the value will be null. + Value *string + + // ReadOnly signifies whether this is not a dynamic config option. + // + // Note that this field is not always correct, and you may need to check + // whether the Source is any dynamic enum. See franz-go#91 for more details. + ReadOnly bool + + // IsDefault is whether this is a default config option. This has been + // replaced in favor of Source. + IsDefault bool + + // Source is where this config entry is from. + // + // This field has a default of -1. + Source ConfigSource // v1+ + + // IsSensitive signifies whether this is a sensitive config key, which + // is either a password or an unknown type. + IsSensitive bool + + // ConfigSynonyms contains fallback key/value pairs for this config + // entry, in order of preference. That is, if a config entry is both + // dynamically configured and has a default, the top level return will be + // the dynamic configuration, while its "synonym" will be the default. + ConfigSynonyms []DescribeConfigsResponseResourceConfigConfigSynonym // v1+ + + // ConfigType specifies the configuration data type. 
+ ConfigType ConfigType // v3+ + + // Documentation is optional documentation for the config entry. + Documentation *string // v3+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeConfigsResponseResourceConfig. +func (v *DescribeConfigsResponseResourceConfig) Default() { + v.Source = -1 +} + +// NewDescribeConfigsResponseResourceConfig returns a default DescribeConfigsResponseResourceConfig +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeConfigsResponseResourceConfig() DescribeConfigsResponseResourceConfig { + var v DescribeConfigsResponseResourceConfig + v.Default() + return v +} + +type DescribeConfigsResponseResource struct { + // ErrorCode is the error code returned for describing configs. + // + // INVALID_REQUEST is returned if asking to describe an invalid resource + // type. + // + // CLUSTER_AUTHORIZATION_FAILED is returned if asking to describe broker + // configs but the client is not authorized to do so. + // + // TOPIC_AUTHORIZATION_FAILED is returned if asking to describe topic + // configs but the client is not authorized to do so. + // + // INVALID_TOPIC_EXCEPTION is returned if the requested topic was invalid. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if the broker does not know of + // the requested topic. + ErrorCode int16 + + // ErrorMessage is an informative message if the describe config failed. + ErrorMessage *string + + // ResourceType is the enum corresponding to the type of described config. + ResourceType ConfigResourceType + + // ResourceName is the name corresponding to the describe config request. + ResourceName string + + // Configs contains information about key/value config pairs for + // the requested resource. + Configs []DescribeConfigsResponseResourceConfig + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v4+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeConfigsResponseResource. +func (v *DescribeConfigsResponseResource) Default() { +} + +// NewDescribeConfigsResponseResource returns a default DescribeConfigsResponseResource +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeConfigsResponseResource() DescribeConfigsResponseResource { + var v DescribeConfigsResponseResource + v.Default() + return v +} + +// DescribeConfigsResponse is returned from a DescribeConfigsRequest. +type DescribeConfigsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 2. + ThrottleMillis int32 + + // Resources are responses for each resource in the describe config request. + Resources []DescribeConfigsResponseResource + + // UnknownTags are tags Kafka sent that we do not know the purpose of.
+ UnknownTags Tags // v4+ +} + +func (*DescribeConfigsResponse) Key() int16 { return 32 } +func (*DescribeConfigsResponse) MaxVersion() int16 { return 4 } +func (v *DescribeConfigsResponse) SetVersion(version int16) { v.Version = version } +func (v *DescribeConfigsResponse) GetVersion() int16 { return v.Version } +func (v *DescribeConfigsResponse) IsFlexible() bool { return v.Version >= 4 } +func (v *DescribeConfigsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 2 } +func (v *DescribeConfigsResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *DescribeConfigsResponse) RequestKind() Request { + return &DescribeConfigsRequest{Version: v.Version} +} + +func (v *DescribeConfigsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Resources + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.ResourceType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.ResourceName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Configs + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Value + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.ReadOnly + dst = kbin.AppendBool(dst, v) + } + if version >= 0 && version <= 0 { + v := v.IsDefault + dst = kbin.AppendBool(dst, v) + } + if version >= 1 { + v := v.Source + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.IsSensitive + dst = kbin.AppendBool(dst, v) + } + if version >= 1 { + v := v.ConfigSynonyms + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Value + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Source + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 3 { + v := v.ConfigType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + if version >= 3 { + v := v.Documentation + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + 
dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeConfigsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeConfigsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeConfigsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 4 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Resources + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeConfigsResponseResource, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + { + var t ConfigResourceType + { + v := b.Int8() + t = ConfigResourceType(v) + } + v := t + s.ResourceType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ResourceName = v + } + { + v := s.Configs + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeConfigsResponseResourceConfig, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Value = v + } + { + v := b.Bool() + s.ReadOnly = v + } + if version >= 0 && version <= 0 { + v := b.Bool() + s.IsDefault = v + } + if version >= 1 { + var t ConfigSource + { + v := b.Int8() + t = ConfigSource(v) + } + v := t + s.Source = v + } + { + v := b.Bool() + s.IsSensitive = v + } + if version >= 1 { + v := s.ConfigSynonyms + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeConfigsResponseResourceConfigConfigSynonym, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Value = v + } + { + var t ConfigSource + { + v := b.Int8() + t = ConfigSource(v) + } + v := t + s.Source = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.ConfigSynonyms = v + } + if version >= 3 { + var t ConfigType + { + v := b.Int8() + t = ConfigType(v) + } + v := t + s.ConfigType = v + } + if version >= 3 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Documentation = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Configs = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Resources = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeConfigsResponse returns a pointer to a default DescribeConfigsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeConfigsResponse() *DescribeConfigsResponse { + var v DescribeConfigsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeConfigsResponse. +func (v *DescribeConfigsResponse) Default() { +} + +// NewDescribeConfigsResponse returns a default DescribeConfigsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeConfigsResponse() DescribeConfigsResponse { + var v DescribeConfigsResponse + v.Default() + return v +} + +type AlterConfigsRequestResourceConfig struct { + // Name is a key to set (e.g. segment.bytes). + Name string + + // Value is a value to set for the key (e.g. 10). + Value *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterConfigsRequestResourceConfig. +func (v *AlterConfigsRequestResourceConfig) Default() { +} + +// NewAlterConfigsRequestResourceConfig returns a default AlterConfigsRequestResourceConfig +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterConfigsRequestResourceConfig() AlterConfigsRequestResourceConfig { + var v AlterConfigsRequestResourceConfig + v.Default() + return v +} + +type AlterConfigsRequestResource struct { + // ResourceType is an enum corresponding to the type of config to alter. + // The only two valid values are 2 (for topic) and 4 (for broker). + ResourceType ConfigResourceType + + // ResourceName is the name of config to alter. + // + // If the requested type is a topic, this corresponds to a topic name. + // + // If the requested type is a broker, this should either be empty or be + // the ID of the broker this request is issued to. If it is empty, this + // updates all broker configs.
If it is a specific ID, this updates just that + // broker. Using a specific ID also ensures that brokers reload config + // or secret files even if the file path has not changed. Lastly, password + // config options can only be defined on a per broker basis. + // + // If the type is broker logger, this must be a broker ID. + ResourceName string + + // Configs contains key/value config pairs to set on the resource. + Configs []AlterConfigsRequestResourceConfig + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterConfigsRequestResource. +func (v *AlterConfigsRequestResource) Default() { +} + +// NewAlterConfigsRequestResource returns a default AlterConfigsRequestResource +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterConfigsRequestResource() AlterConfigsRequestResource { + var v AlterConfigsRequestResource + v.Default() + return v +} + +// AlterConfigsRequest issues a request to alter either topic or broker +// configs. +// +// Note that to alter configs, you must specify the whole config on every +// request. All existing non-static values will be removed. This means that +// to add one key/value to a config, you must describe the config and then +// issue an alter request with the current config plus the new key value. +// This also means that dynamic sensitive values, which are not returned +// in describe configs, will be lost. +// +// To fix this problem, the AlterConfigs request / response was deprecated +// in Kafka 2.3.0 in favor of the new IncrementalAlterConfigs request / response. +// See KIP-339 for more details. +type AlterConfigsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Resources is an array of configs to alter. + Resources []AlterConfigsRequestResource + + // ValidateOnly validates the request but does not apply it. + ValidateOnly bool + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*AlterConfigsRequest) Key() int16 { return 33 } +func (*AlterConfigsRequest) MaxVersion() int16 { return 2 } +func (v *AlterConfigsRequest) SetVersion(version int16) { v.Version = version } +func (v *AlterConfigsRequest) GetVersion() int16 { return v.Version } +func (v *AlterConfigsRequest) IsFlexible() bool { return v.Version >= 2 } +func (v *AlterConfigsRequest) ResponseKind() Response { + r := &AlterConfigsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith issues v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior.
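// Editor's sketch: the read-modify-write cycle that the AlterConfigsRequest
// doc above requires, using the constructors defined in this file. This is
// not part of the generated API: it assumes a Requestor implementation (for
// example, franz-go's *kgo.Client), and the topic, key, and value parameters
// are hypothetical. Error handling is trimmed for brevity.
func alterOneTopicConfig(ctx context.Context, r Requestor, topic, key, value string) error {
	// Describe first: AlterConfigs replaces the entire config set, so every
	// existing dynamic value must be echoed back or it will be removed.
	dreq := NewPtrDescribeConfigsRequest()
	res := NewDescribeConfigsRequestResource()
	res.ResourceType = 2 // 2 = topic
	res.ResourceName = topic
	dreq.Resources = append(dreq.Resources, res)
	dresp, err := dreq.RequestWith(ctx, r)
	if err != nil || len(dresp.Resources) == 0 {
		return err
	}

	areq := NewPtrAlterConfigsRequest()
	ares := NewAlterConfigsRequestResource()
	ares.ResourceType = 2
	ares.ResourceName = topic
	for _, cfg := range dresp.Resources[0].Configs {
		// Keep only existing dynamic topic values (Source 1 is assumed to be
		// DYNAMIC_TOPIC_CONFIG), skipping the key we are about to replace.
		if cfg.Source != 1 || cfg.Name == key {
			continue
		}
		c := NewAlterConfigsRequestResourceConfig()
		c.Name, c.Value = cfg.Name, cfg.Value
		ares.Configs = append(ares.Configs, c)
	}
	c := NewAlterConfigsRequestResourceConfig()
	c.Name, c.Value = key, &value
	ares.Configs = append(ares.Configs, c)
	areq.Resources = append(areq.Resources, ares)
	_, err = areq.RequestWith(ctx, r)
	return err
}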
+func (v *AlterConfigsRequest) RequestWith(ctx context.Context, r Requestor) (*AlterConfigsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*AlterConfigsResponse) + return resp, err +} + +func (v *AlterConfigsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.Resources + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ResourceType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.ResourceName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Configs + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Value + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.ValidateOnly + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AlterConfigsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AlterConfigsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AlterConfigsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := s.Resources + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterConfigsRequestResource, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var t ConfigResourceType + { + v := b.Int8() + t = ConfigResourceType(v) + } + v := t + s.ResourceType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ResourceName = v + } + { + v := s.Configs + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterConfigsRequestResourceConfig, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Value = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Configs = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Resources = v + } + { + v := b.Bool() + s.ValidateOnly = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAlterConfigsRequest returns a pointer to a default AlterConfigsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAlterConfigsRequest() *AlterConfigsRequest { + var v AlterConfigsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterConfigsRequest. +func (v *AlterConfigsRequest) Default() { +} + +// NewAlterConfigsRequest returns a default AlterConfigsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterConfigsRequest() AlterConfigsRequest { + var v AlterConfigsRequest + v.Default() + return v +} + +type AlterConfigsResponseResource struct { + // ErrorCode is the error code returned for altering configs. + // + // CLUSTER_AUTHORIZATION_FAILED is returned if asking to alter broker + // configs but the client is not authorized to do so. + // + // TOPIC_AUTHORIZATION_FAILED is returned if asking to alter topic + // configs but the client is not authorized to do so. + // + // INVALID_TOPIC_EXCEPTION is returned if the requested topic was invalid. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if the broker does not know of + // the requested topic. + // + // INVALID_REQUEST is returned if the requested config is invalid or if + // asking Kafka to alter an invalid resource. + ErrorCode int16 + + // ErrorMessage is an informative message if the alter config failed. + ErrorMessage *string + + // ResourceType is the enum corresponding to the type of altered config. + ResourceType ConfigResourceType + + // ResourceName is the name corresponding to the alter config request. + ResourceName string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterConfigsResponseResource. +func (v *AlterConfigsResponseResource) Default() { +} + +// NewAlterConfigsResponseResource returns a default AlterConfigsResponseResource +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterConfigsResponseResource() AlterConfigsResponseResource { + var v AlterConfigsResponseResource + v.Default() + return v +} + +// AlterConfigsResponse is returned from an AlterConfigsRequest. +type AlterConfigsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. 
+ // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // Resources are responses for each resource in the alter request. + Resources []AlterConfigsResponseResource + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*AlterConfigsResponse) Key() int16 { return 33 } +func (*AlterConfigsResponse) MaxVersion() int16 { return 2 } +func (v *AlterConfigsResponse) SetVersion(version int16) { v.Version = version } +func (v *AlterConfigsResponse) GetVersion() int16 { return v.Version } +func (v *AlterConfigsResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *AlterConfigsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 1 } +func (v *AlterConfigsResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *AlterConfigsResponse) RequestKind() Request { return &AlterConfigsRequest{Version: v.Version} } + +func (v *AlterConfigsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Resources + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.ResourceType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.ResourceName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AlterConfigsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AlterConfigsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AlterConfigsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Resources + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterConfigsResponseResource, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + { + var t ConfigResourceType + { + v := b.Int8() + t = ConfigResourceType(v) + } + v := t + s.ResourceType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ResourceName = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Resources = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAlterConfigsResponse returns a pointer to a default AlterConfigsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAlterConfigsResponse() *AlterConfigsResponse { + var v AlterConfigsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterConfigsResponse. +func (v *AlterConfigsResponse) Default() { +} + +// NewAlterConfigsResponse returns a default AlterConfigsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterConfigsResponse() AlterConfigsResponse { + var v AlterConfigsResponse + v.Default() + return v +} + +type AlterReplicaLogDirsRequestDirTopic struct { + // Topic is a topic to move. + Topic string + + // Partitions contains partitions for the topic to move. + Partitions []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterReplicaLogDirsRequestDirTopic. +func (v *AlterReplicaLogDirsRequestDirTopic) Default() { +} + +// NewAlterReplicaLogDirsRequestDirTopic returns a default AlterReplicaLogDirsRequestDirTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterReplicaLogDirsRequestDirTopic() AlterReplicaLogDirsRequestDirTopic { + var v AlterReplicaLogDirsRequestDirTopic + v.Default() + return v +} + +type AlterReplicaLogDirsRequestDir struct { + // Dir is an absolute path where everything listed below should + // end up. + Dir string + + // Topics contains topics to move to the above log directory. + Topics []AlterReplicaLogDirsRequestDirTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterReplicaLogDirsRequestDir. +func (v *AlterReplicaLogDirsRequestDir) Default() { +} + +// NewAlterReplicaLogDirsRequestDir returns a default AlterReplicaLogDirsRequestDir +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterReplicaLogDirsRequestDir() AlterReplicaLogDirsRequestDir { + var v AlterReplicaLogDirsRequestDir + v.Default() + return v +} + +// AlterReplicaLogDirsRequest requests for log directories to be moved +// within Kafka. +// +// This is primarily useful for moving directories between disks. 
+type AlterReplicaLogDirsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Dirs contains absolute paths of where you want things to end up. + Dirs []AlterReplicaLogDirsRequestDir + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*AlterReplicaLogDirsRequest) Key() int16 { return 34 } +func (*AlterReplicaLogDirsRequest) MaxVersion() int16 { return 2 } +func (v *AlterReplicaLogDirsRequest) SetVersion(version int16) { v.Version = version } +func (v *AlterReplicaLogDirsRequest) GetVersion() int16 { return v.Version } +func (v *AlterReplicaLogDirsRequest) IsFlexible() bool { return v.Version >= 2 } +func (v *AlterReplicaLogDirsRequest) ResponseKind() Response { + r := &AlterReplicaLogDirsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith issues v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *AlterReplicaLogDirsRequest) RequestWith(ctx context.Context, r Requestor) (*AlterReplicaLogDirsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*AlterReplicaLogDirsResponse) + return resp, err +} + +func (v *AlterReplicaLogDirsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.Dirs + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Dir + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AlterReplicaLogDirsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AlterReplicaLogDirsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AlterReplicaLogDirsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := s.Dirs + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterReplicaLogDirsRequestDir, l)...)
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Dir = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterReplicaLogDirsRequestDirTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Dirs = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAlterReplicaLogDirsRequest returns a pointer to a default AlterReplicaLogDirsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAlterReplicaLogDirsRequest() *AlterReplicaLogDirsRequest { + var v AlterReplicaLogDirsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterReplicaLogDirsRequest. +func (v *AlterReplicaLogDirsRequest) Default() { +} + +// NewAlterReplicaLogDirsRequest returns a default AlterReplicaLogDirsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterReplicaLogDirsRequest() AlterReplicaLogDirsRequest { + var v AlterReplicaLogDirsRequest + v.Default() + return v +} + +type AlterReplicaLogDirsResponseTopicPartition struct { + // Partition is the partition this array slot corresponds to. + Partition int32 + + // CLUSTER_AUTHORIZATION_FAILED is returned if the client is not + // authorized to alter replica dirs. + // + // LOG_DIR_NOT_FOUND is returned when the requested log directory + // is not in the broker config. + // + // KAFKA_STORAGE_EXCEPTION is returned when destination directory or + // requested replica is offline. + // + // REPLICA_NOT_AVAILABLE is returned if the replica does not exist + // yet. + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterReplicaLogDirsResponseTopicPartition. +func (v *AlterReplicaLogDirsResponseTopicPartition) Default() { +} + +// NewAlterReplicaLogDirsResponseTopicPartition returns a default AlterReplicaLogDirsResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. 
+func NewAlterReplicaLogDirsResponseTopicPartition() AlterReplicaLogDirsResponseTopicPartition { + var v AlterReplicaLogDirsResponseTopicPartition + v.Default() + return v +} + +type AlterReplicaLogDirsResponseTopic struct { + // Topic is the topic this array slot corresponds to. + Topic string + + // Partitions contains responses to each partition that was requested + // to move. + Partitions []AlterReplicaLogDirsResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterReplicaLogDirsResponseTopic. +func (v *AlterReplicaLogDirsResponseTopic) Default() { +} + +// NewAlterReplicaLogDirsResponseTopic returns a default AlterReplicaLogDirsResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterReplicaLogDirsResponseTopic() AlterReplicaLogDirsResponseTopic { + var v AlterReplicaLogDirsResponseTopic + v.Default() + return v +} + +// AlterReplicaLogDirsResponse is returned from an AlterReplicaLogDirsRequest. +type AlterReplicaLogDirsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // Topics contains responses to each topic that had partitions requested + // for moving. + Topics []AlterReplicaLogDirsResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v2+ +} + +func (*AlterReplicaLogDirsResponse) Key() int16 { return 34 } +func (*AlterReplicaLogDirsResponse) MaxVersion() int16 { return 2 } +func (v *AlterReplicaLogDirsResponse) SetVersion(version int16) { v.Version = version } +func (v *AlterReplicaLogDirsResponse) GetVersion() int16 { return v.Version } +func (v *AlterReplicaLogDirsResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *AlterReplicaLogDirsResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 1 +} + +func (v *AlterReplicaLogDirsResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *AlterReplicaLogDirsResponse) RequestKind() Request { + return &AlterReplicaLogDirsRequest{Version: v.Version} +} + +func (v *AlterReplicaLogDirsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AlterReplicaLogDirsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AlterReplicaLogDirsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AlterReplicaLogDirsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterReplicaLogDirsResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterReplicaLogDirsResponseTopicPartition, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAlterReplicaLogDirsResponse returns a pointer to a default AlterReplicaLogDirsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAlterReplicaLogDirsResponse() *AlterReplicaLogDirsResponse { + var v AlterReplicaLogDirsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterReplicaLogDirsResponse. +func (v *AlterReplicaLogDirsResponse) Default() { +} + +// NewAlterReplicaLogDirsResponse returns a default AlterReplicaLogDirsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterReplicaLogDirsResponse() AlterReplicaLogDirsResponse { + var v AlterReplicaLogDirsResponse + v.Default() + return v +} + +type DescribeLogDirsRequestTopic struct { + // Topic is a topic to describe the log dir of. + Topic string + + // Partitions contains topic partitions to describe the log dirs of. + Partitions []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeLogDirsRequestTopic. +func (v *DescribeLogDirsRequestTopic) Default() { +} + +// NewDescribeLogDirsRequestTopic returns a default DescribeLogDirsRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeLogDirsRequestTopic() DescribeLogDirsRequestTopic { + var v DescribeLogDirsRequestTopic + v.Default() + return v +} + +// DescribeLogDirsRequest requests directory information for topic partitions. +// This request was added in support of KIP-113. +type DescribeLogDirsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Topics is an array of topics to describe the log dirs of. If this is + // null, the response includes all topics and all of their partitions. + Topics []DescribeLogDirsRequestTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*DescribeLogDirsRequest) Key() int16 { return 35 } +func (*DescribeLogDirsRequest) MaxVersion() int16 { return 4 } +func (v *DescribeLogDirsRequest) SetVersion(version int16) { v.Version = version } +func (v *DescribeLogDirsRequest) GetVersion() int16 { return v.Version } +func (v *DescribeLogDirsRequest) IsFlexible() bool { return v.Version >= 2 } +func (v *DescribeLogDirsRequest) ResponseKind() Response { + r := &DescribeLogDirsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith issues v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior.
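// Editor's sketch: the null-Topics behavior documented above. Leaving Topics
// nil (rather than empty) asks the broker to describe every partition in
// every log directory; a Requestor implementation is assumed.
func describeAllLogDirs(ctx context.Context, r Requestor) (*DescribeLogDirsResponse, error) {
	req := NewPtrDescribeLogDirsRequest()
	req.Topics = nil // nil means "all topics and partitions"
	return req.RequestWith(ctx, r)
}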
+func (v *DescribeLogDirsRequest) RequestWith(ctx context.Context, r Requestor) (*DescribeLogDirsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*DescribeLogDirsResponse) + return resp, err +} + +func (v *DescribeLogDirsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeLogDirsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeLogDirsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeLogDirsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 0 || l == 0 { + a = []DescribeLogDirsRequestTopic{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeLogDirsRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeLogDirsRequest returns a pointer to a default DescribeLogDirsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeLogDirsRequest() *DescribeLogDirsRequest { + var v DescribeLogDirsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeLogDirsRequest. +func (v *DescribeLogDirsRequest) Default() { +} + +// NewDescribeLogDirsRequest returns a default DescribeLogDirsRequest +// This is a shortcut for creating a struct and calling Default yourself. 
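// Editor's sketch: scoping the same request to specific partitions instead
// of passing a nil Topics slice. The topic and partition arguments are
// caller-supplied placeholders; a Requestor implementation is assumed.
func describeOneTopicLogDirs(ctx context.Context, r Requestor, topic string, partitions []int32) (*DescribeLogDirsResponse, error) {
	t := NewDescribeLogDirsRequestTopic()
	t.Topic = topic
	t.Partitions = partitions
	req := NewPtrDescribeLogDirsRequest()
	req.Topics = append(req.Topics, t)
	return req.RequestWith(ctx, r)
}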
+func NewDescribeLogDirsRequest() DescribeLogDirsRequest { + var v DescribeLogDirsRequest + v.Default() + return v +} + +type DescribeLogDirsResponseDirTopicPartition struct { + // Partition is a partition ID. + Partition int32 + + // Size is the total size of the log segments of this partition, in bytes. + Size int64 + + // OffsetLag is how far behind the log end offset is compared to + // the partition's high watermark (if this is the current log for + // the partition) or compared to the current replica's log end + // offset (if this is the future log for the partition). + // + // The math is, + // + // if IsFuture, localLogEndOffset - futureLogEndOffset. + // + // otherwise, max(localHighWatermark - logEndOffset, 0). + OffsetLag int64 + + // IsFuture is true if this replica was created by an + // AlterReplicaLogDirsRequest and will replace the current log of the + // replica in the future. + IsFuture bool + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeLogDirsResponseDirTopicPartition. +func (v *DescribeLogDirsResponseDirTopicPartition) Default() { +} + +// NewDescribeLogDirsResponseDirTopicPartition returns a default DescribeLogDirsResponseDirTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeLogDirsResponseDirTopicPartition() DescribeLogDirsResponseDirTopicPartition { + var v DescribeLogDirsResponseDirTopicPartition + v.Default() + return v +} + +type DescribeLogDirsResponseDirTopic struct { + // Topic is the name of a Kafka topic. + Topic string + + // Partitions is the set of queried partitions for a topic that are + // within a log directory. + Partitions []DescribeLogDirsResponseDirTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeLogDirsResponseDirTopic. +func (v *DescribeLogDirsResponseDirTopic) Default() { +} + +// NewDescribeLogDirsResponseDirTopic returns a default DescribeLogDirsResponseDirTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeLogDirsResponseDirTopic() DescribeLogDirsResponseDirTopic { + var v DescribeLogDirsResponseDirTopic + v.Default() + return v +} + +type DescribeLogDirsResponseDir struct { + // ErrorCode is the error code returned for describing log dirs. + // + // KAFKA_STORAGE_ERROR is returned if the log directory is offline. + ErrorCode int16 + + // Dir is the absolute path of a log directory. + Dir string + + // Topics is an array of topics within a log directory. + Topics []DescribeLogDirsResponseDirTopic + + // TotalBytes is the total size in bytes of the volume the log directory is + // in. + // + // This field has a default of -1. + TotalBytes int64 // v4+ + + // UsableBytes is the usable size in bytes of the volume the log directory + // is in. + // + // This field has a default of -1. + UsableBytes int64 // v4+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeLogDirsResponseDir.
+func (v *DescribeLogDirsResponseDir) Default() { + v.TotalBytes = -1 + v.UsableBytes = -1 +} + +// NewDescribeLogDirsResponseDir returns a default DescribeLogDirsResponseDir +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeLogDirsResponseDir() DescribeLogDirsResponseDir { + var v DescribeLogDirsResponseDir + v.Default() + return v +} + +// DescribeLogDirsResponse is returned from a DescribeLogDirsRequest. +type DescribeLogDirsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // The error code, or 0 if there was no error. + ErrorCode int16 // v3+ + + // Dirs pairs log directories with the topics and partitions that are + // stored in those directories. + Dirs []DescribeLogDirsResponseDir + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*DescribeLogDirsResponse) Key() int16 { return 35 } +func (*DescribeLogDirsResponse) MaxVersion() int16 { return 4 } +func (v *DescribeLogDirsResponse) SetVersion(version int16) { v.Version = version } +func (v *DescribeLogDirsResponse) GetVersion() int16 { return v.Version } +func (v *DescribeLogDirsResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *DescribeLogDirsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 1 } +func (v *DescribeLogDirsResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *DescribeLogDirsResponse) RequestKind() Request { + return &DescribeLogDirsRequest{Version: v.Version} +} + +func (v *DescribeLogDirsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + if version >= 3 { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Dirs + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Dir + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Size + dst = kbin.AppendInt64(dst, v) + } + { + v := v.OffsetLag + dst = kbin.AppendInt64(dst, v) + } + { + v := v.IsFuture + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } +
} + if version >= 4 { + v := v.TotalBytes + dst = kbin.AppendInt64(dst, v) + } + if version >= 4 { + v := v.UsableBytes + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeLogDirsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeLogDirsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeLogDirsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + if version >= 3 { + v := b.Int16() + s.ErrorCode = v + } + { + v := s.Dirs + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeLogDirsResponseDir, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Dir = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeLogDirsResponseDirTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeLogDirsResponseDirTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int64() + s.Size = v + } + { + v := b.Int64() + s.OffsetLag = v + } + { + v := b.Bool() + s.IsFuture = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if version >= 4 { + v := b.Int64() + s.TotalBytes = v + } + if version >= 4 { + v := b.Int64() + s.UsableBytes = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Dirs = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeLogDirsResponse returns a pointer to a default DescribeLogDirsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeLogDirsResponse() *DescribeLogDirsResponse { + var v DescribeLogDirsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeLogDirsResponse. 
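// Editor's sketch: walking a response to total partition sizes per log
// directory. Offline directories report a nonzero ErrorCode and carry no
// usable topic data, and TotalBytes/UsableBytes stay at their -1 defaults
// on pre-v4 responses.
func sizePerDir(resp *DescribeLogDirsResponse) map[string]int64 {
	sizes := make(map[string]int64)
	for _, d := range resp.Dirs {
		if d.ErrorCode != 0 { // e.g. KAFKA_STORAGE_ERROR: the directory is offline
			continue
		}
		for _, t := range d.Topics {
			for _, p := range t.Partitions {
				sizes[d.Dir] += p.Size
			}
		}
	}
	return sizes
}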
+func (v *DescribeLogDirsResponse) Default() { +} + +// NewDescribeLogDirsResponse returns a default DescribeLogDirsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeLogDirsResponse() DescribeLogDirsResponse { + var v DescribeLogDirsResponse + v.Default() + return v +} + +// SASLAuthenticate continues a sasl authentication flow. Prior to Kafka 1.0.0, +// authenticating with sasl involved sending raw blobs of data back and forth. +// After, those blobs are wrapped in a SASLAuthenticateRequest. The benefit of +// this wrapping is that Kafka can indicate errors in the response, rather than +// just closing the connection. Additionally, the response allows for further +// extension fields. +type SASLAuthenticateRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // SASLAuthBytes contains bytes for a SASL client request. + SASLAuthBytes []byte + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*SASLAuthenticateRequest) Key() int16 { return 36 } +func (*SASLAuthenticateRequest) MaxVersion() int16 { return 2 } +func (v *SASLAuthenticateRequest) SetVersion(version int16) { v.Version = version } +func (v *SASLAuthenticateRequest) GetVersion() int16 { return v.Version } +func (v *SASLAuthenticateRequest) IsFlexible() bool { return v.Version >= 2 } +func (v *SASLAuthenticateRequest) ResponseKind() Response { + r := &SASLAuthenticateResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *SASLAuthenticateRequest) RequestWith(ctx context.Context, r Requestor) (*SASLAuthenticateResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*SASLAuthenticateResponse) + return resp, err +} + +func (v *SASLAuthenticateRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.SASLAuthBytes + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *SASLAuthenticateRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *SASLAuthenticateRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *SASLAuthenticateRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.SASLAuthBytes = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrSASLAuthenticateRequest returns a pointer to a default SASLAuthenticateRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrSASLAuthenticateRequest() *SASLAuthenticateRequest { + var v SASLAuthenticateRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to SASLAuthenticateRequest. 
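// Editor's note: a protocol-level sketch of one SASL round trip using the types
// above; not part of the vendored code. A client such as franz-go's kgo normally
// drives this exchange internally during connection setup, so this is purely
// illustrative. clientStep (the next blob from your SASL mechanism, e.g. SCRAM)
// and conn (a Requestor over a half-authenticated connection) are assumptions.
//
//	req := kmsg.NewPtrSASLAuthenticateRequest()
//	req.SASLAuthBytes = clientStep
//	resp, err := req.RequestWith(ctx, conn)
//	if err != nil {
//		return err
//	}
//	if resp.ErrorCode != 0 {
//		msg := "<nil>"
//		if resp.ErrorMessage != nil {
//			msg = *resp.ErrorMessage
//		}
//		return fmt.Errorf("sasl step failed (code %d): %s", resp.ErrorCode, msg)
//	}
//	serverStep := resp.SASLAuthBytes // feed back into the SASL mechanism
//	_ = serverStep
//	// On v1+, resp.SessionLifetimeMillis says when the broker expects this
//	// connection to begin reauthentication.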
+func (v *SASLAuthenticateRequest) Default() { +} + +// NewSASLAuthenticateRequest returns a default SASLAuthenticateRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewSASLAuthenticateRequest() SASLAuthenticateRequest { + var v SASLAuthenticateRequest + v.Default() + return v +} + +// SASLAuthenticateResponse is returned for a SASLAuthenticateRequest. +type SASLAuthenticateResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ErrorCode is a potential error. + ErrorCode int16 + + // ErrorMessage can contain a message for an error. + ErrorMessage *string + + // SASLAuthBytes is the server challenge continuing SASL flow. + SASLAuthBytes []byte + + // SessionLifetimeMillis, added in Kafka 2.2.0, is how long the SASL + // authentication is valid for. This timeout is only enforced if the request + // was v1. After this timeout, Kafka expects the next bytes on the wire to + // begin reauthentication. Otherwise, Kafka closes the connection. + SessionLifetimeMillis int64 // v1+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*SASLAuthenticateResponse) Key() int16 { return 36 } +func (*SASLAuthenticateResponse) MaxVersion() int16 { return 2 } +func (v *SASLAuthenticateResponse) SetVersion(version int16) { v.Version = version } +func (v *SASLAuthenticateResponse) GetVersion() int16 { return v.Version } +func (v *SASLAuthenticateResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *SASLAuthenticateResponse) RequestKind() Request { + return &SASLAuthenticateRequest{Version: v.Version} +} + +func (v *SASLAuthenticateResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.SASLAuthBytes + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + if version >= 1 { + v := v.SessionLifetimeMillis + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *SASLAuthenticateResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *SASLAuthenticateResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *SASLAuthenticateResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.SASLAuthBytes = v + } + if version >= 1 { + v := b.Int64() + s.SessionLifetimeMillis = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrSASLAuthenticateResponse returns a pointer to a default SASLAuthenticateResponse +// This is a shortcut for creating a new(struct) and 
calling Default yourself. +func NewPtrSASLAuthenticateResponse() *SASLAuthenticateResponse { + var v SASLAuthenticateResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to SASLAuthenticateResponse. +func (v *SASLAuthenticateResponse) Default() { +} + +// NewSASLAuthenticateResponse returns a default SASLAuthenticateResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewSASLAuthenticateResponse() SASLAuthenticateResponse { + var v SASLAuthenticateResponse + v.Default() + return v +} + +type CreatePartitionsRequestTopicAssignment struct { + // Replicas are replicas to assign a new partition to. + Replicas []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreatePartitionsRequestTopicAssignment. +func (v *CreatePartitionsRequestTopicAssignment) Default() { +} + +// NewCreatePartitionsRequestTopicAssignment returns a default CreatePartitionsRequestTopicAssignment +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreatePartitionsRequestTopicAssignment() CreatePartitionsRequestTopicAssignment { + var v CreatePartitionsRequestTopicAssignment + v.Default() + return v +} + +type CreatePartitionsRequestTopic struct { + // Topic is a topic for which to create additional partitions. + Topic string + + // Count is the final count of partitions this topic must have after this + // request. This must be greater than the current number of partitions. + Count int32 + + // Assignment is a two-level array, the first corresponding to new + // partitions, the second containing broker IDs for where new partition + // replicas should live. + // + // The second level, the replicas, cannot have duplicate broker IDs (i.e. + // you cannot replicate a single partition twice on the same broker). + // Additionally, the number of replicas must match the current number of + // replicas per partition on the topic. + // + // The first level's length must be equal to the delta of Count and the + // current number of partitions. + Assignment []CreatePartitionsRequestTopicAssignment + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreatePartitionsRequestTopic. +func (v *CreatePartitionsRequestTopic) Default() { +} + +// NewCreatePartitionsRequestTopic returns a default CreatePartitionsRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreatePartitionsRequestTopic() CreatePartitionsRequestTopic { + var v CreatePartitionsRequestTopic + v.Default() + return v +} + +// CreatePartitionsRequest creates additional partitions for topics. +type CreatePartitionsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Topics contains topics to create partitions for. + Topics []CreatePartitionsRequestTopic + + // TimeoutMillis is how long Kafka can wait before responding to this request. + // This field has no effect on Kafka's processing of the request; the request + // will continue to be processed if the timeout is reached. If the timeout is + // reached, Kafka will reply with a REQUEST_TIMED_OUT error. 
+ // + // This field has a default of 15000. + TimeoutMillis int32 + + // ValidateOnly makes this request a dry-run; everything is validated but + // no partitions are actually created. + ValidateOnly bool + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*CreatePartitionsRequest) Key() int16 { return 37 } +func (*CreatePartitionsRequest) MaxVersion() int16 { return 3 } +func (v *CreatePartitionsRequest) SetVersion(version int16) { v.Version = version } +func (v *CreatePartitionsRequest) GetVersion() int16 { return v.Version } +func (v *CreatePartitionsRequest) IsFlexible() bool { return v.Version >= 2 } +func (v *CreatePartitionsRequest) Timeout() int32 { return v.TimeoutMillis } +func (v *CreatePartitionsRequest) SetTimeout(timeoutMillis int32) { v.TimeoutMillis = timeoutMillis } +func (v *CreatePartitionsRequest) IsAdminRequest() {} +func (v *CreatePartitionsRequest) ResponseKind() Response { + r := &CreatePartitionsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *CreatePartitionsRequest) RequestWith(ctx context.Context, r Requestor) (*CreatePartitionsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*CreatePartitionsResponse) + return resp, err +} + +func (v *CreatePartitionsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Count + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Assignment + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + for i := range v { + v := &v[i] + { + v := v.Replicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.TimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ValidateOnly + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *CreatePartitionsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *CreatePartitionsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *CreatePartitionsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { 
+ return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]CreatePartitionsRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := b.Int32() + s.Count = v + } + { + v := s.Assignment + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 0 || l == 0 { + a = []CreatePartitionsRequestTopicAssignment{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]CreatePartitionsRequestTopicAssignment, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := s.Replicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Replicas = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Assignment = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + { + v := b.Int32() + s.TimeoutMillis = v + } + { + v := b.Bool() + s.ValidateOnly = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrCreatePartitionsRequest returns a pointer to a default CreatePartitionsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrCreatePartitionsRequest() *CreatePartitionsRequest { + var v CreatePartitionsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreatePartitionsRequest. +func (v *CreatePartitionsRequest) Default() { + v.TimeoutMillis = 15000 +} + +// NewCreatePartitionsRequest returns a default CreatePartitionsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreatePartitionsRequest() CreatePartitionsRequest { + var v CreatePartitionsRequest + v.Default() + return v +} + +type CreatePartitionsResponseTopic struct { + // Topic is the topic that partitions were requested to be made for. + Topic string + + // ErrorCode is the error code returned for each topic in the request. + // + // NOT_CONTROLLER is returned if the request was not issued to a Kafka + // controller. + // + // TOPIC_AUTHORIZATION_FAILED is returned if the client is not authorized + // to create partitions for a topic. + // + // INVALID_REQUEST is returned for duplicate topics in the request. + // + // INVALID_TOPIC_EXCEPTION is returned if the topic is queued for deletion. + // + // REASSIGNMENT_IN_PROGRESS is returned if the request was issued while + // partitions were being reassigned. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if the broker does not know of + // the topic for which to create partitions. + // + // INVALID_PARTITIONS is returned if the request would drop the total + // count of partitions down, or if the request would not add any more + // partitions, or if the request uses unknown brokers, or if the request + // assigns a different number of brokers than the increase in the + // partition count. 
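// Editor's note: a sketch of growing a topic's partition count with the request
// types above; not part of the vendored code. Assumed setup: topic "events"
// currently has 3 partitions with replication factor 2 and brokers 1-3 exist;
// ctx, client (any Requestor), and kerr are assumptions as in the earlier note.
//
//	t := kmsg.NewCreatePartitionsRequestTopic()
//	t.Topic = "events"
//	t.Count = 5 // the final count, i.e. 2 new partitions
//	// One Assignment entry per new partition (Count minus current count),
//	// each listing replication-factor-many distinct broker IDs.
//	for _, replicas := range [][]int32{{1, 2}, {2, 3}} {
//		a := kmsg.NewCreatePartitionsRequestTopicAssignment()
//		a.Replicas = replicas
//		t.Assignment = append(t.Assignment, a)
//	}
//	req := kmsg.NewPtrCreatePartitionsRequest() // TimeoutMillis defaults to 15000
//	req.Topics = append(req.Topics, t)
//	req.ValidateOnly = true // dry-run first; set false to actually create
//	resp, err := req.RequestWith(ctx, client)
//	if err != nil {
//		return err
//	}
//	for _, topic := range resp.Topics {
//		if err := kerr.ErrorForCode(topic.ErrorCode); err != nil {
//			fmt.Printf("%s: %v\n", topic.Topic, err)
//		}
//	}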
+ ErrorCode int16 + + // ErrorMessage is an informative message if the topic creation failed. + ErrorMessage *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreatePartitionsResponseTopic. +func (v *CreatePartitionsResponseTopic) Default() { +} + +// NewCreatePartitionsResponseTopic returns a default CreatePartitionsResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreatePartitionsResponseTopic() CreatePartitionsResponseTopic { + var v CreatePartitionsResponseTopic + v.Default() + return v +} + +// CreatePartitionsResponse is returned from a CreatePartitionsRequest. +type CreatePartitionsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // Topics is a response to each topic in the creation request. + Topics []CreatePartitionsResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*CreatePartitionsResponse) Key() int16 { return 37 } +func (*CreatePartitionsResponse) MaxVersion() int16 { return 3 } +func (v *CreatePartitionsResponse) SetVersion(version int16) { v.Version = version } +func (v *CreatePartitionsResponse) GetVersion() int16 { return v.Version } +func (v *CreatePartitionsResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *CreatePartitionsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 1 } +func (v *CreatePartitionsResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *CreatePartitionsResponse) RequestKind() Request { + return &CreatePartitionsRequest{Version: v.Version} +} + +func (v *CreatePartitionsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *CreatePartitionsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *CreatePartitionsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *CreatePartitionsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible 
:= version >= 2 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]CreatePartitionsResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrCreatePartitionsResponse returns a pointer to a default CreatePartitionsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrCreatePartitionsResponse() *CreatePartitionsResponse { + var v CreatePartitionsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreatePartitionsResponse. +func (v *CreatePartitionsResponse) Default() { +} + +// NewCreatePartitionsResponse returns a default CreatePartitionsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreatePartitionsResponse() CreatePartitionsResponse { + var v CreatePartitionsResponse + v.Default() + return v +} + +type CreateDelegationTokenRequestRenewer struct { + // PrincipalType is the "type" this principal is. This must be "User". + PrincipalType string + + // PrincipalName is the user name allowed to renew the returned token. + PrincipalName string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreateDelegationTokenRequestRenewer. +func (v *CreateDelegationTokenRequestRenewer) Default() { +} + +// NewCreateDelegationTokenRequestRenewer returns a default CreateDelegationTokenRequestRenewer +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreateDelegationTokenRequestRenewer() CreateDelegationTokenRequestRenewer { + var v CreateDelegationTokenRequestRenewer + v.Default() + return v +} + +// CreateDelegationTokenRequest issues a request to create a delegation token. +// +// Creating delegation tokens allows for an (ideally) quicker and easier method +// of enabling authorization for a wide array of clients. Rather than having to +// manage many passwords external to Kafka, you only need to manage a few +// accounts and use those to create delegation tokens per client. +// +// Note that delegation tokens inherit the same ACLs as the user creating the +// token. Thus, if you want to properly scope ACLs, you should not create +// delegation tokens with admin accounts. +// +// Delegation tokens live inside of Kafka and use SASL SCRAM-SHA-256 for +// authorization. 
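// Editor's note: a sketch of creating a delegation token with the request type
// below; not part of the vendored code. Per the doc comment above, the creating
// principal should ideally be a non-admin account, since the token inherits its
// ACLs. ctx, client, and the renewer name "ops-bot" are assumptions.
//
//	req := kmsg.NewPtrCreateDelegationTokenRequest()
//	renewer := kmsg.NewCreateDelegationTokenRequestRenewer()
//	renewer.PrincipalType = "User" // must be "User"
//	renewer.PrincipalName = "ops-bot"
//	req.Renewers = append(req.Renewers, renewer)
//	req.MaxLifetimeMillis = -1 // -1: the broker's delegation.token.max.lifetime.ms
//	resp, err := req.RequestWith(ctx, client)
//	if err != nil {
//		return err
//	}
//	// resp.TokenID is then used as the SCRAM username and resp.HMAC as the
//	// password when a client authenticates with this token.
//	fmt.Println("created token:", resp.TokenID)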
+type CreateDelegationTokenRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // The principal type of the owner of the token. If null, this defaults + // to the token request principal. + OwnerPrincipalType *string // v3+ + + // Principal name of the owner of the token. If null, this defaults to + // the token request principal. + OwnerPrincipalName *string // v3+ + + // Renewers is a list of who can renew this delegation token. If empty, the + // default is the principal (user) who created the token. + Renewers []CreateDelegationTokenRequestRenewer + + // MaxLifetimeMillis is how long this delegation token will be valid for. + // If -1, the default will be the server's delegation.token.max.lifetime.ms. + MaxLifetimeMillis int64 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*CreateDelegationTokenRequest) Key() int16 { return 38 } +func (*CreateDelegationTokenRequest) MaxVersion() int16 { return 3 } +func (v *CreateDelegationTokenRequest) SetVersion(version int16) { v.Version = version } +func (v *CreateDelegationTokenRequest) GetVersion() int16 { return v.Version } +func (v *CreateDelegationTokenRequest) IsFlexible() bool { return v.Version >= 2 } +func (v *CreateDelegationTokenRequest) ResponseKind() Response { + r := &CreateDelegationTokenResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *CreateDelegationTokenRequest) RequestWith(ctx context.Context, r Requestor) (*CreateDelegationTokenResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*CreateDelegationTokenResponse) + return resp, err +} + +func (v *CreateDelegationTokenRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + if version >= 3 { + v := v.OwnerPrincipalType + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if version >= 3 { + v := v.OwnerPrincipalName + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Renewers + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.PrincipalType + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.PrincipalName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.MaxLifetimeMillis + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *CreateDelegationTokenRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *CreateDelegationTokenRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *CreateDelegationTokenRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := 
kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + if version >= 3 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.OwnerPrincipalType = v + } + if version >= 3 { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.OwnerPrincipalName = v + } + { + v := s.Renewers + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]CreateDelegationTokenRequestRenewer, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.PrincipalType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.PrincipalName = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Renewers = v + } + { + v := b.Int64() + s.MaxLifetimeMillis = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrCreateDelegationTokenRequest returns a pointer to a default CreateDelegationTokenRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrCreateDelegationTokenRequest() *CreateDelegationTokenRequest { + var v CreateDelegationTokenRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreateDelegationTokenRequest. +func (v *CreateDelegationTokenRequest) Default() { +} + +// NewCreateDelegationTokenRequest returns a default CreateDelegationTokenRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreateDelegationTokenRequest() CreateDelegationTokenRequest { + var v CreateDelegationTokenRequest + v.Default() + return v +} + +// CreateDelegationTokenResponse is a response to a CreateDelegationTokenRequest. +type CreateDelegationTokenResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ErrorCode is any error that caused the request to fail. + ErrorCode int16 + + // PrincipalType is the type of principal that granted this delegation token. + // This will always be "User" with the simple authorizer. + PrincipalType string + + // PrincipalName is the name of the principal that granted this delegation + // token. + PrincipalName string + + // The principal type of the requester of the token. + TokenRequesterPrincipalType string // v3+ + + // The principal name of the requester token. + TokenRequesterPrincipalName string // v3+ + + // IssueTimestamp is the millisecond timestamp this delegation token was + // issued. + IssueTimestamp int64 + + // ExpiryTimestamp is the millisecond timestamp this token will expire. 
The + // token can be renewed up to MaxTimestamp, past which point, it will be + // invalid. The Kafka default is 24h. + ExpiryTimestamp int64 + + // MaxTimestamp is the millisecond timestamp past which this token cannot + // be renewed. + MaxTimestamp int64 + + // TokenID is the ID of this token; this will be used as the username for + // scram authentication. + TokenID string + + // HMAC is the password of this token; this will be used as the password for + // scram authentication. + HMAC []byte + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*CreateDelegationTokenResponse) Key() int16 { return 38 } +func (*CreateDelegationTokenResponse) MaxVersion() int16 { return 3 } +func (v *CreateDelegationTokenResponse) SetVersion(version int16) { v.Version = version } +func (v *CreateDelegationTokenResponse) GetVersion() int16 { return v.Version } +func (v *CreateDelegationTokenResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *CreateDelegationTokenResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 1 +} + +func (v *CreateDelegationTokenResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *CreateDelegationTokenResponse) RequestKind() Request { + return &CreateDelegationTokenRequest{Version: v.Version} +} + +func (v *CreateDelegationTokenResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.PrincipalType + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.PrincipalName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 3 { + v := v.TokenRequesterPrincipalType + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 3 { + v := v.TokenRequesterPrincipalName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.IssueTimestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ExpiryTimestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.MaxTimestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.TokenID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.HMAC + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *CreateDelegationTokenResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *CreateDelegationTokenResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *CreateDelegationTokenResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = 
version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.PrincipalType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.PrincipalName = v + } + if version >= 3 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.TokenRequesterPrincipalType = v + } + if version >= 3 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.TokenRequesterPrincipalName = v + } + { + v := b.Int64() + s.IssueTimestamp = v + } + { + v := b.Int64() + s.ExpiryTimestamp = v + } + { + v := b.Int64() + s.MaxTimestamp = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.TokenID = v + } + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.HMAC = v + } + { + v := b.Int32() + s.ThrottleMillis = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrCreateDelegationTokenResponse returns a pointer to a default CreateDelegationTokenResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrCreateDelegationTokenResponse() *CreateDelegationTokenResponse { + var v CreateDelegationTokenResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to CreateDelegationTokenResponse. +func (v *CreateDelegationTokenResponse) Default() { +} + +// NewCreateDelegationTokenResponse returns a default CreateDelegationTokenResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewCreateDelegationTokenResponse() CreateDelegationTokenResponse { + var v CreateDelegationTokenResponse + v.Default() + return v +} + +// RenewDelegationTokenRequest is a request to renew a delegation token that +// has not yet hit its max timestamp. Note that a client using a token cannot +// renew its own token. +type RenewDelegationTokenRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // HMAC is the HMAC of the token to be renewed. + HMAC []byte + + // RenewTimeMillis is how long to renew the token for. If -1, Kafka uses its + // delegation.token.max.lifetime.ms. + RenewTimeMillis int64 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
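// Editor's note: a sketch of renewing a token before it expires; not part of the
// vendored code. Per the doc comment above, the renewal must be issued by an
// authorized renewer using its own credentials, not by a client authenticated
// with the token itself. tokenHMAC, ctx, and client are assumptions.
//
//	req := kmsg.NewPtrRenewDelegationTokenRequest()
//	req.HMAC = tokenHMAC     // from the create or describe response
//	req.RenewTimeMillis = -1 // -1: the broker's delegation.token.max.lifetime.ms
//	resp, err := req.RequestWith(ctx, client)
//	if err != nil {
//		return err
//	}
//	// resp.ExpiryTimestamp is the new expiry; renewals can never push past
//	// the token's original MaxTimestamp.
//	fmt.Println("token now expires at:", resp.ExpiryTimestamp)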
+ UnknownTags Tags // v2+ +} + +func (*RenewDelegationTokenRequest) Key() int16 { return 39 } +func (*RenewDelegationTokenRequest) MaxVersion() int16 { return 2 } +func (v *RenewDelegationTokenRequest) SetVersion(version int16) { v.Version = version } +func (v *RenewDelegationTokenRequest) GetVersion() int16 { return v.Version } +func (v *RenewDelegationTokenRequest) IsFlexible() bool { return v.Version >= 2 } +func (v *RenewDelegationTokenRequest) ResponseKind() Response { + r := &RenewDelegationTokenResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *RenewDelegationTokenRequest) RequestWith(ctx context.Context, r Requestor) (*RenewDelegationTokenResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*RenewDelegationTokenResponse) + return resp, err +} + +func (v *RenewDelegationTokenRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.HMAC + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + { + v := v.RenewTimeMillis + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *RenewDelegationTokenRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *RenewDelegationTokenRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *RenewDelegationTokenRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.HMAC = v + } + { + v := b.Int64() + s.RenewTimeMillis = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrRenewDelegationTokenRequest returns a pointer to a default RenewDelegationTokenRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrRenewDelegationTokenRequest() *RenewDelegationTokenRequest { + var v RenewDelegationTokenRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to RenewDelegationTokenRequest. +func (v *RenewDelegationTokenRequest) Default() { +} + +// NewRenewDelegationTokenRequest returns a default RenewDelegationTokenRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewRenewDelegationTokenRequest() RenewDelegationTokenRequest { + var v RenewDelegationTokenRequest + v.Default() + return v +} + +// RenewDelegationTokenResponse is a response to a RenewDelegationTokenRequest. +type RenewDelegationTokenResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ErrorCode is any error that caused the request to fail. + ErrorCode int16 + + // ExpiryTimestamp is the millisecond timestamp this token will expire. The + // token can be renewed up to MaxTimestamp, past which point, it will be + // invalid. The Kafka default is 24h. 
+ ExpiryTimestamp int64 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*RenewDelegationTokenResponse) Key() int16 { return 39 } +func (*RenewDelegationTokenResponse) MaxVersion() int16 { return 2 } +func (v *RenewDelegationTokenResponse) SetVersion(version int16) { v.Version = version } +func (v *RenewDelegationTokenResponse) GetVersion() int16 { return v.Version } +func (v *RenewDelegationTokenResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *RenewDelegationTokenResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 1 +} + +func (v *RenewDelegationTokenResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *RenewDelegationTokenResponse) RequestKind() Request { + return &RenewDelegationTokenRequest{Version: v.Version} +} + +func (v *RenewDelegationTokenResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ExpiryTimestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *RenewDelegationTokenResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *RenewDelegationTokenResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *RenewDelegationTokenResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int64() + s.ExpiryTimestamp = v + } + { + v := b.Int32() + s.ThrottleMillis = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrRenewDelegationTokenResponse returns a pointer to a default RenewDelegationTokenResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrRenewDelegationTokenResponse() *RenewDelegationTokenResponse { + var v RenewDelegationTokenResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to RenewDelegationTokenResponse. +func (v *RenewDelegationTokenResponse) Default() { +} + +// NewRenewDelegationTokenResponse returns a default RenewDelegationTokenResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewRenewDelegationTokenResponse() RenewDelegationTokenResponse { + var v RenewDelegationTokenResponse + v.Default() + return v +} + +// ExpireDelegationTokenRequest is a request to change the expiry timestamp +// of a delegation token. Note that a client using a token cannot expire its +// own token. +type ExpireDelegationTokenRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // HMAC is the HMAC of the token to change the expiry timestamp of. 
+ HMAC []byte + + // ExpiryPeriodMillis changes the delegation token's expiry timestamp to + // now + expiry time millis. This can be used to force tokens to expire + // quickly, or to allow tokens a grace period before expiry. You cannot + // add enough expiry that exceeds the original max timestamp. + ExpiryPeriodMillis int64 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*ExpireDelegationTokenRequest) Key() int16 { return 40 } +func (*ExpireDelegationTokenRequest) MaxVersion() int16 { return 2 } +func (v *ExpireDelegationTokenRequest) SetVersion(version int16) { v.Version = version } +func (v *ExpireDelegationTokenRequest) GetVersion() int16 { return v.Version } +func (v *ExpireDelegationTokenRequest) IsFlexible() bool { return v.Version >= 2 } +func (v *ExpireDelegationTokenRequest) ResponseKind() Response { + r := &ExpireDelegationTokenResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *ExpireDelegationTokenRequest) RequestWith(ctx context.Context, r Requestor) (*ExpireDelegationTokenResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*ExpireDelegationTokenResponse) + return resp, err +} + +func (v *ExpireDelegationTokenRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.HMAC + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + { + v := v.ExpiryPeriodMillis + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ExpireDelegationTokenRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ExpireDelegationTokenRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ExpireDelegationTokenRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.HMAC = v + } + { + v := b.Int64() + s.ExpiryPeriodMillis = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrExpireDelegationTokenRequest returns a pointer to a default ExpireDelegationTokenRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrExpireDelegationTokenRequest() *ExpireDelegationTokenRequest { + var v ExpireDelegationTokenRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ExpireDelegationTokenRequest. +func (v *ExpireDelegationTokenRequest) Default() { +} + +// NewExpireDelegationTokenRequest returns a default ExpireDelegationTokenRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewExpireDelegationTokenRequest() ExpireDelegationTokenRequest { + var v ExpireDelegationTokenRequest + v.Default() + return v +} + +// ExpireDelegationTokenResponse is a response to an ExpireDelegationTokenRequest. 
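// Editor's note: a sketch of force-expiring a token with the request type above;
// not part of the vendored code. ExpiryPeriodMillis moves the expiry to now plus
// the given millis, so 0 revokes immediately while a positive value grants a
// grace period (never beyond the original max timestamp). tokenHMAC, ctx, and
// client are assumptions.
//
//	req := kmsg.NewPtrExpireDelegationTokenRequest()
//	req.HMAC = tokenHMAC
//	req.ExpiryPeriodMillis = 0 // expire now; e.g. 60000 for a one-minute grace
//	resp, err := req.RequestWith(ctx, client)
//	if err != nil {
//		return err
//	}
//	fmt.Println("new expiry:", resp.ExpiryTimestamp)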
+type ExpireDelegationTokenResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ErrorCode is any error that caused the request to fail. + ErrorCode int16 + + // ExpiryTimestamp is the new timestamp at which the delegation token will + // expire. + ExpiryTimestamp int64 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*ExpireDelegationTokenResponse) Key() int16 { return 40 } +func (*ExpireDelegationTokenResponse) MaxVersion() int16 { return 2 } +func (v *ExpireDelegationTokenResponse) SetVersion(version int16) { v.Version = version } +func (v *ExpireDelegationTokenResponse) GetVersion() int16 { return v.Version } +func (v *ExpireDelegationTokenResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *ExpireDelegationTokenResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 1 +} + +func (v *ExpireDelegationTokenResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *ExpireDelegationTokenResponse) RequestKind() Request { + return &ExpireDelegationTokenRequest{Version: v.Version} +} + +func (v *ExpireDelegationTokenResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ExpiryTimestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ExpireDelegationTokenResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ExpireDelegationTokenResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ExpireDelegationTokenResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int64() + s.ExpiryTimestamp = v + } + { + v := b.Int32() + s.ThrottleMillis = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrExpireDelegationTokenResponse returns a pointer to a default ExpireDelegationTokenResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrExpireDelegationTokenResponse() *ExpireDelegationTokenResponse { + var v ExpireDelegationTokenResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ExpireDelegationTokenResponse. +func (v *ExpireDelegationTokenResponse) Default() { +} + +// NewExpireDelegationTokenResponse returns a default ExpireDelegationTokenResponse +// This is a shortcut for creating a struct and calling Default yourself. 
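// Editor's note: a sketch of the version-dependent wire format these generated
// types implement; not part of the vendored code. AppendTo and ReadFrom both
// consult the struct's Version, and v2+ ("flexible") messages switch to compact
// strings/arrays plus uvarint-counted tagged fields, so the version must be set
// before decoding bytes that were encoded at that version.
//
//	req := kmsg.NewPtrExpireDelegationTokenRequest()
//	req.HMAC = []byte("opaque-hmac")
//	req.SetVersion(2) // v2+ is flexible, per IsFlexible above
//	wire := req.AppendTo(nil)
//
//	var back kmsg.ExpireDelegationTokenRequest
//	back.SetVersion(2) // must match: v0/v1 bytes decode differently
//	if err := back.ReadFrom(wire); err != nil {
//		return err
//	}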
+func NewExpireDelegationTokenResponse() ExpireDelegationTokenResponse { + var v ExpireDelegationTokenResponse + v.Default() + return v +} + +type DescribeDelegationTokenRequestOwner struct { + // PrincipalType is a type to match to describe delegation tokens created + // with this principal. This would be "User" with the simple authorizer. + PrincipalType string + + // PrincipalName is the name to match to describe delegation tokens created + // with this principal. + PrincipalName string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeDelegationTokenRequestOwner. +func (v *DescribeDelegationTokenRequestOwner) Default() { +} + +// NewDescribeDelegationTokenRequestOwner returns a default DescribeDelegationTokenRequestOwner +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeDelegationTokenRequestOwner() DescribeDelegationTokenRequestOwner { + var v DescribeDelegationTokenRequestOwner + v.Default() + return v +} + +// DescribeDelegationTokenRequest is a request to describe delegation tokens. +type DescribeDelegationTokenRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Owners contains owners to describe delegation tokens for, or null for all. + // If non-null, only tokens created from a matching principal type, name + // combination are printed. + Owners []DescribeDelegationTokenRequestOwner + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*DescribeDelegationTokenRequest) Key() int16 { return 41 } +func (*DescribeDelegationTokenRequest) MaxVersion() int16 { return 3 } +func (v *DescribeDelegationTokenRequest) SetVersion(version int16) { v.Version = version } +func (v *DescribeDelegationTokenRequest) GetVersion() int16 { return v.Version } +func (v *DescribeDelegationTokenRequest) IsFlexible() bool { return v.Version >= 2 } +func (v *DescribeDelegationTokenRequest) ResponseKind() Response { + r := &DescribeDelegationTokenResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
+func (v *DescribeDelegationTokenRequest) RequestWith(ctx context.Context, r Requestor) (*DescribeDelegationTokenResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*DescribeDelegationTokenResponse) + return resp, err +} + +func (v *DescribeDelegationTokenRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.Owners + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + for i := range v { + v := &v[i] + { + v := v.PrincipalType + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.PrincipalName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeDelegationTokenRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeDelegationTokenRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeDelegationTokenRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := s.Owners + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 0 || l == 0 { + a = []DescribeDelegationTokenRequestOwner{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeDelegationTokenRequestOwner, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.PrincipalType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.PrincipalName = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Owners = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeDelegationTokenRequest returns a pointer to a default DescribeDelegationTokenRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeDelegationTokenRequest() *DescribeDelegationTokenRequest { + var v DescribeDelegationTokenRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeDelegationTokenRequest. +func (v *DescribeDelegationTokenRequest) Default() { +} + +// NewDescribeDelegationTokenRequest returns a default DescribeDelegationTokenRequest +// This is a shortcut for creating a struct and calling Default yourself. 
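// Editor's note: a sketch of listing delegation tokens with the request type
// above; not part of the vendored code. A nil Owners slice describes every
// token, while entries filter to tokens whose owner principal matches. ctx,
// client, and the owner name "ops-bot" are assumptions.
//
//	req := kmsg.NewPtrDescribeDelegationTokenRequest()
//	owner := kmsg.NewDescribeDelegationTokenRequestOwner()
//	owner.PrincipalType = "User"
//	owner.PrincipalName = "ops-bot"
//	req.Owners = append(req.Owners, owner) // drop this to list all owners
//	resp, err := req.RequestWith(ctx, client)
//	if err != nil {
//		return err
//	}
//	for _, d := range resp.TokenDetails {
//		fmt.Printf("%s owned by %s/%s expires %d\n",
//			d.TokenID, d.PrincipalType, d.PrincipalName, d.ExpiryTimestamp)
//	}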
+func NewDescribeDelegationTokenRequest() DescribeDelegationTokenRequest {
+	var v DescribeDelegationTokenRequest
+	v.Default()
+	return v
+}
+
+type DescribeDelegationTokenResponseTokenDetailRenewer struct {
+	PrincipalType string
+
+	PrincipalName string
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+	UnknownTags Tags // v2+
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to DescribeDelegationTokenResponseTokenDetailRenewer.
+func (v *DescribeDelegationTokenResponseTokenDetailRenewer) Default() {
+}
+
+// NewDescribeDelegationTokenResponseTokenDetailRenewer returns a default DescribeDelegationTokenResponseTokenDetailRenewer
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewDescribeDelegationTokenResponseTokenDetailRenewer() DescribeDelegationTokenResponseTokenDetailRenewer {
+	var v DescribeDelegationTokenResponseTokenDetailRenewer
+	v.Default()
+	return v
+}
+
+type DescribeDelegationTokenResponseTokenDetail struct {
+	// PrincipalType is the principal type of who created this token.
+	PrincipalType string
+
+	// PrincipalName is the principal name of who created this token.
+	PrincipalName string
+
+	// The principal type of the requester of the token.
+	TokenRequesterPrincipalType string // v3+
+
+	// The principal name of the requester of the token.
+	TokenRequesterPrincipalName string // v3+
+
+	// IssueTimestamp is the millisecond timestamp of when this token was issued.
+	IssueTimestamp int64
+
+	// ExpiryTimestamp is the millisecond timestamp of when this token will expire.
+	ExpiryTimestamp int64
+
+	// MaxTimestamp is the millisecond timestamp past which this token cannot
+	// be renewed.
+	MaxTimestamp int64
+
+	// TokenID is the ID (scram username) of this token.
+	TokenID string
+
+	// HMAC is the password of this token.
+	HMAC []byte
+
+	// Renewers is a list of users that can renew this token.
+	Renewers []DescribeDelegationTokenResponseTokenDetailRenewer
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+	UnknownTags Tags // v2+
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to DescribeDelegationTokenResponseTokenDetail.
+func (v *DescribeDelegationTokenResponseTokenDetail) Default() {
+}
+
+// NewDescribeDelegationTokenResponseTokenDetail returns a default DescribeDelegationTokenResponseTokenDetail
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewDescribeDelegationTokenResponseTokenDetail() DescribeDelegationTokenResponseTokenDetail {
+	var v DescribeDelegationTokenResponseTokenDetail
+	v.Default()
+	return v
+}
+
+// DescribeDelegationTokenResponse is a response to a DescribeDelegationTokenRequest.
+type DescribeDelegationTokenResponse struct {
+	// Version is the version of this message used with a Kafka broker.
+	Version int16
+
+	// ErrorCode is any error that caused the request to fail.
+	ErrorCode int16
+
+	// TokenDetails shows information about each token created from any principal
+	// in the request.
+	TokenDetails []DescribeDelegationTokenResponseTokenDetail
+
+	// ThrottleMillis is how long of a throttle Kafka will apply to the client
+	// after this request.
+	// For Kafka < 2.0.0, the throttle is applied before issuing a response.
+	// For Kafka >= 2.0.0, the throttle is applied after issuing a response.
+	//
+	// This request switched at version 1.
+ ThrottleMillis int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +func (*DescribeDelegationTokenResponse) Key() int16 { return 41 } +func (*DescribeDelegationTokenResponse) MaxVersion() int16 { return 3 } +func (v *DescribeDelegationTokenResponse) SetVersion(version int16) { v.Version = version } +func (v *DescribeDelegationTokenResponse) GetVersion() int16 { return v.Version } +func (v *DescribeDelegationTokenResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *DescribeDelegationTokenResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 1 +} + +func (v *DescribeDelegationTokenResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *DescribeDelegationTokenResponse) RequestKind() Request { + return &DescribeDelegationTokenRequest{Version: v.Version} +} + +func (v *DescribeDelegationTokenResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.TokenDetails + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.PrincipalType + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.PrincipalName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 3 { + v := v.TokenRequesterPrincipalType + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 3 { + v := v.TokenRequesterPrincipalName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.IssueTimestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ExpiryTimestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.MaxTimestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.TokenID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.HMAC + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + { + v := v.Renewers + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.PrincipalType + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.PrincipalName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeDelegationTokenResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeDelegationTokenResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v 
*DescribeDelegationTokenResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + v := s.TokenDetails + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeDelegationTokenResponseTokenDetail, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.PrincipalType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.PrincipalName = v + } + if version >= 3 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.TokenRequesterPrincipalType = v + } + if version >= 3 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.TokenRequesterPrincipalName = v + } + { + v := b.Int64() + s.IssueTimestamp = v + } + { + v := b.Int64() + s.ExpiryTimestamp = v + } + { + v := b.Int64() + s.MaxTimestamp = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.TokenID = v + } + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.HMAC = v + } + { + v := s.Renewers + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeDelegationTokenResponseTokenDetailRenewer, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.PrincipalType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.PrincipalName = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Renewers = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.TokenDetails = v + } + { + v := b.Int32() + s.ThrottleMillis = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeDelegationTokenResponse returns a pointer to a default DescribeDelegationTokenResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. 
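+//
+// Illustrative only: walking a decoded response. Field names are from the
+// struct above; the print format is arbitrary.
+//
+//	for _, d := range resp.TokenDetails {
+//		fmt.Printf("token %s (owner %s/%s) expires at %d\n",
+//			d.TokenID, d.PrincipalType, d.PrincipalName, d.ExpiryTimestamp)
+//	}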
+func NewPtrDescribeDelegationTokenResponse() *DescribeDelegationTokenResponse {
+	var v DescribeDelegationTokenResponse
+	v.Default()
+	return &v
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to DescribeDelegationTokenResponse.
+func (v *DescribeDelegationTokenResponse) Default() {
+}
+
+// NewDescribeDelegationTokenResponse returns a default DescribeDelegationTokenResponse
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewDescribeDelegationTokenResponse() DescribeDelegationTokenResponse {
+	var v DescribeDelegationTokenResponse
+	v.Default()
+	return v
+}
+
+// DeleteGroupsRequest deletes consumer groups. This request was added for
+// Kafka 1.1.0 corresponding to the removal of RetentionTimeMillis from
+// OffsetCommitRequest. See KIP-229 for more details.
+type DeleteGroupsRequest struct {
+	// Version is the version of this message used with a Kafka broker.
+	Version int16
+
+	// Groups is a list of groups to delete.
+	Groups []string
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+	UnknownTags Tags // v2+
+}
+
+func (*DeleteGroupsRequest) Key() int16                 { return 42 }
+func (*DeleteGroupsRequest) MaxVersion() int16          { return 2 }
+func (v *DeleteGroupsRequest) SetVersion(version int16) { v.Version = version }
+func (v *DeleteGroupsRequest) GetVersion() int16        { return v.Version }
+func (v *DeleteGroupsRequest) IsFlexible() bool         { return v.Version >= 2 }
+func (v *DeleteGroupsRequest) IsGroupCoordinatorRequest() {}
+func (v *DeleteGroupsRequest) ResponseKind() Response {
+	r := &DeleteGroupsResponse{Version: v.Version}
+	r.Default()
+	return r
+}
+
+// RequestWith requests v on r and returns the response or an error.
+// For sharded requests, the response may be merged and still return an error.
+// It is better to rely on client.RequestSharded than to rely on proper merging behavior.
+func (v *DeleteGroupsRequest) RequestWith(ctx context.Context, r Requestor) (*DeleteGroupsResponse, error) {
+	kresp, err := r.Request(ctx, v)
+	resp, _ := kresp.(*DeleteGroupsResponse)
+	return resp, err
+}
+
+func (v *DeleteGroupsRequest) AppendTo(dst []byte) []byte {
+	version := v.Version
+	_ = version
+	isFlexible := version >= 2
+	_ = isFlexible
+	{
+		v := v.Groups
+		if isFlexible {
+			dst = kbin.AppendCompactArrayLen(dst, len(v))
+		} else {
+			dst = kbin.AppendArrayLen(dst, len(v))
+		}
+		for i := range v {
+			v := v[i]
+			if isFlexible {
+				dst = kbin.AppendCompactString(dst, v)
+			} else {
+				dst = kbin.AppendString(dst, v)
+			}
+		}
+	}
+	if isFlexible {
+		dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len()))
+		dst = v.UnknownTags.AppendEach(dst)
+	}
+	return dst
+}
+
+func (v *DeleteGroupsRequest) ReadFrom(src []byte) error {
+	return v.readFrom(src, false)
+}
+
+func (v *DeleteGroupsRequest) UnsafeReadFrom(src []byte) error {
+	return v.readFrom(src, true)
+}
+
+func (v *DeleteGroupsRequest) readFrom(src []byte, unsafe bool) error {
+	v.Default()
+	b := kbin.Reader{Src: src}
+	version := v.Version
+	_ = version
+	isFlexible := version >= 2
+	_ = isFlexible
+	s := v
+	{
+		v := s.Groups
+		a := v
+		var l int32
+		if isFlexible {
+			l = b.CompactArrayLen()
+		} else {
+			l = b.ArrayLen()
+		}
+		if !b.Ok() {
+			return b.Complete()
+		}
+		a = a[:0]
+		if l > 0 {
+			a = append(a, make([]string, l)...)
+ } + for i := int32(0); i < l; i++ { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + a[i] = v + } + v = a + s.Groups = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDeleteGroupsRequest returns a pointer to a default DeleteGroupsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDeleteGroupsRequest() *DeleteGroupsRequest { + var v DeleteGroupsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteGroupsRequest. +func (v *DeleteGroupsRequest) Default() { +} + +// NewDeleteGroupsRequest returns a default DeleteGroupsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewDeleteGroupsRequest() DeleteGroupsRequest { + var v DeleteGroupsRequest + v.Default() + return v +} + +type DeleteGroupsResponseGroup struct { + // Group is a group ID requested for deletion. + Group string + + // ErrorCode is the error code returned for this group's deletion request. + // + // GROUP_AUTHORIZATION_FAILED is returned if the client is not authorized + // to delete a group. + // + // INVALID_GROUP_ID is returned if the requested group ID is invalid. + // + // COORDINATOR_NOT_AVAILABLE is returned if the coordinator for this + // group is not yet active. + // + // GROUP_ID_NOT_FOUND is returned if the group ID does not exist. + // + // NON_EMPTY_GROUP is returned if attempting to delete a group that is + // not in the empty state. + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v2+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteGroupsResponseGroup. +func (v *DeleteGroupsResponseGroup) Default() { +} + +// NewDeleteGroupsResponseGroup returns a default DeleteGroupsResponseGroup +// This is a shortcut for creating a struct and calling Default yourself. +func NewDeleteGroupsResponseGroup() DeleteGroupsResponseGroup { + var v DeleteGroupsResponseGroup + v.Default() + return v +} + +// DeleteGroupsResponse is returned from a DeleteGroupsRequest. +type DeleteGroupsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after this request. + // For Kafka < 2.0.0, the throttle is applied before issuing a response. + // For Kafka >= 2.0.0, the throttle is applied after issuing a response. + // + // This request switched at version 1. + ThrottleMillis int32 + + // Groups are the responses to each group requested for deletion. + Groups []DeleteGroupsResponseGroup + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v2+ +} + +func (*DeleteGroupsResponse) Key() int16 { return 42 } +func (*DeleteGroupsResponse) MaxVersion() int16 { return 2 } +func (v *DeleteGroupsResponse) SetVersion(version int16) { v.Version = version } +func (v *DeleteGroupsResponse) GetVersion() int16 { return v.Version } +func (v *DeleteGroupsResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *DeleteGroupsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 1 } +func (v *DeleteGroupsResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *DeleteGroupsResponse) RequestKind() Request { return &DeleteGroupsRequest{Version: v.Version} } + +func (v *DeleteGroupsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Groups + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Group + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DeleteGroupsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DeleteGroupsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DeleteGroupsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Groups + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DeleteGroupsResponseGroup, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Group = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Groups = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDeleteGroupsResponse returns a pointer to a default DeleteGroupsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDeleteGroupsResponse() *DeleteGroupsResponse { + var v DeleteGroupsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DeleteGroupsResponse. +func (v *DeleteGroupsResponse) Default() { +} + +// NewDeleteGroupsResponse returns a default DeleteGroupsResponse +// This is a shortcut for creating a struct and calling Default yourself. 
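+//
+// A sketch (assuming franz-go's kerr package) of checking each group's
+// deletion result; kerr.ErrorForCode maps a Kafka error code to a Go error.
+//
+//	for _, g := range resp.Groups {
+//		if err := kerr.ErrorForCode(g.ErrorCode); err != nil {
+//			log.Printf("delete %s failed: %v", g.Group, err)
+//		}
+//	}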
+func NewDeleteGroupsResponse() DeleteGroupsResponse {
+	var v DeleteGroupsResponse
+	v.Default()
+	return v
+}
+
+type ElectLeadersRequestTopic struct {
+	// Topic is a topic to trigger leader elections for (but only for the
+	// partitions below).
+	Topic string
+
+	// Partitions is an array of partitions in a topic to trigger leader
+	// elections for.
+	Partitions []int32
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+	UnknownTags Tags // v2+
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to ElectLeadersRequestTopic.
+func (v *ElectLeadersRequestTopic) Default() {
+}
+
+// NewElectLeadersRequestTopic returns a default ElectLeadersRequestTopic
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewElectLeadersRequestTopic() ElectLeadersRequestTopic {
+	var v ElectLeadersRequestTopic
+	v.Default()
+	return v
+}
+
+// ElectLeadersRequest begins a leader election for all given topic
+// partitions. This request was added in Kafka 2.2.0 to replace the zookeeper
+// only option of triggering leader elections before. See KIP-183 for more
+// details. KIP-460 introduced the ElectionType field with Kafka 2.4.0.
+type ElectLeadersRequest struct {
+	// Version is the version of this message used with a Kafka broker.
+	Version int16
+
+	// ElectionType is the type of election to conduct. 0 elects the preferred
+	// replica, 1 elects the first live replica if there are no in-sync replicas
+	// (i.e., unclean leader election).
+	ElectionType int8 // v1+
+
+	// Topics is an array of topics and corresponding partitions to
+	// trigger leader elections for, or null for all.
+	Topics []ElectLeadersRequestTopic
+
+	// TimeoutMillis is how long Kafka can wait before responding to this request.
+	// This field has no effect on Kafka's processing of the request; the request
+	// will continue to be processed if the timeout is reached. If the timeout is
+	// reached, Kafka will reply with a REQUEST_TIMED_OUT error.
+	//
+	// This field has a default of 60000.
+	TimeoutMillis int32
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+	UnknownTags Tags // v2+
+}
+
+func (*ElectLeadersRequest) Key() int16                 { return 43 }
+func (*ElectLeadersRequest) MaxVersion() int16          { return 2 }
+func (v *ElectLeadersRequest) SetVersion(version int16) { v.Version = version }
+func (v *ElectLeadersRequest) GetVersion() int16        { return v.Version }
+func (v *ElectLeadersRequest) IsFlexible() bool         { return v.Version >= 2 }
+func (v *ElectLeadersRequest) Timeout() int32           { return v.TimeoutMillis }
+func (v *ElectLeadersRequest) SetTimeout(timeoutMillis int32) { v.TimeoutMillis = timeoutMillis }
+func (v *ElectLeadersRequest) IsAdminRequest()              {}
+func (v *ElectLeadersRequest) ResponseKind() Response {
+	r := &ElectLeadersResponse{Version: v.Version}
+	r.Default()
+	return r
+}
+
+// RequestWith requests v on r and returns the response or an error.
+// For sharded requests, the response may be merged and still return an error.
+// It is better to rely on client.RequestSharded than to rely on proper merging behavior.
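+//
+// An illustrative sketch: trigger a preferred-replica election (type 0)
+// for two partitions of a hypothetical topic "foo".
+//
+//	req := kmsg.NewPtrElectLeadersRequest()
+//	req.ElectionType = 0 // 1 would request an unclean election (v1+)
+//	t := kmsg.NewElectLeadersRequestTopic()
+//	t.Topic = "foo"
+//	t.Partitions = []int32{0, 1}
+//	req.Topics = append(req.Topics, t)
+//	resp, err := req.RequestWith(ctx, cl)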
+func (v *ElectLeadersRequest) RequestWith(ctx context.Context, r Requestor) (*ElectLeadersResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*ElectLeadersResponse) + return resp, err +} + +func (v *ElectLeadersRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + if version >= 1 { + v := v.ElectionType + dst = kbin.AppendInt8(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.TimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ElectLeadersRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ElectLeadersRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ElectLeadersRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + if version >= 1 { + v := b.Int8() + s.ElectionType = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 0 || l == 0 { + a = []ElectLeadersRequestTopic{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ElectLeadersRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + { + v := b.Int32() + s.TimeoutMillis = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrElectLeadersRequest returns a pointer to a default ElectLeadersRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrElectLeadersRequest() *ElectLeadersRequest { + var v ElectLeadersRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ElectLeadersRequest. 
+func (v *ElectLeadersRequest) Default() {
+	v.TimeoutMillis = 60000
+}
+
+// NewElectLeadersRequest returns a default ElectLeadersRequest
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewElectLeadersRequest() ElectLeadersRequest {
+	var v ElectLeadersRequest
+	v.Default()
+	return v
+}
+
+type ElectLeadersResponseTopicPartition struct {
+	// Partition is the partition for this result.
+	Partition int32
+
+	// ErrorCode is the error code returned for this topic/partition leader
+	// election.
+	//
+	// CLUSTER_AUTHORIZATION_FAILED is returned if the client is not
+	// authorized to trigger leader elections.
+	//
+	// NOT_CONTROLLER is returned if the request was not issued to a Kafka
+	// controller.
+	//
+	// UNKNOWN_TOPIC_OR_PARTITION is returned if the topic/partition does
+	// not exist on any broker in the cluster (this is slightly different
+	// from the usual meaning of a single broker not knowing of the topic
+	// partition).
+	//
+	// PREFERRED_LEADER_NOT_AVAILABLE is returned if the preferred leader
+	// could not be elected (for example, the preferred leader was not in
+	// the ISR).
+	ErrorCode int16
+
+	// ErrorMessage is an informative message if the leader election failed.
+	ErrorMessage *string
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+	UnknownTags Tags // v2+
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to ElectLeadersResponseTopicPartition.
+func (v *ElectLeadersResponseTopicPartition) Default() {
+}
+
+// NewElectLeadersResponseTopicPartition returns a default ElectLeadersResponseTopicPartition
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewElectLeadersResponseTopicPartition() ElectLeadersResponseTopicPartition {
+	var v ElectLeadersResponseTopicPartition
+	v.Default()
+	return v
+}
+
+type ElectLeadersResponseTopic struct {
+	// Topic is the topic for the given partition results below.
+	Topic string
+
+	// Partitions contains election results for a topic's partitions.
+	Partitions []ElectLeadersResponseTopicPartition
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+	UnknownTags Tags // v2+
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to ElectLeadersResponseTopic.
+func (v *ElectLeadersResponseTopic) Default() {
+}
+
+// NewElectLeadersResponseTopic returns a default ElectLeadersResponseTopic
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewElectLeadersResponseTopic() ElectLeadersResponseTopic {
+	var v ElectLeadersResponseTopic
+	v.Default()
+	return v
+}
+
+// ElectLeadersResponse is a response for an ElectLeadersRequest.
+type ElectLeadersResponse struct {
+	// Version is the version of this message used with a Kafka broker.
+	Version int16
+
+	// ThrottleMillis is how long of a throttle Kafka will apply to the client
+	// after responding to this request.
+	ThrottleMillis int32
+
+	// ErrorCode is any error that applies to all partitions.
+	//
+	// CLUSTER_AUTHORIZATION_FAILED is returned if the client is not
+	// authorized to trigger leader elections.
+	ErrorCode int16 // v1+
+
+	// Topics contains leader election results for each requested topic.
+	Topics []ElectLeadersResponseTopic
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+ UnknownTags Tags // v2+ +} + +func (*ElectLeadersResponse) Key() int16 { return 43 } +func (*ElectLeadersResponse) MaxVersion() int16 { return 2 } +func (v *ElectLeadersResponse) SetVersion(version int16) { v.Version = version } +func (v *ElectLeadersResponse) GetVersion() int16 { return v.Version } +func (v *ElectLeadersResponse) IsFlexible() bool { return v.Version >= 2 } +func (v *ElectLeadersResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 0 } +func (v *ElectLeadersResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *ElectLeadersResponse) RequestKind() Request { return &ElectLeadersRequest{Version: v.Version} } + +func (v *ElectLeadersResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + if version >= 1 { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ElectLeadersResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ElectLeadersResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ElectLeadersResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 2 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + if version >= 1 { + v := b.Int16() + s.ErrorCode = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ElectLeadersResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ElectLeadersResponseTopicPartition, l)...) 
+				}
+				for i := int32(0); i < l; i++ {
+					v := &a[i]
+					v.Default()
+					s := v
+					{
+						v := b.Int32()
+						s.Partition = v
+					}
+					{
+						v := b.Int16()
+						s.ErrorCode = v
+					}
+					{
+						var v *string
+						if isFlexible {
+							if unsafe {
+								v = b.UnsafeCompactNullableString()
+							} else {
+								v = b.CompactNullableString()
+							}
+						} else {
+							if unsafe {
+								v = b.UnsafeNullableString()
+							} else {
+								v = b.NullableString()
+							}
+						}
+						s.ErrorMessage = v
+					}
+					if isFlexible {
+						s.UnknownTags = internalReadTags(&b)
+					}
+				}
+				v = a
+				s.Partitions = v
+			}
+			if isFlexible {
+				s.UnknownTags = internalReadTags(&b)
+			}
+		}
+		v = a
+		s.Topics = v
+	}
+	if isFlexible {
+		s.UnknownTags = internalReadTags(&b)
+	}
+	return b.Complete()
+}
+
+// NewPtrElectLeadersResponse returns a pointer to a default ElectLeadersResponse
+// This is a shortcut for creating a new(struct) and calling Default yourself.
+func NewPtrElectLeadersResponse() *ElectLeadersResponse {
+	var v ElectLeadersResponse
+	v.Default()
+	return &v
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to ElectLeadersResponse.
+func (v *ElectLeadersResponse) Default() {
+}
+
+// NewElectLeadersResponse returns a default ElectLeadersResponse
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewElectLeadersResponse() ElectLeadersResponse {
+	var v ElectLeadersResponse
+	v.Default()
+	return v
+}
+
+type IncrementalAlterConfigsRequestResourceConfig struct {
+	// Name is a key to modify (e.g. segment.bytes).
+	//
+	// For broker loggers, see KIP-412 section "Request/Response Overview"
+	// for details on how to change per logger log levels.
+	Name string
+
+	// Op is the type of operation to perform for this config name.
+	//
+	// SET (0) is to set a configuration value; the value must not be null.
+	//
+	// DELETE (1) is to delete a configuration key.
+	//
+	// APPEND (2) is to add a value to the list of values for a key (if the
+	// key is for a list of values).
+	//
+	// SUBTRACT (3) is to remove a value from a list of values (if the key
+	// is for a list of values).
+	Op IncrementalAlterConfigOp
+
+	// Value is a value to set for the key (e.g. 10).
+	Value *string
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+	UnknownTags Tags // v1+
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to IncrementalAlterConfigsRequestResourceConfig.
+func (v *IncrementalAlterConfigsRequestResourceConfig) Default() {
+}
+
+// NewIncrementalAlterConfigsRequestResourceConfig returns a default IncrementalAlterConfigsRequestResourceConfig
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewIncrementalAlterConfigsRequestResourceConfig() IncrementalAlterConfigsRequestResourceConfig {
+	var v IncrementalAlterConfigsRequestResourceConfig
+	v.Default()
+	return v
+}
+
+type IncrementalAlterConfigsRequestResource struct {
+	// ResourceType is an enum corresponding to the type of config to alter.
+	ResourceType ConfigResourceType
+
+	// ResourceName is the name of config to alter.
+	//
+	// If the requested type is a topic, this corresponds to a topic name.
+	//
+	// If the requested type is a broker, this should either be empty or be
+	// the ID of the broker this request is issued to. If it is empty, this
+	// updates all broker configs. If a specific ID, this updates just the
+	// broker. Using a specific ID also ensures that brokers reload config
+	// or secret files even if the file path has not changed. Lastly, password
+	// config options can only be defined on a per broker basis.
+	//
+	// If the type is broker logger, this must be a broker ID.
+	ResourceName string
+
+	// Configs contains key/value config pairs to set on the resource.
+	Configs []IncrementalAlterConfigsRequestResourceConfig
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+	UnknownTags Tags // v1+
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to IncrementalAlterConfigsRequestResource.
+func (v *IncrementalAlterConfigsRequestResource) Default() {
+}
+
+// NewIncrementalAlterConfigsRequestResource returns a default IncrementalAlterConfigsRequestResource
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewIncrementalAlterConfigsRequestResource() IncrementalAlterConfigsRequestResource {
+	var v IncrementalAlterConfigsRequestResource
+	v.Default()
+	return v
+}
+
+// IncrementalAlterConfigsRequest issues a request to alter either topic or
+// broker configs.
+//
+// This API was added in Kafka 2.3.0 to replace AlterConfigs. The key benefit
+// of this API is that consumers do not need to know the full config state
+// to add or remove new config options. See KIP-339 for more details.
+type IncrementalAlterConfigsRequest struct {
+	// Version is the version of this message used with a Kafka broker.
+	Version int16
+
+	// Resources is an array of configs to alter.
+	Resources []IncrementalAlterConfigsRequestResource
+
+	// ValidateOnly validates the request but does not apply it.
+	ValidateOnly bool
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+	UnknownTags Tags // v1+
+}
+
+func (*IncrementalAlterConfigsRequest) Key() int16                 { return 44 }
+func (*IncrementalAlterConfigsRequest) MaxVersion() int16          { return 1 }
+func (v *IncrementalAlterConfigsRequest) SetVersion(version int16) { v.Version = version }
+func (v *IncrementalAlterConfigsRequest) GetVersion() int16        { return v.Version }
+func (v *IncrementalAlterConfigsRequest) IsFlexible() bool         { return v.Version >= 1 }
+func (v *IncrementalAlterConfigsRequest) ResponseKind() Response {
+	r := &IncrementalAlterConfigsResponse{Version: v.Version}
+	r.Default()
+	return r
+}
+
+// RequestWith requests v on r and returns the response or an error.
+// For sharded requests, the response may be merged and still return an error.
+// It is better to rely on client.RequestSharded than to rely on proper merging behavior.
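+//
+// An illustrative sketch: SET (op 0) one topic config. The topic name and
+// value are placeholders; 2 is TOPIC in Kafka's resource-type enum.
+//
+//	cfg := kmsg.NewIncrementalAlterConfigsRequestResourceConfig()
+//	cfg.Name = "retention.ms"
+//	cfg.Op = 0 // SET
+//	value := "86400000"
+//	cfg.Value = &value
+//
+//	res := kmsg.NewIncrementalAlterConfigsRequestResource()
+//	res.ResourceType = 2 // TOPIC
+//	res.ResourceName = "foo"
+//	res.Configs = append(res.Configs, cfg)
+//
+//	req := kmsg.NewPtrIncrementalAlterConfigsRequest()
+//	req.Resources = append(req.Resources, res)
+//	resp, err := req.RequestWith(ctx, cl)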
+func (v *IncrementalAlterConfigsRequest) RequestWith(ctx context.Context, r Requestor) (*IncrementalAlterConfigsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*IncrementalAlterConfigsResponse) + return resp, err +} + +func (v *IncrementalAlterConfigsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + { + v := v.Resources + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ResourceType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.ResourceName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Configs + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Op + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.Value + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.ValidateOnly + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *IncrementalAlterConfigsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *IncrementalAlterConfigsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *IncrementalAlterConfigsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + s := v + { + v := s.Resources + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]IncrementalAlterConfigsRequestResource, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var t ConfigResourceType + { + v := b.Int8() + t = ConfigResourceType(v) + } + v := t + s.ResourceType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ResourceName = v + } + { + v := s.Configs + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]IncrementalAlterConfigsRequestResourceConfig, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + { + var t IncrementalAlterConfigOp + { + v := b.Int8() + t = IncrementalAlterConfigOp(v) + } + v := t + s.Op = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Value = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Configs = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Resources = v + } + { + v := b.Bool() + s.ValidateOnly = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrIncrementalAlterConfigsRequest returns a pointer to a default IncrementalAlterConfigsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrIncrementalAlterConfigsRequest() *IncrementalAlterConfigsRequest { + var v IncrementalAlterConfigsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to IncrementalAlterConfigsRequest. +func (v *IncrementalAlterConfigsRequest) Default() { +} + +// NewIncrementalAlterConfigsRequest returns a default IncrementalAlterConfigsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewIncrementalAlterConfigsRequest() IncrementalAlterConfigsRequest { + var v IncrementalAlterConfigsRequest + v.Default() + return v +} + +type IncrementalAlterConfigsResponseResource struct { + // ErrorCode is the error code returned for incrementally altering configs. + // + // CLUSTER_AUTHORIZATION_FAILED is returned if asking to alter broker + // configs but the client is not authorized to do so. + // + // TOPIC_AUTHORIZATION_FAILED is returned if asking to alter topic + // configs but the client is not authorized to do so. + // + // INVALID_TOPIC_EXCEPTION is returned if the requested topic was invalid. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if the broker does not know of + // the requested topic. + // + // INVALID_REQUEST is returned if the requested config is invalid or if + // asking Kafka to alter an invalid resource. + ErrorCode int16 + + // ErrorMessage is an informative message if the incremental alter config failed. + ErrorMessage *string + + // ResourceType is the enum corresponding to the type of altered config. + ResourceType ConfigResourceType + + // ResourceName is the name corresponding to the incremental alter config + // request. + ResourceName string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to IncrementalAlterConfigsResponseResource. +func (v *IncrementalAlterConfigsResponseResource) Default() { +} + +// NewIncrementalAlterConfigsResponseResource returns a default IncrementalAlterConfigsResponseResource +// This is a shortcut for creating a struct and calling Default yourself. 
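+//
+// Illustrative only: surfacing per-resource alter failures from a decoded
+// response.
+//
+//	for _, r := range resp.Resources {
+//		if r.ErrorCode != 0 && r.ErrorMessage != nil {
+//			log.Printf("alter %s failed: %s", r.ResourceName, *r.ErrorMessage)
+//		}
+//	}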
+func NewIncrementalAlterConfigsResponseResource() IncrementalAlterConfigsResponseResource {
+	var v IncrementalAlterConfigsResponseResource
+	v.Default()
+	return v
+}
+
+// IncrementalAlterConfigsResponse is returned from an IncrementalAlterConfigsRequest.
+type IncrementalAlterConfigsResponse struct {
+	// Version is the version of this message used with a Kafka broker.
+	Version int16
+
+	// ThrottleMillis is how long of a throttle Kafka will apply to the client
+	// after responding to this request.
+	ThrottleMillis int32
+
+	// Resources are responses for each resource in the alter request.
+	Resources []IncrementalAlterConfigsResponseResource
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+	UnknownTags Tags // v1+
+}
+
+func (*IncrementalAlterConfigsResponse) Key() int16                 { return 44 }
+func (*IncrementalAlterConfigsResponse) MaxVersion() int16          { return 1 }
+func (v *IncrementalAlterConfigsResponse) SetVersion(version int16) { v.Version = version }
+func (v *IncrementalAlterConfigsResponse) GetVersion() int16        { return v.Version }
+func (v *IncrementalAlterConfigsResponse) IsFlexible() bool         { return v.Version >= 1 }
+func (v *IncrementalAlterConfigsResponse) Throttle() (int32, bool) {
+	return v.ThrottleMillis, v.Version >= 0
+}
+
+func (v *IncrementalAlterConfigsResponse) SetThrottle(throttleMillis int32) {
+	v.ThrottleMillis = throttleMillis
+}
+
+func (v *IncrementalAlterConfigsResponse) RequestKind() Request {
+	return &IncrementalAlterConfigsRequest{Version: v.Version}
+}
+
+func (v *IncrementalAlterConfigsResponse) AppendTo(dst []byte) []byte {
+	version := v.Version
+	_ = version
+	isFlexible := version >= 1
+	_ = isFlexible
+	{
+		v := v.ThrottleMillis
+		dst = kbin.AppendInt32(dst, v)
+	}
+	{
+		v := v.Resources
+		if isFlexible {
+			dst = kbin.AppendCompactArrayLen(dst, len(v))
+		} else {
+			dst = kbin.AppendArrayLen(dst, len(v))
+		}
+		for i := range v {
+			v := &v[i]
+			{
+				v := v.ErrorCode
+				dst = kbin.AppendInt16(dst, v)
+			}
+			{
+				v := v.ErrorMessage
+				if isFlexible {
+					dst = kbin.AppendCompactNullableString(dst, v)
+				} else {
+					dst = kbin.AppendNullableString(dst, v)
+				}
+			}
+			{
+				v := v.ResourceType
+				{
+					v := int8(v)
+					dst = kbin.AppendInt8(dst, v)
+				}
+			}
+			{
+				v := v.ResourceName
+				if isFlexible {
+					dst = kbin.AppendCompactString(dst, v)
+				} else {
+					dst = kbin.AppendString(dst, v)
+				}
+			}
+			if isFlexible {
+				dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len()))
+				dst = v.UnknownTags.AppendEach(dst)
+			}
+		}
+	}
+	if isFlexible {
+		dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len()))
+		dst = v.UnknownTags.AppendEach(dst)
+	}
+	return dst
+}
+
+func (v *IncrementalAlterConfigsResponse) ReadFrom(src []byte) error {
+	return v.readFrom(src, false)
+}
+
+func (v *IncrementalAlterConfigsResponse) UnsafeReadFrom(src []byte) error {
+	return v.readFrom(src, true)
+}
+
+func (v *IncrementalAlterConfigsResponse) readFrom(src []byte, unsafe bool) error {
+	v.Default()
+	b := kbin.Reader{Src: src}
+	version := v.Version
+	_ = version
+	isFlexible := version >= 1
+	_ = isFlexible
+	s := v
+	{
+		v := b.Int32()
+		s.ThrottleMillis = v
+	}
+	{
+		v := s.Resources
+		a := v
+		var l int32
+		if isFlexible {
+			l = b.CompactArrayLen()
+		} else {
+			l = b.ArrayLen()
+		}
+		if !b.Ok() {
+			return b.Complete()
+		}
+		a = a[:0]
+		if l > 0 {
+			a = append(a, make([]IncrementalAlterConfigsResponseResource, l)...)
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + { + var t ConfigResourceType + { + v := b.Int8() + t = ConfigResourceType(v) + } + v := t + s.ResourceType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ResourceName = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Resources = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrIncrementalAlterConfigsResponse returns a pointer to a default IncrementalAlterConfigsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrIncrementalAlterConfigsResponse() *IncrementalAlterConfigsResponse { + var v IncrementalAlterConfigsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to IncrementalAlterConfigsResponse. +func (v *IncrementalAlterConfigsResponse) Default() { +} + +// NewIncrementalAlterConfigsResponse returns a default IncrementalAlterConfigsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewIncrementalAlterConfigsResponse() IncrementalAlterConfigsResponse { + var v IncrementalAlterConfigsResponse + v.Default() + return v +} + +type AlterPartitionAssignmentsRequestTopicPartition struct { + // Partition is a partition to reassign. + Partition int32 + + // Replicas are replicas to place the partition on, or null to + // cancel a pending reassignment of this partition. + Replicas []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterPartitionAssignmentsRequestTopicPartition. +func (v *AlterPartitionAssignmentsRequestTopicPartition) Default() { +} + +// NewAlterPartitionAssignmentsRequestTopicPartition returns a default AlterPartitionAssignmentsRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterPartitionAssignmentsRequestTopicPartition() AlterPartitionAssignmentsRequestTopicPartition { + var v AlterPartitionAssignmentsRequestTopicPartition + v.Default() + return v +} + +type AlterPartitionAssignmentsRequestTopic struct { + // Topic is a topic to reassign the partitions of. + Topic string + + // Partitions contains partitions to reassign. + Partitions []AlterPartitionAssignmentsRequestTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterPartitionAssignmentsRequestTopic. +func (v *AlterPartitionAssignmentsRequestTopic) Default() { +} + +// NewAlterPartitionAssignmentsRequestTopic returns a default AlterPartitionAssignmentsRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. 
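+//
+// A sketch of building one reassignment entry: move partition 0 of a
+// hypothetical topic onto brokers 1, 2, and 3. A nil Replicas slice would
+// instead cancel a pending reassignment for the partition.
+//
+//	p := kmsg.NewAlterPartitionAssignmentsRequestTopicPartition()
+//	p.Partition = 0
+//	p.Replicas = []int32{1, 2, 3}
+//	t := kmsg.NewAlterPartitionAssignmentsRequestTopic()
+//	t.Topic = "foo"
+//	t.Partitions = append(t.Partitions, p)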
+func NewAlterPartitionAssignmentsRequestTopic() AlterPartitionAssignmentsRequestTopic {
+	var v AlterPartitionAssignmentsRequestTopic
+	v.Default()
+	return v
+}
+
+// AlterPartitionAssignmentsRequest, proposed in KIP-455 and implemented in
+// Kafka 2.4.0, is a request to reassign partitions to certain brokers.
+//
+// ACL wise, this requires ALTER on CLUSTER.
+type AlterPartitionAssignmentsRequest struct {
+	// Version is the version of this message used with a Kafka broker.
+	Version int16
+
+	// TimeoutMillis is how long Kafka can wait before responding to this request.
+	// This field has no effect on Kafka's processing of the request; the request
+	// will continue to be processed if the timeout is reached. If the timeout is
+	// reached, Kafka will reply with a REQUEST_TIMED_OUT error.
+	//
+	// This field has a default of 60000.
+	TimeoutMillis int32
+
+	// Topics are the topics to reassign partitions for.
+	Topics []AlterPartitionAssignmentsRequestTopic
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+	UnknownTags Tags
+}
+
+func (*AlterPartitionAssignmentsRequest) Key() int16                 { return 45 }
+func (*AlterPartitionAssignmentsRequest) MaxVersion() int16          { return 0 }
+func (v *AlterPartitionAssignmentsRequest) SetVersion(version int16) { v.Version = version }
+func (v *AlterPartitionAssignmentsRequest) GetVersion() int16        { return v.Version }
+func (v *AlterPartitionAssignmentsRequest) IsFlexible() bool         { return v.Version >= 0 }
+func (v *AlterPartitionAssignmentsRequest) Timeout() int32           { return v.TimeoutMillis }
+func (v *AlterPartitionAssignmentsRequest) SetTimeout(timeoutMillis int32) {
+	v.TimeoutMillis = timeoutMillis
+}
+func (v *AlterPartitionAssignmentsRequest) IsAdminRequest() {}
+func (v *AlterPartitionAssignmentsRequest) ResponseKind() Response {
+	r := &AlterPartitionAssignmentsResponse{Version: v.Version}
+	r.Default()
+	return r
+}
+
+// RequestWith requests v on r and returns the response or an error.
+// For sharded requests, the response may be merged and still return an error.
+// It is better to rely on client.RequestSharded than to rely on proper merging behavior.
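+//
+// Continuing the sketch above: issue the request (requires ALTER on
+// CLUSTER) and check the global error, if any.
+//
+//	req := kmsg.NewPtrAlterPartitionAssignmentsRequest()
+//	req.Topics = append(req.Topics, t)
+//	resp, err := req.RequestWith(ctx, cl)
+//	if err == nil && resp.ErrorCode != 0 {
+//		// e.g. NOT_CONTROLLER; see resp.ErrorMessage for details
+//	}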
+func (v *AlterPartitionAssignmentsRequest) RequestWith(ctx context.Context, r Requestor) (*AlterPartitionAssignmentsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*AlterPartitionAssignmentsResponse) + return resp, err +} + +func (v *AlterPartitionAssignmentsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.TimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Replicas + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AlterPartitionAssignmentsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AlterPartitionAssignmentsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AlterPartitionAssignmentsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.TimeoutMillis = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterPartitionAssignmentsRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterPartitionAssignmentsRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := s.Replicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 0 || l == 0 { + a = []int32{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Replicas = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAlterPartitionAssignmentsRequest returns a pointer to a default AlterPartitionAssignmentsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAlterPartitionAssignmentsRequest() *AlterPartitionAssignmentsRequest { + var v AlterPartitionAssignmentsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterPartitionAssignmentsRequest. +func (v *AlterPartitionAssignmentsRequest) Default() { + v.TimeoutMillis = 60000 +} + +// NewAlterPartitionAssignmentsRequest returns a default AlterPartitionAssignmentsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterPartitionAssignmentsRequest() AlterPartitionAssignmentsRequest { + var v AlterPartitionAssignmentsRequest + v.Default() + return v +} + +type AlterPartitionAssignmentsResponseTopicPartition struct { + // Partition is the partition being responded to. + Partition int32 + + // ErrorCode is the error code returned for partition reassignments. + // + // REQUEST_TIMED_OUT is returned if the request timed out. + // + // NOT_CONTROLLER is returned if the request was not issued to a Kafka + // controller. + // + // CLUSTER_AUTHORIZATION_FAILED is returned if the client is not + // authorized to reassign partitions. + // + // NO_REASSIGNMENT_IN_PROGRESS is returned for partition reassignment + // cancellations when the partition was not being reassigned. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if the broker does not know of + // the requested topic or the topic is being deleted. + ErrorCode int16 + + // ErrorMessage is an informative message if the partition reassignment failed. + ErrorMessage *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterPartitionAssignmentsResponseTopicPartition. +func (v *AlterPartitionAssignmentsResponseTopicPartition) Default() { +} + +// NewAlterPartitionAssignmentsResponseTopicPartition returns a default AlterPartitionAssignmentsResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterPartitionAssignmentsResponseTopicPartition() AlterPartitionAssignmentsResponseTopicPartition { + var v AlterPartitionAssignmentsResponseTopicPartition + v.Default() + return v +} + +type AlterPartitionAssignmentsResponseTopic struct { + // Topic is the topic being responded to. + Topic string + + // Partitions contains responses for partitions. + Partitions []AlterPartitionAssignmentsResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterPartitionAssignmentsResponseTopic. 
+func (v *AlterPartitionAssignmentsResponseTopic) Default() { +} + +// NewAlterPartitionAssignmentsResponseTopic returns a default AlterPartitionAssignmentsResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterPartitionAssignmentsResponseTopic() AlterPartitionAssignmentsResponseTopic { + var v AlterPartitionAssignmentsResponseTopic + v.Default() + return v +} + +// AlterPartitionAssignmentsResponse is returned for an AlterPartitionAssignmentsRequest. +type AlterPartitionAssignmentsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // ErrorCode is any global (applied to all partitions) error code. + ErrorCode int16 + + // ErrorMessage is any global (applied to all partitions) error message. + ErrorMessage *string + + // Topics contains responses for each topic requested. + Topics []AlterPartitionAssignmentsResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*AlterPartitionAssignmentsResponse) Key() int16 { return 45 } +func (*AlterPartitionAssignmentsResponse) MaxVersion() int16 { return 0 } +func (v *AlterPartitionAssignmentsResponse) SetVersion(version int16) { v.Version = version } +func (v *AlterPartitionAssignmentsResponse) GetVersion() int16 { return v.Version } +func (v *AlterPartitionAssignmentsResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *AlterPartitionAssignmentsResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 0 +} + +func (v *AlterPartitionAssignmentsResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *AlterPartitionAssignmentsResponse) RequestKind() Request { + return &AlterPartitionAssignmentsRequest{Version: v.Version} +} + +func (v *AlterPartitionAssignmentsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + 
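// Flexible message versions close every structure with a tagged-field
+	// section: a uvarint count of unknown tagged fields followed by each
+	// field's raw bytes; internalReadTags mirrors this on the decode path.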
+	return dst
+}
+
+func (v *AlterPartitionAssignmentsResponse) ReadFrom(src []byte) error {
+	return v.readFrom(src, false)
+}
+
+func (v *AlterPartitionAssignmentsResponse) UnsafeReadFrom(src []byte) error {
+	return v.readFrom(src, true)
+}
+
+func (v *AlterPartitionAssignmentsResponse) readFrom(src []byte, unsafe bool) error {
+	v.Default()
+	b := kbin.Reader{Src: src}
+	version := v.Version
+	_ = version
+	isFlexible := version >= 0
+	_ = isFlexible
+	s := v
+	{
+		v := b.Int32()
+		s.ThrottleMillis = v
+	}
+	{
+		v := b.Int16()
+		s.ErrorCode = v
+	}
+	{
+		var v *string
+		if isFlexible {
+			if unsafe {
+				v = b.UnsafeCompactNullableString()
+			} else {
+				v = b.CompactNullableString()
+			}
+		} else {
+			if unsafe {
+				v = b.UnsafeNullableString()
+			} else {
+				v = b.NullableString()
+			}
+		}
+		s.ErrorMessage = v
+	}
+	{
+		v := s.Topics
+		a := v
+		var l int32
+		if isFlexible {
+			l = b.CompactArrayLen()
+		} else {
+			l = b.ArrayLen()
+		}
+		if !b.Ok() {
+			return b.Complete()
+		}
+		a = a[:0]
+		if l > 0 {
+			a = append(a, make([]AlterPartitionAssignmentsResponseTopic, l)...)
+		}
+		for i := int32(0); i < l; i++ {
+			v := &a[i]
+			v.Default()
+			s := v
+			{
+				var v string
+				if unsafe {
+					if isFlexible {
+						v = b.UnsafeCompactString()
+					} else {
+						v = b.UnsafeString()
+					}
+				} else {
+					if isFlexible {
+						v = b.CompactString()
+					} else {
+						v = b.String()
+					}
+				}
+				s.Topic = v
+			}
+			{
+				v := s.Partitions
+				a := v
+				var l int32
+				if isFlexible {
+					l = b.CompactArrayLen()
+				} else {
+					l = b.ArrayLen()
+				}
+				if !b.Ok() {
+					return b.Complete()
+				}
+				a = a[:0]
+				if l > 0 {
+					a = append(a, make([]AlterPartitionAssignmentsResponseTopicPartition, l)...)
+				}
+				for i := int32(0); i < l; i++ {
+					v := &a[i]
+					v.Default()
+					s := v
+					{
+						v := b.Int32()
+						s.Partition = v
+					}
+					{
+						v := b.Int16()
+						s.ErrorCode = v
+					}
+					{
+						var v *string
+						if isFlexible {
+							if unsafe {
+								v = b.UnsafeCompactNullableString()
+							} else {
+								v = b.CompactNullableString()
+							}
+						} else {
+							if unsafe {
+								v = b.UnsafeNullableString()
+							} else {
+								v = b.NullableString()
+							}
+						}
+						s.ErrorMessage = v
+					}
+					if isFlexible {
+						s.UnknownTags = internalReadTags(&b)
+					}
+				}
+				v = a
+				s.Partitions = v
+			}
+			if isFlexible {
+				s.UnknownTags = internalReadTags(&b)
+			}
+		}
+		v = a
+		s.Topics = v
+	}
+	if isFlexible {
+		s.UnknownTags = internalReadTags(&b)
+	}
+	return b.Complete()
+}
+
+// NewPtrAlterPartitionAssignmentsResponse returns a pointer to a default AlterPartitionAssignmentsResponse
+// This is a shortcut for creating a new(struct) and calling Default yourself.
+func NewPtrAlterPartitionAssignmentsResponse() *AlterPartitionAssignmentsResponse {
+	var v AlterPartitionAssignmentsResponse
+	v.Default()
+	return &v
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to AlterPartitionAssignmentsResponse.
+func (v *AlterPartitionAssignmentsResponse) Default() {
+}
+
+// NewAlterPartitionAssignmentsResponse returns a default AlterPartitionAssignmentsResponse
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewAlterPartitionAssignmentsResponse() AlterPartitionAssignmentsResponse {
+	var v AlterPartitionAssignmentsResponse
+	v.Default()
+	return v
+}
+
+type ListPartitionReassignmentsRequestTopic struct {
+	// Topic is a topic to list in progress partition reassignments of.
+	Topic string
+
+	// Partitions are partitions to list in progress reassignments of.
+	Partitions []int32
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+	UnknownTags Tags
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to ListPartitionReassignmentsRequestTopic.
+func (v *ListPartitionReassignmentsRequestTopic) Default() {
+}
+
+// NewListPartitionReassignmentsRequestTopic returns a default ListPartitionReassignmentsRequestTopic
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewListPartitionReassignmentsRequestTopic() ListPartitionReassignmentsRequestTopic {
+	var v ListPartitionReassignmentsRequestTopic
+	v.Default()
+	return v
+}
+
+// ListPartitionReassignmentsRequest, proposed in KIP-455 and implemented in
+// Kafka 2.4.0, is a request to list in progress partition reassignments.
+//
+// ACL wise, this requires DESCRIBE on CLUSTER.
+type ListPartitionReassignmentsRequest struct {
+	// Version is the version of this message used with a Kafka broker.
+	Version int16
+
+	// TimeoutMillis is how long Kafka can wait before responding to this request.
+	// This field has no effect on Kafka's processing of the request; the request
+	// will continue to be processed if the timeout is reached. If the timeout is
+	// reached, Kafka will reply with a REQUEST_TIMED_OUT error.
+	//
+	// This field has a default of 60000.
+	TimeoutMillis int32
+
+	// Topics are topics to list in progress partition reassignments of, or null
+	// to list everything.
+	Topics []ListPartitionReassignmentsRequestTopic
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+	UnknownTags Tags
+}
+
+func (*ListPartitionReassignmentsRequest) Key() int16 { return 46 }
+func (*ListPartitionReassignmentsRequest) MaxVersion() int16 { return 0 }
+func (v *ListPartitionReassignmentsRequest) SetVersion(version int16) { v.Version = version }
+func (v *ListPartitionReassignmentsRequest) GetVersion() int16 { return v.Version }
+func (v *ListPartitionReassignmentsRequest) IsFlexible() bool { return v.Version >= 0 }
+func (v *ListPartitionReassignmentsRequest) Timeout() int32 { return v.TimeoutMillis }
+func (v *ListPartitionReassignmentsRequest) SetTimeout(timeoutMillis int32) {
+	v.TimeoutMillis = timeoutMillis
+}
+func (v *ListPartitionReassignmentsRequest) IsAdminRequest() {}
+func (v *ListPartitionReassignmentsRequest) ResponseKind() Response {
+	r := &ListPartitionReassignmentsResponse{Version: v.Version}
+	r.Default()
+	return r
+}
+
+// RequestWith issues v to r and returns the response or an error.
+// For sharded requests, the response may be merged and still return an error.
+// It is better to rely on client.RequestSharded than to rely on proper merging behavior.
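Another editor's sketch (not part of this diff), under the same assumptions as the earlier one: passing a nil Topics slice lists every in-progress reassignment, and each returned partition exposes the current replica set alongside the in-flight additions and removals.

func listReassignmentsExample(ctx context.Context, client kmsg.Requestor) error {
	req := kmsg.NewPtrListPartitionReassignmentsRequest()
	req.Topics = nil // null topics: list all in-progress reassignments

	resp, err := req.RequestWith(ctx, client)
	if err != nil {
		return err
	}
	if resp.ErrorCode != 0 { // e.g. NOT_CONTROLLER or CLUSTER_AUTHORIZATION_FAILED
		return fmt.Errorf("list rejected: error code %d", resp.ErrorCode)
	}
	for _, t := range resp.Topics {
		for _, p := range t.Partitions {
			// Replicas is the full current set; AddingReplicas and
			// RemovingReplicas are the in-flight delta.
			fmt.Printf("%s[%d]: replicas %v, adding %v, removing %v\n",
				t.Topic, p.Partition, p.Replicas, p.AddingReplicas, p.RemovingReplicas)
		}
	}
	return nil
}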
+func (v *ListPartitionReassignmentsRequest) RequestWith(ctx context.Context, r Requestor) (*ListPartitionReassignmentsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*ListPartitionReassignmentsResponse) + return resp, err +} + +func (v *ListPartitionReassignmentsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.TimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ListPartitionReassignmentsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ListPartitionReassignmentsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ListPartitionReassignmentsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.TimeoutMillis = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 0 || l == 0 { + a = []ListPartitionReassignmentsRequestTopic{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ListPartitionReassignmentsRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrListPartitionReassignmentsRequest returns a pointer to a default ListPartitionReassignmentsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrListPartitionReassignmentsRequest() *ListPartitionReassignmentsRequest { + var v ListPartitionReassignmentsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListPartitionReassignmentsRequest. 
+func (v *ListPartitionReassignmentsRequest) Default() { + v.TimeoutMillis = 60000 +} + +// NewListPartitionReassignmentsRequest returns a default ListPartitionReassignmentsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewListPartitionReassignmentsRequest() ListPartitionReassignmentsRequest { + var v ListPartitionReassignmentsRequest + v.Default() + return v +} + +type ListPartitionReassignmentsResponseTopicPartition struct { + // Partition is the partition being responded to. + Partition int32 + + // Replicas is the partition's current replicas. + Replicas []int32 + + // AddingReplicas are replicas currently being added to the partition. + AddingReplicas []int32 + + // RemovingReplicas are replicas currently being removed from the partition. + RemovingReplicas []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListPartitionReassignmentsResponseTopicPartition. +func (v *ListPartitionReassignmentsResponseTopicPartition) Default() { +} + +// NewListPartitionReassignmentsResponseTopicPartition returns a default ListPartitionReassignmentsResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewListPartitionReassignmentsResponseTopicPartition() ListPartitionReassignmentsResponseTopicPartition { + var v ListPartitionReassignmentsResponseTopicPartition + v.Default() + return v +} + +type ListPartitionReassignmentsResponseTopic struct { + // Topic is the topic being responded to. + Topic string + + // Partitions contains responses for partitions. + Partitions []ListPartitionReassignmentsResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListPartitionReassignmentsResponseTopic. +func (v *ListPartitionReassignmentsResponseTopic) Default() { +} + +// NewListPartitionReassignmentsResponseTopic returns a default ListPartitionReassignmentsResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewListPartitionReassignmentsResponseTopic() ListPartitionReassignmentsResponseTopic { + var v ListPartitionReassignmentsResponseTopic + v.Default() + return v +} + +// ListPartitionReassignmentsResponse is returned for a ListPartitionReassignmentsRequest. +type ListPartitionReassignmentsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // ErrorCode is the error code returned for listing reassignments. + // + // REQUEST_TIMED_OUT is returned if the request timed out. + // + // NOT_CONTROLLER is returned if the request was not issued to a Kafka + // controller. + // + // CLUSTER_AUTHORIZATION_FAILED is returned if the client is not + // authorized to reassign partitions. + ErrorCode int16 + + // ErrorMessage is any global (applied to all partitions) error message. + ErrorMessage *string + + // Topics contains responses for each topic requested. + Topics []ListPartitionReassignmentsResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags +} + +func (*ListPartitionReassignmentsResponse) Key() int16 { return 46 } +func (*ListPartitionReassignmentsResponse) MaxVersion() int16 { return 0 } +func (v *ListPartitionReassignmentsResponse) SetVersion(version int16) { v.Version = version } +func (v *ListPartitionReassignmentsResponse) GetVersion() int16 { return v.Version } +func (v *ListPartitionReassignmentsResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *ListPartitionReassignmentsResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 0 +} + +func (v *ListPartitionReassignmentsResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *ListPartitionReassignmentsResponse) RequestKind() Request { + return &ListPartitionReassignmentsRequest{Version: v.Version} +} + +func (v *ListPartitionReassignmentsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Replicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + { + v := v.AddingReplicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + { + v := v.RemovingReplicas + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ListPartitionReassignmentsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ListPartitionReassignmentsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ListPartitionReassignmentsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = 
b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ListPartitionReassignmentsResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ListPartitionReassignmentsResponseTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := s.Replicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Replicas = v + } + { + v := s.AddingReplicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.AddingReplicas = v + } + { + v := s.RemovingReplicas + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.RemovingReplicas = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrListPartitionReassignmentsResponse returns a pointer to a default ListPartitionReassignmentsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrListPartitionReassignmentsResponse() *ListPartitionReassignmentsResponse { + var v ListPartitionReassignmentsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListPartitionReassignmentsResponse. +func (v *ListPartitionReassignmentsResponse) Default() { +} + +// NewListPartitionReassignmentsResponse returns a default ListPartitionReassignmentsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewListPartitionReassignmentsResponse() ListPartitionReassignmentsResponse { + var v ListPartitionReassignmentsResponse + v.Default() + return v +} + +type OffsetDeleteRequestTopicPartition struct { + // Partition is a partition to delete offsets for. + Partition int32 +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetDeleteRequestTopicPartition. 
+func (v *OffsetDeleteRequestTopicPartition) Default() {
+}
+
+// NewOffsetDeleteRequestTopicPartition returns a default OffsetDeleteRequestTopicPartition
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewOffsetDeleteRequestTopicPartition() OffsetDeleteRequestTopicPartition {
+	var v OffsetDeleteRequestTopicPartition
+	v.Default()
+	return v
+}
+
+type OffsetDeleteRequestTopic struct {
+	// Topic is a topic to delete offsets in.
+	Topic string
+
+	// Partitions are partitions to delete offsets for.
+	Partitions []OffsetDeleteRequestTopicPartition
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to OffsetDeleteRequestTopic.
+func (v *OffsetDeleteRequestTopic) Default() {
+}
+
+// NewOffsetDeleteRequestTopic returns a default OffsetDeleteRequestTopic
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewOffsetDeleteRequestTopic() OffsetDeleteRequestTopic {
+	var v OffsetDeleteRequestTopic
+	v.Default()
+	return v
+}
+
+// OffsetDeleteRequest, proposed in KIP-496 and implemented in Kafka 2.4.0, is
+// a request to delete group offsets.
+//
+// ACL wise, this requires DELETE on GROUP for the group and READ on TOPIC for
+// each topic.
+type OffsetDeleteRequest struct {
+	// Version is the version of this message used with a Kafka broker.
+	Version int16
+
+	// Group is the group to delete offsets in.
+	Group string
+
+	// Topics are topics to delete offsets in.
+	Topics []OffsetDeleteRequestTopic
+}
+
+func (*OffsetDeleteRequest) Key() int16 { return 47 }
+func (*OffsetDeleteRequest) MaxVersion() int16 { return 0 }
+func (v *OffsetDeleteRequest) SetVersion(version int16) { v.Version = version }
+func (v *OffsetDeleteRequest) GetVersion() int16 { return v.Version }
+func (v *OffsetDeleteRequest) IsFlexible() bool { return false }
+func (v *OffsetDeleteRequest) IsGroupCoordinatorRequest() {}
+func (v *OffsetDeleteRequest) ResponseKind() Response {
+	r := &OffsetDeleteResponse{Version: v.Version}
+	r.Default()
+	return r
+}
+
+// RequestWith issues v to r and returns the response or an error.
+// For sharded requests, the response may be merged and still return an error.
+// It is better to rely on client.RequestSharded than to rely on proper merging behavior.
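Editor's sketch (not part of this diff), same assumptions as above. This request is routed to the group's coordinator (note IsGroupCoordinatorRequest above), and per-partition errors such as GROUP_SUBSCRIBED_TO_TOPIC are reported in the response documented further below.

func offsetDeleteExample(ctx context.Context, client kmsg.Requestor) error {
	p := kmsg.NewOffsetDeleteRequestTopicPartition()
	p.Partition = 0 // hypothetical partition

	t := kmsg.NewOffsetDeleteRequestTopic()
	t.Topic = "foo" // hypothetical topic
	t.Partitions = append(t.Partitions, p)

	req := kmsg.NewPtrOffsetDeleteRequest()
	req.Group = "my-group" // hypothetical group
	req.Topics = append(req.Topics, t)

	resp, err := req.RequestWith(ctx, client)
	if err != nil {
		return err
	}
	if resp.ErrorCode != 0 { // group-wide error, e.g. GROUP_ID_NOT_FOUND
		return fmt.Errorf("offset delete rejected: error code %d", resp.ErrorCode)
	}
	for _, rt := range resp.Topics {
		for _, rp := range rt.Partitions {
			if rp.ErrorCode != 0 { // e.g. GROUP_SUBSCRIBED_TO_TOPIC
				fmt.Printf("%s[%d] failed with error code %d\n", rt.Topic, rp.Partition, rp.ErrorCode)
			}
		}
	}
	return nil
}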
+func (v *OffsetDeleteRequest) RequestWith(ctx context.Context, r Requestor) (*OffsetDeleteResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*OffsetDeleteResponse) + return resp, err +} + +func (v *OffsetDeleteRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.Group + dst = kbin.AppendString(dst, v) + } + { + v := v.Topics + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Topic + dst = kbin.AppendString(dst, v) + } + { + v := v.Partitions + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + } + } + } + } + return dst +} + +func (v *OffsetDeleteRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *OffsetDeleteRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *OffsetDeleteRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Group = v + } + { + v := s.Topics + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetDeleteRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetDeleteRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + } + v = a + s.Partitions = v + } + } + v = a + s.Topics = v + } + return b.Complete() +} + +// NewPtrOffsetDeleteRequest returns a pointer to a default OffsetDeleteRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrOffsetDeleteRequest() *OffsetDeleteRequest { + var v OffsetDeleteRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetDeleteRequest. +func (v *OffsetDeleteRequest) Default() { +} + +// NewOffsetDeleteRequest returns a default OffsetDeleteRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetDeleteRequest() OffsetDeleteRequest { + var v OffsetDeleteRequest + v.Default() + return v +} + +type OffsetDeleteResponseTopicPartition struct { + // Partition is the partition being responded to. + Partition int32 + + // ErrorCode is any per partition error code. + // + // TOPIC_AUTHORIZATION_FAILED is returned if the client is not authorized + // for the topic / partition. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if the broker does not know of + // the requested topic. + // + // GROUP_SUBSCRIBED_TO_TOPIC is returned if the topic is still subscribed to. + ErrorCode int16 +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetDeleteResponseTopicPartition. +func (v *OffsetDeleteResponseTopicPartition) Default() { +} + +// NewOffsetDeleteResponseTopicPartition returns a default OffsetDeleteResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. 
+func NewOffsetDeleteResponseTopicPartition() OffsetDeleteResponseTopicPartition {
+	var v OffsetDeleteResponseTopicPartition
+	v.Default()
+	return v
+}
+
+type OffsetDeleteResponseTopic struct {
+	// Topic is the topic being responded to.
+	Topic string
+
+	// Partitions are partitions being responded to.
+	Partitions []OffsetDeleteResponseTopicPartition
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to OffsetDeleteResponseTopic.
+func (v *OffsetDeleteResponseTopic) Default() {
+}
+
+// NewOffsetDeleteResponseTopic returns a default OffsetDeleteResponseTopic
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewOffsetDeleteResponseTopic() OffsetDeleteResponseTopic {
+	var v OffsetDeleteResponseTopic
+	v.Default()
+	return v
+}
+
+// OffsetDeleteResponse is a response to an offset delete request.
+type OffsetDeleteResponse struct {
+	// Version is the version of this message used with a Kafka broker.
+	Version int16
+
+	// ErrorCode is any group wide error.
+	//
+	// GROUP_AUTHORIZATION_FAILED is returned if the client is not authorized
+	// for the group.
+	//
+	// INVALID_GROUP_ID is returned if the requested group ID is invalid.
+	//
+	// COORDINATOR_NOT_AVAILABLE is returned if the coordinator is not available.
+	//
+	// COORDINATOR_LOAD_IN_PROGRESS is returned if the group is loading.
+	//
+	// NOT_COORDINATOR is returned if the requested broker is not the coordinator
+	// for the requested group.
+	//
+	// GROUP_ID_NOT_FOUND is returned if the group ID does not exist.
+	ErrorCode int16
+
+	// ThrottleMillis is how long of a throttle Kafka will apply to the client
+	// after responding to this request.
+	ThrottleMillis int32
+
+	// Topics are responses to requested topics.
+ Topics []OffsetDeleteResponseTopic +} + +func (*OffsetDeleteResponse) Key() int16 { return 47 } +func (*OffsetDeleteResponse) MaxVersion() int16 { return 0 } +func (v *OffsetDeleteResponse) SetVersion(version int16) { v.Version = version } +func (v *OffsetDeleteResponse) GetVersion() int16 { return v.Version } +func (v *OffsetDeleteResponse) IsFlexible() bool { return false } +func (v *OffsetDeleteResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 0 } +func (v *OffsetDeleteResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *OffsetDeleteResponse) RequestKind() Request { return &OffsetDeleteRequest{Version: v.Version} } + +func (v *OffsetDeleteResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Topic + dst = kbin.AppendString(dst, v) + } + { + v := v.Partitions + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + } + } + } + } + return dst +} + +func (v *OffsetDeleteResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *OffsetDeleteResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *OffsetDeleteResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Topics + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetDeleteResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]OffsetDeleteResponseTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + } + v = a + s.Partitions = v + } + } + v = a + s.Topics = v + } + return b.Complete() +} + +// NewPtrOffsetDeleteResponse returns a pointer to a default OffsetDeleteResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrOffsetDeleteResponse() *OffsetDeleteResponse { + var v OffsetDeleteResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to OffsetDeleteResponse. +func (v *OffsetDeleteResponse) Default() { +} + +// NewOffsetDeleteResponse returns a default OffsetDeleteResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewOffsetDeleteResponse() OffsetDeleteResponse { + var v OffsetDeleteResponse + v.Default() + return v +} + +type DescribeClientQuotasRequestComponent struct { + // EntityType is the entity component type that this filter component + // applies to; some possible values are "user" or "client-id". 
+	EntityType string
+
+	// MatchType specifies how to match an entity,
+	// with 0 meaning match on the name exactly,
+	// 1 meaning match on the default name,
+	// and 2 meaning any specified name.
+	MatchType QuotasMatchType
+
+	// Match is the string to match against, or null if unused for the given
+	// match type.
+	Match *string
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+	UnknownTags Tags // v1+
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to DescribeClientQuotasRequestComponent.
+func (v *DescribeClientQuotasRequestComponent) Default() {
+}
+
+// NewDescribeClientQuotasRequestComponent returns a default DescribeClientQuotasRequestComponent
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewDescribeClientQuotasRequestComponent() DescribeClientQuotasRequestComponent {
+	var v DescribeClientQuotasRequestComponent
+	v.Default()
+	return v
+}
+
+// DescribeClientQuotasRequest, proposed in KIP-546 and introduced with Kafka 2.6.0,
+// provides a way to describe client quotas.
+type DescribeClientQuotasRequest struct {
+	// Version is the version of this message used with a Kafka broker.
+	Version int16
+
+	// Components is a list of match filters to apply for describing quota entities.
+	Components []DescribeClientQuotasRequestComponent
+
+	// Strict signifies whether matches are strict; if true, the response
+	// excludes entities with unspecified entity types.
+	Strict bool
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+	UnknownTags Tags // v1+
+}
+
+func (*DescribeClientQuotasRequest) Key() int16 { return 48 }
+func (*DescribeClientQuotasRequest) MaxVersion() int16 { return 1 }
+func (v *DescribeClientQuotasRequest) SetVersion(version int16) { v.Version = version }
+func (v *DescribeClientQuotasRequest) GetVersion() int16 { return v.Version }
+func (v *DescribeClientQuotasRequest) IsFlexible() bool { return v.Version >= 1 }
+func (v *DescribeClientQuotasRequest) ResponseKind() Response {
+	r := &DescribeClientQuotasResponse{Version: v.Version}
+	r.Default()
+	return r
+}
+
+// RequestWith issues v to r and returns the response or an error.
+// For sharded requests, the response may be merged and still return an error.
+// It is better to rely on client.RequestSharded than to rely on proper merging behavior.
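Editor's sketch (not part of this diff), same assumptions as above. The MatchType values follow the component documentation earlier in this hunk; a plain numeric conversion is used here rather than a named constant, since only the QuotasMatchType type itself is visible in this hunk.

func describeQuotasExample(ctx context.Context, client kmsg.Requestor) error {
	user := "alice" // hypothetical principal
	comp := kmsg.NewDescribeClientQuotasRequestComponent()
	comp.EntityType = "user"
	comp.MatchType = kmsg.QuotasMatchType(0) // 0: match the name in Match exactly
	comp.Match = &user

	req := kmsg.NewPtrDescribeClientQuotasRequest()
	req.Components = append(req.Components, comp)

	resp, err := req.RequestWith(ctx, client)
	if err != nil {
		return err
	}
	if resp.ErrorCode != 0 {
		return fmt.Errorf("describe rejected: error code %d", resp.ErrorCode)
	}
	for _, e := range resp.Entries { // matched entities and their quota values
		for _, val := range e.Values {
			fmt.Printf("%v: %s = %v\n", e.Entity, val.Key, val.Value)
		}
	}
	return nil
}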
+func (v *DescribeClientQuotasRequest) RequestWith(ctx context.Context, r Requestor) (*DescribeClientQuotasResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*DescribeClientQuotasResponse) + return resp, err +} + +func (v *DescribeClientQuotasRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + { + v := v.Components + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.EntityType + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.MatchType + { + v := int8(v) + dst = kbin.AppendInt8(dst, v) + } + } + { + v := v.Match + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.Strict + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeClientQuotasRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeClientQuotasRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeClientQuotasRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + s := v + { + v := s.Components + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeClientQuotasRequestComponent, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.EntityType = v + } + { + var t QuotasMatchType + { + v := b.Int8() + t = QuotasMatchType(v) + } + v := t + s.MatchType = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Match = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Components = v + } + { + v := b.Bool() + s.Strict = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeClientQuotasRequest returns a pointer to a default DescribeClientQuotasRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeClientQuotasRequest() *DescribeClientQuotasRequest { + var v DescribeClientQuotasRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeClientQuotasRequest. +func (v *DescribeClientQuotasRequest) Default() { +} + +// NewDescribeClientQuotasRequest returns a default DescribeClientQuotasRequest +// This is a shortcut for creating a struct and calling Default yourself. 
+func NewDescribeClientQuotasRequest() DescribeClientQuotasRequest { + var v DescribeClientQuotasRequest + v.Default() + return v +} + +type DescribeClientQuotasResponseEntryEntity struct { + // Type is the entity type. + Type string + + // Name is the entity name, or null if the default. + Name *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeClientQuotasResponseEntryEntity. +func (v *DescribeClientQuotasResponseEntryEntity) Default() { +} + +// NewDescribeClientQuotasResponseEntryEntity returns a default DescribeClientQuotasResponseEntryEntity +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeClientQuotasResponseEntryEntity() DescribeClientQuotasResponseEntryEntity { + var v DescribeClientQuotasResponseEntryEntity + v.Default() + return v +} + +type DescribeClientQuotasResponseEntryValue struct { + // Key is the quota configuration key. + Key string + + // Value is the quota configuration value. + Value float64 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeClientQuotasResponseEntryValue. +func (v *DescribeClientQuotasResponseEntryValue) Default() { +} + +// NewDescribeClientQuotasResponseEntryValue returns a default DescribeClientQuotasResponseEntryValue +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeClientQuotasResponseEntryValue() DescribeClientQuotasResponseEntryValue { + var v DescribeClientQuotasResponseEntryValue + v.Default() + return v +} + +type DescribeClientQuotasResponseEntry struct { + // Entity contains the quota entity components being described. + Entity []DescribeClientQuotasResponseEntryEntity + + // Values are quota values for the entity. + Values []DescribeClientQuotasResponseEntryValue + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeClientQuotasResponseEntry. +func (v *DescribeClientQuotasResponseEntry) Default() { +} + +// NewDescribeClientQuotasResponseEntry returns a default DescribeClientQuotasResponseEntry +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeClientQuotasResponseEntry() DescribeClientQuotasResponseEntry { + var v DescribeClientQuotasResponseEntry + v.Default() + return v +} + +// DescribeClientQuotasResponse is a response for a DescribeClientQuotasRequest. +type DescribeClientQuotasResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // ErrorCode is any error for the request. + ErrorCode int16 + + // ErrorMessage is an error message for the request, or null if the request succeeded. + ErrorMessage *string + + // Entries contains entities that were matched. + Entries []DescribeClientQuotasResponseEntry + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags // v1+ +} + +func (*DescribeClientQuotasResponse) Key() int16 { return 48 } +func (*DescribeClientQuotasResponse) MaxVersion() int16 { return 1 } +func (v *DescribeClientQuotasResponse) SetVersion(version int16) { v.Version = version } +func (v *DescribeClientQuotasResponse) GetVersion() int16 { return v.Version } +func (v *DescribeClientQuotasResponse) IsFlexible() bool { return v.Version >= 1 } +func (v *DescribeClientQuotasResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 0 +} + +func (v *DescribeClientQuotasResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *DescribeClientQuotasResponse) RequestKind() Request { + return &DescribeClientQuotasRequest{Version: v.Version} +} + +func (v *DescribeClientQuotasResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Entries + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + for i := range v { + v := &v[i] + { + v := v.Entity + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Type + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.Values + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Key + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Value + dst = kbin.AppendFloat64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeClientQuotasResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeClientQuotasResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeClientQuotasResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v 
+ } + { + v := s.Entries + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 0 || l == 0 { + a = []DescribeClientQuotasResponseEntry{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeClientQuotasResponseEntry, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := s.Entity + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeClientQuotasResponseEntryEntity, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Type = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Name = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Entity = v + } + { + v := s.Values + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeClientQuotasResponseEntryValue, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Key = v + } + { + v := b.Float64() + s.Value = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Values = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Entries = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeClientQuotasResponse returns a pointer to a default DescribeClientQuotasResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeClientQuotasResponse() *DescribeClientQuotasResponse { + var v DescribeClientQuotasResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeClientQuotasResponse. +func (v *DescribeClientQuotasResponse) Default() { +} + +// NewDescribeClientQuotasResponse returns a default DescribeClientQuotasResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeClientQuotasResponse() DescribeClientQuotasResponse { + var v DescribeClientQuotasResponse + v.Default() + return v +} + +type AlterClientQuotasRequestEntryEntity struct { + // Type is the entity component's type; e.g. "client-id", "user" or "ip". + Type string + + // Name is the name of the entity, or null for the default. + Name *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterClientQuotasRequestEntryEntity. 
+func (v *AlterClientQuotasRequestEntryEntity) Default() {
+}
+
+// NewAlterClientQuotasRequestEntryEntity returns a default AlterClientQuotasRequestEntryEntity
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewAlterClientQuotasRequestEntryEntity() AlterClientQuotasRequestEntryEntity {
+	var v AlterClientQuotasRequestEntryEntity
+	v.Default()
+	return v
+}
+
+type AlterClientQuotasRequestEntryOp struct {
+	// Key is the quota configuration key to alter.
+	Key string
+
+	// Value is the value to set; ignored if Remove is true.
+	Value float64
+
+	// Remove is whether the quota configuration value should be removed or set.
+	Remove bool
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+	UnknownTags Tags // v1+
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to AlterClientQuotasRequestEntryOp.
+func (v *AlterClientQuotasRequestEntryOp) Default() {
+}
+
+// NewAlterClientQuotasRequestEntryOp returns a default AlterClientQuotasRequestEntryOp
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewAlterClientQuotasRequestEntryOp() AlterClientQuotasRequestEntryOp {
+	var v AlterClientQuotasRequestEntryOp
+	v.Default()
+	return v
+}
+
+type AlterClientQuotasRequestEntry struct {
+	// Entity contains the components of a quota entity to alter.
+	Entity []AlterClientQuotasRequestEntryEntity
+
+	// Ops contains quota configuration entries to alter.
+	Ops []AlterClientQuotasRequestEntryOp
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+	UnknownTags Tags // v1+
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to AlterClientQuotasRequestEntry.
+func (v *AlterClientQuotasRequestEntry) Default() {
+}
+
+// NewAlterClientQuotasRequestEntry returns a default AlterClientQuotasRequestEntry
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewAlterClientQuotasRequestEntry() AlterClientQuotasRequestEntry {
+	var v AlterClientQuotasRequestEntry
+	v.Default()
+	return v
+}
+
+// AlterClientQuotasRequest, proposed in KIP-546 and introduced with Kafka 2.6.0,
+// provides a way to alter client quotas.
+type AlterClientQuotasRequest struct {
+	// Version is the version of this message used with a Kafka broker.
+	Version int16
+
+	// Entries are quota configuration entries to alter.
+	Entries []AlterClientQuotasRequestEntry
+
+	// ValidateOnly makes this request a dry-run; the alteration is validated
+	// but not performed.
+	ValidateOnly bool
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+	UnknownTags Tags // v1+
+}
+
+func (*AlterClientQuotasRequest) Key() int16 { return 49 }
+func (*AlterClientQuotasRequest) MaxVersion() int16 { return 1 }
+func (v *AlterClientQuotasRequest) SetVersion(version int16) { v.Version = version }
+func (v *AlterClientQuotasRequest) GetVersion() int16 { return v.Version }
+func (v *AlterClientQuotasRequest) IsFlexible() bool { return v.Version >= 1 }
+func (v *AlterClientQuotasRequest) ResponseKind() Response {
+	r := &AlterClientQuotasResponse{Version: v.Version}
+	r.Default()
+	return r
+}
+
+// RequestWith issues v to r and returns the response or an error.
+// For sharded requests, the response may be merged and still return an error.
+// It is better to rely on client.RequestSharded than to rely on proper merging behavior.
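Editor's sketch (not part of this diff), same assumptions as above. The quota key "consumer_byte_rate" is one example of a Kafka quota configuration key; per the file's generated pattern, the per-entry results are assumed to be exposed as resp.Entries (the entry type appears just below this hunk).

func alterQuotasExample(ctx context.Context, client kmsg.Requestor) error {
	user := "alice" // hypothetical; a nil Name targets the default quota instead
	entity := kmsg.NewAlterClientQuotasRequestEntryEntity()
	entity.Type = "user"
	entity.Name = &user

	op := kmsg.NewAlterClientQuotasRequestEntryOp()
	op.Key = "consumer_byte_rate" // example quota key
	op.Value = 1048576            // 1 MiB/s; set Remove to true to delete the key instead

	entry := kmsg.NewAlterClientQuotasRequestEntry()
	entry.Entity = append(entry.Entity, entity)
	entry.Ops = append(entry.Ops, op)

	req := kmsg.NewPtrAlterClientQuotasRequest()
	req.Entries = append(req.Entries, entry)
	req.ValidateOnly = true // dry-run: validate the alteration without applying it

	resp, err := req.RequestWith(ctx, client)
	if err != nil {
		return err
	}
	for _, e := range resp.Entries { // assumed field, following the generated pattern
		if e.ErrorCode != 0 {
			fmt.Printf("alter failed for %v: error code %d\n", e.Entity, e.ErrorCode)
		}
	}
	return nil
}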
+func (v *AlterClientQuotasRequest) RequestWith(ctx context.Context, r Requestor) (*AlterClientQuotasResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*AlterClientQuotasResponse) + return resp, err +} + +func (v *AlterClientQuotasRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + { + v := v.Entries + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Entity + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Type + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.Ops + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Key + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Value + dst = kbin.AppendFloat64(dst, v) + } + { + v := v.Remove + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.ValidateOnly + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AlterClientQuotasRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AlterClientQuotasRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AlterClientQuotasRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + s := v + { + v := s.Entries + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterClientQuotasRequestEntry, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := s.Entity + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterClientQuotasRequestEntryEntity, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Type = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Name = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Entity = v + } + { + v := s.Ops + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterClientQuotasRequestEntryOp, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Key = v + } + { + v := b.Float64() + s.Value = v + } + { + v := b.Bool() + s.Remove = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Ops = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Entries = v + } + { + v := b.Bool() + s.ValidateOnly = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAlterClientQuotasRequest returns a pointer to a default AlterClientQuotasRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAlterClientQuotasRequest() *AlterClientQuotasRequest { + var v AlterClientQuotasRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterClientQuotasRequest. +func (v *AlterClientQuotasRequest) Default() { +} + +// NewAlterClientQuotasRequest returns a default AlterClientQuotasRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterClientQuotasRequest() AlterClientQuotasRequest { + var v AlterClientQuotasRequest + v.Default() + return v +} + +type AlterClientQuotasResponseEntryEntity struct { + // Type is the entity component's type; e.g. "client-id" or "user". + Type string + + // Name is the name of the entity, or null for the default. + Name *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterClientQuotasResponseEntryEntity. +func (v *AlterClientQuotasResponseEntryEntity) Default() { +} + +// NewAlterClientQuotasResponseEntryEntity returns a default AlterClientQuotasResponseEntryEntity +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterClientQuotasResponseEntryEntity() AlterClientQuotasResponseEntryEntity { + var v AlterClientQuotasResponseEntryEntity + v.Default() + return v +} + +type AlterClientQuotasResponseEntry struct { + // ErrorCode is the error code for an alter on a matched entity. + ErrorCode int16 + + // ErrorMessage is an informative message if the alter on this entity failed. + ErrorMessage *string + + // Entity contains the components of a matched entity. 
+ Entity []AlterClientQuotasResponseEntryEntity + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterClientQuotasResponseEntry. +func (v *AlterClientQuotasResponseEntry) Default() { +} + +// NewAlterClientQuotasResponseEntry returns a default AlterClientQuotasResponseEntry +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterClientQuotasResponseEntry() AlterClientQuotasResponseEntry { + var v AlterClientQuotasResponseEntry + v.Default() + return v +} + +// AlterClientQuotasResponse is a response to an AlterClientQuotasRequest. +type AlterClientQuotasResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // Entries contains results for the alter request. + Entries []AlterClientQuotasResponseEntry + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags // v1+ +} + +func (*AlterClientQuotasResponse) Key() int16 { return 49 } +func (*AlterClientQuotasResponse) MaxVersion() int16 { return 1 } +func (v *AlterClientQuotasResponse) SetVersion(version int16) { v.Version = version } +func (v *AlterClientQuotasResponse) GetVersion() int16 { return v.Version } +func (v *AlterClientQuotasResponse) IsFlexible() bool { return v.Version >= 1 } +func (v *AlterClientQuotasResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 0 } +func (v *AlterClientQuotasResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *AlterClientQuotasResponse) RequestKind() Request { + return &AlterClientQuotasRequest{Version: v.Version} +} + +func (v *AlterClientQuotasResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Entries + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Entity + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Type + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AlterClientQuotasResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AlterClientQuotasResponse) UnsafeReadFrom(src []byte) 
error { + return v.readFrom(src, true) +} + +func (v *AlterClientQuotasResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 1 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Entries + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterClientQuotasResponseEntry, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + { + v := s.Entity + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterClientQuotasResponseEntryEntity, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Type = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Name = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Entity = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Entries = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAlterClientQuotasResponse returns a pointer to a default AlterClientQuotasResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAlterClientQuotasResponse() *AlterClientQuotasResponse { + var v AlterClientQuotasResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterClientQuotasResponse. +func (v *AlterClientQuotasResponse) Default() { +} + +// NewAlterClientQuotasResponse returns a default AlterClientQuotasResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterClientQuotasResponse() AlterClientQuotasResponse { + var v AlterClientQuotasResponse + v.Default() + return v +} + +type DescribeUserSCRAMCredentialsRequestUser struct { + // The user name. + Name string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeUserSCRAMCredentialsRequestUser. +func (v *DescribeUserSCRAMCredentialsRequestUser) Default() { +} + +// NewDescribeUserSCRAMCredentialsRequestUser returns a default DescribeUserSCRAMCredentialsRequestUser +// This is a shortcut for creating a struct and calling Default yourself. 
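+//
+// For example (sketch; the user name "alice" is illustrative):
+//
+//	user := NewDescribeUserSCRAMCredentialsRequestUser()
+//	user.Name = "alice"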
+func NewDescribeUserSCRAMCredentialsRequestUser() DescribeUserSCRAMCredentialsRequestUser { + var v DescribeUserSCRAMCredentialsRequestUser + v.Default() + return v +} + +// DescribeUserSCRAMCredentialsRequest, proposed in KIP-554 and introduced +// with Kafka 2.7.0, describes user SCRAM credentials. +// +// This request was introduced as part of the overarching KIP-500 initiative, +// which is to remove Zookeeper as a dependency. +// +// This request requires DESCRIBE on CLUSTER. +type DescribeUserSCRAMCredentialsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // The users to describe, or null to describe all. + Users []DescribeUserSCRAMCredentialsRequestUser + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*DescribeUserSCRAMCredentialsRequest) Key() int16 { return 50 } +func (*DescribeUserSCRAMCredentialsRequest) MaxVersion() int16 { return 0 } +func (v *DescribeUserSCRAMCredentialsRequest) SetVersion(version int16) { v.Version = version } +func (v *DescribeUserSCRAMCredentialsRequest) GetVersion() int16 { return v.Version } +func (v *DescribeUserSCRAMCredentialsRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *DescribeUserSCRAMCredentialsRequest) ResponseKind() Response { + r := &DescribeUserSCRAMCredentialsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *DescribeUserSCRAMCredentialsRequest) RequestWith(ctx context.Context, r Requestor) (*DescribeUserSCRAMCredentialsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*DescribeUserSCRAMCredentialsResponse) + return resp, err +} + +func (v *DescribeUserSCRAMCredentialsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.Users + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + for i := range v { + v := &v[i] + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeUserSCRAMCredentialsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeUserSCRAMCredentialsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeUserSCRAMCredentialsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := s.Users + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 0 || l == 0 { + a = []DescribeUserSCRAMCredentialsRequestUser{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeUserSCRAMCredentialsRequestUser, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Users = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeUserSCRAMCredentialsRequest returns a pointer to a default DescribeUserSCRAMCredentialsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeUserSCRAMCredentialsRequest() *DescribeUserSCRAMCredentialsRequest { + var v DescribeUserSCRAMCredentialsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeUserSCRAMCredentialsRequest. +func (v *DescribeUserSCRAMCredentialsRequest) Default() { +} + +// NewDescribeUserSCRAMCredentialsRequest returns a default DescribeUserSCRAMCredentialsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeUserSCRAMCredentialsRequest() DescribeUserSCRAMCredentialsRequest { + var v DescribeUserSCRAMCredentialsRequest + v.Default() + return v +} + +type DescribeUserSCRAMCredentialsResponseResultCredentialInfo struct { + // The SCRAM mechanism for this user, where 0 is UNKNOWN, 1 is SCRAM-SHA-256, + // and 2 is SCRAM-SHA-512. + Mechanism int8 + + // The number of iterations used in the SCRAM credential. + Iterations int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeUserSCRAMCredentialsResponseResultCredentialInfo. +func (v *DescribeUserSCRAMCredentialsResponseResultCredentialInfo) Default() { +} + +// NewDescribeUserSCRAMCredentialsResponseResultCredentialInfo returns a default DescribeUserSCRAMCredentialsResponseResultCredentialInfo +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeUserSCRAMCredentialsResponseResultCredentialInfo() DescribeUserSCRAMCredentialsResponseResultCredentialInfo { + var v DescribeUserSCRAMCredentialsResponseResultCredentialInfo + v.Default() + return v +} + +type DescribeUserSCRAMCredentialsResponseResult struct { + // The name this result corresponds to. + User string + + // The user-level error code. + // + // RESOURCE_NOT_FOUND if the user does not exist or has no credentials. + // + // DUPLICATE_RESOURCE if the user is requested twice+. + ErrorCode int16 + + // The user-level error message, if any. + ErrorMessage *string + + // Information about the SCRAM credentials for this user. + CredentialInfos []DescribeUserSCRAMCredentialsResponseResultCredentialInfo + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeUserSCRAMCredentialsResponseResult. +func (v *DescribeUserSCRAMCredentialsResponseResult) Default() { +} + +// NewDescribeUserSCRAMCredentialsResponseResult returns a default DescribeUserSCRAMCredentialsResponseResult +// This is a shortcut for creating a struct and calling Default yourself. 
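+//
+// When reading results, the Mechanism codes documented above can be mapped to
+// names by hand; an illustrative sketch, assuming result is a
+// DescribeUserSCRAMCredentialsResponseResult:
+//
+//	names := map[int8]string{0: "UNKNOWN", 1: "SCRAM-SHA-256", 2: "SCRAM-SHA-512"}
+//	for _, info := range result.CredentialInfos {
+//		name := names[info.Mechanism] // e.g. "SCRAM-SHA-256"
+//		_ = name
+//	}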
+func NewDescribeUserSCRAMCredentialsResponseResult() DescribeUserSCRAMCredentialsResponseResult { + var v DescribeUserSCRAMCredentialsResponseResult + v.Default() + return v +} + +// DescribeUserSCRAMCredentialsResponse is a response for a +// DescribeUserSCRAMCredentialsRequest. +type DescribeUserSCRAMCredentialsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // The request-level error code. This is 0 except for auth or infra issues. + // + // CLUSTER_AUTHORIZATION_FAILED if you do not have DESCRIBE on CLUSTER. + ErrorCode int16 + + // The request-level error message, if any. + ErrorMessage *string + + // Results for descriptions, one per user. + Results []DescribeUserSCRAMCredentialsResponseResult + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*DescribeUserSCRAMCredentialsResponse) Key() int16 { return 50 } +func (*DescribeUserSCRAMCredentialsResponse) MaxVersion() int16 { return 0 } +func (v *DescribeUserSCRAMCredentialsResponse) SetVersion(version int16) { v.Version = version } +func (v *DescribeUserSCRAMCredentialsResponse) GetVersion() int16 { return v.Version } +func (v *DescribeUserSCRAMCredentialsResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *DescribeUserSCRAMCredentialsResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 0 +} + +func (v *DescribeUserSCRAMCredentialsResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *DescribeUserSCRAMCredentialsResponse) RequestKind() Request { + return &DescribeUserSCRAMCredentialsRequest{Version: v.Version} +} + +func (v *DescribeUserSCRAMCredentialsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Results + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.User + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.CredentialInfos + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Mechanism + dst = kbin.AppendInt8(dst, v) + } + { + v := v.Iterations + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeUserSCRAMCredentialsResponse) 
ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeUserSCRAMCredentialsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeUserSCRAMCredentialsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + { + v := s.Results + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeUserSCRAMCredentialsResponseResult, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.User = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + { + v := s.CredentialInfos + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeUserSCRAMCredentialsResponseResultCredentialInfo, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int8() + s.Mechanism = v + } + { + v := b.Int32() + s.Iterations = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.CredentialInfos = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Results = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeUserSCRAMCredentialsResponse returns a pointer to a default DescribeUserSCRAMCredentialsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeUserSCRAMCredentialsResponse() *DescribeUserSCRAMCredentialsResponse { + var v DescribeUserSCRAMCredentialsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeUserSCRAMCredentialsResponse. +func (v *DescribeUserSCRAMCredentialsResponse) Default() { +} + +// NewDescribeUserSCRAMCredentialsResponse returns a default DescribeUserSCRAMCredentialsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeUserSCRAMCredentialsResponse() DescribeUserSCRAMCredentialsResponse { + var v DescribeUserSCRAMCredentialsResponse + v.Default() + return v +} + +type AlterUserSCRAMCredentialsRequestDeletion struct { + // The user name to match for removal. + Name string + + // The mechanism for the user name to remove. + Mechanism int8 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+	UnknownTags Tags
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to AlterUserSCRAMCredentialsRequestDeletion.
+func (v *AlterUserSCRAMCredentialsRequestDeletion) Default() {
+}
+
+// NewAlterUserSCRAMCredentialsRequestDeletion returns a default AlterUserSCRAMCredentialsRequestDeletion
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewAlterUserSCRAMCredentialsRequestDeletion() AlterUserSCRAMCredentialsRequestDeletion {
+	var v AlterUserSCRAMCredentialsRequestDeletion
+	v.Default()
+	return v
+}
+
+type AlterUserSCRAMCredentialsRequestUpsertion struct {
+	// The user name to use.
+	Name string
+
+	// The mechanism to use for creating, where 1 is SCRAM-SHA-256 and 2 is
+	// SCRAM-SHA-512.
+	Mechanism int8
+
+	// The number of iterations to use. This must be more than the minimum for
+	// the mechanism and cannot be more than 16384.
+	Iterations int32
+
+	// A random salt generated by the client.
+	Salt []byte
+
+	// The salted password to use.
+	SaltedPassword []byte
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+	UnknownTags Tags
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to AlterUserSCRAMCredentialsRequestUpsertion.
+func (v *AlterUserSCRAMCredentialsRequestUpsertion) Default() {
+}
+
+// NewAlterUserSCRAMCredentialsRequestUpsertion returns a default AlterUserSCRAMCredentialsRequestUpsertion
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewAlterUserSCRAMCredentialsRequestUpsertion() AlterUserSCRAMCredentialsRequestUpsertion {
+	var v AlterUserSCRAMCredentialsRequestUpsertion
+	v.Default()
+	return v
+}
+
+// AlterUserSCRAMCredentialsRequest, proposed in KIP-554 and introduced
+// with Kafka 2.7.0, alters or deletes user SCRAM credentials.
+//
+// This request was introduced as part of the overarching KIP-500 initiative,
+// which is to remove Zookeeper as a dependency.
+//
+// This request requires ALTER on CLUSTER.
+type AlterUserSCRAMCredentialsRequest struct {
+	// Version is the version of this message used with a Kafka broker.
+	Version int16
+
+	// The SCRAM credentials to remove.
+	Deletions []AlterUserSCRAMCredentialsRequestDeletion
+
+	// The SCRAM credentials to update or insert.
+	Upsertions []AlterUserSCRAMCredentialsRequestUpsertion
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+	UnknownTags Tags
+}
+
+func (*AlterUserSCRAMCredentialsRequest) Key() int16 { return 51 }
+func (*AlterUserSCRAMCredentialsRequest) MaxVersion() int16 { return 0 }
+func (v *AlterUserSCRAMCredentialsRequest) SetVersion(version int16) { v.Version = version }
+func (v *AlterUserSCRAMCredentialsRequest) GetVersion() int16 { return v.Version }
+func (v *AlterUserSCRAMCredentialsRequest) IsFlexible() bool { return v.Version >= 0 }
+func (v *AlterUserSCRAMCredentialsRequest) IsAdminRequest() {}
+func (v *AlterUserSCRAMCredentialsRequest) ResponseKind() Response {
+	r := &AlterUserSCRAMCredentialsResponse{Version: v.Version}
+	r.Default()
+	return r
+}
+
+// RequestWith requests v on r and returns the response or an error.
+// For sharded requests, the response may be merged and still return an error.
+// It is better to rely on client.RequestSharded than to rely on proper merging behavior.
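+//
+// An illustrative upsertion sketch: per RFC 5802, the salted password is
+// PBKDF2 of the raw password with the salt and iteration count (shown here
+// with golang.org/x/crypto/pbkdf2 and SHA-256 for mechanism 1; password,
+// salt, ctx, and r are assumed variables):
+//
+//	up := NewAlterUserSCRAMCredentialsRequestUpsertion()
+//	up.Name = "alice"
+//	up.Mechanism = 1 // SCRAM-SHA-256
+//	up.Iterations = 8192
+//	up.Salt = salt
+//	up.SaltedPassword = pbkdf2.Key(password, salt, int(up.Iterations), 32, sha256.New)
+//
+//	req := NewPtrAlterUserSCRAMCredentialsRequest()
+//	req.Upsertions = append(req.Upsertions, up)
+//	resp, err := req.RequestWith(ctx, r)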
+func (v *AlterUserSCRAMCredentialsRequest) RequestWith(ctx context.Context, r Requestor) (*AlterUserSCRAMCredentialsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*AlterUserSCRAMCredentialsResponse) + return resp, err +} + +func (v *AlterUserSCRAMCredentialsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.Deletions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Mechanism + dst = kbin.AppendInt8(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.Upsertions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Mechanism + dst = kbin.AppendInt8(dst, v) + } + { + v := v.Iterations + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Salt + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + { + v := v.SaltedPassword + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AlterUserSCRAMCredentialsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AlterUserSCRAMCredentialsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AlterUserSCRAMCredentialsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := s.Deletions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterUserSCRAMCredentialsRequestDeletion, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + { + v := b.Int8() + s.Mechanism = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Deletions = v + } + { + v := s.Upsertions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterUserSCRAMCredentialsRequestUpsertion, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + { + v := b.Int8() + s.Mechanism = v + } + { + v := b.Int32() + s.Iterations = v + } + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.Salt = v + } + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.SaltedPassword = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Upsertions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAlterUserSCRAMCredentialsRequest returns a pointer to a default AlterUserSCRAMCredentialsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAlterUserSCRAMCredentialsRequest() *AlterUserSCRAMCredentialsRequest { + var v AlterUserSCRAMCredentialsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterUserSCRAMCredentialsRequest. +func (v *AlterUserSCRAMCredentialsRequest) Default() { +} + +// NewAlterUserSCRAMCredentialsRequest returns a default AlterUserSCRAMCredentialsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterUserSCRAMCredentialsRequest() AlterUserSCRAMCredentialsRequest { + var v AlterUserSCRAMCredentialsRequest + v.Default() + return v +} + +type AlterUserSCRAMCredentialsResponseResult struct { + // The name this result corresponds to. + User string + + // The user-level error code. + ErrorCode int16 + + // The user-level error message, if any. + ErrorMessage *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterUserSCRAMCredentialsResponseResult. +func (v *AlterUserSCRAMCredentialsResponseResult) Default() { +} + +// NewAlterUserSCRAMCredentialsResponseResult returns a default AlterUserSCRAMCredentialsResponseResult +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterUserSCRAMCredentialsResponseResult() AlterUserSCRAMCredentialsResponseResult { + var v AlterUserSCRAMCredentialsResponseResult + v.Default() + return v +} + +// AlterUserSCRAMCredentialsResponse is a response for an +// AlterUserSCRAMCredentialsRequest. +type AlterUserSCRAMCredentialsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // The results for deletions and upsertions. + Results []AlterUserSCRAMCredentialsResponseResult + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags +} + +func (*AlterUserSCRAMCredentialsResponse) Key() int16 { return 51 } +func (*AlterUserSCRAMCredentialsResponse) MaxVersion() int16 { return 0 } +func (v *AlterUserSCRAMCredentialsResponse) SetVersion(version int16) { v.Version = version } +func (v *AlterUserSCRAMCredentialsResponse) GetVersion() int16 { return v.Version } +func (v *AlterUserSCRAMCredentialsResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *AlterUserSCRAMCredentialsResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 0 +} + +func (v *AlterUserSCRAMCredentialsResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *AlterUserSCRAMCredentialsResponse) RequestKind() Request { + return &AlterUserSCRAMCredentialsRequest{Version: v.Version} +} + +func (v *AlterUserSCRAMCredentialsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Results + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.User + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AlterUserSCRAMCredentialsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AlterUserSCRAMCredentialsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AlterUserSCRAMCredentialsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Results + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterUserSCRAMCredentialsResponseResult, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.User = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Results = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAlterUserSCRAMCredentialsResponse returns a pointer to a default AlterUserSCRAMCredentialsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAlterUserSCRAMCredentialsResponse() *AlterUserSCRAMCredentialsResponse { + var v AlterUserSCRAMCredentialsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterUserSCRAMCredentialsResponse. +func (v *AlterUserSCRAMCredentialsResponse) Default() { +} + +// NewAlterUserSCRAMCredentialsResponse returns a default AlterUserSCRAMCredentialsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterUserSCRAMCredentialsResponse() AlterUserSCRAMCredentialsResponse { + var v AlterUserSCRAMCredentialsResponse + v.Default() + return v +} + +type VoteRequestTopicPartition struct { + Partition int32 + + // The bumped epoch of the candidate sending the request. + CandidateEpoch int32 + + // The ID of the voter sending the request. + CandidateID int32 + + // The epoch of the last record written to the metadata log. + LastOffsetEpoch int32 + + // The offset of the last record written to the metadata log. + LastOffset int64 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to VoteRequestTopicPartition. +func (v *VoteRequestTopicPartition) Default() { +} + +// NewVoteRequestTopicPartition returns a default VoteRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewVoteRequestTopicPartition() VoteRequestTopicPartition { + var v VoteRequestTopicPartition + v.Default() + return v +} + +type VoteRequestTopic struct { + Topic string + + Partitions []VoteRequestTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to VoteRequestTopic. +func (v *VoteRequestTopic) Default() { +} + +// NewVoteRequestTopic returns a default VoteRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewVoteRequestTopic() VoteRequestTopic { + var v VoteRequestTopic + v.Default() + return v +} + +// Part of KIP-595 to replace Kafka's dependence on Zookeeper with a +// Kafka-only raft protocol, +// VoteRequest is used by voters to hold a leader election. +// +// Since this is relatively Kafka internal, most fields are left undocumented. 
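+//
+// A rough sketch only; vote requests are normally exchanged between brokers
+// rather than issued by ordinary clients (clusterID is an assumed variable):
+//
+//	req := NewPtrVoteRequest()
+//	req.ClusterID = &clusterID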
+type VoteRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + ClusterID *string + + Topics []VoteRequestTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*VoteRequest) Key() int16 { return 52 } +func (*VoteRequest) MaxVersion() int16 { return 0 } +func (v *VoteRequest) SetVersion(version int16) { v.Version = version } +func (v *VoteRequest) GetVersion() int16 { return v.Version } +func (v *VoteRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *VoteRequest) IsAdminRequest() {} +func (v *VoteRequest) ResponseKind() Response { + r := &VoteResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *VoteRequest) RequestWith(ctx context.Context, r Requestor) (*VoteResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*VoteResponse) + return resp, err +} + +func (v *VoteRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ClusterID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.CandidateEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.CandidateID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LastOffsetEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LastOffset + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *VoteRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *VoteRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *VoteRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ClusterID = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]VoteRequestTopic, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]VoteRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int32() + s.CandidateEpoch = v + } + { + v := b.Int32() + s.CandidateID = v + } + { + v := b.Int32() + s.LastOffsetEpoch = v + } + { + v := b.Int64() + s.LastOffset = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrVoteRequest returns a pointer to a default VoteRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrVoteRequest() *VoteRequest { + var v VoteRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to VoteRequest. +func (v *VoteRequest) Default() { +} + +// NewVoteRequest returns a default VoteRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewVoteRequest() VoteRequest { + var v VoteRequest + v.Default() + return v +} + +type VoteResponseTopicPartition struct { + Partition int32 + + ErrorCode int16 + + // The ID of the current leader, or -1 if the leader is unknown. + LeaderID int32 + + // The latest known leader epoch. + LeaderEpoch int32 + + // Whether the vote was granted. + VoteGranted bool + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to VoteResponseTopicPartition. +func (v *VoteResponseTopicPartition) Default() { +} + +// NewVoteResponseTopicPartition returns a default VoteResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewVoteResponseTopicPartition() VoteResponseTopicPartition { + var v VoteResponseTopicPartition + v.Default() + return v +} + +type VoteResponseTopic struct { + Topic string + + Partitions []VoteResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to VoteResponseTopic. +func (v *VoteResponseTopic) Default() { +} + +// NewVoteResponseTopic returns a default VoteResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewVoteResponseTopic() VoteResponseTopic { + var v VoteResponseTopic + v.Default() + return v +} + +type VoteResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + ErrorCode int16 + + Topics []VoteResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags +} + +func (*VoteResponse) Key() int16 { return 52 } +func (*VoteResponse) MaxVersion() int16 { return 0 } +func (v *VoteResponse) SetVersion(version int16) { v.Version = version } +func (v *VoteResponse) GetVersion() int16 { return v.Version } +func (v *VoteResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *VoteResponse) RequestKind() Request { return &VoteRequest{Version: v.Version} } + +func (v *VoteResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.LeaderID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.VoteGranted + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *VoteResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *VoteResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *VoteResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]VoteResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]VoteResponseTopicPartition, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int32() + s.LeaderID = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + { + v := b.Bool() + s.VoteGranted = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrVoteResponse returns a pointer to a default VoteResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrVoteResponse() *VoteResponse { + var v VoteResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to VoteResponse. +func (v *VoteResponse) Default() { +} + +// NewVoteResponse returns a default VoteResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewVoteResponse() VoteResponse { + var v VoteResponse + v.Default() + return v +} + +type BeginQuorumEpochRequestTopicPartition struct { + Partition int32 + + // The ID of the newly elected leader. + LeaderID int32 + + // The epoch of the newly elected leader. + LeaderEpoch int32 +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to BeginQuorumEpochRequestTopicPartition. +func (v *BeginQuorumEpochRequestTopicPartition) Default() { +} + +// NewBeginQuorumEpochRequestTopicPartition returns a default BeginQuorumEpochRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewBeginQuorumEpochRequestTopicPartition() BeginQuorumEpochRequestTopicPartition { + var v BeginQuorumEpochRequestTopicPartition + v.Default() + return v +} + +type BeginQuorumEpochRequestTopic struct { + Topic string + + Partitions []BeginQuorumEpochRequestTopicPartition +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to BeginQuorumEpochRequestTopic. +func (v *BeginQuorumEpochRequestTopic) Default() { +} + +// NewBeginQuorumEpochRequestTopic returns a default BeginQuorumEpochRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewBeginQuorumEpochRequestTopic() BeginQuorumEpochRequestTopic { + var v BeginQuorumEpochRequestTopic + v.Default() + return v +} + +// Part of KIP-595 to replace Kafka's dependence on Zookeeper with a +// Kafka-only raft protocol, +// BeginQuorumEpochRequest is sent by a leader (once it has enough votes) +// to all voters in the election. +// +// Since this is relatively Kafka internal, most fields are left undocumented. +type BeginQuorumEpochRequest struct { + // Version is the version of this message used with a Kafka broker. 
+ Version int16 + + ClusterID *string + + Topics []BeginQuorumEpochRequestTopic +} + +func (*BeginQuorumEpochRequest) Key() int16 { return 53 } +func (*BeginQuorumEpochRequest) MaxVersion() int16 { return 0 } +func (v *BeginQuorumEpochRequest) SetVersion(version int16) { v.Version = version } +func (v *BeginQuorumEpochRequest) GetVersion() int16 { return v.Version } +func (v *BeginQuorumEpochRequest) IsFlexible() bool { return false } +func (v *BeginQuorumEpochRequest) IsAdminRequest() {} +func (v *BeginQuorumEpochRequest) ResponseKind() Response { + r := &BeginQuorumEpochResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *BeginQuorumEpochRequest) RequestWith(ctx context.Context, r Requestor) (*BeginQuorumEpochResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*BeginQuorumEpochResponse) + return resp, err +} + +func (v *BeginQuorumEpochRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.ClusterID + dst = kbin.AppendNullableString(dst, v) + } + { + v := v.Topics + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Topic + dst = kbin.AppendString(dst, v) + } + { + v := v.Partitions + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + } + } + } + } + return dst +} + +func (v *BeginQuorumEpochRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *BeginQuorumEpochRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *BeginQuorumEpochRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + s := v + { + var v *string + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + s.ClusterID = v + } + { + v := s.Topics + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]BeginQuorumEpochRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]BeginQuorumEpochRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int32() + s.LeaderID = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + } + v = a + s.Partitions = v + } + } + v = a + s.Topics = v + } + return b.Complete() +} + +// NewPtrBeginQuorumEpochRequest returns a pointer to a default BeginQuorumEpochRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrBeginQuorumEpochRequest() *BeginQuorumEpochRequest { + var v BeginQuorumEpochRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to BeginQuorumEpochRequest. 
+func (v *BeginQuorumEpochRequest) Default() { +} + +// NewBeginQuorumEpochRequest returns a default BeginQuorumEpochRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewBeginQuorumEpochRequest() BeginQuorumEpochRequest { + var v BeginQuorumEpochRequest + v.Default() + return v +} + +type BeginQuorumEpochResponseTopicPartition struct { + Partition int32 + + ErrorCode int16 + + // The ID of the current leader, or -1 if the leader is unknown. + LeaderID int32 + + // The latest known leader epoch. + LeaderEpoch int32 +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to BeginQuorumEpochResponseTopicPartition. +func (v *BeginQuorumEpochResponseTopicPartition) Default() { +} + +// NewBeginQuorumEpochResponseTopicPartition returns a default BeginQuorumEpochResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewBeginQuorumEpochResponseTopicPartition() BeginQuorumEpochResponseTopicPartition { + var v BeginQuorumEpochResponseTopicPartition + v.Default() + return v +} + +type BeginQuorumEpochResponseTopic struct { + Topic string + + Partitions []BeginQuorumEpochResponseTopicPartition +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to BeginQuorumEpochResponseTopic. +func (v *BeginQuorumEpochResponseTopic) Default() { +} + +// NewBeginQuorumEpochResponseTopic returns a default BeginQuorumEpochResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewBeginQuorumEpochResponseTopic() BeginQuorumEpochResponseTopic { + var v BeginQuorumEpochResponseTopic + v.Default() + return v +} + +type BeginQuorumEpochResponse struct { + // Version is the version of this message used with a Kafka broker. 
+ Version int16 + + ErrorCode int16 + + Topics []BeginQuorumEpochResponseTopic +} + +func (*BeginQuorumEpochResponse) Key() int16 { return 53 } +func (*BeginQuorumEpochResponse) MaxVersion() int16 { return 0 } +func (v *BeginQuorumEpochResponse) SetVersion(version int16) { v.Version = version } +func (v *BeginQuorumEpochResponse) GetVersion() int16 { return v.Version } +func (v *BeginQuorumEpochResponse) IsFlexible() bool { return false } +func (v *BeginQuorumEpochResponse) RequestKind() Request { + return &BeginQuorumEpochRequest{Version: v.Version} +} + +func (v *BeginQuorumEpochResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Topics + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Topic + dst = kbin.AppendString(dst, v) + } + { + v := v.Partitions + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.LeaderID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + } + } + } + } + return dst +} + +func (v *BeginQuorumEpochResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *BeginQuorumEpochResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *BeginQuorumEpochResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + v := s.Topics + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]BeginQuorumEpochResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]BeginQuorumEpochResponseTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int32() + s.LeaderID = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + } + v = a + s.Partitions = v + } + } + v = a + s.Topics = v + } + return b.Complete() +} + +// NewPtrBeginQuorumEpochResponse returns a pointer to a default BeginQuorumEpochResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrBeginQuorumEpochResponse() *BeginQuorumEpochResponse { + var v BeginQuorumEpochResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to BeginQuorumEpochResponse. +func (v *BeginQuorumEpochResponse) Default() { +} + +// NewBeginQuorumEpochResponse returns a default BeginQuorumEpochResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewBeginQuorumEpochResponse() BeginQuorumEpochResponse { + var v BeginQuorumEpochResponse + v.Default() + return v +} + +type EndQuorumEpochRequestTopicPartition struct { + Partition int32 + + // The current leader ID that is resigning. + LeaderID int32 + + // The current epoch. 
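+ // (That is, the epoch the resigning leader is stepping down from.)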
+ LeaderEpoch int32
+
+ // A sorted list of preferred successors to start the election.
+ PreferredSuccessors []int32
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to EndQuorumEpochRequestTopicPartition.
+func (v *EndQuorumEpochRequestTopicPartition) Default() {
+}
+
+// NewEndQuorumEpochRequestTopicPartition returns a default EndQuorumEpochRequestTopicPartition
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewEndQuorumEpochRequestTopicPartition() EndQuorumEpochRequestTopicPartition {
+ var v EndQuorumEpochRequestTopicPartition
+ v.Default()
+ return v
+}
+
+type EndQuorumEpochRequestTopic struct {
+ Topic string
+
+ Partitions []EndQuorumEpochRequestTopicPartition
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to EndQuorumEpochRequestTopic.
+func (v *EndQuorumEpochRequestTopic) Default() {
+}
+
+// NewEndQuorumEpochRequestTopic returns a default EndQuorumEpochRequestTopic
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewEndQuorumEpochRequestTopic() EndQuorumEpochRequestTopic {
+ var v EndQuorumEpochRequestTopic
+ v.Default()
+ return v
+}
+
+// Part of KIP-595 to replace Kafka's dependence on Zookeeper with a
+// Kafka-only raft protocol,
+// EndQuorumEpochRequest is sent by a leader to gracefully step down as leader
+// (i.e. on shutdown). Stepping down begins a new election.
+//
+// Since this is relatively Kafka internal, most fields are left undocumented.
+type EndQuorumEpochRequest struct {
+ // Version is the version of this message used with a Kafka broker.
+ Version int16
+
+ ClusterID *string
+
+ Topics []EndQuorumEpochRequestTopic
+}
+
+func (*EndQuorumEpochRequest) Key() int16 { return 54 }
+func (*EndQuorumEpochRequest) MaxVersion() int16 { return 0 }
+func (v *EndQuorumEpochRequest) SetVersion(version int16) { v.Version = version }
+func (v *EndQuorumEpochRequest) GetVersion() int16 { return v.Version }
+func (v *EndQuorumEpochRequest) IsFlexible() bool { return false }
+func (v *EndQuorumEpochRequest) IsAdminRequest() {}
+func (v *EndQuorumEpochRequest) ResponseKind() Response {
+ r := &EndQuorumEpochResponse{Version: v.Version}
+ r.Default()
+ return r
+}
+
+// RequestWith issues v on r and returns the response or an error.
+// For sharded requests, the response may be merged and still return an error.
+// It is better to rely on client.RequestSharded than to rely on proper merging behavior.
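+//
+// A minimal usage sketch; ctx and client are assumed to exist, with client
+// being any Requestor (for example, a franz-go *kgo.Client):
+//
+//	req := NewPtrEndQuorumEpochRequest()
+//	resp, err := req.RequestWith(ctx, client)
+//	if err != nil {
+//		// handle the error; resp may still be non-nil for merged responses
+//	}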
+func (v *EndQuorumEpochRequest) RequestWith(ctx context.Context, r Requestor) (*EndQuorumEpochResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*EndQuorumEpochResponse) + return resp, err +} + +func (v *EndQuorumEpochRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.ClusterID + dst = kbin.AppendNullableString(dst, v) + } + { + v := v.Topics + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Topic + dst = kbin.AppendString(dst, v) + } + { + v := v.Partitions + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.PreferredSuccessors + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + } + } + } + } + return dst +} + +func (v *EndQuorumEpochRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *EndQuorumEpochRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *EndQuorumEpochRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + s := v + { + var v *string + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + s.ClusterID = v + } + { + v := s.Topics + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]EndQuorumEpochRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]EndQuorumEpochRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int32() + s.LeaderID = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + { + v := s.PreferredSuccessors + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.PreferredSuccessors = v + } + } + v = a + s.Partitions = v + } + } + v = a + s.Topics = v + } + return b.Complete() +} + +// NewPtrEndQuorumEpochRequest returns a pointer to a default EndQuorumEpochRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrEndQuorumEpochRequest() *EndQuorumEpochRequest { + var v EndQuorumEpochRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to EndQuorumEpochRequest. +func (v *EndQuorumEpochRequest) Default() { +} + +// NewEndQuorumEpochRequest returns a default EndQuorumEpochRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewEndQuorumEpochRequest() EndQuorumEpochRequest { + var v EndQuorumEpochRequest + v.Default() + return v +} + +type EndQuorumEpochResponseTopicPartition struct { + Partition int32 + + ErrorCode int16 + + // The ID of the current leader, or -1 if the leader is unknown. 
+ LeaderID int32 + + // The latest known leader epoch. + LeaderEpoch int32 +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to EndQuorumEpochResponseTopicPartition. +func (v *EndQuorumEpochResponseTopicPartition) Default() { +} + +// NewEndQuorumEpochResponseTopicPartition returns a default EndQuorumEpochResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewEndQuorumEpochResponseTopicPartition() EndQuorumEpochResponseTopicPartition { + var v EndQuorumEpochResponseTopicPartition + v.Default() + return v +} + +type EndQuorumEpochResponseTopic struct { + Topic string + + Partitions []EndQuorumEpochResponseTopicPartition +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to EndQuorumEpochResponseTopic. +func (v *EndQuorumEpochResponseTopic) Default() { +} + +// NewEndQuorumEpochResponseTopic returns a default EndQuorumEpochResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewEndQuorumEpochResponseTopic() EndQuorumEpochResponseTopic { + var v EndQuorumEpochResponseTopic + v.Default() + return v +} + +type EndQuorumEpochResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + ErrorCode int16 + + Topics []EndQuorumEpochResponseTopic +} + +func (*EndQuorumEpochResponse) Key() int16 { return 54 } +func (*EndQuorumEpochResponse) MaxVersion() int16 { return 0 } +func (v *EndQuorumEpochResponse) SetVersion(version int16) { v.Version = version } +func (v *EndQuorumEpochResponse) GetVersion() int16 { return v.Version } +func (v *EndQuorumEpochResponse) IsFlexible() bool { return false } +func (v *EndQuorumEpochResponse) RequestKind() Request { + return &EndQuorumEpochRequest{Version: v.Version} +} + +func (v *EndQuorumEpochResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Topics + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Topic + dst = kbin.AppendString(dst, v) + } + { + v := v.Partitions + dst = kbin.AppendArrayLen(dst, len(v)) + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.LeaderID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + } + } + } + } + return dst +} + +func (v *EndQuorumEpochResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *EndQuorumEpochResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *EndQuorumEpochResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + v := s.Topics + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]EndQuorumEpochResponseTopic, l)...) 
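+ // The append above grows the truncated slice in place, reusing any
+ // capacity left over from a previous decode into the same struct.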
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + v = b.UnsafeString() + } else { + v = b.String() + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + l = b.ArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]EndQuorumEpochResponseTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int32() + s.LeaderID = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + } + v = a + s.Partitions = v + } + } + v = a + s.Topics = v + } + return b.Complete() +} + +// NewPtrEndQuorumEpochResponse returns a pointer to a default EndQuorumEpochResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrEndQuorumEpochResponse() *EndQuorumEpochResponse { + var v EndQuorumEpochResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to EndQuorumEpochResponse. +func (v *EndQuorumEpochResponse) Default() { +} + +// NewEndQuorumEpochResponse returns a default EndQuorumEpochResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewEndQuorumEpochResponse() EndQuorumEpochResponse { + var v EndQuorumEpochResponse + v.Default() + return v +} + +// A common struct used in DescribeQuorumResponse. +type DescribeQuorumResponseTopicPartitionReplicaState struct { + ReplicaID int32 + + // The last known log end offset of the follower, or -1 if it is unknown. + LogEndOffset int64 + + // The last known leader wall clock time when a follower fetched from the + // leader, or -1 for the current leader or if unknown for a voter. + // + // This field has a default of -1. + LastFetchTimestamp int64 // v1+ + + // The leader wall clock append time of the offset for which the follower + // made the most recent fetch request, or -1 for the current leader or if + // unknown for a voter. + // + // This field has a default of -1. + LastCaughtUpTimestamp int64 // v1+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeQuorumResponseTopicPartitionReplicaState. +func (v *DescribeQuorumResponseTopicPartitionReplicaState) Default() { + v.LastFetchTimestamp = -1 + v.LastCaughtUpTimestamp = -1 +} + +// NewDescribeQuorumResponseTopicPartitionReplicaState returns a default DescribeQuorumResponseTopicPartitionReplicaState +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeQuorumResponseTopicPartitionReplicaState() DescribeQuorumResponseTopicPartitionReplicaState { + var v DescribeQuorumResponseTopicPartitionReplicaState + v.Default() + return v +} + +type DescribeQuorumRequestTopicPartition struct { + Partition int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeQuorumRequestTopicPartition. +func (v *DescribeQuorumRequestTopicPartition) Default() { +} + +// NewDescribeQuorumRequestTopicPartition returns a default DescribeQuorumRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. 
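+//
+// In practice the quorum being described is the KRaft metadata log, which by
+// convention is topic "__cluster_metadata", partition 0; that convention is
+// not enforced by this struct.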
+func NewDescribeQuorumRequestTopicPartition() DescribeQuorumRequestTopicPartition {
+ var v DescribeQuorumRequestTopicPartition
+ v.Default()
+ return v
+}
+
+type DescribeQuorumRequestTopic struct {
+ Topic string
+
+ Partitions []DescribeQuorumRequestTopicPartition
+
+ // UnknownTags are tags Kafka sent that we do not know the purpose of.
+ UnknownTags Tags
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to DescribeQuorumRequestTopic.
+func (v *DescribeQuorumRequestTopic) Default() {
+}
+
+// NewDescribeQuorumRequestTopic returns a default DescribeQuorumRequestTopic
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewDescribeQuorumRequestTopic() DescribeQuorumRequestTopic {
+ var v DescribeQuorumRequestTopic
+ v.Default()
+ return v
+}
+
+// Part of KIP-642 (and KIP-595) to replace Kafka's dependence on Zookeeper with a
+// Kafka-only raft protocol,
+// DescribeQuorumRequest is sent to the quorum leader to describe the state of
+// the quorum.
+type DescribeQuorumRequest struct {
+ // Version is the version of this message used with a Kafka broker.
+ Version int16
+
+ Topics []DescribeQuorumRequestTopic
+
+ // UnknownTags are tags Kafka sent that we do not know the purpose of.
+ UnknownTags Tags
+}
+
+func (*DescribeQuorumRequest) Key() int16 { return 55 }
+func (*DescribeQuorumRequest) MaxVersion() int16 { return 1 }
+func (v *DescribeQuorumRequest) SetVersion(version int16) { v.Version = version }
+func (v *DescribeQuorumRequest) GetVersion() int16 { return v.Version }
+func (v *DescribeQuorumRequest) IsFlexible() bool { return v.Version >= 0 }
+func (v *DescribeQuorumRequest) IsAdminRequest() {}
+func (v *DescribeQuorumRequest) ResponseKind() Response {
+ r := &DescribeQuorumResponse{Version: v.Version}
+ r.Default()
+ return r
+}
+
+// RequestWith issues v on r and returns the response or an error.
+// For sharded requests, the response may be merged and still return an error.
+// It is better to rely on client.RequestSharded than to rely on proper merging behavior.
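+//
+// A minimal sketch of describing the metadata quorum (the topic name follows
+// the KRaft convention noted above; ctx and client are assumed):
+//
+//	req := NewPtrDescribeQuorumRequest()
+//	topic := NewDescribeQuorumRequestTopic()
+//	topic.Topic = "__cluster_metadata"
+//	topic.Partitions = append(topic.Partitions, NewDescribeQuorumRequestTopicPartition())
+//	req.Topics = append(req.Topics, topic)
+//	resp, err := req.RequestWith(ctx, client)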
+func (v *DescribeQuorumRequest) RequestWith(ctx context.Context, r Requestor) (*DescribeQuorumResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*DescribeQuorumResponse) + return resp, err +} + +func (v *DescribeQuorumRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeQuorumRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeQuorumRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeQuorumRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeQuorumRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeQuorumRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeQuorumRequest returns a pointer to a default DescribeQuorumRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeQuorumRequest() *DescribeQuorumRequest { + var v DescribeQuorumRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeQuorumRequest. +func (v *DescribeQuorumRequest) Default() { +} + +// NewDescribeQuorumRequest returns a default DescribeQuorumRequest +// This is a shortcut for creating a struct and calling Default yourself. 
+func NewDescribeQuorumRequest() DescribeQuorumRequest { + var v DescribeQuorumRequest + v.Default() + return v +} + +type DescribeQuorumResponseTopicPartition struct { + Partition int32 + + ErrorCode int16 + + // The ID of the current leader, or -1 if the leader is unknown. + LeaderID int32 + + // The latest known leader epoch. + LeaderEpoch int32 + + HighWatermark int64 + + CurrentVoters []DescribeQuorumResponseTopicPartitionReplicaState + + Observers []DescribeQuorumResponseTopicPartitionReplicaState + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeQuorumResponseTopicPartition. +func (v *DescribeQuorumResponseTopicPartition) Default() { +} + +// NewDescribeQuorumResponseTopicPartition returns a default DescribeQuorumResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeQuorumResponseTopicPartition() DescribeQuorumResponseTopicPartition { + var v DescribeQuorumResponseTopicPartition + v.Default() + return v +} + +type DescribeQuorumResponseTopic struct { + Topic string + + Partitions []DescribeQuorumResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeQuorumResponseTopic. +func (v *DescribeQuorumResponseTopic) Default() { +} + +// NewDescribeQuorumResponseTopic returns a default DescribeQuorumResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeQuorumResponseTopic() DescribeQuorumResponseTopic { + var v DescribeQuorumResponseTopic + v.Default() + return v +} + +type DescribeQuorumResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + ErrorCode int16 + + Topics []DescribeQuorumResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags +} + +func (*DescribeQuorumResponse) Key() int16 { return 55 } +func (*DescribeQuorumResponse) MaxVersion() int16 { return 1 } +func (v *DescribeQuorumResponse) SetVersion(version int16) { v.Version = version } +func (v *DescribeQuorumResponse) GetVersion() int16 { return v.Version } +func (v *DescribeQuorumResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *DescribeQuorumResponse) RequestKind() Request { + return &DescribeQuorumRequest{Version: v.Version} +} + +func (v *DescribeQuorumResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.LeaderID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.HighWatermark + dst = kbin.AppendInt64(dst, v) + } + { + v := v.CurrentVoters + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ReplicaID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LogEndOffset + dst = kbin.AppendInt64(dst, v) + } + if version >= 1 { + v := v.LastFetchTimestamp + dst = kbin.AppendInt64(dst, v) + } + if version >= 1 { + v := v.LastCaughtUpTimestamp + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.Observers + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ReplicaID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LogEndOffset + dst = kbin.AppendInt64(dst, v) + } + if version >= 1 { + v := v.LastFetchTimestamp + dst = kbin.AppendInt64(dst, v) + } + if version >= 1 { + v := v.LastCaughtUpTimestamp + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeQuorumResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeQuorumResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeQuorumResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int16() + 
s.ErrorCode = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeQuorumResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeQuorumResponseTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int32() + s.LeaderID = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + { + v := b.Int64() + s.HighWatermark = v + } + { + v := s.CurrentVoters + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeQuorumResponseTopicPartitionReplicaState, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.ReplicaID = v + } + { + v := b.Int64() + s.LogEndOffset = v + } + if version >= 1 { + v := b.Int64() + s.LastFetchTimestamp = v + } + if version >= 1 { + v := b.Int64() + s.LastCaughtUpTimestamp = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.CurrentVoters = v + } + { + v := s.Observers + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeQuorumResponseTopicPartitionReplicaState, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.ReplicaID = v + } + { + v := b.Int64() + s.LogEndOffset = v + } + if version >= 1 { + v := b.Int64() + s.LastFetchTimestamp = v + } + if version >= 1 { + v := b.Int64() + s.LastCaughtUpTimestamp = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Observers = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeQuorumResponse returns a pointer to a default DescribeQuorumResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeQuorumResponse() *DescribeQuorumResponse { + var v DescribeQuorumResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeQuorumResponse. +func (v *DescribeQuorumResponse) Default() { +} + +// NewDescribeQuorumResponse returns a default DescribeQuorumResponse +// This is a shortcut for creating a struct and calling Default yourself. 
+func NewDescribeQuorumResponse() DescribeQuorumResponse { + var v DescribeQuorumResponse + v.Default() + return v +} + +type AlterPartitionRequestTopicPartitionNewEpochISR struct { + // The broker ID . + BrokerID int32 + + // The broker's epoch; -1 if the epoch check is not supported. + // + // This field has a default of -1. + BrokerEpoch int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterPartitionRequestTopicPartitionNewEpochISR. +func (v *AlterPartitionRequestTopicPartitionNewEpochISR) Default() { + v.BrokerEpoch = -1 +} + +// NewAlterPartitionRequestTopicPartitionNewEpochISR returns a default AlterPartitionRequestTopicPartitionNewEpochISR +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterPartitionRequestTopicPartitionNewEpochISR() AlterPartitionRequestTopicPartitionNewEpochISR { + var v AlterPartitionRequestTopicPartitionNewEpochISR + v.Default() + return v +} + +type AlterPartitionRequestTopicPartition struct { + Partition int32 + + // The leader epoch of this partition. + LeaderEpoch int32 + + // The ISR for this partition. + NewISR []int32 // v0-v2 + + NewEpochISR []AlterPartitionRequestTopicPartitionNewEpochISR // v3+ + + // 1 if the partition is recovering from unclean leader election; 0 otherwise + LeaderRecoveryState int8 // v1+ + + // The expected epoch of the partition which is being updated. + // For a legacy cluster, this is the ZkVersion in the LeaderAndISR request. + PartitionEpoch int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterPartitionRequestTopicPartition. +func (v *AlterPartitionRequestTopicPartition) Default() { +} + +// NewAlterPartitionRequestTopicPartition returns a default AlterPartitionRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterPartitionRequestTopicPartition() AlterPartitionRequestTopicPartition { + var v AlterPartitionRequestTopicPartition + v.Default() + return v +} + +type AlterPartitionRequestTopic struct { + Topic string // v0-v1 + + TopicID [16]byte // v2+ + + Partitions []AlterPartitionRequestTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterPartitionRequestTopic. +func (v *AlterPartitionRequestTopic) Default() { +} + +// NewAlterPartitionRequestTopic returns a default AlterPartitionRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterPartitionRequestTopic() AlterPartitionRequestTopic { + var v AlterPartitionRequestTopic + v.Default() + return v +} + +// AlterPartitionRequest, proposed in KIP-497 and introduced in Kafka 2.7.0, +// is an admin request to modify ISR. +// +// Version 3 was added for KIP-903 and replaced NewISR. +type AlterPartitionRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // The ID of the requesting broker. + BrokerID int32 + + // The epoch of the requesting broker. + // + // This field has a default of -1. 
+ BrokerEpoch int64 + + Topics []AlterPartitionRequestTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*AlterPartitionRequest) Key() int16 { return 56 } +func (*AlterPartitionRequest) MaxVersion() int16 { return 3 } +func (v *AlterPartitionRequest) SetVersion(version int16) { v.Version = version } +func (v *AlterPartitionRequest) GetVersion() int16 { return v.Version } +func (v *AlterPartitionRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *AlterPartitionRequest) IsAdminRequest() {} +func (v *AlterPartitionRequest) ResponseKind() Response { + r := &AlterPartitionResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *AlterPartitionRequest) RequestWith(ctx context.Context, r Requestor) (*AlterPartitionResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*AlterPartitionResponse) + return resp, err +} + +func (v *AlterPartitionRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.BrokerID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.BrokerEpoch + dst = kbin.AppendInt64(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + if version >= 0 && version <= 1 { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 2 { + v := v.TopicID + dst = kbin.AppendUuid(dst, v) + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + if version >= 0 && version <= 2 { + v := v.NewISR + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if version >= 3 { + v := v.NewEpochISR + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.BrokerID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.BrokerEpoch + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 1 { + v := v.LeaderRecoveryState + dst = kbin.AppendInt8(dst, v) + } + { + v := v.PartitionEpoch + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AlterPartitionRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AlterPartitionRequest) UnsafeReadFrom(src 
[]byte) error { + return v.readFrom(src, true) +} + +func (v *AlterPartitionRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.BrokerID = v + } + { + v := b.Int64() + s.BrokerEpoch = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterPartitionRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + if version >= 0 && version <= 1 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + if version >= 2 { + v := b.Uuid() + s.TopicID = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterPartitionRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + if version >= 0 && version <= 2 { + v := s.NewISR + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.NewISR = v + } + if version >= 3 { + v := s.NewEpochISR + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterPartitionRequestTopicPartitionNewEpochISR, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.BrokerID = v + } + { + v := b.Int32() + s.BrokerEpoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.NewEpochISR = v + } + if version >= 1 { + v := b.Int8() + s.LeaderRecoveryState = v + } + { + v := b.Int32() + s.PartitionEpoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAlterPartitionRequest returns a pointer to a default AlterPartitionRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAlterPartitionRequest() *AlterPartitionRequest { + var v AlterPartitionRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterPartitionRequest. +func (v *AlterPartitionRequest) Default() { + v.BrokerEpoch = -1 +} + +// NewAlterPartitionRequest returns a default AlterPartitionRequest +// This is a shortcut for creating a struct and calling Default yourself. 
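+//
+// The returned request has BrokerEpoch at its default of -1.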
+func NewAlterPartitionRequest() AlterPartitionRequest { + var v AlterPartitionRequest + v.Default() + return v +} + +type AlterPartitionResponseTopicPartition struct { + Partition int32 + + ErrorCode int16 + + // The broker ID of the leader. + LeaderID int32 + + // The leader epoch of this partition. + LeaderEpoch int32 + + // The in-sync replica ids. + ISR []int32 + + // 1 if the partition is recovering from unclean leader election; 0 otherwise + LeaderRecoveryState int8 // v1+ + + // The current epoch of the partition for KRaft controllers. + // The current ZK version for legacy controllers. + PartitionEpoch int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterPartitionResponseTopicPartition. +func (v *AlterPartitionResponseTopicPartition) Default() { +} + +// NewAlterPartitionResponseTopicPartition returns a default AlterPartitionResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterPartitionResponseTopicPartition() AlterPartitionResponseTopicPartition { + var v AlterPartitionResponseTopicPartition + v.Default() + return v +} + +type AlterPartitionResponseTopic struct { + Topic string // v0-v1 + + TopidID [16]byte // v2+ + + Partitions []AlterPartitionResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterPartitionResponseTopic. +func (v *AlterPartitionResponseTopic) Default() { +} + +// NewAlterPartitionResponseTopic returns a default AlterPartitionResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterPartitionResponseTopic() AlterPartitionResponseTopic { + var v AlterPartitionResponseTopic + v.Default() + return v +} + +type AlterPartitionResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + ErrorCode int16 + + Topics []AlterPartitionResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags +} + +func (*AlterPartitionResponse) Key() int16 { return 56 } +func (*AlterPartitionResponse) MaxVersion() int16 { return 3 } +func (v *AlterPartitionResponse) SetVersion(version int16) { v.Version = version } +func (v *AlterPartitionResponse) GetVersion() int16 { return v.Version } +func (v *AlterPartitionResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *AlterPartitionResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 0 } +func (v *AlterPartitionResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *AlterPartitionResponse) RequestKind() Request { + return &AlterPartitionRequest{Version: v.Version} +} + +func (v *AlterPartitionResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + if version >= 0 && version <= 1 { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if version >= 2 { + v := v.TopidID + dst = kbin.AppendUuid(dst, v) + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.LeaderID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ISR + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if version >= 1 { + v := v.LeaderRecoveryState + dst = kbin.AppendInt8(dst, v) + } + { + v := v.PartitionEpoch + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AlterPartitionResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AlterPartitionResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AlterPartitionResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterPartitionResponseTopic, l)...) 
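+ // Note: every version of this response is flexible (IsFlexible is
+ // v.Version >= 0), so the compact-length branch above is the one taken
+ // in practice; the non-compact branch is generated for uniformity
+ // across message types.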
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + if version >= 0 && version <= 1 { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + if version >= 2 { + v := b.Uuid() + s.TopidID = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AlterPartitionResponseTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int32() + s.LeaderID = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + { + v := s.ISR + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.ISR = v + } + if version >= 1 { + v := b.Int8() + s.LeaderRecoveryState = v + } + { + v := b.Int32() + s.PartitionEpoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAlterPartitionResponse returns a pointer to a default AlterPartitionResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAlterPartitionResponse() *AlterPartitionResponse { + var v AlterPartitionResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AlterPartitionResponse. +func (v *AlterPartitionResponse) Default() { +} + +// NewAlterPartitionResponse returns a default AlterPartitionResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewAlterPartitionResponse() AlterPartitionResponse { + var v AlterPartitionResponse + v.Default() + return v +} + +type UpdateFeaturesRequestFeatureUpdate struct { + // The name of the finalized feature to update. + Feature string + + // The new maximum version level for the finalized feature. A value >= 1 is + // valid. A value < 1, is special, and can be used to request the deletion + // of the finalized feature. + MaxVersionLevel int16 + + // When set to true, the finalized feature version level is allowed to be + // downgraded/deleted. The downgrade request will fail if the new maximum + // version level is a value that's not lower than the existing maximum + // finalized version level. + // + // Replaced in v1 with ValidateOnly. + AllowDowngrade bool + + // Determine which type of upgrade will be performed: 1 will perform an + // upgrade only (default), 2 is safe downgrades only (lossless), 3 is + // unsafe downgrades (lossy). + UpgradeType int8 // v1+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to UpdateFeaturesRequestFeatureUpdate. 
+func (v *UpdateFeaturesRequestFeatureUpdate) Default() {
+}
+
+// NewUpdateFeaturesRequestFeatureUpdate returns a default UpdateFeaturesRequestFeatureUpdate
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewUpdateFeaturesRequestFeatureUpdate() UpdateFeaturesRequestFeatureUpdate {
+ var v UpdateFeaturesRequestFeatureUpdate
+ v.Default()
+ return v
+}
+
+// From KIP-584 and introduced in 2.7.0, this request updates broker-wide features.
+type UpdateFeaturesRequest struct {
+ // Version is the version of this message used with a Kafka broker.
+ Version int16
+
+ // TimeoutMillis is how long Kafka can wait before responding to this request.
+ // This field has no effect on Kafka's processing of the request; the request
+ // will continue to be processed if the timeout is reached. If the timeout is
+ // reached, Kafka will reply with a REQUEST_TIMED_OUT error.
+ //
+ // This field has a default of 60000.
+ TimeoutMillis int32
+
+ // The list of updates to finalized features.
+ FeatureUpdates []UpdateFeaturesRequestFeatureUpdate
+
+ // True if we should validate the request, but not perform the upgrade or
+ // downgrade.
+ ValidateOnly bool // v1+
+
+ // UnknownTags are tags Kafka sent that we do not know the purpose of.
+ UnknownTags Tags
+}
+
+func (*UpdateFeaturesRequest) Key() int16 { return 57 }
+func (*UpdateFeaturesRequest) MaxVersion() int16 { return 1 }
+func (v *UpdateFeaturesRequest) SetVersion(version int16) { v.Version = version }
+func (v *UpdateFeaturesRequest) GetVersion() int16 { return v.Version }
+func (v *UpdateFeaturesRequest) IsFlexible() bool { return v.Version >= 0 }
+func (v *UpdateFeaturesRequest) Timeout() int32 { return v.TimeoutMillis }
+func (v *UpdateFeaturesRequest) SetTimeout(timeoutMillis int32) { v.TimeoutMillis = timeoutMillis }
+func (v *UpdateFeaturesRequest) IsAdminRequest() {}
+func (v *UpdateFeaturesRequest) ResponseKind() Response {
+ r := &UpdateFeaturesResponse{Version: v.Version}
+ r.Default()
+ return r
+}
+
+// RequestWith issues v on r and returns the response or an error.
+// For sharded requests, the response may be merged and still return an error.
+// It is better to rely on client.RequestSharded than to rely on proper merging behavior.
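+//
+// A minimal validate-only sketch (the feature name and level are purely
+// illustrative, ctx and client are assumed, and the version is pinned to 1
+// only so the v1+ fields apply; clients normally negotiate the version):
+//
+//	req := NewPtrUpdateFeaturesRequest()
+//	req.Version = 1
+//	update := NewUpdateFeaturesRequestFeatureUpdate()
+//	update.Feature = "metadata.version"
+//	update.MaxVersionLevel = 7
+//	update.UpgradeType = 1 // upgrade only
+//	req.FeatureUpdates = append(req.FeatureUpdates, update)
+//	req.ValidateOnly = true // v1+
+//	resp, err := req.RequestWith(ctx, client)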
+func (v *UpdateFeaturesRequest) RequestWith(ctx context.Context, r Requestor) (*UpdateFeaturesResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*UpdateFeaturesResponse) + return resp, err +} + +func (v *UpdateFeaturesRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.TimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.FeatureUpdates + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Feature + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.MaxVersionLevel + dst = kbin.AppendInt16(dst, v) + } + if version >= 0 && version <= 0 { + v := v.AllowDowngrade + dst = kbin.AppendBool(dst, v) + } + if version >= 1 { + v := v.UpgradeType + dst = kbin.AppendInt8(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if version >= 1 { + v := v.ValidateOnly + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *UpdateFeaturesRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *UpdateFeaturesRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *UpdateFeaturesRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.TimeoutMillis = v + } + { + v := s.FeatureUpdates + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]UpdateFeaturesRequestFeatureUpdate, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Feature = v + } + { + v := b.Int16() + s.MaxVersionLevel = v + } + if version >= 0 && version <= 0 { + v := b.Bool() + s.AllowDowngrade = v + } + if version >= 1 { + v := b.Int8() + s.UpgradeType = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.FeatureUpdates = v + } + if version >= 1 { + v := b.Bool() + s.ValidateOnly = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrUpdateFeaturesRequest returns a pointer to a default UpdateFeaturesRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrUpdateFeaturesRequest() *UpdateFeaturesRequest { + var v UpdateFeaturesRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to UpdateFeaturesRequest. +func (v *UpdateFeaturesRequest) Default() { + v.TimeoutMillis = 60000 +} + +// NewUpdateFeaturesRequest returns a default UpdateFeaturesRequest +// This is a shortcut for creating a struct and calling Default yourself. 
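+//
+// The returned request has TimeoutMillis at its default of 60000.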
+func NewUpdateFeaturesRequest() UpdateFeaturesRequest { + var v UpdateFeaturesRequest + v.Default() + return v +} + +type UpdateFeaturesResponseResult struct { + // The name of the finalized feature. + Feature string + + // The feature update error code, if any. + ErrorCode int16 + + // The feature update error, if any. + ErrorMessage *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to UpdateFeaturesResponseResult. +func (v *UpdateFeaturesResponseResult) Default() { +} + +// NewUpdateFeaturesResponseResult returns a default UpdateFeaturesResponseResult +// This is a shortcut for creating a struct and calling Default yourself. +func NewUpdateFeaturesResponseResult() UpdateFeaturesResponseResult { + var v UpdateFeaturesResponseResult + v.Default() + return v +} + +type UpdateFeaturesResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // The top level error code, if any. + ErrorCode int16 + + // An informative message if the request errored, if any. + ErrorMessage *string + + // The results for each feature update request. + Results []UpdateFeaturesResponseResult + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*UpdateFeaturesResponse) Key() int16 { return 57 } +func (*UpdateFeaturesResponse) MaxVersion() int16 { return 1 } +func (v *UpdateFeaturesResponse) SetVersion(version int16) { v.Version = version } +func (v *UpdateFeaturesResponse) GetVersion() int16 { return v.Version } +func (v *UpdateFeaturesResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *UpdateFeaturesResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 0 } +func (v *UpdateFeaturesResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *UpdateFeaturesResponse) RequestKind() Request { + return &UpdateFeaturesRequest{Version: v.Version} +} + +func (v *UpdateFeaturesResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Results + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Feature + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *UpdateFeaturesResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v 
*UpdateFeaturesResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *UpdateFeaturesResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + { + v := s.Results + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]UpdateFeaturesResponseResult, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Feature = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Results = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrUpdateFeaturesResponse returns a pointer to a default UpdateFeaturesResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrUpdateFeaturesResponse() *UpdateFeaturesResponse { + var v UpdateFeaturesResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to UpdateFeaturesResponse. +func (v *UpdateFeaturesResponse) Default() { +} + +// NewUpdateFeaturesResponse returns a default UpdateFeaturesResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewUpdateFeaturesResponse() UpdateFeaturesResponse { + var v UpdateFeaturesResponse + v.Default() + return v +} + +// Introduced for KIP-590, EnvelopeRequest is what brokers use to wrap an +// incoming request before forwarding it to another broker. +type EnvelopeRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // The embedded request header and data. + RequestData []byte + + // Value of the initial client principal when the request is redirected by a broker. + RequestPrincipal []byte + + // The original client's address in bytes. + ClientHostAddress []byte + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags +} + +func (*EnvelopeRequest) Key() int16 { return 58 } +func (*EnvelopeRequest) MaxVersion() int16 { return 0 } +func (v *EnvelopeRequest) SetVersion(version int16) { v.Version = version } +func (v *EnvelopeRequest) GetVersion() int16 { return v.Version } +func (v *EnvelopeRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *EnvelopeRequest) IsAdminRequest() {} +func (v *EnvelopeRequest) ResponseKind() Response { + r := &EnvelopeResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *EnvelopeRequest) RequestWith(ctx context.Context, r Requestor) (*EnvelopeResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*EnvelopeResponse) + return resp, err +} + +func (v *EnvelopeRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.RequestData + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + { + v := v.RequestPrincipal + if isFlexible { + dst = kbin.AppendCompactNullableBytes(dst, v) + } else { + dst = kbin.AppendNullableBytes(dst, v) + } + } + { + v := v.ClientHostAddress + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *EnvelopeRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *EnvelopeRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *EnvelopeRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.RequestData = v + } + { + var v []byte + if isFlexible { + v = b.CompactNullableBytes() + } else { + v = b.NullableBytes() + } + s.RequestPrincipal = v + } + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.ClientHostAddress = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrEnvelopeRequest returns a pointer to a default EnvelopeRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrEnvelopeRequest() *EnvelopeRequest { + var v EnvelopeRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to EnvelopeRequest. +func (v *EnvelopeRequest) Default() { +} + +// NewEnvelopeRequest returns a default EnvelopeRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewEnvelopeRequest() EnvelopeRequest { + var v EnvelopeRequest + v.Default() + return v +} + +type EnvelopeResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // The embedded response header and data. + ResponseData []byte + + // The error code, or 0 if there was no error. + // + // NOT_CONTROLLER is returned when the request is not sent to the controller. 
+ // + // CLUSTER_AUTHORIZATION_FAILED is returned if inter-broker authorization failed. + ErrorCode int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*EnvelopeResponse) Key() int16 { return 58 } +func (*EnvelopeResponse) MaxVersion() int16 { return 0 } +func (v *EnvelopeResponse) SetVersion(version int16) { v.Version = version } +func (v *EnvelopeResponse) GetVersion() int16 { return v.Version } +func (v *EnvelopeResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *EnvelopeResponse) RequestKind() Request { return &EnvelopeRequest{Version: v.Version} } + +func (v *EnvelopeResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ResponseData + if isFlexible { + dst = kbin.AppendCompactNullableBytes(dst, v) + } else { + dst = kbin.AppendNullableBytes(dst, v) + } + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *EnvelopeResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *EnvelopeResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *EnvelopeResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + var v []byte + if isFlexible { + v = b.CompactNullableBytes() + } else { + v = b.NullableBytes() + } + s.ResponseData = v + } + { + v := b.Int16() + s.ErrorCode = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrEnvelopeResponse returns a pointer to a default EnvelopeResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrEnvelopeResponse() *EnvelopeResponse { + var v EnvelopeResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to EnvelopeResponse. +func (v *EnvelopeResponse) Default() { +} + +// NewEnvelopeResponse returns a default EnvelopeResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewEnvelopeResponse() EnvelopeResponse { + var v EnvelopeResponse + v.Default() + return v +} + +type FetchSnapshotRequestTopicPartitionSnapshotID struct { + EndOffset int64 + + Epoch int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchSnapshotRequestTopicPartitionSnapshotID. +func (v *FetchSnapshotRequestTopicPartitionSnapshotID) Default() { +} + +// NewFetchSnapshotRequestTopicPartitionSnapshotID returns a default FetchSnapshotRequestTopicPartitionSnapshotID +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchSnapshotRequestTopicPartitionSnapshotID() FetchSnapshotRequestTopicPartitionSnapshotID { + var v FetchSnapshotRequestTopicPartitionSnapshotID + v.Default() + return v +} + +type FetchSnapshotRequestTopicPartition struct { + // The partition to fetch. + Partition int32 + + // The current leader epoch of the partition, or -1 for an unknown leader epoch. + CurrentLeaderEpoch int32 + + // The snapshot end offset and epoch to fetch. 
+ SnapshotID FetchSnapshotRequestTopicPartitionSnapshotID + + // The byte position within the snapshot to start fetching from. + Position int64 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchSnapshotRequestTopicPartition. +func (v *FetchSnapshotRequestTopicPartition) Default() { + { + v := &v.SnapshotID + _ = v + } +} + +// NewFetchSnapshotRequestTopicPartition returns a default FetchSnapshotRequestTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchSnapshotRequestTopicPartition() FetchSnapshotRequestTopicPartition { + var v FetchSnapshotRequestTopicPartition + v.Default() + return v +} + +type FetchSnapshotRequestTopic struct { + // The name of the topic to fetch. + Topic string + + // The partitions to fetch. + Partitions []FetchSnapshotRequestTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchSnapshotRequestTopic. +func (v *FetchSnapshotRequestTopic) Default() { +} + +// NewFetchSnapshotRequestTopic returns a default FetchSnapshotRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchSnapshotRequestTopic() FetchSnapshotRequestTopic { + var v FetchSnapshotRequestTopic + v.Default() + return v +} + +// Introduced for KIP-630, FetchSnapshotRequest is a part of the inter-Kafka +// raft protocol to remove the dependency on Zookeeper. +type FetchSnapshotRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // The ClusterID if known, this is used to validate metadata fetches prior to + // broker registration. + ClusterID *string // tag 0 + + // The broker ID of the follower. + // + // This field has a default of -1. + ReplicaID int32 + + // The maximum bytes to fetch from all of the snapshots. + // + // This field has a default of 0x7fffffff. + MaxBytes int32 + + // The topics to fetch. + Topics []FetchSnapshotRequestTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*FetchSnapshotRequest) Key() int16 { return 59 } +func (*FetchSnapshotRequest) MaxVersion() int16 { return 0 } +func (v *FetchSnapshotRequest) SetVersion(version int16) { v.Version = version } +func (v *FetchSnapshotRequest) GetVersion() int16 { return v.Version } +func (v *FetchSnapshotRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *FetchSnapshotRequest) ResponseKind() Response { + r := &FetchSnapshotResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
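
// FetchSnapshotRequest is part of the internal Raft transport between
// brokers and controllers, so ordinary clients rarely send it; still, a
// sketch shows how the nested snapshot ID and position fit together (values
// illustrative, "context" import assumed).
func exampleFetchSnapshot(ctx context.Context, r Requestor) (*FetchSnapshotResponse, error) {
	part := NewFetchSnapshotRequestTopicPartition()
	part.Partition = 0
	part.CurrentLeaderEpoch = -1    // unknown leader epoch
	part.SnapshotID.EndOffset = 100 // which snapshot to fetch (illustrative)
	part.SnapshotID.Epoch = 3
	part.Position = 0 // fetch from the start of the snapshot

	topic := NewFetchSnapshotRequestTopic()
	topic.Topic = "__cluster_metadata" // the KRaft metadata topic
	topic.Partitions = append(topic.Partitions, part)

	req := NewPtrFetchSnapshotRequest() // ReplicaID defaults to -1, MaxBytes to 0x7fffffff
	req.Topics = append(req.Topics, topic)
	return req.RequestWith(ctx, r)
}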
+func (v *FetchSnapshotRequest) RequestWith(ctx context.Context, r Requestor) (*FetchSnapshotResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*FetchSnapshotResponse) + return resp, err +} + +func (v *FetchSnapshotRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ReplicaID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.MaxBytes + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.CurrentLeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := &v.SnapshotID + { + v := v.EndOffset + dst = kbin.AppendInt64(dst, v) + } + { + v := v.Epoch + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + { + v := v.Position + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + var toEncode []uint32 + if v.ClusterID != nil { + toEncode = append(toEncode, 0) + } + dst = kbin.AppendUvarint(dst, uint32(len(toEncode)+v.UnknownTags.Len())) + for _, tag := range toEncode { + switch tag { + case 0: + { + v := v.ClusterID + dst = kbin.AppendUvarint(dst, 0) + sized := false + lenAt := len(dst) + fClusterID: + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + if !sized { + dst = kbin.AppendUvarint(dst[:lenAt], uint32(len(dst[lenAt:]))) + sized = true + goto fClusterID + } + } + } + } + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *FetchSnapshotRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *FetchSnapshotRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *FetchSnapshotRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ReplicaID = v + } + { + v := b.Int32() + s.MaxBytes = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]FetchSnapshotRequestTopic, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]FetchSnapshotRequestTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int32() + s.CurrentLeaderEpoch = v + } + { + v := &s.SnapshotID + v.Default() + s := v + { + v := b.Int64() + s.EndOffset = v + } + { + v := b.Int32() + s.Epoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + { + v := b.Int64() + s.Position = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + for i := b.Uvarint(); i > 0; i-- { + switch key := b.Uvarint(); key { + default: + s.UnknownTags.Set(key, b.Span(int(b.Uvarint()))) + case 0: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ClusterID = v + if err := b.Complete(); err != nil { + return err + } + } + } + } + return b.Complete() +} + +// NewPtrFetchSnapshotRequest returns a pointer to a default FetchSnapshotRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrFetchSnapshotRequest() *FetchSnapshotRequest { + var v FetchSnapshotRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchSnapshotRequest. +func (v *FetchSnapshotRequest) Default() { + v.ReplicaID = -1 + v.MaxBytes = 2147483647 +} + +// NewFetchSnapshotRequest returns a default FetchSnapshotRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchSnapshotRequest() FetchSnapshotRequest { + var v FetchSnapshotRequest + v.Default() + return v +} + +type FetchSnapshotResponseTopicPartitionSnapshotID struct { + EndOffset int64 + + Epoch int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchSnapshotResponseTopicPartitionSnapshotID. +func (v *FetchSnapshotResponseTopicPartitionSnapshotID) Default() { +} + +// NewFetchSnapshotResponseTopicPartitionSnapshotID returns a default FetchSnapshotResponseTopicPartitionSnapshotID +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchSnapshotResponseTopicPartitionSnapshotID() FetchSnapshotResponseTopicPartitionSnapshotID { + var v FetchSnapshotResponseTopicPartitionSnapshotID + v.Default() + return v +} + +type FetchSnapshotResponseTopicPartitionCurrentLeader struct { + LeaderID int32 + + LeaderEpoch int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. 
Calling this allows for future compatibility +// if new fields are added to FetchSnapshotResponseTopicPartitionCurrentLeader. +func (v *FetchSnapshotResponseTopicPartitionCurrentLeader) Default() { +} + +// NewFetchSnapshotResponseTopicPartitionCurrentLeader returns a default FetchSnapshotResponseTopicPartitionCurrentLeader +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchSnapshotResponseTopicPartitionCurrentLeader() FetchSnapshotResponseTopicPartitionCurrentLeader { + var v FetchSnapshotResponseTopicPartitionCurrentLeader + v.Default() + return v +} + +type FetchSnapshotResponseTopicPartition struct { + // The partition. + Partition int32 + + // An error code, or 0 if there was no fetch error. + ErrorCode int16 + + // The snapshot end offset and epoch to fetch. + SnapshotID FetchSnapshotResponseTopicPartitionSnapshotID + + // The ID of the current leader (or -1 if unknown) and the latest known + // leader epoch. + CurrentLeader FetchSnapshotResponseTopicPartitionCurrentLeader // tag 0 + + // The total size of the snapshot. + Size int64 + + // The starting byte position within the snapshot included in the Bytes + // field. + Position int64 + + // Snapshot data. + Bytes []byte + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchSnapshotResponseTopicPartition. +func (v *FetchSnapshotResponseTopicPartition) Default() { + { + v := &v.SnapshotID + _ = v + } + { + v := &v.CurrentLeader + _ = v + } +} + +// NewFetchSnapshotResponseTopicPartition returns a default FetchSnapshotResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchSnapshotResponseTopicPartition() FetchSnapshotResponseTopicPartition { + var v FetchSnapshotResponseTopicPartition + v.Default() + return v +} + +type FetchSnapshotResponseTopic struct { + // The name of the topic to fetch. + Topic string + + // The partitions to fetch. + Partitions []FetchSnapshotResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchSnapshotResponseTopic. +func (v *FetchSnapshotResponseTopic) Default() { +} + +// NewFetchSnapshotResponseTopic returns a default FetchSnapshotResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchSnapshotResponseTopic() FetchSnapshotResponseTopic { + var v FetchSnapshotResponseTopic + v.Default() + return v +} + +// FetchSnapshotResponse is a response for a FetchSnapshotRequest. +type FetchSnapshotResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // The top level response error code. + ErrorCode int16 + + // The topics to fetch. + Topics []FetchSnapshotResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags +} + +func (*FetchSnapshotResponse) Key() int16 { return 59 } +func (*FetchSnapshotResponse) MaxVersion() int16 { return 0 } +func (v *FetchSnapshotResponse) SetVersion(version int16) { v.Version = version } +func (v *FetchSnapshotResponse) GetVersion() int16 { return v.Version } +func (v *FetchSnapshotResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *FetchSnapshotResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 0 } +func (v *FetchSnapshotResponse) SetThrottle(throttleMillis int32) { v.ThrottleMillis = throttleMillis } +func (v *FetchSnapshotResponse) RequestKind() Request { + return &FetchSnapshotRequest{Version: v.Version} +} + +func (v *FetchSnapshotResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := &v.SnapshotID + { + v := v.EndOffset + dst = kbin.AppendInt64(dst, v) + } + { + v := v.Epoch + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + { + v := v.Size + dst = kbin.AppendInt64(dst, v) + } + { + v := v.Position + dst = kbin.AppendInt64(dst, v) + } + { + v := v.Bytes + if isFlexible { + dst = kbin.AppendCompactBytes(dst, v) + } else { + dst = kbin.AppendBytes(dst, v) + } + } + if isFlexible { + var toEncode []uint32 + if !reflect.DeepEqual(v.CurrentLeader, (func() FetchSnapshotResponseTopicPartitionCurrentLeader { + var v FetchSnapshotResponseTopicPartitionCurrentLeader + v.Default() + return v + })()) { + toEncode = append(toEncode, 0) + } + dst = kbin.AppendUvarint(dst, uint32(len(toEncode)+v.UnknownTags.Len())) + for _, tag := range toEncode { + switch tag { + case 0: + { + v := v.CurrentLeader + dst = kbin.AppendUvarint(dst, 0) + sized := false + lenAt := len(dst) + fCurrentLeader: + { + v := v.LeaderID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LeaderEpoch + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + if !sized { + dst = kbin.AppendUvarint(dst[:lenAt], uint32(len(dst[lenAt:]))) + sized = true + goto fCurrentLeader + } + } + } + } + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *FetchSnapshotResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *FetchSnapshotResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *FetchSnapshotResponse) readFrom(src 
[]byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]FetchSnapshotResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]FetchSnapshotResponseTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := &s.SnapshotID + v.Default() + s := v + { + v := b.Int64() + s.EndOffset = v + } + { + v := b.Int32() + s.Epoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + { + v := b.Int64() + s.Size = v + } + { + v := b.Int64() + s.Position = v + } + { + var v []byte + if isFlexible { + v = b.CompactBytes() + } else { + v = b.Bytes() + } + s.Bytes = v + } + if isFlexible { + for i := b.Uvarint(); i > 0; i-- { + switch key := b.Uvarint(); key { + default: + s.UnknownTags.Set(key, b.Span(int(b.Uvarint()))) + case 0: + b := kbin.Reader{Src: b.Span(int(b.Uvarint()))} + v := &s.CurrentLeader + v.Default() + s := v + { + v := b.Int32() + s.LeaderID = v + } + { + v := b.Int32() + s.LeaderEpoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + if err := b.Complete(); err != nil { + return err + } + } + } + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrFetchSnapshotResponse returns a pointer to a default FetchSnapshotResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrFetchSnapshotResponse() *FetchSnapshotResponse { + var v FetchSnapshotResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to FetchSnapshotResponse. +func (v *FetchSnapshotResponse) Default() { +} + +// NewFetchSnapshotResponse returns a default FetchSnapshotResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewFetchSnapshotResponse() FetchSnapshotResponse { + var v FetchSnapshotResponse + v.Default() + return v +} + +// Introduced for KIP-700, DescribeClusterRequest is effectively an "admin" +// type metadata request for information that producers or consumers do not +// need to care about. +type DescribeClusterRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Whether to include cluster authorized operations. This requires DESCRIBE + // on CLUSTER. + IncludeClusterAuthorizedOperations bool + + // The endpoint type to describe. 1=brokers, 2=controllers. 
+ // + // This field has a default of 1. + EndpointType int8 // v1+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*DescribeClusterRequest) Key() int16 { return 60 } +func (*DescribeClusterRequest) MaxVersion() int16 { return 1 } +func (v *DescribeClusterRequest) SetVersion(version int16) { v.Version = version } +func (v *DescribeClusterRequest) GetVersion() int16 { return v.Version } +func (v *DescribeClusterRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *DescribeClusterRequest) ResponseKind() Response { + r := &DescribeClusterResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *DescribeClusterRequest) RequestWith(ctx context.Context, r Requestor) (*DescribeClusterResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*DescribeClusterResponse) + return resp, err +} + +func (v *DescribeClusterRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.IncludeClusterAuthorizedOperations + dst = kbin.AppendBool(dst, v) + } + if version >= 1 { + v := v.EndpointType + dst = kbin.AppendInt8(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeClusterRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeClusterRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeClusterRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Bool() + s.IncludeClusterAuthorizedOperations = v + } + if version >= 1 { + v := b.Int8() + s.EndpointType = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeClusterRequest returns a pointer to a default DescribeClusterRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeClusterRequest() *DescribeClusterRequest { + var v DescribeClusterRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeClusterRequest. +func (v *DescribeClusterRequest) Default() { + v.EndpointType = 1 +} + +// NewDescribeClusterRequest returns a default DescribeClusterRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeClusterRequest() DescribeClusterRequest { + var v DescribeClusterRequest + v.Default() + return v +} + +type DescribeClusterResponseBroker struct { + // NodeID is the node ID of a Kafka broker. + NodeID int32 + + // Host is the hostname of a Kafka broker. + Host string + + // Port is the port of a Kafka broker. + Port int32 + + // Rack is the rack this Kafka broker is in, if any. + Rack *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeClusterResponseBroker. 
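
// A sketch of the KIP-700 request in use (assuming "context" and "fmt"
// imports and any Requestor r): the defaults describe brokers, and whether
// EndpointType is sent at all depends on the negotiated version being v1+.
func exampleDescribeCluster(ctx context.Context, r Requestor) error {
	req := NewPtrDescribeClusterRequest() // EndpointType defaults to 1 (brokers)
	resp, err := req.RequestWith(ctx, r)
	if err != nil {
		return err
	}
	if resp.ErrorCode != 0 {
		return fmt.Errorf("describe cluster: code %d", resp.ErrorCode)
	}
	fmt.Printf("cluster %s, controller %d\n", resp.ClusterID, resp.ControllerID)
	for _, b := range resp.Brokers {
		fmt.Printf("  broker %d at %s:%d\n", b.NodeID, b.Host, b.Port)
	}
	return nil
}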
+func (v *DescribeClusterResponseBroker) Default() { +} + +// NewDescribeClusterResponseBroker returns a default DescribeClusterResponseBroker +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeClusterResponseBroker() DescribeClusterResponseBroker { + var v DescribeClusterResponseBroker + v.Default() + return v +} + +// DescribeClusterResponse is a response to a DescribeClusterRequest. +type DescribeClusterResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // The top level response error code. + ErrorCode int16 + + // The top level error message, if any. + ErrorMessage *string + + // The endpoint type that was described. 1=brokers, 2=controllers. + // + // This field has a default of 1. + EndpointType int8 // v1+ + + // The cluster ID that responding broker belongs to. + ClusterID string + + // The ID of the controller broker. + // + // This field has a default of -1. + ControllerID int32 + + // Brokers is a set of alive Kafka brokers (this mirrors MetadataResponse.Brokers). + Brokers []DescribeClusterResponseBroker + + // 32-bit bitfield to represent authorized operations for this cluster. + // + // This field has a default of -2147483648. + ClusterAuthorizedOperations int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*DescribeClusterResponse) Key() int16 { return 60 } +func (*DescribeClusterResponse) MaxVersion() int16 { return 1 } +func (v *DescribeClusterResponse) SetVersion(version int16) { v.Version = version } +func (v *DescribeClusterResponse) GetVersion() int16 { return v.Version } +func (v *DescribeClusterResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *DescribeClusterResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 0 } +func (v *DescribeClusterResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *DescribeClusterResponse) RequestKind() Request { + return &DescribeClusterRequest{Version: v.Version} +} + +func (v *DescribeClusterResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if version >= 1 { + v := v.EndpointType + dst = kbin.AppendInt8(dst, v) + } + { + v := v.ClusterID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ControllerID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Brokers + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.NodeID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Host + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Port + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Rack + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 
0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.ClusterAuthorizedOperations + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeClusterResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeClusterResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeClusterResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + if version >= 1 { + v := b.Int8() + s.EndpointType = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ClusterID = v + } + { + v := b.Int32() + s.ControllerID = v + } + { + v := s.Brokers + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeClusterResponseBroker, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.NodeID = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Host = v + } + { + v := b.Int32() + s.Port = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Rack = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Brokers = v + } + { + v := b.Int32() + s.ClusterAuthorizedOperations = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeClusterResponse returns a pointer to a default DescribeClusterResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeClusterResponse() *DescribeClusterResponse { + var v DescribeClusterResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeClusterResponse. +func (v *DescribeClusterResponse) Default() { + v.EndpointType = 1 + v.ControllerID = -1 + v.ClusterAuthorizedOperations = -2147483648 +} + +// NewDescribeClusterResponse returns a default DescribeClusterResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeClusterResponse() DescribeClusterResponse { + var v DescribeClusterResponse + v.Default() + return v +} + +type DescribeProducersRequestTopic struct { + Topic string + + // The partitions to list producers for for the given topic. 
+	Partitions []int32
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+	UnknownTags Tags
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to DescribeProducersRequestTopic.
+func (v *DescribeProducersRequestTopic) Default() {
+}
+
+// NewDescribeProducersRequestTopic returns a default DescribeProducersRequestTopic
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewDescribeProducersRequestTopic() DescribeProducersRequestTopic {
+	var v DescribeProducersRequestTopic
+	v.Default()
+	return v
+}
+
+// Introduced for KIP-664, DescribeProducersRequest allows for introspecting
+// the state of the transaction coordinator. This request can be used to detect
+// hanging transactions or other EOS-related problems.
+//
+// This request allows for describing the state of the active
+// idempotent/transactional producers.
+type DescribeProducersRequest struct {
+	// Version is the version of this message used with a Kafka broker.
+	Version int16
+
+	// The topics to describe producers for.
+	Topics []DescribeProducersRequestTopic
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+	UnknownTags Tags
+}
+
+func (*DescribeProducersRequest) Key() int16 { return 61 }
+func (*DescribeProducersRequest) MaxVersion() int16 { return 0 }
+func (v *DescribeProducersRequest) SetVersion(version int16) { v.Version = version }
+func (v *DescribeProducersRequest) GetVersion() int16 { return v.Version }
+func (v *DescribeProducersRequest) IsFlexible() bool { return v.Version >= 0 }
+func (v *DescribeProducersRequest) ResponseKind() Response {
+	r := &DescribeProducersResponse{Version: v.Version}
+	r.Default()
+	return r
+}
+
+// RequestWith issues v on r and returns the response or an error.
+// For sharded requests, the response may be merged and still return an error.
+// It is better to rely on client.RequestSharded than to rely on proper merging behavior.
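
// A sketch of building the KIP-664 request for a few partitions of one topic
// (topic name illustrative, "context" import assumed); interpreting the
// response is sketched further below, after the response types are defined.
func exampleDescribeProducers(ctx context.Context, r Requestor) (*DescribeProducersResponse, error) {
	topic := NewDescribeProducersRequestTopic()
	topic.Topic = "orders" // illustrative topic
	topic.Partitions = []int32{0, 1, 2}

	req := NewPtrDescribeProducersRequest()
	req.Topics = append(req.Topics, topic)
	return req.RequestWith(ctx, r)
}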
+func (v *DescribeProducersRequest) RequestWith(ctx context.Context, r Requestor) (*DescribeProducersResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*DescribeProducersResponse) + return resp, err +} + +func (v *DescribeProducersRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeProducersRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeProducersRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeProducersRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeProducersRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeProducersRequest returns a pointer to a default DescribeProducersRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeProducersRequest() *DescribeProducersRequest { + var v DescribeProducersRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeProducersRequest. +func (v *DescribeProducersRequest) Default() { +} + +// NewDescribeProducersRequest returns a default DescribeProducersRequest +// This is a shortcut for creating a struct and calling Default yourself. 
+func NewDescribeProducersRequest() DescribeProducersRequest { + var v DescribeProducersRequest + v.Default() + return v +} + +type DescribeProducersResponseTopicPartitionActiveProducer struct { + ProducerID int64 + + ProducerEpoch int32 + + // The last sequence produced. + // + // This field has a default of -1. + LastSequence int32 + + // The last timestamp produced. + // + // This field has a default of -1. + LastTimestamp int64 + + // The epoch of the transactional coordinator for this last produce. + CoordinatorEpoch int32 + + // The first offset of the transaction. + // + // This field has a default of -1. + CurrentTxnStartOffset int64 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeProducersResponseTopicPartitionActiveProducer. +func (v *DescribeProducersResponseTopicPartitionActiveProducer) Default() { + v.LastSequence = -1 + v.LastTimestamp = -1 + v.CurrentTxnStartOffset = -1 +} + +// NewDescribeProducersResponseTopicPartitionActiveProducer returns a default DescribeProducersResponseTopicPartitionActiveProducer +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeProducersResponseTopicPartitionActiveProducer() DescribeProducersResponseTopicPartitionActiveProducer { + var v DescribeProducersResponseTopicPartitionActiveProducer + v.Default() + return v +} + +type DescribeProducersResponseTopicPartition struct { + Partition int32 + + // The partition error code, or 0 if there was no error. + // + // NOT_LEADER_OR_FOLLOWER is returned if the broker receiving this request + // is not the leader of the partition. + // + // TOPIC_AUTHORIZATION_FAILED is returned if the user does not have Describe + // permissions on the topic. + // + // UNKNOWN_TOPIC_OR_PARTITION is returned if the partition is not known to exist. + // + // Other errors may be returned corresponding to the partition being offline, etc. + ErrorCode int16 + + // The partition error message, which may be null if no additional details are available. + ErrorMessage *string + + // The current idempotent or transactional producers producing to this partition, + // and the metadata related to their produce requests. + ActiveProducers []DescribeProducersResponseTopicPartitionActiveProducer + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeProducersResponseTopicPartition. +func (v *DescribeProducersResponseTopicPartition) Default() { +} + +// NewDescribeProducersResponseTopicPartition returns a default DescribeProducersResponseTopicPartition +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeProducersResponseTopicPartition() DescribeProducersResponseTopicPartition { + var v DescribeProducersResponseTopicPartition + v.Default() + return v +} + +type DescribeProducersResponseTopic struct { + Topic string + + Partitions []DescribeProducersResponseTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeProducersResponseTopic. 
+func (v *DescribeProducersResponseTopic) Default() { +} + +// NewDescribeProducersResponseTopic returns a default DescribeProducersResponseTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeProducersResponseTopic() DescribeProducersResponseTopic { + var v DescribeProducersResponseTopic + v.Default() + return v +} + +// DescribeProducersResponse is a response to a DescribeProducersRequest. +type DescribeProducersResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + Topics []DescribeProducersResponseTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*DescribeProducersResponse) Key() int16 { return 61 } +func (*DescribeProducersResponse) MaxVersion() int16 { return 0 } +func (v *DescribeProducersResponse) SetVersion(version int16) { v.Version = version } +func (v *DescribeProducersResponse) GetVersion() int16 { return v.Version } +func (v *DescribeProducersResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *DescribeProducersResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 0 } +func (v *DescribeProducersResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *DescribeProducersResponse) RequestKind() Request { + return &DescribeProducersRequest{Version: v.Version} +} + +func (v *DescribeProducersResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Partition + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.ActiveProducers + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ProducerID + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ProducerEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LastSequence + dst = kbin.AppendInt32(dst, v) + } + { + v := v.LastTimestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.CoordinatorEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.CurrentTxnStartOffset + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = 
kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeProducersResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeProducersResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeProducersResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeProducersResponseTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeProducersResponseTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int32() + s.Partition = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + { + v := s.ActiveProducers + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeProducersResponseTopicPartitionActiveProducer, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int64() + s.ProducerID = v + } + { + v := b.Int32() + s.ProducerEpoch = v + } + { + v := b.Int32() + s.LastSequence = v + } + { + v := b.Int64() + s.LastTimestamp = v + } + { + v := b.Int32() + s.CoordinatorEpoch = v + } + { + v := b.Int64() + s.CurrentTxnStartOffset = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.ActiveProducers = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeProducersResponse returns a pointer to a default DescribeProducersResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeProducersResponse() *DescribeProducersResponse { + var v DescribeProducersResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeProducersResponse. +func (v *DescribeProducersResponse) Default() { +} + +// NewDescribeProducersResponse returns a default DescribeProducersResponse +// This is a shortcut for creating a struct and calling Default yourself. 
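
// Tying the response shape together: the -1 defaults on the active-producer
// fields act as sentinels, and a CurrentTxnStartOffset other than -1 means a
// transaction is still open on that partition, which is the KIP-664 signal
// for spotting hung transactions. A sketch (assuming a "fmt" import):
func findOpenTransactions(resp *DescribeProducersResponse) {
	for _, t := range resp.Topics {
		for _, p := range t.Partitions {
			if p.ErrorCode != 0 {
				continue // e.g. NOT_LEADER_OR_FOLLOWER; retry against the partition leader
			}
			for _, ap := range p.ActiveProducers {
				if ap.CurrentTxnStartOffset != -1 {
					fmt.Printf("producer %d (epoch %d) has an open txn on %s[%d] since offset %d\n",
						ap.ProducerID, ap.ProducerEpoch, t.Topic, p.Partition, ap.CurrentTxnStartOffset)
				}
			}
		}
	}
}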
+func NewDescribeProducersResponse() DescribeProducersResponse { + var v DescribeProducersResponse + v.Default() + return v +} + +type BrokerRegistrationRequestListener struct { + // The name of this endpoint. + Name string + + // The hostname. + Host string + + // The port. + Port uint16 + + // The security protocol. + SecurityProtocol int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to BrokerRegistrationRequestListener. +func (v *BrokerRegistrationRequestListener) Default() { +} + +// NewBrokerRegistrationRequestListener returns a default BrokerRegistrationRequestListener +// This is a shortcut for creating a struct and calling Default yourself. +func NewBrokerRegistrationRequestListener() BrokerRegistrationRequestListener { + var v BrokerRegistrationRequestListener + v.Default() + return v +} + +type BrokerRegistrationRequestFeature struct { + // The name of the feature. + Name string + + // The minimum supported feature level. + MinSupportedVersion int16 + + // The maximum supported feature level. + MaxSupportedVersion int16 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to BrokerRegistrationRequestFeature. +func (v *BrokerRegistrationRequestFeature) Default() { +} + +// NewBrokerRegistrationRequestFeature returns a default BrokerRegistrationRequestFeature +// This is a shortcut for creating a struct and calling Default yourself. +func NewBrokerRegistrationRequestFeature() BrokerRegistrationRequestFeature { + var v BrokerRegistrationRequestFeature + v.Default() + return v +} + +// For KIP-500 / KIP-631, BrokerRegistrationRequest is an internal +// broker-to-broker only request. +type BrokerRegistrationRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // The broker ID. + BrokerID int32 + + // The cluster ID of the broker process. + ClusterID string + + // The incarnation ID of the broker process. + IncarnationID [16]byte + + // The listeners for this broker. + Listeners []BrokerRegistrationRequestListener + + // Features on this broker. + Features []BrokerRegistrationRequestFeature + + // The rack that this broker is in, if any. + Rack *string + + // If the required configurations for ZK migration are present, this value is + // set to true. + IsMigratingZkBroker bool // v1+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*BrokerRegistrationRequest) Key() int16 { return 62 } +func (*BrokerRegistrationRequest) MaxVersion() int16 { return 1 } +func (v *BrokerRegistrationRequest) SetVersion(version int16) { v.Version = version } +func (v *BrokerRegistrationRequest) GetVersion() int16 { return v.Version } +func (v *BrokerRegistrationRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *BrokerRegistrationRequest) ResponseKind() Response { + r := &BrokerRegistrationResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
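The generated RequestWith helpers wrap Requestor.Request and hand back an already-typed response, so callers avoid the type assertion themselves. A minimal sketch of the calling pattern, assuming the upstream franz-go import path; any value implementing Requestor works (in franz-go, *kgo.Client satisfies this interface). BrokerRegistrationRequest itself is broker-internal, so the values below are purely illustrative of the pattern:

package example

import (
    "context"
    "fmt"

    "github.com/twmb/franz-go/pkg/kmsg"
)

// register sketches the RequestWith calling pattern; the helper name and
// field values are hypothetical.
func register(ctx context.Context, r kmsg.Requestor) error {
    req := kmsg.NewPtrBrokerRegistrationRequest()
    req.BrokerID = 1
    req.ClusterID = "example-cluster" // illustrative value
    resp, err := req.RequestWith(ctx, r)
    if err != nil {
        return err // transport-level failure
    }
    if resp.ErrorCode != 0 {
        return fmt.Errorf("registration rejected: error code %d", resp.ErrorCode)
    }
    fmt.Println("assigned broker epoch:", resp.BrokerEpoch)
    return nil
}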
+func (v *BrokerRegistrationRequest) RequestWith(ctx context.Context, r Requestor) (*BrokerRegistrationResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*BrokerRegistrationResponse) + return resp, err +} + +func (v *BrokerRegistrationRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.BrokerID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ClusterID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.IncarnationID + dst = kbin.AppendUuid(dst, v) + } + { + v := v.Listeners + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Host + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Port + dst = kbin.AppendUint16(dst, v) + } + { + v := v.SecurityProtocol + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.Features + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Name + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.MinSupportedVersion + dst = kbin.AppendInt16(dst, v) + } + { + v := v.MaxSupportedVersion + dst = kbin.AppendInt16(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.Rack + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if version >= 1 { + v := v.IsMigratingZkBroker + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *BrokerRegistrationRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *BrokerRegistrationRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *BrokerRegistrationRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.BrokerID = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ClusterID = v + } + { + v := b.Uuid() + s.IncarnationID = v + } + { + v := s.Listeners + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]BrokerRegistrationRequestListener, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Host = v + } + { + v := b.Uint16() + s.Port = v + } + { + v := b.Int16() + s.SecurityProtocol = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Listeners = v + } + { + v := s.Features + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]BrokerRegistrationRequestFeature, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Name = v + } + { + v := b.Int16() + s.MinSupportedVersion = v + } + { + v := b.Int16() + s.MaxSupportedVersion = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Features = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.Rack = v + } + if version >= 1 { + v := b.Bool() + s.IsMigratingZkBroker = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrBrokerRegistrationRequest returns a pointer to a default BrokerRegistrationRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrBrokerRegistrationRequest() *BrokerRegistrationRequest { + var v BrokerRegistrationRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to BrokerRegistrationRequest. +func (v *BrokerRegistrationRequest) Default() { +} + +// NewBrokerRegistrationRequest returns a default BrokerRegistrationRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewBrokerRegistrationRequest() BrokerRegistrationRequest { + var v BrokerRegistrationRequest + v.Default() + return v +} + +// BrokerRegistrationResponse is a response to a BrokerRegistrationRequest. +type BrokerRegistrationResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // Any error code, or 0. + ErrorCode int16 + + // The broker's assigned epoch, or -1 if none was assigned. + // + // This field has a default of -1. + BrokerEpoch int64 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags +} + +func (*BrokerRegistrationResponse) Key() int16 { return 62 } +func (*BrokerRegistrationResponse) MaxVersion() int16 { return 1 } +func (v *BrokerRegistrationResponse) SetVersion(version int16) { v.Version = version } +func (v *BrokerRegistrationResponse) GetVersion() int16 { return v.Version } +func (v *BrokerRegistrationResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *BrokerRegistrationResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 0 +} + +func (v *BrokerRegistrationResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *BrokerRegistrationResponse) RequestKind() Request { + return &BrokerRegistrationRequest{Version: v.Version} +} + +func (v *BrokerRegistrationResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.BrokerEpoch + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *BrokerRegistrationResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *BrokerRegistrationResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *BrokerRegistrationResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int64() + s.BrokerEpoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrBrokerRegistrationResponse returns a pointer to a default BrokerRegistrationResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrBrokerRegistrationResponse() *BrokerRegistrationResponse { + var v BrokerRegistrationResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to BrokerRegistrationResponse. +func (v *BrokerRegistrationResponse) Default() { + v.BrokerEpoch = -1 +} + +// NewBrokerRegistrationResponse returns a default BrokerRegistrationResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewBrokerRegistrationResponse() BrokerRegistrationResponse { + var v BrokerRegistrationResponse + v.Default() + return v +} + +// For KIP-500 / KIP-631, BrokerHeartbeatRequest is an internal +// broker-to-broker only request. +type BrokerHeartbeatRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // The broker ID. + BrokerID int32 + + // The broker's epoch. + // + // This field has a default of -1. + BrokerEpoch int64 + + // The highest metadata offset that the broker has reached. + CurrentMetadataOffset int64 + + // True if the broker wants to be fenced. + WantFence bool + + // True if the broker wants to be shutdown. + WantShutdown bool + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
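One detail worth noting from the readFrom bodies above: the wire bytes do not carry the message version, so a struct being decoded must have its Version set before ReadFrom, because decoding branches on v.Version to pick compact (flexible) versus classic encodings. A minimal round-trip sketch, assuming the upstream franz-go import path:

package example

import "github.com/twmb/franz-go/pkg/kmsg"

// roundTrip serializes a response with AppendTo and parses it back with
// ReadFrom. The decoder's Version must match the encoder's, since the
// version is negotiated out of band rather than embedded in the bytes.
func roundTrip(in *kmsg.BrokerRegistrationResponse) (*kmsg.BrokerRegistrationResponse, error) {
    wire := in.AppendTo(nil) // encode at in.Version

    out := kmsg.NewPtrBrokerRegistrationResponse()
    out.SetVersion(in.GetVersion()) // must be set before ReadFrom
    if err := out.ReadFrom(wire); err != nil {
        return nil, err
    }
    return out, nil
}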
+ UnknownTags Tags +} + +func (*BrokerHeartbeatRequest) Key() int16 { return 63 } +func (*BrokerHeartbeatRequest) MaxVersion() int16 { return 0 } +func (v *BrokerHeartbeatRequest) SetVersion(version int16) { v.Version = version } +func (v *BrokerHeartbeatRequest) GetVersion() int16 { return v.Version } +func (v *BrokerHeartbeatRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *BrokerHeartbeatRequest) ResponseKind() Response { + r := &BrokerHeartbeatResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *BrokerHeartbeatRequest) RequestWith(ctx context.Context, r Requestor) (*BrokerHeartbeatResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*BrokerHeartbeatResponse) + return resp, err +} + +func (v *BrokerHeartbeatRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.BrokerID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.BrokerEpoch + dst = kbin.AppendInt64(dst, v) + } + { + v := v.CurrentMetadataOffset + dst = kbin.AppendInt64(dst, v) + } + { + v := v.WantFence + dst = kbin.AppendBool(dst, v) + } + { + v := v.WantShutdown + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *BrokerHeartbeatRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *BrokerHeartbeatRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *BrokerHeartbeatRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.BrokerID = v + } + { + v := b.Int64() + s.BrokerEpoch = v + } + { + v := b.Int64() + s.CurrentMetadataOffset = v + } + { + v := b.Bool() + s.WantFence = v + } + { + v := b.Bool() + s.WantShutdown = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrBrokerHeartbeatRequest returns a pointer to a default BrokerHeartbeatRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrBrokerHeartbeatRequest() *BrokerHeartbeatRequest { + var v BrokerHeartbeatRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to BrokerHeartbeatRequest. +func (v *BrokerHeartbeatRequest) Default() { + v.BrokerEpoch = -1 +} + +// NewBrokerHeartbeatRequest returns a default BrokerHeartbeatRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewBrokerHeartbeatRequest() BrokerHeartbeatRequest { + var v BrokerHeartbeatRequest + v.Default() + return v +} + +// BrokerHeartbeatResponse is a response to a BrokerHeartbeatRequest. +type BrokerHeartbeatResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // Any error code, or 0. + ErrorCode int16 + + // True if the broker has approximately caught up with the latest metadata. 
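BrokerHeartbeatRequest.Default above sets BrokerEpoch to -1, which is the reason the generated New constructors exist at all: a zero-valued struct literal silently sends epoch 0 rather than the documented sentinel. A small sketch, assuming the upstream franz-go import path:

package example

import (
    "fmt"

    "github.com/twmb/franz-go/pkg/kmsg"
)

// defaultsMatter shows why the New constructors should be preferred over
// zero-valued struct literals for messages with non-zero defaults.
func defaultsMatter() {
    var zero kmsg.BrokerHeartbeatRequest    // BrokerEpoch == 0, an unintended value
    def := kmsg.NewBrokerHeartbeatRequest() // BrokerEpoch == -1, the documented default
    fmt.Println(zero.BrokerEpoch, def.BrokerEpoch) // prints: 0 -1
}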
+ IsCaughtUp bool + + // True if the broker is fenced. + // + // This field has a default of true. + IsFenced bool + + // True if the broker should proceed with its shutdown. + ShouldShutdown bool + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*BrokerHeartbeatResponse) Key() int16 { return 63 } +func (*BrokerHeartbeatResponse) MaxVersion() int16 { return 0 } +func (v *BrokerHeartbeatResponse) SetVersion(version int16) { v.Version = version } +func (v *BrokerHeartbeatResponse) GetVersion() int16 { return v.Version } +func (v *BrokerHeartbeatResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *BrokerHeartbeatResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 0 } +func (v *BrokerHeartbeatResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *BrokerHeartbeatResponse) RequestKind() Request { + return &BrokerHeartbeatRequest{Version: v.Version} +} + +func (v *BrokerHeartbeatResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.IsCaughtUp + dst = kbin.AppendBool(dst, v) + } + { + v := v.IsFenced + dst = kbin.AppendBool(dst, v) + } + { + v := v.ShouldShutdown + dst = kbin.AppendBool(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *BrokerHeartbeatResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *BrokerHeartbeatResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *BrokerHeartbeatResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Bool() + s.IsCaughtUp = v + } + { + v := b.Bool() + s.IsFenced = v + } + { + v := b.Bool() + s.ShouldShutdown = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrBrokerHeartbeatResponse returns a pointer to a default BrokerHeartbeatResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrBrokerHeartbeatResponse() *BrokerHeartbeatResponse { + var v BrokerHeartbeatResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to BrokerHeartbeatResponse. +func (v *BrokerHeartbeatResponse) Default() { + v.IsFenced = true +} + +// NewBrokerHeartbeatResponse returns a default BrokerHeartbeatResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewBrokerHeartbeatResponse() BrokerHeartbeatResponse { + var v BrokerHeartbeatResponse + v.Default() + return v +} + +// For KIP-500 / KIP-631, UnregisterBrokerRequest is an admin request to +// remove registration of a broker from the cluster. +type UnregisterBrokerRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // The broker ID to unregister. + BrokerID int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
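Putting the two KIP-631 heartbeat messages together, one exchange might look like the sketch below. This is broker-internal traffic; the helper and its arguments are hypothetical, and the import path is assumed to be the upstream franz-go one:

package example

import (
    "context"

    "github.com/twmb/franz-go/pkg/kmsg"
)

// heartbeatOnce performs a single heartbeat exchange and reports whether
// the controller asked the broker to proceed with shutdown. brokerID,
// epoch, and metadataOffset are caller-supplied state.
func heartbeatOnce(ctx context.Context, r kmsg.Requestor, brokerID int32, epoch, metadataOffset int64) (bool, error) {
    req := kmsg.NewPtrBrokerHeartbeatRequest()
    req.BrokerID = brokerID
    req.BrokerEpoch = epoch // stays at the -1 default until an epoch is assigned
    req.CurrentMetadataOffset = metadataOffset

    resp, err := req.RequestWith(ctx, r)
    if err != nil {
        return false, err
    }
    // IsFenced defaults to true; the controller reports IsCaughtUp once the
    // broker's metadata offset is close enough to the log end.
    return resp.ShouldShutdown, nil
}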
+ UnknownTags Tags +} + +func (*UnregisterBrokerRequest) Key() int16 { return 64 } +func (*UnregisterBrokerRequest) MaxVersion() int16 { return 0 } +func (v *UnregisterBrokerRequest) SetVersion(version int16) { v.Version = version } +func (v *UnregisterBrokerRequest) GetVersion() int16 { return v.Version } +func (v *UnregisterBrokerRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *UnregisterBrokerRequest) ResponseKind() Response { + r := &UnregisterBrokerResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *UnregisterBrokerRequest) RequestWith(ctx context.Context, r Requestor) (*UnregisterBrokerResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*UnregisterBrokerResponse) + return resp, err +} + +func (v *UnregisterBrokerRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.BrokerID + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *UnregisterBrokerRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *UnregisterBrokerRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *UnregisterBrokerRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.BrokerID = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrUnregisterBrokerRequest returns a pointer to a default UnregisterBrokerRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrUnregisterBrokerRequest() *UnregisterBrokerRequest { + var v UnregisterBrokerRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to UnregisterBrokerRequest. +func (v *UnregisterBrokerRequest) Default() { +} + +// NewUnregisterBrokerRequest returns a default UnregisterBrokerRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewUnregisterBrokerRequest() UnregisterBrokerRequest { + var v UnregisterBrokerRequest + v.Default() + return v +} + +// UnregisterBrokerResponse is a response to a UnregisterBrokerRequest. +type UnregisterBrokerResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // Any error code, or 0. + ErrorCode int16 + + // The error message, if any. + ErrorMessage *string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
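Unlike the two preceding broker-internal messages, UnregisterBrokerRequest is an admin request, so a client could plausibly issue it directly. A minimal sketch, assuming the upstream franz-go import path; the wrapping helper is hypothetical:

package example

import (
    "context"
    "fmt"

    "github.com/twmb/franz-go/pkg/kmsg"
)

// unregister removes a broker's registration from the cluster and
// surfaces the optional error message from the response.
func unregister(ctx context.Context, r kmsg.Requestor, brokerID int32) error {
    req := kmsg.NewPtrUnregisterBrokerRequest()
    req.BrokerID = brokerID
    resp, err := req.RequestWith(ctx, r)
    if err != nil {
        return err
    }
    if resp.ErrorCode != 0 {
        msg := "(no message)"
        if resp.ErrorMessage != nil {
            msg = *resp.ErrorMessage
        }
        return fmt.Errorf("unregister broker %d: code %d: %s", brokerID, resp.ErrorCode, msg)
    }
    return nil
}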
+ UnknownTags Tags +} + +func (*UnregisterBrokerResponse) Key() int16 { return 64 } +func (*UnregisterBrokerResponse) MaxVersion() int16 { return 0 } +func (v *UnregisterBrokerResponse) SetVersion(version int16) { v.Version = version } +func (v *UnregisterBrokerResponse) GetVersion() int16 { return v.Version } +func (v *UnregisterBrokerResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *UnregisterBrokerResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 0 } +func (v *UnregisterBrokerResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *UnregisterBrokerResponse) RequestKind() Request { + return &UnregisterBrokerRequest{Version: v.Version} +} + +func (v *UnregisterBrokerResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *UnregisterBrokerResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *UnregisterBrokerResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *UnregisterBrokerResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrUnregisterBrokerResponse returns a pointer to a default UnregisterBrokerResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrUnregisterBrokerResponse() *UnregisterBrokerResponse { + var v UnregisterBrokerResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to UnregisterBrokerResponse. +func (v *UnregisterBrokerResponse) Default() { +} + +// NewUnregisterBrokerResponse returns a default UnregisterBrokerResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewUnregisterBrokerResponse() UnregisterBrokerResponse { + var v UnregisterBrokerResponse + v.Default() + return v +} + +// For KIP-664, DescribeTransactionsRequest describes the state of transactions. +type DescribeTransactionsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // Array of transactionalIds to include in describe results. If empty, then + // no results will be returned. + TransactionalIDs []string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags +} + +func (*DescribeTransactionsRequest) Key() int16 { return 65 } +func (*DescribeTransactionsRequest) MaxVersion() int16 { return 0 } +func (v *DescribeTransactionsRequest) SetVersion(version int16) { v.Version = version } +func (v *DescribeTransactionsRequest) GetVersion() int16 { return v.Version } +func (v *DescribeTransactionsRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *DescribeTransactionsRequest) ResponseKind() Response { + r := &DescribeTransactionsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *DescribeTransactionsRequest) RequestWith(ctx context.Context, r Requestor) (*DescribeTransactionsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*DescribeTransactionsResponse) + return resp, err +} + +func (v *DescribeTransactionsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.TransactionalIDs + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeTransactionsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeTransactionsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeTransactionsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := s.TransactionalIDs + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]string, l)...) + } + for i := int32(0); i < l; i++ { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + a[i] = v + } + v = a + s.TransactionalIDs = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeTransactionsRequest returns a pointer to a default DescribeTransactionsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeTransactionsRequest() *DescribeTransactionsRequest { + var v DescribeTransactionsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeTransactionsRequest. +func (v *DescribeTransactionsRequest) Default() { +} + +// NewDescribeTransactionsRequest returns a default DescribeTransactionsRequest +// This is a shortcut for creating a struct and calling Default yourself. 
+func NewDescribeTransactionsRequest() DescribeTransactionsRequest { + var v DescribeTransactionsRequest + v.Default() + return v +} + +type DescribeTransactionsResponseTransactionStateTopic struct { + Topic string + + Partitions []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeTransactionsResponseTransactionStateTopic. +func (v *DescribeTransactionsResponseTransactionStateTopic) Default() { +} + +// NewDescribeTransactionsResponseTransactionStateTopic returns a default DescribeTransactionsResponseTransactionStateTopic +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeTransactionsResponseTransactionStateTopic() DescribeTransactionsResponseTransactionStateTopic { + var v DescribeTransactionsResponseTransactionStateTopic + v.Default() + return v +} + +type DescribeTransactionsResponseTransactionState struct { + // A potential error code for describing this transaction. + // + // NOT_COORDINATOR is returned if the broker receiving this transactional + // ID does not own the ID. + // + // COORDINATOR_LOAD_IN_PROGRESS is returned if the coordinator is loading. + // + // COORDINATOR_NOT_AVAILABLE is returned if the coordinator is being shut down. + // + // TRANSACTIONAL_ID_NOT_FOUND is returned if the transactional ID could not be found. + // + // TRANSACTIONAL_ID_AUTHORIZATION_FAILED is returned if the user does not have + // Describe permissions on the transactional ID. + ErrorCode int16 + + // TransactionalID is the transactional ID this record is for. + TransactionalID string + + // State is the state the transaction is in. + State string + + // TimeoutMillis is the timeout of this transaction in milliseconds. + TimeoutMillis int32 + + // StartTimestamp is the timestamp in millis of when this transaction started. + StartTimestamp int64 + + // ProducerID is the ID in use by the transactional ID. + ProducerID int64 + + // ProducerEpoch is the epoch associated with the producer ID. + ProducerEpoch int16 + + // The set of partitions included in the current transaction (if active). + // When a transaction is preparing to commit or abort, this will include + // only partitions which do not have markers. + // + // This does not include topics the user is not authorized to describe. + Topics []DescribeTransactionsResponseTransactionStateTopic + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeTransactionsResponseTransactionState. +func (v *DescribeTransactionsResponseTransactionState) Default() { +} + +// NewDescribeTransactionsResponseTransactionState returns a default DescribeTransactionsResponseTransactionState +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeTransactionsResponseTransactionState() DescribeTransactionsResponseTransactionState { + var v DescribeTransactionsResponseTransactionState + v.Default() + return v +} + +// DescribeTransactionsResponse is a response to a DescribeTransactionsRequest. +type DescribeTransactionsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. 
+ ThrottleMillis int32 + + TransactionStates []DescribeTransactionsResponseTransactionState + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*DescribeTransactionsResponse) Key() int16 { return 65 } +func (*DescribeTransactionsResponse) MaxVersion() int16 { return 0 } +func (v *DescribeTransactionsResponse) SetVersion(version int16) { v.Version = version } +func (v *DescribeTransactionsResponse) GetVersion() int16 { return v.Version } +func (v *DescribeTransactionsResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *DescribeTransactionsResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 0 +} + +func (v *DescribeTransactionsResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *DescribeTransactionsResponse) RequestKind() Request { + return &DescribeTransactionsRequest{Version: v.Version} +} + +func (v *DescribeTransactionsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.TransactionStates + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.TransactionalID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.State + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.TimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.StartTimestamp + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ProducerID + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ProducerEpoch + dst = kbin.AppendInt16(dst, v) + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *DescribeTransactionsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *DescribeTransactionsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *DescribeTransactionsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.TransactionStates + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, 
make([]DescribeTransactionsResponseTransactionState, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.TransactionalID = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.State = v + } + { + v := b.Int32() + s.TimeoutMillis = v + } + { + v := b.Int64() + s.StartTimestamp = v + } + { + v := b.Int64() + s.ProducerID = v + } + { + v := b.Int16() + s.ProducerEpoch = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]DescribeTransactionsResponseTransactionStateTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.TransactionStates = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrDescribeTransactionsResponse returns a pointer to a default DescribeTransactionsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrDescribeTransactionsResponse() *DescribeTransactionsResponse { + var v DescribeTransactionsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to DescribeTransactionsResponse. +func (v *DescribeTransactionsResponse) Default() { +} + +// NewDescribeTransactionsResponse returns a default DescribeTransactionsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewDescribeTransactionsResponse() DescribeTransactionsResponse { + var v DescribeTransactionsResponse + v.Default() + return v +} + +// For KIP-664, ListTransactionsRequest lists transactions. +type ListTransactionsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // The transaction states to filter by: if empty, all transactions are + // returned; if non-empty, then only transactions matching one of the + // filtered states will be returned. + // + // For a list of valid states, see the TransactionState enum. + StateFilters []string + + // The producer IDs to filter by: if empty, all transactions will be + // returned; if non-empty, only transactions which match one of the filtered + // producer IDs will be returned. 
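Because each element of TransactionStates carries its own ErrorCode, the response assembled above is checked per transactional ID rather than once per response. A minimal walk, assuming the upstream franz-go import path:

package example

import (
    "fmt"

    "github.com/twmb/franz-go/pkg/kmsg"
)

// printStates iterates a DescribeTransactionsResponse, reporting
// per-transactional-ID errors (NOT_COORDINATOR,
// TRANSACTIONAL_ID_NOT_FOUND, ...) and otherwise printing the state.
func printStates(resp *kmsg.DescribeTransactionsResponse) {
    for _, ts := range resp.TransactionStates {
        if ts.ErrorCode != 0 {
            fmt.Printf("%s: error code %d\n", ts.TransactionalID, ts.ErrorCode)
            continue
        }
        fmt.Printf("%s: state=%s producer=%d epoch=%d timeout=%dms\n",
            ts.TransactionalID, ts.State, ts.ProducerID, ts.ProducerEpoch, ts.TimeoutMillis)
        for _, t := range ts.Topics {
            fmt.Printf("  %s partitions %v\n", t.Topic, t.Partitions)
        }
    }
}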
+ ProducerIDFilters []int64 + + // Duration (in millis) to filter by: if < 0, all transactions will be + // returned; otherwise, only transactions running longer than this duration + // will be returned. + // + // This field has a default of -1. + DurationFilterMillis int64 // v1+ + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*ListTransactionsRequest) Key() int16 { return 66 } +func (*ListTransactionsRequest) MaxVersion() int16 { return 0 } +func (v *ListTransactionsRequest) SetVersion(version int16) { v.Version = version } +func (v *ListTransactionsRequest) GetVersion() int16 { return v.Version } +func (v *ListTransactionsRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *ListTransactionsRequest) ResponseKind() Response { + r := &ListTransactionsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. +func (v *ListTransactionsRequest) RequestWith(ctx context.Context, r Requestor) (*ListTransactionsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*ListTransactionsResponse) + return resp, err +} + +func (v *ListTransactionsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.StateFilters + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + } + { + v := v.ProducerIDFilters + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt64(dst, v) + } + } + if version >= 1 { + v := v.DurationFilterMillis + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ListTransactionsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ListTransactionsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ListTransactionsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := s.StateFilters + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]string, l)...) + } + for i := int32(0); i < l; i++ { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + a[i] = v + } + v = a + s.StateFilters = v + } + { + v := s.ProducerIDFilters + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int64, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := b.Int64() + a[i] = v + } + v = a + s.ProducerIDFilters = v + } + if version >= 1 { + v := b.Int64() + s.DurationFilterMillis = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrListTransactionsRequest returns a pointer to a default ListTransactionsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrListTransactionsRequest() *ListTransactionsRequest { + var v ListTransactionsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListTransactionsRequest. +func (v *ListTransactionsRequest) Default() { + v.DurationFilterMillis = -1 +} + +// NewListTransactionsRequest returns a default ListTransactionsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewListTransactionsRequest() ListTransactionsRequest { + var v ListTransactionsRequest + v.Default() + return v +} + +type ListTransactionsResponseTransactionState struct { + // The transactional ID being used. + TransactionalID string + + // The producer ID of the producer. + ProducerID int64 + + // The current transaction state of the producer. + TransactionState string + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListTransactionsResponseTransactionState. +func (v *ListTransactionsResponseTransactionState) Default() { +} + +// NewListTransactionsResponseTransactionState returns a default ListTransactionsResponseTransactionState +// This is a shortcut for creating a struct and calling Default yourself. +func NewListTransactionsResponseTransactionState() ListTransactionsResponseTransactionState { + var v ListTransactionsResponseTransactionState + v.Default() + return v +} + +// ListTransactionsResponse is a response to a ListTransactionsRequest. +type ListTransactionsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // A potential error code for the listing. + // + // COORDINATOR_LOAD_IN_PROGRESS is returned if the coordinator is loading. + // + // COORDINATOR_NOT_AVAILABLE is returned if the coordinator receiving this + // request is shutting down. + ErrorCode int16 + + // Set of state filters provided in the request which were unknown to the + // transaction coordinator. + UnknownStateFilters []string + + // TransactionStates contains all transactions that were matched for listing + // in the request. The response elides transactions that the user does not have + // permission to describe (DESCRIBE on TRANSACTIONAL_ID for the transaction). + TransactionStates []ListTransactionsResponseTransactionState + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags +} + +func (*ListTransactionsResponse) Key() int16 { return 66 } +func (*ListTransactionsResponse) MaxVersion() int16 { return 0 } +func (v *ListTransactionsResponse) SetVersion(version int16) { v.Version = version } +func (v *ListTransactionsResponse) GetVersion() int16 { return v.Version } +func (v *ListTransactionsResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *ListTransactionsResponse) Throttle() (int32, bool) { return v.ThrottleMillis, v.Version >= 0 } +func (v *ListTransactionsResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *ListTransactionsResponse) RequestKind() Request { + return &ListTransactionsRequest{Version: v.Version} +} + +func (v *ListTransactionsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.UnknownStateFilters + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + } + { + v := v.TransactionStates + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.TransactionalID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ProducerID + dst = kbin.AppendInt64(dst, v) + } + { + v := v.TransactionState + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ListTransactionsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ListTransactionsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ListTransactionsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := s.UnknownStateFilters + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]string, l)...) + } + for i := int32(0); i < l; i++ { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + a[i] = v + } + v = a + s.UnknownStateFilters = v + } + { + v := s.TransactionStates + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ListTransactionsResponseTransactionState, l)...) 
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.TransactionalID = v + } + { + v := b.Int64() + s.ProducerID = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.TransactionState = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.TransactionStates = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrListTransactionsResponse returns a pointer to a default ListTransactionsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrListTransactionsResponse() *ListTransactionsResponse { + var v ListTransactionsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ListTransactionsResponse. +func (v *ListTransactionsResponse) Default() { +} + +// NewListTransactionsResponse returns a default ListTransactionsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewListTransactionsResponse() ListTransactionsResponse { + var v ListTransactionsResponse + v.Default() + return v +} + +// For KIP-730, AllocateProducerIDsRequest is a broker-to-broker request that +// requests a block of producer IDs from the controller broker. This is more +// specifically introduced for raft, but allows for one more request to avoid +// zookeeper in the non-raft world as well. +type AllocateProducerIDsRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // The ID of the requesting broker. + BrokerID int32 + + // The epoch of the requesting broker. + // + // This field has a default of -1. + BrokerEpoch int64 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*AllocateProducerIDsRequest) Key() int16 { return 67 } +func (*AllocateProducerIDsRequest) MaxVersion() int16 { return 0 } +func (v *AllocateProducerIDsRequest) SetVersion(version int16) { v.Version = version } +func (v *AllocateProducerIDsRequest) GetVersion() int16 { return v.Version } +func (v *AllocateProducerIDsRequest) IsFlexible() bool { return v.Version >= 0 } +func (v *AllocateProducerIDsRequest) ResponseKind() Response { + r := &AllocateProducerIDsResponse{Version: v.Version} + r.Default() + return r +} + +// RequestWith is requests v on r and returns the response or an error. +// For sharded requests, the response may be merged and still return an error. +// It is better to rely on client.RequestSharded than to rely on proper merging behavior. 
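ListTransactions pairs naturally with DescribeTransactions: list the IDs matching a filter, then describe the matches. A sketch of that KIP-664 flow, assuming the upstream franz-go import path; "Ongoing" is assumed to be a valid name from the TransactionState enum referenced above:

package example

import (
    "context"
    "fmt"

    "github.com/twmb/franz-go/pkg/kmsg"
)

// listThenDescribe lists transactions in an assumed "Ongoing" state and
// then describes each match. Empty filters mean "match everything".
func listThenDescribe(ctx context.Context, r kmsg.Requestor) (*kmsg.DescribeTransactionsResponse, error) {
    list := kmsg.NewPtrListTransactionsRequest()
    list.StateFilters = []string{"Ongoing"}

    lresp, err := list.RequestWith(ctx, r)
    if err != nil {
        return nil, err
    }
    if lresp.ErrorCode != 0 {
        return nil, fmt.Errorf("list transactions: error code %d", lresp.ErrorCode)
    }

    desc := kmsg.NewPtrDescribeTransactionsRequest()
    for _, ts := range lresp.TransactionStates {
        desc.TransactionalIDs = append(desc.TransactionalIDs, ts.TransactionalID)
    }
    return desc.RequestWith(ctx, r)
}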
+func (v *AllocateProducerIDsRequest) RequestWith(ctx context.Context, r Requestor) (*AllocateProducerIDsResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*AllocateProducerIDsResponse) + return resp, err +} + +func (v *AllocateProducerIDsRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.BrokerID + dst = kbin.AppendInt32(dst, v) + } + { + v := v.BrokerEpoch + dst = kbin.AppendInt64(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AllocateProducerIDsRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AllocateProducerIDsRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AllocateProducerIDsRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.BrokerID = v + } + { + v := b.Int64() + s.BrokerEpoch = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAllocateProducerIDsRequest returns a pointer to a default AllocateProducerIDsRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAllocateProducerIDsRequest() *AllocateProducerIDsRequest { + var v AllocateProducerIDsRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AllocateProducerIDsRequest. +func (v *AllocateProducerIDsRequest) Default() { + v.BrokerEpoch = -1 +} + +// NewAllocateProducerIDsRequest returns a default AllocateProducerIDsRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewAllocateProducerIDsRequest() AllocateProducerIDsRequest { + var v AllocateProducerIDsRequest + v.Default() + return v +} + +// AllocateProducerIDsResponse is a response to an AllocateProducerIDsRequest. +type AllocateProducerIDsResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. + ThrottleMillis int32 + + // An error code, if any. + ErrorCode int16 + + // The first producer ID in this range, inclusive. + ProducerIDStart int64 + + // The number of producer IDs in this range. + ProducerIDLen int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+ UnknownTags Tags +} + +func (*AllocateProducerIDsResponse) Key() int16 { return 67 } +func (*AllocateProducerIDsResponse) MaxVersion() int16 { return 0 } +func (v *AllocateProducerIDsResponse) SetVersion(version int16) { v.Version = version } +func (v *AllocateProducerIDsResponse) GetVersion() int16 { return v.Version } +func (v *AllocateProducerIDsResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *AllocateProducerIDsResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 0 +} + +func (v *AllocateProducerIDsResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *AllocateProducerIDsResponse) RequestKind() Request { + return &AllocateProducerIDsRequest{Version: v.Version} +} + +func (v *AllocateProducerIDsResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ProducerIDStart + dst = kbin.AppendInt64(dst, v) + } + { + v := v.ProducerIDLen + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *AllocateProducerIDsResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *AllocateProducerIDsResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *AllocateProducerIDsResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + v := b.Int64() + s.ProducerIDStart = v + } + { + v := b.Int32() + s.ProducerIDLen = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrAllocateProducerIDsResponse returns a pointer to a default AllocateProducerIDsResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrAllocateProducerIDsResponse() *AllocateProducerIDsResponse { + var v AllocateProducerIDsResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to AllocateProducerIDsResponse. +func (v *AllocateProducerIDsResponse) Default() { +} + +// NewAllocateProducerIDsResponse returns a default AllocateProducerIDsResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewAllocateProducerIDsResponse() AllocateProducerIDsResponse { + var v AllocateProducerIDsResponse + v.Default() + return v +} + +type ConsumerGroupHeartbeatRequestTopic struct { + // The topic ID. + TopicID [16]byte + + // The partitions. + Partitions []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ConsumerGroupHeartbeatRequestTopic. +func (v *ConsumerGroupHeartbeatRequestTopic) Default() { +} + +// NewConsumerGroupHeartbeatRequestTopic returns a default ConsumerGroupHeartbeatRequestTopic +// This is a shortcut for creating a struct and calling Default yourself. 
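The two KIP-730 response fields describe a block of producer IDs: ProducerIDStart is the first ID (inclusive) and ProducerIDLen is the count, so the allocated block is the half-open range [start, start+len). A small sketch, assuming the upstream franz-go import path:

package example

import "github.com/twmb/franz-go/pkg/kmsg"

// idRange converts the response fields into an inclusive [first, last]
// pair; it assumes ProducerIDLen is at least 1.
func idRange(resp *kmsg.AllocateProducerIDsResponse) (first, last int64) {
    first = resp.ProducerIDStart
    last = resp.ProducerIDStart + int64(resp.ProducerIDLen) - 1
    return first, last
}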
+func NewConsumerGroupHeartbeatRequestTopic() ConsumerGroupHeartbeatRequestTopic {
+	var v ConsumerGroupHeartbeatRequestTopic
+	v.Default()
+	return v
+}
+
+// ConsumerGroupHeartbeat is a part of KIP-848; there are a lot of details
+// to this request so documentation is left to the KIP itself.
+type ConsumerGroupHeartbeatRequest struct {
+	// Version is the version of this message used with a Kafka broker.
+	Version int16
+
+	// The group ID.
+	Group string
+
+	// The member ID generated by the coordinator. This must be kept during
+	// the entire lifetime of the member.
+	MemberID string
+
+	// The current member epoch; 0 to join the group, -1 to leave, -2 to
+	// indicate that the static member will rejoin.
+	MemberEpoch int32
+
+	// Instance ID of the member; null if not provided or if unchanging.
+	InstanceID *string
+
+	// The rack ID of the member; null if not provided or if unchanging.
+	RackID *string
+
+	// RebalanceTimeoutMillis is how long the coordinator will wait on a member
+	// to revoke its partitions. -1 if unchanging.
+	//
+	// This field has a default of -1.
+	RebalanceTimeoutMillis int32
+
+	// Subscribed topics; null if unchanging.
+	SubscribedTopicNames []string
+
+	// The server side assignor to use; null if unchanging.
+	ServerAssignor *string
+
+	// Topic partitions owned by the member; null if unchanging.
+	Topics []ConsumerGroupHeartbeatRequestTopic
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+	UnknownTags Tags
+}
+
+func (*ConsumerGroupHeartbeatRequest) Key() int16 { return 68 }
+func (*ConsumerGroupHeartbeatRequest) MaxVersion() int16 { return 0 }
+func (v *ConsumerGroupHeartbeatRequest) SetVersion(version int16) { v.Version = version }
+func (v *ConsumerGroupHeartbeatRequest) GetVersion() int16 { return v.Version }
+func (v *ConsumerGroupHeartbeatRequest) IsFlexible() bool { return v.Version >= 0 }
+func (v *ConsumerGroupHeartbeatRequest) ResponseKind() Response {
+	r := &ConsumerGroupHeartbeatResponse{Version: v.Version}
+	r.Default()
+	return r
+}
+
+// RequestWith requests v on r and returns the response or an error.
+// For sharded requests, the response may be merged and still return an error.
+// It is better to rely on client.RequestSharded than to rely on proper merging behavior.
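+//
+// A minimal usage sketch (editor's note; ctx and r are assumed to come from
+// the caller, e.g. a kgo.Client, which satisfies Requestor):
+//
+//	req := NewPtrConsumerGroupHeartbeatRequest()
+//	req.Group = "my-group" // hypothetical group ID
+//	req.MemberEpoch = 0    // 0 joins the group
+//	resp, err := req.RequestWith(ctx, r)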
+func (v *ConsumerGroupHeartbeatRequest) RequestWith(ctx context.Context, r Requestor) (*ConsumerGroupHeartbeatResponse, error) { + kresp, err := r.Request(ctx, v) + resp, _ := kresp.(*ConsumerGroupHeartbeatResponse) + return resp, err +} + +func (v *ConsumerGroupHeartbeatRequest) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.Group + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.MemberEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.InstanceID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.RackID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.RebalanceTimeoutMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.SubscribedTopicNames + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + for i := range v { + v := v[i] + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + } + { + v := v.ServerAssignor + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactNullableArrayLen(dst, len(v), v == nil) + } else { + dst = kbin.AppendNullableArrayLen(dst, len(v), v == nil) + } + for i := range v { + v := &v[i] + { + v := v.TopicID + dst = kbin.AppendUuid(dst, v) + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ConsumerGroupHeartbeatRequest) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ConsumerGroupHeartbeatRequest) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ConsumerGroupHeartbeatRequest) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Group = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.MemberID = v + } + { + v := b.Int32() + s.MemberEpoch = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.InstanceID = v + } + { 
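+		// Editor's note: RackID decodes like InstanceID above. Flexible
+		// versions use a compact nullable string (a uvarint length+1, with
+		// 0 meaning null); non-flexible versions use an int16 length with
+		// -1 meaning null. The Unsafe variants alias src rather than copying.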
+ var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.RackID = v + } + { + v := b.Int32() + s.RebalanceTimeoutMillis = v + } + { + v := s.SubscribedTopicNames + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 0 || l == 0 { + a = []string{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]string, l)...) + } + for i := int32(0); i < l; i++ { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + a[i] = v + } + v = a + s.SubscribedTopicNames = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ServerAssignor = v + } + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if version < 0 || l == 0 { + a = []ConsumerGroupHeartbeatRequestTopic{} + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ConsumerGroupHeartbeatRequestTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Uuid() + s.TopicID = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrConsumerGroupHeartbeatRequest returns a pointer to a default ConsumerGroupHeartbeatRequest +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrConsumerGroupHeartbeatRequest() *ConsumerGroupHeartbeatRequest { + var v ConsumerGroupHeartbeatRequest + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ConsumerGroupHeartbeatRequest. +func (v *ConsumerGroupHeartbeatRequest) Default() { + v.RebalanceTimeoutMillis = -1 +} + +// NewConsumerGroupHeartbeatRequest returns a default ConsumerGroupHeartbeatRequest +// This is a shortcut for creating a struct and calling Default yourself. +func NewConsumerGroupHeartbeatRequest() ConsumerGroupHeartbeatRequest { + var v ConsumerGroupHeartbeatRequest + v.Default() + return v +} + +type ConsumerGroupHeartbeatResponseAssignmentTopic struct { + TopicID [16]byte + + Partitions []int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ConsumerGroupHeartbeatResponseAssignmentTopic. 
+func (v *ConsumerGroupHeartbeatResponseAssignmentTopic) Default() {
+}
+
+// NewConsumerGroupHeartbeatResponseAssignmentTopic returns a default ConsumerGroupHeartbeatResponseAssignmentTopic
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewConsumerGroupHeartbeatResponseAssignmentTopic() ConsumerGroupHeartbeatResponseAssignmentTopic {
+	var v ConsumerGroupHeartbeatResponseAssignmentTopic
+	v.Default()
+	return v
+}
+
+type ConsumerGroupHeartbeatResponseAssignment struct {
+	// The topic partitions that can be used immediately.
+	Topics []ConsumerGroupHeartbeatResponseAssignmentTopic
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+	UnknownTags Tags
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to ConsumerGroupHeartbeatResponseAssignment.
+func (v *ConsumerGroupHeartbeatResponseAssignment) Default() {
+}
+
+// NewConsumerGroupHeartbeatResponseAssignment returns a default ConsumerGroupHeartbeatResponseAssignment
+// This is a shortcut for creating a struct and calling Default yourself.
+func NewConsumerGroupHeartbeatResponseAssignment() ConsumerGroupHeartbeatResponseAssignment {
+	var v ConsumerGroupHeartbeatResponseAssignment
+	v.Default()
+	return v
+}
+
+// ConsumerGroupHeartbeatResponse is returned from a ConsumerGroupHeartbeatRequest.
+type ConsumerGroupHeartbeatResponse struct {
+	// Version is the version of this message used with a Kafka broker.
+	Version int16
+
+	// ThrottleMillis is how long of a throttle Kafka will apply to the client
+	// after responding to this request.
+	ThrottleMillis int32
+
+	// ErrorCode is the error for this response.
+	//
+	// Supported errors:
+	// - GROUP_AUTHORIZATION_FAILED (version 0+)
+	// - NOT_COORDINATOR (version 0+)
+	// - COORDINATOR_NOT_AVAILABLE (version 0+)
+	// - COORDINATOR_LOAD_IN_PROGRESS (version 0+)
+	// - INVALID_REQUEST (version 0+)
+	// - UNKNOWN_MEMBER_ID (version 0+)
+	// - FENCED_MEMBER_EPOCH (version 0+)
+	// - UNSUPPORTED_ASSIGNOR (version 0+)
+	// - UNRELEASED_INSTANCE_ID (version 0+)
+	// - GROUP_MAX_SIZE_REACHED (version 0+)
+	ErrorCode int16
+
+	// A supplementary message if this errored.
+	ErrorMessage *string
+
+	// The member ID generated by the coordinator; provided when joining
+	// with MemberEpoch=0.
+	MemberID *string
+
+	// The member epoch.
+	MemberEpoch int32
+
+	// The heartbeat interval, in milliseconds.
+	HeartbeatIntervalMillis int32
+
+	// The assignment; null if not provided.
+	Assignment *ConsumerGroupHeartbeatResponseAssignment
+
+	// UnknownTags are tags Kafka sent that we do not know the purpose of.
+ UnknownTags Tags +} + +func (*ConsumerGroupHeartbeatResponse) Key() int16 { return 68 } +func (*ConsumerGroupHeartbeatResponse) MaxVersion() int16 { return 0 } +func (v *ConsumerGroupHeartbeatResponse) SetVersion(version int16) { v.Version = version } +func (v *ConsumerGroupHeartbeatResponse) GetVersion() int16 { return v.Version } +func (v *ConsumerGroupHeartbeatResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *ConsumerGroupHeartbeatResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 0 +} + +func (v *ConsumerGroupHeartbeatResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *ConsumerGroupHeartbeatResponse) RequestKind() Request { + return &ConsumerGroupHeartbeatRequest{Version: v.Version} +} + +func (v *ConsumerGroupHeartbeatResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.MemberEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.HeartbeatIntervalMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Assignment + if v == nil { + dst = append(dst, 255) + } else { + dst = append(dst, 1) + { + v := v.Topics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.TopicID + dst = kbin.AppendUuid(dst, v) + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ConsumerGroupHeartbeatResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ConsumerGroupHeartbeatResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ConsumerGroupHeartbeatResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.MemberID = v + } + { + v := b.Int32() + s.MemberEpoch = v 
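+		// Editor's note: the Assignment field that follows is a nullable
+		// struct on the wire; a leading int8 of -1 (0xff) marks null and
+		// 1 marks a present struct (see AppendTo above).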
+ } + { + v := b.Int32() + s.HeartbeatIntervalMillis = v + } + { + if present := b.Int8(); present != -1 && b.Ok() { + s.Assignment = new(ConsumerGroupHeartbeatResponseAssignment) + v := s.Assignment + v.Default() + s := v + { + v := s.Topics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ConsumerGroupHeartbeatResponseAssignmentTopic, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Uuid() + s.TopicID = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Topics = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrConsumerGroupHeartbeatResponse returns a pointer to a default ConsumerGroupHeartbeatResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrConsumerGroupHeartbeatResponse() *ConsumerGroupHeartbeatResponse { + var v ConsumerGroupHeartbeatResponse + v.Default() + return &v +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ConsumerGroupHeartbeatResponse. +func (v *ConsumerGroupHeartbeatResponse) Default() { + { + v := &v.Assignment + _ = v + } +} + +// NewConsumerGroupHeartbeatResponse returns a default ConsumerGroupHeartbeatResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewConsumerGroupHeartbeatResponse() ConsumerGroupHeartbeatResponse { + var v ConsumerGroupHeartbeatResponse + v.Default() + return v +} + +// Assignment contains consumer group assignments. +type Assignment struct { + // The topics & partitions assigned to the member. + TopicPartitions []AssignmentTopicPartition + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to Assignment. +func (v *Assignment) Default() { +} + +// NewAssignment returns a default Assignment +// This is a shortcut for creating a struct and calling Default yourself. +func NewAssignment() Assignment { + var v Assignment + v.Default() + return v +} + +// ConsumerGroupDescribe is a part of KIP-848; this is the +// "next generation" equivalent of DescribeGroups. +type ConsumerGroupDescribeRequest struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // The IDs of the groups to describe. + Groups []string + + // Whether to include authorized operations. + IncludeAuthorizedOperations bool + + // UnknownTags are tags Kafka sent that we do not know the purpose of. 
+	UnknownTags Tags
+}
+
+func (*ConsumerGroupDescribeRequest) Key() int16 { return 69 }
+func (*ConsumerGroupDescribeRequest) MaxVersion() int16 { return 0 }
+func (v *ConsumerGroupDescribeRequest) SetVersion(version int16) { v.Version = version }
+func (v *ConsumerGroupDescribeRequest) GetVersion() int16 { return v.Version }
+func (v *ConsumerGroupDescribeRequest) IsFlexible() bool { return v.Version >= 0 }
+func (v *ConsumerGroupDescribeRequest) ResponseKind() Response {
+	r := &ConsumerGroupDescribeResponse{Version: v.Version}
+	r.Default()
+	return r
+}
+
+// RequestWith requests v on r and returns the response or an error.
+// For sharded requests, the response may be merged and still return an error.
+// It is better to rely on client.RequestSharded than to rely on proper merging behavior.
+func (v *ConsumerGroupDescribeRequest) RequestWith(ctx context.Context, r Requestor) (*ConsumerGroupDescribeResponse, error) {
+	kresp, err := r.Request(ctx, v)
+	resp, _ := kresp.(*ConsumerGroupDescribeResponse)
+	return resp, err
+}
+
+func (v *ConsumerGroupDescribeRequest) AppendTo(dst []byte) []byte {
+	version := v.Version
+	_ = version
+	isFlexible := version >= 0
+	_ = isFlexible
+	{
+		v := v.Groups
+		if isFlexible {
+			dst = kbin.AppendCompactArrayLen(dst, len(v))
+		} else {
+			dst = kbin.AppendArrayLen(dst, len(v))
+		}
+		for i := range v {
+			v := v[i]
+			if isFlexible {
+				dst = kbin.AppendCompactString(dst, v)
+			} else {
+				dst = kbin.AppendString(dst, v)
+			}
+		}
+	}
+	{
+		v := v.IncludeAuthorizedOperations
+		dst = kbin.AppendBool(dst, v)
+	}
+	if isFlexible {
+		dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len()))
+		dst = v.UnknownTags.AppendEach(dst)
+	}
+	return dst
+}
+
+func (v *ConsumerGroupDescribeRequest) ReadFrom(src []byte) error {
+	return v.readFrom(src, false)
+}
+
+func (v *ConsumerGroupDescribeRequest) UnsafeReadFrom(src []byte) error {
+	return v.readFrom(src, true)
+}
+
+func (v *ConsumerGroupDescribeRequest) readFrom(src []byte, unsafe bool) error {
+	v.Default()
+	b := kbin.Reader{Src: src}
+	version := v.Version
+	_ = version
+	isFlexible := version >= 0
+	_ = isFlexible
+	s := v
+	{
+		v := s.Groups
+		a := v
+		var l int32
+		if isFlexible {
+			l = b.CompactArrayLen()
+		} else {
+			l = b.ArrayLen()
+		}
+		if !b.Ok() {
+			return b.Complete()
+		}
+		a = a[:0]
+		if l > 0 {
+			a = append(a, make([]string, l)...)
+		}
+		for i := int32(0); i < l; i++ {
+			var v string
+			if unsafe {
+				if isFlexible {
+					v = b.UnsafeCompactString()
+				} else {
+					v = b.UnsafeString()
+				}
+			} else {
+				if isFlexible {
+					v = b.CompactString()
+				} else {
+					v = b.String()
+				}
+			}
+			a[i] = v
+		}
+		v = a
+		s.Groups = v
+	}
+	{
+		v := b.Bool()
+		s.IncludeAuthorizedOperations = v
+	}
+	if isFlexible {
+		s.UnknownTags = internalReadTags(&b)
+	}
+	return b.Complete()
+}
+
+// NewPtrConsumerGroupDescribeRequest returns a pointer to a default ConsumerGroupDescribeRequest
+// This is a shortcut for creating a new(struct) and calling Default yourself.
+func NewPtrConsumerGroupDescribeRequest() *ConsumerGroupDescribeRequest {
+	var v ConsumerGroupDescribeRequest
+	v.Default()
+	return &v
+}
+
+// Default sets any default fields. Calling this allows for future compatibility
+// if new fields are added to ConsumerGroupDescribeRequest.
+func (v *ConsumerGroupDescribeRequest) Default() {
+}
+
+// NewConsumerGroupDescribeRequest returns a default ConsumerGroupDescribeRequest
+// This is a shortcut for creating a struct and calling Default yourself.
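+//
+// For example (editor's sketch; the group name is hypothetical, and ctx and
+// r are supplied by the caller):
+//
+//	req := NewConsumerGroupDescribeRequest()
+//	req.Groups = []string{"my-group"}
+//	req.IncludeAuthorizedOperations = true
+//	resp, err := req.RequestWith(ctx, r)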
+func NewConsumerGroupDescribeRequest() ConsumerGroupDescribeRequest { + var v ConsumerGroupDescribeRequest + v.Default() + return v +} + +type ConsumerGroupDescribeResponseGroupMember struct { + // The member ID. + MemberID string + + // The member instance ID, if any. + InstanceID *string + + // The member rack ID, if any. + RackID *string + + // The current member epoch. + MemberEpoch int32 + + // The client ID. + ClientID string + + // The client host. + ClientHost string + + // The subscribed topic names. + SubscribedTopics []string + + // The subscribed topic regex, if any. + SubscribedTopicRegex *string + + // The current assignment. + Assignment Assignment + + // The target assignment. + TargetAssignment Assignment + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ConsumerGroupDescribeResponseGroupMember. +func (v *ConsumerGroupDescribeResponseGroupMember) Default() { + { + v := &v.Assignment + _ = v + } + { + v := &v.TargetAssignment + _ = v + } +} + +// NewConsumerGroupDescribeResponseGroupMember returns a default ConsumerGroupDescribeResponseGroupMember +// This is a shortcut for creating a struct and calling Default yourself. +func NewConsumerGroupDescribeResponseGroupMember() ConsumerGroupDescribeResponseGroupMember { + var v ConsumerGroupDescribeResponseGroupMember + v.Default() + return v +} + +type ConsumerGroupDescribeResponseGroup struct { + // ErrorCode is the error for this response. + // + // Supported errors: + // - GROUP_AUTHORIZATION_FAILED (version 0+) + // - NOT_COORDINATOR (version 0+) + // - COORDINATOR_NOT_AVAILABLE (version 0+) + // - COORDINATOR_LOAD_IN_PROGRESS (version 0+) + // - INVALID_REQUEST (version 0+) + // - INVALID_GROUP_ID (version 0+) + // - GROUP_ID_NOT_FOUND (version 0+) + ErrorCode int16 + + // A supplementary message if this errored. + ErrorMessage *string + + // The group ID. + Group string + + // The group state. + State string + + // The group epoch. + Epoch int32 + + // The assignment epoch. + AssignmentEpoch int32 + + // The selected assignor. + AssignorName string + + // Members of the group. + Members []ConsumerGroupDescribeResponseGroupMember + + // 32 bit bitfield representing authorized operations for the group. + // + // This field has a default of -2147483648. + AuthorizedOperations int32 + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to ConsumerGroupDescribeResponseGroup. +func (v *ConsumerGroupDescribeResponseGroup) Default() { + v.AuthorizedOperations = -2147483648 +} + +// NewConsumerGroupDescribeResponseGroup returns a default ConsumerGroupDescribeResponseGroup +// This is a shortcut for creating a struct and calling Default yourself. +func NewConsumerGroupDescribeResponseGroup() ConsumerGroupDescribeResponseGroup { + var v ConsumerGroupDescribeResponseGroup + v.Default() + return v +} + +// ConsumerGroupDescribeResponse is returned from a ConsumerGroupDescribeRequest. +type ConsumerGroupDescribeResponse struct { + // Version is the version of this message used with a Kafka broker. + Version int16 + + // ThrottleMillis is how long of a throttle Kafka will apply to the client + // after responding to this request. 
+ ThrottleMillis int32 + + Groups []ConsumerGroupDescribeResponseGroup + + // UnknownTags are tags Kafka sent that we do not know the purpose of. + UnknownTags Tags +} + +func (*ConsumerGroupDescribeResponse) Key() int16 { return 69 } +func (*ConsumerGroupDescribeResponse) MaxVersion() int16 { return 0 } +func (v *ConsumerGroupDescribeResponse) SetVersion(version int16) { v.Version = version } +func (v *ConsumerGroupDescribeResponse) GetVersion() int16 { return v.Version } +func (v *ConsumerGroupDescribeResponse) IsFlexible() bool { return v.Version >= 0 } +func (v *ConsumerGroupDescribeResponse) Throttle() (int32, bool) { + return v.ThrottleMillis, v.Version >= 0 +} + +func (v *ConsumerGroupDescribeResponse) SetThrottle(throttleMillis int32) { + v.ThrottleMillis = throttleMillis +} + +func (v *ConsumerGroupDescribeResponse) RequestKind() Request { + return &ConsumerGroupDescribeRequest{Version: v.Version} +} + +func (v *ConsumerGroupDescribeResponse) AppendTo(dst []byte) []byte { + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + { + v := v.ThrottleMillis + dst = kbin.AppendInt32(dst, v) + } + { + v := v.Groups + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.ErrorCode + dst = kbin.AppendInt16(dst, v) + } + { + v := v.ErrorMessage + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.Group + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.State + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Epoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.AssignmentEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.AssignorName + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Members + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.MemberID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.InstanceID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.RackID + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := v.MemberEpoch + dst = kbin.AppendInt32(dst, v) + } + { + v := v.ClientID + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.ClientHost + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.SubscribedTopics + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + } + { + v := v.SubscribedTopicRegex + if isFlexible { + dst = kbin.AppendCompactNullableString(dst, v) + } else { + dst = kbin.AppendNullableString(dst, v) + } + } + { + v := &v.Assignment + { + v := v.TopicPartitions + if isFlexible { + dst = 
kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.TopicID + dst = kbin.AppendUuid(dst, v) + } + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + { + v := &v.TargetAssignment + { + v := v.TopicPartitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := &v[i] + { + v := v.TopicID + dst = kbin.AppendUuid(dst, v) + } + { + v := v.Topic + if isFlexible { + dst = kbin.AppendCompactString(dst, v) + } else { + dst = kbin.AppendString(dst, v) + } + } + { + v := v.Partitions + if isFlexible { + dst = kbin.AppendCompactArrayLen(dst, len(v)) + } else { + dst = kbin.AppendArrayLen(dst, len(v)) + } + for i := range v { + v := v[i] + dst = kbin.AppendInt32(dst, v) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + { + v := v.AuthorizedOperations + dst = kbin.AppendInt32(dst, v) + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + } + } + if isFlexible { + dst = kbin.AppendUvarint(dst, 0+uint32(v.UnknownTags.Len())) + dst = v.UnknownTags.AppendEach(dst) + } + return dst +} + +func (v *ConsumerGroupDescribeResponse) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *ConsumerGroupDescribeResponse) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *ConsumerGroupDescribeResponse) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + version := v.Version + _ = version + isFlexible := version >= 0 + _ = isFlexible + s := v + { + v := b.Int32() + s.ThrottleMillis = v + } + { + v := s.Groups + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ConsumerGroupDescribeResponseGroup, l)...) 
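+			// Editor's note: the a[:0] reslice plus append(make(...)) grows
+			// the existing slice to exactly l zeroed elements, reusing its
+			// capacity when possible instead of allocating a fresh slice.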
+ } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Int16() + s.ErrorCode = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.ErrorMessage = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Group = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.State = v + } + { + v := b.Int32() + s.Epoch = v + } + { + v := b.Int32() + s.AssignmentEpoch = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.AssignorName = v + } + { + v := s.Members + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]ConsumerGroupDescribeResponseGroupMember, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.MemberID = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.InstanceID = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.RackID = v + } + { + v := b.Int32() + s.MemberEpoch = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ClientID = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.ClientHost = v + } + { + v := s.SubscribedTopics + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]string, l)...) 
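+							// Editor's note: with unsafe set, the Unsafe*String
+							// reads below return strings that alias src; they
+							// are valid only while src is neither reused nor
+							// modified.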
+ } + for i := int32(0); i < l; i++ { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + a[i] = v + } + v = a + s.SubscribedTopics = v + } + { + var v *string + if isFlexible { + if unsafe { + v = b.UnsafeCompactNullableString() + } else { + v = b.CompactNullableString() + } + } else { + if unsafe { + v = b.UnsafeNullableString() + } else { + v = b.NullableString() + } + } + s.SubscribedTopicRegex = v + } + { + v := &s.Assignment + v.Default() + s := v + { + v := s.TopicPartitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AssignmentTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Uuid() + s.TopicID = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.TopicPartitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + { + v := &s.TargetAssignment + v.Default() + s := v + { + v := s.TopicPartitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]AssignmentTopicPartition, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + v := b.Uuid() + s.TopicID = v + } + { + var v string + if unsafe { + if isFlexible { + v = b.UnsafeCompactString() + } else { + v = b.UnsafeString() + } + } else { + if isFlexible { + v = b.CompactString() + } else { + v = b.String() + } + } + s.Topic = v + } + { + v := s.Partitions + a := v + var l int32 + if isFlexible { + l = b.CompactArrayLen() + } else { + l = b.ArrayLen() + } + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]int32, l)...) + } + for i := int32(0); i < l; i++ { + v := b.Int32() + a[i] = v + } + v = a + s.Partitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.TopicPartitions = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Members = v + } + { + v := b.Int32() + s.AuthorizedOperations = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + } + v = a + s.Groups = v + } + if isFlexible { + s.UnknownTags = internalReadTags(&b) + } + return b.Complete() +} + +// NewPtrConsumerGroupDescribeResponse returns a pointer to a default ConsumerGroupDescribeResponse +// This is a shortcut for creating a new(struct) and calling Default yourself. +func NewPtrConsumerGroupDescribeResponse() *ConsumerGroupDescribeResponse { + var v ConsumerGroupDescribeResponse + v.Default() + return &v +} + +// Default sets any default fields. 
Calling this allows for future compatibility +// if new fields are added to ConsumerGroupDescribeResponse. +func (v *ConsumerGroupDescribeResponse) Default() { +} + +// NewConsumerGroupDescribeResponse returns a default ConsumerGroupDescribeResponse +// This is a shortcut for creating a struct and calling Default yourself. +func NewConsumerGroupDescribeResponse() ConsumerGroupDescribeResponse { + var v ConsumerGroupDescribeResponse + v.Default() + return v +} + +// RequestForKey returns the request corresponding to the given request key +// or nil if the key is unknown. +func RequestForKey(key int16) Request { + switch key { + default: + return nil + case 0: + return NewPtrProduceRequest() + case 1: + return NewPtrFetchRequest() + case 2: + return NewPtrListOffsetsRequest() + case 3: + return NewPtrMetadataRequest() + case 4: + return NewPtrLeaderAndISRRequest() + case 5: + return NewPtrStopReplicaRequest() + case 6: + return NewPtrUpdateMetadataRequest() + case 7: + return NewPtrControlledShutdownRequest() + case 8: + return NewPtrOffsetCommitRequest() + case 9: + return NewPtrOffsetFetchRequest() + case 10: + return NewPtrFindCoordinatorRequest() + case 11: + return NewPtrJoinGroupRequest() + case 12: + return NewPtrHeartbeatRequest() + case 13: + return NewPtrLeaveGroupRequest() + case 14: + return NewPtrSyncGroupRequest() + case 15: + return NewPtrDescribeGroupsRequest() + case 16: + return NewPtrListGroupsRequest() + case 17: + return NewPtrSASLHandshakeRequest() + case 18: + return NewPtrApiVersionsRequest() + case 19: + return NewPtrCreateTopicsRequest() + case 20: + return NewPtrDeleteTopicsRequest() + case 21: + return NewPtrDeleteRecordsRequest() + case 22: + return NewPtrInitProducerIDRequest() + case 23: + return NewPtrOffsetForLeaderEpochRequest() + case 24: + return NewPtrAddPartitionsToTxnRequest() + case 25: + return NewPtrAddOffsetsToTxnRequest() + case 26: + return NewPtrEndTxnRequest() + case 27: + return NewPtrWriteTxnMarkersRequest() + case 28: + return NewPtrTxnOffsetCommitRequest() + case 29: + return NewPtrDescribeACLsRequest() + case 30: + return NewPtrCreateACLsRequest() + case 31: + return NewPtrDeleteACLsRequest() + case 32: + return NewPtrDescribeConfigsRequest() + case 33: + return NewPtrAlterConfigsRequest() + case 34: + return NewPtrAlterReplicaLogDirsRequest() + case 35: + return NewPtrDescribeLogDirsRequest() + case 36: + return NewPtrSASLAuthenticateRequest() + case 37: + return NewPtrCreatePartitionsRequest() + case 38: + return NewPtrCreateDelegationTokenRequest() + case 39: + return NewPtrRenewDelegationTokenRequest() + case 40: + return NewPtrExpireDelegationTokenRequest() + case 41: + return NewPtrDescribeDelegationTokenRequest() + case 42: + return NewPtrDeleteGroupsRequest() + case 43: + return NewPtrElectLeadersRequest() + case 44: + return NewPtrIncrementalAlterConfigsRequest() + case 45: + return NewPtrAlterPartitionAssignmentsRequest() + case 46: + return NewPtrListPartitionReassignmentsRequest() + case 47: + return NewPtrOffsetDeleteRequest() + case 48: + return NewPtrDescribeClientQuotasRequest() + case 49: + return NewPtrAlterClientQuotasRequest() + case 50: + return NewPtrDescribeUserSCRAMCredentialsRequest() + case 51: + return NewPtrAlterUserSCRAMCredentialsRequest() + case 52: + return NewPtrVoteRequest() + case 53: + return NewPtrBeginQuorumEpochRequest() + case 54: + return NewPtrEndQuorumEpochRequest() + case 55: + return NewPtrDescribeQuorumRequest() + case 56: + return NewPtrAlterPartitionRequest() + case 57: + return 
NewPtrUpdateFeaturesRequest() + case 58: + return NewPtrEnvelopeRequest() + case 59: + return NewPtrFetchSnapshotRequest() + case 60: + return NewPtrDescribeClusterRequest() + case 61: + return NewPtrDescribeProducersRequest() + case 62: + return NewPtrBrokerRegistrationRequest() + case 63: + return NewPtrBrokerHeartbeatRequest() + case 64: + return NewPtrUnregisterBrokerRequest() + case 65: + return NewPtrDescribeTransactionsRequest() + case 66: + return NewPtrListTransactionsRequest() + case 67: + return NewPtrAllocateProducerIDsRequest() + case 68: + return NewPtrConsumerGroupHeartbeatRequest() + case 69: + return NewPtrConsumerGroupDescribeRequest() + } +} + +// ResponseForKey returns the response corresponding to the given request key +// or nil if the key is unknown. +func ResponseForKey(key int16) Response { + switch key { + default: + return nil + case 0: + return NewPtrProduceResponse() + case 1: + return NewPtrFetchResponse() + case 2: + return NewPtrListOffsetsResponse() + case 3: + return NewPtrMetadataResponse() + case 4: + return NewPtrLeaderAndISRResponse() + case 5: + return NewPtrStopReplicaResponse() + case 6: + return NewPtrUpdateMetadataResponse() + case 7: + return NewPtrControlledShutdownResponse() + case 8: + return NewPtrOffsetCommitResponse() + case 9: + return NewPtrOffsetFetchResponse() + case 10: + return NewPtrFindCoordinatorResponse() + case 11: + return NewPtrJoinGroupResponse() + case 12: + return NewPtrHeartbeatResponse() + case 13: + return NewPtrLeaveGroupResponse() + case 14: + return NewPtrSyncGroupResponse() + case 15: + return NewPtrDescribeGroupsResponse() + case 16: + return NewPtrListGroupsResponse() + case 17: + return NewPtrSASLHandshakeResponse() + case 18: + return NewPtrApiVersionsResponse() + case 19: + return NewPtrCreateTopicsResponse() + case 20: + return NewPtrDeleteTopicsResponse() + case 21: + return NewPtrDeleteRecordsResponse() + case 22: + return NewPtrInitProducerIDResponse() + case 23: + return NewPtrOffsetForLeaderEpochResponse() + case 24: + return NewPtrAddPartitionsToTxnResponse() + case 25: + return NewPtrAddOffsetsToTxnResponse() + case 26: + return NewPtrEndTxnResponse() + case 27: + return NewPtrWriteTxnMarkersResponse() + case 28: + return NewPtrTxnOffsetCommitResponse() + case 29: + return NewPtrDescribeACLsResponse() + case 30: + return NewPtrCreateACLsResponse() + case 31: + return NewPtrDeleteACLsResponse() + case 32: + return NewPtrDescribeConfigsResponse() + case 33: + return NewPtrAlterConfigsResponse() + case 34: + return NewPtrAlterReplicaLogDirsResponse() + case 35: + return NewPtrDescribeLogDirsResponse() + case 36: + return NewPtrSASLAuthenticateResponse() + case 37: + return NewPtrCreatePartitionsResponse() + case 38: + return NewPtrCreateDelegationTokenResponse() + case 39: + return NewPtrRenewDelegationTokenResponse() + case 40: + return NewPtrExpireDelegationTokenResponse() + case 41: + return NewPtrDescribeDelegationTokenResponse() + case 42: + return NewPtrDeleteGroupsResponse() + case 43: + return NewPtrElectLeadersResponse() + case 44: + return NewPtrIncrementalAlterConfigsResponse() + case 45: + return NewPtrAlterPartitionAssignmentsResponse() + case 46: + return NewPtrListPartitionReassignmentsResponse() + case 47: + return NewPtrOffsetDeleteResponse() + case 48: + return NewPtrDescribeClientQuotasResponse() + case 49: + return NewPtrAlterClientQuotasResponse() + case 50: + return NewPtrDescribeUserSCRAMCredentialsResponse() + case 51: + return NewPtrAlterUserSCRAMCredentialsResponse() + case 52: + 
return NewPtrVoteResponse()
+	case 53:
+		return NewPtrBeginQuorumEpochResponse()
+	case 54:
+		return NewPtrEndQuorumEpochResponse()
+	case 55:
+		return NewPtrDescribeQuorumResponse()
+	case 56:
+		return NewPtrAlterPartitionResponse()
+	case 57:
+		return NewPtrUpdateFeaturesResponse()
+	case 58:
+		return NewPtrEnvelopeResponse()
+	case 59:
+		return NewPtrFetchSnapshotResponse()
+	case 60:
+		return NewPtrDescribeClusterResponse()
+	case 61:
+		return NewPtrDescribeProducersResponse()
+	case 62:
+		return NewPtrBrokerRegistrationResponse()
+	case 63:
+		return NewPtrBrokerHeartbeatResponse()
+	case 64:
+		return NewPtrUnregisterBrokerResponse()
+	case 65:
+		return NewPtrDescribeTransactionsResponse()
+	case 66:
+		return NewPtrListTransactionsResponse()
+	case 67:
+		return NewPtrAllocateProducerIDsResponse()
+	case 68:
+		return NewPtrConsumerGroupHeartbeatResponse()
+	case 69:
+		return NewPtrConsumerGroupDescribeResponse()
+	}
+}
+
+// NameForKey returns the name (e.g., "Fetch") corresponding to a given request key
+// or "Unknown" if the key is unknown.
+func NameForKey(key int16) string {
+	switch key {
+	default:
+		return "Unknown"
+	case 0:
+		return "Produce"
+	case 1:
+		return "Fetch"
+	case 2:
+		return "ListOffsets"
+	case 3:
+		return "Metadata"
+	case 4:
+		return "LeaderAndISR"
+	case 5:
+		return "StopReplica"
+	case 6:
+		return "UpdateMetadata"
+	case 7:
+		return "ControlledShutdown"
+	case 8:
+		return "OffsetCommit"
+	case 9:
+		return "OffsetFetch"
+	case 10:
+		return "FindCoordinator"
+	case 11:
+		return "JoinGroup"
+	case 12:
+		return "Heartbeat"
+	case 13:
+		return "LeaveGroup"
+	case 14:
+		return "SyncGroup"
+	case 15:
+		return "DescribeGroups"
+	case 16:
+		return "ListGroups"
+	case 17:
+		return "SASLHandshake"
+	case 18:
+		return "ApiVersions"
+	case 19:
+		return "CreateTopics"
+	case 20:
+		return "DeleteTopics"
+	case 21:
+		return "DeleteRecords"
+	case 22:
+		return "InitProducerID"
+	case 23:
+		return "OffsetForLeaderEpoch"
+	case 24:
+		return "AddPartitionsToTxn"
+	case 25:
+		return "AddOffsetsToTxn"
+	case 26:
+		return "EndTxn"
+	case 27:
+		return "WriteTxnMarkers"
+	case 28:
+		return "TxnOffsetCommit"
+	case 29:
+		return "DescribeACLs"
+	case 30:
+		return "CreateACLs"
+	case 31:
+		return "DeleteACLs"
+	case 32:
+		return "DescribeConfigs"
+	case 33:
+		return "AlterConfigs"
+	case 34:
+		return "AlterReplicaLogDirs"
+	case 35:
+		return "DescribeLogDirs"
+	case 36:
+		return "SASLAuthenticate"
+	case 37:
+		return "CreatePartitions"
+	case 38:
+		return "CreateDelegationToken"
+	case 39:
+		return "RenewDelegationToken"
+	case 40:
+		return "ExpireDelegationToken"
+	case 41:
+		return "DescribeDelegationToken"
+	case 42:
+		return "DeleteGroups"
+	case 43:
+		return "ElectLeaders"
+	case 44:
+		return "IncrementalAlterConfigs"
+	case 45:
+		return "AlterPartitionAssignments"
+	case 46:
+		return "ListPartitionReassignments"
+	case 47:
+		return "OffsetDelete"
+	case 48:
+		return "DescribeClientQuotas"
+	case 49:
+		return "AlterClientQuotas"
+	case 50:
+		return "DescribeUserSCRAMCredentials"
+	case 51:
+		return "AlterUserSCRAMCredentials"
+	case 52:
+		return "Vote"
+	case 53:
+		return "BeginQuorumEpoch"
+	case 54:
+		return "EndQuorumEpoch"
+	case 55:
+		return "DescribeQuorum"
+	case 56:
+		return "AlterPartition"
+	case 57:
+		return "UpdateFeatures"
+	case 58:
+		return "Envelope"
+	case 59:
+		return "FetchSnapshot"
+	case 60:
+		return "DescribeCluster"
+	case 61:
+		return "DescribeProducers"
+	case 62:
+		return "BrokerRegistration"
+	case 63:
+		return "BrokerHeartbeat"
+	case 64:
+		return 
"UnregisterBroker" + case 65: + return "DescribeTransactions" + case 66: + return "ListTransactions" + case 67: + return "AllocateProducerIDs" + case 68: + return "ConsumerGroupHeartbeat" + case 69: + return "ConsumerGroupDescribe" + } +} + +// Key is a typed representation of a request key, with helper functions. +type Key int16 + +const ( + Produce Key = 0 + Fetch Key = 1 + ListOffsets Key = 2 + Metadata Key = 3 + LeaderAndISR Key = 4 + StopReplica Key = 5 + UpdateMetadata Key = 6 + ControlledShutdown Key = 7 + OffsetCommit Key = 8 + OffsetFetch Key = 9 + FindCoordinator Key = 10 + JoinGroup Key = 11 + Heartbeat Key = 12 + LeaveGroup Key = 13 + SyncGroup Key = 14 + DescribeGroups Key = 15 + ListGroups Key = 16 + SASLHandshake Key = 17 + ApiVersions Key = 18 + CreateTopics Key = 19 + DeleteTopics Key = 20 + DeleteRecords Key = 21 + InitProducerID Key = 22 + OffsetForLeaderEpoch Key = 23 + AddPartitionsToTxn Key = 24 + AddOffsetsToTxn Key = 25 + EndTxn Key = 26 + WriteTxnMarkers Key = 27 + TxnOffsetCommit Key = 28 + DescribeACLs Key = 29 + CreateACLs Key = 30 + DeleteACLs Key = 31 + DescribeConfigs Key = 32 + AlterConfigs Key = 33 + AlterReplicaLogDirs Key = 34 + DescribeLogDirs Key = 35 + SASLAuthenticate Key = 36 + CreatePartitions Key = 37 + CreateDelegationToken Key = 38 + RenewDelegationToken Key = 39 + ExpireDelegationToken Key = 40 + DescribeDelegationToken Key = 41 + DeleteGroups Key = 42 + ElectLeaders Key = 43 + IncrementalAlterConfigs Key = 44 + AlterPartitionAssignments Key = 45 + ListPartitionReassignments Key = 46 + OffsetDelete Key = 47 + DescribeClientQuotas Key = 48 + AlterClientQuotas Key = 49 + DescribeUserSCRAMCredentials Key = 50 + AlterUserSCRAMCredentials Key = 51 + Vote Key = 52 + BeginQuorumEpoch Key = 53 + EndQuorumEpoch Key = 54 + DescribeQuorum Key = 55 + AlterPartition Key = 56 + UpdateFeatures Key = 57 + Envelope Key = 58 + FetchSnapshot Key = 59 + DescribeCluster Key = 60 + DescribeProducers Key = 61 + BrokerRegistration Key = 62 + BrokerHeartbeat Key = 63 + UnregisterBroker Key = 64 + DescribeTransactions Key = 65 + ListTransactions Key = 66 + AllocateProducerIDs Key = 67 + ConsumerGroupHeartbeat Key = 68 + ConsumerGroupDescribe Key = 69 +) + +// Name returns the name for this key. +func (k Key) Name() string { return NameForKey(int16(k)) } + +// Request returns a new request for this key if the key is known. +func (k Key) Request() Request { return RequestForKey(int16(k)) } + +// Response returns a new response for this key if the key is known. +func (k Key) Response() Response { return ResponseForKey(int16(k)) } + +// Int16 is an alias for int16(k). +func (k Key) Int16() int16 { return int16(k) } + +// A type of config. +// +// Possible values and their meanings: +// +// * 2 (TOPIC) +// +// * 4 (BROKER) +// +// * 8 (BROKER_LOGGER) +type ConfigResourceType int8 + +func (v ConfigResourceType) String() string { + switch v { + default: + return "UNKNOWN" + case 2: + return "TOPIC" + case 4: + return "BROKER" + case 8: + return "BROKER_LOGGER" + } +} + +func ConfigResourceTypeStrings() []string { + return []string{ + "TOPIC", + "BROKER", + "BROKER_LOGGER", + } +} + +// ParseConfigResourceType normalizes the input s and returns +// the value represented by the string. +// +// Normalizing works by stripping all dots, underscores, and dashes, +// trimming spaces, and lowercasing. 
+func ParseConfigResourceType(s string) (ConfigResourceType, error) { + switch strnorm(s) { + case "topic": + return 2, nil + case "broker": + return 4, nil + case "brokerlogger": + return 8, nil + default: + return 0, fmt.Errorf("ConfigResourceType: unable to parse %q", s) + } +} + +const ( + ConfigResourceTypeUnknown ConfigResourceType = 0 + ConfigResourceTypeTopic ConfigResourceType = 2 + ConfigResourceTypeBroker ConfigResourceType = 4 + ConfigResourceTypeBrokerLogger ConfigResourceType = 8 +) + +// MarshalText implements encoding.TextMarshaler. +func (e ConfigResourceType) MarshalText() (text []byte, err error) { + return []byte(e.String()), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (e *ConfigResourceType) UnmarshalText(text []byte) error { + v, err := ParseConfigResourceType(string(text)) + *e = v + return err +} + +// Where a config entry is from. If there are no config synonyms, +// the source is DEFAULT_CONFIG. +// +// Possible values and their meanings: +// +// * 1 (DYNAMIC_TOPIC_CONFIG) +// Dynamic topic config for a specific topic. +// +// * 2 (DYNAMIC_BROKER_CONFIG) +// Dynamic broker config for a specific broker. +// +// * 3 (DYNAMIC_DEFAULT_BROKER_CONFIG) +// Dynamic broker config used as the default for all brokers in a cluster. +// +// * 4 (STATIC_BROKER_CONFIG) +// Static broker config provided at start up. +// +// * 5 (DEFAULT_CONFIG) +// Built-in default configuration for those that have defaults. +// +// * 6 (DYNAMIC_BROKER_LOGGER_CONFIG) +// Broker logger; see KIP-412. +type ConfigSource int8 + +func (v ConfigSource) String() string { + switch v { + default: + return "UNKNOWN" + case 1: + return "DYNAMIC_TOPIC_CONFIG" + case 2: + return "DYNAMIC_BROKER_CONFIG" + case 3: + return "DYNAMIC_DEFAULT_BROKER_CONFIG" + case 4: + return "STATIC_BROKER_CONFIG" + case 5: + return "DEFAULT_CONFIG" + case 6: + return "DYNAMIC_BROKER_LOGGER_CONFIG" + } +} + +func ConfigSourceStrings() []string { + return []string{ + "DYNAMIC_TOPIC_CONFIG", + "DYNAMIC_BROKER_CONFIG", + "DYNAMIC_DEFAULT_BROKER_CONFIG", + "STATIC_BROKER_CONFIG", + "DEFAULT_CONFIG", + "DYNAMIC_BROKER_LOGGER_CONFIG", + } +} + +// ParseConfigSource normalizes the input s and returns +// the value represented by the string. +// +// Normalizing works by stripping all dots, underscores, and dashes, +// trimming spaces, and lowercasing. +func ParseConfigSource(s string) (ConfigSource, error) { + switch strnorm(s) { + case "dynamictopicconfig": + return 1, nil + case "dynamicbrokerconfig": + return 2, nil + case "dynamicdefaultbrokerconfig": + return 3, nil + case "staticbrokerconfig": + return 4, nil + case "defaultconfig": + return 5, nil + case "dynamicbrokerloggerconfig": + return 6, nil + default: + return 0, fmt.Errorf("ConfigSource: unable to parse %q", s) + } +} + +const ( + ConfigSourceUnknown ConfigSource = 0 + ConfigSourceDynamicTopicConfig ConfigSource = 1 + ConfigSourceDynamicBrokerConfig ConfigSource = 2 + ConfigSourceDynamicDefaultBrokerConfig ConfigSource = 3 + ConfigSourceStaticBrokerConfig ConfigSource = 4 + ConfigSourceDefaultConfig ConfigSource = 5 + ConfigSourceDynamicBrokerLoggerConfig ConfigSource = 6 +) + +// MarshalText implements encoding.TextMarshaler. +func (e ConfigSource) MarshalText() (text []byte, err error) { + return []byte(e.String()), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (e *ConfigSource) UnmarshalText(text []byte) error { + v, err := ParseConfigSource(string(text)) + *e = v + return err +} + +// A configuration data type. 
+// +// Possible values and their meanings: +// +// * 1 (BOOLEAN) +// +// * 2 (STRING) +// +// * 3 (INT) +// +// * 4 (SHORT) +// +// * 5 (LONG) +// +// * 6 (DOUBLE) +// +// * 7 (LIST) +// +// * 8 (CLASS) +// +// * 9 (PASSWORD) +type ConfigType int8 + +func (v ConfigType) String() string { + switch v { + default: + return "UNKNOWN" + case 1: + return "BOOLEAN" + case 2: + return "STRING" + case 3: + return "INT" + case 4: + return "SHORT" + case 5: + return "LONG" + case 6: + return "DOUBLE" + case 7: + return "LIST" + case 8: + return "CLASS" + case 9: + return "PASSWORD" + } +} + +func ConfigTypeStrings() []string { + return []string{ + "BOOLEAN", + "STRING", + "INT", + "SHORT", + "LONG", + "DOUBLE", + "LIST", + "CLASS", + "PASSWORD", + } +} + +// ParseConfigType normalizes the input s and returns +// the value represented by the string. +// +// Normalizing works by stripping all dots, underscores, and dashes, +// trimming spaces, and lowercasing. +func ParseConfigType(s string) (ConfigType, error) { + switch strnorm(s) { + case "boolean": + return 1, nil + case "string": + return 2, nil + case "int": + return 3, nil + case "short": + return 4, nil + case "long": + return 5, nil + case "double": + return 6, nil + case "list": + return 7, nil + case "class": + return 8, nil + case "password": + return 9, nil + default: + return 0, fmt.Errorf("ConfigType: unable to parse %q", s) + } +} + +const ( + ConfigTypeUnknown ConfigType = 0 + ConfigTypeBoolean ConfigType = 1 + ConfigTypeString ConfigType = 2 + ConfigTypeInt ConfigType = 3 + ConfigTypeShort ConfigType = 4 + ConfigTypeLong ConfigType = 5 + ConfigTypeDouble ConfigType = 6 + ConfigTypeList ConfigType = 7 + ConfigTypeClass ConfigType = 8 + ConfigTypePassword ConfigType = 9 +) + +// MarshalText implements encoding.TextMarshaler. +func (e ConfigType) MarshalText() (text []byte, err error) { + return []byte(e.String()), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (e *ConfigType) UnmarshalText(text []byte) error { + v, err := ParseConfigType(string(text)) + *e = v + return err +} + +// An incremental configuration operation. +// +// Possible values and their meanings: +// +// * 0 (SET) +// +// * 1 (DELETE) +// +// * 2 (APPEND) +// +// * 3 (SUBTRACT) +type IncrementalAlterConfigOp int8 + +func (v IncrementalAlterConfigOp) String() string { + switch v { + default: + return "UNKNOWN" + case 0: + return "SET" + case 1: + return "DELETE" + case 2: + return "APPEND" + case 3: + return "SUBTRACT" + } +} + +func IncrementalAlterConfigOpStrings() []string { + return []string{ + "SET", + "DELETE", + "APPEND", + "SUBTRACT", + } +} + +// ParseIncrementalAlterConfigOp normalizes the input s and returns +// the value represented by the string. +// +// Normalizing works by stripping all dots, underscores, and dashes, +// trimming spaces, and lowercasing. +func ParseIncrementalAlterConfigOp(s string) (IncrementalAlterConfigOp, error) { + switch strnorm(s) { + case "set": + return 0, nil + case "delete": + return 1, nil + case "append": + return 2, nil + case "subtract": + return 3, nil + default: + return 0, fmt.Errorf("IncrementalAlterConfigOp: unable to parse %q", s) + } +} + +const ( + IncrementalAlterConfigOpSet IncrementalAlterConfigOp = 0 + IncrementalAlterConfigOpDelete IncrementalAlterConfigOp = 1 + IncrementalAlterConfigOpAppend IncrementalAlterConfigOp = 2 + IncrementalAlterConfigOpSubtract IncrementalAlterConfigOp = 3 +) + +// MarshalText implements encoding.TextMarshaler. 
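+//
+// Editor's note: as a result, encoders such as encoding/json render the op
+// as its string name (e.g. "SET") rather than its numeric value.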
+func (e IncrementalAlterConfigOp) MarshalText() (text []byte, err error) { + return []byte(e.String()), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (e *IncrementalAlterConfigOp) UnmarshalText(text []byte) error { + v, err := ParseIncrementalAlterConfigOp(string(text)) + *e = v + return err +} + +// ACLResourceType is a type of resource to use for ACLs. +// +// Possible values and their meanings: +// +// * 1 (ANY) +// +// * 2 (TOPIC) +// +// * 3 (GROUP) +// +// * 4 (CLUSTER) +// +// * 5 (TRANSACTIONAL_ID) +// +// * 6 (DELEGATION_TOKEN) +// +// * 7 (USER) +type ACLResourceType int8 + +func (v ACLResourceType) String() string { + switch v { + default: + return "UNKNOWN" + case 1: + return "ANY" + case 2: + return "TOPIC" + case 3: + return "GROUP" + case 4: + return "CLUSTER" + case 5: + return "TRANSACTIONAL_ID" + case 6: + return "DELEGATION_TOKEN" + case 7: + return "USER" + } +} + +func ACLResourceTypeStrings() []string { + return []string{ + "ANY", + "TOPIC", + "GROUP", + "CLUSTER", + "TRANSACTIONAL_ID", + "DELEGATION_TOKEN", + "USER", + } +} + +// ParseACLResourceType normalizes the input s and returns +// the value represented by the string. +// +// Normalizing works by stripping all dots, underscores, and dashes, +// trimming spaces, and lowercasing. +func ParseACLResourceType(s string) (ACLResourceType, error) { + switch strnorm(s) { + case "any": + return 1, nil + case "topic": + return 2, nil + case "group": + return 3, nil + case "cluster": + return 4, nil + case "transactionalid": + return 5, nil + case "delegationtoken": + return 6, nil + case "user": + return 7, nil + default: + return 0, fmt.Errorf("ACLResourceType: unable to parse %q", s) + } +} + +const ( + ACLResourceTypeUnknown ACLResourceType = 0 + ACLResourceTypeAny ACLResourceType = 1 + ACLResourceTypeTopic ACLResourceType = 2 + ACLResourceTypeGroup ACLResourceType = 3 + ACLResourceTypeCluster ACLResourceType = 4 + ACLResourceTypeTransactionalId ACLResourceType = 5 + ACLResourceTypeDelegationToken ACLResourceType = 6 + ACLResourceTypeUser ACLResourceType = 7 +) + +// MarshalText implements encoding.TextMarshaler. +func (e ACLResourceType) MarshalText() (text []byte, err error) { + return []byte(e.String()), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (e *ACLResourceType) UnmarshalText(text []byte) error { + v, err := ParseACLResourceType(string(text)) + *e = v + return err +} + +// ACLResourcePatternType is how an acl's ResourceName is understood. +// +// This field was added with Kafka 2.0.0 for KIP-290. +// +// Possible values and their meanings: +// +// * 1 (ANY) +// Matches anything. +// +// * 2 (MATCH) +// Performs pattern matching; i.e., a literal match, or a prefix match, or wildcard. +// +// * 3 (LITERAL) +// The name must be an exact match. +// +// * 4 (PREFIXED) +// The name must have our requested name as a prefix (that is, "foo" will match on "foobar"). +type ACLResourcePatternType int8 + +func (v ACLResourcePatternType) String() string { + switch v { + default: + return "UNKNOWN" + case 1: + return "ANY" + case 2: + return "MATCH" + case 3: + return "LITERAL" + case 4: + return "PREFIXED" + } +} + +func ACLResourcePatternTypeStrings() []string { + return []string{ + "ANY", + "MATCH", + "LITERAL", + "PREFIXED", + } +} + +// ParseACLResourcePatternType normalizes the input s and returns +// the value represented by the string. +// +// Normalizing works by stripping all dots, underscores, and dashes, +// trimming spaces, and lowercasing. 
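+//
+// For example, "PREFIXED", "prefixed", and "pre-fixed" all parse to
+// ACLResourcePatternTypePrefixed.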
+func ParseACLResourcePatternType(s string) (ACLResourcePatternType, error) { + switch strnorm(s) { + case "any": + return 1, nil + case "match": + return 2, nil + case "literal": + return 3, nil + case "prefixed": + return 4, nil + default: + return 0, fmt.Errorf("ACLResourcePatternType: unable to parse %q", s) + } +} + +const ( + ACLResourcePatternTypeUnknown ACLResourcePatternType = 0 + ACLResourcePatternTypeAny ACLResourcePatternType = 1 + ACLResourcePatternTypeMatch ACLResourcePatternType = 2 + ACLResourcePatternTypeLiteral ACLResourcePatternType = 3 + ACLResourcePatternTypePrefixed ACLResourcePatternType = 4 +) + +// MarshalText implements encoding.TextMarshaler. +func (e ACLResourcePatternType) MarshalText() (text []byte, err error) { + return []byte(e.String()), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (e *ACLResourcePatternType) UnmarshalText(text []byte) error { + v, err := ParseACLResourcePatternType(string(text)) + *e = v + return err +} + +// An ACL permission type. +// +// Possible values and their meanings: +// +// * 1 (ANY) +// Any permission. +// +// * 2 (DENY) +// Any deny permission. +// +// * 3 (ALLOW) +// Any allow permission. +type ACLPermissionType int8 + +func (v ACLPermissionType) String() string { + switch v { + default: + return "UNKNOWN" + case 1: + return "ANY" + case 2: + return "DENY" + case 3: + return "ALLOW" + } +} + +func ACLPermissionTypeStrings() []string { + return []string{ + "ANY", + "DENY", + "ALLOW", + } +} + +// ParseACLPermissionType normalizes the input s and returns +// the value represented by the string. +// +// Normalizing works by stripping all dots, underscores, and dashes, +// trimming spaces, and lowercasing. +func ParseACLPermissionType(s string) (ACLPermissionType, error) { + switch strnorm(s) { + case "any": + return 1, nil + case "deny": + return 2, nil + case "allow": + return 3, nil + default: + return 0, fmt.Errorf("ACLPermissionType: unable to parse %q", s) + } +} + +const ( + ACLPermissionTypeUnknown ACLPermissionType = 0 + ACLPermissionTypeAny ACLPermissionType = 1 + ACLPermissionTypeDeny ACLPermissionType = 2 + ACLPermissionTypeAllow ACLPermissionType = 3 +) + +// MarshalText implements encoding.TextMarshaler. +func (e ACLPermissionType) MarshalText() (text []byte, err error) { + return []byte(e.String()), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (e *ACLPermissionType) UnmarshalText(text []byte) error { + v, err := ParseACLPermissionType(string(text)) + *e = v + return err +} + +// An ACL operation. +// +// Possible values and their meanings: +// +// * 1 (ANY) +// Matches anything. +// +// * 2 (ALL) +// Matches anything granted all permissions. 
+// +// * 3 (READ) +// +// * 4 (WRITE) +// +// * 5 (CREATE) +// +// * 6 (DELETE) +// +// * 7 (ALTER) +// +// * 8 (DESCRIBE) +// +// * 9 (CLUSTER_ACTION) +// +// * 10 (DESCRIBE_CONFIGS) +// +// * 11 (ALTER_CONFIGS) +// +// * 12 (IDEMPOTENT_WRITE) +// +// * 13 (CREATE_TOKENS) +// +// * 14 (DESCRIBE_TOKENS) +type ACLOperation int8 + +func (v ACLOperation) String() string { + switch v { + default: + return "UNKNOWN" + case 1: + return "ANY" + case 2: + return "ALL" + case 3: + return "READ" + case 4: + return "WRITE" + case 5: + return "CREATE" + case 6: + return "DELETE" + case 7: + return "ALTER" + case 8: + return "DESCRIBE" + case 9: + return "CLUSTER_ACTION" + case 10: + return "DESCRIBE_CONFIGS" + case 11: + return "ALTER_CONFIGS" + case 12: + return "IDEMPOTENT_WRITE" + case 13: + return "CREATE_TOKENS" + case 14: + return "DESCRIBE_TOKENS" + } +} + +func ACLOperationStrings() []string { + return []string{ + "ANY", + "ALL", + "READ", + "WRITE", + "CREATE", + "DELETE", + "ALTER", + "DESCRIBE", + "CLUSTER_ACTION", + "DESCRIBE_CONFIGS", + "ALTER_CONFIGS", + "IDEMPOTENT_WRITE", + "CREATE_TOKENS", + "DESCRIBE_TOKENS", + } +} + +// ParseACLOperation normalizes the input s and returns +// the value represented by the string. +// +// Normalizing works by stripping all dots, underscores, and dashes, +// trimming spaces, and lowercasing. +func ParseACLOperation(s string) (ACLOperation, error) { + switch strnorm(s) { + case "any": + return 1, nil + case "all": + return 2, nil + case "read": + return 3, nil + case "write": + return 4, nil + case "create": + return 5, nil + case "delete": + return 6, nil + case "alter": + return 7, nil + case "describe": + return 8, nil + case "clusteraction": + return 9, nil + case "describeconfigs": + return 10, nil + case "alterconfigs": + return 11, nil + case "idempotentwrite": + return 12, nil + case "createtokens": + return 13, nil + case "describetokens": + return 14, nil + default: + return 0, fmt.Errorf("ACLOperation: unable to parse %q", s) + } +} + +const ( + ACLOperationUnknown ACLOperation = 0 + ACLOperationAny ACLOperation = 1 + ACLOperationAll ACLOperation = 2 + ACLOperationRead ACLOperation = 3 + ACLOperationWrite ACLOperation = 4 + ACLOperationCreate ACLOperation = 5 + ACLOperationDelete ACLOperation = 6 + ACLOperationAlter ACLOperation = 7 + ACLOperationDescribe ACLOperation = 8 + ACLOperationClusterAction ACLOperation = 9 + ACLOperationDescribeConfigs ACLOperation = 10 + ACLOperationAlterConfigs ACLOperation = 11 + ACLOperationIdempotentWrite ACLOperation = 12 + ACLOperationCreateTokens ACLOperation = 13 + ACLOperationDescribeTokens ACLOperation = 14 +) + +// MarshalText implements encoding.TextMarshaler. +func (e ACLOperation) MarshalText() (text []byte, err error) { + return []byte(e.String()), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (e *ACLOperation) UnmarshalText(text []byte) error { + v, err := ParseACLOperation(string(text)) + *e = v + return err +} + +// TransactionState is the state of a transaction. 
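+//
+// Roughly, a transaction moves Empty -> Ongoing -> PrepareCommit or
+// PrepareAbort -> CompleteCommit or CompleteAbort, while Dead and
+// PrepareEpochFence cover expired and fenced transactional IDs (a rough
+// sketch of the lifecycle, not an exhaustive state machine).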
+// +// Possible values and their meanings: +// +// * 0 (Empty) +// +// * 1 (Ongoing) +// +// * 2 (PrepareCommit) +// +// * 3 (PrepareAbort) +// +// * 4 (CompleteCommit) +// +// * 5 (CompleteAbort) +// +// * 6 (Dead) +// +// * 7 (PrepareEpochFence) +type TransactionState int8 + +func (v TransactionState) String() string { + switch v { + default: + return "Unknown" + case 0: + return "Empty" + case 1: + return "Ongoing" + case 2: + return "PrepareCommit" + case 3: + return "PrepareAbort" + case 4: + return "CompleteCommit" + case 5: + return "CompleteAbort" + case 6: + return "Dead" + case 7: + return "PrepareEpochFence" + } +} + +func TransactionStateStrings() []string { + return []string{ + "Empty", + "Ongoing", + "PrepareCommit", + "PrepareAbort", + "CompleteCommit", + "CompleteAbort", + "Dead", + "PrepareEpochFence", + } +} + +// ParseTransactionState normalizes the input s and returns +// the value represented by the string. +// +// Normalizing works by stripping all dots, underscores, and dashes, +// trimming spaces, and lowercasing. +func ParseTransactionState(s string) (TransactionState, error) { + switch strnorm(s) { + case "empty": + return 0, nil + case "ongoing": + return 1, nil + case "preparecommit": + return 2, nil + case "prepareabort": + return 3, nil + case "completecommit": + return 4, nil + case "completeabort": + return 5, nil + case "dead": + return 6, nil + case "prepareepochfence": + return 7, nil + default: + return 0, fmt.Errorf("TransactionState: unable to parse %q", s) + } +} + +const ( + TransactionStateEmpty TransactionState = 0 + TransactionStateOngoing TransactionState = 1 + TransactionStatePrepareCommit TransactionState = 2 + TransactionStatePrepareAbort TransactionState = 3 + TransactionStateCompleteCommit TransactionState = 4 + TransactionStateCompleteAbort TransactionState = 5 + TransactionStateDead TransactionState = 6 + TransactionStatePrepareEpochFence TransactionState = 7 +) + +// MarshalText implements encoding.TextMarshaler. +func (e TransactionState) MarshalText() (text []byte, err error) { + return []byte(e.String()), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (e *TransactionState) UnmarshalText(text []byte) error { + v, err := ParseTransactionState(string(text)) + *e = v + return err +} + +// QuotasMatchType specifies how to match a Quota entity as part of the DescribeClientQuotasRequestComponent. +// +// Possible values and their meanings: +// +// * 0 (EXACT) +// Matches all quotas for the given EntityType with names equal to the Match field. +// +// * 1 (DEFAULT) +// Matches the default for the given EntityType. +// +// * 2 (ANY) +// Matches all named quotas and default quotas for the given EntityType. +type QuotasMatchType int8 + +func (v QuotasMatchType) String() string { + switch v { + default: + return "UNKNOWN" + case 0: + return "EXACT" + case 1: + return "DEFAULT" + case 2: + return "ANY" + } +} + +func QuotasMatchTypeStrings() []string { + return []string{ + "EXACT", + "DEFAULT", + "ANY", + } +} + +// ParseQuotasMatchType normalizes the input s and returns +// the value represented by the string. +// +// Normalizing works by stripping all dots, underscores, and dashes, +// trimming spaces, and lowercasing. 
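+//
+// For example, "EXACT", "exact", and "e_x_a_c_t" all parse to
+// QuotasMatchTypeExact.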
+func ParseQuotasMatchType(s string) (QuotasMatchType, error) { + switch strnorm(s) { + case "exact": + return 0, nil + case "default": + return 1, nil + case "any": + return 2, nil + default: + return 0, fmt.Errorf("QuotasMatchType: unable to parse %q", s) + } +} + +const ( + QuotasMatchTypeExact QuotasMatchType = 0 + QuotasMatchTypeDefault QuotasMatchType = 1 + QuotasMatchTypeAny QuotasMatchType = 2 +) + +// MarshalText implements encoding.TextMarshaler. +func (e QuotasMatchType) MarshalText() (text []byte, err error) { + return []byte(e.String()), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (e *QuotasMatchType) UnmarshalText(text []byte) error { + v, err := ParseQuotasMatchType(string(text)) + *e = v + return err +} + +// Possible values and their meanings: +// +// * 0 (ABORT) +// +// * 1 (COMMIT) +// +// * 2 (QUORUM_REASSIGNMENT) +// +// * 3 (LEADER_CHANGE) +type ControlRecordKeyType int8 + +func (v ControlRecordKeyType) String() string { + switch v { + default: + return "UNKNOWN" + case 0: + return "ABORT" + case 1: + return "COMMIT" + case 2: + return "QUORUM_REASSIGNMENT" + case 3: + return "LEADER_CHANGE" + } +} + +func ControlRecordKeyTypeStrings() []string { + return []string{ + "ABORT", + "COMMIT", + "QUORUM_REASSIGNMENT", + "LEADER_CHANGE", + } +} + +// ParseControlRecordKeyType normalizes the input s and returns +// the value represented by the string. +// +// Normalizing works by stripping all dots, underscores, and dashes, +// trimming spaces, and lowercasing. +func ParseControlRecordKeyType(s string) (ControlRecordKeyType, error) { + switch strnorm(s) { + case "abort": + return 0, nil + case "commit": + return 1, nil + case "quorumreassignment": + return 2, nil + case "leaderchange": + return 3, nil + default: + return 0, fmt.Errorf("ControlRecordKeyType: unable to parse %q", s) + } +} + +const ( + ControlRecordKeyTypeAbort ControlRecordKeyType = 0 + ControlRecordKeyTypeCommit ControlRecordKeyType = 1 + ControlRecordKeyTypeQuorumReassignment ControlRecordKeyType = 2 + ControlRecordKeyTypeLeaderChange ControlRecordKeyType = 3 +) + +// MarshalText implements encoding.TextMarshaler. +func (e ControlRecordKeyType) MarshalText() (text []byte, err error) { + return []byte(e.String()), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (e *ControlRecordKeyType) UnmarshalText(text []byte) error { + v, err := ParseControlRecordKeyType(string(text)) + *e = v + return err +} + +func strnorm(s string) string { + s = strings.ReplaceAll(s, ".", "") + s = strings.ReplaceAll(s, "_", "") + s = strings.ReplaceAll(s, "-", "") + s = strings.TrimSpace(s) + s = strings.ToLower(s) + return s +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kmsg/internal/kbin/primitives.go b/vendor/github.com/twmb/franz-go/pkg/kmsg/internal/kbin/primitives.go new file mode 100644 index 00000000000..2c5990d06a2 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kmsg/internal/kbin/primitives.go @@ -0,0 +1,850 @@ +// Package kbin contains Kafka primitive reading and writing functions. +package kbin + +import ( + "encoding/binary" + "errors" + "math" + "math/bits" + "reflect" + "unsafe" +) + +// This file contains primitive type encoding and decoding. +// +// The Reader helper can be used even when content runs out +// or an error is hit; all other number requests will return +// zero so a decode will basically no-op. + +// ErrNotEnoughData is returned when a type could not fully decode +// from a slice because the slice did not have enough data. 
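+//
+// Callers usually surface it via Reader.Complete after decoding rather
+// than checking each individual read.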
+var ErrNotEnoughData = errors.New("response did not contain enough data to be valid") + +// AppendBool appends 1 for true or 0 for false to dst. +func AppendBool(dst []byte, v bool) []byte { + if v { + return append(dst, 1) + } + return append(dst, 0) +} + +// AppendInt8 appends an int8 to dst. +func AppendInt8(dst []byte, i int8) []byte { + return append(dst, byte(i)) +} + +// AppendInt16 appends a big endian int16 to dst. +func AppendInt16(dst []byte, i int16) []byte { + return AppendUint16(dst, uint16(i)) +} + +// AppendUint16 appends a big endian uint16 to dst. +func AppendUint16(dst []byte, u uint16) []byte { + return append(dst, byte(u>>8), byte(u)) +} + +// AppendInt32 appends a big endian int32 to dst. +func AppendInt32(dst []byte, i int32) []byte { + return AppendUint32(dst, uint32(i)) +} + +// AppendInt64 appends a big endian int64 to dst. +func AppendInt64(dst []byte, i int64) []byte { + return appendUint64(dst, uint64(i)) +} + +// AppendFloat64 appends a big endian float64 to dst. +func AppendFloat64(dst []byte, f float64) []byte { + return appendUint64(dst, math.Float64bits(f)) +} + +// AppendUuid appends the 16 uuid bytes to dst. +func AppendUuid(dst []byte, uuid [16]byte) []byte { + return append(dst, uuid[:]...) +} + +func appendUint64(dst []byte, u uint64) []byte { + return append(dst, byte(u>>56), byte(u>>48), byte(u>>40), byte(u>>32), + byte(u>>24), byte(u>>16), byte(u>>8), byte(u)) +} + +// AppendUint32 appends a big endian uint32 to dst. +func AppendUint32(dst []byte, u uint32) []byte { + return append(dst, byte(u>>24), byte(u>>16), byte(u>>8), byte(u)) +} + +// uvarintLens could only be length 65, but using 256 allows bounds check +// elimination on lookup. +const uvarintLens = "\x01\x01\x01\x01\x01\x01\x01\x01\x02\x02\x02\x02\x02\x02\x02\x03\x03\x03\x03\x03\x03\x03\x04\x04\x04\x04\x04\x04\x04\x05\x05\x05\x05\x05\x05\x05\x06\x06\x06\x06\x06\x06\x06\x07\x07\x07\x07\x07\x07\x07\x08\x08\x08\x08\x08\x08\x08\x09\x09\x09\x09\x09\x09\x09\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + +// VarintLen returns how long i would be if it were varint encoded. +func VarintLen(i int32) int { + u := uint32(i)<<1 ^ uint32(i>>31) + return UvarintLen(u) +} + +// UvarintLen returns how long u would be if it were uvarint encoded. +func UvarintLen(u uint32) int { + return int(uvarintLens[byte(bits.Len32(u))]) +} + +func uvarlongLen(u uint64) int { + return int(uvarintLens[byte(bits.Len64(u))]) +} + +// Varint is a loop unrolled 32 bit varint decoder. The return semantics +// are the same as binary.Varint, with the added benefit that overflows +// in 5 byte encodings are handled rather than left to the user. +func Varint(in []byte) (int32, int) { + x, n := Uvarint(in) + return int32((x >> 1) ^ -(x & 1)), n +} + +// Uvarint is a loop unrolled 32 bit uvarint decoder. 
The return semantics +// are the same as binary.Uvarint, with the added benefit that overflows +// in 5 byte encodings are handled rather than left to the user. +func Uvarint(in []byte) (uint32, int) { + var x uint32 + var overflow int + + if len(in) < 1 { + goto fail + } + + x = uint32(in[0] & 0x7f) + if in[0]&0x80 == 0 { + return x, 1 + } else if len(in) < 2 { + goto fail + } + + x |= uint32(in[1]&0x7f) << 7 + if in[1]&0x80 == 0 { + return x, 2 + } else if len(in) < 3 { + goto fail + } + + x |= uint32(in[2]&0x7f) << 14 + if in[2]&0x80 == 0 { + return x, 3 + } else if len(in) < 4 { + goto fail + } + + x |= uint32(in[3]&0x7f) << 21 + if in[3]&0x80 == 0 { + return x, 4 + } else if len(in) < 5 { + goto fail + } + + x |= uint32(in[4]) << 28 + if in[4] <= 0x0f { + return x, 5 + } + + overflow = -5 + +fail: + return 0, overflow +} + +// Varlong is a loop unrolled 64 bit varint decoder. The return semantics +// are the same as binary.Varint, with the added benefit that overflows +// in 10 byte encodings are handled rather than left to the user. +func Varlong(in []byte) (int64, int) { + x, n := uvarlong(in) + return int64((x >> 1) ^ -(x & 1)), n +} + +func uvarlong(in []byte) (uint64, int) { + var x uint64 + var overflow int + + if len(in) < 1 { + goto fail + } + + x = uint64(in[0] & 0x7f) + if in[0]&0x80 == 0 { + return x, 1 + } else if len(in) < 2 { + goto fail + } + + x |= uint64(in[1]&0x7f) << 7 + if in[1]&0x80 == 0 { + return x, 2 + } else if len(in) < 3 { + goto fail + } + + x |= uint64(in[2]&0x7f) << 14 + if in[2]&0x80 == 0 { + return x, 3 + } else if len(in) < 4 { + goto fail + } + + x |= uint64(in[3]&0x7f) << 21 + if in[3]&0x80 == 0 { + return x, 4 + } else if len(in) < 5 { + goto fail + } + + x |= uint64(in[4]&0x7f) << 28 + if in[4]&0x80 == 0 { + return x, 5 + } else if len(in) < 6 { + goto fail + } + + x |= uint64(in[5]&0x7f) << 35 + if in[5]&0x80 == 0 { + return x, 6 + } else if len(in) < 7 { + goto fail + } + + x |= uint64(in[6]&0x7f) << 42 + if in[6]&0x80 == 0 { + return x, 7 + } else if len(in) < 8 { + goto fail + } + + x |= uint64(in[7]&0x7f) << 49 + if in[7]&0x80 == 0 { + return x, 8 + } else if len(in) < 9 { + goto fail + } + + x |= uint64(in[8]&0x7f) << 56 + if in[8]&0x80 == 0 { + return x, 9 + } else if len(in) < 10 { + goto fail + } + + x |= uint64(in[9]) << 63 + if in[9] <= 0x01 { + return x, 10 + } + + overflow = -10 + +fail: + return 0, overflow +} + +// AppendVarint appends a varint encoded i to dst. +func AppendVarint(dst []byte, i int32) []byte { + return AppendUvarint(dst, uint32(i)<<1^uint32(i>>31)) +} + +// AppendUvarint appends a uvarint encoded u to dst. +func AppendUvarint(dst []byte, u uint32) []byte { + switch UvarintLen(u) { + case 5: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte((u>>14)&0x7f|0x80), + byte((u>>21)&0x7f|0x80), + byte(u>>28)) + case 4: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte((u>>14)&0x7f|0x80), + byte(u>>21)) + case 3: + return append(dst, + byte(u&0x7f|0x80), + byte((u>>7)&0x7f|0x80), + byte(u>>14)) + case 2: + return append(dst, + byte(u&0x7f|0x80), + byte(u>>7)) + case 1: + return append(dst, byte(u)) + } + return dst +} + +// AppendVarlong appends a varint encoded i to dst. 
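+//
+// The value is zigzag encoded first, as with Kafka record varlongs; for
+// example, AppendVarlong(nil, -1) yields the single byte 0x01.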
+func AppendVarlong(dst []byte, i int64) []byte {
+	return appendUvarlong(dst, uint64(i)<<1^uint64(i>>63))
+}
+
+func appendUvarlong(dst []byte, u uint64) []byte {
+	switch uvarlongLen(u) {
+	case 10:
+		return append(dst,
+			byte(u&0x7f|0x80),
+			byte((u>>7)&0x7f|0x80),
+			byte((u>>14)&0x7f|0x80),
+			byte((u>>21)&0x7f|0x80),
+			byte((u>>28)&0x7f|0x80),
+			byte((u>>35)&0x7f|0x80),
+			byte((u>>42)&0x7f|0x80),
+			byte((u>>49)&0x7f|0x80),
+			byte((u>>56)&0x7f|0x80),
+			byte(u>>63))
+	case 9:
+		return append(dst,
+			byte(u&0x7f|0x80),
+			byte((u>>7)&0x7f|0x80),
+			byte((u>>14)&0x7f|0x80),
+			byte((u>>21)&0x7f|0x80),
+			byte((u>>28)&0x7f|0x80),
+			byte((u>>35)&0x7f|0x80),
+			byte((u>>42)&0x7f|0x80),
+			byte((u>>49)&0x7f|0x80),
+			byte(u>>56))
+	case 8:
+		return append(dst,
+			byte(u&0x7f|0x80),
+			byte((u>>7)&0x7f|0x80),
+			byte((u>>14)&0x7f|0x80),
+			byte((u>>21)&0x7f|0x80),
+			byte((u>>28)&0x7f|0x80),
+			byte((u>>35)&0x7f|0x80),
+			byte((u>>42)&0x7f|0x80),
+			byte(u>>49))
+	case 7:
+		return append(dst,
+			byte(u&0x7f|0x80),
+			byte((u>>7)&0x7f|0x80),
+			byte((u>>14)&0x7f|0x80),
+			byte((u>>21)&0x7f|0x80),
+			byte((u>>28)&0x7f|0x80),
+			byte((u>>35)&0x7f|0x80),
+			byte(u>>42))
+	case 6:
+		return append(dst,
+			byte(u&0x7f|0x80),
+			byte((u>>7)&0x7f|0x80),
+			byte((u>>14)&0x7f|0x80),
+			byte((u>>21)&0x7f|0x80),
+			byte((u>>28)&0x7f|0x80),
+			byte(u>>35))
+	case 5:
+		return append(dst,
+			byte(u&0x7f|0x80),
+			byte((u>>7)&0x7f|0x80),
+			byte((u>>14)&0x7f|0x80),
+			byte((u>>21)&0x7f|0x80),
+			byte(u>>28))
+	case 4:
+		return append(dst,
+			byte(u&0x7f|0x80),
+			byte((u>>7)&0x7f|0x80),
+			byte((u>>14)&0x7f|0x80),
+			byte(u>>21))
+	case 3:
+		return append(dst,
+			byte(u&0x7f|0x80),
+			byte((u>>7)&0x7f|0x80),
+			byte(u>>14))
+	case 2:
+		return append(dst,
+			byte(u&0x7f|0x80),
+			byte(u>>7))
+	case 1:
+		return append(dst, byte(u))
+	}
+	return dst
+}
+
+// AppendString appends a string to dst prefixed with its int16 length.
+func AppendString(dst []byte, s string) []byte {
+	dst = AppendInt16(dst, int16(len(s)))
+	return append(dst, s...)
+}
+
+// AppendCompactString appends a string to dst prefixed with its uvarint length
+// starting at 1; 0 is reserved for null, which compact strings are not
+// (nullable compact ones are!). Thus, the length is the decoded uvarint - 1.
+//
+// For KIP-482.
+func AppendCompactString(dst []byte, s string) []byte {
+	dst = AppendUvarint(dst, 1+uint32(len(s)))
+	return append(dst, s...)
+}
+
+// AppendNullableString appends a potentially nil string to dst prefixed with
+// its int16 length or int16(-1) if nil.
+func AppendNullableString(dst []byte, s *string) []byte {
+	if s == nil {
+		return AppendInt16(dst, -1)
+	}
+	return AppendString(dst, *s)
+}
+
+// AppendCompactNullableString appends a potentially nil string to dst with its
+// uvarint length starting at 1, with 0 indicating null. Thus, the length is
+// the decoded uvarint - 1.
+//
+// For KIP-482.
+func AppendCompactNullableString(dst []byte, s *string) []byte {
+	if s == nil {
+		return AppendUvarint(dst, 0)
+	}
+	return AppendCompactString(dst, *s)
+}
+
+// AppendBytes appends bytes to dst prefixed with its int32 length.
+func AppendBytes(dst, b []byte) []byte {
+	dst = AppendInt32(dst, int32(len(b)))
+	return append(dst, b...)
+}
+
+// AppendCompactBytes appends bytes to dst prefixed with its uvarint length
+// starting at 1; 0 is reserved for null, which compact bytes are not (nullable
+// compact ones are!). Thus, the length is the decoded uvarint - 1.
+//
+// For KIP-482.
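+//
+// For example, a three byte slice is prefixed with the uvarint 4, and an
+// empty slice with the uvarint 1.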
+func AppendCompactBytes(dst, b []byte) []byte {
+	dst = AppendUvarint(dst, 1+uint32(len(b)))
+	return append(dst, b...)
+}
+
+// AppendNullableBytes appends a potentially nil slice to dst prefixed with its
+// int32 length or int32(-1) if nil.
+func AppendNullableBytes(dst, b []byte) []byte {
+	if b == nil {
+		return AppendInt32(dst, -1)
+	}
+	return AppendBytes(dst, b)
+}
+
+// AppendCompactNullableBytes appends a potentially nil slice to dst with its
+// uvarint length starting at 1, with 0 indicating null. Thus, the length is
+// the decoded uvarint - 1.
+//
+// For KIP-482.
+func AppendCompactNullableBytes(dst, b []byte) []byte {
+	if b == nil {
+		return AppendUvarint(dst, 0)
+	}
+	return AppendCompactBytes(dst, b)
+}
+
+// AppendVarintString appends a string to dst prefixed with its length encoded
+// as a varint.
+func AppendVarintString(dst []byte, s string) []byte {
+	dst = AppendVarint(dst, int32(len(s)))
+	return append(dst, s...)
+}
+
+// AppendVarintBytes appends a slice to dst prefixed with its length encoded as
+// a varint.
+func AppendVarintBytes(dst, b []byte) []byte {
+	if b == nil {
+		return AppendVarint(dst, -1)
+	}
+	dst = AppendVarint(dst, int32(len(b)))
+	return append(dst, b...)
+}
+
+// AppendArrayLen appends the length of an array as an int32 to dst.
+func AppendArrayLen(dst []byte, l int) []byte {
+	return AppendInt32(dst, int32(l))
+}
+
+// AppendCompactArrayLen appends the length of an array as a uvarint to dst
+// as the length + 1.
+//
+// For KIP-482.
+func AppendCompactArrayLen(dst []byte, l int) []byte {
+	return AppendUvarint(dst, 1+uint32(l))
+}
+
+// AppendNullableArrayLen appends the length of an array as an int32 to dst,
+// or -1 if isNil is true.
+func AppendNullableArrayLen(dst []byte, l int, isNil bool) []byte {
+	if isNil {
+		return AppendInt32(dst, -1)
+	}
+	return AppendInt32(dst, int32(l))
+}
+
+// AppendCompactNullableArrayLen appends the length of an array as a uvarint to
+// dst as the length + 1; if isNil is true, this appends 0 as a uvarint.
+//
+// For KIP-482.
+func AppendCompactNullableArrayLen(dst []byte, l int, isNil bool) []byte {
+	if isNil {
+		return AppendUvarint(dst, 0)
+	}
+	return AppendUvarint(dst, 1+uint32(l))
+}
+
+// Reader is used to decode Kafka messages.
+//
+// For all functions on Reader, if the reader has been invalidated, functions
+// return defaults (false, 0, nil, ""). Use Complete to detect if the reader
+// was invalidated or if the reader has remaining data.
+type Reader struct {
+	Src []byte
+	bad bool
+}
+
+// Bool returns a bool from the reader.
+func (b *Reader) Bool() bool {
+	if len(b.Src) < 1 {
+		b.bad = true
+		b.Src = nil
+		return false
+	}
+	t := b.Src[0] != 0 // if '0', false
+	b.Src = b.Src[1:]
+	return t
+}
+
+// Int8 returns an int8 from the reader.
+func (b *Reader) Int8() int8 {
+	if len(b.Src) < 1 {
+		b.bad = true
+		b.Src = nil
+		return 0
+	}
+	r := b.Src[0]
+	b.Src = b.Src[1:]
+	return int8(r)
+}
+
+// Int16 returns an int16 from the reader.
+func (b *Reader) Int16() int16 {
+	if len(b.Src) < 2 {
+		b.bad = true
+		b.Src = nil
+		return 0
+	}
+	r := int16(binary.BigEndian.Uint16(b.Src))
+	b.Src = b.Src[2:]
+	return r
+}
+
+// Uint16 returns a uint16 from the reader.
+func (b *Reader) Uint16() uint16 {
+	if len(b.Src) < 2 {
+		b.bad = true
+		b.Src = nil
+		return 0
+	}
+	r := binary.BigEndian.Uint16(b.Src)
+	b.Src = b.Src[2:]
+	return r
+}
+
+// Int32 returns an int32 from the reader.
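+//
+// As with every Reader method, a short source invalidates the reader and
+// this returns 0; check Complete once decoding finishes.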
+func (b *Reader) Int32() int32 { + if len(b.Src) < 4 { + b.bad = true + b.Src = nil + return 0 + } + r := int32(binary.BigEndian.Uint32(b.Src)) + b.Src = b.Src[4:] + return r +} + +// Int64 returns an int64 from the reader. +func (b *Reader) Int64() int64 { + return int64(b.readUint64()) +} + +// Uuid returns a uuid from the reader. +func (b *Reader) Uuid() [16]byte { + var r [16]byte + copy(r[:], b.Span(16)) + return r +} + +// Float64 returns a float64 from the reader. +func (b *Reader) Float64() float64 { + return math.Float64frombits(b.readUint64()) +} + +func (b *Reader) readUint64() uint64 { + if len(b.Src) < 8 { + b.bad = true + b.Src = nil + return 0 + } + r := binary.BigEndian.Uint64(b.Src) + b.Src = b.Src[8:] + return r +} + +// Uint32 returns a uint32 from the reader. +func (b *Reader) Uint32() uint32 { + if len(b.Src) < 4 { + b.bad = true + b.Src = nil + return 0 + } + r := binary.BigEndian.Uint32(b.Src) + b.Src = b.Src[4:] + return r +} + +// Varint returns a varint int32 from the reader. +func (b *Reader) Varint() int32 { + val, n := Varint(b.Src) + if n <= 0 { + b.bad = true + b.Src = nil + return 0 + } + b.Src = b.Src[n:] + return val +} + +// Varlong returns a varlong int64 from the reader. +func (b *Reader) Varlong() int64 { + val, n := Varlong(b.Src) + if n <= 0 { + b.bad = true + b.Src = nil + return 0 + } + b.Src = b.Src[n:] + return val +} + +// Uvarint returns a uvarint encoded uint32 from the reader. +func (b *Reader) Uvarint() uint32 { + val, n := Uvarint(b.Src) + if n <= 0 { + b.bad = true + b.Src = nil + return 0 + } + b.Src = b.Src[n:] + return val +} + +// Span returns l bytes from the reader. +func (b *Reader) Span(l int) []byte { + if len(b.Src) < l || l < 0 { + b.bad = true + b.Src = nil + return nil + } + r := b.Src[:l:l] + b.Src = b.Src[l:] + return r +} + +// UnsafeString returns a Kafka string from the reader without allocating using +// the unsafe package. This must be used with care; note the string holds a +// reference to the original slice. +func (b *Reader) UnsafeString() string { + l := b.Int16() + return UnsafeString(b.Span(int(l))) +} + +// String returns a Kafka string from the reader. +func (b *Reader) String() string { + l := b.Int16() + return string(b.Span(int(l))) +} + +// UnsafeCompactString returns a Kafka compact string from the reader without +// allocating using the unsafe package. This must be used with care; note the +// string holds a reference to the original slice. +func (b *Reader) UnsafeCompactString() string { + l := int(b.Uvarint()) - 1 + return UnsafeString(b.Span(l)) +} + +// CompactString returns a Kafka compact string from the reader. +func (b *Reader) CompactString() string { + l := int(b.Uvarint()) - 1 + return string(b.Span(l)) +} + +// UnsafeNullableString returns a Kafka nullable string from the reader without +// allocating using the unsafe package. This must be used with care; note the +// string holds a reference to the original slice. +func (b *Reader) UnsafeNullableString() *string { + l := b.Int16() + if l < 0 { + return nil + } + s := UnsafeString(b.Span(int(l))) + return &s +} + +// NullableString returns a Kafka nullable string from the reader. +func (b *Reader) NullableString() *string { + l := b.Int16() + if l < 0 { + return nil + } + s := string(b.Span(int(l))) + return &s +} + +// UnsafeCompactNullableString returns a Kafka compact nullable string from the +// reader without allocating using the unsafe package. This must be used with +// care; note the string holds a reference to the original slice. 
+func (b *Reader) UnsafeCompactNullableString() *string { + l := int(b.Uvarint()) - 1 + if l < 0 { + return nil + } + s := UnsafeString(b.Span(l)) + return &s +} + +// CompactNullableString returns a Kafka compact nullable string from the +// reader. +func (b *Reader) CompactNullableString() *string { + l := int(b.Uvarint()) - 1 + if l < 0 { + return nil + } + s := string(b.Span(l)) + return &s +} + +// Bytes returns a Kafka byte array from the reader. +// +// This never returns nil. +func (b *Reader) Bytes() []byte { + l := b.Int32() + // This is not to spec, but it is not clearly documented and Microsoft + // EventHubs fails here. -1 means null, which should throw an + // exception. EventHubs uses -1 to mean "does not exist" on some + // non-nullable fields. + // + // Until EventHubs is fixed, we return an empty byte slice for null. + if l == -1 { + return []byte{} + } + return b.Span(int(l)) +} + +// CompactBytes returns a Kafka compact byte array from the reader. +// +// This never returns nil. +func (b *Reader) CompactBytes() []byte { + l := int(b.Uvarint()) - 1 + if l == -1 { // same as above: -1 should not be allowed here + return []byte{} + } + return b.Span(l) +} + +// NullableBytes returns a Kafka nullable byte array from the reader, returning +// nil as appropriate. +func (b *Reader) NullableBytes() []byte { + l := b.Int32() + if l < 0 { + return nil + } + r := b.Span(int(l)) + return r +} + +// CompactNullableBytes returns a Kafka compact nullable byte array from the +// reader, returning nil as appropriate. +func (b *Reader) CompactNullableBytes() []byte { + l := int(b.Uvarint()) - 1 + if l < 0 { + return nil + } + r := b.Span(l) + return r +} + +// ArrayLen returns a Kafka array length from the reader. +func (b *Reader) ArrayLen() int32 { + r := b.Int32() + // The min size of a Kafka type is a byte, so if we do not have + // at least the array length of bytes left, it is bad. + if len(b.Src) < int(r) { + b.bad = true + b.Src = nil + return 0 + } + return r +} + +// VarintArrayLen returns a Kafka array length from the reader. +func (b *Reader) VarintArrayLen() int32 { + r := b.Varint() + // The min size of a Kafka type is a byte, so if we do not have + // at least the array length of bytes left, it is bad. + if len(b.Src) < int(r) { + b.bad = true + b.Src = nil + return 0 + } + return r +} + +// CompactArrayLen returns a Kafka compact array length from the reader. +func (b *Reader) CompactArrayLen() int32 { + r := int32(b.Uvarint()) - 1 + // The min size of a Kafka type is a byte, so if we do not have + // at least the array length of bytes left, it is bad. + if len(b.Src) < int(r) { + b.bad = true + b.Src = nil + return 0 + } + return r +} + +// VarintBytes returns a Kafka encoded varint array from the reader, returning +// nil as appropriate. +func (b *Reader) VarintBytes() []byte { + l := b.Varint() + if l < 0 { + return nil + } + return b.Span(int(l)) +} + +// UnsafeVarintString returns a Kafka encoded varint string from the reader +// without allocating using the unsafe package. This must be used with care; +// note the string holds a reference to the original slice. +func (b *Reader) UnsafeVarintString() string { + return UnsafeString(b.VarintBytes()) +} + +// VarintString returns a Kafka encoded varint string from the reader. +func (b *Reader) VarintString() string { + return string(b.VarintBytes()) +} + +// Complete returns ErrNotEnoughData if the source ran out while decoding. 
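+//
+// A typical decode reads every field and checks Complete once at the
+// end, a minimal sketch:
+//
+//	r := kbin.Reader{Src: src}
+//	key := r.Int16()
+//	name := r.String()
+//	if err := r.Complete(); err != nil {
+//		// src was too short for the expected layout
+//	}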
+func (b *Reader) Complete() error {
+	if b.bad {
+		return ErrNotEnoughData
+	}
+	return nil
+}
+
+// Ok returns true if the reader is still ok.
+func (b *Reader) Ok() bool {
+	return !b.bad
+}
+
+// UnsafeString returns the slice as a string using unsafe rule (6).
+func UnsafeString(slice []byte) string {
+	var str string
+	strhdr := (*reflect.StringHeader)(unsafe.Pointer(&str)) //nolint:gosec // known way to convert slice to string
+	strhdr.Data = ((*reflect.SliceHeader)(unsafe.Pointer(&slice))).Data //nolint:gosec // known way to convert slice to string
+	strhdr.Len = len(slice)
+	return str
+}
diff --git a/vendor/github.com/twmb/franz-go/pkg/kmsg/record.go b/vendor/github.com/twmb/franz-go/pkg/kmsg/record.go
new file mode 100644
index 00000000000..86499fd7966
--- /dev/null
+++ b/vendor/github.com/twmb/franz-go/pkg/kmsg/record.go
@@ -0,0 +1,174 @@
+package kmsg
+
+import "github.com/twmb/franz-go/pkg/kmsg/internal/kbin"
+
+// A Record is a Kafka v0.11.0.0 record. It corresponds to an individual
+// message as it is written on the wire.
+type Record struct {
+	// Length is the length of everything that follows this field in this
+	// record on the wire. It is an int32 encoded as a varint.
+	Length int32
+
+	// Attributes are record level attributes. This field is currently unused.
+	Attributes int8
+
+	// TimestampDelta is the millisecond delta of this record's timestamp
+	// from the record's RecordBatch's FirstTimestamp.
+	//
+	// NOTE: this is actually an int64 but we cannot change the type for
+	// backwards compatibility. Use TimestampDelta64.
+	TimestampDelta   int32
+	TimestampDelta64 int64
+
+	// OffsetDelta is the delta of this record's offset from the record's
+	// RecordBatch's FirstOffset.
+	//
+	// For producing, this is usually equal to the index of the record in
+	// the record batch.
+	OffsetDelta int32
+
+	// Key is a blob of data for a record.
+	//
+	// Keys are usually used for hashing the record to specific Kafka partitions.
+	Key []byte
+
+	// Value is a blob of data. This field is the main "message" portion of a
+	// record.
+	Value []byte
+
+	// Headers are optional user provided metadata for records. Unlike normal
+	// arrays, the number of headers is encoded as a varint.
+ Headers []Header +} + +func (v *Record) AppendTo(dst []byte) []byte { + { + v := v.Length + dst = kbin.AppendVarint(dst, v) + } + { + v := v.Attributes + dst = kbin.AppendInt8(dst, v) + } + { + d := v.TimestampDelta64 + if d == 0 { + d = int64(v.TimestampDelta) + } + dst = kbin.AppendVarlong(dst, d) + } + { + v := v.OffsetDelta + dst = kbin.AppendVarint(dst, v) + } + { + v := v.Key + dst = kbin.AppendVarintBytes(dst, v) + } + { + v := v.Value + dst = kbin.AppendVarintBytes(dst, v) + } + { + v := v.Headers + dst = kbin.AppendVarint(dst, int32(len(v))) + for i := range v { + v := &v[i] + { + v := v.Key + dst = kbin.AppendVarintString(dst, v) + } + { + v := v.Value + dst = kbin.AppendVarintBytes(dst, v) + } + } + } + return dst +} + +func (v *Record) ReadFrom(src []byte) error { + return v.readFrom(src, false) +} + +func (v *Record) UnsafeReadFrom(src []byte) error { + return v.readFrom(src, true) +} + +func (v *Record) readFrom(src []byte, unsafe bool) error { + v.Default() + b := kbin.Reader{Src: src} + s := v + { + v := b.Varint() + s.Length = v + } + { + v := b.Int8() + s.Attributes = v + } + { + v := b.Varlong() + s.TimestampDelta64 = v + s.TimestampDelta = int32(v) + } + { + v := b.Varint() + s.OffsetDelta = v + } + { + v := b.VarintBytes() + s.Key = v + } + { + v := b.VarintBytes() + s.Value = v + } + { + v := s.Headers + a := v + var l int32 + l = b.VarintArrayLen() + if !b.Ok() { + return b.Complete() + } + a = a[:0] + if l > 0 { + a = append(a, make([]Header, l)...) + } + for i := int32(0); i < l; i++ { + v := &a[i] + v.Default() + s := v + { + var v string + if unsafe { + v = b.UnsafeVarintString() + } else { + v = b.VarintString() + } + s.Key = v + } + { + v := b.VarintBytes() + s.Value = v + } + } + v = a + s.Headers = v + } + return b.Complete() +} + +// Default sets any default fields. Calling this allows for future compatibility +// if new fields are added to Record. +func (v *Record) Default() { +} + +// NewRecord returns a default Record +// This is a shortcut for creating a struct and calling Default yourself. +func NewRecord() Record { + var v Record + v.Default() + return v +} diff --git a/vendor/github.com/twmb/franz-go/pkg/kversion/kversion.go b/vendor/github.com/twmb/franz-go/pkg/kversion/kversion.go new file mode 100644 index 00000000000..2267b919298 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/kversion/kversion.go @@ -0,0 +1,1198 @@ +// Package kversion specifies versions for Kafka request keys. +// +// Kafka technically has internal broker versions that bump multiple times per +// release. This package only defines releases and tip. +package kversion + +import ( + "bytes" + "fmt" + "regexp" + "sync" + "text/tabwriter" + + "github.com/twmb/franz-go/pkg/kmsg" +) + +// Versions is a list of versions, with each item corresponding to a Kafka key +// and each item's value corresponding to the max version supported. +// +// Minimum versions are not currently tracked because all keys have a minimum +// version of zero. The internals of a Versions may change in the future to +// support minimum versions; the outward facing API of Versions should not +// change to support this. +// +// As well, supported features may be added in the future. +type Versions struct { + // If any version is -1, then it is left out in that version. + // This was first done in version 2.7.0, where Kafka added support + // for 52, 53, 54, 55, but it was not a part of the 2.7.0 release, + // so ApiVersionsResponse goes from 51 to 56. 
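+	//
+	// k2v is indexed by API key; each value is the max supported version,
+	// with -1 marking a key that is not supported at all.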
+ k2v []int16 +} + +var ( + reFromString *regexp.Regexp + reFromStringOnce sync.Once +) + +var versions = []struct { + name string + v *Versions +}{ + {"v0.8.0", V0_8_0()}, + {"v0.8.1", V0_8_1()}, + {"v0.8.2", V0_8_2()}, + {"v0.9.0", V0_9_0()}, + {"v0.10.0", V0_10_0()}, + {"v0.10.1", V0_10_1()}, + {"v0.10.2", V0_10_2()}, + {"v0.11.0", V0_11_0()}, + {"v1.0", V1_0_0()}, + {"v1.1", V1_1_0()}, + {"v2.0", V2_0_0()}, + {"v2.1", V2_1_0()}, + {"v2.2", V2_2_0()}, + {"v2.3", V2_3_0()}, + {"v2.4", V2_4_0()}, + {"v2.5", V2_5_0()}, + {"v2.6", V2_6_0()}, + {"v2.7", V2_7_0()}, + {"v2.8", V2_8_0()}, + {"v3.0", V3_0_0()}, + {"v3.1", V3_1_0()}, + {"v3.2", V3_2_0()}, + {"v3.3", V3_3_0()}, + {"v3.4", V3_4_0()}, + {"v3.5", V3_5_0()}, + {"v3.6", V3_6_0()}, + {"v3.7", V3_7_0()}, + {"v3.8", V3_8_0()}, +} + +// VersionStrings returns all recognized versions, minus any patch, that can be +// used as input to FromString. +func VersionStrings() []string { + var vs []string + for _, v := range versions { + vs = append(vs, v.name) + } + return vs +} + +// FromString returns a Versions from v. +// The expected input is: +// - for v0, v0.#.# or v0.#.#.# +// - for v1, v1.# or v1.#.# +// +// The "v" is optional. +func FromString(v string) *Versions { + reFromStringOnce.Do(func() { + // 0: entire string + // 1: v1+ match, minus patch + // 2: v0 match, minus subpatch + reFromString = regexp.MustCompile(`^(?:(v?[1-9]+\.\d+)(?:\.\d+)?|(v?0\.\d+\.\d+)(?:\.\d+)?)$`) + }) + m := reFromString.FindStringSubmatch(v) + if m == nil { + return nil + } + v = m[1] + if m[2] != "" { + v = m[2] + } + withv := "v" + v + for _, v2 := range versions { + if v2.name == v || v2.name == withv { + return v2.v + } + } + return nil +} + +// FromApiVersionsResponse returns a Versions from a kmsg.ApiVersionsResponse. +func FromApiVersionsResponse(r *kmsg.ApiVersionsResponse) *Versions { + var v Versions + for _, key := range r.ApiKeys { + v.SetMaxKeyVersion(key.ApiKey, key.MaxVersion) + } + return &v +} + +// HasKey returns true if the versions contains the given key. +func (vs *Versions) HasKey(k int16) bool { + _, has := vs.LookupMaxKeyVersion(k) + return has +} + +// LookupMaxKeyVersion returns the version for the given key and whether the +// key exists. If the key does not exist, this returns (-1, false). +func (vs *Versions) LookupMaxKeyVersion(k int16) (int16, bool) { + if k < 0 { + return -1, false + } + if int(k) >= len(vs.k2v) { + return -1, false + } + version := vs.k2v[k] + if version < 0 { + return -1, false + } + return version, true +} + +// SetMaxKeyVersion sets the max version for the given key. +// +// Setting a version to -1 unsets the key. +// +// Versions are backed by a slice; if the slice is not long enough, it is +// extended to fit the key. +func (vs *Versions) SetMaxKeyVersion(k, v int16) { + if v < 0 { + v = -1 + } + // If the version is < 0, we are unsetting a version. If we are + // unsetting a version that is more than the amount of keys we already + // have, we have no reason to unset. + if k < 0 || v < 0 && int(k) >= len(vs.k2v)+1 { + return + } + needLen := int(k) + 1 + for len(vs.k2v) < needLen { + vs.k2v = append(vs.k2v, -1) + } + vs.k2v[k] = v +} + +// Equal returns whether two versions are equal. +func (vs *Versions) Equal(other *Versions) bool { + // We allow the version slices to be of different lengths, so long as + // the versions for keys in one and not the other are -1. + // + // Basically, all non-negative-one keys must be equal. 
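+	// For example, {1, 2, -1} equals {1, 2}, but {1, 2, 0} does not.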
+ long, short := vs.k2v, other.k2v + if len(short) > len(long) { + long, short = short, long + } + for i, v := range short { + if v != long[i] { + return false + } + } + for _, v := range long[len(short):] { + if v >= 0 { + return false + } + } + return true +} + +// EachMaxKeyVersion calls fn for each key and max version +func (vs *Versions) EachMaxKeyVersion(fn func(k, v int16)) { + for k, v := range vs.k2v { + if v >= 0 { + fn(int16(k), v) + } + } +} + +// VersionGuessOpt is an option to change how version guessing is done. +type VersionGuessOpt interface { + apply(*guessCfg) +} + +type guessOpt struct{ fn func(*guessCfg) } + +func (opt guessOpt) apply(cfg *guessCfg) { opt.fn(cfg) } + +// SkipKeys skips the given keys while guessing versions. +func SkipKeys(keys ...int16) VersionGuessOpt { + return guessOpt{func(cfg *guessCfg) { cfg.skipKeys = keys }} +} + +// TryRaftBroker changes from guessing the version for a classical ZooKeeper +// based broker to guessing for a raft based broker (v2.8+). +// +// Note that with raft, there can be a TryRaftController attempt as well. +func TryRaftBroker() VersionGuessOpt { + return guessOpt{func(cfg *guessCfg) { cfg.listener = rBroker }} +} + +// TryRaftController changes from guessing the version for a classical +// ZooKeeper based broker to guessing for a raft based controller broker +// (v2.8+). +// +// Note that with raft, there can be a TryRaftBroker attempt as well. Odds are +// that if you are an end user speaking to a raft based Kafka cluster, you are +// speaking to a raft broker. The controller is specifically for broker to +// broker communication. +func TryRaftController() VersionGuessOpt { + return guessOpt{func(cfg *guessCfg) { cfg.listener = rController }} +} + +type guessCfg struct { + skipKeys []int16 + listener listener +} + +// VersionGuess attempts to guess which version of Kafka these versions belong +// to. If an exact match can be determined, this returns a string in the format +// v0.#.# or v#.# (depending on whether Kafka is pre-1.0 or post). For +// example, v0.8.0 or v2.7. +// +// Patch numbers are not included in the guess as it is not possible to +// determine the Kafka patch version being used as a client. +// +// If the version is determined to be higher than kversion knows of or is tip, +// this package returns "at least v#.#". +// +// Custom versions, or in-between versions, are detected and return slightly +// more verbose strings. +// +// Options can be specified to change how version guessing is performed, for +// example, certain keys can be skipped, or the guessing can try evaluating the +// versions as Raft broker based versions. +// +// Internally, this function tries guessing the version against both KRaft and +// Kafka APIs. The more exact match is returned. +func (vs *Versions) VersionGuess(opts ...VersionGuessOpt) string { + standard := vs.versionGuess(opts...) + raftBroker := vs.versionGuess(append(opts, TryRaftBroker())...) + raftController := vs.versionGuess(append(opts, TryRaftController())...) + + // If any of these are exact, return the exact guess. + for _, g := range []guess{ + standard, + raftBroker, + raftController, + } { + if g.how == guessExact { + return g.String() + } + } + + // If any are atLeast, that means it is newer than we can guess and we + // return the highest version. + for _, g := range []guess{ + standard, + raftBroker, + raftController, + } { + if g.how == guessAtLeast { + return g.String() + } + } + + // This is a custom version. 
We could do some advanced logic to try to + // return highest of all three guesses, but that may be inaccurate: + // KRaft may detect a higher guess because not all requests exist in + // KRaft. Instead, we just return our standard guess. + return standard.String() +} + +type guess struct { + v1 string + v2 string // for between + how int8 +} + +const ( + guessExact = iota + guessAtLeast + guessCustomUnknown + guessCustomAtLeast + guessBetween + guessNotEven +) + +func (g guess) String() string { + switch g.how { + case guessExact: + return g.v1 + case guessAtLeast: + return "at least " + g.v1 + case guessCustomUnknown: + return "unknown custom version" + case guessCustomAtLeast: + return "unknown custom version at least " + g.v1 + case guessBetween: + return "between " + g.v1 + " and " + g.v2 + case guessNotEven: + return "not even " + g.v1 + } + return g.v1 +} + +func (vs *Versions) versionGuess(opts ...VersionGuessOpt) guess { + cfg := guessCfg{ + listener: zkBroker, + // Envelope was added in 2.7 for kraft and zkBroker in 3.4; we + // need to skip it for 2.7 through 3.4 otherwise the version + // detection fails. We can just skip it generally since there + // are enough differentiating factors that accurately detecting + // envelope doesn't matter. + // + // TODO: add introduced-version to differentiate some specific + // keys. + skipKeys: []int16{4, 5, 6, 7, 27, 52, 53, 54, 55, 56, 57, 58, 59, 62, 63, 64, 67, 74, 75}, + } + for _, opt := range opts { + opt.apply(&cfg) + } + + skip := make(map[int16]bool, len(cfg.skipKeys)) + for _, k := range cfg.skipKeys { + skip[k] = true + } + + var last string + cmp := make(map[int16]int16, len(maxTip)) + cmpskip := make(map[int16]int16) + for _, comparison := range []struct { + cmp listenerKeys + name string + }{ + {max080, "v0.8.0"}, + {max081, "v0.8.1"}, + {max082, "v0.8.2"}, + {max090, "v0.9.0"}, + {max0100, "v0.10.0"}, + {max0101, "v0.10.1"}, + {max0102, "v0.10.2"}, + {max0110, "v0.11.0"}, + {max100, "v1.0"}, + {max110, "v1.1"}, + {max200, "v2.0"}, + {max210, "v2.1"}, + {max220, "v2.2"}, + {max230, "v2.3"}, + {max240, "v2.4"}, + {max250, "v2.5"}, + {max260, "v2.6"}, + {max270, "v2.7"}, + {max280, "v2.8"}, + {max300, "v3.0"}, + {max310, "v3.1"}, + {max320, "v3.2"}, + {max330, "v3.3"}, + {max340, "v3.4"}, + {max350, "v3.5"}, + {max360, "v3.6"}, + {max370, "v3.7"}, + {max380, "v3.8"}, + } { + for k, v := range comparison.cmp.filter(cfg.listener) { + if v == -1 { + continue + } + k16 := int16(k) + if skip[k16] { + cmpskip[k16] = v + } else { + cmp[k16] = v + } + } + + var under, equal, over bool + + for k, v := range vs.k2v { + k16 := int16(k) + if skip[k16] { + skipv, ok := cmpskip[k16] + if v == -1 || !ok { + continue + } + cmp[k16] = skipv + } + cmpv, has := cmp[k16] + if has { + // If our version for this key is less than the + // comparison versions, then we are less than what we + // are comparing. + if v < cmpv { + under = true + } else if v > cmpv { + // Similarly, if our version is more, then we + // are over what we are comparing. + over = true + } else { + equal = true + } + delete(cmp, k16) + } else if v >= 0 { + // If what we are comparing to does not even have this + // key **and** our version is larger non-zero, then our + // version is larger than what we are comparing to. + // + // We can have a negative version if a key was manually + // unset. + over = true + } + // If the version is < 0, the key is unset. 
+ } + + // If our versions did not clear out what we are comparing against, we + // do not have all keys that we need for this version. + if len(cmp) > 0 { + under = true + } + + current := comparison.name + switch { + case under && over: + // Regardless of equal being true or not, this is a custom version. + if last != "" { + return guess{v1: last, how: guessCustomAtLeast} + } + return guess{v1: last, how: guessCustomUnknown} + + case under: + // Regardless of equal being true or not, we have not yet hit + // this version. + if last != "" { + return guess{v1: last, v2: current, how: guessBetween} + } + return guess{v1: current, how: guessNotEven} + + case over: + // Regardless of equal being true or not, we try again. + last = current + + case equal: + return guess{v1: current, how: guessExact} + } + // At least one of under, equal, or over must be true, so there + // is no default case. + } + + return guess{v1: last, how: guessAtLeast} +} + +// String returns a string representation of the versions; the format may +// change. +func (vs *Versions) String() string { + var buf bytes.Buffer + w := tabwriter.NewWriter(&buf, 0, 0, 2, ' ', 0) + for k, v := range vs.k2v { + if v < 0 { + continue + } + name := kmsg.NameForKey(int16(k)) + if name == "" { + name = "Unknown" + } + fmt.Fprintf(w, "%s\t%d\n", name, v) + } + w.Flush() + return buf.String() +} + +// Stable is a shortcut for the latest _released_ Kafka versions. +// +// This is the default version used in kgo to avoid breaking tip changes. +func Stable() *Versions { return zkBrokerOf(maxStable) } + +// Tip is the latest defined Kafka key versions; this may be slightly out of date. +func Tip() *Versions { return zkBrokerOf(maxTip) } + +func V0_8_0() *Versions { return zkBrokerOf(max080) } +func V0_8_1() *Versions { return zkBrokerOf(max081) } +func V0_8_2() *Versions { return zkBrokerOf(max082) } +func V0_9_0() *Versions { return zkBrokerOf(max090) } +func V0_10_0() *Versions { return zkBrokerOf(max0100) } +func V0_10_1() *Versions { return zkBrokerOf(max0101) } +func V0_10_2() *Versions { return zkBrokerOf(max0102) } +func V0_11_0() *Versions { return zkBrokerOf(max0110) } +func V1_0_0() *Versions { return zkBrokerOf(max100) } +func V1_1_0() *Versions { return zkBrokerOf(max110) } +func V2_0_0() *Versions { return zkBrokerOf(max200) } +func V2_1_0() *Versions { return zkBrokerOf(max210) } +func V2_2_0() *Versions { return zkBrokerOf(max220) } +func V2_3_0() *Versions { return zkBrokerOf(max230) } +func V2_4_0() *Versions { return zkBrokerOf(max240) } +func V2_5_0() *Versions { return zkBrokerOf(max250) } +func V2_6_0() *Versions { return zkBrokerOf(max260) } +func V2_7_0() *Versions { return zkBrokerOf(max270) } +func V2_8_0() *Versions { return zkBrokerOf(max280) } +func V3_0_0() *Versions { return zkBrokerOf(max300) } +func V3_1_0() *Versions { return zkBrokerOf(max310) } +func V3_2_0() *Versions { return zkBrokerOf(max320) } +func V3_3_0() *Versions { return zkBrokerOf(max330) } +func V3_4_0() *Versions { return zkBrokerOf(max340) } +func V3_5_0() *Versions { return zkBrokerOf(max350) } +func V3_6_0() *Versions { return zkBrokerOf(max360) } +func V3_7_0() *Versions { return zkBrokerOf(max370) } +func V3_8_0() *Versions { return zkBrokerOf(max380) } + +func zkBrokerOf(lks listenerKeys) *Versions { + return &Versions{lks.filter(zkBroker)} +} + +type listener uint8 + +func (l listener) has(target listener) bool { + return l&target != 0 +} + +const ( + zkBroker listener = 1 << iota + rBroker + rController +) + +type listenerKey struct { + 
listener listener + version int16 +} + +type listenerKeys []listenerKey + +func (lks listenerKeys) filter(listener listener) []int16 { + r := make([]int16, 0, len(lks)) + for _, lk := range lks { + if lk.listener.has(listener) { + r = append(r, lk.version) + } else { + r = append(r, -1) + } + } + return r +} + +// All requests before KRaft started being introduced support the zkBroker, but +// KRaft changed that. Kafka commit 698319b8e2c1f6cb574f339eede6f2a5b1919b55 +// added which listeners support which API keys. +func k(listeners ...listener) listenerKey { + var k listenerKey + for _, listener := range listeners { + k.listener |= listener + } + return k +} + +func (l *listenerKey) inc() { + l.version++ +} + +// For the comments below, appends are annotated with the key being introduced, +// while incs are annotated with the version the inc results in. + +func nextMax(prev listenerKeys, do func(listenerKeys) listenerKeys) listenerKeys { + return do(append(listenerKeys(nil), prev...)) +} + +var max080 = nextMax(nil, func(listenerKeys) listenerKeys { + return listenerKeys{ + k(zkBroker, rBroker), // 0 produce + k(zkBroker, rBroker, rController), // 1 fetch + k(zkBroker, rBroker), // 2 list offset + k(zkBroker, rBroker), // 3 metadata + k(zkBroker), // 4 leader and isr + k(zkBroker), // 5 stop replica + k(zkBroker), // 6 update metadata, actually not supported for a bit + k(zkBroker, rController), // 7 controlled shutdown, actually not supported for a bit + } +}) + +var max081 = nextMax(max080, func(v listenerKeys) listenerKeys { + return append(v, + k(zkBroker, rBroker), // 8 offset commit KAFKA-965 db37ed0054 + k(zkBroker, rBroker), // 9 offset fetch (same) + ) +}) + +var max082 = nextMax(max081, func(v listenerKeys) listenerKeys { + v[8].inc() // 1 offset commit KAFKA-1462 + v[9].inc() // 1 offset fetch KAFKA-1841 161b1aa16e I think? + return append(v, + k(zkBroker, rBroker), // 10 find coordinator KAFKA-1012 a670537aa3 + k(zkBroker, rBroker), // 11 join group (same) + k(zkBroker, rBroker), // 12 heartbeat (same) + ) +}) + +var max090 = nextMax(max082, func(v listenerKeys) listenerKeys { + v[0].inc() // 1 produce KAFKA-2136 436b7ddc38; KAFKA-2083 ?? 
KIP-13 + v[1].inc() // 1 fetch (same) + v[6].inc() // 1 update metadata KAFKA-2411 d02ca36ca1 + v[7].inc() // 1 controlled shutdown (same) + v[8].inc() // 2 offset commit KAFKA-1634 + return append(v, + k(zkBroker, rBroker), // 13 leave group KAFKA-2397 636e14a991 + k(zkBroker, rBroker), // 14 sync group KAFKA-2464 86eb74d923 + k(zkBroker, rBroker), // 15 describe groups KAFKA-2687 596c203af1 + k(zkBroker, rBroker), // 16 list groups KAFKA-2687 596c203af1 + ) +}) + +var max0100 = nextMax(max090, func(v listenerKeys) listenerKeys { + v[0].inc() // 2 produce KAFKA-3025 45c8195fa1 KIP-31 KIP-32 + v[1].inc() // 2 fetch (same) + v[3].inc() // 1 metadata KAFKA-3306 33d745e2dc + v[6].inc() // 2 update metadata KAFKA-1215 951e30adc6 + return append(v, + k(zkBroker, rBroker, rController), // 17 sasl handshake KAFKA-3149 5b375d7bf9 + k(zkBroker, rBroker, rController), // 18 api versions KAFKA-3307 8407dac6ee + ) +}) + +var max0101 = nextMax(max0100, func(v listenerKeys) listenerKeys { + v[1].inc() // 3 fetch KAFKA-2063 d04b0998c0 KIP-74 + v[2].inc() // 1 list offset KAFKA-4148 eaaa433fc9 KIP-79 + v[3].inc() // 2 metadata KAFKA-4093 ecc1fb10fa KIP-78 + v[11].inc() // 1 join group KAFKA-3888 40b1dd3f49 KIP-62 + return append(v, + k(zkBroker, rBroker, rController), // 19 create topics KAFKA-2945 fc47b9fa6b + k(zkBroker, rBroker, rController), // 20 delete topics KAFKA-2946 539633ba0e + ) +}) + +var max0102 = nextMax(max0101, func(v listenerKeys) listenerKeys { + v[6].inc() // 3 update metadata KAFKA-4565 d25671884b KIP-103 + v[19].inc() // 1 create topics KAFKA-4591 da57bc27e7 KIP-108 + return v +}) + +var max0110 = nextMax(max0102, func(v listenerKeys) listenerKeys { + v[0].inc() // 3 produce KAFKA-4816 5bd06f1d54 KIP-98 + v[1].inc() // 4 fetch (same) + v[1].inc() // 5 fetch KAFKA-4586 8b05ad406d KIP-107 + v[3].inc() // 4 metadata KAFKA-5291 7311dcbc53 (3 below) + v[9].inc() // 2 offset fetch KAFKA-3853 c2d9b95f36 KIP-98 + v[10].inc() // 1 find coordinator KAFKA-5043 d0e7c6b930 KIP-98 + v = append(v, + k(zkBroker, rBroker), // 21 delete records KAFKA-4586 see above + k(zkBroker, rBroker), // 22 init producer id KAFKA-4817 bdf4cba047 KIP-98 (raft added in KAFKA-12620 e97cff2702b6ba836c7925caa36ab18066a7c95d KIP-730) + k(zkBroker, rBroker), // 23 offset for leader epoch KAFKA-1211 0baea2ac13 KIP-101 + + k(zkBroker, rBroker), // 24 add partitions to txn KAFKA-4990 865d82af2c KIP-98 (raft 3.0 6e857c531f14d07d5b05f174e6063a124c917324) + k(zkBroker, rBroker), // 25 add offsets to txn (same, same raft) + k(zkBroker, rBroker), // 26 end txn (same, same raft) + k(zkBroker, rBroker), // 27 write txn markers (same) + k(zkBroker, rBroker), // 28 txn offset commit (same, same raft) + + // raft broker / controller added in 5b0c58ed53c420e93957369516f34346580dac95 + k(zkBroker, rBroker, rController), // 29 describe acls KAFKA-3266 9815e18fef KIP-140 + k(zkBroker, rBroker, rController), // 30 create acls (same) + k(zkBroker, rBroker, rController), // 31 delete acls (same) + + k(zkBroker, rBroker), // 32 describe configs KAFKA-3267 972b754536 KIP-133 + k(zkBroker, rBroker, rController), // 33 alter configs (same) (raft broker 3.0 6e857c531f14d07d5b05f174e6063a124c917324, controller 273d66479dbee2398b09e478ffaf996498d1ab34) + ) + + // KAFKA-4954 0104b657a1 KIP-124 + v[2].inc() // 2 list offset (reused in e71dce89c0 KIP-98) + v[3].inc() // 3 metadata + v[8].inc() // 3 offset commit + v[9].inc() // 3 offset fetch + v[11].inc() // 2 join group + v[12].inc() // 1 heartbeat + v[13].inc() // 1 leave group + v[14].inc() // 1 
sync group + v[15].inc() // 1 describe groups + v[16].inc() // 1 list group + v[18].inc() // 1 api versions + v[19].inc() // 2 create topics + v[20].inc() // 1 delete topics + + return v +}) + +var max100 = nextMax(max0110, func(v listenerKeys) listenerKeys { + v[0].inc() // 4 produce KAFKA-4763 fc93fb4b61 KIP-112 + v[1].inc() // 6 fetch (same) + v[3].inc() // 5 metadata (same) + v[4].inc() // 1 leader and isr (same) + v[6].inc() // 4 update metadata (same) + + v[0].inc() // 5 produce KAFKA-5793 94692288be + v[17].inc() // 1 sasl handshake KAFKA-4764 8fca432223 KIP-152 + + return append(v, + k(zkBroker, rBroker), // 34 alter replica log dirs KAFKA-5694 adefc8ea07 KIP-113 + k(zkBroker, rBroker), // 35 describe log dirs (same) + k(zkBroker, rBroker, rController), // 36 sasl authenticate KAFKA-4764 (see above) + k(zkBroker, rBroker, rController), // 37 create partitions KAFKA-5856 5f6393f9b1 KIP-195 (raft 3.0 6e857c531f14d07d5b05f174e6063a124c917324) + ) +}) + +var max110 = nextMax(max100, func(v listenerKeys) listenerKeys { + v = append(v, + k(zkBroker), // 38 create delegation token KAFKA-4541 27a8d0f9e7 under KAFKA-1696 KIP-48 + k(zkBroker), // 39 renew delegation token (same) + k(zkBroker), // 40 expire delegation token (same) + k(zkBroker), // 41 describe delegation token (same) + k(zkBroker, rBroker), // 42 delete groups KAFKA-6275 1ed6da7cc8 KIP-229 + ) + + v[1].inc() // 7 fetch KAFKA-6254 7fe1c2b3d3 KIP-227 + v[32].inc() // 1 describe configs KAFKA-6241 b814a16b96 KIP-226 + + return v +}) + +var max200 = nextMax(max110, func(v listenerKeys) listenerKeys { + v[0].inc() // 6 produce KAFKA-6028 1facab387f KIP-219 + v[1].inc() // 8 fetch (same) + v[2].inc() // 3 list offset (same) + v[3].inc() // 6 metadata (same) + v[8].inc() // 4 offset commit (same) + v[9].inc() // 4 offset fetch (same) + v[10].inc() // 2 find coordinator (same) + v[11].inc() // 3 join group (same) + v[12].inc() // 2 heartbeat (same) + v[13].inc() // 2 leave group (same) + v[14].inc() // 2 sync group (same) + v[15].inc() // 2 describe groups (same) + v[16].inc() // 2 list group (same) + v[18].inc() // 2 api versions (same) + v[19].inc() // 3 create topics (same) + v[20].inc() // 2 delete topics (same) + v[21].inc() // 1 delete records (same) + v[22].inc() // 1 init producer id (same) + v[24].inc() // 1 add partitions to txn (same) + v[25].inc() // 1 add offsets to txn (same) + v[26].inc() // 1 end txn (same) + v[28].inc() // 1 txn offset commit (same) + // 29, 30, 31 bumped below, but also had throttle changes + v[32].inc() // 2 describe configs (same) + v[33].inc() // 1 alter configs (same) + v[34].inc() // 1 alter replica log dirs (same) + v[35].inc() // 1 describe log dirs (same) + v[37].inc() // 1 create partitions (same) + v[38].inc() // 1 create delegation token (same) + v[39].inc() // 1 renew delegation token (same) + v[40].inc() // 1 expire delegation token (same) + v[41].inc() // 1 describe delegation token (same) + v[42].inc() // 1 delete groups (same) + + v[29].inc() // 1 describe acls KAFKA-6841 b3aa655a70 KIP-290 + v[30].inc() // 1 create acls (same) + v[31].inc() // 1 delete acls (same) + + v[23].inc() // 1 offset for leader epoch KAFKA-6361 9679c44d2b KIP-279 + return v +}) + +var max210 = nextMax(max200, func(v listenerKeys) listenerKeys { + v[8].inc() // 5 offset commit KAFKA-4682 418a91b5d4 KIP-211 + + v[20].inc() // 3 delete topics KAFKA-5975 04770916a7 KIP-322 + + v[1].inc() // 9 fetch KAFKA-7333 05ba5aa008 KIP-320 + v[2].inc() // 4 list offset (same) + v[3].inc() // 7 metadata (same) + v[8].inc() 
// 6 offset commit (same) + v[9].inc() // 5 offset fetch (same) + v[23].inc() // 2 offset for leader epoch (same, also in Kafka PR #5635 79ad9026a6) + v[28].inc() // 2 txn offset commit (same) + + v[0].inc() // 7 produce KAFKA-4514 741cb761c5 KIP-110 + v[1].inc() // 10 fetch (same) + return v +}) + +var max220 = nextMax(max210, func(v listenerKeys) listenerKeys { + v[2].inc() // 5 list offset KAFKA-2334 152292994e KIP-207 + v[11].inc() // 4 join group KAFKA-7824 9a9310d074 KIP-394 + v[36].inc() // 1 sasl authenticate KAFKA-7352 e8a3bc7425 KIP-368 + + v[4].inc() // 2 leader and isr KAFKA-7235 2155c6d54b KIP-380 + v[5].inc() // 1 stop replica (same) + v[6].inc() // 5 update metadata (same) + v[7].inc() // 2 controlled shutdown (same) + + return append(v, + k(zkBroker, rBroker, rController), // 43 elect preferred leaders KAFKA-5692 269b65279c KIP-183 (raft 3.0 6e857c531f14d07d5b05f174e6063a124c917324) + ) +}) + +var max230 = nextMax(max220, func(v listenerKeys) listenerKeys { + v[3].inc() // 8 metadata KAFKA-7922 a42f16f980 KIP-430 + v[15].inc() // 3 describe groups KAFKA-7922 f11fa5ef40 KIP-430 + + v[1].inc() // 11 fetch KAFKA-8365 e2847e8603 KIP-392 + v[23].inc() // 3 offset for leader epoch (same) + + v[11].inc() // 5 join group KAFKA-7862 0f995ba6be KIP-345 + v[8].inc() // 7 offset commit KAFKA-8225 9fa331b811 KIP-345 + v[12].inc() // 3 heartbeat (same) + v[14].inc() // 3 sync group (same) + + return append(v, + k(zkBroker, rBroker, rController), // 44 incremental alter configs KAFKA-7466 3b1524c5df KIP-339 + ) +}) + +var max240 = nextMax(max230, func(v listenerKeys) listenerKeys { + v[4].inc() // 3 leader and isr KAFKA-8345 81900d0ba0 KIP-455 + v[15].inc() // 4 describe groups KAFKA-8538 f8db022b08 KIP-345 + v[19].inc() // 4 create topics KAFKA-8305 8e161580b8 KIP-464 + v[43].inc() // 1 elect preferred leaders KAFKA-8286 121308cc7a KIP-460 + v = append(v, + // raft added in e07de97a4ce730a2755db7eeacb9b3e1f69a12c8 for the following two + k(zkBroker, rBroker, rController), // 45 alter partition reassignments KAFKA-8345 81900d0ba0 KIP-455 + k(zkBroker, rBroker, rController), // 46 list partition reassignments (same) + + k(zkBroker, rBroker), // 47 offset delete KAFKA-8730 e24d0e22ab KIP-496 + ) + + v[13].inc() // 3 leave group KAFKA-8221 74c90f46c3 KIP-345 + + // introducing flexible versions; 24 were bumped + v[3].inc() // 9 metadata KAFKA-8885 apache/kafka#7325 KIP-482 + v[4].inc() // 4 leader and isr (same) + v[5].inc() // 2 stop replica (same) + v[6].inc() // 6 update metadata (same) + v[7].inc() // 3 controlled shutdown (same) + v[8].inc() // 8 offset commit (same) + v[9].inc() // 6 offset fetch (same) + v[10].inc() // 3 find coordinator (same) + v[11].inc() // 6 join group (same) + v[12].inc() // 4 heartbeat (same) + v[13].inc() // 4 leave group (same) + v[14].inc() // 4 sync group (same) + v[15].inc() // 5 describe groups (same) + v[16].inc() // 3 list group (same) + v[18].inc() // 3 api versions (same, also KIP-511 [non-flexible fields added]) + v[19].inc() // 5 create topics (same) + v[20].inc() // 4 delete topics (same) + v[22].inc() // 2 init producer id (same) + v[38].inc() // 2 create delegation token (same) + v[42].inc() // 2 delete groups (same) + v[43].inc() // 2 elect preferred leaders (same) + v[44].inc() // 1 incremental alter configs (same) + // also 45, 46; not bumped since in same release + + // Create topics (19) was bumped up to 5 in KAFKA-8907 5d0052fe00 + // KIP-525, then 6 in the above bump, then back down to 5 once the + // tagged PR was merged (KAFKA-8932 
1f1179ea64 for the bump down). + + v[0].inc() // 8 produce KAFKA-8729 f6f24c4700 KIP-467 + + return v +}) + +var max250 = nextMax(max240, func(v listenerKeys) listenerKeys { + v[22].inc() // 3 init producer id KAFKA-8710 fecb977b25 KIP-360 + v[9].inc() // 7 offset fetch KAFKA-9346 6da70f9b95 KIP-447 + + // more flexible versions, KAFKA-9420 0a2569e2b99 KIP-482 + // 6 bumped, then sasl handshake reverted later in 1a8dcffe4 + v[36].inc() // 2 sasl authenticate + v[37].inc() // 2 create partitions + v[39].inc() // 2 renew delegation token + v[40].inc() // 2 expire delegation token + v[41].inc() // 2 describe delegation token + + v[28].inc() // 3 txn offset commit KAFKA-9365 ed7c071e07f KIP-447 + + v[29].inc() // 2 describe acls KAFKA-9026 40b35178e5 KIP-482 (for flexible versions) + v[30].inc() // 2 create acls KAFKA-9027 738e14edb KIP-482 (flexible) + v[31].inc() // 2 delete acls KAFKA-9028 738e14edb KIP-482 (flexible) + + v[11].inc() // 7 join group KAFKA-9437 96c4ce480 KIP-559 + v[14].inc() // 5 sync group (same) + + return v +}) + +var max260 = nextMax(max250, func(v listenerKeys) listenerKeys { + v[21].inc() // 2 delete records KAFKA-8768 f869e33ab KIP-482 (opportunistic bump for flexible versions) + v[35].inc() // 2 describe log dirs KAFKA-9435 4f1e8331ff9 KIP-482 (same) + + v = append(v, + k(zkBroker, rBroker), // 48 describe client quotas KAFKA-7740 227a7322b KIP-546 (raft in 5964401bf9aab611bd4a072941bd1c927e044258) + k(zkBroker, rBroker, rController), // 49 alter client quotas (same) + ) + + v[5].inc() // 3 stop replica KAFKA-9539 7c7d55dbd KIP-570 + + v[16].inc() // 4 list group KAFKA-9130 fe948d39e KIP-518 + v[32].inc() // 3 describe configs KAFKA-9494 af3b8b50f2 KIP-569 + + return v +}) + +var max270 = nextMax(max260, func(v listenerKeys) listenerKeys { + // KAFKA-10163 a5ffd1ca44c KIP-599 + v[37].inc() // 3 create partitions + v[19].inc() // 6 create topics (same) + v[20].inc() // 5 delete topics (same) + + // KAFKA-9911 b937ec7567 KIP-588 + v[22].inc() // 4 init producer id + v[24].inc() // 2 add partitions to txn + v[25].inc() // 2 add offsets to txn + v[26].inc() // 2 end txn + + v = append(v, + k(zkBroker, rBroker, rController), // 50 describe user scram creds, KAFKA-10259 e8524ccd8fca0caac79b844d87e98e9c055f76fb KIP-554; 38c409cf33c kraft + k(zkBroker, rBroker, rController), // 51 alter user scram creds, same + ) + + // KAFKA-10435 634c9175054cc69d10b6da22ea1e95edff6a4747 KIP-595 + // This opted in fetch request to flexible versions. + // + // KAFKA-10487: further change in aa5263fba903c85812c0c31443f7d49ee371e9db + v[1].inc() // 12 fetch + + // KAFKA-10492 b7c8490cf47b0c18253d6a776b2b35c76c71c65d KIP-595 + // + // These are the first requests that are raft only. 
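+	// (None of these keys carries the zkBroker listener, so the
+	// zkBrokerOf filter used by Stable, Tip, and the V* constructors
+	// maps them to -1, i.e. unsupported on a classic broker listener.)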
+ v = append(v, + k(rController), // 52 vote + k(rController), // 53 begin quorum epoch + k(rController), // 54 end quorum epoch + k(rBroker, rController), // 55 describe quorum + ) + + // KAFKA-8836 57de67db22eb373f92ec5dd449d317ed2bc8b8d1 KIP-497 + v = append(v, + k(zkBroker, rController), // 56 alter isr + ) + + // KAFKA-10028 fb4f297207ef62f71e4a6d2d0dac75752933043d KIP-584 + return append(v, + k(zkBroker, rBroker, rController), // 57 update features (rbroker 3.0 6e857c531f14d07d5b05f174e6063a124c917324; rcontroller 3.2 55ff5d360381af370fe5b3a215831beac49571a4 KIP-778 KAFKA-13823) + ) +}) + +var max280 = nextMax(max270, func(v listenerKeys) listenerKeys { + // KAFKA-10181 KAFKA-10181 KIP-590 + v = append(v, + k(zkBroker, rController), // 58 envelope, controller first, zk in KAFKA-14446 8b045dcbf6b89e1a9594ff95642d4882765e4b0d KIP-866 Kafka 3.4 + ) + + // KAFKA-10729 85f94d50271c952c3e9ee49c4fc814c0da411618 KIP-482 + // (flexible bumps) + v[0].inc() // 9 produce + v[2].inc() // 6 list offsets + v[23].inc() // 4 offset for leader epoch + v[24].inc() // 3 add partitions to txn + v[25].inc() // 3 add offsets to txn + v[26].inc() // 3 end txn + v[27].inc() // 1 write txn markers + v[32].inc() // 4 describe configs + v[33].inc() // 2 alter configs + v[34].inc() // 2 alter replica log dirs + v[48].inc() // 1 describe client quotas + v[49].inc() // 1 alter client quotas + + // KAFKA-10547 5c921afa4a593478f7d1c49e5db9d787558d0d5e KIP-516 + v[3].inc() // 10 metadata + v[6].inc() // 7 update metadata + + // KAFKA-10545 1dd1e7f945d7a8c1dc177223cd88800680f1ff46 KIP-516 + v[4].inc() // 5 leader and isr + + // KAFKA-10427 2023aed59d863278a6302e03066d387f994f085c KIP-630 + v = append(v, + k(rController), // 59 fetch snapshot + ) + + // KAFKA-12204 / KAFKA-10851 302eee63c479fd4b955c44f1058a5e5d111acb57 KIP-700 + v = append(v, + k(zkBroker, rBroker, rController), // 60 describe cluster; rController in KAFKA-15396 41b695b6e30baa4243d9ca4f359b833e17ed0e77 KIP-919 + ) + + // KAFKA-12212 7a1d1d9a69a241efd68e572badee999229b3942f KIP-700 + v[3].inc() // 11 metadata + + // KAFKA-10764 4f588f7ca2a1c5e8dd845863da81425ac69bac92 KIP-516 + v[19].inc() // 7 create topics + v[20].inc() // 6 delete topics + + // KAFKA-12238 e9edf104866822d9e6c3b637ffbf338767b5bf27 KIP-664 + v = append(v, + k(zkBroker, rBroker), // 61 describe producers + ) + + // KAFKA-12248 a022072df3c8175950c03263d2bbf2e3ea7a7a5d KIP-500 + // (commit mentions KIP-500, these are actually described in KIP-631) + // Broker registration was later updated in d9bb2ef596343da9402bff4903b129cff1f7c22b + v = append(v, + k(rController), // 62 broker registration + k(rController), // 63 broker heartbeat + ) + + // KAFKA-12249 3f36f9a7ca153a9d221f6bedeb7d1503aa18eff1 KIP-500 / KIP-631 + // Renamed from Decommission to Unregister in 06dce721ec0185d49fac37775dbf191d0e80e687 + v = append(v, + // kraft broker added in 7143267f71ca0c14957d8560fbc42a5f8aac564d + k(rBroker, rController), // 64 unregister broker + ) + return v +}) + +var max300 = nextMax(max280, func(v listenerKeys) listenerKeys { + // KAFKA-12267 3f09fb97b6943c0612488dfa8e5eab8078fd7ca0 KIP-664 + v = append(v, + k(zkBroker, rBroker), // 65 describe transactions + ) + + // KAFKA-12369 3708a7c6c1ecf1304f091dda1e79ae53ba2df489 KIP-664 + v = append(v, + k(zkBroker, rBroker), // 66 list transactions + ) + + // KAFKA-12620 72d108274c98dca44514007254552481c731c958 KIP-730 + // raft broker added in e97cff2702b6ba836c7925caa36ab18066a7c95d + v = append(v, + k(zkBroker, rController), // 67 allocate producer ids 
+ ) + + // KAFKA-12541 bd72ef1bf1e40feb3bc17349a385b479fa5fa530 KIP-734 + v[2].inc() // 7 list offsets + + // KAFKA-12663 f5d5f654db359af077088685e29fbe5ea69616cf KIP-699 + v[10].inc() // 4 find coordinator + + // KAFKA-12234 e00c0f3719ad0803620752159ef8315d668735d6 KIP-709 + v[9].inc() // 8 offset fetch + + return v +}) + +var max310 = nextMax(max300, func(v listenerKeys) listenerKeys { + // KAFKA-10580 2b8aff58b575c199ee8372e5689420c9d77357a5 KIP-516 + v[1].inc() // 13 fetch + + // KAFKA-10744 1d22b0d70686aef5689b775ea2ea7610a37f3e8c KIP-516 + v[3].inc() // 12 metadata + + return v +}) + +var max320 = nextMax(max310, func(v listenerKeys) listenerKeys { + // KAFKA-13495 69645f1fe5103adb00de6fa43152e7df989f3aea KIP-800 + v[11].inc() // 8 join group + + // KAFKA-13496 bf609694f83931990ce63e0123f811e6475820c5 KIP-800 + v[13].inc() // 5 leave group + + // KAFKA-13527 31fca1611a6780e8a8aa3ac21618135201718e32 KIP-784 + v[35].inc() // 3 describe log dirs + + // KAFKA-13435 c8fbe26f3bd3a7c018e7619deba002ee454208b9 KIP-814 + v[11].inc() // 9 join group + + // KAFKA-13587 52621613fd386203773ba93903abd50b46fa093a KIP-704 + v[4].inc() // 6 leader and isr + v[56].inc() // 1 alter isr => alter partition + + return v +}) + +var max330 = nextMax(max320, func(v listenerKeys) listenerKeys { + // KAFKA-13823 55ff5d360381af370fe5b3a215831beac49571a4 KIP-778 + v[57].inc() // 1 update features + + // KAFKA-13958 4fcfd9ddc4a8da3d4cfbb69268c06763352e29a9 KIP-827 + v[35].inc() // 4 describe log dirs + + // KAFKA-841 f83d95d9a28 KIP-841 + v[56].inc() // 2 alter partition + + // KAFKA-13888 a126e3a622f KIP-836 + v[55].inc() // 1 describe quorum + + // KAFKA-6945 d65d8867983 KIP-373 + v[29].inc() // 3 describe acls + v[30].inc() // 3 create acls + v[31].inc() // 3 delete acls + v[38].inc() // 3 create delegation token + v[41].inc() // 3 describe delegation token + + return v +}) + +var max340 = nextMax(max330, func(v listenerKeys) listenerKeys { + // KAFKA-14304 7b7e40a536a79cebf35cc278b9375c8352d342b9 KIP-866 + // KAFKA-14448 67c72596afe58363eceeb32084c5c04637a33831 added BrokerRegistration + // KAFKA-14493 db490707606855c265bc938e1b236070e0e2eba5 changed BrokerRegistration + // KAFKA-14304 0bb05d8679b684ad8fbb2eb40dfc00066186a75a changed BrokerRegistration back to a bool... 
+ // 5b521031edea8ea7cbcca7dc24a58429423740ff added tag to ApiVersions + v[4].inc() // 7 leader and isr + v[5].inc() // 4 stop replica + v[6].inc() // 8 update metadata + v[62].inc() // 1 broker registration + return v +}) + +var max350 = nextMax(max340, func(v listenerKeys) listenerKeys { + // KAFKA-13369 7146ac57ba9ddd035dac992b9f188a8e7677c08d KIP-405 + v[1].inc() // 14 fetch + v[2].inc() // 8 list offsets + + v[1].inc() // 15 fetch // KAFKA-14617 79b5f7f1ce2 KIP-903 + v[56].inc() // 3 alter partition // KAFKA-14617 8c88cdb7186b1d594f991eb324356dcfcabdf18a KIP-903 + return v +}) + +var max360 = nextMax(max350, func(v listenerKeys) listenerKeys { + // KAFKA-14402 29a1a16668d76a1cc04ec9e39ea13026f2dce1de KIP-890 + // Later commit swapped to stable + v[24].inc() // 4 add partitions to txn + return v +}) + +var max370 = nextMax(max360, func(v listenerKeys) listenerKeys { + // KAFKA-15661 c8f687ac1505456cb568de2b60df235eb1ceb5f0 KIP-951 + v[0].inc() // 10 produce + v[1].inc() // 16 fetch + + // 7826d5fc8ab695a5ad927338469ddc01b435a298 KIP-848 + // (change introduced in 3.6 but was marked unstable and not visible) + v[8].inc() // 9 offset commit + // KAFKA-14499 7054625c45dc6edb3c07271fe4a6c24b4638424f KIP-848 (and prior) + v[9].inc() // 9 offset fetch + + // KAFKA-15368 41b695b6e30baa4243d9ca4f359b833e17ed0e77 KIP-919 + // (added rController as well, see above) + v[60].inc() // 1 describe cluster + + // KAFKA-14391 3be7f7d611d0786f2f98159d5c7492b0d94a2bb7 KIP-848 + // as well as some patches following + v = append(v, + k(zkBroker, rBroker), // 68 consumer group heartbeat + ) + + return v +}) + +var max380 = nextMax(max370, func(v listenerKeys) listenerKeys { + // KAFKA-16314 2e8d69b78ca52196decd851c8520798aa856c073 KIP-890 + // Then error rename in cf1ba099c0723f9cf65dda4cd334d36b7ede6327 + v[0].inc() // 11 produce + v[10].inc() // 5 find coordinator + v[22].inc() // 5 init producer id + v[24].inc() // 5 add partitions to txn + v[25].inc() // 4 add offsets to txn + v[26].inc() // 4 end txn + v[28].inc() // 4 txn offset commit + + // KAFKA-15460 68745ef21a9d8fe0f37a8c5fbc7761a598718d46 KIP-848 + v[16].inc() // 5 list groups + + // KAFKA-14509 90e646052a17e3f6ec1a013d76c1e6af2fbb756e KIP-848 added + // 7b0352f1bd9b923b79e60b18b40f570d4bfafcc0 + // b7c99e22a77392d6053fe231209e1de32b50a98b + // 68389c244e720566aaa8443cd3fc0b9d2ec4bb7a + // 5f410ceb04878ca44d2d007655155b5303a47907 stabilized + v = append(v, + k(zkBroker, rBroker), // 69 consumer group describe + ) + + // KAFKA-16265 b4e96913cc6c827968e47a31261e0bd8fdf677b5 KIP-994 (part 1) + v[66].inc() + + return v +}) + +var ( + maxStable = max380 + maxTip = nextMax(maxStable, func(v listenerKeys) listenerKeys { + return v + }) +) diff --git a/vendor/github.com/twmb/franz-go/pkg/sasl/plain/plain.go b/vendor/github.com/twmb/franz-go/pkg/sasl/plain/plain.go new file mode 100644 index 00000000000..97a9369d137 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/sasl/plain/plain.go @@ -0,0 +1,60 @@ +// Package plain provides PLAIN sasl authentication as specified in RFC4616. +package plain + +import ( + "context" + "errors" + + "github.com/twmb/franz-go/pkg/sasl" +) + +// Auth contains information for authentication. +type Auth struct { + // Zid is an optional authorization ID to use in authenticating. + Zid string + + // User is username to use for authentication. + User string + + // Pass is the password to use for authentication. 
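+	// (PLAIN sends Zid, User, and Pass NUL-separated in the initial
+	// client message; Authenticate below rejects an empty User or Pass.)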
+ Pass string + + _ struct{} // require explicit field initialization +} + +// AsMechanism returns a sasl mechanism that will use 'a' as credentials for +// all sasl sessions. +// +// This is a shortcut for using the Plain function and is useful when you do +// not need to live-rotate credentials. +func (a Auth) AsMechanism() sasl.Mechanism { + return Plain(func(context.Context) (Auth, error) { + return a, nil + }) +} + +// Plain returns a sasl mechanism that will call authFn whenever sasl +// authentication is needed. The returned Auth is used for a single session. +func Plain(authFn func(context.Context) (Auth, error)) sasl.Mechanism { + return plain(authFn) +} + +type plain func(context.Context) (Auth, error) + +func (plain) Name() string { return "PLAIN" } +func (fn plain) Authenticate(ctx context.Context, _ string) (sasl.Session, []byte, error) { + auth, err := fn(ctx) + if err != nil { + return nil, nil, err + } + if auth.User == "" || auth.Pass == "" { + return nil, nil, errors.New("PLAIN user and pass must be non-empty") + } + return session{}, []byte(auth.Zid + "\x00" + auth.User + "\x00" + auth.Pass), nil +} + +type session struct{} + +func (session) Challenge([]byte) (bool, []byte, error) { + return true, nil, nil +} diff --git a/vendor/github.com/twmb/franz-go/pkg/sasl/sasl.go b/vendor/github.com/twmb/franz-go/pkg/sasl/sasl.go new file mode 100644 index 00000000000..dd85a02a188 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/pkg/sasl/sasl.go @@ -0,0 +1,41 @@ +// Package sasl specifies interfaces that any SASL authentication must provide +// to interop with Kafka SASL. +package sasl + +import "context" + +// Session is an authentication session. +type Session interface { + // Challenge is called with a server response. This must return + // if the authentication is done, or, if not, the next message + // to send. If the authentication is done, this can return an + // additional last message to be written (for which we will not + // read a response). + // + // Returning an error stops the authentication flow. + Challenge([]byte) (bool, []byte, error) +} + +// Mechanism authenticates with SASL. +type Mechanism interface { + // Name is the name of this SASL authentication mechanism. + Name() string + + // Authenticate initializes an authentication session to the provided + // host:port. If the mechanism is a client-first authentication + // mechanism, this also returns the first message to write. + // + // If initializing a session fails, this can return an error to stop + // the authentication flow. + // + // The provided context can be used through the duration of the session. + Authenticate(ctx context.Context, host string) (Session, []byte, error) +} + +// ClosingMechanism is an optional interface for SASL mechanisms. Implementing +// this interface signals that the mechanism should be closed if it will never +// be used again. +type ClosingMechanism interface { + // Close permanently closes a mechanism. + Close() +} diff --git a/vendor/github.com/twmb/franz-go/plugin/kotel/LICENSE b/vendor/github.com/twmb/franz-go/plugin/kotel/LICENSE new file mode 100644 index 00000000000..36e18034325 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/plugin/kotel/LICENSE @@ -0,0 +1,24 @@ +Copyright 2020, Travis Bischel. +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the library nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/twmb/franz-go/plugin/kotel/README.md b/vendor/github.com/twmb/franz-go/plugin/kotel/README.md new file mode 100644 index 00000000000..4ed1b5ae781 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/plugin/kotel/README.md @@ -0,0 +1,194 @@ +kotel +=== + +Kotel is an OpenTelemetry instrumentation plug-in package for franz-go. It +provides [tracing](https://pkg.go.dev/go.opentelemetry.io/otel/trace) +and [metrics](https://pkg.go.dev/go.opentelemetry.io/otel/metric) options +through +a [`kgo.Hook`](https://pkg.go.dev/github.com/twmb/franz-go/pkg/kgo#Hook). With +kotel, you can trace records produced or consumed with franz-go. You can pass +parent traces into records and extract parent traces from records. It also +tracks metrics related to connections, errors, and bytes transferred. + +To learn more about how to use kotel, see the usage sections in the README and +refer to the [OpenTelemetry documentation](https://opentelemetry.io/docs) for +additional information about OpenTelemetry and how it can be used in your +franz-go projects. + +## Tracing + +kotel provides tracing capabilities for Kafka using OpenTelemetry +specifications. It allows for the creation of three different span +operations: "publish", "receive", and "process". Additionally, it also provides +a set of attributes to use with these spans. + +### How it works + +The kotel tracer module uses hooks to automatically create and close "publish" +and "receive" spans as a `kgo.Record` flows through the application. However, +for the "process" span, it uses a convenience method that must be manually +invoked and closed in the consumer code to capture processing. 
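+
+For instance, the manual step looks like this (a condensed sketch; the full
+version appears under "Processing records" below):
+
+```go
+ctx, span := tracer.WithProcessSpan(record)
+defer span.End()
+// ... process the record, optionally passing ctx downstream ...
+```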
+
+The following table provides a visual representation of the lineage of the
+span operations:
+
+| Order | Hook/Method                     | Operation | State |
+|-------|---------------------------------|-----------|-------|
+| 1     | kgo.HookProduceRecordBuffered   | Publish   | Start |
+| 2     | kgo.HookProduceRecordUnbuffered | Publish   | End   |
+| 3     | kgo.HookFetchRecordBuffered     | Receive   | Start |
+| 4     | kgo.HookFetchRecordUnbuffered   | Receive   | End   |
+| 5     | kotel.Tracer.WithProcessSpan    | Process   | Start |
+
+### Getting started
+
+To start using kotel for tracing, you will need to:
+
+1. Set up a tracer provider
+2. Configure any desired tracer options
+3. Create a new kotel tracer
+4. Create a new kotel service hook
+5. Create a new Kafka client and pass in the kotel hook
+
+Here's an example of how you might do this:
+
+```go
+// Initialize tracer provider.
+tracerProvider, err := initTracerProvider()
+
+// Create a new kotel tracer.
+tracerOpts := []kotel.TracerOpt{
+	kotel.TracerProvider(tracerProvider),
+	kotel.TracerPropagator(propagation.NewCompositeTextMapPropagator(propagation.TraceContext{})),
+}
+tracer := kotel.NewTracer(tracerOpts...)
+
+// Create a new kotel service.
+kotelOps := []kotel.Opt{
+	kotel.WithTracer(tracer),
+}
+kotelService := kotel.NewKotel(kotelOps...)
+
+// Create a new Kafka client.
+cl, err := kgo.NewClient(
+	// Pass in the kotel hook.
+	kgo.WithHooks(kotelService.Hooks()...),
+	// ...other opts.
+)
+```
+
+### Sending records
+
+When producing a record with franz-go, it will be traced by kotel. To include
+parent traces, pass in an instrumented context.
+
+Here's an example of how to do this:
+
+```go
+func httpHandler(w http.ResponseWriter, r *http.Request) {
+	// Start a new span with options.
+	opts := []trace.SpanStartOption{
+		trace.WithSpanKind(trace.SpanKindServer),
+		trace.WithAttributes([]attribute.KeyValue{attribute.String("some-key", "foo")}...),
+	}
+	ctx, span := tracer.Start(r.Context(), "request", opts...)
+	// End the span when the function exits.
+	defer span.End()
+
+	var wg sync.WaitGroup
+	wg.Add(1)
+	record := &kgo.Record{Topic: "topic", Value: []byte("foo")}
+	// Pass in the context from the tracer.Start() call to ensure that the span
+	// created is linked to the parent span.
+	cl.Produce(ctx, record, func(_ *kgo.Record, err error) {
+		defer wg.Done()
+		if err != nil {
+			fmt.Printf("record had a produce error: %v\n", err)
+			span.SetStatus(codes.Error, err.Error())
+			span.RecordError(err)
+		}
+	})
+	wg.Wait()
+}
+```
+
+### Processing records
+
+Use the `kotel.Tracer.WithProcessSpan` method to start a "process" span. Make
+sure to end the span after you finish processing the record. The trace can be
+continued to the next processing step if desired.
+
+Here is an example of how you might do this:
+
+```go
+func processRecord(record *kgo.Record, tracer *kotel.Tracer) {
+	ctx, span := tracer.WithProcessSpan(record)
+	// Process the record here.
+	// End the span when the function exits.
+	defer span.End()
+	// Optionally pass ctx to the next processing step.
+	_ = ctx
+	fmt.Printf(
+		"processed offset '%s' with key '%s' and value '%s'\n",
+		strconv.FormatInt(record.Offset, 10),
+		string(record.Key),
+		string(record.Value),
+	)
+}
+```
+
+## Metrics
+
+The kotel meter module tracks various metrics related to the processing of
+records, such as the number of successful and unsuccessful connections, bytes
+written and read, and the number of buffered records.
These metrics are all +counters and are tracked under the following names: + +``` +messaging.kafka.connects.count{node_id = "#{node}"} +messaging.kafka.connect_errors.count{node_id = "#{node}"} +messaging.kafka.disconnects.count{node_id = "#{node}"} +messaging.kafka.write_errors.count{node_id = "#{node}"} +messaging.kafka.write_bytes{node_id = "#{node}"} +messaging.kafka.read_errors.count{node_id = "#{node}"} +messaging.kafka.read_bytes.count{node_id = "#{node}"} +messaging.kafka.produce_bytes.count{node_id = "#{node}", topic = "#{topic}"} +messaging.kafka.produce_records.count{node_id = "#{node}", topic = "#{topic}"} +messaging.kafka.fetch_bytes.count{node_id = "#{node}", topic = "#{topic}"} +messaging.kafka.fetch_records.count{node_id = "#{node}", topic = "#{topic}"} +``` + +### Getting started + +To start using kotel for metrics, you will need to: + +1. Set up a meter provider +2. Configure any desired meter options +3. Create a new kotel meter +4. Create a new kotel service hook +5. Create a new Kafka client and pass in the kotel hook + +Here's an example of how you might do this: + +```go +// Initialize meter provider. +meterProvider, err := initMeterProvider() + +// Create a new kotel meter. +meterOpts := []kotel.MeterOpt{kotel.MeterProvider(meterProvider)} +meter := kotel.NewMeter(meterOpts...) + +// Pass the meter to NewKotel hook. +kotelOps := []kotel.Opt{ + kotel.WithMeter(meter), +} + +// Create a new kotel service. +kotelService := kotel.NewKotel(kotelOps...) + +// Create a new Kafka client. +cl, err := kgo.NewClient( + // Pass in the kotel hook. + kgo.WithHooks(kotelService.Hooks()...), + // ...other opts. +) +``` diff --git a/vendor/github.com/twmb/franz-go/plugin/kotel/carrier.go b/vendor/github.com/twmb/franz-go/plugin/kotel/carrier.go new file mode 100644 index 00000000000..e851ab6f9fa --- /dev/null +++ b/vendor/github.com/twmb/franz-go/plugin/kotel/carrier.go @@ -0,0 +1,53 @@ +package kotel + +import ( + "github.com/twmb/franz-go/pkg/kgo" +) + +// RecordCarrier injects and extracts traces from a kgo.Record. +// +// This type exists to satisfy the otel/propagation.TextMapCarrier interface. +type RecordCarrier struct { + record *kgo.Record +} + +// NewRecordCarrier creates a new RecordCarrier. +func NewRecordCarrier(record *kgo.Record) RecordCarrier { + return RecordCarrier{record: record} +} + +// Get retrieves a single value for a given key if it exists. +func (c RecordCarrier) Get(key string) string { + for _, h := range c.record.Headers { + if h.Key == key { + return string(h.Value) + } + } + return "" +} + +// Set sets a header. +func (c RecordCarrier) Set(key, val string) { + // Check if key already exists. + for i, h := range c.record.Headers { + if h.Key == key { + // Key exist, update the value. + c.record.Headers[i].Value = []byte(val) + return + } + } + // Key does not exist, append new header. + c.record.Headers = append(c.record.Headers, kgo.RecordHeader{ + Key: key, + Value: []byte(val), + }) +} + +// Keys returns a slice of all key identifiers in the carrier. 
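+//
+// Together with Get and Set above, this completes the otel
+// propagation.TextMapCarrier interface.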
+func (c RecordCarrier) Keys() []string { + out := make([]string, len(c.record.Headers)) + for i, h := range c.record.Headers { + out[i] = h.Key + } + return out +} diff --git a/vendor/github.com/twmb/franz-go/plugin/kotel/kotel.go b/vendor/github.com/twmb/franz-go/plugin/kotel/kotel.go new file mode 100644 index 00000000000..47e443b8ea4 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/plugin/kotel/kotel.go @@ -0,0 +1,61 @@ +package kotel + +import ( + "github.com/twmb/franz-go/pkg/kgo" +) + +const ( + instrumentationName = "github.com/twmb/franz-go/plugin/kotel" +) + +// Kotel represents the configuration options available for the kotel plugin. +type Kotel struct { + meter *Meter + tracer *Tracer +} + +// Opt interface used for setting optional kotel properties. +type Opt interface{ apply(*Kotel) } + +type optFunc func(*Kotel) + +func (o optFunc) apply(c *Kotel) { o(c) } + +// WithTracer configures Kotel with a Tracer. +func WithTracer(t *Tracer) Opt { + return optFunc(func(k *Kotel) { + if t != nil { + k.tracer = t + } + }) +} + +// WithMeter configures Kotel with a Meter. +func WithMeter(m *Meter) Opt { + return optFunc(func(k *Kotel) { + if m != nil { + k.meter = m + } + }) +} + +// Hooks return a list of kgo.hooks compatible with its interface. +func (k *Kotel) Hooks() []kgo.Hook { + var hooks []kgo.Hook + if k.tracer != nil { + hooks = append(hooks, k.tracer) + } + if k.meter != nil { + hooks = append(hooks, k.meter) + } + return hooks +} + +// NewKotel creates a new Kotel struct and applies opts to it. +func NewKotel(opts ...Opt) *Kotel { + k := &Kotel{} + for _, opt := range opts { + opt.apply(k) + } + return k +} diff --git a/vendor/github.com/twmb/franz-go/plugin/kotel/meter.go b/vendor/github.com/twmb/franz-go/plugin/kotel/meter.go new file mode 100644 index 00000000000..8c56a8c7002 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/plugin/kotel/meter.go @@ -0,0 +1,375 @@ +package kotel + +import ( + "context" + "log" + "math" + "net" + "strconv" + "time" + + "github.com/twmb/franz-go/pkg/kgo" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + semconv "go.opentelemetry.io/otel/semconv/v1.18.0" +) + +var ( // interface checks to ensure we implement the hooks properly + _ kgo.HookBrokerConnect = new(Meter) + _ kgo.HookBrokerDisconnect = new(Meter) + _ kgo.HookBrokerWrite = new(Meter) + _ kgo.HookBrokerRead = new(Meter) + _ kgo.HookProduceBatchWritten = new(Meter) + _ kgo.HookFetchBatchRead = new(Meter) +) + +const ( + dimensionless = "1" + bytes = "by" +) + +type Meter struct { + provider metric.MeterProvider + meter metric.Meter + instruments instruments + + mergeConnectsMeter bool +} + +// MeterOpt interface used for setting optional config properties. +type MeterOpt interface { + apply(*Meter) +} + +type meterOptFunc func(*Meter) + +// MeterProvider takes a metric.MeterProvider and applies it to the Meter +// If none is specified, the global provider is used. +func MeterProvider(provider metric.MeterProvider) MeterOpt { + return meterOptFunc(func(m *Meter) { + if provider != nil { + m.provider = provider + } + }) +} + +// WithMergedConnectsMeter merges the `messaging.kafka.connect_errors.count` +// counter into the `messaging.kafka.connects.count` counter, adding an +// attribute "outcome" with the values "success" or "failure". This option +// shall be used when a single metric with different dimensions is preferred +// over two separate metrics that produce data at alternating intervals. 
+// For example, it becomes possible to alert on the metric no longer +// producing data. +func WithMergedConnectsMeter() MeterOpt { + return meterOptFunc(func(m *Meter) { + m.mergeConnectsMeter = true + }) + +} + +func (o meterOptFunc) apply(m *Meter) { + o(m) +} + +// NewMeter returns a Meter, used as option for kotel to instrument franz-go +// with instruments. +func NewMeter(opts ...MeterOpt) *Meter { + m := &Meter{} + for _, opt := range opts { + opt.apply(m) + } + if m.provider == nil { + m.provider = otel.GetMeterProvider() + } + m.meter = m.provider.Meter( + instrumentationName, + metric.WithInstrumentationVersion(semVersion()), + metric.WithSchemaURL(semconv.SchemaURL), + ) + m.instruments = m.newInstruments() + return m +} + +// instruments --------------------------------------------------------------- + +type instruments struct { + connects metric.Int64Counter + connectErrs metric.Int64Counter + disconnects metric.Int64Counter + + writeErrs metric.Int64Counter + writeBytes metric.Int64Counter + + readErrs metric.Int64Counter + readBytes metric.Int64Counter + + produceBytes metric.Int64Counter + produceRecords metric.Int64Counter + fetchBytes metric.Int64Counter + fetchRecords metric.Int64Counter +} + +func (m *Meter) newInstruments() instruments { + // connects and disconnects + connects, err := m.meter.Int64Counter( + "messaging.kafka.connects.count", + metric.WithUnit(dimensionless), + metric.WithDescription("Total number of connections opened, by broker"), + ) + if err != nil { + log.Printf("failed to create connects instrument, %v", err) + } + + var connectErrs metric.Int64Counter + if !m.mergeConnectsMeter { + var err error + connectErrs, err = m.meter.Int64Counter( + "messaging.kafka.connect_errors.count", + metric.WithUnit(dimensionless), + metric.WithDescription("Total number of connection errors, by broker"), + ) + if err != nil { + log.Printf("failed to create connectErrs instrument, %v", err) + } + } + + disconnects, err := m.meter.Int64Counter( + "messaging.kafka.disconnects.count", + metric.WithUnit(dimensionless), + metric.WithDescription("Total number of connections closed, by broker"), + ) + if err != nil { + log.Printf("failed to create disconnects instrument, %v", err) + } + + // write + + writeErrs, err := m.meter.Int64Counter( + "messaging.kafka.write_errors.count", + metric.WithUnit(dimensionless), + metric.WithDescription("Total number of write errors, by broker"), + ) + if err != nil { + log.Printf("failed to create writeErrs instrument, %v", err) + } + + writeBytes, err := m.meter.Int64Counter( + "messaging.kafka.write_bytes", + metric.WithUnit(bytes), + metric.WithDescription("Total number of bytes written, by broker"), + ) + if err != nil { + log.Printf("failed to create writeBytes instrument, %v", err) + } + + // read + + readErrs, err := m.meter.Int64Counter( + "messaging.kafka.read_errors.count", + metric.WithUnit(dimensionless), + metric.WithDescription("Total number of read errors, by broker"), + ) + if err != nil { + log.Printf("failed to create readErrs instrument, %v", err) + } + + readBytes, err := m.meter.Int64Counter( + "messaging.kafka.read_bytes.count", + metric.WithUnit(bytes), + metric.WithDescription("Total number of bytes read, by broker"), + ) + if err != nil { + log.Printf("failed to create readBytes instrument, %v", err) + } + + // produce & consume + + produceBytes, err := m.meter.Int64Counter( + "messaging.kafka.produce_bytes.count", + metric.WithUnit(bytes), + metric.WithDescription("Total number of uncompressed bytes produced, 
by broker and topic"), + ) + if err != nil { + log.Printf("failed to create produceBytes instrument, %v", err) + } + + produceRecords, err := m.meter.Int64Counter( + "messaging.kafka.produce_records.count", + metric.WithUnit(dimensionless), + metric.WithDescription("Total number of produced records, by broker and topic"), + ) + if err != nil { + log.Printf("failed to create produceRecords instrument, %v", err) + } + + fetchBytes, err := m.meter.Int64Counter( + "messaging.kafka.fetch_bytes.count", + metric.WithUnit(bytes), + metric.WithDescription("Total number of uncompressed bytes fetched, by broker and topic"), + ) + if err != nil { + log.Printf("failed to create fetchBytes instrument, %v", err) + } + + fetchRecords, err := m.meter.Int64Counter( + "messaging.kafka.fetch_records.count", + metric.WithUnit(dimensionless), + metric.WithDescription("Total number of fetched records, by broker and topic"), + ) + if err != nil { + log.Printf("failed to create fetchRecords instrument, %v", err) + } + + return instruments{ + connects: connects, + connectErrs: connectErrs, + disconnects: disconnects, + + writeErrs: writeErrs, + writeBytes: writeBytes, + + readErrs: readErrs, + readBytes: readBytes, + + produceBytes: produceBytes, + produceRecords: produceRecords, + fetchBytes: fetchBytes, + fetchRecords: fetchRecords, + } +} + +// Helpers ------------------------------------------------------------------- + +func strnode(node int32) string { + if node < 0 { + return "seed_" + strconv.Itoa(int(node)-math.MinInt32) + } + return strconv.Itoa(int(node)) +} + +// Hooks --------------------------------------------------------------------- + +func (m *Meter) OnBrokerConnect(meta kgo.BrokerMetadata, _ time.Duration, _ net.Conn, err error) { + node := strnode(meta.NodeID) + + if m.mergeConnectsMeter { + if err != nil { + m.instruments.connects.Add( + context.Background(), + 1, + metric.WithAttributeSet(attribute.NewSet( + attribute.String("node_id", node), + attribute.String("outcome", "failure"), + )), + ) + return + } + m.instruments.connects.Add( + context.Background(), + 1, + metric.WithAttributeSet(attribute.NewSet( + attribute.String("node_id", node), + attribute.String("outcome", "success"), + )), + ) + return + } + + attributes := attribute.NewSet(attribute.String("node_id", node)) + if err != nil { + m.instruments.connectErrs.Add( + context.Background(), + 1, + metric.WithAttributeSet(attributes), + ) + return + } + m.instruments.connects.Add( + context.Background(), + 1, + metric.WithAttributeSet(attributes), + ) +} + +func (m *Meter) OnBrokerDisconnect(meta kgo.BrokerMetadata, _ net.Conn) { + node := strnode(meta.NodeID) + attributes := attribute.NewSet(attribute.String("node_id", node)) + m.instruments.disconnects.Add( + context.Background(), + 1, + metric.WithAttributeSet(attributes), + ) +} + +func (m *Meter) OnBrokerWrite(meta kgo.BrokerMetadata, _ int16, bytesWritten int, _, _ time.Duration, err error) { + node := strnode(meta.NodeID) + attributes := attribute.NewSet(attribute.String("node_id", node)) + if err != nil { + m.instruments.writeErrs.Add( + context.Background(), + 1, + metric.WithAttributeSet(attributes), + ) + return + } + m.instruments.writeBytes.Add( + context.Background(), + int64(bytesWritten), + metric.WithAttributeSet(attributes), + ) +} + +func (m *Meter) OnBrokerRead(meta kgo.BrokerMetadata, _ int16, bytesRead int, _, _ time.Duration, err error) { + node := strnode(meta.NodeID) + attributes := attribute.NewSet(attribute.String("node_id", node)) + if err != nil { + 
m.instruments.readErrs.Add( + context.Background(), + 1, + metric.WithAttributeSet(attributes), + ) + return + } + m.instruments.readBytes.Add( + context.Background(), + int64(bytesRead), + metric.WithAttributeSet(attributes), + ) +} + +func (m *Meter) OnProduceBatchWritten(meta kgo.BrokerMetadata, topic string, _ int32, pbm kgo.ProduceBatchMetrics) { + node := strnode(meta.NodeID) + attributes := attribute.NewSet( + attribute.String("node_id", node), + attribute.String("topic", topic), + ) + m.instruments.produceBytes.Add( + context.Background(), + int64(pbm.UncompressedBytes), + metric.WithAttributeSet(attributes), + ) + m.instruments.produceRecords.Add( + context.Background(), + int64(pbm.NumRecords), + metric.WithAttributeSet(attributes), + ) +} + +func (m *Meter) OnFetchBatchRead(meta kgo.BrokerMetadata, topic string, _ int32, fbm kgo.FetchBatchMetrics) { + node := strnode(meta.NodeID) + attributes := attribute.NewSet( + attribute.String("node_id", node), + attribute.String("topic", topic), + ) + m.instruments.fetchBytes.Add( + context.Background(), + int64(fbm.UncompressedBytes), + metric.WithAttributeSet(attributes), + ) + m.instruments.fetchRecords.Add( + context.Background(), + int64(fbm.NumRecords), + metric.WithAttributeSet(attributes), + ) +} diff --git a/vendor/github.com/twmb/franz-go/plugin/kotel/tracer.go b/vendor/github.com/twmb/franz-go/plugin/kotel/tracer.go new file mode 100644 index 00000000000..0e3f6cf518b --- /dev/null +++ b/vendor/github.com/twmb/franz-go/plugin/kotel/tracer.go @@ -0,0 +1,254 @@ +package kotel + +import ( + "context" + "unicode/utf8" + + "github.com/twmb/franz-go/pkg/kgo" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/propagation" + semconv "go.opentelemetry.io/otel/semconv/v1.18.0" + "go.opentelemetry.io/otel/trace" +) + +var ( // interface checks to ensure we implement the hooks properly. + _ kgo.HookProduceRecordBuffered = new(Tracer) + _ kgo.HookProduceRecordUnbuffered = new(Tracer) + _ kgo.HookFetchRecordBuffered = new(Tracer) + _ kgo.HookFetchRecordUnbuffered = new(Tracer) +) + +type Tracer struct { + tracerProvider trace.TracerProvider + propagators propagation.TextMapPropagator + tracer trace.Tracer + clientID string + consumerGroup string + keyFormatter func(*kgo.Record) (string, error) +} + +// TracerOpt interface used for setting optional config properties. +type TracerOpt interface{ apply(*Tracer) } + +type tracerOptFunc func(*Tracer) + +func (o tracerOptFunc) apply(t *Tracer) { o(t) } + +// TracerProvider takes a trace.TracerProvider and applies it to the Tracer. +// If none is specified, the global provider is used. +func TracerProvider(provider trace.TracerProvider) TracerOpt { + return tracerOptFunc(func(t *Tracer) { t.tracerProvider = provider }) +} + +// TracerPropagator takes a propagation.TextMapPropagator and applies it to the +// Tracer. +// +// If none is specified, the global Propagator is used. +func TracerPropagator(propagator propagation.TextMapPropagator) TracerOpt { + return tracerOptFunc(func(t *Tracer) { t.propagators = propagator }) +} + +// ClientID sets the optional client_id attribute value. +func ClientID(id string) TracerOpt { + return tracerOptFunc(func(t *Tracer) { t.clientID = id }) +} + +// ConsumerGroup sets the optional group attribute value. 
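+//
+// A sketch of setting both optional attributes (values are illustrative):
+//
+//	tracer := kotel.NewTracer(
+//		kotel.ClientID("my-client"),
+//		kotel.ConsumerGroup("my-group"),
+//	)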
+func ConsumerGroup(group string) TracerOpt { + return tracerOptFunc(func(t *Tracer) { t.consumerGroup = group }) +} + +// KeyFormatter formats a Record's key for use in a span's attributes, +// overriding the default of string(Record.Key). +// +// This option can be used to parse binary data and return a canonical string +// representation. If the returned string is not valid UTF-8 or if the +// formatter returns an error, the key is not attached to the span. +func KeyFormatter(fn func(*kgo.Record) (string, error)) TracerOpt { + return tracerOptFunc(func(t *Tracer) { t.keyFormatter = fn }) +} + +// NewTracer returns a Tracer, used as option for kotel to instrument franz-go +// with tracing. +func NewTracer(opts ...TracerOpt) *Tracer { + t := &Tracer{} + for _, opt := range opts { + opt.apply(t) + } + if t.tracerProvider == nil { + t.tracerProvider = otel.GetTracerProvider() + } + if t.propagators == nil { + t.propagators = otel.GetTextMapPropagator() + } + t.tracer = t.tracerProvider.Tracer( + instrumentationName, + trace.WithInstrumentationVersion(semVersion()), + trace.WithSchemaURL(semconv.SchemaURL), + ) + return t +} + +func (t *Tracer) maybeKeyAttr(attrs []attribute.KeyValue, r *kgo.Record) []attribute.KeyValue { + if r.Key == nil { + return attrs + } + var keykey string + if t.keyFormatter != nil { + k, err := t.keyFormatter(r) + if err != nil || !utf8.ValidString(k) { + return attrs + } + keykey = k + } else { + if !utf8.Valid(r.Key) { + return attrs + } + keykey = string(r.Key) + } + return append(attrs, semconv.MessagingKafkaMessageKeyKey.String(keykey)) +} + +// WithProcessSpan starts a new span for the "process" operation on a consumer +// record. +// +// It sets up the span options. The user's application code is responsible for +// ending the span. +// +// This should only ever be called within a polling loop of a consumed record and +// not a record which has been created for producing, so call this at the start of each +// iteration of your processing for the record. +func (t *Tracer) WithProcessSpan(r *kgo.Record) (context.Context, trace.Span) { + // Set up the span options. + attrs := []attribute.KeyValue{ + semconv.MessagingSystemKey.String("kafka"), + semconv.MessagingSourceKindTopic, + semconv.MessagingSourceName(r.Topic), + semconv.MessagingOperationProcess, + semconv.MessagingKafkaSourcePartition(int(r.Partition)), + semconv.MessagingKafkaMessageOffset(int(r.Offset)), + } + attrs = t.maybeKeyAttr(attrs, r) + if t.clientID != "" { + attrs = append(attrs, semconv.MessagingKafkaClientIDKey.String(t.clientID)) + } + if t.consumerGroup != "" { + attrs = append(attrs, semconv.MessagingKafkaConsumerGroupKey.String(t.consumerGroup)) + } + if r.Key != nil && r.Value == nil { + attrs = append(attrs, semconv.MessagingKafkaMessageTombstoneKey.Bool(true)) + } + opts := []trace.SpanStartOption{ + trace.WithAttributes(attrs...), + trace.WithSpanKind(trace.SpanKindConsumer), + } + + if r.Context == nil { + r.Context = context.Background() + } + // Start a new span using the provided context and options. + return t.tracer.Start(r.Context, r.Topic+" process", opts...) +} + +// Hooks ---------------------------------------------------------------------- + +// OnProduceRecordBuffered starts a new span for the "publish" operation on a +// buffered record. +// +// It sets span options and injects the span context into record and updates +// the record's context, so it can be ended in the OnProduceRecordUnbuffered +// hook. 
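+//
+// kgo calls this hook itself once the tracer is installed with
+// kgo.WithHooks; application code does not invoke it directly.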
+func (t *Tracer) OnProduceRecordBuffered(r *kgo.Record) { + // Set up span options. + attrs := []attribute.KeyValue{ + semconv.MessagingSystemKey.String("kafka"), + semconv.MessagingDestinationKindTopic, + semconv.MessagingDestinationName(r.Topic), + semconv.MessagingOperationPublish, + } + attrs = t.maybeKeyAttr(attrs, r) + if t.clientID != "" { + attrs = append(attrs, semconv.MessagingKafkaClientIDKey.String(t.clientID)) + } + if r.Key != nil && r.Value == nil { + attrs = append(attrs, semconv.MessagingKafkaMessageTombstoneKey.Bool(true)) + } + opts := []trace.SpanStartOption{ + trace.WithAttributes(attrs...), + trace.WithSpanKind(trace.SpanKindProducer), + } + // Start the "publish" span. + ctx, _ := t.tracer.Start(r.Context, r.Topic+" publish", opts...) + // Inject the span context into the record. + t.propagators.Inject(ctx, NewRecordCarrier(r)) + // Update the record context. + r.Context = ctx +} + +// OnProduceRecordUnbuffered continues and ends the "publish" span for an +// unbuffered record. +// +// It sets attributes with values unset when producing and records any error +// that occurred during the publish operation. +func (t *Tracer) OnProduceRecordUnbuffered(r *kgo.Record, err error) { + span := trace.SpanFromContext(r.Context) + defer span.End() + span.SetAttributes( + semconv.MessagingKafkaDestinationPartition(int(r.Partition)), + ) + if err != nil { + span.SetStatus(codes.Error, err.Error()) + span.RecordError(err) + } +} + +// OnFetchRecordBuffered starts a new span for the "receive" operation on a +// buffered record. +// +// It sets the span options and extracts the span context from the record, +// updates the record's context to ensure it can be ended in the +// OnFetchRecordUnbuffered hook and can be used in downstream consumer +// processing. +func (t *Tracer) OnFetchRecordBuffered(r *kgo.Record) { + // Set up the span options. + attrs := []attribute.KeyValue{ + semconv.MessagingSystemKey.String("kafka"), + semconv.MessagingSourceKindTopic, + semconv.MessagingSourceName(r.Topic), + semconv.MessagingOperationReceive, + semconv.MessagingKafkaSourcePartition(int(r.Partition)), + } + attrs = t.maybeKeyAttr(attrs, r) + if t.clientID != "" { + attrs = append(attrs, semconv.MessagingKafkaClientIDKey.String(t.clientID)) + } + if t.consumerGroup != "" { + attrs = append(attrs, semconv.MessagingKafkaConsumerGroupKey.String(t.consumerGroup)) + } + if r.Key != nil && r.Value == nil { + attrs = append(attrs, semconv.MessagingKafkaMessageTombstoneKey.Bool(true)) + } + opts := []trace.SpanStartOption{ + trace.WithAttributes(attrs...), + trace.WithSpanKind(trace.SpanKindConsumer), + } + + if r.Context == nil { + r.Context = context.Background() + } + // Extract the span context from the record. + ctx := t.propagators.Extract(r.Context, NewRecordCarrier(r)) + // Start the "receive" span. + newCtx, _ := t.tracer.Start(ctx, r.Topic+" receive", opts...) + // Update the record context. + r.Context = newCtx +} + +// OnFetchRecordUnbuffered continues and ends the "receive" span for an +// unbuffered record. 
+func (t *Tracer) OnFetchRecordUnbuffered(r *kgo.Record, _ bool) { + span := trace.SpanFromContext(r.Context) + defer span.End() +} diff --git a/vendor/github.com/twmb/franz-go/plugin/kotel/version.go b/vendor/github.com/twmb/franz-go/plugin/kotel/version.go new file mode 100644 index 00000000000..0152aa7012c --- /dev/null +++ b/vendor/github.com/twmb/franz-go/plugin/kotel/version.go @@ -0,0 +1,21 @@ +package kotel + +import "runtime/debug" + +// version is the current release version of the kotel instrumentation. +func version() string { + info, ok := debug.ReadBuildInfo() + if ok { + for _, dep := range info.Deps { + if dep.Path == instrumentationName { + return dep.Version + } + } + } + return "unknown" +} + +// semVersion is the semantic version to be supplied to tracer/meter creation. +func semVersion() string { + return "semver:" + version() +} diff --git a/vendor/github.com/twmb/franz-go/plugin/kprom/LICENSE b/vendor/github.com/twmb/franz-go/plugin/kprom/LICENSE new file mode 100644 index 00000000000..36e18034325 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/plugin/kprom/LICENSE @@ -0,0 +1,24 @@ +Copyright 2020, Travis Bischel. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the library nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/twmb/franz-go/plugin/kprom/README.md b/vendor/github.com/twmb/franz-go/plugin/kprom/README.md new file mode 100644 index 00000000000..5c0db3b0447 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/plugin/kprom/README.md @@ -0,0 +1,42 @@ +kprom +=== + +kprom is a plug-in package to provide prometheus +[metrics](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus) +through a +[`kgo.Hook`](https://pkg.go.dev/github.com/twmb/franz-go/pkg/kgo#Hook). 
+ +This package tracks the following metrics under the following names, all +metrics being counter vecs: + +```go +#{ns}_connects_total{node_id="#{node}"} +#{ns}_connect_errors_total{node_id="#{node}"} +#{ns}_write_errors_total{node_id="#{node}"} +#{ns}_write_bytes_total{node_id="#{node}"} +#{ns}_read_errors_total{node_id="#{node}"} +#{ns}_read_bytes_total{node_id="#{node}"} +#{ns}_produce_bytes_total{node_id="#{node}",topic="#{topic}"} +#{ns}_fetch_bytes_total{node_id="#{node}",topic="#{topic}"} +#{ns}_buffered_produce_records_total +#{ns}_buffered_fetch_records_total +``` + +The above metrics can be expanded considerably with options in this package, +allowing timings, uncompressed and compressed bytes, and different labels. + +Note that seed brokers use broker IDs prefixed with "seed_", with the number +corresponding to which seed it is. + +To use, + +```go +metrics := kprom.NewMetrics("namespace") +cl, err := kgo.NewClient( + kgo.WithHooks(metrics), + // ...other opts +) +``` + +You can use your own prometheus registry, as well as a few other options. +See the package [documentation](https://pkg.go.dev/github.com/twmb/franz-go/plugin/kprom) for more info! diff --git a/vendor/github.com/twmb/franz-go/plugin/kprom/config.go b/vendor/github.com/twmb/franz-go/plugin/kprom/config.go new file mode 100644 index 00000000000..d907bebe522 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/plugin/kprom/config.go @@ -0,0 +1,233 @@ +package kprom + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +type cfg struct { + namespace string + subsystem string + + reg prometheus.Registerer + gatherer prometheus.Gatherer + + withClientLabel bool + histograms map[Histogram][]float64 + defBuckets []float64 + fetchProduceOpts fetchProduceOpts + + handlerOpts promhttp.HandlerOpts + goCollectors bool +} + +func newCfg(namespace string, opts ...Opt) cfg { + regGatherer := RegistererGatherer(prometheus.NewRegistry()) + cfg := cfg{ + namespace: namespace, + reg: regGatherer, + gatherer: regGatherer, + + defBuckets: DefBuckets, + fetchProduceOpts: fetchProduceOpts{ + uncompressedBytes: true, + labels: []string{"node_id", "topic"}, + }, + } + + for _, opt := range opts { + opt.apply(&cfg) + } + + if cfg.goCollectors { + cfg.reg.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{})) + cfg.reg.MustRegister(prometheus.NewGoCollector()) + } + + return cfg +} + +// Opt is an option to configure Metrics. +type Opt interface { + apply(*cfg) +} + +type opt struct{ fn func(*cfg) } + +func (o opt) apply(c *cfg) { o.fn(c) } + +type RegistererGatherer interface { + prometheus.Registerer + prometheus.Gatherer +} + +// Registry sets the registerer and gatherer to add metrics to, rather than a +// new registry. Use this option if you want to configure both Gatherer and +// Registerer with the same object. +func Registry(rg RegistererGatherer) Opt { + return opt{func(c *cfg) { + c.reg = rg + c.gatherer = rg + }} +} + +// Registerer sets the registerer to register metrics to, rather than a new registry. +func Registerer(reg prometheus.Registerer) Opt { + return opt{func(c *cfg) { c.reg = reg }} +} + +// Gatherer sets the gatherer to gather metrics from, rather than a new registry. +func Gatherer(gatherer prometheus.Gatherer) Opt { + return opt{func(c *cfg) { c.gatherer = gatherer }}
 +}
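As a brief aside on the Registry/Registerer/Gatherer options just defined, here is a hedged sketch of pointing kprom at an application-wide registry instead of the one it creates by default; the namespace, broker address, and listen port are placeholders:

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"github.com/twmb/franz-go/pkg/kgo"
	"github.com/twmb/franz-go/plugin/kprom"
)

func main() {
	// One registry for the whole application; *prometheus.Registry satisfies
	// the RegistererGatherer interface above.
	reg := prometheus.NewRegistry()
	metrics := kprom.NewMetrics("myapp", kprom.Registry(reg))

	cl, err := kgo.NewClient(
		kgo.SeedBrokers("localhost:9092"),
		kgo.WithHooks(metrics),
	)
	if err != nil {
		panic(err)
	}
	defer cl.Close()

	// Serving the shared registry exposes the kprom metrics alongside
	// everything else registered into it.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	_ = http.ListenAndServe(":8080", nil)
}
```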
+// GoCollectors adds the prometheus.NewProcessCollector and +// prometheus.NewGoCollector collectors to the Metrics' registry. +func GoCollectors() Opt { + return opt{func(c *cfg) { c.goCollectors = true }} +} + +// HandlerOpts sets handler options to use if you wish to use the +// Metrics.Handler function. +// +// This is only useful if you both (a) do not want to provide your own registry +// and (b) want to override the default handler options. +func HandlerOpts(opts promhttp.HandlerOpts) Opt { + return opt{func(c *cfg) { c.handlerOpts = opts }} +} + +// WithClientLabel adds a "client_id" label to all metrics. +func WithClientLabel() Opt { + return opt{func(c *cfg) { c.withClientLabel = true }} +} + +// Subsystem sets the subsystem for the kprom metrics, overriding the default +// empty string. +func Subsystem(ss string) Opt { + return opt{func(c *cfg) { c.subsystem = ss }} +} + +// Buckets sets the buckets to be used with Histograms, overriding the default +// of [kprom.DefBuckets]. If custom buckets per histogram are needed, +// HistogramOpts can be used. +func Buckets(buckets []float64) Opt { + return opt{func(c *cfg) { c.defBuckets = buckets }} +} + +// DefBuckets are the default Histogram buckets. The default buckets are +// tailored to broadly measure Kafka timings (in seconds). +var DefBuckets = []float64{0.001, 0.002, 0.004, 0.008, 0.016, 0.032, 0.064, 0.128, 0.256, 0.512, 1.024, 2.048} + +// A Histogram is an identifier for a kprom histogram that can be enabled. +type Histogram uint8 + +const ( + ReadWait Histogram = iota // Enables {ns}_{ss}_read_wait_seconds. + ReadTime // Enables {ns}_{ss}_read_time_seconds. + WriteWait // Enables {ns}_{ss}_write_wait_seconds. + WriteTime // Enables {ns}_{ss}_write_time_seconds. + RequestDurationE2E // Enables {ns}_{ss}_request_duration_e2e_seconds. + RequestThrottled // Enables {ns}_{ss}_request_throttled_seconds. +) + +// HistogramOpts allows histograms to be enabled with custom buckets. +type HistogramOpts struct { + Enable Histogram + Buckets []float64 +} + +// HistogramsFromOpts allows the user full control of what histograms to enable +// and define buckets to be used with each histogram. +// +// metrics := kprom.NewMetrics( +// "namespace", +// kprom.HistogramsFromOpts( +// kprom.HistogramOpts{ +// Enable: kprom.ReadWait, +// Buckets: prometheus.LinearBuckets(10, 10, 8), +// }, +// kprom.HistogramOpts{ +// Enable: kprom.ReadTime, +// // the kprom default buckets will be used +// }, +// ), +// ) +func HistogramsFromOpts(hs ...HistogramOpts) Opt { + return opt{func(c *cfg) { + c.histograms = make(map[Histogram][]float64) + for _, h := range hs { + c.histograms[h.Enable] = h.Buckets + } + }} +} + +// Histograms sets the histograms to be enabled for kprom, overriding the +// default of disabling all histograms. +// +// metrics := kprom.NewMetrics( +// "namespace", +// kprom.Histograms( +// kprom.RequestDurationE2E, +// ), +// ) +func Histograms(hs ...Histogram) Opt { + hos := make([]HistogramOpts, 0) + for _, h := range hs { + hos = append(hos, HistogramOpts{Enable: h}) + } + return HistogramsFromOpts(hos...) +}
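Combining the histogram options above with the fetch/produce details defined just below, a configuration might look like the following fragment (a sketch in the style of the doc examples; the namespace is arbitrary):

```go
m := kprom.NewMetrics("myapp",
	// Latency histograms, using the default DefBuckets.
	kprom.Histograms(kprom.ReadTime, kprom.WriteTime),
	// Keep the node/topic labels and additionally count batches and records.
	kprom.FetchAndProduceDetail(
		kprom.ByNode, kprom.ByTopic,
		kprom.Batches, kprom.Records, kprom.UncompressedBytes,
	),
)
cl, err := kgo.NewClient(kgo.WithHooks(m))
```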
+// A Detail is a label that can be set on fetch/produce metrics. +type Detail uint8 + +const ( + ByNode Detail = iota // Include label "node_id" for fetch and produce metrics. + ByTopic // Include label "topic" for fetch and produce metrics. + Batches // Report number of fetched and produced batches. + Records // Report the number of fetched and produced records. + CompressedBytes // Report the number of fetched and produced compressed bytes. + UncompressedBytes // Report the number of fetched and produced uncompressed bytes. + ConsistentNaming // Renames {fetch,produce}_bytes_total to {fetch,produce}_uncompressed_bytes_total, making the names consistent with the CompressedBytes detail. +) + +type fetchProduceOpts struct { + labels []string + batches bool + records bool + compressedBytes bool + uncompressedBytes bool + consistentNaming bool +} + +// FetchAndProduceDetail determines details for fetch/produce metrics, +// overriding the default of (UncompressedBytes, ByTopic, ByNode). +func FetchAndProduceDetail(details ...Detail) Opt { + return opt{ + func(c *cfg) { + labelsDeduped := make(map[Detail]string) + c.fetchProduceOpts = fetchProduceOpts{} + for _, l := range details { + switch l { + case ByTopic: + labelsDeduped[ByTopic] = "topic" + case ByNode: + labelsDeduped[ByNode] = "node_id" + case Batches: + c.fetchProduceOpts.batches = true + case Records: + c.fetchProduceOpts.records = true + case UncompressedBytes: + c.fetchProduceOpts.uncompressedBytes = true + case CompressedBytes: + c.fetchProduceOpts.compressedBytes = true + case ConsistentNaming: + c.fetchProduceOpts.consistentNaming = true + } + } + var labels []string + for _, l := range labelsDeduped { + labels = append(labels, l) + } + c.fetchProduceOpts.labels = labels + }, + } +} diff --git a/vendor/github.com/twmb/franz-go/plugin/kprom/kprom.go b/vendor/github.com/twmb/franz-go/plugin/kprom/kprom.go new file mode 100644 index 00000000000..9d6a4700487 --- /dev/null +++ b/vendor/github.com/twmb/franz-go/plugin/kprom/kprom.go @@ -0,0 +1,510 @@ +// Package kprom provides prometheus plug-in metrics for a kgo client. +// +// This package tracks the following metrics under the following names, +// all metrics being counter vecs: +// +// #{ns}_connects_total{node_id="#{node}"} +// #{ns}_connect_errors_total{node_id="#{node}"} +// #{ns}_write_errors_total{node_id="#{node}"} +// #{ns}_write_bytes_total{node_id="#{node}"} +// #{ns}_read_errors_total{node_id="#{node}"} +// #{ns}_read_bytes_total{node_id="#{node}"} +// #{ns}_produce_bytes_total{node_id="#{node}",topic="#{topic}"} +// #{ns}_fetch_bytes_total{node_id="#{node}",topic="#{topic}"} +// #{ns}_buffered_produce_records_total +// #{ns}_buffered_fetch_records_total +// +// The above metrics can be expanded considerably with options in this package, +// allowing timings, uncompressed and compressed bytes, and different labels. +// +// This can be used in a client like so: +// +// m := kprom.NewMetrics("my_namespace") +// cl, err := kgo.NewClient( +// kgo.WithHooks(m), +// // ...other opts +// ) +// +// More examples are linked in the main project readme: https://github.com/twmb/franz-go/#metrics--logging +// +// By default, metrics are installed under a new prometheus registry, but +// this can be overridden with the Registry option. +// +// Note that seed brokers use broker IDs prefixed with "seed_", with the number +// corresponding to which seed it is.
+package kprom + +import ( + "net" + "net/http" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/prometheus/client_golang/prometheus/promhttp" + + "github.com/twmb/franz-go/pkg/kgo" +) + +var ( // interface checks to ensure we implement the hooks properly + _ kgo.HookBrokerConnect = new(Metrics) + _ kgo.HookBrokerDisconnect = new(Metrics) + _ kgo.HookBrokerWrite = new(Metrics) + _ kgo.HookBrokerRead = new(Metrics) + _ kgo.HookProduceBatchWritten = new(Metrics) + _ kgo.HookFetchBatchRead = new(Metrics) + _ kgo.HookBrokerE2E = new(Metrics) + _ kgo.HookBrokerThrottle = new(Metrics) + _ kgo.HookNewClient = new(Metrics) + _ kgo.HookClientClosed = new(Metrics) +) + +// Metrics provides prometheus metrics +type Metrics struct { + cfg cfg + + // Connection + connConnectsTotal *prometheus.CounterVec + connConnectErrorsTotal *prometheus.CounterVec + connDisconnectsTotal *prometheus.CounterVec + + // Write + writeBytesTotal *prometheus.CounterVec + writeErrorsTotal *prometheus.CounterVec + writeWaitSeconds *prometheus.HistogramVec + writeTimeSeconds *prometheus.HistogramVec + + // Read + readBytesTotal *prometheus.CounterVec + readErrorsTotal *prometheus.CounterVec + readWaitSeconds *prometheus.HistogramVec + readTimeSeconds *prometheus.HistogramVec + + // Request E2E & Throttle + requestDurationE2ESeconds *prometheus.HistogramVec + requestThrottledSeconds *prometheus.HistogramVec + + // Produce + produceCompressedBytes *prometheus.CounterVec + produceUncompressedBytes *prometheus.CounterVec + produceBatchesTotal *prometheus.CounterVec + produceRecordsTotal *prometheus.CounterVec + + // Fetch + fetchCompressedBytes *prometheus.CounterVec + fetchUncompressedBytes *prometheus.CounterVec + fetchBatchesTotal *prometheus.CounterVec + fetchRecordsTotal *prometheus.CounterVec + + // Buffered + bufferedFetchRecords prometheus.GaugeFunc + bufferedProduceRecords prometheus.GaugeFunc +} + +// NewMetrics returns a new Metrics that adds prometheus metrics to the +// registry under the given namespace. +func NewMetrics(namespace string, opts ...Opt) *Metrics { + return &Metrics{cfg: newCfg(namespace, opts...)} +} + +// Registry returns the prometheus registry that metrics were added to. +// +// This is useful if you want the Metrics type to create its own registry for +// you to add additional metrics to. +func (m *Metrics) Registry() prometheus.Registerer { + return m.cfg.reg +} + +// Handler returns an http.Handler providing prometheus metrics. +func (m *Metrics) Handler() http.Handler { + return promhttp.HandlerFor(m.cfg.gatherer, m.cfg.handlerOpts) +} + +// OnNewClient implements the HookNewClient interface for metrics +// gathering. 
+// This method is meant to be called by the hook system and not by the user +func (m *Metrics) OnNewClient(client *kgo.Client) { + var ( + factory = promauto.With(m.cfg.reg) + namespace = m.cfg.namespace + subsystem = m.cfg.subsystem + constLabels prometheus.Labels + ) + if m.cfg.withClientLabel { + constLabels = make(prometheus.Labels) + constLabels["client_id"] = client.OptValue(kgo.ClientID).(string) + } + + // returns the histogram's buckets if set, otherwise defBuckets + getHistogramBuckets := func(h Histogram) []float64 { + if buckets, ok := m.cfg.histograms[h]; ok && len(buckets) != 0 { + return buckets + } + return m.cfg.defBuckets + } + + // Connection + + m.connConnectsTotal = factory.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: "connects_total", + Help: "Total number of connections opened", + }, []string{"node_id"}) + + m.connConnectErrorsTotal = factory.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: "connect_errors_total", + Help: "Total number of connection errors", + }, []string{"node_id"}) + + m.connDisconnectsTotal = factory.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: "disconnects_total", + Help: "Total number of connections closed", + }, []string{"node_id"}) + + // Write + + m.writeBytesTotal = factory.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: "write_bytes_total", + Help: "Total number of bytes written", + }, []string{"node_id"}) + + m.writeErrorsTotal = factory.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: "write_errors_total", + Help: "Total number of write errors", + }, []string{"node_id"}) + + m.writeWaitSeconds = factory.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: "write_wait_seconds", + Help: "Time spent waiting to write to Kafka", + Buckets: getHistogramBuckets(WriteWait), + }, []string{"node_id"}) + + m.writeTimeSeconds = factory.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: "write_time_seconds", + Help: "Time spent writing to Kafka", + Buckets: getHistogramBuckets(WriteTime), + }, []string{"node_id"}) + + // Read + + m.readBytesTotal = factory.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: "read_bytes_total", + Help: "Total number of bytes read", + }, []string{"node_id"}) + + m.readErrorsTotal = factory.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: "read_errors_total", + Help: "Total number of read errors", + }, []string{"node_id"}) + + m.readWaitSeconds = factory.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: "read_wait_seconds", + Help: "Time spent waiting to read from Kafka", + Buckets: getHistogramBuckets(ReadWait), + }, []string{"node_id"}) + + m.readTimeSeconds = factory.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: "read_time_seconds", + Help: "Time spent reading from Kafka", + Buckets: getHistogramBuckets(ReadTime), + },
[]string{"node_id"}) + + // Request E2E duration & Throttle + + m.requestDurationE2ESeconds = factory.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: "request_duration_e2e_seconds", + Help: "Time from the start of when a request is written to the end of when the response for that request was fully read", + Buckets: getHistogramBuckets(RequestDurationE2E), + }, []string{"node_id"}) + + m.requestThrottledSeconds = factory.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: "request_throttled_seconds", + Help: "Time the request was throttled", + Buckets: getHistogramBuckets(RequestThrottled), + }, []string{"node_id"}) + + // Produce + + m.produceCompressedBytes = factory.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: "produce_compressed_bytes_total", + Help: "Total number of compressed bytes produced", + }, m.cfg.fetchProduceOpts.labels) + + produceUncompressedBytesName := "produce_bytes_total" + if m.cfg.fetchProduceOpts.consistentNaming { + produceUncompressedBytesName = "produce_uncompressed_bytes_total" + } + m.produceUncompressedBytes = factory.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: produceUncompressedBytesName, + Help: "Total number of uncompressed bytes produced", + }, m.cfg.fetchProduceOpts.labels) + + m.produceBatchesTotal = factory.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: "produce_batches_total", + Help: "Total number of batches produced", + }, m.cfg.fetchProduceOpts.labels) + + m.produceRecordsTotal = factory.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: "produce_records_total", + Help: "Total number of records produced", + }, m.cfg.fetchProduceOpts.labels) + + // Fetch + + m.fetchCompressedBytes = factory.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: "fetch_compressed_bytes_total", + Help: "Total number of compressed bytes fetched", + }, m.cfg.fetchProduceOpts.labels) + + fetchUncompressedBytesName := "fetch_bytes_total" + if m.cfg.fetchProduceOpts.consistentNaming { + fetchUncompressedBytesName = "fetch_uncompressed_bytes_total" + } + m.fetchUncompressedBytes = factory.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: fetchUncompressedBytesName, + Help: "Total number of uncompressed bytes fetched", + }, m.cfg.fetchProduceOpts.labels) + + m.fetchBatchesTotal = factory.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: "fetch_batches_total", + Help: "Total number of batches fetched", + }, m.cfg.fetchProduceOpts.labels) + + m.fetchRecordsTotal = factory.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: "fetch_records_total", + Help: "Total number of records fetched", + }, m.cfg.fetchProduceOpts.labels) + + // Buffers + + m.bufferedProduceRecords = factory.NewGaugeFunc( + prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: "buffered_produce_records_total", + Help: "Total number of 
records buffered within the client ready to be produced", + }, + func() float64 { return float64(client.BufferedProduceRecords()) }, + ) + + m.bufferedFetchRecords = factory.NewGaugeFunc( + prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + ConstLabels: constLabels, + Name: "buffered_fetch_records_total", + Help: "Total number of records buffered within the client ready to be consumed", + }, + func() float64 { return float64(client.BufferedFetchRecords()) }, + ) +} + +// OnClientClosed will unregister kprom metrics from kprom registerer +func (m *Metrics) OnClientClosed(*kgo.Client) { + _ = m.cfg.reg.Unregister(m.connConnectsTotal) + _ = m.cfg.reg.Unregister(m.connConnectErrorsTotal) + _ = m.cfg.reg.Unregister(m.connDisconnectsTotal) + _ = m.cfg.reg.Unregister(m.writeBytesTotal) + _ = m.cfg.reg.Unregister(m.writeErrorsTotal) + _ = m.cfg.reg.Unregister(m.writeWaitSeconds) + _ = m.cfg.reg.Unregister(m.writeTimeSeconds) + _ = m.cfg.reg.Unregister(m.readBytesTotal) + _ = m.cfg.reg.Unregister(m.readErrorsTotal) + _ = m.cfg.reg.Unregister(m.readWaitSeconds) + _ = m.cfg.reg.Unregister(m.readTimeSeconds) + _ = m.cfg.reg.Unregister(m.requestDurationE2ESeconds) + _ = m.cfg.reg.Unregister(m.requestThrottledSeconds) + _ = m.cfg.reg.Unregister(m.produceCompressedBytes) + _ = m.cfg.reg.Unregister(m.produceUncompressedBytes) + _ = m.cfg.reg.Unregister(m.produceBatchesTotal) + _ = m.cfg.reg.Unregister(m.produceRecordsTotal) + _ = m.cfg.reg.Unregister(m.fetchCompressedBytes) + _ = m.cfg.reg.Unregister(m.fetchUncompressedBytes) + _ = m.cfg.reg.Unregister(m.fetchBatchesTotal) + _ = m.cfg.reg.Unregister(m.fetchRecordsTotal) + _ = m.cfg.reg.Unregister(m.bufferedFetchRecords) + _ = m.cfg.reg.Unregister(m.bufferedProduceRecords) +} + +// OnBrokerConnect implements the HookBrokerConnect interface for metrics +// gathering. +// This method is meant to be called by the hook system and not by the user +func (m *Metrics) OnBrokerConnect(meta kgo.BrokerMetadata, _ time.Duration, _ net.Conn, err error) { + nodeId := kgo.NodeName(meta.NodeID) + if err != nil { + m.connConnectErrorsTotal.WithLabelValues(nodeId).Inc() + return + } + m.connConnectsTotal.WithLabelValues(nodeId).Inc() +} + +// OnBrokerDisconnect implements the HookBrokerDisconnect interface for metrics +// gathering. +// This method is meant to be called by the hook system and not by the user +func (m *Metrics) OnBrokerDisconnect(meta kgo.BrokerMetadata, _ net.Conn) { + nodeId := kgo.NodeName(meta.NodeID) + m.connDisconnectsTotal.WithLabelValues(nodeId).Inc() +} + +// OnBrokerThrottle implements the HookBrokerThrottle interface for metrics +// gathering. +// This method is meant to be called by the hook system and not by the user +func (m *Metrics) OnBrokerThrottle(meta kgo.BrokerMetadata, throttleInterval time.Duration, _ bool) { + if _, ok := m.cfg.histograms[RequestThrottled]; ok { + nodeId := kgo.NodeName(meta.NodeID) + m.requestThrottledSeconds.WithLabelValues(nodeId).Observe(throttleInterval.Seconds()) + } +} + +// OnProduceBatchWritten implements the HookProduceBatchWritten interface for +// metrics gathering. 
+// This method is meant to be called by the hook system and not by the user +func (m *Metrics) OnProduceBatchWritten(meta kgo.BrokerMetadata, topic string, _ int32, metrics kgo.ProduceBatchMetrics) { + labels := m.fetchProducerLabels(kgo.NodeName(meta.NodeID), topic) + if m.cfg.fetchProduceOpts.uncompressedBytes { + m.produceUncompressedBytes.With(labels).Add(float64(metrics.UncompressedBytes)) + } + if m.cfg.fetchProduceOpts.compressedBytes { + m.produceCompressedBytes.With(labels).Add(float64(metrics.CompressedBytes)) + } + if m.cfg.fetchProduceOpts.batches { + m.produceBatchesTotal.With(labels).Inc() + } + if m.cfg.fetchProduceOpts.records { + m.produceRecordsTotal.With(labels).Add(float64(metrics.NumRecords)) + } +} + +// OnFetchBatchRead implements the HookFetchBatchRead interface for metrics +// gathering. +// This method is meant to be called by the hook system and not by the user +func (m *Metrics) OnFetchBatchRead(meta kgo.BrokerMetadata, topic string, _ int32, metrics kgo.FetchBatchMetrics) { + labels := m.fetchProducerLabels(kgo.NodeName(meta.NodeID), topic) + if m.cfg.fetchProduceOpts.uncompressedBytes { + m.fetchUncompressedBytes.With(labels).Add(float64(metrics.UncompressedBytes)) + } + if m.cfg.fetchProduceOpts.compressedBytes { + m.fetchCompressedBytes.With(labels).Add(float64(metrics.CompressedBytes)) + } + if m.cfg.fetchProduceOpts.batches { + m.fetchBatchesTotal.With(labels).Inc() + } + if m.cfg.fetchProduceOpts.records { + m.fetchRecordsTotal.With(labels).Add(float64(metrics.NumRecords)) + } +} + +// Nop hook for compat, logic moved to OnBrokerE2E +func (m *Metrics) OnBrokerRead(meta kgo.BrokerMetadata, _ int16, bytesRead int, _, _ time.Duration, err error) { +} + +// Nop hook for compat, logic moved to OnBrokerE2E +func (m *Metrics) OnBrokerWrite(meta kgo.BrokerMetadata, _ int16, bytesWritten int, _, _ time.Duration, err error) { +} + +// OnBrokerE2E implements the HookBrokerE2E interface for metrics gathering. +// This method is meant to be called by the hook system and not by the user +func (m *Metrics) OnBrokerE2E(meta kgo.BrokerMetadata, _ int16, e2e kgo.BrokerE2E) { + nodeId := kgo.NodeName(meta.NodeID) + if e2e.WriteErr != nil { + m.writeErrorsTotal.WithLabelValues(nodeId).Inc() + return + } + m.writeBytesTotal.WithLabelValues(nodeId).Add(float64(e2e.BytesWritten)) + if _, ok := m.cfg.histograms[WriteWait]; ok { + m.writeWaitSeconds.WithLabelValues(nodeId).Observe(e2e.WriteWait.Seconds()) + } + if _, ok := m.cfg.histograms[WriteTime]; ok { + m.writeTimeSeconds.WithLabelValues(nodeId).Observe(e2e.TimeToWrite.Seconds()) + } + if e2e.ReadErr != nil { + m.readErrorsTotal.WithLabelValues(nodeId).Inc() + return + } + m.readBytesTotal.WithLabelValues(nodeId).Add(float64(e2e.BytesRead)) + if _, ok := m.cfg.histograms[ReadWait]; ok { + m.readWaitSeconds.WithLabelValues(nodeId).Observe(e2e.ReadWait.Seconds()) + } + if _, ok := m.cfg.histograms[ReadTime]; ok { + m.readTimeSeconds.WithLabelValues(nodeId).Observe(e2e.TimeToRead.Seconds()) + } + if _, ok := m.cfg.histograms[RequestDurationE2E]; ok { + m.requestDurationE2ESeconds.WithLabelValues(nodeId).Observe(e2e.DurationE2E().Seconds()) + } +} + +func (m *Metrics) fetchProducerLabels(nodeId, topic string) prometheus.Labels { + labels := make(prometheus.Labels, 2) + for _, l := range m.cfg.fetchProduceOpts.labels { + switch l { + case "topic": + labels[l] = topic + case "node_id": + labels[l] = nodeId + } + } + return labels +} diff --git a/vendor/github.com/ua-parser/uap-go/LICENSE
b/vendor/github.com/ua-parser/uap-go/LICENSE new file mode 100644 index 00000000000..8ceb9a6c0c0 --- /dev/null +++ b/vendor/github.com/ua-parser/uap-go/LICENSE @@ -0,0 +1,16 @@ +Apache License, Version 2.0 +=========================== + +Copyright 2009 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/ua-parser/uap-go/uaparser/.gitignore b/vendor/github.com/ua-parser/uap-go/uaparser/.gitignore new file mode 100644 index 00000000000..385d6a8d0c8 --- /dev/null +++ b/vendor/github.com/ua-parser/uap-go/uaparser/.gitignore @@ -0,0 +1,2 @@ +*.out +*.test diff --git a/vendor/github.com/ua-parser/uap-go/uaparser/LICENSE.md b/vendor/github.com/ua-parser/uap-go/uaparser/LICENSE.md new file mode 100644 index 00000000000..5c7c52930d6 --- /dev/null +++ b/vendor/github.com/ua-parser/uap-go/uaparser/LICENSE.md @@ -0,0 +1,8 @@ +The MIT License (MIT) +Copyright (c) 2013 Yihuan Zhou + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/ua-parser/uap-go/uaparser/cache.go b/vendor/github.com/ua-parser/uap-go/uaparser/cache.go new file mode 100644 index 00000000000..1838b81e1e2 --- /dev/null +++ b/vendor/github.com/ua-parser/uap-go/uaparser/cache.go @@ -0,0 +1,36 @@ +package uaparser + +import lru "github.com/hashicorp/golang-lru" + +// cache caches user-agent properties. +// Without the cache, the parser performs hundreds of expensive regex operations, +// taking 10+ ms. This can lead to significant performance degradation when UA parsing is +// done on a per-request basis. +type cache struct { + device *lru.ARCCache + os *lru.ARCCache + userAgent *lru.ARCCache +} + +func newCache() *cache { + var ( + c cache + err error + ) + const cacheSize = 1024 + // NewARC only fails when cacheSize <= 0. + // Also, returning an error up the stack would break the API. 
+ c.device, err = lru.NewARC(cacheSize) + if err != nil { + panic(err) + } + c.os, err = lru.NewARC(cacheSize) + if err != nil { + panic(err) + } + c.userAgent, err = lru.NewARC(cacheSize) + if err != nil { + panic(err) + } + return &c +} diff --git a/vendor/github.com/ua-parser/uap-go/uaparser/device.go b/vendor/github.com/ua-parser/uap-go/uaparser/device.go new file mode 100644 index 00000000000..7a115e7511c --- /dev/null +++ b/vendor/github.com/ua-parser/uap-go/uaparser/device.go @@ -0,0 +1,30 @@ +package uaparser + +import "strings" + +type Device struct { + Family string + Brand string + Model string +} + +func (parser *deviceParser) Match(line string, dvc *Device) { + matches := parser.Reg.FindStringSubmatchIndex(line) + + if len(matches) == 0 { + return + } + + dvc.Family = string(parser.Reg.ExpandString(nil, parser.DeviceReplacement, line, matches)) + dvc.Family = strings.TrimSpace(dvc.Family) + + dvc.Brand = string(parser.Reg.ExpandString(nil, parser.BrandReplacement, line, matches)) + dvc.Brand = strings.TrimSpace(dvc.Brand) + + dvc.Model = string(parser.Reg.ExpandString(nil, parser.ModelReplacement, line, matches)) + dvc.Model = strings.TrimSpace(dvc.Model) +} + +func (dvc *Device) ToString() string { + return dvc.Family +} diff --git a/vendor/github.com/ua-parser/uap-go/uaparser/os.go b/vendor/github.com/ua-parser/uap-go/uaparser/os.go new file mode 100644 index 00000000000..b30e8a193f3 --- /dev/null +++ b/vendor/github.com/ua-parser/uap-go/uaparser/os.go @@ -0,0 +1,49 @@ +package uaparser + +type Os struct { + Family string + Major string + Minor string + Patch string + PatchMinor string `yaml:"patch_minor"` +} + +func (parser *osParser) Match(line string, os *Os) { + matches := parser.Reg.FindStringSubmatchIndex(line) + if len(matches) > 0 { + os.Family = string(parser.Reg.ExpandString(nil, parser.OSReplacement, line, matches)) + os.Major = string(parser.Reg.ExpandString(nil, parser.V1Replacement, line, matches)) + os.Minor = string(parser.Reg.ExpandString(nil, parser.V2Replacement, line, matches)) + os.Patch = string(parser.Reg.ExpandString(nil, parser.V3Replacement, line, matches)) + os.PatchMinor = string(parser.Reg.ExpandString(nil, parser.V4Replacement, line, matches)) + } +} + +func (os *Os) ToString() string { + var str string + if os.Family != "" { + str += os.Family + } + version := os.ToVersionString() + if version != "" { + str += " " + version + } + return str +} + +func (os *Os) ToVersionString() string { + var version string + if os.Major != "" { + version += os.Major + } + if os.Minor != "" { + version += "." + os.Minor + } + if os.Patch != "" { + version += "." + os.Patch + } + if os.PatchMinor != "" { + version += "." 
+ os.PatchMinor + } + return version +} diff --git a/vendor/github.com/ua-parser/uap-go/uaparser/parser.go b/vendor/github.com/ua-parser/uap-go/uaparser/parser.go new file mode 100644 index 00000000000..350fb19635d --- /dev/null +++ b/vendor/github.com/ua-parser/uap-go/uaparser/parser.go @@ -0,0 +1,394 @@ +package uaparser + +import ( + "fmt" + "io/ioutil" + "regexp" + "sort" + "sync" + "sync/atomic" + "time" + + "gopkg.in/yaml.v2" +) + +type RegexesDefinitions struct { + UA []*uaParser `yaml:"user_agent_parsers"` + OS []*osParser `yaml:"os_parsers"` + Device []*deviceParser `yaml:"device_parsers"` + _ [4]byte // padding for alignment + sync.RWMutex +} + +type UserAgentSorter []*uaParser + +func (a UserAgentSorter) Len() int { return len(a) } +func (a UserAgentSorter) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a UserAgentSorter) Less(i, j int) bool { + return atomic.LoadUint64(&a[i].MatchesCount) > atomic.LoadUint64(&a[j].MatchesCount) +} + +type uaParser struct { + Reg *regexp.Regexp + Expr string `yaml:"regex"` + Flags string `yaml:"regex_flag"` + FamilyReplacement string `yaml:"family_replacement"` + V1Replacement string `yaml:"v1_replacement"` + V2Replacement string `yaml:"v2_replacement"` + V3Replacement string `yaml:"v3_replacement"` + _ [4]byte // padding for alignment + MatchesCount uint64 +} + +func (ua *uaParser) setDefaults() { + if ua.FamilyReplacement == "" { + ua.FamilyReplacement = "$1" + } + if ua.V1Replacement == "" { + ua.V1Replacement = "$2" + } + if ua.V2Replacement == "" { + ua.V2Replacement = "$3" + } + if ua.V3Replacement == "" { + ua.V3Replacement = "$4" + } +} + +type OsSorter []*osParser + +func (a OsSorter) Len() int { return len(a) } +func (a OsSorter) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a OsSorter) Less(i, j int) bool { + return atomic.LoadUint64(&a[i].MatchesCount) > atomic.LoadUint64(&a[j].MatchesCount) +} + +type osParser struct { + Reg *regexp.Regexp + Expr string `yaml:"regex"` + Flags string `yaml:"regex_flag"` + OSReplacement string `yaml:"os_replacement"` + V1Replacement string `yaml:"os_v1_replacement"` + V2Replacement string `yaml:"os_v2_replacement"` + V3Replacement string `yaml:"os_v3_replacement"` + V4Replacement string `yaml:"os_v4_replacement"` + _ [4]byte // padding for alignment + MatchesCount uint64 +} + +func (os *osParser) setDefaults() { + if os.OSReplacement == "" { + os.OSReplacement = "$1" + } + if os.V1Replacement == "" { + os.V1Replacement = "$2" + } + if os.V2Replacement == "" { + os.V2Replacement = "$3" + } + if os.V3Replacement == "" { + os.V3Replacement = "$4" + } + if os.V4Replacement == "" { + os.V4Replacement = "$5" + } +} + +type DeviceSorter []*deviceParser + +func (a DeviceSorter) Len() int { return len(a) } +func (a DeviceSorter) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a DeviceSorter) Less(i, j int) bool { + return atomic.LoadUint64(&a[i].MatchesCount) > atomic.LoadUint64(&a[j].MatchesCount) +} + +type deviceParser struct { + Reg *regexp.Regexp + Expr string `yaml:"regex"` + Flags string `yaml:"regex_flag"` + DeviceReplacement string `yaml:"device_replacement"` + BrandReplacement string `yaml:"brand_replacement"` + ModelReplacement string `yaml:"model_replacement"` + _ [4]byte // padding for alignment + MatchesCount uint64 +} + +func (device *deviceParser) setDefaults() { + if device.DeviceReplacement == "" { + device.DeviceReplacement = "$1" + } + if device.ModelReplacement == "" { + device.ModelReplacement = "$1" + } +} + +type Client struct { + UserAgent *UserAgent + Os *Os + Device *Device +}
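To make the Client type above concrete, a small usage sketch; it relies only on NewFromSaved and Parse as defined in this file, with a sample user-agent string (the printed values are indicative, not guaranteed):

```go
package main

import (
	"fmt"

	"github.com/ua-parser/uap-go/uaparser"
)

func main() {
	// NewFromSaved compiles the YAML definitions statically bundled in
	// yaml.go below.
	parser := uaparser.NewFromSaved()

	ua := "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36"

	// Parse runs the UA, OS, and device lookups concurrently; results are
	// stored in the per-component ARC caches, so a repeated call with the
	// same string skips the regex scans entirely.
	client := parser.Parse(ua)

	fmt.Println(client.UserAgent.ToString()) // e.g. "Chrome 118.0.0"
	fmt.Println(client.Os.ToString())        // e.g. "Linux"
	fmt.Println(client.Device.ToString())    // e.g. "Other"
}
```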
+type Parser struct { + /* atomic operations are done on the following uint64 fields. + * These must be 64-bit aligned; on 32-bit architectures + * this is only guaranteed at the beginning of a struct. */ + UserAgentMisses uint64 + OsMisses uint64 + DeviceMisses uint64 + + cache *cache + + RegexesDefinitions + Mode int + UseSort bool + debugMode bool +} + +const ( + EOsLookUpMode = 1 /* 00000001 */ + EUserAgentLookUpMode = 2 /* 00000010 */ + EDeviceLookUpMode = 4 /* 00000100 */ + cMinMissesTreshold = 100000 + cDefaultMissesTreshold = 500000 + cDefaultMatchIdxNotOk = 20 + cDefaultSortOption = false +) + +var ( + missesTreshold = uint64(500000) + matchIdxNotOk = 20 +) + +func (parser *Parser) mustCompile() { // until we can use yaml.UnmarshalYAML with embedded pointer struct + for _, p := range parser.UA { + p.Reg = compileRegex(p.Flags, p.Expr) + p.setDefaults() + } + for _, p := range parser.OS { + p.Reg = compileRegex(p.Flags, p.Expr) + p.setDefaults() + } + for _, p := range parser.Device { + p.Reg = compileRegex(p.Flags, p.Expr) + p.setDefaults() + } +} + +func NewWithOptions(regexFile string, mode, treshold, topCnt int, useSort, debugMode bool) (*Parser, error) { + data, err := ioutil.ReadFile(regexFile) + if err != nil { + return nil, err + } + if topCnt >= 0 { + matchIdxNotOk = topCnt + } + if treshold > cMinMissesTreshold { + missesTreshold = uint64(treshold) + } + parser, err := NewFromBytes(data) + if err != nil { + return nil, err + } + parser.Mode = mode + parser.UseSort = useSort + parser.debugMode = debugMode + return parser, nil +} + +func New(regexFile string) (*Parser, error) { + data, err := ioutil.ReadFile(regexFile) + if err != nil { + return nil, err + } + matchIdxNotOk = cDefaultMatchIdxNotOk + missesTreshold = cDefaultMissesTreshold + parser, err := NewFromBytes(data) + if err != nil { + return nil, err + } + return parser, nil +} + +func NewFromSaved() *Parser { + parser, err := NewFromBytes(DefinitionYaml) + if err != nil { + // if the YAML is malformed, it's a programmatic error inside what + // we've statically compiled into our binary. Panic!
+ panic(err.Error()) + } + return parser +} + +func NewFromBytes(data []byte) (*Parser, error) { + parser := &Parser{ + Mode: EOsLookUpMode | EUserAgentLookUpMode | EDeviceLookUpMode, + cache: newCache(), + } + if err := yaml.Unmarshal(data, &parser.RegexesDefinitions); err != nil { + return nil, err + } + + parser.mustCompile() + + return parser, nil +} + +func (parser *Parser) Parse(line string) *Client { + cli := new(Client) + var wg sync.WaitGroup + if EUserAgentLookUpMode&parser.Mode == EUserAgentLookUpMode { + wg.Add(1) + go func() { + defer wg.Done() + parser.RLock() + cli.UserAgent = parser.ParseUserAgent(line) + parser.RUnlock() + }() + } + if EOsLookUpMode&parser.Mode == EOsLookUpMode { + wg.Add(1) + go func() { + defer wg.Done() + parser.RLock() + cli.Os = parser.ParseOs(line) + parser.RUnlock() + }() + } + if EDeviceLookUpMode&parser.Mode == EDeviceLookUpMode { + wg.Add(1) + go func() { + defer wg.Done() + parser.RLock() + cli.Device = parser.ParseDevice(line) + parser.RUnlock() + }() + } + wg.Wait() + if parser.UseSort == true { + checkAndSort(parser) + } + return cli +} + +func (parser *Parser) ParseUserAgent(line string) *UserAgent { + cachedUA, ok := parser.cache.userAgent.Get(line) + if ok { + return cachedUA.(*UserAgent) + } + ua := new(UserAgent) + foundIdx := -1 + found := false + for i, uaPattern := range parser.UA { + uaPattern.Match(line, ua) + if len(ua.Family) > 0 { + found = true + foundIdx = i + atomic.AddUint64(&uaPattern.MatchesCount, 1) + break + } + } + if !found { + ua.Family = "Other" + } + if foundIdx > matchIdxNotOk { + atomic.AddUint64(&parser.UserAgentMisses, 1) + } + parser.cache.userAgent.Add(line, ua) + return ua +} + +func (parser *Parser) ParseOs(line string) *Os { + cachedOS, ok := parser.cache.os.Get(line) + if ok { + return cachedOS.(*Os) + } + + os := new(Os) + foundIdx := -1 + found := false + for i, osPattern := range parser.OS { + osPattern.Match(line, os) + if len(os.Family) > 0 { + found = true + foundIdx = i + atomic.AddUint64(&osPattern.MatchesCount, 1) + break + } + } + if !found { + os.Family = "Other" + } + if foundIdx > matchIdxNotOk { + atomic.AddUint64(&parser.OsMisses, 1) + } + + parser.cache.os.Add(line, os) + return os +} + +func (parser *Parser) ParseDevice(line string) *Device { + cachedDevice, ok := parser.cache.device.Get(line) + if ok { + return cachedDevice.(*Device) + } + + dvc := new(Device) + foundIdx := -1 + found := false + for i, dvcPattern := range parser.Device { + dvcPattern.Match(line, dvc) + if len(dvc.Family) > 0 { + found = true + foundIdx = i + atomic.AddUint64(&dvcPattern.MatchesCount, 1) + break + } + } + if !found { + dvc.Family = "Other" + } + if foundIdx > matchIdxNotOk { + atomic.AddUint64(&parser.DeviceMisses, 1) + } + + parser.cache.device.Add(line, dvc) + return dvc +} + +func checkAndSort(parser *Parser) { + parser.Lock() + if atomic.LoadUint64(&parser.UserAgentMisses) >= missesTreshold { + if parser.debugMode { + fmt.Printf("%s\tSorting UserAgents slice\n", time.Now()) + } + parser.UserAgentMisses = 0 + sort.Sort(UserAgentSorter(parser.UA)) + } + parser.Unlock() + parser.Lock() + if atomic.LoadUint64(&parser.OsMisses) >= missesTreshold { + if parser.debugMode { + fmt.Printf("%s\tSorting OS slice\n", time.Now()) + } + parser.OsMisses = 0 + sort.Sort(OsSorter(parser.OS)) + } + parser.Unlock() + parser.Lock() + if atomic.LoadUint64(&parser.DeviceMisses) >= missesTreshold { + if parser.debugMode { + fmt.Printf("%s\tSorting Device slice\n", time.Now()) + } + parser.DeviceMisses = 0 + 
sort.Sort(DeviceSorter(parser.Device)) + } + parser.Unlock() +} + +func compileRegex(flags, expr string) *regexp.Regexp { + if flags == "" { + return regexp.MustCompile(expr) + } else { + return regexp.MustCompile(fmt.Sprintf("(?%s)%s", flags, expr)) + } +} diff --git a/vendor/github.com/ua-parser/uap-go/uaparser/user_agent.go b/vendor/github.com/ua-parser/uap-go/uaparser/user_agent.go new file mode 100644 index 00000000000..ad4073a58fb --- /dev/null +++ b/vendor/github.com/ua-parser/uap-go/uaparser/user_agent.go @@ -0,0 +1,44 @@ +package uaparser + +type UserAgent struct { + Family string + Major string + Minor string + Patch string +} + +func (parser *uaParser) Match(line string, ua *UserAgent) { + matches := parser.Reg.FindStringSubmatchIndex(line) + if len(matches) > 0 { + ua.Family = string(parser.Reg.ExpandString(nil, parser.FamilyReplacement, line, matches)) + ua.Major = string(parser.Reg.ExpandString(nil, parser.V1Replacement, line, matches)) + ua.Minor = string(parser.Reg.ExpandString(nil, parser.V2Replacement, line, matches)) + ua.Patch = string(parser.Reg.ExpandString(nil, parser.V3Replacement, line, matches)) + } +} + +func (ua *UserAgent) ToString() string { + var str string + if ua.Family != "" { + str += ua.Family + } + version := ua.ToVersionString() + if version != "" { + str += " " + version + } + return str +} + +func (ua *UserAgent) ToVersionString() string { + var version string + if ua.Major != "" { + version += ua.Major + } + if ua.Minor != "" { + version += "." + ua.Minor + } + if ua.Patch != "" { + version += "." + ua.Patch + } + return version +} diff --git a/vendor/github.com/ua-parser/uap-go/uaparser/yaml.go b/vendor/github.com/ua-parser/uap-go/uaparser/yaml.go new file mode 100644 index 00000000000..c16729222a2 --- /dev/null +++ b/vendor/github.com/ua-parser/uap-go/uaparser/yaml.go @@ -0,0 +1,3736 @@ +package uaparser + +var DefinitionYaml = []byte(`user_agent_parsers: + - regex: '(GeoEvent Server) (\d+)(?:\.(\d+)(?:\.(\d+)|)|)' + - regex: '(ArcGIS Pro)(?: (\d+)\.(\d+)\.([^ ]+)|)' + - regex: 'ArcGIS Client Using WinInet' + family_replacement: 'ArcMap' + - regex: '(OperationsDashboard)-(?:Windows)-(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Operations Dashboard for ArcGIS' + - regex: '(arcgisearth)/(\d+)\.(\d+)(?:\.(\d+)|)' + family_replacement: 'ArcGIS Earth' + - regex: 'com.esri.(earth).phone/(\d+)\.(\d+)(?:\.(\d+)|)' + family_replacement: 'ArcGIS Earth' + - regex: '(arcgis-explorer)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Explorer for ArcGIS' + - regex: 'arcgis-(collector|aurora)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Collector for ArcGIS' + - regex: '(arcgis-workforce)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Workforce for ArcGIS' + - regex: '(Collector|Explorer|Workforce)-(?:Android|iOS)-(\d+)\.(\d+)(?:\.(\d+)|)' + family_replacement: '$1 for ArcGIS' + - regex: '(Explorer|Collector)/(\d+) CFNetwork' + family_replacement: '$1 for ArcGIS' + - regex: 'ArcGISRuntime-(Android|iOS|NET|Qt)/(\d+)\.(\d+)(?:\.(\d+)|)' + family_replacement: 'ArcGIS Runtime SDK for $1' + - regex: 'ArcGIS\.?(iOS|Android|NET|Qt)(?:-|\.)(\d+)\.(\d+)(?:\.(\d+)|)' + family_replacement: 'ArcGIS Runtime SDK for $1' + - regex: 'ArcGIS\.Runtime\.(Qt)\.(\d+)\.(\d+)(?:\.(\d+)|)' + family_replacement: 'ArcGIS Runtime SDK for $1' + - regex: '^(Luminary)[Stage]+/(\d+) CFNetwork' + - regex: '(ESPN)[%20| ]+Radio/(\d+)\.(\d+)\.(\d+) CFNetwork' + - regex: '(Antenna)/(\d+) CFNetwork' + family_replacement: 'AntennaPod' + - regex: '(TopPodcasts)Pro/(\d+) CFNetwork' + - regex: 
'(MusicDownloader)Lite/(\d+)\.(\d+)\.(\d+) CFNetwork' + - regex: '^(.{0,200})-iPad\/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)(?:\.(\d+)|) CFNetwork' + - regex: '^(.{0,200})-iPhone/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)(?:\.(\d+)|) CFNetwork' + - regex: '^(.{0,200})/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)(?:\.(\d+)|) CFNetwork' + - regex: '^(Luminary)/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)' + - regex: '(espn\.go)' + family_replacement: 'ESPN' + - regex: '(espnradio\.com)' + family_replacement: 'ESPN' + - regex: 'ESPN APP$' + family_replacement: 'ESPN' + - regex: '(audioboom\.com)' + family_replacement: 'AudioBoom' + - regex: ' (Rivo) RHYTHM' + - regex: '(CFNetwork)(?:/(\d+)\.(\d+)(?:\.(\d+)|)|)' + family_replacement: 'CFNetwork' + - regex: '(Pingdom\.com_bot_version_)(\d+)\.(\d+)' + family_replacement: 'PingdomBot' + - regex: '(PingdomTMS)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'PingdomBot' + - regex: '(PingdomPageSpeed)/(\d+)\.(\d+)' + family_replacement: 'PingdomBot' + - regex: ' (PTST)/(\d+)(?:\.(\d+)|)$' + family_replacement: 'WebPageTest.org bot' + - regex: 'X11; (Datanyze); Linux' + - regex: '(NewRelicPinger)/(\d+)\.(\d+)' + family_replacement: 'NewRelicPingerBot' + - regex: '(Tableau)/(\d+)\.(\d+)' + family_replacement: 'Tableau' + - regex: 'AppleWebKit/\d{1,10}\.\d{1,10}.{0,200} Safari.{0,200} (CreativeCloud)/(\d+)\.(\d+).(\d+)' + family_replacement: 'Adobe CreativeCloud' + - regex: '(Salesforce)(?:.)\/(\d+)\.(\d?)' + - regex: '(\(StatusCake\))' + family_replacement: 'StatusCakeBot' + - regex: '(facebookexternalhit)/(\d+)\.(\d+)' + family_replacement: 'FacebookBot' + - regex: 'Google.{0,50}/\+/web/snippet' + family_replacement: 'GooglePlusBot' + - regex: 'via ggpht\.com GoogleImageProxy' + family_replacement: 'GmailImageProxy' + - regex: 'YahooMailProxy; https://help\.yahoo\.com/kb/yahoo-mail-proxy-SLN28749\.html' + family_replacement: 'YahooMailProxy' + - regex: '(Twitterbot)/(\d+)\.(\d+)' + family_replacement: 'Twitterbot' + - regex: '/((?:Ant-|)Nutch|[A-z]+[Bb]ot|[A-z]+[Ss]pider|Axtaris|fetchurl|Isara|ShopSalad|Tailsweep)[ \-](\d+)(?:\.(\d+)|)(?:\.(\d+)|)' + - regex: '\b(008|Altresium|Argus|BaiduMobaider|BoardReader|DNSGroup|DataparkSearch|EDI|Goodzer|Grub|INGRID|Infohelfer|LinkedInBot|LOOQ|Nutch|OgScrper|Pandora|PathDefender|Peew|PostPost|Steeler|Twitterbot|VSE|WebCrunch|WebZIP|Y!J-BR[A-Z]|YahooSeeker|envolk|sproose|wminer)/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)' + - regex: '(MSIE) (\d+)\.(\d+)([a-z]\d|[a-z]|);.{0,200} MSIECrawler' + family_replacement: 'MSIECrawler' + - regex: '(DAVdroid)/(\d+)\.(\d+)(?:\.(\d+)|)' + - regex: '(Google-HTTP-Java-Client|Apache-HttpClient|PostmanRuntime|Go-http-client|scalaj-http|http%20client|Python-urllib|HttpMonitor|TLSProber|WinHTTP|JNLP|okhttp|aihttp|reqwest|axios|unirest-(?:java|python|ruby|nodejs|php|net))(?:[ /](\d+)(?:\.(\d+)|)(?:\.(\d+)|)|)' + - regex: '(Pinterest(?:bot|))/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)[;\s(]+\+https://www.pinterest.com/bot.html' + family_replacement: 'Pinterestbot' + - regex: '(CSimpleSpider|Cityreview Robot|CrawlDaddy|CrawlFire|Finderbots|Index crawler|Job Roboter|KiwiStatus Spider|Lijit Crawler|QuerySeekerSpider|ScollSpider|Trends Crawler|USyd-NLP-Spider|SiteCat Webbot|BotName\/\$BotVersion|123metaspider-Bot|1470\.net crawler|50\.nu|8bo Crawler Bot|Aboundex|Accoona-[A-z]{1,30}-Agent|AdsBot-Google(?:-[a-z]{1,30}|)|altavista|AppEngine-Google|archive.{0,30}\.org_bot|archiver|Ask Jeeves|[Bb]ai[Dd]u[Ss]pider(?:-[A-Za-z]{1,30})(?:-[A-Za-z]{1,30}|)|bingbot|BingPreview|blitzbot|BlogBridge|Bloglovin|BoardReader Blog Indexer|BoardReader Favicon 
Fetcher|boitho.com-dc|BotSeer|BUbiNG|\b\w{0,30}favicon\w{0,30}\b|\bYeti(?:-[a-z]{1,30}|)|Catchpoint(?: bot|)|[Cc]harlotte|Checklinks|clumboot|Comodo HTTP\(S\) Crawler|Comodo-Webinspector-Crawler|ConveraCrawler|CRAWL-E|CrawlConvera|Daumoa(?:-feedfetcher|)|Feed Seeker Bot|Feedbin|findlinks|Flamingo_SearchEngine|FollowSite Bot|furlbot|Genieo|gigabot|GomezAgent|gonzo1|(?:[a-zA-Z]{1,30}-|)Googlebot(?:-[a-zA-Z]{1,30}|)|Google SketchUp|grub-client|gsa-crawler|heritrix|HiddenMarket|holmes|HooWWWer|htdig|ia_archiver|ICC-Crawler|Icarus6j|ichiro(?:/mobile|)|IconSurf|IlTrovatore(?:-Setaccio|)|InfuzApp|Innovazion Crawler|InternetArchive|IP2[a-z]{1,30}Bot|jbot\b|KaloogaBot|Kraken|Kurzor|larbin|LEIA|LesnikBot|Linguee Bot|LinkAider|LinkedInBot|Lite Bot|Llaut|lycos|Mail\.RU_Bot|masscan|masidani_bot|Mediapartners-Google|Microsoft .{0,30} Bot|mogimogi|mozDex|MJ12bot|msnbot(?:-media {0,2}|)|msrbot|Mtps Feed Aggregation System|netresearch|Netvibes|NewsGator[^/]{0,30}|^NING|Nutch[^/]{0,30}|Nymesis|ObjectsSearch|OgScrper|Orbiter|OOZBOT|PagePeeker|PagesInventory|PaxleFramework|Peeplo Screenshot Bot|PHPCrawl|PlantyNet_WebRobot|Pompos|Qwantify|Read%20Later|Reaper|RedCarpet|Retreiver|Riddler|Rival IQ|scooter|Scrapy|Scrubby|searchsight|seekbot|semanticdiscovery|SemrushBot|Simpy|SimplePie|SEOstats|SimpleRSS|SiteCon|Slackbot-LinkExpanding|Slack-ImgProxy|Slurp|snappy|Speedy Spider|Squrl Java|Stringer|TheUsefulbot|ThumbShotsBot|Thumbshots\.ru|Tiny Tiny RSS|Twitterbot|WhatsApp|URL2PNG|Vagabondo|VoilaBot|^vortex|Votay bot|^voyager|WASALive.Bot|Web-sniffer|WebThumb|WeSEE:[A-z]{1,30}|WhatWeb|WIRE|WordPress|Wotbox|www\.almaden\.ibm\.com|Xenu(?:.s|) Link Sleuth|Xerka [A-z]{1,30}Bot|yacy(?:bot|)|YahooSeeker|Yahoo! Slurp|Yandex\w{1,30}|YodaoBot(?:-[A-z]{1,30}|)|YottaaMonitor|Yowedo|^Zao|^Zao-Crawler|ZeBot_www\.ze\.bz|ZooShot|ZyBorg|ArcGIS Hub Indexer)(?:[ /]v?(\d+)(?:\.(\d+)(?:\.(\d+)|)|)|)' + - regex: '\b(Boto3?|JetS3t|aws-(?:cli|sdk-(?:cpp|go|go-v\d|java|nodejs|ruby2?|dotnet-(?:\d{1,2}|core)))|s3fs)/(\d+)\.(\d+)(?:\.(\d+)|)' + - regex: '(FME)\/(\d+\.\d+)\.(\d+)\.(\d+)' + - regex: '(QGIS)\/(\d)\.?0?(\d{1,2})\.?0?(\d{1,2})' + - regex: '(JOSM)/(\d+)\.(\d+)' + - regex: '(Tygron Platform) \((\d+)\.(\d+)\.(\d+(?:\.\d+| RC \d+\.\d+))' + - regex: '\[(FBAN/MessengerForiOS|FB_IAB/MESSENGER);FBAV/(\d+)(?:\.(\d+)(?:\.(\d+)(?:\.(\d+)|)|)|)' + family_replacement: 'Facebook Messenger' + - regex: '\[FB.{0,300};(FBAV)/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)' + family_replacement: 'Facebook' + - regex: '\[FB.{0,300};' + family_replacement: 'Facebook' + - regex: '(RecipeRadar)/(\d+)\.(\d+)(?:\.(\d+)|)' + - regex: '^.{0,200}?(?:\/[A-Za-z0-9\.]{0,50}|) {0,2}([A-Za-z0-9 \-_\!\[\]:]{0,50}(?:[Aa]rchiver|[Ii]ndexer|[Ss]craper|[Bb]ot|[Ss]pider|[Cc]rawl[a-z]{0,50}))[/ ](\d+)(?:\.(\d+)(?:\.(\d+)|)|)' + - regex: '^.{0,200}?((?:[A-Za-z][A-Za-z0-9 -]{0,50}|)[^C][^Uu][Bb]ot)\b(?:(?:[ /]| v)(\d+)(?:\.(\d+)|)(?:\.(\d+)|)|)' + - regex: '^.{0,200}?((?:[A-z0-9]{1,50}|[A-z\-]{1,50} ?|)(?: the |)(?:[Ss][Pp][Ii][Dd][Ee][Rr]|[Ss]crape|[Cc][Rr][Aa][Ww][Ll])[A-z0-9]{0,50})(?:(?:[ /]| v)(\d+)(?:\.(\d+)|)(?:\.(\d+)|)|)' + - regex: '(HbbTV)/(\d+)\.(\d+)\.(\d+) \(' + - regex: '(Chimera|SeaMonkey|Camino|Waterfox)/(\d+)\.(\d+)\.?([ab]?\d+[a-z]*|)' + - regex: '(SailfishBrowser)/(\d+)\.(\d+)(?:\.(\d+)|)' + family_replacement: 'Sailfish Browser' + - regex: '\[(Pinterest)/[^\]]{1,50}\]' + - regex: '(Pinterest)(?: for Android(?: Tablet|)|)/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)' + - regex: 'Mozilla.{1,200}Mobile.{1,100}(Instagram).(\d+)\.(\d+)\.(\d+)' + - regex: 
'Mozilla.{1,200}Mobile.{1,100}(Flipboard).(\d+)\.(\d+)\.(\d+)' + - regex: 'Mozilla.{1,200}Mobile.{1,100}(Flipboard-Briefing).(\d+)\.(\d+)\.(\d+)' + - regex: 'Mozilla.{1,200}Mobile.{1,100}(Onefootball)\/Android.(\d+)\.(\d+)\.(\d+)' + - regex: '(Snapchat)\/(\d+)\.(\d+)\.(\d+)\.(\d+)' + - regex: '(Twitter for (?:iPhone|iPad)|TwitterAndroid)(?:\/(\d+)\.(\d+)|)' + family_replacement: 'Twitter' + - regex: 'Mozilla.{1,200}Mobile.{1,100}(Phantom\/ios|Phantom\/android).(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Phantom' + - regex: 'Mozilla.{1,100}Mobile.{1,100}(AspiegelBot|PetalBot)' + family_replacement: 'Spider' + - regex: 'AspiegelBot|PetalBot' + family_replacement: 'Spider' + - regex: '(Firefox)/(\d+)\.(\d+) Basilisk/(\d+)' + family_replacement: 'Basilisk' + - regex: '(PaleMoon)/(\d+)\.(\d+)(?:\.(\d+)|)' + family_replacement: 'Pale Moon' + - regex: '(Fennec)/(\d+)\.(\d+)\.?([ab]?\d+[a-z]*)' + family_replacement: 'Firefox Mobile' + - regex: '(Fennec)/(\d+)\.(\d+)(pre)' + family_replacement: 'Firefox Mobile' + - regex: '(Fennec)/(\d+)\.(\d+)' + family_replacement: 'Firefox Mobile' + - regex: '(?:Mobile|Tablet);.{0,200}(Firefox)/(\d+)\.(\d+)' + family_replacement: 'Firefox Mobile' + - regex: '(Namoroka|Shiretoko|Minefield)/(\d+)\.(\d+)\.(\d+(?:pre|))' + family_replacement: 'Firefox ($1)' + - regex: '(Firefox)/(\d+)\.(\d+)(a\d+[a-z]*)' + family_replacement: 'Firefox Alpha' + - regex: '(Firefox)/(\d+)\.(\d+)(b\d+[a-z]*)' + family_replacement: 'Firefox Beta' + - regex: '(Firefox)-(?:\d+\.\d+|)/(\d+)\.(\d+)(a\d+[a-z]*)' + family_replacement: 'Firefox Alpha' + - regex: '(Firefox)-(?:\d+\.\d+|)/(\d+)\.(\d+)(b\d+[a-z]*)' + family_replacement: 'Firefox Beta' + - regex: '(Namoroka|Shiretoko|Minefield)/(\d+)\.(\d+)([ab]\d+[a-z]*|)' + family_replacement: 'Firefox ($1)' + - regex: '(Firefox).{0,200}Tablet browser (\d+)\.(\d+)\.(\d+)' + family_replacement: 'MicroB' + - regex: '(MozillaDeveloperPreview)/(\d+)\.(\d+)([ab]\d+[a-z]*|)' + - regex: '(FxiOS)/(\d+)\.(\d+)(\.(\d+)|)(\.(\d+)|)' + family_replacement: 'Firefox iOS' + - regex: '(Flock)/(\d+)\.(\d+)(b\d+?)' + - regex: '(RockMelt)/(\d+)\.(\d+)\.(\d+)' + - regex: '(Navigator)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Netscape' + - regex: '(Navigator)/(\d+)\.(\d+)([ab]\d+)' + family_replacement: 'Netscape' + - regex: '(Netscape6)/(\d+)\.(\d+)\.?([ab]?\d+|)' + family_replacement: 'Netscape' + - regex: '(MyIBrow)/(\d+)\.(\d+)' + family_replacement: 'My Internet Browser' + - regex: '(UC? 
?Browser|UCWEB|U3)[ /]?(\d+)\.(\d+)\.(\d+)' + family_replacement: 'UC Browser' + - regex: '(Opera Tablet).{0,200}Version/(\d+)\.(\d+)(?:\.(\d+)|)' + - regex: '(Opera Mini)(?:/att|)/?(\d+|)(?:\.(\d+)|)(?:\.(\d+)|)' + - regex: '(Opera)/.{1,100}Opera Mobi.{1,100}Version/(\d+)\.(\d+)' + family_replacement: 'Opera Mobile' + - regex: '(Opera)/(\d+)\.(\d+).{1,100}Opera Mobi' + family_replacement: 'Opera Mobile' + - regex: 'Opera Mobi.{1,100}(Opera)(?:/|\s+)(\d+)\.(\d+)' + family_replacement: 'Opera Mobile' + - regex: 'Opera Mobi' + family_replacement: 'Opera Mobile' + - regex: '(Opera)/9.80.{0,200}Version/(\d+)\.(\d+)(?:\.(\d+)|)' + - regex: '(?:Mobile Safari).{1,300}(OPR)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Opera Mobile' + - regex: '(?:Chrome).{1,300}(OPR)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Opera' + - regex: '(Coast)/(\d+).(\d+).(\d+)' + family_replacement: 'Opera Coast' + - regex: '(OPiOS)/(\d+).(\d+).(\d+)' + family_replacement: 'Opera Mini' + - regex: 'Chrome/.{1,200}( MMS)/(\d+).(\d+).(\d+)' + family_replacement: 'Opera Neon' + - regex: '(hpw|web)OS/(\d+)\.(\d+)(?:\.(\d+)|)' + family_replacement: 'webOS Browser' + - regex: '(luakit)' + family_replacement: 'LuaKit' + - regex: '(Snowshoe)/(\d+)\.(\d+).(\d+)' + - regex: 'Gecko/\d+ (Lightning)/(\d+)\.(\d+)\.?((?:[ab]?\d+[a-z]*)|(?:\d*))' + - regex: '(Firefox)/(\d+)\.(\d+)\.(\d+(?:pre|)) \(Swiftfox\)' + family_replacement: 'Swiftfox' + - regex: '(Firefox)/(\d+)\.(\d+)([ab]\d+[a-z]*|) \(Swiftfox\)' + family_replacement: 'Swiftfox' + - regex: '(rekonq)/(\d+)\.(\d+)(?:\.(\d+)|) Safari' + family_replacement: 'Rekonq' + - regex: 'rekonq' + family_replacement: 'Rekonq' + - regex: '(conkeror|Conkeror)/(\d+)\.(\d+)(?:\.(\d+)|)' + family_replacement: 'Conkeror' + - regex: '(konqueror)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Konqueror' + - regex: '(WeTab)-Browser' + - regex: '(Comodo_Dragon)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Comodo Dragon' + - regex: '(Symphony) (\d+).(\d+)' + - regex: 'PLAYSTATION 3.{1,200}WebKit' + family_replacement: 'NetFront NX' + - regex: 'PLAYSTATION 3' + family_replacement: 'NetFront' + - regex: '(PlayStation Portable)' + family_replacement: 'NetFront' + - regex: '(PlayStation Vita)' + family_replacement: 'NetFront NX' + - regex: 'AppleWebKit.{1,200} (NX)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'NetFront NX' + - regex: '(Nintendo 3DS)' + family_replacement: 'NetFront NX' + - regex: '(HuaweiBrowser)/(\d+)\.(\d+)\.(\d+)\.\d+' + family_replacement: 'Huawei Browser' + - regex: '(AVG)/(\d+)\.(\d+)\.(\d+)\.\d+' + family_replacement: 'AVG' + - regex: '(AvastSecureBrowser|Avast)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Avast Secure Browser' + - regex: '(Instabridge)/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)' + - regex: '(AlohaBrowser)/(\d+)\.(\d+)\.(\d+)(?:\.(\d+)|)' + family_replacement: 'Aloha Browser' + - regex: '((?:B|b)rave(?:\sChrome)?)/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)(?:\.(\d+)|)' + family_replacement: 'Brave' + - regex: '(Silk)/(\d+)\.(\d+)(?:\.([0-9\-]+)|)' + family_replacement: 'Amazon Silk' + - regex: '(Puffin)/(\d+)\.(\d+)(?:\.(\d+)|)' + - regex: 'Windows Phone .{0,200}(Edge)/(\d+)\.(\d+)' + family_replacement: 'Edge Mobile' + - regex: '(EdgiOS|EdgA)/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)(?:\.(\d+)|)' + family_replacement: 'Edge Mobile' + - regex: '(OculusBrowser)/(\d+)\.(\d+).0.0(?:\.([0-9\-]+)|)' + family_replacement: 'Oculus Browser' + - regex: '(SamsungBrowser)/(\d+)\.(\d+)' + family_replacement: 'Samsung Internet' + - regex: '(SznProhlizec)/(\d+)\.(\d+)(?:\.(\d+)|)' + family_replacement: 'Seznam prohlížeč' + - 
regex: '(coc_coc_browser)/(\d+)\.(\d+)(?:\.(\d+)|)' + family_replacement: 'Coc Coc' + - regex: '(baidubrowser)[/\s](\d+)(?:\.(\d+)|)(?:\.(\d+)|)' + family_replacement: 'Baidu Browser' + - regex: '(FlyFlow)/(\d+)\.(\d+)' + family_replacement: 'Baidu Explorer' + - regex: '(MxBrowser)/(\d+)\.(\d+)(?:\.(\d+)|)' + family_replacement: 'Maxthon' + - regex: '(Crosswalk)/(\d+)\.(\d+)\.(\d+)\.(\d+)' + - regex: '(Line)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'LINE' + - regex: '(MiuiBrowser)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'MiuiBrowser' + - regex: '(Mint Browser)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Mint Browser' + - regex: '(TopBuzz)/(\d+).(\d+).(\d+)' + family_replacement: 'TopBuzz' + - regex: 'Mozilla.{1,200}Android.{1,200}(GSA)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Google' + - regex: '(MQQBrowser/Mini)(?:(\d+)(?:\.(\d+)|)(?:\.(\d+)|)|)' + family_replacement: 'QQ Browser Mini' + - regex: '(MQQBrowser)(?:/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)|)' + family_replacement: 'QQ Browser Mobile' + - regex: '(QQBrowser)(?:/(\d+)(?:\.(\d+)\.(\d+)(?:\.(\d+)|)|)|)' + family_replacement: 'QQ Browser' + - regex: 'Mozilla.{1,200}Mobile.{1,100}(DuckDuckGo)/(\d+)' + family_replacement: 'DuckDuckGo Mobile' + - regex: 'Mozilla.{1,200}(DuckDuckGo)/(\d+)' + family_replacement: 'DuckDuckGo' + - regex: 'Mozilla.{1,200}Mobile.{1,100}(Ddg)/(\d+)(?:\.(\d+)|)' + family_replacement: 'DuckDuckGo Mobile' + - regex: 'Mozilla.{1,200}(Ddg)/(\d+)(?:\.(\d+)|)' + family_replacement: 'DuckDuckGo' + - regex: '(Tenta/)(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Tenta Browser' + - regex: '(Ecosia) ios@(\d+)(?:\.(\d+)|)(?:\.(\d+)|)(?:\.(\d+)|)' + family_replacement: 'Ecosia iOS' + - regex: '(Ecosia) android@(\d+)(?:\.(\d+)|)(?:\.(\d+)|)(?:\.(\d+)|)' + family_replacement: 'Ecosia Android' + - regex: '(VivoBrowser)\/(\d+)\.(\d+)\.(\d+)\.(\d+)' + - regex: '(HiBrowser)\/v(\d+)\.(\d+)\.(\d+)\.(\d+)' + - regex: 'Version/.{1,300}(Chrome)/(\d+)\.(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Chrome Mobile WebView' + - regex: '; wv\).{1,300}(Chrome)/(\d+)\.(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Chrome Mobile WebView' + - regex: '(CrMo)/(\d+)\.(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Chrome Mobile' + - regex: '(CriOS)/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)(?:\.(\d+)|)' + family_replacement: 'Chrome Mobile iOS' + - regex: '(Chrome)/(\d+)\.(\d+)\.(\d+)\.(\d+) Mobile(?:[ /]|$)' + family_replacement: 'Chrome Mobile' + - regex: ' Mobile .{1,300}(Chrome)/(\d+)\.(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Chrome Mobile' + - regex: '(chromeframe)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Chrome Frame' + - regex: '(SLP Browser)/(\d+)\.(\d+)' + family_replacement: 'Tizen Browser' + - regex: '(SE 2\.X) MetaSr (\d+)\.(\d+)' + family_replacement: 'Sogou Explorer' + - regex: '(Rackspace Monitoring)/(\d+)\.(\d+)' + family_replacement: 'RackspaceBot' + - regex: '(PRTG Network Monitor)' + - regex: '(PyAMF)/(\d+)\.(\d+)\.(\d+)' + - regex: '(YaBrowser)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Yandex Browser' + - regex: '(YaSearchBrowser)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Yandex Browser' + - regex: '(Chrome)/(\d+)\.(\d+)\.(\d+).{0,100} MRCHROME' + family_replacement: 'Mail.ru Chromium Browser' + - regex: '(AOL) (\d+)\.(\d+); AOLBuild (\d+)' + - regex: '(PodCruncher|Downcast)[ /]?(\d+)(?:\.(\d+)|)(?:\.(\d+)|)(?:\.(\d+)|)' + - regex: ' (BoxNotes)/(\d+)\.(\d+)\.(\d+)' + - regex: '(Whale)/(\d+)\.(\d+)\.(\d+)\.(\d+) Mobile(?:[ /]|$)' + family_replacement: 'Whale' + - regex: '(Whale)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Whale' + - regex: 
'(1Password)/(\d+)\.(\d+)\.(\d+)' + - regex: '(Ghost)/(\d+)\.(\d+)\.(\d+)' + - regex: 'PAN (GlobalProtect)/(\d+)\.(\d+)\.(\d+) .{1,100} \(X11; Linux x86_64\)' + - regex: '^(surveyon)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Surveyon' + - regex: '(Slack_SSB)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Slack Desktop Client' + - regex: '(HipChat)/?(\d+|)' + family_replacement: 'HipChat Desktop Client' + - regex: '\b(MobileIron|FireWeb|Jasmine|ANTGalio|Midori|Fresco|Lobo|PaleMoon|Maxthon|Lynx|OmniWeb|Dillo|Camino|Demeter|Fluid|Fennec|Epiphany|Shiira|Sunrise|Spotify|Flock|Netscape|Lunascape|WebPilot|NetFront|Netfront|Konqueror|SeaMonkey|Kazehakase|Vienna|Iceape|Iceweasel|IceWeasel|Iron|K-Meleon|Sleipnir|Galeon|GranParadiso|Opera Mini|iCab|NetNewsWire|ThunderBrowse|Iris|UP\.Browser|Bunjalloo|Google Earth|Raven for Mac|Openwave|MacOutlook|Electron|OktaMobile)/(\d+)\.(\d+)\.(\d+)' + - regex: 'Microsoft Office Outlook 12\.\d+\.\d+|MSOffice 12' + family_replacement: 'Outlook' + v1_replacement: '2007' + - regex: 'Microsoft Outlook 14\.\d+\.\d+|MSOffice 14' + family_replacement: 'Outlook' + v1_replacement: '2010' + - regex: 'Microsoft Outlook 15\.\d+\.\d+' + family_replacement: 'Outlook' + v1_replacement: '2013' + - regex: 'Microsoft Outlook (?:Mail )?16\.\d+\.\d+|MSOffice 16' + family_replacement: 'Outlook' + v1_replacement: '2016' + - regex: 'Microsoft Office (Word) 2014' + - regex: 'Outlook-Express\/7\.0' + family_replacement: 'Windows Live Mail' + - regex: '(Airmail) (\d+)\.(\d+)(?:\.(\d+)|)' + - regex: '(Thunderbird)/(\d+)\.(\d+)(?:\.(\d+(?:pre|))|)' + family_replacement: 'Thunderbird' + - regex: '(Postbox)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Postbox' + - regex: '(Barca(?:Pro)?)/(\d+)\.(\d+)(?:\.(\d+)|)' + family_replacement: 'Barca' + - regex: '(Lotus-Notes)/(\d+)\.(\d+)(?:\.(\d+)|)' + family_replacement: 'Lotus Notes' + - regex: 'Superhuman' + family_replacement: 'Superhuman' + - regex: '(Vivaldi)/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)' + - regex: '(Edge?)/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)(?:\.(\d+)|)' + family_replacement: 'Edge' + - regex: '(Chrome)/(\d+)\.(\d+)\.(\d+)[\d.]{0,100} Iron[^/]' + family_replacement: 'Iron' + - regex: '\b(Dolphin)(?: |HDCN/|/INT\-)(\d+)\.(\d+)(?:\.(\d+)|)' + - regex: '(HeadlessChrome)(?:/(\d+)\.(\d+)\.(\d+)|)' + - regex: '(Evolution)/(\d+)\.(\d+)\.(\d+\.\d+)' + - regex: '(RCM CardDAV plugin)/(\d+)\.(\d+)\.(\d+(?:-dev|))' + - regex: '(bingbot|Bolt|AdobeAIR|Jasmine|IceCat|Skyfire|Midori|Maxthon|Lynx|Arora|IBrowse|Dillo|Camino|Shiira|Fennec|Phoenix|Flock|Netscape|Lunascape|Epiphany|WebPilot|Opera Mini|Opera|NetFront|Netfront|Konqueror|Googlebot|SeaMonkey|Kazehakase|Vienna|Iceape|Iceweasel|IceWeasel|Iron|K-Meleon|Sleipnir|Galeon|GranParadiso|iCab|iTunes|MacAppStore|NetNewsWire|Space Bison|Stainless|Orca|Dolfin|BOLT|Minimo|Tizen Browser|Polaris|Abrowser|Planetweb|ICE Browser|mDolphin|qutebrowser|Otter|QupZilla|MailBar|kmail2|YahooMobileMail|ExchangeWebServices|ExchangeServicesClient|Dragon|Outlook-iOS-Android)/(\d+)\.(\d+)(?:\.(\d+)|)' + - regex: '(Chromium|Chrome)/(\d+)\.(\d+)(?:\.(\d+)|)(?:\.(\d+)|)' + - regex: '(IEMobile)[ /](\d+)\.(\d+)' + family_replacement: 'IE Mobile' + - regex: '(BacaBerita App)\/(\d+)\.(\d+)\.(\d+)' + - regex: '^(bPod|Pocket Casts|Player FM)$' + - regex: '^(AlexaMediaPlayer|VLC)/(\d+)\.(\d+)\.([^.\s]+)' + - regex: 
'^(AntennaPod|WMPlayer|Zune|Podkicker|Radio|ExoPlayerDemo|Overcast|PocketTunes|NSPlayer|okhttp|DoggCatcher|QuickNews|QuickTime|Peapod|Podcasts|GoldenPod|VLC|Spotify|Miro|MediaGo|Juice|iPodder|gPodder|Banshee)/(\d+)\.(\d+)(?:\.(\d+)|)(?:\.(\d+)|)' + - regex: '^(Peapod|Liferea)/([^.\s]+)\.([^.\s]+|)\.?([^.\s]+|)' + - regex: '^(bPod|Player FM) BMID/(\S+)' + - regex: '^(Podcast ?Addict)/v(\d+) ' + - regex: '^(Podcast ?Addict) ' + family_replacement: 'PodcastAddict' + - regex: '(Replay) AV' + - regex: '(VOX) Music Player' + - regex: '(CITA) RSS Aggregator/(\d+)\.(\d+)' + - regex: '(Pocket Casts)$' + - regex: '(Player FM)$' + - regex: '(LG Player|Doppler|FancyMusic|MediaMonkey|Clementine) (\d+)\.(\d+)\.?([^.\s]+|)\.?([^.\s]+|)' + - regex: '(philpodder)/(\d+)\.(\d+)\.?([^.\s]+|)\.?([^.\s]+|)' + - regex: '(Player FM|Pocket Casts|DoggCatcher|Spotify|MediaMonkey|MediaGo|BashPodder)' + - regex: '(QuickTime)\.(\d+)\.(\d+)\.(\d+)' + - regex: '(Kinoma)(\d+)' + - regex: '(Fancy) Cloud Music (\d+)\.(\d+)' + family_replacement: 'FancyMusic' + - regex: 'EspnDownloadManager' + family_replacement: 'ESPN' + - regex: '(ESPN) Radio (\d+)\.(\d+)(?:\.(\d+)|) ?(?:rv:(\d+)|) ' + - regex: '(podracer|jPodder) v ?(\d+)\.(\d+)(?:\.(\d+)|)' + - regex: '(ZDM)/(\d+)\.(\d+)[; ]?' + - regex: '(Zune|BeyondPod) (\d+)(?:\.(\d+)|)[\);]' + - regex: '(WMPlayer)/(\d+)\.(\d+)\.(\d+)\.(\d+)' + - regex: '^(Lavf)' + family_replacement: 'WMPlayer' + - regex: '^(RSSRadio)[ /]?(\d+|)' + - regex: '(RSS_Radio) (\d+)\.(\d+)' + family_replacement: 'RSSRadio' + - regex: '(Podkicker) \S+/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Podkicker' + - regex: '^(HTC) Streaming Player \S+ / \S+ / \S+ / (\d+)\.(\d+)(?:\.(\d+)|)' + - regex: '^(Stitcher)/iOS' + - regex: '^(Stitcher)/Android' + - regex: '^(VLC) .{0,200}version (\d+)\.(\d+)\.(\d+)' + - regex: ' (VLC) for' + - regex: '(vlc)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'VLC' + - regex: '^(foobar)\S{1,10}/(\d+)\.(\d+|)\.?([\da-z]+|)' + - regex: '^(Clementine)\S{1,10} (\d+)\.(\d+|)\.?(\d+|)' + - regex: '(amarok)/(\d+)\.(\d+|)\.?(\d+|)' + family_replacement: 'Amarok' + - regex: '(Custom)-Feed Reader' + - regex: '(iRider|Crazy Browser|SkipStone|iCab|Lunascape|Sleipnir|Maemo Browser) (\d+)\.(\d+)\.(\d+)' + - regex: '(iCab|Lunascape|Opera|Android|Jasmine|Polaris|Microsoft SkyDriveSync|The Bat!) 
(\d+)(?:\.(\d+)|)(?:\.(\d+)|)' + - regex: '(Kindle)/(\d+)\.(\d+)' + - regex: '(Android) Donut' + v1_replacement: '1' + v2_replacement: '2' + - regex: '(Android) Eclair' + v1_replacement: '2' + v2_replacement: '1' + - regex: '(Android) Froyo' + v1_replacement: '2' + v2_replacement: '2' + - regex: '(Android) Gingerbread' + v1_replacement: '2' + v2_replacement: '3' + - regex: '(Android) Honeycomb' + v1_replacement: '3' + - regex: '(MSIE) (\d+)\.(\d+).{0,100}XBLWP7' + family_replacement: 'IE Large Screen' + - regex: '(Nextcloud)' + - regex: '(mirall)/(\d+)\.(\d+)\.(\d+)' + - regex: '(ownCloud-android)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Owncloud' + - regex: '(OC)/(\d+)\.(\d+)\.(\d+)\.(\d+) \(Skype for Business\)' + family_replacement: 'Skype' + - regex: '(OpenVAS)(?:-VT)?(?:[ \/](\d+)(?:\.(\d+)|)(?:\.(\d+)|)|)' + family_replacement: 'OpenVAS Scanner' + - regex: '(AnyConnect)\/(\d+)(?:\.(\d+)(?:\.(\d+)|)|)' + - regex: 'compatible; monitis' + family_replacement: 'Monitis' + - regex: '(Obigo)InternetBrowser' + - regex: '(Obigo)\-Browser' + - regex: '(Obigo|OBIGO)[^\d]*(\d+)(?:.(\d+)|)' + family_replacement: 'Obigo' + - regex: '(MAXTHON|Maxthon) (\d+)\.(\d+)' + family_replacement: 'Maxthon' + - regex: '(Maxthon|MyIE2|Uzbl|Shiira)' + v1_replacement: '0' + - regex: '(BrowseX) \((\d+)\.(\d+)\.(\d+)' + - regex: '(NCSA_Mosaic)/(\d+)\.(\d+)' + family_replacement: 'NCSA Mosaic' + - regex: '(POLARIS)/(\d+)\.(\d+)' + family_replacement: 'Polaris' + - regex: '(Embider)/(\d+)\.(\d+)' + family_replacement: 'Polaris' + - regex: '(BonEcho)/(\d+)\.(\d+)\.?([ab]?\d+|)' + family_replacement: 'Bon Echo' + - regex: '(TopBuzz) com.alex.NewsMaster/(\d+).(\d+).(\d+)' + family_replacement: 'TopBuzz' + - regex: '(TopBuzz) com.mobilesrepublic.newsrepublic/(\d+).(\d+).(\d+)' + family_replacement: 'TopBuzz' + - regex: '(TopBuzz) com.topbuzz.videoen/(\d+).(\d+).(\d+)' + family_replacement: 'TopBuzz' + - regex: '(iPod|iPhone|iPad).{1,200}GSA/(\d+)\.(\d+)\.(\d+)(?:\.(\d+)|) Mobile' + family_replacement: 'Google' + - regex: '(iPod|iPhone|iPad).{1,200}Version/(\d+)\.(\d+)(?:\.(\d+)|).{1,200}[ +]Safari' + family_replacement: 'Mobile Safari' + - regex: '(iPod|iPod touch|iPhone|iPad);.{0,30}CPU.{0,30}OS[ +](\d+)_(\d+)(?:_(\d+)|).{0,30} AppleNews\/\d+\.\d+(?:\.\d+|)' + family_replacement: 'Mobile Safari UI/WKWebView' + - regex: '(iPod|iPhone|iPad).{1,200}Version/(\d+)\.(\d+)(?:\.(\d+)|)' + family_replacement: 'Mobile Safari UI/WKWebView' + - regex: '(iPod|iPod touch|iPhone|iPad).{0,200} Safari' + family_replacement: 'Mobile Safari' + - regex: '(iPod|iPod touch|iPhone|iPad)' + family_replacement: 'Mobile Safari UI/WKWebView' + - regex: '(Watch)(\d+),(\d+)' + family_replacement: 'Apple $1 App' + - regex: '(Outlook-iOS)/\d+\.\d+\.prod\.iphone \((\d+)\.(\d+)\.(\d+)\)' + - regex: '(AvantGo) (\d+).(\d+)' + - regex: '(OneBrowser)/(\d+).(\d+)' + family_replacement: 'ONE Browser' + - regex: '(Avant)' + v1_replacement: '1' + - regex: '(QtCarBrowser)' + v1_replacement: '1' + - regex: '^(iBrowser/Mini)(\d+).(\d+)' + family_replacement: 'iBrowser Mini' + - regex: '^(iBrowser|iRAPP)/(\d+).(\d+)' + - regex: '^(Nokia)' + family_replacement: 'Nokia Services (WAP) Browser' + - regex: '(NokiaBrowser)/(\d+)\.(\d+).(\d+)\.(\d+)' + family_replacement: 'Nokia Browser' + - regex: '(NokiaBrowser)/(\d+)\.(\d+).(\d+)' + family_replacement: 'Nokia Browser' + - regex: '(NokiaBrowser)/(\d+)\.(\d+)' + family_replacement: 'Nokia Browser' + - regex: '(BrowserNG)/(\d+)\.(\d+).(\d+)' + family_replacement: 'Nokia Browser' + - regex: '(Series60)/5\.0' + 
family_replacement: 'Nokia Browser' + v1_replacement: '7' + v2_replacement: '0' + - regex: '(Series60)/(\d+)\.(\d+)' + family_replacement: 'Nokia OSS Browser' + - regex: '(S40OviBrowser)/(\d+)\.(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Ovi Browser' + - regex: '(Nokia)[EN]?(\d+)' + - regex: '(PlayBook).{1,200}RIM Tablet OS (\d+)\.(\d+)\.(\d+)' + family_replacement: 'BlackBerry WebKit' + - regex: '(Black[bB]erry|BB10).{1,200}Version/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'BlackBerry WebKit' + - regex: '(Black[bB]erry)\s?(\d+)' + family_replacement: 'BlackBerry' + - regex: '(OmniWeb)/v(\d+)\.(\d+)' + - regex: '(Blazer)/(\d+)\.(\d+)' + family_replacement: 'Palm Blazer' + - regex: '(Pre)/(\d+)\.(\d+)' + family_replacement: 'Palm Pre' + - regex: '(ELinks)/(\d+)\.(\d+)' + - regex: '(ELinks) \((\d+)\.(\d+)' + - regex: '(Links) \((\d+)\.(\d+)' + - regex: '(QtWeb) Internet Browser/(\d+)\.(\d+)' + - regex: '(PhantomJS)/(\d+)\.(\d+)\.(\d+)' + - regex: '(AppleWebKit)/(\d+)(?:\.(\d+)|)\+ .{0,200} Safari' + family_replacement: 'WebKit Nightly' + - regex: '(Version)/(\d+)\.(\d+)(?:\.(\d+)|).{0,100}Safari/' + family_replacement: 'Safari' + - regex: '(Safari)/\d+' + - regex: '(OLPC)/Update(\d+)\.(\d+)' + - regex: '(OLPC)/Update()\.(\d+)' + v1_replacement: '0' + - regex: '(SEMC\-Browser)/(\d+)\.(\d+)' + - regex: '(Teleca)' + family_replacement: 'Teleca Browser' + - regex: '(Phantom)/V(\d+)\.(\d+)' + family_replacement: 'Phantom Browser' + - regex: '(Trident)/(7|8)\.(0)' + family_replacement: 'IE' + v1_replacement: '11' + - regex: '(Trident)/(6)\.(0)' + family_replacement: 'IE' + v1_replacement: '10' + - regex: '(Trident)/(5)\.(0)' + family_replacement: 'IE' + v1_replacement: '9' + - regex: '(Trident)/(4)\.(0)' + family_replacement: 'IE' + v1_replacement: '8' + - regex: '(Espial)/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)' + - regex: '(AppleWebKit)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Apple Mail' + - regex: '(Firefox)/(\d+)\.(\d+)(?:\.(\d+)|$)' + - regex: '(Firefox)/(\d+)\.(\d+)(pre|[ab]\d+[a-z]*|)' + - regex: '([MS]?IE) (\d+)\.(\d+)' + family_replacement: 'IE' + - regex: '(python-requests)/(\d+)\.(\d+)' + family_replacement: 'Python Requests' + - regex: '\b(Windows-Update-Agent|WindowsPowerShell|Microsoft-CryptoAPI|SophosUpdateManager|SophosAgent|Debian APT-HTTP|Ubuntu APT-HTTP|libcurl-agent|libwww-perl|urlgrabber|curl|PycURL|Wget|wget2|aria2|Axel|OpenBSD ftp|lftp|jupdate|insomnia|fetch libfetch|akka-http|got|CloudCockpitBackend|ReactorNetty|axios|Jersey|Vert.x-WebClient|Apache-CXF|Go-CF-client|go-resty|AHC|HTTPie)(?:[ /](\d+)(?:\.(\d+)|)(?:\.(\d+)|)|)' + - regex: '^(cf)\/(\d+)\.(\d+)\.(\S+)' + family_replacement: 'CloudFoundry' + - regex: '^(sap-leonardo-iot-sdk-nodejs) \/ (\d+)\.(\d+)\.(\d+)' + - regex: '^(SAP NetWeaver Application Server) \(1.0;(\d{1})(\d{2})\)' + - regex: '^(\w+-HTTPClient)\/(\d+)\.(\d+)-(\S+)' + family_replacement: 'HTTPClient' + - regex: '^(go-cli)\s(\d+)\.(\d+).(\S+)' + - regex: '^(Java-EurekaClient|Java-EurekaClient-Replication|HTTPClient|lua-resty-http)\/v?(\d+)\.(\d+)\.?(\d*)' + - regex: '^(ping-service|sap xsuaa|Node-oauth|Site24x7|SAP CPI|JAEGER_SECURITY)' + - regex: '(Python/3\.\d{1,3} aiohttp)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Python aiohttp' + - regex: '(Java)[/ ]?\d{1}\.(\d+)\.(\d+)[_-]*([a-zA-Z0-9]+|)' + - regex: '(Java)[/ ]?(\d+)\.(\d+)\.(\d+)' + - regex: '(minio-go)/v(\d+)\.(\d+)\.(\d+)' + - regex: '^(ureq)[/ ](\d+)\.(\d+).(\d+)' + - regex: '^(http\.rb)/(\d+)\.(\d+).(\d+)' + - regex: '^(GuzzleHttp)/(\d+)\.(\d+).(\d+)' + - regex: '^(grab)\b' + - regex: 
'^(Cyberduck)/(\d+)\.(\d+)\.(\d+)(?:\.\d+|)' + - regex: '^(S3 Browser) (\d+)[.-](\d+)[.-](\d+)(?:\s*https?://s3browser\.com|)' + - regex: '(S3Gof3r)' + - regex: '\b(ibm-cos-sdk-(?:core|java|js|python))/(\d+)\.(\d+)(?:\.(\d+)|)' + - regex: '^(rusoto)/(\d+)\.(\d+)\.(\d+)' + - regex: '^(rclone)/v(\d+)\.(\d+)' + - regex: '^(Roku)/DVP-(\d+)\.(\d+)' + - regex: '(Kurio)\/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Kurio App' + - regex: '^(Box(?: Sync)?)/(\d+)\.(\d+)\.(\d+)' + - regex: '^(ViaFree|Viafree)-(?:tvOS-)?[A-Z]{2}/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'ViaFree' + - regex: '(Transmit)/(\d+)\.(\d+)\.(\d+)' + - regex: '(Download Master)' + - regex: '\b(HTTrack) (\d+)\.(\d+)(?:[\.\-](\d+)|)' + - regex: '(Ladybird)\/(\d+)\.(\d+)' + - regex: '(MullvadBrowser)/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)' +os_parsers: + - regex: 'HbbTV/\d+\.\d+\.\d+ \( ;(LG)E ;NetCast 4.0' + os_v1_replacement: '2013' + - regex: 'HbbTV/\d+\.\d+\.\d+ \( ;(LG)E ;NetCast 3.0' + os_v1_replacement: '2012' + - regex: 'HbbTV/1.1.1 \(;;;;;\) Maple_2011' + os_replacement: 'Samsung' + os_v1_replacement: '2011' + - regex: 'HbbTV/\d+\.\d+\.\d+ \(;(Samsung);SmartTV([0-9]{4});.{0,200}FXPDEUC' + os_v2_replacement: 'UE40F7000' + - regex: 'HbbTV/\d+\.\d+\.\d+ \(;(Samsung);SmartTV([0-9]{4});.{0,200}MST12DEUC' + os_v2_replacement: 'UE32F4500' + - regex: 'HbbTV/1\.1\.1 \(; (Philips);.{0,200}NETTV/4' + os_v1_replacement: '2013' + - regex: 'HbbTV/1\.1\.1 \(; (Philips);.{0,200}NETTV/3' + os_v1_replacement: '2012' + - regex: 'HbbTV/1\.1\.1 \(; (Philips);.{0,200}NETTV/2' + os_v1_replacement: '2011' + - regex: 'HbbTV/\d+\.\d+\.\d+.{0,100}(firetv)-firefox-plugin (\d+).(\d+).(\d+)' + os_replacement: 'FireHbbTV' + - regex: 'HbbTV/\d+\.\d+\.\d+ \(.{0,30}; ?([a-zA-Z]+) ?;.{0,30}(201[1-9]).{0,30}\)' + - regex: 'AspiegelBot|PetalBot' + os_replacement: 'Other' + - regex: '(Windows Phone) (?:OS[ /])?(\d+)\.(\d+)' + - regex: '(CPU[ +]OS|iPhone[ +]OS|CPU[ +]iPhone)[ +]+(\d+)[_\.](\d+)(?:[_\.](\d+)|).{0,100}Outlook-iOS-Android' + os_replacement: 'iOS' + - regex: 'ArcGIS\.?(iOS|Android)-\d+\.\d+(?:\.\d+|)(?:[^\/]{1,50}|)\/(\d+)(?:\.(\d+)(?:\.(\d+)|)|)' + - regex: 'ArcGISRuntime-(?:Android|iOS)\/\d+\.\d+(?:\.\d+|) \((Android|iOS) (\d+)(?:\.(\d+)(?:\.(\d+)|)|);' + - regex: '(Android) (\d+)(?:\.(\d+)).*CrKey' + os_replacement: 'Chromecast Android' + - regex: 'Fuchsia.*(CrKey)(?:[/](\d+)\.(\d+)(?:\.(\d+)|)|)' + os_replacement: 'Chromecast Fuchsia' + - regex: 'Linux.*(CrKey)(?:[/](\d+)\.(\d+)(?:\.(\d+)|)|).*DeviceType/SmartSpeaker' + os_replacement: 'Chromecast SmartSpeaker' + - regex: 'Linux.*(CrKey)(?:[/](\d+)\.(\d+)(?:\.(\d+)|)|)' + os_replacement: 'Chromecast Linux' + - regex: '(Android)[ \-/](\d+)(?:\.(\d+)|)(?:[.\-]([a-z0-9]+)|)' + - regex: '(Android) Donut' + os_v1_replacement: '1' + os_v2_replacement: '2' + - regex: '(Android) Eclair' + os_v1_replacement: '2' + os_v2_replacement: '1' + - regex: '(Android) Froyo' + os_v1_replacement: '2' + os_v2_replacement: '2' + - regex: '(Android) Gingerbread' + os_v1_replacement: '2' + os_v2_replacement: '3' + - regex: '(Android) Honeycomb' + os_v1_replacement: '3' + - regex: '(Android) (\d+);' + - regex: '(Android): (\d+)(?:\.(\d+)(?:\.(\d+)|)|);' + - regex: '^UCWEB.{0,200}; (Adr) (\d+)\.(\d+)(?:[.\-]([a-z0-9]{1,100})|);' + os_replacement: 'Android' + - regex: '^UCWEB.{0,200}; (iPad|iPh|iPd) OS (\d+)_(\d+)(?:_(\d+)|);' + os_replacement: 'iOS' + - regex: '^UCWEB.{0,200}; (wds) (\d+)\.(\d+)(?:\.(\d+)|);' + os_replacement: 'Windows Phone' + - regex: '^(JUC).{0,200}; ?U; 
?(?:Android|)(\d+)\.(\d+)(?:[\.\-]([a-z0-9]{1,100})|)' + os_replacement: 'Android' + - regex: '(android)\s(?:mobile\/)(\d+)(?:\.(\d+)(?:\.(\d+)|)|)' + os_replacement: 'Android' + - regex: 'Quest' + os_replacement: 'Android' + - regex: '(Silk-Accelerated=[a-z]{4,5})' + os_replacement: 'Android' + - regex: '(x86_64|aarch64)\ (\d+)\.(\d+)\.(\d+).{0,100}Chrome.{0,100}(?:CitrixChromeApp)$' + os_replacement: 'Chrome OS' + - regex: '(XBLWP7)' + os_replacement: 'Windows Phone' + - regex: '(Windows ?Mobile)' + os_replacement: 'Windows Mobile' + - regex: '(Windows 10)' + os_replacement: 'Windows' + os_v1_replacement: '10' + - regex: '(Windows (?:NT 5\.2|NT 5\.1))' + os_replacement: 'Windows' + os_v1_replacement: 'XP' + - regex: '(Win(?:dows NT |32NT\/)6\.1)' + os_replacement: 'Windows' + os_v1_replacement: '7' + - regex: '(Win(?:dows NT |32NT\/)6\.0)' + os_replacement: 'Windows' + os_v1_replacement: 'Vista' + - regex: '(Win 9x 4\.90)' + os_replacement: 'Windows' + os_v1_replacement: 'ME' + - regex: '(Windows NT 6\.2; ARM;)' + os_replacement: 'Windows' + os_v1_replacement: 'RT' + - regex: '(Win(?:dows NT |32NT\/)6\.2)' + os_replacement: 'Windows' + os_v1_replacement: '8' + - regex: '(Windows NT 6\.3; ARM;)' + os_replacement: 'Windows' + os_v1_replacement: 'RT 8' + os_v2_replacement: '1' + - regex: '(Win(?:dows NT |32NT\/)6\.3)' + os_replacement: 'Windows' + os_v1_replacement: '8' + os_v2_replacement: '1' + - regex: '(Win(?:dows NT |32NT\/)6\.4)' + os_replacement: 'Windows' + os_v1_replacement: '10' + - regex: '(Windows NT 10\.0)' + os_replacement: 'Windows' + os_v1_replacement: '10' + - regex: '(Windows NT 5\.0)' + os_replacement: 'Windows' + os_v1_replacement: '2000' + - regex: '(WinNT4.0)' + os_replacement: 'Windows' + os_v1_replacement: 'NT 4.0' + - regex: '(Windows ?CE)' + os_replacement: 'Windows' + os_v1_replacement: 'CE' + - regex: 'Win(?:dows)? 
?(95|98|3.1|NT|ME|2000|XP|Vista|7|CE)' + os_replacement: 'Windows' + os_v1_replacement: '$1' + - regex: 'Win16' + os_replacement: 'Windows' + os_v1_replacement: '3.1' + - regex: 'Win32' + os_replacement: 'Windows' + os_v1_replacement: '95' + - regex: '^Box.{0,200}Windows/([\d.]+);' + os_replacement: 'Windows' + os_v1_replacement: '$1' + - regex: '(Tizen)[/ ](\d+)\.(\d+)' + - regex: 'Intel Mac OS X.+(CriOS|EdgiOS)/\d+' + os_replacement: 'iOS' + - regex: '((?:Mac[ +]?|; )OS[ +]X)[\s+/](?:(\d+)[_.](\d+)(?:[_.](\d+)|)|Mach-O)' + os_replacement: 'Mac OS X' + - regex: 'Mac OS X\s.{1,50}\s(\d+).(\d+).(\d+)' + os_replacement: 'Mac OS X' + os_v1_replacement: '$1' + os_v2_replacement: '$2' + os_v3_replacement: '$3' + - regex: ' (Dar)(win)/(9).(\d+).{0,100}\((?:i386|x86_64|Power Macintosh)\)' + os_replacement: 'Mac OS X' + os_v1_replacement: '10' + os_v2_replacement: '5' + - regex: ' (Dar)(win)/(10).(\d+).{0,100}\((?:i386|x86_64)\)' + os_replacement: 'Mac OS X' + os_v1_replacement: '10' + os_v2_replacement: '6' + - regex: ' (Dar)(win)/(11).(\d+).{0,100}\((?:i386|x86_64)\)' + os_replacement: 'Mac OS X' + os_v1_replacement: '10' + os_v2_replacement: '7' + - regex: ' (Dar)(win)/(12).(\d+).{0,100}\((?:i386|x86_64)\)' + os_replacement: 'Mac OS X' + os_v1_replacement: '10' + os_v2_replacement: '8' + - regex: ' (Dar)(win)/(13).(\d+).{0,100}\((?:i386|x86_64)\)' + os_replacement: 'Mac OS X' + os_v1_replacement: '10' + os_v2_replacement: '9' + - regex: 'Mac_PowerPC' + os_replacement: 'Mac OS' + - regex: '(?:PPC|Intel) (Mac OS X)' + - regex: '^Box.{0,200};(Darwin)/(10)\.(1\d)(?:\.(\d+)|)' + os_replacement: 'Mac OS X' + - regex: 'darwin; arm64' + os_replacement: 'Mac OS X' + - regex: '(Apple\s?TV)(?:/(\d+)\.(\d+)|)' + os_replacement: 'ATV OS X' + - regex: '(CPU[ +]OS|iPhone[ +]OS|CPU[ +]iPhone|CPU IPhone OS|CPU iPad OS)[ +]+(\d+)[_\.](\d+)(?:[_\.](\d+)|)' + os_replacement: 'iOS' + - regex: '(iPhone|iPad|iPod); Opera' + os_replacement: 'iOS' + - regex: '(iPhone|iPad|iPod).{0,100}Mac OS X.{0,100}Version/(\d+)\.(\d+)' + os_replacement: 'iOS' + - regex: '(CFNetwork)/(5)48\.0\.3.{0,100} Darwin/11\.0\.0' + os_replacement: 'iOS' + - regex: '(CFNetwork)/(5)48\.(0)\.4.{0,100} Darwin/(1)1\.0\.0' + os_replacement: 'iOS' + - regex: '(CFNetwork)/(5)48\.(1)\.4' + os_replacement: 'iOS' + - regex: '(CFNetwork)/(4)85\.1(3)\.9' + os_replacement: 'iOS' + - regex: '(CFNetwork)/(6)09\.(1)\.4' + os_replacement: 'iOS' + - regex: '(CFNetwork)/(6)(0)9' + os_replacement: 'iOS' + - regex: '(CFNetwork)/6(7)2\.(1)\.13' + os_replacement: 'iOS' + - regex: '(CFNetwork)/6(7)2\.(1)\.(1)4' + os_replacement: 'iOS' + - regex: '(CF)(Network)/6(7)(2)\.1\.15' + os_replacement: 'iOS' + os_v1_replacement: '7' + os_v2_replacement: '1' + - regex: '(CFNetwork)/6(7)2\.(0)\.(?:2|8)' + os_replacement: 'iOS' + - regex: '(CFNetwork)/709\.1' + os_replacement: 'iOS' + os_v1_replacement: '8' + os_v2_replacement: '0.b5' + - regex: '(CF)(Network)/711\.(\d)' + os_replacement: 'iOS' + os_v1_replacement: '8' + - regex: '(CF)(Network)/(720)\.(\d)' + os_replacement: 'Mac OS X' + os_v1_replacement: '10' + os_v2_replacement: '10' + - regex: '(CF)(Network)/(760)\.(\d)' + os_replacement: 'Mac OS X' + os_v1_replacement: '10' + os_v2_replacement: '11' + - regex: 'CFNetwork/7.{0,100} Darwin/15\.4\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '9' + os_v2_replacement: '3' + os_v3_replacement: '1' + - regex: 'CFNetwork/7.{0,100} Darwin/15\.5\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '9' + os_v2_replacement: '3' + os_v3_replacement: '2' + - regex: 'CFNetwork/7.{0,100} 
Darwin/15\.6\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '9' + os_v2_replacement: '3' + os_v3_replacement: '5' + - regex: '(CF)(Network)/758\.(\d)' + os_replacement: 'iOS' + os_v1_replacement: '9' + - regex: 'CFNetwork/808\.3 Darwin/16\.3\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '10' + os_v2_replacement: '2' + os_v3_replacement: '1' + - regex: '(CF)(Network)/808\.(\d)' + os_replacement: 'iOS' + os_v1_replacement: '10' + - regex: 'CFNetwork/.{0,100} Darwin/17\.\d+.{0,100}\(x86_64\)' + os_replacement: 'Mac OS X' + os_v1_replacement: '10' + os_v2_replacement: '13' + - regex: 'CFNetwork/.{0,100} Darwin/16\.\d+.{0,100}\(x86_64\)' + os_replacement: 'Mac OS X' + os_v1_replacement: '10' + os_v2_replacement: '12' + - regex: 'CFNetwork/8.{0,100} Darwin/15\.\d+.{0,100}\(x86_64\)' + os_replacement: 'Mac OS X' + os_v1_replacement: '10' + os_v2_replacement: '11' + - regex: 'CFNetwork/.{0,100} Darwin/(9)\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '1' + - regex: 'CFNetwork/.{0,100} Darwin/(10)\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '4' + - regex: 'CFNetwork/.{0,100} Darwin/(11)\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '5' + - regex: 'CFNetwork/.{0,100} Darwin/(13)\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '6' + - regex: 'CFNetwork/6.{0,100} Darwin/(14)\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '7' + - regex: 'CFNetwork/7.{0,100} Darwin/(14)\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '8' + os_v2_replacement: '0' + - regex: 'CFNetwork/7.{0,100} Darwin/(15)\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '9' + os_v2_replacement: '0' + - regex: 'CFNetwork/8.{0,100} Darwin/16\.5\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '10' + os_v2_replacement: '3' + - regex: 'CFNetwork/8.{0,100} Darwin/16\.6\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '10' + os_v2_replacement: '3' + os_v3_replacement: '2' + - regex: 'CFNetwork/8.{0,100} Darwin/16\.7\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '10' + os_v2_replacement: '3' + os_v3_replacement: '3' + - regex: 'CFNetwork/8.{0,100} Darwin/(16)\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '10' + - regex: 'CFNetwork/8.{0,100} Darwin/17\.0\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '11' + os_v2_replacement: '0' + - regex: 'CFNetwork/8.{0,100} Darwin/17\.2\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '11' + os_v2_replacement: '1' + - regex: 'CFNetwork/8.{0,100} Darwin/17\.3\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '11' + os_v2_replacement: '2' + - regex: 'CFNetwork/8.{0,100} Darwin/17\.4\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '11' + os_v2_replacement: '2' + os_v3_replacement: '6' + - regex: 'CFNetwork/8.{0,100} Darwin/17\.5\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '11' + os_v2_replacement: '3' + - regex: 'CFNetwork/9.{0,100} Darwin/17\.6\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '11' + os_v2_replacement: '4' + - regex: 'CFNetwork/9.{0,100} Darwin/17\.7\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '11' + os_v2_replacement: '4' + os_v3_replacement: '1' + - regex: 'CFNetwork/8.{0,100} Darwin/(17)\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '11' + - regex: 'CFNetwork/9.{0,100} Darwin/18\.0\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '12' + os_v2_replacement: '0' + - regex: 'CFNetwork/9.{0,100} Darwin/18\.2\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '12' + os_v2_replacement: '1' + - regex: 'CFNetwork/9.{0,100} Darwin/18\.5\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '12' + os_v2_replacement: '2' 
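Apple's CFNetwork user agents (native apps using Apple's networking stack) report only the Darwin kernel version, never the marketing OS version, so this ladder pins each kernel release to its OS release: mostly with literal os_v1_replacement/os_v2_replacement values, and with $1-style capture references where the OS minor tracks the kernel minor. A minimal Go sketch of that substitution rule, using two entries from this ladder (the parser shape, names, and sample user agents are illustrative, not the vendored library's API):

package main

import (
	"fmt"
	"regexp"
)

// Two entries hand-copied from the CFNetwork ladder in os_parsers; a real
// parser loads the whole YAML file. Struct and function names are illustrative.
type osParser struct {
	re     *regexp.Regexp
	osName string // os_replacement
	v1, v2 string // os_v1_replacement / os_v2_replacement; $N expands capture groups
}

var ladder = []osParser{
	// Darwin 18.2 kernels shipped with iOS 12.1: both digits are pinned literally.
	{regexp.MustCompile(`CFNetwork/9.{0,100} Darwin/18\.2\.\d+`), "iOS", "12", "1"},
	// For Darwin 22.0-22.5 the iOS 16 minor tracks the kernel minor, so the
	// minor is captured and substituted via $1.
	{regexp.MustCompile(`CFNetwork/.{0,100} Darwin/22\.([0-5])\.\d+`), "iOS", "16", "$1"},
}

func parseOS(ua string) (name, v1, v2 string, ok bool) {
	for _, p := range ladder { // entries are tried in file order; the first match wins
		m := p.re.FindStringSubmatchIndex(ua)
		if m == nil {
			continue
		}
		expand := func(tpl string) string { return string(p.re.ExpandString(nil, tpl, ua, m)) }
		return expand(p.osName), expand(p.v1), expand(p.v2), true
	}
	return "", "", "", false
}

func main() {
	fmt.Println(parseOS("MyApp/1.0 CFNetwork/976 Darwin/18.2.0"))  // iOS 12 1 true
	fmt.Println(parseOS("MyApp/1.0 CFNetwork/1390 Darwin/22.1.0")) // iOS 16 1 true
}

Entries are matched top-down with the first hit winning, which is why each exact rule (e.g. Darwin/18.2) sits above its generic catch-all (Darwin/(18)).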
+ - regex: 'CFNetwork/9.{0,100} Darwin/18\.6\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '12' + os_v2_replacement: '3' + - regex: 'CFNetwork/9.{0,100} Darwin/18\.7\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '12' + os_v2_replacement: '4' + - regex: 'CFNetwork/9.{0,100} Darwin/(18)\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '12' + - regex: 'CFNetwork/11.{0,100} Darwin/19\.2\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '13' + os_v2_replacement: '3' + - regex: 'CFNetwork/11.{0,100} Darwin/19\.3\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '13' + os_v2_replacement: '3' + os_v3_replacement: '1' + - regex: 'CFNetwork/11.{0,100} Darwin/19\.4\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '13' + os_v2_replacement: '4' + - regex: 'CFNetwork/11.{0,100} Darwin/19\.5\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '13' + os_v2_replacement: '5' + - regex: 'CFNetwork/11.{0,100} Darwin/19\.6\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '13' + os_v2_replacement: '6' + - regex: 'CFNetwork/1[01].{0,100} Darwin/19\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '13' + - regex: 'CFNetwork/12.{0,100} Darwin/20\.1\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '14' + os_v2_replacement: '2' + - regex: 'CFNetwork/12.{0,100} Darwin/20\.2\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '14' + os_v2_replacement: '3' + - regex: 'CFNetwork/12.{0,100} Darwin/20\.3\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '14' + os_v2_replacement: '4' + - regex: 'CFNetwork/12.{0,100} Darwin/20\.4\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '14' + os_v2_replacement: '5' + - regex: 'CFNetwork/12.{0,100} Darwin/20\.5\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '14' + os_v2_replacement: '6' + - regex: 'CFNetwork/12.{0,100} Darwin/20\.6\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '14' + os_v2_replacement: '8' + - regex: 'CFNetwork/.{0,100} Darwin/(20)\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '14' + - regex: 'CFNetwork/13.{0,100} Darwin/21\.0\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '15' + os_v2_replacement: '0' + - regex: 'CFNetwork/13.{0,100} Darwin/21\.1\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '15' + os_v2_replacement: '1' + - regex: 'CFNetwork/13.{0,100} Darwin/21\.2\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '15' + os_v2_replacement: '2' + - regex: 'CFNetwork/13.{0,100} Darwin/21\.3\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '15' + os_v2_replacement: '3' + - regex: 'CFNetwork/13.{0,100} Darwin/21\.4\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '15' + os_v2_replacement: '4' + - regex: 'CFNetwork/13.{0,100} Darwin/21\.5\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '15' + os_v2_replacement: '5' + - regex: 'CFNetwork/13.{0,100} Darwin/21\.6\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '15' + os_v2_replacement: '6' + - regex: 'CFNetwork/.{0,100} Darwin/(21)\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '15' + - regex: 'CFNetwork/.{0,100} Darwin/22\.([0-5])\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '16' + os_v2_replacement: '$1' + - regex: 'CFNetwork/.{0,100} Darwin/(22)\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '16' + - regex: 'CFNetwork/.{0,100} Darwin/23\.([0-5])\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '17' + os_v2_replacement: '$1' + - regex: 'CFNetwork/.{0,100} Darwin/(23)\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '17' + - regex: 'CFNetwork/.{0,100} Darwin/24\.([0-5])\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '18' + os_v2_replacement: 
'$1' + - regex: 'CFNetwork/.{0,100} Darwin/(24)\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '18' + - regex: 'CFNetwork/.{0,100} Darwin/' + os_replacement: 'iOS' + - regex: '\b(iOS[ /]|iOS; |iPhone(?:/| v|[ _]OS[/,]|; | OS : |\d,\d/|\d,\d; )|iPad/)(\d{1,2})[_\.](\d{1,2})(?:[_\.](\d+)|)' + os_replacement: 'iOS' + - regex: '\((iOS);' + - regex: '(watchOS)[/ ](\d+)\.(\d+)(?:\.(\d+)|)' + os_replacement: 'WatchOS' + - regex: 'Outlook-(iOS)/\d+\.\d+\.prod\.iphone' + - regex: '(iPod|iPhone|iPad)' + os_replacement: 'iOS' + - regex: '(tvOS)[/ ](\d+)\.(\d+)(?:\.(\d+)|)' + os_replacement: 'tvOS' + - regex: '(CrOS) [a-z0-9_]+ (\d+)\.(\d+)(?:\.(\d+)|)' + os_replacement: 'Chrome OS' + - regex: '([Dd]ebian)' + os_replacement: 'Debian' + - regex: '(Linux Mint)(?:/(\d+)|)' + - regex: '(Mandriva)(?: Linux|)/(?:[\d.-]+m[a-z]{2}(\d+).(\d)|)' + - regex: '(Symbian[Oo][Ss])[/ ](\d+)\.(\d+)' + os_replacement: 'Symbian OS' + - regex: '(Symbian/3).{1,200}NokiaBrowser/7\.3' + os_replacement: 'Symbian^3 Anna' + - regex: '(Symbian/3).{1,200}NokiaBrowser/7\.4' + os_replacement: 'Symbian^3 Belle' + - regex: '(Symbian/3)' + os_replacement: 'Symbian^3' + - regex: '\b(Series 60|SymbOS|S60Version|S60V\d|S60\b)' + os_replacement: 'Symbian OS' + - regex: '(MeeGo)' + - regex: 'Symbian [Oo][Ss]' + os_replacement: 'Symbian OS' + - regex: 'Series40;' + os_replacement: 'Nokia Series 40' + - regex: 'Series30Plus;' + os_replacement: 'Nokia Series 30 Plus' + - regex: '(BB10);.{1,200}Version/(\d+)\.(\d+)\.(\d+)' + os_replacement: 'BlackBerry OS' + - regex: '(Black[Bb]erry)[0-9a-z]+/(\d+)\.(\d+)\.(\d+)(?:\.(\d+)|)' + os_replacement: 'BlackBerry OS' + - regex: '(Black[Bb]erry).{1,200}Version/(\d+)\.(\d+)\.(\d+)(?:\.(\d+)|)' + os_replacement: 'BlackBerry OS' + - regex: '(RIM Tablet OS) (\d+)\.(\d+)\.(\d+)' + os_replacement: 'BlackBerry Tablet OS' + - regex: '(Play[Bb]ook)' + os_replacement: 'BlackBerry Tablet OS' + - regex: '(Black[Bb]erry)' + os_replacement: 'BlackBerry OS' + - regex: '(K[Aa][Ii]OS)\/(\d+)\.(\d+)(?:\.(\d+)|)' + os_replacement: 'KaiOS' + - regex: '\((?:Mobile|Tablet);.{1,200}Gecko/18.0 Firefox/\d+\.\d+' + os_replacement: 'Firefox OS' + os_v1_replacement: '1' + os_v2_replacement: '0' + os_v3_replacement: '1' + - regex: '\((?:Mobile|Tablet);.{1,200}Gecko/18.1 Firefox/\d+\.\d+' + os_replacement: 'Firefox OS' + os_v1_replacement: '1' + os_v2_replacement: '1' + - regex: '\((?:Mobile|Tablet);.{1,200}Gecko/26.0 Firefox/\d+\.\d+' + os_replacement: 'Firefox OS' + os_v1_replacement: '1' + os_v2_replacement: '2' + - regex: '\((?:Mobile|Tablet);.{1,200}Gecko/28.0 Firefox/\d+\.\d+' + os_replacement: 'Firefox OS' + os_v1_replacement: '1' + os_v2_replacement: '3' + - regex: '\((?:Mobile|Tablet);.{1,200}Gecko/30.0 Firefox/\d+\.\d+' + os_replacement: 'Firefox OS' + os_v1_replacement: '1' + os_v2_replacement: '4' + - regex: '\((?:Mobile|Tablet);.{1,200}Gecko/32.0 Firefox/\d+\.\d+' + os_replacement: 'Firefox OS' + os_v1_replacement: '2' + os_v2_replacement: '0' + - regex: '\((?:Mobile|Tablet);.{1,200}Gecko/34.0 Firefox/\d+\.\d+' + os_replacement: 'Firefox OS' + os_v1_replacement: '2' + os_v2_replacement: '1' + - regex: '\((?:Mobile|Tablet);.{1,200}Firefox/\d+\.\d+' + os_replacement: 'Firefox OS' + - regex: '(BREW)[ /](\d+)\.(\d+)\.(\d+)' + - regex: '(BREW);' + - regex: '(Brew MP|BMP)[ /](\d+)\.(\d+)\.(\d+)' + os_replacement: 'Brew MP' + - regex: 'BMP;' + os_replacement: 'Brew MP' + - regex: '(GoogleTV)(?: (\d+)\.(\d+)(?:\.(\d+)|)|/[\da-z]+)' + - regex: '(WebTV)/(\d+).(\d+)' + - regex: '(hpw|web)OS/(\d+)\.(\d+)(?:\.(\d+)|)' + 
os_replacement: 'webOS' + - regex: '(VRE);' + - regex: '(Fedora|Red Hat|PCLinuxOS|Puppy|Ubuntu|Kindle|Bada|Sailfish|Lubuntu|BackTrack|Slackware|(?:Free|Open|Net|\b)BSD)[/ ](\d+)\.(\d+)(?:\.(\d+)|)(?:\.(\d+)|)' + - regex: '(Linux)[ /](\d+)\.(\d+)(?:\.(\d+)|).{0,100}gentoo' + os_replacement: 'Gentoo' + - regex: '\((Bada);' + - regex: '(Windows|Android|WeTab|Maemo|Web0S)' + - regex: '(Ubuntu|Kubuntu|Arch Linux|CentOS|Slackware|Gentoo|openSUSE|SUSE|Red Hat|Fedora|PCLinuxOS|Mageia|SerenityOS|(?:Free|Open|Net|\b)BSD)' + - regex: '(Linux)(?:[ /](\d+)\.(\d+)(?:\.(\d+)|)|)' + - regex: 'SunOS' + os_replacement: 'Solaris' + - regex: '\(linux-gnu\)' + os_replacement: 'Linux' + - regex: '\(x86_64-redhat-linux-gnu\)' + os_replacement: 'Red Hat' + - regex: '\((freebsd)(\d+)\.(\d+)\)' + os_replacement: 'FreeBSD' + - regex: 'linux' + os_replacement: 'Linux' + - regex: '^(Roku)/DVP-(\d+)\.(\d+)' + os_replacement: 'Mac OS X' + os_v1_replacement: '$1' + os_v2_replacement: '$2' + os_v3_replacement: '$3' +device_parsers: + - regex: '^.{0,100}?(?:(?:iPhone|Windows CE|Windows Phone|Android).{0,300}(?:(?:Bot|Yeti)-Mobile|YRSpider|BingPreview|bots?/\d|(?:bot|spider)\.html)|AdsBot-Google-Mobile.{0,200}iPhone)' + regex_flag: 'i' + device_replacement: 'Spider' + brand_replacement: 'Spider' + model_replacement: 'Smartphone' + - regex: '^.{0,100}?(?:DoCoMo|\bMOT\b|\bLG\b|Nokia|Samsung|SonyEricsson).{0,200}(?:(?:Bot|Yeti)-Mobile|bots?/\d|(?:bot|crawler)\.html|(?:jump|google|Wukong)bot|ichiro/mobile|/spider|YahooSeeker)' + regex_flag: 'i' + device_replacement: 'Spider' + brand_replacement: 'Spider' + model_replacement: 'Feature Phone' + - regex: ' PTST/\d+(?:\.\d+|)$' + device_replacement: 'Spider' + brand_replacement: 'Spider' + - regex: 'X11; Datanyze; Linux' + device_replacement: 'Spider' + brand_replacement: 'Spider' + - regex: 'Mozilla.{1,100}Mobile.{1,100}(AspiegelBot|PetalBot)' + device_replacement: 'Spider' + brand_replacement: 'Spider' + model_replacement: 'Smartphone' + - regex: 'Mozilla.{0,200}(AspiegelBot|PetalBot)' + device_replacement: 'Spider' + brand_replacement: 'Spider' + model_replacement: 'Desktop' + - regex: '\bSmartWatch {0,2}\( {0,2}([^;]{1,200}) {0,2}; {0,2}([^;]{1,200}) {0,2};' + device_replacement: '$1 $2' + brand_replacement: '$1' + model_replacement: '$2' + - regex: 'Android Application[^\-]{1,300} - (Sony) ?(Ericsson|) (.{1,200}) \w{1,20} - ' + device_replacement: '$1 $2' + brand_replacement: '$1$2' + model_replacement: '$3' + - regex: 'Android Application[^\-]{1,300} - (?:HTC|HUAWEI|LGE|LENOVO|MEDION|TCT) (HTC|HUAWEI|LG|LENOVO|MEDION|ALCATEL)[ _\-](.{1,200}) \w{1,20} - ' + regex_flag: 'i' + device_replacement: '$1 $2' + brand_replacement: '$1' + model_replacement: '$2' + - regex: 'Android Application[^\-]{1,300} - ([^ ]+) (.{1,200}) \w{1,20} - ' + device_replacement: '$1 $2' + brand_replacement: '$1' + model_replacement: '$2' + - regex: '; {0,2}([BLRQ]C\d{4}[A-Z]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '3Q $1' + brand_replacement: '3Q' + model_replacement: '$1' + - regex: '; {0,2}(?:3Q_)([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '3Q $1' + brand_replacement: '3Q' + model_replacement: '$1' + - regex: 'Android [34].{0,200}; {0,2}(A100|A101|A110|A200|A210|A211|A500|A501|A510|A511|A700(?: Lite| 3G|)|A701|B1-A71|A1-\d{3}|B1-\d{3}|V360|V370|W500|W500P|W501|W501P|W510|W511|W700|Slider SL101|DA22[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Acer' + model_replacement: '$1' + - regex: '; {0,2}Acer Iconia Tab 
([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Acer' + model_replacement: '$1' + - regex: '; {0,2}(Z1[1235]0|E320[^/]{0,10}|S500|S510|Liquid[^;/]{0,30}|Iconia A\d+)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Acer' + model_replacement: '$1' + - regex: '; {0,2}(Acer |ACER )([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1$2' + brand_replacement: 'Acer' + model_replacement: '$2' + - regex: '; {0,2}(Advent |)(Vega(?:Bean|Comb|)).{0,200}?(?: Build|\) AppleWebKit)' + device_replacement: '$1$2' + brand_replacement: 'Advent' + model_replacement: '$2' + - regex: '; {0,2}(Ainol |)((?:NOVO|[Nn]ovo)[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1$2' + brand_replacement: 'Ainol' + model_replacement: '$2' + - regex: '; {0,2}AIRIS[ _\-]?([^/;\)]+) {0,2}(?:;|\)|Build)' + regex_flag: 'i' + device_replacement: '$1' + brand_replacement: 'Airis' + model_replacement: '$1' + - regex: '; {0,2}(OnePAD[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: '$1' + brand_replacement: 'Airis' + model_replacement: '$1' + - regex: '; {0,2}Airpad[ \-]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Airpad $1' + brand_replacement: 'Airpad' + model_replacement: '$1' + - regex: '; {0,2}(one ?touch) (EVO7|T10|T20)(?: Build|\) AppleWebKit)' + device_replacement: 'Alcatel One Touch $2' + brand_replacement: 'Alcatel' + model_replacement: 'One Touch $2' + - regex: '; {0,2}(?:alcatel[ _]|)(?:(?:one[ _]?touch[ _])|ot[ \-])([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: 'Alcatel One Touch $1' + brand_replacement: 'Alcatel' + model_replacement: 'One Touch $1' + - regex: '; {0,2}(TCL)[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: '$1' + model_replacement: '$2' + - regex: '; {0,2}(Vodafone Smart II|Optimus_Madrid)(?: Build|\) AppleWebKit)' + device_replacement: 'Alcatel $1' + brand_replacement: 'Alcatel' + model_replacement: '$1' + - regex: '; {0,2}BASE_Lutea_3(?: Build|\) AppleWebKit)' + device_replacement: 'Alcatel One Touch 998' + brand_replacement: 'Alcatel' + model_replacement: 'One Touch 998' + - regex: '; {0,2}BASE_Varia(?: Build|\) AppleWebKit)' + device_replacement: 'Alcatel One Touch 918D' + brand_replacement: 'Alcatel' + model_replacement: 'One Touch 918D' + - regex: '; {0,2}((?:FINE|Fine)\d[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Allfine' + model_replacement: '$1' + - regex: '; {0,2}(ALLVIEW[ _]?|Allview[ _]?)((?:Speed|SPEED).{0,200}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1$2' + brand_replacement: 'Allview' + model_replacement: '$2' + - regex: '; {0,2}(ALLVIEW[ _]?|Allview[ _]?|)(AX1_Shine|AX2_Frenzy)(?: Build|\) AppleWebKit)' + device_replacement: '$1$2' + brand_replacement: 'Allview' + model_replacement: '$2' + - regex: '; {0,2}(ALLVIEW[ _]?|Allview[ _]?)([^;/]*?)(?: Build|\) AppleWebKit)' + device_replacement: '$1$2' + brand_replacement: 'Allview' + model_replacement: '$2' + - regex: '; {0,2}(A13-MID)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Allwinner' + model_replacement: '$1' + - regex: '; {0,2}(Allwinner)[ _\-]?([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'Allwinner' + model_replacement: '$1' + - regex: '; 
{0,2}(A651|A701B?|A702|A703|A705|A706|A707|A711|A712|A713|A717|A722|A785|A801|A802|A803|A901|A902|A1002|A1003|A1006|A1007|A9701|A9703|Q710|Q80)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Amaway' + model_replacement: '$1' + - regex: '; {0,2}(?:AMOI|Amoi)[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Amoi $1' + brand_replacement: 'Amoi' + model_replacement: '$1' + - regex: '^(?:AMOI|Amoi)[ _]([^;/]{1,100}?) Linux' + device_replacement: 'Amoi $1' + brand_replacement: 'Amoi' + model_replacement: '$1' + - regex: '; {0,2}(MW(?:0[789]|10)[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Aoc' + model_replacement: '$1' + - regex: '; {0,2}(G7|M1013|M1015G|M11[CG]?|M-?12[B]?|M15|M19[G]?|M30[ACQ]?|M31[GQ]|M32|M33[GQ]|M36|M37|M38|M701T|M710|M712B|M713|M715G|M716G|M71(?:G|GS|T|)|M72[T]?|M73[T]?|M75[GT]?|M77G|M79T|M7L|M7LN|M81|M810|M81T|M82|M92|M92KS|M92S|M717G|M721|M722G|M723|M725G|M739|M785|M791|M92SK|M93D)(?: Build|\) AppleWebKit)' + device_replacement: 'Aoson $1' + brand_replacement: 'Aoson' + model_replacement: '$1' + - regex: '; {0,2}Aoson ([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: 'Aoson $1' + brand_replacement: 'Aoson' + model_replacement: '$1' + - regex: '; {0,2}[Aa]panda[ _\-]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Apanda $1' + brand_replacement: 'Apanda' + model_replacement: '$1' + - regex: '; {0,2}(?:ARCHOS|Archos) ?(GAMEPAD.{0,200}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Archos $1' + brand_replacement: 'Archos' + model_replacement: '$1' + - regex: 'ARCHOS; GOGI; ([^;]{1,200});' + device_replacement: 'Archos $1' + brand_replacement: 'Archos' + model_replacement: '$1' + - regex: '(?:ARCHOS|Archos)[ _]?(.{0,200}?)(?: Build|[;/\(\)\-]|$)' + device_replacement: 'Archos $1' + brand_replacement: 'Archos' + model_replacement: '$1' + - regex: '; {0,2}(AN(?:7|8|9|10|13)[A-Z0-9]{1,4})(?: Build|\) AppleWebKit)' + device_replacement: 'Archos $1' + brand_replacement: 'Archos' + model_replacement: '$1' + - regex: '; {0,2}(A28|A32|A43|A70(?:BHT|CHT|HB|S|X)|A101(?:B|C|IT)|A7EB|A7EB-WK|101G9|80G9)(?: Build|\) AppleWebKit)' + device_replacement: 'Archos $1' + brand_replacement: 'Archos' + model_replacement: '$1' + - regex: '; {0,2}(PAD-FMD[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Arival' + model_replacement: '$1' + - regex: '; {0,2}(BioniQ) ?([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'Arival' + model_replacement: '$1 $2' + - regex: '; {0,2}(AN\d[^;/]{1,100}|ARCHM\d+)(?: Build|\) AppleWebKit)' + device_replacement: 'Arnova $1' + brand_replacement: 'Arnova' + model_replacement: '$1' + - regex: '; {0,2}(?:ARNOVA|Arnova) ?([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Arnova $1' + brand_replacement: 'Arnova' + model_replacement: '$1' + - regex: '; {0,2}(?:ASSISTANT |)(AP)-?([1789]\d{2}[A-Z]{0,2}|80104)(?: Build|\) AppleWebKit)' + device_replacement: 'Assistant $1-$2' + brand_replacement: 'Assistant' + model_replacement: '$1-$2' + - regex: '; {0,2}(ME17\d[^;/]*|ME3\d{2}[^;/]{1,100}|K00[A-Z]|Nexus 10|Nexus 7(?: 2013|)|PadFone[^;/]*|Transformer[^;/]*|TF\d{3}[^;/]*|eeepc)(?: Build|\) AppleWebKit)' + device_replacement: 'Asus $1' + brand_replacement: 'Asus' + model_replacement: '$1' + - regex: '; {0,2}ASUS[ _]{0,10}([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Asus $1' + brand_replacement: 'Asus' + model_replacement: 
'$1' + - regex: '; {0,2}Garmin-Asus ([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Garmin-Asus $1' + brand_replacement: 'Garmin-Asus' + model_replacement: '$1' + - regex: '; {0,2}(Garminfone)(?: Build|\) AppleWebKit)' + device_replacement: 'Garmin $1' + brand_replacement: 'Garmin-Asus' + model_replacement: '$1' + - regex: '; (@TAB-[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Attab' + model_replacement: '$1' + - regex: '; {0,2}(T-(?:07|[^0]\d)[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Audiosonic' + model_replacement: '$1' + - regex: '; {0,2}(?:Axioo[ _\-]([^;/]{1,100}?)|(picopad)[ _\-]([^;/]{1,100}?))(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: 'Axioo $1$2 $3' + brand_replacement: 'Axioo' + model_replacement: '$1$2 $3' + - regex: '; {0,2}(V(?:100|700|800)[^;/]*)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Azend' + model_replacement: '$1' + - regex: '; {0,2}(IBAK\-[^;/]*)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: '$1' + brand_replacement: 'Bak' + model_replacement: '$1' + - regex: '; {0,2}(HY5001|HY6501|X12|X21|I5)(?: Build|\) AppleWebKit)' + device_replacement: 'Bedove $1' + brand_replacement: 'Bedove' + model_replacement: '$1' + - regex: '; {0,2}(JC-[^;/]*)(?: Build|\) AppleWebKit)' + device_replacement: 'Benss $1' + brand_replacement: 'Benss' + model_replacement: '$1' + - regex: '; {0,2}(BB) ([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'Blackberry' + model_replacement: '$2' + - regex: '; {0,2}(BlackBird)[ _](I8.{0,200}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: '$1' + model_replacement: '$2' + - regex: '; {0,2}(BlackBird)[ _](.{0,200}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: '$1' + model_replacement: '$2' + - regex: '; {0,2}([0-9]+BP[EM][^;/]*|Endeavour[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Blaupunkt $1' + brand_replacement: 'Blaupunkt' + model_replacement: '$1' + - regex: '; {0,2}((?:BLU|Blu)[ _\-])([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1$2' + brand_replacement: 'Blu' + model_replacement: '$2' + - regex: '; {0,2}(?:BMOBILE )?(Blu|BLU|DASH [^;/]{1,100}|VIVO 4\.3|TANK 4\.5)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Blu' + model_replacement: '$1' + - regex: '; {0,2}(TOUCH\d[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Blusens' + model_replacement: '$1' + - regex: '; {0,2}(AX5\d+)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Bmobile' + model_replacement: '$1' + - regex: '; {0,2}([Bb]q) ([^;/]{1,100}?);?(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'bq' + model_replacement: '$2' + - regex: '; {0,2}(Maxwell [^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'bq' + model_replacement: '$1' + - regex: '; {0,2}((?:B-Tab|B-TAB) ?\d[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Braun' + model_replacement: '$1' + - regex: '; {0,2}(Broncho) ([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: '$1' + model_replacement: '$2' + - regex: '; {0,2}CAPTIVA ([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Captiva $1' + brand_replacement: 'Captiva' + model_replacement: '$1' + - regex: 
'; {0,2}(C771|CAL21|IS11CA)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Casio' + model_replacement: '$1' + - regex: '; {0,2}(?:Cat|CAT) ([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Cat $1' + brand_replacement: 'Cat' + model_replacement: '$1' + - regex: '; {0,2}(?:Cat)(Nova.{0,200}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Cat $1' + brand_replacement: 'Cat' + model_replacement: '$1' + - regex: '; {0,2}(INM8002KP|ADM8000KP_[AB])(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Cat' + model_replacement: 'Tablet PHOENIX 8.1J0' + - regex: '; {0,2}(?:[Cc]elkon[ _\*]|CELKON[ _\*])([^;/\)]+) ?(?:Build|;|\))' + device_replacement: '$1' + brand_replacement: 'Celkon' + model_replacement: '$1' + - regex: 'Build/(?:[Cc]elkon)+_?([^;/_\)]+)' + device_replacement: '$1' + brand_replacement: 'Celkon' + model_replacement: '$1' + - regex: '; {0,2}(CT)-?(\d+)(?: Build|\) AppleWebKit)' + device_replacement: '$1$2' + brand_replacement: 'Celkon' + model_replacement: '$1$2' + - regex: '; {0,2}(A19|A19Q|A105|A107[^;/\)]*) ?(?:Build|;|\))' + device_replacement: '$1' + brand_replacement: 'Celkon' + model_replacement: '$1' + - regex: '; {0,2}(TPC[0-9]{4,5})(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'ChangJia' + model_replacement: '$1' + - regex: 'CrKey.*DeviceType/([^/]*)' + brand_replacement: 'Google' + device_replacement: 'Chromecast' + model_replacement: '$1' + - regex: 'Fuchsia.*CrKey' + brand_replacement: 'Google' + device_replacement: 'Chromecast' + model_replacement: 'Nest Hub' + - regex: 'Linux.*CrKey/1.36' + brand_replacement: 'Google' + device_replacement: 'Chromecast' + model_replacement: 'First Generation' + - regex: 'CrKey/' + brand_replacement: 'Google' + device_replacement: 'Chromecast' + model_replacement: 'Chromecast' + - regex: '; {0,2}(Cloudfone)[ _](Excite)([^ ][^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2 $3' + brand_replacement: 'Cloudfone' + model_replacement: '$1 $2 $3' + - regex: '; {0,2}(Excite|ICE)[ _](\d+[^;/]{0,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Cloudfone $1 $2' + brand_replacement: 'Cloudfone' + model_replacement: 'Cloudfone $1 $2' + - regex: '; {0,2}(Cloudfone|CloudPad)[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'Cloudfone' + model_replacement: '$1 $2' + - regex: '; {0,2}((?:Aquila|Clanga|Rapax)[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: '$1' + brand_replacement: 'Cmx' + model_replacement: '$1' + - regex: '; {0,2}(?:CFW-|Kyros )?(MID[0-9]{4}(?:[ABC]|SR|TV)?)(\(3G\)-4G| GB 8K| 3G| 8K| GB)? 
{0,2}(?:Build|[;\)])'
+    device_replacement: 'CobyKyros $1$2'
+    brand_replacement: 'CobyKyros'
+    model_replacement: '$1$2'
+  - regex: '; {0,2}([^;/]{0,50})Coolpad[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+    device_replacement: '$1$2'
+    brand_replacement: 'Coolpad'
+    model_replacement: '$1$2'
+  - regex: '; {0,2}(CUBE[ _])?([KU][0-9]+ ?GT.{0,200}?|A5300)(?: Build|\) AppleWebKit)'
+    regex_flag: 'i'
+    device_replacement: '$1$2'
+    brand_replacement: 'Cube'
+    model_replacement: '$2'
+  - regex: '; {0,2}CUBOT ([^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+    regex_flag: 'i'
+    device_replacement: '$1'
+    brand_replacement: 'Cubot'
+    model_replacement: '$1'
+  - regex: '; {0,2}(BOBBY)(?: Build|\) AppleWebKit)'
+    regex_flag: 'i'
+    device_replacement: '$1'
+    brand_replacement: 'Cubot'
+    model_replacement: '$1'
+  - regex: '; {0,2}(Dslide [^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+    device_replacement: '$1'
+    brand_replacement: 'Danew'
+    model_replacement: '$1'
+  - regex: '; {0,2}(XCD)[ _]?(28|35)(?: Build|\) AppleWebKit)'
+    device_replacement: 'Dell $1$2'
+    brand_replacement: 'Dell'
+    model_replacement: '$1$2'
+  - regex: '; {0,2}(001DL)(?: Build|\) AppleWebKit)'
+    device_replacement: 'Dell $1'
+    brand_replacement: 'Dell'
+    model_replacement: 'Streak'
+  - regex: '; {0,2}(?:Dell|DELL) (Streak)(?: Build|\) AppleWebKit)'
+    device_replacement: 'Dell $1'
+    brand_replacement: 'Dell'
+    model_replacement: 'Streak'
+  - regex: '; {0,2}(101DL|GS01|Streak Pro[^;/]{0,100})(?: Build|\) AppleWebKit)'
+    device_replacement: 'Dell $1'
+    brand_replacement: 'Dell'
+    model_replacement: 'Streak Pro'
+  - regex: '; {0,2}([Ss]treak ?7)(?: Build|\) AppleWebKit)'
+    device_replacement: 'Dell $1'
+    brand_replacement: 'Dell'
+    model_replacement: 'Streak 7'
+  - regex: '; {0,2}(Mini-3iX)(?: Build|\) AppleWebKit)'
+    device_replacement: 'Dell $1'
+    brand_replacement: 'Dell'
+    model_replacement: '$1'
+  - regex: '; {0,2}(?:Dell|DELL)[ _](Aero|Venue|Thunder|Mini.{0,200}?|Streak[ _]Pro)(?: Build|\) AppleWebKit)'
+    device_replacement: 'Dell $1'
+    brand_replacement: 'Dell'
+    model_replacement: '$1'
+  - regex: '; {0,2}Dell[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+    device_replacement: 'Dell $1'
+    brand_replacement: 'Dell'
+    model_replacement: '$1'
+  - regex: '; {0,2}(TA[CD]-\d+[^;/]{0,100})(?: Build|\) AppleWebKit)'
+    device_replacement: '$1'
+    brand_replacement: 'Denver'
+    model_replacement: '$1'
+  - regex: '; {0,2}(iP[789]\d{2}(?:-3G)?|IP10\d{2}(?:-8GB)?)(?: Build|\) AppleWebKit)'
+    device_replacement: '$1'
+    brand_replacement: 'Dex'
+    model_replacement: '$1'
+  - regex: '; {0,2}(AirTab)[ _\-]([^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+    device_replacement: '$1 $2'
+    brand_replacement: 'DNS'
+    model_replacement: '$1 $2'
+  - regex: '; {0,2}(F\-\d[^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+    device_replacement: '$1'
+    brand_replacement: 'Fujitsu'
+    model_replacement: '$1'
+  - regex: '; {0,2}(HT-03A)(?: Build|\) AppleWebKit)'
+    device_replacement: '$1'
+    brand_replacement: 'HTC'
+    model_replacement: 'Magic'
+  - regex: '; {0,2}(HT\-\d[^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+    device_replacement: '$1'
+    brand_replacement: 'HTC'
+    model_replacement: '$1'
+  - regex: '; {0,2}(L\-\d[^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+    device_replacement: '$1'
+    brand_replacement: 'LG'
+    model_replacement: '$1'
+  - regex: '; {0,2}(N\-\d[^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+    device_replacement: '$1'
+    brand_replacement: 'Nec'
+    model_replacement: '$1'
+  - regex: '; {0,2}(P\-\d[^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+    device_replacement: '$1'
+    brand_replacement: 'Panasonic'
+    model_replacement: '$1'
+  - regex: '; {0,2}(SC\-\d[^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+    device_replacement: '$1'
+    brand_replacement: 'Samsung'
+    model_replacement: '$1'
+  - regex: '; {0,2}(SH\-\d[^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+    device_replacement: '$1'
+    brand_replacement: 'Sharp'
+    model_replacement: '$1'
+  - regex: '; {0,2}(SO\-\d[^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+    device_replacement: '$1'
+    brand_replacement: 'SonyEricsson'
+    model_replacement: '$1'
+  - regex: '; {0,2}(T\-0[12][^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+    device_replacement: '$1'
+    brand_replacement: 'Toshiba'
+    model_replacement: '$1'
+  - regex: '; {0,2}(DOOV)[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+    device_replacement: '$1 $2'
+    brand_replacement: 'DOOV'
+    model_replacement: '$2'
+  - regex: '; {0,2}(Enot|ENOT)[ -]?([^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+    device_replacement: '$1 $2'
+    brand_replacement: 'Enot'
+    model_replacement: '$2'
+  - regex: '; {0,2}[^;/]{1,100} Build/(?:CROSS|Cross)+[ _\-]([^\)]+)'
+    device_replacement: 'CROSS $1'
+    brand_replacement: 'Evercoss'
+    model_replacement: 'Cross $1'
+  - regex: '; {0,2}(CROSS|Cross)[ _\-]([^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+    device_replacement: '$1 $2'
+    brand_replacement: 'Evercoss'
+    model_replacement: 'Cross $2'
+  - regex: '; {0,2}Explay[_ ](.{1,200}?)(?:[\)]| Build)'
+    device_replacement: '$1'
+    brand_replacement: 'Explay'
+    model_replacement: '$1'
+  - regex: '; {0,2}(IQ.{0,200}?)(?: Build|\) AppleWebKit)'
+    device_replacement: '$1'
+    brand_replacement: 'Fly'
+    model_replacement: '$1'
+  - regex: '; {0,2}(Fly|FLY)[ _](IQ[^;]{1,100}?|F[34]\d+[^;]{0,100}?);?(?: Build|\) AppleWebKit)'
+    device_replacement: '$1 $2'
+    brand_replacement: 'Fly'
+    model_replacement: '$2'
+  - regex: '; {0,2}(M532|Q572|FJL21)(?: Build|\) AppleWebKit)'
+    device_replacement: '$1'
+    brand_replacement: 'Fujitsu'
+    model_replacement: '$1'
+  - regex: '; {0,2}(G1)(?: Build|\) AppleWebKit)'
+    device_replacement: '$1'
+    brand_replacement: 'Galapad'
+    model_replacement: '$1'
+  - regex: '; {0,2}(Geeksphone) ([^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+    device_replacement: '$1 $2'
+    brand_replacement: '$1'
+    model_replacement: '$2'
+  - regex: '; {0,2}(G[^F]?FIVE) ([^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+    device_replacement: '$1 $2'
+    brand_replacement: 'Gfive'
+    model_replacement: '$2'
+  - regex: '; {0,2}(Gionee)[ _\-]([^;/]{1,100}?)(?:/[^;/]{1,100}|)(?: Build|\) AppleWebKit)'
+    regex_flag: 'i'
+    device_replacement: '$1 $2'
+    brand_replacement: 'Gionee'
+    model_replacement: '$2'
+  - regex: '; {0,2}(GN\d+[A-Z]?|INFINITY_PASSION|Ctrl_V1)(?: Build|\) AppleWebKit)'
+    device_replacement: 'Gionee $1'
+    brand_replacement: 'Gionee'
+    model_replacement: '$1'
+  - regex: '; {0,2}(E3) Build/JOP40D'
+    device_replacement: 'Gionee $1'
+    brand_replacement: 'Gionee'
+    model_replacement: '$1'
+  - regex: '\sGIONEE[-\s_](\w*)'
+    regex_flag: 'i'
+    device_replacement: 'Gionee $1'
+    brand_replacement: 'Gionee'
+    model_replacement: '$1'
+  - regex: '; {0,2}((?:FONE|QUANTUM|INSIGNIA) \d+[^;/]{0,100}|PLAYTAB)(?: Build|\) AppleWebKit)'
+    device_replacement: 'GoClever $1'
+    brand_replacement: 'GoClever'
+    model_replacement: '$1'
+  - regex: '; {0,2}GOCLEVER ([^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+    device_replacement: 'GoClever $1'
+    brand_replacement: 'GoClever'
+    model_replacement: '$1'
+  - regex: '; {0,2}(Glass \d+)(?: Build|\) AppleWebKit)'
+    device_replacement: '$1'
+    brand_replacement: 'Google'
+    model_replacement: '$1'
+  - regex: '; {0,2}([g|G]oogle)? (Pixel[ a-zA-z0-9]{1,100});(?: Build|.{0,50}\) AppleWebKit)'
+    device_replacement: '$2'
+    brand_replacement: 'Google'
+    model_replacement: '$2'
+  - regex: '; {0,2}([g|G]oogle)? (Pixel.{0,200}?)(?: Build|\) AppleWebKit)'
+    device_replacement: '$2'
+    brand_replacement: 'Google'
+    model_replacement: '$2'
+  - regex: '; {0,2}(GSmart)[ -]([^/]{1,50})(?: Build|\) AppleWebKit)'
+    device_replacement: '$1 $2'
+    brand_replacement: 'Gigabyte'
+    model_replacement: '$1 $2'
+  - regex: '; {0,2}(imx5[13]_[^/]{1,50})(?: Build|\) AppleWebKit)'
+    device_replacement: 'Freescale $1'
+    brand_replacement: 'Freescale'
+    model_replacement: '$1'
+  - regex: '; {0,2}Haier[ _\-]([^/]{1,50})(?: Build|\) AppleWebKit)'
+    device_replacement: 'Haier $1'
+    brand_replacement: 'Haier'
+    model_replacement: '$1'
+  - regex: '; {0,2}(PAD1016)(?: Build|\) AppleWebKit)'
+    device_replacement: 'Haipad $1'
+    brand_replacement: 'Haipad'
+    model_replacement: '$1'
+  - regex: '; {0,2}(M701|M7|M8|M9)(?: Build|\) AppleWebKit)'
+    device_replacement: 'Haipad $1'
+    brand_replacement: 'Haipad'
+    model_replacement: '$1'
+  - regex: '; {0,2}(SN\d+T[^;\)/]*)(?: Build|[;\)])'
+    device_replacement: 'Hannspree $1'
+    brand_replacement: 'Hannspree'
+    model_replacement: '$1'
+  - regex: 'Build/HCL ME Tablet ([^;\)]{1,3})[\);]'
+    device_replacement: 'HCLme $1'
+    brand_replacement: 'HCLme'
+    model_replacement: '$1'
+  - regex: '; {0,2}([^;\/]+) Build/HCL'
+    device_replacement: 'HCLme $1'
+    brand_replacement: 'HCLme'
+    model_replacement: '$1'
+  - regex: '; {0,2}(MID-?\d{4}C[EM])(?: Build|\) AppleWebKit)'
+    device_replacement: 'Hena $1'
+    brand_replacement: 'Hena'
+    model_replacement: '$1'
+  - regex: '; {0,2}(EG\d{2,}|HS-[^;/]{1,100}|MIRA[^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+    device_replacement: 'Hisense $1'
+    brand_replacement: 'Hisense'
+    model_replacement: '$1'
+  - regex: '; {0,2}(andromax[^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+    regex_flag: 'i'
+    device_replacement: 'Hisense $1'
+    brand_replacement: 'Hisense'
+    model_replacement: '$1'
+  - regex: '; {0,2}(?:AMAZE[ _](S\d+)|(S\d+)[ _]AMAZE)(?: Build|\) AppleWebKit)'
+    device_replacement: 'AMAZE $1$2'
+    brand_replacement: 'hitech'
+    model_replacement: 'AMAZE $1$2'
+  - regex: '; {0,2}(PlayBook)(?: Build|\) AppleWebKit)'
+    device_replacement: 'HP $1'
+    brand_replacement: 'HP'
+    model_replacement: '$1'
+  - regex: '; {0,2}HP ([^/]{1,50})(?: Build|\) AppleWebKit)'
+    device_replacement: 'HP $1'
+    brand_replacement: 'HP'
+    model_replacement: '$1'
+  - regex: '; {0,2}([^/]{1,30}_tenderloin)(?: Build|\) AppleWebKit)'
+    device_replacement: 'HP TouchPad'
+    brand_replacement: 'HP'
+    model_replacement: 'TouchPad'
+  - regex: '; {0,2}(HUAWEI |Huawei-|)([UY][^;/]{1,100}) Build/(?:Huawei|HUAWEI)([UY][^\);]+)\)'
+    device_replacement: '$1$2'
+    brand_replacement: 'Huawei'
+    model_replacement: '$2'
+  - regex: '; {0,2}([^;/]{1,100}) Build[/ ]Huawei(MT1-U06|[A-Z]{1,50}\d+[^\);]{1,50})\)'
+    device_replacement: '$1'
+    brand_replacement: 'Huawei'
+    model_replacement: '$2'
+  - regex: '; {0,2}(S7|M860) Build'
+    device_replacement: '$1'
+    brand_replacement: 'Huawei'
+    model_replacement: '$1'
+  - regex: '; {0,2}((?:HUAWEI|Huawei)[ \-]?)(MediaPad) Build'
+    device_replacement: '$1$2'
+    brand_replacement: 'Huawei'
+    model_replacement: '$2'
+  - regex: '; {0,2}((?:HUAWEI[ _]?|Huawei[ _]|)Ascend[ _])([^;/]{1,100}) Build'
+    device_replacement: '$1$2'
+    brand_replacement: 'Huawei'
+    model_replacement: '$2'
+  - regex: '; {0,2}((?:HUAWEI|Huawei)[ _\-]?)((?:G700-|MT-)[^;/]{1,100}) Build'
+    device_replacement: '$1$2'
+    brand_replacement: 'Huawei'
+ model_replacement: '$2' + - regex: '; {0,2}((?:HUAWEI|Huawei)[ _\-]?)([^;/]{1,100}) Build' + device_replacement: '$1$2' + brand_replacement: 'Huawei' + model_replacement: '$2' + - regex: '; {0,2}(MediaPad[^;]{1,200}|SpringBoard) Build/Huawei' + device_replacement: '$1' + brand_replacement: 'Huawei' + model_replacement: '$1' + - regex: '; {0,2}([^;]{1,200}) Build/(?:Huawei|HUAWEI)' + device_replacement: '$1' + brand_replacement: 'Huawei' + model_replacement: '$1' + - regex: '; {0,2}([Uu])([89]\d{3}) Build' + device_replacement: '$1$2' + brand_replacement: 'Huawei' + model_replacement: 'U$2' + - regex: '; {0,2}(?:Ideos |IDEOS )(S7) Build' + device_replacement: 'Huawei Ideos$1' + brand_replacement: 'Huawei' + model_replacement: 'Ideos$1' + - regex: '; {0,2}(?:Ideos |IDEOS )([^;/]{1,50}\s{0,5}|\s{0,5})Build' + device_replacement: 'Huawei Ideos$1' + brand_replacement: 'Huawei' + model_replacement: 'Ideos$1' + - regex: '; {0,2}(Orange Daytona|Pulse|Pulse Mini|Vodafone 858|C8500|C8600|C8650|C8660|Nexus 6P|ATH-.{1,200}?) Build[/ ]' + device_replacement: 'Huawei $1' + brand_replacement: 'Huawei' + model_replacement: '$1' + - regex: '; {0,2}((?:[A-Z]{3})\-L[A-Za0-9]{2})[\)]' + device_replacement: 'Huawei $1' + brand_replacement: 'Huawei' + model_replacement: '$1' + - regex: '; {0,2}([^;]{1,200}) Build/(HONOR|Honor)' + device_replacement: 'Huawei Honor $1' + brand_replacement: 'Huawei' + model_replacement: 'Honor $1' + - regex: '; {0,2}HTC[ _]([^;]{1,200}); Windows Phone' + device_replacement: 'HTC $1' + brand_replacement: 'HTC' + model_replacement: '$1' + - regex: '; {0,2}(?:HTC[ _/])+([^ _/]+)(?:[/\\]1\.0 | V|/| +)\d+\.\d[\d\.]*(?: {0,2}Build|\))' + device_replacement: 'HTC $1' + brand_replacement: 'HTC' + model_replacement: '$1' + - regex: '; {0,2}(?:HTC[ _/])+([^ _/]+)(?:[ _/]([^ _/]+)|)(?:[/\\]1\.0 | V|/| +)\d+\.\d[\d\.]*(?: {0,2}Build|\))' + device_replacement: 'HTC $1 $2' + brand_replacement: 'HTC' + model_replacement: '$1 $2' + - regex: '; {0,2}(?:HTC[ _/])+([^ _/]+)(?:[ _/]([^ _/]+)(?:[ _/]([^ _/]+)|)|)(?:[/\\]1\.0 | V|/| +)\d+\.\d[\d\.]*(?: {0,2}Build|\))' + device_replacement: 'HTC $1 $2 $3' + brand_replacement: 'HTC' + model_replacement: '$1 $2 $3' + - regex: '; {0,2}(?:HTC[ _/])+([^ _/]+)(?:[ _/]([^ _/]+)(?:[ _/]([^ _/]+)(?:[ _/]([^ _/]+)|)|)|)(?:[/\\]1\.0 | V|/| +)\d+\.\d[\d\.]*(?: {0,2}Build|\))' + device_replacement: 'HTC $1 $2 $3 $4' + brand_replacement: 'HTC' + model_replacement: '$1 $2 $3 $4' + - regex: '; {0,2}(?:(?:HTC|htc)(?:_blocked|)[ _/])+([^ _/;]+)(?: {0,2}Build|[;\)]| - )' + device_replacement: 'HTC $1' + brand_replacement: 'HTC' + model_replacement: '$1' + - regex: '; {0,2}(?:(?:HTC|htc)(?:_blocked|)[ _/])+([^ _/]+)(?:[ _/]([^ _/;\)]+)|)(?: {0,2}Build|[;\)]| - )' + device_replacement: 'HTC $1 $2' + brand_replacement: 'HTC' + model_replacement: '$1 $2' + - regex: '; {0,2}(?:(?:HTC|htc)(?:_blocked|)[ _/])+([^ _/]+)(?:[ _/]([^ _/]+)(?:[ _/]([^ _/;\)]+)|)|)(?: {0,2}Build|[;\)]| - )' + device_replacement: 'HTC $1 $2 $3' + brand_replacement: 'HTC' + model_replacement: '$1 $2 $3' + - regex: '; {0,2}(?:(?:HTC|htc)(?:_blocked|)[ _/])+([^ _/]+)(?:[ _/]([^ _/]+)(?:[ _/]([^ _/]+)(?:[ _/]([^ /;]+)|)|)|)(?: {0,2}Build|[;\)]| - )' + device_replacement: 'HTC $1 $2 $3 $4' + brand_replacement: 'HTC' + model_replacement: '$1 $2 $3 $4' + - regex: 'HTC Streaming Player [^\/]{0,30}/[^\/]{0,10}/ htc_([^/]{1,10}) /' + device_replacement: 'HTC $1' + brand_replacement: 'HTC' + model_replacement: '$1' + - regex: '(?:[;,] {0,2}|^)(?:htccn_chs-|)HTC[ _-]?([^;]{1,200}?)(?: 
{0,2}Build|clay|Android|-?Mozilla| Opera| Profile| UNTRUSTED|[;/\(\)]|$)' + regex_flag: 'i' + device_replacement: 'HTC $1' + brand_replacement: 'HTC' + model_replacement: '$1' + - regex: '; {0,2}(A6277|ADR6200|ADR6300|ADR6350|ADR6400[A-Z]*|ADR6425[A-Z]*|APX515CKT|ARIA|Desire[^_ ]*|Dream|EndeavorU|Eris|Evo|Flyer|HD2|Hero|HERO200|Hero CDMA|HTL21|Incredible|Inspire[A-Z0-9]*|Legend|Liberty|Nexus ?(?:One|HD2)|One|One S C2|One[ _]?(?:S|V|X\+?)\w*|PC36100|PG06100|PG86100|S31HT|Sensation|Wildfire)(?: Build|[/;\(\)])' + regex_flag: 'i' + device_replacement: 'HTC $1' + brand_replacement: 'HTC' + model_replacement: '$1' + - regex: '; {0,2}(ADR6200|ADR6400L|ADR6425LVW|Amaze|DesireS?|EndeavorU|Eris|EVO|Evo\d[A-Z]+|HD2|IncredibleS?|Inspire[A-Z0-9]*|Inspire[A-Z0-9]*|Sensation[A-Z0-9]*|Wildfire)[ _-](.{1,200}?)(?:[/;\)]|Build|MIUI|1\.0)' + regex_flag: 'i' + device_replacement: 'HTC $1 $2' + brand_replacement: 'HTC' + model_replacement: '$1 $2' + - regex: '; {0,2}HYUNDAI (T\d[^/]{0,10})(?: Build|\) AppleWebKit)' + device_replacement: 'Hyundai $1' + brand_replacement: 'Hyundai' + model_replacement: '$1' + - regex: '; {0,2}HYUNDAI ([^;/]{1,10}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Hyundai $1' + brand_replacement: 'Hyundai' + model_replacement: '$1' + - regex: '; {0,2}(X700|Hold X|MB-6900)(?: Build|\) AppleWebKit)' + device_replacement: 'Hyundai $1' + brand_replacement: 'Hyundai' + model_replacement: '$1' + - regex: '; {0,2}(?:iBall[ _\-]|)(Andi)[ _]?(\d[^;/]*)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: '$1 $2' + brand_replacement: 'iBall' + model_replacement: '$1 $2' + - regex: '; {0,2}(IBall)(?:[ _]([^;/]{1,100}?)|)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: '$1 $2' + brand_replacement: 'iBall' + model_replacement: '$2' + - regex: '; {0,2}(NT-\d+[^ ;/]{0,50}|Net[Tt]AB [^;/]{1,50}|Mercury [A-Z]{1,50}|iconBIT)(?: S/N:[^;/]{1,50}|)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'IconBIT' + model_replacement: '$1' + - regex: '; {0,2}(IMO)[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: '$1 $2' + brand_replacement: 'IMO' + model_replacement: '$2' + - regex: '; {0,2}i-?mobile[ _]([^/]{1,50})(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: 'i-mobile $1' + brand_replacement: 'imobile' + model_replacement: '$1' + - regex: '; {0,2}(i-(?:style|note)[^/]{0,10})(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: 'i-mobile $1' + brand_replacement: 'imobile' + model_replacement: '$1' + - regex: '; {0,2}(ImPAD) ?(\d+(?:.){0,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'Impression' + model_replacement: '$1 $2' + - regex: '; {0,2}(Infinix)[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'Infinix' + model_replacement: '$2' + - regex: '; {0,2}(Informer)[ \-]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'Informer' + model_replacement: '$2' + - regex: '; {0,2}(TAB) ?([78][12]4)(?: Build|\) AppleWebKit)' + device_replacement: 'Intenso $1' + brand_replacement: 'Intenso' + model_replacement: '$1 $2' + - regex: '; {0,2}(?:Intex[ _]|)(AQUA|Aqua)([ _\.\-])([^;/]{1,100}?) 
{0,2}(?:Build|;)' + device_replacement: '$1$2$3' + brand_replacement: 'Intex' + model_replacement: '$1 $3' + - regex: '; {0,2}(?:INTEX|Intex)(?:[_ ]([^\ _;/]+))(?:[_ ]([^\ _;/]+)|) {0,2}(?:Build|;)' + device_replacement: '$1 $2' + brand_replacement: 'Intex' + model_replacement: '$1 $2' + - regex: '; {0,2}([iI]Buddy)[ _]?(Connect)(?:_|\?_| |)([^;/]{0,50}) {0,2}(?:Build|;)' + device_replacement: '$1 $2 $3' + brand_replacement: 'Intex' + model_replacement: 'iBuddy $2 $3' + - regex: '; {0,2}(I-Buddy)[ _]([^;/]{1,100}?) {0,2}(?:Build|;)' + device_replacement: '$1 $2' + brand_replacement: 'Intex' + model_replacement: 'iBuddy $2' + - regex: '; {0,2}(iOCEAN) ([^/]{1,50})(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: '$1 $2' + brand_replacement: 'iOCEAN' + model_replacement: '$2' + - regex: '; {0,2}(TP\d+(?:\.\d+|)\-\d[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'ionik $1' + brand_replacement: 'ionik' + model_replacement: '$1' + - regex: '; {0,2}(M702pro)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Iru' + model_replacement: '$1' + - regex: '; {0,2}itel ([^;/]*)(?: Build|\) AppleWebKit)' + device_replacement: 'Itel $1' + brand_replacement: 'Itel' + model_replacement: '$1' + - regex: '; {0,2}(DE88Plus|MD70)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Ivio' + model_replacement: '$1' + - regex: '; {0,2}IVIO[_\-]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Ivio' + model_replacement: '$1' + - regex: '; {0,2}(TPC-\d+|JAY-TECH)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Jaytech' + model_replacement: '$1' + - regex: '; {0,2}(JY-[^;/]{1,100}|G[234]S?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Jiayu' + model_replacement: '$1' + - regex: '; {0,2}(JXD)[ _\-]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'JXD' + model_replacement: '$2' + - regex: '; {0,2}Karbonn[ _]?([^;/]{1,100}) {0,2}(?:Build|;)' + regex_flag: 'i' + device_replacement: '$1' + brand_replacement: 'Karbonn' + model_replacement: '$1' + - regex: '; {0,2}([^;]{1,200}) Build/Karbonn' + device_replacement: '$1' + brand_replacement: 'Karbonn' + model_replacement: '$1' + - regex: '; {0,2}(A11|A39|A37|A34|ST8|ST10|ST7|Smart Tab3|Smart Tab2|Titanium S\d) +Build' + device_replacement: '$1' + brand_replacement: 'Karbonn' + model_replacement: '$1' + - regex: '; {0,2}(IS01|IS03|IS05|IS\d{2}SH)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Sharp' + model_replacement: '$1' + - regex: '; {0,2}(IS04)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Regza' + model_replacement: '$1' + - regex: '; {0,2}(IS06|IS\d{2}PT)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Pantech' + model_replacement: '$1' + - regex: '; {0,2}(IS11S)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'SonyEricsson' + model_replacement: 'Xperia Acro' + - regex: '; {0,2}(IS11CA)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Casio' + model_replacement: 'GzOne $1' + - regex: '; {0,2}(IS11LG)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'LG' + model_replacement: 'Optimus X' + - regex: '; {0,2}(IS11N)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Medias' + model_replacement: '$1' + - regex: '; {0,2}(IS11PT)(?: Build|\) AppleWebKit)' + device_replacement: 
'$1' + brand_replacement: 'Pantech' + model_replacement: 'MIRACH' + - regex: '; {0,2}(IS12F)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Fujitsu' + model_replacement: 'Arrows ES' + - regex: '; {0,2}(IS12M)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Motorola' + model_replacement: 'XT909' + - regex: '; {0,2}(IS12S)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'SonyEricsson' + model_replacement: 'Xperia Acro HD' + - regex: '; {0,2}(ISW11F)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Fujitsu' + model_replacement: 'Arrowz Z' + - regex: '; {0,2}(ISW11HT)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'HTC' + model_replacement: 'EVO' + - regex: '; {0,2}(ISW11K)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Kyocera' + model_replacement: 'DIGNO' + - regex: '; {0,2}(ISW11M)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Motorola' + model_replacement: 'Photon' + - regex: '; {0,2}(ISW11SC)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Samsung' + model_replacement: 'GALAXY S II WiMAX' + - regex: '; {0,2}(ISW12HT)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'HTC' + model_replacement: 'EVO 3D' + - regex: '; {0,2}(ISW13HT)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'HTC' + model_replacement: 'J' + - regex: '; {0,2}(ISW?[0-9]{2}[A-Z]{0,2})(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'KDDI' + model_replacement: '$1' + - regex: '; {0,2}(INFOBAR [^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'KDDI' + model_replacement: '$1' + - regex: '; {0,2}(JOYPAD|Joypad)[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'Kingcom' + model_replacement: '$1 $2' + - regex: '; {0,2}(Vox|VOX|Arc|K080)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: '$1' + brand_replacement: 'Kobo' + model_replacement: '$1' + - regex: '\b(Kobo Touch)\b' + device_replacement: '$1' + brand_replacement: 'Kobo' + model_replacement: '$1' + - regex: '; {0,2}(K-Touch)[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: '$1 $2' + brand_replacement: 'Ktouch' + model_replacement: '$2' + - regex: '; {0,2}((?:EV|KM)-S\d+[A-Z]?)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: '$1' + brand_replacement: 'KTtech' + model_replacement: '$1' + - regex: '; {0,2}(Zio|Hydro|Torque|Event|EVENT|Echo|Milano|Rise|URBANO PROGRESSO|WX04K|WX06K|WX10K|KYL21|101K|C5[12]\d{2})(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Kyocera' + model_replacement: '$1' + - regex: '; {0,2}(?:LAVA[ _]|)IRIS[ _\-]?([^/;\)]+) {0,2}(?:;|\)|Build)' + regex_flag: 'i' + device_replacement: 'Iris $1' + brand_replacement: 'Lava' + model_replacement: 'Iris $1' + - regex: '; {0,2}LAVA[ _]([^;/]{1,100}) Build' + device_replacement: '$1' + brand_replacement: 'Lava' + model_replacement: '$1' + - regex: '; {0,2}(?:(Aspire A1)|(?:LEMON|Lemon)[ _]([^;/]{1,100}))_?(?: Build|\) AppleWebKit)' + device_replacement: 'Lemon $1$2' + brand_replacement: 'Lemon' + model_replacement: '$1$2' + - regex: '; {0,2}(TAB-1012)(?: Build|\) AppleWebKit)' + device_replacement: 'Lenco $1' + brand_replacement: 'Lenco' + model_replacement: '$1' + - regex: '; Lenco ([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 
'Lenco $1' + brand_replacement: 'Lenco' + model_replacement: '$1' + - regex: '; {0,2}(A1_07|A2107A-H|S2005A-H|S1-37AH0) Build' + device_replacement: '$1' + brand_replacement: 'Lenovo' + model_replacement: '$1' + - regex: '; {0,2}(Idea[Tp]ab)[ _]([^;/]{1,100});? Build' + device_replacement: 'Lenovo $1 $2' + brand_replacement: 'Lenovo' + model_replacement: '$1 $2' + - regex: '; {0,2}(Idea(?:Tab|pad)) ?([^;/]{1,100}) Build' + device_replacement: 'Lenovo $1 $2' + brand_replacement: 'Lenovo' + model_replacement: '$1 $2' + - regex: '; {0,2}(ThinkPad) ?(Tablet) Build/' + device_replacement: 'Lenovo $1 $2' + brand_replacement: 'Lenovo' + model_replacement: '$1 $2' + - regex: '; {0,2}(?:LNV-|)(?:=?[Ll]enovo[ _\-]?|LENOVO[ _])(.{1,200}?)(?:Build|[;/\)])' + device_replacement: 'Lenovo $1' + brand_replacement: 'Lenovo' + model_replacement: '$1' + - regex: '[;,] (?:Vodafone |)(SmartTab) ?(II) ?(\d+) Build/' + device_replacement: 'Lenovo $1 $2 $3' + brand_replacement: 'Lenovo' + model_replacement: '$1 $2 $3' + - regex: '; {0,2}(?:Ideapad |)K1 Build/' + device_replacement: 'Lenovo Ideapad K1' + brand_replacement: 'Lenovo' + model_replacement: 'Ideapad K1' + - regex: '; {0,2}(3GC101|3GW10[01]|A390) Build/' + device_replacement: '$1' + brand_replacement: 'Lenovo' + model_replacement: '$1' + - regex: '\b(?:Lenovo|LENOVO)+[ _\-]?([^,;:/ ]+)' + device_replacement: 'Lenovo $1' + brand_replacement: 'Lenovo' + model_replacement: '$1' + - regex: '; {0,2}(MFC\d+)[A-Z]{2}([^;,/]*),?(?: Build|\) AppleWebKit)' + device_replacement: '$1$2' + brand_replacement: 'Lexibook' + model_replacement: '$1$2' + - regex: '; {0,2}(E[34][0-9]{2}|LS[6-8][0-9]{2}|VS[6-9][0-9]+[^;/]{1,30}|Nexus 4|Nexus 5X?|GT540f?|Optimus (?:2X|G|4X HD)|OptimusX4HD) {0,2}(?:Build|;)' + device_replacement: '$1' + brand_replacement: 'LG' + model_replacement: '$1' + - regex: '[;:] {0,2}(L-\d+[A-Z]|LGL\d+[A-Z]?)(?:/V\d+|) {0,2}(?:Build|[;\)])' + device_replacement: '$1' + brand_replacement: 'LG' + model_replacement: '$1' + - regex: '; {0,2}(LG-)([A-Z]{1,2}\d{2,}[^,;/\)\(]*?)(?:Build| V\d+|[,;/\)\(]|$)' + device_replacement: '$1$2' + brand_replacement: 'LG' + model_replacement: '$2' + - regex: '; {0,2}(LG[ \-]|LG)([^;/]{1,100})[;/]? 
Build' + device_replacement: '$1$2' + brand_replacement: 'LG' + model_replacement: '$2' + - regex: '^(LG)-([^;/]{1,100})/ Mozilla/.{0,200}; Android' + device_replacement: '$1 $2' + brand_replacement: 'LG' + model_replacement: '$2' + - regex: '(Web0S); Linux/(SmartTV)' + device_replacement: 'LG $1 $2' + brand_replacement: 'LG' + model_replacement: '$1 $2' + - regex: '; {0,2}((?:SMB|smb)[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Malata' + model_replacement: '$1' + - regex: '; {0,2}(?:Malata|MALATA) ([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Malata' + model_replacement: '$1' + - regex: '; {0,2}(MS[45][0-9]{3}|MID0[568][NS]?|MID[1-9]|MID[78]0[1-9]|MID970[1-9]|MID100[1-9])(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Manta' + model_replacement: '$1' + - regex: '; {0,2}(M1052|M806|M9000|M9100|M9701|MID100|MID120|MID125|MID130|MID135|MID140|MID701|MID710|MID713|MID727|MID728|MID731|MID732|MID733|MID735|MID736|MID737|MID760|MID800|MID810|MID820|MID830|MID833|MID835|MID860|MID900|MID930|MID933|MID960|MID980)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Match' + model_replacement: '$1' + - regex: '; {0,2}(GenxDroid7|MSD7.{0,200}?|AX\d.{0,200}?|Tab 701|Tab 722)(?: Build|\) AppleWebKit)' + device_replacement: 'Maxx $1' + brand_replacement: 'Maxx' + model_replacement: '$1' + - regex: '; {0,2}(M-PP[^;/]{1,30}|PhonePad ?\d{2,}[^;/]{1,30}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Mediacom $1' + brand_replacement: 'Mediacom' + model_replacement: '$1' + - regex: '; {0,2}(M-MP[^;/]{1,30}|SmartPad ?\d{2,}[^;/]{1,30}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Mediacom $1' + brand_replacement: 'Mediacom' + model_replacement: '$1' + - regex: '; {0,2}(?:MD_|)LIFETAB[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: 'Medion Lifetab $1' + brand_replacement: 'Medion' + model_replacement: 'Lifetab $1' + - regex: '; {0,2}MEDION ([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Medion $1' + brand_replacement: 'Medion' + model_replacement: '$1' + - regex: '; {0,2}(M030|M031|M035|M040|M065|m9)(?: Build|\) AppleWebKit)' + device_replacement: 'Meizu $1' + brand_replacement: 'Meizu' + model_replacement: '$1' + - regex: '; {0,2}(?:meizu_|MEIZU )(.{1,200}?) 
{0,2}(?:Build|[;\)])'
+    device_replacement: 'Meizu $1'
+    brand_replacement: 'Meizu'
+    model_replacement: '$1'
+  - regex: 'Quest 3'
+    device_replacement: 'Quest'
+    brand_replacement: 'Meta'
+    model_replacement: 'Quest 3'
+  - regex: 'Quest 2'
+    device_replacement: 'Quest'
+    brand_replacement: 'Meta'
+    model_replacement: 'Quest 2'
+  - regex: 'Quest Pro'
+    device_replacement: 'Quest'
+    brand_replacement: 'Meta'
+    model_replacement: 'Quest Pro'
+  - regex: 'Quest'
+    device_replacement: 'Quest'
+    brand_replacement: 'Meta'
+    model_replacement: 'Quest'
+  - regex: '; {0,2}(?:Micromax[ _](A111|A240)|(A111|A240)) Build'
+    regex_flag: 'i'
+    device_replacement: 'Micromax $1$2'
+    brand_replacement: 'Micromax'
+    model_replacement: '$1$2'
+  - regex: '; {0,2}Micromax[ _](A\d{2,3}[^;/]*) Build'
+    regex_flag: 'i'
+    device_replacement: 'Micromax $1'
+    brand_replacement: 'Micromax'
+    model_replacement: '$1'
+  - regex: '; {0,2}(A\d{2}|A[12]\d{2}|A90S|A110Q) Build'
+    regex_flag: 'i'
+    device_replacement: 'Micromax $1'
+    brand_replacement: 'Micromax'
+    model_replacement: '$1'
+  - regex: '; {0,2}Micromax[ _](P\d{3}[^;/]*) Build'
+    regex_flag: 'i'
+    device_replacement: 'Micromax $1'
+    brand_replacement: 'Micromax'
+    model_replacement: '$1'
+  - regex: '; {0,2}(P\d{3}|P\d{3}\(Funbook\)) Build'
+    regex_flag: 'i'
+    device_replacement: 'Micromax $1'
+    brand_replacement: 'Micromax'
+    model_replacement: '$1'
+  - regex: '; {0,2}(MITO)[ _\-]?([^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+    regex_flag: 'i'
+    device_replacement: '$1 $2'
+    brand_replacement: 'Mito'
+    model_replacement: '$2'
+  - regex: '; {0,2}(Cynus)[ _](F5|T\d|.{1,200}?) {0,2}(?:Build|[;/\)])'
+    regex_flag: 'i'
+    device_replacement: '$1 $2'
+    brand_replacement: 'Mobistel'
+    model_replacement: '$1 $2'
+  - regex: '; {0,2}(MODECOM |)(FreeTab) ?([^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+    regex_flag: 'i'
+    device_replacement: '$1$2 $3'
+    brand_replacement: 'Modecom'
+    model_replacement: '$2 $3'
+  - regex: '; {0,2}(MODECOM )([^;/]{1,100}?)(?: Build|\) AppleWebKit)'
+    regex_flag: 'i'
+    device_replacement: '$1 $2'
+    brand_replacement: 'Modecom'
+    model_replacement: '$2'
+  - regex: '; {0,2}(MZ\d{3}\+?|MZ\d{3} 4G|Xoom|XOOM[^;/]*) Build'
+    device_replacement: 'Motorola $1'
+    brand_replacement: 'Motorola'
+    model_replacement: '$1'
+  - regex: '; {0,2}(Milestone )(XT[^;/]*) Build'
+    device_replacement: 'Motorola $1$2'
+    brand_replacement: 'Motorola'
+    model_replacement: '$2'
+  - regex: '; {0,2}(Motoroi ?x|Droid X|DROIDX) Build'
+    regex_flag: 'i'
+    device_replacement: 'Motorola $1'
+    brand_replacement: 'Motorola'
+    model_replacement: 'DROID X'
+  - regex: '; {0,2}(Droid[^;/]*|DROID[^;/]*|Milestone[^;/]*|Photon|Triumph|Devour|Titanium) Build'
+    device_replacement: 'Motorola $1'
+    brand_replacement: 'Motorola'
+    model_replacement: '$1'
+  - regex: '; {0,2}(A555|A85[34][^;/]*|A95[356]|ME[58]\d{2}\+?|ME600|ME632|ME722|MB\d{3}\+?|MT680|MT710|MT870|MT887|MT917|WX435|WX453|WX44[25]|XT\d{3,4}[A-Z\+]*|CL[iI]Q|CL[iI]Q XT) Build'
+    device_replacement: '$1'
+    brand_replacement: 'Motorola'
+    model_replacement: '$1'
+  - regex: '; {0,2}(Motorola MOT-|Motorola[ _\-]|MOT\-?)([^;/]{1,100}) Build'
+    device_replacement: '$1$2'
+    brand_replacement: 'Motorola'
+    model_replacement: '$2'
+  - regex: '; {0,2}(Moto[_ ]?|MOT\-)([^;/]{1,100}) Build'
+    device_replacement: '$1$2'
+    brand_replacement: 'Motorola'
+    model_replacement: '$2'
+  - regex: '; {0,2}((?:MP[DQ]C|MPG\d{1,4}|MP\d{3,4}|MID(?:(?:10[234]|114|43|7[247]|8[24]|7)C|8[01]1))[^;/]*)(?: Build|\) AppleWebKit)'
+    device_replacement: '$1' +
brand_replacement: 'Mpman' + model_replacement: '$1' + - regex: '; {0,2}(?:MSI[ _]|)(Primo\d+|Enjoy[ _\-][^;/]{1,100}?)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: '$1' + brand_replacement: 'Msi' + model_replacement: '$1' + - regex: '; {0,2}Multilaser[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Multilaser' + model_replacement: '$1' + - regex: '; {0,2}(My)[_]?(Pad)[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1$2 $3' + brand_replacement: 'MyPhone' + model_replacement: '$1$2 $3' + - regex: '; {0,2}(My)\|?(Phone)[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1$2 $3' + brand_replacement: 'MyPhone' + model_replacement: '$3' + - regex: '; {0,2}(A\d+)[ _](Duo|)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: '$1 $2' + brand_replacement: 'MyPhone' + model_replacement: '$1 $2' + - regex: '; {0,2}(myTab[^;/]*)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Mytab' + model_replacement: '$1' + - regex: '; {0,2}(NABI2?-)([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1$2' + brand_replacement: 'Nabi' + model_replacement: '$2' + - regex: '; {0,2}(N-\d+[CDE])(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Nec' + model_replacement: '$1' + - regex: '; ?(NEC-)(.{0,200}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1$2' + brand_replacement: 'Nec' + model_replacement: '$2' + - regex: '; {0,2}(LT-NA7)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Nec' + model_replacement: 'Lifetouch Note' + - regex: '; {0,2}(NXM\d+[A-Za-z0-9_]{0,50}|Next\d[A-Za-z0-9_ \-]{0,50}|NEXT\d[A-Za-z0-9_ \-]{0,50}|Nextbook [A-Za-z0-9_ ]{0,50}|DATAM803HC|M805)(?: Build|[\);])' + device_replacement: '$1' + brand_replacement: 'Nextbook' + model_replacement: '$1' + - regex: '; {0,2}(Nokia)([ _\-]{0,5})([^;/]{0,50}) Build' + regex_flag: 'i' + device_replacement: '$1$2$3' + brand_replacement: 'Nokia' + model_replacement: '$3' + - regex: '; {0,2}(TA\-\d{4})(?: Build|\) AppleWebKit)' + device_replacement: 'Nokia $1' + brand_replacement: 'Nokia' + model_replacement: '$1' + - regex: '; {0,2}(Nook ?|Barnes & Noble Nook |BN )([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1$2' + brand_replacement: 'Nook' + model_replacement: '$2' + - regex: '; {0,2}(NOOK |)(BNRV200|BNRV200A|BNTV250|BNTV250A|BNTV400|BNTV600|LogicPD Zoom2)(?: Build|\) AppleWebKit)' + device_replacement: '$1$2' + brand_replacement: 'Nook' + model_replacement: '$2' + - regex: '; Build/(Nook)' + device_replacement: '$1' + brand_replacement: 'Nook' + model_replacement: 'Tablet' + - regex: '; {0,2}(OP110|OliPad[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Olivetti $1' + brand_replacement: 'Olivetti' + model_replacement: '$1' + - regex: '; {0,2}OMEGA[ _\-](MID[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Omega $1' + brand_replacement: 'Omega' + model_replacement: '$1' + - regex: '^(MID7500|MID\d+) Mozilla/5\.0 \(iPad;' + device_replacement: 'Omega $1' + brand_replacement: 'Omega' + model_replacement: '$1' + - regex: '; {0,2}((?:CIUS|cius)[^;/]*)(?: Build|\) AppleWebKit)' + device_replacement: 'Openpeak $1' + brand_replacement: 'Openpeak' + model_replacement: '$1' + - regex: '; {0,2}(Find ?(?:5|7a)|R8[012]\d{1,2}|T703\d?|U70\d{1,2}T?|X90\d{1,2}|[AFR]\d{1,2}[a-z]{1,2})(?: Build|\) AppleWebKit)' + device_replacement: 'Oppo $1' + brand_replacement: 'Oppo' + model_replacement: '$1' + - regex: '; 
{0,2}OPPO ?([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Oppo $1' + brand_replacement: 'Oppo' + model_replacement: '$1' + - regex: '; {0,2}(CPH\d{1,4}|RMX\d{1,4}|P[A-Z]{3}\d{2})(?: Build|\) AppleWebKit)' + device_replacement: 'Oppo $1' + brand_replacement: 'Oppo' + - regex: '; {0,2}(A1601)(?: Build|\) AppleWebKit)' + device_replacement: 'Oppo F1s' + brand_replacement: 'Oppo' + model_replacement: '$1' + - regex: '; {0,2}(?:Odys\-|ODYS\-|ODYS )([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Odys $1' + brand_replacement: 'Odys' + model_replacement: '$1' + - regex: '; {0,2}(SELECT) ?(7)(?: Build|\) AppleWebKit)' + device_replacement: 'Odys $1 $2' + brand_replacement: 'Odys' + model_replacement: '$1 $2' + - regex: '; {0,2}(PEDI)_(PLUS)_(W)(?: Build|\) AppleWebKit)' + device_replacement: 'Odys $1 $2 $3' + brand_replacement: 'Odys' + model_replacement: '$1 $2 $3' + - regex: '; {0,2}(AEON|BRAVIO|FUSION|FUSION2IN1|Genio|EOS10|IEOS[^;/]*|IRON|Loox|LOOX|LOOX Plus|Motion|NOON|NOON_PRO|NEXT|OPOS|PEDI[^;/]*|PRIME[^;/]*|STUDYTAB|TABLO|Tablet-PC-4|UNO_X8|XELIO[^;/]*|Xelio ?\d+ ?[Pp]ro|XENO10|XPRESS PRO)(?: Build|\) AppleWebKit)' + device_replacement: 'Odys $1' + brand_replacement: 'Odys' + model_replacement: '$1' + - regex: '; (ONE [a-zA-Z]\d+)(?: Build|\) AppleWebKit)' + device_replacement: 'OnePlus $1' + brand_replacement: 'OnePlus' + model_replacement: '$1' + - regex: '; (ONEPLUS [a-zA-Z]\d+)(?: Build|\) AppleWebKit)' + device_replacement: 'OnePlus $1' + brand_replacement: 'OnePlus' + model_replacement: '$1' + - regex: '; {0,2}(HD1903|GM1917|IN2025|LE2115|LE2127|HD1907|BE2012|BE2025|BE2026|BE2028|BE2029|DE2117|DE2118|EB2101|GM1900|GM1910|GM1915|HD1905|HD1925|IN2015|IN2017|IN2019|KB2005|KB2007|LE2117|LE2125|BE2015|GM1903|HD1900|HD1901|HD1910|HD1913|IN2010|IN2013|IN2020|LE2111|LE2120|LE2121|LE2123|BE2011|IN2023|KB2003|LE2113|NE2215|DN2101)(?: Build|\) AppleWebKit)' + device_replacement: 'OnePlus $1' + brand_replacement: 'OnePlus' + model_replacement: 'OnePlus $1' + - regex: '; (OnePlus[ a-zA-z0-9]{0,50});((?: Build|.{0,50}\) AppleWebKit))' + device_replacement: '$1' + brand_replacement: 'OnePlus' + model_replacement: '$1' + - regex: '; (OnePlus[ a-zA-z0-9]{0,50})((?: Build|\) AppleWebKit))' + device_replacement: '$1' + brand_replacement: 'OnePlus' + model_replacement: '$1' + - regex: '; {0,2}(TP-\d+)(?: Build|\) AppleWebKit)' + device_replacement: 'Orion $1' + brand_replacement: 'Orion' + model_replacement: '$1' + - regex: '; {0,2}(G100W?)(?: Build|\) AppleWebKit)' + device_replacement: 'PackardBell $1' + brand_replacement: 'PackardBell' + model_replacement: '$1' + - regex: '; {0,2}(Panasonic)[_ ]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: '$1' + model_replacement: '$2' + - regex: '; {0,2}(FZ-A1B|JT-B1)(?: Build|\) AppleWebKit)' + device_replacement: 'Panasonic $1' + brand_replacement: 'Panasonic' + model_replacement: '$1' + - regex: '; {0,2}(dL1|DL1)(?: Build|\) AppleWebKit)' + device_replacement: 'Panasonic $1' + brand_replacement: 'Panasonic' + model_replacement: '$1' + - regex: '; {0,2}(SKY[ _]|)(IM\-[AT]\d{3}[^;/]{1,100}).{0,30} Build/' + device_replacement: 'Pantech $1$2' + brand_replacement: 'Pantech' + model_replacement: '$1$2' + - regex: '; {0,2}((?:ADR8995|ADR910L|ADR930L|ADR930VW|PTL21|P8000)(?: 4G|)) Build/' + device_replacement: '$1' + brand_replacement: 'Pantech' + model_replacement: '$1' + - regex: '; {0,2}Pantech([^;/]{1,30}).{0,200}? 
Build/' + device_replacement: 'Pantech $1' + brand_replacement: 'Pantech' + model_replacement: '$1' + - regex: '; {0,2}(papyre)[ _\-]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: '$1 $2' + brand_replacement: 'Papyre' + model_replacement: '$2' + - regex: '; {0,2}(?:Touchlet )?(X10\.[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Pearl $1' + brand_replacement: 'Pearl' + model_replacement: '$1' + - regex: '; PHICOMM (i800)(?: Build|\) AppleWebKit)' + device_replacement: 'Phicomm $1' + brand_replacement: 'Phicomm' + model_replacement: '$1' + - regex: '; PHICOMM ([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Phicomm $1' + brand_replacement: 'Phicomm' + model_replacement: '$1' + - regex: '; {0,2}(FWS\d{3}[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Phicomm $1' + brand_replacement: 'Phicomm' + model_replacement: '$1' + - regex: '; {0,2}(D633|D822|D833|T539|T939|V726|W335|W336|W337|W3568|W536|W5510|W626|W632|W6350|W6360|W6500|W732|W736|W737|W7376|W820|W832|W8355|W8500|W8510|W930)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Philips' + model_replacement: '$1' + - regex: '; {0,2}(?:Philips|PHILIPS)[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Philips $1' + brand_replacement: 'Philips' + model_replacement: '$1' + - regex: 'Android 4\..{0,200}; {0,2}(M[12356789]|U[12368]|S[123])\ ?(pro)?(?: Build|\) AppleWebKit)' + device_replacement: 'Pipo $1$2' + brand_replacement: 'Pipo' + model_replacement: '$1$2' + - regex: '; {0,2}(MOMO[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Ployer' + model_replacement: '$1' + - regex: '; {0,2}(?:Polaroid[ _]|)((?:MIDC\d{3,}|PMID\d{2,}|PTAB\d{3,})[^;/]{0,30}?)(\/[^;/]{0,30}|)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Polaroid' + model_replacement: '$1' + - regex: '; {0,2}(?:Polaroid )(Tablet)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Polaroid' + model_replacement: '$1' + - regex: '; {0,2}(POMP)[ _\-](.{1,200}?) 
{0,2}(?:Build|[;/\)])' + device_replacement: '$1 $2' + brand_replacement: 'Pomp' + model_replacement: '$2' + - regex: '; {0,2}(TB07STA|TB10STA|TB07FTA|TB10FTA)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Positivo' + model_replacement: '$1' + - regex: '; {0,2}(?:Positivo |)((?:YPY|Ypy)[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Positivo' + model_replacement: '$1' + - regex: '; {0,2}(MOB-[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'POV' + model_replacement: '$1' + - regex: '; {0,2}POV[ _\-]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'POV $1' + brand_replacement: 'POV' + model_replacement: '$1' + - regex: '; {0,2}((?:TAB-PLAYTAB|TAB-PROTAB|PROTAB|PlayTabPro|Mobii[ _\-]|TAB-P)[^;/]*)(?: Build|\) AppleWebKit)' + device_replacement: 'POV $1' + brand_replacement: 'POV' + model_replacement: '$1' + - regex: '; {0,2}(?:Prestigio |)((?:PAP|PMP)\d[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Prestigio $1' + brand_replacement: 'Prestigio' + model_replacement: '$1' + - regex: '; {0,2}(PLT[0-9]{4}.{0,200}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Proscan' + model_replacement: '$1' + - regex: '; {0,2}(A2|A5|A8|A900)_?(Classic|)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'Qmobile' + model_replacement: '$1 $2' + - regex: '; {0,2}(Q[Mm]obile)_([^_]+)_([^_]+?)(?: Build|\) AppleWebKit)' + device_replacement: 'Qmobile $2 $3' + brand_replacement: 'Qmobile' + model_replacement: '$2 $3' + - regex: '; {0,2}(Q\-?[Mm]obile)[_ ](A[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Qmobile $2' + brand_replacement: 'Qmobile' + model_replacement: '$2' + - regex: '; {0,2}(Q\-Smart)[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'Qmobilevn' + model_replacement: '$2' + - regex: '; {0,2}(Q\-?[Mm]obile)[ _\-](S[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'Qmobilevn' + model_replacement: '$2' + - regex: '; {0,2}(TA1013)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Quanta' + model_replacement: '$1' + - regex: '; (RCT\w+)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'RCA' + model_replacement: '$1' + - regex: '; RCA (\w+)(?: Build|\) AppleWebKit)' + device_replacement: 'RCA $1' + brand_replacement: 'RCA' + model_replacement: '$1' + - regex: '; {0,2}(RK\d+),?(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Rockchip' + model_replacement: '$1' + - regex: ' Build/(RK\d+)' + device_replacement: '$1' + brand_replacement: 'Rockchip' + model_replacement: '$1' + - regex: '; {0,2}(SAMSUNG |Samsung |)((?:Galaxy (?:Note II|S\d)|GT-I9082|GT-I9205|GT-N7\d{3}|SM-N9005)[^;/]{0,100})\/?[^;/]{0,50} Build/' + device_replacement: 'Samsung $1$2' + brand_replacement: 'Samsung' + model_replacement: '$2' + - regex: '; {0,2}(Google |)(Nexus [Ss](?: 4G|)) Build/' + device_replacement: 'Samsung $1$2' + brand_replacement: 'Samsung' + model_replacement: '$2' + - regex: '; {0,2}(SAMSUNG |Samsung )([^\/]{0,50})\/[^ ]{0,50} Build/' + device_replacement: 'Samsung $2' + brand_replacement: 'Samsung' + model_replacement: '$2' + - regex: '; {0,2}(Galaxy(?: Ace| Nexus| S ?II+|Nexus S| with MCR 1.2| Mini Plus 4G|)) Build/' + device_replacement: 'Samsung $1' + brand_replacement: 'Samsung' + model_replacement: '$1' + - regex: '; {0,2}(SAMSUNG[ 
_\-]|)(?:SAMSUNG[ _\-])([^;/]{1,100}) Build' + device_replacement: 'Samsung $2' + brand_replacement: 'Samsung' + model_replacement: '$2' + - regex: '; {0,2}(SAMSUNG-|)(GT\-[BINPS]\d{4}[^\/]{0,50})(\/[^ ]{0,50}) Build' + device_replacement: 'Samsung $1$2$3' + brand_replacement: 'Samsung' + model_replacement: '$2' + - regex: '(?:; {0,2}|^)((?:GT\-[BIiNPS]\d{4}|I9\d{2}0[A-Za-z\+]?\b)[^;/\)]*?)(?:Build|Linux|MIUI|[;/\)])' + device_replacement: 'Samsung $1' + brand_replacement: 'Samsung' + model_replacement: '$1' + - regex: '; (SAMSUNG-)([A-Za-z0-9\-]{0,50}).{0,200} Build/' + device_replacement: 'Samsung $1$2' + brand_replacement: 'Samsung' + model_replacement: '$2' + - regex: '; {0,2}((?:SCH|SGH|SHV|SHW|SPH|SC|SM)\-[A-Za-z0-9 ]{1,50})(/?[^ ]*|) Build' + device_replacement: 'Samsung $1' + brand_replacement: 'Samsung' + model_replacement: '$1' + - regex: '; {0,2}((?:SC)\-[A-Za-z0-9 ]{1,50})(/?[^ ]*|)\)' + device_replacement: 'Samsung $1' + brand_replacement: 'Samsung' + model_replacement: '$1' + - regex: ' ((?:SCH)\-[A-Za-z0-9 ]{1,50})(/?[^ ]*|) Build' + device_replacement: 'Samsung $1' + brand_replacement: 'Samsung' + model_replacement: '$1' + - regex: '; {0,2}(Behold ?(?:2|II)|YP\-G[^;/]{1,100}|EK-GC100|SCL21|I9300) Build' + device_replacement: 'Samsung $1' + brand_replacement: 'Samsung' + model_replacement: '$1' + - regex: '; {0,2}((?:SCH|SGH|SHV|SHW|SPH|SC|SM)\-[A-Za-z0-9]{5,6})[\)]' + device_replacement: 'Samsung $1' + brand_replacement: 'Samsung' + model_replacement: '$1' + - regex: '; {0,2}(SH\-?\d\d[^;/]{1,100}|SBM\d[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Sharp' + model_replacement: '$1' + - regex: '; {0,2}(SHARP[ -])([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1$2' + brand_replacement: 'Sharp' + model_replacement: '$2' + - regex: '; {0,2}(SPX[_\-]\d[^;/]*)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Simvalley' + model_replacement: '$1' + - regex: '; {0,2}(SX7\-PEARL\.GmbH)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Simvalley' + model_replacement: '$1' + - regex: '; {0,2}(SP[T]?\-\d{2}[^;/]*)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Simvalley' + model_replacement: '$1' + - regex: '; {0,2}(SK\-.{0,200}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'SKtelesys' + model_replacement: '$1' + - regex: '; {0,2}(?:SKYTEX|SX)-([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Skytex' + model_replacement: '$1' + - regex: '; {0,2}(IMAGINE [^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Skytex' + model_replacement: '$1' + - regex: '; {0,2}(SmartQ) ?([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: '$1' + model_replacement: '$2' + - regex: '; {0,2}(WF7C|WF10C|SBT[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Smartbitt' + model_replacement: '$1' + - regex: '; {0,2}(SBM(?:003SH|005SH|006SH|007SH|102SH)) Build' + device_replacement: '$1' + brand_replacement: 'Sharp' + model_replacement: '$1' + - regex: '; {0,2}(003P|101P|101P11C|102P) Build' + device_replacement: '$1' + brand_replacement: 'Panasonic' + model_replacement: '$1' + - regex: '; {0,2}(00\dZ) Build/' + device_replacement: '$1' + brand_replacement: 'ZTE' + model_replacement: '$1' + - regex: '; HTC(X06HT) Build' + device_replacement: '$1' + brand_replacement: 'HTC' + model_replacement: 
'$1' + - regex: '; {0,2}(001HT|X06HT) Build' + device_replacement: '$1' + brand_replacement: 'HTC' + model_replacement: '$1' + - regex: '; {0,2}(201M) Build' + device_replacement: '$1' + brand_replacement: 'Motorola' + model_replacement: 'XT902' + - regex: '; {0,2}(ST\d{4}.{0,200})Build/ST' + device_replacement: 'Trekstor $1' + brand_replacement: 'Trekstor' + model_replacement: '$1' + - regex: '; {0,2}(ST\d{4}.{0,200}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Trekstor $1' + brand_replacement: 'Trekstor' + model_replacement: '$1' + - regex: '; {0,2}(Sony ?Ericsson ?)([^;/]{1,100}) Build' + device_replacement: '$1$2' + brand_replacement: 'SonyEricsson' + model_replacement: '$2' + - regex: '; {0,2}((?:SK|ST|E|X|LT|MK|MT|WT)\d{2}[a-z0-9]*(?:-o|)|R800i|U20i) Build' + device_replacement: '$1' + brand_replacement: 'SonyEricsson' + model_replacement: '$1' + - regex: '; {0,2}(Xperia (?:A8|Arc|Acro|Active|Live with Walkman|Mini|Neo|Play|Pro|Ray|X\d+)[^;/]{0,50}) Build' + regex_flag: 'i' + device_replacement: '$1' + brand_replacement: 'SonyEricsson' + model_replacement: '$1' + - regex: '; Sony (Tablet[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Sony $1' + brand_replacement: 'Sony' + model_replacement: '$1' + - regex: '; Sony ([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Sony $1' + brand_replacement: 'Sony' + model_replacement: '$1' + - regex: '; {0,2}(Sony)([A-Za-z0-9\-]+)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: '$1' + model_replacement: '$2' + - regex: '; {0,2}(Xperia [^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Sony' + model_replacement: '$1' + - regex: '; {0,2}(C(?:1[0-9]|2[0-9]|53|55|6[0-9])[0-9]{2}|D[25]\d{3}|D6[56]\d{2})(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Sony' + model_replacement: '$1' + - regex: '; {0,2}(SGP\d{3}|SGPT\d{2})(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Sony' + model_replacement: '$1' + - regex: '; {0,2}(NW-Z1000Series)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Sony' + model_replacement: '$1' + - regex: 'PLAYSTATION 3' + device_replacement: 'PlayStation 3' + brand_replacement: 'Sony' + model_replacement: 'PlayStation 3' + - regex: '(PlayStation (?:Portable|Vita|\d+))' + device_replacement: '$1' + brand_replacement: 'Sony' + model_replacement: '$1' + - regex: '; {0,2}((?:CSL_Spice|Spice|SPICE|CSL)[ _\-]?|)([Mm][Ii])([ _\-]|)(\d{3}[^;/]*)(?: Build|\) AppleWebKit)' + device_replacement: '$1$2$3$4' + brand_replacement: 'Spice' + model_replacement: 'Mi$4' + - regex: '; {0,2}(Sprint )(.{1,200}?) 
{0,2}(?:Build|[;/])' + device_replacement: '$1$2' + brand_replacement: 'Sprint' + model_replacement: '$2' + - regex: '\b(Sprint)[: ]([^;,/ ]+)' + device_replacement: '$1$2' + brand_replacement: 'Sprint' + model_replacement: '$2' + - regex: '; {0,2}(TAGI[ ]?)(MID) ?([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1$2$3' + brand_replacement: 'Tagi' + model_replacement: '$2$3' + - regex: '; {0,2}(Oyster500|Opal 800)(?: Build|\) AppleWebKit)' + device_replacement: 'Tecmobile $1' + brand_replacement: 'Tecmobile' + model_replacement: '$1' + - regex: '; {0,2}(TECNO[ _])([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1$2' + brand_replacement: 'Tecno' + model_replacement: '$2' + - regex: '; {0,2}Android for (Telechips|Techvision) ([^ ]+) ' + regex_flag: 'i' + device_replacement: '$1 $2' + brand_replacement: '$1' + model_replacement: '$2' + - regex: '; {0,2}(T-Hub2)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Telstra' + model_replacement: '$1' + - regex: '; {0,2}(PAD) ?(100[12])(?: Build|\) AppleWebKit)' + device_replacement: 'Terra $1$2' + brand_replacement: 'Terra' + model_replacement: '$1$2' + - regex: '; {0,2}(T[BM]-\d{3}[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Texet' + model_replacement: '$1' + - regex: '; {0,2}(tolino [^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Thalia' + model_replacement: '$1' + - regex: '; {0,2}Build/.{0,200} (TOLINO_BROWSER)' + device_replacement: '$1' + brand_replacement: 'Thalia' + model_replacement: 'Tolino Shine' + - regex: '; {0,2}(?:CJ[ -])?(ThL|THL)[ -]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'Thl' + model_replacement: '$2' + - regex: '; {0,2}(T100|T200|T5|W100|W200|W8s)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Thl' + model_replacement: '$1' + - regex: '; {0,2}(T-Mobile[ _]G2[ _]Touch) Build' + device_replacement: '$1' + brand_replacement: 'HTC' + model_replacement: 'Hero' + - regex: '; {0,2}(T-Mobile[ _]G2) Build' + device_replacement: '$1' + brand_replacement: 'HTC' + model_replacement: 'Desire Z' + - regex: '; {0,2}(T-Mobile myTouch Q) Build' + device_replacement: '$1' + brand_replacement: 'Huawei' + model_replacement: 'U8730' + - regex: '; {0,2}(T-Mobile myTouch) Build' + device_replacement: '$1' + brand_replacement: 'Huawei' + model_replacement: 'U8680' + - regex: '; {0,2}(T-Mobile_Espresso) Build' + device_replacement: '$1' + brand_replacement: 'HTC' + model_replacement: 'Espresso' + - regex: '; {0,2}(T-Mobile G1) Build' + device_replacement: '$1' + brand_replacement: 'HTC' + model_replacement: 'Dream' + - regex: '\b(T-Mobile ?|)(myTouch)[ _]?([34]G)[ _]?([^\/]*) (?:Mozilla|Build)' + device_replacement: '$1$2 $3 $4' + brand_replacement: 'HTC' + model_replacement: '$2 $3 $4' + - regex: '\b(T-Mobile)_([^_]+)_(.{0,200}) Build' + device_replacement: '$1 $2 $3' + brand_replacement: 'Tmobile' + model_replacement: '$2 $3' + - regex: '\b(T-Mobile)[_ ]?(.{0,200}?)Build' + device_replacement: '$1 $2' + brand_replacement: 'Tmobile' + model_replacement: '$2' + - regex: ' (ATP[0-9]{4})(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Tomtec' + model_replacement: '$1' + - regex: ' ?(TOOKY)[ _\-]([^;/]{1,100}) ?(?:Build|;)' + regex_flag: 'i' + device_replacement: '$1 $2' + brand_replacement: 'Tooky' + model_replacement: '$2' + - regex: '\b(TOSHIBA_AC_AND_AZ|TOSHIBA_FOLIO_AND_A|FOLIO_AND_A)' + 
device_replacement: '$1' + brand_replacement: 'Toshiba' + model_replacement: 'Folio 100' + - regex: '; {0,2}([Ff]olio ?100)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Toshiba' + model_replacement: 'Folio 100' + - regex: '; {0,2}(AT[0-9]{2,3}(?:\-A|LE\-A|PE\-A|SE|a|)|AT7-A|AT1S0|Hikari-iFrame/WDPF-[^;/]{1,100}|THRiVE|Thrive)(?: Build|\) AppleWebKit)' + device_replacement: 'Toshiba $1' + brand_replacement: 'Toshiba' + model_replacement: '$1' + - regex: '; {0,2}(TM-MID\d+[^;/]{1,50}|TOUCHMATE|MID-750)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Touchmate' + model_replacement: '$1' + - regex: '; {0,2}(TM-SM\d+[^;/]{1,50}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Touchmate' + model_replacement: '$1' + - regex: '; {0,2}(A10 [Bb]asic2?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Treq' + model_replacement: '$1' + - regex: '; {0,2}(TREQ[ _\-])([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: '$1$2' + brand_replacement: 'Treq' + model_replacement: '$2' + - regex: '; {0,2}(X-?5|X-?3)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Umeox' + model_replacement: '$1' + - regex: '; {0,2}(A502\+?|A936|A603|X1|X2)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Umeox' + model_replacement: '$1' + - regex: '; thor Build/' + device_replacement: 'Thor' + brand_replacement: 'Vernee' + model_replacement: 'Thor' + - regex: '; Thor (E)? Build/' + device_replacement: 'Thor $1' + brand_replacement: 'Vernee' + model_replacement: 'Thor' + - regex: '; Apollo Lite Build/' + device_replacement: 'Apollo Lite' + brand_replacement: 'Vernee' + model_replacement: 'Apollo' + - regex: '(TOUCH(?:TAB|PAD).{1,200}?)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: 'Versus $1' + brand_replacement: 'Versus' + model_replacement: '$1' + - regex: '(VERTU) ([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'Vertu' + model_replacement: '$2' + - regex: '; {0,2}(Videocon)[ _\-]([^;/]{1,100}?) 
{0,2}(?:Build|;)' + device_replacement: '$1 $2' + brand_replacement: 'Videocon' + model_replacement: '$2' + - regex: ' (VT\d{2}[A-Za-z]*)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Videocon' + model_replacement: '$1' + - regex: '; {0,2}((?:ViewPad|ViewPhone|VSD)[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Viewsonic' + model_replacement: '$1' + - regex: '; {0,2}(ViewSonic-)([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1$2' + brand_replacement: 'Viewsonic' + model_replacement: '$2' + - regex: '; {0,2}(GTablet.{0,200}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Viewsonic' + model_replacement: '$1' + - regex: '; {0,2}([Vv]ivo)[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'vivo' + model_replacement: '$2' + - regex: '(Vodafone) (.{0,200}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: '$1' + model_replacement: '$2' + - regex: '; {0,2}(?:Walton[ _\-]|)(Primo[ _\-][^;/]{1,100}?)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: 'Walton $1' + brand_replacement: 'Walton' + model_replacement: '$1' + - regex: '; {0,2}(?:WIKO[ \-]|)(CINK\+?|BARRY|BLOOM|DARKFULL|DARKMOON|DARKNIGHT|DARKSIDE|FIZZ|HIGHWAY|IGGY|OZZY|RAINBOW|STAIRWAY|SUBLIM|WAX|CINK [^;/]{1,100}?)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: 'Wiko $1' + brand_replacement: 'Wiko' + model_replacement: '$1' + - regex: '; {0,2}WellcoM-([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Wellcom $1' + brand_replacement: 'Wellcom' + model_replacement: '$1' + - regex: '(?:(WeTab)-Browser|; (wetab) Build)' + device_replacement: '$1' + brand_replacement: 'WeTab' + model_replacement: 'WeTab' + - regex: '; {0,2}(AT-AS[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Wolfgang $1' + brand_replacement: 'Wolfgang' + model_replacement: '$1' + - regex: '; {0,2}(?:Woxter|Wxt) ([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Woxter $1' + brand_replacement: 'Woxter' + model_replacement: '$1' + - regex: '; {0,2}(?:Xenta |Luna |)(TAB[234][0-9]{2}|TAB0[78]-\d{3}|TAB0?9-\d{3}|TAB1[03]-\d{3}|SMP\d{2}-\d{3})(?: Build|\) AppleWebKit)' + device_replacement: 'Yarvik $1' + brand_replacement: 'Yarvik' + model_replacement: '$1' + - regex: '; {0,2}([A-Z]{2,4})(M\d{3,}[A-Z]{2})([^;\)\/]*)(?: Build|[;\)])' + device_replacement: 'Yifang $1$2$3' + brand_replacement: 'Yifang' + model_replacement: '$2' + - regex: '; {0,2}((Mi|MI|HM|MI-ONE|Redmi)[ -](NOTE |Note |)[^;/]*) (Build|MIUI)/' + device_replacement: 'XiaoMi $1' + brand_replacement: 'XiaoMi' + model_replacement: '$1' + - regex: '; {0,2}((Mi|MI|HM|MI-ONE|Redmi)[ -](NOTE |Note |)[^;/\)]*)' + device_replacement: 'XiaoMi $1' + brand_replacement: 'XiaoMi' + model_replacement: '$1' + - regex: '; {0,2}(MIX) (Build|MIUI)/' + device_replacement: 'XiaoMi $1' + brand_replacement: 'XiaoMi' + model_replacement: '$1' + - regex: '; {0,2}((MIX) ([^;/]*)) (Build|MIUI)/' + device_replacement: 'XiaoMi $1' + brand_replacement: 'XiaoMi' + model_replacement: '$1' + - regex: '; {0,2}XOLO[ _]([^;/]{0,30}tab.{0,30})(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: 'Xolo $1' + brand_replacement: 'Xolo' + model_replacement: '$1' + - regex: '; {0,2}XOLO[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: 'Xolo $1' + brand_replacement: 'Xolo' + model_replacement: '$1' + - regex: '; {0,2}(q\d0{2,3}[a-z]?)(?: Build|\) 
AppleWebKit)' + regex_flag: 'i' + device_replacement: 'Xolo $1' + brand_replacement: 'Xolo' + model_replacement: '$1' + - regex: '; {0,2}(PAD ?[79]\d+[^;/]{0,50}|TelePAD\d+[^;/])(?: Build|\) AppleWebKit)' + device_replacement: 'Xoro $1' + brand_replacement: 'Xoro' + model_replacement: '$1' + - regex: '; {0,2}(?:(?:ZOPO|Zopo)[ _]([^;/]{1,100}?)|(ZP ?(?:\d{2}[^;/]{1,100}|C2))|(C[2379]))(?: Build|\) AppleWebKit)' + device_replacement: '$1$2$3' + brand_replacement: 'Zopo' + model_replacement: '$1$2$3' + - regex: '; {0,2}(ZiiLABS) (Zii[^;/]*)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'ZiiLabs' + model_replacement: '$2' + - regex: '; {0,2}(Zii)_([^;/]*)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'ZiiLabs' + model_replacement: '$2' + - regex: '; {0,2}(ARIZONA|(?:ATLAS|Atlas) W|D930|Grand (?:[SX][^;]{0,200}?|Era|Memo[^;]{0,200}?)|JOE|(?:Kis|KIS)\b[^;]{0,200}?|Libra|Light [^;]{0,200}?|N8[056][01]|N850L|N8000|N9[15]\d{2}|N9810|NX501|Optik|(?:Vip )Racer[^;]{0,200}?|RacerII|RACERII|San Francisco[^;]{0,200}?|V9[AC]|V55|V881|Z[679][0-9]{2}[A-z]?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'ZTE' + model_replacement: '$1' + - regex: '; {0,2}([A-Z]\d+)_USA_[^;]{0,200}(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'ZTE' + model_replacement: '$1' + - regex: '; {0,2}(SmartTab\d+)[^;]{0,50}(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'ZTE' + model_replacement: '$1' + - regex: '; {0,2}(?:Blade|BLADE|ZTE-BLADE)([^;/]*)(?: Build|\) AppleWebKit)' + device_replacement: 'ZTE Blade$1' + brand_replacement: 'ZTE' + model_replacement: 'Blade$1' + - regex: '; {0,2}(?:Skate|SKATE|ZTE-SKATE)([^;/]*)(?: Build|\) AppleWebKit)' + device_replacement: 'ZTE Skate$1' + brand_replacement: 'ZTE' + model_replacement: 'Skate$1' + - regex: '; {0,2}(Orange |Optimus )(Monte Carlo|San Francisco)(?: Build|\) AppleWebKit)' + device_replacement: '$1$2' + brand_replacement: 'ZTE' + model_replacement: '$1$2' + - regex: '; {0,2}(?:ZXY-ZTE_|ZTE\-U |ZTE[\- _]|ZTE-C[_ ])([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'ZTE $1' + brand_replacement: 'ZTE' + model_replacement: '$1' + - regex: '; (BASE) (lutea|Lutea 2|Tab[^;]{0,200}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'ZTE' + model_replacement: '$1 $2' + - regex: '; (Avea inTouch 2|soft stone|tmn smart a7|Movistar[ _]Link)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: '$1' + brand_replacement: 'ZTE' + model_replacement: '$1' + - regex: '; {0,2}(vp9plus)\)' + device_replacement: '$1' + brand_replacement: 'ZTE' + model_replacement: '$1' + - regex: '; ?(Cloud[ _]Z5|z1000|Z99 2G|z99|z930|z999|z990|z909|Z919|z900)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Zync' + model_replacement: '$1' + - regex: '; ?(KFOT|Kindle Fire) Build\b' + device_replacement: 'Kindle Fire' + brand_replacement: 'Amazon' + model_replacement: 'Kindle Fire' + - regex: '; ?(KFOTE|Amazon Kindle Fire2) Build\b' + device_replacement: 'Kindle Fire 2' + brand_replacement: 'Amazon' + model_replacement: 'Kindle Fire 2' + - regex: '; ?(KFTT) Build\b' + device_replacement: 'Kindle Fire HD' + brand_replacement: 'Amazon' + model_replacement: 'Kindle Fire HD 7"' + - regex: '; ?(KFJWI) Build\b' + device_replacement: 'Kindle Fire HD 8.9" WiFi' + brand_replacement: 'Amazon' + model_replacement: 'Kindle Fire HD 8.9" WiFi' + - regex: '; ?(KFJWA) Build\b' + device_replacement: 'Kindle Fire 
HD 8.9" 4G' + brand_replacement: 'Amazon' + model_replacement: 'Kindle Fire HD 8.9" 4G' + - regex: '; ?(KFSOWI) Build\b' + device_replacement: 'Kindle Fire HD 7" WiFi' + brand_replacement: 'Amazon' + model_replacement: 'Kindle Fire HD 7" WiFi' + - regex: '; ?(KFTHWI) Build\b' + device_replacement: 'Kindle Fire HDX 7" WiFi' + brand_replacement: 'Amazon' + model_replacement: 'Kindle Fire HDX 7" WiFi' + - regex: '; ?(KFTHWA) Build\b' + device_replacement: 'Kindle Fire HDX 7" 4G' + brand_replacement: 'Amazon' + model_replacement: 'Kindle Fire HDX 7" 4G' + - regex: '; ?(KFAPWI) Build\b' + device_replacement: 'Kindle Fire HDX 8.9" WiFi' + brand_replacement: 'Amazon' + model_replacement: 'Kindle Fire HDX 8.9" WiFi' + - regex: '; ?(KFAPWA) Build\b' + device_replacement: 'Kindle Fire HDX 8.9" 4G' + brand_replacement: 'Amazon' + model_replacement: 'Kindle Fire HDX 8.9" 4G' + - regex: '; ?Amazon ([^;/]{1,100}) Build\b' + device_replacement: '$1' + brand_replacement: 'Amazon' + model_replacement: '$1' + - regex: '; ?(Kindle) Build\b' + device_replacement: 'Kindle' + brand_replacement: 'Amazon' + model_replacement: 'Kindle' + - regex: '; ?(Silk)/(\d+)\.(\d+)(?:\.([0-9\-]+)|) Build\b' + device_replacement: 'Kindle Fire' + brand_replacement: 'Amazon' + model_replacement: 'Kindle Fire$2' + - regex: ' (Kindle)/(\d+\.\d+)' + device_replacement: 'Kindle' + brand_replacement: 'Amazon' + model_replacement: '$1 $2' + - regex: ' (Silk|Kindle)/(\d+)\.' + device_replacement: 'Kindle' + brand_replacement: 'Amazon' + model_replacement: 'Kindle' + - regex: '(sprd)\-([^/]{1,50})/' + device_replacement: '$1 $2' + brand_replacement: '$1' + model_replacement: '$2' + - regex: '; {0,2}(H\d{2}00\+?) Build' + device_replacement: '$1' + brand_replacement: 'Hero' + model_replacement: '$1' + - regex: '; {0,2}(iphone|iPhone5) Build/' + device_replacement: 'Xianghe $1' + brand_replacement: 'Xianghe' + model_replacement: '$1' + - regex: '; {0,2}(e\d{4}[a-z]?_?v\d+|v89_[^;/]{1,100})[^;/]{1,30} Build/' + device_replacement: 'Xianghe $1' + brand_replacement: 'Xianghe' + model_replacement: '$1' + - regex: '\bUSCC[_\-]?([^ ;/\)]+)' + device_replacement: '$1' + brand_replacement: 'Cellular' + model_replacement: '$1' + - regex: 'Windows Phone [^;]{1,30}; .{0,100}?IEMobile/[^;\)]+[;\)] ?(?:ARM; ?Touch; ?|Touch; ?|)(?:ALCATEL)[^;]{0,200}; {0,2}([^;,\)]+)' + device_replacement: 'Alcatel $1' + brand_replacement: 'Alcatel' + model_replacement: '$1' + - regex: 'Windows Phone [^;]{1,30}; .{0,100}?IEMobile/[^;\)]+[;\)] ?(?:ARM; ?Touch; ?|Touch; ?|WpsLondonTest; ?|)(?:ASUS|Asus)[^;]{0,200}; {0,2}([^;,\)]+)' + device_replacement: 'Asus $1' + brand_replacement: 'Asus' + model_replacement: '$1' + - regex: 'Windows Phone [^;]{1,30}; .{0,100}?IEMobile/[^;\)]+[;\)] ?(?:ARM; ?Touch; ?|Touch; ?|)(?:DELL|Dell)[^;]{0,200}; {0,2}([^;,\)]+)' + device_replacement: 'Dell $1' + brand_replacement: 'Dell' + model_replacement: '$1' + - regex: 'Windows Phone [^;]{1,30}; .{0,100}?IEMobile/[^;\)]+[;\)] ?(?:ARM; ?Touch; ?|Touch; ?|WpsLondonTest; ?|)(?:HTC|Htc|HTC_blocked[^;]{0,200})[^;]{0,200}; {0,2}(?:HTC|)([^;,\)]+)' + device_replacement: 'HTC $1' + brand_replacement: 'HTC' + model_replacement: '$1' + - regex: 'Windows Phone [^;]{1,30}; .{0,100}?IEMobile/[^;\)]+[;\)] ?(?:ARM; ?Touch; ?|Touch; ?|)(?:HUAWEI)[^;]{0,200}; {0,2}(?:HUAWEI |)([^;,\)]+)' + device_replacement: 'Huawei $1' + brand_replacement: 'Huawei' + model_replacement: '$1' + - regex: 'Windows Phone [^;]{1,30}; .{0,100}?IEMobile/[^;\)]+[;\)] ?(?:ARM; ?Touch; ?|Touch; ?|)(?:LG|Lg)[^;]{0,200}; {0,2}(?:LG[ 
\-]|)([^;,\)]+)' + device_replacement: 'LG $1' + brand_replacement: 'LG' + model_replacement: '$1' + - regex: 'Windows Phone [^;]{1,30}; .{0,100}?IEMobile/[^;\)]+[;\)] ?(?:ARM; ?Touch; ?|Touch; ?|)(?:rv:11; |)(?:NOKIA|Nokia)[^;]{0,200}; {0,2}(?:NOKIA ?|Nokia ?|LUMIA ?|[Ll]umia ?|)(\d{3,10}[^;\)]*)' + device_replacement: 'Lumia $1' + brand_replacement: 'Nokia' + model_replacement: 'Lumia $1' + - regex: 'Windows Phone [^;]{1,30}; .{0,100}?IEMobile/[^;\)]+[;\)] ?(?:ARM; ?Touch; ?|Touch; ?|)(?:NOKIA|Nokia)[^;]{0,200}; {0,2}(RM-\d{3,})' + device_replacement: 'Nokia $1' + brand_replacement: 'Nokia' + model_replacement: '$1' + - regex: '(?:Windows Phone [^;]{1,30}; .{0,100}?IEMobile/[^;\)]+[;\)]|WPDesktop;) ?(?:ARM; ?Touch; ?|Touch; ?|)(?:NOKIA|Nokia)[^;]{0,200}; {0,2}(?:NOKIA ?|Nokia ?|LUMIA ?|[Ll]umia ?|)([^;\)]+)' + device_replacement: 'Nokia $1' + brand_replacement: 'Nokia' + model_replacement: '$1' + - regex: 'Windows Phone [^;]{1,30}; .{0,100}?IEMobile/[^;\)]+[;\)] ?(?:ARM; ?Touch; ?|Touch; ?|)(?:Microsoft(?: Corporation|))[^;]{0,200}; {0,2}([^;,\)]+)' + device_replacement: 'Microsoft $1' + brand_replacement: 'Microsoft' + model_replacement: '$1' + - regex: 'Windows Phone [^;]{1,30}; .{0,100}?IEMobile/[^;\)]+[;\)] ?(?:ARM; ?Touch; ?|Touch; ?|WpsLondonTest; ?|)(?:SAMSUNG)[^;]{0,200}; {0,2}(?:SAMSUNG |)([^;,\.\)]+)' + device_replacement: 'Samsung $1' + brand_replacement: 'Samsung' + model_replacement: '$1' + - regex: 'Windows Phone [^;]{1,30}; .{0,100}?IEMobile/[^;\)]+[;\)] ?(?:ARM; ?Touch; ?|Touch; ?|WpsLondonTest; ?|)(?:TOSHIBA|FujitsuToshibaMobileCommun)[^;]{0,200}; {0,2}([^;,\)]+)' + device_replacement: 'Toshiba $1' + brand_replacement: 'Toshiba' + model_replacement: '$1' + - regex: 'Windows Phone [^;]{1,30}; .{0,100}?IEMobile/[^;\)]+[;\)] ?(?:ARM; ?Touch; ?|Touch; ?|WpsLondonTest; ?|)([^;]{1,200}); {0,2}([^;,\)]+)' + device_replacement: '$1 $2' + brand_replacement: '$1' + model_replacement: '$2' + - regex: '(?:^|; )SAMSUNG\-([A-Za-z0-9\-]{1,50}).{0,200} Bada/' + device_replacement: 'Samsung $1' + brand_replacement: 'Samsung' + model_replacement: '$1' + - regex: '\(Mobile; ALCATEL ?(One|ONE) ?(Touch|TOUCH) ?([^;/]{1,100}?)(?:/[^;]{1,200}|); rv:[^\)]{1,200}\) Gecko/[^\/]{1,200} Firefox/' + device_replacement: 'Alcatel $1 $2 $3' + brand_replacement: 'Alcatel' + model_replacement: 'One Touch $3' + - regex: '\(Mobile; (?:ZTE([^;]{1,200})|(OpenC)); rv:[^\)]{1,200}\) Gecko/[^\/]{1,200} Firefox/' + device_replacement: 'ZTE $1$2' + brand_replacement: 'ZTE' + model_replacement: '$1$2' + - regex: '\(Mobile; ALCATEL([A-Za-z0-9\-]+); rv:[^\)]{1,200}\) Gecko/[^\/]{1,200} Firefox/[^\/]{1,200} KaiOS/' + device_replacement: 'Alcatel $1' + brand_replacement: 'Alcatel' + model_replacement: '$1' + - regex: '\(Mobile; LYF\/([A-Za-z0-9\-]{1,100})\/.{0,100};.{0,100}rv:[^\)]{1,100}\) Gecko/[^\/]{1,100} Firefox/[^\/]{1,100} KAIOS/' + device_replacement: 'LYF $1' + brand_replacement: 'LYF' + model_replacement: '$1' + - regex: '\(Mobile; Nokia_([A-Za-z0-9\-]{1,100})_.{1,100}; rv:[^\)]{1,100}\) Gecko/[^\/]{1,100} Firefox/[^\/]{1,100} KAIOS/' + device_replacement: 'Nokia $1' + brand_replacement: 'Nokia' + model_replacement: '$1' + - regex: 'Nokia(N[0-9]+)([A-Za-z_\-][A-Za-z0-9_\-]*)' + device_replacement: 'Nokia $1' + brand_replacement: 'Nokia' + model_replacement: '$1$2' + - regex: '(?:NOKIA|Nokia)(?:\-| {0,2})(?:([A-Za-z0-9]+)\-[0-9a-f]{32}|([A-Za-z0-9\-]+)(?:UCBrowser)|([A-Za-z0-9\-]+))' + device_replacement: 'Nokia $1$2$3' + brand_replacement: 'Nokia' + model_replacement: '$1$2$3' + - regex: 'Lumia 
([A-Za-z0-9\-]+)' + device_replacement: 'Lumia $1' + brand_replacement: 'Nokia' + model_replacement: 'Lumia $1' + - regex: '\(Symbian; U; S60 V5; [A-z]{2}\-[A-z]{2}; (SonyEricsson|Samsung|Nokia|LG)([^;/]{1,100}?)\)' + device_replacement: '$1 $2' + brand_replacement: '$1' + model_replacement: '$2' + - regex: '\(Symbian(?:/3|); U; ([^;]{1,200});' + device_replacement: 'Nokia $1' + brand_replacement: 'Nokia' + model_replacement: '$1' + - regex: 'BB10; ([A-Za-z0-9\- ]+)\)' + device_replacement: 'BlackBerry $1' + brand_replacement: 'BlackBerry' + model_replacement: '$1' + - regex: 'Play[Bb]ook.{1,200}RIM Tablet OS' + device_replacement: 'BlackBerry Playbook' + brand_replacement: 'BlackBerry' + model_replacement: 'Playbook' + - regex: 'Black[Bb]erry ([0-9]+);' + device_replacement: 'BlackBerry $1' + brand_replacement: 'BlackBerry' + model_replacement: '$1' + - regex: 'Black[Bb]erry([0-9]+)' + device_replacement: 'BlackBerry $1' + brand_replacement: 'BlackBerry' + model_replacement: '$1' + - regex: 'Black[Bb]erry;' + device_replacement: 'BlackBerry' + brand_replacement: 'BlackBerry' + - regex: '(Pre|Pixi)/\d+\.\d+' + device_replacement: 'Palm $1' + brand_replacement: 'Palm' + model_replacement: '$1' + - regex: 'Palm([0-9]+)' + device_replacement: 'Palm $1' + brand_replacement: 'Palm' + model_replacement: '$1' + - regex: 'Treo([A-Za-z0-9]+)' + device_replacement: 'Palm Treo $1' + brand_replacement: 'Palm' + model_replacement: 'Treo $1' + - regex: 'webOS.{0,200}(P160U(?:NA|))/(\d+).(\d+)' + device_replacement: 'HP Veer' + brand_replacement: 'HP' + model_replacement: 'Veer' + - regex: '(Touch[Pp]ad)/\d+\.\d+' + device_replacement: 'HP TouchPad' + brand_replacement: 'HP' + model_replacement: 'TouchPad' + - regex: 'HPiPAQ([A-Za-z0-9]{1,20})/\d+\.\d+' + device_replacement: 'HP iPAQ $1' + brand_replacement: 'HP' + model_replacement: 'iPAQ $1' + - regex: 'PDA; (PalmOS)/sony/model ([a-z]+)/Revision' + device_replacement: '$1' + brand_replacement: 'Sony' + model_replacement: '$1 $2' + - regex: '(Apple\s?TV)' + device_replacement: 'AppleTV' + brand_replacement: 'Apple' + model_replacement: 'AppleTV' + - regex: '(QtCarBrowser)' + device_replacement: 'Tesla Model S' + brand_replacement: 'Tesla' + model_replacement: 'Model S' + - regex: '(iPhone|iPad|iPod)(\d+,\d+)' + device_replacement: '$1' + brand_replacement: 'Apple' + model_replacement: '$1$2' + - regex: '(iPad)(?:;| Simulator;)' + device_replacement: '$1' + brand_replacement: 'Apple' + model_replacement: '$1' + - regex: '(iPod)(?:;| touch;| Simulator;)' + device_replacement: '$1' + brand_replacement: 'Apple' + model_replacement: '$1' + - regex: '(iPhone)(?:;| Simulator;)' + device_replacement: '$1' + brand_replacement: 'Apple' + model_replacement: '$1' + - regex: '(Watch)(\d+,\d+)' + device_replacement: 'Apple $1' + brand_replacement: 'Apple' + model_replacement: '$1$2' + - regex: '(Apple Watch)(?:;| Simulator;)' + device_replacement: '$1' + brand_replacement: 'Apple' + model_replacement: '$1' + - regex: '(HomePod)(?:;| Simulator;)' + device_replacement: '$1' + brand_replacement: 'Apple' + model_replacement: '$1' + - regex: 'iPhone' + device_replacement: 'iPhone' + brand_replacement: 'Apple' + model_replacement: 'iPhone' + - regex: 'CFNetwork/.{0,100} Darwin/\d.{0,100}\(((?:Mac|iMac|PowerMac|PowerBook)[^\d]*)(\d+)(?:,|%2C)(\d+)' + device_replacement: '$1$2,$3' + brand_replacement: 'Apple' + model_replacement: '$1$2,$3' + - regex: 'CFNetwork/.{0,100} Darwin/\d+\.\d+\.\d+ \(x86_64\)' + device_replacement: 'Mac' + brand_replacement: 'Apple' + 
model_replacement: 'Mac' + - regex: 'CFNetwork/.{0,100} Darwin/\d' + device_replacement: 'iOS-Device' + brand_replacement: 'Apple' + model_replacement: 'iOS-Device' + - regex: 'Outlook-(iOS)/\d+\.\d+\.prod\.iphone' + brand_replacement: 'Apple' + device_replacement: 'iPhone' + model_replacement: 'iPhone' + - regex: 'acer_([A-Za-z0-9]+)_' + device_replacement: 'Acer $1' + brand_replacement: 'Acer' + model_replacement: '$1' + - regex: '(?:ALCATEL|Alcatel)-([A-Za-z0-9\-]+)' + device_replacement: 'Alcatel $1' + brand_replacement: 'Alcatel' + model_replacement: '$1' + - regex: '(?:Amoi|AMOI)\-([A-Za-z0-9]+)' + device_replacement: 'Amoi $1' + brand_replacement: 'Amoi' + model_replacement: '$1' + - regex: '(?:; |\/|^)((?:Transformer (?:Pad|Prime) |Transformer |PadFone[ _]?)[A-Za-z0-9]*)' + device_replacement: 'Asus $1' + brand_replacement: 'Asus' + model_replacement: '$1' + - regex: '(?:asus.{0,200}?ASUS|Asus|ASUS|asus)[\- ;]*((?:Transformer (?:Pad|Prime) |Transformer |Padfone |Nexus[ _]|)[A-Za-z0-9]+)' + device_replacement: 'Asus $1' + brand_replacement: 'Asus' + model_replacement: '$1' + - regex: '(?:ASUS)_([A-Za-z0-9\-]+)' + device_replacement: 'Asus $1' + brand_replacement: 'Asus' + model_replacement: '$1' + - regex: '\bBIRD[ \-\.]([A-Za-z0-9]+)' + device_replacement: 'Bird $1' + brand_replacement: 'Bird' + model_replacement: '$1' + - regex: '\bDell ([A-Za-z0-9]+)' + device_replacement: 'Dell $1' + brand_replacement: 'Dell' + model_replacement: '$1' + - regex: 'DoCoMo/2\.0 ([A-Za-z0-9]+)' + device_replacement: 'DoCoMo $1' + brand_replacement: 'DoCoMo' + model_replacement: '$1' + - regex: '^.{0,50}?([A-Za-z0-9]{1,30})_W;FOMA' + device_replacement: 'DoCoMo $1' + brand_replacement: 'DoCoMo' + model_replacement: '$1' + - regex: '^.{0,50}?([A-Za-z0-9]{1,30});FOMA' + device_replacement: 'DoCoMo $1' + brand_replacement: 'DoCoMo' + model_replacement: '$1' + - regex: '\b(?:HTC/|HTC/[a-z0-9]{1,20}/|)HTC[ _\-;]? {0,2}(.{0,200}?)(?:-?Mozilla|fingerPrint|[;/\(\)]|$)' + device_replacement: 'HTC $1' + brand_replacement: 'HTC' + model_replacement: '$1' + - regex: 'Huawei([A-Za-z0-9]+)' + device_replacement: 'Huawei $1' + brand_replacement: 'Huawei' + model_replacement: '$1' + - regex: 'HUAWEI-([A-Za-z0-9]+)' + device_replacement: 'Huawei $1' + brand_replacement: 'Huawei' + model_replacement: '$1' + - regex: 'HUAWEI ([A-Za-z0-9\-]+)' + device_replacement: 'Huawei $1' + brand_replacement: 'Huawei' + model_replacement: '$1' + - regex: 'vodafone([A-Za-z0-9]+)' + device_replacement: 'Huawei Vodafone $1' + brand_replacement: 'Huawei' + model_replacement: 'Vodafone $1' + - regex: 'i\-mate ([A-Za-z0-9]+)' + device_replacement: 'i-mate $1' + brand_replacement: 'i-mate' + model_replacement: '$1' + - regex: 'Kyocera\-([A-Za-z0-9]+)' + device_replacement: 'Kyocera $1' + brand_replacement: 'Kyocera' + model_replacement: '$1' + - regex: 'KWC\-([A-Za-z0-9]+)' + device_replacement: 'Kyocera $1' + brand_replacement: 'Kyocera' + model_replacement: '$1' + - regex: 'Lenovo[_\-]([A-Za-z0-9]+)' + device_replacement: 'Lenovo $1' + brand_replacement: 'Lenovo' + model_replacement: '$1' + - regex: '(HbbTV)/[0-9]+\.[0-9]+\.[0-9]+ \( ?;(LG)E ?;([^;]{0,30})' + device_replacement: '$1' + brand_replacement: '$2' + model_replacement: '$3' + - regex: '(HbbTV)/1\.1\.1.{0,200}CE-HTML/1\.\d;(Vendor/|)(THOM[^;]{0,200}?)[;\s].{0,30}(LF[^;]{1,200});?' 
+ device_replacement: '$1' + brand_replacement: 'Thomson' + model_replacement: '$4' + - regex: '(HbbTV)(?:/1\.1\.1|) ?(?: \(;;;;;\)|); {0,2}CE-HTML(?:/1\.\d|); {0,2}([^ ]{1,30}) ([^;]{1,200});' + device_replacement: '$1' + brand_replacement: '$2' + model_replacement: '$3' + - regex: '(HbbTV)/1\.1\.1 \(;;;;;\) Maple_2011' + device_replacement: '$1' + brand_replacement: 'Samsung' + - regex: '(HbbTV)/[0-9]+\.[0-9]+\.[0-9]+ \([^;]{0,30}; ?(?:CUS:([^;]{0,200})|([^;]{1,200})) ?; ?([^;]{0,30})' + device_replacement: '$1' + brand_replacement: '$2$3' + model_replacement: '$4' + - regex: '(HbbTV)/[0-9]+\.[0-9]+\.[0-9]+' + device_replacement: '$1' + - regex: 'LGE; (?:Media\/|)([^;]{0,200});[^;]{0,200};[^;]{0,200};?\); "?LG NetCast(\.TV|\.Media|)-\d+' + device_replacement: 'NetCast$2' + brand_replacement: 'LG' + model_replacement: '$1' + - regex: 'InettvBrowser/[0-9]{1,30}\.[0-9A-Z]{1,30} \([^;]{0,200};(Sony)([^;]{0,200});[^;]{0,200};[^\)]{0,10}\)' + device_replacement: 'Inettv' + brand_replacement: '$1' + model_replacement: '$2' + - regex: 'InettvBrowser/[0-9]{1,30}\.[0-9A-Z]{1,30} \([^;]{0,200};([^;]{0,200});[^;]{0,200};[^\)]{0,10}\)' + device_replacement: 'Inettv' + brand_replacement: 'Generic_Inettv' + model_replacement: '$1' + - regex: '(?:InettvBrowser|TSBNetTV|NETTV|HBBTV)' + device_replacement: 'Inettv' + brand_replacement: 'Generic_Inettv' + - regex: 'Series60/\d\.\d (LG)[\-]?([A-Za-z0-9 \-]+)' + device_replacement: '$1 $2' + brand_replacement: '$1' + model_replacement: '$2' + - regex: '\b(?:LGE[ \-]LG\-(?:AX|)|LGE |LGE?-LG|LGE?[ \-]|LG[ /\-]|lg[\-])([A-Za-z0-9]+)\b' + device_replacement: 'LG $1' + brand_replacement: 'LG' + model_replacement: '$1' + - regex: '(?:^LG[\-]?|^LGE[\-/]?)([A-Za-z]+[0-9]+[A-Za-z]*)' + device_replacement: 'LG $1' + brand_replacement: 'LG' + model_replacement: '$1' + - regex: '^LG([0-9]+[A-Za-z]*)' + device_replacement: 'LG $1' + brand_replacement: 'LG' + model_replacement: '$1' + - regex: '(KIN\.[^ ]+) (\d+)\.(\d+)' + device_replacement: 'Microsoft $1' + brand_replacement: 'Microsoft' + model_replacement: '$1' + - regex: '(?:MSIE|XBMC).{0,200}\b(Xbox)\b' + device_replacement: '$1' + brand_replacement: 'Microsoft' + model_replacement: '$1' + - regex: '; ARM; Trident/6\.0; Touch[\);]' + device_replacement: 'Microsoft Surface RT' + brand_replacement: 'Microsoft' + model_replacement: 'Surface RT' + - regex: 'Motorola\-([A-Za-z0-9]+)' + device_replacement: 'Motorola $1' + brand_replacement: 'Motorola' + model_replacement: '$1' + - regex: 'MOTO\-([A-Za-z0-9]+)' + device_replacement: 'Motorola $1' + brand_replacement: 'Motorola' + model_replacement: '$1' + - regex: 'MOT\-([A-z0-9][A-z0-9\-]*)' + device_replacement: 'Motorola $1' + brand_replacement: 'Motorola' + model_replacement: '$1' + - regex: '; (moto[ a-zA-z0-9()]{0,50});((?: Build|.{0,50}\) AppleWebKit))' + device_replacement: '$1' + brand_replacement: 'Motorola' + model_replacement: '$1' + - regex: '; {0,2}(moto)(.{0,50})(?: Build|\) AppleWebKit)' + device_replacement: 'Motorola$2' + brand_replacement: 'Motorola' + model_replacement: '$2' + - regex: 'Nintendo WiiU' + device_replacement: 'Nintendo Wii U' + brand_replacement: 'Nintendo' + model_replacement: 'Wii U' + - regex: 'Nintendo (Switch|DS|3DS|DSi|Wii);' + device_replacement: 'Nintendo $1' + brand_replacement: 'Nintendo' + model_replacement: '$1' + - regex: '(?:Pantech|PANTECH)[ _-]?([A-Za-z0-9\-]+)' + device_replacement: 'Pantech $1' + brand_replacement: 'Pantech' + model_replacement: '$1' + - regex: 'Philips([A-Za-z0-9]+)' + device_replacement: 'Philips $1' + 
brand_replacement: 'Philips' + model_replacement: '$1' + - regex: 'Philips ([A-Za-z0-9]+)' + device_replacement: 'Philips $1' + brand_replacement: 'Philips' + model_replacement: '$1' + - regex: '(SMART-TV); .{0,200} Tizen ' + device_replacement: 'Samsung $1' + brand_replacement: 'Samsung' + model_replacement: '$1' + - regex: 'SymbianOS/9\.\d.{0,200} Samsung[/\-]([A-Za-z0-9 \-]+)' + device_replacement: 'Samsung $1' + brand_replacement: 'Samsung' + model_replacement: '$1' + - regex: '(Samsung)(SGH)(i[0-9]+)' + device_replacement: '$1 $2$3' + brand_replacement: '$1' + model_replacement: '$2-$3' + - regex: 'SAMSUNG-ANDROID-MMS/([^;/]{1,100})' + device_replacement: '$1' + brand_replacement: 'Samsung' + model_replacement: '$1' + - regex: 'SAMSUNG(?:; |[ -/])([A-Za-z0-9\-]+)' + regex_flag: 'i' + device_replacement: 'Samsung $1' + brand_replacement: 'Samsung' + model_replacement: '$1' + - regex: '(Dreamcast)' + device_replacement: 'Sega $1' + brand_replacement: 'Sega' + model_replacement: '$1' + - regex: '^SIE-([A-Za-z0-9]+)' + device_replacement: 'Siemens $1' + brand_replacement: 'Siemens' + model_replacement: '$1' + - regex: 'Softbank/[12]\.0/([A-Za-z0-9]+)' + device_replacement: 'Softbank $1' + brand_replacement: 'Softbank' + model_replacement: '$1' + - regex: 'SonyEricsson ?([A-Za-z0-9\-]+)' + device_replacement: 'Ericsson $1' + brand_replacement: 'SonyEricsson' + model_replacement: '$1' + - regex: 'Android [^;]{1,200}; ([^ ]+) (Sony)/' + device_replacement: '$2 $1' + brand_replacement: '$2' + model_replacement: '$1' + - regex: '(Sony)(?:BDP\/|\/|)([^ /;\)]+)[ /;\)]' + device_replacement: '$1 $2' + brand_replacement: '$1' + model_replacement: '$2' + - regex: 'Puffin/[\d\.]+IT' + device_replacement: 'iPad' + brand_replacement: 'Apple' + model_replacement: 'iPad' + - regex: 'Puffin/[\d\.]+IP' + device_replacement: 'iPhone' + brand_replacement: 'Apple' + model_replacement: 'iPhone' + - regex: 'Puffin/[\d\.]+AT' + device_replacement: 'Generic Tablet' + brand_replacement: 'Generic' + model_replacement: 'Tablet' + - regex: 'Puffin/[\d\.]+AP' + device_replacement: 'Generic Smartphone' + brand_replacement: 'Generic' + model_replacement: 'Smartphone' + - regex: 'Android[\- ][\d]+\.[\d]+; [A-Za-z]{2}\-[A-Za-z]{0,2}; WOWMobile (.{1,200})( Build[/ ]|\))' + brand_replacement: 'Generic_Android' + model_replacement: '$1' + - regex: 'Android[\- ][\d]+\.[\d]+\-update1; [A-Za-z]{2}\-[A-Za-z]{0,2} {0,2}; {0,2}(.{1,200}?)( Build[/ ]|\))' + brand_replacement: 'Generic_Android' + model_replacement: '$1' + - regex: 'Android[\- ][\d]+(?:\.[\d]+)(?:\.[\d]+|); {0,2}[A-Za-z]{2}[_\-][A-Za-z]{0,2}\-? {0,2}; {0,2}(.{1,200}?)( Build[/ ]|\))' + brand_replacement: 'Generic_Android' + model_replacement: '$1' + - regex: 'Android[\- ][\d]+(?:\.[\d]+)(?:\.[\d]+|); {0,2}[A-Za-z]{0,2}\- {0,2}; {0,2}(.{1,200}?)( Build[/ ]|\))' + brand_replacement: 'Generic_Android' + model_replacement: '$1' + - regex: 'Android[\- ][\d]+(?:\.[\d]+)(?:\.[\d]+|); {0,2}[a-z]{0,2}[_\-]?[A-Za-z]{0,2};?( Build[/ ]|\))' + device_replacement: 'Generic Smartphone' + brand_replacement: 'Generic' + model_replacement: 'Smartphone' + - regex: 'Android[\- ][\d]+(?:\.[\d]+)(?:\.[\d]+|); {0,3}\-?[A-Za-z]{2}; {0,2}(.{1,50}?)( Build[/ ]|\))' + brand_replacement: 'Generic_Android' + model_replacement: '$1' + - regex: 'Android \d+?(?:\.\d+|)(?:\.\d+|); ([^;]{1,100}?)(?: Build|\) AppleWebKit).{1,200}? 
Mobile Safari' + brand_replacement: 'Generic_Android' + model_replacement: '$1' + - regex: 'Android \d+?(?:\.\d+|)(?:\.\d+|); ([^;]{1,100}?)(?: Build|\) AppleWebKit).{1,200}? Safari' + brand_replacement: 'Generic_Android_Tablet' + model_replacement: '$1' + - regex: 'Android \d+?(?:\.\d+|)(?:\.\d+|); ([^;]{1,100}?)(?: Build|\))' + brand_replacement: 'Generic_Android' + model_replacement: '$1' + - regex: '(GoogleTV)' + brand_replacement: 'Generic_Inettv' + model_replacement: '$1' + - regex: '(WebTV)/\d+.\d+' + brand_replacement: 'Generic_Inettv' + model_replacement: '$1' + - regex: '^(Roku)/DVP-\d+\.\d+' + brand_replacement: 'Generic_Inettv' + model_replacement: '$1' + - regex: '(Android 3\.\d|Opera Tablet|Tablet; .{1,100}Firefox/|Android.{0,100}(?:Tab|Pad))' + regex_flag: 'i' + device_replacement: 'Generic Tablet' + brand_replacement: 'Generic' + model_replacement: 'Tablet' + - regex: '(Symbian|\bS60(Version|V\d)|\bS60\b|\((Series 60|Windows Mobile|Palm OS|Bada); Opera Mini|Windows CE|Opera Mobi|BREW|Brew|Mobile; .{1,200}Firefox/|iPhone OS|Android|MobileSafari|Windows {0,2}Phone|\(webOS/|PalmOS)' + device_replacement: 'Generic Smartphone' + brand_replacement: 'Generic' + model_replacement: 'Smartphone' + - regex: '(hiptop|avantgo|plucker|xiino|blazer|elaine)' + regex_flag: 'i' + device_replacement: 'Generic Smartphone' + brand_replacement: 'Generic' + model_replacement: 'Smartphone' + - regex: '^.{0,100}(bot|BUbiNG|zao|borg|DBot|oegp|silk|Xenu|zeal|^NING|CCBot|crawl|htdig|lycos|slurp|teoma|voila|yahoo|Sogou|CiBra|Nutch|^Java/|^JNLP/|Daumoa|Daum|Genieo|ichiro|larbin|pompos|Scrapy|snappy|speedy|spider|msnbot|msrbot|vortex|^vortex|crawler|favicon|indexer|Riddler|scooter|scraper|scrubby|WhatWeb|WinHTTP|bingbot|BingPreview|openbot|gigabot|furlbot|polybot|seekbot|^voyager|archiver|Icarus6j|mogimogi|Netvibes|blitzbot|altavista|charlotte|findlinks|Retreiver|TLSProber|WordPress|SeznamBot|ProoXiBot|wsr\-agent|Squrl Java|EtaoSpider|PaperLiBot|SputnikBot|A6\-Indexer|netresearch|searchsight|baiduspider|YisouSpider|ICC\-Crawler|http%20client|Python-urllib|dataparksearch|converacrawler|Screaming Frog|AppEngine-Google|YahooCacheSystem|fast\-webcrawler|Sogou Pic Spider|semanticdiscovery|Innovazion Crawler|facebookexternalhit|Google.{0,200}/\+/web/snippet|Google-HTTP-Java-Client|BlogBridge|IlTrovatore-Setaccio|InternetArchive|GomezAgent|WebThumbnail|heritrix|NewsGator|PagePeeker|Reaper|ZooShot|holmes|NL-Crawler|Pingdom|StatusCake|WhatsApp|masscan|Google Web Preview|Qwantify|Yeti|OgScrper|RecipeRadar)' + regex_flag: 'i' + device_replacement: 'Spider' + brand_replacement: 'Spider' + model_replacement: 'Desktop' + - regex: '^(1207|3gso|4thp|501i|502i|503i|504i|505i|506i|6310|6590|770s|802s|a wa|acer|acs\-|airn|alav|asus|attw|au\-m|aur |aus |abac|acoo|aiko|alco|alca|amoi|anex|anny|anyw|aptu|arch|argo|bmobile|bell|bird|bw\-n|bw\-u|beck|benq|bilb|blac|c55/|cdm\-|chtm|capi|comp|cond|dall|dbte|dc\-s|dica|ds\-d|ds12|dait|devi|dmob|doco|dopo|dorado|el(?:38|39|48|49|50|55|58|68)|el[3456]\d{2}dual|erk0|esl8|ex300|ez40|ez60|ez70|ezos|ezze|elai|emul|eric|ezwa|fake|fly\-|fly_|g\-mo|g1 u|g560|gf\-5|grun|gene|go.w|good|grad|hcit|hd\-m|hd\-p|hd\-t|hei\-|hp i|hpip|hs\-c|htc |htc\-|htca|htcg)' + regex_flag: 'i' + device_replacement: 'Generic Feature Phone' + brand_replacement: 'Generic' + model_replacement: 'Feature Phone' + - regex: '^(htcp|htcs|htct|htc_|haie|hita|huaw|hutc|i\-20|i\-go|i\-ma|i\-mobile|i230|iac|iac\-|iac/|ig01|im1k|inno|iris|jata|kddi|kgt|kgt/|kpt |kwc\-|klon|lexi|lg 
g|lg\-a|lg\-b|lg\-c|lg\-d|lg\-f|lg\-g|lg\-k|lg\-l|lg\-m|lg\-o|lg\-p|lg\-s|lg\-t|lg\-u|lg\-w|lg/k|lg/l|lg/u|lg50|lg54|lge\-|lge/|leno|m1\-w|m3ga|m50/|maui|mc01|mc21|mcca|medi|meri|mio8|mioa|mo01|mo02|mode|modo|mot |mot\-|mt50|mtp1|mtv |mate|maxo|merc|mits|mobi|motv|mozz|n100|n101|n102|n202|n203|n300|n302|n500|n502|n505|n700|n701|n710|nec\-|nem\-|newg|neon)' + regex_flag: 'i' + device_replacement: 'Generic Feature Phone' + brand_replacement: 'Generic' + model_replacement: 'Feature Phone' + - regex: '^(netf|noki|nzph|o2 x|o2\-x|opwv|owg1|opti|oran|ot\-s|p800|pand|pg\-1|pg\-2|pg\-3|pg\-6|pg\-8|pg\-c|pg13|phil|pn\-2|pt\-g|palm|pana|pire|pock|pose|psio|qa\-a|qc\-2|qc\-3|qc\-5|qc\-7|qc07|qc12|qc21|qc32|qc60|qci\-|qwap|qtek|r380|r600|raks|rim9|rove|s55/|sage|sams|sc01|sch\-|scp\-|sdk/|se47|sec\-|sec0|sec1|semc|sgh\-|shar|sie\-|sk\-0|sl45|slid|smb3|smt5|sp01|sph\-|spv |spv\-|sy01|samm|sany|sava|scoo|send|siem|smar|smit|soft|sony|t\-mo|t218|t250|t600|t610|t618|tcl\-|tdg\-|telm|tim\-|ts70|tsm\-|tsm3|tsm5|tx\-9|tagt)' + regex_flag: 'i' + device_replacement: 'Generic Feature Phone' + brand_replacement: 'Generic' + model_replacement: 'Feature Phone' + - regex: '^(talk|teli|topl|tosh|up.b|upg1|utst|v400|v750|veri|vk\-v|vk40|vk50|vk52|vk53|vm40|vx98|virg|vertu|vite|voda|vulc|w3c |w3c\-|wapj|wapp|wapu|wapm|wig |wapi|wapr|wapv|wapy|wapa|waps|wapt|winc|winw|wonu|x700|xda2|xdag|yas\-|your|zte\-|zeto|aste|audi|avan|blaz|brew|brvw|bumb|ccwa|cell|cldc|cmd\-|dang|eml2|fetc|hipt|http|ibro|idea|ikom|ipaq|jbro|jemu|jigs|keji|kyoc|kyok|libw|m\-cr|midp|mmef|moto|mwbp|mywa|newt|nok6|o2im|pant|pdxg|play|pluc|port|prox|rozo|sama|seri|smal|symb|treo|upsi|vx52|vx53|vx60|vx61|vx70|vx80|vx81|vx83|vx85|wap\-|webc|whit|wmlb|xda\-|xda_)' + regex_flag: 'i' + device_replacement: 'Generic Feature Phone' + brand_replacement: 'Generic' + model_replacement: 'Feature Phone' + - regex: '^(Ice)$' + device_replacement: 'Generic Feature Phone' + brand_replacement: 'Generic' + model_replacement: 'Feature Phone' + - regex: '(wap[\-\ ]browser|maui|netfront|obigo|teleca|up\.browser|midp|Opera Mini)' + regex_flag: 'i' + device_replacement: 'Generic Feature Phone' + brand_replacement: 'Generic' + model_replacement: 'Feature Phone' + - regex: 'Mac OS' + device_replacement: 'Mac' + brand_replacement: 'Apple' + model_replacement: 'Mac'`) diff --git a/vendor/github.com/yuin/gopher-lua/.gitignore b/vendor/github.com/yuin/gopher-lua/.gitignore new file mode 100644 index 00000000000..485dee64bcf --- /dev/null +++ b/vendor/github.com/yuin/gopher-lua/.gitignore @@ -0,0 +1 @@ +.idea diff --git a/vendor/github.com/yuin/gopher-lua/.travis.yml b/vendor/github.com/yuin/gopher-lua/.travis.yml deleted file mode 100644 index 68df5e7b1a1..00000000000 --- a/vendor/github.com/yuin/gopher-lua/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -language: go - -go: - - "1.9.x" - - "1.10.x" - - "1.11.x" -env: - global: - GO111MODULE=off - -before_install: - - go get github.com/axw/gocov/gocov - - go get github.com/mattn/goveralls - - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi -install: - - go get -u -v $(go list -f '{{join .Imports "\n"}}{{"\n"}}{{join .TestImports "\n"}}' ./... | sort | uniq | grep '\.' 
| grep -v gopher-lua) -script: - - $HOME/gopath/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/yuin/gopher-lua/README.rst b/vendor/github.com/yuin/gopher-lua/README.rst index 2b6de3259ac..cc936311069 100644 --- a/vendor/github.com/yuin/gopher-lua/README.rst +++ b/vendor/github.com/yuin/gopher-lua/README.rst @@ -3,14 +3,14 @@ GopherLua: VM and compiler for Lua in Go. =============================================================================== -.. image:: https://godoc.org/github.com/yuin/gopher-lua?status.svg - :target: http://godoc.org/github.com/yuin/gopher-lua +.. image:: https://pkg.go.dev/badge/github.com/yuin/gopher-lua.svg + :target: https://pkg.go.dev/github.com/yuin/gopher-lua -.. image:: https://travis-ci.org/yuin/gopher-lua.svg - :target: https://travis-ci.org/yuin/gopher-lua +.. image:: https://github.com/yuin/gopher-lua/workflows/test/badge.svg?branch=master&event=push + :target: https://github.com/yuin/gopher-lua/actions?query=workflow:test -.. image:: https://coveralls.io/repos/yuin/gopher-lua/badge.svg - :target: https://coveralls.io/r/yuin/gopher-lua +.. image:: https://coveralls.io/repos/github/yuin/gopher-lua/badge.svg?branch=master + :target: https://coveralls.io/github/yuin/gopher-lua .. image:: https://badges.gitter.im/Join%20Chat.svg :alt: Join the chat at https://gitter.im/yuin/gopher-lua @@ -19,7 +19,7 @@ GopherLua: VM and compiler for Lua in Go. | -GopherLua is a Lua5.1 VM and compiler written in Go. GopherLua has a same goal +GopherLua is a Lua5.1(+ `goto` statement in Lua5.2) VM and compiler written in Go. GopherLua has a same goal with Lua: **Be a scripting language with extensible semantics** . It provides Go APIs that allow you to easily embed a scripting language to your Go host programs. @@ -830,6 +830,8 @@ Miscellaneous notes - ``file:setvbuf`` does not support a line buffering. - Daylight saving time is not supported. - GopherLua has a function to set an environment variable : ``os.setenv(name, value)`` +- GopherLua support ``goto`` and ``::label::`` statement in Lua5.2. + - `goto` is a keyword and not a valid variable name. ---------------------------------------------------------------- Standalone interpreter diff --git a/vendor/github.com/yuin/gopher-lua/_state.go b/vendor/github.com/yuin/gopher-lua/_state.go index 960e8810e4d..6e9febfb6d0 100644 --- a/vendor/github.com/yuin/gopher-lua/_state.go +++ b/vendor/github.com/yuin/gopher-lua/_state.go @@ -398,17 +398,18 @@ func (rg *registry) forceResize(newSize int) { copy(newSlice, rg.array[:rg.top]) // should we copy the area beyond top? there shouldn't be any valid values there so it shouldn't be necessary. 
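The README hunk above documents the headline feature of this gopher-lua bump: Lua 5.2 style `goto` and `::label::` statements, with `goto` becoming a reserved word. As a minimal sketch (illustrative, not part of the patch), embedding the library through its public `NewState`/`DoString` API and exercising the new syntax looks like this:

```go
package main

import (
	"fmt"

	lua "github.com/yuin/gopher-lua"
)

func main() {
	L := lua.NewState()
	defer L.Close()
	// ::top:: declares a label; goto jumps back to it. Before this
	// vendor update the lexer/parser rejected both forms.
	err := L.DoString(`
		local i = 1
		::top::
		if i <= 3 then
			print("pass", i)
			i = i + 1
			goto top
		end
	`)
	if err != nil {
		fmt.Println("lua error:", err)
	}
}
```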
rg.array = newSlice } -func (rg *registry) SetTop(top int) { - // +inline-call rg.checkSize top - oldtop := rg.top - rg.top = top - for i := oldtop; i < rg.top; i++ { + +func (rg *registry) SetTop(topi int) { // +inline-start + // +inline-call rg.checkSize topi + oldtopi := rg.top + rg.top = topi + for i := oldtopi; i < rg.top; i++ { rg.array[i] = LNil } // values beyond top don't need to be valid LValues, so setting them to nil is fine // setting them to nil rather than LNil lets us invoke the golang memclr opto - if rg.top < oldtop { - nilRange := rg.array[rg.top:oldtop] + if rg.top < oldtopi { + nilRange := rg.array[rg.top:oldtopi] for i := range nilRange { nilRange[i] = nil } @@ -416,7 +417,7 @@ func (rg *registry) SetTop(top int) { //for i := rg.top; i < oldtop; i++ { // rg.array[i] = LNil //} -} +} // +inline-end func (rg *registry) Top() int { return rg.top @@ -498,34 +499,34 @@ func (rg *registry) FillNil(regm, n int) { // +inline-start func (rg *registry) Insert(value LValue, reg int) { top := rg.Top() if reg >= top { - rg.Set(reg, value) + // +inline-call rg.Set reg value return } top-- for ; top >= reg; top-- { // FIXME consider using copy() here if Insert() is called enough - rg.Set(top+1, rg.Get(top)) + // +inline-call rg.Set top+1 rg.Get(top) } - rg.Set(reg, value) + // +inline-call rg.Set reg value } -func (rg *registry) Set(reg int, val LValue) { - newSize := reg + 1 +func (rg *registry) Set(regi int, vali LValue) { // +inline-start + newSize := regi + 1 // +inline-call rg.checkSize newSize - rg.array[reg] = val - if reg >= rg.top { - rg.top = reg + 1 + rg.array[regi] = vali + if regi >= rg.top { + rg.top = regi + 1 } -} +} // +inline-end -func (rg *registry) SetNumber(reg int, val LNumber) { - newSize := reg + 1 +func (rg *registry) SetNumber(regi int, vali LNumber) { // +inline-start + newSize := regi + 1 // +inline-call rg.checkSize newSize - rg.array[reg] = rg.alloc.LNumber2I(val) - if reg >= rg.top { - rg.top = reg + 1 + rg.array[regi] = rg.alloc.LNumber2I(vali) + if regi >= rg.top { + rg.top = regi + 1 } -} +} // +inline-end func (rg *registry) IsFull() bool { return rg.top >= cap(rg.array) @@ -769,6 +770,9 @@ func (ls *LState) isStarted() bool { func (ls *LState) kill() { ls.Dead = true + if ls.ctxCancelFn != nil { + ls.ctxCancelFn() + } } func (ls *LState) indexToReg(idx int) int { @@ -1402,6 +1406,7 @@ func (ls *LState) NewThread() (*LState, context.CancelFunc) { if ls.ctx != nil { thread.mainLoop = mainLoopWithContext thread.ctx, f = context.WithCancel(ls.ctx) + thread.ctxCancelFn = f } return thread, f } @@ -1851,6 +1856,9 @@ func (ls *LState) PCall(nargs, nret int, errfunc *LFunction) (err error) { err = rcv.(*ApiError) err.(*ApiError).StackTrace = ls.stackTrace(0) } + ls.stack.SetSp(sp) + ls.currentFrame = ls.stack.Last() + ls.reg.SetTop(base) } }() ls.Call(1, 1) diff --git a/vendor/github.com/yuin/gopher-lua/_vm.go b/vendor/github.com/yuin/gopher-lua/_vm.go index 049107e1774..ee2be040251 100644 --- a/vendor/github.com/yuin/gopher-lua/_vm.go +++ b/vendor/github.com/yuin/gopher-lua/_vm.go @@ -173,7 +173,8 @@ func init() { A := int(inst>>18) & 0xff //GETA RA := lbase + A B := int(inst & 0x1ff) //GETB - reg.Set(RA, reg.Get(lbase+B)) + v := reg.Get(lbase + B) + // +inline-call reg.Set RA v return 0 }, func(L *LState, inst uint32, baseframe *callFrame) int { //OP_MOVEN @@ -183,7 +184,8 @@ func init() { A := int(inst>>18) & 0xff //GETA B := int(inst & 0x1ff) //GETB C := int(inst>>9) & 0x1ff //GETC - reg.Set(lbase+A, reg.Get(lbase+B)) + v := reg.Get(lbase + B) + // 
+inline-call reg.Set lbase+A v code := cf.Fn.Proto.Code pc := cf.Pc for i := 0; i < C; i++ { @@ -191,7 +193,8 @@ func init() { pc++ A = int(inst>>18) & 0xff //GETA B = int(inst & 0x1ff) //GETB - reg.Set(lbase+A, reg.Get(lbase+B)) + v := reg.Get(lbase + B) + // +inline-call reg.Set lbase+A v } cf.Pc = pc return 0 @@ -203,7 +206,8 @@ func init() { A := int(inst>>18) & 0xff //GETA RA := lbase + A Bx := int(inst & 0x3ffff) //GETBX - reg.Set(RA, cf.Fn.Proto.Constants[Bx]) + v := cf.Fn.Proto.Constants[Bx] + // +inline-call reg.Set RA v return 0 }, func(L *LState, inst uint32, baseframe *callFrame) int { //OP_LOADBOOL @@ -215,9 +219,9 @@ func init() { B := int(inst & 0x1ff) //GETB C := int(inst>>9) & 0x1ff //GETC if B != 0 { - reg.Set(RA, LTrue) + // +inline-call reg.Set RA LTrue } else { - reg.Set(RA, LFalse) + // +inline-call reg.Set RA LFalse } if C != 0 { cf.Pc++ @@ -232,7 +236,7 @@ func init() { RA := lbase + A B := int(inst & 0x1ff) //GETB for i := RA; i <= lbase+B; i++ { - reg.Set(i, LNil) + // +inline-call reg.Set i LNil } return 0 }, @@ -243,7 +247,8 @@ func init() { A := int(inst>>18) & 0xff //GETA RA := lbase + A B := int(inst & 0x1ff) //GETB - reg.Set(RA, cf.Fn.Upvalues[B].Value()) + v := cf.Fn.Upvalues[B].Value() + // +inline-call reg.Set RA v return 0 }, func(L *LState, inst uint32, baseframe *callFrame) int { //OP_GETGLOBAL @@ -254,7 +259,8 @@ func init() { RA := lbase + A Bx := int(inst & 0x3ffff) //GETBX //reg.Set(RA, L.getField(cf.Fn.Env, cf.Fn.Proto.Constants[Bx])) - reg.Set(RA, L.getFieldString(cf.Fn.Env, cf.Fn.Proto.stringConstants[Bx])) + v := L.getFieldString(cf.Fn.Env, cf.Fn.Proto.stringConstants[Bx]) + // +inline-call reg.Set RA v return 0 }, func(L *LState, inst uint32, baseframe *callFrame) int { //OP_GETTABLE @@ -265,7 +271,8 @@ func init() { RA := lbase + A B := int(inst & 0x1ff) //GETB C := int(inst>>9) & 0x1ff //GETC - reg.Set(RA, L.getField(reg.Get(lbase+B), L.rkValue(C))) + v := L.getField(reg.Get(lbase+B), L.rkValue(C)) + // +inline-call reg.Set RA v return 0 }, func(L *LState, inst uint32, baseframe *callFrame) int { //OP_GETTABLEKS @@ -276,7 +283,8 @@ func init() { RA := lbase + A B := int(inst & 0x1ff) //GETB C := int(inst>>9) & 0x1ff //GETC - reg.Set(RA, L.getFieldString(reg.Get(lbase+B), L.rkString(C))) + v := L.getFieldString(reg.Get(lbase+B), L.rkString(C)) + // +inline-call reg.Set RA v return 0 }, func(L *LState, inst uint32, baseframe *callFrame) int { //OP_SETGLOBAL @@ -330,7 +338,8 @@ func init() { RA := lbase + A B := int(inst & 0x1ff) //GETB C := int(inst>>9) & 0x1ff //GETC - reg.Set(RA, newLTable(B, C)) + v := newLTable(B, C) + // +inline-call reg.Set RA v return 0 }, func(L *LState, inst uint32, baseframe *callFrame) int { //OP_SELF @@ -342,8 +351,9 @@ func init() { B := int(inst & 0x1ff) //GETB C := int(inst>>9) & 0x1ff //GETC selfobj := reg.Get(lbase + B) - reg.Set(RA, L.getFieldString(selfobj, L.rkString(C))) - reg.Set(RA+1, selfobj) + v := L.getFieldString(selfobj, L.rkString(C)) + // +inline-call reg.Set RA v + // +inline-call reg.Set RA+1 selfobj return 0 }, opArith, // OP_ADD @@ -361,17 +371,17 @@ func init() { B := int(inst & 0x1ff) //GETB unaryv := L.rkValue(B) if nm, ok := unaryv.(LNumber); ok { - reg.SetNumber(RA, -nm) + // +inline-call reg.Set RA -nm } else { op := L.metaOp1(unaryv, "__unm") if op.Type() == LTFunction { reg.Push(op) reg.Push(unaryv) L.Call(1, 1) - reg.Set(RA, reg.Pop()) + // +inline-call reg.Set RA reg.Pop() } else if str, ok1 := unaryv.(LString); ok1 { if num, err := parseNumber(string(str)); err == nil { - 
reg.Set(RA, -num) + // +inline-call reg.Set RA -num } else { L.RaiseError("__unm undefined") } @@ -389,9 +399,9 @@ func init() { RA := lbase + A B := int(inst & 0x1ff) //GETB if LVIsFalse(reg.Get(lbase + B)) { - reg.Set(RA, LTrue) + // +inline-call reg.Set RA LTrue } else { - reg.Set(RA, LFalse) + // +inline-call reg.Set RA LFalse } return 0 }, @@ -404,7 +414,7 @@ func init() { B := int(inst & 0x1ff) //GETB switch lv := L.rkValue(B).(type) { case LString: - reg.SetNumber(RA, LNumber(len(lv))) + // +inline-call reg.SetNumber RA LNumber(len(lv)) default: op := L.metaOp1(lv, "__len") if op.Type() == LTFunction { @@ -413,12 +423,13 @@ func init() { L.Call(1, 1) ret := reg.Pop() if ret.Type() == LTNumber { - reg.SetNumber(RA, ret.(LNumber)) + v, _ := ret.(LNumber) + // +inline-call reg.SetNumber RA v } else { - reg.Set(RA, ret) + // +inline-call reg.Set RA ret } } else if lv.Type() == LTTable { - reg.SetNumber(RA, LNumber(lv.(*LTable).Len())) + // +inline-call reg.SetNumber RA LNumber(lv.(*LTable).Len()) } else { L.RaiseError("__len undefined") } @@ -435,7 +446,8 @@ func init() { C := int(inst>>9) & 0x1ff //GETC RC := lbase + C RB := lbase + B - reg.Set(RA, stringConcat(L, RC-RB+1, RC)) + v := stringConcat(L, RC-RB+1, RC) + // +inline-call reg.Set RA v return 0 }, func(L *LState, inst uint32, baseframe *callFrame) int { //OP_JMP @@ -483,8 +495,8 @@ func init() { rhs := L.rkValue(C) ret := false - if v1, ok1 := lhs.assertFloat64(); ok1 { - if v2, ok2 := rhs.assertFloat64(); ok2 { + if v1, ok1 := lhs.(LNumber); ok1 { + if v2, ok2 := rhs.(LNumber); ok2 { ret = v1 <= v2 } else { L.RaiseError("attempt to compare %v with %v", lhs.Type().String(), rhs.Type().String()) @@ -538,7 +550,7 @@ func init() { B := int(inst & 0x1ff) //GETB C := int(inst>>9) & 0x1ff //GETC if value := reg.Get(lbase + B); LVAsBool(value) != (C == 0) { - reg.Set(RA, value) + // +inline-call reg.Set RA value } else { cf.Pc++ } @@ -560,7 +572,7 @@ func init() { nret := C - 1 var callable *LFunction var meta bool - if fn, ok := lv.assertFunction(); ok { + if fn, ok := lv.(*LFunction); ok { callable = fn meta = false } else { @@ -586,7 +598,7 @@ func init() { lv := reg.Get(RA) var callable *LFunction var meta bool - if fn, ok := lv.assertFunction(); ok { + if fn, ok := lv.(*LFunction); ok { callable = fn meta = false } else { @@ -673,17 +685,18 @@ func init() { lbase := cf.LocalBase A := int(inst>>18) & 0xff //GETA RA := lbase + A - if init, ok1 := reg.Get(RA).assertFloat64(); ok1 { - if limit, ok2 := reg.Get(RA + 1).assertFloat64(); ok2 { - if step, ok3 := reg.Get(RA + 2).assertFloat64(); ok3 { + if init, ok1 := reg.Get(RA).(LNumber); ok1 { + if limit, ok2 := reg.Get(RA + 1).(LNumber); ok2 { + if step, ok3 := reg.Get(RA + 2).(LNumber); ok3 { init += step - reg.SetNumber(RA, LNumber(init)) + v := LNumber(init) + // +inline-call reg.SetNumber RA v if (step > 0 && init <= limit) || (step <= 0 && init >= limit) { Sbx := int(inst&0x3ffff) - opMaxArgSbx //GETSBX cf.Pc += Sbx - reg.SetNumber(RA+3, LNumber(init)) + // +inline-call reg.SetNumber RA+3 v } else { - reg.SetTop(RA + 1) + // +inline-call reg.SetTop RA+1 } } else { L.RaiseError("for statement step must be a number") @@ -703,9 +716,9 @@ func init() { A := int(inst>>18) & 0xff //GETA RA := lbase + A Sbx := int(inst&0x3ffff) - opMaxArgSbx //GETSBX - if init, ok1 := reg.Get(RA).assertFloat64(); ok1 { - if step, ok2 := reg.Get(RA + 2).assertFloat64(); ok2 { - reg.SetNumber(RA, LNumber(init-step)) + if init, ok1 := reg.Get(RA).(LNumber); ok1 { + if step, ok2 := reg.Get(RA + 
2).(LNumber); ok2 { + // +inline-call reg.SetNumber RA LNumber(init-step) } else { L.RaiseError("for statement step must be a number") } @@ -723,13 +736,13 @@ func init() { RA := lbase + A C := int(inst>>9) & 0x1ff //GETC nret := C - reg.SetTop(RA + 3 + 2) - reg.Set(RA+3+2, reg.Get(RA+2)) - reg.Set(RA+3+1, reg.Get(RA+1)) - reg.Set(RA+3, reg.Get(RA)) + // +inline-call reg.SetTop RA+3+2 + // +inline-call reg.Set RA+3+2 reg.Get(RA+2) + // +inline-call reg.Set RA+3+1 reg.Get(RA+1) + // +inline-call reg.Set RA+3 reg.Get(RA) L.callR(2, nret, RA+3) if value := reg.Get(RA + 3); value != LNil { - reg.Set(RA+2, value) + // +inline-call reg.Set RA+2 value pc := cf.Fn.Proto.Code[cf.Pc] cf.Pc += int(pc&0x3ffff) - opMaxArgSbx } @@ -776,7 +789,7 @@ func init() { Bx := int(inst & 0x3ffff) //GETBX proto := cf.Fn.Proto.FunctionPrototypes[Bx] closure := newLFunctionL(proto, cf.Fn.Env, int(proto.NumUpvalues)) - reg.Set(RA, closure) + // +inline-call reg.Set RA closure for i := 0; i < int(proto.NumUpvalues); i++ { inst = cf.Fn.Proto.Code[cf.Pc] cf.Pc++ @@ -826,12 +839,14 @@ func opArith(L *LState, inst uint32, baseframe *callFrame) int { //OP_ADD, OP_SU C := int(inst>>9) & 0x1ff //GETC lhs := L.rkValue(B) rhs := L.rkValue(C) - v1, ok1 := lhs.assertFloat64() - v2, ok2 := rhs.assertFloat64() + v1, ok1 := lhs.(LNumber) + v2, ok2 := rhs.(LNumber) if ok1 && ok2 { - reg.SetNumber(RA, numberArith(L, opcode, LNumber(v1), LNumber(v2))) + v := numberArith(L, opcode, LNumber(v1), LNumber(v2)) + // +inline-call reg.SetNumber RA v } else { - reg.Set(RA, objectArith(L, opcode, lhs, rhs)) + v := objectArith(L, opcode, lhs, rhs) + // +inline-call reg.Set RA v } return 0 } @@ -840,7 +855,7 @@ func luaModulo(lhs, rhs LNumber) LNumber { flhs := float64(lhs) frhs := float64(rhs) v := math.Mod(flhs, frhs) - if flhs < 0 || frhs < 0 && !(flhs < 0 && frhs < 0) { + if frhs > 0 && v < 0 || frhs < 0 && v > 0 { v += frhs } return LNumber(v) @@ -884,7 +899,7 @@ func objectArith(L *LState, opcode int, lhs, rhs LValue) LValue { event = "__pow" } op := L.metaOp2(lhs, rhs, event) - if op.Type() == LTFunction { + if _, ok := op.(*LFunction); ok { L.reg.Push(op) L.reg.Push(lhs) L.reg.Push(rhs) @@ -901,8 +916,8 @@ func objectArith(L *LState, opcode int, lhs, rhs LValue) LValue { rhs = rnum } } - if v1, ok1 := lhs.assertFloat64(); ok1 { - if v2, ok2 := rhs.assertFloat64(); ok2 { + if v1, ok1 := lhs.(LNumber); ok1 { + if v2, ok2 := rhs.(LNumber); ok2 { return numberArith(L, opcode, LNumber(v1), LNumber(v2)) } } @@ -951,8 +966,8 @@ func stringConcat(L *LState, total, last int) LValue { func lessThan(L *LState, lhs, rhs LValue) bool { // optimization for numbers - if v1, ok1 := lhs.assertFloat64(); ok1 { - if v2, ok2 := rhs.assertFloat64(); ok2 { + if v1, ok1 := lhs.(LNumber); ok1 { + if v2, ok2 := rhs.(LNumber); ok2 { return v1 < v2 } L.RaiseError("attempt to compare %v with %v", lhs.Type().String(), rhs.Type().String()) @@ -972,17 +987,18 @@ func lessThan(L *LState, lhs, rhs LValue) bool { } func equals(L *LState, lhs, rhs LValue, raw bool) bool { - if lhs.Type() != rhs.Type() { + lt := lhs.Type() + if lt != rhs.Type() { return false } ret := false - switch lhs.Type() { + switch lt { case LTNil: ret = true case LTNumber: - v1, _ := lhs.assertFloat64() - v2, _ := rhs.assertFloat64() + v1, _ := lhs.(LNumber) + v2, _ := rhs.(LNumber) ret = v1 == v2 case LTBool: ret = bool(lhs.(LBool)) == bool(rhs.(LBool)) diff --git a/vendor/github.com/yuin/gopher-lua/ast/stmt.go b/vendor/github.com/yuin/gopher-lua/ast/stmt.go index 56ea6d1a23a..e24880b6e0c 100644 
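One behavioral fix hides among the mechanical `+inline-call` rewrites in `_vm.go` above: the `luaModulo` guard. Lua defines `a % b` as `a - floor(a/b)*b`, so the result takes the sign of the divisor, whereas Go's `math.Mod` keeps the sign of the dividend. The old test on the operand signs over-corrected (for example, `-1 % -2` yielded `-3` instead of `-1`); the new test on the remainder's sign applies the correction exactly when the two conventions disagree. A standalone sketch of the patched logic:

```go
package main

import (
	"fmt"
	"math"
)

// luaModulo mirrors the patched vendor code: correct math.Mod's
// dividend-signed remainder to Lua's divisor-signed one whenever
// the remainder is nonzero and its sign disagrees with the divisor.
func luaModulo(lhs, rhs float64) float64 {
	v := math.Mod(lhs, rhs)
	if rhs > 0 && v < 0 || rhs < 0 && v > 0 {
		v += rhs
	}
	return v
}

func main() {
	fmt.Println(luaModulo(5, -3))  // -1 (math.Mod alone gives 2)
	fmt.Println(luaModulo(-5, 3))  // 1  (math.Mod alone gives -2)
	fmt.Println(luaModulo(-1, -2)) // -1 (the old operand-sign guard gave -3)
}
```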
--- a/vendor/github.com/yuin/gopher-lua/ast/stmt.go +++ b/vendor/github.com/yuin/gopher-lua/ast/stmt.go @@ -93,3 +93,15 @@ type ReturnStmt struct { type BreakStmt struct { StmtBase } + +type LabelStmt struct { + StmtBase + + Name string +} + +type GotoStmt struct { + StmtBase + + Label string +} diff --git a/vendor/github.com/yuin/gopher-lua/auxlib.go b/vendor/github.com/yuin/gopher-lua/auxlib.go index 61a3b8b6100..a022bdd89ed 100644 --- a/vendor/github.com/yuin/gopher-lua/auxlib.go +++ b/vendor/github.com/yuin/gopher-lua/auxlib.go @@ -40,6 +40,11 @@ func (ls *LState) CheckNumber(n int) LNumber { if lv, ok := v.(LNumber); ok { return lv } + if lv, ok := v.(LString); ok { + if num, err := parseNumber(string(lv)); err == nil { + return num + } + } ls.TypeError(n, LTNumber) return 0 } @@ -413,7 +418,7 @@ func (ls *LState) DoString(source string) error { // ToStringMeta returns string representation of given LValue. // This method calls the `__tostring` meta method if defined. func (ls *LState) ToStringMeta(lv LValue) LValue { - if fn, ok := ls.metaOp1(lv, "__tostring").assertFunction(); ok { + if fn, ok := ls.metaOp1(lv, "__tostring").(*LFunction); ok { ls.Push(fn) ls.Push(lv) ls.Call(1, 1) diff --git a/vendor/github.com/yuin/gopher-lua/compile.go b/vendor/github.com/yuin/gopher-lua/compile.go index 75c75550e42..c3736777a4d 100644 --- a/vendor/github.com/yuin/gopher-lua/compile.go +++ b/vendor/github.com/yuin/gopher-lua/compile.go @@ -2,9 +2,10 @@ package lua import ( "fmt" - "github.com/yuin/gopher-lua/ast" "math" "reflect" + + "github.com/yuin/gopher-lua/ast" ) /* internal constants & structs {{{ */ @@ -94,7 +95,11 @@ func sline(pos ast.PositionHolder) int { } func eline(pos ast.PositionHolder) int { - return pos.LastLine() + line := pos.LastLine() + if line == 0 { + return pos.Line() + } + return line } func savereg(ec *expcontext, reg int) int { @@ -134,6 +139,28 @@ func lnumberValue(expr ast.Expr) (LNumber, bool) { /* utilities }}} */ +type gotoLabelDesc struct { // {{{ + Id int + Name string + Pc int + Line int + NumActiveLocalVars int +} + +func newLabelDesc(id int, name string, pc, line, n int) *gotoLabelDesc { + return &gotoLabelDesc{ + Id: id, + Name: name, + Pc: pc, + Line: line, + NumActiveLocalVars: n, + } +} + +func (l *gotoLabelDesc) SetNumActiveLocalVars(n int) { + l.NumActiveLocalVars = n +} // }}} + type CompileError struct { // {{{ context *funcContext Line int @@ -328,16 +355,18 @@ func (vp *varNamePool) Register(name string) int { /* FuncContext {{{ */ type codeBlock struct { - LocalVars *varNamePool - BreakLabel int - Parent *codeBlock - RefUpvalue bool - LineStart int - LastLine int + LocalVars *varNamePool + BreakLabel int + Parent *codeBlock + RefUpvalue bool + LineStart int + LastLine int + labels map[string]*gotoLabelDesc + firstGotoIndex int } -func newCodeBlock(localvars *varNamePool, blabel int, parent *codeBlock, pos ast.PositionHolder) *codeBlock { - bl := &codeBlock{localvars, blabel, parent, false, 0, 0} +func newCodeBlock(localvars *varNamePool, blabel int, parent *codeBlock, pos ast.PositionHolder, firstGotoIndex int) *codeBlock { + bl := &codeBlock{localvars, blabel, parent, false, 0, 0, map[string]*gotoLabelDesc{}, firstGotoIndex} if pos != nil { bl.LineStart = pos.Line() bl.LastLine = pos.LastLine() @@ -345,33 +374,136 @@ func newCodeBlock(localvars *varNamePool, blabel int, parent *codeBlock, pos ast return bl } +func (b *codeBlock) AddLabel(label *gotoLabelDesc) *gotoLabelDesc { + if old, ok := b.labels[label.Name]; ok { + return old + } + 
b.labels[label.Name] = label + return nil +} + +func (b *codeBlock) GetLabel(label string) *gotoLabelDesc { + if v, ok := b.labels[label]; ok { + return v + } + return nil +} + +func (b *codeBlock) LocalVarsCount() int { + count := 0 + for block := b; block != nil; block = block.Parent { + count += len(block.LocalVars.Names()) + } + return count +} + type funcContext struct { - Proto *FunctionProto - Code *codeStore - Parent *funcContext - Upvalues *varNamePool - Block *codeBlock - Blocks []*codeBlock - regTop int - labelId int - labelPc map[int]int + Proto *FunctionProto + Code *codeStore + Parent *funcContext + Upvalues *varNamePool + Block *codeBlock + Blocks []*codeBlock + regTop int + labelId int + labelPc map[int]int + gotosCount int + unresolvedGotos map[int]*gotoLabelDesc } func newFuncContext(sourcename string, parent *funcContext) *funcContext { fc := &funcContext{ - Proto: newFunctionProto(sourcename), - Code: &codeStore{make([]uint32, 0, 1024), make([]int, 0, 1024), 0}, - Parent: parent, - Upvalues: newVarNamePool(0), - Block: newCodeBlock(newVarNamePool(0), labelNoJump, nil, nil), - regTop: 0, - labelId: 1, - labelPc: map[int]int{}, + Proto: newFunctionProto(sourcename), + Code: &codeStore{make([]uint32, 0, 1024), make([]int, 0, 1024), 0}, + Parent: parent, + Upvalues: newVarNamePool(0), + Block: newCodeBlock(newVarNamePool(0), labelNoJump, nil, nil, 0), + regTop: 0, + labelId: 1, + labelPc: map[int]int{}, + gotosCount: 0, + unresolvedGotos: map[int]*gotoLabelDesc{}, } fc.Blocks = []*codeBlock{fc.Block} return fc } +func (fc *funcContext) CheckUnresolvedGoto() { + for i := fc.Block.firstGotoIndex; i < fc.gotosCount; i++ { + gotoLabel, ok := fc.unresolvedGotos[i] + if !ok { + continue + } + raiseCompileError(fc, fc.Proto.LastLineDefined, "no visible label '%s' for <goto> at line %d", gotoLabel.Name, gotoLabel.Line) + } +} + +func (fc *funcContext) AddUnresolvedGoto(label *gotoLabelDesc) { + fc.unresolvedGotos[fc.gotosCount] = label + fc.gotosCount++ +} + +func (fc *funcContext) AddNamedLabel(label *gotoLabelDesc) { + if old := fc.Block.AddLabel(label); old != nil { + raiseCompileError(fc, label.Line+1, "label '%s' already defined on line %d", label.Name, old.Line) + } + fc.SetLabelPc(label.Id, label.Pc) +} + +func (fc *funcContext) GetNamedLabel(name string) *gotoLabelDesc { + return fc.Block.GetLabel(name) +} + +func (fc *funcContext) ResolveGoto(from, to *gotoLabelDesc, index int) { + if from.NumActiveLocalVars < to.NumActiveLocalVars { + varName := fc.Block.LocalVars.Names()[len(fc.Block.LocalVars.Names())-1] + raiseCompileError(fc, to.Line+1, "<goto %s> at line %d jumps into the scope of local '%s'", to.Name, from.Line, varName) + } + fc.Code.SetSbx(from.Pc, to.Id) + delete(fc.unresolvedGotos, index) +} + +func (fc *funcContext) FindLabel(block *codeBlock, gotoLabel *gotoLabelDesc, i int) bool { + target := block.GetLabel(gotoLabel.Name) + if target != nil { + if gotoLabel.NumActiveLocalVars > target.NumActiveLocalVars && block.RefUpvalue { + fc.Code.SetA(gotoLabel.Pc-1, target.NumActiveLocalVars) + } + fc.ResolveGoto(gotoLabel, target, i) + return true + } + return false +} + +func (fc *funcContext) ResolveCurrentBlockGotosWithParentBlock() { + blockActiveLocalVars := fc.Block.Parent.LocalVarsCount() + for i := fc.Block.firstGotoIndex; i < fc.gotosCount; i++ { + gotoLabel, ok := fc.unresolvedGotos[i] + if !ok { + continue + } + if gotoLabel.NumActiveLocalVars > blockActiveLocalVars { + if fc.Block.RefUpvalue { + fc.Code.SetA(gotoLabel.Pc-1, blockActiveLocalVars) + } +
gotoLabel.SetNumActiveLocalVars(blockActiveLocalVars) + } + fc.FindLabel(fc.Block.Parent, gotoLabel, i) + } +} + +func (fc *funcContext) ResolveForwardGoto(target *gotoLabelDesc) { + for i := fc.Block.firstGotoIndex; i <= fc.gotosCount; i++ { + gotoLabel, ok := fc.unresolvedGotos[i] + if !ok { + continue + } + if gotoLabel.Name == target.Name { + fc.ResolveGoto(gotoLabel, target, i) + } + } +} + func (fc *funcContext) NewLabel() int { ret := fc.labelId fc.labelId++ @@ -400,6 +532,13 @@ func (fc *funcContext) ConstIndex(value LValue) int { } return v } +func (fc *funcContext) BlockLocalVarsCount() int { + count := 0 + for block := fc.Block; block != nil; block = block.Parent { + count += len(block.LocalVars.Names()) + } + return count +} func (fc *funcContext) RegisterLocalVar(name string) int { ret := fc.Block.LocalVars.Register(name) @@ -431,7 +570,7 @@ func (fc *funcContext) LocalVars() []varNamePoolValue { } func (fc *funcContext) EnterBlock(blabel int, pos ast.PositionHolder) { - fc.Block = newCodeBlock(newVarNamePool(fc.RegTop()), blabel, fc.Block, pos) + fc.Block = newCodeBlock(newVarNamePool(fc.RegTop()), blabel, fc.Block, pos, fc.gotosCount) fc.Blocks = append(fc.Blocks, fc.Block) } @@ -447,6 +586,10 @@ func (fc *funcContext) CloseUpvalues() int { func (fc *funcContext) LeaveBlock() int { closed := fc.CloseUpvalues() fc.EndScope() + + if fc.Block.Parent != nil { + fc.ResolveCurrentBlockGotosWithParentBlock() + } fc.Block = fc.Block.Parent fc.SetRegTop(fc.Block.LocalVars.LastIndex()) return closed @@ -471,9 +614,17 @@ func (fc *funcContext) RegTop() int { /* FuncContext }}} */ -func compileChunk(context *funcContext, chunk []ast.Stmt) { // {{{ - for _, stmt := range chunk { - compileStmt(context, stmt) +func compileChunk(context *funcContext, chunk []ast.Stmt, untilFollows bool) { // {{{ + for i, stmt := range chunk { + lastStmt := true + for j := i + 1; j < len(chunk); j++ { + _, ok := chunk[j].(*ast.LabelStmt) + if !ok { + lastStmt = false + break + } + } + compileStmt(context, stmt, lastStmt && !untilFollows) } } // }}} @@ -485,13 +636,21 @@ func compileBlock(context *funcContext, chunk []ast.Stmt) { // {{{ ph.SetLine(sline(chunk[0])) ph.SetLastLine(eline(chunk[len(chunk)-1])) context.EnterBlock(labelNoJump, ph) - for _, stmt := range chunk { - compileStmt(context, stmt) + for i, stmt := range chunk { + lastStmt := true + for j := i + 1; j < len(chunk); j++ { + _, ok := chunk[j].(*ast.LabelStmt) + if !ok { + lastStmt = false + break + } + } + compileStmt(context, stmt, lastStmt) } context.LeaveBlock() } // }}} -func compileStmt(context *funcContext, stmt ast.Stmt) { // {{{ +func compileStmt(context *funcContext, stmt ast.Stmt, isLastStmt bool) { // {{{ switch st := stmt.(type) { case *ast.AssignStmt: compileAssignStmt(context, st) @@ -501,7 +660,7 @@ func compileStmt(context *funcContext, stmt ast.Stmt) { // {{{ compileFuncCallExpr(context, context.RegTop(), st.Expr.(*ast.FuncCallExpr), ecnone(-1)) case *ast.DoBlockStmt: context.EnterBlock(labelNoJump, st) - compileChunk(context, st.Stmts) + compileChunk(context, st.Stmts, false) context.LeaveBlock() case *ast.WhileStmt: compileWhileStmt(context, st) @@ -519,14 +678,17 @@ func compileStmt(context *funcContext, stmt ast.Stmt) { // {{{ compileNumberForStmt(context, st) case *ast.GenericForStmt: compileGenericForStmt(context, st) + case *ast.LabelStmt: + compileLabelStmt(context, st, isLastStmt) + case *ast.GotoStmt: + compileGotoStmt(context, st) } } // }}} func compileAssignStmtLeft(context *funcContext, stmt *ast.AssignStmt) 
(int, []*assigncontext) { // {{{ reg := context.RegTop() acs := make([]*assigncontext, 0, len(stmt.Lhs)) - for i, lhs := range stmt.Lhs { - islast := i == len(stmt.Lhs)-1 + for _, lhs := range stmt.Lhs { switch st := lhs.(type) { case *ast.IdentExpr: identtype := getIdentRefType(context, context, st) @@ -537,9 +699,7 @@ func compileAssignStmtLeft(context *funcContext, stmt *ast.AssignStmt) (int, []* case ecUpvalue: context.Upvalues.RegisterUnique(st.Value) case ecLocal: - if islast { - ec.reg = context.FindLocalVar(st.Value) - } + ec.reg = context.FindLocalVar(st.Value) } acs = append(acs, &assigncontext{ec, 0, 0, false, false}) case *ast.AttrGetExpr: @@ -825,7 +985,7 @@ func compileWhileStmt(context *funcContext, stmt *ast.WhileStmt) { // {{{ compileBranchCondition(context, context.RegTop(), stmt.Condition, thenlabel, elselabel, false) context.SetLabelPc(thenlabel, context.Code.LastPC()) context.EnterBlock(elselabel, stmt) - compileChunk(context, stmt.Stmts) + compileChunk(context, stmt.Stmts, false) context.CloseUpvalues() context.Code.AddASbx(OP_JMP, 0, condlabel, eline(stmt)) context.LeaveBlock() @@ -840,7 +1000,7 @@ func compileRepeatStmt(context *funcContext, stmt *ast.RepeatStmt) { // {{{ context.SetLabelPc(initlabel, context.Code.LastPC()) context.SetLabelPc(elselabel, context.Code.LastPC()) context.EnterBlock(thenlabel, stmt) - compileChunk(context, stmt.Stmts) + compileChunk(context, stmt.Stmts, true) compileBranchCondition(context, context.RegTop(), stmt.Condition, thenlabel, elselabel, false) context.SetLabelPc(thenlabel, context.Code.LastPC()) @@ -916,7 +1076,7 @@ func compileNumberForStmt(context *funcContext, stmt *ast.NumberForStmt) { // {{ context.RegisterLocalVar(stmt.Name) bodypc := code.LastPC() - compileChunk(context, stmt.Stmts) + compileChunk(context, stmt.Stmts, false) context.LeaveBlock() @@ -949,7 +1109,7 @@ func compileGenericForStmt(context *funcContext, stmt *ast.GenericForStmt) { // } context.SetLabelPc(bodylabel, code.LastPC()) - compileChunk(context, stmt.Stmts) + compileChunk(context, stmt.Stmts, false) context.LeaveBlock() @@ -960,6 +1120,24 @@ func compileGenericForStmt(context *funcContext, stmt *ast.GenericForStmt) { // context.SetLabelPc(endlabel, code.LastPC()) } // }}} +func compileLabelStmt(context *funcContext, stmt *ast.LabelStmt, isLastStmt bool) { // {{{ + labelId := context.NewLabel() + label := newLabelDesc(labelId, stmt.Name, context.Code.LastPC(), sline(stmt), context.BlockLocalVarsCount()) + context.AddNamedLabel(label) + if isLastStmt { + label.SetNumActiveLocalVars(context.Block.Parent.LocalVarsCount()) + } + context.ResolveForwardGoto(label) +} // }}} + +func compileGotoStmt(context *funcContext, stmt *ast.GotoStmt) { // {{{ + context.Code.AddABC(OP_CLOSE, 0, 0, 0, sline(stmt)) + context.Code.AddASbx(OP_JMP, 0, labelNoJump, sline(stmt)) + label := newLabelDesc(-1, stmt.Label, context.Code.LastPC(), sline(stmt), context.BlockLocalVarsCount()) + context.AddUnresolvedGoto(label) + context.FindLabel(context.Block, label, context.gotosCount-1) +} // }}} + func compileExpr(context *funcContext, reg int, expr ast.Expr, ec *expcontext) int { // {{{ code := context.Code sreg := savereg(ec, reg) @@ -1149,10 +1327,11 @@ func compileFunctionExpr(context *funcContext, funcexpr *ast.FunctionExpr, ec *e context.Proto.IsVarArg |= VarArgIsVarArg } - compileChunk(context, funcexpr.Stmts) + compileChunk(context, funcexpr.Stmts, false) context.Code.AddABC(OP_RETURN, 0, 1, 0, eline(funcexpr)) context.EndScope() + context.CheckUnresolvedGoto() 
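The label bookkeeping added to `compile.go` above (`AddNamedLabel`, `ResolveGoto`, `FindLabel`, `CheckUnresolvedGoto`) enforces the same rules as PUC Lua 5.2: a `goto` must not jump into the scope of a local variable, and every `goto` must eventually find a visible label. A sketch of both compile-time failures, assuming the same embedding API as in the earlier example:

```go
package main

import (
	"fmt"

	lua "github.com/yuin/gopher-lua"
)

func main() {
	L := lua.NewState()
	defer L.Close()

	// Forward jump across a local declaration: rejected by ResolveGoto,
	// since the label sits inside x's scope and is not the block's
	// final statement.
	err := L.DoString(`
		do
			goto done
			local x = 1
			::done::
			print(x)
		end
	`)
	fmt.Println(err) // expect a "jumps into the scope of local 'x'" error

	// Jump to a label that never appears: rejected by
	// CheckUnresolvedGoto once the enclosing function finishes compiling.
	err = L.DoString(`goto nowhere`)
	fmt.Println(err) // expect a "no visible label 'nowhere'" error
}
```

The `isLastStmt` flag threaded through `compileChunk` appears to implement the Lua 5.2 exception to the first rule: a label that is the last statement of its block may be targeted from inside that block even across locals, since those locals die at the jump target.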
context.Proto.Code = context.Code.List() context.Proto.DbgSourcePositions = context.Code.PosList() context.Proto.DbgUpvalues = context.Upvalues.Names() @@ -1468,7 +1647,17 @@ func compileLogicalOpExprAux(context *funcContext, reg int, expr ast.Expr, ec *e a := reg sreg := savereg(ec, a) - if !hasnextcond && thenlabel == elselabel { + isLastAnd := elselabel == lb.e && thenlabel != elselabel + isLastOr := thenlabel == lb.e && hasnextcond + + if ident, ok := expr.(*ast.IdentExpr); ok && (isLastAnd || isLastOr) && getIdentRefType(context, context, ident) == ecLocal { + b := context.FindLocalVar(ident.Value) + op := OP_TESTSET + if sreg == b { + op = OP_TEST + } + code.AddABC(op, sreg, b, 0^flip, sline(expr)) + } else if !hasnextcond && thenlabel == elselabel { reg += compileExpr(context, reg, expr, &expcontext{ec.ctype, intMax(a, sreg), ec.varargopt}) last := context.Code.Last() if opGetOpCode(last) == OP_MOVE && opGetArgA(last) == a { @@ -1478,7 +1667,7 @@ func compileLogicalOpExprAux(context *funcContext, reg int, expr ast.Expr, ec *e } } else { reg += compileExpr(context, reg, expr, ecnone(0)) - if sreg == a { + if !hasnextcond { code.AddABC(OP_TEST, a, 0, 0^flip, sline(expr)) } else { code.AddABC(OP_TESTSET, sreg, a, 0^flip, sline(expr)) @@ -1669,6 +1858,10 @@ func Compile(chunk []ast.Stmt, name string) (proto *FunctionProto, err error) { err = nil parlist := &ast.ParList{HasVargs: true, Names: []string{}} funcexpr := &ast.FunctionExpr{ParList: parlist, Stmts: chunk} + if len(chunk) > 0 { + funcexpr.SetLastLine(sline(chunk[0])) + funcexpr.SetLastLine(eline(chunk[len(chunk)-1]) + 1) + } context := newFuncContext(name, nil) compileFunctionExpr(context, funcexpr, ecnone(0)) proto = context.Proto diff --git a/vendor/github.com/yuin/gopher-lua/debuglib.go b/vendor/github.com/yuin/gopher-lua/debuglib.go index 41f883f1d06..da8f5254b06 100644 --- a/vendor/github.com/yuin/gopher-lua/debuglib.go +++ b/vendor/github.com/yuin/gopher-lua/debuglib.go @@ -155,8 +155,8 @@ func debugTraceback(L *LState) int { level := L.OptInt(2, 1) ls := L if L.GetTop() > 0 { - if s, ok := L.Get(1).assertString(); ok { - msg = s + if s, ok := L.Get(1).(LString); ok { + msg = string(s) } if l, ok := L.Get(1).(*LState); ok { ls = l diff --git a/vendor/github.com/yuin/gopher-lua/iolib.go b/vendor/github.com/yuin/gopher-lua/iolib.go index 4a86f89362d..3f5f295cedc 100644 --- a/vendor/github.com/yuin/gopher-lua/iolib.go +++ b/vendor/github.com/yuin/gopher-lua/iolib.go @@ -629,7 +629,7 @@ func ioOpenFile(L *LState) int { mode = os.O_RDONLY writable = false case "w", "wb": - mode = os.O_WRONLY | os.O_CREATE + mode = os.O_WRONLY | os.O_TRUNC | os.O_CREATE readable = false case "a", "ab": mode = os.O_WRONLY | os.O_APPEND | os.O_CREATE @@ -658,6 +658,9 @@ func ioPopen(L *LState) int { cmd := L.CheckString(1) if L.GetTop() == 1 { L.Push(LString("r")) + } else if L.GetTop() > 1 && (L.Get(2)).Type() == LTNil { + L.SetTop(1) + L.Push(LString("r")) } var file *LUserData var err error diff --git a/vendor/github.com/yuin/gopher-lua/oslib.go b/vendor/github.com/yuin/gopher-lua/oslib.go index 256c8811300..5448cc1f8b2 100644 --- a/vendor/github.com/yuin/gopher-lua/oslib.go +++ b/vendor/github.com/yuin/gopher-lua/oslib.go @@ -23,7 +23,7 @@ func getIntField(L *LState, tb *LTable, key string, v int) int { slv := string(lv) slv = strings.TrimLeft(slv, " ") if strings.HasPrefix(slv, "0") && !strings.HasPrefix(slv, "0x") && !strings.HasPrefix(slv, "0X") { - //Standard lua interpreter only support decimal and hexadecimal + // Standard lua 
interpreter only support decimal and hexadecimal slv = strings.TrimLeft(slv, "0") if slv == "" { return 0 @@ -106,16 +106,20 @@ func osExit(L *LState) int { func osDate(L *LState) int { t := time.Now() + isUTC := false cfmt := "%c" if L.GetTop() >= 1 { cfmt = L.CheckString(1) if strings.HasPrefix(cfmt, "!") { - t = time.Now().UTC() cfmt = strings.TrimLeft(cfmt, "!") + isUTC = true } if L.GetTop() >= 2 { t = time.Unix(L.CheckInt64(2), 0) } + if isUTC { + t = t.UTC() + } if strings.HasPrefix(cfmt, "*t") { ret := L.NewTable() ret.RawSetString("year", LNumber(t.Year())) diff --git a/vendor/github.com/yuin/gopher-lua/parse/lexer.go b/vendor/github.com/yuin/gopher-lua/parse/lexer.go index 6ad57ceed7d..c20a0bdd69e 100644 --- a/vendor/github.com/yuin/gopher-lua/parse/lexer.go +++ b/vendor/github.com/yuin/gopher-lua/parse/lexer.go @@ -4,11 +4,12 @@ import ( "bufio" "bytes" "fmt" - "github.com/yuin/gopher-lua/ast" "io" "reflect" "strconv" "strings" + + "github.com/yuin/gopher-lua/ast" ) const EOF = -1 @@ -286,7 +287,7 @@ var reservedWords = map[string]int{ "end": TEnd, "false": TFalse, "for": TFor, "function": TFunction, "if": TIf, "in": TIn, "local": TLocal, "nil": TNil, "not": TNot, "or": TOr, "return": TReturn, "repeat": TRepeat, "then": TThen, "true": TTrue, - "until": TUntil, "while": TWhile} + "until": TUntil, "while": TWhile, "goto": TGoto} func (sc *Scanner) Scan(lexer *Lexer) (ast.Token, error) { redo: @@ -408,7 +409,16 @@ redo: tok.Type = '.' } tok.Str = buf.String() - case '+', '*', '/', '%', '^', '#', '(', ')', '{', '}', ']', ';', ':', ',': + case ':': + if sc.Peek() == ':' { + tok.Type = T2Colon + tok.Str = "::" + sc.Next() + } else { + tok.Type = ch + tok.Str = string(rune(ch)) + } + case '+', '*', '/', '%', '^', '#', '(', ')', '{', '}', ']', ';', ',': tok.Type = ch tok.Str = string(rune(ch)) default: diff --git a/vendor/github.com/yuin/gopher-lua/parse/parser.go b/vendor/github.com/yuin/gopher-lua/parse/parser.go index c7565816490..aee06c8c812 100644 --- a/vendor/github.com/yuin/gopher-lua/parse/parser.go +++ b/vendor/github.com/yuin/gopher-lua/parse/parser.go @@ -54,16 +54,18 @@ const TThen = 57363 const TTrue = 57364 const TUntil = 57365 const TWhile = 57366 -const TEqeq = 57367 -const TNeq = 57368 -const TLte = 57369 -const TGte = 57370 -const T2Comma = 57371 -const T3Comma = 57372 -const TIdent = 57373 -const TNumber = 57374 -const TString = 57375 -const UNARY = 57376 +const TGoto = 57367 +const TEqeq = 57368 +const TNeq = 57369 +const TLte = 57370 +const TGte = 57371 +const T2Comma = 57372 +const T3Comma = 57373 +const T2Colon = 57374 +const TIdent = 57375 +const TNumber = 57376 +const TString = 57377 +const UNARY = 57378 var yyToknames = [...]string{ "$end", @@ -90,12 +92,14 @@ var yyToknames = [...]string{ "TTrue", "TUntil", "TWhile", + "TGoto", "TEqeq", "TNeq", "TLte", "TGte", "T2Comma", "T3Comma", + "T2Colon", "TIdent", "TNumber", "TString", @@ -128,7 +132,7 @@ const yyEofCode = 1 const yyErrCode = 2 const yyInitialStackSize = 16 -//line parser.go.y:517 +//line parser.go.y:525 func TokenName(c int) string { if c >= TAnd && c-TAnd < len(yyToknames) { @@ -144,200 +148,207 @@ var yyExca = [...]int8{ -1, 1, 1, -1, -2, 0, - -1, 17, - 46, 31, - 47, 31, - -2, 68, - -1, 93, - 46, 32, - 47, 32, - -2, 68, + -1, 19, + 48, 33, + 49, 33, + -2, 70, + -1, 97, + 48, 34, + 49, 34, + -2, 70, } const yyPrivate = 57344 -const yyLast = 579 +const yyLast = 616 var yyAct = [...]uint8{ - 24, 88, 50, 23, 45, 84, 56, 65, 137, 153, - 136, 113, 52, 142, 54, 53, 33, 134, 65, 132, - 62, 63, 32, 61, 108, 
109, 48, 111, 106, 41, - 42, 105, 49, 155, 166, 81, 82, 83, 138, 104, - 22, 91, 131, 80, 95, 92, 162, 74, 48, 85, - 150, 99, 165, 148, 49, 149, 75, 76, 77, 78, - 79, 67, 80, 107, 106, 148, 114, 115, 116, 117, - 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, - 128, 129, 72, 73, 71, 70, 74, 65, 39, 40, - 47, 139, 133, 68, 69, 75, 76, 77, 78, 79, - 60, 80, 141, 144, 143, 146, 145, 31, 67, 147, - 9, 48, 110, 97, 48, 152, 151, 49, 38, 62, - 49, 17, 66, 77, 78, 79, 96, 80, 59, 72, - 73, 71, 70, 74, 154, 102, 91, 156, 55, 157, - 68, 69, 75, 76, 77, 78, 79, 21, 80, 187, - 94, 20, 26, 184, 37, 179, 163, 112, 25, 35, - 178, 93, 170, 172, 27, 171, 164, 173, 19, 159, - 175, 174, 29, 89, 28, 39, 40, 20, 182, 181, - 100, 34, 135, 183, 67, 39, 40, 47, 186, 64, - 51, 1, 90, 87, 36, 130, 86, 30, 66, 18, - 46, 44, 43, 8, 58, 72, 73, 71, 70, 74, - 57, 67, 168, 169, 167, 3, 68, 69, 75, 76, - 77, 78, 79, 160, 80, 66, 4, 2, 0, 0, - 0, 158, 72, 73, 71, 70, 74, 0, 0, 0, - 0, 0, 0, 68, 69, 75, 76, 77, 78, 79, - 26, 80, 37, 0, 0, 0, 25, 35, 140, 0, - 0, 0, 27, 0, 0, 0, 0, 0, 0, 0, - 29, 21, 28, 39, 40, 20, 26, 0, 37, 34, - 0, 0, 25, 35, 0, 0, 0, 0, 27, 0, - 0, 0, 36, 98, 0, 0, 29, 89, 28, 39, - 40, 20, 26, 0, 37, 34, 0, 0, 25, 35, - 0, 0, 0, 0, 27, 67, 90, 176, 36, 0, - 0, 0, 29, 21, 28, 39, 40, 20, 0, 66, - 0, 34, 0, 0, 0, 0, 72, 73, 71, 70, - 74, 0, 67, 0, 36, 0, 0, 68, 69, 75, - 76, 77, 78, 79, 0, 80, 66, 0, 177, 0, - 0, 0, 0, 72, 73, 71, 70, 74, 0, 67, - 0, 185, 0, 0, 68, 69, 75, 76, 77, 78, - 79, 0, 80, 66, 0, 161, 0, 0, 0, 0, - 72, 73, 71, 70, 74, 0, 67, 0, 0, 0, - 0, 68, 69, 75, 76, 77, 78, 79, 0, 80, - 66, 0, 0, 180, 0, 0, 0, 72, 73, 71, - 70, 74, 0, 67, 0, 0, 0, 0, 68, 69, - 75, 76, 77, 78, 79, 0, 80, 66, 0, 0, - 103, 0, 0, 0, 72, 73, 71, 70, 74, 0, - 67, 0, 101, 0, 0, 68, 69, 75, 76, 77, - 78, 79, 0, 80, 66, 0, 0, 0, 0, 0, - 0, 72, 73, 71, 70, 74, 0, 67, 0, 0, - 0, 0, 68, 69, 75, 76, 77, 78, 79, 0, - 80, 66, 0, 0, 0, 0, 0, 0, 72, 73, - 71, 70, 74, 0, 0, 0, 0, 0, 0, 68, - 69, 75, 76, 77, 78, 79, 0, 80, 72, 73, - 71, 70, 74, 0, 0, 0, 0, 0, 0, 68, - 69, 75, 76, 77, 78, 79, 0, 80, 7, 10, - 0, 0, 0, 0, 14, 15, 13, 0, 16, 0, - 0, 0, 6, 12, 0, 0, 0, 11, 0, 0, - 0, 0, 0, 0, 21, 0, 0, 0, 20, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 5, + 26, 92, 52, 25, 47, 88, 158, 58, 142, 118, + 141, 167, 54, 69, 56, 55, 35, 139, 41, 42, + 49, 160, 34, 67, 63, 137, 50, 64, 112, 113, + 69, 109, 51, 48, 46, 45, 147, 85, 86, 87, + 115, 110, 24, 95, 43, 44, 99, 96, 78, 136, + 50, 171, 143, 103, 69, 108, 51, 84, 79, 80, + 81, 82, 83, 89, 84, 111, 110, 41, 42, 49, + 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, + 129, 130, 131, 132, 133, 134, 81, 82, 83, 170, + 84, 153, 155, 33, 154, 144, 9, 138, 40, 23, + 153, 19, 62, 22, 114, 101, 146, 149, 148, 151, + 150, 71, 100, 152, 66, 50, 65, 61, 50, 157, + 156, 51, 57, 64, 51, 70, 116, 106, 192, 21, + 173, 174, 172, 76, 77, 75, 74, 78, 98, 159, + 189, 95, 161, 97, 162, 72, 73, 79, 80, 81, + 82, 83, 68, 84, 184, 183, 177, 169, 164, 104, + 140, 168, 117, 53, 1, 91, 135, 175, 32, 20, + 176, 8, 178, 60, 59, 180, 179, 3, 165, 28, + 4, 39, 2, 187, 186, 27, 37, 0, 188, 0, + 0, 29, 0, 191, 71, 0, 0, 0, 0, 0, + 31, 0, 93, 30, 41, 42, 22, 0, 70, 0, + 36, 0, 0, 0, 0, 0, 76, 77, 75, 74, + 78, 94, 0, 38, 71, 90, 0, 0, 72, 73, + 79, 80, 81, 82, 83, 0, 84, 0, 70, 0, + 0, 0, 0, 163, 0, 0, 76, 77, 75, 74, + 78, 0, 0, 0, 0, 0, 0, 0, 72, 73, + 79, 80, 81, 82, 83, 28, 84, 39, 0, 0, + 0, 27, 37, 145, 0, 0, 0, 29, 0, 0, + 0, 0, 0, 0, 0, 0, 31, 0, 23, 30, + 41, 42, 22, 28, 0, 39, 36, 0, 0, 27, + 37, 0, 0, 0, 0, 29, 0, 
0, 0, 38, + 102, 0, 0, 0, 31, 0, 93, 30, 41, 42, + 22, 28, 0, 39, 36, 0, 0, 27, 37, 0, + 0, 0, 0, 29, 0, 94, 71, 38, 181, 0, + 0, 0, 31, 0, 23, 30, 41, 42, 22, 0, + 70, 0, 36, 0, 0, 0, 0, 0, 76, 77, + 75, 74, 78, 71, 0, 38, 0, 0, 0, 0, + 72, 73, 79, 80, 81, 82, 83, 70, 84, 0, + 0, 182, 0, 0, 0, 76, 77, 75, 74, 78, + 71, 0, 190, 0, 0, 0, 0, 72, 73, 79, + 80, 81, 82, 83, 70, 84, 0, 0, 166, 0, + 0, 0, 76, 77, 75, 74, 78, 71, 0, 0, + 0, 0, 0, 0, 72, 73, 79, 80, 81, 82, + 83, 70, 84, 0, 185, 0, 0, 0, 0, 76, + 77, 75, 74, 78, 71, 0, 0, 0, 0, 0, + 0, 72, 73, 79, 80, 81, 82, 83, 70, 84, + 0, 107, 0, 0, 0, 0, 76, 77, 75, 74, + 78, 71, 0, 105, 0, 0, 0, 0, 72, 73, + 79, 80, 81, 82, 83, 70, 84, 0, 0, 0, + 0, 0, 0, 76, 77, 75, 74, 78, 71, 0, + 0, 0, 0, 0, 0, 72, 73, 79, 80, 81, + 82, 83, 70, 84, 0, 0, 0, 0, 0, 0, + 76, 77, 75, 74, 78, 0, 0, 0, 0, 0, + 0, 0, 72, 73, 79, 80, 81, 82, 83, 0, + 84, 7, 10, 0, 0, 0, 0, 14, 15, 13, + 0, 16, 71, 0, 0, 6, 12, 0, 0, 0, + 11, 18, 0, 0, 0, 0, 0, 0, 17, 23, + 0, 0, 0, 22, 76, 77, 75, 74, 78, 0, + 0, 0, 0, 5, 0, 0, 72, 73, 79, 80, + 81, 82, 83, 0, 84, 76, 77, 75, 74, 78, + 0, 0, 0, 0, 0, 0, 0, 72, 73, 79, + 80, 81, 82, 83, 0, 84, } var yyPact = [...]int16{ - -1000, -1000, 533, -5, -1000, -1000, 292, -1000, -17, 152, - -1000, 292, -1000, 292, 107, 97, 88, -1000, -1000, -1000, - 292, -1000, -1000, -29, 473, -1000, -1000, -1000, -1000, -1000, - -1000, 152, -1000, -1000, 292, 292, 292, 14, -1000, -1000, - 142, 292, 116, 292, 95, -1000, 82, 240, -1000, -1000, - 171, -1000, 446, 112, 419, -7, 17, 14, -24, -1000, - 81, -19, -1000, 104, -42, 292, 292, 292, 292, 292, - 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, - 292, -1, -1, -1, -1000, -11, -1000, -37, -1000, -8, - 292, 473, -29, -1000, 152, 207, -1000, 55, -1000, -40, - -1000, -1000, 292, -1000, 292, 292, 34, -1000, 24, 19, - 14, 292, -1000, -1000, 473, 57, 493, 18, 18, 18, - 18, 18, 18, 18, 83, 83, -1, -1, -1, -1, - -44, -1000, -1000, -14, -1000, 266, -1000, -1000, 292, 180, - -1000, -1000, -1000, 160, 473, -1000, 338, 40, -1000, -1000, - -1000, -1000, -29, -1000, 157, 22, -1000, 473, -12, -1000, - 205, 292, -1000, 154, -1000, -1000, 292, -1000, -1000, 292, - 311, 151, -1000, 473, 146, 392, -1000, 292, -1000, -1000, - -1000, 144, 365, -1000, -1000, -1000, 140, -1000, + -1000, -1000, 536, -5, -1000, -1000, 311, -1000, -4, -17, + -1000, 311, -1000, 311, 89, 84, 90, 83, 81, -1000, + -1000, -1000, 311, -1000, -1000, -36, 494, -1000, -1000, -1000, + -1000, -1000, -1000, -17, -1000, -1000, 311, 311, 311, 26, + -1000, -1000, 169, 311, 66, 311, 79, -1000, 72, 255, + -1000, -1000, 150, -1000, 467, 104, 440, 7, 17, 26, + -22, -1000, 71, -8, -1000, 94, -1000, 107, -46, 311, + 311, 311, 311, 311, 311, 311, 311, 311, 311, 311, + 311, 311, 311, 311, 311, 11, 11, 11, -1000, -6, + -1000, -39, -1000, 4, 311, 494, -36, -1000, -17, 220, + -1000, 32, -1000, -19, -1000, -1000, 311, -1000, 311, 311, + 67, -1000, 61, 59, 26, 311, -1000, -1000, -1000, 494, + 548, 569, 18, 18, 18, 18, 18, 18, 18, 44, + 44, 11, 11, 11, 11, -49, -1000, -1000, -28, -1000, + 283, -1000, -1000, 311, 190, -1000, -1000, -1000, 149, 494, + -1000, 359, 5, -1000, -1000, -1000, -1000, -36, -1000, 148, + 58, -1000, 494, 3, -1000, 123, 311, -1000, 147, -1000, + -1000, 311, -1000, -1000, 311, 332, 146, -1000, 494, 145, + 413, -1000, 311, -1000, -1000, -1000, 131, 386, -1000, -1000, + -1000, 119, -1000, } var yyPgo = [...]uint8{ - 0, 190, 227, 2, 226, 223, 215, 210, 204, 203, - 118, 6, 3, 0, 22, 107, 168, 199, 4, 197, - 5, 195, 16, 193, 1, 182, + 0, 163, 182, 2, 180, 178, 
177, 174, 173, 171, + 98, 7, 3, 0, 22, 93, 129, 169, 4, 168, + 5, 166, 16, 165, 1, 160, } var yyR1 = [...]int8{ 0, 1, 1, 1, 2, 2, 2, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 5, 5, 6, 6, 6, 7, 7, 8, - 8, 9, 9, 10, 10, 10, 11, 11, 12, 12, + 4, 4, 4, 4, 5, 5, 6, 6, 6, 7, + 7, 8, 8, 9, 9, 10, 10, 10, 11, 11, + 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, - 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, - 13, 13, 13, 13, 13, 13, 13, 14, 15, 15, - 15, 15, 17, 16, 16, 18, 18, 18, 18, 19, - 20, 20, 21, 21, 21, 22, 22, 23, 23, 23, - 24, 24, 24, 25, 25, + 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, + 15, 15, 15, 15, 17, 16, 16, 18, 18, 18, + 18, 19, 20, 20, 21, 21, 21, 22, 22, 23, + 23, 23, 24, 24, 24, 25, 25, } var yyR2 = [...]int8{ 0, 1, 2, 3, 0, 2, 2, 1, 3, 1, 3, 5, 4, 6, 8, 9, 11, 7, 3, 4, - 4, 2, 0, 5, 1, 2, 1, 1, 3, 1, - 3, 1, 3, 1, 4, 3, 1, 3, 1, 3, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, - 1, 3, 3, 2, 4, 2, 3, 1, 1, 2, - 5, 4, 1, 1, 3, 2, 3, 1, 3, 2, - 3, 5, 1, 1, 1, + 4, 2, 3, 2, 0, 5, 1, 2, 1, 1, + 3, 1, 3, 1, 3, 1, 4, 3, 1, 3, + 1, 3, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, + 1, 1, 1, 3, 3, 2, 4, 2, 3, 1, + 1, 2, 5, 4, 1, 1, 3, 2, 3, 1, + 3, 2, 3, 5, 1, 1, 1, } var yyChk = [...]int16{ - -1000, -1, -2, -6, -4, 45, 19, 5, -9, -15, - 6, 24, 20, 13, 11, 12, 15, -10, -17, -16, - 35, 31, 45, -12, -13, 16, 10, 22, 32, 30, - -19, -15, -14, -22, 39, 17, 52, 12, -10, 33, - 34, 46, 47, 50, 49, -18, 48, 35, -22, -14, - -3, -1, -13, -3, -13, 31, -11, -7, -8, 31, - 12, -11, 31, -13, -16, 47, 18, 4, 36, 37, - 28, 27, 25, 26, 29, 38, 39, 40, 41, 42, - 44, -13, -13, -13, -20, 35, 54, -23, -24, 31, - 50, -13, -12, -10, -15, -13, 31, 31, 53, -12, - 9, 6, 23, 21, 46, 14, 47, -20, 48, 49, - 31, 46, 53, 53, -13, -13, -13, -13, -13, -13, + -1000, -1, -2, -6, -4, 47, 19, 5, -9, -15, + 6, 24, 20, 13, 11, 12, 15, 32, 25, -10, + -17, -16, 37, 33, 47, -12, -13, 16, 10, 22, + 34, 31, -19, -15, -14, -22, 41, 17, 54, 12, + -10, 35, 36, 48, 49, 52, 51, -18, 50, 37, + -22, -14, -3, -1, -13, -3, -13, 33, -11, -7, + -8, 33, 12, -11, 33, 33, 33, -13, -16, 49, + 18, 4, 38, 39, 29, 28, 26, 27, 30, 40, + 41, 42, 43, 44, 46, -13, -13, -13, -20, 37, + 56, -23, -24, 33, 52, -13, -12, -10, -15, -13, + 33, 33, 55, -12, 9, 6, 23, 21, 48, 14, + 49, -20, 50, 51, 33, 48, 32, 55, 55, -13, -13, -13, -13, -13, -13, -13, -13, -13, -13, -13, - -21, 53, 30, -11, 54, -25, 47, 45, 46, -13, - 51, -18, 53, -3, -13, -3, -13, -12, 31, 31, - 31, -20, -12, 53, -3, 47, -24, -13, 51, 9, - -5, 47, 6, -3, 9, 30, 46, 9, 7, 8, - -13, -3, 9, -13, -3, -13, 6, 47, 9, 9, - 21, -3, -13, -3, 9, 6, -3, 9, + -13, -13, -13, -13, -13, -21, 55, 31, -11, 56, + -25, 49, 47, 48, -13, 53, -18, 55, -3, -13, + -3, -13, -12, 33, 33, 33, -20, -12, 55, -3, + 49, -24, -13, 53, 9, -5, 49, 6, -3, 9, + 31, 48, 9, 7, 8, -13, -3, 9, -13, -3, + -13, 6, 49, 9, 9, 21, -3, -13, -3, 9, + 6, -3, 9, } var yyDef = [...]int8{ - 4, -2, 1, 2, 5, 6, 24, 26, 0, 9, - 4, 0, 4, 0, 0, 0, 0, -2, 69, 70, - 0, 33, 3, 25, 38, 40, 41, 42, 43, 44, - 45, 46, 47, 48, 0, 0, 0, 0, 68, 67, - 0, 0, 0, 0, 0, 73, 0, 0, 77, 78, - 0, 7, 0, 0, 0, 36, 0, 0, 27, 29, - 0, 21, 36, 0, 70, 0, 0, 0, 0, 0, + 4, -2, 1, 2, 5, 6, 26, 28, 0, 9, + 4, 0, 4, 0, 0, 0, 0, 0, 0, -2, + 71, 72, 0, 35, 3, 27, 40, 42, 43, 44, + 45, 46, 47, 48, 49, 50, 0, 0, 0, 0, + 70, 69, 0, 0, 0, 0, 0, 75, 0, 0, + 79, 80, 0, 7, 0, 0, 0, 38, 0, 0, + 29, 31, 0, 21, 38, 0, 23, 0, 72, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 
64, 65, 66, 79, 0, 85, 0, 87, 33, - 0, 92, 8, -2, 0, 0, 35, 0, 75, 0, - 10, 4, 0, 4, 0, 0, 0, 18, 0, 0, - 0, 0, 71, 72, 39, 49, 50, 51, 52, 53, - 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, - 0, 4, 82, 83, 86, 89, 93, 94, 0, 0, - 34, 74, 76, 0, 12, 22, 0, 0, 37, 28, - 30, 19, 20, 4, 0, 0, 88, 90, 0, 11, - 0, 0, 4, 0, 81, 84, 0, 13, 4, 0, - 0, 0, 80, 91, 0, 0, 4, 0, 17, 14, - 4, 0, 0, 23, 15, 4, 0, 16, + 0, 0, 0, 0, 0, 66, 67, 68, 81, 0, + 87, 0, 89, 35, 0, 94, 8, -2, 0, 0, + 37, 0, 77, 0, 10, 4, 0, 4, 0, 0, + 0, 18, 0, 0, 0, 0, 22, 73, 74, 41, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 65, 0, 4, 84, 85, 88, + 91, 95, 96, 0, 0, 36, 76, 78, 0, 12, + 24, 0, 0, 39, 30, 32, 19, 20, 4, 0, + 0, 90, 92, 0, 11, 0, 0, 4, 0, 83, + 86, 0, 13, 4, 0, 0, 0, 82, 93, 0, + 0, 4, 0, 17, 14, 4, 0, 0, 25, 15, + 4, 0, 16, } var yyTok1 = [...]int8{ 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 52, 3, 42, 3, 3, - 35, 53, 40, 38, 47, 39, 49, 41, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, 48, 45, - 37, 46, 36, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 54, 3, 44, 3, 3, + 37, 55, 42, 40, 49, 41, 51, 43, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 50, 47, + 39, 48, 38, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 50, 3, 51, 44, 3, 3, 3, 3, 3, + 3, 52, 3, 53, 46, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 34, 3, 54, + 3, 3, 3, 36, 3, 56, } var yyTok2 = [...]int8{ 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - 32, 33, 43, + 32, 33, 34, 35, 45, } var yyTok3 = [...]int8{ @@ -856,61 +867,75 @@ yydefault: yyVAL.stmt.SetLine(yyDollar[1].token.Pos.Line) } case 22: + yyDollar = yyS[yypt-3 : yypt+1] +//line parser.go.y:191 + { + yyVAL.stmt = &ast.LabelStmt{Name: yyDollar[2].token.Str} + yyVAL.stmt.SetLine(yyDollar[1].token.Pos.Line) + } + case 23: + yyDollar = yyS[yypt-2 : yypt+1] +//line parser.go.y:195 + { + yyVAL.stmt = &ast.GotoStmt{Label: yyDollar[2].token.Str} + yyVAL.stmt.SetLine(yyDollar[1].token.Pos.Line) + } + case 24: yyDollar = yyS[yypt-0 : yypt+1] -//line parser.go.y:193 +//line parser.go.y:201 { yyVAL.stmts = []ast.Stmt{} } - case 23: + case 25: yyDollar = yyS[yypt-5 : yypt+1] -//line parser.go.y:196 +//line parser.go.y:204 { yyVAL.stmts = append(yyDollar[1].stmts, &ast.IfStmt{Condition: yyDollar[3].expr, Then: yyDollar[5].stmts}) yyVAL.stmts[len(yyVAL.stmts)-1].SetLine(yyDollar[2].token.Pos.Line) } - case 24: + case 26: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:202 +//line parser.go.y:210 { yyVAL.stmt = &ast.ReturnStmt{Exprs: nil} yyVAL.stmt.SetLine(yyDollar[1].token.Pos.Line) } - case 25: + case 27: yyDollar = yyS[yypt-2 : yypt+1] -//line parser.go.y:206 +//line parser.go.y:214 { yyVAL.stmt = &ast.ReturnStmt{Exprs: yyDollar[2].exprlist} yyVAL.stmt.SetLine(yyDollar[1].token.Pos.Line) } - case 26: + case 28: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:210 +//line parser.go.y:218 { yyVAL.stmt = &ast.BreakStmt{} yyVAL.stmt.SetLine(yyDollar[1].token.Pos.Line) } - case 27: + case 29: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:216 +//line parser.go.y:224 { yyVAL.funcname = yyDollar[1].funcname } - case 28: + case 30: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:219 +//line parser.go.y:227 { yyVAL.funcname = &ast.FuncName{Func: nil, Receiver: yyDollar[1].funcname.Func, Method: yyDollar[3].token.Str} } - case 29: + case 31: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:224 +//line 
parser.go.y:232 { yyVAL.funcname = &ast.FuncName{Func: &ast.IdentExpr{Value: yyDollar[1].token.Str}} yyVAL.funcname.Func.SetLine(yyDollar[1].token.Pos.Line) } - case 30: + case 32: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:228 +//line parser.go.y:236 { key := &ast.StringExpr{Value: yyDollar[3].token.Str} key.SetLine(yyDollar[3].token.Pos.Line) @@ -918,278 +943,278 @@ yydefault: fn.SetLine(yyDollar[3].token.Pos.Line) yyVAL.funcname = &ast.FuncName{Func: fn} } - case 31: + case 33: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:237 +//line parser.go.y:245 { yyVAL.exprlist = []ast.Expr{yyDollar[1].expr} } - case 32: + case 34: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:240 +//line parser.go.y:248 { yyVAL.exprlist = append(yyDollar[1].exprlist, yyDollar[3].expr) } - case 33: + case 35: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:245 +//line parser.go.y:253 { yyVAL.expr = &ast.IdentExpr{Value: yyDollar[1].token.Str} yyVAL.expr.SetLine(yyDollar[1].token.Pos.Line) } - case 34: + case 36: yyDollar = yyS[yypt-4 : yypt+1] -//line parser.go.y:249 +//line parser.go.y:257 { yyVAL.expr = &ast.AttrGetExpr{Object: yyDollar[1].expr, Key: yyDollar[3].expr} yyVAL.expr.SetLine(yyDollar[1].expr.Line()) } - case 35: + case 37: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:253 +//line parser.go.y:261 { key := &ast.StringExpr{Value: yyDollar[3].token.Str} key.SetLine(yyDollar[3].token.Pos.Line) yyVAL.expr = &ast.AttrGetExpr{Object: yyDollar[1].expr, Key: key} yyVAL.expr.SetLine(yyDollar[1].expr.Line()) } - case 36: - yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:261 - { - yyVAL.namelist = []string{yyDollar[1].token.Str} - } - case 37: - yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:264 - { - yyVAL.namelist = append(yyDollar[1].namelist, yyDollar[3].token.Str) - } case 38: yyDollar = yyS[yypt-1 : yypt+1] //line parser.go.y:269 { - yyVAL.exprlist = []ast.Expr{yyDollar[1].expr} + yyVAL.namelist = []string{yyDollar[1].token.Str} } case 39: yyDollar = yyS[yypt-3 : yypt+1] //line parser.go.y:272 { - yyVAL.exprlist = append(yyDollar[1].exprlist, yyDollar[3].expr) + yyVAL.namelist = append(yyDollar[1].namelist, yyDollar[3].token.Str) } case 40: yyDollar = yyS[yypt-1 : yypt+1] //line parser.go.y:277 { - yyVAL.expr = &ast.NilExpr{} - yyVAL.expr.SetLine(yyDollar[1].token.Pos.Line) + yyVAL.exprlist = []ast.Expr{yyDollar[1].expr} } case 41: - yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:281 + yyDollar = yyS[yypt-3 : yypt+1] +//line parser.go.y:280 { - yyVAL.expr = &ast.FalseExpr{} - yyVAL.expr.SetLine(yyDollar[1].token.Pos.Line) + yyVAL.exprlist = append(yyDollar[1].exprlist, yyDollar[3].expr) } case 42: yyDollar = yyS[yypt-1 : yypt+1] //line parser.go.y:285 { - yyVAL.expr = &ast.TrueExpr{} + yyVAL.expr = &ast.NilExpr{} yyVAL.expr.SetLine(yyDollar[1].token.Pos.Line) } case 43: yyDollar = yyS[yypt-1 : yypt+1] //line parser.go.y:289 { - yyVAL.expr = &ast.NumberExpr{Value: yyDollar[1].token.Str} + yyVAL.expr = &ast.FalseExpr{} yyVAL.expr.SetLine(yyDollar[1].token.Pos.Line) } case 44: yyDollar = yyS[yypt-1 : yypt+1] //line parser.go.y:293 { - yyVAL.expr = &ast.Comma3Expr{} + yyVAL.expr = &ast.TrueExpr{} yyVAL.expr.SetLine(yyDollar[1].token.Pos.Line) } case 45: yyDollar = yyS[yypt-1 : yypt+1] //line parser.go.y:297 { - yyVAL.expr = yyDollar[1].expr + yyVAL.expr = &ast.NumberExpr{Value: yyDollar[1].token.Str} + yyVAL.expr.SetLine(yyDollar[1].token.Pos.Line) } case 46: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:300 +//line parser.go.y:301 { - yyVAL.expr = 
yyDollar[1].expr + yyVAL.expr = &ast.Comma3Expr{} + yyVAL.expr.SetLine(yyDollar[1].token.Pos.Line) } case 47: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:303 +//line parser.go.y:305 { yyVAL.expr = yyDollar[1].expr } case 48: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:306 +//line parser.go.y:308 { yyVAL.expr = yyDollar[1].expr } case 49: - yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:309 + yyDollar = yyS[yypt-1 : yypt+1] +//line parser.go.y:311 { - yyVAL.expr = &ast.LogicalOpExpr{Lhs: yyDollar[1].expr, Operator: "or", Rhs: yyDollar[3].expr} - yyVAL.expr.SetLine(yyDollar[1].expr.Line()) + yyVAL.expr = yyDollar[1].expr } case 50: - yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:313 + yyDollar = yyS[yypt-1 : yypt+1] +//line parser.go.y:314 { - yyVAL.expr = &ast.LogicalOpExpr{Lhs: yyDollar[1].expr, Operator: "and", Rhs: yyDollar[3].expr} - yyVAL.expr.SetLine(yyDollar[1].expr.Line()) + yyVAL.expr = yyDollar[1].expr } case 51: yyDollar = yyS[yypt-3 : yypt+1] //line parser.go.y:317 { - yyVAL.expr = &ast.RelationalOpExpr{Lhs: yyDollar[1].expr, Operator: ">", Rhs: yyDollar[3].expr} + yyVAL.expr = &ast.LogicalOpExpr{Lhs: yyDollar[1].expr, Operator: "or", Rhs: yyDollar[3].expr} yyVAL.expr.SetLine(yyDollar[1].expr.Line()) } case 52: yyDollar = yyS[yypt-3 : yypt+1] //line parser.go.y:321 { - yyVAL.expr = &ast.RelationalOpExpr{Lhs: yyDollar[1].expr, Operator: "<", Rhs: yyDollar[3].expr} + yyVAL.expr = &ast.LogicalOpExpr{Lhs: yyDollar[1].expr, Operator: "and", Rhs: yyDollar[3].expr} yyVAL.expr.SetLine(yyDollar[1].expr.Line()) } case 53: yyDollar = yyS[yypt-3 : yypt+1] //line parser.go.y:325 { - yyVAL.expr = &ast.RelationalOpExpr{Lhs: yyDollar[1].expr, Operator: ">=", Rhs: yyDollar[3].expr} + yyVAL.expr = &ast.RelationalOpExpr{Lhs: yyDollar[1].expr, Operator: ">", Rhs: yyDollar[3].expr} yyVAL.expr.SetLine(yyDollar[1].expr.Line()) } case 54: yyDollar = yyS[yypt-3 : yypt+1] //line parser.go.y:329 { - yyVAL.expr = &ast.RelationalOpExpr{Lhs: yyDollar[1].expr, Operator: "<=", Rhs: yyDollar[3].expr} + yyVAL.expr = &ast.RelationalOpExpr{Lhs: yyDollar[1].expr, Operator: "<", Rhs: yyDollar[3].expr} yyVAL.expr.SetLine(yyDollar[1].expr.Line()) } case 55: yyDollar = yyS[yypt-3 : yypt+1] //line parser.go.y:333 { - yyVAL.expr = &ast.RelationalOpExpr{Lhs: yyDollar[1].expr, Operator: "==", Rhs: yyDollar[3].expr} + yyVAL.expr = &ast.RelationalOpExpr{Lhs: yyDollar[1].expr, Operator: ">=", Rhs: yyDollar[3].expr} yyVAL.expr.SetLine(yyDollar[1].expr.Line()) } case 56: yyDollar = yyS[yypt-3 : yypt+1] //line parser.go.y:337 { - yyVAL.expr = &ast.RelationalOpExpr{Lhs: yyDollar[1].expr, Operator: "~=", Rhs: yyDollar[3].expr} + yyVAL.expr = &ast.RelationalOpExpr{Lhs: yyDollar[1].expr, Operator: "<=", Rhs: yyDollar[3].expr} yyVAL.expr.SetLine(yyDollar[1].expr.Line()) } case 57: yyDollar = yyS[yypt-3 : yypt+1] //line parser.go.y:341 { - yyVAL.expr = &ast.StringConcatOpExpr{Lhs: yyDollar[1].expr, Rhs: yyDollar[3].expr} + yyVAL.expr = &ast.RelationalOpExpr{Lhs: yyDollar[1].expr, Operator: "==", Rhs: yyDollar[3].expr} yyVAL.expr.SetLine(yyDollar[1].expr.Line()) } case 58: yyDollar = yyS[yypt-3 : yypt+1] //line parser.go.y:345 { - yyVAL.expr = &ast.ArithmeticOpExpr{Lhs: yyDollar[1].expr, Operator: "+", Rhs: yyDollar[3].expr} + yyVAL.expr = &ast.RelationalOpExpr{Lhs: yyDollar[1].expr, Operator: "~=", Rhs: yyDollar[3].expr} yyVAL.expr.SetLine(yyDollar[1].expr.Line()) } case 59: yyDollar = yyS[yypt-3 : yypt+1] //line parser.go.y:349 { - yyVAL.expr = &ast.ArithmeticOpExpr{Lhs: yyDollar[1].expr, Operator: "-", 
Rhs: yyDollar[3].expr} + yyVAL.expr = &ast.StringConcatOpExpr{Lhs: yyDollar[1].expr, Rhs: yyDollar[3].expr} yyVAL.expr.SetLine(yyDollar[1].expr.Line()) } case 60: yyDollar = yyS[yypt-3 : yypt+1] //line parser.go.y:353 { - yyVAL.expr = &ast.ArithmeticOpExpr{Lhs: yyDollar[1].expr, Operator: "*", Rhs: yyDollar[3].expr} + yyVAL.expr = &ast.ArithmeticOpExpr{Lhs: yyDollar[1].expr, Operator: "+", Rhs: yyDollar[3].expr} yyVAL.expr.SetLine(yyDollar[1].expr.Line()) } case 61: yyDollar = yyS[yypt-3 : yypt+1] //line parser.go.y:357 { - yyVAL.expr = &ast.ArithmeticOpExpr{Lhs: yyDollar[1].expr, Operator: "/", Rhs: yyDollar[3].expr} + yyVAL.expr = &ast.ArithmeticOpExpr{Lhs: yyDollar[1].expr, Operator: "-", Rhs: yyDollar[3].expr} yyVAL.expr.SetLine(yyDollar[1].expr.Line()) } case 62: yyDollar = yyS[yypt-3 : yypt+1] //line parser.go.y:361 { - yyVAL.expr = &ast.ArithmeticOpExpr{Lhs: yyDollar[1].expr, Operator: "%", Rhs: yyDollar[3].expr} + yyVAL.expr = &ast.ArithmeticOpExpr{Lhs: yyDollar[1].expr, Operator: "*", Rhs: yyDollar[3].expr} yyVAL.expr.SetLine(yyDollar[1].expr.Line()) } case 63: yyDollar = yyS[yypt-3 : yypt+1] //line parser.go.y:365 { - yyVAL.expr = &ast.ArithmeticOpExpr{Lhs: yyDollar[1].expr, Operator: "^", Rhs: yyDollar[3].expr} + yyVAL.expr = &ast.ArithmeticOpExpr{Lhs: yyDollar[1].expr, Operator: "/", Rhs: yyDollar[3].expr} yyVAL.expr.SetLine(yyDollar[1].expr.Line()) } case 64: - yyDollar = yyS[yypt-2 : yypt+1] + yyDollar = yyS[yypt-3 : yypt+1] //line parser.go.y:369 + { + yyVAL.expr = &ast.ArithmeticOpExpr{Lhs: yyDollar[1].expr, Operator: "%", Rhs: yyDollar[3].expr} + yyVAL.expr.SetLine(yyDollar[1].expr.Line()) + } + case 65: + yyDollar = yyS[yypt-3 : yypt+1] +//line parser.go.y:373 + { + yyVAL.expr = &ast.ArithmeticOpExpr{Lhs: yyDollar[1].expr, Operator: "^", Rhs: yyDollar[3].expr} + yyVAL.expr.SetLine(yyDollar[1].expr.Line()) + } + case 66: + yyDollar = yyS[yypt-2 : yypt+1] +//line parser.go.y:377 { yyVAL.expr = &ast.UnaryMinusOpExpr{Expr: yyDollar[2].expr} yyVAL.expr.SetLine(yyDollar[2].expr.Line()) } - case 65: + case 67: yyDollar = yyS[yypt-2 : yypt+1] -//line parser.go.y:373 +//line parser.go.y:381 { yyVAL.expr = &ast.UnaryNotOpExpr{Expr: yyDollar[2].expr} yyVAL.expr.SetLine(yyDollar[2].expr.Line()) } - case 66: + case 68: yyDollar = yyS[yypt-2 : yypt+1] -//line parser.go.y:377 +//line parser.go.y:385 { yyVAL.expr = &ast.UnaryLenOpExpr{Expr: yyDollar[2].expr} yyVAL.expr.SetLine(yyDollar[2].expr.Line()) } - case 67: + case 69: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:383 +//line parser.go.y:391 { yyVAL.expr = &ast.StringExpr{Value: yyDollar[1].token.Str} yyVAL.expr.SetLine(yyDollar[1].token.Pos.Line) } - case 68: + case 70: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:389 +//line parser.go.y:397 { yyVAL.expr = yyDollar[1].expr } - case 69: + case 71: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:392 +//line parser.go.y:400 { yyVAL.expr = yyDollar[1].expr } - case 70: + case 72: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:395 +//line parser.go.y:403 { yyVAL.expr = yyDollar[1].expr } - case 71: + case 73: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:398 +//line parser.go.y:406 { if ex, ok := yyDollar[2].expr.(*ast.Comma3Expr); ok { ex.AdjustRet = true @@ -1197,161 +1222,161 @@ yydefault: yyVAL.expr = yyDollar[2].expr yyVAL.expr.SetLine(yyDollar[1].token.Pos.Line) } - case 72: + case 74: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:407 +//line parser.go.y:415 { yyDollar[2].expr.(*ast.FuncCallExpr).AdjustRet = true yyVAL.expr = yyDollar[2].expr } - 
case 73: + case 75: yyDollar = yyS[yypt-2 : yypt+1] -//line parser.go.y:413 +//line parser.go.y:421 { yyVAL.expr = &ast.FuncCallExpr{Func: yyDollar[1].expr, Args: yyDollar[2].exprlist} yyVAL.expr.SetLine(yyDollar[1].expr.Line()) } - case 74: + case 76: yyDollar = yyS[yypt-4 : yypt+1] -//line parser.go.y:417 +//line parser.go.y:425 { yyVAL.expr = &ast.FuncCallExpr{Method: yyDollar[3].token.Str, Receiver: yyDollar[1].expr, Args: yyDollar[4].exprlist} yyVAL.expr.SetLine(yyDollar[1].expr.Line()) } - case 75: + case 77: yyDollar = yyS[yypt-2 : yypt+1] -//line parser.go.y:423 +//line parser.go.y:431 { if yylex.(*Lexer).PNewLine { yylex.(*Lexer).TokenError(yyDollar[1].token, "ambiguous syntax (function call x new statement)") } yyVAL.exprlist = []ast.Expr{} } - case 76: + case 78: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:429 +//line parser.go.y:437 { if yylex.(*Lexer).PNewLine { yylex.(*Lexer).TokenError(yyDollar[1].token, "ambiguous syntax (function call x new statement)") } yyVAL.exprlist = yyDollar[2].exprlist } - case 77: + case 79: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:435 +//line parser.go.y:443 { yyVAL.exprlist = []ast.Expr{yyDollar[1].expr} } - case 78: + case 80: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:438 +//line parser.go.y:446 { yyVAL.exprlist = []ast.Expr{yyDollar[1].expr} } - case 79: + case 81: yyDollar = yyS[yypt-2 : yypt+1] -//line parser.go.y:443 +//line parser.go.y:451 { yyVAL.expr = &ast.FunctionExpr{ParList: yyDollar[2].funcexpr.ParList, Stmts: yyDollar[2].funcexpr.Stmts} yyVAL.expr.SetLine(yyDollar[1].token.Pos.Line) yyVAL.expr.SetLastLine(yyDollar[2].funcexpr.LastLine()) } - case 80: + case 82: yyDollar = yyS[yypt-5 : yypt+1] -//line parser.go.y:450 +//line parser.go.y:458 { yyVAL.funcexpr = &ast.FunctionExpr{ParList: yyDollar[2].parlist, Stmts: yyDollar[4].stmts} yyVAL.funcexpr.SetLine(yyDollar[1].token.Pos.Line) yyVAL.funcexpr.SetLastLine(yyDollar[5].token.Pos.Line) } - case 81: + case 83: yyDollar = yyS[yypt-4 : yypt+1] -//line parser.go.y:455 +//line parser.go.y:463 { yyVAL.funcexpr = &ast.FunctionExpr{ParList: &ast.ParList{HasVargs: false, Names: []string{}}, Stmts: yyDollar[3].stmts} yyVAL.funcexpr.SetLine(yyDollar[1].token.Pos.Line) yyVAL.funcexpr.SetLastLine(yyDollar[4].token.Pos.Line) } - case 82: + case 84: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:462 +//line parser.go.y:470 { yyVAL.parlist = &ast.ParList{HasVargs: true, Names: []string{}} } - case 83: + case 85: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:465 +//line parser.go.y:473 { yyVAL.parlist = &ast.ParList{HasVargs: false, Names: []string{}} yyVAL.parlist.Names = append(yyVAL.parlist.Names, yyDollar[1].namelist...) } - case 84: + case 86: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:469 +//line parser.go.y:477 { yyVAL.parlist = &ast.ParList{HasVargs: true, Names: []string{}} yyVAL.parlist.Names = append(yyVAL.parlist.Names, yyDollar[1].namelist...) 
} - case 85: + case 87: yyDollar = yyS[yypt-2 : yypt+1] -//line parser.go.y:476 +//line parser.go.y:484 { yyVAL.expr = &ast.TableExpr{Fields: []*ast.Field{}} yyVAL.expr.SetLine(yyDollar[1].token.Pos.Line) } - case 86: + case 88: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:480 +//line parser.go.y:488 { yyVAL.expr = &ast.TableExpr{Fields: yyDollar[2].fieldlist} yyVAL.expr.SetLine(yyDollar[1].token.Pos.Line) } - case 87: + case 89: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:487 +//line parser.go.y:495 { yyVAL.fieldlist = []*ast.Field{yyDollar[1].field} } - case 88: + case 90: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:490 +//line parser.go.y:498 { yyVAL.fieldlist = append(yyDollar[1].fieldlist, yyDollar[3].field) } - case 89: + case 91: yyDollar = yyS[yypt-2 : yypt+1] -//line parser.go.y:493 +//line parser.go.y:501 { yyVAL.fieldlist = yyDollar[1].fieldlist } - case 90: + case 92: yyDollar = yyS[yypt-3 : yypt+1] -//line parser.go.y:498 +//line parser.go.y:506 { yyVAL.field = &ast.Field{Key: &ast.StringExpr{Value: yyDollar[1].token.Str}, Value: yyDollar[3].expr} yyVAL.field.Key.SetLine(yyDollar[1].token.Pos.Line) } - case 91: + case 93: yyDollar = yyS[yypt-5 : yypt+1] -//line parser.go.y:502 +//line parser.go.y:510 { yyVAL.field = &ast.Field{Key: yyDollar[2].expr, Value: yyDollar[5].expr} } - case 92: + case 94: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:505 +//line parser.go.y:513 { yyVAL.field = &ast.Field{Value: yyDollar[1].expr} } - case 93: + case 95: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:510 +//line parser.go.y:518 { yyVAL.fieldsep = "," } - case 94: + case 96: yyDollar = yyS[yypt-1 : yypt+1] -//line parser.go.y:513 +//line parser.go.y:521 { yyVAL.fieldsep = ";" } diff --git a/vendor/github.com/yuin/gopher-lua/parse/parser.go.y b/vendor/github.com/yuin/gopher-lua/parse/parser.go.y index 9a9f831e611..52bcd55fab4 100644 --- a/vendor/github.com/yuin/gopher-lua/parse/parser.go.y +++ b/vendor/github.com/yuin/gopher-lua/parse/parser.go.y @@ -52,10 +52,10 @@ import ( } /* Reserved words */ -%token TAnd TBreak TDo TElse TElseIf TEnd TFalse TFor TFunction TIf TIn TLocal TNil TNot TOr TReturn TRepeat TThen TTrue TUntil TWhile +%token TAnd TBreak TDo TElse TElseIf TEnd TFalse TFor TFunction TIf TIn TLocal TNil TNot TOr TReturn TRepeat TThen TTrue TUntil TWhile TGoto /* Literals */ -%token TEqeq TNeq TLte TGte T2Comma T3Comma TIdent TNumber TString '{' '(' +%token TEqeq TNeq TLte TGte T2Comma T3Comma T2Colon TIdent TNumber TString '{' '(' /* Operators */ %left TOr @@ -187,6 +187,14 @@ stat: TLocal namelist { $$ = &ast.LocalAssignStmt{Names: $2, Exprs:[]ast.Expr{}} $$.SetLine($1.Pos.Line) + } | + T2Colon TIdent T2Colon { + $$ = &ast.LabelStmt{Name: $2.Str} + $$.SetLine($1.Pos.Line) + } | + TGoto TIdent { + $$ = &ast.GotoStmt{Label: $2.Str} + $$.SetLine($1.Pos.Line) } elseifs: diff --git a/vendor/github.com/yuin/gopher-lua/state.go b/vendor/github.com/yuin/gopher-lua/state.go index a1ee672e9cb..292f93b48ad 100644 --- a/vendor/github.com/yuin/gopher-lua/state.go +++ b/vendor/github.com/yuin/gopher-lua/state.go @@ -402,24 +402,25 @@ func (rg *registry) forceResize(newSize int) { copy(newSlice, rg.array[:rg.top]) // should we copy the area beyond top? there shouldn't be any valid values there so it shouldn't be necessary. 
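
Earlier in this patch the lexer grows the Lua 5.2 surface syntax: goto joins the reserved-word table, and ':' is disambiguated from '::' (T2Colon) with one character of lookahead before the parser's new T2Colon TIdent T2Colon and TGoto TIdent productions consume them. The same peek-then-consume shape, reduced to a standalone scanner — scanner, next and peek here are invented for the illustration, not the library's Scanner type:

package main

import "fmt"

type scanner struct {
	src []byte
	pos int
}

// next consumes and returns one byte, or 0 at end of input.
func (s *scanner) next() byte {
	if s.pos >= len(s.src) {
		return 0
	}
	ch := s.src[s.pos]
	s.pos++
	return ch
}

// peek looks one byte ahead without consuming it.
func (s *scanner) peek() byte {
	if s.pos >= len(s.src) {
		return 0
	}
	return s.src[s.pos]
}

// scanColon returns "::" when a second colon follows, else ":".
// This mirrors the maximal-munch handling of two-character operators.
func (s *scanner) scanColon() string {
	if s.peek() == ':' {
		s.next() // consume the second ':'
		return "::"
	}
	return ":"
}

func main() {
	s := &scanner{src: []byte("::done::")}
	for {
		ch := s.next()
		if ch == 0 {
			break
		}
		if ch == ':' {
			fmt.Println("token:", s.scanColon())
		} else {
			fmt.Printf("char: %c\n", ch)
		}
	}
}
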
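
From here down, state.go and vm.go are dominated by machine-generated duplication: the registry's Set, SetNumber and SetTop bodies are bracketed with +inline-start/+inline-end markers, and a source-level inliner (the recurring "this section is inlined by go-inline" comments) pastes those bodies into every opcode handler, trading source size for the elimination of a function call per VM instruction. A toy version of that transformation, under made-up names (stack, set) rather than the library's real registry:

package main

import "fmt"

type stack struct {
	array []int
	top   int
}

// set is the out-of-line helper; the generator copies its body verbatim
// into each call site between the inline markers.
func (s *stack) set(reg, val int) {
	if reg >= len(s.array) { // checkSize: grow the backing array to fit
		bigger := make([]int, reg+1, 2*(reg+1))
		copy(bigger, s.array)
		s.array = bigger
	}
	s.array[reg] = val
	if reg >= s.top {
		s.top = reg + 1
	}
}

func main() {
	s := &stack{}
	s.set(0, 42) // call form: one function call per VM instruction

	// Inlined form, as the generator splices it into an opcode body; the
	// extra braces keep the helper's locals from colliding with the host's.
	{
		reg, val := 1, 43
		if reg >= len(s.array) {
			bigger := make([]int, reg+1, 2*(reg+1))
			copy(bigger, s.array)
			s.array = bigger
		}
		s.array[reg] = val
		if reg >= s.top {
			s.top = reg + 1
		}
	}

	fmt.Println(s.array, s.top) // [42 43] 2
}
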
rg.array = newSlice } -func (rg *registry) SetTop(top int) { + +func (rg *registry) SetTop(topi int) { // +inline-start // this section is inlined by go-inline // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' { - requiredSize := top + requiredSize := topi if requiredSize > cap(rg.array) { rg.resize(requiredSize) } } - oldtop := rg.top - rg.top = top - for i := oldtop; i < rg.top; i++ { + oldtopi := rg.top + rg.top = topi + for i := oldtopi; i < rg.top; i++ { rg.array[i] = LNil } // values beyond top don't need to be valid LValues, so setting them to nil is fine // setting them to nil rather than LNil lets us invoke the golang memclr opto - if rg.top < oldtop { - nilRange := rg.array[rg.top:oldtop] + if rg.top < oldtopi { + nilRange := rg.array[rg.top:oldtopi] for i := range nilRange { nilRange[i] = nil } @@ -427,7 +428,7 @@ func (rg *registry) SetTop(top int) { //for i := rg.top; i < oldtop; i++ { // rg.array[i] = LNil //} -} +} // +inline-end func (rg *registry) Top() int { return rg.top @@ -530,19 +531,73 @@ func (rg *registry) FillNil(regm, n int) { // +inline-start func (rg *registry) Insert(value LValue, reg int) { top := rg.Top() if reg >= top { - rg.Set(reg, value) + // this section is inlined by go-inline + // source function is 'func (rg *registry) Set(regi int, vali LValue) ' in '_state.go' + { + regi := reg + vali := value + newSize := regi + 1 + // this section is inlined by go-inline + // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' + { + requiredSize := newSize + if requiredSize > cap(rg.array) { + rg.resize(requiredSize) + } + } + rg.array[regi] = vali + if regi >= rg.top { + rg.top = regi + 1 + } + } return } top-- for ; top >= reg; top-- { // FIXME consider using copy() here if Insert() is called enough - rg.Set(top+1, rg.Get(top)) + // this section is inlined by go-inline + // source function is 'func (rg *registry) Set(regi int, vali LValue) ' in '_state.go' + { + regi := top + 1 + vali := rg.Get(top) + newSize := regi + 1 + // this section is inlined by go-inline + // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' + { + requiredSize := newSize + if requiredSize > cap(rg.array) { + rg.resize(requiredSize) + } + } + rg.array[regi] = vali + if regi >= rg.top { + rg.top = regi + 1 + } + } + } + // this section is inlined by go-inline + // source function is 'func (rg *registry) Set(regi int, vali LValue) ' in '_state.go' + { + regi := reg + vali := value + newSize := regi + 1 + // this section is inlined by go-inline + // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' + { + requiredSize := newSize + if requiredSize > cap(rg.array) { + rg.resize(requiredSize) + } + } + rg.array[regi] = vali + if regi >= rg.top { + rg.top = regi + 1 + } } - rg.Set(reg, value) } -func (rg *registry) Set(reg int, val LValue) { - newSize := reg + 1 +func (rg *registry) Set(regi int, vali LValue) { // +inline-start + newSize := regi + 1 // this section is inlined by go-inline // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' { @@ -551,14 +606,14 @@ func (rg *registry) Set(reg int, val LValue) { rg.resize(requiredSize) } } - rg.array[reg] = val - if reg >= rg.top { - rg.top = reg + 1 + rg.array[regi] = vali + if regi >= rg.top { + rg.top = regi + 1 } -} +} // +inline-end -func (rg *registry) SetNumber(reg int, val LNumber) { - newSize := reg + 1 +func (rg *registry) SetNumber(regi int, vali LNumber) { // 
+inline-start + newSize := regi + 1 // this section is inlined by go-inline // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' { @@ -567,11 +622,11 @@ func (rg *registry) SetNumber(reg int, val LNumber) { rg.resize(requiredSize) } } - rg.array[reg] = rg.alloc.LNumber2I(val) - if reg >= rg.top { - rg.top = reg + 1 + rg.array[regi] = rg.alloc.LNumber2I(vali) + if regi >= rg.top { + rg.top = regi + 1 } -} +} // +inline-end func (rg *registry) IsFull() bool { return rg.top >= cap(rg.array) @@ -815,6 +870,9 @@ func (ls *LState) isStarted() bool { func (ls *LState) kill() { ls.Dead = true + if ls.ctxCancelFn != nil { + ls.ctxCancelFn() + } } func (ls *LState) indexToReg(idx int) int { @@ -1561,6 +1619,7 @@ func (ls *LState) NewThread() (*LState, context.CancelFunc) { if ls.ctx != nil { thread.mainLoop = mainLoopWithContext thread.ctx, f = context.WithCancel(ls.ctx) + thread.ctxCancelFn = f } return thread, f } @@ -2010,6 +2069,9 @@ func (ls *LState) PCall(nargs, nret int, errfunc *LFunction) (err error) { err = rcv.(*ApiError) err.(*ApiError).StackTrace = ls.stackTrace(0) } + ls.stack.SetSp(sp) + ls.currentFrame = ls.stack.Last() + ls.reg.SetTop(base) } }() ls.Call(1, 1) diff --git a/vendor/github.com/yuin/gopher-lua/value.go b/vendor/github.com/yuin/gopher-lua/value.go index 0d4af80816e..4156e9d5b26 100644 --- a/vendor/github.com/yuin/gopher-lua/value.go +++ b/vendor/github.com/yuin/gopher-lua/value.go @@ -29,12 +29,6 @@ func (vt LValueType) String() string { type LValue interface { String() string Type() LValueType - // to reduce `runtime.assertI2T2` costs, this method should be used instead of the type assertion in heavy paths(typically inside the VM). - assertFloat64() (float64, bool) - // to reduce `runtime.assertI2T2` costs, this method should be used instead of the type assertion in heavy paths(typically inside the VM). - assertString() (string, bool) - // to reduce `runtime.assertI2T2` costs, this method should be used instead of the type assertion in heavy paths(typically inside the VM). - assertFunction() (*LFunction, bool) } // LVIsFalse returns true if a given LValue is a nil or false otherwise false. 
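
A small but consequential fix in the state.go hunk above: NewThread now stores the context.CancelFunc it creates in the new ctxCancelFn field, and kill invokes it, so tearing down a coroutine also cancels the context derived from its parent rather than leaking it. A minimal sketch of the idea — thread and newThread are hypothetical names, not the LState API:

package main

import (
	"context"
	"fmt"
)

type thread struct {
	ctx    context.Context
	cancel context.CancelFunc
	dead   bool
}

// newThread derives a child context and keeps its CancelFunc for teardown.
func newThread(parent context.Context) *thread {
	ctx, cancel := context.WithCancel(parent)
	return &thread{ctx: ctx, cancel: cancel}
}

// kill marks the thread dead and, crucially, cancels its context --
// without this, anything blocked on t.ctx.Done() would wait forever.
func (t *thread) kill() {
	t.dead = true
	if t.cancel != nil {
		t.cancel()
	}
}

func main() {
	t := newThread(context.Background())
	t.kill()
	<-t.ctx.Done()           // returns immediately: kill cancelled the context
	fmt.Println(t.ctx.Err()) // context.Canceled
}
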
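
The value.go hunks around this point delete assertFloat64, assertString and assertFunction from the LValue interface (just above) and from every implementation (the hunks that follow). The trio existed to sidestep runtime.assertI2T2 costs in hot paths, at the price of three stub methods on every value type; callers such as the debugTraceback change earlier in this patch now use plain type assertions instead, which evidently perform well enough on current Go toolchains to retire the workaround. Roughly, under a pared-down stand-in interface — Value and Str below are invented, not gopher-lua's types:

package main

import "fmt"

type Value interface{ String() string }

type Str string

func (s Str) String() string { return string(s) }

// Before: the interface carried assertString() (string, bool) and every
// implementation stubbed it. After: one assertion at the call site suffices.
func message(v Value) string {
	if s, ok := v.(Str); ok { // direct assertion against the concrete type
		return string(s)
	}
	return v.String() // fall back for non-string values
}

func main() {
	fmt.Println(message(Str("traceback"))) // traceback
}
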
@@ -80,11 +74,8 @@ func LVAsNumber(v LValue) LNumber { type LNilType struct{} -func (nl *LNilType) String() string { return "nil" } -func (nl *LNilType) Type() LValueType { return LTNil } -func (nl *LNilType) assertFloat64() (float64, bool) { return 0, false } -func (nl *LNilType) assertString() (string, bool) { return "", false } -func (nl *LNilType) assertFunction() (*LFunction, bool) { return nil, false } +func (nl *LNilType) String() string { return "nil" } +func (nl *LNilType) Type() LValueType { return LTNil } var LNil = LValue(&LNilType{}) @@ -96,21 +87,15 @@ func (bl LBool) String() string { } return "false" } -func (bl LBool) Type() LValueType { return LTBool } -func (bl LBool) assertFloat64() (float64, bool) { return 0, false } -func (bl LBool) assertString() (string, bool) { return "", false } -func (bl LBool) assertFunction() (*LFunction, bool) { return nil, false } +func (bl LBool) Type() LValueType { return LTBool } var LTrue = LBool(true) var LFalse = LBool(false) type LString string -func (st LString) String() string { return string(st) } -func (st LString) Type() LValueType { return LTString } -func (st LString) assertFloat64() (float64, bool) { return 0, false } -func (st LString) assertString() (string, bool) { return string(st), true } -func (st LString) assertFunction() (*LFunction, bool) { return nil, false } +func (st LString) String() string { return string(st) } +func (st LString) Type() LValueType { return LTString } // fmt.Formatter interface func (st LString) Format(f fmt.State, c rune) { @@ -133,10 +118,7 @@ func (nm LNumber) String() string { return fmt.Sprint(float64(nm)) } -func (nm LNumber) Type() LValueType { return LTNumber } -func (nm LNumber) assertFloat64() (float64, bool) { return float64(nm), true } -func (nm LNumber) assertString() (string, bool) { return "", false } -func (nm LNumber) assertFunction() (*LFunction, bool) { return nil, false } +func (nm LNumber) Type() LValueType { return LTNumber } // fmt.Formatter interface func (nm LNumber) Format(f fmt.State, c rune) { @@ -168,11 +150,8 @@ type LTable struct { k2i map[LValue]int } -func (tb *LTable) String() string { return fmt.Sprintf("table: %p", tb) } -func (tb *LTable) Type() LValueType { return LTTable } -func (tb *LTable) assertFloat64() (float64, bool) { return 0, false } -func (tb *LTable) assertString() (string, bool) { return "", false } -func (tb *LTable) assertFunction() (*LFunction, bool) { return nil, false } +func (tb *LTable) String() string { return fmt.Sprintf("table: %p", tb) } +func (tb *LTable) Type() LValueType { return LTTable } type LFunction struct { IsG bool @@ -183,11 +162,8 @@ type LFunction struct { } type LGFunction func(*LState) int -func (fn *LFunction) String() string { return fmt.Sprintf("function: %p", fn) } -func (fn *LFunction) Type() LValueType { return LTFunction } -func (fn *LFunction) assertFloat64() (float64, bool) { return 0, false } -func (fn *LFunction) assertString() (string, bool) { return "", false } -func (fn *LFunction) assertFunction() (*LFunction, bool) { return fn, true } +func (fn *LFunction) String() string { return fmt.Sprintf("function: %p", fn) } +func (fn *LFunction) Type() LValueType { return LTFunction } type Global struct { MainThread *LState @@ -218,13 +194,11 @@ type LState struct { hasErrorFunc bool mainLoop func(*LState, *callFrame) ctx context.Context + ctxCancelFn context.CancelFunc } -func (ls *LState) String() string { return fmt.Sprintf("thread: %p", ls) } -func (ls *LState) Type() LValueType { return LTThread } -func (ls 
*LState) assertFloat64() (float64, bool) { return 0, false } -func (ls *LState) assertString() (string, bool) { return "", false } -func (ls *LState) assertFunction() (*LFunction, bool) { return nil, false } +func (ls *LState) String() string { return fmt.Sprintf("thread: %p", ls) } +func (ls *LState) Type() LValueType { return LTThread } type LUserData struct { Value interface{} @@ -232,16 +206,10 @@ type LUserData struct { Metatable LValue } -func (ud *LUserData) String() string { return fmt.Sprintf("userdata: %p", ud) } -func (ud *LUserData) Type() LValueType { return LTUserData } -func (ud *LUserData) assertFloat64() (float64, bool) { return 0, false } -func (ud *LUserData) assertString() (string, bool) { return "", false } -func (ud *LUserData) assertFunction() (*LFunction, bool) { return nil, false } +func (ud *LUserData) String() string { return fmt.Sprintf("userdata: %p", ud) } +func (ud *LUserData) Type() LValueType { return LTUserData } type LChannel chan LValue -func (ch LChannel) String() string { return fmt.Sprintf("channel: %p", ch) } -func (ch LChannel) Type() LValueType { return LTChannel } -func (ch LChannel) assertFloat64() (float64, bool) { return 0, false } -func (ch LChannel) assertString() (string, bool) { return "", false } -func (ch LChannel) assertFunction() (*LFunction, bool) { return nil, false } +func (ch LChannel) String() string { return fmt.Sprintf("channel: %p", ch) } +func (ch LChannel) Type() LValueType { return LTChannel } diff --git a/vendor/github.com/yuin/gopher-lua/vm.go b/vendor/github.com/yuin/gopher-lua/vm.go index aaa04dc9aad..97335a75098 100644 --- a/vendor/github.com/yuin/gopher-lua/vm.go +++ b/vendor/github.com/yuin/gopher-lua/vm.go @@ -307,7 +307,27 @@ func init() { A := int(inst>>18) & 0xff //GETA RA := lbase + A B := int(inst & 0x1ff) //GETB - reg.Set(RA, reg.Get(lbase+B)) + v := reg.Get(lbase + B) + // this section is inlined by go-inline + // source function is 'func (rg *registry) Set(regi int, vali LValue) ' in '_state.go' + { + rg := reg + regi := RA + vali := v + newSize := regi + 1 + // this section is inlined by go-inline + // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' + { + requiredSize := newSize + if requiredSize > cap(rg.array) { + rg.resize(requiredSize) + } + } + rg.array[regi] = vali + if regi >= rg.top { + rg.top = regi + 1 + } + } return 0 }, func(L *LState, inst uint32, baseframe *callFrame) int { //OP_MOVEN @@ -317,7 +337,27 @@ func init() { A := int(inst>>18) & 0xff //GETA B := int(inst & 0x1ff) //GETB C := int(inst>>9) & 0x1ff //GETC - reg.Set(lbase+A, reg.Get(lbase+B)) + v := reg.Get(lbase + B) + // this section is inlined by go-inline + // source function is 'func (rg *registry) Set(regi int, vali LValue) ' in '_state.go' + { + rg := reg + regi := lbase + A + vali := v + newSize := regi + 1 + // this section is inlined by go-inline + // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' + { + requiredSize := newSize + if requiredSize > cap(rg.array) { + rg.resize(requiredSize) + } + } + rg.array[regi] = vali + if regi >= rg.top { + rg.top = regi + 1 + } + } code := cf.Fn.Proto.Code pc := cf.Pc for i := 0; i < C; i++ { @@ -325,7 +365,27 @@ func init() { pc++ A = int(inst>>18) & 0xff //GETA B = int(inst & 0x1ff) //GETB - reg.Set(lbase+A, reg.Get(lbase+B)) + v := reg.Get(lbase + B) + // this section is inlined by go-inline + // source function is 'func (rg *registry) Set(regi int, vali LValue) ' in '_state.go' + { + rg := reg + regi := lbase + A + 
vali := v + newSize := regi + 1 + // this section is inlined by go-inline + // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' + { + requiredSize := newSize + if requiredSize > cap(rg.array) { + rg.resize(requiredSize) + } + } + rg.array[regi] = vali + if regi >= rg.top { + rg.top = regi + 1 + } + } } cf.Pc = pc return 0 @@ -337,7 +397,27 @@ func init() { A := int(inst>>18) & 0xff //GETA RA := lbase + A Bx := int(inst & 0x3ffff) //GETBX - reg.Set(RA, cf.Fn.Proto.Constants[Bx]) + v := cf.Fn.Proto.Constants[Bx] + // this section is inlined by go-inline + // source function is 'func (rg *registry) Set(regi int, vali LValue) ' in '_state.go' + { + rg := reg + regi := RA + vali := v + newSize := regi + 1 + // this section is inlined by go-inline + // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' + { + requiredSize := newSize + if requiredSize > cap(rg.array) { + rg.resize(requiredSize) + } + } + rg.array[regi] = vali + if regi >= rg.top { + rg.top = regi + 1 + } + } return 0 }, func(L *LState, inst uint32, baseframe *callFrame) int { //OP_LOADBOOL @@ -349,9 +429,47 @@ func init() { B := int(inst & 0x1ff) //GETB C := int(inst>>9) & 0x1ff //GETC if B != 0 { - reg.Set(RA, LTrue) + // this section is inlined by go-inline + // source function is 'func (rg *registry) Set(regi int, vali LValue) ' in '_state.go' + { + rg := reg + regi := RA + vali := LTrue + newSize := regi + 1 + // this section is inlined by go-inline + // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' + { + requiredSize := newSize + if requiredSize > cap(rg.array) { + rg.resize(requiredSize) + } + } + rg.array[regi] = vali + if regi >= rg.top { + rg.top = regi + 1 + } + } } else { - reg.Set(RA, LFalse) + // this section is inlined by go-inline + // source function is 'func (rg *registry) Set(regi int, vali LValue) ' in '_state.go' + { + rg := reg + regi := RA + vali := LFalse + newSize := regi + 1 + // this section is inlined by go-inline + // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' + { + requiredSize := newSize + if requiredSize > cap(rg.array) { + rg.resize(requiredSize) + } + } + rg.array[regi] = vali + if regi >= rg.top { + rg.top = regi + 1 + } + } } if C != 0 { cf.Pc++ @@ -366,7 +484,26 @@ func init() { RA := lbase + A B := int(inst & 0x1ff) //GETB for i := RA; i <= lbase+B; i++ { - reg.Set(i, LNil) + // this section is inlined by go-inline + // source function is 'func (rg *registry) Set(regi int, vali LValue) ' in '_state.go' + { + rg := reg + regi := i + vali := LNil + newSize := regi + 1 + // this section is inlined by go-inline + // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' + { + requiredSize := newSize + if requiredSize > cap(rg.array) { + rg.resize(requiredSize) + } + } + rg.array[regi] = vali + if regi >= rg.top { + rg.top = regi + 1 + } + } } return 0 }, @@ -377,7 +514,27 @@ func init() { A := int(inst>>18) & 0xff //GETA RA := lbase + A B := int(inst & 0x1ff) //GETB - reg.Set(RA, cf.Fn.Upvalues[B].Value()) + v := cf.Fn.Upvalues[B].Value() + // this section is inlined by go-inline + // source function is 'func (rg *registry) Set(regi int, vali LValue) ' in '_state.go' + { + rg := reg + regi := RA + vali := v + newSize := regi + 1 + // this section is inlined by go-inline + // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' + { + requiredSize := newSize + if requiredSize > cap(rg.array) 
{ + rg.resize(requiredSize) + } + } + rg.array[regi] = vali + if regi >= rg.top { + rg.top = regi + 1 + } + } return 0 }, func(L *LState, inst uint32, baseframe *callFrame) int { //OP_GETGLOBAL @@ -388,7 +545,27 @@ func init() { RA := lbase + A Bx := int(inst & 0x3ffff) //GETBX //reg.Set(RA, L.getField(cf.Fn.Env, cf.Fn.Proto.Constants[Bx])) - reg.Set(RA, L.getFieldString(cf.Fn.Env, cf.Fn.Proto.stringConstants[Bx])) + v := L.getFieldString(cf.Fn.Env, cf.Fn.Proto.stringConstants[Bx]) + // this section is inlined by go-inline + // source function is 'func (rg *registry) Set(regi int, vali LValue) ' in '_state.go' + { + rg := reg + regi := RA + vali := v + newSize := regi + 1 + // this section is inlined by go-inline + // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' + { + requiredSize := newSize + if requiredSize > cap(rg.array) { + rg.resize(requiredSize) + } + } + rg.array[regi] = vali + if regi >= rg.top { + rg.top = regi + 1 + } + } return 0 }, func(L *LState, inst uint32, baseframe *callFrame) int { //OP_GETTABLE @@ -399,7 +576,27 @@ func init() { RA := lbase + A B := int(inst & 0x1ff) //GETB C := int(inst>>9) & 0x1ff //GETC - reg.Set(RA, L.getField(reg.Get(lbase+B), L.rkValue(C))) + v := L.getField(reg.Get(lbase+B), L.rkValue(C)) + // this section is inlined by go-inline + // source function is 'func (rg *registry) Set(regi int, vali LValue) ' in '_state.go' + { + rg := reg + regi := RA + vali := v + newSize := regi + 1 + // this section is inlined by go-inline + // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' + { + requiredSize := newSize + if requiredSize > cap(rg.array) { + rg.resize(requiredSize) + } + } + rg.array[regi] = vali + if regi >= rg.top { + rg.top = regi + 1 + } + } return 0 }, func(L *LState, inst uint32, baseframe *callFrame) int { //OP_GETTABLEKS @@ -410,7 +607,27 @@ func init() { RA := lbase + A B := int(inst & 0x1ff) //GETB C := int(inst>>9) & 0x1ff //GETC - reg.Set(RA, L.getFieldString(reg.Get(lbase+B), L.rkString(C))) + v := L.getFieldString(reg.Get(lbase+B), L.rkString(C)) + // this section is inlined by go-inline + // source function is 'func (rg *registry) Set(regi int, vali LValue) ' in '_state.go' + { + rg := reg + regi := RA + vali := v + newSize := regi + 1 + // this section is inlined by go-inline + // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' + { + requiredSize := newSize + if requiredSize > cap(rg.array) { + rg.resize(requiredSize) + } + } + rg.array[regi] = vali + if regi >= rg.top { + rg.top = regi + 1 + } + } return 0 }, func(L *LState, inst uint32, baseframe *callFrame) int { //OP_SETGLOBAL @@ -464,7 +681,27 @@ func init() { RA := lbase + A B := int(inst & 0x1ff) //GETB C := int(inst>>9) & 0x1ff //GETC - reg.Set(RA, newLTable(B, C)) + v := newLTable(B, C) + // this section is inlined by go-inline + // source function is 'func (rg *registry) Set(regi int, vali LValue) ' in '_state.go' + { + rg := reg + regi := RA + vali := v + newSize := regi + 1 + // this section is inlined by go-inline + // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' + { + requiredSize := newSize + if requiredSize > cap(rg.array) { + rg.resize(requiredSize) + } + } + rg.array[regi] = vali + if regi >= rg.top { + rg.top = regi + 1 + } + } return 0 }, func(L *LState, inst uint32, baseframe *callFrame) int { //OP_SELF @@ -476,8 +713,47 @@ func init() { B := int(inst & 0x1ff) //GETB C := int(inst>>9) & 0x1ff //GETC selfobj := 
reg.Get(lbase + B) - reg.Set(RA, L.getFieldString(selfobj, L.rkString(C))) - reg.Set(RA+1, selfobj) + v := L.getFieldString(selfobj, L.rkString(C)) + // this section is inlined by go-inline + // source function is 'func (rg *registry) Set(regi int, vali LValue) ' in '_state.go' + { + rg := reg + regi := RA + vali := v + newSize := regi + 1 + // this section is inlined by go-inline + // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' + { + requiredSize := newSize + if requiredSize > cap(rg.array) { + rg.resize(requiredSize) + } + } + rg.array[regi] = vali + if regi >= rg.top { + rg.top = regi + 1 + } + } + // this section is inlined by go-inline + // source function is 'func (rg *registry) Set(regi int, vali LValue) ' in '_state.go' + { + rg := reg + regi := RA + 1 + vali := selfobj + newSize := regi + 1 + // this section is inlined by go-inline + // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' + { + requiredSize := newSize + if requiredSize > cap(rg.array) { + rg.resize(requiredSize) + } + } + rg.array[regi] = vali + if regi >= rg.top { + rg.top = regi + 1 + } + } return 0 }, opArith, // OP_ADD @@ -495,17 +771,74 @@ func init() { B := int(inst & 0x1ff) //GETB unaryv := L.rkValue(B) if nm, ok := unaryv.(LNumber); ok { - reg.SetNumber(RA, -nm) + // this section is inlined by go-inline + // source function is 'func (rg *registry) Set(regi int, vali LValue) ' in '_state.go' + { + rg := reg + regi := RA + vali := -nm + newSize := regi + 1 + // this section is inlined by go-inline + // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' + { + requiredSize := newSize + if requiredSize > cap(rg.array) { + rg.resize(requiredSize) + } + } + rg.array[regi] = vali + if regi >= rg.top { + rg.top = regi + 1 + } + } } else { op := L.metaOp1(unaryv, "__unm") if op.Type() == LTFunction { reg.Push(op) reg.Push(unaryv) L.Call(1, 1) - reg.Set(RA, reg.Pop()) + // this section is inlined by go-inline + // source function is 'func (rg *registry) Set(regi int, vali LValue) ' in '_state.go' + { + rg := reg + regi := RA + vali := reg.Pop() + newSize := regi + 1 + // this section is inlined by go-inline + // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' + { + requiredSize := newSize + if requiredSize > cap(rg.array) { + rg.resize(requiredSize) + } + } + rg.array[regi] = vali + if regi >= rg.top { + rg.top = regi + 1 + } + } } else if str, ok1 := unaryv.(LString); ok1 { if num, err := parseNumber(string(str)); err == nil { - reg.Set(RA, -num) + // this section is inlined by go-inline + // source function is 'func (rg *registry) Set(regi int, vali LValue) ' in '_state.go' + { + rg := reg + regi := RA + vali := -num + newSize := regi + 1 + // this section is inlined by go-inline + // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' + { + requiredSize := newSize + if requiredSize > cap(rg.array) { + rg.resize(requiredSize) + } + } + rg.array[regi] = vali + if regi >= rg.top { + rg.top = regi + 1 + } + } } else { L.RaiseError("__unm undefined") } @@ -523,9 +856,47 @@ func init() { RA := lbase + A B := int(inst & 0x1ff) //GETB if LVIsFalse(reg.Get(lbase + B)) { - reg.Set(RA, LTrue) + // this section is inlined by go-inline + // source function is 'func (rg *registry) Set(regi int, vali LValue) ' in '_state.go' + { + rg := reg + regi := RA + vali := LTrue + newSize := regi + 1 + // this section is inlined by go-inline + // source function is 
'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' + { + requiredSize := newSize + if requiredSize > cap(rg.array) { + rg.resize(requiredSize) + } + } + rg.array[regi] = vali + if regi >= rg.top { + rg.top = regi + 1 + } + } } else { - reg.Set(RA, LFalse) + // this section is inlined by go-inline + // source function is 'func (rg *registry) Set(regi int, vali LValue) ' in '_state.go' + { + rg := reg + regi := RA + vali := LFalse + newSize := regi + 1 + // this section is inlined by go-inline + // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' + { + requiredSize := newSize + if requiredSize > cap(rg.array) { + rg.resize(requiredSize) + } + } + rg.array[regi] = vali + if regi >= rg.top { + rg.top = regi + 1 + } + } } return 0 }, @@ -538,7 +909,26 @@ func init() { B := int(inst & 0x1ff) //GETB switch lv := L.rkValue(B).(type) { case LString: - reg.SetNumber(RA, LNumber(len(lv))) + // this section is inlined by go-inline + // source function is 'func (rg *registry) SetNumber(regi int, vali LNumber) ' in '_state.go' + { + rg := reg + regi := RA + vali := LNumber(len(lv)) + newSize := regi + 1 + // this section is inlined by go-inline + // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' + { + requiredSize := newSize + if requiredSize > cap(rg.array) { + rg.resize(requiredSize) + } + } + rg.array[regi] = rg.alloc.LNumber2I(vali) + if regi >= rg.top { + rg.top = regi + 1 + } + } default: op := L.metaOp1(lv, "__len") if op.Type() == LTFunction { @@ -547,12 +937,70 @@ func init() { L.Call(1, 1) ret := reg.Pop() if ret.Type() == LTNumber { - reg.SetNumber(RA, ret.(LNumber)) + v, _ := ret.(LNumber) + // this section is inlined by go-inline + // source function is 'func (rg *registry) SetNumber(regi int, vali LNumber) ' in '_state.go' + { + rg := reg + regi := RA + vali := v + newSize := regi + 1 + // this section is inlined by go-inline + // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' + { + requiredSize := newSize + if requiredSize > cap(rg.array) { + rg.resize(requiredSize) + } + } + rg.array[regi] = rg.alloc.LNumber2I(vali) + if regi >= rg.top { + rg.top = regi + 1 + } + } } else { - reg.Set(RA, ret) + // this section is inlined by go-inline + // source function is 'func (rg *registry) Set(regi int, vali LValue) ' in '_state.go' + { + rg := reg + regi := RA + vali := ret + newSize := regi + 1 + // this section is inlined by go-inline + // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' + { + requiredSize := newSize + if requiredSize > cap(rg.array) { + rg.resize(requiredSize) + } + } + rg.array[regi] = vali + if regi >= rg.top { + rg.top = regi + 1 + } + } } } else if lv.Type() == LTTable { - reg.SetNumber(RA, LNumber(lv.(*LTable).Len())) + // this section is inlined by go-inline + // source function is 'func (rg *registry) SetNumber(regi int, vali LNumber) ' in '_state.go' + { + rg := reg + regi := RA + vali := LNumber(lv.(*LTable).Len()) + newSize := regi + 1 + // this section is inlined by go-inline + // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' + { + requiredSize := newSize + if requiredSize > cap(rg.array) { + rg.resize(requiredSize) + } + } + rg.array[regi] = rg.alloc.LNumber2I(vali) + if regi >= rg.top { + rg.top = regi + 1 + } + } } else { L.RaiseError("__len undefined") } @@ -569,7 +1017,27 @@ func init() { C := int(inst>>9) & 0x1ff //GETC RC := lbase + C RB := lbase + B - 
reg.Set(RA, stringConcat(L, RC-RB+1, RC)) + v := stringConcat(L, RC-RB+1, RC) + // this section is inlined by go-inline + // source function is 'func (rg *registry) Set(regi int, vali LValue) ' in '_state.go' + { + rg := reg + regi := RA + vali := v + newSize := regi + 1 + // this section is inlined by go-inline + // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' + { + requiredSize := newSize + if requiredSize > cap(rg.array) { + rg.resize(requiredSize) + } + } + rg.array[regi] = vali + if regi >= rg.top { + rg.top = regi + 1 + } + } return 0 }, func(L *LState, inst uint32, baseframe *callFrame) int { //OP_JMP @@ -617,8 +1085,8 @@ func init() { rhs := L.rkValue(C) ret := false - if v1, ok1 := lhs.assertFloat64(); ok1 { - if v2, ok2 := rhs.assertFloat64(); ok2 { + if v1, ok1 := lhs.(LNumber); ok1 { + if v2, ok2 := rhs.(LNumber); ok2 { ret = v1 <= v2 } else { L.RaiseError("attempt to compare %v with %v", lhs.Type().String(), rhs.Type().String()) @@ -672,7 +1140,26 @@ func init() { B := int(inst & 0x1ff) //GETB C := int(inst>>9) & 0x1ff //GETC if value := reg.Get(lbase + B); LVAsBool(value) != (C == 0) { - reg.Set(RA, value) + // this section is inlined by go-inline + // source function is 'func (rg *registry) Set(regi int, vali LValue) ' in '_state.go' + { + rg := reg + regi := RA + vali := value + newSize := regi + 1 + // this section is inlined by go-inline + // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' + { + requiredSize := newSize + if requiredSize > cap(rg.array) { + rg.resize(requiredSize) + } + } + rg.array[regi] = vali + if regi >= rg.top { + rg.top = regi + 1 + } + } } else { cf.Pc++ } @@ -694,7 +1181,7 @@ func init() { nret := C - 1 var callable *LFunction var meta bool - if fn, ok := lv.assertFunction(); ok { + if fn, ok := lv.(*LFunction); ok { callable = fn meta = false } else { @@ -837,7 +1324,7 @@ func init() { lv := reg.Get(RA) var callable *LFunction var meta bool - if fn, ok := lv.assertFunction(); ok { + if fn, ok := lv.(*LFunction); ok { callable = fn meta = false } else { @@ -1308,17 +1795,85 @@ func init() { lbase := cf.LocalBase A := int(inst>>18) & 0xff //GETA RA := lbase + A - if init, ok1 := reg.Get(RA).assertFloat64(); ok1 { - if limit, ok2 := reg.Get(RA + 1).assertFloat64(); ok2 { - if step, ok3 := reg.Get(RA + 2).assertFloat64(); ok3 { + if init, ok1 := reg.Get(RA).(LNumber); ok1 { + if limit, ok2 := reg.Get(RA + 1).(LNumber); ok2 { + if step, ok3 := reg.Get(RA + 2).(LNumber); ok3 { init += step - reg.SetNumber(RA, LNumber(init)) + v := LNumber(init) + // this section is inlined by go-inline + // source function is 'func (rg *registry) SetNumber(regi int, vali LNumber) ' in '_state.go' + { + rg := reg + regi := RA + vali := v + newSize := regi + 1 + // this section is inlined by go-inline + // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' + { + requiredSize := newSize + if requiredSize > cap(rg.array) { + rg.resize(requiredSize) + } + } + rg.array[regi] = rg.alloc.LNumber2I(vali) + if regi >= rg.top { + rg.top = regi + 1 + } + } if (step > 0 && init <= limit) || (step <= 0 && init >= limit) { Sbx := int(inst&0x3ffff) - opMaxArgSbx //GETSBX cf.Pc += Sbx - reg.SetNumber(RA+3, LNumber(init)) + // this section is inlined by go-inline + // source function is 'func (rg *registry) SetNumber(regi int, vali LNumber) ' in '_state.go' + { + rg := reg + regi := RA + 3 + vali := v + newSize := regi + 1 + // this section is inlined by go-inline + // source 
function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' + { + requiredSize := newSize + if requiredSize > cap(rg.array) { + rg.resize(requiredSize) + } + } + rg.array[regi] = rg.alloc.LNumber2I(vali) + if regi >= rg.top { + rg.top = regi + 1 + } + } } else { - reg.SetTop(RA + 1) + // this section is inlined by go-inline + // source function is 'func (rg *registry) SetTop(topi int) ' in '_state.go' + { + rg := reg + topi := RA + 1 + // this section is inlined by go-inline + // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' + { + requiredSize := topi + if requiredSize > cap(rg.array) { + rg.resize(requiredSize) + } + } + oldtopi := rg.top + rg.top = topi + for i := oldtopi; i < rg.top; i++ { + rg.array[i] = LNil + } + // values beyond top don't need to be valid LValues, so setting them to nil is fine + // setting them to nil rather than LNil lets us invoke the golang memclr opto + if rg.top < oldtopi { + nilRange := rg.array[rg.top:oldtopi] + for i := range nilRange { + nilRange[i] = nil + } + } + //for i := rg.top; i < oldtop; i++ { + // rg.array[i] = LNil + //} + } } } else { L.RaiseError("for statement step must be a number") @@ -1338,9 +1893,28 @@ func init() { A := int(inst>>18) & 0xff //GETA RA := lbase + A Sbx := int(inst&0x3ffff) - opMaxArgSbx //GETSBX - if init, ok1 := reg.Get(RA).assertFloat64(); ok1 { - if step, ok2 := reg.Get(RA + 2).assertFloat64(); ok2 { - reg.SetNumber(RA, LNumber(init-step)) + if init, ok1 := reg.Get(RA).(LNumber); ok1 { + if step, ok2 := reg.Get(RA + 2).(LNumber); ok2 { + // this section is inlined by go-inline + // source function is 'func (rg *registry) SetNumber(regi int, vali LNumber) ' in '_state.go' + { + rg := reg + regi := RA + vali := LNumber(init - step) + newSize := regi + 1 + // this section is inlined by go-inline + // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' + { + requiredSize := newSize + if requiredSize > cap(rg.array) { + rg.resize(requiredSize) + } + } + rg.array[regi] = rg.alloc.LNumber2I(vali) + if regi >= rg.top { + rg.top = regi + 1 + } + } } else { L.RaiseError("for statement step must be a number") } @@ -1358,13 +1932,118 @@ func init() { RA := lbase + A C := int(inst>>9) & 0x1ff //GETC nret := C - reg.SetTop(RA + 3 + 2) - reg.Set(RA+3+2, reg.Get(RA+2)) - reg.Set(RA+3+1, reg.Get(RA+1)) - reg.Set(RA+3, reg.Get(RA)) + // this section is inlined by go-inline + // source function is 'func (rg *registry) SetTop(topi int) ' in '_state.go' + { + rg := reg + topi := RA + 3 + 2 + // this section is inlined by go-inline + // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' + { + requiredSize := topi + if requiredSize > cap(rg.array) { + rg.resize(requiredSize) + } + } + oldtopi := rg.top + rg.top = topi + for i := oldtopi; i < rg.top; i++ { + rg.array[i] = LNil + } + // values beyond top don't need to be valid LValues, so setting them to nil is fine + // setting them to nil rather than LNil lets us invoke the golang memclr opto + if rg.top < oldtopi { + nilRange := rg.array[rg.top:oldtopi] + for i := range nilRange { + nilRange[i] = nil + } + } + //for i := rg.top; i < oldtop; i++ { + // rg.array[i] = LNil + //} + } + // this section is inlined by go-inline + // source function is 'func (rg *registry) Set(regi int, vali LValue) ' in '_state.go' + { + rg := reg + regi := RA + 3 + 2 + vali := reg.Get(RA + 2) + newSize := regi + 1 + // this section is inlined by go-inline + // source function is 'func (rg 
*registry) checkSize(requiredSize int) ' in '_state.go' + { + requiredSize := newSize + if requiredSize > cap(rg.array) { + rg.resize(requiredSize) + } + } + rg.array[regi] = vali + if regi >= rg.top { + rg.top = regi + 1 + } + } + // this section is inlined by go-inline + // source function is 'func (rg *registry) Set(regi int, vali LValue) ' in '_state.go' + { + rg := reg + regi := RA + 3 + 1 + vali := reg.Get(RA + 1) + newSize := regi + 1 + // this section is inlined by go-inline + // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' + { + requiredSize := newSize + if requiredSize > cap(rg.array) { + rg.resize(requiredSize) + } + } + rg.array[regi] = vali + if regi >= rg.top { + rg.top = regi + 1 + } + } + // this section is inlined by go-inline + // source function is 'func (rg *registry) Set(regi int, vali LValue) ' in '_state.go' + { + rg := reg + regi := RA + 3 + vali := reg.Get(RA) + newSize := regi + 1 + // this section is inlined by go-inline + // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' + { + requiredSize := newSize + if requiredSize > cap(rg.array) { + rg.resize(requiredSize) + } + } + rg.array[regi] = vali + if regi >= rg.top { + rg.top = regi + 1 + } + } L.callR(2, nret, RA+3) if value := reg.Get(RA + 3); value != LNil { - reg.Set(RA+2, value) + // this section is inlined by go-inline + // source function is 'func (rg *registry) Set(regi int, vali LValue) ' in '_state.go' + { + rg := reg + regi := RA + 2 + vali := value + newSize := regi + 1 + // this section is inlined by go-inline + // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' + { + requiredSize := newSize + if requiredSize > cap(rg.array) { + rg.resize(requiredSize) + } + } + rg.array[regi] = vali + if regi >= rg.top { + rg.top = regi + 1 + } + } pc := cf.Fn.Proto.Code[cf.Pc] cf.Pc += int(pc&0x3ffff) - opMaxArgSbx } @@ -1430,7 +2109,26 @@ func init() { Bx := int(inst & 0x3ffff) //GETBX proto := cf.Fn.Proto.FunctionPrototypes[Bx] closure := newLFunctionL(proto, cf.Fn.Env, int(proto.NumUpvalues)) - reg.Set(RA, closure) + // this section is inlined by go-inline + // source function is 'func (rg *registry) Set(regi int, vali LValue) ' in '_state.go' + { + rg := reg + regi := RA + vali := closure + newSize := regi + 1 + // this section is inlined by go-inline + // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' + { + requiredSize := newSize + if requiredSize > cap(rg.array) { + rg.resize(requiredSize) + } + } + rg.array[regi] = vali + if regi >= rg.top { + rg.top = regi + 1 + } + } for i := 0; i < int(proto.NumUpvalues); i++ { inst = cf.Fn.Proto.Code[cf.Pc] cf.Pc++ @@ -1519,12 +2217,52 @@ func opArith(L *LState, inst uint32, baseframe *callFrame) int { //OP_ADD, OP_SU C := int(inst>>9) & 0x1ff //GETC lhs := L.rkValue(B) rhs := L.rkValue(C) - v1, ok1 := lhs.assertFloat64() - v2, ok2 := rhs.assertFloat64() + v1, ok1 := lhs.(LNumber) + v2, ok2 := rhs.(LNumber) if ok1 && ok2 { - reg.SetNumber(RA, numberArith(L, opcode, LNumber(v1), LNumber(v2))) + v := numberArith(L, opcode, LNumber(v1), LNumber(v2)) + // this section is inlined by go-inline + // source function is 'func (rg *registry) SetNumber(regi int, vali LNumber) ' in '_state.go' + { + rg := reg + regi := RA + vali := v + newSize := regi + 1 + // this section is inlined by go-inline + // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' + { + requiredSize := newSize + if requiredSize > 
cap(rg.array) { + rg.resize(requiredSize) + } + } + rg.array[regi] = rg.alloc.LNumber2I(vali) + if regi >= rg.top { + rg.top = regi + 1 + } + } } else { - reg.Set(RA, objectArith(L, opcode, lhs, rhs)) + v := objectArith(L, opcode, lhs, rhs) + // this section is inlined by go-inline + // source function is 'func (rg *registry) Set(regi int, vali LValue) ' in '_state.go' + { + rg := reg + regi := RA + vali := v + newSize := regi + 1 + // this section is inlined by go-inline + // source function is 'func (rg *registry) checkSize(requiredSize int) ' in '_state.go' + { + requiredSize := newSize + if requiredSize > cap(rg.array) { + rg.resize(requiredSize) + } + } + rg.array[regi] = vali + if regi >= rg.top { + rg.top = regi + 1 + } + } } return 0 } @@ -1533,7 +2271,7 @@ func luaModulo(lhs, rhs LNumber) LNumber { flhs := float64(lhs) frhs := float64(rhs) v := math.Mod(flhs, frhs) - if flhs < 0 || frhs < 0 && !(flhs < 0 && frhs < 0) { + if frhs > 0 && v < 0 || frhs < 0 && v > 0 { v += frhs } return LNumber(v) @@ -1577,7 +2315,7 @@ func objectArith(L *LState, opcode int, lhs, rhs LValue) LValue { event = "__pow" } op := L.metaOp2(lhs, rhs, event) - if op.Type() == LTFunction { + if _, ok := op.(*LFunction); ok { L.reg.Push(op) L.reg.Push(lhs) L.reg.Push(rhs) @@ -1594,8 +2332,8 @@ func objectArith(L *LState, opcode int, lhs, rhs LValue) LValue { rhs = rnum } } - if v1, ok1 := lhs.assertFloat64(); ok1 { - if v2, ok2 := rhs.assertFloat64(); ok2 { + if v1, ok1 := lhs.(LNumber); ok1 { + if v2, ok2 := rhs.(LNumber); ok2 { return numberArith(L, opcode, LNumber(v1), LNumber(v2)) } } @@ -1644,8 +2382,8 @@ func stringConcat(L *LState, total, last int) LValue { func lessThan(L *LState, lhs, rhs LValue) bool { // optimization for numbers - if v1, ok1 := lhs.assertFloat64(); ok1 { - if v2, ok2 := rhs.assertFloat64(); ok2 { + if v1, ok1 := lhs.(LNumber); ok1 { + if v2, ok2 := rhs.(LNumber); ok2 { return v1 < v2 } L.RaiseError("attempt to compare %v with %v", lhs.Type().String(), rhs.Type().String()) @@ -1665,17 +2403,18 @@ func lessThan(L *LState, lhs, rhs LValue) bool { } func equals(L *LState, lhs, rhs LValue, raw bool) bool { - if lhs.Type() != rhs.Type() { + lt := lhs.Type() + if lt != rhs.Type() { return false } ret := false - switch lhs.Type() { + switch lt { case LTNil: ret = true case LTNumber: - v1, _ := lhs.assertFloat64() - v2, _ := rhs.assertFloat64() + v1, _ := lhs.(LNumber) + v2, _ := rhs.(LNumber) ret = v1 == v2 case LTBool: ret = bool(lhs.(LBool)) == bool(rhs.(LBool)) diff --git a/vendor/go.etcd.io/etcd/api/v3/version/version.go b/vendor/go.etcd.io/etcd/api/v3/version/version.go index 1819ead7290..4858a08bfe3 100644 --- a/vendor/go.etcd.io/etcd/api/v3/version/version.go +++ b/vendor/go.etcd.io/etcd/api/v3/version/version.go @@ -26,7 +26,7 @@ import ( var ( // MinClusterVersion is the min cluster version this etcd binary is compatible with. 
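// For context on the gopher-lua vm.go hunks above: go-inline mechanically
// expands small registry helpers at every call site so the interpreter loop
// avoids function-call overhead. A sketch of the source functions,
// reconstructed from the inlined bodies (the real definitions live in
// gopher-lua's _state.go; resize here is a simplified stand-in, not the
// upstream implementation):
package main

import "fmt"

type LValue interface{} // stand-in for gopher-lua's LValue interface

type registry struct {
	array []LValue
	top   int
}

// resize grows the backing array; placeholder growth policy.
func (rg *registry) resize(requiredSize int) {
	newArray := make([]LValue, requiredSize*2) // len == cap keeps indexing simple
	copy(newArray, rg.array)
	rg.array = newArray
}

// checkSize is the body the hunks inline wherever they cite
// 'func (rg *registry) checkSize(requiredSize int)'.
func (rg *registry) checkSize(requiredSize int) {
	if requiredSize > cap(rg.array) {
		rg.resize(requiredSize)
	}
}

// Set is the body the hunks inline wherever they cite
// 'func (rg *registry) Set(regi int, vali LValue)': grow if needed, store,
// then bump top so the written slot stays visible to the VM.
func (rg *registry) Set(regi int, vali LValue) {
	newSize := regi + 1
	rg.checkSize(newSize)
	rg.array[regi] = vali
	if regi >= rg.top {
		rg.top = regi + 1
	}
}

func main() {
	rg := &registry{}
	rg.Set(3, "hello") // grows the array and sets top to 4
	fmt.Println(rg.top, rg.array[3])
}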
MinClusterVersion = "3.0.0" - Version = "3.5.10" + Version = "3.5.12" APIVersion = "unknown" // Git SHA Value will be set during build diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/purge.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/purge.go index f4492009d6c..b314e068fea 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/purge.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/purge.go @@ -25,18 +25,24 @@ import ( ) func PurgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) <-chan error { - return purgeFile(lg, dirname, suffix, max, interval, stop, nil, nil) + return purgeFile(lg, dirname, suffix, max, interval, stop, nil, nil, true) } func PurgeFileWithDoneNotify(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) (<-chan struct{}, <-chan error) { doneC := make(chan struct{}) - errC := purgeFile(lg, dirname, suffix, max, interval, stop, nil, doneC) + errC := purgeFile(lg, dirname, suffix, max, interval, stop, nil, doneC, true) + return doneC, errC +} + +func PurgeFileWithoutFlock(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) (<-chan struct{}, <-chan error) { + doneC := make(chan struct{}) + errC := purgeFile(lg, dirname, suffix, max, interval, stop, nil, doneC, false) return doneC, errC } // purgeFile is the internal implementation for PurgeFile which can post purged files to purgec if non-nil. // if donec is non-nil, the function closes it to notify its exit. -func purgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}, purgec chan<- string, donec chan<- struct{}) <-chan error { +func purgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}, purgec chan<- string, donec chan<- struct{}, flock bool) <-chan error { if lg == nil { lg = zap.NewNop() } @@ -67,20 +73,25 @@ func purgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval fnames = newfnames for len(newfnames) > int(max) { f := filepath.Join(dirname, newfnames[0]) - l, err := TryLockFile(f, os.O_WRONLY, PrivateFileMode) - if err != nil { - lg.Warn("failed to lock file", zap.String("path", f), zap.Error(err)) - break + var l *LockedFile + if flock { + l, err = TryLockFile(f, os.O_WRONLY, PrivateFileMode) + if err != nil { + lg.Warn("failed to lock file", zap.String("path", f), zap.Error(err)) + break + } } if err = os.Remove(f); err != nil { lg.Error("failed to remove file", zap.String("path", f), zap.Error(err)) errC <- err return } - if err = l.Close(); err != nil { - lg.Error("failed to unlock/close", zap.String("path", l.Name()), zap.Error(err)) - errC <- err - return + if flock { + if err = l.Close(); err != nil { + lg.Error("failed to unlock/close", zap.String("path", l.Name()), zap.Error(err)) + errC <- err + return + } } lg.Info("purged", zap.String("path", f)) newfnames = newfnames[1:] diff --git a/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go b/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go deleted file mode 100644 index 41b2c3fc038..00000000000 --- a/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -// Package tagencoding contains the tag encoding -// used interally by the stats collector. -package tagencoding // import "go.opencensus.io/internal/tagencoding" - -// Values represent the encoded buffer for the values. -type Values struct { - Buffer []byte - WriteIndex int - ReadIndex int -} - -func (vb *Values) growIfRequired(expected int) { - if len(vb.Buffer)-vb.WriteIndex < expected { - tmp := make([]byte, 2*(len(vb.Buffer)+1)+expected) - copy(tmp, vb.Buffer) - vb.Buffer = tmp - } -} - -// WriteValue is the helper method to encode Values from map[Key][]byte. -func (vb *Values) WriteValue(v []byte) { - length := len(v) & 0xff - vb.growIfRequired(1 + length) - - // writing length of v - vb.Buffer[vb.WriteIndex] = byte(length) - vb.WriteIndex++ - - if length == 0 { - // No value was encoded for this key - return - } - - // writing v - copy(vb.Buffer[vb.WriteIndex:], v[:length]) - vb.WriteIndex += length -} - -// ReadValue is the helper method to decode Values to a map[Key][]byte. -func (vb *Values) ReadValue() []byte { - // read length of v - length := int(vb.Buffer[vb.ReadIndex]) - vb.ReadIndex++ - if length == 0 { - // No value was encoded for this key - return nil - } - - // read value of v - v := make([]byte, length) - endIdx := vb.ReadIndex + length - copy(v, vb.Buffer[vb.ReadIndex:endIdx]) - vb.ReadIndex = endIdx - return v -} - -// Bytes returns a reference to already written bytes in the Buffer. -func (vb *Values) Bytes() []byte { - return vb.Buffer[:vb.WriteIndex] -} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client.go b/vendor/go.opencensus.io/plugin/ocgrpc/client.go deleted file mode 100644 index 2063b6f76a1..00000000000 --- a/vendor/go.opencensus.io/plugin/ocgrpc/client.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ocgrpc - -import ( - "context" - - "go.opencensus.io/trace" - "google.golang.org/grpc/stats" -) - -// ClientHandler implements a gRPC stats.Handler for recording OpenCensus stats and -// traces. Use with gRPC clients only. -type ClientHandler struct { - // StartOptions allows configuring the StartOptions used to create new spans. - // - // StartOptions.SpanKind will always be set to trace.SpanKindClient - // for spans started by this handler. - StartOptions trace.StartOptions -} - -// HandleConn exists to satisfy gRPC stats.Handler. -func (c *ClientHandler) HandleConn(ctx context.Context, cs stats.ConnStats) { - // no-op -} - -// TagConn exists to satisfy gRPC stats.Handler. 
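// Historical sketch of the length-prefixed encoding implemented by the
// tagencoding package deleted above. The package was internal to
// go.opencensus.io, so this reproduces a trimmed copy of its Values type
// rather than importing it: each value is written as a 1-byte length prefix
// plus the bytes, so `len(v) & 0xff` silently caps a value at 255 bytes.
package main

import "fmt"

type Values struct {
	Buffer     []byte
	WriteIndex int
	ReadIndex  int
}

func (vb *Values) growIfRequired(expected int) {
	if len(vb.Buffer)-vb.WriteIndex < expected {
		tmp := make([]byte, 2*(len(vb.Buffer)+1)+expected)
		copy(tmp, vb.Buffer)
		vb.Buffer = tmp
	}
}

// WriteValue appends a 1-byte length followed by the value bytes.
func (vb *Values) WriteValue(v []byte) {
	length := len(v) & 0xff
	vb.growIfRequired(1 + length)
	vb.Buffer[vb.WriteIndex] = byte(length)
	vb.WriteIndex++
	copy(vb.Buffer[vb.WriteIndex:], v[:length])
	vb.WriteIndex += length
}

// ReadValue decodes the next length-prefixed value from the buffer.
func (vb *Values) ReadValue() []byte {
	length := int(vb.Buffer[vb.ReadIndex])
	vb.ReadIndex++
	v := make([]byte, length)
	copy(v, vb.Buffer[vb.ReadIndex:vb.ReadIndex+length])
	vb.ReadIndex += length
	return v
}

func main() {
	var vb Values
	vb.WriteValue([]byte("frontend"))
	vb.WriteValue([]byte("GET"))
	fmt.Printf("%q %q\n", vb.ReadValue(), vb.ReadValue()) // "frontend" "GET"
}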
-func (c *ClientHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context { - // no-op - return ctx -} - -// HandleRPC implements per-RPC tracing and stats instrumentation. -func (c *ClientHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { - traceHandleRPC(ctx, rs) - statsHandleRPC(ctx, rs) -} - -// TagRPC implements per-RPC context management. -func (c *ClientHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { - ctx = c.traceTagRPC(ctx, rti) - ctx = c.statsTagRPC(ctx, rti) - return ctx -} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go b/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go deleted file mode 100644 index fb3c19d6b64..00000000000 --- a/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package ocgrpc - -import ( - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" -) - -// The following variables are measures are recorded by ClientHandler: -var ( - ClientSentMessagesPerRPC = stats.Int64("grpc.io/client/sent_messages_per_rpc", "Number of messages sent in the RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless) - ClientSentBytesPerRPC = stats.Int64("grpc.io/client/sent_bytes_per_rpc", "Total bytes sent across all request messages per RPC.", stats.UnitBytes) - ClientReceivedMessagesPerRPC = stats.Int64("grpc.io/client/received_messages_per_rpc", "Number of response messages received per RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless) - ClientReceivedBytesPerRPC = stats.Int64("grpc.io/client/received_bytes_per_rpc", "Total bytes received across all response messages per RPC.", stats.UnitBytes) - ClientRoundtripLatency = stats.Float64("grpc.io/client/roundtrip_latency", "Time between first byte of request sent to last byte of response received, or terminal error.", stats.UnitMilliseconds) - ClientStartedRPCs = stats.Int64("grpc.io/client/started_rpcs", "Number of started client RPCs.", stats.UnitDimensionless) - ClientServerLatency = stats.Float64("grpc.io/client/server_latency", `Propagated from the server and should have the same value as "grpc.io/server/latency".`, stats.UnitMilliseconds) -) - -// Predefined views may be registered to collect data for the above measures. -// As always, you may also define your own custom views over measures collected by this -// package. These are declared as a convenience only; none are registered by -// default. 
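// For context: none of the client views above were registered by default, so
// an application using the (now removed) plugin opted in roughly like this.
// The dial target is hypothetical; the option and view names are the ones in
// the deleted files.
package main

import (
	"log"

	"go.opencensus.io/plugin/ocgrpc"
	"go.opencensus.io/stats/view"
	"google.golang.org/grpc"
)

func main() {
	if err := view.Register(ocgrpc.DefaultClientViews...); err != nil {
		log.Fatal(err)
	}
	conn, err := grpc.Dial("example.internal:50051",
		grpc.WithInsecure(), // demo only; use real transport credentials
		grpc.WithStatsHandler(&ocgrpc.ClientHandler{}))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}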
-var ( - ClientSentBytesPerRPCView = &view.View{ - Measure: ClientSentBytesPerRPC, - Name: "grpc.io/client/sent_bytes_per_rpc", - Description: "Distribution of bytes sent per RPC, by method.", - TagKeys: []tag.Key{KeyClientMethod}, - Aggregation: DefaultBytesDistribution, - } - - ClientReceivedBytesPerRPCView = &view.View{ - Measure: ClientReceivedBytesPerRPC, - Name: "grpc.io/client/received_bytes_per_rpc", - Description: "Distribution of bytes received per RPC, by method.", - TagKeys: []tag.Key{KeyClientMethod}, - Aggregation: DefaultBytesDistribution, - } - - ClientRoundtripLatencyView = &view.View{ - Measure: ClientRoundtripLatency, - Name: "grpc.io/client/roundtrip_latency", - Description: "Distribution of round-trip latency, by method.", - TagKeys: []tag.Key{KeyClientMethod}, - Aggregation: DefaultMillisecondsDistribution, - } - - // Purposely reuses the count from `ClientRoundtripLatency`, tagging - // with method and status to result in ClientCompletedRpcs. - ClientCompletedRPCsView = &view.View{ - Measure: ClientRoundtripLatency, - Name: "grpc.io/client/completed_rpcs", - Description: "Count of RPCs by method and status.", - TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, - Aggregation: view.Count(), - } - - ClientStartedRPCsView = &view.View{ - Measure: ClientStartedRPCs, - Name: "grpc.io/client/started_rpcs", - Description: "Number of started client RPCs.", - TagKeys: []tag.Key{KeyClientMethod}, - Aggregation: view.Count(), - } - - ClientSentMessagesPerRPCView = &view.View{ - Measure: ClientSentMessagesPerRPC, - Name: "grpc.io/client/sent_messages_per_rpc", - Description: "Distribution of sent messages count per RPC, by method.", - TagKeys: []tag.Key{KeyClientMethod}, - Aggregation: DefaultMessageCountDistribution, - } - - ClientReceivedMessagesPerRPCView = &view.View{ - Measure: ClientReceivedMessagesPerRPC, - Name: "grpc.io/client/received_messages_per_rpc", - Description: "Distribution of received messages count per RPC, by method.", - TagKeys: []tag.Key{KeyClientMethod}, - Aggregation: DefaultMessageCountDistribution, - } - - ClientServerLatencyView = &view.View{ - Measure: ClientServerLatency, - Name: "grpc.io/client/server_latency", - Description: "Distribution of server latency as viewed by client, by method.", - TagKeys: []tag.Key{KeyClientMethod}, - Aggregation: DefaultMillisecondsDistribution, - } -) - -// DefaultClientViews are the default client views provided by this package. -var DefaultClientViews = []*view.View{ - ClientSentBytesPerRPCView, - ClientReceivedBytesPerRPCView, - ClientRoundtripLatencyView, - ClientCompletedRPCsView, -} - -// TODO(jbd): Add roundtrip_latency, uncompressed_request_bytes, uncompressed_response_bytes, request_count, response_count. -// TODO(acetechnologist): This is temporary and will need to be replaced by a -// mechanism to load these defaults from a common repository/config shared by -// all supported languages. Likely a serialized protobuf of these defaults. diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go b/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go deleted file mode 100644 index b36349820d9..00000000000 --- a/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package ocgrpc - -import ( - "context" - "time" - - "go.opencensus.io/tag" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/stats" -) - -// statsTagRPC gets the tag.Map populated by the application code, serializes -// its tags into the GRPC metadata in order to be sent to the server. -func (h *ClientHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { - startTime := time.Now() - if info == nil { - if grpclog.V(2) { - grpclog.Info("clientHandler.TagRPC called with nil info.") - } - return ctx - } - - d := &rpcData{ - startTime: startTime, - method: info.FullMethodName, - } - ts := tag.FromContext(ctx) - if ts != nil { - encoded := tag.Encode(ts) - ctx = stats.SetTags(ctx, encoded) - } - - return context.WithValue(ctx, rpcDataKey, d) -} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/doc.go b/vendor/go.opencensus.io/plugin/ocgrpc/doc.go deleted file mode 100644 index 1370323fb71..00000000000 --- a/vendor/go.opencensus.io/plugin/ocgrpc/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package ocgrpc contains OpenCensus stats and trace -// integrations for gRPC. -// -// Use ServerHandler for servers and ClientHandler for clients. -package ocgrpc // import "go.opencensus.io/plugin/ocgrpc" diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server.go b/vendor/go.opencensus.io/plugin/ocgrpc/server.go deleted file mode 100644 index 8a53e097274..00000000000 --- a/vendor/go.opencensus.io/plugin/ocgrpc/server.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ocgrpc - -import ( - "context" - - "google.golang.org/grpc/stats" - - "go.opencensus.io/trace" -) - -// ServerHandler implements gRPC stats.Handler recording OpenCensus stats and -// traces. Use with gRPC servers. -// -// When installed (see Example), tracing metadata is read from inbound RPCs -// by default. 
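// Server-side counterpart, sketched from the ServerHandler doc comment
// above: the handler installs via grpc.StatsHandler, IsPublicEndpoint
// controls whether inbound trace metadata becomes the parent span or only a
// link, and the Sampler opt-out is the one the doc comment suggests.
package main

import (
	"go.opencensus.io/plugin/ocgrpc"
	"go.opencensus.io/trace"
	"google.golang.org/grpc"
)

func newInstrumentedServer() *grpc.Server {
	return grpc.NewServer(grpc.StatsHandler(&ocgrpc.ServerHandler{
		// Public-facing: treat caller-supplied trace metadata as a link,
		// not a parent, so outside callers cannot force sampled traces.
		IsPublicEndpoint: true,
		// Per the doc comment: opt this service out of extra traces.
		StartOptions: trace.StartOptions{Sampler: trace.ProbabilitySampler(0.0)},
	}))
}

func main() { _ = newInstrumentedServer() }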
If no tracing metadata is present, or if the tracing metadata is -// present but the SpanContext isn't sampled, then a new trace may be started -// (as determined by Sampler). -type ServerHandler struct { - // IsPublicEndpoint may be set to true to always start a new trace around - // each RPC. Any SpanContext in the RPC metadata will be added as a linked - // span instead of making it the parent of the span created around the - // server RPC. - // - // Be aware that if you leave this false (the default) on a public-facing - // server, callers will be able to send tracing metadata in gRPC headers - // and trigger traces in your backend. - IsPublicEndpoint bool - - // StartOptions to use for to spans started around RPCs handled by this server. - // - // These will apply even if there is tracing metadata already - // present on the inbound RPC but the SpanContext is not sampled. This - // ensures that each service has some opportunity to be traced. If you would - // like to not add any additional traces for this gRPC service, set: - // - // StartOptions.Sampler = trace.ProbabilitySampler(0.0) - // - // StartOptions.SpanKind will always be set to trace.SpanKindServer - // for spans started by this handler. - StartOptions trace.StartOptions -} - -var _ stats.Handler = (*ServerHandler)(nil) - -// HandleConn exists to satisfy gRPC stats.Handler. -func (s *ServerHandler) HandleConn(ctx context.Context, cs stats.ConnStats) { - // no-op -} - -// TagConn exists to satisfy gRPC stats.Handler. -func (s *ServerHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context { - // no-op - return ctx -} - -// HandleRPC implements per-RPC tracing and stats instrumentation. -func (s *ServerHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { - traceHandleRPC(ctx, rs) - statsHandleRPC(ctx, rs) -} - -// TagRPC implements per-RPC context management. -func (s *ServerHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { - ctx = s.traceTagRPC(ctx, rti) - ctx = s.statsTagRPC(ctx, rti) - return ctx -} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go b/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go deleted file mode 100644 index fe0e971086e..00000000000 --- a/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package ocgrpc - -import ( - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" -) - -// The following variables are measures are recorded by ServerHandler: -var ( - ServerReceivedMessagesPerRPC = stats.Int64("grpc.io/server/received_messages_per_rpc", "Number of messages received in each RPC. 
Has value 1 for non-streaming RPCs.", stats.UnitDimensionless) - ServerReceivedBytesPerRPC = stats.Int64("grpc.io/server/received_bytes_per_rpc", "Total bytes received across all messages per RPC.", stats.UnitBytes) - ServerSentMessagesPerRPC = stats.Int64("grpc.io/server/sent_messages_per_rpc", "Number of messages sent in each RPC. Has value 1 for non-streaming RPCs.", stats.UnitDimensionless) - ServerSentBytesPerRPC = stats.Int64("grpc.io/server/sent_bytes_per_rpc", "Total bytes sent in across all response messages per RPC.", stats.UnitBytes) - ServerStartedRPCs = stats.Int64("grpc.io/server/started_rpcs", "Number of started server RPCs.", stats.UnitDimensionless) - ServerLatency = stats.Float64("grpc.io/server/server_latency", "Time between first byte of request received to last byte of response sent, or terminal error.", stats.UnitMilliseconds) -) - -// TODO(acetechnologist): This is temporary and will need to be replaced by a -// mechanism to load these defaults from a common repository/config shared by -// all supported languages. Likely a serialized protobuf of these defaults. - -// Predefined views may be registered to collect data for the above measures. -// As always, you may also define your own custom views over measures collected by this -// package. These are declared as a convenience only; none are registered by -// default. -var ( - ServerReceivedBytesPerRPCView = &view.View{ - Name: "grpc.io/server/received_bytes_per_rpc", - Description: "Distribution of received bytes per RPC, by method.", - Measure: ServerReceivedBytesPerRPC, - TagKeys: []tag.Key{KeyServerMethod}, - Aggregation: DefaultBytesDistribution, - } - - ServerSentBytesPerRPCView = &view.View{ - Name: "grpc.io/server/sent_bytes_per_rpc", - Description: "Distribution of total sent bytes per RPC, by method.", - Measure: ServerSentBytesPerRPC, - TagKeys: []tag.Key{KeyServerMethod}, - Aggregation: DefaultBytesDistribution, - } - - ServerLatencyView = &view.View{ - Name: "grpc.io/server/server_latency", - Description: "Distribution of server latency in milliseconds, by method.", - TagKeys: []tag.Key{KeyServerMethod}, - Measure: ServerLatency, - Aggregation: DefaultMillisecondsDistribution, - } - - // Purposely reuses the count from `ServerLatency`, tagging - // with method and status to result in ServerCompletedRpcs. - ServerCompletedRPCsView = &view.View{ - Name: "grpc.io/server/completed_rpcs", - Description: "Count of RPCs by method and status.", - TagKeys: []tag.Key{KeyServerMethod, KeyServerStatus}, - Measure: ServerLatency, - Aggregation: view.Count(), - } - - ServerStartedRPCsView = &view.View{ - Measure: ServerStartedRPCs, - Name: "grpc.io/server/started_rpcs", - Description: "Number of started server RPCs.", - TagKeys: []tag.Key{KeyServerMethod}, - Aggregation: view.Count(), - } - - ServerReceivedMessagesPerRPCView = &view.View{ - Name: "grpc.io/server/received_messages_per_rpc", - Description: "Distribution of messages received count per RPC, by method.", - TagKeys: []tag.Key{KeyServerMethod}, - Measure: ServerReceivedMessagesPerRPC, - Aggregation: DefaultMessageCountDistribution, - } - - ServerSentMessagesPerRPCView = &view.View{ - Name: "grpc.io/server/sent_messages_per_rpc", - Description: "Distribution of messages sent count per RPC, by method.", - TagKeys: []tag.Key{KeyServerMethod}, - Measure: ServerSentMessagesPerRPC, - Aggregation: DefaultMessageCountDistribution, - } -) - -// DefaultServerViews are the default server views provided by this package. 
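// ServerCompletedRPCsView above deliberately reuses the ServerLatency
// measure with a view.Count() aggregation: Count ignores the recorded value
// and counts Record calls, so a single measure can feed both a latency
// histogram and an RPC counter. The same trick, with hypothetical names:
package main

import (
	"context"
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
)

var handleLatency = stats.Float64(
	"example.com/handle_latency", "Handler latency.", stats.UnitMilliseconds)

var (
	latencyView = &view.View{
		Name:        "example.com/handle_latency",
		Description: "Latency distribution.",
		Measure:     handleLatency,
		Aggregation: view.Distribution(1, 10, 100, 1000),
	}
	completedView = &view.View{
		Name:        "example.com/handled_total",
		Description: "Count of handled requests.",
		Measure:     handleLatency, // same measure, different aggregation
		Aggregation: view.Count(),
	}
)

func main() {
	if err := view.Register(latencyView, completedView); err != nil {
		log.Fatal(err)
	}
	// One Record call feeds both views: a 42ms sample and a +1 count.
	stats.Record(context.Background(), handleLatency.M(42))
}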
-var DefaultServerViews = []*view.View{ - ServerReceivedBytesPerRPCView, - ServerSentBytesPerRPCView, - ServerLatencyView, - ServerCompletedRPCsView, -} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go b/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go deleted file mode 100644 index afcef023afb..00000000000 --- a/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package ocgrpc - -import ( - "time" - - "context" - - "go.opencensus.io/tag" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/stats" -) - -// statsTagRPC gets the metadata from gRPC context, extracts the encoded tags from -// it and creates a new tag.Map and puts them into the returned context. -func (h *ServerHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { - startTime := time.Now() - if info == nil { - if grpclog.V(2) { - grpclog.Infof("opencensus: TagRPC called with nil info.") - } - return ctx - } - d := &rpcData{ - startTime: startTime, - method: info.FullMethodName, - } - propagated := h.extractPropagatedTags(ctx) - ctx = tag.NewContext(ctx, propagated) - ctx, _ = tag.New(ctx, tag.Upsert(KeyServerMethod, methodName(info.FullMethodName))) - return context.WithValue(ctx, rpcDataKey, d) -} - -// extractPropagatedTags creates a new tag map containing the tags extracted from the -// gRPC metadata. -func (h *ServerHandler) extractPropagatedTags(ctx context.Context) *tag.Map { - buf := stats.Tags(ctx) - if buf == nil { - return nil - } - propagated, err := tag.Decode(buf) - if err != nil { - if grpclog.V(2) { - grpclog.Warningf("opencensus: Failed to decode tags from gRPC metadata failed to decode: %v", err) - } - return nil - } - return propagated -} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go b/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go deleted file mode 100644 index 9cb27320ca1..00000000000 --- a/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go +++ /dev/null @@ -1,248 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
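// The two deleted stats handlers propagate tag maps over the wire: the
// client serializes the current tag.Map with tag.Encode and attaches it via
// stats.SetTags (client_stats_handler.go above), and the server reads it
// back with stats.Tags plus tag.Decode (extractPropagatedTags above). The
// encode/decode round trip, minus the gRPC transport; the tag key is
// hypothetical:
package main

import (
	"context"
	"fmt"
	"log"

	"go.opencensus.io/tag"
)

func main() {
	keyEnv := tag.MustNewKey("env")
	ctx, err := tag.New(context.Background(), tag.Upsert(keyEnv, "prod"))
	if err != nil {
		log.Fatal(err)
	}

	wire := tag.Encode(tag.FromContext(ctx)) // client side: bytes for metadata

	decoded, err := tag.Decode(wire) // server side: rebuild the tag map
	if err != nil {
		log.Fatal(err)
	}
	if v, ok := decoded.Value(keyEnv); ok {
		fmt.Println(v) // prod
	}
}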
-// - -package ocgrpc - -import ( - "context" - "strconv" - "strings" - "sync/atomic" - "time" - - "go.opencensus.io/metric/metricdata" - ocstats "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" - "go.opencensus.io/trace" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/stats" - "google.golang.org/grpc/status" -) - -type grpcInstrumentationKey string - -// rpcData holds the instrumentation RPC data that is needed between the start -// and end of an call. It holds the info that this package needs to keep track -// of between the various GRPC events. -type rpcData struct { - // reqCount and respCount has to be the first words - // in order to be 64-aligned on 32-bit architectures. - sentCount, sentBytes, recvCount, recvBytes int64 // access atomically - - // startTime represents the time at which TagRPC was invoked at the - // beginning of an RPC. It is an appoximation of the time when the - // application code invoked GRPC code. - startTime time.Time - method string -} - -// The following variables define the default hard-coded auxiliary data used by -// both the default GRPC client and GRPC server metrics. -var ( - DefaultBytesDistribution = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296) - DefaultMillisecondsDistribution = view.Distribution(0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) - DefaultMessageCountDistribution = view.Distribution(1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536) -) - -// Server tags are applied to the context used to process each RPC, as well as -// the measures at the end of each RPC. -var ( - KeyServerMethod = tag.MustNewKey("grpc_server_method") - KeyServerStatus = tag.MustNewKey("grpc_server_status") -) - -// Client tags are applied to measures at the end of each RPC. -var ( - KeyClientMethod = tag.MustNewKey("grpc_client_method") - KeyClientStatus = tag.MustNewKey("grpc_client_status") -) - -var ( - rpcDataKey = grpcInstrumentationKey("opencensus-rpcData") -) - -func methodName(fullname string) string { - return strings.TrimLeft(fullname, "/") -} - -// statsHandleRPC processes the RPC events. 
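// The rpcData field-ordering comment above ("reqCount and respCount has to
// be the first words...") reflects a real sync/atomic constraint: on 32-bit
// platforms, 64-bit atomic operations require 8-byte alignment, and Go only
// guarantees that alignment for the first word of an allocated struct. A
// minimal illustration:
package main

import "sync/atomic"

type counters struct {
	sent int64 // keep atomically accessed int64s first
	recv int64
	name string // other fields follow the 64-bit words
}

func main() {
	c := &counters{name: "demo"}
	atomic.AddInt64(&c.sent, 1) // safe on 386/arm because sent leads the struct
	_ = atomic.LoadInt64(&c.recv)
}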
-func statsHandleRPC(ctx context.Context, s stats.RPCStats) { - switch st := s.(type) { - case *stats.OutHeader, *stats.InHeader, *stats.InTrailer, *stats.OutTrailer: - // do nothing for client - case *stats.Begin: - handleRPCBegin(ctx, st) - case *stats.OutPayload: - handleRPCOutPayload(ctx, st) - case *stats.InPayload: - handleRPCInPayload(ctx, st) - case *stats.End: - handleRPCEnd(ctx, st) - default: - grpclog.Infof("unexpected stats: %T", st) - } -} - -func handleRPCBegin(ctx context.Context, s *stats.Begin) { - d, ok := ctx.Value(rpcDataKey).(*rpcData) - if !ok { - if grpclog.V(2) { - grpclog.Infoln("Failed to retrieve *rpcData from context.") - } - } - - if s.IsClient() { - ocstats.RecordWithOptions(ctx, - ocstats.WithTags(tag.Upsert(KeyClientMethod, methodName(d.method))), - ocstats.WithMeasurements(ClientStartedRPCs.M(1))) - } else { - ocstats.RecordWithOptions(ctx, - ocstats.WithTags(tag.Upsert(KeyClientMethod, methodName(d.method))), - ocstats.WithMeasurements(ServerStartedRPCs.M(1))) - } -} - -func handleRPCOutPayload(ctx context.Context, s *stats.OutPayload) { - d, ok := ctx.Value(rpcDataKey).(*rpcData) - if !ok { - if grpclog.V(2) { - grpclog.Infoln("Failed to retrieve *rpcData from context.") - } - return - } - - atomic.AddInt64(&d.sentBytes, int64(s.Length)) - atomic.AddInt64(&d.sentCount, 1) -} - -func handleRPCInPayload(ctx context.Context, s *stats.InPayload) { - d, ok := ctx.Value(rpcDataKey).(*rpcData) - if !ok { - if grpclog.V(2) { - grpclog.Infoln("Failed to retrieve *rpcData from context.") - } - return - } - - atomic.AddInt64(&d.recvBytes, int64(s.Length)) - atomic.AddInt64(&d.recvCount, 1) -} - -func handleRPCEnd(ctx context.Context, s *stats.End) { - d, ok := ctx.Value(rpcDataKey).(*rpcData) - if !ok { - if grpclog.V(2) { - grpclog.Infoln("Failed to retrieve *rpcData from context.") - } - return - } - - elapsedTime := time.Since(d.startTime) - - var st string - if s.Error != nil { - s, ok := status.FromError(s.Error) - if ok { - st = statusCodeToString(s) - } - } else { - st = "OK" - } - - latencyMillis := float64(elapsedTime) / float64(time.Millisecond) - attachments := getSpanCtxAttachment(ctx) - if s.Client { - ocstats.RecordWithOptions(ctx, - ocstats.WithTags( - tag.Upsert(KeyClientMethod, methodName(d.method)), - tag.Upsert(KeyClientStatus, st)), - ocstats.WithAttachments(attachments), - ocstats.WithMeasurements( - ClientSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)), - ClientSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)), - ClientReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)), - ClientReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)), - ClientRoundtripLatency.M(latencyMillis))) - } else { - ocstats.RecordWithOptions(ctx, - ocstats.WithTags( - tag.Upsert(KeyServerStatus, st), - ), - ocstats.WithAttachments(attachments), - ocstats.WithMeasurements( - ServerSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)), - ServerSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)), - ServerReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)), - ServerReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)), - ServerLatency.M(latencyMillis))) - } -} - -func statusCodeToString(s *status.Status) string { - // see https://github.com/grpc/grpc/blob/master/doc/statuscodes.md - switch c := s.Code(); c { - case codes.OK: - return "OK" - case codes.Canceled: - return "CANCELLED" - case codes.Unknown: - return "UNKNOWN" - case codes.InvalidArgument: - return "INVALID_ARGUMENT" - case codes.DeadlineExceeded: - return "DEADLINE_EXCEEDED" - case codes.NotFound: - 
return "NOT_FOUND" - case codes.AlreadyExists: - return "ALREADY_EXISTS" - case codes.PermissionDenied: - return "PERMISSION_DENIED" - case codes.ResourceExhausted: - return "RESOURCE_EXHAUSTED" - case codes.FailedPrecondition: - return "FAILED_PRECONDITION" - case codes.Aborted: - return "ABORTED" - case codes.OutOfRange: - return "OUT_OF_RANGE" - case codes.Unimplemented: - return "UNIMPLEMENTED" - case codes.Internal: - return "INTERNAL" - case codes.Unavailable: - return "UNAVAILABLE" - case codes.DataLoss: - return "DATA_LOSS" - case codes.Unauthenticated: - return "UNAUTHENTICATED" - default: - return "CODE_" + strconv.FormatInt(int64(c), 10) - } -} - -func getSpanCtxAttachment(ctx context.Context) metricdata.Attachments { - attachments := map[string]interface{}{} - span := trace.FromContext(ctx) - if span == nil { - return attachments - } - spanCtx := span.SpanContext() - if spanCtx.IsSampled() { - attachments[metricdata.AttachmentKeySpanContext] = spanCtx - } - return attachments -} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go b/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go deleted file mode 100644 index 61bc543d0a2..00000000000 --- a/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ocgrpc - -import ( - "context" - "strings" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/stats" - "google.golang.org/grpc/status" - - "go.opencensus.io/trace" - "go.opencensus.io/trace/propagation" -) - -const traceContextKey = "grpc-trace-bin" - -// TagRPC creates a new trace span for the client side of the RPC. -// -// It returns ctx with the new trace span added and a serialization of the -// SpanContext added to the outgoing gRPC metadata. -func (c *ClientHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { - name := strings.TrimPrefix(rti.FullMethodName, "/") - name = strings.Replace(name, "/", ".", -1) - ctx, span := trace.StartSpan(ctx, name, - trace.WithSampler(c.StartOptions.Sampler), - trace.WithSpanKind(trace.SpanKindClient)) // span is ended by traceHandleRPC - traceContextBinary := propagation.Binary(span.SpanContext()) - return metadata.AppendToOutgoingContext(ctx, traceContextKey, string(traceContextBinary)) -} - -// TagRPC creates a new trace span for the server side of the RPC. -// -// It checks the incoming gRPC metadata in ctx for a SpanContext, and if -// it finds one, uses that SpanContext as the parent context of the new span. -// -// It returns ctx, with the new trace span added. 
-func (s *ServerHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { - md, _ := metadata.FromIncomingContext(ctx) - name := strings.TrimPrefix(rti.FullMethodName, "/") - name = strings.Replace(name, "/", ".", -1) - traceContext := md[traceContextKey] - var ( - parent trace.SpanContext - haveParent bool - ) - if len(traceContext) > 0 { - // Metadata with keys ending in -bin are actually binary. They are base64 - // encoded before being put on the wire, see: - // https://github.com/grpc/grpc-go/blob/08d6261/Documentation/grpc-metadata.md#storing-binary-data-in-metadata - traceContextBinary := []byte(traceContext[0]) - parent, haveParent = propagation.FromBinary(traceContextBinary) - if haveParent && !s.IsPublicEndpoint { - ctx, _ := trace.StartSpanWithRemoteParent(ctx, name, parent, - trace.WithSpanKind(trace.SpanKindServer), - trace.WithSampler(s.StartOptions.Sampler), - ) - return ctx - } - } - ctx, span := trace.StartSpan(ctx, name, - trace.WithSpanKind(trace.SpanKindServer), - trace.WithSampler(s.StartOptions.Sampler)) - if haveParent { - span.AddLink(trace.Link{TraceID: parent.TraceID, SpanID: parent.SpanID, Type: trace.LinkTypeChild}) - } - return ctx -} - -func traceHandleRPC(ctx context.Context, rs stats.RPCStats) { - span := trace.FromContext(ctx) - // TODO: compressed and uncompressed sizes are not populated in every message. - switch rs := rs.(type) { - case *stats.Begin: - span.AddAttributes( - trace.BoolAttribute("Client", rs.Client), - trace.BoolAttribute("FailFast", rs.FailFast)) - case *stats.InPayload: - span.AddMessageReceiveEvent(0 /* TODO: messageID */, int64(rs.Length), int64(rs.WireLength)) - case *stats.OutPayload: - span.AddMessageSendEvent(0, int64(rs.Length), int64(rs.WireLength)) - case *stats.End: - if rs.Error != nil { - s, ok := status.FromError(rs.Error) - if ok { - span.SetStatus(trace.Status{Code: int32(s.Code()), Message: s.Message()}) - } else { - span.SetStatus(trace.Status{Code: int32(codes.Internal), Message: rs.Error.Error()}) - } - } - span.End() - } -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/client.go b/vendor/go.opencensus.io/plugin/ochttp/client.go deleted file mode 100644 index da815b2a734..00000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/client.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ochttp - -import ( - "net/http" - "net/http/httptrace" - - "go.opencensus.io/trace" - "go.opencensus.io/trace/propagation" -) - -// Transport is an http.RoundTripper that instruments all outgoing requests with -// OpenCensus stats and tracing. -// -// The zero value is intended to be a useful default, but for -// now it's recommended that you explicitly set Propagation, since the default -// for this may change. -type Transport struct { - // Base may be set to wrap another http.RoundTripper that does the actual - // requests. By default http.DefaultTransport is used. 
- // - // If base HTTP roundtripper implements CancelRequest, - // the returned round tripper will be cancelable. - Base http.RoundTripper - - // Propagation defines how traces are propagated. If unspecified, a default - // (currently B3 format) will be used. - Propagation propagation.HTTPFormat - - // StartOptions are applied to the span started by this Transport around each - // request. - // - // StartOptions.SpanKind will always be set to trace.SpanKindClient - // for spans started by this transport. - StartOptions trace.StartOptions - - // GetStartOptions allows setting start options per request. If set, - // StartOptions is going to be ignored. - GetStartOptions func(*http.Request) trace.StartOptions - - // FormatSpanName holds the function to use for generating the span name - // from the information found in the outgoing HTTP Request. By default the - // name equals the URL Path. - FormatSpanName func(*http.Request) string - - // NewClientTrace may be set to a function allowing the current *trace.Span - // to be annotated with HTTP request event information emitted by the - // httptrace package. - NewClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace - - // TODO: Implement tag propagation for HTTP. -} - -// RoundTrip implements http.RoundTripper, delegating to Base and recording stats and traces for the request. -func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { - rt := t.base() - if isHealthEndpoint(req.URL.Path) { - return rt.RoundTrip(req) - } - // TODO: remove excessive nesting of http.RoundTrippers here. - format := t.Propagation - if format == nil { - format = defaultFormat - } - spanNameFormatter := t.FormatSpanName - if spanNameFormatter == nil { - spanNameFormatter = spanNameFromURL - } - - startOpts := t.StartOptions - if t.GetStartOptions != nil { - startOpts = t.GetStartOptions(req) - } - - rt = &traceTransport{ - base: rt, - format: format, - startOptions: trace.StartOptions{ - Sampler: startOpts.Sampler, - SpanKind: trace.SpanKindClient, - }, - formatSpanName: spanNameFormatter, - newClientTrace: t.NewClientTrace, - } - rt = statsTransport{base: rt} - return rt.RoundTrip(req) -} - -func (t *Transport) base() http.RoundTripper { - if t.Base != nil { - return t.Base - } - return http.DefaultTransport -} - -// CancelRequest cancels an in-flight request by closing its connection. -func (t *Transport) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := t.base().(canceler); ok { - cr.CancelRequest(req) - } -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/client_stats.go b/vendor/go.opencensus.io/plugin/ochttp/client_stats.go deleted file mode 100644 index 17142aabe00..00000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/client_stats.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License.
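Since the Transport above is the main entry point for client instrumentation, a minimal usage sketch may help: it wires the Transport into an http.Client with B3 propagation set explicitly, as the doc comment recommends (the URL is illustrative):

package main

import (
	"net/http"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/plugin/ochttp/propagation/b3"
)

func main() {
	client := &http.Client{
		Transport: &ochttp.Transport{
			// Base is left nil, so http.DefaultTransport is used.
			Propagation: &b3.HTTPFormat{},
		},
	}
	resp, err := client.Get("https://example.com/")
	if err != nil {
		return
	}
	// Draining and closing the body lets the instrumentation end the
	// client span and record the received bytes.
	defer resp.Body.Close()
}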
- -package ochttp - -import ( - "context" - "io" - "net/http" - "strconv" - "sync" - "time" - - "go.opencensus.io/stats" - "go.opencensus.io/tag" -) - -// statsTransport is an http.RoundTripper that collects stats for the outgoing requests. -type statsTransport struct { - base http.RoundTripper -} - -// RoundTrip implements http.RoundTripper, delegating to Base and recording stats for the request. -func (t statsTransport) RoundTrip(req *http.Request) (*http.Response, error) { - ctx, _ := tag.New(req.Context(), - tag.Upsert(KeyClientHost, req.Host), - tag.Upsert(Host, req.Host), - tag.Upsert(KeyClientPath, req.URL.Path), - tag.Upsert(Path, req.URL.Path), - tag.Upsert(KeyClientMethod, req.Method), - tag.Upsert(Method, req.Method)) - req = req.WithContext(ctx) - track := &tracker{ - start: time.Now(), - ctx: ctx, - } - if req.Body == nil { - // TODO: Handle cases where ContentLength is not set. - track.reqSize = -1 - } else if req.ContentLength > 0 { - track.reqSize = req.ContentLength - } - stats.Record(ctx, ClientRequestCount.M(1)) - - // Perform request. - resp, err := t.base.RoundTrip(req) - - if err != nil { - track.statusCode = http.StatusInternalServerError - track.end() - } else { - track.statusCode = resp.StatusCode - if req.Method != "HEAD" { - track.respContentLength = resp.ContentLength - } - if resp.Body == nil { - track.end() - } else { - track.body = resp.Body - resp.Body = wrappedBody(track, resp.Body) - } - } - return resp, err -} - -// CancelRequest cancels an in-flight request by closing its connection. -func (t statsTransport) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := t.base.(canceler); ok { - cr.CancelRequest(req) - } -} - -type tracker struct { - ctx context.Context - respSize int64 - respContentLength int64 - reqSize int64 - start time.Time - body io.ReadCloser - statusCode int - endOnce sync.Once -} - -var _ io.ReadCloser = (*tracker)(nil) - -func (t *tracker) end() { - t.endOnce.Do(func() { - latencyMs := float64(time.Since(t.start)) / float64(time.Millisecond) - respSize := t.respSize - if t.respSize == 0 && t.respContentLength > 0 { - respSize = t.respContentLength - } - m := []stats.Measurement{ - ClientSentBytes.M(t.reqSize), - ClientReceivedBytes.M(respSize), - ClientRoundtripLatency.M(latencyMs), - ClientLatency.M(latencyMs), - ClientResponseBytes.M(t.respSize), - } - if t.reqSize >= 0 { - m = append(m, ClientRequestBytes.M(t.reqSize)) - } - - stats.RecordWithTags(t.ctx, []tag.Mutator{ - tag.Upsert(StatusCode, strconv.Itoa(t.statusCode)), - tag.Upsert(KeyClientStatus, strconv.Itoa(t.statusCode)), - }, m...) - }) -} - -func (t *tracker) Read(b []byte) (int, error) { - n, err := t.body.Read(b) - t.respSize += int64(n) - switch err { - case nil: - return n, nil - case io.EOF: - t.end() - } - return n, err -} - -func (t *tracker) Close() error { - // Invoking endSpan on Close will help catch the cases - // in which a read returned a non-nil error, we set the - // span status but didn't end the span. - t.end() - return t.body.Close() -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/doc.go b/vendor/go.opencensus.io/plugin/ochttp/doc.go deleted file mode 100644 index 10e626b16e6..00000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package ochttp provides OpenCensus instrumentation for the net/http package. -// -// For server instrumentation, see Handler. For client-side instrumentation, -// see Transport. -package ochttp // import "go.opencensus.io/plugin/ochttp" diff --git a/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go deleted file mode 100644 index 9ad8852198d..00000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package b3 contains a propagation.HTTPFormat implementation -// for B3 propagation. See https://github.com/openzipkin/b3-propagation -// for more details. -package b3 // import "go.opencensus.io/plugin/ochttp/propagation/b3" - -import ( - "encoding/hex" - "net/http" - - "go.opencensus.io/trace" - "go.opencensus.io/trace/propagation" -) - -// B3 headers that OpenCensus understands. -const ( - TraceIDHeader = "X-B3-TraceId" - SpanIDHeader = "X-B3-SpanId" - SampledHeader = "X-B3-Sampled" -) - -// HTTPFormat implements propagation.HTTPFormat to propagate -// traces in HTTP headers in B3 propagation format. -// HTTPFormat skips the X-B3-ParentId and X-B3-Flags headers -// because there are additional fields not represented in the -// OpenCensus span context. Spans created from the incoming -// header will be the direct children of the client-side span. -// Similarly, the receiver of the outgoing spans should use the -// client-side span created by OpenCensus as the parent. -type HTTPFormat struct{} - -var _ propagation.HTTPFormat = (*HTTPFormat)(nil) - -// SpanContextFromRequest extracts a B3 span context from incoming requests. -func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) { - tid, ok := ParseTraceID(req.Header.Get(TraceIDHeader)) - if !ok { - return trace.SpanContext{}, false - } - sid, ok := ParseSpanID(req.Header.Get(SpanIDHeader)) - if !ok { - return trace.SpanContext{}, false - } - sampled, _ := ParseSampled(req.Header.Get(SampledHeader)) - return trace.SpanContext{ - TraceID: tid, - SpanID: sid, - TraceOptions: sampled, - }, true -} - -// ParseTraceID parses the value of the X-B3-TraceId header. -func ParseTraceID(tid string) (trace.TraceID, bool) { - if tid == "" { - return trace.TraceID{}, false - } - b, err := hex.DecodeString(tid) - if err != nil || len(b) > 16 { - return trace.TraceID{}, false - } - var traceID trace.TraceID - if len(b) <= 8 { - // The lower 64-bits.
- start := 8 + (8 - len(b)) - copy(traceID[start:], b) - } else { - start := 16 - len(b) - copy(traceID[start:], b) - } - - return traceID, true -} - -// ParseSpanID parses the value of the X-B3-SpanId or X-B3-ParentSpanId headers. -func ParseSpanID(sid string) (spanID trace.SpanID, ok bool) { - if sid == "" { - return trace.SpanID{}, false - } - b, err := hex.DecodeString(sid) - if err != nil || len(b) > 8 { - return trace.SpanID{}, false - } - start := 8 - len(b) - copy(spanID[start:], b) - return spanID, true -} - -// ParseSampled parses the value of the X-B3-Sampled header. -func ParseSampled(sampled string) (trace.TraceOptions, bool) { - switch sampled { - case "true", "1": - return trace.TraceOptions(1), true - default: - return trace.TraceOptions(0), false - } -} - -// SpanContextToRequest modifies the given request to include B3 headers. -func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { - req.Header.Set(TraceIDHeader, hex.EncodeToString(sc.TraceID[:])) - req.Header.Set(SpanIDHeader, hex.EncodeToString(sc.SpanID[:])) - - var sampled string - if sc.IsSampled() { - sampled = "1" - } else { - sampled = "0" - } - req.Header.Set(SampledHeader, sampled) -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/route.go b/vendor/go.opencensus.io/plugin/ochttp/route.go deleted file mode 100644 index 5e6a3430760..00000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/route.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ochttp - -import ( - "context" - "net/http" - - "go.opencensus.io/tag" -) - -// SetRoute sets the http_server_route tag to the given value. -// It's useful when an HTTP framework does not support the http.Handler interface -// and using WithRouteTag is not an option, but provides a way to hook into the request flow. -func SetRoute(ctx context.Context, route string) { - if a, ok := ctx.Value(addedTagsKey{}).(*addedTags); ok { - a.t = append(a.t, tag.Upsert(KeyServerRoute, route)) - } -} - -// WithRouteTag returns an http.Handler that records stats with the -// http_server_route tag set to the given value. -func WithRouteTag(handler http.Handler, route string) http.Handler { - return taggedHandlerFunc(func(w http.ResponseWriter, r *http.Request) []tag.Mutator { - addRoute := []tag.Mutator{tag.Upsert(KeyServerRoute, route)} - ctx, _ := tag.New(r.Context(), addRoute...) - r = r.WithContext(ctx) - handler.ServeHTTP(w, r) - return addRoute - }) -} - -// taggedHandlerFunc is a http.Handler that returns tags describing the -// processing of the request. These tags will be recorded along with the -// measures in this package at the end of the request. -type taggedHandlerFunc func(w http.ResponseWriter, r *http.Request) []tag.Mutator - -func (h taggedHandlerFunc) ServeHTTP(w http.ResponseWriter, r *http.Request) { - tags := h(w, r) - if a, ok := r.Context().Value(addedTagsKey{}).(*addedTags); ok { - a.t = append(a.t, tags...) 
- } -} - -type addedTagsKey struct{} - -type addedTags struct { - t []tag.Mutator -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/server.go b/vendor/go.opencensus.io/plugin/ochttp/server.go deleted file mode 100644 index f7c8434be06..00000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/server.go +++ /dev/null @@ -1,455 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ochttp - -import ( - "context" - "io" - "net/http" - "strconv" - "sync" - "time" - - "go.opencensus.io/stats" - "go.opencensus.io/tag" - "go.opencensus.io/trace" - "go.opencensus.io/trace/propagation" -) - -// Handler is an http.Handler wrapper to instrument your HTTP server with -// OpenCensus. It supports both stats and tracing. -// -// # Tracing -// -// This handler is aware of the incoming request's span, reading it from request -// headers as configured using the Propagation field. -// The extracted span can be accessed from the incoming request's -// context. -// -// span := trace.FromContext(r.Context()) -// -// The server span will be automatically ended at the end of ServeHTTP. -type Handler struct { - // Propagation defines how traces are propagated. If unspecified, - // B3 propagation will be used. - Propagation propagation.HTTPFormat - - // Handler is the handler used to handle the incoming request. - Handler http.Handler - - // StartOptions are applied to the span started by this Handler around each - // request. - // - // StartOptions.SpanKind will always be set to trace.SpanKindServer - // for spans started by this handler. - StartOptions trace.StartOptions - - // GetStartOptions allows setting start options per request. If set, - // StartOptions is going to be ignored. - GetStartOptions func(*http.Request) trace.StartOptions - - // IsPublicEndpoint should be set to true for publicly accessible HTTP(S) - // servers. If true, any trace metadata set on the incoming request will - // be added as a linked trace instead of being added as a parent of the - // current trace. - IsPublicEndpoint bool - - // FormatSpanName holds the function to use for generating the span name - // from the information found in the incoming HTTP Request. By default the - // name equals the URL Path. - FormatSpanName func(*http.Request) string - - // IsHealthEndpoint holds the function to use for determining if the - // incoming HTTP request should be considered a health check. This is in - // addition to the private isHealthEndpoint func which may also indicate - // tracing should be skipped.
- IsHealthEndpoint func(*http.Request) bool -} - -func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - var tags addedTags - r, traceEnd := h.startTrace(w, r) - defer traceEnd() - w, statsEnd := h.startStats(w, r) - defer statsEnd(&tags) - handler := h.Handler - if handler == nil { - handler = http.DefaultServeMux - } - r = r.WithContext(context.WithValue(r.Context(), addedTagsKey{}, &tags)) - handler.ServeHTTP(w, r) -} - -func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Request, func()) { - if h.IsHealthEndpoint != nil && h.IsHealthEndpoint(r) || isHealthEndpoint(r.URL.Path) { - return r, func() {} - } - var name string - if h.FormatSpanName == nil { - name = spanNameFromURL(r) - } else { - name = h.FormatSpanName(r) - } - ctx := r.Context() - - startOpts := h.StartOptions - if h.GetStartOptions != nil { - startOpts = h.GetStartOptions(r) - } - - var span *trace.Span - sc, ok := h.extractSpanContext(r) - if ok && !h.IsPublicEndpoint { - ctx, span = trace.StartSpanWithRemoteParent(ctx, name, sc, - trace.WithSampler(startOpts.Sampler), - trace.WithSpanKind(trace.SpanKindServer)) - } else { - ctx, span = trace.StartSpan(ctx, name, - trace.WithSampler(startOpts.Sampler), - trace.WithSpanKind(trace.SpanKindServer), - ) - if ok { - span.AddLink(trace.Link{ - TraceID: sc.TraceID, - SpanID: sc.SpanID, - Type: trace.LinkTypeParent, - Attributes: nil, - }) - } - } - span.AddAttributes(requestAttrs(r)...) - if r.Body == nil { - // TODO: Handle cases where ContentLength is not set. - } else if r.ContentLength > 0 { - span.AddMessageReceiveEvent(0, /* TODO: messageID */ - r.ContentLength, -1) - } - return r.WithContext(ctx), span.End -} - -func (h *Handler) extractSpanContext(r *http.Request) (trace.SpanContext, bool) { - if h.Propagation == nil { - return defaultFormat.SpanContextFromRequest(r) - } - return h.Propagation.SpanContextFromRequest(r) -} - -func (h *Handler) startStats(w http.ResponseWriter, r *http.Request) (http.ResponseWriter, func(tags *addedTags)) { - ctx, _ := tag.New(r.Context(), - tag.Upsert(Host, r.Host), - tag.Upsert(Path, r.URL.Path), - tag.Upsert(Method, r.Method)) - track := &trackingResponseWriter{ - start: time.Now(), - ctx: ctx, - writer: w, - } - if r.Body == nil { - // TODO: Handle cases where ContentLength is not set. 
- track.reqSize = -1 - } else if r.ContentLength > 0 { - track.reqSize = r.ContentLength - } - stats.Record(ctx, ServerRequestCount.M(1)) - return track.wrappedResponseWriter(), track.end -} - -type trackingResponseWriter struct { - ctx context.Context - reqSize int64 - respSize int64 - start time.Time - statusCode int - statusLine string - endOnce sync.Once - writer http.ResponseWriter -} - -// Compile time assertion for ResponseWriter interface -var _ http.ResponseWriter = (*trackingResponseWriter)(nil) - -func (t *trackingResponseWriter) end(tags *addedTags) { - t.endOnce.Do(func() { - if t.statusCode == 0 { - t.statusCode = 200 - } - - span := trace.FromContext(t.ctx) - span.SetStatus(TraceStatus(t.statusCode, t.statusLine)) - span.AddAttributes(trace.Int64Attribute(StatusCodeAttribute, int64(t.statusCode))) - - m := []stats.Measurement{ - ServerLatency.M(float64(time.Since(t.start)) / float64(time.Millisecond)), - ServerResponseBytes.M(t.respSize), - } - if t.reqSize >= 0 { - m = append(m, ServerRequestBytes.M(t.reqSize)) - } - allTags := make([]tag.Mutator, len(tags.t)+1) - allTags[0] = tag.Upsert(StatusCode, strconv.Itoa(t.statusCode)) - copy(allTags[1:], tags.t) - stats.RecordWithTags(t.ctx, allTags, m...) - }) -} - -func (t *trackingResponseWriter) Header() http.Header { - return t.writer.Header() -} - -func (t *trackingResponseWriter) Write(data []byte) (int, error) { - n, err := t.writer.Write(data) - t.respSize += int64(n) - // Add message event for response bytes sent. - span := trace.FromContext(t.ctx) - span.AddMessageSendEvent(0 /* TODO: messageID */, int64(n), -1) - return n, err -} - -func (t *trackingResponseWriter) WriteHeader(statusCode int) { - t.writer.WriteHeader(statusCode) - t.statusCode = statusCode - t.statusLine = http.StatusText(t.statusCode) -} - -// wrappedResponseWriter returns a wrapped version of the original -// ResponseWriter and only implements the same combination of additional -// interfaces as the original. -// This implementation is based on https://github.com/felixge/httpsnoop.
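For orientation, a minimal sketch of the server-side wiring that exercises this response-writer tracking: ochttp.Handler wraps a mux, and WithRouteTag (from route.go above) supplies the low-cardinality http_server_route tag; the route pattern and listen address are illustrative:

package main

import (
	"log"
	"net/http"

	"go.opencensus.io/plugin/ochttp"
)

func main() {
	mux := http.NewServeMux()
	mux.Handle("/users/", ochttp.WithRouteTag(
		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusOK)
		}),
		"/users/:id", // recorded under the http_server_route tag
	))

	// Handler extracts any incoming span context (B3 by default) and
	// starts a server span around each request.
	log.Fatal(http.ListenAndServe(":8080", &ochttp.Handler{Handler: mux}))
}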
-func (t *trackingResponseWriter) wrappedResponseWriter() http.ResponseWriter { - var ( - hj, i0 = t.writer.(http.Hijacker) - cn, i1 = t.writer.(http.CloseNotifier) - pu, i2 = t.writer.(http.Pusher) - fl, i3 = t.writer.(http.Flusher) - rf, i4 = t.writer.(io.ReaderFrom) - ) - - switch { - case !i0 && !i1 && !i2 && !i3 && !i4: - return struct { - http.ResponseWriter - }{t} - case !i0 && !i1 && !i2 && !i3 && i4: - return struct { - http.ResponseWriter - io.ReaderFrom - }{t, rf} - case !i0 && !i1 && !i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.Flusher - }{t, fl} - case !i0 && !i1 && !i2 && i3 && i4: - return struct { - http.ResponseWriter - http.Flusher - io.ReaderFrom - }{t, fl, rf} - case !i0 && !i1 && i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.Pusher - }{t, pu} - case !i0 && !i1 && i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.Pusher - io.ReaderFrom - }{t, pu, rf} - case !i0 && !i1 && i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.Pusher - http.Flusher - }{t, pu, fl} - case !i0 && !i1 && i2 && i3 && i4: - return struct { - http.ResponseWriter - http.Pusher - http.Flusher - io.ReaderFrom - }{t, pu, fl, rf} - case !i0 && i1 && !i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.CloseNotifier - }{t, cn} - case !i0 && i1 && !i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.CloseNotifier - io.ReaderFrom - }{t, cn, rf} - case !i0 && i1 && !i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Flusher - }{t, cn, fl} - case !i0 && i1 && !i2 && i3 && i4: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Flusher - io.ReaderFrom - }{t, cn, fl, rf} - case !i0 && i1 && i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Pusher - }{t, cn, pu} - case !i0 && i1 && i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Pusher - io.ReaderFrom - }{t, cn, pu, rf} - case !i0 && i1 && i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Pusher - http.Flusher - }{t, cn, pu, fl} - case !i0 && i1 && i2 && i3 && i4: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Pusher - http.Flusher - io.ReaderFrom - }{t, cn, pu, fl, rf} - case i0 && !i1 && !i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - }{t, hj} - case i0 && !i1 && !i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - io.ReaderFrom - }{t, hj, rf} - case i0 && !i1 && !i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.Flusher - }{t, hj, fl} - case i0 && !i1 && !i2 && i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - http.Flusher - io.ReaderFrom - }{t, hj, fl, rf} - case i0 && !i1 && i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.Pusher - }{t, hj, pu} - case i0 && !i1 && i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - http.Pusher - io.ReaderFrom - }{t, hj, pu, rf} - case i0 && !i1 && i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.Pusher - http.Flusher - }{t, hj, pu, fl} - case i0 && !i1 && i2 && i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - http.Pusher - http.Flusher - io.ReaderFrom - }{t, hj, pu, fl, rf} - case i0 && i1 && !i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - }{t, hj, cn} - case i0 && i1 && !i2 && !i3 && i4: - return struct { - 
http.ResponseWriter - http.Hijacker - http.CloseNotifier - io.ReaderFrom - }{t, hj, cn, rf} - case i0 && i1 && !i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - http.Flusher - }{t, hj, cn, fl} - case i0 && i1 && !i2 && i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - http.Flusher - io.ReaderFrom - }{t, hj, cn, fl, rf} - case i0 && i1 && i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - http.Pusher - }{t, hj, cn, pu} - case i0 && i1 && i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - http.Pusher - io.ReaderFrom - }{t, hj, cn, pu, rf} - case i0 && i1 && i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - http.Pusher - http.Flusher - }{t, hj, cn, pu, fl} - case i0 && i1 && i2 && i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - http.Pusher - http.Flusher - io.ReaderFrom - }{t, hj, cn, pu, fl, rf} - default: - return struct { - http.ResponseWriter - }{t} - } -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go b/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go deleted file mode 100644 index 05c6c56cc79..00000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ochttp - -import ( - "crypto/tls" - "net/http" - "net/http/httptrace" - "strings" - - "go.opencensus.io/trace" -) - -type spanAnnotator struct { - sp *trace.Span -} - -// TODO: Remove NewSpanAnnotator at the next release. - -// NewSpanAnnotator returns a httptrace.ClientTrace which annotates -// all emitted httptrace events on the provided Span. -// Deprecated: Use NewSpanAnnotatingClientTrace instead -func NewSpanAnnotator(r *http.Request, s *trace.Span) *httptrace.ClientTrace { - return NewSpanAnnotatingClientTrace(r, s) -} - -// NewSpanAnnotatingClientTrace returns a httptrace.ClientTrace which annotates -// all emitted httptrace events on the provided Span. 
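A sketch of how this constructor is typically wired up: the Transport's NewClientTrace field accepts it directly, so each outgoing request's span gets connection, DNS, and TLS annotations (the URL is illustrative):

package main

import (
	"net/http"

	"go.opencensus.io/plugin/ochttp"
)

func main() {
	client := &http.Client{
		Transport: &ochttp.Transport{
			NewClientTrace: ochttp.NewSpanAnnotatingClientTrace,
		},
	}
	if resp, err := client.Get("https://example.com/"); err == nil {
		resp.Body.Close()
	}
}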
-func NewSpanAnnotatingClientTrace(_ *http.Request, s *trace.Span) *httptrace.ClientTrace { - sa := spanAnnotator{sp: s} - - return &httptrace.ClientTrace{ - GetConn: sa.getConn, - GotConn: sa.gotConn, - PutIdleConn: sa.putIdleConn, - GotFirstResponseByte: sa.gotFirstResponseByte, - Got100Continue: sa.got100Continue, - DNSStart: sa.dnsStart, - DNSDone: sa.dnsDone, - ConnectStart: sa.connectStart, - ConnectDone: sa.connectDone, - TLSHandshakeStart: sa.tlsHandshakeStart, - TLSHandshakeDone: sa.tlsHandshakeDone, - WroteHeaders: sa.wroteHeaders, - Wait100Continue: sa.wait100Continue, - WroteRequest: sa.wroteRequest, - } -} - -func (s spanAnnotator) getConn(hostPort string) { - attrs := []trace.Attribute{ - trace.StringAttribute("httptrace.get_connection.host_port", hostPort), - } - s.sp.Annotate(attrs, "GetConn") -} - -func (s spanAnnotator) gotConn(info httptrace.GotConnInfo) { - attrs := []trace.Attribute{ - trace.BoolAttribute("httptrace.got_connection.reused", info.Reused), - trace.BoolAttribute("httptrace.got_connection.was_idle", info.WasIdle), - } - if info.WasIdle { - attrs = append(attrs, - trace.StringAttribute("httptrace.got_connection.idle_time", info.IdleTime.String())) - } - s.sp.Annotate(attrs, "GotConn") -} - -// PutIdleConn implements a httptrace.ClientTrace hook -func (s spanAnnotator) putIdleConn(err error) { - var attrs []trace.Attribute - if err != nil { - attrs = append(attrs, - trace.StringAttribute("httptrace.put_idle_connection.error", err.Error())) - } - s.sp.Annotate(attrs, "PutIdleConn") -} - -func (s spanAnnotator) gotFirstResponseByte() { - s.sp.Annotate(nil, "GotFirstResponseByte") -} - -func (s spanAnnotator) got100Continue() { - s.sp.Annotate(nil, "Got100Continue") -} - -func (s spanAnnotator) dnsStart(info httptrace.DNSStartInfo) { - attrs := []trace.Attribute{ - trace.StringAttribute("httptrace.dns_start.host", info.Host), - } - s.sp.Annotate(attrs, "DNSStart") -} - -func (s spanAnnotator) dnsDone(info httptrace.DNSDoneInfo) { - var addrs []string - for _, addr := range info.Addrs { - addrs = append(addrs, addr.String()) - } - attrs := []trace.Attribute{ - trace.StringAttribute("httptrace.dns_done.addrs", strings.Join(addrs, " , ")), - } - if info.Err != nil { - attrs = append(attrs, - trace.StringAttribute("httptrace.dns_done.error", info.Err.Error())) - } - s.sp.Annotate(attrs, "DNSDone") -} - -func (s spanAnnotator) connectStart(network, addr string) { - attrs := []trace.Attribute{ - trace.StringAttribute("httptrace.connect_start.network", network), - trace.StringAttribute("httptrace.connect_start.addr", addr), - } - s.sp.Annotate(attrs, "ConnectStart") -} - -func (s spanAnnotator) connectDone(network, addr string, err error) { - attrs := []trace.Attribute{ - trace.StringAttribute("httptrace.connect_done.network", network), - trace.StringAttribute("httptrace.connect_done.addr", addr), - } - if err != nil { - attrs = append(attrs, - trace.StringAttribute("httptrace.connect_done.error", err.Error())) - } - s.sp.Annotate(attrs, "ConnectDone") -} - -func (s spanAnnotator) tlsHandshakeStart() { - s.sp.Annotate(nil, "TLSHandshakeStart") -} - -func (s spanAnnotator) tlsHandshakeDone(_ tls.ConnectionState, err error) { - var attrs []trace.Attribute - if err != nil { - attrs = append(attrs, - trace.StringAttribute("httptrace.tls_handshake_done.error", err.Error())) - } - s.sp.Annotate(attrs, "TLSHandshakeDone") -} - -func (s spanAnnotator) wroteHeaders() { - s.sp.Annotate(nil, "WroteHeaders") -} - -func (s spanAnnotator) wait100Continue() { - s.sp.Annotate(nil, 
"Wait100Continue") -} - -func (s spanAnnotator) wroteRequest(info httptrace.WroteRequestInfo) { - var attrs []trace.Attribute - if info.Err != nil { - attrs = append(attrs, - trace.StringAttribute("httptrace.wrote_request.error", info.Err.Error())) - } - s.sp.Annotate(attrs, "WroteRequest") -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/stats.go b/vendor/go.opencensus.io/plugin/ochttp/stats.go deleted file mode 100644 index ee3729040dd..00000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/stats.go +++ /dev/null @@ -1,292 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ochttp - -import ( - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" -) - -// Deprecated: client HTTP measures. -var ( - // Deprecated: Use a Count aggregation over one of the other client measures to achieve the same effect. - ClientRequestCount = stats.Int64( - "opencensus.io/http/client/request_count", - "Number of HTTP requests started", - stats.UnitDimensionless) - // Deprecated: Use ClientSentBytes. - ClientRequestBytes = stats.Int64( - "opencensus.io/http/client/request_bytes", - "HTTP request body size if set as ContentLength (uncompressed)", - stats.UnitBytes) - // Deprecated: Use ClientReceivedBytes. - ClientResponseBytes = stats.Int64( - "opencensus.io/http/client/response_bytes", - "HTTP response body size (uncompressed)", - stats.UnitBytes) - // Deprecated: Use ClientRoundtripLatency. - ClientLatency = stats.Float64( - "opencensus.io/http/client/latency", - "End-to-end latency", - stats.UnitMilliseconds) -) - -// The following client HTTP measures are supported for use in custom views. 
-var ( - ClientSentBytes = stats.Int64( - "opencensus.io/http/client/sent_bytes", - "Total bytes sent in request body (not including headers)", - stats.UnitBytes, - ) - ClientReceivedBytes = stats.Int64( - "opencensus.io/http/client/received_bytes", - "Total bytes received in response bodies (not including headers but including error responses with bodies)", - stats.UnitBytes, - ) - ClientRoundtripLatency = stats.Float64( - "opencensus.io/http/client/roundtrip_latency", - "Time between first byte of request headers sent to last byte of response received, or terminal error", - stats.UnitMilliseconds, - ) -) - -// The following server HTTP measures are supported for use in custom views: -var ( - ServerRequestCount = stats.Int64( - "opencensus.io/http/server/request_count", - "Number of HTTP requests started", - stats.UnitDimensionless) - ServerRequestBytes = stats.Int64( - "opencensus.io/http/server/request_bytes", - "HTTP request body size if set as ContentLength (uncompressed)", - stats.UnitBytes) - ServerResponseBytes = stats.Int64( - "opencensus.io/http/server/response_bytes", - "HTTP response body size (uncompressed)", - stats.UnitBytes) - ServerLatency = stats.Float64( - "opencensus.io/http/server/latency", - "End-to-end latency", - stats.UnitMilliseconds) -) - -// The following tags are applied to stats recorded by this package. Host, Path -// and Method are applied to all measures. StatusCode is not applied to -// ClientRequestCount or ServerRequestCount, since it is recorded before the status is known. -var ( - // Host is the value of the HTTP Host header. - // - // The value of this tag can be controlled by the HTTP client, so you need - // to watch out for potentially generating high-cardinality labels in your - // metrics backend if you use this tag in views. - Host = tag.MustNewKey("http.host") - - // StatusCode is the numeric HTTP response status code, - // or "error" if a transport error occurred and no status code was read. - StatusCode = tag.MustNewKey("http.status") - - // Path is the URL path (not including query string) in the request. - // - // The value of this tag can be controlled by the HTTP client, so you need - // to watch out for potentially generating high-cardinality labels in your - // metrics backend if you use this tag in views. - Path = tag.MustNewKey("http.path") - - // Method is the HTTP method of the request, capitalized (GET, POST, etc.). - Method = tag.MustNewKey("http.method") - - // KeyServerRoute is a low cardinality string representing the logical - // handler of the request. This is usually the pattern registered on a - // ServeMux (or similar string). - KeyServerRoute = tag.MustNewKey("http_server_route") -) - -// Client tag keys. -var ( - // KeyClientMethod is the HTTP method, capitalized (i.e. GET, POST, PUT, DELETE, etc.). - KeyClientMethod = tag.MustNewKey("http_client_method") - // KeyClientPath is the URL path (not including query string). - KeyClientPath = tag.MustNewKey("http_client_path") - // KeyClientStatus is the HTTP status code as an integer (e.g. 200, 404, 500), or "error" if no response status line was received. - KeyClientStatus = tag.MustNewKey("http_client_status") - // KeyClientHost is the value of the request Host header. - KeyClientHost = tag.MustNewKey("http_client_host") -) - -// Default distributions used by views in this package.
-var ( - DefaultSizeDistribution = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296) - DefaultLatencyDistribution = view.Distribution(1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) -) - -// Package ochttp provides some convenience views for client measures. -// You still need to register these views for data to actually be collected. -var ( - ClientSentBytesDistribution = &view.View{ - Name: "opencensus.io/http/client/sent_bytes", - Measure: ClientSentBytes, - Aggregation: DefaultSizeDistribution, - Description: "Total bytes sent in request body (not including headers), by HTTP method and response status", - TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, - } - - ClientReceivedBytesDistribution = &view.View{ - Name: "opencensus.io/http/client/received_bytes", - Measure: ClientReceivedBytes, - Aggregation: DefaultSizeDistribution, - Description: "Total bytes received in response bodies (not including headers but including error responses with bodies), by HTTP method and response status", - TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, - } - - ClientRoundtripLatencyDistribution = &view.View{ - Name: "opencensus.io/http/client/roundtrip_latency", - Measure: ClientRoundtripLatency, - Aggregation: DefaultLatencyDistribution, - Description: "End-to-end latency, by HTTP method and response status", - TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, - } - - ClientCompletedCount = &view.View{ - Name: "opencensus.io/http/client/completed_count", - Measure: ClientRoundtripLatency, - Aggregation: view.Count(), - Description: "Count of completed requests, by HTTP method and response status", - TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, - } -) - -// Deprecated: Old client Views. -var ( - // Deprecated: No direct replacement, but see ClientCompletedCount. - ClientRequestCountView = &view.View{ - Name: "opencensus.io/http/client/request_count", - Description: "Count of HTTP requests started", - Measure: ClientRequestCount, - Aggregation: view.Count(), - } - - // Deprecated: Use ClientSentBytesDistribution. - ClientRequestBytesView = &view.View{ - Name: "opencensus.io/http/client/request_bytes", - Description: "Size distribution of HTTP request body", - Measure: ClientSentBytes, - Aggregation: DefaultSizeDistribution, - } - - // Deprecated: Use ClientReceivedBytesDistribution instead. - ClientResponseBytesView = &view.View{ - Name: "opencensus.io/http/client/response_bytes", - Description: "Size distribution of HTTP response body", - Measure: ClientReceivedBytes, - Aggregation: DefaultSizeDistribution, - } - - // Deprecated: Use ClientRoundtripLatencyDistribution instead. - ClientLatencyView = &view.View{ - Name: "opencensus.io/http/client/latency", - Description: "Latency distribution of HTTP requests", - Measure: ClientRoundtripLatency, - Aggregation: DefaultLatencyDistribution, - } - - // Deprecated: Use ClientCompletedCount instead. - ClientRequestCountByMethod = &view.View{ - Name: "opencensus.io/http/client/request_count_by_method", - Description: "Client request count by HTTP method", - TagKeys: []tag.Key{Method}, - Measure: ClientSentBytes, - Aggregation: view.Count(), - } - - // Deprecated: Use ClientCompletedCount instead. 
- ClientResponseCountByStatusCode = &view.View{ - Name: "opencensus.io/http/client/response_count_by_status_code", - Description: "Client response count by status code", - TagKeys: []tag.Key{StatusCode}, - Measure: ClientRoundtripLatency, - Aggregation: view.Count(), - } -) - -// Package ochttp provides some convenience views for server measures. -// You still need to register these views for data to actually be collected. -var ( - ServerRequestCountView = &view.View{ - Name: "opencensus.io/http/server/request_count", - Description: "Count of HTTP requests started", - Measure: ServerRequestCount, - Aggregation: view.Count(), - } - - ServerRequestBytesView = &view.View{ - Name: "opencensus.io/http/server/request_bytes", - Description: "Size distribution of HTTP request body", - Measure: ServerRequestBytes, - Aggregation: DefaultSizeDistribution, - } - - ServerResponseBytesView = &view.View{ - Name: "opencensus.io/http/server/response_bytes", - Description: "Size distribution of HTTP response body", - Measure: ServerResponseBytes, - Aggregation: DefaultSizeDistribution, - } - - ServerLatencyView = &view.View{ - Name: "opencensus.io/http/server/latency", - Description: "Latency distribution of HTTP requests", - Measure: ServerLatency, - Aggregation: DefaultLatencyDistribution, - } - - ServerRequestCountByMethod = &view.View{ - Name: "opencensus.io/http/server/request_count_by_method", - Description: "Server request count by HTTP method", - TagKeys: []tag.Key{Method}, - Measure: ServerRequestCount, - Aggregation: view.Count(), - } - - ServerResponseCountByStatusCode = &view.View{ - Name: "opencensus.io/http/server/response_count_by_status_code", - Description: "Server response count by status code", - TagKeys: []tag.Key{StatusCode}, - Measure: ServerLatency, - Aggregation: view.Count(), - } -) - -// DefaultClientViews are the default client views provided by this package. -// Deprecated: No replacement. Register the views you would like individually. -var DefaultClientViews = []*view.View{ - ClientRequestCountView, - ClientRequestBytesView, - ClientResponseBytesView, - ClientLatencyView, - ClientRequestCountByMethod, - ClientResponseCountByStatusCode, -} - -// DefaultServerViews are the default server views provided by this package. -// Deprecated: No replacement. Register the views you would like individually. -var DefaultServerViews = []*view.View{ - ServerRequestCountView, - ServerRequestBytesView, - ServerResponseBytesView, - ServerLatencyView, - ServerRequestCountByMethod, - ServerResponseCountByStatusCode, -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/trace.go b/vendor/go.opencensus.io/plugin/ochttp/trace.go deleted file mode 100644 index ed3a5db5611..00000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/trace.go +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
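Given the deprecation notes above, a minimal sketch of the suggested replacement for the Default*Views slices, registering only the views you actually want:

package main

import (
	"log"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/stats/view"
)

func main() {
	if err := view.Register(
		ochttp.ClientCompletedCount,
		ochttp.ClientRoundtripLatencyDistribution,
		ochttp.ServerLatencyView,
	); err != nil {
		log.Fatalf("failed to register ochttp views: %v", err)
	}
	// Views aggregate once registered; attach an exporter
	// (Prometheus, stdout, ...) to actually export the data.
}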
- -package ochttp - -import ( - "io" - "net/http" - "net/http/httptrace" - - "go.opencensus.io/plugin/ochttp/propagation/b3" - "go.opencensus.io/trace" - "go.opencensus.io/trace/propagation" -) - -// TODO(jbd): Add godoc examples. - -var defaultFormat propagation.HTTPFormat = &b3.HTTPFormat{} - -// Attributes recorded on the span for the requests. -// Only trace exporters will need them. -const ( - HostAttribute = "http.host" - MethodAttribute = "http.method" - PathAttribute = "http.path" - URLAttribute = "http.url" - UserAgentAttribute = "http.user_agent" - StatusCodeAttribute = "http.status_code" -) - -type traceTransport struct { - base http.RoundTripper - startOptions trace.StartOptions - format propagation.HTTPFormat - formatSpanName func(*http.Request) string - newClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace -} - -// TODO(jbd): Add message events for request and response size. - -// RoundTrip creates a trace.Span and inserts it into the outgoing request's headers. -// The created span can follow a parent span, if a parent is presented in -// the request's context. -func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) { - name := t.formatSpanName(req) - // TODO(jbd): Discuss whether we want to prefix - // outgoing requests with Sent. - ctx, span := trace.StartSpan(req.Context(), name, - trace.WithSampler(t.startOptions.Sampler), - trace.WithSpanKind(trace.SpanKindClient)) - - if t.newClientTrace != nil { - req = req.WithContext(httptrace.WithClientTrace(ctx, t.newClientTrace(req, span))) - } else { - req = req.WithContext(ctx) - } - - if t.format != nil { - // SpanContextToRequest will modify its Request argument, which is - // contrary to the contract for http.RoundTripper, so we need to - // pass it a copy of the Request. - // However, the Request struct itself was already copied by - // the WithContext calls above and so we just need to copy the header. - header := make(http.Header) - for k, v := range req.Header { - header[k] = v - } - req.Header = header - t.format.SpanContextToRequest(span.SpanContext(), req) - } - - span.AddAttributes(requestAttrs(req)...) - resp, err := t.base.RoundTrip(req) - if err != nil { - span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()}) - span.End() - return resp, err - } - - span.AddAttributes(responseAttrs(resp)...) - span.SetStatus(TraceStatus(resp.StatusCode, resp.Status)) - - // span.End() will be invoked after - // a read from resp.Body returns io.EOF or when - // resp.Body.Close() is invoked. - bt := &bodyTracker{rc: resp.Body, span: span} - resp.Body = wrappedBody(bt, resp.Body) - return resp, err -} - -// bodyTracker wraps a response.Body and invokes -// trace.EndSpan on encountering io.EOF on reading -// the body of the original response. -type bodyTracker struct { - rc io.ReadCloser - span *trace.Span -} - -var _ io.ReadCloser = (*bodyTracker)(nil) - -func (bt *bodyTracker) Read(b []byte) (int, error) { - n, err := bt.rc.Read(b) - - switch err { - case nil: - return n, nil - case io.EOF: - bt.span.End() - default: - // For all other errors, set the span status - bt.span.SetStatus(trace.Status{ - // Code 2 is the error code for Internal server error. - Code: 2, - Message: err.Error(), - }) - } - return n, err -} - -func (bt *bodyTracker) Close() error { - // Invoking endSpan on Close will help catch the cases - // in which a read returned a non-nil error, we set the - // span status but didn't end the span. 
- bt.span.End() - return bt.rc.Close() -} - -// CancelRequest cancels an in-flight request by closing its connection. -func (t *traceTransport) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := t.base.(canceler); ok { - cr.CancelRequest(req) - } -} - -func spanNameFromURL(req *http.Request) string { - return req.URL.Path -} - -func requestAttrs(r *http.Request) []trace.Attribute { - userAgent := r.UserAgent() - - attrs := make([]trace.Attribute, 0, 5) - attrs = append(attrs, - trace.StringAttribute(PathAttribute, r.URL.Path), - trace.StringAttribute(URLAttribute, r.URL.String()), - trace.StringAttribute(HostAttribute, r.Host), - trace.StringAttribute(MethodAttribute, r.Method), - ) - - if userAgent != "" { - attrs = append(attrs, trace.StringAttribute(UserAgentAttribute, userAgent)) - } - - return attrs -} - -func responseAttrs(resp *http.Response) []trace.Attribute { - return []trace.Attribute{ - trace.Int64Attribute(StatusCodeAttribute, int64(resp.StatusCode)), - } -} - -// TraceStatus is a utility to convert the HTTP status code to a trace.Status that -// represents the outcome as closely as possible. -func TraceStatus(httpStatusCode int, statusLine string) trace.Status { - var code int32 - if httpStatusCode < 200 || httpStatusCode >= 400 { - code = trace.StatusCodeUnknown - } - switch httpStatusCode { - case 499: - code = trace.StatusCodeCancelled - case http.StatusBadRequest: - code = trace.StatusCodeInvalidArgument - case http.StatusUnprocessableEntity: - code = trace.StatusCodeInvalidArgument - case http.StatusGatewayTimeout: - code = trace.StatusCodeDeadlineExceeded - case http.StatusNotFound: - code = trace.StatusCodeNotFound - case http.StatusForbidden: - code = trace.StatusCodePermissionDenied - case http.StatusUnauthorized: // 401 is actually unauthenticated. - code = trace.StatusCodeUnauthenticated - case http.StatusTooManyRequests: - code = trace.StatusCodeResourceExhausted - case http.StatusNotImplemented: - code = trace.StatusCodeUnimplemented - case http.StatusServiceUnavailable: - code = trace.StatusCodeUnavailable - case http.StatusOK: - code = trace.StatusCodeOK - case http.StatusConflict: - code = trace.StatusCodeAlreadyExists - } - - return trace.Status{Code: code, Message: codeToStr[code]} -} - -var codeToStr = map[int32]string{ - trace.StatusCodeOK: `OK`, - trace.StatusCodeCancelled: `CANCELLED`, - trace.StatusCodeUnknown: `UNKNOWN`, - trace.StatusCodeInvalidArgument: `INVALID_ARGUMENT`, - trace.StatusCodeDeadlineExceeded: `DEADLINE_EXCEEDED`, - trace.StatusCodeNotFound: `NOT_FOUND`, - trace.StatusCodeAlreadyExists: `ALREADY_EXISTS`, - trace.StatusCodePermissionDenied: `PERMISSION_DENIED`, - trace.StatusCodeResourceExhausted: `RESOURCE_EXHAUSTED`, - trace.StatusCodeFailedPrecondition: `FAILED_PRECONDITION`, - trace.StatusCodeAborted: `ABORTED`, - trace.StatusCodeOutOfRange: `OUT_OF_RANGE`, - trace.StatusCodeUnimplemented: `UNIMPLEMENTED`, - trace.StatusCodeInternal: `INTERNAL`, - trace.StatusCodeUnavailable: `UNAVAILABLE`, - trace.StatusCodeDataLoss: `DATA_LOSS`, - trace.StatusCodeUnauthenticated: `UNAUTHENTICATED`, -} - -func isHealthEndpoint(path string) bool { - // Health checking is pretty frequent and - // traces collected for health endpoints - // can be extremely noisy and expensive. - // Disable canonical health checking endpoints - // like /healthz and /_ah/health for now. 
- if path == "/healthz" || path == "/_ah/health" { - return true - } - return false -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go b/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go deleted file mode 100644 index 7d75cae2b18..00000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2019, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ochttp - -import ( - "io" -) - -// wrappedBody returns a wrapped version of the original -// Body and only implements the same combination of additional -// interfaces as the original. -func wrappedBody(wrapper io.ReadCloser, body io.ReadCloser) io.ReadCloser { - var ( - wr, i0 = body.(io.Writer) - ) - switch { - case !i0: - return struct { - io.ReadCloser - }{wrapper} - - case i0: - return struct { - io.ReadCloser - io.Writer - }{wrapper, wr} - default: - return struct { - io.ReadCloser - }{wrapper} - } -} diff --git a/vendor/go.opencensus.io/stats/doc.go b/vendor/go.opencensus.io/stats/doc.go deleted file mode 100644 index 31477a464fd..00000000000 --- a/vendor/go.opencensus.io/stats/doc.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -/* -Package stats contains support for OpenCensus stats recording. - -OpenCensus allows users to create typed measures, record measurements, -aggregate the collected data, and export the aggregated data. - -# Measures - -A measure represents a type of data point to be tracked and recorded. -For example, latency, request Mb/s, and response Mb/s are measures -to collect from a server. - -Measure constructors such as Int64 and Float64 automatically -register the measure by the given name. Each registered measure needs -to be unique by name. Measures also have a description and a unit. - -Libraries can define and export measures. Application authors can then -create views and collect and break down measures by the tags they are -interested in. - -# Recording measurements - -Measurement is a data point to be collected for a measure. For example, -for a latency (ms) measure, 100 is a measurement that represents a 100ms -latency event. Measurements are created from measures with -the current context. Tags from the current context are recorded with the -measurements if they are any. - -Recorded measurements are dropped immediately if no views are registered for them. 
-There is usually no need to conditionally enable and disable -recording to reduce cost. Recording of measurements is cheap. - -Libraries can always record measurements, and applications can later decide -on which measurements they want to collect by registering views. This allows -libraries to turn on the instrumentation by default. - -# Exemplars - -For a given recorded measurement, the associated exemplar is a diagnostic map -that gives more information about the measurement. - -When aggregated using a Distribution aggregation, an exemplar is kept for each -bucket in the Distribution. This allows you to easily find an example of a -measurement that fell into each bucket. - -For example, if you also use the OpenCensus trace package and you -record a measurement with a context that contains a sampled trace span, -then the trace span will be added to the exemplar associated with the measurement. - -When exported to a supporting back end, you should be able to easily navigate -to example traces that fell into each bucket in the Distribution. -*/ -package stats // import "go.opencensus.io/stats" diff --git a/vendor/go.opencensus.io/stats/internal/record.go b/vendor/go.opencensus.io/stats/internal/record.go deleted file mode 100644 index 436dc791f83..00000000000 --- a/vendor/go.opencensus.io/stats/internal/record.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -import ( - "go.opencensus.io/tag" -) - -// DefaultRecorder will be called for each Record call. -var DefaultRecorder func(tags *tag.Map, measurement interface{}, attachments map[string]interface{}) - -// MeasurementRecorder will be called for each Record call. This is the same as DefaultRecorder but -// avoids interface{} conversion. -// This will be a func(tags *tag.Map, measurement []Measurement, attachments map[string]interface{}) type, -// but is interface{} here to avoid import loops -var MeasurementRecorder interface{} - -// SubscriptionReporter reports when a view subscribed with a measure. -var SubscriptionReporter func(measure string) diff --git a/vendor/go.opencensus.io/stats/measure.go b/vendor/go.opencensus.io/stats/measure.go deleted file mode 100644 index 1ffd3cefc73..00000000000 --- a/vendor/go.opencensus.io/stats/measure.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
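A minimal sketch of the measure/record/view flow the package documentation above describes; the measure and view names are illustrative, not part of the vendored code:

package main

import (
	"context"
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
)

// Measure names should be prefixed with a domain you control.
var latencyMs = stats.Float64(
	"example.com/measures/request_latency",
	"Request latency",
	stats.UnitMilliseconds)

func main() {
	// Without a registered view, Record calls are cheap no-ops.
	if err := view.Register(&view.View{
		Name:        "example.com/views/request_latency",
		Measure:     latencyMs,
		Aggregation: view.Distribution(25, 100, 500, 1000),
	}); err != nil {
		log.Fatal(err)
	}
	stats.Record(context.Background(), latencyMs.M(42.5))
}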
-// - -package stats - -import ( - "sync" - "sync/atomic" -) - -// Measure represents a single numeric value to be tracked and recorded. -// For example, latency, request bytes, and response bytes could be measures -// to collect from a server. -// -// Measures by themselves have no outside effects. In order to be exported, -// the measure needs to be used in a View. If no Views are defined over a -// measure, there is very little cost in recording it. -type Measure interface { - // Name returns the name of this measure. - // - // Measure names are globally unique (among all libraries linked into your program). - // We recommend prefixing the measure name with a domain name relevant to your - // project or application. - // - // Measure names are never sent over the wire or exported to backends. - // They are only used to create Views. - Name() string - - // Description returns the human-readable description of this measure. - Description() string - - // Unit returns the units for the values this measure takes on. - // - // Units are encoded according to the case-sensitive abbreviations from the - // Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html - Unit() string -} - -// measureDescriptor is the untyped descriptor associated with each measure. -// Int64Measure and Float64Measure wrap measureDescriptor to provide typed -// recording APIs. -// Two Measures with the same name will have the same measureDescriptor. -type measureDescriptor struct { - subs int32 // access atomically - - name string - description string - unit string -} - -func (m *measureDescriptor) subscribe() { - atomic.StoreInt32(&m.subs, 1) -} - -func (m *measureDescriptor) subscribed() bool { - return atomic.LoadInt32(&m.subs) == 1 -} - -var ( - mu sync.RWMutex - measures = make(map[string]*measureDescriptor) -) - -func registerMeasureHandle(name, desc, unit string) *measureDescriptor { - mu.Lock() - defer mu.Unlock() - - if stored, ok := measures[name]; ok { - return stored - } - m := &measureDescriptor{ - name: name, - description: desc, - unit: unit, - } - measures[name] = m - return m -} - -// Measurement is the numeric value measured when recording stats. Each measure -// provides methods to create measurements of their kind. For example, Int64Measure -// provides M to convert an int64 into a measurement. -type Measurement struct { - v float64 - m Measure - desc *measureDescriptor -} - -// Value returns the value of the Measurement as a float64. -func (m Measurement) Value() float64 { - return m.v -} - -// Measure returns the Measure from which this Measurement was created. -func (m Measurement) Measure() Measure { - return m.m -} diff --git a/vendor/go.opencensus.io/stats/measure_float64.go b/vendor/go.opencensus.io/stats/measure_float64.go deleted file mode 100644 index f02c1eda845..00000000000 --- a/vendor/go.opencensus.io/stats/measure_float64.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -package stats - -// Float64Measure is a measure for float64 values. -type Float64Measure struct { - desc *measureDescriptor -} - -// M creates a new float64 measurement. -// Use Record to record measurements. -func (m *Float64Measure) M(v float64) Measurement { - return Measurement{ - m: m, - desc: m.desc, - v: v, - } -} - -// Float64 creates a new measure for float64 values. -// -// See the documentation for interface Measure for more guidance on the -// parameters of this function. -func Float64(name, description, unit string) *Float64Measure { - mi := registerMeasureHandle(name, description, unit) - return &Float64Measure{mi} -} - -// Name returns the name of the measure. -func (m *Float64Measure) Name() string { - return m.desc.name -} - -// Description returns the description of the measure. -func (m *Float64Measure) Description() string { - return m.desc.description -} - -// Unit returns the unit of the measure. -func (m *Float64Measure) Unit() string { - return m.desc.unit -} diff --git a/vendor/go.opencensus.io/stats/measure_int64.go b/vendor/go.opencensus.io/stats/measure_int64.go deleted file mode 100644 index d101d797358..00000000000 --- a/vendor/go.opencensus.io/stats/measure_int64.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package stats - -// Int64Measure is a measure for int64 values. -type Int64Measure struct { - desc *measureDescriptor -} - -// M creates a new int64 measurement. -// Use Record to record measurements. -func (m *Int64Measure) M(v int64) Measurement { - return Measurement{ - m: m, - desc: m.desc, - v: float64(v), - } -} - -// Int64 creates a new measure for int64 values. -// -// See the documentation for interface Measure for more guidance on the -// parameters of this function. -func Int64(name, description, unit string) *Int64Measure { - mi := registerMeasureHandle(name, description, unit) - return &Int64Measure{mi} -} - -// Name returns the name of the measure. -func (m *Int64Measure) Name() string { - return m.desc.name -} - -// Description returns the description of the measure. -func (m *Int64Measure) Description() string { - return m.desc.description -} - -// Unit returns the unit of the measure. -func (m *Int64Measure) Unit() string { - return m.desc.unit -} diff --git a/vendor/go.opencensus.io/stats/record.go b/vendor/go.opencensus.io/stats/record.go deleted file mode 100644 index 8b5b99803ce..00000000000 --- a/vendor/go.opencensus.io/stats/record.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
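A hedged aside on the Int64 and Float64 constructors above: because both go through registerMeasureHandle, measure identity is keyed by name. The snippet below (illustrative names, not from the diff) shows the consequence:

depthA := stats.Int64("example.com/queue_depth", "queue depth", stats.UnitDimensionless)
depthB := stats.Int64("example.com/queue_depth", "queue depth", stats.UnitDimensionless)
// depthA and depthB are distinct handles, but they share one underlying
// measureDescriptor: both report the same Name(), Description(), and
// Unit(), and recordings through either feed the same views.
m := depthA.M(42) // the int64 value is stored as a float64 internally
_ = m.Value()     // 42 as float64
_ = depthB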
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package stats - -import ( - "context" - - "go.opencensus.io/metric/metricdata" - "go.opencensus.io/stats/internal" - "go.opencensus.io/tag" -) - -func init() { - internal.SubscriptionReporter = func(measure string) { - mu.Lock() - measures[measure].subscribe() - mu.Unlock() - } -} - -// Recorder provides an interface for exporting measurement information from -// the static Record method by using the WithRecorder option. -type Recorder interface { - // Record records a set of measurements associated with the given tags and attachments. - // The second argument is a `[]Measurement`. - Record(*tag.Map, interface{}, map[string]interface{}) -} - -type recordOptions struct { - attachments metricdata.Attachments - mutators []tag.Mutator - measurements []Measurement - recorder Recorder -} - -// WithAttachments applies provided exemplar attachments. -func WithAttachments(attachments metricdata.Attachments) Options { - return func(ro *recordOptions) { - ro.attachments = attachments - } -} - -// WithTags applies provided tag mutators. -func WithTags(mutators ...tag.Mutator) Options { - return func(ro *recordOptions) { - ro.mutators = mutators - } -} - -// WithMeasurements applies provided measurements. -func WithMeasurements(measurements ...Measurement) Options { - return func(ro *recordOptions) { - ro.measurements = measurements - } -} - -// WithRecorder records the measurements to the specified `Recorder`, rather -// than to the global metrics recorder. -func WithRecorder(meter Recorder) Options { - return func(ro *recordOptions) { - ro.recorder = meter - } -} - -// Options apply changes to recordOptions. -type Options func(*recordOptions) - -func createRecordOption(ros ...Options) *recordOptions { - o := &recordOptions{} - for _, ro := range ros { - ro(o) - } - return o -} - -type measurementRecorder = func(tags *tag.Map, measurement []Measurement, attachments map[string]interface{}) - -// Record records one or multiple measurements with the same context at once. -// If there are any tags in the context, measurements will be tagged with them. -func Record(ctx context.Context, ms ...Measurement) { - // Record behaves the same as RecordWithOptions, but because we do not have to handle generic functionality - // (RecordOptions) we can reduce some allocations to speed up this hot path - if len(ms) == 0 { - return - } - recorder := internal.MeasurementRecorder.(measurementRecorder) - record := false - for _, m := range ms { - if m.desc.subscribed() { - record = true - break - } - } - if !record { - return - } - recorder(tag.FromContext(ctx), ms, nil) - return -} - -// RecordWithTags records one or multiple measurements at once. -// -// Measurements will be tagged with the tags in the context mutated by the mutators. -// RecordWithTags is useful if you want to record with tag mutations but don't want -// to propagate the mutations in the context. 
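A hedged usage sketch for the option helpers above, reusing the illustrative reqLatency measure from the earlier sketch; the tag key "method" is assumed, and RecordWithTags, defined next, is the convenience wrapper over the same options:

keyMethod := tag.MustNewKey("method")
// Record with tag mutations that apply only to this recording:
_ = stats.RecordWithTags(ctx,
	[]tag.Mutator{tag.Upsert(keyMethod, "GET")},
	reqLatency.M(12),
)
// The same call spelled with explicit options:
_ = stats.RecordWithOptions(ctx,
	stats.WithTags(tag.Upsert(keyMethod, "GET")),
	stats.WithMeasurements(reqLatency.M(12)),
)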
-func RecordWithTags(ctx context.Context, mutators []tag.Mutator, ms ...Measurement) error { - return RecordWithOptions(ctx, WithTags(mutators...), WithMeasurements(ms...)) -} - -// RecordWithOptions records measurements from the given options (if any) against context -// and tags and attachments in the options (if any). -// If there are any tags in the context, measurements will be tagged with them. -func RecordWithOptions(ctx context.Context, ros ...Options) error { - o := createRecordOption(ros...) - if len(o.measurements) == 0 { - return nil - } - recorder := internal.DefaultRecorder - if o.recorder != nil { - recorder = o.recorder.Record - } - if recorder == nil { - return nil - } - record := false - for _, m := range o.measurements { - if m.desc.subscribed() { - record = true - break - } - } - if !record { - return nil - } - if len(o.mutators) > 0 { - var err error - if ctx, err = tag.New(ctx, o.mutators...); err != nil { - return err - } - } - recorder(tag.FromContext(ctx), o.measurements, o.attachments) - return nil -} diff --git a/vendor/go.opencensus.io/stats/units.go b/vendor/go.opencensus.io/stats/units.go deleted file mode 100644 index 736399652cc..00000000000 --- a/vendor/go.opencensus.io/stats/units.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package stats - -// Units are encoded according to the case-sensitive abbreviations from the -// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html -const ( - UnitNone = "1" // Deprecated: Use UnitDimensionless. - UnitDimensionless = "1" - UnitBytes = "By" - UnitMilliseconds = "ms" - UnitSeconds = "s" -) diff --git a/vendor/go.opencensus.io/stats/view/aggregation.go b/vendor/go.opencensus.io/stats/view/aggregation.go deleted file mode 100644 index 61f72d20da3..00000000000 --- a/vendor/go.opencensus.io/stats/view/aggregation.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package view - -import "time" - -// AggType represents the type of aggregation function used on a View. -type AggType int - -// All available aggregation types. -const ( - AggTypeNone AggType = iota // no aggregation; reserved for future use. - AggTypeCount // the count aggregation, see Count. - AggTypeSum // the sum aggregation, see Sum. - AggTypeDistribution // the distribution aggregation, see Distribution. 
- AggTypeLastValue // the last value aggregation, see LastValue. -) - -func (t AggType) String() string { - return aggTypeName[t] -} - -var aggTypeName = map[AggType]string{ - AggTypeNone: "None", - AggTypeCount: "Count", - AggTypeSum: "Sum", - AggTypeDistribution: "Distribution", - AggTypeLastValue: "LastValue", -} - -// Aggregation represents a data aggregation method. Use one of the functions: -// Count, Sum, or Distribution to construct an Aggregation. -type Aggregation struct { - Type AggType // Type is the AggType of this Aggregation. - Buckets []float64 // Buckets are the bucket endpoints if this Aggregation represents a distribution, see Distribution. - - newData func(time.Time) AggregationData -} - -var ( - aggCount = &Aggregation{ - Type: AggTypeCount, - newData: func(t time.Time) AggregationData { - return &CountData{Start: t} - }, - } - aggSum = &Aggregation{ - Type: AggTypeSum, - newData: func(t time.Time) AggregationData { - return &SumData{Start: t} - }, - } -) - -// Count indicates that data collected and aggregated -// with this method will be turned into a count value. -// For example, total number of accepted requests can be -// aggregated by using Count. -func Count() *Aggregation { - return aggCount -} - -// Sum indicates that data collected and aggregated -// with this method will be summed up. -// For example, accumulated request bytes can be aggregated by using -// Sum. -func Sum() *Aggregation { - return aggSum -} - -// Distribution indicates that the desired aggregation is -// a histogram distribution. -// -// A distribution aggregation may contain a histogram of the values in the -// population. The bucket boundaries for that histogram are described -// by the bounds. This defines len(bounds)+1 buckets. - -// -// If len(bounds) >= 2 then the boundaries for bucket index i are: -// -// [-infinity, bounds[i]) for i = 0 -// [bounds[i-1], bounds[i]) for 0 < i < length -// [bounds[i-1], +infinity) for i = length -// -// If len(bounds) is 0 then there is no histogram associated with the -// distribution. There will be a single bucket with boundaries -// (-infinity, +infinity). -// -// If len(bounds) is 1 then there are no finite buckets, and that single -// element is the common boundary of the overflow and underflow buckets. -func Distribution(bounds ...float64) *Aggregation { - agg := &Aggregation{ - Type: AggTypeDistribution, - Buckets: bounds, - } - agg.newData = func(t time.Time) AggregationData { - return newDistributionData(agg, t) - } - return agg -} - -// LastValue only reports the last value recorded using this -// aggregation. All other measurements will be dropped. -func LastValue() *Aggregation { - return &Aggregation{ - Type: AggTypeLastValue, - newData: func(_ time.Time) AggregationData { - return &LastValueData{} - }, - } -} diff --git a/vendor/go.opencensus.io/stats/view/aggregation_data.go b/vendor/go.opencensus.io/stats/view/aggregation_data.go deleted file mode 100644 index d93b520662d..00000000000 --- a/vendor/go.opencensus.io/stats/view/aggregation_data.go +++ /dev/null @@ -1,336 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License.
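A worked instance of the Distribution bucket rules documented above (assumed example, not from the diff):

// view.Distribution(5, 10) defines len(bounds)+1 = 3 buckets:
//   bucket 0: [-inf, 5)
//   bucket 1: [5, 10)
//   bucket 2: [10, +inf)
agg := view.Distribution(5, 10)
// Lower bounds are inclusive and upper bounds exclusive, so a recorded
// value of 4.9 lands in bucket 0, 5 lands in bucket 1, and 10 (or
// anything larger) lands in bucket 2.
_ = agg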
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package view - -import ( - "math" - "time" - - "go.opencensus.io/metric/metricdata" -) - -// AggregationData represents an aggregated value from a collection. -// They are reported on the view data during exporting. -// Most users won't directly access aggregation data. -type AggregationData interface { - isAggregationData() bool - addSample(v float64, attachments map[string]interface{}, t time.Time) - clone() AggregationData - equal(other AggregationData) bool - toPoint(t metricdata.Type, time time.Time) metricdata.Point - StartTime() time.Time -} - -const epsilon = 1e-9 - -// CountData is the aggregated data for the Count aggregation. -// A count aggregation processes data and counts the recordings. -// -// Most users won't directly access count data. -type CountData struct { - Start time.Time - Value int64 -} - -func (a *CountData) isAggregationData() bool { return true } - -func (a *CountData) addSample(_ float64, _ map[string]interface{}, _ time.Time) { - a.Value = a.Value + 1 -} - -func (a *CountData) clone() AggregationData { - return &CountData{Value: a.Value, Start: a.Start} -} - -func (a *CountData) equal(other AggregationData) bool { - a2, ok := other.(*CountData) - if !ok { - return false - } - - return a.Start.Equal(a2.Start) && a.Value == a2.Value -} - -func (a *CountData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { - switch metricType { - case metricdata.TypeCumulativeInt64: - return metricdata.NewInt64Point(t, a.Value) - default: - panic("unsupported metricdata.Type") - } -} - -// StartTime returns the start time of the data being aggregated by CountData. -func (a *CountData) StartTime() time.Time { - return a.Start -} - -// SumData is the aggregated data for the Sum aggregation. -// A sum aggregation processes data and sums up the recordings. -// -// Most users won't directly access sum data. -type SumData struct { - Start time.Time - Value float64 -} - -func (a *SumData) isAggregationData() bool { return true } - -func (a *SumData) addSample(v float64, _ map[string]interface{}, _ time.Time) { - a.Value += v -} - -func (a *SumData) clone() AggregationData { - return &SumData{Value: a.Value, Start: a.Start} -} - -func (a *SumData) equal(other AggregationData) bool { - a2, ok := other.(*SumData) - if !ok { - return false - } - return a.Start.Equal(a2.Start) && math.Pow(a.Value-a2.Value, 2) < epsilon -} - -func (a *SumData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { - switch metricType { - case metricdata.TypeCumulativeInt64: - return metricdata.NewInt64Point(t, int64(a.Value)) - case metricdata.TypeCumulativeFloat64: - return metricdata.NewFloat64Point(t, a.Value) - default: - panic("unsupported metricdata.Type") - } -} - -// StartTime returns the start time of the data being aggregated by SumData. -func (a *SumData) StartTime() time.Time { - return a.Start -} - -// DistributionData is the aggregated data for the -// Distribution aggregation. -// -// Most users won't directly access distribution data. -// -// For a distribution with N bounds, the associated DistributionData will have -// N+1 buckets.
-type DistributionData struct { - Count int64 // number of data points aggregated - Min float64 // minimum value in the distribution - Max float64 // max value in the distribution - Mean float64 // mean of the distribution - SumOfSquaredDev float64 // sum of the squared deviation from the mean - CountPerBucket []int64 // number of occurrences per bucket - // ExemplarsPerBucket is a slice of the same length as CountPerBucket containing - // an exemplar for the associated bucket, or nil. - ExemplarsPerBucket []*metricdata.Exemplar - bounds []float64 // histogram distribution of the values - Start time.Time -} - -func newDistributionData(agg *Aggregation, t time.Time) *DistributionData { - bucketCount := len(agg.Buckets) + 1 - return &DistributionData{ - CountPerBucket: make([]int64, bucketCount), - ExemplarsPerBucket: make([]*metricdata.Exemplar, bucketCount), - bounds: agg.Buckets, - Min: math.MaxFloat64, - Max: math.SmallestNonzeroFloat64, - Start: t, - } -} - -// Sum returns the sum of all samples collected. -func (a *DistributionData) Sum() float64 { return a.Mean * float64(a.Count) } - -func (a *DistributionData) variance() float64 { - if a.Count <= 1 { - return 0 - } - return a.SumOfSquaredDev / float64(a.Count-1) -} - -func (a *DistributionData) isAggregationData() bool { return true } - -// TODO(songy23): support exemplar attachments. -func (a *DistributionData) addSample(v float64, attachments map[string]interface{}, t time.Time) { - if v < a.Min { - a.Min = v - } - if v > a.Max { - a.Max = v - } - a.Count++ - a.addToBucket(v, attachments, t) - - if a.Count == 1 { - a.Mean = v - return - } - - oldMean := a.Mean - a.Mean = a.Mean + (v-a.Mean)/float64(a.Count) - a.SumOfSquaredDev = a.SumOfSquaredDev + (v-oldMean)*(v-a.Mean) -} - -func (a *DistributionData) addToBucket(v float64, attachments map[string]interface{}, t time.Time) { - var count *int64 - var i int - var b float64 - for i, b = range a.bounds { - if v < b { - count = &a.CountPerBucket[i] - break - } - } - if count == nil { // Last bucket. - i = len(a.bounds) - count = &a.CountPerBucket[i] - } - *count++ - if exemplar := getExemplar(v, attachments, t); exemplar != nil { - a.ExemplarsPerBucket[i] = exemplar - } -} - -func getExemplar(v float64, attachments map[string]interface{}, t time.Time) *metricdata.Exemplar { - if len(attachments) == 0 { - return nil - } - return &metricdata.Exemplar{ - Value: v, - Timestamp: t, - Attachments: attachments, - } -} - -func (a *DistributionData) clone() AggregationData { - c := *a - c.CountPerBucket = append([]int64(nil), a.CountPerBucket...) - c.ExemplarsPerBucket = append([]*metricdata.Exemplar(nil), a.ExemplarsPerBucket...)
- return &c -} - -func (a *DistributionData) equal(other AggregationData) bool { - a2, ok := other.(*DistributionData) - if !ok { - return false - } - if a2 == nil { - return false - } - if len(a.CountPerBucket) != len(a2.CountPerBucket) { - return false - } - for i := range a.CountPerBucket { - if a.CountPerBucket[i] != a2.CountPerBucket[i] { - return false - } - } - return a.Start.Equal(a2.Start) && - a.Count == a2.Count && - a.Min == a2.Min && - a.Max == a2.Max && - math.Pow(a.Mean-a2.Mean, 2) < epsilon && math.Pow(a.variance()-a2.variance(), 2) < epsilon -} - -func (a *DistributionData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { - switch metricType { - case metricdata.TypeCumulativeDistribution: - buckets := []metricdata.Bucket{} - for i := 0; i < len(a.CountPerBucket); i++ { - buckets = append(buckets, metricdata.Bucket{ - Count: a.CountPerBucket[i], - Exemplar: a.ExemplarsPerBucket[i], - }) - } - bucketOptions := &metricdata.BucketOptions{Bounds: a.bounds} - - val := &metricdata.Distribution{ - Count: a.Count, - Sum: a.Sum(), - SumOfSquaredDeviation: a.SumOfSquaredDev, - BucketOptions: bucketOptions, - Buckets: buckets, - } - return metricdata.NewDistributionPoint(t, val) - - default: - // TODO: [rghetia] when we have a use case for TypeGaugeDistribution. - panic("unsupported metricdata.Type") - } -} - -// StartTime returns the start time of the data being aggregated by DistributionData. -func (a *DistributionData) StartTime() time.Time { - return a.Start -} - -// LastValueData returns the last value recorded for LastValue aggregation. -type LastValueData struct { - Value float64 -} - -func (l *LastValueData) isAggregationData() bool { - return true -} - -func (l *LastValueData) addSample(v float64, _ map[string]interface{}, _ time.Time) { - l.Value = v -} - -func (l *LastValueData) clone() AggregationData { - return &LastValueData{l.Value} -} - -func (l *LastValueData) equal(other AggregationData) bool { - a2, ok := other.(*LastValueData) - if !ok { - return false - } - return l.Value == a2.Value -} - -func (l *LastValueData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { - switch metricType { - case metricdata.TypeGaugeInt64: - return metricdata.NewInt64Point(t, int64(l.Value)) - case metricdata.TypeGaugeFloat64: - return metricdata.NewFloat64Point(t, l.Value) - default: - panic("unsupported metricdata.Type") - } -} - -// StartTime returns an empty time value as start time is not recorded when using last value -// aggregation. -func (l *LastValueData) StartTime() time.Time { - return time.Time{} -} - -// ClearStart clears the Start field from data if present. Useful for testing in cases where the -// start time will be nondeterministic. -func ClearStart(data AggregationData) { - switch data := data.(type) { - case *CountData: - data.Start = time.Time{} - case *SumData: - data.Start = time.Time{} - case *DistributionData: - data.Start = time.Time{} - } -} diff --git a/vendor/go.opencensus.io/stats/view/collector.go b/vendor/go.opencensus.io/stats/view/collector.go deleted file mode 100644 index bcd6e08c748..00000000000 --- a/vendor/go.opencensus.io/stats/view/collector.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
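DistributionData.addSample above updates Mean and SumOfSquaredDev in a single pass; this is Welford's online algorithm. A standalone sketch of the same update (assumed helper, not part of the deleted file):

// welford mirrors addSample's incremental mean/sum-of-squared-deviation
// update without storing the individual samples.
func welford(samples []float64) (mean, sumSqDev float64) {
	var n float64
	for _, v := range samples {
		n++
		oldMean := mean
		mean += (v - mean) / n
		sumSqDev += (v - oldMean) * (v - mean)
	}
	return mean, sumSqDev
}

For samples 2, 4, 6 this yields mean 4 and sumSqDev 8, matching the two-pass computation; variance() above then divides by Count-1 to obtain the sample variance.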
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package view - -import ( - "sort" - "time" - - "go.opencensus.io/internal/tagencoding" - "go.opencensus.io/tag" -) - -type collector struct { - // signatures holds the aggregation values for each unique tag signature - // (values for all keys) to its aggregator. - signatures map[string]AggregationData - // Aggregation is the description of the aggregation to perform for this - // view. - a *Aggregation -} - -func (c *collector) addSample(s string, v float64, attachments map[string]interface{}, t time.Time) { - aggregator, ok := c.signatures[s] - if !ok { - aggregator = c.a.newData(t) - c.signatures[s] = aggregator - } - aggregator.addSample(v, attachments, t) -} - -// collectedRows returns a snapshot of the collected Row values. -func (c *collector) collectedRows(keys []tag.Key) []*Row { - rows := make([]*Row, 0, len(c.signatures)) - for sig, aggregator := range c.signatures { - tags := decodeTags([]byte(sig), keys) - row := &Row{Tags: tags, Data: aggregator.clone()} - rows = append(rows, row) - } - return rows -} - -func (c *collector) clearRows() { - c.signatures = make(map[string]AggregationData) -} - -// encodeWithKeys encodes the map by using values -// only associated with the keys provided. -func encodeWithKeys(m *tag.Map, keys []tag.Key) []byte { - // Compute the buffer length we will need ahead of time to avoid resizing later - reqLen := 0 - for _, k := range keys { - s, _ := m.Value(k) - // We will store each key + its length - reqLen += len(s) + 1 - } - vb := &tagencoding.Values{ - Buffer: make([]byte, reqLen), - } - for _, k := range keys { - v, _ := m.Value(k) - vb.WriteValue([]byte(v)) - } - return vb.Bytes() -} - -// decodeTags decodes tags from the buffer and -// orders them by the keys. -func decodeTags(buf []byte, keys []tag.Key) []tag.Tag { - vb := &tagencoding.Values{Buffer: buf} - var tags []tag.Tag - for _, k := range keys { - v := vb.ReadValue() - if v != nil { - tags = append(tags, tag.Tag{Key: k, Value: string(v)}) - } - } - vb.ReadIndex = 0 - sort.Slice(tags, func(i, j int) bool { return tags[i].Key.Name() < tags[j].Key.Name() }) - return tags -} diff --git a/vendor/go.opencensus.io/stats/view/doc.go b/vendor/go.opencensus.io/stats/view/doc.go deleted file mode 100644 index 60bf0e39254..00000000000 --- a/vendor/go.opencensus.io/stats/view/doc.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -// Package view contains support for collecting and exposing aggregates over stats. -// -// In order to collect measurements, views need to be defined and registered.
-// A view allows recorded measurements to be filtered and aggregated. -// -// All recorded measurements can be grouped by a list of tags. -// -// OpenCensus provides several aggregation methods: Count, Sum, Distribution, and LastValue. -// -// Count only counts the number of measurement points recorded. -// Distribution provides a statistical summary of the aggregated data by counting -// how many recorded measurements fall into each bucket. -// Sum adds up the measurement values. -// LastValue just keeps track of the most recently recorded measurement value. -// All aggregations are cumulative. -// -// Views can be registered and unregistered at any time during program execution. -// -// Libraries can define views but it is recommended that in most cases registering -// views be left up to applications. -// -// # Exporting -// -// Collected and aggregated data can be exported to a metric collection -// backend by registering its exporter. -// -// Multiple exporters can be registered to upload the data to various -// different back ends. -package view // import "go.opencensus.io/stats/view" - -// TODO(acetechnologist): Add a link to the language independent OpenCensus -// spec when it is available. diff --git a/vendor/go.opencensus.io/stats/view/export.go b/vendor/go.opencensus.io/stats/view/export.go deleted file mode 100644 index 73ba11f5b6e..00000000000 --- a/vendor/go.opencensus.io/stats/view/export.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package view - -// Exporter exports the collected records as view data. -// -// The ExportView method should return quickly; if an -// Exporter takes a significant amount of time to -// process a Data, that work should be done on another goroutine. -// -// It is safe to assume that ExportView will not be called concurrently from -// multiple goroutines. -// -// The Data should not be modified. -type Exporter interface { - ExportView(viewData *Data) -} - -// RegisterExporter registers an exporter. -// Collected data will be reported via all the -// registered exporters. Once you no longer -// want data to be exported, invoke UnregisterExporter -// with the previously registered exporter. -// -// Binaries can register exporters, libraries shouldn't register exporters. -func RegisterExporter(e Exporter) { - defaultWorker.RegisterExporter(e) -} - -// UnregisterExporter unregisters an exporter. -func UnregisterExporter(e Exporter) { - defaultWorker.UnregisterExporter(e) -} diff --git a/vendor/go.opencensus.io/stats/view/view.go b/vendor/go.opencensus.io/stats/view/view.go deleted file mode 100644 index 293b54ecbed..00000000000 --- a/vendor/go.opencensus.io/stats/view/view.go +++ /dev/null @@ -1,221 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License.
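A minimal assumed implementation of the Exporter interface above (not from the diff): it logs each exported row and is registered with the package-level helpers.

type logExporter struct{}

// ExportView is called on the reporting interval; it must return quickly.
func (logExporter) ExportView(d *view.Data) {
	for _, row := range d.Rows {
		log.Printf("%s %v: %v", d.View.Name, row.Tags, row.Data)
	}
}

// view.RegisterExporter(logExporter{}) starts delivery of view data;
// view.UnregisterExporter(logExporter{}) stops it.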
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package view - -import ( - "bytes" - "errors" - "fmt" - "reflect" - "sort" - "sync/atomic" - "time" - - "go.opencensus.io/metric/metricdata" - "go.opencensus.io/stats" - "go.opencensus.io/tag" -) - -// View allows users to aggregate the recorded stats.Measurements. -// Views need to be passed to the Register function before data will be -// collected and sent to Exporters. -type View struct { - Name string // Name of View. Must be unique. If unset, will default to the name of the Measure. - Description string // Description is a human-readable description for this view. - - // TagKeys are the tag keys describing the grouping of this view. - // A single Row will be produced for each combination of associated tag values. - TagKeys []tag.Key - - // Measure is a stats.Measure to aggregate in this view. - Measure stats.Measure - - // Aggregation is the aggregation function to apply to the set of Measurements. - Aggregation *Aggregation -} - -// WithName returns a copy of the View with a new name. This is useful for -// renaming views to cope with limitations placed on metric names by various -// backends. -func (v *View) WithName(name string) *View { - vNew := *v - vNew.Name = name - return &vNew -} - -// same compares two views and returns true if they represent the same aggregation. -func (v *View) same(other *View) bool { - if v == other { - return true - } - if v == nil { - return false - } - return reflect.DeepEqual(v.Aggregation, other.Aggregation) && - v.Measure.Name() == other.Measure.Name() -} - -// ErrNegativeBucketBounds error returned if histogram contains negative bounds. -// -// Deprecated: this should not be public. -var ErrNegativeBucketBounds = errors.New("negative bucket bounds not supported") - -// canonicalize canonicalizes v by setting explicit -// defaults for Name and Description and sorting the TagKeys -func (v *View) canonicalize() error { - if v.Measure == nil { - return fmt.Errorf("cannot register view %q: measure not set", v.Name) - } - if v.Aggregation == nil { - return fmt.Errorf("cannot register view %q: aggregation not set", v.Name) - } - if v.Name == "" { - v.Name = v.Measure.Name() - } - if v.Description == "" { - v.Description = v.Measure.Description() - } - if err := checkViewName(v.Name); err != nil { - return err - } - sort.Slice(v.TagKeys, func(i, j int) bool { - return v.TagKeys[i].Name() < v.TagKeys[j].Name() - }) - sort.Float64s(v.Aggregation.Buckets) - for _, b := range v.Aggregation.Buckets { - if b < 0 { - return ErrNegativeBucketBounds - } - } - // drop 0 bucket silently. - v.Aggregation.Buckets = dropZeroBounds(v.Aggregation.Buckets...) - - return nil -} - -func dropZeroBounds(bounds ...float64) []float64 { - for i, bound := range bounds { - if bound > 0 { - return bounds[i:] - } - } - return []float64{} -} - -// viewInternal is the internal representation of a View. -type viewInternal struct { - view *View // view is the canonicalized View definition associated with this view. 
- subscribed uint32 // 1 if someone is subscribed and data need to be exported, use atomic to access - collector *collector - metricDescriptor *metricdata.Descriptor -} - -func newViewInternal(v *View) (*viewInternal, error) { - return &viewInternal{ - view: v, - collector: &collector{make(map[string]AggregationData), v.Aggregation}, - metricDescriptor: viewToMetricDescriptor(v), - }, nil -} - -func (v *viewInternal) subscribe() { - atomic.StoreUint32(&v.subscribed, 1) -} - -func (v *viewInternal) unsubscribe() { - atomic.StoreUint32(&v.subscribed, 0) -} - -// isSubscribed returns true if the view is exporting -// data by subscription. -func (v *viewInternal) isSubscribed() bool { - return atomic.LoadUint32(&v.subscribed) == 1 -} - -func (v *viewInternal) clearRows() { - v.collector.clearRows() -} - -func (v *viewInternal) collectedRows() []*Row { - return v.collector.collectedRows(v.view.TagKeys) -} - -func (v *viewInternal) addSample(m *tag.Map, val float64, attachments map[string]interface{}, t time.Time) { - if !v.isSubscribed() { - return - } - sig := string(encodeWithKeys(m, v.view.TagKeys)) - v.collector.addSample(sig, val, attachments, t) -} - -// A Data is a set of rows about usage of the single measure associated -// with the given view. Each row is specific to a unique set of tags. -type Data struct { - View *View - Start, End time.Time - Rows []*Row -} - -// Row is the collected value for a specific set of key value pairs a.k.a tags. -type Row struct { - Tags []tag.Tag - Data AggregationData -} - -func (r *Row) String() string { - var buffer bytes.Buffer - buffer.WriteString("{ ") - buffer.WriteString("{ ") - for _, t := range r.Tags { - buffer.WriteString(fmt.Sprintf("{%v %v}", t.Key.Name(), t.Value)) - } - buffer.WriteString(" }") - buffer.WriteString(fmt.Sprintf("%v", r.Data)) - buffer.WriteString(" }") - return buffer.String() -} - -// Equal returns true if both rows are equal. Tags are expected to be ordered -// by the key name. Even if both rows have the same tags but the tags appear in -// different orders it will return false. -func (r *Row) Equal(other *Row) bool { - if r == other { - return true - } - return reflect.DeepEqual(r.Tags, other.Tags) && r.Data.equal(other.Data) -} - -const maxNameLength = 255 - -// Returns true if the given string contains only printable characters. -func isPrintable(str string) bool { - for _, r := range str { - if !(r >= ' ' && r <= '~') { - return false - } - } - return true -} - -func checkViewName(name string) error { - if len(name) > maxNameLength { - return fmt.Errorf("view name cannot be larger than %v", maxNameLength) - } - if !isPrintable(name) { - return fmt.Errorf("view name needs to be an ASCII string") - } - return nil -} diff --git a/vendor/go.opencensus.io/stats/view/view_to_metric.go b/vendor/go.opencensus.io/stats/view/view_to_metric.go deleted file mode 100644 index 57d615ec7e1..00000000000 --- a/vendor/go.opencensus.io/stats/view/view_to_metric.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2019, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
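A hedged sketch of the canonicalization defaults and WithName described above, reusing the illustrative reqLatency measure from the earlier sketch:

// With Name and Description unset, canonicalize fills them in from the
// measure at Register time.
base := &view.View{
	Measure:     reqLatency, // Name defaults to reqLatency.Name()
	Aggregation: view.Count(),
}
// WithName returns a copy under a backend-friendly name; the original
// view is left untouched.
renamed := base.WithName("rpc_request_count")
_ = renamed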
-// See the License for the specific language governing permissions and -// limitations under the License. -// - -package view - -import ( - "time" - - "go.opencensus.io/resource" - - "go.opencensus.io/metric/metricdata" - "go.opencensus.io/stats" -) - -func getUnit(unit string) metricdata.Unit { - switch unit { - case "1": - return metricdata.UnitDimensionless - case "ms": - return metricdata.UnitMilliseconds - case "By": - return metricdata.UnitBytes - } - return metricdata.UnitDimensionless -} - -func getType(v *View) metricdata.Type { - m := v.Measure - agg := v.Aggregation - - switch agg.Type { - case AggTypeSum: - switch m.(type) { - case *stats.Int64Measure: - return metricdata.TypeCumulativeInt64 - case *stats.Float64Measure: - return metricdata.TypeCumulativeFloat64 - default: - panic("unexpected measure type") - } - case AggTypeDistribution: - return metricdata.TypeCumulativeDistribution - case AggTypeLastValue: - switch m.(type) { - case *stats.Int64Measure: - return metricdata.TypeGaugeInt64 - case *stats.Float64Measure: - return metricdata.TypeGaugeFloat64 - default: - panic("unexpected measure type") - } - case AggTypeCount: - switch m.(type) { - case *stats.Int64Measure: - return metricdata.TypeCumulativeInt64 - case *stats.Float64Measure: - return metricdata.TypeCumulativeInt64 - default: - panic("unexpected measure type") - } - default: - panic("unexpected aggregation type") - } -} - -func getLabelKeys(v *View) []metricdata.LabelKey { - labelKeys := []metricdata.LabelKey{} - for _, k := range v.TagKeys { - labelKeys = append(labelKeys, metricdata.LabelKey{Key: k.Name()}) - } - return labelKeys -} - -func viewToMetricDescriptor(v *View) *metricdata.Descriptor { - return &metricdata.Descriptor{ - Name: v.Name, - Description: v.Description, - Unit: convertUnit(v), - Type: getType(v), - LabelKeys: getLabelKeys(v), - } -} - -func convertUnit(v *View) metricdata.Unit { - switch v.Aggregation.Type { - case AggTypeCount: - return metricdata.UnitDimensionless - default: - return getUnit(v.Measure.Unit()) - } -} - -func toLabelValues(row *Row, expectedKeys []metricdata.LabelKey) []metricdata.LabelValue { - labelValues := []metricdata.LabelValue{} - tagMap := make(map[string]string) - for _, tag := range row.Tags { - tagMap[tag.Key.Name()] = tag.Value - } - - for _, key := range expectedKeys { - if val, ok := tagMap[key.Key]; ok { - labelValues = append(labelValues, metricdata.NewLabelValue(val)) - } else { - labelValues = append(labelValues, metricdata.LabelValue{}) - } - } - return labelValues -} - -func rowToTimeseries(v *viewInternal, row *Row, now time.Time) *metricdata.TimeSeries { - return &metricdata.TimeSeries{ - Points: []metricdata.Point{row.Data.toPoint(v.metricDescriptor.Type, now)}, - LabelValues: toLabelValues(row, v.metricDescriptor.LabelKeys), - StartTime: row.Data.StartTime(), - } -} - -func viewToMetric(v *viewInternal, r *resource.Resource, now time.Time) *metricdata.Metric { - rows := v.collectedRows() - if len(rows) == 0 { - return nil - } - - ts := []*metricdata.TimeSeries{} - for _, row := range rows { - ts = append(ts, rowToTimeseries(v, row, now)) - } - - m := &metricdata.Metric{ - Descriptor: *v.metricDescriptor, - TimeSeries: ts, - Resource: r, - } - return m -} diff --git a/vendor/go.opencensus.io/stats/view/worker.go b/vendor/go.opencensus.io/stats/view/worker.go deleted file mode 100644 index 6a79cd8a34c..00000000000 --- a/vendor/go.opencensus.io/stats/view/worker.go +++ /dev/null @@ -1,424 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed 
under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package view - -import ( - "fmt" - "sync" - "time" - - "go.opencensus.io/resource" - - "go.opencensus.io/metric/metricdata" - "go.opencensus.io/metric/metricproducer" - "go.opencensus.io/stats" - "go.opencensus.io/stats/internal" - "go.opencensus.io/tag" -) - -func init() { - defaultWorker = NewMeter().(*worker) - go defaultWorker.start() - internal.DefaultRecorder = record - internal.MeasurementRecorder = recordMeasurement -} - -type measureRef struct { - measure string - views map[*viewInternal]struct{} -} - -type worker struct { - measures map[string]*measureRef - views map[string]*viewInternal - viewStartTimes map[*viewInternal]time.Time - - timer *time.Ticker - c chan command - quit, done chan bool - mu sync.RWMutex - r *resource.Resource - - exportersMu sync.RWMutex - exporters map[Exporter]struct{} -} - -// Meter defines an interface which allows a single process to maintain -// multiple sets of metrics exports (intended for the advanced case where a -// single process wants to report metrics about multiple objects, such as -// multiple databases or HTTP services). -// -// Note that this is an advanced use case, and the static functions in this -// module should cover the common use cases. -type Meter interface { - stats.Recorder - // Find returns a registered view associated with this name. - // If no registered view is found, nil is returned. - Find(name string) *View - // Register begins collecting data for the given views. - // Once a view is registered, it reports data to the registered exporters. - Register(views ...*View) error - // Unregister the given views. Data will no longer be exported for these views - // after Unregister returns. - // It is not necessary to unregister from views you expect to collect for the - // duration of your program execution. - Unregister(views ...*View) - // SetReportingPeriod sets the interval between reporting aggregated views in - // the program. If duration is less than or equal to zero, it enables the - // default behavior. - // - // Note: each exporter makes different promises about what the lowest supported - // duration is. For example, the Stackdriver exporter recommends a value no - // lower than 1 minute. Consult each exporter per your needs. - SetReportingPeriod(time.Duration) - - // RegisterExporter registers an exporter. - // Collected data will be reported via all the - // registered exporters. Once you no longer - // want data to be exported, invoke UnregisterExporter - // with the previously registered exporter. - // - // Binaries can register exporters, libraries shouldn't register exporters. - RegisterExporter(Exporter) - // UnregisterExporter unregisters an exporter. - UnregisterExporter(Exporter) - // SetResource may be used to set the Resource associated with this registry. - // This is intended to be used in cases where a single process exports metrics - // for multiple Resources, typically in a multi-tenant situation.
- SetResource(*resource.Resource) - - // Start causes the Meter to start processing Record calls and aggregating - // statistics as well as exporting data. - Start() - // Stop causes the Meter to stop processing calls and terminate data export. - Stop() - - // RetrieveData gets a snapshot of the data collected for the view registered - // with the given name. It is intended for testing only. - RetrieveData(viewName string) ([]*Row, error) -} - -var _ Meter = (*worker)(nil) - -var defaultWorker *worker - -var defaultReportingDuration = 10 * time.Second - -// Find returns a registered view associated with this name. -// If no registered view is found, nil is returned. -func Find(name string) (v *View) { - return defaultWorker.Find(name) -} - -// Find returns a registered view associated with this name. -// If no registered view is found, nil is returned. -func (w *worker) Find(name string) (v *View) { - req := &getViewByNameReq{ - name: name, - c: make(chan *getViewByNameResp), - } - w.c <- req - resp := <-req.c - return resp.v -} - -// Register begins collecting data for the given views. -// Once a view is registered, it reports data to the registered exporters. -func Register(views ...*View) error { - return defaultWorker.Register(views...) -} - -// Register begins collecting data for the given views. -// Once a view is registered, it reports data to the registered exporters. -func (w *worker) Register(views ...*View) error { - req := &registerViewReq{ - views: views, - err: make(chan error), - } - w.c <- req - return <-req.err -} - -// Unregister the given views. Data will no longer be exported for these views -// after Unregister returns. -// It is not necessary to unregister from views you expect to collect for the -// duration of your program execution. -func Unregister(views ...*View) { - defaultWorker.Unregister(views...) -} - -// Unregister the given views. Data will no longer be exported for these views -// after Unregister returns. -// It is not necessary to unregister from views you expect to collect for the -// duration of your program execution. -func (w *worker) Unregister(views ...*View) { - names := make([]string, len(views)) - for i := range views { - names[i] = views[i].Name - } - req := &unregisterFromViewReq{ - views: names, - done: make(chan struct{}), - } - w.c <- req - <-req.done -} - -// RetrieveData gets a snapshot of the data collected for the view registered -// with the given name. It is intended for testing only. -func RetrieveData(viewName string) ([]*Row, error) { - return defaultWorker.RetrieveData(viewName) -} - -// RetrieveData gets a snapshot of the data collected for the view registered -// with the given name. It is intended for testing only. -func (w *worker) RetrieveData(viewName string) ([]*Row, error) { - req := &retrieveDataReq{ - now: time.Now(), - v: viewName, - c: make(chan *retrieveDataResp), - } - w.c <- req - resp := <-req.c - return resp.rows, resp.err -} - -func record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { - defaultWorker.Record(tags, ms, attachments) -} - -func recordMeasurement(tags *tag.Map, ms []stats.Measurement, attachments map[string]interface{}) { - defaultWorker.recordMeasurement(tags, ms, attachments) -} - -// Record records a set of measurements ms associated with the given tags and attachments.
-func (w *worker) Record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { - w.recordMeasurement(tags, ms.([]stats.Measurement), attachments) -} - -// recordMeasurement records a set of measurements ms associated with the given tags and attachments. -// This is the same as Record but without an interface{} type to avoid allocations -func (w *worker) recordMeasurement(tags *tag.Map, ms []stats.Measurement, attachments map[string]interface{}) { - req := &recordReq{ - tm: tags, - ms: ms, - attachments: attachments, - t: time.Now(), - } - w.c <- req -} - -// SetReportingPeriod sets the interval between reporting aggregated views in -// the program. If duration is less than or equal to zero, it enables the -// default behavior. -// -// Note: each exporter makes different promises about what the lowest supported -// duration is. For example, the Stackdriver exporter recommends a value no -// lower than 1 minute. Consult each exporter per your needs. -func SetReportingPeriod(d time.Duration) { - defaultWorker.SetReportingPeriod(d) -} - -// Stop stops the default worker. -func Stop() { - defaultWorker.Stop() -} - -// SetReportingPeriod sets the interval between reporting aggregated views in -// the program. If duration is less than or equal to zero, it enables the -// default behavior. -// -// Note: each exporter makes different promises about what the lowest supported -// duration is. For example, the Stackdriver exporter recommends a value no -// lower than 1 minute. Consult each exporter per your needs. -func (w *worker) SetReportingPeriod(d time.Duration) { - // TODO(acetechnologist): ensure that the duration d is more than a certain - // value. e.g. 1s - req := &setReportingPeriodReq{ - d: d, - c: make(chan bool), - } - w.c <- req - <-req.c // don't return until the timer is set to the new duration. -} - -// NewMeter constructs a Meter instance. You should only need to use this if -// you need to separate out Measurement recordings and View aggregations within -// a single process. -func NewMeter() Meter { - return &worker{ - measures: make(map[string]*measureRef), - views: make(map[string]*viewInternal), - viewStartTimes: make(map[*viewInternal]time.Time), - timer: time.NewTicker(defaultReportingDuration), - c: make(chan command, 1024), - quit: make(chan bool), - done: make(chan bool), - - exporters: make(map[Exporter]struct{}), - } -} - -// SetResource associates all data collected by this Meter with the specified -// resource. This resource is reported when using metricexport.ReadAndExport; -// it is not provided when used with ExportView/RegisterExporter, because that -// interface does not provide a means for reporting the Resource. 
-func (w *worker) SetResource(r *resource.Resource) { - w.r = r -} - -func (w *worker) Start() { - go w.start() -} - -func (w *worker) start() { - prodMgr := metricproducer.GlobalManager() - prodMgr.AddProducer(w) - - for { - select { - case cmd := <-w.c: - cmd.handleCommand(w) - case <-w.timer.C: - w.reportUsage() - case <-w.quit: - w.timer.Stop() - close(w.c) - close(w.done) - return - } - } -} - -func (w *worker) Stop() { - prodMgr := metricproducer.GlobalManager() - prodMgr.DeleteProducer(w) - select { - case <-w.quit: - default: - close(w.quit) - } - <-w.done -} - -func (w *worker) getMeasureRef(name string) *measureRef { - if mr, ok := w.measures[name]; ok { - return mr - } - mr := &measureRef{ - measure: name, - views: make(map[*viewInternal]struct{}), - } - w.measures[name] = mr - return mr -} - -func (w *worker) tryRegisterView(v *View) (*viewInternal, error) { - w.mu.Lock() - defer w.mu.Unlock() - vi, err := newViewInternal(v) - if err != nil { - return nil, err - } - if x, ok := w.views[vi.view.Name]; ok { - if !x.view.same(vi.view) { - return nil, fmt.Errorf("cannot register view %q; a different view with the same name is already registered", v.Name) - } - - // the view is already registered so there is nothing to do and the - // command is considered successful. - return x, nil - } - w.views[vi.view.Name] = vi - w.viewStartTimes[vi] = time.Now() - ref := w.getMeasureRef(vi.view.Measure.Name()) - ref.views[vi] = struct{}{} - return vi, nil -} - -func (w *worker) unregisterView(v *viewInternal) { - w.mu.Lock() - defer w.mu.Unlock() - delete(w.views, v.view.Name) - delete(w.viewStartTimes, v) - if measure := w.measures[v.view.Measure.Name()]; measure != nil { - delete(measure.views, v) - } -} - -func (w *worker) reportView(v *viewInternal) { - if !v.isSubscribed() { - return - } - rows := v.collectedRows() - viewData := &Data{ - View: v.view, - Start: w.viewStartTimes[v], - End: time.Now(), - Rows: rows, - } - w.exportersMu.Lock() - defer w.exportersMu.Unlock() - for e := range w.exporters { - e.ExportView(viewData) - } -} - -func (w *worker) reportUsage() { - w.mu.Lock() - defer w.mu.Unlock() - for _, v := range w.views { - w.reportView(v) - } -} - -func (w *worker) toMetric(v *viewInternal, now time.Time) *metricdata.Metric { - if !v.isSubscribed() { - return nil - } - - return viewToMetric(v, w.r, now) -} - -// Read reads all view data and returns them as metrics. -// It is typically invoked by metric reader to export stats in metric format. -func (w *worker) Read() []*metricdata.Metric { - w.mu.Lock() - defer w.mu.Unlock() - now := time.Now() - metrics := make([]*metricdata.Metric, 0, len(w.views)) - for _, v := range w.views { - metric := w.toMetric(v, now) - if metric != nil { - metrics = append(metrics, metric) - } - } - return metrics -} - -func (w *worker) RegisterExporter(e Exporter) { - w.exportersMu.Lock() - defer w.exportersMu.Unlock() - - w.exporters[e] = struct{}{} -} - -func (w *worker) UnregisterExporter(e Exporter) { - w.exportersMu.Lock() - defer w.exportersMu.Unlock() - - delete(w.exporters, e) -} diff --git a/vendor/go.opencensus.io/stats/view/worker_commands.go b/vendor/go.opencensus.io/stats/view/worker_commands.go deleted file mode 100644 index 9ac4cc05992..00000000000 --- a/vendor/go.opencensus.io/stats/view/worker_commands.go +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
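Tying the worker implementation above together, an assumed sketch of the advanced Meter path: a private meter with its own reporting period, exporters, and resource, independent of the package-level defaultWorker. It reuses the illustrative view v, measure reqLatency, and logExporter from earlier sketches:

m := view.NewMeter()
m.Start()
defer m.Stop()
m.SetReportingPeriod(5 * time.Second)
m.RegisterExporter(logExporter{})
if err := m.Register(v); err != nil {
	log.Fatal(err)
}
// Meter.Record takes an explicit tag map instead of a context.
ctx, _ := tag.New(context.Background())
m.Record(tag.FromContext(ctx), []stats.Measurement{reqLatency.M(3)}, nil)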
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package view - -import ( - "errors" - "fmt" - "strings" - "time" - - "go.opencensus.io/stats" - "go.opencensus.io/stats/internal" - "go.opencensus.io/tag" -) - -type command interface { - handleCommand(w *worker) -} - -// getViewByNameReq is the command to get a view given its name. -type getViewByNameReq struct { - name string - c chan *getViewByNameResp -} - -type getViewByNameResp struct { - v *View -} - -func (cmd *getViewByNameReq) handleCommand(w *worker) { - v := w.views[cmd.name] - if v == nil { - cmd.c <- &getViewByNameResp{nil} - return - } - cmd.c <- &getViewByNameResp{v.view} -} - -// registerViewReq is the command to register a view. -type registerViewReq struct { - views []*View - err chan error -} - -func (cmd *registerViewReq) handleCommand(w *worker) { - for _, v := range cmd.views { - if err := v.canonicalize(); err != nil { - cmd.err <- err - return - } - } - var errstr []string - for _, view := range cmd.views { - vi, err := w.tryRegisterView(view) - if err != nil { - errstr = append(errstr, fmt.Sprintf("%s: %v", view.Name, err)) - continue - } - internal.SubscriptionReporter(view.Measure.Name()) - vi.subscribe() - } - if len(errstr) > 0 { - cmd.err <- errors.New(strings.Join(errstr, "\n")) - } else { - cmd.err <- nil - } -} - -// unregisterFromViewReq is the command to unregister from a view. Has no -// impact on the data collection for clients that are pulling data from the -// library. -type unregisterFromViewReq struct { - views []string - done chan struct{} -} - -func (cmd *unregisterFromViewReq) handleCommand(w *worker) { - for _, name := range cmd.views { - vi, ok := w.views[name] - if !ok { - continue - } - - // Report pending data for this view before removing it. - w.reportView(vi) - - vi.unsubscribe() - if !vi.isSubscribed() { - // this was the last subscription and view is not collecting anymore. - // The collected data can be cleared. - vi.clearRows() - } - w.unregisterView(vi) - } - cmd.done <- struct{}{} -} - -// retrieveDataReq is the command to retrieve data for a view. -type retrieveDataReq struct { - now time.Time - v string - c chan *retrieveDataResp -} - -type retrieveDataResp struct { - rows []*Row - err error -} - -func (cmd *retrieveDataReq) handleCommand(w *worker) { - w.mu.Lock() - defer w.mu.Unlock() - vi, ok := w.views[cmd.v] - if !ok { - cmd.c <- &retrieveDataResp{ - nil, - fmt.Errorf("cannot retrieve data; view %q is not registered", cmd.v), - } - return - } - - if !vi.isSubscribed() { - cmd.c <- &retrieveDataResp{ - nil, - fmt.Errorf("cannot retrieve data; view %q has no subscriptions or collection is not forcibly started", cmd.v), - } - return - } - cmd.c <- &retrieveDataResp{ - vi.collectedRows(), - nil, - } -} - -// recordReq is the command to record data related to multiple measures -// at once.
-type recordReq struct { - tm *tag.Map - ms []stats.Measurement - attachments map[string]interface{} - t time.Time -} - -func (cmd *recordReq) handleCommand(w *worker) { - w.mu.Lock() - defer w.mu.Unlock() - for _, m := range cmd.ms { - if (m == stats.Measurement{}) { // not registered - continue - } - ref := w.getMeasureRef(m.Measure().Name()) - for v := range ref.views { - v.addSample(cmd.tm, m.Value(), cmd.attachments, cmd.t) - } - } -} - -// setReportingPeriodReq is the command to modify the duration between -// reporting the collected data to the registered clients. -type setReportingPeriodReq struct { - d time.Duration - c chan bool -} - -func (cmd *setReportingPeriodReq) handleCommand(w *worker) { - w.timer.Stop() - if cmd.d <= 0 { - w.timer = time.NewTicker(defaultReportingDuration) - } else { - w.timer = time.NewTicker(cmd.d) - } - cmd.c <- true -} diff --git a/vendor/go.opencensus.io/tag/context.go b/vendor/go.opencensus.io/tag/context.go deleted file mode 100644 index b27d1b26b13..00000000000 --- a/vendor/go.opencensus.io/tag/context.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package tag - -import ( - "context" -) - -// FromContext returns the tag map stored in the context. -func FromContext(ctx context.Context) *Map { - // The returned tag map shouldn't be mutated. - ts := ctx.Value(mapCtxKey) - if ts == nil { - return nil - } - return ts.(*Map) -} - -// NewContext creates a new context with the given tag map. -// To propagate a tag map to downstream methods and downstream RPCs, add a tag map -// to the current context. NewContext will return a copy of the current context, -// and put the tag map into the returned one. -// If there is already a tag map in the current context, it will be replaced with m. -func NewContext(ctx context.Context, m *Map) context.Context { - return context.WithValue(ctx, mapCtxKey, m) -} - -type ctxKey struct{} - -var mapCtxKey = ctxKey{} diff --git a/vendor/go.opencensus.io/tag/doc.go b/vendor/go.opencensus.io/tag/doc.go deleted file mode 100644 index da16b74e4de..00000000000 --- a/vendor/go.opencensus.io/tag/doc.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -/* -Package tag contains OpenCensus tags. - -Tags are key-value pairs. Tags provide additional cardinality to -the OpenCensus instrumentation data. 
- -Tags can be propagated on the wire and in the same -process via context.Context. Encode and Decode should be -used to represent tags into their binary propagation form. -*/ -package tag // import "go.opencensus.io/tag" diff --git a/vendor/go.opencensus.io/tag/key.go b/vendor/go.opencensus.io/tag/key.go deleted file mode 100644 index 71ec913657b..00000000000 --- a/vendor/go.opencensus.io/tag/key.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package tag - -// Key represents a tag key. -type Key struct { - name string -} - -// NewKey creates or retrieves a string key identified by name. -// Calling NewKey more than once with the same name returns the same key. -func NewKey(name string) (Key, error) { - if !checkKeyName(name) { - return Key{}, errInvalidKeyName - } - return Key{name: name}, nil -} - -// MustNewKey returns a key with the given name, and panics if name is an invalid key name. -func MustNewKey(name string) Key { - k, err := NewKey(name) - if err != nil { - panic(err) - } - return k -} - -// Name returns the name of the key. -func (k Key) Name() string { - return k.name -} diff --git a/vendor/go.opencensus.io/tag/map.go b/vendor/go.opencensus.io/tag/map.go deleted file mode 100644 index 0272ef85a4c..00000000000 --- a/vendor/go.opencensus.io/tag/map.go +++ /dev/null @@ -1,229 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package tag - -import ( - "bytes" - "context" - "fmt" - "sort" -) - -// Tag is a key value pair that can be propagated on wire. -type Tag struct { - Key Key - Value string -} - -type tagContent struct { - value string - m metadatas -} - -// Map is a map of tags. Use New to create a context containing -// a new Map. -type Map struct { - m map[Key]tagContent -} - -// Value returns the value for the key if a value for the key exists. 
-func (m *Map) Value(k Key) (string, bool) { - if m == nil { - return "", false - } - v, ok := m.m[k] - return v.value, ok -} - -func (m *Map) String() string { - if m == nil { - return "nil" - } - keys := make([]Key, 0, len(m.m)) - for k := range m.m { - keys = append(keys, k) - } - sort.Slice(keys, func(i, j int) bool { return keys[i].Name() < keys[j].Name() }) - - var buffer bytes.Buffer - buffer.WriteString("{ ") - for _, k := range keys { - buffer.WriteString(fmt.Sprintf("{%v %v}", k.name, m.m[k])) - } - buffer.WriteString(" }") - return buffer.String() -} - -func (m *Map) insert(k Key, v string, md metadatas) { - if _, ok := m.m[k]; ok { - return - } - m.m[k] = tagContent{value: v, m: md} -} - -func (m *Map) update(k Key, v string, md metadatas) { - if _, ok := m.m[k]; ok { - m.m[k] = tagContent{value: v, m: md} - } -} - -func (m *Map) upsert(k Key, v string, md metadatas) { - m.m[k] = tagContent{value: v, m: md} -} - -func (m *Map) delete(k Key) { - delete(m.m, k) -} - -func newMap() *Map { - return &Map{m: make(map[Key]tagContent)} -} - -// Mutator modifies a tag map. -type Mutator interface { - Mutate(t *Map) (*Map, error) -} - -// Insert returns a mutator that inserts a -// value associated with k. If k already exists in the tag map, -// the mutator doesn't update the value. -// Metadata applies metadata to the tag. It is optional. -// Metadatas are applied in the order in which they are provided. -// If more than one metadata updates the same attribute then -// the update from the last metadata prevails. -func Insert(k Key, v string, mds ...Metadata) Mutator { - return &mutator{ - fn: func(m *Map) (*Map, error) { - if !checkValue(v) { - return nil, errInvalidValue - } - m.insert(k, v, createMetadatas(mds...)) - return m, nil - }, - } -} - -// Update returns a mutator that updates the -// value of the tag associated with k with v. If k doesn't -// exist in the tag map, the mutator doesn't insert the value. -// Metadata applies metadata to the tag. It is optional. -// Metadatas are applied in the order in which they are provided. -// If more than one metadata updates the same attribute then -// the update from the last metadata prevails. -func Update(k Key, v string, mds ...Metadata) Mutator { - return &mutator{ - fn: func(m *Map) (*Map, error) { - if !checkValue(v) { - return nil, errInvalidValue - } - m.update(k, v, createMetadatas(mds...)) - return m, nil - }, - } -} - -// Upsert returns a mutator that upserts the -// value of the tag associated with k with v. It inserts the -// value if k doesn't exist already. It mutates the value -// if k already exists. -// Metadata applies metadata to the tag. It is optional. -// Metadatas are applied in the order in which they are provided. -// If more than one metadata updates the same attribute then -// the update from the last metadata prevails. -func Upsert(k Key, v string, mds ...Metadata) Mutator { - return &mutator{ - fn: func(m *Map) (*Map, error) { - if !checkValue(v) { - return nil, errInvalidValue - } - m.upsert(k, v, createMetadatas(mds...)) - return m, nil - }, - } -} - -func createMetadatas(mds ...Metadata) metadatas { - var metas metadatas - if len(mds) > 0 { - for _, md := range mds { - if md != nil { - md(&metas) - } - } - } else { - WithTTL(TTLUnlimitedPropagation)(&metas) - } - return metas - -} - -// Delete returns a mutator that deletes -// the value associated with k.
-func Delete(k Key) Mutator { - return &mutator{ - fn: func(m *Map) (*Map, error) { - m.delete(k) - return m, nil - }, - } -} - -// New returns a new context that contains a tag map -// originated from the incoming context and modified -// with the provided mutators. -func New(ctx context.Context, mutator ...Mutator) (context.Context, error) { - m := newMap() - orig := FromContext(ctx) - if orig != nil { - for k, v := range orig.m { - if !checkKeyName(k.Name()) { - return ctx, fmt.Errorf("key:%q: %v", k, errInvalidKeyName) - } - if !checkValue(v.value) { - return ctx, fmt.Errorf("key:%q value:%q: %v", k.Name(), v, errInvalidValue) - } - m.insert(k, v.value, v.m) - } - } - var err error - for _, mod := range mutator { - m, err = mod.Mutate(m) - if err != nil { - return ctx, err - } - } - return NewContext(ctx, m), nil -} - -// Do is similar to pprof.Do: a convenience for installing the tags -// from the context as Go profiler labels. This allows you to -// correlate runtime profiling with stats. -// -// It converts the key/values from the given map to Go profiler labels -// and calls pprof.Do. -// -// Do is going to do nothing if your Go version is below 1.9. -func Do(ctx context.Context, f func(ctx context.Context)) { - do(ctx, f) -} - -type mutator struct { - fn func(t *Map) (*Map, error) -} - -func (m *mutator) Mutate(t *Map) (*Map, error) { - return m.fn(t) -} diff --git a/vendor/go.opencensus.io/tag/map_codec.go b/vendor/go.opencensus.io/tag/map_codec.go deleted file mode 100644 index c242e695c8c..00000000000 --- a/vendor/go.opencensus.io/tag/map_codec.go +++ /dev/null @@ -1,239 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package tag - -import ( - "encoding/binary" - "fmt" -) - -// keyType defines the types of keys allowed. Currently only keyTypeString is -// supported. -type keyType byte - -const ( - keyTypeString keyType = iota - keyTypeInt64 - keyTypeTrue - keyTypeFalse - - tagsVersionID = byte(0) -) - -type encoderGRPC struct { - buf []byte - writeIdx, readIdx int -} - -// writeTagString writes the keyTypeString byte ('0') followed by the key string and value -// string.
-func (eg *encoderGRPC) writeTagString(k, v string) { - eg.writeByte(byte(keyTypeString)) - eg.writeStringWithVarintLen(k) - eg.writeStringWithVarintLen(v) -} - -func (eg *encoderGRPC) writeTagUint64(k string, i uint64) { - eg.writeByte(byte(keyTypeInt64)) - eg.writeStringWithVarintLen(k) - eg.writeUint64(i) -} - -func (eg *encoderGRPC) writeTagTrue(k string) { - eg.writeByte(byte(keyTypeTrue)) - eg.writeStringWithVarintLen(k) -} - -func (eg *encoderGRPC) writeTagFalse(k string) { - eg.writeByte(byte(keyTypeFalse)) - eg.writeStringWithVarintLen(k) -} - -func (eg *encoderGRPC) writeBytesWithVarintLen(bytes []byte) { - length := len(bytes) - - eg.growIfRequired(binary.MaxVarintLen64 + length) - eg.writeIdx += binary.PutUvarint(eg.buf[eg.writeIdx:], uint64(length)) - copy(eg.buf[eg.writeIdx:], bytes) - eg.writeIdx += length -} - -func (eg *encoderGRPC) writeStringWithVarintLen(s string) { - length := len(s) - - eg.growIfRequired(binary.MaxVarintLen64 + length) - eg.writeIdx += binary.PutUvarint(eg.buf[eg.writeIdx:], uint64(length)) - copy(eg.buf[eg.writeIdx:], s) - eg.writeIdx += length -} - -func (eg *encoderGRPC) writeByte(v byte) { - eg.growIfRequired(1) - eg.buf[eg.writeIdx] = v - eg.writeIdx++ -} - -func (eg *encoderGRPC) writeUint32(i uint32) { - eg.growIfRequired(4) - binary.LittleEndian.PutUint32(eg.buf[eg.writeIdx:], i) - eg.writeIdx += 4 -} - -func (eg *encoderGRPC) writeUint64(i uint64) { - eg.growIfRequired(8) - binary.LittleEndian.PutUint64(eg.buf[eg.writeIdx:], i) - eg.writeIdx += 8 -} - -func (eg *encoderGRPC) readByte() byte { - b := eg.buf[eg.readIdx] - eg.readIdx++ - return b -} - -func (eg *encoderGRPC) readUint32() uint32 { - i := binary.LittleEndian.Uint32(eg.buf[eg.readIdx:]) - eg.readIdx += 4 - return i -} - -func (eg *encoderGRPC) readUint64() uint64 { - i := binary.LittleEndian.Uint64(eg.buf[eg.readIdx:]) - eg.readIdx += 8 - return i -} - -func (eg *encoderGRPC) readBytesWithVarintLen() ([]byte, error) { - if eg.readEnded() { - return nil, fmt.Errorf("unexpected end while readBytesWithVarintLen '%x' starting at idx '%v'", eg.buf, eg.readIdx) - } - length, valueStart := binary.Uvarint(eg.buf[eg.readIdx:]) - if valueStart <= 0 { - return nil, fmt.Errorf("unexpected end while readBytesWithVarintLen '%x' starting at idx '%v'", eg.buf, eg.readIdx) - } - - valueStart += eg.readIdx - valueEnd := valueStart + int(length) - if valueEnd > len(eg.buf) { - return nil, fmt.Errorf("malformed encoding: length:%v, upper:%v, maxLength:%v", length, valueEnd, len(eg.buf)) - } - - eg.readIdx = valueEnd - return eg.buf[valueStart:valueEnd], nil -} - -func (eg *encoderGRPC) readStringWithVarintLen() (string, error) { - bytes, err := eg.readBytesWithVarintLen() - if err != nil { - return "", err - } - return string(bytes), nil -} - -func (eg *encoderGRPC) growIfRequired(expected int) { - if len(eg.buf)-eg.writeIdx < expected { - tmp := make([]byte, 2*(len(eg.buf)+1)+expected) - copy(tmp, eg.buf) - eg.buf = tmp - } -} - -func (eg *encoderGRPC) readEnded() bool { - return eg.readIdx >= len(eg.buf) -} - -func (eg *encoderGRPC) bytes() []byte { - return eg.buf[:eg.writeIdx] -} - -// Encode encodes the tag map into a []byte. It is useful to propagate -// the tag maps on wire in binary format. 
-func Encode(m *Map) []byte { - if m == nil { - return nil - } - eg := &encoderGRPC{ - buf: make([]byte, len(m.m)), - } - eg.writeByte(tagsVersionID) - for k, v := range m.m { - if v.m.ttl.ttl == valueTTLUnlimitedPropagation { - eg.writeByte(byte(keyTypeString)) - eg.writeStringWithVarintLen(k.name) - eg.writeBytesWithVarintLen([]byte(v.value)) - } - } - return eg.bytes() -} - -// Decode decodes the given []byte into a tag map. -func Decode(bytes []byte) (*Map, error) { - ts := newMap() - err := DecodeEach(bytes, ts.upsert) - if err != nil { - // no partial failures - return nil, err - } - return ts, nil -} - -// DecodeEach decodes the given serialized tag map, calling handler for each -// tag key and value decoded. -func DecodeEach(bytes []byte, fn func(key Key, val string, md metadatas)) error { - eg := &encoderGRPC{ - buf: bytes, - } - if len(eg.buf) == 0 { - return nil - } - - version := eg.readByte() - if version > tagsVersionID { - return fmt.Errorf("cannot decode: unsupported version: %q; supports only up to: %q", version, tagsVersionID) - } - - for !eg.readEnded() { - typ := keyType(eg.readByte()) - - if typ != keyTypeString { - return fmt.Errorf("cannot decode: invalid key type: %q", typ) - } - - k, err := eg.readBytesWithVarintLen() - if err != nil { - return err - } - - v, err := eg.readBytesWithVarintLen() - if err != nil { - return err - } - - key, err := NewKey(string(k)) - if err != nil { - return err - } - val := string(v) - if !checkValue(val) { - return errInvalidValue - } - fn(key, val, createMetadatas(WithTTL(TTLUnlimitedPropagation))) - if err != nil { - return err - } - } - return nil -} diff --git a/vendor/go.opencensus.io/tag/metadata.go b/vendor/go.opencensus.io/tag/metadata.go deleted file mode 100644 index 6571a583ea6..00000000000 --- a/vendor/go.opencensus.io/tag/metadata.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2019, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package tag - -const ( - // valueTTLNoPropagation prevents tag from propagating. - valueTTLNoPropagation = 0 - - // valueTTLUnlimitedPropagation allows tag to propagate without any limits on number of hops. - valueTTLUnlimitedPropagation = -1 -) - -// TTL is metadata that specifies number of hops a tag can propagate. -// Details about TTL metadata is specified at https://github.com/census-instrumentation/opencensus-specs/blob/master/tags/TagMap.md#tagmetadata -type TTL struct { - ttl int -} - -var ( - // TTLUnlimitedPropagation is TTL metadata that allows tag to propagate without any limits on number of hops. - TTLUnlimitedPropagation = TTL{ttl: valueTTLUnlimitedPropagation} - - // TTLNoPropagation is TTL metadata that prevents tag from propagating. - TTLNoPropagation = TTL{ttl: valueTTLNoPropagation} -) - -type metadatas struct { - ttl TTL -} - -// Metadata applies metadatas specified by the function. -type Metadata func(*metadatas) - -// WithTTL applies metadata with provided ttl. 
-func WithTTL(ttl TTL) Metadata { - return func(m *metadatas) { - m.ttl = ttl - } -} diff --git a/vendor/go.opencensus.io/tag/profile_19.go b/vendor/go.opencensus.io/tag/profile_19.go deleted file mode 100644 index 8fb17226fe3..00000000000 --- a/vendor/go.opencensus.io/tag/profile_19.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build go1.9 -// +build go1.9 - -package tag - -import ( - "context" - "runtime/pprof" -) - -func do(ctx context.Context, f func(ctx context.Context)) { - m := FromContext(ctx) - keyvals := make([]string, 0, 2*len(m.m)) - for k, v := range m.m { - keyvals = append(keyvals, k.Name(), v.value) - } - pprof.Do(ctx, pprof.Labels(keyvals...), f) -} diff --git a/vendor/go.opencensus.io/tag/profile_not19.go b/vendor/go.opencensus.io/tag/profile_not19.go deleted file mode 100644 index e28cf13cde9..00000000000 --- a/vendor/go.opencensus.io/tag/profile_not19.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !go1.9 -// +build !go1.9 - -package tag - -import "context" - -func do(ctx context.Context, f func(ctx context.Context)) { - f(ctx) -} diff --git a/vendor/go.opencensus.io/tag/validate.go b/vendor/go.opencensus.io/tag/validate.go deleted file mode 100644 index 0939fc67483..00000000000 --- a/vendor/go.opencensus.io/tag/validate.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package tag - -import "errors" - -const ( - maxKeyLength = 255 - - // valid are restricted to US-ASCII subset (range 0x20 (' ') to 0x7e ('~')). 
- validKeyValueMin = 32 - validKeyValueMax = 126 -) - -var ( - errInvalidKeyName = errors.New("invalid key name: only ASCII characters accepted; max length must be 255 characters") - errInvalidValue = errors.New("invalid value: only ASCII characters accepted; max length must be 255 characters") -) - -func checkKeyName(name string) bool { - if len(name) == 0 { - return false - } - if len(name) > maxKeyLength { - return false - } - return isASCII(name) -} - -func isASCII(s string) bool { - for _, c := range s { - if (c < validKeyValueMin) || (c > validKeyValueMax) { - return false - } - } - return true -} - -func checkValue(v string) bool { - if len(v) > maxKeyLength { - return false - } - return isASCII(v) -} diff --git a/vendor/go.opencensus.io/trace/propagation/propagation.go b/vendor/go.opencensus.io/trace/propagation/propagation.go deleted file mode 100644 index 1eb190a96a3..00000000000 --- a/vendor/go.opencensus.io/trace/propagation/propagation.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package propagation implements the binary trace context format. -package propagation // import "go.opencensus.io/trace/propagation" - -// TODO: link to external spec document. - -// BinaryFormat format: -// -// Binary value: -// version_id: 1 byte representing the version id. -// -// For version_id = 0: -// -// version_format: -// field_format: -// -// Fields: -// -// TraceId: (field_id = 0, len = 16, default = "0000000000000000") - 16-byte array representing the trace_id. -// SpanId: (field_id = 1, len = 8, default = "00000000") - 8-byte array representing the span_id. -// TraceOptions: (field_id = 2, len = 1, default = "0") - 1-byte array representing the trace_options. -// -// Fields MUST be encoded using the field id order (smaller to higher). -// -// Valid value example: -// -// {0, 0, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 1, 97, -// 98, 99, 100, 101, 102, 103, 104, 2, 1} -// -// version_id = 0; -// trace_id = {64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79} -// span_id = {97, 98, 99, 100, 101, 102, 103, 104}; -// trace_options = {1}; - -import ( - "net/http" - - "go.opencensus.io/trace" -) - -// Binary returns the binary format representation of a SpanContext. -// -// If sc is the zero value, Binary returns nil. -func Binary(sc trace.SpanContext) []byte { - if sc == (trace.SpanContext{}) { - return nil - } - var b [29]byte - copy(b[2:18], sc.TraceID[:]) - b[18] = 1 - copy(b[19:27], sc.SpanID[:]) - b[27] = 2 - b[28] = uint8(sc.TraceOptions) - return b[:] -} - -// FromBinary returns the SpanContext represented by b. -// -// If b has an unsupported version ID or contains no TraceID, FromBinary -// returns with ok==false. 
-func FromBinary(b []byte) (sc trace.SpanContext, ok bool) { - if len(b) == 0 || b[0] != 0 { - return trace.SpanContext{}, false - } - b = b[1:] - if len(b) >= 17 && b[0] == 0 { - copy(sc.TraceID[:], b[1:17]) - b = b[17:] - } else { - return trace.SpanContext{}, false - } - if len(b) >= 9 && b[0] == 1 { - copy(sc.SpanID[:], b[1:9]) - b = b[9:] - } - if len(b) >= 2 && b[0] == 2 { - sc.TraceOptions = trace.TraceOptions(b[1]) - } - return sc, true -} - -// HTTPFormat implementations propagate span contexts -// in HTTP requests. -// -// SpanContextFromRequest extracts a span context from incoming -// requests. -// -// SpanContextToRequest modifies the given request to include the given -// span context. -type HTTPFormat interface { - SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) - SpanContextToRequest(sc trace.SpanContext, req *http.Request) -} - -// TODO(jbd): Find a more representative but short name for HTTPFormat. diff --git a/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md b/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md new file mode 100644 index 00000000000..773c9b6431f --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md @@ -0,0 +1,27 @@ +# Contributing to go.opentelemetry.io/auto/sdk + +The `go.opentelemetry.io/auto/sdk` module is a purpose built OpenTelemetry SDK. +It is designed to be: + +0. An OpenTelemetry compliant SDK +1. Instrumented by auto-instrumentation (serializable into OTLP JSON) +2. Lightweight +3. User-friendly + +These design choices are listed in the order of their importance. + +The primary design goal of this module is to be an OpenTelemetry SDK. +This means that it needs to implement the Go APIs found in `go.opentelemetry.io/otel`. + +Having met the requirement of SDK compliance, this module needs to provide code that the `go.opentelemetry.io/auto` module can instrument. +The chosen approach to meet this goal is to ensure the telemetry from the SDK is serializable into JSON encoded OTLP. +This ensures then that the serialized form is compatible with other OpenTelemetry systems, and the auto-instrumentation can use these systems to deserialize any telemetry it is sent. + +Outside of these first two goals, the intended use becomes relevant. +This package is intended to be used in the `go.opentelemetry.io/otel` global API as a default when the auto-instrumentation is running. +Because of this, this package needs to not add unnecessary dependencies to that API. +Ideally, it adds none. +It also needs to operate efficiently. + +Finally, this module is designed to be user-friendly to Go development. +It hides complexity in order to provide simpler APIs when the previous goals can all still be met. diff --git a/vendor/go.opentelemetry.io/auto/sdk/LICENSE b/vendor/go.opentelemetry.io/auto/sdk/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
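The `internal/telemetry` package added below serializes trace and span IDs in the OTLP/JSON convention: a quoted hex string, with an all-zero (invalid) ID collapsing to the empty string. A minimal standalone sketch of that convention, using only the standard library (the `marshalID` helper here is illustrative and not part of the vendored code):

```go
package main

import (
	"encoding/hex"
	"fmt"
)

// traceIDSize mirrors the 16-byte OTLP trace ID; span IDs use 8 bytes.
const traceIDSize = 16

// isZero reports whether every byte of the ID is zero.
func isZero(id []byte) bool {
	for _, b := range id {
		if b != 0 {
			return false
		}
	}
	return true
}

// marshalID renders an ID as a quoted hex string, the OTLP/JSON form;
// an all-zero (invalid) ID is encoded as the empty string "".
func marshalID(id []byte) []byte {
	if isZero(id) {
		return []byte(`""`)
	}
	out := make([]byte, hex.EncodedLen(len(id))+2) // +2 for the quotes
	hex.Encode(out[1:len(out)-1], id)
	out[0], out[len(out)-1] = '"', '"'
	return out
}

func main() {
	id := [traceIDSize]byte{0x40, 0x41, 0x42}
	fmt.Println(string(marshalID(id[:])))                     // "40414200000000000000000000000000"
	fmt.Println(string(marshalID(make([]byte, traceIDSize)))) // ""
}
```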
diff --git a/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md b/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md new file mode 100644 index 00000000000..088d19a6ce7 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md @@ -0,0 +1,15 @@ +# Versioning + +This document describes the versioning policy for this module. +This policy is designed so the following goals can be achieved. + +**Users are provided a codebase of value that is stable and secure.** + +## Policy + +* Versioning of this module will be idiomatic of a Go project using [Go modules](https://github.com/golang/go/wiki/Modules). + * [Semantic import versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) will be used. + * Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html). + * Any `v2` or higher version of this module will be included as a `/vN` at the end of the module path used in `go.mod` files and in the package import path. + +* GitHub releases will be made for all releases. diff --git a/vendor/go.opentelemetry.io/auto/sdk/doc.go b/vendor/go.opentelemetry.io/auto/sdk/doc.go new file mode 100644 index 00000000000..ad73d8cb9d2 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/doc.go @@ -0,0 +1,14 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package sdk provides an auto-instrumentable OpenTelemetry SDK. + +An [go.opentelemetry.io/auto.Instrumentation] can be configured to target the +process running this SDK. In that case, all telemetry the SDK produces will be +processed and handled by that [go.opentelemetry.io/auto.Instrumentation]. + +By default, if there is no [go.opentelemetry.io/auto.Instrumentation] set to +auto-instrument the SDK, the SDK will not generate any telemetry. +*/ +package sdk diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go new file mode 100644 index 00000000000..af6ef171f6a --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go @@ -0,0 +1,58 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +// Attr is a key-value pair. +type Attr struct { + Key string `json:"key,omitempty"` + Value Value `json:"value,omitempty"` +} + +// String returns an Attr for a string value. +func String(key, value string) Attr { + return Attr{key, StringValue(value)} +} + +// Int64 returns an Attr for an int64 value. +func Int64(key string, value int64) Attr { + return Attr{key, Int64Value(value)} +} + +// Int returns an Attr for an int value. +func Int(key string, value int) Attr { + return Int64(key, int64(value)) +} + +// Float64 returns an Attr for a float64 value. +func Float64(key string, value float64) Attr { + return Attr{key, Float64Value(value)} +} + +// Bool returns an Attr for a bool value. +func Bool(key string, value bool) Attr { + return Attr{key, BoolValue(value)} +} + +// Bytes returns an Attr for a []byte value. +// The passed slice must not be changed after it is passed. +func Bytes(key string, value []byte) Attr { + return Attr{key, BytesValue(value)} +} + +// Slice returns an Attr for a []Value value. +// The passed slice must not be changed after it is passed. +func Slice(key string, value ...Value) Attr { + return Attr{key, SliceValue(value...)} +} + +// Map returns an Attr for a map value. +// The passed slice must not be changed after it is passed. 
+func Map(key string, value ...Attr) Attr { + return Attr{key, MapValue(value...)} +} + +// Equal reports whether a is equal to b. +func (a Attr) Equal(b Attr) bool { + return a.Key == b.Key && a.Value.Equal(b.Value) +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go new file mode 100644 index 00000000000..949e2165c05 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go @@ -0,0 +1,8 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package telemetry provides a lightweight representation of OpenTelemetry +telemetry that is compatible with the OTLP JSON protobuf encoding. +*/ +package telemetry diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go new file mode 100644 index 00000000000..e854d7e84e8 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go @@ -0,0 +1,103 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "encoding/hex" + "errors" + "fmt" +) + +const ( + traceIDSize = 16 + spanIDSize = 8 +) + +// TraceID is a custom data type that is used for all trace IDs. +type TraceID [traceIDSize]byte + +// String returns the hex string representation form of a TraceID. +func (tid TraceID) String() string { + return hex.EncodeToString(tid[:]) +} + +// IsEmpty reports whether the trace ID is all zeroes. +func (tid TraceID) IsEmpty() bool { + return tid == [traceIDSize]byte{} +} + +// MarshalJSON converts the trace ID into a hex string enclosed in quotes. +func (tid TraceID) MarshalJSON() ([]byte, error) { + if tid.IsEmpty() { + return []byte(`""`), nil + } + return marshalJSON(tid[:]) +} + +// UnmarshalJSON inflates the trace ID from hex string, possibly enclosed in +// quotes. +func (tid *TraceID) UnmarshalJSON(data []byte) error { + *tid = [traceIDSize]byte{} + return unmarshalJSON(tid[:], data) +} + +// SpanID is a custom data type that is used for all span IDs. +type SpanID [spanIDSize]byte + +// String returns the hex string representation form of a SpanID. +func (sid SpanID) String() string { + return hex.EncodeToString(sid[:]) +} + +// IsEmpty reports whether the span ID is all zeroes. +func (sid SpanID) IsEmpty() bool { + return sid == [spanIDSize]byte{} +} + +// MarshalJSON converts span ID into a hex string enclosed in quotes. +func (sid SpanID) MarshalJSON() ([]byte, error) { + if sid.IsEmpty() { + return []byte(`""`), nil + } + return marshalJSON(sid[:]) +} + +// UnmarshalJSON decodes span ID from hex string, possibly enclosed in quotes. +func (sid *SpanID) UnmarshalJSON(data []byte) error { + *sid = [spanIDSize]byte{} + return unmarshalJSON(sid[:], data) +} + +// marshalJSON converts id into a hex string enclosed in quotes. +func marshalJSON(id []byte) ([]byte, error) { + // Plus 2 quote chars at the start and end. + hexLen := hex.EncodedLen(len(id)) + 2 + + b := make([]byte, hexLen) + hex.Encode(b[1:hexLen-1], id) + b[0], b[hexLen-1] = '"', '"' + + return b, nil +} + +// unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes.
+func unmarshalJSON(dst []byte, src []byte) error { + if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' { + src = src[1 : l-1] + } + nLen := len(src) + if nLen == 0 { + return nil + } + + if len(dst) != hex.DecodedLen(nLen) { + return errors.New("invalid length for ID") + } + + _, err := hex.Decode(dst, src) + if err != nil { + return fmt.Errorf("cannot unmarshal ID from string '%s': %w", string(src), err) + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go new file mode 100644 index 00000000000..29e629d6674 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go @@ -0,0 +1,67 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "encoding/json" + "strconv" +) + +// protoInt64 represents the protobuf encoding of integers which can be either +// strings or integers. +type protoInt64 int64 + +// Int64 returns the protoInt64 as an int64. +func (i *protoInt64) Int64() int64 { return int64(*i) } + +// UnmarshalJSON decodes both strings and integers. +func (i *protoInt64) UnmarshalJSON(data []byte) error { + if data[0] == '"' { + var str string + if err := json.Unmarshal(data, &str); err != nil { + return err + } + parsedInt, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return err + } + *i = protoInt64(parsedInt) + } else { + var parsedInt int64 + if err := json.Unmarshal(data, &parsedInt); err != nil { + return err + } + *i = protoInt64(parsedInt) + } + return nil +} + +// protoUint64 represents the protobuf encoding of integers which can be either +// strings or integers. +type protoUint64 uint64 + +// Uint64 returns the protoUint64 as a uint64. +func (i *protoUint64) Uint64() uint64 { return uint64(*i) } + +// UnmarshalJSON decodes both strings and integers. +func (i *protoUint64) UnmarshalJSON(data []byte) error { + if data[0] == '"' { + var str string + if err := json.Unmarshal(data, &str); err != nil { + return err + } + parsedUint, err := strconv.ParseUint(str, 10, 64) + if err != nil { + return err + } + *i = protoUint64(parsedUint) + } else { + var parsedUint uint64 + if err := json.Unmarshal(data, &parsedUint); err != nil { + return err + } + *i = protoUint64(parsedUint) + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go new file mode 100644 index 00000000000..cecad8bae3c --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go @@ -0,0 +1,66 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Resource information. +type Resource struct { + // Attrs are the set of attributes that describe the resource. Attribute + // keys MUST be unique (it is not allowed to have more than one attribute + // with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // DroppedAttrs is the number of dropped attributes. If the value + // is 0, then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r.
+func (r *Resource) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Resource type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Resource field: %#v", keyIface) + } + + switch key { + case "attributes": + err = decoder.Decode(&r.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&r.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go new file mode 100644 index 00000000000..b6f2e28d408 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go @@ -0,0 +1,67 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Scope holds the identifying values of the instrumentation scope. +type Scope struct { + Name string `json:"name,omitempty"` + Version string `json:"version,omitempty"` + Attrs []Attr `json:"attributes,omitempty"` + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s. +func (s *Scope) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Scope type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Scope field: %#v", keyIface) + } + + switch key { + case "name": + err = decoder.Decode(&s.Name) + case "version": + err = decoder.Decode(&s.Version) + case "attributes": + err = decoder.Decode(&s.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&s.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go new file mode 100644 index 00000000000..a13a6b733da --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go @@ -0,0 +1,456 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "bytes" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "time" +) + +// A Span represents a single operation performed by a single component of the +// system. +type Span struct { + // A unique identifier for a trace. All spans from the same trace share + // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR + // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is required. + TraceID TraceID `json:"traceId,omitempty"` + // A unique identifier for a span within a trace, assigned when the span + // is created. The ID is an 8-byte array.
An ID with all zeroes OR of length + // other than 8 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is required. + SpanID SpanID `json:"spanId,omitempty"` + // trace_state conveys information about request position in multiple distributed tracing graphs. + // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header + // See also https://github.com/w3c/distributed-tracing for more details about this field. + TraceState string `json:"traceState,omitempty"` + // The `span_id` of this span's parent span. If this is a root span, then this + // field must be empty. The ID is an 8-byte array. + ParentSpanID SpanID `json:"parentSpanId,omitempty"` + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether a span's parent + // is remote. The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // When creating span messages, if the message is logically forwarded from another source + // with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD + // be copied as-is. If creating from a source that does not have an equivalent flags field + // (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST + // be set to zero. + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // + // [Optional]. + Flags uint32 `json:"flags,omitempty"` + // A description of the span's operation. + // + // For example, the name can be a qualified method name or a file name + // and a line number where the operation is called. A best practice is to use + // the same display name at the same call point in an application. + // This makes it easier to correlate spans in different traces. + // + // This field is semantically required to be set to non-empty string. + // Empty value is equivalent to an unknown span name. + // + // This field is required. + Name string `json:"name"` + // Distinguishes between spans generated in a particular context. For example, + // two spans with the same name may be distinguished using `CLIENT` (caller) + // and `SERVER` (callee) to identify queueing latency associated with the span. + Kind SpanKind `json:"kind,omitempty"` + // start_time_unix_nano is the start time of the span. On the client side, this is the time + // kept by the local machine where the span execution starts. On the server side, this + // is the time when the server's application handler starts running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + StartTime time.Time `json:"startTimeUnixNano,omitempty"` + // end_time_unix_nano is the end time of the span. On the client side, this is the time + // kept by the local machine where the span execution ends. On the server side, this + // is the time when the server application handler stops running. 
+ // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + EndTime time.Time `json:"endTimeUnixNano,omitempty"` + // attributes is a collection of key/value pairs. Note, global attributes + // like server name can be set using the resource API. Examples of attributes: + // + // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" + // "/http/server_latency": 300 + // "example.com/myattribute": true + // "example.com/score": 10.239 + // + // The OpenTelemetry API specification further restricts the allowed value types: + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of attributes that were discarded. Attributes + // can be discarded because their keys are too long or because there are too many + // attributes. If this value is 0, then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` + // events is a collection of Event items. + Events []*SpanEvent `json:"events,omitempty"` + // dropped_events_count is the number of dropped events. If the value is 0, then no + // events were dropped. + DroppedEvents uint32 `json:"droppedEventsCount,omitempty"` + // links is a collection of Links, which are references from this span to a span + // in the same or different trace. + Links []*SpanLink `json:"links,omitempty"` + // dropped_links_count is the number of dropped links after the maximum size was + // enforced. If this value is 0, then no links were dropped. + DroppedLinks uint32 `json:"droppedLinksCount,omitempty"` + // An optional final status for this span. Semantically when Status isn't set, it means + // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0). + Status *Status `json:"status,omitempty"` +} + +// MarshalJSON encodes s into OTLP formatted JSON. +func (s Span) MarshalJSON() ([]byte, error) { + startT := s.StartTime.UnixNano() + if s.StartTime.IsZero() || startT < 0 { + startT = 0 + } + + endT := s.EndTime.UnixNano() + if s.EndTime.IsZero() || endT < 0 { + endT = 0 + } + + // Override non-empty default SpanID marshal and omitempty. + var parentSpanId string + if !s.ParentSpanID.IsEmpty() { + b := make([]byte, hex.EncodedLen(spanIDSize)) + hex.Encode(b, s.ParentSpanID[:]) + parentSpanId = string(b) + } + + type Alias Span + return json.Marshal(struct { + Alias + ParentSpanID string `json:"parentSpanId,omitempty"` + StartTime uint64 `json:"startTimeUnixNano,omitempty"` + EndTime uint64 `json:"endTimeUnixNano,omitempty"` + }{ + Alias: Alias(s), + ParentSpanID: parentSpanId, + StartTime: uint64(startT), + EndTime: uint64(endT), + }) +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s. +func (s *Span) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Span type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. 
+ return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Span field: %#v", keyIface) + } + + switch key { + case "traceId", "trace_id": + err = decoder.Decode(&s.TraceID) + case "spanId", "span_id": + err = decoder.Decode(&s.SpanID) + case "traceState", "trace_state": + err = decoder.Decode(&s.TraceState) + case "parentSpanId", "parent_span_id": + err = decoder.Decode(&s.ParentSpanID) + case "flags": + err = decoder.Decode(&s.Flags) + case "name": + err = decoder.Decode(&s.Name) + case "kind": + err = decoder.Decode(&s.Kind) + case "startTimeUnixNano", "start_time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + s.StartTime = time.Unix(0, int64(val.Uint64())) + case "endTimeUnixNano", "end_time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + s.EndTime = time.Unix(0, int64(val.Uint64())) + case "attributes": + err = decoder.Decode(&s.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&s.DroppedAttrs) + case "events": + err = decoder.Decode(&s.Events) + case "droppedEventsCount", "dropped_events_count": + err = decoder.Decode(&s.DroppedEvents) + case "links": + err = decoder.Decode(&s.Links) + case "droppedLinksCount", "dropped_links_count": + err = decoder.Decode(&s.DroppedLinks) + case "status": + err = decoder.Decode(&s.Status) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// SpanFlags represents constants used to interpret the +// Span.flags field, which is protobuf 'fixed32' type and is to +// be used as bit-fields. Each non-zero value defined in this enum is +// a bit-mask. To extract the bit-field, for example, use an +// expression like: +// +// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK) +// +// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. +// +// Note that Span flags were introduced in version 1.1 of the +// OpenTelemetry protocol. Older Span producers do not set this +// field, consequently consumers should not rely on the absence of a +// particular flag bit to indicate the presence of a particular feature. +type SpanFlags int32 + +const ( + // Bits 0-7 are used for trace flags. + SpanFlagsTraceFlagsMask SpanFlags = 255 + // Bits 8 and 9 are used to indicate that the parent span or link span is remote. + // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. + // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote. + SpanFlagsContextHasIsRemoteMask SpanFlags = 256 + // SpanFlagsContextHasIsRemoteMask indicates the Span is remote. + SpanFlagsContextIsRemoteMask SpanFlags = 512 +) + +// SpanKind is the type of span. Can be used to specify additional relationships between spans +// in addition to a parent/child relationship. +type SpanKind int32 + +const ( + // Indicates that the span represents an internal operation within an application, + // as opposed to an operation happening at the boundaries. Default value. + SpanKindInternal SpanKind = 1 + // Indicates that the span covers server-side handling of an RPC or other + // remote network request. + SpanKindServer SpanKind = 2 + // Indicates that the span describes a request to some remote service. + SpanKindClient SpanKind = 3 + // Indicates that the span describes a producer sending a message to a broker. + // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship + // between producer and consumer spans. 
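A short sketch of reading these bit-fields: the low byte carries the W3C trace flags, and bits 8 and 9 encode the remote tri-state described above. The mask values are restated here as local constants so the example stays self-contained.

```go
package main

import "fmt"

const (
	traceFlagsMask  = 255 // bits 0-7: W3C trace flags
	hasIsRemoteMask = 256 // bit 8: remote-ness is known
	isRemoteMask    = 512 // bit 9: parent or link is remote
)

func describe(flags uint32) string {
	w3c := flags & traceFlagsMask
	switch {
	case flags&hasIsRemoteMask == 0:
		return fmt.Sprintf("trace flags %#x, remote: unknown", w3c)
	case flags&isRemoteMask != 0:
		return fmt.Sprintf("trace flags %#x, remote: yes", w3c)
	default:
		return fmt.Sprintf("trace flags %#x, remote: no", w3c)
	}
}

func main() {
	fmt.Println(describe(0x01))             // trace flags 0x1, remote: unknown
	fmt.Println(describe(0x01 | 256 | 512)) // trace flags 0x1, remote: yes
}
```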
A PRODUCER span ends when the message was accepted + // by the broker while the logical processing of the message might span a much longer time. + SpanKindProducer SpanKind = 4 + // Indicates that the span describes consumer receiving a message from a broker. + // Like the PRODUCER kind, there is often no direct critical path latency relationship + // between producer and consumer spans. + SpanKindConsumer SpanKind = 5 +) + +// Event is a time-stamped annotation of the span, consisting of user-supplied +// text description and key-value pairs. +type SpanEvent struct { + // time_unix_nano is the time the event occurred. + Time time.Time `json:"timeUnixNano,omitempty"` + // name of the event. + // This field is semantically required to be set to non-empty string. + Name string `json:"name,omitempty"` + // attributes is a collection of attribute key/value pairs on the event. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` +} + +// MarshalJSON encodes e into OTLP formatted JSON. +func (e SpanEvent) MarshalJSON() ([]byte, error) { + t := e.Time.UnixNano() + if e.Time.IsZero() || t < 0 { + t = 0 + } + + type Alias SpanEvent + return json.Marshal(struct { + Alias + Time uint64 `json:"timeUnixNano,omitempty"` + }{ + Alias: Alias(e), + Time: uint64(t), + }) +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into se. +func (se *SpanEvent) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid SpanEvent type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid SpanEvent field: %#v", keyIface) + } + + switch key { + case "timeUnixNano", "time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + se.Time = time.Unix(0, int64(val.Uint64())) + case "name": + err = decoder.Decode(&se.Name) + case "attributes": + err = decoder.Decode(&se.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&se.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A pointer from the current span to another span in the same trace or in a +// different trace. For example, this can be used in batching operations, +// where a single batch handler processes multiple requests from different +// traces or when the handler receives a request from a different project. +type SpanLink struct { + // A unique identifier of a trace that this linked span is part of. The ID is a + // 16-byte array. + TraceID TraceID `json:"traceId,omitempty"` + // A unique identifier for the linked span. The ID is an 8-byte array. + SpanID SpanID `json:"spanId,omitempty"` + // The trace_state associated with the link. + TraceState string `json:"traceState,omitempty"` + // attributes is a collection of attribute key/value pairs on the link. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). 
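`SpanEvent.MarshalJSON` above relies on a common Go technique worth spelling out: embed a method-free alias of the type so every other field keeps its default encoding, and shadow only the field that needs a custom wire format. A self-contained sketch with a hypothetical `event` type:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

type event struct {
	Name string    `json:"name,omitempty"`
	Time time.Time `json:"timeUnixNano,omitempty"`
}

func (e event) MarshalJSON() ([]byte, error) {
	// Alias has the same fields but none of event's methods, so
	// marshaling it cannot recurse back into this MarshalJSON.
	type Alias event
	return json.Marshal(struct {
		Alias
		Time uint64 `json:"timeUnixNano,omitempty"` // shadows Alias.Time
	}{Alias: Alias(e), Time: uint64(e.Time.UnixNano())})
}

func main() {
	b, _ := json.Marshal(event{Name: "launch", Time: time.Unix(1, 0)})
	fmt.Println(string(b)) // {"name":"launch","timeUnixNano":1000000000}
}
```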
+	Attrs []Attr `json:"attributes,omitempty"`
+	// dropped_attributes_count is the number of dropped attributes. If the value is 0,
+	// then no attributes were dropped.
+	DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+	// Flags, a bit field.
+	//
+	// Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace
+	// Context specification. To read the 8-bit W3C trace flag, use
+	// `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`.
+	//
+	// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+	//
+	// Bits 8 and 9 represent the 3 states of whether the link is remote.
+	// The states are (unknown, is not remote, is remote).
+	// To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`.
+	// To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`.
+	//
+	// Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero.
+	// When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero.
+	//
+	// [Optional].
+	Flags uint32 `json:"flags,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into sl.
+func (sl *SpanLink) UnmarshalJSON(data []byte) error {
+	decoder := json.NewDecoder(bytes.NewReader(data))
+
+	t, err := decoder.Token()
+	if err != nil {
+		return err
+	}
+	if t != json.Delim('{') {
+		return errors.New("invalid SpanLink type")
+	}
+
+	for decoder.More() {
+		keyIface, err := decoder.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				// Empty.
+				return nil
+			}
+			return err
+		}
+
+		key, ok := keyIface.(string)
+		if !ok {
+			return fmt.Errorf("invalid SpanLink field: %#v", keyIface)
+		}
+
+		switch key {
+		case "traceId", "trace_id":
+			err = decoder.Decode(&sl.TraceID)
+		case "spanId", "span_id":
+			err = decoder.Decode(&sl.SpanID)
+		case "traceState", "trace_state":
+			err = decoder.Decode(&sl.TraceState)
+		case "attributes":
+			err = decoder.Decode(&sl.Attrs)
+		case "droppedAttributesCount", "dropped_attributes_count":
+			err = decoder.Decode(&sl.DroppedAttrs)
+		case "flags":
+			err = decoder.Decode(&sl.Flags)
+		default:
+			// Skip unknown.
+		}
+
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go
new file mode 100644
index 00000000000..1217776ead1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go
@@ -0,0 +1,40 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+// For the semantics of status codes see
+// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status
+type StatusCode int32
+
+const (
+	// The default status.
+	StatusCodeUnset StatusCode = 0
+	// The Span has been validated by an Application developer or Operator to
+	// have completed successfully.
+	StatusCodeOK StatusCode = 1
+	// The Span contains an error.
+	StatusCodeError StatusCode = 2
+)
+
+var statusCodeStrings = []string{
+	"Unset",
+	"OK",
+	"Error",
+}
+
+func (s StatusCode) String() string {
+	if s >= 0 && int(s) < len(statusCodeStrings) {
+		return statusCodeStrings[s]
+	}
+	return "<unknown telemetry.StatusCode>"
+}
+
+// The Status type defines a logical error model that is suitable for different
+// programming environments, including REST APIs and RPC APIs.
+type Status struct {
+	// A developer-facing human-readable error message.
+ Message string `json:"message,omitempty"` + // The status code. + Code StatusCode `json:"code,omitempty"` +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go new file mode 100644 index 00000000000..69a348f0f06 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go @@ -0,0 +1,189 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Traces represents the traces data that can be stored in a persistent storage, +// OR can be embedded by other protocols that transfer OTLP traces data but do +// not implement the OTLP protocol. +// +// The main difference between this message and collector protocol is that +// in this message there will not be any "control" or "metadata" specific to +// OTLP protocol. +// +// When new fields are added into this message, the OTLP request MUST be updated +// as well. +type Traces struct { + // An array of ResourceSpans. + // For data coming from a single resource this array will typically contain + // one element. Intermediary nodes that receive data from multiple origins + // typically batch the data before forwarding further and in that case this + // array will contain multiple elements. + ResourceSpans []*ResourceSpans `json:"resourceSpans,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into td. +func (td *Traces) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid TracesData type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid TracesData field: %#v", keyIface) + } + + switch key { + case "resourceSpans", "resource_spans": + err = decoder.Decode(&td.ResourceSpans) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A collection of ScopeSpans from a Resource. +type ResourceSpans struct { + // The resource for the spans in this message. + // If this field is not set then no resource info is known. + Resource Resource `json:"resource"` + // A list of ScopeSpans that originate from a resource. + ScopeSpans []*ScopeSpans `json:"scopeSpans,omitempty"` + // This schema_url applies to the data in the "resource" field. It does not apply + // to the data in the "scope_spans" field which have their own schema_url field. + SchemaURL string `json:"schemaUrl,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into rs. +func (rs *ResourceSpans) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid ResourceSpans type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. 
+ return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid ResourceSpans field: %#v", keyIface) + } + + switch key { + case "resource": + err = decoder.Decode(&rs.Resource) + case "scopeSpans", "scope_spans": + err = decoder.Decode(&rs.ScopeSpans) + case "schemaUrl", "schema_url": + err = decoder.Decode(&rs.SchemaURL) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A collection of Spans produced by an InstrumentationScope. +type ScopeSpans struct { + // The instrumentation scope information for the spans in this message. + // Semantically when InstrumentationScope isn't set, it is equivalent with + // an empty instrumentation scope name (unknown). + Scope *Scope `json:"scope"` + // A list of Spans that originate from an instrumentation scope. + Spans []*Span `json:"spans,omitempty"` + // The Schema URL, if known. This is the identifier of the Schema that the span data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to all spans and span events in the "spans" field. + SchemaURL string `json:"schemaUrl,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into ss. +func (ss *ScopeSpans) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid ScopeSpans type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid ScopeSpans field: %#v", keyIface) + } + + switch key { + case "scope": + err = decoder.Decode(&ss.Scope) + case "spans": + err = decoder.Decode(&ss.Spans) + case "schemaUrl", "schema_url": + err = decoder.Decode(&ss.SchemaURL) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go new file mode 100644 index 00000000000..0dd01b063a3 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go @@ -0,0 +1,452 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:generate stringer -type=ValueKind -trimprefix=ValueKind + +package telemetry + +import ( + "bytes" + "cmp" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "slices" + "strconv" + "unsafe" +) + +// A Value represents a structured value. +// A zero value is valid and represents an empty value. +type Value struct { + // Ensure forward compatibility by explicitly making this not comparable. + noCmp [0]func() //nolint: unused // This is indeed used. + + // num holds the value for Int64, Float64, and Bool. It holds the length + // for String, Bytes, Slice, Map. + num uint64 + // any holds either the KindBool, KindInt64, KindFloat64, stringptr, + // bytesptr, sliceptr, or mapptr. If KindBool, KindInt64, or KindFloat64 + // then the value of Value is in num as described above. Otherwise, it + // contains the value wrapped in the appropriate type. + any any +} + +type ( + // sliceptr represents a value in Value.any for KindString Values. + stringptr *byte + // bytesptr represents a value in Value.any for KindBytes Values. 
+	bytesptr *byte
+	// sliceptr represents a value in Value.any for KindSlice Values.
+	sliceptr *Value
+	// mapptr represents a value in Value.any for KindMap Values.
+	mapptr *Attr
+)
+
+// ValueKind is the kind of a [Value].
+type ValueKind int
+
+// ValueKind values.
+const (
+	ValueKindEmpty ValueKind = iota
+	ValueKindBool
+	ValueKindFloat64
+	ValueKindInt64
+	ValueKindString
+	ValueKindBytes
+	ValueKindSlice
+	ValueKindMap
+)
+
+var valueKindStrings = []string{
+	"Empty",
+	"Bool",
+	"Float64",
+	"Int64",
+	"String",
+	"Bytes",
+	"Slice",
+	"Map",
+}
+
+func (k ValueKind) String() string {
+	if k >= 0 && int(k) < len(valueKindStrings) {
+		return valueKindStrings[k]
+	}
+	return "<unknown telemetry.ValueKind>"
+}
+
+// StringValue returns a new [Value] for a string.
+func StringValue(v string) Value {
+	return Value{
+		num: uint64(len(v)),
+		any: stringptr(unsafe.StringData(v)),
+	}
+}
+
+// IntValue returns a [Value] for an int.
+func IntValue(v int) Value { return Int64Value(int64(v)) }
+
+// Int64Value returns a [Value] for an int64.
+func Int64Value(v int64) Value {
+	return Value{num: uint64(v), any: ValueKindInt64}
+}
+
+// Float64Value returns a [Value] for a float64.
+func Float64Value(v float64) Value {
+	return Value{num: math.Float64bits(v), any: ValueKindFloat64}
+}
+
+// BoolValue returns a [Value] for a bool.
+func BoolValue(v bool) Value { //nolint:revive // Not a control flag.
+	var n uint64
+	if v {
+		n = 1
+	}
+	return Value{num: n, any: ValueKindBool}
+}
+
+// BytesValue returns a [Value] for a byte slice. The passed slice must not be
+// changed after it is passed.
+func BytesValue(v []byte) Value {
+	return Value{
+		num: uint64(len(v)),
+		any: bytesptr(unsafe.SliceData(v)),
+	}
+}
+
+// SliceValue returns a [Value] for a slice of [Value]. The passed slice must
+// not be changed after it is passed.
+func SliceValue(vs ...Value) Value {
+	return Value{
+		num: uint64(len(vs)),
+		any: sliceptr(unsafe.SliceData(vs)),
+	}
+}
+
+// MapValue returns a new [Value] for a slice of key-value pairs. The passed
+// slice must not be changed after it is passed.
+func MapValue(kvs ...Attr) Value {
+	return Value{
+		num: uint64(len(kvs)),
+		any: mapptr(unsafe.SliceData(kvs)),
+	}
+}
+
+// AsString returns the value held by v as a string.
+func (v Value) AsString() string {
+	if sp, ok := v.any.(stringptr); ok {
+		return unsafe.String(sp, v.num)
+	}
+	// TODO: error handle
+	return ""
+}
+
+// asString returns the value held by v as a string. It will panic if the Value
+// is not KindString.
+func (v Value) asString() string {
+	return unsafe.String(v.any.(stringptr), v.num)
+}
+
+// AsInt64 returns the value held by v as an int64.
+func (v Value) AsInt64() int64 {
+	if v.Kind() != ValueKindInt64 {
+		// TODO: error handle
+		return 0
+	}
+	return v.asInt64()
+}
+
+// asInt64 returns the value held by v as an int64. If v is not of KindInt64,
+// this will return garbage.
+func (v Value) asInt64() int64 {
+	// Assumes v.num was a valid int64 (overflow not checked).
+	return int64(v.num) // nolint: gosec
+}
+
+// AsBool returns the value held by v as a bool.
+func (v Value) AsBool() bool {
+	if v.Kind() != ValueKindBool {
+		// TODO: error handle
+		return false
+	}
+	return v.asBool()
+}
+
+// asBool returns the value held by v as a bool. If v is not of KindBool, this
+// will return garbage.
+func (v Value) asBool() bool { return v.num == 1 }
+
+// AsFloat64 returns the value held by v as a float64.
+func (v Value) AsFloat64() float64 { + if v.Kind() != ValueKindFloat64 { + // TODO: error handle + return 0 + } + return v.asFloat64() +} + +// asFloat64 returns the value held by v as a float64. If v is not of +// KindFloat64, this will return garbage. +func (v Value) asFloat64() float64 { return math.Float64frombits(v.num) } + +// AsBytes returns the value held by v as a []byte. +func (v Value) AsBytes() []byte { + if sp, ok := v.any.(bytesptr); ok { + return unsafe.Slice((*byte)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asBytes returns the value held by v as a []byte. It will panic if the Value +// is not KindBytes. +func (v Value) asBytes() []byte { + return unsafe.Slice((*byte)(v.any.(bytesptr)), v.num) +} + +// AsSlice returns the value held by v as a []Value. +func (v Value) AsSlice() []Value { + if sp, ok := v.any.(sliceptr); ok { + return unsafe.Slice((*Value)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asSlice returns the value held by v as a []Value. It will panic if the Value +// is not KindSlice. +func (v Value) asSlice() []Value { + return unsafe.Slice((*Value)(v.any.(sliceptr)), v.num) +} + +// AsMap returns the value held by v as a []Attr. +func (v Value) AsMap() []Attr { + if sp, ok := v.any.(mapptr); ok { + return unsafe.Slice((*Attr)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asMap returns the value held by v as a []Attr. It will panic if the +// Value is not KindMap. +func (v Value) asMap() []Attr { + return unsafe.Slice((*Attr)(v.any.(mapptr)), v.num) +} + +// Kind returns the Kind of v. +func (v Value) Kind() ValueKind { + switch x := v.any.(type) { + case ValueKind: + return x + case stringptr: + return ValueKindString + case bytesptr: + return ValueKindBytes + case sliceptr: + return ValueKindSlice + case mapptr: + return ValueKindMap + default: + return ValueKindEmpty + } +} + +// Empty returns if v does not hold any value. +func (v Value) Empty() bool { return v.Kind() == ValueKindEmpty } + +// Equal returns if v is equal to w. +func (v Value) Equal(w Value) bool { + k1 := v.Kind() + k2 := w.Kind() + if k1 != k2 { + return false + } + switch k1 { + case ValueKindInt64, ValueKindBool: + return v.num == w.num + case ValueKindString: + return v.asString() == w.asString() + case ValueKindFloat64: + return v.asFloat64() == w.asFloat64() + case ValueKindSlice: + return slices.EqualFunc(v.asSlice(), w.asSlice(), Value.Equal) + case ValueKindMap: + sv := sortMap(v.asMap()) + sw := sortMap(w.asMap()) + return slices.EqualFunc(sv, sw, Attr.Equal) + case ValueKindBytes: + return bytes.Equal(v.asBytes(), w.asBytes()) + case ValueKindEmpty: + return true + default: + // TODO: error handle + return false + } +} + +func sortMap(m []Attr) []Attr { + sm := make([]Attr, len(m)) + copy(sm, m) + slices.SortFunc(sm, func(a, b Attr) int { + return cmp.Compare(a.Key, b.Key) + }) + + return sm +} + +// String returns Value's value as a string, formatted like [fmt.Sprint]. +// +// The returned string is meant for debugging; +// the string representation is not stable. +func (v Value) String() string { + switch v.Kind() { + case ValueKindString: + return v.asString() + case ValueKindInt64: + // Assumes v.num was a valid int64 (overflow not checked). 
+		return strconv.FormatInt(int64(v.num), 10) // nolint: gosec
+	case ValueKindFloat64:
+		return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64)
+	case ValueKindBool:
+		return strconv.FormatBool(v.asBool())
+	case ValueKindBytes:
+		return fmt.Sprint(v.asBytes())
+	case ValueKindMap:
+		return fmt.Sprint(v.asMap())
+	case ValueKindSlice:
+		return fmt.Sprint(v.asSlice())
+	case ValueKindEmpty:
+		return ""
+	default:
+		// Try to handle this as gracefully as possible.
+		//
+		// Don't panic here. The goal here is to have developers find this
+		// first if a slog.Kind is not handled. It is preferable to have
+		// users open an issue asking why their attributes have an
+		// "unhandled: " prefix than to say that their code is panicking.
+		return fmt.Sprintf("<unhandled: (%s)>", v.Kind())
+	}
+}
+
+// MarshalJSON encodes v into OTLP formatted JSON.
+func (v *Value) MarshalJSON() ([]byte, error) {
+	switch v.Kind() {
+	case ValueKindString:
+		return json.Marshal(struct {
+			Value string `json:"stringValue"`
+		}{v.asString()})
+	case ValueKindInt64:
+		return json.Marshal(struct {
+			Value string `json:"intValue"`
+		}{strconv.FormatInt(int64(v.num), 10)})
+	case ValueKindFloat64:
+		return json.Marshal(struct {
+			Value float64 `json:"doubleValue"`
+		}{v.asFloat64()})
+	case ValueKindBool:
+		return json.Marshal(struct {
+			Value bool `json:"boolValue"`
+		}{v.asBool()})
+	case ValueKindBytes:
+		return json.Marshal(struct {
+			Value []byte `json:"bytesValue"`
+		}{v.asBytes()})
+	case ValueKindMap:
+		return json.Marshal(struct {
+			Value struct {
+				Values []Attr `json:"values"`
+			} `json:"kvlistValue"`
+		}{struct {
+			Values []Attr `json:"values"`
+		}{v.asMap()}})
+	case ValueKindSlice:
+		return json.Marshal(struct {
+			Value struct {
+				Values []Value `json:"values"`
+			} `json:"arrayValue"`
+		}{struct {
+			Values []Value `json:"values"`
+		}{v.asSlice()}})
+	case ValueKindEmpty:
+		return nil, nil
+	default:
+		return nil, fmt.Errorf("unknown Value kind: %s", v.Kind().String())
+	}
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into v.
+func (v *Value) UnmarshalJSON(data []byte) error {
+	decoder := json.NewDecoder(bytes.NewReader(data))
+
+	t, err := decoder.Token()
+	if err != nil {
+		return err
+	}
+	if t != json.Delim('{') {
+		return errors.New("invalid Value type")
+	}
+
+	for decoder.More() {
+		keyIface, err := decoder.Token()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				// Empty.
+				return nil
+			}
+			return err
+		}
+
+		key, ok := keyIface.(string)
+		if !ok {
+			return fmt.Errorf("invalid Value key: %#v", keyIface)
+		}
+
+		switch key {
+		case "stringValue", "string_value":
+			var val string
+			err = decoder.Decode(&val)
+			*v = StringValue(val)
+		case "boolValue", "bool_value":
+			var val bool
+			err = decoder.Decode(&val)
+			*v = BoolValue(val)
+		case "intValue", "int_value":
+			var val protoInt64
+			err = decoder.Decode(&val)
+			*v = Int64Value(val.Int64())
+		case "doubleValue", "double_value":
+			var val float64
+			err = decoder.Decode(&val)
+			*v = Float64Value(val)
+		case "bytesValue", "bytes_value":
+			var val64 string
+			if err := decoder.Decode(&val64); err != nil {
+				return err
+			}
+			var val []byte
+			val, err = base64.StdEncoding.DecodeString(val64)
+			*v = BytesValue(val)
+		case "arrayValue", "array_value":
+			var val struct{ Values []Value }
+			err = decoder.Decode(&val)
+			*v = SliceValue(val.Values...)
+		case "kvlistValue", "kvlist_value":
+			var val struct{ Values []Attr }
+			err = decoder.Decode(&val)
+			*v = MapValue(val.Values...)
+		default:
+			// Skip unknown.
+			continue
+		}
+		// Use first valid.
Ignore the rest. + return err + } + + // Only unknown fields. Return nil without unmarshaling any value. + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/limit.go b/vendor/go.opentelemetry.io/auto/sdk/limit.go new file mode 100644 index 00000000000..86babf1a885 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/limit.go @@ -0,0 +1,94 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk + +import ( + "log/slog" + "os" + "strconv" +) + +// maxSpan are the span limits resolved during startup. +var maxSpan = newSpanLimits() + +type spanLimits struct { + // Attrs is the number of allowed attributes for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. Otherwise, the + // environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT, or 128 if + // that is not set, is used. + Attrs int + // AttrValueLen is the maximum attribute value length allowed for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the + // environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT, or -1 + // if that is not set, is used. + AttrValueLen int + // Events is the number of allowed events for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_EVENT_COUNT_LIMIT key, or 128 is used if that is not set. + Events int + // EventAttrs is the number of allowed attributes for a span event. + // + // The is resolved from the environment variable value for the + // OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key, or 128 is used if that is not set. + EventAttrs int + // Links is the number of allowed Links for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_LINK_COUNT_LIMIT, or 128 is used if that is not set. + Links int + // LinkAttrs is the number of allowed attributes for a span link. + // + // This is resolved from the environment variable value for the + // OTEL_LINK_ATTRIBUTE_COUNT_LIMIT, or 128 is used if that is not set. + LinkAttrs int +} + +func newSpanLimits() spanLimits { + return spanLimits{ + Attrs: firstEnv( + 128, + "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT", + "OTEL_ATTRIBUTE_COUNT_LIMIT", + ), + AttrValueLen: firstEnv( + -1, // Unlimited. + "OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT", + "OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT", + ), + Events: firstEnv(128, "OTEL_SPAN_EVENT_COUNT_LIMIT"), + EventAttrs: firstEnv(128, "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT"), + Links: firstEnv(128, "OTEL_SPAN_LINK_COUNT_LIMIT"), + LinkAttrs: firstEnv(128, "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT"), + } +} + +// firstEnv returns the parsed integer value of the first matching environment +// variable from keys. The defaultVal is returned if the value is not an +// integer or no match is found. 
+func firstEnv(defaultVal int, keys ...string) int { + for _, key := range keys { + strV := os.Getenv(key) + if strV == "" { + continue + } + + v, err := strconv.Atoi(strV) + if err == nil { + return v + } + slog.Warn( + "invalid limit environment variable", + "error", err, + "key", key, + "value", strV, + ) + } + + return defaultVal +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/span.go b/vendor/go.opentelemetry.io/auto/sdk/span.go new file mode 100644 index 00000000000..6ebea12a9e9 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/span.go @@ -0,0 +1,432 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk + +import ( + "encoding/json" + "fmt" + "reflect" + "runtime" + "strings" + "sync" + "sync/atomic" + "time" + "unicode/utf8" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" + + "go.opentelemetry.io/auto/sdk/internal/telemetry" +) + +type span struct { + noop.Span + + spanContext trace.SpanContext + sampled atomic.Bool + + mu sync.Mutex + traces *telemetry.Traces + span *telemetry.Span +} + +func (s *span) SpanContext() trace.SpanContext { + if s == nil { + return trace.SpanContext{} + } + // s.spanContext is immutable, do not acquire lock s.mu. + return s.spanContext +} + +func (s *span) IsRecording() bool { + if s == nil { + return false + } + + return s.sampled.Load() +} + +func (s *span) SetStatus(c codes.Code, msg string) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + if s.span.Status == nil { + s.span.Status = new(telemetry.Status) + } + + s.span.Status.Message = msg + + switch c { + case codes.Unset: + s.span.Status.Code = telemetry.StatusCodeUnset + case codes.Error: + s.span.Status.Code = telemetry.StatusCodeError + case codes.Ok: + s.span.Status.Code = telemetry.StatusCodeOK + } +} + +func (s *span) SetAttributes(attrs ...attribute.KeyValue) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + limit := maxSpan.Attrs + if limit == 0 { + // No attributes allowed. + s.span.DroppedAttrs += uint32(len(attrs)) + return + } + + m := make(map[string]int) + for i, a := range s.span.Attrs { + m[a.Key] = i + } + + for _, a := range attrs { + val := convAttrValue(a.Value) + if val.Empty() { + s.span.DroppedAttrs++ + continue + } + + if idx, ok := m[string(a.Key)]; ok { + s.span.Attrs[idx] = telemetry.Attr{ + Key: string(a.Key), + Value: val, + } + } else if limit < 0 || len(s.span.Attrs) < limit { + s.span.Attrs = append(s.span.Attrs, telemetry.Attr{ + Key: string(a.Key), + Value: val, + }) + m[string(a.Key)] = len(s.span.Attrs) - 1 + } else { + s.span.DroppedAttrs++ + } + } +} + +// convCappedAttrs converts up to limit attrs into a []telemetry.Attr. The +// number of dropped attributes is also returned. +func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, uint32) { + if limit == 0 { + return nil, uint32(len(attrs)) + } + + if limit < 0 { + // Unlimited. + return convAttrs(attrs), 0 + } + + limit = min(len(attrs), limit) + return convAttrs(attrs[:limit]), uint32(len(attrs) - limit) +} + +func convAttrs(attrs []attribute.KeyValue) []telemetry.Attr { + if len(attrs) == 0 { + // Avoid allocations if not necessary. 
+ return nil + } + + out := make([]telemetry.Attr, 0, len(attrs)) + for _, attr := range attrs { + key := string(attr.Key) + val := convAttrValue(attr.Value) + if val.Empty() { + continue + } + out = append(out, telemetry.Attr{Key: key, Value: val}) + } + return out +} + +func convAttrValue(value attribute.Value) telemetry.Value { + switch value.Type() { + case attribute.BOOL: + return telemetry.BoolValue(value.AsBool()) + case attribute.INT64: + return telemetry.Int64Value(value.AsInt64()) + case attribute.FLOAT64: + return telemetry.Float64Value(value.AsFloat64()) + case attribute.STRING: + v := truncate(maxSpan.AttrValueLen, value.AsString()) + return telemetry.StringValue(v) + case attribute.BOOLSLICE: + slice := value.AsBoolSlice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.BoolValue(v)) + } + return telemetry.SliceValue(out...) + case attribute.INT64SLICE: + slice := value.AsInt64Slice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.Int64Value(v)) + } + return telemetry.SliceValue(out...) + case attribute.FLOAT64SLICE: + slice := value.AsFloat64Slice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.Float64Value(v)) + } + return telemetry.SliceValue(out...) + case attribute.STRINGSLICE: + slice := value.AsStringSlice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + v = truncate(maxSpan.AttrValueLen, v) + out = append(out, telemetry.StringValue(v)) + } + return telemetry.SliceValue(out...) + } + return telemetry.Value{} +} + +// truncate returns a truncated version of s such that it contains less than +// the limit number of characters. Truncation is applied by returning the limit +// number of valid characters contained in s. +// +// If limit is negative, it returns the original string. +// +// UTF-8 is supported. When truncating, all invalid characters are dropped +// before applying truncation. +// +// If s already contains less than the limit number of bytes, it is returned +// unchanged. No invalid characters are removed. +func truncate(limit int, s string) string { + // This prioritize performance in the following order based on the most + // common expected use-cases. + // + // - Short values less than the default limit (128). + // - Strings with valid encodings that exceed the limit. + // - No limit. + // - Strings with invalid encodings that exceed the limit. + if limit < 0 || len(s) <= limit { + return s + } + + // Optimistically, assume all valid UTF-8. + var b strings.Builder + count := 0 + for i, c := range s { + if c != utf8.RuneError { + count++ + if count > limit { + return s[:i] + } + continue + } + + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // Invalid encoding. + b.Grow(len(s) - 1) + _, _ = b.WriteString(s[:i]) + s = s[i:] + break + } + } + + // Fast-path, no invalid input. + if b.Cap() == 0 { + return s + } + + // Truncate while validating UTF-8. + for i := 0; i < len(s) && count < limit; { + c := s[i] + if c < utf8.RuneSelf { + // Optimization for single byte runes (common case). + _ = b.WriteByte(c) + i++ + count++ + continue + } + + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // We checked for all 1-byte runes above, this is a RuneError. 
+ i++ + continue + } + + _, _ = b.WriteString(s[i : i+size]) + i += size + count++ + } + + return b.String() +} + +func (s *span) End(opts ...trace.SpanEndOption) { + if s == nil || !s.sampled.Swap(false) { + return + } + + // s.end exists so the lock (s.mu) is not held while s.ended is called. + s.ended(s.end(opts)) +} + +func (s *span) end(opts []trace.SpanEndOption) []byte { + s.mu.Lock() + defer s.mu.Unlock() + + cfg := trace.NewSpanEndConfig(opts...) + if t := cfg.Timestamp(); !t.IsZero() { + s.span.EndTime = cfg.Timestamp() + } else { + s.span.EndTime = time.Now() + } + + b, _ := json.Marshal(s.traces) // TODO: do not ignore this error. + return b +} + +// Expected to be implemented in eBPF. +// +//go:noinline +func (*span) ended(buf []byte) { ended(buf) } + +// ended is used for testing. +var ended = func([]byte) {} + +func (s *span) RecordError(err error, opts ...trace.EventOption) { + if s == nil || err == nil || !s.sampled.Load() { + return + } + + cfg := trace.NewEventConfig(opts...) + + attrs := cfg.Attributes() + attrs = append(attrs, + semconv.ExceptionType(typeStr(err)), + semconv.ExceptionMessage(err.Error()), + ) + if cfg.StackTrace() { + buf := make([]byte, 2048) + n := runtime.Stack(buf, false) + attrs = append(attrs, semconv.ExceptionStacktrace(string(buf[0:n]))) + } + + s.mu.Lock() + defer s.mu.Unlock() + + s.addEvent(semconv.ExceptionEventName, cfg.Timestamp(), attrs) +} + +func typeStr(i any) string { + t := reflect.TypeOf(i) + if t.PkgPath() == "" && t.Name() == "" { + // Likely a builtin type. + return t.String() + } + return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) +} + +func (s *span) AddEvent(name string, opts ...trace.EventOption) { + if s == nil || !s.sampled.Load() { + return + } + + cfg := trace.NewEventConfig(opts...) + + s.mu.Lock() + defer s.mu.Unlock() + + s.addEvent(name, cfg.Timestamp(), cfg.Attributes()) +} + +// addEvent adds an event with name and attrs at tStamp to the span. The span +// lock (s.mu) needs to be held by the caller. +func (s *span) addEvent(name string, tStamp time.Time, attrs []attribute.KeyValue) { + limit := maxSpan.Events + + if limit == 0 { + s.span.DroppedEvents++ + return + } + + if limit > 0 && len(s.span.Events) == limit { + // Drop head while avoiding allocation of more capacity. + copy(s.span.Events[:limit-1], s.span.Events[1:]) + s.span.Events = s.span.Events[:limit-1] + s.span.DroppedEvents++ + } + + e := &telemetry.SpanEvent{Time: tStamp, Name: name} + e.Attrs, e.DroppedAttrs = convCappedAttrs(maxSpan.EventAttrs, attrs) + + s.span.Events = append(s.span.Events, e) +} + +func (s *span) AddLink(link trace.Link) { + if s == nil || !s.sampled.Load() { + return + } + + l := maxSpan.Links + + s.mu.Lock() + defer s.mu.Unlock() + + if l == 0 { + s.span.DroppedLinks++ + return + } + + if l > 0 && len(s.span.Links) == l { + // Drop head while avoiding allocation of more capacity. 
+ copy(s.span.Links[:l-1], s.span.Links[1:]) + s.span.Links = s.span.Links[:l-1] + s.span.DroppedLinks++ + } + + s.span.Links = append(s.span.Links, convLink(link)) +} + +func convLinks(links []trace.Link) []*telemetry.SpanLink { + out := make([]*telemetry.SpanLink, 0, len(links)) + for _, link := range links { + out = append(out, convLink(link)) + } + return out +} + +func convLink(link trace.Link) *telemetry.SpanLink { + l := &telemetry.SpanLink{ + TraceID: telemetry.TraceID(link.SpanContext.TraceID()), + SpanID: telemetry.SpanID(link.SpanContext.SpanID()), + TraceState: link.SpanContext.TraceState().String(), + Flags: uint32(link.SpanContext.TraceFlags()), + } + l.Attrs, l.DroppedAttrs = convCappedAttrs(maxSpan.LinkAttrs, link.Attributes) + + return l +} + +func (s *span) SetName(name string) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + s.span.Name = name +} + +func (*span) TracerProvider() trace.TracerProvider { return TracerProvider() } diff --git a/vendor/go.opentelemetry.io/auto/sdk/tracer.go b/vendor/go.opentelemetry.io/auto/sdk/tracer.go new file mode 100644 index 00000000000..cbcfabde3b1 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/tracer.go @@ -0,0 +1,124 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" + + "go.opentelemetry.io/auto/sdk/internal/telemetry" +) + +type tracer struct { + noop.Tracer + + name, schemaURL, version string +} + +var _ trace.Tracer = tracer{} + +func (t tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { + var psc trace.SpanContext + sampled := true + span := new(span) + + // Ask eBPF for sampling decision and span context info. + t.start(ctx, span, &psc, &sampled, &span.spanContext) + + span.sampled.Store(sampled) + + ctx = trace.ContextWithSpan(ctx, span) + + if sampled { + // Only build traces if sampled. + cfg := trace.NewSpanStartConfig(opts...) + span.traces, span.span = t.traces(name, cfg, span.spanContext, psc) + } + + return ctx, span +} + +// Expected to be implemented in eBPF. +// +//go:noinline +func (t *tracer) start( + ctx context.Context, + spanPtr *span, + psc *trace.SpanContext, + sampled *bool, + sc *trace.SpanContext, +) { + start(ctx, spanPtr, psc, sampled, sc) +} + +// start is used for testing. 
+var start = func(context.Context, *span, *trace.SpanContext, *bool, *trace.SpanContext) {} + +func (t tracer) traces(name string, cfg trace.SpanConfig, sc, psc trace.SpanContext) (*telemetry.Traces, *telemetry.Span) { + span := &telemetry.Span{ + TraceID: telemetry.TraceID(sc.TraceID()), + SpanID: telemetry.SpanID(sc.SpanID()), + Flags: uint32(sc.TraceFlags()), + TraceState: sc.TraceState().String(), + ParentSpanID: telemetry.SpanID(psc.SpanID()), + Name: name, + Kind: spanKind(cfg.SpanKind()), + } + + span.Attrs, span.DroppedAttrs = convCappedAttrs(maxSpan.Attrs, cfg.Attributes()) + + links := cfg.Links() + if limit := maxSpan.Links; limit == 0 { + span.DroppedLinks = uint32(len(links)) + } else { + if limit > 0 { + n := max(len(links)-limit, 0) + span.DroppedLinks = uint32(n) + links = links[n:] + } + span.Links = convLinks(links) + } + + if t := cfg.Timestamp(); !t.IsZero() { + span.StartTime = cfg.Timestamp() + } else { + span.StartTime = time.Now() + } + + return &telemetry.Traces{ + ResourceSpans: []*telemetry.ResourceSpans{ + { + ScopeSpans: []*telemetry.ScopeSpans{ + { + Scope: &telemetry.Scope{ + Name: t.name, + Version: t.version, + }, + Spans: []*telemetry.Span{span}, + SchemaURL: t.schemaURL, + }, + }, + }, + }, + }, span +} + +func spanKind(kind trace.SpanKind) telemetry.SpanKind { + switch kind { + case trace.SpanKindInternal: + return telemetry.SpanKindInternal + case trace.SpanKindServer: + return telemetry.SpanKindServer + case trace.SpanKindClient: + return telemetry.SpanKindClient + case trace.SpanKindProducer: + return telemetry.SpanKindProducer + case trace.SpanKindConsumer: + return telemetry.SpanKindConsumer + } + return telemetry.SpanKind(0) // undefined. +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go b/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go new file mode 100644 index 00000000000..dbc477a59ad --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go @@ -0,0 +1,33 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk + +import ( + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" +) + +// TracerProvider returns an auto-instrumentable [trace.TracerProvider]. +// +// If an [go.opentelemetry.io/auto.Instrumentation] is configured to instrument +// the process using the returned TracerProvider, all of the telemetry it +// produces will be processed and handled by that Instrumentation. By default, +// if no Instrumentation instruments the TracerProvider it will not generate +// any trace telemetry. +func TracerProvider() trace.TracerProvider { return tracerProviderInstance } + +var tracerProviderInstance = new(tracerProvider) + +type tracerProvider struct{ noop.TracerProvider } + +var _ trace.TracerProvider = tracerProvider{} + +func (p tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer { + cfg := trace.NewTracerConfig(opts...) + return tracer{ + name: name, + version: cfg.InstrumentationVersion(), + schemaURL: cfg.SchemaURL(), + } +} diff --git a/vendor/go.opentelemetry.io/collector/client/LICENSE b/vendor/go.opentelemetry.io/collector/client/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/client/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
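Returning to the SDK surface added above: `tracer_provider.go` exposes the package's only public entry point. The provider is safe to wire in unconditionally, because it emits no telemetry unless an auto-instrumentation process attaches to it. A hedged usage sketch, assuming the `go.opentelemetry.io/auto/sdk` import path introduced by this diff:

```go
package main

import (
	"context"

	"go.opentelemetry.io/auto/sdk"
)

func main() {
	tp := sdk.TracerProvider()
	tracer := tp.Tracer("example/app")

	// Without an attached instrumentation, Start returns an effectively
	// no-op span, so this is safe to run anywhere.
	ctx, span := tracer.Start(context.Background(), "operation")
	defer span.End()

	_ = ctx // Propagate ctx so downstream spans nest under this one.
}
```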
diff --git a/vendor/go.opentelemetry.io/collector/client/Makefile b/vendor/go.opentelemetry.io/collector/client/Makefile new file mode 100644 index 00000000000..39734bfaebb --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/client/Makefile @@ -0,0 +1 @@ +include ../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/component/component.go b/vendor/go.opentelemetry.io/collector/component/component.go index 794fc9235a9..0a0c160fc59 100644 --- a/vendor/go.opentelemetry.io/collector/component/component.go +++ b/vendor/go.opentelemetry.io/collector/component/component.go @@ -1,22 +1,17 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -// Package component outlines the abstraction of components within the OpenTelemetry Collector. It provides details on the component +// Package component outlines the abstraction of components within the OpenTelemetry Collector. It provides details on the component // lifecycle as well as defining the interface that components must fulfill. package component // import "go.opentelemetry.io/collector/component" import ( "context" - "errors" + "fmt" + "strings" ) -var ( - // ErrDataTypeIsNotSupported can be returned by receiver, exporter or processor factory funcs that create the - // Component if the particular telemetry data type is not supported by the receiver, exporter or processor. - ErrDataTypeIsNotSupported = errors.New("telemetry type is not supported") -) - -// Component is either a receiver, exporter, processor, or an extension. +// Component is either a receiver, exporter, processor, connector, or an extension. // // A component's lifecycle has the following phases: // @@ -113,7 +108,7 @@ func (k Kind) String() string { // StabilityLevel represents the stability level of the component created by the factory. // The stability level is used to determine if the component should be used in production // or not. For more details see: -// https://github.com/open-telemetry/opentelemetry-collector#stability-levels +// https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#stability-levels type StabilityLevel int const ( @@ -126,6 +121,29 @@ const ( StabilityLevelStable ) +func (sl *StabilityLevel) UnmarshalText(in []byte) error { + str := strings.ToLower(string(in)) + switch str { + case "undefined": + *sl = StabilityLevelUndefined + case "unmaintained": + *sl = StabilityLevelUnmaintained + case "deprecated": + *sl = StabilityLevelDeprecated + case "development": + *sl = StabilityLevelDevelopment + case "alpha": + *sl = StabilityLevelAlpha + case "beta": + *sl = StabilityLevelBeta + case "stable": + *sl = StabilityLevelStable + default: + return fmt.Errorf("unsupported stability level: %q", string(in)) + } + return nil +} + func (sl StabilityLevel) String() string { switch sl { case StabilityLevelUndefined: @@ -149,7 +167,7 @@ func (sl StabilityLevel) String() string { func (sl StabilityLevel) LogMessage() string { switch sl { case StabilityLevelUnmaintained: - return "Unmaintained component. Actively looking for contributors. Component will become deprecated after 6 months of remaining unmaintained." + return "Unmaintained component. Actively looking for contributors. Component will become deprecated after 3 months of remaining unmaintained." case StabilityLevelDeprecated: return "Deprecated component. Will be removed in future releases." case StabilityLevelDevelopment: @@ -160,21 +178,19 @@ func (sl StabilityLevel) LogMessage() string { return "Beta component. 
May change in the future." case StabilityLevelStable: return "Stable component." + default: + return "Stability level of component is undefined" } - return "Stability level of component is undefined" } // Factory is implemented by all Component factories. -// -// This interface cannot be directly implemented. Implementations must -// use the factory helpers for the appropriate component type. type Factory interface { // Type gets the type of the component created by this factory. Type() Type // CreateDefaultConfig creates the default configuration for the Component. // This method can be called multiple times depending on the pipeline - // configuration and should not cause side-effects that prevent the creation + // configuration and should not cause side effects that prevent the creation // of multiple instances of the Component. // The object returned by this method needs to pass the checks implemented by // 'componenttest.CheckConfigStruct'. It is recommended to have these checks in the @@ -189,10 +205,3 @@ type CreateDefaultConfigFunc func() Config func (f CreateDefaultConfigFunc) CreateDefaultConfig() Config { return f() } - -// InstanceID uniquely identifies a component instance -type InstanceID struct { - ID ID - Kind Kind - PipelineIDs map[ID]struct{} -} diff --git a/vendor/go.opentelemetry.io/collector/component/componentstatus/LICENSE b/vendor/go.opentelemetry.io/collector/component/componentstatus/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/component/componentstatus/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
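A minimal sketch (editorial, not part of the patch) of the StabilityLevel.UnmarshalText API added in component.go above, assuming the vendored module is importable under its usual path; unknown level strings return an error:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/component"
)

func main() {
	// Parse a textual stability level; UnmarshalText is case-insensitive.
	var sl component.StabilityLevel
	if err := sl.UnmarshalText([]byte("beta")); err != nil {
		panic(err)
	}
	fmt.Println(sl)              // "Beta"
	fmt.Println(sl.LogMessage()) // "Beta component. May change in the future."
}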
diff --git a/vendor/go.opentelemetry.io/collector/config/internal/Makefile b/vendor/go.opentelemetry.io/collector/component/componentstatus/Makefile similarity index 100% rename from vendor/go.opentelemetry.io/collector/config/internal/Makefile rename to vendor/go.opentelemetry.io/collector/component/componentstatus/Makefile diff --git a/vendor/go.opentelemetry.io/collector/component/componentstatus/instance.go b/vendor/go.opentelemetry.io/collector/component/componentstatus/instance.go new file mode 100644 index 00000000000..a654b7f7a87 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/component/componentstatus/instance.go @@ -0,0 +1,91 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package componentstatus // import "go.opentelemetry.io/collector/component/componentstatus" + +import ( + "slices" + "sort" + "strings" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/pipeline" +) + +// pipelineDelim is the delimiter for internal representation of pipeline +// component IDs. +const pipelineDelim = byte(0x20) + +// InstanceID uniquely identifies a component instance +// +// TODO: consider moving this struct to a new package/module like `extension/statuswatcher` +// https://github.com/open-telemetry/opentelemetry-collector/issues/10764 +type InstanceID struct { + componentID component.ID + kind component.Kind + pipelineIDs string // IDs encoded as a string so InstanceID is Comparable. +} + +// NewInstanceID returns an ID that uniquely identifies a component. +func NewInstanceID(componentID component.ID, kind component.Kind, pipelineIDs ...pipeline.ID) *InstanceID { + instanceID := &InstanceID{ + componentID: componentID, + kind: kind, + } + instanceID.addPipelines(pipelineIDs) + return instanceID +} + +// ComponentID returns the ComponentID associated with this instance. +func (id *InstanceID) ComponentID() component.ID { + return id.componentID +} + +// Kind returns the component Kind associated with this instance. +func (id *InstanceID) Kind() component.Kind { + return id.kind +} + +// AllPipelineIDs calls f for each pipeline this instance is associated with. If +// f returns false it will stop iteration. +func (id *InstanceID) AllPipelineIDs(f func(pipeline.ID) bool) { + var bs []byte + for _, b := range []byte(id.pipelineIDs) { + if b != pipelineDelim { + bs = append(bs, b) + continue + } + pipelineID := pipeline.ID{} + err := pipelineID.UnmarshalText(bs) + bs = bs[:0] + if err != nil { + continue + } + if !f(pipelineID) { + break + } + } +} + +// WithPipelines returns a new InstanceID updated to include the given +// pipelineIDs. 
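+// For illustration only (an editorial sketch, not part of the upstream file; it
+// assumes component.MustNewID, component.KindReceiver, and pipeline.NewID with
+// the signal constants from go.opentelemetry.io/collector/pipeline):
+//
+//	id := componentstatus.NewInstanceID(component.MustNewID("otlp"), component.KindReceiver, pipeline.NewID(pipeline.SignalTraces))
+//	id = id.WithPipelines(pipeline.NewID(pipeline.SignalMetrics))
+//	// id now also covers the metrics pipeline; the original InstanceID is unchanged.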
+func (id *InstanceID) WithPipelines(pipelineIDs ...pipeline.ID) *InstanceID { + instanceID := &InstanceID{ + componentID: id.componentID, + kind: id.kind, + pipelineIDs: id.pipelineIDs, + } + instanceID.addPipelines(pipelineIDs) + return instanceID +} + +func (id *InstanceID) addPipelines(pipelineIDs []pipeline.ID) { + delim := string(pipelineDelim) + strIDs := strings.Split(id.pipelineIDs, delim) + for _, pID := range pipelineIDs { + strIDs = append(strIDs, pID.String()) + } + sort.Strings(strIDs) + strIDs = slices.Compact(strIDs) + id.pipelineIDs = strings.Join(strIDs, delim) + delim +} diff --git a/vendor/go.opentelemetry.io/collector/component/componentstatus/status.go b/vendor/go.opentelemetry.io/collector/component/componentstatus/status.go new file mode 100644 index 00000000000..c55fff3ffa3 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/component/componentstatus/status.go @@ -0,0 +1,160 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package componentstatus is an experimental module that defines how components should +// report health statuses, how collector hosts should facilitate component status reporting, +// and how extensions should watch for new component statuses. +// +// This package is currently under development and is exempt from the Collector SIG's +// breaking change policy. +package componentstatus // import "go.opentelemetry.io/collector/component/componentstatus" + +import ( + "time" + + "go.opentelemetry.io/collector/component" +) + +// Reporter is an extra interface for `component.Host` implementations. +// A Reporter defines how to report a `componentstatus.Event`. +type Reporter interface { + // Report allows a component to report runtime changes in status. The service + // will automatically report status for a component during startup and shutdown. Components can + // use this method to report status after start and before shutdown. For more details about + // component status reporting see: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-status.md + Report(*Event) +} + +// Watcher is an extra interface for Extension hosted by the OpenTelemetry +// Collector that is to be implemented by extensions interested in changes to component +// status. +// +// TODO: consider moving this interface to a new package/module like `extension/statuswatcher` +// https://github.com/open-telemetry/opentelemetry-collector/issues/10764 +type Watcher interface { + // ComponentStatusChanged notifies about a change in the source component status. + // Extensions that implement this interface must be prepared for ComponentStatusChanged + // to be called before, after or concurrently with calls to Component.Start() and Component.Shutdown(). + // The function may be called concurrently with itself. + ComponentStatusChanged(source *InstanceID, event *Event) +} + +type Status int32 + +// Enumeration of possible component statuses +const ( + // StatusNone indicates absence of component status. + StatusNone Status = iota + // StatusStarting indicates the component is starting. + StatusStarting + // StatusOK indicates the component is running without issues. + StatusOK + // StatusRecoverableError indicates that the component has experienced a transient error and may recover. + StatusRecoverableError + // StatusPermanentError indicates that the component has detected a condition at runtime that will need human intervention to fix. The collector will continue to run in a degraded mode.
+ StatusPermanentError + // StatusFatalError indicates that the collector has experienced a fatal runtime error and will shut down. + StatusFatalError + // StatusStopping indicates that the component is in the process of shutting down. + StatusStopping + // StatusStopped indicates that the component has completed shutdown. + StatusStopped +) + +// String returns a string representation of a Status +func (s Status) String() string { + switch s { + case StatusStarting: + return "StatusStarting" + case StatusOK: + return "StatusOK" + case StatusRecoverableError: + return "StatusRecoverableError" + case StatusPermanentError: + return "StatusPermanentError" + case StatusFatalError: + return "StatusFatalError" + case StatusStopping: + return "StatusStopping" + case StatusStopped: + return "StatusStopped" + } + return "StatusNone" +} + +// Event contains a status and timestamp, and can contain an error +type Event struct { + status Status + err error + // TODO: consider if a timestamp is necessary in the default Event struct or is needed only for the healthcheckv2 extension + // https://github.com/open-telemetry/opentelemetry-collector/issues/10763 + timestamp time.Time +} + +// Status returns the Status (enum) associated with the Event +func (ev *Event) Status() Status { + return ev.status +} + +// Err returns the error associated with the Event. +func (ev *Event) Err() error { + return ev.err +} + +// Timestamp returns the timestamp associated with the Event +func (ev *Event) Timestamp() time.Time { + return ev.timestamp +} + +// NewEvent creates and returns an Event with the specified status and sets the timestamp +// to time.Now(). To set an error on the event for an error status, use one of the dedicated +// constructors (e.g. NewRecoverableErrorEvent, NewPermanentErrorEvent, NewFatalErrorEvent) +func NewEvent(status Status) *Event { + return &Event{ + status: status, + timestamp: time.Now(), + } +} + +// NewRecoverableErrorEvent wraps a transient error +// passed as argument as an Event with a status StatusRecoverableError +// and a timestamp set to time.Now(). +func NewRecoverableErrorEvent(err error) *Event { + ev := NewEvent(StatusRecoverableError) + ev.err = err + return ev +} + +// NewPermanentErrorEvent wraps an error requiring human intervention to fix +// passed as argument as an Event with a status StatusPermanentError +// and a timestamp set to time.Now(). +func NewPermanentErrorEvent(err error) *Event { + ev := NewEvent(StatusPermanentError) + ev.err = err + return ev +} + +// NewFatalErrorEvent wraps the fatal runtime error passed as argument as an Event +// with a status StatusFatalError and a timestamp set to time.Now(). +func NewFatalErrorEvent(err error) *Event { + ev := NewEvent(StatusFatalError) + ev.err = err + return ev +} + +// StatusIsError returns true for error statuses (e.g. StatusRecoverableError, +// StatusPermanentError, or StatusFatalError) +func StatusIsError(status Status) bool { + return status == StatusRecoverableError || + status == StatusPermanentError || + status == StatusFatalError +} + +// ReportStatus is a helper function that handles checking if the component.Host has implemented Reporter. +// If it has, the Event is reported. Otherwise, nothing happens.
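+// For illustration only (an editorial sketch, not part of the upstream file;
+// dial is a hypothetical helper): a component's Start can surface a transient
+// failure without aborting collector startup:
+//
+//	if err := dial(); err != nil {
+//		componentstatus.ReportStatus(host, componentstatus.NewRecoverableErrorEvent(err))
+//	}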
+func ReportStatus(host component.Host, e *Event) { + statusReporter, ok := host.(Reporter) + if ok { + statusReporter.Report(e) + } +} diff --git a/vendor/go.opentelemetry.io/collector/component/componenttest/LICENSE b/vendor/go.opentelemetry.io/collector/component/componenttest/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/component/componenttest/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/collector/extension/experimental/storage/Makefile b/vendor/go.opentelemetry.io/collector/component/componenttest/Makefile similarity index 100% rename from vendor/go.opentelemetry.io/collector/extension/experimental/storage/Makefile rename to vendor/go.opentelemetry.io/collector/component/componenttest/Makefile diff --git a/vendor/go.opentelemetry.io/collector/component/componenttest/configtest.go b/vendor/go.opentelemetry.io/collector/component/componenttest/configtest.go index 3750dfd0295..8d1f1d157da 100644 --- a/vendor/go.opentelemetry.io/collector/component/componenttest/configtest.go +++ b/vendor/go.opentelemetry.io/collector/component/componenttest/configtest.go @@ -65,10 +65,8 @@ func validateConfigDataType(t reflect.Type) error { // checkStructFieldTags inspects the tags of a struct field. func checkStructFieldTags(f reflect.StructField) error { - tagValue := f.Tag.Get("mapstructure") if tagValue == "" { - // Ignore special types. switch f.Type.Kind() { case reflect.Interface, reflect.Chan, reflect.Func, reflect.Uintptr, reflect.UnsafePointer: diff --git a/vendor/go.opentelemetry.io/collector/component/componenttest/nop_host.go b/vendor/go.opentelemetry.io/collector/component/componenttest/nop_host.go index ea85ccc8bbe..3c9206456a3 100644 --- a/vendor/go.opentelemetry.io/collector/component/componenttest/nop_host.go +++ b/vendor/go.opentelemetry.io/collector/component/componenttest/nop_host.go @@ -22,7 +22,3 @@ func (nh *nopHost) GetFactory(component.Kind, component.Type) component.Factory func (nh *nopHost) GetExtensions() map[component.ID]component.Component { return nil } - -func (nh *nopHost) GetExporters() map[component.DataType]map[component.ID]component.Component { - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/component/componenttest/nop_telemetry.go b/vendor/go.opentelemetry.io/collector/component/componenttest/nop_telemetry.go index 171e9daa47e..0324f65c980 100644 --- a/vendor/go.opentelemetry.io/collector/component/componenttest/nop_telemetry.go +++ b/vendor/go.opentelemetry.io/collector/component/componenttest/nop_telemetry.go @@ -21,7 +21,5 @@ func NewNopTelemetrySettings() component.TelemetrySettings { MeterProvider: noopmetric.NewMeterProvider(), MetricsLevel: configtelemetry.LevelNone, Resource: pcommon.NewResource(), - ReportStatus: func(*component.StatusEvent) { - }, } } diff --git a/vendor/go.opentelemetry.io/collector/component/componenttest/obsreporttest.go b/vendor/go.opentelemetry.io/collector/component/componenttest/obsreporttest.go index f342879a533..a0eefcda1af 100644 --- a/vendor/go.opentelemetry.io/collector/component/componenttest/obsreporttest.go +++ 
b/vendor/go.opentelemetry.io/collector/component/componenttest/obsreporttest.go @@ -5,18 +5,18 @@ package componenttest // import "go.opentelemetry.io/collector/component/compone import ( "context" + "errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp" - otelprom "go.opentelemetry.io/otel/exporters/prometheus" + "go.opentelemetry.io/otel/attribute" sdkmetric "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/resource" sdktrace "go.opentelemetry.io/otel/sdk/trace" "go.opentelemetry.io/otel/sdk/trace/tracetest" - "go.uber.org/multierr" + "go.uber.org/zap" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configtelemetry" + "go.opentelemetry.io/collector/pdata/pcommon" ) const ( @@ -30,102 +30,73 @@ const ( scraperTag = "scraper" transportTag = "transport" exporterTag = "exporter" - processorTag = "processor" ) type TestTelemetry struct { - ts component.TelemetrySettings id component.ID + ts component.TelemetrySettings SpanRecorder *tracetest.SpanRecorder - - prometheusChecker *prometheusChecker - meterProvider *sdkmetric.MeterProvider + reader *sdkmetric.ManualReader } // CheckExporterTraces checks that for the current exported values for trace exporter metrics match given values. -// When this function is called it is required to also call SetupTelemetry as first thing. func (tts *TestTelemetry) CheckExporterTraces(sentSpans, sendFailedSpans int64) error { - return tts.prometheusChecker.checkExporterTraces(tts.id, sentSpans, sendFailedSpans) + return checkExporterTraces(tts.reader, tts.id, sentSpans, sendFailedSpans) } // CheckExporterMetrics checks that for the current exported values for metrics exporter metrics match given values. -// When this function is called it is required to also call SetupTelemetry as first thing. func (tts *TestTelemetry) CheckExporterMetrics(sentMetricsPoints, sendFailedMetricsPoints int64) error { - return tts.prometheusChecker.checkExporterMetrics(tts.id, sentMetricsPoints, sendFailedMetricsPoints) + return checkExporterMetrics(tts.reader, tts.id, sentMetricsPoints, sendFailedMetricsPoints) } func (tts *TestTelemetry) CheckExporterEnqueueFailedMetrics(enqueueFailed int64) error { - return tts.prometheusChecker.checkExporterEnqueueFailed(tts.id, "metric_points", enqueueFailed) + return checkExporterEnqueueFailed(tts.reader, tts.id, "metric_points", enqueueFailed) } func (tts *TestTelemetry) CheckExporterEnqueueFailedTraces(enqueueFailed int64) error { - return tts.prometheusChecker.checkExporterEnqueueFailed(tts.id, "spans", enqueueFailed) + return checkExporterEnqueueFailed(tts.reader, tts.id, "spans", enqueueFailed) } func (tts *TestTelemetry) CheckExporterEnqueueFailedLogs(enqueueFailed int64) error { - return tts.prometheusChecker.checkExporterEnqueueFailed(tts.id, "log_records", enqueueFailed) + return checkExporterEnqueueFailed(tts.reader, tts.id, "log_records", enqueueFailed) } // CheckExporterLogs checks that for the current exported values for logs exporter metrics match given values. -// When this function is called it is required to also call SetupTelemetry as first thing. 
func (tts *TestTelemetry) CheckExporterLogs(sentLogRecords, sendFailedLogRecords int64) error { - return tts.prometheusChecker.checkExporterLogs(tts.id, sentLogRecords, sendFailedLogRecords) -} - -func (tts *TestTelemetry) CheckExporterMetricGauge(metric string, val int64) error { - return tts.prometheusChecker.checkExporterMetricGauge(tts.id, metric, val) -} - -// CheckProcessorTraces checks that for the current exported values for trace exporter metrics match given values. -// When this function is called it is required to also call SetupTelemetry as first thing. -func (tts *TestTelemetry) CheckProcessorTraces(acceptedSpans, refusedSpans, droppedSpans int64) error { - return tts.prometheusChecker.checkProcessorTraces(tts.id, acceptedSpans, refusedSpans, droppedSpans) + return checkExporterLogs(tts.reader, tts.id, sentLogRecords, sendFailedLogRecords) } -// CheckProcessorMetrics checks that for the current exported values for metrics exporter metrics match given values. -// When this function is called it is required to also call SetupTelemetry as first thing. -func (tts *TestTelemetry) CheckProcessorMetrics(acceptedMetricPoints, refusedMetricPoints, droppedMetricPoints int64) error { - return tts.prometheusChecker.checkProcessorMetrics(tts.id, acceptedMetricPoints, refusedMetricPoints, droppedMetricPoints) -} - -// CheckProcessorLogs checks that for the current exported values for logs exporter metrics match given values. -// When this function is called it is required to also call SetupTelemetry as first thing. -func (tts *TestTelemetry) CheckProcessorLogs(acceptedLogRecords, refusedLogRecords, droppedLogRecords int64) error { - return tts.prometheusChecker.checkProcessorLogs(tts.id, acceptedLogRecords, refusedLogRecords, droppedLogRecords) +func (tts *TestTelemetry) CheckExporterMetricGauge(metric string, val int64, extraAttrs ...attribute.KeyValue) error { + attrs := attributesForExporterMetrics(tts.id, extraAttrs...) + return checkIntGauge(tts.reader, metric, val, attrs) } // CheckReceiverTraces checks that for the current exported values for trace receiver metrics match given values. -// When this function is called it is required to also call SetupTelemetry as first thing. func (tts *TestTelemetry) CheckReceiverTraces(protocol string, acceptedSpans, droppedSpans int64) error { - return tts.prometheusChecker.checkReceiverTraces(tts.id, protocol, acceptedSpans, droppedSpans) + return checkReceiverTraces(tts.reader, tts.id, protocol, acceptedSpans, droppedSpans) } // CheckReceiverLogs checks that for the current exported values for logs receiver metrics match given values. -// When this function is called it is required to also call SetupTelemetry as first thing. func (tts *TestTelemetry) CheckReceiverLogs(protocol string, acceptedLogRecords, droppedLogRecords int64) error { - return tts.prometheusChecker.checkReceiverLogs(tts.id, protocol, acceptedLogRecords, droppedLogRecords) + return checkReceiverLogs(tts.reader, tts.id, protocol, acceptedLogRecords, droppedLogRecords) } // CheckReceiverMetrics checks that for the current exported values for metrics receiver metrics match given values. -// When this function is called it is required to also call SetupTelemetry as first thing. 
func (tts *TestTelemetry) CheckReceiverMetrics(protocol string, acceptedMetricPoints, droppedMetricPoints int64) error { - return tts.prometheusChecker.checkReceiverMetrics(tts.id, protocol, acceptedMetricPoints, droppedMetricPoints) + return checkReceiverMetrics(tts.reader, tts.id, protocol, acceptedMetricPoints, droppedMetricPoints) } +// Deprecated: [v0.118.0] use metadatatest.AssertMetrics instead. // CheckScraperMetrics checks that for the current exported values for metrics scraper metrics match given values. -// When this function is called it is required to also call SetupTelemetry as first thing. func (tts *TestTelemetry) CheckScraperMetrics(receiver component.ID, scraper component.ID, scrapedMetricPoints, erroredMetricPoints int64) error { - return tts.prometheusChecker.checkScraperMetrics(receiver, scraper, scrapedMetricPoints, erroredMetricPoints) + return checkScraperMetrics(tts.reader, receiver, scraper, scrapedMetricPoints, erroredMetricPoints) } // Shutdown unregisters any views and shuts down the SpanRecorder func (tts *TestTelemetry) Shutdown(ctx context.Context) error { - var errs error - errs = multierr.Append(errs, tts.SpanRecorder.Shutdown(ctx)) - if tts.meterProvider != nil { - errs = multierr.Append(errs, tts.meterProvider.Shutdown(ctx)) - } - return errs + return errors.Join( + tts.ts.TracerProvider.(*sdktrace.TracerProvider).Shutdown(ctx), + tts.ts.MeterProvider.(*sdkmetric.MeterProvider).Shutdown(ctx)) } // TelemetrySettings returns the TestTelemetry's TelemetrySettings @@ -133,36 +104,27 @@ func (tts *TestTelemetry) TelemetrySettings() component.TelemetrySettings { return tts.ts } -// SetupTelemetry does setup the testing environment to check the metrics recorded by receivers, producers or exporters. -// The caller must pass the ID of the component that intends to test, so the CreateSettings and Check methods will use. -// The caller should defer a call to Shutdown the returned TestTelemetry. +// SetupTelemetry sets up the testing environment to check the metrics recorded by receivers, producers, or exporters. +// The caller must pass the ID of the component being tested. The ID will be used by the CreateSettings and Check methods. +// The caller must defer a call to `Shutdown` on the returned TestTelemetry. 
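+// For illustration only (an editorial sketch, not part of the upstream file; it
+// assumes a *testing.T in scope and github.com/stretchr/testify/require):
+//
+//	tt, err := componenttest.SetupTelemetry(component.MustNewID("otlp"))
+//	require.NoError(t, err)
+//	defer func() { require.NoError(t, tt.Shutdown(context.Background())) }()
+//	// ... exercise the component, then assert on the recorded metrics:
+//	require.NoError(t, tt.CheckReceiverTraces("grpc", 10, 0))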
func SetupTelemetry(id component.ID) (TestTelemetry, error) { - sr := new(tracetest.SpanRecorder) - tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(sr)) - settings := TestTelemetry{ - ts: NewNopTelemetrySettings(), id: id, - SpanRecorder: sr, - } - settings.ts.TracerProvider = tp - settings.ts.MetricsLevel = configtelemetry.LevelNormal - - promRegOtel := prometheus.NewRegistry() - - exp, err := otelprom.New(otelprom.WithRegisterer(promRegOtel), otelprom.WithoutUnits(), otelprom.WithoutScopeInfo(), otelprom.WithoutCounterSuffixes()) - if err != nil { - return settings, err + reader: sdkmetric.NewManualReader(), + SpanRecorder: new(tracetest.SpanRecorder), } - settings.meterProvider = sdkmetric.NewMeterProvider( + mp := sdkmetric.NewMeterProvider( sdkmetric.WithResource(resource.Empty()), - sdkmetric.WithReader(exp), + sdkmetric.WithReader(settings.reader), ) - settings.ts.MeterProvider = settings.meterProvider - settings.prometheusChecker = &prometheusChecker{ - otelHandler: promhttp.HandlerFor(promRegOtel, promhttp.HandlerOpts{}), + settings.ts = component.TelemetrySettings{ + Logger: zap.NewNop(), + TracerProvider: sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(settings.SpanRecorder)), + MeterProvider: mp, + MetricsLevel: configtelemetry.LevelDetailed, + Resource: pcommon.NewResource(), } return settings, nil diff --git a/vendor/go.opentelemetry.io/collector/component/componenttest/otelchecker.go b/vendor/go.opentelemetry.io/collector/component/componenttest/otelchecker.go new file mode 100644 index 00000000000..c3196f7d66e --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/component/componenttest/otelchecker.go @@ -0,0 +1,173 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package componenttest // import "go.opentelemetry.io/collector/component/componenttest" + +import ( + "context" + "fmt" + + "go.opentelemetry.io/otel/attribute" + sdkmetric "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/metricdata" + "go.uber.org/multierr" + + "go.opentelemetry.io/collector/component" +) + +func checkScraperMetrics(reader *sdkmetric.ManualReader, receiver component.ID, scraper component.ID, scrapedMetricPoints, erroredMetricPoints int64) error { + scraperAttrs := attributesForScraperMetrics(receiver, scraper) + return multierr.Combine( + checkIntSum(reader, "otelcol_scraper_scraped_metric_points", scrapedMetricPoints, scraperAttrs), + checkIntSum(reader, "otelcol_scraper_errored_metric_points", erroredMetricPoints, scraperAttrs)) +} + +func checkReceiverTraces(reader *sdkmetric.ManualReader, receiver component.ID, protocol string, accepted, dropped int64) error { + return checkReceiver(reader, receiver, "spans", protocol, accepted, dropped) +} + +func checkReceiverLogs(reader *sdkmetric.ManualReader, receiver component.ID, protocol string, accepted, dropped int64) error { + return checkReceiver(reader, receiver, "log_records", protocol, accepted, dropped) +} + +func checkReceiverMetrics(reader *sdkmetric.ManualReader, receiver component.ID, protocol string, accepted, dropped int64) error { + return checkReceiver(reader, receiver, "metric_points", protocol, accepted, dropped) +} + +func checkReceiver(reader *sdkmetric.ManualReader, receiver component.ID, datatype, protocol string, acceptedMetricPoints, droppedMetricPoints int64) error { + receiverAttrs := attributesForReceiverMetrics(receiver, protocol) + return multierr.Combine( + checkIntSum(reader, "otelcol_receiver_accepted_"+datatype, acceptedMetricPoints, 
receiverAttrs), + checkIntSum(reader, "otelcol_receiver_refused_"+datatype, droppedMetricPoints, receiverAttrs)) +} + +func checkExporterTraces(reader *sdkmetric.ManualReader, exporter component.ID, sent, sendFailed int64) error { + return checkExporter(reader, exporter, "spans", sent, sendFailed) +} + +func checkExporterLogs(reader *sdkmetric.ManualReader, exporter component.ID, sent, sendFailed int64) error { + return checkExporter(reader, exporter, "log_records", sent, sendFailed) +} + +func checkExporterMetrics(reader *sdkmetric.ManualReader, exporter component.ID, sent, sendFailed int64) error { + return checkExporter(reader, exporter, "metric_points", sent, sendFailed) +} + +func checkExporter(reader *sdkmetric.ManualReader, exporter component.ID, datatype string, sent, sendFailed int64) error { + exporterAttrs := attributesForExporterMetrics(exporter) + errs := checkIntSum(reader, "otelcol_exporter_sent_"+datatype, sent, exporterAttrs) + if sendFailed > 0 { + errs = multierr.Append(errs, + checkIntSum(reader, "otelcol_exporter_send_failed_"+datatype, sendFailed, exporterAttrs)) + } + return errs +} + +func checkExporterEnqueueFailed(reader *sdkmetric.ManualReader, exporter component.ID, datatype string, enqueueFailed int64) error { + if enqueueFailed == 0 { + return nil + } + exporterAttrs := attributesForExporterMetrics(exporter) + return checkIntSum(reader, "otelcol_exporter_enqueue_failed_"+datatype, enqueueFailed, exporterAttrs) +} + +func checkIntGauge(reader *sdkmetric.ManualReader, metric string, expected int64, expectedAttrs attribute.Set) error { + dp, err := getGaugeDataPoint[int64](reader, metric, expectedAttrs) + if err != nil { + return err + } + + if dp.Value != expected { + return fmt.Errorf("values for metric '%s' did not match, expected '%d' got '%d'", metric, expected, dp.Value) + } + + return nil +} + +func checkIntSum(reader *sdkmetric.ManualReader, expectedMetric string, expected int64, expectedAttrs attribute.Set) error { + dp, err := getSumDataPoint[int64](reader, expectedMetric, expectedAttrs) + if err != nil { + return err + } + + if dp.Value != expected { + return fmt.Errorf("values for metric '%s' did not match, expected '%d' got '%d'", expectedMetric, expected, dp.Value) + } + + return nil +} + +func getSumDataPoint[N int64 | float64](reader *sdkmetric.ManualReader, expectedName string, expectedAttrs attribute.Set) (metricdata.DataPoint[N], error) { + m, err := getMetric(reader, expectedName) + if err != nil { + return metricdata.DataPoint[N]{}, err + } + + switch a := m.Data.(type) { + case metricdata.Sum[N]: + return getDataPoint(a.DataPoints, expectedName, expectedAttrs) + default: + return metricdata.DataPoint[N]{}, fmt.Errorf("unknown metric type: %T", a) + } +} + +func getGaugeDataPoint[N int64 | float64](reader *sdkmetric.ManualReader, expectedName string, expectedAttrs attribute.Set) (metricdata.DataPoint[N], error) { + m, err := getMetric(reader, expectedName) + if err != nil { + return metricdata.DataPoint[N]{}, err + } + + switch a := m.Data.(type) { + case metricdata.Gauge[N]: + return getDataPoint(a.DataPoints, expectedName, expectedAttrs) + default: + return metricdata.DataPoint[N]{}, fmt.Errorf("unknown metric type: %T", a) + } +} + +func getDataPoint[N int64 | float64](dps []metricdata.DataPoint[N], expectedName string, expectedAttrs attribute.Set) (metricdata.DataPoint[N], error) { + for _, dp := range dps { + if expectedAttrs.Equals(&dp.Attributes) { + return dp, nil + } + } + return metricdata.DataPoint[N]{}, fmt.Errorf("metric '%s' 
doesn't have a data point with the given attributes: %s", expectedName, expectedAttrs.Encoded(attribute.DefaultEncoder())) +} + +func getMetric(reader *sdkmetric.ManualReader, expectedName string) (metricdata.Metrics, error) { + var rm metricdata.ResourceMetrics + if err := reader.Collect(context.Background(), &rm); err != nil { + return metricdata.Metrics{}, err + } + + for _, sm := range rm.ScopeMetrics { + for _, m := range sm.Metrics { + if m.Name == expectedName { + return m, nil + } + } + } + return metricdata.Metrics{}, fmt.Errorf("metric '%s' not found", expectedName) +} + +func attributesForScraperMetrics(receiver component.ID, scraper component.ID) attribute.Set { + return attribute.NewSet( + attribute.String(receiverTag, receiver.String()), + attribute.String(scraperTag, scraper.String()), + ) +} + +// attributesForReceiverMetrics returns the attributes that are needed for the receiver metrics. +func attributesForReceiverMetrics(receiver component.ID, transport string) attribute.Set { + return attribute.NewSet( + attribute.String(receiverTag, receiver.String()), + attribute.String(transportTag, transport), + ) +} + +// attributesForExporterMetrics returns the attributes that are needed for the exporter metrics. +func attributesForExporterMetrics(exporter component.ID, extraAttrs ...attribute.KeyValue) attribute.Set { + attrs := []attribute.KeyValue{attribute.String(exporterTag, exporter.String())} + attrs = append(attrs, extraAttrs...) + return attribute.NewSet(attrs...) +} diff --git a/vendor/go.opentelemetry.io/collector/component/componenttest/otelprometheuschecker.go b/vendor/go.opentelemetry.io/collector/component/componenttest/otelprometheuschecker.go deleted file mode 100644 index 5beef52373a..00000000000 --- a/vendor/go.opentelemetry.io/collector/component/componenttest/otelprometheuschecker.go +++ /dev/null @@ -1,202 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package componenttest // import "go.opentelemetry.io/collector/component/componenttest" - -import ( - "fmt" - "math" - "net/http" - "net/http/httptest" - - io_prometheus_client "github.com/prometheus/client_model/go" - "github.com/prometheus/common/expfmt" - "go.opentelemetry.io/otel/attribute" - "go.uber.org/multierr" - - "go.opentelemetry.io/collector/component" -) - -// prometheusChecker is used to assert exported metrics from a prometheus handler.
-type prometheusChecker struct { - otelHandler http.Handler -} - -func (pc *prometheusChecker) checkScraperMetrics(receiver component.ID, scraper component.ID, scrapedMetricPoints, erroredMetricPoints int64) error { - scraperAttrs := attributesForScraperMetrics(receiver, scraper) - return multierr.Combine( - pc.checkCounter("scraper_scraped_metric_points", scrapedMetricPoints, scraperAttrs), - pc.checkCounter("scraper_errored_metric_points", erroredMetricPoints, scraperAttrs)) -} - -func (pc *prometheusChecker) checkReceiverTraces(receiver component.ID, protocol string, accepted, dropped int64) error { - return pc.checkReceiver(receiver, "spans", protocol, accepted, dropped) -} - -func (pc *prometheusChecker) checkReceiverLogs(receiver component.ID, protocol string, accepted, dropped int64) error { - return pc.checkReceiver(receiver, "log_records", protocol, accepted, dropped) -} - -func (pc *prometheusChecker) checkReceiverMetrics(receiver component.ID, protocol string, accepted, dropped int64) error { - return pc.checkReceiver(receiver, "metric_points", protocol, accepted, dropped) -} - -func (pc *prometheusChecker) checkReceiver(receiver component.ID, datatype, protocol string, acceptedMetricPoints, droppedMetricPoints int64) error { - receiverAttrs := attributesForReceiverMetrics(receiver, protocol) - return multierr.Combine( - pc.checkCounter(fmt.Sprintf("receiver_accepted_%s", datatype), acceptedMetricPoints, receiverAttrs), - pc.checkCounter(fmt.Sprintf("receiver_refused_%s", datatype), droppedMetricPoints, receiverAttrs)) -} - -func (pc *prometheusChecker) checkProcessorTraces(processor component.ID, accepted, refused, dropped int64) error { - return pc.checkProcessor(processor, "spans", accepted, refused, dropped) -} - -func (pc *prometheusChecker) checkProcessorMetrics(processor component.ID, accepted, refused, dropped int64) error { - return pc.checkProcessor(processor, "metric_points", accepted, refused, dropped) -} - -func (pc *prometheusChecker) checkProcessorLogs(processor component.ID, accepted, refused, dropped int64) error { - return pc.checkProcessor(processor, "log_records", accepted, refused, dropped) -} - -func (pc *prometheusChecker) checkProcessor(processor component.ID, datatype string, accepted, refused, dropped int64) error { - processorAttrs := attributesForProcessorMetrics(processor) - return multierr.Combine( - pc.checkCounter(fmt.Sprintf("processor_accepted_%s", datatype), accepted, processorAttrs), - pc.checkCounter(fmt.Sprintf("processor_refused_%s", datatype), refused, processorAttrs), - pc.checkCounter(fmt.Sprintf("processor_dropped_%s", datatype), dropped, processorAttrs)) -} - -func (pc *prometheusChecker) checkExporterTraces(exporter component.ID, sent, sendFailed int64) error { - return pc.checkExporter(exporter, "spans", sent, sendFailed) -} - -func (pc *prometheusChecker) checkExporterLogs(exporter component.ID, sent, sendFailed int64) error { - return pc.checkExporter(exporter, "log_records", sent, sendFailed) -} - -func (pc *prometheusChecker) checkExporterMetrics(exporter component.ID, sent, sendFailed int64) error { - return pc.checkExporter(exporter, "metric_points", sent, sendFailed) -} - -func (pc *prometheusChecker) checkExporter(exporter component.ID, datatype string, sent, sendFailed int64) error { - exporterAttrs := attributesForExporterMetrics(exporter) - errs := pc.checkCounter(fmt.Sprintf("exporter_sent_%s", datatype), sent, exporterAttrs) - if sendFailed > 0 { - errs = multierr.Append(errs, - 
pc.checkCounter(fmt.Sprintf("exporter_send_failed_%s", datatype), sendFailed, exporterAttrs)) - } - return errs -} - -func (pc *prometheusChecker) checkExporterEnqueueFailed(exporter component.ID, datatype string, enqueueFailed int64) error { - if enqueueFailed == 0 { - return nil - } - exporterAttrs := attributesForExporterMetrics(exporter) - return pc.checkCounter(fmt.Sprintf("exporter_enqueue_failed_%s", datatype), enqueueFailed, exporterAttrs) -} - -func (pc *prometheusChecker) checkExporterMetricGauge(exporter component.ID, metric string, val int64) error { - exporterAttrs := attributesForExporterMetrics(exporter) - - ts, err := pc.getMetric(metric, io_prometheus_client.MetricType_GAUGE, exporterAttrs) - if err != nil { - return err - } - - expected := float64(val) - if math.Abs(ts.GetGauge().GetValue()-expected) > 0.0001 { - return fmt.Errorf("values for metric '%s' did not match, expected '%f' got '%f'", metric, expected, ts.GetGauge().GetValue()) - } - - return nil -} - -func (pc *prometheusChecker) checkCounter(expectedMetric string, value int64, attrs []attribute.KeyValue) error { - - ts, err := pc.getMetric(expectedMetric, io_prometheus_client.MetricType_COUNTER, attrs) - if err != nil { - return err - } - - expected := float64(value) - if math.Abs(expected-ts.GetCounter().GetValue()) > 0.0001 { - return fmt.Errorf("values for metric '%s' did not match, expected '%f' got '%f'", expectedMetric, expected, ts.GetCounter().GetValue()) - } - - return nil -} - -// getMetric returns the metric time series that matches the given name, type and set of attributes -// it fetches data from the prometheus endpoint and parse them, ideally OTel Go should provide a MeterRecorder of some kind. -func (pc *prometheusChecker) getMetric(expectedName string, expectedType io_prometheus_client.MetricType, expectedAttrs []attribute.KeyValue) (*io_prometheus_client.Metric, error) { - parsed, err := fetchPrometheusMetrics(pc.otelHandler) - if err != nil { - return nil, err - } - - metricFamily, ok := parsed[expectedName] - if !ok { - return nil, fmt.Errorf("metric '%s' not found", expectedName) - } - - if metricFamily.Type.String() != expectedType.String() { - return nil, fmt.Errorf("metric '%v' has type '%s' instead of '%s'", expectedName, metricFamily.Type.String(), expectedType.String()) - } - - expectedSet := attribute.NewSet(expectedAttrs...) - - for _, metric := range metricFamily.Metric { - var attrs []attribute.KeyValue - - for _, label := range metric.Label { - attrs = append(attrs, attribute.String(label.GetName(), label.GetValue())) - } - set := attribute.NewSet(attrs...) - - if expectedSet.Equals(&set) { - return metric, nil - } - } - - return nil, fmt.Errorf("metric '%s' doesn't have a timeseries with the given attributes: %s", expectedName, expectedSet.Encoded(attribute.DefaultEncoder())) -} - -func fetchPrometheusMetrics(handler http.Handler) (map[string]*io_prometheus_client.MetricFamily, error) { - req, err := http.NewRequest(http.MethodGet, "/metrics", nil) - if err != nil { - return nil, err - } - - rr := httptest.NewRecorder() - handler.ServeHTTP(rr, req) - - var parser expfmt.TextParser - return parser.TextToMetricFamilies(rr.Body) -} - -func attributesForScraperMetrics(receiver component.ID, scraper component.ID) []attribute.KeyValue { - return []attribute.KeyValue{ - attribute.String(receiverTag, receiver.String()), - attribute.String(scraperTag, scraper.String()), - } -} - -// attributesForReceiverMetrics returns the attributes that are needed for the receiver metrics. 
-func attributesForReceiverMetrics(receiver component.ID, transport string) []attribute.KeyValue { - return []attribute.KeyValue{ - attribute.String(receiverTag, receiver.String()), - attribute.String(transportTag, transport), - } -} - -func attributesForProcessorMetrics(processor component.ID) []attribute.KeyValue { - return []attribute.KeyValue{attribute.String(processorTag, processor.String())} -} - -// attributesForExporterMetrics returns the attributes that are needed for the receiver metrics. -func attributesForExporterMetrics(exporter component.ID) []attribute.KeyValue { - return []attribute.KeyValue{attribute.String(exporterTag, exporter.String())} -} diff --git a/vendor/go.opentelemetry.io/collector/component/config.go b/vendor/go.opentelemetry.io/collector/component/config.go index b53ff872fda..599b9be3236 100644 --- a/vendor/go.opentelemetry.io/collector/component/config.go +++ b/vendor/go.opentelemetry.io/collector/component/config.go @@ -4,13 +4,9 @@ package component // import "go.opentelemetry.io/collector/component" import ( - "fmt" "reflect" - "regexp" "go.uber.org/multierr" - - "go.opentelemetry.io/collector/confmap" ) // Config defines the configuration for a component.Component. @@ -26,12 +22,6 @@ type Config any // for an interface type Foo is to use a *Foo value. var configValidatorType = reflect.TypeOf((*ConfigValidator)(nil)).Elem() -// UnmarshalConfig helper function to UnmarshalConfig a Config. -// Deprecated: [v0.101.0] Use conf.Unmarshal(&intoCfg) -func UnmarshalConfig(conf *confmap.Conf, intoCfg Config) error { - return conf.Unmarshal(intoCfg) -} - // ConfigValidator defines an optional interface for configurations to implement to do validation. type ConfigValidator interface { // Validate the configuration and returns an error if invalid. @@ -91,10 +81,10 @@ func callValidateIfPossible(v reflect.Value) error { } // If the pointer type implements ConfigValidator call Validate on the pointer to the current value. - if reflect.PtrTo(v.Type()).Implements(configValidatorType) { + if reflect.PointerTo(v.Type()).Implements(configValidatorType) { // If not addressable, then create a new *V pointer and set the value to current v. if !v.CanAddr() { - pv := reflect.New(reflect.PtrTo(v.Type()).Elem()) + pv := reflect.New(reflect.PointerTo(v.Type()).Elem()) pv.Elem().Set(v) v = pv.Elem() } @@ -103,72 +93,3 @@ func callValidateIfPossible(v reflect.Value) error { return nil } - -// Type is the component type as it is used in the config. -type Type struct { - name string -} - -// String returns the string representation of the type. -func (t Type) String() string { - return t.name -} - -// MarshalText marshals returns the Type name. -func (t Type) MarshalText() ([]byte, error) { - return []byte(t.name), nil -} - -// typeRegexp is used to validate the type of a component. -// A type must start with an ASCII alphabetic character and -// can only contain ASCII alphanumeric characters and '_'. -// This must be kept in sync with the regex in cmd/mdatagen/validate.go. -var typeRegexp = regexp.MustCompile(`^[a-zA-Z][0-9a-zA-Z_]{0,62}$`) - -// NewType creates a type. It returns an error if the type is invalid. -// A type must -// - have at least one character, -// - start with an ASCII alphabetic character and -// - can only contain ASCII alphanumeric characters and '_'. 
-func NewType(ty string) (Type, error) { - if len(ty) == 0 { - return Type{}, fmt.Errorf("id must not be empty") - } - if !typeRegexp.MatchString(ty) { - return Type{}, fmt.Errorf("invalid character(s) in type %q", ty) - } - return Type{name: ty}, nil -} - -// MustNewType creates a type. It panics if the type is invalid. -// A type must -// - have at least one character, -// - start with an ASCII alphabetic character and -// - can only contain ASCII alphanumeric characters and '_'. -func MustNewType(strType string) Type { - ty, err := NewType(strType) - if err != nil { - panic(err) - } - return ty -} - -// DataType is a special Type that represents the data types supported by the collector. We currently support -// collecting metrics, traces and logs, this can expand in the future. -type DataType = Type - -func mustNewDataType(strType string) DataType { - return MustNewType(strType) -} - -// Currently supported data types. Add new data types here when new types are supported in the future. -var ( - // DataTypeTraces is the data type tag for traces. - DataTypeTraces = mustNewDataType("traces") - - // DataTypeMetrics is the data type tag for metrics. - DataTypeMetrics = mustNewDataType("metrics") - - // DataTypeLogs is the data type tag for logs. - DataTypeLogs = mustNewDataType("logs") -) diff --git a/vendor/go.opentelemetry.io/collector/component/host.go b/vendor/go.opentelemetry.io/collector/component/host.go index 50472a6d88c..dc8210ffbe3 100644 --- a/vendor/go.opentelemetry.io/collector/component/host.go +++ b/vendor/go.opentelemetry.io/collector/component/host.go @@ -5,22 +5,13 @@ package component // import "go.opentelemetry.io/collector/component" // Host represents the entity that is hosting a Component. It is used to allow communication // between the Component and its host (normally the service.Collector is the host). +// +// Components may require `component.Host` to implement additional interfaces to properly function. +// The component is expected to cast the `component.Host` to the interface it needs and return +// an error if the type assertion fails. type Host interface { - // GetFactory of the specified kind. Returns the factory for a component type. - // This allows components to create other components. For example: - // func (r MyReceiver) Start(host component.Host) error { - // apacheFactory := host.GetFactory(KindReceiver,"apache").(receiver.Factory) - // receiver, err := apacheFactory.CreateMetrics(...) - // ... - // } - // - // GetFactory can be called by the component anytime after Component.Start() begins and - // until Component.Shutdown() ends. Note that the component is responsible for destroying - // other components that it creates. - GetFactory(kind Kind, componentType Type) Factory - // GetExtensions returns the map of extensions. Only enabled and created extensions will be returned. - // Typically is used to find an extension by type or by full config name. Both cases + // Typically, it is used to find an extension by type or by full config name. Both cases // can be done by iterating the returned map. There are typically very few extensions, // so there are no performance implications due to iteration. 
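The rewritten `Host` contract above drops `GetFactory` and tells components to type-assert the host for any extra capability they need. A hypothetical sketch of that pattern; `hostCapability` and `myReceiver` are invented for illustration and are not part of the collector API:

```go
package example

import (
	"context"
	"errors"

	"go.opentelemetry.io/collector/component"
)

// hostCapability is a hypothetical richer interface a component might require
// its host to implement.
type hostCapability interface {
	component.Host
	Capability() string
}

type myReceiver struct{}

// Start asserts the host to the richer interface and returns an error when the
// assertion fails, as the new doc comment on Host recommends.
func (r *myReceiver) Start(_ context.Context, host component.Host) error {
	h, ok := host.(hostCapability)
	if !ok {
		return errors.New("host does not implement hostCapability")
	}
	_ = h.Capability()
	return nil
}
```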
// diff --git a/vendor/go.opentelemetry.io/collector/component/identifiable.go b/vendor/go.opentelemetry.io/collector/component/identifiable.go index d2d65a5e24f..63b890b47ac 100644 --- a/vendor/go.opentelemetry.io/collector/component/identifiable.go +++ b/vendor/go.opentelemetry.io/collector/component/identifiable.go @@ -6,12 +6,71 @@ package component // import "go.opentelemetry.io/collector/component" import ( "errors" "fmt" + "regexp" "strings" ) // typeAndNameSeparator is the separator that is used between type and name in type/name composite keys. const typeAndNameSeparator = "/" +var ( + // typeRegexp is used to validate the type of a component. + // A type must start with an ASCII alphabetic character and + // can only contain ASCII alphanumeric characters and '_'. + // This must be kept in sync with the regex in cmd/mdatagen/validate.go. + typeRegexp = regexp.MustCompile(`^[a-zA-Z][0-9a-zA-Z_]{0,62}$`) + + // nameRegexp is used to validate the name of a component. A name can consist of + // 1 to 1024 Unicode characters excluding whitespace, control characters, and + // symbols. + nameRegexp = regexp.MustCompile(`^[^\pZ\pC\pS]+$`) +) + +var _ fmt.Stringer = Type{} + +// Type is the component type as it is used in the config. +type Type struct { + name string +} + +// String returns the string representation of the type. +func (t Type) String() string { + return t.name +} + +// MarshalText returns the Type name. +func (t Type) MarshalText() ([]byte, error) { + return []byte(t.name), nil +} + +// NewType creates a type. It returns an error if the type is invalid. +// A type must +// - have at least one character, +// - start with an ASCII alphabetic character and +// - can only contain ASCII alphanumeric characters and '_'. +func NewType(ty string) (Type, error) { + if len(ty) == 0 { + return Type{}, errors.New("id must not be empty") + } + if !typeRegexp.MatchString(ty) { + return Type{}, fmt.Errorf("invalid character(s) in type %q", ty) + } + return Type{name: ty}, nil +} + +// MustNewType creates a type. It panics if the type is invalid. +// A type must +// - have at least one character, +// - start with an ASCII alphabetic character and +// - can only contain ASCII alphanumeric characters and '_'. +func MustNewType(strType string) Type { + ty, err := NewType(strType) + if err != nil { + panic(err) + } + return ty +} + // ID represents the identity for a component. It combines two values: // * type - the Type of the component. // * name - the name of that component. @@ -27,9 +86,10 @@ func NewID(typeVal Type) ID { } // MustNewID builds a Type and returns a new ID with the given Type and empty name. +// This is equivalent to NewID(MustNewType(typeVal)). // See MustNewType to check the valid values of typeVal. func MustNewID(typeVal string) ID { - return ID{typeVal: MustNewType(typeVal)} + return NewID(MustNewType(typeVal)) } // NewIDWithName returns a new ID with the given Type and name. @@ -38,9 +98,10 @@ func NewIDWithName(typeVal Type, nameVal string) ID { } // MustNewIDWithName builds a Type and returns a new ID with the given Type and name. +// This is equivalent to NewIDWithName(MustNewType(typeVal), nameVal). // See MustNewType to check the valid values of typeVal. func MustNewIDWithName(typeVal string, nameVal string) ID { - return ID{typeVal: MustNewType(typeVal), nameVal: nameVal} + return NewIDWithName(MustNewType(typeVal), nameVal) } // Type returns the type of the component.
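A short sketch of the relocated constructors in use, assuming they behave as defined in the hunk above; the component names are illustrative:

```go
package example

import (
	"fmt"

	"go.opentelemetry.io/collector/component"
)

func Example() {
	// NewType enforces ^[a-zA-Z][0-9a-zA-Z_]{0,62}$.
	if _, err := component.NewType("2fast"); err != nil {
		fmt.Println(err) // invalid character(s) in type "2fast"
	}

	// MustNewIDWithName composes a validated Type with a name; String()
	// joins them with the "/" separator.
	id := component.MustNewIDWithName("otlp", "primary")
	fmt.Println(id.String()) // otlp/primary
}
```

Note that `nameRegexp` is only applied on the `UnmarshalText` path (see the next hunk); the constructors validate the type but not the name.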
@@ -82,6 +143,9 @@ func (id *ID) UnmarshalText(text []byte) error { if nameStr == "" { return fmt.Errorf("in %q id: the part after %s should not be empty", idStr, typeAndNameSeparator) } + if err := validateName(nameStr); err != nil { + return fmt.Errorf("in %q id: %w", nameStr, err) + } } var err error @@ -101,3 +165,13 @@ func (id ID) String() string { return id.typeVal.String() + typeAndNameSeparator + id.nameVal } + +func validateName(nameStr string) error { + if len(nameStr) > 1024 { + return fmt.Errorf("name %q is longer than 1024 characters (%d characters)", nameStr, len(nameStr)) + } + if !nameRegexp.MatchString(nameStr) { + return fmt.Errorf("invalid character(s) in name %q", nameStr) + } + return nil +} diff --git a/vendor/go.opentelemetry.io/collector/component/status.go b/vendor/go.opentelemetry.io/collector/component/status.go deleted file mode 100644 index 8cd4d802644..00000000000 --- a/vendor/go.opentelemetry.io/collector/component/status.go +++ /dev/null @@ -1,190 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package component // import "go.opentelemetry.io/collector/component" - -import ( - "time" -) - -type Status int32 - -// Enumeration of possible component statuses -const ( - StatusNone Status = iota - StatusStarting - StatusOK - StatusRecoverableError - StatusPermanentError - StatusFatalError - StatusStopping - StatusStopped -) - -// String returns a string representation of a Status -func (s Status) String() string { - switch s { - case StatusStarting: - return "StatusStarting" - case StatusOK: - return "StatusOK" - case StatusRecoverableError: - return "StatusRecoverableError" - case StatusPermanentError: - return "StatusPermanentError" - case StatusFatalError: - return "StatusFatalError" - case StatusStopping: - return "StatusStopping" - case StatusStopped: - return "StatusStopped" - } - return "StatusNone" -} - -// StatusEvent contains a status and timestamp, and can contain an error -type StatusEvent struct { - status Status - err error - timestamp time.Time -} - -// Status returns the Status (enum) associated with the StatusEvent -func (ev *StatusEvent) Status() Status { - return ev.status -} - -// Err returns the error associated with the StatusEvent. -func (ev *StatusEvent) Err() error { - return ev.err -} - -// Timestamp returns the timestamp associated with the StatusEvent -func (ev *StatusEvent) Timestamp() time.Time { - return ev.timestamp -} - -// NewStatusEvent creates and returns a StatusEvent with the specified status and sets the timestamp -// time.Now(). To set an error on the event for an error status use one of the dedicated -// constructors (e.g. NewRecoverableErrorEvent, NewPermanentErrorEvent, NewFatalErrorEvent) -func NewStatusEvent(status Status) *StatusEvent { - return &StatusEvent{ - status: status, - timestamp: time.Now(), - } -} - -// NewRecoverableErrorEvent creates and returns a StatusEvent with StatusRecoverableError, the -// specified error, and a timestamp set to time.Now(). -func NewRecoverableErrorEvent(err error) *StatusEvent { - ev := NewStatusEvent(StatusRecoverableError) - ev.err = err - return ev -} - -// NewPermanentErrorEvent creates and returns a StatusEvent with StatusPermanentError, the -// specified error, and a timestamp set to time.Now(). 
-func NewPermanentErrorEvent(err error) *StatusEvent { - ev := NewStatusEvent(StatusPermanentError) - ev.err = err - return ev -} - -// NewFatalErrorEvent creates and returns a StatusEvent with StatusFatalError, the -// specified error, and a timestamp set to time.Now(). -func NewFatalErrorEvent(err error) *StatusEvent { - ev := NewStatusEvent(StatusFatalError) - ev.err = err - return ev -} - -// AggregateStatus will derive a status for the given input using the following rules in order: -// 1. If all instances have the same status, there is nothing to aggregate, return it. -// 2. If any instance encounters a fatal error, the component is in a Fatal Error state. -// 3. If any instance is in a Permanent Error state, the component status is Permanent Error. -// 4. If any instance is Stopping, the component is in a Stopping state. -// 5. An instance is Stopped, but not all instances are Stopped, we must be in the process of Stopping the component. -// 6. If any instance is in a Recoverable Error state, the component status is Recoverable Error. -// 7. By process of elimination, the only remaining state is starting. -func AggregateStatus[K comparable](eventMap map[K]*StatusEvent) Status { - seen := make(map[Status]struct{}) - for _, ev := range eventMap { - seen[ev.Status()] = struct{}{} - } - - // All statuses are the same. Note, this will handle StatusOK and StatusStopped as these two - // cases require all components be in the same state. - if len(seen) == 1 { - for st := range seen { - return st - } - } - - // Handle mixed status cases - if _, isFatal := seen[StatusFatalError]; isFatal { - return StatusFatalError - } - - if _, isPermanent := seen[StatusPermanentError]; isPermanent { - return StatusPermanentError - } - - if _, isStopping := seen[StatusStopping]; isStopping { - return StatusStopping - } - - if _, isStopped := seen[StatusStopped]; isStopped { - return StatusStopping - } - - if _, isRecoverable := seen[StatusRecoverableError]; isRecoverable { - return StatusRecoverableError - } - - // By process of elimination, this is the last possible status; no check necessary. - return StatusStarting -} - -// StatusIsError returns true for error statuses (e.g. 
StatusRecoverableError, -// StatusPermanentError, or StatusFatalError) -func StatusIsError(status Status) bool { - return status == StatusRecoverableError || - status == StatusPermanentError || - status == StatusFatalError -} - -// AggregateStatusEvent returns a status event where: -// - The status is set to the aggregate status of the events in the eventMap -// - The timestamp is set to the latest timestamp of the events in the eventMap -// - For an error status, the event will have same error as the most current event of the same -// error type from the eventMap -func AggregateStatusEvent[K comparable](eventMap map[K]*StatusEvent) *StatusEvent { - var lastEvent, lastMatchingEvent *StatusEvent - aggregateStatus := AggregateStatus[K](eventMap) - - for _, ev := range eventMap { - if lastEvent == nil || lastEvent.timestamp.Before(ev.timestamp) { - lastEvent = ev - } - if aggregateStatus == ev.Status() && - (lastMatchingEvent == nil || lastMatchingEvent.timestamp.Before(ev.timestamp)) { - lastMatchingEvent = ev - } - } - - // the effective status matches an existing event - if lastEvent.Status() == aggregateStatus { - return lastEvent - } - - // the effective status requires a synthetic event - aggregateEvent := &StatusEvent{ - status: aggregateStatus, - timestamp: lastEvent.timestamp, - } - if StatusIsError(aggregateStatus) { - aggregateEvent.err = lastMatchingEvent.err - } - - return aggregateEvent -} diff --git a/vendor/go.opentelemetry.io/collector/component/telemetry.go b/vendor/go.opentelemetry.io/collector/component/telemetry.go index 17ca3dcab67..359562e5f92 100644 --- a/vendor/go.opentelemetry.io/collector/component/telemetry.go +++ b/vendor/go.opentelemetry.io/collector/component/telemetry.go @@ -13,10 +13,6 @@ import ( ) // TelemetrySettings provides components with APIs to report telemetry. -// -// Note: there is a service version of this struct, servicetelemetry.TelemetrySettings, that mirrors -// this struct with the exception of ReportStatus. When adding or removing anything from -// this struct consider whether or not the same should be done for the service version. type TelemetrySettings struct { // Logger that the factory can use during creation and can pass to the created // component to be used later as well. @@ -28,15 +24,11 @@ type TelemetrySettings struct { // MeterProvider that the factory can pass to other instrumented third-party libraries. MeterProvider metric.MeterProvider - // MetricsLevel controls the level of detail for metrics emitted by the collector. - // Experimental: *NOTE* this field is experimental and may be changed or removed. + // MetricsLevel represents the configuration value set when the collector + // is configured. Components may use this level to decide whether it is + // appropriate to avoid computationally expensive calculations. MetricsLevel configtelemetry.Level // Resource contains the resource attributes for the collector's telemetry. Resource pcommon.Resource - - // ReportStatus allows a component to report runtime changes in status. The service - // will automatically report status for a component during startup and shutdown. Components can - // use this method to report status after start and before shutdown. 
- ReportStatus func(*StatusEvent) } diff --git a/vendor/go.opentelemetry.io/collector/config/configauth/configauth.go b/vendor/go.opentelemetry.io/collector/config/configauth/configauth.go index aa7002c270b..a3501cd0bed 100644 --- a/vendor/go.opentelemetry.io/collector/config/configauth/configauth.go +++ b/vendor/go.opentelemetry.io/collector/config/configauth/configauth.go @@ -7,6 +7,7 @@ package configauth // import "go.opentelemetry.io/collector/config/configauth" import ( + "context" "errors" "fmt" @@ -33,7 +34,7 @@ func NewDefaultAuthentication() *Authentication { // GetServerAuthenticator attempts to select the appropriate auth.Server from the list of extensions, // based on the requested extension name. If an authenticator is not found, an error is returned. -func (a Authentication) GetServerAuthenticator(extensions map[component.ID]component.Component) (auth.Server, error) { +func (a Authentication) GetServerAuthenticator(_ context.Context, extensions map[component.ID]component.Component) (auth.Server, error) { if ext, found := extensions[a.AuthenticatorID]; found { if server, ok := ext.(auth.Server); ok { return server, nil @@ -47,7 +48,7 @@ func (a Authentication) GetServerAuthenticator(extensions map[component.ID]compo // GetClientAuthenticator attempts to select the appropriate auth.Client from the list of extensions, // based on the component id of the extension. If an authenticator is not found, an error is returned. // This should be only used by HTTP clients. -func (a Authentication) GetClientAuthenticator(extensions map[component.ID]component.Component) (auth.Client, error) { +func (a Authentication) GetClientAuthenticator(_ context.Context, extensions map[component.ID]component.Component) (auth.Client, error) { if ext, found := extensions[a.AuthenticatorID]; found { if client, ok := ext.(auth.Client); ok { return client, nil diff --git a/vendor/go.opentelemetry.io/collector/config/configcompression/compressiontype.go b/vendor/go.opentelemetry.io/collector/config/configcompression/compressiontype.go index 004e9558665..d2bc4b16d5b 100644 --- a/vendor/go.opentelemetry.io/collector/config/configcompression/compressiontype.go +++ b/vendor/go.opentelemetry.io/collector/config/configcompression/compressiontype.go @@ -3,19 +3,30 @@ package configcompression // import "go.opentelemetry.io/collector/config/configcompression" -import "fmt" +import ( + "compress/zlib" + "fmt" +) // Type represents a compression method type Type string +type Level int + +type CompressionParams struct { + Level Level `mapstructure:"level"` +} + const ( - TypeGzip Type = "gzip" - TypeZlib Type = "zlib" - TypeDeflate Type = "deflate" - TypeSnappy Type = "snappy" - TypeZstd Type = "zstd" - typeNone Type = "none" - typeEmpty Type = "" + TypeGzip Type = "gzip" + TypeZlib Type = "zlib" + TypeDeflate Type = "deflate" + TypeSnappy Type = "snappy" + TypeZstd Type = "zstd" + TypeLz4 Type = "lz4" + typeNone Type = "none" + typeEmpty Type = "" + DefaultCompressionLevel = zlib.DefaultCompression ) // IsCompressed returns false if CompressionType is nil, none, or empty. 
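Ahead of the `configcompression` hunks just below, which teach `UnmarshalText` about `lz4` and add `ValidateParams`, here is a minimal sketch of the amended API; the chosen level is illustrative:

```go
package example

import (
	"fmt"

	"go.opentelemetry.io/collector/config/configcompression"
)

func Example() {
	var ct configcompression.Type
	// UnmarshalText rejects anything outside the supported set.
	if err := ct.UnmarshalText([]byte("zstd")); err != nil {
		panic(err)
	}
	fmt.Println(ct.IsCompressed()) // true

	// zstd accepts any level (it maps to the nearest supported one);
	// gzip/zlib/deflate only accept the zlib-defined levels.
	err := ct.ValidateParams(configcompression.CompressionParams{Level: 7})
	fmt.Println(err) // <nil>
}
```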
@@ -31,11 +42,31 @@ func (ct *Type) UnmarshalText(in []byte) error { typ == TypeDeflate || typ == TypeSnappy || typ == TypeZstd || + typ == TypeLz4 || typ == typeNone || typ == typeEmpty { *ct = typ return nil } return fmt.Errorf("unsupported compression type %q", typ) +} +func (ct *Type) ValidateParams(p CompressionParams) error { + switch *ct { + case TypeGzip, TypeZlib, TypeDeflate: + if p.Level == zlib.DefaultCompression || + p.Level == zlib.HuffmanOnly || + p.Level == zlib.NoCompression || + (p.Level >= zlib.BestSpeed && p.Level <= zlib.BestCompression) { + return nil + } + case TypeZstd: + // Supports arbitrary levels: zstd will map any given + // level to the nearest internally supported level. + return nil + } + if p.Level != 0 { + return fmt.Errorf("unsupported parameters %+v for compression type %q", p, *ct) + } + return nil } diff --git a/vendor/go.opentelemetry.io/collector/config/configgrpc/README.md b/vendor/go.opentelemetry.io/collector/config/configgrpc/README.md index b9cf2f01be2..e0db0421e7e 100644 --- a/vendor/go.opentelemetry.io/collector/config/configgrpc/README.md +++ b/vendor/go.opentelemetry.io/collector/config/configgrpc/README.md @@ -15,8 +15,8 @@ configuration parameters are also defined under `tls` like server configuration. For more information, see [configtls README](../configtls/README.md). -- [`balancer_name`](https://github.com/grpc/grpc-go/blob/master/examples/features/load_balancing/README.md) -- `compression` Compression type to use among `gzip`, `snappy`, `zstd`, and `none`. +- [`balancer_name`](https://github.com/grpc/grpc-go/blob/master/examples/features/load_balancing/README.md): Default before v0.103.0 is `pick_first`, default for v0.103.0 is `round_robin`. See [issue](https://github.com/open-telemetry/opentelemetry-collector/issues/10298). To restore the previous behavior, set `balancer_name` to `pick_first`. +- `compression`: Compression type to use among `gzip`, `snappy`, `zstd`, and `none`. 
- `endpoint`: Valid value syntax available [here](https://github.com/grpc/grpc/blob/master/doc/naming.md) - [`tls`](../configtls/README.md) - `headers`: name/value pairs added to the request diff --git a/vendor/go.opentelemetry.io/collector/config/configgrpc/configgrpc.go b/vendor/go.opentelemetry.io/collector/config/configgrpc/configgrpc.go index b57a199461c..b9c99d4d997 100644 --- a/vendor/go.opentelemetry.io/collector/config/configgrpc/configgrpc.go +++ b/vendor/go.opentelemetry.io/collector/config/configgrpc/configgrpc.go @@ -8,31 +8,35 @@ import ( "crypto/tls" "errors" "fmt" + "math" "strings" "time" "github.com/mostynb/go-grpc-compression/nonclobbering/snappy" + "github.com/mostynb/go-grpc-compression/nonclobbering/zstd" "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" "google.golang.org/grpc" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/encoding/gzip" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" + "google.golang.org/grpc/status" "go.opentelemetry.io/collector/client" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configauth" "go.opentelemetry.io/collector/config/configcompression" - grpcInternal "go.opentelemetry.io/collector/config/configgrpc/internal" "go.opentelemetry.io/collector/config/confignet" "go.opentelemetry.io/collector/config/configopaque" "go.opentelemetry.io/collector/config/configtelemetry" "go.opentelemetry.io/collector/config/configtls" - "go.opentelemetry.io/collector/config/internal" "go.opentelemetry.io/collector/extension/auth" ) @@ -55,6 +59,11 @@ func NewDefaultKeepaliveClientConfig() *KeepaliveClientConfig { } } +// BalancerName returns the default load balancer name. +func BalancerName() string { + return "round_robin" +} + // ClientConfig defines common settings for a gRPC client configuration. type ClientConfig struct { // The target to which the exporter is going to send traces or metrics, @@ -102,9 +111,10 @@ type ClientConfig struct { // NewDefaultClientConfig returns a new instance of ClientConfig with default values. func NewDefaultClientConfig() *ClientConfig { return &ClientConfig{ - TLSSetting: configtls.NewDefaultClientConfig(), - Keepalive: NewDefaultKeepaliveClientConfig(), - Auth: configauth.NewDefaultAuthentication(), + TLSSetting: configtls.NewDefaultClientConfig(), + Keepalive: NewDefaultKeepaliveClientConfig(), + Auth: configauth.NewDefaultAuthentication(), + BalancerName: BalancerName(), } } @@ -161,7 +171,7 @@ type ServerConfig struct { TLSSetting *configtls.ServerConfig `mapstructure:"tls"` // MaxRecvMsgSizeMiB sets the maximum size (in MiB) of messages accepted by the server. - MaxRecvMsgSizeMiB uint64 `mapstructure:"max_recv_msg_size_mib"` + MaxRecvMsgSizeMiB int `mapstructure:"max_recv_msg_size_mib"` // MaxConcurrentStreams sets the limit on the number of concurrent streams to each ServerTransport. // It has effect only for streaming RPCs. @@ -182,7 +192,6 @@ type ServerConfig struct { Auth *configauth.Authentication `mapstructure:"auth"` // Include propagates the incoming connection's metadata to downstream consumers. - // Experimental: *NOTE* this option is subject to change or removal in the future.
IncludeMetadata bool `mapstructure:"include_metadata"` } @@ -194,6 +203,16 @@ func NewDefaultServerConfig() *ServerConfig { } } +func (gcs *ClientConfig) Validate() error { + if gcs.BalancerName != "" { + if balancer.Get(gcs.BalancerName) == nil { + return fmt.Errorf("invalid balancer_name: %s", gcs.BalancerName) + } + } + + return nil +} + // sanitizedEndpoint strips the prefix of either http:// or https:// from configgrpc.ClientConfig.Endpoint. func (gcs *ClientConfig) sanitizedEndpoint() string { switch { @@ -214,20 +233,45 @@ func (gcs *ClientConfig) isSchemeHTTPS() bool { return strings.HasPrefix(gcs.Endpoint, "https://") } +// ToClientConnOption is a sealed interface wrapping options for [ClientConfig.ToClientConn]. +type ToClientConnOption interface { + isToClientConnOption() +} + +type grpcDialOptionWrapper struct { + opt grpc.DialOption +} + +// WithGrpcDialOption wraps a [grpc.DialOption] into a [ToClientConnOption]. +func WithGrpcDialOption(opt grpc.DialOption) ToClientConnOption { + return grpcDialOptionWrapper{opt: opt} +} +func (grpcDialOptionWrapper) isToClientConnOption() {} + // ToClientConn creates a client connection to the given target. By default, it's // a non-blocking dial (the function won't wait for connections to be // established, and connecting happens in the background). To make it a blocking -// dial, use grpc.WithBlock() dial option. -func (gcs *ClientConfig) ToClientConn(_ context.Context, host component.Host, settings component.TelemetrySettings, extraOpts ...grpc.DialOption) (*grpc.ClientConn, error) { - opts, err := gcs.toDialOptions(host, settings) +// dial, use the WithGrpcDialOption(grpc.WithBlock()) option. +func (gcs *ClientConfig) ToClientConn( + ctx context.Context, + host component.Host, + settings component.TelemetrySettings, + extraOpts ...ToClientConnOption, +) (*grpc.ClientConn, error) { + grpcOpts, err := gcs.getGrpcDialOptions(ctx, host, settings, extraOpts) if err != nil { return nil, err } - opts = append(opts, extraOpts...) - return grpc.NewClient(gcs.sanitizedEndpoint(), opts...) + //nolint:staticcheck //SA1019 see https://github.com/open-telemetry/opentelemetry-collector/pull/11575 + return grpc.DialContext(ctx, gcs.sanitizedEndpoint(), grpcOpts...) 
} -func (gcs *ClientConfig) toDialOptions(host component.Host, settings component.TelemetrySettings) ([]grpc.DialOption, error) { +func (gcs *ClientConfig) getGrpcDialOptions( + ctx context.Context, + host component.Host, + settings component.TelemetrySettings, + extraOpts []ToClientConnOption, +) ([]grpc.DialOption, error) { var opts []grpc.DialOption if gcs.Compression.IsCompressed() { cp, err := getGRPCCompressionName(gcs.Compression) @@ -237,7 +281,7 @@ func (gcs *ClientConfig) toDialOptions(host component.Host, settings component.T opts = append(opts, grpc.WithDefaultCallOptions(grpc.UseCompressor(cp))) } - tlsCfg, err := gcs.TLSSetting.LoadTLSConfig(context.Background()) + tlsCfg, err := gcs.TLSSetting.LoadTLSConfig(ctx) if err != nil { return nil, err } @@ -271,7 +315,7 @@ func (gcs *ClientConfig) toDialOptions(host component.Host, settings component.T return nil, errors.New("no extensions configuration available") } - grpcAuthenticator, cerr := gcs.Auth.GetClientAuthenticator(host.GetExtensions()) + grpcAuthenticator, cerr := gcs.Auth.GetClientAuthenticator(ctx, host.GetExtensions()) if cerr != nil { return nil, cerr } @@ -284,10 +328,6 @@ func (gcs *ClientConfig) toDialOptions(host component.Host, settings component.T } if gcs.BalancerName != "" { - valid := validateBalancerName(gcs.BalancerName) - if !valid { - return nil, fmt.Errorf("invalid balancer_name: %s", gcs.BalancerName) - } opts = append(opts, grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingPolicy":"%s"}`, gcs.BalancerName))) } @@ -298,37 +338,71 @@ func (gcs *ClientConfig) toDialOptions(host component.Host, settings component.T otelOpts := []otelgrpc.Option{ otelgrpc.WithTracerProvider(settings.TracerProvider), otelgrpc.WithPropagators(otel.GetTextMapPropagator()), - } - if settings.MetricsLevel >= configtelemetry.LevelDetailed { - otelOpts = append(otelOpts, otelgrpc.WithMeterProvider(settings.MeterProvider)) + otelgrpc.WithMeterProvider(getLeveledMeterProvider(settings)), } // Enable OpenTelemetry observability plugin. opts = append(opts, grpc.WithStatsHandler(otelgrpc.NewClientHandler(otelOpts...))) + for _, opt := range extraOpts { + if wrapper, ok := opt.(grpcDialOptionWrapper); ok { + opts = append(opts, wrapper.opt) + } + } + return opts, nil } -func validateBalancerName(balancerName string) bool { - return balancer.Get(balancerName) != nil +func (gss *ServerConfig) Validate() error { + if gss.MaxRecvMsgSizeMiB*1024*1024 < 0 { + return fmt.Errorf("invalid max_recv_msg_size_mib value, must be between 1 and %d: %d", math.MaxInt/1024/1024, gss.MaxRecvMsgSizeMiB) + } + + if gss.ReadBufferSize < 0 { + return fmt.Errorf("invalid read_buffer_size value: %d", gss.ReadBufferSize) + } + + if gss.WriteBufferSize < 0 { + return fmt.Errorf("invalid write_buffer_size value: %d", gss.WriteBufferSize) + } + + return nil +} + +// ToServerOption is a sealed interface wrapping options for [ServerConfig.ToServer]. +type ToServerOption interface { + isToServerOption() } -// ToServer returns a grpc.Server for the configuration -func (gss *ServerConfig) ToServer(_ context.Context, host component.Host, settings component.TelemetrySettings, extraOpts ...grpc.ServerOption) (*grpc.Server, error) { - opts, err := gss.toServerOption(host, settings) +type grpcServerOptionWrapper struct { + opt grpc.ServerOption +} + +// WithGrpcServerOption wraps a [grpc.ServerOption] into a [ToServerOption]. 
+func WithGrpcServerOption(opt grpc.ServerOption) ToServerOption { + return grpcServerOptionWrapper{opt: opt} +} +func (grpcServerOptionWrapper) isToServerOption() {} + +// ToServer returns a [grpc.Server] for the configuration. +func (gss *ServerConfig) ToServer( + _ context.Context, + host component.Host, + settings component.TelemetrySettings, + extraOpts ...ToServerOption, +) (*grpc.Server, error) { + grpcOpts, err := gss.getGrpcServerOptions(host, settings, extraOpts) if err != nil { return nil, err } - opts = append(opts, extraOpts...) - return grpc.NewServer(opts...), nil + return grpc.NewServer(grpcOpts...), nil } -func (gss *ServerConfig) toServerOption(host component.Host, settings component.TelemetrySettings) ([]grpc.ServerOption, error) { - switch gss.NetAddr.Transport { - case confignet.TransportTypeTCP, confignet.TransportTypeTCP4, confignet.TransportTypeTCP6, confignet.TransportTypeUDP, confignet.TransportTypeUDP4, confignet.TransportTypeUDP6: - internal.WarnOnUnspecifiedHost(settings.Logger, gss.NetAddr.Endpoint) - } - +func (gss *ServerConfig) getGrpcServerOptions( + host component.Host, + settings component.TelemetrySettings, + extraOpts []ToServerOption, +) ([]grpc.ServerOption, error) { var opts []grpc.ServerOption if gss.TLSSetting != nil { @@ -339,8 +413,8 @@ func (gss *ServerConfig) toServerOption(host component.Host, settings component. opts = append(opts, grpc.Creds(credentials.NewTLS(tlsCfg))) } - if gss.MaxRecvMsgSizeMiB > 0 { - opts = append(opts, grpc.MaxRecvMsgSize(int(gss.MaxRecvMsgSizeMiB*1024*1024))) + if gss.MaxRecvMsgSizeMiB > 0 && gss.MaxRecvMsgSizeMiB*1024*1024 > 0 { + opts = append(opts, grpc.MaxRecvMsgSize(gss.MaxRecvMsgSizeMiB*1024*1024)) } if gss.MaxConcurrentStreams > 0 { @@ -387,7 +461,7 @@ func (gss *ServerConfig) toServerOption(host component.Host, settings component. var sInterceptors []grpc.StreamServerInterceptor if gss.Auth != nil { - authenticator, err := gss.Auth.GetServerAuthenticator(host.GetExtensions()) + authenticator, err := gss.Auth.GetServerAuthenticator(context.Background(), host.GetExtensions()) if err != nil { return nil, err } @@ -403,9 +477,7 @@ func (gss *ServerConfig) toServerOption(host component.Host, settings component. otelOpts := []otelgrpc.Option{ otelgrpc.WithTracerProvider(settings.TracerProvider), otelgrpc.WithPropagators(otel.GetTextMapPropagator()), - } - if settings.MetricsLevel >= configtelemetry.LevelDetailed { - otelOpts = append(otelOpts, otelgrpc.WithMeterProvider(settings.MeterProvider)) + otelgrpc.WithMeterProvider(getLeveledMeterProvider(settings)), } // Enable OpenTelemetry observability plugin. @@ -415,6 +487,12 @@ func (gss *ServerConfig) toServerOption(host component.Host, settings component. 
opts = append(opts, grpc.StatsHandler(otelgrpc.NewServerHandler(otelOpts...)), grpc.ChainUnaryInterceptor(uInterceptors...), grpc.ChainStreamInterceptor(sInterceptors...)) + for _, opt := range extraOpts { + if wrapper, ok := opt.(grpcServerOptionWrapper); ok { + opts = append(opts, wrapper.opt) + } + } + return opts, nil } @@ -426,7 +504,7 @@ func getGRPCCompressionName(compressionType configcompression.Type) (string, err case configcompression.TypeSnappy: return snappy.Name, nil case configcompression.TypeZstd: - return grpcInternal.ZstdName, nil + return zstd.Name, nil default: return "", fmt.Errorf("unsupported compression type %q", compressionType) } @@ -473,7 +551,7 @@ func authUnaryServerInterceptor(ctx context.Context, req any, _ *grpc.UnaryServe ctx, err := server.Authenticate(ctx, headers) if err != nil { - return nil, err + return nil, status.Error(codes.Unauthenticated, err.Error()) } return handler(ctx, req) @@ -488,8 +566,15 @@ func authStreamServerInterceptor(srv any, stream grpc.ServerStream, _ *grpc.Stre ctx, err := server.Authenticate(ctx, headers) if err != nil { - return err + return status.Error(codes.Unauthenticated, err.Error()) } return handler(srv, wrapServerStream(ctx, stream)) } + +func getLeveledMeterProvider(settings component.TelemetrySettings) metric.MeterProvider { + if configtelemetry.LevelDetailed <= settings.MetricsLevel { + return settings.MeterProvider + } + return noop.MeterProvider{} +} diff --git a/vendor/go.opentelemetry.io/collector/config/configgrpc/internal/zstd.go b/vendor/go.opentelemetry.io/collector/config/configgrpc/internal/zstd.go deleted file mode 100644 index 0718b73535f..00000000000 --- a/vendor/go.opentelemetry.io/collector/config/configgrpc/internal/zstd.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright The OpenTelemetry Authors -// Copyright 2017 gRPC authors -// SPDX-License-Identifier: Apache-2.0 - -package internal // import "go.opentelemetry.io/collector/config/configgrpc/internal" - -import ( - "errors" - "io" - "sync" - - "github.com/klauspost/compress/zstd" - "google.golang.org/grpc/encoding" -) - -const ZstdName = "zstd" - -func init() { - encoding.RegisterCompressor(NewZstdCodec()) -} - -type writer struct { - *zstd.Encoder - pool *sync.Pool -} - -func NewZstdCodec() encoding.Compressor { - c := &compressor{} - c.poolCompressor.New = func() any { - zw, _ := zstd.NewWriter(nil, zstd.WithEncoderConcurrency(1), zstd.WithWindowSize(512*1024)) - return &writer{Encoder: zw, pool: &c.poolCompressor} - } - return c -} - -func (c *compressor) Compress(w io.Writer) (io.WriteCloser, error) { - z := c.poolCompressor.Get().(*writer) - z.Encoder.Reset(w) - return z, nil -} - -func (z *writer) Close() error { - defer z.pool.Put(z) - return z.Encoder.Close() -} - -type reader struct { - *zstd.Decoder - pool *sync.Pool -} - -func (c *compressor) Decompress(r io.Reader) (io.Reader, error) { - z, inPool := c.poolDecompressor.Get().(*reader) - if !inPool { - newZ, err := zstd.NewReader(r) - if err != nil { - return nil, err - } - return &reader{Decoder: newZ, pool: &c.poolDecompressor}, nil - } - if err := z.Reset(r); err != nil { - c.poolDecompressor.Put(z) - return nil, err - } - return z, nil -} - -func (z *reader) Read(p []byte) (n int, err error) { - n, err = z.Decoder.Read(p) - if errors.Is(err, io.EOF) { - z.pool.Put(z) - } - return n, err -} - -func (c *compressor) Name() string { - return ZstdName -} - -type compressor struct { - poolCompressor sync.Pool - poolDecompressor sync.Pool -} diff --git 
a/vendor/go.opentelemetry.io/collector/config/confighttp/README.md b/vendor/go.opentelemetry.io/collector/config/confighttp/README.md index a0227c2402b..1f1cb1bba27 100644 --- a/vendor/go.opentelemetry.io/collector/config/confighttp/README.md +++ b/vendor/go.opentelemetry.io/collector/config/confighttp/README.md @@ -23,9 +23,31 @@ README](../configtls/README.md). - [`read_buffer_size`](https://golang.org/pkg/net/http/#Transport) - [`timeout`](https://golang.org/pkg/net/http/#Client) - [`write_buffer_size`](https://golang.org/pkg/net/http/#Transport) -- `compression`: Compression type to use among `gzip`, `zstd`, `snappy`, `zlib`, and `deflate`. +- `compression`: Compression type to use among `gzip`, `zstd`, `snappy`, `zlib`, `deflate`, and `lz4`. - look at the documentation for the server-side of the communication. - `none` will be treated as uncompressed, and any other inputs will cause an error. +- `compression_params`: Configure advanced compression options + - `level`: Configure compression level for `compression` type + - The following are valid combinations of `compression` and `level` + - `gzip` + - BestSpeed: `1` + - BestCompression: `9` + - DefaultCompression: `-1` + - `zlib` + - BestSpeed: `1` + - BestCompression: `9` + - DefaultCompression: `-1` + - `deflate` + - BestSpeed: `1` + - BestCompression: `9` + - DefaultCompression: `-1` + - `zstd` + - SpeedFastest: `1` + - SpeedDefault: `3` + - SpeedBetterCompression: `6` + - SpeedBestCompression: `11` + - `snappy`: no compression levels are supported yet - [`max_idle_conns`](https://golang.org/pkg/net/http/#Transport) - [`max_idle_conns_per_host`](https://golang.org/pkg/net/http/#Transport) - [`max_conns_per_host`](https://golang.org/pkg/net/http/#Transport) @@ -34,6 +56,8 @@ README](../configtls/README.md). - [`disable_keep_alives`](https://golang.org/pkg/net/http/#Transport) - [`http2_read_idle_timeout`](https://pkg.go.dev/golang.org/x/net/http2#Transport) - [`http2_ping_timeout`](https://pkg.go.dev/golang.org/x/net/http2#Transport) +- [`cookies`](https://pkg.go.dev/net/http#CookieJar) + - `enabled`: if enabled, the client will store cookies from server responses and reuse them in subsequent requests. Example: @@ -50,7 +74,11 @@ exporter: headers: test1: "value1" "test 2": "value 2" - compression: zstd + compression: gzip + compression_params: + level: 1 + cookies: + enabled: true ``` ## Server Configuration @@ -64,8 +92,8 @@ is hosted at a different [origin][origin]. If left blank or set to `null`, CORS will not be enabled. - `allowed_origins`: A list of [origins][origin] allowed to send requests to the receiver. An origin may contain a wildcard (`*`) to replace 0 or more - characters (e.g., `https://*.example.com`). To allow any origin, set to - `["*"]`. If no origins are listed, CORS will not be enabled. + characters (e.g., `https://*.example.com`). **Do not use** a plain wildcard + `["*"]`, as our CORS response includes `Access-Control-Allow-Credentials: true`, which makes browsers **disallow a plain wildcard** (this is a security standard). To allow any origin, you can specify at least the protocol, for example `["https://*", "http://*"]`. If no origins are listed, CORS will not be enabled. - `allowed_headers`: Allow CORS requests to include headers outside the [default safelist][cors-headers]. By default, safelist headers and `X-Requested-With` will be allowed. To allow any request header, set to @@ -74,9 +102,11 @@ will not be enabled. header, allowing clients to cache the response to CORS preflight requests.
If not set, browsers use a default of 5 seconds. - `endpoint`: Valid value syntax available [here](https://github.com/grpc/grpc/blob/master/doc/naming.md) -- `max_request_body_size`: configures the maximum allowed body size in bytes for a single request. Default: `0` (no restriction) +- `max_request_body_size`: configures the maximum allowed body size in bytes for a single request. Default: `20971520` (20MiB) +- `compression_algorithms`: configures the list of compression algorithms the server can accept. Default: ["", "gzip", "zstd", "zlib", "snappy", "deflate", "lz4"] - [`tls`](../configtls/README.md) - [`auth`](../configauth/README.md) + - `request_params`: a list of query parameter names to add to the auth context, along with the HTTP headers You can enable [`attribute processor`][attribute-processor] to append any http header to span's attribute using custom key. You also need to enable the "include_metadata" @@ -89,6 +119,8 @@ receivers: http: include_metadata: true auth: + request_params: + - token authenticator: some-authenticator-extension cors: allowed_origins: @@ -98,11 +130,12 @@ receivers: - Example-Header max_age: 7200 endpoint: 0.0.0.0:55690 + compression_algorithms: ["", "gzip"] processors: attributes: actions: - key: http.client_ip - from_context: X-Forwarded-For + from_context: metadata.x-forwarded-for action: upsert ``` diff --git a/vendor/go.opentelemetry.io/collector/config/confighttp/compress_readcloser.go b/vendor/go.opentelemetry.io/collector/config/confighttp/compress_readcloser.go new file mode 100644 index 00000000000..411a06a7f2b --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/config/confighttp/compress_readcloser.go @@ -0,0 +1,23 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package confighttp // import "go.opentelemetry.io/collector/config/confighttp" + +import "io" + +// compressReadCloser couples the original compressed reader +// and the compression reader to ensure that the original body +// is correctly closed to ensure resources are freed. +type compressReadCloser struct { + io.Reader + orig io.ReadCloser +} + +var ( + _ io.Reader = (*compressReadCloser)(nil) + _ io.Closer = (*compressReadCloser)(nil) +) + +func (crc *compressReadCloser) Close() error { + return crc.orig.Close() +} diff --git a/vendor/go.opentelemetry.io/collector/config/confighttp/compression.go b/vendor/go.opentelemetry.io/collector/config/confighttp/compression.go index a700bec845b..62638e0c7be 100644 --- a/vendor/go.opentelemetry.io/collector/config/confighttp/compression.go +++ b/vendor/go.opentelemetry.io/collector/config/confighttp/compression.go @@ -15,25 +15,84 @@ import ( "github.com/golang/snappy" "github.com/klauspost/compress/zstd" + "github.com/pierrec/lz4/v4" "go.opentelemetry.io/collector/config/configcompression" ) type compressRoundTripper struct { - rt http.RoundTripper - compressionType configcompression.Type - compressor *compressor + rt http.RoundTripper + compressionType configcompression.Type + compressionParams configcompression.CompressionParams + compressor *compressor } -func newCompressRoundTripper(rt http.RoundTripper, compressionType configcompression.Type) (*compressRoundTripper, error) { - encoder, err := newCompressor(compressionType) +var availableDecoders = map[string]func(body io.ReadCloser) (io.ReadCloser, error){ + "": func(io.ReadCloser) (io.ReadCloser, error) { + // Not a compressed payload. Nothing to do. 
+ return nil, nil + }, + "gzip": func(body io.ReadCloser) (io.ReadCloser, error) { + gr, err := gzip.NewReader(body) + if err != nil { + return nil, err + } + return gr, nil + }, + "zstd": func(body io.ReadCloser) (io.ReadCloser, error) { + zr, err := zstd.NewReader( + body, + // Concurrency 1 disables async decoding. We don't need async decoding, it is pointless + // for our use-case (a server accepting decoding http requests). + // Disabling async improves performance (I benchmarked it previously when working + // on https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/23257). + zstd.WithDecoderConcurrency(1), + ) + if err != nil { + return nil, err + } + return zr.IOReadCloser(), nil + }, + "zlib": func(body io.ReadCloser) (io.ReadCloser, error) { + zr, err := zlib.NewReader(body) + if err != nil { + return nil, err + } + return zr, nil + }, + //nolint:unparam // Ignoring the linter request to remove error return since it needs to match the method signature + "snappy": func(body io.ReadCloser) (io.ReadCloser, error) { + // Lazy Reading content to improve memory efficiency + return &compressReadCloser{ + Reader: snappy.NewReader(body), + orig: body, + }, nil + }, + //nolint:unparam // Ignoring the linter request to remove error return since it needs to match the method signature + "lz4": func(body io.ReadCloser) (io.ReadCloser, error) { + return &compressReadCloser{ + Reader: lz4.NewReader(body), + orig: body, + }, nil + }, +} + +func newCompressionParams(level configcompression.Level) configcompression.CompressionParams { + return configcompression.CompressionParams{ + Level: level, + } +} + +func newCompressRoundTripper(rt http.RoundTripper, compressionType configcompression.Type, compressionParams configcompression.CompressionParams) (*compressRoundTripper, error) { + encoder, err := newCompressor(compressionType, compressionParams) if err != nil { return nil, err } return &compressRoundTripper{ - rt: rt, - compressionType: compressionType, - compressor: encoder, + rt: rt, + compressionType: compressionType, + compressionParams: compressionParams, + compressor: encoder, }, nil } @@ -76,65 +135,27 @@ type decompressor struct { // httpContentDecompressor offloads the task of handling compressed HTTP requests // by identifying the compression format in the "Content-Encoding" header and re-writing // request body so that the handlers further in the chain can work on decompressed data. -// It supports gzip and deflate/zlib compression. -func httpContentDecompressor(h http.Handler, maxRequestBodySize int64, eh func(w http.ResponseWriter, r *http.Request, errorMsg string, statusCode int), decoders map[string]func(body io.ReadCloser) (io.ReadCloser, error)) http.Handler { +func httpContentDecompressor(h http.Handler, maxRequestBodySize int64, eh func(w http.ResponseWriter, r *http.Request, errorMsg string, statusCode int), enableDecoders []string, decoders map[string]func(body io.ReadCloser) (io.ReadCloser, error)) http.Handler { errHandler := defaultErrorHandler if eh != nil { errHandler = eh } + enabled := map[string]func(body io.ReadCloser) (io.ReadCloser, error){} + for _, dec := range enableDecoders { + enabled[dec] = availableDecoders[dec] + + if dec == "deflate" { + enabled["deflate"] = availableDecoders["zlib"] + } + } + d := &decompressor{ maxRequestBodySize: maxRequestBodySize, errHandler: errHandler, base: h, - decoders: map[string]func(body io.ReadCloser) (io.ReadCloser, error){ - "": func(io.ReadCloser) (io.ReadCloser, error) { - // Not a compressed payload. 
Nothing to do. - return nil, nil - }, - "gzip": func(body io.ReadCloser) (io.ReadCloser, error) { - gr, err := gzip.NewReader(body) - if err != nil { - return nil, err - } - return gr, nil - }, - "zstd": func(body io.ReadCloser) (io.ReadCloser, error) { - zr, err := zstd.NewReader( - body, - // Concurrency 1 disables async decoding. We don't need async decoding, it is pointless - // for our use-case (a server accepting decoding http requests). - // Disabling async improves performance (I benchmarked it previously when working - // on https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/23257). - zstd.WithDecoderConcurrency(1), - ) - if err != nil { - return nil, err - } - return zr.IOReadCloser(), nil - }, - "zlib": func(body io.ReadCloser) (io.ReadCloser, error) { - zr, err := zlib.NewReader(body) - if err != nil { - return nil, err - } - return zr, nil - }, - "snappy": func(body io.ReadCloser) (io.ReadCloser, error) { - sr := snappy.NewReader(body) - sb := new(bytes.Buffer) - _, err := io.Copy(sb, sr) - if err != nil { - return nil, err - } - if err = body.Close(); err != nil { - return nil, err - } - return io.NopCloser(sb), nil - }, - }, + decoders: enabled, } - d.decoders["deflate"] = d.decoders["zlib"] for key, dec := range decoders { d.decoders[key] = dec diff --git a/vendor/go.opentelemetry.io/collector/config/confighttp/compressor.go b/vendor/go.opentelemetry.io/collector/config/confighttp/compressor.go index 660fa83ce51..750bda5795d 100644 --- a/vendor/go.opentelemetry.io/collector/config/confighttp/compressor.go +++ b/vendor/go.opentelemetry.io/collector/config/confighttp/compressor.go @@ -13,6 +13,7 @@ import ( "github.com/golang/snappy" "github.com/klauspost/compress/zstd" + "github.com/pierrec/lz4/v4" "go.opentelemetry.io/collector/config/configcompression" ) @@ -22,36 +23,72 @@ type writeCloserReset interface { Reset(w io.Writer) } -var ( - _ writeCloserReset = (*gzip.Writer)(nil) - gZipPool = &compressor{pool: sync.Pool{New: func() any { return gzip.NewWriter(nil) }}} - _ writeCloserReset = (*snappy.Writer)(nil) - snappyPool = &compressor{pool: sync.Pool{New: func() any { return snappy.NewBufferedWriter(nil) }}} - _ writeCloserReset = (*zstd.Encoder)(nil) - // Concurrency 1 disables async decoding via goroutines. This is useful to reduce memory usage and isn't a bottleneck for compression using sync.Pool. - zStdPool = &compressor{pool: sync.Pool{New: func() any { zw, _ := zstd.NewWriter(nil, zstd.WithEncoderConcurrency(1)); return zw }}} - _ writeCloserReset = (*zlib.Writer)(nil) - zLibPool = &compressor{pool: sync.Pool{New: func() any { return zlib.NewWriter(nil) }}} -) - type compressor struct { pool sync.Pool } +type compressorMap map[compressionMapKey]*compressor + +type compressionMapKey struct { + compressionType configcompression.Type + compressionParams configcompression.CompressionParams +} + +var ( + compressorPools = make(compressorMap) + compressorPoolsMu sync.Mutex +) + // writerFactory defines writer field in CompressRoundTripper. 
// The validity of input is already checked when NewCompressRoundTripper was called in confighttp, -func newCompressor(compressionType configcompression.Type) (*compressor, error) { +func newCompressor(compressionType configcompression.Type, compressionParams configcompression.CompressionParams) (*compressor, error) { + compressorPoolsMu.Lock() + defer compressorPoolsMu.Unlock() + mapKey := compressionMapKey{compressionType, compressionParams} + c, ok := compressorPools[mapKey] + if ok { + return c, nil + } + + f, err := newWriteCloserResetFunc(compressionType, compressionParams) + if err != nil { + return nil, err + } + c = &compressor{pool: sync.Pool{New: func() any { return f() }}} + compressorPools[mapKey] = c + return c, nil +} + +func newWriteCloserResetFunc(compressionType configcompression.Type, compressionParams configcompression.CompressionParams) (func() writeCloserReset, error) { switch compressionType { case configcompression.TypeGzip: - return gZipPool, nil + return func() writeCloserReset { + w, _ := gzip.NewWriterLevel(nil, int(compressionParams.Level)) + return w + }, nil case configcompression.TypeSnappy: - return snappyPool, nil + return func() writeCloserReset { + return snappy.NewBufferedWriter(nil) + }, nil case configcompression.TypeZstd: - return zStdPool, nil + level := zstd.WithEncoderLevel(zstd.EncoderLevelFromZstd(int(compressionParams.Level))) + return func() writeCloserReset { + zw, _ := zstd.NewWriter(nil, zstd.WithEncoderConcurrency(1), level) + return zw + }, nil case configcompression.TypeZlib, configcompression.TypeDeflate: - return zLibPool, nil + return func() writeCloserReset { + w, _ := zlib.NewWriterLevel(nil, int(compressionParams.Level)) + return w + }, nil + case configcompression.TypeLz4: + return func() writeCloserReset { + lz := lz4.NewWriter(nil) + _ = lz.Apply(lz4.ConcurrencyOption(1)) + return lz + }, nil } - return nil, errors.New("unsupported compression type, ") + return nil, errors.New("unsupported compression type") } func (p *compressor) compress(buf *bytes.Buffer, body io.ReadCloser) error { diff --git a/vendor/go.opentelemetry.io/collector/config/confighttp/confighttp.go b/vendor/go.opentelemetry.io/collector/config/confighttp/confighttp.go index 71b2f17ee2f..c3a19211c2d 100644 --- a/vendor/go.opentelemetry.io/collector/config/confighttp/confighttp.go +++ b/vendor/go.opentelemetry.io/collector/config/confighttp/confighttp.go @@ -11,26 +11,34 @@ import ( "io" "net" "net/http" + "net/http/cookiejar" "net/url" "time" "github.com/rs/cors" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" "golang.org/x/net/http2" + "golang.org/x/net/publicsuffix" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configauth" "go.opentelemetry.io/collector/config/configcompression" + "go.opentelemetry.io/collector/config/confighttp/internal" "go.opentelemetry.io/collector/config/configopaque" "go.opentelemetry.io/collector/config/configtelemetry" "go.opentelemetry.io/collector/config/configtls" - "go.opentelemetry.io/collector/config/internal" "go.opentelemetry.io/collector/extension/auth" ) -const headerContentEncoding = "Content-Encoding" -const defaultMaxRequestBodySize = 20 * 1024 * 1024 // 20MiB +const ( + headerContentEncoding = "Content-Encoding" + defaultMaxRequestBodySize = 20 * 1024 * 1024 // 20MiB +) + +var defaultCompressionAlgorithms = []string{"", "gzip", "zstd", "zlib", "snappy", "deflate", "lz4"} 
// ClientConfig defines settings for creating an HTTP client. type ClientConfig struct { @@ -44,12 +52,15 @@ type ClientConfig struct { TLSSetting configtls.ClientConfig `mapstructure:"tls"` // ReadBufferSize for HTTP client. See http.Transport.ReadBufferSize. + // Default is 0. ReadBufferSize int `mapstructure:"read_buffer_size"` // WriteBufferSize for HTTP client. See http.Transport.WriteBufferSize. + // Default is 0. WriteBufferSize int `mapstructure:"write_buffer_size"` // Timeout parameter configures `http.Client.Timeout`. + // Default is 0 (unlimited). Timeout time.Duration `mapstructure:"timeout"` // Additional headers attached to each HTTP request sent by the client. @@ -57,30 +68,30 @@ type ClientConfig struct { // Header values are opaque since they may be sensitive. Headers map[string]configopaque.String `mapstructure:"headers"` - // Custom Round Tripper to allow for individual components to intercept HTTP requests - CustomRoundTripper func(next http.RoundTripper) (http.RoundTripper, error) - // Auth configuration for outgoing HTTP calls. Auth *configauth.Authentication `mapstructure:"auth"` // The compression key for supported compression types within collector. Compression configcompression.Type `mapstructure:"compression"` + // Advanced configuration options for the Compression + CompressionParams configcompression.CompressionParams `mapstructure:"compression_params"` + // MaxIdleConns is used to set a limit to the maximum idle HTTP connections the client can keep open. - // There's an already set value, and we want to override it only if an explicit value provided + // By default, it is set to 100. MaxIdleConns *int `mapstructure:"max_idle_conns"` // MaxIdleConnsPerHost is used to set a limit to the maximum idle HTTP connections the host can keep open. - // There's an already set value, and we want to override it only if an explicit value provided + // By default, it is set to [http.DefaultTransport.MaxIdleConnsPerHost]. MaxIdleConnsPerHost *int `mapstructure:"max_idle_conns_per_host"` // MaxConnsPerHost limits the total number of connections per host, including connections in the dialing, // active, and idle states. - // There's an already set value, and we want to override it only if an explicit value provided + // By default, it is set to [http.DefaultTransport.MaxConnsPerHost]. MaxConnsPerHost *int `mapstructure:"max_conns_per_host"` // IdleConnTimeout is the maximum amount of time a connection will remain open before closing itself. - // There's an already set value, and we want to override it only if an explicit value provided + // By default, it is set to [http.DefaultTransport.IdleConnTimeout] IdleConnTimeout *time.Duration `mapstructure:"idle_conn_timeout"` // DisableKeepAlives, if true, disables HTTP keep-alives and will only use the connection to the server @@ -100,23 +111,44 @@ type ClientConfig struct { // HTTP2PingTimeout if there's no response to the ping within the configured value, the connection will be closed. // If not set or set to 0, it defaults to 15s. HTTP2PingTimeout time.Duration `mapstructure:"http2_ping_timeout"` + // Cookies configures the cookie management of the HTTP client. + Cookies *CookiesConfig `mapstructure:"cookies"` +} + +// CookiesConfig defines the configuration of the HTTP client regarding cookies served by the server. +type CookiesConfig struct { + // Enabled if true, cookies from HTTP responses will be reused in further HTTP requests with the same server. 
+ Enabled bool `mapstructure:"enabled"` } // NewDefaultClientConfig returns ClientConfig type object with -// the default values of 'MaxIdleConns' and 'IdleConnTimeout'. +// the default values of 'MaxIdleConns' and 'IdleConnTimeout', as well as [http.DefaultTransport] values. // Other config options are not added as they are initialized with 'zero value' by GoLang as default. // We encourage to use this function to create an object of ClientConfig. func NewDefaultClientConfig() ClientConfig { // The default values are taken from the values of 'DefaultTransport' of 'http' package. - maxIdleConns := 100 - idleConnTimeout := 90 * time.Second + defaultTransport := http.DefaultTransport.(*http.Transport) return ClientConfig{ - MaxIdleConns: &maxIdleConns, - IdleConnTimeout: &idleConnTimeout, + ReadBufferSize: defaultTransport.ReadBufferSize, + WriteBufferSize: defaultTransport.WriteBufferSize, + Headers: map[string]configopaque.String{}, + MaxIdleConns: &defaultTransport.MaxIdleConns, + MaxIdleConnsPerHost: &defaultTransport.MaxIdleConnsPerHost, + MaxConnsPerHost: &defaultTransport.MaxConnsPerHost, + IdleConnTimeout: &defaultTransport.IdleConnTimeout, } } +func (hcs *ClientConfig) Validate() error { + if hcs.Compression.IsCompressed() { + if err := hcs.Compression.ValidateParams(hcs.CompressionParams); err != nil { + return err + } + } + return nil +} + // ToClient creates an HTTP client. func (hcs *ClientConfig) ToClient(ctx context.Context, host component.Host, settings component.TelemetrySettings) (*http.Client, error) { tlsCfg, err := hcs.TLSSetting.LoadTLSConfig(ctx) @@ -181,7 +213,7 @@ func (hcs *ClientConfig) ToClient(ctx context.Context, host component.Host, sett return nil, errors.New("extensions configuration not found") } - httpCustomAuthRoundTripper, aerr := hcs.Auth.GetClientAuthenticator(ext) + httpCustomAuthRoundTripper, aerr := hcs.Auth.GetClientAuthenticator(ctx, ext) if aerr != nil { return nil, aerr } @@ -202,7 +234,11 @@ func (hcs *ClientConfig) ToClient(ctx context.Context, host component.Host, sett // Compress the body using specified compression methods if non-empty string is provided. // Supporting gzip, zlib, deflate, snappy, and zstd; none is treated as uncompressed. if hcs.Compression.IsCompressed() { - clientTransport, err = newCompressRoundTripper(clientTransport, hcs.Compression) + // If the compression level is not set, use the default level. + if hcs.CompressionParams.Level == 0 { + hcs.CompressionParams.Level = configcompression.DefaultCompressionLevel + } + clientTransport, err = newCompressRoundTripper(clientTransport, hcs.Compression, hcs.CompressionParams) if err != nil { return nil, err } @@ -211,18 +247,16 @@ func (hcs *ClientConfig) ToClient(ctx context.Context, host component.Host, sett otelOpts := []otelhttp.Option{ otelhttp.WithTracerProvider(settings.TracerProvider), otelhttp.WithPropagators(otel.GetTextMapPropagator()), + otelhttp.WithMeterProvider(getLeveledMeterProvider(settings)), } - if settings.MetricsLevel >= configtelemetry.LevelDetailed { - otelOpts = append(otelOpts, otelhttp.WithMeterProvider(settings.MeterProvider)) - } - // wrapping http transport with otelhttp transport to enable otel instrumentation if settings.TracerProvider != nil && settings.MeterProvider != nil { clientTransport = otelhttp.NewTransport(clientTransport, otelOpts...) 
} - if hcs.CustomRoundTripper != nil { - clientTransport, err = hcs.CustomRoundTripper(clientTransport) + var jar http.CookieJar + if hcs.Cookies != nil && hcs.Cookies.Enabled { + jar, err = cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List}) if err != nil { return nil, err } @@ -231,6 +265,7 @@ func (hcs *ClientConfig) ToClient(ctx context.Context, host component.Host, sett return &http.Client{ Transport: clientTransport, Timeout: hcs.Timeout, + Jar: jar, }, nil } @@ -268,18 +303,74 @@ type ServerConfig struct { CORS *CORSConfig `mapstructure:"cors"` // Auth for this receiver - Auth *configauth.Authentication `mapstructure:"auth"` + Auth *AuthConfig `mapstructure:"auth"` // MaxRequestBodySize sets the maximum request body size in bytes. Default: 20MiB. MaxRequestBodySize int64 `mapstructure:"max_request_body_size"` // IncludeMetadata propagates the client metadata from the incoming requests to the downstream consumers - // Experimental: *NOTE* this option is subject to change or removal in the future. IncludeMetadata bool `mapstructure:"include_metadata"` // Additional headers attached to each HTTP response sent to the client. // Header values are opaque since they may be sensitive. ResponseHeaders map[string]configopaque.String `mapstructure:"response_headers"` + + // CompressionAlgorithms configures the list of compression algorithms the server can accept. Default: ["", "gzip", "zstd", "zlib", "snappy", "deflate"] + CompressionAlgorithms []string `mapstructure:"compression_algorithms"` + + // ReadTimeout is the maximum duration for reading the entire + // request, including the body. A zero or negative value means + // there will be no timeout. + // + // Because ReadTimeout does not let Handlers make per-request + // decisions on each request body's acceptable deadline or + // upload rate, most users will prefer to use + // ReadHeaderTimeout. It is valid to use them both. + ReadTimeout time.Duration `mapstructure:"read_timeout"` + + // ReadHeaderTimeout is the amount of time allowed to read + // request headers. The connection's read deadline is reset + // after reading the headers and the Handler can decide what + // is considered too slow for the body. If ReadHeaderTimeout + // is zero, the value of ReadTimeout is used. If both are + // zero, there is no timeout. + ReadHeaderTimeout time.Duration `mapstructure:"read_header_timeout"` + + // WriteTimeout is the maximum duration before timing out + // writes of the response. It is reset whenever a new + // request's header is read. Like ReadTimeout, it does not + // let Handlers make decisions on a per-request basis. + // A zero or negative value means there will be no timeout. + WriteTimeout time.Duration `mapstructure:"write_timeout"` + + // IdleTimeout is the maximum amount of time to wait for the + // next request when keep-alives are enabled. If IdleTimeout + // is zero, the value of ReadTimeout is used. If both are + // zero, there is no timeout. + IdleTimeout time.Duration `mapstructure:"idle_timeout"` +} + +// NewDefaultServerConfig returns ServerConfig type object with default values. +// We encourage to use this function to create an object of ServerConfig. 
+func NewDefaultServerConfig() ServerConfig { + tlsDefaultServerConfig := configtls.NewDefaultServerConfig() + return ServerConfig{ + ResponseHeaders: map[string]configopaque.String{}, + TLSSetting: &tlsDefaultServerConfig, + CORS: NewDefaultCORSConfig(), + WriteTimeout: 30 * time.Second, + ReadHeaderTimeout: 1 * time.Minute, + IdleTimeout: 1 * time.Minute, + } +} + +type AuthConfig struct { + // Auth for this receiver. + configauth.Authentication `mapstructure:",squash"` + + // RequestParameters is a list of parameters that should be extracted from the request and added to the context. + // When a parameter is found in both the query string and the header, the value from the query string will be used. + RequestParameters []string `mapstructure:"request_params"` } // ToListener creates a net.Listener. @@ -304,60 +395,63 @@ func (hss *ServerConfig) ToListener(ctx context.Context) (net.Listener, error) { // toServerOptions has options that change the behavior of the HTTP server // returned by ServerConfig.ToServer(). -type toServerOptions struct { - errHandler func(w http.ResponseWriter, r *http.Request, errorMsg string, statusCode int) - decoders map[string]func(body io.ReadCloser) (io.ReadCloser, error) -} +type toServerOptions = internal.ToServerOptions // ToServerOption is an option to change the behavior of the HTTP server // returned by ServerConfig.ToServer(). -type ToServerOption func(opts *toServerOptions) +type ToServerOption = internal.ToServerOption // WithErrorHandler overrides the HTTP error handler that gets invoked // when there is a failure inside httpContentDecompressor. func WithErrorHandler(e func(w http.ResponseWriter, r *http.Request, errorMsg string, statusCode int)) ToServerOption { - return func(opts *toServerOptions) { - opts.errHandler = e - } + return internal.ToServerOptionFunc(func(opts *toServerOptions) { + opts.ErrHandler = e + }) } // WithDecoder provides support for additional decoders to be configured // by the caller. func WithDecoder(key string, dec func(body io.ReadCloser) (io.ReadCloser, error)) ToServerOption { - return func(opts *toServerOptions) { - if opts.decoders == nil { - opts.decoders = map[string]func(body io.ReadCloser) (io.ReadCloser, error){} + return internal.ToServerOptionFunc(func(opts *toServerOptions) { + if opts.Decoders == nil { + opts.Decoders = map[string]func(body io.ReadCloser) (io.ReadCloser, error){} } - opts.decoders[key] = dec - } + opts.Decoders[key] = dec + }) } // ToServer creates an http.Server from settings object. func (hss *ServerConfig) ToServer(_ context.Context, host component.Host, settings component.TelemetrySettings, handler http.Handler, opts ...ToServerOption) (*http.Server, error) { - internal.WarnOnUnspecifiedHost(settings.Logger, hss.Endpoint) - serverOpts := &toServerOptions{} - for _, o := range opts { - o(serverOpts) - } + serverOpts.Apply(opts...) 
if hss.MaxRequestBodySize <= 0 { hss.MaxRequestBodySize = defaultMaxRequestBodySize } - handler = httpContentDecompressor(handler, hss.MaxRequestBodySize, serverOpts.errHandler, serverOpts.decoders) + if hss.CompressionAlgorithms == nil { + hss.CompressionAlgorithms = defaultCompressionAlgorithms + } + + handler = httpContentDecompressor( + handler, + hss.MaxRequestBodySize, + serverOpts.ErrHandler, + hss.CompressionAlgorithms, + serverOpts.Decoders, + ) if hss.MaxRequestBodySize > 0 { handler = maxRequestBodySizeInterceptor(handler, hss.MaxRequestBodySize) } if hss.Auth != nil { - server, err := hss.Auth.GetServerAuthenticator(host.GetExtensions()) + server, err := hss.Auth.GetServerAuthenticator(context.Background(), host.GetExtensions()) if err != nil { return nil, err } - handler = authInterceptor(handler, server) + handler = authInterceptor(handler, server, hss.Auth.RequestParameters) } if hss.CORS != nil && len(hss.CORS.AllowedOrigins) > 0 { @@ -377,16 +471,16 @@ func (hss *ServerConfig) ToServer(_ context.Context, host component.Host, settin handler = responseHeadersHandler(handler, hss.ResponseHeaders) } - otelOpts := []otelhttp.Option{ - otelhttp.WithTracerProvider(settings.TracerProvider), - otelhttp.WithPropagators(otel.GetTextMapPropagator()), - otelhttp.WithSpanNameFormatter(func(_ string, r *http.Request) string { - return r.URL.Path - }), - } - if settings.MetricsLevel >= configtelemetry.LevelDetailed { - otelOpts = append(otelOpts, otelhttp.WithMeterProvider(settings.MeterProvider)) - } + otelOpts := append( + []otelhttp.Option{ + otelhttp.WithTracerProvider(settings.TracerProvider), + otelhttp.WithPropagators(otel.GetTextMapPropagator()), + otelhttp.WithSpanNameFormatter(func(_ string, r *http.Request) string { + return r.URL.Path + }), + otelhttp.WithMeterProvider(getLeveledMeterProvider(settings)), + }, + serverOpts.OtelhttpOpts...) // Enable OpenTelemetry observability plugin. // TODO: Consider to use component ID string as prefix for all the operations. @@ -398,9 +492,15 @@ func (hss *ServerConfig) ToServer(_ context.Context, host component.Host, settin includeMetadata: hss.IncludeMetadata, } - return &http.Server{ - Handler: handler, - }, nil + server := &http.Server{ + Handler: handler, + ReadTimeout: hss.ReadTimeout, + ReadHeaderTimeout: hss.ReadHeaderTimeout, + WriteTimeout: hss.WriteTimeout, + IdleTimeout: hss.IdleTimeout, + } + + return server, nil } func responseHeadersHandler(handler http.Handler, headers map[string]configopaque.String) http.Handler { @@ -437,9 +537,21 @@ type CORSConfig struct { MaxAge int `mapstructure:"max_age"` } -func authInterceptor(next http.Handler, server auth.Server) http.Handler { +// NewDefaultCORSConfig creates a default cross-origin resource sharing (CORS) configuration. 
+func NewDefaultCORSConfig() *CORSConfig { + return &CORSConfig{} +} + +func authInterceptor(next http.Handler, server auth.Server, requestParams []string) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctx, err := server.Authenticate(r.Context(), r.Header) + sources := r.Header + query := r.URL.Query() + for _, param := range requestParams { + if val, ok := query[param]; ok { + sources[param] = val + } + } + ctx, err := server.Authenticate(r.Context(), sources) if err != nil { http.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized) return @@ -455,3 +567,10 @@ func maxRequestBodySizeInterceptor(next http.Handler, maxRecvSize int64) http.Ha next.ServeHTTP(w, r) }) } + +func getLeveledMeterProvider(settings component.TelemetrySettings) metric.MeterProvider { + if configtelemetry.LevelDetailed <= settings.MetricsLevel { + return settings.MeterProvider + } + return noop.MeterProvider{} +} diff --git a/vendor/go.opentelemetry.io/collector/config/confighttp/internal/options.go b/vendor/go.opentelemetry.io/collector/config/confighttp/internal/options.go new file mode 100644 index 00000000000..e8b9612f39a --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/config/confighttp/internal/options.go @@ -0,0 +1,38 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/config/confighttp/internal" + +import ( + "io" + "net/http" + + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" +) + +// toServerOptions has options that change the behavior of the HTTP server +// returned by ServerConfig.ToServer(). +type ToServerOptions struct { + ErrHandler func(w http.ResponseWriter, r *http.Request, errorMsg string, statusCode int) + Decoders map[string]func(body io.ReadCloser) (io.ReadCloser, error) + OtelhttpOpts []otelhttp.Option +} + +func (tso *ToServerOptions) Apply(opts ...ToServerOption) { + for _, o := range opts { + o.apply(tso) + } +} + +// ToServerOption is an option to change the behavior of the HTTP server +// returned by ServerConfig.ToServer(). +type ToServerOption interface { + apply(*ToServerOptions) +} + +// ToServerOptionFunc converts a function into ToServerOption interface. +type ToServerOptionFunc func(*ToServerOptions) + +func (of ToServerOptionFunc) apply(e *ToServerOptions) { + of(e) +} diff --git a/vendor/go.opentelemetry.io/collector/config/confignet/README.md b/vendor/go.opentelemetry.io/collector/config/confignet/README.md index cd8150e6303..02bc3d1b237 100644 --- a/vendor/go.opentelemetry.io/collector/config/confignet/README.md +++ b/vendor/go.opentelemetry.io/collector/config/confignet/README.md @@ -13,7 +13,8 @@ leverage network configuration to set connection and transport information. - `transport`: Known protocols are "tcp", "tcp4" (IPv4-only), "tcp6" (IPv6-only), "udp", "udp4" (IPv4-only), "udp6" (IPv6-only), "ip", "ip4" (IPv4-only), "ip6" (IPv6-only), "unix", "unixgram" and "unixpacket". -- `dialer_timeout`: DialerTimeout is the maximum amount of time a dial will wait for a connect to complete. The default is no timeout. +- `dialer`: Dialer configuration + - `timeout`: Dialer timeout is the maximum amount of time a dial will wait for a connect to complete. The default is no timeout. Note that for TCP receivers only the `endpoint` configuration setting is required. 
diff --git a/vendor/go.opentelemetry.io/collector/config/configretry/backoff.go b/vendor/go.opentelemetry.io/collector/config/configretry/backoff.go index 1fc3f8c5852..476c350b05f 100644 --- a/vendor/go.opentelemetry.io/collector/config/configretry/backoff.go +++ b/vendor/go.opentelemetry.io/collector/config/configretry/backoff.go @@ -68,7 +68,6 @@ func (bs *BackOffConfig) Validate() error { if bs.MaxElapsedTime < bs.MaxInterval { return errors.New("'max_elapsed_time' must not be less than 'max_interval'") } - } return nil } diff --git a/vendor/go.opentelemetry.io/collector/config/configtelemetry/doc.go b/vendor/go.opentelemetry.io/collector/config/configtelemetry/doc.go index e3a6bdfe321..646aeb2d7c7 100644 --- a/vendor/go.opentelemetry.io/collector/config/configtelemetry/doc.go +++ b/vendor/go.opentelemetry.io/collector/config/configtelemetry/doc.go @@ -4,4 +4,44 @@ // Package configtelemetry defines various telemetry level for configuration. // It enables every component to have access to telemetry level // to enable metrics only when necessary. +// +// This document provides guidance on which telemetry level to adopt for Collector metrics. +// When adopting a telemetry level, component authors are expected to rely on this guidance to +// justify their choice of telemetry level. +// +// 1. configtelemetry.None +// +// No telemetry data is recorded. +// +// 2. configtelemetry.Basic +// +// Telemetry associated with this level provides essential coverage of the collector telemetry. +// It should only be used for internal collector telemetry generated by the collector core API. Components outside of +// the core API MUST NOT record additional telemetry at this level. +// +// 3. configtelemetry.Normal +// +// Telemetry associated with this level provides complete coverage of the collector telemetry. +// It should be the default for component authors. +// +// Component authors using this telemetry level can use this guidance: +// +// - The signals associated with this level must control cardinality. +// It is acceptable at this level for cardinality to scale linearly with the monitored resources. +// +// - The signals associated with this level must represent a controlled data volume. Examples follow: +// +// a. A max cardinality (total possible combinations of dimension values) for a given metric of at most 100. +// +// b. At most 5 spans actively recording simultaneously per active request. +// +// This is the default level recommended when running the Collector. +// +// 4. configtelemetry.Detailed +// +// Telemetry associated with this level provides complete coverage of the collector telemetry. +// +// The signals associated with this level may exhibit high cardinality and/or high dimensionality. +// +// There is no limit on data volume. 
package configtelemetry // import "go.opentelemetry.io/collector/config/configtelemetry" diff --git a/vendor/go.opentelemetry.io/collector/config/configtls/clientcasfilereloader.go b/vendor/go.opentelemetry.io/collector/config/configtls/clientcasfilereloader.go index 1ee9c72ef9f..e6ca9b2962e 100644 --- a/vendor/go.opentelemetry.io/collector/config/configtls/clientcasfilereloader.go +++ b/vendor/go.opentelemetry.io/collector/config/configtls/clientcasfilereloader.go @@ -6,6 +6,7 @@ package configtls // import "go.opentelemetry.io/collector/config/configtls" import ( "crypto/tls" "crypto/x509" + "errors" "fmt" "sync" @@ -78,7 +79,7 @@ func (r *clientCAsFileReloader) getLastError() error { func (r *clientCAsFileReloader) startWatching() error { if r.shutdownCH != nil { - return fmt.Errorf("client CA file watcher already started") + return errors.New("client CA file watcher already started") } watcher, err := fsnotify.NewWatcher() @@ -132,7 +133,7 @@ func (r *clientCAsFileReloader) handleWatcherEvents() { func (r *clientCAsFileReloader) shutdown() error { if r.shutdownCH == nil { - return fmt.Errorf("client CAs file watcher is not running") + return errors.New("client CAs file watcher is not running") } r.shutdownCH <- true close(r.shutdownCH) diff --git a/vendor/go.opentelemetry.io/collector/config/configtls/configtls.go b/vendor/go.opentelemetry.io/collector/config/configtls/configtls.go index 2ce0490b16c..0cbcc5f4072 100644 --- a/vendor/go.opentelemetry.io/collector/config/configtls/configtls.go +++ b/vendor/go.opentelemetry.io/collector/config/configtls/configtls.go @@ -179,7 +179,7 @@ func (r *certReloader) GetCertificate() (*tls.Certificate, error) { func (c Config) Validate() error { if c.hasCAFile() && c.hasCAPem() { - return fmt.Errorf("provide either a CA file or the PEM-encoded string, but not both") + return errors.New("provide either a CA file or the PEM-encoded string, but not both") } minTLS, err := convertVersion(c.MinVersion, defaultMinTLSVersion) @@ -269,7 +269,7 @@ func (c Config) loadCACertPool() (*x509.CertPool, error) { switch { case c.hasCAFile() && c.hasCAPem(): - return nil, fmt.Errorf("failed to load CA CertPool: provide either a CA file or the PEM-encoded string, but not both") + return nil, errors.New("failed to load CA CertPool: provide either a CA file or the PEM-encoded string, but not both") case c.hasCAFile(): // Set up user specified truststore from file certPool, err = c.loadCertFile(c.CAFile) @@ -308,7 +308,7 @@ func (c Config) loadCertPem(certPem []byte) (*x509.CertPool, error) { } } if !certPool.AppendCertsFromPEM(certPem) { - return nil, fmt.Errorf("failed to parse cert") + return nil, errors.New("failed to parse cert") } return certPool, nil } @@ -316,13 +316,13 @@ func (c Config) loadCertPem(certPem []byte) (*x509.CertPool, error) { func (c Config) loadCertificate() (tls.Certificate, error) { switch { case c.hasCert() != c.hasKey(): - return tls.Certificate{}, fmt.Errorf("for auth via TLS, provide both certificate and key, or neither") + return tls.Certificate{}, errors.New("for auth via TLS, provide both certificate and key, or neither") case !c.hasCert() && !c.hasKey(): return tls.Certificate{}, nil case c.hasCertFile() && c.hasCertPem(): - return tls.Certificate{}, fmt.Errorf("for auth via TLS, provide either a certificate or the PEM-encoded string, but not both") + return tls.Certificate{}, errors.New("for auth via TLS, provide either a certificate or the PEM-encoded string, but not both") case c.hasKeyFile() && c.hasKeyPem(): - return 
tls.Certificate{}, fmt.Errorf("for auth via TLS, provide either a key or the PEM-encoded string, but not both") + return tls.Certificate{}, errors.New("for auth via TLS, provide either a key or the PEM-encoded string, but not both") } var certPem, keyPem []byte diff --git a/vendor/go.opentelemetry.io/collector/config/internal/warning.go b/vendor/go.opentelemetry.io/collector/config/internal/warning.go deleted file mode 100644 index f9e32fc1c64..00000000000 --- a/vendor/go.opentelemetry.io/collector/config/internal/warning.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package internal // import "go.opentelemetry.io/collector/config/internal" - -import ( - "net" - "strconv" - "strings" - - "go.uber.org/zap" - - "go.opentelemetry.io/collector/internal/localhostgate" -) - -func shouldWarn(endpoint string) bool { - if endpoint == ":" { - // : (aka 0.0.0.0:0) - return true - } - - if strings.HasPrefix(endpoint, ":") { - // : (aka 0.0.0.0:) - _, err := strconv.ParseInt(endpoint[1:], 10, 64) - // If it's not a number, it's probably invalid, don't warn. - return err == nil - } - - // : - host, _, err := net.SplitHostPort(endpoint) - if err != nil { // Probably invalid, don't warn. - return false - } - - ip := net.ParseIP(host) - return ip != nil && ip.IsUnspecified() -} - -// WarnOnUnspecifiedHost emits a warning if an endpoint has an unspecified host. -func WarnOnUnspecifiedHost(logger *zap.Logger, endpoint string) { - if !localhostgate.UseLocalHostAsDefaultHostfeatureGate.IsEnabled() && shouldWarn(endpoint) { - logger.Warn( - "Using the 0.0.0.0 address exposes this server to every network interface, which may facilitate Denial of Service attacks. Enable the feature gate to change the default and remove this warning.", - zap.String( - "documentation", - "https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/security-best-practices.md#safeguards-against-denial-of-service-attacks", - ), - zap.String("feature gate ID", localhostgate.UseLocalHostAsDefaultHostID), - ) - } -} diff --git a/vendor/go.opentelemetry.io/collector/confmap/README.md b/vendor/go.opentelemetry.io/collector/confmap/README.md index 42dcf548d89..bfa24c8bcc0 100644 --- a/vendor/go.opentelemetry.io/collector/confmap/README.md +++ b/vendor/go.opentelemetry.io/collector/confmap/README.md @@ -102,3 +102,60 @@ configuration retrieved via the `Provider` used to retrieve the “initial” co ``` The `Resolver` does that by passing an `onChange` func to each `Provider.Retrieve` call and capturing all watch events. + +## Troubleshooting + +### Null Maps + +Due to how our underlying merge library, [koanf](https://github.com/knadh/koanf), behaves, configuration resolution +will treat configuration such as + +```yaml +processors: +``` + +as null, which is a valid value. 
As a result if you have configuration `A`: + +```yaml +receivers: + nop: + +processors: + nop: + +exporters: + nop: + +extensions: + nop: + +service: + extensions: [nop] + pipelines: + traces: + receivers: [nop] + processors: [nop] + exporters: [nop] +``` + +and configuration `B`: + +```yaml +processors: +``` + +and do `./otelcorecol --config A.yaml --config B.yaml` + +The result will be an error: + +``` +Error: invalid configuration: service::pipelines::traces: references processor "nop" which is not configured +2024/06/10 14:37:14 collector server run finished with error: invalid configuration: service::pipelines::traces: references processor "nop" which is not configured +``` + +This happens because configuration `B` sets `processors` to null, removing the `nop` processor defined in configuration `A`, +so the `nop` processor referenced in configuration `A`'s pipeline no longer exists. + +This situation can be remedied 2 ways: +1. Use `{}` when you want to represent an empty map, such as `processors: {}` instead of `processors:`. +2. Omit configuration like `processors:` from your configuration. diff --git a/vendor/go.opentelemetry.io/collector/confmap/confmap.go b/vendor/go.opentelemetry.io/collector/confmap/confmap.go index 8a32216a2df..aab1730687b 100644 --- a/vendor/go.opentelemetry.io/collector/confmap/confmap.go +++ b/vendor/go.opentelemetry.io/collector/confmap/confmap.go @@ -102,14 +102,54 @@ func (l *Conf) Marshal(rawVal any, _ ...MarshalOption) error { } out, ok := data.(map[string]any) if !ok { - return fmt.Errorf("invalid config encoding") + return errors.New("invalid config encoding") } return l.Merge(NewFromStringMap(out)) } +func (l *Conf) unsanitizedGet(key string) any { + return l.k.Get(key) +} + +// sanitize recursively removes expandedValue references from the given data. +// It uses the expandedValue.Value field to replace the expandedValue references. +func sanitize(a any) any { + return sanitizeExpanded(a, false) +} + +// sanitizeToStringMap recursively removes expandedValue references from the given data. +// It uses the expandedValue.Original field to replace the expandedValue references. +func sanitizeToStr(a any) any { + return sanitizeExpanded(a, true) +} + +func sanitizeExpanded(a any, useOriginal bool) any { + switch m := a.(type) { + case map[string]any: + c := maps.Copy(m) + for k, v := range m { + c[k] = sanitizeExpanded(v, useOriginal) + } + return c + case []any: + var newSlice []any + for _, e := range m { + newSlice = append(newSlice, sanitizeExpanded(e, useOriginal)) + } + return newSlice + case expandedValue: + if useOriginal { + return m.Original + } + return m.Value + } + return a +} + // Get can retrieve any value given the key to use. func (l *Conf) Get(key string) any { - return l.k.Get(key) + val := l.unsanitizedGet(key) + return sanitizeExpanded(val, false) } // IsSet checks to see if the key has been set in any of the data locations. @@ -127,21 +167,31 @@ func (l *Conf) Merge(in *Conf) error { // It returns an error is the sub-config is not a map[string]any (use Get()), and an empty Map if none exists. func (l *Conf) Sub(key string) (*Conf, error) { // Code inspired by the koanf "Cut" func, but returns an error instead of empty map for unsupported sub-config type. 
- data := l.Get(key) + data := l.unsanitizedGet(key) if data == nil { return New(), nil } - if v, ok := data.(map[string]any); ok { + switch v := data.(type) { + case map[string]any: return NewFromStringMap(v), nil + case expandedValue: + if m, ok := v.Value.(map[string]any); ok { + return NewFromStringMap(m), nil + } } - return nil, fmt.Errorf("unexpected sub-config value kind for key:%s value:%v kind:%v)", key, data, reflect.TypeOf(data).Kind()) + return nil, fmt.Errorf("unexpected sub-config value kind for key:%s value:%v kind:%v", key, data, reflect.TypeOf(data).Kind()) +} + +func (l *Conf) toStringMapWithExpand() map[string]any { + m := maps.Unflatten(l.k.All(), KeyDelimiter) + return m } // ToStringMap creates a map[string]any from a Parser. func (l *Conf) ToStringMap() map[string]any { - return maps.Unflatten(l.k.All(), KeyDelimiter) + return sanitize(l.toStringMapWithExpand()).(map[string]any) } // decodeConfig decodes the contents of the Conf into the result argument, using a @@ -156,9 +206,10 @@ func decodeConfig(m *Conf, result any, errorUnused bool, skipTopLevelUnmarshaler ErrorUnused: errorUnused, Result: result, TagName: "mapstructure", - WeaklyTypedInput: true, + WeaklyTypedInput: false, MatchName: caseSensitiveMatchName, DecodeHook: mapstructure.ComposeDecodeHookFunc( + useExpandValue(), expandNilStructPointersHookFunc(), mapstructure.StringToSliceHookFunc(","), mapKeyStringToMapKeyTextUnmarshalerHookFunc(), @@ -169,14 +220,13 @@ func decodeConfig(m *Conf, result any, errorUnused bool, skipTopLevelUnmarshaler // we unmarshal the embedded structs if present to merge with the result: unmarshalerEmbeddedStructsHookFunc(), zeroSliceHookFunc(), - negativeUintHookFunc(), ), } decoder, err := mapstructure.NewDecoder(dc) if err != nil { return err } - if err = decoder.Decode(m.ToStringMap()); err != nil { + if err = decoder.Decode(m.toStringMapWithExpand()); err != nil { if strings.HasPrefix(err.Error(), "error decoding ''") { return errors.Unwrap(err) } @@ -191,6 +241,7 @@ func decodeConfig(m *Conf, result any, errorUnused bool, skipTopLevelUnmarshaler func encoderConfig(rawVal any) *encoder.EncoderConfig { return &encoder.EncoderConfig{ EncodeHook: mapstructure.ComposeDecodeHookFunc( + encoder.YamlMarshalerHookFunc(), encoder.TextMarshalerHookFunc(), marshalerHookFunc(rawVal), ), @@ -204,6 +255,64 @@ func caseSensitiveMatchName(a, b string) bool { return a == b } +func castTo(exp expandedValue, useOriginal bool) any { + // If the target field is a string, use `exp.Original` or fail if not available. + if useOriginal { + return exp.Original + } + // Otherwise, use the parsed value (previous behavior). + return exp.Value +} + +// Check if a reflect.Type is of the form T, where: +// X is any type or interface +// T = string | map[X]T | []T | [n]T +func isStringyStructure(t reflect.Type) bool { + if t.Kind() == reflect.String { + return true + } + if t.Kind() == reflect.Map { + return isStringyStructure(t.Elem()) + } + if t.Kind() == reflect.Slice || t.Kind() == reflect.Array { + return isStringyStructure(t.Elem()) + } + return false +} + +// When a value has been loaded from an external source via a provider, we keep both the +// parsed value and the original string value. This allows us to expand the value to its +// original string representation when decoding into a string field, and use the original otherwise. 
+func useExpandValue() mapstructure.DecodeHookFuncType { + return func( + _ reflect.Type, + to reflect.Type, + data any, + ) (any, error) { + if exp, ok := data.(expandedValue); ok { + v := castTo(exp, to.Kind() == reflect.String) + // See https://github.com/open-telemetry/opentelemetry-collector/issues/10949 + // If the `to.Kind` is not a string, then expandValue's original value is useless and + // the casted-to value will be nil. In that scenario, we need to use the default value of `to`'s kind. + if v == nil { + return reflect.Zero(to).Interface(), nil + } + return v, nil + } + + switch to.Kind() { + case reflect.Array, reflect.Slice, reflect.Map: + if isStringyStructure(to) { + // If the target field is a stringy structure, sanitize to use the original string value everywhere. + return sanitizeToStr(data), nil + } + // Otherwise, sanitize to use the parsed value everywhere. + return sanitize(data), nil + } + return data, nil + } +} + // In cases where a config has a mapping of something to a struct pointers // we want nil values to resolve to a pointer to the zero value of the // underlying struct just as we want nil values of a mapping of something @@ -415,10 +524,10 @@ type Marshaler interface { // } // // The configuration provided by users may have following cases -// 1. configuration have `keys` field and have a non-nil values for this key, the output should be overrided +// 1. configuration have `keys` field and have a non-nil values for this key, the output should be overridden // - for example, input is {"keys", ["c"]}, then output is Config{ Keys: ["c"]} // -// 2. configuration have `keys` field and have an empty slice for this key, the output should be overrided by empty slics +// 2. configuration have `keys` field and have an empty slice for this key, the output should be overridden by empty slices // - for example, input is {"keys", []}, then output is Config{ Keys: []} // // 3. configuration have `keys` field and have nil value for this key, the output should be default config @@ -427,7 +536,7 @@ type Marshaler interface { // 4. configuration have no `keys` field specified, the output should be default config // - for example, input is {}, then output is Config{ Keys: ["a", "b"]} func zeroSliceHookFunc() mapstructure.DecodeHookFuncValue { - return func(from reflect.Value, to reflect.Value) (interface{}, error) { + return func(from reflect.Value, to reflect.Value) (any, error) { if to.CanSet() && to.Kind() == reflect.Slice && from.Kind() == reflect.Slice { to.Set(reflect.MakeSlice(to.Type(), from.Len(), from.Cap())) } @@ -436,19 +545,6 @@ func zeroSliceHookFunc() mapstructure.DecodeHookFuncValue { } } -// This hook is used to solve the issue: https://github.com/open-telemetry/opentelemetry-collector/issues/9060 -// Decoding should fail when converting a negative integer to any type of unsigned integer. This prevents -// negative values being decoded as large uint values. 
-// TODO: This should be removed as a part of https://github.com/open-telemetry/opentelemetry-collector/issues/9532 -func negativeUintHookFunc() mapstructure.DecodeHookFuncValue { - return func(from reflect.Value, to reflect.Value) (interface{}, error) { - if from.CanInt() && from.Int() < 0 && to.CanUint() { - return nil, fmt.Errorf("cannot convert negative value %v to an unsigned integer", from.Int()) - } - return from.Interface(), nil - } -} - type moduleFactory[T any, S any] interface { Create(s S) T } diff --git a/vendor/go.opentelemetry.io/collector/confmap/converter/expandconverter/expand.go b/vendor/go.opentelemetry.io/collector/confmap/converter/expandconverter/expand.go deleted file mode 100644 index 029b48eaf17..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/converter/expandconverter/expand.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package expandconverter // import "go.opentelemetry.io/collector/confmap/converter/expandconverter" - -import ( - "context" - "fmt" - "os" - "regexp" - - "go.uber.org/zap" - - "go.opentelemetry.io/collector/confmap" - "go.opentelemetry.io/collector/confmap/internal/envvar" -) - -type converter struct { - logger *zap.Logger - - // Record of which env vars we have logged a warning for - loggedDeprecations map[string]struct{} -} - -// NewFactory returns a factory for a confmap.Converter, -// which expands all environment variables for a given confmap.Conf. -func NewFactory() confmap.ConverterFactory { - return confmap.NewConverterFactory(newConverter) -} - -func newConverter(set confmap.ConverterSettings) confmap.Converter { - return converter{ - loggedDeprecations: make(map[string]struct{}), - logger: set.Logger, - } -} - -func (c converter) Convert(_ context.Context, conf *confmap.Conf) error { - var err error - out := make(map[string]any) - for _, k := range conf.AllKeys() { - out[k], err = c.expandStringValues(conf.Get(k)) - if err != nil { - return err - } - } - return conf.Merge(confmap.NewFromStringMap(out)) -} - -func (c converter) expandStringValues(value any) (any, error) { - var err error - switch v := value.(type) { - case string: - return c.expandEnv(v) - case []any: - nslice := make([]any, 0, len(v)) - for _, vint := range v { - var nv any - nv, err = c.expandStringValues(vint) - if err != nil { - return nil, err - } - nslice = append(nslice, nv) - } - return nslice, nil - case map[string]any: - nmap := map[string]any{} - for mk, mv := range v { - nmap[mk], err = c.expandStringValues(mv) - if err != nil { - return nil, err - } - } - return nmap, nil - default: - return v, nil - } -} - -func (c converter) expandEnv(s string) (string, error) { - var err error - res := os.Expand(s, func(str string) string { - // Matches on $VAR style environment variables - // in order to make sure we don't log a warning for ${VAR} - var regex = regexp.MustCompile(fmt.Sprintf(`\$%s`, regexp.QuoteMeta(str))) - if _, exists := c.loggedDeprecations[str]; !exists && regex.MatchString(s) { - msg := fmt.Sprintf("Variable substitution using $VAR will be deprecated in favor of ${VAR} and ${env:VAR}, please update $%s", str) - c.logger.Warn(msg, zap.String("variable", str)) - c.loggedDeprecations[str] = struct{}{} - } - // This allows escaping environment variable substitution via $$, e.g. 
- // - $FOO will be substituted with env var FOO - // - $$FOO will be replaced with $FOO - // - $$$FOO will be replaced with $ + substituted env var FOO - if str == "$" { - return "$" - } - // For $ENV style environment variables os.Expand returns once it hits a character that isn't an underscore or - // an alphanumeric character - so we cannot detect those malformed environment variables. - // For ${ENV} style variables we can detect those kinds of env var names! - if !envvar.ValidationRegexp.MatchString(str) { - err = fmt.Errorf("environment variable %q has invalid name: must match regex %s", str, envvar.ValidationPattern) - return "" - } - val, exists := os.LookupEnv(str) - if !exists { - c.logger.Warn("Configuration references unset environment variable", zap.String("name", str)) - } else if len(val) == 0 { - c.logger.Info("Configuration references empty environment variable", zap.String("name", str)) - } - return val - }) - return res, err -} diff --git a/vendor/go.opentelemetry.io/collector/confmap/expand.go b/vendor/go.opentelemetry.io/collector/confmap/expand.go index 768395f76fd..42f3b6296da 100644 --- a/vendor/go.opentelemetry.io/collector/confmap/expand.go +++ b/vendor/go.opentelemetry.io/collector/confmap/expand.go @@ -7,9 +7,7 @@ import ( "context" "errors" "fmt" - "reflect" "regexp" - "strconv" "strings" ) @@ -26,7 +24,7 @@ var ( ) func (mr *Resolver) expandValueRecursively(ctx context.Context, value any) (any, error) { - for i := 0; i < 100; i++ { + for i := 0; i < 1000; i++ { val, changed, err := mr.expandValue(ctx, value) if err != nil { return nil, err @@ -41,6 +39,34 @@ func (mr *Resolver) expandValueRecursively(ctx context.Context, value any) (any, func (mr *Resolver) expandValue(ctx context.Context, value any) (any, bool, error) { switch v := value.(type) { + case expandedValue: + expanded, changed, err := mr.expandValue(ctx, v.Value) + if err != nil { + return nil, false, err + } + + switch exp := expanded.(type) { + case expandedValue, string: + // Return expanded values or strings verbatim. + return exp, changed, nil + } + + // At this point we don't know the target field type, so we need to expand the original representation as well. + originalExpanded, originalChanged, err := mr.expandValue(ctx, v.Original) + if err != nil { + // The original representation is not valid, return the expanded value. + return expanded, changed, nil + } + + if originalExpanded, ok := originalExpanded.(string); ok { + // If the original representation is a string, return the expanded value with the original representation. + return expandedValue{ + Value: expanded, + Original: originalExpanded, + }, changed || originalChanged, nil + } + + return expanded, changed, nil case string: if !strings.Contains(v, "${") || !strings.Contains(v, "}") { // No URIs to expand. @@ -79,6 +105,7 @@ func (mr *Resolver) expandValue(ctx context.Context, value any) (any, bool, erro // findURI attempts to find the first potentially expandable URI in input. It returns a potentially expandable // URI, or an empty string if none are found. // Note: findURI is only called when input contains a closing bracket. +// We do not support escaping nested URIs (such as ${env:$${FOO}}, since that would result in an invalid outer URI (${env:${FOO}}). 
func (mr *Resolver) findURI(input string) string { closeIndex := strings.Index(input, "}") remaining := input[closeIndex+1:] @@ -96,9 +123,35 @@ func (mr *Resolver) findURI(input string) string { return mr.findURI(remaining) } + index := openIndex - 1 + currentRune := '$' + count := 0 + for index >= 0 && currentRune == '$' { + currentRune = rune(input[index]) + if currentRune == '$' { + count++ + } + index-- + } + // if we found an odd number of immediately $ preceding ${, then the expansion is escaped + if count%2 == 1 { + return "" + } + return input[openIndex : closeIndex+1] } +// expandedValue holds the YAML parsed value and original representation of a value. +// It keeps track of the original representation to be used by the 'useExpandValue' hook +// if the target field is a string. We need to keep both representations because we don't know +// what the target field type is until `Unmarshal` is called. +type expandedValue struct { + // Value is the expanded value. + Value any + // Original is the original representation of the value. + Original string +} + // findAndExpandURI attempts to find and expand the first occurrence of an expandable URI in input. If an expandable URI is found it // returns the input with the URI expanded, true and nil. Otherwise, it returns the unchanged input, false and the expanding error. // This method expects input to start with ${ and end with } @@ -111,38 +164,38 @@ func (mr *Resolver) findAndExpandURI(ctx context.Context, input string) (any, bo if uri == input { // If the value is a single URI, then the return value can be anything. // This is the case `foo: ${file:some_extra_config.yml}`. - return mr.expandURI(ctx, input) + ret, err := mr.expandURI(ctx, input) + if err != nil { + return input, false, err + } + + val, err := ret.AsRaw() + if err != nil { + return input, false, err + } + + if asStr, err2 := ret.AsString(); err2 == nil { + return expandedValue{ + Value: val, + Original: asStr, + }, true, nil + } + + return val, true, nil } - expanded, changed, err := mr.expandURI(ctx, uri) + expanded, err := mr.expandURI(ctx, uri) if err != nil { return input, false, err } - repl, err := toString(expanded) + + repl, err := expanded.AsString() if err != nil { return input, false, fmt.Errorf("expanding %v: %w", uri, err) } - return strings.ReplaceAll(input, uri, repl), changed, err -} - -// toString attempts to convert input to a string. -func toString(input any) (string, error) { - // This list must be kept in sync with checkRawConfType. 
- val := reflect.ValueOf(input) - switch val.Kind() { - case reflect.String: - return val.String(), nil - case reflect.Int, reflect.Int32, reflect.Int64: - return strconv.FormatInt(val.Int(), 10), nil - case reflect.Float32, reflect.Float64: - return strconv.FormatFloat(val.Float(), 'f', -1, 64), nil - case reflect.Bool: - return strconv.FormatBool(val.Bool()), nil - default: - return "", fmt.Errorf("expected convertable to string value type, got %q(%T)", input, input) - } + return strings.ReplaceAll(input, uri, repl), true, err } -func (mr *Resolver) expandURI(ctx context.Context, input string) (any, bool, error) { +func (mr *Resolver) expandURI(ctx context.Context, input string) (*Retrieved, error) { // strip ${ and } uri := input[2 : len(input)-1] @@ -152,19 +205,18 @@ func (mr *Resolver) expandURI(ctx context.Context, input string) (any, bool, err lURI, err := newLocation(uri) if err != nil { - return nil, false, err + return nil, err } if strings.Contains(lURI.opaqueValue, "$") { - return nil, false, fmt.Errorf("the uri %q contains unsupported characters ('$')", lURI.asString()) + return nil, fmt.Errorf("the uri %q contains unsupported characters ('$')", lURI.asString()) } ret, err := mr.retrieveValue(ctx, lURI) if err != nil { - return nil, false, err + return nil, err } mr.closers = append(mr.closers, ret.Close) - val, err := ret.AsRaw() - return val, true, err + return ret, nil } type location struct { diff --git a/vendor/go.opentelemetry.io/collector/confmap/internal/envvar/pattern.go b/vendor/go.opentelemetry.io/collector/confmap/internal/envvar/pattern.go deleted file mode 100644 index 6ce639ec275..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/internal/envvar/pattern.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package envvar // import "go.opentelemetry.io/collector/confmap/internal/envvar" - -import "regexp" - -const ValidationPattern = `^[a-zA-Z_][a-zA-Z0-9_]*$` - -var ValidationRegexp = regexp.MustCompile(ValidationPattern) diff --git a/vendor/go.opentelemetry.io/collector/confmap/internal/mapstructure/encoder.go b/vendor/go.opentelemetry.io/collector/confmap/internal/mapstructure/encoder.go index d0e222e08b6..ffc0bdc2985 100644 --- a/vendor/go.opentelemetry.io/collector/confmap/internal/mapstructure/encoder.go +++ b/vendor/go.opentelemetry.io/collector/confmap/internal/mapstructure/encoder.go @@ -11,6 +11,7 @@ import ( "strings" "github.com/go-viper/mapstructure/v2" + "gopkg.in/yaml.v3" ) const ( @@ -22,9 +23,7 @@ const ( optionSkip = "-" ) -var ( - errNonStringEncodedKey = errors.New("non string-encoded key") -) +var errNonStringEncodedKey = errors.New("non string-encoded key") // tagInfo stores the mapstructure tag details. type tagInfo struct { @@ -228,3 +227,35 @@ func TextMarshalerHookFunc() mapstructure.DecodeHookFuncValue { return string(out), nil } } + +// YamlMarshalerHookFunc returns a DecodeHookFuncValue that checks for structs +// that have yaml tags but no mapstructure tags. If found, it will convert the struct +// to map[string]any using the yaml package, which respects the yaml tags. Ultimately, +// this allows mapstructure to later marshal the map[string]any in a generic way. 
+func YamlMarshalerHookFunc() mapstructure.DecodeHookFuncValue { + return func(from reflect.Value, _ reflect.Value) (any, error) { + if from.Kind() == reflect.Struct { + for i := 0; i < from.NumField(); i++ { + if _, ok := from.Type().Field(i).Tag.Lookup("mapstructure"); ok { + // The struct has at least one mapstructure tag so don't do anything. + return from.Interface(), nil + } + + if _, ok := from.Type().Field(i).Tag.Lookup("yaml"); ok { + // The struct has at least one yaml tag, so convert it to map[string]any using yaml. + yamlBytes, err := yaml.Marshal(from.Interface()) + if err != nil { + return nil, err + } + var m map[string]any + err = yaml.Unmarshal(yamlBytes, &m) + if err != nil { + return nil, err + } + return m, nil + } + } + } + return from.Interface(), nil + } +} diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider.go b/vendor/go.opentelemetry.io/collector/confmap/provider.go index 192577ed4d8..c462b9bb6fe 100644 --- a/vendor/go.opentelemetry.io/collector/confmap/provider.go +++ b/vendor/go.opentelemetry.io/collector/confmap/provider.go @@ -6,8 +6,10 @@ package confmap // import "go.opentelemetry.io/collector/confmap" import ( "context" "fmt" + "time" "go.uber.org/zap" + "gopkg.in/yaml.v3" ) // ProviderSettings are the settings to initialize a Provider. @@ -99,21 +101,63 @@ type ChangeEvent struct { type Retrieved struct { rawConf any closeFunc CloseFunc + + stringRepresentation string + isSetString bool } type retrievedSettings struct { - closeFunc CloseFunc + stringRepresentation string + isSetString bool + closeFunc CloseFunc } // RetrievedOption options to customize Retrieved values. -type RetrievedOption func(*retrievedSettings) +type RetrievedOption interface { + apply(*retrievedSettings) +} + +type retrievedOptionFunc func(*retrievedSettings) + +func (of retrievedOptionFunc) apply(e *retrievedSettings) { + of(e) +} // WithRetrievedClose overrides the default Retrieved.Close function. // The default Retrieved.Close function does nothing and always returns nil. func WithRetrievedClose(closeFunc CloseFunc) RetrievedOption { - return func(settings *retrievedSettings) { + return retrievedOptionFunc(func(settings *retrievedSettings) { settings.closeFunc = closeFunc + }) +} + +func withStringRepresentation(stringRepresentation string) RetrievedOption { + return retrievedOptionFunc(func(settings *retrievedSettings) { + settings.stringRepresentation = stringRepresentation + settings.isSetString = true + }) +} + +// NewRetrievedFromYAML returns a new Retrieved instance that contains the deserialized data from the yaml bytes. +// * yamlBytes the yaml bytes that will be deserialized. +// * opts specifies options associated with this Retrieved value, such as CloseFunc. +func NewRetrievedFromYAML(yamlBytes []byte, opts ...RetrievedOption) (*Retrieved, error) { + var rawConf any + if err := yaml.Unmarshal(yamlBytes, &rawConf); err != nil { + // If the string is not valid YAML, we try to use it verbatim as a string. + strRep := string(yamlBytes) + return NewRetrieved(strRep, append(opts, withStringRepresentation(strRep))...) + } + + switch rawConf.(type) { + case string: + val := string(yamlBytes) + return NewRetrieved(val, append(opts, withStringRepresentation(val))...) + default: + opts = append(opts, withStringRepresentation(string(yamlBytes))) } + + return NewRetrieved(rawConf, opts...) } // NewRetrieved returns a new Retrieved instance that contains the data from the raw deserialized config. 
@@ -127,9 +171,14 @@ func NewRetrieved(rawConf any, opts ...RetrievedOption) (*Retrieved, error) { } set := retrievedSettings{} for _, opt := range opts { - opt(&set) + opt.apply(&set) } - return &Retrieved{rawConf: rawConf, closeFunc: set.closeFunc}, nil + return &Retrieved{ + rawConf: rawConf, + closeFunc: set.closeFunc, + stringRepresentation: set.stringRepresentation, + isSetString: set.isSetString, + }, nil } // AsConf returns the retrieved configuration parsed as a Conf. @@ -152,6 +201,20 @@ func (r *Retrieved) AsRaw() (any, error) { return r.rawConf, nil } +// AsString returns the retrieved configuration as a string. +// If the retrieved configuration is not convertible to a string unambiguously, an error is returned. +// If the retrieved configuration is a string, the string is returned. +// This method is used to resolve ${} references in inline position. +func (r *Retrieved) AsString() (string, error) { + if !r.isSetString { + if str, ok := r.rawConf.(string); ok { + return str, nil + } + return "", fmt.Errorf("retrieved value does not have unambiguous string representation: %v", r.rawConf) + } + return r.stringRepresentation, nil +} + // Close and release any watchers that Provider.Retrieve may have created. // // Should block until all resources are closed, and guarantee that `onChange` is not @@ -173,7 +236,7 @@ func checkRawConfType(rawConf any) error { return nil } switch rawConf.(type) { - case int, int32, int64, float32, float64, bool, string, []any, map[string]any: + case int, int32, int64, float32, float64, bool, string, []any, map[string]any, time.Time: return nil default: return fmt.Errorf( diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/envprovider/provider.go b/vendor/go.opentelemetry.io/collector/confmap/provider/envprovider/provider.go deleted file mode 100644 index 50192b5d994..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/provider/envprovider/provider.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package envprovider // import "go.opentelemetry.io/collector/confmap/provider/envprovider" - -import ( - "context" - "fmt" - "os" - "strings" - - "go.uber.org/zap" - - "go.opentelemetry.io/collector/confmap" - "go.opentelemetry.io/collector/confmap/internal/envvar" - "go.opentelemetry.io/collector/confmap/provider/internal" -) - -const ( - schemeName = "env" -) - -type provider struct { - logger *zap.Logger -} - -// NewFactory returns a factory for a confmap.Provider that reads the configuration from the given environment variable. 
-// -// This Provider supports "env" scheme, and can be called with a selector: -// `env:NAME_OF_ENVIRONMENT_VARIABLE` -func NewFactory() confmap.ProviderFactory { - return confmap.NewProviderFactory(newProvider) -} - -func newProvider(ps confmap.ProviderSettings) confmap.Provider { - return &provider{ - logger: ps.Logger, - } -} - -func (emp *provider) Retrieve(_ context.Context, uri string, _ confmap.WatcherFunc) (*confmap.Retrieved, error) { - if !strings.HasPrefix(uri, schemeName+":") { - return nil, fmt.Errorf("%q uri is not supported by %q provider", uri, schemeName) - } - envVarName := uri[len(schemeName)+1:] - if !envvar.ValidationRegexp.MatchString(envVarName) { - return nil, fmt.Errorf("environment variable %q has invalid name: must match regex %s", envVarName, envvar.ValidationPattern) - - } - val, exists := os.LookupEnv(envVarName) - if !exists { - emp.logger.Warn("Configuration references unset environment variable", zap.String("name", envVarName)) - } else if len(val) == 0 { - emp.logger.Info("Configuration references empty environment variable", zap.String("name", envVarName)) - } - - return internal.NewRetrievedFromYAML([]byte(val)) -} - -func (*provider) Scheme() string { - return schemeName -} - -func (*provider) Shutdown(context.Context) error { - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/fileprovider/provider.go b/vendor/go.opentelemetry.io/collector/confmap/provider/fileprovider/provider.go deleted file mode 100644 index fe958280cfb..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/provider/fileprovider/provider.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package fileprovider // import "go.opentelemetry.io/collector/confmap/provider/fileprovider" - -import ( - "context" - "fmt" - "os" - "path/filepath" - "strings" - - "go.opentelemetry.io/collector/confmap" - "go.opentelemetry.io/collector/confmap/provider/internal" -) - -const schemeName = "file" - -type provider struct{} - -// NewFactory returns a factory for a confmap.Provider that reads the configuration from a file. -// -// This Provider supports "file" scheme, and can be called with a "uri" that follows: -// -// file-uri = "file:" local-path -// local-path = [ drive-letter ] file-path -// drive-letter = ALPHA ":" -// -// The "file-path" can be relative or absolute, and it can be any OS supported format. -// -// Examples: -// `file:path/to/file` - relative path (unix, windows) -// `file:/path/to/file` - absolute path (unix, windows) -// `file:c:/path/to/file` - absolute path including drive-letter (windows) -// `file:c:\path\to\file` - absolute path including drive-letter (windows) -func NewFactory() confmap.ProviderFactory { - return confmap.NewProviderFactory(newProvider) -} - -func newProvider(confmap.ProviderSettings) confmap.Provider { - return &provider{} -} - -func (fmp *provider) Retrieve(_ context.Context, uri string, _ confmap.WatcherFunc) (*confmap.Retrieved, error) { - if !strings.HasPrefix(uri, schemeName+":") { - return nil, fmt.Errorf("%q uri is not supported by %q provider", uri, schemeName) - } - - // Clean the path before using it. 
- content, err := os.ReadFile(filepath.Clean(uri[len(schemeName)+1:])) - if err != nil { - return nil, fmt.Errorf("unable to read the file %v: %w", uri, err) - } - - return internal.NewRetrievedFromYAML(content) -} - -func (*provider) Scheme() string { - return schemeName -} - -func (*provider) Shutdown(context.Context) error { - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/httpprovider/Makefile b/vendor/go.opentelemetry.io/collector/confmap/provider/httpprovider/Makefile deleted file mode 100644 index bdd863a203b..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/provider/httpprovider/Makefile +++ /dev/null @@ -1 +0,0 @@ -include ../../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/httpprovider/README.md b/vendor/go.opentelemetry.io/collector/confmap/provider/httpprovider/README.md deleted file mode 100644 index 673fd2ccd2e..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/provider/httpprovider/README.md +++ /dev/null @@ -1,13 +0,0 @@ -What is this new component httpprovider? -- An implementation of `confmap.Provider` for HTTP (httpprovider) allows OTEL Collector the ability to load configuration for itself by fetching and reading config files stored in HTTP servers. - -How this new component httpprovider works? -- It will be called by `confmap.Resolver` to load configurations for OTEL Collector. -- By giving a config URI starting with prefix 'http://', this httpprovider will be used to download config files from given HTTP URIs, and then used the downloaded config files to deploy the OTEL Collector. -- In our code, we check the validity scheme and string pattern of HTTP URIs. And also check if there are any problems on config downloading and config deserialization. - -Expected URI format: -- http://... - -Prerequistes: -- Need to setup a HTTP server ahead, which returns with a config files according to the given URI \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/httpprovider/provider.go b/vendor/go.opentelemetry.io/collector/confmap/provider/httpprovider/provider.go deleted file mode 100644 index c47c1df5d99..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/provider/httpprovider/provider.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package httpprovider // import "go.opentelemetry.io/collector/confmap/provider/httpprovider" - -import ( - "go.opentelemetry.io/collector/confmap" - "go.opentelemetry.io/collector/confmap/provider/internal/configurablehttpprovider" -) - -// NewFactory returns a factory for a confmap.Provider that reads the configuration from a http server. -// -// This Provider supports "http" scheme. 
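As with the env and file providers above, the http provider is removed from this vendor tree; upstream distributes these confmap providers as separate Go modules under the same import paths. A sketch of how a host wires the provider factories into a resolver, assuming the separate modules keep the NewFactory constructors shown in the deleted code:

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/collector/confmap"
	"go.opentelemetry.io/collector/confmap/provider/envprovider"
	"go.opentelemetry.io/collector/confmap/provider/fileprovider"
)

func main() {
	res, err := confmap.NewResolver(confmap.ResolverSettings{
		URIs: []string{"file:config.yaml"},
		ProviderFactories: []confmap.ProviderFactory{
			fileprovider.NewFactory(),
			envprovider.NewFactory(), // resolves ${env:VAR} references
		},
	})
	if err != nil {
		panic(err)
	}
	conf, err := res.Resolve(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println(conf.AllKeys())
}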
-// -// One example for HTTP URI is: http://localhost:3333/getConfig -func NewFactory() confmap.ProviderFactory { - return confmap.NewProviderFactory(newProvider) -} - -func newProvider(set confmap.ProviderSettings) confmap.Provider { - return configurablehttpprovider.New(configurablehttpprovider.HTTPScheme, set) -} diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/httpsprovider/Makefile b/vendor/go.opentelemetry.io/collector/confmap/provider/httpsprovider/Makefile deleted file mode 100644 index bdd863a203b..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/provider/httpsprovider/Makefile +++ /dev/null @@ -1 +0,0 @@ -include ../../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/httpsprovider/README.md b/vendor/go.opentelemetry.io/collector/confmap/provider/httpsprovider/README.md deleted file mode 100644 index 270dd1b23dd..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/provider/httpsprovider/README.md +++ /dev/null @@ -1,18 +0,0 @@ -### What is the httpsprovider? - -An implementation of `confmap.Provider` for HTTPS (httpsprovider) allows OTEL Collector to use the HTTPS protocol to -load configuration files stored in web servers. - -Expected URI format: -- https://... - -### Prerequistes - -You need to setup a HTTP server with support to HTTPS. The server must have a certificate that can be validated in the -host running the collector using system root certificates. - -### Configuration - -At this moment, this component only support communicating with servers whose certificate can be verified using the root -CA certificates installed in the system. The process of adding more root CA certificates to the system is operating -system dependent. For Linux, please refer to the `update-ca-trust` command. diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/httpsprovider/provider.go b/vendor/go.opentelemetry.io/collector/confmap/provider/httpsprovider/provider.go deleted file mode 100644 index 579c15babf2..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/provider/httpsprovider/provider.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package httpsprovider // import "go.opentelemetry.io/collector/confmap/provider/httpsprovider" - -import ( - "go.opentelemetry.io/collector/confmap" - "go.opentelemetry.io/collector/confmap/provider/internal/configurablehttpprovider" -) - -// NewFactory returns a factory for a confmap.Provider that reads the configuration from a https server. -// -// This Provider supports "https" scheme. One example of an HTTPS URI is: https://localhost:3333/getConfig -// -// To add extra CA certificates you need to install certificates in the system pool. This procedure is operating system -// dependent. E.g.: on Linux please refer to the `update-ca-trust` command. 
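The CA handling these comments describe is implemented by configurablehttpprovider's createClient, deleted just below. A standalone sketch of the same pattern; the newTLSClient helper is hypothetical and not part of the deleted code:

package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"net/http"
	"os"
	"path/filepath"
)

// newTLSClient mirrors the deleted createClient: it trusts the system root
// CAs and optionally appends one extra CA bundle read from disk.
func newTLSClient(caCertPath string) (*http.Client, error) {
	pool, err := x509.SystemCertPool()
	if err != nil {
		return nil, fmt.Errorf("unable to create a cert pool: %w", err)
	}
	if caCertPath != "" {
		pem, err := os.ReadFile(filepath.Clean(caCertPath))
		if err != nil {
			return nil, fmt.Errorf("unable to read CA from %q: %w", caCertPath, err)
		}
		if ok := pool.AppendCertsFromPEM(pem); !ok {
			return nil, fmt.Errorf("unable to add CA from %s into the cert pool", caCertPath)
		}
	}
	return &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{RootCAs: pool},
		},
	}, nil
}

func main() {
	client, err := newTLSClient("") // system roots only
	if err != nil {
		panic(err)
	}
	_ = client // ready for client.Get("https://...")
}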
-func NewFactory() confmap.ProviderFactory { - return confmap.NewProviderFactory(newProvider) -} - -func newProvider(set confmap.ProviderSettings) confmap.Provider { - return configurablehttpprovider.New(configurablehttpprovider.HTTPSScheme, set) -} diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/internal/configurablehttpprovider/provider.go b/vendor/go.opentelemetry.io/collector/confmap/provider/internal/configurablehttpprovider/provider.go deleted file mode 100644 index c683d3d9a4e..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/provider/internal/configurablehttpprovider/provider.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package configurablehttpprovider // import "go.opentelemetry.io/collector/confmap/provider/internal/configurablehttpprovider" - -import ( - "context" - "crypto/tls" - "crypto/x509" - "fmt" - "io" - "net/http" - "os" - "path/filepath" - "strings" - - "go.opentelemetry.io/collector/confmap" - "go.opentelemetry.io/collector/confmap/provider/internal" -) - -type SchemeType string - -const ( - HTTPScheme SchemeType = "http" - HTTPSScheme SchemeType = "https" -) - -type provider struct { - scheme SchemeType - caCertPath string // Used for tests - insecureSkipVerify bool // Used for tests -} - -// New returns a new provider that reads the configuration from http server using the configured transport mechanism -// depending on the selected scheme. -// There are two types of transport supported: PlainText (HTTPScheme) and TLS (HTTPSScheme). -// -// One example for http-uri: http://localhost:3333/getConfig -// One example for https-uri: https://localhost:3333/getConfig -// This is used by the http and https external implementations. -func New(scheme SchemeType, _ confmap.ProviderSettings) confmap.Provider { - return &provider{scheme: scheme} -} - -// Create the client based on the type of scheme that was selected. 
-func (fmp *provider) createClient() (*http.Client, error) { - switch fmp.scheme { - case HTTPScheme: - return &http.Client{}, nil - case HTTPSScheme: - pool, err := x509.SystemCertPool() - - if err != nil { - return nil, fmt.Errorf("unable to create a cert pool: %w", err) - } - - if fmp.caCertPath != "" { - cert, err := os.ReadFile(filepath.Clean(fmp.caCertPath)) - - if err != nil { - return nil, fmt.Errorf("unable to read CA from %q URI: %w", fmp.caCertPath, err) - } - - if ok := pool.AppendCertsFromPEM(cert); !ok { - return nil, fmt.Errorf("unable to add CA from uri: %s into the cert pool", fmp.caCertPath) - } - } - - return &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: &tls.Config{ - InsecureSkipVerify: fmp.insecureSkipVerify, - RootCAs: pool, - }, - }, - }, nil - default: - return nil, fmt.Errorf("invalid scheme type: %s", fmp.scheme) - } -} - -func (fmp *provider) Retrieve(_ context.Context, uri string, _ confmap.WatcherFunc) (*confmap.Retrieved, error) { - - if !strings.HasPrefix(uri, string(fmp.scheme)+":") { - return nil, fmt.Errorf("%q uri is not supported by %q provider", uri, string(fmp.scheme)) - } - - client, err := fmp.createClient() - - if err != nil { - return nil, fmt.Errorf("unable to configure http transport layer: %w", err) - } - - // send a HTTP GET request - resp, err := client.Get(uri) - if err != nil { - return nil, fmt.Errorf("unable to download the file via HTTP GET for uri %q: %w ", uri, err) - } - defer resp.Body.Close() - - // check the HTTP status code - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("failed to load resource from uri %q. status code: %d", uri, resp.StatusCode) - } - - // read the response body - body, err := io.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("fail to read the response body from uri %q: %w", uri, err) - } - - return internal.NewRetrievedFromYAML(body) -} - -func (fmp *provider) Scheme() string { - return string(fmp.scheme) -} - -func (*provider) Shutdown(context.Context) error { - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/internal/provider.go b/vendor/go.opentelemetry.io/collector/confmap/provider/internal/provider.go deleted file mode 100644 index 5a378997529..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/provider/internal/provider.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package internal // import "go.opentelemetry.io/collector/confmap/provider/internal" - -import ( - "gopkg.in/yaml.v3" - - "go.opentelemetry.io/collector/confmap" -) - -// NewRetrievedFromYAML returns a new Retrieved instance that contains the deserialized data from the yaml bytes. -// * yamlBytes the yaml bytes that will be deserialized. -// * opts specifies options associated with this Retrieved value, such as CloseFunc. -func NewRetrievedFromYAML(yamlBytes []byte, opts ...confmap.RetrievedOption) (*confmap.Retrieved, error) { - var rawConf any - if err := yaml.Unmarshal(yamlBytes, &rawConf); err != nil { - return nil, err - } - return confmap.NewRetrieved(rawConf, opts...) 
-} diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/yamlprovider/Makefile b/vendor/go.opentelemetry.io/collector/confmap/provider/yamlprovider/Makefile deleted file mode 100644 index bdd863a203b..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/provider/yamlprovider/Makefile +++ /dev/null @@ -1 +0,0 @@ -include ../../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/yamlprovider/provider.go b/vendor/go.opentelemetry.io/collector/confmap/provider/yamlprovider/provider.go deleted file mode 100644 index 949fc71bf7d..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/provider/yamlprovider/provider.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package yamlprovider // import "go.opentelemetry.io/collector/confmap/provider/yamlprovider" - -import ( - "context" - "fmt" - "strings" - - "go.opentelemetry.io/collector/confmap" - "go.opentelemetry.io/collector/confmap/provider/internal" -) - -const schemeName = "yaml" - -type provider struct{} - -// NewFactory returns a factory for a confmap.Provider that allows to provide yaml bytes. -// -// This Provider supports "yaml" scheme, and can be called with a "uri" that follows: -// -// bytes-uri = "yaml:" yaml-bytes -// -// Examples: -// `yaml:processors::batch::timeout: 2s` -// `yaml:processors::batch/foo::timeout: 3s` -func NewFactory() confmap.ProviderFactory { - return confmap.NewProviderFactory(newProvider) -} - -func newProvider(confmap.ProviderSettings) confmap.Provider { - return &provider{} -} - -func (s *provider) Retrieve(_ context.Context, uri string, _ confmap.WatcherFunc) (*confmap.Retrieved, error) { - if !strings.HasPrefix(uri, schemeName+":") { - return nil, fmt.Errorf("%q uri is not supported by %q provider", uri, schemeName) - } - - return internal.NewRetrievedFromYAML([]byte(uri[len(schemeName)+1:])) -} - -func (*provider) Scheme() string { - return schemeName -} - -func (s *provider) Shutdown(context.Context) error { - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/confmap/resolver.go b/vendor/go.opentelemetry.io/collector/confmap/resolver.go index 7c9ed303c73..e635ea99564 100644 --- a/vendor/go.opentelemetry.io/collector/confmap/resolver.go +++ b/vendor/go.opentelemetry.io/collector/confmap/resolver.go @@ -96,7 +96,17 @@ func NewResolver(set ResolverSettings) (*Resolver, error) { providers := make(map[string]Provider, len(set.ProviderFactories)) for _, factory := range set.ProviderFactories { provider := factory.Create(set.ProviderSettings) - providers[provider.Scheme()] = provider + scheme := provider.Scheme() + // Check that the scheme follows the pattern. + if !regexp.MustCompile(schemePattern).MatchString(scheme) { + return nil, fmt.Errorf("invalid 'confmap.Provider' scheme %q", scheme) + } + // Check that the scheme is unique. 
+		if _, ok := providers[scheme]; ok {
+			return nil, fmt.Errorf("duplicate 'confmap.Provider' scheme %q", scheme)
+		}
+
+		providers[scheme] = provider
 	}
 
 	if set.DefaultScheme != "" {
@@ -167,11 +177,11 @@ func (mr *Resolver) Resolve(ctx context.Context) (*Conf, error) {
 
 	cfgMap := make(map[string]any)
 	for _, k := range retMap.AllKeys() {
-		val, err := mr.expandValueRecursively(ctx, retMap.Get(k))
+		val, err := mr.expandValueRecursively(ctx, retMap.unsanitizedGet(k))
 		if err != nil {
 			return nil, err
 		}
-		cfgMap[k] = val
+		cfgMap[k] = escapeDollarSigns(val)
 	}
 	retMap = NewFromStringMap(cfgMap)
@@ -185,6 +195,31 @@ func (mr *Resolver) Resolve(ctx context.Context) (*Conf, error) {
 	return retMap, nil
 }
 
+func escapeDollarSigns(val any) any {
+	switch v := val.(type) {
+	case string:
+		return strings.ReplaceAll(v, "$$", "$")
+	case expandedValue:
+		v.Original = strings.ReplaceAll(v.Original, "$$", "$")
+		v.Value = escapeDollarSigns(v.Value)
+		return v
+	case []any:
+		nslice := make([]any, len(v))
+		for i, x := range v {
+			nslice[i] = escapeDollarSigns(x)
+		}
+		return nslice
+	case map[string]any:
+		nmap := make(map[string]any, len(v))
+		for k, x := range v {
+			nmap[k] = escapeDollarSigns(x)
+		}
+		return nmap
+	default:
+		return val
+	}
+}
+
 // Watch blocks until any configuration change was detected or an unrecoverable error
 // happened during monitoring the configuration changes.
 //
diff --git a/vendor/go.opentelemetry.io/collector/connector/README.md b/vendor/go.opentelemetry.io/collector/connector/README.md
index 51377534628..02477644c4d 100644
--- a/vendor/go.opentelemetry.io/collector/connector/README.md
+++ b/vendor/go.opentelemetry.io/collector/connector/README.md
@@ -1,7 +1,7 @@
 # Connectors
 
 A connector is both an exporter and receiver. As the name suggests a Connector connects
-two pipelines: it emits data as an exporter at the end of one pipeline and consumes data
+two pipelines: it consumes data as an exporter at the end of one pipeline and emits data
 as a receiver at the start of another pipeline. It may consume and emit data of the same
 data type, or of different data types. A connector may generate and emit data to summarize
 the consumed data, or it may simply replicate or route data.
diff --git a/vendor/go.opentelemetry.io/collector/connector/connector.go b/vendor/go.opentelemetry.io/collector/connector/connector.go
index 94927092117..2ae78f26a6b 100644
--- a/vendor/go.opentelemetry.io/collector/connector/connector.go
+++ b/vendor/go.opentelemetry.io/collector/connector/connector.go
@@ -5,17 +5,12 @@ package connector // import "go.opentelemetry.io/collector/connector"
 
 import (
 	"context"
-	"errors"
 	"fmt"
 
-	"go.uber.org/zap"
-
 	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/connector/internal"
 	"go.opentelemetry.io/collector/consumer"
-)
-
-var (
-	errNilNextConsumer = errors.New("nil next Consumer")
+	"go.opentelemetry.io/collector/pipeline"
 )
 
 // A Traces connector acts as an exporter from a traces pipeline and a receiver
@@ -66,8 +61,8 @@ type Logs interface {
 	consumer.Logs
 }
 
-// CreateSettings configures Connector creators.
-type CreateSettings struct {
+// Settings configures Connector creators.
+type Settings struct {
 	// ID returns the ID of the component that will be created.
 	ID component.ID
 
@@ -77,7 +72,7 @@ type CreateSettings struct {
 	BuildInfo component.BuildInfo
 }
 
-// Factory is factory interface for connectors.
+// Factory is a factory interface for connectors.
 //
 // This interface cannot be directly implemented.
Implementations must // use the NewFactory to implement it. @@ -93,17 +88,17 @@ type Factory interface { // tests of any implementation of the Factory interface. CreateDefaultConfig() component.Config - CreateTracesToTraces(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Traces) (Traces, error) - CreateTracesToMetrics(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Metrics) (Traces, error) - CreateTracesToLogs(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Logs) (Traces, error) + CreateTracesToTraces(ctx context.Context, set Settings, cfg component.Config, next consumer.Traces) (Traces, error) + CreateTracesToMetrics(ctx context.Context, set Settings, cfg component.Config, next consumer.Metrics) (Traces, error) + CreateTracesToLogs(ctx context.Context, set Settings, cfg component.Config, next consumer.Logs) (Traces, error) - CreateMetricsToTraces(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Traces) (Metrics, error) - CreateMetricsToMetrics(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Metrics) (Metrics, error) - CreateMetricsToLogs(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Logs) (Metrics, error) + CreateMetricsToTraces(ctx context.Context, set Settings, cfg component.Config, next consumer.Traces) (Metrics, error) + CreateMetricsToMetrics(ctx context.Context, set Settings, cfg component.Config, next consumer.Metrics) (Metrics, error) + CreateMetricsToLogs(ctx context.Context, set Settings, cfg component.Config, next consumer.Logs) (Metrics, error) - CreateLogsToTraces(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Traces) (Logs, error) - CreateLogsToMetrics(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Metrics) (Logs, error) - CreateLogsToLogs(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Logs) (Logs, error) + CreateLogsToTraces(ctx context.Context, set Settings, cfg component.Config, next consumer.Traces) (Logs, error) + CreateLogsToMetrics(ctx context.Context, set Settings, cfg component.Config, next consumer.Metrics) (Logs, error) + CreateLogsToLogs(ctx context.Context, set Settings, cfg component.Config, next consumer.Logs) (Logs, error) TracesToTracesStability() component.StabilityLevel TracesToMetricsStability() component.StabilityLevel @@ -136,185 +131,104 @@ func (f factoryOptionFunc) apply(o *factory) { } // CreateTracesToTracesFunc is the equivalent of Factory.CreateTracesToTraces(). -type CreateTracesToTracesFunc func(context.Context, CreateSettings, component.Config, consumer.Traces) (Traces, error) +type CreateTracesToTracesFunc func(context.Context, Settings, component.Config, consumer.Traces) (Traces, error) // CreateTracesToTraces implements Factory.CreateTracesToTraces(). 
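The CreateSettings-to-Settings rename propagates through every Create* signature below. A minimal sketch of registering a single traces-to-traces creator against the renamed type, assuming connector.NewFactory keeps its variadic-option shape; the example type and createT2T function are hypothetical:

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/connector"
	"go.opentelemetry.io/collector/consumer"
)

type exampleCfg struct{}

// createT2T only shows the wiring; a real connector would wrap `next`
// and forward (possibly transformed) data to it.
func createT2T(_ context.Context, _ connector.Settings, _ component.Config, next consumer.Traces) (connector.Traces, error) {
	return nil, nil
}

func main() {
	f := connector.NewFactory(
		component.MustNewType("example"),
		func() component.Config { return &exampleCfg{} },
		connector.WithTracesToTraces(createT2T, component.StabilityLevelDevelopment),
	)
	fmt.Println(f.Type(), f.TracesToTracesStability())
}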
-func (f CreateTracesToTracesFunc) CreateTracesToTraces( - ctx context.Context, - set CreateSettings, - cfg component.Config, - nextConsumer consumer.Traces) (Traces, error) { +func (f CreateTracesToTracesFunc) CreateTracesToTraces(ctx context.Context, set Settings, cfg component.Config, next consumer.Traces) (Traces, error) { if f == nil { - return nil, errDataTypes(set.ID, component.DataTypeTraces, component.DataTypeTraces) + return nil, internal.ErrDataTypes(set.ID, pipeline.SignalTraces, pipeline.SignalTraces) } - return f(ctx, set, cfg, nextConsumer) + return f(ctx, set, cfg, next) } // CreateTracesToMetricsFunc is the equivalent of Factory.CreateTracesToMetrics(). -type CreateTracesToMetricsFunc func(context.Context, CreateSettings, component.Config, consumer.Metrics) (Traces, error) +type CreateTracesToMetricsFunc func(context.Context, Settings, component.Config, consumer.Metrics) (Traces, error) // CreateTracesToMetrics implements Factory.CreateTracesToMetrics(). -func (f CreateTracesToMetricsFunc) CreateTracesToMetrics( - ctx context.Context, - set CreateSettings, - cfg component.Config, - nextConsumer consumer.Metrics, -) (Traces, error) { +func (f CreateTracesToMetricsFunc) CreateTracesToMetrics(ctx context.Context, set Settings, cfg component.Config, next consumer.Metrics) (Traces, error) { if f == nil { - return nil, errDataTypes(set.ID, component.DataTypeTraces, component.DataTypeMetrics) + return nil, internal.ErrDataTypes(set.ID, pipeline.SignalTraces, pipeline.SignalMetrics) } - return f(ctx, set, cfg, nextConsumer) + return f(ctx, set, cfg, next) } // CreateTracesToLogsFunc is the equivalent of Factory.CreateTracesToLogs(). -type CreateTracesToLogsFunc func(context.Context, CreateSettings, component.Config, consumer.Logs) (Traces, error) +type CreateTracesToLogsFunc func(context.Context, Settings, component.Config, consumer.Logs) (Traces, error) // CreateTracesToLogs implements Factory.CreateTracesToLogs(). -func (f CreateTracesToLogsFunc) CreateTracesToLogs( - ctx context.Context, - set CreateSettings, - cfg component.Config, - nextConsumer consumer.Logs, -) (Traces, error) { +func (f CreateTracesToLogsFunc) CreateTracesToLogs(ctx context.Context, set Settings, cfg component.Config, next consumer.Logs) (Traces, error) { if f == nil { - return nil, errDataTypes(set.ID, component.DataTypeTraces, component.DataTypeLogs) + return nil, internal.ErrDataTypes(set.ID, pipeline.SignalTraces, pipeline.SignalLogs) } - return f(ctx, set, cfg, nextConsumer) + return f(ctx, set, cfg, next) } // CreateMetricsToTracesFunc is the equivalent of Factory.CreateMetricsToTraces(). -type CreateMetricsToTracesFunc func(context.Context, CreateSettings, component.Config, consumer.Traces) (Metrics, error) +type CreateMetricsToTracesFunc func(context.Context, Settings, component.Config, consumer.Traces) (Metrics, error) // CreateMetricsToTraces implements Factory.CreateMetricsToTraces(). 
-func (f CreateMetricsToTracesFunc) CreateMetricsToTraces(
-	ctx context.Context,
-	set CreateSettings,
-	cfg component.Config,
-	nextConsumer consumer.Traces,
-) (Metrics, error) {
+func (f CreateMetricsToTracesFunc) CreateMetricsToTraces(ctx context.Context, set Settings, cfg component.Config, next consumer.Traces) (Metrics, error) {
 	if f == nil {
-		return nil, errDataTypes(set.ID, component.DataTypeMetrics, component.DataTypeTraces)
+		return nil, internal.ErrDataTypes(set.ID, pipeline.SignalMetrics, pipeline.SignalTraces)
 	}
-	return f(ctx, set, cfg, nextConsumer)
+	return f(ctx, set, cfg, next)
 }
 
 // CreateMetricsToMetricsFunc is the equivalent of Factory.CreateMetricsToMetrics().
-type CreateMetricsToMetricsFunc func(context.Context, CreateSettings, component.Config, consumer.Metrics) (Metrics, error)
+type CreateMetricsToMetricsFunc func(context.Context, Settings, component.Config, consumer.Metrics) (Metrics, error)
 
 // CreateMetricsToMetrics implements Factory.CreateMetricsToMetrics().
-func (f CreateMetricsToMetricsFunc) CreateMetricsToMetrics(
-	ctx context.Context,
-	set CreateSettings,
-	cfg component.Config,
-	nextConsumer consumer.Metrics,
-) (Metrics, error) {
+func (f CreateMetricsToMetricsFunc) CreateMetricsToMetrics(ctx context.Context, set Settings, cfg component.Config, next consumer.Metrics) (Metrics, error) {
 	if f == nil {
-		return nil, errDataTypes(set.ID, component.DataTypeMetrics, component.DataTypeMetrics)
+		return nil, internal.ErrDataTypes(set.ID, pipeline.SignalMetrics, pipeline.SignalMetrics)
 	}
-	return f(ctx, set, cfg, nextConsumer)
+	return f(ctx, set, cfg, next)
 }
 
 // CreateMetricsToLogsFunc is the equivalent of Factory.CreateMetricsToLogs().
-type CreateMetricsToLogsFunc func(context.Context, CreateSettings, component.Config, consumer.Logs) (Metrics, error)
+type CreateMetricsToLogsFunc func(context.Context, Settings, component.Config, consumer.Logs) (Metrics, error)
 
 // CreateMetricsToLogs implements Factory.CreateMetricsToLogs().
-func (f CreateMetricsToLogsFunc) CreateMetricsToLogs(
-	ctx context.Context,
-	set CreateSettings,
-	cfg component.Config,
-	nextConsumer consumer.Logs,
-) (Metrics, error) {
+func (f CreateMetricsToLogsFunc) CreateMetricsToLogs(ctx context.Context, set Settings, cfg component.Config, next consumer.Logs) (Metrics, error) {
 	if f == nil {
-		return nil, errDataTypes(set.ID, component.DataTypeMetrics, component.DataTypeLogs)
+		return nil, internal.ErrDataTypes(set.ID, pipeline.SignalMetrics, pipeline.SignalLogs)
 	}
-	return f(ctx, set, cfg, nextConsumer)
+	return f(ctx, set, cfg, next)
 }
 
 // CreateLogsToTracesFunc is the equivalent of Factory.CreateLogsToTraces().
-type CreateLogsToTracesFunc func(context.Context, CreateSettings, component.Config, consumer.Traces) (Logs, error)
+type CreateLogsToTracesFunc func(context.Context, Settings, component.Config, consumer.Traces) (Logs, error)
 
 // CreateLogsToTraces implements Factory.CreateLogsToTraces().
-func (f CreateLogsToTracesFunc) CreateLogsToTraces( - ctx context.Context, - set CreateSettings, - cfg component.Config, - nextConsumer consumer.Traces, -) (Logs, error) { +func (f CreateLogsToTracesFunc) CreateLogsToTraces(ctx context.Context, set Settings, cfg component.Config, next consumer.Traces) (Logs, error) { if f == nil { - return nil, errDataTypes(set.ID, component.DataTypeLogs, component.DataTypeTraces) + return nil, internal.ErrDataTypes(set.ID, pipeline.SignalLogs, pipeline.SignalTraces) } - return f(ctx, set, cfg, nextConsumer) + return f(ctx, set, cfg, next) } // CreateLogsToMetricsFunc is the equivalent of Factory.CreateLogsToMetrics(). -type CreateLogsToMetricsFunc func(context.Context, CreateSettings, component.Config, consumer.Metrics) (Logs, error) +type CreateLogsToMetricsFunc func(context.Context, Settings, component.Config, consumer.Metrics) (Logs, error) // CreateLogsToMetrics implements Factory.CreateLogsToMetrics(). -func (f CreateLogsToMetricsFunc) CreateLogsToMetrics( - ctx context.Context, - set CreateSettings, - cfg component.Config, - nextConsumer consumer.Metrics, -) (Logs, error) { +func (f CreateLogsToMetricsFunc) CreateLogsToMetrics(ctx context.Context, set Settings, cfg component.Config, next consumer.Metrics) (Logs, error) { if f == nil { - return nil, errDataTypes(set.ID, component.DataTypeLogs, component.DataTypeMetrics) + return nil, internal.ErrDataTypes(set.ID, pipeline.SignalLogs, pipeline.SignalMetrics) } - return f(ctx, set, cfg, nextConsumer) + return f(ctx, set, cfg, next) } // CreateLogsToLogsFunc is the equivalent of Factory.CreateLogsToLogs(). -type CreateLogsToLogsFunc func(context.Context, CreateSettings, component.Config, consumer.Logs) (Logs, error) +type CreateLogsToLogsFunc func(context.Context, Settings, component.Config, consumer.Logs) (Logs, error) // CreateLogsToLogs implements Factory.CreateLogsToLogs(). -func (f CreateLogsToLogsFunc) CreateLogsToLogs( - ctx context.Context, - set CreateSettings, - cfg component.Config, - nextConsumer consumer.Logs, -) (Logs, error) { +func (f CreateLogsToLogsFunc) CreateLogsToLogs(ctx context.Context, set Settings, cfg component.Config, next consumer.Logs) (Logs, error) { if f == nil { - return nil, errDataTypes(set.ID, component.DataTypeLogs, component.DataTypeLogs) + return nil, internal.ErrDataTypes(set.ID, pipeline.SignalLogs, pipeline.SignalLogs) } - return f(ctx, set, cfg, nextConsumer) -} - -// factory implements Factory. -type factory struct { - cfgType component.Type - component.CreateDefaultConfigFunc - - CreateTracesToTracesFunc - CreateTracesToMetricsFunc - CreateTracesToLogsFunc - - CreateMetricsToTracesFunc - CreateMetricsToMetricsFunc - CreateMetricsToLogsFunc - - CreateLogsToTracesFunc - CreateLogsToMetricsFunc - CreateLogsToLogsFunc - - tracesToTracesStabilityLevel component.StabilityLevel - tracesToMetricsStabilityLevel component.StabilityLevel - tracesToLogsStabilityLevel component.StabilityLevel - - metricsToTracesStabilityLevel component.StabilityLevel - metricsToMetricsStabilityLevel component.StabilityLevel - metricsToLogsStabilityLevel component.StabilityLevel - - logsToTracesStabilityLevel component.StabilityLevel - logsToMetricsStabilityLevel component.StabilityLevel - logsToLogsStabilityLevel component.StabilityLevel + return f(ctx, set, cfg, next) } -// Type returns the type of component. 
-func (f *factory) Type() component.Type { - return f.cfgType -} - -func (f *factory) unexportedFactoryFunc() {} - // WithTracesToTraces overrides the default "error not supported" implementation for WithTracesToTraces and the default "undefined" stability level. func WithTracesToTraces(createTracesToTraces CreateTracesToTracesFunc, sl component.StabilityLevel) FactoryOption { return factoryOptionFunc(func(o *factory) { @@ -387,39 +301,76 @@ func WithLogsToLogs(createLogsToLogs CreateLogsToLogsFunc, sl component.Stabilit }) } -func (f factory) TracesToTracesStability() component.StabilityLevel { +// factory implements the Factory interface. +type factory struct { + cfgType component.Type + component.CreateDefaultConfigFunc + + CreateTracesToTracesFunc + CreateTracesToMetricsFunc + CreateTracesToLogsFunc + + CreateMetricsToTracesFunc + CreateMetricsToMetricsFunc + CreateMetricsToLogsFunc + + CreateLogsToTracesFunc + CreateLogsToMetricsFunc + CreateLogsToLogsFunc + + tracesToTracesStabilityLevel component.StabilityLevel + tracesToMetricsStabilityLevel component.StabilityLevel + tracesToLogsStabilityLevel component.StabilityLevel + + metricsToTracesStabilityLevel component.StabilityLevel + metricsToMetricsStabilityLevel component.StabilityLevel + metricsToLogsStabilityLevel component.StabilityLevel + + logsToTracesStabilityLevel component.StabilityLevel + logsToMetricsStabilityLevel component.StabilityLevel + logsToLogsStabilityLevel component.StabilityLevel +} + +// Type returns the type of component. +func (f *factory) Type() component.Type { + return f.cfgType +} + +func (f *factory) unexportedFactoryFunc() {} + +func (f *factory) TracesToTracesStability() component.StabilityLevel { return f.tracesToTracesStabilityLevel } -func (f factory) TracesToMetricsStability() component.StabilityLevel { +func (f *factory) TracesToMetricsStability() component.StabilityLevel { return f.tracesToMetricsStabilityLevel } -func (f factory) TracesToLogsStability() component.StabilityLevel { +func (f *factory) TracesToLogsStability() component.StabilityLevel { return f.tracesToLogsStabilityLevel } -func (f factory) MetricsToTracesStability() component.StabilityLevel { +func (f *factory) MetricsToTracesStability() component.StabilityLevel { return f.metricsToTracesStabilityLevel } -func (f factory) MetricsToMetricsStability() component.StabilityLevel { +func (f *factory) MetricsToMetricsStability() component.StabilityLevel { return f.metricsToMetricsStabilityLevel } -func (f factory) MetricsToLogsStability() component.StabilityLevel { +func (f *factory) MetricsToLogsStability() component.StabilityLevel { return f.metricsToLogsStabilityLevel } -func (f factory) LogsToTracesStability() component.StabilityLevel { +func (f *factory) LogsToTracesStability() component.StabilityLevel { return f.logsToTracesStabilityLevel } -func (f factory) LogsToMetricsStability() component.StabilityLevel { +func (f *factory) LogsToMetricsStability() component.StabilityLevel { return f.logsToMetricsStabilityLevel } -func (f factory) LogsToLogsStability() component.StabilityLevel { +func (f *factory) LogsToLogsStability() component.StabilityLevel { return f.logsToLogsStabilityLevel } @@ -447,209 +398,3 @@ func MakeFactoryMap(factories ...Factory) (map[component.Type]Factory, error) { } return fMap, nil } - -// Builder processor is a helper struct that given a set of Configs and Factories helps with creating processors. 
-type Builder struct { - cfgs map[component.ID]component.Config - factories map[component.Type]Factory -} - -// NewBuilder creates a new connector.Builder to help with creating components form a set of configs and factories. -func NewBuilder(cfgs map[component.ID]component.Config, factories map[component.Type]Factory) *Builder { - return &Builder{cfgs: cfgs, factories: factories} -} - -// CreateTracesToTraces creates a Traces connector based on the settings and config. -func (b *Builder) CreateTracesToTraces(ctx context.Context, set CreateSettings, next consumer.Traces) (Traces, error) { - if next == nil { - return nil, errNilNextConsumer - } - cfg, existsCfg := b.cfgs[set.ID] - if !existsCfg { - return nil, fmt.Errorf("connector %q is not configured", set.ID) - } - - f, existsFactory := b.factories[set.ID.Type()] - if !existsFactory { - return nil, fmt.Errorf("connector factory not available for: %q", set.ID) - } - - logStabilityLevel(set.Logger, f.TracesToTracesStability()) - return f.CreateTracesToTraces(ctx, set, cfg, next) -} - -// CreateTracesToMetrics creates a Traces connector based on the settings and config. -func (b *Builder) CreateTracesToMetrics(ctx context.Context, set CreateSettings, next consumer.Metrics) (Traces, error) { - if next == nil { - return nil, errNilNextConsumer - } - cfg, existsCfg := b.cfgs[set.ID] - if !existsCfg { - return nil, fmt.Errorf("connector %q is not configured", set.ID) - } - - f, existsFactory := b.factories[set.ID.Type()] - if !existsFactory { - return nil, fmt.Errorf("connector factory not available for: %q", set.ID) - } - - logStabilityLevel(set.Logger, f.TracesToMetricsStability()) - return f.CreateTracesToMetrics(ctx, set, cfg, next) -} - -// CreateTracesToLogs creates a Traces connector based on the settings and config. -func (b *Builder) CreateTracesToLogs(ctx context.Context, set CreateSettings, next consumer.Logs) (Traces, error) { - if next == nil { - return nil, errNilNextConsumer - } - cfg, existsCfg := b.cfgs[set.ID] - if !existsCfg { - return nil, fmt.Errorf("connector %q is not configured", set.ID) - } - - f, existsFactory := b.factories[set.ID.Type()] - if !existsFactory { - return nil, fmt.Errorf("connector factory not available for: %q", set.ID) - } - - logStabilityLevel(set.Logger, f.TracesToLogsStability()) - return f.CreateTracesToLogs(ctx, set, cfg, next) -} - -// CreateMetricsToTraces creates a Metrics connector based on the settings and config. -func (b *Builder) CreateMetricsToTraces(ctx context.Context, set CreateSettings, next consumer.Traces) (Metrics, error) { - if next == nil { - return nil, errNilNextConsumer - } - cfg, existsCfg := b.cfgs[set.ID] - if !existsCfg { - return nil, fmt.Errorf("connector %q is not configured", set.ID) - } - - f, existsFactory := b.factories[set.ID.Type()] - if !existsFactory { - return nil, fmt.Errorf("connector factory not available for: %q", set.ID) - } - - logStabilityLevel(set.Logger, f.MetricsToTracesStability()) - return f.CreateMetricsToTraces(ctx, set, cfg, next) -} - -// CreateMetricsToMetrics creates a Metrics connector based on the settings and config. 
-func (b *Builder) CreateMetricsToMetrics(ctx context.Context, set CreateSettings, next consumer.Metrics) (Metrics, error) { - if next == nil { - return nil, errNilNextConsumer - } - cfg, existsCfg := b.cfgs[set.ID] - if !existsCfg { - return nil, fmt.Errorf("connector %q is not configured", set.ID) - } - - f, existsFactory := b.factories[set.ID.Type()] - if !existsFactory { - return nil, fmt.Errorf("connector factory not available for: %q", set.ID) - } - - logStabilityLevel(set.Logger, f.MetricsToMetricsStability()) - return f.CreateMetricsToMetrics(ctx, set, cfg, next) -} - -// CreateMetricsToLogs creates a Metrics connector based on the settings and config. -func (b *Builder) CreateMetricsToLogs(ctx context.Context, set CreateSettings, next consumer.Logs) (Metrics, error) { - if next == nil { - return nil, errNilNextConsumer - } - cfg, existsCfg := b.cfgs[set.ID] - if !existsCfg { - return nil, fmt.Errorf("connector %q is not configured", set.ID) - } - - f, existsFactory := b.factories[set.ID.Type()] - if !existsFactory { - return nil, fmt.Errorf("connector factory not available for: %q", set.ID) - } - - logStabilityLevel(set.Logger, f.MetricsToLogsStability()) - return f.CreateMetricsToLogs(ctx, set, cfg, next) -} - -// CreateLogsToTraces creates a Logs connector based on the settings and config. -func (b *Builder) CreateLogsToTraces(ctx context.Context, set CreateSettings, next consumer.Traces) (Logs, error) { - if next == nil { - return nil, errNilNextConsumer - } - cfg, existsCfg := b.cfgs[set.ID] - if !existsCfg { - return nil, fmt.Errorf("connector %q is not configured", set.ID) - } - - f, existsFactory := b.factories[set.ID.Type()] - if !existsFactory { - return nil, fmt.Errorf("connector factory not available for: %q", set.ID) - } - - logStabilityLevel(set.Logger, f.LogsToTracesStability()) - return f.CreateLogsToTraces(ctx, set, cfg, next) -} - -// CreateLogsToMetrics creates a Logs connector based on the settings and config. -func (b *Builder) CreateLogsToMetrics(ctx context.Context, set CreateSettings, next consumer.Metrics) (Logs, error) { - if next == nil { - return nil, errNilNextConsumer - } - cfg, existsCfg := b.cfgs[set.ID] - if !existsCfg { - return nil, fmt.Errorf("connector %q is not configured", set.ID) - } - - f, existsFactory := b.factories[set.ID.Type()] - if !existsFactory { - return nil, fmt.Errorf("connector factory not available for: %q", set.ID) - } - - logStabilityLevel(set.Logger, f.LogsToMetricsStability()) - return f.CreateLogsToMetrics(ctx, set, cfg, next) -} - -// CreateLogsToLogs creates a Logs connector based on the settings and config. -func (b *Builder) CreateLogsToLogs(ctx context.Context, set CreateSettings, next consumer.Logs) (Logs, error) { - if next == nil { - return nil, errNilNextConsumer - } - cfg, existsCfg := b.cfgs[set.ID] - if !existsCfg { - return nil, fmt.Errorf("connector %q is not configured", set.ID) - } - - f, existsFactory := b.factories[set.ID.Type()] - if !existsFactory { - return nil, fmt.Errorf("connector factory not available for: %q", set.ID) - } - - logStabilityLevel(set.Logger, f.LogsToLogsStability()) - return f.CreateLogsToLogs(ctx, set, cfg, next) -} - -func (b *Builder) IsConfigured(componentID component.ID) bool { - _, ok := b.cfgs[componentID] - return ok -} - -func (b *Builder) Factory(componentType component.Type) component.Factory { - return b.factories[componentType] -} - -// logStabilityLevel logs the stability level of a component. 
The log level is set to info for -// undefined, unmaintained, deprecated and development. The log level is set to debug -// for alpha, beta and stable. -func logStabilityLevel(logger *zap.Logger, sl component.StabilityLevel) { - if sl >= component.StabilityLevelAlpha { - logger.Debug(sl.LogMessage()) - } else { - logger.Info(sl.LogMessage()) - } -} - -func errDataTypes(id component.ID, from, to component.DataType) error { - return fmt.Errorf("connector %q cannot connect from %s to %s: %w", id, from, to, component.ErrDataTypeIsNotSupported) -} diff --git a/vendor/go.opentelemetry.io/collector/connector/connectortest/LICENSE b/vendor/go.opentelemetry.io/collector/connector/connectortest/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/connector/connectortest/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/go.opentelemetry.io/collector/connector/connectortest/Makefile b/vendor/go.opentelemetry.io/collector/connector/connectortest/Makefile
new file mode 100644
index 00000000000..ded7a36092d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/connector/connectortest/Makefile
@@ -0,0 +1 @@
+include ../../Makefile.Common
diff --git a/vendor/go.opentelemetry.io/collector/connector/connectortest/connector.go b/vendor/go.opentelemetry.io/collector/connector/connectortest/connector.go
new file mode 100644
index 00000000000..fbc8febe94f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/connector/connectortest/connector.go
@@ -0,0 +1,128 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package connectortest // import "go.opentelemetry.io/collector/connector/connectortest"
+
+import (
+	"context"
+
+	"github.com/google/uuid"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/component/componenttest"
+	"go.opentelemetry.io/collector/connector"
+	"go.opentelemetry.io/collector/connector/xconnector"
+	"go.opentelemetry.io/collector/consumer"
+	"go.opentelemetry.io/collector/consumer/consumertest"
+	"go.opentelemetry.io/collector/consumer/xconsumer"
+)
+
+var nopType = component.MustNewType("nop")
+
+// NewNopSettings returns a new nop settings for Create* functions.
+func NewNopSettings() connector.Settings {
+	return connector.Settings{
+		ID:                component.NewIDWithName(nopType, uuid.NewString()),
+		TelemetrySettings: componenttest.NewNopTelemetrySettings(),
+		BuildInfo:         component.NewDefaultBuildInfo(),
+	}
+}
+
+type nopConfig struct{}
+
+// NewNopFactory returns a connector.Factory that constructs nop connectors.
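For downstream tests, NewNopFactory and NewNopSettings (whose definitions follow) compose with the standard consumertest helpers. A sketch of the typical wiring; the test name and external test package are illustrative:

package connectortest_test

import (
	"context"
	"testing"

	"go.opentelemetry.io/collector/connector/connectortest"
	"go.opentelemetry.io/collector/consumer/consumertest"
)

func TestNopTracesToTraces(t *testing.T) {
	factory := connectortest.NewNopFactory()
	// Build a traces-to-traces nop connector with a nop downstream consumer.
	conn, err := factory.CreateTracesToTraces(
		context.Background(),
		connectortest.NewNopSettings(),
		factory.CreateDefaultConfig(),
		consumertest.NewNop(),
	)
	if err != nil {
		t.Fatal(err)
	}
	if err := conn.Shutdown(context.Background()); err != nil {
		t.Fatal(err)
	}
}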
+func NewNopFactory() connector.Factory { + return xconnector.NewFactory( + nopType, + func() component.Config { + return &nopConfig{} + }, + xconnector.WithTracesToTraces(createTracesToTracesConnector, component.StabilityLevelDevelopment), + xconnector.WithTracesToMetrics(createTracesToMetricsConnector, component.StabilityLevelDevelopment), + xconnector.WithTracesToLogs(createTracesToLogsConnector, component.StabilityLevelDevelopment), + xconnector.WithTracesToProfiles(createTracesToProfilesConnector, component.StabilityLevelAlpha), + xconnector.WithMetricsToTraces(createMetricsToTracesConnector, component.StabilityLevelDevelopment), + xconnector.WithMetricsToMetrics(createMetricsToMetricsConnector, component.StabilityLevelDevelopment), + xconnector.WithMetricsToLogs(createMetricsToLogsConnector, component.StabilityLevelDevelopment), + xconnector.WithMetricsToProfiles(createMetricsToProfilesConnector, component.StabilityLevelAlpha), + xconnector.WithLogsToTraces(createLogsToTracesConnector, component.StabilityLevelDevelopment), + xconnector.WithLogsToMetrics(createLogsToMetricsConnector, component.StabilityLevelDevelopment), + xconnector.WithLogsToLogs(createLogsToLogsConnector, component.StabilityLevelDevelopment), + xconnector.WithLogsToProfiles(createLogsToProfilesConnector, component.StabilityLevelAlpha), + xconnector.WithProfilesToTraces(createProfilesToTracesConnector, component.StabilityLevelAlpha), + xconnector.WithProfilesToMetrics(createProfilesToMetricsConnector, component.StabilityLevelAlpha), + xconnector.WithProfilesToLogs(createProfilesToLogsConnector, component.StabilityLevelAlpha), + xconnector.WithProfilesToProfiles(createProfilesToProfilesConnector, component.StabilityLevelAlpha), + ) +} + +func createTracesToTracesConnector(context.Context, connector.Settings, component.Config, consumer.Traces) (connector.Traces, error) { + return &nopConnector{Consumer: consumertest.NewNop()}, nil +} + +func createTracesToMetricsConnector(context.Context, connector.Settings, component.Config, consumer.Metrics) (connector.Traces, error) { + return &nopConnector{Consumer: consumertest.NewNop()}, nil +} + +func createTracesToLogsConnector(context.Context, connector.Settings, component.Config, consumer.Logs) (connector.Traces, error) { + return &nopConnector{Consumer: consumertest.NewNop()}, nil +} + +func createTracesToProfilesConnector(context.Context, connector.Settings, component.Config, xconsumer.Profiles) (connector.Traces, error) { + return &nopConnector{Consumer: consumertest.NewNop()}, nil +} + +func createMetricsToTracesConnector(context.Context, connector.Settings, component.Config, consumer.Traces) (connector.Metrics, error) { + return &nopConnector{Consumer: consumertest.NewNop()}, nil +} + +func createMetricsToMetricsConnector(context.Context, connector.Settings, component.Config, consumer.Metrics) (connector.Metrics, error) { + return &nopConnector{Consumer: consumertest.NewNop()}, nil +} + +func createMetricsToLogsConnector(context.Context, connector.Settings, component.Config, consumer.Logs) (connector.Metrics, error) { + return &nopConnector{Consumer: consumertest.NewNop()}, nil +} + +func createMetricsToProfilesConnector(context.Context, connector.Settings, component.Config, xconsumer.Profiles) (connector.Metrics, error) { + return &nopConnector{Consumer: consumertest.NewNop()}, nil +} + +func createLogsToTracesConnector(context.Context, connector.Settings, component.Config, consumer.Traces) (connector.Logs, error) { + return &nopConnector{Consumer: 
consumertest.NewNop()}, nil +} + +func createLogsToMetricsConnector(context.Context, connector.Settings, component.Config, consumer.Metrics) (connector.Logs, error) { + return &nopConnector{Consumer: consumertest.NewNop()}, nil +} + +func createLogsToLogsConnector(context.Context, connector.Settings, component.Config, consumer.Logs) (connector.Logs, error) { + return &nopConnector{Consumer: consumertest.NewNop()}, nil +} + +func createLogsToProfilesConnector(context.Context, connector.Settings, component.Config, xconsumer.Profiles) (connector.Logs, error) { + return &nopConnector{Consumer: consumertest.NewNop()}, nil +} + +func createProfilesToTracesConnector(context.Context, connector.Settings, component.Config, consumer.Traces) (xconnector.Profiles, error) { + return &nopConnector{Consumer: consumertest.NewNop()}, nil +} + +func createProfilesToMetricsConnector(context.Context, connector.Settings, component.Config, consumer.Metrics) (xconnector.Profiles, error) { + return &nopConnector{Consumer: consumertest.NewNop()}, nil +} + +func createProfilesToLogsConnector(context.Context, connector.Settings, component.Config, consumer.Logs) (xconnector.Profiles, error) { + return &nopConnector{Consumer: consumertest.NewNop()}, nil +} + +func createProfilesToProfilesConnector(context.Context, connector.Settings, component.Config, xconsumer.Profiles) (xconnector.Profiles, error) { + return &nopConnector{Consumer: consumertest.NewNop()}, nil +} + +// nopConnector is a connector for any signal pair that forwards to a nop consumer, discarding all data; it exists for testing purposes. +type nopConnector struct { + component.StartFunc + component.ShutdownFunc + consumertest.Consumer +} diff --git a/vendor/go.opentelemetry.io/collector/connector/internal/factory.go b/vendor/go.opentelemetry.io/collector/connector/internal/factory.go new file mode 100644 index 00000000000..b865f6311cb --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/connector/internal/factory.go @@ -0,0 +1,15 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/connector/internal" + +import ( + "fmt" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/pipeline" +) + +func ErrDataTypes(id component.ID, from, to pipeline.Signal) error { + return fmt.Errorf("connector %q cannot connect from %s to %s: %w", id, from, to, pipeline.ErrSignalNotSupported) +} diff --git a/vendor/go.opentelemetry.io/collector/connector/internal/router.go b/vendor/go.opentelemetry.io/collector/connector/internal/router.go new file mode 100644 index 00000000000..e951abd134f --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/connector/internal/router.go @@ -0,0 +1,56 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/connector/internal" + +import ( + "errors" + "fmt" + + "go.uber.org/multierr" + + "go.opentelemetry.io/collector/pipeline" +) + +type BaseRouter[T any] struct { + fanout func([]T) T + Consumers map[pipeline.ID]T +} + +func NewBaseRouter[T any](fanout func([]T) T, cm map[pipeline.ID]T) BaseRouter[T] { + consumers := make(map[pipeline.ID]T, len(cm)) + for k, v := range cm { + consumers[k] = v + } + return BaseRouter[T]{fanout: fanout, Consumers: consumers} +} + +func (r *BaseRouter[T]) PipelineIDs() []pipeline.ID { + ids := make([]pipeline.ID, 0, len(r.Consumers)) + for id := range r.Consumers { + ids = append(ids, id) + } + return ids +} + +func (r *BaseRouter[T]) Consumer(pipelineIDs 
...pipeline.ID) (T, error) { + var ret T + if len(pipelineIDs) == 0 { + return ret, errors.New("missing consumers") + } + consumers := make([]T, 0, len(pipelineIDs)) + var errors error + for _, pipelineID := range pipelineIDs { + c, ok := r.Consumers[pipelineID] + if ok { + consumers = append(consumers, c) + } else { + errors = multierr.Append(errors, fmt.Errorf("missing consumer: %q", pipelineID)) + } + } + if errors != nil { + // TODO potentially this could return a NewTraces with the valid consumers + return ret, errors + } + return r.fanout(consumers), nil +} diff --git a/vendor/go.opentelemetry.io/collector/connector/logs_router.go b/vendor/go.opentelemetry.io/collector/connector/logs_router.go index 0db9ea7799d..a2d74de29a3 100644 --- a/vendor/go.opentelemetry.io/collector/connector/logs_router.go +++ b/vendor/go.opentelemetry.io/collector/connector/logs_router.go @@ -4,55 +4,57 @@ package connector // import "go.opentelemetry.io/collector/connector" import ( + "errors" "fmt" "go.uber.org/multierr" - "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/connector/internal" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/internal/fanoutconsumer" + "go.opentelemetry.io/collector/pipeline" ) // LogsRouterAndConsumer feeds the first consumer.Logs in each of the specified pipelines. type LogsRouterAndConsumer interface { consumer.Logs - Consumer(...component.ID) (consumer.Logs, error) - PipelineIDs() []component.ID + Consumer(...pipeline.ID) (consumer.Logs, error) + PipelineIDs() []pipeline.ID privateFunc() } type logsRouter struct { consumer.Logs - baseRouter[consumer.Logs] + internal.BaseRouter[consumer.Logs] } -func NewLogsRouter(cm map[component.ID]consumer.Logs) LogsRouterAndConsumer { +func NewLogsRouter(cm map[pipeline.ID]consumer.Logs) LogsRouterAndConsumer { consumers := make([]consumer.Logs, 0, len(cm)) for _, cons := range cm { consumers = append(consumers, cons) } return &logsRouter{ Logs: fanoutconsumer.NewLogs(consumers), - baseRouter: newBaseRouter(fanoutconsumer.NewLogs, cm), + BaseRouter: internal.NewBaseRouter(fanoutconsumer.NewLogs, cm), } } -func (r *logsRouter) PipelineIDs() []component.ID { - ids := make([]component.ID, 0, len(r.consumers)) - for id := range r.consumers { +func (r *logsRouter) PipelineIDs() []pipeline.ID { + ids := make([]pipeline.ID, 0, len(r.Consumers)) + for id := range r.Consumers { ids = append(ids, id) } return ids } -func (r *logsRouter) Consumer(pipelineIDs ...component.ID) (consumer.Logs, error) { +func (r *logsRouter) Consumer(pipelineIDs ...pipeline.ID) (consumer.Logs, error) { if len(pipelineIDs) == 0 { - return nil, fmt.Errorf("missing consumers") + return nil, errors.New("missing consumers") } consumers := make([]consumer.Logs, 0, len(pipelineIDs)) var errors error for _, pipelineID := range pipelineIDs { - c, ok := r.consumers[pipelineID] + c, ok := r.Consumers[pipelineID] if ok { consumers = append(consumers, c) } else { diff --git a/vendor/go.opentelemetry.io/collector/connector/metrics_router.go b/vendor/go.opentelemetry.io/collector/connector/metrics_router.go index 3e688261bfe..a45a6397fe0 100644 --- a/vendor/go.opentelemetry.io/collector/connector/metrics_router.go +++ b/vendor/go.opentelemetry.io/collector/connector/metrics_router.go @@ -4,32 +4,33 @@ package connector // import "go.opentelemetry.io/collector/connector" import ( - "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/connector/internal" "go.opentelemetry.io/collector/consumer" 
"go.opentelemetry.io/collector/internal/fanoutconsumer" + "go.opentelemetry.io/collector/pipeline" ) // MetricsRouterAndConsumer feeds the first consumer.Metrics in each of the specified pipelines. type MetricsRouterAndConsumer interface { consumer.Metrics - Consumer(...component.ID) (consumer.Metrics, error) - PipelineIDs() []component.ID + Consumer(...pipeline.ID) (consumer.Metrics, error) + PipelineIDs() []pipeline.ID privateFunc() } type metricsRouter struct { consumer.Metrics - baseRouter[consumer.Metrics] + internal.BaseRouter[consumer.Metrics] } -func NewMetricsRouter(cm map[component.ID]consumer.Metrics) MetricsRouterAndConsumer { +func NewMetricsRouter(cm map[pipeline.ID]consumer.Metrics) MetricsRouterAndConsumer { consumers := make([]consumer.Metrics, 0, len(cm)) for _, cons := range cm { consumers = append(consumers, cons) } return &metricsRouter{ Metrics: fanoutconsumer.NewMetrics(consumers), - baseRouter: newBaseRouter(fanoutconsumer.NewMetrics, cm), + BaseRouter: internal.NewBaseRouter(fanoutconsumer.NewMetrics, cm), } } diff --git a/vendor/go.opentelemetry.io/collector/connector/router.go b/vendor/go.opentelemetry.io/collector/connector/router.go deleted file mode 100644 index bba7ee76bba..00000000000 --- a/vendor/go.opentelemetry.io/collector/connector/router.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package connector // import "go.opentelemetry.io/collector/connector" - -import ( - "fmt" - - "go.uber.org/multierr" - - "go.opentelemetry.io/collector/component" -) - -type baseRouter[T any] struct { - fanout func([]T) T - consumers map[component.ID]T -} - -func newBaseRouter[T any](fanout func([]T) T, cm map[component.ID]T) baseRouter[T] { - consumers := make(map[component.ID]T, len(cm)) - for k, v := range cm { - consumers[k] = v - } - return baseRouter[T]{fanout: fanout, consumers: consumers} -} - -func (r *baseRouter[T]) PipelineIDs() []component.ID { - ids := make([]component.ID, 0, len(r.consumers)) - for id := range r.consumers { - ids = append(ids, id) - } - return ids -} - -func (r *baseRouter[T]) Consumer(pipelineIDs ...component.ID) (T, error) { - var ret T - if len(pipelineIDs) == 0 { - return ret, fmt.Errorf("missing consumers") - } - consumers := make([]T, 0, len(pipelineIDs)) - var errors error - for _, pipelineID := range pipelineIDs { - c, ok := r.consumers[pipelineID] - if ok { - consumers = append(consumers, c) - } else { - errors = multierr.Append(errors, fmt.Errorf("missing consumer: %q", pipelineID)) - } - } - if errors != nil { - // TODO potentially this could return a NewTraces with the valid consumers - return ret, errors - } - return r.fanout(consumers), nil -} diff --git a/vendor/go.opentelemetry.io/collector/connector/traces_router.go b/vendor/go.opentelemetry.io/collector/connector/traces_router.go index 84eb889c05a..5622b78bf94 100644 --- a/vendor/go.opentelemetry.io/collector/connector/traces_router.go +++ b/vendor/go.opentelemetry.io/collector/connector/traces_router.go @@ -4,32 +4,33 @@ package connector // import "go.opentelemetry.io/collector/connector" import ( - "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/connector/internal" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/internal/fanoutconsumer" + "go.opentelemetry.io/collector/pipeline" ) // TracesRouterAndConsumer feeds the first consumer.Traces in each of the specified pipelines. 
type TracesRouterAndConsumer interface { consumer.Traces - Consumer(...component.ID) (consumer.Traces, error) - PipelineIDs() []component.ID + Consumer(...pipeline.ID) (consumer.Traces, error) + PipelineIDs() []pipeline.ID privateFunc() } type tracesRouter struct { consumer.Traces - baseRouter[consumer.Traces] + internal.BaseRouter[consumer.Traces] } -func NewTracesRouter(cm map[component.ID]consumer.Traces) TracesRouterAndConsumer { +func NewTracesRouter(cm map[pipeline.ID]consumer.Traces) TracesRouterAndConsumer { consumers := make([]consumer.Traces, 0, len(cm)) for _, cons := range cm { consumers = append(consumers, cons) } return &tracesRouter{ Traces: fanoutconsumer.NewTraces(consumers), - baseRouter: newBaseRouter(fanoutconsumer.NewTraces, cm), + BaseRouter: internal.NewBaseRouter(fanoutconsumer.NewTraces, cm), } } diff --git a/vendor/go.opentelemetry.io/collector/connector/xconnector/LICENSE b/vendor/go.opentelemetry.io/collector/connector/xconnector/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/connector/xconnector/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
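The core change running through the router files above is that fan-out consumers are now keyed by pipeline.ID rather than component.ID. A minimal sketch of the updated API, assuming the collector modules in this diff are vendored at matching versions (the pipeline names here are illustrative):

    package main

    import (
    	"fmt"

    	"go.opentelemetry.io/collector/connector"
    	"go.opentelemetry.io/collector/consumer"
    	"go.opentelemetry.io/collector/consumer/consumertest"
    	"go.opentelemetry.io/collector/pipeline"
    )

    func main() {
    	// Downstream traces pipelines are now identified by pipeline.ID.
    	idA := pipeline.NewIDWithName(pipeline.SignalTraces, "a")
    	idB := pipeline.NewIDWithName(pipeline.SignalTraces, "b")

    	router := connector.NewTracesRouter(map[pipeline.ID]consumer.Traces{
    		idA: consumertest.NewNop(),
    		idB: consumertest.NewNop(),
    	})

    	// Consumer returns a fan-out consumer for the requested subset;
    	// unknown IDs are aggregated into a multierr (see BaseRouter.Consumer).
    	cons, err := router.Consumer(idA, idB)
    	if err != nil {
    		panic(err)
    	}
    	_ = cons

    	fmt.Println(router.PipelineIDs()) // both IDs, in unspecified order
    }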
diff --git a/vendor/go.opentelemetry.io/collector/connector/xconnector/Makefile b/vendor/go.opentelemetry.io/collector/connector/xconnector/Makefile new file mode 100644 index 00000000000..ded7a36092d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/connector/xconnector/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/connector/xconnector/connector.go b/vendor/go.opentelemetry.io/collector/connector/xconnector/connector.go new file mode 100644 index 00000000000..d697e06a8e4 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/connector/xconnector/connector.go @@ -0,0 +1,331 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package xconnector // import "go.opentelemetry.io/collector/connector/xconnector" + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/connector" + "go.opentelemetry.io/collector/connector/internal" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/xconsumer" + "go.opentelemetry.io/collector/pipeline" + "go.opentelemetry.io/collector/pipeline/xpipeline" +) + +type Factory interface { + connector.Factory + + CreateTracesToProfiles(ctx context.Context, set connector.Settings, cfg component.Config, next xconsumer.Profiles) (connector.Traces, error) + CreateMetricsToProfiles(ctx context.Context, set connector.Settings, cfg component.Config, next xconsumer.Profiles) (connector.Metrics, error) + CreateLogsToProfiles(ctx context.Context, set connector.Settings, cfg component.Config, next xconsumer.Profiles) (connector.Logs, error) + + TracesToProfilesStability() component.StabilityLevel + MetricsToProfilesStability() component.StabilityLevel + LogsToProfilesStability() component.StabilityLevel + + CreateProfilesToProfiles(ctx context.Context, set connector.Settings, cfg component.Config, next xconsumer.Profiles) (Profiles, error) + CreateProfilesToTraces(ctx context.Context, set connector.Settings, cfg component.Config, next consumer.Traces) (Profiles, error) + CreateProfilesToMetrics(ctx context.Context, set connector.Settings, cfg component.Config, next consumer.Metrics) (Profiles, error) + CreateProfilesToLogs(ctx context.Context, set connector.Settings, cfg component.Config, next consumer.Logs) (Profiles, error) + + ProfilesToProfilesStability() component.StabilityLevel + ProfilesToTracesStability() component.StabilityLevel + ProfilesToMetricsStability() component.StabilityLevel + ProfilesToLogsStability() component.StabilityLevel +} + +// A Profiles connector acts as an exporter from a profiles pipeline and a receiver +// to one or more traces, metrics, logs, or profiles pipelines. +// Profiles feeds a consumer.Traces, consumer.Metrics, consumer.Logs, or xconsumer.Profiles with data. +// +// Examples: +// - Profiles could be collected in one pipeline and routed to another profiles pipeline +// based on criteria such as attributes or other content of the profile. The second +// pipeline can then process and export the profile to the appropriate backend. +// - Profiles could be summarized by a metrics connector that emits statistics describing +// the number of profiles observed. +// - Profiles could be analyzed by a logs connector that emits events when particular +// criteria are met. +type Profiles interface { + component.Component + xconsumer.Profiles +} + +// CreateTracesToProfilesFunc is the equivalent of Factory.CreateTracesToProfiles(). 
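+// A nil function value makes the corresponding Create method report the unsupported signal pair via internal.ErrDataTypes.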
+type CreateTracesToProfilesFunc func(context.Context, connector.Settings, component.Config, xconsumer.Profiles) (connector.Traces, error) + +// CreateTracesToProfiles implements Factory.CreateTracesToProfiles(). +func (f CreateTracesToProfilesFunc) CreateTracesToProfiles(ctx context.Context, set connector.Settings, cfg component.Config, next xconsumer.Profiles) (connector.Traces, error) { + if f == nil { + return nil, internal.ErrDataTypes(set.ID, pipeline.SignalTraces, xpipeline.SignalProfiles) + } + return f(ctx, set, cfg, next) +} + +// CreateMetricsToProfilesFunc is the equivalent of Factory.CreateMetricsToProfiles(). +type CreateMetricsToProfilesFunc func(context.Context, connector.Settings, component.Config, xconsumer.Profiles) (connector.Metrics, error) + +// CreateMetricsToProfiles implements Factory.CreateMetricsToProfiles(). +func (f CreateMetricsToProfilesFunc) CreateMetricsToProfiles(ctx context.Context, set connector.Settings, cfg component.Config, next xconsumer.Profiles) (connector.Metrics, error) { + if f == nil { + return nil, internal.ErrDataTypes(set.ID, pipeline.SignalMetrics, xpipeline.SignalProfiles) + } + return f(ctx, set, cfg, next) +} + +// CreateLogsToProfilesFunc is the equivalent of Factory.CreateLogsToProfiles(). +type CreateLogsToProfilesFunc func(context.Context, connector.Settings, component.Config, xconsumer.Profiles) (connector.Logs, error) + +// CreateLogsToProfiles implements Factory.CreateLogsToProfiles(). +func (f CreateLogsToProfilesFunc) CreateLogsToProfiles(ctx context.Context, set connector.Settings, cfg component.Config, next xconsumer.Profiles) (connector.Logs, error) { + if f == nil { + return nil, internal.ErrDataTypes(set.ID, pipeline.SignalLogs, xpipeline.SignalProfiles) + } + return f(ctx, set, cfg, next) +} + +// CreateProfilesToProfilesFunc is the equivalent of Factory.CreateProfilesToProfiles(). +type CreateProfilesToProfilesFunc func(context.Context, connector.Settings, component.Config, xconsumer.Profiles) (Profiles, error) + +// CreateProfilesToProfiles implements Factory.CreateProfilesToProfiles(). +func (f CreateProfilesToProfilesFunc) CreateProfilesToProfiles(ctx context.Context, set connector.Settings, cfg component.Config, next xconsumer.Profiles) (Profiles, error) { + if f == nil { + return nil, internal.ErrDataTypes(set.ID, xpipeline.SignalProfiles, xpipeline.SignalProfiles) + } + return f(ctx, set, cfg, next) +} + +// CreateProfilesToTracesFunc is the equivalent of Factory.CreateProfilesToTraces(). +type CreateProfilesToTracesFunc func(context.Context, connector.Settings, component.Config, consumer.Traces) (Profiles, error) + +// CreateProfilesToTraces implements Factory.CreateProfilesToTraces(). +func (f CreateProfilesToTracesFunc) CreateProfilesToTraces(ctx context.Context, set connector.Settings, cfg component.Config, next consumer.Traces) (Profiles, error) { + if f == nil { + return nil, internal.ErrDataTypes(set.ID, xpipeline.SignalProfiles, pipeline.SignalTraces) + } + return f(ctx, set, cfg, next) +} + +// CreateProfilesToMetricsFunc is the equivalent of Factory.CreateProfilesToMetrics(). +type CreateProfilesToMetricsFunc func(context.Context, connector.Settings, component.Config, consumer.Metrics) (Profiles, error) + +// CreateProfilesToMetrics implements Factory.CreateProfilesToMetrics(). 
+func (f CreateProfilesToMetricsFunc) CreateProfilesToMetrics(ctx context.Context, set connector.Settings, cfg component.Config, next consumer.Metrics) (Profiles, error) { + if f == nil { + return nil, internal.ErrDataTypes(set.ID, xpipeline.SignalProfiles, pipeline.SignalMetrics) + } + return f(ctx, set, cfg, next) +} + +// CreateProfilesToLogsFunc is the equivalent of Factory.CreateProfilesToLogs(). +type CreateProfilesToLogsFunc func(context.Context, connector.Settings, component.Config, consumer.Logs) (Profiles, error) + +// CreateProfilesToLogs implements Factory.CreateProfilesToLogs(). +func (f CreateProfilesToLogsFunc) CreateProfilesToLogs(ctx context.Context, set connector.Settings, cfg component.Config, next consumer.Logs) (Profiles, error) { + if f == nil { + return nil, internal.ErrDataTypes(set.ID, xpipeline.SignalProfiles, pipeline.SignalLogs) + } + return f(ctx, set, cfg, next) +} + +// FactoryOption applies changes to the connector factory options. +type FactoryOption interface { + // applyOption applies the option. + applyOption(o *factoryOpts) +} + +// factoryOptionFunc is a FactoryOption created through a function. +type factoryOptionFunc func(*factoryOpts) + +func (f factoryOptionFunc) applyOption(o *factoryOpts) { + f(o) +} + +type factoryOpts struct { + opts []connector.FactoryOption + + *factory +} + +// WithTracesToTraces overrides the default "error not supported" implementation for WithTracesToTraces and the default "undefined" stability level. +func WithTracesToTraces(createTracesToTraces connector.CreateTracesToTracesFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.opts = append(o.opts, connector.WithTracesToTraces(createTracesToTraces, sl)) + }) +} + +// WithTracesToMetrics overrides the default "error not supported" implementation for WithTracesToMetrics and the default "undefined" stability level. +func WithTracesToMetrics(createTracesToMetrics connector.CreateTracesToMetricsFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.opts = append(o.opts, connector.WithTracesToMetrics(createTracesToMetrics, sl)) + }) +} + +// WithTracesToLogs overrides the default "error not supported" implementation for WithTracesToLogs and the default "undefined" stability level. +func WithTracesToLogs(createTracesToLogs connector.CreateTracesToLogsFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.opts = append(o.opts, connector.WithTracesToLogs(createTracesToLogs, sl)) + }) +} + +// WithMetricsToTraces overrides the default "error not supported" implementation for WithMetricsToTraces and the default "undefined" stability level. +func WithMetricsToTraces(createMetricsToTraces connector.CreateMetricsToTracesFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.opts = append(o.opts, connector.WithMetricsToTraces(createMetricsToTraces, sl)) + }) +} + +// WithMetricsToMetrics overrides the default "error not supported" implementation for WithMetricsToMetrics and the default "undefined" stability level. 
+func WithMetricsToMetrics(createMetricsToMetrics connector.CreateMetricsToMetricsFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.opts = append(o.opts, connector.WithMetricsToMetrics(createMetricsToMetrics, sl)) + }) +} + +// WithMetricsToLogs overrides the default "error not supported" implementation for WithMetricsToLogs and the default "undefined" stability level. +func WithMetricsToLogs(createMetricsToLogs connector.CreateMetricsToLogsFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.opts = append(o.opts, connector.WithMetricsToLogs(createMetricsToLogs, sl)) + }) +} + +// WithLogsToTraces overrides the default "error not supported" implementation for WithLogsToTraces and the default "undefined" stability level. +func WithLogsToTraces(createLogsToTraces connector.CreateLogsToTracesFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.opts = append(o.opts, connector.WithLogsToTraces(createLogsToTraces, sl)) + }) +} + +// WithLogsToMetrics overrides the default "error not supported" implementation for WithLogsToMetrics and the default "undefined" stability level. +func WithLogsToMetrics(createLogsToMetrics connector.CreateLogsToMetricsFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.opts = append(o.opts, connector.WithLogsToMetrics(createLogsToMetrics, sl)) + }) +} + +// WithLogsToLogs overrides the default "error not supported" implementation for WithLogsToLogs and the default "undefined" stability level. +func WithLogsToLogs(createLogsToLogs connector.CreateLogsToLogsFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.opts = append(o.opts, connector.WithLogsToLogs(createLogsToLogs, sl)) + }) +} + +// WithTracesToProfiles overrides the default "error not supported" implementation for WithTracesToProfiles and the default "undefined" stability level. +func WithTracesToProfiles(createTracesToProfiles CreateTracesToProfilesFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.tracesToProfilesStabilityLevel = sl + o.CreateTracesToProfilesFunc = createTracesToProfiles + }) +} + +// WithMetricsToProfiles overrides the default "error not supported" implementation for WithMetricsToProfiles and the default "undefined" stability level. +func WithMetricsToProfiles(createMetricsToProfiles CreateMetricsToProfilesFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.metricsToProfilesStabilityLevel = sl + o.CreateMetricsToProfilesFunc = createMetricsToProfiles + }) +} + +// WithLogsToProfiles overrides the default "error not supported" implementation for WithLogsToProfiles and the default "undefined" stability level. +func WithLogsToProfiles(createLogsToProfiles CreateLogsToProfilesFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.logsToProfilesStabilityLevel = sl + o.CreateLogsToProfilesFunc = createLogsToProfiles + }) +} + +// WithProfilesToProfiles overrides the default "error not supported" implementation for WithProfilesToProfiles and the default "undefined" stability level. 
+func WithProfilesToProfiles(createProfilesToProfiles CreateProfilesToProfilesFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.profilesToProfilesStabilityLevel = sl + o.CreateProfilesToProfilesFunc = createProfilesToProfiles + }) +} + +// WithProfilesToTraces overrides the default "error not supported" implementation for WithProfilesToTraces and the default "undefined" stability level. +func WithProfilesToTraces(createProfilesToTraces CreateProfilesToTracesFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.profilesToTracesStabilityLevel = sl + o.CreateProfilesToTracesFunc = createProfilesToTraces + }) +} + +// WithProfilesToMetrics overrides the default "error not supported" implementation for WithProfilesToMetrics and the default "undefined" stability level. +func WithProfilesToMetrics(createProfilesToMetrics CreateProfilesToMetricsFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.profilesToMetricsStabilityLevel = sl + o.CreateProfilesToMetricsFunc = createProfilesToMetrics + }) +} + +// WithProfilesToLogs overrides the default "error not supported" implementation for WithProfilesToLogs and the default "undefined" stability level. +func WithProfilesToLogs(createProfilesToLogs CreateProfilesToLogsFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.profilesToLogsStabilityLevel = sl + o.CreateProfilesToLogsFunc = createProfilesToLogs + }) +} + +// factory implements the Factory interface. +type factory struct { + connector.Factory + + CreateTracesToProfilesFunc + CreateMetricsToProfilesFunc + CreateLogsToProfilesFunc + + CreateProfilesToProfilesFunc + CreateProfilesToTracesFunc + CreateProfilesToMetricsFunc + CreateProfilesToLogsFunc + + tracesToProfilesStabilityLevel component.StabilityLevel + metricsToProfilesStabilityLevel component.StabilityLevel + logsToProfilesStabilityLevel component.StabilityLevel + + profilesToProfilesStabilityLevel component.StabilityLevel + profilesToTracesStabilityLevel component.StabilityLevel + profilesToMetricsStabilityLevel component.StabilityLevel + profilesToLogsStabilityLevel component.StabilityLevel +} + +func (f *factory) TracesToProfilesStability() component.StabilityLevel { + return f.tracesToProfilesStabilityLevel +} + +func (f *factory) MetricsToProfilesStability() component.StabilityLevel { + return f.metricsToProfilesStabilityLevel +} + +func (f *factory) LogsToProfilesStability() component.StabilityLevel { + return f.logsToProfilesStabilityLevel +} + +func (f *factory) ProfilesToProfilesStability() component.StabilityLevel { + return f.profilesToProfilesStabilityLevel +} + +func (f *factory) ProfilesToTracesStability() component.StabilityLevel { + return f.profilesToTracesStabilityLevel +} + +func (f *factory) ProfilesToMetricsStability() component.StabilityLevel { + return f.profilesToMetricsStabilityLevel +} + +func (f *factory) ProfilesToLogsStability() component.StabilityLevel { + return f.profilesToLogsStabilityLevel +} + +// NewFactory returns a Factory. +func NewFactory(cfgType component.Type, createDefaultConfig component.CreateDefaultConfigFunc, options ...FactoryOption) Factory { + opts := factoryOpts{factory: &factory{}} + for _, opt := range options { + opt.applyOption(&opts) + } + opts.Factory = connector.NewFactory(cfgType, createDefaultConfig, opts.opts...) 
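+	// The wrapped connector.Factory handles the base signal pairs; the profiles-specific
+	// Create* funcs and stability levels were set directly on the embedded factory by the
+	// With*Profiles options above.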
+ return opts.factory +} diff --git a/vendor/go.opentelemetry.io/collector/connector/xconnector/profiles_router.go b/vendor/go.opentelemetry.io/collector/connector/xconnector/profiles_router.go new file mode 100644 index 00000000000..085305ee557 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/connector/xconnector/profiles_router.go @@ -0,0 +1,36 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package xconnector // import "go.opentelemetry.io/collector/connector/xconnector" + +import ( + "go.opentelemetry.io/collector/connector/internal" + "go.opentelemetry.io/collector/consumer/xconsumer" + "go.opentelemetry.io/collector/internal/fanoutconsumer" + "go.opentelemetry.io/collector/pipeline" +) + +type ProfilesRouterAndConsumer interface { + xconsumer.Profiles + Consumer(...pipeline.ID) (xconsumer.Profiles, error) + PipelineIDs() []pipeline.ID + privateFunc() +} + +type profilesRouter struct { + xconsumer.Profiles + internal.BaseRouter[xconsumer.Profiles] +} + +func NewProfilesRouter(cm map[pipeline.ID]xconsumer.Profiles) ProfilesRouterAndConsumer { + consumers := make([]xconsumer.Profiles, 0, len(cm)) + for _, cons := range cm { + consumers = append(consumers, cons) + } + return &profilesRouter{ + Profiles: fanoutconsumer.NewProfiles(consumers), + BaseRouter: internal.NewBaseRouter(fanoutconsumer.NewProfiles, cm), + } +} + +func (r *profilesRouter) privateFunc() {} diff --git a/vendor/go.opentelemetry.io/collector/consumer/consumer.go b/vendor/go.opentelemetry.io/collector/consumer/consumer.go index 503750ad7cb..b1b588a85c0 100644 --- a/vendor/go.opentelemetry.io/collector/consumer/consumer.go +++ b/vendor/go.opentelemetry.io/collector/consumer/consumer.go @@ -5,52 +5,22 @@ package consumer // import "go.opentelemetry.io/collector/consumer" import ( "errors" + + "go.opentelemetry.io/collector/consumer/internal" ) // Capabilities describes the capabilities of a Processor. -type Capabilities struct { - // MutatesData is set to true if Consume* function of the - // processor modifies the input Traces, Logs or Metrics argument. - // Processors which modify the input data MUST set this flag to true. If the processor - // does not modify the data it MUST set this flag to false. If the processor creates - // a copy of the data before modifying then this flag can be safely set to false. - MutatesData bool -} - -type baseConsumer interface { - Capabilities() Capabilities -} +type Capabilities = internal.Capabilities var errNilFunc = errors.New("nil consumer func") -type baseImpl struct { - capabilities Capabilities -} - // Option to construct new consumers. -type Option func(*baseImpl) +type Option = internal.Option // WithCapabilities overrides the default GetCapabilities function for a processor. // The default GetCapabilities function returns mutable capabilities. 
func WithCapabilities(capabilities Capabilities) Option { - return func(o *baseImpl) { - o.capabilities = capabilities - } -} - -// Capabilities returns the capabilities of the component -func (bs baseImpl) Capabilities() Capabilities { - return bs.capabilities -} - -func newBaseImpl(options ...Option) *baseImpl { - bs := &baseImpl{ - capabilities: Capabilities{MutatesData: false}, - } - - for _, op := range options { - op(bs) - } - - return bs + return internal.OptionFunc(func(o *internal.BaseImpl) { + o.Cap = capabilities + }) } diff --git a/vendor/go.opentelemetry.io/collector/consumer/consumererror/LICENSE b/vendor/go.opentelemetry.io/collector/consumer/consumererror/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/consumer/consumererror/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/collector/consumer/consumererror/Makefile b/vendor/go.opentelemetry.io/collector/consumer/consumererror/Makefile new file mode 100644 index 00000000000..ded7a36092d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/consumer/consumererror/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/consumer/consumererror/internal/retryable.go b/vendor/go.opentelemetry.io/collector/consumer/consumererror/internal/retryable.go new file mode 100644 index 00000000000..feed1bc5bc7 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/consumer/consumererror/internal/retryable.go @@ -0,0 +1,31 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/consumer/consumererror/internal" + +import ( + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/pprofile" + "go.opentelemetry.io/collector/pdata/ptrace" +) + +type Retryable[V ptrace.Traces | pmetric.Metrics | plog.Logs | pprofile.Profiles] struct { + Err error + Value V +} + +// Error provides the error message +func (err Retryable[V]) Error() string { + return err.Err.Error() +} + +// Unwrap returns the wrapped error for functions Is and As in standard package errors. +func (err Retryable[V]) Unwrap() error { + return err.Err +} + +// Data returns the telemetry data that failed to be processed or sent. 
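+// For example, a Traces error built with consumererror.NewTraces(err, td) returns td from Data() once recovered with errors.As.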
+func (err Retryable[V]) Data() V { + return err.Value +} diff --git a/vendor/go.opentelemetry.io/collector/consumer/consumererror/signalerrors.go b/vendor/go.opentelemetry.io/collector/consumer/consumererror/signalerrors.go index 1d7558ce1ca..69af253dae7 100644 --- a/vendor/go.opentelemetry.io/collector/consumer/consumererror/signalerrors.go +++ b/vendor/go.opentelemetry.io/collector/consumer/consumererror/signalerrors.go @@ -4,38 +4,24 @@ package consumererror // import "go.opentelemetry.io/collector/consumer/consumererror" import ( + "go.opentelemetry.io/collector/consumer/consumererror/internal" "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/ptrace" ) -type retryable[V ptrace.Traces | pmetric.Metrics | plog.Logs] struct { - error - data V -} - -// Unwrap returns the wrapped error for functions Is and As in standard package errors. -func (err retryable[V]) Unwrap() error { - return err.error -} - -// Data returns the telemetry data that failed to be processed or sent. -func (err retryable[V]) Data() V { - return err.data -} - // Traces is an error that may carry associated Trace data for a subset of received data // that failed to be processed or sent. type Traces struct { - retryable[ptrace.Traces] + internal.Retryable[ptrace.Traces] } // NewTraces creates a Traces that can encapsulate received data that failed to be processed or sent. func NewTraces(err error, data ptrace.Traces) error { return Traces{ - retryable: retryable[ptrace.Traces]{ - error: err, - data: data, + Retryable: internal.Retryable[ptrace.Traces]{ + Err: err, + Value: data, }, } } @@ -43,15 +29,15 @@ func NewTraces(err error, data ptrace.Traces) error { // Logs is an error that may carry associated Log data for a subset of received data // that failed to be processed or sent. type Logs struct { - retryable[plog.Logs] + internal.Retryable[plog.Logs] } // NewLogs creates a Logs that can encapsulate received data that failed to be processed or sent. func NewLogs(err error, data plog.Logs) error { return Logs{ - retryable: retryable[plog.Logs]{ - error: err, - data: data, + Retryable: internal.Retryable[plog.Logs]{ + Err: err, + Value: data, }, } } @@ -59,15 +45,15 @@ func NewLogs(err error, data plog.Logs) error { // Metrics is an error that may carry associated Metrics data for a subset of received data // that failed to be processed or sent. type Metrics struct { - retryable[pmetric.Metrics] + internal.Retryable[pmetric.Metrics] } // NewMetrics creates a Metrics that can encapsulate received data that failed to be processed or sent. func NewMetrics(err error, data pmetric.Metrics) error { return Metrics{ - retryable: retryable[pmetric.Metrics]{ - error: err, - data: data, + Retryable: internal.Retryable[pmetric.Metrics]{ + Err: err, + Value: data, }, } } diff --git a/vendor/go.opentelemetry.io/collector/consumer/consumererror/xconsumererror/LICENSE b/vendor/go.opentelemetry.io/collector/consumer/consumererror/xconsumererror/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/consumer/consumererror/xconsumererror/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
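For context on the internal.Retryable refactor a few hunks above: the exported wrappers (consumererror.Traces, Logs, Metrics) still behave as ordinary Go errors that carry the failed payload. Below is a minimal sketch of the intended call pattern; the export function and its error message are hypothetical, while NewTraces, Data, and the errors.As unwrapping come from the code shown in the diff.

package main

import (
	"context"
	"errors"
	"fmt"

	"go.opentelemetry.io/collector/consumer/consumererror"
	"go.opentelemetry.io/collector/pdata/ptrace"
)

// export stands in for a component that fails to forward traces downstream.
func export(_ context.Context, td ptrace.Traces) error {
	// Wrap the failure together with the data so a retry layer can recover it.
	return consumererror.NewTraces(errors.New("connection reset"), td)
}

func main() {
	td := ptrace.NewTraces()
	err := export(context.Background(), td)

	// errors.As works because Traces embeds internal.Retryable, whose
	// promoted Error and Unwrap methods make it a regular wrapped error.
	var tracesErr consumererror.Traces
	if errors.As(err, &tracesErr) {
		fmt.Println("retrying", tracesErr.Data().SpanCount(), "spans after:", err)
	}
}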
diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/envprovider/Makefile b/vendor/go.opentelemetry.io/collector/consumer/consumererror/xconsumererror/Makefile similarity index 100% rename from vendor/go.opentelemetry.io/collector/confmap/provider/envprovider/Makefile rename to vendor/go.opentelemetry.io/collector/consumer/consumererror/xconsumererror/Makefile diff --git a/vendor/go.opentelemetry.io/collector/consumer/consumererror/xconsumererror/signalerrors.go b/vendor/go.opentelemetry.io/collector/consumer/consumererror/xconsumererror/signalerrors.go new file mode 100644 index 00000000000..6200c7fc503 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/consumer/consumererror/xconsumererror/signalerrors.go @@ -0,0 +1,25 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package xconsumererror // import "go.opentelemetry.io/collector/consumer/consumererror/xconsumererror" + +import ( + "go.opentelemetry.io/collector/consumer/consumererror/internal" + "go.opentelemetry.io/collector/pdata/pprofile" +) + +// Profiles is an error that may carry associated Profile data for a subset of received data +// that failed to be processed or sent. +type Profiles struct { + internal.Retryable[pprofile.Profiles] +} + +// NewProfiles creates a Profiles that can encapsulate received data that failed to be processed or sent. +func NewProfiles(err error, data pprofile.Profiles) error { + return Profiles{ + Retryable: internal.Retryable[pprofile.Profiles]{ + Err: err, + Value: data, + }, + } +} diff --git a/vendor/go.opentelemetry.io/collector/consumer/consumertest/LICENSE b/vendor/go.opentelemetry.io/collector/consumer/consumertest/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/consumer/consumertest/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
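The xconsumererror package added above extends the same retryable pattern to the experimental profiles signal. A short sketch under the same caveats (the send function and error message are hypothetical):

package main

import (
	"context"
	"errors"
	"fmt"

	"go.opentelemetry.io/collector/consumer/consumererror/xconsumererror"
	"go.opentelemetry.io/collector/pdata/pprofile"
)

// send stands in for an exporter that cannot deliver profiles.
func send(_ context.Context, pd pprofile.Profiles) error {
	return xconsumererror.NewProfiles(errors.New("backend unavailable"), pd)
}

func main() {
	err := send(context.Background(), pprofile.NewProfiles())

	var profErr xconsumererror.Profiles
	if errors.As(err, &profErr) {
		// Data returns the pprofile.Profiles that failed, ready to resend.
		fmt.Println("samples to retry:", profErr.Data().SampleCount())
	}
}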
diff --git a/vendor/go.opentelemetry.io/collector/consumer/consumertest/Makefile b/vendor/go.opentelemetry.io/collector/consumer/consumertest/Makefile new file mode 100644 index 00000000000..ded7a36092d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/consumer/consumertest/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/consumer/consumertest/consumer.go b/vendor/go.opentelemetry.io/collector/consumer/consumertest/consumer.go index 147ed55c7ed..ec7a3030631 100644 --- a/vendor/go.opentelemetry.io/collector/consumer/consumertest/consumer.go +++ b/vendor/go.opentelemetry.io/collector/consumer/consumertest/consumer.go @@ -7,8 +7,10 @@ import ( "context" "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/xconsumer" "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/pprofile" "go.opentelemetry.io/collector/pdata/ptrace" ) @@ -29,12 +31,18 @@ type Consumer interface { // ConsumeLogs to implement the consumer.Logs. ConsumeLogs(context.Context, plog.Logs) error + // ConsumeProfiles to implement the xconsumer.Profiles. + ConsumeProfiles(context.Context, pprofile.Profiles) error + unexported() } -var _ consumer.Logs = (Consumer)(nil) -var _ consumer.Metrics = (Consumer)(nil) -var _ consumer.Traces = (Consumer)(nil) +var ( + _ consumer.Logs = (Consumer)(nil) + _ consumer.Metrics = (Consumer)(nil) + _ consumer.Traces = (Consumer)(nil) + _ xconsumer.Profiles = (Consumer)(nil) +) type nonMutatingConsumer struct{} @@ -48,6 +56,7 @@ type baseConsumer struct { consumer.ConsumeTracesFunc consumer.ConsumeMetricsFunc consumer.ConsumeLogsFunc + xconsumer.ConsumeProfilesFunc } func (bc baseConsumer) unexported() {} diff --git a/vendor/go.opentelemetry.io/collector/consumer/consumertest/err.go b/vendor/go.opentelemetry.io/collector/consumer/consumertest/err.go index d147453aaf7..fdc54ae2452 100644 --- a/vendor/go.opentelemetry.io/collector/consumer/consumertest/err.go +++ b/vendor/go.opentelemetry.io/collector/consumer/consumertest/err.go @@ -7,14 +7,16 @@ import ( "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/pprofile" "go.opentelemetry.io/collector/pdata/ptrace" ) // NewErr returns a Consumer that just drops all received data and returns the specified error to Consume* callers. 
func NewErr(err error) Consumer { return &baseConsumer{ - ConsumeTracesFunc: func(context.Context, ptrace.Traces) error { return err }, - ConsumeMetricsFunc: func(context.Context, pmetric.Metrics) error { return err }, - ConsumeLogsFunc: func(context.Context, plog.Logs) error { return err }, + ConsumeTracesFunc: func(context.Context, ptrace.Traces) error { return err }, + ConsumeMetricsFunc: func(context.Context, pmetric.Metrics) error { return err }, + ConsumeLogsFunc: func(context.Context, plog.Logs) error { return err }, + ConsumeProfilesFunc: func(context.Context, pprofile.Profiles) error { return err }, } } diff --git a/vendor/go.opentelemetry.io/collector/consumer/consumertest/nop.go b/vendor/go.opentelemetry.io/collector/consumer/consumertest/nop.go index fbb01e3bb98..25b898a7751 100644 --- a/vendor/go.opentelemetry.io/collector/consumer/consumertest/nop.go +++ b/vendor/go.opentelemetry.io/collector/consumer/consumertest/nop.go @@ -8,14 +8,16 @@ import ( "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/pprofile" "go.opentelemetry.io/collector/pdata/ptrace" ) // NewNop returns a Consumer that just drops all received data and returns no error. func NewNop() Consumer { return &baseConsumer{ - ConsumeTracesFunc: func(context.Context, ptrace.Traces) error { return nil }, - ConsumeMetricsFunc: func(context.Context, pmetric.Metrics) error { return nil }, - ConsumeLogsFunc: func(context.Context, plog.Logs) error { return nil }, + ConsumeTracesFunc: func(context.Context, ptrace.Traces) error { return nil }, + ConsumeMetricsFunc: func(context.Context, pmetric.Metrics) error { return nil }, + ConsumeLogsFunc: func(context.Context, plog.Logs) error { return nil }, + ConsumeProfilesFunc: func(context.Context, pprofile.Profiles) error { return nil }, } } diff --git a/vendor/go.opentelemetry.io/collector/consumer/consumertest/sink.go b/vendor/go.opentelemetry.io/collector/consumer/consumertest/sink.go index be7195af18f..e05dfccff98 100644 --- a/vendor/go.opentelemetry.io/collector/consumer/consumertest/sink.go +++ b/vendor/go.opentelemetry.io/collector/consumer/consumertest/sink.go @@ -8,8 +8,10 @@ import ( "sync" "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/xconsumer" "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/pprofile" "go.opentelemetry.io/collector/pdata/ptrace" ) @@ -156,3 +158,51 @@ func (sle *LogsSink) Reset() { sle.logs = nil sle.logRecordCount = 0 } + +// ProfilesSink is an xconsumer.Profiles that acts like a sink that +// stores all profiles and allows querying them for testing. +type ProfilesSink struct { + nonMutatingConsumer + mu sync.Mutex + profiles []pprofile.Profiles + sampleCount int +} + +var _ xconsumer.Profiles = (*ProfilesSink)(nil) + +// ConsumeProfiles stores profiles to this sink. +func (ste *ProfilesSink) ConsumeProfiles(_ context.Context, td pprofile.Profiles) error { + ste.mu.Lock() + defer ste.mu.Unlock() + + ste.profiles = append(ste.profiles, td) + ste.sampleCount += td.SampleCount() + + return nil +} + +// AllProfiles returns the profiles stored by this sink since last Reset.
+func (ste *ProfilesSink) AllProfiles() []pprofile.Profiles { + ste.mu.Lock() + defer ste.mu.Unlock() + + copyProfiles := make([]pprofile.Profiles, len(ste.profiles)) + copy(copyProfiles, ste.profiles) + return copyProfiles +} + +// SampleCount returns the number of samples stored by this sink since last Reset. +func (ste *ProfilesSink) SampleCount() int { + ste.mu.Lock() + defer ste.mu.Unlock() + return ste.sampleCount +} + +// Reset deletes any stored data. +func (ste *ProfilesSink) Reset() { + ste.mu.Lock() + defer ste.mu.Unlock() + + ste.profiles = nil + ste.sampleCount = 0 +} diff --git a/vendor/go.opentelemetry.io/collector/consumer/internal/consumer.go b/vendor/go.opentelemetry.io/collector/consumer/internal/consumer.go new file mode 100644 index 00000000000..45c90dd4341 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/consumer/internal/consumer.go @@ -0,0 +1,50 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/consumer/internal" + +// Capabilities describes the capabilities of a Processor. +type Capabilities struct { + // MutatesData is set to true if the Consume* function of the + // processor modifies the input Traces, Logs or Metrics argument. + // Processors which modify the input data MUST set this flag to true. If the processor + // does not modify the data it MUST set this flag to false. If the processor creates + // a copy of the data before modifying then this flag can be safely set to false. + MutatesData bool +} + +type BaseConsumer interface { + Capabilities() Capabilities +} + +type BaseImpl struct { + Cap Capabilities +} + +// Option to construct new consumers. +type Option interface { + apply(*BaseImpl) +} + +type OptionFunc func(*BaseImpl) + +func (of OptionFunc) apply(e *BaseImpl) { + of(e) +} + +// Capabilities returns the capabilities of the component. +func (bs BaseImpl) Capabilities() Capabilities { + return bs.Cap +} + +func NewBaseImpl(options ...Option) *BaseImpl { + bs := &BaseImpl{ + Cap: Capabilities{MutatesData: false}, + } + + for _, op := range options { + op.apply(bs) + } + + return bs +} diff --git a/vendor/go.opentelemetry.io/collector/consumer/logs.go b/vendor/go.opentelemetry.io/collector/consumer/logs.go index 5bf89a52f7a..15166ef1196 100644 --- a/vendor/go.opentelemetry.io/collector/consumer/logs.go +++ b/vendor/go.opentelemetry.io/collector/consumer/logs.go @@ -6,13 +6,14 @@ package consumer // import "go.opentelemetry.io/collector/consumer" import ( "context" + "go.opentelemetry.io/collector/consumer/internal" "go.opentelemetry.io/collector/pdata/plog" ) // Logs is an interface that receives plog.Logs, processes it // as needed, and sends it to the next processing node if any or to the destination. type Logs interface { - baseConsumer + internal.BaseConsumer // ConsumeLogs receives plog.Logs for consumption.
ConsumeLogs(ctx context.Context, ld plog.Logs) error } @@ -26,7 +27,7 @@ func (f ConsumeLogsFunc) ConsumeLogs(ctx context.Context, ld plog.Logs) error { } type baseLogs struct { - *baseImpl + *internal.BaseImpl ConsumeLogsFunc } @@ -36,7 +37,7 @@ func NewLogs(consume ConsumeLogsFunc, options ...Option) (Logs, error) { return nil, errNilFunc } return &baseLogs{ - baseImpl: newBaseImpl(options...), + BaseImpl: internal.NewBaseImpl(options...), ConsumeLogsFunc: consume, }, nil } diff --git a/vendor/go.opentelemetry.io/collector/consumer/metrics.go b/vendor/go.opentelemetry.io/collector/consumer/metrics.go index 50df60f02d0..47897f9363a 100644 --- a/vendor/go.opentelemetry.io/collector/consumer/metrics.go +++ b/vendor/go.opentelemetry.io/collector/consumer/metrics.go @@ -6,13 +6,14 @@ package consumer // import "go.opentelemetry.io/collector/consumer" import ( "context" + "go.opentelemetry.io/collector/consumer/internal" "go.opentelemetry.io/collector/pdata/pmetric" ) // Metrics is an interface that receives pmetric.Metrics, processes it // as needed, and sends it to the next processing node if any or to the destination. type Metrics interface { - baseConsumer + internal.BaseConsumer // ConsumeMetrics receives pmetric.Metrics for consumption. ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error } @@ -26,7 +27,7 @@ func (f ConsumeMetricsFunc) ConsumeMetrics(ctx context.Context, md pmetric.Metri } type baseMetrics struct { - *baseImpl + *internal.BaseImpl ConsumeMetricsFunc } @@ -36,7 +37,7 @@ func NewMetrics(consume ConsumeMetricsFunc, options ...Option) (Metrics, error) return nil, errNilFunc } return &baseMetrics{ - baseImpl: newBaseImpl(options...), + BaseImpl: internal.NewBaseImpl(options...), ConsumeMetricsFunc: consume, }, nil } diff --git a/vendor/go.opentelemetry.io/collector/consumer/traces.go b/vendor/go.opentelemetry.io/collector/consumer/traces.go index 56cebd53b37..60df2d04536 100644 --- a/vendor/go.opentelemetry.io/collector/consumer/traces.go +++ b/vendor/go.opentelemetry.io/collector/consumer/traces.go @@ -6,13 +6,14 @@ package consumer // import "go.opentelemetry.io/collector/consumer" import ( "context" + "go.opentelemetry.io/collector/consumer/internal" "go.opentelemetry.io/collector/pdata/ptrace" ) // Traces is an interface that receives ptrace.Traces, processes it // as needed, and sends it to the next processing node if any or to the destination. type Traces interface { - baseConsumer + internal.BaseConsumer // ConsumeTraces receives ptrace.Traces for consumption. ConsumeTraces(ctx context.Context, td ptrace.Traces) error } @@ -26,7 +27,7 @@ func (f ConsumeTracesFunc) ConsumeTraces(ctx context.Context, td ptrace.Traces) } type baseTraces struct { - *baseImpl + *internal.BaseImpl ConsumeTracesFunc } @@ -36,7 +37,7 @@ func NewTraces(consume ConsumeTracesFunc, options ...Option) (Traces, error) { return nil, errNilFunc } return &baseTraces{ - baseImpl: newBaseImpl(options...), + BaseImpl: internal.NewBaseImpl(options...), ConsumeTracesFunc: consume, }, nil } diff --git a/vendor/go.opentelemetry.io/collector/consumer/xconsumer/LICENSE b/vendor/go.opentelemetry.io/collector/consumer/xconsumer/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/consumer/xconsumer/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
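The consumertest additions a few hunks above (the profiles-aware Consumer interface, NewNop, NewErr, and the new ProfilesSink) are intended for component tests. Here is a sketch of how they might be exercised; the test itself is illustrative and not part of the diff.

package consumertest_test

import (
	"context"
	"errors"
	"testing"

	"go.opentelemetry.io/collector/consumer/consumertest"
	"go.opentelemetry.io/collector/pdata/pprofile"
)

func TestProfilesSinkSketch(t *testing.T) {
	sink := new(consumertest.ProfilesSink)

	// Feed two empty batches through the sink, as a receiver under test would.
	for i := 0; i < 2; i++ {
		if err := sink.ConsumeProfiles(context.Background(), pprofile.NewProfiles()); err != nil {
			t.Fatal(err)
		}
	}
	if got := len(sink.AllProfiles()); got != 2 {
		t.Fatalf("want 2 stored batches, got %d", got)
	}
	sink.Reset() // drops the stored batches and the sample count

	// NewErr now fails ConsumeProfiles too, matching the other signals.
	failing := consumertest.NewErr(errors.New("boom"))
	if err := failing.ConsumeProfiles(context.Background(), pprofile.NewProfiles()); err == nil {
		t.Fatal("expected an error from the failing consumer")
	}
}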
diff --git a/vendor/go.opentelemetry.io/collector/consumer/xconsumer/Makefile b/vendor/go.opentelemetry.io/collector/consumer/xconsumer/Makefile new file mode 100644 index 00000000000..ded7a36092d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/consumer/xconsumer/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/consumer/xconsumer/profiles.go b/vendor/go.opentelemetry.io/collector/consumer/xconsumer/profiles.go new file mode 100644 index 00000000000..88ba2eb5a38 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/consumer/xconsumer/profiles.go @@ -0,0 +1,47 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package xconsumer // import "go.opentelemetry.io/collector/consumer/xconsumer" + +import ( + "context" + "errors" + + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/internal" + "go.opentelemetry.io/collector/pdata/pprofile" +) + +var errNilFunc = errors.New("nil consumer func") + +// Profiles is an interface that receives pprofile.Profiles, processes it +// as needed, and sends it to the next processing node if any or to the destination. +type Profiles interface { + internal.BaseConsumer + // ConsumeProfiles receives pprofile.Profiles for consumption. + ConsumeProfiles(ctx context.Context, td pprofile.Profiles) error +} + +// ConsumeProfilesFunc is a helper function that is similar to ConsumeProfiles. +type ConsumeProfilesFunc func(ctx context.Context, td pprofile.Profiles) error + +// ConsumeProfiles calls f(ctx, td). +func (f ConsumeProfilesFunc) ConsumeProfiles(ctx context.Context, td pprofile.Profiles) error { + return f(ctx, td) +} + +type baseProfiles struct { + *internal.BaseImpl + ConsumeProfilesFunc +} + +// NewProfiles returns a Profiles configured with the provided options. +func NewProfiles(consume ConsumeProfilesFunc, options ...consumer.Option) (Profiles, error) { + if consume == nil { + return nil, errNilFunc + } + return &baseProfiles{ + BaseImpl: internal.NewBaseImpl(options...), + ConsumeProfilesFunc: consume, + }, nil +} diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporter.go b/vendor/go.opentelemetry.io/collector/exporter/exporter.go index 818fa3ecac3..0e54bfb1b3c 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporter.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exporter.go @@ -7,10 +7,9 @@ import ( "context" "fmt" - "go.uber.org/zap" - "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/pipeline" ) // Traces is an exporter that can consume traces. @@ -31,8 +30,8 @@ type Logs interface { consumer.Logs } -// CreateSettings configures exporter creators. -type CreateSettings struct { +// Settings configures exporter creators. +type Settings struct { // ID returns the ID of the component that will be created. ID component.ID @@ -49,77 +48,77 @@ type CreateSettings struct { type Factory interface { component.Factory - // CreateTracesExporter creates a TracesExporter based on this config. - // If the exporter type does not support tracing or if the config is not valid, - // an error will be returned instead. - CreateTracesExporter(ctx context.Context, set CreateSettings, cfg component.Config) (Traces, error) + // CreateTraces creates a Traces exporter based on this config. + // If the exporter type does not support tracing, + // this function returns the error [pipeline.ErrSignalNotSupported]. 
+ CreateTraces(ctx context.Context, set Settings, cfg component.Config) (Traces, error) - // TracesExporterStability gets the stability level of the TracesExporter. - TracesExporterStability() component.StabilityLevel + // TracesStability gets the stability level of the Traces exporter. + TracesStability() component.StabilityLevel - // CreateMetricsExporter creates a MetricsExporter based on this config. - // If the exporter type does not support metrics or if the config is not valid, - // an error will be returned instead. - CreateMetricsExporter(ctx context.Context, set CreateSettings, cfg component.Config) (Metrics, error) + // CreateMetrics creates a Metrics exporter based on this config. + // If the exporter type does not support metrics, + // this function returns the error [pipeline.ErrSignalNotSupported]. + CreateMetrics(ctx context.Context, set Settings, cfg component.Config) (Metrics, error) - // MetricsExporterStability gets the stability level of the MetricsExporter. - MetricsExporterStability() component.StabilityLevel + // MetricsStability gets the stability level of the Metrics exporter. + MetricsStability() component.StabilityLevel - // CreateLogsExporter creates a LogsExporter based on the config. - // If the exporter type does not support logs or if the config is not valid, - // an error will be returned instead. - CreateLogsExporter(ctx context.Context, set CreateSettings, cfg component.Config) (Logs, error) + // CreateLogs creates a Logs exporter based on the config. + // If the exporter type does not support logs, + // this function returns the error [pipeline.ErrSignalNotSupported]. + CreateLogs(ctx context.Context, set Settings, cfg component.Config) (Logs, error) - // LogsExporterStability gets the stability level of the LogsExporter. - LogsExporterStability() component.StabilityLevel + // LogsStability gets the stability level of the Logs exporter. + LogsStability() component.StabilityLevel unexportedFactoryFunc() } // FactoryOption apply changes to Factory. type FactoryOption interface { - // applyExporterFactoryOption applies the option. - applyExporterFactoryOption(o *factory) + // applyOption applies the option. + applyOption(o *factory) } var _ FactoryOption = (*factoryOptionFunc)(nil) -// factoryOptionFunc is an ExporterFactoryOption created through a function. +// factoryOptionFunc is a FactoryOption created through a function. type factoryOptionFunc func(*factory) -func (f factoryOptionFunc) applyExporterFactoryOption(o *factory) { +func (f factoryOptionFunc) applyOption(o *factory) { f(o) } // CreateTracesFunc is the equivalent of Factory.CreateTraces. -type CreateTracesFunc func(context.Context, CreateSettings, component.Config) (Traces, error) +type CreateTracesFunc func(context.Context, Settings, component.Config) (Traces, error) -// CreateTracesExporter implements ExporterFactory.CreateTracesExporter(). -func (f CreateTracesFunc) CreateTracesExporter(ctx context.Context, set CreateSettings, cfg component.Config) (Traces, error) { +// CreateTraces implements Factory.CreateTraces. +func (f CreateTracesFunc) CreateTraces(ctx context.Context, set Settings, cfg component.Config) (Traces, error) { if f == nil { - return nil, component.ErrDataTypeIsNotSupported + return nil, pipeline.ErrSignalNotSupported } return f(ctx, set, cfg) } // CreateMetricsFunc is the equivalent of Factory.CreateMetrics.
-type CreateMetricsFunc func(context.Context, CreateSettings, component.Config) (Metrics, error) +type CreateMetricsFunc func(context.Context, Settings, component.Config) (Metrics, error) -// CreateMetricsExporter implements ExporterFactory.CreateMetricsExporter(). -func (f CreateMetricsFunc) CreateMetricsExporter(ctx context.Context, set CreateSettings, cfg component.Config) (Metrics, error) { +// CreateMetrics implements Factory.CreateMetrics. +func (f CreateMetricsFunc) CreateMetrics(ctx context.Context, set Settings, cfg component.Config) (Metrics, error) { if f == nil { - return nil, component.ErrDataTypeIsNotSupported + return nil, pipeline.ErrSignalNotSupported } return f(ctx, set, cfg) } // CreateLogsFunc is the equivalent of Factory.CreateLogs. -type CreateLogsFunc func(context.Context, CreateSettings, component.Config) (Logs, error) +type CreateLogsFunc func(context.Context, Settings, component.Config) (Logs, error) -// CreateLogsExporter implements Factory.CreateLogsExporter(). -func (f CreateLogsFunc) CreateLogsExporter(ctx context.Context, set CreateSettings, cfg component.Config) (Logs, error) { +// CreateLogs implements Factory.CreateLogs. +func (f CreateLogsFunc) CreateLogs(ctx context.Context, set Settings, cfg component.Config) (Logs, error) { if f == nil { - return nil, component.ErrDataTypeIsNotSupported + return nil, pipeline.ErrSignalNotSupported } return f(ctx, set, cfg) } @@ -141,19 +140,19 @@ func (f *factory) Type() component.Type { func (f *factory) unexportedFactoryFunc() {} -func (f *factory) TracesExporterStability() component.StabilityLevel { +func (f *factory) TracesStability() component.StabilityLevel { return f.tracesStabilityLevel } -func (f *factory) MetricsExporterStability() component.StabilityLevel { +func (f *factory) MetricsStability() component.StabilityLevel { return f.metricsStabilityLevel } -func (f *factory) LogsExporterStability() component.StabilityLevel { +func (f *factory) LogsStability() component.StabilityLevel { return f.logsStabilityLevel } -// WithTraces overrides the default "error not supported" implementation for CreateTracesExporter and the default "undefined" stability level. +// WithTraces overrides the default "error not supported" implementation for Factory.CreateTraces and the default "undefined" stability level. func WithTraces(createTraces CreateTracesFunc, sl component.StabilityLevel) FactoryOption { return factoryOptionFunc(func(o *factory) { o.tracesStabilityLevel = sl @@ -161,7 +160,7 @@ func WithTraces(createTraces CreateTracesFunc, sl component.StabilityLevel) Fact }) } -// WithMetrics overrides the default "error not supported" implementation for CreateMetricsExporter and the default "undefined" stability level. +// WithMetrics overrides the default "error not supported" implementation for Factory.CreateMetrics and the default "undefined" stability level. func WithMetrics(createMetrics CreateMetricsFunc, sl component.StabilityLevel) FactoryOption { return factoryOptionFunc(func(o *factory) { o.metricsStabilityLevel = sl @@ -169,7 +168,7 @@ func WithMetrics(createMetrics CreateMetricsFunc, sl component.StabilityLevel) F }) } -// WithLogs overrides the default "error not supported" implementation for CreateLogsExporter and the default "undefined" stability level. +// WithLogs overrides the default "error not supported" implementation for Factory.CreateLogs and the default "undefined" stability level. 
func WithLogs(createLogs CreateLogsFunc, sl component.StabilityLevel) FactoryOption { return factoryOptionFunc(func(o *factory) { o.logsStabilityLevel = sl @@ -184,7 +183,7 @@ func NewFactory(cfgType component.Type, createDefaultConfig component.CreateDefa CreateDefaultConfigFunc: createDefaultConfig, } for _, opt := range options { - opt.applyExporterFactoryOption(f) + opt.applyOption(f) } return f } @@ -201,77 +200,3 @@ func MakeFactoryMap(factories ...Factory) (map[component.Type]Factory, error) { } return fMap, nil } - -// Builder exporter is a helper struct that given a set of Configs and Factories helps with creating exporters. -type Builder struct { - cfgs map[component.ID]component.Config - factories map[component.Type]Factory -} - -// NewBuilder creates a new exporter.Builder to help with creating components form a set of configs and factories. -func NewBuilder(cfgs map[component.ID]component.Config, factories map[component.Type]Factory) *Builder { - return &Builder{cfgs: cfgs, factories: factories} -} - -// CreateTraces creates a Traces exporter based on the settings and config. -func (b *Builder) CreateTraces(ctx context.Context, set CreateSettings) (Traces, error) { - cfg, existsCfg := b.cfgs[set.ID] - if !existsCfg { - return nil, fmt.Errorf("exporter %q is not configured", set.ID) - } - - f, existsFactory := b.factories[set.ID.Type()] - if !existsFactory { - return nil, fmt.Errorf("exporter factory not available for: %q", set.ID) - } - - logStabilityLevel(set.Logger, f.TracesExporterStability()) - return f.CreateTracesExporter(ctx, set, cfg) -} - -// CreateMetrics creates a Metrics exporter based on the settings and config. -func (b *Builder) CreateMetrics(ctx context.Context, set CreateSettings) (Metrics, error) { - cfg, existsCfg := b.cfgs[set.ID] - if !existsCfg { - return nil, fmt.Errorf("exporter %q is not configured", set.ID) - } - - f, existsFactory := b.factories[set.ID.Type()] - if !existsFactory { - return nil, fmt.Errorf("exporter factory not available for: %q", set.ID) - } - - logStabilityLevel(set.Logger, f.MetricsExporterStability()) - return f.CreateMetricsExporter(ctx, set, cfg) -} - -// CreateLogs creates a Logs exporter based on the settings and config. -func (b *Builder) CreateLogs(ctx context.Context, set CreateSettings) (Logs, error) { - cfg, existsCfg := b.cfgs[set.ID] - if !existsCfg { - return nil, fmt.Errorf("exporter %q is not configured", set.ID) - } - - f, existsFactory := b.factories[set.ID.Type()] - if !existsFactory { - return nil, fmt.Errorf("exporter factory not available for: %q", set.ID) - } - - logStabilityLevel(set.Logger, f.LogsExporterStability()) - return f.CreateLogsExporter(ctx, set, cfg) -} - -func (b *Builder) Factory(componentType component.Type) component.Factory { - return b.factories[componentType] -} - -// logStabilityLevel logs the stability level of a component. The log level is set to info for -// undefined, unmaintained, deprecated and development. The log level is set to debug -// for alpha, beta and stable. 
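With `exporter.Builder` removed from the public package, callers that previously went through it now resolve the factory and call `CreateTraces`/`CreateMetrics`/`CreateLogs` directly (inside the collector this is handled by internal builders). A hedged sketch of that flow, reusing the illustrative `nopexporter` from above plus `fmt` in the imports:

```go
// newTracesFromMap mirrors what the removed Builder did: look up the factory
// for an ID and create the component. id, telSet and info are assumed to be
// supplied by the caller.
func newTracesFromMap(ctx context.Context, id component.ID, telSet component.TelemetrySettings, info component.BuildInfo) (exporter.Traces, error) {
	factories, err := exporter.MakeFactoryMap(nopexporter.NewFactory())
	if err != nil {
		return nil, err // MakeFactoryMap rejects duplicate factory types
	}
	f, ok := factories[id.Type()]
	if !ok {
		return nil, fmt.Errorf("exporter factory not available for: %q", id)
	}
	set := exporter.Settings{ID: id, TelemetrySettings: telSet, BuildInfo: info}
	return f.CreateTraces(ctx, set, f.CreateDefaultConfig())
}
```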
-func logStabilityLevel(logger *zap.Logger, sl component.StabilityLevel) { - if sl >= component.StabilityLevelAlpha { - logger.Debug(sl.LogMessage()) - } else { - logger.Info(sl.LogMessage()) - } -} diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterbatcher/batch_func.go b/vendor/go.opentelemetry.io/collector/exporter/exporterbatcher/batch_func.go deleted file mode 100644 index 0298276ba7b..00000000000 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterbatcher/batch_func.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package exporterbatcher // import "go.opentelemetry.io/collector/exporter/exporterbatcher" - -import "context" - -// BatchMergeFunc is a function that merges two requests into a single request. -// Do not mutate the requests passed to the function if error can be returned after mutation or if the exporter is -// marked as not mutable. -// Experimental: This API is at the early stage of development and may change without backward compatibility -// until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. -type BatchMergeFunc[T any] func(context.Context, T, T) (T, error) - -// BatchMergeSplitFunc is a function that merge and/or splits one or two requests into multiple requests based on the -// configured limit provided in MaxSizeConfig. -// All the returned requests MUST have a number of items that does not exceed the maximum number of items. -// Size of the last returned request MUST be less or equal than the size of any other returned request. -// The original request MUST not be mutated if error is returned after mutation or if the exporter is -// marked as not mutable. The length of the returned slice MUST not be 0. The optionalReq argument can be nil, -// make sure to check it before using. -// Experimental: This API is at the early stage of development and may change without backward compatibility -// until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. 
-type BatchMergeSplitFunc[T any] func(ctx context.Context, cfg MaxSizeConfig, optionalReq T, req T) ([]T, error) diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/README.md b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/README.md index e32c948c924..e62c9d4c419 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/README.md +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/README.md @@ -17,6 +17,7 @@ The following configuration options can be modified: - `enabled` (default = true) - `num_consumers` (default = 10): Number of consumers that dequeue batches; ignored if `enabled` is `false` - `queue_size` (default = 1000): Maximum number of batches kept in memory before dropping; ignored if `enabled` is `false` + - `blocking` (default = false): If true blocks until queue has space for the request otherwise returns immediately; ignored if `enabled` is `false` User should calculate this as `num_seconds * requests_per_second / requests_per_batch` where: - `num_seconds` is the number of seconds to buffer in case of a backend outage - `requests_per_second` is the average number of requests per seconds @@ -109,4 +110,3 @@ service: ``` [filestorage]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/extension/storage/filestorage -[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/batch_sender.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/batch_sender.go deleted file mode 100644 index 086c9724aa3..00000000000 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/batch_sender.go +++ /dev/null @@ -1,233 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package exporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper" - -import ( - "context" - "sync" - "sync/atomic" - "time" - - "go.uber.org/zap" - - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/exporter" - "go.opentelemetry.io/collector/exporter/exporterbatcher" -) - -// batchSender is a component that places requests into batches before passing them to the downstream senders. -// Batches are sent out with any of the following conditions: -// - batch size reaches cfg.MinSizeItems -// - cfg.FlushTimeout is elapsed since the timestamp when the previous batch was sent out. -// - concurrencyLimit is reached. -type batchSender struct { - baseRequestSender - cfg exporterbatcher.Config - mergeFunc exporterbatcher.BatchMergeFunc[Request] - mergeSplitFunc exporterbatcher.BatchMergeSplitFunc[Request] - - // concurrencyLimit is the maximum number of goroutines that can be blocked by the batcher. - // If this number is reached and all the goroutines are busy, the batch will be sent right away. - // Populated from the number of queue consumers if queue is enabled. - concurrencyLimit uint64 - activeRequests atomic.Uint64 - - resetTimerCh chan struct{} - - mu sync.Mutex - activeBatch *batch - - logger *zap.Logger - - shutdownCh chan struct{} - shutdownCompleteCh chan struct{} - stopped *atomic.Bool -} - -// newBatchSender returns a new batch consumer component. 
-func newBatchSender(cfg exporterbatcher.Config, set exporter.CreateSettings, - mf exporterbatcher.BatchMergeFunc[Request], msf exporterbatcher.BatchMergeSplitFunc[Request]) *batchSender { - bs := &batchSender{ - activeBatch: newEmptyBatch(), - cfg: cfg, - logger: set.Logger, - mergeFunc: mf, - mergeSplitFunc: msf, - shutdownCh: make(chan struct{}), - shutdownCompleteCh: make(chan struct{}), - stopped: &atomic.Bool{}, - resetTimerCh: make(chan struct{}), - } - return bs -} - -func (bs *batchSender) Start(_ context.Context, _ component.Host) error { - timer := time.NewTimer(bs.cfg.FlushTimeout) - go func() { - for { - select { - case <-bs.shutdownCh: - // There is a minimal chance that another request is added after the shutdown signal. - // This loop will handle that case. - for bs.activeRequests.Load() > 0 { - bs.mu.Lock() - if bs.activeBatch.request != nil { - bs.exportActiveBatch() - } - bs.mu.Unlock() - } - if !timer.Stop() { - <-timer.C - } - close(bs.shutdownCompleteCh) - return - case <-timer.C: - bs.mu.Lock() - if bs.activeBatch.request != nil { - bs.exportActiveBatch() - } - bs.mu.Unlock() - timer.Reset(bs.cfg.FlushTimeout) - case <-bs.resetTimerCh: - if !timer.Stop() { - <-timer.C - } - timer.Reset(bs.cfg.FlushTimeout) - } - } - }() - - return nil -} - -type batch struct { - ctx context.Context - request Request - done chan struct{} - err error -} - -func newEmptyBatch() *batch { - return &batch{ - ctx: context.Background(), - done: make(chan struct{}), - } -} - -// exportActiveBatch exports the active batch asynchronously and replaces it with a new one. -// Caller must hold the lock. -func (bs *batchSender) exportActiveBatch() { - go func(b *batch) { - b.err = b.request.Export(b.ctx) - close(b.done) - }(bs.activeBatch) - bs.activeBatch = newEmptyBatch() -} - -func (bs *batchSender) resetTimer() { - if !bs.stopped.Load() { - bs.resetTimerCh <- struct{}{} - } -} - -// isActiveBatchReady returns true if the active batch is ready to be exported. -// The batch is ready if it has reached the minimum size or the concurrency limit is reached. -// Caller must hold the lock. -func (bs *batchSender) isActiveBatchReady() bool { - return bs.activeBatch.request.ItemsCount() >= bs.cfg.MinSizeItems || - (bs.concurrencyLimit > 0 && bs.activeRequests.Load() >= bs.concurrencyLimit) -} - -func (bs *batchSender) send(ctx context.Context, req Request) error { - // Stopped batch sender should act as pass-through to allow the queue to be drained. - if bs.stopped.Load() { - return bs.nextSender.send(ctx, req) - } - - if bs.cfg.MaxSizeItems > 0 { - return bs.sendMergeSplitBatch(ctx, req) - } - return bs.sendMergeBatch(ctx, req) -} - -// sendMergeSplitBatch sends the request to the batch which may be split into multiple requests. -func (bs *batchSender) sendMergeSplitBatch(ctx context.Context, req Request) error { - bs.mu.Lock() - bs.activeRequests.Add(1) - defer bs.activeRequests.Add(^uint64(0)) - - reqs, err := bs.mergeSplitFunc(ctx, bs.cfg.MaxSizeConfig, bs.activeBatch.request, req) - if err != nil || len(reqs) == 0 { - bs.mu.Unlock() - return err - } - if len(reqs) == 1 || bs.activeBatch.request != nil { - bs.updateActiveBatch(ctx, reqs[0]) - batch := bs.activeBatch - if bs.isActiveBatchReady() || len(reqs) > 1 { - bs.exportActiveBatch() - bs.resetTimer() - } - bs.mu.Unlock() - <-batch.done - if batch.err != nil { - return batch.err - } - reqs = reqs[1:] - } else { - bs.mu.Unlock() - } - - // Intentionally do not put the last request in the active batch to not block it. 
- // TODO: Consider including the partial request in the error to avoid double publishing. - for _, r := range reqs { - if err := r.Export(ctx); err != nil { - return err - } - } - return nil -} - -// sendMergeBatch sends the request to the batch and waits for the batch to be exported. -func (bs *batchSender) sendMergeBatch(ctx context.Context, req Request) error { - bs.mu.Lock() - bs.activeRequests.Add(1) - defer bs.activeRequests.Add(^uint64(0)) - - if bs.activeBatch.request != nil { - var err error - req, err = bs.mergeFunc(ctx, bs.activeBatch.request, req) - if err != nil { - bs.mu.Unlock() - return err - } - } - bs.updateActiveBatch(ctx, req) - batch := bs.activeBatch - if bs.isActiveBatchReady() { - bs.exportActiveBatch() - bs.resetTimer() - } - bs.mu.Unlock() - <-batch.done - return batch.err -} - -// updateActiveBatch update the active batch to the new merged request and context. -// The context is only set once and is not updated after the first call. -// Merging the context would be complex and require an additional goroutine to handle the context cancellation. -// We take the approach of using the context from the first request since it's likely to have the shortest timeout. -func (bs *batchSender) updateActiveBatch(ctx context.Context, req Request) { - if bs.activeBatch.request == nil { - bs.activeBatch.ctx = ctx - } - bs.activeBatch.request = req -} - -func (bs *batchSender) Shutdown(context.Context) error { - bs.stopped.Store(true) - close(bs.shutdownCh) - <-bs.shutdownCompleteCh - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/common.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/common.go index aa84e9019f2..ab1f0db4e0b 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/common.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/common.go @@ -4,115 +4,46 @@ package exporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper" import ( - "context" - "fmt" - - "go.uber.org/multierr" - "go.uber.org/zap" - "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configretry" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/exporter" "go.opentelemetry.io/collector/exporter/exporterbatcher" + "go.opentelemetry.io/collector/exporter/exporterhelper/internal" "go.opentelemetry.io/collector/exporter/exporterqueue" ) -// requestSender is an abstraction of a sender for a request independent of the type of the data (traces, metrics, logs). -type requestSender interface { - component.Component - send(context.Context, Request) error - setNextSender(nextSender requestSender) -} - -type baseRequestSender struct { - component.StartFunc - component.ShutdownFunc - nextSender requestSender -} - -var _ requestSender = (*baseRequestSender)(nil) - -func (b *baseRequestSender) send(ctx context.Context, req Request) error { - return b.nextSender.send(ctx, req) -} - -func (b *baseRequestSender) setNextSender(nextSender requestSender) { - b.nextSender = nextSender -} - -type obsrepSenderFactory func(obsrep *ObsReport) requestSender - -// Option apply changes to baseExporter. -type Option func(*baseExporter) error +// Option apply changes to BaseExporter. +type Option = internal.Option // WithStart overrides the default Start function for an exporter. // The default start function does nothing and always returns nil. 
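The `requestSender` chain deleted from this file lives on in `exporterhelper/internal` as a generic sender, which the rest of this diff passes around as `Sender[internal.Request]`. Its definition is not included in any hunk here, so the shape below is inferred from the call sites (`Send`, `SetNextSender`, `Start`, `Shutdown`) and should be read as an assumption rather than the library's exact code:

```go
// Inferred from usage in base_exporter.go and batch_sender.go later in this diff.
type Sender[K any] interface {
	component.Component
	Send(context.Context, K) error
	SetNextSender(next Sender[K])
}
```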
func WithStart(start component.StartFunc) Option { - return func(o *baseExporter) error { - o.StartFunc = start - return nil - } + return internal.WithStart(start) } // WithShutdown overrides the default Shutdown function for an exporter. // The default shutdown function does nothing and always returns nil. func WithShutdown(shutdown component.ShutdownFunc) Option { - return func(o *baseExporter) error { - o.ShutdownFunc = shutdown - return nil - } + return internal.WithShutdown(shutdown) } -// WithTimeout overrides the default TimeoutSettings for an exporter. -// The default TimeoutSettings is 5 seconds. -func WithTimeout(timeoutSettings TimeoutSettings) Option { - return func(o *baseExporter) error { - o.timeoutSender.cfg = timeoutSettings - return nil - } +// WithTimeout overrides the default TimeoutConfig for an exporter. +// The default TimeoutConfig is 5 seconds. +func WithTimeout(timeoutConfig TimeoutConfig) Option { + return internal.WithTimeout(timeoutConfig) } // WithRetry overrides the default configretry.BackOffConfig for an exporter. // The default configretry.BackOffConfig is to disable retries. func WithRetry(config configretry.BackOffConfig) Option { - return func(o *baseExporter) error { - if !config.Enabled { - o.exportFailureMessage += " Try enabling retry_on_failure config option to retry on retryable errors." - return nil - } - o.retrySender = newRetrySender(config, o.set) - return nil - } + return internal.WithRetry(config) } -// WithQueue overrides the default QueueSettings for an exporter. -// The default QueueSettings is to disable queueing. +// WithQueue overrides the default QueueConfig for an exporter. +// The default QueueConfig is to disable queueing. // This option cannot be used with the new exporter helpers New[Traces|Metrics|Logs]RequestExporter. -func WithQueue(config QueueSettings) Option { - return func(o *baseExporter) error { - if o.marshaler == nil || o.unmarshaler == nil { - return fmt.Errorf("WithQueue option is not available for the new request exporters, use WithRequestQueue instead") - } - if !config.Enabled { - o.exportFailureMessage += " Try enabling sending_queue to survive temporary failures." - return nil - } - qf := exporterqueue.NewPersistentQueueFactory[Request](config.StorageID, exporterqueue.PersistentQueueSettings[Request]{ - Marshaler: o.marshaler, - Unmarshaler: o.unmarshaler, - }) - q := qf(context.Background(), exporterqueue.Settings{ - DataType: o.signal, - ExporterSettings: o.set, - }, exporterqueue.Config{ - Enabled: config.Enabled, - NumConsumers: config.NumConsumers, - QueueSize: config.QueueSize, - }) - o.queueSender = newQueueSender(q, o.set, config.NumConsumers, o.exportFailureMessage) - return nil - } +func WithQueue(config internal.QueueConfig) Option { + return internal.WithQueue(config) } // WithRequestQueue enables queueing for an exporter. @@ -120,49 +51,14 @@ func WithQueue(config QueueSettings) Option { // Experimental: This API is at the early stage of development and may change without backward compatibility // until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. func WithRequestQueue(cfg exporterqueue.Config, queueFactory exporterqueue.Factory[Request]) Option { - return func(o *baseExporter) error { - if o.marshaler != nil || o.unmarshaler != nil { - return fmt.Errorf("WithRequestQueue option must be used with the new request exporters only, use WithQueue instead") - } - if !cfg.Enabled { - o.exportFailureMessage += " Try enabling sending_queue to survive temporary failures." 
- return nil - } - set := exporterqueue.Settings{ - DataType: o.signal, - ExporterSettings: o.set, - } - o.queueSender = newQueueSender(queueFactory(context.Background(), set, cfg), o.set, cfg.NumConsumers, o.exportFailureMessage) - return nil - } + return internal.WithRequestQueue(cfg, queueFactory) } // WithCapabilities overrides the default Capabilities() function for a Consumer. // The default is non-mutable data. // TODO: Verify if we can change the default to be mutable as we do for processors. func WithCapabilities(capabilities consumer.Capabilities) Option { - return func(o *baseExporter) error { - o.consumerOptions = append(o.consumerOptions, consumer.WithCapabilities(capabilities)) - return nil - } -} - -// BatcherOption apply changes to batcher sender. -type BatcherOption func(*batchSender) error - -// WithRequestBatchFuncs sets the functions for merging and splitting batches for an exporter built for custom request types. -func WithRequestBatchFuncs(mf exporterbatcher.BatchMergeFunc[Request], msf exporterbatcher.BatchMergeSplitFunc[Request]) BatcherOption { - return func(bs *batchSender) error { - if mf == nil || msf == nil { - return fmt.Errorf("WithRequestBatchFuncs must be provided with non-nil functions") - } - if bs.mergeFunc != nil || bs.mergeSplitFunc != nil { - return fmt.Errorf("WithRequestBatchFuncs can only be used once with request-based exporters") - } - bs.mergeFunc = mf - bs.mergeSplitFunc = msf - return nil - } + return internal.WithCapabilities(capabilities) } // WithBatcher enables batching for an exporter based on custom request types. @@ -170,166 +66,6 @@ func WithRequestBatchFuncs(mf exporterbatcher.BatchMergeFunc[Request], msf expor // WithRequestBatchFuncs provided. // This API is at the early stage of development and may change without backward compatibility // until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. -func WithBatcher(cfg exporterbatcher.Config, opts ...BatcherOption) Option { - return func(o *baseExporter) error { - if !cfg.Enabled { - return nil - } - - bs := newBatchSender(cfg, o.set, o.batchMergeFunc, o.batchMergeSplitfunc) - for _, opt := range opts { - if err := opt(bs); err != nil { - return err - } - } - if bs.mergeFunc == nil || bs.mergeSplitFunc == nil { - return fmt.Errorf("WithRequestBatchFuncs must be provided for the batcher applied to the request-based exporters") - } - o.batchSender = bs - return nil - } -} - -// withMarshaler is used to set the request marshaler for the new exporter helper. -// It must be provided as the first option when creating a new exporter helper. -func withMarshaler(marshaler exporterqueue.Marshaler[Request]) Option { - return func(o *baseExporter) error { - o.marshaler = marshaler - return nil - } -} - -// withUnmarshaler is used to set the request unmarshaler for the new exporter helper. -// It must be provided as the first option when creating a new exporter helper. -func withUnmarshaler(unmarshaler exporterqueue.Unmarshaler[Request]) Option { - return func(o *baseExporter) error { - o.unmarshaler = unmarshaler - return nil - } -} - -// withBatchFuncs is used to set the functions for merging and splitting batches for OLTP-based exporters. -// It must be provided as the first option when creating a new exporter helper. 
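From the caller's side the relocated options compose exactly as before. Below is a sketch of an option set for a request-based exporter that also enables the new `blocking` queue behaviour documented in the README hunk above. `exporterqueue.NewMemoryQueueFactory` and the `TimeoutConfig.Timeout` field are assumptions based on the surrounding APIs, not shown in this diff:

```go
func requestExporterOptions() []exporterhelper.Option {
	return []exporterhelper.Option{
		exporterhelper.WithTimeout(exporterhelper.TimeoutConfig{Timeout: 10 * time.Second}),
		exporterhelper.WithRetry(configretry.NewDefaultBackOffConfig()),
		exporterhelper.WithRequestQueue(
			exporterqueue.Config{
				Enabled:      true,
				NumConsumers: 10,
				QueueSize:    1000,
				// Blocking makes producers wait for queue space instead of
				// failing fast; it maps to the new `blocking` README option.
				Blocking: true,
			},
			exporterqueue.NewMemoryQueueFactory[exporterhelper.Request](),
		),
		exporterhelper.WithBatcher(exporterbatcher.NewDefaultConfig()),
	}
}
```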
-func withBatchFuncs(mf exporterbatcher.BatchMergeFunc[Request], msf exporterbatcher.BatchMergeSplitFunc[Request]) Option { - return func(o *baseExporter) error { - o.batchMergeFunc = mf - o.batchMergeSplitfunc = msf - return nil - } -} - -// baseExporter contains common fields between different exporter types. -type baseExporter struct { - component.StartFunc - component.ShutdownFunc - - signal component.DataType - - batchMergeFunc exporterbatcher.BatchMergeFunc[Request] - batchMergeSplitfunc exporterbatcher.BatchMergeSplitFunc[Request] - - marshaler exporterqueue.Marshaler[Request] - unmarshaler exporterqueue.Unmarshaler[Request] - - set exporter.CreateSettings - obsrep *ObsReport - - // Message for the user to be added with an export failure message. - exportFailureMessage string - - // Chain of senders that the exporter helper applies before passing the data to the actual exporter. - // The data is handled by each sender in the respective order starting from the queueSender. - // Most of the senders are optional, and initialized with a no-op path-through sender. - batchSender requestSender - queueSender requestSender - obsrepSender requestSender - retrySender requestSender - timeoutSender *timeoutSender // timeoutSender is always initialized. - - consumerOptions []consumer.Option -} - -func newBaseExporter(set exporter.CreateSettings, signal component.DataType, osf obsrepSenderFactory, options ...Option) (*baseExporter, error) { - obsReport, err := NewObsReport(ObsReportSettings{ExporterID: set.ID, ExporterCreateSettings: set}) - if err != nil { - return nil, err - } - - be := &baseExporter{ - signal: signal, - - batchSender: &baseRequestSender{}, - queueSender: &baseRequestSender{}, - obsrepSender: osf(obsReport), - retrySender: &baseRequestSender{}, - timeoutSender: &timeoutSender{cfg: NewDefaultTimeoutSettings()}, - - set: set, - obsrep: obsReport, - } - - for _, op := range options { - err = multierr.Append(err, op(be)) - } - if err != nil { - return nil, err - } - - be.connectSenders() - - if bs, ok := be.batchSender.(*batchSender); ok { - // If queue sender is enabled assign to the batch sender the same number of workers. - if qs, ok := be.queueSender.(*queueSender); ok { - bs.concurrencyLimit = uint64(qs.numConsumers) - } - // Batcher sender mutates the data. - be.consumerOptions = append(be.consumerOptions, consumer.WithCapabilities(consumer.Capabilities{MutatesData: true})) - } - - return be, nil -} - -// send sends the request using the first sender in the chain. -func (be *baseExporter) send(ctx context.Context, req Request) error { - err := be.queueSender.send(ctx, req) - if err != nil { - be.set.Logger.Error("Exporting failed. Rejecting data."+be.exportFailureMessage, - zap.Error(err), zap.Int("rejected_items", req.ItemsCount())) - } - return err -} - -// connectSenders connects the senders in the predefined order. -func (be *baseExporter) connectSenders() { - be.queueSender.setNextSender(be.batchSender) - be.batchSender.setNextSender(be.obsrepSender) - be.obsrepSender.setNextSender(be.retrySender) - be.retrySender.setNextSender(be.timeoutSender) -} - -func (be *baseExporter) Start(ctx context.Context, host component.Host) error { - // First start the wrapped exporter. - if err := be.StartFunc.Start(ctx, host); err != nil { - return err - } - - // If no error then start the batchSender. - if err := be.batchSender.Start(ctx, host); err != nil { - return err - } - - // Last start the queueSender. 
- return be.queueSender.Start(ctx, host) -} - -func (be *baseExporter) Shutdown(ctx context.Context) error { - return multierr.Combine( - // First shutdown the retry sender, so the queue sender can flush the queue without retries. - be.retrySender.Shutdown(ctx), - // Then shutdown the batch sender - be.batchSender.Shutdown(ctx), - // Then shutdown the queue sender. - be.queueSender.Shutdown(ctx), - // Last shutdown the wrapped exporter itself. - be.ShutdownFunc.Shutdown(ctx)) +func WithBatcher(cfg exporterbatcher.Config) Option { + return internal.WithBatcher(cfg) } diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/documentation.md b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/documentation.md index ac974e01dd2..6a94947aeba 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/documentation.md +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/documentation.md @@ -6,74 +6,90 @@ The following telemetry is emitted by this component. -### exporter_enqueue_failed_log_records +### otelcol_exporter_enqueue_failed_log_records -Number of log records failed to be added to the sending queue. +Number of log records failed to be added to the sending queue. [alpha] | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {records} | Sum | Int | true | -### exporter_enqueue_failed_metric_points +### otelcol_exporter_enqueue_failed_metric_points -Number of metric points failed to be added to the sending queue. +Number of metric points failed to be added to the sending queue. [alpha] | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {datapoints} | Sum | Int | true | -### exporter_enqueue_failed_spans +### otelcol_exporter_enqueue_failed_spans -Number of spans failed to be added to the sending queue. +Number of spans failed to be added to the sending queue. [alpha] | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {spans} | Sum | Int | true | -### exporter_send_failed_log_records +### otelcol_exporter_queue_capacity -Number of log records in failed attempts to send to destination. +Fixed capacity of the retry queue (in batches) [alpha] + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {batches} | Gauge | Int | + +### otelcol_exporter_queue_size + +Current size of the retry queue (in batches) [alpha] + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {batches} | Gauge | Int | + +### otelcol_exporter_send_failed_log_records + +Number of log records in failed attempts to send to destination. [alpha] | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {records} | Sum | Int | true | -### exporter_send_failed_metric_points +### otelcol_exporter_send_failed_metric_points -Number of metric points in failed attempts to send to destination. +Number of metric points in failed attempts to send to destination. [alpha] | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {datapoints} | Sum | Int | true | -### exporter_send_failed_spans +### otelcol_exporter_send_failed_spans -Number of spans in failed attempts to send to destination. +Number of spans in failed attempts to send to destination. 
[alpha] | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {spans} | Sum | Int | true | -### exporter_sent_log_records +### otelcol_exporter_sent_log_records -Number of log record successfully sent to destination. +Number of log record successfully sent to destination. [alpha] | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {records} | Sum | Int | true | -### exporter_sent_metric_points +### otelcol_exporter_sent_metric_points -Number of metric points successfully sent to destination. +Number of metric points successfully sent to destination. [alpha] | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {datapoints} | Sum | Int | true | -### exporter_sent_spans +### otelcol_exporter_sent_spans -Number of spans successfully sent to destination. +Number of spans successfully sent to destination. [alpha] | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {spans} | Sum | Int | true | diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelper.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelper.go new file mode 100644 index 00000000000..cb2c10931fa --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelper.go @@ -0,0 +1,19 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper" + +import "go.opentelemetry.io/collector/exporter/internal" + +// Request represents a single request that can be sent to an external endpoint. +// Experimental: This API is at the early stage of development and may change without backward compatibility +// until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. +type Request = internal.Request + +// RequestErrorHandler is an optional interface that can be implemented by Request to provide a way handle partial +// temporary failures. For example, if some items failed to process and can be retried, this interface allows to +// return a new Request that contains the items left to be sent. Otherwise, the original Request should be returned. +// If not implemented, the original Request will be returned assuming the error is applied to the whole Request. +// Experimental: This API is at the early stage of development and may change without backward compatibility +// until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. 
+type RequestErrorHandler = internal.RequestErrorHandler diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/base_exporter.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/base_exporter.go new file mode 100644 index 00000000000..da68c37092c --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/base_exporter.go @@ -0,0 +1,301 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/exporter/exporterhelper/internal" + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel/codes" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + "go.uber.org/multierr" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configretry" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/exporterbatcher" + "go.opentelemetry.io/collector/exporter/exporterqueue" + "go.opentelemetry.io/collector/exporter/internal" + "go.opentelemetry.io/collector/featuregate" + "go.opentelemetry.io/collector/pipeline" +) + +var usePullingBasedExporterQueueBatcher = featuregate.GlobalRegistry().MustRegister( + "exporter.UsePullingBasedExporterQueueBatcher", + featuregate.StageAlpha, + featuregate.WithRegisterFromVersion("v0.115.0"), + featuregate.WithRegisterDescription("if set to true, turns on the pulling-based exporter queue batcher"), +) + +type ObsrepSenderFactory = func(obsrep *ObsReport) Sender[internal.Request] + +// Option applies changes to BaseExporter. +type Option func(*BaseExporter) error + +// BaseExporter contains common fields between different exporter types. +type BaseExporter struct { + component.StartFunc + component.ShutdownFunc + + Marshaler exporterqueue.Marshaler[internal.Request] + Unmarshaler exporterqueue.Unmarshaler[internal.Request] + + Set exporter.Settings + Obsrep *ObsReport + + // Message for the user to be added with an export failure message. + ExportFailureMessage string + + // Chain of senders that the exporter helper applies before passing the data to the actual exporter. + // The data is handled by each sender in the respective order starting from the queueSender. + // Most of the senders are optional, and initialized with a no-op pass-through sender. + BatchSender Sender[internal.Request] + QueueSender Sender[internal.Request] + ObsrepSender Sender[internal.Request] + RetrySender Sender[internal.Request] + TimeoutSender *TimeoutSender // TimeoutSender is always initialized.
+ + ConsumerOptions []consumer.Option + + queueCfg exporterqueue.Config + queueFactory exporterqueue.Factory[internal.Request] + BatcherCfg exporterbatcher.Config +} + +func NewBaseExporter(set exporter.Settings, signal pipeline.Signal, osf ObsrepSenderFactory, options ...Option) (*BaseExporter, error) { + obsReport, err := NewExporter(ObsReportSettings{ExporterID: set.ID, ExporterCreateSettings: set, Signal: signal}) + if err != nil { + return nil, err + } + + be := &BaseExporter{ + BatchSender: &BaseSender[internal.Request]{}, + QueueSender: &BaseSender[internal.Request]{}, + ObsrepSender: osf(obsReport), + RetrySender: &BaseSender[internal.Request]{}, + TimeoutSender: &TimeoutSender{cfg: NewDefaultTimeoutConfig()}, + + Set: set, + Obsrep: obsReport, + } + + for _, op := range options { + err = multierr.Append(err, op(be)) + } + if err != nil { + return nil, err + } + + if be.queueCfg.Enabled { + q := be.queueFactory( + context.Background(), + exporterqueue.Settings{ + Signal: signal, + ExporterSettings: be.Set, + }, + be.queueCfg) + be.QueueSender = NewQueueSender(q, be.Set, be.queueCfg.NumConsumers, be.ExportFailureMessage, be.Obsrep, be.BatcherCfg) + } + + if !usePullingBasedExporterQueueBatcher.IsEnabled() && be.BatcherCfg.Enabled || + usePullingBasedExporterQueueBatcher.IsEnabled() && be.BatcherCfg.Enabled && !be.queueCfg.Enabled { + bs := NewBatchSender(be.BatcherCfg, be.Set) + be.BatchSender = bs + } + + be.connectSenders() + + if bs, ok := be.BatchSender.(*BatchSender); ok { + // If queue sender is enabled assign to the batch sender the same number of workers. + if qs, ok := be.QueueSender.(*QueueSender); ok { + bs.concurrencyLimit = int64(qs.numConsumers) + } + // Batcher sender mutates the data. + be.ConsumerOptions = append(be.ConsumerOptions, consumer.WithCapabilities(consumer.Capabilities{MutatesData: true})) + } + + return be, nil +} + +// send sends the request using the first sender in the chain. +func (be *BaseExporter) Send(ctx context.Context, req internal.Request) error { + err := be.QueueSender.Send(ctx, req) + if err != nil { + be.Set.Logger.Error("Exporting failed. Rejecting data."+be.ExportFailureMessage, + zap.Error(err), zap.Int("rejected_items", req.ItemsCount())) + } + return err +} + +// connectSenders connects the senders in the predefined order. +func (be *BaseExporter) connectSenders() { + be.QueueSender.SetNextSender(be.BatchSender) + be.BatchSender.SetNextSender(be.ObsrepSender) + be.ObsrepSender.SetNextSender(be.RetrySender) + be.RetrySender.SetNextSender(be.TimeoutSender) +} + +func (be *BaseExporter) Start(ctx context.Context, host component.Host) error { + // First start the wrapped exporter. + if err := be.StartFunc.Start(ctx, host); err != nil { + return err + } + + // If no error then start the BatchSender. + if err := be.BatchSender.Start(ctx, host); err != nil { + return err + } + + // Last start the queueSender. + return be.QueueSender.Start(ctx, host) +} + +func (be *BaseExporter) Shutdown(ctx context.Context) error { + return multierr.Combine( + // First shutdown the retry sender, so the queue sender can flush the queue without retries. + be.RetrySender.Shutdown(ctx), + // Then shutdown the batch sender + be.BatchSender.Shutdown(ctx), + // Then shutdown the queue sender. + be.QueueSender.Shutdown(ctx), + // Last shutdown the wrapped exporter itself. + be.ShutdownFunc.Shutdown(ctx)) +} + +// WithStart overrides the default Start function for an exporter. +// The default start function does nothing and always returns nil. 
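The double-sided feature-gate check in `NewBaseExporter` is easy to misread. An equivalent, flattened form of the same boolean logic (a readability sketch only, given a `*BaseExporter` named `be`):

```go
usePull := usePullingBasedExporterQueueBatcher.IsEnabled()
// The in-process BatchSender runs whenever batching is enabled, except when
// the pulling-based batcher gate is on and a queue is configured; in that
// case batching is handled by the queue consumers inside QueueSender.
useBatchSender := be.BatcherCfg.Enabled && !(usePull && be.queueCfg.Enabled)
```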
+func WithStart(start component.StartFunc) Option { + return func(o *BaseExporter) error { + o.StartFunc = start + return nil + } +} + +// WithShutdown overrides the default Shutdown function for an exporter. +// The default shutdown function does nothing and always returns nil. +func WithShutdown(shutdown component.ShutdownFunc) Option { + return func(o *BaseExporter) error { + o.ShutdownFunc = shutdown + return nil + } +} + +// WithTimeout overrides the default TimeoutConfig for an exporter. +// The default TimeoutConfig is 5 seconds. +func WithTimeout(timeoutConfig TimeoutConfig) Option { + return func(o *BaseExporter) error { + o.TimeoutSender.cfg = timeoutConfig + return nil + } +} + +// WithRetry overrides the default configretry.BackOffConfig for an exporter. +// The default configretry.BackOffConfig is to disable retries. +func WithRetry(config configretry.BackOffConfig) Option { + return func(o *BaseExporter) error { + if !config.Enabled { + o.ExportFailureMessage += " Try enabling retry_on_failure config option to retry on retryable errors." + return nil + } + o.RetrySender = newRetrySender(config, o.Set) + return nil + } +} + +// WithQueue overrides the default QueueConfig for an exporter. +// The default QueueConfig is to disable queueing. +// This option cannot be used with the new exporter helpers New[Traces|Metrics|Logs]RequestExporter. +func WithQueue(config QueueConfig) Option { + return func(o *BaseExporter) error { + if o.Marshaler == nil || o.Unmarshaler == nil { + return errors.New("WithQueue option is not available for the new request exporters, use WithRequestQueue instead") + } + if !config.Enabled { + o.ExportFailureMessage += " Try enabling sending_queue to survive temporary failures." + return nil + } + o.queueCfg = exporterqueue.Config{ + Enabled: config.Enabled, + NumConsumers: config.NumConsumers, + QueueSize: config.QueueSize, + Blocking: config.Blocking, + } + o.queueFactory = exporterqueue.NewPersistentQueueFactory[internal.Request](config.StorageID, exporterqueue.PersistentQueueSettings[internal.Request]{ + Marshaler: o.Marshaler, + Unmarshaler: o.Unmarshaler, + }) + return nil + } +} + +// WithRequestQueue enables queueing for an exporter. +// This option should be used with the new exporter helpers New[Traces|Metrics|Logs]RequestExporter. +// Experimental: This API is at the early stage of development and may change without backward compatibility +// until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. +func WithRequestQueue(cfg exporterqueue.Config, queueFactory exporterqueue.Factory[internal.Request]) Option { + return func(o *BaseExporter) error { + if o.Marshaler != nil || o.Unmarshaler != nil { + return errors.New("WithRequestQueue option must be used with the new request exporters only, use WithQueue instead") + } + if !cfg.Enabled { + o.ExportFailureMessage += " Try enabling sending_queue to survive temporary failures." + return nil + } + o.queueCfg = cfg + o.queueFactory = queueFactory + return nil + } +} + +// WithCapabilities overrides the default Capabilities() function for a Consumer. +// The default is non-mutable data. +// TODO: Verify if we can change the default to be mutable as we do for processors. +func WithCapabilities(capabilities consumer.Capabilities) Option { + return func(o *BaseExporter) error { + o.ConsumerOptions = append(o.ConsumerOptions, consumer.WithCapabilities(capabilities)) + return nil + } +} + +// WithBatcher enables batching for an exporter based on custom request types. 
+// For now, it can be used only with the New[Traces|Metrics|Logs]RequestExporter exporter helpers and +// WithRequestBatchFuncs provided. +// This API is at the early stage of development and may change without backward compatibility +// until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. +func WithBatcher(cfg exporterbatcher.Config) Option { + return func(o *BaseExporter) error { + o.BatcherCfg = cfg + return nil + } +} + +// WithMarshaler is used to set the request marshaler for the new exporter helper. +// It must be provided as the first option when creating a new exporter helper. +func WithMarshaler(marshaler exporterqueue.Marshaler[internal.Request]) Option { + return func(o *BaseExporter) error { + o.Marshaler = marshaler + return nil + } +} + +// withUnmarshaler is used to set the request unmarshaler for the new exporter helper. +// It must be provided as the first option when creating a new exporter helper. +func WithUnmarshaler(unmarshaler exporterqueue.Unmarshaler[internal.Request]) Option { + return func(o *BaseExporter) error { + o.Unmarshaler = unmarshaler + return nil + } +} + +func CheckStatus(t *testing.T, sd sdktrace.ReadOnlySpan, err error) { + if err != nil { + require.Equal(t, codes.Error, sd.Status().Code, "SpanData %v", sd) + require.EqualError(t, err, sd.Status().Description, "SpanData %v", sd) + } else { + require.Equal(t, codes.Unset, sd.Status().Code, "SpanData %v", sd) + } +} diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/batch_sender.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/batch_sender.go new file mode 100644 index 00000000000..4cb3ace63b0 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/batch_sender.go @@ -0,0 +1,243 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/exporter/exporterhelper/internal" + +import ( + "context" + "sync" + "sync/atomic" + "time" + + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/exporterbatcher" + "go.opentelemetry.io/collector/exporter/internal" +) + +// BatchSender is a component that places requests into batches before passing them to the downstream senders. +// Batches are sent out with any of the following conditions: +// - batch size reaches cfg.MinSizeItems +// - cfg.FlushTimeout is elapsed since the timestamp when the previous batch was sent out. +// - concurrencyLimit is reached. +type BatchSender struct { + BaseSender[internal.Request] + cfg exporterbatcher.Config + + // concurrencyLimit is the maximum number of goroutines that can be blocked by the batcher. + // If this number is reached and all the goroutines are busy, the batch will be sent right away. + // Populated from the number of queue consumers if queue is enabled. + concurrencyLimit int64 + activeRequests atomic.Int64 + + mu sync.Mutex + activeBatch *batch + lastFlushed time.Time + + logger *zap.Logger + + shutdownCh chan struct{} + shutdownCompleteCh chan struct{} + stopped *atomic.Bool +} + +// newBatchSender returns a new batch consumer component. 
+func NewBatchSender(cfg exporterbatcher.Config, set exporter.Settings) *BatchSender { + bs := &BatchSender{ + activeBatch: newEmptyBatch(), + cfg: cfg, + logger: set.Logger, + shutdownCh: nil, + shutdownCompleteCh: make(chan struct{}), + stopped: &atomic.Bool{}, + } + return bs +} + +func (bs *BatchSender) Start(_ context.Context, _ component.Host) error { + bs.shutdownCh = make(chan struct{}) + timer := time.NewTimer(bs.cfg.FlushTimeout) + go func() { + for { + select { + case <-bs.shutdownCh: + // There is a minimal chance that another request is added after the shutdown signal. + // This loop will handle that case. + for bs.activeRequests.Load() > 0 { + bs.mu.Lock() + if bs.activeBatch.request != nil { + bs.exportActiveBatch() + } + bs.mu.Unlock() + } + if !timer.Stop() { + <-timer.C + } + close(bs.shutdownCompleteCh) + return + case <-timer.C: + bs.mu.Lock() + nextFlush := bs.cfg.FlushTimeout + if bs.activeBatch.request != nil { + sinceLastFlush := time.Since(bs.lastFlushed) + if sinceLastFlush >= bs.cfg.FlushTimeout { + bs.exportActiveBatch() + } else { + nextFlush = bs.cfg.FlushTimeout - sinceLastFlush + } + } + bs.mu.Unlock() + timer.Reset(nextFlush) + } + } + }() + + return nil +} + +type batch struct { + ctx context.Context + request internal.Request + done chan struct{} + err error + + // requestsBlocked is the number of requests blocked in this batch + // that can be immediately released from activeRequests when batch sending completes. + requestsBlocked int64 +} + +func newEmptyBatch() *batch { + return &batch{ + ctx: context.Background(), + done: make(chan struct{}), + } +} + +// exportActiveBatch exports the active batch asynchronously and replaces it with a new one. +// Caller must hold the lock. +func (bs *BatchSender) exportActiveBatch() { + go func(b *batch) { + b.err = bs.NextSender.Send(b.ctx, b.request) + close(b.done) + bs.activeRequests.Add(-b.requestsBlocked) + }(bs.activeBatch) + bs.lastFlushed = time.Now() + bs.activeBatch = newEmptyBatch() +} + +// isActiveBatchReady returns true if the active batch is ready to be exported. +// The batch is ready if it has reached the minimum size or the concurrency limit is reached. +// Caller must hold the lock. +func (bs *BatchSender) isActiveBatchReady() bool { + return bs.activeBatch.request.ItemsCount() >= bs.cfg.MinSizeItems || + (bs.concurrencyLimit > 0 && bs.activeRequests.Load() >= bs.concurrencyLimit) +} + +func (bs *BatchSender) Send(ctx context.Context, req internal.Request) error { + // Stopped batch sender should act as pass-through to allow the queue to be drained. + if bs.stopped.Load() { + return bs.NextSender.Send(ctx, req) + } + + if bs.cfg.MaxSizeItems > 0 { + return bs.sendMergeSplitBatch(ctx, req) + } + return bs.sendMergeBatch(ctx, req) +} + +// sendMergeSplitBatch sends the request to the batch which may be split into multiple requests. 
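One behavioural difference from the deleted implementation: the constructor now leaves `shutdownCh` nil and `Start` creates it, and `Shutdown` (at the end of this file) only closes the channel when it is non-nil. A never-started `BatchSender` can therefore be shut down safely, e.g.:

```go
bs := NewBatchSender(exporterbatcher.NewDefaultConfig(), set)
// Start was never called: shutdownCh is still nil, so Shutdown just marks
// the sender stopped and returns without waiting on shutdownCompleteCh.
_ = bs.Shutdown(context.Background())
```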
+func (bs *BatchSender) sendMergeSplitBatch(ctx context.Context, req internal.Request) error { + bs.mu.Lock() + + var reqs []internal.Request + var mergeSplitErr error + if bs.activeBatch.request == nil { + reqs, mergeSplitErr = req.MergeSplit(ctx, bs.cfg.MaxSizeConfig, nil) + } else { + reqs, mergeSplitErr = bs.activeBatch.request.MergeSplit(ctx, bs.cfg.MaxSizeConfig, req) + } + + if mergeSplitErr != nil || len(reqs) == 0 { + bs.mu.Unlock() + return mergeSplitErr + } + + bs.activeRequests.Add(1) + if len(reqs) == 1 { + bs.activeBatch.requestsBlocked++ + } else { + // if there was a split, we want to make sure that bs.activeRequests is released once all of the parts are sent instead of using batch.requestsBlocked + defer bs.activeRequests.Add(-1) + } + if len(reqs) == 1 || bs.activeBatch.request != nil { + bs.updateActiveBatch(ctx, reqs[0]) + batch := bs.activeBatch + if bs.isActiveBatchReady() || len(reqs) > 1 { + bs.exportActiveBatch() + } + bs.mu.Unlock() + <-batch.done + if batch.err != nil { + return batch.err + } + reqs = reqs[1:] + } else { + bs.mu.Unlock() + } + + // Intentionally do not put the last request in the active batch to not block it. + // TODO: Consider including the partial request in the error to avoid double publishing. + for _, r := range reqs { + if err := bs.NextSender.Send(ctx, r); err != nil { + return err + } + } + return nil +} + +// sendMergeBatch sends the request to the batch and waits for the batch to be exported. +func (bs *BatchSender) sendMergeBatch(ctx context.Context, req internal.Request) error { + bs.mu.Lock() + + if bs.activeBatch.request != nil { + res, err := bs.activeBatch.request.MergeSplit(ctx, bs.cfg.MaxSizeConfig, req) + if err != nil { + bs.mu.Unlock() + return err + } + req = res[0] + } + + bs.activeRequests.Add(1) + bs.updateActiveBatch(ctx, req) + batch := bs.activeBatch + batch.requestsBlocked++ + if bs.isActiveBatchReady() { + bs.exportActiveBatch() + } + bs.mu.Unlock() + <-batch.done + return batch.err +} + +// updateActiveBatch update the active batch to the new merged request and context. +// The context is only set once and is not updated after the first call. +// Merging the context would be complex and require an additional goroutine to handle the context cancellation. +// We take the approach of using the context from the first request since it's likely to have the shortest timeout. 
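The `BatchMergeFunc`/`BatchMergeSplitFunc` callbacks deleted from `exporterbatcher` earlier in this diff are replaced by a `MergeSplit` method on the request itself, which is what `sendMergeSplitBatch` and `sendMergeBatch` call above. A hypothetical implementation honouring the old contract (no chunk exceeds `MaxSizeItems`, the last chunk is the smallest, the result is never empty); the type and field names are illustrative, and it assumes the public `Request` alias carries the same method set as `internal.Request`:

```go
type sliceRequest struct{ items []string }

func (r *sliceRequest) ItemsCount() int { return len(r.items) }

func (r *sliceRequest) Export(ctx context.Context) error {
	// send r.items to the backend; elided in this sketch
	return nil
}

// MergeSplit merges r with req2 (which may be nil, as in sendMergeSplitBatch
// above) and re-chunks the result to at most cfg.MaxSizeItems items each.
func (r *sliceRequest) MergeSplit(_ context.Context, cfg exporterbatcher.MaxSizeConfig, req2 exporterhelper.Request) ([]exporterhelper.Request, error) {
	merged := r.items
	if req2 != nil {
		other, ok := req2.(*sliceRequest)
		if !ok {
			return nil, errors.New("cannot merge requests of different types")
		}
		merged = append(merged, other.items...)
	}
	if cfg.MaxSizeItems == 0 { // no size cap configured
		return []exporterhelper.Request{&sliceRequest{items: merged}}, nil
	}
	var out []exporterhelper.Request
	for len(merged) > cfg.MaxSizeItems {
		out = append(out, &sliceRequest{items: merged[:cfg.MaxSizeItems]})
		merged = merged[cfg.MaxSizeItems:]
	}
	return append(out, &sliceRequest{items: merged}), nil
}
```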
+func (bs *BatchSender) updateActiveBatch(ctx context.Context, req internal.Request) { + if bs.activeBatch.request == nil { + bs.activeBatch.ctx = ctx + } + bs.activeBatch.request = req +} + +func (bs *BatchSender) Shutdown(context.Context) error { + bs.stopped.Store(true) + if bs.shutdownCh != nil { + close(bs.shutdownCh) + <-bs.shutdownCompleteCh + } + return nil +} diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/metadata/generated_telemetry.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/metadata/generated_telemetry.go index 481cc2b27ec..bef767b0a89 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/metadata/generated_telemetry.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/metadata/generated_telemetry.go @@ -3,10 +3,11 @@ package metadata import ( + "context" "errors" "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/metric/noop" + noopmetric "go.opentelemetry.io/otel/metric/noop" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/collector/component" @@ -24,97 +25,136 @@ func Tracer(settings component.TelemetrySettings) trace.Tracer { // TelemetryBuilder provides an interface for components to report telemetry // as defined in metadata and user config. type TelemetryBuilder struct { + meter metric.Meter ExporterEnqueueFailedLogRecords metric.Int64Counter ExporterEnqueueFailedMetricPoints metric.Int64Counter ExporterEnqueueFailedSpans metric.Int64Counter + ExporterQueueCapacity metric.Int64ObservableGauge + ExporterQueueSize metric.Int64ObservableGauge ExporterSendFailedLogRecords metric.Int64Counter ExporterSendFailedMetricPoints metric.Int64Counter ExporterSendFailedSpans metric.Int64Counter ExporterSentLogRecords metric.Int64Counter ExporterSentMetricPoints metric.Int64Counter ExporterSentSpans metric.Int64Counter - level configtelemetry.Level } -// telemetryBuilderOption applies changes to default builder. -type telemetryBuilderOption func(*TelemetryBuilder) +// TelemetryBuilderOption applies changes to default builder. +type TelemetryBuilderOption interface { + apply(*TelemetryBuilder) +} + +type telemetryBuilderOptionFunc func(mb *TelemetryBuilder) + +func (tbof telemetryBuilderOptionFunc) apply(mb *TelemetryBuilder) { + tbof(mb) +} + +// InitExporterQueueCapacity configures the ExporterQueueCapacity metric. +func (builder *TelemetryBuilder) InitExporterQueueCapacity(cb func() int64, opts ...metric.ObserveOption) (metric.Registration, error) { + var err error + builder.ExporterQueueCapacity, err = builder.meter.Int64ObservableGauge( + "otelcol_exporter_queue_capacity", + metric.WithDescription("Fixed capacity of the retry queue (in batches)"), + metric.WithUnit("{batches}"), + ) + if err != nil { + return nil, err + } + reg, err := builder.meter.RegisterCallback(func(_ context.Context, o metric.Observer) error { + o.ObserveInt64(builder.ExporterQueueCapacity, cb(), opts...) + return nil + }, builder.ExporterQueueCapacity) + return reg, err +} -// WithLevel sets the current telemetry level for the component. -func WithLevel(lvl configtelemetry.Level) telemetryBuilderOption { - return func(builder *TelemetryBuilder) { - builder.level = lvl +// InitExporterQueueSize configures the ExporterQueueSize metric. 
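The new `Init*` helpers in the generated telemetry register asynchronous gauges driven by a callback, so the queue sender can report size and capacity without polling. A hypothetical caller; the `q.Size` accessor and the `set`/`q` variables are assumptions, not part of this diff:

```go
reg, err := telemetryBuilder.InitExporterQueueSize(
	func() int64 { return q.Size() }, // observed on every metrics collection
	metric.WithAttributes(attribute.String("exporter", set.ID.String())),
)
if err != nil {
	return err
}
defer func() { _ = reg.Unregister() }() // stop observing on shutdown
```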
+func (builder *TelemetryBuilder) InitExporterQueueSize(cb func() int64, opts ...metric.ObserveOption) (metric.Registration, error) { + var err error + builder.ExporterQueueSize, err = builder.meter.Int64ObservableGauge( + "otelcol_exporter_queue_size", + metric.WithDescription("Current size of the retry queue (in batches)"), + metric.WithUnit("{batches}"), + ) + if err != nil { + return nil, err } + reg, err := builder.meter.RegisterCallback(func(_ context.Context, o metric.Observer) error { + o.ObserveInt64(builder.ExporterQueueSize, cb(), opts...) + return nil + }, builder.ExporterQueueSize) + return reg, err } // NewTelemetryBuilder provides a struct with methods to update all internal telemetry // for a component -func NewTelemetryBuilder(settings component.TelemetrySettings, options ...telemetryBuilderOption) (*TelemetryBuilder, error) { - builder := TelemetryBuilder{level: configtelemetry.LevelBasic} +func NewTelemetryBuilder(settings component.TelemetrySettings, options ...TelemetryBuilderOption) (*TelemetryBuilder, error) { + builder := TelemetryBuilder{} for _, op := range options { - op(&builder) - } - var ( - err, errs error - meter metric.Meter - ) - if builder.level >= configtelemetry.LevelBasic { - meter = Meter(settings) - } else { - meter = noop.Meter{} + op.apply(&builder) } - builder.ExporterEnqueueFailedLogRecords, err = meter.Int64Counter( - "exporter_enqueue_failed_log_records", - metric.WithDescription("Number of log records failed to be added to the sending queue."), - metric.WithUnit("1"), + builder.meter = Meter(settings) + var err, errs error + builder.ExporterEnqueueFailedLogRecords, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_exporter_enqueue_failed_log_records", + metric.WithDescription("Number of log records failed to be added to the sending queue. [alpha]"), + metric.WithUnit("{records}"), ) errs = errors.Join(errs, err) - builder.ExporterEnqueueFailedMetricPoints, err = meter.Int64Counter( - "exporter_enqueue_failed_metric_points", - metric.WithDescription("Number of metric points failed to be added to the sending queue."), - metric.WithUnit("1"), + builder.ExporterEnqueueFailedMetricPoints, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_exporter_enqueue_failed_metric_points", + metric.WithDescription("Number of metric points failed to be added to the sending queue. [alpha]"), + metric.WithUnit("{datapoints}"), ) errs = errors.Join(errs, err) - builder.ExporterEnqueueFailedSpans, err = meter.Int64Counter( - "exporter_enqueue_failed_spans", - metric.WithDescription("Number of spans failed to be added to the sending queue."), - metric.WithUnit("1"), + builder.ExporterEnqueueFailedSpans, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_exporter_enqueue_failed_spans", + metric.WithDescription("Number of spans failed to be added to the sending queue. 
[alpha]"), + metric.WithUnit("{spans}"), ) errs = errors.Join(errs, err) - builder.ExporterSendFailedLogRecords, err = meter.Int64Counter( - "exporter_send_failed_log_records", - metric.WithDescription("Number of log records in failed attempts to send to destination."), - metric.WithUnit("1"), + builder.ExporterSendFailedLogRecords, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_exporter_send_failed_log_records", + metric.WithDescription("Number of log records in failed attempts to send to destination. [alpha]"), + metric.WithUnit("{records}"), ) errs = errors.Join(errs, err) - builder.ExporterSendFailedMetricPoints, err = meter.Int64Counter( - "exporter_send_failed_metric_points", - metric.WithDescription("Number of metric points in failed attempts to send to destination."), - metric.WithUnit("1"), + builder.ExporterSendFailedMetricPoints, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_exporter_send_failed_metric_points", + metric.WithDescription("Number of metric points in failed attempts to send to destination. [alpha]"), + metric.WithUnit("{datapoints}"), ) errs = errors.Join(errs, err) - builder.ExporterSendFailedSpans, err = meter.Int64Counter( - "exporter_send_failed_spans", - metric.WithDescription("Number of spans in failed attempts to send to destination."), - metric.WithUnit("1"), + builder.ExporterSendFailedSpans, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_exporter_send_failed_spans", + metric.WithDescription("Number of spans in failed attempts to send to destination. [alpha]"), + metric.WithUnit("{spans}"), ) errs = errors.Join(errs, err) - builder.ExporterSentLogRecords, err = meter.Int64Counter( - "exporter_sent_log_records", - metric.WithDescription("Number of log record successfully sent to destination."), - metric.WithUnit("1"), + builder.ExporterSentLogRecords, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_exporter_sent_log_records", + metric.WithDescription("Number of log record successfully sent to destination. [alpha]"), + metric.WithUnit("{records}"), ) errs = errors.Join(errs, err) - builder.ExporterSentMetricPoints, err = meter.Int64Counter( - "exporter_sent_metric_points", - metric.WithDescription("Number of metric points successfully sent to destination."), - metric.WithUnit("1"), + builder.ExporterSentMetricPoints, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_exporter_sent_metric_points", + metric.WithDescription("Number of metric points successfully sent to destination. [alpha]"), + metric.WithUnit("{datapoints}"), ) errs = errors.Join(errs, err) - builder.ExporterSentSpans, err = meter.Int64Counter( - "exporter_sent_spans", - metric.WithDescription("Number of spans successfully sent to destination."), - metric.WithUnit("1"), + builder.ExporterSentSpans, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_exporter_sent_spans", + metric.WithDescription("Number of spans successfully sent to destination. 
[alpha]"), + metric.WithUnit("{spans}"), ) errs = errors.Join(errs, err) return &builder, errs } + +func getLeveledMeter(meter metric.Meter, cfgLevel, srvLevel configtelemetry.Level) metric.Meter { + if cfgLevel <= srvLevel { + return meter + } + return noopmetric.Meter{} +} diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/obsexporter.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/obsexporter.go new file mode 100644 index 00000000000..004e5c48248 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/obsexporter.go @@ -0,0 +1,170 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/exporter/exporterhelper/internal" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/trace" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/exporterhelper/internal/metadata" + "go.opentelemetry.io/collector/pipeline" +) + +// ObsReport is a helper to add observability to an exporter. +type ObsReport struct { + spanNamePrefix string + tracer trace.Tracer + Signal pipeline.Signal + + otelAttrs metric.MeasurementOption + TelemetryBuilder *metadata.TelemetryBuilder +} + +// ObsReportSettings are settings for creating an ObsReport. +type ObsReportSettings struct { + ExporterID component.ID + ExporterCreateSettings exporter.Settings + Signal pipeline.Signal +} + +func NewExporter(cfg ObsReportSettings) (*ObsReport, error) { + telemetryBuilder, err := metadata.NewTelemetryBuilder(cfg.ExporterCreateSettings.TelemetrySettings) + if err != nil { + return nil, err + } + + return &ObsReport{ + spanNamePrefix: ExporterPrefix + cfg.ExporterID.String(), + tracer: cfg.ExporterCreateSettings.TracerProvider.Tracer(cfg.ExporterID.String()), + Signal: cfg.Signal, + otelAttrs: metric.WithAttributeSet(attribute.NewSet(attribute.String(ExporterKey, cfg.ExporterID.String()))), + TelemetryBuilder: telemetryBuilder, + }, nil +} + +// StartTracesOp is called at the start of an Export operation. +// The returned context should be used in other calls to the Exporter functions +// dealing with the same export operation. +func (or *ObsReport) StartTracesOp(ctx context.Context) context.Context { + return or.startOp(ctx, ExportTraceDataOperationSuffix) +} + +// EndTracesOp completes the export operation that was started with startTracesOp. +func (or *ObsReport) EndTracesOp(ctx context.Context, numSpans int, err error) { + numSent, numFailedToSend := toNumItems(numSpans, err) + or.recordMetrics(context.WithoutCancel(ctx), pipeline.SignalTraces, numSent, numFailedToSend) + endSpan(ctx, err, numSent, numFailedToSend, SentSpansKey, FailedToSendSpansKey) +} + +// StartMetricsOp is called at the start of an Export operation. +// The returned context should be used in other calls to the Exporter functions +// dealing with the same export operation. +func (or *ObsReport) StartMetricsOp(ctx context.Context) context.Context { + return or.startOp(ctx, ExportMetricsOperationSuffix) +} + +// EndMetricsOp completes the export operation that was started with +// startMetricsOp. +// +// If needed, report your use case in https://github.com/open-telemetry/opentelemetry-collector/issues/10592. 
+func (or *ObsReport) EndMetricsOp(ctx context.Context, numMetricPoints int, err error) { + numSent, numFailedToSend := toNumItems(numMetricPoints, err) + or.recordMetrics(context.WithoutCancel(ctx), pipeline.SignalMetrics, numSent, numFailedToSend) + endSpan(ctx, err, numSent, numFailedToSend, SentMetricPointsKey, FailedToSendMetricPointsKey) +} + +// StartLogsOp is called at the start of an Export operation. +// The returned context should be used in other calls to the Exporter functions +// dealing with the same export operation. +func (or *ObsReport) StartLogsOp(ctx context.Context) context.Context { + return or.startOp(ctx, ExportLogsOperationSuffix) +} + +// EndLogsOp completes the export operation that was started with startLogsOp. +func (or *ObsReport) EndLogsOp(ctx context.Context, numLogRecords int, err error) { + numSent, numFailedToSend := toNumItems(numLogRecords, err) + or.recordMetrics(context.WithoutCancel(ctx), pipeline.SignalLogs, numSent, numFailedToSend) + endSpan(ctx, err, numSent, numFailedToSend, SentLogRecordsKey, FailedToSendLogRecordsKey) +} + +// StartProfilesOp is called at the start of an Export operation. +// The returned context should be used in other calls to the Exporter functions +// dealing with the same export operation. +func (or *ObsReport) StartProfilesOp(ctx context.Context) context.Context { + return or.startOp(ctx, ExportTraceDataOperationSuffix) +} + +// EndProfilesOp completes the export operation that was started with startProfilesOp. +func (or *ObsReport) EndProfilesOp(ctx context.Context, numSpans int, err error) { + numSent, numFailedToSend := toNumItems(numSpans, err) + endSpan(ctx, err, numSent, numFailedToSend, SentSamplesKey, FailedToSendSamplesKey) +} + +// startOp creates the span used to trace the operation. Returning +// the updated context and the created span. +func (or *ObsReport) startOp(ctx context.Context, operationSuffix string) context.Context { + spanName := or.spanNamePrefix + operationSuffix + ctx, _ = or.tracer.Start(ctx, spanName) + return ctx +} + +func (or *ObsReport) recordMetrics(ctx context.Context, signal pipeline.Signal, sent, failed int64) { + var sentMeasure, failedMeasure metric.Int64Counter + switch signal { + case pipeline.SignalTraces: + sentMeasure = or.TelemetryBuilder.ExporterSentSpans + failedMeasure = or.TelemetryBuilder.ExporterSendFailedSpans + case pipeline.SignalMetrics: + sentMeasure = or.TelemetryBuilder.ExporterSentMetricPoints + failedMeasure = or.TelemetryBuilder.ExporterSendFailedMetricPoints + case pipeline.SignalLogs: + sentMeasure = or.TelemetryBuilder.ExporterSentLogRecords + failedMeasure = or.TelemetryBuilder.ExporterSendFailedLogRecords + } + + sentMeasure.Add(ctx, sent, or.otelAttrs) + failedMeasure.Add(ctx, failed, or.otelAttrs) +} + +func endSpan(ctx context.Context, err error, numSent, numFailedToSend int64, sentItemsKey, failedToSendItemsKey string) { + span := trace.SpanFromContext(ctx) + // End the span according to errors. 
+ if span.IsRecording() { + span.SetAttributes( + attribute.Int64(sentItemsKey, numSent), + attribute.Int64(failedToSendItemsKey, numFailedToSend), + ) + if err != nil { + span.SetStatus(codes.Error, err.Error()) + } + } + span.End() +} + +func toNumItems(numExportedItems int, err error) (int64, int64) { + if err != nil { + return 0, int64(numExportedItems) + } + return int64(numExportedItems), 0 +} + +func (or *ObsReport) RecordEnqueueFailure(ctx context.Context, signal pipeline.Signal, failed int64) { + var enqueueFailedMeasure metric.Int64Counter + switch signal { + case pipeline.SignalTraces: + enqueueFailedMeasure = or.TelemetryBuilder.ExporterEnqueueFailedSpans + case pipeline.SignalMetrics: + enqueueFailedMeasure = or.TelemetryBuilder.ExporterEnqueueFailedMetricPoints + case pipeline.SignalLogs: + enqueueFailedMeasure = or.TelemetryBuilder.ExporterEnqueueFailedLogRecords + } + + enqueueFailedMeasure.Add(ctx, failed, or.otelAttrs) +} diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/obsmetrics.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/obsmetrics.go new file mode 100644 index 00000000000..ae9e89942b3 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/obsmetrics.go @@ -0,0 +1,40 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/exporter/exporterhelper/internal" + +const ( + // spanNameSep is duplicate between receiver and exporter. + spanNameSep = "/" + + // ExporterKey used to identify exporters in metrics and traces. + ExporterKey = "exporter" + + // DataTypeKey used to identify the data type in the queue size metric. + DataTypeKey = "data_type" + + // SentSpansKey used to track spans sent by exporters. + SentSpansKey = "sent_spans" + // FailedToSendSpansKey used to track spans that failed to be sent by exporters. + FailedToSendSpansKey = "send_failed_spans" + + // SentMetricPointsKey used to track metric points sent by exporters. + SentMetricPointsKey = "sent_metric_points" + // FailedToSendMetricPointsKey used to track metric points that failed to be sent by exporters. + FailedToSendMetricPointsKey = "send_failed_metric_points" + + // SentLogRecordsKey used to track logs sent by exporters. + SentLogRecordsKey = "sent_log_records" + // FailedToSendLogRecordsKey used to track logs that failed to be sent by exporters. + FailedToSendLogRecordsKey = "send_failed_log_records" + + // SentSamplesKey used to track profiles samples sent by exporters. + SentSamplesKey = "sent_samples" + // FailedToSendSamplesKey used to track samples that failed to be sent by exporters. 
+ FailedToSendSamplesKey = "send_failed_samples" + + ExporterPrefix = ExporterKey + spanNameSep + ExportTraceDataOperationSuffix = spanNameSep + "traces" + ExportMetricsOperationSuffix = spanNameSep + "metrics" + ExportLogsOperationSuffix = spanNameSep + "logs" +) diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/queue_sender.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/queue_sender.go new file mode 100644 index 00000000000..66628e6a357 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/queue_sender.go @@ -0,0 +1,202 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/exporter/exporterhelper/internal" + +import ( + "context" + "errors" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/trace" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/exporterbatcher" + "go.opentelemetry.io/collector/exporter/exporterqueue" + "go.opentelemetry.io/collector/exporter/internal" + "go.opentelemetry.io/collector/exporter/internal/queue" +) + +// QueueConfig defines configuration for queueing batches before sending to the consumerSender. +type QueueConfig struct { + // Enabled indicates whether to enqueue batches before sending to the consumerSender. + Enabled bool `mapstructure:"enabled"` + // NumConsumers is the number of consumers from the queue. Defaults to 10. + // If batching is enabled, a combined batch cannot contain more requests than the number of consumers, + // so it is recommended to set a higher number of consumers if batching is enabled. + NumConsumers int `mapstructure:"num_consumers"` + // QueueSize is the maximum number of batches allowed in the queue at a given time. + QueueSize int `mapstructure:"queue_size"` + // Blocking controls the queue behavior when full. + // If true, Offer blocks until there is enough space to add the new request to the queue. + Blocking bool `mapstructure:"blocking"` + // StorageID, if not empty, enables persistent storage and uses the specified component + // as a storage extension for the persistent queue. + StorageID *component.ID `mapstructure:"storage"` +} + +// NewDefaultQueueConfig returns the default config for QueueConfig.
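// Hedged sketch (ours; the internal package cannot be imported): the mapstructure tags
// on QueueConfig above are what tie the struct to an exporter's `sending_queue` block in
// a collector config. A local mirror of the struct shows the round trip through confmap:
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/confmap"
)

// queueConfig mirrors internal.QueueConfig's mapstructure tags.
type queueConfig struct {
	Enabled      bool `mapstructure:"enabled"`
	NumConsumers int  `mapstructure:"num_consumers"`
	QueueSize    int  `mapstructure:"queue_size"`
	Blocking     bool `mapstructure:"blocking"`
}

func main() {
	// Equivalent YAML:
	//   sending_queue:
	//     enabled: true
	//     num_consumers: 20
	//     queue_size: 5000
	conf := confmap.NewFromStringMap(map[string]any{
		"enabled":       true,
		"num_consumers": 20,
		"queue_size":    5000,
	})
	var cfg queueConfig
	if err := conf.Unmarshal(&cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg) // {Enabled:true NumConsumers:20 QueueSize:5000 Blocking:false}
}

// NewDefaultQueueConfig, defined next in the vendored file, supplies the defaults used
// when the block is omitted.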
+func NewDefaultQueueConfig() QueueConfig { + return QueueConfig{ + Enabled: true, + NumConsumers: 10, + // By default, batches are 8192 spans, for a total of up to 8 million spans in the queue + // This can be estimated at 1-4 GB worth of maximum memory usage + // This default is probably still too high, and may be adjusted further down in a future release + QueueSize: 1_000, + Blocking: false, + } +} + +// Validate checks if the QueueConfig configuration is valid +func (qCfg *QueueConfig) Validate() error { + if !qCfg.Enabled { + return nil + } + + if qCfg.QueueSize <= 0 { + return errors.New("`queue_size` must be positive") + } + + if qCfg.NumConsumers <= 0 { + return errors.New("`num_consumers` must be positive") + } + + return nil +} + +type QueueSender struct { + BaseSender[internal.Request] + queue exporterqueue.Queue[internal.Request] + numConsumers int + traceAttribute attribute.KeyValue + batcher queue.Batcher + consumers *queue.Consumers[internal.Request] + + obsrep *ObsReport + exporterID component.ID + logger *zap.Logger + shutdownFns []component.ShutdownFunc +} + +func NewQueueSender( + q exporterqueue.Queue[internal.Request], + set exporter.Settings, + numConsumers int, + exportFailureMessage string, + obsrep *ObsReport, + batcherCfg exporterbatcher.Config, +) *QueueSender { + qs := &QueueSender{ + queue: q, + numConsumers: numConsumers, + traceAttribute: attribute.String(ExporterKey, set.ID.String()), + obsrep: obsrep, + exporterID: set.ID, + logger: set.Logger, + } + + exportFunc := func(ctx context.Context, req internal.Request) error { + err := qs.NextSender.Send(ctx, req) + if err != nil { + set.Logger.Error("Exporting failed. Dropping data."+exportFailureMessage, + zap.Error(err), zap.Int("dropped_items", req.ItemsCount())) + } + return err + } + if usePullingBasedExporterQueueBatcher.IsEnabled() { + qs.batcher, _ = queue.NewBatcher(batcherCfg, q, exportFunc, numConsumers) + } else { + qs.consumers = queue.NewQueueConsumers[internal.Request](q, numConsumers, exportFunc) + } + return qs +} + +// Start is invoked during service startup. +func (qs *QueueSender) Start(ctx context.Context, host component.Host) error { + if err := qs.queue.Start(ctx, host); err != nil { + return err + } + + if usePullingBasedExporterQueueBatcher.IsEnabled() { + if err := qs.batcher.Start(ctx, host); err != nil { + return err + } + } else { + if err := qs.consumers.Start(ctx, host); err != nil { + return err + } + } + + dataTypeAttr := attribute.String(DataTypeKey, qs.obsrep.Signal.String()) + + reg1, err1 := qs.obsrep.TelemetryBuilder.InitExporterQueueSize(func() int64 { return qs.queue.Size() }, + metric.WithAttributeSet(attribute.NewSet(qs.traceAttribute, dataTypeAttr))) + + if reg1 != nil { + qs.shutdownFns = append(qs.shutdownFns, func(context.Context) error { + return reg1.Unregister() + }) + } + + reg2, err2 := qs.obsrep.TelemetryBuilder.InitExporterQueueCapacity(func() int64 { return qs.queue.Capacity() }, + metric.WithAttributeSet(attribute.NewSet(qs.traceAttribute))) + + if reg2 != nil { + qs.shutdownFns = append(qs.shutdownFns, func(context.Context) error { + return reg2.Unregister() + }) + } + + return errors.Join(err1, err2) +} + +// Shutdown is invoked during service shutdown. +func (qs *QueueSender) Shutdown(ctx context.Context) error { + // Stop the queue and consumers, this will drain the queue and will call the retry (which is stopped) that will only + // try once every request. 
+ + for _, fn := range qs.shutdownFns { + err := fn(ctx) + if err != nil { + qs.logger.Warn("Error while shutting down QueueSender", zap.Error(err)) + } + } + qs.shutdownFns = nil + + if err := qs.queue.Shutdown(ctx); err != nil { + return err + } + if usePullingBasedExporterQueueBatcher.IsEnabled() { + return qs.batcher.Shutdown(ctx) + } + return qs.consumers.Shutdown(ctx) +} + +// Send implements the Sender interface. It puts the request in the queue. +func (qs *QueueSender) Send(ctx context.Context, req internal.Request) error { + // Prevent cancellation and deadline from propagating to the context stored in the queue. + // The grpc/http based receivers will cancel the request context after this function returns. + c := context.WithoutCancel(ctx) + + span := trace.SpanFromContext(c) + if err := qs.queue.Offer(c, req); err != nil { + span.AddEvent("Failed to enqueue item.", trace.WithAttributes(qs.traceAttribute)) + return err + } + + span.AddEvent("Enqueued item.", trace.WithAttributes(qs.traceAttribute)) + return nil +} + +type MockHost struct { + component.Host + Ext map[component.ID]component.Component +} + +func (nh *MockHost) GetExtensions() map[component.ID]component.Component { + return nh.Ext +} diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/request.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/request.go new file mode 100644 index 00000000000..b47ff63f0d5 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/request.go @@ -0,0 +1,32 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/exporter/exporterhelper/internal" + +import ( + "context" + + "go.opentelemetry.io/collector/exporter/internal" + "go.opentelemetry.io/collector/exporter/internal/requesttest" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" +) + +func RequestFromMetricsFunc(reqErr error) func(context.Context, pmetric.Metrics) (internal.Request, error) { + return func(_ context.Context, md pmetric.Metrics) (internal.Request, error) { + return &requesttest.FakeRequest{Items: md.DataPointCount(), ExportErr: reqErr}, nil + } +} + +func RequestFromTracesFunc(reqErr error) func(context.Context, ptrace.Traces) (internal.Request, error) { + return func(_ context.Context, td ptrace.Traces) (internal.Request, error) { + return &requesttest.FakeRequest{Items: td.SpanCount(), ExportErr: reqErr}, nil + } +} + +func RequestFromLogsFunc(reqErr error) func(context.Context, plog.Logs) (internal.Request, error) { + return func(_ context.Context, ld plog.Logs) (internal.Request, error) { + return &requesttest.FakeRequest{Items: ld.LogRecordCount(), ExportErr: reqErr}, nil + } +} diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/request_sender.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/request_sender.go new file mode 100644 index 00000000000..8ca75f66fb6 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/request_sender.go @@ -0,0 +1,33 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/exporter/exporterhelper/internal" + +import ( + "context"
+ + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/exporter/internal" +) + +// Sender is an abstraction of a sender for a request independent of the type of the data (traces, metrics, logs). +type Sender[K any] interface { + component.Component + Send(context.Context, K) error + SetNextSender(nextSender Sender[K]) +} + +type BaseSender[K any] struct { + component.StartFunc + component.ShutdownFunc + NextSender Sender[K] +} + +var _ Sender[internal.Request] = (*BaseSender[internal.Request])(nil) + +func (b *BaseSender[K]) Send(ctx context.Context, req K) error { + return b.NextSender.Send(ctx, req) +} + +func (b *BaseSender[K]) SetNextSender(nextSender Sender[K]) { + b.NextSender = nextSender +} diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/retry_sender.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/retry_sender.go new file mode 100644 index 00000000000..1af3256344c --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/retry_sender.go @@ -0,0 +1,142 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/exporter/exporterhelper/internal" + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/cenkalti/backoff/v4" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/config/configretry" + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/internal" + "go.opentelemetry.io/collector/exporter/internal/experr" +) + +// TODO: Clean this up by forcing all exporters to return an internal error type that always includes the information about retries. +type throttleRetry struct { + err error + delay time.Duration +} + +func (t throttleRetry) Error() string { + return "Throttle (" + t.delay.String() + "), error: " + t.err.Error() +} + +func (t throttleRetry) Unwrap() error { + return t.err +} + +// NewThrottleRetry creates a new throttle retry error. +func NewThrottleRetry(err error, delay time.Duration) error { + return throttleRetry{ + err: err, + delay: delay, + } +} + +type retrySender struct { + BaseSender[internal.Request] + traceAttribute attribute.KeyValue + cfg configretry.BackOffConfig + stopCh chan struct{} + logger *zap.Logger +} + +func newRetrySender(config configretry.BackOffConfig, set exporter.Settings) *retrySender { + return &retrySender{ + traceAttribute: attribute.String(ExporterKey, set.ID.String()), + cfg: config, + stopCh: make(chan struct{}), + logger: set.Logger, + } +} + +func (rs *retrySender) Shutdown(context.Context) error { + close(rs.stopCh) + return nil +} + +// Send implements the Sender interface. +func (rs *retrySender) Send(ctx context.Context, req internal.Request) error { + // Do not use NewExponentialBackOff since it calls Reset and the code here must + // call Reset after changing the InitialInterval (this saves an unnecessary call to Now).
+ expBackoff := backoff.ExponentialBackOff{ + InitialInterval: rs.cfg.InitialInterval, + RandomizationFactor: rs.cfg.RandomizationFactor, + Multiplier: rs.cfg.Multiplier, + MaxInterval: rs.cfg.MaxInterval, + MaxElapsedTime: rs.cfg.MaxElapsedTime, + Stop: backoff.Stop, + Clock: backoff.SystemClock, + } + expBackoff.Reset() + span := trace.SpanFromContext(ctx) + retryNum := int64(0) + for { + span.AddEvent( + "Sending request.", + trace.WithAttributes(rs.traceAttribute, attribute.Int64("retry_num", retryNum))) + + err := rs.NextSender.Send(ctx, req) + if err == nil { + return nil + } + + // Immediately drop data on permanent errors. + if consumererror.IsPermanent(err) { + return fmt.Errorf("not retryable error: %w", err) + } + + if errReq, ok := req.(internal.RequestErrorHandler); ok { + req = errReq.OnError(err) + } + + backoffDelay := expBackoff.NextBackOff() + if backoffDelay == backoff.Stop { + return fmt.Errorf("no more retries left: %w", err) + } + + throttleErr := throttleRetry{} + if errors.As(err, &throttleErr) { + backoffDelay = max(backoffDelay, throttleErr.delay) + } + + if deadline, has := ctx.Deadline(); has && time.Until(deadline) < backoffDelay { + // The delay is longer than the deadline. There is no point in + // waiting for cancelation. + return fmt.Errorf("request will be cancelled before next retry: %w", err) + } + + backoffDelayStr := backoffDelay.String() + span.AddEvent( + "Exporting failed. Will retry the request after interval.", + trace.WithAttributes( + rs.traceAttribute, + attribute.String("interval", backoffDelayStr), + attribute.String("error", err.Error()))) + rs.logger.Info( + "Exporting failed. Will retry the request after interval.", + zap.Error(err), + zap.String("interval", backoffDelayStr), + ) + retryNum++ + + // back-off, but get interrupted when shutting down or request is cancelled or timed out. 
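// Aside (ours, not the collector's): the hand-rolled ExponentialBackOff above avoids
// NewExponentialBackOff so that Reset runs exactly once. The schedule it produces, and
// how a throttleRetry delay can only lengthen a too-short backoff interval, is easy to
// see in isolation:
package main

import (
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	expBackoff := backoff.ExponentialBackOff{
		InitialInterval:     5 * time.Second,
		RandomizationFactor: 0, // no jitter, so the output is deterministic
		Multiplier:          1.5,
		MaxInterval:         30 * time.Second,
		MaxElapsedTime:      0, // zero means never give up on elapsed time alone
		Stop:                backoff.Stop,
		Clock:               backoff.SystemClock,
	}
	expBackoff.Reset()

	throttleDelay := 12 * time.Second // e.g. a server-provided Retry-After hint
	for i := 1; i <= 4; i++ {
		delay := expBackoff.NextBackOff()
		if delay == backoff.Stop {
			fmt.Println("no more retries left")
			return
		}
		// As in retrySender.Send: the throttle hint wins only when it is longer
		// (max here is the Go 1.21+ builtin, the same one the vendored code uses).
		fmt.Printf("retry %d after %v\n", i, max(delay, throttleDelay))
	}
	// Prints: 12s, 12s, 12s, then 16.875s once the exponential delay overtakes the hint.
}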
+ select { + case <-ctx.Done(): + return fmt.Errorf("request is cancelled or timed out: %w", err) + case <-rs.stopCh: + return experr.NewShutdownErr(err) + case <-time.After(backoffDelay): + } + } +} diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/test_util.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/test_util.go new file mode 100644 index 00000000000..3c0f8faa166 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/test_util.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/exporter/exporterhelper/internal" + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/featuregate" +) + +func setFeatureGateForTest(tb testing.TB, gate *featuregate.Gate, enabled bool) func() { + originalValue := gate.IsEnabled() + require.NoError(tb, featuregate.GlobalRegistry().Set(gate.ID(), enabled)) + return func() { + require.NoError(tb, featuregate.GlobalRegistry().Set(gate.ID(), originalValue)) + } +} diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/timeout_sender.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/timeout_sender.go new file mode 100644 index 00000000000..a47ddccfb8c --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/timeout_sender.go @@ -0,0 +1,52 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/exporter/exporterhelper/internal" + +import ( + "context" + "errors" + "time" + + "go.opentelemetry.io/collector/exporter/internal" +) + +// TimeoutConfig defines the timeout configuration. The timeout applies to individual attempts to send data to the backend. +type TimeoutConfig struct { + // Timeout is the timeout for every attempt to send data to the backend. + // A zero timeout means no timeout. + Timeout time.Duration `mapstructure:"timeout"` +} + +func (ts *TimeoutConfig) Validate() error { + // Negative timeouts are not acceptable, since all sends will fail. + if ts.Timeout < 0 { + return errors.New("'timeout' must be non-negative") + } + return nil +} + +// NewDefaultTimeoutConfig returns the default config for TimeoutConfig. +func NewDefaultTimeoutConfig() TimeoutConfig { + return TimeoutConfig{ + Timeout: 5 * time.Second, + } +} + +// TimeoutSender is a Sender that adds a `timeout` to every request that passes through it. +type TimeoutSender struct { + BaseSender[internal.Request] + cfg TimeoutConfig +} + +func (ts *TimeoutSender) Send(ctx context.Context, req internal.Request) error { + // TODO: Remove this by avoiding creating the timeout sender when the timeout is 0. + if ts.cfg.Timeout == 0 { + return req.Export(ctx) + } + // Intentionally don't overwrite the context inside the request: in case of retries the deadline would not be + // updated, since this attempt's deadline is most likely earlier than the next one.
+ tCtx, cancelFunc := context.WithTimeout(ctx, ts.cfg.Timeout) + defer cancelFunc() + return req.Export(tCtx) +} diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/logs.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/logs.go index 2706ec7fe92..74a658b98fe 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/logs.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/logs.go @@ -13,13 +13,16 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/exporterhelper/internal" "go.opentelemetry.io/collector/exporter/exporterqueue" - "go.opentelemetry.io/collector/exporter/internal/queue" "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pipeline" ) -var logsMarshaler = &plog.ProtoMarshaler{} -var logsUnmarshaler = &plog.ProtoUnmarshaler{} +var ( + logsMarshaler = &plog.ProtoMarshaler{} + logsUnmarshaler = &plog.ProtoUnmarshaler{} +) type logsRequest struct { ld plog.Logs @@ -64,14 +67,14 @@ func (req *logsRequest) ItemsCount() int { } type logsExporter struct { - *baseExporter + *internal.BaseExporter consumer.Logs } -// NewLogsExporter creates an exporter.Logs that records observability metrics and wraps every request with a Span. -func NewLogsExporter( +// NewLogs creates an exporter.Logs that records observability metrics and wraps every request with a Span. +func NewLogs( ctx context.Context, - set exporter.CreateSettings, + set exporter.Settings, cfg component.Config, pusher consumer.ConsumeLogsFunc, options ...Option, @@ -83,10 +86,9 @@ func NewLogsExporter( return nil, errNilPushLogsData } logsOpts := []Option{ - withMarshaler(logsRequestMarshaler), withUnmarshaler(newLogsRequestUnmarshalerFunc(pusher)), - withBatchFuncs(mergeLogs, mergeSplitLogs), + internal.WithMarshaler(logsRequestMarshaler), internal.WithUnmarshaler(newLogsRequestUnmarshalerFunc(pusher)), } - return NewLogsRequestExporter(ctx, set, requestFromLogs(pusher), append(logsOpts, options...)...) + return NewLogsRequest(ctx, set, requestFromLogs(pusher), append(logsOpts, options...)...) } // RequestFromLogsFunc converts plog.Logs data into a user-defined request. @@ -101,12 +103,12 @@ func requestFromLogs(pusher consumer.ConsumeLogsFunc) RequestFromLogsFunc { } } -// NewLogsRequestExporter creates new logs exporter based on custom LogsConverter and RequestSender. +// NewLogsRequest creates new logs exporter based on custom LogsConverter and Sender. // Experimental: This API is at the early stage of development and may change without backward compatibility // until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. -func NewLogsRequestExporter( +func NewLogsRequest( _ context.Context, - set exporter.CreateSettings, + set exporter.Settings, converter RequestFromLogsFunc, options ...Option, ) (exporter.Logs, error) { @@ -118,7 +120,7 @@ func NewLogsRequestExporter( return nil, errNilLogsConverter } - be, err := newBaseExporter(set, component.DataTypeLogs, newLogsExporterWithObservability, options...) + be, err := internal.NewBaseExporter(set, pipeline.SignalLogs, newLogsWithObservability, options...) 
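// Hedged usage sketch (ours): after this rename, exporter authors call
// exporterhelper.NewLogs rather than NewLogsExporter. A minimal logs exporter wired with
// the default queue, retry, and timeout senders could look like the following;
// exampleConfig, pushLogs, and the use of exportertest's nop Settings are our
// assumptions for the sake of a self-contained snippet.
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/collector/config/configretry"
	"go.opentelemetry.io/collector/exporter/exporterhelper"
	"go.opentelemetry.io/collector/exporter/exportertest"
	"go.opentelemetry.io/collector/pdata/plog"
)

type exampleConfig struct{} // component.Config is just `any`, so a plain struct works

func pushLogs(_ context.Context, ld plog.Logs) error {
	fmt.Printf("exporting %d log records\n", ld.LogRecordCount())
	return nil // a real exporter would talk to its backend here
}

func main() {
	exp, err := exporterhelper.NewLogs(
		context.Background(),
		exportertest.NewNopSettings(), // test-only Settings; real factories receive theirs from the service
		&exampleConfig{},
		pushLogs,
		exporterhelper.WithQueue(exporterhelper.NewDefaultQueueConfig()),
		exporterhelper.WithRetry(configretry.NewDefaultBackOffConfig()),
		exporterhelper.WithTimeout(exporterhelper.NewDefaultTimeoutConfig()),
	)
	if err != nil {
		panic(err)
	}
	fmt.Printf("created %T\n", exp)
}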
if err != nil { return nil, err } @@ -131,31 +133,32 @@ func NewLogsRequestExporter( zap.Error(err)) return consumererror.NewPermanent(cErr) } - sErr := be.send(ctx, req) - if errors.Is(sErr, queue.ErrQueueIsFull) { - be.obsrep.recordEnqueueFailure(ctx, component.DataTypeLogs, int64(req.ItemsCount())) + sErr := be.Send(ctx, req) + if errors.Is(sErr, exporterqueue.ErrQueueIsFull) { + be.Obsrep.RecordEnqueueFailure(ctx, pipeline.SignalLogs, int64(req.ItemsCount())) } return sErr - }, be.consumerOptions...) + }, be.ConsumerOptions...) return &logsExporter{ - baseExporter: be, + BaseExporter: be, Logs: lc, }, err } type logsExporterWithObservability struct { - baseRequestSender - obsrep *ObsReport + internal.BaseSender[Request] + obsrep *internal.ObsReport } -func newLogsExporterWithObservability(obsrep *ObsReport) requestSender { +func newLogsWithObservability(obsrep *internal.ObsReport) internal.Sender[Request] { return &logsExporterWithObservability{obsrep: obsrep} } -func (lewo *logsExporterWithObservability) send(ctx context.Context, req Request) error { +func (lewo *logsExporterWithObservability) Send(ctx context.Context, req Request) error { c := lewo.obsrep.StartLogsOp(ctx) - err := lewo.nextSender.send(c, req) - lewo.obsrep.EndLogsOp(c, req.ItemsCount(), err) + numLogRecords := req.ItemsCount() + err := lewo.NextSender.Send(c, req) + lewo.obsrep.EndLogsOp(c, numLogRecords, err) return err } diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/logs_batch.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/logs_batch.go index 296538bc0e0..4e4609b18ca 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/logs_batch.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/logs_batch.go @@ -11,39 +11,41 @@ import ( "go.opentelemetry.io/collector/pdata/plog" ) -// mergeLogs merges two logs requests into one. -func mergeLogs(_ context.Context, r1 Request, r2 Request) (Request, error) { - lr1, ok1 := r1.(*logsRequest) - lr2, ok2 := r2.(*logsRequest) - if !ok1 || !ok2 { - return nil, errors.New("invalid input type") +// MergeSplit splits and/or merges the provided logs request and the current request into one or more requests +// conforming with the MaxSizeConfig. +func (req *logsRequest) MergeSplit(_ context.Context, cfg exporterbatcher.MaxSizeConfig, r2 Request) ([]Request, error) { + var req2 *logsRequest + if r2 != nil { + var ok bool + req2, ok = r2.(*logsRequest) + if !ok { + return nil, errors.New("invalid input type") + } + } + + if cfg.MaxSizeItems == 0 { + req2.ld.ResourceLogs().MoveAndAppendTo(req.ld.ResourceLogs()) + return []Request{req}, nil } - lr2.ld.ResourceLogs().MoveAndAppendTo(lr1.ld.ResourceLogs()) - return lr1, nil -} -// mergeSplitLogs splits and/or merges the logs into multiple requests based on the MaxSizeConfig. 
-func mergeSplitLogs(_ context.Context, cfg exporterbatcher.MaxSizeConfig, r1 Request, r2 Request) ([]Request, error) { var ( res []Request destReq *logsRequest capacityLeft = cfg.MaxSizeItems ) - for _, req := range []Request{r1, r2} { - if req == nil { + for _, srcReq := range []*logsRequest{req, req2} { + if srcReq == nil { continue } - srcReq, ok := req.(*logsRequest) - if !ok { - return nil, errors.New("invalid input type") - } - if srcReq.ld.LogRecordCount() <= capacityLeft { + + srcCount := srcReq.ld.LogRecordCount() + if srcCount <= capacityLeft { if destReq == nil { destReq = srcReq } else { srcReq.ld.ResourceLogs().MoveAndAppendTo(destReq.ld.ResourceLogs()) } - capacityLeft -= destReq.ld.LogRecordCount() + capacityLeft -= srcCount continue } diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/metadata.yaml b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/metadata.yaml index 26b06a37536..9ce34e0306a 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/metadata.yaml +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/metadata.yaml @@ -1,82 +1,122 @@ type: exporterhelper +github_project: open-telemetry/opentelemetry-collector status: class: exporter not_component: true stability: beta: [traces, metrics, logs] - distributions: [core, contrib] telemetry: metrics: exporter_sent_spans: enabled: true + stability: + level: alpha description: Number of spans successfully sent to destination. - unit: 1 + unit: "{spans}" sum: value_type: int monotonic: true exporter_send_failed_spans: enabled: true + stability: + level: alpha description: Number of spans in failed attempts to send to destination. - unit: 1 + unit: "{spans}" sum: value_type: int monotonic: true exporter_enqueue_failed_spans: enabled: true + stability: + level: alpha description: Number of spans failed to be added to the sending queue. - unit: 1 + unit: "{spans}" sum: value_type: int monotonic: true exporter_sent_metric_points: enabled: true + stability: + level: alpha description: Number of metric points successfully sent to destination. - unit: 1 + unit: "{datapoints}" sum: value_type: int monotonic: true exporter_send_failed_metric_points: enabled: true + stability: + level: alpha description: Number of metric points in failed attempts to send to destination. - unit: 1 + unit: "{datapoints}" sum: value_type: int monotonic: true exporter_enqueue_failed_metric_points: enabled: true + stability: + level: alpha description: Number of metric points failed to be added to the sending queue. - unit: 1 + unit: "{datapoints}" sum: value_type: int monotonic: true exporter_sent_log_records: enabled: true + stability: + level: alpha description: Number of log record successfully sent to destination. - unit: 1 + unit: "{records}" sum: value_type: int monotonic: true exporter_send_failed_log_records: enabled: true + stability: + level: alpha description: Number of log records in failed attempts to send to destination. - unit: 1 + unit: "{records}" sum: value_type: int monotonic: true exporter_enqueue_failed_log_records: enabled: true + stability: + level: alpha description: Number of log records failed to be added to the sending queue. 
- unit: 1 + unit: "{records}" sum: value_type: int monotonic: true + + exporter_queue_size: + enabled: true + stability: + level: alpha + description: Current size of the retry queue (in batches) + unit: "{batches}" + optional: true + gauge: + value_type: int + async: true + + exporter_queue_capacity: + enabled: true + stability: + level: alpha + description: Fixed capacity of the retry queue (in batches) + unit: "{batches}" + optional: true + gauge: + value_type: int + async: true diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/metrics.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/metrics.go index 83db18229dd..e3d4ccbd2ae 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/metrics.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/metrics.go @@ -13,13 +13,16 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/exporterhelper/internal" "go.opentelemetry.io/collector/exporter/exporterqueue" - "go.opentelemetry.io/collector/exporter/internal/queue" "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pipeline" ) -var metricsMarshaler = &pmetric.ProtoMarshaler{} -var metricsUnmarshaler = &pmetric.ProtoUnmarshaler{} +var ( + metricsMarshaler = &pmetric.ProtoMarshaler{} + metricsUnmarshaler = &pmetric.ProtoUnmarshaler{} +) type metricsRequest struct { md pmetric.Metrics @@ -64,14 +67,14 @@ func (req *metricsRequest) ItemsCount() int { } type metricsExporter struct { - *baseExporter + *internal.BaseExporter consumer.Metrics } -// NewMetricsExporter creates an exporter.Metrics that records observability metrics and wraps every request with a Span. -func NewMetricsExporter( +// NewMetrics creates an exporter.Metrics that records observability metrics and wraps every request with a Span. +func NewMetrics( ctx context.Context, - set exporter.CreateSettings, + set exporter.Settings, cfg component.Config, pusher consumer.ConsumeMetricsFunc, options ...Option, @@ -83,10 +86,9 @@ func NewMetricsExporter( return nil, errNilPushMetricsData } metricsOpts := []Option{ - withMarshaler(metricsRequestMarshaler), withUnmarshaler(newMetricsRequestUnmarshalerFunc(pusher)), - withBatchFuncs(mergeMetrics, mergeSplitMetrics), + internal.WithMarshaler(metricsRequestMarshaler), internal.WithUnmarshaler(newMetricsRequestUnmarshalerFunc(pusher)), } - return NewMetricsRequestExporter(ctx, set, requestFromMetrics(pusher), append(metricsOpts, options...)...) + return NewMetricsRequest(ctx, set, requestFromMetrics(pusher), append(metricsOpts, options...)...) } // RequestFromMetricsFunc converts pdata.Metrics into a user-defined request. @@ -101,12 +103,12 @@ func requestFromMetrics(pusher consumer.ConsumeMetricsFunc) RequestFromMetricsFu } } -// NewMetricsRequestExporter creates a new metrics exporter based on a custom MetricsConverter and RequestSender. +// NewMetricsRequest creates a new metrics exporter based on a custom MetricsConverter and Sender. // Experimental: This API is at the early stage of development and may change without backward compatibility // until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. 
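// Hedged sketch (ours): NewMetricsRequest below takes a converter from pmetric.Metrics
// to the still-experimental Request interface, which only requires Export and
// ItemsCount. A minimal custom request and converter; metricsAsRequest and fromMetrics
// are invented names for illustration:
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

// metricsAsRequest satisfies the two-method Request contract (Export, ItemsCount).
type metricsAsRequest struct {
	md pmetric.Metrics
}

func (r *metricsAsRequest) Export(_ context.Context) error {
	fmt.Printf("exporting %d data points\n", r.md.DataPointCount())
	return nil // a real implementation would push r.md to the backend
}

func (r *metricsAsRequest) ItemsCount() int {
	return r.md.DataPointCount()
}

// fromMetrics has the shape NewMetricsRequest expects from its converter
// (modulo returning the concrete type instead of the Request interface).
func fromMetrics(_ context.Context, md pmetric.Metrics) (*metricsAsRequest, error) {
	return &metricsAsRequest{md: md}, nil
}

func main() {
	req, _ := fromMetrics(context.Background(), pmetric.NewMetrics())
	fmt.Println(req.ItemsCount()) // 0 for an empty payload
}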
-func NewMetricsRequestExporter( +func NewMetricsRequest( _ context.Context, - set exporter.CreateSettings, + set exporter.Settings, converter RequestFromMetricsFunc, options ...Option, ) (exporter.Metrics, error) { @@ -118,7 +120,7 @@ func NewMetricsRequestExporter( return nil, errNilMetricsConverter } - be, err := newBaseExporter(set, component.DataTypeMetrics, newMetricsSenderWithObservability, options...) + be, err := internal.NewBaseExporter(set, pipeline.SignalMetrics, newMetricsSenderWithObservability, options...) if err != nil { return nil, err } @@ -131,31 +133,32 @@ func NewMetricsRequestExporter( zap.Error(err)) return consumererror.NewPermanent(cErr) } - sErr := be.send(ctx, req) - if errors.Is(sErr, queue.ErrQueueIsFull) { - be.obsrep.recordEnqueueFailure(ctx, component.DataTypeMetrics, int64(req.ItemsCount())) + sErr := be.Send(ctx, req) + if errors.Is(sErr, exporterqueue.ErrQueueIsFull) { + be.Obsrep.RecordEnqueueFailure(ctx, pipeline.SignalMetrics, int64(req.ItemsCount())) } return sErr - }, be.consumerOptions...) + }, be.ConsumerOptions...) return &metricsExporter{ - baseExporter: be, + BaseExporter: be, Metrics: mc, }, err } type metricsSenderWithObservability struct { - baseRequestSender - obsrep *ObsReport + internal.BaseSender[Request] + obsrep *internal.ObsReport } -func newMetricsSenderWithObservability(obsrep *ObsReport) requestSender { +func newMetricsSenderWithObservability(obsrep *internal.ObsReport) internal.Sender[Request] { return &metricsSenderWithObservability{obsrep: obsrep} } -func (mewo *metricsSenderWithObservability) send(ctx context.Context, req Request) error { +func (mewo *metricsSenderWithObservability) Send(ctx context.Context, req Request) error { c := mewo.obsrep.StartMetricsOp(ctx) - err := mewo.nextSender.send(c, req) - mewo.obsrep.EndMetricsOp(c, req.ItemsCount(), err) + numMetricDataPoints := req.ItemsCount() + err := mewo.NextSender.Send(c, req) + mewo.obsrep.EndMetricsOp(c, numMetricDataPoints, err) return err } diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/metrics_batch.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/metrics_batch.go index 1a6448c8496..3ec240d40a6 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/metrics_batch.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/metrics_batch.go @@ -11,39 +11,41 @@ import ( "go.opentelemetry.io/collector/pdata/pmetric" ) -// mergeMetrics merges two metrics requests into one. -func mergeMetrics(_ context.Context, r1 Request, r2 Request) (Request, error) { - mr1, ok1 := r1.(*metricsRequest) - mr2, ok2 := r2.(*metricsRequest) - if !ok1 || !ok2 { - return nil, errors.New("invalid input type") +// MergeSplit splits and/or merges the provided metrics request and the current request into one or more requests +// conforming with the MaxSizeConfig. +func (req *metricsRequest) MergeSplit(_ context.Context, cfg exporterbatcher.MaxSizeConfig, r2 Request) ([]Request, error) { + var req2 *metricsRequest + if r2 != nil { + var ok bool + req2, ok = r2.(*metricsRequest) + if !ok { + return nil, errors.New("invalid input type") + } + } + + if cfg.MaxSizeItems == 0 { + req2.md.ResourceMetrics().MoveAndAppendTo(req.md.ResourceMetrics()) + return []Request{req}, nil } - mr2.md.ResourceMetrics().MoveAndAppendTo(mr1.md.ResourceMetrics()) - return mr1, nil -} -// mergeSplitMetrics splits and/or merges the metrics into multiple requests based on the MaxSizeConfig. 
-func mergeSplitMetrics(_ context.Context, cfg exporterbatcher.MaxSizeConfig, r1 Request, r2 Request) ([]Request, error) { var ( res []Request destReq *metricsRequest capacityLeft = cfg.MaxSizeItems ) - for _, req := range []Request{r1, r2} { - if req == nil { + for _, srcReq := range []*metricsRequest{req, req2} { + if srcReq == nil { continue } - srcReq, ok := req.(*metricsRequest) - if !ok { - return nil, errors.New("invalid input type") - } - if srcReq.md.DataPointCount() <= capacityLeft { + + srcCount := srcReq.md.DataPointCount() + if srcCount <= capacityLeft { if destReq == nil { destReq = srcReq } else { srcReq.md.ResourceMetrics().MoveAndAppendTo(destReq.md.ResourceMetrics()) } - capacityLeft -= destReq.md.DataPointCount() + capacityLeft -= srcCount continue } diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/obsexporter.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/obsexporter.go deleted file mode 100644 index e3a78c34b04..00000000000 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/obsexporter.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package exporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper" - -import ( - "context" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" - - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/config/configtelemetry" - "go.opentelemetry.io/collector/exporter" - "go.opentelemetry.io/collector/exporter/exporterhelper/internal/metadata" - "go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics" -) - -// ObsReport is a helper to add observability to an exporter. -type ObsReport struct { - level configtelemetry.Level - spanNamePrefix string - tracer trace.Tracer - logger *zap.Logger - - otelAttrs []attribute.KeyValue - telemetryBuilder *metadata.TelemetryBuilder -} - -// ObsReportSettings are settings for creating an ObsReport. -type ObsReportSettings struct { - ExporterID component.ID - ExporterCreateSettings exporter.CreateSettings -} - -// NewObsReport creates a new Exporter. -func NewObsReport(cfg ObsReportSettings) (*ObsReport, error) { - return newExporter(cfg) -} - -func newExporter(cfg ObsReportSettings) (*ObsReport, error) { - telemetryBuilder, err := metadata.NewTelemetryBuilder(cfg.ExporterCreateSettings.TelemetrySettings) - if err != nil { - return nil, err - } - - return &ObsReport{ - level: cfg.ExporterCreateSettings.TelemetrySettings.MetricsLevel, - spanNamePrefix: obsmetrics.ExporterPrefix + cfg.ExporterID.String(), - tracer: cfg.ExporterCreateSettings.TracerProvider.Tracer(cfg.ExporterID.String()), - logger: cfg.ExporterCreateSettings.Logger, - - otelAttrs: []attribute.KeyValue{ - attribute.String(obsmetrics.ExporterKey, cfg.ExporterID.String()), - }, - telemetryBuilder: telemetryBuilder, - }, nil -} - -// StartTracesOp is called at the start of an Export operation. -// The returned context should be used in other calls to the Exporter functions -// dealing with the same export operation. -func (or *ObsReport) StartTracesOp(ctx context.Context) context.Context { - return or.startOp(ctx, obsmetrics.ExportTraceDataOperationSuffix) -} - -// EndTracesOp completes the export operation that was started with StartTracesOp. 
-func (or *ObsReport) EndTracesOp(ctx context.Context, numSpans int, err error) { - numSent, numFailedToSend := toNumItems(numSpans, err) - or.recordMetrics(noCancellationContext{Context: ctx}, component.DataTypeTraces, numSent, numFailedToSend) - endSpan(ctx, err, numSent, numFailedToSend, obsmetrics.SentSpansKey, obsmetrics.FailedToSendSpansKey) -} - -// StartMetricsOp is called at the start of an Export operation. -// The returned context should be used in other calls to the Exporter functions -// dealing with the same export operation. -func (or *ObsReport) StartMetricsOp(ctx context.Context) context.Context { - return or.startOp(ctx, obsmetrics.ExportMetricsOperationSuffix) -} - -// EndMetricsOp completes the export operation that was started with -// StartMetricsOp. -func (or *ObsReport) EndMetricsOp(ctx context.Context, numMetricPoints int, err error) { - numSent, numFailedToSend := toNumItems(numMetricPoints, err) - or.recordMetrics(noCancellationContext{Context: ctx}, component.DataTypeMetrics, numSent, numFailedToSend) - endSpan(ctx, err, numSent, numFailedToSend, obsmetrics.SentMetricPointsKey, obsmetrics.FailedToSendMetricPointsKey) -} - -// StartLogsOp is called at the start of an Export operation. -// The returned context should be used in other calls to the Exporter functions -// dealing with the same export operation. -func (or *ObsReport) StartLogsOp(ctx context.Context) context.Context { - return or.startOp(ctx, obsmetrics.ExportLogsOperationSuffix) -} - -// EndLogsOp completes the export operation that was started with StartLogsOp. -func (or *ObsReport) EndLogsOp(ctx context.Context, numLogRecords int, err error) { - numSent, numFailedToSend := toNumItems(numLogRecords, err) - or.recordMetrics(noCancellationContext{Context: ctx}, component.DataTypeLogs, numSent, numFailedToSend) - endSpan(ctx, err, numSent, numFailedToSend, obsmetrics.SentLogRecordsKey, obsmetrics.FailedToSendLogRecordsKey) -} - -// startOp creates the span used to trace the operation. Returning -// the updated context and the created span. -func (or *ObsReport) startOp(ctx context.Context, operationSuffix string) context.Context { - spanName := or.spanNamePrefix + operationSuffix - ctx, _ = or.tracer.Start(ctx, spanName) - return ctx -} - -func (or *ObsReport) recordMetrics(ctx context.Context, dataType component.DataType, sent, failed int64) { - if or.level == configtelemetry.LevelNone { - return - } - var sentMeasure, failedMeasure metric.Int64Counter - switch dataType { - case component.DataTypeTraces: - sentMeasure = or.telemetryBuilder.ExporterSentSpans - failedMeasure = or.telemetryBuilder.ExporterSendFailedSpans - case component.DataTypeMetrics: - sentMeasure = or.telemetryBuilder.ExporterSentMetricPoints - failedMeasure = or.telemetryBuilder.ExporterSendFailedMetricPoints - case component.DataTypeLogs: - sentMeasure = or.telemetryBuilder.ExporterSentLogRecords - failedMeasure = or.telemetryBuilder.ExporterSendFailedLogRecords - } - - sentMeasure.Add(ctx, sent, metric.WithAttributes(or.otelAttrs...)) - failedMeasure.Add(ctx, failed, metric.WithAttributes(or.otelAttrs...)) -} - -func endSpan(ctx context.Context, err error, numSent, numFailedToSend int64, sentItemsKey, failedToSendItemsKey string) { - span := trace.SpanFromContext(ctx) - // End the span according to errors. 
- if span.IsRecording() { - span.SetAttributes( - attribute.Int64(sentItemsKey, numSent), - attribute.Int64(failedToSendItemsKey, numFailedToSend), - ) - if err != nil { - span.SetStatus(codes.Error, err.Error()) - } - } - span.End() -} - -func toNumItems(numExportedItems int, err error) (int64, int64) { - if err != nil { - return 0, int64(numExportedItems) - } - return int64(numExportedItems), 0 -} - -func (or *ObsReport) recordEnqueueFailure(ctx context.Context, dataType component.DataType, failed int64) { - var enqueueFailedMeasure metric.Int64Counter - switch dataType { - case component.DataTypeTraces: - enqueueFailedMeasure = or.telemetryBuilder.ExporterEnqueueFailedSpans - case component.DataTypeMetrics: - enqueueFailedMeasure = or.telemetryBuilder.ExporterEnqueueFailedMetricPoints - case component.DataTypeLogs: - enqueueFailedMeasure = or.telemetryBuilder.ExporterEnqueueFailedLogRecords - } - - enqueueFailedMeasure.Add(ctx, failed, metric.WithAttributes(or.otelAttrs...)) -} diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/queue_sender.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/queue_sender.go index 3e539d7f9a0..f09932c5b86 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/queue_sender.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/queue_sender.go @@ -3,179 +3,12 @@ package exporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper" -import ( - "context" - "errors" - "time" +import "go.opentelemetry.io/collector/exporter/exporterhelper/internal" - "go.opentelemetry.io/otel/attribute" - otelmetric "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/trace" - "go.uber.org/multierr" - "go.uber.org/zap" +// QueueConfig defines configuration for queueing batches before sending to the consumerSender. +type QueueConfig = internal.QueueConfig - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/exporter" - "go.opentelemetry.io/collector/exporter/exporterqueue" - "go.opentelemetry.io/collector/exporter/internal/queue" - "go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics" -) - -const defaultQueueSize = 1000 - -var ( - scopeName = "go.opentelemetry.io/collector/exporterhelper" -) - -// QueueSettings defines configuration for queueing batches before sending to the consumerSender. -type QueueSettings struct { - // Enabled indicates whether to not enqueue batches before sending to the consumerSender. - Enabled bool `mapstructure:"enabled"` - // NumConsumers is the number of consumers from the queue. Defaults to 10. - // If batching is enabled, a combined batch cannot contain more requests than the number of consumers. - // So it's recommended to set higher number of consumers if batching is enabled. - NumConsumers int `mapstructure:"num_consumers"` - // QueueSize is the maximum number of batches allowed in queue at a given time. - QueueSize int `mapstructure:"queue_size"` - // StorageID if not empty, enables the persistent storage and uses the component specified - // as a storage extension for the persistent queue - StorageID *component.ID `mapstructure:"storage"` -} - -// NewDefaultQueueSettings returns the default settings for QueueSettings. 
-func NewDefaultQueueSettings() QueueSettings { - return QueueSettings{ - Enabled: true, - NumConsumers: 10, - // By default, batches are 8192 spans, for a total of up to 8 million spans in the queue - // This can be estimated at 1-4 GB worth of maximum memory usage - // This default is probably still too high, and may be adjusted further down in a future release - QueueSize: defaultQueueSize, - } -} - -// Validate checks if the QueueSettings configuration is valid -func (qCfg *QueueSettings) Validate() error { - if !qCfg.Enabled { - return nil - } - - if qCfg.QueueSize <= 0 { - return errors.New("queue size must be positive") - } - - if qCfg.NumConsumers <= 0 { - return errors.New("number of queue consumers must be positive") - } - - return nil -} - -type queueSender struct { - baseRequestSender - fullName string - queue exporterqueue.Queue[Request] - numConsumers int - traceAttribute attribute.KeyValue - logger *zap.Logger - meter otelmetric.Meter - consumers *queue.Consumers[Request] - - metricCapacity otelmetric.Int64ObservableGauge - metricSize otelmetric.Int64ObservableGauge -} - -func newQueueSender(q exporterqueue.Queue[Request], set exporter.CreateSettings, numConsumers int, - exportFailureMessage string) *queueSender { - qs := &queueSender{ - fullName: set.ID.String(), - queue: q, - numConsumers: numConsumers, - traceAttribute: attribute.String(obsmetrics.ExporterKey, set.ID.String()), - logger: set.TelemetrySettings.Logger, - meter: set.TelemetrySettings.MeterProvider.Meter(scopeName), - } - consumeFunc := func(ctx context.Context, req Request) error { - err := qs.nextSender.send(ctx, req) - if err != nil { - set.Logger.Error("Exporting failed. Dropping data."+exportFailureMessage, - zap.Error(err), zap.Int("dropped_items", req.ItemsCount())) - } - return err - } - qs.consumers = queue.NewQueueConsumers[Request](q, numConsumers, consumeFunc) - return qs -} - -// Start is invoked during service startup. -func (qs *queueSender) Start(ctx context.Context, host component.Host) error { - if err := qs.consumers.Start(ctx, host); err != nil { - return err - } - - var err, errs error - - attrs := otelmetric.WithAttributeSet(attribute.NewSet(attribute.String(obsmetrics.ExporterKey, qs.fullName))) - - qs.metricSize, err = qs.meter.Int64ObservableGauge( - obsmetrics.ExporterKey+"/queue_size", - otelmetric.WithDescription("Current size of the retry queue (in batches)"), - otelmetric.WithUnit("1"), - otelmetric.WithInt64Callback(func(_ context.Context, o otelmetric.Int64Observer) error { - o.Observe(int64(qs.queue.Size()), attrs) - return nil - }), - ) - errs = multierr.Append(errs, err) - - qs.metricCapacity, err = qs.meter.Int64ObservableGauge( - obsmetrics.ExporterKey+"/queue_capacity", - otelmetric.WithDescription("Fixed capacity of the retry queue (in batches)"), - otelmetric.WithUnit("1"), - otelmetric.WithInt64Callback(func(_ context.Context, o otelmetric.Int64Observer) error { - o.Observe(int64(qs.queue.Capacity()), attrs) - return nil - })) - - errs = multierr.Append(errs, err) - return errs -} - -// Shutdown is invoked during service shutdown. -func (qs *queueSender) Shutdown(ctx context.Context) error { - // Stop the queue and consumers, this will drain the queue and will call the retry (which is stopped) that will only - // try once every request. - return qs.consumers.Shutdown(ctx) -} - -// send implements the requestSender interface. It puts the request in the queue. 
-func (qs *queueSender) send(ctx context.Context, req Request) error { - // Prevent cancellation and deadline to propagate to the context stored in the queue. - // The grpc/http based receivers will cancel the request context after this function returns. - c := noCancellationContext{Context: ctx} - - span := trace.SpanFromContext(c) - if err := qs.queue.Offer(c, req); err != nil { - span.AddEvent("Failed to enqueue item.", trace.WithAttributes(qs.traceAttribute)) - return err - } - - span.AddEvent("Enqueued item.", trace.WithAttributes(qs.traceAttribute)) - return nil -} - -type noCancellationContext struct { - context.Context -} - -func (noCancellationContext) Deadline() (deadline time.Time, ok bool) { - return -} - -func (noCancellationContext) Done() <-chan struct{} { - return nil -} - -func (noCancellationContext) Err() error { - return nil +// NewDefaultQueueConfig returns the default config for QueueConfig. +func NewDefaultQueueConfig() QueueConfig { + return internal.NewDefaultQueueConfig() } diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/request.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/request.go deleted file mode 100644 index 74f0186541f..00000000000 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/request.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package exporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper" - -import ( - "context" -) - -// Request represents a single request that can be sent to an external endpoint. -// Experimental: This API is at the early stage of development and may change without backward compatibility -// until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. -type Request interface { - // Export exports the request to an external endpoint. - Export(ctx context.Context) error - // ItemsCount returns a number of basic items in the request where item is the smallest piece of data that can be - // sent. For example, for OTLP exporter, this value represents the number of spans, - // metric data points or log records. - ItemsCount() int -} - -// RequestErrorHandler is an optional interface that can be implemented by Request to provide a way handle partial -// temporary failures. For example, if some items failed to process and can be retried, this interface allows to -// return a new Request that contains the items left to be sent. Otherwise, the original Request should be returned. -// If not implemented, the original Request will be returned assuming the error is applied to the whole Request. -// Experimental: This API is at the early stage of development and may change without backward compatibility -// until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. -type RequestErrorHandler interface { - Request - // OnError returns a new Request may contain the items left to be sent if some items failed to process and can be retried. - // Otherwise, it should return the original Request. - OnError(error) Request -} - -// extractPartialRequest returns a new Request that may contain the items left to be sent -// if only some items failed to process and can be retried. Otherwise, it returns the original Request. 
-func extractPartialRequest(req Request, err error) Request { - if errReq, ok := req.(RequestErrorHandler); ok { - return errReq.OnError(err) - } - return req -} diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/retry_sender.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/retry_sender.go index 6e8a36f9ef4..5b4476bb1f6 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/retry_sender.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/retry_sender.go @@ -4,139 +4,12 @@ package exporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper" import ( - "context" - "errors" - "fmt" "time" - "github.com/cenkalti/backoff/v4" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" - - "go.opentelemetry.io/collector/config/configretry" - "go.opentelemetry.io/collector/consumer/consumererror" - "go.opentelemetry.io/collector/exporter" - "go.opentelemetry.io/collector/exporter/internal/experr" - "go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics" + "go.opentelemetry.io/collector/exporter/exporterhelper/internal" ) -// TODO: Clean this by forcing all exporters to return an internal error type that always include the information about retries. -type throttleRetry struct { - err error - delay time.Duration -} - -func (t throttleRetry) Error() string { - return "Throttle (" + t.delay.String() + "), error: " + t.err.Error() -} - -func (t throttleRetry) Unwrap() error { - return t.err -} - // NewThrottleRetry creates a new throttle retry error. func NewThrottleRetry(err error, delay time.Duration) error { - return throttleRetry{ - err: err, - delay: delay, - } -} - -type retrySender struct { - baseRequestSender - traceAttribute attribute.KeyValue - cfg configretry.BackOffConfig - stopCh chan struct{} - logger *zap.Logger -} - -func newRetrySender(config configretry.BackOffConfig, set exporter.CreateSettings) *retrySender { - return &retrySender{ - traceAttribute: attribute.String(obsmetrics.ExporterKey, set.ID.String()), - cfg: config, - stopCh: make(chan struct{}), - logger: set.Logger, - } -} - -func (rs *retrySender) Shutdown(context.Context) error { - close(rs.stopCh) - return nil -} - -// send implements the requestSender interface -func (rs *retrySender) send(ctx context.Context, req Request) error { - // Do not use NewExponentialBackOff since it calls Reset and the code here must - // call Reset after changing the InitialInterval (this saves an unnecessary call to Now). - expBackoff := backoff.ExponentialBackOff{ - InitialInterval: rs.cfg.InitialInterval, - RandomizationFactor: rs.cfg.RandomizationFactor, - Multiplier: rs.cfg.Multiplier, - MaxInterval: rs.cfg.MaxInterval, - MaxElapsedTime: rs.cfg.MaxElapsedTime, - Stop: backoff.Stop, - Clock: backoff.SystemClock, - } - expBackoff.Reset() - span := trace.SpanFromContext(ctx) - retryNum := int64(0) - for { - span.AddEvent( - "Sending request.", - trace.WithAttributes(rs.traceAttribute, attribute.Int64("retry_num", retryNum))) - - err := rs.nextSender.send(ctx, req) - if err == nil { - return nil - } - - // Immediately drop data on permanent errors. 
- if consumererror.IsPermanent(err) { - return fmt.Errorf("not retryable error: %w", err) - } - - req = extractPartialRequest(req, err) - - backoffDelay := expBackoff.NextBackOff() - if backoffDelay == backoff.Stop { - return fmt.Errorf("no more retries left: %w", err) - } - - throttleErr := throttleRetry{} - if errors.As(err, &throttleErr) { - backoffDelay = max(backoffDelay, throttleErr.delay) - } - - backoffDelayStr := backoffDelay.String() - span.AddEvent( - "Exporting failed. Will retry the request after interval.", - trace.WithAttributes( - rs.traceAttribute, - attribute.String("interval", backoffDelayStr), - attribute.String("error", err.Error()))) - rs.logger.Info( - "Exporting failed. Will retry the request after interval.", - zap.Error(err), - zap.String("interval", backoffDelayStr), - ) - retryNum++ - - // back-off, but get interrupted when shutting down or request is cancelled or timed out. - select { - case <-ctx.Done(): - return fmt.Errorf("request is cancelled or timed out %w", err) - case <-rs.stopCh: - return experr.NewShutdownErr(err) - case <-time.After(backoffDelay): - } - } -} - -// max returns the larger of x or y. -func max(x, y time.Duration) time.Duration { - if x < y { - return y - } - return x + return internal.NewThrottleRetry(err, delay) } diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/timeout_sender.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/timeout_sender.go index 2a6364a2aaf..090caf2d7d9 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/timeout_sender.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/timeout_sender.go @@ -4,47 +4,12 @@ package exporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper" import ( - "context" - "errors" - "time" + "go.opentelemetry.io/collector/exporter/exporterhelper/internal" ) -// TimeoutSettings for timeout. The timeout applies to individual attempts to send data to the backend. -type TimeoutSettings struct { - // Timeout is the timeout for every attempt to send data to the backend. - // A zero timeout means no timeout. - Timeout time.Duration `mapstructure:"timeout"` -} - -func (ts *TimeoutSettings) Validate() error { - // Negative timeouts are not acceptable, since all sends will fail. - if ts.Timeout < 0 { - return errors.New("'timeout' must be non-negative") - } - return nil -} - -// NewDefaultTimeoutSettings returns the default settings for TimeoutSettings. -func NewDefaultTimeoutSettings() TimeoutSettings { - return TimeoutSettings{ - Timeout: 5 * time.Second, - } -} - -// timeoutSender is a requestSender that adds a `timeout` to every request that passes this sender. -type timeoutSender struct { - baseRequestSender - cfg TimeoutSettings -} +type TimeoutConfig = internal.TimeoutConfig -func (ts *timeoutSender) send(ctx context.Context, req Request) error { - // TODO: Remove this by avoiding to create the timeout sender if timeout is 0. - if ts.cfg.Timeout == 0 { - return req.Export(ctx) - } - // Intentionally don't overwrite the context inside the request, because in case of retries deadline will not be - // updated because this deadline most likely is before the next one. - tCtx, cancelFunc := context.WithTimeout(ctx, ts.cfg.Timeout) - defer cancelFunc() - return req.Export(tCtx) +// NewDefaultTimeoutConfig returns the default config for TimeoutConfig. 
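The deleted timeoutSender (now moved under internal) applied the timeout per attempt with context.WithTimeout, deliberately leaving the caller's context untouched so each retry gets a fresh deadline. A minimal sketch of that wrapper, with a toy export function standing in for Request.Export:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// sendWithTimeout bounds a single export attempt. A zero timeout means
// no per-attempt deadline, matching the deleted timeoutSender.send.
func sendWithTimeout(ctx context.Context, timeout time.Duration, export func(context.Context) error) error {
	if timeout == 0 {
		return export(ctx)
	}
	tCtx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	return export(tCtx)
}

func main() {
	slowExport := func(ctx context.Context) error {
		select {
		case <-time.After(200 * time.Millisecond):
			return nil
		case <-ctx.Done():
			return ctx.Err()
		}
	}
	err := sendWithTimeout(context.Background(), 50*time.Millisecond, slowExport)
	fmt.Println(errors.Is(err, context.DeadlineExceeded)) // true
}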
+func NewDefaultTimeoutConfig() TimeoutConfig { + return internal.NewDefaultTimeoutConfig() } diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/traces.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/traces.go index 6f5f39f3de5..f8387d5a3b8 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/traces.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/traces.go @@ -13,13 +13,16 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/exporterhelper/internal" "go.opentelemetry.io/collector/exporter/exporterqueue" - "go.opentelemetry.io/collector/exporter/internal/queue" "go.opentelemetry.io/collector/pdata/ptrace" + "go.opentelemetry.io/collector/pipeline" ) -var tracesMarshaler = &ptrace.ProtoMarshaler{} -var tracesUnmarshaler = &ptrace.ProtoUnmarshaler{} +var ( + tracesMarshaler = &ptrace.ProtoMarshaler{} + tracesUnmarshaler = &ptrace.ProtoUnmarshaler{} +) type tracesRequest struct { td ptrace.Traces @@ -63,15 +66,15 @@ func (req *tracesRequest) ItemsCount() int { return req.td.SpanCount() } -type traceExporter struct { - *baseExporter +type tracesExporter struct { + *internal.BaseExporter consumer.Traces } -// NewTracesExporter creates an exporter.Traces that records observability metrics and wraps every request with a Span. -func NewTracesExporter( +// NewTraces creates an exporter.Traces that records observability metrics and wraps every request with a Span. +func NewTraces( ctx context.Context, - set exporter.CreateSettings, + set exporter.Settings, cfg component.Config, pusher consumer.ConsumeTracesFunc, options ...Option, @@ -83,10 +86,9 @@ func NewTracesExporter( return nil, errNilPushTraceData } tracesOpts := []Option{ - withMarshaler(tracesRequestMarshaler), withUnmarshaler(newTraceRequestUnmarshalerFunc(pusher)), - withBatchFuncs(mergeTraces, mergeSplitTraces), + internal.WithMarshaler(tracesRequestMarshaler), internal.WithUnmarshaler(newTraceRequestUnmarshalerFunc(pusher)), } - return NewTracesRequestExporter(ctx, set, requestFromTraces(pusher), append(tracesOpts, options...)...) + return NewTracesRequest(ctx, set, requestFromTraces(pusher), append(tracesOpts, options...)...) } // RequestFromTracesFunc converts ptrace.Traces into a user-defined Request. @@ -101,12 +103,12 @@ func requestFromTraces(pusher consumer.ConsumeTracesFunc) RequestFromTracesFunc } } -// NewTracesRequestExporter creates a new traces exporter based on a custom TracesConverter and RequestSender. +// NewTracesRequest creates a new traces exporter based on a custom TracesConverter and Sender. // Experimental: This API is at the early stage of development and may change without backward compatibility // until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. -func NewTracesRequestExporter( +func NewTracesRequest( _ context.Context, - set exporter.CreateSettings, + set exporter.Settings, converter RequestFromTracesFunc, options ...Option, ) (exporter.Traces, error) { @@ -118,7 +120,7 @@ func NewTracesRequestExporter( return nil, errNilTracesConverter } - be, err := newBaseExporter(set, component.DataTypeTraces, newTracesExporterWithObservability, options...) + be, err := internal.NewBaseExporter(set, pipeline.SignalTraces, newTracesWithObservability, options...) 
if err != nil { return nil, err } @@ -131,32 +133,33 @@ func NewTracesRequestExporter( zap.Error(err)) return consumererror.NewPermanent(cErr) } - sErr := be.send(ctx, req) - if errors.Is(sErr, queue.ErrQueueIsFull) { - be.obsrep.recordEnqueueFailure(ctx, component.DataTypeTraces, int64(req.ItemsCount())) + sErr := be.Send(ctx, req) + if errors.Is(sErr, exporterqueue.ErrQueueIsFull) { + be.Obsrep.RecordEnqueueFailure(ctx, pipeline.SignalTraces, int64(req.ItemsCount())) } return sErr - }, be.consumerOptions...) + }, be.ConsumerOptions...) - return &traceExporter{ - baseExporter: be, + return &tracesExporter{ + BaseExporter: be, Traces: tc, }, err } -type tracesExporterWithObservability struct { - baseRequestSender - obsrep *ObsReport +type tracesWithObservability struct { + internal.BaseSender[Request] + obsrep *internal.ObsReport } -func newTracesExporterWithObservability(obsrep *ObsReport) requestSender { - return &tracesExporterWithObservability{obsrep: obsrep} +func newTracesWithObservability(obsrep *internal.ObsReport) internal.Sender[Request] { + return &tracesWithObservability{obsrep: obsrep} } -func (tewo *tracesExporterWithObservability) send(ctx context.Context, req Request) error { +func (tewo *tracesWithObservability) Send(ctx context.Context, req Request) error { c := tewo.obsrep.StartTracesOp(ctx) + numTraceSpans := req.ItemsCount() // Forward the data to the next consumer (this pusher is the next). - err := tewo.nextSender.send(c, req) - tewo.obsrep.EndTracesOp(c, req.ItemsCount(), err) + err := tewo.NextSender.Send(c, req) + tewo.obsrep.EndTracesOp(c, numTraceSpans, err) return err } diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/traces_batch.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/traces_batch.go index 1bdada95b7b..07a3025d73a 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/traces_batch.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/traces_batch.go @@ -11,39 +11,41 @@ import ( "go.opentelemetry.io/collector/pdata/ptrace" ) -// mergeTraces merges two traces requests into one. -func mergeTraces(_ context.Context, r1 Request, r2 Request) (Request, error) { - tr1, ok1 := r1.(*tracesRequest) - tr2, ok2 := r2.(*tracesRequest) - if !ok1 || !ok2 { - return nil, errors.New("invalid input type") +// MergeSplit splits and/or merges the provided traces request and the current request into one or more requests +// conforming with the MaxSizeConfig. +func (req *tracesRequest) MergeSplit(_ context.Context, cfg exporterbatcher.MaxSizeConfig, r2 Request) ([]Request, error) { + var req2 *tracesRequest + if r2 != nil { + var ok bool + req2, ok = r2.(*tracesRequest) + if !ok { + return nil, errors.New("invalid input type") + } + } + + if cfg.MaxSizeItems == 0 { + req2.td.ResourceSpans().MoveAndAppendTo(req.td.ResourceSpans()) + return []Request{req}, nil } - tr2.td.ResourceSpans().MoveAndAppendTo(tr1.td.ResourceSpans()) - return tr1, nil -} -// mergeSplitTraces splits and/or merges the traces into multiple requests based on the MaxSizeConfig. 
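The refactored MergeSplit above replaces the separate mergeTraces/mergeSplitTraces pair: with MaxSizeItems == 0 it only merges; otherwise it packs both requests into batches of at most MaxSizeItems items. The capacityLeft accounting is easiest to see on a toy request whose payload is just an item count (real requests move pdata around; this sketch only tracks counts):

package main

import "fmt"

type toyRequest struct{ items int }

// mergeSplit packs r1 and r2 into requests of at most maxItems items each,
// mirroring the capacityLeft bookkeeping in tracesRequest.MergeSplit.
func mergeSplit(maxItems int, r1, r2 *toyRequest) []*toyRequest {
	if maxItems == 0 { // merge-only mode
		return []*toyRequest{{items: r1.items + r2.items}}
	}
	var (
		res          []*toyRequest
		dest         *toyRequest
		capacityLeft = maxItems
	)
	for _, src := range []*toyRequest{r1, r2} {
		if src == nil {
			continue
		}
		remaining := src.items
		for remaining > 0 {
			if dest == nil {
				dest = &toyRequest{}
			}
			take := min(remaining, capacityLeft)
			dest.items += take
			remaining -= take
			capacityLeft -= take
			if capacityLeft == 0 { // batch full: start a new one
				res = append(res, dest)
				dest = nil
				capacityLeft = maxItems
			}
		}
	}
	if dest != nil {
		res = append(res, dest)
	}
	return res
}

func main() {
	for _, r := range mergeSplit(100, &toyRequest{items: 150}, &toyRequest{items: 80}) {
		fmt.Println(r.items) // 100, 100, 30
	}
}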
-func mergeSplitTraces(_ context.Context, cfg exporterbatcher.MaxSizeConfig, r1 Request, r2 Request) ([]Request, error) { var ( res []Request destReq *tracesRequest capacityLeft = cfg.MaxSizeItems ) - for _, req := range []Request{r1, r2} { - if req == nil { + for _, srcReq := range []*tracesRequest{req, req2} { + if srcReq == nil { continue } - srcReq, ok := req.(*tracesRequest) - if !ok { - return nil, errors.New("invalid input type") - } - if srcReq.td.SpanCount() <= capacityLeft { + + srcCount := srcReq.td.SpanCount() + if srcCount <= capacityLeft { if destReq == nil { destReq = srcReq } else { srcReq.td.ResourceSpans().MoveAndAppendTo(destReq.td.ResourceSpans()) } - capacityLeft -= destReq.td.SpanCount() + capacityLeft -= srcCount continue } diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper/LICENSE b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/fileprovider/Makefile b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper/Makefile similarity index 100% rename from vendor/go.opentelemetry.io/collector/confmap/provider/fileprovider/Makefile rename to vendor/go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper/Makefile diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper/constants.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper/constants.go new file mode 100644 index 00000000000..ae681facd9a --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper/constants.go @@ -0,0 +1,19 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package xexporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper" + +import ( + "errors" +) + +var ( + // errNilConfig is returned when an empty name is given. + errNilConfig = errors.New("nil config") + // errNilLogger is returned when a logger is nil + errNilLogger = errors.New("nil logger") + // errNilPushProfileData is returned when a nil PushProfiles is given. + errNilPushProfileData = errors.New("nil PushProfiles") + // errNilProfilesConverter is returned when a nil RequestFromProfilesFunc is given. + errNilProfilesConverter = errors.New("nil RequestFromProfilesFunc") +) diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper/profiles.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper/profiles.go new file mode 100644 index 00000000000..d9eb55b3ef7 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper/profiles.go @@ -0,0 +1,164 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package xexporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper" + +import ( + "context" + "errors" + + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/consumer/consumererror/xconsumererror" + "go.opentelemetry.io/collector/consumer/xconsumer" + "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/exporterhelper" + "go.opentelemetry.io/collector/exporter/exporterhelper/internal" + "go.opentelemetry.io/collector/exporter/exporterqueue" + "go.opentelemetry.io/collector/exporter/xexporter" + "go.opentelemetry.io/collector/pdata/pprofile" + "go.opentelemetry.io/collector/pipeline/xpipeline" +) + +var ( + profilesMarshaler = &pprofile.ProtoMarshaler{} + profilesUnmarshaler = &pprofile.ProtoUnmarshaler{} +) + +type profilesRequest struct { + pd pprofile.Profiles + pusher xconsumer.ConsumeProfilesFunc +} + +func newProfilesRequest(pd pprofile.Profiles, pusher xconsumer.ConsumeProfilesFunc) exporterhelper.Request { + return &profilesRequest{ + pd: pd, + pusher: pusher, + } +} + +func newProfileRequestUnmarshalerFunc(pusher xconsumer.ConsumeProfilesFunc) exporterqueue.Unmarshaler[exporterhelper.Request] { + return func(bytes []byte) (exporterhelper.Request, error) { + profiles, err := profilesUnmarshaler.UnmarshalProfiles(bytes) + if err != nil { + return nil, err + } + return newProfilesRequest(profiles, pusher), nil + } +} + +func profilesRequestMarshaler(req exporterhelper.Request) ([]byte, error) { + return 
profilesMarshaler.MarshalProfiles(req.(*profilesRequest).pd) +} + +func (req *profilesRequest) OnError(err error) exporterhelper.Request { + var profileError xconsumererror.Profiles + if errors.As(err, &profileError) { + return newProfilesRequest(profileError.Data(), req.pusher) + } + return req +} + +func (req *profilesRequest) Export(ctx context.Context) error { + return req.pusher(ctx, req.pd) +} + +func (req *profilesRequest) ItemsCount() int { + return req.pd.SampleCount() +} + +type profileExporter struct { + *internal.BaseExporter + xconsumer.Profiles +} + +// NewProfilesExporter creates an xexporter.Profiles that records observability metrics and wraps every request with a Span. +func NewProfilesExporter( + ctx context.Context, + set exporter.Settings, + cfg component.Config, + pusher xconsumer.ConsumeProfilesFunc, + options ...exporterhelper.Option, +) (xexporter.Profiles, error) { + if cfg == nil { + return nil, errNilConfig + } + if pusher == nil { + return nil, errNilPushProfileData + } + profilesOpts := []exporterhelper.Option{ + internal.WithMarshaler(profilesRequestMarshaler), internal.WithUnmarshaler(newProfileRequestUnmarshalerFunc(pusher)), + } + return NewProfilesRequestExporter(ctx, set, requestFromProfiles(pusher), append(profilesOpts, options...)...) +} + +// RequestFromProfilesFunc converts pprofile.Profiles into a user-defined Request. +// Experimental: This API is at the early stage of development and may change without backward compatibility +// until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. +type RequestFromProfilesFunc func(context.Context, pprofile.Profiles) (exporterhelper.Request, error) + +// requestFromProfiles returns a RequestFromProfilesFunc that converts pprofile.Profiles into a Request. +func requestFromProfiles(pusher xconsumer.ConsumeProfilesFunc) RequestFromProfilesFunc { + return func(_ context.Context, profiles pprofile.Profiles) (exporterhelper.Request, error) { + return newProfilesRequest(profiles, pusher), nil + } +} + +// NewProfilesRequestExporter creates a new profiles exporter based on a custom ProfilesConverter and Sender. +// Experimental: This API is at the early stage of development and may change without backward compatibility +// until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. +func NewProfilesRequestExporter( + _ context.Context, + set exporter.Settings, + converter RequestFromProfilesFunc, + options ...exporterhelper.Option, +) (xexporter.Profiles, error) { + if set.Logger == nil { + return nil, errNilLogger + } + + if converter == nil { + return nil, errNilProfilesConverter + } + + be, err := internal.NewBaseExporter(set, xpipeline.SignalProfiles, newProfilesExporterWithObservability, options...) + if err != nil { + return nil, err + } + + tc, err := xconsumer.NewProfiles(func(ctx context.Context, pd pprofile.Profiles) error { + req, cErr := converter(ctx, pd) + if cErr != nil { + set.Logger.Error("Failed to convert profiles. Dropping data.", + zap.Int("dropped_samples", pd.SampleCount()), + zap.Error(err)) + return consumererror.NewPermanent(cErr) + } + return be.Send(ctx, req) + }, be.ConsumerOptions...) 
+ + return &profileExporter{ + BaseExporter: be, + Profiles: tc, + }, err +} + +type profilesExporterWithObservability struct { + internal.BaseSender[exporterhelper.Request] + obsrep *internal.ObsReport +} + +func newProfilesExporterWithObservability(obsrep *internal.ObsReport) internal.Sender[exporterhelper.Request] { + return &profilesExporterWithObservability{obsrep: obsrep} +} + +func (tewo *profilesExporterWithObservability) Send(ctx context.Context, req exporterhelper.Request) error { + c := tewo.obsrep.StartProfilesOp(ctx) + numSamples := req.ItemsCount() + // Forward the data to the next consumer (this pusher is the next). + err := tewo.NextSender.Send(c, req) + tewo.obsrep.EndProfilesOp(c, numSamples, err) + return err +} diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper/profiles_batch.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper/profiles_batch.go new file mode 100644 index 00000000000..2aa47c95043 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper/profiles_batch.go @@ -0,0 +1,144 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package xexporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper" + +import ( + "context" + "errors" + + "go.opentelemetry.io/collector/exporter/exporterbatcher" + "go.opentelemetry.io/collector/exporter/exporterhelper" + "go.opentelemetry.io/collector/pdata/pprofile" +) + +// MergeSplit splits and/or merges the profiles into multiple requests based on the MaxSizeConfig. +func (req *profilesRequest) MergeSplit(_ context.Context, cfg exporterbatcher.MaxSizeConfig, r2 exporterhelper.Request) ([]exporterhelper.Request, error) { + var req2 *profilesRequest + if r2 != nil { + var ok bool + req2, ok = r2.(*profilesRequest) + if !ok { + return nil, errors.New("invalid input type") + } + } + + if cfg.MaxSizeItems == 0 { + req2.pd.ResourceProfiles().MoveAndAppendTo(req.pd.ResourceProfiles()) + return []exporterhelper.Request{req}, nil + } + + var ( + res []exporterhelper.Request + destReq *profilesRequest + capacityLeft = cfg.MaxSizeItems + ) + for _, srcReq := range []*profilesRequest{req, req2} { + if srcReq == nil { + continue + } + + srcCount := srcReq.pd.SampleCount() + if srcCount <= capacityLeft { + if destReq == nil { + destReq = srcReq + } else { + srcReq.pd.ResourceProfiles().MoveAndAppendTo(destReq.pd.ResourceProfiles()) + } + capacityLeft -= srcCount + continue + } + + for { + extractedProfiles := extractProfiles(srcReq.pd, capacityLeft) + if extractedProfiles.SampleCount() == 0 { + break + } + capacityLeft -= extractedProfiles.SampleCount() + if destReq == nil { + destReq = &profilesRequest{pd: extractedProfiles, pusher: srcReq.pusher} + } else { + extractedProfiles.ResourceProfiles().MoveAndAppendTo(destReq.pd.ResourceProfiles()) + } + // Create new batch once capacity is reached. + if capacityLeft == 0 { + res = append(res, destReq) + destReq = nil + capacityLeft = cfg.MaxSizeItems + } + } + } + + if destReq != nil { + res = append(res, destReq) + } + return res, nil +} + +// extractProfiles extracts a new profiles with a maximum number of samples. 
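The extract helpers that follow walk resource → scope → profile and move at most count samples into a fresh container, leaving the remainder in place. The same traversal, sketched on a toy nested structure so the shape of the recursion is visible without the pdata API (the profile level is collapsed to plain sample slices for brevity):

package main

import "fmt"

// Toy stand-ins for ResourceProfiles -> ScopeProfiles -> samples.
type scope struct{ samples []int }
type resource struct{ scopes []scope }

// extract moves up to count samples out of rs into a new resource slice,
// preserving the resource/scope grouping, in the spirit of extractProfiles.
func extract(rs []resource, count int) (dst, rest []resource) {
	for _, r := range rs {
		if count == 0 {
			rest = append(rest, r)
			continue
		}
		var dstScopes, restScopes []scope
		for _, s := range r.scopes {
			if count == 0 {
				restScopes = append(restScopes, s)
				continue
			}
			n := min(count, len(s.samples))
			dstScopes = append(dstScopes, scope{samples: s.samples[:n]})
			if n < len(s.samples) {
				restScopes = append(restScopes, scope{samples: s.samples[n:]})
			}
			count -= n
		}
		dst = append(dst, resource{scopes: dstScopes})
		if len(restScopes) > 0 {
			rest = append(rest, resource{scopes: restScopes})
		}
	}
	return dst, rest
}

func main() {
	rs := []resource{{scopes: []scope{{samples: []int{1, 2, 3}}, {samples: []int{4, 5}}}}}
	dst, rest := extract(rs, 4)
	fmt.Println(dst, rest) // 4 samples extracted, 1 left behind
}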
+func extractProfiles(srcProfiles pprofile.Profiles, count int) pprofile.Profiles { + destProfiles := pprofile.NewProfiles() + srcProfiles.ResourceProfiles().RemoveIf(func(srcRS pprofile.ResourceProfiles) bool { + if count == 0 { + return false + } + needToExtract := samplesCount(srcRS) > count + if needToExtract { + srcRS = extractResourceProfiles(srcRS, count) + } + count -= samplesCount(srcRS) + srcRS.MoveTo(destProfiles.ResourceProfiles().AppendEmpty()) + return !needToExtract + }) + return destProfiles +} + +// extractResourceProfiles extracts profiles and returns a new resource profiles with the specified number of profiles. +func extractResourceProfiles(srcRS pprofile.ResourceProfiles, count int) pprofile.ResourceProfiles { + destRS := pprofile.NewResourceProfiles() + destRS.SetSchemaUrl(srcRS.SchemaUrl()) + srcRS.Resource().CopyTo(destRS.Resource()) + srcRS.ScopeProfiles().RemoveIf(func(srcSS pprofile.ScopeProfiles) bool { + if count == 0 { + return false + } + needToExtract := srcSS.Profiles().Len() > count + if needToExtract { + srcSS = extractScopeProfiles(srcSS, count) + } + count -= srcSS.Profiles().Len() + srcSS.MoveTo(destRS.ScopeProfiles().AppendEmpty()) + return !needToExtract + }) + srcRS.Resource().CopyTo(destRS.Resource()) + return destRS +} + +// extractScopeProfiles extracts profiles and returns a new scope profiles with the specified number of profiles. +func extractScopeProfiles(srcSS pprofile.ScopeProfiles, count int) pprofile.ScopeProfiles { + destSS := pprofile.NewScopeProfiles() + destSS.SetSchemaUrl(srcSS.SchemaUrl()) + srcSS.Scope().CopyTo(destSS.Scope()) + srcSS.Profiles().RemoveIf(func(srcProfile pprofile.Profile) bool { + if count == 0 { + return false + } + srcProfile.MoveTo(destSS.Profiles().AppendEmpty()) + count-- + return true + }) + return destSS +} + +// resourceProfilessCount calculates the total number of profiles in the pdata.ResourceProfiles. +func samplesCount(rs pprofile.ResourceProfiles) int { + count := 0 + rs.ScopeProfiles().RemoveIf(func(ss pprofile.ScopeProfiles) bool { + ss.Profiles().RemoveIf(func(sp pprofile.Profile) bool { + count += sp.Sample().Len() + return false + }) + return false + }) + return count +} diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterqueue/bounded_memory_queue.go b/vendor/go.opentelemetry.io/collector/exporter/exporterqueue/bounded_memory_queue.go new file mode 100644 index 00000000000..e94eb5da1c3 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterqueue/bounded_memory_queue.go @@ -0,0 +1,44 @@ +// Copyright The OpenTelemetry Authors +// Copyright (c) 2019 The Jaeger Authors. +// Copyright (c) 2017 Uber Technologies, Inc. +// SPDX-License-Identifier: Apache-2.0 + +package exporterqueue // import "go.opentelemetry.io/collector/exporter/exporterqueue" + +import ( + "context" + + "go.opentelemetry.io/collector/component" +) + +// boundedMemoryQueue implements a producer-consumer exchange similar to a ring buffer queue, +// where the queue is bounded and if it fills up due to slow consumers, the new items written by +// the producer are dropped. +type boundedMemoryQueue[T any] struct { + component.StartFunc + *sizedQueue[T] +} + +// memoryQueueSettings defines internal parameters for boundedMemoryQueue creation. +type memoryQueueSettings[T any] struct { + sizer sizer[T] + capacity int64 + blocking bool +} + +// newBoundedMemoryQueue constructs the new queue of specified capacity, and with an optional +// callback for dropped items (e.g. useful to emit metrics). 
+func newBoundedMemoryQueue[T any](set memoryQueueSettings[T]) Queue[T] { + return &boundedMemoryQueue[T]{ + sizedQueue: newSizedQueue[T](set.capacity, set.sizer, set.blocking), + } +} + +func (q *boundedMemoryQueue[T]) Read(context.Context) (uint64, context.Context, T, bool) { + ctx, req, ok := q.sizedQueue.pop() + return 0, ctx, req, ok +} + +// OnProcessingFinished should be called to remove the item of the given index from the queue once processing is finished. +// For in memory queue, this function is noop. +func (q *boundedMemoryQueue[T]) OnProcessingFinished(uint64, error) {} diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterqueue/cond.go b/vendor/go.opentelemetry.io/collector/exporter/exporterqueue/cond.go new file mode 100644 index 00000000000..9b3293731c4 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterqueue/cond.go @@ -0,0 +1,63 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exporterqueue // import "go.opentelemetry.io/collector/exporter/exporterqueue" + +import ( + "context" + "sync" +) + +// cond is equivalent with sync.Cond, but context.Context aware. +// Which means Wait() will return if context is done before any signal is received. +// Also, it requires the caller to hold the c.L during all calls. +type cond struct { + L sync.Locker + ch chan struct{} + waiting int64 +} + +func newCond(l sync.Locker) *cond { + return &cond{L: l, ch: make(chan struct{}, 1)} +} + +// Signal wakes one goroutine waiting on c, if there is any. +// It requires for the caller to hold c.L during the call. +func (c *cond) Signal() { + if c.waiting == 0 { + return + } + c.waiting-- + c.ch <- struct{}{} +} + +// Broadcast wakes all goroutines waiting on c. +// It requires for the caller to hold c.L during the call. +func (c *cond) Broadcast() { + for ; c.waiting > 0; c.waiting-- { + c.ch <- struct{}{} + } +} + +// Wait atomically unlocks c.L and suspends execution of the calling goroutine. After later resuming execution, Wait locks c.L before returning. +func (c *cond) Wait(ctx context.Context) error { + c.waiting++ + c.L.Unlock() + select { + case <-ctx.Done(): + c.L.Lock() + if c.waiting == 0 { + // If waiting is 0, it means that there was a signal sent and nobody else waits for it. + // Consume it, so that we don't unblock other consumer unnecessary, + // or we don't block the producer because the channel buffer is full. + <-c.ch + } else { + // Decrease the number of waiting routines. + c.waiting-- + } + return ctx.Err() + case <-c.ch: + c.L.Lock() + return nil + } +} diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterqueue/config.go b/vendor/go.opentelemetry.io/collector/exporter/exporterqueue/config.go index fcc0e0e57b1..f68599a0d17 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterqueue/config.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterqueue/config.go @@ -20,6 +20,9 @@ type Config struct { NumConsumers int `mapstructure:"num_consumers"` // QueueSize is the maximum number of requests allowed in queue at any given time. QueueSize int `mapstructure:"queue_size"` + // Blocking controls the queue behavior when full. + // If true it blocks until enough space to add the new request to the queue. + Blocking bool `mapstructure:"blocking"` } // NewDefaultConfig returns the default Config. 
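The new Blocking field (wired up in the hunk below) changes Offer from fail-fast to wait-until-space. The persistent queue implements the wait with the context-aware cond above; for a memory queue the same two behaviors can be sketched with a plain buffered channel:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

var errQueueIsFull = errors.New("sending queue is full")

type toyQueue[T any] struct {
	ch       chan T
	blocking bool
}

// offer mirrors the two Config behaviors: with blocking=false it returns
// errQueueIsFull immediately; with blocking=true it waits for space or
// for the caller's context to be done.
func (q *toyQueue[T]) offer(ctx context.Context, item T) error {
	if !q.blocking {
		select {
		case q.ch <- item:
			return nil
		default:
			return errQueueIsFull
		}
	}
	select {
	case q.ch <- item:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	q := &toyQueue[int]{ch: make(chan int, 1), blocking: true}
	_ = q.offer(context.Background(), 1) // fills the queue
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	fmt.Println(q.offer(ctx, 2)) // context deadline exceeded: queue stayed full
}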
@@ -30,19 +33,20 @@ func NewDefaultConfig() Config { Enabled: true, NumConsumers: 10, QueueSize: 1_000, + Blocking: true, } } -// Validate checks if the QueueSettings configuration is valid +// Validate checks if the Config is valid func (qCfg *Config) Validate() error { if !qCfg.Enabled { return nil } if qCfg.NumConsumers <= 0 { - return errors.New("number of consumers must be positive") + return errors.New("`num_consumers` must be positive") } if qCfg.QueueSize <= 0 { - return errors.New("queue size must be positive") + return errors.New("`queue_size` must be positive") } return nil } diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterqueue/persistent_queue.go b/vendor/go.opentelemetry.io/collector/exporter/exporterqueue/persistent_queue.go new file mode 100644 index 00000000000..c6e04eb72e2 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterqueue/persistent_queue.go @@ -0,0 +1,556 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exporterqueue // import "go.opentelemetry.io/collector/exporter/exporterqueue" + +import ( + "context" + "encoding/binary" + "errors" + "fmt" + "strconv" + "sync" + + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/internal/experr" + "go.opentelemetry.io/collector/extension/xextension/storage" + "go.opentelemetry.io/collector/pipeline" +) + +const ( + zapKey = "key" + zapErrorCount = "errorCount" + zapNumberOfItems = "numberOfItems" + + readIndexKey = "ri" + writeIndexKey = "wi" + currentlyDispatchedItemsKey = "di" + queueSizeKey = "si" +) + +var ( + errValueNotSet = errors.New("value not set") + errInvalidValue = errors.New("invalid value") + errNoStorageClient = errors.New("no storage client extension found") + errWrongExtensionType = errors.New("requested extension is not a storage extension") +) + +type persistentQueueSettings[T any] struct { + sizer sizer[T] + capacity int64 + blocking bool + signal pipeline.Signal + storageID component.ID + marshaler Marshaler[T] + unmarshaler Unmarshaler[T] + set exporter.Settings +} + +// persistentQueue provides a persistent queue implementation backed by file storage extension +// +// Write index describes the position at which next item is going to be stored. +// Read index describes which item needs to be read next. +// When Write index = Read index, no elements are in the queue. +// +// The items currently dispatched by consumers are not deleted until the processing is finished. +// Their list is stored under a separate key. +// +// ┌───────file extension-backed queue───────┐ +// │ │ +// │ ┌───┐ ┌───┐ ┌───┐ ┌───┐ ┌───┐ │ +// │ n+1 │ n │ ... │ 4 │ │ 3 │ │ 2 │ │ 1 │ │ +// │ └───┘ └───┘ └─x─┘ └─|─┘ └─x─┘ │ +// │ x | x │ +// └───────────────────────x─────|─────x─────┘ +// ▲ ▲ x | x +// │ │ x | xxxx deleted +// │ │ x | +// write read x └── currently dispatched item +// index index x +// xxxx deleted +type persistentQueue[T any] struct { + set persistentQueueSettings[T] + logger *zap.Logger + client storage.Client + + // isRequestSized indicates whether the queue is sized by the number of requests. + isRequestSized bool + + // mu guards everything declared below. 
+ mu sync.Mutex + hasMoreElements *sync.Cond + hasMoreSpace *cond + readIndex uint64 + writeIndex uint64 + currentlyDispatchedItems []uint64 + queueSize int64 + refClient int64 + stopped bool +} + +// newPersistentQueue creates a new queue backed by file storage; name and signal must be a unique combination that identifies the queue storage +func newPersistentQueue[T any](set persistentQueueSettings[T]) Queue[T] { + _, isRequestSized := set.sizer.(*requestSizer[T]) + pq := &persistentQueue[T]{ + set: set, + logger: set.set.Logger, + isRequestSized: isRequestSized, + } + pq.hasMoreElements = sync.NewCond(&pq.mu) + pq.hasMoreSpace = newCond(&pq.mu) + return pq +} + +// Start starts the persistentQueue with the given number of consumers. +func (pq *persistentQueue[T]) Start(ctx context.Context, host component.Host) error { + storageClient, err := toStorageClient(ctx, pq.set.storageID, host, pq.set.set.ID, pq.set.signal) + if err != nil { + return err + } + pq.initClient(ctx, storageClient) + return nil +} + +func (pq *persistentQueue[T]) Size() int64 { + pq.mu.Lock() + defer pq.mu.Unlock() + return pq.queueSize +} + +func (pq *persistentQueue[T]) Capacity() int64 { + return pq.set.capacity +} + +func (pq *persistentQueue[T]) initClient(ctx context.Context, client storage.Client) { + pq.client = client + // Start with a reference 1 which is the reference we use for the producer goroutines and initialization. + pq.refClient = 1 + pq.initPersistentContiguousStorage(ctx) + // Make sure the leftover requests are handled + pq.retrieveAndEnqueueNotDispatchedReqs(ctx) +} + +func (pq *persistentQueue[T]) initPersistentContiguousStorage(ctx context.Context) { + riOp := storage.GetOperation(readIndexKey) + wiOp := storage.GetOperation(writeIndexKey) + + err := pq.client.Batch(ctx, riOp, wiOp) + if err == nil { + pq.readIndex, err = bytesToItemIndex(riOp.Value) + } + + if err == nil { + pq.writeIndex, err = bytesToItemIndex(wiOp.Value) + } + + if err != nil { + if errors.Is(err, errValueNotSet) { + pq.logger.Info("Initializing new persistent queue") + } else { + pq.logger.Error("Failed getting read/write index, starting with new ones", zap.Error(err)) + } + pq.readIndex = 0 + pq.writeIndex = 0 + } + + queueSize := pq.writeIndex - pq.readIndex + + // If the queue is sized by the number of requests, no need to read the queue size from storage. + if queueSize > 0 && !pq.isRequestSized { + if restoredQueueSize, err := pq.restoreQueueSizeFromStorage(ctx); err == nil { + queueSize = restoredQueueSize + } + } + // nolint: gosec + pq.queueSize = int64(queueSize) +} + +// restoreQueueSizeFromStorage restores the queue size from storage. +func (pq *persistentQueue[T]) restoreQueueSizeFromStorage(ctx context.Context) (uint64, error) { + val, err := pq.client.Get(ctx, queueSizeKey) + if err != nil { + if errors.Is(err, errValueNotSet) { + pq.logger.Warn("Cannot read the queue size snapshot from storage. "+ + "The reported queue size will be inaccurate until the initial queue is drained. "+ + "It's expected when the items sized queue enabled for the first time", zap.Error(err)) + } else { + pq.logger.Error("Failed to read the queue size snapshot from storage. "+ + "The reported queue size will be inaccurate until the initial queue is drained.", zap.Error(err)) + } + return 0, err + } + return bytesToItemIndex(val) +} + +func (pq *persistentQueue[T]) Shutdown(ctx context.Context) error { + // If the queue is not initialized, there is nothing to shut down. 
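initPersistentContiguousStorage above derives the element count purely from the two persisted indices (writeIndex - readIndex); only the items-sized variant needs the extra size snapshot. The indices themselves are persisted as 8-byte little-endian values, as in the helpers near the end of this file. A round-trip sketch of that encoding and the size arithmetic:

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

var errValueNotSet = errors.New("value not set")

func itemIndexToBytes(v uint64) []byte {
	return binary.LittleEndian.AppendUint64(nil, v)
}

func bytesToItemIndex(buf []byte) (uint64, error) {
	if buf == nil {
		return 0, errValueNotSet // key missing: treat as a fresh queue
	}
	if len(buf) < 8 {
		return 0, errors.New("invalid value")
	}
	return binary.LittleEndian.Uint64(buf), nil
}

func main() {
	read, write := uint64(17), uint64(42)
	ri, _ := bytesToItemIndex(itemIndexToBytes(read))
	wi, _ := bytesToItemIndex(itemIndexToBytes(write))
	fmt.Println(wi - ri) // 25 requests currently in the queue
}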
+ if pq.client == nil { + return nil + } + + pq.mu.Lock() + defer pq.mu.Unlock() + backupErr := pq.backupQueueSize(ctx) + // Mark this queue as stopped, so consumer don't start any more work. + pq.stopped = true + pq.hasMoreElements.Broadcast() + return errors.Join(backupErr, pq.unrefClient(ctx)) +} + +// backupQueueSize writes the current queue size to storage. The value is used to recover the queue size +// in case if the collector is killed. +func (pq *persistentQueue[T]) backupQueueSize(ctx context.Context) error { + // No need to write the queue size if the queue is sized by the number of requests. + // That information is already stored as difference between read and write indexes. + if pq.isRequestSized { + return nil + } + + // nolint: gosec + return pq.client.Set(ctx, queueSizeKey, itemIndexToBytes(uint64(pq.queueSize))) +} + +// unrefClient unrefs the client, and closes if no more references. Callers MUST hold the mutex. +// This is needed because consumers of the queue may still process the requests while the queue is shutting down or immediately after. +func (pq *persistentQueue[T]) unrefClient(ctx context.Context) error { + pq.refClient-- + if pq.refClient == 0 { + return pq.client.Close(ctx) + } + return nil +} + +// Offer inserts the specified element into this queue if it is possible to do so immediately +// without violating capacity restrictions. If success returns no error. +// It returns ErrQueueIsFull if no space is currently available. +func (pq *persistentQueue[T]) Offer(ctx context.Context, req T) error { + pq.mu.Lock() + defer pq.mu.Unlock() + return pq.putInternal(ctx, req) +} + +// putInternal is the internal version that requires caller to hold the mutex lock. +func (pq *persistentQueue[T]) putInternal(ctx context.Context, req T) error { + reqSize := pq.set.sizer.Sizeof(req) + for pq.queueSize+reqSize > pq.set.capacity { + if !pq.set.blocking { + return ErrQueueIsFull + } + if err := pq.hasMoreSpace.Wait(ctx); err != nil { + return err + } + } + + reqBuf, err := pq.set.marshaler(req) + if err != nil { + return err + } + + // Carry out a transaction where we both add the item and update the write index + ops := []*storage.Operation{ + storage.SetOperation(writeIndexKey, itemIndexToBytes(pq.writeIndex+1)), + storage.SetOperation(getItemKey(pq.writeIndex), reqBuf), + } + if err = pq.client.Batch(ctx, ops...); err != nil { + return err + } + + pq.writeIndex++ + pq.queueSize += reqSize + pq.hasMoreElements.Signal() + + // Back up the queue size to storage every 10 writes. The stored value is used to recover the queue size + // in case if the collector is killed. The recovered queue size is allowed to be inaccurate. + if (pq.writeIndex % 10) == 5 { + if err := pq.backupQueueSize(ctx); err != nil { + pq.logger.Error("Error writing queue size to storage", zap.Error(err)) + } + } + + return nil +} + +func (pq *persistentQueue[T]) Read(ctx context.Context) (uint64, context.Context, T, bool) { + pq.mu.Lock() + defer pq.mu.Unlock() + + for { + if pq.stopped { + var req T + return 0, context.Background(), req, false + } + + // Read until either a successful retrieved element or no more elements in the storage. + for pq.readIndex != pq.writeIndex { + index, req, consumed := pq.getNextItem(ctx) + // Ensure the used size and the channel size are in sync. 
+ if pq.readIndex == pq.writeIndex { + pq.queueSize = 0 + pq.hasMoreSpace.Signal() + } + if consumed { + pq.queueSize -= pq.set.sizer.Sizeof(req) + // The size might be not in sync with the queue in case it's restored from the disk + // because we don't flush the current queue size on the disk on every read/write. + // In that case we need to make sure it doesn't go below 0. + if pq.queueSize < 0 { + pq.queueSize = 0 + } + pq.hasMoreSpace.Signal() + + return index, context.Background(), req, true + } + } + + // TODO: Need to change the Queue interface to return an error to allow distinguish between shutdown and context canceled. + // Until then use the sync.Cond. + pq.hasMoreElements.Wait() + } +} + +// getNextItem pulls the next available item from the persistent storage along with its index. Once processing is +// finished, the index should be called with OnProcessingFinished to clean up the storage. If no new item is available, +// returns false. +func (pq *persistentQueue[T]) getNextItem(ctx context.Context) (uint64, T, bool) { + index := pq.readIndex + // Increase here, so even if errors happen below, it always iterates + pq.readIndex++ + pq.currentlyDispatchedItems = append(pq.currentlyDispatchedItems, index) + getOp := storage.GetOperation(getItemKey(index)) + err := pq.client.Batch(ctx, + storage.SetOperation(readIndexKey, itemIndexToBytes(pq.readIndex)), + storage.SetOperation(currentlyDispatchedItemsKey, itemIndexArrayToBytes(pq.currentlyDispatchedItems)), + getOp) + + var request T + if err == nil { + request, err = pq.set.unmarshaler(getOp.Value) + } + + if err != nil { + pq.logger.Debug("Failed to dispatch item", zap.Error(err)) + // We need to make sure that currently dispatched items list is cleaned + if err = pq.itemDispatchingFinish(ctx, index); err != nil { + pq.logger.Error("Error deleting item from queue", zap.Error(err)) + } + + return 0, request, false + } + + // Increase the reference count, so the client is not closed while the request is being processed. + // The client cannot be closed because we hold the lock since last we checked `stopped`. + pq.refClient++ + + return index, request, true +} + +// OnProcessingFinished should be called to remove the item of the given index from the queue once processing is finished. +func (pq *persistentQueue[T]) OnProcessingFinished(index uint64, consumeErr error) { + // Delete the item from the persistent storage after it was processed. + pq.mu.Lock() + // Always unref client even if the consumer is shutdown because we always ref it for every valid request. + defer func() { + if err := pq.unrefClient(context.Background()); err != nil { + pq.logger.Error("Error closing the storage client", zap.Error(err)) + } + pq.mu.Unlock() + }() + + if experr.IsShutdownErr(consumeErr) { + // The queue is shutting down, don't mark the item as dispatched, so it's picked up again after restart. + // TODO: Handle partially delivered requests by updating their values in the storage. + return + } + + if err := pq.itemDispatchingFinish(context.Background(), index); err != nil { + pq.logger.Error("Error deleting item from queue", zap.Error(err)) + } + + // Back up the queue size to storage on every 10 reads. The stored value is used to recover the queue size + // in case if the collector is killed. The recovered queue size is allowed to be inaccurate. 
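itemDispatchingFinish (further below) drops one index from currentlyDispatchedItems with an order-insensitive swap-remove: O(1) per removal, which is fine because the dispatched list carries no ordering guarantee. The idiom in isolation:

package main

import "fmt"

// swapRemove deletes the first occurrence of v from s without preserving
// order: overwrite it with the last element, then shrink the slice by one.
func swapRemove(s []uint64, v uint64) []uint64 {
	for i, x := range s {
		if x == v {
			s[i] = s[len(s)-1]
			return s[:len(s)-1]
		}
	}
	return s
}

func main() {
	dispatched := []uint64{3, 4, 5, 6}
	fmt.Println(swapRemove(dispatched, 4)) // [3 6 5]
}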
+ if (pq.readIndex % 10) == 0 { + if qsErr := pq.backupQueueSize(context.Background()); qsErr != nil { + pq.logger.Error("Error writing queue size to storage", zap.Error(qsErr)) + } + } +} + +// retrieveAndEnqueueNotDispatchedReqs gets the items for which sending was not finished, cleans the storage +// and moves the items at the back of the queue. +func (pq *persistentQueue[T]) retrieveAndEnqueueNotDispatchedReqs(ctx context.Context) { + var dispatchedItems []uint64 + + pq.mu.Lock() + defer pq.mu.Unlock() + pq.logger.Debug("Checking if there are items left for dispatch by consumers") + itemKeysBuf, err := pq.client.Get(ctx, currentlyDispatchedItemsKey) + if err == nil { + dispatchedItems, err = bytesToItemIndexArray(itemKeysBuf) + } + if err != nil { + pq.logger.Error("Could not fetch items left for dispatch by consumers", zap.Error(err)) + return + } + + if len(dispatchedItems) == 0 { + pq.logger.Debug("No items left for dispatch by consumers") + return + } + + pq.logger.Info("Fetching items left for dispatch by consumers", zap.Int(zapNumberOfItems, + len(dispatchedItems))) + retrieveBatch := make([]*storage.Operation, len(dispatchedItems)) + cleanupBatch := make([]*storage.Operation, len(dispatchedItems)) + for i, it := range dispatchedItems { + key := getItemKey(it) + retrieveBatch[i] = storage.GetOperation(key) + cleanupBatch[i] = storage.DeleteOperation(key) + } + retrieveErr := pq.client.Batch(ctx, retrieveBatch...) + cleanupErr := pq.client.Batch(ctx, cleanupBatch...) + + if cleanupErr != nil { + pq.logger.Debug("Failed cleaning items left by consumers", zap.Error(cleanupErr)) + } + + if retrieveErr != nil { + pq.logger.Warn("Failed retrieving items left by consumers", zap.Error(retrieveErr)) + return + } + + errCount := 0 + for _, op := range retrieveBatch { + if op.Value == nil { + pq.logger.Warn("Failed retrieving item", zap.String(zapKey, op.Key), zap.Error(errValueNotSet)) + continue + } + req, err := pq.set.unmarshaler(op.Value) + // If error happened or item is nil, it will be efficiently ignored + if err != nil { + pq.logger.Warn("Failed unmarshalling item", zap.String(zapKey, op.Key), zap.Error(err)) + continue + } + if pq.putInternal(ctx, req) != nil { + errCount++ + } + } + + if errCount > 0 { + pq.logger.Error("Errors occurred while moving items for dispatching back to queue", + zap.Int(zapNumberOfItems, len(retrieveBatch)), zap.Int(zapErrorCount, errCount)) + } else { + pq.logger.Info("Moved items for dispatching back to queue", + zap.Int(zapNumberOfItems, len(retrieveBatch))) + } +} + +// itemDispatchingFinish removes the item from the list of currently dispatched items and deletes it from the persistent queue +func (pq *persistentQueue[T]) itemDispatchingFinish(ctx context.Context, index uint64) error { + lenCDI := len(pq.currentlyDispatchedItems) + for i := 0; i < lenCDI; i++ { + if pq.currentlyDispatchedItems[i] == index { + pq.currentlyDispatchedItems[i] = pq.currentlyDispatchedItems[lenCDI-1] + pq.currentlyDispatchedItems = pq.currentlyDispatchedItems[:lenCDI-1] + break + } + } + + setOp := storage.SetOperation(currentlyDispatchedItemsKey, itemIndexArrayToBytes(pq.currentlyDispatchedItems)) + deleteOp := storage.DeleteOperation(getItemKey(index)) + if err := pq.client.Batch(ctx, setOp, deleteOp); err != nil { + // got an error, try to gracefully handle it + pq.logger.Warn("Failed updating currently dispatched items, trying to delete the item first", + zap.Error(err)) + } else { + // Everything ok, exit + return nil + } + + if err := pq.client.Batch(ctx, 
+
+// itemDispatchingFinish removes the item from the list of currently dispatched items and deletes it from the persistent queue
+func (pq *persistentQueue[T]) itemDispatchingFinish(ctx context.Context, index uint64) error {
+	lenCDI := len(pq.currentlyDispatchedItems)
+	for i := 0; i < lenCDI; i++ {
+		if pq.currentlyDispatchedItems[i] == index {
+			pq.currentlyDispatchedItems[i] = pq.currentlyDispatchedItems[lenCDI-1]
+			pq.currentlyDispatchedItems = pq.currentlyDispatchedItems[:lenCDI-1]
+			break
+		}
+	}
+
+	setOp := storage.SetOperation(currentlyDispatchedItemsKey, itemIndexArrayToBytes(pq.currentlyDispatchedItems))
+	deleteOp := storage.DeleteOperation(getItemKey(index))
+	if err := pq.client.Batch(ctx, setOp, deleteOp); err != nil {
+		// got an error, try to gracefully handle it
+		pq.logger.Warn("Failed updating currently dispatched items, trying to delete the item first",
+			zap.Error(err))
+	} else {
+		// Everything ok, exit
+		return nil
+	}
+
+	if err := pq.client.Batch(ctx, deleteOp); err != nil {
+		// Return an error here, as this indicates an issue with the underlying storage medium
+		return fmt.Errorf("failed deleting item from queue, got error from storage: %w", err)
+	}
+
+	if err := pq.client.Batch(ctx, setOp); err != nil {
+		// even if this fails, we still have the right dispatched items in memory
+		// at worst, we'll have the wrong list in storage, and we'll discard the nonexistent items during startup
+		return fmt.Errorf("failed updating currently dispatched items, but deleted item successfully: %w", err)
+	}
+
+	return nil
+}
+
+func toStorageClient(ctx context.Context, storageID component.ID, host component.Host, ownerID component.ID, signal pipeline.Signal) (storage.Client, error) {
+	ext, found := host.GetExtensions()[storageID]
+	if !found {
+		return nil, errNoStorageClient
+	}
+
+	storageExt, ok := ext.(storage.Extension)
+	if !ok {
+		return nil, errWrongExtensionType
+	}
+
+	return storageExt.GetClient(ctx, component.KindExporter, ownerID, signal.String())
+}
+
+func getItemKey(index uint64) string {
+	return strconv.FormatUint(index, 10)
+}
+
+func itemIndexToBytes(value uint64) []byte {
+	return binary.LittleEndian.AppendUint64([]byte{}, value)
+}
+
+func bytesToItemIndex(buf []byte) (uint64, error) {
+	if buf == nil {
+		return uint64(0), errValueNotSet
+	}
+	// The sizeof uint64 in binary is 8.
+	if len(buf) < 8 {
+		return 0, errInvalidValue
+	}
+	return binary.LittleEndian.Uint64(buf), nil
+}
+
+func itemIndexArrayToBytes(arr []uint64) []byte {
+	size := len(arr)
+	buf := make([]byte, 0, 4+size*8)
+	// nolint: gosec
+	buf = binary.LittleEndian.AppendUint32(buf, uint32(size))
+	for _, item := range arr {
+		buf = binary.LittleEndian.AppendUint64(buf, item)
+	}
+	return buf
+}
+
+func bytesToItemIndexArray(buf []byte) ([]uint64, error) {
+	if len(buf) == 0 {
+		return nil, nil
+	}
+
+	// The sizeof uint32 in binary is 4.
+	if len(buf) < 4 {
+		return nil, errInvalidValue
+	}
+	size := int(binary.LittleEndian.Uint32(buf))
+	if size == 0 {
+		return nil, nil
+	}
+
+	buf = buf[4:]
+	// The sizeof uint64 in binary is 8, so we need to have size*8 bytes.
+	if len(buf) < size*8 {
+		return nil, errInvalidValue
+	}
+
+	val := make([]uint64, size)
+	for i := 0; i < size; i++ {
+		val[i] = binary.LittleEndian.Uint64(buf)
+		buf = buf[8:]
+	}
+	return val, nil
+}
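The index encodings above are plain little-endian. The following standalone sketch mirrors itemIndexArrayToBytes/bytesToItemIndexArray to show the wire layout; it is an illustration, not part of the vendored package:

package main

import (
	"encoding/binary"
	"fmt"
)

// encode mirrors itemIndexArrayToBytes: a little-endian uint32 element count
// followed by count little-endian uint64 values.
func encode(arr []uint64) []byte {
	buf := binary.LittleEndian.AppendUint32(nil, uint32(len(arr)))
	for _, v := range arr {
		buf = binary.LittleEndian.AppendUint64(buf, v)
	}
	return buf
}

func main() {
	b := encode([]uint64{3, 4})
	fmt.Printf("% x\n", b) // 4 + 2*8 = 20 bytes: 02 00 00 00, then 3 and 4 as uint64
	// Decoding reads the count, checks len(buf) >= size*8, then consumes
	// 8 bytes per element, exactly as bytesToItemIndexArray does.
	count := binary.LittleEndian.Uint32(b[:4])
	first := binary.LittleEndian.Uint64(b[4:12])
	fmt.Println(count, first) // 2 3
}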
diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterqueue/queue.go b/vendor/go.opentelemetry.io/collector/exporter/exporterqueue/queue.go
index 5b568851b66..549f1ce7635 100644
--- a/vendor/go.opentelemetry.io/collector/exporter/exporterqueue/queue.go
+++ b/vendor/go.opentelemetry.io/collector/exporter/exporterqueue/queue.go
@@ -5,27 +5,45 @@ package exporterqueue // import "go.opentelemetry.io/collector/exporter/exporter
 import (
 	"context"
+	"errors"
 
 	"go.opentelemetry.io/collector/component"
 	"go.opentelemetry.io/collector/exporter"
-	"go.opentelemetry.io/collector/exporter/internal/queue"
+	"go.opentelemetry.io/collector/pipeline"
 )
 
-// ErrQueueIsFull is the error that Queue returns when full.
+// ErrQueueIsFull is the error returned when an item is offered to the Queue and the queue is full.
 // Experimental: This API is at the early stage of development and may change without backward compatibility
 // until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved.
-var ErrQueueIsFull = queue.ErrQueueIsFull
+var ErrQueueIsFull = errors.New("sending queue is full")
 
 // Queue defines a producer-consumer exchange which can be backed by e.g. the memory-based ring buffer queue
 // (boundedMemoryQueue) or a disk-based queue (persistentQueue).
 // Experimental: This API is at the early stage of development and may change without backward compatibility
 // until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved.
-type Queue[T any] queue.Queue[T]
+type Queue[T any] interface {
+	component.Component
+	// Offer inserts the specified element into this queue if it is possible to do so immediately
+	// without violating capacity restrictions. If successful, it returns no error.
+	// It returns ErrQueueIsFull if no space is currently available.
+	Offer(ctx context.Context, item T) error
+	// Size returns the current size of the queue.
+	Size() int64
+	// Capacity returns the capacity of the queue.
+	Capacity() int64
+	// Read pulls the next available item from the queue along with its index. Once processing is
+	// finished, OnProcessingFinished should be called with the index to clean up the storage.
+	// The function blocks until an item is available or the queue is stopped.
+	// Returns false if reading failed or if the queue is stopped.
+	Read(context.Context) (uint64, context.Context, T, bool)
+	// OnProcessingFinished should be called to remove the item of the given index from the queue once processing is finished.
+	OnProcessingFinished(index uint64, consumeErr error)
+}
 
 // Settings defines settings for creating a queue.
 type Settings struct {
-	DataType         component.DataType
-	ExporterSettings exporter.CreateSettings
+	Signal           pipeline.Signal
+	ExporterSettings exporter.Settings
 }
 
 // Marshaler is a function that can marshal a request into bytes.
@@ -46,11 +64,12 @@ type Factory[T any] func(context.Context, Settings, Config) Queue[T]
 
 // NewMemoryQueueFactory returns a factory to create a new memory queue.
 // Experimental: This API is at the early stage of development and may change without backward compatibility
 // until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved.
-func NewMemoryQueueFactory[T itemsCounter]() Factory[T] {
+func NewMemoryQueueFactory[T any]() Factory[T] {
 	return func(_ context.Context, _ Settings, cfg Config) Queue[T] {
-		return queue.NewBoundedMemoryQueue[T](queue.MemoryQueueSettings[T]{
-			Sizer:    sizerFromConfig[T](cfg),
-			Capacity: capacityFromConfig(cfg),
+		return newBoundedMemoryQueue[T](memoryQueueSettings[T]{
+			sizer:    &requestSizer[T]{},
+			capacity: int64(cfg.QueueSize),
+			blocking: cfg.Blocking,
 		})
 	}
 }
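A minimal sketch of how a consumer drives this contract; a hypothetical in-package helper, since the queue consumers in exporterhelper own this loop in practice:

// consume drains q until it is stopped, acknowledging every item so that a
// persistent queue can clean up its storage.
func consume[T any](q Queue[T], handle func(context.Context, T) error) {
	for {
		// Read blocks until an item is available or the queue is stopped.
		index, ctx, req, ok := q.Read(context.Background())
		if !ok {
			return // queue stopped
		}
		// Report the consume result; on success the item is removed for good.
		q.OnProcessingFinished(index, handle(ctx, req))
	}
}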
 
@@ -66,36 +85,23 @@ type PersistentQueueSettings[T any] struct {
 }
 
 // NewPersistentQueueFactory returns a factory to create a new persistent queue.
-// If cfg.StorageID is nil then it falls back to memory queue.
+// If storageID is nil then it falls back to a memory queue.
 // Experimental: This API is at the early stage of development and may change without backward compatibility
 // until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved.
-func NewPersistentQueueFactory[T itemsCounter](storageID *component.ID, factorySettings PersistentQueueSettings[T]) Factory[T] {
+func NewPersistentQueueFactory[T any](storageID *component.ID, factorySettings PersistentQueueSettings[T]) Factory[T] {
 	if storageID == nil {
 		return NewMemoryQueueFactory[T]()
 	}
 	return func(_ context.Context, set Settings, cfg Config) Queue[T] {
-		return queue.NewPersistentQueue[T](queue.PersistentQueueSettings[T]{
-			Sizer:            sizerFromConfig[T](cfg),
-			Capacity:         capacityFromConfig(cfg),
-			DataType:         set.DataType,
-			StorageID:        *storageID,
-			Marshaler:        factorySettings.Marshaler,
-			Unmarshaler:      factorySettings.Unmarshaler,
-			ExporterSettings: set.ExporterSettings,
+		return newPersistentQueue[T](persistentQueueSettings[T]{
+			sizer:       &requestSizer[T]{},
+			capacity:    int64(cfg.QueueSize),
+			blocking:    cfg.Blocking,
+			signal:      set.Signal,
+			storageID:   *storageID,
+			marshaler:   factorySettings.Marshaler,
+			unmarshaler: factorySettings.Unmarshaler,
+			set:         set.ExporterSettings,
 		})
 	}
 }
-
-type itemsCounter interface {
-	ItemsCount() int
-}
-
-func sizerFromConfig[T itemsCounter](Config) queue.Sizer[T] {
-	// TODO: Handle other ways to measure the queue size once they are added.
-	return &queue.RequestSizer[T]{}
-}
-
-func capacityFromConfig(cfg Config) int64 {
-	// TODO: Handle other ways to measure the queue size once they are added.
-	return int64(cfg.QueueSize)
-}
diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterqueue/sized_queue.go b/vendor/go.opentelemetry.io/collector/exporter/exporterqueue/sized_queue.go
new file mode 100644
index 00000000000..20f0809abc2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/exporter/exporterqueue/sized_queue.go
@@ -0,0 +1,151 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package exporterqueue // import "go.opentelemetry.io/collector/exporter/exporterqueue"
+
+import (
+	"context"
+	"errors"
+	"sync"
+)
+
+var errInvalidSize = errors.New("invalid element size")
+
+type node[T any] struct {
+	ctx  context.Context
+	data T
+	size int64
+	next *node[T]
+}
+
+type linkedQueue[T any] struct {
+	head *node[T]
+	tail *node[T]
+}
+
+func (l *linkedQueue[T]) push(ctx context.Context, data T, size int64) {
+	n := &node[T]{ctx: ctx, data: data, size: size}
+	if l.tail == nil {
+		l.head = n
+		l.tail = n
+		return
+	}
+	l.tail.next = n
+	l.tail = n
+}
+
+func (l *linkedQueue[T]) pop() (context.Context, T, int64) {
+	n := l.head
+	l.head = n.next
+	if l.head == nil {
+		l.tail = nil
+	}
+	n.next = nil
+	return n.ctx, n.data, n.size
+}
+
+// sizedQueue is a channel wrapper for sized elements, with its capacity limited by the total size of all elements.
+// The queue accepts elements until the total size of the queued elements reaches the capacity.
+type sizedQueue[T any] struct {
+	sizer sizer[T]
+	cap   int64
+
+	mu              sync.Mutex
+	hasMoreElements *sync.Cond
+	hasMoreSpace    *cond
+	items           *linkedQueue[T]
+	size            int64
+	stopped         bool
+	blocking        bool
+}
+
+// newSizedQueue creates a queue of sized elements. Each element is assigned a size by the provided sizer.
+// capacity is the capacity of the queue.
+func newSizedQueue[T any](capacity int64, sizer sizer[T], blocking bool) *sizedQueue[T] {
+	sq := &sizedQueue[T]{
+		sizer:    sizer,
+		cap:      capacity,
+		items:    &linkedQueue[T]{},
+		blocking: blocking,
+	}
+	sq.hasMoreElements = sync.NewCond(&sq.mu)
+	sq.hasMoreSpace = newCond(&sq.mu)
+	return sq
+}
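A short hypothetical in-package sketch of the capacity behavior (requestSizer, defined in sizer.go later in this diff, counts every element as 1):

func exampleSizedQueue() {
	q := newSizedQueue[string](2, &requestSizer[string]{}, false) // blocking=false
	_ = q.Offer(context.Background(), "a") // size 1/2
	_ = q.Offer(context.Background(), "b") // size 2/2
	err := q.Offer(context.Background(), "c")
	// err == ErrQueueIsFull: a non-blocking Offer fails once size+elSize > cap.
	// With blocking=true the call would instead wait on hasMoreSpace until a
	// pop frees capacity or the caller's ctx is done.
	_ = err
}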
+
+// Offer puts the element into the queue with the given size if there is enough capacity.
+// Returns an error if the queue is full.
+func (sq *sizedQueue[T]) Offer(ctx context.Context, el T) error {
+	elSize := sq.sizer.Sizeof(el)
+	if elSize == 0 {
+		return nil
+	}
+
+	if elSize <= 0 {
+		return errInvalidSize
+	}
+
+	sq.mu.Lock()
+	defer sq.mu.Unlock()
+
+	for sq.size+elSize > sq.cap {
+		if !sq.blocking {
+			return ErrQueueIsFull
+		}
+		// Wait for more space or until the ctx is done.
+		if err := sq.hasMoreSpace.Wait(ctx); err != nil {
+			return err
+		}
+	}
+
+	sq.size += elSize
+	sq.items.push(ctx, el, elSize)
+	// Signal one consumer if any.
+	sq.hasMoreElements.Signal()
+	return nil
+}
+
+// pop removes the element from the queue and returns it.
+// The call blocks until there is an item available or the queue is stopped.
+// The function returns true when an item is consumed or false if the queue is stopped and emptied.
+func (sq *sizedQueue[T]) pop() (context.Context, T, bool) {
+	sq.mu.Lock()
+	defer sq.mu.Unlock()
+
+	for {
+		if sq.size > 0 {
+			elCtx, el, elSize := sq.items.pop()
+			sq.size -= elSize
+			sq.hasMoreSpace.Signal()
+			return elCtx, el, true
+		}
+
+		if sq.stopped {
+			var el T
+			return context.Background(), el, false
+		}
+
+		// TODO: Need to change the Queue interface to return an error to allow distinguishing between shutdown and context cancellation.
+		// Until then use the sync.Cond.
+		sq.hasMoreElements.Wait()
+	}
+}
+
+// Shutdown closes the queue channel to initiate draining of the queue.
+func (sq *sizedQueue[T]) Shutdown(context.Context) error {
+	sq.mu.Lock()
+	defer sq.mu.Unlock()
+	sq.stopped = true
+	sq.hasMoreElements.Broadcast()
+	return nil
+}
+
+func (sq *sizedQueue[T]) Size() int64 {
+	sq.mu.Lock()
+	defer sq.mu.Unlock()
+	return sq.size
+}
+
+func (sq *sizedQueue[T]) Capacity() int64 {
+	return sq.cap
+}
diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterqueue/sizer.go b/vendor/go.opentelemetry.io/collector/exporter/exporterqueue/sizer.go
new file mode 100644
index 00000000000..dad7343dab8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/exporter/exporterqueue/sizer.go
@@ -0,0 +1,16 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package exporterqueue // import "go.opentelemetry.io/collector/exporter/exporterqueue"
+
+// sizer is an interface that returns the size of the given element.
+type sizer[T any] interface {
+	Sizeof(T) int64
+}
+
+// requestSizer is a sizer implementation that returns the size of a queue element as one request.
+type requestSizer[T any] struct{}
+
+func (rs *requestSizer[T]) Sizeof(T) int64 {
+	return 1
+}
diff --git a/vendor/go.opentelemetry.io/collector/exporter/exportertest/LICENSE b/vendor/go.opentelemetry.io/collector/exporter/exportertest/LICENSE
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/exporter/exportertest/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity.
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/go.opentelemetry.io/collector/exporter/exportertest/Makefile b/vendor/go.opentelemetry.io/collector/exporter/exportertest/Makefile new file mode 100644 index 00000000000..c1496226e59 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/exporter/exportertest/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/collector/exporter/exportertest/contract_checker.go b/vendor/go.opentelemetry.io/collector/exporter/exportertest/contract_checker.go index 7b3f55c1902..d708c097a68 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exportertest/contract_checker.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exportertest/contract_checker.go @@ -19,6 +19,7 @@ import ( "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/ptrace" + "go.opentelemetry.io/collector/pipeline" "go.opentelemetry.io/collector/receiver" "go.opentelemetry.io/collector/receiver/receivertest" ) @@ -32,8 +33,7 @@ type uniqueIDAttrVal string type CheckConsumeContractParams struct { T *testing.T NumberOfTestElements int - // DataType to test for. - DataType component.DataType + Signal pipeline.Signal // ExporterFactory to create an exporter to be tested. ExporterFactory exporter.Factory ExporterConfig component.Config @@ -84,19 +84,19 @@ func CheckConsumeContract(params CheckConsumeContractParams) { func checkConsumeContractScenario(t *testing.T, params CheckConsumeContractParams, decisionFunc func() error, checkIfTestPassed func(*testing.T, int, requestCounter)) { mockConsumerInstance := newMockConsumer(decisionFunc) - switch params.DataType { - case component.DataTypeLogs: - r, err := params.ReceiverFactory.CreateLogsReceiver(context.Background(), receivertest.NewNopCreateSettings(), params.ReceiverConfig, &mockConsumerInstance) + switch params.Signal { + case pipeline.SignalLogs: + r, err := params.ReceiverFactory.CreateLogs(context.Background(), receivertest.NewNopSettings(), params.ReceiverConfig, &mockConsumerInstance) require.NoError(t, err) require.NoError(t, r.Start(context.Background(), componenttest.NewNopHost())) checkLogs(t, params, r, &mockConsumerInstance, checkIfTestPassed) - case component.DataTypeTraces: - r, err := params.ReceiverFactory.CreateTracesReceiver(context.Background(), receivertest.NewNopCreateSettings(), params.ReceiverConfig, &mockConsumerInstance) + case pipeline.SignalTraces: + r, err := params.ReceiverFactory.CreateTraces(context.Background(), receivertest.NewNopSettings(), params.ReceiverConfig, &mockConsumerInstance) require.NoError(t, err) require.NoError(t, r.Start(context.Background(), componenttest.NewNopHost())) checkTraces(t, params, r, &mockConsumerInstance, checkIfTestPassed) - case component.DataTypeMetrics: - r, err := params.ReceiverFactory.CreateMetricsReceiver(context.Background(), receivertest.NewNopCreateSettings(), params.ReceiverConfig, &mockConsumerInstance) + case pipeline.SignalMetrics: + r, err := params.ReceiverFactory.CreateMetrics(context.Background(), receivertest.NewNopSettings(), params.ReceiverConfig, &mockConsumerInstance) require.NoError(t, err) require.NoError(t, r.Start(context.Background(), componenttest.NewNopHost())) checkMetrics(t, params, r, &mockConsumerInstance, checkIfTestPassed) @@ -106,11 +106,12 @@ func checkConsumeContractScenario(t *testing.T, params CheckConsumeContractParam } func checkMetrics(t *testing.T, params CheckConsumeContractParams, mockReceiver component.Component, 
- mockConsumer *mockConsumer, checkIfTestPassed func(*testing.T, int, requestCounter)) { + mockConsumer *mockConsumer, checkIfTestPassed func(*testing.T, int, requestCounter), +) { ctx := context.Background() var exp exporter.Metrics var err error - exp, err = params.ExporterFactory.CreateMetricsExporter(ctx, NewNopCreateSettings(), params.ExporterConfig) + exp, err = params.ExporterFactory.CreateMetrics(ctx, NewNopSettings(), params.ExporterConfig) require.NoError(t, err) require.NotNil(t, exp) @@ -150,7 +151,7 @@ func checkTraces(t *testing.T, params CheckConsumeContractParams, mockReceiver c ctx := context.Background() var exp exporter.Traces var err error - exp, err = params.ExporterFactory.CreateTracesExporter(ctx, NewNopCreateSettings(), params.ExporterConfig) + exp, err = params.ExporterFactory.CreateTraces(ctx, NewNopSettings(), params.ExporterConfig) require.NoError(t, err) require.NotNil(t, exp) @@ -190,7 +191,7 @@ func checkLogs(t *testing.T, params CheckConsumeContractParams, mockReceiver com ctx := context.Background() var exp exporter.Logs var err error - exp, err = params.ExporterFactory.CreateLogsExporter(ctx, NewNopCreateSettings(), params.ExporterConfig) + exp, err = params.ExporterFactory.CreateLogs(ctx, NewNopSettings(), params.ExporterConfig) require.NoError(t, err) require.NotNil(t, exp) @@ -228,9 +229,9 @@ func checkLogs(t *testing.T, params CheckConsumeContractParams, mockReceiver com // Test is successful if all the elements were received successfully and no error was returned func alwaysSucceedsPassed(t *testing.T, allRecordsNumber int, reqCounter requestCounter) { require.Equal(t, allRecordsNumber, reqCounter.success) - require.Equal(t, reqCounter.total, allRecordsNumber) - require.Equal(t, reqCounter.error.nonpermanent, 0) - require.Equal(t, reqCounter.error.permanent, 0) + require.Equal(t, allRecordsNumber, reqCounter.total) + require.Equal(t, 0, reqCounter.error.nonpermanent) + require.Equal(t, 0, reqCounter.error.permanent) } // Test is successful if all the elements were retried on non-permanent errors diff --git a/vendor/go.opentelemetry.io/collector/exporter/exportertest/mock_consumer.go b/vendor/go.opentelemetry.io/collector/exporter/exportertest/mock_consumer.go index b513e524735..0290ed69510 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exportertest/mock_consumer.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exportertest/mock_consumer.go @@ -5,7 +5,7 @@ package exportertest // import "go.opentelemetry.io/collector/exporter/exportert import ( "context" "fmt" - "math/rand" + "math/rand/v2" "sync" "google.golang.org/grpc/codes" @@ -19,8 +19,10 @@ import ( "go.opentelemetry.io/collector/pdata/ptrace" ) -var errNonPermanent = status.Error(codes.DeadlineExceeded, "non Permanent error") -var errPermanent = status.Error(codes.Internal, "Permanent error") +var ( + errNonPermanent = status.Error(codes.DeadlineExceeded, "non Permanent error") + errPermanent = status.Error(codes.Internal, "Permanent error") +) // // randomNonPermanentErrorConsumeDecision is a decision function that succeeds approximately // // half of the time and fails with a non-permanent error the rest of the time. 
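The commented-out description above refers to a decision function used by the contract checker. A minimal sketch of such a function, as a hypothetical reconstruction using the math/rand/v2 import added in this hunk:

// randomNonPermanentErrorConsumeDecision succeeds about half of the time and
// fails with a retryable (non-permanent) error otherwise, exercising the retry path.
func randomNonPermanentErrorConsumeDecision() error {
	if rand.Float32() < 0.5 {
		return errNonPermanent // the status.Error(codes.DeadlineExceeded, ...) defined above
	}
	return nil
}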
diff --git a/vendor/go.opentelemetry.io/collector/exporter/exportertest/nop_exporter.go b/vendor/go.opentelemetry.io/collector/exporter/exportertest/nop_exporter.go index 6589d867bec..19e062e45e9 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exportertest/nop_exporter.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exportertest/nop_exporter.go @@ -12,13 +12,14 @@ import ( "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/consumer/consumertest" "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/xexporter" ) var nopType = component.MustNewType("nop") -// NewNopCreateSettings returns a new nop settings for Create*Exporter functions. -func NewNopCreateSettings() exporter.CreateSettings { - return exporter.CreateSettings{ +// NewNopSettings returns a new nop settings for Create* functions. +func NewNopSettings() exporter.Settings { + return exporter.Settings{ ID: component.NewIDWithName(nopType, uuid.NewString()), TelemetrySettings: componenttest.NewNopTelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo(), @@ -27,44 +28,41 @@ func NewNopCreateSettings() exporter.CreateSettings { // NewNopFactory returns an exporter.Factory that constructs nop exporters. func NewNopFactory() exporter.Factory { - return exporter.NewFactory( + return xexporter.NewFactory( nopType, func() component.Config { return &nopConfig{} }, - exporter.WithTraces(createTracesExporter, component.StabilityLevelStable), - exporter.WithMetrics(createMetricsExporter, component.StabilityLevelStable), - exporter.WithLogs(createLogsExporter, component.StabilityLevelStable), + xexporter.WithTraces(createTraces, component.StabilityLevelStable), + xexporter.WithMetrics(createMetrics, component.StabilityLevelStable), + xexporter.WithLogs(createLogs, component.StabilityLevelStable), + xexporter.WithProfiles(createProfiles, component.StabilityLevelAlpha), ) } -func createTracesExporter(context.Context, exporter.CreateSettings, component.Config) (exporter.Traces, error) { +func createTraces(context.Context, exporter.Settings, component.Config) (exporter.Traces, error) { return nopInstance, nil } -func createMetricsExporter(context.Context, exporter.CreateSettings, component.Config) (exporter.Metrics, error) { +func createMetrics(context.Context, exporter.Settings, component.Config) (exporter.Metrics, error) { return nopInstance, nil } -func createLogsExporter(context.Context, exporter.CreateSettings, component.Config) (exporter.Logs, error) { +func createLogs(context.Context, exporter.Settings, component.Config) (exporter.Logs, error) { + return nopInstance, nil +} + +func createProfiles(context.Context, exporter.Settings, component.Config) (xexporter.Profiles, error) { return nopInstance, nil } type nopConfig struct{} -var nopInstance = &nopExporter{ +var nopInstance = &nop{ Consumer: consumertest.NewNop(), } -// nopExporter stores consumed traces and metrics for testing purposes. -type nopExporter struct { +// nop stores consumed traces, metrics, logs and profiles for testing purposes. +type nop struct { component.StartFunc component.ShutdownFunc consumertest.Consumer } - -// NewNopBuilder returns an exporter.Builder that constructs nop receivers. 
-func NewNopBuilder() *exporter.Builder {
-	nopFactory := NewNopFactory()
-	return exporter.NewBuilder(
-		map[component.ID]component.Config{component.NewID(nopType): nopFactory.CreateDefaultConfig()},
-		map[component.Type]exporter.Factory{nopType: nopFactory})
-}
diff --git a/vendor/go.opentelemetry.io/collector/exporter/internal/queue/batcher.go b/vendor/go.opentelemetry.io/collector/exporter/internal/queue/batcher.go
new file mode 100644
index 00000000000..df45a7b97c9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/exporter/internal/queue/batcher.go
@@ -0,0 +1,86 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package queue // import "go.opentelemetry.io/collector/exporter/internal/queue"
+
+import (
+	"context"
+	"sync"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/exporter/exporterbatcher"
+	"go.opentelemetry.io/collector/exporter/exporterqueue"
+	"go.opentelemetry.io/collector/exporter/internal"
+)
+
+type batch struct {
+	ctx     context.Context
+	req     internal.Request
+	idxList []uint64
+}
+
+// Batcher is in charge of reading items from the queue and sending them out asynchronously.
+type Batcher interface {
+	component.Component
+}
+
+type BaseBatcher struct {
+	batchCfg exporterbatcher.Config
+	queue    exporterqueue.Queue[internal.Request]
+	// TODO: Remove when the -1 hack for testing is removed.
+	maxWorkers int
+	workerPool chan bool
+	exportFunc func(ctx context.Context, req internal.Request) error
+	stopWG     sync.WaitGroup
+}
+
+func NewBatcher(batchCfg exporterbatcher.Config,
+	queue exporterqueue.Queue[internal.Request],
+	exportFunc func(ctx context.Context, req internal.Request) error,
+	maxWorkers int,
+) (Batcher, error) {
+	if !batchCfg.Enabled {
+		return &DisabledBatcher{BaseBatcher: newBaseBatcher(batchCfg, queue, exportFunc, maxWorkers)}, nil
+	}
+	return &DefaultBatcher{BaseBatcher: newBaseBatcher(batchCfg, queue, exportFunc, maxWorkers)}, nil
+}
+
+func newBaseBatcher(batchCfg exporterbatcher.Config,
+	queue exporterqueue.Queue[internal.Request],
+	exportFunc func(ctx context.Context, req internal.Request) error,
+	maxWorkers int,
+) BaseBatcher {
+	var workerPool chan bool
+	if maxWorkers > 0 {
+		workerPool = make(chan bool, maxWorkers)
+		for i := 0; i < maxWorkers; i++ {
+			workerPool <- true
+		}
+	}
+	return BaseBatcher{
+		batchCfg:   batchCfg,
+		queue:      queue,
+		maxWorkers: maxWorkers,
+		workerPool: workerPool,
+		exportFunc: exportFunc,
+		stopWG:     sync.WaitGroup{},
+	}
+}
+
+// flush starts a goroutine that calls exportFunc. It blocks until a worker is available if necessary.
+func (qb *BaseBatcher) flush(batchToFlush batch) {
+	qb.stopWG.Add(1)
+	if qb.workerPool != nil {
+		<-qb.workerPool
+	}
+	go func() {
+		defer qb.stopWG.Done()
+		err := qb.exportFunc(batchToFlush.ctx, batchToFlush.req)
+		for _, idx := range batchToFlush.idxList {
+			qb.queue.OnProcessingFinished(idx, err)
+		}
+		if qb.workerPool != nil {
+			qb.workerPool <- true
+		}
+	}()
+}
diff --git a/vendor/go.opentelemetry.io/collector/exporter/internal/queue/bounded_memory_queue.go b/vendor/go.opentelemetry.io/collector/exporter/internal/queue/bounded_memory_queue.go
deleted file mode 100644
index 98e1b281176..00000000000
--- a/vendor/go.opentelemetry.io/collector/exporter/internal/queue/bounded_memory_queue.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// Copyright (c) 2019 The Jaeger Authors.
-// Copyright (c) 2017 Uber Technologies, Inc.
-// SPDX-License-Identifier: Apache-2.0 - -package queue // import "go.opentelemetry.io/collector/exporter/internal/queue" - -import ( - "context" - - "go.opentelemetry.io/collector/component" -) - -// boundedMemoryQueue implements a producer-consumer exchange similar to a ring buffer queue, -// where the queue is bounded and if it fills up due to slow consumers, the new items written by -// the producer are dropped. -type boundedMemoryQueue[T any] struct { - component.StartFunc - *sizedChannel[memQueueEl[T]] - sizer Sizer[T] -} - -// MemoryQueueSettings defines internal parameters for boundedMemoryQueue creation. -type MemoryQueueSettings[T any] struct { - Sizer Sizer[T] - Capacity int64 -} - -// NewBoundedMemoryQueue constructs the new queue of specified capacity, and with an optional -// callback for dropped items (e.g. useful to emit metrics). -func NewBoundedMemoryQueue[T any](set MemoryQueueSettings[T]) Queue[T] { - return &boundedMemoryQueue[T]{ - sizedChannel: newSizedChannel[memQueueEl[T]](set.Capacity, nil, 0), - sizer: set.Sizer, - } -} - -// Offer is used by the producer to submit new item to the queue. Calling this method on a stopped queue will panic. -func (q *boundedMemoryQueue[T]) Offer(ctx context.Context, req T) error { - return q.sizedChannel.push(memQueueEl[T]{ctx: ctx, req: req}, q.sizer.Sizeof(req), nil) -} - -// Consume applies the provided function on the head of queue. -// The call blocks until there is an item available or the queue is stopped. -// The function returns true when an item is consumed or false if the queue is stopped and emptied. -func (q *boundedMemoryQueue[T]) Consume(consumeFunc func(context.Context, T) error) bool { - item, ok := q.sizedChannel.pop(func(el memQueueEl[T]) int64 { return q.sizer.Sizeof(el.req) }) - if !ok { - return false - } - // the memory queue doesn't handle consume errors - _ = consumeFunc(item.ctx, item.req) - return true -} - -// Shutdown closes the queue channel to initiate draining of the queue. -func (q *boundedMemoryQueue[T]) Shutdown(context.Context) error { - q.sizedChannel.shutdown() - return nil -} - -type memQueueEl[T any] struct { - req T - ctx context.Context -} diff --git a/vendor/go.opentelemetry.io/collector/exporter/internal/queue/consumers.go b/vendor/go.opentelemetry.io/collector/exporter/internal/queue/consumers.go index 7c57fea9620..4db4b830e3c 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/internal/queue/consumers.go +++ b/vendor/go.opentelemetry.io/collector/exporter/internal/queue/consumers.go @@ -8,30 +8,26 @@ import ( "sync" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/exporter/exporterqueue" ) type Consumers[T any] struct { - queue Queue[T] + queue exporterqueue.Queue[T] numConsumers int consumeFunc func(context.Context, T) error stopWG sync.WaitGroup } -func NewQueueConsumers[T any](q Queue[T], numConsumers int, consumeFunc func(context.Context, T) error) *Consumers[T] { +func NewQueueConsumers[T any](q exporterqueue.Queue[T], numConsumers int, consumeFunc func(context.Context, T) error) *Consumers[T] { return &Consumers[T]{ queue: q, numConsumers: numConsumers, consumeFunc: consumeFunc, - stopWG: sync.WaitGroup{}, } } // Start ensures that queue and all consumers are started. 
-func (qc *Consumers[T]) Start(ctx context.Context, host component.Host) error { - if err := qc.queue.Start(ctx, host); err != nil { - return err - } - +func (qc *Consumers[T]) Start(_ context.Context, _ component.Host) error { var startWG sync.WaitGroup for i := 0; i < qc.numConsumers; i++ { qc.stopWG.Add(1) @@ -40,9 +36,12 @@ func (qc *Consumers[T]) Start(ctx context.Context, host component.Host) error { startWG.Done() defer qc.stopWG.Done() for { - if !qc.queue.Consume(qc.consumeFunc) { + index, ctx, req, ok := qc.queue.Read(context.Background()) + if !ok { return } + consumeErr := qc.consumeFunc(ctx, req) + qc.queue.OnProcessingFinished(index, consumeErr) } }() } @@ -52,10 +51,7 @@ func (qc *Consumers[T]) Start(ctx context.Context, host component.Host) error { } // Shutdown ensures that queue and all consumers are stopped. -func (qc *Consumers[T]) Shutdown(ctx context.Context) error { - if err := qc.queue.Shutdown(ctx); err != nil { - return err - } +func (qc *Consumers[T]) Shutdown(_ context.Context) error { qc.stopWG.Wait() return nil } diff --git a/vendor/go.opentelemetry.io/collector/exporter/internal/queue/default_batcher.go b/vendor/go.opentelemetry.io/collector/exporter/internal/queue/default_batcher.go new file mode 100644 index 00000000000..dced12e9e2b --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/exporter/internal/queue/default_batcher.go @@ -0,0 +1,180 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package queue // import "go.opentelemetry.io/collector/exporter/internal/queue" + +import ( + "context" + "math" + "sync" + "time" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/exporter/internal" +) + +// DefaultBatcher continuously reads from the queue and flushes asynchronously if size limit is met or on timeout. +type DefaultBatcher struct { + BaseBatcher + currentBatchMu sync.Mutex + currentBatch *batch + timer *time.Timer + shutdownCh chan bool +} + +func (qb *DefaultBatcher) resetTimer() { + if qb.batchCfg.FlushTimeout != 0 { + qb.timer.Reset(qb.batchCfg.FlushTimeout) + } +} + +// startReadingFlushingGoroutine starts a goroutine that reads and then flushes. +func (qb *DefaultBatcher) startReadingFlushingGoroutine() { + qb.stopWG.Add(1) + go func() { + defer qb.stopWG.Done() + for { + // Read() blocks until the queue is non-empty or until the queue is stopped. + idx, ctx, req, ok := qb.queue.Read(context.Background()) + if !ok { + qb.shutdownCh <- true + return + } + + qb.currentBatchMu.Lock() + + if qb.batchCfg.MaxSizeItems > 0 { + var reqList []internal.Request + var mergeSplitErr error + if qb.currentBatch == nil || qb.currentBatch.req == nil { + qb.resetTimer() + reqList, mergeSplitErr = req.MergeSplit(ctx, qb.batchCfg.MaxSizeConfig, nil) + } else { + reqList, mergeSplitErr = qb.currentBatch.req.MergeSplit(ctx, qb.batchCfg.MaxSizeConfig, req) + } + + if mergeSplitErr != nil || reqList == nil { + qb.queue.OnProcessingFinished(idx, mergeSplitErr) + qb.currentBatchMu.Unlock() + continue + } + + // If there was a split, we flush everything immediately. 
+			if reqList[0].ItemsCount() >= qb.batchCfg.MinSizeItems || len(reqList) > 1 {
+				qb.currentBatch = nil
+				qb.currentBatchMu.Unlock()
+				for i := 0; i < len(reqList); i++ {
+					qb.flush(batch{
+						req:     reqList[i],
+						ctx:     ctx,
+						idxList: []uint64{idx},
+					})
+					// TODO: handle partial failure
+				}
+				qb.resetTimer()
+			} else {
+				qb.currentBatch = &batch{
+					req:     reqList[0],
+					ctx:     ctx,
+					idxList: []uint64{idx},
+				}
+				qb.currentBatchMu.Unlock()
+			}
+		} else {
+			if qb.currentBatch == nil || qb.currentBatch.req == nil {
+				qb.resetTimer()
+				qb.currentBatch = &batch{
+					req:     req,
+					ctx:     ctx,
+					idxList: []uint64{idx},
+				}
+			} else {
+				// TODO: consolidate implementation for the cases where MaxSizeConfig is specified and the case where it is not specified
+				mergedReq, mergeErr := qb.currentBatch.req.MergeSplit(qb.currentBatch.ctx, qb.batchCfg.MaxSizeConfig, req)
+				if mergeErr != nil {
+					qb.queue.OnProcessingFinished(idx, mergeErr)
+					qb.currentBatchMu.Unlock()
+					continue
+				}
+				qb.currentBatch = &batch{
+					req:     mergedReq[0],
+					ctx:     qb.currentBatch.ctx,
+					idxList: append(qb.currentBatch.idxList, idx),
+				}
+			}
+
+			if qb.currentBatch.req.ItemsCount() >= qb.batchCfg.MinSizeItems {
+				batchToFlush := *qb.currentBatch
+				qb.currentBatch = nil
+				qb.currentBatchMu.Unlock()
+
+				// flush() blocks until it has successfully started a goroutine for flushing.
+				qb.flush(batchToFlush)
+				qb.resetTimer()
+			} else {
+				qb.currentBatchMu.Unlock()
+			}
+		}
+		}
+	}()
+}
+
+// startTimeBasedFlushingGoroutine starts a goroutine that flushes on timeout.
+func (qb *DefaultBatcher) startTimeBasedFlushingGoroutine() {
+	qb.stopWG.Add(1)
+	go func() {
+		defer qb.stopWG.Done()
+		for {
+			select {
+			case <-qb.shutdownCh:
+				return
+			case <-qb.timer.C:
+				qb.flushCurrentBatchIfNecessary()
+			}
+		}
+	}()
+}
+
+// Start starts the goroutine that reads from the queue and flushes asynchronously.
+func (qb *DefaultBatcher) Start(_ context.Context, _ component.Host) error {
+	// maxWorkers being -1 means the batcher is disabled. This is for testing queue sender metrics.
+	if qb.maxWorkers == -1 {
+		return nil
+	}
+
+	qb.shutdownCh = make(chan bool, 1)
+
+	if qb.batchCfg.FlushTimeout == 0 {
+		qb.timer = time.NewTimer(math.MaxInt)
+		qb.timer.Stop()
+	} else {
+		qb.timer = time.NewTimer(qb.batchCfg.FlushTimeout)
+	}
+
+	qb.startReadingFlushingGoroutine()
+	qb.startTimeBasedFlushingGoroutine()
+	return nil
+}
+
+// flushCurrentBatchIfNecessary sends out the current request batch if it is not nil
+func (qb *DefaultBatcher) flushCurrentBatchIfNecessary() {
+	qb.currentBatchMu.Lock()
+	if qb.currentBatch == nil || qb.currentBatch.req == nil {
+		qb.currentBatchMu.Unlock()
+		return
+	}
+	batchToFlush := *qb.currentBatch
+	qb.currentBatch = nil
+	qb.currentBatchMu.Unlock()
+
+	// flush() blocks until it has successfully started a goroutine for flushing.
+	qb.flush(batchToFlush)
+	qb.resetTimer()
+}
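Both flush triggers above are driven by exporterbatcher.Config. A hedged sketch of wiring a batcher, assuming exporterbatcher's field names as used in this diff (Enabled, FlushTimeout, MinSizeItems, MaxSizeItems) and its NewDefaultConfig helper:

func newExampleBatcher(q exporterqueue.Queue[internal.Request],
	exportFunc func(context.Context, internal.Request) error,
) (Batcher, error) {
	cfg := exporterbatcher.NewDefaultConfig()
	cfg.Enabled = true                        // false would make NewBatcher return a DisabledBatcher
	cfg.FlushTimeout = 200 * time.Millisecond // timer path: flushCurrentBatchIfNecessary
	cfg.MinSizeItems = 1024                   // size path: flush once the merged batch reaches 1024 items
	cfg.MaxSizeItems = 4096                   // >0 routes reads through the MergeSplit branch above
	return NewBatcher(cfg, q, exportFunc, 4)  // 4 workers in the flush pool
}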
+
+// Shutdown ensures that the queue and all Batcher goroutines are stopped.
+func (qb *DefaultBatcher) Shutdown(_ context.Context) error {
+	qb.flushCurrentBatchIfNecessary()
+	qb.stopWG.Wait()
+	return nil
+}
diff --git a/vendor/go.opentelemetry.io/collector/exporter/internal/queue/disabled_batcher.go b/vendor/go.opentelemetry.io/collector/exporter/internal/queue/disabled_batcher.go
new file mode 100644
index 00000000000..be89f58e011
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/exporter/internal/queue/disabled_batcher.go
@@ -0,0 +1,50 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package queue // import "go.opentelemetry.io/collector/exporter/internal/queue"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/collector/component"
+)
+
+// DisabledBatcher is a special case of Batcher that has no size limit for sending. Any items read from the queue will
+// be sent out (asynchronously) immediately regardless of the size.
+type DisabledBatcher struct {
+	BaseBatcher
+}
+
+// Start starts the goroutine that reads from the queue and flushes asynchronously.
+func (qb *DisabledBatcher) Start(_ context.Context, _ component.Host) error {
+	// maxWorkers being -1 means the batcher is disabled. This is for testing queue sender metrics.
+	if qb.maxWorkers == -1 {
+		return nil
+	}
+
+	// This goroutine reads and then flushes.
+	// 1. Reading from the queue is blocked until the queue is non-empty or until the queue is stopped.
+	// 2. flush() blocks until there are idle workers in the worker pool.
+	qb.stopWG.Add(1)
+	go func() {
+		defer qb.stopWG.Done()
+		for {
+			idx, _, req, ok := qb.queue.Read(context.Background())
+			if !ok {
+				return
+			}
+			qb.flush(batch{
+				req:     req,
+				ctx:     context.Background(),
+				idxList: []uint64{idx},
+			})
+		}
+	}()
+	return nil
+}
+
+// Shutdown ensures that the queue and all Batcher goroutines are stopped.
+func (qb *DisabledBatcher) Shutdown(_ context.Context) error { + qb.stopWG.Wait() + return nil +} diff --git a/vendor/go.opentelemetry.io/collector/exporter/internal/queue/mock_storage.go b/vendor/go.opentelemetry.io/collector/exporter/internal/queue/mock_storage.go deleted file mode 100644 index 6ef9810529b..00000000000 --- a/vendor/go.opentelemetry.io/collector/exporter/internal/queue/mock_storage.go +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package queue // import "go.opentelemetry.io/collector/exporter/internal/queue" - -import ( - "context" - "errors" - "fmt" - "sync" - "sync/atomic" - "syscall" - - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/extension/experimental/storage" -) - -type mockStorageExtension struct { - component.StartFunc - component.ShutdownFunc - st sync.Map - getClientError error -} - -func (m *mockStorageExtension) GetClient(context.Context, component.Kind, component.ID, string) (storage.Client, error) { - if m.getClientError != nil { - return nil, m.getClientError - } - return &mockStorageClient{st: &m.st, closed: &atomic.Bool{}}, nil -} - -func NewMockStorageExtension(getClientError error) storage.Extension { - return &mockStorageExtension{getClientError: getClientError} -} - -type mockStorageClient struct { - st *sync.Map - closed *atomic.Bool -} - -func (m *mockStorageClient) Get(ctx context.Context, s string) ([]byte, error) { - getOp := storage.GetOperation(s) - err := m.Batch(ctx, getOp) - return getOp.Value, err -} - -func (m *mockStorageClient) Set(ctx context.Context, s string, bytes []byte) error { - return m.Batch(ctx, storage.SetOperation(s, bytes)) -} - -func (m *mockStorageClient) Delete(ctx context.Context, s string) error { - return m.Batch(ctx, storage.DeleteOperation(s)) -} - -func (m *mockStorageClient) Close(context.Context) error { - m.closed.Store(true) - return nil -} - -func (m *mockStorageClient) Batch(_ context.Context, ops ...storage.Operation) error { - if m.isClosed() { - panic("client already closed") - } - for _, op := range ops { - switch op.Type { - case storage.Get: - val, found := m.st.Load(op.Key) - if !found { - break - } - op.Value = val.([]byte) - case storage.Set: - m.st.Store(op.Key, op.Value) - case storage.Delete: - m.st.Delete(op.Key) - default: - return errors.New("wrong operation type") - } - } - - return nil -} - -func (m *mockStorageClient) isClosed() bool { - return m.closed.Load() -} - -func newFakeBoundedStorageClient(maxSizeInBytes int) *fakeBoundedStorageClient { - return &fakeBoundedStorageClient{ - st: map[string][]byte{}, - MaxSizeInBytes: maxSizeInBytes, - } -} - -// this storage client mimics the behavior of actual storage engines with limited storage space available -// in general, real storage engines often have a per-write-transaction storage overhead, needing to keep -// both the old and the new value stored until the transaction is committed -// this is useful for testing the persistent queue queue behavior with a full disk -type fakeBoundedStorageClient struct { - MaxSizeInBytes int - st map[string][]byte - sizeInBytes int - mux sync.Mutex -} - -func (m *fakeBoundedStorageClient) Get(ctx context.Context, key string) ([]byte, error) { - op := storage.GetOperation(key) - if err := m.Batch(ctx, op); err != nil { - return nil, err - } - - return op.Value, nil -} - -func (m *fakeBoundedStorageClient) Set(ctx context.Context, key string, value []byte) error { - return m.Batch(ctx, storage.SetOperation(key, 
value)) -} - -func (m *fakeBoundedStorageClient) Delete(ctx context.Context, key string) error { - return m.Batch(ctx, storage.DeleteOperation(key)) -} - -func (m *fakeBoundedStorageClient) Close(context.Context) error { - return nil -} - -func (m *fakeBoundedStorageClient) Batch(_ context.Context, ops ...storage.Operation) error { - m.mux.Lock() - defer m.mux.Unlock() - - totalAdded, totalRemoved := m.getTotalSizeChange(ops) - - // the assumption here is that the new data needs to coexist with the old data on disk - // for the transaction to succeed - // this seems to be true for the file storage extension at least - if m.sizeInBytes+totalAdded > m.MaxSizeInBytes { - return fmt.Errorf("insufficient space available: %w", syscall.ENOSPC) - } - - for _, op := range ops { - switch op.Type { - case storage.Get: - op.Value = m.st[op.Key] - case storage.Set: - m.st[op.Key] = op.Value - case storage.Delete: - delete(m.st, op.Key) - default: - return errors.New("wrong operation type") - } - } - - m.sizeInBytes += totalAdded - totalRemoved - - return nil -} - -func (m *fakeBoundedStorageClient) SetMaxSizeInBytes(newMaxSize int) { - m.mux.Lock() - defer m.mux.Unlock() - m.MaxSizeInBytes = newMaxSize -} - -func (m *fakeBoundedStorageClient) GetSizeInBytes() int { - m.mux.Lock() - defer m.mux.Unlock() - return m.sizeInBytes -} - -func (m *fakeBoundedStorageClient) getTotalSizeChange(ops []storage.Operation) (totalAdded int, totalRemoved int) { - totalAdded, totalRemoved = 0, 0 - for _, op := range ops { - switch op.Type { - case storage.Set: - if oldValue, ok := m.st[op.Key]; ok { - totalRemoved += len(oldValue) - } else { - totalAdded += len(op.Key) - } - totalAdded += len(op.Value) - case storage.Delete: - if value, ok := m.st[op.Key]; ok { - totalRemoved += len(op.Key) - totalRemoved += len(value) - } - default: - } - } - return totalAdded, totalRemoved -} - -func newFakeStorageClientWithErrors(errors []error) *fakeStorageClientWithErrors { - return &fakeStorageClientWithErrors{ - errors: errors, - } -} - -// this storage client just returns errors from a list in order -// used for testing error handling -type fakeStorageClientWithErrors struct { - errors []error - nextErrorIndex int - mux sync.Mutex -} - -func (m *fakeStorageClientWithErrors) Get(ctx context.Context, key string) ([]byte, error) { - op := storage.GetOperation(key) - err := m.Batch(ctx, op) - if err != nil { - return nil, err - } - - return op.Value, nil -} - -func (m *fakeStorageClientWithErrors) Set(ctx context.Context, key string, value []byte) error { - return m.Batch(ctx, storage.SetOperation(key, value)) -} - -func (m *fakeStorageClientWithErrors) Delete(ctx context.Context, key string) error { - return m.Batch(ctx, storage.DeleteOperation(key)) -} - -func (m *fakeStorageClientWithErrors) Close(context.Context) error { - return nil -} - -func (m *fakeStorageClientWithErrors) Batch(context.Context, ...storage.Operation) error { - m.mux.Lock() - defer m.mux.Unlock() - - if m.nextErrorIndex >= len(m.errors) { - return nil - } - - m.nextErrorIndex++ - return m.errors[m.nextErrorIndex-1] -} - -func (m *fakeStorageClientWithErrors) Reset() { - m.mux.Lock() - defer m.mux.Unlock() - m.nextErrorIndex = 0 -} diff --git a/vendor/go.opentelemetry.io/collector/exporter/internal/queue/persistent_queue.go b/vendor/go.opentelemetry.io/collector/exporter/internal/queue/persistent_queue.go deleted file mode 100644 index 07b0d1fb628..00000000000 --- a/vendor/go.opentelemetry.io/collector/exporter/internal/queue/persistent_queue.go +++ /dev/null 
@@ -1,557 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package queue // import "go.opentelemetry.io/collector/exporter/internal/queue" - -import ( - "context" - "encoding/binary" - "errors" - "fmt" - "strconv" - "sync" - - "go.uber.org/multierr" - "go.uber.org/zap" - - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/exporter" - "go.opentelemetry.io/collector/exporter/internal/experr" - "go.opentelemetry.io/collector/extension/experimental/storage" -) - -// persistentQueue provides a persistent queue implementation backed by file storage extension -// -// Write index describes the position at which next item is going to be stored. -// Read index describes which item needs to be read next. -// When Write index = Read index, no elements are in the queue. -// -// The items currently dispatched by consumers are not deleted until the processing is finished. -// Their list is stored under a separate key. -// -// ┌───────file extension-backed queue───────┐ -// │ │ -// │ ┌───┐ ┌───┐ ┌───┐ ┌───┐ ┌───┐ │ -// │ n+1 │ n │ ... │ 4 │ │ 3 │ │ 2 │ │ 1 │ │ -// │ └───┘ └───┘ └─x─┘ └─|─┘ └─x─┘ │ -// │ x | x │ -// └───────────────────────x─────|─────x─────┘ -// ▲ ▲ x | x -// │ │ x | xxxx deleted -// │ │ x | -// write read x └── currently dispatched item -// index index x -// xxxx deleted -type persistentQueue[T any] struct { - // sizedChannel is used by the persistent queue for two purposes: - // 1. a communication channel notifying the consumer that a new item is available. - // 2. capacity control based on the size of the items. - *sizedChannel[permanentQueueEl] - - set PersistentQueueSettings[T] - logger *zap.Logger - client storage.Client - - // isRequestSized indicates whether the queue is sized by the number of requests. - isRequestSized bool - - // mu guards everything declared below. - mu sync.Mutex - readIndex uint64 - writeIndex uint64 - currentlyDispatchedItems []uint64 - refClient int64 - stopped bool -} - -const ( - zapKey = "key" - zapErrorCount = "errorCount" - zapNumberOfItems = "numberOfItems" - - readIndexKey = "ri" - writeIndexKey = "wi" - currentlyDispatchedItemsKey = "di" - queueSizeKey = "si" -) - -var ( - errValueNotSet = errors.New("value not set") - errInvalidValue = errors.New("invalid value") - errNoStorageClient = errors.New("no storage client extension found") - errWrongExtensionType = errors.New("requested extension is not a storage extension") -) - -type PersistentQueueSettings[T any] struct { - Sizer Sizer[T] - Capacity int64 - DataType component.DataType - StorageID component.ID - Marshaler func(req T) ([]byte, error) - Unmarshaler func([]byte) (T, error) - ExporterSettings exporter.CreateSettings -} - -// NewPersistentQueue creates a new queue backed by file storage; name and signal must be a unique combination that identifies the queue storage -func NewPersistentQueue[T any](set PersistentQueueSettings[T]) Queue[T] { - _, isRequestSized := set.Sizer.(*RequestSizer[T]) - return &persistentQueue[T]{ - set: set, - logger: set.ExporterSettings.Logger, - isRequestSized: isRequestSized, - } -} - -// Start starts the persistentQueue with the given number of consumers. 
-func (pq *persistentQueue[T]) Start(ctx context.Context, host component.Host) error {
-    storageClient, err := toStorageClient(ctx, pq.set.StorageID, host, pq.set.ExporterSettings.ID, pq.set.DataType)
-    if err != nil {
-        return err
-    }
-    pq.initClient(ctx, storageClient)
-    return nil
-}
-
-func (pq *persistentQueue[T]) initClient(ctx context.Context, client storage.Client) {
-    pq.client = client
-    // Start with a reference 1 which is the reference we use for the producer goroutines and initialization.
-    pq.refClient = 1
-    pq.initPersistentContiguousStorage(ctx)
-    // Make sure the leftover requests are handled
-    pq.retrieveAndEnqueueNotDispatchedReqs(ctx)
-}
-
-func (pq *persistentQueue[T]) initPersistentContiguousStorage(ctx context.Context) {
-    riOp := storage.GetOperation(readIndexKey)
-    wiOp := storage.GetOperation(writeIndexKey)
-
-    err := pq.client.Batch(ctx, riOp, wiOp)
-    if err == nil {
-        pq.readIndex, err = bytesToItemIndex(riOp.Value)
-    }
-
-    if err == nil {
-        pq.writeIndex, err = bytesToItemIndex(wiOp.Value)
-    }
-
-    if err != nil {
-        if errors.Is(err, errValueNotSet) {
-            pq.logger.Info("Initializing new persistent queue")
-        } else {
-            pq.logger.Error("Failed getting read/write index, starting with new ones", zap.Error(err))
-        }
-        pq.readIndex = 0
-        pq.writeIndex = 0
-    }
-
-    initIndexSize := pq.writeIndex - pq.readIndex
-
-    var (
-        initEls       []permanentQueueEl
-        initQueueSize uint64
-    )
-
-    // Pre-allocate the communication channel with the size of the restored queue.
-    if initIndexSize > 0 {
-        initQueueSize = initIndexSize
-        // If the queue is sized by the number of requests, no need to read the queue size from storage.
-        if !pq.isRequestSized {
-            if restoredQueueSize, err := pq.restoreQueueSizeFromStorage(ctx); err == nil {
-                initQueueSize = restoredQueueSize
-            }
-        }
-
-        // Ensure the communication channel is filled with evenly sized elements up to the total restored queue size.
-        initEls = make([]permanentQueueEl, initIndexSize)
-    }
-
-    pq.sizedChannel = newSizedChannel[permanentQueueEl](pq.set.Capacity, initEls, int64(initQueueSize))
-}
-
-// permanentQueueEl is the type of the elements passed to the sizedChannel by the persistentQueue.
-type permanentQueueEl struct{}
-
-// restoreQueueSizeFromStorage restores the queue size from storage.
-func (pq *persistentQueue[T]) restoreQueueSizeFromStorage(ctx context.Context) (uint64, error) {
-    val, err := pq.client.Get(ctx, queueSizeKey)
-    if err != nil {
-        if errors.Is(err, errValueNotSet) {
-            pq.logger.Warn("Cannot read the queue size snapshot from storage. "+
-                "The reported queue size will be inaccurate until the initial queue is drained. "+
-                "This is expected when the items-based sized queue is enabled for the first time", zap.Error(err))
-        } else {
-            pq.logger.Error("Failed to read the queue size snapshot from storage. "+
-                "The reported queue size will be inaccurate until the initial queue is drained.", zap.Error(err))
-        }
-        return 0, err
-    }
-    return bytesToItemIndex(val)
-}
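The initializer above treats a missing index ("value not set") differently from a corrupt one: the first simply means a fresh queue, the second is logged and both indexes are reset. Both indexes live under the `ri` and `wi` keys as fixed-width little-endian integers. The following standalone sketch mirrors the itemIndexToBytes/bytesToItemIndex helpers that appear near the end of this file; the `main` packaging and printed output are illustrative, not part of the vendored code:

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

var (
	errValueNotSet  = errors.New("value not set")
	errInvalidValue = errors.New("invalid value")
)

func itemIndexToBytes(value uint64) []byte {
	return binary.LittleEndian.AppendUint64([]byte{}, value)
}

func bytesToItemIndex(buf []byte) (uint64, error) {
	if buf == nil {
		return 0, errValueNotSet // a missing key signals a fresh queue
	}
	if len(buf) < 8 { // a uint64 occupies exactly 8 bytes
		return 0, errInvalidValue
	}
	return binary.LittleEndian.Uint64(buf), nil
}

func main() {
	buf := itemIndexToBytes(42)
	idx, err := bytesToItemIndex(buf)
	fmt.Println(idx, err) // 42 <nil>

	_, err = bytesToItemIndex(nil)
	fmt.Println(errors.Is(err, errValueNotSet)) // true: treated as "new queue"
}
```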
-// Consume applies the provided function on the head of the queue.
-// The call blocks until there is an item available or the queue is stopped.
-// The function returns true when an item is consumed or false if the queue is stopped.
-func (pq *persistentQueue[T]) Consume(consumeFunc func(context.Context, T) error) bool {
-    for {
-        var (
-            req                  T
-            onProcessingFinished func(error)
-            consumed             bool
-        )
-
-        // If we are stopped we still process all the other events in the channel before, but we
-        // return fast in the `getNextItem`, so we will free the channel fast and get to the stop.
-        _, ok := pq.sizedChannel.pop(func(permanentQueueEl) int64 {
-            req, onProcessingFinished, consumed = pq.getNextItem(context.Background())
-            if !consumed {
-                return 0
-            }
-            return pq.set.Sizer.Sizeof(req)
-        })
-        if !ok {
-            return false
-        }
-        if consumed {
-            onProcessingFinished(consumeFunc(context.Background(), req))
-            return true
-        }
-    }
-}
-
-func (pq *persistentQueue[T]) Shutdown(ctx context.Context) error {
-    // If the queue is not initialized, there is nothing to shut down.
-    if pq.client == nil {
-        return nil
-    }
-
-    pq.mu.Lock()
-    defer pq.mu.Unlock()
-    backupErr := pq.backupQueueSize(ctx)
-    pq.sizedChannel.shutdown()
-    // Mark this queue as stopped, so consumers don't start any more work.
-    pq.stopped = true
-    return multierr.Combine(backupErr, pq.unrefClient(ctx))
-}
-
-// backupQueueSize writes the current queue size to storage. The value is used to recover the queue size
-// in case the collector is killed.
-func (pq *persistentQueue[T]) backupQueueSize(ctx context.Context) error {
-    // No need to write the queue size if the queue is sized by the number of requests.
-    // That information is already stored as the difference between the read and write indexes.
-    if pq.isRequestSized {
-        return nil
-    }
-
-    return pq.client.Set(ctx, queueSizeKey, itemIndexToBytes(uint64(pq.Size())))
-}
-
-// unrefClient unrefs the client, and closes it if no more references remain. Callers MUST hold the mutex.
-// This is needed because consumers of the queue may still process the requests while the queue is shutting down or immediately after.
-func (pq *persistentQueue[T]) unrefClient(ctx context.Context) error {
-    pq.refClient--
-    if pq.refClient == 0 {
-        return pq.client.Close(ctx)
-    }
-    return nil
-}
-
-// Offer inserts the specified element into this queue if it is possible to do so immediately
-// without violating capacity restrictions. On success, it returns no error.
-// It returns ErrQueueIsFull if no space is currently available.
-func (pq *persistentQueue[T]) Offer(ctx context.Context, req T) error {
-    pq.mu.Lock()
-    defer pq.mu.Unlock()
-    return pq.putInternal(ctx, req)
-}
-
-// putInternal is the internal version that requires the caller to hold the mutex lock.
-func (pq *persistentQueue[T]) putInternal(ctx context.Context, req T) error {
-    err := pq.sizedChannel.push(permanentQueueEl{}, pq.set.Sizer.Sizeof(req), func() error {
-        itemKey := getItemKey(pq.writeIndex)
-        newIndex := pq.writeIndex + 1
-
-        reqBuf, err := pq.set.Marshaler(req)
-        if err != nil {
-            return err
-        }
-
-        // Carry out a transaction where we both add the item and update the write index
-        ops := []storage.Operation{
-            storage.SetOperation(writeIndexKey, itemIndexToBytes(newIndex)),
-            storage.SetOperation(itemKey, reqBuf),
-        }
-        if storageErr := pq.client.Batch(ctx, ops...); storageErr != nil {
-            return storageErr
-        }
-
-        pq.writeIndex = newIndex
-        return nil
-    })
-    if err != nil {
-        return err
-    }
-
-    // Back up the queue size to storage every 10 writes. The stored value is used to recover the queue size
-    // in case the collector is killed. The recovered queue size is allowed to be inaccurate.
-    if (pq.writeIndex % 10) == 5 {
-        if err := pq.backupQueueSize(ctx); err != nil {
-            pq.logger.Error("Error writing queue size to storage", zap.Error(err))
-        }
-    }
-
-    return nil
-}
-
-// getNextItem pulls the next available item from the persistent storage along with a callback function that should be
-// called after the item is processed to clean up the storage. If no new item is available, returns false.
-func (pq *persistentQueue[T]) getNextItem(ctx context.Context) (T, func(error), bool) {
-    pq.mu.Lock()
-    defer pq.mu.Unlock()
-
-    var request T
-
-    if pq.stopped {
-        return request, nil, false
-    }
-
-    if pq.readIndex == pq.writeIndex {
-        return request, nil, false
-    }
-
-    index := pq.readIndex
-    // Increase here, so even if errors happen below, it always iterates
-    pq.readIndex++
-    pq.currentlyDispatchedItems = append(pq.currentlyDispatchedItems, index)
-    getOp := storage.GetOperation(getItemKey(index))
-    err := pq.client.Batch(ctx,
-        storage.SetOperation(readIndexKey, itemIndexToBytes(pq.readIndex)),
-        storage.SetOperation(currentlyDispatchedItemsKey, itemIndexArrayToBytes(pq.currentlyDispatchedItems)),
-        getOp)
-
-    if err == nil {
-        request, err = pq.set.Unmarshaler(getOp.Value)
-    }
-
-    if err != nil {
-        pq.logger.Debug("Failed to dispatch item", zap.Error(err))
-        // We need to make sure that the currently dispatched items list is cleaned
-        if err = pq.itemDispatchingFinish(ctx, index); err != nil {
-            pq.logger.Error("Error deleting item from queue", zap.Error(err))
-        }
-
-        return request, nil, false
-    }
-
-    // Increase the reference count, so the client is not closed while the request is being processed.
-    // The client cannot be closed because we hold the lock since last we checked `stopped`.
-    pq.refClient++
-    return request, func(consumeErr error) {
-        // Delete the item from the persistent storage after it was processed.
-        pq.mu.Lock()
-        // Always unref the client even if the consumer is shut down because we always ref it for every valid request.
-        defer func() {
-            if err = pq.unrefClient(ctx); err != nil {
-                pq.logger.Error("Error closing the storage client", zap.Error(err))
-            }
-            pq.mu.Unlock()
-        }()
-
-        if experr.IsShutdownErr(consumeErr) {
-            // The queue is shutting down, don't mark the item as dispatched, so it's picked up again after restart.
-            // TODO: Handle partially delivered requests by updating their values in the storage.
-            return
-        }
-
-        if err = pq.itemDispatchingFinish(ctx, index); err != nil {
-            pq.logger.Error("Error deleting item from queue", zap.Error(err))
-        }
-
-        // Back up the queue size to storage every 10 reads. The stored value is used to recover the queue size
-        // in case the collector is killed. The recovered queue size is allowed to be inaccurate.
-        if (pq.readIndex % 10) == 0 {
-            if qsErr := pq.backupQueueSize(ctx); qsErr != nil {
-                pq.logger.Error("Error writing queue size to storage", zap.Error(qsErr))
-            }
-        }
-
-        // Ensure the used size and the channel size are in sync.
-        pq.sizedChannel.syncSize()
-
-    }, true
-}
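getNextItem above appends every in-flight index to currentlyDispatchedItems before handing the request to a consumer, and itemDispatchingFinish (below) later removes that index with a swap-with-last trick rather than shifting the whole slice. A minimal standalone sketch of that removal idiom; the function name and values are illustrative, not part of the vendored code:

```go
package main

import "fmt"

// removeBySwap deletes the first occurrence of v from s without preserving
// order: the matched slot is overwritten with the last element and the slice
// is shortened by one, the same O(1) idiom itemDispatchingFinish uses for
// the currently-dispatched index list.
func removeBySwap(s []uint64, v uint64) []uint64 {
	for i := range s {
		if s[i] == v {
			s[i] = s[len(s)-1]
			return s[:len(s)-1]
		}
	}
	return s // v not present; slice unchanged
}

func main() {
	fmt.Println(removeBySwap([]uint64{4, 5, 6, 7}, 5)) // [4 7 6]
}
```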
-
-// retrieveAndEnqueueNotDispatchedReqs gets the items for which sending was not finished, cleans the storage
-// and moves the items to the back of the queue.
-func (pq *persistentQueue[T]) retrieveAndEnqueueNotDispatchedReqs(ctx context.Context) {
-    var dispatchedItems []uint64
-
-    pq.mu.Lock()
-    defer pq.mu.Unlock()
-    pq.logger.Debug("Checking if there are items left for dispatch by consumers")
-    itemKeysBuf, err := pq.client.Get(ctx, currentlyDispatchedItemsKey)
-    if err == nil {
-        dispatchedItems, err = bytesToItemIndexArray(itemKeysBuf)
-    }
-    if err != nil {
-        pq.logger.Error("Could not fetch items left for dispatch by consumers", zap.Error(err))
-        return
-    }
-
-    if len(dispatchedItems) == 0 {
-        pq.logger.Debug("No items left for dispatch by consumers")
-        return
-    }
-
-    pq.logger.Info("Fetching items left for dispatch by consumers", zap.Int(zapNumberOfItems,
-        len(dispatchedItems)))
-    retrieveBatch := make([]storage.Operation, len(dispatchedItems))
-    cleanupBatch := make([]storage.Operation, len(dispatchedItems))
-    for i, it := range dispatchedItems {
-        key := getItemKey(it)
-        retrieveBatch[i] = storage.GetOperation(key)
-        cleanupBatch[i] = storage.DeleteOperation(key)
-    }
-    retrieveErr := pq.client.Batch(ctx, retrieveBatch...)
-    cleanupErr := pq.client.Batch(ctx, cleanupBatch...)
-
-    if cleanupErr != nil {
-        pq.logger.Debug("Failed cleaning items left by consumers", zap.Error(cleanupErr))
-    }
-
-    if retrieveErr != nil {
-        pq.logger.Warn("Failed retrieving items left by consumers", zap.Error(retrieveErr))
-        return
-    }
-
-    errCount := 0
-    for _, op := range retrieveBatch {
-        if op.Value == nil {
-            pq.logger.Warn("Failed retrieving item", zap.String(zapKey, op.Key), zap.Error(errValueNotSet))
-            continue
-        }
-        req, err := pq.set.Unmarshaler(op.Value)
-        // If an error happened or the item is nil, it is simply ignored
-        if err != nil {
-            pq.logger.Warn("Failed unmarshalling item", zap.String(zapKey, op.Key), zap.Error(err))
-            continue
-        }
-        if pq.putInternal(ctx, req) != nil {
-            errCount++
-        }
-    }
-
-    if errCount > 0 {
-        pq.logger.Error("Errors occurred while moving items for dispatching back to queue",
-            zap.Int(zapNumberOfItems, len(retrieveBatch)), zap.Int(zapErrorCount, errCount))
-    } else {
-        pq.logger.Info("Moved items for dispatching back to queue",
-            zap.Int(zapNumberOfItems, len(retrieveBatch)))
-    }
-}
-
-// itemDispatchingFinish removes the item from the list of currently dispatched items and deletes it from the persistent queue
-func (pq *persistentQueue[T]) itemDispatchingFinish(ctx context.Context, index uint64) error {
-    lenCDI := len(pq.currentlyDispatchedItems)
-    for i := 0; i < lenCDI; i++ {
-        if pq.currentlyDispatchedItems[i] == index {
-            pq.currentlyDispatchedItems[i] = pq.currentlyDispatchedItems[lenCDI-1]
-            pq.currentlyDispatchedItems = pq.currentlyDispatchedItems[:lenCDI-1]
-            break
-        }
-    }
-
-    setOp := storage.SetOperation(currentlyDispatchedItemsKey, itemIndexArrayToBytes(pq.currentlyDispatchedItems))
-    deleteOp := storage.DeleteOperation(getItemKey(index))
-    if err := pq.client.Batch(ctx, setOp, deleteOp); err != nil {
-        // got an error, try to gracefully handle it
-        pq.logger.Warn("Failed updating currently dispatched items, trying to delete the item first",
-            zap.Error(err))
-    } else {
-        // Everything ok, exit
-        return nil
-    }
-
-    if err := pq.client.Batch(ctx, deleteOp); err != nil {
-        // Return an error here, as this indicates an issue with the underlying storage medium
-        return fmt.Errorf("failed deleting item from queue, got error from storage: %w", err)
-    }
-
-    if err := pq.client.Batch(ctx, setOp); err != nil {
-        // even if this fails, we still have the right dispatched items in memory
-        // at worst, we'll have the wrong list in storage, and we'll discard the nonexistent items during startup
-        return fmt.Errorf("failed updating currently dispatched items, but deleted item successfully: %w", err)
-    }
-
-    return nil
-}
-
-func toStorageClient(ctx context.Context, storageID component.ID, host component.Host, ownerID component.ID, signal component.DataType) (storage.Client, error) {
-    ext, found := host.GetExtensions()[storageID]
-    if !found {
-        return nil, errNoStorageClient
-    }
-
-    storageExt, ok := ext.(storage.Extension)
-    if !ok {
-        return nil, errWrongExtensionType
-    }
-
-    return storageExt.GetClient(ctx, component.KindExporter, ownerID, signal.String())
-}
-
-func getItemKey(index uint64) string {
-    return strconv.FormatUint(index, 10)
-}
-
-func itemIndexToBytes(value uint64) []byte {
-    return binary.LittleEndian.AppendUint64([]byte{}, value)
-}
-
-func bytesToItemIndex(buf []byte) (uint64, error) {
-    if buf == nil {
-        return uint64(0), errValueNotSet
-    }
-    // The sizeof uint64 in binary is 8.
-    if len(buf) < 8 {
-        return 0, errInvalidValue
-    }
-    return binary.LittleEndian.Uint64(buf), nil
-}
-
-func itemIndexArrayToBytes(arr []uint64) []byte {
-    size := len(arr)
-    buf := make([]byte, 0, 4+size*8)
-    buf = binary.LittleEndian.AppendUint32(buf, uint32(size))
-    for _, item := range arr {
-        buf = binary.LittleEndian.AppendUint64(buf, item)
-    }
-    return buf
-}
-
-func bytesToItemIndexArray(buf []byte) ([]uint64, error) {
-    if len(buf) == 0 {
-        return nil, nil
-    }
-
-    // The sizeof uint32 in binary is 4.
-    if len(buf) < 4 {
-        return nil, errInvalidValue
-    }
-    size := int(binary.LittleEndian.Uint32(buf))
-    if size == 0 {
-        return nil, nil
-    }
-
-    buf = buf[4:]
-    // The sizeof uint64 in binary is 8, so we need to have size*8 bytes.
-    if len(buf) < size*8 {
-        return nil, errInvalidValue
-    }
-
-    val := make([]uint64, size)
-    for i := 0; i < size; i++ {
-        val[i] = binary.LittleEndian.Uint64(buf)
-        buf = buf[8:]
-    }
-    return val, nil
-}
diff --git a/vendor/go.opentelemetry.io/collector/exporter/internal/queue/queue.go b/vendor/go.opentelemetry.io/collector/exporter/internal/queue/queue.go
deleted file mode 100644
index 35bc504579e..00000000000
--- a/vendor/go.opentelemetry.io/collector/exporter/internal/queue/queue.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// Copyright (c) 2019 The Jaeger Authors.
-// Copyright (c) 2017 Uber Technologies, Inc.
-// SPDX-License-Identifier: Apache-2.0
-
-package queue // import "go.opentelemetry.io/collector/exporter/internal/queue"
-
-import (
-    "context"
-    "errors"
-
-    "go.opentelemetry.io/collector/component"
-)
-
-var (
-    // ErrQueueIsFull is the error returned when an item is offered to the Queue and the queue is full.
-    ErrQueueIsFull = errors.New("sending queue is full")
-)
-
-// Queue defines a producer-consumer exchange which can be backed by e.g. the memory-based ring buffer queue
-// (boundedMemoryQueue) or by a disk-based queue (persistentQueue)
-type Queue[T any] interface {
-    component.Component
-    // Offer inserts the specified element into this queue if it is possible to do so immediately
-    // without violating capacity restrictions. On success, it returns no error.
-    // It returns ErrQueueIsFull if no space is currently available.
-    Offer(ctx context.Context, item T) error
-    // Consume applies the provided function on the head of the queue.
-    // The call blocks until there is an item available or the queue is stopped.
-    // The function returns true when an item is consumed or false if the queue is stopped.
-    Consume(func(ctx context.Context, item T) error) bool
-    // Size returns the current size of the queue
-    Size() int
-    // Capacity returns the capacity of the queue.
-    Capacity() int
-}
-
-type itemsCounter interface {
-    ItemsCount() int
-}
-
-// Sizer is an interface that returns the size of the given element.
-type Sizer[T any] interface {
-    Sizeof(T) int64
-}
-
-// ItemsSizer is a Sizer implementation that returns the size of a queue element as the number of items it contains.
-type ItemsSizer[T itemsCounter] struct{}
-
-func (is *ItemsSizer[T]) Sizeof(el T) int64 {
-    return int64(el.ItemsCount())
-}
-
-// RequestSizer is a Sizer implementation that returns the size of a queue element as one request.
-type RequestSizer[T any] struct{}
-
-func (rs *RequestSizer[T]) Sizeof(T) int64 {
-    return 1
-}
diff --git a/vendor/go.opentelemetry.io/collector/exporter/internal/queue/sized_channel.go b/vendor/go.opentelemetry.io/collector/exporter/internal/queue/sized_channel.go
deleted file mode 100644
index 1702a38ac2f..00000000000
--- a/vendor/go.opentelemetry.io/collector/exporter/internal/queue/sized_channel.go
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package queue // import "go.opentelemetry.io/collector/exporter/internal/queue"
-
-import "sync/atomic"
-
-// sizedChannel is a channel wrapper for sized elements with a capacity set to a total size of all the elements.
-// The channel will accept elements until the total size of the elements reaches the capacity.
-type sizedChannel[T any] struct {
-    used *atomic.Int64
-
-    // We need to store the capacity in a separate field because the capacity of the channel can be higher.
-    // It happens when we restore a persistent queue from a disk that is bigger than the pre-configured capacity.
-    cap int64
-    ch  chan T
-}
-
-// newSizedChannel creates a channel for sized elements. capacity bounds the total size of all elements in the
-// channel; the capacity of the underlying channel is raised to len(els) when a restored queue is larger.
-// Optionally, the channel can be preloaded with the elements and their total size.
-func newSizedChannel[T any](capacity int64, els []T, totalSize int64) *sizedChannel[T] {
-    used := &atomic.Int64{}
-    used.Store(totalSize)
-
-    chCap := capacity
-    if chCap < int64(len(els)) {
-        chCap = int64(len(els))
-    }
-
-    ch := make(chan T, chCap)
-    for _, el := range els {
-        ch <- el
-    }
-
-    return &sizedChannel[T]{
-        used: used,
-        cap:  capacity,
-        ch:   ch,
-    }
-}
-
-// push puts the element into the queue with the given size if there is enough capacity.
-// Returns an error if the queue is full. The callback is called before the element is committed to the queue.
-// If the callback returns an error, the element is not put into the queue and the error is returned.
-// The size of the element MUST be positive.
-func (vcq *sizedChannel[T]) push(el T, size int64, callback func() error) error {
-    if vcq.used.Add(size) > vcq.cap {
-        vcq.used.Add(-size)
-        return ErrQueueIsFull
-    }
-    if callback != nil {
-        if err := callback(); err != nil {
-            vcq.used.Add(-size)
-            return err
-        }
-    }
-    vcq.ch <- el
-    return nil
-}
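push's capacity check is the noteworthy part: it optimistically adds the size with a single atomic Add and rolls back when the cap is exceeded, so the happy path takes no lock. A stripped-down standalone sketch of just that pattern, with illustrative names:

```go
package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

var errFull = errors.New("queue is full")

// reserve optimistically adds size to used and rolls back if the cap is
// exceeded: exactly one atomic Add on the happy path.
func reserve(used *atomic.Int64, capacity, size int64) error {
	if used.Add(size) > capacity {
		used.Add(-size) // roll back the optimistic reservation
		return errFull
	}
	return nil
}

func main() {
	var used atomic.Int64
	fmt.Println(reserve(&used, 10, 7)) // <nil>: 7/10 used
	fmt.Println(reserve(&used, 10, 7)) // queue is full: 14 > 10, rolled back
	fmt.Println(used.Load())           // 7
}
```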
-// pop removes the element from the queue and returns it.
-// The call blocks until there is an item available or the queue is stopped.
-// The function returns true when an item is consumed or false if the queue is stopped and emptied.
-// The callback is called before the element is removed from the queue. It must return the size of the element.
-func (vcq *sizedChannel[T]) pop(callback func(T) (size int64)) (T, bool) {
-    el, ok := <-vcq.ch
-    if !ok {
-        return el, false
-    }
-
-    size := callback(el)
-
-    // The used size and the channel size might not be in sync with the queue when it is restored from disk,
-    // because we don't flush the current queue size to disk on every read/write.
-    // In that case we need to make sure it doesn't go below 0.
-    if vcq.used.Add(-size) < 0 {
-        vcq.used.Store(0)
-    }
-    return el, true
-}
-
-// syncSize updates the used size to 0 if the queue is empty.
-// The caller must ensure that this call is not called concurrently with push.
-// It's used by the persistent queue to ensure the used value correctly reflects reality, which may not
-// always be the case when the queue size is restored from disk after a crash.
-func (vcq *sizedChannel[T]) syncSize() {
-    if len(vcq.ch) == 0 {
-        vcq.used.Store(0)
-    }
-}
-
-// shutdown closes the queue channel to initiate draining of the queue.
-func (vcq *sizedChannel[T]) shutdown() {
-    close(vcq.ch)
-}
-
-func (vcq *sizedChannel[T]) Size() int {
-    return int(vcq.used.Load())
-}
-
-func (vcq *sizedChannel[T]) Capacity() int {
-    return int(vcq.cap)
-}
diff --git a/vendor/go.opentelemetry.io/collector/exporter/internal/request.go b/vendor/go.opentelemetry.io/collector/exporter/internal/request.go
new file mode 100644
index 00000000000..88a914b9e36
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/exporter/internal/request.go
@@ -0,0 +1,45 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/collector/exporter/internal"
+
+import (
+    "context"
+
+    "go.opentelemetry.io/collector/exporter/exporterbatcher"
+)
+
+// Request represents a single request that can be sent to an external endpoint.
+// Experimental: This API is at the early stage of development and may change without backward compatibility
+// until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved.
+type Request interface {
+    // Export exports the request to an external endpoint.
+    Export(ctx context.Context) error
+    // ItemsCount returns the number of basic items in the request, where an item is the smallest piece of data
+    // that can be sent. For example, for the OTLP exporter, this value represents the number of spans,
+    // metric data points or log records.
+    ItemsCount() int
+    // MergeSplit is a function that merges and/or splits this request with another one into multiple requests based on the
+    // configured limit provided in MaxSizeConfig.
+    // MergeSplit does not split if none of the fields in MaxSizeConfig are initialized (zero).
+    // All the returned requests MUST have a number of items that does not exceed the maximum number of items.
+    // The size of the last returned request MUST be less than or equal to the size of any other returned request.
+    // The original request MUST not be mutated if an error is returned after mutation or if the exporter is
+    // marked as not mutable. The length of the returned slice MUST not be 0.
+    // Experimental: This API is at the early stage of development and may change without backward compatibility
+    // until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved.
+    MergeSplit(context.Context, exporterbatcher.MaxSizeConfig, Request) ([]Request, error)
+}
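To make the MergeSplit contract concrete: merging two requests and splitting on MaxSizeItems should yield chunks that never exceed the limit, with the smallest chunk last. A hedged, standalone sketch of the item-count arithmetic only; it does not use the internal package above, and `splitCounts` is an illustrative name:

```go
package main

import "fmt"

// splitCounts sketches the MergeSplit contract for item counts only:
// merge two requests' items, then cut into chunks of at most maxItems,
// so every chunk is full except possibly the last (the smallest).
func splitCounts(a, b, maxItems int) []int {
	total := a + b
	if maxItems == 0 || total <= maxItems { // zero max means "do not split"
		return []int{total}
	}
	var out []int
	for total > maxItems {
		out = append(out, maxItems)
		total -= maxItems
	}
	return append(out, total)
}

func main() {
	fmt.Println(splitCounts(3, 4, 0))  // [7]  no limit configured
	fmt.Println(splitCounts(3, 4, 5))  // [5 2] last chunk is the smallest
	fmt.Println(splitCounts(8, 20, 8)) // [8 8 8 4]
}
```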
+
+// RequestErrorHandler is an optional interface that can be implemented by Request to provide a way to handle partial
+// temporary failures. For example, if some items failed to process and can be retried, this interface allows returning
+// a new Request that contains the items left to be sent. Otherwise, the original Request should be returned.
+// If not implemented, the original Request will be returned assuming the error is applied to the whole Request.
+// Experimental: This API is at the early stage of development and may change without backward compatibility
+// until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved.
+type RequestErrorHandler interface {
+    Request
+    // OnError returns a new Request that may contain the items left to be sent if some items failed to process and
+    // can be retried. Otherwise, it should return the original Request.
+    OnError(error) Request
+}
diff --git a/vendor/go.opentelemetry.io/collector/exporter/internal/requesttest/fake_request.go b/vendor/go.opentelemetry.io/collector/exporter/internal/requesttest/fake_request.go
new file mode 100644
index 00000000000..d5e8e511f2c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/exporter/internal/requesttest/fake_request.go
@@ -0,0 +1,122 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package requesttest // import "go.opentelemetry.io/collector/exporter/internal/requesttest"
+
+import (
+    "context"
+    "sync/atomic"
+    "time"
+
+    "go.opentelemetry.io/collector/exporter/exporterbatcher"
+    "go.opentelemetry.io/collector/exporter/internal"
+)
+
+type Sink struct {
+    requestsCount *atomic.Int64
+    itemsCount    *atomic.Int64
+}
+
+func (s *Sink) RequestsCount() int64 {
+    return s.requestsCount.Load()
+}
+
+func (s *Sink) ItemsCount() int64 {
+    return s.itemsCount.Load()
+}
+
+func NewSink() *Sink {
+    return &Sink{
+        requestsCount: new(atomic.Int64),
+        itemsCount:    new(atomic.Int64),
+    }
+}
+
+type FakeRequest struct {
+    Items     int
+    Sink      *Sink
+    ExportErr error
+    MergeErr  error
+    Delay     time.Duration
+}
+
+func (r *FakeRequest) Export(ctx context.Context) error {
+    select {
+    case <-ctx.Done():
+        return ctx.Err()
+    case <-time.After(r.Delay):
+    }
+    if r.ExportErr != nil {
+        return r.ExportErr
+    }
+    if r.Sink != nil {
+        r.Sink.requestsCount.Add(1)
+        r.Sink.itemsCount.Add(int64(r.Items))
+    }
+    return nil
+}
+
+func (r *FakeRequest) ItemsCount() int {
+    return r.Items
+}
+
+func (r *FakeRequest) MergeSplit(_ context.Context, cfg exporterbatcher.MaxSizeConfig, r2 internal.Request) ([]internal.Request, error) {
+    if r.MergeErr != nil {
+        return nil, r.MergeErr
+    }
+
+    maxItems := cfg.MaxSizeItems
+    if maxItems == 0 {
+        fr2 := r2.(*FakeRequest)
+        if fr2.MergeErr != nil {
+            return nil, fr2.MergeErr
+        }
+        return []internal.Request{
+            &FakeRequest{
+                Items:     r.Items + fr2.Items,
+                Sink:      r.Sink,
+                ExportErr: fr2.ExportErr,
+                Delay:     r.Delay + fr2.Delay,
+            },
+        }, nil
+    }
+
+    var fr2 *FakeRequest
+    if r2 == nil {
+        fr2 = &FakeRequest{Sink: r.Sink, ExportErr: r.ExportErr, Delay: r.Delay}
+    } else {
+        if r2.(*FakeRequest).MergeErr != nil {
+            return nil, r2.(*FakeRequest).MergeErr
+        }
+        fr2 = r2.(*FakeRequest)
+        fr2 = &FakeRequest{Items: fr2.Items, Sink: fr2.Sink, ExportErr: fr2.ExportErr, Delay: fr2.Delay}
+    }
+    var res []internal.Request
+
+    // fill fr1 to maxItems if it's not nil
+
+    r = &FakeRequest{Items: r.Items, Sink: r.Sink, ExportErr: r.ExportErr, Delay: r.Delay}
+
if fr2.Items <= maxItems-r.Items { + r.Items += fr2.Items + if fr2.ExportErr != nil { + r.ExportErr = fr2.ExportErr + } + return []internal.Request{r}, nil + } + // if split is needed, we don't propagate ExportErr from fr2 to fr1 to test more cases + fr2.Items -= maxItems - r.Items + r.Items = maxItems + res = append(res, r) + + // split fr2 to maxItems + for { + if fr2.Items <= maxItems { + res = append(res, &FakeRequest{Items: fr2.Items, Sink: fr2.Sink, ExportErr: fr2.ExportErr, Delay: fr2.Delay}) + break + } + res = append(res, &FakeRequest{Items: maxItems, Sink: fr2.Sink, ExportErr: fr2.ExportErr, Delay: fr2.Delay}) + fr2.Items -= maxItems + } + + return res, nil +} diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/README.md b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/README.md index 8926a14369b..bcee108135f 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/README.md +++ b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/README.md @@ -3,15 +3,19 @@ | Status | | | ------------- |-----------| -| Stability | [beta]: logs | +| Stability | [development]: profiles | +| | [beta]: logs | | | [stable]: traces, metrics | -| Distributions | [core], [contrib] | -| Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Aexporter%2Fotlp%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Aexporter%2Fotlp) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Aexporter%2Fotlp%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Aexporter%2Fotlp) | +| Distributions | [core], [contrib], [k8s], [otlp] | +| Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector?query=is%3Aissue%20is%3Aopen%20label%3Aexporter%2Fotlp%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector/issues?q=is%3Aopen+is%3Aissue+label%3Aexporter%2Fotlp) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector?query=is%3Aissue%20is%3Aclosed%20label%3Aexporter%2Fotlp%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector/issues?q=is%3Aclosed+is%3Aissue+label%3Aexporter%2Fotlp) | -[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta -[stable]: https://github.com/open-telemetry/opentelemetry-collector#stable +[development]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#development +[beta]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#beta +[stable]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#stable [core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol [contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib +[k8s]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-k8s +[otlp]: 
https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-otlp Export data via gRPC using [OTLP]( @@ -58,4 +62,4 @@ Several helper files are leveraged to provide additional capabilities automatica - [gRPC settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configgrpc/README.md) - [TLS and mTLS settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md) -- [Queuing, retry and timeout settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md) +- [Queuing, batching, retry and timeout settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md) diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/cfg-schema.yaml b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/cfg-schema.yaml index 073ec5707ee..a57fd2fd230 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/cfg-schema.yaml +++ b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/cfg-schema.yaml @@ -7,7 +7,7 @@ fields: doc: | Timeout is the timeout for every attempt to send data to the backend. - name: sending_queue - type: exporterhelper.QueueSettings + type: exporterhelper.QueueConfig kind: struct fields: - name: enabled @@ -68,8 +68,8 @@ fields: - name: endpoint kind: string doc: | - The target to which the exporter is going to send traces or metrics, - using the gRPC protocol. The valid syntax is described at + The target to which the exporter is going to send traces, metrics, logs or + profiles using the gRPC protocol. The valid syntax is described at https://github.com/grpc/grpc/blob/master/doc/naming.md. - name: compression kind: string diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/config.go b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/config.go index 62956f293ed..b9697125377 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/config.go +++ b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/config.go @@ -7,20 +7,26 @@ import ( "errors" "fmt" "net" + "regexp" "strconv" "strings" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configgrpc" "go.opentelemetry.io/collector/config/configretry" + "go.opentelemetry.io/collector/exporter/exporterbatcher" "go.opentelemetry.io/collector/exporter/exporterhelper" ) // Config defines configuration for OTLP exporter. type Config struct { - exporterhelper.TimeoutSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct. - QueueConfig exporterhelper.QueueSettings `mapstructure:"sending_queue"` - RetryConfig configretry.BackOffConfig `mapstructure:"retry_on_failure"` + exporterhelper.TimeoutConfig `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct. + exporterhelper.QueueConfig `mapstructure:"sending_queue"` + RetryConfig configretry.BackOffConfig `mapstructure:"retry_on_failure"` + + // Experimental: This configuration is at the early stage of development and may change without backward compatibility + // until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved + BatcherConfig exporterbatcher.Config `mapstructure:"batcher"` configgrpc.ClientConfig `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct. 
} @@ -49,8 +55,9 @@ func (c *Config) sanitizedEndpoint() string { return strings.TrimPrefix(c.Endpoint, "http://") case strings.HasPrefix(c.Endpoint, "https://"): return strings.TrimPrefix(c.Endpoint, "https://") - case strings.HasPrefix(c.Endpoint, "dns:///"): - return strings.TrimPrefix(c.Endpoint, "dns:///") + case strings.HasPrefix(c.Endpoint, "dns://"): + r := regexp.MustCompile("^dns://[/]?") + return r.ReplaceAllString(c.Endpoint, "") default: return c.Endpoint } diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/doc.go b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/doc.go index 3f09a753285..9f87b392974 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/doc.go +++ b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/doc.go @@ -3,5 +3,5 @@ //go:generate mdatagen metadata.yaml -// Package otlpexporter exports data by using the OTLP format to a gPRC endpoint. +// Package otlpexporter exports data by using the OTLP format to a gRPC endpoint. package otlpexporter // import "go.opentelemetry.io/collector/exporter/otlpexporter" diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/factory.go b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/factory.go index 8a72aab8cdc..52133573de0 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/factory.go +++ b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/factory.go @@ -13,26 +13,34 @@ import ( "go.opentelemetry.io/collector/config/configretry" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/exporterbatcher" "go.opentelemetry.io/collector/exporter/exporterhelper" + "go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper" "go.opentelemetry.io/collector/exporter/otlpexporter/internal/metadata" + "go.opentelemetry.io/collector/exporter/xexporter" ) // NewFactory creates a factory for OTLP exporter. 
func NewFactory() exporter.Factory { - return exporter.NewFactory( + return xexporter.NewFactory( metadata.Type, createDefaultConfig, - exporter.WithTraces(createTracesExporter, metadata.TracesStability), - exporter.WithMetrics(createMetricsExporter, metadata.MetricsStability), - exporter.WithLogs(createLogsExporter, metadata.LogsStability), + xexporter.WithTraces(createTraces, metadata.TracesStability), + xexporter.WithMetrics(createMetrics, metadata.MetricsStability), + xexporter.WithLogs(createLogs, metadata.LogsStability), + xexporter.WithProfiles(createProfilesExporter, metadata.ProfilesStability), ) } func createDefaultConfig() component.Config { + batcherCfg := exporterbatcher.NewDefaultConfig() + batcherCfg.Enabled = false + return &Config{ - TimeoutSettings: exporterhelper.NewDefaultTimeoutSettings(), - RetryConfig: configretry.NewDefaultBackOffConfig(), - QueueConfig: exporterhelper.NewDefaultQueueSettings(), + TimeoutConfig: exporterhelper.NewDefaultTimeoutConfig(), + RetryConfig: configretry.NewDefaultBackOffConfig(), + QueueConfig: exporterhelper.NewDefaultQueueConfig(), + BatcherConfig: batcherCfg, ClientConfig: configgrpc.ClientConfig{ Headers: map[string]configopaque.String{}, // Default to gzip compression @@ -43,54 +51,77 @@ func createDefaultConfig() component.Config { } } -func createTracesExporter( +func createTraces( ctx context.Context, - set exporter.CreateSettings, + set exporter.Settings, cfg component.Config, ) (exporter.Traces, error) { oce := newExporter(cfg, set) oCfg := cfg.(*Config) - return exporterhelper.NewTracesExporter(ctx, set, cfg, + return exporterhelper.NewTraces(ctx, set, cfg, oce.pushTraces, exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), - exporterhelper.WithTimeout(oCfg.TimeoutSettings), + exporterhelper.WithTimeout(oCfg.TimeoutConfig), exporterhelper.WithRetry(oCfg.RetryConfig), exporterhelper.WithQueue(oCfg.QueueConfig), + exporterhelper.WithBatcher(oCfg.BatcherConfig), exporterhelper.WithStart(oce.start), - exporterhelper.WithShutdown(oce.shutdown)) + exporterhelper.WithShutdown(oce.shutdown), + ) } -func createMetricsExporter( +func createMetrics( ctx context.Context, - set exporter.CreateSettings, + set exporter.Settings, cfg component.Config, ) (exporter.Metrics, error) { oce := newExporter(cfg, set) oCfg := cfg.(*Config) - return exporterhelper.NewMetricsExporter(ctx, set, cfg, + return exporterhelper.NewMetrics(ctx, set, cfg, oce.pushMetrics, exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), - exporterhelper.WithTimeout(oCfg.TimeoutSettings), + exporterhelper.WithTimeout(oCfg.TimeoutConfig), exporterhelper.WithRetry(oCfg.RetryConfig), exporterhelper.WithQueue(oCfg.QueueConfig), + exporterhelper.WithBatcher(oCfg.BatcherConfig), exporterhelper.WithStart(oce.start), exporterhelper.WithShutdown(oce.shutdown), ) } -func createLogsExporter( +func createLogs( ctx context.Context, - set exporter.CreateSettings, + set exporter.Settings, cfg component.Config, ) (exporter.Logs, error) { oce := newExporter(cfg, set) oCfg := cfg.(*Config) - return exporterhelper.NewLogsExporter(ctx, set, cfg, + return exporterhelper.NewLogs(ctx, set, cfg, oce.pushLogs, exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), - exporterhelper.WithTimeout(oCfg.TimeoutSettings), + exporterhelper.WithTimeout(oCfg.TimeoutConfig), + exporterhelper.WithRetry(oCfg.RetryConfig), + exporterhelper.WithQueue(oCfg.QueueConfig), + exporterhelper.WithBatcher(oCfg.BatcherConfig), + 
exporterhelper.WithStart(oce.start), + exporterhelper.WithShutdown(oce.shutdown), + ) +} + +func createProfilesExporter( + ctx context.Context, + set exporter.Settings, + cfg component.Config, +) (xexporter.Profiles, error) { + oce := newExporter(cfg, set) + oCfg := cfg.(*Config) + return xexporterhelper.NewProfilesExporter(ctx, set, cfg, + oce.pushProfiles, + exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), + exporterhelper.WithTimeout(oCfg.TimeoutConfig), exporterhelper.WithRetry(oCfg.RetryConfig), exporterhelper.WithQueue(oCfg.QueueConfig), + exporterhelper.WithBatcher(oCfg.BatcherConfig), exporterhelper.WithStart(oce.start), exporterhelper.WithShutdown(oce.shutdown), ) diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/internal/metadata/generated_status.go b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/internal/metadata/generated_status.go index c9cff844fa2..82f0f448a86 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/internal/metadata/generated_status.go +++ b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/internal/metadata/generated_status.go @@ -7,11 +7,13 @@ import ( ) var ( - Type = component.MustNewType("otlp") + Type = component.MustNewType("otlp") + ScopeName = "go.opentelemetry.io/collector/exporter/otlpexporter" ) const ( - LogsStability = component.StabilityLevelBeta - TracesStability = component.StabilityLevelStable - MetricsStability = component.StabilityLevelStable + ProfilesStability = component.StabilityLevelDevelopment + LogsStability = component.StabilityLevelBeta + TracesStability = component.StabilityLevelStable + MetricsStability = component.StabilityLevelStable ) diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/metadata.yaml b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/metadata.yaml index f24e40e1f91..0c909a5496e 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/metadata.yaml +++ b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/metadata.yaml @@ -1,12 +1,14 @@ type: otlp +github_project: open-telemetry/opentelemetry-collector status: class: exporter stability: stable: [traces, metrics] beta: [logs] - distributions: [core, contrib] + development: [profiles] + distributions: [core, contrib, k8s, otlp] tests: config: - endpoint: otelcol:4317 \ No newline at end of file + endpoint: otelcol:4317 diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/otlp.go b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/otlp.go index 21864d7723a..20bf32f8411 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/otlp.go +++ b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/otlp.go @@ -17,6 +17,7 @@ import ( "google.golang.org/grpc/status" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configgrpc" "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/exporter" "go.opentelemetry.io/collector/exporter/exporterhelper" @@ -24,6 +25,8 @@ import ( "go.opentelemetry.io/collector/pdata/plog/plogotlp" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp" + "go.opentelemetry.io/collector/pdata/pprofile" + "go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp" "go.opentelemetry.io/collector/pdata/ptrace" "go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp" ) @@ -33,12 +36,13 @@ type baseExporter struct { config *Config // gRPC clients and connection. 
- traceExporter ptraceotlp.GRPCClient - metricExporter pmetricotlp.GRPCClient - logExporter plogotlp.GRPCClient - clientConn *grpc.ClientConn - metadata metadata.MD - callOptions []grpc.CallOption + traceExporter ptraceotlp.GRPCClient + metricExporter pmetricotlp.GRPCClient + logExporter plogotlp.GRPCClient + profileExporter pprofileotlp.GRPCClient + clientConn *grpc.ClientConn + metadata metadata.MD + callOptions []grpc.CallOption settings component.TelemetrySettings @@ -46,7 +50,7 @@ type baseExporter struct { userAgent string } -func newExporter(cfg component.Config, set exporter.CreateSettings) *baseExporter { +func newExporter(cfg component.Config, set exporter.Settings) *baseExporter { oCfg := cfg.(*Config) userAgent := fmt.Sprintf("%s/%s (%s/%s)", @@ -58,12 +62,14 @@ func newExporter(cfg component.Config, set exporter.CreateSettings) *baseExporte // start actually creates the gRPC connection. The client construction is deferred till this point as this // is the only place we get hold of Extensions which are required to construct auth round tripper. func (e *baseExporter) start(ctx context.Context, host component.Host) (err error) { - if e.clientConn, err = e.config.ClientConfig.ToClientConn(ctx, host, e.settings, grpc.WithUserAgent(e.userAgent)); err != nil { + agentOpt := configgrpc.WithGrpcDialOption(grpc.WithUserAgent(e.userAgent)) + if e.clientConn, err = e.config.ClientConfig.ToClientConn(ctx, host, e.settings, agentOpt); err != nil { return err } e.traceExporter = ptraceotlp.NewGRPCClient(e.clientConn) e.metricExporter = pmetricotlp.NewGRPCClient(e.clientConn) e.logExporter = plogotlp.NewGRPCClient(e.clientConn) + e.profileExporter = pprofileotlp.NewGRPCClient(e.clientConn) headers := map[string]string{} for k, v := range e.config.ClientConfig.Headers { headers[k] = string(v) @@ -131,6 +137,22 @@ func (e *baseExporter) pushLogs(ctx context.Context, ld plog.Logs) error { return nil } +func (e *baseExporter) pushProfiles(ctx context.Context, td pprofile.Profiles) error { + req := pprofileotlp.NewExportRequestFromProfiles(td) + resp, respErr := e.profileExporter.Export(e.enhanceContext(ctx), req, e.callOptions...) + if err := processError(respErr); err != nil { + return err + } + partialSuccess := resp.PartialSuccess() + if !(partialSuccess.ErrorMessage() == "" && partialSuccess.RejectedProfiles() == 0) { + e.settings.Logger.Warn("Partial success response", + zap.String("message", resp.PartialSuccess().ErrorMessage()), + zap.Int64("dropped_profiles", resp.PartialSuccess().RejectedProfiles()), + ) + } + return nil +} + func (e *baseExporter) enhanceContext(ctx context.Context) context.Context { if e.metadata.Len() > 0 { return metadata.NewOutgoingContext(ctx, e.metadata) @@ -151,8 +173,7 @@ func processError(err error) error { return nil } - // Now, this is this a real error. - + // Now, this is a real error. retryInfo := getRetryInfo(st) if !shouldRetry(st.Code(), retryInfo) { @@ -168,7 +189,6 @@ func processError(err error) error { } // Need to retry. - return err } diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/LICENSE b/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/Makefile b/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/Makefile
new file mode 100644
index 00000000000..ded7a36092d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/Makefile
@@ -0,0 +1 @@
+include ../../Makefile.Common
diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/README.md b/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/README.md
new file mode 100644
index 00000000000..f4adf1731c7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/README.md
@@ -0,0 +1,73 @@
+# OTLP/HTTP Exporter
+
+| Status        |                                  |
+| ------------- | -------------------------------- |
+| Stability     | [development]: profiles          |
+|               | [beta]: logs                     |
+|               | [stable]: traces, metrics        |
+| Distributions | [core], [contrib], [k8s], [otlp] |
+| Issues        | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector?query=is%3Aissue%20is%3Aopen%20label%3Aexporter%2Fotlphttp%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector/issues?q=is%3Aopen+is%3Aissue+label%3Aexporter%2Fotlphttp) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector?query=is%3Aissue%20is%3Aclosed%20label%3Aexporter%2Fotlphttp%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector/issues?q=is%3Aclosed+is%3Aissue+label%3Aexporter%2Fotlphttp) |
+
+[development]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#development
+[beta]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#beta
+[stable]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#stable
+[core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol
+[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib
+[k8s]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-k8s
+[otlp]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-otlp
+
+Export traces, metrics, and logs via HTTP using the [OTLP](
+https://github.com/open-telemetry/opentelemetry-proto/blob/main/docs/specification.md)
+format.
+
+The following settings are required:
+
+- `endpoint` (no default): The target base URL to send data to (e.g.: https://example.com:4318).
+  To send each signal, a corresponding path is appended to this base URL: "/v1/traces" for
+  traces, "/v1/metrics" for metrics, and "/v1/logs" for logs.
+
+The following settings can be optionally configured:
+
+- `traces_endpoint` (no default): The target URL to send trace data to (e.g.: https://example.com:4318/v1/traces).
+   If this setting is present, the `endpoint` setting is ignored for traces.
+- `metrics_endpoint` (no default): The target URL to send metric data to (e.g.: https://example.com:4318/v1/metrics).
+   If this setting is present, the `endpoint` setting is ignored for metrics.
+- `logs_endpoint` (no default): The target URL to send log data to (e.g.: https://example.com:4318/v1/logs).
+   If this setting is present, the `endpoint` setting is ignored for logs.
+- `tls`: see [TLS Configuration Settings](../../config/configtls/README.md) for the full set of available options.
+- `timeout` (default = 30s): HTTP request time limit. For details see https://golang.org/pkg/net/http/#Client
+- `read_buffer_size` (default = 0): ReadBufferSize for HTTP client.
+- `write_buffer_size` (default = 512 * 1024): WriteBufferSize for HTTP client.
+- `encoding` (default = proto): The encoding to use for the messages (valid options: `proto`, `json`).
+
+Example:
+
+```yaml
+exporters:
+  otlphttp:
+    endpoint: https://example.com:4318
+```
+
+By default, `gzip` compression is enabled. See [compression comparison](../../config/configgrpc/README.md#compression-comparison) for detailed benchmark information. To disable it, configure as follows:
+
+```yaml
+exporters:
+  otlphttp:
+    ...
+    compression: none
+```
+
+By default, `proto` encoding is used. To change the content encoding of the messages, configure it as follows:
+
+```yaml
+exporters:
+  otlphttp:
+    ...
+    encoding: json
+```
+
+The full list of settings exposed for this exporter is documented [here](./config.go),
+with detailed sample configurations [here](./testdata/config.yaml).
diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/config.go b/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/config.go
new file mode 100644
index 00000000000..f3a70fa2337
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/config.go
@@ -0,0 +1,73 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlphttpexporter // import "go.opentelemetry.io/collector/exporter/otlphttpexporter"
+
+import (
+	"encoding"
+	"errors"
+	"fmt"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/config/confighttp"
+	"go.opentelemetry.io/collector/config/configretry"
+	"go.opentelemetry.io/collector/exporter/exporterhelper"
+)
+
+// EncodingType defines the type for content encoding.
+type EncodingType string
+
+const (
+	EncodingProto EncodingType = "proto"
+	EncodingJSON  EncodingType = "json"
+)
+
+var _ encoding.TextUnmarshaler = (*EncodingType)(nil)
+
+// UnmarshalText unmarshals text to an EncodingType.
+func (e *EncodingType) UnmarshalText(text []byte) error {
+	if e == nil {
+		return errors.New("cannot unmarshal to a nil *EncodingType")
+	}
+
+	str := string(text)
+	switch str {
+	case string(EncodingProto):
+		*e = EncodingProto
+	case string(EncodingJSON):
+		*e = EncodingJSON
+	default:
+		return fmt.Errorf("invalid encoding type: %s", str)
+	}
+
+	return nil
+}
+
+// Config defines configuration for the OTLP/HTTP exporter.
+type Config struct {
+	confighttp.ClientConfig    `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct.
+	exporterhelper.QueueConfig `mapstructure:"sending_queue"`
+	RetryConfig                configretry.BackOffConfig `mapstructure:"retry_on_failure"`
+
+	// The URL to send traces to. If omitted the Endpoint + "/v1/traces" will be used.
+	TracesEndpoint string `mapstructure:"traces_endpoint"`
+
+	// The URL to send metrics to. If omitted the Endpoint + "/v1/metrics" will be used.
+	MetricsEndpoint string `mapstructure:"metrics_endpoint"`
+
+	// The URL to send logs to. If omitted the Endpoint + "/v1/logs" will be used.
+	LogsEndpoint string `mapstructure:"logs_endpoint"`
+
+	// The encoding to export telemetry (default: "proto").
+	Encoding EncodingType `mapstructure:"encoding"`
+}
+
+var _ component.Config = (*Config)(nil)
+
+// Validate checks if the exporter configuration is valid.
+func (cfg *Config) Validate() error {
+	if cfg.Endpoint == "" && cfg.TracesEndpoint == "" && cfg.MetricsEndpoint == "" && cfg.LogsEndpoint == "" {
+		return errors.New("at least one endpoint must be specified")
+	}
+	return nil
+}
diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/doc.go b/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/doc.go
new file mode 100644
index 00000000000..16eccd7cbd4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/doc.go
@@ -0,0 +1,7 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:generate mdatagen metadata.yaml
+
+// Package otlphttpexporter exports data by using the OTLP format to an HTTP endpoint.
+package otlphttpexporter // import "go.opentelemetry.io/collector/exporter/otlphttpexporter"
diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/factory.go b/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/factory.go
new file mode 100644
index 00000000000..01fcc0436e4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/factory.go
@@ -0,0 +1,177 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlphttpexporter // import "go.opentelemetry.io/collector/exporter/otlphttpexporter"
+
+import (
+	"context"
+	"fmt"
+	"net/url"
+	"strings"
+	"time"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/config/configcompression"
+	"go.opentelemetry.io/collector/config/confighttp"
+	"go.opentelemetry.io/collector/config/configretry"
+	"go.opentelemetry.io/collector/consumer"
+	"go.opentelemetry.io/collector/exporter"
+	"go.opentelemetry.io/collector/exporter/exporterhelper"
+	"go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper"
+	"go.opentelemetry.io/collector/exporter/otlphttpexporter/internal/metadata"
+	"go.opentelemetry.io/collector/exporter/xexporter"
+)
+
+// NewFactory creates a factory for the OTLP/HTTP exporter.
+func NewFactory() exporter.Factory {
+	return xexporter.NewFactory(
+		metadata.Type,
+		createDefaultConfig,
+		xexporter.WithTraces(createTraces, metadata.TracesStability),
+		xexporter.WithMetrics(createMetrics, metadata.MetricsStability),
+		xexporter.WithLogs(createLogs, metadata.LogsStability),
+		xexporter.WithProfiles(createProfiles, metadata.ProfilesStability),
+	)
+}
+
+func createDefaultConfig() component.Config {
+	clientConfig := confighttp.NewDefaultClientConfig()
+	clientConfig.Timeout = 30 * time.Second
+	// Default to gzip compression
+	clientConfig.Compression = configcompression.TypeGzip
+	// We read almost 0 bytes, so no need to tune ReadBufferSize.
+	clientConfig.WriteBufferSize = 512 * 1024
+
+	return &Config{
+		RetryConfig:  configretry.NewDefaultBackOffConfig(),
+		QueueConfig:  exporterhelper.NewDefaultQueueConfig(),
+		Encoding:     EncodingProto,
+		ClientConfig: clientConfig,
+	}
+}
+
+// composeSignalURL composes the final URL for the signal (traces, metrics, logs) based on the configuration.
+// oCfg is the configuration of the exporter.
+// signalOverrideURL is the URL specified in the signal-specific configuration (empty if not specified).
+// signalName is the name of the signal, e.g. "traces", "metrics", "logs".
+// signalVersion is the version of the signal, e.g. "v1" or "v1development". +func composeSignalURL(oCfg *Config, signalOverrideURL string, signalName string, signalVersion string) (string, error) { + switch { + case signalOverrideURL != "": + _, err := url.Parse(signalOverrideURL) + if err != nil { + return "", fmt.Errorf("%s_endpoint must be a valid URL", signalName) + } + return signalOverrideURL, nil + case oCfg.Endpoint == "": + return "", fmt.Errorf("either endpoint or %s_endpoint must be specified", signalName) + default: + if strings.HasSuffix(oCfg.Endpoint, "/") { + return oCfg.Endpoint + signalVersion + "/" + signalName, nil + } + return oCfg.Endpoint + "/" + signalVersion + "/" + signalName, nil + } +} + +func createTraces( + ctx context.Context, + set exporter.Settings, + cfg component.Config, +) (exporter.Traces, error) { + oce, err := newExporter(cfg, set) + if err != nil { + return nil, err + } + oCfg := cfg.(*Config) + + oce.tracesURL, err = composeSignalURL(oCfg, oCfg.TracesEndpoint, "traces", "v1") + if err != nil { + return nil, err + } + + return exporterhelper.NewTraces(ctx, set, cfg, + oce.pushTraces, + exporterhelper.WithStart(oce.start), + exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), + // explicitly disable since we rely on http.Client timeout logic. + exporterhelper.WithTimeout(exporterhelper.TimeoutConfig{Timeout: 0}), + exporterhelper.WithRetry(oCfg.RetryConfig), + exporterhelper.WithQueue(oCfg.QueueConfig)) +} + +func createMetrics( + ctx context.Context, + set exporter.Settings, + cfg component.Config, +) (exporter.Metrics, error) { + oce, err := newExporter(cfg, set) + if err != nil { + return nil, err + } + oCfg := cfg.(*Config) + + oce.metricsURL, err = composeSignalURL(oCfg, oCfg.MetricsEndpoint, "metrics", "v1") + if err != nil { + return nil, err + } + + return exporterhelper.NewMetrics(ctx, set, cfg, + oce.pushMetrics, + exporterhelper.WithStart(oce.start), + exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), + // explicitly disable since we rely on http.Client timeout logic. + exporterhelper.WithTimeout(exporterhelper.TimeoutConfig{Timeout: 0}), + exporterhelper.WithRetry(oCfg.RetryConfig), + exporterhelper.WithQueue(oCfg.QueueConfig)) +} + +func createLogs( + ctx context.Context, + set exporter.Settings, + cfg component.Config, +) (exporter.Logs, error) { + oce, err := newExporter(cfg, set) + if err != nil { + return nil, err + } + oCfg := cfg.(*Config) + oce.logsURL, err = composeSignalURL(oCfg, oCfg.LogsEndpoint, "logs", "v1") + if err != nil { + return nil, err + } + + return exporterhelper.NewLogs(ctx, set, cfg, + oce.pushLogs, + exporterhelper.WithStart(oce.start), + exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), + // explicitly disable since we rely on http.Client timeout logic. 
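+		// (ClientConfig.Timeout, 30 seconds by default per createDefaultConfig,
+		// still bounds each request at the http.Client level.)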
+ exporterhelper.WithTimeout(exporterhelper.TimeoutConfig{Timeout: 0}), + exporterhelper.WithRetry(oCfg.RetryConfig), + exporterhelper.WithQueue(oCfg.QueueConfig)) +} + +func createProfiles( + ctx context.Context, + set exporter.Settings, + cfg component.Config, +) (xexporter.Profiles, error) { + oce, err := newExporter(cfg, set) + if err != nil { + return nil, err + } + oCfg := cfg.(*Config) + + oce.profilesURL, err = composeSignalURL(oCfg, "", "profiles", "v1development") + if err != nil { + return nil, err + } + + return xexporterhelper.NewProfilesExporter(ctx, set, cfg, + oce.pushProfiles, + exporterhelper.WithStart(oce.start), + exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), + // explicitly disable since we rely on http.Client timeout logic. + exporterhelper.WithTimeout(exporterhelper.TimeoutConfig{Timeout: 0}), + exporterhelper.WithRetry(oCfg.RetryConfig), + exporterhelper.WithQueue(oCfg.QueueConfig)) +} diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/internal/metadata/generated_status.go b/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/internal/metadata/generated_status.go new file mode 100644 index 00000000000..07bd3d1d7a6 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/internal/metadata/generated_status.go @@ -0,0 +1,19 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "go.opentelemetry.io/collector/component" +) + +var ( + Type = component.MustNewType("otlphttp") + ScopeName = "go.opentelemetry.io/collector/exporter/otlphttpexporter" +) + +const ( + ProfilesStability = component.StabilityLevelDevelopment + LogsStability = component.StabilityLevelBeta + TracesStability = component.StabilityLevelStable + MetricsStability = component.StabilityLevelStable +) diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/metadata.yaml b/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/metadata.yaml new file mode 100644 index 00000000000..5e3b73fd355 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/metadata.yaml @@ -0,0 +1,15 @@ +type: otlphttp +github_project: open-telemetry/opentelemetry-collector + +status: + class: exporter + stability: + stable: [traces, metrics] + beta: [logs] + development: [profiles] + distributions: [core, contrib, k8s, otlp] + +tests: + config: + endpoint: "https://1.2.3.4:1234" + diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/otlp.go b/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/otlp.go new file mode 100644 index 00000000000..5f8ef580ebe --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/otlp.go @@ -0,0 +1,448 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlphttpexporter // import "go.opentelemetry.io/collector/exporter/otlphttpexporter" + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "runtime" + "strconv" + "time" + + "go.uber.org/zap" + "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/protobuf/proto" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/exporterhelper" + "go.opentelemetry.io/collector/internal/httphelper" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/plog/plogotlp" + 
"go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp" + "go.opentelemetry.io/collector/pdata/pprofile" + "go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp" + "go.opentelemetry.io/collector/pdata/ptrace" + "go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp" +) + +type baseExporter struct { + // Input configuration. + config *Config + client *http.Client + tracesURL string + metricsURL string + logsURL string + profilesURL string + logger *zap.Logger + settings component.TelemetrySettings + // Default user-agent header. + userAgent string +} + +const ( + headerRetryAfter = "Retry-After" + maxHTTPResponseReadBytes = 64 * 1024 + + jsonContentType = "application/json" + protobufContentType = "application/x-protobuf" +) + +// Create new exporter. +func newExporter(cfg component.Config, set exporter.Settings) (*baseExporter, error) { + oCfg := cfg.(*Config) + + if oCfg.Endpoint != "" { + _, err := url.Parse(oCfg.Endpoint) + if err != nil { + return nil, errors.New("endpoint must be a valid URL") + } + } + + userAgent := fmt.Sprintf("%s/%s (%s/%s)", + set.BuildInfo.Description, set.BuildInfo.Version, runtime.GOOS, runtime.GOARCH) + + // client construction is deferred to start + return &baseExporter{ + config: oCfg, + logger: set.Logger, + userAgent: userAgent, + settings: set.TelemetrySettings, + }, nil +} + +// start actually creates the HTTP client. The client construction is deferred till this point as this +// is the only place we get hold of Extensions which are required to construct auth round tripper. +func (e *baseExporter) start(ctx context.Context, host component.Host) error { + client, err := e.config.ClientConfig.ToClient(ctx, host, e.settings) + if err != nil { + return err + } + e.client = client + return nil +} + +func (e *baseExporter) pushTraces(ctx context.Context, td ptrace.Traces) error { + tr := ptraceotlp.NewExportRequestFromTraces(td) + + var err error + var request []byte + switch e.config.Encoding { + case EncodingJSON: + request, err = tr.MarshalJSON() + case EncodingProto: + request, err = tr.MarshalProto() + default: + err = fmt.Errorf("invalid encoding: %s", e.config.Encoding) + } + + if err != nil { + return consumererror.NewPermanent(err) + } + + return e.export(ctx, e.tracesURL, request, e.tracesPartialSuccessHandler) +} + +func (e *baseExporter) pushMetrics(ctx context.Context, md pmetric.Metrics) error { + tr := pmetricotlp.NewExportRequestFromMetrics(md) + + var err error + var request []byte + switch e.config.Encoding { + case EncodingJSON: + request, err = tr.MarshalJSON() + case EncodingProto: + request, err = tr.MarshalProto() + default: + err = fmt.Errorf("invalid encoding: %s", e.config.Encoding) + } + + if err != nil { + return consumererror.NewPermanent(err) + } + return e.export(ctx, e.metricsURL, request, e.metricsPartialSuccessHandler) +} + +func (e *baseExporter) pushLogs(ctx context.Context, ld plog.Logs) error { + tr := plogotlp.NewExportRequestFromLogs(ld) + + var err error + var request []byte + switch e.config.Encoding { + case EncodingJSON: + request, err = tr.MarshalJSON() + case EncodingProto: + request, err = tr.MarshalProto() + default: + err = fmt.Errorf("invalid encoding: %s", e.config.Encoding) + } + + if err != nil { + return consumererror.NewPermanent(err) + } + + return e.export(ctx, e.logsURL, request, e.logsPartialSuccessHandler) +} + +func (e *baseExporter) pushProfiles(ctx context.Context, td pprofile.Profiles) error { + tr := 
pprofileotlp.NewExportRequestFromProfiles(td) + + var err error + var request []byte + switch e.config.Encoding { + case EncodingJSON: + request, err = tr.MarshalJSON() + case EncodingProto: + request, err = tr.MarshalProto() + default: + err = fmt.Errorf("invalid encoding: %s", e.config.Encoding) + } + + if err != nil { + return consumererror.NewPermanent(err) + } + + return e.export(ctx, e.profilesURL, request, e.profilesPartialSuccessHandler) +} + +func (e *baseExporter) export(ctx context.Context, url string, request []byte, partialSuccessHandler partialSuccessHandler) error { + e.logger.Debug("Preparing to make HTTP request", zap.String("url", url)) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(request)) + if err != nil { + return consumererror.NewPermanent(err) + } + + switch e.config.Encoding { + case EncodingJSON: + req.Header.Set("Content-Type", jsonContentType) + case EncodingProto: + req.Header.Set("Content-Type", protobufContentType) + default: + return fmt.Errorf("invalid encoding: %s", e.config.Encoding) + } + + req.Header.Set("User-Agent", e.userAgent) + + resp, err := e.client.Do(req) + if err != nil { + return fmt.Errorf("failed to make an HTTP request: %w", err) + } + + defer func() { + // Discard any remaining response body when we are done reading. + _, _ = io.CopyN(io.Discard, resp.Body, maxHTTPResponseReadBytes) + resp.Body.Close() + }() + + if resp.StatusCode >= 200 && resp.StatusCode <= 299 { + return handlePartialSuccessResponse(resp, partialSuccessHandler) + } + + respStatus := readResponseStatus(resp) + + // Format the error message. Use the status if it is present in the response. + var errString string + var formattedErr error + if respStatus != nil { + errString = fmt.Sprintf( + "error exporting items, request to %s responded with HTTP Status Code %d, Message=%s, Details=%v", + url, resp.StatusCode, respStatus.Message, respStatus.Details) + } else { + errString = fmt.Sprintf( + "error exporting items, request to %s responded with HTTP Status Code %d", + url, resp.StatusCode) + } + formattedErr = httphelper.NewStatusFromMsgAndHTTPCode(errString, resp.StatusCode).Err() + + if isRetryableStatusCode(resp.StatusCode) { + // A retry duration of 0 seconds will trigger the default backoff policy + // of our caller (retry handler). + retryAfter := 0 + + // Check if the server is overwhelmed. + // See spec https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/otlp.md#otlphttp-throttling + isThrottleError := resp.StatusCode == http.StatusTooManyRequests || resp.StatusCode == http.StatusServiceUnavailable + if val := resp.Header.Get(headerRetryAfter); isThrottleError && val != "" { + if seconds, err2 := strconv.Atoi(val); err2 == nil { + retryAfter = seconds + } + } + + return exporterhelper.NewThrottleRetry(formattedErr, time.Duration(retryAfter)*time.Second) + } + + return consumererror.NewPermanent(formattedErr) +} + +// Determine if the status code is retryable according to the specification. 
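+// These are 429 (Too Many Requests), 502 (Bad Gateway), 503 (Service
+// Unavailable) and 504 (Gateway Timeout).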
+// For more, see https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/otlp.md#failures-1
+func isRetryableStatusCode(code int) bool {
+	switch code {
+	case http.StatusTooManyRequests:
+		return true
+	case http.StatusBadGateway:
+		return true
+	case http.StatusServiceUnavailable:
+		return true
+	case http.StatusGatewayTimeout:
+		return true
+	default:
+		return false
+	}
+}
+
+func readResponseBody(resp *http.Response) ([]byte, error) {
+	if resp.ContentLength == 0 {
+		return nil, nil
+	}
+
+	maxRead := resp.ContentLength
+
+	// if maxRead == -1, the Content-Length header has not been sent, so read up to
+	// the maximum permitted body size. If it is larger than the permitted body
+	// size, still try to read from the body in case the value is an error. If the
+	// body is larger than the maximum size, proto unmarshaling will likely fail.
+	if maxRead == -1 || maxRead > maxHTTPResponseReadBytes {
+		maxRead = maxHTTPResponseReadBytes
+	}
+	protoBytes := make([]byte, maxRead)
+	n, err := io.ReadFull(resp.Body, protoBytes)
+
+	// No bytes read and an EOF error indicates there is no body to read.
+	if n == 0 && (err == nil || errors.Is(err, io.EOF)) {
+		return nil, nil
+	}
+
+	// io.ReadFull will return io.ErrUnexpectedEOF if the Content-Length header
+	// wasn't set, since we will try to read past the length of the body. If this
+	// is the case, the body will still have the full message in it, so we want to
+	// ignore the error and parse the message.
+	if err != nil && !errors.Is(err, io.ErrUnexpectedEOF) {
+		return nil, err
+	}
+
+	return protoBytes[:n], nil
+}
+
+// Read the response and decode the status.Status from the body.
+// Returns nil if the response is empty or cannot be decoded.
+func readResponseStatus(resp *http.Response) *status.Status {
+	var respStatus *status.Status
+	if resp.StatusCode >= 400 && resp.StatusCode <= 599 {
+		// Request failed. Read the body. OTLP spec says:
+		// "Response body for all HTTP 4xx and HTTP 5xx responses MUST be a
+		// Protobuf-encoded Status message that describes the problem."
+		respBytes, err := readResponseBody(resp)
+		if err != nil {
+			return nil
+		}
+
+		// Decode it as Status struct.
See https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/otlp.md#failures + respStatus = &status.Status{} + err = proto.Unmarshal(respBytes, respStatus) + if err != nil { + return nil + } + } + + return respStatus +} + +func handlePartialSuccessResponse(resp *http.Response, partialSuccessHandler partialSuccessHandler) error { + bodyBytes, err := readResponseBody(resp) + if err != nil { + return err + } + + return partialSuccessHandler(bodyBytes, resp.Header.Get("Content-Type")) +} + +type partialSuccessHandler func(bytes []byte, contentType string) error + +func (e *baseExporter) tracesPartialSuccessHandler(protoBytes []byte, contentType string) error { + if protoBytes == nil { + return nil + } + exportResponse := ptraceotlp.NewExportResponse() + switch contentType { + case protobufContentType: + err := exportResponse.UnmarshalProto(protoBytes) + if err != nil { + return fmt.Errorf("error parsing protobuf response: %w", err) + } + case jsonContentType: + err := exportResponse.UnmarshalJSON(protoBytes) + if err != nil { + return fmt.Errorf("error parsing json response: %w", err) + } + default: + return nil + } + + partialSuccess := exportResponse.PartialSuccess() + if !(partialSuccess.ErrorMessage() == "" && partialSuccess.RejectedSpans() == 0) { + e.logger.Warn("Partial success response", + zap.String("message", exportResponse.PartialSuccess().ErrorMessage()), + zap.Int64("dropped_spans", exportResponse.PartialSuccess().RejectedSpans()), + ) + } + return nil +} + +func (e *baseExporter) metricsPartialSuccessHandler(protoBytes []byte, contentType string) error { + if protoBytes == nil { + return nil + } + exportResponse := pmetricotlp.NewExportResponse() + switch contentType { + case protobufContentType: + err := exportResponse.UnmarshalProto(protoBytes) + if err != nil { + return fmt.Errorf("error parsing protobuf response: %w", err) + } + case jsonContentType: + err := exportResponse.UnmarshalJSON(protoBytes) + if err != nil { + return fmt.Errorf("error parsing json response: %w", err) + } + default: + return nil + } + + partialSuccess := exportResponse.PartialSuccess() + if !(partialSuccess.ErrorMessage() == "" && partialSuccess.RejectedDataPoints() == 0) { + e.logger.Warn("Partial success response", + zap.String("message", exportResponse.PartialSuccess().ErrorMessage()), + zap.Int64("dropped_data_points", exportResponse.PartialSuccess().RejectedDataPoints()), + ) + } + return nil +} + +func (e *baseExporter) logsPartialSuccessHandler(protoBytes []byte, contentType string) error { + if protoBytes == nil { + return nil + } + exportResponse := plogotlp.NewExportResponse() + switch contentType { + case protobufContentType: + err := exportResponse.UnmarshalProto(protoBytes) + if err != nil { + return fmt.Errorf("error parsing protobuf response: %w", err) + } + case jsonContentType: + err := exportResponse.UnmarshalJSON(protoBytes) + if err != nil { + return fmt.Errorf("error parsing json response: %w", err) + } + default: + return nil + } + + partialSuccess := exportResponse.PartialSuccess() + if !(partialSuccess.ErrorMessage() == "" && partialSuccess.RejectedLogRecords() == 0) { + e.logger.Warn("Partial success response", + zap.String("message", exportResponse.PartialSuccess().ErrorMessage()), + zap.Int64("dropped_log_records", exportResponse.PartialSuccess().RejectedLogRecords()), + ) + } + return nil +} + +func (e *baseExporter) profilesPartialSuccessHandler(protoBytes []byte, contentType string) error { + if protoBytes == nil { + return nil + } + 
exportResponse := pprofileotlp.NewExportResponse() + switch contentType { + case protobufContentType: + err := exportResponse.UnmarshalProto(protoBytes) + if err != nil { + return fmt.Errorf("error parsing protobuf response: %w", err) + } + case jsonContentType: + err := exportResponse.UnmarshalJSON(protoBytes) + if err != nil { + return fmt.Errorf("error parsing json response: %w", err) + } + default: + return nil + } + + partialSuccess := exportResponse.PartialSuccess() + if !(partialSuccess.ErrorMessage() == "" && partialSuccess.RejectedProfiles() == 0) { + e.logger.Warn("Partial success response", + zap.String("message", exportResponse.PartialSuccess().ErrorMessage()), + zap.Int64("dropped_samples", exportResponse.PartialSuccess().RejectedProfiles()), + ) + } + return nil +} diff --git a/vendor/go.opentelemetry.io/collector/exporter/xexporter/LICENSE b/vendor/go.opentelemetry.io/collector/exporter/xexporter/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/exporter/xexporter/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
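For orientation, here is a minimal sketch (not part of the vendored diff) of how the `otlphttpexporter` factory and `Config` shown above fit together; it builds the factory, adjusts the default config, and validates it. The endpoint value is a placeholder taken from the README example, and error handling is reduced to a panic:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/exporter/otlphttpexporter"
)

func main() {
	factory := otlphttpexporter.NewFactory()

	// Start from the defaults: gzip compression, proto encoding, 30s HTTP client timeout.
	cfg := factory.CreateDefaultConfig().(*otlphttpexporter.Config)
	cfg.Endpoint = "https://example.com:4318" // placeholder base URL
	cfg.Encoding = otlphttpexporter.EncodingJSON

	// Validate fails only when no endpoint at all is configured.
	if err := cfg.Validate(); err != nil {
		panic(err)
	}
	fmt.Printf("%s exporter, encoding=%s\n", factory.Type(), cfg.Encoding)
}
```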
diff --git a/vendor/go.opentelemetry.io/collector/exporter/xexporter/Makefile b/vendor/go.opentelemetry.io/collector/exporter/xexporter/Makefile
new file mode 100644
index 00000000000..ded7a36092d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/exporter/xexporter/Makefile
@@ -0,0 +1 @@
+include ../../Makefile.Common
diff --git a/vendor/go.opentelemetry.io/collector/exporter/xexporter/exporter.go b/vendor/go.opentelemetry.io/collector/exporter/xexporter/exporter.go
new file mode 100644
index 00000000000..7d83e92c8a6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/exporter/xexporter/exporter.go
@@ -0,0 +1,109 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package xexporter // import "go.opentelemetry.io/collector/exporter/xexporter"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/consumer/xconsumer"
+	"go.opentelemetry.io/collector/exporter"
+	"go.opentelemetry.io/collector/pipeline"
+)
+
+// Profiles is an exporter that can consume profiles.
+type Profiles interface {
+	component.Component
+	xconsumer.Profiles
+}
+
+type Factory interface {
+	exporter.Factory
+
+	// CreateProfiles creates a Profiles exporter based on this config.
+	// If the exporter type does not support profiles,
+	// this function returns the error [pipeline.ErrSignalNotSupported].
+	CreateProfiles(ctx context.Context, set exporter.Settings, cfg component.Config) (Profiles, error)
+
+	// ProfilesStability gets the stability level of the Profiles exporter.
+	ProfilesStability() component.StabilityLevel
+}
+
+// FactoryOption applies changes to the factory options.
+type FactoryOption interface {
+	// applyOption applies the option.
+	applyOption(o *factoryOpts)
+}
+
+// factoryOptionFunc is a FactoryOption created through a function.
+type factoryOptionFunc func(*factoryOpts)
+
+func (f factoryOptionFunc) applyOption(o *factoryOpts) {
+	f(o)
+}
+
+type factoryOpts struct {
+	opts []exporter.FactoryOption
+	*factory
+}
+
+// CreateProfilesFunc is the equivalent of Factory.CreateProfiles.
+type CreateProfilesFunc func(context.Context, exporter.Settings, component.Config) (Profiles, error)
+
+// CreateProfiles implements Factory.CreateProfiles.
+func (f CreateProfilesFunc) CreateProfiles(ctx context.Context, set exporter.Settings, cfg component.Config) (Profiles, error) {
+	if f == nil {
+		return nil, pipeline.ErrSignalNotSupported
+	}
+	return f(ctx, set, cfg)
+}
+
+// WithTraces overrides the default "error not supported" implementation for CreateTraces and the default "undefined" stability level.
+func WithTraces(createTraces exporter.CreateTracesFunc, sl component.StabilityLevel) FactoryOption {
+	return factoryOptionFunc(func(o *factoryOpts) {
+		o.opts = append(o.opts, exporter.WithTraces(createTraces, sl))
+	})
+}
+
+// WithMetrics overrides the default "error not supported" implementation for CreateMetrics and the default "undefined" stability level.
+func WithMetrics(createMetrics exporter.CreateMetricsFunc, sl component.StabilityLevel) FactoryOption {
+	return factoryOptionFunc(func(o *factoryOpts) {
+		o.opts = append(o.opts, exporter.WithMetrics(createMetrics, sl))
+	})
+}
+
+// WithLogs overrides the default "error not supported" implementation for CreateLogs and the default "undefined" stability level.
+func WithLogs(createLogs exporter.CreateLogsFunc, sl component.StabilityLevel) FactoryOption {
+	return factoryOptionFunc(func(o *factoryOpts) {
+		o.opts = append(o.opts, exporter.WithLogs(createLogs, sl))
+	})
+}
+
+// WithProfiles overrides the default "error not supported" implementation for CreateProfiles and the default "undefined" stability level.
+func WithProfiles(createProfiles CreateProfilesFunc, sl component.StabilityLevel) FactoryOption {
+	return factoryOptionFunc(func(o *factoryOpts) {
+		o.profilesStabilityLevel = sl
+		o.CreateProfilesFunc = createProfiles
+	})
+}
+
+type factory struct {
+	exporter.Factory
+	CreateProfilesFunc
+	profilesStabilityLevel component.StabilityLevel
+}
+
+func (f *factory) ProfilesStability() component.StabilityLevel {
+	return f.profilesStabilityLevel
+}
+
+// NewFactory returns a Factory.
+func NewFactory(cfgType component.Type, createDefaultConfig component.CreateDefaultConfigFunc, options ...FactoryOption) Factory {
+	opts := factoryOpts{factory: &factory{}}
+	for _, opt := range options {
+		opt.applyOption(&opts)
+	}
+	opts.factory.Factory = exporter.NewFactory(cfgType, createDefaultConfig, opts.opts...)
+	return opts.factory
+}
diff --git a/vendor/go.opentelemetry.io/collector/extension/README.md b/vendor/go.opentelemetry.io/collector/extension/README.md
index 4f9661e1414..da453dac647 100644
--- a/vendor/go.opentelemetry.io/collector/extension/README.md
+++ b/vendor/go.opentelemetry.io/collector/extension/README.md
@@ -10,7 +10,6 @@ performance profile.
 
 Supported service extensions (sorted alphabetically):
 
-- [Memory Ballast](ballastextension/README.md)
 - [zPages](zpagesextension/README.md)
 
 The [contributors
@@ -28,5 +27,5 @@ will be shutdown. The ordering is determined in the `extensions` tag under the
 service:
   # Extensions specified below are going to be loaded by the service in the
   # order given below, and shutdown on reverse order.
-  extensions: [memory_ballast, zpages]
+  extensions: [extension1, extension2]
 ```
diff --git a/vendor/go.opentelemetry.io/collector/extension/auth/client.go b/vendor/go.opentelemetry.io/collector/extension/auth/client.go
index d1855d8aece..03eba4ed336 100644
--- a/vendor/go.opentelemetry.io/collector/extension/auth/client.go
+++ b/vendor/go.opentelemetry.io/collector/extension/auth/client.go
@@ -26,7 +26,15 @@ type Client interface {
 }
 
 // ClientOption represents the possible options for NewClient.
-type ClientOption func(*defaultClient)
+type ClientOption interface {
+	apply(*defaultClient)
+}
+
+type clientOptionFunc func(*defaultClient)
+
+func (of clientOptionFunc) apply(e *defaultClient) {
+	of(e)
+}
 
 // ClientRoundTripperFunc specifies the function that returns a RoundTripper that can be used to authenticate HTTP requests.
 type ClientRoundTripperFunc func(base http.RoundTripper) (http.RoundTripper, error)
@@ -58,33 +66,33 @@ type defaultClient struct {
 
 // WithClientStart overrides the default `Start` function for a component.Component.
 // The default always returns nil.
 func WithClientStart(startFunc component.StartFunc) ClientOption {
-	return func(o *defaultClient) {
+	return clientOptionFunc(func(o *defaultClient) {
 		o.StartFunc = startFunc
-	}
+	})
 }
 
 // WithClientShutdown overrides the default `Shutdown` function for a component.Component.
 // The default always returns nil.
func WithClientShutdown(shutdownFunc component.ShutdownFunc) ClientOption { - return func(o *defaultClient) { + return clientOptionFunc(func(o *defaultClient) { o.ShutdownFunc = shutdownFunc - } + }) } // WithClientRoundTripper provides a `RoundTripper` function for this client authenticator. // The default round tripper is no-op. func WithClientRoundTripper(roundTripperFunc ClientRoundTripperFunc) ClientOption { - return func(o *defaultClient) { + return clientOptionFunc(func(o *defaultClient) { o.ClientRoundTripperFunc = roundTripperFunc - } + }) } // WithClientPerRPCCredentials provides a `PerRPCCredentials` function for this client authenticator. // There's no default. func WithClientPerRPCCredentials(perRPCCredentialsFunc ClientPerRPCCredentialsFunc) ClientOption { - return func(o *defaultClient) { + return clientOptionFunc(func(o *defaultClient) { o.ClientPerRPCCredentialsFunc = perRPCCredentialsFunc - } + }) } // NewClient returns a Client configured with the provided options. @@ -92,7 +100,7 @@ func NewClient(options ...ClientOption) Client { bc := &defaultClient{} for _, op := range options { - op(bc) + op.apply(bc) } return bc diff --git a/vendor/go.opentelemetry.io/collector/extension/auth/server.go b/vendor/go.opentelemetry.io/collector/extension/auth/server.go index 6e552e4285d..256dc51ce9c 100644 --- a/vendor/go.opentelemetry.io/collector/extension/auth/server.go +++ b/vendor/go.opentelemetry.io/collector/extension/auth/server.go @@ -18,7 +18,7 @@ import ( type Server interface { extension.Extension - // Authenticate checks whether the given headers map contains valid auth data. Successfully authenticated calls will always return a nil error. + // Authenticate checks whether the given map contains valid auth data. Successfully authenticated calls will always return a nil error. // When the authentication fails, an error must be returned and the caller must not retry. This function is typically called from interceptors, // on behalf of receivers, but receivers can still call this directly if the usage of interceptors isn't suitable. // The deadline and cancellation given to this function must be respected, but note that authentication data has to be part of the map, not context. @@ -26,7 +26,7 @@ type Server interface { // authentication data (if possible). This will allow other components in the pipeline to make decisions based on that data, such as routing based // on tenancy as determined by the group membership, or passing through the authentication data to the next collector/backend. // The context keys to be used are not defined yet. - Authenticate(ctx context.Context, headers map[string][]string) (context.Context, error) + Authenticate(ctx context.Context, sources map[string][]string) (context.Context, error) } type defaultServer struct { @@ -36,40 +36,48 @@ type defaultServer struct { } // ServerOption represents the possible options for NewServer. -type ServerOption func(*defaultServer) +type ServerOption interface { + apply(*defaultServer) +} + +type serverOptionFunc func(*defaultServer) + +func (of serverOptionFunc) apply(e *defaultServer) { + of(e) +} // ServerAuthenticateFunc defines the signature for the function responsible for performing the authentication based -// on the given headers map. See Server.Authenticate. -type ServerAuthenticateFunc func(ctx context.Context, headers map[string][]string) (context.Context, error) +// on the given sources map. See Server.Authenticate. 
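+// The sources map carries the transport-level auth data, e.g. HTTP request
+// headers or gRPC metadata.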
+type ServerAuthenticateFunc func(ctx context.Context, sources map[string][]string) (context.Context, error) -func (f ServerAuthenticateFunc) Authenticate(ctx context.Context, headers map[string][]string) (context.Context, error) { +func (f ServerAuthenticateFunc) Authenticate(ctx context.Context, sources map[string][]string) (context.Context, error) { if f == nil { return ctx, nil } - return f(ctx, headers) + return f(ctx, sources) } // WithServerAuthenticate specifies which function to use to perform the authentication. func WithServerAuthenticate(authFunc ServerAuthenticateFunc) ServerOption { - return func(o *defaultServer) { + return serverOptionFunc(func(o *defaultServer) { o.ServerAuthenticateFunc = authFunc - } + }) } // WithServerStart overrides the default `Start` function for a component.Component. // The default always returns nil. func WithServerStart(startFunc component.StartFunc) ServerOption { - return func(o *defaultServer) { + return serverOptionFunc(func(o *defaultServer) { o.StartFunc = startFunc - } + }) } // WithServerShutdown overrides the default `Shutdown` function for a component.Component. // The default always returns nil. func WithServerShutdown(shutdownFunc component.ShutdownFunc) ServerOption { - return func(o *defaultServer) { + return serverOptionFunc(func(o *defaultServer) { o.ShutdownFunc = shutdownFunc - } + }) } // NewServer returns a Server configured with the provided options. @@ -77,7 +85,7 @@ func NewServer(options ...ServerOption) Server { bc := &defaultServer{} for _, op := range options { - op(bc) + op.apply(bc) } return bc diff --git a/vendor/go.opentelemetry.io/collector/extension/experimental/storage/doc.go b/vendor/go.opentelemetry.io/collector/extension/experimental/storage/doc.go deleted file mode 100644 index 57efc310f68..00000000000 --- a/vendor/go.opentelemetry.io/collector/extension/experimental/storage/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package storage implements an extension that can -// persist state beyond the collector process. -package storage // import "go.opentelemetry.io/collector/extension/experimental/storage" diff --git a/vendor/go.opentelemetry.io/collector/extension/experimental/storage/storage.go b/vendor/go.opentelemetry.io/collector/extension/experimental/storage/storage.go deleted file mode 100644 index 184787db0f7..00000000000 --- a/vendor/go.opentelemetry.io/collector/extension/experimental/storage/storage.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package storage // import "go.opentelemetry.io/collector/extension/experimental/storage" - -import ( - "context" - - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/extension" -) - -// Extension is the interface that storage extensions must implement -type Extension interface { - extension.Extension - - // GetClient will create a client for use by the specified component. - // Each component can have multiple storages (e.g. one for each signal), - // which can be identified using storageName parameter. - // The component can use the client to manage state - GetClient(ctx context.Context, kind component.Kind, id component.ID, storageName string) (Client, error) -} - -// Client is the interface that storage clients must implement -// All methods should return error only if a problem occurred. 
-// This mirrors the behavior of a golang map: -// - Set doesn't error if a key already exists - it just overwrites the value. -// - Get doesn't error if a key is not found - it just returns nil. -// - Delete doesn't error if the key doesn't exist - it just no-ops. -// -// Similarly: -// - Batch doesn't error if any of the above happens for either retrieved or updated keys -// -// This also provides a way to differentiate data operations -// -// [overwrite | not-found | no-op] from "real" problems -type Client interface { - - // Get will retrieve data from storage that corresponds to the - // specified key. It should return (nil, nil) if not found - Get(ctx context.Context, key string) ([]byte, error) - - // Set will store data. The data can be retrieved by the same - // component after a process restart, using the same key - Set(ctx context.Context, key string, value []byte) error - - // Delete will delete data associated with the specified key - Delete(ctx context.Context, key string) error - - // Batch handles specified operations in batch. Get operation results are put in-place - Batch(ctx context.Context, ops ...Operation) error - - // Close will release any resources held by the client - Close(ctx context.Context) error -} - -type opType int - -const ( - Get opType = iota - Set - Delete -) - -type operation struct { - // Key specifies key which is going to be get/set/deleted - Key string - // Value specifies value that is going to be set or holds result of get operation - Value []byte - // Type describes the operation type - Type opType -} - -type Operation *operation - -func SetOperation(key string, value []byte) Operation { - return &operation{ - Key: key, - Value: value, - Type: Set, - } -} - -func GetOperation(key string) Operation { - return &operation{ - Key: key, - Type: Get, - } -} - -func DeleteOperation(key string) Operation { - return &operation{ - Key: key, - Type: Delete, - } -} diff --git a/vendor/go.opentelemetry.io/collector/extension/extension.go b/vendor/go.opentelemetry.io/collector/extension/extension.go index 292358b33ba..5bc630a9e4d 100644 --- a/vendor/go.opentelemetry.io/collector/extension/extension.go +++ b/vendor/go.opentelemetry.io/collector/extension/extension.go @@ -8,7 +8,6 @@ import ( "fmt" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/confmap" ) // Extension is the interface for objects hosted by the OpenTelemetry Collector that @@ -16,54 +15,17 @@ import ( // to the service, examples: health check endpoint, z-pages, etc. type Extension = component.Component -// Dependent is an optional interface that can be implemented by extensions -// that depend on other extensions and must be started only after their dependencies. -// See https://github.com/open-telemetry/opentelemetry-collector/pull/8768 for examples. -type Dependent interface { - Extension - Dependencies() []component.ID +// ModuleInfo describes the go module for each component. +type ModuleInfo struct { + Receiver map[component.Type]string + Processor map[component.Type]string + Exporter map[component.Type]string + Extension map[component.Type]string + Connector map[component.Type]string } -// PipelineWatcher is an extra interface for Extension hosted by the OpenTelemetry -// Collector that is to be implemented by extensions interested in changes to pipeline -// states. Typically this will be used by extensions that change their behavior if data is -// being ingested or not, e.g.: a k8s readiness probe. 
-type PipelineWatcher interface { - // Ready notifies the Extension that all pipelines were built and the - // receivers were started, i.e.: the service is ready to receive data - // (note that it may already have received data when this method is called). - Ready() error - - // NotReady notifies the Extension that all receivers are about to be stopped, - // i.e.: pipeline receivers will not accept new data. - // This is sent before receivers are stopped, so the Extension can take any - // appropriate actions before that happens. - NotReady() error -} - -// ConfigWatcher is an interface that should be implemented by an extension that -// wishes to be notified of the Collector's effective configuration. -type ConfigWatcher interface { - // NotifyConfig notifies the extension of the Collector's current effective configuration. - // The extension owns the `confmap.Conf`. Callers must ensure that it's safe for - // extensions to store the `conf` pointer and use it concurrently with any other - // instances of `conf`. - NotifyConfig(ctx context.Context, conf *confmap.Conf) error -} - -// StatusWatcher is an extra interface for Extension hosted by the OpenTelemetry -// Collector that is to be implemented by extensions interested in changes to component -// status. -type StatusWatcher interface { - // ComponentStatusChanged notifies about a change in the source component status. - // Extensions that implement this interface must be ready that the ComponentStatusChanged - // may be called before, after or concurrently with calls to Component.Start() and Component.Shutdown(). - // The function may be called concurrently with itself. - ComponentStatusChanged(source *component.InstanceID, event *component.StatusEvent) -} - -// CreateSettings is passed to Factory.Create(...) function. -type CreateSettings struct { +// Settings is passed to Factory.Create(...) function. +type Settings struct { // ID returns the ID of the component that will be created. ID component.ID @@ -71,24 +33,27 @@ type CreateSettings struct { // BuildInfo can be used by components for informational purposes BuildInfo component.BuildInfo + + // ModuleInfo describes the go module for each component. + ModuleInfo ModuleInfo } // CreateFunc is the equivalent of Factory.Create(...) function. -type CreateFunc func(context.Context, CreateSettings, component.Config) (Extension, error) +type CreateFunc func(context.Context, Settings, component.Config) (Extension, error) -// CreateExtension implements Factory.Create. -func (f CreateFunc) CreateExtension(ctx context.Context, set CreateSettings, cfg component.Config) (Extension, error) { +// Create implements Factory.Create. +func (f CreateFunc) Create(ctx context.Context, set Settings, cfg component.Config) (Extension, error) { return f(ctx, set, cfg) } type Factory interface { component.Factory - // CreateExtension creates an extension based on the given config. - CreateExtension(ctx context.Context, set CreateSettings, cfg component.Config) (Extension, error) + // Create an extension based on the given config. + Create(ctx context.Context, set Settings, cfg component.Config) (Extension, error) - // ExtensionStability gets the stability level of the Extension. - ExtensionStability() component.StabilityLevel + // Stability gets the stability level of the Extension. 
+ Stability() component.StabilityLevel unexportedFactoryFunc() } @@ -106,7 +71,7 @@ func (f *factory) Type() component.Type { func (f *factory) unexportedFactoryFunc() {} -func (f *factory) ExtensionStability() component.StabilityLevel { +func (f *factory) Stability() component.StabilityLevel { return f.extensionStability } @@ -115,7 +80,8 @@ func NewFactory( cfgType component.Type, createDefaultConfig component.CreateDefaultConfigFunc, createServiceExtension CreateFunc, - sl component.StabilityLevel) Factory { + sl component.StabilityLevel, +) Factory { return &factory{ cfgType: cfgType, CreateDefaultConfigFunc: createDefaultConfig, @@ -136,39 +102,3 @@ func MakeFactoryMap(factories ...Factory) (map[component.Type]Factory, error) { } return fMap, nil } - -// Builder extension is a helper struct that given a set of Configs and Factories helps with creating extensions. -type Builder struct { - cfgs map[component.ID]component.Config - factories map[component.Type]Factory -} - -// NewBuilder creates a new extension.Builder to help with creating components form a set of configs and factories. -func NewBuilder(cfgs map[component.ID]component.Config, factories map[component.Type]Factory) *Builder { - return &Builder{cfgs: cfgs, factories: factories} -} - -// Create creates an extension based on the settings and configs available. -func (b *Builder) Create(ctx context.Context, set CreateSettings) (Extension, error) { - cfg, existsCfg := b.cfgs[set.ID] - if !existsCfg { - return nil, fmt.Errorf("extension %q is not configured", set.ID) - } - - f, existsFactory := b.factories[set.ID.Type()] - if !existsFactory { - return nil, fmt.Errorf("extension factory not available for: %q", set.ID) - } - - sl := f.ExtensionStability() - if sl >= component.StabilityLevelAlpha { - set.Logger.Debug(sl.LogMessage()) - } else { - set.Logger.Info(sl.LogMessage()) - } - return f.CreateExtension(ctx, set, cfg) -} - -func (b *Builder) Factory(componentType component.Type) component.Factory { - return b.factories[componentType] -} diff --git a/vendor/go.opentelemetry.io/collector/extension/extensioncapabilities/LICENSE b/vendor/go.opentelemetry.io/collector/extension/extensioncapabilities/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/extension/extensioncapabilities/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
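For orientation alongside the extension.go hunk above: CreateSettings becomes Settings, CreateExtension becomes Create, and ExtensionStability becomes Stability, so a factory built against the new surface reads as in the following minimal Go sketch (the noopext package, Config type, and "noop" component type are illustrative only, not taken from this diff):

package noopext

import (
	"context"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/extension"
)

// Config is an illustrative, empty extension config.
type Config struct{}

// noopExtension satisfies extension.Extension via the embedded
// nil-safe no-op Start/Shutdown funcs.
type noopExtension struct {
	component.StartFunc
	component.ShutdownFunc
}

// NewFactory targets the renamed surface: extension.Settings replaces
// CreateSettings, and the create callback now backs Factory.Create
// (formerly CreateExtension).
func NewFactory() extension.Factory {
	return extension.NewFactory(
		component.MustNewType("noop"), // illustrative component type
		func() component.Config { return &Config{} },
		func(_ context.Context, _ extension.Settings, _ component.Config) (extension.Extension, error) {
			return &noopExtension{}, nil
		},
		component.StabilityLevelDevelopment,
	)
}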
diff --git a/vendor/go.opentelemetry.io/collector/extension/extensioncapabilities/Makefile b/vendor/go.opentelemetry.io/collector/extension/extensioncapabilities/Makefile new file mode 100644 index 00000000000..ded7a36092d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/extension/extensioncapabilities/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/extension/extensioncapabilities/interfaces.go b/vendor/go.opentelemetry.io/collector/extension/extensioncapabilities/interfaces.go new file mode 100644 index 00000000000..370210970ed --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/extension/extensioncapabilities/interfaces.go @@ -0,0 +1,49 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package extensioncapabilities provides interfaces that can be implemented by extensions +// to provide additional capabilities. +package extensioncapabilities // import "go.opentelemetry.io/collector/extension/extensioncapabilities" + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/extension" +) + +// Dependent is an optional interface that can be implemented by extensions +// that depend on other extensions and must be started only after their dependencies. +// See https://github.com/open-telemetry/opentelemetry-collector/pull/8768 for examples. +type Dependent interface { + extension.Extension + Dependencies() []component.ID +} + +// PipelineWatcher is an extra interface for Extension hosted by the OpenTelemetry +// Collector that is to be implemented by extensions interested in changes to pipeline +// states. Typically this will be used by extensions that change their behavior if data is +// being ingested or not, e.g.: a k8s readiness probe. +type PipelineWatcher interface { + // Ready notifies the Extension that all pipelines were built and the + // receivers were started, i.e.: the service is ready to receive data + // (note that it may already have received data when this method is called). + Ready() error + + // NotReady notifies the Extension that all receivers are about to be stopped, + // i.e.: pipeline receivers will not accept new data. + // This is sent before receivers are stopped, so the Extension can take any + // appropriate actions before that happens. + NotReady() error +} + +// ConfigWatcher is an interface that should be implemented by an extension that +// wishes to be notified of the Collector's effective configuration. +type ConfigWatcher interface { + // NotifyConfig notifies the extension of the Collector's current effective configuration. + // The extension owns the `confmap.Conf`. Callers must ensure that it's safe for + // extensions to store the `conf` pointer and use it concurrently with any other + // instances of `conf`. + NotifyConfig(ctx context.Context, conf *confmap.Conf) error +} diff --git a/vendor/go.opentelemetry.io/collector/extension/extensiontest/LICENSE b/vendor/go.opentelemetry.io/collector/extension/extensiontest/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/extension/extensiontest/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
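The watcher interfaces relocated into extensioncapabilities above keep their previous method sets, so existing implementations only swap an import path. As a hedged sketch of the k8s readiness-probe pattern the PipelineWatcher docs call out (package and type names here are invented for illustration):

package readiness

import (
	"context"
	"sync/atomic"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/extension/extensioncapabilities"
)

// probeExtension reports ready only between Ready() and NotReady(),
// mirroring the readiness-probe use case from the package docs.
type probeExtension struct {
	ready atomic.Bool
}

func (e *probeExtension) Start(context.Context, component.Host) error { return nil }
func (e *probeExtension) Shutdown(context.Context) error              { return nil }

// Ready and NotReady satisfy extensioncapabilities.PipelineWatcher.
func (e *probeExtension) Ready() error    { e.ready.Store(true); return nil }
func (e *probeExtension) NotReady() error { e.ready.Store(false); return nil }

var _ extensioncapabilities.PipelineWatcher = (*probeExtension)(nil)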
diff --git a/vendor/go.opentelemetry.io/collector/extension/extensiontest/Makefile b/vendor/go.opentelemetry.io/collector/extension/extensiontest/Makefile new file mode 100644 index 00000000000..ded7a36092d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/extension/extensiontest/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/extension/extensiontest/nop_extension.go b/vendor/go.opentelemetry.io/collector/extension/extensiontest/nop_extension.go new file mode 100644 index 00000000000..6fef44d513b --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/extension/extensiontest/nop_extension.go @@ -0,0 +1,48 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package extensiontest // import "go.opentelemetry.io/collector/extension/extensiontest" + +import ( + "context" + + "github.com/google/uuid" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/extension" +) + +var nopType = component.MustNewType("nop") + +// NewNopSettings returns a new nop settings for extension.Factory Create* functions. +func NewNopSettings() extension.Settings { + return extension.Settings{ + ID: component.NewIDWithName(nopType, uuid.NewString()), + TelemetrySettings: componenttest.NewNopTelemetrySettings(), + BuildInfo: component.NewDefaultBuildInfo(), + } +} + +// NewNopFactory returns an extension.Factory that constructs nop extensions. +func NewNopFactory() extension.Factory { + return extension.NewFactory( + nopType, + func() component.Config { + return &nopConfig{} + }, + func(context.Context, extension.Settings, component.Config) (extension.Extension, error) { + return nopInstance, nil + }, + component.StabilityLevelStable) +} + +type nopConfig struct{} + +var nopInstance = &nopExtension{} + +// nopExtension acts as an extension for testing purposes. +type nopExtension struct { + component.StartFunc + component.ShutdownFunc +} diff --git a/vendor/go.opentelemetry.io/collector/extension/xextension/LICENSE b/vendor/go.opentelemetry.io/collector/extension/xextension/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/extension/xextension/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/collector/extension/experimental/storage/README.md b/vendor/go.opentelemetry.io/collector/extension/xextension/storage/README.md similarity index 100% rename from vendor/go.opentelemetry.io/collector/extension/experimental/storage/README.md rename to vendor/go.opentelemetry.io/collector/extension/xextension/storage/README.md diff --git a/vendor/go.opentelemetry.io/collector/extension/xextension/storage/doc.go b/vendor/go.opentelemetry.io/collector/extension/xextension/storage/doc.go new file mode 100644 index 00000000000..2021129bf09 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/extension/xextension/storage/doc.go @@ -0,0 +1,6 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package storage implements an extension that can +// persist state beyond the collector process. 
+package storage // import "go.opentelemetry.io/collector/extension/xextension/storage" diff --git a/vendor/go.opentelemetry.io/collector/extension/experimental/storage/nop_client.go b/vendor/go.opentelemetry.io/collector/extension/xextension/storage/nop_client.go similarity index 91% rename from vendor/go.opentelemetry.io/collector/extension/experimental/storage/nop_client.go rename to vendor/go.opentelemetry.io/collector/extension/xextension/storage/nop_client.go index 587767fd96a..d105e6cf698 100644 --- a/vendor/go.opentelemetry.io/collector/extension/experimental/storage/nop_client.go +++ b/vendor/go.opentelemetry.io/collector/extension/xextension/storage/nop_client.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package storage // import "go.opentelemetry.io/collector/extension/experimental/storage" +package storage // import "go.opentelemetry.io/collector/extension/xextension/storage" import "context" @@ -35,6 +35,6 @@ func (c nopClient) Close(context.Context) error { } // Batch does nothing, and returns nil, nil -func (c nopClient) Batch(context.Context, ...Operation) error { +func (c nopClient) Batch(context.Context, ...*Operation) error { return nil // no result, but no problem } diff --git a/vendor/go.opentelemetry.io/collector/extension/xextension/storage/storage.go b/vendor/go.opentelemetry.io/collector/extension/xextension/storage/storage.go new file mode 100644 index 00000000000..dd707f71673 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/extension/xextension/storage/storage.go @@ -0,0 +1,93 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package storage // import "go.opentelemetry.io/collector/extension/xextension/storage" + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/extension" +) + +// Extension is the interface that storage extensions must implement +type Extension interface { + extension.Extension + + // GetClient will create a client for use by the specified component. + // Each component can have multiple storages (e.g. one for each signal), + // which can be identified using storageName parameter. + // The component can use the client to manage state + GetClient(ctx context.Context, kind component.Kind, id component.ID, storageName string) (Client, error) +} + +// Client is the interface that storage clients must implement +// All methods should return error only if a problem occurred. +// This mirrors the behavior of a golang map: +// - Set doesn't error if a key already exists - it just overwrites the value. +// - Get doesn't error if a key is not found - it just returns nil. +// - Delete doesn't error if the key doesn't exist - it just no-ops. +// +// Similarly: +// - Batch doesn't error if any of the above happens for either retrieved or updated keys +// +// This also provides a way to differentiate data operations +// +// [overwrite | not-found | no-op] from "real" problems +type Client interface { + // Get will retrieve data from storage that corresponds to the + // specified key. It should return (nil, nil) if not found + Get(ctx context.Context, key string) ([]byte, error) + + // Set will store data. 
The data can be retrieved by the same + // component after a process restart, using the same key + Set(ctx context.Context, key string, value []byte) error + + // Delete will delete data associated with the specified key + Delete(ctx context.Context, key string) error + + // Batch handles specified operations in batch. Get operation results are put in-place + Batch(ctx context.Context, ops ...*Operation) error + + // Close will release any resources held by the client + Close(ctx context.Context) error +} + +type OpType int + +const ( + Get OpType = iota + Set + Delete +) + +type Operation struct { + // Key specifies key which is going to be get/set/deleted + Key string + // Value specifies value that is going to be set or holds result of get operation + Value []byte + // Type describes the operation type + Type OpType +} + +func SetOperation(key string, value []byte) *Operation { + return &Operation{ + Key: key, + Value: value, + Type: Set, + } +} + +func GetOperation(key string) *Operation { + return &Operation{ + Key: key, + Type: Get, + } +} + +func DeleteOperation(key string) *Operation { + return &Operation{ + Key: key, + Type: Delete, + } +} diff --git a/vendor/go.opentelemetry.io/collector/featuregate/flag.go b/vendor/go.opentelemetry.io/collector/featuregate/flag.go index 95012968126..1c6f3a5e873 100644 --- a/vendor/go.opentelemetry.io/collector/featuregate/flag.go +++ b/vendor/go.opentelemetry.io/collector/featuregate/flag.go @@ -31,6 +31,12 @@ type flagValue struct { } func (f *flagValue) String() string { + // This function can be called by isZeroValue https://github.com/golang/go/blob/go1.23.3/src/flag/flag.go#L630 + // which creates an instance of flagValue using reflect.New. In this case, the field `reg` is nil. + if f.reg == nil { + return "" + } + var ids []string f.reg.VisitAll(func(g *Gate) { id := g.ID() diff --git a/vendor/go.opentelemetry.io/collector/featuregate/registry.go b/vendor/go.opentelemetry.io/collector/featuregate/registry.go index 69486d623ee..9309024c38b 100644 --- a/vendor/go.opentelemetry.io/collector/featuregate/registry.go +++ b/vendor/go.opentelemetry.io/collector/featuregate/registry.go @@ -23,10 +23,8 @@ var ( idRegexp = regexp.MustCompile(`^[0-9a-zA-Z\.]*$`) ) -var ( - // ErrAlreadyRegistered is returned when adding a Gate that is already registered. - ErrAlreadyRegistered = errors.New("gate is already registered") -) +// ErrAlreadyRegistered is returned when adding a Gate that is already registered. +var ErrAlreadyRegistered = errors.New("gate is already registered") // GlobalRegistry returns the global Registry. func GlobalRegistry() *Registry { @@ -118,11 +116,11 @@ func (r *Registry) MustRegister(id string, stage Stage, opts ...RegisterOption) func validateID(id string) error { if id == "" { - return fmt.Errorf("empty ID") + return errors.New("empty ID") } if !idRegexp.MatchString(id) { - return fmt.Errorf("invalid character(s) in ID") + return errors.New("invalid character(s) in ID") } return nil } diff --git a/vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/LICENSE b/vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
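As a concrete illustration of the appendix above: applied to a Go source file, the filled-in notice becomes a header comment like the one below (the year and owner are placeholders, not values taken from this patch).

// Copyright 2024 Example Author
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.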
diff --git a/vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/Makefile b/vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/Makefile new file mode 100644 index 00000000000..ded7a36092d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/logs.go b/vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/logs.go index bff5c1a897e..0574ee3194f 100644 --- a/vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/logs.go +++ b/vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/logs.go @@ -15,7 +15,7 @@ import ( ) // NewLogs wraps multiple log consumers in a single one. -// It fanouts the incoming data to all the consumers, and does smart routing: +// It fans out the incoming data to all the consumers, and does smart routing: // - Clones only to the consumer that needs to mutate the data. // - If all consumers needs to mutate the data one will get the original mutable data. func NewLogs(lcs []consumer.Logs) consumer.Logs { diff --git a/vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/metrics.go b/vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/metrics.go index 32d9514561d..57cf1066810 100644 --- a/vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/metrics.go +++ b/vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/metrics.go @@ -13,7 +13,7 @@ import ( ) // NewMetrics wraps multiple metrics consumers in a single one. -// It fanouts the incoming data to all the consumers, and does smart routing: +// It fans out the incoming data to all the consumers, and does smart routing: // - Clones only to the consumer that needs to mutate the data. // - If all consumers needs to mutate the data one will get the original mutable data. func NewMetrics(mcs []consumer.Metrics) consumer.Metrics { diff --git a/vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/profiles.go b/vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/profiles.go new file mode 100644 index 00000000000..17c6d1dce81 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/profiles.go @@ -0,0 +1,82 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package fanoutconsumer // import "go.opentelemetry.io/collector/internal/fanoutconsumer" + +import ( + "context" + + "go.uber.org/multierr" + + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/xconsumer" + "go.opentelemetry.io/collector/pdata/pprofile" +) + +// NewProfiles wraps multiple profile consumers in a single one. +// It fans out the incoming data to all the consumers, and does smart routing: +// - Clones only to the consumer that needs to mutate the data. +// - If all consumers needs to mutate the data one will get the original mutable data. +func NewProfiles(tcs []xconsumer.Profiles) xconsumer.Profiles { + // Don't wrap if there is only one non-mutating consumer. 
+ if len(tcs) == 1 && !tcs[0].Capabilities().MutatesData { + return tcs[0] + } + + tc := &profilesConsumer{} + for i := 0; i < len(tcs); i++ { + if tcs[i].Capabilities().MutatesData { + tc.mutable = append(tc.mutable, tcs[i]) + } else { + tc.readonly = append(tc.readonly, tcs[i]) + } + } + return tc +} + +type profilesConsumer struct { + mutable []xconsumer.Profiles + readonly []xconsumer.Profiles +} + +func (tsc *profilesConsumer) Capabilities() consumer.Capabilities { + // If all consumers are mutating, then the original data will be passed to one of them. + return consumer.Capabilities{MutatesData: len(tsc.mutable) > 0 && len(tsc.readonly) == 0} +} + +// ConsumeProfiles exports the pprofile.Profiles to all consumers wrapped by the current one. +func (tsc *profilesConsumer) ConsumeProfiles(ctx context.Context, td pprofile.Profiles) error { + var errs error + + if len(tsc.mutable) > 0 { + // Clone the data before sending to all mutating consumers except the last one. + for i := 0; i < len(tsc.mutable)-1; i++ { + errs = multierr.Append(errs, tsc.mutable[i].ConsumeProfiles(ctx, cloneProfiles(td))) + } + // Send data as is to the last mutating consumer only if there are no other non-mutating consumers and the + // data is mutable. Never share the same data between a mutating and a non-mutating consumer since the + // non-mutating consumer may process data async and the mutating consumer may change the data before that. + lastConsumer := tsc.mutable[len(tsc.mutable)-1] + if len(tsc.readonly) == 0 && !td.IsReadOnly() { + errs = multierr.Append(errs, lastConsumer.ConsumeProfiles(ctx, td)) + } else { + errs = multierr.Append(errs, lastConsumer.ConsumeProfiles(ctx, cloneProfiles(td))) + } + } + + // Mark the data as read-only if it will be sent to more than one read-only consumer. + if len(tsc.readonly) > 1 && !td.IsReadOnly() { + td.MarkReadOnly() + } + for _, tc := range tsc.readonly { + errs = multierr.Append(errs, tc.ConsumeProfiles(ctx, td)) + } + + return errs +} + +func cloneProfiles(td pprofile.Profiles) pprofile.Profiles { + clonedProfiles := pprofile.NewProfiles() + td.CopyTo(clonedProfiles) + return clonedProfiles +} diff --git a/vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/traces.go b/vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/traces.go index f9a34027017..ff415e9db9b 100644 --- a/vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/traces.go +++ b/vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/traces.go @@ -13,7 +13,7 @@ import ( ) // NewTraces wraps multiple trace consumers in a single one. -// It fanouts the incoming data to all the consumers, and does smart routing: +// It fans out the incoming data to all the consumers, and does smart routing: // - Clones only to the consumer that needs to mutate the data. // - If all consumers needs to mutate the data one will get the original mutable data. func NewTraces(tcs []consumer.Traces) consumer.Traces { diff --git a/vendor/go.opentelemetry.io/collector/internal/localhostgate/featuregate.go b/vendor/go.opentelemetry.io/collector/internal/localhostgate/featuregate.go deleted file mode 100644 index 78c19622967..00000000000 --- a/vendor/go.opentelemetry.io/collector/internal/localhostgate/featuregate.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// package localhostgate defines a feature gate that controls whether server-like receivers and extensions use localhost as the default host for their endpoints. 
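The ConsumeProfiles routing above is easier to follow in isolation. The standalone sketch below (toy types only; consumer, fanOut, and the []int payload are illustrative, not the collector's API) reproduces the rule: each mutating consumer except the last receives a clone, the last receives the original only when no read-only consumer shares it, and all read-only consumers share a single payload.

package main

import "fmt"

type consumer struct {
	name    string
	mutates bool
}

// fanOut mirrors the clone-before-send rule of the fanout consumers: clone
// for every mutating consumer except the last, and for the last one too if
// any read-only consumer still needs the original untouched.
func fanOut(data []int, consumers []consumer) {
	var mutable, readonly []consumer
	for _, c := range consumers {
		if c.mutates {
			mutable = append(mutable, c)
		} else {
			readonly = append(readonly, c)
		}
	}
	for i, c := range mutable {
		payload := data
		if i < len(mutable)-1 || len(readonly) > 0 {
			payload = append([]int(nil), data...) // clone
		}
		fmt.Printf("%s: clone=%v\n", c.name, &payload[0] != &data[0])
	}
	for _, c := range readonly {
		fmt.Printf("%s: shares the original\n", c.name)
	}
}

func main() {
	fanOut([]int{1, 2, 3}, []consumer{
		{name: "mutating-A", mutates: true},
		{name: "mutating-B", mutates: true},
		{name: "read-only-C", mutates: false},
	})
	// Both mutating consumers get clones here, because read-only-C must
	// still observe the original, unmutated payload.
}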
-// This package is duplicated across core and contrib to avoid exposing the feature gate as part of the public API. -// To do this we define a `registerOrLoad` helper and try to register the gate in both modules. -// IMPORTANT NOTE: ANY CHANGES TO THIS PACKAGE MUST BE MIRRORED IN THE CONTRIB COUNTERPART. -package localhostgate // import "go.opentelemetry.io/collector/internal/localhostgate" - -import ( - "errors" - "fmt" - - "go.uber.org/zap" - - "go.opentelemetry.io/collector/featuregate" -) - -const UseLocalHostAsDefaultHostID = "component.UseLocalHostAsDefaultHost" - -// UseLocalHostAsDefaultHostfeatureGate is the feature gate that controls whether -// server-like receivers and extensions such as the OTLP receiver use localhost as the default host for their endpoints. -var UseLocalHostAsDefaultHostfeatureGate = mustRegisterOrLoad( - featuregate.GlobalRegistry(), - UseLocalHostAsDefaultHostID, - featuregate.StageAlpha, - featuregate.WithRegisterDescription("controls whether server-like receivers and extensions such as the OTLP receiver use localhost as the default host for their endpoints"), -) - -// mustRegisterOrLoad tries to register the feature gate and loads it if it already exists. -// It panics on any other error. -func mustRegisterOrLoad(reg *featuregate.Registry, id string, stage featuregate.Stage, opts ...featuregate.RegisterOption) *featuregate.Gate { - gate, err := reg.Register(id, stage, opts...) - - if errors.Is(err, featuregate.ErrAlreadyRegistered) { - // Gate is already registered; find it. - // Only a handful of feature gates are registered, so it's fine to iterate over all of them. - reg.VisitAll(func(g *featuregate.Gate) { - if g.ID() == id { - gate = g - return - } - }) - } else if err != nil { - panic(err) - } - - return gate -} - -// EndpointForPort gets the endpoint for a given port using localhost or 0.0.0.0 depending on the feature gate. -func EndpointForPort(port int) string { - host := "localhost" - if !UseLocalHostAsDefaultHostfeatureGate.IsEnabled() { - host = "0.0.0.0" - } - return fmt.Sprintf("%s:%d", host, port) -} - -// LogAboutUseLocalHostAsDefault logs about the upcoming change from 0.0.0.0 to localhost on server-like components. -func LogAboutUseLocalHostAsDefault(logger *zap.Logger) { - if !UseLocalHostAsDefaultHostfeatureGate.IsEnabled() { - logger.Warn( - "The default endpoints for all servers in components will change to use localhost instead of 0.0.0.0 in a future version. Use the feature gate to preview the new default.", - zap.String("feature gate ID", UseLocalHostAsDefaultHostID), - ) - } -} diff --git a/vendor/go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics/obs_exporter.go b/vendor/go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics/obs_exporter.go deleted file mode 100644 index b87270f5e5a..00000000000 --- a/vendor/go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics/obs_exporter.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package obsmetrics // import "go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics" - -const ( - // ExporterKey used to identify exporters in metrics and traces. - ExporterKey = "exporter" - - // SentSpansKey used to track spans sent by exporters. - SentSpansKey = "sent_spans" - // FailedToSendSpansKey used to track spans that failed to be sent by exporters. - FailedToSendSpansKey = "send_failed_spans" - // FailedToEnqueueSpansKey used to track spans that failed to be enqueued by exporters. 
- FailedToEnqueueSpansKey = "enqueue_failed_spans" - - // SentMetricPointsKey used to track metric points sent by exporters. - SentMetricPointsKey = "sent_metric_points" - // FailedToSendMetricPointsKey used to track metric points that failed to be sent by exporters. - FailedToSendMetricPointsKey = "send_failed_metric_points" - // FailedToEnqueueMetricPointsKey used to track metric points that failed to be enqueued by exporters. - FailedToEnqueueMetricPointsKey = "enqueue_failed_metric_points" - - // SentLogRecordsKey used to track logs sent by exporters. - SentLogRecordsKey = "sent_log_records" - // FailedToSendLogRecordsKey used to track logs that failed to be sent by exporters. - FailedToSendLogRecordsKey = "send_failed_log_records" - // FailedToEnqueueLogRecordsKey used to track logs that failed to be enqueued by exporters. - FailedToEnqueueLogRecordsKey = "enqueue_failed_log_records" -) - -var ( - ExporterPrefix = ExporterKey + SpanNameSep - ExporterMetricPrefix = ExporterKey + MetricNameSep - ExportTraceDataOperationSuffix = SpanNameSep + "traces" - ExportMetricsOperationSuffix = SpanNameSep + "metrics" - ExportLogsOperationSuffix = SpanNameSep + "logs" -) diff --git a/vendor/go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics/obs_processor.go b/vendor/go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics/obs_processor.go deleted file mode 100644 index 3cbd8fc4a83..00000000000 --- a/vendor/go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics/obs_processor.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package obsmetrics // import "go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics" - -const ( - // ProcessorKey is the key used to identify processors in metrics and traces. - ProcessorKey = "processor" - - // DroppedSpansKey is the key used to identify spans dropped by the Collector. - DroppedSpansKey = "dropped_spans" - - // DroppedMetricPointsKey is the key used to identify metric points dropped by the Collector. - DroppedMetricPointsKey = "dropped_metric_points" - - // DroppedLogRecordsKey is the key used to identify log records dropped by the Collector. - DroppedLogRecordsKey = "dropped_log_records" -) - -var ( - ProcessorMetricPrefix = ProcessorKey + MetricNameSep -) diff --git a/vendor/go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics/obs_receiver.go b/vendor/go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics/obs_receiver.go deleted file mode 100644 index bcb6306f81b..00000000000 --- a/vendor/go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics/obs_receiver.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package obsmetrics // import "go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics" - -const ( - // ReceiverKey used to identify receivers in metrics and traces. - ReceiverKey = "receiver" - // TransportKey used to identify the transport used to received the data. - TransportKey = "transport" - // FormatKey used to identify the format of the data received. - FormatKey = "format" - - // AcceptedSpansKey used to identify spans accepted by the Collector. - AcceptedSpansKey = "accepted_spans" - // RefusedSpansKey used to identify spans refused (ie.: not ingested) by the Collector. - RefusedSpansKey = "refused_spans" - - // AcceptedMetricPointsKey used to identify metric points accepted by the Collector. 
- AcceptedMetricPointsKey = "accepted_metric_points" - // RefusedMetricPointsKey used to identify metric points refused (ie.: not ingested) by the - // Collector. - RefusedMetricPointsKey = "refused_metric_points" - - // AcceptedLogRecordsKey used to identify log records accepted by the Collector. - AcceptedLogRecordsKey = "accepted_log_records" - // RefusedLogRecordsKey used to identify log records refused (ie.: not ingested) by the - // Collector. - RefusedLogRecordsKey = "refused_log_records" -) - -var ( - ReceiverPrefix = ReceiverKey + SpanNameSep - ReceiverMetricPrefix = ReceiverKey + MetricNameSep - ReceiveTraceDataOperationSuffix = SpanNameSep + "TraceDataReceived" - ReceiverMetricsOperationSuffix = SpanNameSep + "MetricsReceived" - ReceiverLogsOperationSuffix = SpanNameSep + "LogsReceived" -) diff --git a/vendor/go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics/obs_scraper.go b/vendor/go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics/obs_scraper.go deleted file mode 100644 index 4254891f33a..00000000000 --- a/vendor/go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics/obs_scraper.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package obsmetrics // import "go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics" - -const ( - // ScraperKey used to identify scrapers in metrics and traces. - ScraperKey = "scraper" - - // ScrapedMetricPointsKey used to identify metric points scraped by the - // Collector. - ScrapedMetricPointsKey = "scraped_metric_points" - // ErroredMetricPointsKey used to identify metric points errored (i.e. - // unable to be scraped) by the Collector. - ErroredMetricPointsKey = "errored_metric_points" -) - -const ( - ScraperPrefix = ScraperKey + SpanNameSep - ScraperMetricPrefix = ScraperKey + MetricNameSep - ScraperMetricsOperationSuffix = SpanNameSep + "MetricsScraped" -) diff --git a/vendor/go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics/obsmetrics.go b/vendor/go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics/obsmetrics.go deleted file mode 100644 index 02ce450d3a9..00000000000 --- a/vendor/go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics/obsmetrics.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package obsmetrics defines the obsreport metrics for each components -// all the metrics is in OpenCensus format which will be replaced with OTEL Metrics -// in the future -package obsmetrics // import "go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics" - -const ( - SpanNameSep = "/" - MetricNameSep = "_" - Scope = "go.opentelemetry.io/collector/obsreport" -) diff --git a/vendor/go.opentelemetry.io/collector/internal/obsreportconfig/obsreportconfig.go b/vendor/go.opentelemetry.io/collector/internal/obsreportconfig/obsreportconfig.go deleted file mode 100644 index dc5d4915569..00000000000 --- a/vendor/go.opentelemetry.io/collector/internal/obsreportconfig/obsreportconfig.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package obsreportconfig // import "go.opentelemetry.io/collector/internal/obsreportconfig" - -import ( - "go.opentelemetry.io/collector/featuregate" -) - -// DisableHighCardinalityMetricsfeatureGate is the feature gate that controls whether the collector should enable -// potentially high cardinality metrics. 
The gate will be removed when the collector allows for view configuration. -var DisableHighCardinalityMetricsfeatureGate = featuregate.GlobalRegistry().MustRegister( - "telemetry.disableHighCardinalityMetrics", - featuregate.StageAlpha, - featuregate.WithRegisterDescription("controls whether the collector should enable potentially high"+ - "cardinality metrics. The gate will be removed when the collector allows for view configuration.")) - -// UseOtelWithSDKConfigurationForInternalTelemetryFeatureGate is the feature gate that controls whether the collector -// supports configuring the OpenTelemetry SDK via configuration -var UseOtelWithSDKConfigurationForInternalTelemetryFeatureGate = featuregate.GlobalRegistry().MustRegister( - "telemetry.useOtelWithSDKConfigurationForInternalTelemetry", - featuregate.StageAlpha, - featuregate.WithRegisterDescription("controls whether the collector supports extended OpenTelemetry"+ - "configuration for internal telemetry")) diff --git a/vendor/go.opentelemetry.io/collector/internal/sharedcomponent/LICENSE b/vendor/go.opentelemetry.io/collector/internal/sharedcomponent/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/internal/sharedcomponent/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/go.opentelemetry.io/collector/internal/sharedcomponent/Makefile b/vendor/go.opentelemetry.io/collector/internal/sharedcomponent/Makefile new file mode 100644 index 00000000000..ded7a36092d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/internal/sharedcomponent/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/internal/sharedcomponent/sharedcomponent.go b/vendor/go.opentelemetry.io/collector/internal/sharedcomponent/sharedcomponent.go index 1a3e65878c2..d4b7bbdb80f 100644 --- a/vendor/go.opentelemetry.io/collector/internal/sharedcomponent/sharedcomponent.go +++ b/vendor/go.opentelemetry.io/collector/internal/sharedcomponent/sharedcomponent.go @@ -7,10 +7,12 @@ package sharedcomponent // import "go.opentelemetry.io/collector/internal/sharedcomponent" import ( + "container/ring" "context" "sync" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componentstatus" ) func NewMap[K comparable, V component.Component]() *Map[K, V] { @@ -27,21 +29,10 @@ type Map[K comparable, V component.Component] struct { // LoadOrStore returns the already created instance if exists, otherwise creates a new instance // and adds it to the map of references. -func (m *Map[K, V]) LoadOrStore(key K, create func() (V, error), telemetrySettings *component.TelemetrySettings) (*Component[V], error) { +func (m *Map[K, V]) LoadOrStore(key K, create func() (V, error)) (*Component[V], error) { m.lock.Lock() defer m.lock.Unlock() if c, ok := m.components[key]; ok { - // If we haven't already seen this telemetry settings, this shared component represents - // another instance. Wrap ReportStatus to report for all instances this shared - // component represents. - if _, ok := c.seenSettings[telemetrySettings]; !ok { - c.seenSettings[telemetrySettings] = struct{}{} - prev := c.telemetry.ReportStatus - c.telemetry.ReportStatus = func(ev *component.StatusEvent) { - telemetrySettings.ReportStatus(ev) - prev(ev) - } - } return c, nil } comp, err := create() @@ -56,10 +47,6 @@ func (m *Map[K, V]) LoadOrStore(key K, create func() (V, error), telemetrySettin defer m.lock.Unlock() delete(m.components, key) }, - telemetry: telemetrySettings, - seenSettings: map[*component.TelemetrySettings]struct{}{ - telemetrySettings: {}, - }, } m.components[key] = newComp return newComp, nil @@ -74,8 +61,7 @@ type Component[V component.Component] struct { stopOnce sync.Once removeFunc func() - telemetry *component.TelemetrySettings - seenSettings map[*component.TelemetrySettings]struct{} + hostWrapper *hostWrapper } // Unwrap returns the original component. @@ -85,18 +71,75 @@ func (c *Component[V]) Unwrap() V { // Start starts the underlying component if it never started before. func (c *Component[V]) Start(ctx context.Context, host component.Host) error { - var err error - c.startOnce.Do(func() { - // It's important that status for a shared component is reported through its - // telemetry settings to keep status in sync and avoid race conditions. This logic duplicates - // and takes priority over the automated status reporting that happens in graph, making the - // status reporting in graph a no-op. 
- c.telemetry.ReportStatus(component.NewStatusEvent(component.StatusStarting)) - if err = c.component.Start(ctx, host); err != nil { - c.telemetry.ReportStatus(component.NewPermanentErrorEvent(err)) + if c.hostWrapper == nil { + var err error + c.startOnce.Do(func() { + c.hostWrapper = &hostWrapper{ + host: host, + sources: make([]componentstatus.Reporter, 0), + previousEvents: ring.New(5), + } + statusReporter, isStatusReporter := host.(componentstatus.Reporter) + if isStatusReporter { + c.hostWrapper.addSource(statusReporter) + } + + // It's important that status for a shared component is reported through its + // telemetry settings to keep status in sync and avoid race conditions. This logic duplicates + // and takes priority over the automated status reporting that happens in graph, making the + // status reporting in graph a no-op. + c.hostWrapper.Report(componentstatus.NewEvent(componentstatus.StatusStarting)) + if err = c.component.Start(ctx, c.hostWrapper); err != nil { + c.hostWrapper.Report(componentstatus.NewPermanentErrorEvent(err)) + } + }) + return err + } + statusReporter, isStatusReporter := host.(componentstatus.Reporter) + if isStatusReporter { + c.hostWrapper.addSource(statusReporter) + } + return nil +} + +var ( + _ component.Host = (*hostWrapper)(nil) + _ componentstatus.Reporter = (*hostWrapper)(nil) +) + +type hostWrapper struct { + host component.Host + sources []componentstatus.Reporter + previousEvents *ring.Ring + lock sync.Mutex +} + +func (h *hostWrapper) GetExtensions() map[component.ID]component.Component { + return h.host.GetExtensions() +} + +func (h *hostWrapper) Report(e *componentstatus.Event) { + // Only remember an event if it will be emitted and it has not been sent already. + h.lock.Lock() + defer h.lock.Unlock() + if len(h.sources) > 0 { + h.previousEvents.Value = e + h.previousEvents = h.previousEvents.Next() + } + for _, s := range h.sources { + s.Report(e) + } +} + +func (h *hostWrapper) addSource(s componentstatus.Reporter) { + h.lock.Lock() + defer h.lock.Unlock() + h.previousEvents.Do(func(a any) { + if e, ok := a.(*componentstatus.Event); ok { + s.Report(e) } }) - return err + h.sources = append(h.sources, s) } // Shutdown shuts down the underlying component. @@ -107,12 +150,16 @@ func (c *Component[V]) Shutdown(ctx context.Context) error { // telemetry settings to keep status in sync and avoid race conditions. This logic duplicates // and takes priority over the automated status reporting that happens in graph, making the // status reporting in graph a no-op. 
- c.telemetry.ReportStatus(component.NewStatusEvent(component.StatusStopping)) + if c.hostWrapper != nil { + c.hostWrapper.Report(componentstatus.NewEvent(componentstatus.StatusStopping)) + } err = c.component.Shutdown(ctx) - if err != nil { - c.telemetry.ReportStatus(component.NewPermanentErrorEvent(err)) - } else { - c.telemetry.ReportStatus(component.NewStatusEvent(component.StatusStopped)) + if c.hostWrapper != nil { + if err != nil { + c.hostWrapper.Report(componentstatus.NewPermanentErrorEvent(err)) + } else { + c.hostWrapper.Report(componentstatus.NewEvent(componentstatus.StatusStopped)) + } } c.removeFunc() }) diff --git a/vendor/go.opentelemetry.io/collector/otelcol/buffered_core.go b/vendor/go.opentelemetry.io/collector/otelcol/buffered_core.go index 447bf13cb52..dd334a8454e 100644 --- a/vendor/go.opentelemetry.io/collector/otelcol/buffered_core.go +++ b/vendor/go.opentelemetry.io/collector/otelcol/buffered_core.go @@ -6,7 +6,7 @@ package otelcol // import "go.opentelemetry.io/collector/otelcol" import ( - "fmt" + "errors" "sync" "go.uber.org/zap/zapcore" @@ -55,7 +55,7 @@ func (bc *bufferedCore) Write(ent zapcore.Entry, fields []zapcore.Field) error { bc.mu.Lock() defer bc.mu.Unlock() if bc.logsTaken { - return fmt.Errorf("the buffered logs have already been taken so writing is no longer supported") + return errors.New("the buffered logs have already been taken so writing is no longer supported") } all := make([]zapcore.Field, 0, len(fields)+len(bc.context)) all = append(all, bc.context...) diff --git a/vendor/go.opentelemetry.io/collector/otelcol/collector.go b/vendor/go.opentelemetry.io/collector/otelcol/collector.go index 8f9839af6c8..dac8c860ec3 100644 --- a/vendor/go.opentelemetry.io/collector/otelcol/collector.go +++ b/vendor/go.opentelemetry.io/collector/otelcol/collector.go @@ -21,12 +21,8 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" - "go.opentelemetry.io/collector/connector" - "go.opentelemetry.io/collector/exporter" "go.opentelemetry.io/collector/extension" "go.opentelemetry.io/collector/otelcol/internal/grpclog" - "go.opentelemetry.io/collector/processor" - "go.opentelemetry.io/collector/receiver" "go.opentelemetry.io/collector/service" ) @@ -99,7 +95,7 @@ type Collector struct { serviceConfig *service.Config service *service.Service - state *atomic.Int32 + state *atomic.Int64 // shutdownChan is used to terminate the collector. 
shutdownChan chan struct{} @@ -125,8 +121,8 @@ func NewCollector(set CollectorSettings) (*Collector, error) { return nil, err } - state := &atomic.Int32{} - state.Store(int32(StateStarting)) + state := new(atomic.Int64) + state.Store(int64(StateStarting)) return &Collector{ set: set, state: state, @@ -152,7 +148,7 @@ func (col *Collector) Shutdown() { state := col.GetState() if state == StateRunning || state == StateStarting { defer func() { - recover() // nolint:errcheck + _ = recover() }() close(col.shutdownChan) } @@ -163,17 +159,6 @@ func (col *Collector) Shutdown() { func (col *Collector) setupConfigurationComponents(ctx context.Context) error { col.setCollectorState(StateStarting) - var conf *confmap.Conf - - if cp, ok := col.configProvider.(ConfmapProvider); ok { - var err error - conf, err = cp.GetConfmap(ctx) - - if err != nil { - return fmt.Errorf("failed to resolve config: %w", err) - } - } - factories, err := col.set.Factories() if err != nil { return fmt.Errorf("failed to initialize factories: %w", err) @@ -189,14 +174,34 @@ func (col *Collector) setupConfigurationComponents(ctx context.Context) error { col.serviceConfig = &cfg.Service + conf := confmap.New() + + if err = conf.Marshal(cfg); err != nil { + return fmt.Errorf("could not marshal configuration: %w", err) + } + col.service, err = service.New(ctx, service.Settings{ - BuildInfo: col.set.BuildInfo, - CollectorConf: conf, - Receivers: receiver.NewBuilder(cfg.Receivers, factories.Receivers), - Processors: processor.NewBuilder(cfg.Processors, factories.Processors), - Exporters: exporter.NewBuilder(cfg.Exporters, factories.Exporters), - Connectors: connector.NewBuilder(cfg.Connectors, factories.Connectors), - Extensions: extension.NewBuilder(cfg.Extensions, factories.Extensions), + BuildInfo: col.set.BuildInfo, + CollectorConf: conf, + + ReceiversConfigs: cfg.Receivers, + ReceiversFactories: factories.Receivers, + ProcessorsConfigs: cfg.Processors, + ProcessorsFactories: factories.Processors, + ExportersConfigs: cfg.Exporters, + ExportersFactories: factories.Exporters, + ConnectorsConfigs: cfg.Connectors, + ConnectorsFactories: factories.Connectors, + ExtensionsConfigs: cfg.Extensions, + ExtensionsFactories: factories.Extensions, + + ModuleInfo: extension.ModuleInfo{ + Receiver: factories.ReceiverModules, + Processor: factories.ProcessorModules, + Exporter: factories.ExporterModules, + Extension: factories.ExtensionModules, + Connector: factories.ConnectorModules, + }, AsyncErrorChannel: col.asyncErrorChannel, LoggingOptions: col.set.LoggingOptions, }, cfg.Service) @@ -361,5 +366,5 @@ func (col *Collector) shutdown(ctx context.Context) error { // setCollectorState provides current state of the collector func (col *Collector) setCollectorState(state State) { - col.state.Store(int32(state)) + col.state.Store(int64(state)) } diff --git a/vendor/go.opentelemetry.io/collector/otelcol/collector_windows.go b/vendor/go.opentelemetry.io/collector/otelcol/collector_windows.go index 3df08386bbf..ce52a4647d4 100644 --- a/vendor/go.opentelemetry.io/collector/otelcol/collector_windows.go +++ b/vendor/go.opentelemetry.io/collector/otelcol/collector_windows.go @@ -99,10 +99,7 @@ func (s *windowsService) start(elog *eventlog.Log, colErrorChannel chan error) e // only read at the time of the Run method call. To work around this, we pass the // serviceConfig as a pointer to the logging options, and then read its value // when the zap.Logger is created by the telemetry. 
- s.col.set.LoggingOptions = append( - s.col.set.LoggingOptions, - zap.WrapCore(withWindowsCore(elog, &s.col.serviceConfig)), - ) + s.col.set.LoggingOptions = loggingOptionsWithEventLogCore(elog, &s.col.serviceConfig, s.col.set.LoggingOptions) // col.Run blocks until receiving a SIGTERM signal, so needs to be started // asynchronously, but it will exit early if an error occurs on startup @@ -141,6 +138,20 @@ func openEventLog(serviceName string) (*eventlog.Log, error) { return elog, nil } +func loggingOptionsWithEventLogCore( + elog *eventlog.Log, + serviceConfig **service.Config, + userOptions []zap.Option, +) []zap.Option { + return append( + // The order below must be preserved - see PR #11051 + // The event log core must run *after* any user provided options, so it + // must be the first option in this list. + []zap.Option{zap.WrapCore(withWindowsCore(elog, serviceConfig))}, + userOptions..., + ) +} + var _ zapcore.Core = (*windowsEventLogCore)(nil) type windowsEventLogCore struct { @@ -214,6 +225,7 @@ func withWindowsCore(elog *eventlog.Log, serviceConfig **service.Config) func(za // Use the Windows Event Log encoderConfig := zap.NewProductionEncoderConfig() encoderConfig.LineEnding = "\r\n" + encoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder return windowsEventLogCore{core, elog, zapcore.NewConsoleEncoder(encoderConfig)} } } diff --git a/vendor/go.opentelemetry.io/collector/otelcol/command.go b/vendor/go.opentelemetry.io/collector/otelcol/command.go index 648b3a5ffda..6efd16df562 100644 --- a/vendor/go.opentelemetry.io/collector/otelcol/command.go +++ b/vendor/go.opentelemetry.io/collector/otelcol/command.go @@ -16,6 +16,7 @@ import ( // Any URIs specified in CollectorSettings.ConfigProviderSettings.ResolverSettings.URIs // are considered defaults and will be overwritten by config flags passed as // command-line arguments to the executable. +// At least one Provider must be set. func NewCommand(set CollectorSettings) *cobra.Command { flagSet := flags(featuregate.GlobalRegistry()) rootCmd := &cobra.Command{ @@ -52,11 +53,13 @@ func updateSettingsUsingFlags(set *CollectorSettings, flags *flag.FlagSet) error if len(resolverSet.URIs) == 0 { return errors.New("at least one config flag must be provided") } - // Provide a default set of providers and converters if none have been specified. - // TODO: Remove this after CollectorSettings.ConfigProvider is removed and instead - // do it in the builder. - if len(resolverSet.ProviderFactories) == 0 && len(resolverSet.ConverterFactories) == 0 { - set.ConfigProviderSettings = newDefaultConfigProviderSettings(resolverSet.URIs) + + if set.ConfigProviderSettings.ResolverSettings.DefaultScheme == "" { + set.ConfigProviderSettings.ResolverSettings.DefaultScheme = "env" + } + + if len(resolverSet.ProviderFactories) == 0 { + return errors.New("at least one Provider must be supplied") } return nil } diff --git a/vendor/go.opentelemetry.io/collector/otelcol/command_components.go b/vendor/go.opentelemetry.io/collector/otelcol/command_components.go index da13d6779a7..7b3f8e1a8b7 100644 --- a/vendor/go.opentelemetry.io/collector/otelcol/command_components.go +++ b/vendor/go.opentelemetry.io/collector/otelcol/command_components.go @@ -20,6 +20,7 @@ import ( type componentWithStability struct { Name component.Type + Module string Stability map[string]string } @@ -40,7 +41,6 @@ func newComponentsCommand(set CollectorSettings) *cobra.Command { Long: "Outputs available components in this collector distribution including their stability levels. 
The output format is not stable and can change between releases.", Args: cobra.ExactArgs(0), RunE: func(cmd *cobra.Command, _ []string) error { - factories, err := set.Factories() if err != nil { return fmt.Errorf("failed to initialize factories: %w", err) @@ -49,7 +49,8 @@ func newComponentsCommand(set CollectorSettings) *cobra.Command { components := componentsOutput{} for _, con := range sortFactoriesByType[connector.Factory](factories.Connectors) { components.Connectors = append(components.Connectors, componentWithStability{ - Name: con.Type(), + Name: con.Type(), + Module: factories.ConnectorModules[con.Type()], Stability: map[string]string{ "logs-to-logs": con.LogsToLogsStability().String(), "logs-to-metrics": con.LogsToMetricsStability().String(), @@ -67,39 +68,43 @@ func newComponentsCommand(set CollectorSettings) *cobra.Command { } for _, ext := range sortFactoriesByType[extension.Factory](factories.Extensions) { components.Extensions = append(components.Extensions, componentWithStability{ - Name: ext.Type(), + Name: ext.Type(), + Module: factories.ExtensionModules[ext.Type()], Stability: map[string]string{ - "extension": ext.ExtensionStability().String(), + "extension": ext.Stability().String(), }, }) } for _, prs := range sortFactoriesByType[processor.Factory](factories.Processors) { components.Processors = append(components.Processors, componentWithStability{ - Name: prs.Type(), + Name: prs.Type(), + Module: factories.ProcessorModules[prs.Type()], Stability: map[string]string{ - "logs": prs.LogsProcessorStability().String(), - "metrics": prs.MetricsProcessorStability().String(), - "traces": prs.TracesProcessorStability().String(), + "logs": prs.LogsStability().String(), + "metrics": prs.MetricsStability().String(), + "traces": prs.TracesStability().String(), }, }) } for _, rcv := range sortFactoriesByType[receiver.Factory](factories.Receivers) { components.Receivers = append(components.Receivers, componentWithStability{ - Name: rcv.Type(), + Name: rcv.Type(), + Module: factories.ReceiverModules[rcv.Type()], Stability: map[string]string{ - "logs": rcv.LogsReceiverStability().String(), - "metrics": rcv.MetricsReceiverStability().String(), - "traces": rcv.TracesReceiverStability().String(), + "logs": rcv.LogsStability().String(), + "metrics": rcv.MetricsStability().String(), + "traces": rcv.TracesStability().String(), }, }) } for _, exp := range sortFactoriesByType[exporter.Factory](factories.Exporters) { components.Exporters = append(components.Exporters, componentWithStability{ - Name: exp.Type(), + Name: exp.Type(), + Module: factories.ExporterModules[exp.Type()], Stability: map[string]string{ - "logs": exp.LogsExporterStability().String(), - "metrics": exp.MetricsExporterStability().String(), - "traces": exp.TracesExporterStability().String(), + "logs": exp.LogsStability().String(), + "metrics": exp.MetricsStability().String(), + "traces": exp.TracesStability().String(), }, }) } diff --git a/vendor/go.opentelemetry.io/collector/otelcol/config.go b/vendor/go.opentelemetry.io/collector/otelcol/config.go index e622617721b..33975957282 100644 --- a/vendor/go.opentelemetry.io/collector/otelcol/config.go +++ b/vendor/go.opentelemetry.io/collector/otelcol/config.go @@ -130,14 +130,14 @@ func (cfg *Config) Validate() error { if _, ok := cfg.Connectors[ref]; ok { continue } - return fmt.Errorf("service::pipelines::%s: references receiver %q which is not configured", pipelineID, ref) + return fmt.Errorf("service::pipelines::%s: references receiver %q which is not configured", 
pipelineID.String(), ref) } // Validate pipeline processor name references. for _, ref := range pipeline.Processors { // Check that the name referenced in the pipeline's processors exists in the top-level processors. if cfg.Processors[ref] == nil { - return fmt.Errorf("service::pipelines::%s: references processor %q which is not configured", pipelineID, ref) + return fmt.Errorf("service::pipelines::%s: references processor %q which is not configured", pipelineID.String(), ref) } } @@ -150,7 +150,7 @@ func (cfg *Config) Validate() error { if _, ok := cfg.Connectors[ref]; ok { continue } - return fmt.Errorf("service::pipelines::%s: references exporter %q which is not configured", pipelineID, ref) + return fmt.Errorf("service::pipelines::%s: references exporter %q which is not configured", pipelineID.String(), ref) } } return nil diff --git a/vendor/go.opentelemetry.io/collector/otelcol/configprovider.go b/vendor/go.opentelemetry.io/collector/otelcol/configprovider.go index 6360b536509..440ffad5254 100644 --- a/vendor/go.opentelemetry.io/collector/otelcol/configprovider.go +++ b/vendor/go.opentelemetry.io/collector/otelcol/configprovider.go @@ -8,12 +8,6 @@ import ( "fmt" "go.opentelemetry.io/collector/confmap" - "go.opentelemetry.io/collector/confmap/converter/expandconverter" - "go.opentelemetry.io/collector/confmap/provider/envprovider" - "go.opentelemetry.io/collector/confmap/provider/fileprovider" - "go.opentelemetry.io/collector/confmap/provider/httpprovider" - "go.opentelemetry.io/collector/confmap/provider/httpsprovider" - "go.opentelemetry.io/collector/confmap/provider/yamlprovider" ) // ConfigProvider provides the service configuration. @@ -50,25 +44,11 @@ type ConfigProvider interface { Shutdown(ctx context.Context) error } -// ConfmapProvider is an optional interface to be implemented by ConfigProviders -// to provide confmap.Conf objects representing a marshaled version of the -// Collector's configuration. -// -// The purpose of this interface is that otelcol.ConfigProvider structs do not -// necessarily need to use confmap.Conf as their underlying config structure. -type ConfmapProvider interface { - // GetConfmap resolves the Collector's configuration and provides it as a confmap.Conf object. - // - // Should never be called concurrently with itself or any ConfigProvider method. - GetConfmap(ctx context.Context) (*confmap.Conf, error) -} - type configProvider struct { mapResolver *confmap.Resolver } var _ ConfigProvider = (*configProvider)(nil) -var _ ConfmapProvider = (*configProvider)(nil) // ConfigProviderSettings are the settings to configure the behavior of the ConfigProvider. 
type ConfigProviderSettings struct { @@ -121,28 +101,3 @@ func (cm *configProvider) Watch() <-chan error { func (cm *configProvider) Shutdown(ctx context.Context) error { return cm.mapResolver.Shutdown(ctx) } - -func (cm *configProvider) GetConfmap(ctx context.Context) (*confmap.Conf, error) { - conf, err := cm.mapResolver.Resolve(ctx) - if err != nil { - return nil, fmt.Errorf("cannot resolve the configuration: %w", err) - } - - return conf, nil -} - -func newDefaultConfigProviderSettings(uris []string) ConfigProviderSettings { - return ConfigProviderSettings{ - ResolverSettings: confmap.ResolverSettings{ - URIs: uris, - ProviderFactories: []confmap.ProviderFactory{ - fileprovider.NewFactory(), - envprovider.NewFactory(), - yamlprovider.NewFactory(), - httpprovider.NewFactory(), - httpsprovider.NewFactory(), - }, - ConverterFactories: []confmap.ConverterFactory{expandconverter.NewFactory()}, - }, - } -} diff --git a/vendor/go.opentelemetry.io/collector/otelcol/factories.go b/vendor/go.opentelemetry.io/collector/otelcol/factories.go index a61bd05060b..7528fa00666 100644 --- a/vendor/go.opentelemetry.io/collector/otelcol/factories.go +++ b/vendor/go.opentelemetry.io/collector/otelcol/factories.go @@ -29,4 +29,19 @@ type Factories struct { // Connectors maps connector type names in the config to the respective factory. Connectors map[component.Type]connector.Factory + + // ReceiverModules maps receiver types to their respective go modules. + ReceiverModules map[component.Type]string + + // ProcessorModules maps processor types to their respective go modules. + ProcessorModules map[component.Type]string + + // ExporterModules maps exporter types to their respective go modules. + ExporterModules map[component.Type]string + + // ExtensionModules maps extension types to their respective go modules. + ExtensionModules map[component.Type]string + + // ConnectorModules maps connector types to their respective go modules. + ConnectorModules map[component.Type]string } diff --git a/vendor/go.opentelemetry.io/collector/otelcol/internal/configunmarshaler/configs.go b/vendor/go.opentelemetry.io/collector/otelcol/internal/configunmarshaler/configs.go index 7f864fdfd32..e60ab5d29b4 100644 --- a/vendor/go.opentelemetry.io/collector/otelcol/internal/configunmarshaler/configs.go +++ b/vendor/go.opentelemetry.io/collector/otelcol/internal/configunmarshaler/configs.go @@ -4,6 +4,7 @@ package configunmarshaler // import "go.opentelemetry.io/collector/otelcol/internal/configunmarshaler" import ( + "errors" "fmt" "golang.org/x/exp/maps" @@ -31,19 +32,25 @@ func (c *Configs[F]) Unmarshal(conf *confmap.Conf) error { // Prepare resulting map. c.cfgs = make(map[component.ID]component.Config) // Iterate over raw configs and create a config for each. - for id, value := range rawCfgs { + for id := range rawCfgs { // Find factory based on component kind and type that we read from config source. factory, ok := c.factories[id.Type()] if !ok { return errorUnknownType(id, maps.Keys(c.factories)) } + // Get the configuration from the confmap.Conf to preserve internal representation. + sub, err := conf.Sub(id.String()) + if err != nil { + return errorUnmarshalError(id, err) + } + // Create the default config for this component. cfg := factory.CreateDefaultConfig() // Now that the default config struct is created we can Unmarshal into it, // and it will apply user-defined config on top of the default. 
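The configunmarshaler change below fetches each component's section with conf.Sub before unmarshaling, so the section keeps confmap's internal representation and is then overlaid onto the factory's default config. Here is a small sketch of that defaults-then-overlay behavior against the confmap API; the Cfg struct, its fields, and the "otlp" key are invented for illustration.

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/confmap"
)

// Cfg plays the role of a component config; Timeout has a default that the
// user-supplied map below never mentions.
type Cfg struct {
	Endpoint string `mapstructure:"endpoint"`
	Timeout  int    `mapstructure:"timeout"`
}

func main() {
	conf := confmap.NewFromStringMap(map[string]any{
		"otlp": map[string]any{"endpoint": "collector:4317"},
	})

	// Default config, as a factory's CreateDefaultConfig would produce it.
	cfg := &Cfg{Endpoint: "localhost:4317", Timeout: 30}

	sub, err := conf.Sub("otlp") // keeps the internal representation
	if err != nil {
		panic(err)
	}
	if err := sub.Unmarshal(cfg); err != nil {
		panic(err)
	}

	// Only endpoint was overridden; the Timeout default survives.
	fmt.Printf("%+v\n", cfg) // &{Endpoint:collector:4317 Timeout:30}
}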
- if err := confmap.NewFromStringMap(value).Unmarshal(&cfg); err != nil { + if err := sub.Unmarshal(&cfg); err != nil { return errorUnmarshalError(id, err) } @@ -58,6 +65,9 @@ func (c *Configs[F]) Configs() map[component.ID]component.Config { } func errorUnknownType(id component.ID, factories []component.Type) error { + if id.Type().String() == "logging" { + return errors.New("the logging exporter has been deprecated, use the debug exporter instead") + } return fmt.Errorf("unknown type: %q for id: %q (valid values: %v)", id.Type(), id, factories) } diff --git a/vendor/go.opentelemetry.io/collector/otelcol/internal/grpclog/logger.go b/vendor/go.opentelemetry.io/collector/otelcol/internal/grpclog/logger.go index 39b56f74890..8f48fe7207d 100644 --- a/vendor/go.opentelemetry.io/collector/otelcol/internal/grpclog/logger.go +++ b/vendor/go.opentelemetry.io/collector/otelcol/internal/grpclog/logger.go @@ -27,7 +27,7 @@ func SetLogger(baseLogger *zap.Logger, loglevel zapcore.Level) *zapgrpc.Logger { c = core } return c.With([]zapcore.Field{zap.Bool("grpc_log", true)}) - }))) + }), zap.AddCallerSkip(5))) grpclog.SetLoggerV2(logger) return logger diff --git a/vendor/go.opentelemetry.io/collector/otelcol/unmarshaler.go b/vendor/go.opentelemetry.io/collector/otelcol/unmarshaler.go index 4967b0a731d..8f2b8e583a1 100644 --- a/vendor/go.opentelemetry.io/collector/otelcol/unmarshaler.go +++ b/vendor/go.opentelemetry.io/collector/otelcol/unmarshaler.go @@ -27,7 +27,6 @@ type configSettings struct { // unmarshal the configSettings from a confmap.Conf. // After the config is unmarshalled, `Validate()` must be called to validate. func unmarshal(v *confmap.Conf, factories Factories) (*configSettings, error) { - telFactory := telemetry.NewFactory() defaultTelConfig := *telFactory.CreateDefaultConfig().(*telemetry.Config) diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/profileid.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/profileid.go new file mode 100644 index 00000000000..5b4e6f53ceb --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/data/profileid.go @@ -0,0 +1,79 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package data // import "go.opentelemetry.io/collector/pdata/internal/data" + +import ( + "errors" + + "github.com/gogo/protobuf/proto" +) + +const profileIDSize = 16 + +var ( + errMarshalProfileID = errors.New("marshal: invalid buffer length for ProfileID") + errUnmarshalProfileID = errors.New("unmarshal: invalid ProfileID length") +) + +// ProfileID is a custom data type that is used for all profile_id fields in OTLP +// Protobuf messages. +type ProfileID [profileIDSize]byte + +var _ proto.Sizer = (*ProfileID)(nil) + +// Size returns the size of the data to serialize. +func (tid ProfileID) Size() int { + if tid.IsEmpty() { + return 0 + } + return profileIDSize +} + +// IsEmpty returns true if id doesn't contain at least one non-zero byte. +func (tid ProfileID) IsEmpty() bool { + return tid == [profileIDSize]byte{} +} + +// MarshalTo converts profile ID into a binary representation. Called by Protobuf serialization. +func (tid ProfileID) MarshalTo(data []byte) (n int, err error) { + if tid.IsEmpty() { + return 0, nil + } + + if len(data) < profileIDSize { + return 0, errMarshalProfileID + } + + return copy(data, tid[:]), nil +} + +// Unmarshal inflates this profile ID from binary representation. Called by Protobuf serialization.
+func (tid *ProfileID) Unmarshal(data []byte) error { + if len(data) == 0 { + *tid = [profileIDSize]byte{} + return nil + } + + if len(data) != profileIDSize { + return errUnmarshalProfileID + } + + copy(tid[:], data) + return nil +} + +// MarshalJSON converts profile id into a hex string enclosed in quotes. +func (tid ProfileID) MarshalJSON() ([]byte, error) { + if tid.IsEmpty() { + return []byte(`""`), nil + } + return marshalJSON(tid[:]) +} + +// UnmarshalJSON inflates profile id from hex string, possibly enclosed in quotes. +// Called by Protobuf JSON deserialization. +func (tid *ProfileID) UnmarshalJSON(data []byte) error { + *tid = [profileIDSize]byte{} + return unmarshalJSON(tid[:], data) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1experimental/profiles_service.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development/profiles_service.pb.go similarity index 84% rename from vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1experimental/profiles_service.pb.go rename to vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development/profiles_service.pb.go index f998cd3f8c5..9e0475e6875 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1experimental/profiles_service.pb.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development/profiles_service.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: opentelemetry/proto/collector/profiles/v1experimental/profiles_service.proto +// source: opentelemetry/proto/collector/profiles/v1development/profiles_service.proto -package v1experimental +package v1development import ( context "context" @@ -16,7 +16,7 @@ import ( codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" - v1experimental "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1experimental" + v1development "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" ) // Reference imports to suppress errors if they are not otherwise used. @@ -36,14 +36,14 @@ type ExportProfilesServiceRequest struct { // element. Intermediary nodes (such as OpenTelemetry Collector) that receive // data from multiple origins typically batch the data before forwarding further and // in that case this array will contain multiple elements. 
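Taken together, the profileid.go methods above give an all-zero ID a zero-byte wire size, while any other value marshals to exactly 16 bytes. A standalone sketch of that round-trip behavior, reimplemented here only because the real type lives in an internal package:

```go
package main

import (
	"errors"
	"fmt"
)

const profileIDSize = 16

type profileID [profileIDSize]byte

func (id profileID) isEmpty() bool { return id == profileID{} }

// marshalTo writes nothing for an empty ID and exactly 16 bytes otherwise,
// mirroring ProfileID.MarshalTo above.
func (id profileID) marshalTo(buf []byte) (int, error) {
	if id.isEmpty() {
		return 0, nil
	}
	if len(buf) < profileIDSize {
		return 0, errors.New("marshal: invalid buffer length for ProfileID")
	}
	return copy(buf, id[:]), nil
}

// unmarshal accepts zero bytes (empty ID) or exactly 16 bytes, like
// ProfileID.Unmarshal above.
func (id *profileID) unmarshal(data []byte) error {
	switch len(data) {
	case 0:
		*id = profileID{}
		return nil
	case profileIDSize:
		copy(id[:], data)
		return nil
	default:
		return errors.New("unmarshal: invalid ProfileID length")
	}
}

func main() {
	var id profileID
	id[0] = 0xab

	buf := make([]byte, profileIDSize)
	n, _ := id.marshalTo(buf)

	var back profileID
	_ = back.unmarshal(buf[:n])
	fmt.Println(n, back == id) // 16 true
}
```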
- ResourceProfiles []*v1experimental.ResourceProfiles `protobuf:"bytes,1,rep,name=resource_profiles,json=resourceProfiles,proto3" json:"resource_profiles,omitempty"` + ResourceProfiles []*v1development.ResourceProfiles `protobuf:"bytes,1,rep,name=resource_profiles,json=resourceProfiles,proto3" json:"resource_profiles,omitempty"` } func (m *ExportProfilesServiceRequest) Reset() { *m = ExportProfilesServiceRequest{} } func (m *ExportProfilesServiceRequest) String() string { return proto.CompactTextString(m) } func (*ExportProfilesServiceRequest) ProtoMessage() {} func (*ExportProfilesServiceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_3d903b74e05b443d, []int{0} + return fileDescriptor_ad3943ce836e7720, []int{0} } func (m *ExportProfilesServiceRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -72,7 +72,7 @@ func (m *ExportProfilesServiceRequest) XXX_DiscardUnknown() { var xxx_messageInfo_ExportProfilesServiceRequest proto.InternalMessageInfo -func (m *ExportProfilesServiceRequest) GetResourceProfiles() []*v1experimental.ResourceProfiles { +func (m *ExportProfilesServiceRequest) GetResourceProfiles() []*v1development.ResourceProfiles { if m != nil { return m.ResourceProfiles } @@ -102,7 +102,7 @@ func (m *ExportProfilesServiceResponse) Reset() { *m = ExportProfilesSer func (m *ExportProfilesServiceResponse) String() string { return proto.CompactTextString(m) } func (*ExportProfilesServiceResponse) ProtoMessage() {} func (*ExportProfilesServiceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_3d903b74e05b443d, []int{1} + return fileDescriptor_ad3943ce836e7720, []int{1} } func (m *ExportProfilesServiceResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -158,7 +158,7 @@ func (m *ExportProfilesPartialSuccess) Reset() { *m = ExportProfilesPart func (m *ExportProfilesPartialSuccess) String() string { return proto.CompactTextString(m) } func (*ExportProfilesPartialSuccess) ProtoMessage() {} func (*ExportProfilesPartialSuccess) Descriptor() ([]byte, []int) { - return fileDescriptor_3d903b74e05b443d, []int{2} + return fileDescriptor_ad3943ce836e7720, []int{2} } func (m *ExportProfilesPartialSuccess) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -202,45 +202,45 @@ func (m *ExportProfilesPartialSuccess) GetErrorMessage() string { } func init() { - proto.RegisterType((*ExportProfilesServiceRequest)(nil), "opentelemetry.proto.collector.profiles.v1experimental.ExportProfilesServiceRequest") - proto.RegisterType((*ExportProfilesServiceResponse)(nil), "opentelemetry.proto.collector.profiles.v1experimental.ExportProfilesServiceResponse") - proto.RegisterType((*ExportProfilesPartialSuccess)(nil), "opentelemetry.proto.collector.profiles.v1experimental.ExportProfilesPartialSuccess") + proto.RegisterType((*ExportProfilesServiceRequest)(nil), "opentelemetry.proto.collector.profiles.v1development.ExportProfilesServiceRequest") + proto.RegisterType((*ExportProfilesServiceResponse)(nil), "opentelemetry.proto.collector.profiles.v1development.ExportProfilesServiceResponse") + proto.RegisterType((*ExportProfilesPartialSuccess)(nil), "opentelemetry.proto.collector.profiles.v1development.ExportProfilesPartialSuccess") } func init() { - proto.RegisterFile("opentelemetry/proto/collector/profiles/v1experimental/profiles_service.proto", fileDescriptor_3d903b74e05b443d) -} - -var fileDescriptor_3d903b74e05b443d = []byte{ - // 437 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x53, 0x3f, 0xcb, 
0xd3, 0x40, - 0x18, 0xcf, 0xf5, 0x95, 0x17, 0xbc, 0xaa, 0xd5, 0xd0, 0xa1, 0x14, 0x8d, 0x25, 0x2e, 0x01, 0xe1, - 0x42, 0x2b, 0x05, 0x11, 0x5c, 0x2a, 0xdd, 0x14, 0x43, 0x5a, 0x1c, 0x44, 0x08, 0x31, 0x7d, 0x0c, - 0x29, 0x69, 0xee, 0xbc, 0xbb, 0x96, 0xba, 0x89, 0xa3, 0x93, 0x1f, 0xc2, 0xc9, 0xdd, 0xef, 0x50, - 0xb7, 0x8e, 0x4e, 0x22, 0xed, 0x17, 0x79, 0x49, 0xae, 0x09, 0x4d, 0x68, 0x29, 0x94, 0x6e, 0x77, - 0xbf, 0xe3, 0xf7, 0xe7, 0xf9, 0x1d, 0x0f, 0x7e, 0x4d, 0x19, 0x24, 0x12, 0x62, 0x98, 0x81, 0xe4, - 0x5f, 0x6c, 0xc6, 0xa9, 0xa4, 0x76, 0x40, 0xe3, 0x18, 0x02, 0x49, 0x79, 0x7a, 0xff, 0x14, 0xc5, - 0x20, 0xec, 0x45, 0x17, 0x96, 0x0c, 0x78, 0x34, 0x83, 0x44, 0xfa, 0x71, 0x81, 0x7b, 0x02, 0xf8, - 0x22, 0x0a, 0x80, 0x64, 0x44, 0xbd, 0x5f, 0x52, 0x53, 0x20, 0x29, 0xd4, 0x48, 0xce, 0x22, 0x65, - 0xb5, 0x76, 0x33, 0xa4, 0x21, 0x55, 0xd6, 0xe9, 0x49, 0xf1, 0xda, 0x2f, 0x0e, 0x45, 0x3b, 0x15, - 0x48, 0x71, 0xcd, 0xef, 0x08, 0x3f, 0x1c, 0x2e, 0x19, 0xe5, 0xd2, 0xd9, 0x3d, 0x8c, 0x54, 0x50, - 0x17, 0x3e, 0xcf, 0x41, 0x48, 0x7d, 0x8a, 0x1f, 0x70, 0x10, 0x74, 0xce, 0x03, 0xf0, 0x72, 0x6e, - 0x0b, 0x75, 0xae, 0xac, 0x7a, 0xef, 0x25, 0x39, 0x34, 0xc5, 0x91, 0xec, 0xc4, 0xdd, 0xa9, 0xe4, - 0x3e, 0xee, 0x7d, 0x5e, 0x41, 0xcc, 0x9f, 0x08, 0x3f, 0x3a, 0x12, 0x46, 0x30, 0x9a, 0x08, 0xd0, - 0xbf, 0x21, 0xdc, 0x60, 0x3e, 0x97, 0x91, 0x1f, 0x7b, 0x62, 0x1e, 0x04, 0x20, 0xd2, 0x30, 0xc8, - 0xaa, 0xf7, 0x46, 0xe4, 0xac, 0x4a, 0x49, 0xd9, 0xcf, 0x51, 0xda, 0x23, 0x25, 0x3d, 0xb8, 0xb5, - 0xfa, 0xf7, 0x58, 0x73, 0xef, 0xb1, 0x12, 0x6a, 0xb2, 0x6a, 0x65, 0x65, 0x96, 0xfe, 0x34, 0xad, - 0x6c, 0x0a, 0x81, 0x84, 0xc9, 0x7e, 0x65, 0xc8, 0xba, 0x4a, 0x67, 0x56, 0x0f, 0x39, 0x55, 0x7f, - 0x82, 0xef, 0x02, 0xe7, 0x94, 0x7b, 0x33, 0x10, 0xc2, 0x0f, 0xa1, 0x55, 0xeb, 0x20, 0xeb, 0xb6, - 0x7b, 0x27, 0x03, 0xdf, 0x28, 0xac, 0xf7, 0x07, 0xe1, 0x46, 0xa5, 0x12, 0xfd, 0x37, 0xc2, 0xd7, - 0x2a, 0x86, 0x7e, 0x99, 0xd9, 0xcb, 0x1f, 0xdf, 0x1e, 0x5f, 0x56, 0x54, 0x7d, 0xa0, 0xa9, 0x0d, - 0xbe, 0xd6, 0x56, 0x1b, 0x03, 0xad, 0x37, 0x06, 0xfa, 0xbf, 0x31, 0xd0, 0x8f, 0xad, 0xa1, 0xad, - 0xb7, 0x86, 0xf6, 0x77, 0x6b, 0x68, 0xf8, 0x79, 0x44, 0xcf, 0x33, 0x1d, 0x34, 0x2b, 0x7e, 0x4e, - 0xca, 0x73, 0xd0, 0xfb, 0x0f, 0x61, 0x55, 0x31, 0x2a, 0x6d, 0xed, 0xc4, 0x97, 0xbe, 0x1d, 0x25, - 0x12, 0x78, 0xe2, 0xc7, 0x76, 0x76, 0xcb, 0x2c, 0x43, 0x48, 0x4e, 0x2f, 0xf7, 0xaf, 0x5a, 0xff, - 0x2d, 0x83, 0x64, 0x5c, 0x68, 0x67, 0xae, 0xe4, 0x55, 0x91, 0x36, 0x0f, 0x45, 0xde, 0x75, 0x87, - 0x7b, 0xbc, 0x8f, 0xd7, 0x99, 0xc7, 0xb3, 0x9b, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8e, 0x3d, 0x57, - 0xb0, 0x54, 0x04, 0x00, 0x00, + proto.RegisterFile("opentelemetry/proto/collector/profiles/v1development/profiles_service.proto", fileDescriptor_ad3943ce836e7720) +} + +var fileDescriptor_ad3943ce836e7720 = []byte{ + // 438 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x53, 0x4d, 0x8b, 0xd3, 0x40, + 0x18, 0xce, 0xb4, 0x52, 0x70, 0xaa, 0x56, 0x43, 0x0f, 0xa5, 0x68, 0x2c, 0xf1, 0x12, 0x10, 0x26, + 0xb4, 0x16, 0x44, 0xf0, 0x54, 0xf5, 0x24, 0x62, 0x48, 0xc5, 0x83, 0x1e, 0x42, 0x4c, 0x5f, 0x43, + 0x24, 0xcd, 0x8c, 0x33, 0xd3, 0xa2, 0x47, 0x8f, 0xde, 0xf6, 0x3f, 0xec, 0x6d, 0xaf, 0xfb, 0x23, + 0xb6, 0xc7, 0x1e, 0xf7, 0xb4, 0x2c, 0xed, 0xef, 0x58, 0x58, 0x92, 0x69, 0xb2, 0x9b, 0xd0, 0xa5, + 0x50, 0x7a, 0x9b, 0x79, 0x86, 0xe7, 0xe3, 0x7d, 0x86, 0x17, 0x7f, 0xa4, 0x0c, 0x12, 0x09, 0x31, + 0x4c, 0x41, 0xf2, 0xbf, 0x36, 0xe3, 0x54, 0x52, 0x3b, 0xa0, 0x71, 0x0c, 0x81, 0xa4, 0x3c, 0xbd, + 0xff, 0x8c, 0x62, 0x10, 0xf6, 0xbc, 0x3f, 
0x81, 0x39, 0xc4, 0x94, 0x4d, 0x21, 0x91, 0x05, 0xec, + 0x09, 0xe0, 0xf3, 0x28, 0x00, 0x92, 0xf1, 0xf4, 0x61, 0x49, 0x4c, 0x81, 0xa4, 0x10, 0x23, 0x39, + 0x8b, 0x94, 0xc4, 0xba, 0xed, 0x90, 0x86, 0x54, 0x19, 0xa7, 0x27, 0x45, 0xeb, 0xbe, 0xd9, 0x16, + 0x6c, 0x47, 0x1c, 0x45, 0x35, 0xff, 0x23, 0xfc, 0xf4, 0xc3, 0x1f, 0x46, 0xb9, 0x74, 0x36, 0x0f, + 0x63, 0x15, 0xd3, 0x85, 0xdf, 0x33, 0x10, 0x52, 0x8f, 0xf0, 0x13, 0x0e, 0x82, 0xce, 0x78, 0x00, + 0x5e, 0xce, 0xed, 0xa0, 0x5e, 0xdd, 0x6a, 0x0e, 0xde, 0x92, 0x6d, 0x33, 0x6c, 0x4f, 0x4e, 0xdc, + 0x8d, 0x48, 0x6e, 0xe3, 0x3e, 0xe6, 0x15, 0xc4, 0x3c, 0x46, 0xf8, 0xd9, 0x1d, 0x59, 0x04, 0xa3, + 0x89, 0x00, 0xfd, 0x1f, 0xc2, 0x2d, 0xe6, 0x73, 0x19, 0xf9, 0xb1, 0x27, 0x66, 0x41, 0x00, 0x22, + 0xcd, 0x82, 0xac, 0xe6, 0xc0, 0x25, 0xfb, 0xf4, 0x49, 0xca, 0x76, 0x8e, 0x92, 0x1e, 0x2b, 0xe5, + 0xd1, 0xbd, 0xc5, 0xc5, 0x73, 0xcd, 0x7d, 0xc4, 0x4a, 0xa8, 0xc9, 0xaa, 0x85, 0x95, 0x59, 0xfa, + 0xcb, 0xb4, 0xb0, 0x5f, 0x10, 0x48, 0x98, 0xdc, 0x2e, 0x0c, 0x59, 0xf5, 0x74, 0x64, 0xf5, 0x90, + 0x53, 0xf5, 0x17, 0xf8, 0x21, 0x70, 0x4e, 0xb9, 0x37, 0x05, 0x21, 0xfc, 0x10, 0x3a, 0xb5, 0x1e, + 0xb2, 0xee, 0xbb, 0x0f, 0x32, 0xf0, 0x93, 0xc2, 0x06, 0x67, 0x08, 0xb7, 0x2a, 0x8d, 0xe8, 0xa7, + 0x08, 0x37, 0x54, 0x0c, 0xfd, 0x20, 0xa3, 0x97, 0x7f, 0xbd, 0x3b, 0x3e, 0xa8, 0xa6, 0xfa, 0x3d, + 0x53, 0x1b, 0x5d, 0xa1, 0xc5, 0xca, 0x40, 0xcb, 0x95, 0x81, 0x2e, 0x57, 0x06, 0x3a, 0x5a, 0x1b, + 0xda, 0x72, 0x6d, 0x68, 0xe7, 0x6b, 0x43, 0xc3, 0xaf, 0x23, 0xba, 0x97, 0xe7, 0xa8, 0x5d, 0xb1, + 0x73, 0x52, 0x9a, 0x83, 0xbe, 0x7d, 0x0f, 0xab, 0x82, 0x51, 0x69, 0x5b, 0x27, 0xbe, 0xf4, 0xed, + 0x28, 0x91, 0xc0, 0x13, 0x3f, 0xb6, 0xb3, 0x5b, 0xe6, 0x18, 0x42, 0xb2, 0x73, 0xa9, 0x4f, 0x6a, + 0xc3, 0xcf, 0x0c, 0x92, 0x2f, 0x85, 0x74, 0x66, 0x4a, 0xde, 0x15, 0x59, 0xf3, 0x4c, 0xe4, 0x6b, + 0xff, 0xfd, 0x0d, 0xed, 0x47, 0x23, 0x73, 0x78, 0x75, 0x1d, 0x00, 0x00, 0xff, 0xff, 0x8f, 0x35, + 0x8c, 0xea, 0x4a, 0x04, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -270,7 +270,7 @@ func NewProfilesServiceClient(cc *grpc.ClientConn) ProfilesServiceClient { func (c *profilesServiceClient) Export(ctx context.Context, in *ExportProfilesServiceRequest, opts ...grpc.CallOption) (*ExportProfilesServiceResponse, error) { out := new(ExportProfilesServiceResponse) - err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.profiles.v1experimental.ProfilesService/Export", in, out, opts...) + err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.profiles.v1development.ProfilesService/Export", in, out, opts...) 
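The v1experimental to v1development package rename is wire-visible: a gRPC full method name is "/<proto package>.<Service>/<Method>", so the Export method's path changes as shown in the Invoke call above. A quick sketch of how that name is assembled:

```go
package main

import "fmt"

// fullMethod assembles a gRPC full method name from its parts.
func fullMethod(protoPackage, service, method string) string {
	return fmt.Sprintf("/%s.%s/%s", protoPackage, service, method)
}

func main() {
	fmt.Println(fullMethod("opentelemetry.proto.collector.profiles.v1development", "ProfilesService", "Export"))
	// /opentelemetry.proto.collector.profiles.v1development.ProfilesService/Export
}
```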
if err != nil { return nil, err } @@ -306,7 +306,7 @@ func _ProfilesService_Export_Handler(srv interface{}, ctx context.Context, dec f } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/opentelemetry.proto.collector.profiles.v1experimental.ProfilesService/Export", + FullMethod: "/opentelemetry.proto.collector.profiles.v1development.ProfilesService/Export", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ProfilesServiceServer).Export(ctx, req.(*ExportProfilesServiceRequest)) @@ -315,7 +315,7 @@ func _ProfilesService_Export_Handler(srv interface{}, ctx context.Context, dec f } var _ProfilesService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "opentelemetry.proto.collector.profiles.v1experimental.ProfilesService", + ServiceName: "opentelemetry.proto.collector.profiles.v1development.ProfilesService", HandlerType: (*ProfilesServiceServer)(nil), Methods: []grpc.MethodDesc{ { @@ -324,7 +324,7 @@ var _ProfilesService_serviceDesc = grpc.ServiceDesc{ }, }, Streams: []grpc.StreamDesc{}, - Metadata: "opentelemetry/proto/collector/profiles/v1experimental/profiles_service.proto", + Metadata: "opentelemetry/proto/collector/profiles/v1development/profiles_service.proto", } func (m *ExportProfilesServiceRequest) Marshal() (dAtA []byte, err error) { @@ -549,7 +549,7 @@ func (m *ExportProfilesServiceRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ResourceProfiles = append(m.ResourceProfiles, &v1experimental.ResourceProfiles{}) + m.ResourceProfiles = append(m.ResourceProfiles, &v1development.ResourceProfiles{}) if err := m.ResourceProfiles[len(m.ResourceProfiles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1/logs.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1/logs.pb.go index 65b11fbe286..1fb0f2b6857 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1/logs.pb.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1/logs.pb.go @@ -228,7 +228,8 @@ type ResourceLogs struct { // A list of ScopeLogs that originate from a resource. ScopeLogs []*ScopeLogs `protobuf:"bytes,2,rep,name=scope_logs,json=scopeLogs,proto3" json:"scope_logs,omitempty"` // The Schema URL, if known. This is the identifier of the Schema that the resource data - // is recorded in. To learn more about Schema URL see + // is recorded in. Notably, the last part of the URL path is the version number of the + // schema: http[s]://server[:port]/path/. To learn more about Schema URL see // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url // This schema_url applies to the data in the "resource" field. It does not apply // to the data in the "scope_logs" field which have their own schema_url field. @@ -305,7 +306,8 @@ type ScopeLogs struct { // A list of log records. LogRecords []*LogRecord `protobuf:"bytes,2,rep,name=log_records,json=logRecords,proto3" json:"log_records,omitempty"` // The Schema URL, if known. This is the identifier of the Schema that the log data - // is recorded in. To learn more about Schema URL see + // is recorded in. Notably, the last part of the URL path is the version number of the + // schema: http[s]://server[:port]/path/. To learn more about Schema URL see // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url // This schema_url applies to all logs in the "logs" field. 
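The expanded schema_url comments in this file note that the version number is the last segment of the URL path. If you ever need it programmatically, a trivial helper would suffice; this one is hypothetical, not part of the collector API:

```go
package main

import (
	"fmt"
	"strings"
)

// schemaVersion returns the last path segment of a Schema URL, which the
// comments above identify as the schema version.
func schemaVersion(schemaURL string) string {
	return schemaURL[strings.LastIndex(schemaURL, "/")+1:]
}

func main() {
	fmt.Println(schemaVersion("https://opentelemetry.io/schemas/1.21.0")) // 1.21.0
}
```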
SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` @@ -434,6 +436,19 @@ type LogRecord struct { // - the field is not present, // - the field contains an invalid value. SpanId go_opentelemetry_io_collector_pdata_internal_data.SpanID `protobuf:"bytes,10,opt,name=span_id,json=spanId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.SpanID" json:"span_id"` + // A unique identifier of event category/type. + // All events with the same event_name are expected to conform to the same + // schema for both their attributes and their body. + // + // Recommended to be fully qualified and short (no longer than 256 characters). + // + // Presence of event_name on the log record identifies this record + // as an event. + // + // [Optional]. + // + // Status: [Development] + EventName string `protobuf:"bytes,12,opt,name=event_name,json=eventName,proto3" json:"event_name,omitempty"` } func (m *LogRecord) Reset() { *m = LogRecord{} } @@ -525,6 +540,13 @@ func (m *LogRecord) GetFlags() uint32 { return 0 } +func (m *LogRecord) GetEventName() string { + if m != nil { + return m.EventName + } + return "" +} + func init() { proto.RegisterEnum("opentelemetry.proto.logs.v1.SeverityNumber", SeverityNumber_name, SeverityNumber_value) proto.RegisterEnum("opentelemetry.proto.logs.v1.LogRecordFlags", LogRecordFlags_name, LogRecordFlags_value) @@ -539,67 +561,68 @@ func init() { } var fileDescriptor_d1c030a3ec7e961e = []byte{ - // 951 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x96, 0xcf, 0x6e, 0xea, 0x46, - 0x14, 0xc6, 0x31, 0xe1, 0xef, 0x84, 0x70, 0xa7, 0x73, 0x49, 0xae, 0x9b, 0xa8, 0x84, 0xa6, 0x55, - 0x4a, 0x53, 0x09, 0x14, 0xa0, 0xd2, 0xed, 0xae, 0x26, 0x98, 0x88, 0x86, 0x40, 0x34, 0x40, 0xaa, - 0xdc, 0x56, 0xb2, 0x0c, 0x9e, 0x52, 0x4b, 0xc6, 0x63, 0xd9, 0x03, 0x4a, 0xf6, 0x7d, 0x80, 0x3e, - 0x41, 0xb7, 0x95, 0xfa, 0x1a, 0xed, 0xe2, 0x2e, 0xef, 0xb2, 0xea, 0x22, 0xaa, 0x92, 0x4d, 0xdf, - 0xa2, 0xd5, 0x0c, 0x86, 0x90, 0xd4, 0x4e, 0x6e, 0x56, 0x99, 0x39, 0xbf, 0xef, 0x7c, 0xe7, 0x8c, - 0x4f, 0x3c, 0x18, 0xec, 0x53, 0x87, 0xd8, 0x8c, 0x58, 0x64, 0x42, 0x98, 0x7b, 0x55, 0x76, 0x5c, - 0xca, 0x68, 0xd9, 0xa2, 0x63, 0xaf, 0x3c, 0x3b, 0x14, 0x7f, 0x4b, 0x22, 0x84, 0x76, 0xee, 0xe9, - 0xe6, 0xc1, 0x92, 0xe0, 0xb3, 0xc3, 0xed, 0xdc, 0x98, 0x8e, 0xe9, 0x3c, 0x95, 0xaf, 0xe6, 0x74, - 0xfb, 0x20, 0xc8, 0x7a, 0x44, 0x27, 0x13, 0x6a, 0x73, 0xf3, 0xf9, 0xca, 0xd7, 0x96, 0x82, 0xb4, - 0x2e, 0xf1, 0xe8, 0xd4, 0x1d, 0x11, 0xae, 0x5e, 0xac, 0xe7, 0xfa, 0xbd, 0x37, 0x20, 0xd5, 0xa6, - 0x63, 0xaf, 0xa1, 0x33, 0x1d, 0x75, 0xc0, 0xc6, 0x82, 0x6a, 0xbc, 0x23, 0x59, 0x2a, 0xac, 0x15, - 0xd7, 0x2b, 0x9f, 0x97, 0x1e, 0x69, 0xb9, 0x84, 0xfd, 0x0c, 0xee, 0x82, 0x33, 0xee, 0xca, 0x6e, - 0xef, 0x97, 0x28, 0xc8, 0xac, 0x62, 0xf4, 0x1d, 0xd8, 0x34, 0x88, 0xe3, 0x92, 0x91, 0xce, 0x88, - 0xa1, 0x79, 0x23, 0xea, 0xf8, 0x85, 0xfe, 0x49, 0x8a, 0x4a, 0xfb, 0x8f, 0x56, 0xea, 0x71, 0xbd, - 0x28, 0xf3, 0xf2, 0xce, 0x65, 0x19, 0x44, 0x27, 0x20, 0xb5, 0xa8, 0x2e, 0x4b, 0x05, 0x29, 0xb4, - 0xf1, 0xe5, 0x03, 0x58, 0x69, 0xbe, 0x1e, 0x7b, 0x7b, 0xbd, 0x1b, 0xc1, 0x4b, 0x03, 0xa4, 0x02, - 0xb0, 0xd2, 0x5e, 0xf4, 0x59, 0xdd, 0xa5, 0xbd, 0x65, 0x4f, 0x1f, 0x71, 0x9b, 0x1f, 0xc9, 0x44, - 0xd7, 0xa6, 0xae, 0x25, 0xaf, 0x15, 0xa4, 0x62, 0x9a, 0x63, 0x1e, 0x19, 0xb8, 0xd6, 0xde, 0x1f, - 0x12, 0x48, 0xdf, 0x1d, 0xa0, 0x0b, 0xe2, 0x22, 0xd3, 0xef, 0xbe, 0x1a, 0x58, 0xce, 0x1f, 0xf6, - 0xec, 0xb0, 0xd4, 0xb2, 0x3d, 0xe6, 0x4e, 0x27, 
0xc4, 0x66, 0x3a, 0x33, 0xa9, 0x2d, 0x7c, 0xfc, - 0x73, 0xcc, 0x7d, 0xd0, 0x31, 0x58, 0xb7, 0xe8, 0x58, 0x73, 0xc9, 0x88, 0xba, 0xc6, 0xfb, 0x9d, - 0xa2, 0x4d, 0xc7, 0x58, 0xc8, 0x31, 0xb0, 0x16, 0xcb, 0x27, 0x8f, 0xf1, 0x53, 0x1c, 0xa4, 0x97, - 0x89, 0xe8, 0x53, 0x90, 0x65, 0xe6, 0x84, 0x68, 0x53, 0xdb, 0xbc, 0xd4, 0x6c, 0xdd, 0xa6, 0xe2, - 0x3c, 0x09, 0x9c, 0xe1, 0xd1, 0x81, 0x6d, 0x5e, 0x76, 0x74, 0x9b, 0xa2, 0x2f, 0xc1, 0x2b, 0x3a, - 0xf4, 0x88, 0x3b, 0x23, 0x86, 0xf6, 0x40, 0xbe, 0x2e, 0xe4, 0xb9, 0x05, 0xee, 0xaf, 0xa6, 0xf5, - 0xc1, 0x0b, 0x8f, 0xcc, 0x88, 0x6b, 0xb2, 0x2b, 0xcd, 0x9e, 0x4e, 0x86, 0xc4, 0x95, 0xa3, 0x05, - 0xa9, 0x98, 0xad, 0x7c, 0xf1, 0xf8, 0x70, 0xfc, 0x9c, 0x8e, 0x48, 0xc1, 0x59, 0xef, 0xde, 0x1e, - 0x7d, 0x02, 0x36, 0x96, 0xae, 0x8c, 0x5c, 0x32, 0xff, 0x88, 0x99, 0x45, 0xb0, 0x4f, 0x2e, 0x19, - 0x52, 0x40, 0x6c, 0x48, 0x8d, 0x2b, 0x39, 0x2e, 0xa6, 0xf3, 0xd9, 0x13, 0xd3, 0x51, 0xec, 0xab, - 0x73, 0xdd, 0x9a, 0x2e, 0x26, 0x22, 0x52, 0xd1, 0x29, 0x00, 0x3a, 0x63, 0xae, 0x39, 0x9c, 0x32, - 0xe2, 0xc9, 0x09, 0x31, 0x8f, 0xa7, 0x8c, 0x4e, 0xc8, 0x3d, 0xa3, 0x15, 0x03, 0xf4, 0x1a, 0xc8, - 0x86, 0x4b, 0x1d, 0x87, 0x18, 0xda, 0x5d, 0x54, 0x1b, 0xd1, 0xa9, 0xcd, 0xe4, 0x64, 0x41, 0x2a, - 0x6e, 0xe0, 0x2d, 0x9f, 0x2b, 0x4b, 0x7c, 0xc4, 0x29, 0xca, 0x81, 0xf8, 0x0f, 0x96, 0x3e, 0xf6, - 0xe4, 0x54, 0x41, 0x2a, 0x26, 0xf1, 0x7c, 0x83, 0xbe, 0x07, 0x29, 0xe6, 0xea, 0x23, 0xa2, 0x99, - 0x86, 0x9c, 0x2e, 0x48, 0xc5, 0x4c, 0x5d, 0xe1, 0x35, 0xff, 0xba, 0xde, 0xfd, 0x6a, 0x4c, 0x1f, - 0xb4, 0x69, 0xf2, 0x1b, 0xc8, 0xb2, 0xc8, 0x88, 0x51, 0xb7, 0xec, 0x18, 0x3a, 0xd3, 0xcb, 0xa6, - 0xcd, 0x88, 0x6b, 0xeb, 0x56, 0x99, 0xef, 0x4a, 0x7d, 0xee, 0xd4, 0x6a, 0xe0, 0xa4, 0xb0, 0x6c, - 0x19, 0xe8, 0x02, 0x24, 0x3d, 0x47, 0xb7, 0xb9, 0x39, 0x10, 0xe6, 0x5f, 0xfb, 0xe6, 0xaf, 0x9f, - 0x6f, 0xde, 0x73, 0x74, 0xbb, 0xd5, 0xc0, 0x09, 0x6e, 0xd8, 0x32, 0xbe, 0x89, 0xa5, 0x62, 0x30, - 0x7e, 0xf0, 0x7b, 0x1c, 0x64, 0xef, 0x0f, 0x1a, 0xed, 0x82, 0x9d, 0x9e, 0x7a, 0xae, 0xe2, 0x56, - 0xff, 0x42, 0xeb, 0x0c, 0x4e, 0xeb, 0x2a, 0xd6, 0x06, 0x9d, 0xde, 0x99, 0x7a, 0xd4, 0x6a, 0xb6, - 0xd4, 0x06, 0x8c, 0xa0, 0x0f, 0xc1, 0xe6, 0x43, 0x41, 0x1f, 0x2b, 0x47, 0x2a, 0x94, 0xd0, 0x36, - 0xd8, 0x0a, 0x44, 0x15, 0x18, 0x0d, 0x65, 0x55, 0xb8, 0x16, 0xca, 0x6a, 0x30, 0x16, 0x54, 0xae, - 0xa1, 0xd6, 0x07, 0xc7, 0x30, 0x1e, 0x94, 0x26, 0x50, 0x05, 0x26, 0x42, 0x59, 0x15, 0x26, 0x43, - 0x59, 0x0d, 0xa6, 0x90, 0x0c, 0x72, 0x0f, 0x59, 0xab, 0xd3, 0xec, 0xc2, 0x74, 0x50, 0x23, 0x9c, - 0x54, 0x20, 0x08, 0x43, 0x55, 0xb8, 0x1e, 0x86, 0x6a, 0x30, 0x13, 0x54, 0xea, 0x5b, 0x05, 0x77, - 0xe0, 0x46, 0x50, 0x12, 0x27, 0x15, 0x98, 0x0d, 0x43, 0x55, 0xf8, 0x22, 0x0c, 0xd5, 0x20, 0x0c, - 0x42, 0x2a, 0xc6, 0x5d, 0x0c, 0x3f, 0x08, 0x7a, 0x18, 0x02, 0x55, 0x20, 0x0a, 0x65, 0x55, 0xf8, - 0x32, 0x94, 0xd5, 0x60, 0x2e, 0xa8, 0x5c, 0x53, 0xe9, 0x2b, 0x6d, 0xb8, 0x19, 0x94, 0x26, 0x50, - 0x05, 0x6e, 0x85, 0xb2, 0x2a, 0x7c, 0x15, 0xca, 0x6a, 0x50, 0x3e, 0xb8, 0x00, 0xd9, 0xe5, 0x5d, - 0xda, 0x14, 0xaf, 0xe5, 0x2e, 0xd8, 0x69, 0x77, 0x8f, 0x35, 0xac, 0x1e, 0x75, 0x71, 0x43, 0x6b, - 0xb6, 0x95, 0xe3, 0x9e, 0xd6, 0xe8, 0x6a, 0x9d, 0x6e, 0x5f, 0x1b, 0xf4, 0x54, 0x18, 0x41, 0xfb, - 0xe0, 0xe3, 0xff, 0x09, 0xc4, 0xbf, 0x9c, 0xbf, 0x3e, 0x55, 0x7a, 0x27, 0xf0, 0x5f, 0xa9, 0xfe, - 0xab, 0xf4, 0xf6, 0x26, 0x2f, 0xbd, 0xbb, 0xc9, 0x4b, 0x7f, 0xdf, 0xe4, 0xa5, 0x9f, 0x6f, 0xf3, - 0x91, 0x77, 0xb7, 0xf9, 0xc8, 0x9f, 0xb7, 0xf9, 0x08, 0xc8, 0x9b, 0xf4, 0xb1, 0x0b, 0xb4, 0xce, - 0xef, 0x77, 0xef, 0x8c, 0x87, 0xce, 0xa4, 0x37, 0xf5, 0x67, 0xbf, 0xb0, 
0xf3, 0xef, 0x90, 0x31, - 0xb1, 0x17, 0x5f, 0x44, 0xbf, 0x45, 0x77, 0xba, 0x0e, 0xb1, 0xfb, 0x4b, 0x07, 0xe1, 0xcd, 0x7f, - 0x7e, 0xbc, 0xd2, 0xf9, 0xe1, 0x30, 0x21, 0xf4, 0xd5, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x48, - 0x86, 0x67, 0x14, 0x55, 0x09, 0x00, 0x00, + // 971 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x96, 0x41, 0x6f, 0xe2, 0x46, + 0x1b, 0xc7, 0x71, 0x12, 0x02, 0x4c, 0x08, 0x3b, 0xef, 0x2c, 0xc9, 0xfa, 0x4d, 0x54, 0x42, 0xd3, + 0x2a, 0xa5, 0xa9, 0x04, 0x0a, 0x50, 0x69, 0x7b, 0xab, 0x09, 0x26, 0xa2, 0x21, 0x10, 0x0d, 0x90, + 0x2a, 0xdb, 0x4a, 0x96, 0xc1, 0x53, 0x6a, 0xc9, 0xcc, 0x58, 0xf6, 0x80, 0x92, 0x6f, 0xd1, 0x4f, + 0xd0, 0x4b, 0x0f, 0x95, 0xfa, 0x35, 0xda, 0xc3, 0x1e, 0xf7, 0x58, 0xf5, 0xb0, 0xaa, 0x92, 0x4b, + 0xbf, 0x45, 0xab, 0x19, 0x0c, 0x21, 0xa9, 0x9d, 0x34, 0x27, 0x66, 0x9e, 0xdf, 0xff, 0xf9, 0x3f, + 0xcf, 0x78, 0xc6, 0x83, 0xc1, 0x01, 0x73, 0x09, 0xe5, 0xc4, 0x21, 0x63, 0xc2, 0xbd, 0xeb, 0x92, + 0xeb, 0x31, 0xce, 0x4a, 0x0e, 0x1b, 0xf9, 0xa5, 0xe9, 0x91, 0xfc, 0x2d, 0xca, 0x10, 0xda, 0xbd, + 0xa7, 0x9b, 0x05, 0x8b, 0x92, 0x4f, 0x8f, 0x76, 0xb2, 0x23, 0x36, 0x62, 0xb3, 0x54, 0x31, 0x9a, + 0xd1, 0x9d, 0xc3, 0x30, 0xeb, 0x21, 0x1b, 0x8f, 0x19, 0x15, 0xe6, 0xb3, 0x51, 0xa0, 0x2d, 0x86, + 0x69, 0x3d, 0xe2, 0xb3, 0x89, 0x37, 0x24, 0x42, 0x3d, 0x1f, 0xcf, 0xf4, 0xfb, 0x6f, 0x40, 0xb2, + 0xc5, 0x46, 0x7e, 0xdd, 0xe4, 0x26, 0x6a, 0x83, 0xcd, 0x39, 0x35, 0x44, 0x47, 0xaa, 0x92, 0x5f, + 0x2d, 0x6c, 0x94, 0x3f, 0x2d, 0x3e, 0xd2, 0x72, 0x11, 0x07, 0x19, 0xc2, 0x05, 0xa7, 0xbd, 0xa5, + 0xd9, 0xfe, 0x8f, 0x2b, 0x20, 0xbd, 0x8c, 0xd1, 0x37, 0x60, 0xcb, 0x22, 0xae, 0x47, 0x86, 0x26, + 0x27, 0x96, 0xe1, 0x0f, 0x99, 0x1b, 0x14, 0xfa, 0x2b, 0x21, 0x2b, 0x1d, 0x3c, 0x5a, 0xa9, 0x2b, + 0xf4, 0xb2, 0xcc, 0xcb, 0x3b, 0x97, 0x45, 0x10, 0x9d, 0x82, 0xe4, 0xbc, 0xba, 0xaa, 0xe4, 0x95, + 0xc8, 0xc6, 0x17, 0x0f, 0x60, 0xa9, 0xf9, 0xda, 0xda, 0xdb, 0xf7, 0x7b, 0x31, 0xbc, 0x30, 0x40, + 0x3a, 0x00, 0x4b, 0xed, 0xad, 0x3c, 0xab, 0xbb, 0x94, 0xbf, 0xe8, 0xe9, 0x03, 0x61, 0xf3, 0x3d, + 0x19, 0x9b, 0xc6, 0xc4, 0x73, 0xd4, 0xd5, 0xbc, 0x52, 0x48, 0x09, 0x2c, 0x22, 0x7d, 0xcf, 0xd9, + 0xff, 0x4d, 0x01, 0xa9, 0xbb, 0x05, 0x74, 0x40, 0x5c, 0x66, 0x06, 0xdd, 0x57, 0x42, 0xcb, 0x05, + 0x9b, 0x3d, 0x3d, 0x2a, 0x36, 0xa9, 0xcf, 0xbd, 0xc9, 0x98, 0x50, 0x6e, 0x72, 0x9b, 0x51, 0xe9, + 0x13, 0xac, 0x63, 0xe6, 0x83, 0x4e, 0xc0, 0x86, 0xc3, 0x46, 0x86, 0x47, 0x86, 0xcc, 0xb3, 0xfe, + 0xdb, 0x2a, 0x5a, 0x6c, 0x84, 0xa5, 0x1c, 0x03, 0x67, 0x3e, 0x7c, 0x72, 0x19, 0x3f, 0xc5, 0x41, + 0x6a, 0x91, 0x88, 0x3e, 0x06, 0x19, 0x6e, 0x8f, 0x89, 0x31, 0xa1, 0xf6, 0x95, 0x41, 0x4d, 0xca, + 0xe4, 0x7a, 0xd6, 0x71, 0x5a, 0x44, 0xfb, 0xd4, 0xbe, 0x6a, 0x9b, 0x94, 0xa1, 0xcf, 0xc1, 0x2b, + 0x36, 0xf0, 0x89, 0x37, 0x25, 0x96, 0xf1, 0x40, 0xbe, 0x21, 0xe5, 0xd9, 0x39, 0xee, 0x2d, 0xa7, + 0xf5, 0xc0, 0x0b, 0x9f, 0x4c, 0x89, 0x67, 0xf3, 0x6b, 0x83, 0x4e, 0xc6, 0x03, 0xe2, 0xa9, 0x2b, + 0x79, 0xa5, 0x90, 0x29, 0x7f, 0xf6, 0xf8, 0xe6, 0x04, 0x39, 0x6d, 0x99, 0x82, 0x33, 0xfe, 0xbd, + 0x39, 0xfa, 0x08, 0x6c, 0x2e, 0x5c, 0x39, 0xb9, 0xe2, 0xc1, 0x12, 0xd3, 0xf3, 0x60, 0x8f, 0x5c, + 0x71, 0xa4, 0x81, 0xb5, 0x01, 0xb3, 0xae, 0xd5, 0xb8, 0xdc, 0x9d, 0x4f, 0x9e, 0xd8, 0x1d, 0x8d, + 0x5e, 0x5f, 0x98, 0xce, 0x64, 0xbe, 0x23, 0x32, 0x15, 0x9d, 0x01, 0x60, 0x72, 0xee, 0xd9, 0x83, + 0x09, 0x27, 0xbe, 0xba, 0x2e, 0xf7, 0xe3, 0x29, 0xa3, 0x53, 0x72, 0xcf, 0x68, 0xc9, 0x00, 0xbd, + 0x06, 0xaa, 0xe5, 0x31, 0xd7, 0x25, 0x96, 0x71, 0x17, 0x35, 0x86, 0x6c, 0x42, 0xb9, 0x9a, 0xc8, + 0x2b, 
0x85, 0x4d, 0xbc, 0x1d, 0x70, 0x6d, 0x81, 0x8f, 0x05, 0x45, 0x59, 0x10, 0xff, 0xce, 0x31, + 0x47, 0xbe, 0x9a, 0xcc, 0x2b, 0x85, 0x04, 0x9e, 0x4d, 0xd0, 0xb7, 0x20, 0xc9, 0x3d, 0x73, 0x48, + 0x0c, 0xdb, 0x52, 0x53, 0x79, 0xa5, 0x90, 0xae, 0x69, 0xa2, 0xe6, 0x1f, 0xef, 0xf7, 0xbe, 0x18, + 0xb1, 0x07, 0x6d, 0xda, 0xe2, 0x06, 0x72, 0x1c, 0x32, 0xe4, 0xcc, 0x2b, 0xb9, 0x96, 0xc9, 0xcd, + 0x92, 0x4d, 0x39, 0xf1, 0xa8, 0xe9, 0x94, 0xc4, 0xac, 0xd8, 0x13, 0x4e, 0xcd, 0x3a, 0x4e, 0x48, + 0xcb, 0xa6, 0x85, 0x2e, 0x41, 0xc2, 0x77, 0x4d, 0x2a, 0xcc, 0x81, 0x34, 0xff, 0x32, 0x30, 0x7f, + 0xfd, 0x7c, 0xf3, 0xae, 0x6b, 0xd2, 0x66, 0x1d, 0xaf, 0x0b, 0xc3, 0xa6, 0x25, 0xce, 0x27, 0x99, + 0x12, 0xca, 0x0d, 0x6a, 0x8e, 0x89, 0x9a, 0x9e, 0x9d, 0x4f, 0x19, 0x69, 0x9b, 0x63, 0xf2, 0xd5, + 0x5a, 0x72, 0x0d, 0xc6, 0x0f, 0x7f, 0x8d, 0x83, 0xcc, 0xfd, 0x73, 0x80, 0xf6, 0xc0, 0x6e, 0x57, + 0xbf, 0xd0, 0x71, 0xb3, 0x77, 0x69, 0xb4, 0xfb, 0x67, 0x35, 0x1d, 0x1b, 0xfd, 0x76, 0xf7, 0x5c, + 0x3f, 0x6e, 0x36, 0x9a, 0x7a, 0x1d, 0xc6, 0xd0, 0xff, 0xc1, 0xd6, 0x43, 0x41, 0x0f, 0x6b, 0xc7, + 0x3a, 0x54, 0xd0, 0x0e, 0xd8, 0x0e, 0x45, 0x65, 0xb8, 0x12, 0xc9, 0x2a, 0x70, 0x35, 0x92, 0x55, + 0xe1, 0x5a, 0x58, 0xb9, 0xba, 0x5e, 0xeb, 0x9f, 0xc0, 0x78, 0x58, 0x9a, 0x44, 0x65, 0xb8, 0x1e, + 0xc9, 0x2a, 0x30, 0x11, 0xc9, 0xaa, 0x30, 0x89, 0x54, 0x90, 0x7d, 0xc8, 0x9a, 0xed, 0x46, 0x07, + 0xa6, 0xc2, 0x1a, 0x11, 0xa4, 0x0c, 0x41, 0x14, 0xaa, 0xc0, 0x8d, 0x28, 0x54, 0x85, 0xe9, 0xb0, + 0x52, 0x5f, 0x6b, 0xb8, 0x0d, 0x37, 0xc3, 0x92, 0x04, 0x29, 0xc3, 0x4c, 0x14, 0xaa, 0xc0, 0x17, + 0x51, 0xa8, 0x0a, 0x61, 0x18, 0xd2, 0x31, 0xee, 0x60, 0xf8, 0xbf, 0xb0, 0x87, 0x21, 0x51, 0x19, + 0xa2, 0x48, 0x56, 0x81, 0x2f, 0x23, 0x59, 0x15, 0x66, 0xc3, 0xca, 0x35, 0xb4, 0x9e, 0xd6, 0x82, + 0x5b, 0x61, 0x69, 0x12, 0x95, 0xe1, 0x76, 0x24, 0xab, 0xc0, 0x57, 0x91, 0xac, 0x0a, 0xd5, 0xc3, + 0x4b, 0x90, 0x59, 0x5c, 0xb5, 0x0d, 0xf9, 0xd6, 0xee, 0x81, 0xdd, 0x56, 0xe7, 0xc4, 0xc0, 0xfa, + 0x71, 0x07, 0xd7, 0x8d, 0x46, 0x4b, 0x3b, 0xe9, 0x1a, 0xf5, 0x8e, 0xd1, 0xee, 0xf4, 0x8c, 0x7e, + 0x57, 0x87, 0x31, 0x74, 0x00, 0x3e, 0xfc, 0x97, 0x40, 0x1e, 0xb9, 0x60, 0x7c, 0xa6, 0x75, 0x4f, + 0xe1, 0xdf, 0x4a, 0xed, 0x67, 0xe5, 0xed, 0x4d, 0x4e, 0x79, 0x77, 0x93, 0x53, 0xfe, 0xbc, 0xc9, + 0x29, 0x3f, 0xdc, 0xe6, 0x62, 0xef, 0x6e, 0x73, 0xb1, 0xdf, 0x6f, 0x73, 0x31, 0x90, 0xb3, 0xd9, + 0x63, 0xf7, 0x6b, 0x4d, 0x5c, 0xff, 0xfe, 0xb9, 0x08, 0x9d, 0x2b, 0x6f, 0x6a, 0xcf, 0x7e, 0x9f, + 0x67, 0x9f, 0x29, 0x23, 0x42, 0xe7, 0x1f, 0x4c, 0xbf, 0xac, 0xec, 0x76, 0x5c, 0x42, 0x7b, 0x0b, + 0x07, 0xe9, 0x2d, 0xfe, 0x9d, 0xfc, 0xe2, 0xc5, 0xd1, 0x60, 0x5d, 0xea, 0x2b, 0xff, 0x04, 0x00, + 0x00, 0xff, 0xff, 0xc9, 0xbc, 0x36, 0x44, 0x74, 0x09, 0x00, 0x00, } func (m *LogsData) Marshal() (dAtA []byte, err error) { @@ -783,6 +806,13 @@ func (m *LogRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.EventName) > 0 { + i -= len(m.EventName) + copy(dAtA[i:], m.EventName) + i = encodeVarintLogs(dAtA, i, uint64(len(m.EventName))) + i-- + dAtA[i] = 0x62 + } if m.ObservedTimeUnixNano != 0 { i -= 8 encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.ObservedTimeUnixNano)) @@ -976,6 +1006,10 @@ func (m *LogRecord) Size() (n int) { if m.ObservedTimeUnixNano != 0 { n += 9 } + l = len(m.EventName) + if l > 0 { + n += 1 + l + sovLogs(uint64(l)) + } return n } @@ -1663,6 +1697,38 @@ func (m *LogRecord) Unmarshal(dAtA []byte) error { } m.ObservedTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) iNdEx += 8 + case 12: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field EventName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogs + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLogs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EventName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipLogs(dAtA[iNdEx:]) diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1/metrics.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1/metrics.pb.go index 3649bd83f81..9021e432c52 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1/metrics.pb.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1/metrics.pb.go @@ -160,6 +160,25 @@ func (DataPointFlags) EnumDescriptor() ([]byte, []int) { // storage, OR can be embedded by other protocols that transfer OTLP metrics // data but do not implement the OTLP protocol. // +// MetricsData +// └─── ResourceMetrics +// +// ├── Resource +// ├── SchemaURL +// └── ScopeMetrics +// ├── Scope +// ├── SchemaURL +// └── Metric +// ├── Name +// ├── Description +// ├── Unit +// └── data +// ├── Gauge +// ├── Sum +// ├── Histogram +// ├── ExponentialHistogram +// └── Summary +// // The main difference between this message and collector protocol is that // in this message there will not be any "control" or "metadata" specific to // OTLP protocol. @@ -224,7 +243,8 @@ type ResourceMetrics struct { // A list of metrics that originate from a resource. ScopeMetrics []*ScopeMetrics `protobuf:"bytes,2,rep,name=scope_metrics,json=scopeMetrics,proto3" json:"scope_metrics,omitempty"` // The Schema URL, if known. This is the identifier of the Schema that the resource data - // is recorded in. To learn more about Schema URL see + // is recorded in. Notably, the last part of the URL path is the version number of the + // schema: http[s]://server[:port]/path/. To learn more about Schema URL see // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url // This schema_url applies to the data in the "resource" field. It does not apply // to the data in the "scope_metrics" field which have their own schema_url field. @@ -301,7 +321,8 @@ type ScopeMetrics struct { // A list of metrics that originate from an instrumentation library. Metrics []*Metric `protobuf:"bytes,2,rep,name=metrics,proto3" json:"metrics,omitempty"` // The Schema URL, if known. This is the identifier of the Schema that the metric data - // is recorded in. To learn more about Schema URL see + // is recorded in. Notably, the last part of the URL path is the version number of the + // schema: http[s]://server[:port]/path/. To learn more about Schema URL see // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url // This schema_url applies to all metrics in the "metrics" field. SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` @@ -855,6 +876,9 @@ func (m *ExponentialHistogram) GetAggregationTemporality() AggregationTemporalit // data type. These data points cannot always be merged in a meaningful way. 
// While they can be useful in some applications, histogram data points are // recommended for new applications. +// Summary metrics do not have an aggregation temporality field. This is +// because the count and sum fields of a SummaryDataPoint are assumed to be +// cumulative values. type Summary struct { DataPoints []*SummaryDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` } @@ -1084,7 +1108,7 @@ type HistogramDataPoint struct { // events, and is assumed to be monotonic over the values of these events. // Negative events *can* be recorded, but sum should not be filled out when // doing so. This is specifically to enforce compatibility w/ OpenMetrics, - // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram + // see: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#histogram // // Types that are valid to be assigned to Sum_: // *HistogramDataPoint_Sum @@ -1329,7 +1353,7 @@ type ExponentialHistogramDataPoint struct { // events, and is assumed to be monotonic over the values of these events. // Negative events *can* be recorded, but sum should not be filled out when // doing so. This is specifically to enforce compatibility w/ OpenMetrics, - // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram + // see: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#histogram // // Types that are valid to be assigned to Sum_: // *ExponentialHistogramDataPoint_Sum @@ -1644,7 +1668,8 @@ func (m *ExponentialHistogramDataPoint_Buckets) GetBucketCounts() []uint64 { } // SummaryDataPoint is a single data point in a timeseries that describes the -// time-varying values of a Summary metric. +// time-varying values of a Summary metric. The count and sum fields represent +// cumulative values. type SummaryDataPoint struct { // The set of key/value pairs that uniquely identify the timeseries from // where this point belongs. The list may be empty (may contain 0 elements). @@ -1671,7 +1696,7 @@ type SummaryDataPoint struct { // events, and is assumed to be monotonic over the values of these events. // Negative events *can* be recorded, but sum should not be filled out when // doing so. This is specifically to enforce compatibility w/ OpenMetrics, - // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#summary + // see: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#summary Sum float64 `protobuf:"fixed64,5,opt,name=sum,proto3" json:"sum,omitempty"` // (Optional) list of values at different quantiles of the distribution calculated // from the current snapshot. The quantiles must be strictly increasing. diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development/profiles.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development/profiles.pb.go new file mode 100644 index 00000000000..bafc9d85f09 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development/profiles.pb.go @@ -0,0 +1,5348 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: opentelemetry/proto/profiles/v1development/profiles.proto + +package v1development + +import ( + fmt "fmt" + io "io" + math "math" + math_bits "math/bits" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + + go_opentelemetry_io_collector_pdata_internal_data "go.opentelemetry.io/collector/pdata/internal/data" + v11 "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" + v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Specifies the method of aggregating metric values, either DELTA (change since last report) +// or CUMULATIVE (total since a fixed start time). +type AggregationTemporality int32 + +const ( + // UNSPECIFIED is the default AggregationTemporality, it MUST not be used. + AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED AggregationTemporality = 0 + //* DELTA is an AggregationTemporality for a profiler which reports + //changes since last report time. Successive metrics contain aggregation of + //values from continuous and non-overlapping intervals. + // + //The values for a DELTA metric are based only on the time interval + //associated with one measurement cycle. There is no dependency on + //previous measurements like is the case for CUMULATIVE metrics. + // + //For example, consider a system measuring the number of requests that + //it receives and reports the sum of these requests every second as a + //DELTA metric: + // + //1. The system starts receiving at time=t_0. + //2. A request is received, the system measures 1 request. + //3. A request is received, the system measures 1 request. + //4. A request is received, the system measures 1 request. + //5. The 1 second collection cycle ends. A metric is exported for the + //number of requests received over the interval of time t_0 to + //t_0+1 with a value of 3. + //6. A request is received, the system measures 1 request. + //7. A request is received, the system measures 1 request. + //8. The 1 second collection cycle ends. A metric is exported for the + //number of requests received over the interval of time t_0+1 to + //t_0+2 with a value of 2. + AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA AggregationTemporality = 1 + //* CUMULATIVE is an AggregationTemporality for a profiler which + //reports changes since a fixed start time. This means that current values + //of a CUMULATIVE metric depend on all previous measurements since the + //start time. Because of this, the sender is required to retain this state + //in some form. If this state is lost or invalidated, the CUMULATIVE metric + //values MUST be reset and a new fixed start time following the last + //reported measurement time sent MUST be used. + // + //For example, consider a system measuring the number of requests that + //it receives and reports the sum of these requests every second as a + //CUMULATIVE metric: + // + //1. The system starts receiving at time=t_0. + //2. A request is received, the system measures 1 request. + //3. A request is received, the system measures 1 request. + //4. 
A request is received, the system measures 1 request. + //5. The 1 second collection cycle ends. A metric is exported for the + //number of requests received over the interval of time t_0 to + //t_0+1 with a value of 3. + //6. A request is received, the system measures 1 request. + //7. A request is received, the system measures 1 request. + //8. The 1 second collection cycle ends. A metric is exported for the + //number of requests received over the interval of time t_0 to + //t_0+2 with a value of 5. + //9. The system experiences a fault and loses state. + //10. The system recovers and resumes receiving at time=t_1. + //11. A request is received, the system measures 1 request. + //12. The 1 second collection cycle ends. A metric is exported for the + //number of requests received over the interval of time t_1 to + //t_1+1 with a value of 1. + // + //Note: Even though, when reporting changes since last report time, using + //CUMULATIVE is valid, it is not recommended. + AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE AggregationTemporality = 2 +) + +var AggregationTemporality_name = map[int32]string{ + 0: "AGGREGATION_TEMPORALITY_UNSPECIFIED", + 1: "AGGREGATION_TEMPORALITY_DELTA", + 2: "AGGREGATION_TEMPORALITY_CUMULATIVE", +} + +var AggregationTemporality_value = map[string]int32{ + "AGGREGATION_TEMPORALITY_UNSPECIFIED": 0, + "AGGREGATION_TEMPORALITY_DELTA": 1, + "AGGREGATION_TEMPORALITY_CUMULATIVE": 2, +} + +func (x AggregationTemporality) String() string { + return proto.EnumName(AggregationTemporality_name, int32(x)) +} + +func (AggregationTemporality) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_ddd0cf081a2fe76f, []int{0} +} + +// ProfilesData represents the profiles data that can be stored in persistent storage, +// OR can be embedded by other protocols that transfer OTLP profiles data but do not +// implement the OTLP protocol. +// +// The main difference between this message and collector protocol is that +// in this message there will not be any "control" or "metadata" specific to +// OTLP protocol. +// +// When new fields are added into this message, the OTLP request MUST be updated +// as well. +type ProfilesData struct { + // An array of ResourceProfiles. + // For data coming from a single resource this array will typically contain + // one element. Intermediary nodes that receive data from multiple origins + // typically batch the data before forwarding further and in that case this + // array will contain multiple elements. 
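The DELTA and CUMULATIVE doc comments above walk through the same request-counting scenario; a small simulation reproduces their numbers (DELTA reports 3 then 2, CUMULATIVE reports 3 then 5):

```go
package main

import "fmt"

func main() {
	// Requests observed during each 1-second collection cycle, per the
	// example in the comments above.
	cycles := [][]int{{1, 1, 1}, {1, 1}}

	cumulative := 0
	for i, requests := range cycles {
		delta := 0
		for _, r := range requests {
			delta += r
		}
		cumulative += delta
		fmt.Printf("cycle %d: DELTA=%d CUMULATIVE=%d\n", i+1, delta, cumulative)
	}
	// cycle 1: DELTA=3 CUMULATIVE=3
	// cycle 2: DELTA=2 CUMULATIVE=5
}
```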
+ ResourceProfiles []*ResourceProfiles `protobuf:"bytes,1,rep,name=resource_profiles,json=resourceProfiles,proto3" json:"resource_profiles,omitempty"` +} + +func (m *ProfilesData) Reset() { *m = ProfilesData{} } +func (m *ProfilesData) String() string { return proto.CompactTextString(m) } +func (*ProfilesData) ProtoMessage() {} +func (*ProfilesData) Descriptor() ([]byte, []int) { + return fileDescriptor_ddd0cf081a2fe76f, []int{0} +} +func (m *ProfilesData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProfilesData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ProfilesData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ProfilesData) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProfilesData.Merge(m, src) +} +func (m *ProfilesData) XXX_Size() int { + return m.Size() +} +func (m *ProfilesData) XXX_DiscardUnknown() { + xxx_messageInfo_ProfilesData.DiscardUnknown(m) +} + +var xxx_messageInfo_ProfilesData proto.InternalMessageInfo + +func (m *ProfilesData) GetResourceProfiles() []*ResourceProfiles { + if m != nil { + return m.ResourceProfiles + } + return nil +} + +// A collection of ScopeProfiles from a Resource. +type ResourceProfiles struct { + // The resource for the profiles in this message. + // If this field is not set then no resource info is known. + Resource v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource"` + // A list of ScopeProfiles that originate from a resource. + ScopeProfiles []*ScopeProfiles `protobuf:"bytes,2,rep,name=scope_profiles,json=scopeProfiles,proto3" json:"scope_profiles,omitempty"` + // The Schema URL, if known. This is the identifier of the Schema that the resource data + // is recorded in. Notably, the last part of the URL path is the version number of the + // schema: http[s]://server[:port]/path/. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to the data in the "resource" field. It does not apply + // to the data in the "scope_profiles" field which have their own schema_url field. 
+ SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` +} + +func (m *ResourceProfiles) Reset() { *m = ResourceProfiles{} } +func (m *ResourceProfiles) String() string { return proto.CompactTextString(m) } +func (*ResourceProfiles) ProtoMessage() {} +func (*ResourceProfiles) Descriptor() ([]byte, []int) { + return fileDescriptor_ddd0cf081a2fe76f, []int{1} +} +func (m *ResourceProfiles) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceProfiles) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResourceProfiles.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResourceProfiles) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceProfiles.Merge(m, src) +} +func (m *ResourceProfiles) XXX_Size() int { + return m.Size() +} +func (m *ResourceProfiles) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceProfiles.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceProfiles proto.InternalMessageInfo + +func (m *ResourceProfiles) GetResource() v1.Resource { + if m != nil { + return m.Resource + } + return v1.Resource{} +} + +func (m *ResourceProfiles) GetScopeProfiles() []*ScopeProfiles { + if m != nil { + return m.ScopeProfiles + } + return nil +} + +func (m *ResourceProfiles) GetSchemaUrl() string { + if m != nil { + return m.SchemaUrl + } + return "" +} + +// A collection of Profiles produced by an InstrumentationScope. +type ScopeProfiles struct { + // The instrumentation scope information for the profiles in this message. + // Semantically when InstrumentationScope isn't set, it is equivalent with + // an empty instrumentation scope name (unknown). + Scope v11.InstrumentationScope `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope"` + // A list of Profiles that originate from an instrumentation scope. + Profiles []*Profile `protobuf:"bytes,2,rep,name=profiles,proto3" json:"profiles,omitempty"` + // The Schema URL, if known. This is the identifier of the Schema that the profile data + // is recorded in. Notably, the last part of the URL path is the version number of the + // schema: http[s]://server[:port]/path/. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to all profiles in the "profiles" field. 
+ SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` +} + +func (m *ScopeProfiles) Reset() { *m = ScopeProfiles{} } +func (m *ScopeProfiles) String() string { return proto.CompactTextString(m) } +func (*ScopeProfiles) ProtoMessage() {} +func (*ScopeProfiles) Descriptor() ([]byte, []int) { + return fileDescriptor_ddd0cf081a2fe76f, []int{2} +} +func (m *ScopeProfiles) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScopeProfiles) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ScopeProfiles.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ScopeProfiles) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScopeProfiles.Merge(m, src) +} +func (m *ScopeProfiles) XXX_Size() int { + return m.Size() +} +func (m *ScopeProfiles) XXX_DiscardUnknown() { + xxx_messageInfo_ScopeProfiles.DiscardUnknown(m) +} + +var xxx_messageInfo_ScopeProfiles proto.InternalMessageInfo + +func (m *ScopeProfiles) GetScope() v11.InstrumentationScope { + if m != nil { + return m.Scope + } + return v11.InstrumentationScope{} +} + +func (m *ScopeProfiles) GetProfiles() []*Profile { + if m != nil { + return m.Profiles + } + return nil +} + +func (m *ScopeProfiles) GetSchemaUrl() string { + if m != nil { + return m.SchemaUrl + } + return "" +} + +// Represents a complete profile, including sample types, samples, +// mappings to binaries, locations, functions, string table, and additional metadata. +// It modifies and annotates pprof Profile with OpenTelemetry specific fields. +// +// Note that whilst fields in this message retain the name and field id from pprof in most cases +// for ease of understanding data migration, it is not intended that pprof:Profile and +// OpenTelemetry:Profile encoding be wire compatible. +type Profile struct { + // A description of the samples associated with each Sample.value. + // For a cpu profile this might be: + // [["cpu","nanoseconds"]] or [["wall","seconds"]] or [["syscall","count"]] + // For a heap profile, this might be: + // [["allocations","count"], ["space","bytes"]], + // If one of the values represents the number of events represented + // by the sample, by convention it should be at index 0 and use + // sample_type.unit == "count". + SampleType []*ValueType `protobuf:"bytes,1,rep,name=sample_type,json=sampleType,proto3" json:"sample_type,omitempty"` + // The set of samples recorded in this profile. + Sample []*Sample `protobuf:"bytes,2,rep,name=sample,proto3" json:"sample,omitempty"` + // Mapping from address ranges to the image/binary/library mapped + // into that address range. mapping[0] will be the main binary. + // If multiple binaries contribute to the Profile and no main + // binary can be identified, mapping[0] has no special meaning. + MappingTable []*Mapping `protobuf:"bytes,3,rep,name=mapping_table,json=mappingTable,proto3" json:"mapping_table,omitempty"` + // Locations referenced by samples via location_indices. + LocationTable []*Location `protobuf:"bytes,4,rep,name=location_table,json=locationTable,proto3" json:"location_table,omitempty"` + // Array of locations referenced by samples. + LocationIndices []int32 `protobuf:"varint,5,rep,packed,name=location_indices,json=locationIndices,proto3" json:"location_indices,omitempty"` + // Functions referenced by locations. 
+ FunctionTable []*Function `protobuf:"bytes,6,rep,name=function_table,json=functionTable,proto3" json:"function_table,omitempty"` + // Lookup table for attributes. + AttributeTable []v11.KeyValue `protobuf:"bytes,7,rep,name=attribute_table,json=attributeTable,proto3" json:"attribute_table"` + // Represents a mapping between Attribute Keys and Units. + AttributeUnits []*AttributeUnit `protobuf:"bytes,8,rep,name=attribute_units,json=attributeUnits,proto3" json:"attribute_units,omitempty"` + // Lookup table for links. + LinkTable []*Link `protobuf:"bytes,9,rep,name=link_table,json=linkTable,proto3" json:"link_table,omitempty"` + // A common table for strings referenced by various messages. + // string_table[0] must always be "". + StringTable []string `protobuf:"bytes,10,rep,name=string_table,json=stringTable,proto3" json:"string_table,omitempty"` + // Time of collection (UTC) represented as nanoseconds past the epoch. + TimeNanos int64 `protobuf:"varint,11,opt,name=time_nanos,json=timeNanos,proto3" json:"time_nanos,omitempty"` + // Duration of the profile, if a duration makes sense. + DurationNanos int64 `protobuf:"varint,12,opt,name=duration_nanos,json=durationNanos,proto3" json:"duration_nanos,omitempty"` + // The kind of events between sampled occurrences. + // e.g [ "cpu","cycles" ] or [ "heap","bytes" ] + PeriodType ValueType `protobuf:"bytes,13,opt,name=period_type,json=periodType,proto3" json:"period_type"` + // The number of events between sampled occurrences. + Period int64 `protobuf:"varint,14,opt,name=period,proto3" json:"period,omitempty"` + // Free-form text associated with the profile. The text is displayed as is + // to the user by the tools that read profiles (e.g. by pprof). This field + // should not be used to store any machine-readable information, it is only + // for human-friendly content. The profile must stay functional if this field + // is cleaned. + CommentStrindices []int32 `protobuf:"varint,15,rep,packed,name=comment_strindices,json=commentStrindices,proto3" json:"comment_strindices,omitempty"` + // Index into the string table of the type of the preferred sample + // value. If unset, clients should default to the last sample value. + DefaultSampleTypeStrindex int32 `protobuf:"varint,16,opt,name=default_sample_type_strindex,json=defaultSampleTypeStrindex,proto3" json:"default_sample_type_strindex,omitempty"` + // A globally unique identifier for a profile. The ID is a 16-byte array. An ID with + // all zeroes is considered invalid. + // + // This field is required. + ProfileId go_opentelemetry_io_collector_pdata_internal_data.ProfileID `protobuf:"bytes,17,opt,name=profile_id,json=profileId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.ProfileID" json:"profile_id"` + // dropped_attributes_count is the number of attributes that were discarded. Attributes + // can be discarded because their keys are too long or because there are too many + // attributes. If this value is 0, then no attributes were dropped. + DroppedAttributesCount uint32 `protobuf:"varint,19,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` + // Specifies format of the original payload. Common values are defined in semantic conventions. [required if original_payload is present] + OriginalPayloadFormat string `protobuf:"bytes,20,opt,name=original_payload_format,json=originalPayloadFormat,proto3" json:"original_payload_format,omitempty"` + // Original payload can be stored in this field. 
This can be useful for users who want to get the original payload. + // Formats such as JFR are highly extensible and can contain more information than what is defined in this spec. + // Inclusion of original payload should be configurable by the user. Default behavior should be to not include the original payload. + // If the original payload is in pprof format, it SHOULD not be included in this field. + // The field is optional, however if it is present then equivalent converted data should be populated in other fields + // of this message as far as is practicable. + OriginalPayload []byte `protobuf:"bytes,21,opt,name=original_payload,json=originalPayload,proto3" json:"original_payload,omitempty"` + // References to attributes in attribute_table. [optional] + // It is a collection of key/value pairs. Note, global attributes + // like server name can be set using the resource API. Examples of attributes: + // + // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" + // "/http/server_latency": 300 + // "abc.com/myattribute": true + // "abc.com/score": 10.239 + // + // The OpenTelemetry API specification further restricts the allowed value types: + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + AttributeIndices []int32 `protobuf:"varint,22,rep,packed,name=attribute_indices,json=attributeIndices,proto3" json:"attribute_indices,omitempty"` +} + +func (m *Profile) Reset() { *m = Profile{} } +func (m *Profile) String() string { return proto.CompactTextString(m) } +func (*Profile) ProtoMessage() {} +func (*Profile) Descriptor() ([]byte, []int) { + return fileDescriptor_ddd0cf081a2fe76f, []int{3} +} +func (m *Profile) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Profile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Profile.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Profile) XXX_Merge(src proto.Message) { + xxx_messageInfo_Profile.Merge(m, src) +} +func (m *Profile) XXX_Size() int { + return m.Size() +} +func (m *Profile) XXX_DiscardUnknown() { + xxx_messageInfo_Profile.DiscardUnknown(m) +} + +var xxx_messageInfo_Profile proto.InternalMessageInfo + +func (m *Profile) GetSampleType() []*ValueType { + if m != nil { + return m.SampleType + } + return nil +} + +func (m *Profile) GetSample() []*Sample { + if m != nil { + return m.Sample + } + return nil +} + +func (m *Profile) GetMappingTable() []*Mapping { + if m != nil { + return m.MappingTable + } + return nil +} + +func (m *Profile) GetLocationTable() []*Location { + if m != nil { + return m.LocationTable + } + return nil +} + +func (m *Profile) GetLocationIndices() []int32 { + if m != nil { + return m.LocationIndices + } + return nil +} + +func (m *Profile) GetFunctionTable() []*Function { + if m != nil { + return m.FunctionTable + } + return nil +} + +func (m *Profile) GetAttributeTable() []v11.KeyValue { + if m != nil { + return m.AttributeTable + } + return nil +} + +func (m *Profile) GetAttributeUnits() []*AttributeUnit { + if m != nil { + return m.AttributeUnits + } + return nil +} + +func (m *Profile) GetLinkTable() []*Link { + if m != nil { + return m.LinkTable 
+ } + return nil +} + +func (m *Profile) GetStringTable() []string { + if m != nil { + return m.StringTable + } + return nil +} + +func (m *Profile) GetTimeNanos() int64 { + if m != nil { + return m.TimeNanos + } + return 0 +} + +func (m *Profile) GetDurationNanos() int64 { + if m != nil { + return m.DurationNanos + } + return 0 +} + +func (m *Profile) GetPeriodType() ValueType { + if m != nil { + return m.PeriodType + } + return ValueType{} +} + +func (m *Profile) GetPeriod() int64 { + if m != nil { + return m.Period + } + return 0 +} + +func (m *Profile) GetCommentStrindices() []int32 { + if m != nil { + return m.CommentStrindices + } + return nil +} + +func (m *Profile) GetDefaultSampleTypeStrindex() int32 { + if m != nil { + return m.DefaultSampleTypeStrindex + } + return 0 +} + +func (m *Profile) GetDroppedAttributesCount() uint32 { + if m != nil { + return m.DroppedAttributesCount + } + return 0 +} + +func (m *Profile) GetOriginalPayloadFormat() string { + if m != nil { + return m.OriginalPayloadFormat + } + return "" +} + +func (m *Profile) GetOriginalPayload() []byte { + if m != nil { + return m.OriginalPayload + } + return nil +} + +func (m *Profile) GetAttributeIndices() []int32 { + if m != nil { + return m.AttributeIndices + } + return nil +} + +// Represents a mapping between Attribute Keys and Units. +type AttributeUnit struct { + // Index into string table. + AttributeKeyStrindex int32 `protobuf:"varint,1,opt,name=attribute_key_strindex,json=attributeKeyStrindex,proto3" json:"attribute_key_strindex,omitempty"` + // Index into string table. + UnitStrindex int32 `protobuf:"varint,2,opt,name=unit_strindex,json=unitStrindex,proto3" json:"unit_strindex,omitempty"` +} + +func (m *AttributeUnit) Reset() { *m = AttributeUnit{} } +func (m *AttributeUnit) String() string { return proto.CompactTextString(m) } +func (*AttributeUnit) ProtoMessage() {} +func (*AttributeUnit) Descriptor() ([]byte, []int) { + return fileDescriptor_ddd0cf081a2fe76f, []int{4} +} +func (m *AttributeUnit) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AttributeUnit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AttributeUnit.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AttributeUnit) XXX_Merge(src proto.Message) { + xxx_messageInfo_AttributeUnit.Merge(m, src) +} +func (m *AttributeUnit) XXX_Size() int { + return m.Size() +} +func (m *AttributeUnit) XXX_DiscardUnknown() { + xxx_messageInfo_AttributeUnit.DiscardUnknown(m) +} + +var xxx_messageInfo_AttributeUnit proto.InternalMessageInfo + +func (m *AttributeUnit) GetAttributeKeyStrindex() int32 { + if m != nil { + return m.AttributeKeyStrindex + } + return 0 +} + +func (m *AttributeUnit) GetUnitStrindex() int32 { + if m != nil { + return m.UnitStrindex + } + return 0 +} + +// A pointer from a profile Sample to a trace Span. +// Connects a profile sample to a trace span, identified by unique trace and span IDs. +type Link struct { + // A unique identifier of a trace that this linked span is part of. The ID is a + // 16-byte array. + TraceId go_opentelemetry_io_collector_pdata_internal_data.TraceID `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.TraceID" json:"trace_id"` + // A unique identifier for the linked span. The ID is an 8-byte array. 
+ SpanId go_opentelemetry_io_collector_pdata_internal_data.SpanID `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.SpanID" json:"span_id"` +} + +func (m *Link) Reset() { *m = Link{} } +func (m *Link) String() string { return proto.CompactTextString(m) } +func (*Link) ProtoMessage() {} +func (*Link) Descriptor() ([]byte, []int) { + return fileDescriptor_ddd0cf081a2fe76f, []int{5} +} +func (m *Link) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Link) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Link.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Link) XXX_Merge(src proto.Message) { + xxx_messageInfo_Link.Merge(m, src) +} +func (m *Link) XXX_Size() int { + return m.Size() +} +func (m *Link) XXX_DiscardUnknown() { + xxx_messageInfo_Link.DiscardUnknown(m) +} + +var xxx_messageInfo_Link proto.InternalMessageInfo + +// ValueType describes the type and units of a value, with an optional aggregation temporality. +type ValueType struct { + TypeStrindex int32 `protobuf:"varint,1,opt,name=type_strindex,json=typeStrindex,proto3" json:"type_strindex,omitempty"` + UnitStrindex int32 `protobuf:"varint,2,opt,name=unit_strindex,json=unitStrindex,proto3" json:"unit_strindex,omitempty"` + AggregationTemporality AggregationTemporality `protobuf:"varint,3,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.profiles.v1development.AggregationTemporality" json:"aggregation_temporality,omitempty"` +} + +func (m *ValueType) Reset() { *m = ValueType{} } +func (m *ValueType) String() string { return proto.CompactTextString(m) } +func (*ValueType) ProtoMessage() {} +func (*ValueType) Descriptor() ([]byte, []int) { + return fileDescriptor_ddd0cf081a2fe76f, []int{6} +} +func (m *ValueType) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValueType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ValueType.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ValueType) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValueType.Merge(m, src) +} +func (m *ValueType) XXX_Size() int { + return m.Size() +} +func (m *ValueType) XXX_DiscardUnknown() { + xxx_messageInfo_ValueType.DiscardUnknown(m) +} + +var xxx_messageInfo_ValueType proto.InternalMessageInfo + +func (m *ValueType) GetTypeStrindex() int32 { + if m != nil { + return m.TypeStrindex + } + return 0 +} + +func (m *ValueType) GetUnitStrindex() int32 { + if m != nil { + return m.UnitStrindex + } + return 0 +} + +func (m *ValueType) GetAggregationTemporality() AggregationTemporality { + if m != nil { + return m.AggregationTemporality + } + return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED +} + +// Each Sample records values encountered in some program +// context. The program context is typically a stack trace, perhaps +// augmented with auxiliary information like the thread-id, some +// indicator of a higher level request being handled etc. +type Sample struct { + // locations_start_index along with locations_length refers to a slice of locations in Profile.location_indices.
+ LocationsStartIndex int32 `protobuf:"varint,1,opt,name=locations_start_index,json=locationsStartIndex,proto3" json:"locations_start_index,omitempty"` + // locations_length along with locations_start_index refers to a slice of locations in Profile.location_indices. + // Supersedes location_index. + LocationsLength int32 `protobuf:"varint,2,opt,name=locations_length,json=locationsLength,proto3" json:"locations_length,omitempty"` + // The type and unit of each value is defined by the corresponding + // entry in Profile.sample_type. All samples must have the same + // number of values, the same as the length of Profile.sample_type. + // When aggregating multiple samples into a single sample, the + // result has a list of values that is the element-wise sum of the + // lists of the originals. + Value []int64 `protobuf:"varint,3,rep,packed,name=value,proto3" json:"value,omitempty"` + // References to attributes in Profile.attribute_table. [optional] + AttributeIndices []int32 `protobuf:"varint,4,rep,packed,name=attribute_indices,json=attributeIndices,proto3" json:"attribute_indices,omitempty"` + // Reference to link in Profile.link_table. [optional] + // + // Types that are valid to be assigned to LinkIndex_: + // *Sample_LinkIndex + LinkIndex_ isSample_LinkIndex_ `protobuf_oneof:"link_index_"` + // Timestamps associated with Sample represented in nanoseconds. These timestamps are expected + // to fall within the Profile's time range. [optional] + TimestampsUnixNano []uint64 `protobuf:"varint,6,rep,packed,name=timestamps_unix_nano,json=timestampsUnixNano,proto3" json:"timestamps_unix_nano,omitempty"` +} + +func (m *Sample) Reset() { *m = Sample{} } +func (m *Sample) String() string { return proto.CompactTextString(m) } +func (*Sample) ProtoMessage() {} +func (*Sample) Descriptor() ([]byte, []int) { + return fileDescriptor_ddd0cf081a2fe76f, []int{7} +} +func (m *Sample) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Sample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Sample.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Sample) XXX_Merge(src proto.Message) { + xxx_messageInfo_Sample.Merge(m, src) +} +func (m *Sample) XXX_Size() int { + return m.Size() +} +func (m *Sample) XXX_DiscardUnknown() { + xxx_messageInfo_Sample.DiscardUnknown(m) +} + +var xxx_messageInfo_Sample proto.InternalMessageInfo + +type isSample_LinkIndex_ interface { + isSample_LinkIndex_() + MarshalTo([]byte) (int, error) + Size() int +} + +type Sample_LinkIndex struct { + LinkIndex int32 `protobuf:"varint,5,opt,name=link_index,json=linkIndex,proto3,oneof" json:"link_index,omitempty"` +} + +func (*Sample_LinkIndex) isSample_LinkIndex_() {} + +func (m *Sample) GetLinkIndex_() isSample_LinkIndex_ { + if m != nil { + return m.LinkIndex_ + } + return nil +} + +func (m *Sample) GetLocationsStartIndex() int32 { + if m != nil { + return m.LocationsStartIndex + } + return 0 +} + +func (m *Sample) GetLocationsLength() int32 { + if m != nil { + return m.LocationsLength + } + return 0 +} + +func (m *Sample) GetValue() []int64 { + if m != nil { + return m.Value + } + return nil +} + +func (m *Sample) GetAttributeIndices() []int32 { + if m != nil { + return m.AttributeIndices + } + return nil +} + +func (m *Sample) GetLinkIndex() int32 { + if x, ok := m.GetLinkIndex_().(*Sample_LinkIndex); ok { + return x.LinkIndex + } + return 0 +} + 
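The accessors above complete the picture the comments on Profile and Sample sketch out: every cross-reference in this schema is an index into one of the Profile-level lookup tables rather than an embedded message, which keeps the wire format compact and the tables deduplicated. As a minimal sketch of how those tables chain together, the hypothetical helper below (not part of the generated API; bounds checks are omitted and in-range indices are assumed) resolves one Sample to its human-readable frame names using only the generated getters defined in this file:

func resolveFrameNames(p *Profile, s *Sample) []string {
    var names []string
    // The sample owns the half-open window [start, end) of Profile.location_indices.
    start := int(s.GetLocationsStartIndex())
    end := start + int(s.GetLocationsLength())
    for _, locIdx := range p.GetLocationIndices()[start:end] {
        loc := p.GetLocationTable()[locIdx]
        // A Location carries one Line per inlined function; line[0] is the
        // innermost frame (e.g. memcpy inlined into printf).
        for _, line := range loc.GetLine() {
            fn := p.GetFunctionTable()[line.GetFunctionIndex()]
            // Function names are indices into the shared string table,
            // whose entry 0 must always be "".
            names = append(names, p.GetStringTable()[fn.GetNameStrindex()])
        }
    }
    return names
}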
+func (m *Sample) GetTimestampsUnixNano() []uint64 { + if m != nil { + return m.TimestampsUnixNano + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Sample) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Sample_LinkIndex)(nil), + } +} + +// Describes the mapping of a binary in memory, including its address range, +// file offset, and metadata like build ID +type Mapping struct { + // Address at which the binary (or DLL) is loaded into memory. + MemoryStart uint64 `protobuf:"varint,1,opt,name=memory_start,json=memoryStart,proto3" json:"memory_start,omitempty"` + // The limit of the address range occupied by this mapping. + MemoryLimit uint64 `protobuf:"varint,2,opt,name=memory_limit,json=memoryLimit,proto3" json:"memory_limit,omitempty"` + // Offset in the binary that corresponds to the first mapped address. + FileOffset uint64 `protobuf:"varint,3,opt,name=file_offset,json=fileOffset,proto3" json:"file_offset,omitempty"` + // The object this entry is loaded from. This can be a filename on + // disk for the main binary and shared libraries, or virtual + // abstractions like "[vdso]". + FilenameStrindex int32 `protobuf:"varint,4,opt,name=filename_strindex,json=filenameStrindex,proto3" json:"filename_strindex,omitempty"` + // References to attributes in Profile.attribute_table. [optional] + AttributeIndices []int32 `protobuf:"varint,5,rep,packed,name=attribute_indices,json=attributeIndices,proto3" json:"attribute_indices,omitempty"` + // The following fields indicate the resolution of symbolic info. + HasFunctions bool `protobuf:"varint,6,opt,name=has_functions,json=hasFunctions,proto3" json:"has_functions,omitempty"` + HasFilenames bool `protobuf:"varint,7,opt,name=has_filenames,json=hasFilenames,proto3" json:"has_filenames,omitempty"` + HasLineNumbers bool `protobuf:"varint,8,opt,name=has_line_numbers,json=hasLineNumbers,proto3" json:"has_line_numbers,omitempty"` + HasInlineFrames bool `protobuf:"varint,9,opt,name=has_inline_frames,json=hasInlineFrames,proto3" json:"has_inline_frames,omitempty"` +} + +func (m *Mapping) Reset() { *m = Mapping{} } +func (m *Mapping) String() string { return proto.CompactTextString(m) } +func (*Mapping) ProtoMessage() {} +func (*Mapping) Descriptor() ([]byte, []int) { + return fileDescriptor_ddd0cf081a2fe76f, []int{8} +} +func (m *Mapping) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Mapping) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Mapping.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Mapping) XXX_Merge(src proto.Message) { + xxx_messageInfo_Mapping.Merge(m, src) +} +func (m *Mapping) XXX_Size() int { + return m.Size() +} +func (m *Mapping) XXX_DiscardUnknown() { + xxx_messageInfo_Mapping.DiscardUnknown(m) +} + +var xxx_messageInfo_Mapping proto.InternalMessageInfo + +func (m *Mapping) GetMemoryStart() uint64 { + if m != nil { + return m.MemoryStart + } + return 0 +} + +func (m *Mapping) GetMemoryLimit() uint64 { + if m != nil { + return m.MemoryLimit + } + return 0 +} + +func (m *Mapping) GetFileOffset() uint64 { + if m != nil { + return m.FileOffset + } + return 0 +} + +func (m *Mapping) GetFilenameStrindex() int32 { + if m != nil { + return m.FilenameStrindex + } + return 0 +} + +func (m *Mapping) GetAttributeIndices() []int32 { + if m != nil { + return m.AttributeIndices + } + 
return nil +} + +func (m *Mapping) GetHasFunctions() bool { + if m != nil { + return m.HasFunctions + } + return false +} + +func (m *Mapping) GetHasFilenames() bool { + if m != nil { + return m.HasFilenames + } + return false +} + +func (m *Mapping) GetHasLineNumbers() bool { + if m != nil { + return m.HasLineNumbers + } + return false +} + +func (m *Mapping) GetHasInlineFrames() bool { + if m != nil { + return m.HasInlineFrames + } + return false +} + +// Describes function and line table debug information. +type Location struct { + // Reference to mapping in Profile.mapping_table. + // It can be unset if the mapping is unknown or not applicable for + // this profile type. + // + // Types that are valid to be assigned to MappingIndex_: + // *Location_MappingIndex + MappingIndex_ isLocation_MappingIndex_ `protobuf_oneof:"mapping_index_"` + // The instruction address for this location, if available. It + // should be within [Mapping.memory_start...Mapping.memory_limit] + // for the corresponding mapping. A non-leaf address may be in the + // middle of a call instruction. It is up to display tools to find + // the beginning of the instruction if necessary. + Address uint64 `protobuf:"varint,2,opt,name=address,proto3" json:"address,omitempty"` + // Multiple line indicates this location has inlined functions, + // where the last entry represents the caller into which the + // preceding entries were inlined. + // + // E.g., if memcpy() is inlined into printf: + // line[0].function_name == "memcpy" + // line[1].function_name == "printf" + Line []*Line `protobuf:"bytes,3,rep,name=line,proto3" json:"line,omitempty"` + // Provides an indication that multiple symbols map to this location's + // address, for example due to identical code folding by the linker. In that + // case the line information above represents one of the multiple + // symbols. This field must be recomputed when the symbolization state of the + // profile changes. + IsFolded bool `protobuf:"varint,4,opt,name=is_folded,json=isFolded,proto3" json:"is_folded,omitempty"` + // References to attributes in Profile.attribute_table. 
[optional] + AttributeIndices []int32 `protobuf:"varint,5,rep,packed,name=attribute_indices,json=attributeIndices,proto3" json:"attribute_indices,omitempty"` +} + +func (m *Location) Reset() { *m = Location{} } +func (m *Location) String() string { return proto.CompactTextString(m) } +func (*Location) ProtoMessage() {} +func (*Location) Descriptor() ([]byte, []int) { + return fileDescriptor_ddd0cf081a2fe76f, []int{9} +} +func (m *Location) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Location.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Location) XXX_Merge(src proto.Message) { + xxx_messageInfo_Location.Merge(m, src) +} +func (m *Location) XXX_Size() int { + return m.Size() +} +func (m *Location) XXX_DiscardUnknown() { + xxx_messageInfo_Location.DiscardUnknown(m) +} + +var xxx_messageInfo_Location proto.InternalMessageInfo + +type isLocation_MappingIndex_ interface { + isLocation_MappingIndex_() + MarshalTo([]byte) (int, error) + Size() int +} + +type Location_MappingIndex struct { + MappingIndex int32 `protobuf:"varint,1,opt,name=mapping_index,json=mappingIndex,proto3,oneof" json:"mapping_index,omitempty"` +} + +func (*Location_MappingIndex) isLocation_MappingIndex_() {} + +func (m *Location) GetMappingIndex_() isLocation_MappingIndex_ { + if m != nil { + return m.MappingIndex_ + } + return nil +} + +func (m *Location) GetMappingIndex() int32 { + if x, ok := m.GetMappingIndex_().(*Location_MappingIndex); ok { + return x.MappingIndex + } + return 0 +} + +func (m *Location) GetAddress() uint64 { + if m != nil { + return m.Address + } + return 0 +} + +func (m *Location) GetLine() []*Line { + if m != nil { + return m.Line + } + return nil +} + +func (m *Location) GetIsFolded() bool { + if m != nil { + return m.IsFolded + } + return false +} + +func (m *Location) GetAttributeIndices() []int32 { + if m != nil { + return m.AttributeIndices + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Location) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Location_MappingIndex)(nil), + } +} + +// Details a specific line in a source code, linked to a function. +type Line struct { + // Reference to function in Profile.function_table. + FunctionIndex int32 `protobuf:"varint,1,opt,name=function_index,json=functionIndex,proto3" json:"function_index,omitempty"` + // Line number in source code. + Line int64 `protobuf:"varint,2,opt,name=line,proto3" json:"line,omitempty"` + // Column number in source code. 
+ Column int64 `protobuf:"varint,3,opt,name=column,proto3" json:"column,omitempty"` +} + +func (m *Line) Reset() { *m = Line{} } +func (m *Line) String() string { return proto.CompactTextString(m) } +func (*Line) ProtoMessage() {} +func (*Line) Descriptor() ([]byte, []int) { + return fileDescriptor_ddd0cf081a2fe76f, []int{10} +} +func (m *Line) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Line) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Line.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Line) XXX_Merge(src proto.Message) { + xxx_messageInfo_Line.Merge(m, src) +} +func (m *Line) XXX_Size() int { + return m.Size() +} +func (m *Line) XXX_DiscardUnknown() { + xxx_messageInfo_Line.DiscardUnknown(m) +} + +var xxx_messageInfo_Line proto.InternalMessageInfo + +func (m *Line) GetFunctionIndex() int32 { + if m != nil { + return m.FunctionIndex + } + return 0 +} + +func (m *Line) GetLine() int64 { + if m != nil { + return m.Line + } + return 0 +} + +func (m *Line) GetColumn() int64 { + if m != nil { + return m.Column + } + return 0 +} + +// Describes a function, including its human-readable name, system name, +// source file, and starting line number in the source. +type Function struct { + // Name of the function, in human-readable form if available. + NameStrindex int32 `protobuf:"varint,1,opt,name=name_strindex,json=nameStrindex,proto3" json:"name_strindex,omitempty"` + // Name of the function, as identified by the system. + // For instance, it can be a C++ mangled name. + SystemNameStrindex int32 `protobuf:"varint,2,opt,name=system_name_strindex,json=systemNameStrindex,proto3" json:"system_name_strindex,omitempty"` + // Source file containing the function. + FilenameStrindex int32 `protobuf:"varint,3,opt,name=filename_strindex,json=filenameStrindex,proto3" json:"filename_strindex,omitempty"` + // Line number in source file. 
+ StartLine int64 `protobuf:"varint,4,opt,name=start_line,json=startLine,proto3" json:"start_line,omitempty"` +} + +func (m *Function) Reset() { *m = Function{} } +func (m *Function) String() string { return proto.CompactTextString(m) } +func (*Function) ProtoMessage() {} +func (*Function) Descriptor() ([]byte, []int) { + return fileDescriptor_ddd0cf081a2fe76f, []int{11} +} +func (m *Function) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Function) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Function.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Function) XXX_Merge(src proto.Message) { + xxx_messageInfo_Function.Merge(m, src) +} +func (m *Function) XXX_Size() int { + return m.Size() +} +func (m *Function) XXX_DiscardUnknown() { + xxx_messageInfo_Function.DiscardUnknown(m) +} + +var xxx_messageInfo_Function proto.InternalMessageInfo + +func (m *Function) GetNameStrindex() int32 { + if m != nil { + return m.NameStrindex + } + return 0 +} + +func (m *Function) GetSystemNameStrindex() int32 { + if m != nil { + return m.SystemNameStrindex + } + return 0 +} + +func (m *Function) GetFilenameStrindex() int32 { + if m != nil { + return m.FilenameStrindex + } + return 0 +} + +func (m *Function) GetStartLine() int64 { + if m != nil { + return m.StartLine + } + return 0 +} + +func init() { + proto.RegisterEnum("opentelemetry.proto.profiles.v1development.AggregationTemporality", AggregationTemporality_name, AggregationTemporality_value) + proto.RegisterType((*ProfilesData)(nil), "opentelemetry.proto.profiles.v1development.ProfilesData") + proto.RegisterType((*ResourceProfiles)(nil), "opentelemetry.proto.profiles.v1development.ResourceProfiles") + proto.RegisterType((*ScopeProfiles)(nil), "opentelemetry.proto.profiles.v1development.ScopeProfiles") + proto.RegisterType((*Profile)(nil), "opentelemetry.proto.profiles.v1development.Profile") + proto.RegisterType((*AttributeUnit)(nil), "opentelemetry.proto.profiles.v1development.AttributeUnit") + proto.RegisterType((*Link)(nil), "opentelemetry.proto.profiles.v1development.Link") + proto.RegisterType((*ValueType)(nil), "opentelemetry.proto.profiles.v1development.ValueType") + proto.RegisterType((*Sample)(nil), "opentelemetry.proto.profiles.v1development.Sample") + proto.RegisterType((*Mapping)(nil), "opentelemetry.proto.profiles.v1development.Mapping") + proto.RegisterType((*Location)(nil), "opentelemetry.proto.profiles.v1development.Location") + proto.RegisterType((*Line)(nil), "opentelemetry.proto.profiles.v1development.Line") + proto.RegisterType((*Function)(nil), "opentelemetry.proto.profiles.v1development.Function") +} + +func init() { + proto.RegisterFile("opentelemetry/proto/profiles/v1development/profiles.proto", fileDescriptor_ddd0cf081a2fe76f) +} + +var fileDescriptor_ddd0cf081a2fe76f = []byte{ + // 1591 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x58, 0xdb, 0x6f, 0x13, 0x57, + 0x1a, 0xcf, 0xd8, 0x8e, 0x2f, 0x9f, 0x2f, 0x71, 0x0e, 0x21, 0xcc, 0xb2, 0x4b, 0x62, 0x8c, 0x58, + 0x4c, 0x56, 0x24, 0x24, 0xb0, 0x2b, 0xd0, 0xae, 0xb4, 0x9b, 0xc4, 0x09, 0x18, 0x42, 0x92, 0x9d, + 0x38, 0x51, 0x69, 0x91, 0xa6, 0x27, 0x9e, 0x63, 0x67, 0xca, 0xdc, 0x34, 0xe7, 0x38, 0xc2, 0xea, + 0xbf, 0xc0, 0x43, 0xff, 0x8e, 0x4a, 0xfd, 0x1b, 0xfa, 0xca, 0x23, 0xea, 0x13, 0xea, 0x03, 0xaa, + 0xe0, 0x85, 0x56, 
0xea, 0x6b, 0x9f, 0xab, 0x73, 0x99, 0xb1, 0x9d, 0x3a, 0x82, 0xe9, 0x4b, 0x34, + 0xe7, 0x3b, 0xbf, 0xf3, 0x3b, 0xdf, 0xfd, 0x3b, 0x0e, 0xdc, 0xf7, 0x03, 0xe2, 0x31, 0xe2, 0x10, + 0x97, 0xb0, 0x70, 0xb0, 0x12, 0x84, 0x3e, 0xf3, 0xf9, 0xdf, 0xae, 0xed, 0x10, 0xba, 0x72, 0xba, + 0x6a, 0x91, 0x53, 0xe2, 0xf8, 0x81, 0x4b, 0x3c, 0x16, 0x8b, 0x97, 0x05, 0x0a, 0x2d, 0x8d, 0x1d, + 0x95, 0xc2, 0xe5, 0x18, 0x33, 0x76, 0xf4, 0xf2, 0x5c, 0xcf, 0xef, 0xf9, 0x92, 0x9c, 0x7f, 0x49, + 0xf0, 0xe5, 0xa5, 0x49, 0x97, 0x77, 0x7c, 0xd7, 0xf5, 0xbd, 0x95, 0xd3, 0x55, 0xf5, 0xa5, 0xb0, + 0xcb, 0x93, 0xb0, 0x21, 0xa1, 0x7e, 0x3f, 0xec, 0x10, 0x8e, 0x8e, 0xbe, 0x25, 0xbe, 0x3e, 0x80, + 0xd2, 0xbe, 0xd2, 0xa5, 0x89, 0x19, 0x46, 0x36, 0xcc, 0x46, 0x08, 0x33, 0x52, 0x52, 0xd7, 0x6a, + 0xe9, 0x46, 0x71, 0xed, 0x3f, 0xcb, 0x9f, 0x6e, 0xc9, 0xb2, 0xa1, 0x48, 0x22, 0x72, 0xa3, 0x1a, + 0x9e, 0x91, 0xd4, 0x3f, 0x68, 0x50, 0x3d, 0x0b, 0x43, 0x8f, 0x21, 0x1f, 0x01, 0x75, 0xad, 0xa6, + 0x35, 0x8a, 0x6b, 0x37, 0x27, 0x5e, 0x1b, 0x9b, 0x71, 0xba, 0x1a, 0xdf, 0xb5, 0x91, 0x79, 0xf5, + 0x76, 0x71, 0xca, 0x88, 0x09, 0xd0, 0x97, 0x50, 0xa1, 0x1d, 0x3f, 0x18, 0xb1, 0x24, 0x25, 0x2c, + 0xb9, 0x9f, 0xc4, 0x92, 0x03, 0xce, 0x10, 0x9b, 0x51, 0xa6, 0xa3, 0x4b, 0x74, 0x05, 0x80, 0x76, + 0x4e, 0x88, 0x8b, 0xcd, 0x7e, 0xe8, 0xe8, 0xe9, 0x9a, 0xd6, 0x28, 0x18, 0x05, 0x29, 0x39, 0x0c, + 0x9d, 0x47, 0xd9, 0xfc, 0x87, 0x5c, 0xf5, 0xe7, 0x5c, 0xfd, 0xb5, 0x06, 0xe5, 0x31, 0x1e, 0xb4, + 0x07, 0xd3, 0x82, 0x49, 0x19, 0x79, 0x67, 0xa2, 0x46, 0x2a, 0xb2, 0xa7, 0xab, 0xcb, 0x2d, 0x8f, + 0xb2, 0xb0, 0xcf, 0xf5, 0xc1, 0xcc, 0xf6, 0x3d, 0xc1, 0xa5, 0xcc, 0x95, 0x3c, 0x68, 0x0f, 0xf2, + 0x67, 0xac, 0xbc, 0x93, 0xc4, 0x4a, 0xa5, 0x98, 0x11, 0x93, 0x7c, 0xc4, 0xb4, 0xfa, 0x6f, 0x00, + 0x39, 0x75, 0x08, 0x1d, 0x41, 0x91, 0x62, 0x37, 0x70, 0x88, 0xc9, 0x06, 0xc2, 0x24, 0x7e, 0xfd, + 0x3f, 0x93, 0x5c, 0x7f, 0x84, 0x9d, 0x3e, 0x69, 0x0f, 0x02, 0x62, 0x80, 0x64, 0xe2, 0xdf, 0xe8, + 0x11, 0x64, 0xe5, 0x4a, 0x59, 0xb4, 0x96, 0x28, 0x6e, 0xe2, 0xa4, 0xa1, 0x18, 0xd0, 0x67, 0x50, + 0x76, 0x71, 0x10, 0xd8, 0x5e, 0xcf, 0x64, 0xf8, 0xd8, 0x21, 0x7a, 0x3a, 0xb9, 0x93, 0x9e, 0x48, + 0x02, 0xa3, 0xa4, 0x98, 0xda, 0x9c, 0x08, 0x7d, 0x01, 0x15, 0xc7, 0xef, 0x88, 0xb8, 0x28, 0xea, + 0x8c, 0xa0, 0xbe, 0x9b, 0x84, 0x7a, 0x47, 0x31, 0x18, 0xe5, 0x88, 0x4b, 0x92, 0xdf, 0x84, 0x6a, + 0x4c, 0x6e, 0x7b, 0x96, 0xdd, 0x21, 0x54, 0x9f, 0xae, 0xa5, 0x1b, 0xd3, 0xc6, 0x4c, 0x24, 0x6f, + 0x49, 0x31, 0xd7, 0xa3, 0xdb, 0xf7, 0x3a, 0x23, 0x7a, 0x64, 0x93, 0xeb, 0xb1, 0xad, 0x18, 0x8c, + 0x72, 0xc4, 0x25, 0xf5, 0x38, 0x82, 0x19, 0xcc, 0x58, 0x68, 0x1f, 0xf7, 0x19, 0x51, 0xec, 0x39, + 0xc1, 0x7e, 0xe3, 0x23, 0x99, 0xfb, 0x98, 0x0c, 0x44, 0x70, 0x55, 0xb6, 0x56, 0x62, 0x16, 0xc9, + 0x7b, 0x3c, 0xca, 0xdb, 0xf7, 0x6c, 0x46, 0xf5, 0x7c, 0xf2, 0x1a, 0x5d, 0x8f, 0x28, 0x0e, 0x3d, + 0x9b, 0x8d, 0xdc, 0xc1, 0x97, 0xbc, 0xd6, 0xc0, 0xb1, 0xbd, 0xe7, 0x4a, 0xed, 0x82, 0xa0, 0xbf, + 0x9d, 0x28, 0x38, 0xb6, 0xf7, 0xdc, 0x28, 0x70, 0x0e, 0xa9, 0xf4, 0x55, 0x28, 0x51, 0x16, 0x0e, + 0x53, 0x09, 0x6a, 0xe9, 0x46, 0xc1, 0x28, 0x4a, 0x99, 0x84, 0x5c, 0x01, 0x60, 0xb6, 0x4b, 0x4c, + 0x0f, 0x7b, 0x3e, 0xd5, 0x8b, 0x35, 0xad, 0x91, 0x36, 0x0a, 0x5c, 0xb2, 0xcb, 0x05, 0xe8, 0x3a, + 0x54, 0xac, 0x7e, 0x28, 0xc3, 0x2a, 0x21, 0x25, 0x01, 0x29, 0x47, 0x52, 0x09, 0x7b, 0x06, 0xc5, + 0x80, 0x84, 0xb6, 0x6f, 0xc9, 0xc2, 0x2a, 0x8b, 0x5e, 0xf1, 0xe7, 0x0a, 0x4b, 0xf9, 0x1f, 0x24, + 0x9f, 0x28, 0xaf, 0x79, 0xc8, 0xca, 0x95, 0x5e, 0x11, 0x97, 0xab, 0x15, 0xba, 0x05, 0x88, 0xc7, + 0x8f, 0x78, 0xcc, 0x14, 0x26, 0xc9, 0xac, 
0x9b, 0x11, 0x59, 0x37, 0xab, 0x76, 0x0e, 0xe2, 0x0d, + 0xf4, 0x5f, 0xf8, 0x9b, 0x45, 0xba, 0xb8, 0xef, 0x30, 0x73, 0xa4, 0x0b, 0xa8, 0xa3, 0xe4, 0x85, + 0x5e, 0xad, 0x69, 0x8d, 0x69, 0xe3, 0x2f, 0x0a, 0x73, 0x10, 0x97, 0xf7, 0x81, 0x02, 0xa0, 0x63, + 0x00, 0xa5, 0xbd, 0x69, 0x5b, 0xfa, 0x6c, 0x4d, 0x6b, 0x94, 0x36, 0x36, 0xb9, 0xb6, 0x3f, 0xbe, + 0x5d, 0xfc, 0x77, 0xcf, 0x3f, 0x63, 0xae, 0xcd, 0x67, 0x9f, 0xe3, 0x90, 0x0e, 0xf3, 0xc3, 0x95, + 0xc0, 0xc2, 0x0c, 0xaf, 0xd8, 0x1e, 0x23, 0xa1, 0x87, 0x9d, 0x15, 0xbe, 0x8a, 0x5a, 0x59, 0xab, + 0x69, 0x14, 0x14, 0x6d, 0xcb, 0x42, 0xf7, 0x40, 0xb7, 0x42, 0x3f, 0x08, 0x88, 0x65, 0xc6, 0xd9, + 0x41, 0xcd, 0x8e, 0xdf, 0xf7, 0x98, 0x7e, 0xa1, 0xa6, 0x35, 0xca, 0xc6, 0xbc, 0xda, 0x8f, 0x73, + 0x89, 0x6e, 0xf2, 0x5d, 0xf4, 0x2f, 0xb8, 0xe4, 0x87, 0x76, 0xcf, 0xf6, 0xb0, 0x63, 0x06, 0x78, + 0xe0, 0xf8, 0xd8, 0x32, 0xbb, 0x7e, 0xe8, 0x62, 0xa6, 0xcf, 0x89, 0xa6, 0x78, 0x31, 0xda, 0xde, + 0x97, 0xbb, 0xdb, 0x62, 0x93, 0x57, 0xee, 0xd9, 0x73, 0xfa, 0x45, 0x6e, 0x9b, 0x31, 0x73, 0xe6, + 0x00, 0xfa, 0x07, 0xcc, 0x0e, 0x8b, 0x20, 0xf2, 0xf7, 0xbc, 0xf0, 0x77, 0x35, 0xde, 0x50, 0x65, + 0x5e, 0xff, 0x0a, 0xca, 0x63, 0xe9, 0x8e, 0xee, 0xc2, 0xfc, 0xf0, 0xf4, 0x73, 0x32, 0x18, 0x7a, + 0x5e, 0x13, 0x9e, 0x9f, 0x8b, 0x77, 0x1f, 0x93, 0x41, 0xec, 0xf4, 0x6b, 0x50, 0xe6, 0xe5, 0x36, + 0x04, 0xa7, 0x04, 0xb8, 0xc4, 0x85, 0x11, 0xa8, 0xfe, 0xbd, 0x06, 0x19, 0x9e, 0xfc, 0xe8, 0x19, + 0xe4, 0x59, 0x88, 0x3b, 0x22, 0x40, 0x9a, 0x08, 0xd0, 0xba, 0x0a, 0xd0, 0xfd, 0xe4, 0x01, 0x6a, + 0x73, 0xa6, 0x56, 0xd3, 0xc8, 0x09, 0xca, 0x96, 0x85, 0x9e, 0x42, 0x8e, 0x06, 0xd8, 0xe3, 0xe4, + 0x29, 0x41, 0xfe, 0x3f, 0x45, 0x7e, 0x2f, 0x39, 0xf9, 0x41, 0x80, 0xbd, 0x56, 0xd3, 0xc8, 0x72, + 0xc2, 0x96, 0x55, 0xff, 0x41, 0x83, 0x42, 0x5c, 0x03, 0xdc, 0xe8, 0xf1, 0xdc, 0x94, 0x1e, 0x2a, + 0xb1, 0xd1, 0x74, 0xfc, 0x14, 0xcf, 0xa0, 0xaf, 0xe1, 0x12, 0xee, 0xf5, 0x42, 0xd2, 0x53, 0x7d, + 0x9f, 0xb8, 0x81, 0x1f, 0x62, 0xc7, 0x66, 0x03, 0x31, 0x2a, 0x2b, 0x6b, 0x1b, 0x89, 0xfa, 0xd7, + 0x90, 0xaa, 0x3d, 0x64, 0x32, 0xe6, 0xf1, 0x44, 0x79, 0xfd, 0x65, 0x0a, 0xb2, 0xb2, 0x8e, 0xd0, + 0x1a, 0x5c, 0x8c, 0xe6, 0x00, 0x35, 0x29, 0xc3, 0x21, 0x33, 0x47, 0x2d, 0xbb, 0x10, 0x6f, 0x1e, + 0xf0, 0xbd, 0x96, 0xd0, 0x7d, 0x64, 0xa6, 0x50, 0xd3, 0x21, 0x5e, 0x8f, 0x9d, 0x28, 0x1b, 0xe3, + 0x99, 0x42, 0x77, 0x84, 0x18, 0xcd, 0xc1, 0xf4, 0x29, 0xf7, 0x9e, 0x98, 0x96, 0x69, 0x43, 0x2e, + 0x26, 0xe7, 0x6b, 0x66, 0x72, 0xbe, 0xa2, 0x45, 0xd5, 0x7d, 0xa5, 0x5a, 0xd3, 0xfc, 0x9e, 0x87, + 0x53, 0xb2, 0x9b, 0x4a, 0x75, 0x6e, 0xc3, 0x1c, 0x6f, 0x8c, 0x94, 0x61, 0x37, 0xa0, 0x7c, 0x06, + 0xbc, 0x10, 0x2d, 0x51, 0x4c, 0xaf, 0x8c, 0x81, 0x86, 0x7b, 0x87, 0x9e, 0xfd, 0x82, 0xf7, 0xc5, + 0x8d, 0x32, 0x14, 0x87, 0x94, 0x66, 0xfd, 0x97, 0x14, 0xe4, 0xd4, 0x68, 0xe6, 0xad, 0xd9, 0x25, + 0xae, 0x1f, 0x0e, 0xa4, 0x33, 0x84, 0x1b, 0x32, 0x46, 0x51, 0xca, 0x84, 0x0f, 0x46, 0x20, 0x8e, + 0xed, 0xda, 0x4c, 0x98, 0x1e, 0x43, 0x76, 0xb8, 0x08, 0x2d, 0x42, 0x51, 0xb4, 0x23, 0xbf, 0xdb, + 0xa5, 0x84, 0x89, 0x88, 0x66, 0x0c, 0xe0, 0xa2, 0x3d, 0x21, 0xe1, 0x1e, 0xe0, 0x2b, 0x0f, 0xbb, + 0x23, 0xc9, 0x94, 0x11, 0x3e, 0xac, 0x46, 0x1b, 0x71, 0xae, 0x4c, 0x74, 0xd7, 0xf4, 0x39, 0xee, + 0xba, 0x06, 0xe5, 0x13, 0x4c, 0xcd, 0x68, 0xfa, 0x52, 0x3d, 0x5b, 0xd3, 0x1a, 0x79, 0xa3, 0x74, + 0x82, 0x69, 0x34, 0x9b, 0x87, 0x20, 0x75, 0x13, 0xd5, 0x73, 0x43, 0x50, 0x24, 0x43, 0x0d, 0xa8, + 0x72, 0x90, 0x63, 0x7b, 0xc4, 0xf4, 0xfa, 0xee, 0x31, 0x09, 0xf9, 0x6c, 0xe5, 0xb8, 0xca, 0x09, + 0xa6, 0x3b, 0xb6, 0x47, 0x76, 0xa5, 0x14, 0x2d, 0xc1, 0x2c, 0x47, 
0xda, 0x9e, 0xc0, 0x76, 0x43, + 0x41, 0x59, 0x10, 0xd0, 0x99, 0x13, 0x4c, 0x5b, 0x42, 0xbe, 0x2d, 0xc4, 0xf5, 0x5f, 0x35, 0xc8, + 0x47, 0x8f, 0x15, 0x74, 0x7d, 0xf8, 0xa8, 0x1a, 0xc9, 0xba, 0x87, 0x53, 0xf1, 0x0b, 0x49, 0x46, + 0x58, 0x87, 0x1c, 0xb6, 0xac, 0x90, 0x50, 0xaa, 0x9c, 0x1d, 0x2d, 0x51, 0x13, 0x32, 0x9c, 0x5b, + 0x3d, 0xc6, 0x92, 0x0e, 0x65, 0x62, 0x88, 0xd3, 0xe8, 0xaf, 0x50, 0xb0, 0xa9, 0xd9, 0xf5, 0x1d, + 0x8b, 0x58, 0x22, 0x0a, 0x79, 0x23, 0x6f, 0xd3, 0x6d, 0xb1, 0x4e, 0xe4, 0xfd, 0x8d, 0x2a, 0x54, + 0xc6, 0x0c, 0x32, 0xeb, 0x4f, 0x45, 0x07, 0x24, 0x7c, 0x62, 0xc7, 0xaf, 0xab, 0xd1, 0x0a, 0x8b, + 0xdf, 0x49, 0xd2, 0x54, 0xa4, 0x0c, 0x4a, 0x89, 0x89, 0x2a, 0xd5, 0x9b, 0x87, 0x6c, 0xc7, 0x77, + 0xfa, 0xae, 0x27, 0x12, 0x29, 0x6d, 0xa8, 0x55, 0xfd, 0x3b, 0x0d, 0xf2, 0x51, 0x4c, 0x79, 0x48, + 0xc7, 0xb3, 0x49, 0xb5, 0xa6, 0xb1, 0x4c, 0xba, 0x0d, 0x73, 0x74, 0x40, 0x19, 0x71, 0xcd, 0x71, + 0xac, 0xac, 0x5e, 0x24, 0xf7, 0x76, 0xcf, 0xe4, 0xde, 0x1f, 0x13, 0x35, 0x7d, 0x4e, 0xa2, 0xf2, + 0x27, 0xbf, 0x68, 0x21, 0xc2, 0x84, 0x8c, 0x7c, 0xb4, 0x08, 0x09, 0x77, 0xc1, 0xd2, 0x4b, 0x0d, + 0xe6, 0x27, 0x77, 0x2a, 0x74, 0x03, 0xae, 0xad, 0x3f, 0x78, 0x60, 0x6c, 0x3d, 0x58, 0x6f, 0xb7, + 0xf6, 0x76, 0xcd, 0xf6, 0xd6, 0x93, 0xfd, 0x3d, 0x63, 0x7d, 0xa7, 0xd5, 0x7e, 0x6a, 0x1e, 0xee, + 0x1e, 0xec, 0x6f, 0x6d, 0xb6, 0xb6, 0x5b, 0x5b, 0xcd, 0xea, 0x14, 0xba, 0x0a, 0x57, 0xce, 0x03, + 0x36, 0xb7, 0x76, 0xda, 0xeb, 0x55, 0x0d, 0xfd, 0x1d, 0xea, 0xe7, 0x41, 0x36, 0x0f, 0x9f, 0x1c, + 0xee, 0xac, 0xb7, 0x5b, 0x47, 0x5b, 0xd5, 0xd4, 0xc6, 0x1b, 0xed, 0xd5, 0xbb, 0x05, 0xed, 0xf5, + 0xbb, 0x05, 0xed, 0xa7, 0x77, 0x0b, 0xda, 0x37, 0xef, 0x17, 0xa6, 0x5e, 0xbf, 0x5f, 0x98, 0x7a, + 0xf3, 0x7e, 0x61, 0x0a, 0x6e, 0xd9, 0x7e, 0x82, 0x54, 0xda, 0x28, 0x47, 0x3f, 0xcb, 0xf6, 0x39, + 0x6a, 0x5f, 0xfb, 0xfc, 0xff, 0x89, 0xe7, 0x8e, 0xfc, 0xa5, 0xdd, 0x23, 0xde, 0x39, 0xff, 0x15, + 0xf8, 0x36, 0xb5, 0xb4, 0x17, 0x10, 0xaf, 0x1d, 0x13, 0x8a, 0xab, 0xa2, 0xb7, 0x0a, 0x5d, 0x3e, + 0x5a, 0x6d, 0x0e, 0xc1, 0xc7, 0x59, 0xc1, 0x76, 0xe7, 0xf7, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd8, + 0x20, 0xd9, 0xad, 0x77, 0x10, 0x00, 0x00, +} + +func (m *ProfilesData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProfilesData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProfilesData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ResourceProfiles) > 0 { + for iNdEx := len(m.ResourceProfiles) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceProfiles[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProfiles(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ResourceProfiles) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceProfiles) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceProfiles) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.SchemaUrl) > 0 { + i -= len(m.SchemaUrl) + copy(dAtA[i:], m.SchemaUrl) + i = encodeVarintProfiles(dAtA, 
i, uint64(len(m.SchemaUrl))) + i-- + dAtA[i] = 0x1a + } + if len(m.ScopeProfiles) > 0 { + for iNdEx := len(m.ScopeProfiles) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ScopeProfiles[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProfiles(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProfiles(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ScopeProfiles) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopeProfiles) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScopeProfiles) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.SchemaUrl) > 0 { + i -= len(m.SchemaUrl) + copy(dAtA[i:], m.SchemaUrl) + i = encodeVarintProfiles(dAtA, i, uint64(len(m.SchemaUrl))) + i-- + dAtA[i] = 0x1a + } + if len(m.Profiles) > 0 { + for iNdEx := len(m.Profiles) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Profiles[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProfiles(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.Scope.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProfiles(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Profile) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Profile) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Profile) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AttributeIndices) > 0 { + dAtA4 := make([]byte, len(m.AttributeIndices)*10) + var j3 int + for _, num1 := range m.AttributeIndices { + num := uint64(num1) + for num >= 1<<7 { + dAtA4[j3] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j3++ + } + dAtA4[j3] = uint8(num) + j3++ + } + i -= j3 + copy(dAtA[i:], dAtA4[:j3]) + i = encodeVarintProfiles(dAtA, i, uint64(j3)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + if len(m.OriginalPayload) > 0 { + i -= len(m.OriginalPayload) + copy(dAtA[i:], m.OriginalPayload) + i = encodeVarintProfiles(dAtA, i, uint64(len(m.OriginalPayload))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + if len(m.OriginalPayloadFormat) > 0 { + i -= len(m.OriginalPayloadFormat) + copy(dAtA[i:], m.OriginalPayloadFormat) + i = encodeVarintProfiles(dAtA, i, uint64(len(m.OriginalPayloadFormat))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + if m.DroppedAttributesCount != 0 { + i = encodeVarintProfiles(dAtA, i, uint64(m.DroppedAttributesCount)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x98 + } + { + size := m.ProfileId.Size() + i -= size + if _, err := m.ProfileId.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintProfiles(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + if m.DefaultSampleTypeStrindex != 0 { + i = encodeVarintProfiles(dAtA, i, 
uint64(m.DefaultSampleTypeStrindex)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + } + if len(m.CommentStrindices) > 0 { + dAtA6 := make([]byte, len(m.CommentStrindices)*10) + var j5 int + for _, num1 := range m.CommentStrindices { + num := uint64(num1) + for num >= 1<<7 { + dAtA6[j5] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j5++ + } + dAtA6[j5] = uint8(num) + j5++ + } + i -= j5 + copy(dAtA[i:], dAtA6[:j5]) + i = encodeVarintProfiles(dAtA, i, uint64(j5)) + i-- + dAtA[i] = 0x7a + } + if m.Period != 0 { + i = encodeVarintProfiles(dAtA, i, uint64(m.Period)) + i-- + dAtA[i] = 0x70 + } + { + size, err := m.PeriodType.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProfiles(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + if m.DurationNanos != 0 { + i = encodeVarintProfiles(dAtA, i, uint64(m.DurationNanos)) + i-- + dAtA[i] = 0x60 + } + if m.TimeNanos != 0 { + i = encodeVarintProfiles(dAtA, i, uint64(m.TimeNanos)) + i-- + dAtA[i] = 0x58 + } + if len(m.StringTable) > 0 { + for iNdEx := len(m.StringTable) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.StringTable[iNdEx]) + copy(dAtA[i:], m.StringTable[iNdEx]) + i = encodeVarintProfiles(dAtA, i, uint64(len(m.StringTable[iNdEx]))) + i-- + dAtA[i] = 0x52 + } + } + if len(m.LinkTable) > 0 { + for iNdEx := len(m.LinkTable) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.LinkTable[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProfiles(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + } + if len(m.AttributeUnits) > 0 { + for iNdEx := len(m.AttributeUnits) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AttributeUnits[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProfiles(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if len(m.AttributeTable) > 0 { + for iNdEx := len(m.AttributeTable) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AttributeTable[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProfiles(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if len(m.FunctionTable) > 0 { + for iNdEx := len(m.FunctionTable) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.FunctionTable[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProfiles(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if len(m.LocationIndices) > 0 { + dAtA9 := make([]byte, len(m.LocationIndices)*10) + var j8 int + for _, num1 := range m.LocationIndices { + num := uint64(num1) + for num >= 1<<7 { + dAtA9[j8] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j8++ + } + dAtA9[j8] = uint8(num) + j8++ + } + i -= j8 + copy(dAtA[i:], dAtA9[:j8]) + i = encodeVarintProfiles(dAtA, i, uint64(j8)) + i-- + dAtA[i] = 0x2a + } + if len(m.LocationTable) > 0 { + for iNdEx := len(m.LocationTable) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.LocationTable[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProfiles(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.MappingTable) > 0 { + for iNdEx := len(m.MappingTable) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MappingTable[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProfiles(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Sample) > 0 { + for iNdEx := len(m.Sample) - 1; iNdEx >= 0; 
iNdEx-- { + { + size, err := m.Sample[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProfiles(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.SampleType) > 0 { + for iNdEx := len(m.SampleType) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.SampleType[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProfiles(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *AttributeUnit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AttributeUnit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AttributeUnit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.UnitStrindex != 0 { + i = encodeVarintProfiles(dAtA, i, uint64(m.UnitStrindex)) + i-- + dAtA[i] = 0x10 + } + if m.AttributeKeyStrindex != 0 { + i = encodeVarintProfiles(dAtA, i, uint64(m.AttributeKeyStrindex)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Link) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Link) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Link) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size := m.SpanId.Size() + i -= size + if _, err := m.SpanId.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintProfiles(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size := m.TraceId.Size() + i -= size + if _, err := m.TraceId.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintProfiles(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ValueType) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValueType) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValueType) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AggregationTemporality != 0 { + i = encodeVarintProfiles(dAtA, i, uint64(m.AggregationTemporality)) + i-- + dAtA[i] = 0x18 + } + if m.UnitStrindex != 0 { + i = encodeVarintProfiles(dAtA, i, uint64(m.UnitStrindex)) + i-- + dAtA[i] = 0x10 + } + if m.TypeStrindex != 0 { + i = encodeVarintProfiles(dAtA, i, uint64(m.TypeStrindex)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Sample) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Sample) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.TimestampsUnixNano) > 0 { + dAtA11 := make([]byte, 
len(m.TimestampsUnixNano)*10) + var j10 int + for _, num := range m.TimestampsUnixNano { + for num >= 1<<7 { + dAtA11[j10] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j10++ + } + dAtA11[j10] = uint8(num) + j10++ + } + i -= j10 + copy(dAtA[i:], dAtA11[:j10]) + i = encodeVarintProfiles(dAtA, i, uint64(j10)) + i-- + dAtA[i] = 0x32 + } + if m.LinkIndex_ != nil { + { + size := m.LinkIndex_.Size() + i -= size + if _, err := m.LinkIndex_.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if len(m.AttributeIndices) > 0 { + dAtA13 := make([]byte, len(m.AttributeIndices)*10) + var j12 int + for _, num1 := range m.AttributeIndices { + num := uint64(num1) + for num >= 1<<7 { + dAtA13[j12] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j12++ + } + dAtA13[j12] = uint8(num) + j12++ + } + i -= j12 + copy(dAtA[i:], dAtA13[:j12]) + i = encodeVarintProfiles(dAtA, i, uint64(j12)) + i-- + dAtA[i] = 0x22 + } + if len(m.Value) > 0 { + dAtA15 := make([]byte, len(m.Value)*10) + var j14 int + for _, num1 := range m.Value { + num := uint64(num1) + for num >= 1<<7 { + dAtA15[j14] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j14++ + } + dAtA15[j14] = uint8(num) + j14++ + } + i -= j14 + copy(dAtA[i:], dAtA15[:j14]) + i = encodeVarintProfiles(dAtA, i, uint64(j14)) + i-- + dAtA[i] = 0x1a + } + if m.LocationsLength != 0 { + i = encodeVarintProfiles(dAtA, i, uint64(m.LocationsLength)) + i-- + dAtA[i] = 0x10 + } + if m.LocationsStartIndex != 0 { + i = encodeVarintProfiles(dAtA, i, uint64(m.LocationsStartIndex)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Sample_LinkIndex) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Sample_LinkIndex) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintProfiles(dAtA, i, uint64(m.LinkIndex)) + i-- + dAtA[i] = 0x28 + return len(dAtA) - i, nil +} +func (m *Mapping) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Mapping) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Mapping) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.HasInlineFrames { + i-- + if m.HasInlineFrames { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + } + if m.HasLineNumbers { + i-- + if m.HasLineNumbers { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + } + if m.HasFilenames { + i-- + if m.HasFilenames { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if m.HasFunctions { + i-- + if m.HasFunctions { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if len(m.AttributeIndices) > 0 { + dAtA17 := make([]byte, len(m.AttributeIndices)*10) + var j16 int + for _, num1 := range m.AttributeIndices { + num := uint64(num1) + for num >= 1<<7 { + dAtA17[j16] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j16++ + } + dAtA17[j16] = uint8(num) + j16++ + } + i -= j16 + copy(dAtA[i:], dAtA17[:j16]) + i = encodeVarintProfiles(dAtA, i, uint64(j16)) + i-- + dAtA[i] = 0x2a + } + if m.FilenameStrindex != 0 { + i = encodeVarintProfiles(dAtA, i, uint64(m.FilenameStrindex)) + i-- + dAtA[i] = 0x20 + } + if m.FileOffset != 0 { + i = encodeVarintProfiles(dAtA, i, uint64(m.FileOffset)) + i-- + dAtA[i] = 0x18 + } + if m.MemoryLimit != 0 { + i = 
encodeVarintProfiles(dAtA, i, uint64(m.MemoryLimit)) + i-- + dAtA[i] = 0x10 + } + if m.MemoryStart != 0 { + i = encodeVarintProfiles(dAtA, i, uint64(m.MemoryStart)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Location) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Location) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Location) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AttributeIndices) > 0 { + dAtA19 := make([]byte, len(m.AttributeIndices)*10) + var j18 int + for _, num1 := range m.AttributeIndices { + num := uint64(num1) + for num >= 1<<7 { + dAtA19[j18] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j18++ + } + dAtA19[j18] = uint8(num) + j18++ + } + i -= j18 + copy(dAtA[i:], dAtA19[:j18]) + i = encodeVarintProfiles(dAtA, i, uint64(j18)) + i-- + dAtA[i] = 0x2a + } + if m.IsFolded { + i-- + if m.IsFolded { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.Line) > 0 { + for iNdEx := len(m.Line) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Line[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProfiles(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.Address != 0 { + i = encodeVarintProfiles(dAtA, i, uint64(m.Address)) + i-- + dAtA[i] = 0x10 + } + if m.MappingIndex_ != nil { + { + size := m.MappingIndex_.Size() + i -= size + if _, err := m.MappingIndex_.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *Location_MappingIndex) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Location_MappingIndex) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintProfiles(dAtA, i, uint64(m.MappingIndex)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} +func (m *Line) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Line) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Line) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Column != 0 { + i = encodeVarintProfiles(dAtA, i, uint64(m.Column)) + i-- + dAtA[i] = 0x18 + } + if m.Line != 0 { + i = encodeVarintProfiles(dAtA, i, uint64(m.Line)) + i-- + dAtA[i] = 0x10 + } + if m.FunctionIndex != 0 { + i = encodeVarintProfiles(dAtA, i, uint64(m.FunctionIndex)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Function) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Function) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Function) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.StartLine != 0 { + i = encodeVarintProfiles(dAtA, i, uint64(m.StartLine)) + i-- + dAtA[i] = 0x20 + } + if m.FilenameStrindex != 0 { + i = 
encodeVarintProfiles(dAtA, i, uint64(m.FilenameStrindex)) + i-- + dAtA[i] = 0x18 + } + if m.SystemNameStrindex != 0 { + i = encodeVarintProfiles(dAtA, i, uint64(m.SystemNameStrindex)) + i-- + dAtA[i] = 0x10 + } + if m.NameStrindex != 0 { + i = encodeVarintProfiles(dAtA, i, uint64(m.NameStrindex)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintProfiles(dAtA []byte, offset int, v uint64) int { + offset -= sovProfiles(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ProfilesData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ResourceProfiles) > 0 { + for _, e := range m.ResourceProfiles { + l = e.Size() + n += 1 + l + sovProfiles(uint64(l)) + } + } + return n +} + +func (m *ResourceProfiles) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Resource.Size() + n += 1 + l + sovProfiles(uint64(l)) + if len(m.ScopeProfiles) > 0 { + for _, e := range m.ScopeProfiles { + l = e.Size() + n += 1 + l + sovProfiles(uint64(l)) + } + } + l = len(m.SchemaUrl) + if l > 0 { + n += 1 + l + sovProfiles(uint64(l)) + } + return n +} + +func (m *ScopeProfiles) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Scope.Size() + n += 1 + l + sovProfiles(uint64(l)) + if len(m.Profiles) > 0 { + for _, e := range m.Profiles { + l = e.Size() + n += 1 + l + sovProfiles(uint64(l)) + } + } + l = len(m.SchemaUrl) + if l > 0 { + n += 1 + l + sovProfiles(uint64(l)) + } + return n +} + +func (m *Profile) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.SampleType) > 0 { + for _, e := range m.SampleType { + l = e.Size() + n += 1 + l + sovProfiles(uint64(l)) + } + } + if len(m.Sample) > 0 { + for _, e := range m.Sample { + l = e.Size() + n += 1 + l + sovProfiles(uint64(l)) + } + } + if len(m.MappingTable) > 0 { + for _, e := range m.MappingTable { + l = e.Size() + n += 1 + l + sovProfiles(uint64(l)) + } + } + if len(m.LocationTable) > 0 { + for _, e := range m.LocationTable { + l = e.Size() + n += 1 + l + sovProfiles(uint64(l)) + } + } + if len(m.LocationIndices) > 0 { + l = 0 + for _, e := range m.LocationIndices { + l += sovProfiles(uint64(e)) + } + n += 1 + sovProfiles(uint64(l)) + l + } + if len(m.FunctionTable) > 0 { + for _, e := range m.FunctionTable { + l = e.Size() + n += 1 + l + sovProfiles(uint64(l)) + } + } + if len(m.AttributeTable) > 0 { + for _, e := range m.AttributeTable { + l = e.Size() + n += 1 + l + sovProfiles(uint64(l)) + } + } + if len(m.AttributeUnits) > 0 { + for _, e := range m.AttributeUnits { + l = e.Size() + n += 1 + l + sovProfiles(uint64(l)) + } + } + if len(m.LinkTable) > 0 { + for _, e := range m.LinkTable { + l = e.Size() + n += 1 + l + sovProfiles(uint64(l)) + } + } + if len(m.StringTable) > 0 { + for _, s := range m.StringTable { + l = len(s) + n += 1 + l + sovProfiles(uint64(l)) + } + } + if m.TimeNanos != 0 { + n += 1 + sovProfiles(uint64(m.TimeNanos)) + } + if m.DurationNanos != 0 { + n += 1 + sovProfiles(uint64(m.DurationNanos)) + } + l = m.PeriodType.Size() + n += 1 + l + sovProfiles(uint64(l)) + if m.Period != 0 { + n += 1 + sovProfiles(uint64(m.Period)) + } + if len(m.CommentStrindices) > 0 { + l = 0 + for _, e := range m.CommentStrindices { + l += sovProfiles(uint64(e)) + } + n += 1 + sovProfiles(uint64(l)) + l + } + if m.DefaultSampleTypeStrindex != 0 { + n += 2 + sovProfiles(uint64(m.DefaultSampleTypeStrindex)) + } + l = m.ProfileId.Size() + 
n += 2 + l + sovProfiles(uint64(l)) + if m.DroppedAttributesCount != 0 { + n += 2 + sovProfiles(uint64(m.DroppedAttributesCount)) + } + l = len(m.OriginalPayloadFormat) + if l > 0 { + n += 2 + l + sovProfiles(uint64(l)) + } + l = len(m.OriginalPayload) + if l > 0 { + n += 2 + l + sovProfiles(uint64(l)) + } + if len(m.AttributeIndices) > 0 { + l = 0 + for _, e := range m.AttributeIndices { + l += sovProfiles(uint64(e)) + } + n += 2 + sovProfiles(uint64(l)) + l + } + return n +} + +func (m *AttributeUnit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AttributeKeyStrindex != 0 { + n += 1 + sovProfiles(uint64(m.AttributeKeyStrindex)) + } + if m.UnitStrindex != 0 { + n += 1 + sovProfiles(uint64(m.UnitStrindex)) + } + return n +} + +func (m *Link) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.TraceId.Size() + n += 1 + l + sovProfiles(uint64(l)) + l = m.SpanId.Size() + n += 1 + l + sovProfiles(uint64(l)) + return n +} + +func (m *ValueType) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TypeStrindex != 0 { + n += 1 + sovProfiles(uint64(m.TypeStrindex)) + } + if m.UnitStrindex != 0 { + n += 1 + sovProfiles(uint64(m.UnitStrindex)) + } + if m.AggregationTemporality != 0 { + n += 1 + sovProfiles(uint64(m.AggregationTemporality)) + } + return n +} + +func (m *Sample) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LocationsStartIndex != 0 { + n += 1 + sovProfiles(uint64(m.LocationsStartIndex)) + } + if m.LocationsLength != 0 { + n += 1 + sovProfiles(uint64(m.LocationsLength)) + } + if len(m.Value) > 0 { + l = 0 + for _, e := range m.Value { + l += sovProfiles(uint64(e)) + } + n += 1 + sovProfiles(uint64(l)) + l + } + if len(m.AttributeIndices) > 0 { + l = 0 + for _, e := range m.AttributeIndices { + l += sovProfiles(uint64(e)) + } + n += 1 + sovProfiles(uint64(l)) + l + } + if m.LinkIndex_ != nil { + n += m.LinkIndex_.Size() + } + if len(m.TimestampsUnixNano) > 0 { + l = 0 + for _, e := range m.TimestampsUnixNano { + l += sovProfiles(uint64(e)) + } + n += 1 + sovProfiles(uint64(l)) + l + } + return n +} + +func (m *Sample_LinkIndex) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovProfiles(uint64(m.LinkIndex)) + return n +} +func (m *Mapping) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MemoryStart != 0 { + n += 1 + sovProfiles(uint64(m.MemoryStart)) + } + if m.MemoryLimit != 0 { + n += 1 + sovProfiles(uint64(m.MemoryLimit)) + } + if m.FileOffset != 0 { + n += 1 + sovProfiles(uint64(m.FileOffset)) + } + if m.FilenameStrindex != 0 { + n += 1 + sovProfiles(uint64(m.FilenameStrindex)) + } + if len(m.AttributeIndices) > 0 { + l = 0 + for _, e := range m.AttributeIndices { + l += sovProfiles(uint64(e)) + } + n += 1 + sovProfiles(uint64(l)) + l + } + if m.HasFunctions { + n += 2 + } + if m.HasFilenames { + n += 2 + } + if m.HasLineNumbers { + n += 2 + } + if m.HasInlineFrames { + n += 2 + } + return n +} + +func (m *Location) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MappingIndex_ != nil { + n += m.MappingIndex_.Size() + } + if m.Address != 0 { + n += 1 + sovProfiles(uint64(m.Address)) + } + if len(m.Line) > 0 { + for _, e := range m.Line { + l = e.Size() + n += 1 + l + sovProfiles(uint64(l)) + } + } + if m.IsFolded { + n += 2 + } + if len(m.AttributeIndices) > 0 { + l = 0 + for _, e := range m.AttributeIndices { + l += sovProfiles(uint64(e)) + } + n += 1 + sovProfiles(uint64(l)) + l + } + return n +} 
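Editorial aside: every Size method above reduces to one pattern — for each set field, add the tag width, plus (for length-delimited fields) the varint width of the byte length, plus the payload itself. The helpers sovProfiles and sozProfiles defined just below supply those widths. The following self-contained sketch reproduces the same arithmetic; the names varintWidth, zigzagWidth, and putVarint are illustrative only and not part of the generated file, and the encoder is written front-to-back for readability even though the generated MarshalToSizedBuffer variants fill their buffer back-to-front.

package main

import (
	"fmt"
	"math/bits"
)

// varintWidth mirrors sovProfiles: a varint carries 7 payload bits per byte,
// so the width is ceil(bitlen(x)/7); the x|1 makes bitlen(0) count as one byte.
func varintWidth(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

// zigzagWidth mirrors sozProfiles: zigzag interleaves signed values
// (0, -1, 1, -2 -> 0, 1, 2, 3) so small negatives stay small on the wire.
func zigzagWidth(x int64) int {
	return varintWidth(uint64((x << 1) ^ (x >> 63)))
}

// putVarint appends x in base-128 little-endian order, the inverse of the
// shift/OR loops used throughout the Unmarshal methods.
func putVarint(dst []byte, x uint64) []byte {
	for x >= 1<<7 {
		dst = append(dst, byte(x&0x7f|0x80))
		x >>= 7
	}
	return append(dst, byte(x))
}

func main() {
	for _, x := range []uint64{0, 127, 128, 300} {
		fmt.Printf("x=%d width=%d bytes=%v\n", x, varintWidth(x), putVarint(nil, x))
	}
	fmt.Println("zigzagWidth(-1) =", zigzagWidth(-1)) // -1 zigzags to 1: one byte
}

Pre-computing exact widths this way is what lets Marshal allocate its output buffer once and fill it without any growth or copying.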
+ +func (m *Location_MappingIndex) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovProfiles(uint64(m.MappingIndex)) + return n +} +func (m *Line) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FunctionIndex != 0 { + n += 1 + sovProfiles(uint64(m.FunctionIndex)) + } + if m.Line != 0 { + n += 1 + sovProfiles(uint64(m.Line)) + } + if m.Column != 0 { + n += 1 + sovProfiles(uint64(m.Column)) + } + return n +} + +func (m *Function) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NameStrindex != 0 { + n += 1 + sovProfiles(uint64(m.NameStrindex)) + } + if m.SystemNameStrindex != 0 { + n += 1 + sovProfiles(uint64(m.SystemNameStrindex)) + } + if m.FilenameStrindex != 0 { + n += 1 + sovProfiles(uint64(m.FilenameStrindex)) + } + if m.StartLine != 0 { + n += 1 + sovProfiles(uint64(m.StartLine)) + } + return n +} + +func sovProfiles(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozProfiles(x uint64) (n int) { + return sovProfiles(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ProfilesData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProfilesData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProfilesData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceProfiles", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceProfiles = append(m.ResourceProfiles, &ResourceProfiles{}) + if err := m.ResourceProfiles[len(m.ResourceProfiles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProfiles(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProfiles + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceProfiles) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceProfiles: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceProfiles: illegal tag %d (wire type %d)", 
fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScopeProfiles", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ScopeProfiles = append(m.ScopeProfiles, &ScopeProfiles{}) + if err := m.ScopeProfiles[len(m.ScopeProfiles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SchemaUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProfiles(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProfiles + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScopeProfiles) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScopeProfiles: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScopeProfiles: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 
0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Scope.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Profiles", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Profiles = append(m.Profiles, &Profile{}) + if err := m.Profiles[len(m.Profiles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SchemaUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProfiles(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProfiles + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Profile) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Profile: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Profile: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SampleType", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SampleType = append(m.SampleType, &ValueType{}) + if err := m.SampleType[len(m.SampleType)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + 
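// Wire type 2 marks a length-delimited value; embedded messages and repeated message
// fields (here Sample) must arrive with it, so any other wire type is rejected.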
return fmt.Errorf("proto: wrong wireType = %d for field Sample", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sample = append(m.Sample, &Sample{}) + if err := m.Sample[len(m.Sample)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MappingTable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MappingTable = append(m.MappingTable, &Mapping{}) + if err := m.MappingTable[len(m.MappingTable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocationTable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LocationTable = append(m.LocationTable, &Location{}) + if err := m.LocationTable[len(m.LocationTable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType == 0 { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LocationIndices = append(m.LocationIndices, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.LocationIndices) == 0 { + m.LocationIndices = make([]int32, 0, elementCount) + } + for iNdEx < postIndex { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + m.LocationIndices = append(m.LocationIndices, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field LocationIndices", wireType) + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FunctionTable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FunctionTable = append(m.FunctionTable, &Function{}) + if err := m.FunctionTable[len(m.FunctionTable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AttributeTable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AttributeTable = append(m.AttributeTable, v11.KeyValue{}) + if err := m.AttributeTable[len(m.AttributeTable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AttributeUnits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AttributeUnits = append(m.AttributeUnits, &AttributeUnit{}) + if err := m.AttributeUnits[len(m.AttributeUnits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LinkTable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LinkTable = append(m.LinkTable, &Link{}) + if err := m.LinkTable[len(m.LinkTable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StringTable", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StringTable = append(m.StringTable, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeNanos", wireType) + } + m.TimeNanos = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimeNanos |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DurationNanos", wireType) + } + m.DurationNanos = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DurationNanos |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PeriodType", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PeriodType.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Period", wireType) + } + m.Period = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Period |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 15: + if wireType == 0 { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.CommentStrindices = append(m.CommentStrindices, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.CommentStrindices) == 0 { + m.CommentStrindices = make([]int32, 0, elementCount) + } + for iNdEx < postIndex { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.CommentStrindices = append(m.CommentStrindices, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field CommentStrindices", wireType) + } + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultSampleTypeStrindex", wireType) + } + m.DefaultSampleTypeStrindex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DefaultSampleTypeStrindex |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProfileId", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ProfileId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 19: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType) + } + m.DroppedAttributesCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DroppedAttributesCount |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OriginalPayloadFormat", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OriginalPayloadFormat = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OriginalPayload", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OriginalPayload = append(m.OriginalPayload[:0], dAtA[iNdEx:postIndex]...) 
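// The append into m.OriginalPayload[:0] copies the payload out of dAtA, so the decoded message
// never aliases the caller's buffer; the nil check that follows keeps a present-but-empty
// field as []byte{} rather than nil.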
+ if m.OriginalPayload == nil { + m.OriginalPayload = []byte{} + } + iNdEx = postIndex + case 22: + if wireType == 0 { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AttributeIndices = append(m.AttributeIndices, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.AttributeIndices) == 0 { + m.AttributeIndices = make([]int32, 0, elementCount) + } + for iNdEx < postIndex { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AttributeIndices = append(m.AttributeIndices, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field AttributeIndices", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipProfiles(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProfiles + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AttributeUnit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AttributeUnit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AttributeUnit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AttributeKeyStrindex", wireType) + } + m.AttributeKeyStrindex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AttributeKeyStrindex |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UnitStrindex", wireType) + } + m.UnitStrindex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UnitStrindex |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipProfiles(dAtA[iNdEx:]) + if err != nil { + return err + } + 
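// skippy is the byte length of the unrecognized field as measured by skipProfiles; the bounds
// checks that follow reject negative lengths and offset overflow before iNdEx advances past it.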
if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProfiles + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Link) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Link: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Link: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TraceId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProfiles(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProfiles + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValueType) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValueType: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValueType: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TypeStrindex", wireType) + } + m.TypeStrindex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + m.TypeStrindex |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UnitStrindex", wireType) + } + m.UnitStrindex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UnitStrindex |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType) + } + m.AggregationTemporality = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AggregationTemporality |= AggregationTemporality(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipProfiles(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProfiles + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Sample) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Sample: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Sample: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LocationsStartIndex", wireType) + } + m.LocationsStartIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LocationsStartIndex |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LocationsLength", wireType) + } + m.LocationsLength = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LocationsLength |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType == 0 { + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Value = append(m.Value, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var 
elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.Value) == 0 { + m.Value = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Value = append(m.Value, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + case 4: + if wireType == 0 { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AttributeIndices = append(m.AttributeIndices, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.AttributeIndices) == 0 { + m.AttributeIndices = make([]int32, 0, elementCount) + } + for iNdEx < postIndex { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AttributeIndices = append(m.AttributeIndices, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field AttributeIndices", wireType) + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LinkIndex", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LinkIndex_ = &Sample_LinkIndex{v} + case 6: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TimestampsUnixNano = append(m.TimestampsUnixNano, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = 
count + if elementCount != 0 && len(m.TimestampsUnixNano) == 0 { + m.TimestampsUnixNano = make([]uint64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TimestampsUnixNano = append(m.TimestampsUnixNano, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field TimestampsUnixNano", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipProfiles(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProfiles + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Mapping) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Mapping: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Mapping: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemoryStart", wireType) + } + m.MemoryStart = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MemoryStart |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemoryLimit", wireType) + } + m.MemoryLimit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MemoryLimit |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FileOffset", wireType) + } + m.FileOffset = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FileOffset |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FilenameStrindex", wireType) + } + m.FilenameStrindex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FilenameStrindex |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType == 0 { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AttributeIndices = append(m.AttributeIndices, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + 
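// A varint longer than ten bytes (7 payload bits each) cannot encode a uint64, so the
// shift >= 64 guard in these decode loops aborts with ErrIntOverflowProfiles.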
if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.AttributeIndices) == 0 { + m.AttributeIndices = make([]int32, 0, elementCount) + } + for iNdEx < postIndex { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AttributeIndices = append(m.AttributeIndices, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field AttributeIndices", wireType) + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HasFunctions", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.HasFunctions = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HasFilenames", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.HasFilenames = bool(v != 0) + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HasLineNumbers", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.HasLineNumbers = bool(v != 0) + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HasInlineFrames", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.HasInlineFrames = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipProfiles(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProfiles + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Location) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Location: wiretype end 
group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Location: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MappingIndex", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MappingIndex_ = &Location_MappingIndex{v} + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } + m.Address = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Address |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Line", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Line = append(m.Line, &Line{}) + if err := m.Line[len(m.Line)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsFolded", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsFolded = bool(v != 0) + case 5: + if wireType == 0 { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AttributeIndices = append(m.AttributeIndices, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.AttributeIndices) == 0 { + m.AttributeIndices = make([]int32, 0, elementCount) + } + for iNdEx < postIndex { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AttributeIndices = append(m.AttributeIndices, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field 
AttributeIndices", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipProfiles(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProfiles + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Line) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Line: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Line: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FunctionIndex", wireType) + } + m.FunctionIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FunctionIndex |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Line", wireType) + } + m.Line = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Line |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Column", wireType) + } + m.Column = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Column |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipProfiles(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProfiles + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Function) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Function: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Function: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NameStrindex", wireType) + } + m.NameStrindex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NameStrindex |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + 
} + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SystemNameStrindex", wireType) + } + m.SystemNameStrindex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SystemNameStrindex |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FilenameStrindex", wireType) + } + m.FilenameStrindex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FilenameStrindex |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartLine", wireType) + } + m.StartLine = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartLine |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipProfiles(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProfiles + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipProfiles(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowProfiles + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowProfiles + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowProfiles + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthProfiles + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupProfiles + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthProfiles + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthProfiles = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowProfiles = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupProfiles = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1experimental/pprofextended.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1experimental/pprofextended.pb.go deleted file mode 100644 index 64c91b3221e..00000000000 --- 
a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1experimental/pprofextended.pb.go +++ /dev/null @@ -1,4919 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: opentelemetry/proto/profiles/v1experimental/pprofextended.proto - -package v1experimental - -import ( - fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - - go_opentelemetry_io_collector_pdata_internal_data "go.opentelemetry.io/collector/pdata/internal/data" - v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// Specifies the method of aggregating metric values, either DELTA (change since last report) -// or CUMULATIVE (total since a fixed start time). -type AggregationTemporality int32 - -const ( - // UNSPECIFIED is the default AggregationTemporality, it MUST not be used. - AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED AggregationTemporality = 0 - //* DELTA is an AggregationTemporality for a profiler which reports - //changes since last report time. Successive metrics contain aggregation of - //values from continuous and non-overlapping intervals. - // - //The values for a DELTA metric are based only on the time interval - //associated with one measurement cycle. There is no dependency on - //previous measurements like is the case for CUMULATIVE metrics. - // - //For example, consider a system measuring the number of requests that - //it receives and reports the sum of these requests every second as a - //DELTA metric: - // - //1. The system starts receiving at time=t_0. - //2. A request is received, the system measures 1 request. - //3. A request is received, the system measures 1 request. - //4. A request is received, the system measures 1 request. - //5. The 1 second collection cycle ends. A metric is exported for the - //number of requests received over the interval of time t_0 to - //t_0+1 with a value of 3. - //6. A request is received, the system measures 1 request. - //7. A request is received, the system measures 1 request. - //8. The 1 second collection cycle ends. A metric is exported for the - //number of requests received over the interval of time t_0+1 to - //t_0+2 with a value of 2. - AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA AggregationTemporality = 1 - //* CUMULATIVE is an AggregationTemporality for a profiler which - //reports changes since a fixed start time. This means that current values - //of a CUMULATIVE metric depend on all previous measurements since the - //start time. Because of this, the sender is required to retain this state - //in some form. If this state is lost or invalidated, the CUMULATIVE metric - //values MUST be reset and a new fixed start time following the last - //reported measurement time sent MUST be used. - // - //For example, consider a system measuring the number of requests that - //it receives and reports the sum of these requests every second as a - //CUMULATIVE metric: - // - //1. The system starts receiving at time=t_0. - //2. 
A request is received, the system measures 1 request. - //3. A request is received, the system measures 1 request. - //4. A request is received, the system measures 1 request. - //5. The 1 second collection cycle ends. A metric is exported for the - //number of requests received over the interval of time t_0 to - //t_0+1 with a value of 3. - //6. A request is received, the system measures 1 request. - //7. A request is received, the system measures 1 request. - //8. The 1 second collection cycle ends. A metric is exported for the - //number of requests received over the interval of time t_0 to - //t_0+2 with a value of 5. - //9. The system experiences a fault and loses state. - //10. The system recovers and resumes receiving at time=t_1. - //11. A request is received, the system measures 1 request. - //12. The 1 second collection cycle ends. A metric is exported for the - //number of requests received over the interval of time t_1 to - //t_0+1 with a value of 1. - // - //Note: Even though, when reporting changes since last report time, using - //CUMULATIVE is valid, it is not recommended. - AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE AggregationTemporality = 2 -) - -var AggregationTemporality_name = map[int32]string{ - 0: "AGGREGATION_TEMPORALITY_UNSPECIFIED", - 1: "AGGREGATION_TEMPORALITY_DELTA", - 2: "AGGREGATION_TEMPORALITY_CUMULATIVE", -} - -var AggregationTemporality_value = map[string]int32{ - "AGGREGATION_TEMPORALITY_UNSPECIFIED": 0, - "AGGREGATION_TEMPORALITY_DELTA": 1, - "AGGREGATION_TEMPORALITY_CUMULATIVE": 2, -} - -func (x AggregationTemporality) String() string { - return proto.EnumName(AggregationTemporality_name, int32(x)) -} - -func (AggregationTemporality) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_05f9ce3fdbeb046f, []int{0} -} - -// Indicates the semantics of the build_id field. -type BuildIdKind int32 - -const ( - // Linker-generated build ID, stored in the ELF binary notes. - BuildIdKind_BUILD_ID_LINKER BuildIdKind = 0 - // Build ID based on the content hash of the binary. Currently no particular - // hashing approach is standardized, so a given producer needs to define it - // themselves and thus unlike BUILD_ID_LINKER this kind of hash is producer-specific. - // We may choose to provide a standardized stable hash recommendation later. - BuildIdKind_BUILD_ID_BINARY_HASH BuildIdKind = 1 -) - -var BuildIdKind_name = map[int32]string{ - 0: "BUILD_ID_LINKER", - 1: "BUILD_ID_BINARY_HASH", -} - -var BuildIdKind_value = map[string]int32{ - "BUILD_ID_LINKER": 0, - "BUILD_ID_BINARY_HASH": 1, -} - -func (x BuildIdKind) String() string { - return proto.EnumName(BuildIdKind_name, int32(x)) -} - -func (BuildIdKind) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_05f9ce3fdbeb046f, []int{1} -} - -// Represents a complete profile, including sample types, samples, -// mappings to binaries, locations, functions, string table, and additional metadata. -type Profile struct { - // A description of the samples associated with each Sample.value. - // For a cpu profile this might be: - // [["cpu","nanoseconds"]] or [["wall","seconds"]] or [["syscall","count"]] - // For a heap profile, this might be: - // [["allocations","count"], ["space","bytes"]], - // If one of the values represents the number of events represented - // by the sample, by convention it should be at index 0 and use - // sample_type.unit == "count". 
- SampleType []ValueType `protobuf:"bytes,1,rep,name=sample_type,json=sampleType,proto3" json:"sample_type"` - // The set of samples recorded in this profile. - Sample []Sample `protobuf:"bytes,2,rep,name=sample,proto3" json:"sample"` - // Mapping from address ranges to the image/binary/library mapped - // into that address range. mapping[0] will be the main binary. - Mapping []Mapping `protobuf:"bytes,3,rep,name=mapping,proto3" json:"mapping"` - // Locations referenced by samples via location_indices. - Location []Location `protobuf:"bytes,4,rep,name=location,proto3" json:"location"` - // Array of locations referenced by samples. - LocationIndices []int64 `protobuf:"varint,15,rep,packed,name=location_indices,json=locationIndices,proto3" json:"location_indices,omitempty"` - // Functions referenced by locations. - Function []Function `protobuf:"bytes,5,rep,name=function,proto3" json:"function"` - // Lookup table for attributes. - AttributeTable []v1.KeyValue `protobuf:"bytes,16,rep,name=attribute_table,json=attributeTable,proto3" json:"attribute_table"` - // Represents a mapping between Attribute Keys and Units. - AttributeUnits []AttributeUnit `protobuf:"bytes,17,rep,name=attribute_units,json=attributeUnits,proto3" json:"attribute_units"` - // Lookup table for links. - LinkTable []Link `protobuf:"bytes,18,rep,name=link_table,json=linkTable,proto3" json:"link_table"` - // A common table for strings referenced by various messages. - // string_table[0] must always be "". - StringTable []string `protobuf:"bytes,6,rep,name=string_table,json=stringTable,proto3" json:"string_table,omitempty"` - // frames with Function.function_name fully matching the following - // regexp will be dropped from the samples, along with their successors. - DropFrames int64 `protobuf:"varint,7,opt,name=drop_frames,json=dropFrames,proto3" json:"drop_frames,omitempty"` - // frames with Function.function_name fully matching the following - // regexp will be kept, even if it matches drop_frames. - KeepFrames int64 `protobuf:"varint,8,opt,name=keep_frames,json=keepFrames,proto3" json:"keep_frames,omitempty"` - // Time of collection (UTC) represented as nanoseconds past the epoch. - TimeNanos int64 `protobuf:"varint,9,opt,name=time_nanos,json=timeNanos,proto3" json:"time_nanos,omitempty"` - // Duration of the profile, if a duration makes sense. - DurationNanos int64 `protobuf:"varint,10,opt,name=duration_nanos,json=durationNanos,proto3" json:"duration_nanos,omitempty"` - // The kind of events between sampled occurrences. - // e.g [ "cpu","cycles" ] or [ "heap","bytes" ] - PeriodType ValueType `protobuf:"bytes,11,opt,name=period_type,json=periodType,proto3" json:"period_type"` - // The number of events between sampled occurrences. - Period int64 `protobuf:"varint,12,opt,name=period,proto3" json:"period,omitempty"` - // Free-form text associated with the profile. The text is displayed as is - // to the user by the tools that read profiles (e.g. by pprof). This field - // should not be used to store any machine-readable information, it is only - // for human-friendly content. The profile must stay functional if this field - // is cleaned. - Comment []int64 `protobuf:"varint,13,rep,packed,name=comment,proto3" json:"comment,omitempty"` - // Index into the string table of the type of the preferred sample - // value. If unset, clients should default to the last sample value. 
- DefaultSampleType int64 `protobuf:"varint,14,opt,name=default_sample_type,json=defaultSampleType,proto3" json:"default_sample_type,omitempty"` -} - -func (m *Profile) Reset() { *m = Profile{} } -func (m *Profile) String() string { return proto.CompactTextString(m) } -func (*Profile) ProtoMessage() {} -func (*Profile) Descriptor() ([]byte, []int) { - return fileDescriptor_05f9ce3fdbeb046f, []int{0} -} -func (m *Profile) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Profile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Profile.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Profile) XXX_Merge(src proto.Message) { - xxx_messageInfo_Profile.Merge(m, src) -} -func (m *Profile) XXX_Size() int { - return m.Size() -} -func (m *Profile) XXX_DiscardUnknown() { - xxx_messageInfo_Profile.DiscardUnknown(m) -} - -var xxx_messageInfo_Profile proto.InternalMessageInfo - -func (m *Profile) GetSampleType() []ValueType { - if m != nil { - return m.SampleType - } - return nil -} - -func (m *Profile) GetSample() []Sample { - if m != nil { - return m.Sample - } - return nil -} - -func (m *Profile) GetMapping() []Mapping { - if m != nil { - return m.Mapping - } - return nil -} - -func (m *Profile) GetLocation() []Location { - if m != nil { - return m.Location - } - return nil -} - -func (m *Profile) GetLocationIndices() []int64 { - if m != nil { - return m.LocationIndices - } - return nil -} - -func (m *Profile) GetFunction() []Function { - if m != nil { - return m.Function - } - return nil -} - -func (m *Profile) GetAttributeTable() []v1.KeyValue { - if m != nil { - return m.AttributeTable - } - return nil -} - -func (m *Profile) GetAttributeUnits() []AttributeUnit { - if m != nil { - return m.AttributeUnits - } - return nil -} - -func (m *Profile) GetLinkTable() []Link { - if m != nil { - return m.LinkTable - } - return nil -} - -func (m *Profile) GetStringTable() []string { - if m != nil { - return m.StringTable - } - return nil -} - -func (m *Profile) GetDropFrames() int64 { - if m != nil { - return m.DropFrames - } - return 0 -} - -func (m *Profile) GetKeepFrames() int64 { - if m != nil { - return m.KeepFrames - } - return 0 -} - -func (m *Profile) GetTimeNanos() int64 { - if m != nil { - return m.TimeNanos - } - return 0 -} - -func (m *Profile) GetDurationNanos() int64 { - if m != nil { - return m.DurationNanos - } - return 0 -} - -func (m *Profile) GetPeriodType() ValueType { - if m != nil { - return m.PeriodType - } - return ValueType{} -} - -func (m *Profile) GetPeriod() int64 { - if m != nil { - return m.Period - } - return 0 -} - -func (m *Profile) GetComment() []int64 { - if m != nil { - return m.Comment - } - return nil -} - -func (m *Profile) GetDefaultSampleType() int64 { - if m != nil { - return m.DefaultSampleType - } - return 0 -} - -// Represents a mapping between Attribute Keys and Units. -type AttributeUnit struct { - // Index into string table. - AttributeKey int64 `protobuf:"varint,1,opt,name=attribute_key,json=attributeKey,proto3" json:"attribute_key,omitempty"` - // Index into string table. 
- Unit int64 `protobuf:"varint,2,opt,name=unit,proto3" json:"unit,omitempty"` -} - -func (m *AttributeUnit) Reset() { *m = AttributeUnit{} } -func (m *AttributeUnit) String() string { return proto.CompactTextString(m) } -func (*AttributeUnit) ProtoMessage() {} -func (*AttributeUnit) Descriptor() ([]byte, []int) { - return fileDescriptor_05f9ce3fdbeb046f, []int{1} -} -func (m *AttributeUnit) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AttributeUnit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AttributeUnit.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AttributeUnit) XXX_Merge(src proto.Message) { - xxx_messageInfo_AttributeUnit.Merge(m, src) -} -func (m *AttributeUnit) XXX_Size() int { - return m.Size() -} -func (m *AttributeUnit) XXX_DiscardUnknown() { - xxx_messageInfo_AttributeUnit.DiscardUnknown(m) -} - -var xxx_messageInfo_AttributeUnit proto.InternalMessageInfo - -func (m *AttributeUnit) GetAttributeKey() int64 { - if m != nil { - return m.AttributeKey - } - return 0 -} - -func (m *AttributeUnit) GetUnit() int64 { - if m != nil { - return m.Unit - } - return 0 -} - -// A pointer from a profile Sample to a trace Span. -// Connects a profile sample to a trace span, identified by unique trace and span IDs. -type Link struct { - // A unique identifier of a trace that this linked span is part of. The ID is a - // 16-byte array. - TraceId go_opentelemetry_io_collector_pdata_internal_data.TraceID `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.TraceID" json:"trace_id"` - // A unique identifier for the linked span. The ID is an 8-byte array. - SpanId go_opentelemetry_io_collector_pdata_internal_data.SpanID `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.SpanID" json:"span_id"` -} - -func (m *Link) Reset() { *m = Link{} } -func (m *Link) String() string { return proto.CompactTextString(m) } -func (*Link) ProtoMessage() {} -func (*Link) Descriptor() ([]byte, []int) { - return fileDescriptor_05f9ce3fdbeb046f, []int{2} -} -func (m *Link) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Link) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Link.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Link) XXX_Merge(src proto.Message) { - xxx_messageInfo_Link.Merge(m, src) -} -func (m *Link) XXX_Size() int { - return m.Size() -} -func (m *Link) XXX_DiscardUnknown() { - xxx_messageInfo_Link.DiscardUnknown(m) -} - -var xxx_messageInfo_Link proto.InternalMessageInfo - -// ValueType describes the type and units of a value, with an optional aggregation temporality. 
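
A minimal sketch of the varint decoding that every Unmarshal method in this file inlines by hand (the `b&0x7F`, `shift += 7`, `b < 0x80` loops above): protocol-buffer integers are base-128 varints, seven payload bits per byte with the high bit marking continuation. The helper below is illustrative only and not part of this package; the standard-library equivalent is encoding/binary.Uvarint.

func exampleDecodeVarint(buf []byte) (value uint64, n int) {
	for i, b := range buf {
		if i == 10 {
			// A uint64 fits in at most 10 varint bytes; an 11th
			// continuation byte means corrupt input (this is what
			// the ErrIntOverflow* sentinels guard against).
			return 0, -1
		}
		value |= uint64(b&0x7F) << (7 * uint(i)) // accumulate 7 payload bits per byte
		if b < 0x80 {
			return value, i + 1 // high bit clear: this was the final byte
		}
	}
	return 0, 0 // input ended mid-varint (cf. io.ErrUnexpectedEOF above)
}

The generated code repeats this loop at every field rather than calling a helper, which is why the same pattern recurs throughout the Marshal and Unmarshal methods in this file.
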
-type ValueType struct { - Type int64 `protobuf:"varint,1,opt,name=type,proto3" json:"type,omitempty"` - Unit int64 `protobuf:"varint,2,opt,name=unit,proto3" json:"unit,omitempty"` - AggregationTemporality AggregationTemporality `protobuf:"varint,3,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.profiles.v1experimental.AggregationTemporality" json:"aggregation_temporality,omitempty"` -} - -func (m *ValueType) Reset() { *m = ValueType{} } -func (m *ValueType) String() string { return proto.CompactTextString(m) } -func (*ValueType) ProtoMessage() {} -func (*ValueType) Descriptor() ([]byte, []int) { - return fileDescriptor_05f9ce3fdbeb046f, []int{3} -} -func (m *ValueType) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ValueType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ValueType.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ValueType) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValueType.Merge(m, src) -} -func (m *ValueType) XXX_Size() int { - return m.Size() -} -func (m *ValueType) XXX_DiscardUnknown() { - xxx_messageInfo_ValueType.DiscardUnknown(m) -} - -var xxx_messageInfo_ValueType proto.InternalMessageInfo - -func (m *ValueType) GetType() int64 { - if m != nil { - return m.Type - } - return 0 -} - -func (m *ValueType) GetUnit() int64 { - if m != nil { - return m.Unit - } - return 0 -} - -func (m *ValueType) GetAggregationTemporality() AggregationTemporality { - if m != nil { - return m.AggregationTemporality - } - return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED -} - -// Each Sample records values encountered in some program -// context. The program context is typically a stack trace, perhaps -// augmented with auxiliary information like the thread-id, some -// indicator of a higher level request being handled etc. -type Sample struct { - // The indices recorded here correspond to locations in Profile.location. - // The leaf is at location_index[0]. [deprecated, superseded by locations_start_index / locations_length] - LocationIndex []uint64 `protobuf:"varint,1,rep,packed,name=location_index,json=locationIndex,proto3" json:"location_index,omitempty"` - // locations_start_index along with locations_length refers to to a slice of locations in Profile.location. - // Supersedes location_index. - LocationsStartIndex uint64 `protobuf:"varint,7,opt,name=locations_start_index,json=locationsStartIndex,proto3" json:"locations_start_index,omitempty"` - // locations_length along with locations_start_index refers to a slice of locations in Profile.location. - // Supersedes location_index. - LocationsLength uint64 `protobuf:"varint,8,opt,name=locations_length,json=locationsLength,proto3" json:"locations_length,omitempty"` - // A 128bit id that uniquely identifies this stacktrace, globally. Index into string table. [optional] - StacktraceIdIndex uint32 `protobuf:"varint,9,opt,name=stacktrace_id_index,json=stacktraceIdIndex,proto3" json:"stacktrace_id_index,omitempty"` - // The type and unit of each value is defined by the corresponding - // entry in Profile.sample_type. All samples must have the same - // number of values, the same as the length of Profile.sample_type. 
- // When aggregating multiple samples into a single sample, the - // result has a list of values that is the element-wise sum of the - // lists of the originals. - Value []int64 `protobuf:"varint,2,rep,packed,name=value,proto3" json:"value,omitempty"` - // label includes additional context for this sample. It can include - // things like a thread id, allocation size, etc. - // - // NOTE: While possible, having multiple values for the same label key is - // strongly discouraged and should never be used. Most tools (e.g. pprof) do - // not have good (or any) support for multi-value labels. And an even more - // discouraged case is having a string label and a numeric label of the same - // name on a sample. Again, possible to express, but should not be used. - // [deprecated, superseded by attributes] - Label []Label `protobuf:"bytes,3,rep,name=label,proto3" json:"label"` - // References to attributes in Profile.attribute_table. [optional] - Attributes []uint64 `protobuf:"varint,10,rep,packed,name=attributes,proto3" json:"attributes,omitempty"` - // Reference to link in Profile.link_table. [optional] - Link uint64 `protobuf:"varint,12,opt,name=link,proto3" json:"link,omitempty"` - // Timestamps associated with Sample represented in nanoseconds. These timestamps are expected - // to fall within the Profile's time range. [optional] - TimestampsUnixNano []uint64 `protobuf:"varint,13,rep,packed,name=timestamps_unix_nano,json=timestampsUnixNano,proto3" json:"timestamps_unix_nano,omitempty"` -} - -func (m *Sample) Reset() { *m = Sample{} } -func (m *Sample) String() string { return proto.CompactTextString(m) } -func (*Sample) ProtoMessage() {} -func (*Sample) Descriptor() ([]byte, []int) { - return fileDescriptor_05f9ce3fdbeb046f, []int{4} -} -func (m *Sample) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Sample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Sample.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Sample) XXX_Merge(src proto.Message) { - xxx_messageInfo_Sample.Merge(m, src) -} -func (m *Sample) XXX_Size() int { - return m.Size() -} -func (m *Sample) XXX_DiscardUnknown() { - xxx_messageInfo_Sample.DiscardUnknown(m) -} - -var xxx_messageInfo_Sample proto.InternalMessageInfo - -func (m *Sample) GetLocationIndex() []uint64 { - if m != nil { - return m.LocationIndex - } - return nil -} - -func (m *Sample) GetLocationsStartIndex() uint64 { - if m != nil { - return m.LocationsStartIndex - } - return 0 -} - -func (m *Sample) GetLocationsLength() uint64 { - if m != nil { - return m.LocationsLength - } - return 0 -} - -func (m *Sample) GetStacktraceIdIndex() uint32 { - if m != nil { - return m.StacktraceIdIndex - } - return 0 -} - -func (m *Sample) GetValue() []int64 { - if m != nil { - return m.Value - } - return nil -} - -func (m *Sample) GetLabel() []Label { - if m != nil { - return m.Label - } - return nil -} - -func (m *Sample) GetAttributes() []uint64 { - if m != nil { - return m.Attributes - } - return nil -} - -func (m *Sample) GetLink() uint64 { - if m != nil { - return m.Link - } - return 0 -} - -func (m *Sample) GetTimestampsUnixNano() []uint64 { - if m != nil { - return m.TimestampsUnixNano - } - return nil -} - -// Provides additional context for a sample, -// such as thread ID or allocation size, with optional units. 
[deprecated] -type Label struct { - Key int64 `protobuf:"varint,1,opt,name=key,proto3" json:"key,omitempty"` - // At most one of the following must be present - Str int64 `protobuf:"varint,2,opt,name=str,proto3" json:"str,omitempty"` - Num int64 `protobuf:"varint,3,opt,name=num,proto3" json:"num,omitempty"` - // Should only be present when num is present. - // Specifies the units of num. - // Use arbitrary string (for example, "requests") as a custom count unit. - // If no unit is specified, consumer may apply heuristic to deduce the unit. - // Consumers may also interpret units like "bytes" and "kilobytes" as memory - // units and units like "seconds" and "nanoseconds" as time units, - // and apply appropriate unit conversions to these. - NumUnit int64 `protobuf:"varint,4,opt,name=num_unit,json=numUnit,proto3" json:"num_unit,omitempty"` -} - -func (m *Label) Reset() { *m = Label{} } -func (m *Label) String() string { return proto.CompactTextString(m) } -func (*Label) ProtoMessage() {} -func (*Label) Descriptor() ([]byte, []int) { - return fileDescriptor_05f9ce3fdbeb046f, []int{5} -} -func (m *Label) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Label) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Label.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Label) XXX_Merge(src proto.Message) { - xxx_messageInfo_Label.Merge(m, src) -} -func (m *Label) XXX_Size() int { - return m.Size() -} -func (m *Label) XXX_DiscardUnknown() { - xxx_messageInfo_Label.DiscardUnknown(m) -} - -var xxx_messageInfo_Label proto.InternalMessageInfo - -func (m *Label) GetKey() int64 { - if m != nil { - return m.Key - } - return 0 -} - -func (m *Label) GetStr() int64 { - if m != nil { - return m.Str - } - return 0 -} - -func (m *Label) GetNum() int64 { - if m != nil { - return m.Num - } - return 0 -} - -func (m *Label) GetNumUnit() int64 { - if m != nil { - return m.NumUnit - } - return 0 -} - -// Describes the mapping of a binary in memory, including its address range, -// file offset, and metadata like build ID -type Mapping struct { - // Unique nonzero id for the mapping. [deprecated] - Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - // Address at which the binary (or DLL) is loaded into memory. - MemoryStart uint64 `protobuf:"varint,2,opt,name=memory_start,json=memoryStart,proto3" json:"memory_start,omitempty"` - // The limit of the address range occupied by this mapping. - MemoryLimit uint64 `protobuf:"varint,3,opt,name=memory_limit,json=memoryLimit,proto3" json:"memory_limit,omitempty"` - // Offset in the binary that corresponds to the first mapped address. - FileOffset uint64 `protobuf:"varint,4,opt,name=file_offset,json=fileOffset,proto3" json:"file_offset,omitempty"` - // The object this entry is loaded from. This can be a filename on - // disk for the main binary and shared libraries, or virtual - // abstractions like "[vdso]". - Filename int64 `protobuf:"varint,5,opt,name=filename,proto3" json:"filename,omitempty"` - // A string that uniquely identifies a particular program version - // with high probability. E.g., for binaries generated by GNU tools, - // it could be the contents of the .note.gnu.build-id field. - BuildId int64 `protobuf:"varint,6,opt,name=build_id,json=buildId,proto3" json:"build_id,omitempty"` - // Specifies the kind of build id. 
See BuildIdKind enum for more details [optional] - BuildIdKind BuildIdKind `protobuf:"varint,11,opt,name=build_id_kind,json=buildIdKind,proto3,enum=opentelemetry.proto.profiles.v1experimental.BuildIdKind" json:"build_id_kind,omitempty"` - // References to attributes in Profile.attribute_table. [optional] - Attributes []uint64 `protobuf:"varint,12,rep,packed,name=attributes,proto3" json:"attributes,omitempty"` - // The following fields indicate the resolution of symbolic info. - HasFunctions bool `protobuf:"varint,7,opt,name=has_functions,json=hasFunctions,proto3" json:"has_functions,omitempty"` - HasFilenames bool `protobuf:"varint,8,opt,name=has_filenames,json=hasFilenames,proto3" json:"has_filenames,omitempty"` - HasLineNumbers bool `protobuf:"varint,9,opt,name=has_line_numbers,json=hasLineNumbers,proto3" json:"has_line_numbers,omitempty"` - HasInlineFrames bool `protobuf:"varint,10,opt,name=has_inline_frames,json=hasInlineFrames,proto3" json:"has_inline_frames,omitempty"` -} - -func (m *Mapping) Reset() { *m = Mapping{} } -func (m *Mapping) String() string { return proto.CompactTextString(m) } -func (*Mapping) ProtoMessage() {} -func (*Mapping) Descriptor() ([]byte, []int) { - return fileDescriptor_05f9ce3fdbeb046f, []int{6} -} -func (m *Mapping) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Mapping) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Mapping.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Mapping) XXX_Merge(src proto.Message) { - xxx_messageInfo_Mapping.Merge(m, src) -} -func (m *Mapping) XXX_Size() int { - return m.Size() -} -func (m *Mapping) XXX_DiscardUnknown() { - xxx_messageInfo_Mapping.DiscardUnknown(m) -} - -var xxx_messageInfo_Mapping proto.InternalMessageInfo - -func (m *Mapping) GetId() uint64 { - if m != nil { - return m.Id - } - return 0 -} - -func (m *Mapping) GetMemoryStart() uint64 { - if m != nil { - return m.MemoryStart - } - return 0 -} - -func (m *Mapping) GetMemoryLimit() uint64 { - if m != nil { - return m.MemoryLimit - } - return 0 -} - -func (m *Mapping) GetFileOffset() uint64 { - if m != nil { - return m.FileOffset - } - return 0 -} - -func (m *Mapping) GetFilename() int64 { - if m != nil { - return m.Filename - } - return 0 -} - -func (m *Mapping) GetBuildId() int64 { - if m != nil { - return m.BuildId - } - return 0 -} - -func (m *Mapping) GetBuildIdKind() BuildIdKind { - if m != nil { - return m.BuildIdKind - } - return BuildIdKind_BUILD_ID_LINKER -} - -func (m *Mapping) GetAttributes() []uint64 { - if m != nil { - return m.Attributes - } - return nil -} - -func (m *Mapping) GetHasFunctions() bool { - if m != nil { - return m.HasFunctions - } - return false -} - -func (m *Mapping) GetHasFilenames() bool { - if m != nil { - return m.HasFilenames - } - return false -} - -func (m *Mapping) GetHasLineNumbers() bool { - if m != nil { - return m.HasLineNumbers - } - return false -} - -func (m *Mapping) GetHasInlineFrames() bool { - if m != nil { - return m.HasInlineFrames - } - return false -} - -// Describes function and line table debug information. -type Location struct { - // Unique nonzero id for the location. A profile could use - // instruction addresses or any integer sequence as ids. [deprecated] - Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - // The index of the corresponding profile.Mapping for this location. 
- // It can be unset if the mapping is unknown or not applicable for - // this profile type. - MappingIndex uint64 `protobuf:"varint,2,opt,name=mapping_index,json=mappingIndex,proto3" json:"mapping_index,omitempty"` - // The instruction address for this location, if available. It - // should be within [Mapping.memory_start...Mapping.memory_limit] - // for the corresponding mapping. A non-leaf address may be in the - // middle of a call instruction. It is up to display tools to find - // the beginning of the instruction if necessary. - Address uint64 `protobuf:"varint,3,opt,name=address,proto3" json:"address,omitempty"` - // Multiple line indicates this location has inlined functions, - // where the last entry represents the caller into which the - // preceding entries were inlined. - // - // E.g., if memcpy() is inlined into printf: - // line[0].function_name == "memcpy" - // line[1].function_name == "printf" - Line []Line `protobuf:"bytes,4,rep,name=line,proto3" json:"line"` - // Provides an indication that multiple symbols map to this location's - // address, for example due to identical code folding by the linker. In that - // case the line information above represents one of the multiple - // symbols. This field must be recomputed when the symbolization state of the - // profile changes. - IsFolded bool `protobuf:"varint,5,opt,name=is_folded,json=isFolded,proto3" json:"is_folded,omitempty"` - // Type of frame (e.g. kernel, native, python, hotspot, php). Index into string table. - TypeIndex uint32 `protobuf:"varint,6,opt,name=type_index,json=typeIndex,proto3" json:"type_index,omitempty"` - // References to attributes in Profile.attribute_table. [optional] - Attributes []uint64 `protobuf:"varint,7,rep,packed,name=attributes,proto3" json:"attributes,omitempty"` -} - -func (m *Location) Reset() { *m = Location{} } -func (m *Location) String() string { return proto.CompactTextString(m) } -func (*Location) ProtoMessage() {} -func (*Location) Descriptor() ([]byte, []int) { - return fileDescriptor_05f9ce3fdbeb046f, []int{7} -} -func (m *Location) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Location.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Location) XXX_Merge(src proto.Message) { - xxx_messageInfo_Location.Merge(m, src) -} -func (m *Location) XXX_Size() int { - return m.Size() -} -func (m *Location) XXX_DiscardUnknown() { - xxx_messageInfo_Location.DiscardUnknown(m) -} - -var xxx_messageInfo_Location proto.InternalMessageInfo - -func (m *Location) GetId() uint64 { - if m != nil { - return m.Id - } - return 0 -} - -func (m *Location) GetMappingIndex() uint64 { - if m != nil { - return m.MappingIndex - } - return 0 -} - -func (m *Location) GetAddress() uint64 { - if m != nil { - return m.Address - } - return 0 -} - -func (m *Location) GetLine() []Line { - if m != nil { - return m.Line - } - return nil -} - -func (m *Location) GetIsFolded() bool { - if m != nil { - return m.IsFolded - } - return false -} - -func (m *Location) GetTypeIndex() uint32 { - if m != nil { - return m.TypeIndex - } - return 0 -} - -func (m *Location) GetAttributes() []uint64 { - if m != nil { - return m.Attributes - } - return nil -} - -// Details a specific line in a source code, linked to a function. 
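
The Location message above stores inlined call frames leaf-first in Location.Line, and names are table indexes rather than strings: each Line.FunctionIndex selects an entry of Profile.Function, whose Name and Filename fields in turn index Profile.StringTable. A hedged sketch of resolving one Location into readable frames — exampleResolveFrames is a hypothetical helper, indices are assumed in range (production code must bounds-check), and fmt is already imported by this file:

func exampleResolveFrames(p *Profile, loc *Location) []string {
	frames := make([]string, 0, len(loc.Line))
	// loc.Line[0] is the deepest (leaf) frame; the last entry is the
	// caller into which all preceding entries were inlined.
	for _, ln := range loc.Line {
		fn := p.Function[ln.FunctionIndex] // index into Profile.Function
		name := p.StringTable[fn.Name]     // Function.Name indexes the string table
		file := p.StringTable[fn.Filename] // as does Function.Filename
		frames = append(frames, fmt.Sprintf("%s at %s:%d", name, file, ln.Line))
	}
	return frames
}
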
-type Line struct { - // The index of the corresponding profile.Function for this line. - FunctionIndex uint64 `protobuf:"varint,1,opt,name=function_index,json=functionIndex,proto3" json:"function_index,omitempty"` - // Line number in source code. - Line int64 `protobuf:"varint,2,opt,name=line,proto3" json:"line,omitempty"` - // Column number in source code. - Column int64 `protobuf:"varint,3,opt,name=column,proto3" json:"column,omitempty"` -} - -func (m *Line) Reset() { *m = Line{} } -func (m *Line) String() string { return proto.CompactTextString(m) } -func (*Line) ProtoMessage() {} -func (*Line) Descriptor() ([]byte, []int) { - return fileDescriptor_05f9ce3fdbeb046f, []int{8} -} -func (m *Line) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Line) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Line.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Line) XXX_Merge(src proto.Message) { - xxx_messageInfo_Line.Merge(m, src) -} -func (m *Line) XXX_Size() int { - return m.Size() -} -func (m *Line) XXX_DiscardUnknown() { - xxx_messageInfo_Line.DiscardUnknown(m) -} - -var xxx_messageInfo_Line proto.InternalMessageInfo - -func (m *Line) GetFunctionIndex() uint64 { - if m != nil { - return m.FunctionIndex - } - return 0 -} - -func (m *Line) GetLine() int64 { - if m != nil { - return m.Line - } - return 0 -} - -func (m *Line) GetColumn() int64 { - if m != nil { - return m.Column - } - return 0 -} - -// Describes a function, including its human-readable name, system name, -// source file, and starting line number in the source. -type Function struct { - // Unique nonzero id for the function. [deprecated] - Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - // Name of the function, in human-readable form if available. - Name int64 `protobuf:"varint,2,opt,name=name,proto3" json:"name,omitempty"` - // Name of the function, as identified by the system. - // For instance, it can be a C++ mangled name. - SystemName int64 `protobuf:"varint,3,opt,name=system_name,json=systemName,proto3" json:"system_name,omitempty"` - // Source file containing the function. - Filename int64 `protobuf:"varint,4,opt,name=filename,proto3" json:"filename,omitempty"` - // Line number in source file. 
- StartLine int64 `protobuf:"varint,5,opt,name=start_line,json=startLine,proto3" json:"start_line,omitempty"` -} - -func (m *Function) Reset() { *m = Function{} } -func (m *Function) String() string { return proto.CompactTextString(m) } -func (*Function) ProtoMessage() {} -func (*Function) Descriptor() ([]byte, []int) { - return fileDescriptor_05f9ce3fdbeb046f, []int{9} -} -func (m *Function) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Function) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Function.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Function) XXX_Merge(src proto.Message) { - xxx_messageInfo_Function.Merge(m, src) -} -func (m *Function) XXX_Size() int { - return m.Size() -} -func (m *Function) XXX_DiscardUnknown() { - xxx_messageInfo_Function.DiscardUnknown(m) -} - -var xxx_messageInfo_Function proto.InternalMessageInfo - -func (m *Function) GetId() uint64 { - if m != nil { - return m.Id - } - return 0 -} - -func (m *Function) GetName() int64 { - if m != nil { - return m.Name - } - return 0 -} - -func (m *Function) GetSystemName() int64 { - if m != nil { - return m.SystemName - } - return 0 -} - -func (m *Function) GetFilename() int64 { - if m != nil { - return m.Filename - } - return 0 -} - -func (m *Function) GetStartLine() int64 { - if m != nil { - return m.StartLine - } - return 0 -} - -func init() { - proto.RegisterEnum("opentelemetry.proto.profiles.v1experimental.AggregationTemporality", AggregationTemporality_name, AggregationTemporality_value) - proto.RegisterEnum("opentelemetry.proto.profiles.v1experimental.BuildIdKind", BuildIdKind_name, BuildIdKind_value) - proto.RegisterType((*Profile)(nil), "opentelemetry.proto.profiles.v1experimental.Profile") - proto.RegisterType((*AttributeUnit)(nil), "opentelemetry.proto.profiles.v1experimental.AttributeUnit") - proto.RegisterType((*Link)(nil), "opentelemetry.proto.profiles.v1experimental.Link") - proto.RegisterType((*ValueType)(nil), "opentelemetry.proto.profiles.v1experimental.ValueType") - proto.RegisterType((*Sample)(nil), "opentelemetry.proto.profiles.v1experimental.Sample") - proto.RegisterType((*Label)(nil), "opentelemetry.proto.profiles.v1experimental.Label") - proto.RegisterType((*Mapping)(nil), "opentelemetry.proto.profiles.v1experimental.Mapping") - proto.RegisterType((*Location)(nil), "opentelemetry.proto.profiles.v1experimental.Location") - proto.RegisterType((*Line)(nil), "opentelemetry.proto.profiles.v1experimental.Line") - proto.RegisterType((*Function)(nil), "opentelemetry.proto.profiles.v1experimental.Function") -} - -func init() { - proto.RegisterFile("opentelemetry/proto/profiles/v1experimental/pprofextended.proto", fileDescriptor_05f9ce3fdbeb046f) -} - -var fileDescriptor_05f9ce3fdbeb046f = []byte{ - // 1483 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x57, 0xcd, 0x4f, 0x1b, 0x47, - 0x1b, 0xc7, 0x1f, 0xf8, 0xe3, 0x31, 0x06, 0x33, 0xe1, 0xe5, 0xdd, 0x37, 0xaf, 0x02, 0xc4, 0xa8, - 0x0d, 0x25, 0x92, 0x29, 0xa4, 0xad, 0xd2, 0xaa, 0x52, 0x6b, 0x82, 0x49, 0x56, 0x38, 0x86, 0x2e, - 0x86, 0x96, 0x2a, 0xd1, 0x6a, 0xf1, 0x0e, 0x66, 0xc4, 0xee, 0xec, 0x6a, 0x77, 0x8c, 0xb0, 0xd4, - 0x53, 0x8f, 0x51, 0x0f, 0x3d, 0xf7, 0x4f, 0xe8, 0xad, 0x7f, 0x41, 0xaf, 0x39, 0xe6, 0x52, 0xa9, - 0xea, 0x21, 0xaa, 0x92, 0xbf, 0xa1, 0xf7, 0x6a, 0x9e, 0x99, 0xb5, 0xcd, 
0x47, 0x0e, 0x6e, 0x2f, - 0x68, 0x9e, 0xdf, 0xfc, 0xe6, 0x37, 0xcf, 0xec, 0xf3, 0x65, 0xe0, 0x8b, 0x20, 0xa4, 0x5c, 0x50, - 0x8f, 0xfa, 0x54, 0x44, 0xfd, 0xb5, 0x30, 0x0a, 0x44, 0x20, 0xff, 0x9e, 0x30, 0x8f, 0xc6, 0x6b, - 0xe7, 0xeb, 0xf4, 0x22, 0xa4, 0x11, 0xf3, 0x29, 0x17, 0x8e, 0xb7, 0x16, 0xca, 0x0d, 0x7a, 0x21, - 0x28, 0x77, 0xa9, 0x5b, 0x43, 0x2e, 0xb9, 0x7f, 0x49, 0x40, 0x81, 0xb5, 0x44, 0xa0, 0x76, 0x59, - 0xe0, 0xf6, 0x5c, 0x37, 0xe8, 0x06, 0xea, 0x0e, 0xb9, 0x52, 0xec, 0xdb, 0xab, 0x37, 0xf9, 0xd0, - 0x09, 0x7c, 0x3f, 0xe0, 0x6b, 0xe7, 0xeb, 0x7a, 0xa5, 0xb8, 0xd5, 0xbf, 0x0a, 0x90, 0xdf, 0x53, - 0xea, 0xe4, 0x39, 0x94, 0x62, 0xc7, 0x0f, 0x3d, 0x6a, 0x8b, 0x7e, 0x48, 0x8d, 0xd4, 0x52, 0x66, - 0xa5, 0xb4, 0xf1, 0x49, 0x6d, 0x0c, 0x87, 0x6a, 0x87, 0x8e, 0xd7, 0xa3, 0xed, 0x7e, 0x48, 0x37, - 0xb3, 0x2f, 0x5f, 0x2f, 0x4e, 0x58, 0xa0, 0x04, 0x25, 0x42, 0xbe, 0x82, 0x9c, 0xb2, 0x8c, 0x34, - 0x2a, 0x3f, 0x18, 0x4b, 0x79, 0x1f, 0x8f, 0x6a, 0x59, 0x2d, 0x44, 0xda, 0x90, 0xf7, 0x9d, 0x30, - 0x64, 0xbc, 0x6b, 0x64, 0x50, 0xf3, 0xa3, 0xb1, 0x34, 0x9f, 0xaa, 0xb3, 0x5a, 0x34, 0x91, 0x22, - 0x5f, 0x43, 0xc1, 0x0b, 0x3a, 0x8e, 0x60, 0x01, 0x37, 0xb2, 0x28, 0xfb, 0xf1, 0x58, 0xb2, 0x4d, - 0x7d, 0x58, 0xeb, 0x0e, 0xc4, 0xc8, 0x07, 0x50, 0x49, 0xd6, 0x36, 0xe3, 0x2e, 0xeb, 0xd0, 0xd8, - 0x98, 0x59, 0xca, 0xac, 0x64, 0xac, 0x99, 0x04, 0x37, 0x15, 0x2c, 0x7d, 0x38, 0xe9, 0xf1, 0x0e, - 0xfa, 0x30, 0xf9, 0x0f, 0x7c, 0xd8, 0xd6, 0x87, 0x13, 0x1f, 0x12, 0x31, 0x72, 0x08, 0x33, 0x8e, - 0x10, 0x11, 0x3b, 0xee, 0x09, 0x6a, 0x0b, 0xe7, 0xd8, 0xa3, 0x46, 0x05, 0xf5, 0xef, 0xdd, 0xa8, - 0xaf, 0x93, 0xe5, 0x7c, 0xbd, 0xb6, 0x43, 0xfb, 0x18, 0x5d, 0xad, 0x38, 0x3d, 0x50, 0x69, 0x4b, - 0x11, 0xc2, 0x46, 0x75, 0x7b, 0x9c, 0x89, 0xd8, 0x98, 0x45, 0xdd, 0xcf, 0xc6, 0xf2, 0xbb, 0x9e, - 0x68, 0x1c, 0x70, 0x26, 0xae, 0x5d, 0x25, 0xc1, 0x98, 0x1c, 0x02, 0x78, 0x8c, 0x9f, 0x69, 0xef, - 0x09, 0xde, 0xb2, 0x3e, 0x5e, 0x84, 0x18, 0x3f, 0xd3, 0xe2, 0x45, 0x29, 0xa5, 0x9e, 0x70, 0x17, - 0xa6, 0x62, 0x11, 0x31, 0xde, 0xd5, 0xca, 0xb9, 0xa5, 0xcc, 0x4a, 0xd1, 0x2a, 0x29, 0x4c, 0x51, - 0x16, 0xa1, 0xe4, 0x46, 0x41, 0x68, 0x9f, 0x44, 0x8e, 0x4f, 0x63, 0x23, 0xbf, 0x94, 0x5a, 0xc9, - 0x58, 0x20, 0xa1, 0x6d, 0x44, 0x24, 0xe1, 0x8c, 0xd2, 0x01, 0xa1, 0xa0, 0x08, 0x12, 0xd2, 0x84, - 0x3b, 0x00, 0x82, 0xf9, 0xd4, 0xe6, 0x0e, 0x0f, 0x62, 0xa3, 0x88, 0xfb, 0x45, 0x89, 0xb4, 0x24, - 0x40, 0xde, 0x83, 0x69, 0xb7, 0x17, 0xa9, 0x14, 0x51, 0x14, 0x40, 0x4a, 0x39, 0x41, 0x15, 0xed, - 0x39, 0x94, 0xe4, 0x73, 0x02, 0x57, 0x95, 0x6a, 0x69, 0x29, 0xf5, 0xef, 0x4b, 0x55, 0x09, 0x62, - 0xa9, 0xce, 0x43, 0x4e, 0x59, 0xc6, 0x14, 0xde, 0xae, 0x2d, 0x62, 0x40, 0x5e, 0x26, 0x04, 0xe5, - 0xc2, 0x28, 0x63, 0xde, 0x26, 0x26, 0xa9, 0xc1, 0x2d, 0x97, 0x9e, 0x38, 0x3d, 0x4f, 0xd8, 0xa3, - 0x3d, 0x64, 0x1a, 0x8f, 0xcf, 0xea, 0xad, 0xfd, 0x41, 0x33, 0xa8, 0x3e, 0x81, 0xf2, 0xa5, 0x50, - 0x93, 0x65, 0x28, 0x0f, 0xf3, 0xe7, 0x8c, 0xf6, 0x8d, 0x14, 0x1e, 0x9d, 0x1a, 0x80, 0x3b, 0xb4, - 0x4f, 0x08, 0x64, 0x65, 0x6a, 0x19, 0x69, 0xdc, 0xc3, 0x75, 0xf5, 0xd7, 0x14, 0x64, 0x65, 0x3c, - 0xc9, 0x33, 0x28, 0x88, 0xc8, 0xe9, 0x50, 0x9b, 0xb9, 0x78, 0x78, 0x6a, 0xb3, 0x2e, 0x1f, 0xf6, - 0xc7, 0xeb, 0xc5, 0x4f, 0xbb, 0xc1, 0x95, 0x4f, 0xc3, 0x64, 0x43, 0xf4, 0x3c, 0xda, 0x11, 0x41, - 0xb4, 0x16, 0xba, 0x8e, 0x70, 0xd6, 0x18, 0x17, 0x34, 0xe2, 0x8e, 0xb7, 0x26, 0xad, 0x5a, 0x5b, - 0x2a, 0x99, 0x5b, 0x56, 0x1e, 0x25, 0x4d, 0x97, 0x1c, 0x41, 0x3e, 0x0e, 0x1d, 0x2e, 0xc5, 0xd3, - 0x28, 0xfe, 0xa5, 0x16, 0x7f, 0x38, 0xbe, 0xf8, 0x7e, 0xe8, 0x70, 0x73, 0xcb, 0xca, 0x49, 0x41, - 
0xd3, 0xad, 0xfe, 0x92, 0x82, 0xe2, 0x20, 0x1a, 0xf2, 0x8d, 0xba, 0xfd, 0xe2, 0x1b, 0x85, 0xc6, - 0xae, 0xbe, 0x9b, 0x7c, 0x07, 0xff, 0x75, 0xba, 0xdd, 0x88, 0x76, 0x55, 0xb2, 0x08, 0xea, 0x87, - 0x41, 0xe4, 0x78, 0x4c, 0xf4, 0x8d, 0xcc, 0x52, 0x6a, 0x65, 0x7a, 0xe3, 0xd1, 0x78, 0x85, 0x37, - 0xd4, 0x6a, 0x0f, 0xa5, 0xac, 0x79, 0xe7, 0x46, 0xbc, 0xfa, 0x22, 0x03, 0x39, 0x15, 0x4e, 0x99, - 0xb2, 0xa3, 0x5d, 0x8d, 0x5e, 0xe0, 0xe4, 0xc8, 0x5a, 0xe5, 0x91, 0x9e, 0x46, 0x2f, 0xc8, 0x06, - 0xfc, 0x27, 0x01, 0x62, 0x3b, 0x16, 0x4e, 0x24, 0x34, 0x5b, 0x16, 0x51, 0xd6, 0xba, 0x35, 0xd8, - 0xdc, 0x97, 0x7b, 0xea, 0xcc, 0x48, 0xc3, 0x8c, 0x6d, 0x8f, 0xf2, 0xae, 0x38, 0xc5, 0x92, 0xca, - 0x0e, 0x1b, 0x66, 0xdc, 0x44, 0x58, 0x26, 0x60, 0x2c, 0x9c, 0xce, 0x59, 0x92, 0x02, 0x5a, 0x5c, - 0x16, 0x58, 0xd9, 0x9a, 0x1d, 0x6e, 0x99, 0xae, 0x92, 0x9e, 0x83, 0xc9, 0x73, 0xf9, 0xcd, 0x71, - 0x18, 0x65, 0x2c, 0x65, 0x90, 0x16, 0x4c, 0x7a, 0xce, 0x31, 0xf5, 0xf4, 0x38, 0xd9, 0x18, 0xaf, - 0xab, 0xc8, 0x93, 0xba, 0x9a, 0x94, 0x0c, 0x59, 0x00, 0x18, 0x24, 0xb0, 0x2c, 0x65, 0xf9, 0x5d, - 0x46, 0x10, 0x19, 0x58, 0xd9, 0x7f, 0xb0, 0xcc, 0xb2, 0x16, 0xae, 0xc9, 0x87, 0x30, 0x27, 0xfb, - 0x41, 0x2c, 0x1c, 0x3f, 0x8c, 0x65, 0x2b, 0xbd, 0xc0, 0x4e, 0x80, 0x15, 0x97, 0xb5, 0xc8, 0x70, - 0xef, 0x80, 0xb3, 0x0b, 0xd9, 0x0e, 0xaa, 0xdf, 0xc0, 0x24, 0xde, 0x4d, 0x2a, 0x90, 0x19, 0x96, - 0x8e, 0x5c, 0x4a, 0x24, 0x16, 0x91, 0x4e, 0x1c, 0xb9, 0x94, 0x08, 0xef, 0xf9, 0x98, 0x23, 0x19, - 0x4b, 0x2e, 0xc9, 0xff, 0xa0, 0xc0, 0x7b, 0x3e, 0x36, 0x6d, 0x23, 0x8b, 0x70, 0x9e, 0xf7, 0x7c, - 0x59, 0x95, 0xd5, 0xdf, 0x32, 0x90, 0xd7, 0x53, 0x92, 0x4c, 0x43, 0x5a, 0x57, 0x56, 0xd6, 0x4a, - 0x33, 0x57, 0xb6, 0x4b, 0x9f, 0xfa, 0x41, 0xd4, 0x57, 0xd1, 0xc4, 0x3b, 0xb2, 0x56, 0x49, 0x61, - 0x18, 0xc4, 0x11, 0x8a, 0xc7, 0x7c, 0x26, 0xf0, 0xd2, 0x01, 0xa5, 0x29, 0x21, 0xd9, 0x30, 0xe5, - 0xc7, 0xb4, 0x83, 0x93, 0x93, 0x98, 0xaa, 0xfb, 0xb3, 0x16, 0x48, 0x68, 0x17, 0x11, 0x72, 0x1b, - 0x0a, 0xd2, 0xe2, 0x8e, 0x4f, 0x8d, 0x49, 0xf4, 0x6e, 0x60, 0x4b, 0xcf, 0x8f, 0x7b, 0xcc, 0x73, - 0x65, 0x55, 0xe6, 0x94, 0xe7, 0x68, 0x9b, 0x2e, 0x79, 0x06, 0xe5, 0x64, 0xcb, 0x3e, 0x63, 0xdc, - 0xc5, 0x1e, 0x39, 0xbd, 0xf1, 0x70, 0xac, 0x88, 0x6e, 0x2a, 0xb1, 0x1d, 0xc6, 0x5d, 0xab, 0x74, - 0x3c, 0x34, 0xae, 0xc4, 0x75, 0xea, 0x5a, 0x5c, 0x97, 0xa1, 0x7c, 0xea, 0xc4, 0x76, 0x32, 0x75, - 0xd5, 0xa4, 0x28, 0x58, 0x53, 0xa7, 0x4e, 0x9c, 0x4c, 0xe6, 0x21, 0x49, 0xbf, 0x46, 0x4d, 0x0b, - 0x4d, 0x4a, 0x30, 0xb2, 0x02, 0x15, 0x49, 0xf2, 0x18, 0xa7, 0x36, 0xef, 0xf9, 0xc7, 0x34, 0x52, - 0x53, 0xa3, 0x60, 0x4d, 0x9f, 0x3a, 0x71, 0x93, 0x71, 0xda, 0x52, 0x28, 0x59, 0x85, 0x59, 0xc9, - 0x64, 0x1c, 0xb9, 0x7a, 0x00, 0x01, 0x52, 0x67, 0x4e, 0x9d, 0xd8, 0x44, 0x5c, 0x4d, 0xa1, 0xea, - 0xf7, 0x69, 0x28, 0x24, 0x3f, 0x53, 0xae, 0x05, 0x76, 0x19, 0xca, 0xfa, 0xa7, 0x90, 0x2e, 0x22, - 0x15, 0xd9, 0x29, 0x0d, 0xaa, 0xfa, 0x31, 0x20, 0xef, 0xb8, 0x6e, 0x44, 0xe3, 0x58, 0x47, 0x35, - 0x31, 0xc9, 0x0e, 0xe6, 0x34, 0xd5, 0x3f, 0x9d, 0xc6, 0x1e, 0xcc, 0xc9, 0x3c, 0x42, 0x11, 0xf2, - 0x7f, 0x28, 0xb2, 0xd8, 0x3e, 0x09, 0x3c, 0x97, 0xba, 0x18, 0xfe, 0x82, 0x55, 0x60, 0xf1, 0x36, - 0xda, 0x38, 0x4b, 0xfb, 0x21, 0xd5, 0x5e, 0xe6, 0xb0, 0xd4, 0x8b, 0x12, 0x51, 0x2e, 0x5e, 0x0e, - 0x52, 0xfe, 0x6a, 0x90, 0xaa, 0x47, 0x38, 0x38, 0xb0, 0x81, 0x25, 0x81, 0x1a, 0x34, 0x30, 0xf9, - 0xa2, 0x72, 0x82, 0x2a, 0x39, 0xa2, 0xdf, 0xa5, 0x9b, 0x30, 0xba, 0x37, 0x0f, 0xb9, 0x4e, 0xe0, - 0xf5, 0x7c, 0xae, 0xeb, 0x49, 0x5b, 0xd5, 0x17, 0x29, 0x28, 0x24, 0x81, 0xbe, 0xf6, 0x7d, 0x09, - 0x64, 0x31, 0x9b, 0xb5, 
0x10, 0x66, 0xf2, 0x22, 0x94, 0xe2, 0x7e, 0x2c, 0xa8, 0x6f, 0xe3, 0x96, - 0x52, 0x03, 0x05, 0xb5, 0x24, 0x61, 0xb4, 0x0c, 0xb2, 0x57, 0xca, 0xe0, 0x0e, 0x80, 0x6a, 0xa8, - 0xe8, 0x9f, 0x2a, 0x92, 0x22, 0x22, 0xf2, 0x7d, 0xab, 0x3f, 0xa4, 0x60, 0xfe, 0xe6, 0xf6, 0x4e, - 0xee, 0xc1, 0x72, 0xfd, 0xf1, 0x63, 0xab, 0xf1, 0xb8, 0xde, 0x36, 0x77, 0x5b, 0x76, 0xbb, 0xf1, - 0x74, 0x6f, 0xd7, 0xaa, 0x37, 0xcd, 0xf6, 0x91, 0x7d, 0xd0, 0xda, 0xdf, 0x6b, 0x3c, 0x32, 0xb7, - 0xcd, 0xc6, 0x56, 0x65, 0x82, 0xdc, 0x85, 0x3b, 0xef, 0x22, 0x6e, 0x35, 0x9a, 0xed, 0x7a, 0x25, - 0x45, 0xde, 0x87, 0xea, 0xbb, 0x28, 0x8f, 0x0e, 0x9e, 0x1e, 0x34, 0xeb, 0x6d, 0xf3, 0xb0, 0x51, - 0x49, 0xaf, 0x7e, 0x0e, 0xa5, 0x91, 0xba, 0x22, 0xb7, 0x60, 0x66, 0xf3, 0xc0, 0x6c, 0x6e, 0xd9, - 0xe6, 0x96, 0xdd, 0x34, 0x5b, 0x3b, 0x0d, 0xab, 0x32, 0x41, 0x0c, 0x98, 0x1b, 0x80, 0x9b, 0x66, - 0xab, 0x6e, 0x1d, 0xd9, 0x4f, 0xea, 0xfb, 0x4f, 0x2a, 0xa9, 0xcd, 0x9f, 0x52, 0x2f, 0xdf, 0x2c, - 0xa4, 0x5e, 0xbd, 0x59, 0x48, 0xfd, 0xf9, 0x66, 0x21, 0xf5, 0xe3, 0xdb, 0x85, 0x89, 0x57, 0x6f, - 0x17, 0x26, 0x7e, 0x7f, 0xbb, 0x30, 0xf1, 0xad, 0x35, 0xf6, 0x24, 0x56, 0xff, 0x1b, 0x75, 0x29, - 0x7f, 0xd7, 0xbf, 0x68, 0x3f, 0xa7, 0xef, 0xef, 0x86, 0x94, 0xb7, 0x07, 0x8a, 0x7b, 0x98, 0xbe, - 0x7b, 0x49, 0xfa, 0x1e, 0xae, 0x37, 0x46, 0xd8, 0xc7, 0x39, 0xd4, 0x7b, 0xf0, 0x77, 0x00, 0x00, - 0x00, 0xff, 0xff, 0xef, 0x03, 0x47, 0x6d, 0x06, 0x0e, 0x00, 0x00, -} - -func (m *Profile) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Profile) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Profile) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.LinkTable) > 0 { - for iNdEx := len(m.LinkTable) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.LinkTable[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPprofextended(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x92 - } - } - if len(m.AttributeUnits) > 0 { - for iNdEx := len(m.AttributeUnits) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.AttributeUnits[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPprofextended(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x8a - } - } - if len(m.AttributeTable) > 0 { - for iNdEx := len(m.AttributeTable) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.AttributeTable[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPprofextended(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x82 - } - } - if len(m.LocationIndices) > 0 { - dAtA2 := make([]byte, len(m.LocationIndices)*10) - var j1 int - for _, num1 := range m.LocationIndices { - num := uint64(num1) - for num >= 1<<7 { - dAtA2[j1] = uint8(uint64(num)&0x7f | 0x80) - num >>= 7 - j1++ - } - dAtA2[j1] = uint8(num) - j1++ - } - i -= j1 - copy(dAtA[i:], dAtA2[:j1]) - i = encodeVarintPprofextended(dAtA, i, uint64(j1)) - i-- - dAtA[i] = 0x7a - } - if m.DefaultSampleType != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.DefaultSampleType)) - i-- - dAtA[i] = 0x70 - } - if len(m.Comment) > 0 { - dAtA4 := make([]byte, len(m.Comment)*10) - var j3 int - for _, num1 := range m.Comment { - num := uint64(num1) - for num >= 1<<7 { 
- dAtA4[j3] = uint8(uint64(num)&0x7f | 0x80) - num >>= 7 - j3++ - } - dAtA4[j3] = uint8(num) - j3++ - } - i -= j3 - copy(dAtA[i:], dAtA4[:j3]) - i = encodeVarintPprofextended(dAtA, i, uint64(j3)) - i-- - dAtA[i] = 0x6a - } - if m.Period != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.Period)) - i-- - dAtA[i] = 0x60 - } - { - size, err := m.PeriodType.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPprofextended(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x5a - if m.DurationNanos != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.DurationNanos)) - i-- - dAtA[i] = 0x50 - } - if m.TimeNanos != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.TimeNanos)) - i-- - dAtA[i] = 0x48 - } - if m.KeepFrames != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.KeepFrames)) - i-- - dAtA[i] = 0x40 - } - if m.DropFrames != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.DropFrames)) - i-- - dAtA[i] = 0x38 - } - if len(m.StringTable) > 0 { - for iNdEx := len(m.StringTable) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.StringTable[iNdEx]) - copy(dAtA[i:], m.StringTable[iNdEx]) - i = encodeVarintPprofextended(dAtA, i, uint64(len(m.StringTable[iNdEx]))) - i-- - dAtA[i] = 0x32 - } - } - if len(m.Function) > 0 { - for iNdEx := len(m.Function) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Function[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPprofextended(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - } - if len(m.Location) > 0 { - for iNdEx := len(m.Location) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Location[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPprofextended(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - if len(m.Mapping) > 0 { - for iNdEx := len(m.Mapping) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Mapping[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPprofextended(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.Sample) > 0 { - for iNdEx := len(m.Sample) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Sample[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPprofextended(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.SampleType) > 0 { - for iNdEx := len(m.SampleType) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.SampleType[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPprofextended(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *AttributeUnit) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AttributeUnit) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AttributeUnit) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Unit != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.Unit)) - i-- - dAtA[i] = 0x10 - } - if m.AttributeKey != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.AttributeKey)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *Link) Marshal() (dAtA []byte, err error) { - size := m.Size() 
- dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Link) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Link) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size := m.SpanId.Size() - i -= size - if _, err := m.SpanId.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintPprofextended(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size := m.TraceId.Size() - i -= size - if _, err := m.TraceId.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintPprofextended(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ValueType) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ValueType) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ValueType) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.AggregationTemporality != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.AggregationTemporality)) - i-- - dAtA[i] = 0x18 - } - if m.Unit != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.Unit)) - i-- - dAtA[i] = 0x10 - } - if m.Type != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.Type)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *Sample) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Sample) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.TimestampsUnixNano) > 0 { - dAtA7 := make([]byte, len(m.TimestampsUnixNano)*10) - var j6 int - for _, num := range m.TimestampsUnixNano { - for num >= 1<<7 { - dAtA7[j6] = uint8(uint64(num)&0x7f | 0x80) - num >>= 7 - j6++ - } - dAtA7[j6] = uint8(num) - j6++ - } - i -= j6 - copy(dAtA[i:], dAtA7[:j6]) - i = encodeVarintPprofextended(dAtA, i, uint64(j6)) - i-- - dAtA[i] = 0x6a - } - if m.Link != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.Link)) - i-- - dAtA[i] = 0x60 - } - if len(m.Attributes) > 0 { - dAtA9 := make([]byte, len(m.Attributes)*10) - var j8 int - for _, num := range m.Attributes { - for num >= 1<<7 { - dAtA9[j8] = uint8(uint64(num)&0x7f | 0x80) - num >>= 7 - j8++ - } - dAtA9[j8] = uint8(num) - j8++ - } - i -= j8 - copy(dAtA[i:], dAtA9[:j8]) - i = encodeVarintPprofextended(dAtA, i, uint64(j8)) - i-- - dAtA[i] = 0x52 - } - if m.StacktraceIdIndex != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.StacktraceIdIndex)) - i-- - dAtA[i] = 0x48 - } - if m.LocationsLength != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.LocationsLength)) - i-- - dAtA[i] = 0x40 - } - if m.LocationsStartIndex != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.LocationsStartIndex)) - i-- - dAtA[i] = 0x38 - } - if len(m.Label) > 0 { - for iNdEx := len(m.Label) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Label[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size 
- i = encodeVarintPprofextended(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.Value) > 0 { - dAtA11 := make([]byte, len(m.Value)*10) - var j10 int - for _, num1 := range m.Value { - num := uint64(num1) - for num >= 1<<7 { - dAtA11[j10] = uint8(uint64(num)&0x7f | 0x80) - num >>= 7 - j10++ - } - dAtA11[j10] = uint8(num) - j10++ - } - i -= j10 - copy(dAtA[i:], dAtA11[:j10]) - i = encodeVarintPprofextended(dAtA, i, uint64(j10)) - i-- - dAtA[i] = 0x12 - } - if len(m.LocationIndex) > 0 { - dAtA13 := make([]byte, len(m.LocationIndex)*10) - var j12 int - for _, num := range m.LocationIndex { - for num >= 1<<7 { - dAtA13[j12] = uint8(uint64(num)&0x7f | 0x80) - num >>= 7 - j12++ - } - dAtA13[j12] = uint8(num) - j12++ - } - i -= j12 - copy(dAtA[i:], dAtA13[:j12]) - i = encodeVarintPprofextended(dAtA, i, uint64(j12)) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Label) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Label) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Label) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.NumUnit != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.NumUnit)) - i-- - dAtA[i] = 0x20 - } - if m.Num != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.Num)) - i-- - dAtA[i] = 0x18 - } - if m.Str != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.Str)) - i-- - dAtA[i] = 0x10 - } - if m.Key != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.Key)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *Mapping) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Mapping) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Mapping) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Attributes) > 0 { - dAtA15 := make([]byte, len(m.Attributes)*10) - var j14 int - for _, num := range m.Attributes { - for num >= 1<<7 { - dAtA15[j14] = uint8(uint64(num)&0x7f | 0x80) - num >>= 7 - j14++ - } - dAtA15[j14] = uint8(num) - j14++ - } - i -= j14 - copy(dAtA[i:], dAtA15[:j14]) - i = encodeVarintPprofextended(dAtA, i, uint64(j14)) - i-- - dAtA[i] = 0x62 - } - if m.BuildIdKind != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.BuildIdKind)) - i-- - dAtA[i] = 0x58 - } - if m.HasInlineFrames { - i-- - if m.HasInlineFrames { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x50 - } - if m.HasLineNumbers { - i-- - if m.HasLineNumbers { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x48 - } - if m.HasFilenames { - i-- - if m.HasFilenames { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x40 - } - if m.HasFunctions { - i-- - if m.HasFunctions { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x38 - } - if m.BuildId != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.BuildId)) - i-- - dAtA[i] = 0x30 - } - if m.Filename != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.Filename)) - i-- - dAtA[i] = 0x28 - } - if m.FileOffset != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.FileOffset)) - i-- 
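
All of these MarshalToSizedBuffer methods share the same back-to-front strategy: the buffer is allocated at the exact Size() up front, i starts at len(dAtA), and fields are written right to left in descending field-number order, each followed by its key byte (0x12, 0x18, and so on), so every length prefix can be emitted immediately after its payload is in place, with no second sizing pass. A toy sketch of the pattern for a made-up two-field message; encodeDemo, sovDemo, and putVarintBackward are invented names mirroring the file's helpers:

package main

import (
	"fmt"
	"math/bits"
)

// sovDemo is the varint byte length of x, same formula as sovPprofextended.
func sovDemo(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

// putVarintBackward writes v so that it ends just before offset and returns
// the new offset, mirroring encodeVarintPprofextended.
func putVarintBackward(dAtA []byte, offset int, v uint64) int {
	offset -= sovDemo(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}

// encodeDemo marshals a hypothetical message { uint64 id = 1; bytes payload = 2; }
// back to front into an exactly sized buffer.
func encodeDemo(id uint64, payload []byte) []byte {
	size := 1 + sovDemo(id) + 1 + sovDemo(uint64(len(payload))) + len(payload)
	dAtA := make([]byte, size)
	i := len(dAtA)
	// Field 2 (wire type 2): payload, then its varint length, then key 0x12.
	i -= len(payload)
	copy(dAtA[i:], payload)
	i = putVarintBackward(dAtA, i, uint64(len(payload)))
	i--
	dAtA[i] = 0x12
	// Field 1 (wire type 0): value, then key 0x08.
	i = putVarintBackward(dAtA, i, id)
	i--
	dAtA[i] = 0x8
	return dAtA[i:]
}

func main() {
	fmt.Printf("% x\n", encodeDemo(7, []byte("hi"))) // 08 07 12 02 68 69
}
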
- dAtA[i] = 0x20 - } - if m.MemoryLimit != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.MemoryLimit)) - i-- - dAtA[i] = 0x18 - } - if m.MemoryStart != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.MemoryStart)) - i-- - dAtA[i] = 0x10 - } - if m.Id != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.Id)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *Location) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Location) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Location) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Attributes) > 0 { - dAtA17 := make([]byte, len(m.Attributes)*10) - var j16 int - for _, num := range m.Attributes { - for num >= 1<<7 { - dAtA17[j16] = uint8(uint64(num)&0x7f | 0x80) - num >>= 7 - j16++ - } - dAtA17[j16] = uint8(num) - j16++ - } - i -= j16 - copy(dAtA[i:], dAtA17[:j16]) - i = encodeVarintPprofextended(dAtA, i, uint64(j16)) - i-- - dAtA[i] = 0x3a - } - if m.TypeIndex != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.TypeIndex)) - i-- - dAtA[i] = 0x30 - } - if m.IsFolded { - i-- - if m.IsFolded { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - } - if len(m.Line) > 0 { - for iNdEx := len(m.Line) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Line[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPprofextended(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - if m.Address != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.Address)) - i-- - dAtA[i] = 0x18 - } - if m.MappingIndex != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.MappingIndex)) - i-- - dAtA[i] = 0x10 - } - if m.Id != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.Id)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *Line) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Line) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Line) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Column != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.Column)) - i-- - dAtA[i] = 0x18 - } - if m.Line != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.Line)) - i-- - dAtA[i] = 0x10 - } - if m.FunctionIndex != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.FunctionIndex)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *Function) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Function) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Function) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.StartLine != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.StartLine)) - i-- - dAtA[i] = 0x28 - } - if m.Filename != 0 { - i = encodeVarintPprofextended(dAtA, i, 
uint64(m.Filename)) - i-- - dAtA[i] = 0x20 - } - if m.SystemName != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.SystemName)) - i-- - dAtA[i] = 0x18 - } - if m.Name != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.Name)) - i-- - dAtA[i] = 0x10 - } - if m.Id != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.Id)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintPprofextended(dAtA []byte, offset int, v uint64) int { - offset -= sovPprofextended(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Profile) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.SampleType) > 0 { - for _, e := range m.SampleType { - l = e.Size() - n += 1 + l + sovPprofextended(uint64(l)) - } - } - if len(m.Sample) > 0 { - for _, e := range m.Sample { - l = e.Size() - n += 1 + l + sovPprofextended(uint64(l)) - } - } - if len(m.Mapping) > 0 { - for _, e := range m.Mapping { - l = e.Size() - n += 1 + l + sovPprofextended(uint64(l)) - } - } - if len(m.Location) > 0 { - for _, e := range m.Location { - l = e.Size() - n += 1 + l + sovPprofextended(uint64(l)) - } - } - if len(m.Function) > 0 { - for _, e := range m.Function { - l = e.Size() - n += 1 + l + sovPprofextended(uint64(l)) - } - } - if len(m.StringTable) > 0 { - for _, s := range m.StringTable { - l = len(s) - n += 1 + l + sovPprofextended(uint64(l)) - } - } - if m.DropFrames != 0 { - n += 1 + sovPprofextended(uint64(m.DropFrames)) - } - if m.KeepFrames != 0 { - n += 1 + sovPprofextended(uint64(m.KeepFrames)) - } - if m.TimeNanos != 0 { - n += 1 + sovPprofextended(uint64(m.TimeNanos)) - } - if m.DurationNanos != 0 { - n += 1 + sovPprofextended(uint64(m.DurationNanos)) - } - l = m.PeriodType.Size() - n += 1 + l + sovPprofextended(uint64(l)) - if m.Period != 0 { - n += 1 + sovPprofextended(uint64(m.Period)) - } - if len(m.Comment) > 0 { - l = 0 - for _, e := range m.Comment { - l += sovPprofextended(uint64(e)) - } - n += 1 + sovPprofextended(uint64(l)) + l - } - if m.DefaultSampleType != 0 { - n += 1 + sovPprofextended(uint64(m.DefaultSampleType)) - } - if len(m.LocationIndices) > 0 { - l = 0 - for _, e := range m.LocationIndices { - l += sovPprofextended(uint64(e)) - } - n += 1 + sovPprofextended(uint64(l)) + l - } - if len(m.AttributeTable) > 0 { - for _, e := range m.AttributeTable { - l = e.Size() - n += 2 + l + sovPprofextended(uint64(l)) - } - } - if len(m.AttributeUnits) > 0 { - for _, e := range m.AttributeUnits { - l = e.Size() - n += 2 + l + sovPprofextended(uint64(l)) - } - } - if len(m.LinkTable) > 0 { - for _, e := range m.LinkTable { - l = e.Size() - n += 2 + l + sovPprofextended(uint64(l)) - } - } - return n -} - -func (m *AttributeUnit) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.AttributeKey != 0 { - n += 1 + sovPprofextended(uint64(m.AttributeKey)) - } - if m.Unit != 0 { - n += 1 + sovPprofextended(uint64(m.Unit)) - } - return n -} - -func (m *Link) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.TraceId.Size() - n += 1 + l + sovPprofextended(uint64(l)) - l = m.SpanId.Size() - n += 1 + l + sovPprofextended(uint64(l)) - return n -} - -func (m *ValueType) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Type != 0 { - n += 1 + sovPprofextended(uint64(m.Type)) - } - if m.Unit != 0 { - n += 1 + sovPprofextended(uint64(m.Unit)) - } - if m.AggregationTemporality != 0 { - n += 1 + 
sovPprofextended(uint64(m.AggregationTemporality)) - } - return n -} - -func (m *Sample) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.LocationIndex) > 0 { - l = 0 - for _, e := range m.LocationIndex { - l += sovPprofextended(uint64(e)) - } - n += 1 + sovPprofextended(uint64(l)) + l - } - if len(m.Value) > 0 { - l = 0 - for _, e := range m.Value { - l += sovPprofextended(uint64(e)) - } - n += 1 + sovPprofextended(uint64(l)) + l - } - if len(m.Label) > 0 { - for _, e := range m.Label { - l = e.Size() - n += 1 + l + sovPprofextended(uint64(l)) - } - } - if m.LocationsStartIndex != 0 { - n += 1 + sovPprofextended(uint64(m.LocationsStartIndex)) - } - if m.LocationsLength != 0 { - n += 1 + sovPprofextended(uint64(m.LocationsLength)) - } - if m.StacktraceIdIndex != 0 { - n += 1 + sovPprofextended(uint64(m.StacktraceIdIndex)) - } - if len(m.Attributes) > 0 { - l = 0 - for _, e := range m.Attributes { - l += sovPprofextended(uint64(e)) - } - n += 1 + sovPprofextended(uint64(l)) + l - } - if m.Link != 0 { - n += 1 + sovPprofextended(uint64(m.Link)) - } - if len(m.TimestampsUnixNano) > 0 { - l = 0 - for _, e := range m.TimestampsUnixNano { - l += sovPprofextended(uint64(e)) - } - n += 1 + sovPprofextended(uint64(l)) + l - } - return n -} - -func (m *Label) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Key != 0 { - n += 1 + sovPprofextended(uint64(m.Key)) - } - if m.Str != 0 { - n += 1 + sovPprofextended(uint64(m.Str)) - } - if m.Num != 0 { - n += 1 + sovPprofextended(uint64(m.Num)) - } - if m.NumUnit != 0 { - n += 1 + sovPprofextended(uint64(m.NumUnit)) - } - return n -} - -func (m *Mapping) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Id != 0 { - n += 1 + sovPprofextended(uint64(m.Id)) - } - if m.MemoryStart != 0 { - n += 1 + sovPprofextended(uint64(m.MemoryStart)) - } - if m.MemoryLimit != 0 { - n += 1 + sovPprofextended(uint64(m.MemoryLimit)) - } - if m.FileOffset != 0 { - n += 1 + sovPprofextended(uint64(m.FileOffset)) - } - if m.Filename != 0 { - n += 1 + sovPprofextended(uint64(m.Filename)) - } - if m.BuildId != 0 { - n += 1 + sovPprofextended(uint64(m.BuildId)) - } - if m.HasFunctions { - n += 2 - } - if m.HasFilenames { - n += 2 - } - if m.HasLineNumbers { - n += 2 - } - if m.HasInlineFrames { - n += 2 - } - if m.BuildIdKind != 0 { - n += 1 + sovPprofextended(uint64(m.BuildIdKind)) - } - if len(m.Attributes) > 0 { - l = 0 - for _, e := range m.Attributes { - l += sovPprofextended(uint64(e)) - } - n += 1 + sovPprofextended(uint64(l)) + l - } - return n -} - -func (m *Location) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Id != 0 { - n += 1 + sovPprofextended(uint64(m.Id)) - } - if m.MappingIndex != 0 { - n += 1 + sovPprofextended(uint64(m.MappingIndex)) - } - if m.Address != 0 { - n += 1 + sovPprofextended(uint64(m.Address)) - } - if len(m.Line) > 0 { - for _, e := range m.Line { - l = e.Size() - n += 1 + l + sovPprofextended(uint64(l)) - } - } - if m.IsFolded { - n += 2 - } - if m.TypeIndex != 0 { - n += 1 + sovPprofextended(uint64(m.TypeIndex)) - } - if len(m.Attributes) > 0 { - l = 0 - for _, e := range m.Attributes { - l += sovPprofextended(uint64(e)) - } - n += 1 + sovPprofextended(uint64(l)) + l - } - return n -} - -func (m *Line) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.FunctionIndex != 0 { - n += 1 + sovPprofextended(uint64(m.FunctionIndex)) - } - if m.Line != 0 { - n += 1 + sovPprofextended(uint64(m.Line)) - } - if m.Column != 0 { 
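
These Size methods precompute the exact encoded length field by field: a scalar costs one key byte plus sovPprofextended(value); a length-delimited field costs the key byte, the varint length prefix, and the payload (n += 1 + l + sov(l)); a bool costs a flat two bytes (key plus one-byte value); and fields numbered 16 and above need a two-byte key (n += 2 + ..., as in Profile's AttributeTable). sovPprofextended, defined a few lines below, is the number of 7-bit groups in the value, with x|1 making sov(0) come out to one byte. A small worked check of that formula, under a shortened name:

package main

import (
	"fmt"
	"math/bits"
)

// sov uses the same formula as sovPprofextended.
func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

func main() {
	fmt.Println(sov(0))   // 1: zero still occupies one byte
	fmt.Println(sov(127)) // 1: fits in a single 7-bit group
	fmt.Println(sov(128)) // 2: needs a continuation byte
	fmt.Println(sov(300)) // 2: encodes as 0xac 0x02

	// A 2-byte payload in a low-numbered field costs
	// 1 (key) + 1 (length prefix) + 2 (payload) = 4 bytes.
	l := 2
	fmt.Println(1 + l + sov(uint64(l))) // 4
}
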
- n += 1 + sovPprofextended(uint64(m.Column)) - } - return n -} - -func (m *Function) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Id != 0 { - n += 1 + sovPprofextended(uint64(m.Id)) - } - if m.Name != 0 { - n += 1 + sovPprofextended(uint64(m.Name)) - } - if m.SystemName != 0 { - n += 1 + sovPprofextended(uint64(m.SystemName)) - } - if m.Filename != 0 { - n += 1 + sovPprofextended(uint64(m.Filename)) - } - if m.StartLine != 0 { - n += 1 + sovPprofextended(uint64(m.StartLine)) - } - return n -} - -func sovPprofextended(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozPprofextended(x uint64) (n int) { - return sovPprofextended(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Profile) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Profile: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Profile: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SampleType", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPprofextended - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPprofextended - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SampleType = append(m.SampleType, ValueType{}) - if err := m.SampleType[len(m.SampleType)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Sample", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPprofextended - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPprofextended - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Sample = append(m.Sample, Sample{}) - if err := m.Sample[len(m.Sample)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Mapping", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPprofextended - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPprofextended - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Mapping = append(m.Mapping, Mapping{}) - if err := 
m.Mapping[len(m.Mapping)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Location", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPprofextended - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPprofextended - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Location = append(m.Location, Location{}) - if err := m.Location[len(m.Location)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Function", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPprofextended - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPprofextended - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Function = append(m.Function, Function{}) - if err := m.Function[len(m.Function)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StringTable", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPprofextended - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPprofextended - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.StringTable = append(m.StringTable, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DropFrames", wireType) - } - m.DropFrames = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DropFrames |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field KeepFrames", wireType) - } - m.KeepFrames = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.KeepFrames |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeNanos", wireType) - } - m.TimeNanos = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TimeNanos |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 10: - if wireType != 0 { - 
return fmt.Errorf("proto: wrong wireType = %d for field DurationNanos", wireType) - } - m.DurationNanos = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DurationNanos |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeriodType", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPprofextended - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPprofextended - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.PeriodType.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 12: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Period", wireType) - } - m.Period = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Period |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 13: - if wireType == 0 { - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Comment = append(m.Comment, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthPprofextended - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthPprofextended - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - var count int - for _, integer := range dAtA[iNdEx:postIndex] { - if integer < 128 { - count++ - } - } - elementCount = count - if elementCount != 0 && len(m.Comment) == 0 { - m.Comment = make([]int64, 0, elementCount) - } - for iNdEx < postIndex { - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Comment = append(m.Comment, v) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field Comment", wireType) - } - case 14: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultSampleType", wireType) - } - m.DefaultSampleType = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DefaultSampleType |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 15: - if wireType == 0 { - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.LocationIndices = append(m.LocationIndices, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthPprofextended - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthPprofextended - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - var count int - for _, integer := range dAtA[iNdEx:postIndex] { - if integer < 128 { - count++ - } - } - elementCount = count - if elementCount != 0 && len(m.LocationIndices) == 0 { - m.LocationIndices = make([]int64, 0, elementCount) - } - for iNdEx < postIndex { - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.LocationIndices = append(m.LocationIndices, v) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field LocationIndices", wireType) - } - case 16: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AttributeTable", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPprofextended - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPprofextended - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AttributeTable = append(m.AttributeTable, v1.KeyValue{}) - if err := m.AttributeTable[len(m.AttributeTable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 17: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AttributeUnits", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPprofextended - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPprofextended - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AttributeUnits = append(m.AttributeUnits, AttributeUnit{}) - if err := m.AttributeUnits[len(m.AttributeUnits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 18: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LinkTable", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPprofextended - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPprofextended - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LinkTable = append(m.LinkTable, Link{}) - if err 
:= m.LinkTable[len(m.LinkTable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPprofextended(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPprofextended - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AttributeUnit) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AttributeUnit: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AttributeUnit: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AttributeKey", wireType) - } - m.AttributeKey = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.AttributeKey |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType) - } - m.Unit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Unit |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipPprofextended(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPprofextended - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Link) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Link: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Link: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthPprofextended - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthPprofextended - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := 
m.TraceId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthPprofextended - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthPprofextended - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.SpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPprofextended(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPprofextended - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ValueType) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ValueType: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ValueType: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Type |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType) - } - m.Unit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Unit |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType) - } - m.AggregationTemporality = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.AggregationTemporality |= AggregationTemporality(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipPprofextended(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPprofextended - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Sample) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Sample: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Sample: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType == 0 { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.LocationIndex = append(m.LocationIndex, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthPprofextended - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthPprofextended - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - var count int - for _, integer := range dAtA[iNdEx:postIndex] { - if integer < 128 { - count++ - } - } - elementCount = count - if elementCount != 0 && len(m.LocationIndex) == 0 { - m.LocationIndex = make([]uint64, 0, elementCount) - } - for iNdEx < postIndex { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.LocationIndex = append(m.LocationIndex, v) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field LocationIndex", wireType) - } - case 2: - if wireType == 0 { - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Value = append(m.Value, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthPprofextended - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthPprofextended - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - var count int - for _, integer := range dAtA[iNdEx:postIndex] { - if integer < 128 { - count++ - } - } - elementCount = count - if elementCount != 0 && len(m.Value) == 0 { - m.Value = make([]int64, 0, elementCount) - } - for iNdEx < postIndex { - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Value = append(m.Value, v) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType 
= %d for field Label", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPprofextended - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPprofextended - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Label = append(m.Label, Label{}) - if err := m.Label[len(m.Label)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field LocationsStartIndex", wireType) - } - m.LocationsStartIndex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.LocationsStartIndex |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field LocationsLength", wireType) - } - m.LocationsLength = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.LocationsLength |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StacktraceIdIndex", wireType) - } - m.StacktraceIdIndex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.StacktraceIdIndex |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 10: - if wireType == 0 { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Attributes = append(m.Attributes, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthPprofextended - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthPprofextended - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - var count int - for _, integer := range dAtA[iNdEx:postIndex] { - if integer < 128 { - count++ - } - } - elementCount = count - if elementCount != 0 && len(m.Attributes) == 0 { - m.Attributes = make([]uint64, 0, elementCount) - } - for iNdEx < postIndex { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Attributes = append(m.Attributes, v) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - case 12: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Link", wireType) - } - m.Link = 0 - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Link |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 13: - if wireType == 0 { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.TimestampsUnixNano = append(m.TimestampsUnixNano, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthPprofextended - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthPprofextended - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - var count int - for _, integer := range dAtA[iNdEx:postIndex] { - if integer < 128 { - count++ - } - } - elementCount = count - if elementCount != 0 && len(m.TimestampsUnixNano) == 0 { - m.TimestampsUnixNano = make([]uint64, 0, elementCount) - } - for iNdEx < postIndex { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.TimestampsUnixNano = append(m.TimestampsUnixNano, v) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field TimestampsUnixNano", wireType) - } - default: - iNdEx = preIndex - skippy, err := skipPprofextended(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPprofextended - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Label) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Label: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Label: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - m.Key = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Key |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Str", wireType) - } - m.Str = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Str |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - 
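
Each scalar case in these Unmarshal methods is the same hand-rolled varint read: accumulate seven data bits per byte into the field, stop on a byte with the high bit clear, and bail out with ErrIntOverflowPprofextended once shift reaches 64, since a valid 64-bit varint never spans more than ten bytes. The loop in isolation, under hypothetical names:

package main

import (
	"errors"
	"fmt"
	"io"
)

var errIntOverflow = errors.New("proto: integer overflow")

// readUvarint mirrors the generated decode loop: seven data bits per byte,
// least-significant group first, high bit as the continuation flag.
func readUvarint(dAtA []byte, iNdEx int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errIntOverflow
		}
		if iNdEx >= len(dAtA) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		b := dAtA[iNdEx]
		iNdEx++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	return v, iNdEx, nil
}

func main() {
	v, next, err := readUvarint([]byte{0xac, 0x02}, 0)
	fmt.Println(v, next, err) // 300 2 <nil>
}
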
} - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Num", wireType) - } - m.Num = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Num |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumUnit", wireType) - } - m.NumUnit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumUnit |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipPprofextended(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPprofextended - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Mapping) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Mapping: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Mapping: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) - } - m.Id = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Id |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MemoryStart", wireType) - } - m.MemoryStart = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MemoryStart |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MemoryLimit", wireType) - } - m.MemoryLimit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MemoryLimit |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FileOffset", wireType) - } - m.FileOffset = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.FileOffset |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Filename", wireType) - } - m.Filename = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Filename |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BuildId", wireType) - } - m.BuildId = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.BuildId |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HasFunctions", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.HasFunctions = bool(v != 0) - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HasFilenames", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.HasFilenames = bool(v != 0) - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HasLineNumbers", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.HasLineNumbers = bool(v != 0) - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HasInlineFrames", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.HasInlineFrames = bool(v != 0) - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BuildIdKind", wireType) - } - m.BuildIdKind = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.BuildIdKind |= BuildIdKind(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 12: - if wireType == 0 { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Attributes = append(m.Attributes, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthPprofextended - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthPprofextended - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - var count int - for _, integer := range dAtA[iNdEx:postIndex] { - if integer < 128 { - count++ - } - } - elementCount = count - if elementCount != 0 && 
len(m.Attributes) == 0 { - m.Attributes = make([]uint64, 0, elementCount) - } - for iNdEx < postIndex { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Attributes = append(m.Attributes, v) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - default: - iNdEx = preIndex - skippy, err := skipPprofextended(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPprofextended - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Location) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Location: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Location: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) - } - m.Id = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Id |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MappingIndex", wireType) - } - m.MappingIndex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MappingIndex |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) - } - m.Address = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Address |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Line", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPprofextended - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPprofextended - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Line = append(m.Line, Line{}) - if err := m.Line[len(m.Line)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IsFolded", wireType) - } - var v int - for shift := uint(0); ; shift 
+= 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.IsFolded = bool(v != 0) - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TypeIndex", wireType) - } - m.TypeIndex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TypeIndex |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType == 0 { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Attributes = append(m.Attributes, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthPprofextended - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthPprofextended - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - var count int - for _, integer := range dAtA[iNdEx:postIndex] { - if integer < 128 { - count++ - } - } - elementCount = count - if elementCount != 0 && len(m.Attributes) == 0 { - m.Attributes = make([]uint64, 0, elementCount) - } - for iNdEx < postIndex { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Attributes = append(m.Attributes, v) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - default: - iNdEx = preIndex - skippy, err := skipPprofextended(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPprofextended - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Line) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Line: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Line: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FunctionIndex", wireType) - } - m.FunctionIndex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.FunctionIndex |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - 
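
The tag handling that opens every one of these message loops splits the key varint into fieldNum := wire >> 3 and wireType := wire & 0x7, the exact inverse of the key bytes the marshalers write (0x12 is field 2 with wire type 2, 0x18 is field 3 with wire type 0, and so on); wire type 4 is the legacy end-group marker, hence its explicit rejection. The arithmetic worked in both directions, with throwaway names:

package main

import "fmt"

func main() {
	// Encode: key = fieldNum<<3 | wireType.
	fmt.Printf("%#x\n", 2<<3|2) // 0x12: field 2, length-delimited
	fmt.Printf("%#x\n", 3<<3|0) // 0x18: field 3, varint

	// Decode: the inverse split used by the generated Unmarshal loops.
	wire := uint64(0x1a)
	fieldNum := int32(wire >> 3)
	wireType := int(wire & 0x7)
	fmt.Println(fieldNum, wireType) // 3 2
}
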
case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Line", wireType) - } - m.Line = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Line |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Column", wireType) - } - m.Column = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Column |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipPprofextended(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPprofextended - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Function) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Function: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Function: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) - } - m.Id = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Id |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - m.Name = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Name |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SystemName", wireType) - } - m.SystemName = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SystemName |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Filename", wireType) - } - m.Filename = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Filename |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StartLine", wireType) - } - m.StartLine = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - 
b := dAtA[iNdEx] - iNdEx++ - m.StartLine |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipPprofextended(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPprofextended - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipPprofextended(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPprofextended - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPprofextended - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPprofextended - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthPprofextended - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupPprofextended - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthPprofextended - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthPprofextended = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowPprofextended = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupPprofextended = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1experimental/profiles.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1experimental/profiles.pb.go deleted file mode 100644 index 6e4662c248b..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1experimental/profiles.pb.go +++ /dev/null @@ -1,1482 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: opentelemetry/proto/profiles/v1experimental/profiles.proto - -package v1experimental - -import ( - encoding_binary "encoding/binary" - fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - - v11 "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" - v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
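Every Unmarshal and skip function deleted above inlines the same protocol-buffer base-128 varint decode: seven payload bits per byte, the high bit as a continuation flag, and an overflow error once the shift reaches 64. A minimal standalone sketch of that loop in Go (the helper name and error text are illustrative, not part of the generated API):

package main

import (
	"errors"
	"fmt"
)

// decodeVarint mirrors the inlined loops above: accumulate seven bits per
// byte, least-significant group first, until a byte with the high bit clear
// terminates the value.
func decodeVarint(buf []byte) (v uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			// the generated code returns ErrIntOverflowPprofextended here
			return 0, 0, errors.New("varint overflows uint64")
		}
		if n >= len(buf) {
			// the generated code returns io.ErrUnexpectedEOF here
			return 0, 0, errors.New("unexpected end of buffer")
		}
		b := buf[n]
		n++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 { // high bit clear: last byte of this varint
			return v, n, nil
		}
	}
}

func main() {
	// 300 (0b1_0010_1100) encodes as 0xAC 0x02, low seven bits first.
	v, n, err := decodeVarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>
}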
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// ProfilesData represents the profiles data that can be stored in persistent storage, -// OR can be embedded by other protocols that transfer OTLP profiles data but do not -// implement the OTLP protocol. -// -// The main difference between this message and collector protocol is that -// in this message there will not be any "control" or "metadata" specific to -// OTLP protocol. -// -// When new fields are added into this message, the OTLP request MUST be updated -// as well. -type ProfilesData struct { - // An array of ResourceProfiles. - // For data coming from a single resource this array will typically contain - // one element. Intermediary nodes that receive data from multiple origins - // typically batch the data before forwarding further and in that case this - // array will contain multiple elements. - ResourceProfiles []*ResourceProfiles `protobuf:"bytes,1,rep,name=resource_profiles,json=resourceProfiles,proto3" json:"resource_profiles,omitempty"` -} - -func (m *ProfilesData) Reset() { *m = ProfilesData{} } -func (m *ProfilesData) String() string { return proto.CompactTextString(m) } -func (*ProfilesData) ProtoMessage() {} -func (*ProfilesData) Descriptor() ([]byte, []int) { - return fileDescriptor_394731f2296acea3, []int{0} -} -func (m *ProfilesData) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ProfilesData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ProfilesData.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ProfilesData) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProfilesData.Merge(m, src) -} -func (m *ProfilesData) XXX_Size() int { - return m.Size() -} -func (m *ProfilesData) XXX_DiscardUnknown() { - xxx_messageInfo_ProfilesData.DiscardUnknown(m) -} - -var xxx_messageInfo_ProfilesData proto.InternalMessageInfo - -func (m *ProfilesData) GetResourceProfiles() []*ResourceProfiles { - if m != nil { - return m.ResourceProfiles - } - return nil -} - -// A collection of ScopeProfiles from a Resource. -type ResourceProfiles struct { - // The resource for the profiles in this message. - // If this field is not set then no resource info is known. - Resource v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource"` - // A list of ScopeProfiles that originate from a resource. - ScopeProfiles []*ScopeProfiles `protobuf:"bytes,2,rep,name=scope_profiles,json=scopeProfiles,proto3" json:"scope_profiles,omitempty"` - // The Schema URL, if known. This is the identifier of the Schema that the resource data - // is recorded in. To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to the data in the "resource" field. It does not apply - // to the data in the "scope_profiles" field which have their own schema_url field. 
- SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` -} - -func (m *ResourceProfiles) Reset() { *m = ResourceProfiles{} } -func (m *ResourceProfiles) String() string { return proto.CompactTextString(m) } -func (*ResourceProfiles) ProtoMessage() {} -func (*ResourceProfiles) Descriptor() ([]byte, []int) { - return fileDescriptor_394731f2296acea3, []int{1} -} -func (m *ResourceProfiles) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceProfiles) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResourceProfiles.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResourceProfiles) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceProfiles.Merge(m, src) -} -func (m *ResourceProfiles) XXX_Size() int { - return m.Size() -} -func (m *ResourceProfiles) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceProfiles.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceProfiles proto.InternalMessageInfo - -func (m *ResourceProfiles) GetResource() v1.Resource { - if m != nil { - return m.Resource - } - return v1.Resource{} -} - -func (m *ResourceProfiles) GetScopeProfiles() []*ScopeProfiles { - if m != nil { - return m.ScopeProfiles - } - return nil -} - -func (m *ResourceProfiles) GetSchemaUrl() string { - if m != nil { - return m.SchemaUrl - } - return "" -} - -// A collection of ProfileContainers produced by an InstrumentationScope. -type ScopeProfiles struct { - // The instrumentation scope information for the profiles in this message. - // Semantically when InstrumentationScope isn't set, it is equivalent with - // an empty instrumentation scope name (unknown). - Scope v11.InstrumentationScope `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope"` - // A list of ProfileContainers that originate from an instrumentation scope. - Profiles []*ProfileContainer `protobuf:"bytes,2,rep,name=profiles,proto3" json:"profiles,omitempty"` - // The Schema URL, if known. This is the identifier of the Schema that the metric data - // is recorded in. To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to all profiles in the "profiles" field. 
- SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` -} - -func (m *ScopeProfiles) Reset() { *m = ScopeProfiles{} } -func (m *ScopeProfiles) String() string { return proto.CompactTextString(m) } -func (*ScopeProfiles) ProtoMessage() {} -func (*ScopeProfiles) Descriptor() ([]byte, []int) { - return fileDescriptor_394731f2296acea3, []int{2} -} -func (m *ScopeProfiles) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ScopeProfiles) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ScopeProfiles.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ScopeProfiles) XXX_Merge(src proto.Message) { - xxx_messageInfo_ScopeProfiles.Merge(m, src) -} -func (m *ScopeProfiles) XXX_Size() int { - return m.Size() -} -func (m *ScopeProfiles) XXX_DiscardUnknown() { - xxx_messageInfo_ScopeProfiles.DiscardUnknown(m) -} - -var xxx_messageInfo_ScopeProfiles proto.InternalMessageInfo - -func (m *ScopeProfiles) GetScope() v11.InstrumentationScope { - if m != nil { - return m.Scope - } - return v11.InstrumentationScope{} -} - -func (m *ScopeProfiles) GetProfiles() []*ProfileContainer { - if m != nil { - return m.Profiles - } - return nil -} - -func (m *ScopeProfiles) GetSchemaUrl() string { - if m != nil { - return m.SchemaUrl - } - return "" -} - -// A ProfileContainer represents a single profile. It wraps pprof profile with OpenTelemetry specific metadata. -type ProfileContainer struct { - // A globally unique identifier for a profile. The ID is a 16-byte array. An ID with - // all zeroes is considered invalid. - // - // This field is required. - ProfileId []byte `protobuf:"bytes,1,opt,name=profile_id,json=profileId,proto3" json:"profile_id,omitempty"` - // start_time_unix_nano is the start time of the profile. - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - // - // This field is semantically required and it is expected that end_time >= start_time. - StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` - // end_time_unix_nano is the end time of the profile. - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - // - // This field is semantically required and it is expected that end_time >= start_time. - EndTimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=end_time_unix_nano,json=endTimeUnixNano,proto3" json:"end_time_unix_nano,omitempty"` - // attributes is a collection of key/value pairs. Note, global attributes - // like server name can be set using the resource API. Examples of attributes: - // - // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" - // "/http/server_latency": 300 - // "abc.com/myattribute": true - // "abc.com/score": 10.239 - // - // The OpenTelemetry API specification further restricts the allowed value types: - // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). 
- Attributes []v11.KeyValue `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes"` - // dropped_attributes_count is the number of attributes that were discarded. Attributes - // can be discarded because their keys are too long or because there are too many - // attributes. If this value is 0, then no attributes were dropped. - DroppedAttributesCount uint32 `protobuf:"varint,5,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` - // Specifies format of the original payload. Common values are defined in semantic conventions. [required if original_payload is present] - OriginalPayloadFormat string `protobuf:"bytes,6,opt,name=original_payload_format,json=originalPayloadFormat,proto3" json:"original_payload_format,omitempty"` - // Original payload can be stored in this field. This can be useful for users who want to get the original payload. - // Formats such as JFR are highly extensible and can contain more information than what is defined in this spec. - // Inclusion of original payload should be configurable by the user. Default behavior should be to not include the original payload. - // If the original payload is in pprof format, it SHOULD not be included in this field. - // The field is optional, however if it is present `profile` MUST be present and contain the same profiling information. - OriginalPayload []byte `protobuf:"bytes,7,opt,name=original_payload,json=originalPayload,proto3" json:"original_payload,omitempty"` - // This is a reference to a pprof profile. Required, even when original_payload is present. - Profile Profile `protobuf:"bytes,8,opt,name=profile,proto3" json:"profile"` -} - -func (m *ProfileContainer) Reset() { *m = ProfileContainer{} } -func (m *ProfileContainer) String() string { return proto.CompactTextString(m) } -func (*ProfileContainer) ProtoMessage() {} -func (*ProfileContainer) Descriptor() ([]byte, []int) { - return fileDescriptor_394731f2296acea3, []int{3} -} -func (m *ProfileContainer) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ProfileContainer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ProfileContainer.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ProfileContainer) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProfileContainer.Merge(m, src) -} -func (m *ProfileContainer) XXX_Size() int { - return m.Size() -} -func (m *ProfileContainer) XXX_DiscardUnknown() { - xxx_messageInfo_ProfileContainer.DiscardUnknown(m) -} - -var xxx_messageInfo_ProfileContainer proto.InternalMessageInfo - -func (m *ProfileContainer) GetProfileId() []byte { - if m != nil { - return m.ProfileId - } - return nil -} - -func (m *ProfileContainer) GetStartTimeUnixNano() uint64 { - if m != nil { - return m.StartTimeUnixNano - } - return 0 -} - -func (m *ProfileContainer) GetEndTimeUnixNano() uint64 { - if m != nil { - return m.EndTimeUnixNano - } - return 0 -} - -func (m *ProfileContainer) GetAttributes() []v11.KeyValue { - if m != nil { - return m.Attributes - } - return nil -} - -func (m *ProfileContainer) GetDroppedAttributesCount() uint32 { - if m != nil { - return m.DroppedAttributesCount - } - return 0 -} - -func (m *ProfileContainer) GetOriginalPayloadFormat() string { - if m != nil { - return m.OriginalPayloadFormat - } - return "" -} - -func (m *ProfileContainer) GetOriginalPayload() []byte { - 
if m != nil { - return m.OriginalPayload - } - return nil -} - -func (m *ProfileContainer) GetProfile() Profile { - if m != nil { - return m.Profile - } - return Profile{} -} - -func init() { - proto.RegisterType((*ProfilesData)(nil), "opentelemetry.proto.profiles.v1experimental.ProfilesData") - proto.RegisterType((*ResourceProfiles)(nil), "opentelemetry.proto.profiles.v1experimental.ResourceProfiles") - proto.RegisterType((*ScopeProfiles)(nil), "opentelemetry.proto.profiles.v1experimental.ScopeProfiles") - proto.RegisterType((*ProfileContainer)(nil), "opentelemetry.proto.profiles.v1experimental.ProfileContainer") -} - -func init() { - proto.RegisterFile("opentelemetry/proto/profiles/v1experimental/profiles.proto", fileDescriptor_394731f2296acea3) -} - -var fileDescriptor_394731f2296acea3 = []byte{ - // 652 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0x51, 0x6b, 0xdb, 0x3a, - 0x14, 0xc7, 0xe3, 0xa6, 0x4d, 0x52, 0xb5, 0xb9, 0x4d, 0x45, 0xef, 0xbd, 0xa6, 0x70, 0x73, 0x43, - 0x5e, 0x96, 0xae, 0x60, 0x93, 0x76, 0x8c, 0x51, 0x18, 0x63, 0xed, 0x36, 0xe8, 0xca, 0xd6, 0xe0, - 0xb5, 0x85, 0xed, 0xc5, 0xa8, 0xf1, 0x69, 0xa6, 0x61, 0x4b, 0x46, 0x96, 0x43, 0xba, 0x4f, 0xb1, - 0xcf, 0xb1, 0x4f, 0xd2, 0xc7, 0xee, 0x6d, 0x6c, 0x30, 0x46, 0xfb, 0xb2, 0x7e, 0x8b, 0x61, 0x59, - 0xf6, 0x12, 0x93, 0x51, 0xb2, 0x17, 0x23, 0x9f, 0xf3, 0x3f, 0xbf, 0xa3, 0xff, 0x91, 0x10, 0xda, - 0xe1, 0x21, 0x30, 0x09, 0x3e, 0x04, 0x20, 0xc5, 0xb9, 0x1d, 0x0a, 0x2e, 0x79, 0xf2, 0x3d, 0xa3, - 0x3e, 0x44, 0xf6, 0xb0, 0x0b, 0xa3, 0x10, 0x04, 0x0d, 0x80, 0x49, 0xe2, 0xe7, 0x71, 0x4b, 0xc9, - 0xf0, 0xe6, 0x44, 0x6d, 0x1a, 0xb4, 0x72, 0xcd, 0x64, 0xed, 0xfa, 0xda, 0x80, 0x0f, 0x78, 0x8a, - 0x4f, 0x56, 0xa9, 0x7a, 0xfd, 0xee, 0xb4, 0xf6, 0x7d, 0x1e, 0x04, 0x9c, 0xd9, 0xc3, 0xae, 0x5e, - 0x69, 0xad, 0x35, 0x4d, 0x2b, 0x20, 0xe2, 0xb1, 0xe8, 0x43, 0xa2, 0xce, 0xd6, 0x5a, 0xff, 0x68, - 0x26, 0x6b, 0x49, 0x02, 0x46, 0x12, 0x98, 0x07, 0x5e, 0x0a, 0x68, 0xbf, 0x47, 0xcb, 0x3d, 0x2d, - 0x7f, 0x42, 0x24, 0xc1, 0xef, 0xd0, 0x6a, 0xd6, 0xc2, 0xcd, 0x38, 0xa6, 0xd1, 0x2a, 0x77, 0x96, - 0xb6, 0x1e, 0x5a, 0x33, 0xcc, 0xc2, 0x72, 0x34, 0x25, 0xa3, 0x3b, 0x0d, 0x51, 0x88, 0xb4, 0x6f, - 0x0c, 0xd4, 0x28, 0xca, 0xf0, 0x01, 0xaa, 0x65, 0x42, 0xd3, 0x68, 0x19, 0x9d, 0xa5, 0xad, 0x8d, - 0xa9, 0x7d, 0xf3, 0x41, 0x0c, 0xbb, 0x79, 0xaf, 0xdd, 0xf9, 0x8b, 0x6f, 0xff, 0x97, 0x9c, 0x1c, - 0x80, 0x09, 0xfa, 0x2b, 0xea, 0xf3, 0x70, 0xcc, 0xca, 0x9c, 0xb2, 0xb2, 0x33, 0x93, 0x95, 0x57, - 0x09, 0x22, 0xf7, 0x51, 0x8f, 0xc6, 0x7f, 0xf1, 0x7f, 0x08, 0x45, 0xfd, 0xb7, 0x10, 0x10, 0x37, - 0x16, 0xbe, 0x59, 0x6e, 0x19, 0x9d, 0x45, 0x67, 0x31, 0x8d, 0x1c, 0x0b, 0xff, 0x79, 0xa5, 0xf6, - 0xa3, 0xda, 0xb8, 0xa9, 0xb6, 0xbf, 0x18, 0xa8, 0x3e, 0xc1, 0xc1, 0x87, 0x68, 0x41, 0x91, 0xb4, - 0xcb, 0xed, 0xa9, 0x5b, 0xd2, 0x97, 0x63, 0xd8, 0xb5, 0xf6, 0x59, 0x24, 0x45, 0xac, 0x76, 0x24, - 0x29, 0x67, 0x8a, 0xa5, 0xfd, 0xa6, 0x1c, 0xfc, 0x1a, 0xd5, 0x0a, 0x36, 0x67, 0x3b, 0x31, 0xbd, - 0xb3, 0x3d, 0xce, 0x24, 0xa1, 0x0c, 0x84, 0x93, 0xe3, 0x6e, 0x31, 0xd9, 0xfe, 0x54, 0x46, 0x8d, - 0x62, 0x75, 0x52, 0xa3, 0xeb, 0x5d, 0xea, 0x29, 0x93, 0xcb, 0xce, 0xa2, 0x8e, 0xec, 0x7b, 0xd8, - 0x46, 0x6b, 0x91, 0x24, 0x42, 0xba, 0x92, 0x06, 0xe0, 0xc6, 0x8c, 0x8e, 0x5c, 0x46, 0x18, 0x37, - 0xe7, 0x5a, 0x46, 0xa7, 0xe2, 0xac, 0xaa, 0xdc, 0x11, 0x0d, 0xe0, 0x98, 0xd1, 0xd1, 0x4b, 0xc2, - 0x38, 0xde, 0x44, 0x18, 0x98, 0x57, 0x94, 0x97, 0x95, 0x7c, 0x05, 0x98, 0x37, 0x21, 0x7e, 0x81, - 0x10, 0x91, 0x52, 0xd0, 0xd3, 0x58, 0x42, 0x64, 
0xce, 0xab, 0x69, 0xdc, 0xb9, 0x65, 0xc2, 0x07, - 0x70, 0x7e, 0x42, 0xfc, 0x38, 0x9b, 0xea, 0x18, 0x00, 0x3f, 0x40, 0xa6, 0x27, 0x78, 0x18, 0x82, - 0xe7, 0xfe, 0x8a, 0xba, 0x7d, 0x1e, 0x33, 0x69, 0x2e, 0xb4, 0x8c, 0x4e, 0xdd, 0xf9, 0x47, 0xe7, - 0x1f, 0xe7, 0xe9, 0xbd, 0x24, 0x8b, 0xef, 0xa3, 0x7f, 0xb9, 0xa0, 0x03, 0xca, 0x88, 0xef, 0x86, - 0xe4, 0xdc, 0xe7, 0xc4, 0x73, 0xcf, 0xb8, 0x08, 0x88, 0x34, 0x2b, 0x6a, 0x8c, 0x7f, 0x67, 0xe9, - 0x5e, 0x9a, 0x7d, 0xa6, 0x92, 0x78, 0x03, 0x35, 0x8a, 0x75, 0x66, 0x55, 0xcd, 0x70, 0xa5, 0x50, - 0x80, 0x8f, 0x50, 0x55, 0x8f, 0xd5, 0xac, 0xa9, 0xab, 0x74, 0xef, 0x4f, 0x8e, 0x5d, 0xbb, 0xce, - 0x50, 0xbb, 0x5f, 0x8d, 0x8b, 0xab, 0xa6, 0x71, 0x79, 0xd5, 0x34, 0xbe, 0x5f, 0x35, 0x8d, 0x0f, - 0xd7, 0xcd, 0xd2, 0xe5, 0x75, 0xb3, 0xf4, 0xf9, 0xba, 0x59, 0x42, 0x16, 0xe5, 0xb3, 0x74, 0xd8, - 0xad, 0x67, 0x77, 0xbe, 0x97, 0xc8, 0x7a, 0xc6, 0x1b, 0x67, 0x50, 0x04, 0xd0, 0xe4, 0x45, 0xf4, - 0x7d, 0xe8, 0x4b, 0x2e, 0xec, 0xd0, 0x23, 0x92, 0xd8, 0x94, 0x49, 0x10, 0x8c, 0xf8, 0xb6, 0xfa, - 0x53, 0x1d, 0x06, 0xc0, 0x7e, 0xf7, 0xb8, 0x7d, 0x9c, 0xdb, 0x3c, 0x0c, 0x81, 0x1d, 0xe5, 0x44, - 0xd5, 0x2b, 0x33, 0x17, 0x59, 0x27, 0xdd, 0xa7, 0x63, 0xea, 0xd3, 0x8a, 0xe2, 0x6d, 0xff, 0x0c, - 0x00, 0x00, 0xff, 0xff, 0xe1, 0x89, 0x49, 0xa4, 0x1b, 0x06, 0x00, 0x00, -} - -func (m *ProfilesData) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ProfilesData) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ProfilesData) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ResourceProfiles) > 0 { - for iNdEx := len(m.ResourceProfiles) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ResourceProfiles[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProfiles(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ResourceProfiles) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceProfiles) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceProfiles) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.SchemaUrl) > 0 { - i -= len(m.SchemaUrl) - copy(dAtA[i:], m.SchemaUrl) - i = encodeVarintProfiles(dAtA, i, uint64(len(m.SchemaUrl))) - i-- - dAtA[i] = 0x1a - } - if len(m.ScopeProfiles) > 0 { - for iNdEx := len(m.ScopeProfiles) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ScopeProfiles[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProfiles(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProfiles(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ScopeProfiles) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ScopeProfiles) 
MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ScopeProfiles) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.SchemaUrl) > 0 { - i -= len(m.SchemaUrl) - copy(dAtA[i:], m.SchemaUrl) - i = encodeVarintProfiles(dAtA, i, uint64(len(m.SchemaUrl))) - i-- - dAtA[i] = 0x1a - } - if len(m.Profiles) > 0 { - for iNdEx := len(m.Profiles) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Profiles[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProfiles(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.Scope.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProfiles(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ProfileContainer) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ProfileContainer) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ProfileContainer) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Profile.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProfiles(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - if len(m.OriginalPayload) > 0 { - i -= len(m.OriginalPayload) - copy(dAtA[i:], m.OriginalPayload) - i = encodeVarintProfiles(dAtA, i, uint64(len(m.OriginalPayload))) - i-- - dAtA[i] = 0x3a - } - if len(m.OriginalPayloadFormat) > 0 { - i -= len(m.OriginalPayloadFormat) - copy(dAtA[i:], m.OriginalPayloadFormat) - i = encodeVarintProfiles(dAtA, i, uint64(len(m.OriginalPayloadFormat))) - i-- - dAtA[i] = 0x32 - } - if m.DroppedAttributesCount != 0 { - i = encodeVarintProfiles(dAtA, i, uint64(m.DroppedAttributesCount)) - i-- - dAtA[i] = 0x28 - } - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProfiles(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - if m.EndTimeUnixNano != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.EndTimeUnixNano)) - i-- - dAtA[i] = 0x19 - } - if m.StartTimeUnixNano != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano)) - i-- - dAtA[i] = 0x11 - } - if len(m.ProfileId) > 0 { - i -= len(m.ProfileId) - copy(dAtA[i:], m.ProfileId) - i = encodeVarintProfiles(dAtA, i, uint64(len(m.ProfileId))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintProfiles(dAtA []byte, offset int, v uint64) int { - offset -= sovProfiles(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ProfilesData) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ResourceProfiles) > 0 { - for _, e := range m.ResourceProfiles { - l = e.Size() - n += 1 + l + sovProfiles(uint64(l)) - } - } - return n -} - -func (m *ResourceProfiles) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Resource.Size() - n += 1 + l + sovProfiles(uint64(l)) - if 
len(m.ScopeProfiles) > 0 { - for _, e := range m.ScopeProfiles { - l = e.Size() - n += 1 + l + sovProfiles(uint64(l)) - } - } - l = len(m.SchemaUrl) - if l > 0 { - n += 1 + l + sovProfiles(uint64(l)) - } - return n -} - -func (m *ScopeProfiles) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Scope.Size() - n += 1 + l + sovProfiles(uint64(l)) - if len(m.Profiles) > 0 { - for _, e := range m.Profiles { - l = e.Size() - n += 1 + l + sovProfiles(uint64(l)) - } - } - l = len(m.SchemaUrl) - if l > 0 { - n += 1 + l + sovProfiles(uint64(l)) - } - return n -} - -func (m *ProfileContainer) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ProfileId) - if l > 0 { - n += 1 + l + sovProfiles(uint64(l)) - } - if m.StartTimeUnixNano != 0 { - n += 9 - } - if m.EndTimeUnixNano != 0 { - n += 9 - } - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovProfiles(uint64(l)) - } - } - if m.DroppedAttributesCount != 0 { - n += 1 + sovProfiles(uint64(m.DroppedAttributesCount)) - } - l = len(m.OriginalPayloadFormat) - if l > 0 { - n += 1 + l + sovProfiles(uint64(l)) - } - l = len(m.OriginalPayload) - if l > 0 { - n += 1 + l + sovProfiles(uint64(l)) - } - l = m.Profile.Size() - n += 1 + l + sovProfiles(uint64(l)) - return n -} - -func sovProfiles(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozProfiles(x uint64) (n int) { - return sovProfiles(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *ProfilesData) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ProfilesData: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ProfilesData: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceProfiles", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResourceProfiles = append(m.ResourceProfiles, &ResourceProfiles{}) - if err := m.ResourceProfiles[len(m.ResourceProfiles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipProfiles(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProfiles - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceProfiles) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceProfiles: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceProfiles: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ScopeProfiles", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ScopeProfiles = append(m.ScopeProfiles, &ScopeProfiles{}) - if err := m.ScopeProfiles[len(m.ScopeProfiles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SchemaUrl = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipProfiles(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProfiles - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ScopeProfiles) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ScopeProfiles: wiretype end group for non-group") - } - if fieldNum <= 0 { - return 
fmt.Errorf("proto: ScopeProfiles: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Scope.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Profiles", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Profiles = append(m.Profiles, &ProfileContainer{}) - if err := m.Profiles[len(m.Profiles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SchemaUrl = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipProfiles(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProfiles - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ProfileContainer) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ProfileContainer: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ProfileContainer: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProfileId", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ProfileId = append(m.ProfileId[:0], dAtA[iNdEx:postIndex]...) - if m.ProfileId == nil { - m.ProfileId = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType) - } - m.StartTimeUnixNano = 0 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - m.StartTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - case 3: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field EndTimeUnixNano", wireType) - } - m.EndTimeUnixNano = 0 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - m.EndTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, v11.KeyValue{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType) - } - m.DroppedAttributesCount = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DroppedAttributesCount |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OriginalPayloadFormat", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.OriginalPayloadFormat = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OriginalPayload", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.OriginalPayload = append(m.OriginalPayload[:0], 
dAtA[iNdEx:postIndex]...) - if m.OriginalPayload == nil { - m.OriginalPayload = []byte{} - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Profile", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Profile.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipProfiles(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProfiles - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipProfiles(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowProfiles - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowProfiles - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowProfiles - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthProfiles - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupProfiles - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthProfiles - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthProfiles = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowProfiles = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupProfiles = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1/trace.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1/trace.pb.go index c1fcf0764e0..b0bddfb985f 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1/trace.pb.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1/trace.pb.go @@ -235,7 +235,8 @@ type ResourceSpans struct { // A list of ScopeSpans that originate from a resource. ScopeSpans []*ScopeSpans `protobuf:"bytes,2,rep,name=scope_spans,json=scopeSpans,proto3" json:"scope_spans,omitempty"` // The Schema URL, if known. This is the identifier of the Schema that the resource data - // is recorded in. To learn more about Schema URL see + // is recorded in. 
Notably, the last part of the URL path is the version number of the + schema: http[s]://server[:port]/path/<version>. To learn more about Schema URL see // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url // This schema_url applies to the data in the "resource" field. It does not apply // to the data in the "scope_spans" field which have their own schema_url field. @@ -312,7 +313,8 @@ type ScopeSpans struct { // A list of Spans that originate from an instrumentation scope. Spans []*Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"` // The Schema URL, if known. This is the identifier of the Schema that the span data - // is recorded in. Notably, the last part of the URL path is the version number of the + schema: http[s]://server[:port]/path/<version>. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url // This schema_url applies to all spans and span events in the "spans" field. SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_int32slice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_int32slice.go new file mode 100644 index 00000000000..31f642d75bd --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_int32slice.go @@ -0,0 +1,34 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +type Int32Slice struct { + orig *[]int32 + state *State +} + +func GetOrigInt32Slice(ms Int32Slice) *[]int32 { + return ms.orig +} + +func GetInt32SliceState(ms Int32Slice) *State { + return ms.state +} + +func NewInt32Slice(orig *[]int32, state *State) Int32Slice { + return Int32Slice{orig: orig, state: state} +} + +func FillTestInt32Slice(tv Int32Slice) { +} + +func GenerateTestInt32Slice() Int32Slice { + state := StateMutable + var orig []int32 = nil + + return Int32Slice{&orig, &state} +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_intslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_intslice.go new file mode 100644 index 00000000000..5f3fe569ba5 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_intslice.go @@ -0,0 +1,34 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". 
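The two generated wrapper files in this hunk, like the pcommon counterparts added below, share the one shape pdatagen emits for primitive slices: a pointer to the backing slice paired with a shared mutability State, so several wrappers can alias one underlying object while read-only views stay enforceable. A self-contained sketch of that pattern under invented names (a hypothetical float32 wrapper, not the real pdata API):

package main

import "fmt"

type state int

const (
	stateMutable state = iota
	stateReadOnly
)

// assertMutable panics on writes through a read-only view, which is how the
// AssertMutable calls in the pcommon code below guard every mutation.
func (s *state) assertMutable() {
	if *s != stateMutable {
		panic("invalid access to a read-only object") // message invented for this sketch
	}
}

// float32Slice mirrors the generated Int32Slice/IntSlice shape.
type float32Slice struct {
	orig  *[]float32
	state *state
}

func (ms float32Slice) append(elms ...float32) {
	ms.state.assertMutable()
	*ms.orig = append(*ms.orig, elms...)
}

func main() {
	var orig []float32
	st := stateMutable
	ms := float32Slice{orig: &orig, state: &st}
	ms.append(1.5, 2.5)
	fmt.Println(orig) // [1.5 2.5]
}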
+ +package internal + +type IntSlice struct { + orig *[]int + state *State +} + +func GetOrigIntSlice(ms IntSlice) *[]int { + return ms.orig +} + +func GetIntSliceState(ms IntSlice) *State { + return ms.state +} + +func NewIntSlice(orig *[]int, state *State) IntSlice { + return IntSlice{orig: orig, state: state} +} + +func FillTestIntSlice(tv IntSlice) { +} + +func GenerateTestIntSlice() IntSlice { + state := StateMutable + var orig []int = nil + + return IntSlice{&orig, &state} +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/json/enum.go b/vendor/go.opentelemetry.io/collector/pdata/internal/json/enum.go index 4fbe193430e..02dd2b7768c 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/json/enum.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/json/enum.go @@ -15,7 +15,7 @@ func ReadEnumValue(iter *jsoniter.Iterator, valueMap map[string]int32) int32 { return iter.ReadInt32() case jsoniter.StringValue: val, ok := valueMap[iter.ReadString()] - // Same behavior with official protbuf JSON decoder, + // Same behavior with official protobuf JSON decoder, // see https://github.com/open-telemetry/opentelemetry-proto-go/pull/81 if !ok { iter.ReportError("ReadEnumValue", "unknown string value") diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/profiles.go b/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/profiles.go new file mode 100644 index 00000000000..59c23cc672b --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/profiles.go @@ -0,0 +1,12 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlp // import "go.opentelemetry.io/collector/pdata/internal/otlp" + +import ( + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" +) + +// MigrateProfiles implements any translation needed due to deprecation in OTLP profiles protocol. +// Any pprofile.Unmarshaler implementation from OTLP (proto/json) MUST call this, and the gRPC Server implementation. +func MigrateProfiles(_ []*otlpprofiles.ResourceProfiles) {} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_profiles.go b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_profiles.go index 564c8945862..2230b079c36 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_profiles.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_profiles.go @@ -4,8 +4,8 @@ package internal // import "go.opentelemetry.io/collector/pdata/internal" import ( - otlpcollectorprofile "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1experimental" - otlpprofile "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1experimental" + otlpcollectorprofile "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development" + otlpprofile "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" ) type Profiles struct { diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_int32slice.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_int32slice.go new file mode 100644 index 00000000000..35a40bd079c --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_int32slice.go @@ -0,0 +1,108 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. 
+// To regenerate this file run "make genpdata". + +package pcommon + +import ( + "go.opentelemetry.io/collector/pdata/internal" +) + +// Int32Slice represents a []int32 slice. +// The instance of Int32Slice can be assigned to multiple objects since it's immutable. +// +// Must use NewInt32Slice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type Int32Slice internal.Int32Slice + +func (ms Int32Slice) getOrig() *[]int32 { + return internal.GetOrigInt32Slice(internal.Int32Slice(ms)) +} + +func (ms Int32Slice) getState() *internal.State { + return internal.GetInt32SliceState(internal.Int32Slice(ms)) +} + +// NewInt32Slice creates a new empty Int32Slice. +func NewInt32Slice() Int32Slice { + orig := []int32(nil) + state := internal.StateMutable + return Int32Slice(internal.NewInt32Slice(&orig, &state)) +} + +// AsRaw returns a copy of the []int32 slice. +func (ms Int32Slice) AsRaw() []int32 { + return copyInt32Slice(nil, *ms.getOrig()) +} + +// FromRaw copies raw []int32 into the slice Int32Slice. +func (ms Int32Slice) FromRaw(val []int32) { + ms.getState().AssertMutable() + *ms.getOrig() = copyInt32Slice(*ms.getOrig(), val) +} + +// Len returns length of the []int32 slice value. +// Equivalent of len(int32Slice). +func (ms Int32Slice) Len() int { + return len(*ms.getOrig()) +} + +// At returns an item from particular index. +// Equivalent of int32Slice[i]. +func (ms Int32Slice) At(i int) int32 { + return (*ms.getOrig())[i] +} + +// SetAt sets int32 item at particular index. +// Equivalent of int32Slice[i] = val +func (ms Int32Slice) SetAt(i int, val int32) { + ms.getState().AssertMutable() + (*ms.getOrig())[i] = val +} + +// EnsureCapacity ensures Int32Slice has at least the specified capacity. +// 1. If the newCap <= cap, then is no change in capacity. +// 2. If the newCap > cap, then the slice capacity will be expanded to the provided value which will be equivalent of: +// buf := make([]int32, len(int32Slice), newCap) +// copy(buf, int32Slice) +// int32Slice = buf +func (ms Int32Slice) EnsureCapacity(newCap int) { + ms.getState().AssertMutable() + oldCap := cap(*ms.getOrig()) + if newCap <= oldCap { + return + } + + newOrig := make([]int32, len(*ms.getOrig()), newCap) + copy(newOrig, *ms.getOrig()) + *ms.getOrig() = newOrig +} + +// Append appends extra elements to Int32Slice. +// Equivalent of int32Slice = append(int32Slice, elms...) +func (ms Int32Slice) Append(elms ...int32) { + ms.getState().AssertMutable() + *ms.getOrig() = append(*ms.getOrig(), elms...) +} + +// MoveTo moves all elements from the current slice overriding the destination and +// resetting the current instance to its zero value. +func (ms Int32Slice) MoveTo(dest Int32Slice) { + ms.getState().AssertMutable() + dest.getState().AssertMutable() + *dest.getOrig() = *ms.getOrig() + *ms.getOrig() = nil +} + +// CopyTo copies all elements from the current slice overriding the destination. +func (ms Int32Slice) CopyTo(dest Int32Slice) { + dest.getState().AssertMutable() + *dest.getOrig() = copyInt32Slice(*dest.getOrig(), *ms.getOrig()) +} + +func copyInt32Slice(dst, src []int32) []int32 { + dst = dst[:0] + return append(dst, src...) 
+} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_intslice.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_intslice.go new file mode 100644 index 00000000000..1a72889d554 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_intslice.go @@ -0,0 +1,108 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pcommon + +import ( + "go.opentelemetry.io/collector/pdata/internal" +) + +// IntSlice represents a []int slice. +// The instance of IntSlice can be assigned to multiple objects since it's immutable. +// +// Must use NewIntSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type IntSlice internal.IntSlice + +func (ms IntSlice) getOrig() *[]int { + return internal.GetOrigIntSlice(internal.IntSlice(ms)) +} + +func (ms IntSlice) getState() *internal.State { + return internal.GetIntSliceState(internal.IntSlice(ms)) +} + +// NewIntSlice creates a new empty IntSlice. +func NewIntSlice() IntSlice { + orig := []int(nil) + state := internal.StateMutable + return IntSlice(internal.NewIntSlice(&orig, &state)) +} + +// AsRaw returns a copy of the []int slice. +func (ms IntSlice) AsRaw() []int { + return copyIntSlice(nil, *ms.getOrig()) +} + +// FromRaw copies raw []int into the slice IntSlice. +func (ms IntSlice) FromRaw(val []int) { + ms.getState().AssertMutable() + *ms.getOrig() = copyIntSlice(*ms.getOrig(), val) +} + +// Len returns length of the []int slice value. +// Equivalent of len(intSlice). +func (ms IntSlice) Len() int { + return len(*ms.getOrig()) +} + +// At returns an item from particular index. +// Equivalent of intSlice[i]. +func (ms IntSlice) At(i int) int { + return (*ms.getOrig())[i] +} + +// SetAt sets int item at particular index. +// Equivalent of intSlice[i] = val +func (ms IntSlice) SetAt(i int, val int) { + ms.getState().AssertMutable() + (*ms.getOrig())[i] = val +} + +// EnsureCapacity ensures IntSlice has at least the specified capacity. +// 1. If the newCap <= cap, then is no change in capacity. +// 2. If the newCap > cap, then the slice capacity will be expanded to the provided value which will be equivalent of: +// buf := make([]int, len(intSlice), newCap) +// copy(buf, intSlice) +// intSlice = buf +func (ms IntSlice) EnsureCapacity(newCap int) { + ms.getState().AssertMutable() + oldCap := cap(*ms.getOrig()) + if newCap <= oldCap { + return + } + + newOrig := make([]int, len(*ms.getOrig()), newCap) + copy(newOrig, *ms.getOrig()) + *ms.getOrig() = newOrig +} + +// Append appends extra elements to IntSlice. +// Equivalent of intSlice = append(intSlice, elms...) +func (ms IntSlice) Append(elms ...int) { + ms.getState().AssertMutable() + *ms.getOrig() = append(*ms.getOrig(), elms...) +} + +// MoveTo moves all elements from the current slice overriding the destination and +// resetting the current instance to its zero value. +func (ms IntSlice) MoveTo(dest IntSlice) { + ms.getState().AssertMutable() + dest.getState().AssertMutable() + *dest.getOrig() = *ms.getOrig() + *ms.getOrig() = nil +} + +// CopyTo copies all elements from the current slice overriding the destination. 
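Editor's aside: between them, generated_int32slice.go and generated_intslice.go give pcommon the same state-checked wrapper over raw Go slices that the other pdata types use. A short usage sketch of the new Int32Slice API, using only the methods defined above (the values are arbitrary):

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

func main() {
	s := pcommon.NewInt32Slice()
	s.EnsureCapacity(4)         // pre-size the backing array
	s.FromRaw([]int32{1, 2, 3}) // copy a raw slice in
	s.Append(4)
	s.SetAt(0, 10)

	dst := pcommon.NewInt32Slice()
	s.CopyTo(dst)                       // deep copy; s stays usable
	fmt.Println(dst.AsRaw(), dst.Len()) // [10 2 3 4] 4
}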
+func (ms IntSlice) CopyTo(dest IntSlice) { + dest.getState().AssertMutable() + *dest.getOrig() = copyIntSlice(*dest.getOrig(), *ms.getOrig()) +} + +func copyIntSlice(dst, src []int) []int { + dst = dst[:0] + return append(dst, src...) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/map.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/map.go index 5bbfab962b0..91b803922a3 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/map.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/map.go @@ -225,6 +225,15 @@ func (m Map) Range(f func(k string, v Value) bool) { } } +// MoveTo moves all key/values from the current map overriding the destination and +// resetting the current instance to its zero value +func (m Map) MoveTo(dest Map) { + m.getState().AssertMutable() + dest.getState().AssertMutable() + *dest.getOrig() = *m.getOrig() + *m.getOrig() = nil +} + // CopyTo copies all elements from the current map overriding the destination. func (m Map) CopyTo(dest Map) { dest.getState().AssertMutable() diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/timestamp.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/timestamp.go index 5fd1758b1be..666f86f43f6 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/timestamp.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/timestamp.go @@ -13,11 +13,13 @@ type Timestamp uint64 // NewTimestampFromTime constructs a new Timestamp from the provided time.Time. func NewTimestampFromTime(t time.Time) Timestamp { + // nolint:gosec return Timestamp(uint64(t.UnixNano())) } // AsTime converts this to a time.Time. func (ts Timestamp) AsTime() time.Time { + // nolint:gosec return time.Unix(0, int64(ts)).UTC() } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/value.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/value.go index 77a84e51758..ad2e1c7ae47 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/value.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/value.go @@ -148,6 +148,7 @@ func (v Value) FromRaw(iv any) error { case int64: v.SetInt(tv) case uint: + // nolint:gosec v.SetInt(int64(tv)) case uint8: v.SetInt(int64(tv)) @@ -156,6 +157,7 @@ func (v Value) FromRaw(iv any) error { case uint32: v.SetInt(int64(tv)) case uint64: + // nolint:gosec v.SetInt(int64(tv)) case float32: v.SetDouble(float64(tv)) @@ -401,7 +403,7 @@ func (v Value) AsString() string { // This allows us to avoid using reflection. func float64AsString(f float64) string { if math.IsInf(f, 0) || math.IsNaN(f) { - return fmt.Sprintf("json: unsupported value: %s", strconv.FormatFloat(f, 'g', -1, 64)) + return "json: unsupported value: " + strconv.FormatFloat(f, 'g', -1, 64) } // Convert as if by ES6 number to string conversion. diff --git a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_logrecord.go b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_logrecord.go index 5e9984a244c..ee79ddec3cd 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/plog/generated_logrecord.go +++ b/vendor/go.opentelemetry.io/collector/pdata/plog/generated_logrecord.go @@ -102,6 +102,17 @@ func (ms LogRecord) SetFlags(v LogRecordFlags) { ms.orig.Flags = uint32(v) } +// EventName returns the eventname associated with this LogRecord. +func (ms LogRecord) EventName() string { + return ms.orig.EventName +} + +// SetEventName replaces the eventname associated with this LogRecord. 
+func (ms LogRecord) SetEventName(v string) { + ms.state.AssertMutable() + ms.orig.EventName = v +} + // SeverityText returns the severitytext associated with this LogRecord. func (ms LogRecord) SeverityText() string { return ms.orig.SeverityText @@ -153,6 +164,7 @@ func (ms LogRecord) CopyTo(dest LogRecord) { dest.SetTraceID(ms.TraceID()) dest.SetSpanID(ms.SpanID()) dest.SetFlags(ms.Flags()) + dest.SetEventName(ms.EventName()) dest.SetSeverityText(ms.SeverityText()) dest.SetSeverityNumber(ms.SeverityNumber()) ms.Body().CopyTo(dest.Body()) diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/LICENSE b/vendor/go.opentelemetry.io/collector/pdata/pprofile/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/Makefile b/vendor/go.opentelemetry.io/collector/pdata/pprofile/Makefile new file mode 100644 index 00000000000..ded7a36092d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/encoding.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/encoding.go new file mode 100644 index 00000000000..ed37cd6f65b --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/encoding.go @@ -0,0 +1,31 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile" + +// MarshalSizer is the interface that groups the basic Marshal and Size methods. +type MarshalSizer interface { + Marshaler + Sizer +} + +// Marshaler marshals pprofile.Profiles into bytes. +type Marshaler interface { + // MarshalProfiles the given pprofile.Profiles into bytes. + // If the error is not nil, the returned bytes slice cannot be used. + MarshalProfiles(td Profiles) ([]byte, error) +} + +// Unmarshaler unmarshals bytes into pprofile.Profiles. +type Unmarshaler interface { + // UnmarshalProfiles the given bytes into pprofile.Profiles. + // If the error is not nil, the returned pprofile.Profiles cannot be used. + UnmarshalProfiles(buf []byte) (Profiles, error) +} + +// Sizer is an optional interface implemented by the Marshaler +// that calculates the size of a marshaled Profiles. +type Sizer interface { + // ProfilesSize returns the size in bytes of a marshaled Profiles. + ProfilesSize(td Profiles) int +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_attribute.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_attribute.go new file mode 100644 index 00000000000..059b89be727 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_attribute.go @@ -0,0 +1,70 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata".
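Editor's aside: encoding.go pins down the pprofile marshaling contract without committing to a wire format. A hedged sketch of how a caller might round-trip Profiles through any implementation of these interfaces; roundTrip is a made-up helper, and the concrete proto/JSON codecs live elsewhere in the package:

package example

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pprofile"
)

// roundTrip encodes and decodes Profiles using whichever codec it is handed,
// relying only on the interfaces defined in encoding.go above.
func roundTrip(m pprofile.MarshalSizer, u pprofile.Unmarshaler, p pprofile.Profiles) (pprofile.Profiles, error) {
	buf, err := m.MarshalProfiles(p)
	if err != nil {
		return pprofile.Profiles{}, err
	}
	fmt.Printf("encoded %d bytes (Sizer predicted %d)\n", len(buf), m.ProfilesSize(p))
	return u.UnmarshalProfiles(buf)
}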
+ +package pprofile + +import ( + "go.opentelemetry.io/collector/pdata/internal" + v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// Attribute describes an attribute stored in a profile's attribute table. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewAttribute function to create new instances. +// Important: zero-initialized instance is not valid for use. +type Attribute struct { + orig *v1.KeyValue + state *internal.State +} + +func newAttribute(orig *v1.KeyValue, state *internal.State) Attribute { + return Attribute{orig: orig, state: state} +} + +// NewAttribute creates a new empty Attribute. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewAttribute() Attribute { + state := internal.StateMutable + return newAttribute(&v1.KeyValue{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms Attribute) MoveTo(dest Attribute) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = v1.KeyValue{} +} + +// Key returns the key associated with this Attribute. +func (ms Attribute) Key() string { + return ms.orig.Key +} + +// SetKey replaces the key associated with this Attribute. +func (ms Attribute) SetKey(v string) { + ms.state.AssertMutable() + ms.orig.Key = v +} + +// Value returns the value associated with this Attribute. +func (ms Attribute) Value() pcommon.Value { + return pcommon.Value(internal.NewValue(&ms.orig.Value, ms.state)) +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms Attribute) CopyTo(dest Attribute) { + dest.state.AssertMutable() + dest.SetKey(ms.Key()) + ms.Value().CopyTo(dest.Value()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_attributetableslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_attributetableslice.go new file mode 100644 index 00000000000..731847d000b --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_attributetableslice.go @@ -0,0 +1,136 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pprofile + +import ( + "go.opentelemetry.io/collector/pdata/internal" + v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" +) + +// AttributeTableSlice logically represents a slice of Attribute. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewAttributeTableSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type AttributeTableSlice struct { + orig *[]v1.KeyValue + state *internal.State +} + +func newAttributeTableSlice(orig *[]v1.KeyValue, state *internal.State) AttributeTableSlice { + return AttributeTableSlice{orig: orig, state: state} +} + +// NewAttributeTableSlice creates a AttributeTableSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. 
+func NewAttributeTableSlice() AttributeTableSlice { + orig := []v1.KeyValue(nil) + state := internal.StateMutable + return newAttributeTableSlice(&orig, &state) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewAttributeTableSlice()". +func (es AttributeTableSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es AttributeTableSlice) At(i int) Attribute { + return newAttribute(&(*es.orig)[i], es.state) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new AttributeTableSlice can be initialized: +// +// es := NewAttributeTableSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es AttributeTableSlice) EnsureCapacity(newCap int) { + es.state.AssertMutable() + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]v1.KeyValue, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty Attribute. +// It returns the newly added Attribute. +func (es AttributeTableSlice) AppendEmpty() Attribute { + es.state.AssertMutable() + *es.orig = append(*es.orig, v1.KeyValue{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es AttributeTableSlice) MoveAndAppendTo(dest AttributeTableSlice) { + es.state.AssertMutable() + dest.state.AssertMutable() + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es AttributeTableSlice) RemoveIf(f func(Attribute) bool) { + es.state.AssertMutable() + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. 
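Editor's aside: AttributeTableSlice is the deduplicated attribute table that profile samples reference by index. Building one follows the usual pdata pattern, where AppendEmpty returns a live view into the new entry. A short sketch; the key and value below are arbitrary examples:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pprofile"
)

func main() {
	table := pprofile.NewAttributeTableSlice()
	table.EnsureCapacity(2)

	attr := table.AppendEmpty()
	attr.SetKey("process.executable.name")
	attr.Value().SetStr("my-service") // Value() writes through to the table entry

	fmt.Println(table.Len(), table.At(0).Key())
}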
+func (es AttributeTableSlice) CopyTo(dest AttributeTableSlice) { + dest.state.AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + } else { + (*dest.orig) = make([]v1.KeyValue, srcLen) + } + for i := range *es.orig { + newAttribute(&(*es.orig)[i], es.state).CopyTo(newAttribute(&(*dest.orig)[i], dest.state)) + } +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_attributeunit.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_attributeunit.go new file mode 100644 index 00000000000..a7d98f425d4 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_attributeunit.go @@ -0,0 +1,75 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pprofile + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" +) + +// AttributeUnit Represents a mapping between Attribute Keys and Units. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewAttributeUnit function to create new instances. +// Important: zero-initialized instance is not valid for use. +type AttributeUnit struct { + orig *otlpprofiles.AttributeUnit + state *internal.State +} + +func newAttributeUnit(orig *otlpprofiles.AttributeUnit, state *internal.State) AttributeUnit { + return AttributeUnit{orig: orig, state: state} +} + +// NewAttributeUnit creates a new empty AttributeUnit. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewAttributeUnit() AttributeUnit { + state := internal.StateMutable + return newAttributeUnit(&otlpprofiles.AttributeUnit{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms AttributeUnit) MoveTo(dest AttributeUnit) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpprofiles.AttributeUnit{} +} + +// AttributeKeyStrindex returns the attributekeystrindex associated with this AttributeUnit. +func (ms AttributeUnit) AttributeKeyStrindex() int32 { + return ms.orig.AttributeKeyStrindex +} + +// SetAttributeKeyStrindex replaces the attributekeystrindex associated with this AttributeUnit. +func (ms AttributeUnit) SetAttributeKeyStrindex(v int32) { + ms.state.AssertMutable() + ms.orig.AttributeKeyStrindex = v +} + +// UnitStrindex returns the unitstrindex associated with this AttributeUnit. +func (ms AttributeUnit) UnitStrindex() int32 { + return ms.orig.UnitStrindex +} + +// SetUnitStrindex replaces the unitstrindex associated with this AttributeUnit. +func (ms AttributeUnit) SetUnitStrindex(v int32) { + ms.state.AssertMutable() + ms.orig.UnitStrindex = v +} + +// CopyTo copies all properties from the current struct overriding the destination. 
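Editor's aside: AttributeUnit is all indices. Both of its fields are *Strindex values pointing into the profile's string table rather than inline strings. A sketch of that indirection with a stand-in string table; AttributeUnitSlice and its AppendEmpty are defined just below:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pprofile"
)

func main() {
	// Stand-in for the profile's string table; real profiles own this.
	strtab := []string{"", "memory", "bytes"}

	units := pprofile.NewAttributeUnitSlice()
	u := units.AppendEmpty()
	u.SetAttributeKeyStrindex(1) // -> "memory"
	u.SetUnitStrindex(2)         // -> "bytes"

	fmt.Println(strtab[u.AttributeKeyStrindex()], strtab[u.UnitStrindex()])
}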
+func (ms AttributeUnit) CopyTo(dest AttributeUnit) { + dest.state.AssertMutable() + dest.SetAttributeKeyStrindex(ms.AttributeKeyStrindex()) + dest.SetUnitStrindex(ms.UnitStrindex()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_attributeunitslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_attributeunitslice.go new file mode 100644 index 00000000000..f7d1c41a1e1 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_attributeunitslice.go @@ -0,0 +1,152 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pprofile + +import ( + "sort" + + "go.opentelemetry.io/collector/pdata/internal" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" +) + +// AttributeUnitSlice logically represents a slice of AttributeUnit. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewAttributeUnitSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type AttributeUnitSlice struct { + orig *[]*otlpprofiles.AttributeUnit + state *internal.State +} + +func newAttributeUnitSlice(orig *[]*otlpprofiles.AttributeUnit, state *internal.State) AttributeUnitSlice { + return AttributeUnitSlice{orig: orig, state: state} +} + +// NewAttributeUnitSlice creates a AttributeUnitSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. +func NewAttributeUnitSlice() AttributeUnitSlice { + orig := []*otlpprofiles.AttributeUnit(nil) + state := internal.StateMutable + return newAttributeUnitSlice(&orig, &state) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewAttributeUnitSlice()". +func (es AttributeUnitSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es AttributeUnitSlice) At(i int) AttributeUnit { + return newAttributeUnit((*es.orig)[i], es.state) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new AttributeUnitSlice can be initialized: +// +// es := NewAttributeUnitSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es AttributeUnitSlice) EnsureCapacity(newCap int) { + es.state.AssertMutable() + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]*otlpprofiles.AttributeUnit, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty AttributeUnit. +// It returns the newly added AttributeUnit. +func (es AttributeUnitSlice) AppendEmpty() AttributeUnit { + es.state.AssertMutable() + *es.orig = append(*es.orig, &otlpprofiles.AttributeUnit{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. 
+// The current slice will be cleared. +func (es AttributeUnitSlice) MoveAndAppendTo(dest AttributeUnitSlice) { + es.state.AssertMutable() + dest.state.AssertMutable() + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es AttributeUnitSlice) RemoveIf(f func(AttributeUnit) bool) { + es.state.AssertMutable() + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. +func (es AttributeUnitSlice) CopyTo(dest AttributeUnitSlice) { + dest.state.AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newAttributeUnit((*es.orig)[i], es.state).CopyTo(newAttributeUnit((*dest.orig)[i], dest.state)) + } + return + } + origs := make([]otlpprofiles.AttributeUnit, srcLen) + wrappers := make([]*otlpprofiles.AttributeUnit, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newAttributeUnit((*es.orig)[i], es.state).CopyTo(newAttributeUnit(wrappers[i], dest.state)) + } + *dest.orig = wrappers +} + +// Sort sorts the AttributeUnit elements within AttributeUnitSlice given the +// provided less function so that two instances of AttributeUnitSlice +// can be compared. +func (es AttributeUnitSlice) Sort(less func(a, b AttributeUnit) bool) { + es.state.AssertMutable() + sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_function.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_function.go new file mode 100644 index 00000000000..21fe358de8f --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_function.go @@ -0,0 +1,99 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pprofile + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" +) + +// Function describes a function, including its human-readable name, system name, source file, and starting line number in the source. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewFunction function to create new instances. +// Important: zero-initialized instance is not valid for use. +type Function struct { + orig *otlpprofiles.Function + state *internal.State +} + +func newFunction(orig *otlpprofiles.Function, state *internal.State) Function { + return Function{orig: orig, state: state} +} + +// NewFunction creates a new empty Function. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. 
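Editor's aside: the generated slice types share the same mutation helpers, and two of them are easy to misread: RemoveIf filters in place, and MoveAndAppendTo empties the source rather than copying it. A sketch using AttributeUnitSlice from above (the indices are arbitrary):

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pprofile"
)

func main() {
	units := pprofile.NewAttributeUnitSlice()
	for i := int32(0); i < 4; i++ {
		units.AppendEmpty().SetUnitStrindex(i)
	}

	// In-place filter: drop entries with an even unit index.
	units.RemoveIf(func(u pprofile.AttributeUnit) bool {
		return u.UnitStrindex()%2 == 0
	})

	rest := pprofile.NewAttributeUnitSlice()
	units.MoveAndAppendTo(rest)          // units is left empty
	fmt.Println(units.Len(), rest.Len()) // 0 2
}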
+func NewFunction() Function { + state := internal.StateMutable + return newFunction(&otlpprofiles.Function{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms Function) MoveTo(dest Function) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpprofiles.Function{} +} + +// NameStrindex returns the namestrindex associated with this Function. +func (ms Function) NameStrindex() int32 { + return ms.orig.NameStrindex +} + +// SetNameStrindex replaces the namestrindex associated with this Function. +func (ms Function) SetNameStrindex(v int32) { + ms.state.AssertMutable() + ms.orig.NameStrindex = v +} + +// SystemNameStrindex returns the systemnamestrindex associated with this Function. +func (ms Function) SystemNameStrindex() int32 { + return ms.orig.SystemNameStrindex +} + +// SetSystemNameStrindex replaces the systemnamestrindex associated with this Function. +func (ms Function) SetSystemNameStrindex(v int32) { + ms.state.AssertMutable() + ms.orig.SystemNameStrindex = v +} + +// FilenameStrindex returns the filenamestrindex associated with this Function. +func (ms Function) FilenameStrindex() int32 { + return ms.orig.FilenameStrindex +} + +// SetFilenameStrindex replaces the filenamestrindex associated with this Function. +func (ms Function) SetFilenameStrindex(v int32) { + ms.state.AssertMutable() + ms.orig.FilenameStrindex = v +} + +// StartLine returns the startline associated with this Function. +func (ms Function) StartLine() int64 { + return ms.orig.StartLine +} + +// SetStartLine replaces the startline associated with this Function. +func (ms Function) SetStartLine(v int64) { + ms.state.AssertMutable() + ms.orig.StartLine = v +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms Function) CopyTo(dest Function) { + dest.state.AssertMutable() + dest.SetNameStrindex(ms.NameStrindex()) + dest.SetSystemNameStrindex(ms.SystemNameStrindex()) + dest.SetFilenameStrindex(ms.FilenameStrindex()) + dest.SetStartLine(ms.StartLine()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_functionslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_functionslice.go new file mode 100644 index 00000000000..51dd4f2aca1 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_functionslice.go @@ -0,0 +1,152 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pprofile + +import ( + "sort" + + "go.opentelemetry.io/collector/pdata/internal" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" +) + +// FunctionSlice logically represents a slice of Function. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewFunctionSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type FunctionSlice struct { + orig *[]*otlpprofiles.Function + state *internal.State +} + +func newFunctionSlice(orig *[]*otlpprofiles.Function, state *internal.State) FunctionSlice { + return FunctionSlice{orig: orig, state: state} +} + +// NewFunctionSlice creates a FunctionSlice with 0 elements. 
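Editor's aside: Function follows the same string-table convention as AttributeUnit. Name, system name, and filename are all indices, with only StartLine stored inline. A sketch of populating one entry via FunctionSlice, whose generated code continues below; the index values are illustrative:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pprofile"
)

func main() {
	fns := pprofile.NewFunctionSlice()
	fn := fns.AppendEmpty()
	fn.SetNameStrindex(3)       // e.g. "main.handleRequest" in the string table
	fn.SetSystemNameStrindex(3) // often the same entry as the name
	fn.SetFilenameStrindex(4)   // e.g. "server.go"
	fn.SetStartLine(42)
	fmt.Println(fns.Len(), fn.StartLine())
}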
+// Can use "EnsureCapacity" to initialize with a given capacity. +func NewFunctionSlice() FunctionSlice { + orig := []*otlpprofiles.Function(nil) + state := internal.StateMutable + return newFunctionSlice(&orig, &state) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewFunctionSlice()". +func (es FunctionSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es FunctionSlice) At(i int) Function { + return newFunction((*es.orig)[i], es.state) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new FunctionSlice can be initialized: +// +// es := NewFunctionSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es FunctionSlice) EnsureCapacity(newCap int) { + es.state.AssertMutable() + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]*otlpprofiles.Function, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty Function. +// It returns the newly added Function. +func (es FunctionSlice) AppendEmpty() Function { + es.state.AssertMutable() + *es.orig = append(*es.orig, &otlpprofiles.Function{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es FunctionSlice) MoveAndAppendTo(dest FunctionSlice) { + es.state.AssertMutable() + dest.state.AssertMutable() + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es FunctionSlice) RemoveIf(f func(Function) bool) { + es.state.AssertMutable() + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. +func (es FunctionSlice) CopyTo(dest FunctionSlice) { + dest.state.AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newFunction((*es.orig)[i], es.state).CopyTo(newFunction((*dest.orig)[i], dest.state)) + } + return + } + origs := make([]otlpprofiles.Function, srcLen) + wrappers := make([]*otlpprofiles.Function, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newFunction((*es.orig)[i], es.state).CopyTo(newFunction(wrappers[i], dest.state)) + } + *dest.orig = wrappers +} + +// Sort sorts the Function elements within FunctionSlice given the +// provided less function so that two instances of FunctionSlice +// can be compared. 
+func (es FunctionSlice) Sort(less func(a, b Function) bool) { + es.state.AssertMutable() + sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_line.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_line.go new file mode 100644 index 00000000000..daae7435237 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_line.go @@ -0,0 +1,87 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pprofile + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" +) + +// Line details a specific line in a source code, linked to a function. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewLine function to create new instances. +// Important: zero-initialized instance is not valid for use. +type Line struct { + orig *otlpprofiles.Line + state *internal.State +} + +func newLine(orig *otlpprofiles.Line, state *internal.State) Line { + return Line{orig: orig, state: state} +} + +// NewLine creates a new empty Line. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewLine() Line { + state := internal.StateMutable + return newLine(&otlpprofiles.Line{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms Line) MoveTo(dest Line) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpprofiles.Line{} +} + +// FunctionIndex returns the functionindex associated with this Line. +func (ms Line) FunctionIndex() int32 { + return ms.orig.FunctionIndex +} + +// SetFunctionIndex replaces the functionindex associated with this Line. +func (ms Line) SetFunctionIndex(v int32) { + ms.state.AssertMutable() + ms.orig.FunctionIndex = v +} + +// Line returns the line associated with this Line. +func (ms Line) Line() int64 { + return ms.orig.Line +} + +// SetLine replaces the line associated with this Line. +func (ms Line) SetLine(v int64) { + ms.state.AssertMutable() + ms.orig.Line = v +} + +// Column returns the column associated with this Line. +func (ms Line) Column() int64 { + return ms.orig.Column +} + +// SetColumn replaces the column associated with this Line. +func (ms Line) SetColumn(v int64) { + ms.state.AssertMutable() + ms.orig.Column = v +} + +// CopyTo copies all properties from the current struct overriding the destination. 
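Editor's aside: Line ties a source position to an entry in the function table. FunctionIndex is a plain index into that table, while Line and Column are inline coordinates. A sketch via LineSlice, whose generated code follows directly below:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pprofile"
)

func main() {
	lines := pprofile.NewLineSlice()
	ln := lines.AppendEmpty()
	ln.SetFunctionIndex(0) // index into the profile's function table
	ln.SetLine(42)
	ln.SetColumn(7)
	fmt.Println(ln.Line(), ln.Column())
}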
+func (ms Line) CopyTo(dest Line) { + dest.state.AssertMutable() + dest.SetFunctionIndex(ms.FunctionIndex()) + dest.SetLine(ms.Line()) + dest.SetColumn(ms.Column()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_lineslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_lineslice.go new file mode 100644 index 00000000000..d5b94978bb5 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_lineslice.go @@ -0,0 +1,152 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pprofile + +import ( + "sort" + + "go.opentelemetry.io/collector/pdata/internal" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" +) + +// LineSlice logically represents a slice of Line. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewLineSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type LineSlice struct { + orig *[]*otlpprofiles.Line + state *internal.State +} + +func newLineSlice(orig *[]*otlpprofiles.Line, state *internal.State) LineSlice { + return LineSlice{orig: orig, state: state} +} + +// NewLineSlice creates a LineSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. +func NewLineSlice() LineSlice { + orig := []*otlpprofiles.Line(nil) + state := internal.StateMutable + return newLineSlice(&orig, &state) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewLineSlice()". +func (es LineSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es LineSlice) At(i int) Line { + return newLine((*es.orig)[i], es.state) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new LineSlice can be initialized: +// +// es := NewLineSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es LineSlice) EnsureCapacity(newCap int) { + es.state.AssertMutable() + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]*otlpprofiles.Line, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty Line. +// It returns the newly added Line. +func (es LineSlice) AppendEmpty() Line { + es.state.AssertMutable() + *es.orig = append(*es.orig, &otlpprofiles.Line{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es LineSlice) MoveAndAppendTo(dest LineSlice) { + es.state.AssertMutable() + dest.state.AssertMutable() + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. 
+ *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es LineSlice) RemoveIf(f func(Line) bool) { + es.state.AssertMutable() + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. +func (es LineSlice) CopyTo(dest LineSlice) { + dest.state.AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newLine((*es.orig)[i], es.state).CopyTo(newLine((*dest.orig)[i], dest.state)) + } + return + } + origs := make([]otlpprofiles.Line, srcLen) + wrappers := make([]*otlpprofiles.Line, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newLine((*es.orig)[i], es.state).CopyTo(newLine(wrappers[i], dest.state)) + } + *dest.orig = wrappers +} + +// Sort sorts the Line elements within LineSlice given the +// provided less function so that two instances of LineSlice +// can be compared. +func (es LineSlice) Sort(less func(a, b Line) bool) { + es.state.AssertMutable() + sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_link.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_link.go new file mode 100644 index 00000000000..22966f15545 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_link.go @@ -0,0 +1,77 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pprofile + +import ( + "go.opentelemetry.io/collector/pdata/internal" + "go.opentelemetry.io/collector/pdata/internal/data" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// Link represents a pointer from a profile Sample to a trace Span. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewLink function to create new instances. +// Important: zero-initialized instance is not valid for use. +type Link struct { + orig *otlpprofiles.Link + state *internal.State +} + +func newLink(orig *otlpprofiles.Link, state *internal.State) Link { + return Link{orig: orig, state: state} +} + +// NewLink creates a new empty Link. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewLink() Link { + state := internal.StateMutable + return newLink(&otlpprofiles.Link{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms Link) MoveTo(dest Link) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpprofiles.Link{} +} + +// TraceID returns the traceid associated with this Link. 
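Editor's aside: Link is the profiles-to-traces bridge. It stores a trace ID and span ID so a profile sample can be correlated with the span that was active when it was taken. A sketch using LinkSlice, defined next; the IDs are fabricated, and real ones come from the span context:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pprofile"
)

func main() {
	links := pprofile.NewLinkSlice()
	link := links.AppendEmpty()
	link.SetTraceID(pcommon.TraceID{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16})
	link.SetSpanID(pcommon.SpanID{1, 2, 3, 4, 5, 6, 7, 8})
	fmt.Println(link.TraceID(), link.SpanID())
}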
+func (ms Link) TraceID() pcommon.TraceID { + return pcommon.TraceID(ms.orig.TraceId) +} + +// SetTraceID replaces the traceid associated with this Link. +func (ms Link) SetTraceID(v pcommon.TraceID) { + ms.state.AssertMutable() + ms.orig.TraceId = data.TraceID(v) +} + +// SpanID returns the spanid associated with this Link. +func (ms Link) SpanID() pcommon.SpanID { + return pcommon.SpanID(ms.orig.SpanId) +} + +// SetSpanID replaces the spanid associated with this Link. +func (ms Link) SetSpanID(v pcommon.SpanID) { + ms.state.AssertMutable() + ms.orig.SpanId = data.SpanID(v) +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms Link) CopyTo(dest Link) { + dest.state.AssertMutable() + dest.SetTraceID(ms.TraceID()) + dest.SetSpanID(ms.SpanID()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_linkslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_linkslice.go new file mode 100644 index 00000000000..b2a7a1ab211 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_linkslice.go @@ -0,0 +1,152 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pprofile + +import ( + "sort" + + "go.opentelemetry.io/collector/pdata/internal" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" +) + +// LinkSlice logically represents a slice of Link. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewLinkSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type LinkSlice struct { + orig *[]*otlpprofiles.Link + state *internal.State +} + +func newLinkSlice(orig *[]*otlpprofiles.Link, state *internal.State) LinkSlice { + return LinkSlice{orig: orig, state: state} +} + +// NewLinkSlice creates a LinkSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. +func NewLinkSlice() LinkSlice { + orig := []*otlpprofiles.Link(nil) + state := internal.StateMutable + return newLinkSlice(&orig, &state) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewLinkSlice()". +func (es LinkSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es LinkSlice) At(i int) Link { + return newLink((*es.orig)[i], es.state) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new LinkSlice can be initialized: +// +// es := NewLinkSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. 
+// } +func (es LinkSlice) EnsureCapacity(newCap int) { + es.state.AssertMutable() + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]*otlpprofiles.Link, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty Link. +// It returns the newly added Link. +func (es LinkSlice) AppendEmpty() Link { + es.state.AssertMutable() + *es.orig = append(*es.orig, &otlpprofiles.Link{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es LinkSlice) MoveAndAppendTo(dest LinkSlice) { + es.state.AssertMutable() + dest.state.AssertMutable() + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es LinkSlice) RemoveIf(f func(Link) bool) { + es.state.AssertMutable() + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. +func (es LinkSlice) CopyTo(dest LinkSlice) { + dest.state.AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newLink((*es.orig)[i], es.state).CopyTo(newLink((*dest.orig)[i], dest.state)) + } + return + } + origs := make([]otlpprofiles.Link, srcLen) + wrappers := make([]*otlpprofiles.Link, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newLink((*es.orig)[i], es.state).CopyTo(newLink(wrappers[i], dest.state)) + } + *dest.orig = wrappers +} + +// Sort sorts the Link elements within LinkSlice given the +// provided less function so that two instances of LinkSlice +// can be compared. +func (es LinkSlice) Sort(less func(a, b Link) bool) { + es.state.AssertMutable() + sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_location.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_location.go new file mode 100644 index 00000000000..8b968f973b9 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_location.go @@ -0,0 +1,115 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pprofile + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// Location describes function and line table debug information. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewLocation function to create new instances. +// Important: zero-initialized instance is not valid for use. 
+type Location struct { + orig *otlpprofiles.Location + state *internal.State +} + +func newLocation(orig *otlpprofiles.Location, state *internal.State) Location { + return Location{orig: orig, state: state} +} + +// NewLocation creates a new empty Location. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewLocation() Location { + state := internal.StateMutable + return newLocation(&otlpprofiles.Location{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms Location) MoveTo(dest Location) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpprofiles.Location{} +} + +// MappingIndex returns the mappingindex associated with this Location. +func (ms Location) MappingIndex() int32 { + return ms.orig.GetMappingIndex() +} + +// HasMappingIndex returns true if the Location contains a +// MappingIndex value, false otherwise. +func (ms Location) HasMappingIndex() bool { + return ms.orig.MappingIndex_ != nil +} + +// SetMappingIndex replaces the mappingindex associated with this Location. +func (ms Location) SetMappingIndex(v int32) { + ms.state.AssertMutable() + ms.orig.MappingIndex_ = &otlpprofiles.Location_MappingIndex{MappingIndex: v} +} + +// RemoveMappingIndex removes the mappingindex associated with this Location. +func (ms Location) RemoveMappingIndex() { + ms.state.AssertMutable() + ms.orig.MappingIndex_ = nil +} + +// Address returns the address associated with this Location. +func (ms Location) Address() uint64 { + return ms.orig.Address +} + +// SetAddress replaces the address associated with this Location. +func (ms Location) SetAddress(v uint64) { + ms.state.AssertMutable() + ms.orig.Address = v +} + +// Line returns the Line associated with this Location. +func (ms Location) Line() LineSlice { + return newLineSlice(&ms.orig.Line, ms.state) +} + +// IsFolded returns the isfolded associated with this Location. +func (ms Location) IsFolded() bool { + return ms.orig.IsFolded +} + +// SetIsFolded replaces the isfolded associated with this Location. +func (ms Location) SetIsFolded(v bool) { + ms.state.AssertMutable() + ms.orig.IsFolded = v +} + +// AttributeIndices returns the AttributeIndices associated with this Location. +func (ms Location) AttributeIndices() pcommon.Int32Slice { + return pcommon.Int32Slice(internal.NewInt32Slice(&ms.orig.AttributeIndices, ms.state)) +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms Location) CopyTo(dest Location) { + dest.state.AssertMutable() + if ms.HasMappingIndex() { + dest.SetMappingIndex(ms.MappingIndex()) + } + + dest.SetAddress(ms.Address()) + ms.Line().CopyTo(dest.Line()) + dest.SetIsFolded(ms.IsFolded()) + ms.AttributeIndices().CopyTo(dest.AttributeIndices()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_locationslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_locationslice.go new file mode 100644 index 00000000000..42039af4ebe --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_locationslice.go @@ -0,0 +1,152 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". 
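+
+// A short sketch of the optional-field pattern generated for Location
+// above; MappingIndex is only meaningful while HasMappingIndex is true:
+//
+//	loc := NewLocation()
+//	loc.SetMappingIndex(3) // HasMappingIndex() now reports true
+//	if loc.HasMappingIndex() {
+//		_ = loc.MappingIndex()
+//	}
+//	loc.RemoveMappingIndex() // back to the unset state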
+ +package pprofile + +import ( + "sort" + + "go.opentelemetry.io/collector/pdata/internal" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" +) + +// LocationSlice logically represents a slice of Location. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewLocationSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type LocationSlice struct { + orig *[]*otlpprofiles.Location + state *internal.State +} + +func newLocationSlice(orig *[]*otlpprofiles.Location, state *internal.State) LocationSlice { + return LocationSlice{orig: orig, state: state} +} + +// NewLocationSlice creates a LocationSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. +func NewLocationSlice() LocationSlice { + orig := []*otlpprofiles.Location(nil) + state := internal.StateMutable + return newLocationSlice(&orig, &state) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewLocationSlice()". +func (es LocationSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es LocationSlice) At(i int) Location { + return newLocation((*es.orig)[i], es.state) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new LocationSlice can be initialized: +// +// es := NewLocationSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es LocationSlice) EnsureCapacity(newCap int) { + es.state.AssertMutable() + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]*otlpprofiles.Location, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty Location. +// It returns the newly added Location. +func (es LocationSlice) AppendEmpty() Location { + es.state.AssertMutable() + *es.orig = append(*es.orig, &otlpprofiles.Location{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es LocationSlice) MoveAndAppendTo(dest LocationSlice) { + es.state.AssertMutable() + dest.state.AssertMutable() + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es LocationSlice) RemoveIf(f func(Location) bool) { + es.state.AssertMutable() + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. 
+ newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. +func (es LocationSlice) CopyTo(dest LocationSlice) { + dest.state.AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newLocation((*es.orig)[i], es.state).CopyTo(newLocation((*dest.orig)[i], dest.state)) + } + return + } + origs := make([]otlpprofiles.Location, srcLen) + wrappers := make([]*otlpprofiles.Location, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newLocation((*es.orig)[i], es.state).CopyTo(newLocation(wrappers[i], dest.state)) + } + *dest.orig = wrappers +} + +// Sort sorts the Location elements within LocationSlice given the +// provided less function so that two instances of LocationSlice +// can be compared. +func (es LocationSlice) Sort(less func(a, b Location) bool) { + es.state.AssertMutable() + sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_mapping.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_mapping.go new file mode 100644 index 00000000000..65ef57fa4db --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_mapping.go @@ -0,0 +1,154 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pprofile + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// Mapping describes the mapping of a binary in memory, including its address range, file offset, and metadata like build ID +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewMapping function to create new instances. +// Important: zero-initialized instance is not valid for use. +type Mapping struct { + orig *otlpprofiles.Mapping + state *internal.State +} + +func newMapping(orig *otlpprofiles.Mapping, state *internal.State) Mapping { + return Mapping{orig: orig, state: state} +} + +// NewMapping creates a new empty Mapping. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewMapping() Mapping { + state := internal.StateMutable + return newMapping(&otlpprofiles.Mapping{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms Mapping) MoveTo(dest Mapping) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpprofiles.Mapping{} +} + +// MemoryStart returns the memorystart associated with this Mapping. +func (ms Mapping) MemoryStart() uint64 { + return ms.orig.MemoryStart +} + +// SetMemoryStart replaces the memorystart associated with this Mapping. +func (ms Mapping) SetMemoryStart(v uint64) { + ms.state.AssertMutable() + ms.orig.MemoryStart = v +} + +// MemoryLimit returns the memorylimit associated with this Mapping. 
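+//
+// A brief sketch of filtering and ordering the LocationSlice defined
+// above with its generated helpers; the predicate and ordering are
+// illustrative only:
+//
+//	locs := NewLocationSlice()
+//	locs.AppendEmpty().SetAddress(0x40)
+//	locs.AppendEmpty() // leaves Address at its zero value
+//	locs.RemoveIf(func(l Location) bool { return l.Address() == 0 })
+//	locs.Sort(func(a, b Location) bool { return a.Address() < b.Address() })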
+func (ms Mapping) MemoryLimit() uint64 { + return ms.orig.MemoryLimit +} + +// SetMemoryLimit replaces the memorylimit associated with this Mapping. +func (ms Mapping) SetMemoryLimit(v uint64) { + ms.state.AssertMutable() + ms.orig.MemoryLimit = v +} + +// FileOffset returns the fileoffset associated with this Mapping. +func (ms Mapping) FileOffset() uint64 { + return ms.orig.FileOffset +} + +// SetFileOffset replaces the fileoffset associated with this Mapping. +func (ms Mapping) SetFileOffset(v uint64) { + ms.state.AssertMutable() + ms.orig.FileOffset = v +} + +// FilenameStrindex returns the filenamestrindex associated with this Mapping. +func (ms Mapping) FilenameStrindex() int32 { + return ms.orig.FilenameStrindex +} + +// SetFilenameStrindex replaces the filenamestrindex associated with this Mapping. +func (ms Mapping) SetFilenameStrindex(v int32) { + ms.state.AssertMutable() + ms.orig.FilenameStrindex = v +} + +// AttributeIndices returns the AttributeIndices associated with this Mapping. +func (ms Mapping) AttributeIndices() pcommon.Int32Slice { + return pcommon.Int32Slice(internal.NewInt32Slice(&ms.orig.AttributeIndices, ms.state)) +} + +// HasFunctions returns the hasfunctions associated with this Mapping. +func (ms Mapping) HasFunctions() bool { + return ms.orig.HasFunctions +} + +// SetHasFunctions replaces the hasfunctions associated with this Mapping. +func (ms Mapping) SetHasFunctions(v bool) { + ms.state.AssertMutable() + ms.orig.HasFunctions = v +} + +// HasFilenames returns the hasfilenames associated with this Mapping. +func (ms Mapping) HasFilenames() bool { + return ms.orig.HasFilenames +} + +// SetHasFilenames replaces the hasfilenames associated with this Mapping. +func (ms Mapping) SetHasFilenames(v bool) { + ms.state.AssertMutable() + ms.orig.HasFilenames = v +} + +// HasLineNumbers returns the haslinenumbers associated with this Mapping. +func (ms Mapping) HasLineNumbers() bool { + return ms.orig.HasLineNumbers +} + +// SetHasLineNumbers replaces the haslinenumbers associated with this Mapping. +func (ms Mapping) SetHasLineNumbers(v bool) { + ms.state.AssertMutable() + ms.orig.HasLineNumbers = v +} + +// HasInlineFrames returns the hasinlineframes associated with this Mapping. +func (ms Mapping) HasInlineFrames() bool { + return ms.orig.HasInlineFrames +} + +// SetHasInlineFrames replaces the hasinlineframes associated with this Mapping. +func (ms Mapping) SetHasInlineFrames(v bool) { + ms.state.AssertMutable() + ms.orig.HasInlineFrames = v +} + +// CopyTo copies all properties from the current struct overriding the destination. 
+func (ms Mapping) CopyTo(dest Mapping) { + dest.state.AssertMutable() + dest.SetMemoryStart(ms.MemoryStart()) + dest.SetMemoryLimit(ms.MemoryLimit()) + dest.SetFileOffset(ms.FileOffset()) + dest.SetFilenameStrindex(ms.FilenameStrindex()) + ms.AttributeIndices().CopyTo(dest.AttributeIndices()) + dest.SetHasFunctions(ms.HasFunctions()) + dest.SetHasFilenames(ms.HasFilenames()) + dest.SetHasLineNumbers(ms.HasLineNumbers()) + dest.SetHasInlineFrames(ms.HasInlineFrames()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_mappingslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_mappingslice.go new file mode 100644 index 00000000000..af20e2dc4fc --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_mappingslice.go @@ -0,0 +1,152 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pprofile + +import ( + "sort" + + "go.opentelemetry.io/collector/pdata/internal" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" +) + +// MappingSlice logically represents a slice of Mapping. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewMappingSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type MappingSlice struct { + orig *[]*otlpprofiles.Mapping + state *internal.State +} + +func newMappingSlice(orig *[]*otlpprofiles.Mapping, state *internal.State) MappingSlice { + return MappingSlice{orig: orig, state: state} +} + +// NewMappingSlice creates a MappingSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. +func NewMappingSlice() MappingSlice { + orig := []*otlpprofiles.Mapping(nil) + state := internal.StateMutable + return newMappingSlice(&orig, &state) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewMappingSlice()". +func (es MappingSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es MappingSlice) At(i int) Mapping { + return newMapping((*es.orig)[i], es.state) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new MappingSlice can be initialized: +// +// es := NewMappingSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es MappingSlice) EnsureCapacity(newCap int) { + es.state.AssertMutable() + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]*otlpprofiles.Mapping, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty Mapping. +// It returns the newly added Mapping. 
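+//
+// A minimal sketch of the documented EnsureCapacity/AppendEmpty
+// pattern on MappingSlice (field values are illustrative):
+//
+//	ms := NewMappingSlice()
+//	ms.EnsureCapacity(2)
+//	m := ms.AppendEmpty()
+//	m.SetMemoryStart(0x1000)
+//	m.SetMemoryLimit(0x2000)
+//	m.SetHasFunctions(true)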
+func (es MappingSlice) AppendEmpty() Mapping { + es.state.AssertMutable() + *es.orig = append(*es.orig, &otlpprofiles.Mapping{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es MappingSlice) MoveAndAppendTo(dest MappingSlice) { + es.state.AssertMutable() + dest.state.AssertMutable() + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es MappingSlice) RemoveIf(f func(Mapping) bool) { + es.state.AssertMutable() + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. +func (es MappingSlice) CopyTo(dest MappingSlice) { + dest.state.AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newMapping((*es.orig)[i], es.state).CopyTo(newMapping((*dest.orig)[i], dest.state)) + } + return + } + origs := make([]otlpprofiles.Mapping, srcLen) + wrappers := make([]*otlpprofiles.Mapping, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newMapping((*es.orig)[i], es.state).CopyTo(newMapping(wrappers[i], dest.state)) + } + *dest.orig = wrappers +} + +// Sort sorts the Mapping elements within MappingSlice given the +// provided less function so that two instances of MappingSlice +// can be compared. +func (es MappingSlice) Sort(less func(a, b Mapping) bool) { + es.state.AssertMutable() + sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profile.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profile.go new file mode 100644 index 00000000000..48d11af1e31 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profile.go @@ -0,0 +1,233 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pprofile + +import ( + "go.opentelemetry.io/collector/pdata/internal" + "go.opentelemetry.io/collector/pdata/internal/data" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// Profile are an implementation of the pprofextended data model. + +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewProfile function to create new instances. +// Important: zero-initialized instance is not valid for use. +type Profile struct { + orig *otlpprofiles.Profile + state *internal.State +} + +func newProfile(orig *otlpprofiles.Profile, state *internal.State) Profile { + return Profile{orig: orig, state: state} +} + +// NewProfile creates a new empty Profile. +// +// This must be used only in testing code. 
Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewProfile() Profile { + state := internal.StateMutable + return newProfile(&otlpprofiles.Profile{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms Profile) MoveTo(dest Profile) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpprofiles.Profile{} +} + +// SampleType returns the SampleType associated with this Profile. +func (ms Profile) SampleType() ValueTypeSlice { + return newValueTypeSlice(&ms.orig.SampleType, ms.state) +} + +// Sample returns the Sample associated with this Profile. +func (ms Profile) Sample() SampleSlice { + return newSampleSlice(&ms.orig.Sample, ms.state) +} + +// MappingTable returns the MappingTable associated with this Profile. +func (ms Profile) MappingTable() MappingSlice { + return newMappingSlice(&ms.orig.MappingTable, ms.state) +} + +// LocationTable returns the LocationTable associated with this Profile. +func (ms Profile) LocationTable() LocationSlice { + return newLocationSlice(&ms.orig.LocationTable, ms.state) +} + +// LocationIndices returns the LocationIndices associated with this Profile. +func (ms Profile) LocationIndices() pcommon.Int32Slice { + return pcommon.Int32Slice(internal.NewInt32Slice(&ms.orig.LocationIndices, ms.state)) +} + +// FunctionTable returns the FunctionTable associated with this Profile. +func (ms Profile) FunctionTable() FunctionSlice { + return newFunctionSlice(&ms.orig.FunctionTable, ms.state) +} + +// AttributeTable returns the AttributeTable associated with this Profile. +func (ms Profile) AttributeTable() AttributeTableSlice { + return newAttributeTableSlice(&ms.orig.AttributeTable, ms.state) +} + +// AttributeUnits returns the AttributeUnits associated with this Profile. +func (ms Profile) AttributeUnits() AttributeUnitSlice { + return newAttributeUnitSlice(&ms.orig.AttributeUnits, ms.state) +} + +// LinkTable returns the LinkTable associated with this Profile. +func (ms Profile) LinkTable() LinkSlice { + return newLinkSlice(&ms.orig.LinkTable, ms.state) +} + +// StringTable returns the StringTable associated with this Profile. +func (ms Profile) StringTable() pcommon.StringSlice { + return pcommon.StringSlice(internal.NewStringSlice(&ms.orig.StringTable, ms.state)) +} + +// Time returns the time associated with this Profile. +func (ms Profile) Time() pcommon.Timestamp { + return pcommon.Timestamp(ms.orig.TimeNanos) +} + +// SetTime replaces the time associated with this Profile. +func (ms Profile) SetTime(v pcommon.Timestamp) { + ms.state.AssertMutable() + ms.orig.TimeNanos = int64(v) +} + +// Duration returns the duration associated with this Profile. +func (ms Profile) Duration() pcommon.Timestamp { + return pcommon.Timestamp(ms.orig.DurationNanos) +} + +// SetDuration replaces the duration associated with this Profile. +func (ms Profile) SetDuration(v pcommon.Timestamp) { + ms.state.AssertMutable() + ms.orig.DurationNanos = int64(v) +} + +// StartTime returns the starttime associated with this Profile. +func (ms Profile) StartTime() pcommon.Timestamp { + return pcommon.Timestamp(ms.orig.TimeNanos) +} + +// SetStartTime replaces the starttime associated with this Profile. 
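+//
+// Note that Time and StartTime above are both backed by the same
+// underlying TimeNanos field, so setting either one is visible through
+// the other accessor. A minimal sketch (timestamp is illustrative):
+//
+//	p := NewProfile()
+//	p.SetStartTime(pcommon.Timestamp(1700000000000000000))
+//	_ = p.Time() // same nanosecond value as p.StartTime()
+//	p.SetDuration(pcommon.Timestamp(5_000_000_000))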
+func (ms Profile) SetStartTime(v pcommon.Timestamp) { + ms.state.AssertMutable() + ms.orig.TimeNanos = int64(v) +} + +// PeriodType returns the periodtype associated with this Profile. +func (ms Profile) PeriodType() ValueType { + return newValueType(&ms.orig.PeriodType, ms.state) +} + +// Period returns the period associated with this Profile. +func (ms Profile) Period() int64 { + return ms.orig.Period +} + +// SetPeriod replaces the period associated with this Profile. +func (ms Profile) SetPeriod(v int64) { + ms.state.AssertMutable() + ms.orig.Period = v +} + +// CommentStrindices returns the CommentStrindices associated with this Profile. +func (ms Profile) CommentStrindices() pcommon.Int32Slice { + return pcommon.Int32Slice(internal.NewInt32Slice(&ms.orig.CommentStrindices, ms.state)) +} + +// DefaultSampleTypeStrindex returns the defaultsampletypestrindex associated with this Profile. +func (ms Profile) DefaultSampleTypeStrindex() int32 { + return ms.orig.DefaultSampleTypeStrindex +} + +// SetDefaultSampleTypeStrindex replaces the defaultsampletypestrindex associated with this Profile. +func (ms Profile) SetDefaultSampleTypeStrindex(v int32) { + ms.state.AssertMutable() + ms.orig.DefaultSampleTypeStrindex = v +} + +// ProfileID returns the profileid associated with this Profile. +func (ms Profile) ProfileID() ProfileID { + return ProfileID(ms.orig.ProfileId) +} + +// SetProfileID replaces the profileid associated with this Profile. +func (ms Profile) SetProfileID(v ProfileID) { + ms.state.AssertMutable() + ms.orig.ProfileId = data.ProfileID(v) +} + +// AttributeIndices returns the AttributeIndices associated with this Profile. +func (ms Profile) AttributeIndices() pcommon.Int32Slice { + return pcommon.Int32Slice(internal.NewInt32Slice(&ms.orig.AttributeIndices, ms.state)) +} + +// DroppedAttributesCount returns the droppedattributescount associated with this Profile. +func (ms Profile) DroppedAttributesCount() uint32 { + return ms.orig.DroppedAttributesCount +} + +// SetDroppedAttributesCount replaces the droppedattributescount associated with this Profile. +func (ms Profile) SetDroppedAttributesCount(v uint32) { + ms.state.AssertMutable() + ms.orig.DroppedAttributesCount = v +} + +// OriginalPayloadFormat returns the originalpayloadformat associated with this Profile. +func (ms Profile) OriginalPayloadFormat() string { + return ms.orig.OriginalPayloadFormat +} + +// SetOriginalPayloadFormat replaces the originalpayloadformat associated with this Profile. +func (ms Profile) SetOriginalPayloadFormat(v string) { + ms.state.AssertMutable() + ms.orig.OriginalPayloadFormat = v +} + +// OriginalPayload returns the OriginalPayload associated with this Profile. +func (ms Profile) OriginalPayload() pcommon.ByteSlice { + return pcommon.ByteSlice(internal.NewByteSlice(&ms.orig.OriginalPayload, ms.state)) +} + +// CopyTo copies all properties from the current struct overriding the destination. 
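+//
+// CopyTo deep-copies into dest, while MoveTo above transfers the data
+// and resets the source; a small contrast sketch:
+//
+//	a, b := NewProfile(), NewProfile()
+//	a.SetPeriod(100)
+//	a.CopyTo(b) // a is unchanged, b.Period() == 100
+//	a.MoveTo(b) // a is reset to its zero value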
+func (ms Profile) CopyTo(dest Profile) { + dest.state.AssertMutable() + ms.SampleType().CopyTo(dest.SampleType()) + ms.Sample().CopyTo(dest.Sample()) + ms.MappingTable().CopyTo(dest.MappingTable()) + ms.LocationTable().CopyTo(dest.LocationTable()) + ms.LocationIndices().CopyTo(dest.LocationIndices()) + ms.FunctionTable().CopyTo(dest.FunctionTable()) + ms.AttributeTable().CopyTo(dest.AttributeTable()) + ms.AttributeUnits().CopyTo(dest.AttributeUnits()) + ms.LinkTable().CopyTo(dest.LinkTable()) + ms.StringTable().CopyTo(dest.StringTable()) + dest.SetTime(ms.Time()) + dest.SetDuration(ms.Duration()) + dest.SetStartTime(ms.StartTime()) + ms.PeriodType().CopyTo(dest.PeriodType()) + dest.SetPeriod(ms.Period()) + ms.CommentStrindices().CopyTo(dest.CommentStrindices()) + dest.SetDefaultSampleTypeStrindex(ms.DefaultSampleTypeStrindex()) + dest.SetProfileID(ms.ProfileID()) + ms.AttributeIndices().CopyTo(dest.AttributeIndices()) + dest.SetDroppedAttributesCount(ms.DroppedAttributesCount()) + dest.SetOriginalPayloadFormat(ms.OriginalPayloadFormat()) + ms.OriginalPayload().CopyTo(dest.OriginalPayload()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profilesslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profilesslice.go new file mode 100644 index 00000000000..69a6309bbb8 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profilesslice.go @@ -0,0 +1,152 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pprofile + +import ( + "sort" + + "go.opentelemetry.io/collector/pdata/internal" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" +) + +// ProfilesSlice logically represents a slice of Profile. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewProfilesSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ProfilesSlice struct { + orig *[]*otlpprofiles.Profile + state *internal.State +} + +func newProfilesSlice(orig *[]*otlpprofiles.Profile, state *internal.State) ProfilesSlice { + return ProfilesSlice{orig: orig, state: state} +} + +// NewProfilesSlice creates a ProfilesSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. +func NewProfilesSlice() ProfilesSlice { + orig := []*otlpprofiles.Profile(nil) + state := internal.StateMutable + return newProfilesSlice(&orig, &state) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewProfilesSlice()". +func (es ProfilesSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es ProfilesSlice) At(i int) Profile { + return newProfile((*es.orig)[i], es.state) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. 
+// +// Here is how a new ProfilesSlice can be initialized: +// +// es := NewProfilesSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es ProfilesSlice) EnsureCapacity(newCap int) { + es.state.AssertMutable() + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]*otlpprofiles.Profile, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty Profile. +// It returns the newly added Profile. +func (es ProfilesSlice) AppendEmpty() Profile { + es.state.AssertMutable() + *es.orig = append(*es.orig, &otlpprofiles.Profile{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es ProfilesSlice) MoveAndAppendTo(dest ProfilesSlice) { + es.state.AssertMutable() + dest.state.AssertMutable() + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es ProfilesSlice) RemoveIf(f func(Profile) bool) { + es.state.AssertMutable() + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. +func (es ProfilesSlice) CopyTo(dest ProfilesSlice) { + dest.state.AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newProfile((*es.orig)[i], es.state).CopyTo(newProfile((*dest.orig)[i], dest.state)) + } + return + } + origs := make([]otlpprofiles.Profile, srcLen) + wrappers := make([]*otlpprofiles.Profile, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newProfile((*es.orig)[i], es.state).CopyTo(newProfile(wrappers[i], dest.state)) + } + *dest.orig = wrappers +} + +// Sort sorts the Profile elements within ProfilesSlice given the +// provided less function so that two instances of ProfilesSlice +// can be compared. +func (es ProfilesSlice) Sort(less func(a, b Profile) bool) { + es.state.AssertMutable() + sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_resourceprofiles.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_resourceprofiles.go new file mode 100644 index 00000000000..649e5bbf33d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_resourceprofiles.go @@ -0,0 +1,76 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". 
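+
+// A small sketch of MoveAndAppendTo on the ProfilesSlice above: the
+// source contents are appended to (or moved into) the destination and
+// the source is left empty:
+//
+//	src := NewProfilesSlice()
+//	src.AppendEmpty()
+//	dst := NewProfilesSlice()
+//	src.MoveAndAppendTo(dst)
+//	// src.Len() == 0, dst.Len() == 1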
+ +package pprofile + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// ResourceProfiles is a collection of profiles from a Resource. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewResourceProfiles function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ResourceProfiles struct { + orig *otlpprofiles.ResourceProfiles + state *internal.State +} + +func newResourceProfiles(orig *otlpprofiles.ResourceProfiles, state *internal.State) ResourceProfiles { + return ResourceProfiles{orig: orig, state: state} +} + +// NewResourceProfiles creates a new empty ResourceProfiles. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewResourceProfiles() ResourceProfiles { + state := internal.StateMutable + return newResourceProfiles(&otlpprofiles.ResourceProfiles{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms ResourceProfiles) MoveTo(dest ResourceProfiles) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpprofiles.ResourceProfiles{} +} + +// Resource returns the resource associated with this ResourceProfiles. +func (ms ResourceProfiles) Resource() pcommon.Resource { + return pcommon.Resource(internal.NewResource(&ms.orig.Resource, ms.state)) +} + +// SchemaUrl returns the schemaurl associated with this ResourceProfiles. +func (ms ResourceProfiles) SchemaUrl() string { + return ms.orig.SchemaUrl +} + +// SetSchemaUrl replaces the schemaurl associated with this ResourceProfiles. +func (ms ResourceProfiles) SetSchemaUrl(v string) { + ms.state.AssertMutable() + ms.orig.SchemaUrl = v +} + +// ScopeProfiles returns the ScopeProfiles associated with this ResourceProfiles. +func (ms ResourceProfiles) ScopeProfiles() ScopeProfilesSlice { + return newScopeProfilesSlice(&ms.orig.ScopeProfiles, ms.state) +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms ResourceProfiles) CopyTo(dest ResourceProfiles) { + dest.state.AssertMutable() + ms.Resource().CopyTo(dest.Resource()) + dest.SetSchemaUrl(ms.SchemaUrl()) + ms.ScopeProfiles().CopyTo(dest.ScopeProfiles()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_resourceprofilesslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_resourceprofilesslice.go new file mode 100644 index 00000000000..c3f7f1053a6 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_resourceprofilesslice.go @@ -0,0 +1,152 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pprofile + +import ( + "sort" + + "go.opentelemetry.io/collector/pdata/internal" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" +) + +// ResourceProfilesSlice logically represents a slice of ResourceProfiles. +// +// This is a reference type. 
If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewResourceProfilesSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ResourceProfilesSlice struct { + orig *[]*otlpprofiles.ResourceProfiles + state *internal.State +} + +func newResourceProfilesSlice(orig *[]*otlpprofiles.ResourceProfiles, state *internal.State) ResourceProfilesSlice { + return ResourceProfilesSlice{orig: orig, state: state} +} + +// NewResourceProfilesSlice creates a ResourceProfilesSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. +func NewResourceProfilesSlice() ResourceProfilesSlice { + orig := []*otlpprofiles.ResourceProfiles(nil) + state := internal.StateMutable + return newResourceProfilesSlice(&orig, &state) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewResourceProfilesSlice()". +func (es ResourceProfilesSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es ResourceProfilesSlice) At(i int) ResourceProfiles { + return newResourceProfiles((*es.orig)[i], es.state) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new ResourceProfilesSlice can be initialized: +// +// es := NewResourceProfilesSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es ResourceProfilesSlice) EnsureCapacity(newCap int) { + es.state.AssertMutable() + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]*otlpprofiles.ResourceProfiles, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty ResourceProfiles. +// It returns the newly added ResourceProfiles. +func (es ResourceProfilesSlice) AppendEmpty() ResourceProfiles { + es.state.AssertMutable() + *es.orig = append(*es.orig, &otlpprofiles.ResourceProfiles{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es ResourceProfilesSlice) MoveAndAppendTo(dest ResourceProfilesSlice) { + es.state.AssertMutable() + dest.state.AssertMutable() + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es ResourceProfilesSlice) RemoveIf(f func(ResourceProfiles) bool) { + es.state.AssertMutable() + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. 
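+//
+// A compact traversal sketch over the generated hierarchy
+// (ResourceProfiles -> ScopeProfiles -> Profile), using the Len/At
+// iteration documented above:
+//
+//	rps := NewResourceProfilesSlice()
+//	for i := 0; i < rps.Len(); i++ {
+//		sps := rps.At(i).ScopeProfiles()
+//		for j := 0; j < sps.Len(); j++ {
+//			_ = sps.At(j).Profiles().Len()
+//		}
+//	}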
+func (es ResourceProfilesSlice) CopyTo(dest ResourceProfilesSlice) { + dest.state.AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newResourceProfiles((*es.orig)[i], es.state).CopyTo(newResourceProfiles((*dest.orig)[i], dest.state)) + } + return + } + origs := make([]otlpprofiles.ResourceProfiles, srcLen) + wrappers := make([]*otlpprofiles.ResourceProfiles, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newResourceProfiles((*es.orig)[i], es.state).CopyTo(newResourceProfiles(wrappers[i], dest.state)) + } + *dest.orig = wrappers +} + +// Sort sorts the ResourceProfiles elements within ResourceProfilesSlice given the +// provided less function so that two instances of ResourceProfilesSlice +// can be compared. +func (es ResourceProfilesSlice) Sort(less func(a, b ResourceProfiles) bool) { + es.state.AssertMutable() + sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_sample.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_sample.go new file mode 100644 index 00000000000..c62027753d2 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_sample.go @@ -0,0 +1,94 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pprofile + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// Sample represents each record value encountered within a profiled program. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewSample function to create new instances. +// Important: zero-initialized instance is not valid for use. +type Sample struct { + orig *otlpprofiles.Sample + state *internal.State +} + +func newSample(orig *otlpprofiles.Sample, state *internal.State) Sample { + return Sample{orig: orig, state: state} +} + +// NewSample creates a new empty Sample. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewSample() Sample { + state := internal.StateMutable + return newSample(&otlpprofiles.Sample{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms Sample) MoveTo(dest Sample) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpprofiles.Sample{} +} + +// LocationsStartIndex returns the locationsstartindex associated with this Sample. +func (ms Sample) LocationsStartIndex() int32 { + return ms.orig.LocationsStartIndex +} + +// SetLocationsStartIndex replaces the locationsstartindex associated with this Sample. +func (ms Sample) SetLocationsStartIndex(v int32) { + ms.state.AssertMutable() + ms.orig.LocationsStartIndex = v +} + +// LocationsLength returns the locationslength associated with this Sample. 
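+//
+// A minimal sketch of populating a Sample (values are illustrative;
+// Value and AttributeIndices below return pcommon slices, whose Append
+// helpers are assumed here):
+//
+//	s := NewSample()
+//	s.SetLocationsStartIndex(0)
+//	s.SetLocationsLength(2)
+//	s.Value().Append(42)
+//	s.AttributeIndices().Append(0)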
+func (ms Sample) LocationsLength() int32 { + return ms.orig.LocationsLength +} + +// SetLocationsLength replaces the locationslength associated with this Sample. +func (ms Sample) SetLocationsLength(v int32) { + ms.state.AssertMutable() + ms.orig.LocationsLength = v +} + +// Value returns the Value associated with this Sample. +func (ms Sample) Value() pcommon.Int64Slice { + return pcommon.Int64Slice(internal.NewInt64Slice(&ms.orig.Value, ms.state)) +} + +// AttributeIndices returns the AttributeIndices associated with this Sample. +func (ms Sample) AttributeIndices() pcommon.Int32Slice { + return pcommon.Int32Slice(internal.NewInt32Slice(&ms.orig.AttributeIndices, ms.state)) +} + +// TimestampsUnixNano returns the TimestampsUnixNano associated with this Sample. +func (ms Sample) TimestampsUnixNano() pcommon.UInt64Slice { + return pcommon.UInt64Slice(internal.NewUInt64Slice(&ms.orig.TimestampsUnixNano, ms.state)) +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms Sample) CopyTo(dest Sample) { + dest.state.AssertMutable() + dest.SetLocationsStartIndex(ms.LocationsStartIndex()) + dest.SetLocationsLength(ms.LocationsLength()) + ms.Value().CopyTo(dest.Value()) + ms.AttributeIndices().CopyTo(dest.AttributeIndices()) + ms.TimestampsUnixNano().CopyTo(dest.TimestampsUnixNano()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_sampleslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_sampleslice.go new file mode 100644 index 00000000000..cf624434824 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_sampleslice.go @@ -0,0 +1,152 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pprofile + +import ( + "sort" + + "go.opentelemetry.io/collector/pdata/internal" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" +) + +// SampleSlice logically represents a slice of Sample. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewSampleSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type SampleSlice struct { + orig *[]*otlpprofiles.Sample + state *internal.State +} + +func newSampleSlice(orig *[]*otlpprofiles.Sample, state *internal.State) SampleSlice { + return SampleSlice{orig: orig, state: state} +} + +// NewSampleSlice creates a SampleSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. +func NewSampleSlice() SampleSlice { + orig := []*otlpprofiles.Sample(nil) + state := internal.StateMutable + return newSampleSlice(&orig, &state) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewSampleSlice()". +func (es SampleSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es SampleSlice) At(i int) Sample { + return newSample((*es.orig)[i], es.state) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. 
If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new SampleSlice can be initialized: +// +// es := NewSampleSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es SampleSlice) EnsureCapacity(newCap int) { + es.state.AssertMutable() + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]*otlpprofiles.Sample, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty Sample. +// It returns the newly added Sample. +func (es SampleSlice) AppendEmpty() Sample { + es.state.AssertMutable() + *es.orig = append(*es.orig, &otlpprofiles.Sample{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es SampleSlice) MoveAndAppendTo(dest SampleSlice) { + es.state.AssertMutable() + dest.state.AssertMutable() + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es SampleSlice) RemoveIf(f func(Sample) bool) { + es.state.AssertMutable() + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. +func (es SampleSlice) CopyTo(dest SampleSlice) { + dest.state.AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newSample((*es.orig)[i], es.state).CopyTo(newSample((*dest.orig)[i], dest.state)) + } + return + } + origs := make([]otlpprofiles.Sample, srcLen) + wrappers := make([]*otlpprofiles.Sample, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newSample((*es.orig)[i], es.state).CopyTo(newSample(wrappers[i], dest.state)) + } + *dest.orig = wrappers +} + +// Sort sorts the Sample elements within SampleSlice given the +// provided less function so that two instances of SampleSlice +// can be compared. +func (es SampleSlice) Sort(less func(a, b Sample) bool) { + es.state.AssertMutable() + sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_scopeprofiles.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_scopeprofiles.go new file mode 100644 index 00000000000..920b578a5be --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_scopeprofiles.go @@ -0,0 +1,76 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". 
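+
+// A short sketch of SampleSlice.CopyTo above; note from the generated
+// code that a destination with sufficient capacity is reused rather
+// than reallocated:
+//
+//	src := NewSampleSlice()
+//	src.AppendEmpty().SetLocationsLength(1)
+//	dst := NewSampleSlice()
+//	src.CopyTo(dst) // dst is now a deep copy of src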
+ +package pprofile + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// ScopeProfiles is a collection of profiles from a LibraryInstrumentation. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewScopeProfiles function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ScopeProfiles struct { + orig *otlpprofiles.ScopeProfiles + state *internal.State +} + +func newScopeProfiles(orig *otlpprofiles.ScopeProfiles, state *internal.State) ScopeProfiles { + return ScopeProfiles{orig: orig, state: state} +} + +// NewScopeProfiles creates a new empty ScopeProfiles. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewScopeProfiles() ScopeProfiles { + state := internal.StateMutable + return newScopeProfiles(&otlpprofiles.ScopeProfiles{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms ScopeProfiles) MoveTo(dest ScopeProfiles) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpprofiles.ScopeProfiles{} +} + +// Scope returns the scope associated with this ScopeProfiles. +func (ms ScopeProfiles) Scope() pcommon.InstrumentationScope { + return pcommon.InstrumentationScope(internal.NewInstrumentationScope(&ms.orig.Scope, ms.state)) +} + +// SchemaUrl returns the schemaurl associated with this ScopeProfiles. +func (ms ScopeProfiles) SchemaUrl() string { + return ms.orig.SchemaUrl +} + +// SetSchemaUrl replaces the schemaurl associated with this ScopeProfiles. +func (ms ScopeProfiles) SetSchemaUrl(v string) { + ms.state.AssertMutable() + ms.orig.SchemaUrl = v +} + +// Profiles returns the Profiles associated with this ScopeProfiles. +func (ms ScopeProfiles) Profiles() ProfilesSlice { + return newProfilesSlice(&ms.orig.Profiles, ms.state) +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms ScopeProfiles) CopyTo(dest ScopeProfiles) { + dest.state.AssertMutable() + ms.Scope().CopyTo(dest.Scope()) + dest.SetSchemaUrl(ms.SchemaUrl()) + ms.Profiles().CopyTo(dest.Profiles()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_scopeprofilesslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_scopeprofilesslice.go new file mode 100644 index 00000000000..3f2b452cece --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_scopeprofilesslice.go @@ -0,0 +1,152 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pprofile + +import ( + "sort" + + "go.opentelemetry.io/collector/pdata/internal" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" +) + +// ScopeProfilesSlice logically represents a slice of ScopeProfiles. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewScopeProfilesSlice function to create new instances. 
+// Important: zero-initialized instance is not valid for use. +type ScopeProfilesSlice struct { + orig *[]*otlpprofiles.ScopeProfiles + state *internal.State +} + +func newScopeProfilesSlice(orig *[]*otlpprofiles.ScopeProfiles, state *internal.State) ScopeProfilesSlice { + return ScopeProfilesSlice{orig: orig, state: state} +} + +// NewScopeProfilesSlice creates a ScopeProfilesSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. +func NewScopeProfilesSlice() ScopeProfilesSlice { + orig := []*otlpprofiles.ScopeProfiles(nil) + state := internal.StateMutable + return newScopeProfilesSlice(&orig, &state) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewScopeProfilesSlice()". +func (es ScopeProfilesSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es ScopeProfilesSlice) At(i int) ScopeProfiles { + return newScopeProfiles((*es.orig)[i], es.state) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new ScopeProfilesSlice can be initialized: +// +// es := NewScopeProfilesSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es ScopeProfilesSlice) EnsureCapacity(newCap int) { + es.state.AssertMutable() + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]*otlpprofiles.ScopeProfiles, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty ScopeProfiles. +// It returns the newly added ScopeProfiles. +func (es ScopeProfilesSlice) AppendEmpty() ScopeProfiles { + es.state.AssertMutable() + *es.orig = append(*es.orig, &otlpprofiles.ScopeProfiles{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es ScopeProfilesSlice) MoveAndAppendTo(dest ScopeProfilesSlice) { + es.state.AssertMutable() + dest.state.AssertMutable() + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es ScopeProfilesSlice) RemoveIf(f func(ScopeProfiles) bool) { + es.state.AssertMutable() + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. 
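+// +// Unlike MoveAndAppendTo, CopyTo deep-copies every element and leaves the +// source slice untouched. A short sketch (src is assumed to be a populated +// ScopeProfilesSlice): +// +//    dst := NewScopeProfilesSlice() +//    src.CopyTo(dst)          // src is still valid and unchanged +//    src.MoveAndAppendTo(dst) // src is cleared; its elements are appended to dst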
+func (es ScopeProfilesSlice) CopyTo(dest ScopeProfilesSlice) { + dest.state.AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newScopeProfiles((*es.orig)[i], es.state).CopyTo(newScopeProfiles((*dest.orig)[i], dest.state)) + } + return + } + origs := make([]otlpprofiles.ScopeProfiles, srcLen) + wrappers := make([]*otlpprofiles.ScopeProfiles, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newScopeProfiles((*es.orig)[i], es.state).CopyTo(newScopeProfiles(wrappers[i], dest.state)) + } + *dest.orig = wrappers +} + +// Sort sorts the ScopeProfiles elements within ScopeProfilesSlice given the +// provided less function so that two instances of ScopeProfilesSlice +// can be compared. +func (es ScopeProfilesSlice) Sort(less func(a, b ScopeProfiles) bool) { + es.state.AssertMutable() + sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_valuetype.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_valuetype.go new file mode 100644 index 00000000000..2afc8614048 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_valuetype.go @@ -0,0 +1,87 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pprofile + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" +) + +// ValueType describes the type and units of a value, with an optional aggregation temporality. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewValueType function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ValueType struct { + orig *otlpprofiles.ValueType + state *internal.State +} + +func newValueType(orig *otlpprofiles.ValueType, state *internal.State) ValueType { + return ValueType{orig: orig, state: state} +} + +// NewValueType creates a new empty ValueType. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewValueType() ValueType { + state := internal.StateMutable + return newValueType(&otlpprofiles.ValueType{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms ValueType) MoveTo(dest ValueType) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpprofiles.ValueType{} +} + +// TypeStrindex returns the typestrindex associated with this ValueType. +func (ms ValueType) TypeStrindex() int32 { + return ms.orig.TypeStrindex +} + +// SetTypeStrindex replaces the typestrindex associated with this ValueType. +func (ms ValueType) SetTypeStrindex(v int32) { + ms.state.AssertMutable() + ms.orig.TypeStrindex = v +} + +// UnitStrindex returns the unitstrindex associated with this ValueType. +func (ms ValueType) UnitStrindex() int32 { + return ms.orig.UnitStrindex +} + +// SetUnitStrindex replaces the unitstrindex associated with this ValueType. 
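+// +// Type and unit are stored as indices into the enclosing Profile's string +// table rather than as strings. Resolving the unit looks roughly like this +// (a sketch; prof is assumed to be the Profile that owns this ValueType): +// +//    vt := prof.SampleType().At(0) +//    unit := prof.StringTable().At(int(vt.UnitStrindex()))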
+func (ms ValueType) SetUnitStrindex(v int32) { + ms.state.AssertMutable() + ms.orig.UnitStrindex = v +} + +// AggregationTemporality returns the aggregationtemporality associated with this ValueType. +func (ms ValueType) AggregationTemporality() otlpprofiles.AggregationTemporality { + return ms.orig.AggregationTemporality +} + +// SetAggregationTemporality replaces the aggregationtemporality associated with this ValueType. +func (ms ValueType) SetAggregationTemporality(v otlpprofiles.AggregationTemporality) { + ms.state.AssertMutable() + ms.orig.AggregationTemporality = v +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms ValueType) CopyTo(dest ValueType) { + dest.state.AssertMutable() + dest.SetTypeStrindex(ms.TypeStrindex()) + dest.SetUnitStrindex(ms.UnitStrindex()) + dest.SetAggregationTemporality(ms.AggregationTemporality()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_valuetypeslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_valuetypeslice.go new file mode 100644 index 00000000000..65d5f78bd7d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_valuetypeslice.go @@ -0,0 +1,152 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pprofile + +import ( + "sort" + + "go.opentelemetry.io/collector/pdata/internal" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" +) + +// ValueTypeSlice logically represents a slice of ValueType. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewValueTypeSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ValueTypeSlice struct { + orig *[]*otlpprofiles.ValueType + state *internal.State +} + +func newValueTypeSlice(orig *[]*otlpprofiles.ValueType, state *internal.State) ValueTypeSlice { + return ValueTypeSlice{orig: orig, state: state} +} + +// NewValueTypeSlice creates a ValueTypeSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. +func NewValueTypeSlice() ValueTypeSlice { + orig := []*otlpprofiles.ValueType(nil) + state := internal.StateMutable + return newValueTypeSlice(&orig, &state) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewValueTypeSlice()". +func (es ValueTypeSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es ValueTypeSlice) At(i int) ValueType { + return newValueType((*es.orig)[i], es.state) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new ValueTypeSlice can be initialized: +// +// es := NewValueTypeSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. 
+// } +func (es ValueTypeSlice) EnsureCapacity(newCap int) { + es.state.AssertMutable() + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]*otlpprofiles.ValueType, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty ValueType. +// It returns the newly added ValueType. +func (es ValueTypeSlice) AppendEmpty() ValueType { + es.state.AssertMutable() + *es.orig = append(*es.orig, &otlpprofiles.ValueType{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es ValueTypeSlice) MoveAndAppendTo(dest ValueTypeSlice) { + es.state.AssertMutable() + dest.state.AssertMutable() + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es ValueTypeSlice) RemoveIf(f func(ValueType) bool) { + es.state.AssertMutable() + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. +func (es ValueTypeSlice) CopyTo(dest ValueTypeSlice) { + dest.state.AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newValueType((*es.orig)[i], es.state).CopyTo(newValueType((*dest.orig)[i], dest.state)) + } + return + } + origs := make([]otlpprofiles.ValueType, srcLen) + wrappers := make([]*otlpprofiles.ValueType, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newValueType((*es.orig)[i], es.state).CopyTo(newValueType(wrappers[i], dest.state)) + } + *dest.orig = wrappers +} + +// Sort sorts the ValueType elements within ValueTypeSlice given the +// provided less function so that two instances of ValueTypeSlice +// can be compared. +func (es ValueTypeSlice) Sort(less func(a, b ValueType) bool) { + es.state.AssertMutable() + sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/json.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/json.go new file mode 100644 index 00000000000..8300fb5955b --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/json.go @@ -0,0 +1,356 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile" + +import ( + "bytes" + "fmt" + + jsoniter "github.com/json-iterator/go" + + "go.opentelemetry.io/collector/pdata/internal" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/otlp" +) + +// JSONMarshaler marshals pprofile.Profiles to JSON bytes using the OTLP/JSON format. +type JSONMarshaler struct{} + +// MarshalProfiles to the OTLP/JSON format. 
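+// +// A minimal round-trip sketch (td is assumed to be a populated Profiles): +// +//    data, err := (&JSONMarshaler{}).MarshalProfiles(td) +//    if err != nil { /* handle error */ } +//    td2, err := (&JSONUnmarshaler{}).UnmarshalProfiles(data)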
+func (*JSONMarshaler) MarshalProfiles(td Profiles) ([]byte, error) { + buf := bytes.Buffer{} + pb := internal.ProfilesToProto(internal.Profiles(td)) + err := json.Marshal(&buf, &pb) + return buf.Bytes(), err +} + +// JSONUnmarshaler unmarshals OTLP/JSON formatted-bytes to pprofile.Profiles. +type JSONUnmarshaler struct{} + +// UnmarshalProfiles from OTLP/JSON format into pprofile.Profiles. +func (*JSONUnmarshaler) UnmarshalProfiles(buf []byte) (Profiles, error) { + iter := jsoniter.ConfigFastest.BorrowIterator(buf) + defer jsoniter.ConfigFastest.ReturnIterator(iter) + td := NewProfiles() + td.unmarshalJsoniter(iter) + if iter.Error != nil { + return Profiles{}, iter.Error + } + otlp.MigrateProfiles(td.getOrig().ResourceProfiles) + return td, nil +} + +func (p Profiles) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "resourceProfiles", "resource_profiles": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + p.ResourceProfiles().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + default: + iter.Skip() + } + return true + }) +} + +func (rp ResourceProfiles) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "resource": + json.ReadResource(iter, internal.GetOrigResource(internal.Resource(rp.Resource()))) + case "scopeProfiles", "scope_profiles": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + rp.ScopeProfiles().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + case "schemaUrl", "schema_url": + rp.orig.SchemaUrl = iter.ReadString() + default: + iter.Skip() + } + return true + }) +} + +func (sp ScopeProfiles) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "scope": + json.ReadScope(iter, &sp.orig.Scope) + case "profiles": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + sp.Profiles().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + case "schemaUrl", "schema_url": + sp.orig.SchemaUrl = iter.ReadString() + default: + iter.Skip() + } + return true + }) +} + +func (p Profile) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "profileId", "profile_id": + if err := p.orig.ProfileId.UnmarshalJSON([]byte(iter.ReadString())); err != nil { + iter.ReportError("profileContainer.profileId", fmt.Sprintf("parse profile_id:%v", err)) + } + case "sampleType", "sample_type": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + p.SampleType().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + case "sample": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + p.Sample().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + case "mappingTable", "mapping_table": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + p.MappingTable().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + case "locationTable", "location_table": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + p.LocationTable().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + case "locationIndices", "location_indices": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + p.LocationIndices().Append(json.ReadInt32(iter)) + return true + }) + case "functionTable", "function_table": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + p.FunctionTable().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + case 
"attributeTable", "attribute_table": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + p.orig.AttributeTable = append(p.orig.AttributeTable, json.ReadAttribute(iter)) + return true + }) + case "attributeUnits", "attribute_units": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + p.AttributeUnits().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + case "linkTable", "link_table": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + p.LinkTable().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + case "stringTable", "string_table": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + p.StringTable().Append(iter.ReadString()) + return true + }) + case "timeNanos", "time_nanos": + p.orig.TimeNanos = json.ReadInt64(iter) + case "durationNanos", "duration_nanos": + p.orig.DurationNanos = json.ReadInt64(iter) + case "periodType", "period_type": + p.PeriodType().unmarshalJsoniter(iter) + case "period": + p.orig.Period = json.ReadInt64(iter) + case "commentStrindices", "comment_strindices": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + p.CommentStrindices().Append(json.ReadInt32(iter)) + return true + }) + case "defaultSampleTypeStrindex", "default_sample_type_strindex": + p.orig.DefaultSampleTypeStrindex = json.ReadInt32(iter) + case "attributeIndices", "attribute_indices": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + p.AttributeIndices().Append(json.ReadInt32(iter)) + return true + }) + case "droppedAttributesCount", "dropped_attributes_count": + p.orig.DroppedAttributesCount = json.ReadUint32(iter) + case "originalPayloadFormat", "original_payload_format": + p.orig.OriginalPayloadFormat = iter.ReadString() + case "originalPayload", "original_payload": + p.orig.OriginalPayload = iter.ReadStringAsSlice() + default: + iter.Skip() + } + return true + }) +} + +func (vt ValueType) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "typeStrindex", "type_strindex": + vt.orig.TypeStrindex = json.ReadInt32(iter) + case "unitStrindex", "unit_strindex": + vt.orig.UnitStrindex = json.ReadInt32(iter) + case "aggregationTemporality", "aggregation_temporality": + vt.orig.AggregationTemporality = otlpprofiles.AggregationTemporality(json.ReadInt32(iter)) + default: + iter.Skip() + } + return true + }) +} + +func (st Sample) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "locationsStartIndex", "locations_start_index": + st.orig.LocationsStartIndex = json.ReadInt32(iter) + case "locationsLength", "locations_length": + st.orig.LocationsLength = json.ReadInt32(iter) + case "value": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + st.Value().Append(json.ReadInt64(iter)) + return true + }) + case "attributeIndices", "attribute_indices": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + st.AttributeIndices().Append(json.ReadInt32(iter)) + return true + }) + case "linkIndex", "link_index": + st.orig.LinkIndex_ = &otlpprofiles.Sample_LinkIndex{LinkIndex: json.ReadInt32(iter)} + case "timestampsUnixNano", "timestamps_unix_nano": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + st.TimestampsUnixNano().Append(json.ReadUint64(iter)) + return true + }) + default: + iter.Skip() + } + return true + }) +} + +func (m Mapping) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "memoryStart", 
"memory_start": + m.orig.MemoryStart = json.ReadUint64(iter) + case "memoryLimit", "memory_limit": + m.orig.MemoryLimit = json.ReadUint64(iter) + case "fileOffset", "file_offset": + m.orig.FileOffset = json.ReadUint64(iter) + case "filenameStrindex", "filename_strindex": + m.orig.FilenameStrindex = json.ReadInt32(iter) + case "attributeIndices", "attribute_indices": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + m.AttributeIndices().Append(json.ReadInt32(iter)) + return true + }) + case "hasFunctions", "has_functions": + m.orig.HasFunctions = iter.ReadBool() + case "hasFilenames", "has_filenames": + m.orig.HasFilenames = iter.ReadBool() + case "hasLineNumbers", "has_line_numbers": + m.orig.HasLineNumbers = iter.ReadBool() + case "hasInlineFrames", "has_inline_frames": + m.orig.HasInlineFrames = iter.ReadBool() + default: + iter.Skip() + } + return true + }) +} + +func (l Location) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "mappingIndex", "mapping_index": + l.orig.MappingIndex_ = &otlpprofiles.Location_MappingIndex{MappingIndex: json.ReadInt32(iter)} + case "address": + l.orig.Address = json.ReadUint64(iter) + case "line": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + l.Line().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + case "isFolded", "is_folded": + l.orig.IsFolded = iter.ReadBool() + case "attributeIndices", "attribute_indices": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + l.AttributeIndices().Append(json.ReadInt32(iter)) + return true + }) + default: + iter.Skip() + } + return true + }) +} + +func (l Line) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "functionIndex", "function_index": + l.orig.FunctionIndex = json.ReadInt32(iter) + case "line": + l.orig.Line = json.ReadInt64(iter) + case "column": + l.orig.Column = json.ReadInt64(iter) + default: + iter.Skip() + } + return true + }) +} + +func (fn Function) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "nameStrindex", "name_strindex": + fn.orig.NameStrindex = json.ReadInt32(iter) + case "systemNameStrindex", "system_name_strindex": + fn.orig.SystemNameStrindex = json.ReadInt32(iter) + case "filenameStrindex", "filename_strindex": + fn.orig.FilenameStrindex = json.ReadInt32(iter) + case "startLine", "start_line": + fn.orig.StartLine = json.ReadInt64(iter) + default: + iter.Skip() + } + return true + }) +} + +func (at AttributeUnit) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "attributeKeyStrindex", "attribute_key_strindex": + at.orig.AttributeKeyStrindex = json.ReadInt32(iter) + case "unitStrindex", "unit_strindex": + at.orig.UnitStrindex = json.ReadInt32(iter) + default: + iter.Skip() + } + return true + }) +} + +func (l Link) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "traceId", "trace_id": + if err := l.orig.TraceId.UnmarshalJSON([]byte(iter.ReadString())); err != nil { + iter.ReportError("link.traceId", fmt.Sprintf("parse trace_id:%v", err)) + } + case "spanId", "span_id": + if err := l.orig.SpanId.UnmarshalJSON([]byte(iter.ReadString())); err != nil { + iter.ReportError("link.spanId", fmt.Sprintf("parse span_id:%v", err)) + } + default: + iter.Skip() 
+ } + return true + }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/pb.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/pb.go new file mode 100644 index 00000000000..5da9ceda2f0 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/pb.go @@ -0,0 +1,31 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile" + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpprofile "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" +) + +var _ MarshalSizer = (*ProtoMarshaler)(nil) + +type ProtoMarshaler struct{} + +func (e *ProtoMarshaler) MarshalProfiles(td Profiles) ([]byte, error) { + pb := internal.ProfilesToProto(internal.Profiles(td)) + return pb.Marshal() +} + +func (e *ProtoMarshaler) ProfilesSize(td Profiles) int { + pb := internal.ProfilesToProto(internal.Profiles(td)) + return pb.Size() +} + +type ProtoUnmarshaler struct{} + +func (d *ProtoUnmarshaler) UnmarshalProfiles(buf []byte) (Profiles, error) { + pb := otlpprofile.ProfilesData{} + err := pb.Unmarshal(buf) + return Profiles(internal.ProfilesFromProto(pb)), err +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/generated_exportpartialsuccess.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/generated_exportpartialsuccess.go new file mode 100644 index 00000000000..284c10f096a --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/generated_exportpartialsuccess.go @@ -0,0 +1,75 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pprofileotlp + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpcollectorprofile "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development" +) + +// ExportPartialSuccess represents the details of a partially successful export request. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewExportPartialSuccess function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ExportPartialSuccess struct { + orig *otlpcollectorprofile.ExportProfilesPartialSuccess + state *internal.State +} + +func newExportPartialSuccess(orig *otlpcollectorprofile.ExportProfilesPartialSuccess, state *internal.State) ExportPartialSuccess { + return ExportPartialSuccess{orig: orig, state: state} +} + +// NewExportPartialSuccess creates a new empty ExportPartialSuccess. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. 
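+// +// For example, constructing one in a test (a sketch): +// +//    ps := NewExportPartialSuccess() +//    ps.SetRejectedProfiles(3) +//    ps.SetErrorMessage("some profiles were rejected")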
+func NewExportPartialSuccess() ExportPartialSuccess { + state := internal.StateMutable + return newExportPartialSuccess(&otlpcollectorprofile.ExportProfilesPartialSuccess{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms ExportPartialSuccess) MoveTo(dest ExportPartialSuccess) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpcollectorprofile.ExportProfilesPartialSuccess{} +} + +// RejectedProfiles returns the rejectedprofiles associated with this ExportPartialSuccess. +func (ms ExportPartialSuccess) RejectedProfiles() int64 { + return ms.orig.RejectedProfiles +} + +// SetRejectedProfiles replaces the rejectedprofiles associated with this ExportPartialSuccess. +func (ms ExportPartialSuccess) SetRejectedProfiles(v int64) { + ms.state.AssertMutable() + ms.orig.RejectedProfiles = v +} + +// ErrorMessage returns the errormessage associated with this ExportPartialSuccess. +func (ms ExportPartialSuccess) ErrorMessage() string { + return ms.orig.ErrorMessage +} + +// SetErrorMessage replaces the errormessage associated with this ExportPartialSuccess. +func (ms ExportPartialSuccess) SetErrorMessage(v string) { + ms.state.AssertMutable() + ms.orig.ErrorMessage = v +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms ExportPartialSuccess) CopyTo(dest ExportPartialSuccess) { + dest.state.AssertMutable() + dest.SetRejectedProfiles(ms.RejectedProfiles()) + dest.SetErrorMessage(ms.ErrorMessage()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/grpc.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/grpc.go new file mode 100644 index 00000000000..cb6c5e8093e --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/grpc.go @@ -0,0 +1,91 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pprofileotlp // import "go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp" + +import ( + "context" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "go.opentelemetry.io/collector/pdata/internal" + otlpcollectorprofile "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development" + "go.opentelemetry.io/collector/pdata/internal/otlp" +) + +// GRPCClient is the client API for OTLP-GRPC Profiles service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type GRPCClient interface { + // Export pprofile.Profiles to the server. + // + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(ctx context.Context, request ExportRequest, opts ...grpc.CallOption) (ExportResponse, error) + + // unexported disallow implementation of the GRPCClient. + unexported() +} + +// NewGRPCClient returns a new GRPCClient connected using the given connection. +func NewGRPCClient(cc *grpc.ClientConn) GRPCClient { + return &grpcClient{rawClient: otlpcollectorprofile.NewProfilesServiceClient(cc)} +} + +type grpcClient struct { + rawClient otlpcollectorprofile.ProfilesServiceClient +} + +// Export implements the Client interface. 
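+// +// Typical client-side usage (a sketch; ctx, an established *grpc.ClientConn +// named conn, and a pprofile.Profiles named td are assumed): +// +//    client := NewGRPCClient(conn) +//    resp, err := client.Export(ctx, NewExportRequestFromProfiles(td))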
+func (c *grpcClient) Export(ctx context.Context, request ExportRequest, opts ...grpc.CallOption) (ExportResponse, error) { + rsp, err := c.rawClient.Export(ctx, request.orig, opts...) + if err != nil { + return ExportResponse{}, err + } + state := internal.StateMutable + return ExportResponse{orig: rsp, state: &state}, err +} + +func (c *grpcClient) unexported() {} + +// GRPCServer is the server API for OTLP gRPC ProfilesService service. +// Implementations MUST embed UnimplementedGRPCServer. +type GRPCServer interface { + // Export is called every time a new request is received. + // + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(context.Context, ExportRequest) (ExportResponse, error) + + // unexported disallow implementation of the GRPCServer. + unexported() +} + +var _ GRPCServer = (*UnimplementedGRPCServer)(nil) + +// UnimplementedGRPCServer MUST be embedded to have forward compatible implementations. +type UnimplementedGRPCServer struct{} + +func (*UnimplementedGRPCServer) Export(context.Context, ExportRequest) (ExportResponse, error) { + return ExportResponse{}, status.Errorf(codes.Unimplemented, "method Export not implemented") +} + +func (*UnimplementedGRPCServer) unexported() {} + +// RegisterGRPCServer registers the GRPCServer to the grpc.Server. +func RegisterGRPCServer(s *grpc.Server, srv GRPCServer) { + otlpcollectorprofile.RegisterProfilesServiceServer(s, &rawProfilesServer{srv: srv}) +} + +type rawProfilesServer struct { + srv GRPCServer +} + +func (s rawProfilesServer) Export(ctx context.Context, request *otlpcollectorprofile.ExportProfilesServiceRequest) (*otlpcollectorprofile.ExportProfilesServiceResponse, error) { + otlp.MigrateProfiles(request.ResourceProfiles) + state := internal.StateMutable + rsp, err := s.srv.Export(ctx, ExportRequest{orig: request, state: &state}) + return rsp.orig, err +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/request.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/request.go new file mode 100644 index 00000000000..8c304944cab --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/request.go @@ -0,0 +1,79 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pprofileotlp // import "go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp" + +import ( + "bytes" + + "go.opentelemetry.io/collector/pdata/internal" + otlpcollectorprofile "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development" + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/otlp" + "go.opentelemetry.io/collector/pdata/pprofile" +) + +var jsonUnmarshaler = &pprofile.JSONUnmarshaler{} + +// ExportRequest represents the request for gRPC/HTTP client/server. +// It's a wrapper for pprofile.Profiles data. +type ExportRequest struct { + orig *otlpcollectorprofile.ExportProfilesServiceRequest + state *internal.State +} + +// NewExportRequest returns an empty ExportRequest. +func NewExportRequest() ExportRequest { + state := internal.StateMutable + return ExportRequest{ + orig: &otlpcollectorprofile.ExportProfilesServiceRequest{}, + state: &state, + } +} + +// NewExportRequestFromProfiles returns an ExportRequest from pprofile.Profiles. +// Because ExportRequest is a wrapper for pprofile.Profiles, +// any changes to the provided Profiles struct will be reflected in the ExportRequest and vice versa.
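+// +// For example (a sketch): +// +//    td := pprofile.NewProfiles() +//    req := NewExportRequestFromProfiles(td) +//    td.ResourceProfiles().AppendEmpty() // immediately visible via req.Profiles()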
+func NewExportRequestFromProfiles(td pprofile.Profiles) ExportRequest { + return ExportRequest{ + orig: internal.GetOrigProfiles(internal.Profiles(td)), + state: internal.GetProfilesState(internal.Profiles(td)), + } +} + +// MarshalProto marshals ExportRequest into proto bytes. +func (ms ExportRequest) MarshalProto() ([]byte, error) { + return ms.orig.Marshal() +} + +// UnmarshalProto unmarshalls ExportRequest from proto bytes. +func (ms ExportRequest) UnmarshalProto(data []byte) error { + if err := ms.orig.Unmarshal(data); err != nil { + return err + } + otlp.MigrateProfiles(ms.orig.ResourceProfiles) + return nil +} + +// MarshalJSON marshals ExportRequest into JSON bytes. +func (ms ExportRequest) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + if err := json.Marshal(&buf, ms.orig); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// UnmarshalJSON unmarshalls ExportRequest from JSON bytes. +func (ms ExportRequest) UnmarshalJSON(data []byte) error { + td, err := jsonUnmarshaler.UnmarshalProfiles(data) + if err != nil { + return err + } + *ms.orig = *internal.GetOrigProfiles(internal.Profiles(td)) + return nil +} + +func (ms ExportRequest) Profiles() pprofile.Profiles { + return pprofile.Profiles(internal.NewProfiles(ms.orig, ms.state)) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/response.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/response.go new file mode 100644 index 00000000000..5b87f8b3418 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/response.go @@ -0,0 +1,87 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pprofileotlp // import "go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp" + +import ( + "bytes" + + jsoniter "github.com/json-iterator/go" + + "go.opentelemetry.io/collector/pdata/internal" + otlpcollectorprofile "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development" + "go.opentelemetry.io/collector/pdata/internal/json" +) + +// ExportResponse represents the response for gRPC/HTTP client/server. +type ExportResponse struct { + orig *otlpcollectorprofile.ExportProfilesServiceResponse + state *internal.State +} + +// NewExportResponse returns an empty ExportResponse. +func NewExportResponse() ExportResponse { + state := internal.StateMutable + return ExportResponse{ + orig: &otlpcollectorprofile.ExportProfilesServiceResponse{}, + state: &state, + } +} + +// MarshalProto marshals ExportResponse into proto bytes. +func (ms ExportResponse) MarshalProto() ([]byte, error) { + return ms.orig.Marshal() +} + +// UnmarshalProto unmarshalls ExportResponse from proto bytes. +func (ms ExportResponse) UnmarshalProto(data []byte) error { + return ms.orig.Unmarshal(data) +} + +// MarshalJSON marshals ExportResponse into JSON bytes. +func (ms ExportResponse) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + if err := json.Marshal(&buf, ms.orig); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// UnmarshalJSON unmarshalls ExportResponse from JSON bytes. 
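+// +// Field names may be camelCase or snake_case, for example (a sketch): +// +//    resp := NewExportResponse() +//    err := resp.UnmarshalJSON([]byte(`{"partialSuccess":{"rejectedProfiles":1,"errorMessage":"oops"}}`))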
+func (ms ExportResponse) UnmarshalJSON(data []byte) error { + iter := jsoniter.ConfigFastest.BorrowIterator(data) + defer jsoniter.ConfigFastest.ReturnIterator(iter) + ms.unmarshalJsoniter(iter) + return iter.Error +} + +func (ms ExportResponse) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "partial_success", "partialSuccess": + ms.PartialSuccess().unmarshalJsoniter(iter) + default: + iter.Skip() + } + return true + }) +} + +// PartialSuccess returns the ExportPartialSuccess associated with this ExportResponse. +func (ms ExportResponse) PartialSuccess() ExportPartialSuccess { + return newExportPartialSuccess(&ms.orig.PartialSuccess, ms.state) +} + +func (ms ExportPartialSuccess) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(_ *jsoniter.Iterator, f string) bool { + switch f { + case "rejected_profiles", "rejectedProfiles": + ms.orig.RejectedProfiles = json.ReadInt64(iter) + case "error_message", "errorMessage": + ms.orig.ErrorMessage = iter.ReadString() + default: + iter.Skip() + } + return true + }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/profileid.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/profileid.go new file mode 100644 index 00000000000..52ee2e812b6 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/profileid.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile" + +import ( + "encoding/hex" + + "go.opentelemetry.io/collector/pdata/internal/data" +) + +var emptyProfileID = ProfileID([16]byte{}) + +// ProfileID is a profile identifier. +type ProfileID [16]byte + +// NewProfileIDEmpty returns a new empty (all zero bytes) ProfileID. +func NewProfileIDEmpty() ProfileID { + return emptyProfileID +} + +// String returns string representation of the ProfileID. +// +// Important: Don't rely on this method to get a string identifier of ProfileID. +// Use hex.EncodeToString explicitly instead. +// This method is meant to implement Stringer interface for display purposes only. +func (ms ProfileID) String() string { + if ms.IsEmpty() { + return "" + } + return hex.EncodeToString(ms[:]) +} + +// IsEmpty returns true if id doesn't contain at least one non-zero byte. +func (ms ProfileID) IsEmpty() bool { + return data.ProfileID(ms).IsEmpty() +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/profiles.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/profiles.go new file mode 100644 index 00000000000..de235a18506 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/profiles.go @@ -0,0 +1,68 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile" + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpcollectorprofile "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development" +) + +// Profiles is the top-level struct that is propagated through the profiles pipeline. +// Use NewProfiles to create a new instance; a zero-initialized instance is not valid for use.
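+// +// A sketch of building a minimal, non-empty Profiles value: +// +//    td := NewProfiles() +//    prof := td.ResourceProfiles().AppendEmpty().ScopeProfiles().AppendEmpty().Profiles().AppendEmpty() +//    prof.Sample().AppendEmpty() // td.SampleCount() now reports 1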
+type Profiles internal.Profiles + +func newProfiles(orig *otlpcollectorprofile.ExportProfilesServiceRequest) Profiles { + state := internal.StateMutable + return Profiles(internal.NewProfiles(orig, &state)) +} + +func (ms Profiles) getOrig() *otlpcollectorprofile.ExportProfilesServiceRequest { + return internal.GetOrigProfiles(internal.Profiles(ms)) +} + +func (ms Profiles) getState() *internal.State { + return internal.GetProfilesState(internal.Profiles(ms)) +} + +// NewProfiles creates a new Profiles struct. +func NewProfiles() Profiles { + return newProfiles(&otlpcollectorprofile.ExportProfilesServiceRequest{}) +} + +// IsReadOnly returns true if this Profiles instance is read-only. +func (ms Profiles) IsReadOnly() bool { + return *ms.getState() == internal.StateReadOnly +} + +// CopyTo copies the Profiles instance overriding the destination. +func (ms Profiles) CopyTo(dest Profiles) { + ms.ResourceProfiles().CopyTo(dest.ResourceProfiles()) +} + +// ResourceProfiles returns the ResourceProfilesSlice associated with this Profiles. +func (ms Profiles) ResourceProfiles() ResourceProfilesSlice { + return newResourceProfilesSlice(&ms.getOrig().ResourceProfiles, internal.GetProfilesState(internal.Profiles(ms))) +} + +// MarkReadOnly marks the Profiles as shared so that no further modifications can be done on it. +func (ms Profiles) MarkReadOnly() { + internal.SetProfilesState(internal.Profiles(ms), internal.StateReadOnly) +} + +// SampleCount calculates the total number of samples. +func (ms Profiles) SampleCount() int { + sampleCount := 0 + rps := ms.ResourceProfiles() + for i := 0; i < rps.Len(); i++ { + rp := rps.At(i) + sps := rp.ScopeProfiles() + for j := 0; j < sps.Len(); j++ { + pcs := sps.At(j).Profiles() + for k := 0; k < pcs.Len(); k++ { + sampleCount += pcs.At(k).Sample().Len() + } + } + } + return sampleCount +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/json.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/json.go index 64bd273ba96..2e35a95913a 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/json.go +++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/json.go @@ -112,6 +112,8 @@ func (dest Span) unmarshalJsoniter(iter *jsoniter.Iterator) { if err := dest.orig.ParentSpanId.UnmarshalJSON([]byte(iter.ReadString())); err != nil { iter.ReportError("readSpan.parentSpanId", fmt.Sprintf("parse parent_span_id:%v", err)) } + case "flags": + dest.orig.Flags = json.ReadUint32(iter) case "name": dest.orig.Name = iter.ReadString() case "kind": @@ -184,6 +186,8 @@ func (dest SpanLink) unmarshalJsoniter(iter *jsoniter.Iterator) { }) case "droppedAttributesCount", "dropped_attributes_count": dest.orig.DroppedAttributesCount = json.ReadUint32(iter) + case "flags": + dest.orig.Flags = json.ReadUint32(iter) default: iter.Skip() } diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp/response.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp/response.go index 78096bff77c..a5017e27627 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp/response.go +++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp/response.go @@ -65,7 +65,6 @@ func (ms ExportResponse) unmarshalJsoniter(iter *jsoniter.Iterator) { } return true }) - } // PartialSuccess returns the ExportLogsPartialSuccess associated with this ExportResponse.
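The two "flags" cases added to ptrace/json.go above let the OTLP/JSON trace unmarshaler populate the span and span-link flags fields, whose low bits carry the W3C trace flags. A minimal sketch of exercising the new path; the payload and the Flags accessor shown here are illustrative assumptions, not part of this diff:

	um := &ptrace.JSONUnmarshaler{}
	td, err := um.UnmarshalTraces([]byte(`{"resourceSpans":[{"scopeSpans":[{"spans":[{"name":"op","flags":1}]}]}]}`))
	if err == nil {
		flags := td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Flags()
		_ = flags // 1
	}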
diff --git a/vendor/go.opentelemetry.io/collector/pdata/testdata/LICENSE b/vendor/go.opentelemetry.io/collector/pdata/testdata/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/testdata/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. 
Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/collector/pdata/testdata/Makefile b/vendor/go.opentelemetry.io/collector/pdata/testdata/Makefile new file mode 100644 index 00000000000..ded7a36092d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/testdata/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/pdata/testdata/common.go b/vendor/go.opentelemetry.io/collector/pdata/testdata/common.go new file mode 100644 index 00000000000..b62cf24724d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/testdata/common.go @@ -0,0 +1,30 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package testdata + +import ( + "go.opentelemetry.io/collector/pdata/pcommon" +) + +func initMetricExemplarAttributes(dest pcommon.Map) { + dest.PutStr("exemplar-attachment", "exemplar-attachment-value") +} + +func initMetricAttributes1(dest pcommon.Map) { + dest.PutStr("label-1", "label-value-1") +} + +func initMetricAttributes2(dest pcommon.Map) { + dest.PutStr("label-2", "label-value-2") +} + +func initMetricAttributes12(dest pcommon.Map) { + initMetricAttributes1(dest) + initMetricAttributes2(dest) +} + +func initMetricAttributes13(dest pcommon.Map) { + initMetricAttributes1(dest) + dest.PutStr("label-3", "label-value-3") +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/testdata/log.go b/vendor/go.opentelemetry.io/collector/pdata/testdata/log.go new file mode 100644 index 00000000000..be9c8b2bce6 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/testdata/log.go @@ -0,0 +1,57 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package testdata + +import ( + "time" + + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" +) + +var logTimestamp = pcommon.NewTimestampFromTime(time.Date(2020, 2, 11, 20, 26, 13, 789, time.UTC)) + +func GenerateLogs(count int) plog.Logs { + ld := plog.NewLogs() + initResource(ld.ResourceLogs().AppendEmpty().Resource()) + logs := ld.ResourceLogs().At(0).ScopeLogs().AppendEmpty().LogRecords() + logs.EnsureCapacity(count) + for i := 0; i < count; i++ { + switch i % 2 { + case 0: + fillLogOne(logs.AppendEmpty()) + case 1: + fillLogTwo(logs.AppendEmpty()) + } + } + return ld +} + +func fillLogOne(log plog.LogRecord) { + log.SetTimestamp(logTimestamp) + log.SetDroppedAttributesCount(1) + log.SetSeverityNumber(plog.SeverityNumberInfo) + log.SetSeverityText("Info") + log.SetSpanID([8]byte{0x01, 0x02, 0x04, 0x08}) + log.SetTraceID([16]byte{0x08, 0x04, 0x02, 0x01}) + + attrs := log.Attributes() + attrs.PutStr("app", "server") + attrs.PutInt("instance_num", 1) + + log.Body().SetStr("This is a log message") +} + +func fillLogTwo(log plog.LogRecord) { + log.SetTimestamp(logTimestamp) + log.SetDroppedAttributesCount(1) + log.SetSeverityNumber(plog.SeverityNumberInfo) + log.SetSeverityText("Info") + + attrs := log.Attributes() + attrs.PutStr("customer", "acme") + attrs.PutStr("env", "dev") + + log.Body().SetStr("something happened") +} diff --git 
a/vendor/go.opentelemetry.io/collector/pdata/testdata/metric.go b/vendor/go.opentelemetry.io/collector/pdata/testdata/metric.go new file mode 100644 index 00000000000..4ccb091a398 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/testdata/metric.go @@ -0,0 +1,297 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package testdata + +import ( + "time" + + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" +) + +var ( + metricStartTimestamp = pcommon.NewTimestampFromTime(time.Date(2020, 2, 11, 20, 26, 12, 321, time.UTC)) + metricExemplarTimestamp = pcommon.NewTimestampFromTime(time.Date(2020, 2, 11, 20, 26, 13, 123, time.UTC)) + metricTimestamp = pcommon.NewTimestampFromTime(time.Date(2020, 2, 11, 20, 26, 13, 789, time.UTC)) +) + +const ( + TestGaugeDoubleMetricName = "gauge-double" + TestGaugeIntMetricName = "gauge-int" + TestSumDoubleMetricName = "sum-double" + TestSumIntMetricName = "sum-int" + TestHistogramMetricName = "histogram" + TestExponentialHistogramMetricName = "exponential-histogram" + TestSummaryMetricName = "summary" +) + +func generateMetricsOneEmptyInstrumentationScope() pmetric.Metrics { + md := pmetric.NewMetrics() + initResource(md.ResourceMetrics().AppendEmpty().Resource()) + md.ResourceMetrics().At(0).ScopeMetrics().AppendEmpty() + return md +} + +func GenerateMetricsAllTypesEmpty() pmetric.Metrics { + md := generateMetricsOneEmptyInstrumentationScope() + ms := md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics() + + doubleGauge := ms.AppendEmpty() + initMetric(doubleGauge, TestGaugeDoubleMetricName, pmetric.MetricTypeGauge) + doubleGauge.Gauge().DataPoints().AppendEmpty() + intGauge := ms.AppendEmpty() + initMetric(intGauge, TestGaugeIntMetricName, pmetric.MetricTypeGauge) + intGauge.Gauge().DataPoints().AppendEmpty() + doubleSum := ms.AppendEmpty() + initMetric(doubleSum, TestSumDoubleMetricName, pmetric.MetricTypeSum) + doubleSum.Sum().DataPoints().AppendEmpty() + intSum := ms.AppendEmpty() + initMetric(intSum, TestSumIntMetricName, pmetric.MetricTypeSum) + intSum.Sum().DataPoints().AppendEmpty() + histogram := ms.AppendEmpty() + initMetric(histogram, TestHistogramMetricName, pmetric.MetricTypeHistogram) + histogram.Histogram().DataPoints().AppendEmpty() + summary := ms.AppendEmpty() + initMetric(summary, TestSummaryMetricName, pmetric.MetricTypeSummary) + summary.Summary().DataPoints().AppendEmpty() + return md +} + +func GenerateMetricsMetricTypeInvalid() pmetric.Metrics { + md := generateMetricsOneEmptyInstrumentationScope() + initMetric(md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().AppendEmpty(), TestSumIntMetricName, pmetric.MetricTypeEmpty) + return md +} + +func GenerateMetricsAllTypes() pmetric.Metrics { + md := generateMetricsOneEmptyInstrumentationScope() + ms := md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics() + initGaugeIntMetric(ms.AppendEmpty()) + initGaugeDoubleMetric(ms.AppendEmpty()) + initSumIntMetric(ms.AppendEmpty()) + initSumDoubleMetric(ms.AppendEmpty()) + initHistogramMetric(ms.AppendEmpty()) + initExponentialHistogramMetric(ms.AppendEmpty()) + initSummaryMetric(ms.AppendEmpty()) + return md +} + +func GenerateMetrics(count int) pmetric.Metrics { + md := generateMetricsOneEmptyInstrumentationScope() + ms := md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics() + ms.EnsureCapacity(count) + for i := 0; i < count; i++ { + switch i % 7 { + case 0: + initGaugeIntMetric(ms.AppendEmpty()) + case 1: + 
initGaugeDoubleMetric(ms.AppendEmpty()) + case 2: + initSumIntMetric(ms.AppendEmpty()) + case 3: + initSumDoubleMetric(ms.AppendEmpty()) + case 4: + initHistogramMetric(ms.AppendEmpty()) + case 5: + initExponentialHistogramMetric(ms.AppendEmpty()) + case 6: + initSummaryMetric(ms.AppendEmpty()) + } + } + return md +} + +func initGaugeIntMetric(im pmetric.Metric) { + initMetric(im, TestGaugeIntMetricName, pmetric.MetricTypeGauge) + + idps := im.Gauge().DataPoints() + idp0 := idps.AppendEmpty() + initMetricAttributes1(idp0.Attributes()) + idp0.SetStartTimestamp(metricStartTimestamp) + idp0.SetTimestamp(metricTimestamp) + idp0.SetIntValue(123) + idp1 := idps.AppendEmpty() + initMetricAttributes2(idp1.Attributes()) + idp1.SetStartTimestamp(metricStartTimestamp) + idp1.SetTimestamp(metricTimestamp) + idp1.SetIntValue(456) +} + +func initGaugeDoubleMetric(im pmetric.Metric) { + initMetric(im, TestGaugeDoubleMetricName, pmetric.MetricTypeGauge) + + idps := im.Gauge().DataPoints() + idp0 := idps.AppendEmpty() + initMetricAttributes12(idp0.Attributes()) + idp0.SetStartTimestamp(metricStartTimestamp) + idp0.SetTimestamp(metricTimestamp) + idp0.SetDoubleValue(1.23) + idp1 := idps.AppendEmpty() + initMetricAttributes13(idp1.Attributes()) + idp1.SetStartTimestamp(metricStartTimestamp) + idp1.SetTimestamp(metricTimestamp) + idp1.SetDoubleValue(4.56) +} + +func initSumIntMetric(im pmetric.Metric) { + initMetric(im, TestSumIntMetricName, pmetric.MetricTypeSum) + + idps := im.Sum().DataPoints() + idp0 := idps.AppendEmpty() + initMetricAttributes1(idp0.Attributes()) + idp0.SetStartTimestamp(metricStartTimestamp) + idp0.SetTimestamp(metricTimestamp) + idp0.SetIntValue(123) + idp1 := idps.AppendEmpty() + initMetricAttributes2(idp1.Attributes()) + idp1.SetStartTimestamp(metricStartTimestamp) + idp1.SetTimestamp(metricTimestamp) + idp1.SetIntValue(456) +} + +func initSumDoubleMetric(dm pmetric.Metric) { + initMetric(dm, TestSumDoubleMetricName, pmetric.MetricTypeSum) + + ddps := dm.Sum().DataPoints() + ddp0 := ddps.AppendEmpty() + initMetricAttributes12(ddp0.Attributes()) + ddp0.SetStartTimestamp(metricStartTimestamp) + ddp0.SetTimestamp(metricTimestamp) + ddp0.SetDoubleValue(1.23) + + ddp1 := ddps.AppendEmpty() + initMetricAttributes13(ddp1.Attributes()) + ddp1.SetStartTimestamp(metricStartTimestamp) + ddp1.SetTimestamp(metricTimestamp) + ddp1.SetDoubleValue(4.56) +} + +func initHistogramMetric(hm pmetric.Metric) { + initMetric(hm, TestHistogramMetricName, pmetric.MetricTypeHistogram) + + hdps := hm.Histogram().DataPoints() + hdp0 := hdps.AppendEmpty() + initMetricAttributes13(hdp0.Attributes()) + hdp0.SetStartTimestamp(metricStartTimestamp) + hdp0.SetTimestamp(metricTimestamp) + hdp0.SetCount(1) + hdp0.SetSum(15) + + hdp1 := hdps.AppendEmpty() + initMetricAttributes2(hdp1.Attributes()) + hdp1.SetStartTimestamp(metricStartTimestamp) + hdp1.SetTimestamp(metricTimestamp) + hdp1.SetCount(1) + hdp1.SetSum(15) + hdp1.SetMin(15) + hdp1.SetMax(15) + hdp1.BucketCounts().FromRaw([]uint64{0, 1}) + exemplar := hdp1.Exemplars().AppendEmpty() + exemplar.SetTraceID([16]byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10}) + exemplar.SetSpanID([8]byte{0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}) + exemplar.SetTimestamp(metricExemplarTimestamp) + exemplar.SetDoubleValue(15) + initMetricExemplarAttributes(exemplar.FilteredAttributes()) + hdp1.ExplicitBounds().FromRaw([]float64{1}) +} + +func initExponentialHistogramMetric(hm pmetric.Metric) { + initMetric(hm, 
TestExponentialHistogramMetricName, pmetric.MetricTypeExponentialHistogram) + + hdps := hm.ExponentialHistogram().DataPoints() + hdp0 := hdps.AppendEmpty() + initMetricAttributes13(hdp0.Attributes()) + hdp0.SetStartTimestamp(metricStartTimestamp) + hdp0.SetTimestamp(metricTimestamp) + hdp0.SetCount(5) + hdp0.SetSum(0.15) + hdp0.SetZeroCount(1) + hdp0.SetScale(1) + + // positive index 1 and 2 are values sqrt(2), 2 at scale 1 + hdp0.Positive().SetOffset(1) + hdp0.Positive().BucketCounts().FromRaw([]uint64{1, 1}) + // negative index -1 and 0 are values -1/sqrt(2), -1 at scale 1 + hdp0.Negative().SetOffset(-1) + hdp0.Negative().BucketCounts().FromRaw([]uint64{1, 1}) + + // The above will print: + // Bucket (-1.414214, -1.000000], Count: 1 + // Bucket (-1.000000, -0.707107], Count: 1 + // Bucket [0, 0], Count: 1 + // Bucket [0.707107, 1.000000), Count: 1 + // Bucket [1.000000, 1.414214), Count: 1 + + hdp1 := hdps.AppendEmpty() + initMetricAttributes2(hdp1.Attributes()) + hdp1.SetStartTimestamp(metricStartTimestamp) + hdp1.SetTimestamp(metricTimestamp) + hdp1.SetCount(3) + hdp1.SetSum(1.25) + hdp1.SetMin(0) + hdp1.SetMax(1) + hdp1.SetZeroCount(1) + hdp1.SetScale(-1) + + // index -1 and 0 are values 0.25, 1 at scale -1 + hdp1.Positive().SetOffset(-1) + hdp1.Positive().BucketCounts().FromRaw([]uint64{1, 1}) + + // The above will print: + // Bucket [0, 0], Count: 1 + // Bucket [0.250000, 1.000000), Count: 1 + // Bucket [1.000000, 4.000000), Count: 1 + + exemplar := hdp1.Exemplars().AppendEmpty() + exemplar.SetTraceID([16]byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10}) + exemplar.SetSpanID([8]byte{0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}) + exemplar.SetTimestamp(metricExemplarTimestamp) + exemplar.SetIntValue(15) + initMetricExemplarAttributes(exemplar.FilteredAttributes()) +} + +func initSummaryMetric(sm pmetric.Metric) { + initMetric(sm, TestSummaryMetricName, pmetric.MetricTypeSummary) + + sdps := sm.Summary().DataPoints() + sdp0 := sdps.AppendEmpty() + initMetricAttributes13(sdp0.Attributes()) + sdp0.SetStartTimestamp(metricStartTimestamp) + sdp0.SetTimestamp(metricTimestamp) + sdp0.SetCount(1) + sdp0.SetSum(15) + + sdp1 := sdps.AppendEmpty() + initMetricAttributes2(sdp1.Attributes()) + sdp1.SetStartTimestamp(metricStartTimestamp) + sdp1.SetTimestamp(metricTimestamp) + sdp1.SetCount(1) + sdp1.SetSum(15) + + quantile := sdp1.QuantileValues().AppendEmpty() + quantile.SetQuantile(0.01) + quantile.SetValue(15) +} + +func initMetric(m pmetric.Metric, name string, ty pmetric.MetricType) { + m.SetName(name) + m.SetDescription("") + m.SetUnit("1") + switch ty { + case pmetric.MetricTypeGauge: + m.SetEmptyGauge() + case pmetric.MetricTypeSum: + sum := m.SetEmptySum() + sum.SetIsMonotonic(true) + sum.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + case pmetric.MetricTypeHistogram: + histo := m.SetEmptyHistogram() + histo.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + case pmetric.MetricTypeExponentialHistogram: + histo := m.SetEmptyExponentialHistogram() + histo.SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + case pmetric.MetricTypeSummary: + m.SetEmptySummary() + } +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/testdata/profile.go b/vendor/go.opentelemetry.io/collector/pdata/testdata/profile.go new file mode 100644 index 00000000000..e67c924f03d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/testdata/profile.go @@ -0,0 +1,66 @@ +// Copyright The 
OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package testdata // import "go.opentelemetry.io/collector/pdata/testdata" + +import ( + "time" + + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pprofile" +) + +var ( + profileStartTimestamp = pcommon.NewTimestampFromTime(time.Date(2020, 2, 11, 20, 26, 12, 321, time.UTC)) + profileEndTimestamp = pcommon.NewTimestampFromTime(time.Date(2020, 2, 11, 20, 26, 13, 789, time.UTC)) +) + +// GenerateProfiles generates dummy profiling data for tests +func GenerateProfiles(profilesCount int) pprofile.Profiles { + td := pprofile.NewProfiles() + initResource(td.ResourceProfiles().AppendEmpty().Resource()) + ss := td.ResourceProfiles().At(0).ScopeProfiles().AppendEmpty().Profiles() + ss.EnsureCapacity(profilesCount) + for i := 0; i < profilesCount; i++ { + switch i % 2 { + case 0: + fillProfileOne(ss.AppendEmpty()) + case 1: + fillProfileTwo(ss.AppendEmpty()) + } + } + return td +} + +func fillProfileOne(profile pprofile.Profile) { + profile.SetProfileID([16]byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10}) + profile.SetTime(profileStartTimestamp) + profile.SetDuration(profileEndTimestamp) + profile.SetDroppedAttributesCount(1) + + attr := profile.AttributeTable().AppendEmpty() + attr.SetKey("key") + attr.Value().SetStr("value") + + sample := profile.Sample().AppendEmpty() + sample.SetLocationsStartIndex(2) + sample.SetLocationsLength(10) + sample.Value().Append(4) + sample.AttributeIndices().Append(0) +} + +func fillProfileTwo(profile pprofile.Profile) { + profile.SetProfileID([16]byte{0x02, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10}) + profile.SetTime(profileStartTimestamp) + profile.SetDuration(profileEndTimestamp) + + attr := profile.AttributeTable().AppendEmpty() + attr.SetKey("key") + attr.Value().SetStr("value") + + sample := profile.Sample().AppendEmpty() + sample.SetLocationsStartIndex(7) + sample.SetLocationsLength(20) + sample.Value().Append(9) + sample.AttributeIndices().Append(0) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/testdata/resource.go b/vendor/go.opentelemetry.io/collector/pdata/testdata/resource.go new file mode 100644 index 00000000000..cac7a404602 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/testdata/resource.go @@ -0,0 +1,10 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package testdata + +import "go.opentelemetry.io/collector/pdata/pcommon" + +func initResource(r pcommon.Resource) { + r.Attributes().PutStr("resource-attr", "resource-attr-val-1") +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/testdata/trace.go b/vendor/go.opentelemetry.io/collector/pdata/testdata/trace.go new file mode 100644 index 00000000000..779430af666 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/testdata/trace.go @@ -0,0 +1,69 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package testdata + +import ( + "time" + + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" +) + +var ( + spanStartTimestamp = pcommon.NewTimestampFromTime(time.Date(2020, 2, 11, 20, 26, 12, 321, time.UTC)) + spanEventTimestamp = pcommon.NewTimestampFromTime(time.Date(2020, 2, 11, 20, 26, 13, 123, time.UTC)) + spanEndTimestamp = pcommon.NewTimestampFromTime(time.Date(2020, 2, 11, 20, 26, 13, 789, time.UTC)) +) + +func GenerateTraces(spanCount int) 
ptrace.Traces { + td := ptrace.NewTraces() + initResource(td.ResourceSpans().AppendEmpty().Resource()) + ss := td.ResourceSpans().At(0).ScopeSpans().AppendEmpty().Spans() + ss.EnsureCapacity(spanCount) + for i := 0; i < spanCount; i++ { + switch i % 2 { + case 0: + fillSpanOne(ss.AppendEmpty()) + case 1: + fillSpanTwo(ss.AppendEmpty()) + } + } + return td +} + +func fillSpanOne(span ptrace.Span) { + span.SetName("operationA") + span.SetStartTimestamp(spanStartTimestamp) + span.SetEndTimestamp(spanEndTimestamp) + span.SetDroppedAttributesCount(1) + span.TraceState().FromRaw("ot=th:0") // 100% sampling + span.SetTraceID([16]byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10}) + span.SetSpanID([8]byte{0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}) + evs := span.Events() + ev0 := evs.AppendEmpty() + ev0.SetTimestamp(spanEventTimestamp) + ev0.SetName("event-with-attr") + ev0.Attributes().PutStr("span-event-attr", "span-event-attr-val") + ev0.SetDroppedAttributesCount(2) + ev1 := evs.AppendEmpty() + ev1.SetTimestamp(spanEventTimestamp) + ev1.SetName("event") + ev1.SetDroppedAttributesCount(2) + span.SetDroppedEventsCount(1) + status := span.Status() + status.SetCode(ptrace.StatusCodeError) + status.SetMessage("status-cancelled") +} + +func fillSpanTwo(span ptrace.Span) { + span.SetName("operationB") + span.SetStartTimestamp(spanStartTimestamp) + span.SetEndTimestamp(spanEndTimestamp) + link0 := span.Links().AppendEmpty() + link0.Attributes().PutStr("span-link-attr", "span-link-attr-val") + link0.SetDroppedAttributesCount(4) + link1 := span.Links().AppendEmpty() + link1.SetDroppedAttributesCount(4) + span.SetDroppedLinksCount(3) +} diff --git a/vendor/go.opentelemetry.io/collector/pipeline/LICENSE b/vendor/go.opentelemetry.io/collector/pipeline/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pipeline/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/collector/pipeline/Makefile b/vendor/go.opentelemetry.io/collector/pipeline/Makefile new file mode 100644 index 00000000000..39734bfaebb --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pipeline/Makefile @@ -0,0 +1 @@ +include ../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/pipeline/internal/globalsignal/signal.go b/vendor/go.opentelemetry.io/collector/pipeline/internal/globalsignal/signal.go new file mode 100644 index 00000000000..dd7c6ec623c --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pipeline/internal/globalsignal/signal.go @@ -0,0 +1,51 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package globalsignal // import "go.opentelemetry.io/collector/pipeline/internal/globalsignal" + +import ( + "errors" + "fmt" + "regexp" +) + +// Signal represents the signals supported by the collector. 
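+// A Signal is an immutable value: it wraps a single validated name, so the
+// only ways to construct one are the NewSignal and MustNewSignal constructors
+// below (or the predefined values exported by the pipeline package).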
+type Signal struct { + name string +} + +// String returns the string representation of the signal. +func (s Signal) String() string { + return s.name +} + +// MarshalText marshals the Signal. +func (s Signal) MarshalText() (text []byte, err error) { + return []byte(s.name), nil +} + +// signalRegex is used to validate the signal. +// A signal must consist of 1 to 62 lowercase ASCII alphabetic characters. +var signalRegex = regexp.MustCompile(`^[a-z]{1,62}$`) + +// NewSignal creates a Signal. It returns an error if the Signal is invalid. +// A Signal must consist of 1 to 62 lowercase ASCII alphabetic characters. +func NewSignal(signal string) (Signal, error) { + if len(signal) == 0 { + return Signal{}, errors.New("signal must not be empty") + } + if !signalRegex.MatchString(signal) { + return Signal{}, fmt.Errorf("invalid character(s) in signal %q", signal) + } + return Signal{name: signal}, nil +} + +// MustNewSignal creates a Signal. It panics if the Signal is invalid. +// A signal must consist of 1 to 62 lowercase ASCII alphabetic characters. +func MustNewSignal(signal string) Signal { + s, err := NewSignal(signal) + if err != nil { + panic(err) + } + return s +} diff --git a/vendor/go.opentelemetry.io/collector/pipeline/pipeline.go b/vendor/go.opentelemetry.io/collector/pipeline/pipeline.go new file mode 100644 index 00000000000..aa2d3d0d0ad --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pipeline/pipeline.go @@ -0,0 +1,131 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pipeline // import "go.opentelemetry.io/collector/pipeline" +import ( + "errors" + "fmt" + "regexp" + "strings" + + "go.opentelemetry.io/collector/pipeline/internal/globalsignal" +) + +// typeAndNameSeparator is the separator that is used between type and name in type/name composite keys. +const typeAndNameSeparator = "/" + +// ID represents the identity for a pipeline. It combines two values: +// * signal - the Signal of the pipeline. +// * name - the name of that pipeline. +type ID struct { + signal Signal `mapstructure:"-"` + name string `mapstructure:"-"` +} + +// NewID returns a new ID with the given Signal and empty name. +func NewID(signal Signal) ID { + return ID{signal: signal} +} + +// MustNewID builds a Signal and returns a new ID with the given Signal and empty name. +// It panics if the Signal is invalid. +// A signal must consist of 1 to 62 lowercase ASCII alphabetic characters. +func MustNewID(signal string) ID { + return ID{signal: globalsignal.MustNewSignal(signal)} +} + +// NewIDWithName returns a new ID with the given Signal and name. +func NewIDWithName(signal Signal, name string) ID { + return ID{signal: signal, name: name} +} + +// MustNewIDWithName builds a Signal and returns a new ID with the given Signal and name. +// It panics if the Signal is invalid or name is invalid. +// A signal must consist of 1 to 62 lowercase ASCII alphabetic characters. +// A name must consist of 1 to 1024 unicode characters excluding whitespace, control characters, and symbols. +func MustNewIDWithName(signal string, name string) ID { + id := ID{signal: globalsignal.MustNewSignal(signal)} + err := validateName(name) + if err != nil { + panic(err) + } + id.name = name + return id +} + +// Signal returns the Signal of the ID. +func (i ID) Signal() Signal { + return i.signal +} + +// Name returns the name of the ID. +func (i ID) Name() string { + return i.name +} + +// MarshalText implements the encoding.TextMarshaler interface. 
+// This marshals the Signal and name as one string in the config. +func (i ID) MarshalText() (text []byte, err error) { + return []byte(i.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +func (i *ID) UnmarshalText(text []byte) error { + idStr := string(text) + items := strings.SplitN(idStr, typeAndNameSeparator, 2) + var signalStr, nameStr string + if len(items) >= 1 { + signalStr = strings.TrimSpace(items[0]) + } + + if len(items) == 1 && signalStr == "" { + return errors.New("id must not be empty") + } + + if signalStr == "" { + return fmt.Errorf("in %q id: the part before %s should not be empty", idStr, typeAndNameSeparator) + } + + if len(items) > 1 { + // "name" part is present. + nameStr = strings.TrimSpace(items[1]) + if nameStr == "" { + return fmt.Errorf("in %q id: the part after %s should not be empty", idStr, typeAndNameSeparator) + } + if err := validateName(nameStr); err != nil { + return fmt.Errorf("in %q id: %w", idStr, err) + } + } + + var err error + if i.signal, err = globalsignal.NewSignal(signalStr); err != nil { + return fmt.Errorf("in %q id: %w", idStr, err) + } + i.name = nameStr + + return nil +} + +// String returns the ID string representation as "signal[/name]" format. +func (i ID) String() string { + if i.name == "" { + return i.signal.String() + } + + return i.signal.String() + typeAndNameSeparator + i.name +} + +// nameRegexp is used to validate the name of an ID. A name can consist of +// 1 to 1024 unicode characters excluding whitespace, control characters, and +// symbols. +var nameRegexp = regexp.MustCompile(`^[^\pZ\pC\pS]+$`) + +func validateName(nameStr string) error { + if len(nameStr) > 1024 { + return fmt.Errorf("name %q is longer than 1024 characters (%d characters)", nameStr, len(nameStr)) + } + if !nameRegexp.MatchString(nameStr) { + return fmt.Errorf("invalid character(s) in name %q", nameStr) + } + return nil +} diff --git a/vendor/go.opentelemetry.io/collector/pipeline/signal.go b/vendor/go.opentelemetry.io/collector/pipeline/signal.go new file mode 100644 index 00000000000..77376c999ad --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pipeline/signal.go @@ -0,0 +1,22 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pipeline // import "go.opentelemetry.io/collector/pipeline" + +import ( + "errors" + + "go.opentelemetry.io/collector/pipeline/internal/globalsignal" +) + +// Signal represents the signals supported by the collector. We currently support +// collecting metrics, traces and logs; this can expand in the future. +type Signal = globalsignal.Signal + +var ErrSignalNotSupported = errors.New("telemetry type is not supported") + +var ( + SignalTraces = globalsignal.MustNewSignal("traces") + SignalMetrics = globalsignal.MustNewSignal("metrics") + SignalLogs = globalsignal.MustNewSignal("logs") +) diff --git a/vendor/go.opentelemetry.io/collector/pipeline/xpipeline/LICENSE b/vendor/go.opentelemetry.io/collector/pipeline/xpipeline/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pipeline/xpipeline/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
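A minimal usage sketch of the pipeline ID type added above, relying only on APIs visible in this diff (the program itself is illustrative, not part of the vendored code):

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pipeline"
)

func main() {
	// Compose an ID from a predefined Signal plus an optional name;
	// String renders the "signal[/name]" form used in configs.
	id := pipeline.NewIDWithName(pipeline.SignalTraces, "backend")
	fmt.Println(id) // traces/backend

	// UnmarshalText parses and validates the same textual form.
	var parsed pipeline.ID
	if err := parsed.UnmarshalText([]byte("metrics/host")); err != nil {
		panic(err)
	}
	fmt.Println(parsed.Signal(), parsed.Name()) // metrics host
}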
diff --git a/vendor/go.opentelemetry.io/collector/pipeline/xpipeline/Makefile b/vendor/go.opentelemetry.io/collector/pipeline/xpipeline/Makefile new file mode 100644 index 00000000000..ded7a36092d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pipeline/xpipeline/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/pipeline/xpipeline/config.go b/vendor/go.opentelemetry.io/collector/pipeline/xpipeline/config.go new file mode 100644 index 00000000000..f37a7ea479d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pipeline/xpipeline/config.go @@ -0,0 +1,8 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package xpipeline // import "go.opentelemetry.io/collector/pipeline/xpipeline" + +import "go.opentelemetry.io/collector/pipeline/internal/globalsignal" + +var SignalProfiles = globalsignal.MustNewSignal("profiles") diff --git a/vendor/go.opentelemetry.io/collector/processor/internal/obsmetrics.go b/vendor/go.opentelemetry.io/collector/processor/internal/obsmetrics.go new file mode 100644 index 00000000000..c96fbe5e9e0 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/processor/internal/obsmetrics.go @@ -0,0 +1,13 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/processor/internal" + +const ( + MetricNameSep = "_" + + // ProcessorKey is the key used to identify processors in metrics and traces. + ProcessorKey = "processor" + + ProcessorMetricPrefix = ProcessorKey + MetricNameSep +) diff --git a/vendor/go.opentelemetry.io/collector/processor/processor.go b/vendor/go.opentelemetry.io/collector/processor/processor.go index 98feba69a57..8ecd4d497c6 100644 --- a/vendor/go.opentelemetry.io/collector/processor/processor.go +++ b/vendor/go.opentelemetry.io/collector/processor/processor.go @@ -5,17 +5,11 @@ package processor // import "go.opentelemetry.io/collector/processor" import ( "context" - "errors" "fmt" - "go.uber.org/zap" - "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" -) - -var ( - errNilNextConsumer = errors.New("nil next Consumer") + "go.opentelemetry.io/collector/pipeline" ) // Traces is a processor that can consume traces. @@ -36,8 +30,8 @@ type Logs interface { consumer.Logs } -// CreateSettings is passed to Create* functions in Factory. -type CreateSettings struct { +// Settings is passed to Create* functions in Factory. +type Settings struct { // ID returns the ID of the component that will be created. ID component.ID @@ -50,41 +44,44 @@ type CreateSettings struct { // Factory is Factory interface for processors. // // This interface cannot be directly implemented. Implementations must -// use the NewProcessorFactory to implement it. +// use the NewFactory to implement it. type Factory interface { component.Factory - // CreateTracesProcessor creates a TracesProcessor based on this config. - // If the processor type does not support tracing or if the config is not valid, - // an error will be returned instead. - CreateTracesProcessor(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Traces) (Traces, error) + // CreateTraces creates a Traces processor based on this config. + // If the processor type does not support traces, + // this function returns the error [pipeline.ErrSignalNotSupported]. + // Implementers can assume `next` is never nil. 
+ CreateTraces(ctx context.Context, set Settings, cfg component.Config, next consumer.Traces) (Traces, error) - // TracesProcessorStability gets the stability level of the TracesProcessor. - TracesProcessorStability() component.StabilityLevel + // TracesStability gets the stability level of the Traces processor. + TracesStability() component.StabilityLevel - // CreateMetricsProcessor creates a MetricsProcessor based on this config. - // If the processor type does not support metrics or if the config is not valid, - // an error will be returned instead. - CreateMetricsProcessor(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Metrics) (Metrics, error) + // CreateMetrics creates a Metrics processor based on this config. + // If the processor type does not support metrics, + // this function returns the error [pipeline.ErrSignalNotSupported]. + // Implementers can assume `next` is never nil. + CreateMetrics(ctx context.Context, set Settings, cfg component.Config, next consumer.Metrics) (Metrics, error) - // MetricsProcessorStability gets the stability level of the MetricsProcessor. - MetricsProcessorStability() component.StabilityLevel + // MetricsStability gets the stability level of the Metrics processor. + MetricsStability() component.StabilityLevel - // CreateLogsProcessor creates a LogsProcessor based on the config. - // If the processor type does not support logs or if the config is not valid, - // an error will be returned instead. - CreateLogsProcessor(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Logs) (Logs, error) + // CreateLogs creates a Logs processor based on the config. + // If the processor type does not support logs, + // this function returns the error [pipeline.ErrSignalNotSupported]. + // Implementers can assume `next` is never nil. + CreateLogs(ctx context.Context, set Settings, cfg component.Config, next consumer.Logs) (Logs, error) - // LogsProcessorStability gets the stability level of the LogsProcessor. - LogsProcessorStability() component.StabilityLevel + // LogsStability gets the stability level of the Logs processor. + LogsStability() component.StabilityLevel unexportedFactoryFunc() } // FactoryOption apply changes to Options. type FactoryOption interface { - // applyProcessorFactoryOption applies the option. - applyProcessorFactoryOption(o *factory) + // applyOption applies the option. + applyOption(o *factory) } var _ FactoryOption = (*factoryOptionFunc)(nil) @@ -92,57 +89,10 @@ var _ FactoryOption = (*factoryOptionFunc)(nil) // factoryOptionFunc is a FactoryOption created through a function. type factoryOptionFunc func(*factory) -func (f factoryOptionFunc) applyProcessorFactoryOption(o *factory) { +func (f factoryOptionFunc) applyOption(o *factory) { f(o) } -// CreateTracesFunc is the equivalent of Factory.CreateTraces(). -type CreateTracesFunc func(context.Context, CreateSettings, component.Config, consumer.Traces) (Traces, error) - -// CreateTracesProcessor implements Factory.CreateTracesProcessor(). -func (f CreateTracesFunc) CreateTracesProcessor( - ctx context.Context, - set CreateSettings, - cfg component.Config, - nextConsumer consumer.Traces) (Traces, error) { - if f == nil { - return nil, component.ErrDataTypeIsNotSupported - } - return f(ctx, set, cfg, nextConsumer) -} - -// CreateMetricsFunc is the equivalent of Factory.CreateMetrics(). 
-type CreateMetricsFunc func(context.Context, CreateSettings, component.Config, consumer.Metrics) (Metrics, error) - -// CreateMetricsProcessor implements Factory.CreateMetricsProcessor(). -func (f CreateMetricsFunc) CreateMetricsProcessor( - ctx context.Context, - set CreateSettings, - cfg component.Config, - nextConsumer consumer.Metrics, -) (Metrics, error) { - if f == nil { - return nil, component.ErrDataTypeIsNotSupported - } - return f(ctx, set, cfg, nextConsumer) -} - -// CreateLogsFunc is the equivalent of Factory.CreateLogs(). -type CreateLogsFunc func(context.Context, CreateSettings, component.Config, consumer.Logs) (Logs, error) - -// CreateLogsProcessor implements Factory.CreateLogsProcessor(). -func (f CreateLogsFunc) CreateLogsProcessor( - ctx context.Context, - set CreateSettings, - cfg component.Config, - nextConsumer consumer.Logs, -) (Logs, error) { - if f == nil { - return nil, component.ErrDataTypeIsNotSupported - } - return f(ctx, set, cfg, nextConsumer) -} - type factory struct { cfgType component.Type component.CreateDefaultConfigFunc @@ -160,18 +110,51 @@ func (f *factory) Type() component.Type { func (f *factory) unexportedFactoryFunc() {} -func (f factory) TracesProcessorStability() component.StabilityLevel { +func (f *factory) TracesStability() component.StabilityLevel { return f.tracesStabilityLevel } -func (f factory) MetricsProcessorStability() component.StabilityLevel { +func (f *factory) MetricsStability() component.StabilityLevel { return f.metricsStabilityLevel } -func (f factory) LogsProcessorStability() component.StabilityLevel { +func (f *factory) LogsStability() component.StabilityLevel { return f.logsStabilityLevel } +// CreateTracesFunc is the equivalent of Factory.CreateTraces(). +type CreateTracesFunc func(context.Context, Settings, component.Config, consumer.Traces) (Traces, error) + +// CreateTraces implements Factory.CreateTraces. +func (f CreateTracesFunc) CreateTraces(ctx context.Context, set Settings, cfg component.Config, next consumer.Traces) (Traces, error) { + if f == nil { + return nil, pipeline.ErrSignalNotSupported + } + return f(ctx, set, cfg, next) +} + +// CreateMetricsFunc is the equivalent of Factory.CreateMetrics(). +type CreateMetricsFunc func(context.Context, Settings, component.Config, consumer.Metrics) (Metrics, error) + +// CreateMetrics implements Factory.CreateMetrics. +func (f CreateMetricsFunc) CreateMetrics(ctx context.Context, set Settings, cfg component.Config, next consumer.Metrics) (Metrics, error) { + if f == nil { + return nil, pipeline.ErrSignalNotSupported + } + return f(ctx, set, cfg, next) +} + +// CreateLogsFunc is the equivalent of Factory.CreateLogs. +type CreateLogsFunc func(context.Context, Settings, component.Config, consumer.Logs) (Logs, error) + +// CreateLogs implements Factory.CreateLogs(). +func (f CreateLogsFunc) CreateLogs(ctx context.Context, set Settings, cfg component.Config, next consumer.Logs) (Logs, error) { + if f == nil { + return nil, pipeline.ErrSignalNotSupported + } + return f(ctx, set, cfg, next) +} + // WithTraces overrides the default "error not supported" implementation for CreateTraces and the default "undefined" stability level. 
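+// A sketch of typical wiring from a component author's perspective, where
+// createDefaultConfig and createMyTraces are hypothetical helpers:
+//
+//	processor.NewFactory(component.MustNewType("myproc"), createDefaultConfig,
+//		processor.WithTraces(createMyTraces, component.StabilityLevelAlpha))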
func WithTraces(createTraces CreateTracesFunc, sl component.StabilityLevel) FactoryOption { return factoryOptionFunc(func(o *factory) { @@ -203,7 +186,7 @@ func NewFactory(cfgType component.Type, createDefaultConfig component.CreateDefa CreateDefaultConfigFunc: createDefaultConfig, } for _, opt := range options { - opt.applyProcessorFactoryOption(f) + opt.applyOption(f) } return f } @@ -220,86 +203,3 @@ func MakeFactoryMap(factories ...Factory) (map[component.Type]Factory, error) { } return fMap, nil } - -// Builder processor is a helper struct that given a set of Configs and Factories helps with creating processors. -type Builder struct { - cfgs map[component.ID]component.Config - factories map[component.Type]Factory -} - -// NewBuilder creates a new processor.Builder to help with creating components form a set of configs and factories. -func NewBuilder(cfgs map[component.ID]component.Config, factories map[component.Type]Factory) *Builder { - return &Builder{cfgs: cfgs, factories: factories} -} - -// CreateTraces creates a Traces processor based on the settings and config. -func (b *Builder) CreateTraces(ctx context.Context, set CreateSettings, next consumer.Traces) (Traces, error) { - if next == nil { - return nil, errNilNextConsumer - } - cfg, existsCfg := b.cfgs[set.ID] - if !existsCfg { - return nil, fmt.Errorf("processor %q is not configured", set.ID) - } - - f, existsFactory := b.factories[set.ID.Type()] - if !existsFactory { - return nil, fmt.Errorf("processor factory not available for: %q", set.ID) - } - - logStabilityLevel(set.Logger, f.TracesProcessorStability()) - return f.CreateTracesProcessor(ctx, set, cfg, next) -} - -// CreateMetrics creates a Metrics processor based on the settings and config. -func (b *Builder) CreateMetrics(ctx context.Context, set CreateSettings, next consumer.Metrics) (Metrics, error) { - if next == nil { - return nil, errNilNextConsumer - } - cfg, existsCfg := b.cfgs[set.ID] - if !existsCfg { - return nil, fmt.Errorf("processor %q is not configured", set.ID) - } - - f, existsFactory := b.factories[set.ID.Type()] - if !existsFactory { - return nil, fmt.Errorf("processor factory not available for: %q", set.ID) - } - - logStabilityLevel(set.Logger, f.MetricsProcessorStability()) - return f.CreateMetricsProcessor(ctx, set, cfg, next) -} - -// CreateLogs creates a Logs processor based on the settings and config. -func (b *Builder) CreateLogs(ctx context.Context, set CreateSettings, next consumer.Logs) (Logs, error) { - if next == nil { - return nil, errNilNextConsumer - } - cfg, existsCfg := b.cfgs[set.ID] - if !existsCfg { - return nil, fmt.Errorf("processor %q is not configured", set.ID) - } - - f, existsFactory := b.factories[set.ID.Type()] - if !existsFactory { - return nil, fmt.Errorf("processor factory not available for: %q", set.ID) - } - - logStabilityLevel(set.Logger, f.LogsProcessorStability()) - return f.CreateLogsProcessor(ctx, set, cfg, next) -} - -func (b *Builder) Factory(componentType component.Type) component.Factory { - return b.factories[componentType] -} - -// logStabilityLevel logs the stability level of a component. The log level is set to info for -// undefined, unmaintained, deprecated and development. The log level is set to debug -// for alpha, beta and stable. 
-func logStabilityLevel(logger *zap.Logger, sl component.StabilityLevel) { - if sl >= component.StabilityLevelAlpha { - logger.Debug(sl.LogMessage()) - } else { - logger.Info(sl.LogMessage()) - } -} diff --git a/vendor/go.opentelemetry.io/collector/processor/processorhelper/documentation.md b/vendor/go.opentelemetry.io/collector/processor/processorhelper/documentation.md index 073fc9a7219..1c1b10f9f46 100644 --- a/vendor/go.opentelemetry.io/collector/processor/processorhelper/documentation.md +++ b/vendor/go.opentelemetry.io/collector/processor/processorhelper/documentation.md @@ -6,74 +6,18 @@ The following telemetry is emitted by this component. -### processor_accepted_log_records +### otelcol_processor_incoming_items -Number of log records successfully pushed into the next component in the pipeline. +Number of items passed to the processor. [alpha] | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {items} | Sum | Int | true | -### processor_accepted_metric_points +### otelcol_processor_outgoing_items -Number of metric points successfully pushed into the next component in the pipeline. +Number of items emitted from the processor. [alpha] | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | - -### processor_accepted_spans - -Number of spans successfully pushed into the next component in the pipeline. - -| Unit | Metric Type | Value Type | Monotonic | -| ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | - -### processor_dropped_log_records - -Number of log records that were dropped. - -| Unit | Metric Type | Value Type | Monotonic | -| ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | - -### processor_dropped_metric_points - -Number of metric points that were dropped. - -| Unit | Metric Type | Value Type | Monotonic | -| ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | - -### processor_dropped_spans - -Number of spans that were dropped. - -| Unit | Metric Type | Value Type | Monotonic | -| ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | - -### processor_refused_log_records - -Number of log records that were rejected by the next component in the pipeline. - -| Unit | Metric Type | Value Type | Monotonic | -| ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | - -### processor_refused_metric_points - -Number of metric points that were rejected by the next component in the pipeline. - -| Unit | Metric Type | Value Type | Monotonic | -| ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | - -### processor_refused_spans - -Number of spans that were rejected by the next component in the pipeline. 
- -| Unit | Metric Type | Value Type | Monotonic | -| ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {items} | Sum | Int | true | diff --git a/vendor/go.opentelemetry.io/collector/processor/processorhelper/internal/metadata/generated_telemetry.go b/vendor/go.opentelemetry.io/collector/processor/processorhelper/internal/metadata/generated_telemetry.go index 568b689edd5..06ad630679b 100644 --- a/vendor/go.opentelemetry.io/collector/processor/processorhelper/internal/metadata/generated_telemetry.go +++ b/vendor/go.opentelemetry.io/collector/processor/processorhelper/internal/metadata/generated_telemetry.go @@ -6,7 +6,7 @@ import ( "errors" "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/metric/noop" + noopmetric "go.opentelemetry.io/otel/metric/noop" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/collector/component" @@ -24,97 +24,49 @@ func Tracer(settings component.TelemetrySettings) trace.Tracer { // TelemetryBuilder provides an interface for components to report telemetry // as defined in metadata and user config. type TelemetryBuilder struct { - ProcessorAcceptedLogRecords metric.Int64Counter - ProcessorAcceptedMetricPoints metric.Int64Counter - ProcessorAcceptedSpans metric.Int64Counter - ProcessorDroppedLogRecords metric.Int64Counter - ProcessorDroppedMetricPoints metric.Int64Counter - ProcessorDroppedSpans metric.Int64Counter - ProcessorRefusedLogRecords metric.Int64Counter - ProcessorRefusedMetricPoints metric.Int64Counter - ProcessorRefusedSpans metric.Int64Counter - level configtelemetry.Level + meter metric.Meter + ProcessorIncomingItems metric.Int64Counter + ProcessorOutgoingItems metric.Int64Counter } -// telemetryBuilderOption applies changes to default builder. -type telemetryBuilderOption func(*TelemetryBuilder) +// TelemetryBuilderOption applies changes to default builder. +type TelemetryBuilderOption interface { + apply(*TelemetryBuilder) +} -// WithLevel sets the current telemetry level for the component. 
-func WithLevel(lvl configtelemetry.Level) telemetryBuilderOption { - return func(builder *TelemetryBuilder) { - builder.level = lvl - } +type telemetryBuilderOptionFunc func(mb *TelemetryBuilder) + +func (tbof telemetryBuilderOptionFunc) apply(mb *TelemetryBuilder) { + tbof(mb) } // NewTelemetryBuilder provides a struct with methods to update all internal telemetry // for a component -func NewTelemetryBuilder(settings component.TelemetrySettings, options ...telemetryBuilderOption) (*TelemetryBuilder, error) { - builder := TelemetryBuilder{level: configtelemetry.LevelBasic} +func NewTelemetryBuilder(settings component.TelemetrySettings, options ...TelemetryBuilderOption) (*TelemetryBuilder, error) { + builder := TelemetryBuilder{} for _, op := range options { - op(&builder) + op.apply(&builder) } - var ( - err, errs error - meter metric.Meter - ) - if builder.level >= configtelemetry.LevelBasic { - meter = Meter(settings) - } else { - meter = noop.Meter{} - } - builder.ProcessorAcceptedLogRecords, err = meter.Int64Counter( - "processor_accepted_log_records", - metric.WithDescription("Number of log records successfully pushed into the next component in the pipeline."), - metric.WithUnit("1"), - ) - errs = errors.Join(errs, err) - builder.ProcessorAcceptedMetricPoints, err = meter.Int64Counter( - "processor_accepted_metric_points", - metric.WithDescription("Number of metric points successfully pushed into the next component in the pipeline."), - metric.WithUnit("1"), - ) - errs = errors.Join(errs, err) - builder.ProcessorAcceptedSpans, err = meter.Int64Counter( - "processor_accepted_spans", - metric.WithDescription("Number of spans successfully pushed into the next component in the pipeline."), - metric.WithUnit("1"), - ) - errs = errors.Join(errs, err) - builder.ProcessorDroppedLogRecords, err = meter.Int64Counter( - "processor_dropped_log_records", - metric.WithDescription("Number of log records that were dropped."), - metric.WithUnit("1"), + builder.meter = Meter(settings) + var err, errs error + builder.ProcessorIncomingItems, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_processor_incoming_items", + metric.WithDescription("Number of items passed to the processor. 
[alpha]"), + metric.WithUnit("{items}"), ) errs = errors.Join(errs, err) - builder.ProcessorDroppedMetricPoints, err = meter.Int64Counter( - "processor_dropped_metric_points", - metric.WithDescription("Number of metric points that were dropped."), - metric.WithUnit("1"), - ) - errs = errors.Join(errs, err) - builder.ProcessorDroppedSpans, err = meter.Int64Counter( - "processor_dropped_spans", - metric.WithDescription("Number of spans that were dropped."), - metric.WithUnit("1"), - ) - errs = errors.Join(errs, err) - builder.ProcessorRefusedLogRecords, err = meter.Int64Counter( - "processor_refused_log_records", - metric.WithDescription("Number of log records that were rejected by the next component in the pipeline."), - metric.WithUnit("1"), - ) - errs = errors.Join(errs, err) - builder.ProcessorRefusedMetricPoints, err = meter.Int64Counter( - "processor_refused_metric_points", - metric.WithDescription("Number of metric points that were rejected by the next component in the pipeline."), - metric.WithUnit("1"), - ) - errs = errors.Join(errs, err) - builder.ProcessorRefusedSpans, err = meter.Int64Counter( - "processor_refused_spans", - metric.WithDescription("Number of spans that were rejected by the next component in the pipeline."), - metric.WithUnit("1"), + builder.ProcessorOutgoingItems, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_processor_outgoing_items", + metric.WithDescription("Number of items emitted from the processor. [alpha]"), + metric.WithUnit("{items}"), ) errs = errors.Join(errs, err) return &builder, errs } + +func getLeveledMeter(meter metric.Meter, cfgLevel, srvLevel configtelemetry.Level) metric.Meter { + if cfgLevel <= srvLevel { + return meter + } + return noopmetric.Meter{} +} diff --git a/vendor/go.opentelemetry.io/collector/processor/processorhelper/logs.go b/vendor/go.opentelemetry.io/collector/processor/processorhelper/logs.go index ade2f45a385..23c702cd2a3 100644 --- a/vendor/go.opentelemetry.io/collector/processor/processorhelper/logs.go +++ b/vendor/go.opentelemetry.io/collector/processor/processorhelper/logs.go @@ -12,6 +12,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pipeline" "go.opentelemetry.io/collector/processor" ) @@ -19,47 +20,56 @@ import ( // If error is returned then returned data are ignored. It MUST not call the next component. type ProcessLogsFunc func(context.Context, plog.Logs) (plog.Logs, error) -type logProcessor struct { +type logs struct { component.StartFunc component.ShutdownFunc consumer.Logs } -// NewLogsProcessor creates a processor.Logs that ensure context propagation and the right tags are set. -func NewLogsProcessor( +// NewLogs creates a processor.Logs that ensure context propagation and the right tags are set. 
+func NewLogs( _ context.Context, - set processor.CreateSettings, + set processor.Settings, _ component.Config, nextConsumer consumer.Logs, logsFunc ProcessLogsFunc, options ...Option, ) (processor.Logs, error) { - // TODO: Add observability metrics support if logsFunc == nil { return nil, errors.New("nil logsFunc") } + obs, err := newObsReport(set, pipeline.SignalLogs) + if err != nil { + return nil, err + } + eventOptions := spanAttributes(set.ID) bs := fromOptions(options) logsConsumer, err := consumer.NewLogs(func(ctx context.Context, ld plog.Logs) error { span := trace.SpanFromContext(ctx) span.AddEvent("Start processing.", eventOptions) - var err error - ld, err = logsFunc(ctx, ld) + recordsIn := ld.LogRecordCount() + + var errFunc error + ld, errFunc = logsFunc(ctx, ld) span.AddEvent("End processing.", eventOptions) - if err != nil { - if errors.Is(err, ErrSkipProcessingData) { + if errFunc != nil { + obs.recordInOut(ctx, recordsIn, 0) + if errors.Is(errFunc, ErrSkipProcessingData) { return nil } - return err + return errFunc } + recordsOut := ld.LogRecordCount() + obs.recordInOut(ctx, recordsIn, recordsOut) return nextConsumer.ConsumeLogs(ctx, ld) }, bs.consumerOptions...) if err != nil { return nil, err } - return &logProcessor{ + return &logs{ StartFunc: bs.StartFunc, ShutdownFunc: bs.ShutdownFunc, Logs: logsConsumer, diff --git a/vendor/go.opentelemetry.io/collector/processor/processorhelper/metadata.yaml b/vendor/go.opentelemetry.io/collector/processor/processorhelper/metadata.yaml index 7e0c72f5c9c..32ae7621b12 100644 --- a/vendor/go.opentelemetry.io/collector/processor/processorhelper/metadata.yaml +++ b/vendor/go.opentelemetry.io/collector/processor/processorhelper/metadata.yaml @@ -5,78 +5,25 @@ status: not_component: true stability: beta: [traces, metrics, logs] - distributions: [core, contrib] telemetry: metrics: - processor_accepted_spans: + processor_incoming_items: enabled: true - description: Number of spans successfully pushed into the next component in the pipeline. - unit: 1 + stability: + level: alpha + description: Number of items passed to the processor. + unit: "{items}" sum: value_type: int monotonic: true - processor_refused_spans: + processor_outgoing_items: enabled: true - description: Number of spans that were rejected by the next component in the pipeline. - unit: 1 + stability: + level: alpha + description: Number of items emitted from the processor. + unit: "{items}" sum: value_type: int monotonic: true - - processor_dropped_spans: - enabled: true - description: Number of spans that were dropped. - unit: 1 - sum: - value_type: int - monotonic: true - - processor_accepted_metric_points: - enabled: true - description: Number of metric points successfully pushed into the next component in the pipeline. - unit: 1 - sum: - value_type: int - monotonic: true - - processor_refused_metric_points: - enabled: true - description: Number of metric points that were rejected by the next component in the pipeline. - unit: 1 - sum: - value_type: int - monotonic: true - - processor_dropped_metric_points: - enabled: true - description: Number of metric points that were dropped. - unit: 1 - sum: - value_type: int - monotonic: true - - processor_accepted_log_records: - enabled: true - description: Number of log records successfully pushed into the next component in the pipeline. - unit: 1 - sum: - value_type: int - monotonic: true - - processor_refused_log_records: - enabled: true - description: Number of log records that were rejected by the next component in the pipeline. 
- unit: 1 - sum: - value_type: int - monotonic: true - - processor_dropped_log_records: - enabled: true - description: Number of log records that were dropped. - unit: 1 - sum: - value_type: int - monotonic: true \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/collector/processor/processorhelper/metrics.go b/vendor/go.opentelemetry.io/collector/processor/processorhelper/metrics.go index ac3802722d0..0bd32553335 100644 --- a/vendor/go.opentelemetry.io/collector/processor/processorhelper/metrics.go +++ b/vendor/go.opentelemetry.io/collector/processor/processorhelper/metrics.go @@ -12,6 +12,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pipeline" "go.opentelemetry.io/collector/processor" ) @@ -19,47 +20,56 @@ import ( // If error is returned then returned data are ignored. It MUST not call the next component. type ProcessMetricsFunc func(context.Context, pmetric.Metrics) (pmetric.Metrics, error) -type metricsProcessor struct { +type metrics struct { component.StartFunc component.ShutdownFunc consumer.Metrics } -// NewMetricsProcessor creates a processor.Metrics that ensure context propagation and the right tags are set. -func NewMetricsProcessor( +// NewMetrics creates a processor.Metrics that ensure context propagation and the right tags are set. +func NewMetrics( _ context.Context, - set processor.CreateSettings, + set processor.Settings, _ component.Config, nextConsumer consumer.Metrics, metricsFunc ProcessMetricsFunc, options ...Option, ) (processor.Metrics, error) { - // TODO: Add observability metrics support if metricsFunc == nil { return nil, errors.New("nil metricsFunc") } + obs, err := newObsReport(set, pipeline.SignalMetrics) + if err != nil { + return nil, err + } + eventOptions := spanAttributes(set.ID) bs := fromOptions(options) metricsConsumer, err := consumer.NewMetrics(func(ctx context.Context, md pmetric.Metrics) error { span := trace.SpanFromContext(ctx) span.AddEvent("Start processing.", eventOptions) - var err error - md, err = metricsFunc(ctx, md) + pointsIn := md.DataPointCount() + + var errFunc error + md, errFunc = metricsFunc(ctx, md) span.AddEvent("End processing.", eventOptions) - if err != nil { - if errors.Is(err, ErrSkipProcessingData) { + if errFunc != nil { + obs.recordInOut(ctx, pointsIn, 0) + if errors.Is(errFunc, ErrSkipProcessingData) { return nil } - return err + return errFunc } + pointsOut := md.DataPointCount() + obs.recordInOut(ctx, pointsIn, pointsOut) return nextConsumer.ConsumeMetrics(ctx, md) }, bs.consumerOptions...) 
if err != nil { return nil, err } - return &metricsProcessor{ + return &metrics{ StartFunc: bs.StartFunc, ShutdownFunc: bs.ShutdownFunc, Metrics: metricsConsumer, diff --git a/vendor/go.opentelemetry.io/collector/processor/processorhelper/obsreport.go b/vendor/go.opentelemetry.io/collector/processor/processorhelper/obsreport.go index 51aa26a9a88..a342e021d75 100644 --- a/vendor/go.opentelemetry.io/collector/processor/processorhelper/obsreport.go +++ b/vendor/go.opentelemetry.io/collector/processor/processorhelper/obsreport.go @@ -5,128 +5,38 @@ package processorhelper // import "go.opentelemetry.io/collector/processor/proce import ( "context" - "strings" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" - "go.uber.org/zap" - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics" + "go.opentelemetry.io/collector/pipeline" "go.opentelemetry.io/collector/processor" + "go.opentelemetry.io/collector/processor/internal" "go.opentelemetry.io/collector/processor/processorhelper/internal/metadata" ) -// BuildCustomMetricName is used to be build a metric name following -// the standards used in the Collector. The configType should be the same -// value used to identify the type on the config. -func BuildCustomMetricName(configType, metric string) string { - componentPrefix := obsmetrics.ProcessorMetricPrefix - if !strings.HasSuffix(componentPrefix, obsmetrics.MetricNameSep) { - componentPrefix += obsmetrics.MetricNameSep - } - if configType == "" { - return componentPrefix - } - return componentPrefix + configType + obsmetrics.MetricNameSep + metric -} - -// ObsReport is a helper to add observability to a processor. -type ObsReport struct { - logger *zap.Logger +const signalKey = "otel.signal" - otelAttrs []attribute.KeyValue +type obsReport struct { + otelAttrs metric.MeasurementOption telemetryBuilder *metadata.TelemetryBuilder } -// ObsReportSettings are settings for creating an ObsReport. -type ObsReportSettings struct { - ProcessorID component.ID - ProcessorCreateSettings processor.CreateSettings -} - -// NewObsReport creates a new Processor. 
-func NewObsReport(cfg ObsReportSettings) (*ObsReport, error) { - return newObsReport(cfg) -} - -func newObsReport(cfg ObsReportSettings) (*ObsReport, error) { - telemetryBuilder, err := metadata.NewTelemetryBuilder(cfg.ProcessorCreateSettings.TelemetrySettings, metadata.WithLevel(cfg.ProcessorCreateSettings.MetricsLevel)) +func newObsReport(set processor.Settings, signal pipeline.Signal) (*obsReport, error) { + telemetryBuilder, err := metadata.NewTelemetryBuilder(set.TelemetrySettings) if err != nil { return nil, err } - return &ObsReport{ - logger: cfg.ProcessorCreateSettings.Logger, - otelAttrs: []attribute.KeyValue{ - attribute.String(obsmetrics.ProcessorKey, cfg.ProcessorID.String()), - }, + return &obsReport{ + otelAttrs: metric.WithAttributeSet(attribute.NewSet( + attribute.String(internal.ProcessorKey, set.ID.String()), + attribute.String(signalKey, signal.String()), + )), telemetryBuilder: telemetryBuilder, }, nil } -func (or *ObsReport) recordData(ctx context.Context, dataType component.DataType, accepted, refused, dropped int64) { - var acceptedCount, refusedCount, droppedCount metric.Int64Counter - switch dataType { - case component.DataTypeTraces: - acceptedCount = or.telemetryBuilder.ProcessorAcceptedSpans - refusedCount = or.telemetryBuilder.ProcessorRefusedSpans - droppedCount = or.telemetryBuilder.ProcessorDroppedSpans - case component.DataTypeMetrics: - acceptedCount = or.telemetryBuilder.ProcessorAcceptedMetricPoints - refusedCount = or.telemetryBuilder.ProcessorRefusedMetricPoints - droppedCount = or.telemetryBuilder.ProcessorDroppedMetricPoints - case component.DataTypeLogs: - acceptedCount = or.telemetryBuilder.ProcessorAcceptedLogRecords - refusedCount = or.telemetryBuilder.ProcessorRefusedLogRecords - droppedCount = or.telemetryBuilder.ProcessorDroppedLogRecords - } - - acceptedCount.Add(ctx, accepted, metric.WithAttributes(or.otelAttrs...)) - refusedCount.Add(ctx, refused, metric.WithAttributes(or.otelAttrs...)) - droppedCount.Add(ctx, dropped, metric.WithAttributes(or.otelAttrs...)) -} - -// TracesAccepted reports that the trace data was accepted. -func (or *ObsReport) TracesAccepted(ctx context.Context, numSpans int) { - or.recordData(ctx, component.DataTypeTraces, int64(numSpans), int64(0), int64(0)) -} - -// TracesRefused reports that the trace data was refused. -func (or *ObsReport) TracesRefused(ctx context.Context, numSpans int) { - or.recordData(ctx, component.DataTypeTraces, int64(0), int64(numSpans), int64(0)) -} - -// TracesDropped reports that the trace data was dropped. -func (or *ObsReport) TracesDropped(ctx context.Context, numSpans int) { - or.recordData(ctx, component.DataTypeTraces, int64(0), int64(0), int64(numSpans)) -} - -// MetricsAccepted reports that the metrics were accepted. -func (or *ObsReport) MetricsAccepted(ctx context.Context, numPoints int) { - or.recordData(ctx, component.DataTypeMetrics, int64(numPoints), int64(0), int64(0)) -} - -// MetricsRefused reports that the metrics were refused. -func (or *ObsReport) MetricsRefused(ctx context.Context, numPoints int) { - or.recordData(ctx, component.DataTypeMetrics, int64(0), int64(numPoints), int64(0)) -} - -// MetricsDropped reports that the metrics were dropped. -func (or *ObsReport) MetricsDropped(ctx context.Context, numPoints int) { - or.recordData(ctx, component.DataTypeMetrics, int64(0), int64(0), int64(numPoints)) -} - -// LogsAccepted reports that the logs were accepted. 
-func (or *ObsReport) LogsAccepted(ctx context.Context, numRecords int) {
-	or.recordData(ctx, component.DataTypeLogs, int64(numRecords), int64(0), int64(0))
-}
-
-// LogsRefused reports that the logs were refused.
-func (or *ObsReport) LogsRefused(ctx context.Context, numRecords int) {
-	or.recordData(ctx, component.DataTypeLogs, int64(0), int64(numRecords), int64(0))
-}
-
-// LogsDropped reports that the logs were dropped.
-func (or *ObsReport) LogsDropped(ctx context.Context, numRecords int) {
-	or.recordData(ctx, component.DataTypeLogs, int64(0), int64(0), int64(numRecords))
+func (or *obsReport) recordInOut(ctx context.Context, incoming, outgoing int) {
+	or.telemetryBuilder.ProcessorIncomingItems.Add(ctx, int64(incoming), or.otelAttrs)
+	or.telemetryBuilder.ProcessorOutgoingItems.Add(ctx, int64(outgoing), or.otelAttrs)
 }
diff --git a/vendor/go.opentelemetry.io/collector/processor/processorhelper/processor.go b/vendor/go.opentelemetry.io/collector/processor/processorhelper/processor.go
index 8bdaa4fbf73..03d99b3375b 100644
--- a/vendor/go.opentelemetry.io/collector/processor/processorhelper/processor.go
+++ b/vendor/go.opentelemetry.io/collector/processor/processorhelper/processor.go
@@ -13,7 +13,7 @@ import (
 	"go.opentelemetry.io/collector/component"
 	"go.opentelemetry.io/collector/consumer"
-	"go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics"
+	"go.opentelemetry.io/collector/processor/internal"
 )
 
 // ErrSkipProcessingData is a sentinel value to indicate when traces or metrics should intentionally be dropped
@@ -22,30 +22,38 @@ import (
 var ErrSkipProcessingData = errors.New("sentinel error to skip processing data from the remainder of the pipeline")
 
 // Option applies changes to internalOptions.
-type Option func(*baseSettings)
+type Option interface {
+	apply(*baseSettings)
+}
+
+type optionFunc func(*baseSettings)
+
+func (of optionFunc) apply(e *baseSettings) {
+	of(e)
+}
 
 // WithStart overrides the default Start function for a processor.
 // The default Start function does nothing and always returns nil.
 func WithStart(start component.StartFunc) Option {
-	return func(o *baseSettings) {
+	return optionFunc(func(o *baseSettings) {
 		o.StartFunc = start
-	}
+	})
 }
 
 // WithShutdown overrides the default Shutdown function for a processor.
 // The default shutdown function does nothing and always returns nil.
 func WithShutdown(shutdown component.ShutdownFunc) Option {
-	return func(o *baseSettings) {
+	return optionFunc(func(o *baseSettings) {
 		o.ShutdownFunc = shutdown
-	}
+	})
 }
 
 // WithCapabilities overrides the default GetCapabilities function for a processor.
 // The default GetCapabilities function returns mutable capabilities.
func WithCapabilities(capabilities consumer.Capabilities) Option { - return func(o *baseSettings) { + return optionFunc(func(o *baseSettings) { o.consumerOptions = append(o.consumerOptions, consumer.WithCapabilities(capabilities)) - } + }) } type baseSettings struct { @@ -62,12 +70,12 @@ func fromOptions(options []Option) *baseSettings { } for _, op := range options { - op(opts) + op.apply(opts) } return opts } func spanAttributes(id component.ID) trace.EventOption { - return trace.WithAttributes(attribute.String(obsmetrics.ProcessorKey, id.String())) + return trace.WithAttributes(attribute.String(internal.ProcessorKey, id.String())) } diff --git a/vendor/go.opentelemetry.io/collector/processor/processorhelper/traces.go b/vendor/go.opentelemetry.io/collector/processor/processorhelper/traces.go index 578f65c7efa..f6389a2272a 100644 --- a/vendor/go.opentelemetry.io/collector/processor/processorhelper/traces.go +++ b/vendor/go.opentelemetry.io/collector/processor/processorhelper/traces.go @@ -12,6 +12,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/pdata/ptrace" + "go.opentelemetry.io/collector/pipeline" "go.opentelemetry.io/collector/processor" ) @@ -19,48 +20,56 @@ import ( // If error is returned then returned data are ignored. It MUST not call the next component. type ProcessTracesFunc func(context.Context, ptrace.Traces) (ptrace.Traces, error) -type tracesProcessor struct { +type traces struct { component.StartFunc component.ShutdownFunc consumer.Traces } -// NewTracesProcessor creates a processor.Traces that ensure context propagation and the right tags are set. -func NewTracesProcessor( +// NewTraces creates a processor.Traces that ensure context propagation and the right tags are set. +func NewTraces( _ context.Context, - set processor.CreateSettings, + set processor.Settings, _ component.Config, nextConsumer consumer.Traces, tracesFunc ProcessTracesFunc, options ...Option, ) (processor.Traces, error) { - // TODO: Add observability Traces support if tracesFunc == nil { return nil, errors.New("nil tracesFunc") } + obs, err := newObsReport(set, pipeline.SignalTraces) + if err != nil { + return nil, err + } + eventOptions := spanAttributes(set.ID) bs := fromOptions(options) traceConsumer, err := consumer.NewTraces(func(ctx context.Context, td ptrace.Traces) error { span := trace.SpanFromContext(ctx) span.AddEvent("Start processing.", eventOptions) - var err error - td, err = tracesFunc(ctx, td) + spansIn := td.SpanCount() + + var errFunc error + td, errFunc = tracesFunc(ctx, td) span.AddEvent("End processing.", eventOptions) - if err != nil { - if errors.Is(err, ErrSkipProcessingData) { + if errFunc != nil { + obs.recordInOut(ctx, spansIn, 0) + if errors.Is(errFunc, ErrSkipProcessingData) { return nil } - return err + return errFunc } + spansOut := td.SpanCount() + obs.recordInOut(ctx, spansIn, spansOut) return nextConsumer.ConsumeTraces(ctx, td) }, bs.consumerOptions...) 
- if err != nil { return nil, err } - return &tracesProcessor{ + return &traces{ StartFunc: bs.StartFunc, ShutdownFunc: bs.ShutdownFunc, Traces: traceConsumer, diff --git a/vendor/go.opentelemetry.io/collector/processor/processortest/LICENSE b/vendor/go.opentelemetry.io/collector/processor/processortest/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/processor/processortest/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/go.opentelemetry.io/collector/processor/processortest/Makefile b/vendor/go.opentelemetry.io/collector/processor/processortest/Makefile
new file mode 100644
index 00000000000..ded7a36092d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/processor/processortest/Makefile
@@ -0,0 +1 @@
+include ../../Makefile.Common
diff --git a/vendor/go.opentelemetry.io/collector/processor/processortest/nop_processor.go b/vendor/go.opentelemetry.io/collector/processor/processortest/nop_processor.go
new file mode 100644
index 00000000000..4260bc17d45
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/processor/processortest/nop_processor.go
@@ -0,0 +1,68 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package processortest // import "go.opentelemetry.io/collector/processor/processortest"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/component/componenttest"
+	"go.opentelemetry.io/collector/consumer"
+	"go.opentelemetry.io/collector/consumer/consumertest"
+	"go.opentelemetry.io/collector/consumer/xconsumer"
+	"go.opentelemetry.io/collector/processor"
+	"go.opentelemetry.io/collector/processor/xprocessor"
+)
+
+var nopType = component.MustNewType("nop")
+
+// NewNopSettings returns a new nop settings for Create* functions.
+func NewNopSettings() processor.Settings {
+	return processor.Settings{
+		ID:                component.NewID(nopType),
+		TelemetrySettings: componenttest.NewNopTelemetrySettings(),
+		BuildInfo:         component.NewDefaultBuildInfo(),
+	}
+}
+
+// NewNopFactory returns a processor.Factory that constructs nop processors.
+func NewNopFactory() processor.Factory { + return xprocessor.NewFactory( + nopType, + func() component.Config { return &nopConfig{} }, + xprocessor.WithTraces(createTraces, component.StabilityLevelStable), + xprocessor.WithMetrics(createMetrics, component.StabilityLevelStable), + xprocessor.WithLogs(createLogs, component.StabilityLevelStable), + xprocessor.WithProfiles(createProfiles, component.StabilityLevelAlpha), + ) +} + +func createTraces(context.Context, processor.Settings, component.Config, consumer.Traces) (processor.Traces, error) { + return nopInstance, nil +} + +func createMetrics(context.Context, processor.Settings, component.Config, consumer.Metrics) (processor.Metrics, error) { + return nopInstance, nil +} + +func createLogs(context.Context, processor.Settings, component.Config, consumer.Logs) (processor.Logs, error) { + return nopInstance, nil +} + +func createProfiles(context.Context, processor.Settings, component.Config, xconsumer.Profiles) (xprocessor.Profiles, error) { + return nopInstance, nil +} + +type nopConfig struct{} + +var nopInstance = &nop{ + Consumer: consumertest.NewNop(), +} + +// nop acts as a processor for testing purposes. +type nop struct { + component.StartFunc + component.ShutdownFunc + consumertest.Consumer +} diff --git a/vendor/go.opentelemetry.io/collector/processor/processortest/shutdown_verifier.go b/vendor/go.opentelemetry.io/collector/processor/processortest/shutdown_verifier.go new file mode 100644 index 00000000000..9fe4b9a048c --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/processor/processortest/shutdown_verifier.go @@ -0,0 +1,99 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package processortest // import "go.opentelemetry.io/collector/processor/processortest" + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/pdata/testdata" + "go.opentelemetry.io/collector/pipeline" + "go.opentelemetry.io/collector/processor" +) + +func verifyTracesDoesNotProduceAfterShutdown(t *testing.T, factory processor.Factory, cfg component.Config) { + // Create a proc and output its produce to a sink. + nextSink := new(consumertest.TracesSink) + proc, err := factory.CreateTraces(context.Background(), NewNopSettings(), cfg, nextSink) + if errors.Is(err, pipeline.ErrSignalNotSupported) { + return + } + require.NoError(t, err) + assert.NoError(t, proc.Start(context.Background(), componenttest.NewNopHost())) + + // Send some traces to the proc. + const generatedCount = 10 + for i := 0; i < generatedCount; i++ { + require.NoError(t, proc.ConsumeTraces(context.Background(), testdata.GenerateTraces(1))) + } + + // Now shutdown the proc. + assert.NoError(t, proc.Shutdown(context.Background())) + + // The Shutdown() is done. It means the proc must have sent everything we + // gave it to the next sink. + assert.EqualValues(t, generatedCount, nextSink.SpanCount()) +} + +func verifyLogsDoesNotProduceAfterShutdown(t *testing.T, factory processor.Factory, cfg component.Config) { + // Create a proc and output its produce to a sink. 
+	nextSink := new(consumertest.LogsSink)
+	proc, err := factory.CreateLogs(context.Background(), NewNopSettings(), cfg, nextSink)
+	if errors.Is(err, pipeline.ErrSignalNotSupported) {
+		return
+	}
+	require.NoError(t, err)
+	assert.NoError(t, proc.Start(context.Background(), componenttest.NewNopHost()))
+
+	// Send some logs to the proc.
+	const generatedCount = 10
+	for i := 0; i < generatedCount; i++ {
+		require.NoError(t, proc.ConsumeLogs(context.Background(), testdata.GenerateLogs(1)))
+	}
+
+	// Now shutdown the proc.
+	assert.NoError(t, proc.Shutdown(context.Background()))
+
+	// The Shutdown() is done. It means the proc must have sent everything we
+	// gave it to the next sink.
+	assert.EqualValues(t, generatedCount, nextSink.LogRecordCount())
+}
+
+func verifyMetricsDoesNotProduceAfterShutdown(t *testing.T, factory processor.Factory, cfg component.Config) {
+	// Create a proc and output its produce to a sink.
+	nextSink := new(consumertest.MetricsSink)
+	proc, err := factory.CreateMetrics(context.Background(), NewNopSettings(), cfg, nextSink)
+	if errors.Is(err, pipeline.ErrSignalNotSupported) {
+		return
+	}
+	require.NoError(t, err)
+	assert.NoError(t, proc.Start(context.Background(), componenttest.NewNopHost()))
+
+	// Send some metrics to the proc. testdata.GenerateMetrics creates metrics with 2 data points each.
+	const generatedCount = 10
+	for i := 0; i < generatedCount; i++ {
+		require.NoError(t, proc.ConsumeMetrics(context.Background(), testdata.GenerateMetrics(1)))
+	}
+
+	// Now shutdown the proc.
+	assert.NoError(t, proc.Shutdown(context.Background()))
+
+	// The Shutdown() is done. It means the proc must have sent everything we
+	// gave it to the next sink.
+	assert.EqualValues(t, generatedCount*2, nextSink.DataPointCount())
+}
+
+// VerifyShutdown verifies the processor doesn't produce telemetry data after shutdown.
+func VerifyShutdown(t *testing.T, factory processor.Factory, cfg component.Config) {
+	verifyTracesDoesNotProduceAfterShutdown(t, factory, cfg)
+	verifyLogsDoesNotProduceAfterShutdown(t, factory, cfg)
+	verifyMetricsDoesNotProduceAfterShutdown(t, factory, cfg)
+}
diff --git a/vendor/go.opentelemetry.io/collector/processor/processortest/unhealthy_processor.go b/vendor/go.opentelemetry.io/collector/processor/processortest/unhealthy_processor.go
new file mode 100644
index 00000000000..ce6d7d2f87a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/processor/processortest/unhealthy_processor.go
@@ -0,0 +1,62 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package processortest // import "go.opentelemetry.io/collector/processor/processortest"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/component/componentstatus"
+	"go.opentelemetry.io/collector/consumer"
+	"go.opentelemetry.io/collector/consumer/consumertest"
+	"go.opentelemetry.io/collector/processor"
+)
+
+// NewUnhealthyProcessorFactory returns a processor.Factory that constructs unhealthy processors.
+func NewUnhealthyProcessorFactory() processor.Factory { + return processor.NewFactory( + component.MustNewType("unhealthy"), + func() component.Config { + return &struct{}{} + }, + processor.WithTraces(createUnhealthyTraces, component.StabilityLevelStable), + processor.WithMetrics(createUnhealthyMetrics, component.StabilityLevelStable), + processor.WithLogs(createUnhealthyLogs, component.StabilityLevelStable), + ) +} + +func createUnhealthyTraces(_ context.Context, set processor.Settings, _ component.Config, _ consumer.Traces) (processor.Traces, error) { + return &unhealthy{ + Consumer: consumertest.NewNop(), + telemetry: set.TelemetrySettings, + }, nil +} + +func createUnhealthyMetrics(_ context.Context, set processor.Settings, _ component.Config, _ consumer.Metrics) (processor.Metrics, error) { + return &unhealthy{ + Consumer: consumertest.NewNop(), + telemetry: set.TelemetrySettings, + }, nil +} + +func createUnhealthyLogs(_ context.Context, set processor.Settings, _ component.Config, _ consumer.Logs) (processor.Logs, error) { + return &unhealthy{ + Consumer: consumertest.NewNop(), + telemetry: set.TelemetrySettings, + }, nil +} + +type unhealthy struct { + component.StartFunc + component.ShutdownFunc + consumertest.Consumer + telemetry component.TelemetrySettings +} + +func (p unhealthy) Start(_ context.Context, host component.Host) error { + go func() { + componentstatus.ReportStatus(host, componentstatus.NewEvent(componentstatus.StatusRecoverableError)) + }() + return nil +} diff --git a/vendor/go.opentelemetry.io/collector/processor/xprocessor/LICENSE b/vendor/go.opentelemetry.io/collector/processor/xprocessor/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/processor/xprocessor/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
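For orientation, a minimal sketch of a downstream component factory written against the renamed API from the hunks above: processor.Settings replaces processor.CreateSettings, processorhelper.NewTraces replaces NewTracesProcessor, and a nil create function now yields pipeline.ErrSignalNotSupported rather than component.ErrDataTypeIsNotSupported. The "myprocessor" component and its Config are hypothetical; only the processor and processorhelper signatures come from the vendored code.

package myprocessor

import (
	"context"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/consumer"
	"go.opentelemetry.io/collector/pdata/ptrace"
	"go.opentelemetry.io/collector/processor"
	"go.opentelemetry.io/collector/processor/processorhelper"
)

// Config is a placeholder config for the hypothetical component.
type Config struct{}

// NewFactory wires a traces-only processor against the renamed API.
func NewFactory() processor.Factory {
	return processor.NewFactory(
		component.MustNewType("myprocessor"),
		func() component.Config { return &Config{} },
		// WithTraces now takes a processor.CreateTracesFunc built on processor.Settings.
		processor.WithTraces(createTraces, component.StabilityLevelAlpha),
	)
}

func createTraces(ctx context.Context, set processor.Settings, cfg component.Config, next consumer.Traces) (processor.Traces, error) {
	// NewTraces replaces NewTracesProcessor; incoming/outgoing item counts are
	// recorded automatically by the obsReport added in the hunks above.
	return processorhelper.NewTraces(ctx, set, cfg, next,
		func(_ context.Context, td ptrace.Traces) (ptrace.Traces, error) {
			return td, nil // pass spans through unchanged
		})
}

Because the helper now owns the incoming/outgoing counters, components built this way no longer construct an ObsReport themselves.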
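In the same spirit, a short hypothetical test sketch for the processortest helpers introduced above: NewNopSettings supplies the processor.Settings that the Create* functions now take, and VerifyShutdown checks that a factory's processors flush all data before Shutdown returns (signals reporting pipeline.ErrSignalNotSupported are skipped).

package myprocessor

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"

	"go.opentelemetry.io/collector/consumer/consumertest"
	"go.opentelemetry.io/collector/processor/processortest"
)

func TestProcessorShutdown(t *testing.T) {
	factory := NewFactory() // the hypothetical factory from the sketch above
	cfg := factory.CreateDefaultConfig()

	// Create* functions take nop Settings built by processortest.
	proc, err := factory.CreateTraces(context.Background(), processortest.NewNopSettings(), cfg, consumertest.NewNop())
	require.NoError(t, err)
	require.NotNil(t, proc)

	// VerifyShutdown exercises traces, metrics and logs in turn.
	processortest.VerifyShutdown(t, factory, cfg)
}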
diff --git a/vendor/go.opentelemetry.io/collector/processor/xprocessor/Makefile b/vendor/go.opentelemetry.io/collector/processor/xprocessor/Makefile new file mode 100644 index 00000000000..ded7a36092d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/processor/xprocessor/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/processor/xprocessor/processor.go b/vendor/go.opentelemetry.io/collector/processor/xprocessor/processor.go new file mode 100644 index 00000000000..b029abbf519 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/processor/xprocessor/processor.go @@ -0,0 +1,114 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package xprocessor // import "go.opentelemetry.io/collector/processor/xprocessor" + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer/xconsumer" + "go.opentelemetry.io/collector/pipeline" + "go.opentelemetry.io/collector/processor" +) + +// Factory is a component.Factory interface for processors. +// +// This interface cannot be directly implemented. Implementations must +// use the NewFactory to implement it. +type Factory interface { + processor.Factory + + // CreateProfiles creates a Profiles processor based on this config. + // If the processor type does not support profiles or if the config is not valid, + // an error will be returned instead. + CreateProfiles(ctx context.Context, set processor.Settings, cfg component.Config, next xconsumer.Profiles) (Profiles, error) + + // ProfilesStability gets the stability level of the Profiles processor. + ProfilesStability() component.StabilityLevel +} + +// Profiles is a processor that can consume profiles. +type Profiles interface { + component.Component + xconsumer.Profiles +} + +// CreateProfilesFunc is the equivalent of Factory.CreateProfiles(). +type CreateProfilesFunc func(context.Context, processor.Settings, component.Config, xconsumer.Profiles) (Profiles, error) + +// CreateProfiles implements Factory.CreateProfiles. +func (f CreateProfilesFunc) CreateProfiles(ctx context.Context, set processor.Settings, cfg component.Config, next xconsumer.Profiles) (Profiles, error) { + if f == nil { + return nil, pipeline.ErrSignalNotSupported + } + return f(ctx, set, cfg, next) +} + +// FactoryOption applies changes to the factory options. +type FactoryOption interface { + // applyOption applies the option. + applyOption(o *factoryOpts) +} + +// factoryOptionFunc is a FactoryOption created through a function. +type factoryOptionFunc func(*factoryOpts) + +func (f factoryOptionFunc) applyOption(o *factoryOpts) { + f(o) +} + +type factory struct { + processor.Factory + CreateProfilesFunc + profilesStabilityLevel component.StabilityLevel +} + +func (f factory) ProfilesStability() component.StabilityLevel { + return f.profilesStabilityLevel +} + +type factoryOpts struct { + opts []processor.FactoryOption + *factory +} + +// WithTraces overrides the default "error not supported" implementation for CreateTraces and the default "undefined" stability level.
+func WithTraces(createTraces processor.CreateTracesFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.opts = append(o.opts, processor.WithTraces(createTraces, sl)) + }) +} + +// WithMetrics overrides the default "error not supported" implementation for CreateMetrics and the default "undefined" stability level. +func WithMetrics(createMetrics processor.CreateMetricsFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.opts = append(o.opts, processor.WithMetrics(createMetrics, sl)) + }) +} + +// WithLogs overrides the default "error not supported" implementation for CreateLogs and the default "undefined" stability level. +func WithLogs(createLogs processor.CreateLogsFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.opts = append(o.opts, processor.WithLogs(createLogs, sl)) + }) +} + +// WithProfiles overrides the default "error not supported" implementation for CreateProfiles and the default "undefined" stability level. +func WithProfiles(createProfiles CreateProfilesFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.profilesStabilityLevel = sl + o.CreateProfilesFunc = createProfiles + }) +} + +// NewFactory returns a Factory. +func NewFactory(cfgType component.Type, createDefaultConfig component.CreateDefaultConfigFunc, options ...FactoryOption) Factory { + opts := factoryOpts{factory: &factory{}} + for _, opt := range options { + opt.applyOption(&opts) + } + opts.factory.Factory = processor.NewFactory(cfgType, createDefaultConfig, opts.opts...) + return opts.factory +} diff --git a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/README.md b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/README.md index a5572071629..aab0d924148 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/README.md +++ b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/README.md @@ -3,15 +3,19 @@ | Status | | | ------------- |-----------| -| Stability | [beta]: logs | +| Stability | [development]: profiles | +| | [beta]: logs | | | [stable]: traces, metrics | -| Distributions | [core], [contrib] | -| Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Areceiver%2Fotlp%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Areceiver%2Fotlp) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Areceiver%2Fotlp%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Areceiver%2Fotlp) | +| Distributions | [core], [contrib], [k8s], [otlp] | +| Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector?query=is%3Aissue%20is%3Aopen%20label%3Areceiver%2Fotlp%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector/issues?q=is%3Aopen+is%3Aissue+label%3Areceiver%2Fotlp) [![Closed 
issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector?query=is%3Aissue%20is%3Aclosed%20label%3Areceiver%2Fotlp%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector/issues?q=is%3Aclosed+is%3Aissue+label%3Areceiver%2Fotlp) | -[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta -[stable]: https://github.com/open-telemetry/opentelemetry-collector#stable +[development]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#development +[beta]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#beta +[stable]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#stable [core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol [contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib +[k8s]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-k8s +[otlp]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-otlp Receives data via gRPC or HTTP using [OTLP]( @@ -34,11 +38,11 @@ receivers: The following settings are configurable: -- `endpoint` (default = 0.0.0.0:4317 for grpc protocol, 0.0.0.0:4318 http protocol): +- `endpoint` (default = localhost:4317 for grpc protocol, localhost:4318 http protocol): host:port to which the receiver is going to receive data. The valid syntax is - described at https://github.com/grpc/grpc/blob/master/doc/naming.md. The - `component.UseLocalHostAsDefaultHost` feature gate changes these to localhost:4317 and - localhost:4318 respectively. This will become the default in a future release. + described at https://github.com/grpc/grpc/blob/master/doc/naming.md. See our + [security best practices doc](https://opentelemetry.io/docs/security/config-best-practices/#protect-against-denial-of-service-attacks) + to understand how to set the endpoint in different environments. ## Advanced Configuration @@ -55,15 +59,21 @@ The OTLP receiver can receive trace export calls via HTTP/JSON in addition to gRPC. The HTTP/JSON address is the same as gRPC as the protocol is recognized and processed accordingly. Note the serialization format needs to be [OTLP JSON](https://opentelemetry.io/docs/specs/otlp/#json-protobuf-encoding). -The HTTP/JSON configuration also provides `traces_url_path`, `metrics_url_path`, and `logs_url_path` -configuration to allow the URL paths that signal data needs to be sent to be modified per signal type. These default to -`/v1/traces`, `/v1/metrics`, and `/v1/logs` respectively. - -To write traces with HTTP/JSON, `POST` to `[address]/[traces_url_path]` for traces, -to `[address]/[metrics_url_path]` for metrics, to `[address]/[logs_url_path]` for logs. -The default port is `4318`. When using the `otlphttpexporter` peer to communicate with this component, -use the `traces_endpoint`, `metrics_endpoint`, and `logs_endpoint` settings in the `otlphttpexporter` to set the -proper URL to match the address and URL signal path on the `otlpreceiver`. +The HTTP/JSON configuration also provides `traces_url_path`, +`metrics_url_path`, `logs_url_path`, and `profiles_url_path` configurations to +allow the URL paths that signal data needs to be sent to be modified per signal +type. 
These default to `/v1/traces`, `/v1/metrics`, `/v1/logs`, and +`/v1development/profiles` respectively. + +To write traces with HTTP/JSON, `POST` to `[address]/[traces_url_path]` for +traces, to `[address]/[metrics_url_path]` for metrics, to +`[address]/[logs_url_path]` for logs, and to `[address]/[profiles_url_path]` for +profiles. +The default port is `4318`. When using the `otlphttpexporter` peer to +communicate with this component, use the `traces_endpoint`, +`metrics_endpoint`, `logs_endpoint`, and `profiles_endpoint` settings in the +`otlphttpexporter` to set the proper URL to match the address and URL signal +path on the `otlpreceiver`. ### CORS (Cross-origin resource sharing) @@ -93,7 +103,5 @@ receivers: max_age: 7200 ``` -[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta [contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib [core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol -[stable]: https://github.com/open-telemetry/opentelemetry-collector#stable diff --git a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/config.go b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/config.go index 8e512f5a278..b40a0476fed 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/config.go +++ b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/config.go @@ -46,8 +46,10 @@ type Config struct { Protocols `mapstructure:"protocols"` } -var _ component.Config = (*Config)(nil) -var _ confmap.Unmarshaler = (*Config)(nil) +var ( + _ component.Config = (*Config)(nil) + _ confmap.Unmarshaler = (*Config)(nil) +) // Validate checks the receiver configuration is valid func (cfg *Config) Validate() error { diff --git a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/config.md b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/config.md index df6cf931a12..b77b4bdae73 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/config.md +++ b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/config.md @@ -20,7 +20,7 @@ Config defines configuration for OTLP receiver. | Name | Type | Default | Docs | |------------------------|-----------------------------------------------------------------------|--------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| endpoint | string | 0.0.0.0:4317 | Endpoint configures the address for this network connection. For TCP and UDP networks, the address has the form "host:port". The host must be a literal IP address, or a host name that can be resolved to IP addresses. The port must be a literal port number or a service name. If the host is a literal IPv6 address it must be enclosed in square brackets, as in "[2001:db8::1]:80" or "[fe80::1%zone]:80". The zone specifies the scope of the literal IPv6 address as defined in RFC 4007. | +| endpoint | string | localhost:4317 | Endpoint configures the address for this network connection. For TCP and UDP networks, the address has the form "host:port".
The host must be a literal IP address, or a host name that can be resolved to IP addresses. The port must be a literal port number or a service name. If the host is a literal IPv6 address it must be enclosed in square brackets, as in "[2001:db8::1]:80" or "[fe80::1%zone]:80". The zone specifies the scope of the literal IPv6 address as defined in RFC 4007. | | transport | string | tcp | Transport to use. Known protocols are "tcp", "tcp4" (IPv4-only), "tcp6" (IPv6-only), "udp", "udp4" (IPv4-only), "udp6" (IPv6-only), "ip", "ip4" (IPv4-only), "ip6" (IPv6-only), "unix", "unixgram" and "unixpacket". | | tls | [configtls-TLSServerSetting](#configtls-tlsserversetting) | | Configures the protocol to use TLS. The default value is nil, which will cause the protocol to not use TLS. | | max_recv_msg_size_mib | uint64 | | MaxRecvMsgSizeMiB sets the maximum size (in MiB) of messages accepted by the server. | @@ -73,10 +73,10 @@ Config defines configuration for OTLP receiver. | Name | Type | Default | Docs | |-----------------------|-----------------------------------------------------------|--------------|-----------------------------------------------------------------------------------------------------------------------------------------| -| endpoint | string | 0.0.0.0:4318 | Endpoint configures the listening address for the server. | +| endpoint | string | localhost:4318 | Endpoint configures the listening address for the server. | | tls | [configtls-TLSServerSetting](#configtls-tlsserversetting) | | TLSSetting struct exposes TLS client configuration. | | cors | [confighttp-CORSConfig](#confighttp-corsconfig) | | CORSConfig configures a receiver for HTTP cross-origin resource sharing (CORS). | -| max_request_body_size | int | 0 | MaxRequestBodySize configures the maximum allowed body size in bytes for a single request. The default `0` means there's no restriction | +| max_request_body_size | int | 20971520 | MaxRequestBodySize configures the maximum allowed body size in bytes for a single request. 
The default `20971520` means 20MiB | ### confighttp-CORSConfig diff --git a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/encoder.go b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/encoder.go index d37437c7ca9..6462aec745d 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/encoder.go +++ b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/encoder.go @@ -12,6 +12,7 @@ import ( "go.opentelemetry.io/collector/pdata/plog/plogotlp" "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp" + "go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp" "go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp" ) @@ -30,10 +31,12 @@ type encoder interface { unmarshalTracesRequest(buf []byte) (ptraceotlp.ExportRequest, error) unmarshalMetricsRequest(buf []byte) (pmetricotlp.ExportRequest, error) unmarshalLogsRequest(buf []byte) (plogotlp.ExportRequest, error) + unmarshalProfilesRequest(buf []byte) (pprofileotlp.ExportRequest, error) marshalTracesResponse(ptraceotlp.ExportResponse) ([]byte, error) marshalMetricsResponse(pmetricotlp.ExportResponse) ([]byte, error) marshalLogsResponse(plogotlp.ExportResponse) ([]byte, error) + marshalProfilesResponse(pprofileotlp.ExportResponse) ([]byte, error) marshalStatus(rsp *spb.Status) ([]byte, error) @@ -60,6 +63,12 @@ func (protoEncoder) unmarshalLogsRequest(buf []byte) (plogotlp.ExportRequest, er return req, err } +func (protoEncoder) unmarshalProfilesRequest(buf []byte) (pprofileotlp.ExportRequest, error) { + req := pprofileotlp.NewExportRequest() + err := req.UnmarshalProto(buf) + return req, err +} + func (protoEncoder) marshalTracesResponse(resp ptraceotlp.ExportResponse) ([]byte, error) { return resp.MarshalProto() } @@ -72,6 +81,10 @@ func (protoEncoder) marshalLogsResponse(resp plogotlp.ExportResponse) ([]byte, e return resp.MarshalProto() } +func (protoEncoder) marshalProfilesResponse(resp pprofileotlp.ExportResponse) ([]byte, error) { + return resp.MarshalProto() +} + func (protoEncoder) marshalStatus(resp *spb.Status) ([]byte, error) { return proto.Marshal(resp) } @@ -100,6 +113,12 @@ func (jsonEncoder) unmarshalLogsRequest(buf []byte) (plogotlp.ExportRequest, err return req, err } +func (jsonEncoder) unmarshalProfilesRequest(buf []byte) (pprofileotlp.ExportRequest, error) { + req := pprofileotlp.NewExportRequest() + err := req.UnmarshalJSON(buf) + return req, err +} + func (jsonEncoder) marshalTracesResponse(resp ptraceotlp.ExportResponse) ([]byte, error) { return resp.MarshalJSON() } @@ -112,6 +131,10 @@ func (jsonEncoder) marshalLogsResponse(resp plogotlp.ExportResponse) ([]byte, er return resp.MarshalJSON() } +func (jsonEncoder) marshalProfilesResponse(resp pprofileotlp.ExportResponse) ([]byte, error) { + return resp.MarshalJSON() +} + func (jsonEncoder) marshalStatus(resp *spb.Status) ([]byte, error) { buf := new(bytes.Buffer) err := jsonPbMarshaler.Marshal(buf, resp) diff --git a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/factory.go b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/factory.go index 4b148c40be3..cb7cc0a6c7d 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/factory.go +++ b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/factory.go @@ -11,29 +11,29 @@ import ( "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/config/confignet" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/internal/localhostgate" + "go.opentelemetry.io/collector/consumer/xconsumer" 
"go.opentelemetry.io/collector/internal/sharedcomponent" "go.opentelemetry.io/collector/receiver" "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metadata" + "go.opentelemetry.io/collector/receiver/xreceiver" ) const ( - grpcPort = 4317 - httpPort = 4318 - - defaultTracesURLPath = "/v1/traces" - defaultMetricsURLPath = "/v1/metrics" - defaultLogsURLPath = "/v1/logs" + defaultTracesURLPath = "/v1/traces" + defaultMetricsURLPath = "/v1/metrics" + defaultLogsURLPath = "/v1/logs" + defaultProfilesURLPath = "/v1development/profiles" ) // NewFactory creates a new OTLP receiver factory. func NewFactory() receiver.Factory { - return receiver.NewFactory( + return xreceiver.NewFactory( metadata.Type, createDefaultConfig, - receiver.WithTraces(createTraces, metadata.TracesStability), - receiver.WithMetrics(createMetrics, metadata.MetricsStability), - receiver.WithLogs(createLog, metadata.LogsStability), + xreceiver.WithTraces(createTraces, metadata.TracesStability), + xreceiver.WithMetrics(createMetrics, metadata.MetricsStability), + xreceiver.WithLogs(createLog, metadata.LogsStability), + xreceiver.WithProfiles(createProfiles, metadata.ProfilesStability), ) } @@ -43,7 +43,7 @@ func createDefaultConfig() component.Config { Protocols: Protocols{ GRPC: &configgrpc.ServerConfig{ NetAddr: confignet.AddrConfig{ - Endpoint: localhostgate.EndpointForPort(grpcPort), + Endpoint: "localhost:4317", Transport: confignet.TransportTypeTCP, }, // We almost write 0 bytes, so no need to tune WriteBufferSize. @@ -51,7 +51,7 @@ func createDefaultConfig() component.Config { }, HTTP: &HTTPConfig{ ServerConfig: &confighttp.ServerConfig{ - Endpoint: localhostgate.EndpointForPort(httpPort), + Endpoint: "localhost:4318", }, TracesURLPath: defaultTracesURLPath, MetricsURLPath: defaultMetricsURLPath, @@ -64,7 +64,7 @@ func createDefaultConfig() component.Config { // createTraces creates a trace receiver based on provided config. func createTraces( _ context.Context, - set receiver.CreateSettings, + set receiver.Settings, cfg component.Config, nextConsumer consumer.Traces, ) (receiver.Traces, error) { @@ -74,7 +74,6 @@ func createTraces( func() (*otlpReceiver, error) { return newOtlpReceiver(oCfg, &set) }, - &set.TelemetrySettings, ) if err != nil { return nil, err @@ -87,7 +86,7 @@ func createTraces( // createMetrics creates a metrics receiver based on provided config. func createMetrics( _ context.Context, - set receiver.CreateSettings, + set receiver.Settings, cfg component.Config, consumer consumer.Metrics, ) (receiver.Metrics, error) { @@ -97,7 +96,6 @@ func createMetrics( func() (*otlpReceiver, error) { return newOtlpReceiver(oCfg, &set) }, - &set.TelemetrySettings, ) if err != nil { return nil, err @@ -110,7 +108,7 @@ func createMetrics( // createLog creates a log receiver based on provided config. func createLog( _ context.Context, - set receiver.CreateSettings, + set receiver.Settings, cfg component.Config, consumer consumer.Logs, ) (receiver.Logs, error) { @@ -120,7 +118,6 @@ func createLog( func() (*otlpReceiver, error) { return newOtlpReceiver(oCfg, &set) }, - &set.TelemetrySettings, ) if err != nil { return nil, err @@ -130,9 +127,31 @@ func createLog( return r, nil } +// createProfiles creates a trace receiver based on provided config. 
+func createProfiles( + _ context.Context, + set receiver.Settings, + cfg component.Config, + nextConsumer xconsumer.Profiles, +) (xreceiver.Profiles, error) { + oCfg := cfg.(*Config) + r, err := receivers.LoadOrStore( + oCfg, + func() (*otlpReceiver, error) { + return newOtlpReceiver(oCfg, &set) + }, + ) + if err != nil { + return nil, err + } + + r.Unwrap().registerProfilesConsumer(nextConsumer) + return r, nil +} + // This is the map of already created OTLP receivers for particular configurations. -// We maintain this map because the Factory is asked trace and metric receivers separately -// when it gets CreateTracesReceiver() and CreateMetricsReceiver() but they must not +// We maintain this map because the receiver.Factory is asked trace and metric receivers separately +// when it gets CreateTraces() and CreateMetrics() but they must not // create separate objects, they must use one otlpReceiver object per configuration. // When the receiver is shutdown it should be removed from this map so the same configuration // can be recreated successfully. diff --git a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/internal/errors/errors.go b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/internal/errors/errors.go index 0619b7aa8ea..519a31fbc25 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/internal/errors/errors.go +++ b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/internal/errors/errors.go @@ -40,6 +40,18 @@ func GetHTTPStatusCodeFromStatus(s *status.Status) int { case codes.ResourceExhausted: return http.StatusTooManyRequests // Not Retryable + case codes.InvalidArgument: + return http.StatusBadRequest + // Not Retryable + case codes.Unauthenticated: + return http.StatusUnauthorized + // Not Retryable + case codes.PermissionDenied: + return http.StatusForbidden + // Not Retryable + case codes.Unimplemented: + return http.StatusNotFound + // Not Retryable default: return http.StatusInternalServerError } diff --git a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metadata/generated_status.go b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metadata/generated_status.go index c9cff844fa2..5fe9771f5dd 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metadata/generated_status.go +++ b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metadata/generated_status.go @@ -7,11 +7,13 @@ import ( ) var ( - Type = component.MustNewType("otlp") + Type = component.MustNewType("otlp") + ScopeName = "go.opentelemetry.io/collector/receiver/otlpreceiver" ) const ( - LogsStability = component.StabilityLevelBeta - TracesStability = component.StabilityLevelStable - MetricsStability = component.StabilityLevelStable + ProfilesStability = component.StabilityLevelDevelopment + LogsStability = component.StabilityLevelBeta + TracesStability = component.StabilityLevelStable + MetricsStability = component.StabilityLevelStable ) diff --git a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/internal/profiles/otlp.go b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/internal/profiles/otlp.go new file mode 100644 index 00000000000..269769b6824 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/internal/profiles/otlp.go @@ -0,0 +1,48 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package profiles // import "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/profiles" + +import ( 
+ "context" + + "go.opentelemetry.io/collector/consumer/xconsumer" + "go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp" + "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/errors" +) + +// Receiver is the type used to handle spans from OpenTelemetry exporters. +type Receiver struct { + pprofileotlp.UnimplementedGRPCServer + nextConsumer xconsumer.Profiles +} + +// New creates a new Receiver reference. +func New(nextConsumer xconsumer.Profiles) *Receiver { + return &Receiver{ + nextConsumer: nextConsumer, + } +} + +// Export implements the service Export profiles func. +func (r *Receiver) Export(ctx context.Context, req pprofileotlp.ExportRequest) (pprofileotlp.ExportResponse, error) { + td := req.Profiles() + // We need to ensure that it propagates the receiver name as a tag + numProfiles := td.SampleCount() + if numProfiles == 0 { + return pprofileotlp.NewExportResponse(), nil + } + + err := r.nextConsumer.ConsumeProfiles(ctx, td) + // Use appropriate status codes for permanent/non-permanent errors + // If we return the error straightaway, then the grpc implementation will set status code to Unknown + // Refer: https://github.com/grpc/grpc-go/blob/v1.59.0/server.go#L1345 + // So, convert the error to appropriate grpc status and return the error + // NonPermanent errors will be converted to codes.Unavailable (equivalent to HTTP 503) + // Permanent errors will be converted to codes.InvalidArgument (equivalent to HTTP 400) + if err != nil { + return pprofileotlp.NewExportResponse(), errors.GetStatusFromError(err) + } + + return pprofileotlp.NewExportResponse(), nil +} diff --git a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/metadata.yaml b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/metadata.yaml index c3036a3341d..cae2267ea3a 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/metadata.yaml +++ b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/metadata.yaml @@ -1,8 +1,10 @@ type: otlp +github_project: open-telemetry/opentelemetry-collector status: class: receiver stability: stable: [traces, metrics] beta: [logs] - distributions: [core, contrib] + development: [profiles] + distributions: [core, contrib, k8s, otlp] diff --git a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/otlp.go b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/otlp.go index 43eed9b821a..0f8d3c66378 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/otlp.go +++ b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/otlp.go @@ -14,14 +14,18 @@ import ( "google.golang.org/grpc" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componentstatus" "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/xconsumer" "go.opentelemetry.io/collector/pdata/plog/plogotlp" "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp" + "go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp" "go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp" "go.opentelemetry.io/collector/receiver" "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/logs" "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metrics" + "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/profiles" "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/trace" "go.opentelemetry.io/collector/receiver/receiverhelper" ) @@ -32,27 +36,29 @@ type otlpReceiver struct { serverGRPC *grpc.Server 
serverHTTP *http.Server - nextTraces consumer.Traces - nextMetrics consumer.Metrics - nextLogs consumer.Logs - shutdownWG sync.WaitGroup + nextTraces consumer.Traces + nextMetrics consumer.Metrics + nextLogs consumer.Logs + nextProfiles xconsumer.Profiles + shutdownWG sync.WaitGroup obsrepGRPC *receiverhelper.ObsReport obsrepHTTP *receiverhelper.ObsReport - settings *receiver.CreateSettings + settings *receiver.Settings } // newOtlpReceiver just creates the OpenTelemetry receiver services. It is the caller's // responsibility to invoke the respective Start*Reception methods as well // as the various Stop*Reception methods to end it. -func newOtlpReceiver(cfg *Config, set *receiver.CreateSettings) (*otlpReceiver, error) { +func newOtlpReceiver(cfg *Config, set *receiver.Settings) (*otlpReceiver, error) { r := &otlpReceiver{ - cfg: cfg, - nextTraces: nil, - nextMetrics: nil, - nextLogs: nil, - settings: set, + cfg: cfg, + nextTraces: nil, + nextMetrics: nil, + nextLogs: nil, + nextProfiles: nil, + settings: set, } var err error @@ -99,6 +105,10 @@ func (r *otlpReceiver) startGRPCServer(host component.Host) error { plogotlp.RegisterGRPCServer(r.serverGRPC, logs.New(r.nextLogs, r.obsrepGRPC)) } + if r.nextProfiles != nil { + pprofileotlp.RegisterGRPCServer(r.serverGRPC, profiles.New(r.nextProfiles)) + } + r.settings.Logger.Info("Starting GRPC server", zap.String("endpoint", r.cfg.GRPC.NetAddr.Endpoint)) var gln net.Listener if gln, err = r.cfg.GRPC.NetAddr.Listen(context.Background()); err != nil { @@ -110,7 +120,7 @@ func (r *otlpReceiver) startGRPCServer(host component.Host) error { defer r.shutdownWG.Done() if errGrpc := r.serverGRPC.Serve(gln); errGrpc != nil && !errors.Is(errGrpc, grpc.ErrServerStopped) { - r.settings.ReportStatus(component.NewFatalErrorEvent(errGrpc)) + componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(errGrpc)) } }() return nil @@ -144,6 +154,13 @@ func (r *otlpReceiver) startHTTPServer(ctx context.Context, host component.Host) }) } + if r.nextProfiles != nil { + httpProfilesReceiver := profiles.New(r.nextProfiles) + httpMux.HandleFunc(defaultProfilesURLPath, func(resp http.ResponseWriter, req *http.Request) { + handleProfiles(resp, req, httpProfilesReceiver) + }) + } + var err error if r.serverHTTP, err = r.cfg.HTTP.ToServer(ctx, host, r.settings.TelemetrySettings, httpMux, confighttp.WithErrorHandler(errorHandler)); err != nil { return err @@ -160,7 +177,7 @@ func (r *otlpReceiver) startHTTPServer(ctx context.Context, host component.Host) defer r.shutdownWG.Done() if errHTTP := r.serverHTTP.Serve(hln); errHTTP != nil && !errors.Is(errHTTP, http.ErrServerClosed) { - r.settings.ReportStatus(component.NewFatalErrorEvent(errHTTP)) + componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(errHTTP)) } }() return nil @@ -209,3 +226,7 @@ func (r *otlpReceiver) registerMetricsConsumer(mc consumer.Metrics) { func (r *otlpReceiver) registerLogsConsumer(lc consumer.Logs) { r.nextLogs = lc } + +func (r *otlpReceiver) registerProfilesConsumer(tc xconsumer.Profiles) { + r.nextProfiles = tc +} diff --git a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/otlphttp.go b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/otlphttp.go index b895b8d2e7c..4c9c8231fe7 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/otlphttp.go +++ b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/otlphttp.go @@ -16,6 +16,7 @@ import ( "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/errors" 
"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/logs" "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metrics" + "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/profiles" "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/trace" ) @@ -117,6 +118,37 @@ func handleLogs(resp http.ResponseWriter, req *http.Request, logsReceiver *logs. writeResponse(resp, enc.contentType(), http.StatusOK, msg) } +func handleProfiles(resp http.ResponseWriter, req *http.Request, profilesReceiver *profiles.Receiver) { + enc, ok := readContentType(resp, req) + if !ok { + return + } + + body, ok := readAndCloseBody(resp, req, enc) + if !ok { + return + } + + otlpReq, err := enc.unmarshalProfilesRequest(body) + if err != nil { + writeError(resp, enc, err, http.StatusBadRequest) + return + } + + otlpResp, err := profilesReceiver.Export(req.Context(), otlpReq) + if err != nil { + writeError(resp, enc, err, http.StatusInternalServerError) + return + } + + msg, err := enc.marshalProfilesResponse(otlpResp) + if err != nil { + writeError(resp, enc, err, http.StatusInternalServerError) + return + } + writeResponse(resp, enc.contentType(), http.StatusOK, msg) +} + func readContentType(resp http.ResponseWriter, req *http.Request) (encoder, bool) { if req.Method != http.MethodPost { handleUnmatchedMethod(resp) diff --git a/vendor/go.opentelemetry.io/collector/receiver/receiver.go b/vendor/go.opentelemetry.io/collector/receiver/receiver.go index 3b0f0ac8371..dd7242d5f77 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/receiver.go +++ b/vendor/go.opentelemetry.io/collector/receiver/receiver.go @@ -5,22 +5,16 @@ package receiver // import "go.opentelemetry.io/collector/receiver" import ( "context" - "errors" "fmt" - "go.uber.org/zap" - "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" -) - -var ( - errNilNextConsumer = errors.New("nil next Consumer") + "go.opentelemetry.io/collector/pipeline" ) // Traces receiver receives traces. // Its purpose is to translate data from any format to the collector's internal trace format. -// TracesReceiver feeds a consumer.Traces with data. +// Traces receiver feeds a consumer.Traces with data. // // For example, it could be Zipkin data source which translates Zipkin spans into ptrace.Traces. type Traces interface { @@ -29,7 +23,7 @@ type Traces interface { // Metrics receiver receives metrics. // Its purpose is to translate data from any format to the collector's internal metrics format. -// MetricsReceiver feeds a consumer.Metrics with data. +// Metrics receiver feeds a consumer.Metrics with data. // // For example, it could be Prometheus data source which translates Prometheus metrics into pmetric.Metrics. type Metrics interface { @@ -38,15 +32,15 @@ type Metrics interface { // Logs receiver receives logs. // Its purpose is to translate data from any format to the collector's internal logs data format. -// LogsReceiver feeds a consumer.Logs with data. +// Logs receiver feeds a consumer.Logs with data. // // For example, it could be a receiver that reads syslogs and convert them into plog.Logs. type Logs interface { component.Component } -// CreateSettings configures Receiver creators. -type CreateSettings struct { +// Settings configures receiver creators. +type Settings struct { // ID returns the ID of the component that will be created. ID component.ID @@ -56,47 +50,50 @@ type CreateSettings struct { BuildInfo component.BuildInfo } -// Factory is factory interface for receivers. 
+// Factory is a factory interface for receivers. // // This interface cannot be directly implemented. Implementations must -// use the NewReceiverFactory to implement it. +// use the NewFactory to implement it. type Factory interface { component.Factory - // CreateTracesReceiver creates a TracesReceiver based on this config. - // If the receiver type does not support tracing or if the config is not valid - // an error will be returned instead. `nextConsumer` is never nil. - CreateTracesReceiver(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Traces) (Traces, error) + // CreateTraces creates a Traces based on this config. + // If the receiver type does not support traces, + // this function returns the error [pipeline.ErrSignalNotSupported]. + // Implementers can assume `next` is never nil. + CreateTraces(ctx context.Context, set Settings, cfg component.Config, next consumer.Traces) (Traces, error) - // TracesReceiverStability gets the stability level of the TracesReceiver. - TracesReceiverStability() component.StabilityLevel + // TracesStability gets the stability level of the Traces receiver. + TracesStability() component.StabilityLevel - // CreateMetricsReceiver creates a MetricsReceiver based on this config. - // If the receiver type does not support metrics or if the config is not valid - // an error will be returned instead. `nextConsumer` is never nil. - CreateMetricsReceiver(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Metrics) (Metrics, error) + // CreateMetrics creates a Metrics based on this config. + // If the receiver type does not support metrics, + // this function returns the error [pipeline.ErrSignalNotSupported]. + // Implementers can assume `next` is never nil. + CreateMetrics(ctx context.Context, set Settings, cfg component.Config, next consumer.Metrics) (Metrics, error) - // MetricsReceiverStability gets the stability level of the MetricsReceiver. - MetricsReceiverStability() component.StabilityLevel + // MetricsStability gets the stability level of the Metrics receiver. + MetricsStability() component.StabilityLevel - // CreateLogsReceiver creates a LogsReceiver based on this config. - // If the receiver type does not support the data type or if the config is not valid - // an error will be returned instead. `nextConsumer` is never nil. - CreateLogsReceiver(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Logs) (Logs, error) + // CreateLogs creates a Logs based on this config. + // If the receiver type does not support logs, + // this function returns the error [pipeline.ErrSignalNotSupported]. + // Implementers can assume `next` is never nil. + CreateLogs(ctx context.Context, set Settings, cfg component.Config, next consumer.Logs) (Logs, error) - // LogsReceiverStability gets the stability level of the LogsReceiver. - LogsReceiverStability() component.StabilityLevel + // LogsStability gets the stability level of the Logs receiver. + LogsStability() component.StabilityLevel unexportedFactoryFunc() } -// FactoryOption apply changes to ReceiverOptions. +// FactoryOption applies changes to Factory. type FactoryOption interface { // applyOption applies the option. applyOption(o *factory) } -// factoryOptionFunc is an ReceiverFactoryOption created through a function. +// factoryOptionFunc is a FactoryOption created through a function.
type factoryOptionFunc func(*factory) func (f factoryOptionFunc) applyOption(o *factory) { @@ -104,50 +101,36 @@ func (f factoryOptionFunc) applyOption(o *factory) { } // CreateTracesFunc is the equivalent of Factory.CreateTraces. -type CreateTracesFunc func(context.Context, CreateSettings, component.Config, consumer.Traces) (Traces, error) - -// CreateTracesReceiver implements Factory.CreateTracesReceiver(). -func (f CreateTracesFunc) CreateTracesReceiver( - ctx context.Context, - set CreateSettings, - cfg component.Config, - nextConsumer consumer.Traces) (Traces, error) { +type CreateTracesFunc func(context.Context, Settings, component.Config, consumer.Traces) (Traces, error) + +// CreateTraces implements Factory.CreateTraces(). +func (f CreateTracesFunc) CreateTraces(ctx context.Context, set Settings, cfg component.Config, next consumer.Traces) (Traces, error) { if f == nil { - return nil, component.ErrDataTypeIsNotSupported + return nil, pipeline.ErrSignalNotSupported } - return f(ctx, set, cfg, nextConsumer) + return f(ctx, set, cfg, next) } // CreateMetricsFunc is the equivalent of Factory.CreateMetrics. -type CreateMetricsFunc func(context.Context, CreateSettings, component.Config, consumer.Metrics) (Metrics, error) - -// CreateMetricsReceiver implements Factory.CreateMetricsReceiver(). -func (f CreateMetricsFunc) CreateMetricsReceiver( - ctx context.Context, - set CreateSettings, - cfg component.Config, - nextConsumer consumer.Metrics, -) (Metrics, error) { +type CreateMetricsFunc func(context.Context, Settings, component.Config, consumer.Metrics) (Metrics, error) + +// CreateMetrics implements Factory.CreateMetrics. +func (f CreateMetricsFunc) CreateMetrics(ctx context.Context, set Settings, cfg component.Config, next consumer.Metrics) (Metrics, error) { if f == nil { - return nil, component.ErrDataTypeIsNotSupported + return nil, pipeline.ErrSignalNotSupported } - return f(ctx, set, cfg, nextConsumer) + return f(ctx, set, cfg, next) } -// CreateLogsFunc is the equivalent of ReceiverFactory.CreateLogsReceiver(). -type CreateLogsFunc func(context.Context, CreateSettings, component.Config, consumer.Logs) (Logs, error) +// CreateLogsFunc is the equivalent of Factory.CreateLogs. +type CreateLogsFunc func(context.Context, Settings, component.Config, consumer.Logs) (Logs, error) -// CreateLogsReceiver implements Factory.CreateLogsReceiver(). -func (f CreateLogsFunc) CreateLogsReceiver( - ctx context.Context, - set CreateSettings, - cfg component.Config, - nextConsumer consumer.Logs, -) (Logs, error) { +// CreateLogs implements Factory.CreateLogs. 
+func (f CreateLogsFunc) CreateLogs(ctx context.Context, set Settings, cfg component.Config, next consumer.Logs) (Logs, error) { if f == nil { - return nil, component.ErrDataTypeIsNotSupported + return nil, pipeline.ErrSignalNotSupported } - return f(ctx, set, cfg, nextConsumer) + return f(ctx, set, cfg, next) } type factory struct { @@ -167,39 +150,39 @@ func (f *factory) Type() component.Type { func (f *factory) unexportedFactoryFunc() {} -func (f *factory) TracesReceiverStability() component.StabilityLevel { +func (f *factory) TracesStability() component.StabilityLevel { return f.tracesStabilityLevel } -func (f *factory) MetricsReceiverStability() component.StabilityLevel { +func (f *factory) MetricsStability() component.StabilityLevel { return f.metricsStabilityLevel } -func (f *factory) LogsReceiverStability() component.StabilityLevel { +func (f *factory) LogsStability() component.StabilityLevel { return f.logsStabilityLevel } -// WithTraces overrides the default "error not supported" implementation for CreateTracesReceiver and the default "undefined" stability level. -func WithTraces(createTracesReceiver CreateTracesFunc, sl component.StabilityLevel) FactoryOption { +// WithTraces overrides the default "error not supported" implementation for Factory.CreateTraces and the default "undefined" stability level. +func WithTraces(createTraces CreateTracesFunc, sl component.StabilityLevel) FactoryOption { return factoryOptionFunc(func(o *factory) { o.tracesStabilityLevel = sl - o.CreateTracesFunc = createTracesReceiver + o.CreateTracesFunc = createTraces }) } -// WithMetrics overrides the default "error not supported" implementation for CreateMetricsReceiver and the default "undefined" stability level. -func WithMetrics(createMetricsReceiver CreateMetricsFunc, sl component.StabilityLevel) FactoryOption { +// WithMetrics overrides the default "error not supported" implementation for Factory.CreateMetrics and the default "undefined" stability level. +func WithMetrics(createMetrics CreateMetricsFunc, sl component.StabilityLevel) FactoryOption { return factoryOptionFunc(func(o *factory) { o.metricsStabilityLevel = sl - o.CreateMetricsFunc = createMetricsReceiver + o.CreateMetricsFunc = createMetrics }) } -// WithLogs overrides the default "error not supported" implementation for CreateLogsReceiver and the default "undefined" stability level. -func WithLogs(createLogsReceiver CreateLogsFunc, sl component.StabilityLevel) FactoryOption { +// WithLogs overrides the default "error not supported" implementation for Factory.CreateLogs and the default "undefined" stability level. +func WithLogs(createLogs CreateLogsFunc, sl component.StabilityLevel) FactoryOption { return factoryOptionFunc(func(o *factory) { o.logsStabilityLevel = sl - o.CreateLogsFunc = createLogsReceiver + o.CreateLogsFunc = createLogs }) } @@ -227,86 +210,3 @@ func MakeFactoryMap(factories ...Factory) (map[component.Type]Factory, error) { } return fMap, nil } - -// Builder receiver is a helper struct that given a set of Configs and Factories helps with creating receivers. -type Builder struct { - cfgs map[component.ID]component.Config - factories map[component.Type]Factory -} - -// NewBuilder creates a new receiver.Builder to help with creating components form a set of configs and factories. -func NewBuilder(cfgs map[component.ID]component.Config, factories map[component.Type]Factory) *Builder { - return &Builder{cfgs: cfgs, factories: factories} -} - -// CreateTraces creates a Traces receiver based on the settings and config. 
-func (b *Builder) CreateTraces(ctx context.Context, set CreateSettings, next consumer.Traces) (Traces, error) { - if next == nil { - return nil, errNilNextConsumer - } - cfg, existsCfg := b.cfgs[set.ID] - if !existsCfg { - return nil, fmt.Errorf("receiver %q is not configured", set.ID) - } - - f, existsFactory := b.factories[set.ID.Type()] - if !existsFactory { - return nil, fmt.Errorf("receiver factory not available for: %q", set.ID) - } - - logStabilityLevel(set.Logger, f.TracesReceiverStability()) - return f.CreateTracesReceiver(ctx, set, cfg, next) -} - -// CreateMetrics creates a Metrics receiver based on the settings and config. -func (b *Builder) CreateMetrics(ctx context.Context, set CreateSettings, next consumer.Metrics) (Metrics, error) { - if next == nil { - return nil, errNilNextConsumer - } - cfg, existsCfg := b.cfgs[set.ID] - if !existsCfg { - return nil, fmt.Errorf("receiver %q is not configured", set.ID) - } - - f, existsFactory := b.factories[set.ID.Type()] - if !existsFactory { - return nil, fmt.Errorf("receiver factory not available for: %q", set.ID) - } - - logStabilityLevel(set.Logger, f.MetricsReceiverStability()) - return f.CreateMetricsReceiver(ctx, set, cfg, next) -} - -// CreateLogs creates a Logs receiver based on the settings and config. -func (b *Builder) CreateLogs(ctx context.Context, set CreateSettings, next consumer.Logs) (Logs, error) { - if next == nil { - return nil, errNilNextConsumer - } - cfg, existsCfg := b.cfgs[set.ID] - if !existsCfg { - return nil, fmt.Errorf("receiver %q is not configured", set.ID) - } - - f, existsFactory := b.factories[set.ID.Type()] - if !existsFactory { - return nil, fmt.Errorf("receiver factory not available for: %q", set.ID) - } - - logStabilityLevel(set.Logger, f.LogsReceiverStability()) - return f.CreateLogsReceiver(ctx, set, cfg, next) -} - -func (b *Builder) Factory(componentType component.Type) component.Factory { - return b.factories[componentType] -} - -// logStabilityLevel logs the stability level of a component. The log level is set to info for -// undefined, unmaintained, deprecated and development. The log level is set to debug -// for alpha, beta and stable. -func logStabilityLevel(logger *zap.Logger, sl component.StabilityLevel) { - if sl >= component.StabilityLevelAlpha { - logger.Debug(sl.LogMessage()) - } else { - logger.Info(sl.LogMessage()) - } -} diff --git a/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/documentation.md b/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/documentation.md index 457a856618e..4f88fd0956d 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/documentation.md +++ b/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/documentation.md @@ -6,50 +6,50 @@ The following telemetry is emitted by this component. -### receiver_accepted_log_records +### otelcol_receiver_accepted_log_records -Number of log records successfully pushed into the pipeline. +Number of log records successfully pushed into the pipeline. [alpha] | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {records} | Sum | Int | true | -### receiver_accepted_metric_points +### otelcol_receiver_accepted_metric_points -Number of metric points successfully pushed into the pipeline. +Number of metric points successfully pushed into the pipeline. 
[alpha] | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {datapoints} | Sum | Int | true | -### receiver_accepted_spans +### otelcol_receiver_accepted_spans -Number of spans successfully pushed into the pipeline. +Number of spans successfully pushed into the pipeline. [alpha] | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {spans} | Sum | Int | true | -### receiver_refused_log_records +### otelcol_receiver_refused_log_records -Number of log records that could not be pushed into the pipeline. +Number of log records that could not be pushed into the pipeline. [alpha] | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {records} | Sum | Int | true | -### receiver_refused_metric_points +### otelcol_receiver_refused_metric_points -Number of metric points that could not be pushed into the pipeline. +Number of metric points that could not be pushed into the pipeline. [alpha] | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {datapoints} | Sum | Int | true | -### receiver_refused_spans +### otelcol_receiver_refused_spans -Number of spans that could not be pushed into the pipeline. +Number of spans that could not be pushed into the pipeline. [alpha] | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {spans} | Sum | Int | true | diff --git a/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/internal/metadata/generated_telemetry.go b/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/internal/metadata/generated_telemetry.go index ee70b2e498f..17955ac81e6 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/internal/metadata/generated_telemetry.go +++ b/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/internal/metadata/generated_telemetry.go @@ -6,7 +6,7 @@ import ( "errors" "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/metric/noop" + noopmetric "go.opentelemetry.io/otel/metric/noop" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/collector/component" @@ -24,76 +24,77 @@ func Tracer(settings component.TelemetrySettings) trace.Tracer { // TelemetryBuilder provides an interface for components to report telemetry // as defined in metadata and user config. type TelemetryBuilder struct { + meter metric.Meter ReceiverAcceptedLogRecords metric.Int64Counter ReceiverAcceptedMetricPoints metric.Int64Counter ReceiverAcceptedSpans metric.Int64Counter ReceiverRefusedLogRecords metric.Int64Counter ReceiverRefusedMetricPoints metric.Int64Counter ReceiverRefusedSpans metric.Int64Counter - level configtelemetry.Level } -// telemetryBuilderOption applies changes to default builder. -type telemetryBuilderOption func(*TelemetryBuilder) +// TelemetryBuilderOption applies changes to default builder. +type TelemetryBuilderOption interface { + apply(*TelemetryBuilder) +} -// WithLevel sets the current telemetry level for the component. 
-func WithLevel(lvl configtelemetry.Level) telemetryBuilderOption { - return func(builder *TelemetryBuilder) { - builder.level = lvl - } +type telemetryBuilderOptionFunc func(mb *TelemetryBuilder) + +func (tbof telemetryBuilderOptionFunc) apply(mb *TelemetryBuilder) { + tbof(mb) } // NewTelemetryBuilder provides a struct with methods to update all internal telemetry // for a component -func NewTelemetryBuilder(settings component.TelemetrySettings, options ...telemetryBuilderOption) (*TelemetryBuilder, error) { - builder := TelemetryBuilder{level: configtelemetry.LevelBasic} +func NewTelemetryBuilder(settings component.TelemetrySettings, options ...TelemetryBuilderOption) (*TelemetryBuilder, error) { + builder := TelemetryBuilder{} for _, op := range options { - op(&builder) - } - var ( - err, errs error - meter metric.Meter - ) - if builder.level >= configtelemetry.LevelBasic { - meter = Meter(settings) - } else { - meter = noop.Meter{} + op.apply(&builder) } - builder.ReceiverAcceptedLogRecords, err = meter.Int64Counter( - "receiver_accepted_log_records", - metric.WithDescription("Number of log records successfully pushed into the pipeline."), - metric.WithUnit("1"), + builder.meter = Meter(settings) + var err, errs error + builder.ReceiverAcceptedLogRecords, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_receiver_accepted_log_records", + metric.WithDescription("Number of log records successfully pushed into the pipeline. [alpha]"), + metric.WithUnit("{records}"), ) errs = errors.Join(errs, err) - builder.ReceiverAcceptedMetricPoints, err = meter.Int64Counter( - "receiver_accepted_metric_points", - metric.WithDescription("Number of metric points successfully pushed into the pipeline."), - metric.WithUnit("1"), + builder.ReceiverAcceptedMetricPoints, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_receiver_accepted_metric_points", + metric.WithDescription("Number of metric points successfully pushed into the pipeline. [alpha]"), + metric.WithUnit("{datapoints}"), ) errs = errors.Join(errs, err) - builder.ReceiverAcceptedSpans, err = meter.Int64Counter( - "receiver_accepted_spans", - metric.WithDescription("Number of spans successfully pushed into the pipeline."), - metric.WithUnit("1"), + builder.ReceiverAcceptedSpans, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_receiver_accepted_spans", + metric.WithDescription("Number of spans successfully pushed into the pipeline. [alpha]"), + metric.WithUnit("{spans}"), ) errs = errors.Join(errs, err) - builder.ReceiverRefusedLogRecords, err = meter.Int64Counter( - "receiver_refused_log_records", - metric.WithDescription("Number of log records that could not be pushed into the pipeline."), - metric.WithUnit("1"), + builder.ReceiverRefusedLogRecords, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_receiver_refused_log_records", + metric.WithDescription("Number of log records that could not be pushed into the pipeline. 
[alpha]"), + metric.WithUnit("{records}"), ) errs = errors.Join(errs, err) - builder.ReceiverRefusedMetricPoints, err = meter.Int64Counter( - "receiver_refused_metric_points", - metric.WithDescription("Number of metric points that could not be pushed into the pipeline."), - metric.WithUnit("1"), + builder.ReceiverRefusedMetricPoints, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_receiver_refused_metric_points", + metric.WithDescription("Number of metric points that could not be pushed into the pipeline. [alpha]"), + metric.WithUnit("{datapoints}"), ) errs = errors.Join(errs, err) - builder.ReceiverRefusedSpans, err = meter.Int64Counter( - "receiver_refused_spans", - metric.WithDescription("Number of spans that could not be pushed into the pipeline."), - metric.WithUnit("1"), + builder.ReceiverRefusedSpans, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_receiver_refused_spans", + metric.WithDescription("Number of spans that could not be pushed into the pipeline. [alpha]"), + metric.WithUnit("{spans}"), ) errs = errors.Join(errs, err) return &builder, errs } + +func getLeveledMeter(meter metric.Meter, cfgLevel, srvLevel configtelemetry.Level) metric.Meter { + if cfgLevel <= srvLevel { + return meter + } + return noopmetric.Meter{} +} diff --git a/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/internal/obsmetrics.go b/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/internal/obsmetrics.go new file mode 100644 index 00000000000..3b90592b6b6 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/internal/obsmetrics.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/receiver/receiverhelper/internal" + +const ( + // SpanNameSep is duplicate between receiver and exporter. + SpanNameSep = "/" + + // ReceiverKey used to identify receivers in metrics and traces. + ReceiverKey = "receiver" + // TransportKey used to identify the transport used to received the data. + TransportKey = "transport" + // FormatKey used to identify the format of the data received. + FormatKey = "format" + + // AcceptedSpansKey used to identify spans accepted by the Collector. + AcceptedSpansKey = "accepted_spans" + // RefusedSpansKey used to identify spans refused (ie.: not ingested) by the Collector. + RefusedSpansKey = "refused_spans" + + // AcceptedMetricPointsKey used to identify metric points accepted by the Collector. + AcceptedMetricPointsKey = "accepted_metric_points" + // RefusedMetricPointsKey used to identify metric points refused (ie.: not ingested) by the + // Collector. + RefusedMetricPointsKey = "refused_metric_points" + + // AcceptedLogRecordsKey used to identify log records accepted by the Collector. + AcceptedLogRecordsKey = "accepted_log_records" + // RefusedLogRecordsKey used to identify log records refused (ie.: not ingested) by the + // Collector. 
+ RefusedLogRecordsKey = "refused_log_records" + + ReceiveTraceDataOperationSuffix = SpanNameSep + "TraceDataReceived" + ReceiverMetricsOperationSuffix = SpanNameSep + "MetricsReceived" + ReceiverLogsOperationSuffix = SpanNameSep + "LogsReceived" +) diff --git a/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/metadata.yaml b/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/metadata.yaml index 2897d549a70..d13d7c3307b 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/metadata.yaml +++ b/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/metadata.yaml @@ -1,58 +1,70 @@ type: receiverhelper +github_project: open-telemetry/opentelemetry-collector status: class: receiver not_component: true stability: beta: [traces, metrics, logs] - distributions: [core, contrib] telemetry: metrics: receiver_accepted_spans: enabled: true + stability: + level: alpha description: Number of spans successfully pushed into the pipeline. - unit: 1 + unit: "{spans}" sum: value_type: int monotonic: true receiver_refused_spans: enabled: true + stability: + level: alpha description: Number of spans that could not be pushed into the pipeline. - unit: 1 + unit: "{spans}" sum: value_type: int monotonic: true receiver_accepted_metric_points: enabled: true + stability: + level: alpha description: Number of metric points successfully pushed into the pipeline. - unit: 1 + unit: "{datapoints}" sum: value_type: int monotonic: true receiver_refused_metric_points: enabled: true + stability: + level: alpha description: Number of metric points that could not be pushed into the pipeline. - unit: 1 + unit: "{datapoints}" sum: value_type: int monotonic: true receiver_accepted_log_records: enabled: true + stability: + level: alpha description: Number of log records successfully pushed into the pipeline. - unit: 1 + unit: "{records}" sum: value_type: int monotonic: true receiver_refused_log_records: enabled: true + stability: + level: alpha description: Number of log records that could not be pushed into the pipeline. - unit: 1 + unit: "{records}" sum: value_type: int - monotonic: true \ No newline at end of file + monotonic: true diff --git a/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/obsreport.go b/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/obsreport.go index 18ff7556454..e14af8a153b 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/obsreport.go +++ b/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/obsreport.go @@ -12,25 +12,22 @@ import ( "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/config/configtelemetry" - "go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics" + "go.opentelemetry.io/collector/pipeline" "go.opentelemetry.io/collector/receiver" + "go.opentelemetry.io/collector/receiver/receiverhelper/internal" "go.opentelemetry.io/collector/receiver/receiverhelper/internal/metadata" ) // ObsReport is a helper to add observability to a receiver. 
type ObsReport struct { - level configtelemetry.Level spanNamePrefix string transport string longLivedCtx bool tracer trace.Tracer - logger *zap.Logger - otelAttrs []attribute.KeyValue + otelAttrs metric.MeasurementOption telemetryBuilder *metadata.TelemetryBuilder } @@ -44,7 +41,7 @@ type ObsReportSettings struct { // eg.: a gRPC stream, for which many batches of data are received in individual // operations without a corresponding new context per operation. LongLivedCtx bool - ReceiverCreateSettings receiver.CreateSettings + ReceiverCreateSettings receiver.Settings } // NewObsReport creates a new ObsReport. @@ -58,17 +55,15 @@ func newReceiver(cfg ObsReportSettings) (*ObsReport, error) { return nil, err } return &ObsReport{ - level: cfg.ReceiverCreateSettings.TelemetrySettings.MetricsLevel, - spanNamePrefix: obsmetrics.ReceiverPrefix + cfg.ReceiverID.String(), + spanNamePrefix: internal.ReceiverKey + internal.SpanNameSep + cfg.ReceiverID.String(), transport: cfg.Transport, longLivedCtx: cfg.LongLivedCtx, tracer: cfg.ReceiverCreateSettings.TracerProvider.Tracer(cfg.ReceiverID.String()), - logger: cfg.ReceiverCreateSettings.Logger, - otelAttrs: []attribute.KeyValue{ - attribute.String(obsmetrics.ReceiverKey, cfg.ReceiverID.String()), - attribute.String(obsmetrics.TransportKey, cfg.Transport), - }, + otelAttrs: metric.WithAttributeSet(attribute.NewSet( + attribute.String(internal.ReceiverKey, cfg.ReceiverID.String()), + attribute.String(internal.TransportKey, cfg.Transport), + )), telemetryBuilder: telemetryBuilder, }, nil } @@ -77,7 +72,7 @@ func newReceiver(cfg ObsReportSettings) (*ObsReport, error) { // The returned context should be used in other calls to the obsreport functions // dealing with the same receive operation. func (rec *ObsReport) StartTracesOp(operationCtx context.Context) context.Context { - return rec.startOp(operationCtx, obsmetrics.ReceiveTraceDataOperationSuffix) + return rec.startOp(operationCtx, internal.ReceiveTraceDataOperationSuffix) } // EndTracesOp completes the receive operation that was started with @@ -88,14 +83,14 @@ func (rec *ObsReport) EndTracesOp( numReceivedSpans int, err error, ) { - rec.endOp(receiverCtx, format, numReceivedSpans, err, component.DataTypeTraces) + rec.endOp(receiverCtx, format, numReceivedSpans, err, pipeline.SignalTraces) } // StartLogsOp is called when a request is received from a client. // The returned context should be used in other calls to the obsreport functions // dealing with the same receive operation. func (rec *ObsReport) StartLogsOp(operationCtx context.Context) context.Context { - return rec.startOp(operationCtx, obsmetrics.ReceiverLogsOperationSuffix) + return rec.startOp(operationCtx, internal.ReceiverLogsOperationSuffix) } // EndLogsOp completes the receive operation that was started with @@ -106,14 +101,14 @@ func (rec *ObsReport) EndLogsOp( numReceivedLogRecords int, err error, ) { - rec.endOp(receiverCtx, format, numReceivedLogRecords, err, component.DataTypeLogs) + rec.endOp(receiverCtx, format, numReceivedLogRecords, err, pipeline.SignalLogs) } // StartMetricsOp is called when a request is received from a client. // The returned context should be used in other calls to the obsreport functions // dealing with the same receive operation. 
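For orientation, the Start*/End* pairs in this file are meant to bracket a single receive operation: StartTracesOp opens the span and returns the context to thread through, and EndTracesOp records the accepted/refused counts and closes the span. A hedged usage sketch of the traces path, assuming the vendored receiverhelper API shown above; the transport and format labels are illustrative:

package main

import (
	"context"

	"go.opentelemetry.io/collector/consumer"
	"go.opentelemetry.io/collector/consumer/consumertest"
	"go.opentelemetry.io/collector/pdata/ptrace"
	"go.opentelemetry.io/collector/receiver"
	"go.opentelemetry.io/collector/receiver/receiverhelper"
	"go.opentelemetry.io/collector/receiver/receivertest"
)

// handleTraces brackets one receive operation with StartTracesOp/EndTracesOp.
func handleTraces(ctx context.Context, set receiver.Settings, next consumer.Traces, td ptrace.Traces) error {
	obsrep, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{
		ReceiverID:             set.ID,
		Transport:              "grpc", // illustrative transport label
		ReceiverCreateSettings: set,
	})
	if err != nil {
		return err
	}
	ctx = obsrep.StartTracesOp(ctx)
	err = next.ConsumeTraces(ctx, td)
	// "otlp" is an illustrative format label; SpanCount is the received total.
	obsrep.EndTracesOp(ctx, "otlp", td.SpanCount(), err)
	return err
}

func main() {
	_ = handleTraces(context.Background(), receivertest.NewNopSettings(), consumertest.NewNop(), ptrace.NewTraces())
}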
func (rec *ObsReport) StartMetricsOp(operationCtx context.Context) context.Context { - return rec.startOp(operationCtx, obsmetrics.ReceiverMetricsOperationSuffix) + return rec.startOp(operationCtx, internal.ReceiverMetricsOperationSuffix) } // EndMetricsOp completes the receive operation that was started with @@ -124,7 +119,7 @@ func (rec *ObsReport) EndMetricsOp( numReceivedPoints int, err error, ) { - rec.endOp(receiverCtx, format, numReceivedPoints, err, component.DataTypeMetrics) + rec.endOp(receiverCtx, format, numReceivedPoints, err, pipeline.SignalMetrics) } // startOp creates the span used to trace the operation. Returning @@ -147,7 +142,7 @@ func (rec *ObsReport) startOp(receiverCtx context.Context, operationSuffix strin } if rec.transport != "" { - span.SetAttributes(attribute.String(obsmetrics.TransportKey, rec.transport)) + span.SetAttributes(attribute.String(internal.TransportKey, rec.transport)) } return ctx } @@ -158,7 +153,7 @@ func (rec *ObsReport) endOp( format string, numReceivedItems int, err error, - dataType component.DataType, + signal pipeline.Signal, ) { numAccepted := numReceivedItems numRefused := 0 @@ -169,27 +164,25 @@ func (rec *ObsReport) endOp( span := trace.SpanFromContext(receiverCtx) - if rec.level != configtelemetry.LevelNone { - rec.recordMetrics(receiverCtx, dataType, numAccepted, numRefused) - } + rec.recordMetrics(receiverCtx, signal, numAccepted, numRefused) // end span according to errors if span.IsRecording() { var acceptedItemsKey, refusedItemsKey string - switch dataType { - case component.DataTypeTraces: - acceptedItemsKey = obsmetrics.AcceptedSpansKey - refusedItemsKey = obsmetrics.RefusedSpansKey - case component.DataTypeMetrics: - acceptedItemsKey = obsmetrics.AcceptedMetricPointsKey - refusedItemsKey = obsmetrics.RefusedMetricPointsKey - case component.DataTypeLogs: - acceptedItemsKey = obsmetrics.AcceptedLogRecordsKey - refusedItemsKey = obsmetrics.RefusedLogRecordsKey + switch signal { + case pipeline.SignalTraces: + acceptedItemsKey = internal.AcceptedSpansKey + refusedItemsKey = internal.RefusedSpansKey + case pipeline.SignalMetrics: + acceptedItemsKey = internal.AcceptedMetricPointsKey + refusedItemsKey = internal.RefusedMetricPointsKey + case pipeline.SignalLogs: + acceptedItemsKey = internal.AcceptedLogRecordsKey + refusedItemsKey = internal.RefusedLogRecordsKey } span.SetAttributes( - attribute.String(obsmetrics.FormatKey, format), + attribute.String(internal.FormatKey, format), attribute.Int64(acceptedItemsKey, int64(numAccepted)), attribute.Int64(refusedItemsKey, int64(numRefused)), ) @@ -200,20 +193,20 @@ func (rec *ObsReport) endOp( span.End() } -func (rec *ObsReport) recordMetrics(receiverCtx context.Context, dataType component.DataType, numAccepted, numRefused int) { +func (rec *ObsReport) recordMetrics(receiverCtx context.Context, signal pipeline.Signal, numAccepted, numRefused int) { var acceptedMeasure, refusedMeasure metric.Int64Counter - switch dataType { - case component.DataTypeTraces: + switch signal { + case pipeline.SignalTraces: acceptedMeasure = rec.telemetryBuilder.ReceiverAcceptedSpans refusedMeasure = rec.telemetryBuilder.ReceiverRefusedSpans - case component.DataTypeMetrics: + case pipeline.SignalMetrics: acceptedMeasure = rec.telemetryBuilder.ReceiverAcceptedMetricPoints refusedMeasure = rec.telemetryBuilder.ReceiverRefusedMetricPoints - case component.DataTypeLogs: + case pipeline.SignalLogs: acceptedMeasure = rec.telemetryBuilder.ReceiverAcceptedLogRecords refusedMeasure = 
rec.telemetryBuilder.ReceiverRefusedLogRecords } - acceptedMeasure.Add(receiverCtx, int64(numAccepted), metric.WithAttributes(rec.otelAttrs...)) - refusedMeasure.Add(receiverCtx, int64(numRefused), metric.WithAttributes(rec.otelAttrs...)) + acceptedMeasure.Add(receiverCtx, int64(numAccepted), rec.otelAttrs) + refusedMeasure.Add(receiverCtx, int64(numRefused), rec.otelAttrs) } diff --git a/vendor/go.opentelemetry.io/collector/receiver/receivertest/LICENSE b/vendor/go.opentelemetry.io/collector/receiver/receivertest/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/receiver/receivertest/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/collector/receiver/receivertest/Makefile b/vendor/go.opentelemetry.io/collector/receiver/receivertest/Makefile new file mode 100644 index 00000000000..ded7a36092d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/receiver/receivertest/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/receiver/receivertest/contract_checker.go b/vendor/go.opentelemetry.io/collector/receiver/receivertest/contract_checker.go index 6951b5f41f4..148a025048c 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/receivertest/contract_checker.go +++ b/vendor/go.opentelemetry.io/collector/receiver/receivertest/contract_checker.go @@ -7,7 +7,7 @@ import ( "context" "errors" "fmt" - "math/rand" + "math/rand/v2" "sync" "sync/atomic" "testing" @@ -24,6 +24,7 @@ import ( "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/ptrace" + "go.opentelemetry.io/collector/pipeline" "go.opentelemetry.io/collector/receiver" ) @@ -54,8 +55,7 @@ type CheckConsumeContractParams struct { T *testing.T // Factory that allows to create a receiver. Factory receiver.Factory - // DataType to test for. - DataType component.DataType + Signal pipeline.Signal // Config of the receiver to use. Config component.Config // Generator that can send data to the receiver. @@ -111,13 +111,13 @@ func checkConsumeContractScenario(params CheckConsumeContractParams, decisionFun // Create and start the receiver. 
var receiver component.Component var err error - switch params.DataType { - case component.DataTypeLogs: - receiver, err = params.Factory.CreateLogsReceiver(ctx, NewNopCreateSettings(), params.Config, consumer) - case component.DataTypeTraces: - receiver, err = params.Factory.CreateTracesReceiver(ctx, NewNopCreateSettings(), params.Config, consumer) - case component.DataTypeMetrics: - receiver, err = params.Factory.CreateMetricsReceiver(ctx, NewNopCreateSettings(), params.Config, consumer) + switch params.Signal { + case pipeline.SignalLogs: + receiver, err = params.Factory.CreateLogs(ctx, NewNopSettings(), params.Config, consumer) + case pipeline.SignalTraces: + receiver, err = params.Factory.CreateTraces(ctx, NewNopSettings(), params.Config, consumer) + case pipeline.SignalMetrics: + receiver, err = params.Factory.CreateMetrics(ctx, NewNopSettings(), params.Config, consumer) default: require.FailNow(params.T, "must specify a valid DataType to test for") } @@ -148,7 +148,7 @@ func checkConsumeContractScenario(params CheckConsumeContractParams, decisionFun defer wg.Done() for atomic.AddInt64(&generatedIndex, 1) <= int64(params.GenerateCount) { ids := params.Generator.Generate() - require.Greater(params.T, len(ids), 0) + require.NotEmpty(params.T, ids) mux.Lock() duplicates := generatedIDs.mergeSlice(ids) @@ -193,7 +193,7 @@ func checkConsumeContractScenario(params CheckConsumeContractParams, decisionFun } err = receiver.Shutdown(ctx) - assert.NoError(params.T, err) + require.NoError(params.T, err) // Print some stats to help debug test failures. fmt.Printf( @@ -271,8 +271,10 @@ func (ds idSet) union(other idSet) (union idSet, duplicates []UniqueIDAttrVal) { // between the receiver and it next consumer. type consumeDecisionFunc func(ids idSet) error -var errNonPermanent = errors.New("non permanent error") -var errPermanent = errors.New("permanent error") +var ( + errNonPermanent = errors.New("non permanent error") + errPermanent = errors.New("permanent error") +) // randomNonPermanentErrorConsumeDecision is a decision function that succeeds approximately // half of the time and fails with a non-permanent error the rest of the time. diff --git a/vendor/go.opentelemetry.io/collector/receiver/receivertest/nop_receiver.go b/vendor/go.opentelemetry.io/collector/receiver/receivertest/nop_receiver.go index e9cec06ca1b..be7bbd45421 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/receivertest/nop_receiver.go +++ b/vendor/go.opentelemetry.io/collector/receiver/receivertest/nop_receiver.go @@ -11,14 +11,16 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/xconsumer" "go.opentelemetry.io/collector/receiver" + "go.opentelemetry.io/collector/receiver/xreceiver" ) var defaultComponentType = component.MustNewType("nop") -// NewNopCreateSettings returns a new nop settings for Create*Receiver functions. -func NewNopCreateSettings() receiver.CreateSettings { - return receiver.CreateSettings{ +// NewNopSettings returns a new nop settings for Create*Receiver functions. 
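As a quick orientation to the renamed factory surface used above (CreateTracesReceiver becomes CreateTraces, CreateSettings becomes Settings, DataType becomes pipeline.Signal), here is a minimal sketch driving the nop factory; everything in it comes from the vendored test helpers shown in this diff:

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/collector/consumer/consumertest"
	"go.opentelemetry.io/collector/receiver/receivertest"
)

func main() {
	f := receivertest.NewNopFactory()
	// Settings now carry a unique component ID; the factory methods dropped
	// the "Receiver" suffix.
	r, err := f.CreateTraces(context.Background(), receivertest.NewNopSettings(), f.CreateDefaultConfig(), consumertest.NewNop())
	fmt.Println(r != nil, err)
}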
+func NewNopSettings() receiver.Settings { + return receiver.Settings{ ID: component.NewIDWithName(defaultComponentType, uuid.NewString()), TelemetrySettings: componenttest.NewNopTelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo(), @@ -27,44 +29,31 @@ func NewNopCreateSettings() receiver.CreateSettings { // NewNopFactory returns a receiver.Factory that constructs nop receivers supporting all data types. func NewNopFactory() receiver.Factory { - return receiver.NewFactory( + return xreceiver.NewFactory( defaultComponentType, func() component.Config { return &nopConfig{} }, - receiver.WithTraces(createTraces, component.StabilityLevelStable), - receiver.WithMetrics(createMetrics, component.StabilityLevelStable), - receiver.WithLogs(createLogs, component.StabilityLevelStable)) + xreceiver.WithTraces(createTraces, component.StabilityLevelStable), + xreceiver.WithMetrics(createMetrics, component.StabilityLevelStable), + xreceiver.WithLogs(createLogs, component.StabilityLevelStable), + xreceiver.WithProfiles(createProfiles, component.StabilityLevelAlpha), + ) } -// NewNopFactoryForType returns a receiver.Factory that constructs nop receivers supporting only the -// given data type. -func NewNopFactoryForType(dataType component.DataType) receiver.Factory { - var factoryOpt receiver.FactoryOption - switch dataType { - case component.DataTypeTraces: - factoryOpt = receiver.WithTraces(createTraces, component.StabilityLevelStable) - case component.DataTypeMetrics: - factoryOpt = receiver.WithMetrics(createMetrics, component.StabilityLevelStable) - case component.DataTypeLogs: - factoryOpt = receiver.WithLogs(createLogs, component.StabilityLevelStable) - default: - panic("unsupported data type for creating nop receiver factory: " + dataType.String()) - } +type nopConfig struct{} - componentType := component.MustNewType(defaultComponentType.String() + "_" + dataType.String()) - return receiver.NewFactory(componentType, func() component.Config { return &nopConfig{} }, factoryOpt) +func createTraces(context.Context, receiver.Settings, component.Config, consumer.Traces) (receiver.Traces, error) { + return nopInstance, nil } -type nopConfig struct{} - -func createTraces(context.Context, receiver.CreateSettings, component.Config, consumer.Traces) (receiver.Traces, error) { +func createMetrics(context.Context, receiver.Settings, component.Config, consumer.Metrics) (receiver.Metrics, error) { return nopInstance, nil } -func createMetrics(context.Context, receiver.CreateSettings, component.Config, consumer.Metrics) (receiver.Metrics, error) { +func createLogs(context.Context, receiver.Settings, component.Config, consumer.Logs) (receiver.Logs, error) { return nopInstance, nil } -func createLogs(context.Context, receiver.CreateSettings, component.Config, consumer.Logs) (receiver.Logs, error) { +func createProfiles(context.Context, receiver.Settings, component.Config, xconsumer.Profiles) (xreceiver.Profiles, error) { return nopInstance, nil } @@ -75,11 +64,3 @@ type nopReceiver struct { component.StartFunc component.ShutdownFunc } - -// NewNopBuilder returns a receiver.Builder that constructs nop receivers. 
-func NewNopBuilder() *receiver.Builder { - nopFactory := NewNopFactory() - return receiver.NewBuilder( - map[component.ID]component.Config{component.NewID(defaultComponentType): nopFactory.CreateDefaultConfig()}, - map[component.Type]receiver.Factory{defaultComponentType: nopFactory}) -} diff --git a/vendor/go.opentelemetry.io/collector/receiver/xreceiver/LICENSE b/vendor/go.opentelemetry.io/collector/receiver/xreceiver/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/receiver/xreceiver/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 

+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/collector/receiver/xreceiver/Makefile b/vendor/go.opentelemetry.io/collector/receiver/xreceiver/Makefile new file mode 100644 index 00000000000..ded7a36092d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/receiver/xreceiver/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/receiver/xreceiver/profiles.go b/vendor/go.opentelemetry.io/collector/receiver/xreceiver/profiles.go new file mode 100644 index 00000000000..2e0cbbd8cee --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/receiver/xreceiver/profiles.go @@ -0,0 +1,116 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package xreceiver // import "go.opentelemetry.io/collector/receiver/xreceiver" + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer/xconsumer" + "go.opentelemetry.io/collector/pipeline" + "go.opentelemetry.io/collector/receiver" +) + +// Profiles receiver receives profiles. +// Its purpose is to translate data from any format to the collector's internal profile format. +// Profiles receiver feeds a xconsumer.Profiles with data. +// +// For example, it could be a pprof data source which translates pprof profiles into pprofile.Profiles. +type Profiles interface { + component.Component +} + +// Factory is a factory interface for receivers. +// +// This interface cannot be directly implemented. Implementations must +// use NewFactory to implement it. +type Factory interface { + receiver.Factory + + // CreateProfiles creates a Profiles based on this config. + // If the receiver type does not support profiles or if the config is not valid + // an error will be returned instead. `next` is never nil. + CreateProfiles(ctx context.Context, set receiver.Settings, cfg component.Config, next xconsumer.Profiles) (Profiles, error) + + // ProfilesStability gets the stability level of the Profiles receiver. + ProfilesStability() component.StabilityLevel +} + +// CreateProfilesFunc is the equivalent of Factory.CreateProfiles. +type CreateProfilesFunc func(context.Context, receiver.Settings, component.Config, xconsumer.Profiles) (Profiles, error) + +// CreateProfiles implements Factory.CreateProfiles.
+func (f CreateProfilesFunc) CreateProfiles(ctx context.Context, set receiver.Settings, cfg component.Config, next xconsumer.Profiles) (Profiles, error) { + if f == nil { + return nil, pipeline.ErrSignalNotSupported + } + return f(ctx, set, cfg, next) +} + +// FactoryOption apply changes to Factory. +type FactoryOption interface { + // applyOption applies the option. + applyOption(o *factoryOpts) +} + +// factoryOptionFunc is a FactoryOption created through a function. +type factoryOptionFunc func(*factoryOpts) + +func (f factoryOptionFunc) applyOption(o *factoryOpts) { + f(o) +} + +type factory struct { + receiver.Factory + CreateProfilesFunc + profilesStabilityLevel component.StabilityLevel +} + +func (f *factory) ProfilesStability() component.StabilityLevel { + return f.profilesStabilityLevel +} + +type factoryOpts struct { + opts []receiver.FactoryOption + *factory +} + +// WithTraces overrides the default "error not supported" implementation for Factory.CreateTraces and the default "undefined" stability level. +func WithTraces(createTraces receiver.CreateTracesFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.opts = append(o.opts, receiver.WithTraces(createTraces, sl)) + }) +} + +// WithMetrics overrides the default "error not supported" implementation for Factory.CreateMetrics and the default "undefined" stability level. +func WithMetrics(createMetrics receiver.CreateMetricsFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.opts = append(o.opts, receiver.WithMetrics(createMetrics, sl)) + }) +} + +// WithLogs overrides the default "error not supported" implementation for Factory.CreateLogs and the default "undefined" stability level. +func WithLogs(createLogs receiver.CreateLogsFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.opts = append(o.opts, receiver.WithLogs(createLogs, sl)) + }) +} + +// WithProfiles overrides the default "error not supported" implementation for Factory.CreateProfiles and the default "undefined" stability level. +func WithProfiles(createProfiles CreateProfilesFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.profilesStabilityLevel = sl + o.CreateProfilesFunc = createProfiles + }) +} + +// NewFactory returns a Factory. +func NewFactory(cfgType component.Type, createDefaultConfig component.CreateDefaultConfigFunc, options ...FactoryOption) Factory { + opts := factoryOpts{factory: &factory{}} + for _, opt := range options { + opt.applyOption(&opts) + } + opts.factory.Factory = receiver.NewFactory(cfgType, createDefaultConfig, opts.opts...) + return opts.factory +} diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.12.0/generated_resource.go b/vendor/go.opentelemetry.io/collector/semconv/v1.12.0/generated_resource.go new file mode 100644 index 00000000000..2786da48c31 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.12.0/generated_resource.go @@ -0,0 +1,1086 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv + +// The web browser in which the application represented by the resource is running. The `browser.*` attributes MUST be used only for resources that represent applications running in a web browser (regardless of whether running on a mobile or desktop device). 
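Stepping back from the xreceiver file above: profiles support is added by layering a CreateProfilesFunc and its stability level over a plain receiver.Factory inside NewFactory. A hedged sketch of declaring such a factory, following the same pattern the nop factory uses; the "example" component type, config, and receiver here are hypothetical:

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/consumer/xconsumer"
	"go.opentelemetry.io/collector/receiver"
	"go.opentelemetry.io/collector/receiver/xreceiver"
)

type exampleConfig struct{} // hypothetical config

// hypothetical receiver; the embedded nil Start/Shutdown funcs are no-ops.
type exampleReceiver struct {
	component.StartFunc
	component.ShutdownFunc
}

func createProfiles(context.Context, receiver.Settings, component.Config, xconsumer.Profiles) (xreceiver.Profiles, error) {
	return exampleReceiver{}, nil
}

func main() {
	f := xreceiver.NewFactory(
		component.MustNewType("example"), // hypothetical component type
		func() component.Config { return &exampleConfig{} },
		xreceiver.WithProfiles(createProfiles, component.StabilityLevelAlpha),
	)
	fmt.Println(f.ProfilesStability())
}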
+const ( + // Array of brand name and version separated by a space + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the UA client hints API + // (navigator.userAgentData.brands). + AttributeBrowserBrands = "browser.brands" + // The platform on which the browser is running + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the UA client hints API + // (navigator.userAgentData.platform). If unavailable, the legacy + // navigator.platform API SHOULD NOT be used instead and this attribute SHOULD be + // left unset in order for the values to be consistent. + // The list of possible values is defined in the W3C User-Agent Client Hints + // specification. Note that some (but not all) of these values can overlap with + // values in the os.type and os.name attributes. However, for consistency, the + // values in the browser.platform attribute should capture the exact value that + // the user agent provides. + AttributeBrowserPlatform = "browser.platform" + // Full user-agent string provided by the browser + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 + // (KHTML, ' + // 'like Gecko) Chrome/95.0.4638.54 Safari/537.36' + // Note: The user-agent value SHOULD be provided only from browsers that do not + // have a mechanism to retrieve brands and platform individually from the User- + // Agent Client Hints API. To retrieve the value, the legacy navigator.userAgent + // API can be used. + AttributeBrowserUserAgent = "browser.user_agent" +) + +// A cloud environment (e.g. GCP, Azure, AWS) +const ( + // Name of the cloud provider. + // + // Type: Enum + // Required: No + // Stability: stable + AttributeCloudProvider = "cloud.provider" + // The cloud account ID the resource is assigned to. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '111111111111', 'opentelemetry' + AttributeCloudAccountID = "cloud.account.id" + // The geographical region the resource is running. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'us-central1', 'us-east-1' + // Note: Refer to your provider's docs to see the available regions, for example + // Alibaba Cloud regions, AWS regions, Azure regions, Google Cloud regions, or + // Tencent Cloud regions. + AttributeCloudRegion = "cloud.region" + // Cloud regions often have multiple, isolated locations known as zones to + // increase availability. Availability zone represents the zone where the resource + // is running. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'us-east-1c' + // Note: Availability zones are called "zones" on Alibaba Cloud and + // Google Cloud. + AttributeCloudAvailabilityZone = "cloud.availability_zone" + // The cloud platform in use. + // + // Type: Enum + // Required: No + // Stability: stable + // Note: The prefix of the service SHOULD match the one specified in + // cloud.provider. 
+ AttributeCloudPlatform = "cloud.platform" +) + +const ( + // Alibaba Cloud + AttributeCloudProviderAlibabaCloud = "alibaba_cloud" + // Amazon Web Services + AttributeCloudProviderAWS = "aws" + // Microsoft Azure + AttributeCloudProviderAzure = "azure" + // Google Cloud Platform + AttributeCloudProviderGCP = "gcp" + // Tencent Cloud + AttributeCloudProviderTencentCloud = "tencent_cloud" +) + +const ( + // Alibaba Cloud Elastic Compute Service + AttributeCloudPlatformAlibabaCloudECS = "alibaba_cloud_ecs" + // Alibaba Cloud Function Compute + AttributeCloudPlatformAlibabaCloudFc = "alibaba_cloud_fc" + // AWS Elastic Compute Cloud + AttributeCloudPlatformAWSEC2 = "aws_ec2" + // AWS Elastic Container Service + AttributeCloudPlatformAWSECS = "aws_ecs" + // AWS Elastic Kubernetes Service + AttributeCloudPlatformAWSEKS = "aws_eks" + // AWS Lambda + AttributeCloudPlatformAWSLambda = "aws_lambda" + // AWS Elastic Beanstalk + AttributeCloudPlatformAWSElasticBeanstalk = "aws_elastic_beanstalk" + // AWS App Runner + AttributeCloudPlatformAWSAppRunner = "aws_app_runner" + // Azure Virtual Machines + AttributeCloudPlatformAzureVM = "azure_vm" + // Azure Container Instances + AttributeCloudPlatformAzureContainerInstances = "azure_container_instances" + // Azure Kubernetes Service + AttributeCloudPlatformAzureAKS = "azure_aks" + // Azure Functions + AttributeCloudPlatformAzureFunctions = "azure_functions" + // Azure App Service + AttributeCloudPlatformAzureAppService = "azure_app_service" + // Google Cloud Compute Engine (GCE) + AttributeCloudPlatformGCPComputeEngine = "gcp_compute_engine" + // Google Cloud Run + AttributeCloudPlatformGCPCloudRun = "gcp_cloud_run" + // Google Cloud Kubernetes Engine (GKE) + AttributeCloudPlatformGCPKubernetesEngine = "gcp_kubernetes_engine" + // Google Cloud Functions (GCF) + AttributeCloudPlatformGCPCloudFunctions = "gcp_cloud_functions" + // Google Cloud App Engine (GAE) + AttributeCloudPlatformGCPAppEngine = "gcp_app_engine" + // Tencent Cloud Cloud Virtual Machine (CVM) + AttributeCloudPlatformTencentCloudCvm = "tencent_cloud_cvm" + // Tencent Cloud Elastic Kubernetes Service (EKS) + AttributeCloudPlatformTencentCloudEKS = "tencent_cloud_eks" + // Tencent Cloud Serverless Cloud Function (SCF) + AttributeCloudPlatformTencentCloudScf = "tencent_cloud_scf" +) + +// Resources used by AWS Elastic Container Service (ECS). +const ( + // The Amazon Resource Name (ARN) of an ECS container instance. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:ecs:us- + // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AttributeAWSECSContainerARN = "aws.ecs.container.arn" + // The ARN of an ECS cluster. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AttributeAWSECSClusterARN = "aws.ecs.cluster.arn" + // The launch type for an ECS task. + // + // Type: Enum + // Required: No + // Stability: stable + AttributeAWSECSLaunchtype = "aws.ecs.launchtype" + // The ARN of an ECS task definition. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:ecs:us- + // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' + AttributeAWSECSTaskARN = "aws.ecs.task.arn" + // The task definition family this task definition is a member of. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-family' + AttributeAWSECSTaskFamily = "aws.ecs.task.family" + // The revision for this task definition. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '8', '26' + AttributeAWSECSTaskRevision = "aws.ecs.task.revision" +) + +const ( + // ec2 + AttributeAWSECSLaunchtypeEC2 = "ec2" + // fargate + AttributeAWSECSLaunchtypeFargate = "fargate" +) + +// Resources used by AWS Elastic Kubernetes Service (EKS). +const ( + // The ARN of an EKS cluster. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AttributeAWSEKSClusterARN = "aws.eks.cluster.arn" +) + +// Resources specific to Amazon Web Services. +const ( + // The name(s) of the AWS log group(s) an application is writing to. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like multi-container + // applications, where a single application has sidecar containers, and each write + // to their own log group. + AttributeAWSLogGroupNames = "aws.log.group.names" + // The Amazon Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the log group ARN format documentation. + AttributeAWSLogGroupARNs = "aws.log.group.arns" + // The name(s) of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AttributeAWSLogStreamNames = "aws.log.stream.names" + // The ARN(s) of the AWS log stream(s). + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log- + // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the log stream ARN format documentation. One log group can contain + // several log streams, so these ARNs necessarily identify both a log group and a + // log stream. + AttributeAWSLogStreamARNs = "aws.log.stream.arns" +) + +// A container instance. +const ( + // Container name used by container runtime. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-autoconf' + AttributeContainerName = "container.name" + // Container ID. Usually a UUID, as for example used to identify Docker + // containers. The UUID might be abbreviated. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'a3bf90e006b2' + AttributeContainerID = "container.id" + // The container runtime managing this container. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'docker', 'containerd', 'rkt' + AttributeContainerRuntime = "container.runtime" + // Name of the image the container was built on. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'gcr.io/opentelemetry/operator' + AttributeContainerImageName = "container.image.name" + // Container image tag. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '0.1' + AttributeContainerImageTag = "container.image.tag" +) + +// The software deployment. +const ( + // Name of the deployment environment (aka deployment tier). 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'staging', 'production' + AttributeDeploymentEnvironment = "deployment.environment" +) + +// The device on which the process represented by this resource is running. +const ( + // A unique identifier representing the device + // + // Type: string + // Required: No + // Stability: stable + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values outlined + // below. This value is not an advertising identifier and MUST NOT be used as + // such. On iOS (Swift or Objective-C), this value MUST be equal to the vendor + // identifier. On Android (Java or Kotlin), this value MUST be equal to the + // Firebase Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found here on best + // practices and exact implementation details. Caution should be taken when + // storing personal data or anything which can identify a user. GDPR and data + // protection laws may apply, ensure you do your own due diligence. + AttributeDeviceID = "device.id" + // The model identifier for the device + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine readable version of the + // model identifier rather than the market or consumer-friendly name of the + // device. + AttributeDeviceModelIdentifier = "device.model.identifier" + // The marketing name for the device model + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human readable version of the + // device model rather than a machine readable alternative. + AttributeDeviceModelName = "device.model.name" + // The name of the device manufacturer + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via Build. iOS apps SHOULD hardcode + // the value Apple. + AttributeDeviceManufacturer = "device.manufacturer" +) + +// A serverless instance. +const ( + // The name of the single function that this runtime instance executes. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // code.namespace/code.function + // span attributes).For some cloud providers, the above definition is ambiguous. + // The following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud providers/products:
+ // - Azure: The full name <FUNCAPP>/<FUNC>, i.e., function app name
+ //   followed by a forward slash followed by the function name (this form
+ //   can also be seen in the resource JSON for the function).
+ //   This means that a span attribute MUST be used, as an Azure function
+ //   app can host multiple functions that would usually share
+ //   a TracerProvider (see also the faas.id attribute).
+ AttributeFaaSName = "faas.name"
+ // The unique ID of the single function that this runtime instance executes.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function'
+ // Note: On some cloud providers, it may not be possible to determine the full ID
+ // at startup, so consider setting faas.id as a span attribute instead. The exact
+ // value to use for faas.id depends on the cloud provider:
+ // - AWS Lambda: The function ARN.
+ //   Take care not to use the "invoked ARN" directly but replace any
+ //   alias suffix with the resolved function version, as the same runtime
+ //   instance may be invocable with multiple different aliases.
+ // - GCP: The URI of the resource
+ // - Azure: The Fully Qualified Resource ID of the invoked function,
+ //   not the function app, having the form
+ //   /subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>.
+ //   This means that a span attribute MUST be used, as an Azure function app can
+ //   host multiple functions that would usually share a TracerProvider.
+ AttributeFaaSID = "faas.id"
+ // The immutable version of the function being executed.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '26', 'pinkfroid-00002'
+ // Note: Depending on the cloud provider and platform, use:
+ // - AWS Lambda: The function version
+ //   (an integer represented as a decimal string).
+ // - Google Cloud Run: The revision
+ //   (i.e., the function name plus the revision suffix).
+ // - Google Cloud Functions: The value of the
+ //   K_REVISION environment variable.
+ // - Azure Functions: Not applicable. Do not set this attribute.
+ AttributeFaaSVersion = "faas.version"
+ // The execution environment ID as a string, that will be potentially reused for
+ // other invocations to the same function/function version.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
+ // Note:
+ // - AWS Lambda: Use the (full) log stream name.
+ AttributeFaaSInstance = "faas.instance" + // The amount of memory available to the serverless function in MiB. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 128 + // Note: It's recommended to set this attribute since e.g. too little memory can + // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, + // the environment variable AWS_LAMBDA_FUNCTION_MEMORY_SIZE provides this + // information. + AttributeFaaSMaxMemory = "faas.max_memory" +) + +// A host is defined as a general computing instance. +const ( + // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud + // provider. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-test' + AttributeHostID = "host.id" + // Name of the host. On Unix systems, it may contain what the hostname command + // returns, or the fully qualified hostname, or another name specified by the + // user. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-test' + AttributeHostName = "host.name" + // Type of host. For Cloud, this must be the machine type. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'n1-standard-1' + AttributeHostType = "host.type" + // The CPU architecture the host system is running on. + // + // Type: Enum + // Required: No + // Stability: stable + AttributeHostArch = "host.arch" + // Name of the VM image or OS install the host was instantiated from. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + AttributeHostImageName = "host.image.name" + // VM image ID. For Cloud, this value is from the provider. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'ami-07b06b442921831e5' + AttributeHostImageID = "host.image.id" + // The version string of the VM image as defined in Version Attributes. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '0.1' + AttributeHostImageVersion = "host.image.version" +) + +const ( + // AMD64 + AttributeHostArchAMD64 = "amd64" + // ARM32 + AttributeHostArchARM32 = "arm32" + // ARM64 + AttributeHostArchARM64 = "arm64" + // Itanium + AttributeHostArchIA64 = "ia64" + // 32-bit PowerPC + AttributeHostArchPPC32 = "ppc32" + // 64-bit PowerPC + AttributeHostArchPPC64 = "ppc64" + // IBM z/Architecture + AttributeHostArchS390x = "s390x" + // 32-bit x86 + AttributeHostArchX86 = "x86" +) + +// A Kubernetes Cluster. +const ( + // The name of the cluster. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-cluster' + AttributeK8SClusterName = "k8s.cluster.name" +) + +// A Kubernetes Node object. +const ( + // The name of the Node. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'node-1' + AttributeK8SNodeName = "k8s.node.name" + // The UID of the Node. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + AttributeK8SNodeUID = "k8s.node.uid" +) + +// A Kubernetes Namespace. +const ( + // The name of the namespace that the pod is running in. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'default' + AttributeK8SNamespaceName = "k8s.namespace.name" +) + +// A Kubernetes Pod object. +const ( + // The UID of the Pod. 
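As a quick, hedged usage sketch in Go (not part of the diff itself): populating faas.max_memory from the environment variable named in its Note, with a plain map standing in for whatever attribute container an instrumentation actually uses.

package main

import (
	"fmt"
	"os"
	"strconv"

	semconv "go.opentelemetry.io/collector/semconv/v1.12.0"
)

// lambdaResourceAttributes fills faas.max_memory (a MiB count) from the
// variable the AWS Lambda runtime sets; the attribute stays unset elsewhere.
func lambdaResourceAttributes() map[string]interface{} {
	attrs := map[string]interface{}{}
	if v := os.Getenv("AWS_LAMBDA_FUNCTION_MEMORY_SIZE"); v != "" {
		if mib, err := strconv.Atoi(v); err == nil {
			attrs[semconv.AttributeFaaSMaxMemory] = mib
		}
	}
	return attrs
}

func main() {
	fmt.Println(lambdaResourceAttributes())
}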
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SPodUID = "k8s.pod.uid" + // The name of the Pod. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-pod-autoconf' + AttributeK8SPodName = "k8s.pod.name" +) + +// A container in a [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). +const ( + // The name of the Container from Pod specification, must be unique within a Pod. + // Container runtime usually uses different globally unique name (container.name). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'redis' + AttributeK8SContainerName = "k8s.container.name" + // Number of times the container was restarted. This attribute can be used to + // identify a particular container (running or stopped) within a container spec. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 0, 2 + AttributeK8SContainerRestartCount = "k8s.container.restart_count" +) + +// A Kubernetes ReplicaSet object. +const ( + // The UID of the ReplicaSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SReplicaSetUID = "k8s.replicaset.uid" + // The name of the ReplicaSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + AttributeK8SReplicaSetName = "k8s.replicaset.name" +) + +// A Kubernetes Deployment object. +const ( + // The UID of the Deployment. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SDeploymentUID = "k8s.deployment.uid" + // The name of the Deployment. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + AttributeK8SDeploymentName = "k8s.deployment.name" +) + +// A Kubernetes StatefulSet object. +const ( + // The UID of the StatefulSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SStatefulSetUID = "k8s.statefulset.uid" + // The name of the StatefulSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + AttributeK8SStatefulSetName = "k8s.statefulset.name" +) + +// A Kubernetes DaemonSet object. +const ( + // The UID of the DaemonSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SDaemonSetUID = "k8s.daemonset.uid" + // The name of the DaemonSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + AttributeK8SDaemonSetName = "k8s.daemonset.name" +) + +// A Kubernetes Job object. +const ( + // The UID of the Job. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SJobUID = "k8s.job.uid" + // The name of the Job. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + AttributeK8SJobName = "k8s.job.name" +) + +// A Kubernetes CronJob object. +const ( + // The UID of the CronJob. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SCronJobUID = "k8s.cronjob.uid" + // The name of the CronJob. 
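A hedged sketch of how these k8s.* constants could be filled at runtime: the pod spec would inject the values through the Kubernetes downward API. The POD_NAME/POD_NAMESPACE/NODE_NAME variable names below are illustrative assumptions, not something the convention mandates.

package main

import (
	"fmt"
	"os"

	semconv "go.opentelemetry.io/collector/semconv/v1.12.0"
)

func main() {
	// Hypothetical env vars, assumed to be injected via the downward API.
	attrs := map[string]string{
		semconv.AttributeK8SPodName:       os.Getenv("POD_NAME"),
		semconv.AttributeK8SNamespaceName: os.Getenv("POD_NAMESPACE"),
		semconv.AttributeK8SNodeName:      os.Getenv("NODE_NAME"),
	}
	fmt.Println(attrs)
}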
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + AttributeK8SCronJobName = "k8s.cronjob.name" +) + +// The operating system (OS) on which the process represented by this resource is running. +const ( + // The operating system type. + // + // Type: Enum + // Required: Always + // Stability: stable + AttributeOSType = "os.type" + // Human readable (not intended to be parsed) OS version information, like e.g. + // reported by ver or lsb_release -a commands. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS' + AttributeOSDescription = "os.description" + // Human readable operating system name. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'iOS', 'Android', 'Ubuntu' + AttributeOSName = "os.name" + // The version string of the operating system as defined in Version Attributes. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '14.2.1', '18.04.1' + AttributeOSVersion = "os.version" +) + +const ( + // Microsoft Windows + AttributeOSTypeWindows = "windows" + // Linux + AttributeOSTypeLinux = "linux" + // Apple Darwin + AttributeOSTypeDarwin = "darwin" + // FreeBSD + AttributeOSTypeFreeBSD = "freebsd" + // NetBSD + AttributeOSTypeNetBSD = "netbsd" + // OpenBSD + AttributeOSTypeOpenBSD = "openbsd" + // DragonFly BSD + AttributeOSTypeDragonflyBSD = "dragonflybsd" + // HP-UX (Hewlett Packard Unix) + AttributeOSTypeHPUX = "hpux" + // AIX (Advanced Interactive eXecutive) + AttributeOSTypeAIX = "aix" + // SunOS, Oracle Solaris + AttributeOSTypeSolaris = "solaris" + // IBM z/OS + AttributeOSTypeZOS = "z_os" +) + +// An operating system process. +const ( + // Process identifier (PID). + // + // Type: int + // Required: No + // Stability: stable + // Examples: 1234 + AttributeProcessPID = "process.pid" + // The name of the process executable. On Linux based systems, can be set to the + // Name in proc/[pid]/status. On Windows, can be set to the base name of + // GetProcessImageFileNameW. + // + // Type: string + // Required: See below + // Stability: stable + // Examples: 'otelcol' + AttributeProcessExecutableName = "process.executable.name" + // The full path to the process executable. On Linux based systems, can be set to + // the target of proc/[pid]/exe. On Windows, can be set to the result of + // GetProcessImageFileNameW. + // + // Type: string + // Required: See below + // Stability: stable + // Examples: '/usr/bin/cmd/otelcol' + AttributeProcessExecutablePath = "process.executable.path" + // The command used to launch the process (i.e. the command name). On Linux based + // systems, can be set to the zeroth string in proc/[pid]/cmdline. On Windows, can + // be set to the first parameter extracted from GetCommandLineW. + // + // Type: string + // Required: See below + // Stability: stable + // Examples: 'cmd/otelcol' + AttributeProcessCommand = "process.command" + // The full command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of GetCommandLineW. Do not + // set this if you have to assemble it just for monitoring; use + // process.command_args instead. 
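One way an instrumentation might derive os.type, sketched here as an assumption rather than anything this package provides: map Go's runtime.GOOS onto the enum above, since most GOOS values already coincide with the convention's strings.

package main

import (
	"fmt"
	"runtime"

	semconv "go.opentelemetry.io/collector/semconv/v1.12.0"
)

// osTypeFromGOOS maps runtime.GOOS onto the os.type enum; only two values
// are spelled differently, the rest (linux, darwin, windows, ...) match.
func osTypeFromGOOS(goos string) string {
	switch goos {
	case "dragonfly":
		return semconv.AttributeOSTypeDragonflyBSD // "dragonflybsd"
	case "zos":
		return semconv.AttributeOSTypeZOS // "z_os"
	default:
		return goos
	}
}

func main() {
	fmt.Println(osTypeFromGOOS(runtime.GOOS))
}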
+ // + // Type: string + // Required: See below + // Stability: stable + // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' + AttributeProcessCommandLine = "process.command_line" + // All the command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited strings + // extracted from proc/[pid]/cmdline. For libc-based executables, this would be + // the full argv vector passed to main. + // + // Type: string[] + // Required: See below + // Stability: stable + // Examples: 'cmd/otecol', '--config=config.yaml' + AttributeProcessCommandArgs = "process.command_args" + // The username of the user that owns the process. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'root' + AttributeProcessOwner = "process.owner" +) + +// The single (language) runtime instance which is monitored. +const ( + // The name of the runtime of this process. For compiled native binaries, this + // SHOULD be the name of the compiler. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'OpenJDK Runtime Environment' + AttributeProcessRuntimeName = "process.runtime.name" + // The version of the runtime of this process, as returned by the runtime without + // modification. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '14.0.2' + AttributeProcessRuntimeVersion = "process.runtime.version" + // An additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + AttributeProcessRuntimeDescription = "process.runtime.description" +) + +// A service instance. +const ( + // Logical name of the service. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled services. If + // the value was not specified, SDKs MUST fallback to unknown_service: + // concatenated with process.executable.name, e.g. unknown_service:bash. If + // process.executable.name is not available, the value MUST be set to + // unknown_service. + AttributeServiceName = "service.name" + // A namespace for service.name. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group of + // services, for example the team name that owns a group of services. service.name + // is expected to be unique within the same namespace. If service.namespace is not + // specified in the Resource then service.name is expected to be unique for all + // services that have no explicit namespace defined (so the empty/unspecified + // namespace is simply one more valid namespace). Zero-length namespace string is + // assumed equal to unspecified namespace. + AttributeServiceNamespace = "service.namespace" + // The string ID of the service instance. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // service.namespace,service.name pair (in other words + // service.namespace,service.name,service.instance.id triplet MUST be globally + // unique). 
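The service.name fallback rule above is mechanical enough to sketch; assuming os.Executable is an acceptable source for process.executable.name, a minimal Go version might look like:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// fallbackServiceName returns unknown_service:<executable name> when the
// executable name is known, and plain unknown_service otherwise, per the
// service.name Note.
func fallbackServiceName() string {
	if exe, err := os.Executable(); err == nil {
		if name := filepath.Base(exe); name != "" && name != "." {
			return "unknown_service:" + name
		}
	}
	return "unknown_service"
}

func main() {
	fmt.Println(fallbackServiceName())
}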
The ID helps to distinguish instances of the same service that exist + // at the same time (e.g. instances of a horizontally scaled service). It is + // preferable for the ID to be persistent and stay the same for the lifetime of + // the service instance, however it is acceptable that the ID is ephemeral and + // changes during important lifetime events for the service (e.g. service + // restarts). If the service has no inherent unique ID that can be used as the + // value of this attribute it is recommended to generate a random Version 1 or + // Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use + // Version 5, see RFC 4122 for more recommendations). + AttributeServiceInstanceID = "service.instance.id" + // The version string of the service API or implementation. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '2.0.0' + AttributeServiceVersion = "service.version" +) + +// The telemetry SDK used to capture data recorded by the instrumentation libraries. +const ( + // The name of the telemetry SDK as defined above. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + AttributeTelemetrySDKName = "telemetry.sdk.name" + // The language of the telemetry SDK. + // + // Type: Enum + // Required: No + // Stability: stable + AttributeTelemetrySDKLanguage = "telemetry.sdk.language" + // The version string of the telemetry SDK. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '1.2.3' + AttributeTelemetrySDKVersion = "telemetry.sdk.version" + // The version string of the auto instrumentation agent, if used. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '1.2.3' + AttributeTelemetryAutoVersion = "telemetry.auto.version" +) + +const ( + // cpp + AttributeTelemetrySDKLanguageCPP = "cpp" + // dotnet + AttributeTelemetrySDKLanguageDotnet = "dotnet" + // erlang + AttributeTelemetrySDKLanguageErlang = "erlang" + // go + AttributeTelemetrySDKLanguageGo = "go" + // java + AttributeTelemetrySDKLanguageJava = "java" + // nodejs + AttributeTelemetrySDKLanguageNodejs = "nodejs" + // php + AttributeTelemetrySDKLanguagePHP = "php" + // python + AttributeTelemetrySDKLanguagePython = "python" + // ruby + AttributeTelemetrySDKLanguageRuby = "ruby" + // webjs + AttributeTelemetrySDKLanguageWebjs = "webjs" + // swift + AttributeTelemetrySDKLanguageSwift = "swift" +) + +// Resource describing the packaged software running the application code. Web engines are typically executed using process.runtime. +const ( + // The name of the web engine. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'WildFly' + AttributeWebEngineName = "webengine.name" + // The version of the web engine. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '21.0.0' + AttributeWebEngineVersion = "webengine.version" + // Additional description of the web engine (e.g. detailed version and edition + // information). 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final' + AttributeWebEngineDescription = "webengine.description" +) + +func GetResourceSemanticConventionAttributeNames() []string { + return []string{ + AttributeBrowserBrands, + AttributeBrowserPlatform, + AttributeBrowserUserAgent, + AttributeCloudProvider, + AttributeCloudAccountID, + AttributeCloudRegion, + AttributeCloudAvailabilityZone, + AttributeCloudPlatform, + AttributeAWSECSContainerARN, + AttributeAWSECSClusterARN, + AttributeAWSECSLaunchtype, + AttributeAWSECSTaskARN, + AttributeAWSECSTaskFamily, + AttributeAWSECSTaskRevision, + AttributeAWSEKSClusterARN, + AttributeAWSLogGroupNames, + AttributeAWSLogGroupARNs, + AttributeAWSLogStreamNames, + AttributeAWSLogStreamARNs, + AttributeContainerName, + AttributeContainerID, + AttributeContainerRuntime, + AttributeContainerImageName, + AttributeContainerImageTag, + AttributeDeploymentEnvironment, + AttributeDeviceID, + AttributeDeviceModelIdentifier, + AttributeDeviceModelName, + AttributeDeviceManufacturer, + AttributeFaaSName, + AttributeFaaSID, + AttributeFaaSVersion, + AttributeFaaSInstance, + AttributeFaaSMaxMemory, + AttributeHostID, + AttributeHostName, + AttributeHostType, + AttributeHostArch, + AttributeHostImageName, + AttributeHostImageID, + AttributeHostImageVersion, + AttributeK8SClusterName, + AttributeK8SNodeName, + AttributeK8SNodeUID, + AttributeK8SNamespaceName, + AttributeK8SPodUID, + AttributeK8SPodName, + AttributeK8SContainerName, + AttributeK8SContainerRestartCount, + AttributeK8SReplicaSetUID, + AttributeK8SReplicaSetName, + AttributeK8SDeploymentUID, + AttributeK8SDeploymentName, + AttributeK8SStatefulSetUID, + AttributeK8SStatefulSetName, + AttributeK8SDaemonSetUID, + AttributeK8SDaemonSetName, + AttributeK8SJobUID, + AttributeK8SJobName, + AttributeK8SCronJobUID, + AttributeK8SCronJobName, + AttributeOSType, + AttributeOSDescription, + AttributeOSName, + AttributeOSVersion, + AttributeProcessPID, + AttributeProcessExecutableName, + AttributeProcessExecutablePath, + AttributeProcessCommand, + AttributeProcessCommandLine, + AttributeProcessCommandArgs, + AttributeProcessOwner, + AttributeProcessRuntimeName, + AttributeProcessRuntimeVersion, + AttributeProcessRuntimeDescription, + AttributeServiceName, + AttributeServiceNamespace, + AttributeServiceInstanceID, + AttributeServiceVersion, + AttributeTelemetrySDKName, + AttributeTelemetrySDKLanguage, + AttributeTelemetrySDKVersion, + AttributeTelemetryAutoVersion, + AttributeWebEngineName, + AttributeWebEngineVersion, + AttributeWebEngineDescription, + } +} diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.12.0/generated_trace.go b/vendor/go.opentelemetry.io/collector/semconv/v1.12.0/generated_trace.go new file mode 100644 index 00000000000..ab9d27236a7 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.12.0/generated_trace.go @@ -0,0 +1,1805 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv + +// Span attributes used by AWS Lambda (in addition to general `faas` attributes). +const ( + // The full invoked ARN as provided on the Context passed to the function (Lambda- + // Runtime-Invoked-Function-ARN header on the /runtime/invocation/next + // applicable). 
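One plausible use of the GetResourceSemanticConventionAttributeNames helper defined above (a sketch, not something the package itself prescribes): build a lookup set to check whether a given resource attribute key is covered by the v1.12.0 conventions.

package main

import (
	"fmt"

	semconv "go.opentelemetry.io/collector/semconv/v1.12.0"
)

func main() {
	known := make(map[string]struct{})
	for _, name := range semconv.GetResourceSemanticConventionAttributeNames() {
		known[name] = struct{}{}
	}
	_, ok := known[semconv.AttributeServiceName]
	fmt.Println(ok) // true: service.name is part of the conventions
}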
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from faas.id if an alias is involved. + AttributeAWSLambdaInvokedARN = "aws.lambda.invoked_arn" +) + +// This document defines attributes for CloudEvents. CloudEvents is a specification on how to define event data in a standard way. These attributes can be attached to spans when performing operations with CloudEvents, regardless of the protocol being used. +const ( + // The event_id uniquely identifies the event. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' + AttributeCloudeventsEventID = "cloudevents.event_id" + // The source identifies the context in which an event happened. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'https://github.com/cloudevents', '/cloudevents/spec/pull/123', 'my- + // service' + AttributeCloudeventsEventSource = "cloudevents.event_source" + // The version of the CloudEvents specification which the event uses. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: '1.0' + AttributeCloudeventsEventSpecVersion = "cloudevents.event_spec_version" + // The event_type contains a value describing the type of event related to the + // originating occurrence. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'com.github.pull_request.opened', 'com.example.object.deleted.v2' + AttributeCloudeventsEventType = "cloudevents.event_type" + // The subject of the event in the context of the event producer (identified by + // source). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'mynewfile.jpg' + AttributeCloudeventsEventSubject = "cloudevents.event_subject" +) + +// This document defines semantic conventions for the OpenTracing Shim +const ( + // Parent-child Reference type + // + // Type: Enum + // Required: No + // Stability: stable + // Note: The causal relationship between a child Span and a parent Span. + AttributeOpentracingRefType = "opentracing.ref_type" +) + +const ( + // The parent Span depends on the child Span in some capacity + AttributeOpentracingRefTypeChildOf = "child_of" + // The parent Span does not depend in any way on the result of the child Span + AttributeOpentracingRefTypeFollowsFrom = "follows_from" +) + +// This document defines the attributes used to perform database client calls. +const ( + // An identifier for the database management system (DBMS) product being used. See + // below for a list of well-known identifiers. + // + // Type: Enum + // Required: Always + // Stability: stable + AttributeDBSystem = "db.system" + // The connection string used to connect to the database. It is recommended to + // remove embedded credentials. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' + AttributeDBConnectionString = "db.connection_string" + // Username for accessing the database. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'readonly_user', 'reporting_user' + AttributeDBUser = "db.user" + // The fully-qualified class name of the Java Database Connectivity (JDBC) driver + // used to connect. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'org.postgresql.Driver', + // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' + AttributeDBJDBCDriverClassname = "db.jdbc.driver_classname" + // This attribute is used to report the name of the database being accessed. For + // commands that switch the database, this should be set to the target database + // (even if the command fails). + // + // Type: string + // Required: Required, if applicable. + // Stability: stable + // Examples: 'customers', 'main' + // Note: In some SQL databases, the database name to be used is called + // "schema name". In case there are multiple layers that could be + // considered for database name (e.g. Oracle instance name and schema name), the + // database name to be used is the more specific layer (e.g. Oracle schema name). + AttributeDBName = "db.name" + // The database statement being executed. + // + // Type: string + // Required: Required if applicable and not explicitly disabled via + // instrumentation configuration. + // Stability: stable + // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' + // Note: The value may be sanitized to exclude sensitive information. + AttributeDBStatement = "db.statement" + // The name of the operation being executed, e.g. the MongoDB command name such as + // findAndModify, or the SQL keyword. + // + // Type: string + // Required: Required, if `db.statement` is not applicable. + // Stability: stable + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: When setting this to an SQL keyword, it is not recommended to attempt any + // client-side parsing of db.statement just to get this property, but it should be + // set if the operation name is provided by the library being instrumented. If the + // SQL statement has an ambiguous operation, or performs more than one operation, + // this value may be omitted. + AttributeDBOperation = "db.operation" +) + +const ( + // Some other SQL database. Fallback only. 
See notes + AttributeDBSystemOtherSQL = "other_sql" + // Microsoft SQL Server + AttributeDBSystemMSSQL = "mssql" + // MySQL + AttributeDBSystemMySQL = "mysql" + // Oracle Database + AttributeDBSystemOracle = "oracle" + // IBM DB2 + AttributeDBSystemDB2 = "db2" + // PostgreSQL + AttributeDBSystemPostgreSQL = "postgresql" + // Amazon Redshift + AttributeDBSystemRedshift = "redshift" + // Apache Hive + AttributeDBSystemHive = "hive" + // Cloudscape + AttributeDBSystemCloudscape = "cloudscape" + // HyperSQL DataBase + AttributeDBSystemHSQLDB = "hsqldb" + // Progress Database + AttributeDBSystemProgress = "progress" + // SAP MaxDB + AttributeDBSystemMaxDB = "maxdb" + // SAP HANA + AttributeDBSystemHanaDB = "hanadb" + // Ingres + AttributeDBSystemIngres = "ingres" + // FirstSQL + AttributeDBSystemFirstSQL = "firstsql" + // EnterpriseDB + AttributeDBSystemEDB = "edb" + // InterSystems Caché + AttributeDBSystemCache = "cache" + // Adabas (Adaptable Database System) + AttributeDBSystemAdabas = "adabas" + // Firebird + AttributeDBSystemFirebird = "firebird" + // Apache Derby + AttributeDBSystemDerby = "derby" + // FileMaker + AttributeDBSystemFilemaker = "filemaker" + // Informix + AttributeDBSystemInformix = "informix" + // InstantDB + AttributeDBSystemInstantDB = "instantdb" + // InterBase + AttributeDBSystemInterbase = "interbase" + // MariaDB + AttributeDBSystemMariaDB = "mariadb" + // Netezza + AttributeDBSystemNetezza = "netezza" + // Pervasive PSQL + AttributeDBSystemPervasive = "pervasive" + // PointBase + AttributeDBSystemPointbase = "pointbase" + // SQLite + AttributeDBSystemSqlite = "sqlite" + // Sybase + AttributeDBSystemSybase = "sybase" + // Teradata + AttributeDBSystemTeradata = "teradata" + // Vertica + AttributeDBSystemVertica = "vertica" + // H2 + AttributeDBSystemH2 = "h2" + // ColdFusion IMQ + AttributeDBSystemColdfusion = "coldfusion" + // Apache Cassandra + AttributeDBSystemCassandra = "cassandra" + // Apache HBase + AttributeDBSystemHBase = "hbase" + // MongoDB + AttributeDBSystemMongoDB = "mongodb" + // Redis + AttributeDBSystemRedis = "redis" + // Couchbase + AttributeDBSystemCouchbase = "couchbase" + // CouchDB + AttributeDBSystemCouchDB = "couchdb" + // Microsoft Azure Cosmos DB + AttributeDBSystemCosmosDB = "cosmosdb" + // Amazon DynamoDB + AttributeDBSystemDynamoDB = "dynamodb" + // Neo4j + AttributeDBSystemNeo4j = "neo4j" + // Apache Geode + AttributeDBSystemGeode = "geode" + // Elasticsearch + AttributeDBSystemElasticsearch = "elasticsearch" + // Memcached + AttributeDBSystemMemcached = "memcached" + // CockroachDB + AttributeDBSystemCockroachdb = "cockroachdb" +) + +// Connection-level attributes for Microsoft SQL Server +const ( + // The Microsoft SQL Server instance name connecting to. This name is used to + // determine the port of a named instance. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'MSSQLSERVER' + // Note: If setting a db.mssql.instance_name, net.peer.port is no longer required + // (but still recommended if non-standard). + AttributeDBMSSQLInstanceName = "db.mssql.instance_name" +) + +// Call-level attributes for Cassandra +const ( + // The fetch size used for paging, i.e. how many rows will be returned at once. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 5000 + AttributeDBCassandraPageSize = "db.cassandra.page_size" + // The consistency level of the query. Based on consistency values from CQL. 
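To make the requirement levels concrete, here is a sketch of the attribute set for a client span around one PostgreSQL query, reusing example values from the definitions above; per the notes, db.operation comes from the instrumented library rather than from parsing db.statement.

package main

import (
	"fmt"

	semconv "go.opentelemetry.io/collector/semconv/v1.12.0"
)

func main() {
	attrs := map[string]interface{}{
		semconv.AttributeDBSystem:    semconv.AttributeDBSystemPostgreSQL, // always required
		semconv.AttributeDBName:      "customers",
		semconv.AttributeDBStatement: "SELECT * FROM wuser_table",
		semconv.AttributeDBOperation: "SELECT",
		semconv.AttributeDBSQLTable:  "wuser_table",
	}
	fmt.Println(attrs)
}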
+ // + // Type: Enum + // Required: No + // Stability: stable + AttributeDBCassandraConsistencyLevel = "db.cassandra.consistency_level" + // The name of the primary table that the operation is acting upon, including the + // keyspace name (if applicable). + // + // Type: string + // Required: Recommended if available. + // Stability: stable + // Examples: 'mytable' + // Note: This mirrors the db.sql.table attribute but references cassandra rather + // than sql. It is not recommended to attempt any client-side parsing of + // db.statement just to get this property, but it should be set if it is provided + // by the library being instrumented. If the operation is acting upon an anonymous + // table, or more than one table, this value MUST NOT be set. + AttributeDBCassandraTable = "db.cassandra.table" + // Whether or not the query is idempotent. + // + // Type: boolean + // Required: No + // Stability: stable + AttributeDBCassandraIdempotence = "db.cassandra.idempotence" + // The number of times a query was speculatively executed. Not set or 0 if the + // query was not executed speculatively. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 0, 2 + AttributeDBCassandraSpeculativeExecutionCount = "db.cassandra.speculative_execution_count" + // The ID of the coordinating node for a query. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + AttributeDBCassandraCoordinatorID = "db.cassandra.coordinator.id" + // The data center of the coordinating node for a query. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'us-west-2' + AttributeDBCassandraCoordinatorDC = "db.cassandra.coordinator.dc" +) + +const ( + // all + AttributeDBCassandraConsistencyLevelAll = "all" + // each_quorum + AttributeDBCassandraConsistencyLevelEachQuorum = "each_quorum" + // quorum + AttributeDBCassandraConsistencyLevelQuorum = "quorum" + // local_quorum + AttributeDBCassandraConsistencyLevelLocalQuorum = "local_quorum" + // one + AttributeDBCassandraConsistencyLevelOne = "one" + // two + AttributeDBCassandraConsistencyLevelTwo = "two" + // three + AttributeDBCassandraConsistencyLevelThree = "three" + // local_one + AttributeDBCassandraConsistencyLevelLocalOne = "local_one" + // any + AttributeDBCassandraConsistencyLevelAny = "any" + // serial + AttributeDBCassandraConsistencyLevelSerial = "serial" + // local_serial + AttributeDBCassandraConsistencyLevelLocalSerial = "local_serial" +) + +// Call-level attributes for Redis +const ( + // The index of the database being accessed as used in the SELECT command, + // provided as an integer. To be used instead of the generic db.name attribute. + // + // Type: int + // Required: Required, if other than the default database (`0`). + // Stability: stable + // Examples: 0, 1, 15 + AttributeDBRedisDBIndex = "db.redis.database_index" +) + +// Call-level attributes for MongoDB +const ( + // The collection being accessed within the database stated in db.name. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'customers', 'products' + AttributeDBMongoDBCollection = "db.mongodb.collection" +) + +// Call-level attributes for SQL databases +const ( + // The name of the primary table that the operation is acting upon, including the + // database name (if applicable). + // + // Type: string + // Required: Recommended if available. 
+ // Stability: stable + // Examples: 'public.users', 'customers' + // Note: It is not recommended to attempt any client-side parsing of db.statement + // just to get this property, but it should be set if it is provided by the + // library being instrumented. If the operation is acting upon an anonymous table, + // or more than one table, this value MUST NOT be set. + AttributeDBSQLTable = "db.sql.table" +) + +// This document defines the attributes used to report a single exception associated with a span. +const ( + // The type of the exception (its fully-qualified class name, if applicable). The + // dynamic type of the exception should be preferred over the static type in + // languages that support it. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'java.net.ConnectException', 'OSError' + AttributeExceptionType = "exception.type" + // The exception message. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly" + AttributeExceptionMessage = "exception.message" + // A stacktrace as a string in the natural representation for the language + // runtime. The representation is to be determined and documented by each language + // SIG. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + AttributeExceptionStacktrace = "exception.stacktrace" + // SHOULD be set to true if the exception event is recorded at a point where it is + // known that the exception is escaping the scope of the span. + // + // Type: boolean + // Required: No + // Stability: stable + // Note: An exception is considered to have escaped (or left) the scope of a span, + // if that span is ended while the exception is still logically "in + // flight". + // This may be actually "in flight" in some languages (e.g. if the + // exception + // is passed to a Context manager's __exit__ method in Python) but will + // usually be caught at the point of recording the exception in most languages. It + // is usually not possible to determine at the point where an exception is thrown + // whether it will escape the scope of a span. + // However, it is trivial to know that an exception + // will escape, if one checks for an active exception just before ending the span, + // as done in the example above. It follows that an exception may still escape the + // scope of the span + // even if the exception.escaped attribute was not set or set to false, + // since the event might have been recorded at a time where it was not + // clear whether the exception will escape. + AttributeExceptionEscaped = "exception.escaped" +) + +// This semantic convention describes an instance of a function that runs without provisioning or managing of servers (also known as serverless functions or Function as a Service (FaaS)) with spans. +const ( + // Type of the trigger which caused this function execution. + // + // Type: Enum + // Required: No + // Stability: stable + // Note: For the server/consumer span on the incoming side, + // faas.trigger MUST be set. Clients invoking FaaS instances usually cannot set + // faas.trigger, + // since they would typically need to look in the payload to determine + // the event type. 
If clients set it, it should be the same as the + // trigger that corresponding incoming would have (i.e., this has + // nothing to do with the underlying transport used to make the API + // call to invoke the lambda, which is often HTTP). + AttributeFaaSTrigger = "faas.trigger" + // The execution ID of the current function execution. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + AttributeFaaSExecution = "faas.execution" +) + +const ( + // A response to some data source operation such as a database or filesystem read/write + AttributeFaaSTriggerDatasource = "datasource" + // To provide an answer to an inbound HTTP request + AttributeFaaSTriggerHTTP = "http" + // A function is set to be executed when messages are sent to a messaging system + AttributeFaaSTriggerPubsub = "pubsub" + // A function is scheduled to be executed regularly + AttributeFaaSTriggerTimer = "timer" + // If none of the others apply + AttributeFaaSTriggerOther = "other" +) + +// Semantic Convention for FaaS triggered as a response to some data source operation such as a database or filesystem read/write. +const ( + // The name of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos + // DB to the database name. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'myBucketName', 'myDBName' + AttributeFaaSDocumentCollection = "faas.document.collection" + // Describes the type of the operation that was performed on the data. + // + // Type: Enum + // Required: Always + // Stability: stable + AttributeFaaSDocumentOperation = "faas.document.operation" + // A string containing the time when the data was accessed in the ISO 8601 format + // expressed in UTC. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + AttributeFaaSDocumentTime = "faas.document.time" + // The document name/table subjected to the operation. For example, in Cloud + // Storage or S3 is the name of the file, and in Cosmos DB the table name. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'myFile.txt', 'myTableName' + AttributeFaaSDocumentName = "faas.document.name" +) + +const ( + // When a new object is created + AttributeFaaSDocumentOperationInsert = "insert" + // When an object is modified + AttributeFaaSDocumentOperationEdit = "edit" + // When an object is deleted + AttributeFaaSDocumentOperationDelete = "delete" +) + +// Semantic Convention for FaaS scheduled to be executed regularly. +const ( + // A string containing the function invocation time in the ISO 8601 format + // expressed in UTC. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + AttributeFaaSTime = "faas.time" + // A string containing the schedule period as Cron Expression. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '0/5 * * * ? *' + AttributeFaaSCron = "faas.cron" +) + +// Contains additional attributes for incoming FaaS spans. +const ( + // A boolean that is true if the serverless function is executed for the first + // time (aka cold-start). + // + // Type: boolean + // Required: No + // Stability: stable + AttributeFaaSColdstart = "faas.coldstart" +) + +// Contains additional attributes for outgoing FaaS spans. +const ( + // The name of the invoked function. 
+ // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'my-function' + // Note: SHOULD be equal to the faas.name resource attribute of the invoked + // function. + AttributeFaaSInvokedName = "faas.invoked_name" + // The cloud provider of the invoked function. + // + // Type: Enum + // Required: Always + // Stability: stable + // Note: SHOULD be equal to the cloud.provider resource attribute of the invoked + // function. + AttributeFaaSInvokedProvider = "faas.invoked_provider" + // The cloud region of the invoked function. + // + // Type: string + // Required: For some cloud providers, like AWS or GCP, the region in which a + // function is hosted is essential to uniquely identify the function and also part + // of its endpoint. Since it's part of the endpoint being called, the region is + // always known to clients. In these cases, `faas.invoked_region` MUST be set + // accordingly. If the region is unknown to the client or not required for + // identifying the invoked function, setting `faas.invoked_region` is optional. + // Stability: stable + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the cloud.region resource attribute of the invoked + // function. + AttributeFaaSInvokedRegion = "faas.invoked_region" +) + +const ( + // Alibaba Cloud + AttributeFaaSInvokedProviderAlibabaCloud = "alibaba_cloud" + // Amazon Web Services + AttributeFaaSInvokedProviderAWS = "aws" + // Microsoft Azure + AttributeFaaSInvokedProviderAzure = "azure" + // Google Cloud Platform + AttributeFaaSInvokedProviderGCP = "gcp" + // Tencent Cloud + AttributeFaaSInvokedProviderTencentCloud = "tencent_cloud" +) + +// These attributes may be used for any network related operation. +const ( + // Transport protocol used. See note below. + // + // Type: Enum + // Required: No + // Stability: stable + AttributeNetTransport = "net.transport" + // Remote address of the peer (dotted decimal for IPv4 or RFC5952 for IPv6) + // + // Type: string + // Required: No + // Stability: stable + // Examples: '127.0.0.1' + AttributeNetPeerIP = "net.peer.ip" + // Remote port number. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 80, 8080, 443 + AttributeNetPeerPort = "net.peer.port" + // Remote hostname or similar, see note below. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'example.com' + // Note: net.peer.name SHOULD NOT be set if capturing it would require an extra + // DNS lookup. + AttributeNetPeerName = "net.peer.name" + // Like net.peer.ip but for the host IP. Useful in case of a multi-IP host. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '192.168.0.1' + AttributeNetHostIP = "net.host.ip" + // Like net.peer.port but for the host port. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 35555 + AttributeNetHostPort = "net.host.port" + // Local hostname or similar, see note below. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'localhost' + AttributeNetHostName = "net.host.name" + // The internet connection type currently being used by the host. + // + // Type: Enum + // Required: No + // Stability: stable + // Examples: 'wifi' + AttributeNetHostConnectionType = "net.host.connection.type" + // This describes more details regarding the connection.type. It may be the type + // of cell technology connection, but it could be used for describing details + // about a wifi connection. 
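A sketch (with assumptions) of filling net.peer.ip and net.peer.port from a connection's remote address: it presumes a TCP connection and simply leaves the attributes unset whenever the address has no host:port form.

package main

import (
	"fmt"
	"net"
	"strconv"

	semconv "go.opentelemetry.io/collector/semconv/v1.12.0"
)

// peerAttributes extracts peer attributes from conn, assumed to be TCP.
func peerAttributes(conn net.Conn) map[string]interface{} {
	attrs := map[string]interface{}{}
	host, port, err := net.SplitHostPort(conn.RemoteAddr().String())
	if err != nil {
		return attrs // e.g. an in-memory pipe with no host:port address
	}
	attrs[semconv.AttributeNetTransport] = semconv.AttributeNetTransportTCP
	attrs[semconv.AttributeNetPeerIP] = host
	if p, err := strconv.Atoi(port); err == nil {
		attrs[semconv.AttributeNetPeerPort] = p
	}
	return attrs
}

func main() {
	c1, c2 := net.Pipe() // just to exercise the helper without a network
	defer c1.Close()
	defer c2.Close()
	fmt.Println(peerAttributes(c1)) // empty: pipes have no host:port
}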
+ // + // Type: Enum + // Required: No + // Stability: stable + // Examples: 'LTE' + AttributeNetHostConnectionSubtype = "net.host.connection.subtype" + // The name of the mobile carrier. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'sprint' + AttributeNetHostCarrierName = "net.host.carrier.name" + // The mobile carrier country code. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '310' + AttributeNetHostCarrierMcc = "net.host.carrier.mcc" + // The mobile carrier network code. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '001' + AttributeNetHostCarrierMnc = "net.host.carrier.mnc" + // The ISO 3166-1 alpha-2 2-character country code associated with the mobile + // carrier network. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'DE' + AttributeNetHostCarrierIcc = "net.host.carrier.icc" +) + +const ( + // ip_tcp + AttributeNetTransportTCP = "ip_tcp" + // ip_udp + AttributeNetTransportUDP = "ip_udp" + // Another IP-based protocol + AttributeNetTransportIP = "ip" + // Unix Domain socket. See below + AttributeNetTransportUnix = "unix" + // Named or anonymous pipe. See note below + AttributeNetTransportPipe = "pipe" + // In-process communication + AttributeNetTransportInProc = "inproc" + // Something else (non IP-based) + AttributeNetTransportOther = "other" +) + +const ( + // wifi + AttributeNetHostConnectionTypeWifi = "wifi" + // wired + AttributeNetHostConnectionTypeWired = "wired" + // cell + AttributeNetHostConnectionTypeCell = "cell" + // unavailable + AttributeNetHostConnectionTypeUnavailable = "unavailable" + // unknown + AttributeNetHostConnectionTypeUnknown = "unknown" +) + +const ( + // GPRS + AttributeNetHostConnectionSubtypeGprs = "gprs" + // EDGE + AttributeNetHostConnectionSubtypeEdge = "edge" + // UMTS + AttributeNetHostConnectionSubtypeUmts = "umts" + // CDMA + AttributeNetHostConnectionSubtypeCdma = "cdma" + // EVDO Rel. 0 + AttributeNetHostConnectionSubtypeEvdo0 = "evdo_0" + // EVDO Rev. A + AttributeNetHostConnectionSubtypeEvdoA = "evdo_a" + // CDMA2000 1XRTT + AttributeNetHostConnectionSubtypeCdma20001xrtt = "cdma2000_1xrtt" + // HSDPA + AttributeNetHostConnectionSubtypeHsdpa = "hsdpa" + // HSUPA + AttributeNetHostConnectionSubtypeHsupa = "hsupa" + // HSPA + AttributeNetHostConnectionSubtypeHspa = "hspa" + // IDEN + AttributeNetHostConnectionSubtypeIden = "iden" + // EVDO Rev. B + AttributeNetHostConnectionSubtypeEvdoB = "evdo_b" + // LTE + AttributeNetHostConnectionSubtypeLte = "lte" + // EHRPD + AttributeNetHostConnectionSubtypeEhrpd = "ehrpd" + // HSPAP + AttributeNetHostConnectionSubtypeHspap = "hspap" + // GSM + AttributeNetHostConnectionSubtypeGsm = "gsm" + // TD-SCDMA + AttributeNetHostConnectionSubtypeTdScdma = "td_scdma" + // IWLAN + AttributeNetHostConnectionSubtypeIwlan = "iwlan" + // 5G NR (New Radio) + AttributeNetHostConnectionSubtypeNr = "nr" + // 5G NRNSA (New Radio Non-Standalone) + AttributeNetHostConnectionSubtypeNrnsa = "nrnsa" + // LTE CA + AttributeNetHostConnectionSubtypeLteCa = "lte_ca" +) + +// Operations that access some remote service. +const ( + // The service.name of the remote service. SHOULD be equal to the actual + // service.name resource attribute of the remote service if any. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'AuthTokenCache' + AttributePeerService = "peer.service" +) + +// These attributes may be used for any operation with an authenticated and/or authorized enduser. 
+const ( + // Username or client_id extracted from the access token or Authorization header + // in the inbound request from outside the system. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'username' + AttributeEnduserID = "enduser.id" + // Actual/assumed role the client is making the request under, extracted from the + // token or application security context. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'admin' + AttributeEnduserRole = "enduser.role" + // Scopes or granted authorities the client currently possesses, extracted from + // the token or application security context. The value would come from the scope + // associated with an OAuth 2.0 Access Token or an attribute value in a SAML 2.0 + // Assertion. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'read:message, write:files' + AttributeEnduserScope = "enduser.scope" +) + +// These attributes may be used for any operation to store information about a thread that started a span. +const ( + // Current "managed" thread ID (as opposed to OS thread ID). + // + // Type: int + // Required: No + // Stability: stable + // Examples: 42 + AttributeThreadID = "thread.id" + // Current thread name. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'main' + AttributeThreadName = "thread.name" +) + +// These attributes allow reporting this unit of code and therefore provide more context about the span. +const ( + // The method or function name, or equivalent (usually the rightmost part of the + // code unit's name). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'serveRequest' + AttributeCodeFunction = "code.function" + // The "namespace" within which code.function is defined. Usually the + // qualified class or module name, such that code.namespace + some separator + + // code.function form a unique identifier for the code unit. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'com.example.MyHTTPService' + AttributeCodeNamespace = "code.namespace" + // The source code file name that identifies the code unit as uniquely as possible + // (preferably an absolute file path). + // + // Type: string + // Required: No + // Stability: stable + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + AttributeCodeFilepath = "code.filepath" + // The line number in code.filepath best representing the operation. It SHOULD + // point within the code unit named in code.function. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 42 + AttributeCodeLineNumber = "code.lineno" +) + +// This document defines semantic conventions for HTTP client and server Spans. +const ( + // HTTP request method. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'GET', 'POST', 'HEAD' + AttributeHTTPMethod = "http.method" + // Full HTTP request URL in the form scheme://host[:port]/path?query[#fragment]. + // Usually the fragment is not transmitted over HTTP, but if it is known, it + // should be included nevertheless. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' + // Note: http.url MUST NOT contain credentials passed via the URL in the form of + // https://username:password@www.example.com/. In such a case the attribute's + // value should be https://www.example.com/. 
+ AttributeHTTPURL = "http.url" + // The full request target as passed in an HTTP request line or equivalent. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '/path/12314/?q=ddds#123' + AttributeHTTPTarget = "http.target" + // The value of the HTTP host header. An empty Host header should also be + // reported, see note. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'www.example.org' + // Note: When the header is present but empty the attribute SHOULD be set to the + // empty string. Note that this is a valid situation that is expected in certain + // cases, according to the aforementioned section of RFC 7230. When the header is + // not set the attribute MUST NOT be set. + AttributeHTTPHost = "http.host" + // The URI scheme identifying the used protocol. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'http', 'https' + AttributeHTTPScheme = "http.scheme" + // HTTP response status code. + // + // Type: int + // Required: If and only if one was received/sent. + // Stability: stable + // Examples: 200 + AttributeHTTPStatusCode = "http.status_code" + // Kind of HTTP protocol used. + // + // Type: Enum + // Required: No + // Stability: stable + // Note: If net.transport is not specified, it can be assumed to be IP.TCP except + // if http.flavor is QUIC, in which case IP.UDP is assumed. + AttributeHTTPFlavor = "http.flavor" + // Value of the HTTP User-Agent header sent by the client. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' + AttributeHTTPUserAgent = "http.user_agent" + // The size of the request payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as the + // Content-Length header. For requests using transport encoding, this should be + // the compressed size. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 3495 + AttributeHTTPRequestContentLength = "http.request_content_length" + // The size of the uncompressed request payload body after transport decoding. Not + // set if transport encoding is not used. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 5493 + AttributeHTTPRequestContentLengthUncompressed = "http.request_content_length_uncompressed" + // The size of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as the + // Content-Length header. For requests using transport encoding, this should be + // the compressed size. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 3495 + AttributeHTTPResponseContentLength = "http.response_content_length" + // The size of the uncompressed response payload body after transport decoding. + // Not set if transport encoding is not used. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 5493 + AttributeHTTPResponseContentLengthUncompressed = "http.response_content_length_uncompressed" + // The ordinal number of the request re-sending attempt. + // + // Type: int + // Required: If and only if a request was retried. 
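A minimal sketch of enforcing the http.url credential rule from the Note above, using net/url to drop any embedded userinfo before the value is recorded:

package main

import (
	"fmt"
	"net/url"
)

// redactedHTTPURL strips any username:password from the URL, returning ""
// for unparseable input rather than guessing.
func redactedHTTPURL(raw string) string {
	u, err := url.Parse(raw)
	if err != nil {
		return ""
	}
	u.User = nil
	return u.String()
}

func main() {
	fmt.Println(redactedHTTPURL("https://username:password@www.example.com/"))
	// prints https://www.example.com/
}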
+ // Stability: stable + // Examples: 3 + AttributeHTTPRetryCount = "http.retry_count" +) + +const ( + // HTTP/1.0 + AttributeHTTPFlavorHTTP10 = "1.0" + // HTTP/1.1 + AttributeHTTPFlavorHTTP11 = "1.1" + // HTTP/2 + AttributeHTTPFlavorHTTP20 = "2.0" + // HTTP/3 + AttributeHTTPFlavorHTTP30 = "3.0" + // SPDY protocol + AttributeHTTPFlavorSPDY = "SPDY" + // QUIC protocol + AttributeHTTPFlavorQUIC = "QUIC" +) + +// Semantic Convention for HTTP Server +const ( + // The primary server name of the matched virtual host. This should be obtained + // via configuration. If no such configuration can be obtained, this attribute + // MUST NOT be set ( net.host.name should be used instead). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'example.com' + // Note: http.url is usually not readily available on the server side but would + // have to be assembled in a cumbersome and sometimes lossy process from other + // information (see e.g. open-telemetry/opentelemetry-python/pull/148). It is thus + // preferred to supply the raw data that is available. + AttributeHTTPServerName = "http.server_name" + // The matched route (path template). + // + // Type: string + // Required: No + // Stability: stable + // Examples: '/users/:userID?' + AttributeHTTPRoute = "http.route" + // The IP address of the original client behind all proxies, if known (e.g. from + // X-Forwarded-For). + // + // Type: string + // Required: No + // Stability: stable + // Examples: '83.164.160.102' + // Note: This is not necessarily the same as net.peer.ip, which would + // identify the network-level peer, which may be a proxy.This attribute should be + // set when a source of information different + // from the one used for net.peer.ip, is available even if that other + // source just confirms the same value as net.peer.ip. + // Rationale: For net.peer.ip, one typically does not know if it + // comes from a proxy, reverse proxy, or the actual client. Setting + // http.client_ip when it's the same as net.peer.ip means that + // one is at least somewhat confident that the address is not that of + // the closest proxy. + AttributeHTTPClientIP = "http.client_ip" +) + +// Attributes that exist for multiple DynamoDB request types. +const ( + // The keys in the RequestItems object field. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'Users', 'Cats' + AttributeAWSDynamoDBTableNames = "aws.dynamodb.table_names" + // The JSON-serialized value of each item in the ConsumedCapacity response field. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : { + // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, + // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": + // "string", "WriteCapacityUnits": number }' + AttributeAWSDynamoDBConsumedCapacity = "aws.dynamodb.consumed_capacity" + // The JSON-serialized value of the ItemCollectionMetrics response field. 
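A sketch of one way to derive http.client_ip as described above: take the left-most X-Forwarded-For entry, which identifies the original client only when the header can be trusted (a deployment-specific assumption).

package main

import (
	"fmt"
	"net/http"
	"strings"
)

// clientIPFromXFF returns the left-most X-Forwarded-For entry, or "" when
// the header is absent; later entries name intermediate proxies.
func clientIPFromXFF(h http.Header) string {
	xff := h.Get("X-Forwarded-For")
	if xff == "" {
		return ""
	}
	return strings.TrimSpace(strings.Split(xff, ",")[0])
}

func main() {
	h := http.Header{}
	h.Set("X-Forwarded-For", "83.164.160.102, 10.0.0.1")
	fmt.Println(clientIPFromXFF(h)) // 83.164.160.102
}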
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, + // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : + // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": + // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }' + AttributeAWSDynamoDBItemCollectionMetrics = "aws.dynamodb.item_collection_metrics" + // The value of the ProvisionedThroughput.ReadCapacityUnits request parameter. + // + // Type: double + // Required: No + // Stability: stable + // Examples: 1.0, 2.0 + AttributeAWSDynamoDBProvisionedReadCapacity = "aws.dynamodb.provisioned_read_capacity" + // The value of the ProvisionedThroughput.WriteCapacityUnits request parameter. + // + // Type: double + // Required: No + // Stability: stable + // Examples: 1.0, 2.0 + AttributeAWSDynamoDBProvisionedWriteCapacity = "aws.dynamodb.provisioned_write_capacity" + // The value of the ConsistentRead request parameter. + // + // Type: boolean + // Required: No + // Stability: stable + AttributeAWSDynamoDBConsistentRead = "aws.dynamodb.consistent_read" + // The value of the ProjectionExpression request parameter. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems, + // ProductReviews' + AttributeAWSDynamoDBProjection = "aws.dynamodb.projection" + // The value of the Limit request parameter. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 10 + AttributeAWSDynamoDBLimit = "aws.dynamodb.limit" + // The value of the AttributesToGet request parameter. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'lives', 'id' + AttributeAWSDynamoDBAttributesToGet = "aws.dynamodb.attributes_to_get" + // The value of the IndexName request parameter. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'name_to_group' + AttributeAWSDynamoDBIndexName = "aws.dynamodb.index_name" + // The value of the Select request parameter. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AttributeAWSDynamoDBSelect = "aws.dynamodb.select" +) + +// DynamoDB.CreateTable +const ( + // The JSON-serialized value of each item of the GlobalSecondaryIndexes request + // field + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits": + // number, "WriteCapacityUnits": number } }' + AttributeAWSDynamoDBGlobalSecondaryIndexes = "aws.dynamodb.global_secondary_indexes" + // The JSON-serialized value of each item of the LocalSecondaryIndexes request + // field. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes": + // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" } }' + AttributeAWSDynamoDBLocalSecondaryIndexes = "aws.dynamodb.local_secondary_indexes" +) + +// DynamoDB.ListTables +const ( + // The value of the ExclusiveStartTableName request parameter. 
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'Users', 'CatsTable'
+ AttributeAWSDynamoDBExclusiveStartTable = "aws.dynamodb.exclusive_start_table"
+ // The number of items in the TableNames response parameter.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 20
+ AttributeAWSDynamoDBTableCount = "aws.dynamodb.table_count"
+)
+
+// DynamoDB.Query
+const (
+ // The value of the ScanIndexForward request parameter.
+ //
+ // Type: boolean
+ // Required: No
+ // Stability: stable
+ AttributeAWSDynamoDBScanForward = "aws.dynamodb.scan_forward"
+)
+
+// DynamoDB.Scan
+const (
+ // The value of the Segment request parameter.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 10
+ AttributeAWSDynamoDBSegment = "aws.dynamodb.segment"
+ // The value of the TotalSegments request parameter.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 100
+ AttributeAWSDynamoDBTotalSegments = "aws.dynamodb.total_segments"
+ // The value of the Count response parameter.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 10
+ AttributeAWSDynamoDBCount = "aws.dynamodb.count"
+ // The value of the ScannedCount response parameter.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 50
+ AttributeAWSDynamoDBScannedCount = "aws.dynamodb.scanned_count"
+)
+
+// DynamoDB.UpdateTable
+const (
+ // The JSON-serialized value of each item in the AttributeDefinitions request
+ // field.
+ //
+ // Type: string[]
+ // Required: No
+ // Stability: stable
+ // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+ AttributeAWSDynamoDBAttributeDefinitions = "aws.dynamodb.attribute_definitions"
+ // The JSON-serialized value of each item in the GlobalSecondaryIndexUpdates
+ // request field.
+ //
+ // Type: string[]
+ // Required: No
+ // Stability: stable
+ // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+ // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits":
+ // number } }'
+ AttributeAWSDynamoDBGlobalSecondaryIndexUpdates = "aws.dynamodb.global_secondary_index_updates"
+)
+
+// This document defines the attributes used in messaging systems.
+const (
+ // A string identifying the messaging system.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS'
+ AttributeMessagingSystem = "messaging.system"
+ // The message destination name. This might be equal to the span name but is
+ // required nevertheless.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: 'MyQueue', 'MyTopic'
+ AttributeMessagingDestination = "messaging.destination"
+ // The kind of message destination.
+ //
+ // Type: Enum
+ // Required: Required only if the message destination is either a `queue` or
+ // `topic`.
+ // Stability: stable
+ AttributeMessagingDestinationKind = "messaging.destination_kind"
+ // A boolean that is true if the message destination is temporary.
+ //
+ // Type: boolean
+ // Required: If missing, it is assumed to be false.
+ // Stability: stable
+ AttributeMessagingTempDestination = "messaging.temp_destination"
+ // The name of the transport protocol.
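[Editor's aside, not part of the vendored file; the messaging.protocol metadata continues right after this block.] Several of the aws.dynamodb.* attributes above take "JSON-serialized" values, one JSON document per response item. A minimal sketch of producing such a value with encoding/json; the consumedCapacity struct here is a hypothetical stand-in for the AWS SDK's response type:

package main

import (
	"encoding/json"
	"fmt"
)

// consumedCapacity is a hypothetical stand-in for the AWS SDK's
// ConsumedCapacity response type.
type consumedCapacity struct {
	TableName     string  `json:"TableName"`
	CapacityUnits float64 `json:"CapacityUnits"`
}

func main() {
	cc := consumedCapacity{TableName: "Users", CapacityUnits: 1.5}
	b, err := json.Marshal(cc)
	if err != nil {
		return // in real instrumentation, drop the attribute rather than fail
	}
	// aws.dynamodb.consumed_capacity is a string[] attribute: one JSON
	// document per item of the ConsumedCapacity response field.
	attrs := map[string][]string{
		"aws.dynamodb.consumed_capacity": {string(b)},
	}
	fmt.Println(attrs)
}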
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'AMQP', 'MQTT' + AttributeMessagingProtocol = "messaging.protocol" + // The version of the transport protocol. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '0.9.1' + AttributeMessagingProtocolVersion = "messaging.protocol_version" + // Connection string. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'tibjmsnaming://localhost:7222', + // 'https://queue.amazonaws.com/80398EXAMPLE/MyQueue' + AttributeMessagingURL = "messaging.url" + // A value used by the messaging system as an identifier for the message, + // represented as a string. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + AttributeMessagingMessageID = "messaging.message_id" + // The conversation ID identifying the conversation to which the message belongs, + // represented as a string. Sometimes called "Correlation ID". + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'MyConversationID' + AttributeMessagingConversationID = "messaging.conversation_id" + // The (uncompressed) size of the message payload in bytes. Also use this + // attribute if it is unknown whether the compressed or uncompressed payload size + // is reported. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 2738 + AttributeMessagingMessagePayloadSizeBytes = "messaging.message_payload_size_bytes" + // The compressed size of the message payload in bytes. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 2048 + AttributeMessagingMessagePayloadCompressedSizeBytes = "messaging.message_payload_compressed_size_bytes" +) + +const ( + // A message sent to a queue + AttributeMessagingDestinationKindQueue = "queue" + // A message sent to a topic + AttributeMessagingDestinationKindTopic = "topic" +) + +// Semantic convention for a consumer of messages received from a messaging system +const ( + // A string identifying the kind of message consumption as defined in the + // Operation names section above. If the operation is "send", this + // attribute MUST NOT be set, since the operation can be inferred from the span + // kind in that case. + // + // Type: Enum + // Required: No + // Stability: stable + AttributeMessagingOperation = "messaging.operation" + // The identifier for the consumer receiving a message. For Kafka, set it to + // {messaging.kafka.consumer_group} - {messaging.kafka.client_id}, if both are + // present, or only messaging.kafka.consumer_group. For brokers, such as RabbitMQ + // and Artemis, set it to the client_id of the client consuming the message. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'mygroup - client-6' + AttributeMessagingConsumerID = "messaging.consumer_id" +) + +const ( + // receive + AttributeMessagingOperationReceive = "receive" + // process + AttributeMessagingOperationProcess = "process" +) + +// Attributes for RabbitMQ +const ( + // RabbitMQ message routing key. + // + // Type: string + // Required: Unless it is empty. + // Stability: stable + // Examples: 'myKey' + AttributeMessagingRabbitmqRoutingKey = "messaging.rabbitmq.routing_key" +) + +// Attributes for Apache Kafka +const ( + // Message keys in Kafka are used for grouping alike messages to ensure they're + // processed on the same partition. They differ from messaging.message_id in that + // they're not unique. 
If the key is null, the attribute MUST NOT be set.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'myKey'
+ // Note: If the key type is not string, its string representation has to be
+ // supplied for the attribute. If the key has no unambiguous, canonical string
+ // form, don't include its value.
+ AttributeMessagingKafkaMessageKey = "messaging.kafka.message_key"
+ // Name of the Kafka Consumer Group that is handling the message. Only applies to
+ // consumers, not producers.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'my-group'
+ AttributeMessagingKafkaConsumerGroup = "messaging.kafka.consumer_group"
+ // Client ID for the Consumer or Producer that is handling the message.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'client-5'
+ AttributeMessagingKafkaClientID = "messaging.kafka.client_id"
+ // Partition the message is sent to.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 2
+ AttributeMessagingKafkaPartition = "messaging.kafka.partition"
+ // A boolean that is true if the message is a tombstone.
+ //
+ // Type: boolean
+ // Required: If missing, it is assumed to be false.
+ // Stability: stable
+ AttributeMessagingKafkaTombstone = "messaging.kafka.tombstone"
+)
+
+// Attributes for Apache RocketMQ
+const (
+ // Namespace of RocketMQ resources; resources in different namespaces are
+ // individual.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: 'myNamespace'
+ AttributeMessagingRocketmqNamespace = "messaging.rocketmq.namespace"
+ // Name of the RocketMQ producer/consumer group that is handling the message. The
+ // client type is identified by the SpanKind.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: 'myConsumerGroup'
+ AttributeMessagingRocketmqClientGroup = "messaging.rocketmq.client_group"
+ // The unique identifier for each client.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: 'myhost@8742@s8083jm'
+ AttributeMessagingRocketmqClientID = "messaging.rocketmq.client_id"
+ // Type of message.
+ //
+ // Type: Enum
+ // Required: No
+ // Stability: stable
+ AttributeMessagingRocketmqMessageType = "messaging.rocketmq.message_type"
+ // The secondary classifier of message besides topic.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'tagA'
+ AttributeMessagingRocketmqMessageTag = "messaging.rocketmq.message_tag"
+ // Key(s) of message, another way to mark message besides message id.
+ //
+ // Type: string[]
+ // Required: No
+ // Stability: stable
+ // Examples: 'keyA', 'keyB'
+ AttributeMessagingRocketmqMessageKeys = "messaging.rocketmq.message_keys"
+ // Model of message consumption. This only applies to consumer spans.
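[Editor's aside, not part of the vendored file; the consumption-model metadata continues after this block.] The requirement levels above translate into simple population rules for a Kafka consume span. A minimal sketch, assuming plain attribute-string keys as defined in this package; the helper and its inputs are hypothetical:

package main

import "fmt"

// kafkaConsumeAttrs sketches the rules above: messaging.system and
// messaging.destination are always required; a null Kafka key MUST NOT be
// recorded; messaging.consumer_id is "<group> - <client_id>" when both are
// present, or just the consumer group otherwise.
func kafkaConsumeAttrs(topic, group, clientID string, key []byte) map[string]string {
	attrs := map[string]string{
		"messaging.system":           "kafka",
		"messaging.destination":      topic,
		"messaging.destination_kind": "topic",
		"messaging.operation":        "process",
	}
	if key != nil {
		attrs["messaging.kafka.message_key"] = string(key)
	}
	if group != "" {
		attrs["messaging.kafka.consumer_group"] = group
		if clientID != "" {
			attrs["messaging.consumer_id"] = group + " - " + clientID
		} else {
			attrs["messaging.consumer_id"] = group
		}
	}
	return attrs
}

func main() {
	fmt.Println(kafkaConsumeAttrs("MyTopic", "my-group", "client-5", []byte("myKey")))
}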
+ // + // Type: Enum + // Required: No + // Stability: stable + AttributeMessagingRocketmqConsumptionModel = "messaging.rocketmq.consumption_model" +) + +const ( + // Normal message + AttributeMessagingRocketmqMessageTypeNormal = "normal" + // FIFO message + AttributeMessagingRocketmqMessageTypeFifo = "fifo" + // Delay message + AttributeMessagingRocketmqMessageTypeDelay = "delay" + // Transaction message + AttributeMessagingRocketmqMessageTypeTransaction = "transaction" +) + +const ( + // Clustering consumption model + AttributeMessagingRocketmqConsumptionModelClustering = "clustering" + // Broadcasting consumption model + AttributeMessagingRocketmqConsumptionModelBroadcasting = "broadcasting" +) + +// This document defines semantic conventions for remote procedure calls. +const ( + // A string identifying the remoting system. See below for a list of well-known + // identifiers. + // + // Type: Enum + // Required: Always + // Stability: stable + AttributeRPCSystem = "rpc.system" + // The full (logical) name of the service being called, including its package + // name, if applicable. + // + // Type: string + // Required: No, but recommended + // Stability: stable + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing class. + // The code.namespace attribute may be used to store the latter (despite the + // attribute name, it may include a class name; e.g., class with method actually + // executing the call on the server side, RPC client stub class on the client + // side). + AttributeRPCService = "rpc.service" + // The name of the (logical) method being called, must be equal to the $method + // part in the span name. + // + // Type: string + // Required: No, but recommended + // Stability: stable + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The code.function attribute may be used to store the latter + // (e.g., method actually executing the call on the server side, RPC client stub + // method on the client side). + AttributeRPCMethod = "rpc.method" +) + +const ( + // gRPC + AttributeRPCSystemGRPC = "grpc" + // Java RMI + AttributeRPCSystemJavaRmi = "java_rmi" + // .NET WCF + AttributeRPCSystemDotnetWcf = "dotnet_wcf" + // Apache Dubbo + AttributeRPCSystemApacheDubbo = "apache_dubbo" +) + +// Tech-specific attributes for gRPC. +const ( + // The numeric status code of the gRPC request. 
+ //
+ // Type: Enum
+ // Required: Always
+ // Stability: stable
+ AttributeRPCGRPCStatusCode = "rpc.grpc.status_code"
+)
+
+const (
+ // OK
+ AttributeRPCGRPCStatusCodeOk = "0"
+ // CANCELLED
+ AttributeRPCGRPCStatusCodeCancelled = "1"
+ // UNKNOWN
+ AttributeRPCGRPCStatusCodeUnknown = "2"
+ // INVALID_ARGUMENT
+ AttributeRPCGRPCStatusCodeInvalidArgument = "3"
+ // DEADLINE_EXCEEDED
+ AttributeRPCGRPCStatusCodeDeadlineExceeded = "4"
+ // NOT_FOUND
+ AttributeRPCGRPCStatusCodeNotFound = "5"
+ // ALREADY_EXISTS
+ AttributeRPCGRPCStatusCodeAlreadyExists = "6"
+ // PERMISSION_DENIED
+ AttributeRPCGRPCStatusCodePermissionDenied = "7"
+ // RESOURCE_EXHAUSTED
+ AttributeRPCGRPCStatusCodeResourceExhausted = "8"
+ // FAILED_PRECONDITION
+ AttributeRPCGRPCStatusCodeFailedPrecondition = "9"
+ // ABORTED
+ AttributeRPCGRPCStatusCodeAborted = "10"
+ // OUT_OF_RANGE
+ AttributeRPCGRPCStatusCodeOutOfRange = "11"
+ // UNIMPLEMENTED
+ AttributeRPCGRPCStatusCodeUnimplemented = "12"
+ // INTERNAL
+ AttributeRPCGRPCStatusCodeInternal = "13"
+ // UNAVAILABLE
+ AttributeRPCGRPCStatusCodeUnavailable = "14"
+ // DATA_LOSS
+ AttributeRPCGRPCStatusCodeDataLoss = "15"
+ // UNAUTHENTICATED
+ AttributeRPCGRPCStatusCodeUnauthenticated = "16"
+)
+
+// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/).
+const (
+ // Protocol version as in jsonrpc property of request/response. Since JSON-RPC 1.0
+ // does not specify this, the value can be omitted.
+ //
+ // Type: string
+ // Required: If missing, it is assumed to be "1.0".
+ // Stability: stable
+ // Examples: '2.0', '1.0'
+ AttributeRPCJsonrpcVersion = "rpc.jsonrpc.version"
+ // id property of request or response. Since protocol allows id to be int, string,
+ // null or missing (for notifications), value is expected to be cast to string for
+ // simplicity. Use empty string in case of null value. Omit entirely if this is a
+ // notification.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '10', 'request-7', ''
+ AttributeRPCJsonrpcRequestID = "rpc.jsonrpc.request_id"
+ // error.code property of response if it is an error response.
+ //
+ // Type: int
+ // Required: If missing, response is assumed to be successful.
+ // Stability: stable
+ // Examples: -32700, 100
+ AttributeRPCJsonrpcErrorCode = "rpc.jsonrpc.error_code"
+ // error.message property of response if it is an error response.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'Parse error', 'User already exists'
+ AttributeRPCJsonrpcErrorMessage = "rpc.jsonrpc.error_message"
+)
+
+// RPC received/sent message.
+const (
+ // Whether this is a received or sent message.
+ //
+ // Type: Enum
+ // Required: No
+ // Stability: stable
+ AttributeMessageType = "message.type"
+ // MUST be calculated as two different counters starting from 1: one for sent
+ // messages and one for received messages.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Note: This way we guarantee that the values will be consistent between
+ // different implementations.
+ AttributeMessageID = "message.id"
+ // Compressed size of the message in bytes.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ AttributeMessageCompressedSize = "message.compressed_size"
+ // Uncompressed size of the message in bytes.
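[Editor's aside, not part of the vendored file; the message-size metadata continues after this block.] The rpc.grpc.status_code enum above stores the numeric gRPC code as its decimal string, and message.id is a pair of per-direction counters starting at 1. A minimal, hypothetical sketch of both rules:

package main

import (
	"fmt"
	"strconv"
)

// messageCounters tracks the per-span sent/received counters that back the
// message.id attribute; both sequences start at 1 per the convention above.
type messageCounters struct{ sent, received int }

func (c *messageCounters) nextSent() int     { c.sent++; return c.sent }
func (c *messageCounters) nextReceived() int { c.received++; return c.received }

func main() {
	// 14 is gRPC UNAVAILABLE; the attribute value is its decimal string,
	// matching AttributeRPCGRPCStatusCodeUnavailable ("14") above.
	grpcCode := 14
	var ctr messageCounters
	attrs := map[string]string{
		"rpc.grpc.status_code": strconv.Itoa(grpcCode),
		"message.type":         "SENT",
		"message.id":           strconv.Itoa(ctr.nextSent()),
	}
	fmt.Println(attrs)
}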
+ // + // Type: int + // Required: No + // Stability: stable + AttributeMessageUncompressedSize = "message.uncompressed_size" +) + +const ( + // sent + AttributeMessageTypeSent = "SENT" + // received + AttributeMessageTypeReceived = "RECEIVED" +) + +func GetTraceSemanticConventionAttributeNames() []string { + return []string{ + AttributeAWSLambdaInvokedARN, + AttributeCloudeventsEventID, + AttributeCloudeventsEventSource, + AttributeCloudeventsEventSpecVersion, + AttributeCloudeventsEventType, + AttributeCloudeventsEventSubject, + AttributeOpentracingRefType, + AttributeDBSystem, + AttributeDBConnectionString, + AttributeDBUser, + AttributeDBJDBCDriverClassname, + AttributeDBName, + AttributeDBStatement, + AttributeDBOperation, + AttributeDBMSSQLInstanceName, + AttributeDBCassandraPageSize, + AttributeDBCassandraConsistencyLevel, + AttributeDBCassandraTable, + AttributeDBCassandraIdempotence, + AttributeDBCassandraSpeculativeExecutionCount, + AttributeDBCassandraCoordinatorID, + AttributeDBCassandraCoordinatorDC, + AttributeDBRedisDBIndex, + AttributeDBMongoDBCollection, + AttributeDBSQLTable, + AttributeExceptionType, + AttributeExceptionMessage, + AttributeExceptionStacktrace, + AttributeExceptionEscaped, + AttributeFaaSTrigger, + AttributeFaaSExecution, + AttributeFaaSDocumentCollection, + AttributeFaaSDocumentOperation, + AttributeFaaSDocumentTime, + AttributeFaaSDocumentName, + AttributeFaaSTime, + AttributeFaaSCron, + AttributeFaaSColdstart, + AttributeFaaSInvokedName, + AttributeFaaSInvokedProvider, + AttributeFaaSInvokedRegion, + AttributeNetTransport, + AttributeNetPeerIP, + AttributeNetPeerPort, + AttributeNetPeerName, + AttributeNetHostIP, + AttributeNetHostPort, + AttributeNetHostName, + AttributeNetHostConnectionType, + AttributeNetHostConnectionSubtype, + AttributeNetHostCarrierName, + AttributeNetHostCarrierMcc, + AttributeNetHostCarrierMnc, + AttributeNetHostCarrierIcc, + AttributePeerService, + AttributeEnduserID, + AttributeEnduserRole, + AttributeEnduserScope, + AttributeThreadID, + AttributeThreadName, + AttributeCodeFunction, + AttributeCodeNamespace, + AttributeCodeFilepath, + AttributeCodeLineNumber, + AttributeHTTPMethod, + AttributeHTTPURL, + AttributeHTTPTarget, + AttributeHTTPHost, + AttributeHTTPScheme, + AttributeHTTPStatusCode, + AttributeHTTPFlavor, + AttributeHTTPUserAgent, + AttributeHTTPRequestContentLength, + AttributeHTTPRequestContentLengthUncompressed, + AttributeHTTPResponseContentLength, + AttributeHTTPResponseContentLengthUncompressed, + AttributeHTTPRetryCount, + AttributeHTTPServerName, + AttributeHTTPRoute, + AttributeHTTPClientIP, + AttributeAWSDynamoDBTableNames, + AttributeAWSDynamoDBConsumedCapacity, + AttributeAWSDynamoDBItemCollectionMetrics, + AttributeAWSDynamoDBProvisionedReadCapacity, + AttributeAWSDynamoDBProvisionedWriteCapacity, + AttributeAWSDynamoDBConsistentRead, + AttributeAWSDynamoDBProjection, + AttributeAWSDynamoDBLimit, + AttributeAWSDynamoDBAttributesToGet, + AttributeAWSDynamoDBIndexName, + AttributeAWSDynamoDBSelect, + AttributeAWSDynamoDBGlobalSecondaryIndexes, + AttributeAWSDynamoDBLocalSecondaryIndexes, + AttributeAWSDynamoDBExclusiveStartTable, + AttributeAWSDynamoDBTableCount, + AttributeAWSDynamoDBScanForward, + AttributeAWSDynamoDBSegment, + AttributeAWSDynamoDBTotalSegments, + AttributeAWSDynamoDBCount, + AttributeAWSDynamoDBScannedCount, + AttributeAWSDynamoDBAttributeDefinitions, + AttributeAWSDynamoDBGlobalSecondaryIndexUpdates, + AttributeMessagingSystem, + AttributeMessagingDestination, + 
AttributeMessagingDestinationKind, + AttributeMessagingTempDestination, + AttributeMessagingProtocol, + AttributeMessagingProtocolVersion, + AttributeMessagingURL, + AttributeMessagingMessageID, + AttributeMessagingConversationID, + AttributeMessagingMessagePayloadSizeBytes, + AttributeMessagingMessagePayloadCompressedSizeBytes, + AttributeMessagingOperation, + AttributeMessagingConsumerID, + AttributeMessagingRabbitmqRoutingKey, + AttributeMessagingKafkaMessageKey, + AttributeMessagingKafkaConsumerGroup, + AttributeMessagingKafkaClientID, + AttributeMessagingKafkaPartition, + AttributeMessagingKafkaTombstone, + AttributeMessagingRocketmqNamespace, + AttributeMessagingRocketmqClientGroup, + AttributeMessagingRocketmqClientID, + AttributeMessagingRocketmqMessageType, + AttributeMessagingRocketmqMessageTag, + AttributeMessagingRocketmqMessageKeys, + AttributeMessagingRocketmqConsumptionModel, + AttributeRPCSystem, + AttributeRPCService, + AttributeRPCMethod, + AttributeRPCGRPCStatusCode, + AttributeRPCJsonrpcVersion, + AttributeRPCJsonrpcRequestID, + AttributeRPCJsonrpcErrorCode, + AttributeRPCJsonrpcErrorMessage, + AttributeMessageType, + AttributeMessageID, + AttributeMessageCompressedSize, + AttributeMessageUncompressedSize, + } +} diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.12.0/nonstandard.go b/vendor/go.opentelemetry.io/collector/semconv/v1.12.0/nonstandard.go new file mode 100644 index 00000000000..e7798b15828 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.12.0/nonstandard.go @@ -0,0 +1,11 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/collector/semconv/v1.12.0" + +const ( + OtelLibraryName = "otel.library.name" + OtelLibraryVersion = "otel.library.version" + OtelStatusCode = "otel.status_code" + OtelStatusDescription = "otel.status_description" +) diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.12.0/schema.go b/vendor/go.opentelemetry.io/collector/semconv/v1.12.0/schema.go new file mode 100644 index 00000000000..42bbd0976df --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.12.0/schema.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/collector/semconv/v1.12.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. Conventions packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/ +const SchemaURL = "https://opentelemetry.io/schemas/1.12.0" diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.13.0/generated_resource.go b/vendor/go.opentelemetry.io/collector/semconv/v1.13.0/generated_resource.go index f4922ef4497..af7c567fb6a 100644 --- a/vendor/go.opentelemetry.io/collector/semconv/v1.13.0/generated_resource.go +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.13.0/generated_resource.go @@ -393,12 +393,12 @@ const ( // Take care not to use the "invoked ARN" directly but replace any // alias suffix // with the resolved function version, as the same runtime instance may be - // invokable with + // invocable with // multiple different aliases. //
// - GCP: The URI of the resource
// - Azure: The Fully Qualified Resource ID of the invoked function,
//   not the function app, having the form
- //   /subscriptions//resourceGroups//providers/Microsoft.Web/s
+ //   /subscriptions//resourceGroups//providers/Microsoft.Web/s
//   ites//functions/.
//   This means that a span attribute MUST be used, as an Azure function app can
//   host multiple functions that would usually share
diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.16.0/generated_resource.go b/vendor/go.opentelemetry.io/collector/semconv/v1.16.0/generated_resource.go
new file mode 100644
index 00000000000..3ab9f86d569
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/semconv/v1.16.0/generated_resource.go
@@ -0,0 +1,1168 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv
+
+// The web browser in which the application represented by the resource is running. The `browser.*` attributes MUST be used only for resources that represent applications running in a web browser (regardless of whether running on a mobile or desktop device).
+const (
+ // Array of brand name and version separated by a space
+ //
+ // Type: string[]
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
+ // Note: This value is intended to be taken from the UA client hints API
+ // (navigator.userAgentData.brands).
+ AttributeBrowserBrands = "browser.brands"
+ // The platform on which the browser is running
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 'Windows', 'macOS', 'Android'
+ // Note: This value is intended to be taken from the UA client hints API
+ // (navigator.userAgentData.platform). If unavailable, the legacy
+ // navigator.platform API SHOULD NOT be used instead and this attribute SHOULD be
+ // left unset in order for the values to be consistent.
+ // The list of possible values is defined in the W3C User-Agent Client Hints
+ // specification. Note that some (but not all) of these values can overlap with
+ // values in the os.type and os.name attributes. However, for consistency, the
+ // values in the browser.platform attribute should capture the exact value that
+ // the user agent provides.
+ AttributeBrowserPlatform = "browser.platform"
+ // A boolean that is true if the browser is running on a mobile device
+ //
+ // Type: boolean
+ // Requirement Level: Optional
+ // Stability: stable
+ // Note: This value is intended to be taken from the UA client hints API
+ // (navigator.userAgentData.mobile). If unavailable, this attribute SHOULD be left
+ // unset.
+ AttributeBrowserMobile = "browser.mobile"
+ // Full user-agent string provided by the browser
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36
+ // (KHTML, '
+ // 'like Gecko) Chrome/95.0.4638.54 Safari/537.36'
+ // Note: The user-agent value SHOULD be provided only from browsers that do not
+ // have a mechanism to retrieve brands and platform individually from the User-
+ // Agent Client Hints API. To retrieve the value, the legacy navigator.userAgent
+ // API can be used.
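[Editor's aside, not part of the vendored diff; the browser.user_agent constant follows this block.] The browser.* resource attributes above map onto plain key/value pairs whose keys are the constants defined in this file. A small hypothetical sketch of a client-hints-derived resource, with illustrative values:

package main

import "fmt"

func main() {
	// Illustrative values as a browser SDK might report them from the UA
	// Client Hints API; browser.user_agent would only be filled in when
	// brands/platform cannot be retrieved individually.
	resourceAttrs := map[string]any{
		"browser.brands":   []string{" Not A;Brand 99", "Chromium 99"},
		"browser.platform": "macOS",
		"browser.mobile":   false,
		"browser.language": "en-US",
	}
	fmt.Println(resourceAttrs)
}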
+ AttributeBrowserUserAgent = "browser.user_agent" + // Preferred language of the user using the browser + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // navigator.language. + AttributeBrowserLanguage = "browser.language" +) + +// A cloud environment (e.g. GCP, Azure, AWS) +const ( + // Name of the cloud provider. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + AttributeCloudProvider = "cloud.provider" + // The cloud account ID the resource is assigned to. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '111111111111', 'opentelemetry' + AttributeCloudAccountID = "cloud.account.id" + // The geographical region the resource is running. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'us-central1', 'us-east-1' + // Note: Refer to your provider's docs to see the available regions, for example + // Alibaba Cloud regions, AWS regions, Azure regions, Google Cloud regions, or + // Tencent Cloud regions. + AttributeCloudRegion = "cloud.region" + // Cloud regions often have multiple, isolated locations known as zones to + // increase availability. Availability zone represents the zone where the resource + // is running. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'us-east-1c' + // Note: Availability zones are called "zones" on Alibaba Cloud and + // Google Cloud. + AttributeCloudAvailabilityZone = "cloud.availability_zone" + // The cloud platform in use. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Note: The prefix of the service SHOULD match the one specified in + // cloud.provider. 
+ AttributeCloudPlatform = "cloud.platform" +) + +const ( + // Alibaba Cloud + AttributeCloudProviderAlibabaCloud = "alibaba_cloud" + // Amazon Web Services + AttributeCloudProviderAWS = "aws" + // Microsoft Azure + AttributeCloudProviderAzure = "azure" + // Google Cloud Platform + AttributeCloudProviderGCP = "gcp" + // IBM Cloud + AttributeCloudProviderIbmCloud = "ibm_cloud" + // Tencent Cloud + AttributeCloudProviderTencentCloud = "tencent_cloud" +) + +const ( + // Alibaba Cloud Elastic Compute Service + AttributeCloudPlatformAlibabaCloudECS = "alibaba_cloud_ecs" + // Alibaba Cloud Function Compute + AttributeCloudPlatformAlibabaCloudFc = "alibaba_cloud_fc" + // Red Hat OpenShift on Alibaba Cloud + AttributeCloudPlatformAlibabaCloudOpenshift = "alibaba_cloud_openshift" + // AWS Elastic Compute Cloud + AttributeCloudPlatformAWSEC2 = "aws_ec2" + // AWS Elastic Container Service + AttributeCloudPlatformAWSECS = "aws_ecs" + // AWS Elastic Kubernetes Service + AttributeCloudPlatformAWSEKS = "aws_eks" + // AWS Lambda + AttributeCloudPlatformAWSLambda = "aws_lambda" + // AWS Elastic Beanstalk + AttributeCloudPlatformAWSElasticBeanstalk = "aws_elastic_beanstalk" + // AWS App Runner + AttributeCloudPlatformAWSAppRunner = "aws_app_runner" + // Red Hat OpenShift on AWS (ROSA) + AttributeCloudPlatformAWSOpenshift = "aws_openshift" + // Azure Virtual Machines + AttributeCloudPlatformAzureVM = "azure_vm" + // Azure Container Instances + AttributeCloudPlatformAzureContainerInstances = "azure_container_instances" + // Azure Kubernetes Service + AttributeCloudPlatformAzureAKS = "azure_aks" + // Azure Functions + AttributeCloudPlatformAzureFunctions = "azure_functions" + // Azure App Service + AttributeCloudPlatformAzureAppService = "azure_app_service" + // Azure Red Hat OpenShift + AttributeCloudPlatformAzureOpenshift = "azure_openshift" + // Google Cloud Compute Engine (GCE) + AttributeCloudPlatformGCPComputeEngine = "gcp_compute_engine" + // Google Cloud Run + AttributeCloudPlatformGCPCloudRun = "gcp_cloud_run" + // Google Cloud Kubernetes Engine (GKE) + AttributeCloudPlatformGCPKubernetesEngine = "gcp_kubernetes_engine" + // Google Cloud Functions (GCF) + AttributeCloudPlatformGCPCloudFunctions = "gcp_cloud_functions" + // Google Cloud App Engine (GAE) + AttributeCloudPlatformGCPAppEngine = "gcp_app_engine" + // Red Hat OpenShift on Google Cloud + AttributeCloudPlatformGoogleCloudOpenshift = "google_cloud_openshift" + // Red Hat OpenShift on IBM Cloud + AttributeCloudPlatformIbmCloudOpenshift = "ibm_cloud_openshift" + // Tencent Cloud Cloud Virtual Machine (CVM) + AttributeCloudPlatformTencentCloudCvm = "tencent_cloud_cvm" + // Tencent Cloud Elastic Kubernetes Service (EKS) + AttributeCloudPlatformTencentCloudEKS = "tencent_cloud_eks" + // Tencent Cloud Serverless Cloud Function (SCF) + AttributeCloudPlatformTencentCloudScf = "tencent_cloud_scf" +) + +// Resources used by AWS Elastic Container Service (ECS). +const ( + // The Amazon Resource Name (ARN) of an ECS container instance. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us- + // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AttributeAWSECSContainerARN = "aws.ecs.container.arn" + // The ARN of an ECS cluster. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AttributeAWSECSClusterARN = "aws.ecs.cluster.arn" + // The launch type for an ECS task. 
+ // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + AttributeAWSECSLaunchtype = "aws.ecs.launchtype" + // The ARN of an ECS task definition. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us- + // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' + AttributeAWSECSTaskARN = "aws.ecs.task.arn" + // The task definition family this task definition is a member of. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'opentelemetry-family' + AttributeAWSECSTaskFamily = "aws.ecs.task.family" + // The revision for this task definition. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '8', '26' + AttributeAWSECSTaskRevision = "aws.ecs.task.revision" +) + +const ( + // ec2 + AttributeAWSECSLaunchtypeEC2 = "ec2" + // fargate + AttributeAWSECSLaunchtypeFargate = "fargate" +) + +// Resources used by AWS Elastic Kubernetes Service (EKS). +const ( + // The ARN of an EKS cluster. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AttributeAWSEKSClusterARN = "aws.eks.cluster.arn" +) + +// Resources specific to Amazon Web Services. +const ( + // The name(s) of the AWS log group(s) an application is writing to. + // + // Type: string[] + // Requirement Level: Optional + // Stability: stable + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like multi-container + // applications, where a single application has sidecar containers, and each write + // to their own log group. + AttributeAWSLogGroupNames = "aws.log.group.names" + // The Amazon Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // Requirement Level: Optional + // Stability: stable + // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the log group ARN format documentation. + AttributeAWSLogGroupARNs = "aws.log.group.arns" + // The name(s) of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // Requirement Level: Optional + // Stability: stable + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AttributeAWSLogStreamNames = "aws.log.stream.names" + // The ARN(s) of the AWS log stream(s). + // + // Type: string[] + // Requirement Level: Optional + // Stability: stable + // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log- + // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the log stream ARN format documentation. One log group can contain + // several log streams, so these ARNs necessarily identify both a log group and a + // log stream. + AttributeAWSLogStreamARNs = "aws.log.stream.arns" +) + +// A container instance. +const ( + // Container name used by container runtime. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'opentelemetry-autoconf' + AttributeContainerName = "container.name" + // Container ID. Usually a UUID, as for example used to identify Docker + // containers. The UUID might be abbreviated. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'a3bf90e006b2' + AttributeContainerID = "container.id" + // The container runtime managing this container. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'docker', 'containerd', 'rkt' + AttributeContainerRuntime = "container.runtime" + // Name of the image the container was built on. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'gcr.io/opentelemetry/operator' + AttributeContainerImageName = "container.image.name" + // Container image tag. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '0.1' + AttributeContainerImageTag = "container.image.tag" +) + +// The software deployment. +const ( + // Name of the deployment environment (aka deployment tier). + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'staging', 'production' + AttributeDeploymentEnvironment = "deployment.environment" +) + +// The device on which the process represented by this resource is running. +const ( + // A unique identifier representing the device + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values outlined + // below. This value is not an advertising identifier and MUST NOT be used as + // such. On iOS (Swift or Objective-C), this value MUST be equal to the vendor + // identifier. On Android (Java or Kotlin), this value MUST be equal to the + // Firebase Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found here on best + // practices and exact implementation details. Caution should be taken when + // storing personal data or anything which can identify a user. GDPR and data + // protection laws may apply, ensure you do your own due diligence. + AttributeDeviceID = "device.id" + // The model identifier for the device + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine readable version of the + // model identifier rather than the market or consumer-friendly name of the + // device. + AttributeDeviceModelIdentifier = "device.model.identifier" + // The marketing name for the device model + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human readable version of the + // device model rather than a machine readable alternative. + AttributeDeviceModelName = "device.model.name" + // The name of the device manufacturer + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via Build. iOS apps SHOULD hardcode + // the value Apple. + AttributeDeviceManufacturer = "device.manufacturer" +) + +// A serverless instance. +const ( + // The name of the single function that this runtime instance executes. + // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // code.namespace/code.function + // span attributes).For some cloud providers, the above definition is ambiguous. 
+ // The following
+ // definition of function name MUST be used for this attribute
+ // (and consequently the span name) for the listed cloud providers/products:
+ // - Azure: The full name /, i.e., function app name
+ //   followed by a forward slash followed by the function name (this form
+ //   can also be seen in the resource JSON for the function).
+ //   This means that a span attribute MUST be used, as an Azure function
+ //   app can host multiple functions that would usually share
+ //   a TracerProvider (see also the faas.id attribute).
+ AttributeFaaSName = "faas.name"
+ // The unique ID of the single function that this runtime instance executes.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function'
+ // Note: On some cloud providers, it may not be possible to determine the full ID
+ // at startup,
+ // so consider setting faas.id as a span attribute instead. The exact value to use
+ // for faas.id depends on the cloud provider:
+ // - AWS Lambda: The function ARN.
+ //   Take care not to use the "invoked ARN" directly but replace any
+ //   alias suffix
+ //   with the resolved function version, as the same runtime instance may be
+ //   invocable with
+ //   multiple different aliases.
+ // - GCP: The URI of the resource
+ // - Azure: The Fully Qualified Resource ID of the invoked function,
+ //   not the function app, having the form
+ //   /subscriptions//resourceGroups//providers/Microsoft.Web/s
+ //   ites//functions/.
+ //   This means that a span attribute MUST be used, as an Azure function app can
+ //   host multiple functions that would usually share
+ //   a TracerProvider.
+ AttributeFaaSID = "faas.id"
+ // The immutable version of the function being executed.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: '26', 'pinkfroid-00002'
+ // Note: Depending on the cloud provider and platform, use:
+ // - AWS Lambda: The function version
+ //   (an integer represented as a decimal string).
+ // - Google Cloud Run: The revision
+ //   (i.e., the function name plus the revision suffix).
+ // - Google Cloud Functions: The value of the
+ //   K_REVISION environment variable.
+ // - Azure Functions: Not applicable. Do not set this attribute.
+ AttributeFaaSVersion = "faas.version"
+ // The execution environment ID as a string, that will be potentially reused for
+ // other invocations to the same function/function version.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
+ // Note:
+ // - AWS Lambda: Use the (full) log stream name.
    + AttributeFaaSInstance = "faas.instance" + // The amount of memory available to the serverless function in MiB. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 128 + // Note: It's recommended to set this attribute since e.g. too little memory can + // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, + // the environment variable AWS_LAMBDA_FUNCTION_MEMORY_SIZE provides this + // information. + AttributeFaaSMaxMemory = "faas.max_memory" +) + +// A host is defined as a general computing instance. +const ( + // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud + // provider. For non-containerized Linux systems, the machine-id located in + // /etc/machine-id or /var/lib/dbus/machine-id may be used. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'fdbf79e8af94cb7f9e8df36789187052' + AttributeHostID = "host.id" + // Name of the host. On Unix systems, it may contain what the hostname command + // returns, or the fully qualified hostname, or another name specified by the + // user. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'opentelemetry-test' + AttributeHostName = "host.name" + // Type of host. For Cloud, this must be the machine type. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'n1-standard-1' + AttributeHostType = "host.type" + // The CPU architecture the host system is running on. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + AttributeHostArch = "host.arch" + // Name of the VM image or OS install the host was instantiated from. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + AttributeHostImageName = "host.image.name" + // VM image ID. For Cloud, this value is from the provider. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'ami-07b06b442921831e5' + AttributeHostImageID = "host.image.id" + // The version string of the VM image as defined in Version Attributes. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '0.1' + AttributeHostImageVersion = "host.image.version" +) + +const ( + // AMD64 + AttributeHostArchAMD64 = "amd64" + // ARM32 + AttributeHostArchARM32 = "arm32" + // ARM64 + AttributeHostArchARM64 = "arm64" + // Itanium + AttributeHostArchIA64 = "ia64" + // 32-bit PowerPC + AttributeHostArchPPC32 = "ppc32" + // 64-bit PowerPC + AttributeHostArchPPC64 = "ppc64" + // IBM z/Architecture + AttributeHostArchS390x = "s390x" + // 32-bit x86 + AttributeHostArchX86 = "x86" +) + +// A Kubernetes Cluster. +const ( + // The name of the cluster. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'opentelemetry-cluster' + AttributeK8SClusterName = "k8s.cluster.name" +) + +// A Kubernetes Node object. +const ( + // The name of the Node. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'node-1' + AttributeK8SNodeName = "k8s.node.name" + // The UID of the Node. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + AttributeK8SNodeUID = "k8s.node.uid" +) + +// A Kubernetes Namespace. +const ( + // The name of the namespace that the pod is running in. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'default' + AttributeK8SNamespaceName = "k8s.namespace.name" +) + +// A Kubernetes Pod object. +const ( + // The UID of the Pod. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SPodUID = "k8s.pod.uid" + // The name of the Pod. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'opentelemetry-pod-autoconf' + AttributeK8SPodName = "k8s.pod.name" +) + +// A container in a [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). +const ( + // The name of the Container from Pod specification, must be unique within a Pod. + // Container runtime usually uses different globally unique name (container.name). + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'redis' + AttributeK8SContainerName = "k8s.container.name" + // Number of times the container was restarted. This attribute can be used to + // identify a particular container (running or stopped) within a container spec. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 0, 2 + AttributeK8SContainerRestartCount = "k8s.container.restart_count" +) + +// A Kubernetes ReplicaSet object. +const ( + // The UID of the ReplicaSet. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SReplicaSetUID = "k8s.replicaset.uid" + // The name of the ReplicaSet. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'opentelemetry' + AttributeK8SReplicaSetName = "k8s.replicaset.name" +) + +// A Kubernetes Deployment object. +const ( + // The UID of the Deployment. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SDeploymentUID = "k8s.deployment.uid" + // The name of the Deployment. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'opentelemetry' + AttributeK8SDeploymentName = "k8s.deployment.name" +) + +// A Kubernetes StatefulSet object. +const ( + // The UID of the StatefulSet. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SStatefulSetUID = "k8s.statefulset.uid" + // The name of the StatefulSet. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'opentelemetry' + AttributeK8SStatefulSetName = "k8s.statefulset.name" +) + +// A Kubernetes DaemonSet object. +const ( + // The UID of the DaemonSet. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SDaemonSetUID = "k8s.daemonset.uid" + // The name of the DaemonSet. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'opentelemetry' + AttributeK8SDaemonSetName = "k8s.daemonset.name" +) + +// A Kubernetes Job object. +const ( + // The UID of the Job. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SJobUID = "k8s.job.uid" + // The name of the Job. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'opentelemetry' + AttributeK8SJobName = "k8s.job.name" +) + +// A Kubernetes CronJob object. +const ( + // The UID of the CronJob. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SCronJobUID = "k8s.cronjob.uid" + // The name of the CronJob. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'opentelemetry' + AttributeK8SCronJobName = "k8s.cronjob.name" +) + +// The operating system (OS) on which the process represented by this resource is running. +const ( + // The operating system type. + // + // Type: Enum + // Requirement Level: Required + // Stability: stable + AttributeOSType = "os.type" + // Human readable (not intended to be parsed) OS version information, like e.g. + // reported by ver or lsb_release -a commands. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS' + AttributeOSDescription = "os.description" + // Human readable operating system name. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'iOS', 'Android', 'Ubuntu' + AttributeOSName = "os.name" + // The version string of the operating system as defined in Version Attributes. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '14.2.1', '18.04.1' + AttributeOSVersion = "os.version" +) + +const ( + // Microsoft Windows + AttributeOSTypeWindows = "windows" + // Linux + AttributeOSTypeLinux = "linux" + // Apple Darwin + AttributeOSTypeDarwin = "darwin" + // FreeBSD + AttributeOSTypeFreeBSD = "freebsd" + // NetBSD + AttributeOSTypeNetBSD = "netbsd" + // OpenBSD + AttributeOSTypeOpenBSD = "openbsd" + // DragonFly BSD + AttributeOSTypeDragonflyBSD = "dragonflybsd" + // HP-UX (Hewlett Packard Unix) + AttributeOSTypeHPUX = "hpux" + // AIX (Advanced Interactive eXecutive) + AttributeOSTypeAIX = "aix" + // SunOS, Oracle Solaris + AttributeOSTypeSolaris = "solaris" + // IBM z/OS + AttributeOSTypeZOS = "z_os" +) + +// An operating system process. +const ( + // Process identifier (PID). + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 1234 + AttributeProcessPID = "process.pid" + // Parent Process identifier (PID). + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 111 + AttributeProcessParentPID = "process.parent_pid" + // The name of the process executable. On Linux based systems, can be set to the + // Name in proc/[pid]/status. On Windows, can be set to the base name of + // GetProcessImageFileNameW. + // + // Type: string + // Requirement Level: Conditionally Required - See alternative attributes below. + // Stability: stable + // Examples: 'otelcol' + AttributeProcessExecutableName = "process.executable.name" + // The full path to the process executable. On Linux based systems, can be set to + // the target of proc/[pid]/exe. On Windows, can be set to the result of + // GetProcessImageFileNameW. + // + // Type: string + // Requirement Level: Conditionally Required - See alternative attributes below. + // Stability: stable + // Examples: '/usr/bin/cmd/otelcol' + AttributeProcessExecutablePath = "process.executable.path" + // The command used to launch the process (i.e. the command name). 
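[Editor's aside, not part of the vendored file; the process.command note continues right after this block.] The process.* comments here spell out exactly where each value comes from (PID, parent PID, executable path, argv). A small sketch of collecting them with the Go standard library; values shown in a plain map purely for illustration:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	exe, _ := os.Executable() // error ignored for brevity in this sketch
	attrs := map[string]any{
		"process.pid":             os.Getpid(),
		"process.parent_pid":      os.Getppid(),
		"process.executable.name": filepath.Base(exe),
		"process.executable.path": exe,
		// All arguments including the executable itself, i.e. argv.
		"process.command_args": os.Args,
	}
	fmt.Println(attrs)
}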
On Linux based + // systems, can be set to the zeroth string in proc/[pid]/cmdline. On Windows, can + // be set to the first parameter extracted from GetCommandLineW. + // + // Type: string + // Requirement Level: Conditionally Required - See alternative attributes below. + // Stability: stable + // Examples: 'cmd/otelcol' + AttributeProcessCommand = "process.command" + // The full command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of GetCommandLineW. Do not + // set this if you have to assemble it just for monitoring; use + // process.command_args instead. + // + // Type: string + // Requirement Level: Conditionally Required - See alternative attributes below. + // Stability: stable + // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' + AttributeProcessCommandLine = "process.command_line" + // All the command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited strings + // extracted from proc/[pid]/cmdline. For libc-based executables, this would be + // the full argv vector passed to main. + // + // Type: string[] + // Requirement Level: Conditionally Required - See alternative attributes below. + // Stability: stable + // Examples: 'cmd/otecol', '--config=config.yaml' + AttributeProcessCommandArgs = "process.command_args" + // The username of the user that owns the process. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'root' + AttributeProcessOwner = "process.owner" +) + +// The single (language) runtime instance which is monitored. +const ( + // The name of the runtime of this process. For compiled native binaries, this + // SHOULD be the name of the compiler. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'OpenJDK Runtime Environment' + AttributeProcessRuntimeName = "process.runtime.name" + // The version of the runtime of this process, as returned by the runtime without + // modification. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '14.0.2' + AttributeProcessRuntimeVersion = "process.runtime.version" + // An additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + AttributeProcessRuntimeDescription = "process.runtime.description" +) + +// A service instance. +const ( + // Logical name of the service. + // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled services. If + // the value was not specified, SDKs MUST fallback to unknown_service: + // concatenated with process.executable.name, e.g. unknown_service:bash. If + // process.executable.name is not available, the value MUST be set to + // unknown_service. + AttributeServiceName = "service.name" + // A namespace for service.name. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group of + // services, for example the team name that owns a group of services. 
service.name + // is expected to be unique within the same namespace. If service.namespace is not + // specified in the Resource then service.name is expected to be unique for all + // services that have no explicit namespace defined (so the empty/unspecified + // namespace is simply one more valid namespace). Zero-length namespace string is + // assumed equal to unspecified namespace. + AttributeServiceNamespace = "service.namespace" + // The string ID of the service instance. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // service.namespace,service.name pair (in other words + // service.namespace,service.name,service.instance.id triplet MUST be globally + // unique). The ID helps to distinguish instances of the same service that exist + // at the same time (e.g. instances of a horizontally scaled service). It is + // preferable for the ID to be persistent and stay the same for the lifetime of + // the service instance, however it is acceptable that the ID is ephemeral and + // changes during important lifetime events for the service (e.g. service + // restarts). If the service has no inherent unique ID that can be used as the + // value of this attribute it is recommended to generate a random Version 1 or + // Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use + // Version 5, see RFC 4122 for more recommendations). + AttributeServiceInstanceID = "service.instance.id" + // The version string of the service API or implementation. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '2.0.0' + AttributeServiceVersion = "service.version" +) + +// The telemetry SDK used to capture data recorded by the instrumentation libraries. +const ( + // The name of the telemetry SDK as defined above. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'opentelemetry' + AttributeTelemetrySDKName = "telemetry.sdk.name" + // The language of the telemetry SDK. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + AttributeTelemetrySDKLanguage = "telemetry.sdk.language" + // The version string of the telemetry SDK. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '1.2.3' + AttributeTelemetrySDKVersion = "telemetry.sdk.version" + // The version string of the auto instrumentation agent, if used. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '1.2.3' + AttributeTelemetryAutoVersion = "telemetry.auto.version" +) + +const ( + // cpp + AttributeTelemetrySDKLanguageCPP = "cpp" + // dotnet + AttributeTelemetrySDKLanguageDotnet = "dotnet" + // erlang + AttributeTelemetrySDKLanguageErlang = "erlang" + // go + AttributeTelemetrySDKLanguageGo = "go" + // java + AttributeTelemetrySDKLanguageJava = "java" + // nodejs + AttributeTelemetrySDKLanguageNodejs = "nodejs" + // php + AttributeTelemetrySDKLanguagePHP = "php" + // python + AttributeTelemetrySDKLanguagePython = "python" + // ruby + AttributeTelemetrySDKLanguageRuby = "ruby" + // webjs + AttributeTelemetrySDKLanguageWebjs = "webjs" + // swift + AttributeTelemetrySDKLanguageSwift = "swift" +) + +// Resource describing the packaged software running the application code. Web engines are typically executed using process.runtime. +const ( + // The name of the web engine. 
+ // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: 'WildFly' + AttributeWebEngineName = "webengine.name" + // The version of the web engine. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '21.0.0' + AttributeWebEngineVersion = "webengine.version" + // Additional description of the web engine (e.g. detailed version and edition + // information). + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final' + AttributeWebEngineDescription = "webengine.description" +) + +// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's concepts. +const ( + // The name of the instrumentation scope - (InstrumentationScope.Name in OTLP). + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'io.opentelemetry.contrib.mongodb' + AttributeOtelScopeName = "otel.scope.name" + // The version of the instrumentation scope - (InstrumentationScope.Version in + // OTLP). + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '1.0.0' + AttributeOtelScopeVersion = "otel.scope.version" +) + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry Scope's concepts. +const ( + // Deprecated, use the otel.scope.name attribute. + // + // Type: string + // Requirement Level: Optional + // Stability: deprecated + // Examples: 'io.opentelemetry.contrib.mongodb' + AttributeOtelLibraryName = "otel.library.name" + // Deprecated, use the otel.scope.version attribute. + // + // Type: string + // Requirement Level: Optional + // Stability: deprecated + // Examples: '1.0.0' + AttributeOtelLibraryVersion = "otel.library.version" +) + +func GetResourceSemanticConventionAttributeNames() []string { + return []string{ + AttributeBrowserBrands, + AttributeBrowserPlatform, + AttributeBrowserMobile, + AttributeBrowserUserAgent, + AttributeBrowserLanguage, + AttributeCloudProvider, + AttributeCloudAccountID, + AttributeCloudRegion, + AttributeCloudAvailabilityZone, + AttributeCloudPlatform, + AttributeAWSECSContainerARN, + AttributeAWSECSClusterARN, + AttributeAWSECSLaunchtype, + AttributeAWSECSTaskARN, + AttributeAWSECSTaskFamily, + AttributeAWSECSTaskRevision, + AttributeAWSEKSClusterARN, + AttributeAWSLogGroupNames, + AttributeAWSLogGroupARNs, + AttributeAWSLogStreamNames, + AttributeAWSLogStreamARNs, + AttributeContainerName, + AttributeContainerID, + AttributeContainerRuntime, + AttributeContainerImageName, + AttributeContainerImageTag, + AttributeDeploymentEnvironment, + AttributeDeviceID, + AttributeDeviceModelIdentifier, + AttributeDeviceModelName, + AttributeDeviceManufacturer, + AttributeFaaSName, + AttributeFaaSID, + AttributeFaaSVersion, + AttributeFaaSInstance, + AttributeFaaSMaxMemory, + AttributeHostID, + AttributeHostName, + AttributeHostType, + AttributeHostArch, + AttributeHostImageName, + AttributeHostImageID, + AttributeHostImageVersion, + AttributeK8SClusterName, + AttributeK8SNodeName, + AttributeK8SNodeUID, + AttributeK8SNamespaceName, + AttributeK8SPodUID, + AttributeK8SPodName, + AttributeK8SContainerName, + AttributeK8SContainerRestartCount, + AttributeK8SReplicaSetUID, + AttributeK8SReplicaSetName, + AttributeK8SDeploymentUID, + AttributeK8SDeploymentName, + AttributeK8SStatefulSetUID, + AttributeK8SStatefulSetName, + AttributeK8SDaemonSetUID, + AttributeK8SDaemonSetName, + AttributeK8SJobUID, + 
AttributeK8SJobName, + AttributeK8SCronJobUID, + AttributeK8SCronJobName, + AttributeOSType, + AttributeOSDescription, + AttributeOSName, + AttributeOSVersion, + AttributeProcessPID, + AttributeProcessParentPID, + AttributeProcessExecutableName, + AttributeProcessExecutablePath, + AttributeProcessCommand, + AttributeProcessCommandLine, + AttributeProcessCommandArgs, + AttributeProcessOwner, + AttributeProcessRuntimeName, + AttributeProcessRuntimeVersion, + AttributeProcessRuntimeDescription, + AttributeServiceName, + AttributeServiceNamespace, + AttributeServiceInstanceID, + AttributeServiceVersion, + AttributeTelemetrySDKName, + AttributeTelemetrySDKLanguage, + AttributeTelemetrySDKVersion, + AttributeTelemetryAutoVersion, + AttributeWebEngineName, + AttributeWebEngineVersion, + AttributeWebEngineDescription, + AttributeOtelScopeName, + AttributeOtelScopeVersion, + AttributeOtelLibraryName, + AttributeOtelLibraryVersion, + } +} diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.16.0/generated_trace.go b/vendor/go.opentelemetry.io/collector/semconv/v1.16.0/generated_trace.go new file mode 100644 index 00000000000..33912ab9a58 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.16.0/generated_trace.go @@ -0,0 +1,1913 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv + +// This document defines the shared attributes used to report a single exception associated with a span or log. +const ( + // The type of the exception (its fully-qualified class name, if applicable). The + // dynamic type of the exception should be preferred over the static type in + // languages that support it. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'java.net.ConnectException', 'OSError' + AttributeExceptionType = "exception.type" + // The exception message. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly" + AttributeExceptionMessage = "exception.message" + // A stacktrace as a string in the natural representation for the language + // runtime. The representation is to be determined and documented by each language + // SIG. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + AttributeExceptionStacktrace = "exception.stacktrace" +) + +// This document defines attributes for Events represented using Log Records. +const ( + // The name identifies the event. + // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: 'click', 'exception' + AttributeEventName = "event.name" + // The domain identifies the business context for the events. + // + // Type: Enum + // Requirement Level: Required + // Stability: stable + // Note: Events across different domains may have same event.name, yet be + // unrelated events. 
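+ // As a sketch (illustrative values, not mandated by the spec), a click
+ // event from a browser app would carry both attributes together:
+ //
+ //     attrs := map[string]interface{}{
+ //         AttributeEventName:   "click",
+ //         AttributeEventDomain: AttributeEventDomainBrowser,
+ //     }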
+ AttributeEventDomain = "event.domain"
+)
+
+const (
+ // Events from browser apps
+ AttributeEventDomainBrowser = "browser"
+ // Events from mobile apps
+ AttributeEventDomainDevice = "device"
+ // Events from Kubernetes
+ AttributeEventDomainK8S = "k8s"
+)
+
+// Span attributes used by AWS Lambda (in addition to general `faas` attributes).
+const (
+ // The full invoked ARN as provided on the Context passed to the function (the
+ // Lambda-Runtime-Invoked-Function-ARN header on the /runtime/invocation/next
+ // response, where applicable).
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
+ // Note: This may be different from faas.id if an alias is involved.
+ AttributeAWSLambdaInvokedARN = "aws.lambda.invoked_arn"
+)
+
+// This document defines attributes for CloudEvents. CloudEvents is a specification on how to define event data in a standard way. These attributes can be attached to spans when performing operations with CloudEvents, regardless of the protocol being used.
+const (
+ // The event_id uniquely identifies the event.
+ //
+ // Type: string
+ // Requirement Level: Required
+ // Stability: stable
+ // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
+ AttributeCloudeventsEventID = "cloudevents.event_id"
+ // The source identifies the context in which an event happened.
+ //
+ // Type: string
+ // Requirement Level: Required
+ // Stability: stable
+ // Examples: 'https://github.com/cloudevents', '/cloudevents/spec/pull/123', 'my-
+ // service'
+ AttributeCloudeventsEventSource = "cloudevents.event_source"
+ // The version of the CloudEvents specification which the event uses.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: '1.0'
+ AttributeCloudeventsEventSpecVersion = "cloudevents.event_spec_version"
+ // The event_type contains a value describing the type of event related to the
+ // originating occurrence.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 'com.github.pull_request.opened', 'com.example.object.deleted.v2'
+ AttributeCloudeventsEventType = "cloudevents.event_type"
+ // The subject of the event in the context of the event producer (identified by
+ // source).
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 'mynewfile.jpg'
+ AttributeCloudeventsEventSubject = "cloudevents.event_subject"
+)
+
+// This document defines semantic conventions for the OpenTracing Shim
+const (
+ // Parent-child Reference type
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: stable
+ // Note: The causal relationship between a child Span and a parent Span.
+ AttributeOpentracingRefType = "opentracing.ref_type"
+)
+
+const (
+ // The parent Span depends on the child Span in some capacity
+ AttributeOpentracingRefTypeChildOf = "child_of"
+ // The parent Span does not depend in any way on the result of the child Span
+ AttributeOpentracingRefTypeFollowsFrom = "follows_from"
+)
+
+// This document defines the attributes used to perform database client calls.
+const (
+ // An identifier for the database management system (DBMS) product being used. See
+ // below for a list of well-known identifiers.
+ //
+ // Type: Enum
+ // Requirement Level: Required
+ // Stability: stable
+ AttributeDBSystem = "db.system"
+ // The connection string used to connect to the database. It is recommended to
+ // remove embedded credentials.
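+ // For instance, a sanitized value might be recorded as follows (a sketch;
+ // the server name is illustrative and the password has been stripped):
+ //
+ //     attrs := map[string]interface{}{
+ //         AttributeDBSystem:           AttributeDBSystemPostgreSQL,
+ //         AttributeDBConnectionString: "Server=db.example.com;Database=shop;",
+ //     }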
+ // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' + AttributeDBConnectionString = "db.connection_string" + // Username for accessing the database. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'readonly_user', 'reporting_user' + AttributeDBUser = "db.user" + // The fully-qualified class name of the Java Database Connectivity (JDBC) driver + // used to connect. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'org.postgresql.Driver', + // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' + AttributeDBJDBCDriverClassname = "db.jdbc.driver_classname" + // This attribute is used to report the name of the database being accessed. For + // commands that switch the database, this should be set to the target database + // (even if the command fails). + // + // Type: string + // Requirement Level: Conditionally Required - If applicable. + // Stability: stable + // Examples: 'customers', 'main' + // Note: In some SQL databases, the database name to be used is called + // "schema name". In case there are multiple layers that could be + // considered for database name (e.g. Oracle instance name and schema name), the + // database name to be used is the more specific layer (e.g. Oracle schema name). + AttributeDBName = "db.name" + // The database statement being executed. + // + // Type: string + // Requirement Level: Conditionally Required - If applicable and not explicitly + // disabled via instrumentation configuration. + // Stability: stable + // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' + // Note: The value may be sanitized to exclude sensitive information. + AttributeDBStatement = "db.statement" + // The name of the operation being executed, e.g. the MongoDB command name such as + // findAndModify, or the SQL keyword. + // + // Type: string + // Requirement Level: Conditionally Required - If `db.statement` is not + // applicable. + // Stability: stable + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: When setting this to an SQL keyword, it is not recommended to attempt any + // client-side parsing of db.statement just to get this property, but it should be + // set if the operation name is provided by the library being instrumented. If the + // SQL statement has an ambiguous operation, or performs more than one operation, + // this value may be omitted. + AttributeDBOperation = "db.operation" +) + +const ( + // Some other SQL database. Fallback only. 
See notes + AttributeDBSystemOtherSQL = "other_sql" + // Microsoft SQL Server + AttributeDBSystemMSSQL = "mssql" + // MySQL + AttributeDBSystemMySQL = "mysql" + // Oracle Database + AttributeDBSystemOracle = "oracle" + // IBM DB2 + AttributeDBSystemDB2 = "db2" + // PostgreSQL + AttributeDBSystemPostgreSQL = "postgresql" + // Amazon Redshift + AttributeDBSystemRedshift = "redshift" + // Apache Hive + AttributeDBSystemHive = "hive" + // Cloudscape + AttributeDBSystemCloudscape = "cloudscape" + // HyperSQL DataBase + AttributeDBSystemHSQLDB = "hsqldb" + // Progress Database + AttributeDBSystemProgress = "progress" + // SAP MaxDB + AttributeDBSystemMaxDB = "maxdb" + // SAP HANA + AttributeDBSystemHanaDB = "hanadb" + // Ingres + AttributeDBSystemIngres = "ingres" + // FirstSQL + AttributeDBSystemFirstSQL = "firstsql" + // EnterpriseDB + AttributeDBSystemEDB = "edb" + // InterSystems Caché + AttributeDBSystemCache = "cache" + // Adabas (Adaptable Database System) + AttributeDBSystemAdabas = "adabas" + // Firebird + AttributeDBSystemFirebird = "firebird" + // Apache Derby + AttributeDBSystemDerby = "derby" + // FileMaker + AttributeDBSystemFilemaker = "filemaker" + // Informix + AttributeDBSystemInformix = "informix" + // InstantDB + AttributeDBSystemInstantDB = "instantdb" + // InterBase + AttributeDBSystemInterbase = "interbase" + // MariaDB + AttributeDBSystemMariaDB = "mariadb" + // Netezza + AttributeDBSystemNetezza = "netezza" + // Pervasive PSQL + AttributeDBSystemPervasive = "pervasive" + // PointBase + AttributeDBSystemPointbase = "pointbase" + // SQLite + AttributeDBSystemSqlite = "sqlite" + // Sybase + AttributeDBSystemSybase = "sybase" + // Teradata + AttributeDBSystemTeradata = "teradata" + // Vertica + AttributeDBSystemVertica = "vertica" + // H2 + AttributeDBSystemH2 = "h2" + // ColdFusion IMQ + AttributeDBSystemColdfusion = "coldfusion" + // Apache Cassandra + AttributeDBSystemCassandra = "cassandra" + // Apache HBase + AttributeDBSystemHBase = "hbase" + // MongoDB + AttributeDBSystemMongoDB = "mongodb" + // Redis + AttributeDBSystemRedis = "redis" + // Couchbase + AttributeDBSystemCouchbase = "couchbase" + // CouchDB + AttributeDBSystemCouchDB = "couchdb" + // Microsoft Azure Cosmos DB + AttributeDBSystemCosmosDB = "cosmosdb" + // Amazon DynamoDB + AttributeDBSystemDynamoDB = "dynamodb" + // Neo4j + AttributeDBSystemNeo4j = "neo4j" + // Apache Geode + AttributeDBSystemGeode = "geode" + // Elasticsearch + AttributeDBSystemElasticsearch = "elasticsearch" + // Memcached + AttributeDBSystemMemcached = "memcached" + // CockroachDB + AttributeDBSystemCockroachdb = "cockroachdb" + // OpenSearch + AttributeDBSystemOpensearch = "opensearch" +) + +// Connection-level attributes for Microsoft SQL Server +const ( + // The Microsoft SQL Server instance name connecting to. This name is used to + // determine the port of a named instance. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'MSSQLSERVER' + // Note: If setting a db.mssql.instance_name, net.peer.port is no longer required + // (but still recommended if non-standard). + AttributeDBMSSQLInstanceName = "db.mssql.instance_name" +) + +// Call-level attributes for Cassandra +const ( + // The fetch size used for paging, i.e. how many rows will be returned at once. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 5000 + AttributeDBCassandraPageSize = "db.cassandra.page_size" + // The consistency level of the query. Based on consistency values from CQL. 
+ // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + AttributeDBCassandraConsistencyLevel = "db.cassandra.consistency_level" + // The name of the primary table that the operation is acting upon, including the + // keyspace name (if applicable). + // + // Type: string + // Requirement Level: Recommended + // Stability: stable + // Examples: 'mytable' + // Note: This mirrors the db.sql.table attribute but references cassandra rather + // than sql. It is not recommended to attempt any client-side parsing of + // db.statement just to get this property, but it should be set if it is provided + // by the library being instrumented. If the operation is acting upon an anonymous + // table, or more than one table, this value MUST NOT be set. + AttributeDBCassandraTable = "db.cassandra.table" + // Whether or not the query is idempotent. + // + // Type: boolean + // Requirement Level: Optional + // Stability: stable + AttributeDBCassandraIdempotence = "db.cassandra.idempotence" + // The number of times a query was speculatively executed. Not set or 0 if the + // query was not executed speculatively. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 0, 2 + AttributeDBCassandraSpeculativeExecutionCount = "db.cassandra.speculative_execution_count" + // The ID of the coordinating node for a query. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + AttributeDBCassandraCoordinatorID = "db.cassandra.coordinator.id" + // The data center of the coordinating node for a query. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'us-west-2' + AttributeDBCassandraCoordinatorDC = "db.cassandra.coordinator.dc" +) + +const ( + // all + AttributeDBCassandraConsistencyLevelAll = "all" + // each_quorum + AttributeDBCassandraConsistencyLevelEachQuorum = "each_quorum" + // quorum + AttributeDBCassandraConsistencyLevelQuorum = "quorum" + // local_quorum + AttributeDBCassandraConsistencyLevelLocalQuorum = "local_quorum" + // one + AttributeDBCassandraConsistencyLevelOne = "one" + // two + AttributeDBCassandraConsistencyLevelTwo = "two" + // three + AttributeDBCassandraConsistencyLevelThree = "three" + // local_one + AttributeDBCassandraConsistencyLevelLocalOne = "local_one" + // any + AttributeDBCassandraConsistencyLevelAny = "any" + // serial + AttributeDBCassandraConsistencyLevelSerial = "serial" + // local_serial + AttributeDBCassandraConsistencyLevelLocalSerial = "local_serial" +) + +// Call-level attributes for Redis +const ( + // The index of the database being accessed as used in the SELECT command, + // provided as an integer. To be used instead of the generic db.name attribute. + // + // Type: int + // Requirement Level: Conditionally Required - If other than the default database + // (`0`). + // Stability: stable + // Examples: 0, 1, 15 + AttributeDBRedisDBIndex = "db.redis.database_index" +) + +// Call-level attributes for MongoDB +const ( + // The collection being accessed within the database stated in db.name. + // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: 'customers', 'products' + AttributeDBMongoDBCollection = "db.mongodb.collection" +) + +// Call-level attributes for SQL databases +const ( + // The name of the primary table that the operation is acting upon, including the + // database name (if applicable). 
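+ // For example (illustrative values), an operation on the users table in
+ // the public schema would be recorded as:
+ //
+ //     attrs := map[string]interface{}{
+ //         AttributeDBSQLTable: "public.users",
+ //     }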
+ //
+ // Type: string
+ // Requirement Level: Recommended
+ // Stability: stable
+ // Examples: 'public.users', 'customers'
+ // Note: It is not recommended to attempt any client-side parsing of db.statement
+ // just to get this property, but it should be set if it is provided by the
+ // library being instrumented. If the operation is acting upon an anonymous table,
+ // or more than one table, this value MUST NOT be set.
+ AttributeDBSQLTable = "db.sql.table"
+)
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's concepts.
+const (
+ // Name of the code, either "OK" or "ERROR". MUST NOT be set
+ // if the status code is UNSET.
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: stable
+ AttributeOtelStatusCode = "otel.status_code"
+ // Description of the Status if it has a value, otherwise not set.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 'resource not found'
+ AttributeOtelStatusDescription = "otel.status_description"
+)
+
+const (
+ // The operation has been validated by an Application developer or Operator to have completed successfully
+ AttributeOtelStatusCodeOk = "OK"
+ // The operation contains an error
+ AttributeOtelStatusCodeError = "ERROR"
+)
+
+// This semantic convention describes an instance of a function that runs without provisioning or managing of servers (also known as serverless functions or Function as a Service (FaaS)) with spans.
+const (
+ // Type of the trigger which caused this function execution.
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: stable
+ // Note: For the server/consumer span on the incoming side,
+ // faas.trigger MUST be set. Clients invoking FaaS instances usually cannot set
+ // faas.trigger,
+ // since they would typically need to look in the payload to determine
+ // the event type. If clients set it, it should be the same as the
+ // trigger that the corresponding incoming span would have (i.e., this has
+ // nothing to do with the underlying transport used to make the API
+ // call to invoke the lambda, which is often HTTP).
+ AttributeFaaSTrigger = "faas.trigger"
+ // The execution ID of the current function execution.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
+ AttributeFaaSExecution = "faas.execution"
+)
+
+const (
+ // A response to some data source operation such as a database or filesystem read/write
+ AttributeFaaSTriggerDatasource = "datasource"
+ // To provide an answer to an inbound HTTP request
+ AttributeFaaSTriggerHTTP = "http"
+ // A function is set to be executed when messages are sent to a messaging system
+ AttributeFaaSTriggerPubsub = "pubsub"
+ // A function is scheduled to be executed regularly
+ AttributeFaaSTriggerTimer = "timer"
+ // If none of the others apply
+ AttributeFaaSTriggerOther = "other"
+)
+
+// Semantic Convention for FaaS triggered as a response to some data source operation such as a database or filesystem read/write.
+const (
+ // The name of the source on which the triggering operation was performed. For
+ // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos
+ // DB to the database name.
+ //
+ // Type: string
+ // Requirement Level: Required
+ // Stability: stable
+ // Examples: 'myBucketName', 'myDBName'
+ AttributeFaaSDocumentCollection = "faas.document.collection"
+ // Describes the type of the operation that was performed on the data.
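+ // As a sketch, an object created in a bucket named myBucketName
+ // (illustrative) would pair this attribute with the collection:
+ //
+ //     attrs := map[string]interface{}{
+ //         AttributeFaaSDocumentCollection: "myBucketName",
+ //         AttributeFaaSDocumentOperation:  AttributeFaaSDocumentOperationInsert,
+ //     }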
+ // + // Type: Enum + // Requirement Level: Required + // Stability: stable + AttributeFaaSDocumentOperation = "faas.document.operation" + // A string containing the time when the data was accessed in the ISO 8601 format + // expressed in UTC. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + AttributeFaaSDocumentTime = "faas.document.time" + // The document name/table subjected to the operation. For example, in Cloud + // Storage or S3 is the name of the file, and in Cosmos DB the table name. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'myFile.txt', 'myTableName' + AttributeFaaSDocumentName = "faas.document.name" +) + +const ( + // When a new object is created + AttributeFaaSDocumentOperationInsert = "insert" + // When an object is modified + AttributeFaaSDocumentOperationEdit = "edit" + // When an object is deleted + AttributeFaaSDocumentOperationDelete = "delete" +) + +// Semantic Convention for FaaS scheduled to be executed regularly. +const ( + // A string containing the function invocation time in the ISO 8601 format + // expressed in UTC. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + AttributeFaaSTime = "faas.time" + // A string containing the schedule period as Cron Expression. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '0/5 * * * ? *' + AttributeFaaSCron = "faas.cron" +) + +// Contains additional attributes for incoming FaaS spans. +const ( + // A boolean that is true if the serverless function is executed for the first + // time (aka cold-start). + // + // Type: boolean + // Requirement Level: Optional + // Stability: stable + AttributeFaaSColdstart = "faas.coldstart" +) + +// Contains additional attributes for outgoing FaaS spans. +const ( + // The name of the invoked function. + // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: 'my-function' + // Note: SHOULD be equal to the faas.name resource attribute of the invoked + // function. + AttributeFaaSInvokedName = "faas.invoked_name" + // The cloud provider of the invoked function. + // + // Type: Enum + // Requirement Level: Required + // Stability: stable + // Note: SHOULD be equal to the cloud.provider resource attribute of the invoked + // function. + AttributeFaaSInvokedProvider = "faas.invoked_provider" + // The cloud region of the invoked function. + // + // Type: string + // Requirement Level: Conditionally Required - For some cloud providers, like AWS + // or GCP, the region in which a function is hosted is essential to uniquely + // identify the function and also part of its endpoint. Since it's part of the + // endpoint being called, the region is always known to clients. In these cases, + // `faas.invoked_region` MUST be set accordingly. If the region is unknown to the + // client or not required for identifying the invoked function, setting + // `faas.invoked_region` is optional. + // Stability: stable + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the cloud.region resource attribute of the invoked + // function. 
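+ // Taken together, a client span invoking an AWS Lambda function in
+ // eu-central-1 might carry (function name illustrative):
+ //
+ //     attrs := map[string]interface{}{
+ //         AttributeFaaSInvokedName:     "my-function",
+ //         AttributeFaaSInvokedProvider: AttributeFaaSInvokedProviderAWS,
+ //         AttributeFaaSInvokedRegion:   "eu-central-1",
+ //     }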
+ AttributeFaaSInvokedRegion = "faas.invoked_region" +) + +const ( + // Alibaba Cloud + AttributeFaaSInvokedProviderAlibabaCloud = "alibaba_cloud" + // Amazon Web Services + AttributeFaaSInvokedProviderAWS = "aws" + // Microsoft Azure + AttributeFaaSInvokedProviderAzure = "azure" + // Google Cloud Platform + AttributeFaaSInvokedProviderGCP = "gcp" + // Tencent Cloud + AttributeFaaSInvokedProviderTencentCloud = "tencent_cloud" +) + +// These attributes may be used for any network related operation. +const ( + // Transport protocol used. See note below. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + AttributeNetTransport = "net.transport" + // Application layer protocol used. The value SHOULD be normalized to lowercase. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'amqp', 'http', 'mqtt' + AttributeNetAppProtocolName = "net.app.protocol.name" + // Version of the application layer protocol used. See note below. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '3.1.1' + // Note: net.app.protocol.version refers to the version of the protocol used and + // might be different from the protocol client's version. If the HTTP client used + // has a version of 0.27.2, but sends HTTP version 1.1, this attribute should be + // set to 1.1. + AttributeNetAppProtocolVersion = "net.app.protocol.version" + // Remote socket peer name. + // + // Type: string + // Requirement Level: Recommended - If available and different from + // `net.peer.name` and if `net.sock.peer.addr` is set. + // Stability: stable + // Examples: 'proxy.example.com' + AttributeNetSockPeerName = "net.sock.peer.name" + // Remote socket peer address: IPv4 or IPv6 for internet protocols, path for local + // communication, etc. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '127.0.0.1', '/tmp/mysql.sock' + AttributeNetSockPeerAddr = "net.sock.peer.addr" + // Remote socket peer port. + // + // Type: int + // Requirement Level: Recommended - If defined for the address family and if + // different than `net.peer.port` and if `net.sock.peer.addr` is set. + // Stability: stable + // Examples: 16456 + AttributeNetSockPeerPort = "net.sock.peer.port" + // Protocol address family which is used for communication. + // + // Type: Enum + // Requirement Level: Conditionally Required - If different than `inet` and if any + // of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers of telemetry + // SHOULD accept both IPv4 and IPv6 formats for the address in + // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support + // instrumentations that follow previous versions of this document. + // Stability: stable + // Examples: 'inet6', 'bluetooth' + AttributeNetSockFamily = "net.sock.family" + // Logical remote hostname, see note below. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'example.com' + // Note: net.peer.name SHOULD NOT be set if capturing it would require an extra + // DNS lookup. + AttributeNetPeerName = "net.peer.name" + // Logical remote port number + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 80, 8080, 443 + AttributeNetPeerPort = "net.peer.port" + // Logical local hostname or similar, see note below. 
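+ // A server-side sketch pairing this with the local port (values
+ // illustrative):
+ //
+ //     attrs := map[string]interface{}{
+ //         AttributeNetHostName: "localhost",
+ //         AttributeNetHostPort: 8080,
+ //     }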
+ // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'localhost' + AttributeNetHostName = "net.host.name" + // Logical local port number, preferably the one that the peer used to connect + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 8080 + AttributeNetHostPort = "net.host.port" + // Local socket address. Useful in case of a multi-IP host. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '192.168.0.1' + AttributeNetSockHostAddr = "net.sock.host.addr" + // Local socket port number. + // + // Type: int + // Requirement Level: Recommended - If defined for the address family and if + // different than `net.host.port` and if `net.sock.host.addr` is set. + // Stability: stable + // Examples: 35555 + AttributeNetSockHostPort = "net.sock.host.port" + // The internet connection type currently being used by the host. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'wifi' + AttributeNetHostConnectionType = "net.host.connection.type" + // This describes more details regarding the connection.type. It may be the type + // of cell technology connection, but it could be used for describing details + // about a wifi connection. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'LTE' + AttributeNetHostConnectionSubtype = "net.host.connection.subtype" + // The name of the mobile carrier. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'sprint' + AttributeNetHostCarrierName = "net.host.carrier.name" + // The mobile carrier country code. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '310' + AttributeNetHostCarrierMcc = "net.host.carrier.mcc" + // The mobile carrier network code. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '001' + AttributeNetHostCarrierMnc = "net.host.carrier.mnc" + // The ISO 3166-1 alpha-2 2-character country code associated with the mobile + // carrier network. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'DE' + AttributeNetHostCarrierIcc = "net.host.carrier.icc" +) + +const ( + // ip_tcp + AttributeNetTransportTCP = "ip_tcp" + // ip_udp + AttributeNetTransportUDP = "ip_udp" + // Named or anonymous pipe. See note below + AttributeNetTransportPipe = "pipe" + // In-process communication + AttributeNetTransportInProc = "inproc" + // Something else (non IP-based) + AttributeNetTransportOther = "other" +) + +const ( + // IPv4 address + AttributeNetSockFamilyInet = "inet" + // IPv6 address + AttributeNetSockFamilyInet6 = "inet6" + // Unix domain socket path + AttributeNetSockFamilyUnix = "unix" +) + +const ( + // wifi + AttributeNetHostConnectionTypeWifi = "wifi" + // wired + AttributeNetHostConnectionTypeWired = "wired" + // cell + AttributeNetHostConnectionTypeCell = "cell" + // unavailable + AttributeNetHostConnectionTypeUnavailable = "unavailable" + // unknown + AttributeNetHostConnectionTypeUnknown = "unknown" +) + +const ( + // GPRS + AttributeNetHostConnectionSubtypeGprs = "gprs" + // EDGE + AttributeNetHostConnectionSubtypeEdge = "edge" + // UMTS + AttributeNetHostConnectionSubtypeUmts = "umts" + // CDMA + AttributeNetHostConnectionSubtypeCdma = "cdma" + // EVDO Rel. 0 + AttributeNetHostConnectionSubtypeEvdo0 = "evdo_0" + // EVDO Rev. 
A + AttributeNetHostConnectionSubtypeEvdoA = "evdo_a" + // CDMA2000 1XRTT + AttributeNetHostConnectionSubtypeCdma20001xrtt = "cdma2000_1xrtt" + // HSDPA + AttributeNetHostConnectionSubtypeHsdpa = "hsdpa" + // HSUPA + AttributeNetHostConnectionSubtypeHsupa = "hsupa" + // HSPA + AttributeNetHostConnectionSubtypeHspa = "hspa" + // IDEN + AttributeNetHostConnectionSubtypeIden = "iden" + // EVDO Rev. B + AttributeNetHostConnectionSubtypeEvdoB = "evdo_b" + // LTE + AttributeNetHostConnectionSubtypeLte = "lte" + // EHRPD + AttributeNetHostConnectionSubtypeEhrpd = "ehrpd" + // HSPAP + AttributeNetHostConnectionSubtypeHspap = "hspap" + // GSM + AttributeNetHostConnectionSubtypeGsm = "gsm" + // TD-SCDMA + AttributeNetHostConnectionSubtypeTdScdma = "td_scdma" + // IWLAN + AttributeNetHostConnectionSubtypeIwlan = "iwlan" + // 5G NR (New Radio) + AttributeNetHostConnectionSubtypeNr = "nr" + // 5G NRNSA (New Radio Non-Standalone) + AttributeNetHostConnectionSubtypeNrnsa = "nrnsa" + // LTE CA + AttributeNetHostConnectionSubtypeLteCa = "lte_ca" +) + +// Operations that access some remote service. +const ( + // The service.name of the remote service. SHOULD be equal to the actual + // service.name resource attribute of the remote service if any. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'AuthTokenCache' + AttributePeerService = "peer.service" +) + +// These attributes may be used for any operation with an authenticated and/or authorized enduser. +const ( + // Username or client_id extracted from the access token or Authorization header + // in the inbound request from outside the system. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'username' + AttributeEnduserID = "enduser.id" + // Actual/assumed role the client is making the request under extracted from token + // or application security context. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'admin' + AttributeEnduserRole = "enduser.role" + // Scopes or granted authorities the client currently possesses extracted from + // token or application security context. The value would come from the scope + // associated with an OAuth 2.0 Access Token or an attribute value in a SAML 2.0 + // Assertion. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'read:message, write:files' + AttributeEnduserScope = "enduser.scope" +) + +// These attributes may be used for any operation to store information about a thread that started a span. +const ( + // Current "managed" thread ID (as opposed to OS thread ID). + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 42 + AttributeThreadID = "thread.id" + // Current thread name. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'main' + AttributeThreadName = "thread.name" +) + +// These attributes allow to report this unit of code and therefore to provide more context about the span. +const ( + // The method or function name, or equivalent (usually rightmost part of the code + // unit's name). + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'serveRequest' + AttributeCodeFunction = "code.function" + // The "namespace" within which code.function is defined. Usually the + // qualified class or module name, such that code.namespace + some separator + + // code.function form a unique identifier for the code unit. 
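+ // For instance, a handler com.example.MyHTTPService.serveRequest
+ // (illustrative) would be reported as:
+ //
+ //     attrs := map[string]interface{}{
+ //         AttributeCodeNamespace: "com.example.MyHTTPService",
+ //         AttributeCodeFunction:  "serveRequest",
+ //     }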
+ // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'com.example.MyHTTPService' + AttributeCodeNamespace = "code.namespace" + // The source code file name that identifies the code unit as uniquely as possible + // (preferably an absolute file path). + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + AttributeCodeFilepath = "code.filepath" + // The line number in code.filepath best representing the operation. It SHOULD + // point within the code unit named in code.function. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 42 + AttributeCodeLineNumber = "code.lineno" +) + +// This document defines semantic conventions for HTTP client and server Spans. +const ( + // HTTP request method. + // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: 'GET', 'POST', 'HEAD' + AttributeHTTPMethod = "http.method" + // HTTP response status code. + // + // Type: int + // Requirement Level: Conditionally Required - If and only if one was + // received/sent. + // Stability: stable + // Examples: 200 + AttributeHTTPStatusCode = "http.status_code" + // Kind of HTTP protocol used. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Note: If net.transport is not specified, it can be assumed to be IP.TCP except + // if http.flavor is QUIC, in which case IP.UDP is assumed. + AttributeHTTPFlavor = "http.flavor" + // Value of the HTTP User-Agent header sent by the client. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' + AttributeHTTPUserAgent = "http.user_agent" + // The size of the request payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as the + // Content-Length header. For requests using transport encoding, this should be + // the compressed size. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 3495 + AttributeHTTPRequestContentLength = "http.request_content_length" + // The size of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as the + // Content-Length header. For requests using transport encoding, this should be + // the compressed size. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 3495 + AttributeHTTPResponseContentLength = "http.response_content_length" +) + +const ( + // HTTP/1.0 + AttributeHTTPFlavorHTTP10 = "1.0" + // HTTP/1.1 + AttributeHTTPFlavorHTTP11 = "1.1" + // HTTP/2 + AttributeHTTPFlavorHTTP20 = "2.0" + // HTTP/3 + AttributeHTTPFlavorHTTP30 = "3.0" + // SPDY protocol + AttributeHTTPFlavorSPDY = "SPDY" + // QUIC protocol + AttributeHTTPFlavorQUIC = "QUIC" +) + +// Semantic Convention for HTTP Client +const ( + // Full HTTP request URL in the form scheme://host[:port]/path?query[#fragment]. + // Usually the fragment is not transmitted over HTTP, but if it is known, it + // should be included nevertheless. + // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' + // Note: http.url MUST NOT contain credentials passed via URL in form of + // https://username:password@www.example.com/. 
In such case the attribute's value
+ // should be https://www.example.com/.
+ AttributeHTTPURL = "http.url"
+ // The ordinal number of request resending attempt (for any reason, including
+ // redirects).
+ //
+ // Type: int
+ // Requirement Level: Recommended - if and only if the request was retried.
+ // Stability: stable
+ // Examples: 3
+ // Note: The resend count SHOULD be updated each time an HTTP request gets resent
+ // by the client, regardless of what was the cause of the resending (e.g.
+ // redirection, authorization failure, 503 Server Unavailable, network issues, or
+ // any other).
+ AttributeHTTPResendCount = "http.resend_count"
+)
+
+// Semantic Convention for HTTP Server
+const (
+ // The URI scheme identifying the used protocol.
+ //
+ // Type: string
+ // Requirement Level: Required
+ // Stability: stable
+ // Examples: 'http', 'https'
+ AttributeHTTPScheme = "http.scheme"
+ // The full request target as passed in an HTTP request line or equivalent.
+ //
+ // Type: string
+ // Requirement Level: Required
+ // Stability: stable
+ // Examples: '/path/12314/?q=ddds'
+ AttributeHTTPTarget = "http.target"
+ // The matched route (path template in the format used by the respective server
+ // framework). See note below.
+ //
+ // Type: string
+ // Requirement Level: Conditionally Required - If and only if it's available
+ // Stability: stable
+ // Examples: '/users/:userID?', '{controller}/{action}/{id?}'
+ // Note: 'http.route' MUST NOT be populated when this is not supported by the HTTP
+ // server framework as the route attribute should have low-cardinality and the URI
+ // path can NOT substitute it.
+ AttributeHTTPRoute = "http.route"
+ // The IP address of the original client behind all proxies, if known (e.g. from
+ // X-Forwarded-For).
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: '83.164.160.102'
+ // Note: This is not necessarily the same as net.sock.peer.addr, which would
+ // identify the network-level peer, which may be a proxy. This attribute should be
+ // set when a source of information different
+ // from the one used for net.sock.peer.addr is available, even if that other
+ // source just confirms the same value as net.sock.peer.addr.
+ // Rationale: For net.sock.peer.addr, one typically does not know if it
+ // comes from a proxy, reverse proxy, or the actual client. Setting
+ // http.client_ip when it's the same as net.sock.peer.addr means that
+ // one is at least somewhat confident that the address is not that of
+ // the closest proxy.
+ AttributeHTTPClientIP = "http.client_ip"
+)
+
+// Attributes that exist for multiple DynamoDB request types.
+const (
+ // The keys in the RequestItems object field.
+ //
+ // Type: string[]
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 'Users', 'Cats'
+ AttributeAWSDynamoDBTableNames = "aws.dynamodb.table_names"
+ // The JSON-serialized value of each item in the ConsumedCapacity response field.
+ // + // Type: string[] + // Requirement Level: Optional + // Stability: stable + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : { + // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, + // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": + // "string", "WriteCapacityUnits": number }' + AttributeAWSDynamoDBConsumedCapacity = "aws.dynamodb.consumed_capacity" + // The JSON-serialized value of the ItemCollectionMetrics response field. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, + // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : + // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": + // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }' + AttributeAWSDynamoDBItemCollectionMetrics = "aws.dynamodb.item_collection_metrics" + // The value of the ProvisionedThroughput.ReadCapacityUnits request parameter. + // + // Type: double + // Requirement Level: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AttributeAWSDynamoDBProvisionedReadCapacity = "aws.dynamodb.provisioned_read_capacity" + // The value of the ProvisionedThroughput.WriteCapacityUnits request parameter. + // + // Type: double + // Requirement Level: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AttributeAWSDynamoDBProvisionedWriteCapacity = "aws.dynamodb.provisioned_write_capacity" + // The value of the ConsistentRead request parameter. + // + // Type: boolean + // Requirement Level: Optional + // Stability: stable + AttributeAWSDynamoDBConsistentRead = "aws.dynamodb.consistent_read" + // The value of the ProjectionExpression request parameter. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems, + // ProductReviews' + AttributeAWSDynamoDBProjection = "aws.dynamodb.projection" + // The value of the Limit request parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 10 + AttributeAWSDynamoDBLimit = "aws.dynamodb.limit" + // The value of the AttributesToGet request parameter. + // + // Type: string[] + // Requirement Level: Optional + // Stability: stable + // Examples: 'lives', 'id' + AttributeAWSDynamoDBAttributesToGet = "aws.dynamodb.attributes_to_get" + // The value of the IndexName request parameter. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'name_to_group' + AttributeAWSDynamoDBIndexName = "aws.dynamodb.index_name" + // The value of the Select request parameter. 
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 'ALL_ATTRIBUTES', 'COUNT'
+ AttributeAWSDynamoDBSelect = "aws.dynamodb.select"
+)
+
+// DynamoDB.CreateTable
+const (
+ // The JSON-serialized value of each item of the GlobalSecondaryIndexes request
+ // field.
+ //
+ // Type: string[]
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string",
+ // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ],
+ // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits":
+ // number, "WriteCapacityUnits": number } }'
+ AttributeAWSDynamoDBGlobalSecondaryIndexes = "aws.dynamodb.global_secondary_indexes"
+ // The JSON-serialized value of each item of the LocalSecondaryIndexes request
+ // field.
+ //
+ // Type: string[]
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes":
+ // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string",
+ // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ],
+ // "ProjectionType": "string" } }'
+ AttributeAWSDynamoDBLocalSecondaryIndexes = "aws.dynamodb.local_secondary_indexes"
+)
+
+// DynamoDB.ListTables
+const (
+ // The value of the ExclusiveStartTableName request parameter.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 'Users', 'CatsTable'
+ AttributeAWSDynamoDBExclusiveStartTable = "aws.dynamodb.exclusive_start_table"
+ // The number of items in the TableNames response parameter.
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 20
+ AttributeAWSDynamoDBTableCount = "aws.dynamodb.table_count"
+)
+
+// DynamoDB.Query
+const (
+ // The value of the ScanIndexForward request parameter.
+ //
+ // Type: boolean
+ // Requirement Level: Optional
+ // Stability: stable
+ AttributeAWSDynamoDBScanForward = "aws.dynamodb.scan_forward"
+)
+
+// DynamoDB.Scan
+const (
+ // The value of the Segment request parameter.
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 10
+ AttributeAWSDynamoDBSegment = "aws.dynamodb.segment"
+ // The value of the TotalSegments request parameter.
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 100
+ AttributeAWSDynamoDBTotalSegments = "aws.dynamodb.total_segments"
+ // The value of the Count response parameter.
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 10
+ AttributeAWSDynamoDBCount = "aws.dynamodb.count"
+ // The value of the ScannedCount response parameter.
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 50
+ AttributeAWSDynamoDBScannedCount = "aws.dynamodb.scanned_count"
+)
+
+// DynamoDB.UpdateTable
+const (
+ // The JSON-serialized value of each item in the AttributeDefinitions request
+ // field.
+ //
+ // Type: string[]
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+ AttributeAWSDynamoDBAttributeDefinitions = "aws.dynamodb.attribute_definitions"
+ // The JSON-serialized value of each item in the GlobalSecondaryIndexUpdates
+ // request field.
+ // + // Type: string[] + // Requirement Level: Optional + // Stability: stable + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }' + AttributeAWSDynamoDBGlobalSecondaryIndexUpdates = "aws.dynamodb.global_secondary_index_updates" +) + +// This document defines semantic conventions to apply when instrumenting the GraphQL implementation. They map GraphQL operations to attributes on a Span. +const ( + // The name of the operation being executed. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'findBookByID' + AttributeGraphqlOperationName = "graphql.operation.name" + // The type of the operation being executed. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'query', 'mutation', 'subscription' + AttributeGraphqlOperationType = "graphql.operation.type" + // The GraphQL document being executed. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'query findBookByID { bookByID(id: ?) { name } }' + // Note: The value may be sanitized to exclude sensitive information. + AttributeGraphqlDocument = "graphql.document" +) + +const ( + // GraphQL query + AttributeGraphqlOperationTypeQuery = "query" + // GraphQL mutation + AttributeGraphqlOperationTypeMutation = "mutation" + // GraphQL subscription + AttributeGraphqlOperationTypeSubscription = "subscription" +) + +// This document defines the attributes used in messaging systems. +const ( + // A string identifying the messaging system. + // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' + AttributeMessagingSystem = "messaging.system" + // The message destination name. This might be equal to the span name but is + // required nevertheless. + // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + AttributeMessagingDestination = "messaging.destination" + // The kind of message destination + // + // Type: Enum + // Requirement Level: Conditionally Required - If the message destination is + // either a `queue` or `topic`. + // Stability: stable + AttributeMessagingDestinationKind = "messaging.destination_kind" + // A boolean that is true if the message destination is temporary. + // + // Type: boolean + // Requirement Level: Conditionally Required - If value is `true`. When missing, + // the value is assumed to be `false`. + // Stability: stable + AttributeMessagingTempDestination = "messaging.temp_destination" + // The name of the transport protocol. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'AMQP', 'MQTT' + AttributeMessagingProtocol = "messaging.protocol" + // The version of the transport protocol. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '0.9.1' + AttributeMessagingProtocolVersion = "messaging.protocol_version" + // Connection string. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'tibjmsnaming://localhost:7222', + // 'https://queue.amazonaws.com/80398EXAMPLE/MyQueue' + AttributeMessagingURL = "messaging.url" + // A value used by the messaging system as an identifier for the message, + // represented as a string. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + AttributeMessagingMessageID = "messaging.message_id" + // The conversation ID identifying the conversation to which the message belongs, + // represented as a string. Sometimes called "Correlation ID". + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'MyConversationID' + AttributeMessagingConversationID = "messaging.conversation_id" + // The (uncompressed) size of the message payload in bytes. Also use this + // attribute if it is unknown whether the compressed or uncompressed payload size + // is reported. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 2738 + AttributeMessagingMessagePayloadSizeBytes = "messaging.message_payload_size_bytes" + // The compressed size of the message payload in bytes. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 2048 + AttributeMessagingMessagePayloadCompressedSizeBytes = "messaging.message_payload_compressed_size_bytes" +) + +const ( + // A message sent to a queue + AttributeMessagingDestinationKindQueue = "queue" + // A message sent to a topic + AttributeMessagingDestinationKindTopic = "topic" +) + +// Semantic convention for a consumer of messages received from a messaging system +const ( + // A string identifying the kind of message consumption as defined in the + // Operation names section above. If the operation is "send", this + // attribute MUST NOT be set, since the operation can be inferred from the span + // kind in that case. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + AttributeMessagingOperation = "messaging.operation" + // The identifier for the consumer receiving a message. For Kafka, set it to + // {messaging.kafka.consumer_group} - {messaging.kafka.client_id}, if both are + // present, or only messaging.kafka.consumer_group. For brokers, such as RabbitMQ + // and Artemis, set it to the client_id of the client consuming the message. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'mygroup - client-6' + AttributeMessagingConsumerID = "messaging.consumer_id" +) + +const ( + // receive + AttributeMessagingOperationReceive = "receive" + // process + AttributeMessagingOperationProcess = "process" +) + +// Attributes for RabbitMQ +const ( + // RabbitMQ message routing key. + // + // Type: string + // Requirement Level: Conditionally Required - If not empty. + // Stability: stable + // Examples: 'myKey' + AttributeMessagingRabbitmqRoutingKey = "messaging.rabbitmq.routing_key" +) + +// Attributes for Apache Kafka +const ( + // Message keys in Kafka are used for grouping alike messages to ensure they're + // processed on the same partition. They differ from messaging.message_id in that + // they're not unique. If the key is null, the attribute MUST NOT be set. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'myKey' + // Note: If the key type is not string, its string representation has to be + // supplied for the attribute.
If the key has no unambiguous, canonical string + // form, don't include its value. + AttributeMessagingKafkaMessageKey = "messaging.kafka.message_key" + // Name of the Kafka Consumer Group that is handling the message. Only applies to + // consumers, not producers. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'my-group' + AttributeMessagingKafkaConsumerGroup = "messaging.kafka.consumer_group" + // Client ID for the Consumer or Producer that is handling the message. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'client-5' + AttributeMessagingKafkaClientID = "messaging.kafka.client_id" + // Partition the message is sent to. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 2 + AttributeMessagingKafkaPartition = "messaging.kafka.partition" + // The offset of a record in the corresponding Kafka partition. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 42 + AttributeMessagingKafkaMessageOffset = "messaging.kafka.message.offset" + // A boolean that is true if the message is a tombstone. + // + // Type: boolean + // Requirement Level: Conditionally Required - If value is `true`. When missing, + // the value is assumed to be `false`. + // Stability: stable + AttributeMessagingKafkaTombstone = "messaging.kafka.tombstone" +) + +// Attributes for Apache RocketMQ +const ( + // Namespace of RocketMQ resources; resources in different namespaces are + // individual. + // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: 'myNamespace' + AttributeMessagingRocketmqNamespace = "messaging.rocketmq.namespace" + // Name of the RocketMQ producer/consumer group that is handling the message. The + // client type is identified by the SpanKind. + // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: 'myConsumerGroup' + AttributeMessagingRocketmqClientGroup = "messaging.rocketmq.client_group" + // The unique identifier for each client. + // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: 'myhost@8742@s8083jm' + AttributeMessagingRocketmqClientID = "messaging.rocketmq.client_id" + // The timestamp in milliseconds that the delay message is expected to be + // delivered to consumer. + // + // Type: int + // Requirement Level: Conditionally Required - If the message type is delay and + // delay time level is not specified. + // Stability: stable + // Examples: 1665987217045 + AttributeMessagingRocketmqDeliveryTimestamp = "messaging.rocketmq.delivery_timestamp" + // The delay time level for delay message, which determines the message delay + // time. + // + // Type: int + // Requirement Level: Conditionally Required - If the message type is delay and + // delivery timestamp is not specified. + // Stability: stable + // Examples: 3 + AttributeMessagingRocketmqDelayTimeLevel = "messaging.rocketmq.delay_time_level" + // It is essential for FIFO messages. Messages that belong to the same message + // group are always processed one by one within the same consumer group. + // + // Type: string + // Requirement Level: Conditionally Required - If the message type is FIFO. + // Stability: stable + // Examples: 'myMessageGroup' + AttributeMessagingRocketmqMessageGroup = "messaging.rocketmq.message_group" + // Type of message.
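A brief usage sketch for the messaging and Kafka attributes above, again assuming the collector's pdata API; the topic, group, and client names are invented, and the consumer id is composed from the group and client id exactly as the messaging.consumer_id note describes.

package semconvexample

import (
	"go.opentelemetry.io/collector/pdata/ptrace"
	semconv "go.opentelemetry.io/collector/semconv/v1.16.0"
)

// annotateKafkaProcess sketches a consumer-side "process" span for a Kafka message.
func annotateKafkaProcess(span ptrace.Span) {
	span.SetKind(ptrace.SpanKindConsumer)
	attrs := span.Attributes()
	attrs.PutStr(semconv.AttributeMessagingSystem, "kafka")
	attrs.PutStr(semconv.AttributeMessagingDestination, "MyTopic")
	attrs.PutStr(semconv.AttributeMessagingDestinationKind, semconv.AttributeMessagingDestinationKindTopic)
	attrs.PutStr(semconv.AttributeMessagingOperation, semconv.AttributeMessagingOperationProcess)
	attrs.PutStr(semconv.AttributeMessagingKafkaConsumerGroup, "my-group")
	attrs.PutStr(semconv.AttributeMessagingKafkaClientID, "client-5")
	// Consumer id combines group and client id when both are present.
	attrs.PutStr(semconv.AttributeMessagingConsumerID, "my-group - client-5")
	attrs.PutInt(semconv.AttributeMessagingKafkaPartition, 2)
	attrs.PutInt(semconv.AttributeMessagingKafkaMessageOffset, 42)
}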
+ // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + AttributeMessagingRocketmqMessageType = "messaging.rocketmq.message_type" + // The secondary classifier of message besides topic. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'tagA' + AttributeMessagingRocketmqMessageTag = "messaging.rocketmq.message_tag" + // Key(s) of message, another way to mark message besides message id. + // + // Type: string[] + // Requirement Level: Optional + // Stability: stable + // Examples: 'keyA', 'keyB' + AttributeMessagingRocketmqMessageKeys = "messaging.rocketmq.message_keys" + // Model of message consumption. This only applies to consumer spans. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + AttributeMessagingRocketmqConsumptionModel = "messaging.rocketmq.consumption_model" +) + +const ( + // Normal message + AttributeMessagingRocketmqMessageTypeNormal = "normal" + // FIFO message + AttributeMessagingRocketmqMessageTypeFifo = "fifo" + // Delay message + AttributeMessagingRocketmqMessageTypeDelay = "delay" + // Transaction message + AttributeMessagingRocketmqMessageTypeTransaction = "transaction" +) + +const ( + // Clustering consumption model + AttributeMessagingRocketmqConsumptionModelClustering = "clustering" + // Broadcasting consumption model + AttributeMessagingRocketmqConsumptionModelBroadcasting = "broadcasting" +) + +// This document defines semantic conventions for remote procedure calls. +const ( + // A string identifying the remoting system. See below for a list of well-known + // identifiers. + // + // Type: Enum + // Requirement Level: Required + // Stability: stable + AttributeRPCSystem = "rpc.system" + // The full (logical) name of the service being called, including its package + // name, if applicable. + // + // Type: string + // Requirement Level: Recommended + // Stability: stable + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing class. + // The code.namespace attribute may be used to store the latter (despite the + // attribute name, it may include a class name; e.g., class with method actually + // executing the call on the server side, RPC client stub class on the client + // side). + AttributeRPCService = "rpc.service" + // The name of the (logical) method being called, must be equal to the $method + // part in the span name. + // + // Type: string + // Requirement Level: Recommended + // Stability: stable + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The code.function attribute may be used to store the latter + // (e.g., method actually executing the call on the server side, RPC client stub + // method on the client side). + AttributeRPCMethod = "rpc.method" +) + +const ( + // gRPC + AttributeRPCSystemGRPC = "grpc" + // Java RMI + AttributeRPCSystemJavaRmi = "java_rmi" + // .NET WCF + AttributeRPCSystemDotnetWcf = "dotnet_wcf" + // Apache Dubbo + AttributeRPCSystemApacheDubbo = "apache_dubbo" +) + +// Tech-specific attributes for gRPC. +const ( + // The numeric status code of the gRPC request. 
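A sketch of the RPC attributes on a gRPC client span, assuming the pdata API; the service and method names are invented. Note that the status-code enum members listed just below are string-valued digits ("0" for OK, and so on), so this sketch parses one before storing the conventionally numeric rpc.grpc.status_code.

package semconvexample

import (
	"strconv"

	"go.opentelemetry.io/collector/pdata/ptrace"
	semconv "go.opentelemetry.io/collector/semconv/v1.16.0"
)

// annotateGRPCClientCall sketches the RPC attributes for a gRPC client span.
func annotateGRPCClientCall(span ptrace.Span) {
	span.SetName("myservice.EchoService/exampleMethod") // hypothetical
	span.SetKind(ptrace.SpanKindClient)
	attrs := span.Attributes()
	attrs.PutStr(semconv.AttributeRPCSystem, semconv.AttributeRPCSystemGRPC)
	attrs.PutStr(semconv.AttributeRPCService, "myservice.EchoService")
	attrs.PutStr(semconv.AttributeRPCMethod, "exampleMethod")
	// The generated constant is the numeric code as a string; convert it
	// if an int attribute is desired.
	if code, err := strconv.Atoi(semconv.AttributeRPCGRPCStatusCodeOk); err == nil {
		attrs.PutInt(semconv.AttributeRPCGRPCStatusCode, int64(code))
	}
}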
+ // + // Type: Enum + // Requirement Level: Required + // Stability: stable + AttributeRPCGRPCStatusCode = "rpc.grpc.status_code" +) + +const ( + // OK + AttributeRPCGRPCStatusCodeOk = "0" + // CANCELLED + AttributeRPCGRPCStatusCodeCancelled = "1" + // UNKNOWN + AttributeRPCGRPCStatusCodeUnknown = "2" + // INVALID_ARGUMENT + AttributeRPCGRPCStatusCodeInvalidArgument = "3" + // DEADLINE_EXCEEDED + AttributeRPCGRPCStatusCodeDeadlineExceeded = "4" + // NOT_FOUND + AttributeRPCGRPCStatusCodeNotFound = "5" + // ALREADY_EXISTS + AttributeRPCGRPCStatusCodeAlreadyExists = "6" + // PERMISSION_DENIED + AttributeRPCGRPCStatusCodePermissionDenied = "7" + // RESOURCE_EXHAUSTED + AttributeRPCGRPCStatusCodeResourceExhausted = "8" + // FAILED_PRECONDITION + AttributeRPCGRPCStatusCodeFailedPrecondition = "9" + // ABORTED + AttributeRPCGRPCStatusCodeAborted = "10" + // OUT_OF_RANGE + AttributeRPCGRPCStatusCodeOutOfRange = "11" + // UNIMPLEMENTED + AttributeRPCGRPCStatusCodeUnimplemented = "12" + // INTERNAL + AttributeRPCGRPCStatusCodeInternal = "13" + // UNAVAILABLE + AttributeRPCGRPCStatusCodeUnavailable = "14" + // DATA_LOSS + AttributeRPCGRPCStatusCodeDataLoss = "15" + // UNAUTHENTICATED + AttributeRPCGRPCStatusCodeUnauthenticated = "16" +) + +// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). +const ( + // Protocol version as in jsonrpc property of request/response. Since JSON-RPC 1.0 + // does not specify this, the value can be omitted. + // + // Type: string + // Requirement Level: Conditionally Required - If other than the default version + // (`1.0`) + // Stability: stable + // Examples: '2.0', '1.0' + AttributeRPCJsonrpcVersion = "rpc.jsonrpc.version" + // id property of request or response. Since protocol allows id to be int, string, + // null or missing (for notifications), value is expected to be cast to string for + // simplicity. Use empty string in case of null value. Omit entirely if this is a + // notification. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '10', 'request-7', '' + AttributeRPCJsonrpcRequestID = "rpc.jsonrpc.request_id" + // error.code property of response if it is an error response. + // + // Type: int + // Requirement Level: Conditionally Required - If response is not successful. + // Stability: stable + // Examples: -32700, 100 + AttributeRPCJsonrpcErrorCode = "rpc.jsonrpc.error_code" + // error.message property of response if it is an error response. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'Parse error', 'User already exists' + AttributeRPCJsonrpcErrorMessage = "rpc.jsonrpc.error_message" +) + +func GetTraceSemanticConventionAttributeNames() []string { + return []string{ + AttributeExceptionType, + AttributeExceptionMessage, + AttributeExceptionStacktrace, + AttributeEventName, + AttributeEventDomain, + AttributeAWSLambdaInvokedARN, + AttributeCloudeventsEventID, + AttributeCloudeventsEventSource, + AttributeCloudeventsEventSpecVersion, + AttributeCloudeventsEventType, + AttributeCloudeventsEventSubject, + AttributeOpentracingRefType, + AttributeDBSystem, + AttributeDBConnectionString, + AttributeDBUser, + AttributeDBJDBCDriverClassname, + AttributeDBName, + AttributeDBStatement, + AttributeDBOperation, + AttributeDBMSSQLInstanceName, + AttributeDBCassandraPageSize, + AttributeDBCassandraConsistencyLevel, + AttributeDBCassandraTable, + AttributeDBCassandraIdempotence, + AttributeDBCassandraSpeculativeExecutionCount, + AttributeDBCassandraCoordinatorID, + AttributeDBCassandraCoordinatorDC, + AttributeDBRedisDBIndex, + AttributeDBMongoDBCollection, + AttributeDBSQLTable, + AttributeOtelStatusCode, + AttributeOtelStatusDescription, + AttributeFaaSTrigger, + AttributeFaaSExecution, + AttributeFaaSDocumentCollection, + AttributeFaaSDocumentOperation, + AttributeFaaSDocumentTime, + AttributeFaaSDocumentName, + AttributeFaaSTime, + AttributeFaaSCron, + AttributeFaaSColdstart, + AttributeFaaSInvokedName, + AttributeFaaSInvokedProvider, + AttributeFaaSInvokedRegion, + AttributeNetTransport, + AttributeNetAppProtocolName, + AttributeNetAppProtocolVersion, + AttributeNetSockPeerName, + AttributeNetSockPeerAddr, + AttributeNetSockPeerPort, + AttributeNetSockFamily, + AttributeNetPeerName, + AttributeNetPeerPort, + AttributeNetHostName, + AttributeNetHostPort, + AttributeNetSockHostAddr, + AttributeNetSockHostPort, + AttributeNetHostConnectionType, + AttributeNetHostConnectionSubtype, + AttributeNetHostCarrierName, + AttributeNetHostCarrierMcc, + AttributeNetHostCarrierMnc, + AttributeNetHostCarrierIcc, + AttributePeerService, + AttributeEnduserID, + AttributeEnduserRole, + AttributeEnduserScope, + AttributeThreadID, + AttributeThreadName, + AttributeCodeFunction, + AttributeCodeNamespace, + AttributeCodeFilepath, + AttributeCodeLineNumber, + AttributeHTTPMethod, + AttributeHTTPStatusCode, + AttributeHTTPFlavor, + AttributeHTTPUserAgent, + AttributeHTTPRequestContentLength, + AttributeHTTPResponseContentLength, + AttributeHTTPURL, + AttributeHTTPResendCount, + AttributeHTTPScheme, + AttributeHTTPTarget, + AttributeHTTPRoute, + AttributeHTTPClientIP, + AttributeAWSDynamoDBTableNames, + AttributeAWSDynamoDBConsumedCapacity, + AttributeAWSDynamoDBItemCollectionMetrics, + AttributeAWSDynamoDBProvisionedReadCapacity, + AttributeAWSDynamoDBProvisionedWriteCapacity, + AttributeAWSDynamoDBConsistentRead, + AttributeAWSDynamoDBProjection, + AttributeAWSDynamoDBLimit, + AttributeAWSDynamoDBAttributesToGet, + AttributeAWSDynamoDBIndexName, + AttributeAWSDynamoDBSelect, + AttributeAWSDynamoDBGlobalSecondaryIndexes, + AttributeAWSDynamoDBLocalSecondaryIndexes, + AttributeAWSDynamoDBExclusiveStartTable, + AttributeAWSDynamoDBTableCount, + AttributeAWSDynamoDBScanForward, + AttributeAWSDynamoDBSegment, + AttributeAWSDynamoDBTotalSegments, + AttributeAWSDynamoDBCount, + AttributeAWSDynamoDBScannedCount, + AttributeAWSDynamoDBAttributeDefinitions, + AttributeAWSDynamoDBGlobalSecondaryIndexUpdates, + 
AttributeGraphqlOperationName, + AttributeGraphqlOperationType, + AttributeGraphqlDocument, + AttributeMessagingSystem, + AttributeMessagingDestination, + AttributeMessagingDestinationKind, + AttributeMessagingTempDestination, + AttributeMessagingProtocol, + AttributeMessagingProtocolVersion, + AttributeMessagingURL, + AttributeMessagingMessageID, + AttributeMessagingConversationID, + AttributeMessagingMessagePayloadSizeBytes, + AttributeMessagingMessagePayloadCompressedSizeBytes, + AttributeMessagingOperation, + AttributeMessagingConsumerID, + AttributeMessagingRabbitmqRoutingKey, + AttributeMessagingKafkaMessageKey, + AttributeMessagingKafkaConsumerGroup, + AttributeMessagingKafkaClientID, + AttributeMessagingKafkaPartition, + AttributeMessagingKafkaMessageOffset, + AttributeMessagingKafkaTombstone, + AttributeMessagingRocketmqNamespace, + AttributeMessagingRocketmqClientGroup, + AttributeMessagingRocketmqClientID, + AttributeMessagingRocketmqDeliveryTimestamp, + AttributeMessagingRocketmqDelayTimeLevel, + AttributeMessagingRocketmqMessageGroup, + AttributeMessagingRocketmqMessageType, + AttributeMessagingRocketmqMessageTag, + AttributeMessagingRocketmqMessageKeys, + AttributeMessagingRocketmqConsumptionModel, + AttributeRPCSystem, + AttributeRPCService, + AttributeRPCMethod, + AttributeRPCGRPCStatusCode, + AttributeRPCJsonrpcVersion, + AttributeRPCJsonrpcRequestID, + AttributeRPCJsonrpcErrorCode, + AttributeRPCJsonrpcErrorMessage, + } +} diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.16.0/nonstandard.go b/vendor/go.opentelemetry.io/collector/semconv/v1.16.0/nonstandard.go new file mode 100644 index 00000000000..9346ba9dcd5 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.16.0/nonstandard.go @@ -0,0 +1,11 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/collector/semconv/v1.16.0" + +const ( + OtelLibraryName = "otel.library.name" + OtelLibraryVersion = "otel.library.version" + OtelStatusCode = "otel.status_code" + OtelStatusDescription = "otel.status_description" +) diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.16.0/schema.go b/vendor/go.opentelemetry.io/collector/semconv/v1.16.0/schema.go new file mode 100644 index 00000000000..bb438059e67 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.16.0/schema.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/collector/semconv/v1.16.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. Conventions packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/ +const SchemaURL = "https://opentelemetry.io/schemas/1.16.0" diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.18.0/generated_resource.go b/vendor/go.opentelemetry.io/collector/semconv/v1.18.0/generated_resource.go index f2a04b8666e..049f689b978 100644 --- a/vendor/go.opentelemetry.io/collector/semconv/v1.18.0/generated_resource.go +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.18.0/generated_resource.go @@ -423,12 +423,12 @@ const ( // Take care not to use the "invoked ARN" directly but replace any // alias suffix // with the resolved function version, as the same runtime instance may be - // invokable with + // invocable with // multiple different aliases.
//   - GCP: The URI of the resource //   - Azure: The Fully Qualified Resource ID of the invoked function, // not the function app, having the form - // /subscriptions//resourceGroups//providers/Microsoft.Web/s + // /subscriptions//resourceGroups//providers/Microsoft.Web/s // ites//functions/. // This means that a span attribute MUST be used, as an Azure function app can // host multiple functions that would usually share diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/doc.go b/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/doc.go new file mode 100644 index 00000000000..ad9e94a49f8 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/doc.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package semconv implements OpenTelemetry semantic conventions. +// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the v1.25.0 +// version of the OpenTelemetry semantic conventions. +package semconv // import "go.opentelemetry.io/collector/semconv/v1.25.0" diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/generated_attribute_group.go b/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/generated_attribute_group.go new file mode 100644 index 00000000000..5bd8f5b8fac --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/generated_attribute_group.go @@ -0,0 +1,4796 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv + +// Attributes for Events represented using Log Records. +const ( + // Identifies the class / type of event. + // + // Type: string + // Requirement Level: Required + // Stability: experimental + // Examples: 'browser.mouse.click', 'device.app.lifecycle' + // Note: Event names are subject to the same rules as attribute names. Notably, + // event names are namespaced to avoid collisions and provide a clean separation + // of semantics for events in separate domains like browser, mobile, and + // kubernetes. + AttributeEventName = "event.name" +) + +// The attributes described in this section are rather generic. They may be +// used in any Log Record they apply to. +const ( + // A unique identifier for the Log Record. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV' + // Note: If an id is provided, other log records with the same id will be + // considered duplicates and can be removed safely. This means that two + // distinguishable log records MUST have different values. + // The id MAY be a Universally Unique Lexicographically Sortable Identifier + // (ULID), but other identifiers (e.g. UUID) may be used as needed. + AttributeLogRecordUID = "log.record.uid" +) + +// Describes Log attributes +const ( + // The stream associated with the log. See below for a list of well-known values. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeLogIostream = "log.iostream" +) + +const ( + // Logs from stdout stream + AttributeLogIostreamStdout = "stdout" + // Events from stderr stream + AttributeLogIostreamStderr = "stderr" +) + +// A file to which log was emitted. +const ( + // The basename of the file.
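Since v1.25.0 introduces the event and log-record attributes above, a small sketch of how they might be set on a pdata log record before the log-file attributes are listed; the body text, event name, and ULID are illustrative only.

package semconvexample

import (
	"go.opentelemetry.io/collector/pdata/plog"
	semconv "go.opentelemetry.io/collector/semconv/v1.25.0"
)

// newEventLogRecord sketches setting event.name and log.record.uid on a log record.
func newEventLogRecord() plog.Logs {
	logs := plog.NewLogs()
	lr := logs.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().LogRecords().AppendEmpty()
	lr.Body().SetStr("button clicked") // illustrative body
	lr.Attributes().PutStr(semconv.AttributeEventName, "browser.mouse.click")
	lr.Attributes().PutStr(semconv.AttributeLogRecordUID, "01ARZ3NDEKTSV4RRFFQ69G5FAV")
	lr.Attributes().PutStr(semconv.AttributeLogIostream, semconv.AttributeLogIostreamStdout)
	return logs
}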
+ // + // Type: string + // Requirement Level: Recommended + // Stability: experimental + // Examples: 'audit.log' + AttributeLogFileName = "log.file.name" + // The basename of the file, with symlinks resolved. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'uuid.log' + AttributeLogFileNameResolved = "log.file.name_resolved" + // The full path to the file. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/var/log/mysql/audit.log' + AttributeLogFilePath = "log.file.path" + // The full path to the file, with symlinks resolved. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/var/lib/docker/uuid.log' + AttributeLogFilePathResolved = "log.file.path_resolved" +) + +// Describes Database attributes +const ( + // The name of the connection pool; unique within the instrumented application. In + // case the connection pool implementation doesn't provide a name, instrumentation + // should use a combination of server.address and server.port attributes formatted + // as server.address:server.port. + // + // Type: string + // Requirement Level: Required + // Stability: experimental + // Examples: 'myDataSource' + AttributePoolName = "pool.name" + // The state of a connection in the pool + // + // Type: Enum + // Requirement Level: Required + // Stability: experimental + // Examples: 'idle' + AttributeState = "state" +) + +const ( + // idle + AttributeStateIdle = "idle" + // used + AttributeStateUsed = "used" +) + +// ASP.NET Core attributes +const ( + // Rate-limiting result, shows whether the lease was acquired or contains a + // rejection reason + // + // Type: Enum + // Requirement Level: Required + // Stability: stable + // Examples: 'acquired', 'request_canceled' + AttributeAspnetcoreRateLimitingResult = "aspnetcore.rate_limiting.result" + // Full type name of the IExceptionHandler implementation that handled the + // exception. + // + // Type: string + // Requirement Level: Conditionally Required - if and only if the exception was + // handled by this handler. + // Stability: stable + // Examples: 'Contoso.MyHandler' + AttributeAspnetcoreDiagnosticsHandlerType = "aspnetcore.diagnostics.handler.type" + // Rate limiting policy name. + // + // Type: string + // Requirement Level: Conditionally Required - if the matched endpoint for the + // request had a rate-limiting policy. + // Stability: stable + // Examples: 'fixed', 'sliding', 'token' + AttributeAspnetcoreRateLimitingPolicy = "aspnetcore.rate_limiting.policy" + // Flag indicating if request was handled by the application pipeline. + // + // Type: boolean + // Requirement Level: Conditionally Required - if and only if the request was not + // handled. + // Stability: stable + // Examples: True + AttributeAspnetcoreRequestIsUnhandled = "aspnetcore.request.is_unhandled" + // A value that indicates whether the matched route is a fallback route. + // + // Type: boolean + // Requirement Level: Conditionally Required - If and only if a route was + // successfully matched. 
+ // Stability: stable + // Examples: True + AttributeAspnetcoreRoutingIsFallback = "aspnetcore.routing.is_fallback" +) + +const ( + // Lease was acquired + AttributeAspnetcoreRateLimitingResultAcquired = "acquired" + // Lease request was rejected by the endpoint limiter + AttributeAspnetcoreRateLimitingResultEndpointLimiter = "endpoint_limiter" + // Lease request was rejected by the global limiter + AttributeAspnetcoreRateLimitingResultGlobalLimiter = "global_limiter" + // Lease request was canceled + AttributeAspnetcoreRateLimitingResultRequestCanceled = "request_canceled" +) + +// SignalR attributes +const ( + // SignalR HTTP connection closure status. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'app_shutdown', 'timeout' + AttributeSignalrConnectionStatus = "signalr.connection.status" + // SignalR transport type + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'web_sockets', 'long_polling' + AttributeSignalrTransport = "signalr.transport" +) + +const ( + // The connection was closed normally + AttributeSignalrConnectionStatusNormalClosure = "normal_closure" + // The connection was closed due to a timeout + AttributeSignalrConnectionStatusTimeout = "timeout" + // The connection was closed because the app is shutting down + AttributeSignalrConnectionStatusAppShutdown = "app_shutdown" +) + +const ( + // ServerSentEvents protocol + AttributeSignalrTransportServerSentEvents = "server_sent_events" + // LongPolling protocol + AttributeSignalrTransportLongPolling = "long_polling" + // WebSockets protocol + AttributeSignalrTransportWebSockets = "web_sockets" +) + +// Describes JVM buffer metric attributes. +const ( + // Name of the buffer pool. + // + // Type: string + // Requirement Level: Recommended + // Stability: experimental + // Examples: 'mapped', 'direct' + // Note: Pool names are generally obtained via BufferPoolMXBean#getName(). + AttributeJvmBufferPoolName = "jvm.buffer.pool.name" +) + +// Describes JVM memory metric attributes. +const ( + // Name of the memory pool. + // + // Type: string + // Requirement Level: Recommended + // Stability: stable + // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space' + // Note: Pool names are generally obtained via MemoryPoolMXBean#getName(). + AttributeJvmMemoryPoolName = "jvm.memory.pool.name" + // The type of memory. + // + // Type: Enum + // Requirement Level: Recommended + // Stability: stable + // Examples: 'heap', 'non_heap' + AttributeJvmMemoryType = "jvm.memory.type" +) + +const ( + // Heap memory + AttributeJvmMemoryTypeHeap = "heap" + // Non-heap memory + AttributeJvmMemoryTypeNonHeap = "non_heap" +) + +// Attributes for process CPU metrics. +const ( + // The CPU state for this data point. A process SHOULD be characterized either by + // data points with no state labels, or only data points with state labels. 
+ // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeProcessCPUState = "process.cpu.state" +) + +const ( + // system + AttributeProcessCPUStateSystem = "system" + // user + AttributeProcessCPUStateUser = "user" + // wait + AttributeProcessCPUStateWait = "wait" +) + +// Describes System metric attributes +const ( + // The device identifier + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '(identifier)' + AttributeSystemDevice = "system.device" +) + +// Describes System CPU metric attributes +const ( + // The logical CPU number [0..n-1] + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1 + AttributeSystemCPULogicalNumber = "system.cpu.logical_number" + // The CPU state for this data point. A system's CPU SHOULD be characterized + // either by data points with no state labels, or only data points with state + // labels. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'idle', 'interrupt' + AttributeSystemCPUState = "system.cpu.state" +) + +const ( + // user + AttributeSystemCPUStateUser = "user" + // system + AttributeSystemCPUStateSystem = "system" + // nice + AttributeSystemCPUStateNice = "nice" + // idle + AttributeSystemCPUStateIdle = "idle" + // iowait + AttributeSystemCPUStateIowait = "iowait" + // interrupt + AttributeSystemCPUStateInterrupt = "interrupt" + // steal + AttributeSystemCPUStateSteal = "steal" +) + +// Describes System Memory metric attributes +const ( + // The memory state + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'free', 'cached' + AttributeSystemMemoryState = "system.memory.state" +) + +const ( + // used + AttributeSystemMemoryStateUsed = "used" + // free + AttributeSystemMemoryStateFree = "free" + // shared + AttributeSystemMemoryStateShared = "shared" + // buffers + AttributeSystemMemoryStateBuffers = "buffers" + // cached + AttributeSystemMemoryStateCached = "cached" +) + +// Describes System Memory Paging metric attributes +const ( + // The paging access direction + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'in' + AttributeSystemPagingDirection = "system.paging.direction" + // The memory paging state + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'free' + AttributeSystemPagingState = "system.paging.state" + // The memory paging type + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'minor' + AttributeSystemPagingType = "system.paging.type" +) + +const ( + // in + AttributeSystemPagingDirectionIn = "in" + // out + AttributeSystemPagingDirectionOut = "out" +) + +const ( + // used + AttributeSystemPagingStateUsed = "used" + // free + AttributeSystemPagingStateFree = "free" +) + +const ( + // major + AttributeSystemPagingTypeMajor = "major" + // minor + AttributeSystemPagingTypeMinor = "minor" +) + +// Describes Filesystem metric attributes +const ( + // The filesystem mode + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'rw, ro' + AttributeSystemFilesystemMode = "system.filesystem.mode" + // The filesystem mount path + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/mnt/data' + AttributeSystemFilesystemMountpoint = "system.filesystem.mountpoint" + // The filesystem state + // + 
// Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'used' + AttributeSystemFilesystemState = "system.filesystem.state" + // The filesystem type + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'ext4' + AttributeSystemFilesystemType = "system.filesystem.type" +) + +const ( + // used + AttributeSystemFilesystemStateUsed = "used" + // free + AttributeSystemFilesystemStateFree = "free" + // reserved + AttributeSystemFilesystemStateReserved = "reserved" +) + +const ( + // fat32 + AttributeSystemFilesystemTypeFat32 = "fat32" + // exfat + AttributeSystemFilesystemTypeExfat = "exfat" + // ntfs + AttributeSystemFilesystemTypeNtfs = "ntfs" + // refs + AttributeSystemFilesystemTypeRefs = "refs" + // hfsplus + AttributeSystemFilesystemTypeHfsplus = "hfsplus" + // ext4 + AttributeSystemFilesystemTypeExt4 = "ext4" +) + +// Describes Network metric attributes +const ( + // A stateless protocol MUST NOT set this attribute + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'close_wait' + AttributeSystemNetworkState = "system.network.state" +) + +const ( + // close + AttributeSystemNetworkStateClose = "close" + // close_wait + AttributeSystemNetworkStateCloseWait = "close_wait" + // closing + AttributeSystemNetworkStateClosing = "closing" + // delete + AttributeSystemNetworkStateDelete = "delete" + // established + AttributeSystemNetworkStateEstablished = "established" + // fin_wait_1 + AttributeSystemNetworkStateFinWait1 = "fin_wait_1" + // fin_wait_2 + AttributeSystemNetworkStateFinWait2 = "fin_wait_2" + // last_ack + AttributeSystemNetworkStateLastAck = "last_ack" + // listen + AttributeSystemNetworkStateListen = "listen" + // syn_recv + AttributeSystemNetworkStateSynRecv = "syn_recv" + // syn_sent + AttributeSystemNetworkStateSynSent = "syn_sent" + // time_wait + AttributeSystemNetworkStateTimeWait = "time_wait" +) + +// Describes System Process metric attributes +const ( + // The process state, e.g., Linux Process State Codes + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'running' + AttributeSystemProcessStatus = "system.process.status" +) + +const ( + // running + AttributeSystemProcessStatusRunning = "running" + // sleeping + AttributeSystemProcessStatusSleeping = "sleeping" + // stopped + AttributeSystemProcessStatusStopped = "stopped" + // defunct + AttributeSystemProcessStatusDefunct = "defunct" +) + +// The Android platform on which the Android application is running. +const ( + // Uniquely identifies the framework API revision offered by a version + // (os.version) of the android operating system. More information can be found + // here. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '33', '32' + AttributeAndroidOSAPILevel = "android.os.api_level" +) + +// Attributes for AWS DynamoDB. +const ( + // The JSON-serialized value of each item in the AttributeDefinitions request + // field. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' + AttributeAWSDynamoDBAttributeDefinitions = "aws.dynamodb.attribute_definitions" + // The value of the AttributesToGet request parameter. 
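The state-style enums above are meant to qualify metric data points rather than spans. A sketch, assuming the pdata metrics API and using system.memory.usage as a plausible metric name; the name and the byte values are illustrative, not taken from this file.

package semconvexample

import (
	"go.opentelemetry.io/collector/pdata/pmetric"
	semconv "go.opentelemetry.io/collector/semconv/v1.25.0"
)

// memoryUsageGauge sketches per-state data points qualified by system.memory.state.
func memoryUsageGauge() pmetric.Metrics {
	metrics := pmetric.NewMetrics()
	m := metrics.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty()
	m.SetName("system.memory.usage") // assumed conventional metric name
	gauge := m.SetEmptyGauge()
	used := gauge.DataPoints().AppendEmpty()
	used.SetIntValue(4 << 30) // 4 GiB, illustrative
	used.Attributes().PutStr(semconv.AttributeSystemMemoryState, semconv.AttributeSystemMemoryStateUsed)
	free := gauge.DataPoints().AppendEmpty()
	free.SetIntValue(12 << 30) // 12 GiB, illustrative
	free.Attributes().PutStr(semconv.AttributeSystemMemoryState, semconv.AttributeSystemMemoryStateFree)
	return metrics
}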
+ // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'lives', 'id' + AttributeAWSDynamoDBAttributesToGet = "aws.dynamodb.attributes_to_get" + // The value of the ConsistentRead request parameter. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeAWSDynamoDBConsistentRead = "aws.dynamodb.consistent_read" + // The JSON-serialized value of each item in the ConsumedCapacity response field. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : { + // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, + // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": + // "string", "WriteCapacityUnits": number }' + AttributeAWSDynamoDBConsumedCapacity = "aws.dynamodb.consumed_capacity" + // The value of the Count response parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 10 + AttributeAWSDynamoDBCount = "aws.dynamodb.count" + // The value of the ExclusiveStartTableName request parameter. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Users', 'CatsTable' + AttributeAWSDynamoDBExclusiveStartTable = "aws.dynamodb.exclusive_start_table" + // The JSON-serialized value of each item in the GlobalSecondaryIndexUpdates + // request field. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }' + AttributeAWSDynamoDBGlobalSecondaryIndexUpdates = "aws.dynamodb.global_secondary_index_updates" + // The JSON-serialized value of each item of the GlobalSecondaryIndexes request + // field + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits": + // number, "WriteCapacityUnits": number } }' + AttributeAWSDynamoDBGlobalSecondaryIndexes = "aws.dynamodb.global_secondary_indexes" + // The value of the IndexName request parameter. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'name_to_group' + AttributeAWSDynamoDBIndexName = "aws.dynamodb.index_name" + // The JSON-serialized value of the ItemCollectionMetrics response field. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, + // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : + // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": + // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }' + AttributeAWSDynamoDBItemCollectionMetrics = "aws.dynamodb.item_collection_metrics" + // The value of the Limit request parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 10 + AttributeAWSDynamoDBLimit = "aws.dynamodb.limit" + // The JSON-serialized value of each item of the LocalSecondaryIndexes request + // field. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes": + // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" } }' + AttributeAWSDynamoDBLocalSecondaryIndexes = "aws.dynamodb.local_secondary_indexes" + // The value of the ProjectionExpression request parameter. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems, + // ProductReviews' + AttributeAWSDynamoDBProjection = "aws.dynamodb.projection" + // The value of the ProvisionedThroughput.ReadCapacityUnits request parameter. + // + // Type: double + // Requirement Level: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AttributeAWSDynamoDBProvisionedReadCapacity = "aws.dynamodb.provisioned_read_capacity" + // The value of the ProvisionedThroughput.WriteCapacityUnits request parameter. + // + // Type: double + // Requirement Level: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AttributeAWSDynamoDBProvisionedWriteCapacity = "aws.dynamodb.provisioned_write_capacity" + // The value of the ScanIndexForward request parameter. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeAWSDynamoDBScanForward = "aws.dynamodb.scan_forward" + // The value of the ScannedCount response parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 50 + AttributeAWSDynamoDBScannedCount = "aws.dynamodb.scanned_count" + // The value of the Segment request parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 10 + AttributeAWSDynamoDBSegment = "aws.dynamodb.segment" + // The value of the Select request parameter. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AttributeAWSDynamoDBSelect = "aws.dynamodb.select" + // The number of items in the TableNames response parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 20 + AttributeAWSDynamoDBTableCount = "aws.dynamodb.table_count" + // The keys in the RequestItems object field. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Users', 'Cats' + AttributeAWSDynamoDBTableNames = "aws.dynamodb.table_names" + // The value of the TotalSegments request parameter. 
+ // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 100 + AttributeAWSDynamoDBTotalSegments = "aws.dynamodb.total_segments" +) + +// The web browser attributes +const ( + // Array of brand name and version separated by a space + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the UA client hints API + // (navigator.userAgentData.brands). + AttributeBrowserBrands = "browser.brands" + // Preferred language of the user using the browser + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // navigator.language. + AttributeBrowserLanguage = "browser.language" + // A boolean that is true if the browser is running on a mobile device + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + // Note: This value is intended to be taken from the UA client hints API + // (navigator.userAgentData.mobile). If unavailable, this attribute SHOULD be left + // unset. + AttributeBrowserMobile = "browser.mobile" + // The platform on which the browser is running + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the UA client hints API + // (navigator.userAgentData.platform). If unavailable, the legacy + // navigator.platform API SHOULD NOT be used instead and this attribute SHOULD be + // left unset in order for the values to be consistent. + // The list of possible values is defined in the W3C User-Agent Client Hints + // specification. Note that some (but not all) of these values can overlap with + // values in the os.type and os.name attributes. However, for consistency, the + // values in the browser.platform attribute should capture the exact value that + // the user agent provides. + AttributeBrowserPlatform = "browser.platform" +) + +// These attributes may be used to describe the client in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). This covers all TCP +// network interactions since TCP is connection-based and one side initiates +// the connection (an exception is made for peer-to-peer communication over TCP +// where the "user-facing" surface of the protocol / API doesn't expose a clear +// notion of client and server). This also covers UDP network interactions +// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. +const ( + // Client address - domain name if available without reverse DNS lookup; + // otherwise, IP address or Unix domain socket name. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the server side, and when communicating through an + // intermediary, client.address SHOULD represent the client address behind any + // intermediaries, for example proxies, if it's available. + AttributeClientAddress = "client.address" + // Client port number. 
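A sketch of the client address and port attributes on a server-side span, assuming the pdata API; the address and port are invented, and client.port is the attribute whose metadata continues just below.

package semconvexample

import (
	"go.opentelemetry.io/collector/pdata/ptrace"
	semconv "go.opentelemetry.io/collector/semconv/v1.25.0"
)

// annotateServerSpan sketches client.address / client.port on a server span.
func annotateServerSpan(span ptrace.Span) {
	span.SetKind(ptrace.SpanKindServer)
	// Behind a proxy, these should describe the original client rather than
	// the intermediary, when that information is available.
	span.Attributes().PutStr(semconv.AttributeClientAddress, "10.1.2.80")
	span.Attributes().PutInt(semconv.AttributeClientPort, 65123)
}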
+ // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 65123 + // Note: When observed from the server side, and when communicating through an + // intermediary, client.port SHOULD represent the client port behind any + // intermediaries, for example proxies, if it's available. + AttributeClientPort = "client.port" +) + +// A cloud environment (e.g. GCP, Azure, AWS). +const ( + // The cloud account ID the resource is assigned to. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '111111111111', 'opentelemetry' + AttributeCloudAccountID = "cloud.account.id" + // Cloud regions often have multiple, isolated locations known as zones to + // increase availability. Availability zone represents the zone where the resource + // is running. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'us-east-1c' + // Note: Availability zones are called "zones" on Alibaba Cloud and + // Google Cloud. + AttributeCloudAvailabilityZone = "cloud.availability_zone" + // The cloud platform in use. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: The prefix of the service SHOULD match the one specified in + // cloud.provider. + AttributeCloudPlatform = "cloud.platform" + // Name of the cloud provider. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeCloudProvider = "cloud.provider" + // The geographical region the resource is running in. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'us-central1', 'us-east-1' + // Note: Refer to your provider's docs to see the available regions, for example + // Alibaba Cloud regions, AWS regions, Azure regions, Google Cloud regions, or + // Tencent Cloud regions. + AttributeCloudRegion = "cloud.region" + // Cloud provider-specific native identifier of the monitored cloud resource (e.g. + // an ARN on AWS, a fully qualified resource ID on Azure, a full resource name on + // GCP) + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', '//run.googl + // eapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', '/sub + // scriptions//resourceGroups//providers/Microsoft.Web/sites + // //functions/' + // Note: On some cloud providers, it may not be possible to determine the full ID + // at startup, + // so it may be necessary to set cloud.resource_id as a span attribute instead. The + // exact value to use for cloud.resource_id depends on the cloud provider. + // The following well-known definitions MUST be used if you set this attribute and + // they apply:
+ // + //   - AWS Lambda: The function ARN. + //     Take care not to use the "invoked ARN" directly but replace any + //     alias suffix + //     with the resolved function version, as the same runtime instance may be + //     invocable with + //     multiple different aliases. + //   - GCP: The URI of the resource + //   - Azure: The Fully Qualified Resource ID of the invoked function, + //     not the function app, having the form + //     /subscriptions//resourceGroups//providers/Microsoft.Web/s + //     ites//functions/. + //     This means that a span attribute MUST be used, as an Azure function app can + //     host multiple functions that would usually share + //     a TracerProvider.
    + AttributeCloudResourceID = "cloud.resource_id" +) + +const ( + // Alibaba Cloud Elastic Compute Service + AttributeCloudPlatformAlibabaCloudECS = "alibaba_cloud_ecs" + // Alibaba Cloud Function Compute + AttributeCloudPlatformAlibabaCloudFc = "alibaba_cloud_fc" + // Red Hat OpenShift on Alibaba Cloud + AttributeCloudPlatformAlibabaCloudOpenshift = "alibaba_cloud_openshift" + // AWS Elastic Compute Cloud + AttributeCloudPlatformAWSEC2 = "aws_ec2" + // AWS Elastic Container Service + AttributeCloudPlatformAWSECS = "aws_ecs" + // AWS Elastic Kubernetes Service + AttributeCloudPlatformAWSEKS = "aws_eks" + // AWS Lambda + AttributeCloudPlatformAWSLambda = "aws_lambda" + // AWS Elastic Beanstalk + AttributeCloudPlatformAWSElasticBeanstalk = "aws_elastic_beanstalk" + // AWS App Runner + AttributeCloudPlatformAWSAppRunner = "aws_app_runner" + // Red Hat OpenShift on AWS (ROSA) + AttributeCloudPlatformAWSOpenshift = "aws_openshift" + // Azure Virtual Machines + AttributeCloudPlatformAzureVM = "azure_vm" + // Azure Container Apps + AttributeCloudPlatformAzureContainerApps = "azure_container_apps" + // Azure Container Instances + AttributeCloudPlatformAzureContainerInstances = "azure_container_instances" + // Azure Kubernetes Service + AttributeCloudPlatformAzureAKS = "azure_aks" + // Azure Functions + AttributeCloudPlatformAzureFunctions = "azure_functions" + // Azure App Service + AttributeCloudPlatformAzureAppService = "azure_app_service" + // Azure Red Hat OpenShift + AttributeCloudPlatformAzureOpenshift = "azure_openshift" + // Google Bare Metal Solution (BMS) + AttributeCloudPlatformGCPBareMetalSolution = "gcp_bare_metal_solution" + // Google Cloud Compute Engine (GCE) + AttributeCloudPlatformGCPComputeEngine = "gcp_compute_engine" + // Google Cloud Run + AttributeCloudPlatformGCPCloudRun = "gcp_cloud_run" + // Google Cloud Kubernetes Engine (GKE) + AttributeCloudPlatformGCPKubernetesEngine = "gcp_kubernetes_engine" + // Google Cloud Functions (GCF) + AttributeCloudPlatformGCPCloudFunctions = "gcp_cloud_functions" + // Google Cloud App Engine (GAE) + AttributeCloudPlatformGCPAppEngine = "gcp_app_engine" + // Red Hat OpenShift on Google Cloud + AttributeCloudPlatformGCPOpenshift = "gcp_openshift" + // Red Hat OpenShift on IBM Cloud + AttributeCloudPlatformIbmCloudOpenshift = "ibm_cloud_openshift" + // Tencent Cloud Cloud Virtual Machine (CVM) + AttributeCloudPlatformTencentCloudCvm = "tencent_cloud_cvm" + // Tencent Cloud Elastic Kubernetes Service (EKS) + AttributeCloudPlatformTencentCloudEKS = "tencent_cloud_eks" + // Tencent Cloud Serverless Cloud Function (SCF) + AttributeCloudPlatformTencentCloudScf = "tencent_cloud_scf" +) + +const ( + // Alibaba Cloud + AttributeCloudProviderAlibabaCloud = "alibaba_cloud" + // Amazon Web Services + AttributeCloudProviderAWS = "aws" + // Microsoft Azure + AttributeCloudProviderAzure = "azure" + // Google Cloud Platform + AttributeCloudProviderGCP = "gcp" + // Heroku Platform as a Service + AttributeCloudProviderHeroku = "heroku" + // IBM Cloud + AttributeCloudProviderIbmCloud = "ibm_cloud" + // Tencent Cloud + AttributeCloudProviderTencentCloud = "tencent_cloud" +) + +// Attributes for CloudEvents. +const ( + // The event_id uniquely identifies the event. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' + AttributeCloudeventsEventID = "cloudevents.event_id" + // The source identifies the context in which an event happened. 
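The cloud attributes above are resource-level; a sketch of describing an AWS Lambda function on a pdata resource, where every concrete value (region, account, ARN) is an invented placeholder.

package semconvexample

import (
	"go.opentelemetry.io/collector/pdata/pcommon"
	semconv "go.opentelemetry.io/collector/semconv/v1.25.0"
)

// annotateLambdaResource sketches the cloud resource attributes for a Lambda function.
func annotateLambdaResource(res pcommon.Resource) {
	attrs := res.Attributes()
	attrs.PutStr(semconv.AttributeCloudProvider, semconv.AttributeCloudProviderAWS)
	attrs.PutStr(semconv.AttributeCloudPlatform, semconv.AttributeCloudPlatformAWSLambda)
	attrs.PutStr(semconv.AttributeCloudRegion, "us-east-1")
	attrs.PutStr(semconv.AttributeCloudAccountID, "111111111111")
	attrs.PutStr(semconv.AttributeCloudResourceID, "arn:aws:lambda:us-east-1:111111111111:function:my-function")
}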
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'https://github.com/cloudevents', '/cloudevents/spec/pull/123', 'my- + // service' + AttributeCloudeventsEventSource = "cloudevents.event_source" + // The version of the CloudEvents specification which the event uses. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1.0' + AttributeCloudeventsEventSpecVersion = "cloudevents.event_spec_version" + // The subject of the event in the context of the event producer (identified by + // source). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'mynewfile.jpg' + AttributeCloudeventsEventSubject = "cloudevents.event_subject" + // The event_type contains a value describing the type of event related to the + // originating occurrence. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'com.github.pull_request.opened', 'com.example.object.deleted.v2' + AttributeCloudeventsEventType = "cloudevents.event_type" +) + +// These attributes allow to report this unit of code and therefore to provide +// more context about the span. +const ( + // The column number in code.filepath best representing the operation. It SHOULD + // point within the code unit named in code.function. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 16 + AttributeCodeColumn = "code.column" + // The source code file name that identifies the code unit as uniquely as possible + // (preferably an absolute file path). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + AttributeCodeFilepath = "code.filepath" + // The method or function name, or equivalent (usually rightmost part of the code + // unit's name). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'serveRequest' + AttributeCodeFunction = "code.function" + // The line number in code.filepath best representing the operation. It SHOULD + // point within the code unit named in code.function. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 42 + AttributeCodeLineNumber = "code.lineno" + // The "namespace" within which code.function is defined. Usually the + // qualified class or module name, such that code.namespace + some separator + + // code.function form a unique identifier for the code unit. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'com.example.MyHTTPService' + AttributeCodeNamespace = "code.namespace" + // A stacktrace as a string in the natural representation for the language + // runtime. The representation is to be determined and documented by each language + // SIG. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + AttributeCodeStacktrace = "code.stacktrace" +) + +// A container instance. +const ( + // The command used to run the container (i.e. the command name). 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'otelcontribcol' + // Note: If using embedded credentials or sensitive data, it is recommended to + // remove them to prevent potential leakage. + AttributeContainerCommand = "container.command" + // All the command arguments (including the command/executable itself) run by the + // container. [2] + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'otelcontribcol, --config, config.yaml' + AttributeContainerCommandArgs = "container.command_args" + // The full command run by the container as a single string representing the full + // command. [2] + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'otelcontribcol --config config.yaml' + AttributeContainerCommandLine = "container.command_line" + // The CPU state for this data point. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'user', 'kernel' + AttributeContainerCPUState = "container.cpu.state" + // Container ID. Usually a UUID, as for example used to identify Docker + // containers. The UUID might be abbreviated. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'a3bf90e006b2' + AttributeContainerID = "container.id" + // Runtime specific image identifier. Usually a hash algorithm followed by a UUID. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: + // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f' + // Note: Docker defines a sha256 of the image id; container.image.id corresponds + // to the Image field from the Docker container inspect API endpoint. + // K8S defines a link to the container registry repository with digest "imageID": + // "registry.azurecr.io /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e + // 8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625". + // The ID is assigned by the container runtime and can vary in different + // environments. Consider using oci.manifest.digest if it is important to identify + // the same image in different environments/runtimes. + AttributeContainerImageID = "container.image.id" + // Name of the image the container was built on. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'gcr.io/opentelemetry/operator' + AttributeContainerImageName = "container.image.name" + // Repo digests of the container image as provided by the container runtime. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d7 + // 02d249a0ccb', 'internal.registry.example.com:5000/example@sha256:b69959407d21e8 + // a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578' + // Note: Docker and CRI report those under the RepoDigests field. + AttributeContainerImageRepoDigests = "container.image.repo_digests" + // Container image tags. An example can be found in Docker Image Inspect. Should + // be only the section of the full name for example from + // registry.example.com/my-org/my-image:. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'v1.27.1', '3.5.7-0' + AttributeContainerImageTags = "container.image.tags" + // Container name used by container runtime. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry-autoconf' + AttributeContainerName = "container.name" + // The container runtime managing this container. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'docker', 'containerd', 'rkt' + AttributeContainerRuntime = "container.runtime" +) + +const ( + // When tasks of the cgroup are in user mode (Linux). When all container processes are in user mode (Windows) + AttributeContainerCPUStateUser = "user" + // When CPU is used by the system (host OS) + AttributeContainerCPUStateSystem = "system" + // When tasks of the cgroup are in kernel mode (Linux). When all container processes are in kernel mode (Windows) + AttributeContainerCPUStateKernel = "kernel" +) + +// The attributes used to describe telemetry in the context of databases. +const ( + // The consistency level of the query. Based on consistency values from CQL. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeDBCassandraConsistencyLevel = "db.cassandra.consistency_level" + // The data center of the coordinating node for a query. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'us-west-2' + AttributeDBCassandraCoordinatorDC = "db.cassandra.coordinator.dc" + // The ID of the coordinating node for a query. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + AttributeDBCassandraCoordinatorID = "db.cassandra.coordinator.id" + // Whether or not the query is idempotent. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeDBCassandraIdempotence = "db.cassandra.idempotence" + // The fetch size used for paging, i.e. how many rows will be returned at once. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 5000 + AttributeDBCassandraPageSize = "db.cassandra.page_size" + // The number of times a query was speculatively executed. Not set or 0 if the + // query was not executed speculatively. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 0, 2 + AttributeDBCassandraSpeculativeExecutionCount = "db.cassandra.speculative_execution_count" + // The name of the primary Cassandra table that the operation is acting upon, + // including the keyspace name (if applicable). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'mytable' + // Note: This mirrors the db.sql.table attribute but references cassandra rather + // than sql. It is not recommended to attempt any client-side parsing of + // db.statement just to get this property, but it should be set if it is provided + // by the library being instrumented. If the operation is acting upon an anonymous + // table, or more than one table, this value MUST NOT be set. + AttributeDBCassandraTable = "db.cassandra.table" + // Unique Cosmos client instance id. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' + AttributeDBCosmosDBClientID = "db.cosmosdb.client_id" + // Cosmos client connection mode. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeDBCosmosDBConnectionMode = "db.cosmosdb.connection_mode" + // Cosmos DB container name. 
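[Editorial sketch: the Cassandra keys above on a client span, with the same imports as the earlier sketches. Values are illustrative; per the note above, db.cassandra.table is only set when the instrumented driver provides it.]

    // annotateCassandraQuery sets Cassandra-specific db.* keys on a client span.
    func annotateCassandraQuery(span trace.Span) {
        span.SetAttributes(
            attribute.String(AttributeDBCassandraCoordinatorDC, "us-west-2"),
            attribute.Int(AttributeDBCassandraPageSize, 5000),
            attribute.Bool(AttributeDBCassandraIdempotence, true),
            attribute.String(AttributeDBCassandraTable, "mytable"),
        )
    }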
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'anystring' + AttributeDBCosmosDBContainer = "db.cosmosdb.container" + // CosmosDB Operation Type. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeDBCosmosDBOperationType = "db.cosmosdb.operation_type" + // RU consumed for that operation + // + // Type: double + // Requirement Level: Optional + // Stability: experimental + // Examples: 46.18, 1.0 + AttributeDBCosmosDBRequestCharge = "db.cosmosdb.request_charge" + // Request payload size in bytes + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + AttributeDBCosmosDBRequestContentLength = "db.cosmosdb.request_content_length" + // Cosmos DB status code. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 200, 201 + AttributeDBCosmosDBStatusCode = "db.cosmosdb.status_code" + // Cosmos DB sub status code. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1000, 1002 + AttributeDBCosmosDBSubStatusCode = "db.cosmosdb.sub_status_code" + // Represents the identifier of an Elasticsearch cluster. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f' + AttributeDBElasticsearchClusterName = "db.elasticsearch.cluster.name" + // An identifier (address, unique name, or any other identifier) of the database + // instance that is executing queries or mutations on the current connection. This + // is useful in cases where the database is running in a clustered environment and + // the instrumentation is able to record the node executing the query. The client + // may obtain this value in databases like MySQL using queries like select + // @@hostname. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'mysql-e26b99z.example.com' + AttributeDBInstanceID = "db.instance.id" + // The MongoDB collection being accessed within the database stated in db.name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'customers', 'products' + AttributeDBMongoDBCollection = "db.mongodb.collection" + // The Microsoft SQL Server instance name connecting to. This name is used to + // determine the port of a named instance. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MSSQLSERVER' + // Note: If setting a db.mssql.instance_name, server.port is no longer required + // (but still recommended if non-standard). + AttributeDBMSSQLInstanceName = "db.mssql.instance_name" + // This attribute is used to report the name of the database being accessed. For + // commands that switch the database, this should be set to the target database + // (even if the command fails). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'customers', 'main' + // Note: In some SQL databases, the database name to be used is called + // "schema name". In case there are multiple layers that could be + // considered for database name (e.g. Oracle instance name and schema name), the + // database name to be used is the more specific layer (e.g. Oracle schema name). + AttributeDBName = "db.name" + // The name of the operation being executed, e.g. the MongoDB command name such as + // findAndModify, or the SQL keyword. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: When setting this to an SQL keyword, it is not recommended to attempt any + // client-side parsing of db.statement just to get this property, but it should be + // set if the operation name is provided by the library being instrumented. If the + // SQL statement has an ambiguous operation, or performs more than one operation, + // this value may be omitted. + AttributeDBOperation = "db.operation" + // The index of the database being accessed as used in the SELECT command, + // provided as an integer. To be used instead of the generic db.name attribute. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 0, 1, 15 + AttributeDBRedisDBIndex = "db.redis.database_index" + // The name of the primary table that the operation is acting upon, including the + // database name (if applicable). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'public.users', 'customers' + // Note: It is not recommended to attempt any client-side parsing of db.statement + // just to get this property, but it should be set if it is provided by the + // library being instrumented. If the operation is acting upon an anonymous table, + // or more than one table, this value MUST NOT be set. + AttributeDBSQLTable = "db.sql.table" + // The database statement being executed. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' + AttributeDBStatement = "db.statement" + // An identifier for the database management system (DBMS) product being used. See + // below for a list of well-known identifiers. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeDBSystem = "db.system" + // Username for accessing the database. 
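[Editorial sketch: the generic db.* keys above combined on a SQL client span, same imports as earlier. Following the notes, db.operation and db.sql.table are set from what the library reports, not parsed out of db.statement.]

    // annotateSQLQuery fills the generic db.* keys for a SQL client span.
    func annotateSQLQuery(span trace.Span) {
        span.SetAttributes(
            attribute.String(AttributeDBSystem, AttributeDBSystemPostgreSQL),
            attribute.String(AttributeDBName, "customers"),
            attribute.String(AttributeDBOperation, "SELECT"),
            attribute.String(AttributeDBSQLTable, "public.users"),
            attribute.String(AttributeDBStatement, "SELECT * FROM public.users"),
        )
    }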
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'readonly_user', 'reporting_user' + AttributeDBUser = "db.user" +) + +const ( + // all + AttributeDBCassandraConsistencyLevelAll = "all" + // each_quorum + AttributeDBCassandraConsistencyLevelEachQuorum = "each_quorum" + // quorum + AttributeDBCassandraConsistencyLevelQuorum = "quorum" + // local_quorum + AttributeDBCassandraConsistencyLevelLocalQuorum = "local_quorum" + // one + AttributeDBCassandraConsistencyLevelOne = "one" + // two + AttributeDBCassandraConsistencyLevelTwo = "two" + // three + AttributeDBCassandraConsistencyLevelThree = "three" + // local_one + AttributeDBCassandraConsistencyLevelLocalOne = "local_one" + // any + AttributeDBCassandraConsistencyLevelAny = "any" + // serial + AttributeDBCassandraConsistencyLevelSerial = "serial" + // local_serial + AttributeDBCassandraConsistencyLevelLocalSerial = "local_serial" +) + +const ( + // Gateway (HTTP) connections mode + AttributeDBCosmosDBConnectionModeGateway = "gateway" + // Direct connection + AttributeDBCosmosDBConnectionModeDirect = "direct" +) + +const ( + // invalid + AttributeDBCosmosDBOperationTypeInvalid = "Invalid" + // create + AttributeDBCosmosDBOperationTypeCreate = "Create" + // patch + AttributeDBCosmosDBOperationTypePatch = "Patch" + // read + AttributeDBCosmosDBOperationTypeRead = "Read" + // read_feed + AttributeDBCosmosDBOperationTypeReadFeed = "ReadFeed" + // delete + AttributeDBCosmosDBOperationTypeDelete = "Delete" + // replace + AttributeDBCosmosDBOperationTypeReplace = "Replace" + // execute + AttributeDBCosmosDBOperationTypeExecute = "Execute" + // query + AttributeDBCosmosDBOperationTypeQuery = "Query" + // head + AttributeDBCosmosDBOperationTypeHead = "Head" + // head_feed + AttributeDBCosmosDBOperationTypeHeadFeed = "HeadFeed" + // upsert + AttributeDBCosmosDBOperationTypeUpsert = "Upsert" + // batch + AttributeDBCosmosDBOperationTypeBatch = "Batch" + // query_plan + AttributeDBCosmosDBOperationTypeQueryPlan = "QueryPlan" + // execute_javascript + AttributeDBCosmosDBOperationTypeExecuteJavascript = "ExecuteJavaScript" +) + +const ( + // Some other SQL database. Fallback only. 
See notes + AttributeDBSystemOtherSQL = "other_sql" + // Microsoft SQL Server + AttributeDBSystemMSSQL = "mssql" + // Microsoft SQL Server Compact + AttributeDBSystemMssqlcompact = "mssqlcompact" + // MySQL + AttributeDBSystemMySQL = "mysql" + // Oracle Database + AttributeDBSystemOracle = "oracle" + // IBM DB2 + AttributeDBSystemDB2 = "db2" + // PostgreSQL + AttributeDBSystemPostgreSQL = "postgresql" + // Amazon Redshift + AttributeDBSystemRedshift = "redshift" + // Apache Hive + AttributeDBSystemHive = "hive" + // Cloudscape + AttributeDBSystemCloudscape = "cloudscape" + // HyperSQL DataBase + AttributeDBSystemHSQLDB = "hsqldb" + // Progress Database + AttributeDBSystemProgress = "progress" + // SAP MaxDB + AttributeDBSystemMaxDB = "maxdb" + // SAP HANA + AttributeDBSystemHanaDB = "hanadb" + // Ingres + AttributeDBSystemIngres = "ingres" + // FirstSQL + AttributeDBSystemFirstSQL = "firstsql" + // EnterpriseDB + AttributeDBSystemEDB = "edb" + // InterSystems Caché + AttributeDBSystemCache = "cache" + // Adabas (Adaptable Database System) + AttributeDBSystemAdabas = "adabas" + // Firebird + AttributeDBSystemFirebird = "firebird" + // Apache Derby + AttributeDBSystemDerby = "derby" + // FileMaker + AttributeDBSystemFilemaker = "filemaker" + // Informix + AttributeDBSystemInformix = "informix" + // InstantDB + AttributeDBSystemInstantDB = "instantdb" + // InterBase + AttributeDBSystemInterbase = "interbase" + // MariaDB + AttributeDBSystemMariaDB = "mariadb" + // Netezza + AttributeDBSystemNetezza = "netezza" + // Pervasive PSQL + AttributeDBSystemPervasive = "pervasive" + // PointBase + AttributeDBSystemPointbase = "pointbase" + // SQLite + AttributeDBSystemSqlite = "sqlite" + // Sybase + AttributeDBSystemSybase = "sybase" + // Teradata + AttributeDBSystemTeradata = "teradata" + // Vertica + AttributeDBSystemVertica = "vertica" + // H2 + AttributeDBSystemH2 = "h2" + // ColdFusion IMQ + AttributeDBSystemColdfusion = "coldfusion" + // Apache Cassandra + AttributeDBSystemCassandra = "cassandra" + // Apache HBase + AttributeDBSystemHBase = "hbase" + // MongoDB + AttributeDBSystemMongoDB = "mongodb" + // Redis + AttributeDBSystemRedis = "redis" + // Couchbase + AttributeDBSystemCouchbase = "couchbase" + // CouchDB + AttributeDBSystemCouchDB = "couchdb" + // Microsoft Azure Cosmos DB + AttributeDBSystemCosmosDB = "cosmosdb" + // Amazon DynamoDB + AttributeDBSystemDynamoDB = "dynamodb" + // Neo4j + AttributeDBSystemNeo4j = "neo4j" + // Apache Geode + AttributeDBSystemGeode = "geode" + // Elasticsearch + AttributeDBSystemElasticsearch = "elasticsearch" + // Memcached + AttributeDBSystemMemcached = "memcached" + // CockroachDB + AttributeDBSystemCockroachdb = "cockroachdb" + // OpenSearch + AttributeDBSystemOpensearch = "opensearch" + // ClickHouse + AttributeDBSystemClickhouse = "clickhouse" + // Cloud Spanner + AttributeDBSystemSpanner = "spanner" + // Trino + AttributeDBSystemTrino = "trino" +) + +// Attributes for software deployments. +const ( + // Name of the deployment environment (aka deployment tier). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'staging', 'production' + // Note: deployment.environment does not affect the uniqueness constraints defined + // through + // the service.namespace, service.name and service.instance.id resource + // attributes. + // This implies that resources carrying the following attribute combinations MUST + // be + // considered to be identifying the same service:
+ //
+ //   - service.name=frontend, deployment.environment=production
+ //   - service.name=frontend, deployment.environment=staging.
    + AttributeDeploymentEnvironment = "deployment.environment" +) + +// "Describes deprecated db attributes." +const ( + // Deprecated, use server.address, server.port attributes instead. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: "Replaced by `server.address` and `server.port`." + // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' + AttributeDBConnectionString = "db.connection_string" + // Deprecated, use db.instance.id instead. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `db.instance.id`. + // Examples: 'instance-0000000001' + AttributeDBElasticsearchNodeName = "db.elasticsearch.node.name" + // Removed, no replacement at this time. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Removed as not used. + // Examples: 'org.postgresql.Driver', + // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' + AttributeDBJDBCDriverClassname = "db.jdbc.driver_classname" +) + +// Describes deprecated HTTP attributes. +const ( + // Deprecated, use network.protocol.name instead. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `network.protocol.name`. + AttributeHTTPFlavor = "http.flavor" + // Deprecated, use http.request.method instead. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `http.request.method`. + // Examples: 'GET', 'POST', 'HEAD' + AttributeHTTPMethod = "http.method" + // Deprecated, use http.request.header.content-length instead. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `http.request.header.content-length`. + // Examples: 3495 + AttributeHTTPRequestContentLength = "http.request_content_length" + // Deprecated, use http.response.header.content-length instead. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `http.response.header.content-length`. + // Examples: 3495 + AttributeHTTPResponseContentLength = "http.response_content_length" + // Deprecated, use url.scheme instead. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `url.scheme` instead. + // Examples: 'http', 'https' + AttributeHTTPScheme = "http.scheme" + // Deprecated, use http.response.status_code instead. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `http.response.status_code`. + // Examples: 200 + AttributeHTTPStatusCode = "http.status_code" + // Deprecated, use url.path and url.query instead. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Split to `url.path` and `url.query. + // Examples: '/search?q=OpenTelemetry#SemConv' + AttributeHTTPTarget = "http.target" + // Deprecated, use url.full instead. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `url.full`. + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' + AttributeHTTPURL = "http.url" + // Deprecated, use user_agent.original instead. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `user_agent.original`. 
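[Editorial sketch: the Deprecated notes above amount to a mechanical renaming table. A processor migrating old telemetry might encode it like this; the stable url.*/user_agent.* constants live elsewhere in this package, so string literals keep the sketch self-contained.]

    // deprecatedHTTPToStable maps pre-stabilization HTTP attribute keys to
    // their replacements, per the Deprecated notes above.
    var deprecatedHTTPToStable = map[string]string{
        AttributeHTTPMethod:     "http.request.method",
        AttributeHTTPStatusCode: "http.response.status_code",
        AttributeHTTPURL:        "url.full",
        AttributeHTTPUserAgent:  "user_agent.original",
    }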
+ // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU iPhone + // OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) + // Version/14.1.2 Mobile/15E148 Safari/604.1' + AttributeHTTPUserAgent = "http.user_agent" +) + +const ( + // HTTP/1.0 + AttributeHTTPFlavorHTTP10 = "1.0" + // HTTP/1.1 + AttributeHTTPFlavorHTTP11 = "1.1" + // HTTP/2 + AttributeHTTPFlavorHTTP20 = "2.0" + // HTTP/3 + AttributeHTTPFlavorHTTP30 = "3.0" + // SPDY protocol + AttributeHTTPFlavorSPDY = "SPDY" + // QUIC protocol + AttributeHTTPFlavorQUIC = "QUIC" +) + +// Describes deprecated messaging attributes. +const ( + // "Deprecated, use messaging.destination.partition.id instead." + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `messaging.destination.partition.id`. + // Examples: 2 + AttributeMessagingKafkaDestinationPartition = "messaging.kafka.destination.partition" +) + +// These attributes may be used for any network related operation. +const ( + // Deprecated, use server.address. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `server.address`. + // Examples: 'example.com' + AttributeNetHostName = "net.host.name" + // Deprecated, use server.port. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `server.port`. + // Examples: 8080 + AttributeNetHostPort = "net.host.port" + // Deprecated, use server.address on client spans and client.address on server + // spans. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `server.address` on client spans and `client.address` + // on server spans. + // Examples: 'example.com' + AttributeNetPeerName = "net.peer.name" + // Deprecated, use server.port on client spans and client.port on server spans. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `server.port` on client spans and `client.port` on + // server spans. + // Examples: 8080 + AttributeNetPeerPort = "net.peer.port" + // Deprecated, use network.protocol.name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `network.protocol.name`. + // Examples: 'amqp', 'http', 'mqtt' + AttributeNetProtocolName = "net.protocol.name" + // Deprecated, use network.protocol.version. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `network.protocol.version`. + // Examples: '3.1.1' + AttributeNetProtocolVersion = "net.protocol.version" + // Deprecated, use network.transport and network.type. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Split to `network.transport` and `network.type`. + AttributeNetSockFamily = "net.sock.family" + // Deprecated, use network.local.address. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `network.local.address`. + // Examples: '/var/my.sock' + AttributeNetSockHostAddr = "net.sock.host.addr" + // Deprecated, use network.local.port. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `network.local.port`. + // Examples: 8080 + AttributeNetSockHostPort = "net.sock.host.port" + // Deprecated, use network.peer.address. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `network.peer.address`. + // Examples: '192.168.0.1' + AttributeNetSockPeerAddr = "net.sock.peer.addr" + // Deprecated, no replacement at this time. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Removed. + // Examples: '/var/my.sock' + AttributeNetSockPeerName = "net.sock.peer.name" + // Deprecated, use network.peer.port. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `network.peer.port`. + // Examples: 65531 + AttributeNetSockPeerPort = "net.sock.peer.port" + // Deprecated, use network.transport. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `network.transport`. + AttributeNetTransport = "net.transport" +) + +const ( + // IPv4 address + AttributeNetSockFamilyInet = "inet" + // IPv6 address + AttributeNetSockFamilyInet6 = "inet6" + // Unix domain socket path + AttributeNetSockFamilyUnix = "unix" +) + +const ( + // ip_tcp + AttributeNetTransportTCP = "ip_tcp" + // ip_udp + AttributeNetTransportUDP = "ip_udp" + // Named or anonymous pipe + AttributeNetTransportPipe = "pipe" + // In-process communication + AttributeNetTransportInProc = "inproc" + // Something else (non IP-based) + AttributeNetTransportOther = "other" +) + +// Deprecated system attributes. +const ( + // Deprecated, use system.process.status instead. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `system.process.status`. + // Examples: 'running' + AttributeSystemProcessesStatus = "system.processes.status" +) + +const ( + // running + AttributeSystemProcessesStatusRunning = "running" + // sleeping + AttributeSystemProcessesStatusSleeping = "sleeping" + // stopped + AttributeSystemProcessesStatusStopped = "stopped" + // defunct + AttributeSystemProcessesStatusDefunct = "defunct" +) + +// These attributes may be used to describe the receiver of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // Destination address - domain name if available without reverse DNS lookup; + // otherwise, IP address or Unix domain socket name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the source side, and when communicating through an + // intermediary, destination.address SHOULD represent the destination address + // behind any intermediaries, for example proxies, if it's available. + AttributeDestinationAddress = "destination.address" + // Destination port number + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 3389, 2888 + AttributeDestinationPort = "destination.port" +) + +// Describes device attributes. 
+const ( + // A unique identifier representing the device + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values outlined + // below. This value is not an advertising identifier and MUST NOT be used as + // such. On iOS (Swift or Objective-C), this value MUST be equal to the vendor + // identifier. On Android (Java or Kotlin), this value MUST be equal to the + // Firebase Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found here on best + // practices and exact implementation details. Caution should be taken when + // storing personal data or anything which can identify a user. GDPR and data + // protection laws may apply, ensure you do your own due diligence. + AttributeDeviceID = "device.id" + // The name of the device manufacturer + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via Build. iOS apps SHOULD hardcode + // the value Apple. + AttributeDeviceManufacturer = "device.manufacturer" + // The model identifier for the device + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine-readable version of the + // model identifier rather than the market or consumer-friendly name of the + // device. + AttributeDeviceModelIdentifier = "device.model.identifier" + // The marketing name for the device model + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human-readable version of the + // device model rather than a machine-readable alternative. + AttributeDeviceModelName = "device.model.name" +) + +// These attributes may be used for any disk related operation. +const ( + // The disk IO operation direction. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'read' + AttributeDiskIoDirection = "disk.io.direction" +) + +const ( + // read + AttributeDiskIoDirectionRead = "read" + // write + AttributeDiskIoDirectionWrite = "write" +) + +// The shared attributes used to report a DNS query. +const ( + // The name being queried. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'www.example.com', 'opentelemetry.io' + // Note: If the name field contains non-printable characters (below 32 or above + // 126), those characters should be represented as escaped base 10 integers + // (\DDD). Back slashes and quotes should be escaped. Tabs, carriage returns, and + // line feeds should be converted to \t, \r, and \n respectively. + AttributeDNSQuestionName = "dns.question.name" +) + +// Attributes for operations with an authenticated and/or authorized enduser. +const ( + // Username or client_id extracted from the access token or Authorization header + // in the inbound request from outside the system. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'username' + AttributeEnduserID = "enduser.id" + // Actual/assumed role the client is making the request under extracted from token + // or application security context. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'admin' + AttributeEnduserRole = "enduser.role" + // Scopes or granted authorities the client currently possesses extracted from + // token or application security context. The value would come from the scope + // associated with an OAuth 2.0 Access Token or an attribute value in a SAML 2.0 + // Assertion. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'read:message, write:files' + AttributeEnduserScope = "enduser.scope" +) + +// The shared attributes used to report an error. +const ( + // Describes a class of error the operation ended with. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'timeout', 'java.net.UnknownHostException', + // 'server_certificate_invalid', '500' + // Note: The error.type SHOULD be predictable and SHOULD have low cardinality. + // Instrumentations SHOULD document the list of errors they report.The cardinality + // of error.type within one instrumentation library SHOULD be low. + // Telemetry consumers that aggregate data from multiple instrumentation libraries + // and applications + // should be prepared for error.type to have high cardinality at query time when + // no + // additional filters are applied.If the operation has completed successfully, + // instrumentations SHOULD NOT set error.type.If a specific domain defines its own + // set of error identifiers (such as HTTP or gRPC status codes), + // it's RECOMMENDED to:
+ //
+ //   - Use a domain-specific attribute
+ //   - Set error.type to capture all errors, regardless of whether they are
+ //     defined within the domain-specific set or not.
    + AttributeErrorType = "error.type" +) + +const ( + // A fallback error value to be used when the instrumentation doesn't define a custom value + AttributeErrorTypeOther = "_OTHER" +) + +// The shared attributes used to report a single exception associated with a +// span or log. +const ( + // SHOULD be set to true if the exception event is recorded at a point where it is + // known that the exception is escaping the scope of the span. + // + // Type: boolean + // Requirement Level: Optional + // Stability: stable + // Note: An exception is considered to have escaped (or left) the scope of a span, + // if that span is ended while the exception is still logically "in + // flight". + // This may be actually "in flight" in some languages (e.g. if the + // exception + // is passed to a Context manager's __exit__ method in Python) but will + // usually be caught at the point of recording the exception in most languages.It + // is usually not possible to determine at the point where an exception is thrown + // whether it will escape the scope of a span. + // However, it is trivial to know that an exception + // will escape, if one checks for an active exception just before ending the span, + // as done in the example for recording span exceptions.It follows that an + // exception may still escape the scope of the span + // even if the exception.escaped attribute was not set or set to false, + // since the event might have been recorded at a time where it was not + // clear whether the exception will escape. + AttributeExceptionEscaped = "exception.escaped" + // The exception message. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly" + AttributeExceptionMessage = "exception.message" + // A stacktrace as a string in the natural representation for the language + // runtime. The representation is to be determined and documented by each language + // SIG. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + AttributeExceptionStacktrace = "exception.stacktrace" + // The type of the exception (its fully-qualified class name, if applicable). The + // dynamic type of the exception should be preferred over the static type in + // languages that support it. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'java.net.ConnectException', 'OSError' + AttributeExceptionType = "exception.type" +) + +// FaaS attributes +const ( + // A boolean that is true if the serverless function is executed for the first + // time (aka cold-start). + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeFaaSColdstart = "faas.coldstart" + // A string containing the schedule period as Cron Expression. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '0/5 * * * ? *' + AttributeFaaSCron = "faas.cron" + // The name of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos + // DB to the database name. 
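[Editorial sketch: the error.type guidance above in code, same imports as earlier plus strconv. Prefer a domain-specific, low-cardinality identifier (here the HTTP status code) and fall back to _OTHER; nothing is set on success.]

    import "strconv"

    // classifyError applies the error.type note above.
    func classifyError(span trace.Span, httpStatus int, failed bool) {
        if !failed {
            return // error.type SHOULD NOT be set on success
        }
        errType := AttributeErrorTypeOther
        if httpStatus >= 400 {
            errType = strconv.Itoa(httpStatus) // e.g. "500", a domain-specific code
        }
        span.SetAttributes(attribute.String(AttributeErrorType, errType))
    }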
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myBucketName', 'myDBName' + AttributeFaaSDocumentCollection = "faas.document.collection" + // The document name/table subjected to the operation. For example, in Cloud + // Storage or S3 is the name of the file, and in Cosmos DB the table name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myFile.txt', 'myTableName' + AttributeFaaSDocumentName = "faas.document.name" + // Describes the type of the operation that was performed on the data. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeFaaSDocumentOperation = "faas.document.operation" + // A string containing the time when the data was accessed in the ISO 8601 format + // expressed in UTC. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + AttributeFaaSDocumentTime = "faas.document.time" + // The execution environment ID as a string, that will be potentially reused for + // other invocations to the same function/function version. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note:
+ //
+ //   - AWS Lambda: Use the (full) log stream name.
    + AttributeFaaSInstance = "faas.instance" + // The invocation ID of the current function invocation. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + AttributeFaaSInvocationID = "faas.invocation_id" + // The name of the invoked function. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'my-function' + // Note: SHOULD be equal to the faas.name resource attribute of the invoked + // function. + AttributeFaaSInvokedName = "faas.invoked_name" + // The cloud provider of the invoked function. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: SHOULD be equal to the cloud.provider resource attribute of the invoked + // function. + AttributeFaaSInvokedProvider = "faas.invoked_provider" + // The cloud region of the invoked function. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the cloud.region resource attribute of the invoked + // function. + AttributeFaaSInvokedRegion = "faas.invoked_region" + // The amount of memory available to the serverless function converted to Bytes. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 134217728 + // Note: It's recommended to set this attribute since e.g. too little memory can + // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, + // the environment variable AWS_LAMBDA_FUNCTION_MEMORY_SIZE provides this + // information (which must be multiplied by 1,048,576). + AttributeFaaSMaxMemory = "faas.max_memory" + // The name of the single function that this runtime instance executes. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // code.namespace/code.function + // span attributes).For some cloud providers, the above definition is ambiguous. + // The following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud providers/products:
+ //
+ //   - Azure: The full name <FUNCAPP>/<FUNC>, i.e., function app name
+ //     followed by a forward slash followed by the function name (this form
+ //     can also be seen in the resource JSON for the function). This means
+ //     that a span attribute MUST be used, as an Azure function app can host
+ //     multiple functions that would usually share a TracerProvider (see
+ //     also the cloud.resource_id attribute).
    + AttributeFaaSName = "faas.name" + // A string containing the function invocation time in the ISO 8601 format + // expressed in UTC. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + AttributeFaaSTime = "faas.time" + // Type of the trigger which caused this function invocation. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeFaaSTrigger = "faas.trigger" + // The immutable version of the function being executed. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use:
+ //
+ //   - AWS Lambda: The function version
+ //     (an integer represented as a decimal string).
+ //   - Google Cloud Run (Services): The revision
+ //     (i.e., the function name plus the revision suffix).
+ //   - Google Cloud Functions: The value of the
+ //     K_REVISION environment variable.
+ //   - Azure Functions: Not applicable. Do not set this attribute.
    + AttributeFaaSVersion = "faas.version" +) + +const ( + // When a new object is created + AttributeFaaSDocumentOperationInsert = "insert" + // When an object is modified + AttributeFaaSDocumentOperationEdit = "edit" + // When an object is deleted + AttributeFaaSDocumentOperationDelete = "delete" +) + +const ( + // Alibaba Cloud + AttributeFaaSInvokedProviderAlibabaCloud = "alibaba_cloud" + // Amazon Web Services + AttributeFaaSInvokedProviderAWS = "aws" + // Microsoft Azure + AttributeFaaSInvokedProviderAzure = "azure" + // Google Cloud Platform + AttributeFaaSInvokedProviderGCP = "gcp" + // Tencent Cloud + AttributeFaaSInvokedProviderTencentCloud = "tencent_cloud" +) + +const ( + // A response to some data source operation such as a database or filesystem read/write + AttributeFaaSTriggerDatasource = "datasource" + // To provide an answer to an inbound HTTP request + AttributeFaaSTriggerHTTP = "http" + // A function is set to be executed when messages are sent to a messaging system + AttributeFaaSTriggerPubsub = "pubsub" + // A function is scheduled to be executed regularly + AttributeFaaSTriggerTimer = "timer" + // If none of the others apply + AttributeFaaSTriggerOther = "other" +) + +// Attributes for Feature Flags. +const ( + // The unique identifier of the feature flag. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'logo-color' + AttributeFeatureFlagKey = "feature_flag.key" + // The name of the service provider that performs the flag evaluation. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Flag Manager' + AttributeFeatureFlagProviderName = "feature_flag.provider_name" + // SHOULD be a semantic identifier for a value. If one is unavailable, a + // stringified version of the value can be used. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'red', 'true', 'on' + // Note: A semantic identifier, commonly referred to as a variant, provides a + // means + // for referring to a value without including the value itself. This can + // provide additional context for understanding the meaning behind a value. + // For example, the variant red maybe be used for the value #c05543.A stringified + // version of the value can be used in situations where a + // semantic identifier is unavailable. String representation of the value + // should be determined by the implementer. + AttributeFeatureFlagVariant = "feature_flag.variant" +) + +// Describes file attributes. +const ( + // Directory where the file is located. It should include the drive letter, when + // appropriate. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/home/user', 'C:\\Program Files\\MyApp' + AttributeFileDirectory = "file.directory" + // File extension, excluding the leading dot. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'png', 'gz' + // Note: When the file name has multiple extensions (example.tar.gz), only the + // last one should be captured ("gz", not "tar.gz"). + AttributeFileExtension = "file.extension" + // Name of the file including the extension, without the directory. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'example.png' + AttributeFileName = "file.name" + // Full path to the file, including the file name. It should include the drive + // letter, when appropriate. 
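[Editorial sketch: a feature-flag evaluation recorded as a span event with the feature_flag.* keys above, same imports as earlier. The event name "feature_flag" and provider value are illustrative choices, not mandated by this file.]

    // recordFlagEvaluation attaches a feature-flag evaluation to a span.
    func recordFlagEvaluation(span trace.Span, key, variant string) {
        span.AddEvent("feature_flag", trace.WithAttributes(
            attribute.String(AttributeFeatureFlagKey, key),
            attribute.String(AttributeFeatureFlagProviderName, "Flag Manager"),
            attribute.String(AttributeFeatureFlagVariant, variant),
        ))
    }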
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/home/alice/example.png', 'C:\\Program Files\\MyApp\\myapp.exe' + AttributeFilePath = "file.path" + // File size in bytes. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + AttributeFileSize = "file.size" +) + +// Attributes for Google Cloud Run. +const ( + // The name of the Cloud Run execution being run for the Job, as set by the + // CLOUD_RUN_EXECUTION environment variable. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'job-name-xxxx', 'sample-job-mdw84' + AttributeGCPCloudRunJobExecution = "gcp.cloud_run.job.execution" + // The index for a task within an execution as provided by the + // CLOUD_RUN_TASK_INDEX environment variable. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 0, 1 + AttributeGCPCloudRunJobTaskIndex = "gcp.cloud_run.job.task_index" +) + +// Attributes for Google Compute Engine (GCE). +const ( + // The hostname of a GCE instance. This is the full value of the default or custom + // hostname. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'my-host1234.example.com', 'sample-vm.us-west1-b.c.my- + // project.internal' + AttributeGCPGceInstanceHostname = "gcp.gce.instance.hostname" + // The instance name of a GCE instance. This is the value provided by host.name, + // the visible name of the instance in the Cloud Console UI, and the prefix for + // the default hostname of the instance as defined by the default internal DNS + // name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'instance-1', 'my-vm-name' + AttributeGCPGceInstanceName = "gcp.gce.instance.name" +) + +// A host is defined as a computing instance. For example, physical servers, +// virtual machines, switches or disk array. +const ( + // The CPU architecture the host system is running on. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeHostArch = "host.arch" + // The amount of level 2 memory cache available to the processor (in Bytes). + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 12288000 + AttributeHostCPUCacheL2Size = "host.cpu.cache.l2.size" + // Family or generation of the CPU. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '6', 'PA-RISC 1.1e' + AttributeHostCPUFamily = "host.cpu.family" + // Model identifier. It provides more granular information about the CPU, + // distinguishing it from other CPUs within the same family. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '6', '9000/778/B180L' + AttributeHostCPUModelID = "host.cpu.model.id" + // Model designation of the processor. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz' + AttributeHostCPUModelName = "host.cpu.model.name" + // Stepping or core revisions. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1', 'r1p1' + AttributeHostCPUStepping = "host.cpu.stepping" + // Processor manufacturer identifier. A maximum 12-character string. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'GenuineIntel' + // Note: CPUID command returns the vendor ID string in EBX, EDX and ECX registers. + // Writing these to memory in this order results in a 12-character string. + AttributeHostCPUVendorID = "host.cpu.vendor.id" + // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud + // provider. For non-containerized systems, this should be the machine-id. See the + // table below for the sources to use to determine the machine-id based on + // operating system. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'fdbf79e8af94cb7f9e8df36789187052' + AttributeHostID = "host.id" + // VM image ID or host OS image ID. For Cloud, this value is from the provider. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'ami-07b06b442921831e5' + AttributeHostImageID = "host.image.id" + // Name of the VM image or OS install the host was instantiated from. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + AttributeHostImageName = "host.image.name" + // The version string of the VM image or host OS as defined in Version Attributes. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '0.1' + AttributeHostImageVersion = "host.image.version" + // Available IP addresses of the host, excluding loopback interfaces. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e' + // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 addresses + // MUST be specified in the RFC 5952 format. + AttributeHostIP = "host.ip" + // Available MAC addresses of the host, excluding loopback interfaces. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F' + // Note: MAC Addresses MUST be represented in IEEE RA hexadecimal form: as hyphen- + // separated octets in uppercase hexadecimal form from most to least significant. + AttributeHostMac = "host.mac" + // Name of the host. On Unix systems, it may contain what the hostname command + // returns, or the fully qualified hostname, or another name specified by the + // user. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry-test' + AttributeHostName = "host.name" + // Type of host. For Cloud, this must be the machine type. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'n1-standard-1' + AttributeHostType = "host.type" +) + +const ( + // AMD64 + AttributeHostArchAMD64 = "amd64" + // ARM32 + AttributeHostArchARM32 = "arm32" + // ARM64 + AttributeHostArchARM64 = "arm64" + // Itanium + AttributeHostArchIA64 = "ia64" + // 32-bit PowerPC + AttributeHostArchPPC32 = "ppc32" + // 64-bit PowerPC + AttributeHostArchPPC64 = "ppc64" + // IBM z/Architecture + AttributeHostArchS390x = "s390x" + // 32-bit x86 + AttributeHostArchX86 = "x86" +) + +// Semantic convention attributes in the HTTP namespace. +const ( + // State of the HTTP connection in the HTTP connection pool. 
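[Editorial sketch: host.* keys are resource attributes; this combines free-form host.* strings with one of the host.arch enum values defined above. Same imports as the container resource sketch; values are illustrative.]

    // hostResource describes the host the process runs on.
    func hostResource() *resource.Resource {
        return resource.NewWithAttributes(
            "", // schema URL omitted in this sketch
            attribute.String(AttributeHostName, "opentelemetry-test"),
            attribute.String(AttributeHostArch, AttributeHostArchAMD64),
            attribute.StringSlice(AttributeHostIP, []string{"192.168.1.140"}),
        )
    }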
+ // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'active', 'idle' + AttributeHTTPConnectionState = "http.connection.state" + // The size of the request payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as the + // Content-Length header. For requests using transport encoding, this should be + // the compressed size. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 3495 + AttributeHTTPRequestBodySize = "http.request.body.size" + // HTTP request method. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'GET', 'POST', 'HEAD' + // Note: HTTP request method value SHOULD be "known" to the + // instrumentation. + // By default, this convention defines "known" methods as the ones + // listed in RFC9110 + // and the PATCH method defined in RFC5789.If the HTTP request method is not known + // to instrumentation, it MUST set the http.request.method attribute to _OTHER.If + // the HTTP instrumentation could end up converting valid HTTP request methods to + // _OTHER, then it MUST provide a way to override + // the list of known HTTP methods. If this override is done via environment + // variable, then the environment variable MUST be named + // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated list of + // case-sensitive known HTTP methods + // (this list MUST be a full override of the default known method, it is not a + // list of known methods in addition to the defaults).HTTP method names are case- + // sensitive and http.request.method attribute value MUST match a known HTTP + // method name exactly. + // Instrumentations for specific web frameworks that consider HTTP methods to be + // case insensitive, SHOULD populate a canonical equivalent. + // Tracing instrumentations that do so, MUST also set http.request.method_original + // to the original value. + AttributeHTTPRequestMethod = "http.request.method" + // Original HTTP method sent by the client in the request line. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'GeT', 'ACL', 'foo' + AttributeHTTPRequestMethodOriginal = "http.request.method_original" + // The ordinal number of request resending attempt (for any reason, including + // redirects). + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 3 + // Note: The resend count SHOULD be updated each time an HTTP request gets resent + // by the client, regardless of what was the cause of the resending (e.g. + // redirection, authorization failure, 503 Server Unavailable, network issues, or + // any other). + AttributeHTTPRequestResendCount = "http.request.resend_count" + // The total size of the request in bytes. This should be the total number of + // bytes sent over the wire, including the request line (HTTP/1.1), framing + // (HTTP/2 and HTTP/3), headers, and request body if any. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1437 + AttributeHTTPRequestSize = "http.request.size" + // The size of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as the + // Content-Length header. For requests using transport encoding, this should be + // the compressed size. 
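[Editorial sketch: the http.request.method note above in code, same imports as earlier. A method outside the known RFC9110/RFC5789 set is reported as _OTHER, with the raw value preserved in http.request.method_original.]

    // normalizeMethod follows the http.request.method note above.
    func normalizeMethod(span trace.Span, raw string) {
        known := map[string]bool{
            "GET": true, "POST": true, "PUT": true, "DELETE": true, "HEAD": true,
            "OPTIONS": true, "PATCH": true, "CONNECT": true, "TRACE": true,
        }
        if known[raw] {
            span.SetAttributes(attribute.String(AttributeHTTPRequestMethod, raw))
            return
        }
        span.SetAttributes(
            attribute.String(AttributeHTTPRequestMethod, AttributeHTTPRequestMethodOther),
            attribute.String(AttributeHTTPRequestMethodOriginal, raw),
        )
    }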
+ // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 3495 + AttributeHTTPResponseBodySize = "http.response.body.size" + // The total size of the response in bytes. This should be the total number of + // bytes sent over the wire, including the status line (HTTP/1.1), framing (HTTP/2 + // and HTTP/3), headers, and response body and trailers if any. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1437 + AttributeHTTPResponseSize = "http.response.size" + // HTTP response status code. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 200 + AttributeHTTPResponseStatusCode = "http.response.status_code" + // The matched route, that is, the path template in the format used by the + // respective server framework. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '/users/:userID?', '{controller}/{action}/{id?}' + // Note: MUST NOT be populated when this is not supported by the HTTP server + // framework as the route attribute should have low-cardinality and the URI path + // can NOT substitute it. + // SHOULD include the application root if there is one. + AttributeHTTPRoute = "http.route" +) + +const ( + // active state + AttributeHTTPConnectionStateActive = "active" + // idle state + AttributeHTTPConnectionStateIdle = "idle" +) + +const ( + // CONNECT method + AttributeHTTPRequestMethodConnect = "CONNECT" + // DELETE method + AttributeHTTPRequestMethodDelete = "DELETE" + // GET method + AttributeHTTPRequestMethodGet = "GET" + // HEAD method + AttributeHTTPRequestMethodHead = "HEAD" + // OPTIONS method + AttributeHTTPRequestMethodOptions = "OPTIONS" + // PATCH method + AttributeHTTPRequestMethodPatch = "PATCH" + // POST method + AttributeHTTPRequestMethodPost = "POST" + // PUT method + AttributeHTTPRequestMethodPut = "PUT" + // TRACE method + AttributeHTTPRequestMethodTrace = "TRACE" + // Any HTTP method that the instrumentation has no prior knowledge of + AttributeHTTPRequestMethodOther = "_OTHER" +) + +// Kubernetes resource attributes. +const ( + // The name of the cluster. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry-cluster' + AttributeK8SClusterName = "k8s.cluster.name" + // A pseudo-ID for the cluster, set to the UID of the kube-system namespace. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d' + // Note: K8S doesn't have support for obtaining a cluster ID. If this is ever + // added, we will recommend collecting the k8s.cluster.uid through the + // official APIs. In the meantime, we are able to use the uid of the + // kube-system namespace as a proxy for cluster ID. Read on for the + // rationale.Every object created in a K8S cluster is assigned a distinct UID. The + // kube-system namespace is used by Kubernetes itself and will exist + // for the lifetime of the cluster. Using the uid of the kube-system + // namespace is a reasonable proxy for the K8S ClusterID as it will only + // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are + // UUIDs as standardized by + // ISO/IEC 9834-8 and ITU-T X.667. + // Which states:
    + // If generated according to one of the mechanisms defined in Rec.
    + // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be + // different from all other UUIDs generated before 3603 A.D., or is + // extremely likely to be different (depending on the mechanism + // chosen).Therefore, UIDs between clusters should be extremely unlikely to + // conflict. + AttributeK8SClusterUID = "k8s.cluster.uid" + // The name of the Container from Pod specification, must be unique within a Pod. + // Container runtime usually uses different globally unique name (container.name). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'redis' + AttributeK8SContainerName = "k8s.container.name" + // Number of times the container was restarted. This attribute can be used to + // identify a particular container (running or stopped) within a container spec. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 0, 2 + AttributeK8SContainerRestartCount = "k8s.container.restart_count" + // The name of the CronJob. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SCronJobName = "k8s.cronjob.name" + // The UID of the CronJob. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SCronJobUID = "k8s.cronjob.uid" + // The name of the DaemonSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SDaemonSetName = "k8s.daemonset.name" + // The UID of the DaemonSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SDaemonSetUID = "k8s.daemonset.uid" + // The name of the Deployment. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SDeploymentName = "k8s.deployment.name" + // The UID of the Deployment. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SDeploymentUID = "k8s.deployment.uid" + // The name of the Job. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SJobName = "k8s.job.name" + // The UID of the Job. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SJobUID = "k8s.job.uid" + // The name of the namespace that the pod is running in. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'default' + AttributeK8SNamespaceName = "k8s.namespace.name" + // The name of the Node. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'node-1' + AttributeK8SNodeName = "k8s.node.name" + // The UID of the Node. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + AttributeK8SNodeUID = "k8s.node.uid" + // The name of the Pod. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry-pod-autoconf' + AttributeK8SPodName = "k8s.pod.name" + // The UID of the Pod. 
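[Editorial sketch: identifying a Kubernetes workload with the k8s.* resource keys defined above, same imports as the container resource sketch; values are illustrative.]

    // podResource identifies the pod this process runs in.
    func podResource() *resource.Resource {
        return resource.NewWithAttributes(
            "", // schema URL omitted in this sketch
            attribute.String(AttributeK8SClusterName, "opentelemetry-cluster"),
            attribute.String(AttributeK8SNamespaceName, "default"),
            attribute.String(AttributeK8SPodName, "opentelemetry-pod-autoconf"),
            attribute.String(AttributeK8SContainerName, "redis"),
        )
    }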
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SPodUID = "k8s.pod.uid" + // The name of the ReplicaSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SReplicaSetName = "k8s.replicaset.name" + // The UID of the ReplicaSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SReplicaSetUID = "k8s.replicaset.uid" + // The name of the StatefulSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SStatefulSetName = "k8s.statefulset.name" + // The UID of the StatefulSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SStatefulSetUID = "k8s.statefulset.uid" +) + +// Attributes describing telemetry around messaging systems and messaging +// activities. +const ( + // The number of messages sent, received, or processed in the scope of the + // batching operation. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set messaging.batch.message_count on spans + // that operate with a single message. When a messaging client library supports + // both batch and single-message API for the same operation, instrumentations + // SHOULD use messaging.batch.message_count for batching APIs and SHOULD NOT use + // it for single-message APIs. + AttributeMessagingBatchMessageCount = "messaging.batch.message_count" + // A unique identifier for the client that consumes or produces a message. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'client-5', 'myhost@8742@s8083jm' + AttributeMessagingClientID = "messaging.client_id" + // A boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeMessagingDestinationAnonymous = "messaging.destination.anonymous" + // The message destination name + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MyQueue', 'MyTopic' + // Note: Destination name SHOULD uniquely identify a specific queue, topic or + // other entity within the broker. If + // the broker doesn't have such notion, the destination name SHOULD uniquely + // identify the broker. + AttributeMessagingDestinationName = "messaging.destination.name" + // The identifier of the partition messages are sent to or received from, unique + // within the messaging.destination.name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1' + AttributeMessagingDestinationPartitionID = "messaging.destination.partition.id" + // Low cardinality representation of the messaging destination name + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/customers/{customerID}' + // Note: Destination names could be constructed from templates. An example would + // be a destination name involving a user name or product id. 
Although the + // destination name in this case is of high cardinality, the underlying template + // is of low cardinality and can be effectively used for grouping and aggregation. + AttributeMessagingDestinationTemplate = "messaging.destination.template" + // A boolean that is true if the message destination is temporary and might not + // exist anymore after messages are processed. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeMessagingDestinationTemporary = "messaging.destination.temporary" + // A boolean that is true if the publish message destination is anonymous (could + // be unnamed or have auto-generated name). + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeMessagingDestinationPublishAnonymous = "messaging.destination_publish.anonymous" + // The name of the original destination the message was published to + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MyQueue', 'MyTopic' + // Note: The name SHOULD uniquely identify a specific queue, topic, or other + // entity within the broker. If + // the broker doesn't have such notion, the original destination name SHOULD + // uniquely identify the broker. + AttributeMessagingDestinationPublishName = "messaging.destination_publish.name" + // The name of the consumer group the event consumer is associated with. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'indexer' + AttributeMessagingEventhubsConsumerGroup = "messaging.eventhubs.consumer.group" + // The UTC epoch seconds at which the message has been accepted and stored in the + // entity. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1701393730 + AttributeMessagingEventhubsMessageEnqueuedTime = "messaging.eventhubs.message.enqueued_time" + // The ordering key for a given message. If the attribute is not present, the + // message does not have an ordering key. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'ordering_key' + AttributeMessagingGCPPubsubMessageOrderingKey = "messaging.gcp_pubsub.message.ordering_key" + // Name of the Kafka Consumer Group that is handling the message. Only applies to + // consumers, not producers. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'my-group' + AttributeMessagingKafkaConsumerGroup = "messaging.kafka.consumer.group" + // Message keys in Kafka are used for grouping alike messages to ensure they're + // processed on the same partition. They differ from messaging.message.id in that + // they're not unique. If the key is null, the attribute MUST NOT be set. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myKey' + // Note: If the key type is not string, its string representation has to be + // supplied for the attribute. If the key has no unambiguous, canonical string + // form, don't include its value. + AttributeMessagingKafkaMessageKey = "messaging.kafka.message.key" + // The offset of a record in the corresponding Kafka partition. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 42 + AttributeMessagingKafkaMessageOffset = "messaging.kafka.message.offset" + // A boolean that is true if the message is a tombstone.
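// [editor's illustrative aside, not generated content] A minimal sketch of how the
// messaging constants above might be attached to a Kafka producer span, assuming this
// package's constants are in scope and go.opentelemetry.io/otel/attribute is imported
// as attribute; the span variable, topic, key, and offset are hypothetical:
//
//	span.SetAttributes(
//		attribute.String(AttributeMessagingSystem, AttributeMessagingSystemKafka),
//		attribute.String(AttributeMessagingDestinationName, "orders"),
//		attribute.String(AttributeMessagingKafkaMessageKey, "order-1234"),
//		attribute.Int(AttributeMessagingKafkaMessageOffset, 42),
//	)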
+ // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeMessagingKafkaMessageTombstone = "messaging.kafka.message.tombstone" + // The size of the message body in bytes. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1439 + // Note: This can refer to both the compressed or uncompressed body size. If both + // sizes are known, the uncompressed + // body size should be used. + AttributeMessagingMessageBodySize = "messaging.message.body.size" + // The conversation ID identifying the conversation to which the message belongs, + // represented as a string. Sometimes called "Correlation ID". + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MyConversationID' + AttributeMessagingMessageConversationID = "messaging.message.conversation_id" + // The size of the message body and metadata in bytes. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 2738 + // Note: This can refer to both the compressed or uncompressed size. If both sizes + // are known, the uncompressed + // size should be used. + AttributeMessagingMessageEnvelopeSize = "messaging.message.envelope.size" + // A value used by the messaging system as an identifier for the message, + // represented as a string. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + AttributeMessagingMessageID = "messaging.message.id" + // A string identifying the kind of messaging operation. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: If a custom value is used, it MUST be of low cardinality. + AttributeMessagingOperation = "messaging.operation" + // RabbitMQ message routing key. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myKey' + AttributeMessagingRabbitmqDestinationRoutingKey = "messaging.rabbitmq.destination.routing_key" + // RabbitMQ message delivery tag + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 123 + AttributeMessagingRabbitmqMessageDeliveryTag = "messaging.rabbitmq.message.delivery_tag" + // Name of the RocketMQ producer/consumer group that is handling the message. The + // client type is identified by the SpanKind. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myConsumerGroup' + AttributeMessagingRocketmqClientGroup = "messaging.rocketmq.client_group" + // Model of message consumption. This only applies to consumer spans. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeMessagingRocketmqConsumptionModel = "messaging.rocketmq.consumption_model" + // The delay time level for delay message, which determines the message delay + // time. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 3 + AttributeMessagingRocketmqMessageDelayTimeLevel = "messaging.rocketmq.message.delay_time_level" + // The timestamp in milliseconds that the delay message is expected to be + // delivered to consumer. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1665987217045 + AttributeMessagingRocketmqMessageDeliveryTimestamp = "messaging.rocketmq.message.delivery_timestamp" + // It is essential for FIFO message. 
Messages that belong to the same message + // group are always processed one by one within the same consumer group. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myMessageGroup' + AttributeMessagingRocketmqMessageGroup = "messaging.rocketmq.message.group" + // Key(s) of message, another way to mark message besides message id. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'keyA', 'keyB' + AttributeMessagingRocketmqMessageKeys = "messaging.rocketmq.message.keys" + // The secondary classifier of message besides topic. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'tagA' + AttributeMessagingRocketmqMessageTag = "messaging.rocketmq.message.tag" + // Type of message. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeMessagingRocketmqMessageType = "messaging.rocketmq.message.type" + // Namespace of RocketMQ resources, resources in different namespaces are + // individual. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myNamespace' + AttributeMessagingRocketmqNamespace = "messaging.rocketmq.namespace" + // The name of the subscription in the topic messages are received from. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'mySubscription' + AttributeMessagingServicebusDestinationSubscriptionName = "messaging.servicebus.destination.subscription_name" + // Describes the settlement type. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeMessagingServicebusDispositionStatus = "messaging.servicebus.disposition_status" + // Number of deliveries that have been attempted for this message. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 2 + AttributeMessagingServicebusMessageDeliveryCount = "messaging.servicebus.message.delivery_count" + // The UTC epoch seconds at which the message has been accepted and stored in the + // entity. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1701393730 + AttributeMessagingServicebusMessageEnqueuedTime = "messaging.servicebus.message.enqueued_time" + // An identifier for the messaging system being used. See below for a list of + // well-known identifiers. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeMessagingSystem = "messaging.system" +) + +const ( + // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created + AttributeMessagingOperationPublish = "publish" + // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios + AttributeMessagingOperationCreate = "create" + // One or more messages are requested by a consumer. 
This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages + AttributeMessagingOperationReceive = "receive" + // One or more messages are delivered to or processed by a consumer + AttributeMessagingOperationDeliver = "process" + // One or more messages are settled + AttributeMessagingOperationSettle = "settle" +) + +const ( + // Clustering consumption model + AttributeMessagingRocketmqConsumptionModelClustering = "clustering" + // Broadcasting consumption model + AttributeMessagingRocketmqConsumptionModelBroadcasting = "broadcasting" +) + +const ( + // Normal message + AttributeMessagingRocketmqMessageTypeNormal = "normal" + // FIFO message + AttributeMessagingRocketmqMessageTypeFifo = "fifo" + // Delay message + AttributeMessagingRocketmqMessageTypeDelay = "delay" + // Transaction message + AttributeMessagingRocketmqMessageTypeTransaction = "transaction" +) + +const ( + // Message is completed + AttributeMessagingServicebusDispositionStatusComplete = "complete" + // Message is abandoned + AttributeMessagingServicebusDispositionStatusAbandon = "abandon" + // Message is sent to dead letter queue + AttributeMessagingServicebusDispositionStatusDeadLetter = "dead_letter" + // Message is deferred + AttributeMessagingServicebusDispositionStatusDefer = "defer" +) + +const ( + // Apache ActiveMQ + AttributeMessagingSystemActivemq = "activemq" + // Amazon Simple Queue Service (SQS) + AttributeMessagingSystemAWSSqs = "aws_sqs" + // Azure Event Grid + AttributeMessagingSystemEventgrid = "eventgrid" + // Azure Event Hubs + AttributeMessagingSystemEventhubs = "eventhubs" + // Azure Service Bus + AttributeMessagingSystemServicebus = "servicebus" + // Google Cloud Pub/Sub + AttributeMessagingSystemGCPPubsub = "gcp_pubsub" + // Java Message Service + AttributeMessagingSystemJms = "jms" + // Apache Kafka + AttributeMessagingSystemKafka = "kafka" + // RabbitMQ + AttributeMessagingSystemRabbitmq = "rabbitmq" + // Apache RocketMQ + AttributeMessagingSystemRocketmq = "rocketmq" +) + +// These attributes may be used for any network related operation. +const ( + // The ISO 3166-1 alpha-2 2-character country code associated with the mobile + // carrier network. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'DE' + AttributeNetworkCarrierIcc = "network.carrier.icc" + // The mobile carrier country code. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '310' + AttributeNetworkCarrierMcc = "network.carrier.mcc" + // The mobile carrier network code. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '001' + AttributeNetworkCarrierMnc = "network.carrier.mnc" + // The name of the mobile carrier. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'sprint' + AttributeNetworkCarrierName = "network.carrier.name" + // This describes more details regarding the connection.type. It may be the type + // of cell technology connection, but it could be used for describing details + // about a wifi connection. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'LTE' + AttributeNetworkConnectionSubtype = "network.connection.subtype" + // The internet connection type. 
+ // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'wifi' + AttributeNetworkConnectionType = "network.connection.type" + // The network IO operation direction. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'transmit' + AttributeNetworkIoDirection = "network.io.direction" + // Local address of the network connection - IP address or Unix domain socket + // name. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '10.1.2.80', '/tmp/my.sock' + AttributeNetworkLocalAddress = "network.local.address" + // Local port number of the network connection. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 65123 + AttributeNetworkLocalPort = "network.local.port" + // Peer address of the network connection - IP address or Unix domain socket name. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '10.1.2.80', '/tmp/my.sock' + AttributeNetworkPeerAddress = "network.peer.address" + // Peer port number of the network connection. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 65123 + AttributeNetworkPeerPort = "network.peer.port" + // OSI application layer or non-OSI equivalent. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'amqp', 'http', 'mqtt' + // Note: The value SHOULD be normalized to lowercase. + AttributeNetworkProtocolName = "network.protocol.name" + // The actual version of the protocol used for network communication. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '1.1', '2' + // Note: If protocol version is subject to negotiation (for example using ALPN), + // this attribute SHOULD be set to the negotiated version. If the actual protocol + // version is not known, this attribute SHOULD NOT be set. + AttributeNetworkProtocolVersion = "network.protocol.version" + // OSI transport layer or inter-process communication method. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'tcp', 'udp' + // Note: The value SHOULD be normalized to lowercase.Consider always setting the + // transport when setting a port number, since + // a port number is ambiguous without knowing the transport. For example + // different processes could be listening on TCP port 12345 and UDP port 12345. + AttributeNetworkTransport = "network.transport" + // OSI network layer or non-OSI equivalent. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'ipv4', 'ipv6' + // Note: The value SHOULD be normalized to lowercase. + AttributeNetworkType = "network.type" +) + +const ( + // GPRS + AttributeNetworkConnectionSubtypeGprs = "gprs" + // EDGE + AttributeNetworkConnectionSubtypeEdge = "edge" + // UMTS + AttributeNetworkConnectionSubtypeUmts = "umts" + // CDMA + AttributeNetworkConnectionSubtypeCdma = "cdma" + // EVDO Rel. 0 + AttributeNetworkConnectionSubtypeEvdo0 = "evdo_0" + // EVDO Rev. A + AttributeNetworkConnectionSubtypeEvdoA = "evdo_a" + // CDMA2000 1XRTT + AttributeNetworkConnectionSubtypeCdma20001xrtt = "cdma2000_1xrtt" + // HSDPA + AttributeNetworkConnectionSubtypeHsdpa = "hsdpa" + // HSUPA + AttributeNetworkConnectionSubtypeHsupa = "hsupa" + // HSPA + AttributeNetworkConnectionSubtypeHspa = "hspa" + // IDEN + AttributeNetworkConnectionSubtypeIden = "iden" + // EVDO Rev. 
B + AttributeNetworkConnectionSubtypeEvdoB = "evdo_b" + // LTE + AttributeNetworkConnectionSubtypeLte = "lte" + // EHRPD + AttributeNetworkConnectionSubtypeEhrpd = "ehrpd" + // HSPAP + AttributeNetworkConnectionSubtypeHspap = "hspap" + // GSM + AttributeNetworkConnectionSubtypeGsm = "gsm" + // TD-SCDMA + AttributeNetworkConnectionSubtypeTdScdma = "td_scdma" + // IWLAN + AttributeNetworkConnectionSubtypeIwlan = "iwlan" + // 5G NR (New Radio) + AttributeNetworkConnectionSubtypeNr = "nr" + // 5G NRNSA (New Radio Non-Standalone) + AttributeNetworkConnectionSubtypeNrnsa = "nrnsa" + // LTE CA + AttributeNetworkConnectionSubtypeLteCa = "lte_ca" +) + +const ( + // wifi + AttributeNetworkConnectionTypeWifi = "wifi" + // wired + AttributeNetworkConnectionTypeWired = "wired" + // cell + AttributeNetworkConnectionTypeCell = "cell" + // unavailable + AttributeNetworkConnectionTypeUnavailable = "unavailable" + // unknown + AttributeNetworkConnectionTypeUnknown = "unknown" +) + +const ( + // transmit + AttributeNetworkIoDirectionTransmit = "transmit" + // receive + AttributeNetworkIoDirectionReceive = "receive" +) + +const ( + // TCP + AttributeNetworkTransportTCP = "tcp" + // UDP + AttributeNetworkTransportUDP = "udp" + // Named or anonymous pipe + AttributeNetworkTransportPipe = "pipe" + // Unix domain socket + AttributeNetworkTransportUnix = "unix" +) + +const ( + // IPv4 + AttributeNetworkTypeIpv4 = "ipv4" + // IPv6 + AttributeNetworkTypeIpv6 = "ipv6" +) + +// An OCI image manifest. +const ( + // The digest of the OCI image manifest. For container images specifically is the + // digest by which the container image is known. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: + // 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4' + // Note: Follows OCI Image Manifest Specification, and specifically the Digest + // property. + // An example can be found in Example Image Manifest. + AttributeOciManifestDigest = "oci.manifest.digest" +) + +// The operating system (OS) on which the process represented by this resource +// is running. +const ( + // Unique identifier for a particular build or compilation of the operating + // system. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'TQ3C.230805.001.B2', '20E247', '22621' + AttributeOSBuildID = "os.build_id" + // Human readable (not intended to be parsed) OS version information, like e.g. + // reported by ver or lsb_release -a commands. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS' + AttributeOSDescription = "os.description" + // Human readable operating system name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'iOS', 'Android', 'Ubuntu' + AttributeOSName = "os.name" + // The operating system type. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeOSType = "os.type" + // The version string of the operating system as defined in Version Attributes. 
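// [editor's illustrative aside, not generated content] A hedged sketch of recording the
// network enum values above together; per the network.transport note, the transport
// should accompany any port number since a port is ambiguous without it. Assumes
// go.opentelemetry.io/otel/attribute is imported as attribute; span and values are
// hypothetical:
//
//	span.SetAttributes(
//		attribute.String(AttributeNetworkTransport, AttributeNetworkTransportTCP),
//		attribute.String(AttributeNetworkType, AttributeNetworkTypeIpv4),
//		attribute.String(AttributeNetworkPeerAddress, "10.1.2.80"),
//		attribute.Int(AttributeNetworkPeerPort, 65123),
//	)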
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '14.2.1', '18.04.1' + AttributeOSVersion = "os.version" +) + +const ( + // Microsoft Windows + AttributeOSTypeWindows = "windows" + // Linux + AttributeOSTypeLinux = "linux" + // Apple Darwin + AttributeOSTypeDarwin = "darwin" + // FreeBSD + AttributeOSTypeFreeBSD = "freebsd" + // NetBSD + AttributeOSTypeNetBSD = "netbsd" + // OpenBSD + AttributeOSTypeOpenBSD = "openbsd" + // DragonFly BSD + AttributeOSTypeDragonflyBSD = "dragonflybsd" + // HP-UX (Hewlett Packard Unix) + AttributeOSTypeHPUX = "hpux" + // AIX (Advanced Interactive eXecutive) + AttributeOSTypeAIX = "aix" + // SunOS, Oracle Solaris + AttributeOSTypeSolaris = "solaris" + // IBM z/OS + AttributeOSTypeZOS = "z_os" +) + +// An operating system process. +const ( + // The command used to launch the process (i.e. the command name). On Linux based + // systems, can be set to the zeroth string in proc/[pid]/cmdline. On Windows, can + // be set to the first parameter extracted from GetCommandLineW. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'cmd/otelcol' + AttributeProcessCommand = "process.command" + // All the command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited strings + // extracted from proc/[pid]/cmdline. For libc-based executables, this would be + // the full argv vector passed to main. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'cmd/otecol', '--config=config.yaml' + AttributeProcessCommandArgs = "process.command_args" + // The full command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of GetCommandLineW. Do not + // set this if you have to assemble it just for monitoring; use + // process.command_args instead. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' + AttributeProcessCommandLine = "process.command_line" + // The name of the process executable. On Linux based systems, can be set to the + // Name in proc/[pid]/status. On Windows, can be set to the base name of + // GetProcessImageFileNameW. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'otelcol' + AttributeProcessExecutableName = "process.executable.name" + // The full path to the process executable. On Linux based systems, can be set to + // the target of proc/[pid]/exe. On Windows, can be set to the result of + // GetProcessImageFileNameW. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/usr/bin/cmd/otelcol' + AttributeProcessExecutablePath = "process.executable.path" + // The username of the user that owns the process. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'root' + AttributeProcessOwner = "process.owner" + // Parent Process identifier (PPID). + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 111 + AttributeProcessParentPID = "process.parent_pid" + // Process identifier (PID). 
+ // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1234 + AttributeProcessPID = "process.pid" + // An additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + AttributeProcessRuntimeDescription = "process.runtime.description" + // The name of the runtime of this process. For compiled native binaries, this + // SHOULD be the name of the compiler. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'OpenJDK Runtime Environment' + AttributeProcessRuntimeName = "process.runtime.name" + // The version of the runtime of this process, as returned by the runtime without + // modification. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '14.0.2' + AttributeProcessRuntimeVersion = "process.runtime.version" +) + +// Attributes for remote procedure calls. +const ( + // The error codes of the Connect request. Error codes are always string values. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeRPCConnectRPCErrorCode = "rpc.connect_rpc.error_code" + // The numeric status code of the gRPC request. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeRPCGRPCStatusCode = "rpc.grpc.status_code" + // error.code property of response if it is an error response. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: -32700, 100 + AttributeRPCJsonrpcErrorCode = "rpc.jsonrpc.error_code" + // error.message property of response if it is an error response. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Parse error', 'User already exists' + AttributeRPCJsonrpcErrorMessage = "rpc.jsonrpc.error_message" + // id property of request or response. Since protocol allows id to be int, string, + // null or missing (for notifications), value is expected to be cast to string for + // simplicity. Use empty string in case of null value. Omit entirely if this is a + // notification. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '10', 'request-7', '' + AttributeRPCJsonrpcRequestID = "rpc.jsonrpc.request_id" + // Protocol version as in jsonrpc property of request/response. Since JSON-RPC 1.0 + // doesn't specify this, the value can be omitted. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2.0', '1.0' + AttributeRPCJsonrpcVersion = "rpc.jsonrpc.version" + // The name of the (logical) method being called, must be equal to the $method + // part in the span name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The code.function attribute may be used to store the latter + // (e.g., method actually executing the call on the server side, RPC client stub + // method on the client side). + AttributeRPCMethod = "rpc.method" + // The full (logical) name of the service being called, including its package + // name, if applicable. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing class. + // The code.namespace attribute may be used to store the latter (despite the + // attribute name, it may include a class name; e.g., class with method actually + // executing the call on the server side, RPC client stub class on the client + // side). + AttributeRPCService = "rpc.service" + // A string identifying the remoting system. See below for a list of well-known + // identifiers. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeRPCSystem = "rpc.system" +) + +const ( + // cancelled + AttributeRPCConnectRPCErrorCodeCancelled = "cancelled" + // unknown + AttributeRPCConnectRPCErrorCodeUnknown = "unknown" + // invalid_argument + AttributeRPCConnectRPCErrorCodeInvalidArgument = "invalid_argument" + // deadline_exceeded + AttributeRPCConnectRPCErrorCodeDeadlineExceeded = "deadline_exceeded" + // not_found + AttributeRPCConnectRPCErrorCodeNotFound = "not_found" + // already_exists + AttributeRPCConnectRPCErrorCodeAlreadyExists = "already_exists" + // permission_denied + AttributeRPCConnectRPCErrorCodePermissionDenied = "permission_denied" + // resource_exhausted + AttributeRPCConnectRPCErrorCodeResourceExhausted = "resource_exhausted" + // failed_precondition + AttributeRPCConnectRPCErrorCodeFailedPrecondition = "failed_precondition" + // aborted + AttributeRPCConnectRPCErrorCodeAborted = "aborted" + // out_of_range + AttributeRPCConnectRPCErrorCodeOutOfRange = "out_of_range" + // unimplemented + AttributeRPCConnectRPCErrorCodeUnimplemented = "unimplemented" + // internal + AttributeRPCConnectRPCErrorCodeInternal = "internal" + // unavailable + AttributeRPCConnectRPCErrorCodeUnavailable = "unavailable" + // data_loss + AttributeRPCConnectRPCErrorCodeDataLoss = "data_loss" + // unauthenticated + AttributeRPCConnectRPCErrorCodeUnauthenticated = "unauthenticated" +) + +const ( + // OK + AttributeRPCGRPCStatusCodeOk = "0" + // CANCELLED + AttributeRPCGRPCStatusCodeCancelled = "1" + // UNKNOWN + AttributeRPCGRPCStatusCodeUnknown = "2" + // INVALID_ARGUMENT + AttributeRPCGRPCStatusCodeInvalidArgument = "3" + // DEADLINE_EXCEEDED + AttributeRPCGRPCStatusCodeDeadlineExceeded = "4" + // NOT_FOUND + AttributeRPCGRPCStatusCodeNotFound = "5" + // ALREADY_EXISTS + AttributeRPCGRPCStatusCodeAlreadyExists = "6" + // PERMISSION_DENIED + AttributeRPCGRPCStatusCodePermissionDenied = "7" + // RESOURCE_EXHAUSTED + AttributeRPCGRPCStatusCodeResourceExhausted = "8" + // FAILED_PRECONDITION + AttributeRPCGRPCStatusCodeFailedPrecondition = "9" + // ABORTED + AttributeRPCGRPCStatusCodeAborted = "10" + // OUT_OF_RANGE + AttributeRPCGRPCStatusCodeOutOfRange = "11" + // UNIMPLEMENTED + AttributeRPCGRPCStatusCodeUnimplemented = "12" + // INTERNAL + AttributeRPCGRPCStatusCodeInternal = "13" + // UNAVAILABLE + AttributeRPCGRPCStatusCodeUnavailable = "14" + // DATA_LOSS + AttributeRPCGRPCStatusCodeDataLoss = "15" + // UNAUTHENTICATED + AttributeRPCGRPCStatusCodeUnauthenticated = "16" +) + +const ( + // gRPC + AttributeRPCSystemGRPC = "grpc" + // Java RMI + AttributeRPCSystemJavaRmi = "java_rmi" + // .NET WCF + AttributeRPCSystemDotnetWcf = "dotnet_wcf" + // Apache Dubbo + AttributeRPCSystemApacheDubbo = "apache_dubbo" + // Connect RPC + AttributeRPCSystemConnectRPC = "connect_rpc" +) + +// These 
attributes may be used to describe the server in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). This covers all TCP +// network interactions since TCP is connection-based and one side initiates +// the connection (an exception is made for peer-to-peer communication over TCP +// where the "user-facing" surface of the protocol / API doesn't expose a clear +// notion of client and server). This also covers UDP network interactions +// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. +const ( + // Server domain name if available without reverse DNS lookup; otherwise, IP + // address or Unix domain socket name. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the client side, and when communicating through an + // intermediary, server.address SHOULD represent the server address behind any + // intermediaries, for example proxies, if it's available. + AttributeServerAddress = "server.address" + // Server port number. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 80, 8080, 443 + // Note: When observed from the client side, and when communicating through an + // intermediary, server.port SHOULD represent the server port behind any + // intermediaries, for example proxies, if it's available. + AttributeServerPort = "server.port" +) + +// A service instance. +const ( + // The string ID of the service instance. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // service.namespace,service.name pair (in other words + // service.namespace,service.name,service.instance.id triplet MUST be globally + // unique). The ID helps to + // distinguish instances of the same service that exist at the same time (e.g. + // instances of a horizontally scaled + // service).Implementations, such as SDKs, are recommended to generate a random + // Version 1 or Version 4 RFC + // 4122 UUID, but are free to use an inherent unique ID as the source of + // this value if stability is desirable. In that case, the ID SHOULD be used as + // source of a UUID Version 5 and + // SHOULD use the following UUID as the namespace: 4d63009a-8d0f-11ee- + // aad7-4c796ed8e320.UUIDs are typically recommended, as only an opaque value for + // the purposes of identifying a service instance is + // needed. Similar to what can be seen in the man page for the + // /etc/machine-id file, the underlying + // data, such as pod name and namespace should be treated as confidential, being + // the user's choice to expose it + // or not via another resource attribute.For applications running behind an + // application server (like unicorn), we do not recommend using one identifier + // for all processes participating in the application. Instead, it's recommended + // each division (e.g. a worker + // thread in unicorn) to have its own instance.id.It's not recommended for a + // Collector to set service.instance.id if it can't unambiguously determine the + // service instance that is generating that telemetry. For instance, creating an + // UUID based on pod.name will + // likely be wrong, as the Collector might not know from which container within + // that pod the telemetry originated. 
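// [editor's illustrative aside, not generated content] A small sketch of a client-side
// span carrying the server attributes defined above, which per their notes SHOULD
// reflect the server behind any intermediaries. Assumes go.opentelemetry.io/otel,
// go.opentelemetry.io/otel/trace, and go.opentelemetry.io/otel/attribute are imported;
// tracer name and values are hypothetical:
//
//	ctx, span := otel.Tracer("example").Start(ctx, "GET /users",
//		trace.WithAttributes(
//			attribute.String(AttributeServerAddress, "example.com"),
//			attribute.Int(AttributeServerPort, 443),
//		))
//	defer span.End()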
+ // However, Collectors can set the service.instance.id if they can unambiguously + // determine the service instance + // for that telemetry. This is typically the case for scraping receivers, as they + // know the target address and + // port. + AttributeServiceInstanceID = "service.instance.id" + // Logical name of the service. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled services. If + // the value was not specified, SDKs MUST fallback to unknown_service: + // concatenated with process.executable.name, e.g. unknown_service:bash. If + // process.executable.name is not available, the value MUST be set to + // unknown_service. + AttributeServiceName = "service.name" + // A namespace for service.name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group of + // services, for example the team name that owns a group of services. service.name + // is expected to be unique within the same namespace. If service.namespace is not + // specified in the Resource then service.name is expected to be unique for all + // services that have no explicit namespace defined (so the empty/unspecified + // namespace is simply one more valid namespace). Zero-length namespace string is + // assumed equal to unspecified namespace. + AttributeServiceNamespace = "service.namespace" + // The version string of the service API or implementation. The format is not + // defined by these conventions. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '2.0.0', 'a01dbef8a' + AttributeServiceVersion = "service.version" +) + +// Session is defined as the period of time encompassing all activities +// performed by the application and the actions executed by the end user. +// Consequently, a Session is represented as a collection of Logs, Events, and +// Spans emitted by the Client Application throughout the Session's duration. +// Each Session is assigned a unique identifier, which is included as an +// attribute in the Logs, Events, and Spans generated during the Session's +// lifecycle. +// When a session reaches end of life, typically due to user inactivity or +// session timeout, a new session identifier will be assigned. The previous +// session identifier may be provided by the instrumentation so that telemetry +// backends can link the two sessions. +const ( + // A unique id to identify a session. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '00112233-4455-6677-8899-aabbccddeeff' + AttributeSessionID = "session.id" + // The previous session.id for this user, when known. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '00112233-4455-6677-8899-aabbccddeeff' + AttributeSessionPreviousID = "session.previous_id" +) + +// These attributes may be used to describe the sender of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. 
This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // Source address - domain name if available without reverse DNS lookup; + // otherwise, IP address or Unix domain socket name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the destination side, and when communicating through + // an intermediary, source.address SHOULD represent the source address behind any + // intermediaries, for example proxies, if it's available. + AttributeSourceAddress = "source.address" + // Source port number + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 3389, 2888 + AttributeSourcePort = "source.port" +) + +// Attributes for telemetry SDK. +const ( + // The language of the telemetry SDK. + // + // Type: Enum + // Requirement Level: Required + // Stability: stable + AttributeTelemetrySDKLanguage = "telemetry.sdk.language" + // The name of the telemetry SDK as defined above. + // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: 'opentelemetry' + // Note: The OpenTelemetry SDK MUST set the telemetry.sdk.name attribute to + // opentelemetry. + // If another SDK, like a fork or a vendor-provided implementation, is used, this + // SDK MUST set the + // telemetry.sdk.name attribute to the fully-qualified class or module name of + // this SDK's main entry point + // or another suitable identifier depending on the language. + // The identifier opentelemetry is reserved and MUST NOT be used in this case. + // All custom identifiers SHOULD be stable across different versions of an + // implementation. + AttributeTelemetrySDKName = "telemetry.sdk.name" + // The version string of the telemetry SDK. + // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: '1.2.3' + AttributeTelemetrySDKVersion = "telemetry.sdk.version" + // The name of the auto instrumentation agent or distribution, if used. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'parts-unlimited-java' + // Note: Official auto instrumentation agents and distributions SHOULD set the + // telemetry.distro.name attribute to + // a string starting with opentelemetry-, e.g. opentelemetry-java-instrumentation. + AttributeTelemetryDistroName = "telemetry.distro.name" + // The version string of the auto instrumentation agent or distribution, if used. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1.2.3' + AttributeTelemetryDistroVersion = "telemetry.distro.version" +) + +const ( + // cpp + AttributeTelemetrySDKLanguageCPP = "cpp" + // dotnet + AttributeTelemetrySDKLanguageDotnet = "dotnet" + // erlang + AttributeTelemetrySDKLanguageErlang = "erlang" + // go + AttributeTelemetrySDKLanguageGo = "go" + // java + AttributeTelemetrySDKLanguageJava = "java" + // nodejs + AttributeTelemetrySDKLanguageNodejs = "nodejs" + // php + AttributeTelemetrySDKLanguagePHP = "php" + // python + AttributeTelemetrySDKLanguagePython = "python" + // ruby + AttributeTelemetrySDKLanguageRuby = "ruby" + // rust + AttributeTelemetrySDKLanguageRust = "rust" + // swift + AttributeTelemetrySDKLanguageSwift = "swift" + // webjs + AttributeTelemetrySDKLanguageWebjs = "webjs" +) + +// These attributes may be used for any operation to store information about a +// thread that started a span. +const ( + // Current "managed" thread ID (as opposed to OS thread ID). + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 42 + AttributeThreadID = "thread.id" + // Current thread name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'main' + AttributeThreadName = "thread.name" +) + +// Semantic convention attributes in the TLS namespace. +const ( + // String indicating the cipher used during the current connection. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA', + // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' + // Note: The values allowed for tls.cipher MUST be one of the Descriptions of the + // registered TLS Cipher Suites. + AttributeTLSCipher = "tls.cipher" + // PEM-encoded stand-alone certificate offered by the client. This is usually + // mutually-exclusive of client.certificate_chain since this value also exists in + // that list. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MII...' + AttributeTLSClientCertificate = "tls.client.certificate" + // Array of PEM-encoded certificates that make up the certificate chain offered by + // the client. This is usually mutually-exclusive of client.certificate since that + // value should be the first certificate in the chain. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MII...', 'MI...' + AttributeTLSClientCertificateChain = "tls.client.certificate_chain" + // Certificate fingerprint using the MD5 digest of DER-encoded version of + // certificate offered by the client. For consistency with other hash values, this + // value should be formatted as an uppercase hash. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' + AttributeTLSClientHashMd5 = "tls.client.hash.md5" + // Certificate fingerprint using the SHA1 digest of DER-encoded version of + // certificate offered by the client. For consistency with other hash values, this + // value should be formatted as an uppercase hash. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' + AttributeTLSClientHashSha1 = "tls.client.hash.sha1" + // Certificate fingerprint using the SHA256 digest of DER-encoded version of + // certificate offered by the client.
For consistency with other hash values, this + // value should be formatted as an uppercase hash. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' + AttributeTLSClientHashSha256 = "tls.client.hash.sha256" + // Distinguished name of subject of the issuer of the x.509 certificate presented + // by the client. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com' + AttributeTLSClientIssuer = "tls.client.issuer" + // A hash that identifies clients based on how they perform an SSL/TLS handshake. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'd4e5b18d6b55c71272893221c96ba240' + AttributeTLSClientJa3 = "tls.client.ja3" + // Date/Time indicating when client certificate is no longer considered valid. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2021-01-01T00:00:00.000Z' + AttributeTLSClientNotAfter = "tls.client.not_after" + // Date/Time indicating when client certificate is first considered valid. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1970-01-01T00:00:00.000Z' + AttributeTLSClientNotBefore = "tls.client.not_before" + // Also called an SNI, this tells the server which hostname the client is + // attempting to connect to. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry.io' + AttributeTLSClientServerName = "tls.client.server_name" + // Distinguished name of subject of the x.509 certificate presented by the client. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com' + AttributeTLSClientSubject = "tls.client.subject" + // Array of ciphers offered by the client during the client hello. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."' + AttributeTLSClientSupportedCiphers = "tls.client.supported_ciphers" + // String indicating the curve used for the given cipher, when applicable + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'secp256r1' + AttributeTLSCurve = "tls.curve" + // Boolean flag indicating if the TLS negotiation was successful and transitioned + // to an encrypted tunnel. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + // Examples: True + AttributeTLSEstablished = "tls.established" + // String indicating the protocol being tunneled. Per the values in the IANA + // registry, this string should be lower case.
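// [editor's illustrative aside, not generated content] A sketch of recording a
// completed handshake with the TLS attributes above; note that fingerprint-style
// values are expected as uppercase hashes, per the comments. Assumes
// go.opentelemetry.io/otel/attribute is imported as attribute; span and values are
// hypothetical:
//
//	span.SetAttributes(
//		attribute.Bool(AttributeTLSEstablished, true),
//		attribute.String(AttributeTLSCipher, "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256"),
//		attribute.String(AttributeTLSClientServerName, "opentelemetry.io"),
//		attribute.String(AttributeTLSClientHashSha256, "0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0"),
//	)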
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'http/1.1' + AttributeTLSNextProtocol = "tls.next_protocol" + // Normalized lowercase protocol name parsed from original string of the + // negotiated SSL/TLS protocol version + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeTLSProtocolName = "tls.protocol.name" + // Numeric part of the version parsed from the original string of the negotiated + // SSL/TLS protocol version + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1.2', '3' + AttributeTLSProtocolVersion = "tls.protocol.version" + // Boolean flag indicating if this TLS connection was resumed from an existing TLS + // negotiation. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + // Examples: True + AttributeTLSResumed = "tls.resumed" + // PEM-encoded stand-alone certificate offered by the server. This is usually + // mutually-exclusive of server.certificate_chain since this value also exists in + // that list. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MII...' + AttributeTLSServerCertificate = "tls.server.certificate" + // Array of PEM-encoded certificates that make up the certificate chain offered by + // the server. This is usually mutually-exclusive of server.certificate since that + // value should be the first certificate in the chain. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MII...', 'MI...' + AttributeTLSServerCertificateChain = "tls.server.certificate_chain" + // Certificate fingerprint using the MD5 digest of DER-encoded version of + // certificate offered by the server. For consistency with other hash values, this + // value should be formatted as an uppercase hash. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' + AttributeTLSServerHashMd5 = "tls.server.hash.md5" + // Certificate fingerprint using the SHA1 digest of DER-encoded version of + // certificate offered by the server. For consistency with other hash values, this + // value should be formatted as an uppercase hash. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' + AttributeTLSServerHashSha1 = "tls.server.hash.sha1" + // Certificate fingerprint using the SHA256 digest of DER-encoded version of + // certificate offered by the server. For consistency with other hash values, this + // value should be formatted as an uppercase hash. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' + AttributeTLSServerHashSha256 = "tls.server.hash.sha256" + // Distinguished name of subject of the issuer of the x.509 certificate presented + // by the client. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com' + AttributeTLSServerIssuer = "tls.server.issuer" + // A hash that identifies servers based on how they perform an SSL/TLS handshake. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'd4e5b18d6b55c71272893221c96ba240' + AttributeTLSServerJa3s = "tls.server.ja3s" + // Date/Time indicating when server certificate is no longer considered valid. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2021-01-01T00:00:00.000Z' + AttributeTLSServerNotAfter = "tls.server.not_after" + // Date/Time indicating when server certificate is first considered valid. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1970-01-01T00:00:00.000Z' + AttributeTLSServerNotBefore = "tls.server.not_before" + // Distinguished name of subject of the x.509 certificate presented by the server. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com' + AttributeTLSServerSubject = "tls.server.subject" +) + +const ( + // ssl + AttributeTLSProtocolNameSsl = "ssl" + // tls + AttributeTLSProtocolNameTLS = "tls" +) + +// Attributes describing URL. +const ( + // Domain extracted from the url.full, such as "opentelemetry.io". + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'www.foo.bar', 'opentelemetry.io', '3.12.167.2', + // '[1080:0:0:0:8:800:200C:417A]' + // Note: In some cases a URL may refer to an IP and/or port directly, without a + // domain name. In this case, the IP address would go to the domain field. If the + // URL contains a literal IPv6 address enclosed by [ and ], the [ and ] characters + // should also be captured in the domain field. + AttributeURLDomain = "url.domain" + // The file extension extracted from the url.full, excluding the leading dot. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'png', 'gz' + // Note: The file extension is only set if it exists, as not every url has a file + // extension. When the file name has multiple extensions example.tar.gz, only the + // last one should be captured gz, not tar.gz. + AttributeURLExtension = "url.extension" + // The URI fragment component + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'SemConv' + AttributeURLFragment = "url.fragment" + // Absolute URL describing a network resource according to RFC3986 + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', '//localhost' + // Note: For network calls, URL usually has + // scheme://host[:port][path][?query][#fragment] format, where the fragment is not + // transmitted over HTTP, but if it is known, it SHOULD be included nevertheless. + // url.full MUST NOT contain credentials passed via URL in form of + // https://username:password@www.example.com/. In such case username and password + // SHOULD be redacted and attribute's value SHOULD be + // https://REDACTED:REDACTED@www.example.com/. + // url.full SHOULD capture the absolute URL when it is available (or can be + // reconstructed). Sensitive content provided in url.full SHOULD be scrubbed when + // instrumentations can identify it. + AttributeURLFull = "url.full" + // Unmodified original URL as seen in the event source. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', + // 'search?q=OpenTelemetry' + // Note: In network monitoring, the observed URL may be a full URL, whereas in + // access logs, the URL is often just represented as a path. This field is meant + // to represent the URL as it was observed, complete or not. + // url.original might contain credentials passed via URL in form of + // https://username:password@www.example.com/. In such case password and username + // SHOULD NOT be redacted and attribute's value SHOULD remain the same. + AttributeURLOriginal = "url.original" + // The URI path component + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '/search' + // Note: Sensitive content provided in url.path SHOULD be scrubbed when + // instrumentations can identify it. + AttributeURLPath = "url.path" + // Port extracted from the url.full + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 443 + AttributeURLPort = "url.port" + // The URI query component + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'q=OpenTelemetry' + // Note: Sensitive content provided in url.query SHOULD be scrubbed when + // instrumentations can identify it. + AttributeURLQuery = "url.query" + // The highest registered url domain, stripped of the subdomain. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'example.com', 'foo.co.uk' + // Note: This value can be determined precisely with the public suffix list. For + // example, the registered domain for foo.example.com is example.com. Trying to + // approximate this by simply taking the last two labels will not work well for + // TLDs such as co.uk. + AttributeURLRegisteredDomain = "url.registered_domain" + // The URI scheme component identifying the used protocol. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'https', 'ftp', 'telnet' + AttributeURLScheme = "url.scheme" + // The subdomain portion of a fully qualified domain name includes all of the + // names except the host name under the registered_domain. In a partially + // qualified domain, or if the qualification level of the full name cannot be + // determined, subdomain contains all of the names below the registered domain. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'east', 'sub2.sub1' + // Note: The subdomain portion of www.east.mydomain.co.uk is east. If the domain + // has multiple levels of subdomain, such as sub2.sub1.example.com, the subdomain + // field should contain sub2.sub1, with no trailing period. + AttributeURLSubdomain = "url.subdomain" + // The effective top level domain (eTLD), also known as the domain suffix, is the + // last part of the domain name. For example, the top level domain for example.com + // is com. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'com', 'co.uk' + // Note: This value can be determined precisely with the public suffix list. + AttributeURLTopLevelDomain = "url.top_level_domain" +) + +// Describes user-agent attributes. +const ( + // Name of the user-agent extracted from original. Usually refers to the browser's + // name. 
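The url.full note above requires credentials to be redacted, while url.original explicitly stays as observed. One way to implement the redaction half with the standard library (a sketch, not the collector's code):

```go
package main

import (
	"fmt"
	"net/url"
)

// redactURLFull replaces any userinfo component with REDACTED:REDACTED,
// matching the url.full guidance; url.original would keep the raw input.
func redactURLFull(raw string) string {
	u, err := url.Parse(raw)
	if err != nil || u.User == nil {
		return raw
	}
	u.User = url.UserPassword("REDACTED", "REDACTED")
	return u.String()
}

func main() {
	fmt.Println(redactURLFull("https://username:password@www.example.com/"))
	// Output: https://REDACTED:REDACTED@www.example.com/
}
```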
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Safari', 'YourApp' + // Note: Example of extracting browser's name from original string. In the case of + // using a user-agent for non-browser products, such as microservices with + // multiple names/versions inside the user_agent.original, the most significant + // name SHOULD be selected. In such a scenario it should align with + // user_agent.version + AttributeUserAgentName = "user_agent.name" + // Value of the HTTP User-Agent header sent by the client. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU iPhone + // OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) + // Version/14.1.2 Mobile/15E148 Safari/604.1', 'YourApp/1.0.0 grpc-java- + // okhttp/1.27.2' + AttributeUserAgentOriginal = "user_agent.original" + // Version of the user-agent extracted from original. Usually refers to the + // browser's version + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '14.1.2', '1.0.0' + // Note: Example of extracting browser's version from original string. In the case + // of using a user-agent for non-browser products, such as microservices with + // multiple names/versions inside the user_agent.original, the most significant + // version SHOULD be selected. In such a scenario it should align with + // user_agent.name + AttributeUserAgentVersion = "user_agent.version" +) + +func GetAttribute_groupSemanticConventionAttributeNames() []string { + return []string{ + AttributeEventName, + AttributeLogRecordUID, + AttributeLogIostream, + AttributeLogFileName, + AttributeLogFileNameResolved, + AttributeLogFilePath, + AttributeLogFilePathResolved, + AttributePoolName, + AttributeState, + AttributeAspnetcoreRateLimitingResult, + AttributeAspnetcoreDiagnosticsHandlerType, + AttributeAspnetcoreRateLimitingPolicy, + AttributeAspnetcoreRequestIsUnhandled, + AttributeAspnetcoreRoutingIsFallback, + AttributeSignalrConnectionStatus, + AttributeSignalrTransport, + AttributeJvmBufferPoolName, + AttributeJvmMemoryPoolName, + AttributeJvmMemoryType, + AttributeProcessCPUState, + AttributeSystemDevice, + AttributeSystemCPULogicalNumber, + AttributeSystemCPUState, + AttributeSystemMemoryState, + AttributeSystemPagingDirection, + AttributeSystemPagingState, + AttributeSystemPagingType, + AttributeSystemFilesystemMode, + AttributeSystemFilesystemMountpoint, + AttributeSystemFilesystemState, + AttributeSystemFilesystemType, + AttributeSystemNetworkState, + AttributeSystemProcessStatus, + AttributeAndroidOSAPILevel, + AttributeAWSDynamoDBAttributeDefinitions, + AttributeAWSDynamoDBAttributesToGet, + AttributeAWSDynamoDBConsistentRead, + AttributeAWSDynamoDBConsumedCapacity, + AttributeAWSDynamoDBCount, + AttributeAWSDynamoDBExclusiveStartTable, + AttributeAWSDynamoDBGlobalSecondaryIndexUpdates, + AttributeAWSDynamoDBGlobalSecondaryIndexes, + AttributeAWSDynamoDBIndexName, + AttributeAWSDynamoDBItemCollectionMetrics, + AttributeAWSDynamoDBLimit, + AttributeAWSDynamoDBLocalSecondaryIndexes, + AttributeAWSDynamoDBProjection, + AttributeAWSDynamoDBProvisionedReadCapacity, + AttributeAWSDynamoDBProvisionedWriteCapacity, + AttributeAWSDynamoDBScanForward, + AttributeAWSDynamoDBScannedCount, + AttributeAWSDynamoDBSegment, + AttributeAWSDynamoDBSelect, + AttributeAWSDynamoDBTableCount, + AttributeAWSDynamoDBTableNames, + AttributeAWSDynamoDBTotalSegments, + 
AttributeBrowserBrands, + AttributeBrowserLanguage, + AttributeBrowserMobile, + AttributeBrowserPlatform, + AttributeClientAddress, + AttributeClientPort, + AttributeCloudAccountID, + AttributeCloudAvailabilityZone, + AttributeCloudPlatform, + AttributeCloudProvider, + AttributeCloudRegion, + AttributeCloudResourceID, + AttributeCloudeventsEventID, + AttributeCloudeventsEventSource, + AttributeCloudeventsEventSpecVersion, + AttributeCloudeventsEventSubject, + AttributeCloudeventsEventType, + AttributeCodeColumn, + AttributeCodeFilepath, + AttributeCodeFunction, + AttributeCodeLineNumber, + AttributeCodeNamespace, + AttributeCodeStacktrace, + AttributeContainerCommand, + AttributeContainerCommandArgs, + AttributeContainerCommandLine, + AttributeContainerCPUState, + AttributeContainerID, + AttributeContainerImageID, + AttributeContainerImageName, + AttributeContainerImageRepoDigests, + AttributeContainerImageTags, + AttributeContainerName, + AttributeContainerRuntime, + AttributeDBCassandraConsistencyLevel, + AttributeDBCassandraCoordinatorDC, + AttributeDBCassandraCoordinatorID, + AttributeDBCassandraIdempotence, + AttributeDBCassandraPageSize, + AttributeDBCassandraSpeculativeExecutionCount, + AttributeDBCassandraTable, + AttributeDBCosmosDBClientID, + AttributeDBCosmosDBConnectionMode, + AttributeDBCosmosDBContainer, + AttributeDBCosmosDBOperationType, + AttributeDBCosmosDBRequestCharge, + AttributeDBCosmosDBRequestContentLength, + AttributeDBCosmosDBStatusCode, + AttributeDBCosmosDBSubStatusCode, + AttributeDBElasticsearchClusterName, + AttributeDBInstanceID, + AttributeDBMongoDBCollection, + AttributeDBMSSQLInstanceName, + AttributeDBName, + AttributeDBOperation, + AttributeDBRedisDBIndex, + AttributeDBSQLTable, + AttributeDBStatement, + AttributeDBSystem, + AttributeDBUser, + AttributeDeploymentEnvironment, + AttributeDBConnectionString, + AttributeDBElasticsearchNodeName, + AttributeDBJDBCDriverClassname, + AttributeHTTPFlavor, + AttributeHTTPMethod, + AttributeHTTPRequestContentLength, + AttributeHTTPResponseContentLength, + AttributeHTTPScheme, + AttributeHTTPStatusCode, + AttributeHTTPTarget, + AttributeHTTPURL, + AttributeHTTPUserAgent, + AttributeMessagingKafkaDestinationPartition, + AttributeNetHostName, + AttributeNetHostPort, + AttributeNetPeerName, + AttributeNetPeerPort, + AttributeNetProtocolName, + AttributeNetProtocolVersion, + AttributeNetSockFamily, + AttributeNetSockHostAddr, + AttributeNetSockHostPort, + AttributeNetSockPeerAddr, + AttributeNetSockPeerName, + AttributeNetSockPeerPort, + AttributeNetTransport, + AttributeSystemProcessesStatus, + AttributeDestinationAddress, + AttributeDestinationPort, + AttributeDeviceID, + AttributeDeviceManufacturer, + AttributeDeviceModelIdentifier, + AttributeDeviceModelName, + AttributeDiskIoDirection, + AttributeDNSQuestionName, + AttributeEnduserID, + AttributeEnduserRole, + AttributeEnduserScope, + AttributeErrorType, + AttributeExceptionEscaped, + AttributeExceptionMessage, + AttributeExceptionStacktrace, + AttributeExceptionType, + AttributeFaaSColdstart, + AttributeFaaSCron, + AttributeFaaSDocumentCollection, + AttributeFaaSDocumentName, + AttributeFaaSDocumentOperation, + AttributeFaaSDocumentTime, + AttributeFaaSInstance, + AttributeFaaSInvocationID, + AttributeFaaSInvokedName, + AttributeFaaSInvokedProvider, + AttributeFaaSInvokedRegion, + AttributeFaaSMaxMemory, + AttributeFaaSName, + AttributeFaaSTime, + AttributeFaaSTrigger, + AttributeFaaSVersion, + AttributeFeatureFlagKey, + AttributeFeatureFlagProviderName, + 
AttributeFeatureFlagVariant, + AttributeFileDirectory, + AttributeFileExtension, + AttributeFileName, + AttributeFilePath, + AttributeFileSize, + AttributeGCPCloudRunJobExecution, + AttributeGCPCloudRunJobTaskIndex, + AttributeGCPGceInstanceHostname, + AttributeGCPGceInstanceName, + AttributeHostArch, + AttributeHostCPUCacheL2Size, + AttributeHostCPUFamily, + AttributeHostCPUModelID, + AttributeHostCPUModelName, + AttributeHostCPUStepping, + AttributeHostCPUVendorID, + AttributeHostID, + AttributeHostImageID, + AttributeHostImageName, + AttributeHostImageVersion, + AttributeHostIP, + AttributeHostMac, + AttributeHostName, + AttributeHostType, + AttributeHTTPConnectionState, + AttributeHTTPRequestBodySize, + AttributeHTTPRequestMethod, + AttributeHTTPRequestMethodOriginal, + AttributeHTTPRequestResendCount, + AttributeHTTPRequestSize, + AttributeHTTPResponseBodySize, + AttributeHTTPResponseSize, + AttributeHTTPResponseStatusCode, + AttributeHTTPRoute, + AttributeK8SClusterName, + AttributeK8SClusterUID, + AttributeK8SContainerName, + AttributeK8SContainerRestartCount, + AttributeK8SCronJobName, + AttributeK8SCronJobUID, + AttributeK8SDaemonSetName, + AttributeK8SDaemonSetUID, + AttributeK8SDeploymentName, + AttributeK8SDeploymentUID, + AttributeK8SJobName, + AttributeK8SJobUID, + AttributeK8SNamespaceName, + AttributeK8SNodeName, + AttributeK8SNodeUID, + AttributeK8SPodName, + AttributeK8SPodUID, + AttributeK8SReplicaSetName, + AttributeK8SReplicaSetUID, + AttributeK8SStatefulSetName, + AttributeK8SStatefulSetUID, + AttributeMessagingBatchMessageCount, + AttributeMessagingClientID, + AttributeMessagingDestinationAnonymous, + AttributeMessagingDestinationName, + AttributeMessagingDestinationPartitionID, + AttributeMessagingDestinationTemplate, + AttributeMessagingDestinationTemporary, + AttributeMessagingDestinationPublishAnonymous, + AttributeMessagingDestinationPublishName, + AttributeMessagingEventhubsConsumerGroup, + AttributeMessagingEventhubsMessageEnqueuedTime, + AttributeMessagingGCPPubsubMessageOrderingKey, + AttributeMessagingKafkaConsumerGroup, + AttributeMessagingKafkaMessageKey, + AttributeMessagingKafkaMessageOffset, + AttributeMessagingKafkaMessageTombstone, + AttributeMessagingMessageBodySize, + AttributeMessagingMessageConversationID, + AttributeMessagingMessageEnvelopeSize, + AttributeMessagingMessageID, + AttributeMessagingOperation, + AttributeMessagingRabbitmqDestinationRoutingKey, + AttributeMessagingRabbitmqMessageDeliveryTag, + AttributeMessagingRocketmqClientGroup, + AttributeMessagingRocketmqConsumptionModel, + AttributeMessagingRocketmqMessageDelayTimeLevel, + AttributeMessagingRocketmqMessageDeliveryTimestamp, + AttributeMessagingRocketmqMessageGroup, + AttributeMessagingRocketmqMessageKeys, + AttributeMessagingRocketmqMessageTag, + AttributeMessagingRocketmqMessageType, + AttributeMessagingRocketmqNamespace, + AttributeMessagingServicebusDestinationSubscriptionName, + AttributeMessagingServicebusDispositionStatus, + AttributeMessagingServicebusMessageDeliveryCount, + AttributeMessagingServicebusMessageEnqueuedTime, + AttributeMessagingSystem, + AttributeNetworkCarrierIcc, + AttributeNetworkCarrierMcc, + AttributeNetworkCarrierMnc, + AttributeNetworkCarrierName, + AttributeNetworkConnectionSubtype, + AttributeNetworkConnectionType, + AttributeNetworkIoDirection, + AttributeNetworkLocalAddress, + AttributeNetworkLocalPort, + AttributeNetworkPeerAddress, + AttributeNetworkPeerPort, + AttributeNetworkProtocolName, + AttributeNetworkProtocolVersion, + 
AttributeNetworkTransport, + AttributeNetworkType, + AttributeOciManifestDigest, + AttributeOSBuildID, + AttributeOSDescription, + AttributeOSName, + AttributeOSType, + AttributeOSVersion, + AttributeProcessCommand, + AttributeProcessCommandArgs, + AttributeProcessCommandLine, + AttributeProcessExecutableName, + AttributeProcessExecutablePath, + AttributeProcessOwner, + AttributeProcessParentPID, + AttributeProcessPID, + AttributeProcessRuntimeDescription, + AttributeProcessRuntimeName, + AttributeProcessRuntimeVersion, + AttributeRPCConnectRPCErrorCode, + AttributeRPCGRPCStatusCode, + AttributeRPCJsonrpcErrorCode, + AttributeRPCJsonrpcErrorMessage, + AttributeRPCJsonrpcRequestID, + AttributeRPCJsonrpcVersion, + AttributeRPCMethod, + AttributeRPCService, + AttributeRPCSystem, + AttributeServerAddress, + AttributeServerPort, + AttributeServiceInstanceID, + AttributeServiceName, + AttributeServiceNamespace, + AttributeServiceVersion, + AttributeSessionID, + AttributeSessionPreviousID, + AttributeSourceAddress, + AttributeSourcePort, + AttributeTelemetrySDKLanguage, + AttributeTelemetrySDKName, + AttributeTelemetrySDKVersion, + AttributeTelemetryDistroName, + AttributeTelemetryDistroVersion, + AttributeThreadID, + AttributeThreadName, + AttributeTLSCipher, + AttributeTLSClientCertificate, + AttributeTLSClientCertificateChain, + AttributeTLSClientHashMd5, + AttributeTLSClientHashSha1, + AttributeTLSClientHashSha256, + AttributeTLSClientIssuer, + AttributeTLSClientJa3, + AttributeTLSClientNotAfter, + AttributeTLSClientNotBefore, + AttributeTLSClientServerName, + AttributeTLSClientSubject, + AttributeTLSClientSupportedCiphers, + AttributeTLSCurve, + AttributeTLSEstablished, + AttributeTLSNextProtocol, + AttributeTLSProtocolName, + AttributeTLSProtocolVersion, + AttributeTLSResumed, + AttributeTLSServerCertificate, + AttributeTLSServerCertificateChain, + AttributeTLSServerHashMd5, + AttributeTLSServerHashSha1, + AttributeTLSServerHashSha256, + AttributeTLSServerIssuer, + AttributeTLSServerJa3s, + AttributeTLSServerNotAfter, + AttributeTLSServerNotBefore, + AttributeTLSServerSubject, + AttributeURLDomain, + AttributeURLExtension, + AttributeURLFragment, + AttributeURLFull, + AttributeURLOriginal, + AttributeURLPath, + AttributeURLPort, + AttributeURLQuery, + AttributeURLRegisteredDomain, + AttributeURLScheme, + AttributeURLSubdomain, + AttributeURLTopLevelDomain, + AttributeUserAgentName, + AttributeUserAgentOriginal, + AttributeUserAgentVersion, + } +} diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/generated_event.go b/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/generated_event.go new file mode 100644 index 00000000000..9ce1307afe1 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/generated_event.go @@ -0,0 +1,105 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv + +// This event represents an occurrence of a lifecycle transition on the iOS +// platform. +const ( + // This attribute represents the state the application has transitioned into at + // the occurrence of the event. + // + // Type: Enum + // Requirement Level: Required + // Stability: experimental + // Note: The iOS lifecycle states are defined in the UIApplicationDelegate + // documentation, and from which the OS terminology column values are derived. + AttributeIosState = "ios.state" +) + +const ( + // The app has become `active`. 
Associated with UIKit notification `applicationDidBecomeActive` + AttributeIosStateActive = "active" + // The app is now `inactive`. Associated with UIKit notification `applicationWillResignActive` + AttributeIosStateInactive = "inactive" + // The app is now in the background. This value is associated with UIKit notification `applicationDidEnterBackground` + AttributeIosStateBackground = "background" + // The app is now in the foreground. This value is associated with UIKit notification `applicationWillEnterForeground` + AttributeIosStateForeground = "foreground" + // The app is about to terminate. Associated with UIKit notification `applicationWillTerminate` + AttributeIosStateTerminate = "terminate" +) + +// This event represents an occurrence of a lifecycle transition on the Android +// platform. +const ( + // This attribute represents the state the application has transitioned into at + // the occurrence of the event. + // + // Type: Enum + // Requirement Level: Required + // Stability: experimental + // Note: The Android lifecycle states are defined in Activity lifecycle callbacks, + // and from which the OS identifiers are derived. + AttributeAndroidState = "android.state" +) + +const ( + // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time + AttributeAndroidStateCreated = "created" + // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state + AttributeAndroidStateBackground = "background" + // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states + AttributeAndroidStateForeground = "foreground" +) + +// RPC received/sent message. +const ( + // Compressed size of the message in bytes. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + AttributeMessageCompressedSize = "message.compressed_size" + // MUST be calculated as two different counters starting from 1 one for sent + // messages and one for received message. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Note: This way we guarantee that the values will be consistent between + // different implementations. + AttributeMessageID = "message.id" + // Whether this is a received or sent message. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeMessageType = "message.type" + // Uncompressed size of the message in bytes. 
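The message.id comment above prescribes two independent counters, each starting from 1, so sent and received sequences stay consistent across implementations. A minimal sketch of that bookkeeping:

```go
package main

import "fmt"

// messageCounters implements the message.id rule: one counter per
// direction, each starting from 1.
type messageCounters struct {
	sent, received int64
}

func (c *messageCounters) nextSentID() int64 {
	c.sent++
	return c.sent
}

func (c *messageCounters) nextReceivedID() int64 {
	c.received++
	return c.received
}

func main() {
	var c messageCounters
	fmt.Println(c.nextSentID(), c.nextSentID(), c.nextReceivedID()) // 1 2 1
}
```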
+ // + // Type: int + // Requirement Level: Optional + // Stability: experimental + AttributeMessageUncompressedSize = "message.uncompressed_size" +) + +const ( + // sent + AttributeMessageTypeSent = "SENT" + // received + AttributeMessageTypeReceived = "RECEIVED" +) + +func GetEventSemanticConventionAttributeNames() []string { + return []string{ + AttributeIosState, + AttributeAndroidState, + AttributeMessageCompressedSize, + AttributeMessageID, + AttributeMessageType, + AttributeMessageUncompressedSize, + } +} diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/generated_resource.go b/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/generated_resource.go new file mode 100644 index 00000000000..7b355e40661 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/generated_resource.go @@ -0,0 +1,242 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv + +// Resources used by AWS Elastic Container Service (ECS). +const ( + // The ID of a running ECS task. The ID MUST be extracted from task.arn. + // + // Type: string + // Requirement Level: Conditionally Required - If and only if `task.arn` is + // populated. + // Stability: experimental + // Examples: '10838bed-421f-43ef-870a-f43feacbbb5b', + // '23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' + AttributeAWSECSTaskID = "aws.ecs.task.id" + // The ARN of an ECS cluster. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AttributeAWSECSClusterARN = "aws.ecs.cluster.arn" + // The Amazon Resource Name (ARN) of an ECS container instance. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us- + // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AttributeAWSECSContainerARN = "aws.ecs.container.arn" + // The launch type for an ECS task. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeAWSECSLaunchtype = "aws.ecs.launchtype" + // The ARN of a running ECS task. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us- + // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b', + // 'arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task- + // id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' + AttributeAWSECSTaskARN = "aws.ecs.task.arn" + // The family name of the ECS task definition used to create the ECS task. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry-family' + AttributeAWSECSTaskFamily = "aws.ecs.task.family" + // The revision for the task definition used to create the ECS task. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '8', '26' + AttributeAWSECSTaskRevision = "aws.ecs.task.revision" +) + +const ( + // ec2 + AttributeAWSECSLaunchtypeEC2 = "ec2" + // fargate + AttributeAWSECSLaunchtypeFargate = "fargate" +) + +// Resources used by AWS Elastic Kubernetes Service (EKS). +const ( + // The ARN of an EKS cluster. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AttributeAWSEKSClusterARN = "aws.eks.cluster.arn" +) + +// Resources specific to Amazon Web Services. 
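Per the ECS comments above, aws.ecs.task.id MUST be extracted from task.arn, and the task.arn examples show both the old (task/&lt;id&gt;) and new (task/&lt;cluster&gt;/&lt;id&gt;) shapes. Taking the segment after the final slash covers both; this is a sketch, not the collector's extraction code:

```go
package main

import (
	"fmt"
	"strings"
)

// ecsTaskIDFromARN derives aws.ecs.task.id from aws.ecs.task.arn by taking
// the segment after the last '/', which works for both documented ARN shapes.
func ecsTaskIDFromARN(arn string) string {
	if i := strings.LastIndex(arn, "/"); i >= 0 {
		return arn[i+1:]
	}
	return ""
}

func main() {
	fmt.Println(ecsTaskIDFromARN("arn:aws:ecs:us-west-1:123456789123:task/my-cluster/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd"))
	// 23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd
}
```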
+const ( + // The Amazon Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the log group ARN format documentation. + AttributeAWSLogGroupARNs = "aws.log.group.arns" + // The name(s) of the AWS log group(s) an application is writing to. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like multi-container + // applications, where a single application has sidecar containers, and each write + // to their own log group. + AttributeAWSLogGroupNames = "aws.log.group.names" + // The ARN(s) of the AWS log stream(s). + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log- + // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the log stream ARN format documentation. One log group can contain + // several log streams, so these ARNs necessarily identify both a log group and a + // log stream. + AttributeAWSLogStreamARNs = "aws.log.stream.arns" + // The name(s) of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AttributeAWSLogStreamNames = "aws.log.stream.names" +) + +// Heroku dyno metadata +const ( + // Unique identifier for the application + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' + AttributeHerokuAppID = "heroku.app.id" + // Commit hash for the current release + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' + AttributeHerokuReleaseCommit = "heroku.release.commit" + // Time and date the release was created + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2022-10-23T18:00:42Z' + AttributeHerokuReleaseCreationTimestamp = "heroku.release.creation_timestamp" +) + +// Resource describing the packaged software running the application code. Web +// engines are typically executed using process.runtime. +const ( + // The name of the web engine. + // + // Type: string + // Requirement Level: Required + // Stability: experimental + // Examples: 'WildFly' + AttributeWebEngineName = "webengine.name" + // Additional description of the web engine (e.g. detailed version and edition + // information). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final' + AttributeWebEngineDescription = "webengine.description" + // The version of the web engine. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '21.0.0' + AttributeWebEngineVersion = "webengine.version" +) + +// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's +// concepts. +const ( + // The name of the instrumentation scope - (InstrumentationScope.Name in OTLP). 
+ // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'io.opentelemetry.contrib.mongodb' + AttributeOTelScopeName = "otel.scope.name" + // The version of the instrumentation scope - (InstrumentationScope.Version in + // OTLP). + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '1.0.0' + AttributeOTelScopeVersion = "otel.scope.version" +) + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry +// Scope's concepts. +const ( + // None + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: use the `otel.scope.name` attribute. + // Examples: 'io.opentelemetry.contrib.mongodb' + AttributeOTelLibraryName = "otel.library.name" + // None + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: use the `otel.scope.version` attribute. + // Examples: '1.0.0' + AttributeOTelLibraryVersion = "otel.library.version" +) + +func GetResourceSemanticConventionAttributeNames() []string { + return []string{ + AttributeAWSECSTaskID, + AttributeAWSECSClusterARN, + AttributeAWSECSContainerARN, + AttributeAWSECSLaunchtype, + AttributeAWSECSTaskARN, + AttributeAWSECSTaskFamily, + AttributeAWSECSTaskRevision, + AttributeAWSEKSClusterARN, + AttributeAWSLogGroupARNs, + AttributeAWSLogGroupNames, + AttributeAWSLogStreamARNs, + AttributeAWSLogStreamNames, + AttributeHerokuAppID, + AttributeHerokuReleaseCommit, + AttributeHerokuReleaseCreationTimestamp, + AttributeWebEngineName, + AttributeWebEngineDescription, + AttributeWebEngineVersion, + AttributeOTelScopeName, + AttributeOTelScopeVersion, + AttributeOTelLibraryName, + AttributeOTelLibraryVersion, + } +} diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/generated_trace.go b/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/generated_trace.go new file mode 100644 index 00000000000..b214a9f4fb0 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/generated_trace.go @@ -0,0 +1,245 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv + +// Operations that access some remote service. +const ( + // The service.name of the remote service. SHOULD be equal to the actual + // service.name resource attribute of the remote service if any. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'AuthTokenCache' + AttributePeerService = "peer.service" +) + +// Span attributes used by AWS Lambda (in addition to general `faas` +// attributes). +const ( + // The full invoked ARN as provided on the Context passed to the function (Lambda- + // Runtime-Invoked-Function-ARN header on the /runtime/invocation/next + // applicable). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from cloud.resource_id if an alias is involved. + AttributeAWSLambdaInvokedARN = "aws.lambda.invoked_arn" +) + +// Semantic conventions for the OpenTracing Shim +const ( + // Parent-child Reference type + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: The causal relationship between a child Span and a parent Span. 
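The otel.scope.* attributes above exist so non-OTLP exporters can flatten an instrumentation scope into plain key/value pairs, with otel.library.* kept only as deprecated aliases. A sketch of that mapping; the scope struct here is a stand-in, not the pdata type:

```go
package main

import "fmt"

// scope is a stand-in for an instrumentation scope (name + version).
type scope struct{ Name, Version string }

// scopeAttrs flattens a scope into the stable otel.scope.* attributes.
func scopeAttrs(s scope) map[string]string {
	return map[string]string{
		"otel.scope.name":    s.Name,    // AttributeOTelScopeName
		"otel.scope.version": s.Version, // AttributeOTelScopeVersion
	}
}

func main() {
	fmt.Println(scopeAttrs(scope{Name: "io.opentelemetry.contrib.mongodb", Version: "1.0.0"}))
}
```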
+ AttributeOpentracingRefType = "opentracing.ref_type" +) + +const ( + // The parent Span depends on the child Span in some capacity + AttributeOpentracingRefTypeChildOf = "child_of" + // The parent Span doesn't depend in any way on the result of the child Span + AttributeOpentracingRefTypeFollowsFrom = "follows_from" +) + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's +// concepts. +const ( + // Name of the code, either "OK" or "ERROR". MUST NOT be set + // if the status code is UNSET. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + AttributeOTelStatusCode = "otel.status_code" + // Description of the Status if it has a value, otherwise not set. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'resource not found' + AttributeOTelStatusDescription = "otel.status_description" +) + +const ( + // The operation has been validated by an Application developer or Operator to have completed successfully + AttributeOTelStatusCodeOk = "OK" + // The operation contains an error + AttributeOTelStatusCodeError = "ERROR" +) + +// The `aws` conventions apply to operations using the AWS SDK. They map +// request or response parameters in AWS SDK API calls to attributes on a Span. +// The conventions have been collected over time based on feedback from AWS +// users of tracing and will continue to evolve as new interesting conventions +// are found. +// Some descriptions are also provided for populating general OpenTelemetry +// semantic conventions based on these APIs. +const ( + // The AWS request ID as returned in the response headers x-amz-request-id or + // x-amz-requestid. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' + AttributeAWSRequestID = "aws.request_id" +) + +// Attributes that exist for S3 request types. +const ( + // The S3 bucket name the request refers to. Corresponds to the --bucket parameter + // of the S3 API operations. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'some-bucket-name' + // Note: The bucket attribute is applicable to all S3 operations that reference a + // bucket, i.e. that require the bucket name as a mandatory parameter. + // This applies to almost all S3 operations except list-buckets. + AttributeAWSS3Bucket = "aws.s3.bucket" + // The source object (in the form bucket/key) for the copy operation. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The copy_source attribute applies to S3 copy operations and corresponds + // to the --copy-source parameter + // of the copy-object operation within the S3 API. + // This applies in particular to the following operations:
+ //
+ //   - copy-object
+ //   - upload-part-copy
    + AttributeAWSS3CopySource = "aws.s3.copy_source" + // The delete request container that specifies the objects to be deleted. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string} + // ],Quiet=boolean' + // Note: The delete attribute is only applicable to the delete-object operation. + // The delete attribute corresponds to the --delete parameter of the + // delete-objects operation within the S3 API. + AttributeAWSS3Delete = "aws.s3.delete" + // The S3 object key the request refers to. Corresponds to the --key parameter of + // the S3 API operations. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The key attribute is applicable to all object-related S3 operations, i.e. + // that require the object key as a mandatory parameter. + // This applies in particular to the following operations:
+ //
+ //   - copy-object
+ //   - delete-object
+ //   - get-object
+ //   - head-object
+ //   - put-object
+ //   - restore-object
+ //   - select-object-content
+ //   - abort-multipart-upload
+ //   - complete-multipart-upload
+ //   - create-multipart-upload
+ //   - list-parts
+ //   - upload-part
+ //   - upload-part-copy
    + AttributeAWSS3Key = "aws.s3.key" + // The part number of the part being uploaded in a multipart-upload operation. + // This is a positive integer between 1 and 10,000. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 3456 + // Note: The part_number attribute is only applicable to the upload-part + // and upload-part-copy operations. + // The part_number attribute corresponds to the --part-number parameter of the + // upload-part operation within the S3 API. + AttributeAWSS3PartNumber = "aws.s3.part_number" + // Upload ID that identifies the multipart upload. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' + // Note: The upload_id attribute applies to S3 multipart-upload operations and + // corresponds to the --upload-id parameter + // of the S3 API multipart operations. + // This applies in particular to the following operations:
+ //
+ //   - abort-multipart-upload
+ //   - complete-multipart-upload
+ //   - list-parts
+ //   - upload-part
+ //   - upload-part-copy
    + AttributeAWSS3UploadID = "aws.s3.upload_id" +) + +// Semantic conventions to apply when instrumenting the GraphQL implementation. +// They map GraphQL operations to attributes on a Span. +const ( + // The GraphQL document being executed. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'query findBookByID { bookByID(id: ?) { name } }' + // Note: The value may be sanitized to exclude sensitive information. + AttributeGraphqlDocument = "graphql.document" + // The name of the operation being executed. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'findBookByID' + AttributeGraphqlOperationName = "graphql.operation.name" + // The type of the operation being executed. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'query', 'mutation', 'subscription' + AttributeGraphqlOperationType = "graphql.operation.type" +) + +const ( + // GraphQL query + AttributeGraphqlOperationTypeQuery = "query" + // GraphQL mutation + AttributeGraphqlOperationTypeMutation = "mutation" + // GraphQL subscription + AttributeGraphqlOperationTypeSubscription = "subscription" +) + +func GetTraceSemanticConventionAttributeNames() []string { + return []string{ + AttributePeerService, + AttributeAWSLambdaInvokedARN, + AttributeOpentracingRefType, + AttributeOTelStatusCode, + AttributeOTelStatusDescription, + AttributeAWSRequestID, + AttributeAWSS3Bucket, + AttributeAWSS3CopySource, + AttributeAWSS3Delete, + AttributeAWSS3Key, + AttributeAWSS3PartNumber, + AttributeAWSS3UploadID, + AttributeGraphqlDocument, + AttributeGraphqlOperationName, + AttributeGraphqlOperationType, + } +} diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/schema.go b/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/schema.go new file mode 100644 index 00000000000..3739fe9ef81 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/schema.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/collector/semconv/v1.25.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/ +const SchemaURL = "https://opentelemetry.io/schemas/1.25.0" diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.26.0/doc.go b/vendor/go.opentelemetry.io/collector/semconv/v1.26.0/doc.go new file mode 100644 index 00000000000..c8bb8c57e6a --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.26.0/doc.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package semconv implements OpenTelemetry semantic conventions. +// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the v1.26.0 +// version of the OpenTelemetry semantic conventions. 
+package semconv // import "go.opentelemetry.io/collector/semconv/v1.26.0" diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.26.0/generated_attribute_group.go b/vendor/go.opentelemetry.io/collector/semconv/v1.26.0/generated_attribute_group.go new file mode 100644 index 00000000000..98e34f3b9f7 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.26.0/generated_attribute_group.go @@ -0,0 +1,5331 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv + +// The Android platform on which the Android application is running. +const ( + // Uniquely identifies the framework API revision offered by a version + // (os.version) of the android operating system. More information can be found + // here. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '33', '32' + AttributeAndroidOSAPILevel = "android.os.api_level" +) + +// ASP.NET Core attributes +const ( + // Rate-limiting result, shows whether the lease was acquired or contains a + // rejection reason + // + // Type: Enum + // Requirement Level: Required + // Stability: stable + // Examples: 'acquired', 'request_canceled' + AttributeAspnetcoreRateLimitingResult = "aspnetcore.rate_limiting.result" + // Full type name of the IExceptionHandler implementation that handled the + // exception. + // + // Type: string + // Requirement Level: Conditionally Required - if and only if the exception was + // handled by this handler. + // Stability: stable + // Examples: 'Contoso.MyHandler' + AttributeAspnetcoreDiagnosticsHandlerType = "aspnetcore.diagnostics.handler.type" + // ASP.NET Core exception middleware handling result + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'handled', 'unhandled' + AttributeAspnetcoreDiagnosticsExceptionResult = "aspnetcore.diagnostics.exception.result" + // Rate limiting policy name. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'fixed', 'sliding', 'token' + AttributeAspnetcoreRateLimitingPolicy = "aspnetcore.rate_limiting.policy" + // Flag indicating if request was handled by the application pipeline. + // + // Type: boolean + // Requirement Level: Optional + // Stability: stable + // Examples: True + AttributeAspnetcoreRequestIsUnhandled = "aspnetcore.request.is_unhandled" + // A value that indicates whether the matched route is a fallback route. 
+ // + // Type: boolean + // Requirement Level: Optional + // Stability: stable + // Examples: True + AttributeAspnetcoreRoutingIsFallback = "aspnetcore.routing.is_fallback" + // Match result - success or failure + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'success', 'failure' + AttributeAspnetcoreRoutingMatchStatus = "aspnetcore.routing.match_status" +) + +const ( + // Lease was acquired + AttributeAspnetcoreRateLimitingResultAcquired = "acquired" + // Lease request was rejected by the endpoint limiter + AttributeAspnetcoreRateLimitingResultEndpointLimiter = "endpoint_limiter" + // Lease request was rejected by the global limiter + AttributeAspnetcoreRateLimitingResultGlobalLimiter = "global_limiter" + // Lease request was canceled + AttributeAspnetcoreRateLimitingResultRequestCanceled = "request_canceled" +) + +const ( + // Exception was handled by the exception handling middleware + AttributeAspnetcoreDiagnosticsExceptionResultHandled = "handled" + // Exception was not handled by the exception handling middleware + AttributeAspnetcoreDiagnosticsExceptionResultUnhandled = "unhandled" + // Exception handling was skipped because the response had started + AttributeAspnetcoreDiagnosticsExceptionResultSkipped = "skipped" + // Exception handling didn't run because the request was aborted + AttributeAspnetcoreDiagnosticsExceptionResultAborted = "aborted" +) + +const ( + // Match succeeded + AttributeAspnetcoreRoutingMatchStatusSuccess = "success" + // Match failed + AttributeAspnetcoreRoutingMatchStatusFailure = "failure" +) + +// Generic attributes for AWS services. +const ( + // The AWS request ID as returned in the response headers x-amz-request-id or + // x-amz-requestid. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' + AttributeAWSRequestID = "aws.request_id" +) + +// Attributes for AWS DynamoDB. +const ( + // The JSON-serialized value of each item in the AttributeDefinitions request + // field. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' + AttributeAWSDynamoDBAttributeDefinitions = "aws.dynamodb.attribute_definitions" + // The value of the AttributesToGet request parameter. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'lives', 'id' + AttributeAWSDynamoDBAttributesToGet = "aws.dynamodb.attributes_to_get" + // The value of the ConsistentRead request parameter. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeAWSDynamoDBConsistentRead = "aws.dynamodb.consistent_read" + // The JSON-serialized value of each item in the ConsumedCapacity response field. 
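Several aws.dynamodb.* attributes above are string[] values whose items are JSON-serialized request or response structures, one JSON document per array element. A sketch of producing such a value for aws.dynamodb.attribute_definitions (the struct shape follows the example in the comment):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// attributeDefinition mirrors one AttributeDefinitions entry from the
// example above; each entry becomes its own JSON string in the attribute.
type attributeDefinition struct {
	AttributeName string `json:"AttributeName"`
	AttributeType string `json:"AttributeType"`
}

func main() {
	defs := []attributeDefinition{{AttributeName: "id", AttributeType: "S"}}
	vals := make([]string, 0, len(defs))
	for _, d := range defs {
		b, err := json.Marshal(d)
		if err != nil {
			continue // skip unencodable entries in this sketch
		}
		vals = append(vals, string(b))
	}
	fmt.Println(vals) // value for AttributeAWSDynamoDBAttributeDefinitions
}
```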
+ // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : { + // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, + // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": + // "string", "WriteCapacityUnits": number }' + AttributeAWSDynamoDBConsumedCapacity = "aws.dynamodb.consumed_capacity" + // The value of the Count response parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 10 + AttributeAWSDynamoDBCount = "aws.dynamodb.count" + // The value of the ExclusiveStartTableName request parameter. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Users', 'CatsTable' + AttributeAWSDynamoDBExclusiveStartTable = "aws.dynamodb.exclusive_start_table" + // The JSON-serialized value of each item in the GlobalSecondaryIndexUpdates + // request field. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }' + AttributeAWSDynamoDBGlobalSecondaryIndexUpdates = "aws.dynamodb.global_secondary_index_updates" + // The JSON-serialized value of each item of the GlobalSecondaryIndexes request + // field + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits": + // number, "WriteCapacityUnits": number } }' + AttributeAWSDynamoDBGlobalSecondaryIndexes = "aws.dynamodb.global_secondary_indexes" + // The value of the IndexName request parameter. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'name_to_group' + AttributeAWSDynamoDBIndexName = "aws.dynamodb.index_name" + // The JSON-serialized value of the ItemCollectionMetrics response field. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, + // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : + // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": + // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }' + AttributeAWSDynamoDBItemCollectionMetrics = "aws.dynamodb.item_collection_metrics" + // The value of the Limit request parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 10 + AttributeAWSDynamoDBLimit = "aws.dynamodb.limit" + // The JSON-serialized value of each item of the LocalSecondaryIndexes request + // field. 
+ // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes": + // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" } }' + AttributeAWSDynamoDBLocalSecondaryIndexes = "aws.dynamodb.local_secondary_indexes" + // The value of the ProjectionExpression request parameter. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems, + // ProductReviews' + AttributeAWSDynamoDBProjection = "aws.dynamodb.projection" + // The value of the ProvisionedThroughput.ReadCapacityUnits request parameter. + // + // Type: double + // Requirement Level: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AttributeAWSDynamoDBProvisionedReadCapacity = "aws.dynamodb.provisioned_read_capacity" + // The value of the ProvisionedThroughput.WriteCapacityUnits request parameter. + // + // Type: double + // Requirement Level: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AttributeAWSDynamoDBProvisionedWriteCapacity = "aws.dynamodb.provisioned_write_capacity" + // The value of the ScanIndexForward request parameter. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeAWSDynamoDBScanForward = "aws.dynamodb.scan_forward" + // The value of the ScannedCount response parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 50 + AttributeAWSDynamoDBScannedCount = "aws.dynamodb.scanned_count" + // The value of the Segment request parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 10 + AttributeAWSDynamoDBSegment = "aws.dynamodb.segment" + // The value of the Select request parameter. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AttributeAWSDynamoDBSelect = "aws.dynamodb.select" + // The number of items in the TableNames response parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 20 + AttributeAWSDynamoDBTableCount = "aws.dynamodb.table_count" + // The keys in the RequestItems object field. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Users', 'Cats' + AttributeAWSDynamoDBTableNames = "aws.dynamodb.table_names" + // The value of the TotalSegments request parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 100 + AttributeAWSDynamoDBTotalSegments = "aws.dynamodb.total_segments" +) + +// Attributes for AWS Elastic Container Service (ECS). +const ( + // The ID of a running ECS task. The ID MUST be extracted from task.arn. + // + // Type: string + // Requirement Level: Conditionally Required - If and only if `task.arn` is + // populated. + // Stability: experimental + // Examples: '10838bed-421f-43ef-870a-f43feacbbb5b', + // '23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' + AttributeAWSECSTaskID = "aws.ecs.task.id" + // The ARN of an ECS cluster. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AttributeAWSECSClusterARN = "aws.ecs.cluster.arn" + // The Amazon Resource Name (ARN) of an ECS container instance. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us- + // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AttributeAWSECSContainerARN = "aws.ecs.container.arn" + // The launch type for an ECS task. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeAWSECSLaunchtype = "aws.ecs.launchtype" + // The ARN of a running ECS task. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us- + // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b', + // 'arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task- + // id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' + AttributeAWSECSTaskARN = "aws.ecs.task.arn" + // The family name of the ECS task definition used to create the ECS task. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry-family' + AttributeAWSECSTaskFamily = "aws.ecs.task.family" + // The revision for the task definition used to create the ECS task. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '8', '26' + AttributeAWSECSTaskRevision = "aws.ecs.task.revision" +) + +const ( + // ec2 + AttributeAWSECSLaunchtypeEC2 = "ec2" + // fargate + AttributeAWSECSLaunchtypeFargate = "fargate" +) + +// Attributes for AWS Elastic Kubernetes Service (EKS). +const ( + // The ARN of an EKS cluster. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AttributeAWSEKSClusterARN = "aws.eks.cluster.arn" +) + +// Attributes for AWS Logs. +const ( + // The Amazon Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the log group ARN format documentation. + AttributeAWSLogGroupARNs = "aws.log.group.arns" + // The name(s) of the AWS log group(s) an application is writing to. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like multi-container + // applications, where a single application has sidecar containers, and each write + // to their own log group. + AttributeAWSLogGroupNames = "aws.log.group.names" + // The ARN(s) of the AWS log stream(s). + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log- + // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the log stream ARN format documentation. One log group can contain + // several log streams, so these ARNs necessarily identify both a log group and a + // log stream. + AttributeAWSLogStreamARNs = "aws.log.stream.arns" + // The name(s) of the AWS log stream(s) an application is writing to. 
+ // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AttributeAWSLogStreamNames = "aws.log.stream.names" +) + +// Attributes for AWS Lambda. +const ( + // The full invoked ARN as provided on the Context passed to the function (Lambda- + // Runtime-Invoked-Function-ARN header on the /runtime/invocation/next + // applicable). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from cloud.resource_id if an alias is involved. + AttributeAWSLambdaInvokedARN = "aws.lambda.invoked_arn" +) + +// Attributes for AWS S3. +const ( + // The S3 bucket name the request refers to. Corresponds to the --bucket parameter + // of the S3 API operations. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'some-bucket-name' + // Note: The bucket attribute is applicable to all S3 operations that reference a + // bucket, i.e. that require the bucket name as a mandatory parameter. + // This applies to almost all S3 operations except list-buckets. + AttributeAWSS3Bucket = "aws.s3.bucket" + // The source object (in the form bucket/key) for the copy operation. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The copy_source attribute applies to S3 copy operations and corresponds + // to the --copy-source parameter + // of the copy-object operation within the S3 API. + // This applies in particular to the following operations:
+ //
+ //   - copy-object
+ //   - upload-part-copy
    + AttributeAWSS3CopySource = "aws.s3.copy_source" + // The delete request container that specifies the objects to be deleted. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string} + // ],Quiet=boolean' + // Note: The delete attribute is only applicable to the delete-object operation. + // The delete attribute corresponds to the --delete parameter of the + // delete-objects operation within the S3 API. + AttributeAWSS3Delete = "aws.s3.delete" + // The S3 object key the request refers to. Corresponds to the --key parameter of + // the S3 API operations. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The key attribute is applicable to all object-related S3 operations, i.e. + // that require the object key as a mandatory parameter. + // This applies in particular to the following operations:
+ //
+ //     • copy-object
+ //     • delete-object
+ //     • get-object
+ //     • head-object
+ //     • put-object
+ //     • restore-object
+ //     • select-object-content
+ //     • abort-multipart-upload
+ //     • complete-multipart-upload
+ //     • create-multipart-upload
+ //     • list-parts
+ //     • upload-part
+ //     • upload-part-copy
+ //
    + AttributeAWSS3Key = "aws.s3.key" + // The part number of the part being uploaded in a multipart-upload operation. + // This is a positive integer between 1 and 10,000. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 3456 + // Note: The part_number attribute is only applicable to the upload-part + // and upload-part-copy operations. + // The part_number attribute corresponds to the --part-number parameter of the + // upload-part operation within the S3 API. + AttributeAWSS3PartNumber = "aws.s3.part_number" + // Upload ID that identifies the multipart upload. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' + // Note: The upload_id attribute applies to S3 multipart-upload operations and + // corresponds to the --upload-id parameter + // of the S3 API multipart operations. + // This applies in particular to the following operations:
+ //
+ //     • abort-multipart-upload
+ //     • complete-multipart-upload
+ //     • list-parts
+ //     • upload-part
+ //     • upload-part-copy
+ //
    + AttributeAWSS3UploadID = "aws.s3.upload_id" +) + +// The web browser attributes +const ( + // Array of brand name and version separated by a space + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the UA client hints API + // (navigator.userAgentData.brands). + AttributeBrowserBrands = "browser.brands" + // Preferred language of the user using the browser + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // navigator.language. + AttributeBrowserLanguage = "browser.language" + // A boolean that is true if the browser is running on a mobile device + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + // Note: This value is intended to be taken from the UA client hints API + // (navigator.userAgentData.mobile). If unavailable, this attribute SHOULD be left + // unset. + AttributeBrowserMobile = "browser.mobile" + // The platform on which the browser is running + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the UA client hints API + // (navigator.userAgentData.platform). If unavailable, the legacy + // navigator.platform API SHOULD NOT be used instead and this attribute SHOULD be + // left unset in order for the values to be consistent. + // The list of possible values is defined in the W3C User-Agent Client Hints + // specification. Note that some (but not all) of these values can overlap with + // values in the os.type and os.name attributes. However, for consistency, the + // values in the browser.platform attribute should capture the exact value that + // the user agent provides. + AttributeBrowserPlatform = "browser.platform" +) + +// These attributes may be used to describe the client in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). This covers all TCP +// network interactions since TCP is connection-based and one side initiates +// the connection (an exception is made for peer-to-peer communication over TCP +// where the "user-facing" surface of the protocol / API doesn't expose a clear +// notion of client and server). This also covers UDP network interactions +// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. +const ( + // Client address - domain name if available without reverse DNS lookup; + // otherwise, IP address or Unix domain socket name. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the server side, and when communicating through an + // intermediary, client.address SHOULD represent the client address behind any + // intermediaries, for example proxies, if it's available. + AttributeClientAddress = "client.address" + // Client port number. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 65123 + // Note: When observed from the server side, and when communicating through an + // intermediary, client.port SHOULD represent the client port behind any + // intermediaries, for example proxies, if it's available. 
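
A short sketch of attaching the client.* attributes from this group to a server-side span, assuming the snippet lives in the same package as these constants and uses go.opentelemetry.io/otel/attribute; peerHost and peerPort are illustrative inputs, not part of the change:

    package semconv

    import "go.opentelemetry.io/otel/attribute"

    // clientAttrs builds the client.* attributes for a server span. Per the
    // notes above, the values should reflect the client behind any
    // intermediaries (proxies), when that information is available.
    func clientAttrs(peerHost string, peerPort int) []attribute.KeyValue {
        return []attribute.KeyValue{
            attribute.String(AttributeClientAddress, peerHost), // e.g. "10.1.2.80"
            attribute.Int(AttributeClientPort, peerPort),       // e.g. 65123
        }
    }
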
+ AttributeClientPort = "client.port" +) + +// A cloud environment (e.g. GCP, Azure, AWS). +const ( + // The cloud account ID the resource is assigned to. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '111111111111', 'opentelemetry' + AttributeCloudAccountID = "cloud.account.id" + // Cloud regions often have multiple, isolated locations known as zones to + // increase availability. Availability zone represents the zone where the resource + // is running. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'us-east-1c' + // Note: Availability zones are called "zones" on Alibaba Cloud and + // Google Cloud. + AttributeCloudAvailabilityZone = "cloud.availability_zone" + // The cloud platform in use. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: The prefix of the service SHOULD match the one specified in + // cloud.provider. + AttributeCloudPlatform = "cloud.platform" + // Name of the cloud provider. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeCloudProvider = "cloud.provider" + // The geographical region the resource is running. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'us-central1', 'us-east-1' + // Note: Refer to your provider's docs to see the available regions, for example + // Alibaba Cloud regions, AWS regions, Azure regions, Google Cloud regions, or + // Tencent Cloud regions. + AttributeCloudRegion = "cloud.region" + // Cloud provider-specific native identifier of the monitored cloud resource (e.g. + // an ARN on AWS, a fully qualified resource ID on Azure, a full resource name on + // GCP) + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', '//run.googl + // eapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', '/sub + // scriptions//resourceGroups//providers/Microsoft.Web/sites + // //functions/' + // Note: On some cloud providers, it may not be possible to determine the full ID + // at startup, + // so it may be necessary to set cloud.resource_id as a span attribute instead.The + // exact value to use for cloud.resource_id depends on the cloud provider. + // The following well-known definitions MUST be used if you set this attribute and + // they apply:
+ //
+ //     • AWS Lambda: The function ARN.
+ //       Take care not to use the "invoked ARN" directly but replace any
+ //       alias suffix with the resolved function version, as the same runtime
+ //       instance may be invocable with multiple different aliases.
+ //     • GCP: The URI of the resource
+ //     • Azure: The Fully Qualified Resource ID of the invoked function,
+ //       not the function app, having the form
+ //       /subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>.
+ //       This means that a span attribute MUST be used, as an Azure function
+ //       app can host multiple functions that would usually share
+ //       a TracerProvider.
+ //
    + AttributeCloudResourceID = "cloud.resource_id" +) + +const ( + // Alibaba Cloud Elastic Compute Service + AttributeCloudPlatformAlibabaCloudECS = "alibaba_cloud_ecs" + // Alibaba Cloud Function Compute + AttributeCloudPlatformAlibabaCloudFc = "alibaba_cloud_fc" + // Red Hat OpenShift on Alibaba Cloud + AttributeCloudPlatformAlibabaCloudOpenshift = "alibaba_cloud_openshift" + // AWS Elastic Compute Cloud + AttributeCloudPlatformAWSEC2 = "aws_ec2" + // AWS Elastic Container Service + AttributeCloudPlatformAWSECS = "aws_ecs" + // AWS Elastic Kubernetes Service + AttributeCloudPlatformAWSEKS = "aws_eks" + // AWS Lambda + AttributeCloudPlatformAWSLambda = "aws_lambda" + // AWS Elastic Beanstalk + AttributeCloudPlatformAWSElasticBeanstalk = "aws_elastic_beanstalk" + // AWS App Runner + AttributeCloudPlatformAWSAppRunner = "aws_app_runner" + // Red Hat OpenShift on AWS (ROSA) + AttributeCloudPlatformAWSOpenshift = "aws_openshift" + // Azure Virtual Machines + AttributeCloudPlatformAzureVM = "azure_vm" + // Azure Container Apps + AttributeCloudPlatformAzureContainerApps = "azure_container_apps" + // Azure Container Instances + AttributeCloudPlatformAzureContainerInstances = "azure_container_instances" + // Azure Kubernetes Service + AttributeCloudPlatformAzureAKS = "azure_aks" + // Azure Functions + AttributeCloudPlatformAzureFunctions = "azure_functions" + // Azure App Service + AttributeCloudPlatformAzureAppService = "azure_app_service" + // Azure Red Hat OpenShift + AttributeCloudPlatformAzureOpenshift = "azure_openshift" + // Google Bare Metal Solution (BMS) + AttributeCloudPlatformGCPBareMetalSolution = "gcp_bare_metal_solution" + // Google Cloud Compute Engine (GCE) + AttributeCloudPlatformGCPComputeEngine = "gcp_compute_engine" + // Google Cloud Run + AttributeCloudPlatformGCPCloudRun = "gcp_cloud_run" + // Google Cloud Kubernetes Engine (GKE) + AttributeCloudPlatformGCPKubernetesEngine = "gcp_kubernetes_engine" + // Google Cloud Functions (GCF) + AttributeCloudPlatformGCPCloudFunctions = "gcp_cloud_functions" + // Google Cloud App Engine (GAE) + AttributeCloudPlatformGCPAppEngine = "gcp_app_engine" + // Red Hat OpenShift on Google Cloud + AttributeCloudPlatformGCPOpenshift = "gcp_openshift" + // Red Hat OpenShift on IBM Cloud + AttributeCloudPlatformIbmCloudOpenshift = "ibm_cloud_openshift" + // Tencent Cloud Cloud Virtual Machine (CVM) + AttributeCloudPlatformTencentCloudCvm = "tencent_cloud_cvm" + // Tencent Cloud Elastic Kubernetes Service (EKS) + AttributeCloudPlatformTencentCloudEKS = "tencent_cloud_eks" + // Tencent Cloud Serverless Cloud Function (SCF) + AttributeCloudPlatformTencentCloudScf = "tencent_cloud_scf" +) + +const ( + // Alibaba Cloud + AttributeCloudProviderAlibabaCloud = "alibaba_cloud" + // Amazon Web Services + AttributeCloudProviderAWS = "aws" + // Microsoft Azure + AttributeCloudProviderAzure = "azure" + // Google Cloud Platform + AttributeCloudProviderGCP = "gcp" + // Heroku Platform as a Service + AttributeCloudProviderHeroku = "heroku" + // IBM Cloud + AttributeCloudProviderIbmCloud = "ibm_cloud" + // Tencent Cloud + AttributeCloudProviderTencentCloud = "tencent_cloud" +) + +// Attributes for CloudEvents. +const ( + // The event_id uniquely identifies the event. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' + AttributeCloudeventsEventID = "cloudevents.event_id" + // The source identifies the context in which an event happened. 
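
For illustration, a minimal sketch of the cloud.* resource attributes an AWS Lambda resource detector might emit, using the provider and platform enum values above; the helper and its inputs are hypothetical, and the ARN handling follows the cloud.resource_id note:

    package semconv

    import "go.opentelemetry.io/otel/attribute"

    // awsLambdaCloudAttrs sketches cloud.* resource attributes for a Lambda
    // function; accountID, region, and resolvedARN are caller-supplied.
    func awsLambdaCloudAttrs(accountID, region, resolvedARN string) []attribute.KeyValue {
        return []attribute.KeyValue{
            attribute.String(AttributeCloudProvider, AttributeCloudProviderAWS),       // "aws"
            attribute.String(AttributeCloudPlatform, AttributeCloudPlatformAWSLambda), // "aws_lambda"
            attribute.String(AttributeCloudAccountID, accountID),
            attribute.String(AttributeCloudRegion, region),
            // Per the note above, use the resolved function ARN, not the alias ARN.
            attribute.String(AttributeCloudResourceID, resolvedARN),
        }
    }
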
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'https://github.com/cloudevents', '/cloudevents/spec/pull/123', 'my- + // service' + AttributeCloudeventsEventSource = "cloudevents.event_source" + // The version of the CloudEvents specification which the event uses. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1.0' + AttributeCloudeventsEventSpecVersion = "cloudevents.event_spec_version" + // The subject of the event in the context of the event producer (identified by + // source). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'mynewfile.jpg' + AttributeCloudeventsEventSubject = "cloudevents.event_subject" + // The event_type contains a value describing the type of event related to the + // originating occurrence. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'com.github.pull_request.opened', 'com.example.object.deleted.v2' + AttributeCloudeventsEventType = "cloudevents.event_type" +) + +// These attributes allow to report this unit of code and therefore to provide +// more context about the span. +const ( + // The column number in code.filepath best representing the operation. It SHOULD + // point within the code unit named in code.function. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 16 + AttributeCodeColumn = "code.column" + // The source code file name that identifies the code unit as uniquely as possible + // (preferably an absolute file path). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + AttributeCodeFilepath = "code.filepath" + // The method or function name, or equivalent (usually rightmost part of the code + // unit's name). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'serveRequest' + AttributeCodeFunction = "code.function" + // The line number in code.filepath best representing the operation. It SHOULD + // point within the code unit named in code.function. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 42 + AttributeCodeLineNumber = "code.lineno" + // The "namespace" within which code.function is defined. Usually the + // qualified class or module name, such that code.namespace + some separator + + // code.function form a unique identifier for the code unit. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'com.example.MyHTTPService' + AttributeCodeNamespace = "code.namespace" + // A stacktrace as a string in the natural representation for the language + // runtime. The representation is to be determined and documented by each language + // SIG. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + AttributeCodeStacktrace = "code.stacktrace" +) + +// A container instance. +const ( + // The command used to run the container (i.e. the command name). 
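
A minimal sketch combining the code.* attributes above to pinpoint the instrumented code unit; the values are the documented examples, not real lookups:

    package semconv

    import "go.opentelemetry.io/otel/attribute"

    // codeAttrs shows how code.namespace + code.function uniquely identify a
    // code unit, with file path and line number narrowing the location.
    func codeAttrs() []attribute.KeyValue {
        return []attribute.KeyValue{
            attribute.String(AttributeCodeNamespace, "com.example.MyHTTPService"),
            attribute.String(AttributeCodeFunction, "serveRequest"),
            attribute.String(AttributeCodeFilepath, "/usr/local/MyApplication/content_root/app/index.php"),
            attribute.Int(AttributeCodeLineNumber, 42),
        }
    }
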
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'otelcontribcol' + // Note: If using embedded credentials or sensitive data, it is recommended to + // remove them to prevent potential leakage. + AttributeContainerCommand = "container.command" + // All the command arguments (including the command/executable itself) run by the + // container. [2] + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'otelcontribcol, --config, config.yaml' + AttributeContainerCommandArgs = "container.command_args" + // The full command run by the container as a single string representing the full + // command. [2] + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'otelcontribcol --config config.yaml' + AttributeContainerCommandLine = "container.command_line" + // The CPU state for this data point. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'user', 'kernel' + AttributeContainerCPUState = "container.cpu.state" + // Container ID. Usually a UUID, as for example used to identify Docker + // containers. The UUID might be abbreviated. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'a3bf90e006b2' + AttributeContainerID = "container.id" + // Runtime specific image identifier. Usually a hash algorithm followed by a UUID. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: + // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f' + // Note: Docker defines a sha256 of the image id; container.image.id corresponds + // to the Image field from the Docker container inspect API endpoint. + // K8S defines a link to the container registry repository with digest "imageID": + // "registry.azurecr.io /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e + // 8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625". + // The ID is assigned by the container runtime and can vary in different + // environments. Consider using oci.manifest.digest if it is important to identify + // the same image in different environments/runtimes. + AttributeContainerImageID = "container.image.id" + // Name of the image the container was built on. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'gcr.io/opentelemetry/operator' + AttributeContainerImageName = "container.image.name" + // Repo digests of the container image as provided by the container runtime. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d7 + // 02d249a0ccb', 'internal.registry.example.com:5000/example@sha256:b69959407d21e8 + // a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578' + // Note: Docker and CRI report those under the RepoDigests field. + AttributeContainerImageRepoDigests = "container.image.repo_digests" + // Container image tags. An example can be found in Docker Image Inspect. Should + // be only the section of the full name for example from + // registry.example.com/my-org/my-image:. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'v1.27.1', '3.5.7-0' + AttributeContainerImageTags = "container.image.tags" + // Container name used by container runtime. 
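
One possible way a container resource detector could combine the attributes in this group (container.name is declared just below); the helper and its inputs are illustrative only:

    package semconv

    import "go.opentelemetry.io/otel/attribute"

    // containerResourceAttrs sketches container.* resource attributes as a
    // runtime-specific detector might populate them.
    func containerResourceAttrs(id, name, imageName string, tags []string) []attribute.KeyValue {
        return []attribute.KeyValue{
            attribute.String(AttributeContainerID, id),            // e.g. "a3bf90e006b2"
            attribute.String(AttributeContainerName, name),        // e.g. "opentelemetry-autoconf"
            attribute.String(AttributeContainerImageName, imageName),
            attribute.StringSlice(AttributeContainerImageTags, tags), // e.g. []string{"v1.27.1"}
        }
    }
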
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry-autoconf' + AttributeContainerName = "container.name" + // The container runtime managing this container. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'docker', 'containerd', 'rkt' + AttributeContainerRuntime = "container.runtime" +) + +const ( + // When tasks of the cgroup are in user mode (Linux). When all container processes are in user mode (Windows) + AttributeContainerCPUStateUser = "user" + // When CPU is used by the system (host OS) + AttributeContainerCPUStateSystem = "system" + // When tasks of the cgroup are in kernel mode (Linux). When all container processes are in kernel mode (Windows) + AttributeContainerCPUStateKernel = "kernel" +) + +// This group defines the attributes used to describe telemetry in the context +// of databases. +const ( + // The name of the connection pool; unique within the instrumented application. In + // case the connection pool implementation doesn't provide a name, instrumentation + // should use a combination of server.address and server.port attributes formatted + // as server.address:server.port. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myDataSource' + AttributeDBClientConnectionsPoolName = "db.client.connections.pool.name" + // The state of a connection in the pool + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'idle' + AttributeDBClientConnectionsState = "db.client.connections.state" + // The name of a collection (table, container) within the database. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'public.users', 'customers' + // Note: If the collection name is parsed from the query, it SHOULD match the + // value provided in the query and may be qualified with the schema and database + // name. + // It is RECOMMENDED to capture the value as provided by the application without + // attempting to do any case normalization. + AttributeDBCollectionName = "db.collection.name" + // The name of the database, fully qualified within the server address and port. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'customers', 'test.users' + // Note: If a database system has multiple namespace components, they SHOULD be + // concatenated (potentially using database system specific conventions) from most + // general to most specific namespace component, and more specific namespaces + // SHOULD NOT be captured without the more general namespaces, to ensure that + // "startswith" queries for the more general namespaces will be valid. + // Semantic conventions for individual database systems SHOULD document what + // db.namespace means in the context of that system. + // It is RECOMMENDED to capture the value as provided by the application without + // attempting to do any case normalization. + AttributeDBNamespace = "db.namespace" + // The name of the operation or command being executed. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + AttributeDBOperationName = "db.operation.name" + // The database query being executed. 
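
A short sketch of a database client span using the db.* attributes in this group (db.query.text and the db.system enum values are declared just below); the query value follows the documented parameterized example:

    package semconv

    import "go.opentelemetry.io/otel/attribute"

    // dbSpanAttrs sketches db.* span attributes for a PostgreSQL query,
    // keeping collection/namespace values as provided by the application.
    func dbSpanAttrs(query string) []attribute.KeyValue {
        return []attribute.KeyValue{
            attribute.String(AttributeDBSystem, AttributeDBSystemPostgreSQL), // "postgresql"
            attribute.String(AttributeDBNamespace, "customers"),
            attribute.String(AttributeDBOperationName, "SELECT"),
            attribute.String(AttributeDBCollectionName, "public.users"),
            attribute.String(AttributeDBQueryText, query), // e.g. "SELECT * FROM public.users WHERE username = ?"
        }
    }
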
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'SELECT * FROM wuser_table where username = ?', 'SET mykey "WuValue"' + AttributeDBQueryText = "db.query.text" + // The database management system (DBMS) product as identified by the client + // instrumentation. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: The actual DBMS may differ from the one identified by the client. For + // example, when using PostgreSQL client libraries to connect to a CockroachDB, + // the db.system is set to postgresql based on the instrumentation's best + // knowledge. + AttributeDBSystem = "db.system" +) + +const ( + // idle + AttributeDBClientConnectionsStateIdle = "idle" + // used + AttributeDBClientConnectionsStateUsed = "used" +) + +const ( + // Some other SQL database. Fallback only. See notes + AttributeDBSystemOtherSQL = "other_sql" + // Microsoft SQL Server + AttributeDBSystemMSSQL = "mssql" + // Microsoft SQL Server Compact + AttributeDBSystemMssqlcompact = "mssqlcompact" + // MySQL + AttributeDBSystemMySQL = "mysql" + // Oracle Database + AttributeDBSystemOracle = "oracle" + // IBM DB2 + AttributeDBSystemDB2 = "db2" + // PostgreSQL + AttributeDBSystemPostgreSQL = "postgresql" + // Amazon Redshift + AttributeDBSystemRedshift = "redshift" + // Apache Hive + AttributeDBSystemHive = "hive" + // Cloudscape + AttributeDBSystemCloudscape = "cloudscape" + // HyperSQL DataBase + AttributeDBSystemHSQLDB = "hsqldb" + // Progress Database + AttributeDBSystemProgress = "progress" + // SAP MaxDB + AttributeDBSystemMaxDB = "maxdb" + // SAP HANA + AttributeDBSystemHanaDB = "hanadb" + // Ingres + AttributeDBSystemIngres = "ingres" + // FirstSQL + AttributeDBSystemFirstSQL = "firstsql" + // EnterpriseDB + AttributeDBSystemEDB = "edb" + // InterSystems Caché + AttributeDBSystemCache = "cache" + // Adabas (Adaptable Database System) + AttributeDBSystemAdabas = "adabas" + // Firebird + AttributeDBSystemFirebird = "firebird" + // Apache Derby + AttributeDBSystemDerby = "derby" + // FileMaker + AttributeDBSystemFilemaker = "filemaker" + // Informix + AttributeDBSystemInformix = "informix" + // InstantDB + AttributeDBSystemInstantDB = "instantdb" + // InterBase + AttributeDBSystemInterbase = "interbase" + // MariaDB + AttributeDBSystemMariaDB = "mariadb" + // Netezza + AttributeDBSystemNetezza = "netezza" + // Pervasive PSQL + AttributeDBSystemPervasive = "pervasive" + // PointBase + AttributeDBSystemPointbase = "pointbase" + // SQLite + AttributeDBSystemSqlite = "sqlite" + // Sybase + AttributeDBSystemSybase = "sybase" + // Teradata + AttributeDBSystemTeradata = "teradata" + // Vertica + AttributeDBSystemVertica = "vertica" + // H2 + AttributeDBSystemH2 = "h2" + // ColdFusion IMQ + AttributeDBSystemColdfusion = "coldfusion" + // Apache Cassandra + AttributeDBSystemCassandra = "cassandra" + // Apache HBase + AttributeDBSystemHBase = "hbase" + // MongoDB + AttributeDBSystemMongoDB = "mongodb" + // Redis + AttributeDBSystemRedis = "redis" + // Couchbase + AttributeDBSystemCouchbase = "couchbase" + // CouchDB + AttributeDBSystemCouchDB = "couchdb" + // Microsoft Azure Cosmos DB + AttributeDBSystemCosmosDB = "cosmosdb" + // Amazon DynamoDB + AttributeDBSystemDynamoDB = "dynamodb" + // Neo4j + AttributeDBSystemNeo4j = "neo4j" + // Apache Geode + AttributeDBSystemGeode = "geode" + // Elasticsearch + AttributeDBSystemElasticsearch = "elasticsearch" + // Memcached + AttributeDBSystemMemcached = "memcached" + // CockroachDB + 
AttributeDBSystemCockroachdb = "cockroachdb" + // OpenSearch + AttributeDBSystemOpensearch = "opensearch" + // ClickHouse + AttributeDBSystemClickhouse = "clickhouse" + // Cloud Spanner + AttributeDBSystemSpanner = "spanner" + // Trino + AttributeDBSystemTrino = "trino" +) + +// This group defines attributes for Cassandra. +const ( + // The consistency level of the query. Based on consistency values from CQL. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeDBCassandraConsistencyLevel = "db.cassandra.consistency_level" + // The data center of the coordinating node for a query. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'us-west-2' + AttributeDBCassandraCoordinatorDC = "db.cassandra.coordinator.dc" + // The ID of the coordinating node for a query. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + AttributeDBCassandraCoordinatorID = "db.cassandra.coordinator.id" + // Whether or not the query is idempotent. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeDBCassandraIdempotence = "db.cassandra.idempotence" + // The fetch size used for paging, i.e. how many rows will be returned at once. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 5000 + AttributeDBCassandraPageSize = "db.cassandra.page_size" + // The number of times a query was speculatively executed. Not set or 0 if the + // query was not executed speculatively. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 0, 2 + AttributeDBCassandraSpeculativeExecutionCount = "db.cassandra.speculative_execution_count" +) + +const ( + // all + AttributeDBCassandraConsistencyLevelAll = "all" + // each_quorum + AttributeDBCassandraConsistencyLevelEachQuorum = "each_quorum" + // quorum + AttributeDBCassandraConsistencyLevelQuorum = "quorum" + // local_quorum + AttributeDBCassandraConsistencyLevelLocalQuorum = "local_quorum" + // one + AttributeDBCassandraConsistencyLevelOne = "one" + // two + AttributeDBCassandraConsistencyLevelTwo = "two" + // three + AttributeDBCassandraConsistencyLevelThree = "three" + // local_one + AttributeDBCassandraConsistencyLevelLocalOne = "local_one" + // any + AttributeDBCassandraConsistencyLevelAny = "any" + // serial + AttributeDBCassandraConsistencyLevelSerial = "serial" + // local_serial + AttributeDBCassandraConsistencyLevelLocalSerial = "local_serial" +) + +// This group defines attributes for Azure Cosmos DB. +const ( + // Unique Cosmos client instance id. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' + AttributeDBCosmosDBClientID = "db.cosmosdb.client_id" + // Cosmos client connection mode. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeDBCosmosDBConnectionMode = "db.cosmosdb.connection_mode" + // CosmosDB Operation Type. 
+ // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeDBCosmosDBOperationType = "db.cosmosdb.operation_type" + // RU consumed for that operation + // + // Type: double + // Requirement Level: Optional + // Stability: experimental + // Examples: 46.18, 1.0 + AttributeDBCosmosDBRequestCharge = "db.cosmosdb.request_charge" + // Request payload size in bytes + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + AttributeDBCosmosDBRequestContentLength = "db.cosmosdb.request_content_length" + // Cosmos DB status code. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 200, 201 + AttributeDBCosmosDBStatusCode = "db.cosmosdb.status_code" + // Cosmos DB sub status code. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1000, 1002 + AttributeDBCosmosDBSubStatusCode = "db.cosmosdb.sub_status_code" +) + +const ( + // Gateway (HTTP) connections mode + AttributeDBCosmosDBConnectionModeGateway = "gateway" + // Direct connection + AttributeDBCosmosDBConnectionModeDirect = "direct" +) + +const ( + // invalid + AttributeDBCosmosDBOperationTypeInvalid = "Invalid" + // create + AttributeDBCosmosDBOperationTypeCreate = "Create" + // patch + AttributeDBCosmosDBOperationTypePatch = "Patch" + // read + AttributeDBCosmosDBOperationTypeRead = "Read" + // read_feed + AttributeDBCosmosDBOperationTypeReadFeed = "ReadFeed" + // delete + AttributeDBCosmosDBOperationTypeDelete = "Delete" + // replace + AttributeDBCosmosDBOperationTypeReplace = "Replace" + // execute + AttributeDBCosmosDBOperationTypeExecute = "Execute" + // query + AttributeDBCosmosDBOperationTypeQuery = "Query" + // head + AttributeDBCosmosDBOperationTypeHead = "Head" + // head_feed + AttributeDBCosmosDBOperationTypeHeadFeed = "HeadFeed" + // upsert + AttributeDBCosmosDBOperationTypeUpsert = "Upsert" + // batch + AttributeDBCosmosDBOperationTypeBatch = "Batch" + // query_plan + AttributeDBCosmosDBOperationTypeQueryPlan = "QueryPlan" + // execute_javascript + AttributeDBCosmosDBOperationTypeExecuteJavascript = "ExecuteJavaScript" +) + +// This group defines attributes for Elasticsearch. +const ( + // Represents the identifier of an Elasticsearch cluster. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f' + AttributeDBElasticsearchClusterName = "db.elasticsearch.cluster.name" + // Represents the human-readable identifier of the node/instance to which a + // request was routed. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'instance-0000000001' + AttributeDBElasticsearchNodeName = "db.elasticsearch.node.name" +) + +// Attributes for software deployments. +const ( + // Name of the deployment environment (aka deployment tier). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'staging', 'production' + // Note: deployment.environment does not affect the uniqueness constraints defined + // through + // the service.namespace, service.name and service.instance.id resource + // attributes. + // This implies that resources carrying the following attribute combinations MUST + // be + // considered to be identifying the same service:
+ //
+ //     • service.name=frontend, deployment.environment=production
+ //     • service.name=frontend, deployment.environment=staging.
+ //
    + AttributeDeploymentEnvironment = "deployment.environment" +) + +// Attributes that represents an occurrence of a lifecycle transition on the +// Android platform. +const ( + // Deprecated use the device.app.lifecycle event definition including + // android.state as a payload field instead. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: The Android lifecycle states are defined in Activity lifecycle callbacks, + // and from which the OS identifiers are derived. + AttributeAndroidState = "android.state" +) + +const ( + // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time + AttributeAndroidStateCreated = "created" + // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state + AttributeAndroidStateBackground = "background" + // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states + AttributeAndroidStateForeground = "foreground" +) + +// These attributes may be used to describe the receiver of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // Destination address - domain name if available without reverse DNS lookup; + // otherwise, IP address or Unix domain socket name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the source side, and when communicating through an + // intermediary, destination.address SHOULD represent the destination address + // behind any intermediaries, for example proxies, if it's available. + AttributeDestinationAddress = "destination.address" + // Destination port number + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 3389, 2888 + AttributeDestinationPort = "destination.port" +) + +// Describes device attributes. +const ( + // A unique identifier representing the device + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values outlined + // below. This value is not an advertising identifier and MUST NOT be used as + // such. On iOS (Swift or Objective-C), this value MUST be equal to the vendor + // identifier. On Android (Java or Kotlin), this value MUST be equal to the + // Firebase Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found here on best + // practices and exact implementation details. Caution should be taken when + // storing personal data or anything which can identify a user. GDPR and data + // protection laws may apply, ensure you do your own due diligence. 
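
A minimal sketch of the destination.* pair defined above, as it might be set for a peer-to-peer or packet-level exchange; addr and port are illustrative inputs:

    package semconv

    import "go.opentelemetry.io/otel/attribute"

    // destinationAttrs builds the destination.* attributes, representing the
    // address behind any intermediaries when observed from the source side.
    func destinationAttrs(addr string, port int) []attribute.KeyValue {
        return []attribute.KeyValue{
            attribute.String(AttributeDestinationAddress, addr), // e.g. "destination.example.com"
            attribute.Int(AttributeDestinationPort, port),       // e.g. 3389
        }
    }
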
+ AttributeDeviceID = "device.id" + // The name of the device manufacturer + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via Build. iOS apps SHOULD hardcode + // the value Apple. + AttributeDeviceManufacturer = "device.manufacturer" + // The model identifier for the device + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine-readable version of the + // model identifier rather than the market or consumer-friendly name of the + // device. + AttributeDeviceModelIdentifier = "device.model.identifier" + // The marketing name for the device model + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human-readable version of the + // device model rather than a machine-readable alternative. + AttributeDeviceModelName = "device.model.name" +) + +// These attributes may be used for any disk related operation. +const ( + // The disk IO operation direction. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'read' + AttributeDiskIoDirection = "disk.io.direction" +) + +const ( + // read + AttributeDiskIoDirectionRead = "read" + // write + AttributeDiskIoDirectionWrite = "write" +) + +// The shared attributes used to report a DNS query. +const ( + // The name being queried. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'www.example.com', 'opentelemetry.io' + // Note: If the name field contains non-printable characters (below 32 or above + // 126), those characters should be represented as escaped base 10 integers + // (\DDD). Back slashes and quotes should be escaped. Tabs, carriage returns, and + // line feeds should be converted to \t, \r, and \n respectively. + AttributeDNSQuestionName = "dns.question.name" +) + +// Attributes for operations with an authenticated and/or authorized enduser. +const ( + // Username or client_id extracted from the access token or Authorization header + // in the inbound request from outside the system. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'username' + AttributeEnduserID = "enduser.id" + // Actual/assumed role the client is making the request under extracted from token + // or application security context. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'admin' + AttributeEnduserRole = "enduser.role" + // Scopes or granted authorities the client currently possesses extracted from + // token or application security context. The value would come from the scope + // associated with an OAuth 2.0 Access Token or an attribute value in a SAML 2.0 + // Assertion. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'read:message, write:files' + AttributeEnduserScope = "enduser.scope" +) + +// The shared attributes used to report an error. +const ( + // Describes a class of error the operation ended with. 
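
A minimal sketch of the enduser.* attributes above as they might be extracted from an access token or security context; the values mirror the documented examples:

    package semconv

    import "go.opentelemetry.io/otel/attribute"

    // enduserAttrs sketches enduser.* attributes for an authenticated request.
    func enduserAttrs(id, role, scope string) []attribute.KeyValue {
        return []attribute.KeyValue{
            attribute.String(AttributeEnduserID, id),       // e.g. "username"
            attribute.String(AttributeEnduserRole, role),   // e.g. "admin"
            attribute.String(AttributeEnduserScope, scope), // e.g. "read:message write:files"
        }
    }
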
+ // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'timeout', 'java.net.UnknownHostException', + // 'server_certificate_invalid', '500' + // Note: The error.type SHOULD be predictable, and SHOULD have low + // cardinality.When error.type is set to a type (e.g., an exception type), its + // canonical class name identifying the type within the artifact SHOULD be + // used.Instrumentations SHOULD document the list of errors they report.The + // cardinality of error.type within one instrumentation library SHOULD be low. + // Telemetry consumers that aggregate data from multiple instrumentation libraries + // and applications + // should be prepared for error.type to have high cardinality at query time when + // no + // additional filters are applied.If the operation has completed successfully, + // instrumentations SHOULD NOT set error.type.If a specific domain defines its own + // set of error identifiers (such as HTTP or gRPC status codes), + // it's RECOMMENDED to:
+ //
+ //     • Use a domain-specific attribute
+ //     • Set error.type to capture all errors, regardless of whether they are
+ //       defined within the domain-specific set or not.
+ //
    + AttributeErrorType = "error.type" +) + +const ( + // A fallback error value to be used when the instrumentation doesn't define a custom value + AttributeErrorTypeOther = "_OTHER" +) + +// Attributes for Events represented using Log Records. +const ( + // Identifies the class / type of event. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'browser.mouse.click', 'device.app.lifecycle' + // Note: Event names are subject to the same rules as attribute names. Notably, + // event names are namespaced to avoid collisions and provide a clean separation + // of semantics for events in separate domains like browser, mobile, and + // kubernetes. + AttributeEventName = "event.name" +) + +// The shared attributes used to report a single exception associated with a +// span or log. +const ( + // SHOULD be set to true if the exception event is recorded at a point where it is + // known that the exception is escaping the scope of the span. + // + // Type: boolean + // Requirement Level: Optional + // Stability: stable + // Note: An exception is considered to have escaped (or left) the scope of a span, + // if that span is ended while the exception is still logically "in + // flight". + // This may be actually "in flight" in some languages (e.g. if the + // exception + // is passed to a Context manager's __exit__ method in Python) but will + // usually be caught at the point of recording the exception in most languages.It + // is usually not possible to determine at the point where an exception is thrown + // whether it will escape the scope of a span. + // However, it is trivial to know that an exception + // will escape, if one checks for an active exception just before ending the span, + // as done in the example for recording span exceptions.It follows that an + // exception may still escape the scope of the span + // even if the exception.escaped attribute was not set or set to false, + // since the event might have been recorded at a time where it was not + // clear whether the exception will escape. + AttributeExceptionEscaped = "exception.escaped" + // The exception message. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly" + AttributeExceptionMessage = "exception.message" + // A stacktrace as a string in the natural representation for the language + // runtime. The representation is to be determined and documented by each language + // SIG. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + AttributeExceptionStacktrace = "exception.stacktrace" + // The type of the exception (its fully-qualified class name, if applicable). The + // dynamic type of the exception should be preferred over the static type in + // languages that support it. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'java.net.ConnectException', 'OSError' + AttributeExceptionType = "exception.type" +) + +// FaaS attributes +const ( + // A boolean that is true if the serverless function is executed for the first + // time (aka cold-start). 
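
A minimal sketch of the error.type guidance above: derive a low-cardinality type from the error's Go type and fall back to _OTHER when nothing better is known; errorTypeFor is a hypothetical helper, not part of this change:

    package semconv

    import (
        "fmt"

        "go.opentelemetry.io/otel/attribute"
    )

    // errorTypeFor returns an error.type attribute following the note above:
    // unset (zero KeyValue) on success, the canonical type name otherwise,
    // with the _OTHER fallback for anonymous errors.
    func errorTypeFor(err error) attribute.KeyValue {
        if err == nil {
            return attribute.KeyValue{} // error.type SHOULD NOT be set on success
        }
        if t := fmt.Sprintf("%T", err); t != "*errors.errorString" {
            return attribute.String(AttributeErrorType, t) // e.g. "*net.OpError"
        }
        return attribute.String(AttributeErrorType, AttributeErrorTypeOther) // "_OTHER"
    }
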
+ // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeFaaSColdstart = "faas.coldstart" + // A string containing the schedule period as Cron Expression. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '0/5 * * * ? *' + AttributeFaaSCron = "faas.cron" + // The name of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos + // DB to the database name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myBucketName', 'myDBName' + AttributeFaaSDocumentCollection = "faas.document.collection" + // The document name/table subjected to the operation. For example, in Cloud + // Storage or S3 is the name of the file, and in Cosmos DB the table name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myFile.txt', 'myTableName' + AttributeFaaSDocumentName = "faas.document.name" + // Describes the type of the operation that was performed on the data. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeFaaSDocumentOperation = "faas.document.operation" + // A string containing the time when the data was accessed in the ISO 8601 format + // expressed in UTC. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + AttributeFaaSDocumentTime = "faas.document.time" + // The execution environment ID as a string, that will be potentially reused for + // other invocations to the same function/function version. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note:
+ //
+ //     • AWS Lambda: Use the (full) log stream name.
+ //
    + AttributeFaaSInstance = "faas.instance" + // The invocation ID of the current function invocation. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + AttributeFaaSInvocationID = "faas.invocation_id" + // The name of the invoked function. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'my-function' + // Note: SHOULD be equal to the faas.name resource attribute of the invoked + // function. + AttributeFaaSInvokedName = "faas.invoked_name" + // The cloud provider of the invoked function. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: SHOULD be equal to the cloud.provider resource attribute of the invoked + // function. + AttributeFaaSInvokedProvider = "faas.invoked_provider" + // The cloud region of the invoked function. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the cloud.region resource attribute of the invoked + // function. + AttributeFaaSInvokedRegion = "faas.invoked_region" + // The amount of memory available to the serverless function converted to Bytes. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 134217728 + // Note: It's recommended to set this attribute since e.g. too little memory can + // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, + // the environment variable AWS_LAMBDA_FUNCTION_MEMORY_SIZE provides this + // information (which must be multiplied by 1,048,576). + AttributeFaaSMaxMemory = "faas.max_memory" + // The name of the single function that this runtime instance executes. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // code.namespace/code.function + // span attributes).For some cloud providers, the above definition is ambiguous. + // The following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud providers/products:
+ //
+ //     • Azure: The full name <FUNCAPP>/<FUNC>, i.e., function app name
+ //       followed by a forward slash followed by the function name (this form
+ //       can also be seen in the resource JSON for the function).
+ //       This means that a span attribute MUST be used, as an Azure function
+ //       app can host multiple functions that would usually share
+ //       a TracerProvider (see also the cloud.resource_id attribute).
+ //
    + AttributeFaaSName = "faas.name" + // A string containing the function invocation time in the ISO 8601 format + // expressed in UTC. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + AttributeFaaSTime = "faas.time" + // Type of the trigger which caused this function invocation. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeFaaSTrigger = "faas.trigger" + // The immutable version of the function being executed. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use:
+ //
+ //     • AWS Lambda: The function version
+ //       (an integer represented as a decimal string).
+ //     • Google Cloud Run (Services): The revision
+ //       (i.e., the function name plus the revision suffix).
+ //     • Google Cloud Functions: The value of the
+ //       K_REVISION environment variable.
+ //     • Azure Functions: Not applicable. Do not set this attribute.
+ //
    + AttributeFaaSVersion = "faas.version" +) + +const ( + // When a new object is created + AttributeFaaSDocumentOperationInsert = "insert" + // When an object is modified + AttributeFaaSDocumentOperationEdit = "edit" + // When an object is deleted + AttributeFaaSDocumentOperationDelete = "delete" +) + +const ( + // Alibaba Cloud + AttributeFaaSInvokedProviderAlibabaCloud = "alibaba_cloud" + // Amazon Web Services + AttributeFaaSInvokedProviderAWS = "aws" + // Microsoft Azure + AttributeFaaSInvokedProviderAzure = "azure" + // Google Cloud Platform + AttributeFaaSInvokedProviderGCP = "gcp" + // Tencent Cloud + AttributeFaaSInvokedProviderTencentCloud = "tencent_cloud" +) + +const ( + // A response to some data source operation such as a database or filesystem read/write + AttributeFaaSTriggerDatasource = "datasource" + // To provide an answer to an inbound HTTP request + AttributeFaaSTriggerHTTP = "http" + // A function is set to be executed when messages are sent to a messaging system + AttributeFaaSTriggerPubsub = "pubsub" + // A function is scheduled to be executed regularly + AttributeFaaSTriggerTimer = "timer" + // If none of the others apply + AttributeFaaSTriggerOther = "other" +) + +// Attributes for Feature Flags. +const ( + // The unique identifier of the feature flag. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'logo-color' + AttributeFeatureFlagKey = "feature_flag.key" + // The name of the service provider that performs the flag evaluation. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Flag Manager' + AttributeFeatureFlagProviderName = "feature_flag.provider_name" + // SHOULD be a semantic identifier for a value. If one is unavailable, a + // stringified version of the value can be used. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'red', 'true', 'on' + // Note: A semantic identifier, commonly referred to as a variant, provides a + // means + // for referring to a value without including the value itself. This can + // provide additional context for understanding the meaning behind a value. + // For example, the variant red maybe be used for the value #c05543.A stringified + // version of the value can be used in situations where a + // semantic identifier is unavailable. String representation of the value + // should be determined by the implementer. + AttributeFeatureFlagVariant = "feature_flag.variant" +) + +// Describes file attributes. +const ( + // Directory where the file is located. It should include the drive letter, when + // appropriate. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/home/user', 'C:\\Program Files\\MyApp' + AttributeFileDirectory = "file.directory" + // File extension, excluding the leading dot. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'png', 'gz' + // Note: When the file name has multiple extensions (example.tar.gz), only the + // last one should be captured ("gz", not "tar.gz"). + AttributeFileExtension = "file.extension" + // Name of the file including the extension, without the directory. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'example.png' + AttributeFileName = "file.name" + // Full path to the file, including the file name. It should include the drive + // letter, when appropriate. 
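
A minimal sketch of the feature_flag.* attributes above as an evaluation event might record them; the helper and its inputs are illustrative, with values taken from the documented examples:

    package semconv

    import "go.opentelemetry.io/otel/attribute"

    // featureFlagAttrs sketches feature_flag.* attributes for a flag
    // evaluation; variant is the semantic identifier for the resolved value.
    func featureFlagAttrs(key, provider, variant string) []attribute.KeyValue {
        return []attribute.KeyValue{
            attribute.String(AttributeFeatureFlagKey, key),               // e.g. "logo-color"
            attribute.String(AttributeFeatureFlagProviderName, provider), // e.g. "Flag Manager"
            attribute.String(AttributeFeatureFlagVariant, variant),       // e.g. "red"
        }
    }
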
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/home/alice/example.png', 'C:\\Program Files\\MyApp\\myapp.exe' + AttributeFilePath = "file.path" + // File size in bytes. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + AttributeFileSize = "file.size" +) + +// Attributes for Google Cloud Run. +const ( + // The name of the Cloud Run execution being run for the Job, as set by the + // CLOUD_RUN_EXECUTION environment variable. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'job-name-xxxx', 'sample-job-mdw84' + AttributeGCPCloudRunJobExecution = "gcp.cloud_run.job.execution" + // The index for a task within an execution as provided by the + // CLOUD_RUN_TASK_INDEX environment variable. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 0, 1 + AttributeGCPCloudRunJobTaskIndex = "gcp.cloud_run.job.task_index" +) + +// Attributes for Google Compute Engine (GCE). +const ( + // The hostname of a GCE instance. This is the full value of the default or custom + // hostname. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'my-host1234.example.com', 'sample-vm.us-west1-b.c.my- + // project.internal' + AttributeGCPGceInstanceHostname = "gcp.gce.instance.hostname" + // The instance name of a GCE instance. This is the value provided by host.name, + // the visible name of the instance in the Cloud Console UI, and the prefix for + // the default hostname of the instance as defined by the default internal DNS + // name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'instance-1', 'my-vm-name' + AttributeGCPGceInstanceName = "gcp.gce.instance.name" +) + +// The attributes used to describe telemetry in the context of LLM (Large +// Language Models) requests and responses. +const ( + // The full response received from the LLM. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: "[{'role': 'assistant', 'content': 'The capital of France is + // Paris.'}]" + // Note: It's RECOMMENDED to format completions as JSON string matching OpenAI + // messages format + AttributeGenAiCompletion = "gen_ai.completion" + // The full prompt sent to an LLM. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: "[{'role': 'user', 'content': 'What is the capital of France?'}]" + // Note: It's RECOMMENDED to format prompts as JSON string matching OpenAI + // messages format + AttributeGenAiPrompt = "gen_ai.prompt" + // The maximum number of tokens the LLM generates for a request. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 100 + AttributeGenAiRequestMaxTokens = "gen_ai.request.max_tokens" + // The name of the LLM a request is being made to. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'gpt-4' + AttributeGenAiRequestModel = "gen_ai.request.model" + // The temperature setting for the LLM request. + // + // Type: double + // Requirement Level: Optional + // Stability: experimental + // Examples: 0.0 + AttributeGenAiRequestTemperature = "gen_ai.request.temperature" + // The top_p sampling setting for the LLM request. 
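
A short sketch of the gen_ai.* request attributes in this group for an LLM call (the gen_ai.system enum value is declared just below); the helper and its inputs are illustrative only:

    package semconv

    import "go.opentelemetry.io/otel/attribute"

    // genAIRequestAttrs sketches gen_ai.* request attributes for a chat
    // completion call routed through an OpenAI-compatible client.
    func genAIRequestAttrs(model string, maxTokens int, temperature float64) []attribute.KeyValue {
        return []attribute.KeyValue{
            attribute.String(AttributeGenAiSystem, AttributeGenAiSystemOpenai), // "openai"
            attribute.String(AttributeGenAiRequestModel, model),                // e.g. "gpt-4"
            attribute.Int(AttributeGenAiRequestMaxTokens, maxTokens),
            attribute.Float64(AttributeGenAiRequestTemperature, temperature),
        }
    }
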
+ // + // Type: double + // Requirement Level: Optional + // Stability: experimental + // Examples: 1.0 + AttributeGenAiRequestTopP = "gen_ai.request.top_p" + // Array of reasons the model stopped generating tokens, corresponding to each + // generation received. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'stop' + AttributeGenAiResponseFinishReasons = "gen_ai.response.finish_reasons" + // The unique identifier for the completion. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'chatcmpl-123' + AttributeGenAiResponseID = "gen_ai.response.id" + // The name of the LLM a response was generated from. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'gpt-4-0613' + AttributeGenAiResponseModel = "gen_ai.response.model" + // The Generative AI product as identified by the client instrumentation. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'openai' + // Note: The actual GenAI product may differ from the one identified by the + // client. For example, when using OpenAI client libraries to communicate with + // Mistral, the gen_ai.system is set to openai based on the instrumentation's best + // knowledge. + AttributeGenAiSystem = "gen_ai.system" + // The number of tokens used in the LLM response (completion). + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 180 + AttributeGenAiUsageCompletionTokens = "gen_ai.usage.completion_tokens" + // The number of tokens used in the LLM prompt. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 100 + AttributeGenAiUsagePromptTokens = "gen_ai.usage.prompt_tokens" +) + +const ( + // OpenAI + AttributeGenAiSystemOpenai = "openai" +) + +// Attributes for GraphQL. +const ( + // The GraphQL document being executed. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'query findBookByID { bookByID(id: ?) { name } }' + // Note: The value may be sanitized to exclude sensitive information. + AttributeGraphqlDocument = "graphql.document" + // The name of the operation being executed. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'findBookByID' + AttributeGraphqlOperationName = "graphql.operation.name" + // The type of the operation being executed. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'query', 'mutation', 'subscription' + AttributeGraphqlOperationType = "graphql.operation.type" +) + +const ( + // GraphQL query + AttributeGraphqlOperationTypeQuery = "query" + // GraphQL mutation + AttributeGraphqlOperationTypeMutation = "mutation" + // GraphQL subscription + AttributeGraphqlOperationTypeSubscription = "subscription" +) + +// Attributes for the Android platform on which the Android application is +// running. 
+const ( + // Unique identifier for the application + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' + AttributeHerokuAppID = "heroku.app.id" + // Commit hash for the current release + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' + AttributeHerokuReleaseCommit = "heroku.release.commit" + // Time and date the release was created + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2022-10-23T18:00:42Z' + AttributeHerokuReleaseCreationTimestamp = "heroku.release.creation_timestamp" +) + +// A host is defined as a computing instance. For example, physical servers, +// virtual machines, switches or disk array. +const ( + // The CPU architecture the host system is running on. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeHostArch = "host.arch" + // The amount of level 2 memory cache available to the processor (in Bytes). + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 12288000 + AttributeHostCPUCacheL2Size = "host.cpu.cache.l2.size" + // Family or generation of the CPU. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '6', 'PA-RISC 1.1e' + AttributeHostCPUFamily = "host.cpu.family" + // Model identifier. It provides more granular information about the CPU, + // distinguishing it from other CPUs within the same family. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '6', '9000/778/B180L' + AttributeHostCPUModelID = "host.cpu.model.id" + // Model designation of the processor. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz' + AttributeHostCPUModelName = "host.cpu.model.name" + // Stepping or core revisions. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1', 'r1p1' + AttributeHostCPUStepping = "host.cpu.stepping" + // Processor manufacturer identifier. A maximum 12-character string. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'GenuineIntel' + // Note: CPUID command returns the vendor ID string in EBX, EDX and ECX registers. + // Writing these to memory in this order results in a 12-character string. + AttributeHostCPUVendorID = "host.cpu.vendor.id" + // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud + // provider. For non-containerized systems, this should be the machine-id. See the + // table below for the sources to use to determine the machine-id based on + // operating system. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'fdbf79e8af94cb7f9e8df36789187052' + AttributeHostID = "host.id" + // VM image ID or host OS image ID. For Cloud, this value is from the provider. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'ami-07b06b442921831e5' + AttributeHostImageID = "host.image.id" + // Name of the VM image or OS install the host was instantiated from. 
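+ // For instance, a cloud VM booted from a public machine image would
+ // typically carry the image's display name here and the provider-assigned
+ // identifier (such as an AMI ID) in host.image.id (an illustrative pairing,
+ // not mandated by the convention).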
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + AttributeHostImageName = "host.image.name" + // The version string of the VM image or host OS as defined in Version Attributes. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '0.1' + AttributeHostImageVersion = "host.image.version" + // Available IP addresses of the host, excluding loopback interfaces. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e' + // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 addresses + // MUST be specified in the RFC 5952 format. + AttributeHostIP = "host.ip" + // Available MAC addresses of the host, excluding loopback interfaces. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F' + // Note: MAC Addresses MUST be represented in IEEE RA hexadecimal form: as hyphen- + // separated octets in uppercase hexadecimal form from most to least significant. + AttributeHostMac = "host.mac" + // Name of the host. On Unix systems, it may contain what the hostname command + // returns, or the fully qualified hostname, or another name specified by the + // user. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry-test' + AttributeHostName = "host.name" + // Type of host. For Cloud, this must be the machine type. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'n1-standard-1' + AttributeHostType = "host.type" +) + +const ( + // AMD64 + AttributeHostArchAMD64 = "amd64" + // ARM32 + AttributeHostArchARM32 = "arm32" + // ARM64 + AttributeHostArchARM64 = "arm64" + // Itanium + AttributeHostArchIA64 = "ia64" + // 32-bit PowerPC + AttributeHostArchPPC32 = "ppc32" + // 64-bit PowerPC + AttributeHostArchPPC64 = "ppc64" + // IBM z/Architecture + AttributeHostArchS390x = "s390x" + // 32-bit x86 + AttributeHostArchX86 = "x86" +) + +// Semantic convention attributes in the HTTP namespace. +const ( + // State of the HTTP connection in the HTTP connection pool. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'active', 'idle' + AttributeHTTPConnectionState = "http.connection.state" + // The size of the request payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as the + // Content-Length header. For requests using transport encoding, this should be + // the compressed size. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 3495 + AttributeHTTPRequestBodySize = "http.request.body.size" + // HTTP request method. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'GET', 'POST', 'HEAD' + // Note: HTTP request method value SHOULD be "known" to the + // instrumentation. 
+ // By default, this convention defines "known" methods as the ones + // listed in RFC9110 + // and the PATCH method defined in RFC5789. If the HTTP request method is not known + // to instrumentation, it MUST set the http.request.method attribute to _OTHER. If + // the HTTP instrumentation could end up converting valid HTTP request methods to + // _OTHER, then it MUST provide a way to override + // the list of known HTTP methods. If this override is done via environment + // variable, then the environment variable MUST be named + // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated list of + // case-sensitive known HTTP methods + // (this list MUST be a full override of the default known method, it is not a + // list of known methods in addition to the defaults). HTTP method names are case- + // sensitive and http.request.method attribute value MUST match a known HTTP + // method name exactly. + // Instrumentations for specific web frameworks that consider HTTP methods to be + // case insensitive, SHOULD populate a canonical equivalent. + // Tracing instrumentations that do so, MUST also set http.request.method_original + // to the original value. + AttributeHTTPRequestMethod = "http.request.method" + // Original HTTP method sent by the client in the request line. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'GeT', 'ACL', 'foo' + AttributeHTTPRequestMethodOriginal = "http.request.method_original" + // The ordinal number of request resending attempt (for any reason, including + // redirects). + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 3 + // Note: The resend count SHOULD be updated each time an HTTP request gets resent + // by the client, regardless of what was the cause of the resending (e.g. + // redirection, authorization failure, 503 Server Unavailable, network issues, or + // any other). + AttributeHTTPRequestResendCount = "http.request.resend_count" + // The total size of the request in bytes. This should be the total number of + // bytes sent over the wire, including the request line (HTTP/1.1), framing + // (HTTP/2 and HTTP/3), headers, and request body if any. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1437 + AttributeHTTPRequestSize = "http.request.size" + // The size of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as the + // Content-Length header. For requests using transport encoding, this should be + // the compressed size. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 3495 + AttributeHTTPResponseBodySize = "http.response.body.size" + // The total size of the response in bytes. This should be the total number of + // bytes sent over the wire, including the status line (HTTP/1.1), framing (HTTP/2 + // and HTTP/3), headers, and response body and trailers if any. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1437 + AttributeHTTPResponseSize = "http.response.size" + // HTTP response status code. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 200 + AttributeHTTPResponseStatusCode = "http.response.status_code" + // The matched route, that is, the path template in the format used by the + // respective server framework. 
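+ // As an illustrative sketch only (assuming the key/value helpers from
+ // go.opentelemetry.io/otel/attribute and a url.path attribute defined
+ // elsewhere in this package), a server that matched GET /users/42 against
+ // the template /users/{id} might record:
+ //
+ //	span.SetAttributes(
+ //		attribute.String(AttributeHTTPRoute, "/users/{id}"), // low-cardinality template
+ //		attribute.String("url.path", "/users/42"),           // concrete, high-cardinality path
+ //	)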
+ // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '/users/:userID?', '{controller}/{action}/{id?}' + // Note: MUST NOT be populated when this is not supported by the HTTP server + // framework as the route attribute should have low-cardinality and the URI path + // can NOT substitute it. + // SHOULD include the application root if there is one. + AttributeHTTPRoute = "http.route" +) + +const ( + // active state + AttributeHTTPConnectionStateActive = "active" + // idle state + AttributeHTTPConnectionStateIdle = "idle" +) + +const ( + // CONNECT method + AttributeHTTPRequestMethodConnect = "CONNECT" + // DELETE method + AttributeHTTPRequestMethodDelete = "DELETE" + // GET method + AttributeHTTPRequestMethodGet = "GET" + // HEAD method + AttributeHTTPRequestMethodHead = "HEAD" + // OPTIONS method + AttributeHTTPRequestMethodOptions = "OPTIONS" + // PATCH method + AttributeHTTPRequestMethodPatch = "PATCH" + // POST method + AttributeHTTPRequestMethodPost = "POST" + // PUT method + AttributeHTTPRequestMethodPut = "PUT" + // TRACE method + AttributeHTTPRequestMethodTrace = "TRACE" + // Any HTTP method that the instrumentation has no prior knowledge of + AttributeHTTPRequestMethodOther = "_OTHER" +) + +// Java Virtual machine related attributes. +const ( + // Name of the buffer pool. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'mapped', 'direct' + // Note: Pool names are generally obtained via BufferPoolMXBean#getName(). + AttributeJvmBufferPoolName = "jvm.buffer.pool.name" + // Name of the garbage collector action. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'end of minor GC', 'end of major GC' + // Note: Garbage collector action is generally obtained via + // GarbageCollectionNotificationInfo#getGcAction(). + AttributeJvmGcAction = "jvm.gc.action" + // Name of the garbage collector. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'G1 Young Generation', 'G1 Old Generation' + // Note: Garbage collector name is generally obtained via + // GarbageCollectionNotificationInfo#getGcName(). + AttributeJvmGcName = "jvm.gc.name" + // Name of the memory pool. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space' + // Note: Pool names are generally obtained via MemoryPoolMXBean#getName(). + AttributeJvmMemoryPoolName = "jvm.memory.pool.name" + // The type of memory. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'heap', 'non_heap' + AttributeJvmMemoryType = "jvm.memory.type" + // Whether the thread is daemon or not. + // + // Type: boolean + // Requirement Level: Optional + // Stability: stable + AttributeJvmThreadDaemon = "jvm.thread.daemon" + // State of the thread. 
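+ // For example, a thread contending for a monitor reports "blocked", while
+ // one parked in Object.wait() with no timeout reports "waiting"; see the
+ // jvm.thread.state values defined below.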
+ // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'runnable', 'blocked' + AttributeJvmThreadState = "jvm.thread.state" +) + +const ( + // Heap memory + AttributeJvmMemoryTypeHeap = "heap" + // Non-heap memory + AttributeJvmMemoryTypeNonHeap = "non_heap" +) + +const ( + // A thread that has not yet started is in this state + AttributeJvmThreadStateNew = "new" + // A thread executing in the Java virtual machine is in this state + AttributeJvmThreadStateRunnable = "runnable" + // A thread that is blocked waiting for a monitor lock is in this state + AttributeJvmThreadStateBlocked = "blocked" + // A thread that is waiting indefinitely for another thread to perform a particular action is in this state + AttributeJvmThreadStateWaiting = "waiting" + // A thread that is waiting for another thread to perform an action for up to a specified waiting time is in this state + AttributeJvmThreadStateTimedWaiting = "timed_waiting" + // A thread that has exited is in this state + AttributeJvmThreadStateTerminated = "terminated" +) + +// Kubernetes resource attributes. +const ( + // The name of the cluster. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry-cluster' + AttributeK8SClusterName = "k8s.cluster.name" + // A pseudo-ID for the cluster, set to the UID of the kube-system namespace. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d' + // Note: K8S doesn't have support for obtaining a cluster ID. If this is ever + // added, we will recommend collecting the k8s.cluster.uid through the + // official APIs. In the meantime, we are able to use the uid of the + // kube-system namespace as a proxy for cluster ID. Read on for the + // rationale. Every object created in a K8S cluster is assigned a distinct UID. The + // kube-system namespace is used by Kubernetes itself and will exist + // for the lifetime of the cluster. Using the uid of the kube-system + // namespace is a reasonable proxy for the K8S ClusterID as it will only + // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are + // UUIDs as standardized by + // ISO/IEC 9834-8 and ITU-T X.667. + // Which states:
+ // If generated according to one of the mechanisms defined in Rec.
+ // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be + // different from all other UUIDs generated before 3603 A.D., or is + // extremely likely to be different (depending on the mechanism + // chosen). Therefore, UIDs between clusters should be extremely unlikely to + // conflict. + AttributeK8SClusterUID = "k8s.cluster.uid" + // The name of the Container from Pod specification, must be unique within a Pod. + // Container runtime usually uses different globally unique name (container.name). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'redis' + AttributeK8SContainerName = "k8s.container.name" + // Number of times the container was restarted. This attribute can be used to + // identify a particular container (running or stopped) within a container spec. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + AttributeK8SContainerRestartCount = "k8s.container.restart_count" + // Last terminated reason of the Container. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Evicted', 'Error' + AttributeK8SContainerStatusLastTerminatedReason = "k8s.container.status.last_terminated_reason" + // The name of the CronJob. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SCronJobName = "k8s.cronjob.name" + // The UID of the CronJob. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SCronJobUID = "k8s.cronjob.uid" + // The name of the DaemonSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SDaemonSetName = "k8s.daemonset.name" + // The UID of the DaemonSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SDaemonSetUID = "k8s.daemonset.uid" + // The name of the Deployment. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SDeploymentName = "k8s.deployment.name" + // The UID of the Deployment. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SDeploymentUID = "k8s.deployment.uid" + // The name of the Job. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SJobName = "k8s.job.name" + // The UID of the Job. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SJobUID = "k8s.job.uid" + // The name of the namespace that the pod is running in. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'default' + AttributeK8SNamespaceName = "k8s.namespace.name" + // The name of the Node. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'node-1' + AttributeK8SNodeName = "k8s.node.name" + // The UID of the Node. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + AttributeK8SNodeUID = "k8s.node.uid" + // The name of the Pod. 
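+ // For instance, a Pod created through a Deployment typically gets a
+ // generated name such as 'my-deployment-7d4cc676d7-abcde' (an illustrative
+ // name), combining the Deployment name, the ReplicaSet hash, and a random
+ // suffix.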
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry-pod-autoconf' + AttributeK8SPodName = "k8s.pod.name" + // The UID of the Pod. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SPodUID = "k8s.pod.uid" + // The name of the ReplicaSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SReplicaSetName = "k8s.replicaset.name" + // The UID of the ReplicaSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SReplicaSetUID = "k8s.replicaset.uid" + // The name of the StatefulSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SStatefulSetName = "k8s.statefulset.name" + // The UID of the StatefulSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SStatefulSetUID = "k8s.statefulset.uid" +) + +// Log attributes +const ( + // The stream associated with the log. See below for a list of well-known values. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeLogIostream = "log.iostream" +) + +const ( + // Logs from stdout stream + AttributeLogIostreamStdout = "stdout" + // Events from stderr stream + AttributeLogIostreamStderr = "stderr" +) + +// Attributes for a file to which log was emitted. +const ( + // The basename of the file. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'audit.log' + AttributeLogFileName = "log.file.name" + // The basename of the file, with symlinks resolved. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'uuid.log' + AttributeLogFileNameResolved = "log.file.name_resolved" + // The full path to the file. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/var/log/mysql/audit.log' + AttributeLogFilePath = "log.file.path" + // The full path to the file, with symlinks resolved. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/var/lib/docker/uuid.log' + AttributeLogFilePathResolved = "log.file.path_resolved" +) + +// The generic attributes that may be used in any Log Record. +const ( + // A unique identifier for the Log Record. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV' + // Note: If an id is provided, other log records with the same id will be + // considered duplicates and can be removed safely. This means, that two + // distinguishable log records MUST have different values. + // The id MAY be an Universally Unique Lexicographically Sortable Identifier + // (ULID), but other identifiers (e.g. UUID) may be used as needed. + AttributeLogRecordUID = "log.record.uid" +) + +// Attributes describing telemetry around messaging systems and messaging +// activities. +const ( + // The number of messages sent, received, or processed in the scope of the + // batching operation. 
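+ // For example, a consumer that pulls 5 messages in one receive call would
+ // set messaging.batch.message_count to 5 on the corresponding span, while a
+ // single-message publish would omit the attribute (see the Note below).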
+ // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set messaging.batch.message_count on spans + // that operate with a single message. When a messaging client library supports + // both batch and single-message API for the same operation, instrumentations + // SHOULD use messaging.batch.message_count for batching APIs and SHOULD NOT use + // it for single-message APIs. + AttributeMessagingBatchMessageCount = "messaging.batch.message_count" + // A unique identifier for the client that consumes or produces a message. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'client-5', 'myhost@8742@s8083jm' + AttributeMessagingClientID = "messaging.client.id" + // A boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeMessagingDestinationAnonymous = "messaging.destination.anonymous" + // The message destination name + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MyQueue', 'MyTopic' + // Note: Destination name SHOULD uniquely identify a specific queue, topic or + // other entity within the broker. If + // the broker doesn't have such notion, the destination name SHOULD uniquely + // identify the broker. + AttributeMessagingDestinationName = "messaging.destination.name" + // The identifier of the partition messages are sent to or received from, unique + // within the messaging.destination.name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1' + AttributeMessagingDestinationPartitionID = "messaging.destination.partition.id" + // Low cardinality representation of the messaging destination name + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/customers/{customerID}' + // Note: Destination names could be constructed from templates. An example would + // be a destination name involving a user name or product id. Although the + // destination name in this case is of high cardinality, the underlying template + // is of low cardinality and can be effectively used for grouping and aggregation. + AttributeMessagingDestinationTemplate = "messaging.destination.template" + // A boolean that is true if the message destination is temporary and might not + // exist anymore after messages are processed. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeMessagingDestinationTemporary = "messaging.destination.temporary" + // A boolean that is true if the publish message destination is anonymous (could + // be unnamed or have auto-generated name). + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeMessagingDestinationPublishAnonymous = "messaging.destination_publish.anonymous" + // The name of the original destination the message was published to + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MyQueue', 'MyTopic' + // Note: The name SHOULD uniquely identify a specific queue, topic, or other + // entity within the broker. If + // the broker doesn't have such notion, the original destination name SHOULD + // uniquely identify the broker. 
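+ // As an illustrative scenario: if a producer publishes to the topic 'input'
+ // and an intermediary re-routes the message to the queue a consumer reads
+ // from, the consumer records the queue in messaging.destination.name and
+ // 'input' in messaging.destination_publish.name.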
+ AttributeMessagingDestinationPublishName = "messaging.destination_publish.name" + // The size of the message body in bytes. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1439 + // Note: This can refer to both the compressed or uncompressed body size. If both + // sizes are known, the uncompressed + // body size should be used. + AttributeMessagingMessageBodySize = "messaging.message.body.size" + // The conversation ID identifying the conversation to which the message belongs, + // represented as a string. Sometimes called "Correlation ID". + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MyConversationID' + AttributeMessagingMessageConversationID = "messaging.message.conversation_id" + // The size of the message body and metadata in bytes. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 2738 + // Note: This can refer to both the compressed or uncompressed size. If both sizes + // are known, the uncompressed + // size should be used. + AttributeMessagingMessageEnvelopeSize = "messaging.message.envelope.size" + // A value used by the messaging system as an identifier for the message, + // represented as a string. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + AttributeMessagingMessageID = "messaging.message.id" + // The system-specific name of the messaging operation. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'ack', 'nack', 'send' + AttributeMessagingOperationName = "messaging.operation.name" + // A string identifying the type of the messaging operation. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: If a custom value is used, it MUST be of low cardinality. + AttributeMessagingOperationType = "messaging.operation.type" + // The messaging system as identified by the client instrumentation. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: The actual messaging system may differ from the one known by the client. + // For example, when using Kafka client libraries to communicate with Azure Event + // Hubs, the messaging.system is set to kafka based on the instrumentation's best + // knowledge. + AttributeMessagingSystem = "messaging.system" +) + +const ( + // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created + AttributeMessagingOperationTypePublish = "publish" + // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios + AttributeMessagingOperationTypeCreate = "create" + // One or more messages are requested by a consumer. 
+ // This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages + AttributeMessagingOperationTypeReceive = "receive" + // One or more messages are delivered to or processed by a consumer + AttributeMessagingOperationTypeDeliver = "process" + // One or more messages are settled + AttributeMessagingOperationTypeSettle = "settle" +) + +const ( + // Apache ActiveMQ + AttributeMessagingSystemActivemq = "activemq" + // Amazon Simple Queue Service (SQS) + AttributeMessagingSystemAWSSqs = "aws_sqs" + // Azure Event Grid + AttributeMessagingSystemEventgrid = "eventgrid" + // Azure Event Hubs + AttributeMessagingSystemEventhubs = "eventhubs" + // Azure Service Bus + AttributeMessagingSystemServicebus = "servicebus" + // Google Cloud Pub/Sub + AttributeMessagingSystemGCPPubsub = "gcp_pubsub" + // Java Message Service + AttributeMessagingSystemJms = "jms" + // Apache Kafka + AttributeMessagingSystemKafka = "kafka" + // RabbitMQ + AttributeMessagingSystemRabbitmq = "rabbitmq" + // Apache RocketMQ + AttributeMessagingSystemRocketmq = "rocketmq" +) + +// This group describes attributes specific to Apache Kafka. +const ( + // Name of the Kafka Consumer Group that is handling the message. Only applies to + // consumers, not producers. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'my-group' + AttributeMessagingKafkaConsumerGroup = "messaging.kafka.consumer.group" + // Message keys in Kafka are used for grouping alike messages to ensure they're + // processed on the same partition. They differ from messaging.message.id in that + // they're not unique. If the key is null, the attribute MUST NOT be set. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myKey' + // Note: If the key type is not string, its string representation has to be + // supplied for the attribute. If the key has no unambiguous, canonical string + // form, don't include its value. + AttributeMessagingKafkaMessageKey = "messaging.kafka.message.key" + // The offset of a record in the corresponding Kafka partition. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 42 + AttributeMessagingKafkaMessageOffset = "messaging.kafka.message.offset" + // A boolean that is true if the message is a tombstone. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeMessagingKafkaMessageTombstone = "messaging.kafka.message.tombstone" +) + +// This group describes attributes specific to RabbitMQ. +const ( + // RabbitMQ message routing key. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myKey' + AttributeMessagingRabbitmqDestinationRoutingKey = "messaging.rabbitmq.destination.routing_key" + // RabbitMQ message delivery tag + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 123 + AttributeMessagingRabbitmqMessageDeliveryTag = "messaging.rabbitmq.message.delivery_tag" +) + +// This group describes attributes specific to RocketMQ. +const ( + // Name of the RocketMQ producer/consumer group that is handling the message. The + // client type is identified by the SpanKind. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myConsumerGroup' + AttributeMessagingRocketmqClientGroup = "messaging.rocketmq.client_group" + // Model of message consumption. 
+ // This only applies to consumer spans. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeMessagingRocketmqConsumptionModel = "messaging.rocketmq.consumption_model" + // The delay time level for delay message, which determines the message delay + // time. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 3 + AttributeMessagingRocketmqMessageDelayTimeLevel = "messaging.rocketmq.message.delay_time_level" + // The timestamp in milliseconds that the delay message is expected to be + // delivered to consumer. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1665987217045 + AttributeMessagingRocketmqMessageDeliveryTimestamp = "messaging.rocketmq.message.delivery_timestamp" + // It is essential for FIFO message. Messages that belong to the same message + // group are always processed one by one within the same consumer group. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myMessageGroup' + AttributeMessagingRocketmqMessageGroup = "messaging.rocketmq.message.group" + // Key(s) of message, another way to mark message besides message id. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'keyA', 'keyB' + AttributeMessagingRocketmqMessageKeys = "messaging.rocketmq.message.keys" + // The secondary classifier of message besides topic. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'tagA' + AttributeMessagingRocketmqMessageTag = "messaging.rocketmq.message.tag" + // Type of message. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeMessagingRocketmqMessageType = "messaging.rocketmq.message.type" + // Namespace of RocketMQ resources, resources in different namespaces are + // individual. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myNamespace' + AttributeMessagingRocketmqNamespace = "messaging.rocketmq.namespace" +) + +const ( + // Clustering consumption model + AttributeMessagingRocketmqConsumptionModelClustering = "clustering" + // Broadcasting consumption model + AttributeMessagingRocketmqConsumptionModelBroadcasting = "broadcasting" +) + +const ( + // Normal message + AttributeMessagingRocketmqMessageTypeNormal = "normal" + // FIFO message + AttributeMessagingRocketmqMessageTypeFifo = "fifo" + // Delay message + AttributeMessagingRocketmqMessageTypeDelay = "delay" + // Transaction message + AttributeMessagingRocketmqMessageTypeTransaction = "transaction" +) + +// This group describes attributes specific to GCP Pub/Sub. +const ( + // The ack deadline in seconds set for the modify ack deadline request. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 10 + AttributeMessagingGCPPubsubMessageAckDeadline = "messaging.gcp_pubsub.message.ack_deadline" + // The ack id for a given message. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'ack_id' + AttributeMessagingGCPPubsubMessageAckID = "messaging.gcp_pubsub.message.ack_id" + // The delivery attempt for a given message. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 2 + AttributeMessagingGCPPubsubMessageDeliveryAttempt = "messaging.gcp_pubsub.message.delivery_attempt" + // The ordering key for a given message. 
+ // If the attribute is not present, the + // message does not have an ordering key. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'ordering_key' + AttributeMessagingGCPPubsubMessageOrderingKey = "messaging.gcp_pubsub.message.ordering_key" +) + +// This group describes attributes specific to Azure Service Bus. +const ( + // The name of the subscription in the topic messages are received from. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'mySubscription' + AttributeMessagingServicebusDestinationSubscriptionName = "messaging.servicebus.destination.subscription_name" + // Describes the settlement type. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeMessagingServicebusDispositionStatus = "messaging.servicebus.disposition_status" + // Number of deliveries that have been attempted for this message. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 2 + AttributeMessagingServicebusMessageDeliveryCount = "messaging.servicebus.message.delivery_count" + // The UTC epoch seconds at which the message has been accepted and stored in the + // entity. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1701393730 + AttributeMessagingServicebusMessageEnqueuedTime = "messaging.servicebus.message.enqueued_time" +) + +const ( + // Message is completed + AttributeMessagingServicebusDispositionStatusComplete = "complete" + // Message is abandoned + AttributeMessagingServicebusDispositionStatusAbandon = "abandon" + // Message is sent to dead letter queue + AttributeMessagingServicebusDispositionStatusDeadLetter = "dead_letter" + // Message is deferred + AttributeMessagingServicebusDispositionStatusDefer = "defer" +) + +// This group describes attributes specific to Azure Event Hubs. +const ( + // The name of the consumer group the event consumer is associated with. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'indexer' + AttributeMessagingEventhubsConsumerGroup = "messaging.eventhubs.consumer.group" + // The UTC epoch seconds at which the message has been accepted and stored in the + // entity. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1701393730 + AttributeMessagingEventhubsMessageEnqueuedTime = "messaging.eventhubs.message.enqueued_time" +) + +// These attributes may be used for any network related operation. +const ( + // The ISO 3166-1 alpha-2 2-character country code associated with the mobile + // carrier network. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'DE' + AttributeNetworkCarrierIcc = "network.carrier.icc" + // The mobile carrier country code. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '310' + AttributeNetworkCarrierMcc = "network.carrier.mcc" + // The mobile carrier network code. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '001' + AttributeNetworkCarrierMnc = "network.carrier.mnc" + // The name of the mobile carrier. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'sprint' + AttributeNetworkCarrierName = "network.carrier.name" + // This describes more details regarding the connection.type. 
+ // It may be the type + // of cell technology connection, but it could be used for describing details + // about a wifi connection. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'LTE' + AttributeNetworkConnectionSubtype = "network.connection.subtype" + // The internet connection type. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'wifi' + AttributeNetworkConnectionType = "network.connection.type" + // The network IO operation direction. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'transmit' + AttributeNetworkIoDirection = "network.io.direction" + // Local address of the network connection - IP address or Unix domain socket + // name. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '10.1.2.80', '/tmp/my.sock' + AttributeNetworkLocalAddress = "network.local.address" + // Local port number of the network connection. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 65123 + AttributeNetworkLocalPort = "network.local.port" + // Peer address of the network connection - IP address or Unix domain socket name. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '10.1.2.80', '/tmp/my.sock' + AttributeNetworkPeerAddress = "network.peer.address" + // Peer port number of the network connection. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 65123 + AttributeNetworkPeerPort = "network.peer.port" + // OSI application layer or non-OSI equivalent. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'amqp', 'http', 'mqtt' + // Note: The value SHOULD be normalized to lowercase. + AttributeNetworkProtocolName = "network.protocol.name" + // The actual version of the protocol used for network communication. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '1.1', '2' + // Note: If protocol version is subject to negotiation (for example using ALPN), + // this attribute SHOULD be set to the negotiated version. If the actual protocol + // version is not known, this attribute SHOULD NOT be set. + AttributeNetworkProtocolVersion = "network.protocol.version" + // OSI transport layer or inter-process communication method. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'tcp', 'udp' + // Note: The value SHOULD be normalized to lowercase. Consider always setting the + // transport when setting a port number, since + // a port number is ambiguous without knowing the transport. For example + // different processes could be listening on TCP port 12345 and UDP port 12345. + AttributeNetworkTransport = "network.transport" + // OSI network layer or non-OSI equivalent. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'ipv4', 'ipv6' + // Note: The value SHOULD be normalized to lowercase. + AttributeNetworkType = "network.type" +) + +const ( + // GPRS + AttributeNetworkConnectionSubtypeGprs = "gprs" + // EDGE + AttributeNetworkConnectionSubtypeEdge = "edge" + // UMTS + AttributeNetworkConnectionSubtypeUmts = "umts" + // CDMA + AttributeNetworkConnectionSubtypeCdma = "cdma" + // EVDO Rel. 0 + AttributeNetworkConnectionSubtypeEvdo0 = "evdo_0" + // EVDO Rev. 
+ // A + AttributeNetworkConnectionSubtypeEvdoA = "evdo_a" + // CDMA2000 1XRTT + AttributeNetworkConnectionSubtypeCdma20001xrtt = "cdma2000_1xrtt" + // HSDPA + AttributeNetworkConnectionSubtypeHsdpa = "hsdpa" + // HSUPA + AttributeNetworkConnectionSubtypeHsupa = "hsupa" + // HSPA + AttributeNetworkConnectionSubtypeHspa = "hspa" + // IDEN + AttributeNetworkConnectionSubtypeIden = "iden" + // EVDO Rev. B + AttributeNetworkConnectionSubtypeEvdoB = "evdo_b" + // LTE + AttributeNetworkConnectionSubtypeLte = "lte" + // EHRPD + AttributeNetworkConnectionSubtypeEhrpd = "ehrpd" + // HSPAP + AttributeNetworkConnectionSubtypeHspap = "hspap" + // GSM + AttributeNetworkConnectionSubtypeGsm = "gsm" + // TD-SCDMA + AttributeNetworkConnectionSubtypeTdScdma = "td_scdma" + // IWLAN + AttributeNetworkConnectionSubtypeIwlan = "iwlan" + // 5G NR (New Radio) + AttributeNetworkConnectionSubtypeNr = "nr" + // 5G NRNSA (New Radio Non-Standalone) + AttributeNetworkConnectionSubtypeNrnsa = "nrnsa" + // LTE CA + AttributeNetworkConnectionSubtypeLteCa = "lte_ca" +) + +const ( + // wifi + AttributeNetworkConnectionTypeWifi = "wifi" + // wired + AttributeNetworkConnectionTypeWired = "wired" + // cell + AttributeNetworkConnectionTypeCell = "cell" + // unavailable + AttributeNetworkConnectionTypeUnavailable = "unavailable" + // unknown + AttributeNetworkConnectionTypeUnknown = "unknown" +) + +const ( + // transmit + AttributeNetworkIoDirectionTransmit = "transmit" + // receive + AttributeNetworkIoDirectionReceive = "receive" +) + +const ( + // TCP + AttributeNetworkTransportTCP = "tcp" + // UDP + AttributeNetworkTransportUDP = "udp" + // Named or anonymous pipe + AttributeNetworkTransportPipe = "pipe" + // Unix domain socket + AttributeNetworkTransportUnix = "unix" +) + +const ( + // IPv4 + AttributeNetworkTypeIpv4 = "ipv4" + // IPv6 + AttributeNetworkTypeIpv6 = "ipv6" +) + +// An OCI image manifest. +const ( + // The digest of the OCI image manifest. For container images specifically is the + // digest by which the container image is known. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: + // 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4' + // Note: Follows OCI Image Manifest Specification, and specifically the Digest + // property. + // An example can be found in Example Image Manifest. + AttributeOciManifestDigest = "oci.manifest.digest" +) + +// Attributes used by the OpenTracing Shim layer. +const ( + // Parent-child Reference type + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: The causal relationship between a child Span and a parent Span. + AttributeOpentracingRefType = "opentracing.ref_type" +) + +const ( + // The parent Span depends on the child Span in some capacity + AttributeOpentracingRefTypeChildOf = "child_of" + // The parent Span doesn't depend in any way on the result of the child Span + AttributeOpentracingRefTypeFollowsFrom = "follows_from" +) + +// The operating system (OS) on which the process represented by this resource +// is running. +const ( + // Unique identifier for a particular build or compilation of the operating + // system. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'TQ3C.230805.001.B2', '20E247', '22621' + AttributeOSBuildID = "os.build_id" + // Human readable (not intended to be parsed) OS version information, like e.g. + // reported by ver or lsb_release -a commands. 
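+ // On Linux, for instance, this could be assembled from the PRETTY_NAME
+ // field of /etc/os-release plus the kernel release from uname -r; the
+ // convention does not mandate a particular source (illustrative only).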
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS' + AttributeOSDescription = "os.description" + // Human readable operating system name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'iOS', 'Android', 'Ubuntu' + AttributeOSName = "os.name" + // The operating system type. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeOSType = "os.type" + // The version string of the operating system as defined in Version Attributes. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '14.2.1', '18.04.1' + AttributeOSVersion = "os.version" +) + +const ( + // Microsoft Windows + AttributeOSTypeWindows = "windows" + // Linux + AttributeOSTypeLinux = "linux" + // Apple Darwin + AttributeOSTypeDarwin = "darwin" + // FreeBSD + AttributeOSTypeFreeBSD = "freebsd" + // NetBSD + AttributeOSTypeNetBSD = "netbsd" + // OpenBSD + AttributeOSTypeOpenBSD = "openbsd" + // DragonFly BSD + AttributeOSTypeDragonflyBSD = "dragonflybsd" + // HP-UX (Hewlett Packard Unix) + AttributeOSTypeHPUX = "hpux" + // AIX (Advanced Interactive eXecutive) + AttributeOSTypeAIX = "aix" + // SunOS, Oracle Solaris + AttributeOSTypeSolaris = "solaris" + // IBM z/OS + AttributeOSTypeZOS = "z_os" +) + +// Attributes reserved for OpenTelemetry +const ( + // Name of the code, either "OK" or "ERROR". MUST NOT be set + // if the status code is UNSET. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + AttributeOTelStatusCode = "otel.status_code" + // Description of the Status if it has a value, otherwise not set. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'resource not found' + AttributeOTelStatusDescription = "otel.status_description" +) + +const ( + // The operation has been validated by an Application developer or Operator to have completed successfully + AttributeOTelStatusCodeOk = "OK" + // The operation contains an error + AttributeOTelStatusCodeError = "ERROR" +) + +// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's +// concepts. +const ( + // The name of the instrumentation scope - (InstrumentationScope.Name in OTLP). + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'io.opentelemetry.contrib.mongodb' + AttributeOTelScopeName = "otel.scope.name" + // The version of the instrumentation scope - (InstrumentationScope.Version in + // OTLP). + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '1.0.0' + AttributeOTelScopeVersion = "otel.scope.version" +) + +// Operations that access some remote service. +const ( + // The service.name of the remote service. SHOULD be equal to the actual + // service.name resource attribute of the remote service if any. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'AuthTokenCache' + AttributePeerService = "peer.service" +) + +// An operating system process. +const ( + // The command used to launch the process (i.e. the command name). On Linux based + // systems, can be set to the zeroth string in proc/[pid]/cmdline. On Windows, can + // be set to the first parameter extracted from GetCommandLineW. 
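+ // For example (illustrative): for a collector launched as
+ // '/usr/bin/otelcol --config=config.yaml', process.command would be
+ // '/usr/bin/otelcol', while the full string belongs in process.command_line
+ // and the argv vector in process.command_args.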
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'cmd/otelcol' + AttributeProcessCommand = "process.command" + // All the command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited strings + // extracted from proc/[pid]/cmdline. For libc-based executables, this would be + // the full argv vector passed to main. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'cmd/otelcol', '--config=config.yaml' + AttributeProcessCommandArgs = "process.command_args" + // The full command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of GetCommandLineW. Do not + // set this if you have to assemble it just for monitoring; use + // process.command_args instead. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'C:\\cmd\\otelcol --config="my directory\\config.yaml"' + AttributeProcessCommandLine = "process.command_line" + // Specifies whether the context switches for this data point were voluntary or + // involuntary. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeProcessContextSwitchType = "process.context_switch_type" + // The date and time the process was created, in ISO 8601 format. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2023-11-21T09:25:34.853Z' + AttributeProcessCreationTime = "process.creation.time" + // The name of the process executable. On Linux based systems, can be set to the + // Name in proc/[pid]/status. On Windows, can be set to the base name of + // GetProcessImageFileNameW. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'otelcol' + AttributeProcessExecutableName = "process.executable.name" + // The full path to the process executable. On Linux based systems, can be set to + // the target of proc/[pid]/exe. On Windows, can be set to the result of + // GetProcessImageFileNameW. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/usr/bin/cmd/otelcol' + AttributeProcessExecutablePath = "process.executable.path" + // The exit code of the process. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 127 + AttributeProcessExitCode = "process.exit.code" + // The date and time the process exited, in ISO 8601 format. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2023-11-21T09:26:12.315Z' + AttributeProcessExitTime = "process.exit.time" + // The PID of the process's group leader. This is also the process group ID (PGID) + // of the process. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 23 + AttributeProcessGroupLeaderPID = "process.group_leader.pid" + // Whether the process is connected to an interactive shell. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeProcessInteractive = "process.interactive" + // The username of the user that owns the process. 
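+ // For a set-user-ID binary such as passwd, for instance, this and
+ // process.user.name would typically report 'root', while
+ // process.real_user.name still identifies the invoking user (an
+ // illustrative case, not a normative requirement).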
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'root' + AttributeProcessOwner = "process.owner" + // The type of page fault for this data point. Type major is for major/hard page + // faults, and minor is for minor/soft page faults. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeProcessPagingFaultType = "process.paging.fault_type" + // Parent Process identifier (PPID). + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 111 + AttributeProcessParentPID = "process.parent_pid" + // Process identifier (PID). + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1234 + AttributeProcessPID = "process.pid" + // The real user ID (RUID) of the process. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1000 + AttributeProcessRealUserID = "process.real_user.id" + // The username of the real user of the process. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'operator' + AttributeProcessRealUserName = "process.real_user.name" + // An additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + AttributeProcessRuntimeDescription = "process.runtime.description" + // The name of the runtime of this process. For compiled native binaries, this + // SHOULD be the name of the compiler. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'OpenJDK Runtime Environment' + AttributeProcessRuntimeName = "process.runtime.name" + // The version of the runtime of this process, as returned by the runtime without + // modification. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '14.0.2' + AttributeProcessRuntimeVersion = "process.runtime.version" + // The saved user ID (SUID) of the process. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1002 + AttributeProcessSavedUserID = "process.saved_user.id" + // The username of the saved user. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'operator' + AttributeProcessSavedUserName = "process.saved_user.name" + // The PID of the process's session leader. This is also the session ID (SID) of + // the process. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 14 + AttributeProcessSessionLeaderPID = "process.session_leader.pid" + // The effective user ID (EUID) of the process. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1001 + AttributeProcessUserID = "process.user.id" + // The username of the effective user of the process. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'root' + AttributeProcessUserName = "process.user.name" + // Virtual process identifier. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 12 + // Note: The process ID within a PID namespace. 
+ // This is not necessarily unique + // across all processes on the host but it is unique within the process namespace + // that the process exists within. + AttributeProcessVpid = "process.vpid" +) + +const ( + // voluntary + AttributeProcessContextSwitchTypeVoluntary = "voluntary" + // involuntary + AttributeProcessContextSwitchTypeInvoluntary = "involuntary" +) + +const ( + // major + AttributeProcessPagingFaultTypeMajor = "major" + // minor + AttributeProcessPagingFaultTypeMinor = "minor" +) + +// Attributes for process CPU +const ( + // The CPU state of the process. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeProcessCPUState = "process.cpu.state" +) + +const ( + // system + AttributeProcessCPUStateSystem = "system" + // user + AttributeProcessCPUStateUser = "user" + // wait + AttributeProcessCPUStateWait = "wait" +) + +// Attributes for remote procedure calls. +const ( + // The error codes of the Connect request. Error codes are always string values. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeRPCConnectRPCErrorCode = "rpc.connect_rpc.error_code" + // The numeric status code of the gRPC request. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeRPCGRPCStatusCode = "rpc.grpc.status_code" + // error.code property of response if it is an error response. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: -32700, 100 + AttributeRPCJsonrpcErrorCode = "rpc.jsonrpc.error_code" + // error.message property of response if it is an error response. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Parse error', 'User already exists' + AttributeRPCJsonrpcErrorMessage = "rpc.jsonrpc.error_message" + // id property of request or response. Since protocol allows id to be int, string, + // null or missing (for notifications), value is expected to be cast to string for + // simplicity. Use empty string in case of null value. Omit entirely if this is a + // notification. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '10', 'request-7', '' + AttributeRPCJsonrpcRequestID = "rpc.jsonrpc.request_id" + // Protocol version as in jsonrpc property of request/response. Since JSON-RPC 1.0 + // doesn't specify this, the value can be omitted. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2.0', '1.0' + AttributeRPCJsonrpcVersion = "rpc.jsonrpc.version" + // Compressed size of the message in bytes. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + AttributeRPCMessageCompressedSize = "rpc.message.compressed_size" + // MUST be calculated as two different counters starting from 1: one for sent + // messages and one for received messages. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Note: This way we guarantee that the values will be consistent between + // different implementations. + AttributeRPCMessageID = "rpc.message.id" + // Whether this is a received or sent message. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeRPCMessageType = "rpc.message.type" + // Uncompressed size of the message in bytes. 
+ // + // Type: int + // Requirement Level: Optional + // Stability: experimental + AttributeRPCMessageUncompressedSize = "rpc.message.uncompressed_size" + // The name of the (logical) method being called, must be equal to the $method + // part in the span name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The code.function attribute may be used to store the latter + // (e.g., method actually executing the call on the server side, RPC client stub + // method on the client side). + AttributeRPCMethod = "rpc.method" + // The full (logical) name of the service being called, including its package + // name, if applicable. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing class. + // The code.namespace attribute may be used to store the latter (despite the + // attribute name, it may include a class name; e.g., class with method actually + // executing the call on the server side, RPC client stub class on the client + // side). + AttributeRPCService = "rpc.service" + // A string identifying the remoting system. See below for a list of well-known + // identifiers. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeRPCSystem = "rpc.system" +) + +const ( + // cancelled + AttributeRPCConnectRPCErrorCodeCancelled = "cancelled" + // unknown + AttributeRPCConnectRPCErrorCodeUnknown = "unknown" + // invalid_argument + AttributeRPCConnectRPCErrorCodeInvalidArgument = "invalid_argument" + // deadline_exceeded + AttributeRPCConnectRPCErrorCodeDeadlineExceeded = "deadline_exceeded" + // not_found + AttributeRPCConnectRPCErrorCodeNotFound = "not_found" + // already_exists + AttributeRPCConnectRPCErrorCodeAlreadyExists = "already_exists" + // permission_denied + AttributeRPCConnectRPCErrorCodePermissionDenied = "permission_denied" + // resource_exhausted + AttributeRPCConnectRPCErrorCodeResourceExhausted = "resource_exhausted" + // failed_precondition + AttributeRPCConnectRPCErrorCodeFailedPrecondition = "failed_precondition" + // aborted + AttributeRPCConnectRPCErrorCodeAborted = "aborted" + // out_of_range + AttributeRPCConnectRPCErrorCodeOutOfRange = "out_of_range" + // unimplemented + AttributeRPCConnectRPCErrorCodeUnimplemented = "unimplemented" + // internal + AttributeRPCConnectRPCErrorCodeInternal = "internal" + // unavailable + AttributeRPCConnectRPCErrorCodeUnavailable = "unavailable" + // data_loss + AttributeRPCConnectRPCErrorCodeDataLoss = "data_loss" + // unauthenticated + AttributeRPCConnectRPCErrorCodeUnauthenticated = "unauthenticated" +) + +const ( + // OK + AttributeRPCGRPCStatusCodeOk = "0" + // CANCELLED + AttributeRPCGRPCStatusCodeCancelled = "1" + // UNKNOWN + AttributeRPCGRPCStatusCodeUnknown = "2" + // INVALID_ARGUMENT + AttributeRPCGRPCStatusCodeInvalidArgument = "3" + // DEADLINE_EXCEEDED + AttributeRPCGRPCStatusCodeDeadlineExceeded = "4" + // NOT_FOUND + AttributeRPCGRPCStatusCodeNotFound = "5" + // ALREADY_EXISTS + AttributeRPCGRPCStatusCodeAlreadyExists = "6" + // PERMISSION_DENIED + AttributeRPCGRPCStatusCodePermissionDenied = "7" + // RESOURCE_EXHAUSTED + 
AttributeRPCGRPCStatusCodeResourceExhausted = "8" + // FAILED_PRECONDITION + AttributeRPCGRPCStatusCodeFailedPrecondition = "9" + // ABORTED + AttributeRPCGRPCStatusCodeAborted = "10" + // OUT_OF_RANGE + AttributeRPCGRPCStatusCodeOutOfRange = "11" + // UNIMPLEMENTED + AttributeRPCGRPCStatusCodeUnimplemented = "12" + // INTERNAL + AttributeRPCGRPCStatusCodeInternal = "13" + // UNAVAILABLE + AttributeRPCGRPCStatusCodeUnavailable = "14" + // DATA_LOSS + AttributeRPCGRPCStatusCodeDataLoss = "15" + // UNAUTHENTICATED + AttributeRPCGRPCStatusCodeUnauthenticated = "16" +) + +const ( + // sent + AttributeRPCMessageTypeSent = "SENT" + // received + AttributeRPCMessageTypeReceived = "RECEIVED" +) + +const ( + // gRPC + AttributeRPCSystemGRPC = "grpc" + // Java RMI + AttributeRPCSystemJavaRmi = "java_rmi" + // .NET WCF + AttributeRPCSystemDotnetWcf = "dotnet_wcf" + // Apache Dubbo + AttributeRPCSystemApacheDubbo = "apache_dubbo" + // Connect RPC + AttributeRPCSystemConnectRPC = "connect_rpc" +) + +// These attributes may be used to describe the server in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). This covers all TCP +// network interactions since TCP is connection-based and one side initiates +// the connection (an exception is made for peer-to-peer communication over TCP +// where the "user-facing" surface of the protocol / API doesn't expose a clear +// notion of client and server). This also covers UDP network interactions +// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. +const ( + // Server domain name if available without reverse DNS lookup; otherwise, IP + // address or Unix domain socket name. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the client side, and when communicating through an + // intermediary, server.address SHOULD represent the server address behind any + // intermediaries, for example proxies, if it's available. + AttributeServerAddress = "server.address" + // Server port number. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 80, 8080, 443 + // Note: When observed from the client side, and when communicating through an + // intermediary, server.port SHOULD represent the server port behind any + // intermediaries, for example proxies, if it's available. + AttributeServerPort = "server.port" +) + +// A service instance. +const ( + // The string ID of the service instance. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // service.namespace,service.name pair (in other words + // service.namespace,service.name,service.instance.id triplet MUST be globally + // unique). The ID helps to + // distinguish instances of the same service that exist at the same time (e.g. + // instances of a horizontally scaled + // service).Implementations, such as SDKs, are recommended to generate a random + // Version 1 or Version 4 RFC + // 4122 UUID, but are free to use an inherent unique ID as the source of + // this value if stability is desirable. 
In that case, the ID SHOULD be used as
+ // source of a UUID Version 5 and SHOULD use the following UUID as the
+ // namespace: 4d63009a-8d0f-11ee-aad7-4c796ed8e320. UUIDs are typically
+ // recommended, as only an opaque value for the purposes of identifying a
+ // service instance is needed. Similar to what can be seen in the man page for
+ // the /etc/machine-id file, the underlying data, such as pod name and
+ // namespace, should be treated as confidential, being the user's choice to
+ // expose it or not via another resource attribute. For applications running
+ // behind an application server (like unicorn), we do not recommend using one
+ // identifier for all processes participating in the application. Instead,
+ // it's recommended that each division (e.g. a worker thread in unicorn) have
+ // its own instance.id. It's not recommended for a Collector to set
+ // service.instance.id if it can't unambiguously determine the service
+ // instance that is generating that telemetry. For instance, creating a UUID
+ // based on pod.name will likely be wrong, as the Collector might not know
+ // from which container within that pod the telemetry originated. However,
+ // Collectors can set the service.instance.id if they can unambiguously
+ // determine the service instance for that telemetry. This is typically the
+ // case for scraping receivers, as they know the target address and port.
+ AttributeServiceInstanceID = "service.instance.id"
+ // Logical name of the service.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 'shoppingcart'
+ // Note: MUST be the same for all instances of horizontally scaled services. If
+ // the value was not specified, SDKs MUST fall back to unknown_service:
+ // concatenated with process.executable.name, e.g. unknown_service:bash. If
+ // process.executable.name is not available, the value MUST be set to
+ // unknown_service.
+ AttributeServiceName = "service.name"
+ // A namespace for service.name.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'Shop'
+ // Note: A string value having a meaning that helps to distinguish a group of
+ // services, for example the team name that owns a group of services. service.name
+ // is expected to be unique within the same namespace. If service.namespace is not
+ // specified in the Resource then service.name is expected to be unique for all
+ // services that have no explicit namespace defined (so the empty/unspecified
+ // namespace is simply one more valid namespace). Zero-length namespace string is
+ // assumed equal to unspecified namespace.
+ AttributeServiceNamespace = "service.namespace"
+ // The version string of the service API or implementation. The format is not
+ // defined by these conventions.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: '2.0.0', 'a01dbef8a'
+ AttributeServiceVersion = "service.version"
+)
+
+// Session is defined as the period of time encompassing all activities
+// performed by the application and the actions executed by the end user.
+// Consequently, a Session is represented as a collection of Logs, Events, and
+// Spans emitted by the Client Application throughout the Session's duration.
+// Each Session is assigned a unique identifier, which is included as an
+// attribute in the Logs, Events, and Spans generated during the Session's
+// lifecycle.
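+// As an illustration only (hypothetical instrumentation, assuming a
+// pcommon.Map named attrs), each signal emitted during a session might be
+// stamped with that identifier:
+//
+//    attrs.PutStr(AttributeSessionID, "00112233-4455-6677-8899-aabbccddeeff")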
+// When a session reaches end of life, typically due to user inactivity or +// session timeout, a new session identifier will be assigned. The previous +// session identifier may be provided by the instrumentation so that telemetry +// backends can link the two sessions. +const ( + // A unique id to identify a session. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '00112233-4455-6677-8899-aabbccddeeff' + AttributeSessionID = "session.id" + // The previous session.id for this user, when known. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '00112233-4455-6677-8899-aabbccddeeff' + AttributeSessionPreviousID = "session.previous_id" +) + +// SignalR attributes +const ( + // SignalR HTTP connection closure status. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'app_shutdown', 'timeout' + AttributeSignalrConnectionStatus = "signalr.connection.status" + // SignalR transport type + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'web_sockets', 'long_polling' + AttributeSignalrTransport = "signalr.transport" +) + +const ( + // The connection was closed normally + AttributeSignalrConnectionStatusNormalClosure = "normal_closure" + // The connection was closed due to a timeout + AttributeSignalrConnectionStatusTimeout = "timeout" + // The connection was closed because the app is shutting down + AttributeSignalrConnectionStatusAppShutdown = "app_shutdown" +) + +const ( + // ServerSentEvents protocol + AttributeSignalrTransportServerSentEvents = "server_sent_events" + // LongPolling protocol + AttributeSignalrTransportLongPolling = "long_polling" + // WebSockets protocol + AttributeSignalrTransportWebSockets = "web_sockets" +) + +// These attributes may be used to describe the sender of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // Source address - domain name if available without reverse DNS lookup; + // otherwise, IP address or Unix domain socket name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the destination side, and when communicating through + // an intermediary, source.address SHOULD represent the source address behind any + // intermediaries, for example proxies, if it's available. 
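+ // As a hypothetical illustration (assuming a pcommon.Map named attrs), a
+ // packet-tracing instrumentation might record the sender as:
+ //
+ //    attrs.PutStr(AttributeSourceAddress, "10.1.2.80")
+ //    attrs.PutInt(AttributeSourcePort, 2888)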
+ AttributeSourceAddress = "source.address" + // Source port number + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 3389, 2888 + AttributeSourcePort = "source.port" +) + +// Describes System attributes +const ( + // The device identifier + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '(identifier)' + AttributeSystemDevice = "system.device" +) + +// Describes System CPU attributes +const ( + // The logical CPU number [0..n-1] + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1 + AttributeSystemCPULogicalNumber = "system.cpu.logical_number" + // The state of the CPU + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'idle', 'interrupt' + AttributeSystemCPUState = "system.cpu.state" +) + +const ( + // user + AttributeSystemCPUStateUser = "user" + // system + AttributeSystemCPUStateSystem = "system" + // nice + AttributeSystemCPUStateNice = "nice" + // idle + AttributeSystemCPUStateIdle = "idle" + // iowait + AttributeSystemCPUStateIowait = "iowait" + // interrupt + AttributeSystemCPUStateInterrupt = "interrupt" + // steal + AttributeSystemCPUStateSteal = "steal" +) + +// Describes System Memory attributes +const ( + // The memory state + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'free', 'cached' + AttributeSystemMemoryState = "system.memory.state" +) + +const ( + // used + AttributeSystemMemoryStateUsed = "used" + // free + AttributeSystemMemoryStateFree = "free" + // shared + AttributeSystemMemoryStateShared = "shared" + // buffers + AttributeSystemMemoryStateBuffers = "buffers" + // cached + AttributeSystemMemoryStateCached = "cached" +) + +// Describes System Memory Paging attributes +const ( + // The paging access direction + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'in' + AttributeSystemPagingDirection = "system.paging.direction" + // The memory paging state + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'free' + AttributeSystemPagingState = "system.paging.state" + // The memory paging type + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'minor' + AttributeSystemPagingType = "system.paging.type" +) + +const ( + // in + AttributeSystemPagingDirectionIn = "in" + // out + AttributeSystemPagingDirectionOut = "out" +) + +const ( + // used + AttributeSystemPagingStateUsed = "used" + // free + AttributeSystemPagingStateFree = "free" +) + +const ( + // major + AttributeSystemPagingTypeMajor = "major" + // minor + AttributeSystemPagingTypeMinor = "minor" +) + +// Describes Filesystem attributes +const ( + // The filesystem mode + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'rw, ro' + AttributeSystemFilesystemMode = "system.filesystem.mode" + // The filesystem mount path + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/mnt/data' + AttributeSystemFilesystemMountpoint = "system.filesystem.mountpoint" + // The filesystem state + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'used' + AttributeSystemFilesystemState = "system.filesystem.state" + // The filesystem type + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + 
// Examples: 'ext4' + AttributeSystemFilesystemType = "system.filesystem.type" +) + +const ( + // used + AttributeSystemFilesystemStateUsed = "used" + // free + AttributeSystemFilesystemStateFree = "free" + // reserved + AttributeSystemFilesystemStateReserved = "reserved" +) + +const ( + // fat32 + AttributeSystemFilesystemTypeFat32 = "fat32" + // exfat + AttributeSystemFilesystemTypeExfat = "exfat" + // ntfs + AttributeSystemFilesystemTypeNtfs = "ntfs" + // refs + AttributeSystemFilesystemTypeRefs = "refs" + // hfsplus + AttributeSystemFilesystemTypeHfsplus = "hfsplus" + // ext4 + AttributeSystemFilesystemTypeExt4 = "ext4" +) + +// Describes Network attributes +const ( + // A stateless protocol MUST NOT set this attribute + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'close_wait' + AttributeSystemNetworkState = "system.network.state" +) + +const ( + // close + AttributeSystemNetworkStateClose = "close" + // close_wait + AttributeSystemNetworkStateCloseWait = "close_wait" + // closing + AttributeSystemNetworkStateClosing = "closing" + // delete + AttributeSystemNetworkStateDelete = "delete" + // established + AttributeSystemNetworkStateEstablished = "established" + // fin_wait_1 + AttributeSystemNetworkStateFinWait1 = "fin_wait_1" + // fin_wait_2 + AttributeSystemNetworkStateFinWait2 = "fin_wait_2" + // last_ack + AttributeSystemNetworkStateLastAck = "last_ack" + // listen + AttributeSystemNetworkStateListen = "listen" + // syn_recv + AttributeSystemNetworkStateSynRecv = "syn_recv" + // syn_sent + AttributeSystemNetworkStateSynSent = "syn_sent" + // time_wait + AttributeSystemNetworkStateTimeWait = "time_wait" +) + +// Describes System Process attributes +const ( + // The process state, e.g., Linux Process State Codes + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'running' + AttributeSystemProcessStatus = "system.process.status" +) + +const ( + // running + AttributeSystemProcessStatusRunning = "running" + // sleeping + AttributeSystemProcessStatusSleeping = "sleeping" + // stopped + AttributeSystemProcessStatusStopped = "stopped" + // defunct + AttributeSystemProcessStatusDefunct = "defunct" +) + +// Attributes for telemetry SDK. +const ( + // The language of the telemetry SDK. + // + // Type: Enum + // Requirement Level: Required + // Stability: stable + AttributeTelemetrySDKLanguage = "telemetry.sdk.language" + // The name of the telemetry SDK as defined above. + // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: 'opentelemetry' + // Note: The OpenTelemetry SDK MUST set the telemetry.sdk.name attribute to + // opentelemetry. + // If another SDK, like a fork or a vendor-provided implementation, is used, this + // SDK MUST set the + // telemetry.sdk.name attribute to the fully-qualified class or module name of + // this SDK's main entry point + // or another suitable identifier depending on the language. + // The identifier opentelemetry is reserved and MUST NOT be used in this case. + // All custom identifiers SHOULD be stable across different versions of an + // implementation. + AttributeTelemetrySDKName = "telemetry.sdk.name" + // The version string of the telemetry SDK. + // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: '1.2.3' + AttributeTelemetrySDKVersion = "telemetry.sdk.version" + // The name of the auto instrumentation agent or distribution, if used. 
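+ // As an illustrative sketch only (assuming a pcommon.Map named attrs on a
+ // resource), a distribution might identify itself as:
+ //
+ //    attrs.PutStr(AttributeTelemetryDistroName, "opentelemetry-java-instrumentation")
+ //    attrs.PutStr(AttributeTelemetryDistroVersion, "1.2.3")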
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'parts-unlimited-java' + // Note: Official auto instrumentation agents and distributions SHOULD set the + // telemetry.distro.name attribute to + // a string starting with opentelemetry-, e.g. opentelemetry-java-instrumentation. + AttributeTelemetryDistroName = "telemetry.distro.name" + // The version string of the auto instrumentation agent or distribution, if used. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1.2.3' + AttributeTelemetryDistroVersion = "telemetry.distro.version" +) + +const ( + // cpp + AttributeTelemetrySDKLanguageCPP = "cpp" + // dotnet + AttributeTelemetrySDKLanguageDotnet = "dotnet" + // erlang + AttributeTelemetrySDKLanguageErlang = "erlang" + // go + AttributeTelemetrySDKLanguageGo = "go" + // java + AttributeTelemetrySDKLanguageJava = "java" + // nodejs + AttributeTelemetrySDKLanguageNodejs = "nodejs" + // php + AttributeTelemetrySDKLanguagePHP = "php" + // python + AttributeTelemetrySDKLanguagePython = "python" + // ruby + AttributeTelemetrySDKLanguageRuby = "ruby" + // rust + AttributeTelemetrySDKLanguageRust = "rust" + // swift + AttributeTelemetrySDKLanguageSwift = "swift" + // webjs + AttributeTelemetrySDKLanguageWebjs = "webjs" +) + +// These attributes may be used for any operation to store information about a +// thread that started a span. +const ( + // Current "managed" thread ID (as opposed to OS thread ID). + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 42 + AttributeThreadID = "thread.id" + // Current thread name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'main' + AttributeThreadName = "thread.name" +) + +// Semantic convention attributes in the TLS namespace. +const ( + // String indicating the cipher used during the current connection. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA', + // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' + // Note: The values allowed for tls.cipher MUST be one of the Descriptions of the + // registered TLS Cipher Suits. + AttributeTLSCipher = "tls.cipher" + // PEM-encoded stand-alone certificate offered by the client. This is usually + // mutually-exclusive of client.certificate_chain since this value also exists in + // that list. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MII...' + AttributeTLSClientCertificate = "tls.client.certificate" + // Array of PEM-encoded certificates that make up the certificate chain offered by + // the client. This is usually mutually-exclusive of client.certificate since that + // value should be the first certificate in the chain. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MII...', 'MI...' + AttributeTLSClientCertificateChain = "tls.client.certificate_chain" + // Certificate fingerprint using the MD5 digest of DER-encoded version of + // certificate offered by the client. For consistency with other hash values, this + // value should be formatted as an uppercase hash. 
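+ // A minimal sketch of deriving such a value in Go, assuming the DER-encoded
+ // certificate bytes are held in derBytes, a pcommon.Map named attrs is in
+ // scope, and crypto/md5, encoding/hex, and strings are imported:
+ //
+ //    sum := md5.Sum(derBytes)
+ //    attrs.PutStr(AttributeTLSClientHashMd5, strings.ToUpper(hex.EncodeToString(sum[:])))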
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
+ AttributeTLSClientHashMd5 = "tls.client.hash.md5"
+ // Certificate fingerprint using the SHA1 digest of DER-encoded version of
+ // certificate offered by the client. For consistency with other hash values, this
+ // value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
+ AttributeTLSClientHashSha1 = "tls.client.hash.sha1"
+ // Certificate fingerprint using the SHA256 digest of DER-encoded version of
+ // certificate offered by the client. For consistency with other hash values, this
+ // value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
+ AttributeTLSClientHashSha256 = "tls.client.hash.sha256"
+ // Distinguished name of subject of the issuer of the x.509 certificate presented
+ // by the client.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com'
+ AttributeTLSClientIssuer = "tls.client.issuer"
+ // A hash that identifies clients based on how they perform an SSL/TLS handshake.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'd4e5b18d6b55c71272893221c96ba240'
+ AttributeTLSClientJa3 = "tls.client.ja3"
+ // Date/Time indicating when client certificate is no longer considered valid.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '2021-01-01T00:00:00.000Z'
+ AttributeTLSClientNotAfter = "tls.client.not_after"
+ // Date/Time indicating when client certificate is first considered valid.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '1970-01-01T00:00:00.000Z'
+ AttributeTLSClientNotBefore = "tls.client.not_before"
+ // Also called an SNI, this tells the server the hostname to which the client is
+ // attempting to connect.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry.io'
+ AttributeTLSClientServerName = "tls.client.server_name"
+ // Distinguished name of subject of the x.509 certificate presented by the client.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com'
+ AttributeTLSClientSubject = "tls.client.subject"
+ // Array of ciphers offered by the client during the client hello.
+ //
+ // Type: string[]
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
+ // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."'
+ AttributeTLSClientSupportedCiphers = "tls.client.supported_ciphers"
+ // String indicating the curve used for the given cipher, when applicable
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'secp256r1'
+ AttributeTLSCurve = "tls.curve"
+ // Boolean flag indicating if the TLS negotiation was successful and transitioned
+ // to an encrypted tunnel.
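+ // For illustration only, given a crypto/tls connection conn and a pcommon.Map
+ // named attrs, this flag could be derived from the handshake state:
+ //
+ //    attrs.PutBool(AttributeTLSEstablished, conn.ConnectionState().HandshakeComplete)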
+ // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + // Examples: True + AttributeTLSEstablished = "tls.established" + // String indicating the protocol being tunneled. Per the values in the IANA + // registry, this string should be lower case. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'http/1.1' + AttributeTLSNextProtocol = "tls.next_protocol" + // Normalized lowercase protocol name parsed from original string of the + // negotiated SSL/TLS protocol version + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeTLSProtocolName = "tls.protocol.name" + // Numeric part of the version parsed from the original string of the negotiated + // SSL/TLS protocol version + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1.2', '3' + AttributeTLSProtocolVersion = "tls.protocol.version" + // Boolean flag indicating if this TLS connection was resumed from an existing TLS + // negotiation. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + // Examples: True + AttributeTLSResumed = "tls.resumed" + // PEM-encoded stand-alone certificate offered by the server. This is usually + // mutually-exclusive of server.certificate_chain since this value also exists in + // that list. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MII...' + AttributeTLSServerCertificate = "tls.server.certificate" + // Array of PEM-encoded certificates that make up the certificate chain offered by + // the server. This is usually mutually-exclusive of server.certificate since that + // value should be the first certificate in the chain. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MII...', 'MI...' + AttributeTLSServerCertificateChain = "tls.server.certificate_chain" + // Certificate fingerprint using the MD5 digest of DER-encoded version of + // certificate offered by the server. For consistency with other hash values, this + // value should be formatted as an uppercase hash. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' + AttributeTLSServerHashMd5 = "tls.server.hash.md5" + // Certificate fingerprint using the SHA1 digest of DER-encoded version of + // certificate offered by the server. For consistency with other hash values, this + // value should be formatted as an uppercase hash. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' + AttributeTLSServerHashSha1 = "tls.server.hash.sha1" + // Certificate fingerprint using the SHA256 digest of DER-encoded version of + // certificate offered by the server. For consistency with other hash values, this + // value should be formatted as an uppercase hash. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' + AttributeTLSServerHashSha256 = "tls.server.hash.sha256" + // Distinguished name of subject of the issuer of the x.509 certificate presented + // by the client. 
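+ // As a hypothetical sketch, given the server's leaf certificate cert
+ // (an *x509.Certificate) and a pcommon.Map named attrs, the issuer's
+ // distinguished name could be captured with:
+ //
+ //    attrs.PutStr(AttributeTLSServerIssuer, cert.Issuer.String())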
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com' + AttributeTLSServerIssuer = "tls.server.issuer" + // A hash that identifies servers based on how they perform an SSL/TLS handshake. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'd4e5b18d6b55c71272893221c96ba240' + AttributeTLSServerJa3s = "tls.server.ja3s" + // Date/Time indicating when server certificate is no longer considered valid. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2021-01-01T00:00:00.000Z' + AttributeTLSServerNotAfter = "tls.server.not_after" + // Date/Time indicating when server certificate is first considered valid. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1970-01-01T00:00:00.000Z' + AttributeTLSServerNotBefore = "tls.server.not_before" + // Distinguished name of subject of the x.509 certificate presented by the server. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com' + AttributeTLSServerSubject = "tls.server.subject" +) + +const ( + // ssl + AttributeTLSProtocolNameSsl = "ssl" + // tls + AttributeTLSProtocolNameTLS = "tls" +) + +// Attributes describing URL. +const ( + // Domain extracted from the url.full, such as "opentelemetry.io". + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'www.foo.bar', 'opentelemetry.io', '3.12.167.2', + // '[1080:0:0:0:8:800:200C:417A]' + // Note: In some cases a URL may refer to an IP and/or port directly, without a + // domain name. In this case, the IP address would go to the domain field. If the + // URL contains a literal IPv6 address enclosed by [ and ], the [ and ] characters + // should also be captured in the domain field. + AttributeURLDomain = "url.domain" + // The file extension extracted from the url.full, excluding the leading dot. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'png', 'gz' + // Note: The file extension is only set if it exists, as not every url has a file + // extension. When the file name has multiple extensions example.tar.gz, only the + // last one should be captured gz, not tar.gz. + AttributeURLExtension = "url.extension" + // The URI fragment component + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'SemConv' + AttributeURLFragment = "url.fragment" + // Absolute URL describing a network resource according to RFC3986 + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', '//localhost' + // Note: For network calls, URL usually has + // scheme://host[:port][path][?query][#fragment] format, where the fragment is not + // transmitted over HTTP, but if it is known, it SHOULD be included nevertheless. + // url.full MUST NOT contain credentials passed via URL in form of + // https://username:password@www.example.com/. In such case username and password + // SHOULD be redacted and attribute's value SHOULD be + // https://REDACTED:REDACTED@www.example.com/. + // url.full SHOULD capture the absolute URL when it is available (or can be + // reconstructed). 
Sensitive content provided in url.full SHOULD be scrubbed when + // instrumentations can identify it. + AttributeURLFull = "url.full" + // Unmodified original URL as seen in the event source. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', + // 'search?q=OpenTelemetry' + // Note: In network monitoring, the observed URL may be a full URL, whereas in + // access logs, the URL is often just represented as a path. This field is meant + // to represent the URL as it was observed, complete or not. + // url.original might contain credentials passed via URL in form of + // https://username:password@www.example.com/. In such case password and username + // SHOULD NOT be redacted and attribute's value SHOULD remain the same. + AttributeURLOriginal = "url.original" + // The URI path component + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '/search' + // Note: Sensitive content provided in url.path SHOULD be scrubbed when + // instrumentations can identify it. + AttributeURLPath = "url.path" + // Port extracted from the url.full + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 443 + AttributeURLPort = "url.port" + // The URI query component + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'q=OpenTelemetry' + // Note: Sensitive content provided in url.query SHOULD be scrubbed when + // instrumentations can identify it. + AttributeURLQuery = "url.query" + // The highest registered url domain, stripped of the subdomain. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'example.com', 'foo.co.uk' + // Note: This value can be determined precisely with the public suffix list. For + // example, the registered domain for foo.example.com is example.com. Trying to + // approximate this by simply taking the last two labels will not work well for + // TLDs such as co.uk. + AttributeURLRegisteredDomain = "url.registered_domain" + // The URI scheme component identifying the used protocol. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'https', 'ftp', 'telnet' + AttributeURLScheme = "url.scheme" + // The subdomain portion of a fully qualified domain name includes all of the + // names except the host name under the registered_domain. In a partially + // qualified domain, or if the qualification level of the full name cannot be + // determined, subdomain contains all of the names below the registered domain. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'east', 'sub2.sub1' + // Note: The subdomain portion of www.east.mydomain.co.uk is east. If the domain + // has multiple levels of subdomain, such as sub2.sub1.example.com, the subdomain + // field should contain sub2.sub1, with no trailing period. + AttributeURLSubdomain = "url.subdomain" + // The low-cardinality template of an absolute path reference. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/users/{id}', '/users/:id', '/users?id={id}' + AttributeURLTemplate = "url.template" + // The effective top level domain (eTLD), also known as the domain suffix, is the + // last part of the domain name. For example, the top level domain for example.com + // is com. 
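+ // As an illustration (not defined by these conventions),
+ // golang.org/x/net/publicsuffix can derive this value and the registered
+ // domain:
+ //
+ //    suffix, _ := publicsuffix.PublicSuffix("foo.co.uk")            // "co.uk"
+ //    registered, _ := publicsuffix.EffectiveTLDPlusOne("foo.co.uk") // "foo.co.uk"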
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'com', 'co.uk' + // Note: This value can be determined precisely with the public suffix list. + AttributeURLTopLevelDomain = "url.top_level_domain" +) + +// Describes user-agent attributes. +const ( + // Name of the user-agent extracted from original. Usually refers to the browser's + // name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Safari', 'YourApp' + // Note: Example of extracting browser's name from original string. In the case of + // using a user-agent for non-browser products, such as microservices with + // multiple names/versions inside the user_agent.original, the most significant + // name SHOULD be selected. In such a scenario it should align with + // user_agent.version + AttributeUserAgentName = "user_agent.name" + // Value of the HTTP User-Agent header sent by the client. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU iPhone + // OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) + // Version/14.1.2 Mobile/15E148 Safari/604.1', 'YourApp/1.0.0 grpc-java- + // okhttp/1.27.2' + AttributeUserAgentOriginal = "user_agent.original" + // Version of the user-agent extracted from original. Usually refers to the + // browser's version + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '14.1.2', '1.0.0' + // Note: Example of extracting browser's version from original string. In the case + // of using a user-agent for non-browser products, such as microservices with + // multiple names/versions inside the user_agent.original, the most significant + // version SHOULD be selected. In such a scenario it should align with + // user_agent.name + AttributeUserAgentVersion = "user_agent.version" +) + +// The attributes used to describe the packaged software running the +// application code. +const ( + // Additional description of the web engine (e.g. detailed version and edition + // information). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final' + AttributeWebEngineDescription = "webengine.description" + // The name of the web engine. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'WildFly' + AttributeWebEngineName = "webengine.name" + // The version of the web engine. 
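+ // For illustration only (assuming a pcommon.Map named attrs on a resource),
+ // a WildFly deployment might be described as:
+ //
+ //    attrs.PutStr(AttributeWebEngineName, "WildFly")
+ //    attrs.PutStr(AttributeWebEngineVersion, "21.0.0")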
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '21.0.0' + AttributeWebEngineVersion = "webengine.version" +) + +func GetAttribute_groupSemanticConventionAttributeNames() []string { + return []string{ + AttributeAndroidOSAPILevel, + AttributeAspnetcoreRateLimitingResult, + AttributeAspnetcoreDiagnosticsHandlerType, + AttributeAspnetcoreDiagnosticsExceptionResult, + AttributeAspnetcoreRateLimitingPolicy, + AttributeAspnetcoreRequestIsUnhandled, + AttributeAspnetcoreRoutingIsFallback, + AttributeAspnetcoreRoutingMatchStatus, + AttributeAWSRequestID, + AttributeAWSDynamoDBAttributeDefinitions, + AttributeAWSDynamoDBAttributesToGet, + AttributeAWSDynamoDBConsistentRead, + AttributeAWSDynamoDBConsumedCapacity, + AttributeAWSDynamoDBCount, + AttributeAWSDynamoDBExclusiveStartTable, + AttributeAWSDynamoDBGlobalSecondaryIndexUpdates, + AttributeAWSDynamoDBGlobalSecondaryIndexes, + AttributeAWSDynamoDBIndexName, + AttributeAWSDynamoDBItemCollectionMetrics, + AttributeAWSDynamoDBLimit, + AttributeAWSDynamoDBLocalSecondaryIndexes, + AttributeAWSDynamoDBProjection, + AttributeAWSDynamoDBProvisionedReadCapacity, + AttributeAWSDynamoDBProvisionedWriteCapacity, + AttributeAWSDynamoDBScanForward, + AttributeAWSDynamoDBScannedCount, + AttributeAWSDynamoDBSegment, + AttributeAWSDynamoDBSelect, + AttributeAWSDynamoDBTableCount, + AttributeAWSDynamoDBTableNames, + AttributeAWSDynamoDBTotalSegments, + AttributeAWSECSTaskID, + AttributeAWSECSClusterARN, + AttributeAWSECSContainerARN, + AttributeAWSECSLaunchtype, + AttributeAWSECSTaskARN, + AttributeAWSECSTaskFamily, + AttributeAWSECSTaskRevision, + AttributeAWSEKSClusterARN, + AttributeAWSLogGroupARNs, + AttributeAWSLogGroupNames, + AttributeAWSLogStreamARNs, + AttributeAWSLogStreamNames, + AttributeAWSLambdaInvokedARN, + AttributeAWSS3Bucket, + AttributeAWSS3CopySource, + AttributeAWSS3Delete, + AttributeAWSS3Key, + AttributeAWSS3PartNumber, + AttributeAWSS3UploadID, + AttributeBrowserBrands, + AttributeBrowserLanguage, + AttributeBrowserMobile, + AttributeBrowserPlatform, + AttributeClientAddress, + AttributeClientPort, + AttributeCloudAccountID, + AttributeCloudAvailabilityZone, + AttributeCloudPlatform, + AttributeCloudProvider, + AttributeCloudRegion, + AttributeCloudResourceID, + AttributeCloudeventsEventID, + AttributeCloudeventsEventSource, + AttributeCloudeventsEventSpecVersion, + AttributeCloudeventsEventSubject, + AttributeCloudeventsEventType, + AttributeCodeColumn, + AttributeCodeFilepath, + AttributeCodeFunction, + AttributeCodeLineNumber, + AttributeCodeNamespace, + AttributeCodeStacktrace, + AttributeContainerCommand, + AttributeContainerCommandArgs, + AttributeContainerCommandLine, + AttributeContainerCPUState, + AttributeContainerID, + AttributeContainerImageID, + AttributeContainerImageName, + AttributeContainerImageRepoDigests, + AttributeContainerImageTags, + AttributeContainerName, + AttributeContainerRuntime, + AttributeDBClientConnectionsPoolName, + AttributeDBClientConnectionsState, + AttributeDBCollectionName, + AttributeDBNamespace, + AttributeDBOperationName, + AttributeDBQueryText, + AttributeDBSystem, + AttributeDBCassandraConsistencyLevel, + AttributeDBCassandraCoordinatorDC, + AttributeDBCassandraCoordinatorID, + AttributeDBCassandraIdempotence, + AttributeDBCassandraPageSize, + AttributeDBCassandraSpeculativeExecutionCount, + AttributeDBCosmosDBClientID, + AttributeDBCosmosDBConnectionMode, + AttributeDBCosmosDBOperationType, + AttributeDBCosmosDBRequestCharge, + 
AttributeDBCosmosDBRequestContentLength, + AttributeDBCosmosDBStatusCode, + AttributeDBCosmosDBSubStatusCode, + AttributeDBElasticsearchClusterName, + AttributeDBElasticsearchNodeName, + AttributeDeploymentEnvironment, + AttributeAndroidState, + AttributeDestinationAddress, + AttributeDestinationPort, + AttributeDeviceID, + AttributeDeviceManufacturer, + AttributeDeviceModelIdentifier, + AttributeDeviceModelName, + AttributeDiskIoDirection, + AttributeDNSQuestionName, + AttributeEnduserID, + AttributeEnduserRole, + AttributeEnduserScope, + AttributeErrorType, + AttributeEventName, + AttributeExceptionEscaped, + AttributeExceptionMessage, + AttributeExceptionStacktrace, + AttributeExceptionType, + AttributeFaaSColdstart, + AttributeFaaSCron, + AttributeFaaSDocumentCollection, + AttributeFaaSDocumentName, + AttributeFaaSDocumentOperation, + AttributeFaaSDocumentTime, + AttributeFaaSInstance, + AttributeFaaSInvocationID, + AttributeFaaSInvokedName, + AttributeFaaSInvokedProvider, + AttributeFaaSInvokedRegion, + AttributeFaaSMaxMemory, + AttributeFaaSName, + AttributeFaaSTime, + AttributeFaaSTrigger, + AttributeFaaSVersion, + AttributeFeatureFlagKey, + AttributeFeatureFlagProviderName, + AttributeFeatureFlagVariant, + AttributeFileDirectory, + AttributeFileExtension, + AttributeFileName, + AttributeFilePath, + AttributeFileSize, + AttributeGCPCloudRunJobExecution, + AttributeGCPCloudRunJobTaskIndex, + AttributeGCPGceInstanceHostname, + AttributeGCPGceInstanceName, + AttributeGenAiCompletion, + AttributeGenAiPrompt, + AttributeGenAiRequestMaxTokens, + AttributeGenAiRequestModel, + AttributeGenAiRequestTemperature, + AttributeGenAiRequestTopP, + AttributeGenAiResponseFinishReasons, + AttributeGenAiResponseID, + AttributeGenAiResponseModel, + AttributeGenAiSystem, + AttributeGenAiUsageCompletionTokens, + AttributeGenAiUsagePromptTokens, + AttributeGraphqlDocument, + AttributeGraphqlOperationName, + AttributeGraphqlOperationType, + AttributeHerokuAppID, + AttributeHerokuReleaseCommit, + AttributeHerokuReleaseCreationTimestamp, + AttributeHostArch, + AttributeHostCPUCacheL2Size, + AttributeHostCPUFamily, + AttributeHostCPUModelID, + AttributeHostCPUModelName, + AttributeHostCPUStepping, + AttributeHostCPUVendorID, + AttributeHostID, + AttributeHostImageID, + AttributeHostImageName, + AttributeHostImageVersion, + AttributeHostIP, + AttributeHostMac, + AttributeHostName, + AttributeHostType, + AttributeHTTPConnectionState, + AttributeHTTPRequestBodySize, + AttributeHTTPRequestMethod, + AttributeHTTPRequestMethodOriginal, + AttributeHTTPRequestResendCount, + AttributeHTTPRequestSize, + AttributeHTTPResponseBodySize, + AttributeHTTPResponseSize, + AttributeHTTPResponseStatusCode, + AttributeHTTPRoute, + AttributeJvmBufferPoolName, + AttributeJvmGcAction, + AttributeJvmGcName, + AttributeJvmMemoryPoolName, + AttributeJvmMemoryType, + AttributeJvmThreadDaemon, + AttributeJvmThreadState, + AttributeK8SClusterName, + AttributeK8SClusterUID, + AttributeK8SContainerName, + AttributeK8SContainerRestartCount, + AttributeK8SContainerStatusLastTerminatedReason, + AttributeK8SCronJobName, + AttributeK8SCronJobUID, + AttributeK8SDaemonSetName, + AttributeK8SDaemonSetUID, + AttributeK8SDeploymentName, + AttributeK8SDeploymentUID, + AttributeK8SJobName, + AttributeK8SJobUID, + AttributeK8SNamespaceName, + AttributeK8SNodeName, + AttributeK8SNodeUID, + AttributeK8SPodName, + AttributeK8SPodUID, + AttributeK8SReplicaSetName, + AttributeK8SReplicaSetUID, + AttributeK8SStatefulSetName, + AttributeK8SStatefulSetUID, + 
AttributeLogIostream, + AttributeLogFileName, + AttributeLogFileNameResolved, + AttributeLogFilePath, + AttributeLogFilePathResolved, + AttributeLogRecordUID, + AttributeMessagingBatchMessageCount, + AttributeMessagingClientID, + AttributeMessagingDestinationAnonymous, + AttributeMessagingDestinationName, + AttributeMessagingDestinationPartitionID, + AttributeMessagingDestinationTemplate, + AttributeMessagingDestinationTemporary, + AttributeMessagingDestinationPublishAnonymous, + AttributeMessagingDestinationPublishName, + AttributeMessagingMessageBodySize, + AttributeMessagingMessageConversationID, + AttributeMessagingMessageEnvelopeSize, + AttributeMessagingMessageID, + AttributeMessagingOperationName, + AttributeMessagingOperationType, + AttributeMessagingSystem, + AttributeMessagingKafkaConsumerGroup, + AttributeMessagingKafkaMessageKey, + AttributeMessagingKafkaMessageOffset, + AttributeMessagingKafkaMessageTombstone, + AttributeMessagingRabbitmqDestinationRoutingKey, + AttributeMessagingRabbitmqMessageDeliveryTag, + AttributeMessagingRocketmqClientGroup, + AttributeMessagingRocketmqConsumptionModel, + AttributeMessagingRocketmqMessageDelayTimeLevel, + AttributeMessagingRocketmqMessageDeliveryTimestamp, + AttributeMessagingRocketmqMessageGroup, + AttributeMessagingRocketmqMessageKeys, + AttributeMessagingRocketmqMessageTag, + AttributeMessagingRocketmqMessageType, + AttributeMessagingRocketmqNamespace, + AttributeMessagingGCPPubsubMessageAckDeadline, + AttributeMessagingGCPPubsubMessageAckID, + AttributeMessagingGCPPubsubMessageDeliveryAttempt, + AttributeMessagingGCPPubsubMessageOrderingKey, + AttributeMessagingServicebusDestinationSubscriptionName, + AttributeMessagingServicebusDispositionStatus, + AttributeMessagingServicebusMessageDeliveryCount, + AttributeMessagingServicebusMessageEnqueuedTime, + AttributeMessagingEventhubsConsumerGroup, + AttributeMessagingEventhubsMessageEnqueuedTime, + AttributeNetworkCarrierIcc, + AttributeNetworkCarrierMcc, + AttributeNetworkCarrierMnc, + AttributeNetworkCarrierName, + AttributeNetworkConnectionSubtype, + AttributeNetworkConnectionType, + AttributeNetworkIoDirection, + AttributeNetworkLocalAddress, + AttributeNetworkLocalPort, + AttributeNetworkPeerAddress, + AttributeNetworkPeerPort, + AttributeNetworkProtocolName, + AttributeNetworkProtocolVersion, + AttributeNetworkTransport, + AttributeNetworkType, + AttributeOciManifestDigest, + AttributeOpentracingRefType, + AttributeOSBuildID, + AttributeOSDescription, + AttributeOSName, + AttributeOSType, + AttributeOSVersion, + AttributeOTelStatusCode, + AttributeOTelStatusDescription, + AttributeOTelScopeName, + AttributeOTelScopeVersion, + AttributePeerService, + AttributeProcessCommand, + AttributeProcessCommandArgs, + AttributeProcessCommandLine, + AttributeProcessContextSwitchType, + AttributeProcessCreationTime, + AttributeProcessExecutableName, + AttributeProcessExecutablePath, + AttributeProcessExitCode, + AttributeProcessExitTime, + AttributeProcessGroupLeaderPID, + AttributeProcessInteractive, + AttributeProcessOwner, + AttributeProcessPagingFaultType, + AttributeProcessParentPID, + AttributeProcessPID, + AttributeProcessRealUserID, + AttributeProcessRealUserName, + AttributeProcessRuntimeDescription, + AttributeProcessRuntimeName, + AttributeProcessRuntimeVersion, + AttributeProcessSavedUserID, + AttributeProcessSavedUserName, + AttributeProcessSessionLeaderPID, + AttributeProcessUserID, + AttributeProcessUserName, + AttributeProcessVpid, + AttributeProcessCPUState, + 
AttributeRPCConnectRPCErrorCode, + AttributeRPCGRPCStatusCode, + AttributeRPCJsonrpcErrorCode, + AttributeRPCJsonrpcErrorMessage, + AttributeRPCJsonrpcRequestID, + AttributeRPCJsonrpcVersion, + AttributeRPCMessageCompressedSize, + AttributeRPCMessageID, + AttributeRPCMessageType, + AttributeRPCMessageUncompressedSize, + AttributeRPCMethod, + AttributeRPCService, + AttributeRPCSystem, + AttributeServerAddress, + AttributeServerPort, + AttributeServiceInstanceID, + AttributeServiceName, + AttributeServiceNamespace, + AttributeServiceVersion, + AttributeSessionID, + AttributeSessionPreviousID, + AttributeSignalrConnectionStatus, + AttributeSignalrTransport, + AttributeSourceAddress, + AttributeSourcePort, + AttributeSystemDevice, + AttributeSystemCPULogicalNumber, + AttributeSystemCPUState, + AttributeSystemMemoryState, + AttributeSystemPagingDirection, + AttributeSystemPagingState, + AttributeSystemPagingType, + AttributeSystemFilesystemMode, + AttributeSystemFilesystemMountpoint, + AttributeSystemFilesystemState, + AttributeSystemFilesystemType, + AttributeSystemNetworkState, + AttributeSystemProcessStatus, + AttributeTelemetrySDKLanguage, + AttributeTelemetrySDKName, + AttributeTelemetrySDKVersion, + AttributeTelemetryDistroName, + AttributeTelemetryDistroVersion, + AttributeThreadID, + AttributeThreadName, + AttributeTLSCipher, + AttributeTLSClientCertificate, + AttributeTLSClientCertificateChain, + AttributeTLSClientHashMd5, + AttributeTLSClientHashSha1, + AttributeTLSClientHashSha256, + AttributeTLSClientIssuer, + AttributeTLSClientJa3, + AttributeTLSClientNotAfter, + AttributeTLSClientNotBefore, + AttributeTLSClientServerName, + AttributeTLSClientSubject, + AttributeTLSClientSupportedCiphers, + AttributeTLSCurve, + AttributeTLSEstablished, + AttributeTLSNextProtocol, + AttributeTLSProtocolName, + AttributeTLSProtocolVersion, + AttributeTLSResumed, + AttributeTLSServerCertificate, + AttributeTLSServerCertificateChain, + AttributeTLSServerHashMd5, + AttributeTLSServerHashSha1, + AttributeTLSServerHashSha256, + AttributeTLSServerIssuer, + AttributeTLSServerJa3s, + AttributeTLSServerNotAfter, + AttributeTLSServerNotBefore, + AttributeTLSServerSubject, + AttributeURLDomain, + AttributeURLExtension, + AttributeURLFragment, + AttributeURLFull, + AttributeURLOriginal, + AttributeURLPath, + AttributeURLPort, + AttributeURLQuery, + AttributeURLRegisteredDomain, + AttributeURLScheme, + AttributeURLSubdomain, + AttributeURLTemplate, + AttributeURLTopLevelDomain, + AttributeUserAgentName, + AttributeUserAgentOriginal, + AttributeUserAgentVersion, + AttributeWebEngineDescription, + AttributeWebEngineName, + AttributeWebEngineVersion, + } +} diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.26.0/generated_event.go b/vendor/go.opentelemetry.io/collector/semconv/v1.26.0/generated_event.go new file mode 100644 index 00000000000..ac6893c3b26 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.26.0/generated_event.go @@ -0,0 +1,10 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. 
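+//
+// As an illustrative note (not generated from the specification): the
+// Get*SemanticConventionAttributeNames helpers in this package return plain
+// string slices, so callers can, for example, build a lookup set of known
+// attribute names:
+//
+//    known := make(map[string]struct{})
+//    for _, name := range GetAttribute_groupSemanticConventionAttributeNames() {
+//        known[name] = struct{}{}
+//    }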
+ +package semconv + +func GetEventSemanticConventionAttributeNames() []string { + return []string{} +} diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.26.0/generated_resource.go b/vendor/go.opentelemetry.io/collector/semconv/v1.26.0/generated_resource.go new file mode 100644 index 00000000000..bb89e4806f5 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.26.0/generated_resource.go @@ -0,0 +1,10 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv + +func GetResourceSemanticConventionAttributeNames() []string { + return []string{} +} diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.26.0/generated_trace.go b/vendor/go.opentelemetry.io/collector/semconv/v1.26.0/generated_trace.go new file mode 100644 index 00000000000..380529563a2 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.26.0/generated_trace.go @@ -0,0 +1,10 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv + +func GetTraceSemanticConventionAttributeNames() []string { + return []string{} +} diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.26.0/schema.go b/vendor/go.opentelemetry.io/collector/semconv/v1.26.0/schema.go new file mode 100644 index 00000000000..dcd02283faf --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.26.0/schema.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/collector/semconv/v1.26.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/ +const SchemaURL = "https://opentelemetry.io/schemas/1.26.0" diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.27.0/doc.go b/vendor/go.opentelemetry.io/collector/semconv/v1.27.0/doc.go new file mode 100644 index 00000000000..5042d2eba86 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.27.0/doc.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package semconv implements OpenTelemetry semantic conventions. +// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the v1.27.0 +// version of the OpenTelemetry semantic conventions. +package semconv // import "go.opentelemetry.io/collector/semconv/v1.27.0" diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.27.0/generated_attribute_group.go b/vendor/go.opentelemetry.io/collector/semconv/v1.27.0/generated_attribute_group.go new file mode 100644 index 00000000000..6247b5d7917 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.27.0/generated_attribute_group.go @@ -0,0 +1,5843 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv + +// The Android platform on which the Android application is running. +const ( + // Uniquely identifies the framework API revision offered by a version + // (os.version) of the android operating system. More information can be found + // here. 
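+ // As a hypothetical illustration (assuming a pcommon.Map named attrs):
+ //
+ //    attrs.PutStr(AttributeAndroidOSAPILevel, "33")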
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '33', '32' + AttributeAndroidOSAPILevel = "android.os.api_level" +) + +// This group describes attributes specific to artifacts. Artifacts are files +// or other immutable objects that are intended for distribution. This +// definition aligns directly with the +// [SLSA](https://slsa.dev/spec/v1.0/terminology#package-model) package model. +const ( + // The provenance filename of the built attestation which directly relates to the + // build artifact filename. This filename SHOULD accompany the artifact at publish + // time. See the SLSA Relationship specification for more information. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'golang-binary-amd64-v0.1.0.attestation', 'docker-image- + // amd64-v0.1.0.intoto.json1', 'release-1.tar.gz.attestation', 'file-name- + // package.tar.gz.intoto.json1' + AttributeArtifactAttestationFilename = "artifact.attestation.filename" + // The full hash value (see glossary), of the built attestation. Some envelopes in + // the software attestation space also refer to this as the digest. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1b31dfcd5b7f9267bf2ff47651df1cfb9147b9e4df1f335accf65b4cda498408' + AttributeArtifactAttestationHash = "artifact.attestation.hash" + // The id of the build software attestation. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '123' + AttributeArtifactAttestationID = "artifact.attestation.id" + // The human readable file name of the artifact, typically generated during build + // and release processes. Often includes the package name and version in the file + // name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'golang-binary-amd64-v0.1.0', 'docker-image-amd64-v0.1.0', + // 'release-1.tar.gz', 'file-name-package.tar.gz' + // Note: This file name can also act as the Package Name + // in cases where the package ecosystem maps accordingly. + // Additionally, the artifact can be published + // for others, but that is not a guarantee. + AttributeArtifactFilename = "artifact.filename" + // The full hash value (see glossary), often found in checksum.txt on a release of + // the artifact and used to verify package integrity. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '9ff4c52759e2c4ac70b7d517bc7fcdc1cda631ca0045271ddd1b192544f8a3e9' + // Note: The specific algorithm used to create the cryptographic hash value is + // not defined. In situations where an artifact has multiple + // cryptographic hashes, it is up to the implementer to choose which + // hash value to set here; this should be the most secure hash algorithm + // that is suitable for the situation and consistent with the + // corresponding attestation. The implementer can then provide the other + // hash values through an additional set of attribute extensions as they + // deem necessary. + AttributeArtifactHash = "artifact.hash" + // The Package URL of the package artifact provides a standard way to identify and + // locate the packaged artifact. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'pkg:github/package-url/purl-spec@1209109710924', + // 'pkg:npm/foo@12.12.3' + AttributeArtifactPurl = "artifact.purl" + // The version of the artifact. 
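Since every constant in this file is a plain string key, recording the artifact attributes above is just a matter of writing key/value pairs into whatever attribute container is in use. A hedged sketch using the collector's pcommon map (values mirror the Examples lines above; the pcommon usage is an assumption, any attribute API would do):

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
	semconv "go.opentelemetry.io/collector/semconv/v1.27.0"
)

func main() {
	attrs := pcommon.NewMap()
	// Illustrative values, copied from the Examples lines in the comments above.
	attrs.PutStr(semconv.AttributeArtifactFilename, "golang-binary-amd64-v0.1.0")
	attrs.PutStr(semconv.AttributeArtifactAttestationFilename, "golang-binary-amd64-v0.1.0.attestation")
	attrs.PutStr(semconv.AttributeArtifactAttestationHash, "1b31dfcd5b7f9267bf2ff47651df1cfb9147b9e4df1f335accf65b4cda498408")
	attrs.PutStr(semconv.AttributeArtifactPurl, "pkg:npm/foo@12.12.3")
	fmt.Println(attrs.Len()) // 4
}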
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'v0.1.0', '1.2.1', '122691-build' + AttributeArtifactVersion = "artifact.version" +) + +// ASP.NET Core attributes +const ( + // Rate-limiting result, shows whether the lease was acquired or contains a + // rejection reason + // + // Type: Enum + // Requirement Level: Required + // Stability: stable + // Examples: 'acquired', 'request_canceled' + AttributeAspnetcoreRateLimitingResult = "aspnetcore.rate_limiting.result" + // Full type name of the IExceptionHandler implementation that handled the + // exception. + // + // Type: string + // Requirement Level: Conditionally Required - if and only if the exception was + // handled by this handler. + // Stability: stable + // Examples: 'Contoso.MyHandler' + AttributeAspnetcoreDiagnosticsHandlerType = "aspnetcore.diagnostics.handler.type" + // ASP.NET Core exception middleware handling result + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'handled', 'unhandled' + AttributeAspnetcoreDiagnosticsExceptionResult = "aspnetcore.diagnostics.exception.result" + // Rate limiting policy name. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'fixed', 'sliding', 'token' + AttributeAspnetcoreRateLimitingPolicy = "aspnetcore.rate_limiting.policy" + // Flag indicating if request was handled by the application pipeline. + // + // Type: boolean + // Requirement Level: Optional + // Stability: stable + // Examples: True + AttributeAspnetcoreRequestIsUnhandled = "aspnetcore.request.is_unhandled" + // A value that indicates whether the matched route is a fallback route. + // + // Type: boolean + // Requirement Level: Optional + // Stability: stable + // Examples: True + AttributeAspnetcoreRoutingIsFallback = "aspnetcore.routing.is_fallback" + // Match result - success or failure + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'success', 'failure' + AttributeAspnetcoreRoutingMatchStatus = "aspnetcore.routing.match_status" +) + +const ( + // Lease was acquired + AttributeAspnetcoreRateLimitingResultAcquired = "acquired" + // Lease request was rejected by the endpoint limiter + AttributeAspnetcoreRateLimitingResultEndpointLimiter = "endpoint_limiter" + // Lease request was rejected by the global limiter + AttributeAspnetcoreRateLimitingResultGlobalLimiter = "global_limiter" + // Lease request was canceled + AttributeAspnetcoreRateLimitingResultRequestCanceled = "request_canceled" +) + +const ( + // Exception was handled by the exception handling middleware + AttributeAspnetcoreDiagnosticsExceptionResultHandled = "handled" + // Exception was not handled by the exception handling middleware + AttributeAspnetcoreDiagnosticsExceptionResultUnhandled = "unhandled" + // Exception handling was skipped because the response had started + AttributeAspnetcoreDiagnosticsExceptionResultSkipped = "skipped" + // Exception handling didn't run because the request was aborted + AttributeAspnetcoreDiagnosticsExceptionResultAborted = "aborted" +) + +const ( + // Match succeeded + AttributeAspnetcoreRoutingMatchStatusSuccess = "success" + // Match failed + AttributeAspnetcoreRoutingMatchStatusFailure = "failure" +) + +// Generic attributes for AWS services. +const ( + // The AWS request ID as returned in the response headers x-amz-request-id or + // x-amz-requestid. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' + AttributeAWSRequestID = "aws.request_id" +) + +// Attributes for AWS DynamoDB. +const ( + // The JSON-serialized value of each item in the AttributeDefinitions request + // field. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' + AttributeAWSDynamoDBAttributeDefinitions = "aws.dynamodb.attribute_definitions" + // The value of the AttributesToGet request parameter. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'lives', 'id' + AttributeAWSDynamoDBAttributesToGet = "aws.dynamodb.attributes_to_get" + // The value of the ConsistentRead request parameter. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeAWSDynamoDBConsistentRead = "aws.dynamodb.consistent_read" + // The JSON-serialized value of each item in the ConsumedCapacity response field. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : { + // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, + // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": + // "string", "WriteCapacityUnits": number }' + AttributeAWSDynamoDBConsumedCapacity = "aws.dynamodb.consumed_capacity" + // The value of the Count response parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 10 + AttributeAWSDynamoDBCount = "aws.dynamodb.count" + // The value of the ExclusiveStartTableName request parameter. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Users', 'CatsTable' + AttributeAWSDynamoDBExclusiveStartTable = "aws.dynamodb.exclusive_start_table" + // The JSON-serialized value of each item in the GlobalSecondaryIndexUpdates + // request field. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }' + AttributeAWSDynamoDBGlobalSecondaryIndexUpdates = "aws.dynamodb.global_secondary_index_updates" + // The JSON-serialized value of each item of the GlobalSecondaryIndexes request + // field + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits": + // number, "WriteCapacityUnits": number } }' + AttributeAWSDynamoDBGlobalSecondaryIndexes = "aws.dynamodb.global_secondary_indexes" + // The value of the IndexName request parameter. 
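The DynamoDB attributes above mix scalar and string-array types; the array-valued ones (for example aws.dynamodb.attributes_to_get) need a slice value rather than a joined string. A hedged pcommon sketch (values from the Examples lines; the map API is an assumption):

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
	semconv "go.opentelemetry.io/collector/semconv/v1.27.0"
)

func main() {
	attrs := pcommon.NewMap()
	attrs.PutStr(semconv.AttributeAWSRequestID, "C9ER4AJX75574TDJ")
	attrs.PutBool(semconv.AttributeAWSDynamoDBConsistentRead, true)
	attrs.PutInt(semconv.AttributeAWSDynamoDBCount, 10)

	// string[] attributes are populated element by element.
	toGet := attrs.PutEmptySlice(semconv.AttributeAWSDynamoDBAttributesToGet)
	toGet.AppendEmpty().SetStr("lives")
	toGet.AppendEmpty().SetStr("id")

	fmt.Println(attrs.Len()) // 4
}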
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'name_to_group' + AttributeAWSDynamoDBIndexName = "aws.dynamodb.index_name" + // The JSON-serialized value of the ItemCollectionMetrics response field. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, + // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : + // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": + // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }' + AttributeAWSDynamoDBItemCollectionMetrics = "aws.dynamodb.item_collection_metrics" + // The value of the Limit request parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 10 + AttributeAWSDynamoDBLimit = "aws.dynamodb.limit" + // The JSON-serialized value of each item of the LocalSecondaryIndexes request + // field. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes": + // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" } }' + AttributeAWSDynamoDBLocalSecondaryIndexes = "aws.dynamodb.local_secondary_indexes" + // The value of the ProjectionExpression request parameter. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems, + // ProductReviews' + AttributeAWSDynamoDBProjection = "aws.dynamodb.projection" + // The value of the ProvisionedThroughput.ReadCapacityUnits request parameter. + // + // Type: double + // Requirement Level: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AttributeAWSDynamoDBProvisionedReadCapacity = "aws.dynamodb.provisioned_read_capacity" + // The value of the ProvisionedThroughput.WriteCapacityUnits request parameter. + // + // Type: double + // Requirement Level: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AttributeAWSDynamoDBProvisionedWriteCapacity = "aws.dynamodb.provisioned_write_capacity" + // The value of the ScanIndexForward request parameter. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeAWSDynamoDBScanForward = "aws.dynamodb.scan_forward" + // The value of the ScannedCount response parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 50 + AttributeAWSDynamoDBScannedCount = "aws.dynamodb.scanned_count" + // The value of the Segment request parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 10 + AttributeAWSDynamoDBSegment = "aws.dynamodb.segment" + // The value of the Select request parameter. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AttributeAWSDynamoDBSelect = "aws.dynamodb.select" + // The number of items in the TableNames response parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 20 + AttributeAWSDynamoDBTableCount = "aws.dynamodb.table_count" + // The keys in the RequestItems object field. 
+ // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Users', 'Cats' + AttributeAWSDynamoDBTableNames = "aws.dynamodb.table_names" + // The value of the TotalSegments request parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 100 + AttributeAWSDynamoDBTotalSegments = "aws.dynamodb.total_segments" +) + +// Attributes for AWS Elastic Container Service (ECS). +const ( + // The ID of a running ECS task. The ID MUST be extracted from task.arn. + // + // Type: string + // Requirement Level: Conditionally Required - If and only if `task.arn` is + // populated. + // Stability: experimental + // Examples: '10838bed-421f-43ef-870a-f43feacbbb5b', + // '23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' + AttributeAWSECSTaskID = "aws.ecs.task.id" + // The ARN of an ECS cluster. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AttributeAWSECSClusterARN = "aws.ecs.cluster.arn" + // The Amazon Resource Name (ARN) of an ECS container instance. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us- + // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AttributeAWSECSContainerARN = "aws.ecs.container.arn" + // The launch type for an ECS task. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeAWSECSLaunchtype = "aws.ecs.launchtype" + // The ARN of a running ECS task. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us- + // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b', + // 'arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task- + // id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' + AttributeAWSECSTaskARN = "aws.ecs.task.arn" + // The family name of the ECS task definition used to create the ECS task. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry-family' + AttributeAWSECSTaskFamily = "aws.ecs.task.family" + // The revision for the task definition used to create the ECS task. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '8', '26' + AttributeAWSECSTaskRevision = "aws.ecs.task.revision" +) + +const ( + // ec2 + AttributeAWSECSLaunchtypeEC2 = "ec2" + // fargate + AttributeAWSECSLaunchtypeFargate = "fargate" +) + +// Attributes for AWS Elastic Kubernetes Service (EKS). +const ( + // The ARN of an EKS cluster. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AttributeAWSEKSClusterARN = "aws.eks.cluster.arn" +) + +// Attributes for AWS Logs. +const ( + // The Amazon Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the log group ARN format documentation. + AttributeAWSLogGroupARNs = "aws.log.group.arns" + // The name(s) of the AWS log group(s) an application is writing to. 
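aws.ecs.task.id is conditionally required and MUST be extracted from task.arn. Both ARN shapes shown in the examples above end with the task ID as the final path segment, so a simple extraction works; this helper is a hedged sketch, not part of the generated package:

package main

import (
	"fmt"
	"strings"
)

// ecsTaskID derives aws.ecs.task.id from aws.ecs.task.arn by taking the
// last '/'-separated segment, which holds for both example ARN layouts above.
func ecsTaskID(taskARN string) string {
	i := strings.LastIndex(taskARN, "/")
	if i < 0 {
		return ""
	}
	return taskARN[i+1:]
}

func main() {
	fmt.Println(ecsTaskID("arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b"))
	fmt.Println(ecsTaskID("arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd"))
}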
+ // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like multi-container + // applications, where a single application has sidecar containers, and each write + // to their own log group. + AttributeAWSLogGroupNames = "aws.log.group.names" + // The ARN(s) of the AWS log stream(s). + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log- + // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the log stream ARN format documentation. One log group can contain + // several log streams, so these ARNs necessarily identify both a log group and a + // log stream. + AttributeAWSLogStreamARNs = "aws.log.stream.arns" + // The name(s) of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AttributeAWSLogStreamNames = "aws.log.stream.names" +) + +// Attributes for AWS Lambda. +const ( + // The full invoked ARN as provided on the Context passed to the function (Lambda- + // Runtime-Invoked-Function-ARN header on the /runtime/invocation/next + // applicable). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from cloud.resource_id if an alias is involved. + AttributeAWSLambdaInvokedARN = "aws.lambda.invoked_arn" +) + +// Attributes for AWS S3. +const ( + // The S3 bucket name the request refers to. Corresponds to the --bucket parameter + // of the S3 API operations. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'some-bucket-name' + // Note: The bucket attribute is applicable to all S3 operations that reference a + // bucket, i.e. that require the bucket name as a mandatory parameter. + // This applies to almost all S3 operations except list-buckets. + AttributeAWSS3Bucket = "aws.s3.bucket" + // The source object (in the form bucket/key) for the copy operation. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The copy_source attribute applies to S3 copy operations and corresponds + // to the --copy-source parameter + // of the copy-object operation within the S3 API. + // This applies in particular to the following operations:
+ //
+ //   - copy-object
+ //   - upload-part-copy
+ AttributeAWSS3CopySource = "aws.s3.copy_source" + // The delete request container that specifies the objects to be deleted. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string} + // ],Quiet=boolean' + // Note: The delete attribute is only applicable to the delete-object operation. + // The delete attribute corresponds to the --delete parameter of the + // delete-objects operation within the S3 API. + AttributeAWSS3Delete = "aws.s3.delete" + // The S3 object key the request refers to. Corresponds to the --key parameter of + // the S3 API operations. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The key attribute is applicable to all object-related S3 operations, i.e. + // that require the object key as a mandatory parameter. + // This applies in particular to the following operations:
+ //
+ //   - copy-object
+ //   - delete-object
+ //   - get-object
+ //   - head-object
+ //   - put-object
+ //   - restore-object
+ //   - select-object-content
+ //   - abort-multipart-upload
+ //   - complete-multipart-upload
+ //   - create-multipart-upload
+ //   - list-parts
+ //   - upload-part
+ //   - upload-part-copy
+ AttributeAWSS3Key = "aws.s3.key" + // The part number of the part being uploaded in a multipart-upload operation. + // This is a positive integer between 1 and 10,000. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 3456 + // Note: The part_number attribute is only applicable to the upload-part + // and upload-part-copy operations. + // The part_number attribute corresponds to the --part-number parameter of the + // upload-part operation within the S3 API. + AttributeAWSS3PartNumber = "aws.s3.part_number" + // Upload ID that identifies the multipart upload. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' + // Note: The upload_id attribute applies to S3 multipart-upload operations and + // corresponds to the --upload-id parameter + // of the S3 API multipart operations. + // This applies in particular to the following operations:
+ //
+ //   - abort-multipart-upload
+ //   - complete-multipart-upload
+ //   - list-parts
+ //   - upload-part
+ //   - upload-part-copy
+ AttributeAWSS3UploadID = "aws.s3.upload_id" +) + +// Generic attributes for Azure SDK. +const ( + // The unique identifier of the service request. It's generated by the Azure + // service and returned with the response. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '00000000-0000-0000-0000-000000000000' + AttributeAzServiceRequestID = "az.service_request_id" +) + +// The web browser attributes +const ( + // Array of brand name and version separated by a space + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the UA client hints API + // (navigator.userAgentData.brands). + AttributeBrowserBrands = "browser.brands" + // Preferred language of the user using the browser + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // navigator.language. + AttributeBrowserLanguage = "browser.language" + // A boolean that is true if the browser is running on a mobile device + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + // Note: This value is intended to be taken from the UA client hints API + // (navigator.userAgentData.mobile). If unavailable, this attribute SHOULD be left + // unset. + AttributeBrowserMobile = "browser.mobile" + // The platform on which the browser is running + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the UA client hints API + // (navigator.userAgentData.platform). If unavailable, the legacy + // navigator.platform API SHOULD NOT be used instead and this attribute SHOULD be + // left unset in order for the values to be consistent. + // The list of possible values is defined in the W3C User-Agent Client Hints + // specification. Note that some (but not all) of these values can overlap with + // values in the os.type and os.name attributes. However, for consistency, the + // values in the browser.platform attribute should capture the exact value that + // the user agent provides. + AttributeBrowserPlatform = "browser.platform" +) + +// This group describes attributes specific to pipelines within a Continuous +// Integration and Continuous Deployment (CI/CD) system. A +// [pipeline](https://en.wikipedia.org/wiki/Pipeline_(computing)) in this case +// is a series of steps that are performed in order to deliver a new version of +// software. This aligns with the +// [Britannica](https://www.britannica.com/dictionary/pipeline) definition of a +// pipeline where a **pipeline** is the system for developing and producing +// something. In the context of CI/CD, a pipeline produces or delivers +// software. +const ( + // The human readable name of the pipeline within a CI/CD system. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Build and Test', 'Lint', 'Deploy Go Project', + // 'deploy_to_environment' + AttributeCicdPipelineName = "cicd.pipeline.name" + // The unique identifier of a pipeline run within a CI/CD system.
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '120912' + AttributeCicdPipelineRunID = "cicd.pipeline.run.id" + // The human readable name of a task within a pipeline. Task here most closely + // aligns with a computing process in a pipeline. Other terms for tasks include + // commands, steps, and procedures. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Run GoLang Linter', 'Go Build', 'go-test', 'deploy_binary' + AttributeCicdPipelineTaskName = "cicd.pipeline.task.name" + // The unique identifier of a task run within a pipeline. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '12097' + AttributeCicdPipelineTaskRunID = "cicd.pipeline.task.run.id" + // The URL of the pipeline run providing the complete address in order to locate + // and identify the pipeline run. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'https://github.com/open-telemetry/semantic- + // conventions/actions/runs/9753949763/job/26920038674?pr=1075' + AttributeCicdPipelineTaskRunURLFull = "cicd.pipeline.task.run.url.full" + // The type of the task within a pipeline. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'build', 'test', 'deploy' + AttributeCicdPipelineTaskType = "cicd.pipeline.task.type" +) + +const ( + // build + AttributeCicdPipelineTaskTypeBuild = "build" + // test + AttributeCicdPipelineTaskTypeTest = "test" + // deploy + AttributeCicdPipelineTaskTypeDeploy = "deploy" +) + +// These attributes may be used to describe the client in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). This covers all TCP +// network interactions since TCP is connection-based and one side initiates +// the connection (an exception is made for peer-to-peer communication over TCP +// where the "user-facing" surface of the protocol / API doesn't expose a clear +// notion of client and server). This also covers UDP network interactions +// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. +const ( + // Client address - domain name if available without reverse DNS lookup; + // otherwise, IP address or Unix domain socket name. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the server side, and when communicating through an + // intermediary, client.address SHOULD represent the client address behind any + // intermediaries, for example proxies, if it's available. + AttributeClientAddress = "client.address" + // Client port number. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 65123 + // Note: When observed from the server side, and when communicating through an + // intermediary, client.port SHOULD represent the client port behind any + // intermediaries, for example proxies, if it's available. + AttributeClientPort = "client.port" +) + +// A cloud environment (e.g. GCP, Azure, AWS). +const ( + // The cloud account ID the resource is assigned to. 
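The note on client.address above says that, observed from the server side, it SHOULD reflect the client behind any intermediaries, which implies instrumentation has to prefer forwarding headers over the socket peer. A hedged Go sketch of that preference; trusting X-Forwarded-For unconditionally is a simplification here, and real code should only honor it when set by known proxies:

package main

import (
	"fmt"
	"net"
	"net/http"
	"strings"
)

// clientAddress returns a best-effort client.address for an incoming request:
// the first (left-most) X-Forwarded-For entry when present, otherwise the
// socket peer address from RemoteAddr.
func clientAddress(r *http.Request) string {
	if xff := r.Header.Get("X-Forwarded-For"); xff != "" {
		return strings.TrimSpace(strings.Split(xff, ",")[0])
	}
	host, _, err := net.SplitHostPort(r.RemoteAddr)
	if err != nil {
		return r.RemoteAddr
	}
	return host
}

func main() {
	r, _ := http.NewRequest("GET", "http://server.example/", nil)
	r.RemoteAddr = "10.1.2.80:65123"
	r.Header.Set("X-Forwarded-For", "203.0.113.7, 10.1.2.80")
	fmt.Println(clientAddress(r)) // 203.0.113.7, the client behind the proxy
}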
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '111111111111', 'opentelemetry' + AttributeCloudAccountID = "cloud.account.id" + // Cloud regions often have multiple, isolated locations known as zones to + // increase availability. Availability zone represents the zone where the resource + // is running. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'us-east-1c' + // Note: Availability zones are called "zones" on Alibaba Cloud and + // Google Cloud. + AttributeCloudAvailabilityZone = "cloud.availability_zone" + // The cloud platform in use. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: The prefix of the service SHOULD match the one specified in + // cloud.provider. + AttributeCloudPlatform = "cloud.platform" + // Name of the cloud provider. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeCloudProvider = "cloud.provider" + // The geographical region the resource is running. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'us-central1', 'us-east-1' + // Note: Refer to your provider's docs to see the available regions, for example + // Alibaba Cloud regions, AWS regions, Azure regions, Google Cloud regions, or + // Tencent Cloud regions. + AttributeCloudRegion = "cloud.region" + // Cloud provider-specific native identifier of the monitored cloud resource (e.g. + // an ARN on AWS, a fully qualified resource ID on Azure, a full resource name on + // GCP) + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', '//run.googl + // eapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', '/sub + // scriptions//resourceGroups//providers/Microsoft.Web/sites + // //functions/' + // Note: On some cloud providers, it may not be possible to determine the full ID + // at startup, + // so it may be necessary to set cloud.resource_id as a span attribute instead.The + // exact value to use for cloud.resource_id depends on the cloud provider. + // The following well-known definitions MUST be used if you set this attribute and + // they apply:
+ //
+ //   - AWS Lambda: The function ARN. + // Take care not to use the "invoked ARN" directly but replace any + // alias suffix + // with the resolved function version, as the same runtime instance may be + // invocable with + // multiple different aliases.
+ //   - GCP: The URI of the resource
+ //   - Azure: The Fully Qualified Resource ID of the invoked function, + // not the function app, having the form + // /subscriptions//resourceGroups//providers/Microsoft.Web/s + // ites//functions/. + // This means that a span attribute MUST be used, as an Azure function app can + // host multiple functions that would usually share + // a TracerProvider.
+ AttributeCloudResourceID = "cloud.resource_id" +) + +const ( + // Alibaba Cloud Elastic Compute Service + AttributeCloudPlatformAlibabaCloudECS = "alibaba_cloud_ecs" + // Alibaba Cloud Function Compute + AttributeCloudPlatformAlibabaCloudFc = "alibaba_cloud_fc" + // Red Hat OpenShift on Alibaba Cloud + AttributeCloudPlatformAlibabaCloudOpenshift = "alibaba_cloud_openshift" + // AWS Elastic Compute Cloud + AttributeCloudPlatformAWSEC2 = "aws_ec2" + // AWS Elastic Container Service + AttributeCloudPlatformAWSECS = "aws_ecs" + // AWS Elastic Kubernetes Service + AttributeCloudPlatformAWSEKS = "aws_eks" + // AWS Lambda + AttributeCloudPlatformAWSLambda = "aws_lambda" + // AWS Elastic Beanstalk + AttributeCloudPlatformAWSElasticBeanstalk = "aws_elastic_beanstalk" + // AWS App Runner + AttributeCloudPlatformAWSAppRunner = "aws_app_runner" + // Red Hat OpenShift on AWS (ROSA) + AttributeCloudPlatformAWSOpenshift = "aws_openshift" + // Azure Virtual Machines + AttributeCloudPlatformAzureVM = "azure_vm" + // Azure Container Apps + AttributeCloudPlatformAzureContainerApps = "azure_container_apps" + // Azure Container Instances + AttributeCloudPlatformAzureContainerInstances = "azure_container_instances" + // Azure Kubernetes Service + AttributeCloudPlatformAzureAKS = "azure_aks" + // Azure Functions + AttributeCloudPlatformAzureFunctions = "azure_functions" + // Azure App Service + AttributeCloudPlatformAzureAppService = "azure_app_service" + // Azure Red Hat OpenShift + AttributeCloudPlatformAzureOpenshift = "azure_openshift" + // Google Bare Metal Solution (BMS) + AttributeCloudPlatformGCPBareMetalSolution = "gcp_bare_metal_solution" + // Google Cloud Compute Engine (GCE) + AttributeCloudPlatformGCPComputeEngine = "gcp_compute_engine" + // Google Cloud Run + AttributeCloudPlatformGCPCloudRun = "gcp_cloud_run" + // Google Cloud Kubernetes Engine (GKE) + AttributeCloudPlatformGCPKubernetesEngine = "gcp_kubernetes_engine" + // Google Cloud Functions (GCF) + AttributeCloudPlatformGCPCloudFunctions = "gcp_cloud_functions" + // Google Cloud App Engine (GAE) + AttributeCloudPlatformGCPAppEngine = "gcp_app_engine" + // Red Hat OpenShift on Google Cloud + AttributeCloudPlatformGCPOpenshift = "gcp_openshift" + // Red Hat OpenShift on IBM Cloud + AttributeCloudPlatformIbmCloudOpenshift = "ibm_cloud_openshift" + // Tencent Cloud Cloud Virtual Machine (CVM) + AttributeCloudPlatformTencentCloudCvm = "tencent_cloud_cvm" + // Tencent Cloud Elastic Kubernetes Service (EKS) + AttributeCloudPlatformTencentCloudEKS = "tencent_cloud_eks" + // Tencent Cloud Serverless Cloud Function (SCF) + AttributeCloudPlatformTencentCloudScf = "tencent_cloud_scf" +) + +const ( + // Alibaba Cloud + AttributeCloudProviderAlibabaCloud = "alibaba_cloud" + // Amazon Web Services + AttributeCloudProviderAWS = "aws" + // Microsoft Azure + AttributeCloudProviderAzure = "azure" + // Google Cloud Platform + AttributeCloudProviderGCP = "gcp" + // Heroku Platform as a Service + AttributeCloudProviderHeroku = "heroku" + // IBM Cloud + AttributeCloudProviderIbmCloud = "ibm_cloud" + // Tencent Cloud + AttributeCloudProviderTencentCloud = "tencent_cloud" +) + +// Attributes for CloudEvents. +const ( + // The event_id uniquely identifies the event. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' + AttributeCloudeventsEventID = "cloudevents.event_id" + // The source identifies the context in which an event happened.
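The cloud.platform note above says the value's prefix SHOULD match cloud.provider, and the enum constants just listed follow that rule (aws_lambda under aws, ibm_cloud_openshift under ibm_cloud, and so on). A small consistency check, offered as an illustrative sketch rather than anything the package provides:

package main

import (
	"fmt"
	"strings"

	semconv "go.opentelemetry.io/collector/semconv/v1.27.0"
)

// platformMatchesProvider checks the prefix rule from the cloud.platform note:
// the platform value is expected to start with the cloud.provider value.
func platformMatchesProvider(platform, provider string) bool {
	return strings.HasPrefix(platform, provider+"_")
}

func main() {
	fmt.Println(platformMatchesProvider(semconv.AttributeCloudPlatformAWSLambda, semconv.AttributeCloudProviderAWS)) // true
	fmt.Println(platformMatchesProvider(semconv.AttributeCloudPlatformAzureAKS, semconv.AttributeCloudProviderGCP))  // false
}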
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'https://github.com/cloudevents', '/cloudevents/spec/pull/123', 'my- + // service' + AttributeCloudeventsEventSource = "cloudevents.event_source" + // The version of the CloudEvents specification which the event uses. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1.0' + AttributeCloudeventsEventSpecVersion = "cloudevents.event_spec_version" + // The subject of the event in the context of the event producer (identified by + // source). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'mynewfile.jpg' + AttributeCloudeventsEventSubject = "cloudevents.event_subject" + // The event_type contains a value describing the type of event related to the + // originating occurrence. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'com.github.pull_request.opened', 'com.example.object.deleted.v2' + AttributeCloudeventsEventType = "cloudevents.event_type" +) + +// These attributes allow to report this unit of code and therefore to provide +// more context about the span. +const ( + // The column number in code.filepath best representing the operation. It SHOULD + // point within the code unit named in code.function. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 16 + AttributeCodeColumn = "code.column" + // The source code file name that identifies the code unit as uniquely as possible + // (preferably an absolute file path). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + AttributeCodeFilepath = "code.filepath" + // The method or function name, or equivalent (usually rightmost part of the code + // unit's name). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'serveRequest' + AttributeCodeFunction = "code.function" + // The line number in code.filepath best representing the operation. It SHOULD + // point within the code unit named in code.function. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 42 + AttributeCodeLineNumber = "code.lineno" + // The "namespace" within which code.function is defined. Usually the + // qualified class or module name, such that code.namespace + some separator + + // code.function form a unique identifier for the code unit. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'com.example.MyHTTPService' + AttributeCodeNamespace = "code.namespace" + // A stacktrace as a string in the natural representation for the language + // runtime. The representation is to be determined and documented by each language + // SIG. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + AttributeCodeStacktrace = "code.stacktrace" +) + +// A container instance. +const ( + // The command used to run the container (i.e. the command name). 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'otelcontribcol' + // Note: If using embedded credentials or sensitive data, it is recommended to + // remove them to prevent potential leakage. + AttributeContainerCommand = "container.command" + // All the command arguments (including the command/executable itself) run by the + // container. [2] + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'otelcontribcol, --config, config.yaml' + AttributeContainerCommandArgs = "container.command_args" + // The full command run by the container as a single string representing the full + // command. [2] + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'otelcontribcol --config config.yaml' + AttributeContainerCommandLine = "container.command_line" + // Container ID. Usually a UUID, as for example used to identify Docker + // containers. The UUID might be abbreviated. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'a3bf90e006b2' + AttributeContainerID = "container.id" + // Runtime specific image identifier. Usually a hash algorithm followed by a UUID. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: + // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f' + // Note: Docker defines a sha256 of the image id; container.image.id corresponds + // to the Image field from the Docker container inspect API endpoint. + // K8S defines a link to the container registry repository with digest "imageID": + // "registry.azurecr.io /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e + // 8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625". + // The ID is assigned by the container runtime and can vary in different + // environments. Consider using oci.manifest.digest if it is important to identify + // the same image in different environments/runtimes. + AttributeContainerImageID = "container.image.id" + // Name of the image the container was built on. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'gcr.io/opentelemetry/operator' + AttributeContainerImageName = "container.image.name" + // Repo digests of the container image as provided by the container runtime. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d7 + // 02d249a0ccb', 'internal.registry.example.com:5000/example@sha256:b69959407d21e8 + // a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578' + // Note: Docker and CRI report those under the RepoDigests field. + AttributeContainerImageRepoDigests = "container.image.repo_digests" + // Container image tags. An example can be found in Docker Image Inspect. Should + // be only the section of the full name for example from + // registry.example.com/my-org/my-image:. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'v1.27.1', '3.5.7-0' + AttributeContainerImageTags = "container.image.tags" + // Container name used by container runtime. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry-autoconf' + AttributeContainerName = "container.name" + // The container runtime managing this container. 
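The container attributes above combine scalars with string arrays (container.image.tags, container.command_args), and the container.command note warns about leaking embedded credentials. A hedged pcommon sketch of populating a container resource (values from the Examples lines; the map API is an assumption):

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
	semconv "go.opentelemetry.io/collector/semconv/v1.27.0"
)

func main() {
	attrs := pcommon.NewMap()
	attrs.PutStr(semconv.AttributeContainerID, "a3bf90e006b2")
	attrs.PutStr(semconv.AttributeContainerName, "opentelemetry-autoconf")
	attrs.PutStr(semconv.AttributeContainerImageName, "gcr.io/opentelemetry/operator")

	// container.image.tags is string[]: only the tag portion, never the repo.
	tags := attrs.PutEmptySlice(semconv.AttributeContainerImageTags)
	tags.AppendEmpty().SetStr("v1.27.1")

	fmt.Println(attrs.Len()) // 4
}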
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'docker', 'containerd', 'rkt' + AttributeContainerRuntime = "container.runtime" +) + +// Attributes specific to a cpu instance. +const ( + // The mode of the CPU + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'user', 'system' + AttributeCPUMode = "cpu.mode" +) + +const ( + // user + AttributeCPUModeUser = "user" + // system + AttributeCPUModeSystem = "system" + // nice + AttributeCPUModeNice = "nice" + // idle + AttributeCPUModeIdle = "idle" + // iowait + AttributeCPUModeIowait = "iowait" + // interrupt + AttributeCPUModeInterrupt = "interrupt" + // steal + AttributeCPUModeSteal = "steal" + // kernel + AttributeCPUModeKernel = "kernel" +) + +// This group defines the attributes used to describe telemetry in the context +// of databases. +const ( + // The name of the connection pool; unique within the instrumented application. In + // case the connection pool implementation doesn't provide a name, instrumentation + // SHOULD use a combination of parameters that would make the name unique, for + // example, combining attributes server.address, server.port, and db.namespace, + // formatted as server.address:server.port/db.namespace. Instrumentations that + // generate connection pool name following different patterns SHOULD document it. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myDataSource' + AttributeDBClientConnectionPoolName = "db.client.connection.pool.name" + // The state of a connection in the pool + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'idle' + AttributeDBClientConnectionState = "db.client.connection.state" + // The name of a collection (table, container) within the database. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'public.users', 'customers' + // Note: It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + // If the collection name is parsed from the query text, it SHOULD be the first + // collection name found in the query and it SHOULD match the value provided in + // the query text including any schema and database name prefix. + // For batch operations, if the individual operations are known to have the same + // collection name then that collection name SHOULD be used, otherwise + // db.collection.name SHOULD NOT be captured. + AttributeDBCollectionName = "db.collection.name" + // The name of the database, fully qualified within the server address and port. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'customers', 'test.users' + // Note: If a database system has multiple namespace components, they SHOULD be + // concatenated (potentially using database system specific conventions) from most + // general to most specific namespace component, and more specific namespaces + // SHOULD NOT be captured without the more general namespaces, to ensure that + // "startswith" queries for the more general namespaces will be valid. + // Semantic conventions for individual database systems SHOULD document what + // db.namespace means in the context of that system. + // It is RECOMMENDED to capture the value as provided by the application without + // attempting to do any case normalization. 
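When a pool has no explicit name, the db.client.connection.pool.name note above prescribes composing one as server.address:server.port/db.namespace. A one-line helper capturing that fallback (the formatting rule is the note's; the helper itself is a sketch, not part of the package):

package main

import "fmt"

// poolName implements the fallback naming from the db.client.connection.pool.name
// note: server.address:server.port/db.namespace.
func poolName(serverAddress string, serverPort int, dbNamespace string) string {
	return fmt.Sprintf("%s:%d/%s", serverAddress, serverPort, dbNamespace)
}

func main() {
	fmt.Println(poolName("db.example.com", 5432, "customers")) // db.example.com:5432/customers
}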
+ AttributeDBNamespace = "db.namespace" + // The number of queries included in a batch operation. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 2, 3, 4 + // Note: Operations are only considered batches when they contain two or more + // operations, and so db.operation.batch.size SHOULD never be 1. + AttributeDBOperationBatchSize = "db.operation.batch.size" + // The name of the operation or command being executed. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + // If the operation name is parsed from the query text, it SHOULD be the first + // operation name found in the query. + // For batch operations, if the individual operations are known to have the same + // operation name then that operation name SHOULD be used prepended by BATCH, + // otherwise db.operation.name SHOULD be BATCH or some other database system + // specific term if more applicable. + AttributeDBOperationName = "db.operation.name" + // The database query being executed. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'SELECT * FROM wuser_table where username = ?', 'SET mykey "WuValue"' + // Note: For sanitization see Sanitization of db.query.text. + // For batch operations, if the individual operations are known to have the same + // query text then that query text SHOULD be used, otherwise all of the individual + // query texts SHOULD be concatenated with separator ; or some other database + // system specific separator if more applicable. + // Even though parameterized query text can potentially have sensitive data, by + // using a parameterized query the user is giving a strong signal that any + // sensitive data will be passed as parameter values, and the benefit to + // observability of capturing the static part of the query text by default + // outweighs the risk. + AttributeDBQueryText = "db.query.text" + // The database management system (DBMS) product as identified by the client + // instrumentation. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: The actual DBMS may differ from the one identified by the client. For + // example, when using PostgreSQL client libraries to connect to a CockroachDB, + // the db.system is set to postgresql based on the instrumentation's best + // knowledge. + AttributeDBSystem = "db.system" +) + +const ( + // idle + AttributeDBClientConnectionStateIdle = "idle" + // used + AttributeDBClientConnectionStateUsed = "used" +) + +const ( + // Some other SQL database. Fallback only. 
See notes + AttributeDBSystemOtherSQL = "other_sql" + // Adabas (Adaptable Database System) + AttributeDBSystemAdabas = "adabas" + // Deprecated, use `intersystems_cache` instead + AttributeDBSystemCache = "cache" + // InterSystems Caché + AttributeDBSystemIntersystemsCache = "intersystems_cache" + // Apache Cassandra + AttributeDBSystemCassandra = "cassandra" + // ClickHouse + AttributeDBSystemClickhouse = "clickhouse" + // Deprecated, use `other_sql` instead + AttributeDBSystemCloudscape = "cloudscape" + // CockroachDB + AttributeDBSystemCockroachdb = "cockroachdb" + // Deprecated, no replacement at this time + AttributeDBSystemColdfusion = "coldfusion" + // Microsoft Azure Cosmos DB + AttributeDBSystemCosmosDB = "cosmosdb" + // Couchbase + AttributeDBSystemCouchbase = "couchbase" + // CouchDB + AttributeDBSystemCouchDB = "couchdb" + // IBM DB2 + AttributeDBSystemDB2 = "db2" + // Apache Derby + AttributeDBSystemDerby = "derby" + // Amazon DynamoDB + AttributeDBSystemDynamoDB = "dynamodb" + // EnterpriseDB + AttributeDBSystemEDB = "edb" + // Elasticsearch + AttributeDBSystemElasticsearch = "elasticsearch" + // FileMaker + AttributeDBSystemFilemaker = "filemaker" + // Firebird + AttributeDBSystemFirebird = "firebird" + // Deprecated, use `other_sql` instead + AttributeDBSystemFirstSQL = "firstsql" + // Apache Geode + AttributeDBSystemGeode = "geode" + // H2 + AttributeDBSystemH2 = "h2" + // SAP HANA + AttributeDBSystemHanaDB = "hanadb" + // Apache HBase + AttributeDBSystemHBase = "hbase" + // Apache Hive + AttributeDBSystemHive = "hive" + // HyperSQL DataBase + AttributeDBSystemHSQLDB = "hsqldb" + // InfluxDB + AttributeDBSystemInfluxdb = "influxdb" + // Informix + AttributeDBSystemInformix = "informix" + // Ingres + AttributeDBSystemIngres = "ingres" + // InstantDB + AttributeDBSystemInstantDB = "instantdb" + // InterBase + AttributeDBSystemInterbase = "interbase" + // MariaDB + AttributeDBSystemMariaDB = "mariadb" + // SAP MaxDB + AttributeDBSystemMaxDB = "maxdb" + // Memcached + AttributeDBSystemMemcached = "memcached" + // MongoDB + AttributeDBSystemMongoDB = "mongodb" + // Microsoft SQL Server + AttributeDBSystemMSSQL = "mssql" + // Deprecated, Microsoft SQL Server Compact is discontinued + AttributeDBSystemMssqlcompact = "mssqlcompact" + // MySQL + AttributeDBSystemMySQL = "mysql" + // Neo4j + AttributeDBSystemNeo4j = "neo4j" + // Netezza + AttributeDBSystemNetezza = "netezza" + // OpenSearch + AttributeDBSystemOpensearch = "opensearch" + // Oracle Database + AttributeDBSystemOracle = "oracle" + // Pervasive PSQL + AttributeDBSystemPervasive = "pervasive" + // PointBase + AttributeDBSystemPointbase = "pointbase" + // PostgreSQL + AttributeDBSystemPostgreSQL = "postgresql" + // Progress Database + AttributeDBSystemProgress = "progress" + // Redis + AttributeDBSystemRedis = "redis" + // Amazon Redshift + AttributeDBSystemRedshift = "redshift" + // Cloud Spanner + AttributeDBSystemSpanner = "spanner" + // SQLite + AttributeDBSystemSqlite = "sqlite" + // Sybase + AttributeDBSystemSybase = "sybase" + // Teradata + AttributeDBSystemTeradata = "teradata" + // Trino + AttributeDBSystemTrino = "trino" + // Vertica + AttributeDBSystemVertica = "vertica" +) + +// This group defines attributes for Cassandra. +const ( + // The consistency level of the query. Based on consistency values from CQL. 
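The db.operation.name note above carries a small algorithm: a batch whose operations all share one name is reported as "BATCH <op>", a mixed batch falls back to "BATCH", and db.operation.batch.size should never be 1. A hedged sketch of that rule, not part of the generated package:

package main

import "fmt"

// batchOperationName applies the db.operation.name batching rule: a uniform
// batch is named "BATCH <op>", a mixed batch falls back to plain "BATCH".
func batchOperationName(ops []string) string {
	if len(ops) < 2 {
		// Not a batch: db.operation.batch.size SHOULD never be 1.
		if len(ops) == 1 {
			return ops[0]
		}
		return ""
	}
	for _, op := range ops[1:] {
		if op != ops[0] {
			return "BATCH"
		}
	}
	return "BATCH " + ops[0]
}

func main() {
	fmt.Println(batchOperationName([]string{"INSERT", "INSERT"})) // BATCH INSERT
	fmt.Println(batchOperationName([]string{"INSERT", "UPDATE"})) // BATCH
}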
+ // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeDBCassandraConsistencyLevel = "db.cassandra.consistency_level" + // The data center of the coordinating node for a query. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'us-west-2' + AttributeDBCassandraCoordinatorDC = "db.cassandra.coordinator.dc" + // The ID of the coordinating node for a query. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + AttributeDBCassandraCoordinatorID = "db.cassandra.coordinator.id" + // Whether or not the query is idempotent. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeDBCassandraIdempotence = "db.cassandra.idempotence" + // The fetch size used for paging, i.e. how many rows will be returned at once. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 5000 + AttributeDBCassandraPageSize = "db.cassandra.page_size" + // The number of times a query was speculatively executed. Not set or 0 if the + // query was not executed speculatively. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 0, 2 + AttributeDBCassandraSpeculativeExecutionCount = "db.cassandra.speculative_execution_count" +) + +const ( + // all + AttributeDBCassandraConsistencyLevelAll = "all" + // each_quorum + AttributeDBCassandraConsistencyLevelEachQuorum = "each_quorum" + // quorum + AttributeDBCassandraConsistencyLevelQuorum = "quorum" + // local_quorum + AttributeDBCassandraConsistencyLevelLocalQuorum = "local_quorum" + // one + AttributeDBCassandraConsistencyLevelOne = "one" + // two + AttributeDBCassandraConsistencyLevelTwo = "two" + // three + AttributeDBCassandraConsistencyLevelThree = "three" + // local_one + AttributeDBCassandraConsistencyLevelLocalOne = "local_one" + // any + AttributeDBCassandraConsistencyLevelAny = "any" + // serial + AttributeDBCassandraConsistencyLevelSerial = "serial" + // local_serial + AttributeDBCassandraConsistencyLevelLocalSerial = "local_serial" +) + +// This group defines attributes for Azure Cosmos DB. +const ( + // Unique Cosmos client instance id. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' + AttributeDBCosmosDBClientID = "db.cosmosdb.client_id" + // Cosmos client connection mode. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeDBCosmosDBConnectionMode = "db.cosmosdb.connection_mode" + // CosmosDB Operation Type. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeDBCosmosDBOperationType = "db.cosmosdb.operation_type" + // RU consumed for that operation + // + // Type: double + // Requirement Level: Optional + // Stability: experimental + // Examples: 46.18, 1.0 + AttributeDBCosmosDBRequestCharge = "db.cosmosdb.request_charge" + // Request payload size in bytes + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + AttributeDBCosmosDBRequestContentLength = "db.cosmosdb.request_content_length" + // Cosmos DB status code. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 200, 201 + AttributeDBCosmosDBStatusCode = "db.cosmosdb.status_code" + // Cosmos DB sub status code. 
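The Cassandra block above mixes an enum (db.cassandra.consistency_level, whose allowed values are the generated constants that follow) with plain scalars. A hedged pcommon sketch of a CQL client span's attributes:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
	semconv "go.opentelemetry.io/collector/semconv/v1.27.0"
)

func main() {
	attrs := pcommon.NewMap()
	// Enum attributes carry one of the generated value constants.
	attrs.PutStr(semconv.AttributeDBCassandraConsistencyLevel,
		semconv.AttributeDBCassandraConsistencyLevelLocalQuorum)
	attrs.PutInt(semconv.AttributeDBCassandraPageSize, 5000)
	attrs.PutBool(semconv.AttributeDBCassandraIdempotence, true)
	fmt.Println(attrs.Len()) // 3
}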
+ // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1000, 1002 + AttributeDBCosmosDBSubStatusCode = "db.cosmosdb.sub_status_code" +) + +const ( + // Gateway (HTTP) connections mode + AttributeDBCosmosDBConnectionModeGateway = "gateway" + // Direct connection + AttributeDBCosmosDBConnectionModeDirect = "direct" +) + +const ( + // invalid + AttributeDBCosmosDBOperationTypeInvalid = "Invalid" + // create + AttributeDBCosmosDBOperationTypeCreate = "Create" + // patch + AttributeDBCosmosDBOperationTypePatch = "Patch" + // read + AttributeDBCosmosDBOperationTypeRead = "Read" + // read_feed + AttributeDBCosmosDBOperationTypeReadFeed = "ReadFeed" + // delete + AttributeDBCosmosDBOperationTypeDelete = "Delete" + // replace + AttributeDBCosmosDBOperationTypeReplace = "Replace" + // execute + AttributeDBCosmosDBOperationTypeExecute = "Execute" + // query + AttributeDBCosmosDBOperationTypeQuery = "Query" + // head + AttributeDBCosmosDBOperationTypeHead = "Head" + // head_feed + AttributeDBCosmosDBOperationTypeHeadFeed = "HeadFeed" + // upsert + AttributeDBCosmosDBOperationTypeUpsert = "Upsert" + // batch + AttributeDBCosmosDBOperationTypeBatch = "Batch" + // query_plan + AttributeDBCosmosDBOperationTypeQueryPlan = "QueryPlan" + // execute_javascript + AttributeDBCosmosDBOperationTypeExecuteJavascript = "ExecuteJavaScript" +) + +// This group defines attributes for Elasticsearch. +const ( + // Represents the human-readable identifier of the node/instance to which a + // request was routed. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'instance-0000000001' + AttributeDBElasticsearchNodeName = "db.elasticsearch.node.name" +) + +// Attributes for software deployments. +const ( + // Name of the deployment environment (aka deployment tier). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'staging', 'production' + // Note: deployment.environment.name does not affect the uniqueness constraints + // defined through + // the service.namespace, service.name and service.instance.id resource + // attributes. + // This implies that resources carrying the following attribute combinations MUST + // be + // considered to be identifying the same service:
+ //
+ //   - service.name=frontend, deployment.environment.name=production
+ //   - service.name=frontend, deployment.environment.name=staging.
+ //
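To make the uniqueness note above concrete, here is a minimal sketch (not part of this diff) that builds two attribute sets differing only in deployment.environment.name; per the note, both identify the same service. It assumes the go.opentelemetry.io/otel/attribute package and uses string literals matching the constants in this package.

```go
package main

import (
    "fmt"

    "go.opentelemetry.io/otel/attribute"
)

func main() {
    // Both sets carry service.name=frontend; the note above says resources
    // with these combinations MUST be considered the same service.
    prod := []attribute.KeyValue{
        attribute.String("service.name", "frontend"),                  // AttributeServiceName, defined elsewhere in this package
        attribute.String("deployment.environment.name", "production"), // AttributeDeploymentEnvironmentName
    }
    staging := []attribute.KeyValue{
        attribute.String("service.name", "frontend"),
        attribute.String("deployment.environment.name", "staging"),
    }
    fmt.Println(prod, staging)
}
```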
    + AttributeDeploymentEnvironmentName = "deployment.environment.name" + // The id of the deployment. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1208' + AttributeDeploymentID = "deployment.id" + // The name of the deployment. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'deploy my app', 'deploy-frontend' + AttributeDeploymentName = "deployment.name" + // The status of the deployment. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeDeploymentStatus = "deployment.status" +) + +const ( + // failed + AttributeDeploymentStatusFailed = "failed" + // succeeded + AttributeDeploymentStatusSucceeded = "succeeded" +) + +// Attributes that represents an occurrence of a lifecycle transition on the +// Android platform. +const ( + // Deprecated use the device.app.lifecycle event definition including + // android.state as a payload field instead. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: The Android lifecycle states are defined in Activity lifecycle callbacks, + // and from which the OS identifiers are derived. + AttributeAndroidState = "android.state" +) + +const ( + // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time + AttributeAndroidStateCreated = "created" + // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state + AttributeAndroidStateBackground = "background" + // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states + AttributeAndroidStateForeground = "foreground" +) + +// These attributes may be used to describe the receiver of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // Destination address - domain name if available without reverse DNS lookup; + // otherwise, IP address or Unix domain socket name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the source side, and when communicating through an + // intermediary, destination.address SHOULD represent the destination address + // behind any intermediaries, for example proxies, if it's available. + AttributeDestinationAddress = "destination.address" + // Destination port number + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 3389, 2888 + AttributeDestinationPort = "destination.port" +) + +// Describes device attributes. +const ( + // A unique identifier representing the device + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values outlined + // below. 
This value is not an advertising identifier and MUST NOT be used as + // such. On iOS (Swift or Objective-C), this value MUST be equal to the vendor + // identifier. On Android (Java or Kotlin), this value MUST be equal to the + // Firebase Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found here on best + // practices and exact implementation details. Caution should be taken when + // storing personal data or anything which can identify a user. GDPR and data + // protection laws may apply, ensure you do your own due diligence. + AttributeDeviceID = "device.id" + // The name of the device manufacturer + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via Build. iOS apps SHOULD hardcode + // the value Apple. + AttributeDeviceManufacturer = "device.manufacturer" + // The model identifier for the device + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine-readable version of the + // model identifier rather than the market or consumer-friendly name of the + // device. + AttributeDeviceModelIdentifier = "device.model.identifier" + // The marketing name for the device model + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human-readable version of the + // device model rather than a machine-readable alternative. + AttributeDeviceModelName = "device.model.name" +) + +// These attributes may be used for any disk related operation. +const ( + // The disk IO operation direction. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'read' + AttributeDiskIoDirection = "disk.io.direction" +) + +const ( + // read + AttributeDiskIoDirectionRead = "read" + // write + AttributeDiskIoDirectionWrite = "write" +) + +// The shared attributes used to report a DNS query. +const ( + // The name being queried. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'www.example.com', 'opentelemetry.io' + // Note: If the name field contains non-printable characters (below 32 or above + // 126), those characters should be represented as escaped base 10 integers + // (\DDD). Back slashes and quotes should be escaped. Tabs, carriage returns, and + // line feeds should be converted to \t, \r, and \n respectively. + AttributeDNSQuestionName = "dns.question.name" +) + +// The shared attributes used to report an error. +const ( + // Describes a class of error the operation ended with. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'timeout', 'java.net.UnknownHostException', + // 'server_certificate_invalid', '500' + // Note: The error.type SHOULD be predictable, and SHOULD have low + // cardinality.When error.type is set to a type (e.g., an exception type), its + // canonical class name identifying the type within the artifact SHOULD be + // used.Instrumentations SHOULD document the list of errors they report.The + // cardinality of error.type within one instrumentation library SHOULD be low. 
+ // Telemetry consumers that aggregate data from multiple instrumentation libraries + // and applications + // should be prepared for error.type to have high cardinality at query time when + // no + // additional filters are applied.If the operation has completed successfully, + // instrumentations SHOULD NOT set error.type.If a specific domain defines its own + // set of error identifiers (such as HTTP or gRPC status codes), + // it's RECOMMENDED to:
+ //
+ //   - Use a domain-specific attribute
+ //   - Set error.type to capture all errors, regardless of whether they are
+ //     defined within the domain-specific set or not.
+ //
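A minimal sketch (not from this diff) of that recommendation for HTTP: keep the domain-specific status code attribute and mirror it into error.type so failures can be aggregated uniformly. The helper name is hypothetical; attribute is go.opentelemetry.io/otel/attribute.

```go
package main

import (
    "fmt"
    "strconv"

    "go.opentelemetry.io/otel/attribute"
)

// httpErrorAttrs is a hypothetical helper following the two bullets above:
// it returns the domain-specific attribute (http.response.status_code) plus
// error.type. Per the note, callers SHOULD NOT invoke it for operations
// that completed successfully.
func httpErrorAttrs(statusCode int) []attribute.KeyValue {
    return []attribute.KeyValue{
        attribute.Int("http.response.status_code", statusCode),   // AttributeHTTPResponseStatusCode
        attribute.String("error.type", strconv.Itoa(statusCode)), // AttributeErrorType, e.g. "500"
    }
}

func main() {
    fmt.Println(httpErrorAttrs(500))
}
```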
    + AttributeErrorType = "error.type" +) + +const ( + // A fallback error value to be used when the instrumentation doesn't define a custom value + AttributeErrorTypeOther = "_OTHER" +) + +// Attributes for Events represented using Log Records. +const ( + // Identifies the class / type of event. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'browser.mouse.click', 'device.app.lifecycle' + // Note: Event names are subject to the same rules as attribute names. Notably, + // event names are namespaced to avoid collisions and provide a clean separation + // of semantics for events in separate domains like browser, mobile, and + // kubernetes. + AttributeEventName = "event.name" +) + +// The shared attributes used to report a single exception associated with a +// span or log. +const ( + // SHOULD be set to true if the exception event is recorded at a point where it is + // known that the exception is escaping the scope of the span. + // + // Type: boolean + // Requirement Level: Optional + // Stability: stable + // Note: An exception is considered to have escaped (or left) the scope of a span, + // if that span is ended while the exception is still logically "in + // flight". + // This may be actually "in flight" in some languages (e.g. if the + // exception + // is passed to a Context manager's __exit__ method in Python) but will + // usually be caught at the point of recording the exception in most languages.It + // is usually not possible to determine at the point where an exception is thrown + // whether it will escape the scope of a span. + // However, it is trivial to know that an exception + // will escape, if one checks for an active exception just before ending the span, + // as done in the example for recording span exceptions.It follows that an + // exception may still escape the scope of the span + // even if the exception.escaped attribute was not set or set to false, + // since the event might have been recorded at a time where it was not + // clear whether the exception will escape. + AttributeExceptionEscaped = "exception.escaped" + // The exception message. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly" + AttributeExceptionMessage = "exception.message" + // A stacktrace as a string in the natural representation for the language + // runtime. The representation is to be determined and documented by each language + // SIG. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + AttributeExceptionStacktrace = "exception.stacktrace" + // The type of the exception (its fully-qualified class name, if applicable). The + // dynamic type of the exception should be preferred over the static type in + // languages that support it. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'java.net.ConnectException', 'OSError' + AttributeExceptionType = "exception.type" +) + +// FaaS attributes +const ( + // A boolean that is true if the serverless function is executed for the first + // time (aka cold-start). 
+ // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeFaaSColdstart = "faas.coldstart" + // A string containing the schedule period as Cron Expression. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '0/5 * * * ? *' + AttributeFaaSCron = "faas.cron" + // The name of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos + // DB to the database name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myBucketName', 'myDBName' + AttributeFaaSDocumentCollection = "faas.document.collection" + // The document name/table subjected to the operation. For example, in Cloud + // Storage or S3 is the name of the file, and in Cosmos DB the table name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myFile.txt', 'myTableName' + AttributeFaaSDocumentName = "faas.document.name" + // Describes the type of the operation that was performed on the data. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeFaaSDocumentOperation = "faas.document.operation" + // A string containing the time when the data was accessed in the ISO 8601 format + // expressed in UTC. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + AttributeFaaSDocumentTime = "faas.document.time" + // The execution environment ID as a string, that will be potentially reused for + // other invocations to the same function/function version. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note:
+ //
+ //   - AWS Lambda: Use the (full) log stream name.
+ //
    + AttributeFaaSInstance = "faas.instance" + // The invocation ID of the current function invocation. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + AttributeFaaSInvocationID = "faas.invocation_id" + // The name of the invoked function. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'my-function' + // Note: SHOULD be equal to the faas.name resource attribute of the invoked + // function. + AttributeFaaSInvokedName = "faas.invoked_name" + // The cloud provider of the invoked function. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: SHOULD be equal to the cloud.provider resource attribute of the invoked + // function. + AttributeFaaSInvokedProvider = "faas.invoked_provider" + // The cloud region of the invoked function. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the cloud.region resource attribute of the invoked + // function. + AttributeFaaSInvokedRegion = "faas.invoked_region" + // The amount of memory available to the serverless function converted to Bytes. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 134217728 + // Note: It's recommended to set this attribute since e.g. too little memory can + // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, + // the environment variable AWS_LAMBDA_FUNCTION_MEMORY_SIZE provides this + // information (which must be multiplied by 1,048,576). + AttributeFaaSMaxMemory = "faas.max_memory" + // The name of the single function that this runtime instance executes. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // code.namespace/code.function + // span attributes).For some cloud providers, the above definition is ambiguous. + // The following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud providers/products:
+ //
+ //   - Azure: The full name <FUNCAPP>/<FUNC>, i.e., function app name
+ //     followed by a forward slash followed by the function name (this form
+ //     can also be seen in the resource JSON for the function).
+ //     This means that a span attribute MUST be used, as an Azure function
+ //     app can host multiple functions that would usually share
+ //     a TracerProvider (see also the cloud.resource_id attribute).
+ //
    + AttributeFaaSName = "faas.name" + // A string containing the function invocation time in the ISO 8601 format + // expressed in UTC. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + AttributeFaaSTime = "faas.time" + // Type of the trigger which caused this function invocation. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeFaaSTrigger = "faas.trigger" + // The immutable version of the function being executed. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use:
+ //
+ //   - AWS Lambda: The function version
+ //     (an integer represented as a decimal string).
+ //   - Google Cloud Run (Services): The revision
+ //     (i.e., the function name plus the revision suffix).
+ //   - Google Cloud Functions: The value of the
+ //     K_REVISION environment variable.
+ //   - Azure Functions: Not applicable. Do not set this attribute.
+ //
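As a concrete instance of the per-provider rules above, a sketch for AWS Lambda. It relies only on the standard Lambda runtime environment variables that AWS documents; it is illustrative, not the collector's code.

```go
package main

import (
    "fmt"
    "os"
    "strconv"
)

func main() {
    // Populate the faas.* attributes above from the AWS Lambda runtime
    // environment. Other providers use different sources per the notes above.
    attrs := map[string]any{
        "faas.name":     os.Getenv("AWS_LAMBDA_FUNCTION_NAME"),    // AttributeFaaSName
        "faas.version":  os.Getenv("AWS_LAMBDA_FUNCTION_VERSION"), // AttributeFaaSVersion
        "faas.instance": os.Getenv("AWS_LAMBDA_LOG_STREAM_NAME"),  // AttributeFaaSInstance: the (full) log stream name
    }
    // AWS_LAMBDA_FUNCTION_MEMORY_SIZE is in MiB; the faas.max_memory note
    // above requires bytes, hence the multiplication by 1,048,576.
    if mib, err := strconv.Atoi(os.Getenv("AWS_LAMBDA_FUNCTION_MEMORY_SIZE")); err == nil {
        attrs["faas.max_memory"] = mib * 1048576 // AttributeFaaSMaxMemory
    }
    fmt.Println(attrs)
}
```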
    + AttributeFaaSVersion = "faas.version" +) + +const ( + // When a new object is created + AttributeFaaSDocumentOperationInsert = "insert" + // When an object is modified + AttributeFaaSDocumentOperationEdit = "edit" + // When an object is deleted + AttributeFaaSDocumentOperationDelete = "delete" +) + +const ( + // Alibaba Cloud + AttributeFaaSInvokedProviderAlibabaCloud = "alibaba_cloud" + // Amazon Web Services + AttributeFaaSInvokedProviderAWS = "aws" + // Microsoft Azure + AttributeFaaSInvokedProviderAzure = "azure" + // Google Cloud Platform + AttributeFaaSInvokedProviderGCP = "gcp" + // Tencent Cloud + AttributeFaaSInvokedProviderTencentCloud = "tencent_cloud" +) + +const ( + // A response to some data source operation such as a database or filesystem read/write + AttributeFaaSTriggerDatasource = "datasource" + // To provide an answer to an inbound HTTP request + AttributeFaaSTriggerHTTP = "http" + // A function is set to be executed when messages are sent to a messaging system + AttributeFaaSTriggerPubsub = "pubsub" + // A function is scheduled to be executed regularly + AttributeFaaSTriggerTimer = "timer" + // If none of the others apply + AttributeFaaSTriggerOther = "other" +) + +// Attributes for Feature Flags. +const ( + // The unique identifier of the feature flag. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'logo-color' + AttributeFeatureFlagKey = "feature_flag.key" + // The name of the service provider that performs the flag evaluation. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Flag Manager' + AttributeFeatureFlagProviderName = "feature_flag.provider_name" + // SHOULD be a semantic identifier for a value. If one is unavailable, a + // stringified version of the value can be used. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'red', 'true', 'on' + // Note: A semantic identifier, commonly referred to as a variant, provides a + // means + // for referring to a value without including the value itself. This can + // provide additional context for understanding the meaning behind a value. + // For example, the variant red maybe be used for the value #c05543.A stringified + // version of the value can be used in situations where a + // semantic identifier is unavailable. String representation of the value + // should be determined by the implementer. + AttributeFeatureFlagVariant = "feature_flag.variant" +) + +// Describes file attributes. +const ( + // Directory where the file is located. It should include the drive letter, when + // appropriate. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/home/user', 'C:\\Program Files\\MyApp' + AttributeFileDirectory = "file.directory" + // File extension, excluding the leading dot. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'png', 'gz' + // Note: When the file name has multiple extensions (example.tar.gz), only the + // last one should be captured ("gz", not "tar.gz"). + AttributeFileExtension = "file.extension" + // Name of the file including the extension, without the directory. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'example.png' + AttributeFileName = "file.name" + // Full path to the file, including the file name. It should include the drive + // letter, when appropriate. 
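The file.* attributes in this group decompose cleanly with the Go standard library. A sketch, assuming Unix-style paths (filepath is path/filepath); note that filepath.Ext keeps the leading dot, which file.extension excludes, and that for example.tar.gz it returns only ".gz", matching the note above.

```go
package main

import (
    "fmt"
    "path/filepath"
    "strings"
)

// fileAttrs is a hypothetical helper deriving the file.* attributes from a
// full path.
func fileAttrs(path string) map[string]string {
    return map[string]string{
        "file.path":      path,                                        // AttributeFilePath
        "file.directory": filepath.Dir(path),                          // AttributeFileDirectory
        "file.name":      filepath.Base(path),                         // AttributeFileName
        "file.extension": strings.TrimPrefix(filepath.Ext(path), "."), // AttributeFileExtension: no leading dot
    }
}

func main() {
    fmt.Println(fileAttrs("/home/alice/example.png"))
}
```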
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/home/alice/example.png', 'C:\\Program Files\\MyApp\\myapp.exe' + AttributeFilePath = "file.path" + // File size in bytes. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + AttributeFileSize = "file.size" +) + +// Attributes for Google Cloud client libraries. +const ( + // Identifies the Google Cloud service for which the official client library is + // intended. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'appengine', 'run', 'firestore', 'alloydb', 'spanner' + // Note: Intended to be a stable identifier for Google Cloud client libraries that + // is uniform across implementation languages. The value should be derived from + // the canonical service domain for the service; for example, 'foo.googleapis.com' + // should result in a value of 'foo'. + AttributeGCPClientService = "gcp.client.service" +) + +// Attributes for Google Cloud Run. +const ( + // The name of the Cloud Run execution being run for the Job, as set by the + // CLOUD_RUN_EXECUTION environment variable. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'job-name-xxxx', 'sample-job-mdw84' + AttributeGCPCloudRunJobExecution = "gcp.cloud_run.job.execution" + // The index for a task within an execution as provided by the + // CLOUD_RUN_TASK_INDEX environment variable. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 0, 1 + AttributeGCPCloudRunJobTaskIndex = "gcp.cloud_run.job.task_index" +) + +// Attributes for Google Compute Engine (GCE). +const ( + // The hostname of a GCE instance. This is the full value of the default or custom + // hostname. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'my-host1234.example.com', 'sample-vm.us-west1-b.c.my- + // project.internal' + AttributeGCPGceInstanceHostname = "gcp.gce.instance.hostname" + // The instance name of a GCE instance. This is the value provided by host.name, + // the visible name of the instance in the Cloud Console UI, and the prefix for + // the default hostname of the instance as defined by the default internal DNS + // name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'instance-1', 'my-vm-name' + AttributeGCPGceInstanceName = "gcp.gce.instance.name" +) + +// The attributes used to describe telemetry in the context of Generative +// Artificial Intelligence (GenAI) Models requests and responses. +const ( + // The full response received from the GenAI model. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: "[{'role': 'assistant', 'content': 'The capital of France is + // Paris.'}]" + // Note: It's RECOMMENDED to format completions as JSON string matching OpenAI + // messages format + AttributeGenAiCompletion = "gen_ai.completion" + // The name of the operation being performed. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: If one of the predefined values applies, but specific system uses a + // different name it's RECOMMENDED to document it in the semantic conventions for + // specific GenAI system and use system-specific name in the instrumentation. If a + // different name is not documented, instrumentation libraries SHOULD use + // applicable predefined value. 
+ AttributeGenAiOperationName = "gen_ai.operation.name" + // The full prompt sent to the GenAI model. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: "[{'role': 'user', 'content': 'What is the capital of France?'}]" + // Note: It's RECOMMENDED to format prompts as JSON string matching OpenAI + // messages format + AttributeGenAiPrompt = "gen_ai.prompt" + // The frequency penalty setting for the GenAI request. + // + // Type: double + // Requirement Level: Optional + // Stability: experimental + // Examples: 0.1 + AttributeGenAiRequestFrequencyPenalty = "gen_ai.request.frequency_penalty" + // The maximum number of tokens the model generates for a request. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 100 + AttributeGenAiRequestMaxTokens = "gen_ai.request.max_tokens" + // The name of the GenAI model a request is being made to. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'gpt-4' + AttributeGenAiRequestModel = "gen_ai.request.model" + // The presence penalty setting for the GenAI request. + // + // Type: double + // Requirement Level: Optional + // Stability: experimental + // Examples: 0.1 + AttributeGenAiRequestPresencePenalty = "gen_ai.request.presence_penalty" + // List of sequences that the model will use to stop generating further tokens. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'forest', 'lived' + AttributeGenAiRequestStopSequences = "gen_ai.request.stop_sequences" + // The temperature setting for the GenAI request. + // + // Type: double + // Requirement Level: Optional + // Stability: experimental + // Examples: 0.0 + AttributeGenAiRequestTemperature = "gen_ai.request.temperature" + // The top_k sampling setting for the GenAI request. + // + // Type: double + // Requirement Level: Optional + // Stability: experimental + // Examples: 1.0 + AttributeGenAiRequestTopK = "gen_ai.request.top_k" + // The top_p sampling setting for the GenAI request. + // + // Type: double + // Requirement Level: Optional + // Stability: experimental + // Examples: 1.0 + AttributeGenAiRequestTopP = "gen_ai.request.top_p" + // Array of reasons the model stopped generating tokens, corresponding to each + // generation received. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'stop' + AttributeGenAiResponseFinishReasons = "gen_ai.response.finish_reasons" + // The unique identifier for the completion. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'chatcmpl-123' + AttributeGenAiResponseID = "gen_ai.response.id" + // The name of the model that generated the response. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'gpt-4-0613' + AttributeGenAiResponseModel = "gen_ai.response.model" + // The Generative AI product as identified by the client or server + // instrumentation. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'openai' + // Note: The gen_ai.system describes a family of GenAI models with specific model + // identified + // by gen_ai.request.model and gen_ai.response.model attributes.The actual GenAI + // product may differ from the one identified by the client. 
+ // For example, when using OpenAI client libraries to communicate with Mistral, + // the gen_ai.system + // is set to openai based on the instrumentation's best knowledge.For custom + // model, a custom friendly name SHOULD be used. + // If none of these options apply, the gen_ai.system SHOULD be set to _OTHER. + AttributeGenAiSystem = "gen_ai.system" + // The type of token being counted. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'input', 'output' + AttributeGenAiTokenType = "gen_ai.token.type" + // The number of tokens used in the GenAI input (prompt). + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 100 + AttributeGenAiUsageInputTokens = "gen_ai.usage.input_tokens" + // The number of tokens used in the GenAI response (completion). + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 180 + AttributeGenAiUsageOutputTokens = "gen_ai.usage.output_tokens" +) + +const ( + // Chat completion operation such as [OpenAI Chat API](https://platform.openai.com/docs/api-reference/chat) + AttributeGenAiOperationNameChat = "chat" + // Text completions operation such as [OpenAI Completions API (Legacy)](https://platform.openai.com/docs/api-reference/completions) + AttributeGenAiOperationNameTextCompletion = "text_completion" +) + +const ( + // OpenAI + AttributeGenAiSystemOpenai = "openai" + // Vertex AI + AttributeGenAiSystemVertexAi = "vertex_ai" + // Anthropic + AttributeGenAiSystemAnthropic = "anthropic" + // Cohere + AttributeGenAiSystemCohere = "cohere" +) + +const ( + // Input tokens (prompt, input, etc.) + AttributeGenAiTokenTypeInput = "input" + // Output tokens (completion, response, etc.) + AttributeGenAiTokenTypeCompletion = "output" +) + +// Go related attributes. +const ( + // The type of memory. + // + // Type: Enum + // Requirement Level: Recommended + // Stability: experimental + // Examples: 'other', 'stack' + AttributeGoMemoryType = "go.memory.type" +) + +const ( + // Memory allocated from the heap that is reserved for stack space, whether or not it is currently in-use + AttributeGoMemoryTypeStack = "stack" + // Memory used by the Go runtime, excluding other categories of memory usage described in this enumeration + AttributeGoMemoryTypeOther = "other" +) + +// Attributes for GraphQL. +const ( + // The GraphQL document being executed. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'query findBookByID { bookByID(id: ?) { name } }' + // Note: The value may be sanitized to exclude sensitive information. + AttributeGraphqlDocument = "graphql.document" + // The name of the operation being executed. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'findBookByID' + AttributeGraphqlOperationName = "graphql.operation.name" + // The type of the operation being executed. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'query', 'mutation', 'subscription' + AttributeGraphqlOperationType = "graphql.operation.type" +) + +const ( + // GraphQL query + AttributeGraphqlOperationTypeQuery = "query" + // GraphQL mutation + AttributeGraphqlOperationTypeMutation = "mutation" + // GraphQL subscription + AttributeGraphqlOperationTypeSubscription = "subscription" +) + +// Attributes for the Android platform on which the Android application is +// running. 
+const ( + // Unique identifier for the application + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' + AttributeHerokuAppID = "heroku.app.id" + // Commit hash for the current release + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' + AttributeHerokuReleaseCommit = "heroku.release.commit" + // Time and date the release was created + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2022-10-23T18:00:42Z' + AttributeHerokuReleaseCreationTimestamp = "heroku.release.creation_timestamp" +) + +// A host is defined as a computing instance. For example, physical servers, +// virtual machines, switches or disk array. +const ( + // The CPU architecture the host system is running on. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeHostArch = "host.arch" + // The amount of level 2 memory cache available to the processor (in Bytes). + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 12288000 + AttributeHostCPUCacheL2Size = "host.cpu.cache.l2.size" + // Family or generation of the CPU. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '6', 'PA-RISC 1.1e' + AttributeHostCPUFamily = "host.cpu.family" + // Model identifier. It provides more granular information about the CPU, + // distinguishing it from other CPUs within the same family. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '6', '9000/778/B180L' + AttributeHostCPUModelID = "host.cpu.model.id" + // Model designation of the processor. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz' + AttributeHostCPUModelName = "host.cpu.model.name" + // Stepping or core revisions. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1', 'r1p1' + AttributeHostCPUStepping = "host.cpu.stepping" + // Processor manufacturer identifier. A maximum 12-character string. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'GenuineIntel' + // Note: CPUID command returns the vendor ID string in EBX, EDX and ECX registers. + // Writing these to memory in this order results in a 12-character string. + AttributeHostCPUVendorID = "host.cpu.vendor.id" + // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud + // provider. For non-containerized systems, this should be the machine-id. See the + // table below for the sources to use to determine the machine-id based on + // operating system. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'fdbf79e8af94cb7f9e8df36789187052' + AttributeHostID = "host.id" + // VM image ID or host OS image ID. For Cloud, this value is from the provider. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'ami-07b06b442921831e5' + AttributeHostImageID = "host.image.id" + // Name of the VM image or OS install the host was instantiated from. 
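The host.id note in this group points at the machine-id for non-containerized systems; on most Linux distributions that is /etc/machine-id. A sketch under that assumption:

```go
package main

import (
    "fmt"
    "os"
    "strings"
)

// hostID is a hypothetical resolver for host.id (AttributeHostID) on a
// non-containerized Linux system, reading the machine-id as the note above
// prescribes.
func hostID() (string, error) {
    b, err := os.ReadFile("/etc/machine-id")
    if err != nil {
        return "", err
    }
    return strings.TrimSpace(string(b)), nil
}

func main() {
    id, err := hostID()
    fmt.Println(id, err)
}
```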
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + AttributeHostImageName = "host.image.name" + // The version string of the VM image or host OS as defined in Version Attributes. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '0.1' + AttributeHostImageVersion = "host.image.version" + // Available IP addresses of the host, excluding loopback interfaces. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e' + // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 addresses + // MUST be specified in the RFC 5952 format. + AttributeHostIP = "host.ip" + // Available MAC addresses of the host, excluding loopback interfaces. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F' + // Note: MAC Addresses MUST be represented in IEEE RA hexadecimal form: as hyphen- + // separated octets in uppercase hexadecimal form from most to least significant. + AttributeHostMac = "host.mac" + // Name of the host. On Unix systems, it may contain what the hostname command + // returns, or the fully qualified hostname, or another name specified by the + // user. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry-test' + AttributeHostName = "host.name" + // Type of host. For Cloud, this must be the machine type. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'n1-standard-1' + AttributeHostType = "host.type" +) + +const ( + // AMD64 + AttributeHostArchAMD64 = "amd64" + // ARM32 + AttributeHostArchARM32 = "arm32" + // ARM64 + AttributeHostArchARM64 = "arm64" + // Itanium + AttributeHostArchIA64 = "ia64" + // 32-bit PowerPC + AttributeHostArchPPC32 = "ppc32" + // 64-bit PowerPC + AttributeHostArchPPC64 = "ppc64" + // IBM z/Architecture + AttributeHostArchS390x = "s390x" + // 32-bit x86 + AttributeHostArchX86 = "x86" +) + +// Semantic convention attributes in the HTTP namespace. +const ( + // State of the HTTP connection in the HTTP connection pool. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'active', 'idle' + AttributeHTTPConnectionState = "http.connection.state" + // The size of the request payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as the + // Content-Length header. For requests using transport encoding, this should be + // the compressed size. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 3495 + AttributeHTTPRequestBodySize = "http.request.body.size" + // HTTP request method. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'GET', 'POST', 'HEAD' + // Note: HTTP request method value SHOULD be "known" to the + // instrumentation. 
+ // By default, this convention defines "known" methods as the ones + // listed in RFC9110 + // and the PATCH method defined in RFC5789.If the HTTP request method is not known + // to instrumentation, it MUST set the http.request.method attribute to _OTHER.If + // the HTTP instrumentation could end up converting valid HTTP request methods to + // _OTHER, then it MUST provide a way to override + // the list of known HTTP methods. If this override is done via environment + // variable, then the environment variable MUST be named + // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated list of + // case-sensitive known HTTP methods + // (this list MUST be a full override of the default known method, it is not a + // list of known methods in addition to the defaults).HTTP method names are case- + // sensitive and http.request.method attribute value MUST match a known HTTP + // method name exactly. + // Instrumentations for specific web frameworks that consider HTTP methods to be + // case insensitive, SHOULD populate a canonical equivalent. + // Tracing instrumentations that do so, MUST also set http.request.method_original + // to the original value. + AttributeHTTPRequestMethod = "http.request.method" + // Original HTTP method sent by the client in the request line. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'GeT', 'ACL', 'foo' + AttributeHTTPRequestMethodOriginal = "http.request.method_original" + // The ordinal number of request resending attempt (for any reason, including + // redirects). + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 3 + // Note: The resend count SHOULD be updated each time an HTTP request gets resent + // by the client, regardless of what was the cause of the resending (e.g. + // redirection, authorization failure, 503 Server Unavailable, network issues, or + // any other). + AttributeHTTPRequestResendCount = "http.request.resend_count" + // The total size of the request in bytes. This should be the total number of + // bytes sent over the wire, including the request line (HTTP/1.1), framing + // (HTTP/2 and HTTP/3), headers, and request body if any. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1437 + AttributeHTTPRequestSize = "http.request.size" + // The size of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as the + // Content-Length header. For requests using transport encoding, this should be + // the compressed size. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 3495 + AttributeHTTPResponseBodySize = "http.response.body.size" + // The total size of the response in bytes. This should be the total number of + // bytes sent over the wire, including the status line (HTTP/1.1), framing (HTTP/2 + // and HTTP/3), headers, and response body and trailers if any. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1437 + AttributeHTTPResponseSize = "http.response.size" + // HTTP response status code. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 200 + AttributeHTTPResponseStatusCode = "http.response.status_code" + // The matched route, that is, the path template in the format used by the + // respective server framework. 
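The known-methods rule above is mechanical; here is a hypothetical sketch of the normalization it describes (default known set: the RFC 9110 methods plus PATCH), including the _OTHER fallback and preservation of http.request.method_original.

```go
package main

import (
    "fmt"
    "strings"
)

// knownMethods reflects the default set in the note above.
var knownMethods = map[string]bool{
    "CONNECT": true, "DELETE": true, "GET": true, "HEAD": true, "OPTIONS": true,
    "PATCH": true, "POST": true, "PUT": true, "TRACE": true,
}

// normalizeMethod returns the value for http.request.method and, when the
// raw method was canonicalized or replaced, the value for
// http.request.method_original.
func normalizeMethod(raw string) (method, original string) {
    upper := strings.ToUpper(raw)
    switch {
    case knownMethods[raw]:
        return raw, "" // already a known method in canonical case
    case knownMethods[upper]:
        return upper, raw // case-insensitive framework: canonical value, keep original
    default:
        return "_OTHER", raw // AttributeHTTPRequestMethodOther
    }
}

func main() {
    fmt.Println(normalizeMethod("GeT")) // GET, GeT
    fmt.Println(normalizeMethod("foo")) // _OTHER, foo
}
```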
+ // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '/users/:userID?', '{controller}/{action}/{id?}' + // Note: MUST NOT be populated when this is not supported by the HTTP server + // framework as the route attribute should have low-cardinality and the URI path + // can NOT substitute it. + // SHOULD include the application root if there is one. + AttributeHTTPRoute = "http.route" +) + +const ( + // active state + AttributeHTTPConnectionStateActive = "active" + // idle state + AttributeHTTPConnectionStateIdle = "idle" +) + +const ( + // CONNECT method + AttributeHTTPRequestMethodConnect = "CONNECT" + // DELETE method + AttributeHTTPRequestMethodDelete = "DELETE" + // GET method + AttributeHTTPRequestMethodGet = "GET" + // HEAD method + AttributeHTTPRequestMethodHead = "HEAD" + // OPTIONS method + AttributeHTTPRequestMethodOptions = "OPTIONS" + // PATCH method + AttributeHTTPRequestMethodPatch = "PATCH" + // POST method + AttributeHTTPRequestMethodPost = "POST" + // PUT method + AttributeHTTPRequestMethodPut = "PUT" + // TRACE method + AttributeHTTPRequestMethodTrace = "TRACE" + // Any HTTP method that the instrumentation has no prior knowledge of + AttributeHTTPRequestMethodOther = "_OTHER" +) + +// Java Virtual machine related attributes. +const ( + // Name of the buffer pool. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'mapped', 'direct' + // Note: Pool names are generally obtained via BufferPoolMXBean#getName(). + AttributeJvmBufferPoolName = "jvm.buffer.pool.name" + // Name of the garbage collector action. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'end of minor GC', 'end of major GC' + // Note: Garbage collector action is generally obtained via + // GarbageCollectionNotificationInfo#getGcAction(). + AttributeJvmGcAction = "jvm.gc.action" + // Name of the garbage collector. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'G1 Young Generation', 'G1 Old Generation' + // Note: Garbage collector name is generally obtained via + // GarbageCollectionNotificationInfo#getGcName(). + AttributeJvmGcName = "jvm.gc.name" + // Name of the memory pool. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space' + // Note: Pool names are generally obtained via MemoryPoolMXBean#getName(). + AttributeJvmMemoryPoolName = "jvm.memory.pool.name" + // The type of memory. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'heap', 'non_heap' + AttributeJvmMemoryType = "jvm.memory.type" + // Whether the thread is daemon or not. + // + // Type: boolean + // Requirement Level: Optional + // Stability: stable + AttributeJvmThreadDaemon = "jvm.thread.daemon" + // State of the thread. 
+ // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'runnable', 'blocked' + AttributeJvmThreadState = "jvm.thread.state" +) + +const ( + // Heap memory + AttributeJvmMemoryTypeHeap = "heap" + // Non-heap memory + AttributeJvmMemoryTypeNonHeap = "non_heap" +) + +const ( + // A thread that has not yet started is in this state + AttributeJvmThreadStateNew = "new" + // A thread executing in the Java virtual machine is in this state + AttributeJvmThreadStateRunnable = "runnable" + // A thread that is blocked waiting for a monitor lock is in this state + AttributeJvmThreadStateBlocked = "blocked" + // A thread that is waiting indefinitely for another thread to perform a particular action is in this state + AttributeJvmThreadStateWaiting = "waiting" + // A thread that is waiting for another thread to perform an action for up to a specified waiting time is in this state + AttributeJvmThreadStateTimedWaiting = "timed_waiting" + // A thread that has exited is in this state + AttributeJvmThreadStateTerminated = "terminated" +) + +// Kubernetes resource attributes. +const ( + // The name of the cluster. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry-cluster' + AttributeK8SClusterName = "k8s.cluster.name" + // A pseudo-ID for the cluster, set to the UID of the kube-system namespace. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d' + // Note: K8S doesn't have support for obtaining a cluster ID. If this is ever + // added, we will recommend collecting the k8s.cluster.uid through the + // official APIs. In the meantime, we are able to use the uid of the + // kube-system namespace as a proxy for cluster ID. Read on for the + // rationale.Every object created in a K8S cluster is assigned a distinct UID. The + // kube-system namespace is used by Kubernetes itself and will exist + // for the lifetime of the cluster. Using the uid of the kube-system + // namespace is a reasonable proxy for the K8S ClusterID as it will only + // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are + // UUIDs as standardized by + // ISO/IEC 9834-8 and ITU-T X.667. + // Which states:
    + // If generated according to one of the mechanisms defined in Rec.
    + // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be + // different from all other UUIDs generated before 3603 A.D., or is + // extremely likely to be different (depending on the mechanism + // chosen).Therefore, UIDs between clusters should be extremely unlikely to + // conflict. + AttributeK8SClusterUID = "k8s.cluster.uid" + // The name of the Container from Pod specification, must be unique within a Pod. + // Container runtime usually uses different globally unique name (container.name). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'redis' + AttributeK8SContainerName = "k8s.container.name" + // Number of times the container was restarted. This attribute can be used to + // identify a particular container (running or stopped) within a container spec. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + AttributeK8SContainerRestartCount = "k8s.container.restart_count" + // Last terminated reason of the Container. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Evicted', 'Error' + AttributeK8SContainerStatusLastTerminatedReason = "k8s.container.status.last_terminated_reason" + // The name of the CronJob. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SCronJobName = "k8s.cronjob.name" + // The UID of the CronJob. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SCronJobUID = "k8s.cronjob.uid" + // The name of the DaemonSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SDaemonSetName = "k8s.daemonset.name" + // The UID of the DaemonSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SDaemonSetUID = "k8s.daemonset.uid" + // The name of the Deployment. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SDeploymentName = "k8s.deployment.name" + // The UID of the Deployment. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SDeploymentUID = "k8s.deployment.uid" + // The name of the Job. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SJobName = "k8s.job.name" + // The UID of the Job. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SJobUID = "k8s.job.uid" + // The name of the namespace that the pod is running in. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'default' + AttributeK8SNamespaceName = "k8s.namespace.name" + // The name of the Node. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'node-1' + AttributeK8SNodeName = "k8s.node.name" + // The UID of the Node. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + AttributeK8SNodeUID = "k8s.node.uid" + // The name of the Pod. 
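A sketch of the kube-system proxy described in the k8s.cluster.uid note above, using client-go. Assumptions: the process runs in-cluster with credentials that can read namespaces; the API calls are the standard client-go ones.

```go
package main

import (
    "context"
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
)

// clusterUID resolves k8s.cluster.uid (AttributeK8SClusterUID) as the UID of
// the kube-system namespace, the proxy recommended in the note above.
func clusterUID(ctx context.Context) (string, error) {
    cfg, err := rest.InClusterConfig()
    if err != nil {
        return "", err
    }
    clientset, err := kubernetes.NewForConfig(cfg)
    if err != nil {
        return "", err
    }
    ns, err := clientset.CoreV1().Namespaces().Get(ctx, "kube-system", metav1.GetOptions{})
    if err != nil {
        return "", err
    }
    return string(ns.UID), nil
}

func main() {
    uid, err := clusterUID(context.Background())
    fmt.Println(uid, err)
}
```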
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry-pod-autoconf' + AttributeK8SPodName = "k8s.pod.name" + // The UID of the Pod. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SPodUID = "k8s.pod.uid" + // The name of the ReplicaSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SReplicaSetName = "k8s.replicaset.name" + // The UID of the ReplicaSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SReplicaSetUID = "k8s.replicaset.uid" + // The name of the StatefulSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SStatefulSetName = "k8s.statefulset.name" + // The UID of the StatefulSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SStatefulSetUID = "k8s.statefulset.uid" +) + +// Describes Linux Memory attributes +const ( + // The Linux Slab memory state + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'reclaimable', 'unreclaimable' + AttributeLinuxMemorySlabState = "linux.memory.slab.state" +) + +const ( + // reclaimable + AttributeLinuxMemorySlabStateReclaimable = "reclaimable" + // unreclaimable + AttributeLinuxMemorySlabStateUnreclaimable = "unreclaimable" +) + +// Log attributes +const ( + // The stream associated with the log. See below for a list of well-known values. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeLogIostream = "log.iostream" +) + +const ( + // Logs from stdout stream + AttributeLogIostreamStdout = "stdout" + // Events from stderr stream + AttributeLogIostreamStderr = "stderr" +) + +// Attributes for a file to which log was emitted. +const ( + // The basename of the file. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'audit.log' + AttributeLogFileName = "log.file.name" + // The basename of the file, with symlinks resolved. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'uuid.log' + AttributeLogFileNameResolved = "log.file.name_resolved" + // The full path to the file. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/var/log/mysql/audit.log' + AttributeLogFilePath = "log.file.path" + // The full path to the file, with symlinks resolved. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/var/lib/docker/uuid.log' + AttributeLogFilePathResolved = "log.file.path_resolved" +) + +// The generic attributes that may be used in any Log Record. +const ( + // The complete original Log Record. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '77 <86>1 2015-08-06T21:58:59.694Z 192.168.2.133 inactive - - - + // Something happened', '[INFO] 8/3/24 12:34:56 Something happened' + // Note: This value MAY be added when processing a Log Record which was originally + // transmitted as a string or equivalent data type AND the Body field of the Log + // Record does not contain the same value. (e.g. 
a syslog or a log record read + // from a file.) + AttributeLogRecordOriginal = "log.record.original" + // A unique identifier for the Log Record. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV' + // Note: If an id is provided, other log records with the same id will be + // considered duplicates and can be removed safely. This means, that two + // distinguishable log records MUST have different values. + // The id MAY be an Universally Unique Lexicographically Sortable Identifier + // (ULID), but other identifiers (e.g. UUID) may be used as needed. + AttributeLogRecordUID = "log.record.uid" +) + +// Attributes describing telemetry around messaging systems and messaging +// activities. +const ( + // The number of messages sent, received, or processed in the scope of the + // batching operation. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set messaging.batch.message_count on spans + // that operate with a single message. When a messaging client library supports + // both batch and single-message API for the same operation, instrumentations + // SHOULD use messaging.batch.message_count for batching APIs and SHOULD NOT use + // it for single-message APIs. + AttributeMessagingBatchMessageCount = "messaging.batch.message_count" + // A unique identifier for the client that consumes or produces a message. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'client-5', 'myhost@8742@s8083jm' + AttributeMessagingClientID = "messaging.client.id" + // The name of the consumer group with which a consumer is associated. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'my-group', 'indexer' + // Note: Semantic conventions for individual messaging systems SHOULD document + // whether messaging.consumer.group.name is applicable and what it means in the + // context of that system. + AttributeMessagingConsumerGroupName = "messaging.consumer.group.name" + // A boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeMessagingDestinationAnonymous = "messaging.destination.anonymous" + // The message destination name + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MyQueue', 'MyTopic' + // Note: Destination name SHOULD uniquely identify a specific queue, topic or + // other entity within the broker. If + // the broker doesn't have such notion, the destination name SHOULD uniquely + // identify the broker. + AttributeMessagingDestinationName = "messaging.destination.name" + // The identifier of the partition messages are sent to or received from, unique + // within the messaging.destination.name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1' + AttributeMessagingDestinationPartitionID = "messaging.destination.partition.id" + // The name of the destination subscription from which a message is consumed. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'subscription-a' + // Note: Semantic conventions for individual messaging systems SHOULD document + // whether messaging.destination.subscription.name is applicable and what it means + // in the context of that system. + AttributeMessagingDestinationSubscriptionName = "messaging.destination.subscription.name" + // Low cardinality representation of the messaging destination name + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/customers/{customerID}' + // Note: Destination names could be constructed from templates. An example would + // be a destination name involving a user name or product id. Although the + // destination name in this case is of high cardinality, the underlying template + // is of low cardinality and can be effectively used for grouping and aggregation. + AttributeMessagingDestinationTemplate = "messaging.destination.template" + // A boolean that is true if the message destination is temporary and might not + // exist anymore after messages are processed. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeMessagingDestinationTemporary = "messaging.destination.temporary" + // The size of the message body in bytes. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1439 + // Note: This can refer to both the compressed or uncompressed body size. If both + // sizes are known, the uncompressed + // body size should be used. + AttributeMessagingMessageBodySize = "messaging.message.body.size" + // The conversation ID identifying the conversation to which the message belongs, + // represented as a string. Sometimes called "Correlation ID". + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MyConversationID' + AttributeMessagingMessageConversationID = "messaging.message.conversation_id" + // The size of the message body and metadata in bytes. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 2738 + // Note: This can refer to both the compressed or uncompressed size. If both sizes + // are known, the uncompressed + // size should be used. + AttributeMessagingMessageEnvelopeSize = "messaging.message.envelope.size" + // A value used by the messaging system as an identifier for the message, + // represented as a string. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + AttributeMessagingMessageID = "messaging.message.id" + // The system-specific name of the messaging operation. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'ack', 'nack', 'send' + AttributeMessagingOperationName = "messaging.operation.name" + // A string identifying the type of the messaging operation. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: If a custom value is used, it MUST be of low cardinality. + AttributeMessagingOperationType = "messaging.operation.type" + // The messaging system as identified by the client instrumentation. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: The actual messaging system may differ from the one known by the client. 
+ // For example, when using Kafka client libraries to communicate with Azure Event
+ // Hubs, the messaging.system is set to kafka based on the instrumentation's best
+ // knowledge.
+ AttributeMessagingSystem = "messaging.system"
+)
+
+const (
+ // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created
+ AttributeMessagingOperationTypePublish = "publish"
+ // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios
+ AttributeMessagingOperationTypeCreate = "create"
+ // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages
+ AttributeMessagingOperationTypeReceive = "receive"
+ // One or more messages are processed by a consumer
+ AttributeMessagingOperationTypeProcess = "process"
+ // One or more messages are settled
+ AttributeMessagingOperationTypeSettle = "settle"
+ // Deprecated. Use `process` instead
+ AttributeMessagingOperationTypeDeliver = "deliver"
+)
+
+const (
+ // Apache ActiveMQ
+ AttributeMessagingSystemActivemq = "activemq"
+ // Amazon Simple Queue Service (SQS)
+ AttributeMessagingSystemAWSSqs = "aws_sqs"
+ // Azure Event Grid
+ AttributeMessagingSystemEventgrid = "eventgrid"
+ // Azure Event Hubs
+ AttributeMessagingSystemEventhubs = "eventhubs"
+ // Azure Service Bus
+ AttributeMessagingSystemServicebus = "servicebus"
+ // Google Cloud Pub/Sub
+ AttributeMessagingSystemGCPPubsub = "gcp_pubsub"
+ // Java Message Service
+ AttributeMessagingSystemJms = "jms"
+ // Apache Kafka
+ AttributeMessagingSystemKafka = "kafka"
+ // RabbitMQ
+ AttributeMessagingSystemRabbitmq = "rabbitmq"
+ // Apache RocketMQ
+ AttributeMessagingSystemRocketmq = "rocketmq"
+ // Apache Pulsar
+ AttributeMessagingSystemPulsar = "pulsar"
+)
+
+// This group describes attributes specific to Apache Kafka.
+const (
+ // Message keys in Kafka are used for grouping alike messages to ensure they're
+ // processed on the same partition. They differ from messaging.message.id in that
+ // they're not unique. If the key is null, the attribute MUST NOT be set.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'myKey'
+ // Note: If the key type is not string, its string representation has to be
+ // supplied for the attribute. If the key has no unambiguous, canonical string
+ // form, don't include its value.
+ AttributeMessagingKafkaMessageKey = "messaging.kafka.message.key"
+ // A boolean that is true if the message is a tombstone.
+ //
+ // Type: boolean
+ // Requirement Level: Optional
+ // Stability: experimental
+ AttributeMessagingKafkaMessageTombstone = "messaging.kafka.message.tombstone"
+ // The offset of a record in the corresponding Kafka partition.
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 42
+ AttributeMessagingKafkaOffset = "messaging.kafka.offset"
+)
+
+// This group describes attributes specific to RabbitMQ.
+const (
+ // RabbitMQ message routing key.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'myKey'
+ AttributeMessagingRabbitmqDestinationRoutingKey = "messaging.rabbitmq.destination.routing_key"
+ // RabbitMQ message delivery tag.
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 123
+ AttributeMessagingRabbitmqMessageDeliveryTag = "messaging.rabbitmq.message.delivery_tag"
+)
+
+// This group describes attributes specific to RocketMQ.
+const (
+ // Model of message consumption. This only applies to consumer spans.
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: experimental
+ AttributeMessagingRocketmqConsumptionModel = "messaging.rocketmq.consumption_model"
+ // The delay time level for a delay message, which determines the message delay
+ // time.
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 3
+ AttributeMessagingRocketmqMessageDelayTimeLevel = "messaging.rocketmq.message.delay_time_level"
+ // The timestamp in milliseconds at which the delay message is expected to be
+ // delivered to the consumer.
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 1665987217045
+ AttributeMessagingRocketmqMessageDeliveryTimestamp = "messaging.rocketmq.message.delivery_timestamp"
+ // It is essential for FIFO messages. Messages that belong to the same message
+ // group are always processed one by one within the same consumer group.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'myMessageGroup'
+ AttributeMessagingRocketmqMessageGroup = "messaging.rocketmq.message.group"
+ // Key(s) of the message, another way to mark a message besides the message id.
+ //
+ // Type: string[]
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'keyA', 'keyB'
+ AttributeMessagingRocketmqMessageKeys = "messaging.rocketmq.message.keys"
+ // The secondary classifier of a message besides the topic.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'tagA'
+ AttributeMessagingRocketmqMessageTag = "messaging.rocketmq.message.tag"
+ // Type of message.
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: experimental
+ AttributeMessagingRocketmqMessageType = "messaging.rocketmq.message.type"
+ // Namespace of RocketMQ resources; resources in different namespaces are
+ // isolated from each other.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'myNamespace'
+ AttributeMessagingRocketmqNamespace = "messaging.rocketmq.namespace"
+)
+
+const (
+ // Clustering consumption model
+ AttributeMessagingRocketmqConsumptionModelClustering = "clustering"
+ // Broadcasting consumption model
+ AttributeMessagingRocketmqConsumptionModelBroadcasting = "broadcasting"
+)
+
+const (
+ // Normal message
+ AttributeMessagingRocketmqMessageTypeNormal = "normal"
+ // FIFO message
+ AttributeMessagingRocketmqMessageTypeFifo = "fifo"
+ // Delay message
+ AttributeMessagingRocketmqMessageTypeDelay = "delay"
+ // Transaction message
+ AttributeMessagingRocketmqMessageTypeTransaction = "transaction"
+)
+
+// This group describes attributes specific to GCP Pub/Sub.
+const (
+ // The ack deadline in seconds set for the modify ack deadline request.
+ // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 10 + AttributeMessagingGCPPubsubMessageAckDeadline = "messaging.gcp_pubsub.message.ack_deadline" + // The ack id for a given message. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'ack_id' + AttributeMessagingGCPPubsubMessageAckID = "messaging.gcp_pubsub.message.ack_id" + // The delivery attempt for a given message. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 2 + AttributeMessagingGCPPubsubMessageDeliveryAttempt = "messaging.gcp_pubsub.message.delivery_attempt" + // The ordering key for a given message. If the attribute is not present, the + // message does not have an ordering key. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'ordering_key' + AttributeMessagingGCPPubsubMessageOrderingKey = "messaging.gcp_pubsub.message.ordering_key" +) + +// This group describes attributes specific to Azure Service Bus. +const ( + // Describes the settlement type. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeMessagingServicebusDispositionStatus = "messaging.servicebus.disposition_status" + // Number of deliveries that have been attempted for this message. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 2 + AttributeMessagingServicebusMessageDeliveryCount = "messaging.servicebus.message.delivery_count" + // The UTC epoch seconds at which the message has been accepted and stored in the + // entity. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1701393730 + AttributeMessagingServicebusMessageEnqueuedTime = "messaging.servicebus.message.enqueued_time" +) + +const ( + // Message is completed + AttributeMessagingServicebusDispositionStatusComplete = "complete" + // Message is abandoned + AttributeMessagingServicebusDispositionStatusAbandon = "abandon" + // Message is sent to dead letter queue + AttributeMessagingServicebusDispositionStatusDeadLetter = "dead_letter" + // Message is deferred + AttributeMessagingServicebusDispositionStatusDefer = "defer" +) + +// This group describes attributes specific to Azure Event Hubs. +const ( + // The UTC epoch seconds at which the message has been accepted and stored in the + // entity. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1701393730 + AttributeMessagingEventhubsMessageEnqueuedTime = "messaging.eventhubs.message.enqueued_time" +) + +// These attributes may be used for any network related operation. +const ( + // The ISO 3166-1 alpha-2 2-character country code associated with the mobile + // carrier network. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'DE' + AttributeNetworkCarrierIcc = "network.carrier.icc" + // The mobile carrier country code. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '310' + AttributeNetworkCarrierMcc = "network.carrier.mcc" + // The mobile carrier network code. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '001' + AttributeNetworkCarrierMnc = "network.carrier.mnc" + // The name of the mobile carrier. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'sprint' + AttributeNetworkCarrierName = "network.carrier.name" + // This describes more details regarding the connection.type. It may be the type + // of cell technology connection, but it could be used for describing details + // about a wifi connection. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'LTE' + AttributeNetworkConnectionSubtype = "network.connection.subtype" + // The internet connection type. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'wifi' + AttributeNetworkConnectionType = "network.connection.type" + // The network IO operation direction. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'transmit' + AttributeNetworkIoDirection = "network.io.direction" + // Local address of the network connection - IP address or Unix domain socket + // name. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '10.1.2.80', '/tmp/my.sock' + AttributeNetworkLocalAddress = "network.local.address" + // Local port number of the network connection. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 65123 + AttributeNetworkLocalPort = "network.local.port" + // Peer address of the network connection - IP address or Unix domain socket name. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '10.1.2.80', '/tmp/my.sock' + AttributeNetworkPeerAddress = "network.peer.address" + // Peer port number of the network connection. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 65123 + AttributeNetworkPeerPort = "network.peer.port" + // OSI application layer or non-OSI equivalent. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'amqp', 'http', 'mqtt' + // Note: The value SHOULD be normalized to lowercase. + AttributeNetworkProtocolName = "network.protocol.name" + // The actual version of the protocol used for network communication. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '1.1', '2' + // Note: If protocol version is subject to negotiation (for example using ALPN), + // this attribute SHOULD be set to the negotiated version. If the actual protocol + // version is not known, this attribute SHOULD NOT be set. + AttributeNetworkProtocolVersion = "network.protocol.version" + // OSI transport layer or inter-process communication method. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'tcp', 'udp' + // Note: The value SHOULD be normalized to lowercase.Consider always setting the + // transport when setting a port number, since + // a port number is ambiguous without knowing the transport. For example + // different processes could be listening on TCP port 12345 and UDP port 12345. + AttributeNetworkTransport = "network.transport" + // OSI network layer or non-OSI equivalent. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'ipv4', 'ipv6' + // Note: The value SHOULD be normalized to lowercase. 
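+ // For example (editor's illustrative sketch, not part of the generated
+ // conventions; it assumes the go.opentelemetry.io/otel/attribute package and an
+ // OTel trace.Span named span, neither of which this file references), a span
+ // covering a TCP/IPv4 exchange might record:
+ //
+ //	span.SetAttributes(
+ //		attribute.String(AttributeNetworkTransport, AttributeNetworkTransportTCP),
+ //		attribute.String(AttributeNetworkType, AttributeNetworkTypeIpv4),
+ //		attribute.Int(AttributeNetworkPeerPort, 65123),
+ //	)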
+ AttributeNetworkType = "network.type" +) + +const ( + // GPRS + AttributeNetworkConnectionSubtypeGprs = "gprs" + // EDGE + AttributeNetworkConnectionSubtypeEdge = "edge" + // UMTS + AttributeNetworkConnectionSubtypeUmts = "umts" + // CDMA + AttributeNetworkConnectionSubtypeCdma = "cdma" + // EVDO Rel. 0 + AttributeNetworkConnectionSubtypeEvdo0 = "evdo_0" + // EVDO Rev. A + AttributeNetworkConnectionSubtypeEvdoA = "evdo_a" + // CDMA2000 1XRTT + AttributeNetworkConnectionSubtypeCdma20001xrtt = "cdma2000_1xrtt" + // HSDPA + AttributeNetworkConnectionSubtypeHsdpa = "hsdpa" + // HSUPA + AttributeNetworkConnectionSubtypeHsupa = "hsupa" + // HSPA + AttributeNetworkConnectionSubtypeHspa = "hspa" + // IDEN + AttributeNetworkConnectionSubtypeIden = "iden" + // EVDO Rev. B + AttributeNetworkConnectionSubtypeEvdoB = "evdo_b" + // LTE + AttributeNetworkConnectionSubtypeLte = "lte" + // EHRPD + AttributeNetworkConnectionSubtypeEhrpd = "ehrpd" + // HSPAP + AttributeNetworkConnectionSubtypeHspap = "hspap" + // GSM + AttributeNetworkConnectionSubtypeGsm = "gsm" + // TD-SCDMA + AttributeNetworkConnectionSubtypeTdScdma = "td_scdma" + // IWLAN + AttributeNetworkConnectionSubtypeIwlan = "iwlan" + // 5G NR (New Radio) + AttributeNetworkConnectionSubtypeNr = "nr" + // 5G NRNSA (New Radio Non-Standalone) + AttributeNetworkConnectionSubtypeNrnsa = "nrnsa" + // LTE CA + AttributeNetworkConnectionSubtypeLteCa = "lte_ca" +) + +const ( + // wifi + AttributeNetworkConnectionTypeWifi = "wifi" + // wired + AttributeNetworkConnectionTypeWired = "wired" + // cell + AttributeNetworkConnectionTypeCell = "cell" + // unavailable + AttributeNetworkConnectionTypeUnavailable = "unavailable" + // unknown + AttributeNetworkConnectionTypeUnknown = "unknown" +) + +const ( + // transmit + AttributeNetworkIoDirectionTransmit = "transmit" + // receive + AttributeNetworkIoDirectionReceive = "receive" +) + +const ( + // TCP + AttributeNetworkTransportTCP = "tcp" + // UDP + AttributeNetworkTransportUDP = "udp" + // Named or anonymous pipe + AttributeNetworkTransportPipe = "pipe" + // Unix domain socket + AttributeNetworkTransportUnix = "unix" + // QUIC + AttributeNetworkTransportQUIC = "quic" +) + +const ( + // IPv4 + AttributeNetworkTypeIpv4 = "ipv4" + // IPv6 + AttributeNetworkTypeIpv6 = "ipv6" +) + +// An OCI image manifest. +const ( + // The digest of the OCI image manifest. For container images specifically is the + // digest by which the container image is known. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: + // 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4' + // Note: Follows OCI Image Manifest Specification, and specifically the Digest + // property. + // An example can be found in Example Image Manifest. + AttributeOciManifestDigest = "oci.manifest.digest" +) + +// Attributes used by the OpenTracing Shim layer. +const ( + // Parent-child Reference type + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: The causal relationship between a child Span and a parent Span. + AttributeOpentracingRefType = "opentracing.ref_type" +) + +const ( + // The parent Span depends on the child Span in some capacity + AttributeOpentracingRefTypeChildOf = "child_of" + // The parent Span doesn't depend in any way on the result of the child Span + AttributeOpentracingRefTypeFollowsFrom = "follows_from" +) + +// The operating system (OS) on which the process represented by this resource +// is running. 
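+//
+// For example (editor's illustrative sketch, not part of the generated
+// conventions; it assumes the go.opentelemetry.io/otel/attribute and
+// go.opentelemetry.io/otel/sdk/resource packages, which this file does not
+// import), an OS resource could be described as:
+//
+//	res := resource.NewWithAttributes("", // schema URL omitted in this sketch
+//		attribute.String(AttributeOSType, AttributeOSTypeLinux),
+//		attribute.String(AttributeOSName, "Ubuntu"),
+//		attribute.String(AttributeOSVersion, "18.04.1"),
+//	)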
+const (
+ // Unique identifier for a particular build or compilation of the operating
+ // system.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'TQ3C.230805.001.B2', '20E247', '22621'
+ AttributeOSBuildID = "os.build_id"
+ // Human readable (not intended to be parsed) OS version information, e.g. as
+ // reported by the ver or lsb_release -a commands.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS'
+ AttributeOSDescription = "os.description"
+ // Human readable operating system name.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'iOS', 'Android', 'Ubuntu'
+ AttributeOSName = "os.name"
+ // The operating system type.
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: experimental
+ AttributeOSType = "os.type"
+ // The version string of the operating system as defined in Version Attributes.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '14.2.1', '18.04.1'
+ AttributeOSVersion = "os.version"
+)
+
+const (
+ // Microsoft Windows
+ AttributeOSTypeWindows = "windows"
+ // Linux
+ AttributeOSTypeLinux = "linux"
+ // Apple Darwin
+ AttributeOSTypeDarwin = "darwin"
+ // FreeBSD
+ AttributeOSTypeFreeBSD = "freebsd"
+ // NetBSD
+ AttributeOSTypeNetBSD = "netbsd"
+ // OpenBSD
+ AttributeOSTypeOpenBSD = "openbsd"
+ // DragonFly BSD
+ AttributeOSTypeDragonflyBSD = "dragonflybsd"
+ // HP-UX (Hewlett Packard Unix)
+ AttributeOSTypeHPUX = "hpux"
+ // AIX (Advanced Interactive eXecutive)
+ AttributeOSTypeAIX = "aix"
+ // SunOS, Oracle Solaris
+ AttributeOSTypeSolaris = "solaris"
+ // IBM z/OS
+ AttributeOSTypeZOS = "z_os"
+)
+
+// Attributes reserved for OpenTelemetry.
+const (
+ // Name of the code, either "OK" or "ERROR". MUST NOT be set
+ // if the status code is UNSET.
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: stable
+ AttributeOTelStatusCode = "otel.status_code"
+ // Description of the Status if it has a value, otherwise not set.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 'resource not found'
+ AttributeOTelStatusDescription = "otel.status_description"
+)
+
+const (
+ // The operation has been validated by an Application developer or Operator to have completed successfully
+ AttributeOTelStatusCodeOk = "OK"
+ // The operation contains an error
+ AttributeOTelStatusCodeError = "ERROR"
+)
+
+// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
+// concepts.
+const (
+ // The name of the instrumentation scope - (InstrumentationScope.Name in OTLP).
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 'io.opentelemetry.contrib.mongodb'
+ AttributeOTelScopeName = "otel.scope.name"
+ // The version of the instrumentation scope - (InstrumentationScope.Version in
+ // OTLP).
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: '1.0.0'
+ AttributeOTelScopeVersion = "otel.scope.version"
+)
+
+// Operations that access some remote service.
+const (
+ // The service.name of the remote service. SHOULD be equal to the actual
+ // service.name resource attribute of the remote service if any.
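+ // For example (editor's illustrative sketch; assumes the
+ // go.opentelemetry.io/otel/attribute package, which this file does not
+ // import), a client span calling a remote auth service might set:
+ //
+ //	attribute.String(AttributePeerService, "AuthTokenCache")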
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'AuthTokenCache'
+ AttributePeerService = "peer.service"
+)
+
+// An operating system process.
+const (
+ // The command used to launch the process (i.e. the command name). On Linux based
+ // systems, can be set to the zeroth string in proc/[pid]/cmdline. On Windows, can
+ // be set to the first parameter extracted from GetCommandLineW.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'cmd/otelcol'
+ AttributeProcessCommand = "process.command"
+ // All the command arguments (including the command/executable itself) as received
+ // by the process. On Linux-based systems (and some other Unixoid systems
+ // supporting procfs), can be set according to the list of null-delimited strings
+ // extracted from proc/[pid]/cmdline. For libc-based executables, this would be
+ // the full argv vector passed to main.
+ //
+ // Type: string[]
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'cmd/otelcol', '--config=config.yaml'
+ AttributeProcessCommandArgs = "process.command_args"
+ // The full command used to launch the process as a single string representing the
+ // full command. On Windows, can be set to the result of GetCommandLineW. Do not
+ // set this if you have to assemble it just for monitoring; use
+ // process.command_args instead.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'C:\\cmd\\otelcol --config="my directory\\config.yaml"'
+ AttributeProcessCommandLine = "process.command_line"
+ // Specifies whether the context switches for this data point were voluntary or
+ // involuntary.
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: experimental
+ AttributeProcessContextSwitchType = "process.context_switch_type"
+ // The date and time the process was created, in ISO 8601 format.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '2023-11-21T09:25:34.853Z'
+ AttributeProcessCreationTime = "process.creation.time"
+ // The name of the process executable. On Linux based systems, can be set to the
+ // Name in proc/[pid]/status. On Windows, can be set to the base name of
+ // GetProcessImageFileNameW.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'otelcol'
+ AttributeProcessExecutableName = "process.executable.name"
+ // The full path to the process executable. On Linux based systems, can be set to
+ // the target of proc/[pid]/exe. On Windows, can be set to the result of
+ // GetProcessImageFileNameW.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '/usr/bin/cmd/otelcol'
+ AttributeProcessExecutablePath = "process.executable.path"
+ // The exit code of the process.
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 127
+ AttributeProcessExitCode = "process.exit.code"
+ // The date and time the process exited, in ISO 8601 format.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '2023-11-21T09:26:12.315Z'
+ AttributeProcessExitTime = "process.exit.time"
+ // The PID of the process's group leader. This is also the process group ID (PGID)
+ // of the process.
+ // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 23 + AttributeProcessGroupLeaderPID = "process.group_leader.pid" + // Whether the process is connected to an interactive shell. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeProcessInteractive = "process.interactive" + // The username of the user that owns the process. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'root' + AttributeProcessOwner = "process.owner" + // The type of page fault for this data point. Type major is for major/hard page + // faults, and minor is for minor/soft page faults. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeProcessPagingFaultType = "process.paging.fault_type" + // Parent Process identifier (PPID). + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 111 + AttributeProcessParentPID = "process.parent_pid" + // Process identifier (PID). + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1234 + AttributeProcessPID = "process.pid" + // The real user ID (RUID) of the process. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1000 + AttributeProcessRealUserID = "process.real_user.id" + // The username of the real user of the process. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'operator' + AttributeProcessRealUserName = "process.real_user.name" + // An additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + AttributeProcessRuntimeDescription = "process.runtime.description" + // The name of the runtime of this process. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'OpenJDK Runtime Environment' + AttributeProcessRuntimeName = "process.runtime.name" + // The version of the runtime of this process, as returned by the runtime without + // modification. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '14.0.2' + AttributeProcessRuntimeVersion = "process.runtime.version" + // The saved user ID (SUID) of the process. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1002 + AttributeProcessSavedUserID = "process.saved_user.id" + // The username of the saved user. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'operator' + AttributeProcessSavedUserName = "process.saved_user.name" + // The PID of the process's session leader. This is also the session ID (SID) of + // the process. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 14 + AttributeProcessSessionLeaderPID = "process.session_leader.pid" + // The effective user ID (EUID) of the process. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1001 + AttributeProcessUserID = "process.user.id" + // The username of the effective user of the process. 
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'root'
+ AttributeProcessUserName = "process.user.name"
+ // Virtual process identifier.
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 12
+ // Note: The process ID within a PID namespace. This is not necessarily unique
+ // across all processes on the host, but it is unique within the process namespace
+ // that the process exists within.
+ AttributeProcessVpid = "process.vpid"
+)
+
+const (
+ // voluntary
+ AttributeProcessContextSwitchTypeVoluntary = "voluntary"
+ // involuntary
+ AttributeProcessContextSwitchTypeInvoluntary = "involuntary"
+)
+
+const (
+ // major
+ AttributeProcessPagingFaultTypeMajor = "major"
+ // minor
+ AttributeProcessPagingFaultTypeMinor = "minor"
+)
+
+// Attributes for remote procedure calls.
+const (
+ // The error codes of the Connect request. Error codes are always string values.
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: experimental
+ AttributeRPCConnectRPCErrorCode = "rpc.connect_rpc.error_code"
+ // The numeric status code of the gRPC request.
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: experimental
+ AttributeRPCGRPCStatusCode = "rpc.grpc.status_code"
+ // The error.code property of the response, if it is an error response.
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: -32700, 100
+ AttributeRPCJsonrpcErrorCode = "rpc.jsonrpc.error_code"
+ // The error.message property of the response, if it is an error response.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'Parse error', 'User already exists'
+ AttributeRPCJsonrpcErrorMessage = "rpc.jsonrpc.error_message"
+ // The id property of the request or response. Since the protocol allows the id to
+ // be an int, a string, null, or missing (for notifications), the value is expected
+ // to be cast to string for simplicity. Use an empty string in case of a null
+ // value. Omit entirely if this is a notification.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '10', 'request-7', ''
+ AttributeRPCJsonrpcRequestID = "rpc.jsonrpc.request_id"
+ // The protocol version, as in the jsonrpc property of the request/response. Since
+ // JSON-RPC 1.0 doesn't specify this, the value can be omitted.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '2.0', '1.0'
+ AttributeRPCJsonrpcVersion = "rpc.jsonrpc.version"
+ // Compressed size of the message in bytes.
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: experimental
+ AttributeRPCMessageCompressedSize = "rpc.message.compressed_size"
+ // MUST be calculated as two different counters starting from 1: one for sent
+ // messages and one for received messages.
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Note: This way we guarantee that the values will be consistent between
+ // different implementations.
+ AttributeRPCMessageID = "rpc.message.id"
+ // Whether this is a received or sent message.
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: experimental
+ AttributeRPCMessageType = "rpc.message.type"
+ // Uncompressed size of the message in bytes.
+ // + // Type: int + // Requirement Level: Optional + // Stability: experimental + AttributeRPCMessageUncompressedSize = "rpc.message.uncompressed_size" + // The name of the (logical) method being called, must be equal to the $method + // part in the span name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The code.function attribute may be used to store the latter + // (e.g., method actually executing the call on the server side, RPC client stub + // method on the client side). + AttributeRPCMethod = "rpc.method" + // The full (logical) name of the service being called, including its package + // name, if applicable. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing class. + // The code.namespace attribute may be used to store the latter (despite the + // attribute name, it may include a class name; e.g., class with method actually + // executing the call on the server side, RPC client stub class on the client + // side). + AttributeRPCService = "rpc.service" + // A string identifying the remoting system. See below for a list of well-known + // identifiers. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeRPCSystem = "rpc.system" +) + +const ( + // cancelled + AttributeRPCConnectRPCErrorCodeCancelled = "cancelled" + // unknown + AttributeRPCConnectRPCErrorCodeUnknown = "unknown" + // invalid_argument + AttributeRPCConnectRPCErrorCodeInvalidArgument = "invalid_argument" + // deadline_exceeded + AttributeRPCConnectRPCErrorCodeDeadlineExceeded = "deadline_exceeded" + // not_found + AttributeRPCConnectRPCErrorCodeNotFound = "not_found" + // already_exists + AttributeRPCConnectRPCErrorCodeAlreadyExists = "already_exists" + // permission_denied + AttributeRPCConnectRPCErrorCodePermissionDenied = "permission_denied" + // resource_exhausted + AttributeRPCConnectRPCErrorCodeResourceExhausted = "resource_exhausted" + // failed_precondition + AttributeRPCConnectRPCErrorCodeFailedPrecondition = "failed_precondition" + // aborted + AttributeRPCConnectRPCErrorCodeAborted = "aborted" + // out_of_range + AttributeRPCConnectRPCErrorCodeOutOfRange = "out_of_range" + // unimplemented + AttributeRPCConnectRPCErrorCodeUnimplemented = "unimplemented" + // internal + AttributeRPCConnectRPCErrorCodeInternal = "internal" + // unavailable + AttributeRPCConnectRPCErrorCodeUnavailable = "unavailable" + // data_loss + AttributeRPCConnectRPCErrorCodeDataLoss = "data_loss" + // unauthenticated + AttributeRPCConnectRPCErrorCodeUnauthenticated = "unauthenticated" +) + +const ( + // OK + AttributeRPCGRPCStatusCodeOk = "0" + // CANCELLED + AttributeRPCGRPCStatusCodeCancelled = "1" + // UNKNOWN + AttributeRPCGRPCStatusCodeUnknown = "2" + // INVALID_ARGUMENT + AttributeRPCGRPCStatusCodeInvalidArgument = "3" + // DEADLINE_EXCEEDED + AttributeRPCGRPCStatusCodeDeadlineExceeded = "4" + // NOT_FOUND + AttributeRPCGRPCStatusCodeNotFound = "5" + // ALREADY_EXISTS + AttributeRPCGRPCStatusCodeAlreadyExists = "6" + // PERMISSION_DENIED + AttributeRPCGRPCStatusCodePermissionDenied = "7" + // RESOURCE_EXHAUSTED + 
AttributeRPCGRPCStatusCodeResourceExhausted = "8"
+ // FAILED_PRECONDITION
+ AttributeRPCGRPCStatusCodeFailedPrecondition = "9"
+ // ABORTED
+ AttributeRPCGRPCStatusCodeAborted = "10"
+ // OUT_OF_RANGE
+ AttributeRPCGRPCStatusCodeOutOfRange = "11"
+ // UNIMPLEMENTED
+ AttributeRPCGRPCStatusCodeUnimplemented = "12"
+ // INTERNAL
+ AttributeRPCGRPCStatusCodeInternal = "13"
+ // UNAVAILABLE
+ AttributeRPCGRPCStatusCodeUnavailable = "14"
+ // DATA_LOSS
+ AttributeRPCGRPCStatusCodeDataLoss = "15"
+ // UNAUTHENTICATED
+ AttributeRPCGRPCStatusCodeUnauthenticated = "16"
+)
+
+const (
+ // sent
+ AttributeRPCMessageTypeSent = "SENT"
+ // received
+ AttributeRPCMessageTypeReceived = "RECEIVED"
+)
+
+const (
+ // gRPC
+ AttributeRPCSystemGRPC = "grpc"
+ // Java RMI
+ AttributeRPCSystemJavaRmi = "java_rmi"
+ // .NET WCF
+ AttributeRPCSystemDotnetWcf = "dotnet_wcf"
+ // Apache Dubbo
+ AttributeRPCSystemApacheDubbo = "apache_dubbo"
+ // Connect RPC
+ AttributeRPCSystemConnectRPC = "connect_rpc"
+)
+
+// These attributes may be used to describe the server in a connection-based
+// network interaction where there is one side that initiates the connection
+// (the client is the side that initiates the connection). This covers all TCP
+// network interactions since TCP is connection-based and one side initiates
+// the connection (an exception is made for peer-to-peer communication over TCP
+// where the "user-facing" surface of the protocol / API doesn't expose a clear
+// notion of client and server). This also covers UDP network interactions
+// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
+const (
+ // Server domain name if available without reverse DNS lookup; otherwise, IP
+ // address or Unix domain socket name.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock'
+ // Note: When observed from the client side, and when communicating through an
+ // intermediary, server.address SHOULD represent the server address behind any
+ // intermediaries, for example proxies, if it's available.
+ AttributeServerAddress = "server.address"
+ // Server port number.
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 80, 8080, 443
+ // Note: When observed from the client side, and when communicating through an
+ // intermediary, server.port SHOULD represent the server port behind any
+ // intermediaries, for example proxies, if it's available.
+ AttributeServerPort = "server.port"
+)
+
+// A service instance.
+const (
+ // The string ID of the service instance.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '627cc493-f310-47de-96bd-71410b7dec09'
+ // Note: MUST be unique for each instance of the same
+ // service.namespace, service.name pair (in other words, the
+ // service.namespace, service.name, service.instance.id triplet MUST be globally
+ // unique). The ID helps to
+ // distinguish instances of the same service that exist at the same time (e.g.
+ // instances of a horizontally scaled
+ // service). Implementations, such as SDKs, are recommended to generate a random
+ // Version 1 or Version 4 RFC
+ // 4122 UUID, but are free to use an inherent unique ID as the source of
+ // this value if stability is desirable. In that case, the ID SHOULD be used as
+ // the source of a UUID Version 5 and
+ // SHOULD use the following UUID as the namespace: 4d63009a-8d0f-11ee-
+ // aad7-4c796ed8e320. UUIDs are typically recommended, as only an opaque value for
+ // the purposes of identifying a service instance is
+ // needed. Similar to what can be seen in the man page for the
+ // /etc/machine-id file, the underlying
+ // data, such as pod name and namespace, should be treated as confidential; it is
+ // the user's choice to expose it
+ // or not via another resource attribute. For applications running behind an
+ // application server (like unicorn), we do not recommend using one identifier
+ // for all processes participating in the application. Instead, it's recommended
+ // that each division (e.g. a worker
+ // thread in unicorn) have its own instance.id. It's not recommended for a
+ // Collector to set service.instance.id if it can't unambiguously determine the
+ // service instance that is generating that telemetry. For instance, creating a
+ // UUID based on pod.name will
+ // likely be wrong, as the Collector might not know from which container within
+ // that pod the telemetry originated.
+ // However, Collectors can set the service.instance.id if they can unambiguously
+ // determine the service instance
+ // for that telemetry. This is typically the case for scraping receivers, as they
+ // know the target address and
+ // port.
+ AttributeServiceInstanceID = "service.instance.id"
+ // Logical name of the service.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 'shoppingcart'
+ // Note: MUST be the same for all instances of horizontally scaled services. If
+ // the value was not specified, SDKs MUST fall back to unknown_service:
+ // concatenated with process.executable.name, e.g. unknown_service:bash. If
+ // process.executable.name is not available, the value MUST be set to
+ // unknown_service.
+ AttributeServiceName = "service.name"
+ // A namespace for service.name.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'Shop'
+ // Note: A string value having a meaning that helps to distinguish a group of
+ // services, for example the team name that owns a group of services. service.name
+ // is expected to be unique within the same namespace. If service.namespace is not
+ // specified in the Resource then service.name is expected to be unique for all
+ // services that have no explicit namespace defined (so the empty/unspecified
+ // namespace is simply one more valid namespace). Zero-length namespace string is
+ // assumed equal to unspecified namespace.
+ AttributeServiceNamespace = "service.namespace"
+ // The version string of the service API or implementation. The format is not
+ // defined by these conventions.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: '2.0.0', 'a01dbef8a'
+ AttributeServiceVersion = "service.version"
+)
+
+// Session is defined as the period of time encompassing all activities
+// performed by the application and the actions executed by the end user.
+// Consequently, a Session is represented as a collection of Logs, Events, and
+// Spans emitted by the Client Application throughout the Session's duration.
+// Each Session is assigned a unique identifier, which is included as an
+// attribute in the Logs, Events, and Spans generated during the Session's
+// lifecycle.
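+//
+// For example (editor's illustrative sketch; assumes the
+// go.opentelemetry.io/otel/attribute package, which this file does not
+// import), telemetry emitted during a session might be annotated with:
+//
+//	attribute.String(AttributeSessionID, "00112233-4455-6677-8899-aabbccddeeff")
+//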
+// When a session reaches end of life, typically due to user inactivity or +// session timeout, a new session identifier will be assigned. The previous +// session identifier may be provided by the instrumentation so that telemetry +// backends can link the two sessions. +const ( + // A unique id to identify a session. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '00112233-4455-6677-8899-aabbccddeeff' + AttributeSessionID = "session.id" + // The previous session.id for this user, when known. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '00112233-4455-6677-8899-aabbccddeeff' + AttributeSessionPreviousID = "session.previous_id" +) + +// SignalR attributes +const ( + // SignalR HTTP connection closure status. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'app_shutdown', 'timeout' + AttributeSignalrConnectionStatus = "signalr.connection.status" + // SignalR transport type + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'web_sockets', 'long_polling' + AttributeSignalrTransport = "signalr.transport" +) + +const ( + // The connection was closed normally + AttributeSignalrConnectionStatusNormalClosure = "normal_closure" + // The connection was closed due to a timeout + AttributeSignalrConnectionStatusTimeout = "timeout" + // The connection was closed because the app is shutting down + AttributeSignalrConnectionStatusAppShutdown = "app_shutdown" +) + +const ( + // ServerSentEvents protocol + AttributeSignalrTransportServerSentEvents = "server_sent_events" + // LongPolling protocol + AttributeSignalrTransportLongPolling = "long_polling" + // WebSockets protocol + AttributeSignalrTransportWebSockets = "web_sockets" +) + +// These attributes may be used to describe the sender of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // Source address - domain name if available without reverse DNS lookup; + // otherwise, IP address or Unix domain socket name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the destination side, and when communicating through + // an intermediary, source.address SHOULD represent the source address behind any + // intermediaries, for example proxies, if it's available. 
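+ // For example (editor's illustrative sketch; assumes the
+ // go.opentelemetry.io/otel/attribute package, which this file does not
+ // import), a packet-level exchange might be described with:
+ //
+ //	attribute.String(AttributeSourceAddress, "10.1.2.80"),
+ //	attribute.Int(AttributeSourcePort, 3389),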
+ AttributeSourceAddress = "source.address" + // Source port number + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 3389, 2888 + AttributeSourcePort = "source.port" +) + +// Describes System attributes +const ( + // The device identifier + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '(identifier)' + AttributeSystemDevice = "system.device" +) + +// Describes System CPU attributes +const ( + // The logical CPU number [0..n-1] + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1 + AttributeSystemCPULogicalNumber = "system.cpu.logical_number" +) + +// Describes System Memory attributes +const ( + // The memory state + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'free', 'cached' + AttributeSystemMemoryState = "system.memory.state" +) + +const ( + // used + AttributeSystemMemoryStateUsed = "used" + // free + AttributeSystemMemoryStateFree = "free" + // shared + AttributeSystemMemoryStateShared = "shared" + // buffers + AttributeSystemMemoryStateBuffers = "buffers" + // cached + AttributeSystemMemoryStateCached = "cached" +) + +// Describes System Memory Paging attributes +const ( + // The paging access direction + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'in' + AttributeSystemPagingDirection = "system.paging.direction" + // The memory paging state + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'free' + AttributeSystemPagingState = "system.paging.state" + // The memory paging type + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'minor' + AttributeSystemPagingType = "system.paging.type" +) + +const ( + // in + AttributeSystemPagingDirectionIn = "in" + // out + AttributeSystemPagingDirectionOut = "out" +) + +const ( + // used + AttributeSystemPagingStateUsed = "used" + // free + AttributeSystemPagingStateFree = "free" +) + +const ( + // major + AttributeSystemPagingTypeMajor = "major" + // minor + AttributeSystemPagingTypeMinor = "minor" +) + +// Describes Filesystem attributes +const ( + // The filesystem mode + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'rw, ro' + AttributeSystemFilesystemMode = "system.filesystem.mode" + // The filesystem mount path + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/mnt/data' + AttributeSystemFilesystemMountpoint = "system.filesystem.mountpoint" + // The filesystem state + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'used' + AttributeSystemFilesystemState = "system.filesystem.state" + // The filesystem type + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'ext4' + AttributeSystemFilesystemType = "system.filesystem.type" +) + +const ( + // used + AttributeSystemFilesystemStateUsed = "used" + // free + AttributeSystemFilesystemStateFree = "free" + // reserved + AttributeSystemFilesystemStateReserved = "reserved" +) + +const ( + // fat32 + AttributeSystemFilesystemTypeFat32 = "fat32" + // exfat + AttributeSystemFilesystemTypeExfat = "exfat" + // ntfs + AttributeSystemFilesystemTypeNtfs = "ntfs" + // refs + AttributeSystemFilesystemTypeRefs = "refs" + // hfsplus + AttributeSystemFilesystemTypeHfsplus = "hfsplus" 
+ // ext4 + AttributeSystemFilesystemTypeExt4 = "ext4" +) + +// Describes Network attributes +const ( + // A stateless protocol MUST NOT set this attribute + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'close_wait' + AttributeSystemNetworkState = "system.network.state" +) + +const ( + // close + AttributeSystemNetworkStateClose = "close" + // close_wait + AttributeSystemNetworkStateCloseWait = "close_wait" + // closing + AttributeSystemNetworkStateClosing = "closing" + // delete + AttributeSystemNetworkStateDelete = "delete" + // established + AttributeSystemNetworkStateEstablished = "established" + // fin_wait_1 + AttributeSystemNetworkStateFinWait1 = "fin_wait_1" + // fin_wait_2 + AttributeSystemNetworkStateFinWait2 = "fin_wait_2" + // last_ack + AttributeSystemNetworkStateLastAck = "last_ack" + // listen + AttributeSystemNetworkStateListen = "listen" + // syn_recv + AttributeSystemNetworkStateSynRecv = "syn_recv" + // syn_sent + AttributeSystemNetworkStateSynSent = "syn_sent" + // time_wait + AttributeSystemNetworkStateTimeWait = "time_wait" +) + +// Describes System Process attributes +const ( + // The process state, e.g., Linux Process State Codes + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'running' + AttributeSystemProcessStatus = "system.process.status" +) + +const ( + // running + AttributeSystemProcessStatusRunning = "running" + // sleeping + AttributeSystemProcessStatusSleeping = "sleeping" + // stopped + AttributeSystemProcessStatusStopped = "stopped" + // defunct + AttributeSystemProcessStatusDefunct = "defunct" +) + +// Attributes for telemetry SDK. +const ( + // The language of the telemetry SDK. + // + // Type: Enum + // Requirement Level: Required + // Stability: stable + AttributeTelemetrySDKLanguage = "telemetry.sdk.language" + // The name of the telemetry SDK as defined above. + // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: 'opentelemetry' + // Note: The OpenTelemetry SDK MUST set the telemetry.sdk.name attribute to + // opentelemetry. + // If another SDK, like a fork or a vendor-provided implementation, is used, this + // SDK MUST set the + // telemetry.sdk.name attribute to the fully-qualified class or module name of + // this SDK's main entry point + // or another suitable identifier depending on the language. + // The identifier opentelemetry is reserved and MUST NOT be used in this case. + // All custom identifiers SHOULD be stable across different versions of an + // implementation. + AttributeTelemetrySDKName = "telemetry.sdk.name" + // The version string of the telemetry SDK. + // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: '1.2.3' + AttributeTelemetrySDKVersion = "telemetry.sdk.version" + // The name of the auto instrumentation agent or distribution, if used. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'parts-unlimited-java' + // Note: Official auto instrumentation agents and distributions SHOULD set the + // telemetry.distro.name attribute to + // a string starting with opentelemetry-, e.g. opentelemetry-java-instrumentation. + AttributeTelemetryDistroName = "telemetry.distro.name" + // The version string of the auto instrumentation agent or distribution, if used. 
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '1.2.3'
+ AttributeTelemetryDistroVersion = "telemetry.distro.version"
+)
+
+const (
+ // cpp
+ AttributeTelemetrySDKLanguageCPP = "cpp"
+ // dotnet
+ AttributeTelemetrySDKLanguageDotnet = "dotnet"
+ // erlang
+ AttributeTelemetrySDKLanguageErlang = "erlang"
+ // go
+ AttributeTelemetrySDKLanguageGo = "go"
+ // java
+ AttributeTelemetrySDKLanguageJava = "java"
+ // nodejs
+ AttributeTelemetrySDKLanguageNodejs = "nodejs"
+ // php
+ AttributeTelemetrySDKLanguagePHP = "php"
+ // python
+ AttributeTelemetrySDKLanguagePython = "python"
+ // ruby
+ AttributeTelemetrySDKLanguageRuby = "ruby"
+ // rust
+ AttributeTelemetrySDKLanguageRust = "rust"
+ // swift
+ AttributeTelemetrySDKLanguageSwift = "swift"
+ // webjs
+ AttributeTelemetrySDKLanguageWebjs = "webjs"
+)
+
+// This group describes attributes specific to [software
+// tests](https://en.wikipedia.org/wiki/Software_testing).
+const (
+ // The fully qualified human readable name of the test case.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'org.example.TestCase1.test1', 'example/tests/TestCase1.test1',
+ // 'ExampleTestCase1_test1'
+ AttributeTestCaseName = "test.case.name"
+ // The status of the actual test case result from test execution.
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'pass', 'fail'
+ AttributeTestCaseResultStatus = "test.case.result.status"
+ // The human readable name of a test suite.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'TestSuite1'
+ AttributeTestSuiteName = "test.suite.name"
+ // The status of the test suite run.
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'success', 'failure', 'skipped', 'aborted', 'timed_out',
+ // 'in_progress'
+ AttributeTestSuiteRunStatus = "test.suite.run.status"
+)
+
+const (
+ // pass
+ AttributeTestCaseResultStatusPass = "pass"
+ // fail
+ AttributeTestCaseResultStatusFail = "fail"
+)
+
+const (
+ // success
+ AttributeTestSuiteRunStatusSuccess = "success"
+ // failure
+ AttributeTestSuiteRunStatusFailure = "failure"
+ // skipped
+ AttributeTestSuiteRunStatusSkipped = "skipped"
+ // aborted
+ AttributeTestSuiteRunStatusAborted = "aborted"
+ // timed_out
+ AttributeTestSuiteRunStatusTimedOut = "timed_out"
+ // in_progress
+ AttributeTestSuiteRunStatusInProgress = "in_progress"
+)
+
+// These attributes may be used for any operation to store information about a
+// thread that started a span.
+const (
+ // Current "managed" thread ID (as opposed to OS thread ID).
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 42
+ AttributeThreadID = "thread.id"
+ // Current thread name.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'main'
+ AttributeThreadName = "thread.name"
+)
+
+// Semantic convention attributes in the TLS namespace.
+const (
+ // String indicating the cipher used during the current connection.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA',
+ // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256'
+ // Note: The values allowed for tls.cipher MUST be one of the Descriptions of the
+ // registered TLS Cipher Suites.
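+ // For example (editor's illustrative sketch; assumes the
+ // go.opentelemetry.io/otel/attribute package, which this file does not import):
+ //
+ //	attribute.String(AttributeTLSCipher, "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256")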
+ AttributeTLSCipher = "tls.cipher" + // PEM-encoded stand-alone certificate offered by the client. This is usually + // mutually-exclusive of client.certificate_chain since this value also exists in + // that list. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MII...' + AttributeTLSClientCertificate = "tls.client.certificate" + // Array of PEM-encoded certificates that make up the certificate chain offered by + // the client. This is usually mutually-exclusive of client.certificate since that + // value should be the first certificate in the chain. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MII...', 'MI...' + AttributeTLSClientCertificateChain = "tls.client.certificate_chain" + // Certificate fingerprint using the MD5 digest of DER-encoded version of + // certificate offered by the client. For consistency with other hash values, this + // value should be formatted as an uppercase hash. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' + AttributeTLSClientHashMd5 = "tls.client.hash.md5" + // Certificate fingerprint using the SHA1 digest of DER-encoded version of + // certificate offered by the client. For consistency with other hash values, this + // value should be formatted as an uppercase hash. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' + AttributeTLSClientHashSha1 = "tls.client.hash.sha1" + // Certificate fingerprint using the SHA256 digest of DER-encoded version of + // certificate offered by the client. For consistency with other hash values, this + // value should be formatted as an uppercase hash. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' + AttributeTLSClientHashSha256 = "tls.client.hash.sha256" + // Distinguished name of subject of the issuer of the x.509 certificate presented + // by the client. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com' + AttributeTLSClientIssuer = "tls.client.issuer" + // A hash that identifies clients based on how they perform an SSL/TLS handshake. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'd4e5b18d6b55c71272893221c96ba240' + AttributeTLSClientJa3 = "tls.client.ja3" + // Date/Time indicating when client certificate is no longer considered valid. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2021-01-01T00:00:00.000Z' + AttributeTLSClientNotAfter = "tls.client.not_after" + // Date/Time indicating when client certificate is first considered valid. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1970-01-01T00:00:00.000Z' + AttributeTLSClientNotBefore = "tls.client.not_before" + // Distinguished name of subject of the x.509 certificate presented by the client. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com' + AttributeTLSClientSubject = "tls.client.subject" + // Array of ciphers offered by the client during the client hello. 
+ // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384', + // 'TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384', '...' + AttributeTLSClientSupportedCiphers = "tls.client.supported_ciphers" + // String indicating the curve used for the given cipher, when applicable + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'secp256r1' + AttributeTLSCurve = "tls.curve" + // Boolean flag indicating if the TLS negotiation was successful and transitioned + // to an encrypted tunnel. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + // Examples: True + AttributeTLSEstablished = "tls.established" + // String indicating the protocol being tunneled. Per the values in the IANA + // registry, this string should be lower case. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'http/1.1' + AttributeTLSNextProtocol = "tls.next_protocol" + // Normalized lowercase protocol name parsed from original string of the + // negotiated SSL/TLS protocol version + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeTLSProtocolName = "tls.protocol.name" + // Numeric part of the version parsed from the original string of the negotiated + // SSL/TLS protocol version + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1.2', '3' + AttributeTLSProtocolVersion = "tls.protocol.version" + // Boolean flag indicating if this TLS connection was resumed from an existing TLS + // negotiation. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + // Examples: True + AttributeTLSResumed = "tls.resumed" + // PEM-encoded stand-alone certificate offered by the server. This is usually + // mutually-exclusive of server.certificate_chain since this value also exists in + // that list. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MII...' + AttributeTLSServerCertificate = "tls.server.certificate" + // Array of PEM-encoded certificates that make up the certificate chain offered by + // the server. This is usually mutually-exclusive of server.certificate since that + // value should be the first certificate in the chain. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MII...', 'MI...' + AttributeTLSServerCertificateChain = "tls.server.certificate_chain" + // Certificate fingerprint using the MD5 digest of DER-encoded version of + // certificate offered by the server. For consistency with other hash values, this + // value should be formatted as an uppercase hash. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' + AttributeTLSServerHashMd5 = "tls.server.hash.md5" + // Certificate fingerprint using the SHA1 digest of DER-encoded version of + // certificate offered by the server. For consistency with other hash values, this + // value should be formatted as an uppercase hash. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' + AttributeTLSServerHashSha1 = "tls.server.hash.sha1" + // Certificate fingerprint using the SHA256 digest of DER-encoded version of + // certificate offered by the server. 
For consistency with other hash values, this + // value should be formatted as an uppercase hash. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' + AttributeTLSServerHashSha256 = "tls.server.hash.sha256" + // Distinguished name of subject of the issuer of the x.509 certificate presented + // by the client. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com' + AttributeTLSServerIssuer = "tls.server.issuer" + // A hash that identifies servers based on how they perform an SSL/TLS handshake. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'd4e5b18d6b55c71272893221c96ba240' + AttributeTLSServerJa3s = "tls.server.ja3s" + // Date/Time indicating when server certificate is no longer considered valid. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2021-01-01T00:00:00.000Z' + AttributeTLSServerNotAfter = "tls.server.not_after" + // Date/Time indicating when server certificate is first considered valid. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1970-01-01T00:00:00.000Z' + AttributeTLSServerNotBefore = "tls.server.not_before" + // Distinguished name of subject of the x.509 certificate presented by the server. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com' + AttributeTLSServerSubject = "tls.server.subject" +) + +const ( + // ssl + AttributeTLSProtocolNameSsl = "ssl" + // tls + AttributeTLSProtocolNameTLS = "tls" +) + +// Attributes describing URL. +const ( + // Domain extracted from the url.full, such as "opentelemetry.io". + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'www.foo.bar', 'opentelemetry.io', '3.12.167.2', + // '[1080:0:0:0:8:800:200C:417A]' + // Note: In some cases a URL may refer to an IP and/or port directly, without a + // domain name. In this case, the IP address would go to the domain field. If the + // URL contains a literal IPv6 address enclosed by [ and ], the [ and ] characters + // should also be captured in the domain field. + AttributeURLDomain = "url.domain" + // The file extension extracted from the url.full, excluding the leading dot. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'png', 'gz' + // Note: The file extension is only set if it exists, as not every url has a file + // extension. When the file name has multiple extensions example.tar.gz, only the + // last one should be captured gz, not tar.gz. + AttributeURLExtension = "url.extension" + // The URI fragment component + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'SemConv' + AttributeURLFragment = "url.fragment" + // Absolute URL describing a network resource according to RFC3986 + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', '//localhost' + // Note: For network calls, URL usually has + // scheme://host[:port][path][?query][#fragment] format, where the fragment is not + // transmitted over HTTP, but if it is known, it SHOULD be included nevertheless. 
+ // url.full MUST NOT contain credentials passed via URL in form of + // https://username:password@www.example.com/. In such case username and password + // SHOULD be redacted and attribute's value SHOULD be + // https://REDACTED:REDACTED@www.example.com/. + // url.full SHOULD capture the absolute URL when it is available (or can be + // reconstructed). Sensitive content provided in url.full SHOULD be scrubbed when + // instrumentations can identify it. + AttributeURLFull = "url.full" + // Unmodified original URL as seen in the event source. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', + // 'search?q=OpenTelemetry' + // Note: In network monitoring, the observed URL may be a full URL, whereas in + // access logs, the URL is often just represented as a path. This field is meant + // to represent the URL as it was observed, complete or not. + // url.original might contain credentials passed via URL in form of + // https://username:password@www.example.com/. In such case password and username + // SHOULD NOT be redacted and attribute's value SHOULD remain the same. + AttributeURLOriginal = "url.original" + // The URI path component + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '/search' + // Note: Sensitive content provided in url.path SHOULD be scrubbed when + // instrumentations can identify it. + AttributeURLPath = "url.path" + // Port extracted from the url.full + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 443 + AttributeURLPort = "url.port" + // The URI query component + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'q=OpenTelemetry' + // Note: Sensitive content provided in url.query SHOULD be scrubbed when + // instrumentations can identify it. + AttributeURLQuery = "url.query" + // The highest registered url domain, stripped of the subdomain. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'example.com', 'foo.co.uk' + // Note: This value can be determined precisely with the public suffix list. For + // example, the registered domain for foo.example.com is example.com. Trying to + // approximate this by simply taking the last two labels will not work well for + // TLDs such as co.uk. + AttributeURLRegisteredDomain = "url.registered_domain" + // The URI scheme component identifying the used protocol. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'https', 'ftp', 'telnet' + AttributeURLScheme = "url.scheme" + // The subdomain portion of a fully qualified domain name includes all of the + // names except the host name under the registered_domain. In a partially + // qualified domain, or if the qualification level of the full name cannot be + // determined, subdomain contains all of the names below the registered domain. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'east', 'sub2.sub1' + // Note: The subdomain portion of www.east.mydomain.co.uk is east. If the domain + // has multiple levels of subdomain, such as sub2.sub1.example.com, the subdomain + // field should contain sub2.sub1, with no trailing period. + AttributeURLSubdomain = "url.subdomain" + // The low-cardinality template of an absolute path reference. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/users/{id}', '/users/:id', '/users?id={id}' + AttributeURLTemplate = "url.template" + // The effective top level domain (eTLD), also known as the domain suffix, is the + // last part of the domain name. For example, the top level domain for example.com + // is com. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'com', 'co.uk' + // Note: This value can be determined precisely with the public suffix list. + AttributeURLTopLevelDomain = "url.top_level_domain" +) + +// Describes user-agent attributes. +const ( + // Name of the user-agent extracted from original. Usually refers to the browser's + // name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Safari', 'YourApp' + // Note: Example of extracting browser's name from original string. In the case of + // using a user-agent for non-browser products, such as microservices with + // multiple names/versions inside the user_agent.original, the most significant + // name SHOULD be selected. In such a scenario it should align with + // user_agent.version + AttributeUserAgentName = "user_agent.name" + // Value of the HTTP User-Agent header sent by the client. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU iPhone + // OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) + // Version/14.1.2 Mobile/15E148 Safari/604.1', 'YourApp/1.0.0 grpc-java- + // okhttp/1.27.2' + AttributeUserAgentOriginal = "user_agent.original" + // Version of the user-agent extracted from original. Usually refers to the + // browser's version + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '14.1.2', '1.0.0' + // Note: Example of extracting browser's version from original string. In the case + // of using a user-agent for non-browser products, such as microservices with + // multiple names/versions inside the user_agent.original, the most significant + // version SHOULD be selected. In such a scenario it should align with + // user_agent.name + AttributeUserAgentVersion = "user_agent.version" +) + +// Describes information about the user. +const ( + // User email address. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'a.einstein@example.com' + AttributeUserEmail = "user.email" + // User's full name + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Albert Einstein' + AttributeUserFullName = "user.full_name" + // Unique user hash to correlate information for a user in anonymized form. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '364fc68eaf4c8acec74a4e52d7d1feaa' + // Note: Useful if user.id or user.name contain confidential information and + // cannot be used. + AttributeUserHash = "user.hash" + // Unique identifier of the user. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'S-1-5-21-202424912787-2692429404-2351956786-1000' + AttributeUserID = "user.id" + // Short name or login/username of the user. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'a.einstein' + AttributeUserName = "user.name" + // Array of user roles at the time of the event. 
+ // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'admin', 'reporting_user' + AttributeUserRoles = "user.roles" +) + +// Describes V8 JS Engine Runtime related attributes. +const ( + // The type of garbage collection. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeV8jsGcType = "v8js.gc.type" + // The name of the space type of heap memory. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: Value can be retrieved from value space_name of + // v8.getHeapSpaceStatistics() + AttributeV8jsHeapSpaceName = "v8js.heap.space.name" +) + +const ( + // Major (Mark Sweep Compact) + AttributeV8jsGcTypeMajor = "major" + // Minor (Scavenge) + AttributeV8jsGcTypeMinor = "minor" + // Incremental (Incremental Marking) + AttributeV8jsGcTypeIncremental = "incremental" + // Weak Callbacks (Process Weak Callbacks) + AttributeV8jsGcTypeWeakcb = "weakcb" +) + +const ( + // New memory space + AttributeV8jsHeapSpaceNameNewSpace = "new_space" + // Old memory space + AttributeV8jsHeapSpaceNameOldSpace = "old_space" + // Code memory space + AttributeV8jsHeapSpaceNameCodeSpace = "code_space" + // Map memory space + AttributeV8jsHeapSpaceNameMapSpace = "map_space" + // Large object memory space + AttributeV8jsHeapSpaceNameLargeObjectSpace = "large_object_space" +) + +// This group defines the attributes for [Version Control Systems +// (VCS)](https://en.wikipedia.org/wiki/Version_control). +const ( + // The ID of the change (pull request/merge request) if applicable. This is + // usually a unique (within repository) identifier generated by the VCS system. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '123' + AttributeVcsRepositoryChangeID = "vcs.repository.change.id" + // The human readable title of the change (pull request/merge request). This title + // is often a brief summary of the change and may get merged in to a ref as the + // commit summary. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Fixes broken thing', 'feat: add my new feature', '[chore] update + // dependency' + AttributeVcsRepositoryChangeTitle = "vcs.repository.change.title" + // The name of the reference such as branch or tag in the repository. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'my-feature-branch', 'tag-1-test' + AttributeVcsRepositoryRefName = "vcs.repository.ref.name" + // The revision, literally revised version, The revision most often refers to a + // commit object in Git, or a revision number in SVN. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc', + // 'main', '123', 'HEAD' + // Note: The revision can be a full hash value (see glossary), + // of the recorded change to a ref within a repository pointing to a + // commit commit object. It does + // not necessarily have to be a hash; it can simply define a + // revision number + // which is an integer that is monotonically increasing. In cases where + // it is identical to the ref.name, it SHOULD still be included. It is + // up to the implementer to decide which value to set as the revision + // based on the VCS system and situational context. + AttributeVcsRepositoryRefRevision = "vcs.repository.ref.revision" + // The type of the reference in the repository. 
+ // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'branch', 'tag' + AttributeVcsRepositoryRefType = "vcs.repository.ref.type" + // The URL of the repository providing the complete address in order to locate and + // identify the repository. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'https://github.com/opentelemetry/open-telemetry-collector-contrib', + // 'https://gitlab.com/my-org/my-project/my-projects-project/repo' + AttributeVcsRepositoryURLFull = "vcs.repository.url.full" +) + +const ( + // [branch](https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch) + AttributeVcsRepositoryRefTypeBranch = "branch" + // [tag](https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag) + AttributeVcsRepositoryRefTypeTag = "tag" +) + +// The attributes used to describe the packaged software running the +// application code. +const ( + // Additional description of the web engine (e.g. detailed version and edition + // information). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final' + AttributeWebEngineDescription = "webengine.description" + // The name of the web engine. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'WildFly' + AttributeWebEngineName = "webengine.name" + // The version of the web engine. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '21.0.0' + AttributeWebEngineVersion = "webengine.version" +) + +func GetAttribute_groupSemanticConventionAttributeNames() []string { + return []string{ + AttributeAndroidOSAPILevel, + AttributeArtifactAttestationFilename, + AttributeArtifactAttestationHash, + AttributeArtifactAttestationID, + AttributeArtifactFilename, + AttributeArtifactHash, + AttributeArtifactPurl, + AttributeArtifactVersion, + AttributeAspnetcoreRateLimitingResult, + AttributeAspnetcoreDiagnosticsHandlerType, + AttributeAspnetcoreDiagnosticsExceptionResult, + AttributeAspnetcoreRateLimitingPolicy, + AttributeAspnetcoreRequestIsUnhandled, + AttributeAspnetcoreRoutingIsFallback, + AttributeAspnetcoreRoutingMatchStatus, + AttributeAWSRequestID, + AttributeAWSDynamoDBAttributeDefinitions, + AttributeAWSDynamoDBAttributesToGet, + AttributeAWSDynamoDBConsistentRead, + AttributeAWSDynamoDBConsumedCapacity, + AttributeAWSDynamoDBCount, + AttributeAWSDynamoDBExclusiveStartTable, + AttributeAWSDynamoDBGlobalSecondaryIndexUpdates, + AttributeAWSDynamoDBGlobalSecondaryIndexes, + AttributeAWSDynamoDBIndexName, + AttributeAWSDynamoDBItemCollectionMetrics, + AttributeAWSDynamoDBLimit, + AttributeAWSDynamoDBLocalSecondaryIndexes, + AttributeAWSDynamoDBProjection, + AttributeAWSDynamoDBProvisionedReadCapacity, + AttributeAWSDynamoDBProvisionedWriteCapacity, + AttributeAWSDynamoDBScanForward, + AttributeAWSDynamoDBScannedCount, + AttributeAWSDynamoDBSegment, + AttributeAWSDynamoDBSelect, + AttributeAWSDynamoDBTableCount, + AttributeAWSDynamoDBTableNames, + AttributeAWSDynamoDBTotalSegments, + AttributeAWSECSTaskID, + AttributeAWSECSClusterARN, + AttributeAWSECSContainerARN, + AttributeAWSECSLaunchtype, + AttributeAWSECSTaskARN, + AttributeAWSECSTaskFamily, + AttributeAWSECSTaskRevision, + AttributeAWSEKSClusterARN, + AttributeAWSLogGroupARNs, + AttributeAWSLogGroupNames, + AttributeAWSLogStreamARNs, + 
AttributeAWSLogStreamNames, + AttributeAWSLambdaInvokedARN, + AttributeAWSS3Bucket, + AttributeAWSS3CopySource, + AttributeAWSS3Delete, + AttributeAWSS3Key, + AttributeAWSS3PartNumber, + AttributeAWSS3UploadID, + AttributeAzServiceRequestID, + AttributeBrowserBrands, + AttributeBrowserLanguage, + AttributeBrowserMobile, + AttributeBrowserPlatform, + AttributeCicdPipelineName, + AttributeCicdPipelineRunID, + AttributeCicdPipelineTaskName, + AttributeCicdPipelineTaskRunID, + AttributeCicdPipelineTaskRunURLFull, + AttributeCicdPipelineTaskType, + AttributeClientAddress, + AttributeClientPort, + AttributeCloudAccountID, + AttributeCloudAvailabilityZone, + AttributeCloudPlatform, + AttributeCloudProvider, + AttributeCloudRegion, + AttributeCloudResourceID, + AttributeCloudeventsEventID, + AttributeCloudeventsEventSource, + AttributeCloudeventsEventSpecVersion, + AttributeCloudeventsEventSubject, + AttributeCloudeventsEventType, + AttributeCodeColumn, + AttributeCodeFilepath, + AttributeCodeFunction, + AttributeCodeLineNumber, + AttributeCodeNamespace, + AttributeCodeStacktrace, + AttributeContainerCommand, + AttributeContainerCommandArgs, + AttributeContainerCommandLine, + AttributeContainerID, + AttributeContainerImageID, + AttributeContainerImageName, + AttributeContainerImageRepoDigests, + AttributeContainerImageTags, + AttributeContainerName, + AttributeContainerRuntime, + AttributeCPUMode, + AttributeDBClientConnectionPoolName, + AttributeDBClientConnectionState, + AttributeDBCollectionName, + AttributeDBNamespace, + AttributeDBOperationBatchSize, + AttributeDBOperationName, + AttributeDBQueryText, + AttributeDBSystem, + AttributeDBCassandraConsistencyLevel, + AttributeDBCassandraCoordinatorDC, + AttributeDBCassandraCoordinatorID, + AttributeDBCassandraIdempotence, + AttributeDBCassandraPageSize, + AttributeDBCassandraSpeculativeExecutionCount, + AttributeDBCosmosDBClientID, + AttributeDBCosmosDBConnectionMode, + AttributeDBCosmosDBOperationType, + AttributeDBCosmosDBRequestCharge, + AttributeDBCosmosDBRequestContentLength, + AttributeDBCosmosDBStatusCode, + AttributeDBCosmosDBSubStatusCode, + AttributeDBElasticsearchNodeName, + AttributeDeploymentEnvironmentName, + AttributeDeploymentID, + AttributeDeploymentName, + AttributeDeploymentStatus, + AttributeAndroidState, + AttributeDestinationAddress, + AttributeDestinationPort, + AttributeDeviceID, + AttributeDeviceManufacturer, + AttributeDeviceModelIdentifier, + AttributeDeviceModelName, + AttributeDiskIoDirection, + AttributeDNSQuestionName, + AttributeErrorType, + AttributeEventName, + AttributeExceptionEscaped, + AttributeExceptionMessage, + AttributeExceptionStacktrace, + AttributeExceptionType, + AttributeFaaSColdstart, + AttributeFaaSCron, + AttributeFaaSDocumentCollection, + AttributeFaaSDocumentName, + AttributeFaaSDocumentOperation, + AttributeFaaSDocumentTime, + AttributeFaaSInstance, + AttributeFaaSInvocationID, + AttributeFaaSInvokedName, + AttributeFaaSInvokedProvider, + AttributeFaaSInvokedRegion, + AttributeFaaSMaxMemory, + AttributeFaaSName, + AttributeFaaSTime, + AttributeFaaSTrigger, + AttributeFaaSVersion, + AttributeFeatureFlagKey, + AttributeFeatureFlagProviderName, + AttributeFeatureFlagVariant, + AttributeFileDirectory, + AttributeFileExtension, + AttributeFileName, + AttributeFilePath, + AttributeFileSize, + AttributeGCPClientService, + AttributeGCPCloudRunJobExecution, + AttributeGCPCloudRunJobTaskIndex, + AttributeGCPGceInstanceHostname, + AttributeGCPGceInstanceName, + AttributeGenAiCompletion, + 
AttributeGenAiOperationName, + AttributeGenAiPrompt, + AttributeGenAiRequestFrequencyPenalty, + AttributeGenAiRequestMaxTokens, + AttributeGenAiRequestModel, + AttributeGenAiRequestPresencePenalty, + AttributeGenAiRequestStopSequences, + AttributeGenAiRequestTemperature, + AttributeGenAiRequestTopK, + AttributeGenAiRequestTopP, + AttributeGenAiResponseFinishReasons, + AttributeGenAiResponseID, + AttributeGenAiResponseModel, + AttributeGenAiSystem, + AttributeGenAiTokenType, + AttributeGenAiUsageInputTokens, + AttributeGenAiUsageOutputTokens, + AttributeGoMemoryType, + AttributeGraphqlDocument, + AttributeGraphqlOperationName, + AttributeGraphqlOperationType, + AttributeHerokuAppID, + AttributeHerokuReleaseCommit, + AttributeHerokuReleaseCreationTimestamp, + AttributeHostArch, + AttributeHostCPUCacheL2Size, + AttributeHostCPUFamily, + AttributeHostCPUModelID, + AttributeHostCPUModelName, + AttributeHostCPUStepping, + AttributeHostCPUVendorID, + AttributeHostID, + AttributeHostImageID, + AttributeHostImageName, + AttributeHostImageVersion, + AttributeHostIP, + AttributeHostMac, + AttributeHostName, + AttributeHostType, + AttributeHTTPConnectionState, + AttributeHTTPRequestBodySize, + AttributeHTTPRequestMethod, + AttributeHTTPRequestMethodOriginal, + AttributeHTTPRequestResendCount, + AttributeHTTPRequestSize, + AttributeHTTPResponseBodySize, + AttributeHTTPResponseSize, + AttributeHTTPResponseStatusCode, + AttributeHTTPRoute, + AttributeJvmBufferPoolName, + AttributeJvmGcAction, + AttributeJvmGcName, + AttributeJvmMemoryPoolName, + AttributeJvmMemoryType, + AttributeJvmThreadDaemon, + AttributeJvmThreadState, + AttributeK8SClusterName, + AttributeK8SClusterUID, + AttributeK8SContainerName, + AttributeK8SContainerRestartCount, + AttributeK8SContainerStatusLastTerminatedReason, + AttributeK8SCronJobName, + AttributeK8SCronJobUID, + AttributeK8SDaemonSetName, + AttributeK8SDaemonSetUID, + AttributeK8SDeploymentName, + AttributeK8SDeploymentUID, + AttributeK8SJobName, + AttributeK8SJobUID, + AttributeK8SNamespaceName, + AttributeK8SNodeName, + AttributeK8SNodeUID, + AttributeK8SPodName, + AttributeK8SPodUID, + AttributeK8SReplicaSetName, + AttributeK8SReplicaSetUID, + AttributeK8SStatefulSetName, + AttributeK8SStatefulSetUID, + AttributeLinuxMemorySlabState, + AttributeLogIostream, + AttributeLogFileName, + AttributeLogFileNameResolved, + AttributeLogFilePath, + AttributeLogFilePathResolved, + AttributeLogRecordOriginal, + AttributeLogRecordUID, + AttributeMessagingBatchMessageCount, + AttributeMessagingClientID, + AttributeMessagingConsumerGroupName, + AttributeMessagingDestinationAnonymous, + AttributeMessagingDestinationName, + AttributeMessagingDestinationPartitionID, + AttributeMessagingDestinationSubscriptionName, + AttributeMessagingDestinationTemplate, + AttributeMessagingDestinationTemporary, + AttributeMessagingMessageBodySize, + AttributeMessagingMessageConversationID, + AttributeMessagingMessageEnvelopeSize, + AttributeMessagingMessageID, + AttributeMessagingOperationName, + AttributeMessagingOperationType, + AttributeMessagingSystem, + AttributeMessagingKafkaMessageKey, + AttributeMessagingKafkaMessageTombstone, + AttributeMessagingKafkaOffset, + AttributeMessagingRabbitmqDestinationRoutingKey, + AttributeMessagingRabbitmqMessageDeliveryTag, + AttributeMessagingRocketmqConsumptionModel, + AttributeMessagingRocketmqMessageDelayTimeLevel, + AttributeMessagingRocketmqMessageDeliveryTimestamp, + AttributeMessagingRocketmqMessageGroup, + AttributeMessagingRocketmqMessageKeys, + 
AttributeMessagingRocketmqMessageTag, + AttributeMessagingRocketmqMessageType, + AttributeMessagingRocketmqNamespace, + AttributeMessagingGCPPubsubMessageAckDeadline, + AttributeMessagingGCPPubsubMessageAckID, + AttributeMessagingGCPPubsubMessageDeliveryAttempt, + AttributeMessagingGCPPubsubMessageOrderingKey, + AttributeMessagingServicebusDispositionStatus, + AttributeMessagingServicebusMessageDeliveryCount, + AttributeMessagingServicebusMessageEnqueuedTime, + AttributeMessagingEventhubsMessageEnqueuedTime, + AttributeNetworkCarrierIcc, + AttributeNetworkCarrierMcc, + AttributeNetworkCarrierMnc, + AttributeNetworkCarrierName, + AttributeNetworkConnectionSubtype, + AttributeNetworkConnectionType, + AttributeNetworkIoDirection, + AttributeNetworkLocalAddress, + AttributeNetworkLocalPort, + AttributeNetworkPeerAddress, + AttributeNetworkPeerPort, + AttributeNetworkProtocolName, + AttributeNetworkProtocolVersion, + AttributeNetworkTransport, + AttributeNetworkType, + AttributeOciManifestDigest, + AttributeOpentracingRefType, + AttributeOSBuildID, + AttributeOSDescription, + AttributeOSName, + AttributeOSType, + AttributeOSVersion, + AttributeOTelStatusCode, + AttributeOTelStatusDescription, + AttributeOTelScopeName, + AttributeOTelScopeVersion, + AttributePeerService, + AttributeProcessCommand, + AttributeProcessCommandArgs, + AttributeProcessCommandLine, + AttributeProcessContextSwitchType, + AttributeProcessCreationTime, + AttributeProcessExecutableName, + AttributeProcessExecutablePath, + AttributeProcessExitCode, + AttributeProcessExitTime, + AttributeProcessGroupLeaderPID, + AttributeProcessInteractive, + AttributeProcessOwner, + AttributeProcessPagingFaultType, + AttributeProcessParentPID, + AttributeProcessPID, + AttributeProcessRealUserID, + AttributeProcessRealUserName, + AttributeProcessRuntimeDescription, + AttributeProcessRuntimeName, + AttributeProcessRuntimeVersion, + AttributeProcessSavedUserID, + AttributeProcessSavedUserName, + AttributeProcessSessionLeaderPID, + AttributeProcessUserID, + AttributeProcessUserName, + AttributeProcessVpid, + AttributeRPCConnectRPCErrorCode, + AttributeRPCGRPCStatusCode, + AttributeRPCJsonrpcErrorCode, + AttributeRPCJsonrpcErrorMessage, + AttributeRPCJsonrpcRequestID, + AttributeRPCJsonrpcVersion, + AttributeRPCMessageCompressedSize, + AttributeRPCMessageID, + AttributeRPCMessageType, + AttributeRPCMessageUncompressedSize, + AttributeRPCMethod, + AttributeRPCService, + AttributeRPCSystem, + AttributeServerAddress, + AttributeServerPort, + AttributeServiceInstanceID, + AttributeServiceName, + AttributeServiceNamespace, + AttributeServiceVersion, + AttributeSessionID, + AttributeSessionPreviousID, + AttributeSignalrConnectionStatus, + AttributeSignalrTransport, + AttributeSourceAddress, + AttributeSourcePort, + AttributeSystemDevice, + AttributeSystemCPULogicalNumber, + AttributeSystemMemoryState, + AttributeSystemPagingDirection, + AttributeSystemPagingState, + AttributeSystemPagingType, + AttributeSystemFilesystemMode, + AttributeSystemFilesystemMountpoint, + AttributeSystemFilesystemState, + AttributeSystemFilesystemType, + AttributeSystemNetworkState, + AttributeSystemProcessStatus, + AttributeTelemetrySDKLanguage, + AttributeTelemetrySDKName, + AttributeTelemetrySDKVersion, + AttributeTelemetryDistroName, + AttributeTelemetryDistroVersion, + AttributeTestCaseName, + AttributeTestCaseResultStatus, + AttributeTestSuiteName, + AttributeTestSuiteRunStatus, + AttributeThreadID, + AttributeThreadName, + AttributeTLSCipher, + 
AttributeTLSClientCertificate, + AttributeTLSClientCertificateChain, + AttributeTLSClientHashMd5, + AttributeTLSClientHashSha1, + AttributeTLSClientHashSha256, + AttributeTLSClientIssuer, + AttributeTLSClientJa3, + AttributeTLSClientNotAfter, + AttributeTLSClientNotBefore, + AttributeTLSClientSubject, + AttributeTLSClientSupportedCiphers, + AttributeTLSCurve, + AttributeTLSEstablished, + AttributeTLSNextProtocol, + AttributeTLSProtocolName, + AttributeTLSProtocolVersion, + AttributeTLSResumed, + AttributeTLSServerCertificate, + AttributeTLSServerCertificateChain, + AttributeTLSServerHashMd5, + AttributeTLSServerHashSha1, + AttributeTLSServerHashSha256, + AttributeTLSServerIssuer, + AttributeTLSServerJa3s, + AttributeTLSServerNotAfter, + AttributeTLSServerNotBefore, + AttributeTLSServerSubject, + AttributeURLDomain, + AttributeURLExtension, + AttributeURLFragment, + AttributeURLFull, + AttributeURLOriginal, + AttributeURLPath, + AttributeURLPort, + AttributeURLQuery, + AttributeURLRegisteredDomain, + AttributeURLScheme, + AttributeURLSubdomain, + AttributeURLTemplate, + AttributeURLTopLevelDomain, + AttributeUserAgentName, + AttributeUserAgentOriginal, + AttributeUserAgentVersion, + AttributeUserEmail, + AttributeUserFullName, + AttributeUserHash, + AttributeUserID, + AttributeUserName, + AttributeUserRoles, + AttributeV8jsGcType, + AttributeV8jsHeapSpaceName, + AttributeVcsRepositoryChangeID, + AttributeVcsRepositoryChangeTitle, + AttributeVcsRepositoryRefName, + AttributeVcsRepositoryRefRevision, + AttributeVcsRepositoryRefType, + AttributeVcsRepositoryURLFull, + AttributeWebEngineDescription, + AttributeWebEngineName, + AttributeWebEngineVersion, + } +} diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.27.0/generated_event.go b/vendor/go.opentelemetry.io/collector/semconv/v1.27.0/generated_event.go new file mode 100644 index 00000000000..ac6893c3b26 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.27.0/generated_event.go @@ -0,0 +1,10 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv + +func GetEventSemanticConventionAttributeNames() []string { + return []string{} +} diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.27.0/generated_resource.go b/vendor/go.opentelemetry.io/collector/semconv/v1.27.0/generated_resource.go new file mode 100644 index 00000000000..bb89e4806f5 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.27.0/generated_resource.go @@ -0,0 +1,10 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv + +func GetResourceSemanticConventionAttributeNames() []string { + return []string{} +} diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.27.0/generated_trace.go b/vendor/go.opentelemetry.io/collector/semconv/v1.27.0/generated_trace.go new file mode 100644 index 00000000000..380529563a2 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.27.0/generated_trace.go @@ -0,0 +1,10 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. 
+ +package semconv + +func GetTraceSemanticConventionAttributeNames() []string { + return []string{} +} diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.27.0/schema.go b/vendor/go.opentelemetry.io/collector/semconv/v1.27.0/schema.go new file mode 100644 index 00000000000..be4e3241f25 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.27.0/schema.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/collector/semconv/v1.27.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/ +const SchemaURL = "https://opentelemetry.io/schemas/1.27.0" diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.6.1/generated_resource.go b/vendor/go.opentelemetry.io/collector/semconv/v1.6.1/generated_resource.go index 1a9bc437647..801970ec372 100644 --- a/vendor/go.opentelemetry.io/collector/semconv/v1.6.1/generated_resource.go +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.6.1/generated_resource.go @@ -316,7 +316,7 @@ const ( // // Take care not to use the "invoked ARN" directly but replace any // alias suffix with the resolved function version, as the same runtime instance - // may be invokable with multiple + // may be invocable with multiple // different aliases.
 //
 // * GCP: The URI of the resource
 //
 // * Azure: The Fully Qualified Resource ID.
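A minimal, hypothetical sketch (not part of this patch) of how the generated v1.27.0 constants added above are consumed: each Attribute* identifier is a plain untyped string constant, so it works directly as an attribute-map key, and SchemaURL from the new schema.go pins which semantic-conventions version those keys follow. The attribute values below are illustrative only.

package main

import (
	"fmt"

	semconv "go.opentelemetry.io/collector/semconv/v1.27.0"
)

func main() {
	// Each generated Attribute* constant is an untyped string const,
	// usable anywhere a semantic-convention key is expected.
	attrs := map[string]any{
		semconv.AttributeURLScheme:          "https",
		semconv.AttributeURLPath:            "/search",
		semconv.AttributeUserAgentOriginal:  "YourApp/1.0.0",
		semconv.AttributeTLSProtocolVersion: "1.3",
	}
	// SchemaURL identifies the conventions version the keys come from.
	fmt.Println(semconv.SchemaURL, attrs)
}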
diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.9.0/generated_resource.go b/vendor/go.opentelemetry.io/collector/semconv/v1.9.0/generated_resource.go
index d740be56d50..896db946a37 100644
--- a/vendor/go.opentelemetry.io/collector/semconv/v1.9.0/generated_resource.go
+++ b/vendor/go.opentelemetry.io/collector/semconv/v1.9.0/generated_resource.go
@@ -336,7 +336,7 @@ const (
 //
 // Take care not to use the "invoked ARN" directly but replace any
 // alias suffix with the resolved function version, as the same runtime instance
- // may be invokable with multiple
+ // may be invocable with multiple
 // different aliases.
 //
 // * GCP: The URI of the resource
 //
 // * Azure: The Fully Qualified Resource ID.
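The service/attributes.go file added below merges the SDK resource's attributes with overrides from the telemetry config's Resource map: a non-nil override replaces the key, and a nil override deletes it. A self-contained sketch of that merge rule, using plain maps in place of the real SDK resource and config types (the merge helper here is hypothetical):

package main

import "fmt"

// merge mirrors the rule in the attributes() helper below: start from the
// resource attributes, let non-nil overrides win, and drop keys whose
// override is nil.
func merge(resource map[string]string, overrides map[string]*string) map[string]any {
	attrs := map[string]any{}
	for k, v := range resource {
		attrs[k] = v
	}
	for k, v := range overrides {
		if v != nil {
			attrs[k] = *v // explicit override
		} else {
			delete(attrs, k) // nil override removes the key entirely
		}
	}
	return attrs
}

func main() {
	name := "my-collector"
	out := merge(
		map[string]string{"service.name": "otelcol", "service.version": "0.1.0"},
		map[string]*string{"service.name": &name, "service.version": nil},
	)
	fmt.Println(out) // map[service.name:my-collector]
}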
    • diff --git a/vendor/go.opentelemetry.io/collector/service/attributes.go b/vendor/go.opentelemetry.io/collector/service/attributes.go new file mode 100644 index 00000000000..f2587b33130 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/service/attributes.go @@ -0,0 +1,27 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package service // import "go.opentelemetry.io/collector/service" + +import ( + sdkresource "go.opentelemetry.io/otel/sdk/resource" + + "go.opentelemetry.io/collector/service/telemetry" +) + +func attributes(res *sdkresource.Resource, cfg telemetry.Config) map[string]any { + attrs := map[string]any{} + for _, r := range res.Attributes() { + attrs[string(r.Key)] = r.Value.AsString() + } + + for k, v := range cfg.Resource { + if v != nil { + attrs[k] = *v + } else { + // the new value is nil, delete the existing key + delete(attrs, k) + } + } + return attrs +} diff --git a/vendor/go.opentelemetry.io/collector/service/documentation.md b/vendor/go.opentelemetry.io/collector/service/documentation.md new file mode 100644 index 00000000000..346def750d2 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/service/documentation.md @@ -0,0 +1,55 @@ +[comment]: <> (Code generated by mdatagen. DO NOT EDIT.) + +# service + +## Internal Telemetry + +The following telemetry is emitted by this component. + +### otelcol_process_cpu_seconds + +Total CPU user and system time in seconds [alpha] + +| Unit | Metric Type | Value Type | Monotonic | +| ---- | ----------- | ---------- | --------- | +| s | Sum | Double | true | + +### otelcol_process_memory_rss + +Total physical memory (resident set size) [alpha] + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By | Gauge | Int | + +### otelcol_process_runtime_heap_alloc_bytes + +Bytes of allocated heap objects (see 'go doc runtime.MemStats.HeapAlloc') [alpha] + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By | Gauge | Int | + +### otelcol_process_runtime_total_alloc_bytes + +Cumulative bytes allocated for heap objects (see 'go doc runtime.MemStats.TotalAlloc') [alpha] + +| Unit | Metric Type | Value Type | Monotonic | +| ---- | ----------- | ---------- | --------- | +| By | Sum | Int | true | + +### otelcol_process_runtime_total_sys_memory_bytes + +Total bytes of memory obtained from the OS (see 'go doc runtime.MemStats.Sys') [alpha] + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By | Gauge | Int | + +### otelcol_process_uptime + +Uptime of the process [alpha] + +| Unit | Metric Type | Value Type | Monotonic | +| ---- | ----------- | ---------- | --------- | +| s | Sum | Double | true | diff --git a/vendor/go.opentelemetry.io/collector/service/extensions/extensions.go b/vendor/go.opentelemetry.io/collector/service/extensions/extensions.go index e05c5aa01f0..fcefcb33f20 100644 --- a/vendor/go.opentelemetry.io/collector/service/extensions/extensions.go +++ b/vendor/go.opentelemetry.io/collector/service/extensions/extensions.go @@ -10,12 +10,16 @@ import ( "sort" "go.uber.org/multierr" + "go.uber.org/zap" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componentstatus" "go.opentelemetry.io/collector/confmap" "go.opentelemetry.io/collector/extension" + "go.opentelemetry.io/collector/extension/extensioncapabilities" + "go.opentelemetry.io/collector/service/internal/builders" "go.opentelemetry.io/collector/service/internal/components" - 
"go.opentelemetry.io/collector/service/internal/servicetelemetry" + "go.opentelemetry.io/collector/service/internal/status" "go.opentelemetry.io/collector/service/internal/zpages" ) @@ -23,10 +27,11 @@ const zExtensionName = "zextensionname" // Extensions is a map of extensions created from extension configs. type Extensions struct { - telemetry servicetelemetry.TelemetrySettings + telemetry component.TelemetrySettings extMap map[component.ID]extension.Extension - instanceIDs map[component.ID]*component.InstanceID + instanceIDs map[component.ID]*componentstatus.InstanceID extensionIDs []component.ID // start order (and reverse stop order) + reporter status.Reporter } // Start starts all extensions. @@ -37,18 +42,20 @@ func (bes *Extensions) Start(ctx context.Context, host component.Host) error { extLogger.Info("Extension is starting...") instanceID := bes.instanceIDs[extID] ext := bes.extMap[extID] - bes.telemetry.Status.ReportStatus( + bes.reporter.ReportStatus( instanceID, - component.NewStatusEvent(component.StatusStarting), + componentstatus.NewEvent(componentstatus.StatusStarting), ) if err := ext.Start(ctx, host); err != nil { - bes.telemetry.Status.ReportStatus( + bes.reporter.ReportStatus( instanceID, - component.NewPermanentErrorEvent(err), + componentstatus.NewPermanentErrorEvent(err), ) + // We log with zap.AddStacktrace(zap.DPanicLevel) to avoid adding the stack trace to the error log + extLogger.WithOptions(zap.AddStacktrace(zap.DPanicLevel)).Error("Failed to start extension", zap.Error(err)) return err } - bes.telemetry.Status.ReportOKIfStarting(instanceID) + bes.reporter.ReportOKIfStarting(instanceID) extLogger.Info("Extension started.") } return nil @@ -62,21 +69,21 @@ func (bes *Extensions) Shutdown(ctx context.Context) error { extID := bes.extensionIDs[i] instanceID := bes.instanceIDs[extID] ext := bes.extMap[extID] - bes.telemetry.Status.ReportStatus( + bes.reporter.ReportStatus( instanceID, - component.NewStatusEvent(component.StatusStopping), + componentstatus.NewEvent(componentstatus.StatusStopping), ) if err := ext.Shutdown(ctx); err != nil { - bes.telemetry.Status.ReportStatus( + bes.reporter.ReportStatus( instanceID, - component.NewPermanentErrorEvent(err), + componentstatus.NewPermanentErrorEvent(err), ) errs = multierr.Append(errs, err) continue } - bes.telemetry.Status.ReportStatus( + bes.reporter.ReportStatus( instanceID, - component.NewStatusEvent(component.StatusStopped), + componentstatus.NewEvent(componentstatus.StatusStopped), ) } @@ -86,7 +93,7 @@ func (bes *Extensions) Shutdown(ctx context.Context) error { func (bes *Extensions) NotifyPipelineReady() error { for _, extID := range bes.extensionIDs { ext := bes.extMap[extID] - if pw, ok := ext.(extension.PipelineWatcher); ok { + if pw, ok := ext.(extensioncapabilities.PipelineWatcher); ok { if err := pw.Ready(); err != nil { return fmt.Errorf("failed to notify extension %q: %w", extID, err) } @@ -99,7 +106,7 @@ func (bes *Extensions) NotifyPipelineNotReady() error { var errs error for _, extID := range bes.extensionIDs { ext := bes.extMap[extID] - if pw, ok := ext.(extension.PipelineWatcher); ok { + if pw, ok := ext.(extensioncapabilities.PipelineWatcher); ok { errs = multierr.Append(errs, pw.NotReady()) } } @@ -110,7 +117,7 @@ func (bes *Extensions) NotifyConfig(ctx context.Context, conf *confmap.Conf) err var errs error for _, extID := range bes.extensionIDs { ext := bes.extMap[extID] - if cw, ok := ext.(extension.ConfigWatcher); ok { + if cw, ok := ext.(extensioncapabilities.ConfigWatcher); ok { 
clonedConf := confmap.NewFromStringMap(conf.ToStringMap()) errs = multierr.Append(errs, cw.NotifyConfig(ctx, clonedConf)) } @@ -118,10 +125,10 @@ func (bes *Extensions) NotifyConfig(ctx context.Context, conf *confmap.Conf) err return errs } -func (bes *Extensions) NotifyComponentStatusChange(source *component.InstanceID, event *component.StatusEvent) { +func (bes *Extensions) NotifyComponentStatusChange(source *componentstatus.InstanceID, event *componentstatus.Event) { for _, extID := range bes.extensionIDs { ext := bes.extMap[extID] - if sw, ok := ext.(extension.StatusWatcher); ok { + if sw, ok := ext.(componentstatus.Watcher); ok { sw.ComponentStatusChanged(source, event) } } @@ -163,30 +170,51 @@ func (bes *Extensions) HandleZPages(w http.ResponseWriter, r *http.Request) { // Settings holds configuration for building Extensions. type Settings struct { - Telemetry servicetelemetry.TelemetrySettings - BuildInfo component.BuildInfo + Telemetry component.TelemetrySettings + BuildInfo component.BuildInfo + ModuleInfo extension.ModuleInfo // Extensions builder for extensions. - Extensions *extension.Builder + Extensions builders.Extension +} + +type Option interface { + apply(*Extensions) +} + +type optionFunc func(*Extensions) + +func (of optionFunc) apply(e *Extensions) { + of(e) +} + +func WithReporter(reporter status.Reporter) Option { + return optionFunc(func(e *Extensions) { + e.reporter = reporter + }) } // New creates a new Extensions from Config. -func New(ctx context.Context, set Settings, cfg Config) (*Extensions, error) { +func New(ctx context.Context, set Settings, cfg Config, options ...Option) (*Extensions, error) { exts := &Extensions{ telemetry: set.Telemetry, extMap: make(map[component.ID]extension.Extension), - instanceIDs: make(map[component.ID]*component.InstanceID), + instanceIDs: make(map[component.ID]*componentstatus.InstanceID), extensionIDs: make([]component.ID, 0, len(cfg)), + reporter: &nopReporter{}, } + + for _, opt := range options { + opt.apply(exts) + } + for _, extID := range cfg { - instanceID := &component.InstanceID{ - ID: extID, - Kind: component.KindExtension, - } - extSet := extension.CreateSettings{ + instanceID := componentstatus.NewInstanceID(extID, component.KindExtension) + extSet := extension.Settings{ ID: extID, - TelemetrySettings: set.Telemetry.ToComponentTelemetrySettings(instanceID), + TelemetrySettings: set.Telemetry, BuildInfo: set.BuildInfo, + ModuleInfo: set.ModuleInfo, } extSet.TelemetrySettings.Logger = components.ExtensionLogger(set.Telemetry.Logger, extID) @@ -210,3 +238,11 @@ func New(ctx context.Context, set Settings, cfg Config) (*Extensions, error) { exts.extensionIDs = order return exts, nil } + +type nopReporter struct{} + +func (r *nopReporter) Ready() {} + +func (r *nopReporter) ReportStatus(*componentstatus.InstanceID, *componentstatus.Event) {} + +func (r *nopReporter) ReportOKIfStarting(*componentstatus.InstanceID) {} diff --git a/vendor/go.opentelemetry.io/collector/service/extensions/graph.go b/vendor/go.opentelemetry.io/collector/service/extensions/graph.go index 7099d018a1f..b5570b53113 100644 --- a/vendor/go.opentelemetry.io/collector/service/extensions/graph.go +++ b/vendor/go.opentelemetry.io/collector/service/extensions/graph.go @@ -13,7 +13,7 @@ import ( "gonum.org/v1/gonum/graph/topo" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/extension" + "go.opentelemetry.io/collector/extension/extensioncapabilities" ) type node struct { @@ -38,13 +38,13 @@ func computeOrder(exts *Extensions) 
([]component.ID, error) { } for extID, ext := range exts.extMap { n := nodes[extID] - if dep, ok := ext.(extension.Dependent); ok { + if dep, ok := ext.(extensioncapabilities.Dependent); ok { for _, depID := range dep.Dependencies() { - if d, ok := nodes[depID]; ok { - graph.SetEdge(graph.NewEdge(d, n)) - } else { + d, ok := nodes[depID] + if !ok { return nil, fmt.Errorf("unable to find extension %s on which extension %s depends", depID, extID) } + graph.SetEdge(graph.NewEdge(d, n)) } } } @@ -73,5 +73,5 @@ func cycleErr(err error, cycles [][]graph.Node) error { names = append(names, node.extID.String()) } cycleStr := "[" + strings.Join(names, " -> ") + "]" - return fmt.Errorf("unable to order extenions by dependencies, cycle found %s: %w", cycleStr, err) + return fmt.Errorf("unable to order extensions by dependencies, cycle found %s: %w", cycleStr, err) } diff --git a/vendor/go.opentelemetry.io/collector/service/host.go b/vendor/go.opentelemetry.io/collector/service/host.go deleted file mode 100644 index ce8dc530d40..00000000000 --- a/vendor/go.opentelemetry.io/collector/service/host.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package service // import "go.opentelemetry.io/collector/service" - -import ( - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/connector" - "go.opentelemetry.io/collector/exporter" - "go.opentelemetry.io/collector/extension" - "go.opentelemetry.io/collector/processor" - "go.opentelemetry.io/collector/receiver" - "go.opentelemetry.io/collector/service/extensions" - "go.opentelemetry.io/collector/service/internal/graph" -) - -// TODO: remove as part of https://github.com/open-telemetry/opentelemetry-collector/issues/7370 for service 1.0 -type getExporters interface { - GetExporters() map[component.DataType]map[component.ID]component.Component -} - -var _ getExporters = (*serviceHost)(nil) -var _ component.Host = (*serviceHost)(nil) - -type serviceHost struct { - asyncErrorChannel chan error - receivers *receiver.Builder - processors *processor.Builder - exporters *exporter.Builder - connectors *connector.Builder - extensions *extension.Builder - - buildInfo component.BuildInfo - - pipelines *graph.Graph - serviceExtensions *extensions.Extensions -} - -func (host *serviceHost) GetFactory(kind component.Kind, componentType component.Type) component.Factory { - switch kind { - case component.KindReceiver: - return host.receivers.Factory(componentType) - case component.KindProcessor: - return host.processors.Factory(componentType) - case component.KindExporter: - return host.exporters.Factory(componentType) - case component.KindConnector: - return host.connectors.Factory(componentType) - case component.KindExtension: - return host.extensions.Factory(componentType) - } - return nil -} - -func (host *serviceHost) GetExtensions() map[component.ID]component.Component { - return host.serviceExtensions.GetExtensions() -} - -// Deprecated: [0.79.0] This function will be removed in the future. -// Several components in the contrib repository use this function so it cannot be removed -// before those cases are removed. In most cases, use of this function can be replaced by a -// connector. See https://github.com/open-telemetry/opentelemetry-collector/issues/7370 and -// https://github.com/open-telemetry/opentelemetry-collector/pull/7390#issuecomment-1483710184 -// for additional information. 
-func (host *serviceHost) GetExporters() map[component.DataType]map[component.ID]component.Component { - return host.pipelines.GetExporters() -} - -func (host *serviceHost) notifyComponentStatusChange(source *component.InstanceID, event *component.StatusEvent) { - host.serviceExtensions.NotifyComponentStatusChange(source, event) - if event.Status() == component.StatusFatalError { - host.asyncErrorChannel <- event.Err() - } -} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/builders/builders.go b/vendor/go.opentelemetry.io/collector/service/internal/builders/builders.go new file mode 100644 index 00000000000..bf5844bd932 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/service/internal/builders/builders.go @@ -0,0 +1,28 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package builders // import "go.opentelemetry.io/collector/service/internal/builders" + +import ( + "errors" + + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" +) + +var ( + errNilNextConsumer = errors.New("nil next Consumer") + nopType = component.MustNewType("nop") +) + +// logStabilityLevel logs the stability level of a component. The log level is set to info for +// undefined, unmaintained, deprecated and development. The log level is set to debug +// for alpha, beta and stable. +func logStabilityLevel(logger *zap.Logger, sl component.StabilityLevel) { + if sl >= component.StabilityLevelAlpha { + logger.Debug(sl.LogMessage()) + } else { + logger.Info(sl.LogMessage()) + } +} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/builders/connector.go b/vendor/go.opentelemetry.io/collector/service/internal/builders/connector.go new file mode 100644 index 00000000000..b157d2dd64a --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/service/internal/builders/connector.go @@ -0,0 +1,399 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package builders // import "go.opentelemetry.io/collector/service/internal/builders" + +import ( + "context" + "fmt" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/connector" + "go.opentelemetry.io/collector/connector/connectortest" + "go.opentelemetry.io/collector/connector/xconnector" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/xconsumer" + "go.opentelemetry.io/collector/pipeline" + "go.opentelemetry.io/collector/pipeline/xpipeline" +) + +func errDataTypes(id component.ID, from, to pipeline.Signal) error { + return fmt.Errorf("connector %q cannot connect from %s to %s: %w", id, from, to, pipeline.ErrSignalNotSupported) +} + +// ConnectorBuilder is a helper struct that given a set of Configs and Factories helps with creating connectors. +type ConnectorBuilder struct { + cfgs map[component.ID]component.Config + factories map[component.Type]connector.Factory +} + +// NewConnector creates a new ConnectorBuilder to help with creating components form a set of configs and factories. +func NewConnector(cfgs map[component.ID]component.Config, factories map[component.Type]connector.Factory) *ConnectorBuilder { + return &ConnectorBuilder{cfgs: cfgs, factories: factories} +} + +// CreateTracesToTraces creates a Traces connector based on the settings and config. 
+func (b *ConnectorBuilder) CreateTracesToTraces(ctx context.Context, set connector.Settings, next consumer.Traces) (connector.Traces, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("connector %q is not configured", set.ID) + } + + f, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("connector factory not available for: %q", set.ID) + } + + logStabilityLevel(set.Logger, f.TracesToTracesStability()) + return f.CreateTracesToTraces(ctx, set, cfg, next) +} + +// CreateTracesToMetrics creates a Traces connector based on the settings and config. +func (b *ConnectorBuilder) CreateTracesToMetrics(ctx context.Context, set connector.Settings, next consumer.Metrics) (connector.Traces, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("connector %q is not configured", set.ID) + } + + f, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("connector factory not available for: %q", set.ID) + } + + logStabilityLevel(set.Logger, f.TracesToMetricsStability()) + return f.CreateTracesToMetrics(ctx, set, cfg, next) +} + +// CreateTracesToLogs creates a Traces connector based on the settings and config. +func (b *ConnectorBuilder) CreateTracesToLogs(ctx context.Context, set connector.Settings, next consumer.Logs) (connector.Traces, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("connector %q is not configured", set.ID) + } + + f, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("connector factory not available for: %q", set.ID) + } + + logStabilityLevel(set.Logger, f.TracesToLogsStability()) + return f.CreateTracesToLogs(ctx, set, cfg, next) +} + +// CreateTracesToProfiles creates a Traces connector based on the settings and config. +func (b *ConnectorBuilder) CreateTracesToProfiles(ctx context.Context, set connector.Settings, next xconsumer.Profiles) (connector.Traces, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("connector %q is not configured", set.ID) + } + + connFact, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("connector factory not available for: %q", set.ID) + } + + f, ok := connFact.(xconnector.Factory) + if !ok { + return nil, errDataTypes(set.ID, pipeline.SignalTraces, xpipeline.SignalProfiles) + } + + logStabilityLevel(set.Logger, f.TracesToProfilesStability()) + return f.CreateTracesToProfiles(ctx, set, cfg, next) +} + +// CreateMetricsToTraces creates a Metrics connector based on the settings and config. 
+func (b *ConnectorBuilder) CreateMetricsToTraces(ctx context.Context, set connector.Settings, next consumer.Traces) (connector.Metrics, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("connector %q is not configured", set.ID) + } + + f, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("connector factory not available for: %q", set.ID) + } + + logStabilityLevel(set.Logger, f.MetricsToTracesStability()) + return f.CreateMetricsToTraces(ctx, set, cfg, next) +} + +// CreateMetricsToMetrics creates a Metrics connector based on the settings and config. +func (b *ConnectorBuilder) CreateMetricsToMetrics(ctx context.Context, set connector.Settings, next consumer.Metrics) (connector.Metrics, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("connector %q is not configured", set.ID) + } + + f, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("connector factory not available for: %q", set.ID) + } + + logStabilityLevel(set.Logger, f.MetricsToMetricsStability()) + return f.CreateMetricsToMetrics(ctx, set, cfg, next) +} + +// CreateMetricsToLogs creates a Metrics connector based on the settings and config. +func (b *ConnectorBuilder) CreateMetricsToLogs(ctx context.Context, set connector.Settings, next consumer.Logs) (connector.Metrics, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("connector %q is not configured", set.ID) + } + + f, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("connector factory not available for: %q", set.ID) + } + + logStabilityLevel(set.Logger, f.MetricsToLogsStability()) + return f.CreateMetricsToLogs(ctx, set, cfg, next) +} + +// CreateMetricsToProfiles creates a Metrics connector based on the settings and config. +func (b *ConnectorBuilder) CreateMetricsToProfiles(ctx context.Context, set connector.Settings, next xconsumer.Profiles) (connector.Metrics, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("connector %q is not configured", set.ID) + } + + connFact, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("connector factory not available for: %q", set.ID) + } + + f, ok := connFact.(xconnector.Factory) + if !ok { + return nil, errDataTypes(set.ID, pipeline.SignalMetrics, xpipeline.SignalProfiles) + } + + logStabilityLevel(set.Logger, f.MetricsToProfilesStability()) + return f.CreateMetricsToProfiles(ctx, set, cfg, next) +} + +// CreateLogsToTraces creates a Logs connector based on the settings and config. 
+func (b *ConnectorBuilder) CreateLogsToTraces(ctx context.Context, set connector.Settings, next consumer.Traces) (connector.Logs, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("connector %q is not configured", set.ID) + } + + f, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("connector factory not available for: %q", set.ID) + } + + logStabilityLevel(set.Logger, f.LogsToTracesStability()) + return f.CreateLogsToTraces(ctx, set, cfg, next) +} + +// CreateLogsToMetrics creates a Logs connector based on the settings and config. +func (b *ConnectorBuilder) CreateLogsToMetrics(ctx context.Context, set connector.Settings, next consumer.Metrics) (connector.Logs, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("connector %q is not configured", set.ID) + } + + f, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("connector factory not available for: %q", set.ID) + } + + logStabilityLevel(set.Logger, f.LogsToMetricsStability()) + return f.CreateLogsToMetrics(ctx, set, cfg, next) +} + +// CreateLogsToLogs creates a Logs connector based on the settings and config. +func (b *ConnectorBuilder) CreateLogsToLogs(ctx context.Context, set connector.Settings, next consumer.Logs) (connector.Logs, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("connector %q is not configured", set.ID) + } + + f, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("connector factory not available for: %q", set.ID) + } + + logStabilityLevel(set.Logger, f.LogsToLogsStability()) + return f.CreateLogsToLogs(ctx, set, cfg, next) +} + +// CreateLogsToProfiles creates a Logs connector based on the settings and config. +func (b *ConnectorBuilder) CreateLogsToProfiles(ctx context.Context, set connector.Settings, next xconsumer.Profiles) (connector.Logs, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("connector %q is not configured", set.ID) + } + + connFact, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("connector factory not available for: %q", set.ID) + } + + f, ok := connFact.(xconnector.Factory) + if !ok { + return nil, errDataTypes(set.ID, pipeline.SignalLogs, xpipeline.SignalProfiles) + } + + logStabilityLevel(set.Logger, f.LogsToProfilesStability()) + return f.CreateLogsToProfiles(ctx, set, cfg, next) +} + +// CreateProfilesToTraces creates a Profiles connector based on the settings and config. 
+func (b *ConnectorBuilder) CreateProfilesToTraces(ctx context.Context, set connector.Settings, next consumer.Traces) (xconnector.Profiles, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("connector %q is not configured", set.ID) + } + + connFact, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("connector factory not available for: %q", set.ID) + } + + f, ok := connFact.(xconnector.Factory) + if !ok { + return nil, errDataTypes(set.ID, xpipeline.SignalProfiles, pipeline.SignalTraces) + } + + logStabilityLevel(set.Logger, f.ProfilesToTracesStability()) + return f.CreateProfilesToTraces(ctx, set, cfg, next) +} + +// CreateProfilesToMetrics creates a Profiles connector based on the settings and config. +func (b *ConnectorBuilder) CreateProfilesToMetrics(ctx context.Context, set connector.Settings, next consumer.Metrics) (xconnector.Profiles, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("connector %q is not configured", set.ID) + } + + connFact, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("connector factory not available for: %q", set.ID) + } + + f, ok := connFact.(xconnector.Factory) + if !ok { + return nil, errDataTypes(set.ID, xpipeline.SignalProfiles, pipeline.SignalMetrics) + } + + logStabilityLevel(set.Logger, f.ProfilesToMetricsStability()) + return f.CreateProfilesToMetrics(ctx, set, cfg, next) +} + +// CreateProfilesToLogs creates a Profiles connector based on the settings and config. +func (b *ConnectorBuilder) CreateProfilesToLogs(ctx context.Context, set connector.Settings, next consumer.Logs) (xconnector.Profiles, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("connector %q is not configured", set.ID) + } + + connFact, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("connector factory not available for: %q", set.ID) + } + + f, ok := connFact.(xconnector.Factory) + if !ok { + return nil, errDataTypes(set.ID, xpipeline.SignalProfiles, pipeline.SignalLogs) + } + + logStabilityLevel(set.Logger, f.ProfilesToLogsStability()) + return f.CreateProfilesToLogs(ctx, set, cfg, next) +} + +// CreateProfilesToProfiles creates a Profiles connector based on the settings and config. 
+func (b *ConnectorBuilder) CreateProfilesToProfiles(ctx context.Context, set connector.Settings, next xconsumer.Profiles) (xconnector.Profiles, error) {
+	if next == nil {
+		return nil, errNilNextConsumer
+	}
+	cfg, existsCfg := b.cfgs[set.ID]
+	if !existsCfg {
+		return nil, fmt.Errorf("connector %q is not configured", set.ID)
+	}
+
+	connFact, existsFactory := b.factories[set.ID.Type()]
+	if !existsFactory {
+		return nil, fmt.Errorf("connector factory not available for: %q", set.ID)
+	}
+
+	f, ok := connFact.(xconnector.Factory)
+	if !ok {
+		return nil, errDataTypes(set.ID, xpipeline.SignalProfiles, xpipeline.SignalProfiles)
+	}
+
+	logStabilityLevel(set.Logger, f.ProfilesToProfilesStability())
+	return f.CreateProfilesToProfiles(ctx, set, cfg, next)
+}
+
+func (b *ConnectorBuilder) IsConfigured(componentID component.ID) bool {
+	_, ok := b.cfgs[componentID]
+	return ok
+}
+
+func (b *ConnectorBuilder) Factory(componentType component.Type) component.Factory {
+	return b.factories[componentType]
+}
+
+// NewNopConnectorConfigsAndFactories returns a configuration and factories that allows building a new nop connector.
+func NewNopConnectorConfigsAndFactories() (map[component.ID]component.Config, map[component.Type]connector.Factory) {
+	nopFactory := connectortest.NewNopFactory()
+	// Use a different ID than receivertest and exportertest to avoid ambiguous
+	// configuration scenarios. Ambiguous IDs are detected in the 'otelcol' package,
+	// but lower level packages such as 'service' assume that IDs are disambiguated.
+	connID := component.NewIDWithName(nopType, "conn")
+
+	configs := map[component.ID]component.Config{
+		connID: nopFactory.CreateDefaultConfig(),
+	}
+	factories := map[component.Type]connector.Factory{
+		nopType: nopFactory,
+	}
+
+	return configs, factories
+}
diff --git a/vendor/go.opentelemetry.io/collector/service/internal/builders/exporter.go b/vendor/go.opentelemetry.io/collector/service/internal/builders/exporter.go
new file mode 100644
index 00000000000..6570828b76f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/service/internal/builders/exporter.go
@@ -0,0 +1,112 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package builders // import "go.opentelemetry.io/collector/service/internal/builders"
+
+import (
+	"context"
+	"fmt"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/exporter"
+	"go.opentelemetry.io/collector/exporter/exportertest"
+	"go.opentelemetry.io/collector/exporter/xexporter"
+	"go.opentelemetry.io/collector/pipeline"
+)
+
+// ExporterBuilder is a helper struct that given a set of Configs and Factories helps with creating exporters.
+type ExporterBuilder struct {
+	cfgs      map[component.ID]component.Config
+	factories map[component.Type]exporter.Factory
+}
+
+// NewExporter creates a new ExporterBuilder to help with creating components from a set of configs and factories.
+func NewExporter(cfgs map[component.ID]component.Config, factories map[component.Type]exporter.Factory) *ExporterBuilder {
+	return &ExporterBuilder{cfgs: cfgs, factories: factories}
+}
+
+// CreateTraces creates a Traces exporter based on the settings and config.
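The comment in NewNopConnectorConfigsAndFactories is worth unpacking: receivertest and exportertest register their nop components under the bare "nop" ID, so the connector takes a named variant to stay unambiguous. A small runnable illustration, using only the public component API shown in the patch (editorial aside, not vendored code):

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/component"
)

func main() {
	nopType := component.MustNewType("nop")
	plain := component.NewID(nopType)                 // as used by the nop receiver and exporter
	named := component.NewIDWithName(nopType, "conn") // as used by the nop connector
	fmt.Println(plain.String(), named.String())       // nop nop/conn
}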
+func (b *ExporterBuilder) CreateTraces(ctx context.Context, set exporter.Settings) (exporter.Traces, error) { + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("exporter %q is not configured", set.ID) + } + + f, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("exporter factory not available for: %q", set.ID) + } + + logStabilityLevel(set.Logger, f.TracesStability()) + return f.CreateTraces(ctx, set, cfg) +} + +// CreateMetrics creates a Metrics exporter based on the settings and config. +func (b *ExporterBuilder) CreateMetrics(ctx context.Context, set exporter.Settings) (exporter.Metrics, error) { + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("exporter %q is not configured", set.ID) + } + + f, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("exporter factory not available for: %q", set.ID) + } + + logStabilityLevel(set.Logger, f.MetricsStability()) + return f.CreateMetrics(ctx, set, cfg) +} + +// CreateLogs creates a Logs exporter based on the settings and config. +func (b *ExporterBuilder) CreateLogs(ctx context.Context, set exporter.Settings) (exporter.Logs, error) { + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("exporter %q is not configured", set.ID) + } + + f, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("exporter factory not available for: %q", set.ID) + } + + logStabilityLevel(set.Logger, f.LogsStability()) + return f.CreateLogs(ctx, set, cfg) +} + +// CreateProfiles creates a Profiles exporter based on the settings and config. +func (b *ExporterBuilder) CreateProfiles(ctx context.Context, set exporter.Settings) (xexporter.Profiles, error) { + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("exporter %q is not configured", set.ID) + } + + expFact, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("exporter factory not available for: %q", set.ID) + } + + f, ok := expFact.(xexporter.Factory) + if !ok { + return nil, pipeline.ErrSignalNotSupported + } + + logStabilityLevel(set.Logger, f.ProfilesStability()) + return f.CreateProfiles(ctx, set, cfg) +} + +func (b *ExporterBuilder) Factory(componentType component.Type) component.Factory { + return b.factories[componentType] +} + +// NewNopExporterConfigsAndFactories returns a configuration and factories that allows building a new nop exporter. 
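CreateProfiles above differs from its siblings in one way: profiles support is optional, so the base exporter.Factory is type-asserted to the experimental xexporter.Factory, and a failed assertion maps to pipeline.ErrSignalNotSupported. The sketch below uses hypothetical local types of the same shape to show this interface-upgrade gate in isolation:

package main

import (
	"errors"
	"fmt"
)

type factory interface{ Type() string }

// xFactory is the optional, experimental extension of factory.
type xFactory interface {
	factory
	CreateProfiles() (string, error)
}

type basicFactory struct{}

func (basicFactory) Type() string { return "basic" }

var errSignalNotSupported = errors.New("telemetry type is not supported")

func createProfiles(f factory) (string, error) {
	xf, ok := f.(xFactory)
	if !ok {
		// Same outcome as returning pipeline.ErrSignalNotSupported above.
		return "", errSignalNotSupported
	}
	return xf.CreateProfiles()
}

func main() {
	_, err := createProfiles(basicFactory{})
	fmt.Println(err) // telemetry type is not supported
}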
+func NewNopExporterConfigsAndFactories() (map[component.ID]component.Config, map[component.Type]exporter.Factory) {
+	nopFactory := exportertest.NewNopFactory()
+	configs := map[component.ID]component.Config{
+		component.NewID(nopType): nopFactory.CreateDefaultConfig(),
+	}
+	factories := map[component.Type]exporter.Factory{
+		nopType: nopFactory,
+	}
+
+	return configs, factories
+}
diff --git a/vendor/go.opentelemetry.io/collector/service/internal/builders/extension.go b/vendor/go.opentelemetry.io/collector/service/internal/builders/extension.go
new file mode 100644
index 00000000000..6ac32fe5698
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/service/internal/builders/extension.go
@@ -0,0 +1,69 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package builders // import "go.opentelemetry.io/collector/service/internal/builders"
+
+import (
+	"context"
+	"fmt"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/extension"
+	"go.opentelemetry.io/collector/extension/extensiontest"
+)
+
+// Extension is an interface that allows using implementations of the builder
+// from different packages.
+type Extension interface {
+	Create(context.Context, extension.Settings) (extension.Extension, error)
+	Factory(component.Type) component.Factory
+}
+
+// ExtensionBuilder is a helper struct that given a set of Configs and Factories helps with creating extensions.
+type ExtensionBuilder struct {
+	cfgs      map[component.ID]component.Config
+	factories map[component.Type]extension.Factory
+}
+
+// NewExtension creates a new ExtensionBuilder to help with creating
+// components from a set of configs and factories.
+func NewExtension(cfgs map[component.ID]component.Config, factories map[component.Type]extension.Factory) *ExtensionBuilder {
+	return &ExtensionBuilder{cfgs: cfgs, factories: factories}
+}
+
+// Create creates an extension based on the settings and configs available.
+func (b *ExtensionBuilder) Create(ctx context.Context, set extension.Settings) (extension.Extension, error) {
+	cfg, existsCfg := b.cfgs[set.ID]
+	if !existsCfg {
+		return nil, fmt.Errorf("extension %q is not configured", set.ID)
+	}
+
+	f, existsFactory := b.factories[set.ID.Type()]
+	if !existsFactory {
+		return nil, fmt.Errorf("extension factory not available for: %q", set.ID)
+	}
+
+	sl := f.Stability()
+	if sl >= component.StabilityLevelAlpha {
+		set.Logger.Debug(sl.LogMessage())
+	} else {
+		set.Logger.Info(sl.LogMessage())
+	}
+	return f.Create(ctx, set, cfg)
+}
+
+func (b *ExtensionBuilder) Factory(componentType component.Type) component.Factory {
+	return b.factories[componentType]
+}
+
+// NewNopExtensionConfigsAndFactories returns a configuration and factories that allows building a new nop extension.
+func NewNopExtensionConfigsAndFactories() (map[component.ID]component.Config, map[component.Type]extension.Factory) {
+	nopFactory := extensiontest.NewNopFactory()
+	configs := map[component.ID]component.Config{
+		component.NewID(nopType): nopFactory.CreateDefaultConfig(),
+	}
+	factories := map[component.Type]extension.Factory{
+		nopType: nopFactory,
+	}
+	return configs, factories
+}
diff --git a/vendor/go.opentelemetry.io/collector/service/internal/builders/processor.go b/vendor/go.opentelemetry.io/collector/service/internal/builders/processor.go
new file mode 100644
index 00000000000..c0df0f3b575
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/service/internal/builders/processor.go
@@ -0,0 +1,126 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package builders // import "go.opentelemetry.io/collector/service/internal/builders"
+
+import (
+	"context"
+	"fmt"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/consumer"
+	"go.opentelemetry.io/collector/consumer/xconsumer"
+	"go.opentelemetry.io/collector/pipeline"
+	"go.opentelemetry.io/collector/processor"
+	"go.opentelemetry.io/collector/processor/processortest"
+	"go.opentelemetry.io/collector/processor/xprocessor"
+)
+
+// ProcessorBuilder is a helper struct that given a set of Configs
+// and Factories helps with creating processors.
+type ProcessorBuilder struct {
+	cfgs      map[component.ID]component.Config
+	factories map[component.Type]processor.Factory
+}
+
+// NewProcessor creates a new ProcessorBuilder to help with creating components from a set of configs and factories.
+func NewProcessor(cfgs map[component.ID]component.Config, factories map[component.Type]processor.Factory) *ProcessorBuilder {
+	return &ProcessorBuilder{cfgs: cfgs, factories: factories}
+}
+
+// CreateTraces creates a Traces processor based on the settings and config.
+func (b *ProcessorBuilder) CreateTraces(ctx context.Context, set processor.Settings, next consumer.Traces) (processor.Traces, error) {
+	if next == nil {
+		return nil, errNilNextConsumer
+	}
+	cfg, existsCfg := b.cfgs[set.ID]
+	if !existsCfg {
+		return nil, fmt.Errorf("processor %q is not configured", set.ID)
+	}
+
+	f, existsFactory := b.factories[set.ID.Type()]
+	if !existsFactory {
+		return nil, fmt.Errorf("processor factory not available for: %q", set.ID)
+	}
+
+	logStabilityLevel(set.Logger, f.TracesStability())
+	return f.CreateTraces(ctx, set, cfg, next)
+}
+
+// CreateMetrics creates a Metrics processor based on the settings and config.
+func (b *ProcessorBuilder) CreateMetrics(ctx context.Context, set processor.Settings, next consumer.Metrics) (processor.Metrics, error) {
+	if next == nil {
+		return nil, errNilNextConsumer
+	}
+	cfg, existsCfg := b.cfgs[set.ID]
+	if !existsCfg {
+		return nil, fmt.Errorf("processor %q is not configured", set.ID)
+	}
+
+	f, existsFactory := b.factories[set.ID.Type()]
+	if !existsFactory {
+		return nil, fmt.Errorf("processor factory not available for: %q", set.ID)
+	}
+
+	logStabilityLevel(set.Logger, f.MetricsStability())
+	return f.CreateMetrics(ctx, set, cfg, next)
+}
+
+// CreateLogs creates a Logs processor based on the settings and config.
+func (b *ProcessorBuilder) CreateLogs(ctx context.Context, set processor.Settings, next consumer.Logs) (processor.Logs, error) {
+	if next == nil {
+		return nil, errNilNextConsumer
+	}
+	cfg, existsCfg := b.cfgs[set.ID]
+	if !existsCfg {
+		return nil, fmt.Errorf("processor %q is not configured", set.ID)
+	}
+
+	f, existsFactory := b.factories[set.ID.Type()]
+	if !existsFactory {
+		return nil, fmt.Errorf("processor factory not available for: %q", set.ID)
+	}
+
+	logStabilityLevel(set.Logger, f.LogsStability())
+	return f.CreateLogs(ctx, set, cfg, next)
}
+
+// CreateProfiles creates a Profiles processor based on the settings and config.
+func (b *ProcessorBuilder) CreateProfiles(ctx context.Context, set processor.Settings, next xconsumer.Profiles) (xprocessor.Profiles, error) {
+	if next == nil {
+		return nil, errNilNextConsumer
+	}
+	cfg, existsCfg := b.cfgs[set.ID]
+	if !existsCfg {
+		return nil, fmt.Errorf("processor %q is not configured", set.ID)
+	}
+
+	procFact, existsFactory := b.factories[set.ID.Type()]
+	if !existsFactory {
+		return nil, fmt.Errorf("processor factory not available for: %q", set.ID)
+	}
+
+	f, ok := procFact.(xprocessor.Factory)
+	if !ok {
+		return nil, pipeline.ErrSignalNotSupported
+	}
+	logStabilityLevel(set.Logger, f.ProfilesStability())
+	return f.CreateProfiles(ctx, set, cfg, next)
+}
+
+func (b *ProcessorBuilder) Factory(componentType component.Type) component.Factory {
+	return b.factories[componentType]
+}
+
+// NewNopProcessorConfigsAndFactories returns a configuration and factories that allows building a new nop processor.
+func NewNopProcessorConfigsAndFactories() (map[component.ID]component.Config, map[component.Type]processor.Factory) {
+	nopFactory := processortest.NewNopFactory()
+	configs := map[component.ID]component.Config{
+		component.NewID(nopType): nopFactory.CreateDefaultConfig(),
+	}
+	factories := map[component.Type]processor.Factory{
+		nopType: nopFactory,
+	}
+
+	return configs, factories
+}
diff --git a/vendor/go.opentelemetry.io/collector/service/internal/builders/receiver.go b/vendor/go.opentelemetry.io/collector/service/internal/builders/receiver.go
new file mode 100644
index 00000000000..007d9be2187
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/service/internal/builders/receiver.go
@@ -0,0 +1,128 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package builders // import "go.opentelemetry.io/collector/service/internal/builders"
+
+import (
+	"context"
+	"fmt"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/consumer"
+	"go.opentelemetry.io/collector/consumer/xconsumer"
+	"go.opentelemetry.io/collector/pipeline"
+	"go.opentelemetry.io/collector/receiver"
+	"go.opentelemetry.io/collector/receiver/receivertest"
+	"go.opentelemetry.io/collector/receiver/xreceiver"
+)
+
+// ReceiverBuilder is a helper struct that given a set of Configs and
+// Factories helps with creating receivers.
+type ReceiverBuilder struct {
+	cfgs      map[component.ID]component.Config
+	factories map[component.Type]receiver.Factory
+}
+
+// NewReceiver creates a new ReceiverBuilder to help with creating
+// components from a set of configs and factories.
+func NewReceiver(cfgs map[component.ID]component.Config, factories map[component.Type]receiver.Factory) *ReceiverBuilder {
+	return &ReceiverBuilder{cfgs: cfgs, factories: factories}
+}
+
+// CreateTraces creates a Traces receiver based on the settings and config.
+func (b *ReceiverBuilder) CreateTraces(ctx context.Context, set receiver.Settings, next consumer.Traces) (receiver.Traces, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("receiver %q is not configured", set.ID) + } + + f, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("receiver factory not available for: %q", set.ID) + } + + logStabilityLevel(set.Logger, f.TracesStability()) + return f.CreateTraces(ctx, set, cfg, next) +} + +// CreateMetrics creates a Metrics receiver based on the settings and config. +func (b *ReceiverBuilder) CreateMetrics(ctx context.Context, set receiver.Settings, next consumer.Metrics) (receiver.Metrics, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("receiver %q is not configured", set.ID) + } + + f, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("receiver factory not available for: %q", set.ID) + } + + logStabilityLevel(set.Logger, f.MetricsStability()) + return f.CreateMetrics(ctx, set, cfg, next) +} + +// CreateLogs creates a Logs receiver based on the settings and config. +func (b *ReceiverBuilder) CreateLogs(ctx context.Context, set receiver.Settings, next consumer.Logs) (receiver.Logs, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("receiver %q is not configured", set.ID) + } + + f, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("receiver factory not available for: %q", set.ID) + } + + logStabilityLevel(set.Logger, f.LogsStability()) + return f.CreateLogs(ctx, set, cfg, next) +} + +// CreateProfiles creates a Profiles receiver based on the settings and config. +func (b *ReceiverBuilder) CreateProfiles(ctx context.Context, set receiver.Settings, next xconsumer.Profiles) (xreceiver.Profiles, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("receiver %q is not configured", set.ID) + } + + recvFact, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("receiver factory not available for: %q", set.ID) + } + + f, ok := recvFact.(xreceiver.Factory) + if !ok { + return nil, pipeline.ErrSignalNotSupported + } + + logStabilityLevel(set.Logger, f.ProfilesStability()) + return f.CreateProfiles(ctx, set, cfg, next) +} + +func (b *ReceiverBuilder) Factory(componentType component.Type) component.Factory { + return b.factories[componentType] +} + +// NewNopReceiverConfigsAndFactories returns a configuration and factories that allows building a new nop receiver. 
+func NewNopReceiverConfigsAndFactories() (map[component.ID]component.Config, map[component.Type]receiver.Factory) { + nopFactory := receivertest.NewNopFactory() + configs := map[component.ID]component.Config{ + component.NewID(nopType): nopFactory.CreateDefaultConfig(), + } + factories := map[component.Type]receiver.Factory{ + nopType: nopFactory, + } + + return configs, factories +} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/capabilityconsumer/capabilities.go b/vendor/go.opentelemetry.io/collector/service/internal/capabilityconsumer/capabilities.go index 0b700a45e54..6e867eef54e 100644 --- a/vendor/go.opentelemetry.io/collector/service/internal/capabilityconsumer/capabilities.go +++ b/vendor/go.opentelemetry.io/collector/service/internal/capabilityconsumer/capabilities.go @@ -5,13 +5,14 @@ package capabilityconsumer // import "go.opentelemetry.io/collector/service/inte import ( "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/xconsumer" ) -func NewLogs(logs consumer.Logs, cap consumer.Capabilities) consumer.Logs { - if logs.Capabilities() == cap { +func NewLogs(logs consumer.Logs, capabilities consumer.Capabilities) consumer.Logs { + if logs.Capabilities() == capabilities { return logs } - return capLogs{Logs: logs, cap: cap} + return capLogs{Logs: logs, cap: capabilities} } type capLogs struct { @@ -23,11 +24,11 @@ func (mts capLogs) Capabilities() consumer.Capabilities { return mts.cap } -func NewMetrics(metrics consumer.Metrics, cap consumer.Capabilities) consumer.Metrics { - if metrics.Capabilities() == cap { +func NewMetrics(metrics consumer.Metrics, capabilities consumer.Capabilities) consumer.Metrics { + if metrics.Capabilities() == capabilities { return metrics } - return capMetrics{Metrics: metrics, cap: cap} + return capMetrics{Metrics: metrics, cap: capabilities} } type capMetrics struct { @@ -39,11 +40,11 @@ func (mts capMetrics) Capabilities() consumer.Capabilities { return mts.cap } -func NewTraces(traces consumer.Traces, cap consumer.Capabilities) consumer.Traces { - if traces.Capabilities() == cap { +func NewTraces(traces consumer.Traces, capabilities consumer.Capabilities) consumer.Traces { + if traces.Capabilities() == capabilities { return traces } - return capTraces{Traces: traces, cap: cap} + return capTraces{Traces: traces, cap: capabilities} } type capTraces struct { @@ -54,3 +55,19 @@ type capTraces struct { func (mts capTraces) Capabilities() consumer.Capabilities { return mts.cap } + +func NewProfiles(profiles xconsumer.Profiles, capabilities consumer.Capabilities) xconsumer.Profiles { + if profiles.Capabilities() == capabilities { + return profiles + } + return capProfiles{Profiles: profiles, cap: capabilities} +} + +type capProfiles struct { + xconsumer.Profiles + cap consumer.Capabilities +} + +func (mts capProfiles) Capabilities() consumer.Capabilities { + return mts.cap +} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/components/loggers.go b/vendor/go.opentelemetry.io/collector/service/internal/components/loggers.go index f4178977b2d..f02d19fb082 100644 --- a/vendor/go.opentelemetry.io/collector/service/internal/components/loggers.go +++ b/vendor/go.opentelemetry.io/collector/service/internal/components/loggers.go @@ -9,6 +9,7 @@ import ( "go.uber.org/zap" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/pipeline" ) const ( @@ -21,21 +22,21 @@ const ( zapReceiverInPipeline = "receiver_in_pipeline" ) -func ReceiverLogger(logger *zap.Logger, id 
component.ID, dt component.DataType) *zap.Logger { +func ReceiverLogger(logger *zap.Logger, id component.ID, dt pipeline.Signal) *zap.Logger { return logger.With( zap.String(zapKindKey, strings.ToLower(component.KindReceiver.String())), zap.String(zapNameKey, id.String()), zap.String(zapDataTypeKey, dt.String())) } -func ProcessorLogger(logger *zap.Logger, id component.ID, pipelineID component.ID) *zap.Logger { +func ProcessorLogger(logger *zap.Logger, id component.ID, pipelineID pipeline.ID) *zap.Logger { return logger.With( zap.String(zapKindKey, strings.ToLower(component.KindProcessor.String())), zap.String(zapNameKey, id.String()), zap.String(zapPipelineKey, pipelineID.String())) } -func ExporterLogger(logger *zap.Logger, id component.ID, dt component.DataType) *zap.Logger { +func ExporterLogger(logger *zap.Logger, id component.ID, dt pipeline.Signal) *zap.Logger { return logger.With( zap.String(zapKindKey, strings.ToLower(component.KindExporter.String())), zap.String(zapDataTypeKey, dt.String()), @@ -48,7 +49,7 @@ func ExtensionLogger(logger *zap.Logger, id component.ID) *zap.Logger { zap.String(zapNameKey, id.String())) } -func ConnectorLogger(logger *zap.Logger, id component.ID, expDT, rcvDT component.DataType) *zap.Logger { +func ConnectorLogger(logger *zap.Logger, id component.ID, expDT, rcvDT pipeline.Signal) *zap.Logger { return logger.With( zap.String(zapKindKey, strings.ToLower(component.KindConnector.String())), zap.String(zapNameKey, id.String()), diff --git a/vendor/go.opentelemetry.io/collector/service/internal/graph/capabilities.go b/vendor/go.opentelemetry.io/collector/service/internal/graph/capabilities.go new file mode 100644 index 00000000000..128e6fde926 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/service/internal/graph/capabilities.go @@ -0,0 +1,40 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package graph // import "go.opentelemetry.io/collector/service/internal/graph" + +import ( + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/xconsumer" + "go.opentelemetry.io/collector/pipeline" +) + +const capabilitiesSeed = "capabilities" + +var _ consumerNode = (*capabilitiesNode)(nil) + +// Every pipeline has a "virtual" capabilities node immediately after the receiver(s). +// There are two purposes for this node: +// 1. Present aggregated capabilities to receivers, such as whether the pipeline mutates data. +// 2. Present a consistent "first consumer" for each pipeline. +// The nodeID is derived from "pipeline ID". 
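The capabilityconsumer changes above rename the shadowing cap parameter and add a profiles variant, but the pattern itself is unchanged: embed the real consumer and override only Capabilities(). A runnable miniature of that wrapper, using the public consumer and consumertest packages (editorial aside, not vendored code):

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/consumer"
	"go.opentelemetry.io/collector/consumer/consumertest"
)

// capTraces mirrors the capabilityconsumer wrapper: embed the real
// consumer, override only the reported capabilities.
type capTraces struct {
	consumer.Traces
	cap consumer.Capabilities
}

func (t capTraces) Capabilities() consumer.Capabilities { return t.cap }

func main() {
	base := consumertest.NewNop() // reports MutatesData: false
	wrapped := capTraces{Traces: base, cap: consumer.Capabilities{MutatesData: true}}
	fmt.Println(base.Capabilities().MutatesData, wrapped.Capabilities().MutatesData) // false true
}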
+type capabilitiesNode struct { + nodeID + pipelineID pipeline.ID + baseConsumer + consumer.ConsumeTracesFunc + consumer.ConsumeMetricsFunc + consumer.ConsumeLogsFunc + xconsumer.ConsumeProfilesFunc +} + +func newCapabilitiesNode(pipelineID pipeline.ID) *capabilitiesNode { + return &capabilitiesNode{ + nodeID: newNodeID(capabilitiesSeed, pipelineID.String()), + pipelineID: pipelineID, + } +} + +func (n *capabilitiesNode) getConsumer() baseConsumer { + return n +} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/graph/connector.go b/vendor/go.opentelemetry.io/collector/service/internal/graph/connector.go new file mode 100644 index 00000000000..1f654454ee6 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/service/internal/graph/connector.go @@ -0,0 +1,220 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package graph // import "go.opentelemetry.io/collector/service/internal/graph" + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/connector" + "go.opentelemetry.io/collector/connector/xconnector" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/xconsumer" + "go.opentelemetry.io/collector/pipeline" + "go.opentelemetry.io/collector/pipeline/xpipeline" + "go.opentelemetry.io/collector/service/internal/builders" + "go.opentelemetry.io/collector/service/internal/capabilityconsumer" + "go.opentelemetry.io/collector/service/internal/components" +) + +const connectorSeed = "connector" + +var _ consumerNode = (*connectorNode)(nil) + +// A connector instance connects one pipeline type to one other pipeline type. +// Therefore, nodeID is derived from "exporter pipeline type", "receiver pipeline type", and "component ID". 
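capabilitiesNode carries one ConsumeXFunc field per signal; assigning a function to the matching field is what lets a single struct act as a pipeline's first consumer for whichever signal that pipeline carries. A minimal demonstration of the function-field trick, using only the public consumer and pdata APIs:

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/collector/consumer"
	"go.opentelemetry.io/collector/pdata/ptrace"
)

// node embeds the func type; assigning a function to the field gives the
// struct a working ConsumeTraces method, signal by signal.
type node struct {
	consumer.ConsumeTracesFunc
}

func main() {
	n := node{}
	n.ConsumeTracesFunc = func(_ context.Context, td ptrace.Traces) error {
		fmt.Println("spans:", td.SpanCount())
		return nil
	}
	_ = n.ConsumeTraces(context.Background(), ptrace.NewTraces()) // spans: 0
}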
+type connectorNode struct { + nodeID + componentID component.ID + exprPipelineType pipeline.Signal + rcvrPipelineType pipeline.Signal + component.Component +} + +func newConnectorNode(exprPipelineType, rcvrPipelineType pipeline.Signal, connID component.ID) *connectorNode { + return &connectorNode{ + nodeID: newNodeID(connectorSeed, connID.String(), exprPipelineType.String(), rcvrPipelineType.String()), + componentID: connID, + exprPipelineType: exprPipelineType, + rcvrPipelineType: rcvrPipelineType, + } +} + +func (n *connectorNode) getConsumer() baseConsumer { + return n.Component.(baseConsumer) +} + +func (n *connectorNode) buildComponent( + ctx context.Context, + tel component.TelemetrySettings, + info component.BuildInfo, + builder *builders.ConnectorBuilder, + nexts []baseConsumer, +) error { + tel.Logger = components.ConnectorLogger(tel.Logger, n.componentID, n.exprPipelineType, n.rcvrPipelineType) + set := connector.Settings{ID: n.componentID, TelemetrySettings: tel, BuildInfo: info} + switch n.rcvrPipelineType { + case pipeline.SignalTraces: + return n.buildTraces(ctx, set, builder, nexts) + case pipeline.SignalMetrics: + return n.buildMetrics(ctx, set, builder, nexts) + case pipeline.SignalLogs: + return n.buildLogs(ctx, set, builder, nexts) + case xpipeline.SignalProfiles: + return n.buildProfiles(ctx, set, builder, nexts) + } + return nil +} + +func (n *connectorNode) buildTraces( + ctx context.Context, + set connector.Settings, + builder *builders.ConnectorBuilder, + nexts []baseConsumer, +) error { + consumers := make(map[pipeline.ID]consumer.Traces, len(nexts)) + for _, next := range nexts { + consumers[next.(*capabilitiesNode).pipelineID] = next.(consumer.Traces) + } + next := connector.NewTracesRouter(consumers) + + var err error + switch n.exprPipelineType { + case pipeline.SignalTraces: + var conn connector.Traces + conn, err = builder.CreateTracesToTraces(ctx, set, next) + if err != nil { + return err + } + n.Component = componentTraces{ + Component: conn, + Traces: capabilityconsumer.NewTraces(conn, aggregateCap(conn, nexts)), + } + return nil + case pipeline.SignalMetrics: + n.Component, err = builder.CreateMetricsToTraces(ctx, set, next) + case pipeline.SignalLogs: + n.Component, err = builder.CreateLogsToTraces(ctx, set, next) + case xpipeline.SignalProfiles: + n.Component, err = builder.CreateProfilesToTraces(ctx, set, next) + } + return err +} + +func (n *connectorNode) buildMetrics( + ctx context.Context, + set connector.Settings, + builder *builders.ConnectorBuilder, + nexts []baseConsumer, +) error { + consumers := make(map[pipeline.ID]consumer.Metrics, len(nexts)) + for _, next := range nexts { + consumers[next.(*capabilitiesNode).pipelineID] = next.(consumer.Metrics) + } + next := connector.NewMetricsRouter(consumers) + + var err error + switch n.exprPipelineType { + case pipeline.SignalMetrics: + var conn connector.Metrics + conn, err = builder.CreateMetricsToMetrics(ctx, set, next) + if err != nil { + return err + } + n.Component = componentMetrics{ + Component: conn, + Metrics: capabilityconsumer.NewMetrics(conn, aggregateCap(conn, nexts)), + } + return nil + case pipeline.SignalTraces: + n.Component, err = builder.CreateTracesToMetrics(ctx, set, next) + case pipeline.SignalLogs: + n.Component, err = builder.CreateLogsToMetrics(ctx, set, next) + case xpipeline.SignalProfiles: + n.Component, err = builder.CreateProfilesToMetrics(ctx, set, next) + } + return err +} + +func (n *connectorNode) buildLogs( + ctx context.Context, + set connector.Settings, + 
builder *builders.ConnectorBuilder, + nexts []baseConsumer, +) error { + consumers := make(map[pipeline.ID]consumer.Logs, len(nexts)) + for _, next := range nexts { + consumers[next.(*capabilitiesNode).pipelineID] = next.(consumer.Logs) + } + next := connector.NewLogsRouter(consumers) + + var err error + switch n.exprPipelineType { + case pipeline.SignalLogs: + var conn connector.Logs + conn, err = builder.CreateLogsToLogs(ctx, set, next) + if err != nil { + return err + } + n.Component = componentLogs{ + Component: conn, + Logs: capabilityconsumer.NewLogs(conn, aggregateCap(conn, nexts)), + } + return nil + case pipeline.SignalTraces: + n.Component, err = builder.CreateTracesToLogs(ctx, set, next) + case pipeline.SignalMetrics: + n.Component, err = builder.CreateMetricsToLogs(ctx, set, next) + case xpipeline.SignalProfiles: + n.Component, err = builder.CreateProfilesToLogs(ctx, set, next) + } + return err +} + +func (n *connectorNode) buildProfiles( + ctx context.Context, + set connector.Settings, + builder *builders.ConnectorBuilder, + nexts []baseConsumer, +) error { + consumers := make(map[pipeline.ID]xconsumer.Profiles, len(nexts)) + for _, next := range nexts { + consumers[next.(*capabilitiesNode).pipelineID] = next.(xconsumer.Profiles) + } + next := xconnector.NewProfilesRouter(consumers) + + var err error + switch n.exprPipelineType { + case xpipeline.SignalProfiles: + var conn xconnector.Profiles + conn, err = builder.CreateProfilesToProfiles(ctx, set, next) + if err != nil { + return err + } + n.Component = componentProfiles{ + Component: conn, + Profiles: capabilityconsumer.NewProfiles(conn, aggregateCap(conn, nexts)), + } + return nil + case pipeline.SignalTraces: + n.Component, err = builder.CreateTracesToProfiles(ctx, set, next) + case pipeline.SignalMetrics: + n.Component, err = builder.CreateMetricsToProfiles(ctx, set, next) + case pipeline.SignalLogs: + n.Component, err = builder.CreateLogsToProfiles(ctx, set, next) + } + return err +} + +// When connecting pipelines of the same data type, the connector must +// inherit the capabilities of pipelines in which it is acting as a receiver. +// Since the incoming and outgoing data types are the same, we must also consider +// that the connector itself may mutate the data and pass it along. +func aggregateCap(base baseConsumer, nexts []baseConsumer) consumer.Capabilities { + capabilities := base.Capabilities() + for _, next := range nexts { + capabilities.MutatesData = capabilities.MutatesData || next.Capabilities().MutatesData + } + return capabilities +} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/graph/consumer.go b/vendor/go.opentelemetry.io/collector/service/internal/graph/consumer.go new file mode 100644 index 00000000000..2c44a993031 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/service/internal/graph/consumer.go @@ -0,0 +1,39 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package graph // import "go.opentelemetry.io/collector/service/internal/graph" + +import ( + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/xconsumer" +) + +// baseConsumer redeclared here since not public in consumer package. May consider to make that public. 
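aggregateCap is small but load-bearing: when a connector bridges pipelines of the same signal, its effective MutatesData flag is the OR of its own capabilities and those of every downstream pipeline, so a single mutating consumer marks the whole shared path. Restated standalone (assuming only the public consumer.Capabilities type):

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/consumer"
)

// aggregate reproduces aggregateCap's rule in isolation: one mutating
// downstream pipeline is enough to mark the shared path as mutating.
func aggregate(base consumer.Capabilities, nexts ...consumer.Capabilities) consumer.Capabilities {
	for _, next := range nexts {
		base.MutatesData = base.MutatesData || next.MutatesData
	}
	return base
}

func main() {
	out := aggregate(
		consumer.Capabilities{MutatesData: false},
		consumer.Capabilities{MutatesData: false},
		consumer.Capabilities{MutatesData: true},
	)
	fmt.Println(out.MutatesData) // true
}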
+type baseConsumer interface { + Capabilities() consumer.Capabilities +} + +type consumerNode interface { + getConsumer() baseConsumer +} + +type componentTraces struct { + component.Component + consumer.Traces +} + +type componentMetrics struct { + component.Component + consumer.Metrics +} + +type componentLogs struct { + component.Component + consumer.Logs +} + +type componentProfiles struct { + component.Component + xconsumer.Profiles +} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/graph/exporter.go b/vendor/go.opentelemetry.io/collector/service/internal/graph/exporter.go new file mode 100644 index 00000000000..ab7d0f6392b --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/service/internal/graph/exporter.go @@ -0,0 +1,68 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package graph // import "go.opentelemetry.io/collector/service/internal/graph" + +import ( + "context" + "fmt" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/pipeline" + "go.opentelemetry.io/collector/pipeline/xpipeline" + "go.opentelemetry.io/collector/service/internal/builders" + "go.opentelemetry.io/collector/service/internal/components" +) + +const exporterSeed = "exporter" + +var _ consumerNode = (*exporterNode)(nil) + +// An exporter instance can be shared by multiple pipelines of the same type. +// Therefore, nodeID is derived from "pipeline type" and "component ID". +type exporterNode struct { + nodeID + componentID component.ID + pipelineType pipeline.Signal + component.Component +} + +func newExporterNode(pipelineType pipeline.Signal, exprID component.ID) *exporterNode { + return &exporterNode{ + nodeID: newNodeID(exporterSeed, pipelineType.String(), exprID.String()), + componentID: exprID, + pipelineType: pipelineType, + } +} + +func (n *exporterNode) getConsumer() baseConsumer { + return n.Component.(baseConsumer) +} + +func (n *exporterNode) buildComponent( + ctx context.Context, + tel component.TelemetrySettings, + info component.BuildInfo, + builder *builders.ExporterBuilder, +) error { + tel.Logger = components.ExporterLogger(tel.Logger, n.componentID, n.pipelineType) + set := exporter.Settings{ID: n.componentID, TelemetrySettings: tel, BuildInfo: info} + var err error + switch n.pipelineType { + case pipeline.SignalTraces: + n.Component, err = builder.CreateTraces(ctx, set) + case pipeline.SignalMetrics: + n.Component, err = builder.CreateMetrics(ctx, set) + case pipeline.SignalLogs: + n.Component, err = builder.CreateLogs(ctx, set) + case xpipeline.SignalProfiles: + n.Component, err = builder.CreateProfiles(ctx, set) + default: + return fmt.Errorf("error creating exporter %q for data type %q is not supported", set.ID, n.pipelineType) + } + if err != nil { + return fmt.Errorf("failed to create %q exporter for data type %q: %w", set.ID, n.pipelineType, err) + } + return nil +} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/graph/fanout.go b/vendor/go.opentelemetry.io/collector/service/internal/graph/fanout.go new file mode 100644 index 00000000000..13c8d4ad1c5 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/service/internal/graph/fanout.go @@ -0,0 +1,31 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package graph // import "go.opentelemetry.io/collector/service/internal/graph" + +import ( + "go.opentelemetry.io/collector/pipeline" +) + +const fanOutToExporters = "fanout_to_exporters" + +var _ 
consumerNode = (*fanOutNode)(nil) + +// Each pipeline has one fan-out node before exporters. +// Therefore, nodeID is derived from "pipeline ID". +type fanOutNode struct { + nodeID + pipelineID pipeline.ID + baseConsumer +} + +func newFanOutNode(pipelineID pipeline.ID) *fanOutNode { + return &fanOutNode{ + nodeID: newNodeID(fanOutToExporters, pipelineID.String()), + pipelineID: pipelineID, + } +} + +func (n *fanOutNode) getConsumer() baseConsumer { + return n.baseConsumer +} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/graph/graph.go b/vendor/go.opentelemetry.io/collector/service/internal/graph/graph.go index c95d1f54b7e..3f5ae33fd91 100644 --- a/vendor/go.opentelemetry.io/collector/service/internal/graph/graph.go +++ b/vendor/go.opentelemetry.io/collector/service/internal/graph/graph.go @@ -19,34 +19,40 @@ import ( "strings" "go.uber.org/multierr" + "go.uber.org/zap" "gonum.org/v1/gonum/graph" "gonum.org/v1/gonum/graph/simple" "gonum.org/v1/gonum/graph/topo" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componentstatus" "go.opentelemetry.io/collector/connector" + "go.opentelemetry.io/collector/connector/xconnector" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/consumer/xconsumer" "go.opentelemetry.io/collector/internal/fanoutconsumer" - "go.opentelemetry.io/collector/processor" - "go.opentelemetry.io/collector/receiver" + "go.opentelemetry.io/collector/pipeline" + "go.opentelemetry.io/collector/pipeline/xpipeline" + "go.opentelemetry.io/collector/service/internal/builders" "go.opentelemetry.io/collector/service/internal/capabilityconsumer" - "go.opentelemetry.io/collector/service/internal/servicetelemetry" + "go.opentelemetry.io/collector/service/internal/status" "go.opentelemetry.io/collector/service/pipelines" ) // Settings holds configuration for building builtPipelines. type Settings struct { - Telemetry servicetelemetry.TelemetrySettings + Telemetry component.TelemetrySettings BuildInfo component.BuildInfo - ReceiverBuilder *receiver.Builder - ProcessorBuilder *processor.Builder - ExporterBuilder *exporter.Builder - ConnectorBuilder *connector.Builder + ReceiverBuilder *builders.ReceiverBuilder + ProcessorBuilder *builders.ProcessorBuilder + ExporterBuilder *builders.ExporterBuilder + ConnectorBuilder *builders.ConnectorBuilder // PipelineConfigs is a map of component.ID to PipelineConfig. PipelineConfigs pipelines.Config + + ReportStatus status.ServiceStatusFunc } type Graph struct { @@ -54,12 +60,12 @@ type Graph struct { componentGraph *simple.DirectedGraph // Keep track of how nodes relate to pipelines, so we can declare edges in the graph. - pipelines map[component.ID]*pipelineNodes + pipelines map[pipeline.ID]*pipelineNodes // Keep track of status source per node - instanceIDs map[int64]*component.InstanceID + instanceIDs map[int64]*componentstatus.InstanceID - telemetry servicetelemetry.TelemetrySettings + telemetry component.TelemetrySettings } // Build builds a full pipeline graph. 
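For orientation before the graph.go hunks: the fan-out node above ends each pipeline by replicating data to every exporter consumer. The internal fanoutconsumer package does the real work; the simplified stand-in below keeps only the replicate-and-collect-errors core and deliberately omits the cloning the real implementation performs for mutating consumers:

package main

import (
	"context"
	"fmt"

	"go.uber.org/multierr"

	"go.opentelemetry.io/collector/consumer"
	"go.opentelemetry.io/collector/consumer/consumertest"
	"go.opentelemetry.io/collector/pdata/ptrace"
)

// tracesFanOut is a deliberately simplified stand-in for fanoutconsumer:
// every downstream consumer sees the same payload, errors accumulate.
type tracesFanOut []consumer.Traces

func (f tracesFanOut) Capabilities() consumer.Capabilities { return consumer.Capabilities{} }

func (f tracesFanOut) ConsumeTraces(ctx context.Context, td ptrace.Traces) error {
	var errs error
	for _, c := range f {
		errs = multierr.Append(errs, c.ConsumeTraces(ctx, td))
	}
	return errs
}

func main() {
	fan := tracesFanOut{consumertest.NewNop(), consumertest.NewNop()}
	fmt.Println(fan.ConsumeTraces(context.Background(), ptrace.NewTraces())) // <nil>
}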
@@ -67,8 +73,8 @@ type Graph struct { func Build(ctx context.Context, set Settings) (*Graph, error) { pipelines := &Graph{ componentGraph: simple.NewDirectedGraph(), - pipelines: make(map[component.ID]*pipelineNodes, len(set.PipelineConfigs)), - instanceIDs: make(map[int64]*component.InstanceID), + pipelines: make(map[pipeline.ID]*pipelineNodes, len(set.PipelineConfigs)), + instanceIDs: make(map[int64]*componentstatus.InstanceID), telemetry: set.Telemetry, } for pipelineID := range set.PipelineConfigs { @@ -91,8 +97,8 @@ func (g *Graph) createNodes(set Settings) error { connectors := make(map[component.ID]struct{}) // Keep track of connectors and where they are used. (map[connectorID][]pipelineID). - connectorsAsExporter := make(map[component.ID][]component.ID) - connectorsAsReceiver := make(map[component.ID][]component.ID) + connectorsAsExporter := make(map[component.ID][]pipeline.ID) + connectorsAsReceiver := make(map[component.ID][]pipeline.ID) // Build each pipelineNodes struct for each pipeline by parsing the pipelineCfg. // Also populates the connectors, connectorsAsExporter and connectorsAsReceiver maps. @@ -136,19 +142,19 @@ func (g *Graph) createNodes(set Settings) error { } connFactory := factory.(connector.Factory) - expTypes := make(map[component.DataType]bool) + expTypes := make(map[pipeline.Signal]bool) for _, pipelineID := range connectorsAsExporter[connID] { // The presence of each key indicates how the connector is used as an exporter. // The value is initially set to false. Later we will set the value to true *if* we // confirm that there is a supported corresponding use as a receiver. - expTypes[pipelineID.Type()] = false + expTypes[pipelineID.Signal()] = false } - recTypes := make(map[component.DataType]bool) + recTypes := make(map[pipeline.Signal]bool) for _, pipelineID := range connectorsAsReceiver[connID] { // The presence of each key indicates how the connector is used as a receiver. // The value is initially set to false. Later we will set the value to true *if* we // confirm that there is a supported corresponding use as an exporter. 
- recTypes[pipelineID.Type()] = false + recTypes[pipelineID.Signal()] = false } for expType := range expTypes { @@ -176,7 +182,7 @@ func (g *Graph) createNodes(set Settings) error { for _, eID := range connectorsAsExporter[connID] { for _, rID := range connectorsAsReceiver[connID] { - if connectorStability(connFactory, eID.Type(), rID.Type()) == component.StabilityLevelUndefined { + if connectorStability(connFactory, eID.Signal(), rID.Signal()) == component.StabilityLevelUndefined { // Connector is not supported for this combination, but we know it is used correctly elsewhere continue } @@ -190,70 +196,54 @@ func (g *Graph) createNodes(set Settings) error { return nil } -func (g *Graph) createReceiver(pipelineID, recvID component.ID) *receiverNode { - rcvrNode := newReceiverNode(pipelineID.Type(), recvID) +func (g *Graph) createReceiver(pipelineID pipeline.ID, recvID component.ID) *receiverNode { + rcvrNode := newReceiverNode(pipelineID.Signal(), recvID) if node := g.componentGraph.Node(rcvrNode.ID()); node != nil { - g.instanceIDs[node.ID()].PipelineIDs[pipelineID] = struct{}{} + instanceID := g.instanceIDs[node.ID()] + g.instanceIDs[node.ID()] = instanceID.WithPipelines(pipelineID) return node.(*receiverNode) } g.componentGraph.AddNode(rcvrNode) - g.instanceIDs[rcvrNode.ID()] = &component.InstanceID{ - ID: recvID, - Kind: component.KindReceiver, - PipelineIDs: map[component.ID]struct{}{ - pipelineID: {}, - }, - } + g.instanceIDs[rcvrNode.ID()] = componentstatus.NewInstanceID( + recvID, component.KindReceiver, pipelineID, + ) return rcvrNode } -func (g *Graph) createProcessor(pipelineID, procID component.ID) *processorNode { +func (g *Graph) createProcessor(pipelineID pipeline.ID, procID component.ID) *processorNode { procNode := newProcessorNode(pipelineID, procID) g.componentGraph.AddNode(procNode) - g.instanceIDs[procNode.ID()] = &component.InstanceID{ - ID: procID, - Kind: component.KindProcessor, - PipelineIDs: map[component.ID]struct{}{ - pipelineID: {}, - }, - } + g.instanceIDs[procNode.ID()] = componentstatus.NewInstanceID( + procID, component.KindProcessor, pipelineID, + ) return procNode } -func (g *Graph) createExporter(pipelineID, exprID component.ID) *exporterNode { - expNode := newExporterNode(pipelineID.Type(), exprID) +func (g *Graph) createExporter(pipelineID pipeline.ID, exprID component.ID) *exporterNode { + expNode := newExporterNode(pipelineID.Signal(), exprID) if node := g.componentGraph.Node(expNode.ID()); node != nil { - g.instanceIDs[expNode.ID()].PipelineIDs[pipelineID] = struct{}{} + instanceID := g.instanceIDs[expNode.ID()] + g.instanceIDs[expNode.ID()] = instanceID.WithPipelines(pipelineID) return node.(*exporterNode) } g.componentGraph.AddNode(expNode) - g.instanceIDs[expNode.ID()] = &component.InstanceID{ - ID: expNode.componentID, - Kind: component.KindExporter, - PipelineIDs: map[component.ID]struct{}{ - pipelineID: {}, - }, - } + g.instanceIDs[expNode.ID()] = componentstatus.NewInstanceID( + expNode.componentID, component.KindExporter, pipelineID, + ) return expNode } -func (g *Graph) createConnector(exprPipelineID, rcvrPipelineID, connID component.ID) *connectorNode { - connNode := newConnectorNode(exprPipelineID.Type(), rcvrPipelineID.Type(), connID) +func (g *Graph) createConnector(exprPipelineID, rcvrPipelineID pipeline.ID, connID component.ID) *connectorNode { + connNode := newConnectorNode(exprPipelineID.Signal(), rcvrPipelineID.Signal(), connID) if node := g.componentGraph.Node(connNode.ID()); node != nil { instanceID := 
g.instanceIDs[connNode.ID()] - instanceID.PipelineIDs[exprPipelineID] = struct{}{} - instanceID.PipelineIDs[rcvrPipelineID] = struct{}{} + g.instanceIDs[connNode.ID()] = instanceID.WithPipelines(exprPipelineID, rcvrPipelineID) return node.(*connectorNode) } g.componentGraph.AddNode(connNode) - g.instanceIDs[connNode.ID()] = &component.InstanceID{ - ID: connNode.componentID, - Kind: component.KindConnector, - PipelineIDs: map[component.ID]struct{}{ - exprPipelineID: {}, - rcvrPipelineID: {}, - }, - } + g.instanceIDs[connNode.ID()] = componentstatus.NewInstanceID( + connNode.componentID, component.KindConnector, exprPipelineID, rcvrPipelineID, + ) return connNode } @@ -296,66 +286,70 @@ func (g *Graph) buildComponents(ctx context.Context, set Settings) error { for i := len(nodes) - 1; i >= 0; i-- { node := nodes[i] - // skipped for capabilitiesNodes and fanoutNodes as they are not assigned componentIDs. - var telemetrySettings component.TelemetrySettings - if instanceID, ok := g.instanceIDs[node.ID()]; ok { - telemetrySettings = set.Telemetry.ToComponentTelemetrySettings(instanceID) - } - switch n := node.(type) { case *receiverNode: - err = n.buildComponent(ctx, telemetrySettings, set.BuildInfo, set.ReceiverBuilder, g.nextConsumers(n.ID())) + err = n.buildComponent(ctx, set.Telemetry, set.BuildInfo, set.ReceiverBuilder, g.nextConsumers(n.ID())) case *processorNode: // nextConsumers is guaranteed to be length 1. Either it is the next processor or it is the fanout node for the exporters. - err = n.buildComponent(ctx, telemetrySettings, set.BuildInfo, set.ProcessorBuilder, g.nextConsumers(n.ID())[0]) + err = n.buildComponent(ctx, set.Telemetry, set.BuildInfo, set.ProcessorBuilder, g.nextConsumers(n.ID())[0]) case *exporterNode: - err = n.buildComponent(ctx, telemetrySettings, set.BuildInfo, set.ExporterBuilder) + err = n.buildComponent(ctx, set.Telemetry, set.BuildInfo, set.ExporterBuilder) case *connectorNode: - err = n.buildComponent(ctx, telemetrySettings, set.BuildInfo, set.ConnectorBuilder, g.nextConsumers(n.ID())) + err = n.buildComponent(ctx, set.Telemetry, set.BuildInfo, set.ConnectorBuilder, g.nextConsumers(n.ID())) case *capabilitiesNode: capability := consumer.Capabilities{ // The fanOutNode represents the aggregate capabilities of the exporters in the pipeline. 
MutatesData: g.pipelines[n.pipelineID].fanOutNode.getConsumer().Capabilities().MutatesData, } for _, proc := range g.pipelines[n.pipelineID].processors { - capability.MutatesData = capability.MutatesData || proc.getConsumer().Capabilities().MutatesData + capability.MutatesData = capability.MutatesData || proc.(*processorNode).getConsumer().Capabilities().MutatesData } next := g.nextConsumers(n.ID())[0] - switch n.pipelineID.Type() { - case component.DataTypeTraces: + switch n.pipelineID.Signal() { + case pipeline.SignalTraces: cc := capabilityconsumer.NewTraces(next.(consumer.Traces), capability) n.baseConsumer = cc n.ConsumeTracesFunc = cc.ConsumeTraces - case component.DataTypeMetrics: + case pipeline.SignalMetrics: cc := capabilityconsumer.NewMetrics(next.(consumer.Metrics), capability) n.baseConsumer = cc n.ConsumeMetricsFunc = cc.ConsumeMetrics - case component.DataTypeLogs: + case pipeline.SignalLogs: cc := capabilityconsumer.NewLogs(next.(consumer.Logs), capability) n.baseConsumer = cc n.ConsumeLogsFunc = cc.ConsumeLogs + case xpipeline.SignalProfiles: + cc := capabilityconsumer.NewProfiles(next.(xconsumer.Profiles), capability) + n.baseConsumer = cc + n.ConsumeProfilesFunc = cc.ConsumeProfiles } case *fanOutNode: nexts := g.nextConsumers(n.ID()) - switch n.pipelineID.Type() { - case component.DataTypeTraces: + switch n.pipelineID.Signal() { + case pipeline.SignalTraces: consumers := make([]consumer.Traces, 0, len(nexts)) for _, next := range nexts { consumers = append(consumers, next.(consumer.Traces)) } n.baseConsumer = fanoutconsumer.NewTraces(consumers) - case component.DataTypeMetrics: + case pipeline.SignalMetrics: consumers := make([]consumer.Metrics, 0, len(nexts)) for _, next := range nexts { consumers = append(consumers, next.(consumer.Metrics)) } n.baseConsumer = fanoutconsumer.NewMetrics(consumers) - case component.DataTypeLogs: + case pipeline.SignalLogs: consumers := make([]consumer.Logs, 0, len(nexts)) for _, next := range nexts { consumers = append(consumers, next.(consumer.Logs)) } n.baseConsumer = fanoutconsumer.NewLogs(consumers) + case xpipeline.SignalProfiles: + consumers := make([]xconsumer.Profiles, 0, len(nexts)) + for _, next := range nexts { + consumers = append(consumers, next.(xconsumer.Profiles)) + } + n.baseConsumer = fanoutconsumer.NewProfiles(consumers) } } if err != nil { @@ -385,7 +379,7 @@ type pipelineNodes struct { *capabilitiesNode // The order of processors is very important. Therefore use a slice for processors. - processors []*processorNode + processors []graph.Node // Emits to exporters. 
*fanOutNode @@ -394,7 +388,11 @@ type pipelineNodes struct { exporters map[int64]graph.Node } -func (g *Graph) StartAll(ctx context.Context, host component.Host) error { +func (g *Graph) StartAll(ctx context.Context, host *Host) error { + if host == nil { + return errors.New("host cannot be nil") + } + nodes, err := topo.Sort(g.componentGraph) if err != nil { return err @@ -413,25 +411,32 @@ func (g *Graph) StartAll(ctx context.Context, host component.Host) error { } instanceID := g.instanceIDs[node.ID()] - g.telemetry.Status.ReportStatus( + host.Reporter.ReportStatus( instanceID, - component.NewStatusEvent(component.StatusStarting), + componentstatus.NewEvent(componentstatus.StatusStarting), ) - if compErr := comp.Start(ctx, host); compErr != nil { - g.telemetry.Status.ReportStatus( + if compErr := comp.Start(ctx, &HostWrapper{Host: host, InstanceID: instanceID}); compErr != nil { + host.Reporter.ReportStatus( instanceID, - component.NewPermanentErrorEvent(compErr), + componentstatus.NewPermanentErrorEvent(compErr), ) + // We log with zap.AddStacktrace(zap.DPanicLevel) to avoid adding the stack trace to the error log + g.telemetry.Logger.WithOptions(zap.AddStacktrace(zap.DPanicLevel)). + Error("Failed to start component", + zap.Error(compErr), + zap.String("type", instanceID.Kind().String()), + zap.String("id", instanceID.ComponentID().String()), + ) return compErr } - g.telemetry.Status.ReportOKIfStarting(instanceID) + host.Reporter.ReportOKIfStarting(instanceID) } return nil } -func (g *Graph) ShutdownAll(ctx context.Context) error { +func (g *Graph) ShutdownAll(ctx context.Context, reporter status.Reporter) error { nodes, err := topo.Sort(g.componentGraph) if err != nil { return err @@ -452,39 +457,34 @@ func (g *Graph) ShutdownAll(ctx context.Context) error { } instanceID := g.instanceIDs[node.ID()] - g.telemetry.Status.ReportStatus( + reporter.ReportStatus( instanceID, - component.NewStatusEvent(component.StatusStopping), + componentstatus.NewEvent(componentstatus.StatusStopping), ) if compErr := comp.Shutdown(ctx); compErr != nil { errs = multierr.Append(errs, compErr) - g.telemetry.Status.ReportStatus( + reporter.ReportStatus( instanceID, - component.NewPermanentErrorEvent(compErr), + componentstatus.NewPermanentErrorEvent(compErr), ) continue } - g.telemetry.Status.ReportStatus( + reporter.ReportStatus( instanceID, - component.NewStatusEvent(component.StatusStopped), + componentstatus.NewEvent(componentstatus.StatusStopped), ) } return errs } -// Deprecated: [0.79.0] This function will be removed in the future. -// Several components in the contrib repository use this function so it cannot be removed -// before those cases are removed. In most cases, use of this function can be replaced by a -// connector. See https://github.com/open-telemetry/opentelemetry-collector/issues/7370 and -// https://github.com/open-telemetry/opentelemetry-collector/pull/7390#issuecomment-1483710184 -// for additional information. 
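// [Editor's sketch — annotation, not part of the vendored diff] The reworked
// GetExporters below keys its outer map by pipeline.Signal instead of the removed
// component.DataType. Assuming a hypothetical "otlp" exporter, a migrated caller
// would index it roughly like this:
//
//	for id, exp := range host.GetExporters()[pipeline.SignalTraces] {
//	    _ = id  // component.ID of the exporter, e.g. "otlp"
//	    _ = exp // the shared component.Component instance
//	}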
-func (g *Graph) GetExporters() map[component.DataType]map[component.ID]component.Component { - exportersMap := make(map[component.DataType]map[component.ID]component.Component) - exportersMap[component.DataTypeTraces] = make(map[component.ID]component.Component) - exportersMap[component.DataTypeMetrics] = make(map[component.ID]component.Component) - exportersMap[component.DataTypeLogs] = make(map[component.ID]component.Component) +func (g *Graph) GetExporters() map[pipeline.Signal]map[component.ID]component.Component { + exportersMap := make(map[pipeline.Signal]map[component.ID]component.Component) + exportersMap[pipeline.SignalTraces] = make(map[component.ID]component.Component) + exportersMap[pipeline.SignalMetrics] = make(map[component.ID]component.Component) + exportersMap[pipeline.SignalLogs] = make(map[component.ID]component.Component) + exportersMap[xpipeline.SignalProfiles] = make(map[component.ID]component.Component) for _, pg := range g.pipelines { for _, expNode := range pg.exporters { @@ -527,7 +527,7 @@ func cycleErr(err error, cycles [][]graph.Node) error { for _, node := range cycle { switch n := node.(type) { case *processorNode: - componentDetails = append(componentDetails, fmt.Sprintf("processor %q in pipeline %q", n.componentID, n.pipelineID)) + componentDetails = append(componentDetails, fmt.Sprintf("processor %q in pipeline %q", n.componentID, n.pipelineID.String())) case *connectorNode: componentDetails = append(componentDetails, fmt.Sprintf("connector %q (%s to %s)", n.componentID, n.exprPipelineType, n.rcvrPipelineType)) default: @@ -537,35 +537,83 @@ func cycleErr(err error, cycles [][]graph.Node) error { return fmt.Errorf("cycle detected: %s", strings.Join(componentDetails, " -> ")) } -func connectorStability(f connector.Factory, expType, recType component.Type) component.StabilityLevel { +func connectorStability(f connector.Factory, expType, recType pipeline.Signal) component.StabilityLevel { switch expType { - case component.DataTypeTraces: + case pipeline.SignalTraces: switch recType { - case component.DataTypeTraces: + case pipeline.SignalTraces: return f.TracesToTracesStability() - case component.DataTypeMetrics: + case pipeline.SignalMetrics: return f.TracesToMetricsStability() - case component.DataTypeLogs: + case pipeline.SignalLogs: return f.TracesToLogsStability() + case xpipeline.SignalProfiles: + fprof, ok := f.(xconnector.Factory) + if !ok { + return component.StabilityLevelUndefined + } + return fprof.TracesToProfilesStability() } - case component.DataTypeMetrics: + case pipeline.SignalMetrics: switch recType { - case component.DataTypeTraces: + case pipeline.SignalTraces: return f.MetricsToTracesStability() - case component.DataTypeMetrics: + case pipeline.SignalMetrics: return f.MetricsToMetricsStability() - case component.DataTypeLogs: + case pipeline.SignalLogs: return f.MetricsToLogsStability() + case xpipeline.SignalProfiles: + fprof, ok := f.(xconnector.Factory) + if !ok { + return component.StabilityLevelUndefined + } + return fprof.MetricsToProfilesStability() } - case component.DataTypeLogs: + case pipeline.SignalLogs: switch recType { - case component.DataTypeTraces: + case pipeline.SignalTraces: return f.LogsToTracesStability() - case component.DataTypeMetrics: + case pipeline.SignalMetrics: return f.LogsToMetricsStability() - case component.DataTypeLogs: + case pipeline.SignalLogs: return f.LogsToLogsStability() + case xpipeline.SignalProfiles: + fprof, ok := f.(xconnector.Factory) + if !ok { + return component.StabilityLevelUndefined + 
} + return fprof.LogsToProfilesStability() + } + case xpipeline.SignalProfiles: + fprof, ok := f.(xconnector.Factory) + if !ok { + return component.StabilityLevelUndefined + } + switch recType { + case pipeline.SignalTraces: + return fprof.ProfilesToTracesStability() + case pipeline.SignalMetrics: + return fprof.ProfilesToMetricsStability() + case pipeline.SignalLogs: + return fprof.ProfilesToLogsStability() + case xpipeline.SignalProfiles: + return fprof.ProfilesToProfilesStability() } } return component.StabilityLevelUndefined } + +var ( + _ getExporters = (*HostWrapper)(nil) + _ component.Host = (*HostWrapper)(nil) + _ componentstatus.Reporter = (*HostWrapper)(nil) +) + +type HostWrapper struct { + *Host + InstanceID *componentstatus.InstanceID +} + +func (host *HostWrapper) Report(event *componentstatus.Event) { + host.Reporter.ReportStatus(host.InstanceID, event) +} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/graph/host.go b/vendor/go.opentelemetry.io/collector/service/internal/graph/host.go new file mode 100644 index 00000000000..3aaef5e055f --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/service/internal/graph/host.go @@ -0,0 +1,167 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package graph // import "go.opentelemetry.io/collector/service/internal/graph" + +import ( + "net/http" + "path" + "runtime" + "time" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componentstatus" + "go.opentelemetry.io/collector/extension" + "go.opentelemetry.io/collector/featuregate" + "go.opentelemetry.io/collector/pipeline" + "go.opentelemetry.io/collector/service/extensions" + "go.opentelemetry.io/collector/service/internal/builders" + "go.opentelemetry.io/collector/service/internal/status" + "go.opentelemetry.io/collector/service/internal/zpages" +) + +// TODO: remove as part of https://github.com/open-telemetry/opentelemetry-collector/issues/7370 for service 1.0 +type getExporters interface { + GetExporters() map[pipeline.Signal]map[component.ID]component.Component +} + +var ( + _ getExporters = (*Host)(nil) + _ component.Host = (*Host)(nil) +) + +type Host struct { + AsyncErrorChannel chan error + Receivers *builders.ReceiverBuilder + Processors *builders.ProcessorBuilder + Exporters *builders.ExporterBuilder + Connectors *builders.ConnectorBuilder + Extensions *builders.ExtensionBuilder + + ModuleInfo extension.ModuleInfo + BuildInfo component.BuildInfo + + Pipelines *Graph + ServiceExtensions *extensions.Extensions + + Reporter status.Reporter +} + +func (host *Host) GetFactory(kind component.Kind, componentType component.Type) component.Factory { + switch kind { + case component.KindReceiver: + return host.Receivers.Factory(componentType) + case component.KindProcessor: + return host.Processors.Factory(componentType) + case component.KindExporter: + return host.Exporters.Factory(componentType) + case component.KindConnector: + return host.Connectors.Factory(componentType) + case component.KindExtension: + return host.Extensions.Factory(componentType) + } + return nil +} + +func (host *Host) GetExtensions() map[component.ID]component.Component { + return host.ServiceExtensions.GetExtensions() +} + +// Deprecated: [0.79.0] This function will be removed in the future. +// Several components in the contrib repository use this function so it cannot be removed +// before those cases are removed. In most cases, use of this function can be replaced by a +// connector. 
See https://github.com/open-telemetry/opentelemetry-collector/issues/7370 and +// https://github.com/open-telemetry/opentelemetry-collector/pull/7390#issuecomment-1483710184 +// for additional information. +func (host *Host) GetExporters() map[pipeline.Signal]map[component.ID]component.Component { + return host.Pipelines.GetExporters() +} + +func (host *Host) NotifyComponentStatusChange(source *componentstatus.InstanceID, event *componentstatus.Event) { + host.ServiceExtensions.NotifyComponentStatusChange(source, event) + if event.Status() == componentstatus.StatusFatalError { + host.AsyncErrorChannel <- event.Err() + } +} + +const ( + // Paths + zServicePath = "servicez" + zPipelinePath = "pipelinez" + zExtensionPath = "extensionz" + zFeaturePath = "featurez" +) + +// runtimeInfoVar holds the static runtime properties rendered on the service zPage. +var runtimeInfoVar [][2]string + +func init() { + runtimeInfoVar = [][2]string{ + {"StartTimestamp", time.Now().String()}, + {"Go", runtime.Version()}, + {"OS", runtime.GOOS}, + {"Arch", runtime.GOARCH}, + // Add other valuable runtime information here. + } +} + +func (host *Host) RegisterZPages(mux *http.ServeMux, pathPrefix string) { + mux.HandleFunc(path.Join(pathPrefix, zServicePath), host.zPagesRequest) + mux.HandleFunc(path.Join(pathPrefix, zPipelinePath), host.Pipelines.HandleZPages) + mux.HandleFunc(path.Join(pathPrefix, zExtensionPath), host.ServiceExtensions.HandleZPages) + mux.HandleFunc(path.Join(pathPrefix, zFeaturePath), handleFeaturezRequest) +} + +func (host *Host) zPagesRequest(w http.ResponseWriter, _ *http.Request) { + w.Header().Set("Content-Type", "text/html; charset=utf-8") + zpages.WriteHTMLPageHeader(w, zpages.HeaderData{Title: "Service " + host.BuildInfo.Command}) + zpages.WriteHTMLPropertiesTable(w, zpages.PropertiesTableData{Name: "Build Info", Properties: getBuildInfoProperties(host.BuildInfo)}) + zpages.WriteHTMLPropertiesTable(w, zpages.PropertiesTableData{Name: "Runtime Info", Properties: runtimeInfoVar}) + zpages.WriteHTMLComponentHeader(w, zpages.ComponentHeaderData{ + Name: "Pipelines", + ComponentEndpoint: zPipelinePath, + Link: true, + }) + zpages.WriteHTMLComponentHeader(w, zpages.ComponentHeaderData{ + Name: "Extensions", + ComponentEndpoint: zExtensionPath, + Link: true, + }) + zpages.WriteHTMLComponentHeader(w, zpages.ComponentHeaderData{ + Name: "Features", + ComponentEndpoint: zFeaturePath, + Link: true, + }) + zpages.WriteHTMLPageFooter(w) +} + +func handleFeaturezRequest(w http.ResponseWriter, _ *http.Request) { + w.Header().Set("Content-Type", "text/html; charset=utf-8") + zpages.WriteHTMLPageHeader(w, zpages.HeaderData{Title: "Feature Gates"}) + zpages.WriteHTMLFeaturesTable(w, getFeaturesTableData()) + zpages.WriteHTMLPageFooter(w) +} + +func getFeaturesTableData() zpages.FeatureGateTableData { + data := zpages.FeatureGateTableData{} + featuregate.GlobalRegistry().VisitAll(func(gate *featuregate.Gate) { + data.Rows = append(data.Rows, zpages.FeatureGateTableRowData{ + ID: gate.ID(), + Enabled: gate.IsEnabled(), + Description: gate.Description(), + Stage: gate.Stage().String(), + FromVersion: gate.FromVersion(), + ToVersion: gate.ToVersion(), + ReferenceURL: gate.ReferenceURL(), + }) + }) + return data +} + +func getBuildInfoProperties(buildInfo component.BuildInfo) [][2]string { + return [][2]string{ + {"Command", buildInfo.Command}, + {"Description", buildInfo.Description}, + {"Version", buildInfo.Version}, + } +} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/graph/node.go
b/vendor/go.opentelemetry.io/collector/service/internal/graph/node.go new file mode 100644 index 00000000000..0d1c329c68e --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/service/internal/graph/node.go @@ -0,0 +1,22 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package graph // import "go.opentelemetry.io/collector/service/internal/graph" + +import ( + "hash/fnv" + "strings" +) + +type nodeID int64 + +func (n nodeID) ID() int64 { + return int64(n) +} + +func newNodeID(parts ...string) nodeID { + h := fnv.New64a() + h.Write([]byte(strings.Join(parts, "|"))) + // nolint:gosec + return nodeID(h.Sum64()) +} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/graph/nodes.go b/vendor/go.opentelemetry.io/collector/service/internal/graph/nodes.go deleted file mode 100644 index 238bbf3b287..00000000000 --- a/vendor/go.opentelemetry.io/collector/service/internal/graph/nodes.go +++ /dev/null @@ -1,396 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package graph // import "go.opentelemetry.io/collector/service/internal/graph" - -import ( - "context" - "fmt" - "hash/fnv" - "strings" - - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/connector" - "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/exporter" - "go.opentelemetry.io/collector/internal/fanoutconsumer" - "go.opentelemetry.io/collector/processor" - "go.opentelemetry.io/collector/receiver" - "go.opentelemetry.io/collector/service/internal/capabilityconsumer" - "go.opentelemetry.io/collector/service/internal/components" -) - -const ( - receiverSeed = "receiver" - processorSeed = "processor" - exporterSeed = "exporter" - connectorSeed = "connector" - capabilitiesSeed = "capabilities" - fanOutToExporters = "fanout_to_exporters" -) - -// baseConsumer redeclared here since not public in consumer package. May consider to make that public. -type baseConsumer interface { - Capabilities() consumer.Capabilities -} - -type nodeID int64 - -func (n nodeID) ID() int64 { - return int64(n) -} - -func newNodeID(parts ...string) nodeID { - h := fnv.New64a() - h.Write([]byte(strings.Join(parts, "|"))) - return nodeID(h.Sum64()) -} - -type consumerNode interface { - getConsumer() baseConsumer -} - -// A receiver instance can be shared by multiple pipelines of the same type. -// Therefore, nodeID is derived from "pipeline type" and "component ID". 
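// [Editor's sketch — annotation, not part of the vendored diff] newNodeID in the
// new node.go above derives a stable graph ID by FNV-64a-hashing its joined parts,
// so two pipelines of the same type that reference the same receiver compute the
// same ID and share one node. Equivalent computation, with "otlp" as a hypothetical
// receiver component ID:
//
//	h := fnv.New64a()
//	h.Write([]byte(strings.Join([]string{"receiver", "traces", "otlp"}, "|")))
//	id := int64(h.Sum64()) // identical parts always yield the identical node ID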
-type receiverNode struct { - nodeID - componentID component.ID - pipelineType component.DataType - component.Component -} - -func newReceiverNode(pipelineType component.DataType, recvID component.ID) *receiverNode { - return &receiverNode{ - nodeID: newNodeID(receiverSeed, pipelineType.String(), recvID.String()), - componentID: recvID, - pipelineType: pipelineType, - } -} - -func (n *receiverNode) buildComponent(ctx context.Context, - tel component.TelemetrySettings, - info component.BuildInfo, - builder *receiver.Builder, - nexts []baseConsumer, -) error { - set := receiver.CreateSettings{ID: n.componentID, TelemetrySettings: tel, BuildInfo: info} - set.TelemetrySettings.Logger = components.ReceiverLogger(tel.Logger, n.componentID, n.pipelineType) - var err error - switch n.pipelineType { - case component.DataTypeTraces: - var consumers []consumer.Traces - for _, next := range nexts { - consumers = append(consumers, next.(consumer.Traces)) - } - n.Component, err = builder.CreateTraces(ctx, set, fanoutconsumer.NewTraces(consumers)) - case component.DataTypeMetrics: - var consumers []consumer.Metrics - for _, next := range nexts { - consumers = append(consumers, next.(consumer.Metrics)) - } - n.Component, err = builder.CreateMetrics(ctx, set, fanoutconsumer.NewMetrics(consumers)) - case component.DataTypeLogs: - var consumers []consumer.Logs - for _, next := range nexts { - consumers = append(consumers, next.(consumer.Logs)) - } - n.Component, err = builder.CreateLogs(ctx, set, fanoutconsumer.NewLogs(consumers)) - default: - return fmt.Errorf("error creating receiver %q for data type %q is not supported", set.ID, n.pipelineType) - } - if err != nil { - return fmt.Errorf("failed to create %q receiver for data type %q: %w", set.ID, n.pipelineType, err) - } - return nil -} - -var _ consumerNode = (*processorNode)(nil) - -// Every processor instance is unique to one pipeline. -// Therefore, nodeID is derived from "pipeline ID" and "component ID". 
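// [Editor's note — annotation, not part of the vendored diff] Processors, by
// contrast, hash the full pipeline ID rather than only the pipeline type, so the
// same processor config yields one node per pipeline (hypothetical IDs shown):
//
//	newNodeID("processor", "traces", "batch")   // node for pipeline "traces"
//	newNodeID("processor", "traces/2", "batch") // distinct node for pipeline "traces/2"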
-type processorNode struct { - nodeID - componentID component.ID - pipelineID component.ID - component.Component -} - -func newProcessorNode(pipelineID, procID component.ID) *processorNode { - return &processorNode{ - nodeID: newNodeID(processorSeed, pipelineID.String(), procID.String()), - componentID: procID, - pipelineID: pipelineID, - } -} - -func (n *processorNode) getConsumer() baseConsumer { - return n.Component.(baseConsumer) -} - -func (n *processorNode) buildComponent(ctx context.Context, - tel component.TelemetrySettings, - info component.BuildInfo, - builder *processor.Builder, - next baseConsumer, -) error { - set := processor.CreateSettings{ID: n.componentID, TelemetrySettings: tel, BuildInfo: info} - set.TelemetrySettings.Logger = components.ProcessorLogger(set.TelemetrySettings.Logger, n.componentID, n.pipelineID) - var err error - switch n.pipelineID.Type() { - case component.DataTypeTraces: - n.Component, err = builder.CreateTraces(ctx, set, next.(consumer.Traces)) - case component.DataTypeMetrics: - n.Component, err = builder.CreateMetrics(ctx, set, next.(consumer.Metrics)) - case component.DataTypeLogs: - n.Component, err = builder.CreateLogs(ctx, set, next.(consumer.Logs)) - default: - return fmt.Errorf("error creating processor %q in pipeline %q, data type %q is not supported", set.ID, n.pipelineID, n.pipelineID.Type()) - } - if err != nil { - return fmt.Errorf("failed to create %q processor, in pipeline %q: %w", set.ID, n.pipelineID, err) - } - return nil -} - -var _ consumerNode = (*exporterNode)(nil) - -// An exporter instance can be shared by multiple pipelines of the same type. -// Therefore, nodeID is derived from "pipeline type" and "component ID". -type exporterNode struct { - nodeID - componentID component.ID - pipelineType component.DataType - component.Component -} - -func newExporterNode(pipelineType component.DataType, exprID component.ID) *exporterNode { - return &exporterNode{ - nodeID: newNodeID(exporterSeed, pipelineType.String(), exprID.String()), - componentID: exprID, - pipelineType: pipelineType, - } -} - -func (n *exporterNode) getConsumer() baseConsumer { - return n.Component.(baseConsumer) -} - -func (n *exporterNode) buildComponent( - ctx context.Context, - tel component.TelemetrySettings, - info component.BuildInfo, - builder *exporter.Builder, -) error { - set := exporter.CreateSettings{ID: n.componentID, TelemetrySettings: tel, BuildInfo: info} - set.TelemetrySettings.Logger = components.ExporterLogger(set.TelemetrySettings.Logger, n.componentID, n.pipelineType) - var err error - switch n.pipelineType { - case component.DataTypeTraces: - n.Component, err = builder.CreateTraces(ctx, set) - case component.DataTypeMetrics: - n.Component, err = builder.CreateMetrics(ctx, set) - case component.DataTypeLogs: - n.Component, err = builder.CreateLogs(ctx, set) - default: - return fmt.Errorf("error creating exporter %q for data type %q is not supported", set.ID, n.pipelineType) - } - if err != nil { - return fmt.Errorf("failed to create %q exporter for data type %q: %w", set.ID, n.pipelineType, err) - } - return nil -} - -var _ consumerNode = (*connectorNode)(nil) - -// A connector instance connects one pipeline type to one other pipeline type. -// Therefore, nodeID is derived from "exporter pipeline type", "receiver pipeline type", and "component ID". 
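// [Editor's sketch — annotation, not part of the vendored diff] On its receiver
// side, a connector fans in from every downstream pipeline through a router keyed
// by pipeline ID, as in the (since removed) implementation below:
//
//	consumers := map[component.ID]consumer.Traces{} // one entry per downstream pipeline
//	next := connector.NewTracesRouter(consumers)
//	conn, err := builder.CreateMetricsToTraces(ctx, set, next) // e.g. a metrics-to-traces connector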
-type connectorNode struct { - nodeID - componentID component.ID - exprPipelineType component.DataType - rcvrPipelineType component.DataType - component.Component - baseConsumer -} - -func newConnectorNode(exprPipelineType, rcvrPipelineType component.DataType, connID component.ID) *connectorNode { - return &connectorNode{ - nodeID: newNodeID(connectorSeed, connID.String(), exprPipelineType.String(), rcvrPipelineType.String()), - componentID: connID, - exprPipelineType: exprPipelineType, - rcvrPipelineType: rcvrPipelineType, - } -} - -func (n *connectorNode) getConsumer() baseConsumer { - return n.baseConsumer -} - -func (n *connectorNode) buildComponent( - ctx context.Context, - tel component.TelemetrySettings, - info component.BuildInfo, - builder *connector.Builder, - nexts []baseConsumer, -) error { - set := connector.CreateSettings{ID: n.componentID, TelemetrySettings: tel, BuildInfo: info} - set.TelemetrySettings.Logger = components.ConnectorLogger(set.TelemetrySettings.Logger, n.componentID, n.exprPipelineType, n.rcvrPipelineType) - - switch n.rcvrPipelineType { - case component.DataTypeTraces: - capability := consumer.Capabilities{MutatesData: false} - consumers := make(map[component.ID]consumer.Traces, len(nexts)) - for _, next := range nexts { - consumers[next.(*capabilitiesNode).pipelineID] = next.(consumer.Traces) - capability.MutatesData = capability.MutatesData || next.Capabilities().MutatesData - } - next := connector.NewTracesRouter(consumers) - - switch n.exprPipelineType { - case component.DataTypeTraces: - conn, err := builder.CreateTracesToTraces(ctx, set, next) - if err != nil { - return err - } - n.Component = conn - // When connecting pipelines of the same data type, the connector must - // inherit the capabilities of pipelines in which it is acting as a receiver. - // Since the incoming and outgoing data types are the same, we must also consider - // that the connector itself may MutatesData. - capability.MutatesData = capability.MutatesData || conn.Capabilities().MutatesData - n.baseConsumer = capabilityconsumer.NewTraces(conn, capability) - case component.DataTypeMetrics: - conn, err := builder.CreateMetricsToTraces(ctx, set, next) - if err != nil { - return err - } - n.Component, n.baseConsumer = conn, conn - case component.DataTypeLogs: - conn, err := builder.CreateLogsToTraces(ctx, set, next) - if err != nil { - return err - } - n.Component, n.baseConsumer = conn, conn - } - - case component.DataTypeMetrics: - capability := consumer.Capabilities{MutatesData: false} - consumers := make(map[component.ID]consumer.Metrics, len(nexts)) - for _, next := range nexts { - consumers[next.(*capabilitiesNode).pipelineID] = next.(consumer.Metrics) - capability.MutatesData = capability.MutatesData || next.Capabilities().MutatesData - } - next := connector.NewMetricsRouter(consumers) - - switch n.exprPipelineType { - case component.DataTypeTraces: - conn, err := builder.CreateTracesToMetrics(ctx, set, next) - if err != nil { - return err - } - n.Component, n.baseConsumer = conn, conn - case component.DataTypeMetrics: - conn, err := builder.CreateMetricsToMetrics(ctx, set, next) - if err != nil { - return err - } - n.Component = conn - // When connecting pipelines of the same data type, the connector must - // inherit the capabilities of pipelines in which it is acting as a receiver. - // Since the incoming and outgoing data types are the same, we must also consider - // that the connector itself may MutatesData. 
- capability.MutatesData = capability.MutatesData || conn.Capabilities().MutatesData - n.baseConsumer = capabilityconsumer.NewMetrics(conn, capability) - case component.DataTypeLogs: - conn, err := builder.CreateLogsToMetrics(ctx, set, next) - if err != nil { - return err - } - n.Component, n.baseConsumer = conn, conn - } - case component.DataTypeLogs: - capability := consumer.Capabilities{MutatesData: false} - consumers := make(map[component.ID]consumer.Logs, len(nexts)) - for _, next := range nexts { - consumers[next.(*capabilitiesNode).pipelineID] = next.(consumer.Logs) - capability.MutatesData = capability.MutatesData || next.Capabilities().MutatesData - } - next := connector.NewLogsRouter(consumers) - - switch n.exprPipelineType { - case component.DataTypeTraces: - conn, err := builder.CreateTracesToLogs(ctx, set, next) - if err != nil { - return err - } - n.Component, n.baseConsumer = conn, conn - case component.DataTypeMetrics: - conn, err := builder.CreateMetricsToLogs(ctx, set, next) - if err != nil { - return err - } - n.Component, n.baseConsumer = conn, conn - case component.DataTypeLogs: - conn, err := builder.CreateLogsToLogs(ctx, set, next) - if err != nil { - return err - } - n.Component = conn - // When connecting pipelines of the same data type, the connector must - // inherit the capabilities of pipelines in which it is acting as a receiver. - // Since the incoming and outgoing data types are the same, we must also consider - // that the connector itself may MutatesData. - capability.MutatesData = capability.MutatesData || conn.Capabilities().MutatesData - n.baseConsumer = capabilityconsumer.NewLogs(conn, capability) - } - } - return nil -} - -var _ consumerNode = (*capabilitiesNode)(nil) - -// Every pipeline has a "virtual" capabilities node immediately after the receiver(s). -// There are two purposes for this node: -// 1. Present aggregated capabilities to receivers, such as whether the pipeline mutates data. -// 2. Present a consistent "first consumer" for each pipeline. -// The nodeID is derived from "pipeline ID". -type capabilitiesNode struct { - nodeID - pipelineID component.ID - baseConsumer - consumer.ConsumeTracesFunc - consumer.ConsumeMetricsFunc - consumer.ConsumeLogsFunc -} - -func newCapabilitiesNode(pipelineID component.ID) *capabilitiesNode { - return &capabilitiesNode{ - nodeID: newNodeID(capabilitiesSeed, pipelineID.String()), - pipelineID: pipelineID, - } -} - -func (n *capabilitiesNode) getConsumer() baseConsumer { - return n -} - -var _ consumerNode = (*fanOutNode)(nil) - -// Each pipeline has one fan-out node before exporters. -// Therefore, nodeID is derived from "pipeline ID". 
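// [Editor's sketch — annotation, not part of the vendored diff] The fan-out node's
// consumer wraps the exporter consumers of its pipeline, so a single call delivers
// the same pdata to every exporter (expA, expB and td are hypothetical):
//
//	fan := fanoutconsumer.NewTraces([]consumer.Traces{expA, expB})
//	err := fan.ConsumeTraces(ctx, td) // td is a ptrace.Traces payload handed to both exporters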
-type fanOutNode struct { - nodeID - pipelineID component.ID - baseConsumer -} - -func newFanOutNode(pipelineID component.ID) *fanOutNode { - return &fanOutNode{ - nodeID: newNodeID(fanOutToExporters, pipelineID.String()), - pipelineID: pipelineID, - } -} - -func (n *fanOutNode) getConsumer() baseConsumer { - return n.baseConsumer -} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/graph/processor.go b/vendor/go.opentelemetry.io/collector/service/internal/graph/processor.go new file mode 100644 index 00000000000..3288a505d80 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/service/internal/graph/processor.go @@ -0,0 +1,70 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package graph // import "go.opentelemetry.io/collector/service/internal/graph" + +import ( + "context" + "fmt" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/xconsumer" + "go.opentelemetry.io/collector/pipeline" + "go.opentelemetry.io/collector/pipeline/xpipeline" + "go.opentelemetry.io/collector/processor" + "go.opentelemetry.io/collector/service/internal/builders" + "go.opentelemetry.io/collector/service/internal/components" +) + +const processorSeed = "processor" + +var _ consumerNode = (*processorNode)(nil) + +// Every processor instance is unique to one pipeline. +// Therefore, nodeID is derived from "pipeline ID" and "component ID". +type processorNode struct { + nodeID + componentID component.ID + pipelineID pipeline.ID + component.Component +} + +func newProcessorNode(pipelineID pipeline.ID, procID component.ID) *processorNode { + return &processorNode{ + nodeID: newNodeID(processorSeed, pipelineID.String(), procID.String()), + componentID: procID, + pipelineID: pipelineID, + } +} + +func (n *processorNode) getConsumer() baseConsumer { + return n.Component.(baseConsumer) +} + +func (n *processorNode) buildComponent(ctx context.Context, + tel component.TelemetrySettings, + info component.BuildInfo, + builder *builders.ProcessorBuilder, + next baseConsumer, +) error { + tel.Logger = components.ProcessorLogger(tel.Logger, n.componentID, n.pipelineID) + set := processor.Settings{ID: n.componentID, TelemetrySettings: tel, BuildInfo: info} + var err error + switch n.pipelineID.Signal() { + case pipeline.SignalTraces: + n.Component, err = builder.CreateTraces(ctx, set, next.(consumer.Traces)) + case pipeline.SignalMetrics: + n.Component, err = builder.CreateMetrics(ctx, set, next.(consumer.Metrics)) + case pipeline.SignalLogs: + n.Component, err = builder.CreateLogs(ctx, set, next.(consumer.Logs)) + case xpipeline.SignalProfiles: + n.Component, err = builder.CreateProfiles(ctx, set, next.(xconsumer.Profiles)) + default: + return fmt.Errorf("error creating processor %q in pipeline %q, data type %q is not supported", set.ID, n.pipelineID.String(), n.pipelineID.Signal()) + } + if err != nil { + return fmt.Errorf("failed to create %q processor, in pipeline %q: %w", set.ID, n.pipelineID.String(), err) + } + return nil +} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/graph/receiver.go b/vendor/go.opentelemetry.io/collector/service/internal/graph/receiver.go new file mode 100644 index 00000000000..48f7a36d148 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/service/internal/graph/receiver.go @@ -0,0 +1,81 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package graph // import 
"go.opentelemetry.io/collector/service/internal/graph" + +import ( + "context" + "fmt" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/xconsumer" + "go.opentelemetry.io/collector/internal/fanoutconsumer" + "go.opentelemetry.io/collector/pipeline" + "go.opentelemetry.io/collector/pipeline/xpipeline" + "go.opentelemetry.io/collector/receiver" + "go.opentelemetry.io/collector/service/internal/builders" + "go.opentelemetry.io/collector/service/internal/components" +) + +const receiverSeed = "receiver" + +// A receiver instance can be shared by multiple pipelines of the same type. +// Therefore, nodeID is derived from "pipeline type" and "component ID". +type receiverNode struct { + nodeID + componentID component.ID + pipelineType pipeline.Signal + component.Component +} + +func newReceiverNode(pipelineType pipeline.Signal, recvID component.ID) *receiverNode { + return &receiverNode{ + nodeID: newNodeID(receiverSeed, pipelineType.String(), recvID.String()), + componentID: recvID, + pipelineType: pipelineType, + } +} + +func (n *receiverNode) buildComponent(ctx context.Context, + tel component.TelemetrySettings, + info component.BuildInfo, + builder *builders.ReceiverBuilder, + nexts []baseConsumer, +) error { + tel.Logger = components.ReceiverLogger(tel.Logger, n.componentID, n.pipelineType) + set := receiver.Settings{ID: n.componentID, TelemetrySettings: tel, BuildInfo: info} + var err error + switch n.pipelineType { + case pipeline.SignalTraces: + var consumers []consumer.Traces + for _, next := range nexts { + consumers = append(consumers, next.(consumer.Traces)) + } + n.Component, err = builder.CreateTraces(ctx, set, fanoutconsumer.NewTraces(consumers)) + case pipeline.SignalMetrics: + var consumers []consumer.Metrics + for _, next := range nexts { + consumers = append(consumers, next.(consumer.Metrics)) + } + n.Component, err = builder.CreateMetrics(ctx, set, fanoutconsumer.NewMetrics(consumers)) + case pipeline.SignalLogs: + var consumers []consumer.Logs + for _, next := range nexts { + consumers = append(consumers, next.(consumer.Logs)) + } + n.Component, err = builder.CreateLogs(ctx, set, fanoutconsumer.NewLogs(consumers)) + case xpipeline.SignalProfiles: + var consumers []xconsumer.Profiles + for _, next := range nexts { + consumers = append(consumers, next.(xconsumer.Profiles)) + } + n.Component, err = builder.CreateProfiles(ctx, set, fanoutconsumer.NewProfiles(consumers)) + default: + return fmt.Errorf("error creating receiver %q for data type %q is not supported", set.ID, n.pipelineType) + } + if err != nil { + return fmt.Errorf("failed to create %q receiver for data type %q: %w", set.ID, n.pipelineType, err) + } + return nil +} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/graph/zpages.go b/vendor/go.opentelemetry.io/collector/service/internal/graph/zpages.go index 0f2793974e9..e4b9b5869a5 100644 --- a/vendor/go.opentelemetry.io/collector/service/internal/graph/zpages.go +++ b/vendor/go.opentelemetry.io/collector/service/internal/graph/zpages.go @@ -41,7 +41,7 @@ func (g *Graph) HandleZPages(w http.ResponseWriter, r *http.Request) { } procIDs := make([]string, 0, len(p.processors)) for _, c := range p.processors { - procIDs = append(procIDs, c.componentID.String()) + procIDs = append(procIDs, c.(*processorNode).componentID.String()) } exprIDs := make([]string, 0, len(p.exporters)) for _, c := range p.exporters { @@ -55,7 +55,7 @@ func (g *Graph) HandleZPages(w 
http.ResponseWriter, r *http.Request) { sumData.Rows = append(sumData.Rows, zpages.SummaryPipelinesTableRowData{ FullName: c.String(), - InputType: c.Type().String(), + InputType: c.Signal().String(), MutatesData: p.capabilitiesNode.getConsumer().Capabilities().MutatesData, Receivers: recvIDs, Processors: procIDs, diff --git a/vendor/go.opentelemetry.io/collector/service/internal/metadata/generated_telemetry.go b/vendor/go.opentelemetry.io/collector/service/internal/metadata/generated_telemetry.go new file mode 100644 index 00000000000..a180db3c3f0 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/service/internal/metadata/generated_telemetry.go @@ -0,0 +1,179 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "context" + "errors" + + "go.opentelemetry.io/otel/metric" + noopmetric "go.opentelemetry.io/otel/metric/noop" + "go.opentelemetry.io/otel/trace" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configtelemetry" +) + +func Meter(settings component.TelemetrySettings) metric.Meter { + return settings.MeterProvider.Meter("go.opentelemetry.io/collector/service") +} + +func Tracer(settings component.TelemetrySettings) trace.Tracer { + return settings.TracerProvider.Tracer("go.opentelemetry.io/collector/service") +} + +// TelemetryBuilder provides an interface for components to report telemetry +// as defined in metadata and user config. +type TelemetryBuilder struct { + meter metric.Meter + ProcessCPUSeconds metric.Float64ObservableCounter + observeProcessCPUSeconds func(context.Context, metric.Observer) error + ProcessMemoryRss metric.Int64ObservableGauge + observeProcessMemoryRss func(context.Context, metric.Observer) error + ProcessRuntimeHeapAllocBytes metric.Int64ObservableGauge + observeProcessRuntimeHeapAllocBytes func(context.Context, metric.Observer) error + ProcessRuntimeTotalAllocBytes metric.Int64ObservableCounter + observeProcessRuntimeTotalAllocBytes func(context.Context, metric.Observer) error + ProcessRuntimeTotalSysMemoryBytes metric.Int64ObservableGauge + observeProcessRuntimeTotalSysMemoryBytes func(context.Context, metric.Observer) error + ProcessUptime metric.Float64ObservableCounter + observeProcessUptime func(context.Context, metric.Observer) error +} + +// TelemetryBuilderOption applies changes to default builder. +type TelemetryBuilderOption interface { + apply(*TelemetryBuilder) +} + +type telemetryBuilderOptionFunc func(mb *TelemetryBuilder) + +func (tbof telemetryBuilderOptionFunc) apply(mb *TelemetryBuilder) { + tbof(mb) +} + +// WithProcessCPUSecondsCallback sets callback for observable ProcessCPUSeconds metric. +func WithProcessCPUSecondsCallback(cb func() float64, opts ...metric.ObserveOption) TelemetryBuilderOption { + return telemetryBuilderOptionFunc(func(builder *TelemetryBuilder) { + builder.observeProcessCPUSeconds = func(_ context.Context, o metric.Observer) error { + o.ObserveFloat64(builder.ProcessCPUSeconds, cb(), opts...) + return nil + } + }) +} + +// WithProcessMemoryRssCallback sets callback for observable ProcessMemoryRss metric. +func WithProcessMemoryRssCallback(cb func() int64, opts ...metric.ObserveOption) TelemetryBuilderOption { + return telemetryBuilderOptionFunc(func(builder *TelemetryBuilder) { + builder.observeProcessMemoryRss = func(_ context.Context, o metric.Observer) error { + o.ObserveInt64(builder.ProcessMemoryRss, cb(), opts...) 
+ return nil + } + }) +} + +// WithProcessRuntimeHeapAllocBytesCallback sets callback for observable ProcessRuntimeHeapAllocBytes metric. +func WithProcessRuntimeHeapAllocBytesCallback(cb func() int64, opts ...metric.ObserveOption) TelemetryBuilderOption { + return telemetryBuilderOptionFunc(func(builder *TelemetryBuilder) { + builder.observeProcessRuntimeHeapAllocBytes = func(_ context.Context, o metric.Observer) error { + o.ObserveInt64(builder.ProcessRuntimeHeapAllocBytes, cb(), opts...) + return nil + } + }) +} + +// WithProcessRuntimeTotalAllocBytesCallback sets callback for observable ProcessRuntimeTotalAllocBytes metric. +func WithProcessRuntimeTotalAllocBytesCallback(cb func() int64, opts ...metric.ObserveOption) TelemetryBuilderOption { + return telemetryBuilderOptionFunc(func(builder *TelemetryBuilder) { + builder.observeProcessRuntimeTotalAllocBytes = func(_ context.Context, o metric.Observer) error { + o.ObserveInt64(builder.ProcessRuntimeTotalAllocBytes, cb(), opts...) + return nil + } + }) +} + +// WithProcessRuntimeTotalSysMemoryBytesCallback sets callback for observable ProcessRuntimeTotalSysMemoryBytes metric. +func WithProcessRuntimeTotalSysMemoryBytesCallback(cb func() int64, opts ...metric.ObserveOption) TelemetryBuilderOption { + return telemetryBuilderOptionFunc(func(builder *TelemetryBuilder) { + builder.observeProcessRuntimeTotalSysMemoryBytes = func(_ context.Context, o metric.Observer) error { + o.ObserveInt64(builder.ProcessRuntimeTotalSysMemoryBytes, cb(), opts...) + return nil + } + }) +} + +// WithProcessUptimeCallback sets callback for observable ProcessUptime metric. +func WithProcessUptimeCallback(cb func() float64, opts ...metric.ObserveOption) TelemetryBuilderOption { + return telemetryBuilderOptionFunc(func(builder *TelemetryBuilder) { + builder.observeProcessUptime = func(_ context.Context, o metric.Observer) error { + o.ObserveFloat64(builder.ProcessUptime, cb(), opts...) 
+ return nil + } + }) +} + +// NewTelemetryBuilder provides a struct with methods to update all internal telemetry +// for a component +func NewTelemetryBuilder(settings component.TelemetrySettings, options ...TelemetryBuilderOption) (*TelemetryBuilder, error) { + builder := TelemetryBuilder{} + for _, op := range options { + op.apply(&builder) + } + builder.meter = Meter(settings) + var err, errs error + builder.ProcessCPUSeconds, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Float64ObservableCounter( + "otelcol_process_cpu_seconds", + metric.WithDescription("Total CPU user and system time in seconds [alpha]"), + metric.WithUnit("s"), + ) + errs = errors.Join(errs, err) + _, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).RegisterCallback(builder.observeProcessCPUSeconds, builder.ProcessCPUSeconds) + errs = errors.Join(errs, err) + builder.ProcessMemoryRss, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64ObservableGauge( + "otelcol_process_memory_rss", + metric.WithDescription("Total physical memory (resident set size) [alpha]"), + metric.WithUnit("By"), + ) + errs = errors.Join(errs, err) + _, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).RegisterCallback(builder.observeProcessMemoryRss, builder.ProcessMemoryRss) + errs = errors.Join(errs, err) + builder.ProcessRuntimeHeapAllocBytes, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64ObservableGauge( + "otelcol_process_runtime_heap_alloc_bytes", + metric.WithDescription("Bytes of allocated heap objects (see 'go doc runtime.MemStats.HeapAlloc') [alpha]"), + metric.WithUnit("By"), + ) + errs = errors.Join(errs, err) + _, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).RegisterCallback(builder.observeProcessRuntimeHeapAllocBytes, builder.ProcessRuntimeHeapAllocBytes) + errs = errors.Join(errs, err) + builder.ProcessRuntimeTotalAllocBytes, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64ObservableCounter( + "otelcol_process_runtime_total_alloc_bytes", + metric.WithDescription("Cumulative bytes allocated for heap objects (see 'go doc runtime.MemStats.TotalAlloc') [alpha]"), + metric.WithUnit("By"), + ) + errs = errors.Join(errs, err) + _, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).RegisterCallback(builder.observeProcessRuntimeTotalAllocBytes, builder.ProcessRuntimeTotalAllocBytes) + errs = errors.Join(errs, err) + builder.ProcessRuntimeTotalSysMemoryBytes, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64ObservableGauge( + "otelcol_process_runtime_total_sys_memory_bytes", + metric.WithDescription("Total bytes of memory obtained from the OS (see 'go doc runtime.MemStats.Sys') [alpha]"), + metric.WithUnit("By"), + ) + errs = errors.Join(errs, err) + _, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).RegisterCallback(builder.observeProcessRuntimeTotalSysMemoryBytes, builder.ProcessRuntimeTotalSysMemoryBytes) + errs = errors.Join(errs, err) + builder.ProcessUptime, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Float64ObservableCounter( + "otelcol_process_uptime", + metric.WithDescription("Uptime of the process [alpha]"), + metric.WithUnit("s"), + ) + errs = errors.Join(errs, err) + _, err 
= getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).RegisterCallback(builder.observeProcessUptime, builder.ProcessUptime) + errs = errors.Join(errs, err) + return &builder, errs +} + +func getLeveledMeter(meter metric.Meter, cfgLevel, srvLevel configtelemetry.Level) metric.Meter { + if cfgLevel <= srvLevel { + return meter + } + return noopmetric.Meter{} +} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/proctelemetry/config.go b/vendor/go.opentelemetry.io/collector/service/internal/proctelemetry/config.go deleted file mode 100644 index 0a9a8b08d82..00000000000 --- a/vendor/go.opentelemetry.io/collector/service/internal/proctelemetry/config.go +++ /dev/null @@ -1,296 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package proctelemetry // import "go.opentelemetry.io/collector/service/internal/proctelemetry" - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "net/http" - "net/url" - "os" - "strings" - "time" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp" - "go.opentelemetry.io/contrib/config" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/bridge/opencensus" - "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" - "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" - otelprom "go.opentelemetry.io/otel/exporters/prometheus" - "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric" - "go.opentelemetry.io/otel/sdk/instrumentation" - sdkmetric "go.opentelemetry.io/otel/sdk/metric" - "go.opentelemetry.io/otel/sdk/resource" - - "go.opentelemetry.io/collector/processor/processorhelper" - semconv "go.opentelemetry.io/collector/semconv/v1.18.0" -) - -const ( - - // gRPC Instrumentation Name - GRPCInstrumentation = "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" - - // http Instrumentation Name - HTTPInstrumentation = "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" - - // supported protocols - protocolProtobufHTTP = "http/protobuf" - protocolProtobufGRPC = "grpc/protobuf" -) - -var ( - // GRPCUnacceptableKeyValues is a list of high cardinality grpc attributes that should be filtered out. - GRPCUnacceptableKeyValues = []attribute.KeyValue{ - attribute.String(semconv.AttributeNetSockPeerAddr, ""), - attribute.String(semconv.AttributeNetSockPeerPort, ""), - attribute.String(semconv.AttributeNetSockPeerName, ""), - } - - // HTTPUnacceptableKeyValues is a list of high cardinality http attributes that should be filtered out. 
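// [Editor's sketch — annotation, not part of the vendored diff] The (removed)
// cardinalityFilter below builds an attribute.Filter that drops only the
// deny-listed keys; every other attribute passes through:
//
//	f := cardinalityFilter(attribute.String("net.sock.peer.addr", ""))
//	f(attribute.String("net.sock.peer.addr", "10.0.0.1")) // false: filtered out
//	f(attribute.String("rpc.method", "Export"))           // true: kept (hypothetical key)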
- HTTPUnacceptableKeyValues = []attribute.KeyValue{ - attribute.String(semconv.AttributeNetHostName, ""), - attribute.String(semconv.AttributeNetHostPort, ""), - } - - errNoValidMetricExporter = errors.New("no valid metric exporter") -) - -func InitMetricReader(ctx context.Context, reader config.MetricReader, asyncErrorChannel chan error) (sdkmetric.Reader, *http.Server, error) { - if reader.Pull != nil { - return initPullExporter(reader.Pull.Exporter, asyncErrorChannel) - } - if reader.Periodic != nil { - opts := []sdkmetric.PeriodicReaderOption{ - sdkmetric.WithProducer(opencensus.NewMetricProducer()), - } - if reader.Periodic.Interval != nil { - opts = append(opts, sdkmetric.WithInterval(time.Duration(*reader.Periodic.Interval)*time.Millisecond)) - } - - if reader.Periodic.Timeout != nil { - opts = append(opts, sdkmetric.WithTimeout(time.Duration(*reader.Periodic.Timeout)*time.Millisecond)) - } - return initPeriodicExporter(ctx, reader.Periodic.Exporter, opts...) - } - return nil, nil, fmt.Errorf("unsupported metric reader type %v", reader) -} - -func InitOpenTelemetry(res *resource.Resource, options []sdkmetric.Option, disableHighCardinality bool) (*sdkmetric.MeterProvider, error) { - opts := []sdkmetric.Option{ - sdkmetric.WithResource(res), - sdkmetric.WithView(batchViews(disableHighCardinality)...), - } - - opts = append(opts, options...) - return sdkmetric.NewMeterProvider( - opts..., - ), nil -} - -func InitPrometheusServer(registry *prometheus.Registry, address string, asyncErrorChannel chan error) *http.Server { - mux := http.NewServeMux() - mux.Handle("/metrics", promhttp.HandlerFor(registry, promhttp.HandlerOpts{})) - server := &http.Server{ - Addr: address, - Handler: mux, - } - go func() { - if serveErr := server.ListenAndServe(); serveErr != nil && !errors.Is(serveErr, http.ErrServerClosed) { - asyncErrorChannel <- serveErr - } - }() - return server -} - -func batchViews(disableHighCardinality bool) []sdkmetric.View { - views := []sdkmetric.View{ - sdkmetric.NewView( - sdkmetric.Instrument{Name: processorhelper.BuildCustomMetricName("batch", "batch_send_size")}, - sdkmetric.Stream{Aggregation: sdkmetric.AggregationExplicitBucketHistogram{ - Boundaries: []float64{10, 25, 50, 75, 100, 250, 500, 750, 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000, 20000, 30000, 50000, 100000}, - }}, - ), - sdkmetric.NewView( - sdkmetric.Instrument{Name: processorhelper.BuildCustomMetricName("batch", "batch_send_size_bytes")}, - sdkmetric.Stream{Aggregation: sdkmetric.AggregationExplicitBucketHistogram{ - Boundaries: []float64{10, 25, 50, 75, 100, 250, 500, 750, 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000, 20000, 30000, 50000, - 100_000, 200_000, 300_000, 400_000, 500_000, 600_000, 700_000, 800_000, 900_000, - 1000_000, 2000_000, 3000_000, 4000_000, 5000_000, 6000_000, 7000_000, 8000_000, 9000_000}, - }}, - ), - } - if disableHighCardinality { - views = append(views, sdkmetric.NewView(sdkmetric.Instrument{ - Scope: instrumentation.Scope{ - Name: GRPCInstrumentation, - }, - }, sdkmetric.Stream{ - AttributeFilter: cardinalityFilter(GRPCUnacceptableKeyValues...), - })) - views = append(views, sdkmetric.NewView(sdkmetric.Instrument{ - Scope: instrumentation.Scope{ - Name: HTTPInstrumentation, - }, - }, sdkmetric.Stream{ - AttributeFilter: cardinalityFilter(HTTPUnacceptableKeyValues...), - })) - } - return views -} - -func cardinalityFilter(kvs ...attribute.KeyValue) attribute.Filter { - filter := attribute.NewSet(kvs...) 
- return func(kv attribute.KeyValue) bool { - return !filter.HasValue(kv.Key) - } -} - -func initPrometheusExporter(prometheusConfig *config.Prometheus, asyncErrorChannel chan error) (sdkmetric.Reader, *http.Server, error) { - promRegistry := prometheus.NewRegistry() - if prometheusConfig.Host == nil { - return nil, nil, fmt.Errorf("host must be specified") - } - if prometheusConfig.Port == nil { - return nil, nil, fmt.Errorf("port must be specified") - } - exporter, err := otelprom.New( - otelprom.WithRegisterer(promRegistry), - // https://github.com/open-telemetry/opentelemetry-collector/issues/8043 - otelprom.WithoutUnits(), - // Disabled for the moment until this becomes stable, and we are ready to break backwards compatibility. - otelprom.WithoutScopeInfo(), - otelprom.WithProducer(opencensus.NewMetricProducer()), - // This allows us to produce metrics that are backwards compatible w/ opencensus - otelprom.WithoutCounterSuffixes(), - otelprom.WithNamespace("otelcol"), - otelprom.WithResourceAsConstantLabels(attribute.NewDenyKeysFilter()), - ) - if err != nil { - return nil, nil, fmt.Errorf("error creating otel prometheus exporter: %w", err) - } - - return exporter, InitPrometheusServer(promRegistry, fmt.Sprintf("%s:%d", *prometheusConfig.Host, *prometheusConfig.Port), asyncErrorChannel), nil -} - -func initPullExporter(exporter config.MetricExporter, asyncErrorChannel chan error) (sdkmetric.Reader, *http.Server, error) { - if exporter.Prometheus != nil { - return initPrometheusExporter(exporter.Prometheus, asyncErrorChannel) - } - return nil, nil, errNoValidMetricExporter -} - -func initPeriodicExporter(ctx context.Context, exporter config.MetricExporter, opts ...sdkmetric.PeriodicReaderOption) (sdkmetric.Reader, *http.Server, error) { - if exporter.Console != nil { - enc := json.NewEncoder(os.Stdout) - enc.SetIndent("", " ") - - exp, err := stdoutmetric.New( - stdoutmetric.WithEncoder(enc), - ) - if err != nil { - return nil, nil, err - } - return sdkmetric.NewPeriodicReader(exp, opts...), nil, nil - } - if exporter.OTLP != nil { - var err error - var exp sdkmetric.Exporter - switch exporter.OTLP.Protocol { - case protocolProtobufHTTP: - exp, err = initOTLPHTTPExporter(ctx, exporter.OTLP) - case protocolProtobufGRPC: - exp, err = initOTLPgRPCExporter(ctx, exporter.OTLP) - default: - return nil, nil, fmt.Errorf("unsupported protocol %s", exporter.OTLP.Protocol) - } - if err != nil { - return nil, nil, err - } - return sdkmetric.NewPeriodicReader(exp, opts...), nil, nil - } - return nil, nil, errNoValidMetricExporter -} - -func normalizeEndpoint(endpoint string) string { - if !strings.HasPrefix(endpoint, "https://") && !strings.HasPrefix(endpoint, "http://") { - return fmt.Sprintf("http://%s", endpoint) - } - return endpoint -} - -func initOTLPgRPCExporter(ctx context.Context, otlpConfig *config.OTLPMetric) (sdkmetric.Exporter, error) { - opts := []otlpmetricgrpc.Option{} - - if len(otlpConfig.Endpoint) > 0 { - u, err := url.ParseRequestURI(normalizeEndpoint(otlpConfig.Endpoint)) - if err != nil { - return nil, err - } - opts = append(opts, otlpmetricgrpc.WithEndpoint(u.Host)) - if u.Scheme == "http" { - opts = append(opts, otlpmetricgrpc.WithInsecure()) - } - } - - if otlpConfig.Compression != nil { - switch *otlpConfig.Compression { - case "gzip": - opts = append(opts, otlpmetricgrpc.WithCompressor(*otlpConfig.Compression)) - case "none": - break - default: - return nil, fmt.Errorf("unsupported compression %q", *otlpConfig.Compression) - } - } - if otlpConfig.Timeout != nil { - opts 
= append(opts, otlpmetricgrpc.WithTimeout(time.Millisecond*time.Duration(*otlpConfig.Timeout))) - } - if len(otlpConfig.Headers) > 0 { - opts = append(opts, otlpmetricgrpc.WithHeaders(otlpConfig.Headers)) - } - - return otlpmetricgrpc.New(ctx, opts...) -} - -func initOTLPHTTPExporter(ctx context.Context, otlpConfig *config.OTLPMetric) (sdkmetric.Exporter, error) { - opts := []otlpmetrichttp.Option{} - - if len(otlpConfig.Endpoint) > 0 { - u, err := url.ParseRequestURI(normalizeEndpoint(otlpConfig.Endpoint)) - if err != nil { - return nil, err - } - opts = append(opts, otlpmetrichttp.WithEndpoint(u.Host)) - - if u.Scheme == "http" { - opts = append(opts, otlpmetrichttp.WithInsecure()) - } - if len(u.Path) > 0 { - opts = append(opts, otlpmetrichttp.WithURLPath(u.Path)) - } - } - if otlpConfig.Compression != nil { - switch *otlpConfig.Compression { - case "gzip": - opts = append(opts, otlpmetrichttp.WithCompression(otlpmetrichttp.GzipCompression)) - case "none": - opts = append(opts, otlpmetrichttp.WithCompression(otlpmetrichttp.NoCompression)) - default: - return nil, fmt.Errorf("unsupported compression %q", *otlpConfig.Compression) - } - } - if otlpConfig.Timeout != nil { - opts = append(opts, otlpmetrichttp.WithTimeout(time.Millisecond*time.Duration(*otlpConfig.Timeout))) - } - if len(otlpConfig.Headers) > 0 { - opts = append(opts, otlpmetrichttp.WithHeaders(otlpConfig.Headers)) - } - - return otlpmetrichttp.New(ctx, opts...) -} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/proctelemetry/process_telemetry.go b/vendor/go.opentelemetry.io/collector/service/internal/proctelemetry/process_telemetry.go index 991897f8b1b..de60d4518ed 100644 --- a/vendor/go.opentelemetry.io/collector/service/internal/proctelemetry/process_telemetry.go +++ b/vendor/go.opentelemetry.io/collector/service/internal/proctelemetry/process_telemetry.go @@ -10,31 +10,19 @@ import ( "sync" "time" - "github.com/shirou/gopsutil/v3/common" - "github.com/shirou/gopsutil/v3/process" - otelmetric "go.opentelemetry.io/otel/metric" - "go.uber.org/multierr" -) + "github.com/shirou/gopsutil/v4/common" + "github.com/shirou/gopsutil/v4/process" -const ( - scopeName = "go.opentelemetry.io/collector/service/process_telemetry" - processNameKey = "process_name" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/service/internal/metadata" ) // processMetrics is a struct that contains views related to process metrics (cpu, mem, etc) type processMetrics struct { startTimeUnixNano int64 - ballastSizeBytes uint64 proc *process.Process context context.Context - otelProcessUptime otelmetric.Float64ObservableCounter - otelAllocMem otelmetric.Int64ObservableGauge - otelTotalAllocMem otelmetric.Int64ObservableCounter - otelSysMem otelmetric.Int64ObservableGauge - otelCPUSeconds otelmetric.Float64ObservableCounter - otelRSSMemory otelmetric.Int64ObservableGauge - // mu protects everything below. mu sync.Mutex lastMsRead time.Time @@ -64,7 +52,7 @@ func WithHostProc(hostProc string) RegisterOption { // RegisterProcessMetrics creates a new set of processMetrics (mem, cpu) that can be used to measure // basic information about this process.
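// [Editor's sketch — annotation, not part of the vendored diff] With the
// memory-ballast parameter dropped, registration now takes the component
// TelemetrySettings directly and wires its observables through the generated
// TelemetryBuilder, roughly:
//
//	err := proctelemetry.RegisterProcessMetrics(set) // set is a component.TelemetrySettings
//	// or, overriding /proc discovery:
//	err = proctelemetry.RegisterProcessMetrics(set, proctelemetry.WithHostProc("/proc"))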
-func RegisterProcessMetrics(mp otelmetric.MeterProvider, ballastSizeBytes uint64, opts ...RegisterOption) error { +func RegisterProcessMetrics(cfg component.TelemetrySettings, opts ...RegisterOption) error { set := registerOption{} for _, opt := range opts { opt.apply(&set) @@ -72,7 +60,6 @@ func RegisterProcessMetrics(mp otelmetric.MeterProvider, ballastSizeBytes uint64 var err error pm := &processMetrics{ startTimeUnixNano: time.Now().UnixNano(), - ballastSizeBytes: ballastSizeBytes, ms: &runtime.MemStats{}, } @@ -81,78 +68,21 @@ func RegisterProcessMetrics(mp otelmetric.MeterProvider, ballastSizeBytes uint64 ctx = context.WithValue(ctx, common.EnvKey, common.EnvMap{common.HostProcEnvKey: set.hostProc}) } pm.context = ctx + // nolint:gosec pm.proc, err = process.NewProcessWithContext(pm.context, int32(os.Getpid())) if err != nil { return err } - return pm.record(mp.Meter(scopeName)) -} - -func (pm *processMetrics) record(meter otelmetric.Meter) error { - var errs, err error - - pm.otelProcessUptime, err = meter.Float64ObservableCounter( - "process_uptime", - otelmetric.WithDescription("Uptime of the process"), - otelmetric.WithUnit("s"), - otelmetric.WithFloat64Callback(func(_ context.Context, o otelmetric.Float64Observer) error { - o.Observe(pm.updateProcessUptime()) - return nil - })) - errs = multierr.Append(errs, err) - - pm.otelAllocMem, err = meter.Int64ObservableGauge( - "process_runtime_heap_alloc_bytes", - otelmetric.WithDescription("Bytes of allocated heap objects (see 'go doc runtime.MemStats.HeapAlloc')"), - otelmetric.WithUnit("By"), - otelmetric.WithInt64Callback(func(_ context.Context, o otelmetric.Int64Observer) error { - o.Observe(pm.updateAllocMem()) - return nil - })) - errs = multierr.Append(errs, err) - - pm.otelTotalAllocMem, err = meter.Int64ObservableCounter( - "process_runtime_total_alloc_bytes", - otelmetric.WithDescription("Cumulative bytes allocated for heap objects (see 'go doc runtime.MemStats.TotalAlloc')"), - otelmetric.WithUnit("By"), - otelmetric.WithInt64Callback(func(_ context.Context, o otelmetric.Int64Observer) error { - o.Observe(pm.updateTotalAllocMem()) - return nil - })) - errs = multierr.Append(errs, err) - - pm.otelSysMem, err = meter.Int64ObservableGauge( - "process_runtime_total_sys_memory_bytes", - otelmetric.WithDescription("Total bytes of memory obtained from the OS (see 'go doc runtime.MemStats.Sys')"), - otelmetric.WithUnit("By"), - otelmetric.WithInt64Callback(func(_ context.Context, o otelmetric.Int64Observer) error { - o.Observe(pm.updateSysMem()) - return nil - })) - errs = multierr.Append(errs, err) - - pm.otelCPUSeconds, err = meter.Float64ObservableCounter( - "process_cpu_seconds", - otelmetric.WithDescription("Total CPU user and system time in seconds"), - otelmetric.WithUnit("s"), - otelmetric.WithFloat64Callback(func(_ context.Context, o otelmetric.Float64Observer) error { - o.Observe(pm.updateCPUSeconds()) - return nil - })) - errs = multierr.Append(errs, err) - - pm.otelRSSMemory, err = meter.Int64ObservableGauge( - "process_memory_rss", - otelmetric.WithDescription("Total physical memory (resident set size)"), - otelmetric.WithUnit("By"), - otelmetric.WithInt64Callback(func(_ context.Context, o otelmetric.Int64Observer) error { - o.Observe(pm.updateRSSMemory()) - return nil - })) - errs = multierr.Append(errs, err) - - return errs + _, err = metadata.NewTelemetryBuilder(cfg, + metadata.WithProcessUptimeCallback(pm.updateProcessUptime), + metadata.WithProcessRuntimeHeapAllocBytesCallback(pm.updateAllocMem), + 
metadata.WithProcessRuntimeTotalAllocBytesCallback(pm.updateTotalAllocMem), + metadata.WithProcessRuntimeTotalSysMemoryBytesCallback(pm.updateSysMem), + metadata.WithProcessCPUSecondsCallback(pm.updateCPUSeconds), + metadata.WithProcessMemoryRssCallback(pm.updateRSSMemory), + ) + return err } func (pm *processMetrics) updateProcessUptime() float64 { @@ -164,6 +94,7 @@ func (pm *processMetrics) updateAllocMem() int64 { pm.mu.Lock() defer pm.mu.Unlock() pm.readMemStatsIfNeeded() + // nolint:gosec return int64(pm.ms.Alloc) } @@ -171,6 +102,7 @@ func (pm *processMetrics) updateTotalAllocMem() int64 { pm.mu.Lock() defer pm.mu.Unlock() pm.readMemStatsIfNeeded() + // nolint:gosec return int64(pm.ms.TotalAlloc) } @@ -178,6 +110,7 @@ func (pm *processMetrics) updateSysMem() int64 { pm.mu.Lock() defer pm.mu.Unlock() pm.readMemStatsIfNeeded() + // nolint:gosec return int64(pm.ms.Sys) } @@ -196,6 +129,7 @@ func (pm *processMetrics) updateRSSMemory() int64 { if err != nil { return 0 } + // nolint:gosec return int64(mem.RSS) } @@ -207,10 +141,4 @@ func (pm *processMetrics) readMemStatsIfNeeded() { } pm.lastMsRead = now runtime.ReadMemStats(pm.ms) - if pm.ballastSizeBytes > 0 { - pm.ms.Alloc -= pm.ballastSizeBytes - pm.ms.HeapAlloc -= pm.ballastSizeBytes - pm.ms.HeapSys -= pm.ballastSizeBytes - pm.ms.HeapInuse -= pm.ballastSizeBytes - } } diff --git a/vendor/go.opentelemetry.io/collector/service/internal/servicetelemetry/nop_telemetry_settings.go b/vendor/go.opentelemetry.io/collector/service/internal/servicetelemetry/nop_telemetry_settings.go deleted file mode 100644 index 116554a51f8..00000000000 --- a/vendor/go.opentelemetry.io/collector/service/internal/servicetelemetry/nop_telemetry_settings.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package servicetelemetry // import "go.opentelemetry.io/collector/service/internal/servicetelemetry" - -import ( - noopmetric "go.opentelemetry.io/otel/metric/noop" - nooptrace "go.opentelemetry.io/otel/trace/noop" - "go.uber.org/zap" - - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/config/configtelemetry" - "go.opentelemetry.io/collector/pdata/pcommon" - "go.opentelemetry.io/collector/service/internal/status" -) - -// NewNopTelemetrySettings returns a new nop settings for Create* functions. 
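A usage sketch for the reworked registration above; note that proctelemetry is internal to the collector module, so this only compiles inside it, and the no-op providers are placeholder assumptions:

// Callers now hand over component.TelemetrySettings instead of a
// MeterProvider plus ballast size; the meter provider in the settings
// decides where the process_* metrics are exported.
telset := component.TelemetrySettings{
	Logger:        zap.NewNop(),
	MeterProvider: noop.NewMeterProvider(), // go.opentelemetry.io/otel/metric/noop
}
if err := proctelemetry.RegisterProcessMetrics(telset); err != nil {
	// registration failed, e.g. the current PID could not be resolved
	panic(err)
}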
-func NewNopTelemetrySettings() TelemetrySettings { - return TelemetrySettings{ - Logger: zap.NewNop(), - TracerProvider: nooptrace.NewTracerProvider(), - MeterProvider: noopmetric.NewMeterProvider(), - MetricsLevel: configtelemetry.LevelNone, - Resource: pcommon.NewResource(), - Status: status.NewReporter(func(*component.InstanceID, *component.StatusEvent) {}, func(error) {}), - } -} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/servicetelemetry/telemetry_settings.go b/vendor/go.opentelemetry.io/collector/service/internal/servicetelemetry/telemetry_settings.go deleted file mode 100644 index 55c6b9f3962..00000000000 --- a/vendor/go.opentelemetry.io/collector/service/internal/servicetelemetry/telemetry_settings.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package servicetelemetry // import "go.opentelemetry.io/collector/service/internal/servicetelemetry" - -import ( - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" - - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/config/configtelemetry" - "go.opentelemetry.io/collector/pdata/pcommon" - "go.opentelemetry.io/collector/service/internal/status" -) - -// TelemetrySettings mirrors component.TelemetrySettings except for the mechanism for reporting -// status. Service-level status reporting has additional methods which can report status for -// components by their InstanceID whereas the component versions are tied to a specific component. -type TelemetrySettings struct { - // Logger that the factory can use during creation and can pass to the created - // component to be used later as well. - Logger *zap.Logger - - // TracerProvider that the factory can pass to other instrumented third-party libraries. - TracerProvider trace.TracerProvider - - // MeterProvider that the factory can pass to other instrumented third-party libraries. - MeterProvider metric.MeterProvider - - // MetricsLevel controls the level of detail for metrics emitted by the collector. - // Experimental: *NOTE* this field is experimental and may be changed or removed. - MetricsLevel configtelemetry.Level - - // Resource contains the resource attributes for the collector's telemetry. - Resource pcommon.Resource - - // Status contains a Reporter that allows the service to report status on behalf of a - // component. - Status *status.Reporter -} - -// ToComponentTelemetrySettings returns a TelemetrySettings for a specific component derived from -// this service level Settings object. 
-func (s TelemetrySettings) ToComponentTelemetrySettings(id *component.InstanceID) component.TelemetrySettings { - statusFunc := status.NewReportStatusFunc(id, s.Status.ReportStatus) - return component.TelemetrySettings{ - Logger: s.Logger, - TracerProvider: s.TracerProvider, - MeterProvider: s.MeterProvider, - MetricsLevel: s.MetricsLevel, - Resource: s.Resource, - ReportStatus: statusFunc, - } -} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/status/status.go b/vendor/go.opentelemetry.io/collector/service/internal/status/status.go index e1d524906e5..5a44a02665f 100644 --- a/vendor/go.opentelemetry.io/collector/service/internal/status/status.go +++ b/vendor/go.opentelemetry.io/collector/service/internal/status/status.go @@ -8,26 +8,26 @@ import ( "fmt" "sync" - "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componentstatus" ) -// onTransitionFunc receives a component.StatusEvent on a successful state transition -type onTransitionFunc func(*component.StatusEvent) +// onTransitionFunc receives a componentstatus.Event on a successful state transition +type onTransitionFunc func(*componentstatus.Event) // errInvalidStateTransition is returned for invalid state transitions var errInvalidStateTransition = errors.New("invalid state transition") // fsm is a finite state machine that models transitions for component status type fsm struct { - current *component.StatusEvent - transitions map[component.Status]map[component.Status]struct{} + current *componentstatus.Event + transitions map[componentstatus.Status]map[componentstatus.Status]struct{} onTransition onTransitionFunc } // transition will attempt to execute a state transition. If it's successful, it calls the -// onTransitionFunc with a StatusEvent representing the new state. Returns an error if the arguments +// onTransitionFunc with an Event representing the new state. Returns an error if the arguments // result in an invalid status, or if the state transition is not valid. -func (m *fsm) transition(ev *component.StatusEvent) error { +func (m *fsm) transition(ev *componentstatus.Event) error { if _, ok := m.transitions[m.current.Status()][ev.Status()]; !ok { return fmt.Errorf( "cannot transition from %s to %s: %w", @@ -41,134 +41,125 @@ func (m *fsm) transition(ev *component.StatusEvent) error { return nil } -// newFSM creates a state machine with all valid transitions for component.Status. -// The initial state is set to component.StatusNone. +// newFSM creates a state machine with all valid transitions for componentstatus.Status. +// The initial state is set to componentstatus.StatusNone.
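Before the constructor itself, a small package-internal sketch of how the transition table behaves (fsm and newFSM are unexported, so this is illustrative only):

m := newFSM(func(*componentstatus.Event) { /* observe successful transitions */ })
// StatusNone -> StatusStarting is in the table, so this succeeds.
_ = m.transition(componentstatus.NewEvent(componentstatus.StatusStarting))
// StatusStarting -> StatusStarting is not in the table, so this returns a
// wrapped errInvalidStateTransition.
err := m.transition(componentstatus.NewEvent(componentstatus.StatusStarting))
_ = err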
func newFSM(onTransition onTransitionFunc) *fsm { return &fsm{ - current: component.NewStatusEvent(component.StatusNone), + current: componentstatus.NewEvent(componentstatus.StatusNone), onTransition: onTransition, - transitions: map[component.Status]map[component.Status]struct{}{ - component.StatusNone: { - component.StatusStarting: {}, + transitions: map[componentstatus.Status]map[componentstatus.Status]struct{}{ + componentstatus.StatusNone: { + componentstatus.StatusStarting: {}, }, - component.StatusStarting: { - component.StatusOK: {}, - component.StatusRecoverableError: {}, - component.StatusPermanentError: {}, - component.StatusFatalError: {}, - component.StatusStopping: {}, + componentstatus.StatusStarting: { + componentstatus.StatusOK: {}, + componentstatus.StatusRecoverableError: {}, + componentstatus.StatusPermanentError: {}, + componentstatus.StatusFatalError: {}, + componentstatus.StatusStopping: {}, }, - component.StatusOK: { - component.StatusRecoverableError: {}, - component.StatusPermanentError: {}, - component.StatusFatalError: {}, - component.StatusStopping: {}, + componentstatus.StatusOK: { + componentstatus.StatusRecoverableError: {}, + componentstatus.StatusPermanentError: {}, + componentstatus.StatusFatalError: {}, + componentstatus.StatusStopping: {}, }, - component.StatusRecoverableError: { - component.StatusOK: {}, - component.StatusPermanentError: {}, - component.StatusFatalError: {}, - component.StatusStopping: {}, + componentstatus.StatusRecoverableError: { + componentstatus.StatusOK: {}, + componentstatus.StatusPermanentError: {}, + componentstatus.StatusFatalError: {}, + componentstatus.StatusStopping: {}, }, - component.StatusPermanentError: {}, - component.StatusFatalError: {}, - component.StatusStopping: { - component.StatusRecoverableError: {}, - component.StatusPermanentError: {}, - component.StatusFatalError: {}, - component.StatusStopped: {}, + componentstatus.StatusPermanentError: { + componentstatus.StatusStopping: {}, }, - component.StatusStopped: {}, + componentstatus.StatusFatalError: {}, + componentstatus.StatusStopping: { + componentstatus.StatusRecoverableError: {}, + componentstatus.StatusPermanentError: {}, + componentstatus.StatusFatalError: {}, + componentstatus.StatusStopped: {}, + }, + componentstatus.StatusStopped: {}, }, } } // NotifyStatusFunc is the receiver of status events after successful state transitions -type NotifyStatusFunc func(*component.InstanceID, *component.StatusEvent) +type NotifyStatusFunc func(*componentstatus.InstanceID, *componentstatus.Event) // InvalidTransitionFunc is the receiver of invalid transition errors type InvalidTransitionFunc func(error) -// ServiceStatusFunc is the expected type of ReportStatus for servicetelemetry.Settings -type ServiceStatusFunc func(*component.InstanceID, *component.StatusEvent) +// ServiceStatusFunc is the expected type of ReportStatus +type ServiceStatusFunc func(*componentstatus.InstanceID, *componentstatus.Event) // ErrStatusNotReady is returned when trying to report status before service start var ErrStatusNotReady = errors.New("report component status is not ready until service start") // Reporter handles component status reporting -type Reporter struct { +type Reporter interface { + ReportStatus(id *componentstatus.InstanceID, ev *componentstatus.Event) + ReportOKIfStarting(id *componentstatus.InstanceID) +} + +type reporter struct { mu sync.Mutex - ready bool - fsmMap map[*component.InstanceID]*fsm + fsmMap map[*componentstatus.InstanceID]*fsm onStatusChange 
NotifyStatusFunc onInvalidTransition InvalidTransitionFunc } // NewReporter returns a reporter that will invoke the NotifyStatusFunc when a component's status // has changed. -func NewReporter(onStatusChange NotifyStatusFunc, onInvalidTransition InvalidTransitionFunc) *Reporter { - return &Reporter{ - fsmMap: make(map[*component.InstanceID]*fsm), +func NewReporter(onStatusChange NotifyStatusFunc, onInvalidTransition InvalidTransitionFunc) Reporter { + return &reporter{ + fsmMap: make(map[*componentstatus.InstanceID]*fsm), onStatusChange: onStatusChange, onInvalidTransition: onInvalidTransition, } } -// Ready enables status reporting -func (r *Reporter) Ready() { - r.mu.Lock() - defer r.mu.Unlock() - r.ready = true -} - // ReportStatus reports status for the given InstanceID -func (r *Reporter) ReportStatus( - id *component.InstanceID, - ev *component.StatusEvent, +func (r *reporter) ReportStatus( + id *componentstatus.InstanceID, + ev *componentstatus.Event, ) { r.mu.Lock() defer r.mu.Unlock() - if !r.ready { - r.onInvalidTransition(ErrStatusNotReady) - } else { - if err := r.componentFSM(id).transition(ev); err != nil { - r.onInvalidTransition(err) - } + + if err := r.componentFSM(id).transition(ev); err != nil { + r.onInvalidTransition(err) } } -func (r *Reporter) ReportOKIfStarting(id *component.InstanceID) { +func (r *reporter) ReportOKIfStarting(id *componentstatus.InstanceID) { r.mu.Lock() defer r.mu.Unlock() - if !r.ready { - r.onInvalidTransition(ErrStatusNotReady) - } fsm := r.componentFSM(id) - if fsm.current.Status() == component.StatusStarting { - if err := fsm.transition(component.NewStatusEvent(component.StatusOK)); err != nil { + if fsm.current.Status() == componentstatus.StatusStarting { + if err := fsm.transition(componentstatus.NewEvent(componentstatus.StatusOK)); err != nil { r.onInvalidTransition(err) } } } // Note: a lock must be acquired before calling this method. -func (r *Reporter) componentFSM(id *component.InstanceID) *fsm { +func (r *reporter) componentFSM(id *componentstatus.InstanceID) *fsm { fsm, ok := r.fsmMap[id] if !ok { - fsm = newFSM(func(ev *component.StatusEvent) { r.onStatusChange(id, ev) }) + fsm = newFSM(func(ev *componentstatus.Event) { r.onStatusChange(id, ev) }) r.fsmMap[id] = fsm } return fsm } -// NewReportStatusFunc returns a function to be used as ReportStatus for -// component.TelemetrySettings, which differs from servicetelemetry.Settings in that -// the component version is tied to specific component instance. 
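A sketch of how the reworked Reporter is driven; with the Ready gate removed, events can be reported as soon as NewReporter returns (the instance ID here is a placeholder, not a value from the diff):

var id *componentstatus.InstanceID // placeholder; a real component instance ID

rep := status.NewReporter(
	func(id *componentstatus.InstanceID, ev *componentstatus.Event) {
		// successful transitions are fanned out to watchers here
	},
	func(err error) {
		// invalid transitions arrive here and are typically just logged
	},
)
rep.ReportStatus(id, componentstatus.NewEvent(componentstatus.StatusStarting))
rep.ReportOKIfStarting(id) // Starting -> OK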
+// NewReportStatusFunc returns a function to be used as ReportStatus for componentstatus.TelemetrySettings func NewReportStatusFunc( - id *component.InstanceID, + id *componentstatus.InstanceID, srvStatus ServiceStatusFunc, -) func(*component.StatusEvent) { - return func(ev *component.StatusEvent) { +) func(*componentstatus.Event) { + return func(ev *componentstatus.Event) { srvStatus(id, ev) } } diff --git a/vendor/go.opentelemetry.io/collector/service/metadata.yaml b/vendor/go.opentelemetry.io/collector/service/metadata.yaml new file mode 100644 index 00000000000..a1531f7e382 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/service/metadata.yaml @@ -0,0 +1,73 @@ +type: service +github_project: open-telemetry/opentelemetry-collector + +status: + class: pkg + stability: + development: [traces, metrics, logs] + distributions: [core, contrib] + +telemetry: + metrics: + process_uptime: + enabled: true + stability: + level: alpha + description: Uptime of the process + unit: s + sum: + async: true + value_type: double + monotonic: true + + process_runtime_heap_alloc_bytes: + enabled: true + stability: + level: alpha + description: Bytes of allocated heap objects (see 'go doc runtime.MemStats.HeapAlloc') + unit: By + gauge: + async: true + value_type: int + + process_runtime_total_alloc_bytes: + enabled: true + stability: + level: alpha + description: Cumulative bytes allocated for heap objects (see 'go doc runtime.MemStats.TotalAlloc') + unit: By + sum: + async: true + value_type: int + monotonic: true + + process_runtime_total_sys_memory_bytes: + enabled: true + stability: + level: alpha + description: Total bytes of memory obtained from the OS (see 'go doc runtime.MemStats.Sys') + unit: By + gauge: + async: true + value_type: int + + process_cpu_seconds: + enabled: true + stability: + level: alpha + description: Total CPU user and system time in seconds + unit: s + sum: + async: true + value_type: double + monotonic: true + + process_memory_rss: + enabled: true + stability: + level: alpha + description: Total physical memory (resident set size) + unit: By + gauge: + async: true + value_type: int diff --git a/vendor/go.opentelemetry.io/collector/service/pipelines/config.go b/vendor/go.opentelemetry.io/collector/service/pipelines/config.go index ed8c77a31c0..025d3c8395a 100644 --- a/vendor/go.opentelemetry.io/collector/service/pipelines/config.go +++ b/vendor/go.opentelemetry.io/collector/service/pipelines/config.go @@ -8,16 +8,27 @@ import ( "fmt" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/featuregate" + "go.opentelemetry.io/collector/pipeline" + "go.opentelemetry.io/collector/pipeline/xpipeline" ) var ( errMissingServicePipelines = errors.New("service must have at least one pipeline") errMissingServicePipelineReceivers = errors.New("must have at least one receiver") errMissingServicePipelineExporters = errors.New("must have at least one exporter") + + serviceProfileSupportGateID = "service.profilesSupport" + serviceProfileSupportGate = featuregate.GlobalRegistry().MustRegister( + serviceProfileSupportGateID, + featuregate.StageAlpha, + featuregate.WithRegisterFromVersion("v0.112.0"), + featuregate.WithRegisterDescription("Controls whether profiles support can be enabled"), + ) ) // Config defines the configurable settings for service telemetry. -type Config map[component.ID]*PipelineConfig +type Config map[pipeline.ID]*PipelineConfig func (cfg Config) Validate() error { // Must have at least one pipeline. 
@@ -27,14 +38,25 @@ func (cfg Config) Validate() error { // Check that all pipelines have at least one receiver and one exporter, and they reference // only configured components. - for pipelineID, pipeline := range cfg { - if pipelineID.Type() != component.DataTypeTraces && pipelineID.Type() != component.DataTypeMetrics && pipelineID.Type() != component.DataTypeLogs { - return fmt.Errorf("pipeline %q: unknown datatype %q", pipelineID, pipelineID.Type()) + for pipelineID, p := range cfg { + switch pipelineID.Signal() { + case pipeline.SignalTraces, pipeline.SignalMetrics, pipeline.SignalLogs: + // Continue + case xpipeline.SignalProfiles: + if !serviceProfileSupportGate.IsEnabled() { + return fmt.Errorf( + "pipeline %q: profiling signal support is at alpha level, gated under the %q feature gate", + pipelineID.String(), + serviceProfileSupportGateID, + ) + } + default: + return fmt.Errorf("pipeline %q: unknown signal %q", pipelineID.String(), pipelineID.Signal()) } // Validate pipeline has at least one receiver. - if err := pipeline.Validate(); err != nil { - return fmt.Errorf("pipeline %q: %w", pipelineID, err) + if err := p.Validate(); err != nil { + return fmt.Errorf("pipeline %q: %w", pipelineID.String(), err) } } diff --git a/vendor/go.opentelemetry.io/collector/service/service.go b/vendor/go.opentelemetry.io/collector/service/service.go index 53ea3eebe65..cf4b3475229 100644 --- a/vendor/go.opentelemetry.io/collector/service/service.go +++ b/vendor/go.opentelemetry.io/collector/service/service.go @@ -1,6 +1,8 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +//go:generate mdatagen metadata.yaml + package service // import "go.opentelemetry.io/collector/service" import ( @@ -9,6 +11,8 @@ import ( "fmt" "runtime" + "go.opentelemetry.io/contrib/config" + "go.opentelemetry.io/otel/log" "go.opentelemetry.io/otel/metric" sdkresource "go.opentelemetry.io/otel/sdk/resource" "go.uber.org/multierr" @@ -20,20 +24,29 @@ import ( "go.opentelemetry.io/collector/connector" "go.opentelemetry.io/collector/exporter" "go.opentelemetry.io/collector/extension" - "go.opentelemetry.io/collector/internal/localhostgate" - "go.opentelemetry.io/collector/internal/obsreportconfig" + "go.opentelemetry.io/collector/featuregate" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/processor" "go.opentelemetry.io/collector/receiver" + semconv "go.opentelemetry.io/collector/semconv/v1.26.0" "go.opentelemetry.io/collector/service/extensions" + "go.opentelemetry.io/collector/service/internal/builders" "go.opentelemetry.io/collector/service/internal/graph" "go.opentelemetry.io/collector/service/internal/proctelemetry" "go.opentelemetry.io/collector/service/internal/resource" - "go.opentelemetry.io/collector/service/internal/servicetelemetry" "go.opentelemetry.io/collector/service/internal/status" "go.opentelemetry.io/collector/service/telemetry" ) +// useOtelWithSDKConfigurationForInternalTelemetryFeatureGate is the feature gate that controls whether the collector +// supports configuring the OpenTelemetry SDK via configuration +var _ = featuregate.GlobalRegistry().MustRegister( + "telemetry.useOtelWithSDKConfigurationForInternalTelemetry", + featuregate.StageStable, + featuregate.WithRegisterToVersion("v0.110.0"), + featuregate.WithRegisterDescription("controls whether the collector supports extended OpenTelemetry "+ + "configuration for internal telemetry")) + // Settings holds configuration for building a new Service.
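Tying the pipeline validation above together: a profiles pipeline only passes Validate once the alpha gate is flipped, which a distribution could do with the standard featuregate API (a sketch, not part of this change):

// Enable profiles support before config validation runs.
if err := featuregate.GlobalRegistry().Set("service.profilesSupport", true); err != nil {
	panic(err)
}
// With the gate left off, Validate returns an error of the form:
//   pipeline "profiles": profiling signal support is at alpha level,
//   gated under the "service.profilesSupport" feature gate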
type Settings struct { // BuildInfo provides collector start information. @@ -42,20 +55,31 @@ type Settings struct { // CollectorConf contains the Collector's current configuration CollectorConf *confmap.Conf - // Receivers builder for receivers. - Receivers *receiver.Builder + // Receivers configuration and the factories used to build them. + ReceiversConfigs map[component.ID]component.Config + ReceiversFactories map[component.Type]receiver.Factory - // Processors builder for processors. - Processors *processor.Builder + // Processors configuration and the factories used to build them. + ProcessorsConfigs map[component.ID]component.Config + ProcessorsFactories map[component.Type]processor.Factory - // Exporters builder for exporters. - Exporters *exporter.Builder + // Exporters configuration and the factories used to build them. + ExportersConfigs map[component.ID]component.Config + ExportersFactories map[component.Type]exporter.Factory - // Connectors builder for connectors. - Connectors *connector.Builder + // Connectors configuration and the factories used to build them. + ConnectorsConfigs map[component.ID]component.Config + ConnectorsFactories map[component.Type]connector.Factory // Extensions builder for extensions. - Extensions *extension.Builder + Extensions builders.Extension + + // Extensions configuration and the factories used to build them. + ExtensionsConfigs map[component.ID]component.Config + ExtensionsFactories map[component.Type]extension.Factory + + // ModuleInfo describes the go module for each component. + ModuleInfo extension.ModuleInfo // AsyncErrorChannel is the channel that is used to report fatal errors. AsyncErrorChannel chan error @@ -67,25 +91,26 @@ type Settings struct { // Service represents the implementation of a component.Host. type Service struct { buildInfo component.BuildInfo - telemetrySettings servicetelemetry.TelemetrySettings - host *serviceHost + telemetrySettings component.TelemetrySettings + host *graph.Host collectorConf *confmap.Conf + loggerProvider log.LoggerProvider } // New creates a new Service, its telemetry, and Components.
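For orientation, a sketch of how a caller might populate the new config/factory maps in place of the old prebuilt builders; the OTLP receiver is an illustrative choice, not something mandated by the diff:

otlpType := component.MustNewType("otlp")
set := service.Settings{
	ReceiversConfigs: map[component.ID]component.Config{
		component.NewID(otlpType): otlpreceiver.NewFactory().CreateDefaultConfig(),
	},
	ReceiversFactories: map[component.Type]receiver.Factory{
		otlpType: otlpreceiver.NewFactory(),
	},
	// ProcessorsConfigs/Factories, ExportersConfigs/Factories and the
	// connector/extension maps follow the same pattern.
}
_ = set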
func New(ctx context.Context, set Settings, cfg Config) (*Service, error) { - disableHighCard := obsreportconfig.DisableHighCardinalityMetricsfeatureGate.IsEnabled() - extendedConfig := obsreportconfig.UseOtelWithSDKConfigurationForInternalTelemetryFeatureGate.IsEnabled() srv := &Service{ buildInfo: set.BuildInfo, - host: &serviceHost{ - receivers: set.Receivers, - processors: set.Processors, - exporters: set.Exporters, - connectors: set.Connectors, - extensions: set.Extensions, - buildInfo: set.BuildInfo, - asyncErrorChannel: set.AsyncErrorChannel, + host: &graph.Host{ + Receivers: builders.NewReceiver(set.ReceiversConfigs, set.ReceiversFactories), + Processors: builders.NewProcessor(set.ProcessorsConfigs, set.ProcessorsFactories), + Exporters: builders.NewExporter(set.ExportersConfigs, set.ExportersFactories), + Connectors: builders.NewConnector(set.ConnectorsConfigs, set.ConnectorsFactories), + Extensions: builders.NewExtension(set.ExtensionsConfigs, set.ExtensionsFactories), + + ModuleInfo: set.ModuleInfo, + BuildInfo: set.BuildInfo, + AsyncErrorChannel: set.AsyncErrorChannel, }, collectorConf: set.CollectorConf, } @@ -94,16 +119,42 @@ func New(ctx context.Context, set Settings, cfg Config) (*Service, error) { res := resource.New(set.BuildInfo, cfg.Telemetry.Resource) pcommonRes := pdataFromSdk(res) + sch := semconv.SchemaURL + cfgRes := config.Resource{ + SchemaUrl: &sch, + Attributes: attributes(res, cfg.Telemetry), + } + + sdk, err := config.NewSDK( + config.WithContext(ctx), + config.WithOpenTelemetryConfiguration( + config.OpenTelemetryConfiguration{ + LoggerProvider: &config.LoggerProvider{ + Processors: cfg.Telemetry.Logs.Processors, + }, + TracerProvider: &config.TracerProvider{ + Processors: cfg.Telemetry.Traces.Processors, + }, + Resource: &cfgRes, + }, + ), + ) + if err != nil { + return nil, fmt.Errorf("failed to create SDK: %w", err) + } + telFactory := telemetry.NewFactory() telset := telemetry.Settings{ BuildInfo: set.BuildInfo, ZapOptions: set.LoggingOptions, + SDK: &sdk, } - logger, err := telFactory.CreateLogger(ctx, telset, &cfg.Telemetry) + logger, lp, err := telFactory.CreateLogger(ctx, telset, &cfg.Telemetry) if err != nil { return nil, fmt.Errorf("failed to create logger: %w", err) } + srv.loggerProvider = lp tracerProvider, err := telFactory.CreateTracerProvider(ctx, telset, &cfg.Telemetry) if err != nil { @@ -111,55 +162,54 @@ func New(ctx context.Context, set Settings, cfg Config) (*Service, error) { } logger.Info("Setting up own telemetry...") - mp, err := newMeterProvider( - meterProviderSettings{ - res: res, - cfg: cfg.Telemetry.Metrics, - asyncErrorChannel: set.AsyncErrorChannel, - }, - disableHighCard, - ) + + mp, err := telFactory.CreateMeterProvider(ctx, telset, &cfg.Telemetry) if err != nil { - return nil, fmt.Errorf("failed to create metric provider: %w", err) + return nil, fmt.Errorf("failed to create meter provider: %w", err) } - logsAboutMeterProvider(logger, cfg.Telemetry.Metrics, mp, extendedConfig) - srv.telemetrySettings = servicetelemetry.TelemetrySettings{ + logsAboutMeterProvider(logger, cfg.Telemetry.Metrics, mp) + srv.telemetrySettings = component.TelemetrySettings{ Logger: logger, MeterProvider: mp, TracerProvider: tracerProvider, MetricsLevel: cfg.Telemetry.Metrics.Level, // Construct telemetry attributes from build info and config's resource attributes. 
Resource: pcommonRes, - Status: status.NewReporter(srv.host.notifyComponentStatusChange, func(err error) { - if errors.Is(err, status.ErrStatusNotReady) { - logger.Warn("Invalid transition", zap.Error(err)) - } - // ignore other errors as they represent invalid state transitions and are considered benign. - }), + } + srv.host.Reporter = status.NewReporter(srv.host.NotifyComponentStatusChange, func(err error) { + if errors.Is(err, status.ErrStatusNotReady) { + logger.Warn("Invalid transition", zap.Error(err)) + } + // ignore other errors as they represent invalid state transitions and are considered benign. + }) + + if err = srv.initGraph(ctx, cfg); err != nil { + err = multierr.Append(err, srv.shutdownTelemetry(ctx)) + return nil, err } // process the configuration and initialize the pipeline - if err = srv.initExtensionsAndPipeline(ctx, set, cfg); err != nil { - // If pipeline initialization fails then shut down telemetry + if err = srv.initExtensions(ctx, cfg.Extensions); err != nil { err = multierr.Append(err, srv.shutdownTelemetry(ctx)) return nil, err } + if err = proctelemetry.RegisterProcessMetrics(srv.telemetrySettings); err != nil { + return nil, fmt.Errorf("failed to register process metrics: %w", err) + } + return srv, nil } -func logsAboutMeterProvider(logger *zap.Logger, cfg telemetry.MetricsConfig, mp metric.MeterProvider, extendedConfig bool) { - if cfg.Level == configtelemetry.LevelNone || (cfg.Address == "" && len(cfg.Readers) == 0) { - logger.Info( - "Skipped telemetry setup.", - zap.String(zapKeyTelemetryAddress, cfg.Address), - zap.Stringer(zapKeyTelemetryLevel, cfg.Level), - ) +func logsAboutMeterProvider(logger *zap.Logger, cfg telemetry.MetricsConfig, mp metric.MeterProvider) { + if cfg.Level == configtelemetry.LevelNone || len(cfg.Readers) == 0 { + logger.Info("Skipped telemetry setup.") return } - if len(cfg.Address) != 0 && extendedConfig { + //nolint + if len(cfg.Address) != 0 { logger.Warn("service::telemetry::metrics::address is being deprecated in favor of service::telemetry::metrics::readers") } @@ -182,29 +232,25 @@ func (srv *Service) Start(ctx context.Context) error { zap.Int("NumCPU", runtime.NumCPU()), ) - // enable status reporting - srv.telemetrySettings.Status.Ready() - - if err := srv.host.serviceExtensions.Start(ctx, srv.host); err != nil { + if err := srv.host.ServiceExtensions.Start(ctx, srv.host); err != nil { return fmt.Errorf("failed to start extensions: %w", err) } if srv.collectorConf != nil { - if err := srv.host.serviceExtensions.NotifyConfig(ctx, srv.collectorConf); err != nil { + if err := srv.host.ServiceExtensions.NotifyConfig(ctx, srv.collectorConf); err != nil { return err } } - if err := srv.host.pipelines.StartAll(ctx, srv.host); err != nil { + if err := srv.host.Pipelines.StartAll(ctx, srv.host); err != nil { return fmt.Errorf("cannot start pipelines: %w", err) } - if err := srv.host.serviceExtensions.NotifyPipelineReady(); err != nil { + if err := srv.host.ServiceExtensions.NotifyPipelineReady(); err != nil { return err } srv.telemetrySettings.Logger.Info("Everything is ready. 
Begin running and processing data.") - localhostgate.LogAboutUseLocalHostAsDefault(srv.telemetrySettings.Logger) return nil } @@ -227,6 +273,12 @@ func (srv *Service) shutdownTelemetry(ctx context.Context) error { err = multierr.Append(err, fmt.Errorf("failed to shutdown tracer provider: %w", shutdownErr)) } } + + if prov, ok := srv.loggerProvider.(shutdownable); ok { + if shutdownErr := prov.Shutdown(ctx); shutdownErr != nil { + err = multierr.Append(err, fmt.Errorf("failed to shutdown logger provider: %w", shutdownErr)) + } + } return err } @@ -242,15 +294,15 @@ func (srv *Service) Shutdown(ctx context.Context) error { // Begin shutdown sequence. srv.telemetrySettings.Logger.Info("Starting shutdown...") - if err := srv.host.serviceExtensions.NotifyPipelineNotReady(); err != nil { + if err := srv.host.ServiceExtensions.NotifyPipelineNotReady(); err != nil { errs = multierr.Append(errs, fmt.Errorf("failed to notify that pipeline is not ready: %w", err)) } - if err := srv.host.pipelines.ShutdownAll(ctx); err != nil { + if err := srv.host.Pipelines.ShutdownAll(ctx, srv.host.Reporter); err != nil { errs = multierr.Append(errs, fmt.Errorf("failed to shutdown pipelines: %w", err)) } - if err := srv.host.serviceExtensions.Shutdown(ctx); err != nil { + if err := srv.host.ServiceExtensions.Shutdown(ctx); err != nil { errs = multierr.Append(errs, fmt.Errorf("failed to shutdown extensions: %w", err)) } @@ -261,39 +313,36 @@ func (srv *Service) Shutdown(ctx context.Context) error { return errs } -// Creates extensions and then builds the pipeline graph. -func (srv *Service) initExtensionsAndPipeline(ctx context.Context, set Settings, cfg Config) error { +// Creates extensions. +func (srv *Service) initExtensions(ctx context.Context, cfg extensions.Config) error { var err error extensionsSettings := extensions.Settings{ Telemetry: srv.telemetrySettings, BuildInfo: srv.buildInfo, - Extensions: srv.host.extensions, + Extensions: srv.host.Extensions, + ModuleInfo: srv.host.ModuleInfo, } - if srv.host.serviceExtensions, err = extensions.New(ctx, extensionsSettings, cfg.Extensions); err != nil { + if srv.host.ServiceExtensions, err = extensions.New(ctx, extensionsSettings, cfg, extensions.WithReporter(srv.host.Reporter)); err != nil { return fmt.Errorf("failed to build extensions: %w", err) } + return nil +} - pSet := graph.Settings{ +// Creates the pipeline graph. +func (srv *Service) initGraph(ctx context.Context, cfg Config) error { + var err error + if srv.host.Pipelines, err = graph.Build(ctx, graph.Settings{ Telemetry: srv.telemetrySettings, BuildInfo: srv.buildInfo, - ReceiverBuilder: set.Receivers, - ProcessorBuilder: set.Processors, - ExporterBuilder: set.Exporters, - ConnectorBuilder: set.Connectors, + ReceiverBuilder: srv.host.Receivers, + ProcessorBuilder: srv.host.Processors, + ExporterBuilder: srv.host.Exporters, + ConnectorBuilder: srv.host.Connectors, PipelineConfigs: cfg.Pipelines, - } - - if srv.host.pipelines, err = graph.Build(ctx, pSet); err != nil { + ReportStatus: srv.host.Reporter.ReportStatus, + }); err != nil { return fmt.Errorf("failed to build pipelines: %w", err) } - - if cfg.Telemetry.Metrics.Level != configtelemetry.LevelNone && cfg.Telemetry.Metrics.Address != "" { - // The process telemetry initialization requires the ballast size, which is available after the extensions are initialized. 
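The type assertion on srv.loggerProvider in shutdownTelemetry above checks for a small local interface; its shape is presumably along these lines (the declaration itself is unchanged context elsewhere in the package):

// shutdownable matches providers that expose a Shutdown method, such as the
// SDK logger provider; no-op providers fail the assertion and are skipped.
type shutdownable interface {
	Shutdown(ctx context.Context) error
}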
- if err = proctelemetry.RegisterProcessMetrics(srv.telemetrySettings.MeterProvider, getBallastSize(srv.host)); err != nil { - return fmt.Errorf("failed to register process metrics: %w", err) - } - } - return nil } @@ -303,15 +352,6 @@ func (srv *Service) Logger() *zap.Logger { return srv.telemetrySettings.Logger } -func getBallastSize(host component.Host) uint64 { - for _, ext := range host.GetExtensions() { - if bExt, ok := ext.(interface{ GetBallastSize() uint64 }); ok { - return bExt.GetBallastSize() - } - } - return 0 -} - func pdataFromSdk(res *sdkresource.Resource) pcommon.Resource { // pcommon.NewResource is the best way to generate a new resource currently and is safe to use outside of tests. // Because the resource is signal agnostic, and we need a net new resource, not an existing one, this is the only diff --git a/vendor/go.opentelemetry.io/collector/service/telemetry.go b/vendor/go.opentelemetry.io/collector/service/telemetry.go deleted file mode 100644 index c2a67c7f16f..00000000000 --- a/vendor/go.opentelemetry.io/collector/service/telemetry.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package service // import "go.opentelemetry.io/collector/service" - -import ( - "context" - "net" - "net/http" - "strconv" - - "go.opentelemetry.io/contrib/config" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/metric/noop" - sdkmetric "go.opentelemetry.io/otel/sdk/metric" - "go.opentelemetry.io/otel/sdk/resource" - "go.uber.org/multierr" - "go.uber.org/zap" - - "go.opentelemetry.io/collector/config/configtelemetry" - "go.opentelemetry.io/collector/service/internal/proctelemetry" - "go.opentelemetry.io/collector/service/telemetry" -) - -const ( - zapKeyTelemetryAddress = "address" - zapKeyTelemetryLevel = "level" -) - -type meterProvider struct { - *sdkmetric.MeterProvider - servers []*http.Server -} - -type meterProviderSettings struct { - res *resource.Resource - cfg telemetry.MetricsConfig - asyncErrorChannel chan error -} - -func newMeterProvider(set meterProviderSettings, disableHighCardinality bool) (metric.MeterProvider, error) { - if set.cfg.Level == configtelemetry.LevelNone || (set.cfg.Address == "" && len(set.cfg.Readers) == 0) { - return noop.NewMeterProvider(), nil - } - - if len(set.cfg.Address) != 0 { - host, port, err := net.SplitHostPort(set.cfg.Address) - if err != nil { - return nil, err - } - portInt, err := strconv.Atoi(port) - if err != nil { - return nil, err - } - if set.cfg.Readers == nil { - set.cfg.Readers = []config.MetricReader{} - } - set.cfg.Readers = append(set.cfg.Readers, config.MetricReader{ - Pull: &config.PullMetricReader{ - Exporter: config.MetricExporter{ - Prometheus: &config.Prometheus{ - Host: &host, - Port: &portInt, - }, - }, - }, - }) - } - - mp := &meterProvider{} - var opts []sdkmetric.Option - for _, reader := range set.cfg.Readers { - // https://github.com/open-telemetry/opentelemetry-collector/issues/8045 - r, server, err := proctelemetry.InitMetricReader(context.Background(), reader, set.asyncErrorChannel) - if err != nil { - return nil, err - } - if server != nil { - mp.servers = append(mp.servers, server) - - } - opts = append(opts, sdkmetric.WithReader(r)) - } - - var err error - mp.MeterProvider, err = proctelemetry.InitOpenTelemetry(set.res, opts, disableHighCardinality) - if err != nil { - return nil, err - } - return mp, nil -} - -// LogAboutServers logs about the servers that are serving metrics. 
-func (mp *meterProvider) LogAboutServers(logger *zap.Logger, cfg telemetry.MetricsConfig) { - for _, server := range mp.servers { - logger.Info( - "Serving metrics", - zap.String(zapKeyTelemetryAddress, server.Addr), - zap.Stringer(zapKeyTelemetryLevel, cfg.Level), - ) - } -} - -// Shutdown the meter provider and all the associated resources. -// The type signature of this method matches that of the sdkmetric.MeterProvider. -func (mp *meterProvider) Shutdown(ctx context.Context) error { - var errs error - for _, server := range mp.servers { - if server != nil { - errs = multierr.Append(errs, server.Close()) - } - } - return multierr.Append(errs, mp.MeterProvider.Shutdown(ctx)) -} diff --git a/vendor/go.opentelemetry.io/collector/service/telemetry/config.go b/vendor/go.opentelemetry.io/collector/service/telemetry/config.go index 3336fcac578..e6d80dbd16c 100644 --- a/vendor/go.opentelemetry.io/collector/service/telemetry/config.go +++ b/vendor/go.opentelemetry.io/collector/service/telemetry/config.go @@ -4,15 +4,29 @@ package telemetry // import "go.opentelemetry.io/collector/service/telemetry" import ( + "errors" "fmt" + "net" + "strconv" "time" "go.opentelemetry.io/contrib/config" "go.uber.org/zap/zapcore" "go.opentelemetry.io/collector/config/configtelemetry" + "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/featuregate" ) +var _ confmap.Unmarshaler = (*Config)(nil) + +var disableAddressFieldForInternalTelemetryFeatureGate = featuregate.GlobalRegistry().MustRegister( + "telemetry.disableAddressFieldForInternalTelemetry", + featuregate.StageAlpha, + featuregate.WithRegisterFromVersion("v0.111.0"), + featuregate.WithRegisterToVersion("v0.114.0"), + featuregate.WithRegisterDescription("controls whether the deprecated address field for internal telemetry is still supported")) + // Config defines the configurable settings for service telemetry. type Config struct { Logs LogsConfig `mapstructure:"logs"` @@ -92,6 +106,10 @@ type LogsConfig struct { // // By default, there is no initial field. InitialFields map[string]any `mapstructure:"initial_fields"` + + // Processors allow configuration of log record processors to emit logs to + // any number of supported backends. + Processors []config.LogRecordProcessor `mapstructure:"processors"` } // LogsSamplingConfig sets a sampling strategy for the logger. Sampling caps the @@ -119,7 +137,7 @@ type MetricsConfig struct { // - "detailed" adds dimensions and views to the previous levels. Level configtelemetry.Level `mapstructure:"level"` - // Address is the [address]:port that metrics exposition should be bound to. + // Deprecated: [v0.111.0] use readers configuration. Address string `mapstructure:"address"` // Readers allow configuration of metric readers to emit metrics to @@ -130,20 +148,65 @@ type MetricsConfig struct { // TracesConfig exposes the common Telemetry configuration for collector's internal spans. // Experimental: *NOTE* this structure is subject to change or removal in the future. type TracesConfig struct { + // Level configures whether spans are emitted or not, the possible values are: + // - "none" indicates that no tracing data should be collected; + // - "basic" is the recommended level and covers the basics of the service telemetry. + Level configtelemetry.Level `mapstructure:"level"` // Propagators is a list of TextMapPropagators from the supported propagators list. Currently, // tracecontext and b3 are supported. By default, the value is set to empty list and // context propagation is disabled.
Propagators []string `mapstructure:"propagators"` // Processors allow configuration of span processors to emit spans to - // any number of suported backends. + // any number of supported backends. Processors []config.SpanProcessor `mapstructure:"processors"` } +func (c *Config) Unmarshal(conf *confmap.Conf) error { + if err := conf.Unmarshal(c); err != nil { + return err + } + + // If the support for "metrics::address" is disabled, nothing to do. + // TODO: when this gate is marked stable remove the whole Unmarshal definition. + if disableAddressFieldForInternalTelemetryFeatureGate.IsEnabled() { + return nil + } + + if len(c.Metrics.Address) != 0 { + host, port, err := net.SplitHostPort(c.Metrics.Address) + if err != nil { + return fmt.Errorf("failed to parse metrics address %q: %w", c.Metrics.Address, err) + } + portInt, err := strconv.Atoi(port) + if err != nil { + return fmt.Errorf("failed to extract the port from the metrics address %q: %w", c.Metrics.Address, err) + } + + // User did not overwrite readers, so we will remove the default configured reader. + if !conf.IsSet("metrics::readers") { + c.Metrics.Readers = nil + } + + c.Metrics.Readers = append(c.Metrics.Readers, config.MetricReader{ + Pull: &config.PullMetricReader{ + Exporter: config.MetricExporter{ + Prometheus: &config.Prometheus{ + Host: &host, + Port: &portInt, + }, + }, + }, + }) + } + + return nil +} + // Validate checks whether the current configuration is valid func (c *Config) Validate() error { - // Check when service telemetry metric level is not none, the metrics address should not be empty - if c.Metrics.Level != configtelemetry.LevelNone && c.Metrics.Address == "" && len(c.Metrics.Readers) == 0 { - return fmt.Errorf("collector telemetry metric address or reader should exist when metric level is not none") + // Check that metrics readers exist when the service telemetry metric level is not none + if c.Metrics.Level != configtelemetry.LevelNone && len(c.Metrics.Readers) == 0 { + return errors.New("collector telemetry metrics reader should exist when metric level is not none") } return nil diff --git a/vendor/go.opentelemetry.io/collector/service/telemetry/factory.go b/vendor/go.opentelemetry.io/collector/service/telemetry/factory.go index c236d3c5733..af23cfcabae 100644 --- a/vendor/go.opentelemetry.io/collector/service/telemetry/factory.go +++ b/vendor/go.opentelemetry.io/collector/service/telemetry/factory.go @@ -7,16 +7,95 @@ import ( "context" "time" + "go.opentelemetry.io/contrib/config" + "go.opentelemetry.io/otel/log" + "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/trace" "go.uber.org/zap" "go.uber.org/zap/zapcore" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configtelemetry" - "go.opentelemetry.io/collector/service/telemetry/internal" + "go.opentelemetry.io/collector/featuregate" + "go.opentelemetry.io/collector/service/internal/resource" ) +var useLocalHostAsDefaultMetricsAddressFeatureGate = featuregate.GlobalRegistry().MustRegister( + "telemetry.UseLocalHostAsDefaultMetricsAddress", + featuregate.StageBeta, + featuregate.WithRegisterFromVersion("v0.111.0"), + featuregate.WithRegisterDescription("controls whether the default Prometheus metrics server uses localhost as the default host for its endpoints"), +) + +// disableHighCardinalityMetricsFeatureGate is the feature gate that controls whether the collector should enable +// potentially high cardinality metrics. The gate will be removed when the collector allows for view configuration.
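To make the Unmarshal shim above concrete, a sketch of the rewrite it performs while the gate is off (the address value is an assumed example):

// A legacy configuration such as
//
//	service:
//	  telemetry:
//	    metrics:
//	      address: "0.0.0.0:8888"
//
// is turned into the equivalent pull reader:
host, portInt := "0.0.0.0", 8888
reader := config.MetricReader{
	Pull: &config.PullMetricReader{
		Exporter: config.MetricExporter{
			Prometheus: &config.Prometheus{Host: &host, Port: &portInt},
		},
	},
}
_ = reader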
+var disableHighCardinalityMetricsFeatureGate = featuregate.GlobalRegistry().MustRegister( + "telemetry.disableHighCardinalityMetrics", + featuregate.StageAlpha, + featuregate.WithRegisterDescription("controls whether the collector should enable potentially high "+ + "cardinality metrics. The gate will be removed when the collector allows for view configuration.")) + +// Settings holds configuration for building Telemetry. +type Settings struct { + BuildInfo component.BuildInfo + AsyncErrorChannel chan error + ZapOptions []zap.Option + SDK *config.SDK +} + +// Factory is the factory interface for telemetry. +// This interface cannot be directly implemented. Implementations must +// use NewFactory to implement it. +type Factory interface { + // CreateDefaultConfig creates the default configuration for the telemetry. + // TODO: Should we just inherit from component.Factory? + CreateDefaultConfig() component.Config + + // CreateLogger creates a logger. + CreateLogger(ctx context.Context, set Settings, cfg component.Config) (*zap.Logger, log.LoggerProvider, error) + + // CreateTracerProvider creates a TracerProvider. + CreateTracerProvider(ctx context.Context, set Settings, cfg component.Config) (trace.TracerProvider, error) + + // CreateMeterProvider creates a MeterProvider. + CreateMeterProvider(ctx context.Context, set Settings, cfg component.Config) (metric.MeterProvider, error) + + // unexportedFactoryFunc is used to prevent external implementations of Factory. + unexportedFactoryFunc() +} + +// NewFactory creates a new Factory. +func NewFactory() Factory { + return newFactory(createDefaultConfig, + withLogger(func(_ context.Context, set Settings, cfg component.Config) (*zap.Logger, log.LoggerProvider, error) { + c := *cfg.(*Config) + return newLogger(set, c) + }), + withTracerProvider(func(_ context.Context, set Settings, cfg component.Config) (trace.TracerProvider, error) { + c := *cfg.(*Config) + return newTracerProvider(set, c) + }), + withMeterProvider(func(_ context.Context, set Settings, cfg component.Config) (metric.MeterProvider, error) { + c := *cfg.(*Config) + disableHighCard := disableHighCardinalityMetricsFeatureGate.IsEnabled() + return newMeterProvider( + meterProviderSettings{ + res: resource.New(set.BuildInfo, c.Resource), + cfg: c.Metrics, + asyncErrorChannel: set.AsyncErrorChannel, + }, + disableHighCard, + ) + }), + ) +} + func createDefaultConfig() component.Config { + metricsHost := "localhost" + if !useLocalHostAsDefaultMetricsAddressFeatureGate.IsEnabled() { + metricsHost = "" + } + return &Config{ Logs: LogsConfig{ Level: zapcore.InfoLevel, @@ -35,25 +114,19 @@ func createDefaultConfig() component.Config { InitialFields: map[string]any(nil), }, Metrics: MetricsConfig{ - Level: configtelemetry.LevelNormal, - Address: ":8888", + Level: configtelemetry.LevelNormal, + Readers: []config.MetricReader{ + { + Pull: &config.PullMetricReader{Exporter: config.MetricExporter{Prometheus: &config.Prometheus{ + Host: &metricsHost, + Port: newPtr(8888), + }}}, + }, + }, }, } } -// Factory is a telemetry factory. -type Factory = internal.Factory -// NewFactory creates a new Factory.
-func NewFactory() Factory { - return internal.NewFactory(createDefaultConfig, - internal.WithLogger(func(_ context.Context, set Settings, cfg component.Config) (*zap.Logger, error) { - c := *cfg.(*Config) - return newLogger(c.Logs, set.ZapOptions) - }), - internal.WithTracerProvider(func(ctx context.Context, _ Settings, cfg component.Config) (trace.TracerProvider, error) { - c := *cfg.(*Config) - return newTracerProvider(ctx, c) - }), - ) +func newPtr[T int | string](str T) *T { + return &str } diff --git a/vendor/go.opentelemetry.io/collector/service/telemetry/factory_impl.go b/vendor/go.opentelemetry.io/collector/service/telemetry/factory_impl.go new file mode 100644 index 00000000000..364afbde9d6 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/service/telemetry/factory_impl.go @@ -0,0 +1,111 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry // import "go.opentelemetry.io/collector/service/telemetry" + +import ( + "context" + + "go.opentelemetry.io/otel/log" + lognoop "go.opentelemetry.io/otel/log/noop" + "go.opentelemetry.io/otel/metric" + metricnoop "go.opentelemetry.io/otel/metric/noop" + "go.opentelemetry.io/otel/trace" + tracenoop "go.opentelemetry.io/otel/trace/noop" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" +) + +// factoryOption apply changes to Factory. +type factoryOption interface { + // applyTelemetryFactoryOption applies the option. + applyTelemetryFactoryOption(o *factory) +} + +var _ factoryOption = (*factoryOptionFunc)(nil) + +// factoryOptionFunc is an factoryOption created through a function. +type factoryOptionFunc func(*factory) + +func (f factoryOptionFunc) applyTelemetryFactoryOption(o *factory) { + f(o) +} + +var _ Factory = (*factory)(nil) + +// Factory is the implementation of Factory. +type factory struct { + createDefaultConfig component.CreateDefaultConfigFunc + createLoggerFunc + createTracerProviderFunc + createMeterProviderFunc +} + +func (f *factory) CreateDefaultConfig() component.Config { + return f.createDefaultConfig() +} + +// createLoggerFunc is the equivalent of Factory.CreateLogger. +type createLoggerFunc func(context.Context, Settings, component.Config) (*zap.Logger, log.LoggerProvider, error) + +// withLogger overrides the default no-op logger. +func withLogger(createLogger createLoggerFunc) factoryOption { + return factoryOptionFunc(func(o *factory) { + o.createLoggerFunc = createLogger + }) +} + +func (f *factory) CreateLogger(ctx context.Context, set Settings, cfg component.Config) (*zap.Logger, log.LoggerProvider, error) { + if f.createLoggerFunc == nil { + return zap.NewNop(), lognoop.NewLoggerProvider(), nil + } + return f.createLoggerFunc(ctx, set, cfg) +} + +// createTracerProviderFunc is the equivalent of Factory.CreateTracerProvider. +type createTracerProviderFunc func(context.Context, Settings, component.Config) (trace.TracerProvider, error) + +// withTracerProvider overrides the default no-op tracer provider. +func withTracerProvider(createTracerProvider createTracerProviderFunc) factoryOption { + return factoryOptionFunc(func(o *factory) { + o.createTracerProviderFunc = createTracerProvider + }) +} + +func (f *factory) CreateTracerProvider(ctx context.Context, set Settings, cfg component.Config) (trace.TracerProvider, error) { + if f.createTracerProviderFunc == nil { + return tracenoop.NewTracerProvider(), nil + } + return f.createTracerProviderFunc(ctx, set, cfg) +} + +// createMeterProviderFunc is the equivalent of Factory.CreateMeterProvider. 
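The option plumbing in factory_impl.go follows the collector's usual functional-options shape; a package-internal sketch of composing a factory with only the logger overridden (the closure is illustrative, and lognoop is the package's import alias for go.opentelemetry.io/otel/log/noop):

f := newFactory(createDefaultConfig,
	withLogger(func(_ context.Context, _ Settings, _ component.Config) (*zap.Logger, log.LoggerProvider, error) {
		return zap.NewNop(), lognoop.NewLoggerProvider(), nil
	}),
)
// Tracer and meter providers were not overridden, so the no-op defaults apply.
tp, _ := f.CreateTracerProvider(context.Background(), Settings{}, f.CreateDefaultConfig())
_ = tp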
+type createMeterProviderFunc func(context.Context, Settings, component.Config) (metric.MeterProvider, error) + +// withMeterProvider overrides the default no-op meter provider. +func withMeterProvider(createMeterProvider createMeterProviderFunc) factoryOption { + return factoryOptionFunc(func(o *factory) { + o.createMeterProviderFunc = createMeterProvider + }) +} + +func (f *factory) CreateMeterProvider(ctx context.Context, set Settings, cfg component.Config) (metric.MeterProvider, error) { + if f.createMeterProviderFunc == nil { + return metricnoop.NewMeterProvider(), nil + } + return f.createMeterProviderFunc(ctx, set, cfg) +} + +func (f *factory) unexportedFactoryFunc() {} + +// newFactory returns a new Factory. +func newFactory(createDefaultConfig component.CreateDefaultConfigFunc, options ...factoryOption) Factory { + f := &factory{ + createDefaultConfig: createDefaultConfig, + } + for _, op := range options { + op.applyTelemetryFactoryOption(f) + } + return f +} diff --git a/vendor/go.opentelemetry.io/collector/service/telemetry/internal/factory.go b/vendor/go.opentelemetry.io/collector/service/telemetry/internal/factory.go deleted file mode 100644 index f1368c9704d..00000000000 --- a/vendor/go.opentelemetry.io/collector/service/telemetry/internal/factory.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package internal // import "go.opentelemetry.io/collector/service/telemetry/internal" - -import ( - "context" - - "go.opentelemetry.io/otel/trace" - tracenoop "go.opentelemetry.io/otel/trace/noop" - "go.uber.org/zap" - - "go.opentelemetry.io/collector/component" -) - -// CreateSettings holds configuration for building Telemetry. -type CreateSettings struct { - BuildInfo component.BuildInfo - AsyncErrorChannel chan error - ZapOptions []zap.Option -} - -// Factory is factory interface for telemetry. -// This interface cannot be directly implemented. Implementations must -// use the NewFactory to implement it. -type Factory interface { - // CreateDefaultConfig creates the default configuration for the telemetry. - // TODO: Should we just inherit from component.Factory? - CreateDefaultConfig() component.Config - - // CreateLogger creates a logger. - CreateLogger(ctx context.Context, set CreateSettings, cfg component.Config) (*zap.Logger, error) - - // CreateTracerProvider creates a TracerProvider. - CreateTracerProvider(ctx context.Context, set CreateSettings, cfg component.Config) (trace.TracerProvider, error) - - // TODO: Add CreateMeterProvider. - - // unexportedFactoryFunc is used to prevent external implementations of Factory. - unexportedFactoryFunc() -} - -// FactoryOption apply changes to Factory. -type FactoryOption interface { - // applyTelemetryFactoryOption applies the option. - applyTelemetryFactoryOption(o *factory) -} - -var _ FactoryOption = (*factoryOptionFunc)(nil) - -// factoryOptionFunc is an FactoryOption created through a function. -type factoryOptionFunc func(*factory) - -func (f factoryOptionFunc) applyTelemetryFactoryOption(o *factory) { - f(o) -} - -var _ Factory = (*factory)(nil) - -// factory is the implementation of Factory. -type factory struct { - createDefaultConfig component.CreateDefaultConfigFunc - CreateLoggerFunc - CreateTracerProviderFunc -} - -func (f *factory) CreateDefaultConfig() component.Config { - return f.createDefaultConfig() -} - -// CreateLoggerFunc is the equivalent of Factory.CreateLogger. 
-type CreateLoggerFunc func(context.Context, CreateSettings, component.Config) (*zap.Logger, error) - -// WithLogger overrides the default no-op logger. -func WithLogger(createLogger CreateLoggerFunc) FactoryOption { - return factoryOptionFunc(func(o *factory) { - o.CreateLoggerFunc = createLogger - }) -} - -func (f *factory) CreateLogger(ctx context.Context, set CreateSettings, cfg component.Config) (*zap.Logger, error) { - if f.CreateLoggerFunc == nil { - return zap.NewNop(), nil - } - return f.CreateLoggerFunc(ctx, set, cfg) -} - -// CreateTracerProviderFunc is the equivalent of Factory.CreateTracerProvider. -type CreateTracerProviderFunc func(context.Context, CreateSettings, component.Config) (trace.TracerProvider, error) - -// WithTracerProvider overrides the default no-op tracer provider. -func WithTracerProvider(createTracerProvider CreateTracerProviderFunc) FactoryOption { - return factoryOptionFunc(func(o *factory) { - o.CreateTracerProviderFunc = createTracerProvider - }) -} - -func (f *factory) CreateTracerProvider(ctx context.Context, set CreateSettings, cfg component.Config) (trace.TracerProvider, error) { - if f.CreateTracerProviderFunc == nil { - return tracenoop.NewTracerProvider(), nil - } - return f.CreateTracerProviderFunc(ctx, set, cfg) -} - -func (f *factory) unexportedFactoryFunc() {} - -// NewFactory returns a new Factory. -func NewFactory(createDefaultConfig component.CreateDefaultConfigFunc, options ...FactoryOption) Factory { - f := &factory{ - createDefaultConfig: createDefaultConfig, - } - for _, op := range options { - op.applyTelemetryFactoryOption(f) - } - return f -} diff --git a/vendor/go.opentelemetry.io/collector/service/telemetry/internal/otelinit/config.go b/vendor/go.opentelemetry.io/collector/service/telemetry/internal/otelinit/config.go new file mode 100644 index 00000000000..8e4a5c45adf --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/service/telemetry/internal/otelinit/config.go @@ -0,0 +1,341 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelinit // import "go.opentelemetry.io/collector/service/telemetry/internal/otelinit" + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + "go.opentelemetry.io/contrib/config" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" + otelprom "go.opentelemetry.io/otel/exporters/prometheus" + "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric" + "go.opentelemetry.io/otel/sdk/instrumentation" + sdkmetric "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/metricdata" + "go.opentelemetry.io/otel/sdk/resource" + "google.golang.org/grpc/credentials" + + semconv "go.opentelemetry.io/collector/semconv/v1.18.0" +) + +const ( + + // gRPC Instrumentation Name + GRPCInstrumentation = "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + + // http Instrumentation Name + HTTPInstrumentation = "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + + // supported protocols + protocolProtobufHTTP = "http/protobuf" + protocolProtobufGRPC = "grpc/protobuf" + defaultReadHeaderTimeout = 10 * time.Second +) + +var ( + // GRPCUnacceptableKeyValues is a list of high cardinality grpc attributes 
that should be filtered out. + GRPCUnacceptableKeyValues = attribute.NewSet( + attribute.String(semconv.AttributeNetSockPeerAddr, ""), + attribute.String(semconv.AttributeNetSockPeerPort, ""), + attribute.String(semconv.AttributeNetSockPeerName, ""), + ) + + // HTTPUnacceptableKeyValues is a list of high cardinality http attributes that should be filtered out. + HTTPUnacceptableKeyValues = attribute.NewSet( + attribute.String(semconv.AttributeNetHostName, ""), + attribute.String(semconv.AttributeNetHostPort, ""), + ) + + errNoValidMetricExporter = errors.New("no valid metric exporter") +) + +func InitMetricReader(ctx context.Context, reader config.MetricReader, asyncErrorChannel chan error, serverWG *sync.WaitGroup) (sdkmetric.Reader, *http.Server, error) { + if reader.Pull != nil { + return initPullExporter(reader.Pull.Exporter, asyncErrorChannel, serverWG) + } + if reader.Periodic != nil { + var opts []sdkmetric.PeriodicReaderOption + if reader.Periodic.Interval != nil { + opts = append(opts, sdkmetric.WithInterval(time.Duration(*reader.Periodic.Interval)*time.Millisecond)) + } + + if reader.Periodic.Timeout != nil { + opts = append(opts, sdkmetric.WithTimeout(time.Duration(*reader.Periodic.Timeout)*time.Millisecond)) + } + return initPeriodicExporter(ctx, reader.Periodic.Exporter, opts...) + } + return nil, nil, fmt.Errorf("unsupported metric reader type %v", reader) +} + +func InitOpenTelemetry(res *resource.Resource, options []sdkmetric.Option, disableHighCardinality bool) (*sdkmetric.MeterProvider, error) { + opts := []sdkmetric.Option{ + sdkmetric.WithResource(res), + sdkmetric.WithView(disableHighCardinalityViews(disableHighCardinality)...), + } + + opts = append(opts, options...) + return sdkmetric.NewMeterProvider( + opts..., + ), nil +} + +func InitPrometheusServer(registry *prometheus.Registry, address string, asyncErrorChannel chan error, serverWG *sync.WaitGroup) *http.Server { + mux := http.NewServeMux() + mux.Handle("/metrics", promhttp.HandlerFor(registry, promhttp.HandlerOpts{})) + server := &http.Server{ + Addr: address, + Handler: mux, + ReadHeaderTimeout: defaultReadHeaderTimeout, + } + + serverWG.Add(1) + go func() { + defer serverWG.Done() + if serveErr := server.ListenAndServe(); serveErr != nil && !errors.Is(serveErr, http.ErrServerClosed) { + select { + case asyncErrorChannel <- serveErr: + case <-time.After(1 * time.Second): + } + } + }() + return server +} + +func disableHighCardinalityViews(disableHighCardinality bool) []sdkmetric.View { + if !disableHighCardinality { + return nil + } + return []sdkmetric.View{ + sdkmetric.NewView( + sdkmetric.Instrument{Scope: instrumentation.Scope{Name: GRPCInstrumentation}}, + sdkmetric.Stream{ + AttributeFilter: cardinalityFilter(GRPCUnacceptableKeyValues), + }), + sdkmetric.NewView( + sdkmetric.Instrument{Scope: instrumentation.Scope{Name: HTTPInstrumentation}}, + sdkmetric.Stream{ + AttributeFilter: cardinalityFilter(HTTPUnacceptableKeyValues), + }), + } +} + +func cardinalityFilter(filter attribute.Set) attribute.Filter { + return func(kv attribute.KeyValue) bool { + return !filter.HasValue(kv.Key) + } +} + +func initPrometheusExporter(prometheusConfig *config.Prometheus, asyncErrorChannel chan error, serverWG *sync.WaitGroup) (sdkmetric.Reader, *http.Server, error) { + promRegistry := prometheus.NewRegistry() + if prometheusConfig.Host == nil { + return nil, nil, errors.New("host must be specified") + } + if prometheusConfig.Port == nil { + return nil, nil, errors.New("port must be specified") + } + + opts := 
[]otelprom.Option{ + otelprom.WithRegisterer(promRegistry), + // https://github.com/open-telemetry/opentelemetry-collector/issues/8043 + otelprom.WithoutUnits(), + // Disabled for the moment until this becomes stable, and we are ready to break backwards compatibility. + otelprom.WithoutScopeInfo(), + // This allows us to produce metrics that are backwards compatible w/ opencensus + otelprom.WithoutCounterSuffixes(), + otelprom.WithResourceAsConstantLabels(attribute.NewDenyKeysFilter()), + } + exporter, err := otelprom.New(opts...) + if err != nil { + return nil, nil, fmt.Errorf("error creating otel prometheus exporter: %w", err) + } + + return exporter, InitPrometheusServer(promRegistry, net.JoinHostPort(*prometheusConfig.Host, strconv.Itoa(*prometheusConfig.Port)), asyncErrorChannel, serverWG), nil +} + +func initPullExporter(exporter config.MetricExporter, asyncErrorChannel chan error, serverWG *sync.WaitGroup) (sdkmetric.Reader, *http.Server, error) { + if exporter.Prometheus != nil { + return initPrometheusExporter(exporter.Prometheus, asyncErrorChannel, serverWG) + } + return nil, nil, errNoValidMetricExporter +} + +func initPeriodicExporter(ctx context.Context, exporter config.MetricExporter, opts ...sdkmetric.PeriodicReaderOption) (sdkmetric.Reader, *http.Server, error) { + if exporter.Console != nil { + enc := json.NewEncoder(os.Stdout) + enc.SetIndent("", " ") + + exp, err := stdoutmetric.New( + stdoutmetric.WithEncoder(enc), + ) + if err != nil { + return nil, nil, err + } + return sdkmetric.NewPeriodicReader(exp, opts...), nil, nil + } + if exporter.OTLP != nil { + var err error + var exp sdkmetric.Exporter + switch exporter.OTLP.Protocol { + case protocolProtobufHTTP: + exp, err = initOTLPHTTPExporter(ctx, exporter.OTLP) + case protocolProtobufGRPC: + exp, err = initOTLPgRPCExporter(ctx, exporter.OTLP) + default: + return nil, nil, fmt.Errorf("unsupported protocol %s", exporter.OTLP.Protocol) + } + if err != nil { + return nil, nil, err + } + return sdkmetric.NewPeriodicReader(exp, opts...), nil, nil + } + return nil, nil, errNoValidMetricExporter +} + +func normalizeEndpoint(endpoint string) string { + if !strings.HasPrefix(endpoint, "https://") && !strings.HasPrefix(endpoint, "http://") { + return "http://" + endpoint + } + return endpoint +} + +func initOTLPgRPCExporter(ctx context.Context, otlpConfig *config.OTLPMetric) (sdkmetric.Exporter, error) { + opts := []otlpmetricgrpc.Option{} + + if len(otlpConfig.Endpoint) > 0 { + u, err := url.ParseRequestURI(normalizeEndpoint(otlpConfig.Endpoint)) + if err != nil { + return nil, err + } + opts = append(opts, otlpmetricgrpc.WithEndpoint(u.Host)) + if u.Scheme == "http" { + opts = append(opts, otlpmetricgrpc.WithInsecure()) + } else if otlpConfig.Certificate != nil { + creds, err := credentials.NewClientTLSFromFile(*otlpConfig.Certificate, "") + if err != nil { + return nil, fmt.Errorf("could not create client tls credentials: %w", err) + } + opts = append(opts, otlpmetricgrpc.WithTLSCredentials(creds)) + } + } + + if otlpConfig.Compression != nil { + switch *otlpConfig.Compression { + case "gzip": + opts = append(opts, otlpmetricgrpc.WithCompressor(*otlpConfig.Compression)) + case "none": + default: + return nil, fmt.Errorf("unsupported compression %q", *otlpConfig.Compression) + } + } + if otlpConfig.Timeout != nil { + opts = append(opts, otlpmetricgrpc.WithTimeout(time.Millisecond*time.Duration(*otlpConfig.Timeout))) + } + if len(otlpConfig.Headers) > 0 { + opts = append(opts, otlpmetricgrpc.WithHeaders(otlpConfig.Headers)) + } 
+ if otlpConfig.TemporalityPreference != nil { + switch *otlpConfig.TemporalityPreference { + case "delta": + opts = append(opts, otlpmetricgrpc.WithTemporalitySelector(temporalityPreferenceDelta)) + case "cumulative": + opts = append(opts, otlpmetricgrpc.WithTemporalitySelector(temporalityPreferenceCumulative)) + case "lowmemory": + opts = append(opts, otlpmetricgrpc.WithTemporalitySelector(temporalityPreferenceLowMemory)) + default: + return nil, fmt.Errorf("unsupported temporality preference %q", *otlpConfig.TemporalityPreference) + } + } + + return otlpmetricgrpc.New(ctx, opts...) +} + +func initOTLPHTTPExporter(ctx context.Context, otlpConfig *config.OTLPMetric) (sdkmetric.Exporter, error) { + opts := []otlpmetrichttp.Option{} + + if len(otlpConfig.Endpoint) > 0 { + u, err := url.ParseRequestURI(normalizeEndpoint(otlpConfig.Endpoint)) + if err != nil { + return nil, err + } + opts = append(opts, otlpmetrichttp.WithEndpoint(u.Host)) + + if u.Scheme == "http" { + opts = append(opts, otlpmetrichttp.WithInsecure()) + } + if len(u.Path) > 0 { + opts = append(opts, otlpmetrichttp.WithURLPath(u.Path)) + } + } + if otlpConfig.Compression != nil { + switch *otlpConfig.Compression { + case "gzip": + opts = append(opts, otlpmetrichttp.WithCompression(otlpmetrichttp.GzipCompression)) + case "none": + opts = append(opts, otlpmetrichttp.WithCompression(otlpmetrichttp.NoCompression)) + default: + return nil, fmt.Errorf("unsupported compression %q", *otlpConfig.Compression) + } + } + if otlpConfig.Timeout != nil { + opts = append(opts, otlpmetrichttp.WithTimeout(time.Millisecond*time.Duration(*otlpConfig.Timeout))) + } + if len(otlpConfig.Headers) > 0 { + opts = append(opts, otlpmetrichttp.WithHeaders(otlpConfig.Headers)) + } + if otlpConfig.TemporalityPreference != nil { + switch *otlpConfig.TemporalityPreference { + case "delta": + opts = append(opts, otlpmetrichttp.WithTemporalitySelector(temporalityPreferenceDelta)) + case "cumulative": + opts = append(opts, otlpmetrichttp.WithTemporalitySelector(temporalityPreferenceCumulative)) + case "lowmemory": + opts = append(opts, otlpmetrichttp.WithTemporalitySelector(temporalityPreferenceLowMemory)) + default: + return nil, fmt.Errorf("unsupported temporality preference %q", *otlpConfig.TemporalityPreference) + } + } + + return otlpmetrichttp.New(ctx, opts...) 
+} + +func temporalityPreferenceCumulative(_ sdkmetric.InstrumentKind) metricdata.Temporality { + return metricdata.CumulativeTemporality +} + +func temporalityPreferenceDelta(ik sdkmetric.InstrumentKind) metricdata.Temporality { + switch ik { + case sdkmetric.InstrumentKindCounter, sdkmetric.InstrumentKindObservableCounter, sdkmetric.InstrumentKindHistogram: + return metricdata.DeltaTemporality + case sdkmetric.InstrumentKindObservableUpDownCounter, sdkmetric.InstrumentKindUpDownCounter: + return metricdata.CumulativeTemporality + default: + return metricdata.DeltaTemporality + } +} + +func temporalityPreferenceLowMemory(ik sdkmetric.InstrumentKind) metricdata.Temporality { + switch ik { + case sdkmetric.InstrumentKindCounter, sdkmetric.InstrumentKindHistogram: + return metricdata.DeltaTemporality + case sdkmetric.InstrumentKindObservableCounter, sdkmetric.InstrumentKindObservableUpDownCounter, sdkmetric.InstrumentKindUpDownCounter: + return metricdata.CumulativeTemporality + default: + return metricdata.DeltaTemporality + } +} diff --git a/vendor/go.opentelemetry.io/collector/service/telemetry/logger.go b/vendor/go.opentelemetry.io/collector/service/telemetry/logger.go index eb675bc459f..b6fe6d22ae1 100644 --- a/vendor/go.opentelemetry.io/collector/service/telemetry/logger.go +++ b/vendor/go.opentelemetry.io/collector/service/telemetry/logger.go @@ -4,22 +4,27 @@ package telemetry // import "go.opentelemetry.io/collector/service/telemetry" import ( + "go.opentelemetry.io/contrib/bridges/otelzap" + "go.opentelemetry.io/otel/log" "go.uber.org/zap" "go.uber.org/zap/zapcore" ) -func newLogger(cfg LogsConfig, options []zap.Option) (*zap.Logger, error) { +// newLogger creates a Logger and a LoggerProvider from Config. +func newLogger(set Settings, cfg Config) (*zap.Logger, log.LoggerProvider, error) { // Copied from NewProductionConfig. + ec := zap.NewProductionEncoderConfig() + ec.EncodeTime = zapcore.ISO8601TimeEncoder zapCfg := &zap.Config{ - Level: zap.NewAtomicLevelAt(cfg.Level), - Development: cfg.Development, - Encoding: cfg.Encoding, - EncoderConfig: zap.NewProductionEncoderConfig(), - OutputPaths: cfg.OutputPaths, - ErrorOutputPaths: cfg.ErrorOutputPaths, - DisableCaller: cfg.DisableCaller, - DisableStacktrace: cfg.DisableStacktrace, - InitialFields: cfg.InitialFields, + Level: zap.NewAtomicLevelAt(cfg.Logs.Level), + Development: cfg.Logs.Development, + Encoding: cfg.Logs.Encoding, + EncoderConfig: ec, + OutputPaths: cfg.Logs.OutputPaths, + ErrorOutputPaths: cfg.Logs.ErrorOutputPaths, + DisableCaller: cfg.Logs.DisableCaller, + DisableStacktrace: cfg.Logs.DisableStacktrace, + InitialFields: cfg.Logs.InitialFields, } if zapCfg.Encoding == "console" { @@ -27,15 +32,35 @@ func newLogger(cfg LogsConfig, options []zap.Option) (*zap.Logger, error) { zapCfg.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder } - logger, err := zapCfg.Build(options...) + logger, err := zapCfg.Build(set.ZapOptions...) 
if err != nil { - return nil, err + return nil, nil, err } - if cfg.Sampling != nil && cfg.Sampling.Enabled { - logger = newSampledLogger(logger, cfg.Sampling) + + var lp log.LoggerProvider + + if len(cfg.Logs.Processors) > 0 && set.SDK != nil { + lp = set.SDK.LoggerProvider() + + logger = logger.WithOptions(zap.WrapCore(func(c zapcore.Core) zapcore.Core { + core, err := zapcore.NewIncreaseLevelCore(zapcore.NewTee( + c, + otelzap.NewCore("go.opentelemetry.io/collector/service/telemetry", + otelzap.WithLoggerProvider(lp), + ), + ), zap.NewAtomicLevelAt(cfg.Logs.Level)) + if err != nil { + panic(err) + } + return core + })) + } + + if cfg.Logs.Sampling != nil && cfg.Logs.Sampling.Enabled { + logger = newSampledLogger(logger, cfg.Logs.Sampling) } - return logger, nil + return logger, lp, nil } func newSampledLogger(logger *zap.Logger, sc *LogsSamplingConfig) *zap.Logger { diff --git a/vendor/go.opentelemetry.io/collector/service/telemetry/metrics.go b/vendor/go.opentelemetry.io/collector/service/telemetry/metrics.go new file mode 100644 index 00000000000..0b2690c0af1 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/service/telemetry/metrics.go @@ -0,0 +1,91 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry // import "go.opentelemetry.io/collector/service/telemetry" + +import ( + "context" + "net/http" + "sync" + + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" + sdkmetric "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/resource" + "go.uber.org/multierr" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/config/configtelemetry" + "go.opentelemetry.io/collector/service/telemetry/internal/otelinit" +) + +const ( + zapKeyTelemetryAddress = "address" + zapKeyTelemetryLevel = "metrics level" +) + +type meterProvider struct { + *sdkmetric.MeterProvider + servers []*http.Server + serverWG sync.WaitGroup +} + +type meterProviderSettings struct { + res *resource.Resource + cfg MetricsConfig + asyncErrorChannel chan error +} + +// newMeterProvider creates a new MeterProvider from Config. +func newMeterProvider(set meterProviderSettings, disableHighCardinality bool) (metric.MeterProvider, error) { + if set.cfg.Level == configtelemetry.LevelNone || len(set.cfg.Readers) == 0 { + return noop.NewMeterProvider(), nil + } + + mp := &meterProvider{} + var opts []sdkmetric.Option + for _, reader := range set.cfg.Readers { + // https://github.com/open-telemetry/opentelemetry-collector/issues/8045 + r, server, err := otelinit.InitMetricReader(context.Background(), reader, set.asyncErrorChannel, &mp.serverWG) + if err != nil { + return nil, err + } + if server != nil { + mp.servers = append(mp.servers, server) + } + opts = append(opts, sdkmetric.WithReader(r)) + } + + var err error + mp.MeterProvider, err = otelinit.InitOpenTelemetry(set.res, opts, disableHighCardinality) + if err != nil { + return nil, err + } + return mp, nil +} + +// LogAboutServers logs about the servers that are serving metrics. +func (mp *meterProvider) LogAboutServers(logger *zap.Logger, cfg MetricsConfig) { + for _, server := range mp.servers { + logger.Info( + "Serving metrics", + zap.String(zapKeyTelemetryAddress, server.Addr), + zap.Stringer(zapKeyTelemetryLevel, cfg.Level), + ) + } +} + +// Shutdown the meter provider and all the associated resources. +// The type signature of this method matches that of the sdkmetric.MeterProvider. 
+func (mp *meterProvider) Shutdown(ctx context.Context) error { + var errs error + for _, server := range mp.servers { + if server != nil { + errs = multierr.Append(errs, server.Close()) + } + } + errs = multierr.Append(errs, mp.MeterProvider.Shutdown(ctx)) + mp.serverWG.Wait() + + return errs +} diff --git a/vendor/go.opentelemetry.io/collector/service/telemetry/telemetry.go b/vendor/go.opentelemetry.io/collector/service/telemetry/telemetry.go deleted file mode 100644 index c9346c26d59..00000000000 --- a/vendor/go.opentelemetry.io/collector/service/telemetry/telemetry.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package telemetry // import "go.opentelemetry.io/collector/service/telemetry" - -import ( - "go.opentelemetry.io/collector/service/telemetry/internal" -) - -// Settings holds configuration for building Telemetry. -type Settings = internal.CreateSettings diff --git a/vendor/go.opentelemetry.io/collector/service/telemetry/tracer.go b/vendor/go.opentelemetry.io/collector/service/telemetry/tracer.go index d235ca3aaf2..f9ff92c1878 100644 --- a/vendor/go.opentelemetry.io/collector/service/telemetry/tracer.go +++ b/vendor/go.opentelemetry.io/collector/service/telemetry/tracer.go @@ -7,57 +7,53 @@ import ( "context" "errors" - "go.opentelemetry.io/contrib/config" "go.opentelemetry.io/contrib/propagators/b3" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/embedded" + "go.opentelemetry.io/otel/trace/noop" + + "go.opentelemetry.io/collector/config/configtelemetry" + "go.opentelemetry.io/collector/featuregate" ) +var noopTracerProvider = featuregate.GlobalRegistry().MustRegister("service.noopTracerProvider", + featuregate.StageAlpha, + featuregate.WithRegisterFromVersion("v0.107.0"), + featuregate.WithRegisterToVersion("v0.109.0"), + featuregate.WithRegisterDescription("Sets a Noop OpenTelemetry TracerProvider to reduce memory allocations. This featuregate is incompatible with the zPages extension.")) + const ( // supported trace propagators traceContextPropagator = "tracecontext" b3Propagator = "b3" ) -var ( - errUnsupportedPropagator = errors.New("unsupported trace propagator") -) +var errUnsupportedPropagator = errors.New("unsupported trace propagator") -// New creates a new Telemetry from Config. 
-func newTracerProvider(ctx context.Context, cfg Config) (trace.TracerProvider, error) { - sdk, err := config.NewSDK( - config.WithContext(ctx), - config.WithOpenTelemetryConfiguration( - config.OpenTelemetryConfiguration{ - TracerProvider: &config.TracerProvider{ - Processors: cfg.Traces.Processors, - // TODO: once https://github.com/open-telemetry/opentelemetry-configuration/issues/83 is resolved, - // configuration for sampler should be done here via something like the following: - // - // Sampler: &config.Sampler{ - // ParentBased: &config.SamplerParentBased{ - // LocalParentSampled: &config.Sampler{ - // AlwaysOn: config.SamplerAlwaysOn{}, - // }, - // LocalParentNotSampled: &config.Sampler{ - // RecordOnly: config.SamplerRecordOnly{}, - // }, - // RemoteParentSampled: &config.Sampler{ - // AlwaysOn: config.SamplerAlwaysOn{}, - // }, - // RemoteParentNotSampled: &config.Sampler{ - // RecordOnly: config.SamplerRecordOnly{}, - // }, - // }, - // }, - }, - }, - ), - ) - - if err != nil { - return nil, err +type noopNoContextTracer struct { + embedded.Tracer +} + +var noopSpan = noop.Span{} + +func (n *noopNoContextTracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) (context.Context, trace.Span) { + return ctx, noopSpan +} + +type noopNoContextTracerProvider struct { + embedded.TracerProvider +} + +func (n *noopNoContextTracerProvider) Tracer(_ string, _ ...trace.TracerOption) trace.Tracer { + return &noopNoContextTracer{} +} + +// newTracerProvider creates a new TracerProvider from Config. +func newTracerProvider(set Settings, cfg Config) (trace.TracerProvider, error) { + if noopTracerProvider.IsEnabled() || cfg.Traces.Level == configtelemetry.LevelNone { + return &noopNoContextTracerProvider{}, nil } if tp, err := textMapPropagatorFromConfig(cfg.Traces.Propagators); err == nil { @@ -66,7 +62,10 @@ func newTracerProvider(ctx context.Context, cfg Config) (trace.TracerProvider, e return nil, err } - return sdk.TracerProvider(), nil + if set.SDK != nil { + return set.SDK.TracerProvider(), nil + } + return nil, errors.New("no sdk set") } func textMapPropagatorFromConfig(props []string) (propagation.TextMapPropagator, error) { diff --git a/vendor/go.opentelemetry.io/collector/service/zpages.go b/vendor/go.opentelemetry.io/collector/service/zpages.go deleted file mode 100644 index c7f7b494ad3..00000000000 --- a/vendor/go.opentelemetry.io/collector/service/zpages.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package service // import "go.opentelemetry.io/collector/service" - -import ( - "net/http" - "path" - "runtime" - "time" - - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/featuregate" - "go.opentelemetry.io/collector/service/internal/zpages" -) - -const ( - // Paths - zServicePath = "servicez" - zPipelinePath = "pipelinez" - zExtensionPath = "extensionz" - zFeaturePath = "featurez" -) - -var ( - // InfoVar is a singleton instance of the Info struct. - runtimeInfoVar [][2]string -) - -func init() { - runtimeInfoVar = [][2]string{ - {"StartTimestamp", time.Now().String()}, - {"Go", runtime.Version()}, - {"OS", runtime.GOOS}, - {"Arch", runtime.GOARCH}, - // Add other valuable runtime information here. 
- } -} - -func (host *serviceHost) RegisterZPages(mux *http.ServeMux, pathPrefix string) { - mux.HandleFunc(path.Join(pathPrefix, zServicePath), host.zPagesRequest) - mux.HandleFunc(path.Join(pathPrefix, zPipelinePath), host.pipelines.HandleZPages) - mux.HandleFunc(path.Join(pathPrefix, zExtensionPath), host.serviceExtensions.HandleZPages) - mux.HandleFunc(path.Join(pathPrefix, zFeaturePath), handleFeaturezRequest) -} - -func (host *serviceHost) zPagesRequest(w http.ResponseWriter, _ *http.Request) { - w.Header().Set("Content-Type", "text/html; charset=utf-8") - zpages.WriteHTMLPageHeader(w, zpages.HeaderData{Title: "Service " + host.buildInfo.Command}) - zpages.WriteHTMLPropertiesTable(w, zpages.PropertiesTableData{Name: "Build Info", Properties: getBuildInfoProperties(host.buildInfo)}) - zpages.WriteHTMLPropertiesTable(w, zpages.PropertiesTableData{Name: "Runtime Info", Properties: runtimeInfoVar}) - zpages.WriteHTMLComponentHeader(w, zpages.ComponentHeaderData{ - Name: "Pipelines", - ComponentEndpoint: zPipelinePath, - Link: true, - }) - zpages.WriteHTMLComponentHeader(w, zpages.ComponentHeaderData{ - Name: "Extensions", - ComponentEndpoint: zExtensionPath, - Link: true, - }) - zpages.WriteHTMLComponentHeader(w, zpages.ComponentHeaderData{ - Name: "Features", - ComponentEndpoint: zFeaturePath, - Link: true, - }) - zpages.WriteHTMLPageFooter(w) -} - -func handleFeaturezRequest(w http.ResponseWriter, _ *http.Request) { - w.Header().Set("Content-Type", "text/html; charset=utf-8") - zpages.WriteHTMLPageHeader(w, zpages.HeaderData{Title: "Feature Gates"}) - zpages.WriteHTMLFeaturesTable(w, getFeaturesTableData()) - zpages.WriteHTMLPageFooter(w) -} - -func getFeaturesTableData() zpages.FeatureGateTableData { - data := zpages.FeatureGateTableData{} - featuregate.GlobalRegistry().VisitAll(func(gate *featuregate.Gate) { - data.Rows = append(data.Rows, zpages.FeatureGateTableRowData{ - ID: gate.ID(), - Enabled: gate.IsEnabled(), - Description: gate.Description(), - Stage: gate.Stage().String(), - FromVersion: gate.FromVersion(), - ToVersion: gate.ToVersion(), - ReferenceURL: gate.ReferenceURL(), - }) - }) - return data -} - -func getBuildInfoProperties(buildInfo component.BuildInfo) [][2]string { - return [][2]string{ - {"Command", buildInfo.Command}, - {"Description", buildInfo.Description}, - {"Version", buildInfo.Version}, - } -} diff --git a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/LICENSE b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
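The logger.go hunk above tees the collector's console core with the otelzap bridge vendored below whenever log processors are configured. A minimal, self-contained sketch of the same wiring, assuming a bare SDK provider in place of the collector's configured set.SDK.LoggerProvider() (the nop console core and the log message are illustrative, not the collector's actual configuration):

```go
package main

import (
	"context"

	"go.opentelemetry.io/contrib/bridges/otelzap"
	sdklog "go.opentelemetry.io/otel/sdk/log"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// Assumption: a bare provider with no processors; the collector passes
	// its configured set.SDK.LoggerProvider() here instead.
	lp := sdklog.NewLoggerProvider()
	defer func() { _ = lp.Shutdown(context.Background()) }()

	// Tee an ordinary zap core with the OTel bridge core, mirroring the
	// zap.WrapCore(zapcore.NewTee(...)) call in the logger.go hunk above.
	core := zapcore.NewTee(
		zapcore.NewNopCore(), // placeholder for the collector's console core
		otelzap.NewCore("go.opentelemetry.io/collector/service/telemetry",
			otelzap.WithLoggerProvider(lp),
		),
	)
	logger := zap.New(core)
	logger.Info("telemetry initialized", zap.String("component", "service"))
}
```

Note that the collector only installs this tee when cfg.Logs.Processors is non-empty and an SDK is present; with a provider that has no processors, as in this sketch, the bridged records are simply dropped.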
diff --git a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/README.md b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/README.md new file mode 100644 index 00000000000..5565260ae55 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/README.md @@ -0,0 +1,3 @@ +# OpenTelemetry Zap Log Bridge + +[![Go Reference](https://pkg.go.dev/badge/go.opentelemetry.io/contrib/bridges/otelzap.svg)](https://pkg.go.dev/go.opentelemetry.io/contrib/bridges/otelzap) diff --git a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/convert.go b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/convert.go new file mode 100644 index 00000000000..6f64c794b76 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/convert.go @@ -0,0 +1,123 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/logutil/convert.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelzap // import "go.opentelemetry.io/contrib/bridges/otelzap" + +import ( + "fmt" + "math" + "reflect" + "strconv" + "time" + + "go.opentelemetry.io/otel/log" +) + +// convertValue converts various types to log.Value. +func convertValue(v any) log.Value { + // Handling the most common types without reflect is a small perf win. + switch val := v.(type) { + case bool: + return log.BoolValue(val) + case string: + return log.StringValue(val) + case int: + return log.Int64Value(int64(val)) + case int8: + return log.Int64Value(int64(val)) + case int16: + return log.Int64Value(int64(val)) + case int32: + return log.Int64Value(int64(val)) + case int64: + return log.Int64Value(val) + case uint: + return convertUintValue(uint64(val)) + case uint8: + return log.Int64Value(int64(val)) + case uint16: + return log.Int64Value(int64(val)) + case uint32: + return log.Int64Value(int64(val)) + case uint64: + return convertUintValue(val) + case uintptr: + return convertUintValue(uint64(val)) + case float32: + return log.Float64Value(float64(val)) + case float64: + return log.Float64Value(val) + case time.Duration: + return log.Int64Value(val.Nanoseconds()) + case complex64: + r := log.Float64("r", real(complex128(val))) + i := log.Float64("i", imag(complex128(val))) + return log.MapValue(r, i) + case complex128: + r := log.Float64("r", real(val)) + i := log.Float64("i", imag(val)) + return log.MapValue(r, i) + case time.Time: + return log.Int64Value(val.UnixNano()) + case []byte: + return log.BytesValue(val) + case error: + return log.StringValue(val.Error()) + } + + t := reflect.TypeOf(v) + if t == nil { + return log.Value{} + } + val := reflect.ValueOf(v) + switch t.Kind() { + case reflect.Struct: + return log.StringValue(fmt.Sprintf("%+v", v)) + case reflect.Slice, reflect.Array: + items := make([]log.Value, 0, val.Len()) + for i := 0; i < val.Len(); i++ { + items = append(items, convertValue(val.Index(i).Interface())) + } + return log.SliceValue(items...) + case reflect.Map: + kvs := make([]log.KeyValue, 0, val.Len()) + for _, k := range val.MapKeys() { + var key string + switch k.Kind() { + case reflect.String: + key = k.String() + default: + key = fmt.Sprintf("%+v", k.Interface()) + } + kvs = append(kvs, log.KeyValue{ + Key: key, + Value: convertValue(val.MapIndex(k).Interface()), + }) + } + return log.MapValue(kvs...) + case reflect.Ptr, reflect.Interface: + if val.IsNil() { + return log.Value{} + } + return convertValue(val.Elem().Interface()) + } + + // Try to handle this as gracefully as possible. + // + // Don't panic here. 
It is preferable to have users open an issue
+	// asking why their attributes have an "unhandled: " prefix rather
+	// than to have their code panic.
+	return log.StringValue(fmt.Sprintf("unhandled: (%s) %+v", t, v))
+}
+
+// convertUintValue converts a uint64 to a log.Value.
+// If the value is too large to fit in an int64, it is converted to a string.
+func convertUintValue(v uint64) log.Value {
+	if v > math.MaxInt64 {
+		return log.StringValue(strconv.FormatUint(v, 10))
+	}
+	return log.Int64Value(int64(v))
+}
diff --git a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/core.go b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/core.go
new file mode 100644
index 00000000000..545f756803f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/core.go
@@ -0,0 +1,277 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package otelzap provides a bridge between [go.uber.org/zap] and
+// [OpenTelemetry].
+//
+// # Record Conversion
+//
+// The [zapcore.Entry] and [zapcore.Field] are converted to OpenTelemetry [log.Record] in the following
+// way:
+//
+//   - Time is set as the Timestamp.
+//   - Message is set as the Body using a [log.StringValue].
+//   - Level is transformed and set as the Severity. The SeverityText is also
+//     set.
+//   - Fields are transformed and set as the Attributes.
+//   - Field value of type [context.Context] is used as context when emitting log records.
+//   - For named loggers, LoggerName is used to access [log.Logger] from [log.LoggerProvider].
+//
+// The Level is transformed to the OpenTelemetry Severity types in the following way.
+//
+//   - [zapcore.DebugLevel] is transformed to [log.SeverityDebug]
+//   - [zapcore.InfoLevel] is transformed to [log.SeverityInfo]
+//   - [zapcore.WarnLevel] is transformed to [log.SeverityWarn]
+//   - [zapcore.ErrorLevel] is transformed to [log.SeverityError]
+//   - [zapcore.DPanicLevel] is transformed to [log.SeverityFatal1]
+//   - [zapcore.PanicLevel] is transformed to [log.SeverityFatal2]
+//   - [zapcore.FatalLevel] is transformed to [log.SeverityFatal3]
+//
+// Fields are transformed based on their type into log attributes, or
+// into a string value encoded using [fmt.Sprintf] if there is no matching type.
+//
+// [OpenTelemetry]: https://opentelemetry.io/docs/concepts/signals/logs/
+package otelzap // import "go.opentelemetry.io/contrib/bridges/otelzap"
+
+import (
+	"context"
+	"slices"
+	"strings"
+
+	"go.uber.org/zap/zapcore"
+
+	"go.opentelemetry.io/otel/log"
+	"go.opentelemetry.io/otel/log/global"
+	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+)
+
+type config struct {
+	provider  log.LoggerProvider
+	version   string
+	schemaURL string
+}
+
+func newConfig(options []Option) config {
+	var c config
+	for _, opt := range options {
+		c = opt.apply(c)
+	}
+
+	if c.provider == nil {
+		c.provider = global.GetLoggerProvider()
+	}
+
+	return c
+}
+
+// Option configures a [Core].
+type Option interface {
+	apply(config) config
+}
+
+type optFunc func(config) config
+
+func (f optFunc) apply(c config) config { return f(c) }
+
+// WithVersion returns an [Option] that configures the version of the
+// [log.Logger] used by a [Core]. The version should be the version of the
+// package that is being logged.
+func WithVersion(version string) Option {
+	return optFunc(func(c config) config {
+		c.version = version
+		return c
+	})
+}
+
+// WithSchemaURL returns an [Option] that configures the semantic convention
+// schema URL of the [log.Logger] used by a [Core].
The schemaURL should be
+// the schema URL for the semantic conventions used in log records.
+func WithSchemaURL(schemaURL string) Option {
+	return optFunc(func(c config) config {
+		c.schemaURL = schemaURL
+		return c
+	})
+}
+
+// WithLoggerProvider returns an [Option] that configures [log.LoggerProvider]
+// used by a [Core] to create its [log.Logger].
+//
+// By default, if this Option is not provided, the Core will use the global
+// LoggerProvider.
+func WithLoggerProvider(provider log.LoggerProvider) Option {
+	return optFunc(func(c config) config {
+		c.provider = provider
+		return c
+	})
+}
+
+// Core is a [zapcore.Core] that sends logging records to OpenTelemetry.
+type Core struct {
+	provider log.LoggerProvider
+	logger   log.Logger
+	opts     []log.LoggerOption
+	attr     []log.KeyValue
+	ctx      context.Context
+}
+
+// Compile-time check *Core implements zapcore.Core.
+var _ zapcore.Core = (*Core)(nil)
+
+// NewCore creates a new [zapcore.Core] that can be used with [go.uber.org/zap.New].
+// The name should be the package import path that is being logged.
+// The name is ignored for named loggers created using [go.uber.org/zap.Logger.Named].
+func NewCore(name string, opts ...Option) *Core {
+	cfg := newConfig(opts)
+
+	var loggerOpts []log.LoggerOption
+	if cfg.version != "" {
+		loggerOpts = append(loggerOpts, log.WithInstrumentationVersion(cfg.version))
+	}
+	if cfg.schemaURL != "" {
+		loggerOpts = append(loggerOpts, log.WithSchemaURL(cfg.schemaURL))
+	}
+
+	logger := cfg.provider.Logger(name, loggerOpts...)
+
+	return &Core{
+		provider: cfg.provider,
+		logger:   logger,
+		opts:     loggerOpts,
+		ctx:      context.Background(),
+	}
+}
+
+// Enabled decides whether a given logging level is enabled when logging a message.
+func (o *Core) Enabled(level zapcore.Level) bool {
+	param := log.EnabledParameters{Severity: convertLevel(level)}
+	return o.logger.Enabled(context.Background(), param)
+}
+
+// With adds structured context to the Core.
+func (o *Core) With(fields []zapcore.Field) zapcore.Core {
+	cloned := o.clone()
+	if len(fields) > 0 {
+		ctx, attrbuf := convertField(fields)
+		if ctx != nil {
+			cloned.ctx = ctx
+		}
+		cloned.attr = append(cloned.attr, attrbuf...)
+	}
+	return cloned
+}
+
+func (o *Core) clone() *Core {
+	return &Core{
+		provider: o.provider,
+		opts:     o.opts,
+		logger:   o.logger,
+		attr:     slices.Clone(o.attr),
+		ctx:      o.ctx,
+	}
+}
+
+// Sync flushes buffered logs (if any).
+func (o *Core) Sync() error {
+	return nil
+}
+
+// Check determines whether the supplied Entry should be logged.
+// If the entry should be logged, the Core adds itself to the CheckedEntry and returns the result.
+func (o *Core) Check(ent zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry {
+	param := log.EnabledParameters{Severity: convertLevel(ent.Level)}
+
+	logger := o.logger
+	if ent.LoggerName != "" {
+		logger = o.provider.Logger(ent.LoggerName, o.opts...)
+	}
+
+	if logger.Enabled(context.Background(), param) {
+		return ce.AddCore(ent, o)
+	}
+	return ce
+}
+
+// Write encodes zap fields to OTel log records and emits them.
+func (o *Core) Write(ent zapcore.Entry, fields []zapcore.Field) error {
+	r := log.Record{}
+	r.SetTimestamp(ent.Time)
+	r.SetBody(log.StringValue(ent.Message))
+	r.SetSeverity(convertLevel(ent.Level))
+	r.SetSeverityText(ent.Level.String())
+
+	r.AddAttributes(o.attr...)
+	if ent.Caller.Defined {
+		funcName, namespace := splitFuncName(ent.Caller.Function)
+		r.AddAttributes(
+			log.String(string(semconv.CodeFilepathKey), ent.Caller.File),
+			log.Int(string(semconv.CodeLineNumberKey), ent.Caller.Line),
+			log.String(string(semconv.CodeFunctionKey), funcName),
+			log.String(string(semconv.CodeNamespaceKey), namespace),
+		)
+	}
+	if ent.Stack != "" {
+		r.AddAttributes(log.String(string(semconv.CodeStacktraceKey), ent.Stack))
+	}
+	if len(fields) > 0 {
+		ctx, attrbuf := convertField(fields)
+		if ctx != nil {
+			o.ctx = ctx
+		}
+		r.AddAttributes(attrbuf...)
+	}
+
+	logger := o.logger
+	if ent.LoggerName != "" {
+		logger = o.provider.Logger(ent.LoggerName, o.opts...)
+	}
+	logger.Emit(o.ctx, r)
+	return nil
+}
+
+func convertField(fields []zapcore.Field) (context.Context, []log.KeyValue) {
+	var ctx context.Context
+	enc := newObjectEncoder(len(fields))
+	for _, field := range fields {
+		if ctxFld, ok := field.Interface.(context.Context); ok {
+			ctx = ctxFld
+			continue
+		}
+		field.AddTo(enc)
+	}
+
+	enc.calculate(enc.root)
+	return ctx, enc.root.attrs
+}
+
+func convertLevel(level zapcore.Level) log.Severity {
+	switch level {
+	case zapcore.DebugLevel:
+		return log.SeverityDebug
+	case zapcore.InfoLevel:
+		return log.SeverityInfo
+	case zapcore.WarnLevel:
+		return log.SeverityWarn
+	case zapcore.ErrorLevel:
+		return log.SeverityError
+	case zapcore.DPanicLevel:
+		return log.SeverityFatal1
+	case zapcore.PanicLevel:
+		return log.SeverityFatal2
+	case zapcore.FatalLevel:
+		return log.SeverityFatal3
+	default:
+		return log.SeverityUndefined
+	}
+}
+
+// splitFuncName splits package path-qualified function name into
+// function name and package full name (namespace). E.g. it splits
+// "github.com/my/repo/pkg.foo" into
+// "foo" and "github.com/my/repo/pkg".
+func splitFuncName(f string) (string, string) {
+	i := strings.LastIndexByte(f, '.')
+	if i < 0 {
+		return "", ""
+	}
+	return f[i+1:], f[:i]
+}
diff --git a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/encoder.go b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/encoder.go
new file mode 100644
index 00000000000..8147576ae77
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/encoder.go
@@ -0,0 +1,274 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelzap // import "go.opentelemetry.io/contrib/bridges/otelzap"
+
+import (
+	"time"
+
+	"go.uber.org/zap/zapcore"
+
+	"go.opentelemetry.io/otel/log"
+)
+
+var (
+	_ zapcore.ObjectEncoder = (*objectEncoder)(nil)
+	_ zapcore.ArrayEncoder  = (*arrayEncoder)(nil)
+)
+
+type namespace struct {
+	name  string
+	attrs []log.KeyValue
+	next  *namespace
+}
+
+// objectEncoder implements zapcore.ObjectEncoder.
+// It encodes given fields to OTel key-values.
+type objectEncoder struct {
+	// root is a pointer to the default namespace
+	root *namespace
+	// cur is a pointer to the namespace we're currently writing to.
+	cur *namespace
+}
+
+func newObjectEncoder(n int) *objectEncoder {
+	keyval := make([]log.KeyValue, 0, n)
+	m := &namespace{
+		attrs: keyval,
+	}
+	return &objectEncoder{
+		root: m,
+		cur:  m,
+	}
+}
+
+// calculate iterates to the end of the linked list and appends namespace data.
+// Run this function before accessing the complete result.
+func (m *objectEncoder) calculate(o *namespace) { + if o.next == nil { + return + } + m.calculate(o.next) + o.attrs = append(o.attrs, log.Map(o.next.name, o.next.attrs...)) +} + +func (m *objectEncoder) AddArray(key string, v zapcore.ArrayMarshaler) error { + arr := newArrayEncoder() + err := v.MarshalLogArray(arr) + m.cur.attrs = append(m.cur.attrs, log.Slice(key, arr.elems...)) + return err +} + +func (m *objectEncoder) AddObject(k string, v zapcore.ObjectMarshaler) error { + // Similar to console_encoder which uses capacity of 2: + // https://github.com/uber-go/zap/blob/bd0cf0447951b77aa98dcfc1ac19e6f58d3ee64f/zapcore/console_encoder.go#L33. + newobj := newObjectEncoder(2) + err := v.MarshalLogObject(newobj) + newobj.calculate(newobj.root) + m.cur.attrs = append(m.cur.attrs, log.Map(k, newobj.root.attrs...)) + return err +} + +func (m *objectEncoder) AddBinary(k string, v []byte) { + m.cur.attrs = append(m.cur.attrs, log.Bytes(k, v)) +} + +func (m *objectEncoder) AddByteString(k string, v []byte) { + m.cur.attrs = append(m.cur.attrs, log.String(k, string(v))) +} + +func (m *objectEncoder) AddBool(k string, v bool) { + m.cur.attrs = append(m.cur.attrs, log.Bool(k, v)) +} + +func (m *objectEncoder) AddDuration(k string, v time.Duration) { + m.AddInt64(k, v.Nanoseconds()) +} + +func (m *objectEncoder) AddComplex128(k string, v complex128) { + r := log.Float64("r", real(v)) + i := log.Float64("i", imag(v)) + m.cur.attrs = append(m.cur.attrs, log.Map(k, r, i)) +} + +func (m *objectEncoder) AddFloat64(k string, v float64) { + m.cur.attrs = append(m.cur.attrs, log.Float64(k, v)) +} + +func (m *objectEncoder) AddInt64(k string, v int64) { + m.cur.attrs = append(m.cur.attrs, log.Int64(k, v)) +} + +func (m *objectEncoder) AddInt(k string, v int) { + m.cur.attrs = append(m.cur.attrs, log.Int(k, v)) +} + +func (m *objectEncoder) AddString(k string, v string) { + m.cur.attrs = append(m.cur.attrs, log.String(k, v)) +} + +func (m *objectEncoder) AddUint64(k string, v uint64) { + m.cur.attrs = append(m.cur.attrs, + log.KeyValue{ + Key: k, + Value: assignUintValue(v), + }) +} + +func (m *objectEncoder) AddReflected(k string, v interface{}) error { + m.cur.attrs = append(m.cur.attrs, + log.KeyValue{ + Key: k, + Value: convertValue(v), + }) + return nil +} + +// OpenNamespace opens an isolated namespace where all subsequent fields will +// be added. 
+func (m *objectEncoder) OpenNamespace(k string) { + keyValue := make([]log.KeyValue, 0, 5) + s := &namespace{ + name: k, + attrs: keyValue, + } + m.cur.next = s + m.cur = s +} + +func (m *objectEncoder) AddComplex64(k string, v complex64) { + m.AddComplex128(k, complex128(v)) +} + +func (m *objectEncoder) AddTime(k string, v time.Time) { + m.AddInt64(k, v.UnixNano()) +} + +func (m *objectEncoder) AddFloat32(k string, v float32) { + m.AddFloat64(k, float64(v)) +} + +func (m *objectEncoder) AddInt32(k string, v int32) { + m.AddInt64(k, int64(v)) +} + +func (m *objectEncoder) AddInt16(k string, v int16) { + m.AddInt64(k, int64(v)) +} + +func (m *objectEncoder) AddInt8(k string, v int8) { + m.AddInt64(k, int64(v)) +} + +func (m *objectEncoder) AddUint(k string, v uint) { + m.AddUint64(k, uint64(v)) +} + +func (m *objectEncoder) AddUint32(k string, v uint32) { + m.AddInt64(k, int64(v)) +} + +func (m *objectEncoder) AddUint16(k string, v uint16) { + m.AddInt64(k, int64(v)) +} + +func (m *objectEncoder) AddUint8(k string, v uint8) { + m.AddInt64(k, int64(v)) +} + +func (m *objectEncoder) AddUintptr(k string, v uintptr) { + m.AddUint64(k, uint64(v)) +} + +func assignUintValue(v uint64) log.Value { + const maxInt64 = ^uint64(0) >> 1 + if v > maxInt64 { + return log.Float64Value(float64(v)) + } + return log.Int64Value(int64(v)) // nolint:gosec // Overflow checked above. +} + +// arrayEncoder implements [zapcore.ArrayEncoder]. +type arrayEncoder struct { + elems []log.Value +} + +func newArrayEncoder() *arrayEncoder { + return &arrayEncoder{ + // Similar to console_encoder which uses capacity of 2: + // https://github.com/uber-go/zap/blob/bd0cf0447951b77aa98dcfc1ac19e6f58d3ee64f/zapcore/console_encoder.go#L33. + elems: make([]log.Value, 0, 2), + } +} + +func (a *arrayEncoder) AppendArray(v zapcore.ArrayMarshaler) error { + arr := newArrayEncoder() + err := v.MarshalLogArray(arr) + a.elems = append(a.elems, log.SliceValue(arr.elems...)) + return err +} + +func (a *arrayEncoder) AppendObject(v zapcore.ObjectMarshaler) error { + // Similar to console_encoder which uses capacity of 2: + // https://github.com/uber-go/zap/blob/bd0cf0447951b77aa98dcfc1ac19e6f58d3ee64f/zapcore/console_encoder.go#L33. 
+ m := newObjectEncoder(2) + err := v.MarshalLogObject(m) + m.calculate(m.root) + a.elems = append(a.elems, log.MapValue(m.root.attrs...)) + return err +} + +func (a *arrayEncoder) AppendReflected(v interface{}) error { + a.elems = append(a.elems, convertValue(v)) + return nil +} + +func (a *arrayEncoder) AppendByteString(v []byte) { + a.elems = append(a.elems, log.StringValue(string(v))) +} + +func (a *arrayEncoder) AppendBool(v bool) { + a.elems = append(a.elems, log.BoolValue(v)) +} + +func (a *arrayEncoder) AppendFloat64(v float64) { + a.elems = append(a.elems, log.Float64Value(v)) +} + +func (a *arrayEncoder) AppendFloat32(v float32) { + a.AppendFloat64(float64(v)) +} + +func (a *arrayEncoder) AppendInt(v int) { + a.elems = append(a.elems, log.IntValue(v)) +} + +func (a *arrayEncoder) AppendInt64(v int64) { + a.elems = append(a.elems, log.Int64Value(v)) +} + +func (a *arrayEncoder) AppendString(v string) { + a.elems = append(a.elems, log.StringValue(v)) +} + +func (a *arrayEncoder) AppendComplex128(v complex128) { + r := log.Float64("r", real(v)) + i := log.Float64("i", imag(v)) + a.elems = append(a.elems, log.MapValue(r, i)) +} + +func (a *arrayEncoder) AppendUint64(v uint64) { + a.elems = append(a.elems, assignUintValue(v)) +} + +func (a *arrayEncoder) AppendComplex64(v complex64) { a.AppendComplex128(complex128(v)) } +func (a *arrayEncoder) AppendDuration(v time.Duration) { a.AppendInt64(v.Nanoseconds()) } +func (a *arrayEncoder) AppendInt32(v int32) { a.AppendInt64(int64(v)) } +func (a *arrayEncoder) AppendInt16(v int16) { a.AppendInt64(int64(v)) } +func (a *arrayEncoder) AppendInt8(v int8) { a.AppendInt64(int64(v)) } +func (a *arrayEncoder) AppendTime(v time.Time) { a.AppendInt64(v.UnixNano()) } +func (a *arrayEncoder) AppendUint(v uint) { a.AppendUint64(uint64(v)) } +func (a *arrayEncoder) AppendUint32(v uint32) { a.AppendInt64(int64(v)) } +func (a *arrayEncoder) AppendUint16(v uint16) { a.AppendInt64(int64(v)) } +func (a *arrayEncoder) AppendUint8(v uint8) { a.AppendInt64(int64(v)) } +func (a *arrayEncoder) AppendUintptr(v uintptr) { a.AppendUint64(uint64(v)) } diff --git a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/gen.go b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/gen.go new file mode 100644 index 00000000000..5c8b2eea7e4 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/gen.go @@ -0,0 +1,8 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelzap // import "go.opentelemetry.io/contrib/bridges/otelzap" + +// Generate convert: +//go:generate gotmpl --body=../../internal/shared/logutil/convert_test.go.tmpl "--data={ \"pkg\": \"otelzap\" }" --out=convert_test.go +//go:generate gotmpl --body=../../internal/shared/logutil/convert.go.tmpl "--data={ \"pkg\": \"otelzap\" }" --out=convert.go diff --git a/vendor/go.opentelemetry.io/contrib/bridges/prometheus/producer.go b/vendor/go.opentelemetry.io/contrib/bridges/prometheus/producer.go index 5626bdc5545..e94d07a6d46 100644 --- a/vendor/go.opentelemetry.io/contrib/bridges/prometheus/producer.go +++ b/vendor/go.opentelemetry.io/contrib/bridges/prometheus/producer.go @@ -214,10 +214,10 @@ func convertExponentialBuckets(bucketSpans []*dto.BucketSpan, deltas []int64) me initialOffset := bucketSpans[0].GetOffset() - 1 // We will have one bucket count for each delta, and zeros for the offsets // after the initial offset. 
- lenCounts := int32(len(deltas)) + lenCounts := len(deltas) for i, bs := range bucketSpans { if i != 0 { - lenCounts += bs.GetOffset() + lenCounts += int(bs.GetOffset()) } } counts := make([]uint64, lenCounts) diff --git a/vendor/go.opentelemetry.io/contrib/config/config.go b/vendor/go.opentelemetry.io/contrib/config/config.go index 15f471bc6ed..6b8d43cd468 100644 --- a/vendor/go.opentelemetry.io/contrib/config/config.go +++ b/vendor/go.opentelemetry.io/contrib/config/config.go @@ -7,6 +7,7 @@ import ( "context" "errors" + "go.opentelemetry.io/otel/log" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/trace" ) @@ -35,6 +36,7 @@ func noopShutdown(context.Context) error { type SDK struct { meterProvider metric.MeterProvider tracerProvider trace.TracerProvider + loggerProvider log.LoggerProvider shutdown shutdownFunc } @@ -48,6 +50,11 @@ func (s *SDK) MeterProvider() metric.MeterProvider { return s.meterProvider } +// LoggerProvider returns a configured log.LoggerProvider. +func (s *SDK) LoggerProvider() log.LoggerProvider { + return s.loggerProvider +} + // Shutdown calls shutdown on all configured providers. func (s *SDK) Shutdown(ctx context.Context) error { return s.shutdown(ctx) @@ -77,12 +84,17 @@ func NewSDK(opts ...ConfigurationOption) (SDK, error) { return SDK{}, err } + lp, lpShutdown, err := loggerProvider(o, r) + if err != nil { + return SDK{}, err + } + return SDK{ meterProvider: mp, tracerProvider: tp, + loggerProvider: lp, shutdown: func(ctx context.Context) error { - err := mpShutdown(ctx) - return errors.Join(err, tpShutdown(ctx)) + return errors.Join(mpShutdown(ctx), tpShutdown(ctx), lpShutdown(ctx)) }, }, nil } @@ -118,6 +130,3 @@ func WithOpenTelemetryConfiguration(cfg OpenTelemetryConfiguration) Configuratio // TODO: implement parsing functionality: // - https://github.com/open-telemetry/opentelemetry-go-contrib/issues/4373 // - https://github.com/open-telemetry/opentelemetry-go-contrib/issues/4412 - -// TODO: create SDK from the model: -// - https://github.com/open-telemetry/opentelemetry-go-contrib/issues/4371 diff --git a/vendor/go.opentelemetry.io/contrib/config/generated_config.go b/vendor/go.opentelemetry.io/contrib/config/generated_config.go index 64154ee5437..244a9756899 100644 --- a/vendor/go.opentelemetry.io/contrib/config/generated_config.go +++ b/vendor/go.opentelemetry.io/contrib/config/generated_config.go @@ -18,12 +18,7 @@ type AttributeLimits struct { AdditionalProperties interface{} } -type Attributes struct { - // ServiceName corresponds to the JSON schema field "service.name". - ServiceName *string `mapstructure:"service.name,omitempty"` - - AdditionalProperties interface{} -} +type Attributes map[string]interface{} type BatchLogRecordProcessor struct { // ExportTimeout corresponds to the JSON schema field "export_timeout". @@ -101,9 +96,33 @@ type Common map[string]interface{} type Console map[string]interface{} +type Detectors struct { + // Attributes corresponds to the JSON schema field "attributes". + Attributes *DetectorsAttributes `mapstructure:"attributes,omitempty"` +} + +type DetectorsAttributes struct { + // Excluded corresponds to the JSON schema field "excluded". + Excluded []string `mapstructure:"excluded,omitempty"` + + // Included corresponds to the JSON schema field "included". + Included []string `mapstructure:"included,omitempty"` +} + type Headers map[string]string +type IncludeExclude struct { + // Excluded corresponds to the JSON schema field "excluded". 
+ Excluded []string `mapstructure:"excluded,omitempty"` + + // Included corresponds to the JSON schema field "included". + Included []string `mapstructure:"included,omitempty"` +} + type LogRecordExporter struct { + // Console corresponds to the JSON schema field "console". + Console Console `mapstructure:"console,omitempty"` + // OTLP corresponds to the JSON schema field "otlp". OTLP *OTLP `mapstructure:"otlp,omitempty"` @@ -186,6 +205,9 @@ type OTLP struct { // Headers corresponds to the JSON schema field "headers". Headers Headers `mapstructure:"headers,omitempty"` + // Insecure corresponds to the JSON schema field "insecure". + Insecure *bool `mapstructure:"insecure,omitempty"` + // Protocol corresponds to the JSON schema field "protocol". Protocol string `mapstructure:"protocol"` @@ -216,6 +238,9 @@ type OTLPMetric struct { // Headers corresponds to the JSON schema field "headers". Headers Headers `mapstructure:"headers,omitempty"` + // Insecure corresponds to the JSON schema field "insecure". + Insecure *bool `mapstructure:"insecure,omitempty"` + // Protocol corresponds to the JSON schema field "protocol". Protocol string `mapstructure:"protocol"` @@ -381,6 +406,10 @@ type Prometheus struct { // Port corresponds to the JSON schema field "port". Port *int `mapstructure:"port,omitempty"` + // WithResourceConstantLabels corresponds to the JSON schema field + // "with_resource_constant_labels". + WithResourceConstantLabels *IncludeExclude `mapstructure:"with_resource_constant_labels,omitempty"` + // WithoutScopeInfo corresponds to the JSON schema field "without_scope_info". WithoutScopeInfo *bool `mapstructure:"without_scope_info,omitempty"` @@ -423,7 +452,10 @@ func (j *PullMetricReader) UnmarshalJSON(b []byte) error { type Resource struct { // Attributes corresponds to the JSON schema field "attributes". - Attributes *Attributes `mapstructure:"attributes,omitempty"` + Attributes Attributes `mapstructure:"attributes,omitempty"` + + // Detectors corresponds to the JSON schema field "detectors". + Detectors *Detectors `mapstructure:"detectors,omitempty"` // SchemaUrl corresponds to the JSON schema field "schema_url". 
SchemaUrl *string `mapstructure:"schema_url,omitempty"` diff --git a/vendor/go.opentelemetry.io/contrib/config/log.go b/vendor/go.opentelemetry.io/contrib/config/log.go new file mode 100644 index 00000000000..f30b37c8a45 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/config/log.go @@ -0,0 +1,155 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package config // import "go.opentelemetry.io/contrib/config" + +import ( + "context" + "errors" + "fmt" + "net/url" + "time" + + "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp" + "go.opentelemetry.io/otel/exporters/stdout/stdoutlog" + "go.opentelemetry.io/otel/log" + "go.opentelemetry.io/otel/log/noop" + sdklog "go.opentelemetry.io/otel/sdk/log" + "go.opentelemetry.io/otel/sdk/resource" +) + +func loggerProvider(cfg configOptions, res *resource.Resource) (log.LoggerProvider, shutdownFunc, error) { + if cfg.opentelemetryConfig.LoggerProvider == nil { + return noop.NewLoggerProvider(), noopShutdown, nil + } + opts := []sdklog.LoggerProviderOption{ + sdklog.WithResource(res), + } + var errs []error + for _, processor := range cfg.opentelemetryConfig.LoggerProvider.Processors { + sp, err := logProcessor(cfg.ctx, processor) + if err == nil { + opts = append(opts, sdklog.WithProcessor(sp)) + } else { + errs = append(errs, err) + } + } + + if len(errs) > 0 { + return noop.NewLoggerProvider(), noopShutdown, errors.Join(errs...) + } + + lp := sdklog.NewLoggerProvider(opts...) + return lp, lp.Shutdown, nil +} + +func logProcessor(ctx context.Context, processor LogRecordProcessor) (sdklog.Processor, error) { + if processor.Batch != nil && processor.Simple != nil { + return nil, errors.New("must not specify multiple log processor types") + } + if processor.Batch != nil { + exp, err := logExporter(ctx, processor.Batch.Exporter) + if err != nil { + return nil, err + } + return batchLogProcessor(processor.Batch, exp) + } + if processor.Simple != nil { + exp, err := logExporter(ctx, processor.Simple.Exporter) + if err != nil { + return nil, err + } + return sdklog.NewSimpleProcessor(exp), nil + } + return nil, fmt.Errorf("unsupported log processor type, must be one of simple or batch") +} + +func logExporter(ctx context.Context, exporter LogRecordExporter) (sdklog.Exporter, error) { + if exporter.Console != nil && exporter.OTLP != nil { + return nil, errors.New("must not specify multiple exporters") + } + + if exporter.Console != nil { + return stdoutlog.New( + stdoutlog.WithPrettyPrint(), + ) + } + + if exporter.OTLP != nil { + switch exporter.OTLP.Protocol { + case protocolProtobufHTTP: + return otlpHTTPLogExporter(ctx, exporter.OTLP) + default: + return nil, fmt.Errorf("unsupported protocol %q", exporter.OTLP.Protocol) + } + } + return nil, errors.New("no valid log exporter") +} + +func batchLogProcessor(blp *BatchLogRecordProcessor, exp sdklog.Exporter) (*sdklog.BatchProcessor, error) { + var opts []sdklog.BatchProcessorOption + if blp.ExportTimeout != nil { + if *blp.ExportTimeout < 0 { + return nil, fmt.Errorf("invalid export timeout %d", *blp.ExportTimeout) + } + opts = append(opts, sdklog.WithExportTimeout(time.Millisecond*time.Duration(*blp.ExportTimeout))) + } + if blp.MaxExportBatchSize != nil { + if *blp.MaxExportBatchSize < 0 { + return nil, fmt.Errorf("invalid batch size %d", *blp.MaxExportBatchSize) + } + opts = append(opts, sdklog.WithExportMaxBatchSize(*blp.MaxExportBatchSize)) + } + if blp.MaxQueueSize != nil { + if *blp.MaxQueueSize < 0 { + return nil, fmt.Errorf("invalid queue size %d",
*blp.MaxQueueSize) + } + opts = append(opts, sdklog.WithMaxQueueSize(*blp.MaxQueueSize)) + } + + if blp.ScheduleDelay != nil { + if *blp.ScheduleDelay < 0 { + return nil, fmt.Errorf("invalid schedule delay %d", *blp.ScheduleDelay) + } + opts = append(opts, sdklog.WithExportInterval(time.Millisecond*time.Duration(*blp.ScheduleDelay))) + } + + return sdklog.NewBatchProcessor(exp, opts...), nil +} + +func otlpHTTPLogExporter(ctx context.Context, otlpConfig *OTLP) (sdklog.Exporter, error) { + var opts []otlploghttp.Option + + if len(otlpConfig.Endpoint) > 0 { + u, err := url.ParseRequestURI(otlpConfig.Endpoint) + if err != nil { + return nil, err + } + opts = append(opts, otlploghttp.WithEndpoint(u.Host)) + + if u.Scheme == "http" { + opts = append(opts, otlploghttp.WithInsecure()) + } + if len(u.Path) > 0 { + opts = append(opts, otlploghttp.WithURLPath(u.Path)) + } + } + if otlpConfig.Compression != nil { + switch *otlpConfig.Compression { + case compressionGzip: + opts = append(opts, otlploghttp.WithCompression(otlploghttp.GzipCompression)) + case compressionNone: + opts = append(opts, otlploghttp.WithCompression(otlploghttp.NoCompression)) + default: + return nil, fmt.Errorf("unsupported compression %q", *otlpConfig.Compression) + } + } + if otlpConfig.Timeout != nil && *otlpConfig.Timeout > 0 { + opts = append(opts, otlploghttp.WithTimeout(time.Millisecond*time.Duration(*otlpConfig.Timeout))) + } + if len(otlpConfig.Headers) > 0 { + opts = append(opts, otlploghttp.WithHeaders(otlpConfig.Headers)) + } + + return otlploghttp.New(ctx, opts...) +} diff --git a/vendor/go.opentelemetry.io/contrib/config/metric.go b/vendor/go.opentelemetry.io/contrib/config/metric.go index 0b7543d3dbc..38b294172fb 100644 --- a/vendor/go.opentelemetry.io/contrib/config/metric.go +++ b/vendor/go.opentelemetry.io/contrib/config/metric.go @@ -8,6 +8,7 @@ import ( "encoding/json" "errors" "fmt" + "math" "net" "net/http" "net/url" @@ -18,16 +19,22 @@ import ( "github.com/prometheus/client_golang/prometheus/promhttp" "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" otelprom "go.opentelemetry.io/otel/exporters/prometheus" "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/noop" + "go.opentelemetry.io/otel/sdk/instrumentation" sdkmetric "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/resource" ) +var zeroScope instrumentation.Scope + +const instrumentKindUndefined = sdkmetric.InstrumentKind(0) + func meterProvider(cfg configOptions, res *resource.Resource) (metric.MeterProvider, shutdownFunc, error) { if cfg.opentelemetryConfig.MeterProvider == nil { return noop.NewMeterProvider(), noopShutdown, nil @@ -45,6 +52,15 @@ func meterProvider(cfg configOptions, res *resource.Resource) (metric.MeterProvi errs = append(errs, err) } } + for _, vw := range cfg.opentelemetryConfig.MeterProvider.Views { + v, err := view(vw) + if err == nil { + opts = append(opts, sdkmetric.WithView(v)) + } else { + errs = append(errs, err) + } + } + if len(errs) > 0 { return noop.NewMeterProvider(), noopShutdown, errors.Join(errs...) 
} @@ -59,7 +75,15 @@ func metricReader(ctx context.Context, r MetricReader) (sdkmetric.Reader, error) } if r.Periodic != nil { - return periodicExporter(ctx, r.Periodic.Exporter) + var opts []sdkmetric.PeriodicReaderOption + if r.Periodic.Interval != nil { + opts = append(opts, sdkmetric.WithInterval(time.Duration(*r.Periodic.Interval)*time.Millisecond)) + } + + if r.Periodic.Timeout != nil { + opts = append(opts, sdkmetric.WithTimeout(time.Duration(*r.Periodic.Timeout)*time.Millisecond)) + } + return periodicExporter(ctx, r.Periodic.Exporter, opts...) } if r.Pull != nil { @@ -148,7 +172,7 @@ func otlpHTTPMetricExporter(ctx context.Context, otlpConfig *OTLPMetric) (sdkmet } func otlpGRPCMetricExporter(ctx context.Context, otlpConfig *OTLPMetric) (sdkmetric.Exporter, error) { - opts := []otlpmetricgrpc.Option{} + var opts []otlpmetricgrpc.Option if len(otlpConfig.Endpoint) > 0 { u, err := url.ParseRequestURI(otlpConfig.Endpoint) @@ -207,6 +231,22 @@ func prometheusReader(ctx context.Context, prometheusConfig *Prometheus) (sdkmet if prometheusConfig.WithoutUnits != nil && *prometheusConfig.WithoutUnits { opts = append(opts, otelprom.WithoutUnits()) } + if prometheusConfig.WithResourceConstantLabels != nil { + if prometheusConfig.WithResourceConstantLabels.Included != nil { + var keys []attribute.Key + for _, val := range prometheusConfig.WithResourceConstantLabels.Included { + keys = append(keys, attribute.Key(val)) + } + opts = append(opts, otelprom.WithResourceAsConstantLabels(attribute.NewAllowKeysFilter(keys...))) + } + if prometheusConfig.WithResourceConstantLabels.Excluded != nil { + var keys []attribute.Key + for _, val := range prometheusConfig.WithResourceConstantLabels.Excluded { + keys = append(keys, attribute.Key(val)) + } + opts = append(opts, otelprom.WithResourceAsConstantLabels(attribute.NewDenyKeysFilter(keys...))) + } + } reg := prometheus.NewRegistry() opts = append(opts, otelprom.WithRegisterer(reg)) @@ -214,7 +254,7 @@ func prometheusReader(ctx context.Context, prometheusConfig *Prometheus) (sdkmet mux := http.NewServeMux() mux.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg})) server := http.Server{ - // Timeouts are necessary to make a server resilent to attacks, but ListenAndServe doesn't set any. + // Timeouts are necessary to make a server resilient to attacks, but ListenAndServe doesn't set any. // We use values from this example: https://blog.cloudflare.com/exposing-go-on-the-internet/#:~:text=There%20are%20three%20main%20timeouts ReadTimeout: 5 * time.Second, WriteTimeout: 10 * time.Second, @@ -223,9 +263,6 @@ func prometheusReader(ctx context.Context, prometheusConfig *Prometheus) (sdkmet } addr := fmt.Sprintf("%s:%d", *prometheusConfig.Host, *prometheusConfig.Port) - // TODO: add support for constant label filter - // otelprom.WithResourceAsConstantLabels(attribute.NewDenyKeysFilter()), - // ) reader, err := otelprom.New(opts...)
if err != nil { return nil, fmt.Errorf("error creating otel prometheus exporter: %w", err) @@ -239,7 +276,7 @@ func prometheusReader(ctx context.Context, prometheusConfig *Prometheus) (sdkmet } go func() { - if err := server.Serve(lis); err != nil && err != http.ErrServerClosed { + if err := server.Serve(lis); err != nil && errors.Is(err, http.ErrServerClosed) { otel.Handle(fmt.Errorf("the Prometheus HTTP server exited unexpectedly: %w", err)) } }() @@ -258,3 +295,155 @@ func (rws readerWithServer) Shutdown(ctx context.Context) error { rws.server.Shutdown(ctx), ) } + +func view(v View) (sdkmetric.View, error) { + if v.Selector == nil { + return nil, errors.New("view: no selector provided") + } + + inst, err := instrument(*v.Selector) + if err != nil { + return nil, err + } + + return sdkmetric.NewView(inst, stream(v.Stream)), nil +} + +func instrument(vs ViewSelector) (sdkmetric.Instrument, error) { + kind, err := instrumentKind(vs.InstrumentType) + if err != nil { + return sdkmetric.Instrument{}, fmt.Errorf("view_selector: %w", err) + } + inst := sdkmetric.Instrument{ + Name: strOrEmpty(vs.InstrumentName), + Unit: strOrEmpty(vs.Unit), + Kind: kind, + Scope: instrumentation.Scope{ + Name: strOrEmpty(vs.MeterName), + Version: strOrEmpty(vs.MeterVersion), + SchemaURL: strOrEmpty(vs.MeterSchemaUrl), + }, + } + + if instrumentIsEmpty(inst) { + return sdkmetric.Instrument{}, errors.New("view_selector: empty selector not supporter") + } + return inst, nil +} + +func stream(vs *ViewStream) sdkmetric.Stream { + if vs == nil { + return sdkmetric.Stream{} + } + + return sdkmetric.Stream{ + Name: strOrEmpty(vs.Name), + Description: strOrEmpty(vs.Description), + Aggregation: aggregation(vs.Aggregation), + AttributeFilter: attributeFilter(vs.AttributeKeys), + } +} + +func attributeFilter(attributeKeys []string) attribute.Filter { + var attrKeys []attribute.Key + for _, attrStr := range attributeKeys { + attrKeys = append(attrKeys, attribute.Key(attrStr)) + } + return attribute.NewAllowKeysFilter(attrKeys...) +} + +func aggregation(aggr *ViewStreamAggregation) sdkmetric.Aggregation { + if aggr == nil { + return nil + } + + if aggr.Base2ExponentialBucketHistogram != nil { + return sdkmetric.AggregationBase2ExponentialHistogram{ + MaxSize: int32OrZero(aggr.Base2ExponentialBucketHistogram.MaxSize), + MaxScale: int32OrZero(aggr.Base2ExponentialBucketHistogram.MaxScale), + // Need to negate because config has the positive action RecordMinMax. + NoMinMax: !boolOrFalse(aggr.Base2ExponentialBucketHistogram.RecordMinMax), + } + } + if aggr.Default != nil { + // TODO: Understand what to set here. + return nil + } + if aggr.Drop != nil { + return sdkmetric.AggregationDrop{} + } + if aggr.ExplicitBucketHistogram != nil { + return sdkmetric.AggregationExplicitBucketHistogram{ + Boundaries: aggr.ExplicitBucketHistogram.Boundaries, + // Need to negate because config has the positive action RecordMinMax. + NoMinMax: !boolOrFalse(aggr.ExplicitBucketHistogram.RecordMinMax), + } + } + if aggr.LastValue != nil { + return sdkmetric.AggregationLastValue{} + } + if aggr.Sum != nil { + return sdkmetric.AggregationSum{} + } + return nil +} + +func instrumentKind(vsit *ViewSelectorInstrumentType) (sdkmetric.InstrumentKind, error) { + if vsit == nil { + // Equivalent to instrumentKindUndefined. 
+ return instrumentKindUndefined, nil + } + + switch *vsit { + case ViewSelectorInstrumentTypeCounter: + return sdkmetric.InstrumentKindCounter, nil + case ViewSelectorInstrumentTypeUpDownCounter: + return sdkmetric.InstrumentKindUpDownCounter, nil + case ViewSelectorInstrumentTypeHistogram: + return sdkmetric.InstrumentKindHistogram, nil + case ViewSelectorInstrumentTypeObservableCounter: + return sdkmetric.InstrumentKindObservableCounter, nil + case ViewSelectorInstrumentTypeObservableUpDownCounter: + return sdkmetric.InstrumentKindObservableUpDownCounter, nil + case ViewSelectorInstrumentTypeObservableGauge: + return sdkmetric.InstrumentKindObservableGauge, nil + } + + return instrumentKindUndefined, errors.New("instrument_type: invalid value") +} + +func instrumentIsEmpty(i sdkmetric.Instrument) bool { + return i.Name == "" && + i.Description == "" && + i.Kind == instrumentKindUndefined && + i.Unit == "" && + i.Scope == zeroScope +} + +func boolOrFalse(pBool *bool) bool { + if pBool == nil { + return false + } + return *pBool +} + +func int32OrZero(pInt *int) int32 { + if pInt == nil { + return 0 + } + i := *pInt + if i > math.MaxInt32 { + return math.MaxInt32 + } + if i < math.MinInt32 { + return math.MinInt32 + } + return int32(i) // nolint: gosec // Overflow and underflow checked above. +} + +func strOrEmpty(pStr *string) string { + if pStr == nil { + return "" + } + return *pStr +} diff --git a/vendor/go.opentelemetry.io/contrib/config/resource.go b/vendor/go.opentelemetry.io/contrib/config/resource.go index 302a59637e6..020d6660b23 100644 --- a/vendor/go.opentelemetry.io/contrib/config/resource.go +++ b/vendor/go.opentelemetry.io/contrib/config/resource.go @@ -8,7 +8,6 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.25.0" ) func keyVal(k string, v any) attribute.KeyValue { @@ -50,14 +49,10 @@ func newResource(res *Resource) (*resource.Resource, error) { if res == nil || res.Attributes == nil { return resource.Default(), nil } - attrs := []attribute.KeyValue{ - semconv.ServiceName(*res.Attributes.ServiceName), - } + var attrs []attribute.KeyValue - if props, ok := res.Attributes.AdditionalProperties.(map[string]any); ok { - for k, v := range props { - attrs = append(attrs, keyVal(k, v)) - } + for k, v := range res.Attributes { + attrs = append(attrs, keyVal(k, v)) } return resource.Merge(resource.Default(), diff --git a/vendor/go.opentelemetry.io/contrib/config/trace.go b/vendor/go.opentelemetry.io/contrib/config/trace.go index 3666cea5378..aff4c3584ec 100644 --- a/vendor/go.opentelemetry.io/contrib/config/trace.go +++ b/vendor/go.opentelemetry.io/contrib/config/trace.go @@ -83,7 +83,7 @@ func spanProcessor(ctx context.Context, processor SpanProcessor) (sdktrace.SpanP } return sdktrace.NewSimpleSpanProcessor(exp), nil } - return nil, fmt.Errorf("unsupported span processor type %v", processor) + return nil, fmt.Errorf("unsupported span processor type, must be one of simple or batch") } func otlpGRPCSpanExporter(ctx context.Context, otlpConfig *OTLP) (sdktrace.SpanExporter, error) { diff --git a/vendor/go.opentelemetry.io/contrib/exporters/autoexport/logs.go b/vendor/go.opentelemetry.io/contrib/exporters/autoexport/logs.go index 9e926ce32e1..618379c621d 100644 --- a/vendor/go.opentelemetry.io/contrib/exporters/autoexport/logs.go +++ b/vendor/go.opentelemetry.io/contrib/exporters/autoexport/logs.go @@ -7,6 +7,7 @@ import ( "context" "os" + 
"go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc" "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp" "go.opentelemetry.io/otel/exporters/stdout/stdoutlog" "go.opentelemetry.io/otel/sdk/log" @@ -31,6 +32,8 @@ var logsSignal = newSignal[log.Exporter]("OTEL_LOGS_EXPORTER") // supported values: // - "http/protobuf" (default) - protobuf-encoded data over HTTP connection; // see: [go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp] +// - "grpc" - gRPC with protobuf-encoded data over HTTP/2 connection; +// see: [go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc] // // OTEL_EXPORTER_OTLP_LOGS_PROTOCOL defines OTLP exporter's transport protocol for the logs signal; // supported values are the same as OTEL_EXPORTER_OTLP_PROTOCOL. @@ -67,9 +70,8 @@ func init() { } switch proto { - // grpc is not supported yet, should comment out when it is supported - // case "grpc": - // return otlploggrpc.New(ctx) + case "grpc": + return otlploggrpc.New(ctx) case "http/protobuf": return otlploghttp.New(ctx) default: diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go index a199b36b4fa..9e87fb4bb19 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go @@ -30,7 +30,7 @@ const ( type InterceptorFilter func(*InterceptorInfo) bool // Filter is a predicate used to determine whether a given request in -// should be instrumented by the attatched RPC tag info. +// should be instrumented by the attached RPC tag info. // A Filter must return true if the request should be instrumented. type Filter func(*stats.RPCTagInfo) bool @@ -42,6 +42,8 @@ type config struct { TracerProvider trace.TracerProvider MeterProvider metric.MeterProvider SpanStartOptions []trace.SpanStartOption + SpanAttributes []attribute.KeyValue + MetricAttributes []attribute.KeyValue ReceivedEvent bool SentEvent bool @@ -49,11 +51,11 @@ type config struct { tracer trace.Tracer meter metric.Meter - rpcDuration metric.Float64Histogram - rpcRequestSize metric.Int64Histogram - rpcResponseSize metric.Int64Histogram - rpcRequestsPerRPC metric.Int64Histogram - rpcResponsesPerRPC metric.Int64Histogram + rpcDuration metric.Float64Histogram + rpcInBytes metric.Int64Histogram + rpcOutBytes metric.Int64Histogram + rpcInMessages metric.Int64Histogram + rpcOutMessages metric.Int64Histogram } // Option applies an option value for a config. 
@@ -94,46 +96,64 @@ func newConfig(opts []Option, role string) *config { } } - c.rpcRequestSize, err = c.meter.Int64Histogram("rpc."+role+".request.size", + rpcRequestSize, err := c.meter.Int64Histogram("rpc."+role+".request.size", metric.WithDescription("Measures size of RPC request messages (uncompressed)."), metric.WithUnit("By")) if err != nil { otel.Handle(err) - if c.rpcRequestSize == nil { - c.rpcRequestSize = noop.Int64Histogram{} + if rpcRequestSize == nil { + rpcRequestSize = noop.Int64Histogram{} } } - c.rpcResponseSize, err = c.meter.Int64Histogram("rpc."+role+".response.size", + rpcResponseSize, err := c.meter.Int64Histogram("rpc."+role+".response.size", metric.WithDescription("Measures size of RPC response messages (uncompressed)."), metric.WithUnit("By")) if err != nil { otel.Handle(err) - if c.rpcResponseSize == nil { - c.rpcResponseSize = noop.Int64Histogram{} + if rpcResponseSize == nil { + rpcResponseSize = noop.Int64Histogram{} } } - c.rpcRequestsPerRPC, err = c.meter.Int64Histogram("rpc."+role+".requests_per_rpc", + rpcRequestsPerRPC, err := c.meter.Int64Histogram("rpc."+role+".requests_per_rpc", metric.WithDescription("Measures the number of messages received per RPC. Should be 1 for all non-streaming RPCs."), metric.WithUnit("{count}")) if err != nil { otel.Handle(err) - if c.rpcRequestsPerRPC == nil { - c.rpcRequestsPerRPC = noop.Int64Histogram{} + if rpcRequestsPerRPC == nil { + rpcRequestsPerRPC = noop.Int64Histogram{} } } - c.rpcResponsesPerRPC, err = c.meter.Int64Histogram("rpc."+role+".responses_per_rpc", + rpcResponsesPerRPC, err := c.meter.Int64Histogram("rpc."+role+".responses_per_rpc", metric.WithDescription("Measures the number of messages received per RPC. Should be 1 for all non-streaming RPCs."), metric.WithUnit("{count}")) if err != nil { otel.Handle(err) - if c.rpcResponsesPerRPC == nil { - c.rpcResponsesPerRPC = noop.Int64Histogram{} + if rpcResponsesPerRPC == nil { + rpcResponsesPerRPC = noop.Int64Histogram{} } } + switch role { + case "client": + c.rpcInBytes = rpcResponseSize + c.rpcInMessages = rpcResponsesPerRPC + c.rpcOutBytes = rpcRequestSize + c.rpcOutMessages = rpcRequestsPerRPC + case "server": + c.rpcInBytes = rpcRequestSize + c.rpcInMessages = rpcRequestsPerRPC + c.rpcOutBytes = rpcResponseSize + c.rpcOutMessages = rpcResponsesPerRPC + default: + c.rpcInBytes = noop.Int64Histogram{} + c.rpcInMessages = noop.Int64Histogram{} + c.rpcOutBytes = noop.Int64Histogram{} + c.rpcOutMessages = noop.Int64Histogram{} + } + return c } @@ -257,3 +277,29 @@ func (o spanStartOption) apply(c *config) { func WithSpanOptions(opts ...trace.SpanStartOption) Option { return spanStartOption{opts} } + +type spanAttributesOption struct{ a []attribute.KeyValue } + +func (o spanAttributesOption) apply(c *config) { + if o.a != nil { + c.SpanAttributes = o.a + } +} + +// WithSpanAttributes returns an Option to add custom attributes to the spans. +func WithSpanAttributes(a ...attribute.KeyValue) Option { + return spanAttributesOption{a: a} +} + +type metricAttributesOption struct{ a []attribute.KeyValue } + +func (o metricAttributesOption) apply(c *config) { + if o.a != nil { + c.MetricAttributes = o.a + } +} + +// WithMetricAttributes returns an Option to add custom attributes to the metrics. 
+func WithMetricAttributes(a ...attribute.KeyValue) Option { + return metricAttributesOption{a: a} +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go index 7f19058e4c4..7d5ed058082 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go @@ -7,6 +7,7 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/rpc.md import ( "context" + "errors" "io" "net" "strconv" @@ -136,7 +137,7 @@ func (w *clientStream) RecvMsg(m interface{}) error { if err == nil && !w.desc.ServerStreams { w.endSpan(nil) - } else if err == io.EOF { + } else if errors.Is(err, io.EOF) { w.endSpan(nil) } else if err != nil { w.endSpan(err) @@ -333,7 +334,7 @@ func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor { elapsedTime := float64(time.Since(before)) / float64(time.Millisecond) metricAttrs = append(metricAttrs, grpcStatusCodeAttr) - cfg.rpcDuration.Record(ctx, elapsedTime, metric.WithAttributes(metricAttrs...)) + cfg.rpcDuration.Record(ctx, elapsedTime, metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) return resp, err } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go index fad58733fec..c01cb897cd3 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go @@ -13,21 +13,22 @@ import ( "google.golang.org/grpc/stats" "google.golang.org/grpc/status" - "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/metric" semconv "go.opentelemetry.io/otel/semconv/v1.17.0" "go.opentelemetry.io/otel/trace" + + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal" ) type gRPCContextKey struct{} type gRPCContext struct { - messagesReceived int64 - messagesSent int64 - metricAttrs []attribute.KeyValue - record bool + inMessages int64 + outMessages int64 + metricAttrs []attribute.KeyValue + record bool } type serverHandler struct { @@ -62,11 +63,11 @@ func (h *serverHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)), name, trace.WithSpanKind(trace.SpanKindServer), - trace.WithAttributes(attrs...), + trace.WithAttributes(append(attrs, h.config.SpanAttributes...)...), ) gctx := gRPCContext{ - metricAttrs: attrs, + metricAttrs: append(attrs, h.config.MetricAttributes...), record: true, } if h.config.Filter != nil { @@ -102,11 +103,11 @@ func (h *clientHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont ctx, name, trace.WithSpanKind(trace.SpanKindClient), - trace.WithAttributes(attrs...), + trace.WithAttributes(append(attrs, h.config.SpanAttributes...)...), ) gctx := gRPCContext{ - metricAttrs: attrs, + metricAttrs: append(attrs, h.config.MetricAttributes...), record: true, } 
if h.config.Filter != nil { @@ -150,8 +151,8 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool case *stats.Begin: case *stats.InPayload: if gctx != nil { - messageId = atomic.AddInt64(&gctx.messagesReceived, 1) - c.rpcRequestSize.Record(ctx, int64(rs.Length), metric.WithAttributes(metricAttrs...)) + messageId = atomic.AddInt64(&gctx.inMessages, 1) + c.rpcInBytes.Record(ctx, int64(rs.Length), metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) } if c.ReceivedEvent { @@ -166,8 +167,8 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool } case *stats.OutPayload: if gctx != nil { - messageId = atomic.AddInt64(&gctx.messagesSent, 1) - c.rpcResponseSize.Record(ctx, int64(rs.Length), metric.WithAttributes(metricAttrs...)) + messageId = atomic.AddInt64(&gctx.outMessages, 1) + c.rpcOutBytes.Record(ctx, int64(rs.Length), metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) } if c.SentEvent { @@ -204,14 +205,17 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool span.End() metricAttrs = append(metricAttrs, rpcStatusAttr) + // Allocate vararg slice once. + recordOpts := []metric.RecordOption{metric.WithAttributeSet(attribute.NewSet(metricAttrs...))} // Use floating point division here for higher precision (instead of Millisecond method). + // Measure right before calling Record() to capture as much elapsed time as possible. elapsedTime := float64(rs.EndTime.Sub(rs.BeginTime)) / float64(time.Millisecond) - c.rpcDuration.Record(ctx, elapsedTime, metric.WithAttributes(metricAttrs...)) + c.rpcDuration.Record(ctx, elapsedTime, recordOpts...) if gctx != nil { - c.rpcRequestsPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesReceived), metric.WithAttributes(metricAttrs...)) - c.rpcResponsesPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesSent), metric.WithAttributes(metricAttrs...)) + c.rpcInMessages.Record(ctx, atomic.LoadInt64(&gctx.inMessages), recordOpts...) + c.rpcOutMessages.Record(ctx, atomic.LoadInt64(&gctx.outMessages), recordOpts...) } default: return diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go index 3f9cfda5413..80e5f2f6fc9 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go @@ -5,7 +5,7 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g // Version is the current release version of the gRPC instrumentation. func Version() string { - return "0.52.0" + return "0.59.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go index 6aae83bfd20..b25641c55d3 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go @@ -18,7 +18,7 @@ var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)} // Get is a convenient replacement for http.Get that adds a span around the request. 
func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) { - req, err := http.NewRequestWithContext(ctx, "GET", targetURL, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, targetURL, nil) if err != nil { return nil, err } @@ -27,7 +27,7 @@ func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) // Head is a convenient replacement for http.Head that adds a span around the request. func Head(ctx context.Context, targetURL string) (resp *http.Response, err error) { - req, err := http.NewRequestWithContext(ctx, "HEAD", targetURL, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodHead, targetURL, nil) if err != nil { return nil, err } @@ -36,7 +36,7 @@ func Head(ctx context.Context, targetURL string) (resp *http.Response, err error // Post is a convenient replacement for http.Post that adds a span around the request. func Post(ctx context.Context, targetURL, contentType string, body io.Reader) (resp *http.Response, err error) { - req, err := http.NewRequestWithContext(ctx, "POST", targetURL, body) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, targetURL, body) if err != nil { return nil, err } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go index 5d6e6156b7b..a83a026274a 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go @@ -18,13 +18,6 @@ const ( WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded) ) -// Client HTTP metrics. -const ( - clientRequestSize = "http.client.request.size" // Outgoing request bytes total - clientResponseSize = "http.client.response.size" // Outgoing response bytes total - clientDuration = "http.client.duration" // Outgoing end to end duration, milliseconds -) - // Filter is a predicate used to determine whether a given http.request should // be traced. A Filter must return true if the request should be traced. type Filter func(*http.Request) bool diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index 33580a35b77..e555a475f13 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -81,12 +81,6 @@ func (h *middleware) configure(c *config) { h.semconv = semconv.NewHTTPServer(c.Meter) } -func handleErr(err error) { - if err != nil { - otel.Handle(err) - } -} - // serveHTTP sets up tracing and calls the given next http.Handler with the span // context injected into the request context. func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http.Handler) { @@ -123,6 +117,11 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http } } + if startTime := StartTimeFromContext(ctx); !startTime.IsZero() { + opts = append(opts, trace.WithTimestamp(startTime)) + requestStartTime = startTime + } + ctx, span := tracer.Start(ctx, h.spanNameFormatter(h.operation, r), opts...) 
defer span.End() @@ -190,14 +189,18 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http // Use floating point division here for higher precision (instead of Millisecond method). elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) - h.semconv.RecordMetrics(ctx, semconv.MetricData{ - ServerName: h.server, - Req: r, - StatusCode: statusCode, - AdditionalAttributes: labeler.Get(), - RequestSize: bw.BytesRead(), - ResponseSize: bytesWritten, - ElapsedTime: elapsedTime, + h.semconv.RecordMetrics(ctx, semconv.ServerMetricData{ + ServerName: h.server, + ResponseSize: bytesWritten, + MetricAttributes: semconv.MetricAttributes{ + Req: r, + StatusCode: statusCode, + AdditionalAttributes: labeler.Get(), + }, + MetricData: semconv.MetricData{ + RequestSize: bw.BytesRead(), + ElapsedTime: elapsedTime, + }, }) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go index 9cae4cab86a..3b036f8a37b 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go @@ -9,6 +9,7 @@ import ( "net/http" "os" "strings" + "sync" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" @@ -50,9 +51,9 @@ type HTTPServer struct { // The req Host will be used to determine the server instead. func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { if s.duplicate { - return append(oldHTTPServer{}.RequestTraceAttrs(server, req), newHTTPServer{}.RequestTraceAttrs(server, req)...) + return append(OldHTTPServer{}.RequestTraceAttrs(server, req), CurrentHTTPServer{}.RequestTraceAttrs(server, req)...) } - return oldHTTPServer{}.RequestTraceAttrs(server, req) + return OldHTTPServer{}.RequestTraceAttrs(server, req) } // ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. @@ -60,14 +61,14 @@ func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request) []attrib // If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. func (s HTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { if s.duplicate { - return append(oldHTTPServer{}.ResponseTraceAttrs(resp), newHTTPServer{}.ResponseTraceAttrs(resp)...) + return append(OldHTTPServer{}.ResponseTraceAttrs(resp), CurrentHTTPServer{}.ResponseTraceAttrs(resp)...) } - return oldHTTPServer{}.ResponseTraceAttrs(resp) + return OldHTTPServer{}.ResponseTraceAttrs(resp) } // Route returns the attribute for the route. 
func (s HTTPServer) Route(route string) attribute.KeyValue { - return oldHTTPServer{}.Route(route) + return OldHTTPServer{}.Route(route) } // Status returns a span status code and message for an HTTP status code @@ -83,29 +84,46 @@ func (s HTTPServer) Status(code int) (codes.Code, string) { return codes.Unset, "" } -type MetricData struct { - ServerName string +type ServerMetricData struct { + ServerName string + ResponseSize int64 + + MetricData + MetricAttributes +} + +type MetricAttributes struct { Req *http.Request StatusCode int AdditionalAttributes []attribute.KeyValue +} - RequestSize int64 - ResponseSize int64 - ElapsedTime float64 +type MetricData struct { + RequestSize int64 + ElapsedTime float64 +} + +var metricAddOptionPool = &sync.Pool{ + New: func() interface{} { + return &[]metric.AddOption{} + }, } -func (s HTTPServer) RecordMetrics(ctx context.Context, md MetricData) { +func (s HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) { if s.requestBytesCounter == nil || s.responseBytesCounter == nil || s.serverLatencyMeasure == nil { - // This will happen if an HTTPServer{} is used insted of NewHTTPServer. + // This will happen if an HTTPServer{} is used instead of NewHTTPServer. return } - attributes := oldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) + attributes := OldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) o := metric.WithAttributeSet(attribute.NewSet(attributes...)) - addOpts := []metric.AddOption{o} // Allocate vararg slice once. - s.requestBytesCounter.Add(ctx, md.RequestSize, addOpts...) - s.responseBytesCounter.Add(ctx, md.ResponseSize, addOpts...) + addOpts := metricAddOptionPool.Get().(*[]metric.AddOption) + *addOpts = append(*addOpts, o) + s.requestBytesCounter.Add(ctx, md.RequestSize, *addOpts...) + s.responseBytesCounter.Add(ctx, md.ResponseSize, *addOpts...) s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o) + *addOpts = (*addOpts)[:0] + metricAddOptionPool.Put(addOpts) // TODO: Duplicate Metrics } @@ -116,34 +134,43 @@ func NewHTTPServer(meter metric.Meter) HTTPServer { server := HTTPServer{ duplicate: duplicate, } - server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = oldHTTPServer{}.createMeasures(meter) + server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = OldHTTPServer{}.createMeasures(meter) return server } type HTTPClient struct { duplicate bool + + // old metrics + requestBytesCounter metric.Int64Counter + responseBytesCounter metric.Int64Counter + latencyMeasure metric.Float64Histogram } -func NewHTTPClient() HTTPClient { +func NewHTTPClient(meter metric.Meter) HTTPClient { env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN")) - return HTTPClient{duplicate: env == "http/dup"} + client := HTTPClient{ + duplicate: env == "http/dup", + } + client.requestBytesCounter, client.responseBytesCounter, client.latencyMeasure = OldHTTPClient{}.createMeasures(meter) + return client } // RequestTraceAttrs returns attributes for an HTTP request made by a client. func (c HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { if c.duplicate { - return append(oldHTTPClient{}.RequestTraceAttrs(req), newHTTPClient{}.RequestTraceAttrs(req)...) + return append(OldHTTPClient{}.RequestTraceAttrs(req), CurrentHTTPClient{}.RequestTraceAttrs(req)...) 
} - return oldHTTPClient{}.RequestTraceAttrs(req) + return OldHTTPClient{}.RequestTraceAttrs(req) } // ResponseTraceAttrs returns metric attributes for an HTTP request made by a client. func (c HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { if c.duplicate { - return append(oldHTTPClient{}.ResponseTraceAttrs(resp), newHTTPClient{}.ResponseTraceAttrs(resp)...) + return append(OldHTTPClient{}.ResponseTraceAttrs(resp), CurrentHTTPClient{}.ResponseTraceAttrs(resp)...) } - return oldHTTPClient{}.ResponseTraceAttrs(resp) + return OldHTTPClient{}.ResponseTraceAttrs(resp) } func (c HTTPClient) Status(code int) (codes.Code, string) { @@ -158,8 +185,53 @@ func (c HTTPClient) Status(code int) (codes.Code, string) { func (c HTTPClient) ErrorType(err error) attribute.KeyValue { if c.duplicate { - return newHTTPClient{}.ErrorType(err) + return CurrentHTTPClient{}.ErrorType(err) } return attribute.KeyValue{} } + +type MetricOpts struct { + measurement metric.MeasurementOption + addOptions metric.AddOption +} + +func (o MetricOpts) MeasurementOption() metric.MeasurementOption { + return o.measurement +} + +func (o MetricOpts) AddOptions() metric.AddOption { + return o.addOptions +} + +func (c HTTPClient) MetricOptions(ma MetricAttributes) MetricOpts { + attributes := OldHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes) + // TODO: Duplicate Metrics + set := metric.WithAttributeSet(attribute.NewSet(attributes...)) + return MetricOpts{ + measurement: set, + addOptions: set, + } +} + +func (s HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts MetricOpts) { + if s.requestBytesCounter == nil || s.latencyMeasure == nil { + // This will happen if an HTTPClient{} is used instead of NewHTTPClient(). + return + } + + s.requestBytesCounter.Add(ctx, md.RequestSize, opts.AddOptions()) + s.latencyMeasure.Record(ctx, md.ElapsedTime, opts.MeasurementOption()) + + // TODO: Duplicate Metrics +} + +func (s HTTPClient) RecordResponseSize(ctx context.Context, responseData int64, opts metric.AddOption) { + if s.responseBytesCounter == nil { + // This will happen if an HTTPClient{} is used instead of NewHTTPClient(). + return + } + + s.responseBytesCounter.Add(ctx, responseData, opts) + // TODO: Duplicate Metrics +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go index 745b8c67bc4..dc9ec7bc39e 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go @@ -14,7 +14,7 @@ import ( semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" ) -type newHTTPServer struct{} +type CurrentHTTPServer struct{} // TraceRequest returns trace attributes for an HTTP request received by a // server. @@ -32,18 +32,18 @@ type newHTTPServer struct{} // // If the primary server name is not known, server should be an empty string. // The req Host will be used to determine the server instead. 
-func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { +func (n CurrentHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { count := 3 // ServerAddress, Method, Scheme var host string var p int if server == "" { - host, p = splitHostPort(req.Host) + host, p = SplitHostPort(req.Host) } else { // Prioritize the primary server name. - host, p = splitHostPort(server) + host, p = SplitHostPort(server) if p < 0 { - _, p = splitHostPort(req.Host) + _, p = SplitHostPort(req.Host) } } @@ -59,7 +59,7 @@ func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []att scheme := n.scheme(req.TLS != nil) - if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" { + if peer, peerPort := SplitHostPort(req.RemoteAddr); peer != "" { // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a // file-path that would be interpreted with a sock family. count++ @@ -104,7 +104,7 @@ func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []att attrs = append(attrs, methodOriginal) } - if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" { + if peer, peerPort := SplitHostPort(req.RemoteAddr); peer != "" { // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a // file-path that would be interpreted with a sock family. attrs = append(attrs, semconvNew.NetworkPeerAddress(peer)) @@ -135,7 +135,7 @@ func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []att return attrs } -func (n newHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) { +func (n CurrentHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) { if method == "" { return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} } @@ -150,7 +150,7 @@ func (n newHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyV return semconvNew.HTTPRequestMethodGet, orig } -func (n newHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive +func (n CurrentHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive if https { return semconvNew.URLScheme("https") } @@ -160,7 +160,7 @@ func (n newHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive // TraceResponse returns trace attributes for telemetry from an HTTP response. // // If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. -func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { +func (n CurrentHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { var count int if resp.ReadBytes > 0 { @@ -195,14 +195,14 @@ func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.Ke } // Route returns the attribute for the route. -func (n newHTTPServer) Route(route string) attribute.KeyValue { +func (n CurrentHTTPServer) Route(route string) attribute.KeyValue { return semconvNew.HTTPRoute(route) } -type newHTTPClient struct{} +type CurrentHTTPClient struct{} // RequestTraceAttrs returns trace attributes for an HTTP request made by a client. 
-func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { +func (n CurrentHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { /* below attributes are returned: - http.request.method @@ -222,7 +222,7 @@ func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue var requestHost string var requestPort int for _, hostport := range []string{urlHost, req.Header.Get("Host")} { - requestHost, requestPort = splitHostPort(hostport) + requestHost, requestPort = SplitHostPort(hostport) if requestHost != "" || requestPort > 0 { break } @@ -284,7 +284,7 @@ func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue } // ResponseTraceAttrs returns trace attributes for an HTTP response made by a client. -func (n newHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { +func (n CurrentHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { /* below attributes are returned: - http.response.status_code @@ -311,7 +311,7 @@ func (n newHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyVa return attrs } -func (n newHTTPClient) ErrorType(err error) attribute.KeyValue { +func (n CurrentHTTPClient) ErrorType(err error) attribute.KeyValue { t := reflect.TypeOf(err) var value string if t.PkgPath() == "" && t.Name() == "" { @@ -328,7 +328,7 @@ func (n newHTTPClient) ErrorType(err error) attribute.KeyValue { return semconvNew.ErrorTypeKey.String(value) } -func (n newHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) { +func (n CurrentHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) { if method == "" { return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go index e6e14924f57..93e8d0f94c1 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go @@ -14,14 +14,14 @@ import ( semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" ) -// splitHostPort splits a network address hostport of the form "host", +// SplitHostPort splits a network address hostport of the form "host", // "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port", // "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and // port. // // An empty host is returned if it is not provided or unparsable. A negative // port is returned if it is not provided or unparsable. 
-func splitHostPort(hostport string) (host string, port int) { +func SplitHostPort(hostport string) (host string, port int) { port = -1 if strings.HasPrefix(hostport, "[") { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go index c999b05e675..c042249dd72 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go @@ -17,7 +17,7 @@ import ( semconv "go.opentelemetry.io/otel/semconv/v1.20.0" ) -type oldHTTPServer struct{} +type OldHTTPServer struct{} // RequestTraceAttrs returns trace attributes for an HTTP request received by a // server. @@ -35,14 +35,14 @@ type oldHTTPServer struct{} // // If the primary server name is not known, server should be an empty string. // The req Host will be used to determine the server instead. -func (o oldHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { +func (o OldHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { return semconvutil.HTTPServerRequest(server, req) } // ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. // // If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. -func (o oldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { +func (o OldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { attributes := []attribute.KeyValue{} if resp.ReadBytes > 0 { @@ -67,7 +67,7 @@ func (o oldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.Ke } // Route returns the attribute for the route. -func (o oldHTTPServer) Route(route string) attribute.KeyValue { +func (o OldHTTPServer) Route(route string) attribute.KeyValue { return semconv.HTTPRoute(route) } @@ -84,7 +84,7 @@ const ( serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds ) -func (h oldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { +func (h OldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { if meter == nil { return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{} } @@ -113,17 +113,17 @@ func (h oldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, return requestBytesCounter, responseBytesCounter, serverLatencyMeasure } -func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { +func (o OldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { n := len(additionalAttributes) + 3 var host string var p int if server == "" { - host, p = splitHostPort(req.Host) + host, p = SplitHostPort(req.Host) } else { // Prioritize the primary server name. 
- host, p = splitHostPort(server) + host, p = SplitHostPort(server) if p < 0 { - _, p = splitHostPort(req.Host) + _, p = SplitHostPort(req.Host) } } hostPort := requiredHTTPPort(req.TLS != nil, p) @@ -144,7 +144,7 @@ func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, status attributes := slices.Grow(additionalAttributes, n) attributes = append(attributes, - o.methodMetric(req.Method), + standardizeHTTPMethodMetric(req.Method), o.scheme(req.TLS != nil), semconv.NetHostName(host)) @@ -164,29 +164,111 @@ func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, status return attributes } -func (o oldHTTPServer) methodMetric(method string) attribute.KeyValue { - method = strings.ToUpper(method) - switch method { - case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: - default: - method = "_OTHER" - } - return semconv.HTTPMethod(method) -} - -func (o oldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive +func (o OldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive if https { return semconv.HTTPSchemeHTTPS } return semconv.HTTPSchemeHTTP } -type oldHTTPClient struct{} +type OldHTTPClient struct{} -func (o oldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { +func (o OldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { return semconvutil.HTTPClientRequest(req) } -func (o oldHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { +func (o OldHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { return semconvutil.HTTPClientResponse(resp) } + +func (o OldHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { + /* The following semantic conventions are returned if present: + http.method string + http.status_code int + net.peer.name string + net.peer.port int + */ + + n := 2 // method, peer name. + var h string + if req.URL != nil { + h = req.URL.Host + } + var requestHost string + var requestPort int + for _, hostport := range []string{h, req.Header.Get("Host")} { + requestHost, requestPort = SplitHostPort(hostport) + if requestHost != "" || requestPort > 0 { + break + } + } + + port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort) + if port > 0 { + n++ + } + + if statusCode > 0 { + n++ + } + + attributes := slices.Grow(additionalAttributes, n) + attributes = append(attributes, + standardizeHTTPMethodMetric(req.Method), + semconv.NetPeerName(requestHost), + ) + + if port > 0 { + attributes = append(attributes, semconv.NetPeerPort(port)) + } + + if statusCode > 0 { + attributes = append(attributes, semconv.HTTPStatusCode(statusCode)) + } + return attributes +} + +// Client HTTP metrics. 
+const ( + clientRequestSize = "http.client.request.size" // Outgoing request bytes total + clientResponseSize = "http.client.response.size" // Outgoing response bytes total + clientDuration = "http.client.duration" // Outgoing end to end duration, milliseconds +) + +func (o OldHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { + if meter == nil { + return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{} + } + requestBytesCounter, err := meter.Int64Counter( + clientRequestSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP request messages."), + ) + handleErr(err) + + responseBytesCounter, err := meter.Int64Counter( + clientResponseSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP response messages."), + ) + handleErr(err) + + latencyMeasure, err := meter.Float64Histogram( + clientDuration, + metric.WithUnit("ms"), + metric.WithDescription("Measures the duration of outbound HTTP requests."), + ) + handleErr(err) + + return requestBytesCounter, responseBytesCounter, latencyMeasure +} + +func standardizeHTTPMethodMetric(method string) attribute.KeyValue { + method = strings.ToUpper(method) + switch method { + case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: + default: + method = "_OTHER" + } + return semconv.HTTPMethod(method) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/start_time_context.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/start_time_context.go new file mode 100644 index 00000000000..9476ef01b01 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/start_time_context.go @@ -0,0 +1,29 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "context" + "time" +) + +type startTimeContextKeyType int + +const startTimeContextKey startTimeContextKeyType = 0 + +// ContextWithStartTime returns a new context with the provided start time. The +// start time will be used for metrics and traces emitted by the +// instrumentation. Only one start time can be injected into the context; +// injecting it multiple times overrides the previous value. +func ContextWithStartTime(parent context.Context, start time.Time) context.Context { + return context.WithValue(parent, startTimeContextKey, start) +} + +// StartTimeFromContext retrieves a time.Time from the provided context if one +// is available. If no start time was found in the provided context, the +// zero time.Time is returned.
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go index b4119d3438b..39681ad4b09 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go @@ -13,11 +13,9 @@ import ( "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" @@ -29,7 +27,6 @@ type Transport struct { rt http.RoundTripper tracer trace.Tracer - meter metric.Meter propagators propagation.TextMapPropagator spanStartOptions []trace.SpanStartOption filters []Filter @@ -37,10 +34,7 @@ type Transport struct { clientTrace func(context.Context) *httptrace.ClientTrace metricAttributesFn func(*http.Request) []attribute.KeyValue - semconv semconv.HTTPClient - requestBytesCounter metric.Int64Counter - responseBytesCounter metric.Int64Counter - latencyMeasure metric.Float64Histogram + semconv semconv.HTTPClient } var _ http.RoundTripper = &Transport{} @@ -57,8 +51,7 @@ func NewTransport(base http.RoundTripper, opts ...Option) *Transport { } t := Transport{ - rt: base, - semconv: semconv.NewHTTPClient(), + rt: base, } defaultOpts := []Option{ @@ -68,46 +61,21 @@ func NewTransport(base http.RoundTripper, opts ...Option) *Transport { c := newConfig(append(defaultOpts, opts...)...) t.applyConfig(c) - t.createMeasures() return &t } func (t *Transport) applyConfig(c *config) { t.tracer = c.Tracer - t.meter = c.Meter t.propagators = c.Propagators t.spanStartOptions = c.SpanStartOptions t.filters = c.Filters t.spanNameFormatter = c.SpanNameFormatter t.clientTrace = c.ClientTrace + t.semconv = semconv.NewHTTPClient(c.Meter) t.metricAttributesFn = c.MetricAttributesFn } -func (t *Transport) createMeasures() { - var err error - t.requestBytesCounter, err = t.meter.Int64Counter( - clientRequestSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP request messages."), - ) - handleErr(err) - - t.responseBytesCounter, err = t.meter.Int64Counter( - clientResponseSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP response messages."), - ) - handleErr(err) - - t.latencyMeasure, err = t.meter.Float64Histogram( - clientDuration, - metric.WithUnit("ms"), - metric.WithDescription("Measures the duration of outbound HTTP requests."), - ) - handleErr(err) -} - func defaultTransportFormatter(_ string, r *http.Request) string { return "HTTP " + r.Method } @@ -177,16 +145,15 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { } // metrics - metricAttrs := append(append(labeler.Get(), semconvutil.HTTPClientRequestMetrics(r)...), t.metricAttributesFromRequest(r)...)
- if res.StatusCode > 0 { - metricAttrs = append(metricAttrs, semconv.HTTPStatusCode(res.StatusCode)) - } - o := metric.WithAttributeSet(attribute.NewSet(metricAttrs...)) + metricOpts := t.semconv.MetricOptions(semconv.MetricAttributes{ + Req: r, + StatusCode: res.StatusCode, + AdditionalAttributes: append(labeler.Get(), t.metricAttributesFromRequest(r)...), + }) - t.requestBytesCounter.Add(ctx, bw.BytesRead(), o) // For handling response bytes we leverage a callback when the client reads the http response readRecordFunc := func(n int64) { - t.responseBytesCounter.Add(ctx, n, o) + t.semconv.RecordResponseSize(ctx, n, metricOpts.AddOptions()) } // traces @@ -198,9 +165,12 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { // Use floating point division here for higher precision (instead of Millisecond method). elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) - t.latencyMeasure.Record(ctx, elapsedTime, o) + t.semconv.RecordMetrics(ctx, semconv.MetricData{ + RequestSize: bw.BytesRead(), + ElapsedTime: elapsedTime, + }, metricOpts) - return res, err + return res, nil } func (t *Transport) metricAttributesFromRequest(r *http.Request) []attribute.KeyValue { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index 1133961d393..353e43b91fd 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -5,7 +5,7 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http // Version is the current release version of the otelhttp instrumentation. 
func Version() string { - return "0.55.0" + return "0.58.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/propagators/b3/b3_propagator.go b/vendor/go.opentelemetry.io/contrib/propagators/b3/b3_propagator.go index 905cd71fe8c..45d0811a26e 100644 --- a/vendor/go.opentelemetry.io/contrib/propagators/b3/b3_propagator.go +++ b/vendor/go.opentelemetry.io/contrib/propagators/b3/b3_propagator.go @@ -262,11 +262,11 @@ func extractSingle(ctx context.Context, contextHeader string) (context.Context, case string(contextHeader[traceID64BitsWidth]) == "-": // traceID must be 64 bits pos += traceID64BitsWidth // {traceID} - traceID = b3TraceIDPadding + string(contextHeader[0:pos]) + traceID = b3TraceIDPadding + contextHeader[0:pos] case string(contextHeader[32]) == "-": // traceID must be 128 bits pos += traceID128BitsWidth // {traceID} - traceID = string(contextHeader[0:pos]) + traceID = contextHeader[0:pos] default: return ctx, empty, errInvalidTraceIDValue } @@ -277,6 +277,9 @@ func extractSingle(ctx context.Context, contextHeader string) (context.Context, } pos += separatorWidth // {traceID}- + if headerLen < pos+spanIDWidth { + return ctx, empty, errInvalidSpanIDValue + } scc.SpanID, err = trace.SpanIDFromHex(contextHeader[pos : pos+spanIDWidth]) if err != nil { return ctx, empty, errInvalidSpanIDValue diff --git a/vendor/go.opentelemetry.io/contrib/propagators/b3/version.go b/vendor/go.opentelemetry.io/contrib/propagators/b3/version.go index 18932c8441a..cb5239ed692 100644 --- a/vendor/go.opentelemetry.io/contrib/propagators/b3/version.go +++ b/vendor/go.opentelemetry.io/contrib/propagators/b3/version.go @@ -5,7 +5,7 @@ package b3 // import "go.opentelemetry.io/contrib/propagators/b3" // Version is the current release version of the B3 propagator. func Version() string { - return "1.27.0" + return "1.33.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore index 895c7664beb..ae8577ef366 100644 --- a/vendor/go.opentelemetry.io/otel/.gitignore +++ b/vendor/go.opentelemetry.io/otel/.gitignore @@ -12,11 +12,3 @@ go.work go.work.sum gen/ - -/example/dice/dice -/example/namedtracer/namedtracer -/example/otel-collector/otel-collector -/example/opencensus/opencensus -/example/passthrough/passthrough -/example/prometheus/prometheus -/example/zipkin/zipkin diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index d09555506f7..ce3f40b609c 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -22,6 +22,7 @@ linters: - govet - ineffassign - misspell + - perfsprint - revive - staticcheck - tenv @@ -30,6 +31,7 @@ linters: - unconvert - unused - unparam + - usestdlibvars issues: # Maximum issues count per one linter. @@ -61,10 +63,11 @@ issues: text: "calls to (.+) only in main[(][)] or init[(][)] functions" linters: - revive - # It's okay to not run gosec in a test. + # It's okay to not run gosec and perfsprint in a test. - path: _test\.go linters: - gosec + - perfsprint # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) # as we commonly use it in tests and examples. 
- text: "G404:" @@ -95,6 +98,13 @@ linters-settings: - pkg: "crypto/md5" - pkg: "crypto/sha1" - pkg: "crypto/**/pkix" + auto/sdk: + files: + - "!internal/global/trace.go" + - "~internal/global/trace_test.go" + deny: + - pkg: "go.opentelemetry.io/auto/sdk" + desc: Do not use SDK from automatic instrumentation. otlp-internal: files: - "!**/exporters/otlp/internal/**/*.go" @@ -127,8 +137,6 @@ linters-settings: - "**/metric/**/*.go" - "**/bridge/*.go" - "**/bridge/**/*.go" - - "**/example/*.go" - - "**/example/**/*.go" - "**/trace/*.go" - "**/trace/**/*.go" - "**/log/*.go" @@ -156,6 +164,12 @@ linters-settings: locale: US ignore-words: - cancelled + perfsprint: + err-error: true + errorf: true + int-conversion: true + sprintf1: true + strconcat: true revive: # Sets the default failure confidence. # This means that linting errors with less than 0.8 confidence will be ignored. diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index 4b361d0269c..599d59cd130 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -11,6 +11,93 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm +## [1.34.0/0.56.0/0.10.0] 2025-01-17 + +### Changed + +- Remove the notices from `Logger` to make the whole Logs API user-facing in `go.opentelemetry.io/otel/log`. (#6167) + +### Fixed + +- Relax minimum Go version to 1.22.0 in various modules. (#6073) +- The `Type` name logged for the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` client is corrected from `otlphttpgrpc` to `otlptracegrpc`. (#6143) +- The `Type` name logged for the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` client is corrected from `otlphttphttp` to `otlptracehttp`. (#6143) + +## [1.33.0/0.55.0/0.9.0/0.0.12] 2024-12-12 + +### Added + +- Add `Reset` method to `SpanRecorder` in `go.opentelemetry.io/otel/sdk/trace/tracetest`. (#5994) +- Add `EnabledInstrument` interface in `go.opentelemetry.io/otel/sdk/metric/internal/x`. + This is an experimental interface that is implemented by synchronous instruments provided by `go.opentelemetry.io/otel/sdk/metric`. + Users can use it to avoid performing computationally expensive operations when recording measurements. + It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#6016) + +### Changed + +- The default global API now supports full auto-instrumentation from the `go.opentelemetry.io/auto` package. + See that package for more information. (#5920) +- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5929) +- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5929) +- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5929) +- Performance improvements for attribute value `AsStringSlice`, `AsFloat64Slice`, `AsInt64Slice`, `AsBoolSlice`. (#6011) +- Change `EnabledParameters` to have a `Severity` field instead of a getter and setter in `go.opentelemetry.io/otel/log`. (#6009) + +### Fixed + +- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`.
(#5954) +- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5954) +- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5954) +- Fix invalid exemplar keys in `go.opentelemetry.io/otel/exporters/prometheus`. (#5995) +- Fix attribute value truncation in `go.opentelemetry.io/otel/sdk/trace`. (#5997) +- Fix attribute value truncation in `go.opentelemetry.io/otel/sdk/log`. (#6032) + +## [1.32.0/0.54.0/0.8.0/0.0.11] 2024-11-08 + +### Added + +- Add `go.opentelemetry.io/otel/sdk/metric/exemplar.AlwaysOffFilter`, which can be used to disable exemplar recording. (#5850) +- Add `go.opentelemetry.io/otel/sdk/metric.WithExemplarFilter`, which can be used to configure the exemplar filter used by the metrics SDK. (#5850) +- Add `ExemplarReservoirProviderSelector` and `DefaultExemplarReservoirProviderSelector` to `go.opentelemetry.io/otel/sdk/metric`, which defines the exemplar reservoir to use based on the aggregation of the metric. (#5861) +- Add `ExemplarReservoirProviderSelector` to `go.opentelemetry.io/otel/sdk/metric.Stream` to allow using views to configure the exemplar reservoir to use for a metric. (#5861) +- Add `ReservoirProvider`, `HistogramReservoirProvider` and `FixedSizeReservoirProvider` to `go.opentelemetry.io/otel/sdk/metric/exemplar` to make it convenient to use providers of Reservoirs. (#5861) +- The `go.opentelemetry.io/otel/semconv/v1.27.0` package. + The package contains semantic conventions from the `v1.27.0` version of the OpenTelemetry Semantic Conventions. (#5894) +- Add `Attributes attribute.Set` field to `Scope` in `go.opentelemetry.io/otel/sdk/instrumentation`. (#5903) +- Add `Attributes attribute.Set` field to `ScopeRecords` in `go.opentelemetry.io/otel/log/logtest`. (#5927) +- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` adds instrumentation scope attributes. (#5934) +- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` adds instrumentation scope attributes. (#5934) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` adds instrumentation scope attributes. (#5935) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` adds instrumentation scope attributes. (#5935) +- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` adds instrumentation scope attributes. (#5933) +- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` adds instrumentation scope attributes. (#5933) +- `go.opentelemetry.io/otel/exporters/prometheus` adds instrumentation scope attributes in `otel_scope_info` metric as labels. (#5932) + +### Changed + +- Support scope attributes and make them identifying for `Tracer` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/trace`. (#5924) +- Support scope attributes and make them identifying for `Meter` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/metric`. (#5926) +- Support scope attributes and make them identifying for `Logger` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/log`. (#5925) +- Make schema URL and scope attributes identifying for `Tracer` in `go.opentelemetry.io/otel/bridge/opentracing`. (#5931) +- Clear unneeded slice elements to allow GC to collect the objects in `go.opentelemetry.io/otel/sdk/metric` and `go.opentelemetry.io/otel/sdk/trace`.
(#5804) + +### Fixed + +- Global MeterProvider registration unwraps global instrument Observers; the undocumented Unwrap() methods are now private. (#5881) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5892) +- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5911) +- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5915) +- Fix `go.opentelemetry.io/otel/exporters/prometheus` trying to add exemplars to Gauge metrics, which is unsupported. (#5912) +- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#5944) +- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5944) +- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#5944) +- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5944) +- Fix incorrect metrics generated from callbacks when multiple readers are used in `go.opentelemetry.io/otel/sdk/metric`. (#5900) + +### Removed + +- Remove all examples under `go.opentelemetry.io/otel/example` as they are moved to [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples). (#5930) + ## [1.31.0/0.53.0/0.7.0/0.0.10] 2024-10-11 ### Added @@ -3110,7 +3197,10 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.31.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.34.0...HEAD +[1.34.0/0.56.0/0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.34.0 +[1.33.0/0.55.0/0.9.0/0.0.12]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.33.0 +[1.32.0/0.54.0/0.8.0/0.0.11]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.32.0 [1.31.0/0.53.0/0.7.0/0.0.10]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.31.0 [1.30.0/0.52.0/0.6.0/0.0.9]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.30.0 [1.29.0/0.51.0/0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.29.0 diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index bb339655743..22a2e9dbd49 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -629,6 +629,10 @@ should be canceled. ## Approvers and Maintainers +### Triagers + +- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent + ### Approvers ### Maintainers @@ -641,13 +645,13 @@ should be canceled.
### Emeritus -- [Aaron Clawson](https://github.com/MadVikingGod), LightStep -- [Anthony Mirabella](https://github.com/Aneurysm9), AWS -- [Chester Cheung](https://github.com/hanyuancheung), Tencent -- [Evan Torrie](https://github.com/evantorrie), Yahoo -- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep -- [Josh MacDonald](https://github.com/jmacd), LightStep -- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb +- [Aaron Clawson](https://github.com/MadVikingGod) +- [Anthony Mirabella](https://github.com/Aneurysm9) +- [Chester Cheung](https://github.com/hanyuancheung) +- [Evan Torrie](https://github.com/evantorrie) +- [Gustavo Silva Paiva](https://github.com/paivagustavo) +- [Josh MacDonald](https://github.com/jmacd) +- [Liz Fong-Jones](https://github.com/lizthegrey) ### Become an Approver or a Maintainer diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index a1228a21240..a7f6d8cc688 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -14,8 +14,8 @@ TIMEOUT = 60 .DEFAULT_GOAL := precommit .PHONY: precommit ci -precommit: generate license-check misspell go-mod-tidy golangci-lint-fix verify-readmes verify-mods test-default -ci: generate license-check lint vanity-import-check verify-readmes verify-mods build test-default check-clean-work-tree test-coverage +precommit: generate toolchain-check license-check misspell go-mod-tidy golangci-lint-fix verify-readmes verify-mods test-default +ci: generate toolchain-check license-check lint vanity-import-check verify-readmes verify-mods build test-default check-clean-work-tree test-coverage # Tools @@ -235,6 +235,16 @@ govulncheck/%: $(GOVULNCHECK) codespell: $(CODESPELL) @$(DOCKERPY) $(CODESPELL) +.PHONY: toolchain-check +toolchain-check: + @toolchainRes=$$(for f in $(ALL_GO_MOD_DIRS); do \ + awk '/^toolchain/ { found=1; next } END { if (found) print FILENAME }' $$f/go.mod; \ + done); \ + if [ -n "$${toolchainRes}" ]; then \ + echo "toolchain checking failed:"; echo "$${toolchainRes}"; \ + exit 1; \ + fi + .PHONY: license-check license-check: @licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! -path './.git/*' ) ; do \ @@ -260,7 +270,7 @@ SEMCONVPKG ?= "semconv/" semconv-generate: $(SEMCONVGEN) $(SEMCONVKIT) [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 ) [ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 ) - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -z "$(SEMCONVPKG)/capitalizations.txt" -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." 
--only=metric -f metric.go -t "$(SEMCONVPKG)/metric_template.j2" -s "$(TAG)" $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index efec278905b..d9a19207625 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -1,6 +1,6 @@ # OpenTelemetry-Go -[![CI](https://github.com/open-telemetry/opentelemetry-go/workflows/ci/badge.svg)](https://github.com/open-telemetry/opentelemetry-go/actions?query=workflow%3Aci+branch%3Amain) +[![ci](https://github.com/open-telemetry/opentelemetry-go/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/open-telemetry/opentelemetry-go/actions/workflows/ci.yml) [![codecov.io](https://codecov.io/gh/open-telemetry/opentelemetry-go/coverage.svg?branch=main)](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main) [![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel)](https://pkg.go.dev/go.opentelemetry.io/otel) [![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel) diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md index ffa9b61258a..4ebef4f9ddf 100644 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -130,6 +130,6 @@ Importantly, bump any package versions referenced to be the latest one you just Bump the dependencies in the following Go services: -- [`accountingservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accountingservice) -- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkoutservice) -- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/productcatalogservice) +- [`accounting`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accounting) +- [`checkout`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkout) +- [`product-catalog`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/product-catalog) diff --git a/vendor/go.opentelemetry.io/otel/VERSIONING.md b/vendor/go.opentelemetry.io/otel/VERSIONING.md index 412f1e362bb..b8cb605c166 100644 --- a/vendor/go.opentelemetry.io/otel/VERSIONING.md +++ b/vendor/go.opentelemetry.io/otel/VERSIONING.md @@ -26,7 +26,7 @@ is designed so the following goals can be achieved. go.opentelemetry.io/otel/v2 v2.0.1`) and in the package import path (e.g., `import "go.opentelemetry.io/otel/v2/trace"`). This includes the paths used in `go get` commands (e.g., `go get - go.opentelemetry.io/otel/v2@v2.0.1`. Note there is both a `/v2` and a + go.opentelemetry.io/otel/v2@v2.0.1`). Note there is both a `/v2` and a `@v2.0.1` in that example. One way to think about it is that the module name now includes the `/v2`, so include `/v2` whenever you are using the module name). diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go index 36f5367030c..0e1fe242203 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -355,7 +355,7 @@ func parseMember(member string) (Member, error) { } // replaceInvalidUTF8Sequences replaces invalid UTF-8 sequences with '�'.
-func replaceInvalidUTF8Sequences(cap int, unescapeVal string) string { +func replaceInvalidUTF8Sequences(c int, unescapeVal string) string { if utf8.ValidString(unescapeVal) { return unescapeVal } @@ -363,7 +363,7 @@ func replaceInvalidUTF8Sequences(cap int, unescapeVal string) string { // https://github.com/w3c/baggage/blob/8c215efbeebd3fa4b1aceb937a747e56444f22f3/baggage/HTTP_HEADER_FORMAT.md?plain=1#L69 var b strings.Builder - b.Grow(cap) + b.Grow(c) for i := 0; i < len(unescapeVal); { r, size := utf8.DecodeRuneInString(unescapeVal[i:]) if r == utf8.RuneError && size == 1 { diff --git a/vendor/go.opentelemetry.io/otel/bridge/opencensus/internal/oc2otel/span_context.go b/vendor/go.opentelemetry.io/otel/bridge/opencensus/internal/oc2otel/span_context.go index 18c80c9b1ba..2a866f7f8bd 100644 --- a/vendor/go.opentelemetry.io/otel/bridge/opencensus/internal/oc2otel/span_context.go +++ b/vendor/go.opentelemetry.io/otel/bridge/opencensus/internal/oc2otel/span_context.go @@ -4,6 +4,8 @@ package oc2otel // import "go.opentelemetry.io/otel/bridge/opencensus/internal/oc2otel" import ( + "slices" + octrace "go.opencensus.io/trace" "go.opentelemetry.io/otel/trace" @@ -14,9 +16,19 @@ func SpanContext(sc octrace.SpanContext) trace.SpanContext { if sc.IsSampled() { traceFlags = trace.FlagsSampled } + + entries := slices.Clone(sc.Tracestate.Entries()) + slices.Reverse(entries) + + tsOtel := trace.TraceState{} + for _, entry := range entries { + tsOtel, _ = tsOtel.Insert(entry.Key, entry.Value) + } + return trace.NewSpanContext(trace.SpanContextConfig{ TraceID: trace.TraceID(sc.TraceID), SpanID: trace.SpanID(sc.SpanID), TraceFlags: traceFlags, + TraceState: tsOtel, }) } diff --git a/vendor/go.opentelemetry.io/otel/bridge/opencensus/internal/ocmetric/metric.go b/vendor/go.opentelemetry.io/otel/bridge/opencensus/internal/ocmetric/metric.go index 4d053e8016d..f38598bbfda 100644 --- a/vendor/go.opentelemetry.io/otel/bridge/opencensus/internal/ocmetric/metric.go +++ b/vendor/go.opentelemetry.io/otel/bridge/opencensus/internal/ocmetric/metric.go @@ -144,7 +144,7 @@ func convertHistogram(labelKeys []ocmetricdata.LabelKey, ts []*ocmetricdata.Time Attributes: attrs, StartTime: t.StartTime, Time: p.Time, - Count: uint64(dist.Count), + Count: uint64(max(0, dist.Count)), // nolint:gosec // A count should never be negative. Sum: dist.Sum, Bounds: dist.BucketOptions.Bounds, BucketCounts: bucketCounts, @@ -166,7 +166,7 @@ func convertBuckets(buckets []ocmetricdata.Bucket) ([]uint64, []metricdata.Exemp err = errors.Join(err, fmt.Errorf("%w: %q", errNegativeBucketCount, bucket.Count)) continue } - bucketCounts[i] = uint64(bucket.Count) + bucketCounts[i] = uint64(max(0, bucket.Count)) // nolint:gosec // A count should never be negative. if bucket.Exemplar != nil { exemplar, exemplarErr := convertExemplar(bucket.Exemplar) @@ -233,7 +233,7 @@ func convertKV(key string, value any) attribute.KeyValue { case uintptr: return uint64KV(key, uint64(typedVal)) case uint64: - return uint64KV(key, uint64(typedVal)) + return uint64KV(key, typedVal) case float32: return attribute.Float64(key, float64(typedVal)) case float64: @@ -301,7 +301,7 @@ func uintKV(key string, val uint) attribute.KeyValue { if val > uint(math.MaxInt) { return attribute.String(key, strconv.FormatUint(uint64(val), 10)) } - return attribute.Int(key, int(val)) + return attribute.Int(key, int(val)) // nolint: gosec // Overflow checked above. 
} func uintSliceKV[N uint | uint8 | uint16 | uint32 | uint64 | uintptr](key string, val []N) attribute.KeyValue { @@ -317,7 +317,7 @@ func uint64KV(key string, val uint64) attribute.KeyValue { if val > maxInt64 { return attribute.String(key, strconv.FormatUint(val, 10)) } - return attribute.Int64(key, int64(val)) + return attribute.Int64(key, int64(val)) // nolint: gosec // Overflow checked above. } func complexSliceKV[N complex64 | complex128](key string, val []N) attribute.KeyValue { @@ -357,7 +357,7 @@ func convertSummary(labelKeys []ocmetricdata.LabelKey, ts []*ocmetricdata.TimeSe Attributes: attrs, StartTime: t.StartTime, Time: p.Time, - Count: uint64(summary.Count), + Count: uint64(max(0, summary.Count)), // nolint:gosec // A count should never be negative. QuantileValues: convertQuantiles(summary.Snapshot), Sum: summary.Sum, } diff --git a/vendor/go.opentelemetry.io/otel/bridge/opencensus/internal/otel2oc/span_context.go b/vendor/go.opentelemetry.io/otel/bridge/opencensus/internal/otel2oc/span_context.go index 74dcc90b8dc..f9e16bedbcc 100644 --- a/vendor/go.opentelemetry.io/otel/bridge/opencensus/internal/otel2oc/span_context.go +++ b/vendor/go.opentelemetry.io/otel/bridge/opencensus/internal/otel2oc/span_context.go @@ -5,6 +5,7 @@ package otel2oc // import "go.opentelemetry.io/otel/bridge/opencensus/internal/o import ( octrace "go.opencensus.io/trace" + "go.opencensus.io/trace/tracestate" "go.opentelemetry.io/otel/trace" ) @@ -15,9 +16,18 @@ func SpanContext(sc trace.SpanContext) octrace.SpanContext { // OpenCensus doesn't expose functions to directly set sampled to = 0x1 } + + entries := make([]tracestate.Entry, 0, sc.TraceState().Len()) + sc.TraceState().Walk(func(key, value string) bool { + entries = append(entries, tracestate.Entry{Key: key, Value: value}) + return true + }) + tsOc, _ := tracestate.New(nil, entries...) + return octrace.SpanContext{ TraceID: octrace.TraceID(sc.TraceID()), SpanID: octrace.SpanID(sc.SpanID()), TraceOptions: to, + Tracestate: tsOc, } }
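With this change the OpenCensus bridge propagates tracestate in both directions. A small, hedged sketch of the OTel-to-OpenCensus direction shown in the hunk above, using only the public trace.TraceState and go.opencensus.io/trace/tracestate APIs:

```go
package main

import (
	"fmt"

	"go.opencensus.io/trace/tracestate"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	// Build an OTel trace state with two members.
	ts, err := trace.ParseTraceState("vendor1=a,vendor2=b")
	if err != nil {
		panic(err)
	}

	// OTel -> OpenCensus: walk the members into OC entries, as the bridge does.
	entries := make([]tracestate.Entry, 0, ts.Len())
	ts.Walk(func(key, value string) bool {
		entries = append(entries, tracestate.Entry{Key: key, Value: value})
		return true
	})
	tsOC, err := tracestate.New(nil, entries...)
	if err != nil {
		panic(err)
	}

	for _, e := range tsOC.Entries() {
		fmt.Printf("%s=%s\n", e.Key, e.Value)
	}
}
```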
diff --git a/vendor/go.opentelemetry.io/otel/bridge/opencensus/internal/span.go b/vendor/go.opentelemetry.io/otel/bridge/opencensus/internal/span.go index 9e7ee39fb5b..3ddef4d037e 100644 --- a/vendor/go.opentelemetry.io/otel/bridge/opencensus/internal/span.go +++ b/vendor/go.opentelemetry.io/otel/bridge/opencensus/internal/span.go @@ -61,7 +61,7 @@ func (s *Span) SetName(name string) { // SetStatus sets the status of this span, if it is recording events. func (s *Span) SetStatus(status octrace.Status) { - s.otelSpan.SetStatus(codes.Code(status.Code), status.Message) + s.otelSpan.SetStatus(codes.Code(max(0, status.Code)), status.Message) // nolint:gosec // Overflow checked. } // AddAttributes sets attributes in this span. @@ -128,5 +128,5 @@ func (s *Span) AddLink(l octrace.Link) { // String prints a string representation of this span. func (s *Span) String() string { - return fmt.Sprintf("span %s", s.otelSpan.SpanContext().SpanID().String()) + return "span " + s.otelSpan.SpanContext().SpanID().String() } diff --git a/vendor/go.opentelemetry.io/otel/bridge/opencensus/internal/tracer.go b/vendor/go.opentelemetry.io/otel/bridge/opencensus/internal/tracer.go index 2454f27091c..13164684525 100644 --- a/vendor/go.opentelemetry.io/otel/bridge/opencensus/internal/tracer.go +++ b/vendor/go.opentelemetry.io/otel/bridge/opencensus/internal/tracer.go @@ -37,7 +37,7 @@ func (o *Tracer) StartSpan(ctx context.Context, name string, s ...octrace.StartO // StartSpanWithRemoteParent starts a new child span of the span from the // given parent. func (o *Tracer) StartSpanWithRemoteParent(ctx context.Context, name string, parent octrace.SpanContext, s ...octrace.StartOption) (context.Context, *octrace.Span) { - // make sure span context is zero'd out so we use the remote parent + // make sure span context is zeroed out so we use the remote parent ctx = trace.ContextWithSpan(ctx, nil) ctx = trace.ContextWithRemoteSpanContext(ctx, oc2otel.SpanContext(parent)) return o.StartSpan(ctx, name, s...) diff --git a/vendor/go.opentelemetry.io/otel/bridge/opencensus/version.go b/vendor/go.opentelemetry.io/otel/bridge/opencensus/version.go index ba912bd2751..5182395f628 100644 --- a/vendor/go.opentelemetry.io/otel/bridge/opencensus/version.go +++ b/vendor/go.opentelemetry.io/otel/bridge/opencensus/version.go @@ -5,5 +5,5 @@ package opencensus // import "go.opentelemetry.io/otel/bridge/opencensus" // Version is the current release version of the opencensus bridge. func Version() string { - return "1.27.0" + return "1.34.0" } diff --git a/vendor/go.opentelemetry.io/otel/bridge/opentracing/bridge.go b/vendor/go.opentelemetry.io/otel/bridge/opentracing/bridge.go index 8aee5567f9e..848f028c87f 100644 --- a/vendor/go.opentelemetry.io/otel/bridge/opentracing/bridge.go +++ b/vendor/go.opentelemetry.io/otel/bridge/opentracing/bridge.go @@ -6,6 +6,7 @@ package opentracing // import "go.opentelemetry.io/otel/bridge/opentracing" import ( "context" "fmt" + "strconv" "strings" "sync" @@ -532,7 +533,7 @@ func otTagToOTelAttr(k string, v interface{}) attribute.KeyValue { case int64: return key.Int64(val) case uint64: - return key.String(fmt.Sprintf("%d", val)) + return key.String(strconv.FormatUint(val, 10)) case float64: return key.Float64(val) case int8: @@ -552,7 +553,7 @@ func otTagToOTelAttr(k string, v interface{}) attribute.KeyValue { case int: return key.Int(val) case uint: - return key.String(fmt.Sprintf("%d", val)) + return key.String(strconv.FormatUint(uint64(val), 10)) case string: return key.String(val) default: diff --git a/vendor/go.opentelemetry.io/otel/bridge/opentracing/provider.go b/vendor/go.opentelemetry.io/otel/bridge/opentracing/provider.go index b069753f7e9..3f093e75fcf 100644 --- a/vendor/go.opentelemetry.io/otel/bridge/opentracing/provider.go +++ b/vendor/go.opentelemetry.io/otel/bridge/opentracing/provider.go @@ -6,6 +6,7 @@ package opentracing // import "go.opentelemetry.io/otel/bridge/opentracing" import ( "sync" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" ) @@ -38,6 +39,8 @@ func NewTracerProvider(bridge *BridgeTracer, provider trace.TracerProvider) *Tra type wrappedTracerKey struct { name string version string + schema string + attrs attribute.Set } // Tracer creates a WrappedTracer that wraps the OpenTelemetry tracer for each call to @@ -51,6 +54,8 @@ func (p
*TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T key := wrappedTracerKey{ name: name, version: c.InstrumentationVersion(), + schema: c.SchemaURL(), + attrs: c.InstrumentationAttributes(), } if t, ok := p.tracers[key]; ok { diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go index 2acbac35466..49a35b12255 100644 --- a/vendor/go.opentelemetry.io/otel/codes/codes.go +++ b/vendor/go.opentelemetry.io/otel/codes/codes.go @@ -5,6 +5,7 @@ package codes // import "go.opentelemetry.io/otel/codes" import ( "encoding/json" + "errors" "fmt" "strconv" ) @@ -63,7 +64,7 @@ func (c *Code) UnmarshalJSON(b []byte) error { return nil } if c == nil { - return fmt.Errorf("nil receiver passed to UnmarshalJSON") + return errors.New("nil receiver passed to UnmarshalJSON") } var x interface{} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
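The files that follow add a gRPC OTLP log exporter. As a hedged sketch of how such an exporter is typically wired into the log SDK, assuming this package's New constructor (defined alongside these files, not shown in this hunk) and the go.opentelemetry.io/otel/sdk/log batch processor:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc"
	sdklog "go.opentelemetry.io/otel/sdk/log"
)

func main() {
	ctx := context.Background()

	// New is assumed from this package's public API (not shown in this hunk).
	exp, err := otlploggrpc.New(ctx, otlploggrpc.WithInsecure())
	if err != nil {
		panic(err)
	}

	provider := sdklog.NewLoggerProvider(
		sdklog.WithProcessor(sdklog.NewBatchProcessor(exp)),
	)
	defer func() { _ = provider.Shutdown(ctx) }()

	logger := provider.Logger("example")
	_ = logger // emit records via the log API bridge of your choice
}
```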
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/README.md b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/README.md new file mode 100644 index 00000000000..0b4603c8e40 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/README.md @@ -0,0 +1,3 @@ +# OTLP Log gRPC Exporter + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/client.go new file mode 100644 index 00000000000..05abd92eeec --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/client.go @@ -0,0 +1,258 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlploggrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc" + +import ( + "context" + "fmt" + "time" + + "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc" + "google.golang.org/grpc/backoff" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/encoding/gzip" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry" + collogpb "go.opentelemetry.io/proto/otlp/collector/logs/v1" + logpb "go.opentelemetry.io/proto/otlp/logs/v1" +) + +// The methods of this type are not expected to be called concurrently. +type client struct { + metadata metadata.MD + exportTimeout time.Duration + requestFunc retry.RequestFunc + + // ourConn keeps track of where conn was created: true if created here in + // NewClient, or false if passed with an option. This is important on + // Shutdown as conn should only be closed if we created it. Otherwise, + // it is up to the processes that passed conn to close it. + ourConn bool + conn *grpc.ClientConn + lsc collogpb.LogsServiceClient +} + +// Used for testing. +var newGRPCClientFn = grpc.NewClient + +// newClient creates a new gRPC log client. +func newClient(cfg config) (*client, error) { + c := &client{ + exportTimeout: cfg.timeout.Value, + requestFunc: cfg.retryCfg.Value.RequestFunc(retryable), + conn: cfg.gRPCConn.Value, + } + + if len(cfg.headers.Value) > 0 { + c.metadata = metadata.New(cfg.headers.Value) + } + + if c.conn == nil { + // If the caller did not provide a ClientConn when the client was + // created, create one using the configuration they did provide. + dialOpts := newGRPCDialOptions(cfg) + + conn, err := newGRPCClientFn(cfg.endpoint.Value, dialOpts...) + if err != nil { + return nil, err + } + // Keep track that we own the lifecycle of this conn and need to close + // it on Shutdown. + c.ourConn = true + c.conn = conn + } + + c.lsc = collogpb.NewLogsServiceClient(c.conn) + + return c, nil +} + +func newGRPCDialOptions(cfg config) []grpc.DialOption { + userAgent := "OTel Go OTLP over gRPC logs exporter/" + Version() + dialOpts := []grpc.DialOption{grpc.WithUserAgent(userAgent)} + dialOpts = append(dialOpts, cfg.dialOptions.Value...) + + // Convert other grpc configs to the dial options. 
+ // Service config + if cfg.serviceConfig.Value != "" { + dialOpts = append(dialOpts, grpc.WithDefaultServiceConfig(cfg.serviceConfig.Value)) + } + // Prioritize GRPCCredentials over Insecure (passing both is an error). + if cfg.gRPCCredentials.Value != nil { + dialOpts = append(dialOpts, grpc.WithTransportCredentials(cfg.gRPCCredentials.Value)) + } else if cfg.insecure.Value { + dialOpts = append(dialOpts, grpc.WithTransportCredentials(insecure.NewCredentials())) + } else { + // Default to using the host's root CA. + dialOpts = append(dialOpts, grpc.WithTransportCredentials( + credentials.NewTLS(nil), + )) + } + // Compression + if cfg.compression.Value == GzipCompression { + dialOpts = append(dialOpts, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name))) + } + // Reconnection period + if cfg.reconnectionPeriod.Value != 0 { + p := grpc.ConnectParams{ + Backoff: backoff.DefaultConfig, + MinConnectTimeout: cfg.reconnectionPeriod.Value, + } + dialOpts = append(dialOpts, grpc.WithConnectParams(p)) + } + + return dialOpts +} + +// UploadLogs sends proto logs to the connected endpoint. +// +// Retryable errors from the server will be handled according to any +// RetryConfig the client was created with. +// +// The otlplog.Exporter synchronizes access to client methods, and +// ensures this is not called after the Exporter is shut down. The only thing +// to do here is send data. +func (c *client) UploadLogs(ctx context.Context, rl []*logpb.ResourceLogs) error { + select { + case <-ctx.Done(): + // Do not upload if the context is already expired. + return ctx.Err() + default: + } + + ctx, cancel := c.exportContext(ctx) + defer cancel() + + return c.requestFunc(ctx, func(ctx context.Context) error { + resp, err := c.lsc.Export(ctx, &collogpb.ExportLogsServiceRequest{ + ResourceLogs: rl, + }) + if resp != nil && resp.PartialSuccess != nil { + msg := resp.PartialSuccess.GetErrorMessage() + n := resp.PartialSuccess.GetRejectedLogRecords() + if n != 0 || msg != "" { + err := fmt.Errorf("OTLP partial success: %s (%d log records rejected)", msg, n) + otel.Handle(err) + } + } + // nil is converted to OK. + if status.Code(err) == codes.OK { + // Success. + return nil + } + return err + }) +} + +// Shutdown shuts down the client, freeing all resources. +// +// Any active connections to a remote endpoint are closed if they were created +// by the client. Any gRPC connection passed during creation using +// WithGRPCConn will not be closed. It is the caller's responsibility to +// handle cleanup of that resource. +// +// The otlplog.Exporter synchronizes access to client methods and +// ensures this is called only once. The only thing that needs to be done +// here is to release any computational resources the client holds. +func (c *client) Shutdown(ctx context.Context) error { + c.metadata = nil + c.requestFunc = nil + c.lsc = nil + + // Release the connection if we created it. + err := ctx.Err() + if c.ourConn { + closeErr := c.conn.Close() + // A context timeout error takes precedence over this error. + if err == nil && closeErr != nil { + err = closeErr + } + } + c.conn = nil + return err +} + +// exportContext returns a copy of parent with an appropriate deadline and +// cancellation function based on the client's configured export timeout. +// +// It is the caller's responsibility to cancel the returned context once its +// use is complete, via the parent or directly with the returned CancelFunc, to +// ensure all resources are correctly released.
+func (c *client) exportContext(parent context.Context) (context.Context, context.CancelFunc) { + var ( + ctx context.Context + cancel context.CancelFunc + ) + + if c.exportTimeout > 0 { + ctx, cancel = context.WithTimeout(parent, c.exportTimeout) + } else { + ctx, cancel = context.WithCancel(parent) + } + + if c.metadata.Len() > 0 { + md := c.metadata + if outMD, ok := metadata.FromOutgoingContext(ctx); ok { + md = metadata.Join(md, outMD) + } + + ctx = metadata.NewOutgoingContext(ctx, md) + } + + return ctx, cancel +} + +type noopClient struct{} + +func newNoopClient() *noopClient { + return &noopClient{} +} + +func (c *noopClient) UploadLogs(context.Context, []*logpb.ResourceLogs) error { return nil } + +func (c *noopClient) Shutdown(context.Context) error { return nil } + +// retryable returns if err identifies a request that can be retried and a +// duration to wait for if an explicit throttle time is included in err. +func retryable(err error) (bool, time.Duration) { + s := status.Convert(err) + return retryableGRPCStatus(s) +} + +func retryableGRPCStatus(s *status.Status) (bool, time.Duration) { + switch s.Code() { + case codes.Canceled, + codes.DeadlineExceeded, + codes.Aborted, + codes.OutOfRange, + codes.Unavailable, + codes.DataLoss: + // Additionally, handle RetryInfo. + _, d := throttleDelay(s) + return true, d + case codes.ResourceExhausted: + // Retry only if the server signals that the recovery from resource exhaustion is possible. + return throttleDelay(s) + } + + // Not a retry-able error. + return false, 0 +} + +// throttleDelay returns if the status is RetryInfo +// and the duration to wait for if an explicit throttle time is included. +func throttleDelay(s *status.Status) (bool, time.Duration) { + for _, detail := range s.Details() { + if t, ok := detail.(*errdetails.RetryInfo); ok { + return true, t.RetryDelay.AsDuration() + } + } + return false, 0 +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/config.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/config.go new file mode 100644 index 00000000000..cd33a168271 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/config.go @@ -0,0 +1,653 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlploggrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc" + +import ( + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "net/url" + "os" + "strconv" + "strings" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry" + "go.opentelemetry.io/otel/internal/global" +) + +// Default values. +var ( + defaultEndpoint = "localhost:4317" + defaultTimeout = 10 * time.Second + defaultRetryCfg = retry.DefaultConfig +) + +// Environment variable keys. 
+var ( + envEndpoint = []string{ + "OTEL_EXPORTER_OTLP_LOGS_ENDPOINT", + "OTEL_EXPORTER_OTLP_ENDPOINT", + } + envInsecure = []string{ + "OTEL_EXPORTER_OTLP_LOGS_INSECURE", + "OTEL_EXPORTER_OTLP_INSECURE", + } + + envHeaders = []string{ + "OTEL_EXPORTER_OTLP_LOGS_HEADERS", + "OTEL_EXPORTER_OTLP_HEADERS", + } + + envCompression = []string{ + "OTEL_EXPORTER_OTLP_LOGS_COMPRESSION", + "OTEL_EXPORTER_OTLP_COMPRESSION", + } + + envTimeout = []string{ + "OTEL_EXPORTER_OTLP_LOGS_TIMEOUT", + "OTEL_EXPORTER_OTLP_TIMEOUT", + } + + envTLSCert = []string{ + "OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE", + "OTEL_EXPORTER_OTLP_CERTIFICATE", + } + envTLSClient = []struct { + Certificate string + Key string + }{ + { + "OTEL_EXPORTER_OTLP_LOGS_CLIENT_CERTIFICATE", + "OTEL_EXPORTER_OTLP_LOGS_CLIENT_KEY", + }, + { + "OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE", + "OTEL_EXPORTER_OTLP_CLIENT_KEY", + }, + } +) + +type fnOpt func(config) config + +func (f fnOpt) applyOption(c config) config { return f(c) } + +// Option applies an option to the Exporter. +type Option interface { + applyOption(config) config +} + +type config struct { + endpoint setting[string] + insecure setting[bool] + tlsCfg setting[*tls.Config] + headers setting[map[string]string] + compression setting[Compression] + timeout setting[time.Duration] + retryCfg setting[retry.Config] + + // gRPC configurations + gRPCCredentials setting[credentials.TransportCredentials] + serviceConfig setting[string] + reconnectionPeriod setting[time.Duration] + dialOptions setting[[]grpc.DialOption] + gRPCConn setting[*grpc.ClientConn] +} + +func newConfig(options []Option) config { + var c config + for _, opt := range options { + c = opt.applyOption(c) + } + + // Apply environment value and default value + c.endpoint = c.endpoint.Resolve( + getEnv[string](envEndpoint, convEndpoint), + fallback[string](defaultEndpoint), + ) + c.insecure = c.insecure.Resolve( + loadInsecureFromEnvEndpoint(envEndpoint), + getEnv[bool](envInsecure, convInsecure), + ) + c.tlsCfg = c.tlsCfg.Resolve( + loadEnvTLS[*tls.Config](), + ) + c.headers = c.headers.Resolve( + getEnv[map[string]string](envHeaders, convHeaders), + ) + c.compression = c.compression.Resolve( + getEnv[Compression](envCompression, convCompression), + ) + c.timeout = c.timeout.Resolve( + getEnv[time.Duration](envTimeout, convDuration), + fallback[time.Duration](defaultTimeout), + ) + c.retryCfg = c.retryCfg.Resolve( + fallback[retry.Config](defaultRetryCfg), + ) + + return c +} + +// RetryConfig defines configuration for retrying the export of log data +// that failed. +// +// This configuration does not define any network retry strategy. That is +// entirely handled by the gRPC ClientConn. +type RetryConfig retry.Config + +// WithInsecure disables client transport security for the Exporter's gRPC +// connection, just like grpc.WithInsecure() +// (https://pkg.go.dev/google.golang.org/grpc#WithInsecure) does. +// +// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_LOGS_ENDPOINT +// environment variable is set, and this option is not passed, that variable +// value will be used to determine client security. If the endpoint has a +// scheme of "http" or "unix" client security will be disabled. If both are +// set, OTEL_EXPORTER_OTLP_LOGS_ENDPOINT will take precedence. +// +// By default, if an environment variable is not set, and this option is not +// passed, client security will be used. +// +// This option has no effect if WithGRPCConn is used. 
+func WithInsecure() Option { + return fnOpt(func(c config) config { + c.insecure = newSetting(true) + return c + }) +} + +// WithEndpoint sets the target endpoint the Exporter will connect to. +// +// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_LOGS_ENDPOINT +// environment variable is set, and this option is not passed, that variable +// value will be used. If both are set, OTEL_EXPORTER_OTLP_LOGS_ENDPOINT +// will take precedence. +// +// If both this option and WithEndpointURL are used, the last used option will +// take precedence. +// +// By default, if an environment variable is not set, and this option is not +// passed, "localhost:4317" will be used. +// +// This option has no effect if WithGRPCConn is used. +func WithEndpoint(endpoint string) Option { + return fnOpt(func(c config) config { + c.endpoint = newSetting(endpoint) + return c + }) +} + +// WithEndpointURL sets the target endpoint URL the Exporter will connect to. +// +// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_LOGS_ENDPOINT +// environment variable is set, and this option is not passed, that variable +// value will be used. If both are set, OTEL_EXPORTER_OTLP_LOGS_ENDPOINT +// will take precedence. +// +// If both this option and WithEndpoint are used, the last used option will +// take precedence. +// +// If an invalid URL is provided, the default value will be kept. +// +// By default, if an environment variable is not set, and this option is not +// passed, "localhost:4317" will be used. +// +// This option has no effect if WithGRPCConn is used. +func WithEndpointURL(rawURL string) Option { + u, err := url.Parse(rawURL) + if err != nil { + global.Error(err, "otlplog: parse endpoint url", "url", rawURL) + return fnOpt(func(c config) config { return c }) + } + return fnOpt(func(c config) config { + c.endpoint = newSetting(u.Host) + c.insecure = insecureFromScheme(c.insecure, u.Scheme) + return c + }) +} + +// WithReconnectionPeriod sets the minimum amount of time between connection +// attempts to the target endpoint. +// +// This option has no effect if WithGRPCConn is used. +func WithReconnectionPeriod(rp time.Duration) Option { + return fnOpt(func(c config) config { + c.reconnectionPeriod = newSetting(rp) + return c + }) +} + +// Compression describes the compression used for exported payloads. +type Compression int + +const ( + // NoCompression represents that no compression should be used. + NoCompression Compression = iota + // GzipCompression represents that gzip compression should be used. + GzipCompression +) + +// WithCompressor sets the compressor the gRPC client uses. +// Supported compressor values: "gzip". +// +// If the OTEL_EXPORTER_OTLP_COMPRESSION or +// OTEL_EXPORTER_OTLP_LOGS_COMPRESSION environment variable is set, and +// this option is not passed, that variable value will be used. That value can +// be either "none" or "gzip". If both are set, +// OTEL_EXPORTER_OTLP_LOGS_COMPRESSION will take precedence. +// +// By default, if an environment variable is not set, and this option is not +// passed, no compression strategy will be used. +// +// This option has no effect if WithGRPCConn is used. +func WithCompressor(compressor string) Option { + return fnOpt(func(c config) config { + c.compression = newSetting(compressorToCompression(compressor)) + return c + }) +} + +// WithHeaders will send the provided headers with each gRPC request.
+// +// If the OTEL_EXPORTER_OTLP_HEADERS or OTEL_EXPORTER_OTLP_LOGS_HEADERS +// environment variable is set, and this option is not passed, that variable +// value will be used. The value will be parsed as a list of key value pairs. +// These pairs are expected to be in the W3C Correlation-Context format +// without additional semi-colon delimited metadata (i.e. "k1=v1,k2=v2"). If +// both are set, OTEL_EXPORTER_OTLP_LOGS_HEADERS will take precedence. +// +// By default, if an environment variable is not set, and this option is not +// passed, no user headers will be set. +func WithHeaders(headers map[string]string) Option { + return fnOpt(func(c config) config { + c.headers = newSetting(headers) + return c + }) +} + +// WithTLSCredentials sets the gRPC connection to use creds. +// +// If the OTEL_EXPORTER_OTLP_CERTIFICATE or +// OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE environment variable is set, and +// this option is not passed, that variable value will be used. The value will +// be parsed as the filepath of the TLS certificate chain to use. If both are +// set, OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE will take precedence. +// +// By default, if an environment variable is not set, and this option is not +// passed, no TLS credentials will be used. +// +// This option has no effect if WithGRPCConn is used. +func WithTLSCredentials(credential credentials.TransportCredentials) Option { + return fnOpt(func(c config) config { + c.gRPCCredentials = newSetting(credential) + return c + }) +} + +// WithServiceConfig defines the default gRPC service config used. +// +// This option has no effect if WithGRPCConn is used. +func WithServiceConfig(serviceConfig string) Option { + return fnOpt(func(c config) config { + c.serviceConfig = newSetting(serviceConfig) + return c + }) +} + +// WithDialOption sets explicit grpc.DialOptions to use when establishing a +// gRPC connection. The options here are appended to the internal grpc.DialOptions +// used so they will take precedence over any other internal grpc.DialOptions +// they might conflict with. +// The [grpc.WithBlock], [grpc.WithTimeout], and [grpc.WithReturnConnectionError] +// grpc.DialOptions are ignored. +// +// This option has no effect if WithGRPCConn is used. +func WithDialOption(opts ...grpc.DialOption) Option { + return fnOpt(func(c config) config { + c.dialOptions = newSetting(opts) + return c + }) +} + +// WithGRPCConn sets conn as the gRPC ClientConn used for all communication. +// +// This option takes precedence over any other option that relates to +// establishing or persisting a gRPC connection to a target endpoint. Any +// other option of those types passed will be ignored. +// +// It is the caller's responsibility to close the passed conn. The Exporter +// Shutdown method will not close this connection. +func WithGRPCConn(conn *grpc.ClientConn) Option { + return fnOpt(func(c config) config { + c.gRPCConn = newSetting(conn) + return c + }) +} + +// WithTimeout sets the max amount of time an Exporter will attempt an export. +// +// This takes precedence over any retry settings defined by WithRetry. Once +// this time limit has been reached the export is abandoned and the log +// data is dropped. +// +// If the OTEL_EXPORTER_OTLP_TIMEOUT or OTEL_EXPORTER_OTLP_LOGS_TIMEOUT +// environment variable is set, and this option is not passed, that variable +// value will be used. The value will be parsed as an integer representing the +// timeout in milliseconds. If both are set, +// OTEL_EXPORTER_OTLP_LOGS_TIMEOUT will take precedence.
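+// +// For example, setting OTEL_EXPORTER_OTLP_LOGS_TIMEOUT=5000 is read as 5000 +// milliseconds, giving the same effective limit as passing +// WithTimeout(5*time.Second).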
+// +// By default, if an environment variable is not set, and this option is not +// passed, a timeout of 10 seconds will be used. +func WithTimeout(duration time.Duration) Option { + return fnOpt(func(c config) config { + c.timeout = newSetting(duration) + return c + }) +} + +// WithRetry sets the retry policy for transient retryable errors that are +// returned by the target endpoint. +// +// If the target endpoint responds with not only a retryable error, but +// explicitly returns a backoff time in the response, that time will take +// precedence over these settings. +// +// These settings do not define any network retry strategy. That is entirely +// handled by the gRPC ClientConn. +// +// If unset, the default retry policy will be used. It will retry the export +// 5 seconds after receiving a retryable error and increase exponentially +// after each error for no more than a total time of 1 minute. +func WithRetry(rc RetryConfig) Option { + return fnOpt(func(c config) config { + c.retryCfg = newSetting(retry.Config(rc)) + return c + }) +} + +// convCompression returns the parsed compression encoded in s. NoCompression +// and an error are returned if s is unknown. +func convCompression(s string) (Compression, error) { + switch s { + case "gzip": + return GzipCompression, nil + case "none", "": + return NoCompression, nil + } + return NoCompression, fmt.Errorf("unknown compression: %s", s) +} + +// convEndpoint converts s from a URL string to an endpoint if s is a valid +// URL. Otherwise, "" and an error are returned. +func convEndpoint(s string) (string, error) { + u, err := url.Parse(s) + if err != nil { + return "", err + } + return u.Host, nil +} + +// convInsecure converts s from string to bool without case sensitivity. +// If s is not valid, an error is returned. +func convInsecure(s string) (bool, error) { + s = strings.ToLower(s) + if s != "true" && s != "false" { + return false, fmt.Errorf("can't convert %q to bool", s) + } + + return s == "true", nil +} + +// loadInsecureFromEnvEndpoint returns a resolver that fetches the +// insecure setting from envEndpoint if possible. +func loadInsecureFromEnvEndpoint(envEndpoint []string) resolver[bool] { + return func(s setting[bool]) setting[bool] { + if s.Set { + // Passed, valid, options have precedence. + return s + } + + for _, key := range envEndpoint { + if vStr := os.Getenv(key); vStr != "" { + u, err := url.Parse(vStr) + if err != nil { + otel.Handle(fmt.Errorf("invalid %s value %s: %w", key, vStr, err)) + continue + } + + return insecureFromScheme(s, u.Scheme) + } + } + return s + } +} + +// convHeaders converts the OTel environment variable header value s into a +// mapping of header key to value. If s is invalid, a partial result and an +// error are returned. +func convHeaders(s string) (map[string]string, error) { + out := make(map[string]string) + var err error + for _, header := range strings.Split(s, ",") { + rawKey, rawVal, found := strings.Cut(header, "=") + if !found { + err = errors.Join(err, fmt.Errorf("invalid header: %s", header)) + continue + } + + escKey, e := url.PathUnescape(rawKey) + if e != nil { + err = errors.Join(err, fmt.Errorf("invalid header key: %s", rawKey)) + continue + } + key := strings.TrimSpace(escKey) + + escVal, e := url.PathUnescape(rawVal) + if e != nil { + err = errors.Join(err, fmt.Errorf("invalid header value: %s", rawVal)) + continue + } + val := strings.TrimSpace(escVal) + + out[key] = val + } + return out, err +} + +// convDuration converts s into a duration of milliseconds.
If s does not +// contain an integer, 0 and an error are returned. +func convDuration(s string) (time.Duration, error) { + d, err := strconv.Atoi(s) + if err != nil { + return 0, err + } + // OTel durations are defined in milliseconds. + return time.Duration(d) * time.Millisecond, nil +} + +// loadEnvTLS returns a resolver that loads a *tls.Config from files defined by +// the OTLP TLS environment variables. This will load both the rootCAs and +// certificates used for mTLS. +// +// If the filepath defined is invalid or does not contain valid TLS files, an +// error is passed to the OTel ErrorHandler and no TLS configuration is +// provided. +func loadEnvTLS[T *tls.Config]() resolver[T] { + return func(s setting[T]) setting[T] { + if s.Set { + // Passed, valid, options have precedence. + return s + } + + var rootCAs *x509.CertPool + var err error + for _, key := range envTLSCert { + if v := os.Getenv(key); v != "" { + rootCAs, err = loadCertPool(v) + break + } + } + + var certs []tls.Certificate + for _, pair := range envTLSClient { + cert := os.Getenv(pair.Certificate) + key := os.Getenv(pair.Key) + if cert != "" && key != "" { + var e error + certs, e = loadCertificates(cert, key) + err = errors.Join(err, e) + break + } + } + + if err != nil { + err = fmt.Errorf("failed to load TLS: %w", err) + otel.Handle(err) + } else if rootCAs != nil || certs != nil { + s.Set = true + s.Value = &tls.Config{RootCAs: rootCAs, Certificates: certs} + } + return s + } +} + +// readFile is used for testing. +var readFile = os.ReadFile + +// loadCertPool loads and returns the *x509.CertPool found at path if it exists +// and is valid. Otherwise, nil and an error is returned. +func loadCertPool(path string) (*x509.CertPool, error) { + b, err := readFile(path) + if err != nil { + return nil, err + } + cp := x509.NewCertPool() + if ok := cp.AppendCertsFromPEM(b); !ok { + return nil, errors.New("certificate not added") + } + return cp, nil +} + +// loadCertificates loads and returns the tls.Certificate found at the given +// paths if they exist and are valid. Otherwise, nil and an error is returned. +func loadCertificates(certPath, keyPath string) ([]tls.Certificate, error) { + cert, err := readFile(certPath) + if err != nil { + return nil, err + } + key, err := readFile(keyPath) + if err != nil { + return nil, err + } + crt, err := tls.X509KeyPair(cert, key) + if err != nil { + return nil, err + } + return []tls.Certificate{crt}, nil +} + +// insecureFromScheme returns a setting indicating whether the connection +// should use client transport security. +// An empty scheme doesn't force the insecure setting. +func insecureFromScheme(prev setting[bool], scheme string) setting[bool] { + if scheme == "https" { + return newSetting(false) + } else if len(scheme) > 0 { + return newSetting(true) + } + + return prev +} + +func compressorToCompression(compressor string) Compression { + c, err := convCompression(compressor) + if err != nil { + otel.Handle(fmt.Errorf("%w, using no compression as default", err)) + return NoCompression + } + + return c +} + +// setting is a configuration setting value. +type setting[T any] struct { + Value T + Set bool +} + +// newSetting returns a new setting with the value set. +func newSetting[T any](value T) setting[T] { + return setting[T]{Value: value, Set: true} +} + +// resolver returns an updated setting after applying a resolution operation. +type resolver[T any] func(setting[T]) setting[T] + +// Resolve returns a resolved version of s.
+// +// It will apply all the passed fn in the order provided, chaining together the +// returned setting to the next input. The setting s is used as the initial +// argument to the first fn. +// +// Each fn needs to validate if it should apply given the Set state of the +// setting. This will not perform any checks on the set state when chaining +// functions. +func (s setting[T]) Resolve(fn ...resolver[T]) setting[T] { + for _, f := range fn { + s = f(s) + } + return s +} + +// getEnv returns a resolver that will apply an environment variable value +// associated with the first set key to a setting value. The conv function is +// used to convert between the environment variable value and the setting type. +// +// If the input setting to the resolver is set, the environment variable will +// not be applied. +// +// Any error returned from conv is sent to the OTel ErrorHandler and the +// setting will not be updated. +func getEnv[T any](keys []string, conv func(string) (T, error)) resolver[T] { + return func(s setting[T]) setting[T] { + if s.Set { + // Passed, valid, options have precedence. + return s + } + + for _, key := range keys { + if vStr := os.Getenv(key); vStr != "" { + v, err := conv(vStr) + if err == nil { + s.Value = v + s.Set = true + break + } + otel.Handle(fmt.Errorf("invalid %s value %s: %w", key, vStr, err)) + } + } + return s + } +} + +// fallback returns a resolver that will set a setting value to val if it is not +// already set. +// +// This is usually passed at the end of a resolver chain to ensure a default is +// applied if the setting has not already been set. +func fallback[T any](val T) resolver[T] { + return func(s setting[T]) setting[T] { + if !s.Set { + s.Value = val + s.Set = true + } + return s + } +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/doc.go new file mode 100644 index 00000000000..67cb814342d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/doc.go @@ -0,0 +1,63 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package otlploggrpc provides an OTLP log exporter using gRPC. The exporter uses gRPC to +transport OTLP protobuf payloads. + +All Exporters must be created with [New]. + +The environment variables described below can be used for configuration. + +OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_LOGS_ENDPOINT (default: "https://localhost:4317") - +target to which the exporter sends telemetry. +The target syntax is defined in https://github.com/grpc/grpc/blob/master/doc/naming.md. +The value must contain a scheme ("http" or "https") and host. +The value may additionally contain a port and a path. +The value should not contain a query string or fragment. +OTEL_EXPORTER_OTLP_LOGS_ENDPOINT takes precedence over OTEL_EXPORTER_OTLP_ENDPOINT. +The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithGRPCConn] options. + +OTEL_EXPORTER_OTLP_INSECURE, OTEL_EXPORTER_OTLP_LOGS_INSECURE (default: "false") - +setting "true" disables client transport security for the exporter's gRPC connection. +You can use this only when an endpoint is provided without a scheme. +OTEL_EXPORTER_OTLP_LOGS_INSECURE takes precedence over OTEL_EXPORTER_OTLP_INSECURE. +The configuration can be overridden by [WithInsecure], [WithGRPCConn] options.
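+ +For example (an illustrative sketch; the host below is a placeholder), a collector reachable over plain text could be targeted with + +	OTEL_EXPORTER_OTLP_LOGS_ENDPOINT=http://collector.example.com:4317 + +which is equivalent to passing [WithEndpointURL] with the same URL.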
+ +OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_LOGS_HEADERS (default: none) - +key-value pairs used as gRPC metadata associated with gRPC requests. +The value is expected to be represented in a format matching the [W3C Baggage HTTP Header Content Format], +except that additional semi-colon delimited metadata is not supported. +Example value: "key1=value1,key2=value2". +OTEL_EXPORTER_OTLP_LOGS_HEADERS takes precedence over OTEL_EXPORTER_OTLP_HEADERS. +The configuration can be overridden by the [WithHeaders] option. + +OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_LOGS_TIMEOUT (default: "10000") - +maximum time in milliseconds the OTLP exporter waits for each batch export. +OTEL_EXPORTER_OTLP_LOGS_TIMEOUT takes precedence over OTEL_EXPORTER_OTLP_TIMEOUT. +The configuration can be overridden by the [WithTimeout] option. + +OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_LOGS_COMPRESSION (default: none) - +the gRPC compressor the exporter uses. +Supported value: "gzip". +OTEL_EXPORTER_OTLP_LOGS_COMPRESSION takes precedence over OTEL_EXPORTER_OTLP_COMPRESSION. +The configuration can be overridden by [WithCompressor], [WithGRPCConn] options. + +OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE (default: none) - +the filepath to the trusted certificate to use when verifying a server's TLS credentials. +OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CERTIFICATE. +The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options. + +OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_LOGS_CLIENT_CERTIFICATE (default: none) - +the filepath to the client certificate/chain trust for client's private key to use in mTLS communication in PEM format. +OTEL_EXPORTER_OTLP_LOGS_CLIENT_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE. +The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options. + +OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_LOGS_CLIENT_KEY (default: none) - +the filepath to the client's private key to use in mTLS communication in PEM format. +OTEL_EXPORTER_OTLP_LOGS_CLIENT_KEY takes precedence over OTEL_EXPORTER_OTLP_CLIENT_KEY. +The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options. + +[W3C Baggage HTTP Header Content Format]: https://www.w3.org/TR/baggage/#header-content +*/ +package otlploggrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc" diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/exporter.go new file mode 100644 index 00000000000..66895c3a1a0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/exporter.go @@ -0,0 +1,93 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlploggrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc" + +import ( + "context" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform" + "go.opentelemetry.io/otel/sdk/log" + logpb "go.opentelemetry.io/proto/otlp/logs/v1" +) + +type logClient interface { + UploadLogs(ctx context.Context, rl []*logpb.ResourceLogs) error + Shutdown(context.Context) error +} + +// Exporter is an OpenTelemetry log Exporter. It transports log data encoded as +// OTLP protobufs using gRPC. +// All Exporters must be created with [New].
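+// +// A typical pipeline pairs the Exporter with a batching processor. The sketch +// below assumes sdklog is an alias for go.opentelemetry.io/otel/sdk/log and +// that ctx is in scope: +// +//	exp, err := otlploggrpc.New(ctx) +//	if err != nil { +//		// handle error +//	} +//	provider := sdklog.NewLoggerProvider( +//		sdklog.WithProcessor(sdklog.NewBatchProcessor(exp)), +//	)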
+type Exporter struct { + // Ensure synchronous access to the client across all functionality. + clientMu sync.Mutex + client logClient + + stopped atomic.Bool +} + +// Compile-time check Exporter implements [log.Exporter]. +var _ log.Exporter = (*Exporter)(nil) + +// New returns a new [Exporter]. +// +// It is recommended to use it with a [BatchProcessor] +// or other processor exporting records asynchronously. +func New(_ context.Context, options ...Option) (*Exporter, error) { + cfg := newConfig(options) + c, err := newClient(cfg) + if err != nil { + return nil, err + } + return newExporter(c), nil +} + +func newExporter(c logClient) *Exporter { + var e Exporter + e.client = c + return &e +} + +var transformResourceLogs = transform.ResourceLogs + +// Export transforms and transmits log records to an OTLP receiver. +// +// This method returns nil and drops records if called after Shutdown. +// This method returns an error if the export is canceled by the passed context. +func (e *Exporter) Export(ctx context.Context, records []log.Record) error { + if e.stopped.Load() { + return nil + } + + otlp := transformResourceLogs(records) + if otlp == nil { + return nil + } + + e.clientMu.Lock() + defer e.clientMu.Unlock() + return e.client.UploadLogs(ctx, otlp) +} + +// Shutdown shuts down the Exporter. Calls to Export or ForceFlush will perform +// no operation after this is called. +func (e *Exporter) Shutdown(ctx context.Context) error { + if e.stopped.Swap(true) { + return nil + } + + e.clientMu.Lock() + defer e.clientMu.Unlock() + + err := e.client.Shutdown(ctx) + e.client = newNoopClient() + return err +} + +// ForceFlush does nothing. The Exporter holds no state. +func (e *Exporter) ForceFlush(ctx context.Context) error { + return nil +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry/retry.go new file mode 100644 index 00000000000..f2da12382a0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry/retry.go @@ -0,0 +1,145 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/otlp/retry/retry.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package retry provides request retry functionality that can perform +// configurable exponential backoff for transient errors and honor any +// explicit throttle responses received. +package retry // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry" + +import ( + "context" + "fmt" + "time" + + "github.com/cenkalti/backoff/v4" +) + +// DefaultConfig are the recommended defaults to use. +var DefaultConfig = Config{ + Enabled: true, + InitialInterval: 5 * time.Second, + MaxInterval: 30 * time.Second, + MaxElapsedTime: time.Minute, +} + +// Config defines configuration for retrying batches in case of export failure +// using an exponential backoff. +type Config struct { + // Enabled indicates whether to retry sending batches in case of + // export failure. + Enabled bool + // InitialInterval is the time to wait after the first failure before + // retrying. + InitialInterval time.Duration + // MaxInterval is the upper bound on backoff interval. Once this value is + // reached the delay between consecutive retries will always be + // `MaxInterval`.
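+ // For example, with the default backoff multiplier of 1.5 used by + // RequestFunc below, an InitialInterval of 5s grows roughly as 5s, 7.5s, + // 11.25s, ... (with jitter) until capped by MaxInterval.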
+ MaxInterval time.Duration + // MaxElapsedTime is the maximum amount of time (including retries) spent + // trying to send a request/batch. Once this value is reached, the data + // is discarded. + MaxElapsedTime time.Duration +} + +// RequestFunc wraps a request with retry logic. +type RequestFunc func(context.Context, func(context.Context) error) error + +// EvaluateFunc returns if an error is retry-able and if an explicit throttle +// duration should be honored that was included in the error. +// +// The function must return true if the error argument is retry-able, +// otherwise it must return false for the first return parameter. +// +// The function must return a non-zero time.Duration if the error contains +// explicit throttle duration that should be honored, otherwise it must return +// a zero valued time.Duration. +type EvaluateFunc func(error) (bool, time.Duration) + +// RequestFunc returns a RequestFunc using the evaluate function to determine +// if requests can be retried and based on the exponential backoff +// configuration of c. +func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { + if !c.Enabled { + return func(ctx context.Context, fn func(context.Context) error) error { + return fn(ctx) + } + } + + return func(ctx context.Context, fn func(context.Context) error) error { + // Do not use NewExponentialBackOff since it calls Reset and the code here + // must call Reset after changing the InitialInterval (this saves an + // unnecessary call to Now). + b := &backoff.ExponentialBackOff{ + InitialInterval: c.InitialInterval, + RandomizationFactor: backoff.DefaultRandomizationFactor, + Multiplier: backoff.DefaultMultiplier, + MaxInterval: c.MaxInterval, + MaxElapsedTime: c.MaxElapsedTime, + Stop: backoff.Stop, + Clock: backoff.SystemClock, + } + b.Reset() + + for { + err := fn(ctx) + if err == nil { + return nil + } + + retryable, throttle := evaluate(err) + if !retryable { + return err + } + + bOff := b.NextBackOff() + if bOff == backoff.Stop { + return fmt.Errorf("max retry time elapsed: %w", err) + } + + // Wait for the greater of the backoff or throttle delay. + var delay time.Duration + if bOff > throttle { + delay = bOff + } else { + elapsed := b.GetElapsedTime() + if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime { + return fmt.Errorf("max retry time would elapse: %w", err) + } + delay = throttle + } + + if ctxErr := waitFunc(ctx, delay); ctxErr != nil { + return fmt.Errorf("%w: %w", ctxErr, err) + } + } + } +} + +// Allow override for testing. +var waitFunc = wait + +// wait takes the caller's context, and the amount of time to wait. It will +// return nil if the timer fires before or at the same time as the context's +// deadline. This indicates that the call can be retried. +func wait(ctx context.Context, delay time.Duration) error { + timer := time.NewTimer(delay) + defer timer.Stop() + + select { + case <-ctx.Done(): + // Handle the case where the timer and context deadline end + // simultaneously by prioritizing the timer expiration nil value + // response. 
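+ // (If both channels were ready, a bare select would pick one at random; + // the nested select below guarantees the timer result wins.)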
+ select { + case <-timer.C: + default: + return ctx.Err() + } + case <-timer.C: + } + + return nil +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform/log.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform/log.go new file mode 100644 index 00000000000..03e9d8c71cd --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform/log.go @@ -0,0 +1,390 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/otlp/otlplog/transform/log.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package transform provides transformation functionality from the +// sdk/log data-types into OTLP data-types. +package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform" + +import ( + "time" + + cpb "go.opentelemetry.io/proto/otlp/common/v1" + lpb "go.opentelemetry.io/proto/otlp/logs/v1" + rpb "go.opentelemetry.io/proto/otlp/resource/v1" + + "go.opentelemetry.io/otel/attribute" + api "go.opentelemetry.io/otel/log" + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/log" +) + +// ResourceLogs returns a slice of OTLP ResourceLogs generated from records. +func ResourceLogs(records []log.Record) []*lpb.ResourceLogs { + if len(records) == 0 { + return nil + } + + resMap := make(map[attribute.Distinct]*lpb.ResourceLogs) + + type key struct { + r attribute.Distinct + is instrumentation.Scope + } + scopeMap := make(map[key]*lpb.ScopeLogs) + + var resources int + for _, r := range records { + res := r.Resource() + rKey := res.Equivalent() + scope := r.InstrumentationScope() + k := key{ + r: rKey, + is: scope, + } + sl, iOk := scopeMap[k] + if !iOk { + sl = new(lpb.ScopeLogs) + var emptyScope instrumentation.Scope + if scope != emptyScope { + sl.Scope = &cpb.InstrumentationScope{ + Name: scope.Name, + Version: scope.Version, + Attributes: AttrIter(scope.Attributes.Iter()), + } + sl.SchemaUrl = scope.SchemaURL + } + scopeMap[k] = sl + } + + sl.LogRecords = append(sl.LogRecords, LogRecord(r)) + rl, rOk := resMap[rKey] + if !rOk { + resources++ + rl = new(lpb.ResourceLogs) + if res.Len() > 0 { + rl.Resource = &rpb.Resource{ + Attributes: AttrIter(res.Iter()), + } + } + rl.SchemaUrl = res.SchemaURL() + resMap[rKey] = rl + } + if !iOk { + rl.ScopeLogs = append(rl.ScopeLogs, sl) + } + } + + // Transform the categorized map into a slice + resLogs := make([]*lpb.ResourceLogs, 0, resources) + for _, rl := range resMap { + resLogs = append(resLogs, rl) + } + + return resLogs +} + +// LogRecord returns an OTLP LogRecord generated from record. +func LogRecord(record log.Record) *lpb.LogRecord { + r := &lpb.LogRecord{ + TimeUnixNano: timeUnixNano(record.Timestamp()), + ObservedTimeUnixNano: timeUnixNano(record.ObservedTimestamp()), + SeverityNumber: SeverityNumber(record.Severity()), + SeverityText: record.SeverityText(), + Body: LogAttrValue(record.Body()), + Attributes: make([]*cpb.KeyValue, 0, record.AttributesLen()), + Flags: uint32(record.TraceFlags()), + // TODO: DroppedAttributesCount: /* ...
*/, + } + record.WalkAttributes(func(kv api.KeyValue) bool { + r.Attributes = append(r.Attributes, LogAttr(kv)) + return true + }) + if tID := record.TraceID(); tID.IsValid() { + r.TraceId = tID[:] + } + if sID := record.SpanID(); sID.IsValid() { + r.SpanId = sID[:] + } + return r +} + +// timeUnixNano returns t as a Unix time, the number of nanoseconds elapsed +// since January 1, 1970 UTC as uint64. The result is undefined if the Unix +// time in nanoseconds cannot be represented by an int64 (a date before the +// year 1678 or after 2262). timeUnixNano on the zero Time returns 0. The +// result does not depend on the location associated with t. +func timeUnixNano(t time.Time) uint64 { + nano := t.UnixNano() + if nano < 0 { + return 0 + } + return uint64(nano) // nolint:gosec // Overflow checked. +} + +// AttrIter transforms an [attribute.Iterator] into OTLP key-values. +func AttrIter(iter attribute.Iterator) []*cpb.KeyValue { + l := iter.Len() + if l == 0 { + return nil + } + + out := make([]*cpb.KeyValue, 0, l) + for iter.Next() { + out = append(out, Attr(iter.Attribute())) + } + return out +} + +// Attrs transforms a slice of [attribute.KeyValue] into OTLP key-values. +func Attrs(attrs []attribute.KeyValue) []*cpb.KeyValue { + if len(attrs) == 0 { + return nil + } + + out := make([]*cpb.KeyValue, 0, len(attrs)) + for _, kv := range attrs { + out = append(out, Attr(kv)) + } + return out +} + +// Attr transforms an [attribute.KeyValue] into an OTLP key-value. +func Attr(kv attribute.KeyValue) *cpb.KeyValue { + return &cpb.KeyValue{Key: string(kv.Key), Value: AttrValue(kv.Value)} +} + +// AttrValue transforms an [attribute.Value] into an OTLP AnyValue. +func AttrValue(v attribute.Value) *cpb.AnyValue { + av := new(cpb.AnyValue) + switch v.Type() { + case attribute.BOOL: + av.Value = &cpb.AnyValue_BoolValue{ + BoolValue: v.AsBool(), + } + case attribute.BOOLSLICE: + av.Value = &cpb.AnyValue_ArrayValue{ + ArrayValue: &cpb.ArrayValue{ + Values: boolSliceValues(v.AsBoolSlice()), + }, + } + case attribute.INT64: + av.Value = &cpb.AnyValue_IntValue{ + IntValue: v.AsInt64(), + } + case attribute.INT64SLICE: + av.Value = &cpb.AnyValue_ArrayValue{ + ArrayValue: &cpb.ArrayValue{ + Values: int64SliceValues(v.AsInt64Slice()), + }, + } + case attribute.FLOAT64: + av.Value = &cpb.AnyValue_DoubleValue{ + DoubleValue: v.AsFloat64(), + } + case attribute.FLOAT64SLICE: + av.Value = &cpb.AnyValue_ArrayValue{ + ArrayValue: &cpb.ArrayValue{ + Values: float64SliceValues(v.AsFloat64Slice()), + }, + } + case attribute.STRING: + av.Value = &cpb.AnyValue_StringValue{ + StringValue: v.AsString(), + } + case attribute.STRINGSLICE: + av.Value = &cpb.AnyValue_ArrayValue{ + ArrayValue: &cpb.ArrayValue{ + Values: stringSliceValues(v.AsStringSlice()), + }, + } + default: + av.Value = &cpb.AnyValue_StringValue{ + StringValue: "INVALID", + } + } + return av +} + +func boolSliceValues(vals []bool) []*cpb.AnyValue { + converted := make([]*cpb.AnyValue, len(vals)) + for i, v := range vals { + converted[i] = &cpb.AnyValue{ + Value: &cpb.AnyValue_BoolValue{ + BoolValue: v, + }, + } + } + return converted +} + +func int64SliceValues(vals []int64) []*cpb.AnyValue { + converted := make([]*cpb.AnyValue, len(vals)) + for i, v := range vals { + converted[i] = &cpb.AnyValue{ + Value: &cpb.AnyValue_IntValue{ + IntValue: v, + }, + } + } + return converted +} + +func float64SliceValues(vals []float64) []*cpb.AnyValue { + converted := make([]*cpb.AnyValue, len(vals)) + for i, v := range vals { + converted[i] = &cpb.AnyValue{ + Value: 
&cpb.AnyValue_DoubleValue{ + DoubleValue: v, + }, + } + } + return converted +} + +func stringSliceValues(vals []string) []*cpb.AnyValue { + converted := make([]*cpb.AnyValue, len(vals)) + for i, v := range vals { + converted[i] = &cpb.AnyValue{ + Value: &cpb.AnyValue_StringValue{ + StringValue: v, + }, + } + } + return converted +} + +// LogAttrs transforms a slice of [api.KeyValue] into OTLP key-values. +func LogAttrs(attrs []api.KeyValue) []*cpb.KeyValue { + if len(attrs) == 0 { + return nil + } + + out := make([]*cpb.KeyValue, 0, len(attrs)) + for _, kv := range attrs { + out = append(out, LogAttr(kv)) + } + return out +} + +// LogAttr transforms an [api.KeyValue] into an OTLP key-value. +func LogAttr(attr api.KeyValue) *cpb.KeyValue { + return &cpb.KeyValue{ + Key: attr.Key, + Value: LogAttrValue(attr.Value), + } +} + +// LogAttrValues transforms a slice of [api.Value] into an OTLP []AnyValue. +func LogAttrValues(vals []api.Value) []*cpb.AnyValue { + if len(vals) == 0 { + return nil + } + + out := make([]*cpb.AnyValue, 0, len(vals)) + for _, v := range vals { + out = append(out, LogAttrValue(v)) + } + return out +} + +// LogAttrValue transforms an [api.Value] into an OTLP AnyValue. +func LogAttrValue(v api.Value) *cpb.AnyValue { + av := new(cpb.AnyValue) + switch v.Kind() { + case api.KindBool: + av.Value = &cpb.AnyValue_BoolValue{ + BoolValue: v.AsBool(), + } + case api.KindInt64: + av.Value = &cpb.AnyValue_IntValue{ + IntValue: v.AsInt64(), + } + case api.KindFloat64: + av.Value = &cpb.AnyValue_DoubleValue{ + DoubleValue: v.AsFloat64(), + } + case api.KindString: + av.Value = &cpb.AnyValue_StringValue{ + StringValue: v.AsString(), + } + case api.KindBytes: + av.Value = &cpb.AnyValue_BytesValue{ + BytesValue: v.AsBytes(), + } + case api.KindSlice: + av.Value = &cpb.AnyValue_ArrayValue{ + ArrayValue: &cpb.ArrayValue{ + Values: LogAttrValues(v.AsSlice()), + }, + } + case api.KindMap: + av.Value = &cpb.AnyValue_KvlistValue{ + KvlistValue: &cpb.KeyValueList{ + Values: LogAttrs(v.AsMap()), + }, + } + default: + av.Value = &cpb.AnyValue_StringValue{ + StringValue: "INVALID", + } + } + return av +} + +// SeverityNumber transforms a [log.Severity] into an OTLP SeverityNumber.
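+// For example, an api.SeverityInfo record maps to SEVERITY_NUMBER_INFO, and +// any value outside the defined range falls back to +// SEVERITY_NUMBER_UNSPECIFIED.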
+func SeverityNumber(s api.Severity) lpb.SeverityNumber { + switch s { + case api.SeverityTrace: + return lpb.SeverityNumber_SEVERITY_NUMBER_TRACE + case api.SeverityTrace2: + return lpb.SeverityNumber_SEVERITY_NUMBER_TRACE2 + case api.SeverityTrace3: + return lpb.SeverityNumber_SEVERITY_NUMBER_TRACE3 + case api.SeverityTrace4: + return lpb.SeverityNumber_SEVERITY_NUMBER_TRACE4 + case api.SeverityDebug: + return lpb.SeverityNumber_SEVERITY_NUMBER_DEBUG + case api.SeverityDebug2: + return lpb.SeverityNumber_SEVERITY_NUMBER_DEBUG2 + case api.SeverityDebug3: + return lpb.SeverityNumber_SEVERITY_NUMBER_DEBUG3 + case api.SeverityDebug4: + return lpb.SeverityNumber_SEVERITY_NUMBER_DEBUG4 + case api.SeverityInfo: + return lpb.SeverityNumber_SEVERITY_NUMBER_INFO + case api.SeverityInfo2: + return lpb.SeverityNumber_SEVERITY_NUMBER_INFO2 + case api.SeverityInfo3: + return lpb.SeverityNumber_SEVERITY_NUMBER_INFO3 + case api.SeverityInfo4: + return lpb.SeverityNumber_SEVERITY_NUMBER_INFO4 + case api.SeverityWarn: + return lpb.SeverityNumber_SEVERITY_NUMBER_WARN + case api.SeverityWarn2: + return lpb.SeverityNumber_SEVERITY_NUMBER_WARN2 + case api.SeverityWarn3: + return lpb.SeverityNumber_SEVERITY_NUMBER_WARN3 + case api.SeverityWarn4: + return lpb.SeverityNumber_SEVERITY_NUMBER_WARN4 + case api.SeverityError: + return lpb.SeverityNumber_SEVERITY_NUMBER_ERROR + case api.SeverityError2: + return lpb.SeverityNumber_SEVERITY_NUMBER_ERROR2 + case api.SeverityError3: + return lpb.SeverityNumber_SEVERITY_NUMBER_ERROR3 + case api.SeverityError4: + return lpb.SeverityNumber_SEVERITY_NUMBER_ERROR4 + case api.SeverityFatal: + return lpb.SeverityNumber_SEVERITY_NUMBER_FATAL + case api.SeverityFatal2: + return lpb.SeverityNumber_SEVERITY_NUMBER_FATAL2 + case api.SeverityFatal3: + return lpb.SeverityNumber_SEVERITY_NUMBER_FATAL3 + case api.SeverityFatal4: + return lpb.SeverityNumber_SEVERITY_NUMBER_FATAL4 + } + return lpb.SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/version.go new file mode 100644 index 00000000000..0c60e1b0613 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/version.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlploggrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc" + +// Version is the current release version of the OpenTelemetry OTLP over gRPC logs exporter in use. 
+func Version() string { + return "0.10.0" +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/client.go index 04d46d42ea0..279b4be4f60 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/client.go @@ -14,6 +14,7 @@ import ( "net/http" "net/url" "strconv" + "strings" "sync" "time" @@ -143,15 +144,20 @@ func (c *httpClient) uploadLogs(ctx context.Context, data []*logpb.ResourceLogs) resp, err := c.client.Do(request.Request) var urlErr *url.Error if errors.As(err, &urlErr) && urlErr.Temporary() { - return newResponseError(http.Header{}) + return newResponseError(http.Header{}, err) } if err != nil { return err } + if resp != nil && resp.Body != nil { + defer func() { + if err := resp.Body.Close(); err != nil { + otel.Handle(err) + } + }() + } - var rErr error - switch sc := resp.StatusCode; { - case sc >= 200 && sc <= 299: + if sc := resp.StatusCode; sc >= 200 && sc <= 299 { // Success, do not retry. // Read the partial success message, if any. @@ -179,26 +185,34 @@ func (c *httpClient) uploadLogs(ctx context.Context, data []*logpb.ResourceLogs) } } return nil - case sc == http.StatusTooManyRequests, - sc == http.StatusBadGateway, - sc == http.StatusServiceUnavailable, - sc == http.StatusGatewayTimeout: - // Retry-able failure. - rErr = newResponseError(resp.Header) - - // Going to retry, drain the body to reuse the connection. - if _, err := io.Copy(io.Discard, resp.Body); err != nil { - _ = resp.Body.Close() - return err - } - default: - rErr = fmt.Errorf("failed to send logs to %s: %s", request.URL, resp.Status) } - - if err := resp.Body.Close(); err != nil { + // Error cases. + + // server may return a message with the response + // body, so we read it to include in the error + // message to be returned. It will help in + // debugging the actual issue. + var respData bytes.Buffer + if _, err := io.Copy(&respData, resp.Body); err != nil { return err } - return rErr + respStr := strings.TrimSpace(respData.String()) + if len(respStr) == 0 { + respStr = "(empty)" + } + bodyErr := fmt.Errorf("body: %s", respStr) + + switch resp.StatusCode { + case http.StatusTooManyRequests, + http.StatusBadGateway, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout: + // Retryable failure. + return newResponseError(resp.Header, bodyErr) + default: + // Non-retryable failure. + return fmt.Errorf("failed to send logs to %s: %s (%w)", request.URL, resp.Status, bodyErr) + } }) } @@ -266,24 +280,50 @@ func (r *request) reset(ctx context.Context) { // retryableError represents a request failure that can be retried. type retryableError struct { throttle int64 + err error } // newResponseError returns a retryableError and will extract any explicit -// throttle delay contained in headers. -func newResponseError(header http.Header) error { +// throttle delay contained in headers. The returned error wraps wrapped +// if it is not nil. 
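+// For example, a 429 response carrying a "Retry-After: 15" header yields a +// retryableError whose throttle field records the parsed value.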
+func newResponseError(header http.Header, wrapped error) error { var rErr retryableError if v := header.Get("Retry-After"); v != "" { if t, err := strconv.ParseInt(v, 10, 64); err == nil { rErr.throttle = t } } + + rErr.err = wrapped return rErr } func (e retryableError) Error() string { + if e.err != nil { + return fmt.Sprintf("retry-able request failure: %v", e.err.Error()) + } + return "retry-able request failure" } +func (e retryableError) Unwrap() error { + return e.err +} + +func (e retryableError) As(target interface{}) bool { + if e.err == nil { + return false + } + + switch v := target.(type) { + case **retryableError: + *v = &e + return true + default: + return false + } +} + // evaluate returns if err is retry-able. If it is and it includes an explicit // throttling delay, that delay is also returned. func evaluate(err error) (bool, time.Duration) { diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/config.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/config.go index 383890d91d5..bfe768091e3 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/config.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/config.go @@ -143,8 +143,12 @@ func newConfig(options []Option) config { // // If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_LOGS_ENDPOINT // environment variable is set, and this option is not passed, that variable -// value will be used. If both are set, OTEL_EXPORTER_OTLP_LOGS_ENDPOINT -// will take precedence. +// value will be used. If both environment variables are set, +// OTEL_EXPORTER_OTLP_LOGS_ENDPOINT will take precedence. If an environment +// variable is set, and this option is passed, this option will take precedence. +// +// If both this option and WithEndpointURL are used, the last used option will +// take precedence. // // By default, if an environment variable is not set, and this option is not // passed, "localhost:4318" will be used. @@ -159,8 +163,9 @@ func WithEndpoint(endpoint string) Option { // // If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_LOGS_ENDPOINT // environment variable is set, and this option is not passed, that variable -// value will be used. If both are set, OTEL_EXPORTER_OTLP_LOGS_ENDPOINT -// will take precedence. +// value will be used. If both environment variables are set, +// OTEL_EXPORTER_OTLP_LOGS_ENDPOINT will take precedence. If an environment +// variable is set, and this option is passed, this option will take precedence. // // If both this option and WithEndpoint are used, the last used option will // take precedence. @@ -178,11 +183,7 @@ func WithEndpointURL(rawURL string) Option { return fnOpt(func(c config) config { c.endpoint = newSetting(u.Host) c.path = newSetting(u.Path) - if u.Scheme != "https" { - c.insecure = newSetting(true) - } else { - c.insecure = newSetting(false) - } + c.insecure = newSetting(u.Scheme != "https") return c }) } @@ -372,7 +373,7 @@ func (s setting[T]) Resolve(fn ...resolver[T]) setting[T] { return s } -// loadEnvTLS returns a resolver that loads a *tls.Config from files defeind by +// loadEnvTLS returns a resolver that loads a *tls.Config from files defined by // the OTLP TLS environment variables. This will load both the rootCAs and // certificates used for mTLS. 
// diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/doc.go index 87e55efe091..2607e3b9b68 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/doc.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/doc.go @@ -22,7 +22,7 @@ target URL to which the exporter sends telemetry. The value must contain a scheme ("http" or "https") and host. The value may additionally contain a port and a path. The value should not contain a query string or fragment. -The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WitnInsecure], and [WithURLPath] options. +The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithURLPath] options. OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_LOGS_HEADERS (default: none) - key-value pairs used as headers associated with HTTP requests. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/exporter.go index deee36c001c..f1c8d3ae0a7 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/exporter.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/exporter.go @@ -23,6 +23,9 @@ type Exporter struct { var _ log.Exporter = (*Exporter)(nil) // New returns a new [Exporter]. +// +// It is recommended to use it with a [BatchProcessor] +// or other processor exporting records asynchronously. func New(_ context.Context, options ...Option) (*Exporter, error) { cfg := newConfig(options) c, err := newHTTPClient(cfg) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/transform/log.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/transform/log.go index 232172eb6b9..4b52a14ebc6 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/transform/log.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/transform/log.go @@ -9,7 +9,6 @@ package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/transform" import ( - "sync" "time" cpb "go.opentelemetry.io/proto/otlp/common/v1" @@ -28,31 +27,42 @@ func ResourceLogs(records []log.Record) []*lpb.ResourceLogs { return nil } - resMap := resourceLogsMapPool.Get().(map[attribute.Distinct]*lpb.ResourceLogs) - defer func() { - clear(resMap) - resourceLogsMapPool.Put(resMap) - }() - resourceLogsMap(&resMap, records) + resMap := make(map[attribute.Distinct]*lpb.ResourceLogs) - out := make([]*lpb.ResourceLogs, 0, len(resMap)) - for _, rl := range resMap { - out = append(out, rl) + type key struct { + r attribute.Distinct + is instrumentation.Scope } - return out -} - -var resourceLogsMapPool = sync.Pool{ - New: func() any { - return make(map[attribute.Distinct]*lpb.ResourceLogs) - }, -} + scopeMap := make(map[key]*lpb.ScopeLogs) -func resourceLogsMap(dst *map[attribute.Distinct]*lpb.ResourceLogs, records []log.Record) { + var resources int for _, r := range records { res := r.Resource() - rl, ok := (*dst)[res.Equivalent()] - if !ok { + rKey := res.Equivalent() + scope := r.InstrumentationScope() + k := key{ + r: rKey, + is: scope, + } + sl, iOk := scopeMap[k] + if !iOk { + sl = new(lpb.ScopeLogs) + var emptyScope instrumentation.Scope + if scope != emptyScope { + sl.Scope = &cpb.InstrumentationScope{ + Name: scope.Name, 
+ Version: scope.Version, + Attributes: AttrIter(scope.Attributes.Iter()), + } + sl.SchemaUrl = scope.SchemaURL + } + scopeMap[k] = sl + } + + sl.LogRecords = append(sl.LogRecords, LogRecord(r)) + rl, rOk := resMap[rKey] + if !rOk { + resources++ rl = new(lpb.ResourceLogs) if res.Len() > 0 { rl.Resource = &rpb.Resource{ @@ -60,52 +70,20 @@ func resourceLogsMap(dst *map[attribute.Distinct]*lpb.ResourceLogs, records []lo } } rl.SchemaUrl = res.SchemaURL() - (*dst)[res.Equivalent()] = rl + resMap[rKey] = rl + } + if !iOk { + rl.ScopeLogs = append(rl.ScopeLogs, sl) } - rl.ScopeLogs = ScopeLogs(records) } -} -// ScopeLogs returns a slice of OTLP ScopeLogs generated from recoreds. -func ScopeLogs(records []log.Record) []*lpb.ScopeLogs { - scopeMap := scopeLogsMapPool.Get().(map[instrumentation.Scope]*lpb.ScopeLogs) - defer func() { - clear(scopeMap) - scopeLogsMapPool.Put(scopeMap) - }() - scopeLogsMap(&scopeMap, records) - - out := make([]*lpb.ScopeLogs, 0, len(scopeMap)) - for _, sl := range scopeMap { - out = append(out, sl) + // Transform the categorized map into a slice + resLogs := make([]*lpb.ResourceLogs, 0, resources) + for _, rl := range resMap { + resLogs = append(resLogs, rl) } - return out -} -var scopeLogsMapPool = sync.Pool{ - New: func() any { - return make(map[instrumentation.Scope]*lpb.ScopeLogs) - }, -} - -func scopeLogsMap(dst *map[instrumentation.Scope]*lpb.ScopeLogs, records []log.Record) { - for _, r := range records { - scope := r.InstrumentationScope() - sl, ok := (*dst)[scope] - if !ok { - sl = new(lpb.ScopeLogs) - var emptyScope instrumentation.Scope - if scope != emptyScope { - sl.Scope = &cpb.InstrumentationScope{ - Name: scope.Name, - Version: scope.Version, - } - sl.SchemaUrl = scope.SchemaURL - } - (*dst)[scope] = sl - } - sl.LogRecords = append(sl.LogRecords, LogRecord(r)) - } + return resLogs } // LogRecord returns an OTLP LogRecord generated from record. @@ -139,10 +117,11 @@ func LogRecord(record log.Record) *lpb.LogRecord { // year 1678 or after 2262). timeUnixNano on the zero Time returns 0. The // result does not depend on the location associated with t. func timeUnixNano(t time.Time) uint64 { - if t.IsZero() { + nano := t.UnixNano() + if nano < 0 { return 0 } - return uint64(t.UnixNano()) + return uint64(nano) // nolint:gosec // Overflow checked. } // AttrIter transforms an [attribute.Iterator] into OTLP key-values. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/version.go index 48deb18678c..f25b13bd4a8 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/version.go @@ -5,5 +5,5 @@ package otlploghttp // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/o // Version is the current release version of the OpenTelemetry OTLP over HTTP/protobuf logs exporter in use. 
func Version() string { - return "0.4.0" + return "0.10.0" } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go index 428cfea2334..e0fa0570a81 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go @@ -155,7 +155,12 @@ func (c *client) exportContext(parent context.Context) (context.Context, context } if c.metadata.Len() > 0 { - ctx = metadata.NewOutgoingContext(ctx, c.metadata) + md := c.metadata + if outMD, ok := metadata.FromOutgoingContext(ctx); ok { + md = metadata.Join(md, outMD) + } + + ctx = metadata.NewOutgoingContext(ctx, md) } return ctx, cancel diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go index 38d7d60d403..db6e3714b3b 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go @@ -66,8 +66,9 @@ func WithInsecure() Option { // // If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT // environment variable is set, and this option is not passed, that variable -// value will be used. If both are set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT -// will take precedence. +// value will be used. If both environment variables are set, +// OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence. If an environment +// variable is set, and this option is passed, this option will take precedence. // // If both this option and WithEndpointURL are used, the last used option will // take precedence. @@ -84,8 +85,9 @@ func WithEndpoint(endpoint string) Option { // // If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT // environment variable is set, and this option is not passed, that variable -// value will be used. If both are set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT -// will take precedence. +// value will be used. If both environment variables are set, +// OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence. If an environment +// variable is set, and this option is passed, this option will take precedence. // // If both this option and WithEndpoint are used, the last used option will // take precedence. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/doc.go index 3d74ef1a01d..dcd8de5df4e 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/doc.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/doc.go @@ -12,9 +12,8 @@ The environment variables described below can be used for configuration. OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT (default: "https://localhost:4317") - target to which the exporter sends telemetry. The target syntax is defined in https://github.com/grpc/grpc/blob/master/doc/naming.md. -The value must contain a host. -The value may additionally a port, a scheme, and a path. -The value accepts "http" and "https" scheme. +The value must contain a scheme ("http" or "https") and host. +The value may additionally contain a port, and a path. The value should not contain a query string or fragment. 
OTEL_EXPORTER_OTLP_METRICS_ENDPOINT takes precedence over OTEL_EXPORTER_OTLP_ENDPOINT. The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithGRPCConn] options. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go index 98afc0b1e9d..3977c1f8a6c 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go @@ -5,6 +5,7 @@ package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpme import ( "context" + "errors" "fmt" "sync" @@ -114,7 +115,7 @@ func (e *Exporter) Shutdown(ctx context.Context) error { return err } -var errShutdown = fmt.Errorf("gRPC exporter is shutdown") +var errShutdown = errors.New("gRPC exporter is shutdown") type shutdownClient struct{} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go index b2735ba923c..261f5502682 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go @@ -15,6 +15,7 @@ import ( "strconv" "strings" "time" + "unicode" "go.opentelemetry.io/otel/internal/global" ) @@ -163,12 +164,16 @@ func stringToHeader(value string) map[string]string { global.Error(errors.New("missing '="), "parse headers", "input", header) continue } - name, err := url.PathUnescape(n) - if err != nil { - global.Error(err, "escape header key", "key", n) + + trimmedName := strings.TrimSpace(n) + + // Validate the key. + if !isValidHeaderKey(trimmedName) { + global.Error(errors.New("invalid header key"), "parse headers", "key", trimmedName) continue } - trimmedName := strings.TrimSpace(name) + + // Only decode the value. value, err := url.PathUnescape(v) if err != nil { global.Error(err, "escape header value", "value", v) @@ -189,3 +194,22 @@ func createCertPool(certBytes []byte) (*x509.CertPool, error) { } return cp, nil } + +func isValidHeaderKey(key string) bool { + if key == "" { + return false + } + for _, c := range key { + if !isTokenChar(c) { + return false + } + } + return true +} + +func isTokenChar(c rune) bool { + return c <= unicode.MaxASCII && (unicode.IsLetter(c) || + unicode.IsDigit(c) || + c == '!' || c == '#' || c == '$' || c == '%' || c == '&' || c == '\'' || c == '*' || + c == '+' || c == '-' || c == '.' 
|| c == '^' || c == '_' || c == '`' || c == '|' || c == '~') +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go index b6ed9a2bb65..2ac8db5a887 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go @@ -111,7 +111,7 @@ func cleanPath(urlPath string, defaultPath string) string { return defaultPath } if !path.IsAbs(tmp) { - tmp = fmt.Sprintf("/%s", tmp) + tmp = "/" + tmp } return tmp } @@ -139,7 +139,7 @@ func NewGRPCConfig(opts ...GRPCOption) Config { if cfg.ServiceConfig != "" { cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig)) } - // Priroritize GRPCCredentials over Insecure (passing both is an error). + // Prioritize GRPCCredentials over Insecure (passing both is an error). if cfg.Metrics.GRPCCredentials != nil { cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Metrics.GRPCCredentials)) } else if cfg.Metrics.Insecure { @@ -287,9 +287,7 @@ func WithEndpointURL(v string) GenericOption { cfg.Metrics.Endpoint = u.Host cfg.Metrics.URLPath = u.Path - if u.Scheme != "https" { - cfg.Metrics.Insecure = true - } + cfg.Metrics.Insecure = u.Scheme != "https" return cfg }) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/tls.go index 0229ac80bef..03e7fbcdfb5 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/tls.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/tls.go @@ -14,7 +14,7 @@ import ( ) // ReadTLSConfigFromFile reads a PEM certificate file and creates -// a tls.Config that will use this certifate to verify a server certificate. +// a tls.Config that will use this certificate to verify a server certificate. 
func ReadTLSConfigFromFile(path string) (*tls.Config, error) { b, err := os.ReadFile(path) if err != nil { diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go index 975e3b7aa1a..abf7f021960 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go @@ -46,8 +46,9 @@ func ScopeMetrics(sms []metricdata.ScopeMetrics) ([]*mpb.ScopeMetrics, error) { out = append(out, &mpb.ScopeMetrics{ Scope: &cpb.InstrumentationScope{ - Name: sm.Scope.Name, - Version: sm.Scope.Version, + Name: sm.Scope.Name, + Version: sm.Scope.Version, + Attributes: AttrIter(sm.Scope.Attributes.Iter()), }, Metrics: ms, SchemaUrl: sm.Scope.SchemaURL, @@ -83,13 +84,13 @@ func metric(m metricdata.Metrics) (*mpb.Metric, error) { } switch a := m.Data.(type) { case metricdata.Gauge[int64]: - out.Data = Gauge[int64](a) + out.Data = Gauge(a) case metricdata.Gauge[float64]: - out.Data = Gauge[float64](a) + out.Data = Gauge(a) case metricdata.Sum[int64]: - out.Data, err = Sum[int64](a) + out.Data, err = Sum(a) case metricdata.Sum[float64]: - out.Data, err = Sum[float64](a) + out.Data, err = Sum(a) case metricdata.Histogram[int64]: out.Data, err = Histogram(a) case metricdata.Histogram[float64]: @@ -279,10 +280,7 @@ func Temporality(t metricdata.Temporality) (mpb.AggregationTemporality, error) { // timeUnixNano on the zero Time returns 0. // The result does not depend on the location associated with t. func timeUnixNano(t time.Time) uint64 { - if t.IsZero() { - return 0 - } - return uint64(t.UnixNano()) + return uint64(max(0, t.UnixNano())) // nolint:gosec // Overflow checked. } // Exemplars returns a slice of OTLP Exemplars generated from exemplars. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go index a731860f5c0..da1f9e7fc72 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go @@ -5,5 +5,5 @@ package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpme // Version is the current release version of the OpenTelemetry OTLP over gRPC metrics exporter in use. 
func Version() string { - return "1.28.0" + return "1.34.0" } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go index 205594b7f34..86da30e3754 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go @@ -14,6 +14,7 @@ import ( "net/http" "net/url" "strconv" + "strings" "sync" "time" @@ -146,15 +147,20 @@ func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.Resou resp, err := c.httpClient.Do(request.Request) var urlErr *url.Error if errors.As(err, &urlErr) && urlErr.Temporary() { - return newResponseError(http.Header{}) + return newResponseError(http.Header{}, err) } if err != nil { return err } + if resp != nil && resp.Body != nil { + defer func() { + if err := resp.Body.Close(); err != nil { + otel.Handle(err) + } + }() + } - var rErr error - switch sc := resp.StatusCode; { - case sc >= 200 && sc <= 299: + if sc := resp.StatusCode; sc >= 200 && sc <= 299 { // Success, do not retry. // Read the partial success message, if any. @@ -182,26 +188,34 @@ func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.Resou } } return nil - case sc == http.StatusTooManyRequests, - sc == http.StatusBadGateway, - sc == http.StatusServiceUnavailable, - sc == http.StatusGatewayTimeout: - // Retry-able failure. - rErr = newResponseError(resp.Header) - - // Going to retry, drain the body to reuse the connection. - if _, err := io.Copy(io.Discard, resp.Body); err != nil { - _ = resp.Body.Close() - return err - } - default: - rErr = fmt.Errorf("failed to send metrics to %s: %s", request.URL, resp.Status) } - - if err := resp.Body.Close(); err != nil { + // Error cases. + + // server may return a message with the response + // body, so we read it to include in the error + // message to be returned. It will help in + // debugging the actual issue. + var respData bytes.Buffer + if _, err := io.Copy(&respData, resp.Body); err != nil { return err } - return rErr + respStr := strings.TrimSpace(respData.String()) + if len(respStr) == 0 { + respStr = "(empty)" + } + bodyErr := fmt.Errorf("body: %s", respStr) + + switch resp.StatusCode { + case http.StatusTooManyRequests, + http.StatusBadGateway, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout: + // Retryable failure. + return newResponseError(resp.Header, bodyErr) + default: + // Non-retryable failure. + return fmt.Errorf("failed to send metrics to %s: %s (%w)", request.URL, resp.Status, bodyErr) + } }) } @@ -269,24 +283,50 @@ func (r *request) reset(ctx context.Context) { // retryableError represents a request failure that can be retried. type retryableError struct { throttle int64 + err error } // newResponseError returns a retryableError and will extract any explicit -// throttle delay contained in headers. -func newResponseError(header http.Header) error { +// throttle delay contained in headers. The returned error wraps wrapped +// if it is not nil. 
+func newResponseError(header http.Header, wrapped error) error { var rErr retryableError if v := header.Get("Retry-After"); v != "" { if t, err := strconv.ParseInt(v, 10, 64); err == nil { rErr.throttle = t } } + + rErr.err = wrapped return rErr } func (e retryableError) Error() string { + if e.err != nil { + return "retry-able request failure: " + e.err.Error() + } + return "retry-able request failure" } +func (e retryableError) Unwrap() error { + return e.err +} + +func (e retryableError) As(target interface{}) bool { + if e.err == nil { + return false + } + + switch v := target.(type) { + case **retryableError: + *v = &e + return true + default: + return false + } +} + // evaluate returns if err is retry-able. If it is and it includes an explicit // throttling delay, that delay is also returned. func evaluate(err error) (bool, time.Duration) { diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/config.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/config.go index 4e08d9293da..bf05adcf1b1 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/config.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/config.go @@ -63,8 +63,9 @@ func (w wrappedOption) applyHTTPOption(cfg oconf.Config) oconf.Config { // // If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT // environment variable is set, and this option is not passed, that variable -// value will be used. If both are set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT -// will take precedence. +// value will be used. If both environment variables are set, +// OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence. If an environment +// variable is set, and this option is passed, this option will take precedence. // // By default, if an environment variable is not set, and this option is not // passed, "localhost:4318" will be used. @@ -76,8 +77,9 @@ func WithEndpoint(endpoint string) Option { // // If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT // environment variable is set, and this option is not passed, that variable -// value will be used. If both are set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT -// will take precedence. +// value will be used. If both environment variables are set, +// OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence. If an environment +// variable is set, and this option is passed, this option will take precedence. // // If both this option and WithEndpoint are used, the last used option will // take precedence. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/doc.go index eabb82b9847..de9e71a6e35 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/doc.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/doc.go @@ -22,7 +22,7 @@ target URL to which the exporter sends telemetry. The value must contain a scheme ("http" or "https") and host. The value may additionally contain a port and a path. The value should not contain a query string or fragment. -The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WitnInsecure], and [WithURLPath] options. +The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithURLPath] options. 
OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_METRICS_HEADERS (default: none) - key-value pairs used as headers associated with HTTP requests. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/exporter.go index 701deb6d390..50ac8f86ea3 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/exporter.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/exporter.go @@ -5,6 +5,7 @@ package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpme import ( "context" + "errors" "fmt" "sync" @@ -114,7 +115,7 @@ func (e *Exporter) Shutdown(ctx context.Context) error { return err } -var errShutdown = fmt.Errorf("HTTP exporter is shutdown") +var errShutdown = errors.New("HTTP exporter is shutdown") type shutdownClient struct{} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/envconfig.go index 35885ba8a72..7ac42759f6c 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/envconfig.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/envconfig.go @@ -15,6 +15,7 @@ import ( "strconv" "strings" "time" + "unicode" "go.opentelemetry.io/otel/internal/global" ) @@ -163,12 +164,16 @@ func stringToHeader(value string) map[string]string { global.Error(errors.New("missing '="), "parse headers", "input", header) continue } - name, err := url.PathUnescape(n) - if err != nil { - global.Error(err, "escape header key", "key", n) + + trimmedName := strings.TrimSpace(n) + + // Validate the key. + if !isValidHeaderKey(trimmedName) { + global.Error(errors.New("invalid header key"), "parse headers", "key", trimmedName) continue } - trimmedName := strings.TrimSpace(name) + + // Only decode the value. value, err := url.PathUnescape(v) if err != nil { global.Error(err, "escape header value", "value", v) @@ -189,3 +194,22 @@ func createCertPool(certBytes []byte) (*x509.CertPool, error) { } return cp, nil } + +func isValidHeaderKey(key string) bool { + if key == "" { + return false + } + for _, c := range key { + if !isTokenChar(c) { + return false + } + } + return true +} + +func isTokenChar(c rune) bool { + return c <= unicode.MaxASCII && (unicode.IsLetter(c) || + unicode.IsDigit(c) || + c == '!' || c == '#' || c == '$' || c == '%' || c == '&' || c == '\'' || c == '*' || + c == '+' || c == '-' || c == '.' 
|| c == '^' || c == '_' || c == '`' || c == '|' || c == '~') +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go index 9bbf0941f94..db595e49ec2 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go @@ -111,7 +111,7 @@ func cleanPath(urlPath string, defaultPath string) string { return defaultPath } if !path.IsAbs(tmp) { - tmp = fmt.Sprintf("/%s", tmp) + tmp = "/" + tmp } return tmp } @@ -139,7 +139,7 @@ func NewGRPCConfig(opts ...GRPCOption) Config { if cfg.ServiceConfig != "" { cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig)) } - // Priroritize GRPCCredentials over Insecure (passing both is an error). + // Prioritize GRPCCredentials over Insecure (passing both is an error). if cfg.Metrics.GRPCCredentials != nil { cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Metrics.GRPCCredentials)) } else if cfg.Metrics.Insecure { @@ -287,9 +287,7 @@ func WithEndpointURL(v string) GenericOption { cfg.Metrics.Endpoint = u.Host cfg.Metrics.URLPath = u.Path - if u.Scheme != "https" { - cfg.Metrics.Insecure = true - } + cfg.Metrics.Insecure = u.Scheme != "https" return cfg }) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/tls.go index ae09ad57e16..f603dc605eb 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/tls.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/tls.go @@ -14,7 +14,7 @@ import ( ) // ReadTLSConfigFromFile reads a PEM certificate file and creates -// a tls.Config that will use this certifate to verify a server certificate. +// a tls.Config that will use this certificate to verify a server certificate. 
func ReadTLSConfigFromFile(path string) (*tls.Config, error) { b, err := os.ReadFile(path) if err != nil { diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata.go index 0a1a65c44d2..8207b15a421 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata.go @@ -46,8 +46,9 @@ func ScopeMetrics(sms []metricdata.ScopeMetrics) ([]*mpb.ScopeMetrics, error) { out = append(out, &mpb.ScopeMetrics{ Scope: &cpb.InstrumentationScope{ - Name: sm.Scope.Name, - Version: sm.Scope.Version, + Name: sm.Scope.Name, + Version: sm.Scope.Version, + Attributes: AttrIter(sm.Scope.Attributes.Iter()), }, Metrics: ms, SchemaUrl: sm.Scope.SchemaURL, @@ -83,13 +84,13 @@ func metric(m metricdata.Metrics) (*mpb.Metric, error) { } switch a := m.Data.(type) { case metricdata.Gauge[int64]: - out.Data = Gauge[int64](a) + out.Data = Gauge(a) case metricdata.Gauge[float64]: - out.Data = Gauge[float64](a) + out.Data = Gauge(a) case metricdata.Sum[int64]: - out.Data, err = Sum[int64](a) + out.Data, err = Sum(a) case metricdata.Sum[float64]: - out.Data, err = Sum[float64](a) + out.Data, err = Sum(a) case metricdata.Histogram[int64]: out.Data, err = Histogram(a) case metricdata.Histogram[float64]: @@ -279,10 +280,7 @@ func Temporality(t metricdata.Temporality) (mpb.AggregationTemporality, error) { // timeUnixNano on the zero Time returns 0. // The result does not depend on the location associated with t. func timeUnixNano(t time.Time) uint64 { - if t.IsZero() { - return 0 - } - return uint64(t.UnixNano()) + return uint64(max(0, t.UnixNano())) // nolint:gosec // Overflow checked. } // Exemplars returns a slice of OTLP Exemplars generated from exemplars. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go index 88e84ca892d..3da27601bbf 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go @@ -5,5 +5,5 @@ package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpme // Version is the current release version of the OpenTelemetry OTLP over HTTP/protobuf metrics exporter in use. 
func Version() string { - return "1.28.0" + return "1.34.0" } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go index f6dd3decc90..2e7690e43a2 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go @@ -13,7 +13,8 @@ func InstrumentationScope(il instrumentation.Scope) *commonpb.InstrumentationSco return nil } return &commonpb.InstrumentationScope{ - Name: il.Name, - Version: il.Version, + Name: il.Name, + Version: il.Version, + Attributes: Iterator(il.Attributes.Iter()), } } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go index c3c69c5a0d6..bf27ef0220e 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go @@ -4,6 +4,8 @@ package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" import ( + "math" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/sdk/instrumentation" @@ -95,16 +97,16 @@ func span(sd tracesdk.ReadOnlySpan) *tracepb.Span { SpanId: sid[:], TraceState: sd.SpanContext().TraceState().String(), Status: status(sd.Status().Code, sd.Status().Description), - StartTimeUnixNano: uint64(sd.StartTime().UnixNano()), - EndTimeUnixNano: uint64(sd.EndTime().UnixNano()), + StartTimeUnixNano: uint64(max(0, sd.StartTime().UnixNano())), // nolint:gosec // Overflow checked. + EndTimeUnixNano: uint64(max(0, sd.EndTime().UnixNano())), // nolint:gosec // Overflow checked. Links: links(sd.Links()), Kind: spanKind(sd.SpanKind()), Name: sd.Name(), Attributes: KeyValues(sd.Attributes()), Events: spanEvents(sd.Events()), - DroppedAttributesCount: uint32(sd.DroppedAttributes()), - DroppedEventsCount: uint32(sd.DroppedEvents()), - DroppedLinksCount: uint32(sd.DroppedLinks()), + DroppedAttributesCount: clampUint32(sd.DroppedAttributes()), + DroppedEventsCount: clampUint32(sd.DroppedEvents()), + DroppedLinksCount: clampUint32(sd.DroppedLinks()), } if psid := sd.Parent().SpanID(); psid.IsValid() { @@ -115,6 +117,16 @@ func span(sd tracesdk.ReadOnlySpan) *tracepb.Span { return s } +func clampUint32(v int) uint32 { + if v < 0 { + return 0 + } + if int64(v) > math.MaxUint32 { + return math.MaxUint32 + } + return uint32(v) // nolint: gosec // Overflow/Underflow checked. +} + // status transform a span code and message into an OTLP span status. 
func status(status codes.Code, message string) *tracepb.Status { var c tracepb.Status_StatusCode @@ -153,7 +165,7 @@ func links(links []tracesdk.Link) []*tracepb.Span_Link { TraceId: tid[:], SpanId: sid[:], Attributes: KeyValues(otLink.Attributes), - DroppedAttributesCount: uint32(otLink.DroppedAttributeCount), + DroppedAttributesCount: clampUint32(otLink.DroppedAttributeCount), Flags: flags, }) } @@ -166,7 +178,7 @@ func buildSpanFlags(sc trace.SpanContext) uint32 { flags |= tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK } - return uint32(flags) + return uint32(flags) // nolint:gosec // Flags is a bitmask and can't be negative } // spanEvents transforms span Events to an OTLP span events. @@ -180,9 +192,9 @@ func spanEvents(es []tracesdk.Event) []*tracepb.Span_Event { for i := 0; i < len(es); i++ { events[i] = &tracepb.Span_Event{ Name: es[i].Name, - TimeUnixNano: uint64(es[i].Time.UnixNano()), + TimeUnixNano: uint64(max(0, es[i].Time.UnixNano())), // nolint:gosec // Overflow checked. Attributes: KeyValues(es[i].Attributes), - DroppedAttributesCount: uint32(es[i].DroppedAttributeCount), + DroppedAttributesCount: clampUint32(es[i].DroppedAttributeCount), } } return events diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go index 3993df927de..8409b5f8f95 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go @@ -229,7 +229,12 @@ func (c *client) exportContext(parent context.Context) (context.Context, context } if c.metadata.Len() > 0 { - ctx = metadata.NewOutgoingContext(ctx, c.metadata) + md := c.metadata + if outMD, ok := metadata.FromOutgoingContext(ctx); ok { + md = metadata.Join(md, outMD) + } + + ctx = metadata.NewOutgoingContext(ctx, md) } // Unify the client stopCtx with the parent. @@ -289,7 +294,7 @@ func (c *client) MarshalLog() interface{} { Type string Endpoint string }{ - Type: "otlphttpgrpc", + Type: "otlptracegrpc", Endpoint: c.endpoint, } } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go index e783b57ac4b..b7bd429ffdf 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go @@ -12,9 +12,8 @@ The environment variables described below can be used for configuration. OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT (default: "https://localhost:4317") - target to which the exporter sends telemetry. The target syntax is defined in https://github.com/grpc/grpc/blob/master/doc/naming.md. -The value must contain a host. -The value may additionally a port, a scheme, and a path. -The value accepts "http" and "https" scheme. +The value must contain a scheme ("http" or "https") and host. +The value may additionally contain a port, and a path. The value should not contain a query string or fragment. OTEL_EXPORTER_OTLP_TRACES_ENDPOINT takes precedence over OTEL_EXPORTER_OTLP_ENDPOINT. The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithGRPCConn] options. 
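The exportContext change above, applied identically in the gRPC metric and trace clients, merges the exporter's configured headers with any metadata already attached to the outgoing context instead of overwriting it. A minimal sketch of the same pattern, using a hypothetical helper name outside the vendored code:

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

// mergeOutgoingMD attaches the exporter's configured metadata to ctx,
// joining it with any metadata a caller has already set so neither is lost.
func mergeOutgoingMD(ctx context.Context, configured metadata.MD) context.Context {
	md := configured
	if outMD, ok := metadata.FromOutgoingContext(ctx); ok {
		// Join concatenates values for duplicate keys rather than replacing them.
		md = metadata.Join(md, outMD)
	}
	return metadata.NewOutgoingContext(ctx, md)
}

func main() {
	ctx := metadata.AppendToOutgoingContext(context.Background(), "tenant", "team-a")
	ctx = mergeOutgoingMD(ctx, metadata.Pairs("authorization", "Bearer example-token"))
	md, _ := metadata.FromOutgoingContext(ctx)
	fmt.Println(md) // both "tenant" and "authorization" survive
}
```

Before this change, a caller-supplied outgoing context lost its metadata whenever the exporter had headers configured; metadata.Join keeps both sets.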
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go index 9513c0a57ca..4abf48d1f62 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go @@ -15,6 +15,7 @@ import ( "strconv" "strings" "time" + "unicode" "go.opentelemetry.io/otel/internal/global" ) @@ -163,12 +164,16 @@ func stringToHeader(value string) map[string]string { global.Error(errors.New("missing '="), "parse headers", "input", header) continue } - name, err := url.PathUnescape(n) - if err != nil { - global.Error(err, "escape header key", "key", n) + + trimmedName := strings.TrimSpace(n) + + // Validate the key. + if !isValidHeaderKey(trimmedName) { + global.Error(errors.New("invalid header key"), "parse headers", "key", trimmedName) continue } - trimmedName := strings.TrimSpace(name) + + // Only decode the value. value, err := url.PathUnescape(v) if err != nil { global.Error(err, "escape header value", "value", v) @@ -189,3 +194,22 @@ func createCertPool(certBytes []byte) (*x509.CertPool, error) { } return cp, nil } + +func isValidHeaderKey(key string) bool { + if key == "" { + return false + } + for _, c := range key { + if !isTokenChar(c) { + return false + } + } + return true +} + +func isTokenChar(c rune) bool { + return c <= unicode.MaxASCII && (unicode.IsLetter(c) || + unicode.IsDigit(c) || + c == '!' || c == '#' || c == '$' || c == '%' || c == '&' || c == '\'' || c == '*' || + c == '+' || c == '-' || c == '.' || c == '^' || c == '_' || c == '`' || c == '|' || c == '~') +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go index 8f84a799632..0a317d92637 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go @@ -98,7 +98,7 @@ func cleanPath(urlPath string, defaultPath string) string { return defaultPath } if !path.IsAbs(tmp) { - tmp = fmt.Sprintf("/%s", tmp) + tmp = "/" + tmp } return tmp } @@ -125,7 +125,7 @@ func NewGRPCConfig(opts ...GRPCOption) Config { if cfg.ServiceConfig != "" { cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig)) } - // Priroritize GRPCCredentials over Insecure (passing both is an error). + // Prioritize GRPCCredentials over Insecure (passing both is an error). 
if cfg.Traces.GRPCCredentials != nil { cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Traces.GRPCCredentials)) } else if cfg.Traces.Insecure { @@ -278,9 +278,7 @@ func WithEndpointURL(v string) GenericOption { cfg.Traces.Endpoint = u.Host cfg.Traces.URLPath = u.Path - if u.Scheme != "https" { - cfg.Traces.Insecure = true - } + cfg.Traces.Insecure = u.Scheme != "https" return cfg }) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go index 1e59ff23932..16c006b2cfd 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go @@ -14,6 +14,7 @@ import ( "net/http" "net/url" "strconv" + "strings" "sync" "time" @@ -151,7 +152,7 @@ func (d *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc resp, err := d.client.Do(request.Request) var urlErr *url.Error if errors.As(err, &urlErr) && urlErr.Temporary() { - return newResponseError(http.Header{}) + return newResponseError(http.Header{}, err) } if err != nil { return err @@ -165,8 +166,7 @@ func (d *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc }() } - switch sc := resp.StatusCode; { - case sc >= 200 && sc <= 299: + if sc := resp.StatusCode; sc >= 200 && sc <= 299 { // Success, do not retry. // Read the partial success message, if any. var respData bytes.Buffer @@ -193,18 +193,33 @@ func (d *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc } } return nil - - case sc == http.StatusTooManyRequests, - sc == http.StatusBadGateway, - sc == http.StatusServiceUnavailable, - sc == http.StatusGatewayTimeout: - // Retry-able failures. Drain the body to reuse the connection. - if _, err := io.Copy(io.Discard, resp.Body); err != nil { - otel.Handle(err) - } - return newResponseError(resp.Header) + } + // Error cases. + + // server may return a message with the response + // body, so we read it to include in the error + // message to be returned. It will help in + // debugging the actual issue. + var respData bytes.Buffer + if _, err := io.Copy(&respData, resp.Body); err != nil { + return err + } + respStr := strings.TrimSpace(respData.String()) + if len(respStr) == 0 { + respStr = "(empty)" + } + bodyErr := fmt.Errorf("body: %s", respStr) + + switch resp.StatusCode { + case http.StatusTooManyRequests, + http.StatusBadGateway, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout: + // Retryable failure. + return newResponseError(resp.Header, bodyErr) default: - return fmt.Errorf("failed to send to %s: %s", request.URL, resp.Status) + // Non-retryable failure. + return fmt.Errorf("failed to send to %s: %s (%w)", request.URL, resp.Status, bodyErr) } }) } @@ -261,7 +276,7 @@ func (d *client) MarshalLog() interface{} { Endpoint string Insecure bool }{ - Type: "otlphttphttp", + Type: "otlptracehttp", Endpoint: d.cfg.Endpoint, Insecure: d.cfg.Insecure, } @@ -291,24 +306,50 @@ func (r *request) reset(ctx context.Context) { // retryableError represents a request failure that can be retried. type retryableError struct { throttle int64 + err error } // newResponseError returns a retryableError and will extract any explicit -// throttle delay contained in headers. -func newResponseError(header http.Header) error { +// throttle delay contained in headers. The returned error wraps wrapped +// if it is not nil. 
+func newResponseError(header http.Header, wrapped error) error { var rErr retryableError if s, ok := header["Retry-After"]; ok { if t, err := strconv.ParseInt(s[0], 10, 64); err == nil { rErr.throttle = t } } + + rErr.err = wrapped return rErr } func (e retryableError) Error() string { + if e.err != nil { + return "retry-able request failure: " + e.err.Error() + } + return "retry-able request failure" } +func (e retryableError) Unwrap() error { + return e.err +} + +func (e retryableError) As(target interface{}) bool { + if e.err == nil { + return false + } + + switch v := target.(type) { + case **retryableError: + *v = &e + return true + default: + return false + } +} + // evaluate returns if err is retry-able. If it is and it includes an explicit // throttling delay, that delay is also returned. func evaluate(err error) (bool, time.Duration) { diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/doc.go index 43534cbfba4..9fea75ad19c 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/doc.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/doc.go @@ -22,7 +22,7 @@ target URL to which the exporter sends telemetry. The value must contain a scheme ("http" or "https") and host. The value may additionally contain a port and a path. The value should not contain a query string or fragment. -The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WitnInsecure], and [WithURLPath] options. +The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithURLPath] options. OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_TRACES_HEADERS (default: none) - key-value pairs used as headers associated with HTTP requests. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig.go index 26a316d003d..f30bb66aeda 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig.go @@ -15,6 +15,7 @@ import ( "strconv" "strings" "time" + "unicode" "go.opentelemetry.io/otel/internal/global" ) @@ -163,12 +164,16 @@ func stringToHeader(value string) map[string]string { global.Error(errors.New("missing '="), "parse headers", "input", header) continue } - name, err := url.PathUnescape(n) - if err != nil { - global.Error(err, "escape header key", "key", n) + + trimmedName := strings.TrimSpace(n) + + // Validate the key. + if !isValidHeaderKey(trimmedName) { + global.Error(errors.New("invalid header key"), "parse headers", "key", trimmedName) continue } - trimmedName := strings.TrimSpace(name) + + // Only decode the value. value, err := url.PathUnescape(v) if err != nil { global.Error(err, "escape header value", "value", v) @@ -189,3 +194,22 @@ func createCertPool(certBytes []byte) (*x509.CertPool, error) { } return cp, nil } + +func isValidHeaderKey(key string) bool { + if key == "" { + return false + } + for _, c := range key { + if !isTokenChar(c) { + return false + } + } + return true +} + +func isTokenChar(c rune) bool { + return c <= unicode.MaxASCII && (unicode.IsLetter(c) || + unicode.IsDigit(c) || + c == '!' 
|| c == '#' || c == '$' || c == '%' || c == '&' || c == '\'' || c == '*' || + c == '+' || c == '-' || c == '.' || c == '^' || c == '_' || c == '`' || c == '|' || c == '~') +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go index 2ebbc752f4b..6a9c4d3a652 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go @@ -98,7 +98,7 @@ func cleanPath(urlPath string, defaultPath string) string { return defaultPath } if !path.IsAbs(tmp) { - tmp = fmt.Sprintf("/%s", tmp) + tmp = "/" + tmp } return tmp } @@ -125,7 +125,7 @@ func NewGRPCConfig(opts ...GRPCOption) Config { if cfg.ServiceConfig != "" { cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig)) } - // Priroritize GRPCCredentials over Insecure (passing both is an error). + // Prioritize GRPCCredentials over Insecure (passing both is an error). if cfg.Traces.GRPCCredentials != nil { cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Traces.GRPCCredentials)) } else if cfg.Traces.Insecure { @@ -278,9 +278,7 @@ func WithEndpointURL(v string) GenericOption { cfg.Traces.Endpoint = u.Host cfg.Traces.URLPath = u.Path - if u.Scheme != "https" { - cfg.Traces.Insecure = true - } + cfg.Traces.Insecure = u.Scheme != "https" return cfg }) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go index 6497f3ccdd0..3559c5664f4 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go @@ -62,8 +62,10 @@ func (w wrappedOption) applyHTTPOption(cfg otlpconfig.Config) otlpconfig.Config // // If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT // environment variable is set, and this option is not passed, that variable -// value will be used. If both are set, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT -// will take precedence. Note, both environment variables include the full +// value will be used. If both environment variables are set, +// OTEL_EXPORTER_OTLP_TRACES_ENDPOINT will take precedence. If an environment +// variable is set, and this option is passed, this option will take precedence. +// Note, both environment variables include the full // scheme and path, while WithEndpoint sets only the host and port. // // If both this option and WithEndpointURL are used, the last used option will @@ -82,8 +84,9 @@ func WithEndpoint(endpoint string) Option { // // If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT // environment variable is set, and this option is not passed, that variable -// value will be used. If both are set, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT -// will take precedence. +// value will be used. If both environment variables are set, +// OTEL_EXPORTER_OTLP_TRACES_ENDPOINT will take precedence. If an environment +// variable is set, and this option is passed, this option will take precedence. // // If both this option and WithEndpoint are used, the last used option will // take precedence. 
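The retryableError changes above give the HTTP clients a wrapped cause (built from the response body) alongside the retry classification, reachable through the standard errors package. A rough usage sketch; the type below is a simplified stand-in for the vendored, unexported one:

```go
package main

import (
	"errors"
	"fmt"
)

// retryableError mirrors the shape of the exporter's unexported type:
// a throttle hint from Retry-After plus a wrapped cause.
type retryableError struct {
	throttle int64
	err      error
}

func (e retryableError) Error() string {
	if e.err != nil {
		return "retry-able request failure: " + e.err.Error()
	}
	return "retry-able request failure"
}

func (e retryableError) Unwrap() error { return e.err }

func main() {
	cause := errors.New("body: upstream overloaded")
	err := retryableError{throttle: 30, err: cause}

	// errors.Is walks the Unwrap chain, so the body error is visible.
	fmt.Println(errors.Is(err, cause)) // true

	var rErr retryableError
	if errors.As(err, &rErr) {
		fmt.Println("retry after", rErr.throttle, "seconds")
	}
}
```

The vendored type additionally implements a custom As method so a **retryableError target also matches; the plain value match above is the common caller-side case.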
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go index 14ad8c33b48..f156ee66720 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go @@ -5,5 +5,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" // Version is the current release version of the OpenTelemetry OTLP trace exporter in use. func Version() string { - return "1.28.0" + return "1.34.0" } diff --git a/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go b/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go index a10ab7f1df8..660675dd620 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go +++ b/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go @@ -7,6 +7,7 @@ import ( "strings" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/model" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/metric" @@ -131,7 +132,10 @@ func WithoutScopeInfo() Option { // have special behavior based on their name. func WithNamespace(ns string) Option { return optionFunc(func(cfg config) config { - ns = sanitizeName(ns) + if model.NameValidationScheme != model.UTF8Validation { + // Only sanitize if prometheus does not support UTF-8. + ns = model.EscapeName(ns, model.NameEscapingScheme) + } if !strings.HasSuffix(ns, "_") { // namespace and metric names should be separated with an underscore, // adds a trailing underscore if there is not one already. diff --git a/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go index d2e387e607c..50c95a16f7f 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go +++ b/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go @@ -11,11 +11,10 @@ import ( "slices" "strings" "sync" - "unicode" - "unicode/utf8" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/model" "google.golang.org/protobuf/proto" "go.opentelemetry.io/otel" @@ -34,15 +33,14 @@ const ( scopeInfoMetricName = "otel_scope_info" scopeInfoDescription = "Instrumentation Scope metadata" + scopeNameLabel = "otel_scope_name" + scopeVersionLabel = "otel_scope_version" + traceIDExemplarKey = "trace_id" spanIDExemplarKey = "span_id" ) -var ( - scopeInfoKeys = [2]string{"otel_scope_name", "otel_scope_version"} - - errScopeInvalid = errors.New("invalid scope") -) +var errScopeInvalid = errors.New("invalid scope") // Exporter is a Prometheus Exporter that embeds the OTel metric.Reader // interface for easy instantiation with a MeterProvider. 
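The WithNamespace change in the Prometheus exporter's config.go above (and the getName change later in exporter.go) only escapes names when the configured Prometheus validation scheme forbids UTF-8. A hedged sketch of that conditional escaping, using the same github.com/prometheus/common/model symbols the vendored code relies on:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

// sanitizeIfNeeded mirrors the exporter's new approach: names pass through
// untouched when UTF-8 names are allowed, and are escaped otherwise.
func sanitizeIfNeeded(name string) string {
	if model.NameValidationScheme == model.UTF8Validation {
		return name
	}
	return model.EscapeName(name, model.NameEscapingScheme)
}

func main() {
	// Under legacy validation this typically becomes "http_server_duration";
	// the exact output depends on the configured escaping scheme.
	fmt.Println(sanitizeIfNeeded("http.server.duration"))
}
```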
@@ -188,7 +186,11 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) { } for _, scopeMetrics := range metrics.ScopeMetrics { - var keys, values [2]string + n := len(c.resourceKeyVals.keys) + 2 // resource attrs + scope name + scope version + kv := keyVals{ + keys: make([]string, 0, n), + vals: make([]string, 0, n), + } if !c.disableScopeInfo { scopeInfo, err := c.scopeInfo(scopeMetrics.Scope) @@ -203,10 +205,13 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) { ch <- scopeInfo - keys = scopeInfoKeys - values = [2]string{scopeMetrics.Scope.Name, scopeMetrics.Scope.Version} + kv.keys = append(kv.keys, scopeNameLabel, scopeVersionLabel) + kv.vals = append(kv.vals, scopeMetrics.Scope.Name, scopeMetrics.Scope.Version) } + kv.keys = append(kv.keys, c.resourceKeyVals.keys...) + kv.vals = append(kv.vals, c.resourceKeyVals.vals...) + for _, m := range scopeMetrics.Metrics { typ := c.metricType(m) if typ == nil { @@ -225,25 +230,27 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) { switch v := m.Data.(type) { case metricdata.Histogram[int64]: - addHistogramMetric(ch, v, m, keys, values, name, c.resourceKeyVals) + addHistogramMetric(ch, v, m, name, kv) case metricdata.Histogram[float64]: - addHistogramMetric(ch, v, m, keys, values, name, c.resourceKeyVals) + addHistogramMetric(ch, v, m, name, kv) case metricdata.Sum[int64]: - addSumMetric(ch, v, m, keys, values, name, c.resourceKeyVals) + addSumMetric(ch, v, m, name, kv) case metricdata.Sum[float64]: - addSumMetric(ch, v, m, keys, values, name, c.resourceKeyVals) + addSumMetric(ch, v, m, name, kv) case metricdata.Gauge[int64]: - addGaugeMetric(ch, v, m, keys, values, name, c.resourceKeyVals) + addGaugeMetric(ch, v, m, name, kv) case metricdata.Gauge[float64]: - addGaugeMetric(ch, v, m, keys, values, name, c.resourceKeyVals) + addGaugeMetric(ch, v, m, name, kv) } } } } -func addHistogramMetric[N int64 | float64](ch chan<- prometheus.Metric, histogram metricdata.Histogram[N], m metricdata.Metrics, ks, vs [2]string, name string, resourceKV keyVals) { +func addHistogramMetric[N int64 | float64](ch chan<- prometheus.Metric, histogram metricdata.Histogram[N], m metricdata.Metrics, name string, kv keyVals) { for _, dp := range histogram.DataPoints { - keys, values := getAttrs(dp.Attributes, ks, vs, resourceKV) + keys, values := getAttrs(dp.Attributes) + keys = append(keys, kv.keys...) + values = append(values, kv.vals...) desc := prometheus.NewDesc(name, m.Description, keys, nil) buckets := make(map[float64]uint64, len(dp.Bounds)) @@ -263,14 +270,16 @@ func addHistogramMetric[N int64 | float64](ch chan<- prometheus.Metric, histogra } } -func addSumMetric[N int64 | float64](ch chan<- prometheus.Metric, sum metricdata.Sum[N], m metricdata.Metrics, ks, vs [2]string, name string, resourceKV keyVals) { +func addSumMetric[N int64 | float64](ch chan<- prometheus.Metric, sum metricdata.Sum[N], m metricdata.Metrics, name string, kv keyVals) { valueType := prometheus.CounterValue if !sum.IsMonotonic { valueType = prometheus.GaugeValue } for _, dp := range sum.DataPoints { - keys, values := getAttrs(dp.Attributes, ks, vs, resourceKV) + keys, values := getAttrs(dp.Attributes) + keys = append(keys, kv.keys...) + values = append(values, kv.vals...) desc := prometheus.NewDesc(name, m.Description, keys, nil) m, err := prometheus.NewConstMetric(desc, valueType, float64(dp.Value), values...) 
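The Collect refactor above replaces the fixed two-element scope-label arrays with a keyVals pair built once per scope and appended after each data point's own attribute labels. A simplified, self-contained sketch of that label assembly with hypothetical inputs:

```go
package main

import "fmt"

// keyVals matches the shape used by the collector: parallel label
// name and value slices.
type keyVals struct {
	keys []string
	vals []string
}

// labelsFor places the data point's attribute labels first, then the
// shared scope and resource labels, mirroring the Collect refactor.
func labelsFor(attrKeys, attrVals []string, shared keyVals) ([]string, []string) {
	keys := make([]string, 0, len(attrKeys)+len(shared.keys))
	keys = append(keys, attrKeys...)
	keys = append(keys, shared.keys...)

	vals := make([]string, 0, len(attrVals)+len(shared.vals))
	vals = append(vals, attrVals...)
	vals = append(vals, shared.vals...)
	return keys, vals
}

func main() {
	shared := keyVals{
		keys: []string{"otel_scope_name", "otel_scope_version", "service_name"},
		vals: []string{"example/meter", "1.2.3", "checkout"},
	}
	keys, vals := labelsFor([]string{"method"}, []string{"GET"}, shared)
	fmt.Println(keys)
	fmt.Println(vals)
}
```

Building the shared pair once per ScopeMetrics avoids re-deriving scope and resource labels for every data point, which the old per-call [2]string plumbing forced.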
@@ -278,14 +287,20 @@ func addSumMetric[N int64 | float64](ch chan<- prometheus.Metric, sum metricdata otel.Handle(err) continue } - m = addExemplars(m, dp.Exemplars) + // GaugeValues don't support Exemplars at this time + // https://github.com/prometheus/client_golang/blob/aef8aedb4b6e1fb8ac1c90790645169125594096/prometheus/metric.go#L199 + if valueType != prometheus.GaugeValue { + m = addExemplars(m, dp.Exemplars) + } ch <- m } } -func addGaugeMetric[N int64 | float64](ch chan<- prometheus.Metric, gauge metricdata.Gauge[N], m metricdata.Metrics, ks, vs [2]string, name string, resourceKV keyVals) { +func addGaugeMetric[N int64 | float64](ch chan<- prometheus.Metric, gauge metricdata.Gauge[N], m metricdata.Metrics, name string, kv keyVals) { for _, dp := range gauge.DataPoints { - keys, values := getAttrs(dp.Attributes, ks, vs, resourceKV) + keys, values := getAttrs(dp.Attributes) + keys = append(keys, kv.keys...) + values = append(values, kv.vals...) desc := prometheus.NewDesc(name, m.Description, keys, nil) m, err := prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(dp.Value), values...) @@ -297,61 +312,58 @@ func addGaugeMetric[N int64 | float64](ch chan<- prometheus.Metric, gauge metric } } -// getAttrs parses the attribute.Set to two lists of matching Prometheus-style -// keys and values. It sanitizes invalid characters and handles duplicate keys -// (due to sanitization) by sorting and concatenating the values following the spec. -func getAttrs(attrs attribute.Set, ks, vs [2]string, resourceKV keyVals) ([]string, []string) { - keysMap := make(map[string][]string) - itr := attrs.Iter() - for itr.Next() { - kv := itr.Attribute() - key := strings.Map(sanitizeRune, string(kv.Key)) - if _, ok := keysMap[key]; !ok { - keysMap[key] = []string{kv.Value.Emit()} - } else { - // if the sanitized key is a duplicate, append to the list of keys - keysMap[key] = append(keysMap[key], kv.Value.Emit()) - } - } - +// getAttrs converts the attribute.Set to two lists of matching Prometheus-style +// keys and values. +func getAttrs(attrs attribute.Set) ([]string, []string) { keys := make([]string, 0, attrs.Len()) values := make([]string, 0, attrs.Len()) - for key, vals := range keysMap { - keys = append(keys, key) - slices.Sort(vals) - values = append(values, strings.Join(vals, ";")) - } - - if ks[0] != "" { - keys = append(keys, ks[:]...) - values = append(values, vs[:]...) - } + itr := attrs.Iter() - for idx := range resourceKV.keys { - keys = append(keys, resourceKV.keys[idx]) - values = append(values, resourceKV.vals[idx]) + if model.NameValidationScheme == model.UTF8Validation { + // Do not perform sanitization if prometheus supports UTF-8. + for itr.Next() { + kv := itr.Attribute() + keys = append(keys, string(kv.Key)) + values = append(values, kv.Value.Emit()) + } + } else { + // It sanitizes invalid characters and handles duplicate keys + // (due to sanitization) by sorting and concatenating the values following the spec. 
+ keysMap := make(map[string][]string) + for itr.Next() { + kv := itr.Attribute() + key := model.EscapeName(string(kv.Key), model.NameEscapingScheme) + if _, ok := keysMap[key]; !ok { + keysMap[key] = []string{kv.Value.Emit()} + } else { + // if the sanitized key is a duplicate, append to the list of keys + keysMap[key] = append(keysMap[key], kv.Value.Emit()) + } + } + for key, vals := range keysMap { + keys = append(keys, key) + slices.Sort(vals) + values = append(values, strings.Join(vals, ";")) + } } - return keys, values } func createInfoMetric(name, description string, res *resource.Resource) (prometheus.Metric, error) { - keys, values := getAttrs(*res.Set(), [2]string{}, [2]string{}, keyVals{}) + keys, values := getAttrs(*res.Set()) desc := prometheus.NewDesc(name, description, keys, nil) return prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(1), values...) } func createScopeInfoMetric(scope instrumentation.Scope) (prometheus.Metric, error) { - keys := scopeInfoKeys[:] - desc := prometheus.NewDesc(scopeInfoMetricName, scopeInfoDescription, keys, nil) - return prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(1), scope.Name, scope.Version) -} + attrs := make([]attribute.KeyValue, 0, scope.Attributes.Len()+2) // resource attrs + scope name + scope version + attrs = append(attrs, scope.Attributes.ToSlice()...) + attrs = append(attrs, attribute.String(scopeNameLabel, scope.Name)) + attrs = append(attrs, attribute.String(scopeVersionLabel, scope.Version)) -func sanitizeRune(r rune) rune { - if unicode.IsLetter(r) || unicode.IsDigit(r) || r == ':' || r == '_' { - return r - } - return '_' + keys, values := getAttrs(attribute.NewSet(attrs...)) + desc := prometheus.NewDesc(scopeInfoMetricName, scopeInfoDescription, keys, nil) + return prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(1), values...) } var unitSuffixes = map[string]string{ @@ -392,7 +404,11 @@ var unitSuffixes = map[string]string{ // getName returns the sanitized name, prefixed with the namespace and suffixed with unit. func (c *collector) getName(m metricdata.Metrics, typ *dto.MetricType) string { - name := sanitizeName(m.Name) + name := m.Name + if model.NameValidationScheme != model.UTF8Validation { + // Only sanitize if prometheus does not support UTF-8. + name = model.EscapeName(name, model.NameEscapingScheme) + } addCounterSuffix := !c.withoutCounterSuffixes && *typ == dto.MetricType_COUNTER if addCounterSuffix { // Remove the _total suffix here, as we will re-add the total suffix @@ -411,59 +427,6 @@ func (c *collector) getName(m metricdata.Metrics, typ *dto.MetricType) string { return name } -func sanitizeName(n string) string { - // This algorithm is based on strings.Map from Go 1.19. - const replacement = '_' - - valid := func(i int, r rune) bool { - // Taken from - // https://github.com/prometheus/common/blob/dfbc25bd00225c70aca0d94c3c4bb7744f28ace0/model/metric.go#L92-L102 - if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') || r == '_' || r == ':' || (r >= '0' && r <= '9' && i > 0) { - return true - } - return false - } - - // This output buffer b is initialized on demand, the first time a - // character needs to be replaced. - var b strings.Builder - for i, c := range n { - if valid(i, c) { - continue - } - - if i == 0 && c >= '0' && c <= '9' { - // Prefix leading number with replacement character. 
- b.Grow(len(n) + 1) - _ = b.WriteByte(byte(replacement)) - break - } - b.Grow(len(n)) - _, _ = b.WriteString(n[:i]) - _ = b.WriteByte(byte(replacement)) - width := utf8.RuneLen(c) - n = n[i+width:] - break - } - - // Fast path for unchanged input. - if b.Cap() == 0 { // b.Grow was not called above. - return n - } - - for _, c := range n { - // Due to inlining, it is more performant to invoke WriteByte rather then - // WriteRune. - if valid(1, c) { // We are guaranteed to not be at the start. - _ = b.WriteByte(byte(c)) - } else { - _ = b.WriteByte(byte(replacement)) - } - } - - return b.String() -} - func (c *collector) metricType(m metricdata.Metrics) *dto.MetricType { switch v := m.Data.(type) { case metricdata.Histogram[int64], metricdata.Histogram[float64]: @@ -489,7 +452,7 @@ func (c *collector) createResourceAttributes(res *resource.Resource) { defer c.mu.Unlock() resourceAttrs, _ := res.Set().Filter(c.resourceAttributesFilter) - resourceKeys, resourceValues := getAttrs(resourceAttrs, [2]string{}, [2]string{}, keyVals{}) + resourceKeys, resourceValues := getAttrs(resourceAttrs) c.resourceKeyVals = keyVals{keys: resourceKeys, vals: resourceValues} } @@ -584,7 +547,8 @@ func addExemplars[N int64 | float64](m prometheus.Metric, exemplars []metricdata func attributesToLabels(attrs []attribute.KeyValue) prometheus.Labels { labels := make(map[string]string) for _, attr := range attrs { - labels[string(attr.Key)] = attr.Value.Emit() + key := model.EscapeName(string(attr.Key), model.NameEscapingScheme) + labels[key] = attr.Value.Emit() } return labels } diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go index 822d8479474..691d96c7554 100644 --- a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go +++ b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go @@ -49,12 +49,11 @@ func AsBoolSlice(v interface{}) []bool { if rv.Type().Kind() != reflect.Array { return nil } - var zero bool - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]bool) + cpy := make([]bool, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy } // AsInt64Slice converts an int64 array into a slice into with same elements as array. @@ -63,12 +62,11 @@ func AsInt64Slice(v interface{}) []int64 { if rv.Type().Kind() != reflect.Array { return nil } - var zero int64 - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]int64) + cpy := make([]int64, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy } // AsFloat64Slice converts a float64 array into a slice into with same elements as array. 
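The attribute helpers above drop the reflect.ArrayOf/reflect.New indirection in favor of allocating the slice with make and copying straight from the array value with reflect.Copy. A standalone illustration of the technique:

```go
package main

import (
	"fmt"
	"reflect"
)

// arrayToInt64Slice copies the elements of any int64 array value into a
// freshly allocated slice, as the updated AsInt64Slice does.
func arrayToInt64Slice(v interface{}) []int64 {
	rv := reflect.ValueOf(v)
	if rv.Kind() != reflect.Array {
		return nil
	}
	cpy := make([]int64, rv.Len())
	if len(cpy) > 0 {
		// reflect.Copy accepts a slice destination and an array source,
		// so no intermediate array type or allocation is needed.
		_ = reflect.Copy(reflect.ValueOf(cpy), rv)
	}
	return cpy
}

func main() {
	arr := [3]int64{1, 2, 3}
	fmt.Println(arrayToInt64Slice(arr)) // [1 2 3]
}
```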
@@ -77,12 +75,11 @@ func AsFloat64Slice(v interface{}) []float64 { if rv.Type().Kind() != reflect.Array { return nil } - var zero float64 - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]float64) + cpy := make([]float64, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy } // AsStringSlice converts a string array into a slice into with same elements as array. @@ -91,10 +88,9 @@ func AsStringSlice(v interface{}) []string { if rv.Type().Kind() != reflect.Array { return nil } - var zero string - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]string) + cpy := make([]string, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go index 3a0cc42f6a4..ae92a425166 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go @@ -13,7 +13,7 @@ import ( // unwrapper unwraps to return the underlying instrument implementation. type unwrapper interface { - Unwrap() metric.Observable + unwrap() metric.Observable } type afCounter struct { @@ -40,7 +40,7 @@ func (i *afCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *afCounter) Unwrap() metric.Observable { +func (i *afCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Float64ObservableCounter) } @@ -71,7 +71,7 @@ func (i *afUpDownCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *afUpDownCounter) Unwrap() metric.Observable { +func (i *afUpDownCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Float64ObservableUpDownCounter) } @@ -102,7 +102,7 @@ func (i *afGauge) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *afGauge) Unwrap() metric.Observable { +func (i *afGauge) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Float64ObservableGauge) } @@ -133,7 +133,7 @@ func (i *aiCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *aiCounter) Unwrap() metric.Observable { +func (i *aiCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Int64ObservableCounter) } @@ -164,7 +164,7 @@ func (i *aiUpDownCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *aiUpDownCounter) Unwrap() metric.Observable { +func (i *aiUpDownCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Int64ObservableUpDownCounter) } @@ -195,7 +195,7 @@ func (i *aiGauge) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *aiGauge) Unwrap() metric.Observable { +func (i *aiGauge) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Int64ObservableGauge) } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go index e3db438a09f..a6acd8dca66 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/meter.go +++ 
b/vendor/go.opentelemetry.io/otel/internal/global/meter.go @@ -5,6 +5,7 @@ package global // import "go.opentelemetry.io/otel/internal/global" import ( "container/list" + "context" "reflect" "sync" @@ -66,6 +67,7 @@ func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Me name: name, version: c.InstrumentationVersion(), schema: c.SchemaURL(), + attrs: c.InstrumentationAttributes(), } if p.meters == nil { @@ -472,8 +474,7 @@ func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) defer m.mtx.Unlock() if m.delegate != nil { - insts = unwrapInstruments(insts) - return m.delegate.RegisterCallback(f, insts...) + return m.delegate.RegisterCallback(unwrapCallback(f), unwrapInstruments(insts)...) } reg := ®istration{instruments: insts, function: f} @@ -487,15 +488,11 @@ func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) return reg, nil } -type wrapped interface { - unwrap() metric.Observable -} - func unwrapInstruments(instruments []metric.Observable) []metric.Observable { out := make([]metric.Observable, 0, len(instruments)) for _, inst := range instruments { - if in, ok := inst.(wrapped); ok { + if in, ok := inst.(unwrapper); ok { out = append(out, in.unwrap()) } else { out = append(out, inst) @@ -515,9 +512,61 @@ type registration struct { unregMu sync.Mutex } -func (c *registration) setDelegate(m metric.Meter) { - insts := unwrapInstruments(c.instruments) +type unwrapObs struct { + embedded.Observer + obs metric.Observer +} + +// unwrapFloat64Observable returns an expected metric.Float64Observable after +// unwrapping the global object. +func unwrapFloat64Observable(inst metric.Float64Observable) metric.Float64Observable { + if unwrapped, ok := inst.(unwrapper); ok { + if floatObs, ok := unwrapped.unwrap().(metric.Float64Observable); ok { + // Note: if the unwrapped object does not + // unwrap as an observable for either of the + // predicates here, it means an internal bug in + // this package. We avoid logging an error in + // this case, because the SDK has to try its + // own type conversion on the object. The SDK + // will see this and be forced to respond with + // its own error. + // + // This code uses a double-nested if statement + // to avoid creating a branch that is + // impossible to cover. + inst = floatObs + } + } + return inst +} + +// unwrapInt64Observable returns an expected metric.Int64Observable after +// unwrapping the global object. +func unwrapInt64Observable(inst metric.Int64Observable) metric.Int64Observable { + if unwrapped, ok := inst.(unwrapper); ok { + if unint, ok := unwrapped.unwrap().(metric.Int64Observable); ok { + // See the comment in unwrapFloat64Observable(). + inst = unint + } + } + return inst +} + +func (uo *unwrapObs) ObserveFloat64(inst metric.Float64Observable, value float64, opts ...metric.ObserveOption) { + uo.obs.ObserveFloat64(unwrapFloat64Observable(inst), value, opts...) +} + +func (uo *unwrapObs) ObserveInt64(inst metric.Int64Observable, value int64, opts ...metric.ObserveOption) { + uo.obs.ObserveInt64(unwrapInt64Observable(inst), value, opts...) +} +func unwrapCallback(f metric.Callback) metric.Callback { + return func(ctx context.Context, obs metric.Observer) error { + return f(ctx, &unwrapObs{obs: obs}) + } +} + +func (c *registration) setDelegate(m metric.Meter) { c.unregMu.Lock() defer c.unregMu.Unlock() @@ -526,7 +575,7 @@ func (c *registration) setDelegate(m metric.Meter) { return } - reg, err := m.RegisterCallback(c.function, insts...) 
+ reg, err := m.RegisterCallback(unwrapCallback(c.function), unwrapInstruments(c.instruments)...) if err != nil { GetErrorHandler().Handle(err) return diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go index e31f442b48f..8982aa0dc56 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go @@ -25,6 +25,7 @@ import ( "sync" "sync/atomic" + "go.opentelemetry.io/auto/sdk" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/trace" @@ -87,6 +88,7 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T name: name, version: c.InstrumentationVersion(), schema: c.SchemaURL(), + attrs: c.InstrumentationAttributes(), } if p.tracers == nil { @@ -102,7 +104,12 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T return t } -type il struct{ name, version, schema string } +type il struct { + name string + version string + schema string + attrs attribute.Set +} // tracer is a placeholder for a trace.Tracer. // @@ -139,6 +146,30 @@ func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStart return delegate.(trace.Tracer).Start(ctx, name, opts...) } + return t.newSpan(ctx, autoInstEnabled, name, opts) +} + +// autoInstEnabled determines if the auto-instrumentation SDK span is returned +// from the tracer when not backed by a delegate and auto-instrumentation has +// attached to this process. +// +// The auto-instrumentation is expected to overwrite this value to true when it +// attaches. By default, this points to false, meaning a tracer will return +// a nonRecordingSpan. +var autoInstEnabled = new(bool) + +func (t *tracer) newSpan(ctx context.Context, autoSpan *bool, name string, opts []trace.SpanStartOption) (context.Context, trace.Span) { + // autoInstEnabled is passed to newSpan via the autoSpan parameter. This is + // so the auto-instrumentation can define a uprobe for (*t).newSpan and be + // provided with the address of the bool autoInstEnabled points to. It + // needs to be a parameter so that the pointer can be reliably determined; + // it should not be read from the global. + + if *autoSpan { + tracer := sdk.TracerProvider().Tracer(t.name, t.opts...) + return tracer.Start(ctx, name, opts...) + } + s := nonRecordingSpan{sc: trace.SpanContextFromContext(ctx), tracer: t} ctx = trace.ContextWithSpan(ctx, s) return ctx, s diff --git a/vendor/go.opentelemetry.io/otel/log/DESIGN.md b/vendor/go.opentelemetry.io/otel/log/DESIGN.md index 2bb8c3a643f..568df49d96e 100644 --- a/vendor/go.opentelemetry.io/otel/log/DESIGN.md +++ b/vendor/go.opentelemetry.io/otel/log/DESIGN.md @@ -26,14 +26,12 @@ This proposed design aims to: The API is published as a single `go.opentelemetry.io/otel/log` Go module. -The module name is compliant with -[Artifact Naming](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/bridge-api.md#artifact-naming) -and the package structure is the same as for Trace API and Metrics API. - +The package structure is similar to Trace API and Metrics API. The Go module consists of the following packages: - `go.opentelemetry.io/otel/log` - `go.opentelemetry.io/otel/log/embedded` +- `go.opentelemetry.io/otel/log/logtest` - `go.opentelemetry.io/otel/log/noop` Rejected alternative: @@ -105,16 +103,16 @@ is defined as `Record` struct in [record.go](record.go). 
is accessed using the following methods: ```go -func (r *Record) Timestamp() time.Time -func (r *Record) SetTimestamp(t time.Time) +func (r *Record) Timestamp() time.Time +func (r *Record) SetTimestamp(t time.Time) ``` [`ObservedTimestamp`](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-observedtimestamp) is accessed using the following methods: ```go -func (r *Record) ObservedTimestamp() time.Time -func (r *Record) SetObservedTimestamp(t time.Time) +func (r *Record) ObservedTimestamp() time.Time +func (r *Record) SetObservedTimestamp(t time.Time) ``` [`SeverityNumber`](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-severitynumber) @@ -253,6 +251,23 @@ Rejected alternatives: - [Add XYZ method to Logger](#add-xyz-method-to-logger) - [Rename KeyValue to Attr](#rename-keyvalue-to-attr) +### Logger.Enabled + +The `Enabled` method implements the [`Enabled` operation](https://opentelemetry.io/docs/specs/otel/logs/bridge-api/#enabled). + +[`Context` associated with the `LogRecord`](https://opentelemetry.io/docs/specs/otel/context/) +is accepted as a `context.Context` method argument. + +Calls to `Enabled` are supposed to be on the hot path and the list of arguments +can be extended in the future. Therefore, in order to reduce the number of heap +allocations and make it possible to handle new arguments, `Enabled` accepts +an `EnabledParameters` struct, defined in [logger.go](logger.go), as the second +method argument. + +The `EnabledParameters` uses fields, instead of getters and setters, to allow +simpler usage, making it possible to configure the `EnabledParameters` in the same line +where `Enabled` is called. + ### noop package The `go.opentelemetry.io/otel/log/noop` package provides @@ -307,7 +322,7 @@ The API needs to evolve orthogonally to `slog`. `slog` is not compliant with the [Logs Bridge API](https://opentelemetry.io/docs/specs/otel/logs/bridge-api/), and we cannot expect the Go team to make `slog` compliant with it. -The interoperabilty can be achieved using [a log bridge](https://opentelemetry.io/docs/specs/otel/glossary/#log-appender--bridge). +The interoperability can be achieved using [a log bridge](https://opentelemetry.io/docs/specs/otel/glossary/#log-appender--bridge). You can read more about OpenTelemetry Logs design on [opentelemetry.io](https://opentelemetry.io/docs/concepts/signals/logs/). diff --git a/vendor/go.opentelemetry.io/otel/log/doc.go b/vendor/go.opentelemetry.io/otel/log/doc.go index 420b6898e89..18cbd1cb2e5 100644 --- a/vendor/go.opentelemetry.io/otel/log/doc.go +++ b/vendor/go.opentelemetry.io/otel/log/doc.go @@ -2,10 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 /* -Package log provides the OpenTelemetry Logs Bridge API. +Package log provides the OpenTelemetry Logs API. -This package is intended to be a bridge between existing logging libraries and -OpenTelemetry. It is not designed to be a logging API itself. +This package is intended to be used by bridges between existing logging +libraries and OpenTelemetry. Users should not directly use this package as a +logging library. Instead, install one of the bridges listed in the +[registry], and use the associated logging library. # API Implementations @@ -68,5 +70,7 @@ It is strongly recommended that authors only embed go.opentelemetry.io/otel/log/noop if they choose this default behavior. That implementation is the only one OpenTelemetry authors can guarantee will fully implement all the API interfaces when a user updates their API. 
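As a hedged aside on the embedding guidance in the doc.go hunk above: a third-party API implementation might embed the noop implementation so that any interfaces added to the API in future releases still get a safe default. The package name `mylog` and the `Emit` body are illustrative assumptions, not part of this change:

```go
package mylog

import (
	"context"

	"go.opentelemetry.io/otel/log"
	"go.opentelemetry.io/otel/log/noop"
)

// Logger embeds noop.Logger so any methods added to the API interfaces in
// future releases are still implemented (as no-ops) by this type.
type Logger struct {
	noop.Logger
}

// Emit overrides the no-op behavior with custom handling.
func (l Logger) Emit(ctx context.Context, r log.Record) {
	// Hypothetical: forward the record to a backend here.
	_ = r.Body()
}
```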
+ +[registry]: https://opentelemetry.io/ecosystem/registry/?language=go&component=log-bridge */ package log // import "go.opentelemetry.io/otel/log" diff --git a/vendor/go.opentelemetry.io/otel/log/global/README.md b/vendor/go.opentelemetry.io/otel/log/global/README.md new file mode 100644 index 00000000000..11e5afefc01 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/log/global/README.md @@ -0,0 +1,3 @@ +# Log Global + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/log/global)](https://pkg.go.dev/go.opentelemetry.io/otel/log/global) diff --git a/vendor/go.opentelemetry.io/otel/log/global/log.go b/vendor/go.opentelemetry.io/otel/log/global/log.go new file mode 100644 index 00000000000..71ec577986d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/log/global/log.go @@ -0,0 +1,49 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package global provides access to a global implementation of the OpenTelemetry +Logs Bridge API. + +This package is experimental. It will be deprecated and removed when the [log] +package becomes stable. Its functionality will be migrated to +go.opentelemetry.io/otel. +*/ +package global // import "go.opentelemetry.io/otel/log/global" + +import ( + "go.opentelemetry.io/otel/log" + "go.opentelemetry.io/otel/log/internal/global" +) + +// Logger returns a [log.Logger] configured with the provided name and options +// from the globally configured [log.LoggerProvider]. +// +// If this is called before a global LoggerProvider is configured, the returned +// Logger will be a No-Op implementation of a Logger. When a global +// LoggerProvider is registered for the first time, the returned Logger is +// updated in-place to report to this new LoggerProvider. There is no need to +// call this function again for an updated instance. +// +// This is a convenience function. It is equivalent to: +// +// GetLoggerProvider().Logger(name, options...) +func Logger(name string, options ...log.LoggerOption) log.Logger { + return GetLoggerProvider().Logger(name, options...) +} + +// GetLoggerProvider returns the globally configured [log.LoggerProvider]. +// +// If a global LoggerProvider has not been configured with [SetLoggerProvider], +// the returned LoggerProvider will be a No-Op implementation of a LoggerProvider. When +// a global LoggerProvider is registered for the first time, the returned +// LoggerProvider and all of its created Loggers are updated in-place. There is +// no need to call this function again for an updated instance. +func GetLoggerProvider() log.LoggerProvider { + return global.GetLoggerProvider() +} + +// SetLoggerProvider configures provider as the global [log.LoggerProvider]. +func SetLoggerProvider(provider log.LoggerProvider) { + global.SetLoggerProvider(provider) +} diff --git a/vendor/go.opentelemetry.io/otel/log/internal/global/log.go b/vendor/go.opentelemetry.io/otel/log/internal/global/log.go new file mode 100644 index 00000000000..d97ee966350 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/log/internal/global/log.go @@ -0,0 +1,107 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package global // import "go.opentelemetry.io/otel/log/internal/global" + +import ( + "context" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/log" + "go.opentelemetry.io/otel/log/embedded" +) + +// instLib defines the instrumentation library a logger is created for. +// +// Do not use sdk/instrumentation (API cannot depend on the SDK). 
+type instLib struct { + name string + version string + schemaURL string + attrs attribute.Set +} + +type loggerProvider struct { + embedded.LoggerProvider + + mu sync.Mutex + loggers map[instLib]*logger + delegate log.LoggerProvider +} + +// Compile-time guarantee loggerProvider implements LoggerProvider. +var _ log.LoggerProvider = (*loggerProvider)(nil) + +func (p *loggerProvider) Logger(name string, options ...log.LoggerOption) log.Logger { + p.mu.Lock() + defer p.mu.Unlock() + + if p.delegate != nil { + return p.delegate.Logger(name, options...) + } + + cfg := log.NewLoggerConfig(options...) + key := instLib{ + name: name, + version: cfg.InstrumentationVersion(), + schemaURL: cfg.SchemaURL(), + attrs: cfg.InstrumentationAttributes(), + } + + if p.loggers == nil { + l := &logger{name: name, options: options} + p.loggers = map[instLib]*logger{key: l} + return l + } + + if l, ok := p.loggers[key]; ok { + return l + } + + l := &logger{name: name, options: options} + p.loggers[key] = l + return l +} + +func (p *loggerProvider) setDelegate(provider log.LoggerProvider) { + p.mu.Lock() + defer p.mu.Unlock() + + p.delegate = provider + for _, l := range p.loggers { + l.setDelegate(provider) + } + p.loggers = nil // Only set logger delegates once. +} + +type logger struct { + embedded.Logger + + name string + options []log.LoggerOption + + delegate atomic.Value // log.Logger +} + +// Compile-time guarantee logger implements Logger. +var _ log.Logger = (*logger)(nil) + +func (l *logger) Emit(ctx context.Context, r log.Record) { + if del, ok := l.delegate.Load().(log.Logger); ok { + del.Emit(ctx, r) + } +} + +func (l *logger) Enabled(ctx context.Context, param log.EnabledParameters) bool { + var enabled bool + if del, ok := l.delegate.Load().(log.Logger); ok { + enabled = del.Enabled(ctx, param) + } + return enabled +} + +func (l *logger) setDelegate(provider log.LoggerProvider) { + l.delegate.Store(provider.Logger(l.name, l.options...)) +} diff --git a/vendor/go.opentelemetry.io/otel/log/internal/global/state.go b/vendor/go.opentelemetry.io/otel/log/internal/global/state.go new file mode 100644 index 00000000000..dbe1c2fbfb6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/log/internal/global/state.go @@ -0,0 +1,53 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package global // import "go.opentelemetry.io/otel/log/internal/global" + +import ( + "errors" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/log" +) + +var ( + globalLoggerProvider = defaultLoggerProvider() + + delegateLoggerOnce sync.Once +) + +func defaultLoggerProvider() *atomic.Value { + v := &atomic.Value{} + v.Store(loggerProviderHolder{provider: &loggerProvider{}}) + return v +} + +type loggerProviderHolder struct { + provider log.LoggerProvider +} + +// GetLoggerProvider returns the global LoggerProvider. +func GetLoggerProvider() log.LoggerProvider { + return globalLoggerProvider.Load().(loggerProviderHolder).provider +} + +// SetLoggerProvider sets the global LoggerProvider. 
+func SetLoggerProvider(provider log.LoggerProvider) { + current := GetLoggerProvider() + if _, cOk := current.(*loggerProvider); cOk { + if _, mpOk := provider.(*loggerProvider); mpOk && current == provider { + err := errors.New("invalid delegation: LoggerProvider self-delegation") + global.Error(err, "No delegate will be configured") + return + } + } + + delegateLoggerOnce.Do(func() { + if def, ok := current.(*loggerProvider); ok { + def.setDelegate(provider) + } + }) + globalLoggerProvider.Store(loggerProviderHolder{provider: provider}) +} diff --git a/vendor/go.opentelemetry.io/otel/log/keyvalue.go b/vendor/go.opentelemetry.io/otel/log/keyvalue.go index 8258defe360..2e1d30c1b88 100644 --- a/vendor/go.opentelemetry.io/otel/log/keyvalue.go +++ b/vendor/go.opentelemetry.io/otel/log/keyvalue.go @@ -76,7 +76,8 @@ func IntValue(v int) Value { return Int64Value(int64(v)) } // Int64Value returns a [Value] for an int64. func Int64Value(v int64) Value { - return Value{num: uint64(v), any: KindInt64} + // This can be later converted back to int64 (overflow not checked). + return Value{num: uint64(v), any: KindInt64} // nolint:gosec } // Float64Value returns a [Value] for a float64. @@ -146,7 +147,10 @@ func (v Value) AsInt64() int64 { // asInt64 returns the value held by v as an int64. If v is not of KindInt64, // this will return garbage. -func (v Value) asInt64() int64 { return int64(v.num) } +func (v Value) asInt64() int64 { + // Assumes v.num was a valid int64 (overflow not checked). + return int64(v.num) // nolint: gosec +} // AsBool returns the value held by v as a bool. func (v Value) AsBool() bool { @@ -289,7 +293,8 @@ func (v Value) String() string { case KindString: return v.asString() case KindInt64: - return strconv.FormatInt(int64(v.num), 10) + // Assumes v.num was a valid int64 (overflow not checked). + return strconv.FormatInt(int64(v.num), 10) // nolint: gosec case KindFloat64: return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64) case KindBool: @@ -351,16 +356,19 @@ func Bool(key string, value bool) KeyValue { } // Bytes returns a KeyValue for a []byte value. +// The passed slice must not be changed after it is passed. func Bytes(key string, value []byte) KeyValue { return KeyValue{key, BytesValue(value)} } // Slice returns a KeyValue for a []Value value. +// The passed slice must not be changed after it is passed. func Slice(key string, value ...Value) KeyValue { return KeyValue{key, SliceValue(value...)} } // Map returns a KeyValue for a map value. +// The passed slice must not be changed after it is passed. func Map(key string, value ...KeyValue) KeyValue { return KeyValue{key, MapValue(value...)} } diff --git a/vendor/go.opentelemetry.io/otel/log/logger.go b/vendor/go.opentelemetry.io/otel/log/logger.go index df2e88ea6b2..d7d8831be5b 100644 --- a/vendor/go.opentelemetry.io/otel/log/logger.go +++ b/vendor/go.opentelemetry.io/otel/log/logger.go @@ -31,26 +31,26 @@ type Logger interface { Emit(ctx context.Context, record Record) // Enabled returns whether the Logger emits for the given context and - // record. + // param. // - // The passed record is likely to be a partial record with only the - // bridge-relevant information being provided (e.g a record with only the + // The passed param is likely to be a partial record with only the + // bridge-relevant information being provided (e.g a param with only the // Severity set). If a Logger needs more information than is provided, it // is said to be in an indeterminate state (see below). 
// // The returned value will be true when the Logger will emit for the - // provided context and record, and will be false if the Logger will not + // provided context and param, and will be false if the Logger will not // emit. The returned value may be true or false in an indeterminate state. // An implementation should default to returning true for an indeterminate // state, but may return false if valid reasons in particular circumstances // exist (e.g. performance, correctness). // - // The record should not be held by the implementation. A copy should be + // The param should not be held by the implementation. A copy should be // made if the record needs to be held after the call returns. // // Implementations of this method need to be safe for a user to call // concurrently. - Enabled(ctx context.Context, record Record) bool + Enabled(ctx context.Context, param EnabledParameters) bool } // LoggerOption applies configuration options to a [Logger]. @@ -129,3 +129,8 @@ func WithSchemaURL(schemaURL string) LoggerOption { return config }) } + +// EnabledParameters represents payload for [Logger]'s Enabled method. +type EnabledParameters struct { + Severity Severity +} diff --git a/vendor/go.opentelemetry.io/otel/log/noop/noop.go b/vendor/go.opentelemetry.io/otel/log/noop/noop.go index d2e21edba66..f45a7c7e0b3 100644 --- a/vendor/go.opentelemetry.io/otel/log/noop/noop.go +++ b/vendor/go.opentelemetry.io/otel/log/noop/noop.go @@ -47,4 +47,4 @@ type Logger struct{ embedded.Logger } func (Logger) Emit(context.Context, log.Record) {} // Enabled returns false. No log records are ever emitted. -func (Logger) Enabled(context.Context, log.Record) bool { return false } +func (Logger) Enabled(context.Context, log.EnabledParameters) bool { return false } diff --git a/vendor/go.opentelemetry.io/otel/log/provider.go b/vendor/go.opentelemetry.io/otel/log/provider.go index caa89ac024e..5c8ca328f87 100644 --- a/vendor/go.opentelemetry.io/otel/log/provider.go +++ b/vendor/go.opentelemetry.io/otel/log/provider.go @@ -18,8 +18,19 @@ type LoggerProvider interface { // Logger returns a new [Logger] with the provided name and configuration. // + // The name needs to uniquely identify the source of logged code. It is + // recommended that name is the Go package name of the library using a log + // bridge (note: this is not the name of the bridge package). Most + // commonly, this means a bridge will need to accept this value from its + // users. + // // If name is empty, implementations need to provide a default name. // + // The version of the packages using a bridge can be critical information + // to include when logging. The bridge should accept this version + // information and use the [WithInstrumentationVersion] option to configure + // the Logger appropriately. + // // Implementations of this method need to be safe for a user to call // concurrently. Logger(name string, options ...LoggerOption) Logger diff --git a/vendor/go.opentelemetry.io/otel/log/record.go b/vendor/go.opentelemetry.io/otel/log/record.go index 96302f00624..7cf5446a041 100644 --- a/vendor/go.opentelemetry.io/otel/log/record.go +++ b/vendor/go.opentelemetry.io/otel/log/record.go @@ -16,6 +16,9 @@ const attributesInlineCount = 5 // Record represents a log record. type Record struct { + // Ensure forward compatibility by explicitly making this not comparable. + noCmp [0]func() //nolint: unused // This is indeed used. 
+ timestamp time.Time observedTimestamp time.Time severity Severity diff --git a/vendor/go.opentelemetry.io/otel/renovate.json b/vendor/go.opentelemetry.io/otel/renovate.json index 0a29a2f13d8..4f80c898a1d 100644 --- a/vendor/go.opentelemetry.io/otel/renovate.json +++ b/vendor/go.opentelemetry.io/otel/renovate.json @@ -14,12 +14,6 @@ "matchDepTypes": ["indirect"], "enabled": true }, - { - "matchFileNames": ["internal/tools/**"], - "matchManagers": ["gomod"], - "matchDepTypes": ["indirect"], - "enabled": false - }, { "matchPackageNames": ["google.golang.org/genproto/googleapis/**"], "groupName": "googleapis" diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go index 728115045bb..34852a47b21 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go @@ -3,6 +3,8 @@ package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" +import "go.opentelemetry.io/otel/attribute" + // Scope represents the instrumentation scope. type Scope struct { // Name is the name of the instrumentation scope. This should be the @@ -12,4 +14,6 @@ type Scope struct { Version string // SchemaURL of the telemetry emitted by the scope. SchemaURL string + // Attributes of the telemetry emitted by the scope. + Attributes attribute.Set } diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/DESIGN.md b/vendor/go.opentelemetry.io/otel/sdk/log/DESIGN.md index 6d73150db5d..2e0fb15e29c 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/DESIGN.md +++ b/vendor/go.opentelemetry.io/otel/sdk/log/DESIGN.md @@ -5,13 +5,28 @@ `go.opentelemetry.io/otel/sdk/log` provides Logs SDK compliant with the [specification](https://opentelemetry.io/docs/specs/otel/logs/sdk/). -The main and recommended use case is to configure the SDK to use an OTLP -exporter with a batch processor.[^1] Therefore, the design aims to be -high-performant in this scenario. - The prototype was created in [#4955](https://github.com/open-telemetry/opentelemetry-go/pull/4955). +## Background + +The goal is to design the exported API of the SDK so that it has low performance +overhead. Most importantly, the design should reduce the number of heap +allocations and even make a zero-allocation implementation possible. +Reducing the number of heap allocations lowers GC pressure, which can +produce some of the largest improvements in performance.[^1] + +The main and recommended use case is to configure the SDK to use an OTLP +exporter with a batch processor.[^2] Therefore, the implementation aims to be +high-performant in this scenario. Some users that require high throughput may +also want to use e.g. a [user_events](https://docs.kernel.org/trace/user_events.html), +[LTTng](https://lttng.org/docs/v2.13/#doc-tracing-your-own-user-application) +or [ETW](https://learn.microsoft.com/en-us/windows/win32/etw/about-event-tracing) +exporter with a simple processor. Users may also want to use +[OTLP File](https://opentelemetry.io/docs/specs/otel/protocol/file-exporter/) +or [Standard Output](https://opentelemetry.io/docs/specs/otel/logs/sdk_exporters/stdout/) +exporter in order to emit logs to standard output/error or files. + ## Modules structure The SDK is published as a single `go.opentelemetry.io/otel/sdk/log` Go module. 
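To make the new `Enabled`/`EnabledParameters` API from the earlier hunks concrete, here is a minimal, hedged sketch of how a bridge might guard expensive record construction, assuming the `global` package added in this change; the scope name and message are made up for illustration:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel/log"
	"go.opentelemetry.io/otel/log/global"
)

func emitIfEnabled(ctx context.Context) {
	logger := global.Logger("example/bridge") // hypothetical scope name

	// EnabledParameters uses plain fields, so it can be configured inline.
	if !logger.Enabled(ctx, log.EnabledParameters{Severity: log.SeverityInfo}) {
		return // skip constructing the record entirely
	}

	var r log.Record
	r.SetSeverity(log.SeverityInfo)
	r.SetBody(log.StringValue("expensive message"))
	logger.Emit(ctx, r)
}

func main() { emitIfEnabled(context.Background()) }
```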
@@ -107,15 +122,15 @@ The benchmark results can be found in [the prototype](https://github.com/open-te ## Rejected alternatives -### Represent both LogRecordProcessor and LogRecordExporter as Expoter +### Represent both LogRecordProcessor and LogRecordExporter as Exporter Because the [LogRecordProcessor](https://opentelemetry.io/docs/specs/otel/logs/sdk/#logrecordprocessor) and the [LogRecordExporter](https://opentelemetry.io/docs/specs/otel/logs/sdk/#logrecordexporter) abstractions are so similar, there was a proposal to unify them under -single `Expoter` interface.[^2] +single `Exporter` interface.[^3] However, introducing a `Processor` interface makes it easier -to create custom processor decorators[^3] +to create custom processor decorators[^4] and makes the design more aligned with the specification. ### Embed log.Record @@ -131,6 +146,31 @@ provided via API. Moreover it is safer to have these abstractions decoupled. E.g. there can be a need for some fields that can be set via API and cannot be modified by the processors. -[^1]: [OpenTelemetry Logging](https://opentelemetry.io/docs/specs/otel/logs) -[^2]: [Conversation on representing LogRecordProcessor and LogRecordExporter via a single Expoter interface](https://github.com/open-telemetry/opentelemetry-go/pull/4954#discussion_r1515050480) -[^3]: [Introduce Processor](https://github.com/pellared/opentelemetry-go/pull/9) +### Processor.OnEmit to accept Record values + +There was a proposal to make the [Processor](#processor)'s `OnEmit` +accept a [Record](#record) value instead of a pointer to reduce allocations +as well as to have a design similar to [`slog.Handler`](https://pkg.go.dev/log/slog#Handler). + +There have been long discussions within the OpenTelemetry Specification SIG[^5] +about whether such a design would comply with the specification. The summary +was that the current processor design flaws are present in other languages as +well. Therefore, it would be favorable to introduce new processing concepts +(e.g. chaining processors) in the specification that would coexist with the +current "mutable" processor design. + +The performance disadvantages caused by using a pointer (which at the time of +writing causes an additional heap allocation) may be mitigated by future +versions of the Go compiler, thanks to improved escape analysis and +profile-guided optimization (PGO)[^6]. + +On the other hand, [Processor](#processor)'s `Enabled` is fine to accept +a [Record](#record) value as the processors should not mutate the passed +parameters. 
+ +[^1]: [A Guide to the Go Garbage Collector](https://tip.golang.org/doc/gc-guide) +[^2]: [OpenTelemetry Logging](https://opentelemetry.io/docs/specs/otel/logs) +[^3]: [Conversation on representing LogRecordProcessor and LogRecordExporter via a single Exporter interface](https://github.com/open-telemetry/opentelemetry-go/pull/4954#discussion_r1515050480) +[^4]: [Introduce Processor](https://github.com/pellared/opentelemetry-go/pull/9) +[^5]: [Log record mutations do not have to be visible in next registered processors](https://github.com/open-telemetry/opentelemetry-specification/pull/4067) +[^6]: [Profile-guided optimization](https://go.dev/doc/pgo) diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/batch.go b/vendor/go.opentelemetry.io/otel/sdk/log/batch.go index 8e43b0e8f75..28c969262b4 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/batch.go +++ b/vendor/go.opentelemetry.io/otel/sdk/log/batch.go @@ -19,6 +19,7 @@ const ( dfltExpInterval = time.Second dfltExpTimeout = 30 * time.Second dfltExpMaxBatchSize = 512 + dfltExpBufferSize = 1 envarMaxQSize = "OTEL_BLRP_MAX_QUEUE_SIZE" envarExpInterval = "OTEL_BLRP_SCHEDULE_DELAY" @@ -96,6 +97,8 @@ type BatchProcessor struct { // stopped holds the stopped state of the BatchProcessor. stopped atomic.Bool + + noCmp [0]func() //nolint: unused // This is indeed used. } // NewBatchProcessor decorates the provided exporter @@ -117,8 +120,7 @@ func NewBatchProcessor(exporter Exporter, opts ...BatchProcessorOption) *BatchPr exporter = newChunkExporter(exporter, cfg.expMaxBatchSize.Value) b := &BatchProcessor{ - // TODO: explore making the size of this configurable. - exporter: newBufferExporter(exporter, 1), + exporter: newBufferExporter(exporter, cfg.expBufferSize.Value), q: newQueue(cfg.maxQSize.Value), batchSize: cfg.expMaxBatchSize.Value, @@ -176,11 +178,13 @@ func (b *BatchProcessor) poll(interval time.Duration) (done chan struct{}) { } // OnEmit batches provided log record. -func (b *BatchProcessor) OnEmit(_ context.Context, r Record) error { +func (b *BatchProcessor) OnEmit(_ context.Context, r *Record) error { if b.stopped.Load() || b.q == nil { return nil } - if n := b.q.Enqueue(r); n >= b.batchSize { + // The record is cloned so that changes done by subsequent processors + // are not going to lead to a data race. + if n := b.q.Enqueue(r.Clone()); n >= b.batchSize { select { case b.pollTrigger <- struct{}{}: default: @@ -192,11 +196,6 @@ func (b *BatchProcessor) OnEmit(_ context.Context, r Record) error { return nil } -// Enabled returns if b is enabled. -func (b *BatchProcessor) Enabled(context.Context, Record) bool { - return !b.stopped.Load() && b.q != nil -} - // Shutdown flushes queued log records and shuts down the decorated exporter. func (b *BatchProcessor) Shutdown(ctx context.Context) error { if b.stopped.Swap(true) || b.q == nil { @@ -350,6 +349,7 @@ type batchConfig struct { expInterval setting[time.Duration] expTimeout setting[time.Duration] expMaxBatchSize setting[int] + expBufferSize setting[int] } func newBatchConfig(options []BatchProcessorOption) batchConfig { @@ -383,6 +383,10 @@ func newBatchConfig(options []BatchProcessorOption) batchConfig { clampMax[int](c.maxQSize.Value), fallback[int](dfltExpMaxBatchSize), ) + c.expBufferSize = c.expBufferSize.Resolve( + clearLessThanOne[int](), + fallback[int](dfltExpBufferSize), + ) return c } @@ -459,3 +463,15 @@ func WithExportMaxBatchSize(size int) BatchProcessorOption { return cfg }) } + +// WithExportBufferSize sets the batch buffer size. 
+// Batches will be temporarily kept in a memory buffer until they are exported. +// +// By default, a value of 1 will be used. +// The default value is also used when the provided value is less than one. +func WithExportBufferSize(size int) BatchProcessorOption { + return batchOptionFunc(func(cfg batchConfig) batchConfig { + cfg.expBufferSize = newSetting(size) + return cfg + }) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/doc.go b/vendor/go.opentelemetry.io/otel/sdk/log/doc.go index 10704030aba..14a581db6b6 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/doc.go +++ b/vendor/go.opentelemetry.io/otel/sdk/log/doc.go @@ -1,9 +1,39 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -// TODO (#5065): Expand documentation stub. - /* Package log provides the OpenTelemetry Logs SDK. + +See https://opentelemetry.io/docs/concepts/signals/logs/ for information +about the concept of OpenTelemetry Logs and +https://opentelemetry.io/docs/concepts/components/ for more information +about OpenTelemetry SDKs. + +The entry point for the log package is [NewLoggerProvider]. +[LoggerProvider] is the object that all Bridge API calls use to create +Loggers, and ultimately emit log records. +Also, it is an object that should be used to +control the life-cycle (start, flush, and shutdown) of the Logs SDK. + +A LoggerProvider needs to be configured to process the log records; this is +done by configuring it with a [Processor] implementation using [WithProcessor]. +The log package provides the [BatchProcessor] and [SimpleProcessor] +that are configured with an [Exporter] implementation which +exports the log records to a given destination. See +[go.opentelemetry.io/otel/exporters] for exporters that can be used with these +Processors. + +The data generated by a LoggerProvider needs to include information about its +origin. A LoggerProvider needs to be configured with a Resource, by using +[WithResource], to include this information. This Resource +should be used to describe the unique runtime environment instrumented code +is being run on. That way when multiple instances of the code are collected +at a single endpoint their origin is decipherable. + +See [go.opentelemetry.io/otel/log] for more information about +the OpenTelemetry Logs Bridge API. + +See [go.opentelemetry.io/otel/sdk/log/internal/x] for information about the +experimental features. */ package log // import "go.opentelemetry.io/otel/sdk/log" diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/exporter.go b/vendor/go.opentelemetry.io/otel/sdk/log/exporter.go index 1cdddc03e39..e4e3c5402bf 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/exporter.go +++ b/vendor/go.opentelemetry.io/otel/sdk/log/exporter.go @@ -15,10 +15,6 @@ import ( ) // Exporter handles the delivery of log records to external receivers. -// -// Any of the Exporter's methods may be called concurrently with itself -// or with other methods. It is the responsibility of the Exporter to manage -// this concurrency. type Exporter interface { // Export transmits log records to a receiver. // @@ -34,7 +30,11 @@ type Exporter interface { // // Before modifying a Record, the implementation must use Record.Clone // to create a copy that shares no state with the original. + // + // Export should never be called concurrently with other Export calls. + // However, it may be called concurrently with other methods. Export(ctx context.Context, records []Record) error + // Shutdown is called when the SDK shuts down. 
Any cleanup or release of // resources held by the exporter should be done in this call. // @@ -43,12 +43,17 @@ // // After Shutdown is called, calls to Export, Shutdown, or ForceFlush // should perform no operation and return nil error. + // + // Shutdown may be called concurrently with itself or with other methods. Shutdown(ctx context.Context) error + // ForceFlush exports log records to the configured Exporter that have not yet // been exported. // // The deadline or cancellation of the passed context must be honored. An // appropriate error should be returned in these situations. + // + // ForceFlush may be called concurrently with itself or with other methods. ForceFlush(ctx context.Context) error } diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/internal/x/README.md b/vendor/go.opentelemetry.io/otel/sdk/log/internal/x/README.md new file mode 100644 index 00000000000..73f4db626af --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/log/internal/x/README.md @@ -0,0 +1,35 @@ +# Experimental Features + +The Logs SDK contains features that have not yet stabilized. +These features are added to the OpenTelemetry Go Logs SDK prior to stabilization so that users can start experimenting with them and provide feedback. + +These features may change in backwards-incompatible ways as feedback is applied. +See the [Compatibility and Stability](#compatibility-and-stability) section for more information. + +## Features + +- [Filter Processors](#filter-processor) + +### Filter Processor + +Users of logging libraries often want to know if a log `Record` will be processed or dropped before they perform complex operations to construct the `Record`. +The [`Logger`] in the Logs Bridge API provides the `Enabled` method for just this use-case. +In order for the Logs Bridge SDK to effectively implement this API, it needs to be known if the registered [`Processor`]s are enabled for the `Record` within a context. +A [`Processor`] that knows, and can identify, what `Record` it will process or drop when it is passed to `OnEmit` can communicate this to the SDK `Logger` by implementing the `FilterProcessor`. + +By default, the SDK `Logger.Enabled` will return true when called. +Only if all the registered [`Processor`]s implement `FilterProcessor` and they all return `false` will `Logger.Enabled` return `false`. + +See the [`minsev`] [`Processor`] for an example use-case. +It is used to filter out `Record`s that have a `Severity` below a threshold. + +[`Logger`]: https://pkg.go.dev/go.opentelemetry.io/otel/log#Logger +[`Processor`]: https://pkg.go.dev/go.opentelemetry.io/otel/sdk/log#Processor +[`minsev`]: https://pkg.go.dev/go.opentelemetry.io/contrib/processors/minsev + +## Compatibility and Stability + +Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../../VERSIONING.md). +These features may be removed or modified in successive version releases, including patch versions. + +When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release. 
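A minimal sketch of the `FilterProcessor` extension point described in this README, assuming a severity threshold similar in spirit to (but not copied from) the `minsev` processor; the wrapper type name is an assumption:

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel/log"
	sdklog "go.opentelemetry.io/otel/sdk/log"
)

// minSeverity wraps another Processor and drops records below a threshold.
// Providing Enabled lets the SDK Logger.Enabled short-circuit record
// construction when every registered processor is a FilterProcessor.
type minSeverity struct {
	sdklog.Processor // Shutdown and ForceFlush delegate to the wrapped processor.
	min              log.Severity
}

func (p minSeverity) Enabled(_ context.Context, param log.EnabledParameters) bool {
	return param.Severity >= p.min
}

func (p minSeverity) OnEmit(ctx context.Context, r *sdklog.Record) error {
	if r.Severity() < p.min {
		return nil // drop the record
	}
	return p.Processor.OnEmit(ctx, r)
}
```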
diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/log/internal/x/x.go new file mode 100644 index 00000000000..ca78d109778 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/log/internal/x/x.go @@ -0,0 +1,47 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package x contains support for Logs SDK experimental features. +package x // import "go.opentelemetry.io/otel/sdk/log/internal/x" + +import ( + "context" + + "go.opentelemetry.io/otel/log" +) + +// FilterProcessor is a [go.opentelemetry.io/otel/sdk/log.Processor] that knows, +// and can identify, what [log.Record] it will process or drop when it is +// passed to OnEmit. +// +// This is useful for users of logging libraries that want to know if a [log.Record] +// will be processed or dropped before they perform complex operations to +// construct the [log.Record]. +// +// Processor implementations that choose to support this by satisfying this +// interface are expected to re-evaluate the [log.Record]s passed to OnEmit; it is +// not expected that the caller of OnEmit will use the functionality from this +// interface prior to calling OnEmit. +// +// This should only be implemented for Processors that can make a reliable +// enough determination of this prior to processing a [log.Record] and where +// the result is dynamic. +// +// [Processor]: https://pkg.go.dev/go.opentelemetry.io/otel/sdk/log#Processor +type FilterProcessor interface { + // Enabled returns whether the Processor will process for the given context + // and param. + // + // The passed param is likely to be a partial record with only the + // bridge-relevant information being provided (e.g. a record with only the + // Severity set). If a Logger needs more information than is provided, it + // is said to be in an indeterminate state (see below). + // + // The returned value will be true when the Processor will process for the + // provided context and param, and will be false if the Processor will not + // process. An implementation should default to returning true for an + // indeterminate state. + // + // Implementations should not modify the param. + Enabled(ctx context.Context, param log.EnabledParameters) bool +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/logger.go b/vendor/go.opentelemetry.io/otel/sdk/log/logger.go index 245867f3fd6..d6ca2ea41aa 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/logger.go +++ b/vendor/go.opentelemetry.io/otel/sdk/log/logger.go @@ -11,6 +11,7 @@ import ( "go.opentelemetry.io/otel/log" "go.opentelemetry.io/otel/log/embedded" "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/log/internal/x" "go.opentelemetry.io/otel/trace" ) @@ -36,19 +37,35 @@ func newLogger(p *LoggerProvider, scope instrumentation.Scope) *logger { func (l *logger) Emit(ctx context.Context, r log.Record) { newRecord := l.newRecord(ctx, r) for _, p := range l.provider.processors { - if err := p.OnEmit(ctx, newRecord); err != nil { + if err := p.OnEmit(ctx, &newRecord); err != nil { otel.Handle(err) } } } -func (l *logger) Enabled(ctx context.Context, r log.Record) bool { - newRecord := l.newRecord(ctx, r) - for _, p := range l.provider.processors { - if enabled := p.Enabled(ctx, newRecord); enabled { +// Enabled returns true if at least one Processor held by the LoggerProvider +// that created the logger will process for the provided context and param. 
+// +// If it is not possible to definitively determine that the param will be +// processed, true will be returned by default. A value of false will only be +// returned if it can be positively verified that no Processor will process. +func (l *logger) Enabled(ctx context.Context, param log.EnabledParameters) bool { + fltrs := l.provider.filterProcessors() + // If there are more Processors than FilterProcessors we cannot be sure + // that all Processors will drop the record. Therefore, return true. + // + // If all Processors are FilterProcessors, check if any is enabled. + return len(l.provider.processors) > len(fltrs) || anyEnabled(ctx, param, fltrs) +} + +func anyEnabled(ctx context.Context, param log.EnabledParameters, fltrs []x.FilterProcessor) bool { + for _, f := range fltrs { + if f.Enabled(ctx, param) { + // At least one Processor will process the Record. return true } } + // No Processor will process the record. return false } diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/processor.go b/vendor/go.opentelemetry.io/otel/sdk/log/processor.go index f95ea949027..fcab34c7a48 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/log/processor.go @@ -12,6 +12,9 @@ import ( // Any of the Processor's methods may be called concurrently with itself // or with other methods. It is the responsibility of the Processor to manage // this concurrency. +// +// See [go.opentelemetry.io/otel/sdk/log/internal/x] for information about how +// a Processor can be extended to support experimental features. type Processor interface { // OnEmit is called when a Record is emitted. // @@ -26,27 +29,15 @@ type Processor interface { // considered unrecoverable and will be reported to a configured error // Handler. // - // Before modifying a Record, the implementation must use Record.Clone + // The SDK invokes the processors sequentially in the same order as + // they were registered using [WithProcessor]. + // Implementations may synchronously modify the record so that the changes + // are visible in the next registered processor. + // Notice that [Record] is not safe for concurrent use. Therefore, asynchronous + // processing may cause race conditions. Use [Record.Clone] // to create a copy that shares no state with the original. - OnEmit(ctx context.Context, record Record) error - // Enabled returns whether the Processor will process for the given context - // and record. - // - // The passed record is likely to be a partial record with only the - // bridge-relevant information being provided (e.g a record with only the - // Severity set). If a Logger needs more information than is provided, it - // is said to be in an indeterminate state (see below). - // - // The returned value will be true when the Processor will process for the - // provided context and record, and will be false if the Processor will not - // process. The returned value may be true or false in an indeterminate - // state. An implementation should default to returning true for an - // indeterminate state, but may return false if valid reasons in particular - // circumstances exist (e.g. performance, correctness). - // - // Before modifying a Record, the implementation must use Record.Clone - // to create a copy that shares no state with the original. - Enabled(ctx context.Context, record Record) bool + OnEmit(ctx context.Context, record *Record) error + + // Shutdown is called when the SDK shuts down. Any cleanup or release of // resources held by the exporter should be done in this call. 
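The hunk above rewrites the Processor contract: `OnEmit` now receives a `*Record`, processors run in registration order, and synchronous in-place mutations are visible to later processors. A hedged sketch of a decorator relying on that contract; the wrapper type and attribute are assumptions, not from this diff:

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel/log"
	sdklog "go.opentelemetry.io/otel/sdk/log"
)

// enrich adds an attribute to every record before delegating. Because the
// SDK passes *Record and invokes processors in registration order, the
// mutation is visible to processors registered after this one.
type enrich struct {
	sdklog.Processor
}

func (p enrich) OnEmit(ctx context.Context, r *sdklog.Record) error {
	r.AddAttributes(log.String("deployment.tier", "backend")) // hypothetical attribute
	return p.Processor.OnEmit(ctx, r)
}
```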
// @@ -56,6 +47,7 @@ type Processor interface { // After Shutdown is called, calls to Export, Shutdown, or ForceFlush // should perform no operation and return nil error. Shutdown(ctx context.Context) error + // ForceFlush exports log records to the configured Exporter that have not yet // been exported. // diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/provider.go b/vendor/go.opentelemetry.io/otel/sdk/log/provider.go index 84bb14c5ec7..8c825e6ab79 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/provider.go +++ b/vendor/go.opentelemetry.io/otel/sdk/log/provider.go @@ -9,11 +9,13 @@ import ( "sync" "sync/atomic" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/log" "go.opentelemetry.io/otel/log/embedded" "go.opentelemetry.io/otel/log/noop" "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/log/internal/x" "go.opentelemetry.io/otel/sdk/resource" ) @@ -65,10 +67,15 @@ type LoggerProvider struct { attributeCountLimit int attributeValueLengthLimit int + fltrProcessorsOnce sync.Once + fltrProcessors []x.FilterProcessor + loggersMu sync.Mutex loggers map[instrumentation.Scope]*logger stopped atomic.Bool + + noCmp [0]func() //nolint: unused // This is indeed used. } // Compile-time check LoggerProvider implements log.LoggerProvider. @@ -90,6 +97,17 @@ func NewLoggerProvider(opts ...LoggerProviderOption) *LoggerProvider { } } +func (p *LoggerProvider) filterProcessors() []x.FilterProcessor { + p.fltrProcessorsOnce.Do(func() { + for _, proc := range p.processors { + if f, ok := proc.(x.FilterProcessor); ok { + p.fltrProcessors = append(p.fltrProcessors, f) + } + } + }) + return p.fltrProcessors +} + // Logger returns a new [log.Logger] with the provided name and configuration. // // If p is shut down, a [noop.Logger] instance is returned. @@ -106,9 +124,10 @@ func (p *LoggerProvider) Logger(name string, opts ...log.LoggerOption) log.Logge cfg := log.NewLoggerConfig(opts...) scope := instrumentation.Scope{ - Name: name, - Version: cfg.InstrumentationVersion(), - SchemaURL: cfg.SchemaURL(), + Name: name, + Version: cfg.InstrumentationVersion(), + SchemaURL: cfg.SchemaURL(), + Attributes: cfg.InstrumentationAttributes(), } p.loggersMu.Lock() @@ -179,7 +198,11 @@ func (fn loggerProviderOptionFunc) apply(c providerConfig) providerConfig { // go.opentelemetry.io/otel/sdk/resource package will be used. func WithResource(res *resource.Resource) LoggerProviderOption { return loggerProviderOptionFunc(func(cfg providerConfig) providerConfig { - cfg.resource = res + var err error + cfg.resource, err = resource.Merge(resource.Environment(), res) + if err != nil { + otel.Handle(err) + } return cfg }) } @@ -189,8 +212,8 @@ func WithResource(res *resource.Resource) LoggerProviderOption { // By default, if this option is not used, the LoggerProvider will perform no // operations; no data will be exported without a processor. // -// Each WithProcessor creates a separate pipeline. Use custom decorators -// for advanced scenarios such as enriching with attributes. +// The SDK invokes the processors sequentially in the same order as they were +// registered. // // For production, use [NewBatchProcessor] to batch log records before they are exported. // For testing and debugging, use [NewSimpleProcessor] to synchronously export log records. 
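Tying the provider options above together, a hedged sketch of a production-style setup using the batch processor and the new `WithExportBufferSize` option; the stdout exporter is only an assumption used for illustration:

```go
package example

import (
	"go.opentelemetry.io/otel/exporters/stdout/stdoutlog"
	sdklog "go.opentelemetry.io/otel/sdk/log"
)

func newLoggerProvider() (*sdklog.LoggerProvider, error) {
	exp, err := stdoutlog.New() // assumed exporter for illustration
	if err != nil {
		return nil, err
	}
	bp := sdklog.NewBatchProcessor(exp,
		sdklog.WithExportMaxBatchSize(512),
		sdklog.WithExportBufferSize(4), // hold up to 4 batches in memory
	)
	return sdklog.NewLoggerProvider(sdklog.WithProcessor(bp)), nil
}
```

Any additional `WithProcessor` calls would register processors that run after `bp`, in registration order, per the documentation above.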
diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/record.go b/vendor/go.opentelemetry.io/otel/sdk/log/record.go index a6e50df7782..f04e5b28f95 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/record.go +++ b/vendor/go.opentelemetry.io/otel/sdk/log/record.go @@ -42,6 +42,10 @@ func putIndex(index map[string]int) { } // Record is a log record emitted by the Logger. +// +// Do not create instances of Record on your own in production code. +// You can use [go.opentelemetry.io/otel/sdk/log/logtest.RecordFactory] +// for testing purposes. type Record struct { // Do not embed the log.Record. Attributes need to be overwrite-able and // deep-copying needs to be possible. @@ -86,6 +90,8 @@ type Record struct { attributeValueLengthLimit int attributeCountLimit int + + noCmp [0]func() //nolint: unused // This is indeed used. } func (r *Record) addDropped(n int) { @@ -228,7 +234,7 @@ func (r *Record) AddAttributes(attrs ...log.KeyValue) { // // Do not use head(attrs, r.attributeCountLimit - n) here. If // (r.attributeCountLimit - n) <= 0 attrs needs to be emptied. - last := max(0, (r.attributeCountLimit - n)) + last := max(0, r.attributeCountLimit-n) r.addDropped(len(attrs) - last) attrs = attrs[:last] } @@ -400,7 +406,7 @@ func (r *Record) applyValueLimits(val log.Value) log.Value { case log.KindString: s := val.AsString() if len(s) > r.attributeValueLengthLimit { - val = log.StringValue(truncate(s, r.attributeValueLengthLimit)) + val = log.StringValue(truncate(r.attributeValueLengthLimit, s)) } case log.KindSlice: sl := val.AsSlice() @@ -421,40 +427,78 @@ ... return val } -// truncate returns a copy of str truncated to have a length of at most n -// characters. If the length of str is less than n, str itself is returned. +// truncate returns a truncated version of s such that it contains less than +// the limit number of characters. Truncation is applied by returning the limit +// number of valid characters contained in s. +// +// If limit is negative, it returns the original string. // -// The truncate of str ensures that no valid UTF-8 code point is split. The -// copy returned will be less than n if a characters straddles the length -// limit. +// UTF-8 is supported. When truncating, all invalid characters are dropped +// before applying truncation. // -// No truncation is performed if n is less than zero. -func truncate(str string, n int) string { - if n < 0 { - return str +// If s already contains less than the limit number of bytes, it is returned +// unchanged. No invalid characters are removed. +func truncate(limit int, s string) string { + // This prioritizes performance in the following order based on the most + // common expected use-cases. + // + // - Short values less than the default limit (128). + // - Strings with valid encodings that exceed the limit. + // - No limit. + // - Strings with invalid encodings that exceed the limit. + if limit < 0 || len(s) <= limit { + return s } - // cut returns a copy of the s truncated to not exceed a length of n. If - // invalid UTF-8 is encountered, s is returned with false. Otherwise, the - // truncated copy will be returned with true. - cut := func(s string) (string, bool) { - var i int - for i = 0; i < n; { - r, size := utf8.DecodeRuneInString(s[i:]) - if r == utf8.RuneError { - return s, false - } - if i+size > n { - break + // Optimistically, assume all valid UTF-8. 
+ var b strings.Builder + count := 0 + for i, c := range s { + if c != utf8.RuneError { + count++ + if count > limit { + return s[:i] } - i += size + continue + } + + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // Invalid encoding. + b.Grow(len(s) - 1) + _, _ = b.WriteString(s[:i]) + s = s[i:] + break } - return s[:i], true } - cp, ok := cut(str) - if !ok { - cp, _ = cut(strings.ToValidUTF8(str, "")) + // Fast-path, no invalid input. + if b.Cap() == 0 { + return s } - return cp + + // Truncate while validating UTF-8. + for i := 0; i < len(s) && count < limit; { + c := s[i] + if c < utf8.RuneSelf { + // Optimization for single byte runes (common case). + _ = b.WriteByte(c) + i++ + count++ + continue + } + + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // We checked for all 1-byte runes above, this is a RuneError. + i++ + continue + } + + _, _ = b.WriteString(s[i : i+size]) + i += size + count++ + } + + return b.String() } diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/simple.go b/vendor/go.opentelemetry.io/otel/sdk/log/simple.go index fc5690b22d5..002e52cae66 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/simple.go +++ b/vendor/go.opentelemetry.io/otel/sdk/log/simple.go @@ -12,22 +12,24 @@ import ( var _ Processor = (*SimpleProcessor)(nil) // SimpleProcessor is a processor that synchronously exports log records. +// +// Use [NewSimpleProcessor] to create a SimpleProcessor. type SimpleProcessor struct { + mu sync.Mutex exporter Exporter + + noCmp [0]func() //nolint: unused // This is indeed used. } // NewSimpleProcessor is a simple Processor adapter. // -// This Processor is not recommended for production use. The synchronous -// nature of this Processor make it good for testing, debugging, or -// showing examples of other features, but it can be slow and have a high -// computation resource usage overhead. [NewBatchProcessor] is recommended -// for production use instead. +// This Processor is not recommended for production use due to its synchronous +// nature, which makes it suitable for testing, debugging, or demonstrating +// other features, but can lead to slow performance and high computational +// overhead. For production environments, it is recommended to use +// [NewBatchProcessor] instead. However, there may be exceptions where certain +// [Exporter] implementations perform better with this Processor. func NewSimpleProcessor(exporter Exporter, _ ...SimpleProcessorOption) *SimpleProcessor { - if exporter == nil { - // Do not panic on nil exporter. - exporter = defaultNoopExporter - } return &SimpleProcessor{exporter: exporter} } @@ -39,9 +41,16 @@ var simpleProcRecordsPool = sync.Pool{ } // OnEmit synchronously exports the provided log record. -func (s *SimpleProcessor) OnEmit(ctx context.Context, r Record) error { +func (s *SimpleProcessor) OnEmit(ctx context.Context, r *Record) error { + if s.exporter == nil { + return nil + } + + s.mu.Lock() + defer s.mu.Unlock() + records := simpleProcRecordsPool.Get().(*[]Record) - (*records)[0] = r + (*records)[0] = *r defer func() { simpleProcRecordsPool.Put(records) }() @@ -49,18 +58,21 @@ return s.exporter.Export(ctx, *records) } -// Enabled returns true. -func (s *SimpleProcessor) Enabled(context.Context, Record) bool { - return true -} - -// Shutdown shuts down the expoter. +// Shutdown shuts down the exporter. 
func (s *SimpleProcessor) Shutdown(ctx context.Context) error { + if s.exporter == nil { + return nil + } + return s.exporter.Shutdown(ctx) } // ForceFlush flushes the exporter. func (s *SimpleProcessor) ForceFlush(ctx context.Context) error { + if s.exporter == nil { + return nil + } + return s.exporter.ForceFlush(ctx) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/config.go b/vendor/go.opentelemetry.io/otel/sdk/metric/config.go index bbe7bf671fd..203cd9d6508 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/config.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/config.go @@ -5,17 +5,22 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric" import ( "context" - "fmt" + "errors" + "os" + "strings" "sync" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/sdk/metric/exemplar" "go.opentelemetry.io/otel/sdk/resource" ) // config contains configuration options for a MeterProvider. type config struct { - res *resource.Resource - readers []Reader - views []View + res *resource.Resource + readers []Reader + views []View + exemplarFilter exemplar.Filter } // readerSignals returns a force-flush and shutdown function for a @@ -39,25 +44,13 @@ func (c config) readerSignals() (forceFlush, shutdown func(context.Context) erro // value. func unify(funcs []func(context.Context) error) func(context.Context) error { return func(ctx context.Context) error { - var errs []error + var err error for _, f := range funcs { - if err := f(ctx); err != nil { - errs = append(errs, err) + if e := f(ctx); e != nil { + err = errors.Join(err, e) } } - return unifyErrors(errs) - } -} - -// unifyErrors combines multiple errors into a single error. -func unifyErrors(errs []error) error { - switch len(errs) { - case 0: - return nil - case 1: - return errs[0] - default: - return fmt.Errorf("%v", errs) + return err } } @@ -75,7 +68,13 @@ func unifyShutdown(funcs []func(context.Context) error) func(context.Context) er // newConfig returns a config configured with options. func newConfig(options []Option) config { - conf := config{res: resource.Default()} + conf := config{ + res: resource.Default(), + exemplarFilter: exemplar.TraceBasedFilter, + } + for _, o := range meterProviderOptionsFromEnv() { + conf = o.apply(conf) + } for _, o := range options { conf = o.apply(conf) } @@ -103,7 +102,11 @@ func (o optionFunc) apply(conf config) config { // go.opentelemetry.io/otel/sdk/resource package will be used. func WithResource(res *resource.Resource) Option { return optionFunc(func(conf config) config { - conf.res = res + var err error + conf.res, err = resource.Merge(resource.Environment(), res) + if err != nil { + otel.Handle(err) + } return conf }) } @@ -135,3 +138,35 @@ func WithView(views ...View) Option { return cfg }) } + +// WithExemplarFilter configures the exemplar filter. +// +// The exemplar filter determines which measurements are offered to the +// exemplar reservoir, but the exemplar reservoir makes the final decision of +// whether to store an exemplar. +// +// By default, the [exemplar.SampledFilter] +// is used. Exemplars can be entirely disabled by providing the +// [exemplar.AlwaysOffFilter]. 
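The move from the hand-rolled unifyErrors to errors.Join (Go 1.20+) is more than cosmetic: a joined error stays inspectable with errors.Is and errors.As, while the old fmt.Errorf("%v", errs) flattened everything to text. A quick illustration with invented sentinel errors:

var errA = errors.New("reader A failed")
var errB = errors.New("reader B failed")

err := errors.Join(errA, errB)
fmt.Println(errors.Is(err, errA)) // true
fmt.Println(errors.Is(err, errB)) // true
// The previous fmt.Errorf("%v", ...) form would have reported false for both.

errors.Join also discards nil arguments, which is why the loop above can accumulate into an error that starts out nil.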
+func WithExemplarFilter(filter exemplar.Filter) Option { + return optionFunc(func(cfg config) config { + cfg.exemplarFilter = filter + return cfg + }) +} + +func meterProviderOptionsFromEnv() []Option { + var opts []Option + // https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/configuration/sdk-environment-variables.md#exemplar + const filterEnvKey = "OTEL_METRICS_EXEMPLAR_FILTER" + + switch strings.ToLower(strings.TrimSpace(os.Getenv(filterEnvKey))) { + case "always_on": + opts = append(opts, WithExemplarFilter(exemplar.AlwaysOnFilter)) + case "always_off": + opts = append(opts, WithExemplarFilter(exemplar.AlwaysOffFilter)) + case "trace_based": + opts = append(opts, WithExemplarFilter(exemplar.TraceBasedFilter)) + } + return opts +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go index 4f553a57153..90a4ae16c1a 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go @@ -31,6 +31,14 @@ // is being run on. That way when multiple instances of the code are collected // at a single endpoint their origin is decipherable. // +// To avoid leaking memory, the SDK returns the same instrument for calls to +// create new instruments with the same Name, Unit, and Description. +// Importantly, callbacks provided using metric.WithFloat64Callback or +// metric.WithInt64Callback will only apply for the first instrument created +// with a given Name, Unit, and Description. Instead, use +// Meter.RegisterCallback and Registration.Unregister to add and remove +// callbacks without leaking memory. +// // See [go.opentelemetry.io/otel/metric] for more information about // the metric API. // diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go index 82619da78ec..0335b8ae48e 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go @@ -4,51 +4,49 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric" import ( - "os" "runtime" - "slices" - "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" - "go.opentelemetry.io/otel/sdk/metric/internal/x" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric/exemplar" + "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" ) -// reservoirFunc returns the appropriately configured exemplar reservoir -// creation func based on the passed InstrumentKind and user defined -// environment variables. -// -// Note: This will only return non-nil values when the experimental exemplar -// feature is enabled and the OTEL_METRICS_EXEMPLAR_FILTER environment variable -// is not set to always_off. -func reservoirFunc[N int64 | float64](agg Aggregation) func() exemplar.FilteredReservoir[N] { - if !x.Exemplars.Enabled() { - return nil - } - // https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/configuration/sdk-environment-variables.md#exemplar - const filterEnvKey = "OTEL_METRICS_EXEMPLAR_FILTER" +// ExemplarReservoirProviderSelector selects the +// [exemplar.ReservoirProvider] to use +// based on the [Aggregation] of the metric. 
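Putting the new option and the environment hook together: either of the following disables exemplars. Because newConfig applies the environment-derived options before the caller's options, an explicit WithExemplarFilter wins over OTEL_METRICS_EXEMPLAR_FILTER. (Sketch; assumes imports of go.opentelemetry.io/otel/sdk/metric and .../sdk/metric/exemplar.)

// In code:
provider := metric.NewMeterProvider(
	metric.WithExemplarFilter(exemplar.AlwaysOffFilter),
)
_ = provider

// Or via the environment, with no code change:
//   OTEL_METRICS_EXEMPLAR_FILTER=always_off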
+type ExemplarReservoirProviderSelector func(Aggregation) exemplar.ReservoirProvider - var filter exemplar.Filter - - switch os.Getenv(filterEnvKey) { - case "always_on": - filter = exemplar.AlwaysOnFilter - case "always_off": - return exemplar.Drop - case "trace_based": - fallthrough - default: - filter = exemplar.SampledFilter +// reservoirFunc returns the appropriately configured exemplar reservoir +// creation func based on the passed InstrumentKind and filter configuration. +func reservoirFunc[N int64 | float64](provider exemplar.ReservoirProvider, filter exemplar.Filter) func(attribute.Set) aggregate.FilteredExemplarReservoir[N] { + return func(attrs attribute.Set) aggregate.FilteredExemplarReservoir[N] { + return aggregate.NewFilteredExemplarReservoir[N](filter, provider(attrs)) } +} +// DefaultExemplarReservoirProviderSelector returns the default +// [exemplar.ReservoirProvider] for the +// provided [Aggregation]. +// +// For explicit bucket histograms with more than 1 bucket, it uses the +// [exemplar.HistogramReservoirProvider]. +// For exponential histograms, it uses the +// [exemplar.FixedSizeReservoirProvider] +// with a size of min(20, max_buckets). +// For all other aggregations, it uses the +// [exemplar.FixedSizeReservoirProvider] +// with a size equal to the number of CPUs. +// +// Exemplar default reservoirs MAY change in a minor version bump. No +// guarantees are made on the shape or statistical properties of returned +// exemplars. +func DefaultExemplarReservoirProviderSelector(agg Aggregation) exemplar.ReservoirProvider { // https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/metrics/sdk.md#exemplar-defaults // Explicit bucket histogram aggregation with more than 1 bucket will // use AlignedHistogramBucketExemplarReservoir. a, ok := agg.(AggregationExplicitBucketHistogram) if ok && len(a.Boundaries) > 0 { - cp := slices.Clone(a.Boundaries) - return func() exemplar.FilteredReservoir[N] { - bounds := cp - return exemplar.NewFilteredReservoir[N](filter, exemplar.Histogram(bounds)) - } + return exemplar.HistogramReservoirProvider(a.Boundaries) } var n int @@ -75,7 +73,5 @@ func reservoirFunc[N int64 | float64](agg Aggregation) func() exemplar.FilteredR } } - return func() exemplar.FilteredReservoir[N] { - return exemplar.NewFilteredReservoir[N](filter, exemplar.FixedSize(n)) - } + return exemplar.FixedSizeReservoirProvider(n) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/README.md b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/README.md new file mode 100644 index 00000000000..d1025f5eb89 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/README.md @@ -0,0 +1,3 @@ +# Metric SDK Exemplars + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/metric/exemplar)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric/exemplar) diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/doc.go new file mode 100644 index 00000000000..9f238937688 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/doc.go @@ -0,0 +1,6 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package exemplar provides an implementation of the OpenTelemetry exemplar +// reservoir to be used in metric collection pipelines. 
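To make the default selection concrete, a small sketch using the exported selector (the aggregation values are illustrative):

hist := metric.AggregationExplicitBucketHistogram{Boundaries: []float64{0, 5, 10}}
p := metric.DefaultExemplarReservoirProviderSelector(hist)
// p is exemplar.HistogramReservoirProvider([]float64{0, 5, 10}):
// len(bounds)+1 == 4 exemplar slots, one per bucket.

sum := metric.AggregationSum{}
q := metric.DefaultExemplarReservoirProviderSelector(sum)
// q is exemplar.FixedSizeReservoirProvider(n) with n equal to the number
// of CPUs, per the doc comment above.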
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar" diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/exemplar.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/exemplar.go similarity index 98% rename from vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/exemplar.go rename to vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/exemplar.go index fcaa6a4697c..1ab69467868 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/exemplar.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/exemplar.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar" import ( "time" diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/filter.go similarity index 75% rename from vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go rename to vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/filter.go index 152a069a09e..b595e2acef3 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/filter.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar" import ( "context" @@ -16,10 +16,10 @@ import ( // Reservoir in making a sampling decision. type Filter func(context.Context) bool -// SampledFilter is a [Filter] that will only offer measurements +// TraceBasedFilter is a [Filter] that will only offer measurements // if the passed context associated with the measurement contains a sampled // [go.opentelemetry.io/otel/trace.SpanContext]. -func SampledFilter(ctx context.Context) bool { +func TraceBasedFilter(ctx context.Context) bool { return trace.SpanContextFromContext(ctx).IsSampled() } @@ -27,3 +27,8 @@ func SampledFilter(ctx context.Context) bool { func AlwaysOnFilter(ctx context.Context) bool { return true } + +// AlwaysOffFilter is a [Filter] that never offers measurements. +func AlwaysOffFilter(ctx context.Context) bool { + return false +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/fixed_size_reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/fixed_size_reservoir.go new file mode 100644 index 00000000000..d4aab0aad4f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/fixed_size_reservoir.go @@ -0,0 +1,217 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar" + +import ( + "context" + "math" + "math/rand" + "time" + + "go.opentelemetry.io/otel/attribute" +) + +// FixedSizeReservoirProvider returns a provider of [FixedSizeReservoir]. +func FixedSizeReservoirProvider(k int) ReservoirProvider { + return func(_ attribute.Set) Reservoir { + return NewFixedSizeReservoir(k) + } +} + +// NewFixedSizeReservoir returns a [FixedSizeReservoir] that samples at most +// k exemplars. If there are k or less measurements made, the Reservoir will +// sample each one. If there are more than k, the Reservoir will then randomly +// sample all additional measurement with a decreasing probability. 
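A Filter is just func(context.Context) bool, so callers are not limited to the three shipped filters. A hypothetical custom filter keyed on baggage (the "tenant" member name is invented for illustration):

import (
	"context"

	"go.opentelemetry.io/otel/baggage"
)

// tenantFilter offers a measurement only when the surrounding context
// carries a non-empty "tenant" baggage entry.
func tenantFilter(ctx context.Context) bool {
	return baggage.FromContext(ctx).Member("tenant").Value() != ""
}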
+func NewFixedSizeReservoir(k int) *FixedSizeReservoir { + return newFixedSizeReservoir(newStorage(k)) +} + +var _ Reservoir = &FixedSizeReservoir{} + +// FixedSizeReservoir is a [Reservoir] that samples at most k exemplars. If +// there are k or less measurements made, the Reservoir will sample each one. +// If there are more than k, the Reservoir will then randomly sample all +// additional measurement with a decreasing probability. +type FixedSizeReservoir struct { + *storage + + // count is the number of measurement seen. + count int64 + // next is the next count that will store a measurement at a random index + // once the reservoir has been filled. + next int64 + // w is the largest random number in a distribution that is used to compute + // the next next. + w float64 + + // rng is used to make sampling decisions. + // + // Do not use crypto/rand. There is no reason for the decrease in performance + // given this is not a security sensitive decision. + rng *rand.Rand +} + +func newFixedSizeReservoir(s *storage) *FixedSizeReservoir { + r := &FixedSizeReservoir{ + storage: s, + rng: rand.New(rand.NewSource(time.Now().UnixNano())), + } + r.reset() + return r +} + +// randomFloat64 returns, as a float64, a uniform pseudo-random number in the +// open interval (0.0,1.0). +func (r *FixedSizeReservoir) randomFloat64() float64 { + // TODO: This does not return a uniform number. rng.Float64 returns a + // uniformly random int in [0,2^53) that is divided by 2^53. Meaning it + // returns multiples of 2^-53, and not all floating point numbers between 0 + // and 1 (i.e. for values less than 2^-4 the 4 last bits of the significand + // are always going to be 0). + // + // An alternative algorithm should be considered that will actually return + // a uniform number in the interval (0,1). For example, since the default + // rand source provides a uniform distribution for Int63, this can be + // converted following the prototypical code of Mersenne Twister 64 (Takuji + // Nishimura and Makoto Matsumoto: + // http://www.math.sci.hiroshima-u.ac.jp/m-mat/MT/VERSIONS/C-LANG/mt19937-64.c) + // + // (float64(rng.Int63()>>11) + 0.5) * (1.0 / 4503599627370496.0) + // + // There are likely many other methods to explore here as well. + + f := r.rng.Float64() + for f == 0 { + f = r.rng.Float64() + } + return f +} + +// Offer accepts the parameters associated with a measurement. The +// parameters will be stored as an exemplar if the Reservoir decides to +// sample the measurement. +// +// The passed ctx needs to contain any baggage or span that were active +// when the measurement was made. This information may be used by the +// Reservoir in making a sampling decision. +// +// The time t is the time when the measurement was made. The v and a +// parameters are the value and dropped (filtered) attributes of the +// measurement respectively. +func (r *FixedSizeReservoir) Offer(ctx context.Context, t time.Time, n Value, a []attribute.KeyValue) { + // The following algorithm is "Algorithm L" from Li, Kim-Hung (4 December + // 1994). "Reservoir-Sampling Algorithms of Time Complexity + // O(n(1+log(N/n)))". ACM Transactions on Mathematical Software. 20 (4): + // 481–493 (https://dl.acm.org/doi/10.1145/198429.198435). + // + // A high-level overview of "Algorithm L": + // 0) Pre-calculate the random count greater than the storage size when + // an exemplar will be replaced. + // 1) Accept all measurements offered until the configured storage size is + // reached. 
+ // 2) Loop: + // a) When the pre-calculate count is reached, replace a random + // existing exemplar with the offered measurement. + // b) Calculate the next random count greater than the existing one + // which will replace another exemplars + // + // The way a "replacement" count is computed is by looking at `n` number of + // independent random numbers each corresponding to an offered measurement. + // Of these numbers the smallest `k` (the same size as the storage + // capacity) of them are kept as a subset. The maximum value in this + // subset, called `w` is used to weight another random number generation + // for the next count that will be considered. + // + // By weighting the next count computation like described, it is able to + // perform a uniformly-weighted sampling algorithm based on the number of + // samples the reservoir has seen so far. The sampling will "slow down" as + // more and more samples are offered so as to reduce a bias towards those + // offered just prior to the end of the collection. + // + // This algorithm is preferred because of its balance of simplicity and + // performance. It will compute three random numbers (the bulk of + // computation time) for each item that becomes part of the reservoir, but + // it does not spend any time on items that do not. In particular it has an + // asymptotic runtime of O(k(1 + log(n/k)) where n is the number of + // measurements offered and k is the reservoir size. + // + // See https://en.wikipedia.org/wiki/Reservoir_sampling for an overview of + // this and other reservoir sampling algorithms. See + // https://github.com/MrAlias/reservoir-sampling for a performance + // comparison of reservoir sampling algorithms. + + if int(r.count) < cap(r.store) { + r.store[r.count] = newMeasurement(ctx, t, n, a) + } else { + if r.count == r.next { + // Overwrite a random existing measurement with the one offered. + idx := int(r.rng.Int63n(int64(cap(r.store)))) + r.store[idx] = newMeasurement(ctx, t, n, a) + r.advance() + } + } + r.count++ +} + +// reset resets r to the initial state. +func (r *FixedSizeReservoir) reset() { + // This resets the number of exemplars known. + r.count = 0 + // Random index inserts should only happen after the storage is full. + r.next = int64(cap(r.store)) + + // Initial random number in the series used to generate r.next. + // + // This is set before r.advance to reset or initialize the random number + // series. Without doing so it would always be 0 or never restart a new + // random number series. + // + // This maps the uniform random number in (0,1) to a geometric distribution + // over the same interval. The mean of the distribution is inversely + // proportional to the storage capacity. + r.w = math.Exp(math.Log(r.randomFloat64()) / float64(cap(r.store))) + + r.advance() +} + +// advance updates the count at which the offered measurement will overwrite an +// existing exemplar. +func (r *FixedSizeReservoir) advance() { + // Calculate the next value in the random number series. + // + // The current value of r.w is based on the max of a distribution of random + // numbers (i.e. `w = max(u_1,u_2,...,u_k)` for `k` equal to the capacity + // of the storage and each `u` in the interval (0,w)). To calculate the + // next r.w we use the fact that when the next exemplar is selected to be + // included in the storage an existing one will be dropped, and the + // corresponding random number in the set used to calculate r.w will also + // be replaced. 
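The long comment above is a faithful description of Algorithm L. A compact standalone sketch of the same idea, sampling k ints from a slice rather than driving the reservoir's state machine (the SDK additionally guards against rng.Float64() returning exactly 0; math and math/rand are assumed imported):

// sampleL keeps a uniformly random sample of up to k elements of stream.
func sampleL(stream []int, k int, rng *rand.Rand) []int {
	res := make([]int, 0, k)
	w := math.Exp(math.Log(rng.Float64()) / float64(k))
	next := k + int(math.Log(rng.Float64())/math.Log(1-w)) + 1
	for i, v := range stream {
		if i < k {
			res = append(res, v) // fill phase: accept everything
			continue
		}
		if i == next {
			res[rng.Intn(k)] = v // replace a random existing sample
			w *= math.Exp(math.Log(rng.Float64()) / float64(k))
			next += int(math.Log(rng.Float64())/math.Log(1-w)) + 1
		}
	}
	return res
}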
The replacement random number will also be within (0,w), + // therefore the next r.w will be based on the same distribution (i.e. + // `max(u_1,u_2,...,u_k)`). Therefore, we can sample the next r.w by + // computing the next random number `u` and take r.w as `w * u^(1/k)`. + r.w *= math.Exp(math.Log(r.randomFloat64()) / float64(cap(r.store))) + // Use the new random number in the series to calculate the count of the + // next measurement that will be stored. + // + // Given 0 < r.w < 1, each iteration will result in subsequent r.w being + // smaller. This translates here into the next next being selected against + // a distribution with a higher mean (i.e. the expected value will increase + // and replacements become less likely) + // + // Important to note, the new r.next will always be at least 1 more than + // the last r.next. + r.next += int64(math.Log(r.randomFloat64())/math.Log(1-r.w)) + 1 +} + +// Collect returns all the held exemplars. +// +// The Reservoir state is preserved after this call. +func (r *FixedSizeReservoir) Collect(dest *[]Exemplar) { + r.storage.Collect(dest) + // Call reset here even though it will reset r.count and restart the random + // number series. This will persist any old exemplars as long as no new + // measurements are offered, but it will also prioritize those new + // measurements that are made over the older collection cycle ones. + r.reset() +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/histogram_reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/histogram_reservoir.go new file mode 100644 index 00000000000..3b76cf305a4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/histogram_reservoir.go @@ -0,0 +1,70 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar" + +import ( + "context" + "slices" + "sort" + "time" + + "go.opentelemetry.io/otel/attribute" +) + +// HistogramReservoirProvider is a provider of [HistogramReservoir]. +func HistogramReservoirProvider(bounds []float64) ReservoirProvider { + cp := slices.Clone(bounds) + slices.Sort(cp) + return func(_ attribute.Set) Reservoir { + return NewHistogramReservoir(cp) + } +} + +// NewHistogramReservoir returns a [HistogramReservoir] that samples the last +// measurement that falls within a histogram bucket. The histogram bucket +// upper-boundaries are define by bounds. +// +// The passed bounds must be sorted before calling this function. +func NewHistogramReservoir(bounds []float64) *HistogramReservoir { + return &HistogramReservoir{ + bounds: bounds, + storage: newStorage(len(bounds) + 1), + } +} + +var _ Reservoir = &HistogramReservoir{} + +// HistogramReservoir is a [Reservoir] that samples the last measurement that +// falls within a histogram bucket. The histogram bucket upper-boundaries are +// define by bounds. +type HistogramReservoir struct { + *storage + + // bounds are bucket bounds in ascending order. + bounds []float64 +} + +// Offer accepts the parameters associated with a measurement. The +// parameters will be stored as an exemplar if the Reservoir decides to +// sample the measurement. +// +// The passed ctx needs to contain any baggage or span that were active +// when the measurement was made. This information may be used by the +// Reservoir in making a sampling decision. +// +// The time t is the time when the measurement was made. 
The v and a +// parameters are the value and dropped (filtered) attributes of the +// measurement respectively. +func (r *HistogramReservoir) Offer(ctx context.Context, t time.Time, v Value, a []attribute.KeyValue) { + var x float64 + switch v.Type() { + case Int64ValueType: + x = float64(v.Int64()) + case Float64ValueType: + x = v.Float64() + default: + panic("unknown value type") + } + r.store[sort.SearchFloat64s(r.bounds, x)] = newMeasurement(ctx, t, v, a) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/reservoir.go new file mode 100644 index 00000000000..ba5cd1a6b3d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/reservoir.go @@ -0,0 +1,40 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar" + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/attribute" +) + +// Reservoir holds the sampled exemplar of measurements made. +type Reservoir interface { + // Offer accepts the parameters associated with a measurement. The + // parameters will be stored as an exemplar if the Reservoir decides to + // sample the measurement. + // + // The passed ctx needs to contain any baggage or span that were active + // when the measurement was made. This information may be used by the + // Reservoir in making a sampling decision. + // + // The time t is the time when the measurement was made. The val and attr + // parameters are the value and dropped (filtered) attributes of the + // measurement respectively. + Offer(ctx context.Context, t time.Time, val Value, attr []attribute.KeyValue) + + // Collect returns all the held exemplars. + // + // The Reservoir state is preserved after this call. + Collect(dest *[]Exemplar) +} + +// ReservoirProvider creates new [Reservoir]s. +// +// The attributes provided are attributes which are kept by the aggregation, and +// are exclusive with attributes passed to Offer. The combination of these +// attributes and the attributes passed to Offer is the complete set of +// attributes a measurement was made with. +type ReservoirProvider func(attr attribute.Set) Reservoir diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/storage.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/storage.go new file mode 100644 index 00000000000..0e2e26dfb18 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/storage.go @@ -0,0 +1,95 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar" + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +// storage is an exemplar storage for [Reservoir] implementations. +type storage struct { + // store are the measurements sampled. + // + // This does not use []metricdata.Exemplar because it potentially would + // require an allocation for trace and span IDs in the hot path of Offer. + store []measurement +} + +func newStorage(n int) *storage { + return &storage{store: make([]measurement, n)} +} + +// Collect returns all the held exemplars. +// +// The Reservoir state is preserved after this call. 
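The bucket lookup in Offer leans on sort.SearchFloat64s, which returns the smallest index i with bounds[i] >= x; that index is exactly the storage slot. With the illustrative bounds {0, 5, 10}:

bounds := []float64{0, 5, 10}
fmt.Println(sort.SearchFloat64s(bounds, -1)) // 0 -> bucket (-inf, 0]
fmt.Println(sort.SearchFloat64s(bounds, 3))  // 1 -> bucket (0, 5]
fmt.Println(sort.SearchFloat64s(bounds, 10)) // 2 -> bucket (5, 10] (upper bound inclusive)
fmt.Println(sort.SearchFloat64s(bounds, 11)) // 3 -> bucket (10, +inf)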
+func (r *storage) Collect(dest *[]Exemplar) { + *dest = reset(*dest, len(r.store), len(r.store)) + var n int + for _, m := range r.store { + if !m.valid { + continue + } + + m.exemplar(&(*dest)[n]) + n++ + } + *dest = (*dest)[:n] +} + +// measurement is a measurement made by a telemetry system. +type measurement struct { + // FilteredAttributes are the attributes dropped during the measurement. + FilteredAttributes []attribute.KeyValue + // Time is the time when the measurement was made. + Time time.Time + // Value is the value of the measurement. + Value Value + // SpanContext is the SpanContext active when a measurement was made. + SpanContext trace.SpanContext + + valid bool +} + +// newMeasurement returns a new non-empty Measurement. +func newMeasurement(ctx context.Context, ts time.Time, v Value, droppedAttr []attribute.KeyValue) measurement { + return measurement{ + FilteredAttributes: droppedAttr, + Time: ts, + Value: v, + SpanContext: trace.SpanContextFromContext(ctx), + valid: true, + } +} + +// exemplar returns m as an [Exemplar]. +func (m measurement) exemplar(dest *Exemplar) { + dest.FilteredAttributes = m.FilteredAttributes + dest.Time = m.Time + dest.Value = m.Value + + if m.SpanContext.HasTraceID() { + traceID := m.SpanContext.TraceID() + dest.TraceID = traceID[:] + } else { + dest.TraceID = dest.TraceID[:0] + } + + if m.SpanContext.HasSpanID() { + spanID := m.SpanContext.SpanID() + dest.SpanID = spanID[:] + } else { + dest.SpanID = dest.SpanID[:0] + } +} + +func reset[T any](s []T, length, capacity int) []T { + if cap(s) < capacity { + return make([]T, length, capacity) + } + return s[:length] +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/value.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/value.go new file mode 100644 index 00000000000..590b089a806 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/value.go @@ -0,0 +1,59 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar" + +import "math" + +// ValueType identifies the type of value used in exemplar data. +type ValueType uint8 + +const ( + // UnknownValueType should not be used. It represents a misconfigured + // Value. + UnknownValueType ValueType = 0 + // Int64ValueType represents a Value with int64 data. + Int64ValueType ValueType = 1 + // Float64ValueType represents a Value with float64 data. + Float64ValueType ValueType = 2 +) + +// Value is the value of data held by an exemplar. +type Value struct { + t ValueType + val uint64 +} + +// NewValue returns a new [Value] for the provided value. +func NewValue[N int64 | float64](value N) Value { + switch v := any(value).(type) { + case int64: + // This can be later converted back to int64 (overflow not checked). + return Value{t: Int64ValueType, val: uint64(v)} // nolint:gosec + case float64: + return Value{t: Float64ValueType, val: math.Float64bits(v)} + } + return Value{} +} + +// Type returns the [ValueType] of data held by v. +func (v Value) Type() ValueType { return v.t } + +// Int64 returns the value of v as an int64. If the ValueType of v is not an +// Int64ValueType, 0 is returned. +func (v Value) Int64() int64 { + if v.t == Int64ValueType { + // Assumes the correct int64 was stored in v.val based on type. + return int64(v.val) // nolint: gosec + } + return 0 +} + +// Float64 returns the value of v as an float64. If the ValueType of v is not +// an Float64ValueType, 0 is returned. 
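Value packs both numeric kinds into a single uint64, and the accessors are kind-checked, returning zero on a mismatch. A short usage sketch:

v := exemplar.NewValue(int64(42))
fmt.Println(v.Type() == exemplar.Int64ValueType) // true
fmt.Println(v.Int64())   // 42
fmt.Println(v.Float64()) // 0: wrong-kind accessor yields the zero value

f := exemplar.NewValue(2.5)
fmt.Println(f.Float64()) // 2.5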
+func (v Value) Float64() float64 { + if v.t == Float64ValueType { + return math.Float64frombits(v.val) + } + return 0 +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exporter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exporter.go index 1a3cccb6775..1969cb42cf4 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/exporter.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exporter.go @@ -5,14 +5,14 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric" import ( "context" - "fmt" + "errors" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) // ErrExporterShutdown is returned if Export or Shutdown are called after an // Exporter has been Shutdown. -var ErrExporterShutdown = fmt.Errorf("exporter is shutdown") +var ErrExporterShutdown = errors.New("exporter is shutdown") // Exporter handles the delivery of metric data to external receivers. This is // the final component in the metric push pipeline. diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go b/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go index b52a330b3bc..c33e1a28cb4 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go @@ -16,6 +16,7 @@ import ( "go.opentelemetry.io/otel/metric/embedded" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" + "go.opentelemetry.io/otel/sdk/metric/internal/x" ) var zeroScope instrumentation.Scope @@ -144,6 +145,12 @@ type Stream struct { // Use NewAllowKeysFilter from "go.opentelemetry.io/otel/attribute" to // provide an allow-list of attribute keys here. AttributeFilter attribute.Filter + // ExemplarReservoirProvider selects the + // [go.opentelemetry.io/otel/sdk/metric/exemplar.ReservoirProvider] based + // on the [Aggregation]. + // + // If unspecified, [DefaultExemplarReservoirProviderSelector] is used. + ExemplarReservoirProviderSelector ExemplarReservoirProviderSelector } // instID are the identifying properties of a instrument. @@ -184,6 +191,7 @@ var ( _ metric.Int64UpDownCounter = (*int64Inst)(nil) _ metric.Int64Histogram = (*int64Inst)(nil) _ metric.Int64Gauge = (*int64Inst)(nil) + _ x.EnabledInstrument = (*int64Inst)(nil) ) func (i *int64Inst) Add(ctx context.Context, val int64, opts ...metric.AddOption) { @@ -196,6 +204,10 @@ func (i *int64Inst) Record(ctx context.Context, val int64, opts ...metric.Record i.aggregate(ctx, val, c.Attributes()) } +func (i *int64Inst) Enabled(_ context.Context) bool { + return len(i.measures) != 0 +} + func (i *int64Inst) aggregate(ctx context.Context, val int64, s attribute.Set) { // nolint:revive // okay to shadow pkg with method. for _, in := range i.measures { in(ctx, val, s) @@ -216,6 +228,7 @@ var ( _ metric.Float64UpDownCounter = (*float64Inst)(nil) _ metric.Float64Histogram = (*float64Inst)(nil) _ metric.Float64Gauge = (*float64Inst)(nil) + _ x.EnabledInstrument = (*float64Inst)(nil) ) func (i *float64Inst) Add(ctx context.Context, val float64, opts ...metric.AddOption) { @@ -228,14 +241,18 @@ func (i *float64Inst) Record(ctx context.Context, val float64, opts ...metric.Re i.aggregate(ctx, val, c.Attributes()) } +func (i *float64Inst) Enabled(_ context.Context) bool { + return len(i.measures) != 0 +} + func (i *float64Inst) aggregate(ctx context.Context, val float64, s attribute.Set) { for _, in := range i.measures { in(ctx, val, s) } } -// observablID is a comparable unique identifier of an observable. 
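The new Stream.ExemplarReservoirProviderSelector field (added above) lets a view override reservoir selection per instrument. A hypothetical view, with an invented instrument name, that caps exemplars at four per attribute set:

view := metric.NewView(
	metric.Instrument{Name: "http.server.request.duration"},
	metric.Stream{
		ExemplarReservoirProviderSelector: func(metric.Aggregation) exemplar.ReservoirProvider {
			return exemplar.FixedSizeReservoirProvider(4)
		},
	},
)
provider := metric.NewMeterProvider(metric.WithView(view))
_ = provider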
-type observablID[N int64 | float64] struct { +// observableID is a comparable unique identifier of an observable. +type observableID[N int64 | float64] struct { name string description string kind InstrumentKind @@ -287,7 +304,7 @@ func newInt64Observable(m *meter, kind InstrumentKind, name, desc, u string) int type observable[N int64 | float64] struct { metric.Observable - observablID[N] + observableID[N] meter *meter measures measures[N] @@ -296,7 +313,7 @@ type observable[N int64 | float64] struct { func newObservable[N int64 | float64](m *meter, kind InstrumentKind, name, desc, u string) *observable[N] { return &observable[N]{ - observablID: observablID[N]{ + observableID: observableID[N]{ name: name, description: desc, kind: kind, diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go index b18ee719bd1..fde21933389 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go @@ -8,7 +8,6 @@ import ( "time" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) @@ -38,8 +37,8 @@ type Builder[N int64 | float64] struct { // create new exemplar reservoirs for a new seen attribute set. // // If this is not provided a default factory function that returns an - // exemplar.Drop reservoir will be used. - ReservoirFunc func() exemplar.FilteredReservoir[N] + // dropReservoir reservoir will be used. + ReservoirFunc func(attribute.Set) FilteredExemplarReservoir[N] // AggregationLimit is the cardinality limit of measurement attributes. Any // measurement for new attributes once the limit has been reached will be // aggregated into a single aggregate for the "otel.metric.overflow" @@ -50,12 +49,12 @@ type Builder[N int64 | float64] struct { AggregationLimit int } -func (b Builder[N]) resFunc() func() exemplar.FilteredReservoir[N] { +func (b Builder[N]) resFunc() func(attribute.Set) FilteredExemplarReservoir[N] { if b.ReservoirFunc != nil { return b.ReservoirFunc } - return exemplar.Drop + return dropReservoir } type fltrMeasure[N int64 | float64] func(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/drop.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/drop.go new file mode 100644 index 00000000000..8396faaa4ae --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/drop.go @@ -0,0 +1,27 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric/exemplar" +) + +// dropReservoir returns a [FilteredReservoir] that drops all measurements it is offered. +func dropReservoir[N int64 | float64](attribute.Set) FilteredExemplarReservoir[N] { + return &dropRes[N]{} +} + +type dropRes[N int64 | float64] struct{} + +// Offer does nothing, all measurements offered will be dropped. +func (r *dropRes[N]) Offer(context.Context, N, []attribute.KeyValue) {} + +// Collect resets dest. No exemplars will ever be returned. 
+func (r *dropRes[N]) Collect(dest *[]exemplar.Exemplar) { + clear(*dest) // Erase elements to let GC collect objects + *dest = (*dest)[:0] +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go index 170ae8e58e2..25d709948e9 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go @@ -6,7 +6,7 @@ package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggreg import ( "sync" - "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + "go.opentelemetry.io/otel/sdk/metric/exemplar" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) @@ -17,6 +17,7 @@ var exemplarPool = sync.Pool{ func collectExemplars[N int64 | float64](out *[]metricdata.Exemplar[N], f func(*[]exemplar.Exemplar)) { dest := exemplarPool.Get().(*[]exemplar.Exemplar) defer func() { + clear(*dest) // Erase elements to let GC collect objects. *dest = (*dest)[:0] exemplarPool.Put(dest) }() diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go index c9c7e8f62a9..336ea91d1bf 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go @@ -12,7 +12,6 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) @@ -31,7 +30,7 @@ const ( // expoHistogramDataPoint is a single data point in an exponential histogram. type expoHistogramDataPoint[N int64 | float64] struct { attrs attribute.Set - res exemplar.FilteredReservoir[N] + res FilteredExemplarReservoir[N] count uint64 min N @@ -42,25 +41,25 @@ type expoHistogramDataPoint[N int64 | float64] struct { noMinMax bool noSum bool - scale int + scale int32 posBuckets expoBuckets negBuckets expoBuckets zeroCount uint64 } -func newExpoHistogramDataPoint[N int64 | float64](attrs attribute.Set, maxSize, maxScale int, noMinMax, noSum bool) *expoHistogramDataPoint[N] { +func newExpoHistogramDataPoint[N int64 | float64](attrs attribute.Set, maxSize int, maxScale int32, noMinMax, noSum bool) *expoHistogramDataPoint[N] { f := math.MaxFloat64 - max := N(f) // if N is int64, max will overflow to -9223372036854775808 - min := N(-f) + ma := N(f) // if N is int64, max will overflow to -9223372036854775808 + mi := N(-f) if N(maxInt64) > N(f) { - max = N(maxInt64) - min = N(minInt64) + ma = N(maxInt64) + mi = N(minInt64) } return &expoHistogramDataPoint[N]{ attrs: attrs, - min: max, - max: min, + min: ma, + max: mi, maxSize: maxSize, noMinMax: noMinMax, noSum: noSum, @@ -119,11 +118,13 @@ func (p *expoHistogramDataPoint[N]) record(v N) { } // getBin returns the bin v should be recorded into. -func (p *expoHistogramDataPoint[N]) getBin(v float64) int { - frac, exp := math.Frexp(v) +func (p *expoHistogramDataPoint[N]) getBin(v float64) int32 { + frac, expInt := math.Frexp(v) + // 11-bit exponential. + exp := int32(expInt) // nolint: gosec if p.scale <= 0 { // Because of the choice of fraction is always 1 power of two higher than we want. 
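Both Collect call sites above now clear before re-slicing. clear on a slice (Go 1.21+) zeroes the elements in place, so pointers held by a pooled backing array stop pinning memory while the capacity stays available for reuse:

s := []*int{new(int), new(int)}
clear(s)  // s == []*int{nil, nil}; the pointed-to ints become collectable
s = s[:0] // length 0, capacity and backing array preserved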
- correction := 1 + var correction int32 = 1 if frac == .5 { // If v is an exact power of two the frac will be .5 and the exp // will be one higher than we want. @@ -131,7 +132,7 @@ func (p *expoHistogramDataPoint[N]) getBin(v float64) int { } return (exp - correction) >> (-p.scale) } - return exp<= bin { - low = bin - high = startBin + length - 1 + low = int(bin) + high = int(startBin) + length - 1 } - count := 0 + var count int32 for high-low >= p.maxSize { low = low >> 1 high = high >> 1 @@ -189,39 +190,39 @@ func (p *expoHistogramDataPoint[N]) scaleChange(bin, startBin, length int) int { // expoBuckets is a set of buckets in an exponential histogram. type expoBuckets struct { - startBin int + startBin int32 counts []uint64 } // record increments the count for the given bin, and expands the buckets if needed. // Size changes must be done before calling this function. -func (b *expoBuckets) record(bin int) { +func (b *expoBuckets) record(bin int32) { if len(b.counts) == 0 { b.counts = []uint64{1} b.startBin = bin return } - endBin := b.startBin + len(b.counts) - 1 + endBin := int(b.startBin) + len(b.counts) - 1 // if the new bin is inside the current range - if bin >= b.startBin && bin <= endBin { + if bin >= b.startBin && int(bin) <= endBin { b.counts[bin-b.startBin]++ return } // if the new bin is before the current start add spaces to the counts if bin < b.startBin { origLen := len(b.counts) - newLength := endBin - bin + 1 + newLength := endBin - int(bin) + 1 shift := b.startBin - bin if newLength > cap(b.counts) { b.counts = append(b.counts, make([]uint64, newLength-len(b.counts))...) } - copy(b.counts[shift:origLen+shift], b.counts[:]) + copy(b.counts[shift:origLen+int(shift)], b.counts[:]) b.counts = b.counts[:newLength] - for i := 1; i < shift; i++ { + for i := 1; i < int(shift); i++ { b.counts[i] = 0 } b.startBin = bin @@ -229,17 +230,17 @@ func (b *expoBuckets) record(bin int) { return } // if the new is after the end add spaces to the end - if bin > endBin { - if bin-b.startBin < cap(b.counts) { + if int(bin) > endBin { + if int(bin-b.startBin) < cap(b.counts) { b.counts = b.counts[:bin-b.startBin+1] - for i := endBin + 1 - b.startBin; i < len(b.counts); i++ { + for i := endBin + 1 - int(b.startBin); i < len(b.counts); i++ { b.counts[i] = 0 } b.counts[bin-b.startBin] = 1 return } - end := make([]uint64, bin-b.startBin-len(b.counts)+1) + end := make([]uint64, int(bin-b.startBin)-len(b.counts)+1) b.counts = append(b.counts, end...) b.counts[bin-b.startBin] = 1 } @@ -247,7 +248,7 @@ func (b *expoBuckets) record(bin int) { // downscale shrinks a bucket by a factor of 2*s. It will sum counts into the // correct lower resolution bucket. 
-func (b *expoBuckets) downscale(delta int) { +func (b *expoBuckets) downscale(delta int32) { // Example // delta = 2 // Original offset: -6 @@ -262,19 +263,19 @@ func (b *expoBuckets) downscale(delta int) { return } - steps := 1 << delta + steps := int32(1) << delta offset := b.startBin % steps offset = (offset + steps) % steps // to make offset positive for i := 1; i < len(b.counts); i++ { - idx := i + offset - if idx%steps == 0 { - b.counts[idx/steps] = b.counts[i] + idx := i + int(offset) + if idx%int(steps) == 0 { + b.counts[idx/int(steps)] = b.counts[i] continue } - b.counts[idx/steps] += b.counts[i] + b.counts[idx/int(steps)] += b.counts[i] } - lastIdx := (len(b.counts) - 1 + offset) / steps + lastIdx := (len(b.counts) - 1 + int(offset)) / int(steps) b.counts = b.counts[:lastIdx+1] b.startBin = b.startBin >> delta } @@ -282,12 +283,12 @@ func (b *expoBuckets) downscale(delta int) { // newExponentialHistogram returns an Aggregator that summarizes a set of // measurements as an exponential histogram. Each histogram is scoped by attributes // and the aggregation cycle the measurements were made in. -func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool, limit int, r func() exemplar.FilteredReservoir[N]) *expoHistogram[N] { +func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *expoHistogram[N] { return &expoHistogram[N]{ noSum: noSum, noMinMax: noMinMax, maxSize: int(maxSize), - maxScale: int(maxScale), + maxScale: maxScale, newRes: r, limit: newLimiter[*expoHistogramDataPoint[N]](limit), @@ -303,9 +304,9 @@ type expoHistogram[N int64 | float64] struct { noSum bool noMinMax bool maxSize int - maxScale int + maxScale int32 - newRes func() exemplar.FilteredReservoir[N] + newRes func(attribute.Set) FilteredExemplarReservoir[N] limit limiter[*expoHistogramDataPoint[N]] values map[attribute.Distinct]*expoHistogramDataPoint[N] valuesMu sync.Mutex @@ -326,7 +327,7 @@ func (e *expoHistogram[N]) measure(ctx context.Context, value N, fltrAttr attrib v, ok := e.values[attr.Equivalent()] if !ok { v = newExpoHistogramDataPoint[N](attr, e.maxSize, e.maxScale, e.noMinMax, e.noSum) - v.res = e.newRes() + v.res = e.newRes(attr) e.values[attr.Equivalent()] = v } @@ -354,15 +355,15 @@ func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int { hDPts[i].StartTime = e.start hDPts[i].Time = t hDPts[i].Count = val.count - hDPts[i].Scale = int32(val.scale) + hDPts[i].Scale = val.scale hDPts[i].ZeroCount = val.zeroCount hDPts[i].ZeroThreshold = 0.0 - hDPts[i].PositiveBucket.Offset = int32(val.posBuckets.startBin) + hDPts[i].PositiveBucket.Offset = val.posBuckets.startBin hDPts[i].PositiveBucket.Counts = reset(hDPts[i].PositiveBucket.Counts, len(val.posBuckets.counts), len(val.posBuckets.counts)) copy(hDPts[i].PositiveBucket.Counts, val.posBuckets.counts) - hDPts[i].NegativeBucket.Offset = int32(val.negBuckets.startBin) + hDPts[i].NegativeBucket.Offset = val.negBuckets.startBin hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(val.negBuckets.counts), len(val.negBuckets.counts)) copy(hDPts[i].NegativeBucket.Counts, val.negBuckets.counts) @@ -407,15 +408,15 @@ func (e *expoHistogram[N]) cumulative(dest *metricdata.Aggregation) int { hDPts[i].StartTime = e.start hDPts[i].Time = t hDPts[i].Count = val.count - hDPts[i].Scale = int32(val.scale) + hDPts[i].Scale = val.scale hDPts[i].ZeroCount = val.zeroCount hDPts[i].ZeroThreshold = 0.0 - 
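A worked example of downscale, runnable only inside the internal aggregate package (for instance from a test): after downscale(1) each new bucket covers two old ones, because the new bin index is the old index shifted right by delta.

b := expoBuckets{startBin: -2, counts: []uint64{1, 2, 3, 4}} // bins -2..1
b.downscale(1)
// Now b.startBin == -1 and b.counts == []uint64{3, 7}:
// old bins -2 and -1 (1+2) merge into new bin -1; old bins 0 and 1 (3+4) into bin 0.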
hDPts[i].PositiveBucket.Offset = int32(val.posBuckets.startBin) + hDPts[i].PositiveBucket.Offset = val.posBuckets.startBin hDPts[i].PositiveBucket.Counts = reset(hDPts[i].PositiveBucket.Counts, len(val.posBuckets.counts), len(val.posBuckets.counts)) copy(hDPts[i].PositiveBucket.Counts, val.posBuckets.counts) - hDPts[i].NegativeBucket.Offset = int32(val.negBuckets.startBin) + hDPts[i].NegativeBucket.Offset = val.negBuckets.startBin hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(val.negBuckets.counts), len(val.negBuckets.counts)) copy(hDPts[i].NegativeBucket.Counts, val.negBuckets.counts) diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/filtered_reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/filtered_reservoir.go new file mode 100644 index 00000000000..691a910608d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/filtered_reservoir.go @@ -0,0 +1,50 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric/exemplar" +) + +// FilteredExemplarReservoir wraps a [exemplar.Reservoir] with a filter. +type FilteredExemplarReservoir[N int64 | float64] interface { + // Offer accepts the parameters associated with a measurement. The + // parameters will be stored as an exemplar if the filter decides to + // sample the measurement. + // + // The passed ctx needs to contain any baggage or span that were active + // when the measurement was made. This information may be used by the + // Reservoir in making a sampling decision. + Offer(ctx context.Context, val N, attr []attribute.KeyValue) + // Collect returns all the held exemplars in the reservoir. + Collect(dest *[]exemplar.Exemplar) +} + +// filteredExemplarReservoir handles the pre-sampled exemplar of measurements made. +type filteredExemplarReservoir[N int64 | float64] struct { + filter exemplar.Filter + reservoir exemplar.Reservoir +} + +// NewFilteredExemplarReservoir creates a [FilteredExemplarReservoir] which only offers values +// that are allowed by the filter. +func NewFilteredExemplarReservoir[N int64 | float64](f exemplar.Filter, r exemplar.Reservoir) FilteredExemplarReservoir[N] { + return &filteredExemplarReservoir[N]{ + filter: f, + reservoir: r, + } +} + +func (f *filteredExemplarReservoir[N]) Offer(ctx context.Context, val N, attr []attribute.KeyValue) { + if f.filter(ctx) { + // only record the current time if we are sampling this measurement. 
+ f.reservoir.Offer(ctx, time.Now(), exemplar.NewValue(val), attr) + } +} + +func (f *filteredExemplarReservoir[N]) Collect(dest *[]exemplar.Exemplar) { f.reservoir.Collect(dest) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go index ade0941f5f5..d577ae2c198 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go @@ -11,13 +11,12 @@ import ( "time" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) type buckets[N int64 | float64] struct { attrs attribute.Set - res exemplar.FilteredReservoir[N] + res FilteredExemplarReservoir[N] counts []uint64 count uint64 @@ -48,13 +47,13 @@ type histValues[N int64 | float64] struct { noSum bool bounds []float64 - newRes func() exemplar.FilteredReservoir[N] + newRes func(attribute.Set) FilteredExemplarReservoir[N] limit limiter[*buckets[N]] values map[attribute.Distinct]*buckets[N] valuesMu sync.Mutex } -func newHistValues[N int64 | float64](bounds []float64, noSum bool, limit int, r func() exemplar.FilteredReservoir[N]) *histValues[N] { +func newHistValues[N int64 | float64](bounds []float64, noSum bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *histValues[N] { // The responsibility of keeping all buckets correctly associated with the // passed boundaries is ultimately this type's responsibility. Make a copy // here so we can always guarantee this. Or, in the case of failure, have @@ -94,7 +93,7 @@ func (s *histValues[N]) measure(ctx context.Context, value N, fltrAttr attribute // // buckets = (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, +∞) b = newBuckets[N](attr, len(s.bounds)+1) - b.res = s.newRes() + b.res = s.newRes(attr) // Ensure min and max are recorded values (not zero), for new buckets. b.min, b.max = value, value @@ -109,7 +108,7 @@ func (s *histValues[N]) measure(ctx context.Context, value N, fltrAttr attribute // newHistogram returns an Aggregator that summarizes a set of measurements as // an histogram. 
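How the pieces compose, mirroring what the SDK wires up internally (aggregate is an internal package, so this is illustration only; ctx is assumed in scope):

res := NewFilteredExemplarReservoir[float64](
	exemplar.TraceBasedFilter,
	exemplar.NewFixedSizeReservoir(4),
)
// Offer reaches the fixed-size reservoir only when ctx carries a sampled
// span; otherwise the measurement is skipped and no timestamp is taken.
res.Offer(ctx, 12.5, nil)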
-func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool, limit int, r func() exemplar.FilteredReservoir[N]) *histogram[N] { +func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *histogram[N] { return &histogram[N]{ histValues: newHistValues[N](boundaries, noSum, limit, r), noMinMax: noMinMax, diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go index c359368403e..d3a93f085c9 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go @@ -9,7 +9,6 @@ import ( "time" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) @@ -17,10 +16,10 @@ import ( type datapoint[N int64 | float64] struct { attrs attribute.Set value N - res exemplar.FilteredReservoir[N] + res FilteredExemplarReservoir[N] } -func newLastValue[N int64 | float64](limit int, r func() exemplar.FilteredReservoir[N]) *lastValue[N] { +func newLastValue[N int64 | float64](limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *lastValue[N] { return &lastValue[N]{ newRes: r, limit: newLimiter[datapoint[N]](limit), @@ -33,7 +32,7 @@ func newLastValue[N int64 | float64](limit int, r func() exemplar.FilteredReserv type lastValue[N int64 | float64] struct { sync.Mutex - newRes func() exemplar.FilteredReservoir[N] + newRes func(attribute.Set) FilteredExemplarReservoir[N] limit limiter[datapoint[N]] values map[attribute.Distinct]datapoint[N] start time.Time @@ -46,7 +45,7 @@ func (s *lastValue[N]) measure(ctx context.Context, value N, fltrAttr attribute. attr := s.limit.Attributes(fltrAttr, s.values) d, ok := s.values[attr.Equivalent()] if !ok { - d.res = s.newRes() + d.res = s.newRes(attr) } d.attrs = attr @@ -115,7 +114,7 @@ func (s *lastValue[N]) copyDpts(dest *[]metricdata.DataPoint[N], t time.Time) in // newPrecomputedLastValue returns an aggregator that summarizes a set of // observations as the last one made. -func newPrecomputedLastValue[N int64 | float64](limit int, r func() exemplar.FilteredReservoir[N]) *precomputedLastValue[N] { +func newPrecomputedLastValue[N int64 | float64](limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *precomputedLastValue[N] { return &precomputedLastValue[N]{lastValue: newLastValue[N](limit, r)} } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go index 89136692260..8e132ad6181 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go @@ -9,25 +9,24 @@ import ( "time" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) type sumValue[N int64 | float64] struct { n N - res exemplar.FilteredReservoir[N] + res FilteredExemplarReservoir[N] attrs attribute.Set } // valueMap is the storage for sums. 
type valueMap[N int64 | float64] struct { sync.Mutex - newRes func() exemplar.FilteredReservoir[N] + newRes func(attribute.Set) FilteredExemplarReservoir[N] limit limiter[sumValue[N]] values map[attribute.Distinct]sumValue[N] } -func newValueMap[N int64 | float64](limit int, r func() exemplar.FilteredReservoir[N]) *valueMap[N] { +func newValueMap[N int64 | float64](limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *valueMap[N] { return &valueMap[N]{ newRes: r, limit: newLimiter[sumValue[N]](limit), @@ -42,7 +41,7 @@ func (s *valueMap[N]) measure(ctx context.Context, value N, fltrAttr attribute.S attr := s.limit.Attributes(fltrAttr, s.values) v, ok := s.values[attr.Equivalent()] if !ok { - v.res = s.newRes() + v.res = s.newRes(attr) } v.attrs = attr @@ -55,7 +54,7 @@ func (s *valueMap[N]) measure(ctx context.Context, value N, fltrAttr attribute.S // newSum returns an aggregator that summarizes a set of measurements as their // arithmetic sum. Each sum is scoped by attributes and the aggregation cycle // the measurements were made in. -func newSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.FilteredReservoir[N]) *sum[N] { +func newSum[N int64 | float64](monotonic bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *sum[N] { return &sum[N]{ valueMap: newValueMap[N](limit, r), monotonic: monotonic, @@ -142,9 +141,9 @@ func (s *sum[N]) cumulative(dest *metricdata.Aggregation) int { } // newPrecomputedSum returns an aggregator that summarizes a set of -// observatrions as their arithmetic sum. Each sum is scoped by attributes and +// observations as their arithmetic sum. Each sum is scoped by attributes and // the aggregation cycle the measurements were made in. -func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.FilteredReservoir[N]) *precomputedSum[N] { +func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *precomputedSum[N] { return &precomputedSum[N]{ valueMap: newValueMap[N](limit, r), monotonic: monotonic, @@ -152,7 +151,7 @@ func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func() ex } } -// precomputedSum summarizes a set of observatrions as their arithmetic sum. +// precomputedSum summarizes a set of observations as their arithmetic sum. type precomputedSum[N int64 | float64] struct { *valueMap[N] diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/doc.go deleted file mode 100644 index 5394f48e0df..00000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package exemplar provides an implementation of the OpenTelemetry exemplar -// reservoir to be used in metric collection pipelines. 
-package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go deleted file mode 100644 index 5a0f39ae147..00000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" - -import ( - "context" - - "go.opentelemetry.io/otel/attribute" -) - -// Drop returns a [FilteredReservoir] that drops all measurements it is offered. -func Drop[N int64 | float64]() FilteredReservoir[N] { return &dropRes[N]{} } - -type dropRes[N int64 | float64] struct{} - -// Offer does nothing, all measurements offered will be dropped. -func (r *dropRes[N]) Offer(context.Context, N, []attribute.KeyValue) {} - -// Collect resets dest. No exemplars will ever be returned. -func (r *dropRes[N]) Collect(dest *[]Exemplar) { - *dest = (*dest)[:0] -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filtered_reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filtered_reservoir.go deleted file mode 100644 index 9fedfa4be68..00000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filtered_reservoir.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" - -import ( - "context" - "time" - - "go.opentelemetry.io/otel/attribute" -) - -// FilteredReservoir wraps a [Reservoir] with a filter. -type FilteredReservoir[N int64 | float64] interface { - // Offer accepts the parameters associated with a measurement. The - // parameters will be stored as an exemplar if the filter decides to - // sample the measurement. - // - // The passed ctx needs to contain any baggage or span that were active - // when the measurement was made. This information may be used by the - // Reservoir in making a sampling decision. - Offer(ctx context.Context, val N, attr []attribute.KeyValue) - // Collect returns all the held exemplars in the reservoir. - Collect(dest *[]Exemplar) -} - -// filteredReservoir handles the pre-sampled exemplar of measurements made. -type filteredReservoir[N int64 | float64] struct { - filter Filter - reservoir Reservoir -} - -// NewFilteredReservoir creates a [FilteredReservoir] which only offers values -// that are allowed by the filter. -func NewFilteredReservoir[N int64 | float64](f Filter, r Reservoir) FilteredReservoir[N] { - return &filteredReservoir[N]{ - filter: f, - reservoir: r, - } -} - -func (f *filteredReservoir[N]) Offer(ctx context.Context, val N, attr []attribute.KeyValue) { - if f.filter(ctx) { - // only record the current time if we are sampling this measurment. 
- f.reservoir.Offer(ctx, time.Now(), NewValue(val), attr) - } -} - -func (f *filteredReservoir[N]) Collect(dest *[]Exemplar) { f.reservoir.Collect(dest) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go deleted file mode 100644 index a6ff86d0271..00000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" - -import ( - "context" - "slices" - "sort" - "time" - - "go.opentelemetry.io/otel/attribute" -) - -// Histogram returns a [Reservoir] that samples the last measurement that falls -// within a histogram bucket. The histogram bucket upper-boundaries are define -// by bounds. -// -// The passed bounds will be sorted by this function. -func Histogram(bounds []float64) Reservoir { - slices.Sort(bounds) - return &histRes{ - bounds: bounds, - storage: newStorage(len(bounds) + 1), - } -} - -type histRes struct { - *storage - - // bounds are bucket bounds in ascending order. - bounds []float64 -} - -func (r *histRes) Offer(ctx context.Context, t time.Time, v Value, a []attribute.KeyValue) { - var x float64 - switch v.Type() { - case Int64ValueType: - x = float64(v.Int64()) - case Float64ValueType: - x = v.Float64() - default: - panic("unknown value type") - } - r.store[sort.SearchFloat64s(r.bounds, x)] = newMeasurement(ctx, t, v, a) -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go deleted file mode 100644 index 199a2608f71..00000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go +++ /dev/null @@ -1,191 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" - -import ( - "context" - "math" - "math/rand" - "sync" - "time" - - "go.opentelemetry.io/otel/attribute" -) - -var ( - // rng is used to make sampling decisions. - // - // Do not use crypto/rand. There is no reason for the decrease in performance - // given this is not a security sensitive decision. - rng = rand.New(rand.NewSource(time.Now().UnixNano())) - // Ensure concurrent safe accecess to rng and its underlying source. - rngMu sync.Mutex -) - -// random returns, as a float64, a uniform pseudo-random number in the open -// interval (0.0,1.0). -func random() float64 { - // TODO: This does not return a uniform number. rng.Float64 returns a - // uniformly random int in [0,2^53) that is divided by 2^53. Meaning it - // returns multiples of 2^-53, and not all floating point numbers between 0 - // and 1 (i.e. for values less than 2^-4 the 4 last bits of the significand - // are always going to be 0). - // - // An alternative algorithm should be considered that will actually return - // a uniform number in the interval (0,1). For example, since the default - // rand source provides a uniform distribution for Int63, this can be - // converted following the prototypical code of Mersenne Twister 64 (Takuji - // Nishimura and Makoto Matsumoto: - // http://www.math.sci.hiroshima-u.ac.jp/m-mat/MT/VERSIONS/C-LANG/mt19937-64.c) - // - // (float64(rng.Int63()>>11) + 0.5) * (1.0 / 4503599627370496.0) - // - // There are likely many other methods to explore here as well. 
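For reference, the alternative that the deleted TODO comment cites is easy to try in isolation. A runnable sketch of the Mersenne Twister 64 style conversion it quotes, which maps 52 random bits into the open interval (0,1) so zero can never be produced:

```go
package main

import (
	"fmt"
	"math/rand"
)

// openUnit implements the conversion quoted in the comment above:
// Int63()>>11 yields 52 random bits in [0, 2^52); adding 0.5 and scaling
// by 2^-52 (1/4503599627370496) lands strictly inside (0, 1).
func openUnit(rng *rand.Rand) float64 {
	return (float64(rng.Int63()>>11) + 0.5) * (1.0 / 4503599627370496.0)
}

func main() {
	rng := rand.New(rand.NewSource(1))
	for i := 0; i < 3; i++ {
		fmt.Println(openUnit(rng)) // never exactly 0 or 1
	}
}
```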
- - rngMu.Lock() - defer rngMu.Unlock() - - f := rng.Float64() - for f == 0 { - f = rng.Float64() - } - return f -} - -// FixedSize returns a [Reservoir] that samples at most k exemplars. If there -// are k or less measurements made, the Reservoir will sample each one. If -// there are more than k, the Reservoir will then randomly sample all -// additional measurement with a decreasing probability. -func FixedSize(k int) Reservoir { - r := &randRes{storage: newStorage(k)} - r.reset() - return r -} - -type randRes struct { - *storage - - // count is the number of measurement seen. - count int64 - // next is the next count that will store a measurement at a random index - // once the reservoir has been filled. - next int64 - // w is the largest random number in a distribution that is used to compute - // the next next. - w float64 -} - -func (r *randRes) Offer(ctx context.Context, t time.Time, n Value, a []attribute.KeyValue) { - // The following algorithm is "Algorithm L" from Li, Kim-Hung (4 December - // 1994). "Reservoir-Sampling Algorithms of Time Complexity - // O(n(1+log(N/n)))". ACM Transactions on Mathematical Software. 20 (4): - // 481–493 (https://dl.acm.org/doi/10.1145/198429.198435). - // - // A high-level overview of "Algorithm L": - // 0) Pre-calculate the random count greater than the storage size when - // an exemplar will be replaced. - // 1) Accept all measurements offered until the configured storage size is - // reached. - // 2) Loop: - // a) When the pre-calculate count is reached, replace a random - // existing exemplar with the offered measurement. - // b) Calculate the next random count greater than the existing one - // which will replace another exemplars - // - // The way a "replacement" count is computed is by looking at `n` number of - // independent random numbers each corresponding to an offered measurement. - // Of these numbers the smallest `k` (the same size as the storage - // capacity) of them are kept as a subset. The maximum value in this - // subset, called `w` is used to weight another random number generation - // for the next count that will be considered. - // - // By weighting the next count computation like described, it is able to - // perform a uniformly-weighted sampling algorithm based on the number of - // samples the reservoir has seen so far. The sampling will "slow down" as - // more and more samples are offered so as to reduce a bias towards those - // offered just prior to the end of the collection. - // - // This algorithm is preferred because of its balance of simplicity and - // performance. It will compute three random numbers (the bulk of - // computation time) for each item that becomes part of the reservoir, but - // it does not spend any time on items that do not. In particular it has an - // asymptotic runtime of O(k(1 + log(n/k)) where n is the number of - // measurements offered and k is the reservoir size. - // - // See https://en.wikipedia.org/wiki/Reservoir_sampling for an overview of - // this and other reservoir sampling algorithms. See - // https://github.com/MrAlias/reservoir-sampling for a performance - // comparison of reservoir sampling algorithms. - - if int(r.count) < cap(r.store) { - r.store[r.count] = newMeasurement(ctx, t, n, a) - } else { - if r.count == r.next { - // Overwrite a random existing measurement with the one offered. - idx := int(rng.Int63n(int64(cap(r.store)))) - r.store[idx] = newMeasurement(ctx, t, n, a) - r.advance() - } - } - r.count++ -} - -// reset resets r to the initial state. 
-func (r *randRes) reset() { - // This resets the number of exemplars known. - r.count = 0 - // Random index inserts should only happen after the storage is full. - r.next = int64(cap(r.store)) - - // Initial random number in the series used to generate r.next. - // - // This is set before r.advance to reset or initialize the random number - // series. Without doing so it would always be 0 or never restart a new - // random number series. - // - // This maps the uniform random number in (0,1) to a geometric distribution - // over the same interval. The mean of the distribution is inversely - // proportional to the storage capacity. - r.w = math.Exp(math.Log(random()) / float64(cap(r.store))) - - r.advance() -} - -// advance updates the count at which the offered measurement will overwrite an -// existing exemplar. -func (r *randRes) advance() { - // Calculate the next value in the random number series. - // - // The current value of r.w is based on the max of a distribution of random - // numbers (i.e. `w = max(u_1,u_2,...,u_k)` for `k` equal to the capacity - // of the storage and each `u` in the interval (0,w)). To calculate the - // next r.w we use the fact that when the next exemplar is selected to be - // included in the storage an existing one will be dropped, and the - // corresponding random number in the set used to calculate r.w will also - // be replaced. The replacement random number will also be within (0,w), - // therefore the next r.w will be based on the same distribution (i.e. - // `max(u_1,u_2,...,u_k)`). Therefore, we can sample the next r.w by - // computing the next random number `u` and take r.w as `w * u^(1/k)`. - r.w *= math.Exp(math.Log(random()) / float64(cap(r.store))) - // Use the new random number in the series to calculate the count of the - // next measurement that will be stored. - // - // Given 0 < r.w < 1, each iteration will result in subsequent r.w being - // smaller. This translates here into the next next being selected against - // a distribution with a higher mean (i.e. the expected value will increase - // and replacements become less likely) - // - // Important to note, the new r.next will always be at least 1 more than - // the last r.next. - r.next += int64(math.Log(random())/math.Log(1-r.w)) + 1 -} - -func (r *randRes) Collect(dest *[]Exemplar) { - r.storage.Collect(dest) - // Call reset here even though it will reset r.count and restart the random - // number series. This will persist any old exemplars as long as no new - // measurements are offered, but it will also prioritize those new - // measurements that are made over the older collection cycle ones. - r.reset() -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go deleted file mode 100644 index 80fa59554f2..00000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" - -import ( - "context" - "time" - - "go.opentelemetry.io/otel/attribute" -) - -// Reservoir holds the sampled exemplar of measurements made. -type Reservoir interface { - // Offer accepts the parameters associated with a measurement. The - // parameters will be stored as an exemplar if the Reservoir decides to - // sample the measurement. 
- // - // The passed ctx needs to contain any baggage or span that were active - // when the measurement was made. This information may be used by the - // Reservoir in making a sampling decision. - // - // The time t is the time when the measurement was made. The val and attr - // parameters are the value and dropped (filtered) attributes of the - // measurement respectively. - Offer(ctx context.Context, t time.Time, val Value, attr []attribute.KeyValue) - - // Collect returns all the held exemplars. - // - // The Reservoir state is preserved after this call. - Collect(dest *[]Exemplar) -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go deleted file mode 100644 index 10b2976f796..00000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" - -import ( - "context" - "time" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" -) - -// storage is an exemplar storage for [Reservoir] implementations. -type storage struct { - // store are the measurements sampled. - // - // This does not use []metricdata.Exemplar because it potentially would - // require an allocation for trace and span IDs in the hot path of Offer. - store []measurement -} - -func newStorage(n int) *storage { - return &storage{store: make([]measurement, n)} -} - -// Collect returns all the held exemplars. -// -// The Reservoir state is preserved after this call. -func (r *storage) Collect(dest *[]Exemplar) { - *dest = reset(*dest, len(r.store), len(r.store)) - var n int - for _, m := range r.store { - if !m.valid { - continue - } - - m.Exemplar(&(*dest)[n]) - n++ - } - *dest = (*dest)[:n] -} - -// measurement is a measurement made by a telemetry system. -type measurement struct { - // FilteredAttributes are the attributes dropped during the measurement. - FilteredAttributes []attribute.KeyValue - // Time is the time when the measurement was made. - Time time.Time - // Value is the value of the measurement. - Value Value - // SpanContext is the SpanContext active when a measurement was made. - SpanContext trace.SpanContext - - valid bool -} - -// newMeasurement returns a new non-empty Measurement. -func newMeasurement(ctx context.Context, ts time.Time, v Value, droppedAttr []attribute.KeyValue) measurement { - return measurement{ - FilteredAttributes: droppedAttr, - Time: ts, - Value: v, - SpanContext: trace.SpanContextFromContext(ctx), - valid: true, - } -} - -// Exemplar returns m as an [Exemplar]. 
-func (m measurement) Exemplar(dest *Exemplar) { - dest.FilteredAttributes = m.FilteredAttributes - dest.Time = m.Time - dest.Value = m.Value - - if m.SpanContext.HasTraceID() { - traceID := m.SpanContext.TraceID() - dest.TraceID = traceID[:] - } else { - dest.TraceID = dest.TraceID[:0] - } - - if m.SpanContext.HasSpanID() { - spanID := m.SpanContext.SpanID() - dest.SpanID = spanID[:] - } else { - dest.SpanID = dest.SpanID[:0] - } -} - -func reset[T any](s []T, length, capacity int) []T { - if cap(s) < capacity { - return make([]T, length, capacity) - } - return s[:length] -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go deleted file mode 100644 index 9daf27dc006..00000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" - -import "math" - -// ValueType identifies the type of value used in exemplar data. -type ValueType uint8 - -const ( - // UnknownValueType should not be used. It represents a misconfigured - // Value. - UnknownValueType ValueType = 0 - // Int64ValueType represents a Value with int64 data. - Int64ValueType ValueType = 1 - // Float64ValueType represents a Value with float64 data. - Float64ValueType ValueType = 2 -) - -// Value is the value of data held by an exemplar. -type Value struct { - t ValueType - val uint64 -} - -// NewValue returns a new [Value] for the provided value. -func NewValue[N int64 | float64](value N) Value { - switch v := any(value).(type) { - case int64: - return Value{t: Int64ValueType, val: uint64(v)} - case float64: - return Value{t: Float64ValueType, val: math.Float64bits(v)} - } - return Value{} -} - -// Type returns the [ValueType] of data held by v. -func (v Value) Type() ValueType { return v.t } - -// Int64 returns the value of v as an int64. If the ValueType of v is not an -// Int64ValueType, 0 is returned. -func (v Value) Int64() int64 { - if v.t == Int64ValueType { - return int64(v.val) - } - return 0 -} - -// Float64 returns the value of v as an float64. If the ValueType of v is not -// an Float64ValueType, 0 is returned. -func (v Value) Float64() float64 { - if v.t == Float64ValueType { - return math.Float64frombits(v.val) - } - return 0 -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md index aba69d65471..59f736b733f 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md @@ -10,6 +10,7 @@ See the [Compatibility and Stability](#compatibility-and-stability) section for - [Cardinality Limit](#cardinality-limit) - [Exemplars](#exemplars) +- [Instrument Enabled](#instrument-enabled) ### Cardinality Limit @@ -102,6 +103,24 @@ Revert to the default exemplar filter (`"trace_based"`) unset OTEL_METRICS_EXEMPLAR_FILTER ``` +### Instrument Enabled + +To help users avoid performing computationally expensive operations when recording measurements, synchronous instruments provide an `Enabled` method. 
+ +#### Examples + +The following code shows an example of how to check if an instrument implements the `EnabledInstrument` interface before using the `Enabled` function to avoid doing an expensive computation: + +```go +type enabledInstrument interface { Enabled(context.Context) bool } + +ctr, err := m.Int64Counter("expensive-counter") +if err != nil { + // Handle the instrument creation error. +} +c, ok := ctr.(enabledInstrument) +if !ok || c.Enabled(context.Background()) { + ctr.Add(context.Background(), expensiveComputation()) +} +``` + ## Compatibility and Stability Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../../VERSIONING.md). diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go index 8cd2f37417b..a98606238ad 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go @@ -8,41 +8,26 @@ package x // import "go.opentelemetry.io/otel/sdk/metric/internal/x" import ( + "context" "os" "strconv" - "strings" ) -var ( - // Exemplars is an experimental feature flag that defines if exemplars - // should be recorded for metric data-points. - // - // To enable this feature set the OTEL_GO_X_EXEMPLAR environment variable - // to the case-insensitive string value of "true" (i.e. "True" and "TRUE" - // will also enable this). - Exemplars = newFeature("EXEMPLAR", func(v string) (string, bool) { - if strings.ToLower(v) == "true" { - return v, true - } - return "", false - }) - - // CardinalityLimit is an experimental feature flag that defines if - // cardinality limits should be applied to the recorded metric data-points. - // - // To enable this feature set the OTEL_GO_X_CARDINALITY_LIMIT environment - // variable to the integer limit value you want to use. - // - // Setting OTEL_GO_X_CARDINALITY_LIMIT to a value less than or equal to 0 - // will disable the cardinality limits. - CardinalityLimit = newFeature("CARDINALITY_LIMIT", func(v string) (int, bool) { - n, err := strconv.Atoi(v) - if err != nil { - return 0, false - } - return n, true - }) -) +// CardinalityLimit is an experimental feature flag that defines if +// cardinality limits should be applied to the recorded metric data-points. +// +// To enable this feature set the OTEL_GO_X_CARDINALITY_LIMIT environment +// variable to the integer limit value you want to use. +// +// Setting OTEL_GO_X_CARDINALITY_LIMIT to a value less than or equal to 0 +// will disable the cardinality limits. +var CardinalityLimit = newFeature("CARDINALITY_LIMIT", func(v string) (int, bool) { + n, err := strconv.Atoi(v) + if err != nil { + return 0, false + } + return n, true +}) // Feature is an experimental feature control flag. It provides a uniform way // to interact with these feature flags and parse their values. @@ -83,3 +68,14 @@ func (f Feature[T]) Enabled() bool { _, ok := f.Lookup() return ok } + +// EnabledInstrument informs whether the instrument is enabled. +// +// EnabledInstrument interface is implemented by synchronous instruments. +type EnabledInstrument interface { + // Enabled returns whether the instrument will process measurements for the given context. + // + // This function can be used in places where measuring an instrument + // would result in computationally expensive operations.
+ Enabled(context.Context) bool +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go index e0fd86ca78d..c495985bc28 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go @@ -113,18 +113,17 @@ func (mr *ManualReader) Collect(ctx context.Context, rm *metricdata.ResourceMetr if err != nil { return err } - var errs []error for _, producer := range mr.externalProducers.Load().([]Producer) { - externalMetrics, err := producer.Produce(ctx) - if err != nil { - errs = append(errs, err) + externalMetrics, e := producer.Produce(ctx) + if e != nil { + err = errors.Join(err, e) } rm.ScopeMetrics = append(rm.ScopeMetrics, externalMetrics...) } global.Debug("ManualReader collection", "Data", rm) - return unifyErrors(errs) + return err } // MarshalLog returns logging data about the ManualReader. diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go index 479b7610eb1..a6ccd117b80 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go @@ -150,6 +150,11 @@ func (m *meter) int64ObservableInstrument(id Instrument, callbacks []metric.Int6 continue } inst.appendMeasures(in) + + // Add the measures to the pipeline. It is required to maintain + // measures per pipeline to avoid calling the measure that + // is not part of the pipeline. + insert.pipeline.addInt64Measure(inst.observableID, in) for _, cback := range callbacks { inst := int64Observer{measures: in} fn := cback @@ -185,6 +190,11 @@ func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64Obser // configured with options. The instrument is used to asynchronously record // int64 measurements once per a measurement collection cycle. Only the // measurements recorded during the collection cycle are exported. +// +// If Int64ObservableUpDownCounter is invoked repeatedly with the same Name, +// Description, and Unit, only the first set of callbacks provided are used. +// Use meter.RegisterCallback and Registration.Unregister to manage callbacks +// if instrumentation can be created multiple times with different callbacks. func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { cfg := metric.NewInt64ObservableUpDownCounterConfig(options...) id := Instrument{ @@ -201,6 +211,11 @@ func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int6 // configured with options. The instrument is used to asynchronously record // instantaneous int64 measurements once per a measurement collection cycle. // Only the measurements recorded during the collection cycle are exported. +// +// If Int64ObservableGauge is invoked repeatedly with the same Name, +// Description, and Unit, only the first set of callbacks provided are used. +// Use meter.RegisterCallback and Registration.Unregister to manage callbacks +// if instrumentation can be created multiple times with different callbacks. func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { cfg := metric.NewInt64ObservableGaugeConfig(options...) 
id := Instrument{ @@ -299,6 +314,11 @@ func (m *meter) float64ObservableInstrument(id Instrument, callbacks []metric.Fl continue } inst.appendMeasures(in) + + // Add the measures to the pipeline. It is required to maintain + // measures per pipeline to avoid calling the measure that + // is not part of the pipeline. + insert.pipeline.addFloat64Measure(inst.observableID, in) for _, cback := range callbacks { inst := float64Observer{measures: in} fn := cback @@ -334,6 +354,11 @@ func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64O // and configured with options. The instrument is used to asynchronously record // float64 measurements once per a measurement collection cycle. Only the // measurements recorded during the collection cycle are exported. +// +// If Float64ObservableUpDownCounter is invoked repeatedly with the same Name, +// Description, and Unit, only the first set of callbacks provided are used. +// Use meter.RegisterCallback and Registration.Unregister to manage callbacks +// if instrumentation can be created multiple times with different callbacks. func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...) id := Instrument{ @@ -350,6 +375,11 @@ func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Fl // configured with options. The instrument is used to asynchronously record // instantaneous float64 measurements once per a measurement collection cycle. // Only the measurements recorded during the collection cycle are exported. +// +// If Float64ObservableGauge is invoked repeatedly with the same Name, +// Description, and Unit, only the first set of callbacks provided are used. +// Use meter.RegisterCallback and Registration.Unregister to manage callbacks +// if instrumentation can be created multiple times with different callbacks. func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { cfg := metric.NewFloat64ObservableGaugeConfig(options...) id := Instrument{ @@ -421,73 +451,80 @@ func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) return noopRegister{}, nil } - reg := newObserver() - var errs multierror + var err error + validInstruments := make([]metric.Observable, 0, len(insts)) for _, inst := range insts { - // Unwrap any global. - if u, ok := inst.(interface { - Unwrap() metric.Observable - }); ok { - inst = u.Unwrap() - } - switch o := inst.(type) { case int64Observable: - if err := o.registerable(m); err != nil { - if !errors.Is(err, errEmptyAgg) { - errs.append(err) + if e := o.registerable(m); e != nil { + if !errors.Is(e, errEmptyAgg) { + err = errors.Join(err, e) } continue } - reg.registerInt64(o.observablID) + + validInstruments = append(validInstruments, inst) case float64Observable: - if err := o.registerable(m); err != nil { - if !errors.Is(err, errEmptyAgg) { - errs.append(err) + if e := o.registerable(m); e != nil { + if !errors.Is(e, errEmptyAgg) { + err = errors.Join(err, e) } continue } - reg.registerFloat64(o.observablID) + + validInstruments = append(validInstruments, inst) default: // Instrument external to the SDK. 
- return nil, fmt.Errorf("invalid observable: from different implementation") + return nil, errors.New("invalid observable: from different implementation") } } - err := errs.errorOrNil() - if reg.len() == 0 { + if len(validInstruments) == 0 { // All insts use drop aggregation or are invalid. return noopRegister{}, err } - // Some or all instruments were valid. - cback := func(ctx context.Context) error { return f(ctx, reg) } - return m.pipes.registerMultiCallback(cback), err + unregs := make([]func(), len(m.pipes)) + for ix, pipe := range m.pipes { + reg := newObserver(pipe) + for _, inst := range validInstruments { + switch o := inst.(type) { + case int64Observable: + reg.registerInt64(o.observableID) + case float64Observable: + reg.registerFloat64(o.observableID) + } + } + + // Some or all instruments were valid. + cBack := func(ctx context.Context) error { return f(ctx, reg) } + unregs[ix] = pipe.addMultiCallback(cBack) + } + + return unregisterFuncs{f: unregs}, err } type observer struct { embedded.Observer - float64 map[observablID[float64]]struct{} - int64 map[observablID[int64]]struct{} + pipe *pipeline + float64 map[observableID[float64]]struct{} + int64 map[observableID[int64]]struct{} } -func newObserver() observer { +func newObserver(p *pipeline) observer { return observer{ - float64: make(map[observablID[float64]]struct{}), - int64: make(map[observablID[int64]]struct{}), + pipe: p, + float64: make(map[observableID[float64]]struct{}), + int64: make(map[observableID[int64]]struct{}), } } -func (r observer) len() int { - return len(r.float64) + len(r.int64) -} - -func (r observer) registerFloat64(id observablID[float64]) { +func (r observer) registerFloat64(id observableID[float64]) { r.float64[id] = struct{}{} } -func (r observer) registerInt64(id observablID[int64]) { +func (r observer) registerInt64(id observableID[int64]) { r.int64[id] = struct{}{} } @@ -501,22 +538,12 @@ func (r observer) ObserveFloat64(o metric.Float64Observable, v float64, opts ... switch conv := o.(type) { case float64Observable: oImpl = conv - case interface { - Unwrap() metric.Observable - }: - // Unwrap any global. - async := conv.Unwrap() - var ok bool - if oImpl, ok = async.(float64Observable); !ok { - global.Error(errUnknownObserver, "failed to record asynchronous") - return - } default: global.Error(errUnknownObserver, "failed to record") return } - if _, registered := r.float64[oImpl.observablID]; !registered { + if _, registered := r.float64[oImpl.observableID]; !registered { if !oImpl.dropAggregation { global.Error(errUnregObserver, "failed to record", "name", oImpl.name, @@ -528,7 +555,12 @@ func (r observer) ObserveFloat64(o metric.Float64Observable, v float64, opts ... return } c := metric.NewObserveConfig(opts) - oImpl.observe(v, c.Attributes()) + // Access to r.pipe.float64Measure is already guarded by a lock in pipeline.produce. + // TODO (#5946): Refactor pipeline and observable measures. + measures := r.pipe.float64Measures[oImpl.observableID] + for _, m := range measures { + m(context.Background(), v, c.Attributes()) + } } func (r observer) ObserveInt64(o metric.Int64Observable, v int64, opts ...metric.ObserveOption) { @@ -536,22 +568,12 @@ func (r observer) ObserveInt64(o metric.Int64Observable, v int64, opts ...metric switch conv := o.(type) { case int64Observable: oImpl = conv - case interface { - Unwrap() metric.Observable - }: - // Unwrap any global. 
- async := conv.Unwrap() - var ok bool - if oImpl, ok = async.(int64Observable); !ok { - global.Error(errUnknownObserver, "failed to record asynchronous") - return - } default: global.Error(errUnknownObserver, "failed to record") return } - if _, registered := r.int64[oImpl.observablID]; !registered { + if _, registered := r.int64[oImpl.observableID]; !registered { if !oImpl.dropAggregation { global.Error(errUnregObserver, "failed to record", "name", oImpl.name, @@ -563,7 +585,12 @@ func (r observer) ObserveInt64(o metric.Int64Observable, v int64, opts ...metric return } c := metric.NewObserveConfig(opts) + // Access to r.pipe.int64Measures is already guarded by a lock in pipeline.produce. + // TODO (#5946): Refactor pipeline and observable measures. - oImpl.observe(v, c.Attributes()) + measures := r.pipe.int64Measures[oImpl.observableID] + for _, m := range measures { + m(context.Background(), v, c.Attributes()) + } } type noopRegister struct{ embedded.Registration } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go index 67ee1b11a2e..dcd2182d9a1 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go @@ -251,18 +251,17 @@ func (r *PeriodicReader) collect(ctx context.Context, p interface{}, rm *metricd if err != nil { return err } - var errs []error for _, producer := range r.externalProducers.Load().([]Producer) { - externalMetrics, err := producer.Produce(ctx) - if err != nil { - errs = append(errs, err) + externalMetrics, e := producer.Produce(ctx) + if e != nil { + err = errors.Join(err, e) } rm.ScopeMetrics = append(rm.ScopeMetrics, externalMetrics...) } global.Debug("PeriodicReader collection", "Data", rm) - return unifyErrors(errs) + return err } // export exports metric data m using r's exporter. diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go index 823bf2fe3d2..775e2452619 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go @@ -8,14 +8,13 @@ import ( "context" "errors" "fmt" - "strings" "sync" "sync/atomic" "go.opentelemetry.io/otel/internal/global" - "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/embedded" "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/metric/exemplar" "go.opentelemetry.io/otel/sdk/metric/internal" "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" "go.opentelemetry.io/otel/sdk/metric/internal/x" @@ -38,14 +37,17 @@ type instrumentSync struct { compAgg aggregate.ComputeAggregation } -func newPipeline(res *resource.Resource, reader Reader, views []View) *pipeline { +func newPipeline(res *resource.Resource, reader Reader, views []View, exemplarFilter exemplar.Filter) *pipeline { if res == nil { res = resource.Empty() } return &pipeline{ - resource: res, - reader: reader, - views: views, + resource: res, + reader: reader, + views: views, + int64Measures: map[observableID[int64]][]aggregate.Measure[int64]{}, + float64Measures: map[observableID[float64]][]aggregate.Measure[float64]{}, + exemplarFilter: exemplarFilter, // aggregations is lazy allocated when needed.
} } @@ -63,9 +65,26 @@ type pipeline struct { views []View sync.Mutex - aggregations map[instrumentation.Scope][]instrumentSync - callbacks []func(context.Context) error - multiCallbacks list.List + int64Measures map[observableID[int64]][]aggregate.Measure[int64] + float64Measures map[observableID[float64]][]aggregate.Measure[float64] + aggregations map[instrumentation.Scope][]instrumentSync + callbacks []func(context.Context) error + multiCallbacks list.List + exemplarFilter exemplar.Filter +} + +// addInt64Measure adds a new int64 measure to the pipeline for each observer. +func (p *pipeline) addInt64Measure(id observableID[int64], m []aggregate.Measure[int64]) { + p.Lock() + defer p.Unlock() + p.int64Measures[id] = m +} + +// addFloat64Measure adds a new float64 measure to the pipeline for each observer. +func (p *pipeline) addFloat64Measure(id observableID[float64], m []aggregate.Measure[float64]) { + p.Lock() + defer p.Unlock() + p.float64Measures[id] = m } // addSync adds the instrumentSync to pipeline p with scope. This method is not @@ -105,14 +124,15 @@ func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics) p.Lock() defer p.Unlock() - var errs multierror + var err error for _, c := range p.callbacks { // TODO make the callbacks parallel. ( #3034 ) - if err := c(ctx); err != nil { - errs.append(err) + if e := c(ctx); e != nil { + err = errors.Join(err, e) } if err := ctx.Err(); err != nil { rm.Resource = nil + clear(rm.ScopeMetrics) // Erase elements to let GC collect objects. rm.ScopeMetrics = rm.ScopeMetrics[:0] return err } @@ -120,12 +140,13 @@ func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics) for e := p.multiCallbacks.Front(); e != nil; e = e.Next() { // TODO make the callbacks parallel. ( #3034 ) f := e.Value.(multiCallback) - if err := f(ctx); err != nil { - errs.append(err) + if e := f(ctx); e != nil { + err = errors.Join(err, e) } if err := ctx.Err(); err != nil { // This means the context expired before we finished running callbacks. rm.Resource = nil + clear(rm.ScopeMetrics) // Erase elements to let GC collect objects. rm.ScopeMetrics = rm.ScopeMetrics[:0] return err } @@ -157,7 +178,7 @@ func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics) rm.ScopeMetrics = rm.ScopeMetrics[:i] - return errs.errorOrNil() + return err } // inserter facilitates inserting of new instruments from a single scope into a @@ -219,7 +240,7 @@ func (i *inserter[N]) Instrument(inst Instrument, readerAggregation Aggregation) measures []aggregate.Measure[N] ) - errs := &multierror{wrapped: errCreatingAggregators} + var err error seen := make(map[uint64]struct{}) for _, v := range i.pipeline.views { stream, match := v(inst) @@ -227,9 +248,9 @@ func (i *inserter[N]) Instrument(inst Instrument, readerAggregation Aggregation) continue } matched = true - in, id, err := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation) - if err != nil { - errs.append(err) + in, id, e := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation) + if e != nil { + err = errors.Join(err, e) } if in == nil { // Drop aggregation. continue @@ -242,8 +263,12 @@ func (i *inserter[N]) Instrument(inst Instrument, readerAggregation Aggregation) measures = append(measures, in) } + if err != nil { + err = errors.Join(errCreatingAggregators, err) + } + if matched { - return measures, errs.errorOrNil() + return measures, err } // Apply implicit default view if no explicit matched. 
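The pipeline hunks above (and the reader hunks elsewhere in this change) replace the SDK's bespoke multierror accumulator with the standard library's errors.Join; the multierror type itself is deleted further down. A minimal sketch of the two properties the migration relies on, nil-tolerance and errors.Is matching:

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	sentinel := errors.New("callback failed")

	var err error // stays nil while every callback succeeds
	for _, e := range []error{nil, sentinel, nil, errors.New("another failure")} {
		// errors.Join discards nil arguments and returns nil only when all
		// arguments are nil, so no separate "saw an error" bookkeeping is
		// needed -- the job the deleted multierror type used to do.
		err = errors.Join(err, e)
	}

	fmt.Println(err != nil)               // true
	fmt.Println(errors.Is(err, sentinel)) // true: joined errors remain matchable
	fmt.Println(err)                      // both messages, newline separated
}
```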
@@ -252,15 +277,18 @@ func (i *inserter[N]) Instrument(inst Instrument, readerAggregation Aggregation) Description: inst.Description, Unit: inst.Unit, } - in, _, err := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation) - if err != nil { - errs.append(err) + in, _, e := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation) + if e != nil { + if err == nil { + err = errCreatingAggregators + } + err = errors.Join(err, e) } if in != nil { // Ensured to have not seen given matched was false. measures = append(measures, in) } - return measures, errs.errorOrNil() + return measures, err } // addCallback registers a single instrument callback to be run when @@ -329,6 +357,9 @@ func (i *inserter[N]) cachedAggregator(scope instrumentation.Scope, kind Instrum // The view explicitly requested the default aggregation. stream.Aggregation = DefaultAggregationSelector(kind) } + if stream.ExemplarReservoirProviderSelector == nil { + stream.ExemplarReservoirProviderSelector = DefaultExemplarReservoirProviderSelector + } if err := isAggregatorCompatible(kind, stream.Aggregation); err != nil { return nil, 0, fmt.Errorf( @@ -349,7 +380,7 @@ func (i *inserter[N]) cachedAggregator(scope instrumentation.Scope, kind Instrum cv := i.aggregators.Lookup(normID, func() aggVal[N] { b := aggregate.Builder[N]{ Temporality: i.pipeline.reader.temporality(kind), - ReservoirFunc: reservoirFunc[N](stream.Aggregation), + ReservoirFunc: reservoirFunc[N](stream.ExemplarReservoirProviderSelector(stream.Aggregation), i.pipeline.exemplarFilter), } b.Filter = stream.AttributeFilter // A value less than or equal to zero will disable the aggregation @@ -552,24 +583,16 @@ func isAggregatorCompatible(kind InstrumentKind, agg Aggregation) error { // measurement. type pipelines []*pipeline -func newPipelines(res *resource.Resource, readers []Reader, views []View) pipelines { +func newPipelines(res *resource.Resource, readers []Reader, views []View, exemplarFilter exemplar.Filter) pipelines { pipes := make([]*pipeline, 0, len(readers)) for _, r := range readers { - p := newPipeline(res, r, views) + p := newPipeline(res, r, views, exemplarFilter) r.register(p) pipes = append(pipes, p) } return pipes } -func (p pipelines) registerMultiCallback(c multiCallback) metric.Registration { - unregs := make([]func(), len(p)) - for i, pipe := range p { - unregs[i] = pipe.addMultiCallback(c) - } - return unregisterFuncs{f: unregs} -} - type unregisterFuncs struct { embedded.Registration f []func() @@ -602,15 +625,15 @@ func newResolver[N int64 | float64](p pipelines, vc *cache[string, instID]) reso func (r resolver[N]) Aggregators(id Instrument) ([]aggregate.Measure[N], error) { var measures []aggregate.Measure[N] - errs := &multierror{} + var err error for _, i := range r.inserters { - in, err := i.Instrument(id, i.readerDefaultAggregation(id.Kind)) - if err != nil { - errs.append(err) + in, e := i.Instrument(id, i.readerDefaultAggregation(id.Kind)) + if e != nil { + err = errors.Join(err, e) } measures = append(measures, in...) 
} - return measures, errs.errorOrNil() + return measures, err } // HistogramAggregators returns the histogram Aggregators that must be updated by the instrument @@ -619,37 +642,18 @@ func (r resolver[N]) Aggregators(id Instrument) ([]aggregate.Measure[N], error) func (r resolver[N]) HistogramAggregators(id Instrument, boundaries []float64) ([]aggregate.Measure[N], error) { var measures []aggregate.Measure[N] - errs := &multierror{} + var err error for _, i := range r.inserters { agg := i.readerDefaultAggregation(id.Kind) if histAgg, ok := agg.(AggregationExplicitBucketHistogram); ok && len(boundaries) > 0 { histAgg.Boundaries = boundaries agg = histAgg } - in, err := i.Instrument(id, agg) - if err != nil { - errs.append(err) + in, e := i.Instrument(id, agg) + if e != nil { + err = errors.Join(err, e) } measures = append(measures, in...) } - return measures, errs.errorOrNil() -} - -type multierror struct { - wrapped error - errors []string -} - -func (m *multierror) errorOrNil() error { - if len(m.errors) == 0 { - return nil - } - if m.wrapped == nil { - return errors.New(strings.Join(m.errors, "; ")) - } - return fmt.Errorf("%w: %s", m.wrapped, strings.Join(m.errors, "; ")) -} - -func (m *multierror) append(err error) { - m.errors = append(m.errors, err.Error()) + return measures, err } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go b/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go index a82af538e67..2fca89e5a8e 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go @@ -42,7 +42,7 @@ func NewMeterProvider(options ...Option) *MeterProvider { flush, sdown := conf.readerSignals() mp := &MeterProvider{ - pipes: newPipelines(conf.res, conf.readers, conf.views), + pipes: newPipelines(conf.res, conf.readers, conf.views, conf.exemplarFilter), forceFlush: flush, shutdown: sdown, } @@ -76,15 +76,17 @@ func (mp *MeterProvider) Meter(name string, options ...metric.MeterOption) metri c := metric.NewMeterConfig(options...) s := instrumentation.Scope{ - Name: name, - Version: c.InstrumentationVersion(), - SchemaURL: c.SchemaURL(), + Name: name, + Version: c.InstrumentationVersion(), + SchemaURL: c.SchemaURL(), + Attributes: c.InstrumentationAttributes(), } global.Info("Meter created", "Name", s.Name, "Version", s.Version, "SchemaURL", s.SchemaURL, + "Attributes", s.Attributes, ) return mp.meters.Lookup(s, func() *meter { diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go index a55f9a5372c..d13a7069788 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go @@ -5,26 +5,26 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric" import ( "context" - "fmt" + "errors" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) // errDuplicateRegister is logged by a Reader when an attempt to registered it // more than once occurs. -var errDuplicateRegister = fmt.Errorf("duplicate reader registration") +var errDuplicateRegister = errors.New("duplicate reader registration") // ErrReaderNotRegistered is returned if Collect or Shutdown are called before // the reader is registered with a MeterProvider. -var ErrReaderNotRegistered = fmt.Errorf("reader is not registered") +var ErrReaderNotRegistered = errors.New("reader is not registered") // ErrReaderShutdown is returned if Collect or Shutdown are called after a // reader has been Shutdown once. 
-var ErrReaderShutdown = fmt.Errorf("reader is shutdown") +var ErrReaderShutdown = errors.New("reader is shutdown") // errNonPositiveDuration is logged when an environmental variable // has non-positive value. -var errNonPositiveDuration = fmt.Errorf("non-positive duration") +var errNonPositiveDuration = errors.New("non-positive duration") // Reader is the interface used between the SDK and an // exporter. Control flow is bi-directional through the @@ -34,7 +34,7 @@ var errNonPositiveDuration = fmt.Errorf("non-positive duration") // start of bi-directional control flow. // // Typically, push-based exporters that are periodic will -// implement PeroidicExporter themselves and construct a +// implement PeriodicExporter themselves and construct a // PeriodicReader to satisfy this interface. // // Pull-based exporters will typically implement Register @@ -60,8 +60,8 @@ type Reader interface { aggregation(InstrumentKind) Aggregation // nolint:revive // import-shadow for method scoped by type. // Collect gathers and returns all metric data related to the Reader from - // the SDK and stores it in out. An error is returned if this is called - // after Shutdown or if out is nil. + // the SDK and stores it in rm. An error is returned if this is called + // after Shutdown or if rm is nil. // // This method needs to be concurrent safe, and the cancellation of the // passed context is expected to be honored. diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go index dade0a19a2b..7c4b8530df1 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go @@ -5,5 +5,5 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric" // version is the current release version of the metric SDK in use. func version() string { - return "1.28.0" + return "1.34.0" } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/view.go b/vendor/go.opentelemetry.io/otel/sdk/metric/view.go index cd08c673248..630890f4263 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/view.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/view.go @@ -96,11 +96,12 @@ func NewView(criteria Instrument, mask Stream) View { return func(i Instrument) (Stream, bool) { if matchFunc(i) { return Stream{ - Name: nonZero(mask.Name, i.Name), - Description: nonZero(mask.Description, i.Description), - Unit: nonZero(mask.Unit, i.Unit), - Aggregation: agg, - AttributeFilter: mask.AttributeFilter, + Name: nonZero(mask.Name, i.Name), + Description: nonZero(mask.Description, i.Description), + Unit: nonZero(mask.Unit, i.Unit), + Aggregation: agg, + AttributeFilter: mask.AttributeFilter, + ExemplarReservoirProviderSelector: mask.ExemplarReservoirProviderSelector, }, true } return Stream{}, false diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go index 95a61d61d49..c02aeefdde5 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go @@ -7,7 +7,6 @@ import ( "context" "errors" "fmt" - "strings" ) // ErrPartialResource is returned by a detector when complete source @@ -57,62 +56,37 @@ func Detect(ctx context.Context, detectors ...Detector) (*Resource, error) { // these errors will be returned. Otherwise, nil is returned. 
func detect(ctx context.Context, res *Resource, detectors []Detector) error { var ( - r *Resource - errs detectErrs - err error + r *Resource + err error + e error ) for _, detector := range detectors { if detector == nil { continue } - r, err = detector.Detect(ctx) - if err != nil { - errs = append(errs, err) - if !errors.Is(err, ErrPartialResource) { + r, e = detector.Detect(ctx) + if e != nil { + err = errors.Join(err, e) + if !errors.Is(e, ErrPartialResource) { continue } } - r, err = Merge(res, r) - if err != nil { - errs = append(errs, err) + r, e = Merge(res, r) + if e != nil { + err = errors.Join(err, e) } *res = *r } - if len(errs) == 0 { - return nil - } - if errors.Is(errs, ErrSchemaURLConflict) { - // If there has been a merge conflict, ensure the resource has no - // schema URL. - res.schemaURL = "" - } - return errs -} - -type detectErrs []error - -func (e detectErrs) Error() string { - errStr := make([]string, len(e)) - for i, err := range e { - errStr[i] = fmt.Sprintf("* %s", err) - } - - format := "%d errors occurred detecting resource:\n\t%s" - return fmt.Sprintf(format, len(e), strings.Join(errStr, "\n\t")) -} + if err != nil { + if errors.Is(err, ErrSchemaURLConflict) { + // If there has been a merge conflict, ensure the resource has no + // schema URL. + res.schemaURL = "" + } -func (e detectErrs) Unwrap() error { - switch len(e) { - case 0: - return nil - case 1: - return e[0] + err = fmt.Errorf("error detecting resource: %w", err) } - return e[1:] -} - -func (e detectErrs) Is(target error) bool { - return len(e) != 0 && errors.Is(e[0], target) + return err } diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go index 6ac1cdbf7b4..cf3c88e15cd 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go @@ -20,15 +20,13 @@ type ( // telemetrySDK is a Detector that provides information about // the OpenTelemetry SDK used. This Detector is included as a // builtin. If these resource attributes are not wanted, use - // the WithTelemetrySDK(nil) or WithoutBuiltin() options to - // explicitly disable them. + // resource.New() to explicitly disable them. telemetrySDK struct{} // host is a Detector that provides information about the host // being run on. This Detector is included as a builtin. If // these resource attributes are not wanted, use the - // WithHost(nil) or WithoutBuiltin() options to explicitly - // disable them. + // resource.New() to explicitly disable them. host struct{} stringDetector struct { diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go index 4ce757dfd6b..ccc97e1b662 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go @@ -280,6 +280,7 @@ func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error { // // It is up to the exporter to implement any type of retry logic if a batch is failing // to be exported, since it is specific to the protocol and backend being sent to. 
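The hunk below, like the pipeline.produce hunks earlier in this change, pairs Go 1.21's clear builtin with a reslice. A small sketch of why the zeroing step matters when the backing array is reused:

```go
package main

import "fmt"

func main() {
	s := "kept alive"
	batch := []*string{&s}

	// batch[:0] alone would leave &s reachable through the backing array.
	// Zeroing the elements first lets the GC reclaim what they reference
	// while the slice's capacity is still reused for the next batch.
	clear(batch)
	batch = batch[:0]

	fmt.Println(len(batch), cap(batch)) // 0 1
}
```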
+ clear(bsp.batch) // Erase elements to let GC collect objects bsp.batch = bsp.batch[:0] if err != nil { diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go index 14c2e5bebda..185aa7c08f7 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go @@ -139,9 +139,10 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T name = defaultTracerName } is := instrumentation.Scope{ - Name: name, - Version: c.InstrumentationVersion(), - SchemaURL: c.SchemaURL(), + Name: name, + Version: c.InstrumentationVersion(), + SchemaURL: c.SchemaURL(), + Attributes: c.InstrumentationAttributes(), } t, ok := func() (trace.Tracer, bool) { @@ -168,7 +169,7 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T // slowing down all tracing consumers. // - Logging code may be instrumented with tracing and deadlock because it could try // acquiring the same non-reentrant mutex. - global.Info("Tracer created", "name", name, "version", is.Version, "schemaURL", is.SchemaURL) + global.Info("Tracer created", "name", name, "version", is.Version, "schemaURL", is.SchemaURL, "attributes", is.Attributes) } return t } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go index d2d1f72466b..9b672a1d70d 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go @@ -5,7 +5,6 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "errors" - "fmt" "os" "strconv" "strings" @@ -26,7 +25,7 @@ const ( type errUnsupportedSampler string func (e errUnsupportedSampler) Error() string { - return fmt.Sprintf("unsupported sampler: %s", string(e)) + return "unsupported sampler: " + string(e) } var ( @@ -39,7 +38,7 @@ type samplerArgParseError struct { } func (e samplerArgParseError) Error() string { - return fmt.Sprintf("parsing sampler argument: %s", e.parseErr.Error()) + return "parsing sampler argument: " + e.parseErr.Error() } func (e samplerArgParseError) Unwrap() error { diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go index 730fb85c3ef..8f4fc385082 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go @@ -347,54 +347,99 @@ func truncateAttr(limit int, attr attribute.KeyValue) attribute.KeyValue { } switch attr.Value.Type() { case attribute.STRING: - if v := attr.Value.AsString(); len(v) > limit { - return attr.Key.String(safeTruncate(v, limit)) - } + v := attr.Value.AsString() + return attr.Key.String(truncate(limit, v)) case attribute.STRINGSLICE: v := attr.Value.AsStringSlice() for i := range v { - if len(v[i]) > limit { - v[i] = safeTruncate(v[i], limit) - } + v[i] = truncate(limit, v[i]) } return attr.Key.StringSlice(v) } return attr } -// safeTruncate truncates the string and guarantees valid UTF-8 is returned. -func safeTruncate(input string, limit int) string { - if trunc, ok := safeTruncateValidUTF8(input, limit); ok { - return trunc +// truncate returns a truncated version of s such that it contains less than +// the limit number of characters. Truncation is applied by returning the limit +// number of valid characters contained in s. +// +// If limit is negative, it returns the original string. +// +// UTF-8 is supported. 
When truncating, all invalid characters are dropped +// before applying truncation. +// +// If s already contains less than the limit number of bytes, it is returned +// unchanged. No invalid characters are removed. +func truncate(limit int, s string) string { + // This prioritizes performance in the following order based on the most + // common expected use-cases. + // + // - Short values less than the default limit (128). + // - Strings with valid encodings that exceed the limit. + // - No limit. + // - Strings with invalid encodings that exceed the limit. + if limit < 0 || len(s) <= limit { + return s + } + + // Optimistically, assume all valid UTF-8. + var b strings.Builder + count := 0 + for i, c := range s { + if c != utf8.RuneError { + count++ + if count > limit { + return s[:i] + } + continue + } + + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // Invalid encoding. + b.Grow(len(s) - 1) + _, _ = b.WriteString(s[:i]) + s = s[i:] + break + } + } + + // Fast-path, no invalid input. + if b.Cap() == 0 { + return s } - trunc, _ := safeTruncateValidUTF8(strings.ToValidUTF8(input, ""), limit) - return trunc -} -// safeTruncateValidUTF8 returns a copy of the input string safely truncated to -// limit. The truncation is ensured to occur at the bounds of complete UTF-8 -// characters. If invalid encoding of UTF-8 is encountered, input is returned -// with false, otherwise, the truncated input will be returned with true. -func safeTruncateValidUTF8(input string, limit int) (string, bool) { - for cnt := 0; cnt <= limit; { - r, size := utf8.DecodeRuneInString(input[cnt:]) - if r == utf8.RuneError { - return input, false + // Truncate while validating UTF-8. + for i := 0; i < len(s) && count < limit; { + c := s[i] + if c < utf8.RuneSelf { + // Optimization for single byte runes (common case). + _ = b.WriteByte(c) + i++ + count++ + continue } - if cnt+size > limit { - return input[:cnt], true + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // We checked for all 1-byte runes above, this is a RuneError. + i++ + continue } - cnt += size + + _, _ = b.WriteString(s[i : i+size]) + i += size + count++ } - return input, true + + return b.String() } // End ends the span. This method does nothing if the span is already ended or // is not being recorded. // -// The only SpanOption currently supported is WithTimestamp which will set the -// end time for a Span's life-cycle. +// The only SpanEndOptions currently supported are [trace.WithTimestamp] and +// [trace.WithStackTrace]. // // If this method is called while panicking an error event is added to the // Span before ending it and the panic is continued. @@ -639,10 +684,7 @@ func (s *recordingSpan) dedupeAttrsFromRecord(record map[attribute.Key]int) { record[a.Key] = len(unique) - 1 } } - // s.attributes have element types of attribute.KeyValue. These types are - // not pointers and they themselves do not contain pointer fields, - // therefore the duplicate values do not need to be zeroed for them to be - // garbage collected. + clear(s.attributes[len(unique):]) // Erase unneeded elements to let GC collect objects.
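// attribute.Value can reference heap data (slice-valued attributes, for
// example), so zeroing the duplicate tail above is what lets that data be
// reclaimed before the slice is truncated to the unique prefix below.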
s.attributes = unique } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go index 7aababbbf2f..732669a17ad 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go @@ -69,6 +69,19 @@ func (sr *SpanRecorder) Started() []sdktrace.ReadWriteSpan { return dst } +// Reset clears the recorded spans. +// +// This method is safe to be called concurrently. +func (sr *SpanRecorder) Reset() { + sr.startedMu.Lock() + sr.endedMu.Lock() + defer sr.startedMu.Unlock() + defer sr.endedMu.Unlock() + + sr.started = nil + sr.ended = nil +} + // Ended returns a copy of all ended spans that have been recorded. // // This method is safe to be called concurrently. diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go index dc1eaa8e9d0..6b403851073 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/version.go @@ -5,5 +5,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk" // Version is the current release version of the OpenTelemetry SDK in use. func Version() string { - return "1.31.0" + return "1.34.0" } diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.18.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.18.0/README.md new file mode 100644 index 00000000000..5f03e01386f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.18.0/README.md @@ -0,0 +1,3 @@ +# Semconv v1.18.0 + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.18.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.18.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.18.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.18.0/doc.go new file mode 100644 index 00000000000..ff55fe79b58 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.18.0/doc.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package semconv implements OpenTelemetry semantic conventions. +// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the conventions +// as of the v1.18.0 version of the OpenTelemetry specification. +package semconv // import "go.opentelemetry.io/otel/semconv/v1.18.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.18.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.18.0/event.go new file mode 100644 index 00000000000..60ef182ffcd --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.18.0/event.go @@ -0,0 +1,188 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.18.0" + +import "go.opentelemetry.io/otel/attribute" + +// This semantic convention defines the attributes used to represent a feature +// flag evaluation as an event. +const ( + // FeatureFlagKeyKey is the attribute Key conforming to the + // "feature_flag.key" semantic conventions. It represents the unique + // identifier of the feature flag. 
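The new `SpanRecorder.Reset` makes it possible to reuse one recorder across test cases instead of rebuilding the provider. A test-style sketch against the vendored APIs:

```go
package example

import (
	"context"
	"testing"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	"go.opentelemetry.io/otel/sdk/trace/tracetest"
)

func TestRecorderReset(t *testing.T) {
	sr := tracetest.NewSpanRecorder()
	tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(sr))

	_, span := tp.Tracer("test").Start(context.Background(), "op")
	span.End()
	if got := len(sr.Ended()); got != 1 {
		t.Fatalf("expected 1 ended span, got %d", got)
	}

	sr.Reset() // drops both started and ended spans
	if got := len(sr.Ended()); got != 0 {
		t.Fatalf("expected 0 ended spans after Reset, got %d", got)
	}
}
```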
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'logo-color'
+	FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+	// FeatureFlagProviderNameKey is the attribute Key conforming to the
+	// "feature_flag.provider_name" semantic conventions. It represents the
+	// name of the service provider that performs the flag evaluation.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: stable
+	// Examples: 'Flag Manager'
+	FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+	// FeatureFlagVariantKey is the attribute Key conforming to the
+	// "feature_flag.variant" semantic conventions. It SHOULD be a semantic
+	// identifier for a value. If one is unavailable, a stringified version of
+	// the value can be used.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: stable
+	// Examples: 'red', 'true', 'on'
+	// Note: A semantic identifier, commonly referred to as a variant,
+	// provides a means for referring to a value without including the value
+	// itself. This can provide additional context for understanding the
+	// meaning behind a value.
+	// For example, the variant `red` may be used for the value `#c05543`.
+	//
+	// A stringified version of the value can be used in situations where a
+	// semantic identifier is unavailable. String representation of the value
+	// should be determined by the implementer.
+	FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+	return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+	return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. It SHOULD be a semantic
+// identifier for a value. If one is unavailable, a stringified version of the
+// value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+	return FeatureFlagVariantKey.String(val)
+}
+
+// RPC received/sent message.
+const (
+	// MessageTypeKey is the attribute Key conforming to the "message.type"
+	// semantic conventions. It represents whether this is a received or
+	// sent message.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessageTypeKey = attribute.Key("message.type")
+
+	// MessageIDKey is the attribute Key conforming to the "message.id"
+	// semantic conventions. It MUST be calculated as two different counters
+	// starting from `1`, one for sent messages and one for received messages.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Note: This way we guarantee that the values will be consistent between
+	// different implementations.
+	MessageIDKey = attribute.Key("message.id")
+
+	// MessageCompressedSizeKey is the attribute Key conforming to the
+	// "message.compressed_size" semantic conventions. It represents the
+	// compressed size of the message in bytes.
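A short sketch of the feature-flag helpers above attached to a span event; the function name and the semconv import alias are illustrative only:

```go
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.18.0"
	"go.opentelemetry.io/otel/trace"
)

// recordFlagEvaluation annotates span with a feature-flag evaluation event
// using the generated helpers (values taken from the documented Examples).
func recordFlagEvaluation(span trace.Span) {
	span.AddEvent("feature_flag", trace.WithAttributes(
		semconv.FeatureFlagKey("logo-color"),
		semconv.FeatureFlagProviderName("Flag Manager"),
		semconv.FeatureFlagVariant("red"),
	))
}
```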
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessageCompressedSizeKey = attribute.Key("message.compressed_size")
+
+	// MessageUncompressedSizeKey is the attribute Key conforming to the
+	// "message.uncompressed_size" semantic conventions. It represents the
+	// uncompressed size of the message in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
+)
+
+var (
+	// sent
+	MessageTypeSent = MessageTypeKey.String("SENT")
+	// received
+	MessageTypeReceived = MessageTypeKey.String("RECEIVED")
+)
+
+// MessageID returns an attribute KeyValue conforming to the "message.id"
+// semantic conventions. It MUST be calculated as two different counters
+// starting from `1`, one for sent messages and one for received messages.
+func MessageID(val int) attribute.KeyValue {
+	return MessageIDKey.Int(val)
+}
+
+// MessageCompressedSize returns an attribute KeyValue conforming to the
+// "message.compressed_size" semantic conventions. It represents the compressed
+// size of the message in bytes.
+func MessageCompressedSize(val int) attribute.KeyValue {
+	return MessageCompressedSizeKey.Int(val)
+}
+
+// MessageUncompressedSize returns an attribute KeyValue conforming to the
+// "message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func MessageUncompressedSize(val int) attribute.KeyValue {
+	return MessageUncompressedSizeKey.Int(val)
+}
+
+// The attributes used to report a single exception associated with a span.
+const (
+	// ExceptionEscapedKey is the attribute Key conforming to the
+	// "exception.escaped" semantic conventions. It SHOULD be set to true if
+	// the exception event is recorded at a point where it is known that the
+	// exception is escaping the scope of the span.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Note: An exception is considered to have escaped (or left) the scope of
+	// a span, if that span is ended while the exception is still logically
+	// "in flight".
+	// This may be actually "in flight" in some languages (e.g. if the
+	// exception is passed to a Context manager's `__exit__` method in Python)
+	// but will usually be caught at the point of recording the exception in
+	// most languages.
+	//
+	// It is usually not possible to determine at the point where an exception
+	// is thrown whether it will escape the scope of a span.
+	// However, it is trivial to know that an exception will escape, if one
+	// checks for an active exception just before ending the span,
+	// as done in the [example above](#recording-an-exception).
+	//
+	// It follows that an exception may still escape the scope of the span
+	// even if the `exception.escaped` attribute was not set or set to false,
+	// since the event might have been recorded at a time where it was not
+	// clear whether the exception will escape.
+	ExceptionEscapedKey = attribute.Key("exception.escaped")
+)
+
+// ExceptionEscaped returns an attribute KeyValue conforming to the
+// "exception.escaped" semantic conventions. It SHOULD be set to true if the
+// exception event is recorded at a point where it is known that the exception
+// is escaping the scope of the span.
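Similarly, a sketch of the message.* helpers on an RPC span event; the function is illustrative, and per the message.id convention the counter starts at 1 for each direction:

```go
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.18.0"
	"go.opentelemetry.io/otel/trace"
)

// recordSentMessage marks the n-th message sent on an RPC span.
func recordSentMessage(span trace.Span, n, size int) {
	span.AddEvent("message", trace.WithAttributes(
		semconv.MessageTypeSent,
		semconv.MessageID(n),
		semconv.MessageUncompressedSize(size),
	))
}
```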
+func ExceptionEscaped(val bool) attribute.KeyValue { + return ExceptionEscapedKey.Bool(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.18.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.18.0/exception.go new file mode 100644 index 00000000000..d7b2de12475 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.18.0/exception.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.18.0" + +const ( + // ExceptionEventName is the name of the Span event representing an exception. + ExceptionEventName = "exception" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.18.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.18.0/http.go new file mode 100644 index 00000000000..9c5d10fe556 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.18.0/http.go @@ -0,0 +1,10 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.18.0" + +// HTTP scheme attributes. +var ( + HTTPSchemeHTTP = HTTPSchemeKey.String("http") + HTTPSchemeHTTPS = HTTPSchemeKey.String("https") +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.18.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.18.0/resource.go new file mode 100644 index 00000000000..5f8c8fd4c5d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.18.0/resource.go @@ -0,0 +1,1999 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.18.0" + +import "go.opentelemetry.io/otel/attribute" + +// The web browser in which the application represented by the resource is +// running. The `browser.*` attributes MUST be used only for resources that +// represent applications running in a web browser (regardless of whether +// running on a mobile or desktop device). +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.brands`). + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserPlatformKey is the attribute Key conforming to the + // "browser.platform" semantic conventions. It represents the platform on + // which the browser is running + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute + // SHOULD be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client + // Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). 
+ // Note that some (but not all) of these values can overlap with values in + // the [`os.type` and `os.name` attributes](./os.md). However, for + // consistency, the values in the `browser.platform` attribute should + // capture the exact value that the user agent provides. + BrowserPlatformKey = attribute.Key("browser.platform") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the + // browser is running on a mobile device + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.mobile`). If unavailable, this attribute + // SHOULD be left unset. + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserUserAgentKey is the attribute Key conforming to the + // "browser.user_agent" semantic conventions. It represents the full + // user-agent string provided by the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) + // AppleWebKit/537.36 (KHTML, ' + // 'like Gecko) Chrome/95.0.4638.54 Safari/537.36' + // Note: The user-agent value SHOULD be provided only from browsers that do + // not have a mechanism to retrieve brands and platform individually from + // the User-Agent Client Hints API. To retrieve the value, the legacy + // `navigator.userAgent` API can be used. + BrowserUserAgentKey = attribute.Key("browser.user_agent") + + // BrowserLanguageKey is the attribute Key conforming to the + // "browser.language" semantic conventions. It represents the preferred + // language of the user using the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") +) + +// BrowserBrands returns an attribute KeyValue conforming to the +// "browser.brands" semantic conventions. It represents the array of brand name +// and version separated by a space +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the +// "browser.mobile" semantic conventions. It represents a boolean that is true +// if the browser is running on a mobile device +func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserUserAgent returns an attribute KeyValue conforming to the +// "browser.user_agent" semantic conventions. It represents the full user-agent +// string provided by the browser +func BrowserUserAgent(val string) attribute.KeyValue { + return BrowserUserAgentKey.String(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred +// language of the user using the browser +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// A cloud environment (e.g. 
GCP, Azure, AWS) +const ( + // CloudProviderKey is the attribute Key conforming to the "cloud.provider" + // semantic conventions. It represents the name of the cloud provider. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + CloudProviderKey = attribute.Key("cloud.provider") + + // CloudAccountIDKey is the attribute Key conforming to the + // "cloud.account.id" semantic conventions. It represents the cloud account + // ID the resource is assigned to. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '111111111111', 'opentelemetry' + CloudAccountIDKey = attribute.Key("cloud.account.id") + + // CloudRegionKey is the attribute Key conforming to the "cloud.region" + // semantic conventions. It represents the geographical region the resource + // is running. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'us-central1', 'us-east-1' + // Note: Refer to your provider's docs to see the available regions, for + // example [Alibaba Cloud + // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS + // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), + // [Azure + // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/), + // [Google Cloud regions](https://cloud.google.com/about/locations), or + // [Tencent Cloud + // regions](https://intl.cloud.tencent.com/document/product/213/6091). + CloudRegionKey = attribute.Key("cloud.region") + + // CloudAvailabilityZoneKey is the attribute Key conforming to the + // "cloud.availability_zone" semantic conventions. It represents the cloud + // regions often have multiple, isolated locations known as zones to + // increase availability. Availability zone represents the zone where the + // resource is running. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'us-east-1c' + // Note: Availability zones are called "zones" on Alibaba Cloud and Google + // Cloud. + CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") + + // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" + // semantic conventions. It represents the cloud platform in use. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: The prefix of the service SHOULD match the one specified in + // `cloud.provider`. 
+ CloudPlatformKey = attribute.Key("cloud.platform") +) + +var ( + // Alibaba Cloud + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + CloudProviderGCP = CloudProviderKey.String("gcp") + // IBM Cloud + CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") + // Tencent Cloud + CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") +) + +var ( + // Alibaba Cloud Elastic Compute Service + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") + // Red Hat OpenShift on Alibaba Cloud + CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") + // AWS Elastic Compute Cloud + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Red Hat OpenShift on AWS (ROSA) + CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") + // Azure Virtual Machines + CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + // Azure Container Instances + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + // Azure Kubernetes Service + CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + // Azure Functions + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + // Azure App Service + CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + // Azure Red Hat OpenShift + CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") + // Google Cloud Compute Engine (GCE) + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Red Hat OpenShift on Google Cloud + CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") + // Red Hat OpenShift on IBM Cloud + CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") + // Tencent Cloud Cloud Virtual Machine (CVM) + CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") + // Tencent Cloud Elastic Kubernetes Service (EKS) + CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") + // Tencent Cloud Serverless Cloud Function (SCF) + CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") +) + +// CloudAccountID returns an attribute KeyValue conforming to the +// "cloud.account.id" semantic conventions. 
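A sketch of the cloud.* helpers above feeding an SDK resource; it assumes `resource.NewWithAttributes` from go.opentelemetry.io/otel/sdk/resource and the `SchemaURL` constant defined elsewhere in this vendored semconv package:

```go
package example

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.18.0"
)

// cloudResource describes an EKS workload with the cloud.* helpers above.
func cloudResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL, // assumed schema constant for v1.18.0
		semconv.CloudProviderAWS,
		semconv.CloudPlatformAWSEKS,
		semconv.CloudRegion("us-east-1"),
		semconv.CloudAvailabilityZone("us-east-1c"),
	)
}
```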
It represents the cloud account ID +// the resource is assigned to. +func CloudAccountID(val string) attribute.KeyValue { + return CloudAccountIDKey.String(val) +} + +// CloudRegion returns an attribute KeyValue conforming to the +// "cloud.region" semantic conventions. It represents the geographical region +// the resource is running. +func CloudRegion(val string) attribute.KeyValue { + return CloudRegionKey.String(val) +} + +// CloudAvailabilityZone returns an attribute KeyValue conforming to the +// "cloud.availability_zone" semantic conventions. It represents the cloud +// regions often have multiple, isolated locations known as zones to increase +// availability. Availability zone represents the zone where the resource is +// running. +func CloudAvailabilityZone(val string) attribute.KeyValue { + return CloudAvailabilityZoneKey.String(val) +} + +// Resources used by AWS Elastic Container Service (ECS). +const ( + // AWSECSContainerARNKey is the attribute Key conforming to the + // "aws.ecs.container.arn" semantic conventions. It represents the Amazon + // Resource Name (ARN) of an [ECS container + // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + + // AWSECSClusterARNKey is the attribute Key conforming to the + // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an + // [ECS + // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + + // AWSECSLaunchtypeKey is the attribute Key conforming to the + // "aws.ecs.launchtype" semantic conventions. It represents the [launch + // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) + // for an ECS task. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + + // AWSECSTaskARNKey is the attribute Key conforming to the + // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an + // [ECS task + // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the task + // definition family this task definition is a member of. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision + // for this task definition. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container +// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS +// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS +// task +// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the task +// definition family this task definition is a member of. +func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// this task definition. +func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + +// Resources used by AWS Elastic Kubernetes Service (EKS). +const ( + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an + // EKS cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") +) + +// AWSEKSClusterARN returns an attribute KeyValue conforming to the +// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS +// cluster. +func AWSEKSClusterARN(val string) attribute.KeyValue { + return AWSEKSClusterARNKey.String(val) +} + +// Resources specific to Amazon Web Services. +const ( + // AWSLogGroupNamesKey is the attribute Key conforming to the + // "aws.log.group.names" semantic conventions. It represents the name(s) of + // the AWS log group(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like + // multi-container applications, where a single application has sidecar + // containers, and each write to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + + // AWSLogGroupARNsKey is the attribute Key conforming to the + // "aws.log.group.arns" semantic conventions. It represents the Amazon + // Resource Name(s) (ARN) of the AWS log group(s). 
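A sketch of the aws.ecs.* helpers above for a Fargate task; the variable is illustrative and the values come from the documented Examples:

```go
package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.18.0"
)

// ecsTaskAttrs describes a Fargate task using the generated helpers.
var ecsTaskAttrs = []attribute.KeyValue{
	semconv.AWSECSLaunchtypeFargate,
	semconv.AWSECSClusterARN("arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"),
	semconv.AWSECSTaskFamily("opentelemetry-family"),
	semconv.AWSECSTaskRevision("8"),
}
```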
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the [log group ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + + // AWSLogStreamNamesKey is the attribute Key conforming to the + // "aws.log.stream.names" semantic conventions. It represents the name(s) + // of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of + // the AWS log stream(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the [log stream ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + // One log group can contain several log streams, so these ARNs necessarily + // identify both a log group and a log stream. + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") +) + +// AWSLogGroupNames returns an attribute KeyValue conforming to the +// "aws.log.group.names" semantic conventions. It represents the name(s) of the +// AWS log group(s) an application is writing to. +func AWSLogGroupNames(val ...string) attribute.KeyValue { + return AWSLogGroupNamesKey.StringSlice(val) +} + +// AWSLogGroupARNs returns an attribute KeyValue conforming to the +// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource +// Name(s) (ARN) of the AWS log group(s). +func AWSLogGroupARNs(val ...string) attribute.KeyValue { + return AWSLogGroupARNsKey.StringSlice(val) +} + +// AWSLogStreamNames returns an attribute KeyValue conforming to the +// "aws.log.stream.names" semantic conventions. It represents the name(s) of +// the AWS log stream(s) an application is writing to. +func AWSLogStreamNames(val ...string) attribute.KeyValue { + return AWSLogStreamNamesKey.StringSlice(val) +} + +// AWSLogStreamARNs returns an attribute KeyValue conforming to the +// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the +// AWS log stream(s). +func AWSLogStreamARNs(val ...string) attribute.KeyValue { + return AWSLogStreamARNsKey.StringSlice(val) +} + +// A container instance. +const ( + // ContainerNameKey is the attribute Key conforming to the "container.name" + // semantic conventions. It represents the container name used by container + // runtime. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-autoconf' + ContainerNameKey = attribute.Key("container.name") + + // ContainerIDKey is the attribute Key conforming to the "container.id" + // semantic conventions. It represents the container ID. Usually a UUID, as + // for example used to [identify Docker + // containers](https://docs.docker.com/engine/reference/run/#container-identification). + // The UUID might be abbreviated. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'a3bf90e006b2' + ContainerIDKey = attribute.Key("container.id") + + // ContainerRuntimeKey is the attribute Key conforming to the + // "container.runtime" semantic conventions. It represents the container + // runtime managing this container. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'docker', 'containerd', 'rkt' + ContainerRuntimeKey = attribute.Key("container.runtime") + + // ContainerImageNameKey is the attribute Key conforming to the + // "container.image.name" semantic conventions. It represents the name of + // the image the container was built on. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'gcr.io/opentelemetry/operator' + ContainerImageNameKey = attribute.Key("container.image.name") + + // ContainerImageTagKey is the attribute Key conforming to the + // "container.image.tag" semantic conventions. It represents the container + // image tag. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0.1' + ContainerImageTagKey = attribute.Key("container.image.tag") +) + +// ContainerName returns an attribute KeyValue conforming to the +// "container.name" semantic conventions. It represents the container name used +// by container runtime. +func ContainerName(val string) attribute.KeyValue { + return ContainerNameKey.String(val) +} + +// ContainerID returns an attribute KeyValue conforming to the +// "container.id" semantic conventions. It represents the container ID. Usually +// a UUID, as for example used to [identify Docker +// containers](https://docs.docker.com/engine/reference/run/#container-identification). +// The UUID might be abbreviated. +func ContainerID(val string) attribute.KeyValue { + return ContainerIDKey.String(val) +} + +// ContainerRuntime returns an attribute KeyValue conforming to the +// "container.runtime" semantic conventions. It represents the container +// runtime managing this container. +func ContainerRuntime(val string) attribute.KeyValue { + return ContainerRuntimeKey.String(val) +} + +// ContainerImageName returns an attribute KeyValue conforming to the +// "container.image.name" semantic conventions. It represents the name of the +// image the container was built on. +func ContainerImageName(val string) attribute.KeyValue { + return ContainerImageNameKey.String(val) +} + +// ContainerImageTag returns an attribute KeyValue conforming to the +// "container.image.tag" semantic conventions. It represents the container +// image tag. +func ContainerImageTag(val string) attribute.KeyValue { + return ContainerImageTagKey.String(val) +} + +// The software deployment. +const ( + // DeploymentEnvironmentKey is the attribute Key conforming to the + // "deployment.environment" semantic conventions. It represents the name of + // the [deployment + // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka + // deployment tier). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'staging', 'production' + DeploymentEnvironmentKey = attribute.Key("deployment.environment") +) + +// DeploymentEnvironment returns an attribute KeyValue conforming to the +// "deployment.environment" semantic conventions. It represents the name of the +// [deployment +// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka +// deployment tier). 
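A sketch combining the container.* and deployment.environment helpers above; the variable is illustrative and the values come from the documented Examples:

```go
package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.18.0"
)

// containerAttrs identifies a container and its deployment tier.
var containerAttrs = []attribute.KeyValue{
	semconv.ContainerName("opentelemetry-autoconf"),
	semconv.ContainerID("a3bf90e006b2"),
	semconv.ContainerImageName("gcr.io/opentelemetry/operator"),
	semconv.ContainerImageTag("0.1"),
	semconv.DeploymentEnvironment("staging"),
}
```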
+func DeploymentEnvironment(val string) attribute.KeyValue { + return DeploymentEnvironmentKey.String(val) +} + +// The device on which the process represented by this resource is running. +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values + // outlined below. This value is not an advertising identifier and MUST NOT + // be used as such. On iOS (Swift or Objective-C), this value MUST be equal + // to the [vendor + // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). + // On Android (Java or Kotlin), this value MUST be equal to the Firebase + // Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found + // [here](https://developer.android.com/training/articles/user-data-ids) on + // best practices and exact implementation details. Caution should be taken + // when storing personal data or anything which can identify a user. GDPR + // and data protection laws may apply, ensure you do your own due + // diligence. + DeviceIDKey = attribute.Key("device.id") + + // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. It represents the model + // identifier for the device + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine readable version + // of the model identifier rather than the market or consumer-friendly name + // of the device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + + // DeviceModelNameKey is the attribute Key conforming to the + // "device.model.name" semantic conventions. It represents the marketing + // name for the device model + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human readable version of + // the device model rather than a machine readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") + + // DeviceManufacturerKey is the attribute Key conforming to the + // "device.manufacturer" semantic conventions. It represents the name of + // the device manufacturer + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via + // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). + // iOS apps SHOULD hardcode the value `Apple`. + DeviceManufacturerKey = attribute.Key("device.manufacturer") +) + +// DeviceID returns an attribute KeyValue conforming to the "device.id" +// semantic conventions. It represents a unique identifier representing the +// device +func DeviceID(val string) attribute.KeyValue { + return DeviceIDKey.String(val) +} + +// DeviceModelIdentifier returns an attribute KeyValue conforming to the +// "device.model.identifier" semantic conventions. 
It represents the model +// identifier for the device +func DeviceModelIdentifier(val string) attribute.KeyValue { + return DeviceModelIdentifierKey.String(val) +} + +// DeviceModelName returns an attribute KeyValue conforming to the +// "device.model.name" semantic conventions. It represents the marketing name +// for the device model +func DeviceModelName(val string) attribute.KeyValue { + return DeviceModelNameKey.String(val) +} + +// DeviceManufacturer returns an attribute KeyValue conforming to the +// "device.manufacturer" semantic conventions. It represents the name of the +// device manufacturer +func DeviceManufacturer(val string) attribute.KeyValue { + return DeviceManufacturerKey.String(val) +} + +// A serverless instance. +const ( + // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic + // conventions. It represents the name of the single function that this + // runtime instance executes. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the + // FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes) + // span attributes). + // + // For some cloud providers, the above definition is ambiguous. The + // following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud + // providers/products: + // + // * **Azure:** The full name `/`, i.e., function app name + // followed by a forward slash followed by the function name (this form + // can also be seen in the resource JSON for the function). + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider (see also the `faas.id` attribute). + FaaSNameKey = attribute.Key("faas.name") + + // FaaSIDKey is the attribute Key conforming to the "faas.id" semantic + // conventions. It represents the unique ID of the single function that + // this runtime instance executes. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function' + // Note: On some cloud providers, it may not be possible to determine the + // full ID at startup, + // so consider setting `faas.id` as a span attribute instead. + // + // The exact value to use for `faas.id` depends on the cloud provider: + // + // * **AWS Lambda:** The function + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + // Take care not to use the "invoked ARN" directly but replace any + // [alias + // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) + // with the resolved function version, as the same runtime instance may + // be invokable with + // multiple different aliases. + // * **GCP:** The [URI of the + // resource](https://cloud.google.com/iam/docs/full-resource-names) + // * **Azure:** The [Fully Qualified Resource + // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id) + // of the invoked function, + // *not* the function app, having the form + // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. 
+ // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider. + FaaSIDKey = attribute.Key("faas.id") + + // FaaSVersionKey is the attribute Key conforming to the "faas.version" + // semantic conventions. It represents the immutable version of the + // function being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use: + // + // * **AWS Lambda:** The [function + // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) + // (an integer represented as a decimal string). + // * **Google Cloud Run:** The + // [revision](https://cloud.google.com/run/docs/managing/revisions) + // (i.e., the function name plus the revision suffix). + // * **Google Cloud Functions:** The value of the + // [`K_REVISION` environment + // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). + // * **Azure Functions:** Not applicable. Do not set this attribute. + FaaSVersionKey = attribute.Key("faas.version") + + // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" + // semantic conventions. It represents the execution environment ID as a + // string, that will be potentially reused for other invocations to the + // same function/function version. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note: * **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") + + // FaaSMaxMemoryKey is the attribute Key conforming to the + // "faas.max_memory" semantic conventions. It represents the amount of + // memory available to the serverless function in MiB. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 128 + // Note: It's recommended to set this attribute since e.g. too little + // memory can easily stop a Java AWS Lambda function from working + // correctly. On AWS Lambda, the environment variable + // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information. + FaaSMaxMemoryKey = attribute.Key("faas.max_memory") +) + +// FaaSName returns an attribute KeyValue conforming to the "faas.name" +// semantic conventions. It represents the name of the single function that +// this runtime instance executes. +func FaaSName(val string) attribute.KeyValue { + return FaaSNameKey.String(val) +} + +// FaaSID returns an attribute KeyValue conforming to the "faas.id" semantic +// conventions. It represents the unique ID of the single function that this +// runtime instance executes. +func FaaSID(val string) attribute.KeyValue { + return FaaSIDKey.String(val) +} + +// FaaSVersion returns an attribute KeyValue conforming to the +// "faas.version" semantic conventions. It represents the immutable version of +// the function being executed. +func FaaSVersion(val string) attribute.KeyValue { + return FaaSVersionKey.String(val) +} + +// FaaSInstance returns an attribute KeyValue conforming to the +// "faas.instance" semantic conventions. It represents the execution +// environment ID as a string, that will be potentially reused for other +// invocations to the same function/function version. 
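A sketch of the faas.* helpers above for an AWS Lambda instance; the variable is illustrative, and note that faas.id uses the resolved function ARN, not the invoked alias ARN:

```go
package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.18.0"
)

// lambdaAttrs describes an AWS Lambda function instance.
var lambdaAttrs = []attribute.KeyValue{
	semconv.FaaSName("my-function"),
	semconv.FaaSID("arn:aws:lambda:us-west-2:123456789012:function:my-function"),
	semconv.FaaSVersion("26"),
	semconv.FaaSMaxMemory(128), // MiB, e.g. from AWS_LAMBDA_FUNCTION_MEMORY_SIZE
}
```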
+func FaaSInstance(val string) attribute.KeyValue {
+	return FaaSInstanceKey.String(val)
+}
+
+// FaaSMaxMemory returns an attribute KeyValue conforming to the
+// "faas.max_memory" semantic conventions. It represents the amount of memory
+// available to the serverless function in MiB.
+func FaaSMaxMemory(val int) attribute.KeyValue {
+	return FaaSMaxMemoryKey.Int(val)
+}
+
+// A host is defined as a general computing instance.
+const (
+	// HostIDKey is the attribute Key conforming to the "host.id" semantic
+	// conventions. It represents the unique host ID. For Cloud, this must be
+	// the instance_id assigned by the cloud provider. For non-containerized
+	// Linux systems, the `machine-id` located in `/etc/machine-id` or
+	// `/var/lib/dbus/machine-id` may be used.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'fdbf79e8af94cb7f9e8df36789187052'
+	HostIDKey = attribute.Key("host.id")
+
+	// HostNameKey is the attribute Key conforming to the "host.name" semantic
+	// conventions. It represents the name of the host. On Unix systems, it may
+	// contain what the hostname command returns, or the fully qualified
+	// hostname, or another name specified by the user.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'opentelemetry-test'
+	HostNameKey = attribute.Key("host.name")
+
+	// HostTypeKey is the attribute Key conforming to the "host.type" semantic
+	// conventions. It represents the type of host. For Cloud, this must be the
+	// machine type.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'n1-standard-1'
+	HostTypeKey = attribute.Key("host.type")
+
+	// HostArchKey is the attribute Key conforming to the "host.arch" semantic
+	// conventions. It represents the CPU architecture the host system is
+	// running on.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	HostArchKey = attribute.Key("host.arch")
+
+	// HostImageNameKey is the attribute Key conforming to the
+	// "host.image.name" semantic conventions. It represents the name of the VM
+	// image or OS install the host was instantiated from.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+	HostImageNameKey = attribute.Key("host.image.name")
+
+	// HostImageIDKey is the attribute Key conforming to the "host.image.id"
+	// semantic conventions. It represents the VM image ID. For Cloud, this
+	// value is from the provider.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'ami-07b06b442921831e5'
+	HostImageIDKey = attribute.Key("host.image.id")
+
+	// HostImageVersionKey is the attribute Key conforming to the
+	// "host.image.version" semantic conventions. It represents the version
+	// string of the VM image as defined in [Version
+	// Attributes](README.md#version-attributes).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '0.1'
+	HostImageVersionKey = attribute.Key("host.image.version")
+)
+
+var (
+	// AMD64
+	HostArchAMD64 = HostArchKey.String("amd64")
+	// ARM32
+	HostArchARM32 = HostArchKey.String("arm32")
+	// ARM64
+	HostArchARM64 = HostArchKey.String("arm64")
+	// Itanium
+	HostArchIA64 = HostArchKey.String("ia64")
+	// 32-bit PowerPC
+	HostArchPPC32 = HostArchKey.String("ppc32")
+	// 64-bit PowerPC
+	HostArchPPC64 = HostArchKey.String("ppc64")
+	// IBM z/Architecture
+	HostArchS390x = HostArchKey.String("s390x")
+	// 32-bit x86
+	HostArchX86 = HostArchKey.String("x86")
+)
+
+// HostID returns an attribute KeyValue conforming to the "host.id" semantic
+// conventions. It represents the unique host ID. For Cloud, this must be the
+// instance_id assigned by the cloud provider. For non-containerized Linux
+// systems, the `machine-id` located in `/etc/machine-id` or
+// `/var/lib/dbus/machine-id` may be used.
+func HostID(val string) attribute.KeyValue {
+	return HostIDKey.String(val)
+}
+
+// HostName returns an attribute KeyValue conforming to the "host.name"
+// semantic conventions. It represents the name of the host. On Unix systems,
+// it may contain what the hostname command returns, or the fully qualified
+// hostname, or another name specified by the user.
+func HostName(val string) attribute.KeyValue {
+	return HostNameKey.String(val)
+}
+
+// HostType returns an attribute KeyValue conforming to the "host.type"
+// semantic conventions. It represents the type of host. For Cloud, this must
+// be the machine type.
+func HostType(val string) attribute.KeyValue {
+	return HostTypeKey.String(val)
+}
+
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM
+// image or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue {
+	return HostImageNameKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the
+// "host.image.id" semantic conventions. It represents the VM image ID. For
+// Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+	return HostImageIDKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string
+// of the VM image as defined in [Version
+// Attributes](README.md#version-attributes).
+func HostImageVersion(val string) attribute.KeyValue {
+	return HostImageVersionKey.String(val)
+}
+
+// A Kubernetes Cluster.
+const (
+	// K8SClusterNameKey is the attribute Key conforming to the
+	// "k8s.cluster.name" semantic conventions. It represents the name of the
+	// cluster.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'opentelemetry-cluster'
+	K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+)
+
+// K8SClusterName returns an attribute KeyValue conforming to the
+// "k8s.cluster.name" semantic conventions. It represents the name of the
+// cluster.
+func K8SClusterName(val string) attribute.KeyValue {
+	return K8SClusterNameKey.String(val)
+}
+
+// A Kubernetes Node object.
+const (
+	// K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
+	// semantic conventions. It represents the name of the Node.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'node-1' + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" + // semantic conventions. It represents the UID of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + K8SNodeUIDKey = attribute.Key("k8s.node.uid") +) + +// K8SNodeName returns an attribute KeyValue conforming to the +// "k8s.node.name" semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + +// A Kubernetes Namespace. +const ( + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'default' + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") +) + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// A Kubernetes Pod object. +const ( + // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" + // semantic conventions. It represents the UID of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" + // semantic conventions. It represents the name of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-pod-autoconf' + K8SPodNameKey = attribute.Key("k8s.pod.name") +) + +// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" +// semantic conventions. It represents the UID of the Pod. +func K8SPodUID(val string) attribute.KeyValue { + return K8SPodUIDKey.String(val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. +func K8SPodName(val string) attribute.KeyValue { + return K8SPodNameKey.String(val) +} + +// A container in a +// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). +const ( + // K8SContainerNameKey is the attribute Key conforming to the + // "k8s.container.name" semantic conventions. It represents the name of the + // Container from Pod specification, must be unique within a Pod. Container + // runtime usually uses different globally unique name (`container.name`). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'redis' + K8SContainerNameKey = attribute.Key("k8s.container.name") + + // K8SContainerRestartCountKey is the attribute Key conforming to the + // "k8s.container.restart_count" semantic conventions. It represents the + // number of times the container was restarted. 
This attribute can be used + // to identify a particular container (running or stopped) within a + // container spec. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 0, 2 + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") +) + +// K8SContainerName returns an attribute KeyValue conforming to the +// "k8s.container.name" semantic conventions. It represents the name of the +// Container from Pod specification, must be unique within a Pod. Container +// runtime usually uses different globally unique name (`container.name`). +func K8SContainerName(val string) attribute.KeyValue { + return K8SContainerNameKey.String(val) +} + +// K8SContainerRestartCount returns an attribute KeyValue conforming to the +// "k8s.container.restart_count" semantic conventions. It represents the number +// of times the container was restarted. This attribute can be used to identify +// a particular container (running or stopped) within a container spec. +func K8SContainerRestartCount(val int) attribute.KeyValue { + return K8SContainerRestartCountKey.Int(val) +} + +// A Kubernetes ReplicaSet object. +const ( + // K8SReplicaSetUIDKey is the attribute Key conforming to the + // "k8s.replicaset.uid" semantic conventions. It represents the UID of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of + // the ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") +) + +// K8SReplicaSetUID returns an attribute KeyValue conforming to the +// "k8s.replicaset.uid" semantic conventions. It represents the UID of the +// ReplicaSet. +func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) +} + +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) +} + +// A Kubernetes Deployment object. +const ( + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of + // the Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") +) + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. +func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. 
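+//
+// Illustrative usage (editor's sketch, not part of the generated code),
+// assuming "go.opentelemetry.io/otel/sdk/resource" is imported as resource
+// and this package as semconv:
+//
+//	res := resource.NewWithAttributes(semconv.SchemaURL,
+//		semconv.K8SNamespaceName("default"),
+//		semconv.K8SPodName("opentelemetry-pod-autoconf"),
+//		semconv.K8SDeploymentName("opentelemetry"),
+//	)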
+func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// A Kubernetes StatefulSet object. +const ( + // K8SStatefulSetUIDKey is the attribute Key conforming to the + // "k8s.statefulset.uid" semantic conventions. It represents the UID of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") + + // K8SStatefulSetNameKey is the attribute Key conforming to the + // "k8s.statefulset.name" semantic conventions. It represents the name of + // the StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") +) + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// A Kubernetes DaemonSet object. +const ( + // K8SDaemonSetUIDKey is the attribute Key conforming to the + // "k8s.daemonset.uid" semantic conventions. It represents the UID of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") +) + +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet. +func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet. +func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + +// A Kubernetes Job object. +const ( + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" + // semantic conventions. It represents the UID of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" + // semantic conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") +) + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. 
+func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// A Kubernetes CronJob object. +const ( + // K8SCronJobUIDKey is the attribute Key conforming to the + // "k8s.cronjob.uid" semantic conventions. It represents the UID of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SCronJobNameKey is the attribute Key conforming to the + // "k8s.cronjob.name" semantic conventions. It represents the name of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") +) + +// K8SCronJobUID returns an attribute KeyValue conforming to the +// "k8s.cronjob.uid" semantic conventions. It represents the UID of the +// CronJob. +func K8SCronJobUID(val string) attribute.KeyValue { + return K8SCronJobUIDKey.String(val) +} + +// K8SCronJobName returns an attribute KeyValue conforming to the +// "k8s.cronjob.name" semantic conventions. It represents the name of the +// CronJob. +func K8SCronJobName(val string) attribute.KeyValue { + return K8SCronJobNameKey.String(val) +} + +// The operating system (OS) on which the process represented by this resource +// is running. +const ( + // OSTypeKey is the attribute Key conforming to the "os.type" semantic + // conventions. It represents the operating system type. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + OSTypeKey = attribute.Key("os.type") + + // OSDescriptionKey is the attribute Key conforming to the "os.description" + // semantic conventions. It represents the human readable (not intended to + // be parsed) OS version information, like e.g. reported by `ver` or + // `lsb_release -a` commands. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 + // LTS' + OSDescriptionKey = attribute.Key("os.description") + + // OSNameKey is the attribute Key conforming to the "os.name" semantic + // conventions. It represents the human readable operating system name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iOS', 'Android', 'Ubuntu' + OSNameKey = attribute.Key("os.name") + + // OSVersionKey is the attribute Key conforming to the "os.version" + // semantic conventions. It represents the version string of the operating + // system as defined in [Version + // Attributes](../../resource/semantic_conventions/README.md#version-attributes). 
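+ //
+ // Illustrative usage (editor's sketch, not part of the generated
+ // code), assuming "go.opentelemetry.io/otel/sdk/resource" is imported
+ // as resource and this package as semconv:
+ //
+ //	res := resource.NewWithAttributes(semconv.SchemaURL,
+ //		semconv.OSTypeLinux,
+ //		semconv.OSName("Ubuntu"),
+ //		semconv.OSVersion("18.04.1"),
+ //	)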
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '14.2.1', '18.04.1' + OSVersionKey = attribute.Key("os.version") +) + +var ( + // Microsoft Windows + OSTypeWindows = OSTypeKey.String("windows") + // Linux + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + OSTypeZOS = OSTypeKey.String("z_os") +) + +// OSDescription returns an attribute KeyValue conforming to the +// "os.description" semantic conventions. It represents the human readable (not +// intended to be parsed) OS version information, like e.g. reported by `ver` +// or `lsb_release -a` commands. +func OSDescription(val string) attribute.KeyValue { + return OSDescriptionKey.String(val) +} + +// OSName returns an attribute KeyValue conforming to the "os.name" semantic +// conventions. It represents the human readable operating system name. +func OSName(val string) attribute.KeyValue { + return OSNameKey.String(val) +} + +// OSVersion returns an attribute KeyValue conforming to the "os.version" +// semantic conventions. It represents the version string of the operating +// system as defined in [Version +// Attributes](../../resource/semantic_conventions/README.md#version-attributes). +func OSVersion(val string) attribute.KeyValue { + return OSVersionKey.String(val) +} + +// An operating system process. +const ( + // ProcessPIDKey is the attribute Key conforming to the "process.pid" + // semantic conventions. It represents the process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent + // Process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + + // ProcessExecutableNameKey is the attribute Key conforming to the + // "process.executable.name" semantic conventions. It represents the name + // of the process executable. On Linux based systems, can be set to the + // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name + // of `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'otelcol' + ProcessExecutableNameKey = attribute.Key("process.executable.name") + + // ProcessExecutablePathKey is the attribute Key conforming to the + // "process.executable.path" semantic conventions. It represents the full + // path to the process executable. On Linux based systems, can be set to + // the target of `proc/[pid]/exe`. On Windows, can be set to the result of + // `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) 
+ // Stability: stable + // Examples: '/usr/bin/cmd/otelcol' + ProcessExecutablePathKey = attribute.Key("process.executable.path") + + // ProcessCommandKey is the attribute Key conforming to the + // "process.command" semantic conventions. It represents the command used + // to launch the process (i.e. the command name). On Linux based systems, + // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can + // be set to the first parameter extracted from `GetCommandLineW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'cmd/otelcol' + ProcessCommandKey = attribute.Key("process.command") + + // ProcessCommandLineKey is the attribute Key conforming to the + // "process.command_line" semantic conventions. It represents the full + // command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of `GetCommandLineW`. + // Do not set this if you have to assemble it just for monitoring; use + // `process.command_args` instead. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' + ProcessCommandLineKey = attribute.Key("process.command_line") + + // ProcessCommandArgsKey is the attribute Key conforming to the + // "process.command_args" semantic conventions. It represents the all the + // command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited + // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, + // this would be the full argv vector passed to `main`. + // + // Type: string[] + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'cmd/otecol', '--config=config.yaml' + ProcessCommandArgsKey = attribute.Key("process.command_args") + + // ProcessOwnerKey is the attribute Key conforming to the "process.owner" + // semantic conventions. It represents the username of the user that owns + // the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'root' + ProcessOwnerKey = attribute.Key("process.owner") +) + +// ProcessPID returns an attribute KeyValue conforming to the "process.pid" +// semantic conventions. It represents the process identifier (PID). +func ProcessPID(val int) attribute.KeyValue { + return ProcessPIDKey.Int(val) +} + +// ProcessParentPID returns an attribute KeyValue conforming to the +// "process.parent_pid" semantic conventions. It represents the parent Process +// identifier (PID). +func ProcessParentPID(val int) attribute.KeyValue { + return ProcessParentPIDKey.Int(val) +} + +// ProcessExecutableName returns an attribute KeyValue conforming to the +// "process.executable.name" semantic conventions. It represents the name of +// the process executable. On Linux based systems, can be set to the `Name` in +// `proc/[pid]/status`. On Windows, can be set to the base name of +// `GetProcessImageFileNameW`. +func ProcessExecutableName(val string) attribute.KeyValue { + return ProcessExecutableNameKey.String(val) +} + +// ProcessExecutablePath returns an attribute KeyValue conforming to the +// "process.executable.path" semantic conventions. 
It represents the full path +// to the process executable. On Linux based systems, can be set to the target +// of `proc/[pid]/exe`. On Windows, can be set to the result of +// `GetProcessImageFileNameW`. +func ProcessExecutablePath(val string) attribute.KeyValue { + return ProcessExecutablePathKey.String(val) +} + +// ProcessCommand returns an attribute KeyValue conforming to the +// "process.command" semantic conventions. It represents the command used to +// launch the process (i.e. the command name). On Linux based systems, can be +// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to +// the first parameter extracted from `GetCommandLineW`. +func ProcessCommand(val string) attribute.KeyValue { + return ProcessCommandKey.String(val) +} + +// ProcessCommandLine returns an attribute KeyValue conforming to the +// "process.command_line" semantic conventions. It represents the full command +// used to launch the process as a single string representing the full command. +// On Windows, can be set to the result of `GetCommandLineW`. Do not set this +// if you have to assemble it just for monitoring; use `process.command_args` +// instead. +func ProcessCommandLine(val string) attribute.KeyValue { + return ProcessCommandLineKey.String(val) +} + +// ProcessCommandArgs returns an attribute KeyValue conforming to the +// "process.command_args" semantic conventions. It represents the all the +// command arguments (including the command/executable itself) as received by +// the process. On Linux-based systems (and some other Unixoid systems +// supporting procfs), can be set according to the list of null-delimited +// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, +// this would be the full argv vector passed to `main`. +func ProcessCommandArgs(val ...string) attribute.KeyValue { + return ProcessCommandArgsKey.StringSlice(val) +} + +// ProcessOwner returns an attribute KeyValue conforming to the +// "process.owner" semantic conventions. It represents the username of the user +// that owns the process. +func ProcessOwner(val string) attribute.KeyValue { + return ProcessOwnerKey.String(val) +} + +// The single (language) runtime instance which is monitored. +const ( + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of + // the runtime of this process. For compiled native binaries, this SHOULD + // be the name of the compiler. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'OpenJDK Runtime Environment' + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the + // version of the runtime of this process, as returned by the runtime + // without modification. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '14.0.2' + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. 
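+ //
+ // Illustrative usage (editor's sketch, not part of the generated
+ // code): describing a process and its runtime on a resource, assuming
+ // "go.opentelemetry.io/otel/sdk/resource" is imported as resource and
+ // this package as semconv:
+ //
+ //	res := resource.NewWithAttributes(semconv.SchemaURL,
+ //		semconv.ProcessPID(1234),
+ //		semconv.ProcessExecutableName("otelcol"),
+ //		semconv.ProcessRuntimeName("go"),
+ //	)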
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") +) + +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. For compiled native binaries, this SHOULD be the +// name of the compiler. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without +// modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + +// A service instance. +const ( + // ServiceNameKey is the attribute Key conforming to the "service.name" + // semantic conventions. It represents the logical name of the service. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled + // services. If the value was not specified, SDKs MUST fallback to + // `unknown_service:` concatenated with + // [`process.executable.name`](process.md#process), e.g. + // `unknown_service:bash`. If `process.executable.name` is not available, + // the value MUST be set to `unknown_service`. + ServiceNameKey = attribute.Key("service.name") + + // ServiceNamespaceKey is the attribute Key conforming to the + // "service.namespace" semantic conventions. It represents a namespace for + // `service.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group + // of services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` + // is expected to be unique for all services that have no explicit + // namespace defined (so the empty/unspecified namespace is simply one more + // valid namespace). Zero-length namespace string is assumed equal to + // unspecified namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") + + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID + // of the service instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be + // globally unique). The ID helps to distinguish instances of the same + // service that exist at the same time (e.g. 
instances of a horizontally + // scaled service). It is preferable for the ID to be persistent and stay + // the same for the lifetime of the service instance, however it is + // acceptable that the ID is ephemeral and changes during important + // lifetime events for the service (e.g. service restarts). If the service + // has no inherent unique ID that can be used as the value of this + // attribute it is recommended to generate a random Version 1 or Version 4 + // RFC 4122 UUID (services aiming for reproducible UUIDs may also use + // Version 5, see RFC 4122 for more recommendations). + ServiceInstanceIDKey = attribute.Key("service.instance.id") + + // ServiceVersionKey is the attribute Key conforming to the + // "service.version" semantic conventions. It represents the version string + // of the service API or implementation. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2.0.0' + ServiceVersionKey = attribute.Key("service.version") +) + +// ServiceName returns an attribute KeyValue conforming to the +// "service.name" semantic conventions. It represents the logical name of the +// service. +func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of +// the service instance. +func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. +const ( + // TelemetrySDKNameKey is the attribute Key conforming to the + // "telemetry.sdk.name" semantic conventions. It represents the name of the + // telemetry SDK as defined above. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKLanguageKey is the attribute Key conforming to the + // "telemetry.sdk.language" semantic conventions. It represents the + // language of the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") + + // TelemetryAutoVersionKey is the attribute Key conforming to the + // "telemetry.auto.version" semantic conventions. It represents the version + // string of the auto instrumentation agent, if used. 
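+ //
+ // Illustrative usage (editor's sketch, not part of the generated
+ // code): combining service identity with telemetry SDK attributes on a
+ // resource, assuming "go.opentelemetry.io/otel/sdk/resource" is
+ // imported as resource and this package as semconv:
+ //
+ //	res := resource.NewWithAttributes(semconv.SchemaURL,
+ //		semconv.ServiceName("shoppingcart"),
+ //		semconv.ServiceVersion("2.0.0"),
+ //		semconv.TelemetrySDKLanguageGo,
+ //	)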
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.2.3' + TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") + // swift + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") +) + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version +// string of the telemetry SDK. +func TelemetrySDKVersion(val string) attribute.KeyValue { + return TelemetrySDKVersionKey.String(val) +} + +// TelemetryAutoVersion returns an attribute KeyValue conforming to the +// "telemetry.auto.version" semantic conventions. It represents the version +// string of the auto instrumentation agent, if used. +func TelemetryAutoVersion(val string) attribute.KeyValue { + return TelemetryAutoVersionKey.String(val) +} + +// Resource describing the packaged software running the application code. Web +// engines are typically executed using process.runtime. +const ( + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of + // the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '21.0.0' + WebEngineVersionKey = attribute.Key("webengine.version") + + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the + // additional description of the web engine (e.g. detailed version and + // edition information). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") +) + +// WebEngineName returns an attribute KeyValue conforming to the +// "webengine.name" semantic conventions. It represents the name of the web +// engine. 
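+//
+// Illustrative usage (editor's sketch, not part of the generated code),
+// assuming "go.opentelemetry.io/otel/sdk/resource" is imported as resource
+// and this package as semconv:
+//
+//	res := resource.NewWithAttributes(semconv.SchemaURL,
+//		semconv.WebEngineName("WildFly"),
+//		semconv.WebEngineVersion("21.0.0"),
+//	)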
+func WebEngineName(val string) attribute.KeyValue { + return WebEngineNameKey.String(val) +} + +// WebEngineVersion returns an attribute KeyValue conforming to the +// "webengine.version" semantic conventions. It represents the version of the +// web engine. +func WebEngineVersion(val string) attribute.KeyValue { + return WebEngineVersionKey.String(val) +} + +// WebEngineDescription returns an attribute KeyValue conforming to the +// "webengine.description" semantic conventions. It represents the additional +// description of the web engine (e.g. detailed version and edition +// information). +func WebEngineDescription(val string) attribute.KeyValue { + return WebEngineDescriptionKey.String(val) +} + +// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's +// concepts. +const ( + // OTelScopeNameKey is the attribute Key conforming to the + // "otel.scope.name" semantic conventions. It represents the name of the + // instrumentation scope - (`InstrumentationScope.Name` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'io.opentelemetry.contrib.mongodb' + OTelScopeNameKey = attribute.Key("otel.scope.name") + + // OTelScopeVersionKey is the attribute Key conforming to the + // "otel.scope.version" semantic conventions. It represents the version of + // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.0.0' + OTelScopeVersionKey = attribute.Key("otel.scope.version") +) + +// OTelScopeName returns an attribute KeyValue conforming to the +// "otel.scope.name" semantic conventions. It represents the name of the +// instrumentation scope - (`InstrumentationScope.Name` in OTLP). +func OTelScopeName(val string) attribute.KeyValue { + return OTelScopeNameKey.String(val) +} + +// OTelScopeVersion returns an attribute KeyValue conforming to the +// "otel.scope.version" semantic conventions. It represents the version of the +// instrumentation scope - (`InstrumentationScope.Version` in OTLP). +func OTelScopeVersion(val string) attribute.KeyValue { + return OTelScopeVersionKey.String(val) +} + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry +// Scope's concepts. +const ( + // OTelLibraryNameKey is the attribute Key conforming to the + // "otel.library.name" semantic conventions. It is deprecated; + // use the `otel.scope.name` attribute instead. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'io.opentelemetry.contrib.mongodb' + OTelLibraryNameKey = attribute.Key("otel.library.name") + + // OTelLibraryVersionKey is the attribute Key conforming to the + // "otel.library.version" semantic conventions. It is + // deprecated; use the `otel.scope.version` attribute instead. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '1.0.0' + OTelLibraryVersionKey = attribute.Key("otel.library.version") +) + +// OTelLibraryName returns an attribute KeyValue conforming to the +// "otel.library.name" semantic conventions. It is deprecated; use +// the `otel.scope.name` attribute instead. +func OTelLibraryName(val string) attribute.KeyValue { + return OTelLibraryNameKey.String(val) +} + +// OTelLibraryVersion returns an attribute KeyValue conforming to the +// "otel.library.version" semantic conventions. It is deprecated; +// use the `otel.scope.version` attribute instead.
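+//
+// Editor's note (not part of the generated code): new instrumentation
+// should emit the `otel.scope.*` attributes instead, e.g.:
+//
+//	attrs := []attribute.KeyValue{
+//		semconv.OTelScopeName("io.opentelemetry.contrib.mongodb"),
+//		semconv.OTelScopeVersion("1.0.0"),
+//	}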
+func OTelLibraryVersion(val string) attribute.KeyValue { + return OTelLibraryVersionKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.18.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.18.0/schema.go new file mode 100644 index 00000000000..70ad4d1b6cc --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.18.0/schema.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.18.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/ +const SchemaURL = "https://opentelemetry.io/schemas/1.18.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.18.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.18.0/trace.go new file mode 100644 index 00000000000..03a64d90e42 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.18.0/trace.go @@ -0,0 +1,3370 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.18.0" + +import "go.opentelemetry.io/otel/attribute" + +// The shared attributes used to report a single exception associated with a +// span or log. +const ( + // ExceptionTypeKey is the attribute Key conforming to the "exception.type" + // semantic conventions. It represents the type of the exception (its + // fully-qualified class name, if applicable). The dynamic type of the + // exception should be preferred over the static type in languages that + // support it. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'java.net.ConnectException', 'OSError' + ExceptionTypeKey = attribute.Key("exception.type") + + // ExceptionMessageKey is the attribute Key conforming to the + // "exception.message" semantic conventions. It represents the exception + // message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Division by zero', "Can't convert 'int' object to str + // implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + + // ExceptionStacktraceKey is the attribute Key conforming to the + // "exception.stacktrace" semantic conventions. It represents a stacktrace + // as a string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") +) + +// ExceptionType returns an attribute KeyValue conforming to the +// "exception.type" semantic conventions. It represents the type of the +// exception (its fully-qualified class name, if applicable). The dynamic type +// of the exception should be preferred over the static type in languages that +// support it. 
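+//
+// Illustrative usage (editor's sketch, not part of the generated code):
+// recording an exception event on a span, assuming
+// "go.opentelemetry.io/otel/trace" is imported as trace and this package
+// as semconv:
+//
+//	span.AddEvent("exception", trace.WithAttributes(
+//		semconv.ExceptionType("java.net.ConnectException"),
+//		semconv.ExceptionMessage("Connection refused"),
+//	))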
+func ExceptionType(val string) attribute.KeyValue { + return ExceptionTypeKey.String(val) +} + +// ExceptionMessage returns an attribute KeyValue conforming to the +// "exception.message" semantic conventions. It represents the exception +// message. +func ExceptionMessage(val string) attribute.KeyValue { + return ExceptionMessageKey.String(val) +} + +// ExceptionStacktrace returns an attribute KeyValue conforming to the +// "exception.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func ExceptionStacktrace(val string) attribute.KeyValue { + return ExceptionStacktraceKey.String(val) +} + +// Attributes for Events represented using Log Records. +const ( + // EventNameKey is the attribute Key conforming to the "event.name" + // semantic conventions. It represents the name that identifies the event. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'click', 'exception' + EventNameKey = attribute.Key("event.name") + + // EventDomainKey is the attribute Key conforming to the "event.domain" + // semantic conventions. It represents the domain that identifies the + // business context for the events. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: Events across different domains may have the same `event.name`, + // yet be unrelated events. + EventDomainKey = attribute.Key("event.domain") +) + +var ( + // Events from browser apps + EventDomainBrowser = EventDomainKey.String("browser") + // Events from mobile apps + EventDomainDevice = EventDomainKey.String("device") + // Events from Kubernetes + EventDomainK8S = EventDomainKey.String("k8s") +) + +// EventName returns an attribute KeyValue conforming to the "event.name" +// semantic conventions. It represents the name that identifies the event. +func EventName(val string) attribute.KeyValue { + return EventNameKey.String(val) +} + +// Span attributes used by AWS Lambda (in addition to general `faas` +// attributes). +const ( + // AWSLambdaInvokedARNKey is the attribute Key conforming to the + // "aws.lambda.invoked_arn" semantic conventions. It represents the full + // invoked ARN as provided on the `Context` passed to the function + // (`Lambda-Runtime-Invoked-Function-ARN` header on the + // `/runtime/invocation/next` response, if applicable). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from `faas.id` if an alias is involved. + AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") +) + +// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the +// "aws.lambda.invoked_arn" semantic conventions. It represents the full +// invoked ARN as provided on the `Context` passed to the function +// (`Lambda-Runtime-Invoked-Function-ARN` header on the +// `/runtime/invocation/next` response, if applicable). +func AWSLambdaInvokedARN(val string) attribute.KeyValue { + return AWSLambdaInvokedARNKey.String(val) +} + +// Attributes for CloudEvents. CloudEvents is a specification on how to define +// event data in a standard way. These attributes can be attached to spans when +// performing operations with CloudEvents, regardless of the protocol being +// used. +const ( + // CloudeventsEventIDKey is the attribute Key conforming to the + // "cloudevents.event_id" semantic conventions.
It represents the + // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) + // uniquely identifies the event. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' + CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") + + // CloudeventsEventSourceKey is the attribute Key conforming to the + // "cloudevents.event_source" semantic conventions. It represents the + // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) + // identifies the context in which an event happened. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'https://github.com/cloudevents', + // '/cloudevents/spec/pull/123', 'my-service' + CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") + + // CloudeventsEventSpecVersionKey is the attribute Key conforming to the + // "cloudevents.event_spec_version" semantic conventions. It represents the + // [version of the CloudEvents + // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) + // which the event uses. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.0' + CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") + + // CloudeventsEventTypeKey is the attribute Key conforming to the + // "cloudevents.event_type" semantic conventions. It represents the + // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) + // contains a value describing the type of event related to the originating + // occurrence. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'com.github.pull_request.opened', + // 'com.example.object.deleted.v2' + CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") + + // CloudeventsEventSubjectKey is the attribute Key conforming to the + // "cloudevents.event_subject" semantic conventions. It represents the + // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) + // of the event in the context of the event producer (identified by + // source). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'mynewfile.jpg' + CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") +) + +// CloudeventsEventID returns an attribute KeyValue conforming to the +// "cloudevents.event_id" semantic conventions. It represents the +// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) +// uniquely identifies the event. +func CloudeventsEventID(val string) attribute.KeyValue { + return CloudeventsEventIDKey.String(val) +} + +// CloudeventsEventSource returns an attribute KeyValue conforming to the +// "cloudevents.event_source" semantic conventions. It represents the +// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) +// identifies the context in which an event happened. +func CloudeventsEventSource(val string) attribute.KeyValue { + return CloudeventsEventSourceKey.String(val) +} + +// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to +// the "cloudevents.event_spec_version" semantic conventions. It represents the +// [version of the CloudEvents +// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) +// which the event uses. 
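+//
+// Illustrative usage (editor's sketch, not part of the generated code),
+// with this package imported as semconv:
+//
+//	span.SetAttributes(
+//		semconv.CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
+//		semconv.CloudeventsEventSource("https://github.com/cloudevents"),
+//		semconv.CloudeventsEventSpecVersion("1.0"),
+//	)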
+func CloudeventsEventSpecVersion(val string) attribute.KeyValue { + return CloudeventsEventSpecVersionKey.String(val) +} + +// CloudeventsEventType returns an attribute KeyValue conforming to the +// "cloudevents.event_type" semantic conventions. It represents the +// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) +// contains a value describing the type of event related to the originating +// occurrence. +func CloudeventsEventType(val string) attribute.KeyValue { + return CloudeventsEventTypeKey.String(val) +} + +// CloudeventsEventSubject returns an attribute KeyValue conforming to the +// "cloudevents.event_subject" semantic conventions. It represents the +// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) +// of the event in the context of the event producer (identified by source). +func CloudeventsEventSubject(val string) attribute.KeyValue { + return CloudeventsEventSubjectKey.String(val) +} + +// Semantic conventions for the OpenTracing Shim +const ( + // OpentracingRefTypeKey is the attribute Key conforming to the + // "opentracing.ref_type" semantic conventions. It represents the + // parent-child Reference type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: The causal relationship between a child Span and a parent Span. + OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") +) + +var ( + // The parent Span depends on the child Span in some capacity + OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") + // The parent Span does not depend in any way on the result of the child Span + OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") +) + +// The attributes used to perform database client calls. +const ( + // DBSystemKey is the attribute Key conforming to the "db.system" semantic + // conventions. It represents an identifier for the database management + // system (DBMS) product being used. See below for a list of well-known + // identifiers. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + DBSystemKey = attribute.Key("db.system") + + // DBConnectionStringKey is the attribute Key conforming to the + // "db.connection_string" semantic conventions. It represents the + // connection string used to connect to the database. It is recommended to + // remove embedded credentials. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' + DBConnectionStringKey = attribute.Key("db.connection_string") + + // DBUserKey is the attribute Key conforming to the "db.user" semantic + // conventions. It represents the username for accessing the database. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'readonly_user', 'reporting_user' + DBUserKey = attribute.Key("db.user") + + // DBJDBCDriverClassnameKey is the attribute Key conforming to the + // "db.jdbc.driver_classname" semantic conventions. It represents the + // fully-qualified class name of the [Java Database Connectivity + // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) + // driver used to connect. 
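+ //
+ // Illustrative usage (editor's sketch, not part of the generated
+ // code): annotating a database client span with the common `db.*`
+ // attributes, with this package imported as semconv:
+ //
+ //	span.SetAttributes(
+ //		semconv.DBSystemPostgreSQL,
+ //		semconv.DBName("customers"),
+ //		semconv.DBStatement("SELECT * FROM wuser_table"),
+ //	)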
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'org.postgresql.Driver', + // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' + DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") + + // DBNameKey is the attribute Key conforming to the "db.name" semantic + // conventions. It represents the name of the database being accessed. For + // commands that switch the database, this should be set to the target + // database (even if the command fails). + // + // Type: string + // RequirementLevel: ConditionallyRequired (If applicable.) + // Stability: stable + // Examples: 'customers', 'main' + // Note: In some SQL databases, the database name to be used is called + // "schema name". In case there are multiple layers that could be + // considered for database name (e.g. Oracle instance name and schema + // name), the database name to be used is the more specific layer (e.g. + // Oracle schema name). + DBNameKey = attribute.Key("db.name") + + // DBStatementKey is the attribute Key conforming to the "db.statement" + // semantic conventions. It represents the database statement being + // executed. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If applicable and not + // explicitly disabled via instrumentation configuration.) + // Stability: stable + // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' + // Note: The value may be sanitized to exclude sensitive information. + DBStatementKey = attribute.Key("db.statement") + + // DBOperationKey is the attribute Key conforming to the "db.operation" + // semantic conventions. It represents the name of the operation being + // executed, e.g. the [MongoDB command + // name](https://docs.mongodb.com/manual/reference/command/#database-operations) + // such as `findAndModify`, or the SQL keyword. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If `db.statement` is not + // applicable.) + // Stability: stable + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: When setting this to an SQL keyword, it is not recommended to + // attempt any client-side parsing of `db.statement` just to get this + // property, but it should be set if the operation name is provided by the + // library being instrumented. If the SQL statement has an ambiguous + // operation, or performs more than one operation, this value may be + // omitted. + DBOperationKey = attribute.Key("db.operation") +) + +var ( + // Some other SQL database. Fallback only.
See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // Microsoft SQL Server Compact + DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = DBSystemKey.String("couchbase") + // CouchDB + DBSystemCouchDB = DBSystemKey.String("couchdb") + // Microsoft Azure Cosmos DB + DBSystemCosmosDB = DBSystemKey.String("cosmosdb") + // Amazon DynamoDB + DBSystemDynamoDB = DBSystemKey.String("dynamodb") + // Neo4j + DBSystemNeo4j = DBSystemKey.String("neo4j") + // Apache Geode + DBSystemGeode = DBSystemKey.String("geode") + // Elasticsearch + DBSystemElasticsearch = DBSystemKey.String("elasticsearch") + // Memcached + DBSystemMemcached = DBSystemKey.String("memcached") + // CockroachDB + DBSystemCockroachdb = DBSystemKey.String("cockroachdb") + // OpenSearch + DBSystemOpensearch = DBSystemKey.String("opensearch") + // ClickHouse + DBSystemClickhouse = DBSystemKey.String("clickhouse") + // Cloud Spanner + DBSystemSpanner = DBSystemKey.String("spanner") +) + +// DBConnectionString returns an attribute KeyValue conforming to the +// 
"db.connection_string" semantic conventions. It represents the connection +// string used to connect to the database. It is recommended to remove embedded +// credentials. +func DBConnectionString(val string) attribute.KeyValue { + return DBConnectionStringKey.String(val) +} + +// DBUser returns an attribute KeyValue conforming to the "db.user" semantic +// conventions. It represents the username for accessing the database. +func DBUser(val string) attribute.KeyValue { + return DBUserKey.String(val) +} + +// DBJDBCDriverClassname returns an attribute KeyValue conforming to the +// "db.jdbc.driver_classname" semantic conventions. It represents the +// fully-qualified class name of the [Java Database Connectivity +// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver +// used to connect. +func DBJDBCDriverClassname(val string) attribute.KeyValue { + return DBJDBCDriverClassnameKey.String(val) +} + +// DBName returns an attribute KeyValue conforming to the "db.name" semantic +// conventions. It represents the this attribute is used to report the name of +// the database being accessed. For commands that switch the database, this +// should be set to the target database (even if the command fails). +func DBName(val string) attribute.KeyValue { + return DBNameKey.String(val) +} + +// DBStatement returns an attribute KeyValue conforming to the +// "db.statement" semantic conventions. It represents the database statement +// being executed. +func DBStatement(val string) attribute.KeyValue { + return DBStatementKey.String(val) +} + +// DBOperation returns an attribute KeyValue conforming to the +// "db.operation" semantic conventions. It represents the name of the operation +// being executed, e.g. the [MongoDB command +// name](https://docs.mongodb.com/manual/reference/command/#database-operations) +// such as `findAndModify`, or the SQL keyword. +func DBOperation(val string) attribute.KeyValue { + return DBOperationKey.String(val) +} + +// Connection-level attributes for Microsoft SQL Server +const ( + // DBMSSQLInstanceNameKey is the attribute Key conforming to the + // "db.mssql.instance_name" semantic conventions. It represents the + // Microsoft SQL Server [instance + // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) + // connecting to. This name is used to determine the port of a named + // instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MSSQLSERVER' + // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no + // longer required (but still recommended if non-standard). + DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") +) + +// DBMSSQLInstanceName returns an attribute KeyValue conforming to the +// "db.mssql.instance_name" semantic conventions. It represents the Microsoft +// SQL Server [instance +// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) +// connecting to. This name is used to determine the port of a named instance. +func DBMSSQLInstanceName(val string) attribute.KeyValue { + return DBMSSQLInstanceNameKey.String(val) +} + +// Call-level attributes for Cassandra +const ( + // DBCassandraPageSizeKey is the attribute Key conforming to the + // "db.cassandra.page_size" semantic conventions. It represents the fetch + // size used for paging, i.e. how many rows will be returned at once. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 5000 + DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") + + // DBCassandraConsistencyLevelKey is the attribute Key conforming to the + // "db.cassandra.consistency_level" semantic conventions. It represents the + // consistency level of the query. Based on consistency values from + // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") + + // DBCassandraTableKey is the attribute Key conforming to the + // "db.cassandra.table" semantic conventions. It represents the name of the + // primary table that the operation is acting upon, including the keyspace + // name (if applicable). + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'mytable' + // Note: This mirrors the db.sql.table attribute but references cassandra + // rather than sql. It is not recommended to attempt any client-side + // parsing of `db.statement` just to get this property, but it should be + // set if it is provided by the library being instrumented. If the + // operation is acting upon an anonymous table, or more than one table, + // this value MUST NOT be set. + DBCassandraTableKey = attribute.Key("db.cassandra.table") + + // DBCassandraIdempotenceKey is the attribute Key conforming to the + // "db.cassandra.idempotence" semantic conventions. It represents whether + // or not the query is idempotent. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") + + // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming + // to the "db.cassandra.speculative_execution_count" semantic conventions. + // It represents the number of times a query was speculatively executed. + // Not set or `0` if the query was not executed speculatively. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 0, 2 + DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") + + // DBCassandraCoordinatorIDKey is the attribute Key conforming to the + // "db.cassandra.coordinator.id" semantic conventions. It represents the ID + // of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") + + // DBCassandraCoordinatorDCKey is the attribute Key conforming to the + // "db.cassandra.coordinator.dc" semantic conventions. It represents the + // data center of the coordinating node for a query.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'us-west-2' + DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") +) + +var ( + // all + DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") + // each_quorum + DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") + // quorum + DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") + // local_quorum + DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") + // one + DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") + // two + DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") + // three + DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") + // local_one + DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") + // any + DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") + // serial + DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") + // local_serial + DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") +) + +// DBCassandraPageSize returns an attribute KeyValue conforming to the +// "db.cassandra.page_size" semantic conventions. It represents the fetch size +// used for paging, i.e. how many rows will be returned at once. +func DBCassandraPageSize(val int) attribute.KeyValue { + return DBCassandraPageSizeKey.Int(val) +} + +// DBCassandraTable returns an attribute KeyValue conforming to the +// "db.cassandra.table" semantic conventions. It represents the name of the +// primary table that the operation is acting upon, including the keyspace name +// (if applicable). +func DBCassandraTable(val string) attribute.KeyValue { + return DBCassandraTableKey.String(val) +} + +// DBCassandraIdempotence returns an attribute KeyValue conforming to the +// "db.cassandra.idempotence" semantic conventions. It represents whether +// or not the query is idempotent. +func DBCassandraIdempotence(val bool) attribute.KeyValue { + return DBCassandraIdempotenceKey.Bool(val) +} + +// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue +// conforming to the "db.cassandra.speculative_execution_count" semantic +// conventions. It represents the number of times a query was speculatively +// executed. Not set or `0` if the query was not executed speculatively. +func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { + return DBCassandraSpeculativeExecutionCountKey.Int(val) +} + +// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of +// the coordinating node for a query. +func DBCassandraCoordinatorID(val string) attribute.KeyValue { + return DBCassandraCoordinatorIDKey.String(val) +} + +// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.dc" semantic conventions. It represents the data +// center of the coordinating node for a query. +func DBCassandraCoordinatorDC(val string) attribute.KeyValue { + return DBCassandraCoordinatorDCKey.String(val) +} + +// Call-level attributes for Redis +const ( + // DBRedisDBIndexKey is the attribute Key conforming to the + // "db.redis.database_index" semantic conventions.
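// Editorial usage sketch (hypothetical helper; same import assumptions as
// the earlier DB client sketch): the Cassandra call-level attributes above,
// layered on top of the generic db.* attributes.
func exampleCassandraSpanAttributes(span trace.Span) {
	span.SetAttributes(
		semconv.DBSystemCassandra,
		semconv.DBCassandraTable("mykeyspace.mytable"), // keyspace-qualified when applicable
		semconv.DBCassandraConsistencyLevelLocalQuorum, // db.cassandra.consistency_level
		semconv.DBCassandraPageSize(5000),
		semconv.DBCassandraIdempotence(true),
		semconv.DBCassandraCoordinatorDC("us-west-2"),
	)
}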
It represents the index + // of the database being accessed as used in the [`SELECT` + // command](https://redis.io/commands/select), provided as an integer. To + // be used instead of the generic `db.name` attribute. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If other than the default + // database (`0`).) + // Stability: stable + // Examples: 0, 1, 15 + DBRedisDBIndexKey = attribute.Key("db.redis.database_index") +) + +// DBRedisDBIndex returns an attribute KeyValue conforming to the +// "db.redis.database_index" semantic conventions. It represents the index of +// the database being accessed as used in the [`SELECT` +// command](https://redis.io/commands/select), provided as an integer. To be +// used instead of the generic `db.name` attribute. +func DBRedisDBIndex(val int) attribute.KeyValue { + return DBRedisDBIndexKey.Int(val) +} + +// Call-level attributes for MongoDB +const ( + // DBMongoDBCollectionKey is the attribute Key conforming to the + // "db.mongodb.collection" semantic conventions. It represents the + // collection being accessed within the database stated in `db.name`. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'customers', 'products' + DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") +) + +// DBMongoDBCollection returns an attribute KeyValue conforming to the +// "db.mongodb.collection" semantic conventions. It represents the collection +// being accessed within the database stated in `db.name`. +func DBMongoDBCollection(val string) attribute.KeyValue { + return DBMongoDBCollectionKey.String(val) +} + +// Call-level attributes for SQL databases +const ( + // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" + // semantic conventions. It represents the name of the primary table that + // the operation is acting upon, including the database name (if + // applicable). + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'public.users', 'customers' + // Note: It is not recommended to attempt any client-side parsing of + // `db.statement` just to get this property, but it should be set if it is + // provided by the library being instrumented. If the operation is acting + // upon an anonymous table, or more than one table, this value MUST NOT be + // set. + DBSQLTableKey = attribute.Key("db.sql.table") +) + +// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" +// semantic conventions. It represents the name of the primary table that the +// operation is acting upon, including the database name (if applicable). +func DBSQLTable(val string) attribute.KeyValue { + return DBSQLTableKey.String(val) +} + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's +// concepts. +const ( + // OTelStatusCodeKey is the attribute Key conforming to the + // "otel.status_code" semantic conventions. It represents the name of the + // code, either "OK" or "ERROR". MUST NOT be set if the status code is + // UNSET. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + OTelStatusCodeKey = attribute.Key("otel.status_code") + + // OTelStatusDescriptionKey is the attribute Key conforming to the + // "otel.status_description" semantic conventions. It represents the + // description of the Status if it has a value, otherwise not set. 
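// Editorial usage sketch (hypothetical helpers; same import assumptions as
// above): db.redis.database_index stands in for db.name on Redis spans,
// while MongoDB spans must carry db.mongodb.collection.
func exampleRedisSpanAttributes(span trace.Span) {
	span.SetAttributes(
		semconv.DBSystemRedis,
		semconv.DBRedisDBIndex(1), // only when not the default database 0
		semconv.DBStatement("GET user:42"),
	)
}

func exampleMongoSpanAttributes(span trace.Span) {
	span.SetAttributes(
		semconv.DBSystemMongoDB,
		semconv.DBName("shop"),
		semconv.DBMongoDBCollection("customers"), // Required at the call level
		semconv.DBOperation("findAndModify"),
	)
}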
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'resource not found' + OTelStatusDescriptionKey = attribute.Key("otel.status_description") +) + +var ( + // The operation has been validated by an Application developer or Operator to have completed successfully + OTelStatusCodeOk = OTelStatusCodeKey.String("OK") + // The operation contains an error + OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") +) + +// OTelStatusDescription returns an attribute KeyValue conforming to the +// "otel.status_description" semantic conventions. It represents the +// description of the Status if it has a value, otherwise not set. +func OTelStatusDescription(val string) attribute.KeyValue { + return OTelStatusDescriptionKey.String(val) +} + +// This semantic convention describes an instance of a function that runs +// without provisioning or managing of servers (also known as serverless +// functions or Function as a Service (FaaS)) with spans. +const ( + // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" + // semantic conventions. It represents the type of the trigger which caused + // this function execution. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: For the server/consumer span on the incoming side, + // `faas.trigger` MUST be set. + // + // Clients invoking FaaS instances usually cannot set `faas.trigger`, + // since they would typically need to look in the payload to determine + // the event type. If clients set it, it should be the same as the + // trigger that corresponding incoming would have (i.e., this has + // nothing to do with the underlying transport used to make the API + // call to invoke the lambda, which is often HTTP). + FaaSTriggerKey = attribute.Key("faas.trigger") + + // FaaSExecutionKey is the attribute Key conforming to the "faas.execution" + // semantic conventions. It represents the execution ID of the current + // function execution. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + FaaSExecutionKey = attribute.Key("faas.execution") +) + +var ( + // A response to some data source operation such as a database or filesystem read/write + FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") + // To provide an answer to an inbound HTTP request + FaaSTriggerHTTP = FaaSTriggerKey.String("http") + // A function is set to be executed when messages are sent to a messaging system + FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") + // A function is scheduled to be executed regularly + FaaSTriggerTimer = FaaSTriggerKey.String("timer") + // If none of the others apply + FaaSTriggerOther = FaaSTriggerKey.String("other") +) + +// FaaSExecution returns an attribute KeyValue conforming to the +// "faas.execution" semantic conventions. It represents the execution ID of the +// current function execution. +func FaaSExecution(val string) attribute.KeyValue { + return FaaSExecutionKey.String(val) +} + +// Semantic Convention for FaaS triggered as a response to some data source +// operation such as a database or filesystem read/write. +const ( + // FaaSDocumentCollectionKey is the attribute Key conforming to the + // "faas.document.collection" semantic conventions. It represents the name + // of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 corresponds to the bucket name, and in + // Cosmos DB to the database name. 
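// Editorial usage sketch (hypothetical helper; same import assumptions as
// above): an incoming HTTP-triggered FaaS span, on which faas.trigger MUST
// be set. A non-OTLP exporter would separately map an error span status to
// semconv.OTelStatusCodeError plus semconv.OTelStatusDescription(...).
func exampleIncomingFaaSSpanAttributes(span trace.Span) {
	span.SetAttributes(
		semconv.FaaSTriggerHTTP, // faas.trigger
		semconv.FaaSExecution("af9d5aa4-a685-4c5f-a22b-444f80b3cc28"),
	)
}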
+ // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myBucketName', 'myDBName' + FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") + + // FaaSDocumentOperationKey is the attribute Key conforming to the + // "faas.document.operation" semantic conventions. It represents the + // type of the operation that was performed on the data. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + FaaSDocumentOperationKey = attribute.Key("faas.document.operation") + + // FaaSDocumentTimeKey is the attribute Key conforming to the + // "faas.document.time" semantic conventions. It represents a string + // containing the time when the data was accessed in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSDocumentTimeKey = attribute.Key("faas.document.time") + + // FaaSDocumentNameKey is the attribute Key conforming to the + // "faas.document.name" semantic conventions. It represents the document + // name/table subjected to the operation. For example, in Cloud Storage or + // S3 it is the name of the file, and in Cosmos DB the table name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'myFile.txt', 'myTableName' + FaaSDocumentNameKey = attribute.Key("faas.document.name") +) + +var ( + // When a new object is created + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted + FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") +) + +// FaaSDocumentCollection returns an attribute KeyValue conforming to the +// "faas.document.collection" semantic conventions. It represents the name of +// the source on which the triggering operation was performed. For example, in +// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the +// database name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 +// it is the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// Semantic Convention for FaaS scheduled to be executed regularly. +const ( + // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic + // conventions. It represents a string containing the function invocation + // time in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
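// Editorial usage sketch (hypothetical helper; same import assumptions as
// above): a FaaS span fired by a datasource operation, e.g. an S3 object
// creation.
func exampleFaaSDatasourceSpanAttributes(span trace.Span) {
	span.SetAttributes(
		semconv.FaaSTriggerDatasource,
		semconv.FaaSDocumentCollection("myBucketName"),
		semconv.FaaSDocumentOperationInsert, // faas.document.operation
		semconv.FaaSDocumentTime("2020-01-23T13:47:06Z"),
		semconv.FaaSDocumentName("myFile.txt"),
	)
}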
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSTimeKey = attribute.Key("faas.time") + + // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic + // conventions. It represents a string containing the schedule period as + // [Cron + // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0/5 * * * ? *' + FaaSCronKey = attribute.Key("faas.cron") +) + +// FaaSTime returns an attribute KeyValue conforming to the "faas.time" +// semantic conventions. It represents a string containing the function +// invocation time in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSTime(val string) attribute.KeyValue { + return FaaSTimeKey.String(val) +} + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" +// semantic conventions. It represents a string containing the schedule period +// as [Cron +// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// Contains additional attributes for incoming FaaS spans. +const ( + // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" + // semantic conventions. It represents a boolean that is true if the + // serverless function is executed for the first time (aka cold-start). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + FaaSColdstartKey = attribute.Key("faas.coldstart") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the +// "faas.coldstart" semantic conventions. It represents a boolean that is true +// if the serverless function is executed for the first time (aka cold-start). +func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// Contains additional attributes for outgoing FaaS spans. +const ( + // FaaSInvokedNameKey is the attribute Key conforming to the + // "faas.invoked_name" semantic conventions. It represents the name of the + // invoked function. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'my-function' + // Note: SHOULD be equal to the `faas.name` resource attribute of the + // invoked function. + FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + + // FaaSInvokedProviderKey is the attribute Key conforming to the + // "faas.invoked_provider" semantic conventions. It represents the cloud + // provider of the invoked function. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the + // invoked function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + + // FaaSInvokedRegionKey is the attribute Key conforming to the + // "faas.invoked_region" semantic conventions. It represents the cloud + // region of the invoked function. + // + // Type: string + // RequirementLevel: ConditionallyRequired (For some cloud providers, like + // AWS or GCP, the region in which a function is hosted is essential to + // uniquely identify the function and also part of its endpoint. Since it's + // part of the endpoint being called, the region is always known to + // clients. 
In these cases, `faas.invoked_region` MUST be set accordingly. + // If the region is unknown to the client or not required for identifying + // the invoked function, setting `faas.invoked_region` is optional.) + // Stability: stable + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the `cloud.region` resource attribute of the + // invoked function. + FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") +) + +var ( + // Alibaba Cloud + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +// FaaSInvokedName returns an attribute KeyValue conforming to the +// "faas.invoked_name" semantic conventions. It represents the name of the +// invoked function. +func FaaSInvokedName(val string) attribute.KeyValue { + return FaaSInvokedNameKey.String(val) +} + +// FaaSInvokedRegion returns an attribute KeyValue conforming to the +// "faas.invoked_region" semantic conventions. It represents the cloud region +// of the invoked function. +func FaaSInvokedRegion(val string) attribute.KeyValue { + return FaaSInvokedRegionKey.String(val) +} + +// These attributes may be used for any network related operation. +const ( + // NetTransportKey is the attribute Key conforming to the "net.transport" + // semantic conventions. It represents the transport protocol used. See + // note below. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + NetTransportKey = attribute.Key("net.transport") + + // NetAppProtocolNameKey is the attribute Key conforming to the + // "net.app.protocol.name" semantic conventions. It represents the + // application layer protocol used. The value SHOULD be normalized to + // lowercase. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'amqp', 'http', 'mqtt' + NetAppProtocolNameKey = attribute.Key("net.app.protocol.name") + + // NetAppProtocolVersionKey is the attribute Key conforming to the + // "net.app.protocol.version" semantic conventions. It represents the + // version of the application layer protocol used. See note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '3.1.1' + // Note: `net.app.protocol.version` refers to the version of the protocol + // used and might be different from the protocol client's version. If the + // HTTP client used has a version of `0.27.2`, but sends HTTP version + // `1.1`, this attribute should be set to `1.1`. + NetAppProtocolVersionKey = attribute.Key("net.app.protocol.version") + + // NetSockPeerNameKey is the attribute Key conforming to the + // "net.sock.peer.name" semantic conventions. It represents the remote + // socket peer name. + // + // Type: string + // RequirementLevel: Recommended (If available and different from + // `net.peer.name` and if `net.sock.peer.addr` is set.) + // Stability: stable + // Examples: 'proxy.example.com' + NetSockPeerNameKey = attribute.Key("net.sock.peer.name") + + // NetSockPeerAddrKey is the attribute Key conforming to the + // "net.sock.peer.addr" semantic conventions. 
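// Editorial usage sketch (hypothetical helper; same import assumptions as
// above): a client span for an outgoing FaaS invocation, mirroring the
// invoked function's resource attributes.
func exampleOutgoingFaaSInvocationAttributes(span trace.Span) {
	span.SetAttributes(
		semconv.FaaSInvokedName("my-function"),    // should equal the target's faas.name
		semconv.FaaSInvokedProviderAWS,            // should equal the target's cloud.provider
		semconv.FaaSInvokedRegion("eu-central-1"), // required when it identifies the endpoint
	)
}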
It represents the remote + // socket peer address: IPv4 or IPv6 for internet protocols, path for local + // communication, + // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '127.0.0.1', '/tmp/mysql.sock' + NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") + + // NetSockPeerPortKey is the attribute Key conforming to the + // "net.sock.peer.port" semantic conventions. It represents the remote + // socket peer port. + // + // Type: int + // RequirementLevel: Recommended (If defined for the address family and if + // different than `net.peer.port` and if `net.sock.peer.addr` is set.) + // Stability: stable + // Examples: 16456 + NetSockPeerPortKey = attribute.Key("net.sock.peer.port") + + // NetSockFamilyKey is the attribute Key conforming to the + // "net.sock.family" semantic conventions. It represents the protocol + // [address + // family](https://man7.org/linux/man-pages/man7/address_families.7.html) + // which is used for communication. + // + // Type: Enum + // RequirementLevel: ConditionallyRequired (If different than `inet` and if + // any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers + // of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in + // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support + // instrumentations that follow previous versions of this document.) + // Stability: stable + // Examples: 'inet6', 'bluetooth' + NetSockFamilyKey = attribute.Key("net.sock.family") + + // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" + // semantic conventions. It represents the logical remote hostname, see + // note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'example.com' + // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an + // extra DNS lookup. + NetPeerNameKey = attribute.Key("net.peer.name") + + // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" + // semantic conventions. It represents the logical remote port number + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 80, 8080, 443 + NetPeerPortKey = attribute.Key("net.peer.port") + + // NetHostNameKey is the attribute Key conforming to the "net.host.name" + // semantic conventions. It represents the logical local hostname or + // similar, see note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'localhost' + NetHostNameKey = attribute.Key("net.host.name") + + // NetHostPortKey is the attribute Key conforming to the "net.host.port" + // semantic conventions. It represents the logical local port number, + // preferably the one that the peer used to connect + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 8080 + NetHostPortKey = attribute.Key("net.host.port") + + // NetSockHostAddrKey is the attribute Key conforming to the + // "net.sock.host.addr" semantic conventions. It represents the local + // socket address. Useful in case of a multi-IP host. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '192.168.0.1' + NetSockHostAddrKey = attribute.Key("net.sock.host.addr") + + // NetSockHostPortKey is the attribute Key conforming to the + // "net.sock.host.port" semantic conventions. It represents the local + // socket port number. 
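// Editorial usage sketch (hypothetical helper; same import assumptions as
// above): typical net.* attributes on a client span, combining the logical
// peer with the resolved socket address. The address value is hypothetical.
func exampleClientNetworkAttributes(span trace.Span) {
	span.SetAttributes(
		semconv.NetTransportTCP, // ip_tcp (enum value defined below)
		semconv.NetAppProtocolName("http"),
		semconv.NetAppProtocolVersion("1.1"),
		semconv.NetPeerName("example.com"),
		semconv.NetPeerPort(443),
		semconv.NetSockPeerAddr("192.0.2.10"),
		semconv.NetSockPeerPort(443),
	)
}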
+ // + // Type: int + // RequirementLevel: Recommended (If defined for the address family and if + // different than `net.host.port` and if `net.sock.host.addr` is set.) + // Stability: stable + // Examples: 35555 + NetSockHostPortKey = attribute.Key("net.sock.host.port") + + // NetHostConnectionTypeKey is the attribute Key conforming to the + // "net.host.connection.type" semantic conventions. It represents the + // internet connection type currently being used by the host. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'wifi' + NetHostConnectionTypeKey = attribute.Key("net.host.connection.type") + + // NetHostConnectionSubtypeKey is the attribute Key conforming to the + // "net.host.connection.subtype" semantic conventions. It represents + // more details regarding the connection.type. It may be the + // type of cell technology connection, but it could be used for describing + // details about a wifi connection. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'LTE' + NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype") + + // NetHostCarrierNameKey is the attribute Key conforming to the + // "net.host.carrier.name" semantic conventions. It represents the name of + // the mobile carrier. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'sprint' + NetHostCarrierNameKey = attribute.Key("net.host.carrier.name") + + // NetHostCarrierMccKey is the attribute Key conforming to the + // "net.host.carrier.mcc" semantic conventions. It represents the mobile + // carrier country code. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '310' + NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc") + + // NetHostCarrierMncKey is the attribute Key conforming to the + // "net.host.carrier.mnc" semantic conventions. It represents the mobile + // carrier network code. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '001' + NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc") + + // NetHostCarrierIccKey is the attribute Key conforming to the + // "net.host.carrier.icc" semantic conventions. It represents the ISO + // 3166-1 alpha-2 2-character country code associated with the mobile + // carrier network. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'DE' + NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc") +) + +var ( + // ip_tcp + NetTransportTCP = NetTransportKey.String("ip_tcp") + // ip_udp + NetTransportUDP = NetTransportKey.String("ip_udp") + // Named or anonymous pipe.
See note below + NetTransportPipe = NetTransportKey.String("pipe") + // In-process communication + NetTransportInProc = NetTransportKey.String("inproc") + // Something else (non IP-based) + NetTransportOther = NetTransportKey.String("other") +) + +var ( + // IPv4 address + NetSockFamilyInet = NetSockFamilyKey.String("inet") + // IPv6 address + NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") + // Unix domain socket path + NetSockFamilyUnix = NetSockFamilyKey.String("unix") +) + +var ( + // wifi + NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") + // wired + NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") + // cell + NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") + // unavailable + NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") + // unknown + NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") +) + +var ( + // GPRS + NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") + // EDGE + NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") + // UMTS + NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") + // CDMA + NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") + // HSUPA + NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") + // HSPA + NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") + // IDEN + NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") + // EVDO Rev. B + NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") + // LTE + NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") + // EHRPD + NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") + // HSPAP + NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") + // GSM + NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") + // TD-SCDMA + NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") + // IWLAN + NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") + // LTE CA + NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") +) + +// NetAppProtocolName returns an attribute KeyValue conforming to the +// "net.app.protocol.name" semantic conventions. It represents the application +// layer protocol used. The value SHOULD be normalized to lowercase. +func NetAppProtocolName(val string) attribute.KeyValue { + return NetAppProtocolNameKey.String(val) +} + +// NetAppProtocolVersion returns an attribute KeyValue conforming to the +// "net.app.protocol.version" semantic conventions. It represents the version +// of the application layer protocol used. See note below. 
+func NetAppProtocolVersion(val string) attribute.KeyValue { + return NetAppProtocolVersionKey.String(val) +} + +// NetSockPeerName returns an attribute KeyValue conforming to the +// "net.sock.peer.name" semantic conventions. It represents the remote socket +// peer name. +func NetSockPeerName(val string) attribute.KeyValue { + return NetSockPeerNameKey.String(val) +} + +// NetSockPeerAddr returns an attribute KeyValue conforming to the +// "net.sock.peer.addr" semantic conventions. It represents the remote socket +// peer address: IPv4 or IPv6 for internet protocols, path for local +// communication, +// [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). +func NetSockPeerAddr(val string) attribute.KeyValue { + return NetSockPeerAddrKey.String(val) +} + +// NetSockPeerPort returns an attribute KeyValue conforming to the +// "net.sock.peer.port" semantic conventions. It represents the remote socket +// peer port. +func NetSockPeerPort(val int) attribute.KeyValue { + return NetSockPeerPortKey.Int(val) +} + +// NetPeerName returns an attribute KeyValue conforming to the +// "net.peer.name" semantic conventions. It represents the logical remote +// hostname, see note below. +func NetPeerName(val string) attribute.KeyValue { + return NetPeerNameKey.String(val) +} + +// NetPeerPort returns an attribute KeyValue conforming to the +// "net.peer.port" semantic conventions. It represents the logical remote port +// number +func NetPeerPort(val int) attribute.KeyValue { + return NetPeerPortKey.Int(val) +} + +// NetHostName returns an attribute KeyValue conforming to the +// "net.host.name" semantic conventions. It represents the logical local +// hostname or similar, see note below. +func NetHostName(val string) attribute.KeyValue { + return NetHostNameKey.String(val) +} + +// NetHostPort returns an attribute KeyValue conforming to the +// "net.host.port" semantic conventions. It represents the logical local port +// number, preferably the one that the peer used to connect +func NetHostPort(val int) attribute.KeyValue { + return NetHostPortKey.Int(val) +} + +// NetSockHostAddr returns an attribute KeyValue conforming to the +// "net.sock.host.addr" semantic conventions. It represents the local socket +// address. Useful in case of a multi-IP host. +func NetSockHostAddr(val string) attribute.KeyValue { + return NetSockHostAddrKey.String(val) +} + +// NetSockHostPort returns an attribute KeyValue conforming to the +// "net.sock.host.port" semantic conventions. It represents the local socket +// port number. +func NetSockHostPort(val int) attribute.KeyValue { + return NetSockHostPortKey.Int(val) +} + +// NetHostCarrierName returns an attribute KeyValue conforming to the +// "net.host.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetHostCarrierName(val string) attribute.KeyValue { + return NetHostCarrierNameKey.String(val) +} + +// NetHostCarrierMcc returns an attribute KeyValue conforming to the +// "net.host.carrier.mcc" semantic conventions. It represents the mobile +// carrier country code. +func NetHostCarrierMcc(val string) attribute.KeyValue { + return NetHostCarrierMccKey.String(val) +} + +// NetHostCarrierMnc returns an attribute KeyValue conforming to the +// "net.host.carrier.mnc" semantic conventions. It represents the mobile +// carrier network code. 
+func NetHostCarrierMnc(val string) attribute.KeyValue { + return NetHostCarrierMncKey.String(val) +} + +// NetHostCarrierIcc returns an attribute KeyValue conforming to the +// "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetHostCarrierIcc(val string) attribute.KeyValue { + return NetHostCarrierIccKey.String(val) +} + +// Operations that access some remote service. +const ( + // PeerServiceKey is the attribute Key conforming to the "peer.service" + // semantic conventions. It represents the + // [`service.name`](../../resource/semantic_conventions/README.md#service) + // of the remote service. SHOULD be equal to the actual `service.name` + // resource attribute of the remote service if any. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'AuthTokenCache' + PeerServiceKey = attribute.Key("peer.service") +) + +// PeerService returns an attribute KeyValue conforming to the +// "peer.service" semantic conventions. It represents the +// [`service.name`](../../resource/semantic_conventions/README.md#service) of +// the remote service. SHOULD be equal to the actual `service.name` resource +// attribute of the remote service if any. +func PeerService(val string) attribute.KeyValue { + return PeerServiceKey.String(val) +} + +// These attributes may be used for any operation with an authenticated and/or +// authorized enduser. +const ( + // EnduserIDKey is the attribute Key conforming to the "enduser.id" + // semantic conventions. It represents the username or client_id extracted + // from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header + // in the inbound request from outside the system. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") + + // EnduserRoleKey is the attribute Key conforming to the "enduser.role" + // semantic conventions. It represents the actual/assumed role the client + // is making the request under extracted from token or application security + // context. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") + + // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" + // semantic conventions. It represents the scopes or granted authorities + // the client currently possesses extracted from token or application + // security context. The value would come from the scope associated with an + // [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute + // value in a [SAML 2.0 + // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) + +// EnduserID returns an attribute KeyValue conforming to the "enduser.id" +// semantic conventions. It represents the username or client_id extracted from +// the access token or +// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in +// the inbound request from outside the system. 
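// Editorial usage sketch (hypothetical helper; same import assumptions as
// above): peer.service plus enduser.* attributes on a span for a call made
// on behalf of an authenticated user.
func examplePeerAndEnduserAttributes(span trace.Span) {
	span.SetAttributes(
		semconv.PeerService("AuthTokenCache"), // should equal the remote service.name
		semconv.EnduserID("username"),
		semconv.EnduserRole("admin"),
		semconv.EnduserScope("read:message, write:files"),
	)
}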
+func EnduserID(val string) attribute.KeyValue { + return EnduserIDKey.String(val) +} + +// EnduserRole returns an attribute KeyValue conforming to the +// "enduser.role" semantic conventions. It represents the actual/assumed role +// the client is making the request under extracted from token or application +// security context. +func EnduserRole(val string) attribute.KeyValue { + return EnduserRoleKey.String(val) +} + +// EnduserScope returns an attribute KeyValue conforming to the +// "enduser.scope" semantic conventions. It represents the scopes or granted +// authorities the client currently possesses extracted from token or +// application security context. The value would come from the scope associated +// with an [OAuth 2.0 Access +// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute +// value in a [SAML 2.0 +// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). +func EnduserScope(val string) attribute.KeyValue { + return EnduserScopeKey.String(val) +} + +// These attributes may be used for any operation to store information about a +// thread that started a span. +const ( + // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic + // conventions. It represents the current "managed" thread ID (as opposed + // to OS thread ID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 42 + ThreadIDKey = attribute.Key("thread.id") + + // ThreadNameKey is the attribute Key conforming to the "thread.name" + // semantic conventions. It represents the current thread name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'main' + ThreadNameKey = attribute.Key("thread.name") +) + +// ThreadID returns an attribute KeyValue conforming to the "thread.id" +// semantic conventions. It represents the current "managed" thread ID (as +// opposed to OS thread ID). +func ThreadID(val int) attribute.KeyValue { + return ThreadIDKey.Int(val) +} + +// ThreadName returns an attribute KeyValue conforming to the "thread.name" +// semantic conventions. It represents the current thread name. +func ThreadName(val string) attribute.KeyValue { + return ThreadNameKey.String(val) +} + +// These attributes allow to report this unit of code and therefore to provide +// more context about the span. +const ( + // CodeFunctionKey is the attribute Key conforming to the "code.function" + // semantic conventions. It represents the method or function name, or + // equivalent (usually rightmost part of the code unit's name). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'serveRequest' + CodeFunctionKey = attribute.Key("code.function") + + // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" + // semantic conventions. It represents the "namespace" within which + // `code.function` is defined. Usually the qualified class or module name, + // such that `code.namespace` + some separator + `code.function` form a + // unique identifier for the code unit. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'com.example.MyHTTPService' + CodeNamespaceKey = attribute.Key("code.namespace") + + // CodeFilepathKey is the attribute Key conforming to the "code.filepath" + // semantic conventions. It represents the source code file name that + // identifies the code unit as uniquely as possible (preferably an absolute + // file path). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + CodeFilepathKey = attribute.Key("code.filepath") + + // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" + // semantic conventions. It represents the line number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 42 + CodeLineNumberKey = attribute.Key("code.lineno") + + // CodeColumnKey is the attribute Key conforming to the "code.column" + // semantic conventions. It represents the column number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 16 + CodeColumnKey = attribute.Key("code.column") +) + +// CodeFunction returns an attribute KeyValue conforming to the +// "code.function" semantic conventions. It represents the method or function +// name, or equivalent (usually rightmost part of the code unit's name). +func CodeFunction(val string) attribute.KeyValue { + return CodeFunctionKey.String(val) +} + +// CodeNamespace returns an attribute KeyValue conforming to the +// "code.namespace" semantic conventions. It represents the "namespace" within +// which `code.function` is defined. Usually the qualified class or module +// name, such that `code.namespace` + some separator + `code.function` form a +// unique identifier for the code unit. +func CodeNamespace(val string) attribute.KeyValue { + return CodeNamespaceKey.String(val) +} + +// CodeFilepath returns an attribute KeyValue conforming to the +// "code.filepath" semantic conventions. It represents the source code file +// name that identifies the code unit as uniquely as possible (preferably an +// absolute file path). +func CodeFilepath(val string) attribute.KeyValue { + return CodeFilepathKey.String(val) +} + +// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" +// semantic conventions. It represents the line number in `code.filepath` best +// representing the operation. It SHOULD point within the code unit named in +// `code.function`. +func CodeLineNumber(val int) attribute.KeyValue { + return CodeLineNumberKey.Int(val) +} + +// CodeColumn returns an attribute KeyValue conforming to the "code.column" +// semantic conventions. It represents the column number in `code.filepath` +// best representing the operation. It SHOULD point within the code unit named +// in `code.function`. +func CodeColumn(val int) attribute.KeyValue { + return CodeColumnKey.Int(val) +} + +// Semantic conventions for HTTP client and server Spans. +const ( + // HTTPMethodKey is the attribute Key conforming to the "http.method" + // semantic conventions. It represents the HTTP request method. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'GET', 'POST', 'HEAD' + HTTPMethodKey = attribute.Key("http.method") + + // HTTPStatusCodeKey is the attribute Key conforming to the + // "http.status_code" semantic conventions. It represents the [HTTP + // response status code](https://tools.ietf.org/html/rfc7231#section-6). + // + // Type: int + // RequirementLevel: ConditionallyRequired (If and only if one was + // received/sent.)
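// Editorial usage sketch (hypothetical helper; same import assumptions as
// above): the code.* attributes defined above pinpoint the code unit that
// started the span.
func exampleCodeLocationAttributes(span trace.Span) {
	span.SetAttributes(
		semconv.CodeFunction("serveRequest"),
		semconv.CodeNamespace("com.example.MyHTTPService"),
		semconv.CodeFilepath("/usr/local/MyApplication/content_root/app/index.php"),
		semconv.CodeLineNumber(42),
	)
}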
+ // Stability: stable + // Examples: 200 + HTTPStatusCodeKey = attribute.Key("http.status_code") + + // HTTPFlavorKey is the attribute Key conforming to the "http.flavor" + // semantic conventions. It represents the kind of HTTP protocol used. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: If `net.transport` is not specified, it can be assumed to be + // `IP.TCP` except if `http.flavor` is `QUIC`, in which case `IP.UDP` is + // assumed. + HTTPFlavorKey = attribute.Key("http.flavor") + + // HTTPUserAgentKey is the attribute Key conforming to the + // "http.user_agent" semantic conventions. It represents the value of the + // [HTTP + // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) + // header sent by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' + HTTPUserAgentKey = attribute.Key("http.user_agent") + + // HTTPRequestContentLengthKey is the attribute Key conforming to the + // "http.request_content_length" semantic conventions. It represents the + // size of the request payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3495 + HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") + + // HTTPResponseContentLengthKey is the attribute Key conforming to the + // "http.response_content_length" semantic conventions. It represents the + // size of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3495 + HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") +) + +var ( + // HTTP/1.0 + HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") + // HTTP/1.1 + HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") + // HTTP/2 + HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") + // HTTP/3 + HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0") + // SPDY protocol + HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") + // QUIC protocol + HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") +) + +// HTTPMethod returns an attribute KeyValue conforming to the "http.method" +// semantic conventions. It represents the HTTP request method. +func HTTPMethod(val string) attribute.KeyValue { + return HTTPMethodKey.String(val) +} + +// HTTPStatusCode returns an attribute KeyValue conforming to the +// "http.status_code" semantic conventions. It represents the [HTTP response +// status code](https://tools.ietf.org/html/rfc7231#section-6). +func HTTPStatusCode(val int) attribute.KeyValue { + return HTTPStatusCodeKey.Int(val) +} + +// HTTPUserAgent returns an attribute KeyValue conforming to the +// "http.user_agent" semantic conventions. It represents the value of the [HTTP +// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) +// header sent by the client.
+func HTTPUserAgent(val string) attribute.KeyValue { + return HTTPUserAgentKey.String(val) +} + +// HTTPRequestContentLength returns an attribute KeyValue conforming to the +// "http.request_content_length" semantic conventions. It represents the size +// of the request payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPRequestContentLength(val int) attribute.KeyValue { + return HTTPRequestContentLengthKey.Int(val) +} + +// HTTPResponseContentLength returns an attribute KeyValue conforming to the +// "http.response_content_length" semantic conventions. It represents the size +// of the response payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPResponseContentLength(val int) attribute.KeyValue { + return HTTPResponseContentLengthKey.Int(val) +} + +// Semantic Convention for HTTP Client +const ( + // HTTPURLKey is the attribute Key conforming to the "http.url" semantic + // conventions. It represents the full HTTP request URL in the form + // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is + // not transmitted over HTTP, but if it is known, it should be included + // nevertheless. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' + // Note: `http.url` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case the + // attribute's value should be `https://www.example.com/`. + HTTPURLKey = attribute.Key("http.url") + + // HTTPResendCountKey is the attribute Key conforming to the + // "http.resend_count" semantic conventions. It represents the ordinal + // number of request resending attempt (for any reason, including + // redirects). + // + // Type: int + // RequirementLevel: Recommended (if and only if request was retried.) + // Stability: stable + // Examples: 3 + // Note: The resend count SHOULD be updated each time an HTTP request gets + // resent by the client, regardless of what was the cause of the resending + // (e.g. redirection, authorization failure, 503 Server Unavailable, + // network issues, or any other). + HTTPResendCountKey = attribute.Key("http.resend_count") +) + +// HTTPURL returns an attribute KeyValue conforming to the "http.url" +// semantic conventions. It represents the full HTTP request URL in the form +// `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not +// transmitted over HTTP, but if it is known, it should be included +// nevertheless. +func HTTPURL(val string) attribute.KeyValue { + return HTTPURLKey.String(val) +} + +// HTTPResendCount returns an attribute KeyValue conforming to the +// "http.resend_count" semantic conventions. It represents the ordinal number +// of request resending attempt (for any reason, including redirects). 
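// Editorial usage sketch (hypothetical helper; same import assumptions as
// above): an HTTP client span combining the shared http.* attributes with
// the client-only http.url and http.resend_count.
func exampleHTTPClientSpanAttributes(span trace.Span) {
	span.SetAttributes(
		semconv.HTTPMethod("GET"),
		semconv.HTTPURL("https://www.example.com/search?q=OpenTelemetry"), // credentials stripped
		semconv.HTTPFlavorHTTP11,
		semconv.HTTPStatusCode(200),
		semconv.HTTPResendCount(1), // only set if the request was resent
	)
}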
+func HTTPResendCount(val int) attribute.KeyValue { + return HTTPResendCountKey.Int(val) +} + +// Semantic Convention for HTTP Server +const ( + // HTTPSchemeKey is the attribute Key conforming to the "http.scheme" + // semantic conventions. It represents the URI scheme identifying the used + // protocol. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'http', 'https' + HTTPSchemeKey = attribute.Key("http.scheme") + + // HTTPTargetKey is the attribute Key conforming to the "http.target" + // semantic conventions. It represents the full request target as passed in + // a HTTP request line or equivalent. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '/path/12314/?q=ddds' + HTTPTargetKey = attribute.Key("http.target") + + // HTTPRouteKey is the attribute Key conforming to the "http.route" + // semantic conventions. It represents the matched route (path template in + // the format used by the respective server framework). See note below + // + // Type: string + // RequirementLevel: ConditionallyRequired (If and only if it's available) + // Stability: stable + // Examples: '/users/:userID?', '{controller}/{action}/{id?}' + // Note: MUST NOT be populated when this is not supported by the HTTP + // server framework as the route attribute should have low-cardinality and + // the URI path can NOT substitute it. + // SHOULD include the [application root](#http-server-definitions) if there + // is one. + HTTPRouteKey = attribute.Key("http.route") + + // HTTPClientIPKey is the attribute Key conforming to the "http.client_ip" + // semantic conventions. It represents the IP address of the original + // client behind all proxies, if known (e.g. from + // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '83.164.160.102' + // Note: This is not necessarily the same as `net.sock.peer.addr`, which + // would + // identify the network-level peer, which may be a proxy. + // + // This attribute should be set when a source of information different + // from the one used for `net.sock.peer.addr`, is available even if that + // other + // source just confirms the same value as `net.sock.peer.addr`. + // Rationale: For `net.sock.peer.addr`, one typically does not know if it + // comes from a proxy, reverse proxy, or the actual client. Setting + // `http.client_ip` when it's the same as `net.sock.peer.addr` means that + // one is at least somewhat confident that the address is not that of + // the closest proxy. + HTTPClientIPKey = attribute.Key("http.client_ip") +) + +// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme" +// semantic conventions. It represents the URI scheme identifying the used +// protocol. +func HTTPScheme(val string) attribute.KeyValue { + return HTTPSchemeKey.String(val) +} + +// HTTPTarget returns an attribute KeyValue conforming to the "http.target" +// semantic conventions. It represents the full request target as passed in a +// HTTP request line or equivalent. +func HTTPTarget(val string) attribute.KeyValue { + return HTTPTargetKey.String(val) +} + +// HTTPRoute returns an attribute KeyValue conforming to the "http.route" +// semantic conventions. It represents the matched route (path template in the +// format used by the respective server framework). 
See note below +func HTTPRoute(val string) attribute.KeyValue { + return HTTPRouteKey.String(val) +} + +// HTTPClientIP returns an attribute KeyValue conforming to the +// "http.client_ip" semantic conventions. It represents the IP address of the +// original client behind all proxies, if known (e.g. from +// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). +func HTTPClientIP(val string) attribute.KeyValue { + return HTTPClientIPKey.String(val) +} + +// Attributes that exist for multiple DynamoDB request types. +const ( + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys + // in the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response + // field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { + // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number }, "TableName": "string", + // "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to + // the "aws.dynamodb.item_collection_metrics" semantic conventions. It + // represents the JSON-serialized value of the `ItemCollectionMetrics` + // response field. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": + // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { + // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], + // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, + // "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to + // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It + // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` + // request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming + // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. + // It represents the value of the + // `ProvisionedThroughput.WriteCapacityUnits` request parameter. 
+ // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the + // value of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value + // of the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, + // RelatedItems, ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of + // the `Limit` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value + // of the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of + // the `Select` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") +) + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in +// the `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming +// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It +// represents the JSON-serialized value of the `ItemCollectionMetrics` response +// field. 
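As an illustrative aside (the tracer name, span name, and semconv v1.17.0 import path below are assumptions, not taken from this diff), a client span for a DynamoDB call would carry these request-level constructors roughly like so:

package example

import (
    "context"

    "go.opentelemetry.io/otel"
    semconv "go.opentelemetry.io/otel/semconv/v1.17.0" // assumed semconv version
    "go.opentelemetry.io/otel/trace"
)

// startDynamoQuerySpan opens a CLIENT span annotated with the request
// parameters defined by the constructors in this file.
func startDynamoQuerySpan(ctx context.Context) (context.Context, trace.Span) {
    return otel.Tracer("example/dynamodb").Start(ctx, "DynamoDB.Query",
        trace.WithSpanKind(trace.SpanKindClient),
        trace.WithAttributes(
            semconv.AWSDynamoDBTableNames("Users"),
            semconv.AWSDynamoDBIndexName("name_to_group"),
            semconv.AWSDynamoDBConsistentRead(true),
            semconv.AWSDynamoDBLimit(10),
            semconv.AWSDynamoDBProjection("Title, Price, Color"),
        ),
    )
}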
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.ReadCapacityUnits` request parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.WriteCapacityUnits` request parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of +// the `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to +// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the +// value of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of +// the `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// DynamoDB.CreateTable +const ( + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_indexes" semantic conventions. 
It + // represents the JSON-serialized value of each item of the + // `GlobalSecondaryIndexes` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": + // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ + // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + + // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `LocalSecondaryIndexes` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "IndexARN": "string", "IndexName": "string", + // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") +) + +// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_indexes" semantic +// conventions. It represents the JSON-serialized value of each item of the +// `GlobalSecondaryIndexes` request field. +func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming +// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It +// represents the JSON-serialized value of each item of the +// `LocalSecondaryIndexes` request field. +func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) +} + +// DynamoDB.ListTables +const ( + // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the + // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents + // the value of the `ExclusiveStartTableName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Users', 'CatsTable' + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") + + // AWSDynamoDBTableCountKey is the attribute Key conforming to the + // "aws.dynamodb.table_count" semantic conventions. It represents the + // number of items in the `TableNames` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") +) + +// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming +// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It +// represents the value of the `ExclusiveStartTableName` request parameter. +func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { + return AWSDynamoDBExclusiveStartTableKey.String(val) +} + +// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_count" semantic conventions. It represents the +// number of items in the `TableNames` response parameter.
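Response-derived attributes such as the table count are naturally recorded after the call returns; a minimal sketch (the helper name is hypothetical and the semconv import path an assumption, with a span assumed to be already in flight):

package example

import (
    semconv "go.opentelemetry.io/otel/semconv/v1.17.0" // assumed semconv version
    "go.opentelemetry.io/otel/trace"
)

// recordListTables annotates an existing span with the ListTables request
// cursor and the size of the response.
func recordListTables(span trace.Span, startTable string, tableNames []string) {
    if startTable != "" { // the cursor is only present on paginated requests
        span.SetAttributes(semconv.AWSDynamoDBExclusiveStartTable(startTable))
    }
    span.SetAttributes(semconv.AWSDynamoDBTableCount(len(tableNames)))
}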
+func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// DynamoDB.Query +const ( + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the + // value of the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") +) + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. +func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// DynamoDB.Scan +const ( + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of + // the `Segment` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the + // value of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of + // the `Count` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the + // value of the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") +) + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value +// of the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value +// of the `ScannedCount` response parameter. 
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue { + return AWSDynamoDBScannedCountKey.Int(val) +} + +// DynamoDB.UpdateTable +const ( + // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to + // the "aws.dynamodb.attribute_definitions" semantic conventions. It + // represents the JSON-serialized value of each item in the + // `AttributeDefinitions` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' + AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") + + // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key + // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic + // conventions. It represents the JSON-serialized value of each item in + // the `GlobalSecondaryIndexUpdates` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") +) + +// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming +// to the "aws.dynamodb.attribute_definitions" semantic conventions. It +// represents the JSON-serialized value of each item in the +// `AttributeDefinitions` request field. +func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) +} + +// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic +// conventions. It represents the JSON-serialized value of each item in the +// `GlobalSecondaryIndexUpdates` request field. +func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) +} + +// Semantic conventions to apply when instrumenting the GraphQL implementation. +// They map GraphQL operations to attributes on a Span. +const ( + // GraphqlOperationNameKey is the attribute Key conforming to the + // "graphql.operation.name" semantic conventions. It represents the name of + // the operation being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'findBookByID' + GraphqlOperationNameKey = attribute.Key("graphql.operation.name") + + // GraphqlOperationTypeKey is the attribute Key conforming to the + // "graphql.operation.type" semantic conventions. It represents the type of + // the operation being executed. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'query', 'mutation', 'subscription' + GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") + + // GraphqlDocumentKey is the attribute Key conforming to the + // "graphql.document" semantic conventions. It represents the GraphQL + // document being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'query findBookByID { bookByID(id: ?) { name } }' + // Note: The value may be sanitized to exclude sensitive information.
+ GraphqlDocumentKey = attribute.Key("graphql.document") +) + +var ( + // GraphQL query + GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") + // GraphQL mutation + GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") + // GraphQL subscription + GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") +) + +// GraphqlOperationName returns an attribute KeyValue conforming to the +// "graphql.operation.name" semantic conventions. It represents the name of the +// operation being executed. +func GraphqlOperationName(val string) attribute.KeyValue { + return GraphqlOperationNameKey.String(val) +} + +// GraphqlDocument returns an attribute KeyValue conforming to the +// "graphql.document" semantic conventions. It represents the GraphQL document +// being executed. +func GraphqlDocument(val string) attribute.KeyValue { + return GraphqlDocumentKey.String(val) +} + +// Semantic convention describing per-message attributes populated on messaging +// spans or links. +const ( + // MessagingMessageIDKey is the attribute Key conforming to the + // "messaging.message.id" semantic conventions. It represents a value used + // by the messaging system as an identifier for the message, represented as + // a string. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + MessagingMessageIDKey = attribute.Key("messaging.message.id") + + // MessagingMessageConversationIDKey is the attribute Key conforming to the + // "messaging.message.conversation_id" semantic conventions. It represents + // the [conversation ID](#conversations) identifying the conversation to + // which the message belongs, represented as a string. Sometimes called + // "Correlation ID". + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyConversationID' + MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") + + // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to + // the "messaging.message.payload_size_bytes" semantic conventions. It + // represents the (uncompressed) size of the message payload in bytes. Also + // use this attribute if it is unknown whether the compressed or + // uncompressed payload size is reported. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2738 + MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes") + + // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key + // conforming to the "messaging.message.payload_compressed_size_bytes" + // semantic conventions. It represents the compressed size of the message + // payload in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2048 + MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes") +) + +// MessagingMessageID returns an attribute KeyValue conforming to the +// "messaging.message.id" semantic conventions. It represents a value used by +// the messaging system as an identifier for the message, represented as a +// string. +func MessagingMessageID(val string) attribute.KeyValue { + return MessagingMessageIDKey.String(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming +// to the "messaging.message.conversation_id" semantic conventions. 
It +// represents the [conversation ID](#conversations) identifying the +// conversation to which the message belongs, represented as a string. +// Sometimes called "Correlation ID". +func MessagingMessageConversationID(val string) attribute.KeyValue { + return MessagingMessageConversationIDKey.String(val) +} + +// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming +// to the "messaging.message.payload_size_bytes" semantic conventions. It +// represents the (uncompressed) size of the message payload in bytes. Also use +// this attribute if it is unknown whether the compressed or uncompressed +// payload size is reported. +func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue { + return MessagingMessagePayloadSizeBytesKey.Int(val) +} + +// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue +// conforming to the "messaging.message.payload_compressed_size_bytes" semantic +// conventions. It represents the compressed size of the message payload in +// bytes. +func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue { + return MessagingMessagePayloadCompressedSizeBytesKey.Int(val) +} + +// Semantic convention for attributes that describe messaging destination on +// broker +const ( + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the + // message destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + // Note: Destination name SHOULD uniquely identify a specific queue, topic + // or other entity within the broker. If + // the broker does not have such notion, the destination name SHOULD + // uniquely identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationKindKey is the attribute Key conforming to the + // "messaging.destination.kind" semantic conventions. It represents the + // kind of message destination + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationKindKey = attribute.Key("messaging.destination.kind") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the + // low cardinality representation of the messaging destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/customers/{customerID}' + // Note: Destination names could be constructed from templates. An example + // would be a destination name involving a user name or product id. + // Although the destination name in this case is of high cardinality, the + // underlying template is of low cardinality and can be effectively used + // for grouping and aggregation. + MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") + + // MessagingDestinationTemporaryKey is the attribute Key conforming to the + // "messaging.destination.temporary" semantic conventions. It represents a + // boolean that is true if the message destination is temporary and might + // not exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") + + // MessagingDestinationAnonymousKey is the attribute Key conforming to the + // "messaging.destination.anonymous" semantic conventions. 
It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") +) + +var ( + // A message sent to a queue + MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue") + // A message sent to a topic + MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic") +) + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to +// the "messaging.destination.template" semantic conventions. It represents the +// low cardinality representation of the messaging destination name +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to +// the "messaging.destination.temporary" semantic conventions. It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. +func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to +// the "messaging.destination.anonymous" semantic conventions. It represents a +// boolean that is true if the message destination is anonymous (could be +// unnamed or have auto-generated name). +func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// Semantic convention for attributes that describe messaging source on broker +const ( + // MessagingSourceNameKey is the attribute Key conforming to the + // "messaging.source.name" semantic conventions. It represents the message + // source name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + // Note: Source name SHOULD uniquely identify a specific queue, topic, or + // other entity within the broker. If + // the broker does not have such notion, the source name SHOULD uniquely + // identify the broker. + MessagingSourceNameKey = attribute.Key("messaging.source.name") + + // MessagingSourceKindKey is the attribute Key conforming to the + // "messaging.source.kind" semantic conventions. It represents the kind of + // message source + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingSourceKindKey = attribute.Key("messaging.source.kind") + + // MessagingSourceTemplateKey is the attribute Key conforming to the + // "messaging.source.template" semantic conventions. It represents the low + // cardinality representation of the messaging source name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/customers/{customerID}' + // Note: Source names could be constructed from templates. An example would + // be a source name involving a user name or product id. 
Although the + // source name in this case is of high cardinality, the underlying template + // is of low cardinality and can be effectively used for grouping and + // aggregation. + MessagingSourceTemplateKey = attribute.Key("messaging.source.template") + + // MessagingSourceTemporaryKey is the attribute Key conforming to the + // "messaging.source.temporary" semantic conventions. It represents a + // boolean that is true if the message source is temporary and might not + // exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary") + + // MessagingSourceAnonymousKey is the attribute Key conforming to the + // "messaging.source.anonymous" semantic conventions. It represents a + // boolean that is true if the message source is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous") +) + +var ( + // A message received from a queue + MessagingSourceKindQueue = MessagingSourceKindKey.String("queue") + // A message received from a topic + MessagingSourceKindTopic = MessagingSourceKindKey.String("topic") +) + +// MessagingSourceName returns an attribute KeyValue conforming to the +// "messaging.source.name" semantic conventions. It represents the message +// source name +func MessagingSourceName(val string) attribute.KeyValue { + return MessagingSourceNameKey.String(val) +} + +// MessagingSourceTemplate returns an attribute KeyValue conforming to the +// "messaging.source.template" semantic conventions. It represents the low +// cardinality representation of the messaging source name +func MessagingSourceTemplate(val string) attribute.KeyValue { + return MessagingSourceTemplateKey.String(val) +} + +// MessagingSourceTemporary returns an attribute KeyValue conforming to the +// "messaging.source.temporary" semantic conventions. It represents a boolean +// that is true if the message source is temporary and might not exist anymore +// after messages are processed. +func MessagingSourceTemporary(val bool) attribute.KeyValue { + return MessagingSourceTemporaryKey.Bool(val) +} + +// MessagingSourceAnonymous returns an attribute KeyValue conforming to the +// "messaging.source.anonymous" semantic conventions. It represents a boolean +// that is true if the message source is anonymous (could be unnamed or have +// auto-generated name). +func MessagingSourceAnonymous(val bool) attribute.KeyValue { + return MessagingSourceAnonymousKey.Bool(val) +} + +// General attributes used in messaging systems. +const ( + // MessagingSystemKey is the attribute Key conforming to the + // "messaging.system" semantic conventions. It represents a string + // identifying the messaging system. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' + MessagingSystemKey = attribute.Key("messaging.system") + + // MessagingOperationKey is the attribute Key conforming to the + // "messaging.operation" semantic conventions. It represents a string + // identifying the kind of messaging operation as defined in the [Operation + // names](#operation-names) section above. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: If a custom value is used, it MUST be of low cardinality. 
+ MessagingOperationKey = attribute.Key("messaging.operation") + + // MessagingBatchMessageCountKey is the attribute Key conforming to the + // "messaging.batch.message_count" semantic conventions. It represents the + // number of messages sent, received, or processed in the scope of the + // batching operation. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the span describes an + // operation on a batch of messages.) + // Stability: stable + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on + // spans that operate with a single message. When a messaging client + // library supports both batch and single-message API for the same + // operation, instrumentations SHOULD use `messaging.batch.message_count` + // for batching APIs and SHOULD NOT use it for single-message APIs. + MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") +) + +var ( + // publish + MessagingOperationPublish = MessagingOperationKey.String("publish") + // receive + MessagingOperationReceive = MessagingOperationKey.String("receive") + // process + MessagingOperationProcess = MessagingOperationKey.String("process") +) + +// MessagingSystem returns an attribute KeyValue conforming to the +// "messaging.system" semantic conventions. It represents a string identifying +// the messaging system. +func MessagingSystem(val string) attribute.KeyValue { + return MessagingSystemKey.String(val) +} + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to +// the "messaging.batch.message_count" semantic conventions. It represents the +// number of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// Semantic convention for a consumer of messages received from a messaging +// system +const ( + // MessagingConsumerIDKey is the attribute Key conforming to the + // "messaging.consumer.id" semantic conventions. It represents the + // identifier for the consumer receiving a message. For Kafka, set it to + // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if + // both are present, or only `messaging.kafka.consumer.group`. For brokers, + // such as RabbitMQ and Artemis, set it to the `client_id` of the client + // consuming the message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'mygroup - client-6' + MessagingConsumerIDKey = attribute.Key("messaging.consumer.id") +) + +// MessagingConsumerID returns an attribute KeyValue conforming to the +// "messaging.consumer.id" semantic conventions. It represents the identifier +// for the consumer receiving a message. For Kafka, set it to +// `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both +// are present, or only `messaging.kafka.consumer.group`. For brokers, such as +// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the +// message. +func MessagingConsumerID(val string) attribute.KeyValue { + return MessagingConsumerIDKey.String(val) +} + +// Attributes for RabbitMQ +const ( + // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key + // conforming to the "messaging.rabbitmq.destination.routing_key" semantic + // conventions. It represents the rabbitMQ message routing key. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If not empty.) 
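Putting the general messaging attributes together with the operation enum values above, a consumer-side receive span might look like this sketch (the tracer and span names are illustrative; the semconv import path is an assumption):

package example

import (
    "context"

    "go.opentelemetry.io/otel"
    semconv "go.opentelemetry.io/otel/semconv/v1.17.0" // assumed semconv version
    "go.opentelemetry.io/otel/trace"
)

// startReceiveSpan opens a CONSUMER span for one message pulled from a queue.
func startReceiveSpan(ctx context.Context) (context.Context, trace.Span) {
    return otel.Tracer("example/messaging").Start(ctx, "MyQueue receive",
        trace.WithSpanKind(trace.SpanKindConsumer),
        trace.WithAttributes(
            semconv.MessagingSystem("kafka"),
            semconv.MessagingSourceName("MyQueue"),
            semconv.MessagingOperationReceive, // enum value, already a KeyValue
        ),
    )
}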
+ // Stability: stable + // Examples: 'myKey' + MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") +) + +// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue +// conforming to the "messaging.rabbitmq.destination.routing_key" semantic +// conventions. It represents the rabbitMQ message routing key. +func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { + return MessagingRabbitmqDestinationRoutingKeyKey.String(val) +} + +// Attributes for Apache Kafka +const ( + // MessagingKafkaMessageKeyKey is the attribute Key conforming to the + // "messaging.kafka.message.key" semantic conventions. It represents the + // message keys in Kafka, which are used for grouping alike messages to ensure + // they're processed on the same partition. They differ from + // `messaging.message.id` in that they're not unique. If the key is `null`, + // the attribute MUST NOT be set. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'myKey' + // Note: If the key type is not string, its string representation has to + // be supplied for the attribute. If the key has no unambiguous, canonical + // string form, don't include its value. + MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") + + // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the + // "messaging.kafka.consumer.group" semantic conventions. It represents the + // name of the Kafka Consumer Group that is handling the message. Only + // applies to consumers, not producers. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'my-group' + MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") + + // MessagingKafkaClientIDKey is the attribute Key conforming to the + // "messaging.kafka.client_id" semantic conventions. It represents the + // client ID for the Consumer or Producer that is handling the message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'client-5' + MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") + + // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to + // the "messaging.kafka.destination.partition" semantic conventions. It + // represents the partition the message is sent to. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2 + MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition") + + // MessagingKafkaSourcePartitionKey is the attribute Key conforming to the + // "messaging.kafka.source.partition" semantic conventions. It represents + // the partition the message is received from. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2 + MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition") + + // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the + // "messaging.kafka.message.offset" semantic conventions. It represents the + // offset of a record in the corresponding Kafka partition. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 42 + MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") + + // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the + // "messaging.kafka.message.tombstone" semantic conventions.
It represents + // a boolean that is true if the message is a tombstone. + // + // Type: boolean + // RequirementLevel: ConditionallyRequired (If value is `true`. When + // missing, the value is assumed to be `false`.) + // Stability: stable + MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") +) + +// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the +// "messaging.kafka.message.key" semantic conventions. It represents the +// message keys in Kafka, which are used for grouping alike messages to ensure they're +// processed on the same partition. They differ from `messaging.message.id` in +// that they're not unique. If the key is `null`, the attribute MUST NOT be +// set. +func MessagingKafkaMessageKey(val string) attribute.KeyValue { + return MessagingKafkaMessageKeyKey.String(val) +} + +// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to +// the "messaging.kafka.consumer.group" semantic conventions. It represents the +// name of the Kafka Consumer Group that is handling the message. Only applies +// to consumers, not producers. +func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { + return MessagingKafkaConsumerGroupKey.String(val) +} + +// MessagingKafkaClientID returns an attribute KeyValue conforming to the +// "messaging.kafka.client_id" semantic conventions. It represents the client +// ID for the Consumer or Producer that is handling the message. +func MessagingKafkaClientID(val string) attribute.KeyValue { + return MessagingKafkaClientIDKey.String(val) +} + +// MessagingKafkaDestinationPartition returns an attribute KeyValue +// conforming to the "messaging.kafka.destination.partition" semantic +// conventions. It represents the partition the message is sent to. +func MessagingKafkaDestinationPartition(val int) attribute.KeyValue { + return MessagingKafkaDestinationPartitionKey.Int(val) +} + +// MessagingKafkaSourcePartition returns an attribute KeyValue conforming to +// the "messaging.kafka.source.partition" semantic conventions. It represents +// the partition the message is received from. +func MessagingKafkaSourcePartition(val int) attribute.KeyValue { + return MessagingKafkaSourcePartitionKey.Int(val) +} + +// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to +// the "messaging.kafka.message.offset" semantic conventions. It represents the +// offset of a record in the corresponding Kafka partition. +func MessagingKafkaMessageOffset(val int) attribute.KeyValue { + return MessagingKafkaMessageOffsetKey.Int(val) +} + +// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming +// to the "messaging.kafka.message.tombstone" semantic conventions. It +// represents a boolean that is true if the message is a tombstone. +func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { + return MessagingKafkaMessageTombstoneKey.Bool(val) +} + +// Attributes for Apache RocketMQ +const ( + // MessagingRocketmqNamespaceKey is the attribute Key conforming to the + // "messaging.rocketmq.namespace" semantic conventions. It represents the + // namespace of RocketMQ resources; resources in different namespaces are + // individual. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myNamespace' + MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") + + // MessagingRocketmqClientGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.client_group" semantic conventions.
It represents + // the name of the RocketMQ producer/consumer group that is handling the + // message. The client type is identified by the SpanKind. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myConsumerGroup' + MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") + + // MessagingRocketmqClientIDKey is the attribute Key conforming to the + // "messaging.rocketmq.client_id" semantic conventions. It represents the + // unique identifier for each client. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myhost@8742@s8083jm' + MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id") + + // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key + // conforming to the "messaging.rocketmq.message.delivery_timestamp" + // semantic conventions. It represents the timestamp in milliseconds that + // the delay message is expected to be delivered to consumer. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the message type is delay + // and delay time level is not specified.) + // Stability: stable + // Examples: 1665987217045 + MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") + + // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key + // conforming to the "messaging.rocketmq.message.delay_time_level" semantic + // conventions. It represents the delay time level for delay message, which + // determines the message delay time. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the message type is delay + // and delivery timestamp is not specified.) + // Stability: stable + // Examples: 3 + MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") + + // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.message.group" semantic conventions. It represents + // the message group, which is essential for FIFO messages. Messages that belong to the same + // message group are always processed one by one within the same consumer + // group. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If the message type is FIFO.) + // Stability: stable + // Examples: 'myMessageGroup' + MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") + + // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the + // "messaging.rocketmq.message.type" semantic conventions. It represents + // the type of message. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") + + // MessagingRocketmqMessageTagKey is the attribute Key conforming to the + // "messaging.rocketmq.message.tag" semantic conventions. It represents the + // secondary classifier of message besides topic. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'tagA' + MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") + + // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the + // "messaging.rocketmq.message.keys" semantic conventions. It represents + // the key(s) of message, another way to mark message besides message id.
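As a sketch, the RocketMQ-specific constructors defined above and just below would be layered on top of the general messaging attributes; the attribute values here are illustrative and the semconv import path is an assumption:

package example

import (
    semconv "go.opentelemetry.io/otel/semconv/v1.17.0" // assumed semconv version
    "go.opentelemetry.io/otel/trace"
)

// annotateRocketMQ adds broker- and message-level RocketMQ attributes to an
// existing messaging span.
func annotateRocketMQ(span trace.Span) {
    span.SetAttributes(
        semconv.MessagingRocketmqNamespace("myNamespace"),
        semconv.MessagingRocketmqClientGroup("myConsumerGroup"),
        semconv.MessagingRocketmqMessageTag("tagA"),
        semconv.MessagingRocketmqMessageKeys("keyA", "keyB"),
        semconv.MessagingRocketmqMessageTypeFifo, // enum value for FIFO messages
    )
}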
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'keyA', 'keyB' + MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") + + // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to + // the "messaging.rocketmq.consumption_model" semantic conventions. It + // represents the model of message consumption. This only applies to + // consumer spans. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") +) + +var ( + // Normal message + MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") + // FIFO message + MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") + // Delay message + MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") + // Transaction message + MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") +) + +var ( + // Clustering consumption model + MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") + // Broadcasting consumption model + MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") +) + +// MessagingRocketmqNamespace returns an attribute KeyValue conforming to +// the "messaging.rocketmq.namespace" semantic conventions. It represents the +// namespace of RocketMQ resources; resources in different namespaces are +// individual. +func MessagingRocketmqNamespace(val string) attribute.KeyValue { + return MessagingRocketmqNamespaceKey.String(val) +} + +// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to +// the "messaging.rocketmq.client_group" semantic conventions. It represents +// the name of the RocketMQ producer/consumer group that is handling the +// message. The client type is identified by the SpanKind. +func MessagingRocketmqClientGroup(val string) attribute.KeyValue { + return MessagingRocketmqClientGroupKey.String(val) +} + +// MessagingRocketmqClientID returns an attribute KeyValue conforming to the +// "messaging.rocketmq.client_id" semantic conventions. It represents the +// unique identifier for each client. +func MessagingRocketmqClientID(val string) attribute.KeyValue { + return MessagingRocketmqClientIDKey.String(val) +} + +// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic +// conventions. It represents the timestamp in milliseconds that the delay +// message is expected to be delivered to consumer. +func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { + return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) +} + +// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delay_time_level" semantic +// conventions. It represents the delay time level for delay message, which +// determines the message delay time. +func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { + return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) +} + +// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.group" semantic conventions. It represents +// the message group, which is essential for FIFO messages.
Messages that belong to the same +// message group are always processed one by one within the same consumer +// group. +func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { + return MessagingRocketmqMessageGroupKey.String(val) +} + +// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.tag" semantic conventions. It represents the +// secondary classifier of message besides topic. +func MessagingRocketmqMessageTag(val string) attribute.KeyValue { + return MessagingRocketmqMessageTagKey.String(val) +} + +// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.keys" semantic conventions. It represents +// the key(s) of message, another way to mark message besides message id. +func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { + return MessagingRocketmqMessageKeysKey.StringSlice(val) +} + +// Semantic conventions for remote procedure calls. +const ( + // RPCSystemKey is the attribute Key conforming to the "rpc.system" + // semantic conventions. It represents a string identifying the remoting + // system. See below for a list of well-known identifiers. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + RPCSystemKey = attribute.Key("rpc.system") + + // RPCServiceKey is the attribute Key conforming to the "rpc.service" + // semantic conventions. It represents the full (logical) name of the + // service being called, including its package name, if applicable. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing + // class. The `code.namespace` attribute may be used to store the latter + // (despite the attribute name, it may include a class name; e.g., class + // with method actually executing the call on the server side, RPC client + // stub class on the client side). + RPCServiceKey = attribute.Key("rpc.service") + + // RPCMethodKey is the attribute Key conforming to the "rpc.method" + // semantic conventions. It represents the name of the (logical) method + // being called, must be equal to the $method part in the span name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The `code.function` attribute may be used to store the + // latter (e.g., method actually executing the call on the server side, RPC + // client stub method on the client side). + RPCMethodKey = attribute.Key("rpc.method") +) + +var ( + // gRPC + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") +) + +// RPCService returns an attribute KeyValue conforming to the "rpc.service" +// semantic conventions. It represents the full (logical) name of the service +// being called, including its package name, if applicable. +func RPCService(val string) attribute.KeyValue { + return RPCServiceKey.String(val) +} + +// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" +// semantic conventions. 
It represents the name of the (logical) method being +// called, must be equal to the $method part in the span name. +func RPCMethod(val string) attribute.KeyValue { + return RPCMethodKey.String(val) +} + +// Tech-specific attributes for gRPC. +const ( + // RPCGRPCStatusCodeKey is the attribute Key conforming to the + // "rpc.grpc.status_code" semantic conventions. It represents the [numeric + // status + // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of + // the gRPC request. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") +) + +var ( + // OK + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). +const ( + // RPCJsonrpcVersionKey is the attribute Key conforming to the + // "rpc.jsonrpc.version" semantic conventions. It represents the protocol + // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 + // does not specify this, the value can be omitted. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If other than the default + // version (`1.0`)) + // Stability: stable + // Examples: '2.0', '1.0' + RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") + + // RPCJsonrpcRequestIDKey is the attribute Key conforming to the + // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` + // property of request or response. Since protocol allows id to be int, + // string, `null` or missing (for notifications), value is expected to be + // cast to string for simplicity. Use empty string in case of `null` value. + // Omit entirely if this is a notification. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10', 'request-7', '' + RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + + // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_code" semantic conventions. It represents the + // `error.code` property of response if it is an error response. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If response is not successful.) 
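A gRPC client instrumentation would combine the RPC attributes with one of the status-code enum values above, roughly as in this sketch (the helper name and service/method values are hypothetical; the semconv import path is an assumption):

package example

import (
    semconv "go.opentelemetry.io/otel/semconv/v1.17.0" // assumed semconv version
    "go.opentelemetry.io/otel/trace"
)

// recordGRPCCall tags a client span with the logical service/method and the
// numeric gRPC status code returned by the server.
func recordGRPCCall(span trace.Span, grpcCode int) {
    span.SetAttributes(
        semconv.RPCSystemGRPC, // rpc.system = "grpc"
        semconv.RPCService("myservice.EchoService"),
        semconv.RPCMethod("Echo"),
        semconv.RPCGRPCStatusCodeKey.Int(grpcCode),
    )
}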
+ // Stability: stable + // Examples: -32700, 100 + RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + + // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_message" semantic conventions. It represents the + // `error.message` property of response if it is an error response. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Parse error', 'User already exists' + RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") +) + +// RPCJsonrpcVersion returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.version" semantic conventions. It represents the protocol +// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 +// does not specify this, the value can be omitted. +func RPCJsonrpcVersion(val string) attribute.KeyValue { + return RPCJsonrpcVersionKey.String(val) +} + +// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` +// property of request or response. Since protocol allows id to be int, string, +// `null` or missing (for notifications), value is expected to be cast to +// string for simplicity. Use empty string in case of `null` value. Omit +// entirely if this is a notification. +func RPCJsonrpcRequestID(val string) attribute.KeyValue { + return RPCJsonrpcRequestIDKey.String(val) +} + +// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_code" semantic conventions. It represents the +// `error.code` property of response if it is an error response. +func RPCJsonrpcErrorCode(val int) attribute.KeyValue { + return RPCJsonrpcErrorCodeKey.Int(val) +} + +// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_message" semantic conventions. It represents the +// `error.message` property of response if it is an error response. +func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { + return RPCJsonrpcErrorMessageKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go index 273d58e0014..9c0b720a4d6 100644 --- a/vendor/go.opentelemetry.io/otel/trace/config.go +++ b/vendor/go.opentelemetry.io/otel/trace/config.go @@ -213,7 +213,7 @@ var _ SpanStartEventOption = attributeOption{} // WithAttributes adds the attributes related to a span life-cycle event. // These attributes are used to describe the work a Span represents when this -// option is provided to a Span's start or end events. Otherwise, these +// option is provided to a Span's start event. Otherwise, these // attributes provide additional information about the event being recorded // (e.g. error, state change, processing progress, system event). // diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index 6d3c7b1f40e..eb22002d824 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. 
func Version() string { - return "1.31.0" + return "1.34.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index cdebdb5eb78..ce4fe59b0e4 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -3,19 +3,13 @@ module-sets: stable-v1: - version: v1.31.0 + version: v1.34.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus - go.opentelemetry.io/otel/bridge/opencensus/test - go.opentelemetry.io/otel/bridge/opentracing - go.opentelemetry.io/otel/bridge/opentracing/test - - go.opentelemetry.io/otel/example/dice - - go.opentelemetry.io/otel/example/namedtracer - - go.opentelemetry.io/otel/example/opencensus - - go.opentelemetry.io/otel/example/otel-collector - - go.opentelemetry.io/otel/example/passthrough - - go.opentelemetry.io/otel/example/zipkin - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp - go.opentelemetry.io/otel/exporters/otlp/otlptrace @@ -29,12 +23,11 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.53.0 + version: v0.56.0 modules: - - go.opentelemetry.io/otel/example/prometheus - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.7.0 + version: v0.10.0 modules: - go.opentelemetry.io/otel/log - go.opentelemetry.io/otel/sdk/log @@ -42,7 +35,7 @@ module-sets: - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp - go.opentelemetry.io/otel/exporters/stdout/stdoutlog experimental-schema: - version: v0.0.10 + version: v0.0.12 modules: - go.opentelemetry.io/otel/schema excluded-modules: diff --git a/vendor/go.opentelemetry.io/proto/otlp/logs/v1/logs.pb.go b/vendor/go.opentelemetry.io/proto/otlp/logs/v1/logs.pb.go index e0246bb0786..9b47481ce0e 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/logs/v1/logs.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/logs/v1/logs.pb.go @@ -284,7 +284,8 @@ type ResourceLogs struct { // A list of ScopeLogs that originate from a resource. ScopeLogs []*ScopeLogs `protobuf:"bytes,2,rep,name=scope_logs,json=scopeLogs,proto3" json:"scope_logs,omitempty"` // The Schema URL, if known. This is the identifier of the Schema that the resource data - // is recorded in. To learn more about Schema URL see + // is recorded in. Notably, the last part of the URL path is the version number of the + // schema: http[s]://server[:port]/path/. To learn more about Schema URL see // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url // This schema_url applies to the data in the "resource" field. It does not apply // to the data in the "scope_logs" field which have their own schema_url field. @@ -357,7 +358,8 @@ type ScopeLogs struct { // A list of log records. LogRecords []*LogRecord `protobuf:"bytes,2,rep,name=log_records,json=logRecords,proto3" json:"log_records,omitempty"` // The Schema URL, if known. This is the identifier of the Schema that the log data - // is recorded in. To learn more about Schema URL see + // is recorded in. Notably, the last part of the URL path is the version number of the + // schema: http[s]://server[:port]/path/. To learn more about Schema URL see // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url // This schema_url applies to all logs in the "logs" field. 
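The expanded schema_url comments all make the same point: the schema version is simply the final path segment of the Schema URL. A stdlib-only sketch of pulling it out (helper name invented):

package main

import (
	"fmt"
	"strings"
)

// schemaVersion returns the trailing path segment of a Schema URL,
// e.g. "https://opentelemetry.io/schemas/1.34.0" -> "1.34.0".
func schemaVersion(schemaURL string) string {
	i := strings.LastIndex(schemaURL, "/")
	if i < 0 || i == len(schemaURL)-1 {
		return ""
	}
	return schemaURL[i+1:]
}

func main() {
	fmt.Println(schemaVersion("https://opentelemetry.io/schemas/1.34.0")) // 1.34.0
}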
SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` @@ -489,6 +491,19 @@ type LogRecord struct { // - the field is not present, // - the field contains an invalid value. SpanId []byte `protobuf:"bytes,10,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` + // A unique identifier of event category/type. + // All events with the same event_name are expected to conform to the same + // schema for both their attributes and their body. + // + // Recommended to be fully qualified and short (no longer than 256 characters). + // + // Presence of event_name on the log record identifies this record + // as an event. + // + // [Optional]. + // + // Status: [Development] + EventName string `protobuf:"bytes,12,opt,name=event_name,json=eventName,proto3" json:"event_name,omitempty"` } func (x *LogRecord) Reset() { @@ -593,6 +608,13 @@ func (x *LogRecord) GetSpanId() []byte { return nil } +func (x *LogRecord) GetEventName() string { + if x != nil { + return x.EventName + } + return "" +} + var File_opentelemetry_proto_logs_v1_logs_proto protoreflect.FileDescriptor var file_opentelemetry_proto_logs_v1_logs_proto_rawDesc = []byte{ @@ -636,7 +658,7 @@ var file_opentelemetry_proto_logs_v1_logs_proto_rawDesc = []byte{ 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x0a, 0x6c, 0x6f, 0x67, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x55, 0x72, 0x6c, 0x22, 0xf3, 0x03, 0x0a, 0x09, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x63, 0x6f, + 0x61, 0x55, 0x72, 0x6c, 0x22, 0x92, 0x04, 0x0a, 0x09, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x06, 0x52, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x35, 0x0a, 0x17, 0x6f, 0x62, 0x73, 0x65, @@ -667,65 +689,67 @@ var file_opentelemetry_proto_logs_v1_logs_proto_rawDesc = []byte{ 0x12, 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, 0x70, - 0x61, 0x6e, 0x49, 0x64, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x2a, 0xc3, 0x05, 0x0a, 0x0e, 0x53, - 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1f, 0x0a, - 0x1b, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, - 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x19, - 0x0a, 0x15, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, - 0x52, 0x5f, 0x54, 0x52, 0x41, 0x43, 0x45, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x45, 0x56, - 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x54, 0x52, 0x41, - 0x43, 0x45, 0x32, 0x10, 0x02, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, - 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x54, 0x52, 0x41, 0x43, 0x45, 0x33, 0x10, - 0x03, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, - 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x54, 0x52, 0x41, 0x43, 0x45, 0x34, 0x10, 0x04, 0x12, 0x19, 0x0a, + 0x61, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 
0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x4e, + 0x61, 0x6d, 0x65, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x2a, 0xc3, 0x05, 0x0a, 0x0e, 0x53, 0x65, + 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1f, 0x0a, 0x1b, + 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, - 0x5f, 0x44, 0x45, 0x42, 0x55, 0x47, 0x10, 0x05, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x45, 0x56, 0x45, - 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x44, 0x45, 0x42, 0x55, - 0x47, 0x32, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, - 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x44, 0x45, 0x42, 0x55, 0x47, 0x33, 0x10, 0x07, + 0x5f, 0x54, 0x52, 0x41, 0x43, 0x45, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x45, 0x56, 0x45, + 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x54, 0x52, 0x41, 0x43, + 0x45, 0x32, 0x10, 0x02, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, + 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x54, 0x52, 0x41, 0x43, 0x45, 0x33, 0x10, 0x03, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, - 0x42, 0x45, 0x52, 0x5f, 0x44, 0x45, 0x42, 0x55, 0x47, 0x34, 0x10, 0x08, 0x12, 0x18, 0x0a, 0x14, - 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, - 0x49, 0x4e, 0x46, 0x4f, 0x10, 0x09, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, - 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x49, 0x4e, 0x46, 0x4f, 0x32, 0x10, - 0x0a, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, - 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x49, 0x4e, 0x46, 0x4f, 0x33, 0x10, 0x0b, 0x12, 0x19, 0x0a, 0x15, - 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, - 0x49, 0x4e, 0x46, 0x4f, 0x34, 0x10, 0x0c, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x45, 0x56, 0x45, 0x52, - 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x57, 0x41, 0x52, 0x4e, 0x10, - 0x0d, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, - 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x57, 0x41, 0x52, 0x4e, 0x32, 0x10, 0x0e, 0x12, 0x19, 0x0a, 0x15, + 0x42, 0x45, 0x52, 0x5f, 0x54, 0x52, 0x41, 0x43, 0x45, 0x34, 0x10, 0x04, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, - 0x57, 0x41, 0x52, 0x4e, 0x33, 0x10, 0x0f, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x45, 0x56, 0x45, 0x52, - 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x57, 0x41, 0x52, 0x4e, 0x34, - 0x10, 0x10, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, - 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x11, 0x12, 0x1a, 0x0a, - 0x16, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, - 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x32, 0x10, 0x12, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x45, 0x56, - 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x45, 0x52, 0x52, - 0x4f, 0x52, 0x33, 0x10, 0x13, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x45, 0x56, 
0x45, 0x52, 0x49, 0x54, - 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x34, 0x10, - 0x14, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, - 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x46, 0x41, 0x54, 0x41, 0x4c, 0x10, 0x15, 0x12, 0x1a, 0x0a, 0x16, + 0x44, 0x45, 0x42, 0x55, 0x47, 0x10, 0x05, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x45, 0x56, 0x45, 0x52, + 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x44, 0x45, 0x42, 0x55, 0x47, + 0x32, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, + 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x44, 0x45, 0x42, 0x55, 0x47, 0x33, 0x10, 0x07, 0x12, + 0x1a, 0x0a, 0x16, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, + 0x45, 0x52, 0x5f, 0x44, 0x45, 0x42, 0x55, 0x47, 0x34, 0x10, 0x08, 0x12, 0x18, 0x0a, 0x14, 0x53, + 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x49, + 0x4e, 0x46, 0x4f, 0x10, 0x09, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, + 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x49, 0x4e, 0x46, 0x4f, 0x32, 0x10, 0x0a, + 0x12, 0x19, 0x0a, 0x15, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, + 0x42, 0x45, 0x52, 0x5f, 0x49, 0x4e, 0x46, 0x4f, 0x33, 0x10, 0x0b, 0x12, 0x19, 0x0a, 0x15, 0x53, + 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x49, + 0x4e, 0x46, 0x4f, 0x34, 0x10, 0x0c, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, + 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x57, 0x41, 0x52, 0x4e, 0x10, 0x0d, + 0x12, 0x19, 0x0a, 0x15, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, + 0x42, 0x45, 0x52, 0x5f, 0x57, 0x41, 0x52, 0x4e, 0x32, 0x10, 0x0e, 0x12, 0x19, 0x0a, 0x15, 0x53, + 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x57, + 0x41, 0x52, 0x4e, 0x33, 0x10, 0x0f, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, + 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x57, 0x41, 0x52, 0x4e, 0x34, 0x10, + 0x10, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, + 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x11, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, - 0x46, 0x41, 0x54, 0x41, 0x4c, 0x32, 0x10, 0x16, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x45, 0x56, 0x45, - 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x46, 0x41, 0x54, 0x41, - 0x4c, 0x33, 0x10, 0x17, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, - 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x46, 0x41, 0x54, 0x41, 0x4c, 0x34, 0x10, 0x18, - 0x2a, 0x59, 0x0a, 0x0e, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x46, 0x6c, 0x61, - 0x67, 0x73, 0x12, 0x1f, 0x0a, 0x1b, 0x4c, 0x4f, 0x47, 0x5f, 0x52, 0x45, 0x43, 0x4f, 0x52, 0x44, - 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x44, 0x4f, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x55, 0x53, - 0x45, 0x10, 0x00, 0x12, 0x26, 0x0a, 0x21, 0x4c, 0x4f, 0x47, 0x5f, 0x52, 0x45, 0x43, 0x4f, 0x52, - 0x44, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x54, 0x52, 0x41, 0x43, 0x45, 0x5f, 0x46, 0x4c, - 0x41, 0x47, 0x53, 0x5f, 0x4d, 0x41, 0x53, 0x4b, 0x10, 0xff, 0x01, 0x42, 0x73, 0x0a, 0x1e, 0x69, - 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, - 
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6c, 0x6f, 0x67, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x09, 0x4c, - 0x6f, 0x67, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x26, 0x67, 0x6f, 0x2e, 0x6f, - 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x6c, 0x6f, 0x67, 0x73, 0x2f, - 0x76, 0x31, 0xaa, 0x02, 0x1b, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, - 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x6f, 0x67, 0x73, 0x2e, 0x56, 0x31, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x45, 0x52, 0x52, 0x4f, 0x52, 0x32, 0x10, 0x12, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x45, 0x56, 0x45, + 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, + 0x52, 0x33, 0x10, 0x13, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, + 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x34, 0x10, 0x14, + 0x12, 0x19, 0x0a, 0x15, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, + 0x42, 0x45, 0x52, 0x5f, 0x46, 0x41, 0x54, 0x41, 0x4c, 0x10, 0x15, 0x12, 0x1a, 0x0a, 0x16, 0x53, + 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x46, + 0x41, 0x54, 0x41, 0x4c, 0x32, 0x10, 0x16, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x45, 0x56, 0x45, 0x52, + 0x49, 0x54, 0x59, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x46, 0x41, 0x54, 0x41, 0x4c, + 0x33, 0x10, 0x17, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54, 0x59, 0x5f, + 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x46, 0x41, 0x54, 0x41, 0x4c, 0x34, 0x10, 0x18, 0x2a, + 0x59, 0x0a, 0x0e, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x46, 0x6c, 0x61, 0x67, + 0x73, 0x12, 0x1f, 0x0a, 0x1b, 0x4c, 0x4f, 0x47, 0x5f, 0x52, 0x45, 0x43, 0x4f, 0x52, 0x44, 0x5f, + 0x46, 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x44, 0x4f, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x55, 0x53, 0x45, + 0x10, 0x00, 0x12, 0x26, 0x0a, 0x21, 0x4c, 0x4f, 0x47, 0x5f, 0x52, 0x45, 0x43, 0x4f, 0x52, 0x44, + 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x54, 0x52, 0x41, 0x43, 0x45, 0x5f, 0x46, 0x4c, 0x41, + 0x47, 0x53, 0x5f, 0x4d, 0x41, 0x53, 0x4b, 0x10, 0xff, 0x01, 0x42, 0x73, 0x0a, 0x1e, 0x69, 0x6f, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6c, 0x6f, 0x67, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x09, 0x4c, 0x6f, + 0x67, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x26, 0x67, 0x6f, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x6c, 0x6f, 0x67, 0x73, 0x2f, 0x76, + 0x31, 0xaa, 0x02, 0x1b, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, + 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x6f, 0x67, 0x73, 0x2e, 0x56, 0x31, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go b/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go index bca86dc4422..8799d6ba251 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go @@ -210,6 +210,24 @@ func (DataPointFlags) EnumDescriptor() ([]byte, []int) { // storage, OR can be embedded by other protocols that transfer OTLP metrics // data but do not implement the OTLP 
protocol. // +// MetricsData +// └─── ResourceMetrics +// ├── Resource +// ├── SchemaURL +// └── ScopeMetrics +// ├── Scope +// ├── SchemaURL +// └── Metric +// ├── Name +// ├── Description +// ├── Unit +// └── data +// ├── Gauge +// ├── Sum +// ├── Histogram +// ├── ExponentialHistogram +// └── Summary +// // The main difference between this message and collector protocol is that // in this message there will not be any "control" or "metadata" specific to // OTLP protocol. @@ -280,7 +298,8 @@ type ResourceMetrics struct { // A list of metrics that originate from a resource. ScopeMetrics []*ScopeMetrics `protobuf:"bytes,2,rep,name=scope_metrics,json=scopeMetrics,proto3" json:"scope_metrics,omitempty"` // The Schema URL, if known. This is the identifier of the Schema that the resource data - // is recorded in. To learn more about Schema URL see + // is recorded in. Notably, the last part of the URL path is the version number of the + // schema: http[s]://server[:port]/path/. To learn more about Schema URL see // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url // This schema_url applies to the data in the "resource" field. It does not apply // to the data in the "scope_metrics" field which have their own schema_url field. @@ -353,7 +372,8 @@ type ScopeMetrics struct { // A list of metrics that originate from an instrumentation library. Metrics []*Metric `protobuf:"bytes,2,rep,name=metrics,proto3" json:"metrics,omitempty"` // The Schema URL, if known. This is the identifier of the Schema that the metric data - // is recorded in. To learn more about Schema URL see + // is recorded in. Notably, the last part of the URL path is the version number of the + // schema: http[s]://server[:port]/path/. To learn more about Schema URL see // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url // This schema_url applies to all metrics in the "metrics" field. SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` @@ -417,7 +437,6 @@ func (x *ScopeMetrics) GetSchemaUrl() string { // // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md // -// // The data model and relation between entities is shown in the // diagram below. Here, "DataPoint" is the term used to refer to any // one of the specific data point value types, and "points" is the term used @@ -429,7 +448,7 @@ func (x *ScopeMetrics) GetSchemaUrl() string { // - DataPoint contains timestamps, attributes, and one of the possible value type // fields. // -// Metric +// Metric // +------------+ // |name | // |description | @@ -914,6 +933,9 @@ func (x *ExponentialHistogram) GetAggregationTemporality() AggregationTemporalit // data type. These data points cannot always be merged in a meaningful way. // While they can be useful in some applications, histogram data points are // recommended for new applications. +// Summary metrics do not have an aggregation temporality field. This is +// because the count and sum fields of a SummaryDataPoint are assumed to be +// cumulative values. type Summary struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1145,7 +1167,7 @@ type HistogramDataPoint struct { // events, and is assumed to be monotonic over the values of these events. // Negative events *can* be recorded, but sum should not be filled out when // doing so. 
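The new Summary wording is worth making concrete: count and sum are cumulative, so they only ever grow between successive reports of the same timeseries. A sketch built directly against the generated types, with made-up values:

package example

import (
	"time"

	metricspb "go.opentelemetry.io/proto/otlp/metrics/v1"
)

func summaryPoint() *metricspb.SummaryDataPoint {
	return &metricspb.SummaryDataPoint{
		TimeUnixNano: uint64(time.Now().UnixNano()),
		Count:        42,     // cumulative event count since the start time
		Sum:          1234.5, // cumulative sum; left unset if negative events occur
		QuantileValues: []*metricspb.SummaryDataPoint_ValueAtQuantile{
			{Quantile: 0.5, Value: 21.0},
			{Quantile: 0.99, Value: 60.0},
		},
	}
}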
This is specifically to enforce compatibility w/ OpenMetrics, - // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram + // see: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#histogram Sum *float64 `protobuf:"fixed64,5,opt,name=sum,proto3,oneof" json:"sum,omitempty"` // bucket_counts is an optional field contains the count values of histogram // for each bucket. @@ -1327,7 +1349,7 @@ type ExponentialHistogramDataPoint struct { // events, and is assumed to be monotonic over the values of these events. // Negative events *can* be recorded, but sum should not be filled out when // doing so. This is specifically to enforce compatibility w/ OpenMetrics, - // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram + // see: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#histogram Sum *float64 `protobuf:"fixed64,5,opt,name=sum,proto3,oneof" json:"sum,omitempty"` // scale describes the resolution of the histogram. Boundaries are // located at powers of the base, where: @@ -1508,7 +1530,8 @@ func (x *ExponentialHistogramDataPoint) GetZeroThreshold() float64 { } // SummaryDataPoint is a single data point in a timeseries that describes the -// time-varying values of a Summary metric. +// time-varying values of a Summary metric. The count and sum fields represent +// cumulative values. type SummaryDataPoint struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1539,7 +1562,7 @@ type SummaryDataPoint struct { // events, and is assumed to be monotonic over the values of these events. // Negative events *can* be recorded, but sum should not be filled out when // doing so. This is specifically to enforce compatibility w/ OpenMetrics, - // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#summary + // see: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#summary Sum float64 `protobuf:"fixed64,5,opt,name=sum,proto3" json:"sum,omitempty"` // (Optional) list of values at different quantiles of the distribution calculated // from the current snapshot. The quantiles must be strictly increasing. diff --git a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go index d7099c35bc4..b342a0a9401 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go @@ -311,7 +311,8 @@ type ResourceSpans struct { // A list of ScopeSpans that originate from a resource. ScopeSpans []*ScopeSpans `protobuf:"bytes,2,rep,name=scope_spans,json=scopeSpans,proto3" json:"scope_spans,omitempty"` // The Schema URL, if known. This is the identifier of the Schema that the resource data - // is recorded in. To learn more about Schema URL see + // is recorded in. Notably, the last part of the URL path is the version number of the + // schema: http[s]://server[:port]/path/. To learn more about Schema URL see // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url // This schema_url applies to the data in the "resource" field. It does not apply // to the data in the "scope_spans" field which have their own schema_url field. @@ -384,7 +385,8 @@ type ScopeSpans struct { // A list of Spans that originate from an instrumentation scope. 
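The two schema_url fields scope differently, which is easy to miss: the resource-level one covers only the Resource attributes, while the scope-level one covers the spans themselves. A sketch against the generated trace types, URLs illustrative:

package example

import tracepb "go.opentelemetry.io/proto/otlp/trace/v1"

func resourceSpans() *tracepb.ResourceSpans {
	return &tracepb.ResourceSpans{
		// Applies only to the data in the Resource field.
		SchemaUrl: "https://opentelemetry.io/schemas/1.34.0",
		ScopeSpans: []*tracepb.ScopeSpans{{
			// Applies to all spans and span events in Spans.
			SchemaUrl: "https://opentelemetry.io/schemas/1.34.0",
		}},
	}
}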
Spans []*Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"` // The Schema URL, if known. This is the identifier of the Schema that the span data - // is recorded in. To learn more about Schema URL see + // is recorded in. Notably, the last part of the URL path is the version number of the + // schema: http[s]://server[:port]/path/. To learn more about Schema URL see // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url // This schema_url applies to all spans and span events in the "spans" field. SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` diff --git a/vendor/golang.org/x/crypto/pkcs12/crypto.go b/vendor/golang.org/x/crypto/pkcs12/crypto.go index 96f4a1a56ec..212538cb5a8 100644 --- a/vendor/golang.org/x/crypto/pkcs12/crypto.go +++ b/vendor/golang.org/x/crypto/pkcs12/crypto.go @@ -26,7 +26,7 @@ type pbeCipher interface { create(key []byte) (cipher.Block, error) // deriveKey returns a key derived from the given password and salt. deriveKey(salt, password []byte, iterations int) []byte - // deriveKey returns an IV derived from the given password and salt. + // deriveIV returns an IV derived from the given password and salt. deriveIV(salt, password []byte, iterations int) []byte } diff --git a/vendor/golang.org/x/exp/LICENSE b/vendor/golang.org/x/exp/LICENSE index 6a66aea5eaf..2a7cf70da6e 100644 --- a/vendor/golang.org/x/exp/LICENSE +++ b/vendor/golang.org/x/exp/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go index b67897f76b5..f58bbc7ba4d 100644 --- a/vendor/golang.org/x/exp/slices/sort.go +++ b/vendor/golang.org/x/exp/slices/sort.go @@ -22,10 +22,12 @@ func Sort[S ~[]E, E constraints.Ordered](x S) { // SortFunc sorts the slice x in ascending order as determined by the cmp // function. This sort is not guaranteed to be stable. // cmp(a, b) should return a negative number when a < b, a positive number when -// a > b and zero when a == b. +// a > b and zero when a == b or when a is not comparable to b in the sense +// of the formal definition of Strict Weak Ordering. // // SortFunc requires that cmp is a strict weak ordering. // See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings. +// To indicate 'uncomparable', return 0 from the function. func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { n := len(x) pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp) diff --git a/vendor/golang.org/x/mod/LICENSE b/vendor/golang.org/x/mod/LICENSE index 6a66aea5eaf..2a7cf70da6e 100644 --- a/vendor/golang.org/x/mod/LICENSE +++ b/vendor/golang.org/x/mod/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. 
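The sharpened SortFunc contract above (return 0 when a and b are equal or not comparable under the strict weak ordering) reads like this in practice; the element type here is invented for illustration:

package example

import "golang.org/x/exp/slices"

type job struct {
	priority int
	name     string
}

func sortJobs(js []job) {
	slices.SortFunc(js, func(a, b job) int {
		switch {
		case a.priority < b.priority:
			return -1
		case a.priority > b.priority:
			return 1
		default:
			return 0 // equal, or 'uncomparable' per the updated docs
		}
	})
}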
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go new file mode 100644 index 00000000000..cf66309c4a8 --- /dev/null +++ b/vendor/golang.org/x/net/context/context.go @@ -0,0 +1,56 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package context defines the Context type, which carries deadlines, +// cancelation signals, and other request-scoped values across API boundaries +// and between processes. +// As of Go 1.7 this package is available in the standard library under the +// name context. https://golang.org/pkg/context. +// +// Incoming requests to a server should create a Context, and outgoing calls to +// servers should accept a Context. The chain of function calls between must +// propagate the Context, optionally replacing it with a modified copy created +// using WithDeadline, WithTimeout, WithCancel, or WithValue. +// +// Programs that use Contexts should follow these rules to keep interfaces +// consistent across packages and enable static analysis tools to check context +// propagation: +// +// Do not store Contexts inside a struct type; instead, pass a Context +// explicitly to each function that needs it. The Context should be the first +// parameter, typically named ctx: +// +// func DoSomething(ctx context.Context, arg Arg) error { +// // ... use ctx ... +// } +// +// Do not pass a nil Context, even if a function permits it. Pass context.TODO +// if you are unsure about which Context to use. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +// +// The same Context may be passed to functions running in different goroutines; +// Contexts are safe for simultaneous use by multiple goroutines. +// +// See http://blog.golang.org/context for example code for a server that uses +// Contexts. +package context // import "golang.org/x/net/context" + +// Background returns a non-nil, empty Context. It is never canceled, has no +// values, and has no deadline. It is typically used by the main function, +// initialization, and tests, and as the top-level Context for incoming +// requests. +func Background() Context { + return background +} + +// TODO returns a non-nil, empty Context. Code should use context.TODO when +// it's unclear which Context to use or it is not yet available (because the +// surrounding function has not yet been extended to accept a Context +// parameter). TODO is recognized by static analysis tools that determine +// whether Contexts are propagated correctly in a program. 
+func TODO() Context { + return todo +} diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go new file mode 100644 index 00000000000..0c1b8679376 --- /dev/null +++ b/vendor/golang.org/x/net/context/go17.go @@ -0,0 +1,72 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.7 + +package context + +import ( + "context" // standard library's context, as of Go 1.7 + "time" +) + +var ( + todo = context.TODO() + background = context.Background() +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = context.Canceled + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = context.DeadlineExceeded + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + ctx, f := context.WithCancel(parent) + return ctx, f +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + ctx, f := context.WithDeadline(parent, deadline) + return ctx, f +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return context.WithValue(parent, key, val) +} diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go new file mode 100644 index 00000000000..e31e35a9045 --- /dev/null +++ b/vendor/golang.org/x/net/context/go19.go @@ -0,0 +1,20 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
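The go1.9 variant just below declares Context and CancelFunc as type aliases for the standard library's, so values flow between the two packages with no conversion. A minimal interop sketch (helper names invented):

package example

import (
	stdctx "context"
	"time"

	netctx "golang.org/x/net/context"
)

func takesStd(ctx stdctx.Context) { _ = ctx }

func demo() {
	// On Go 1.9+ netctx.Context is an alias for stdctx.Context.
	ctx, cancel := netctx.WithTimeout(netctx.Background(), 100*time.Millisecond)
	defer cancel()
	takesStd(ctx) // compiles without conversion
}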
+ +//go:build go1.9 + +package context + +import "context" // standard library's context, as of Go 1.7 + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context = context.Context + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc = context.CancelFunc diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go new file mode 100644 index 00000000000..065ff3dfa52 --- /dev/null +++ b/vendor/golang.org/x/net/context/pre_go17.go @@ -0,0 +1,300 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.7 + +package context + +import ( + "errors" + "fmt" + "sync" + "time" +) + +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. +type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case background: + return "context.Background" + case todo: + return "context.TODO" + } + return "unknown empty Context" +} + +var ( + background = new(emptyCtx) + todo = new(emptyCtx) +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = errors.New("context canceled") + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = errors.New("context deadline exceeded") + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + c := newCancelCtx(parent) + propagateCancel(parent, c) + return c, func() { c.cancel(true, Canceled) } +} + +// newCancelCtx returns an initialized cancelCtx. +func newCancelCtx(parent Context) *cancelCtx { + return &cancelCtx{ + Context: parent, + done: make(chan struct{}), + } +} + +// propagateCancel arranges for child to be canceled when parent is. +func propagateCancel(parent Context, child canceler) { + if parent.Done() == nil { + return // parent is never canceled + } + if p, ok := parentCancelCtx(parent); ok { + p.mu.Lock() + if p.err != nil { + // parent has already been canceled + child.cancel(false, p.err) + } else { + if p.children == nil { + p.children = make(map[canceler]bool) + } + p.children[child] = true + } + p.mu.Unlock() + } else { + go func() { + select { + case <-parent.Done(): + child.cancel(false, parent.Err()) + case <-child.Done(): + } + }() + } +} + +// parentCancelCtx follows a chain of parent references until it finds a +// *cancelCtx. This function understands how each of the concrete types in this +// package represents its parent. 
+func parentCancelCtx(parent Context) (*cancelCtx, bool) { + for { + switch c := parent.(type) { + case *cancelCtx: + return c, true + case *timerCtx: + return c.cancelCtx, true + case *valueCtx: + parent = c.Context + default: + return nil, false + } + } +} + +// removeChild removes a context from its parent. +func removeChild(parent Context, child canceler) { + p, ok := parentCancelCtx(parent) + if !ok { + return + } + p.mu.Lock() + if p.children != nil { + delete(p.children, child) + } + p.mu.Unlock() +} + +// A canceler is a context type that can be canceled directly. The +// implementations are *cancelCtx and *timerCtx. +type canceler interface { + cancel(removeFromParent bool, err error) + Done() <-chan struct{} +} + +// A cancelCtx can be canceled. When canceled, it also cancels any children +// that implement canceler. +type cancelCtx struct { + Context + + done chan struct{} // closed by the first cancel call. + + mu sync.Mutex + children map[canceler]bool // set to nil by the first cancel call + err error // set to non-nil by the first cancel call +} + +func (c *cancelCtx) Done() <-chan struct{} { + return c.done +} + +func (c *cancelCtx) Err() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.err +} + +func (c *cancelCtx) String() string { + return fmt.Sprintf("%v.WithCancel", c.Context) +} + +// cancel closes c.done, cancels each of c's children, and, if +// removeFromParent is true, removes c from its parent's children. +func (c *cancelCtx) cancel(removeFromParent bool, err error) { + if err == nil { + panic("context: internal error: missing cancel error") + } + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return // already canceled + } + c.err = err + close(c.done) + for child := range c.children { + // NOTE: acquiring the child's lock while holding parent's lock. + child.cancel(false, err) + } + c.children = nil + c.mu.Unlock() + + if removeFromParent { + removeChild(c.Context, c) + } +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { + // The current deadline is already sooner than the new one. + return WithCancel(parent) + } + c := &timerCtx{ + cancelCtx: newCancelCtx(parent), + deadline: deadline, + } + propagateCancel(parent, c) + d := deadline.Sub(time.Now()) + if d <= 0 { + c.cancel(true, DeadlineExceeded) // deadline has already passed + return c, func() { c.cancel(true, Canceled) } + } + c.mu.Lock() + defer c.mu.Unlock() + if c.err == nil { + c.timer = time.AfterFunc(d, func() { + c.cancel(true, DeadlineExceeded) + }) + } + return c, func() { c.cancel(true, Canceled) } +} + +// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to +// implement Done and Err. It implements cancel by stopping its timer then +// delegating to cancelCtx.cancel. +type timerCtx struct { + *cancelCtx + timer *time.Timer // Under cancelCtx.mu. 
+ + deadline time.Time +} + +func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { + return c.deadline, true +} + +func (c *timerCtx) String() string { + return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) +} + +func (c *timerCtx) cancel(removeFromParent bool, err error) { + c.cancelCtx.cancel(false, err) + if removeFromParent { + // Remove this timerCtx from its parent cancelCtx's children. + removeChild(c.cancelCtx.Context, c) + } + c.mu.Lock() + if c.timer != nil { + c.timer.Stop() + c.timer = nil + } + c.mu.Unlock() +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return &valueCtx{parent, key, val} +} + +// A valueCtx carries a key-value pair. It implements Value for that key and +// delegates all other calls to the embedded Context. +type valueCtx struct { + Context + key, val interface{} +} + +func (c *valueCtx) String() string { + return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) +} + +func (c *valueCtx) Value(key interface{}) interface{} { + if c.key == key { + return c.val + } + return c.Context.Value(key) +} diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go new file mode 100644 index 00000000000..ec5a6380335 --- /dev/null +++ b/vendor/golang.org/x/net/context/pre_go19.go @@ -0,0 +1,109 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.9 + +package context + +import "time" + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + // + // WithCancel arranges for Done to be closed when cancel is called; + // WithDeadline arranges for Done to be closed when the deadline + // expires; WithTimeout arranges for Done to be closed when the timeout + // elapses. 
+ // + // Done is provided for use in select statements: + // + // // Stream generates values with DoSomething and sends them to out + // // until DoSomething returns an error or ctx.Done is closed. + // func Stream(ctx context.Context, out chan<- Value) error { + // for { + // v, err := DoSomething(ctx) + // if err != nil { + // return err + // } + // select { + // case <-ctx.Done(): + // return ctx.Err() + // case out <- v: + // } + // } + // } + // + // See http://blog.golang.org/pipelines for more examples of how to use + // a Done channel for cancelation. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key returns the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + // + // A key identifies a specific value in a Context. Functions that wish + // to store values in Context typically allocate a key in a global + // variable then use that key as the argument to context.WithValue and + // Context.Value. A key can be any type that supports equality; + // packages should define keys as an unexported type to avoid + // collisions. + // + // Packages that define a Context key should provide type-safe accessors + // for the values stores using that key: + // + // // Package user defines a User type that's stored in Contexts. + // package user + // + // import "golang.org/x/net/context" + // + // // User is the type of value stored in the Contexts. + // type User struct {...} + // + // // key is an unexported type for keys defined in this package. + // // This prevents collisions with keys defined in other packages. + // type key int + // + // // userKey is the key for user.User values in Contexts. It is + // // unexported; clients use user.NewContext and user.FromContext + // // instead of using this key directly. + // var userKey key = 0 + // + // // NewContext returns a new Context that carries value u. + // func NewContext(ctx context.Context, u *User) context.Context { + // return context.WithValue(ctx, userKey, u) + // } + // + // // FromContext returns the User value stored in ctx, if any. + // func FromContext(ctx context.Context) (*User, bool) { + // u, ok := ctx.Value(userKey).(*User) + // return u, ok + // } + Value(key interface{}) interface{} +} + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc func() diff --git a/vendor/golang.org/x/net/html/atom/atom.go b/vendor/golang.org/x/net/html/atom/atom.go new file mode 100644 index 00000000000..cd0a8ac1545 --- /dev/null +++ b/vendor/golang.org/x/net/html/atom/atom.go @@ -0,0 +1,78 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package atom provides integer codes (also known as atoms) for a fixed set of +// frequently occurring HTML strings: tag names and attribute keys such as "p" +// and "id". 
+// +// Sharing an atom's name between all elements with the same tag can result in +// fewer string allocations when tokenizing and parsing HTML. Integer +// comparisons are also generally faster than string comparisons. +// +// The value of an atom's particular code is not guaranteed to stay the same +// between versions of this package. Neither is any ordering guaranteed: +// whether atom.H1 < atom.H2 may also change. The codes are not guaranteed to +// be dense. The only guarantees are that e.g. looking up "div" will yield +// atom.Div, calling atom.Div.String will return "div", and atom.Div != 0. +package atom // import "golang.org/x/net/html/atom" + +// Atom is an integer code for a string. The zero value maps to "". +type Atom uint32 + +// String returns the atom's name. +func (a Atom) String() string { + start := uint32(a >> 8) + n := uint32(a & 0xff) + if start+n > uint32(len(atomText)) { + return "" + } + return atomText[start : start+n] +} + +func (a Atom) string() string { + return atomText[a>>8 : a>>8+a&0xff] +} + +// fnv computes the FNV hash with an arbitrary starting value h. +func fnv(h uint32, s []byte) uint32 { + for i := range s { + h ^= uint32(s[i]) + h *= 16777619 + } + return h +} + +func match(s string, t []byte) bool { + for i, c := range t { + if s[i] != c { + return false + } + } + return true +} + +// Lookup returns the atom whose name is s. It returns zero if there is no +// such atom. The lookup is case sensitive. +func Lookup(s []byte) Atom { + if len(s) == 0 || len(s) > maxAtomLen { + return 0 + } + h := fnv(hash0, s) + if a := table[h&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) { + return a + } + if a := table[(h>>16)&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) { + return a + } + return 0 +} + +// String returns a string whose contents are equal to s. In that sense, it is +// equivalent to string(s) but may be more efficient. +func String(s []byte) string { + if a := Lookup(s); a != 0 { + return a.String() + } + return string(s) +} diff --git a/vendor/golang.org/x/net/html/atom/table.go b/vendor/golang.org/x/net/html/atom/table.go new file mode 100644 index 00000000000..2a938864cb9 --- /dev/null +++ b/vendor/golang.org/x/net/html/atom/table.go @@ -0,0 +1,783 @@ +// Code generated by go generate gen.go; DO NOT EDIT. 
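Lookup and String above are the package's whole public surface; per the doc comment, the only guarantees are round-tripping and non-zero codes for known names. A quick sketch:

package example

import (
	"fmt"

	"golang.org/x/net/html/atom"
)

func demoAtom() {
	a := atom.Lookup([]byte("div")) // case sensitive; zero Atom when unknown
	fmt.Println(a == atom.Div)      // true
	fmt.Println(a.String())         // "div"
	fmt.Println(atom.Lookup([]byte("not-a-tag")) == 0) // true
}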
+ +//go:generate go run gen.go + +package atom + +const ( + A Atom = 0x1 + Abbr Atom = 0x4 + Accept Atom = 0x1a06 + AcceptCharset Atom = 0x1a0e + Accesskey Atom = 0x2c09 + Acronym Atom = 0xaa07 + Action Atom = 0x27206 + Address Atom = 0x6f307 + Align Atom = 0xb105 + Allowfullscreen Atom = 0x2080f + Allowpaymentrequest Atom = 0xc113 + Allowusermedia Atom = 0xdd0e + Alt Atom = 0xf303 + Annotation Atom = 0x1c90a + AnnotationXml Atom = 0x1c90e + Applet Atom = 0x31906 + Area Atom = 0x35604 + Article Atom = 0x3fc07 + As Atom = 0x3c02 + Aside Atom = 0x10705 + Async Atom = 0xff05 + Audio Atom = 0x11505 + Autocomplete Atom = 0x2780c + Autofocus Atom = 0x12109 + Autoplay Atom = 0x13c08 + B Atom = 0x101 + Base Atom = 0x3b04 + Basefont Atom = 0x3b08 + Bdi Atom = 0xba03 + Bdo Atom = 0x14b03 + Bgsound Atom = 0x15e07 + Big Atom = 0x17003 + Blink Atom = 0x17305 + Blockquote Atom = 0x1870a + Body Atom = 0x2804 + Br Atom = 0x202 + Button Atom = 0x19106 + Canvas Atom = 0x10306 + Caption Atom = 0x23107 + Center Atom = 0x22006 + Challenge Atom = 0x29b09 + Charset Atom = 0x2107 + Checked Atom = 0x47907 + Cite Atom = 0x19c04 + Class Atom = 0x56405 + Code Atom = 0x5c504 + Col Atom = 0x1ab03 + Colgroup Atom = 0x1ab08 + Color Atom = 0x1bf05 + Cols Atom = 0x1c404 + Colspan Atom = 0x1c407 + Command Atom = 0x1d707 + Content Atom = 0x58b07 + Contenteditable Atom = 0x58b0f + Contextmenu Atom = 0x3800b + Controls Atom = 0x1de08 + Coords Atom = 0x1ea06 + Crossorigin Atom = 0x1fb0b + Data Atom = 0x4a504 + Datalist Atom = 0x4a508 + Datetime Atom = 0x2b808 + Dd Atom = 0x2d702 + Default Atom = 0x10a07 + Defer Atom = 0x5c705 + Del Atom = 0x45203 + Desc Atom = 0x56104 + Details Atom = 0x7207 + Dfn Atom = 0x8703 + Dialog Atom = 0xbb06 + Dir Atom = 0x9303 + Dirname Atom = 0x9307 + Disabled Atom = 0x16408 + Div Atom = 0x16b03 + Dl Atom = 0x5e602 + Download Atom = 0x46308 + Draggable Atom = 0x17a09 + Dropzone Atom = 0x40508 + Dt Atom = 0x64b02 + Em Atom = 0x6e02 + Embed Atom = 0x6e05 + Enctype Atom = 0x28d07 + Face Atom = 0x21e04 + Fieldset Atom = 0x22608 + Figcaption Atom = 0x22e0a + Figure Atom = 0x24806 + Font Atom = 0x3f04 + Footer Atom = 0xf606 + For Atom = 0x25403 + ForeignObject Atom = 0x2540d + Foreignobject Atom = 0x2610d + Form Atom = 0x26e04 + Formaction Atom = 0x26e0a + Formenctype Atom = 0x2890b + Formmethod Atom = 0x2a40a + Formnovalidate Atom = 0x2ae0e + Formtarget Atom = 0x2c00a + Frame Atom = 0x8b05 + Frameset Atom = 0x8b08 + H1 Atom = 0x15c02 + H2 Atom = 0x2de02 + H3 Atom = 0x30d02 + H4 Atom = 0x34502 + H5 Atom = 0x34f02 + H6 Atom = 0x64d02 + Head Atom = 0x33104 + Header Atom = 0x33106 + Headers Atom = 0x33107 + Height Atom = 0x5206 + Hgroup Atom = 0x2ca06 + Hidden Atom = 0x2d506 + High Atom = 0x2db04 + Hr Atom = 0x15702 + Href Atom = 0x2e004 + Hreflang Atom = 0x2e008 + Html Atom = 0x5604 + HttpEquiv Atom = 0x2e80a + I Atom = 0x601 + Icon Atom = 0x58a04 + Id Atom = 0x10902 + Iframe Atom = 0x2fc06 + Image Atom = 0x30205 + Img Atom = 0x30703 + Input Atom = 0x44b05 + Inputmode Atom = 0x44b09 + Ins Atom = 0x20403 + Integrity Atom = 0x23f09 + Is Atom = 0x16502 + Isindex Atom = 0x30f07 + Ismap Atom = 0x31605 + Itemid Atom = 0x38b06 + Itemprop Atom = 0x19d08 + Itemref Atom = 0x3cd07 + Itemscope Atom = 0x67109 + Itemtype Atom = 0x31f08 + Kbd Atom = 0xb903 + Keygen Atom = 0x3206 + Keytype Atom = 0xd607 + Kind Atom = 0x17704 + Label Atom = 0x5905 + Lang Atom = 0x2e404 + Legend Atom = 0x18106 + Li Atom = 0xb202 + Link Atom = 0x17404 + List Atom = 0x4a904 + Listing Atom = 0x4a907 + Loop Atom = 0x5d04 + Low Atom = 0xc303 + 
Main Atom = 0x1004 + Malignmark Atom = 0xb00a + Manifest Atom = 0x6d708 + Map Atom = 0x31803 + Mark Atom = 0xb604 + Marquee Atom = 0x32707 + Math Atom = 0x32e04 + Max Atom = 0x33d03 + Maxlength Atom = 0x33d09 + Media Atom = 0xe605 + Mediagroup Atom = 0xe60a + Menu Atom = 0x38704 + Menuitem Atom = 0x38708 + Meta Atom = 0x4b804 + Meter Atom = 0x9805 + Method Atom = 0x2a806 + Mglyph Atom = 0x30806 + Mi Atom = 0x34702 + Min Atom = 0x34703 + Minlength Atom = 0x34709 + Mn Atom = 0x2b102 + Mo Atom = 0xa402 + Ms Atom = 0x67402 + Mtext Atom = 0x35105 + Multiple Atom = 0x35f08 + Muted Atom = 0x36705 + Name Atom = 0x9604 + Nav Atom = 0x1303 + Nobr Atom = 0x3704 + Noembed Atom = 0x6c07 + Noframes Atom = 0x8908 + Nomodule Atom = 0xa208 + Nonce Atom = 0x1a605 + Noscript Atom = 0x21608 + Novalidate Atom = 0x2b20a + Object Atom = 0x26806 + Ol Atom = 0x13702 + Onabort Atom = 0x19507 + Onafterprint Atom = 0x2360c + Onautocomplete Atom = 0x2760e + Onautocompleteerror Atom = 0x27613 + Onauxclick Atom = 0x61f0a + Onbeforeprint Atom = 0x69e0d + Onbeforeunload Atom = 0x6e70e + Onblur Atom = 0x56d06 + Oncancel Atom = 0x11908 + Oncanplay Atom = 0x14d09 + Oncanplaythrough Atom = 0x14d10 + Onchange Atom = 0x41b08 + Onclick Atom = 0x2f507 + Onclose Atom = 0x36c07 + Oncontextmenu Atom = 0x37e0d + Oncopy Atom = 0x39106 + Oncuechange Atom = 0x3970b + Oncut Atom = 0x3a205 + Ondblclick Atom = 0x3a70a + Ondrag Atom = 0x3b106 + Ondragend Atom = 0x3b109 + Ondragenter Atom = 0x3ba0b + Ondragexit Atom = 0x3c50a + Ondragleave Atom = 0x3df0b + Ondragover Atom = 0x3ea0a + Ondragstart Atom = 0x3f40b + Ondrop Atom = 0x40306 + Ondurationchange Atom = 0x41310 + Onemptied Atom = 0x40a09 + Onended Atom = 0x42307 + Onerror Atom = 0x42a07 + Onfocus Atom = 0x43107 + Onhashchange Atom = 0x43d0c + Oninput Atom = 0x44907 + Oninvalid Atom = 0x45509 + Onkeydown Atom = 0x45e09 + Onkeypress Atom = 0x46b0a + Onkeyup Atom = 0x48007 + Onlanguagechange Atom = 0x48d10 + Onload Atom = 0x49d06 + Onloadeddata Atom = 0x49d0c + Onloadedmetadata Atom = 0x4b010 + Onloadend Atom = 0x4c609 + Onloadstart Atom = 0x4cf0b + Onmessage Atom = 0x4da09 + Onmessageerror Atom = 0x4da0e + Onmousedown Atom = 0x4e80b + Onmouseenter Atom = 0x4f30c + Onmouseleave Atom = 0x4ff0c + Onmousemove Atom = 0x50b0b + Onmouseout Atom = 0x5160a + Onmouseover Atom = 0x5230b + Onmouseup Atom = 0x52e09 + Onmousewheel Atom = 0x53c0c + Onoffline Atom = 0x54809 + Ononline Atom = 0x55108 + Onpagehide Atom = 0x5590a + Onpageshow Atom = 0x5730a + Onpaste Atom = 0x57f07 + Onpause Atom = 0x59a07 + Onplay Atom = 0x5a406 + Onplaying Atom = 0x5a409 + Onpopstate Atom = 0x5ad0a + Onprogress Atom = 0x5b70a + Onratechange Atom = 0x5cc0c + Onrejectionhandled Atom = 0x5d812 + Onreset Atom = 0x5ea07 + Onresize Atom = 0x5f108 + Onscroll Atom = 0x60008 + Onsecuritypolicyviolation Atom = 0x60819 + Onseeked Atom = 0x62908 + Onseeking Atom = 0x63109 + Onselect Atom = 0x63a08 + Onshow Atom = 0x64406 + Onsort Atom = 0x64f06 + Onstalled Atom = 0x65909 + Onstorage Atom = 0x66209 + Onsubmit Atom = 0x66b08 + Onsuspend Atom = 0x67b09 + Ontimeupdate Atom = 0x400c + Ontoggle Atom = 0x68408 + Onunhandledrejection Atom = 0x68c14 + Onunload Atom = 0x6ab08 + Onvolumechange Atom = 0x6b30e + Onwaiting Atom = 0x6c109 + Onwheel Atom = 0x6ca07 + Open Atom = 0x1a304 + Optgroup Atom = 0x5f08 + Optimum Atom = 0x6d107 + Option Atom = 0x6e306 + Output Atom = 0x51d06 + P Atom = 0xc01 + Param Atom = 0xc05 + Pattern Atom = 0x6607 + Picture Atom = 0x7b07 + Ping Atom = 0xef04 + Placeholder Atom = 0x1310b + Plaintext Atom = 0x1b209 + 
Playsinline Atom = 0x1400b + Poster Atom = 0x2cf06 + Pre Atom = 0x47003 + Preload Atom = 0x48607 + Progress Atom = 0x5b908 + Prompt Atom = 0x53606 + Public Atom = 0x58606 + Q Atom = 0xcf01 + Radiogroup Atom = 0x30a + Rb Atom = 0x3a02 + Readonly Atom = 0x35708 + Referrerpolicy Atom = 0x3d10e + Rel Atom = 0x48703 + Required Atom = 0x24c08 + Reversed Atom = 0x8008 + Rows Atom = 0x9c04 + Rowspan Atom = 0x9c07 + Rp Atom = 0x23c02 + Rt Atom = 0x19a02 + Rtc Atom = 0x19a03 + Ruby Atom = 0xfb04 + S Atom = 0x2501 + Samp Atom = 0x7804 + Sandbox Atom = 0x12907 + Scope Atom = 0x67505 + Scoped Atom = 0x67506 + Script Atom = 0x21806 + Seamless Atom = 0x37108 + Section Atom = 0x56807 + Select Atom = 0x63c06 + Selected Atom = 0x63c08 + Shape Atom = 0x1e505 + Size Atom = 0x5f504 + Sizes Atom = 0x5f505 + Slot Atom = 0x1ef04 + Small Atom = 0x20605 + Sortable Atom = 0x65108 + Sorted Atom = 0x33706 + Source Atom = 0x37806 + Spacer Atom = 0x43706 + Span Atom = 0x9f04 + Spellcheck Atom = 0x4740a + Src Atom = 0x5c003 + Srcdoc Atom = 0x5c006 + Srclang Atom = 0x5f907 + Srcset Atom = 0x6f906 + Start Atom = 0x3fa05 + Step Atom = 0x58304 + Strike Atom = 0xd206 + Strong Atom = 0x6dd06 + Style Atom = 0x6ff05 + Sub Atom = 0x66d03 + Summary Atom = 0x70407 + Sup Atom = 0x70b03 + Svg Atom = 0x70e03 + System Atom = 0x71106 + Tabindex Atom = 0x4be08 + Table Atom = 0x59505 + Target Atom = 0x2c406 + Tbody Atom = 0x2705 + Td Atom = 0x9202 + Template Atom = 0x71408 + Textarea Atom = 0x35208 + Tfoot Atom = 0xf505 + Th Atom = 0x15602 + Thead Atom = 0x33005 + Time Atom = 0x4204 + Title Atom = 0x11005 + Tr Atom = 0xcc02 + Track Atom = 0x1ba05 + Translate Atom = 0x1f209 + Tt Atom = 0x6802 + Type Atom = 0xd904 + Typemustmatch Atom = 0x2900d + U Atom = 0xb01 + Ul Atom = 0xa702 + Updateviacache Atom = 0x460e + Usemap Atom = 0x59e06 + Value Atom = 0x1505 + Var Atom = 0x16d03 + Video Atom = 0x2f105 + Wbr Atom = 0x57c03 + Width Atom = 0x64905 + Workertype Atom = 0x71c0a + Wrap Atom = 0x72604 + Xmp Atom = 0x12f03 +) + +const hash0 = 0x81cdf10e + +const maxAtomLen = 25 + +var table = [1 << 9]Atom{ + 0x1: 0xe60a, // mediagroup + 0x2: 0x2e404, // lang + 0x4: 0x2c09, // accesskey + 0x5: 0x8b08, // frameset + 0x7: 0x63a08, // onselect + 0x8: 0x71106, // system + 0xa: 0x64905, // width + 0xc: 0x2890b, // formenctype + 0xd: 0x13702, // ol + 0xe: 0x3970b, // oncuechange + 0x10: 0x14b03, // bdo + 0x11: 0x11505, // audio + 0x12: 0x17a09, // draggable + 0x14: 0x2f105, // video + 0x15: 0x2b102, // mn + 0x16: 0x38704, // menu + 0x17: 0x2cf06, // poster + 0x19: 0xf606, // footer + 0x1a: 0x2a806, // method + 0x1b: 0x2b808, // datetime + 0x1c: 0x19507, // onabort + 0x1d: 0x460e, // updateviacache + 0x1e: 0xff05, // async + 0x1f: 0x49d06, // onload + 0x21: 0x11908, // oncancel + 0x22: 0x62908, // onseeked + 0x23: 0x30205, // image + 0x24: 0x5d812, // onrejectionhandled + 0x26: 0x17404, // link + 0x27: 0x51d06, // output + 0x28: 0x33104, // head + 0x29: 0x4ff0c, // onmouseleave + 0x2a: 0x57f07, // onpaste + 0x2b: 0x5a409, // onplaying + 0x2c: 0x1c407, // colspan + 0x2f: 0x1bf05, // color + 0x30: 0x5f504, // size + 0x31: 0x2e80a, // http-equiv + 0x33: 0x601, // i + 0x34: 0x5590a, // onpagehide + 0x35: 0x68c14, // onunhandledrejection + 0x37: 0x42a07, // onerror + 0x3a: 0x3b08, // basefont + 0x3f: 0x1303, // nav + 0x40: 0x17704, // kind + 0x41: 0x35708, // readonly + 0x42: 0x30806, // mglyph + 0x44: 0xb202, // li + 0x46: 0x2d506, // hidden + 0x47: 0x70e03, // svg + 0x48: 0x58304, // step + 0x49: 0x23f09, // integrity + 0x4a: 0x58606, // public + 0x4c: 0x1ab03, 
// col + 0x4d: 0x1870a, // blockquote + 0x4e: 0x34f02, // h5 + 0x50: 0x5b908, // progress + 0x51: 0x5f505, // sizes + 0x52: 0x34502, // h4 + 0x56: 0x33005, // thead + 0x57: 0xd607, // keytype + 0x58: 0x5b70a, // onprogress + 0x59: 0x44b09, // inputmode + 0x5a: 0x3b109, // ondragend + 0x5d: 0x3a205, // oncut + 0x5e: 0x43706, // spacer + 0x5f: 0x1ab08, // colgroup + 0x62: 0x16502, // is + 0x65: 0x3c02, // as + 0x66: 0x54809, // onoffline + 0x67: 0x33706, // sorted + 0x69: 0x48d10, // onlanguagechange + 0x6c: 0x43d0c, // onhashchange + 0x6d: 0x9604, // name + 0x6e: 0xf505, // tfoot + 0x6f: 0x56104, // desc + 0x70: 0x33d03, // max + 0x72: 0x1ea06, // coords + 0x73: 0x30d02, // h3 + 0x74: 0x6e70e, // onbeforeunload + 0x75: 0x9c04, // rows + 0x76: 0x63c06, // select + 0x77: 0x9805, // meter + 0x78: 0x38b06, // itemid + 0x79: 0x53c0c, // onmousewheel + 0x7a: 0x5c006, // srcdoc + 0x7d: 0x1ba05, // track + 0x7f: 0x31f08, // itemtype + 0x82: 0xa402, // mo + 0x83: 0x41b08, // onchange + 0x84: 0x33107, // headers + 0x85: 0x5cc0c, // onratechange + 0x86: 0x60819, // onsecuritypolicyviolation + 0x88: 0x4a508, // datalist + 0x89: 0x4e80b, // onmousedown + 0x8a: 0x1ef04, // slot + 0x8b: 0x4b010, // onloadedmetadata + 0x8c: 0x1a06, // accept + 0x8d: 0x26806, // object + 0x91: 0x6b30e, // onvolumechange + 0x92: 0x2107, // charset + 0x93: 0x27613, // onautocompleteerror + 0x94: 0xc113, // allowpaymentrequest + 0x95: 0x2804, // body + 0x96: 0x10a07, // default + 0x97: 0x63c08, // selected + 0x98: 0x21e04, // face + 0x99: 0x1e505, // shape + 0x9b: 0x68408, // ontoggle + 0x9e: 0x64b02, // dt + 0x9f: 0xb604, // mark + 0xa1: 0xb01, // u + 0xa4: 0x6ab08, // onunload + 0xa5: 0x5d04, // loop + 0xa6: 0x16408, // disabled + 0xaa: 0x42307, // onended + 0xab: 0xb00a, // malignmark + 0xad: 0x67b09, // onsuspend + 0xae: 0x35105, // mtext + 0xaf: 0x64f06, // onsort + 0xb0: 0x19d08, // itemprop + 0xb3: 0x67109, // itemscope + 0xb4: 0x17305, // blink + 0xb6: 0x3b106, // ondrag + 0xb7: 0xa702, // ul + 0xb8: 0x26e04, // form + 0xb9: 0x12907, // sandbox + 0xba: 0x8b05, // frame + 0xbb: 0x1505, // value + 0xbc: 0x66209, // onstorage + 0xbf: 0xaa07, // acronym + 0xc0: 0x19a02, // rt + 0xc2: 0x202, // br + 0xc3: 0x22608, // fieldset + 0xc4: 0x2900d, // typemustmatch + 0xc5: 0xa208, // nomodule + 0xc6: 0x6c07, // noembed + 0xc7: 0x69e0d, // onbeforeprint + 0xc8: 0x19106, // button + 0xc9: 0x2f507, // onclick + 0xca: 0x70407, // summary + 0xcd: 0xfb04, // ruby + 0xce: 0x56405, // class + 0xcf: 0x3f40b, // ondragstart + 0xd0: 0x23107, // caption + 0xd4: 0xdd0e, // allowusermedia + 0xd5: 0x4cf0b, // onloadstart + 0xd9: 0x16b03, // div + 0xda: 0x4a904, // list + 0xdb: 0x32e04, // math + 0xdc: 0x44b05, // input + 0xdf: 0x3ea0a, // ondragover + 0xe0: 0x2de02, // h2 + 0xe2: 0x1b209, // plaintext + 0xe4: 0x4f30c, // onmouseenter + 0xe7: 0x47907, // checked + 0xe8: 0x47003, // pre + 0xea: 0x35f08, // multiple + 0xeb: 0xba03, // bdi + 0xec: 0x33d09, // maxlength + 0xed: 0xcf01, // q + 0xee: 0x61f0a, // onauxclick + 0xf0: 0x57c03, // wbr + 0xf2: 0x3b04, // base + 0xf3: 0x6e306, // option + 0xf5: 0x41310, // ondurationchange + 0xf7: 0x8908, // noframes + 0xf9: 0x40508, // dropzone + 0xfb: 0x67505, // scope + 0xfc: 0x8008, // reversed + 0xfd: 0x3ba0b, // ondragenter + 0xfe: 0x3fa05, // start + 0xff: 0x12f03, // xmp + 0x100: 0x5f907, // srclang + 0x101: 0x30703, // img + 0x104: 0x101, // b + 0x105: 0x25403, // for + 0x106: 0x10705, // aside + 0x107: 0x44907, // oninput + 0x108: 0x35604, // area + 0x109: 0x2a40a, // formmethod + 0x10a: 0x72604, 
// wrap + 0x10c: 0x23c02, // rp + 0x10d: 0x46b0a, // onkeypress + 0x10e: 0x6802, // tt + 0x110: 0x34702, // mi + 0x111: 0x36705, // muted + 0x112: 0xf303, // alt + 0x113: 0x5c504, // code + 0x114: 0x6e02, // em + 0x115: 0x3c50a, // ondragexit + 0x117: 0x9f04, // span + 0x119: 0x6d708, // manifest + 0x11a: 0x38708, // menuitem + 0x11b: 0x58b07, // content + 0x11d: 0x6c109, // onwaiting + 0x11f: 0x4c609, // onloadend + 0x121: 0x37e0d, // oncontextmenu + 0x123: 0x56d06, // onblur + 0x124: 0x3fc07, // article + 0x125: 0x9303, // dir + 0x126: 0xef04, // ping + 0x127: 0x24c08, // required + 0x128: 0x45509, // oninvalid + 0x129: 0xb105, // align + 0x12b: 0x58a04, // icon + 0x12c: 0x64d02, // h6 + 0x12d: 0x1c404, // cols + 0x12e: 0x22e0a, // figcaption + 0x12f: 0x45e09, // onkeydown + 0x130: 0x66b08, // onsubmit + 0x131: 0x14d09, // oncanplay + 0x132: 0x70b03, // sup + 0x133: 0xc01, // p + 0x135: 0x40a09, // onemptied + 0x136: 0x39106, // oncopy + 0x137: 0x19c04, // cite + 0x138: 0x3a70a, // ondblclick + 0x13a: 0x50b0b, // onmousemove + 0x13c: 0x66d03, // sub + 0x13d: 0x48703, // rel + 0x13e: 0x5f08, // optgroup + 0x142: 0x9c07, // rowspan + 0x143: 0x37806, // source + 0x144: 0x21608, // noscript + 0x145: 0x1a304, // open + 0x146: 0x20403, // ins + 0x147: 0x2540d, // foreignObject + 0x148: 0x5ad0a, // onpopstate + 0x14a: 0x28d07, // enctype + 0x14b: 0x2760e, // onautocomplete + 0x14c: 0x35208, // textarea + 0x14e: 0x2780c, // autocomplete + 0x14f: 0x15702, // hr + 0x150: 0x1de08, // controls + 0x151: 0x10902, // id + 0x153: 0x2360c, // onafterprint + 0x155: 0x2610d, // foreignobject + 0x156: 0x32707, // marquee + 0x157: 0x59a07, // onpause + 0x158: 0x5e602, // dl + 0x159: 0x5206, // height + 0x15a: 0x34703, // min + 0x15b: 0x9307, // dirname + 0x15c: 0x1f209, // translate + 0x15d: 0x5604, // html + 0x15e: 0x34709, // minlength + 0x15f: 0x48607, // preload + 0x160: 0x71408, // template + 0x161: 0x3df0b, // ondragleave + 0x162: 0x3a02, // rb + 0x164: 0x5c003, // src + 0x165: 0x6dd06, // strong + 0x167: 0x7804, // samp + 0x168: 0x6f307, // address + 0x169: 0x55108, // ononline + 0x16b: 0x1310b, // placeholder + 0x16c: 0x2c406, // target + 0x16d: 0x20605, // small + 0x16e: 0x6ca07, // onwheel + 0x16f: 0x1c90a, // annotation + 0x170: 0x4740a, // spellcheck + 0x171: 0x7207, // details + 0x172: 0x10306, // canvas + 0x173: 0x12109, // autofocus + 0x174: 0xc05, // param + 0x176: 0x46308, // download + 0x177: 0x45203, // del + 0x178: 0x36c07, // onclose + 0x179: 0xb903, // kbd + 0x17a: 0x31906, // applet + 0x17b: 0x2e004, // href + 0x17c: 0x5f108, // onresize + 0x17e: 0x49d0c, // onloadeddata + 0x180: 0xcc02, // tr + 0x181: 0x2c00a, // formtarget + 0x182: 0x11005, // title + 0x183: 0x6ff05, // style + 0x184: 0xd206, // strike + 0x185: 0x59e06, // usemap + 0x186: 0x2fc06, // iframe + 0x187: 0x1004, // main + 0x189: 0x7b07, // picture + 0x18c: 0x31605, // ismap + 0x18e: 0x4a504, // data + 0x18f: 0x5905, // label + 0x191: 0x3d10e, // referrerpolicy + 0x192: 0x15602, // th + 0x194: 0x53606, // prompt + 0x195: 0x56807, // section + 0x197: 0x6d107, // optimum + 0x198: 0x2db04, // high + 0x199: 0x15c02, // h1 + 0x19a: 0x65909, // onstalled + 0x19b: 0x16d03, // var + 0x19c: 0x4204, // time + 0x19e: 0x67402, // ms + 0x19f: 0x33106, // header + 0x1a0: 0x4da09, // onmessage + 0x1a1: 0x1a605, // nonce + 0x1a2: 0x26e0a, // formaction + 0x1a3: 0x22006, // center + 0x1a4: 0x3704, // nobr + 0x1a5: 0x59505, // table + 0x1a6: 0x4a907, // listing + 0x1a7: 0x18106, // legend + 0x1a9: 0x29b09, // challenge + 0x1aa: 0x24806, // 
figure + 0x1ab: 0xe605, // media + 0x1ae: 0xd904, // type + 0x1af: 0x3f04, // font + 0x1b0: 0x4da0e, // onmessageerror + 0x1b1: 0x37108, // seamless + 0x1b2: 0x8703, // dfn + 0x1b3: 0x5c705, // defer + 0x1b4: 0xc303, // low + 0x1b5: 0x19a03, // rtc + 0x1b6: 0x5230b, // onmouseover + 0x1b7: 0x2b20a, // novalidate + 0x1b8: 0x71c0a, // workertype + 0x1ba: 0x3cd07, // itemref + 0x1bd: 0x1, // a + 0x1be: 0x31803, // map + 0x1bf: 0x400c, // ontimeupdate + 0x1c0: 0x15e07, // bgsound + 0x1c1: 0x3206, // keygen + 0x1c2: 0x2705, // tbody + 0x1c5: 0x64406, // onshow + 0x1c7: 0x2501, // s + 0x1c8: 0x6607, // pattern + 0x1cc: 0x14d10, // oncanplaythrough + 0x1ce: 0x2d702, // dd + 0x1cf: 0x6f906, // srcset + 0x1d0: 0x17003, // big + 0x1d2: 0x65108, // sortable + 0x1d3: 0x48007, // onkeyup + 0x1d5: 0x5a406, // onplay + 0x1d7: 0x4b804, // meta + 0x1d8: 0x40306, // ondrop + 0x1da: 0x60008, // onscroll + 0x1db: 0x1fb0b, // crossorigin + 0x1dc: 0x5730a, // onpageshow + 0x1dd: 0x4, // abbr + 0x1de: 0x9202, // td + 0x1df: 0x58b0f, // contenteditable + 0x1e0: 0x27206, // action + 0x1e1: 0x1400b, // playsinline + 0x1e2: 0x43107, // onfocus + 0x1e3: 0x2e008, // hreflang + 0x1e5: 0x5160a, // onmouseout + 0x1e6: 0x5ea07, // onreset + 0x1e7: 0x13c08, // autoplay + 0x1e8: 0x63109, // onseeking + 0x1ea: 0x67506, // scoped + 0x1ec: 0x30a, // radiogroup + 0x1ee: 0x3800b, // contextmenu + 0x1ef: 0x52e09, // onmouseup + 0x1f1: 0x2ca06, // hgroup + 0x1f2: 0x2080f, // allowfullscreen + 0x1f3: 0x4be08, // tabindex + 0x1f6: 0x30f07, // isindex + 0x1f7: 0x1a0e, // accept-charset + 0x1f8: 0x2ae0e, // formnovalidate + 0x1fb: 0x1c90e, // annotation-xml + 0x1fc: 0x6e05, // embed + 0x1fd: 0x21806, // script + 0x1fe: 0xbb06, // dialog + 0x1ff: 0x1d707, // command +} + +const atomText = "abbradiogrouparamainavalueaccept-charsetbodyaccesskeygenobrb" + + "asefontimeupdateviacacheightmlabelooptgroupatternoembedetail" + + "sampictureversedfnoframesetdirnameterowspanomoduleacronymali" + + "gnmarkbdialogallowpaymentrequestrikeytypeallowusermediagroup" + + "ingaltfooterubyasyncanvasidefaultitleaudioncancelautofocusan" + + "dboxmplaceholderautoplaysinlinebdoncanplaythrough1bgsoundisa" + + "bledivarbigblinkindraggablegendblockquotebuttonabortcitempro" + + "penoncecolgrouplaintextrackcolorcolspannotation-xmlcommandco" + + "ntrolshapecoordslotranslatecrossoriginsmallowfullscreenoscri" + + "ptfacenterfieldsetfigcaptionafterprintegrityfigurequiredfore" + + "ignObjectforeignobjectformactionautocompleteerrorformenctype" + + "mustmatchallengeformmethodformnovalidatetimeformtargethgroup" + + "osterhiddenhigh2hreflanghttp-equivideonclickiframeimageimgly" + + "ph3isindexismappletitemtypemarqueematheadersortedmaxlength4m" + + "inlength5mtextareadonlymultiplemutedoncloseamlessourceoncont" + + "extmenuitemidoncopyoncuechangeoncutondblclickondragendondrag" + + "enterondragexitemreferrerpolicyondragleaveondragoverondragst" + + "articleondropzonemptiedondurationchangeonendedonerroronfocus" + + "paceronhashchangeoninputmodeloninvalidonkeydownloadonkeypres" + + "spellcheckedonkeyupreloadonlanguagechangeonloadeddatalisting" + + "onloadedmetadatabindexonloadendonloadstartonmessageerroronmo" + + "usedownonmouseenteronmouseleaveonmousemoveonmouseoutputonmou" + + "seoveronmouseupromptonmousewheelonofflineononlineonpagehides" + + "classectionbluronpageshowbronpastepublicontenteditableonpaus" + + "emaponplayingonpopstateonprogressrcdocodeferonratechangeonre" + + "jectionhandledonresetonresizesrclangonscrollonsecuritypolicy" + + 
"violationauxclickonseekedonseekingonselectedonshowidth6onsor" + + "tableonstalledonstorageonsubmitemscopedonsuspendontoggleonun" + + "handledrejectionbeforeprintonunloadonvolumechangeonwaitingon" + + "wheeloptimumanifestrongoptionbeforeunloaddressrcsetstylesumm" + + "arysupsvgsystemplateworkertypewrap" diff --git a/vendor/golang.org/x/net/html/charset/charset.go b/vendor/golang.org/x/net/html/charset/charset.go new file mode 100644 index 00000000000..13bed1599f7 --- /dev/null +++ b/vendor/golang.org/x/net/html/charset/charset.go @@ -0,0 +1,257 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package charset provides common text encodings for HTML documents. +// +// The mapping from encoding labels to encodings is defined at +// https://encoding.spec.whatwg.org/. +package charset // import "golang.org/x/net/html/charset" + +import ( + "bytes" + "fmt" + "io" + "mime" + "strings" + "unicode/utf8" + + "golang.org/x/net/html" + "golang.org/x/text/encoding" + "golang.org/x/text/encoding/charmap" + "golang.org/x/text/encoding/htmlindex" + "golang.org/x/text/transform" +) + +// Lookup returns the encoding with the specified label, and its canonical +// name. It returns nil and the empty string if label is not one of the +// standard encodings for HTML. Matching is case-insensitive and ignores +// leading and trailing whitespace. Encoders will use HTML escape sequences for +// runes that are not supported by the character set. +func Lookup(label string) (e encoding.Encoding, name string) { + e, err := htmlindex.Get(label) + if err != nil { + return nil, "" + } + name, _ = htmlindex.Name(e) + return &htmlEncoding{e}, name +} + +type htmlEncoding struct{ encoding.Encoding } + +func (h *htmlEncoding) NewEncoder() *encoding.Encoder { + // HTML requires a non-terminating legacy encoder. We use HTML escapes to + // substitute unsupported code points. + return encoding.HTMLEscapeUnsupported(h.Encoding.NewEncoder()) +} + +// DetermineEncoding determines the encoding of an HTML document by examining +// up to the first 1024 bytes of content and the declared Content-Type. +// +// See http://www.whatwg.org/specs/web-apps/current-work/multipage/parsing.html#determining-the-character-encoding +func DetermineEncoding(content []byte, contentType string) (e encoding.Encoding, name string, certain bool) { + if len(content) > 1024 { + content = content[:1024] + } + + for _, b := range boms { + if bytes.HasPrefix(content, b.bom) { + e, name = Lookup(b.enc) + return e, name, true + } + } + + if _, params, err := mime.ParseMediaType(contentType); err == nil { + if cs, ok := params["charset"]; ok { + if e, name = Lookup(cs); e != nil { + return e, name, true + } + } + } + + if len(content) > 0 { + e, name = prescan(content) + if e != nil { + return e, name, false + } + } + + // Try to detect UTF-8. + // First eliminate any partial rune at the end. + for i := len(content) - 1; i >= 0 && i > len(content)-4; i-- { + b := content[i] + if b < 0x80 { + break + } + if utf8.RuneStart(b) { + content = content[:i] + break + } + } + hasHighBit := false + for _, c := range content { + if c >= 0x80 { + hasHighBit = true + break + } + } + if hasHighBit && utf8.Valid(content) { + return encoding.Nop, "utf-8", false + } + + // TODO: change default depending on user's locale? + return charmap.Windows1252, "windows-1252", false +} + +// NewReader returns an io.Reader that converts the content of r to UTF-8. 
+// It calls DetermineEncoding to find out what r's encoding is. +func NewReader(r io.Reader, contentType string) (io.Reader, error) { + preview := make([]byte, 1024) + n, err := io.ReadFull(r, preview) + switch { + case err == io.ErrUnexpectedEOF: + preview = preview[:n] + r = bytes.NewReader(preview) + case err != nil: + return nil, err + default: + r = io.MultiReader(bytes.NewReader(preview), r) + } + + if e, _, _ := DetermineEncoding(preview, contentType); e != encoding.Nop { + r = transform.NewReader(r, e.NewDecoder()) + } + return r, nil +} + +// NewReaderLabel returns a reader that converts from the specified charset to +// UTF-8. It uses Lookup to find the encoding that corresponds to label, and +// returns an error if Lookup returns nil. It is suitable for use as +// encoding/xml.Decoder's CharsetReader function. +func NewReaderLabel(label string, input io.Reader) (io.Reader, error) { + e, _ := Lookup(label) + if e == nil { + return nil, fmt.Errorf("unsupported charset: %q", label) + } + return transform.NewReader(input, e.NewDecoder()), nil +} + +func prescan(content []byte) (e encoding.Encoding, name string) { + z := html.NewTokenizer(bytes.NewReader(content)) + for { + switch z.Next() { + case html.ErrorToken: + return nil, "" + + case html.StartTagToken, html.SelfClosingTagToken: + tagName, hasAttr := z.TagName() + if !bytes.Equal(tagName, []byte("meta")) { + continue + } + attrList := make(map[string]bool) + gotPragma := false + + const ( + dontKnow = iota + doNeedPragma + doNotNeedPragma + ) + needPragma := dontKnow + + name = "" + e = nil + for hasAttr { + var key, val []byte + key, val, hasAttr = z.TagAttr() + ks := string(key) + if attrList[ks] { + continue + } + attrList[ks] = true + for i, c := range val { + if 'A' <= c && c <= 'Z' { + val[i] = c + 0x20 + } + } + + switch ks { + case "http-equiv": + if bytes.Equal(val, []byte("content-type")) { + gotPragma = true + } + + case "content": + if e == nil { + name = fromMetaElement(string(val)) + if name != "" { + e, name = Lookup(name) + if e != nil { + needPragma = doNeedPragma + } + } + } + + case "charset": + e, name = Lookup(string(val)) + needPragma = doNotNeedPragma + } + } + + if needPragma == dontKnow || needPragma == doNeedPragma && !gotPragma { + continue + } + + if strings.HasPrefix(name, "utf-16") { + name = "utf-8" + e = encoding.Nop + } + + if e != nil { + return e, name + } + } + } +} + +func fromMetaElement(s string) string { + for s != "" { + csLoc := strings.Index(s, "charset") + if csLoc == -1 { + return "" + } + s = s[csLoc+len("charset"):] + s = strings.TrimLeft(s, " \t\n\f\r") + if !strings.HasPrefix(s, "=") { + continue + } + s = s[1:] + s = strings.TrimLeft(s, " \t\n\f\r") + if s == "" { + return "" + } + if q := s[0]; q == '"' || q == '\'' { + s = s[1:] + closeQuote := strings.IndexRune(s, rune(q)) + if closeQuote == -1 { + return "" + } + return s[:closeQuote] + } + + end := strings.IndexAny(s, "; \t\n\f\r") + if end == -1 { + end = len(s) + } + return s[:end] + } + return "" +} + +var boms = []struct { + bom []byte + enc string +}{ + {[]byte{0xfe, 0xff}, "utf-16be"}, + {[]byte{0xff, 0xfe}, "utf-16le"}, + {[]byte{0xef, 0xbb, 0xbf}, "utf-8"}, +} diff --git a/vendor/golang.org/x/net/html/const.go b/vendor/golang.org/x/net/html/const.go new file mode 100644 index 00000000000..ff7acf2d5b4 --- /dev/null +++ b/vendor/golang.org/x/net/html/const.go @@ -0,0 +1,111 @@ +// Copyright 2011 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +// Section 12.2.4.2 of the HTML5 specification says "The following elements +// have varying levels of special parsing rules". +// https://html.spec.whatwg.org/multipage/syntax.html#the-stack-of-open-elements +var isSpecialElementMap = map[string]bool{ + "address": true, + "applet": true, + "area": true, + "article": true, + "aside": true, + "base": true, + "basefont": true, + "bgsound": true, + "blockquote": true, + "body": true, + "br": true, + "button": true, + "caption": true, + "center": true, + "col": true, + "colgroup": true, + "dd": true, + "details": true, + "dir": true, + "div": true, + "dl": true, + "dt": true, + "embed": true, + "fieldset": true, + "figcaption": true, + "figure": true, + "footer": true, + "form": true, + "frame": true, + "frameset": true, + "h1": true, + "h2": true, + "h3": true, + "h4": true, + "h5": true, + "h6": true, + "head": true, + "header": true, + "hgroup": true, + "hr": true, + "html": true, + "iframe": true, + "img": true, + "input": true, + "keygen": true, // "keygen" has been removed from the spec, but is kept here for backwards compatibility. + "li": true, + "link": true, + "listing": true, + "main": true, + "marquee": true, + "menu": true, + "meta": true, + "nav": true, + "noembed": true, + "noframes": true, + "noscript": true, + "object": true, + "ol": true, + "p": true, + "param": true, + "plaintext": true, + "pre": true, + "script": true, + "section": true, + "select": true, + "source": true, + "style": true, + "summary": true, + "table": true, + "tbody": true, + "td": true, + "template": true, + "textarea": true, + "tfoot": true, + "th": true, + "thead": true, + "title": true, + "tr": true, + "track": true, + "ul": true, + "wbr": true, + "xmp": true, +} + +func isSpecialElement(element *Node) bool { + switch element.Namespace { + case "", "html": + return isSpecialElementMap[element.Data] + case "math": + switch element.Data { + case "mi", "mo", "mn", "ms", "mtext", "annotation-xml": + return true + } + case "svg": + switch element.Data { + case "foreignObject", "desc", "title": + return true + } + } + return false +} diff --git a/vendor/golang.org/x/net/html/doc.go b/vendor/golang.org/x/net/html/doc.go new file mode 100644 index 00000000000..885c4c5936b --- /dev/null +++ b/vendor/golang.org/x/net/html/doc.go @@ -0,0 +1,122 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package html implements an HTML5-compliant tokenizer and parser. + +Tokenization is done by creating a Tokenizer for an io.Reader r. It is the +caller's responsibility to ensure that r provides UTF-8 encoded HTML. + + z := html.NewTokenizer(r) + +Given a Tokenizer z, the HTML is tokenized by repeatedly calling z.Next(), +which parses the next token and returns its type, or an error: + + for { + tt := z.Next() + if tt == html.ErrorToken { + // ... + return ... + } + // Process the current token. + } + +There are two APIs for retrieving the current token. The high-level API is to +call Token; the low-level API is to call Text or TagName / TagAttr. Both APIs +allow optionally calling Raw after Next but before Token, Text, TagName, or +TagAttr. In EBNF notation, the valid call sequence per token is: + + Next {Raw} [ Token | Text | TagName {TagAttr} ] + +Token returns an independent data structure that completely describes a token.
+Entities (such as "&lt;") are unescaped, tag names and attribute keys are +lower-cased, and attributes are collected into a []Attribute. For example: + + for { + if z.Next() == html.ErrorToken { + // Returning io.EOF indicates success. + return z.Err() + } + emitToken(z.Token()) + } + +The low-level API performs fewer allocations and copies, but the contents of +the []byte values returned by Text, TagName and TagAttr may change on the next +call to Next. For example, to extract an HTML page's anchor text: + + depth := 0 + for { + tt := z.Next() + switch tt { + case html.ErrorToken: + return z.Err() + case html.TextToken: + if depth > 0 { + // emitBytes should copy the []byte it receives, + // if it doesn't process it immediately. + emitBytes(z.Text()) + } + case html.StartTagToken, html.EndTagToken: + tn, _ := z.TagName() + if len(tn) == 1 && tn[0] == 'a' { + if tt == html.StartTagToken { + depth++ + } else { + depth-- + } + } + } + } + +Parsing is done by calling Parse with an io.Reader, which returns the root of +the parse tree (the document element) as a *Node. It is the caller's +responsibility to ensure that the Reader provides UTF-8 encoded HTML. For +example, to process each anchor node in depth-first order: + + doc, err := html.Parse(r) + if err != nil { + // ... + } + for n := range doc.Descendants() { + if n.Type == html.ElementNode && n.Data == "a" { + // Do something with n... + } + } + +The relevant specifications include: +https://html.spec.whatwg.org/multipage/syntax.html and +https://html.spec.whatwg.org/multipage/syntax.html#tokenization + +# Security Considerations + +Care should be taken when parsing and interpreting HTML, whether full documents +or fragments, within the framework of the HTML specification, especially with +regard to untrusted inputs. + +This package provides both a tokenizer and a parser, which implement the +tokenization stage, and the tokenization and tree construction stages, of the +WHATWG HTML parsing specification, respectively. While the tokenizer parses and +normalizes individual HTML tokens, only the parser constructs the DOM tree from +the tokenized HTML, as described in the tree construction stage of the +specification, dynamically modifying or extending the document's DOM tree. + +If your use case requires semantically well-formed HTML documents, as defined by +the WHATWG specification, the parser should be used rather than the tokenizer. + +In security contexts, if trust decisions are being made using the tokenized or +parsed content, the input must be re-serialized (for instance by using Render or +Token.String) in order for those trust decisions to hold, as the process of +tokenization or parsing may alter the content. +*/ +package html // import "golang.org/x/net/html" + +// The tokenization algorithm implemented by this package is not a line-by-line +// transliteration of the relatively verbose state-machine in the WHATWG +// specification. A more direct approach is used instead, where the program +// counter implies the state, such as whether it is tokenizing a tag or a text +// node. Specification compliance is verified by checking expected and actual +// outputs over a test suite rather than aiming for algorithmic fidelity. + +// TODO(nigeltao): Does a DOM API belong in this package or a separate one? +// TODO(nigeltao): How does parsing interact with a JavaScript engine?
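For orientation, the doc.go comment above requires UTF-8 input for both the tokenizer and the parser, and the charset package added earlier in this diff provides exactly that conversion. The following is a minimal sketch of how the two vendored packages compose; it is not part of the vendored sources, and the URL and variable names are illustrative assumptions:

	package main

	import (
		"log"
		"net/http"

		"golang.org/x/net/html"
		"golang.org/x/net/html/charset"
	)

	func main() {
		// Fetch a page whose encoding may be anything the HTML standard allows.
		resp, err := http.Get("https://example.com")
		if err != nil {
			log.Fatal(err)
		}
		defer resp.Body.Close()

		// charset.NewReader sniffs the BOM, the Content-Type charset parameter,
		// and <meta> declarations (via DetermineEncoding), returning a reader
		// that yields UTF-8.
		r, err := charset.NewReader(resp.Body, resp.Header.Get("Content-Type"))
		if err != nil {
			log.Fatal(err)
		}

		// html.Parse may now assume UTF-8 input, as doc.go requires.
		doc, err := html.Parse(r)
		if err != nil {
			log.Fatal(err)
		}
		_ = doc // walk the tree as in the doc.go examples above
	}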
diff --git a/vendor/golang.org/x/net/html/doctype.go b/vendor/golang.org/x/net/html/doctype.go new file mode 100644 index 00000000000..bca3ae9a0c2 --- /dev/null +++ b/vendor/golang.org/x/net/html/doctype.go @@ -0,0 +1,156 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "strings" +) + +// parseDoctype parses the data from a DoctypeToken into a name, +// public identifier, and system identifier. It returns a Node whose Type +// is DoctypeNode, whose Data is the name, and which has attributes +// named "system" and "public" for the two identifiers if they were present. +// quirks is whether the document should be parsed in "quirks mode". +func parseDoctype(s string) (n *Node, quirks bool) { + n = &Node{Type: DoctypeNode} + + // Find the name. + space := strings.IndexAny(s, whitespace) + if space == -1 { + space = len(s) + } + n.Data = s[:space] + // The comparison to "html" is case-sensitive. + if n.Data != "html" { + quirks = true + } + n.Data = strings.ToLower(n.Data) + s = strings.TrimLeft(s[space:], whitespace) + + if len(s) < 6 { + // It can't start with "PUBLIC" or "SYSTEM". + // Ignore the rest of the string. + return n, quirks || s != "" + } + + key := strings.ToLower(s[:6]) + s = s[6:] + for key == "public" || key == "system" { + s = strings.TrimLeft(s, whitespace) + if s == "" { + break + } + quote := s[0] + if quote != '"' && quote != '\'' { + break + } + s = s[1:] + q := strings.IndexRune(s, rune(quote)) + var id string + if q == -1 { + id = s + s = "" + } else { + id = s[:q] + s = s[q+1:] + } + n.Attr = append(n.Attr, Attribute{Key: key, Val: id}) + if key == "public" { + key = "system" + } else { + key = "" + } + } + + if key != "" || s != "" { + quirks = true + } else if len(n.Attr) > 0 { + if n.Attr[0].Key == "public" { + public := strings.ToLower(n.Attr[0].Val) + switch public { + case "-//w3o//dtd w3 html strict 3.0//en//", "-/w3d/dtd html 4.0 transitional/en", "html": + quirks = true + default: + for _, q := range quirkyIDs { + if strings.HasPrefix(public, q) { + quirks = true + break + } + } + } + // The following two public IDs only cause quirks mode if there is no system ID. + if len(n.Attr) == 1 && (strings.HasPrefix(public, "-//w3c//dtd html 4.01 frameset//") || + strings.HasPrefix(public, "-//w3c//dtd html 4.01 transitional//")) { + quirks = true + } + } + if lastAttr := n.Attr[len(n.Attr)-1]; lastAttr.Key == "system" && + strings.EqualFold(lastAttr.Val, "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd") { + quirks = true + } + } + + return n, quirks +} + +// quirkyIDs is a list of public doctype identifiers that cause a document +// to be interpreted in quirks mode. The identifiers should be in lower case. 
+var quirkyIDs = []string{ + "+//silmaril//dtd html pro v0r11 19970101//", + "-//advasoft ltd//dtd html 3.0 aswedit + extensions//", + "-//as//dtd html 3.0 aswedit + extensions//", + "-//ietf//dtd html 2.0 level 1//", + "-//ietf//dtd html 2.0 level 2//", + "-//ietf//dtd html 2.0 strict level 1//", + "-//ietf//dtd html 2.0 strict level 2//", + "-//ietf//dtd html 2.0 strict//", + "-//ietf//dtd html 2.0//", + "-//ietf//dtd html 2.1e//", + "-//ietf//dtd html 3.0//", + "-//ietf//dtd html 3.2 final//", + "-//ietf//dtd html 3.2//", + "-//ietf//dtd html 3//", + "-//ietf//dtd html level 0//", + "-//ietf//dtd html level 1//", + "-//ietf//dtd html level 2//", + "-//ietf//dtd html level 3//", + "-//ietf//dtd html strict level 0//", + "-//ietf//dtd html strict level 1//", + "-//ietf//dtd html strict level 2//", + "-//ietf//dtd html strict level 3//", + "-//ietf//dtd html strict//", + "-//ietf//dtd html//", + "-//metrius//dtd metrius presentational//", + "-//microsoft//dtd internet explorer 2.0 html strict//", + "-//microsoft//dtd internet explorer 2.0 html//", + "-//microsoft//dtd internet explorer 2.0 tables//", + "-//microsoft//dtd internet explorer 3.0 html strict//", + "-//microsoft//dtd internet explorer 3.0 html//", + "-//microsoft//dtd internet explorer 3.0 tables//", + "-//netscape comm. corp.//dtd html//", + "-//netscape comm. corp.//dtd strict html//", + "-//o'reilly and associates//dtd html 2.0//", + "-//o'reilly and associates//dtd html extended 1.0//", + "-//o'reilly and associates//dtd html extended relaxed 1.0//", + "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//", + "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//", + "-//spyglass//dtd html 2.0 extended//", + "-//sq//dtd html 2.0 hotmetal + extensions//", + "-//sun microsystems corp.//dtd hotjava html//", + "-//sun microsystems corp.//dtd hotjava strict html//", + "-//w3c//dtd html 3 1995-03-24//", + "-//w3c//dtd html 3.2 draft//", + "-//w3c//dtd html 3.2 final//", + "-//w3c//dtd html 3.2//", + "-//w3c//dtd html 3.2s draft//", + "-//w3c//dtd html 4.0 frameset//", + "-//w3c//dtd html 4.0 transitional//", + "-//w3c//dtd html experimental 19960712//", + "-//w3c//dtd html experimental 970421//", + "-//w3c//dtd w3 html//", + "-//w3o//dtd w3 html 3.0//", + "-//webtechs//dtd mozilla html 2.0//", + "-//webtechs//dtd mozilla html//", +} diff --git a/vendor/golang.org/x/net/html/entity.go b/vendor/golang.org/x/net/html/entity.go new file mode 100644 index 00000000000..b628880a014 --- /dev/null +++ b/vendor/golang.org/x/net/html/entity.go @@ -0,0 +1,2253 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +// All entities that do not end with ';' are 6 or fewer bytes long. +const longestEntityWithoutSemicolon = 6 + +// entity is a map from HTML entity names to their values. The semicolon matters: +// https://html.spec.whatwg.org/multipage/syntax.html#named-character-references +// lists both "amp" and "amp;" as two separate entries. 
+// +// Note that the HTML5 list is larger than the HTML4 list at +// http://www.w3.org/TR/html4/sgml/entities.html +var entity = map[string]rune{ + "AElig;": '\U000000C6', + "AMP;": '\U00000026', + "Aacute;": '\U000000C1', + "Abreve;": '\U00000102', + "Acirc;": '\U000000C2', + "Acy;": '\U00000410', + "Afr;": '\U0001D504', + "Agrave;": '\U000000C0', + "Alpha;": '\U00000391', + "Amacr;": '\U00000100', + "And;": '\U00002A53', + "Aogon;": '\U00000104', + "Aopf;": '\U0001D538', + "ApplyFunction;": '\U00002061', + "Aring;": '\U000000C5', + "Ascr;": '\U0001D49C', + "Assign;": '\U00002254', + "Atilde;": '\U000000C3', + "Auml;": '\U000000C4', + "Backslash;": '\U00002216', + "Barv;": '\U00002AE7', + "Barwed;": '\U00002306', + "Bcy;": '\U00000411', + "Because;": '\U00002235', + "Bernoullis;": '\U0000212C', + "Beta;": '\U00000392', + "Bfr;": '\U0001D505', + "Bopf;": '\U0001D539', + "Breve;": '\U000002D8', + "Bscr;": '\U0000212C', + "Bumpeq;": '\U0000224E', + "CHcy;": '\U00000427', + "COPY;": '\U000000A9', + "Cacute;": '\U00000106', + "Cap;": '\U000022D2', + "CapitalDifferentialD;": '\U00002145', + "Cayleys;": '\U0000212D', + "Ccaron;": '\U0000010C', + "Ccedil;": '\U000000C7', + "Ccirc;": '\U00000108', + "Cconint;": '\U00002230', + "Cdot;": '\U0000010A', + "Cedilla;": '\U000000B8', + "CenterDot;": '\U000000B7', + "Cfr;": '\U0000212D', + "Chi;": '\U000003A7', + "CircleDot;": '\U00002299', + "CircleMinus;": '\U00002296', + "CirclePlus;": '\U00002295', + "CircleTimes;": '\U00002297', + "ClockwiseContourIntegral;": '\U00002232', + "CloseCurlyDoubleQuote;": '\U0000201D', + "CloseCurlyQuote;": '\U00002019', + "Colon;": '\U00002237', + "Colone;": '\U00002A74', + "Congruent;": '\U00002261', + "Conint;": '\U0000222F', + "ContourIntegral;": '\U0000222E', + "Copf;": '\U00002102', + "Coproduct;": '\U00002210', + "CounterClockwiseContourIntegral;": '\U00002233', + "Cross;": '\U00002A2F', + "Cscr;": '\U0001D49E', + "Cup;": '\U000022D3', + "CupCap;": '\U0000224D', + "DD;": '\U00002145', + "DDotrahd;": '\U00002911', + "DJcy;": '\U00000402', + "DScy;": '\U00000405', + "DZcy;": '\U0000040F', + "Dagger;": '\U00002021', + "Darr;": '\U000021A1', + "Dashv;": '\U00002AE4', + "Dcaron;": '\U0000010E', + "Dcy;": '\U00000414', + "Del;": '\U00002207', + "Delta;": '\U00000394', + "Dfr;": '\U0001D507', + "DiacriticalAcute;": '\U000000B4', + "DiacriticalDot;": '\U000002D9', + "DiacriticalDoubleAcute;": '\U000002DD', + "DiacriticalGrave;": '\U00000060', + "DiacriticalTilde;": '\U000002DC', + "Diamond;": '\U000022C4', + "DifferentialD;": '\U00002146', + "Dopf;": '\U0001D53B', + "Dot;": '\U000000A8', + "DotDot;": '\U000020DC', + "DotEqual;": '\U00002250', + "DoubleContourIntegral;": '\U0000222F', + "DoubleDot;": '\U000000A8', + "DoubleDownArrow;": '\U000021D3', + "DoubleLeftArrow;": '\U000021D0', + "DoubleLeftRightArrow;": '\U000021D4', + "DoubleLeftTee;": '\U00002AE4', + "DoubleLongLeftArrow;": '\U000027F8', + "DoubleLongLeftRightArrow;": '\U000027FA', + "DoubleLongRightArrow;": '\U000027F9', + "DoubleRightArrow;": '\U000021D2', + "DoubleRightTee;": '\U000022A8', + "DoubleUpArrow;": '\U000021D1', + "DoubleUpDownArrow;": '\U000021D5', + "DoubleVerticalBar;": '\U00002225', + "DownArrow;": '\U00002193', + "DownArrowBar;": '\U00002913', + "DownArrowUpArrow;": '\U000021F5', + "DownBreve;": '\U00000311', + "DownLeftRightVector;": '\U00002950', + "DownLeftTeeVector;": '\U0000295E', + "DownLeftVector;": '\U000021BD', + "DownLeftVectorBar;": '\U00002956', + "DownRightTeeVector;": '\U0000295F', + "DownRightVector;": '\U000021C1', + 
"DownRightVectorBar;": '\U00002957', + "DownTee;": '\U000022A4', + "DownTeeArrow;": '\U000021A7', + "Downarrow;": '\U000021D3', + "Dscr;": '\U0001D49F', + "Dstrok;": '\U00000110', + "ENG;": '\U0000014A', + "ETH;": '\U000000D0', + "Eacute;": '\U000000C9', + "Ecaron;": '\U0000011A', + "Ecirc;": '\U000000CA', + "Ecy;": '\U0000042D', + "Edot;": '\U00000116', + "Efr;": '\U0001D508', + "Egrave;": '\U000000C8', + "Element;": '\U00002208', + "Emacr;": '\U00000112', + "EmptySmallSquare;": '\U000025FB', + "EmptyVerySmallSquare;": '\U000025AB', + "Eogon;": '\U00000118', + "Eopf;": '\U0001D53C', + "Epsilon;": '\U00000395', + "Equal;": '\U00002A75', + "EqualTilde;": '\U00002242', + "Equilibrium;": '\U000021CC', + "Escr;": '\U00002130', + "Esim;": '\U00002A73', + "Eta;": '\U00000397', + "Euml;": '\U000000CB', + "Exists;": '\U00002203', + "ExponentialE;": '\U00002147', + "Fcy;": '\U00000424', + "Ffr;": '\U0001D509', + "FilledSmallSquare;": '\U000025FC', + "FilledVerySmallSquare;": '\U000025AA', + "Fopf;": '\U0001D53D', + "ForAll;": '\U00002200', + "Fouriertrf;": '\U00002131', + "Fscr;": '\U00002131', + "GJcy;": '\U00000403', + "GT;": '\U0000003E', + "Gamma;": '\U00000393', + "Gammad;": '\U000003DC', + "Gbreve;": '\U0000011E', + "Gcedil;": '\U00000122', + "Gcirc;": '\U0000011C', + "Gcy;": '\U00000413', + "Gdot;": '\U00000120', + "Gfr;": '\U0001D50A', + "Gg;": '\U000022D9', + "Gopf;": '\U0001D53E', + "GreaterEqual;": '\U00002265', + "GreaterEqualLess;": '\U000022DB', + "GreaterFullEqual;": '\U00002267', + "GreaterGreater;": '\U00002AA2', + "GreaterLess;": '\U00002277', + "GreaterSlantEqual;": '\U00002A7E', + "GreaterTilde;": '\U00002273', + "Gscr;": '\U0001D4A2', + "Gt;": '\U0000226B', + "HARDcy;": '\U0000042A', + "Hacek;": '\U000002C7', + "Hat;": '\U0000005E', + "Hcirc;": '\U00000124', + "Hfr;": '\U0000210C', + "HilbertSpace;": '\U0000210B', + "Hopf;": '\U0000210D', + "HorizontalLine;": '\U00002500', + "Hscr;": '\U0000210B', + "Hstrok;": '\U00000126', + "HumpDownHump;": '\U0000224E', + "HumpEqual;": '\U0000224F', + "IEcy;": '\U00000415', + "IJlig;": '\U00000132', + "IOcy;": '\U00000401', + "Iacute;": '\U000000CD', + "Icirc;": '\U000000CE', + "Icy;": '\U00000418', + "Idot;": '\U00000130', + "Ifr;": '\U00002111', + "Igrave;": '\U000000CC', + "Im;": '\U00002111', + "Imacr;": '\U0000012A', + "ImaginaryI;": '\U00002148', + "Implies;": '\U000021D2', + "Int;": '\U0000222C', + "Integral;": '\U0000222B', + "Intersection;": '\U000022C2', + "InvisibleComma;": '\U00002063', + "InvisibleTimes;": '\U00002062', + "Iogon;": '\U0000012E', + "Iopf;": '\U0001D540', + "Iota;": '\U00000399', + "Iscr;": '\U00002110', + "Itilde;": '\U00000128', + "Iukcy;": '\U00000406', + "Iuml;": '\U000000CF', + "Jcirc;": '\U00000134', + "Jcy;": '\U00000419', + "Jfr;": '\U0001D50D', + "Jopf;": '\U0001D541', + "Jscr;": '\U0001D4A5', + "Jsercy;": '\U00000408', + "Jukcy;": '\U00000404', + "KHcy;": '\U00000425', + "KJcy;": '\U0000040C', + "Kappa;": '\U0000039A', + "Kcedil;": '\U00000136', + "Kcy;": '\U0000041A', + "Kfr;": '\U0001D50E', + "Kopf;": '\U0001D542', + "Kscr;": '\U0001D4A6', + "LJcy;": '\U00000409', + "LT;": '\U0000003C', + "Lacute;": '\U00000139', + "Lambda;": '\U0000039B', + "Lang;": '\U000027EA', + "Laplacetrf;": '\U00002112', + "Larr;": '\U0000219E', + "Lcaron;": '\U0000013D', + "Lcedil;": '\U0000013B', + "Lcy;": '\U0000041B', + "LeftAngleBracket;": '\U000027E8', + "LeftArrow;": '\U00002190', + "LeftArrowBar;": '\U000021E4', + "LeftArrowRightArrow;": '\U000021C6', + "LeftCeiling;": '\U00002308', + "LeftDoubleBracket;": '\U000027E6', 
+ "LeftDownTeeVector;": '\U00002961', + "LeftDownVector;": '\U000021C3', + "LeftDownVectorBar;": '\U00002959', + "LeftFloor;": '\U0000230A', + "LeftRightArrow;": '\U00002194', + "LeftRightVector;": '\U0000294E', + "LeftTee;": '\U000022A3', + "LeftTeeArrow;": '\U000021A4', + "LeftTeeVector;": '\U0000295A', + "LeftTriangle;": '\U000022B2', + "LeftTriangleBar;": '\U000029CF', + "LeftTriangleEqual;": '\U000022B4', + "LeftUpDownVector;": '\U00002951', + "LeftUpTeeVector;": '\U00002960', + "LeftUpVector;": '\U000021BF', + "LeftUpVectorBar;": '\U00002958', + "LeftVector;": '\U000021BC', + "LeftVectorBar;": '\U00002952', + "Leftarrow;": '\U000021D0', + "Leftrightarrow;": '\U000021D4', + "LessEqualGreater;": '\U000022DA', + "LessFullEqual;": '\U00002266', + "LessGreater;": '\U00002276', + "LessLess;": '\U00002AA1', + "LessSlantEqual;": '\U00002A7D', + "LessTilde;": '\U00002272', + "Lfr;": '\U0001D50F', + "Ll;": '\U000022D8', + "Lleftarrow;": '\U000021DA', + "Lmidot;": '\U0000013F', + "LongLeftArrow;": '\U000027F5', + "LongLeftRightArrow;": '\U000027F7', + "LongRightArrow;": '\U000027F6', + "Longleftarrow;": '\U000027F8', + "Longleftrightarrow;": '\U000027FA', + "Longrightarrow;": '\U000027F9', + "Lopf;": '\U0001D543', + "LowerLeftArrow;": '\U00002199', + "LowerRightArrow;": '\U00002198', + "Lscr;": '\U00002112', + "Lsh;": '\U000021B0', + "Lstrok;": '\U00000141', + "Lt;": '\U0000226A', + "Map;": '\U00002905', + "Mcy;": '\U0000041C', + "MediumSpace;": '\U0000205F', + "Mellintrf;": '\U00002133', + "Mfr;": '\U0001D510', + "MinusPlus;": '\U00002213', + "Mopf;": '\U0001D544', + "Mscr;": '\U00002133', + "Mu;": '\U0000039C', + "NJcy;": '\U0000040A', + "Nacute;": '\U00000143', + "Ncaron;": '\U00000147', + "Ncedil;": '\U00000145', + "Ncy;": '\U0000041D', + "NegativeMediumSpace;": '\U0000200B', + "NegativeThickSpace;": '\U0000200B', + "NegativeThinSpace;": '\U0000200B', + "NegativeVeryThinSpace;": '\U0000200B', + "NestedGreaterGreater;": '\U0000226B', + "NestedLessLess;": '\U0000226A', + "NewLine;": '\U0000000A', + "Nfr;": '\U0001D511', + "NoBreak;": '\U00002060', + "NonBreakingSpace;": '\U000000A0', + "Nopf;": '\U00002115', + "Not;": '\U00002AEC', + "NotCongruent;": '\U00002262', + "NotCupCap;": '\U0000226D', + "NotDoubleVerticalBar;": '\U00002226', + "NotElement;": '\U00002209', + "NotEqual;": '\U00002260', + "NotExists;": '\U00002204', + "NotGreater;": '\U0000226F', + "NotGreaterEqual;": '\U00002271', + "NotGreaterLess;": '\U00002279', + "NotGreaterTilde;": '\U00002275', + "NotLeftTriangle;": '\U000022EA', + "NotLeftTriangleEqual;": '\U000022EC', + "NotLess;": '\U0000226E', + "NotLessEqual;": '\U00002270', + "NotLessGreater;": '\U00002278', + "NotLessTilde;": '\U00002274', + "NotPrecedes;": '\U00002280', + "NotPrecedesSlantEqual;": '\U000022E0', + "NotReverseElement;": '\U0000220C', + "NotRightTriangle;": '\U000022EB', + "NotRightTriangleEqual;": '\U000022ED', + "NotSquareSubsetEqual;": '\U000022E2', + "NotSquareSupersetEqual;": '\U000022E3', + "NotSubsetEqual;": '\U00002288', + "NotSucceeds;": '\U00002281', + "NotSucceedsSlantEqual;": '\U000022E1', + "NotSupersetEqual;": '\U00002289', + "NotTilde;": '\U00002241', + "NotTildeEqual;": '\U00002244', + "NotTildeFullEqual;": '\U00002247', + "NotTildeTilde;": '\U00002249', + "NotVerticalBar;": '\U00002224', + "Nscr;": '\U0001D4A9', + "Ntilde;": '\U000000D1', + "Nu;": '\U0000039D', + "OElig;": '\U00000152', + "Oacute;": '\U000000D3', + "Ocirc;": '\U000000D4', + "Ocy;": '\U0000041E', + "Odblac;": '\U00000150', + "Ofr;": '\U0001D512', + "Ograve;": '\U000000D2', + 
"Omacr;": '\U0000014C', + "Omega;": '\U000003A9', + "Omicron;": '\U0000039F', + "Oopf;": '\U0001D546', + "OpenCurlyDoubleQuote;": '\U0000201C', + "OpenCurlyQuote;": '\U00002018', + "Or;": '\U00002A54', + "Oscr;": '\U0001D4AA', + "Oslash;": '\U000000D8', + "Otilde;": '\U000000D5', + "Otimes;": '\U00002A37', + "Ouml;": '\U000000D6', + "OverBar;": '\U0000203E', + "OverBrace;": '\U000023DE', + "OverBracket;": '\U000023B4', + "OverParenthesis;": '\U000023DC', + "PartialD;": '\U00002202', + "Pcy;": '\U0000041F', + "Pfr;": '\U0001D513', + "Phi;": '\U000003A6', + "Pi;": '\U000003A0', + "PlusMinus;": '\U000000B1', + "Poincareplane;": '\U0000210C', + "Popf;": '\U00002119', + "Pr;": '\U00002ABB', + "Precedes;": '\U0000227A', + "PrecedesEqual;": '\U00002AAF', + "PrecedesSlantEqual;": '\U0000227C', + "PrecedesTilde;": '\U0000227E', + "Prime;": '\U00002033', + "Product;": '\U0000220F', + "Proportion;": '\U00002237', + "Proportional;": '\U0000221D', + "Pscr;": '\U0001D4AB', + "Psi;": '\U000003A8', + "QUOT;": '\U00000022', + "Qfr;": '\U0001D514', + "Qopf;": '\U0000211A', + "Qscr;": '\U0001D4AC', + "RBarr;": '\U00002910', + "REG;": '\U000000AE', + "Racute;": '\U00000154', + "Rang;": '\U000027EB', + "Rarr;": '\U000021A0', + "Rarrtl;": '\U00002916', + "Rcaron;": '\U00000158', + "Rcedil;": '\U00000156', + "Rcy;": '\U00000420', + "Re;": '\U0000211C', + "ReverseElement;": '\U0000220B', + "ReverseEquilibrium;": '\U000021CB', + "ReverseUpEquilibrium;": '\U0000296F', + "Rfr;": '\U0000211C', + "Rho;": '\U000003A1', + "RightAngleBracket;": '\U000027E9', + "RightArrow;": '\U00002192', + "RightArrowBar;": '\U000021E5', + "RightArrowLeftArrow;": '\U000021C4', + "RightCeiling;": '\U00002309', + "RightDoubleBracket;": '\U000027E7', + "RightDownTeeVector;": '\U0000295D', + "RightDownVector;": '\U000021C2', + "RightDownVectorBar;": '\U00002955', + "RightFloor;": '\U0000230B', + "RightTee;": '\U000022A2', + "RightTeeArrow;": '\U000021A6', + "RightTeeVector;": '\U0000295B', + "RightTriangle;": '\U000022B3', + "RightTriangleBar;": '\U000029D0', + "RightTriangleEqual;": '\U000022B5', + "RightUpDownVector;": '\U0000294F', + "RightUpTeeVector;": '\U0000295C', + "RightUpVector;": '\U000021BE', + "RightUpVectorBar;": '\U00002954', + "RightVector;": '\U000021C0', + "RightVectorBar;": '\U00002953', + "Rightarrow;": '\U000021D2', + "Ropf;": '\U0000211D', + "RoundImplies;": '\U00002970', + "Rrightarrow;": '\U000021DB', + "Rscr;": '\U0000211B', + "Rsh;": '\U000021B1', + "RuleDelayed;": '\U000029F4', + "SHCHcy;": '\U00000429', + "SHcy;": '\U00000428', + "SOFTcy;": '\U0000042C', + "Sacute;": '\U0000015A', + "Sc;": '\U00002ABC', + "Scaron;": '\U00000160', + "Scedil;": '\U0000015E', + "Scirc;": '\U0000015C', + "Scy;": '\U00000421', + "Sfr;": '\U0001D516', + "ShortDownArrow;": '\U00002193', + "ShortLeftArrow;": '\U00002190', + "ShortRightArrow;": '\U00002192', + "ShortUpArrow;": '\U00002191', + "Sigma;": '\U000003A3', + "SmallCircle;": '\U00002218', + "Sopf;": '\U0001D54A', + "Sqrt;": '\U0000221A', + "Square;": '\U000025A1', + "SquareIntersection;": '\U00002293', + "SquareSubset;": '\U0000228F', + "SquareSubsetEqual;": '\U00002291', + "SquareSuperset;": '\U00002290', + "SquareSupersetEqual;": '\U00002292', + "SquareUnion;": '\U00002294', + "Sscr;": '\U0001D4AE', + "Star;": '\U000022C6', + "Sub;": '\U000022D0', + "Subset;": '\U000022D0', + "SubsetEqual;": '\U00002286', + "Succeeds;": '\U0000227B', + "SucceedsEqual;": '\U00002AB0', + "SucceedsSlantEqual;": '\U0000227D', + "SucceedsTilde;": '\U0000227F', + "SuchThat;": '\U0000220B', + "Sum;": 
'\U00002211', + "Sup;": '\U000022D1', + "Superset;": '\U00002283', + "SupersetEqual;": '\U00002287', + "Supset;": '\U000022D1', + "THORN;": '\U000000DE', + "TRADE;": '\U00002122', + "TSHcy;": '\U0000040B', + "TScy;": '\U00000426', + "Tab;": '\U00000009', + "Tau;": '\U000003A4', + "Tcaron;": '\U00000164', + "Tcedil;": '\U00000162', + "Tcy;": '\U00000422', + "Tfr;": '\U0001D517', + "Therefore;": '\U00002234', + "Theta;": '\U00000398', + "ThinSpace;": '\U00002009', + "Tilde;": '\U0000223C', + "TildeEqual;": '\U00002243', + "TildeFullEqual;": '\U00002245', + "TildeTilde;": '\U00002248', + "Topf;": '\U0001D54B', + "TripleDot;": '\U000020DB', + "Tscr;": '\U0001D4AF', + "Tstrok;": '\U00000166', + "Uacute;": '\U000000DA', + "Uarr;": '\U0000219F', + "Uarrocir;": '\U00002949', + "Ubrcy;": '\U0000040E', + "Ubreve;": '\U0000016C', + "Ucirc;": '\U000000DB', + "Ucy;": '\U00000423', + "Udblac;": '\U00000170', + "Ufr;": '\U0001D518', + "Ugrave;": '\U000000D9', + "Umacr;": '\U0000016A', + "UnderBar;": '\U0000005F', + "UnderBrace;": '\U000023DF', + "UnderBracket;": '\U000023B5', + "UnderParenthesis;": '\U000023DD', + "Union;": '\U000022C3', + "UnionPlus;": '\U0000228E', + "Uogon;": '\U00000172', + "Uopf;": '\U0001D54C', + "UpArrow;": '\U00002191', + "UpArrowBar;": '\U00002912', + "UpArrowDownArrow;": '\U000021C5', + "UpDownArrow;": '\U00002195', + "UpEquilibrium;": '\U0000296E', + "UpTee;": '\U000022A5', + "UpTeeArrow;": '\U000021A5', + "Uparrow;": '\U000021D1', + "Updownarrow;": '\U000021D5', + "UpperLeftArrow;": '\U00002196', + "UpperRightArrow;": '\U00002197', + "Upsi;": '\U000003D2', + "Upsilon;": '\U000003A5', + "Uring;": '\U0000016E', + "Uscr;": '\U0001D4B0', + "Utilde;": '\U00000168', + "Uuml;": '\U000000DC', + "VDash;": '\U000022AB', + "Vbar;": '\U00002AEB', + "Vcy;": '\U00000412', + "Vdash;": '\U000022A9', + "Vdashl;": '\U00002AE6', + "Vee;": '\U000022C1', + "Verbar;": '\U00002016', + "Vert;": '\U00002016', + "VerticalBar;": '\U00002223', + "VerticalLine;": '\U0000007C', + "VerticalSeparator;": '\U00002758', + "VerticalTilde;": '\U00002240', + "VeryThinSpace;": '\U0000200A', + "Vfr;": '\U0001D519', + "Vopf;": '\U0001D54D', + "Vscr;": '\U0001D4B1', + "Vvdash;": '\U000022AA', + "Wcirc;": '\U00000174', + "Wedge;": '\U000022C0', + "Wfr;": '\U0001D51A', + "Wopf;": '\U0001D54E', + "Wscr;": '\U0001D4B2', + "Xfr;": '\U0001D51B', + "Xi;": '\U0000039E', + "Xopf;": '\U0001D54F', + "Xscr;": '\U0001D4B3', + "YAcy;": '\U0000042F', + "YIcy;": '\U00000407', + "YUcy;": '\U0000042E', + "Yacute;": '\U000000DD', + "Ycirc;": '\U00000176', + "Ycy;": '\U0000042B', + "Yfr;": '\U0001D51C', + "Yopf;": '\U0001D550', + "Yscr;": '\U0001D4B4', + "Yuml;": '\U00000178', + "ZHcy;": '\U00000416', + "Zacute;": '\U00000179', + "Zcaron;": '\U0000017D', + "Zcy;": '\U00000417', + "Zdot;": '\U0000017B', + "ZeroWidthSpace;": '\U0000200B', + "Zeta;": '\U00000396', + "Zfr;": '\U00002128', + "Zopf;": '\U00002124', + "Zscr;": '\U0001D4B5', + "aacute;": '\U000000E1', + "abreve;": '\U00000103', + "ac;": '\U0000223E', + "acd;": '\U0000223F', + "acirc;": '\U000000E2', + "acute;": '\U000000B4', + "acy;": '\U00000430', + "aelig;": '\U000000E6', + "af;": '\U00002061', + "afr;": '\U0001D51E', + "agrave;": '\U000000E0', + "alefsym;": '\U00002135', + "aleph;": '\U00002135', + "alpha;": '\U000003B1', + "amacr;": '\U00000101', + "amalg;": '\U00002A3F', + "amp;": '\U00000026', + "and;": '\U00002227', + "andand;": '\U00002A55', + "andd;": '\U00002A5C', + "andslope;": '\U00002A58', + "andv;": '\U00002A5A', + "ang;": '\U00002220', + "ange;": '\U000029A4', + 
"angle;": '\U00002220', + "angmsd;": '\U00002221', + "angmsdaa;": '\U000029A8', + "angmsdab;": '\U000029A9', + "angmsdac;": '\U000029AA', + "angmsdad;": '\U000029AB', + "angmsdae;": '\U000029AC', + "angmsdaf;": '\U000029AD', + "angmsdag;": '\U000029AE', + "angmsdah;": '\U000029AF', + "angrt;": '\U0000221F', + "angrtvb;": '\U000022BE', + "angrtvbd;": '\U0000299D', + "angsph;": '\U00002222', + "angst;": '\U000000C5', + "angzarr;": '\U0000237C', + "aogon;": '\U00000105', + "aopf;": '\U0001D552', + "ap;": '\U00002248', + "apE;": '\U00002A70', + "apacir;": '\U00002A6F', + "ape;": '\U0000224A', + "apid;": '\U0000224B', + "apos;": '\U00000027', + "approx;": '\U00002248', + "approxeq;": '\U0000224A', + "aring;": '\U000000E5', + "ascr;": '\U0001D4B6', + "ast;": '\U0000002A', + "asymp;": '\U00002248', + "asympeq;": '\U0000224D', + "atilde;": '\U000000E3', + "auml;": '\U000000E4', + "awconint;": '\U00002233', + "awint;": '\U00002A11', + "bNot;": '\U00002AED', + "backcong;": '\U0000224C', + "backepsilon;": '\U000003F6', + "backprime;": '\U00002035', + "backsim;": '\U0000223D', + "backsimeq;": '\U000022CD', + "barvee;": '\U000022BD', + "barwed;": '\U00002305', + "barwedge;": '\U00002305', + "bbrk;": '\U000023B5', + "bbrktbrk;": '\U000023B6', + "bcong;": '\U0000224C', + "bcy;": '\U00000431', + "bdquo;": '\U0000201E', + "becaus;": '\U00002235', + "because;": '\U00002235', + "bemptyv;": '\U000029B0', + "bepsi;": '\U000003F6', + "bernou;": '\U0000212C', + "beta;": '\U000003B2', + "beth;": '\U00002136', + "between;": '\U0000226C', + "bfr;": '\U0001D51F', + "bigcap;": '\U000022C2', + "bigcirc;": '\U000025EF', + "bigcup;": '\U000022C3', + "bigodot;": '\U00002A00', + "bigoplus;": '\U00002A01', + "bigotimes;": '\U00002A02', + "bigsqcup;": '\U00002A06', + "bigstar;": '\U00002605', + "bigtriangledown;": '\U000025BD', + "bigtriangleup;": '\U000025B3', + "biguplus;": '\U00002A04', + "bigvee;": '\U000022C1', + "bigwedge;": '\U000022C0', + "bkarow;": '\U0000290D', + "blacklozenge;": '\U000029EB', + "blacksquare;": '\U000025AA', + "blacktriangle;": '\U000025B4', + "blacktriangledown;": '\U000025BE', + "blacktriangleleft;": '\U000025C2', + "blacktriangleright;": '\U000025B8', + "blank;": '\U00002423', + "blk12;": '\U00002592', + "blk14;": '\U00002591', + "blk34;": '\U00002593', + "block;": '\U00002588', + "bnot;": '\U00002310', + "bopf;": '\U0001D553', + "bot;": '\U000022A5', + "bottom;": '\U000022A5', + "bowtie;": '\U000022C8', + "boxDL;": '\U00002557', + "boxDR;": '\U00002554', + "boxDl;": '\U00002556', + "boxDr;": '\U00002553', + "boxH;": '\U00002550', + "boxHD;": '\U00002566', + "boxHU;": '\U00002569', + "boxHd;": '\U00002564', + "boxHu;": '\U00002567', + "boxUL;": '\U0000255D', + "boxUR;": '\U0000255A', + "boxUl;": '\U0000255C', + "boxUr;": '\U00002559', + "boxV;": '\U00002551', + "boxVH;": '\U0000256C', + "boxVL;": '\U00002563', + "boxVR;": '\U00002560', + "boxVh;": '\U0000256B', + "boxVl;": '\U00002562', + "boxVr;": '\U0000255F', + "boxbox;": '\U000029C9', + "boxdL;": '\U00002555', + "boxdR;": '\U00002552', + "boxdl;": '\U00002510', + "boxdr;": '\U0000250C', + "boxh;": '\U00002500', + "boxhD;": '\U00002565', + "boxhU;": '\U00002568', + "boxhd;": '\U0000252C', + "boxhu;": '\U00002534', + "boxminus;": '\U0000229F', + "boxplus;": '\U0000229E', + "boxtimes;": '\U000022A0', + "boxuL;": '\U0000255B', + "boxuR;": '\U00002558', + "boxul;": '\U00002518', + "boxur;": '\U00002514', + "boxv;": '\U00002502', + "boxvH;": '\U0000256A', + "boxvL;": '\U00002561', + "boxvR;": '\U0000255E', + "boxvh;": '\U0000253C', + "boxvl;": 
'\U00002524', + "boxvr;": '\U0000251C', + "bprime;": '\U00002035', + "breve;": '\U000002D8', + "brvbar;": '\U000000A6', + "bscr;": '\U0001D4B7', + "bsemi;": '\U0000204F', + "bsim;": '\U0000223D', + "bsime;": '\U000022CD', + "bsol;": '\U0000005C', + "bsolb;": '\U000029C5', + "bsolhsub;": '\U000027C8', + "bull;": '\U00002022', + "bullet;": '\U00002022', + "bump;": '\U0000224E', + "bumpE;": '\U00002AAE', + "bumpe;": '\U0000224F', + "bumpeq;": '\U0000224F', + "cacute;": '\U00000107', + "cap;": '\U00002229', + "capand;": '\U00002A44', + "capbrcup;": '\U00002A49', + "capcap;": '\U00002A4B', + "capcup;": '\U00002A47', + "capdot;": '\U00002A40', + "caret;": '\U00002041', + "caron;": '\U000002C7', + "ccaps;": '\U00002A4D', + "ccaron;": '\U0000010D', + "ccedil;": '\U000000E7', + "ccirc;": '\U00000109', + "ccups;": '\U00002A4C', + "ccupssm;": '\U00002A50', + "cdot;": '\U0000010B', + "cedil;": '\U000000B8', + "cemptyv;": '\U000029B2', + "cent;": '\U000000A2', + "centerdot;": '\U000000B7', + "cfr;": '\U0001D520', + "chcy;": '\U00000447', + "check;": '\U00002713', + "checkmark;": '\U00002713', + "chi;": '\U000003C7', + "cir;": '\U000025CB', + "cirE;": '\U000029C3', + "circ;": '\U000002C6', + "circeq;": '\U00002257', + "circlearrowleft;": '\U000021BA', + "circlearrowright;": '\U000021BB', + "circledR;": '\U000000AE', + "circledS;": '\U000024C8', + "circledast;": '\U0000229B', + "circledcirc;": '\U0000229A', + "circleddash;": '\U0000229D', + "cire;": '\U00002257', + "cirfnint;": '\U00002A10', + "cirmid;": '\U00002AEF', + "cirscir;": '\U000029C2', + "clubs;": '\U00002663', + "clubsuit;": '\U00002663', + "colon;": '\U0000003A', + "colone;": '\U00002254', + "coloneq;": '\U00002254', + "comma;": '\U0000002C', + "commat;": '\U00000040', + "comp;": '\U00002201', + "compfn;": '\U00002218', + "complement;": '\U00002201', + "complexes;": '\U00002102', + "cong;": '\U00002245', + "congdot;": '\U00002A6D', + "conint;": '\U0000222E', + "copf;": '\U0001D554', + "coprod;": '\U00002210', + "copy;": '\U000000A9', + "copysr;": '\U00002117', + "crarr;": '\U000021B5', + "cross;": '\U00002717', + "cscr;": '\U0001D4B8', + "csub;": '\U00002ACF', + "csube;": '\U00002AD1', + "csup;": '\U00002AD0', + "csupe;": '\U00002AD2', + "ctdot;": '\U000022EF', + "cudarrl;": '\U00002938', + "cudarrr;": '\U00002935', + "cuepr;": '\U000022DE', + "cuesc;": '\U000022DF', + "cularr;": '\U000021B6', + "cularrp;": '\U0000293D', + "cup;": '\U0000222A', + "cupbrcap;": '\U00002A48', + "cupcap;": '\U00002A46', + "cupcup;": '\U00002A4A', + "cupdot;": '\U0000228D', + "cupor;": '\U00002A45', + "curarr;": '\U000021B7', + "curarrm;": '\U0000293C', + "curlyeqprec;": '\U000022DE', + "curlyeqsucc;": '\U000022DF', + "curlyvee;": '\U000022CE', + "curlywedge;": '\U000022CF', + "curren;": '\U000000A4', + "curvearrowleft;": '\U000021B6', + "curvearrowright;": '\U000021B7', + "cuvee;": '\U000022CE', + "cuwed;": '\U000022CF', + "cwconint;": '\U00002232', + "cwint;": '\U00002231', + "cylcty;": '\U0000232D', + "dArr;": '\U000021D3', + "dHar;": '\U00002965', + "dagger;": '\U00002020', + "daleth;": '\U00002138', + "darr;": '\U00002193', + "dash;": '\U00002010', + "dashv;": '\U000022A3', + "dbkarow;": '\U0000290F', + "dblac;": '\U000002DD', + "dcaron;": '\U0000010F', + "dcy;": '\U00000434', + "dd;": '\U00002146', + "ddagger;": '\U00002021', + "ddarr;": '\U000021CA', + "ddotseq;": '\U00002A77', + "deg;": '\U000000B0', + "delta;": '\U000003B4', + "demptyv;": '\U000029B1', + "dfisht;": '\U0000297F', + "dfr;": '\U0001D521', + "dharl;": '\U000021C3', + "dharr;": '\U000021C2', + 
"diam;": '\U000022C4', + "diamond;": '\U000022C4', + "diamondsuit;": '\U00002666', + "diams;": '\U00002666', + "die;": '\U000000A8', + "digamma;": '\U000003DD', + "disin;": '\U000022F2', + "div;": '\U000000F7', + "divide;": '\U000000F7', + "divideontimes;": '\U000022C7', + "divonx;": '\U000022C7', + "djcy;": '\U00000452', + "dlcorn;": '\U0000231E', + "dlcrop;": '\U0000230D', + "dollar;": '\U00000024', + "dopf;": '\U0001D555', + "dot;": '\U000002D9', + "doteq;": '\U00002250', + "doteqdot;": '\U00002251', + "dotminus;": '\U00002238', + "dotplus;": '\U00002214', + "dotsquare;": '\U000022A1', + "doublebarwedge;": '\U00002306', + "downarrow;": '\U00002193', + "downdownarrows;": '\U000021CA', + "downharpoonleft;": '\U000021C3', + "downharpoonright;": '\U000021C2', + "drbkarow;": '\U00002910', + "drcorn;": '\U0000231F', + "drcrop;": '\U0000230C', + "dscr;": '\U0001D4B9', + "dscy;": '\U00000455', + "dsol;": '\U000029F6', + "dstrok;": '\U00000111', + "dtdot;": '\U000022F1', + "dtri;": '\U000025BF', + "dtrif;": '\U000025BE', + "duarr;": '\U000021F5', + "duhar;": '\U0000296F', + "dwangle;": '\U000029A6', + "dzcy;": '\U0000045F', + "dzigrarr;": '\U000027FF', + "eDDot;": '\U00002A77', + "eDot;": '\U00002251', + "eacute;": '\U000000E9', + "easter;": '\U00002A6E', + "ecaron;": '\U0000011B', + "ecir;": '\U00002256', + "ecirc;": '\U000000EA', + "ecolon;": '\U00002255', + "ecy;": '\U0000044D', + "edot;": '\U00000117', + "ee;": '\U00002147', + "efDot;": '\U00002252', + "efr;": '\U0001D522', + "eg;": '\U00002A9A', + "egrave;": '\U000000E8', + "egs;": '\U00002A96', + "egsdot;": '\U00002A98', + "el;": '\U00002A99', + "elinters;": '\U000023E7', + "ell;": '\U00002113', + "els;": '\U00002A95', + "elsdot;": '\U00002A97', + "emacr;": '\U00000113', + "empty;": '\U00002205', + "emptyset;": '\U00002205', + "emptyv;": '\U00002205', + "emsp;": '\U00002003', + "emsp13;": '\U00002004', + "emsp14;": '\U00002005', + "eng;": '\U0000014B', + "ensp;": '\U00002002', + "eogon;": '\U00000119', + "eopf;": '\U0001D556', + "epar;": '\U000022D5', + "eparsl;": '\U000029E3', + "eplus;": '\U00002A71', + "epsi;": '\U000003B5', + "epsilon;": '\U000003B5', + "epsiv;": '\U000003F5', + "eqcirc;": '\U00002256', + "eqcolon;": '\U00002255', + "eqsim;": '\U00002242', + "eqslantgtr;": '\U00002A96', + "eqslantless;": '\U00002A95', + "equals;": '\U0000003D', + "equest;": '\U0000225F', + "equiv;": '\U00002261', + "equivDD;": '\U00002A78', + "eqvparsl;": '\U000029E5', + "erDot;": '\U00002253', + "erarr;": '\U00002971', + "escr;": '\U0000212F', + "esdot;": '\U00002250', + "esim;": '\U00002242', + "eta;": '\U000003B7', + "eth;": '\U000000F0', + "euml;": '\U000000EB', + "euro;": '\U000020AC', + "excl;": '\U00000021', + "exist;": '\U00002203', + "expectation;": '\U00002130', + "exponentiale;": '\U00002147', + "fallingdotseq;": '\U00002252', + "fcy;": '\U00000444', + "female;": '\U00002640', + "ffilig;": '\U0000FB03', + "fflig;": '\U0000FB00', + "ffllig;": '\U0000FB04', + "ffr;": '\U0001D523', + "filig;": '\U0000FB01', + "flat;": '\U0000266D', + "fllig;": '\U0000FB02', + "fltns;": '\U000025B1', + "fnof;": '\U00000192', + "fopf;": '\U0001D557', + "forall;": '\U00002200', + "fork;": '\U000022D4', + "forkv;": '\U00002AD9', + "fpartint;": '\U00002A0D', + "frac12;": '\U000000BD', + "frac13;": '\U00002153', + "frac14;": '\U000000BC', + "frac15;": '\U00002155', + "frac16;": '\U00002159', + "frac18;": '\U0000215B', + "frac23;": '\U00002154', + "frac25;": '\U00002156', + "frac34;": '\U000000BE', + "frac35;": '\U00002157', + "frac38;": '\U0000215C', + "frac45;": 
'\U00002158', + "frac56;": '\U0000215A', + "frac58;": '\U0000215D', + "frac78;": '\U0000215E', + "frasl;": '\U00002044', + "frown;": '\U00002322', + "fscr;": '\U0001D4BB', + "gE;": '\U00002267', + "gEl;": '\U00002A8C', + "gacute;": '\U000001F5', + "gamma;": '\U000003B3', + "gammad;": '\U000003DD', + "gap;": '\U00002A86', + "gbreve;": '\U0000011F', + "gcirc;": '\U0000011D', + "gcy;": '\U00000433', + "gdot;": '\U00000121', + "ge;": '\U00002265', + "gel;": '\U000022DB', + "geq;": '\U00002265', + "geqq;": '\U00002267', + "geqslant;": '\U00002A7E', + "ges;": '\U00002A7E', + "gescc;": '\U00002AA9', + "gesdot;": '\U00002A80', + "gesdoto;": '\U00002A82', + "gesdotol;": '\U00002A84', + "gesles;": '\U00002A94', + "gfr;": '\U0001D524', + "gg;": '\U0000226B', + "ggg;": '\U000022D9', + "gimel;": '\U00002137', + "gjcy;": '\U00000453', + "gl;": '\U00002277', + "glE;": '\U00002A92', + "gla;": '\U00002AA5', + "glj;": '\U00002AA4', + "gnE;": '\U00002269', + "gnap;": '\U00002A8A', + "gnapprox;": '\U00002A8A', + "gne;": '\U00002A88', + "gneq;": '\U00002A88', + "gneqq;": '\U00002269', + "gnsim;": '\U000022E7', + "gopf;": '\U0001D558', + "grave;": '\U00000060', + "gscr;": '\U0000210A', + "gsim;": '\U00002273', + "gsime;": '\U00002A8E', + "gsiml;": '\U00002A90', + "gt;": '\U0000003E', + "gtcc;": '\U00002AA7', + "gtcir;": '\U00002A7A', + "gtdot;": '\U000022D7', + "gtlPar;": '\U00002995', + "gtquest;": '\U00002A7C', + "gtrapprox;": '\U00002A86', + "gtrarr;": '\U00002978', + "gtrdot;": '\U000022D7', + "gtreqless;": '\U000022DB', + "gtreqqless;": '\U00002A8C', + "gtrless;": '\U00002277', + "gtrsim;": '\U00002273', + "hArr;": '\U000021D4', + "hairsp;": '\U0000200A', + "half;": '\U000000BD', + "hamilt;": '\U0000210B', + "hardcy;": '\U0000044A', + "harr;": '\U00002194', + "harrcir;": '\U00002948', + "harrw;": '\U000021AD', + "hbar;": '\U0000210F', + "hcirc;": '\U00000125', + "hearts;": '\U00002665', + "heartsuit;": '\U00002665', + "hellip;": '\U00002026', + "hercon;": '\U000022B9', + "hfr;": '\U0001D525', + "hksearow;": '\U00002925', + "hkswarow;": '\U00002926', + "hoarr;": '\U000021FF', + "homtht;": '\U0000223B', + "hookleftarrow;": '\U000021A9', + "hookrightarrow;": '\U000021AA', + "hopf;": '\U0001D559', + "horbar;": '\U00002015', + "hscr;": '\U0001D4BD', + "hslash;": '\U0000210F', + "hstrok;": '\U00000127', + "hybull;": '\U00002043', + "hyphen;": '\U00002010', + "iacute;": '\U000000ED', + "ic;": '\U00002063', + "icirc;": '\U000000EE', + "icy;": '\U00000438', + "iecy;": '\U00000435', + "iexcl;": '\U000000A1', + "iff;": '\U000021D4', + "ifr;": '\U0001D526', + "igrave;": '\U000000EC', + "ii;": '\U00002148', + "iiiint;": '\U00002A0C', + "iiint;": '\U0000222D', + "iinfin;": '\U000029DC', + "iiota;": '\U00002129', + "ijlig;": '\U00000133', + "imacr;": '\U0000012B', + "image;": '\U00002111', + "imagline;": '\U00002110', + "imagpart;": '\U00002111', + "imath;": '\U00000131', + "imof;": '\U000022B7', + "imped;": '\U000001B5', + "in;": '\U00002208', + "incare;": '\U00002105', + "infin;": '\U0000221E', + "infintie;": '\U000029DD', + "inodot;": '\U00000131', + "int;": '\U0000222B', + "intcal;": '\U000022BA', + "integers;": '\U00002124', + "intercal;": '\U000022BA', + "intlarhk;": '\U00002A17', + "intprod;": '\U00002A3C', + "iocy;": '\U00000451', + "iogon;": '\U0000012F', + "iopf;": '\U0001D55A', + "iota;": '\U000003B9', + "iprod;": '\U00002A3C', + "iquest;": '\U000000BF', + "iscr;": '\U0001D4BE', + "isin;": '\U00002208', + "isinE;": '\U000022F9', + "isindot;": '\U000022F5', + "isins;": '\U000022F4', + "isinsv;": '\U000022F3', + 
"isinv;": '\U00002208', + "it;": '\U00002062', + "itilde;": '\U00000129', + "iukcy;": '\U00000456', + "iuml;": '\U000000EF', + "jcirc;": '\U00000135', + "jcy;": '\U00000439', + "jfr;": '\U0001D527', + "jmath;": '\U00000237', + "jopf;": '\U0001D55B', + "jscr;": '\U0001D4BF', + "jsercy;": '\U00000458', + "jukcy;": '\U00000454', + "kappa;": '\U000003BA', + "kappav;": '\U000003F0', + "kcedil;": '\U00000137', + "kcy;": '\U0000043A', + "kfr;": '\U0001D528', + "kgreen;": '\U00000138', + "khcy;": '\U00000445', + "kjcy;": '\U0000045C', + "kopf;": '\U0001D55C', + "kscr;": '\U0001D4C0', + "lAarr;": '\U000021DA', + "lArr;": '\U000021D0', + "lAtail;": '\U0000291B', + "lBarr;": '\U0000290E', + "lE;": '\U00002266', + "lEg;": '\U00002A8B', + "lHar;": '\U00002962', + "lacute;": '\U0000013A', + "laemptyv;": '\U000029B4', + "lagran;": '\U00002112', + "lambda;": '\U000003BB', + "lang;": '\U000027E8', + "langd;": '\U00002991', + "langle;": '\U000027E8', + "lap;": '\U00002A85', + "laquo;": '\U000000AB', + "larr;": '\U00002190', + "larrb;": '\U000021E4', + "larrbfs;": '\U0000291F', + "larrfs;": '\U0000291D', + "larrhk;": '\U000021A9', + "larrlp;": '\U000021AB', + "larrpl;": '\U00002939', + "larrsim;": '\U00002973', + "larrtl;": '\U000021A2', + "lat;": '\U00002AAB', + "latail;": '\U00002919', + "late;": '\U00002AAD', + "lbarr;": '\U0000290C', + "lbbrk;": '\U00002772', + "lbrace;": '\U0000007B', + "lbrack;": '\U0000005B', + "lbrke;": '\U0000298B', + "lbrksld;": '\U0000298F', + "lbrkslu;": '\U0000298D', + "lcaron;": '\U0000013E', + "lcedil;": '\U0000013C', + "lceil;": '\U00002308', + "lcub;": '\U0000007B', + "lcy;": '\U0000043B', + "ldca;": '\U00002936', + "ldquo;": '\U0000201C', + "ldquor;": '\U0000201E', + "ldrdhar;": '\U00002967', + "ldrushar;": '\U0000294B', + "ldsh;": '\U000021B2', + "le;": '\U00002264', + "leftarrow;": '\U00002190', + "leftarrowtail;": '\U000021A2', + "leftharpoondown;": '\U000021BD', + "leftharpoonup;": '\U000021BC', + "leftleftarrows;": '\U000021C7', + "leftrightarrow;": '\U00002194', + "leftrightarrows;": '\U000021C6', + "leftrightharpoons;": '\U000021CB', + "leftrightsquigarrow;": '\U000021AD', + "leftthreetimes;": '\U000022CB', + "leg;": '\U000022DA', + "leq;": '\U00002264', + "leqq;": '\U00002266', + "leqslant;": '\U00002A7D', + "les;": '\U00002A7D', + "lescc;": '\U00002AA8', + "lesdot;": '\U00002A7F', + "lesdoto;": '\U00002A81', + "lesdotor;": '\U00002A83', + "lesges;": '\U00002A93', + "lessapprox;": '\U00002A85', + "lessdot;": '\U000022D6', + "lesseqgtr;": '\U000022DA', + "lesseqqgtr;": '\U00002A8B', + "lessgtr;": '\U00002276', + "lesssim;": '\U00002272', + "lfisht;": '\U0000297C', + "lfloor;": '\U0000230A', + "lfr;": '\U0001D529', + "lg;": '\U00002276', + "lgE;": '\U00002A91', + "lhard;": '\U000021BD', + "lharu;": '\U000021BC', + "lharul;": '\U0000296A', + "lhblk;": '\U00002584', + "ljcy;": '\U00000459', + "ll;": '\U0000226A', + "llarr;": '\U000021C7', + "llcorner;": '\U0000231E', + "llhard;": '\U0000296B', + "lltri;": '\U000025FA', + "lmidot;": '\U00000140', + "lmoust;": '\U000023B0', + "lmoustache;": '\U000023B0', + "lnE;": '\U00002268', + "lnap;": '\U00002A89', + "lnapprox;": '\U00002A89', + "lne;": '\U00002A87', + "lneq;": '\U00002A87', + "lneqq;": '\U00002268', + "lnsim;": '\U000022E6', + "loang;": '\U000027EC', + "loarr;": '\U000021FD', + "lobrk;": '\U000027E6', + "longleftarrow;": '\U000027F5', + "longleftrightarrow;": '\U000027F7', + "longmapsto;": '\U000027FC', + "longrightarrow;": '\U000027F6', + "looparrowleft;": '\U000021AB', + "looparrowright;": '\U000021AC', + "lopar;": 
'\U00002985', + "lopf;": '\U0001D55D', + "loplus;": '\U00002A2D', + "lotimes;": '\U00002A34', + "lowast;": '\U00002217', + "lowbar;": '\U0000005F', + "loz;": '\U000025CA', + "lozenge;": '\U000025CA', + "lozf;": '\U000029EB', + "lpar;": '\U00000028', + "lparlt;": '\U00002993', + "lrarr;": '\U000021C6', + "lrcorner;": '\U0000231F', + "lrhar;": '\U000021CB', + "lrhard;": '\U0000296D', + "lrm;": '\U0000200E', + "lrtri;": '\U000022BF', + "lsaquo;": '\U00002039', + "lscr;": '\U0001D4C1', + "lsh;": '\U000021B0', + "lsim;": '\U00002272', + "lsime;": '\U00002A8D', + "lsimg;": '\U00002A8F', + "lsqb;": '\U0000005B', + "lsquo;": '\U00002018', + "lsquor;": '\U0000201A', + "lstrok;": '\U00000142', + "lt;": '\U0000003C', + "ltcc;": '\U00002AA6', + "ltcir;": '\U00002A79', + "ltdot;": '\U000022D6', + "lthree;": '\U000022CB', + "ltimes;": '\U000022C9', + "ltlarr;": '\U00002976', + "ltquest;": '\U00002A7B', + "ltrPar;": '\U00002996', + "ltri;": '\U000025C3', + "ltrie;": '\U000022B4', + "ltrif;": '\U000025C2', + "lurdshar;": '\U0000294A', + "luruhar;": '\U00002966', + "mDDot;": '\U0000223A', + "macr;": '\U000000AF', + "male;": '\U00002642', + "malt;": '\U00002720', + "maltese;": '\U00002720', + "map;": '\U000021A6', + "mapsto;": '\U000021A6', + "mapstodown;": '\U000021A7', + "mapstoleft;": '\U000021A4', + "mapstoup;": '\U000021A5', + "marker;": '\U000025AE', + "mcomma;": '\U00002A29', + "mcy;": '\U0000043C', + "mdash;": '\U00002014', + "measuredangle;": '\U00002221', + "mfr;": '\U0001D52A', + "mho;": '\U00002127', + "micro;": '\U000000B5', + "mid;": '\U00002223', + "midast;": '\U0000002A', + "midcir;": '\U00002AF0', + "middot;": '\U000000B7', + "minus;": '\U00002212', + "minusb;": '\U0000229F', + "minusd;": '\U00002238', + "minusdu;": '\U00002A2A', + "mlcp;": '\U00002ADB', + "mldr;": '\U00002026', + "mnplus;": '\U00002213', + "models;": '\U000022A7', + "mopf;": '\U0001D55E', + "mp;": '\U00002213', + "mscr;": '\U0001D4C2', + "mstpos;": '\U0000223E', + "mu;": '\U000003BC', + "multimap;": '\U000022B8', + "mumap;": '\U000022B8', + "nLeftarrow;": '\U000021CD', + "nLeftrightarrow;": '\U000021CE', + "nRightarrow;": '\U000021CF', + "nVDash;": '\U000022AF', + "nVdash;": '\U000022AE', + "nabla;": '\U00002207', + "nacute;": '\U00000144', + "nap;": '\U00002249', + "napos;": '\U00000149', + "napprox;": '\U00002249', + "natur;": '\U0000266E', + "natural;": '\U0000266E', + "naturals;": '\U00002115', + "nbsp;": '\U000000A0', + "ncap;": '\U00002A43', + "ncaron;": '\U00000148', + "ncedil;": '\U00000146', + "ncong;": '\U00002247', + "ncup;": '\U00002A42', + "ncy;": '\U0000043D', + "ndash;": '\U00002013', + "ne;": '\U00002260', + "neArr;": '\U000021D7', + "nearhk;": '\U00002924', + "nearr;": '\U00002197', + "nearrow;": '\U00002197', + "nequiv;": '\U00002262', + "nesear;": '\U00002928', + "nexist;": '\U00002204', + "nexists;": '\U00002204', + "nfr;": '\U0001D52B', + "nge;": '\U00002271', + "ngeq;": '\U00002271', + "ngsim;": '\U00002275', + "ngt;": '\U0000226F', + "ngtr;": '\U0000226F', + "nhArr;": '\U000021CE', + "nharr;": '\U000021AE', + "nhpar;": '\U00002AF2', + "ni;": '\U0000220B', + "nis;": '\U000022FC', + "nisd;": '\U000022FA', + "niv;": '\U0000220B', + "njcy;": '\U0000045A', + "nlArr;": '\U000021CD', + "nlarr;": '\U0000219A', + "nldr;": '\U00002025', + "nle;": '\U00002270', + "nleftarrow;": '\U0000219A', + "nleftrightarrow;": '\U000021AE', + "nleq;": '\U00002270', + "nless;": '\U0000226E', + "nlsim;": '\U00002274', + "nlt;": '\U0000226E', + "nltri;": '\U000022EA', + "nltrie;": '\U000022EC', + "nmid;": '\U00002224', + 
"nopf;": '\U0001D55F', + "not;": '\U000000AC', + "notin;": '\U00002209', + "notinva;": '\U00002209', + "notinvb;": '\U000022F7', + "notinvc;": '\U000022F6', + "notni;": '\U0000220C', + "notniva;": '\U0000220C', + "notnivb;": '\U000022FE', + "notnivc;": '\U000022FD', + "npar;": '\U00002226', + "nparallel;": '\U00002226', + "npolint;": '\U00002A14', + "npr;": '\U00002280', + "nprcue;": '\U000022E0', + "nprec;": '\U00002280', + "nrArr;": '\U000021CF', + "nrarr;": '\U0000219B', + "nrightarrow;": '\U0000219B', + "nrtri;": '\U000022EB', + "nrtrie;": '\U000022ED', + "nsc;": '\U00002281', + "nsccue;": '\U000022E1', + "nscr;": '\U0001D4C3', + "nshortmid;": '\U00002224', + "nshortparallel;": '\U00002226', + "nsim;": '\U00002241', + "nsime;": '\U00002244', + "nsimeq;": '\U00002244', + "nsmid;": '\U00002224', + "nspar;": '\U00002226', + "nsqsube;": '\U000022E2', + "nsqsupe;": '\U000022E3', + "nsub;": '\U00002284', + "nsube;": '\U00002288', + "nsubseteq;": '\U00002288', + "nsucc;": '\U00002281', + "nsup;": '\U00002285', + "nsupe;": '\U00002289', + "nsupseteq;": '\U00002289', + "ntgl;": '\U00002279', + "ntilde;": '\U000000F1', + "ntlg;": '\U00002278', + "ntriangleleft;": '\U000022EA', + "ntrianglelefteq;": '\U000022EC', + "ntriangleright;": '\U000022EB', + "ntrianglerighteq;": '\U000022ED', + "nu;": '\U000003BD', + "num;": '\U00000023', + "numero;": '\U00002116', + "numsp;": '\U00002007', + "nvDash;": '\U000022AD', + "nvHarr;": '\U00002904', + "nvdash;": '\U000022AC', + "nvinfin;": '\U000029DE', + "nvlArr;": '\U00002902', + "nvrArr;": '\U00002903', + "nwArr;": '\U000021D6', + "nwarhk;": '\U00002923', + "nwarr;": '\U00002196', + "nwarrow;": '\U00002196', + "nwnear;": '\U00002927', + "oS;": '\U000024C8', + "oacute;": '\U000000F3', + "oast;": '\U0000229B', + "ocir;": '\U0000229A', + "ocirc;": '\U000000F4', + "ocy;": '\U0000043E', + "odash;": '\U0000229D', + "odblac;": '\U00000151', + "odiv;": '\U00002A38', + "odot;": '\U00002299', + "odsold;": '\U000029BC', + "oelig;": '\U00000153', + "ofcir;": '\U000029BF', + "ofr;": '\U0001D52C', + "ogon;": '\U000002DB', + "ograve;": '\U000000F2', + "ogt;": '\U000029C1', + "ohbar;": '\U000029B5', + "ohm;": '\U000003A9', + "oint;": '\U0000222E', + "olarr;": '\U000021BA', + "olcir;": '\U000029BE', + "olcross;": '\U000029BB', + "oline;": '\U0000203E', + "olt;": '\U000029C0', + "omacr;": '\U0000014D', + "omega;": '\U000003C9', + "omicron;": '\U000003BF', + "omid;": '\U000029B6', + "ominus;": '\U00002296', + "oopf;": '\U0001D560', + "opar;": '\U000029B7', + "operp;": '\U000029B9', + "oplus;": '\U00002295', + "or;": '\U00002228', + "orarr;": '\U000021BB', + "ord;": '\U00002A5D', + "order;": '\U00002134', + "orderof;": '\U00002134', + "ordf;": '\U000000AA', + "ordm;": '\U000000BA', + "origof;": '\U000022B6', + "oror;": '\U00002A56', + "orslope;": '\U00002A57', + "orv;": '\U00002A5B', + "oscr;": '\U00002134', + "oslash;": '\U000000F8', + "osol;": '\U00002298', + "otilde;": '\U000000F5', + "otimes;": '\U00002297', + "otimesas;": '\U00002A36', + "ouml;": '\U000000F6', + "ovbar;": '\U0000233D', + "par;": '\U00002225', + "para;": '\U000000B6', + "parallel;": '\U00002225', + "parsim;": '\U00002AF3', + "parsl;": '\U00002AFD', + "part;": '\U00002202', + "pcy;": '\U0000043F', + "percnt;": '\U00000025', + "period;": '\U0000002E', + "permil;": '\U00002030', + "perp;": '\U000022A5', + "pertenk;": '\U00002031', + "pfr;": '\U0001D52D', + "phi;": '\U000003C6', + "phiv;": '\U000003D5', + "phmmat;": '\U00002133', + "phone;": '\U0000260E', + "pi;": '\U000003C0', + "pitchfork;": '\U000022D4', + 
"piv;": '\U000003D6', + "planck;": '\U0000210F', + "planckh;": '\U0000210E', + "plankv;": '\U0000210F', + "plus;": '\U0000002B', + "plusacir;": '\U00002A23', + "plusb;": '\U0000229E', + "pluscir;": '\U00002A22', + "plusdo;": '\U00002214', + "plusdu;": '\U00002A25', + "pluse;": '\U00002A72', + "plusmn;": '\U000000B1', + "plussim;": '\U00002A26', + "plustwo;": '\U00002A27', + "pm;": '\U000000B1', + "pointint;": '\U00002A15', + "popf;": '\U0001D561', + "pound;": '\U000000A3', + "pr;": '\U0000227A', + "prE;": '\U00002AB3', + "prap;": '\U00002AB7', + "prcue;": '\U0000227C', + "pre;": '\U00002AAF', + "prec;": '\U0000227A', + "precapprox;": '\U00002AB7', + "preccurlyeq;": '\U0000227C', + "preceq;": '\U00002AAF', + "precnapprox;": '\U00002AB9', + "precneqq;": '\U00002AB5', + "precnsim;": '\U000022E8', + "precsim;": '\U0000227E', + "prime;": '\U00002032', + "primes;": '\U00002119', + "prnE;": '\U00002AB5', + "prnap;": '\U00002AB9', + "prnsim;": '\U000022E8', + "prod;": '\U0000220F', + "profalar;": '\U0000232E', + "profline;": '\U00002312', + "profsurf;": '\U00002313', + "prop;": '\U0000221D', + "propto;": '\U0000221D', + "prsim;": '\U0000227E', + "prurel;": '\U000022B0', + "pscr;": '\U0001D4C5', + "psi;": '\U000003C8', + "puncsp;": '\U00002008', + "qfr;": '\U0001D52E', + "qint;": '\U00002A0C', + "qopf;": '\U0001D562', + "qprime;": '\U00002057', + "qscr;": '\U0001D4C6', + "quaternions;": '\U0000210D', + "quatint;": '\U00002A16', + "quest;": '\U0000003F', + "questeq;": '\U0000225F', + "quot;": '\U00000022', + "rAarr;": '\U000021DB', + "rArr;": '\U000021D2', + "rAtail;": '\U0000291C', + "rBarr;": '\U0000290F', + "rHar;": '\U00002964', + "racute;": '\U00000155', + "radic;": '\U0000221A', + "raemptyv;": '\U000029B3', + "rang;": '\U000027E9', + "rangd;": '\U00002992', + "range;": '\U000029A5', + "rangle;": '\U000027E9', + "raquo;": '\U000000BB', + "rarr;": '\U00002192', + "rarrap;": '\U00002975', + "rarrb;": '\U000021E5', + "rarrbfs;": '\U00002920', + "rarrc;": '\U00002933', + "rarrfs;": '\U0000291E', + "rarrhk;": '\U000021AA', + "rarrlp;": '\U000021AC', + "rarrpl;": '\U00002945', + "rarrsim;": '\U00002974', + "rarrtl;": '\U000021A3', + "rarrw;": '\U0000219D', + "ratail;": '\U0000291A', + "ratio;": '\U00002236', + "rationals;": '\U0000211A', + "rbarr;": '\U0000290D', + "rbbrk;": '\U00002773', + "rbrace;": '\U0000007D', + "rbrack;": '\U0000005D', + "rbrke;": '\U0000298C', + "rbrksld;": '\U0000298E', + "rbrkslu;": '\U00002990', + "rcaron;": '\U00000159', + "rcedil;": '\U00000157', + "rceil;": '\U00002309', + "rcub;": '\U0000007D', + "rcy;": '\U00000440', + "rdca;": '\U00002937', + "rdldhar;": '\U00002969', + "rdquo;": '\U0000201D', + "rdquor;": '\U0000201D', + "rdsh;": '\U000021B3', + "real;": '\U0000211C', + "realine;": '\U0000211B', + "realpart;": '\U0000211C', + "reals;": '\U0000211D', + "rect;": '\U000025AD', + "reg;": '\U000000AE', + "rfisht;": '\U0000297D', + "rfloor;": '\U0000230B', + "rfr;": '\U0001D52F', + "rhard;": '\U000021C1', + "rharu;": '\U000021C0', + "rharul;": '\U0000296C', + "rho;": '\U000003C1', + "rhov;": '\U000003F1', + "rightarrow;": '\U00002192', + "rightarrowtail;": '\U000021A3', + "rightharpoondown;": '\U000021C1', + "rightharpoonup;": '\U000021C0', + "rightleftarrows;": '\U000021C4', + "rightleftharpoons;": '\U000021CC', + "rightrightarrows;": '\U000021C9', + "rightsquigarrow;": '\U0000219D', + "rightthreetimes;": '\U000022CC', + "ring;": '\U000002DA', + "risingdotseq;": '\U00002253', + "rlarr;": '\U000021C4', + "rlhar;": '\U000021CC', + "rlm;": '\U0000200F', + "rmoust;": 
'\U000023B1', + "rmoustache;": '\U000023B1', + "rnmid;": '\U00002AEE', + "roang;": '\U000027ED', + "roarr;": '\U000021FE', + "robrk;": '\U000027E7', + "ropar;": '\U00002986', + "ropf;": '\U0001D563', + "roplus;": '\U00002A2E', + "rotimes;": '\U00002A35', + "rpar;": '\U00000029', + "rpargt;": '\U00002994', + "rppolint;": '\U00002A12', + "rrarr;": '\U000021C9', + "rsaquo;": '\U0000203A', + "rscr;": '\U0001D4C7', + "rsh;": '\U000021B1', + "rsqb;": '\U0000005D', + "rsquo;": '\U00002019', + "rsquor;": '\U00002019', + "rthree;": '\U000022CC', + "rtimes;": '\U000022CA', + "rtri;": '\U000025B9', + "rtrie;": '\U000022B5', + "rtrif;": '\U000025B8', + "rtriltri;": '\U000029CE', + "ruluhar;": '\U00002968', + "rx;": '\U0000211E', + "sacute;": '\U0000015B', + "sbquo;": '\U0000201A', + "sc;": '\U0000227B', + "scE;": '\U00002AB4', + "scap;": '\U00002AB8', + "scaron;": '\U00000161', + "sccue;": '\U0000227D', + "sce;": '\U00002AB0', + "scedil;": '\U0000015F', + "scirc;": '\U0000015D', + "scnE;": '\U00002AB6', + "scnap;": '\U00002ABA', + "scnsim;": '\U000022E9', + "scpolint;": '\U00002A13', + "scsim;": '\U0000227F', + "scy;": '\U00000441', + "sdot;": '\U000022C5', + "sdotb;": '\U000022A1', + "sdote;": '\U00002A66', + "seArr;": '\U000021D8', + "searhk;": '\U00002925', + "searr;": '\U00002198', + "searrow;": '\U00002198', + "sect;": '\U000000A7', + "semi;": '\U0000003B', + "seswar;": '\U00002929', + "setminus;": '\U00002216', + "setmn;": '\U00002216', + "sext;": '\U00002736', + "sfr;": '\U0001D530', + "sfrown;": '\U00002322', + "sharp;": '\U0000266F', + "shchcy;": '\U00000449', + "shcy;": '\U00000448', + "shortmid;": '\U00002223', + "shortparallel;": '\U00002225', + "shy;": '\U000000AD', + "sigma;": '\U000003C3', + "sigmaf;": '\U000003C2', + "sigmav;": '\U000003C2', + "sim;": '\U0000223C', + "simdot;": '\U00002A6A', + "sime;": '\U00002243', + "simeq;": '\U00002243', + "simg;": '\U00002A9E', + "simgE;": '\U00002AA0', + "siml;": '\U00002A9D', + "simlE;": '\U00002A9F', + "simne;": '\U00002246', + "simplus;": '\U00002A24', + "simrarr;": '\U00002972', + "slarr;": '\U00002190', + "smallsetminus;": '\U00002216', + "smashp;": '\U00002A33', + "smeparsl;": '\U000029E4', + "smid;": '\U00002223', + "smile;": '\U00002323', + "smt;": '\U00002AAA', + "smte;": '\U00002AAC', + "softcy;": '\U0000044C', + "sol;": '\U0000002F', + "solb;": '\U000029C4', + "solbar;": '\U0000233F', + "sopf;": '\U0001D564', + "spades;": '\U00002660', + "spadesuit;": '\U00002660', + "spar;": '\U00002225', + "sqcap;": '\U00002293', + "sqcup;": '\U00002294', + "sqsub;": '\U0000228F', + "sqsube;": '\U00002291', + "sqsubset;": '\U0000228F', + "sqsubseteq;": '\U00002291', + "sqsup;": '\U00002290', + "sqsupe;": '\U00002292', + "sqsupset;": '\U00002290', + "sqsupseteq;": '\U00002292', + "squ;": '\U000025A1', + "square;": '\U000025A1', + "squarf;": '\U000025AA', + "squf;": '\U000025AA', + "srarr;": '\U00002192', + "sscr;": '\U0001D4C8', + "ssetmn;": '\U00002216', + "ssmile;": '\U00002323', + "sstarf;": '\U000022C6', + "star;": '\U00002606', + "starf;": '\U00002605', + "straightepsilon;": '\U000003F5', + "straightphi;": '\U000003D5', + "strns;": '\U000000AF', + "sub;": '\U00002282', + "subE;": '\U00002AC5', + "subdot;": '\U00002ABD', + "sube;": '\U00002286', + "subedot;": '\U00002AC3', + "submult;": '\U00002AC1', + "subnE;": '\U00002ACB', + "subne;": '\U0000228A', + "subplus;": '\U00002ABF', + "subrarr;": '\U00002979', + "subset;": '\U00002282', + "subseteq;": '\U00002286', + "subseteqq;": '\U00002AC5', + "subsetneq;": '\U0000228A', + "subsetneqq;": 
'\U00002ACB', + "subsim;": '\U00002AC7', + "subsub;": '\U00002AD5', + "subsup;": '\U00002AD3', + "succ;": '\U0000227B', + "succapprox;": '\U00002AB8', + "succcurlyeq;": '\U0000227D', + "succeq;": '\U00002AB0', + "succnapprox;": '\U00002ABA', + "succneqq;": '\U00002AB6', + "succnsim;": '\U000022E9', + "succsim;": '\U0000227F', + "sum;": '\U00002211', + "sung;": '\U0000266A', + "sup;": '\U00002283', + "sup1;": '\U000000B9', + "sup2;": '\U000000B2', + "sup3;": '\U000000B3', + "supE;": '\U00002AC6', + "supdot;": '\U00002ABE', + "supdsub;": '\U00002AD8', + "supe;": '\U00002287', + "supedot;": '\U00002AC4', + "suphsol;": '\U000027C9', + "suphsub;": '\U00002AD7', + "suplarr;": '\U0000297B', + "supmult;": '\U00002AC2', + "supnE;": '\U00002ACC', + "supne;": '\U0000228B', + "supplus;": '\U00002AC0', + "supset;": '\U00002283', + "supseteq;": '\U00002287', + "supseteqq;": '\U00002AC6', + "supsetneq;": '\U0000228B', + "supsetneqq;": '\U00002ACC', + "supsim;": '\U00002AC8', + "supsub;": '\U00002AD4', + "supsup;": '\U00002AD6', + "swArr;": '\U000021D9', + "swarhk;": '\U00002926', + "swarr;": '\U00002199', + "swarrow;": '\U00002199', + "swnwar;": '\U0000292A', + "szlig;": '\U000000DF', + "target;": '\U00002316', + "tau;": '\U000003C4', + "tbrk;": '\U000023B4', + "tcaron;": '\U00000165', + "tcedil;": '\U00000163', + "tcy;": '\U00000442', + "tdot;": '\U000020DB', + "telrec;": '\U00002315', + "tfr;": '\U0001D531', + "there4;": '\U00002234', + "therefore;": '\U00002234', + "theta;": '\U000003B8', + "thetasym;": '\U000003D1', + "thetav;": '\U000003D1', + "thickapprox;": '\U00002248', + "thicksim;": '\U0000223C', + "thinsp;": '\U00002009', + "thkap;": '\U00002248', + "thksim;": '\U0000223C', + "thorn;": '\U000000FE', + "tilde;": '\U000002DC', + "times;": '\U000000D7', + "timesb;": '\U000022A0', + "timesbar;": '\U00002A31', + "timesd;": '\U00002A30', + "tint;": '\U0000222D', + "toea;": '\U00002928', + "top;": '\U000022A4', + "topbot;": '\U00002336', + "topcir;": '\U00002AF1', + "topf;": '\U0001D565', + "topfork;": '\U00002ADA', + "tosa;": '\U00002929', + "tprime;": '\U00002034', + "trade;": '\U00002122', + "triangle;": '\U000025B5', + "triangledown;": '\U000025BF', + "triangleleft;": '\U000025C3', + "trianglelefteq;": '\U000022B4', + "triangleq;": '\U0000225C', + "triangleright;": '\U000025B9', + "trianglerighteq;": '\U000022B5', + "tridot;": '\U000025EC', + "trie;": '\U0000225C', + "triminus;": '\U00002A3A', + "triplus;": '\U00002A39', + "trisb;": '\U000029CD', + "tritime;": '\U00002A3B', + "trpezium;": '\U000023E2', + "tscr;": '\U0001D4C9', + "tscy;": '\U00000446', + "tshcy;": '\U0000045B', + "tstrok;": '\U00000167', + "twixt;": '\U0000226C', + "twoheadleftarrow;": '\U0000219E', + "twoheadrightarrow;": '\U000021A0', + "uArr;": '\U000021D1', + "uHar;": '\U00002963', + "uacute;": '\U000000FA', + "uarr;": '\U00002191', + "ubrcy;": '\U0000045E', + "ubreve;": '\U0000016D', + "ucirc;": '\U000000FB', + "ucy;": '\U00000443', + "udarr;": '\U000021C5', + "udblac;": '\U00000171', + "udhar;": '\U0000296E', + "ufisht;": '\U0000297E', + "ufr;": '\U0001D532', + "ugrave;": '\U000000F9', + "uharl;": '\U000021BF', + "uharr;": '\U000021BE', + "uhblk;": '\U00002580', + "ulcorn;": '\U0000231C', + "ulcorner;": '\U0000231C', + "ulcrop;": '\U0000230F', + "ultri;": '\U000025F8', + "umacr;": '\U0000016B', + "uml;": '\U000000A8', + "uogon;": '\U00000173', + "uopf;": '\U0001D566', + "uparrow;": '\U00002191', + "updownarrow;": '\U00002195', + "upharpoonleft;": '\U000021BF', + "upharpoonright;": '\U000021BE', + "uplus;": '\U0000228E', + 
"upsi;": '\U000003C5', + "upsih;": '\U000003D2', + "upsilon;": '\U000003C5', + "upuparrows;": '\U000021C8', + "urcorn;": '\U0000231D', + "urcorner;": '\U0000231D', + "urcrop;": '\U0000230E', + "uring;": '\U0000016F', + "urtri;": '\U000025F9', + "uscr;": '\U0001D4CA', + "utdot;": '\U000022F0', + "utilde;": '\U00000169', + "utri;": '\U000025B5', + "utrif;": '\U000025B4', + "uuarr;": '\U000021C8', + "uuml;": '\U000000FC', + "uwangle;": '\U000029A7', + "vArr;": '\U000021D5', + "vBar;": '\U00002AE8', + "vBarv;": '\U00002AE9', + "vDash;": '\U000022A8', + "vangrt;": '\U0000299C', + "varepsilon;": '\U000003F5', + "varkappa;": '\U000003F0', + "varnothing;": '\U00002205', + "varphi;": '\U000003D5', + "varpi;": '\U000003D6', + "varpropto;": '\U0000221D', + "varr;": '\U00002195', + "varrho;": '\U000003F1', + "varsigma;": '\U000003C2', + "vartheta;": '\U000003D1', + "vartriangleleft;": '\U000022B2', + "vartriangleright;": '\U000022B3', + "vcy;": '\U00000432', + "vdash;": '\U000022A2', + "vee;": '\U00002228', + "veebar;": '\U000022BB', + "veeeq;": '\U0000225A', + "vellip;": '\U000022EE', + "verbar;": '\U0000007C', + "vert;": '\U0000007C', + "vfr;": '\U0001D533', + "vltri;": '\U000022B2', + "vopf;": '\U0001D567', + "vprop;": '\U0000221D', + "vrtri;": '\U000022B3', + "vscr;": '\U0001D4CB', + "vzigzag;": '\U0000299A', + "wcirc;": '\U00000175', + "wedbar;": '\U00002A5F', + "wedge;": '\U00002227', + "wedgeq;": '\U00002259', + "weierp;": '\U00002118', + "wfr;": '\U0001D534', + "wopf;": '\U0001D568', + "wp;": '\U00002118', + "wr;": '\U00002240', + "wreath;": '\U00002240', + "wscr;": '\U0001D4CC', + "xcap;": '\U000022C2', + "xcirc;": '\U000025EF', + "xcup;": '\U000022C3', + "xdtri;": '\U000025BD', + "xfr;": '\U0001D535', + "xhArr;": '\U000027FA', + "xharr;": '\U000027F7', + "xi;": '\U000003BE', + "xlArr;": '\U000027F8', + "xlarr;": '\U000027F5', + "xmap;": '\U000027FC', + "xnis;": '\U000022FB', + "xodot;": '\U00002A00', + "xopf;": '\U0001D569', + "xoplus;": '\U00002A01', + "xotime;": '\U00002A02', + "xrArr;": '\U000027F9', + "xrarr;": '\U000027F6', + "xscr;": '\U0001D4CD', + "xsqcup;": '\U00002A06', + "xuplus;": '\U00002A04', + "xutri;": '\U000025B3', + "xvee;": '\U000022C1', + "xwedge;": '\U000022C0', + "yacute;": '\U000000FD', + "yacy;": '\U0000044F', + "ycirc;": '\U00000177', + "ycy;": '\U0000044B', + "yen;": '\U000000A5', + "yfr;": '\U0001D536', + "yicy;": '\U00000457', + "yopf;": '\U0001D56A', + "yscr;": '\U0001D4CE', + "yucy;": '\U0000044E', + "yuml;": '\U000000FF', + "zacute;": '\U0000017A', + "zcaron;": '\U0000017E', + "zcy;": '\U00000437', + "zdot;": '\U0000017C', + "zeetrf;": '\U00002128', + "zeta;": '\U000003B6', + "zfr;": '\U0001D537', + "zhcy;": '\U00000436', + "zigrarr;": '\U000021DD', + "zopf;": '\U0001D56B', + "zscr;": '\U0001D4CF', + "zwj;": '\U0000200D', + "zwnj;": '\U0000200C', + "AElig": '\U000000C6', + "AMP": '\U00000026', + "Aacute": '\U000000C1', + "Acirc": '\U000000C2', + "Agrave": '\U000000C0', + "Aring": '\U000000C5', + "Atilde": '\U000000C3', + "Auml": '\U000000C4', + "COPY": '\U000000A9', + "Ccedil": '\U000000C7', + "ETH": '\U000000D0', + "Eacute": '\U000000C9', + "Ecirc": '\U000000CA', + "Egrave": '\U000000C8', + "Euml": '\U000000CB', + "GT": '\U0000003E', + "Iacute": '\U000000CD', + "Icirc": '\U000000CE', + "Igrave": '\U000000CC', + "Iuml": '\U000000CF', + "LT": '\U0000003C', + "Ntilde": '\U000000D1', + "Oacute": '\U000000D3', + "Ocirc": '\U000000D4', + "Ograve": '\U000000D2', + "Oslash": '\U000000D8', + "Otilde": '\U000000D5', + "Ouml": '\U000000D6', + "QUOT": '\U00000022', + 
"REG": '\U000000AE', + "THORN": '\U000000DE', + "Uacute": '\U000000DA', + "Ucirc": '\U000000DB', + "Ugrave": '\U000000D9', + "Uuml": '\U000000DC', + "Yacute": '\U000000DD', + "aacute": '\U000000E1', + "acirc": '\U000000E2', + "acute": '\U000000B4', + "aelig": '\U000000E6', + "agrave": '\U000000E0', + "amp": '\U00000026', + "aring": '\U000000E5', + "atilde": '\U000000E3', + "auml": '\U000000E4', + "brvbar": '\U000000A6', + "ccedil": '\U000000E7', + "cedil": '\U000000B8', + "cent": '\U000000A2', + "copy": '\U000000A9', + "curren": '\U000000A4', + "deg": '\U000000B0', + "divide": '\U000000F7', + "eacute": '\U000000E9', + "ecirc": '\U000000EA', + "egrave": '\U000000E8', + "eth": '\U000000F0', + "euml": '\U000000EB', + "frac12": '\U000000BD', + "frac14": '\U000000BC', + "frac34": '\U000000BE', + "gt": '\U0000003E', + "iacute": '\U000000ED', + "icirc": '\U000000EE', + "iexcl": '\U000000A1', + "igrave": '\U000000EC', + "iquest": '\U000000BF', + "iuml": '\U000000EF', + "laquo": '\U000000AB', + "lt": '\U0000003C', + "macr": '\U000000AF', + "micro": '\U000000B5', + "middot": '\U000000B7', + "nbsp": '\U000000A0', + "not": '\U000000AC', + "ntilde": '\U000000F1', + "oacute": '\U000000F3', + "ocirc": '\U000000F4', + "ograve": '\U000000F2', + "ordf": '\U000000AA', + "ordm": '\U000000BA', + "oslash": '\U000000F8', + "otilde": '\U000000F5', + "ouml": '\U000000F6', + "para": '\U000000B6', + "plusmn": '\U000000B1', + "pound": '\U000000A3', + "quot": '\U00000022', + "raquo": '\U000000BB', + "reg": '\U000000AE', + "sect": '\U000000A7', + "shy": '\U000000AD', + "sup1": '\U000000B9', + "sup2": '\U000000B2', + "sup3": '\U000000B3', + "szlig": '\U000000DF', + "thorn": '\U000000FE', + "times": '\U000000D7', + "uacute": '\U000000FA', + "ucirc": '\U000000FB', + "ugrave": '\U000000F9', + "uml": '\U000000A8', + "uuml": '\U000000FC', + "yacute": '\U000000FD', + "yen": '\U000000A5', + "yuml": '\U000000FF', +} + +// HTML entities that are two unicode codepoints. +var entity2 = map[string][2]rune{ + // TODO(nigeltao): Handle replacements that are wider than their names. 
+ // "nLt;": {'\u226A', '\u20D2'}, + // "nGt;": {'\u226B', '\u20D2'}, + "NotEqualTilde;": {'\u2242', '\u0338'}, + "NotGreaterFullEqual;": {'\u2267', '\u0338'}, + "NotGreaterGreater;": {'\u226B', '\u0338'}, + "NotGreaterSlantEqual;": {'\u2A7E', '\u0338'}, + "NotHumpDownHump;": {'\u224E', '\u0338'}, + "NotHumpEqual;": {'\u224F', '\u0338'}, + "NotLeftTriangleBar;": {'\u29CF', '\u0338'}, + "NotLessLess;": {'\u226A', '\u0338'}, + "NotLessSlantEqual;": {'\u2A7D', '\u0338'}, + "NotNestedGreaterGreater;": {'\u2AA2', '\u0338'}, + "NotNestedLessLess;": {'\u2AA1', '\u0338'}, + "NotPrecedesEqual;": {'\u2AAF', '\u0338'}, + "NotRightTriangleBar;": {'\u29D0', '\u0338'}, + "NotSquareSubset;": {'\u228F', '\u0338'}, + "NotSquareSuperset;": {'\u2290', '\u0338'}, + "NotSubset;": {'\u2282', '\u20D2'}, + "NotSucceedsEqual;": {'\u2AB0', '\u0338'}, + "NotSucceedsTilde;": {'\u227F', '\u0338'}, + "NotSuperset;": {'\u2283', '\u20D2'}, + "ThickSpace;": {'\u205F', '\u200A'}, + "acE;": {'\u223E', '\u0333'}, + "bne;": {'\u003D', '\u20E5'}, + "bnequiv;": {'\u2261', '\u20E5'}, + "caps;": {'\u2229', '\uFE00'}, + "cups;": {'\u222A', '\uFE00'}, + "fjlig;": {'\u0066', '\u006A'}, + "gesl;": {'\u22DB', '\uFE00'}, + "gvertneqq;": {'\u2269', '\uFE00'}, + "gvnE;": {'\u2269', '\uFE00'}, + "lates;": {'\u2AAD', '\uFE00'}, + "lesg;": {'\u22DA', '\uFE00'}, + "lvertneqq;": {'\u2268', '\uFE00'}, + "lvnE;": {'\u2268', '\uFE00'}, + "nGg;": {'\u22D9', '\u0338'}, + "nGtv;": {'\u226B', '\u0338'}, + "nLl;": {'\u22D8', '\u0338'}, + "nLtv;": {'\u226A', '\u0338'}, + "nang;": {'\u2220', '\u20D2'}, + "napE;": {'\u2A70', '\u0338'}, + "napid;": {'\u224B', '\u0338'}, + "nbump;": {'\u224E', '\u0338'}, + "nbumpe;": {'\u224F', '\u0338'}, + "ncongdot;": {'\u2A6D', '\u0338'}, + "nedot;": {'\u2250', '\u0338'}, + "nesim;": {'\u2242', '\u0338'}, + "ngE;": {'\u2267', '\u0338'}, + "ngeqq;": {'\u2267', '\u0338'}, + "ngeqslant;": {'\u2A7E', '\u0338'}, + "nges;": {'\u2A7E', '\u0338'}, + "nlE;": {'\u2266', '\u0338'}, + "nleqq;": {'\u2266', '\u0338'}, + "nleqslant;": {'\u2A7D', '\u0338'}, + "nles;": {'\u2A7D', '\u0338'}, + "notinE;": {'\u22F9', '\u0338'}, + "notindot;": {'\u22F5', '\u0338'}, + "nparsl;": {'\u2AFD', '\u20E5'}, + "npart;": {'\u2202', '\u0338'}, + "npre;": {'\u2AAF', '\u0338'}, + "npreceq;": {'\u2AAF', '\u0338'}, + "nrarrc;": {'\u2933', '\u0338'}, + "nrarrw;": {'\u219D', '\u0338'}, + "nsce;": {'\u2AB0', '\u0338'}, + "nsubE;": {'\u2AC5', '\u0338'}, + "nsubset;": {'\u2282', '\u20D2'}, + "nsubseteqq;": {'\u2AC5', '\u0338'}, + "nsucceq;": {'\u2AB0', '\u0338'}, + "nsupE;": {'\u2AC6', '\u0338'}, + "nsupset;": {'\u2283', '\u20D2'}, + "nsupseteqq;": {'\u2AC6', '\u0338'}, + "nvap;": {'\u224D', '\u20D2'}, + "nvge;": {'\u2265', '\u20D2'}, + "nvgt;": {'\u003E', '\u20D2'}, + "nvle;": {'\u2264', '\u20D2'}, + "nvlt;": {'\u003C', '\u20D2'}, + "nvltrie;": {'\u22B4', '\u20D2'}, + "nvrtrie;": {'\u22B5', '\u20D2'}, + "nvsim;": {'\u223C', '\u20D2'}, + "race;": {'\u223D', '\u0331'}, + "smtes;": {'\u2AAC', '\uFE00'}, + "sqcaps;": {'\u2293', '\uFE00'}, + "sqcups;": {'\u2294', '\uFE00'}, + "varsubsetneq;": {'\u228A', '\uFE00'}, + "varsubsetneqq;": {'\u2ACB', '\uFE00'}, + "varsupsetneq;": {'\u228B', '\uFE00'}, + "varsupsetneqq;": {'\u2ACC', '\uFE00'}, + "vnsub;": {'\u2282', '\u20D2'}, + "vnsup;": {'\u2283', '\u20D2'}, + "vsubnE;": {'\u2ACB', '\uFE00'}, + "vsubne;": {'\u228A', '\uFE00'}, + "vsupnE;": {'\u2ACC', '\uFE00'}, + "vsupne;": {'\u228B', '\uFE00'}, +} diff --git a/vendor/golang.org/x/net/html/escape.go b/vendor/golang.org/x/net/html/escape.go new file mode 100644 index 
00000000000..04c6bec2107 --- /dev/null +++ b/vendor/golang.org/x/net/html/escape.go @@ -0,0 +1,339 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "bytes" + "strings" + "unicode/utf8" +) + +// These replacements permit compatibility with old numeric entities that +// assumed Windows-1252 encoding. +// https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference +var replacementTable = [...]rune{ + '\u20AC', // First entry is what 0x80 should be replaced with. + '\u0081', + '\u201A', + '\u0192', + '\u201E', + '\u2026', + '\u2020', + '\u2021', + '\u02C6', + '\u2030', + '\u0160', + '\u2039', + '\u0152', + '\u008D', + '\u017D', + '\u008F', + '\u0090', + '\u2018', + '\u2019', + '\u201C', + '\u201D', + '\u2022', + '\u2013', + '\u2014', + '\u02DC', + '\u2122', + '\u0161', + '\u203A', + '\u0153', + '\u009D', + '\u017E', + '\u0178', // Last entry is 0x9F. + // 0x00->'\uFFFD' is handled programmatically. + // 0x0D->'\u000D' is a no-op. +} + +// unescapeEntity reads an entity like "&lt;" from b[src:] and writes the +// corresponding "<" to b[dst:], returning the incremented dst and src cursors. +// Precondition: b[src] == '&' && dst <= src. +// attribute should be true if parsing an attribute value. +func unescapeEntity(b []byte, dst, src int, attribute bool) (dst1, src1 int) { + // https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference + + // i starts at 1 because we already know that s[0] == '&'. + i, s := 1, b[src:] + + if len(s) <= 1 { + b[dst] = b[src] + return dst + 1, src + 1 + } + + if s[i] == '#' { + if len(s) <= 3 { // We need to have at least "&#.". + b[dst] = b[src] + return dst + 1, src + 1 + } + i++ + c := s[i] + hex := false + if c == 'x' || c == 'X' { + hex = true + i++ + } + + x := '\x00' + for i < len(s) { + c = s[i] + i++ + if hex { + if '0' <= c && c <= '9' { + x = 16*x + rune(c) - '0' + continue + } else if 'a' <= c && c <= 'f' { + x = 16*x + rune(c) - 'a' + 10 + continue + } else if 'A' <= c && c <= 'F' { + x = 16*x + rune(c) - 'A' + 10 + continue + } + } else if '0' <= c && c <= '9' { + x = 10*x + rune(c) - '0' + continue + } + if c != ';' { + i-- + } + break + } + + if i <= 3 { // No characters matched. + b[dst] = b[src] + return dst + 1, src + 1 + } + + if 0x80 <= x && x <= 0x9F { + // Replace characters from Windows-1252 with UTF-8 equivalents. + x = replacementTable[x-0x80] + } else if x == 0 || (0xD800 <= x && x <= 0xDFFF) || x > 0x10FFFF { + // Replace invalid characters with the replacement character. + x = '\uFFFD' + } + + return dst + utf8.EncodeRune(b[dst:], x), src + i + } + + // Consume the maximum number of characters possible, with the + // consumed characters matching one of the named references. + + for i < len(s) { + c := s[i] + i++ + // Lower-cased characters are more common in entities, so we check for them first. + if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { + continue + } + if c != ';' { + i-- + } + break + } + + entityName := string(s[1:i]) + if entityName == "" { + // No-op. + } else if attribute && entityName[len(entityName)-1] != ';' && len(s) > i && s[i] == '=' { + // No-op.
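+ // The two no-op guards above skip a bare "&" and, in attribute values, a + // semicolon-less name followed by '=' (so "&not=" stays literal, per the + // spec). The lookups below try the one-codepoint table, then the + // two-codepoint table, and finally, in text only, progressively shorter + // prefixes, so that "&notit;" unescapes to "¬it;" via the semicolon-less + // "not" entry.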
+ } else if x := entity[entityName]; x != 0 { + return dst + utf8.EncodeRune(b[dst:], x), src + i + } else if x := entity2[entityName]; x[0] != 0 { + dst1 := dst + utf8.EncodeRune(b[dst:], x[0]) + return dst1 + utf8.EncodeRune(b[dst1:], x[1]), src + i + } else if !attribute { + maxLen := len(entityName) - 1 + if maxLen > longestEntityWithoutSemicolon { + maxLen = longestEntityWithoutSemicolon + } + for j := maxLen; j > 1; j-- { + if x := entity[entityName[:j]]; x != 0 { + return dst + utf8.EncodeRune(b[dst:], x), src + j + 1 + } + } + } + + dst1, src1 = dst+i, src+i + copy(b[dst:dst1], b[src:src1]) + return dst1, src1 +} + +// unescape unescapes b's entities in-place, so that "a&lt;b" becomes "a<b". +// attribute should be true if parsing an attribute value. +func unescape(b []byte, attribute bool) []byte { + for i, c := range b { + if c == '&' { + dst, src := unescapeEntity(b, i, i, attribute) + for src < len(b) { + c := b[src] + if c == '&' { + dst, src = unescapeEntity(b, dst, src, attribute) + } else { + b[dst] = c + dst++ + src++ + } + } + return b[:dst] + } + } + return b +} + +// lower lower-cases the A-Z bytes in b in-place, so that "aBc" becomes "abc". +func lower(b []byte) []byte { + for i, c := range b { + if 'A' <= c && c <= 'Z' { + b[i] = c + 'a' - 'A' + } + } + return b +} + +// escapeComment is like func escape but escapes its input bytes less often. +// [...] the '>' byte that, per above, we'd like to avoid escaping unless we have to. +// +// Studying the summary table (and T actions in its '>' column) closely, we +// only need to escape in states 43, 44, 49, 51 and 52. State 43 is at the +// start of the comment data. State 52 is after a '!'. The other three states +// are after a '-'. +// +// Our algorithm is thus to escape every '&' and to escape '>' if and only if: +// - The '>' is after a '!' or '-' (in the unescaped data) or +// - The '>' is at the start of the comment data (after the opening "<!--"). [...] + case CommentNode: + if _, err := w.WriteString("<!--"); err != nil { + return err + } + if err := escapeComment(w, n.Data); err != nil { + return err + } + if _, err := w.WriteString("-->"); err != nil { + return err + } + return nil + case DoctypeNode: + if _, err := w.WriteString("<!DOCTYPE "); err != nil { + return err + } + if _, err := w.WriteString(n.Data); err != nil { + return err + } + [...] + return w.WriteByte('>') + case RawNode: + _, err := w.WriteString(n.Data) + return err + default: + return errors.New("html: unknown node type") + } + + // Render the opening tag. + if err := w.WriteByte('<'); err != nil { + return err + } + if _, err := w.WriteString(n.Data); err != nil { + return err + } + for _, a := range n.Attr { + if err := w.WriteByte(' '); err != nil { + return err + } + if a.Namespace != "" { + if _, err := w.WriteString(a.Namespace); err != nil { + return err + } + if err := w.WriteByte(':'); err != nil { + return err + } + } + if _, err := w.WriteString(a.Key); err != nil { + return err + } + if _, err := w.WriteString(`="`); err != nil { + return err + } + if err := escape(w, a.Val); err != nil { + return err + } + if err := w.WriteByte('"'); err != nil { + return err + } + } + if voidElements[n.Data] { + if n.FirstChild != nil { + return fmt.Errorf("html: void element <%s> has child nodes", n.Data) + } + _, err := w.WriteString("/>") + return err + } + if err := w.WriteByte('>'); err != nil { + return err + } + + // Add initial newline where there is danger of a newline being ignored. + if c := n.FirstChild; c != nil && c.Type == TextNode && strings.HasPrefix(c.Data, "\n") { + switch n.Data { + case "pre", "listing", "textarea": + if err := w.WriteByte('\n'); err != nil { + return err + } + } + } + + // Render any child nodes. + if childTextNodesAreLiteral(n) { + for c := n.FirstChild; c != nil; c = c.NextSibling { + if c.Type == TextNode { + if _, err := w.WriteString(c.Data); err != nil { + return err + } + } else { + if err := render1(w, c); err != nil { + return err + } + } + } + if n.Data == "plaintext" { + // Don't render anything else. <plaintext> must be the + // last element in the file, with no closing tag. + return plaintextAbort + } + } else { + for c := n.FirstChild; c != nil; c = c.NextSibling { + if err := render1(w, c); err != nil { + return err + } + } + } + + // Render the </xxx> closing tag.
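An editorial aside: the entity tables and unescapeEntity above are package-internal, but escape.go also defines the exported EscapeString and UnescapeString wrappers (in the portion elided above). A minimal sketch of the observable behavior, assuming only the vendored golang.org/x/net/html import path shown in the diff headers:

package main

import (
	"fmt"

	"golang.org/x/net/html"
)

func main() {
	// A named entity, a two-codepoint entity from the entity2 table, and a
	// numeric reference in the 0x80-0x9F range remapped via replacementTable
	// (0x80 becomes the euro sign U+20AC).
	fmt.Println(html.UnescapeString("&lt;&NotEqualTilde;&#x80;"))
	// EscapeString escapes only a small fixed set of characters ('&', '<',
	// and so on); it never reconstructs named entities for other runes.
	fmt.Println(html.EscapeString("a < b & c")) // "a &lt; b &amp; c"
}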
+ if _, err := w.WriteString("</"); err != nil { + return err + } + if _, err := w.WriteString(n.Data); err != nil { + return err + } + return w.WriteByte('>') +} + +func childTextNodesAreLiteral(n *Node) bool { + // Per WHATWG HTML 13.3, if the parent of the current node is a style, + // script, xmp, iframe, noembed, noframes, or plaintext element, and the + // current node is a text node, append the value of the node's data + // literally. The specification is not explicit about it, but we only + // enforce this if we are in the HTML namespace (i.e. when the namespace is + // ""). + // NOTE: we also always include noscript elements, although the + // specification states that they should only be rendered as such if + // scripting is enabled for the node (which is not something we track). + if n.Namespace != "" { + return false + } + switch n.Data { + case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "xmp": + return true + default: + return false + } +} + +// writeQuoted writes s to w surrounded by quotes. Normally it will use double +// quotes, but if s contains a double quote, it will use single quotes. +// It is used for writing the identifiers in a doctype declaration. +// In valid HTML, they can't contain both types of quotes. +func writeQuoted(w writer, s string) error { + var q byte = '"' + if strings.Contains(s, `"`) { + q = '\'' + } + if err := w.WriteByte(q); err != nil { + return err + } + if _, err := w.WriteString(s); err != nil { + return err + } + if err := w.WriteByte(q); err != nil { + return err + } + return nil +} + +// Section 12.1.2, "Elements", gives this list of void elements. Void elements +// are those that can't have any contents. +var voidElements = map[string]bool{ + "area": true, + "base": true, + "br": true, + "col": true, + "embed": true, + "hr": true, + "img": true, + "input": true, + "keygen": true, // "keygen" has been removed from the spec, but is kept here for backwards compatibility. + "link": true, + "meta": true, + "param": true, + "source": true, + "track": true, + "wbr": true, +} diff --git a/vendor/golang.org/x/net/html/token.go b/vendor/golang.org/x/net/html/token.go new file mode 100644 index 00000000000..3c57880d697 --- /dev/null +++ b/vendor/golang.org/x/net/html/token.go @@ -0,0 +1,1272 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "bytes" + "errors" + "io" + "strconv" + "strings" + + "golang.org/x/net/html/atom" +) + +// A TokenType is the type of a Token. +type TokenType uint32 + +const ( + // ErrorToken means that an error occurred during tokenization. + ErrorToken TokenType = iota + // TextToken means a text node. + TextToken + // A StartTagToken looks like <a>. + StartTagToken + // An EndTagToken looks like </a>. + EndTagToken + // A SelfClosingTagToken looks like <br/>. + SelfClosingTagToken + // A CommentToken looks like <!--x-->. + CommentToken + // A DoctypeToken looks like <!DOCTYPE x>. + DoctypeToken +) + +// ErrBufferExceeded means that the buffering limit was exceeded. +var ErrBufferExceeded = errors.New("max buffer exceeded") + +// String returns a string representation of the TokenType.
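Another aside, on the rendering code above: render1 is the workhorse behind the package's exported Render, which pairs with Parse for a parse/serialize round trip. A small self-contained sketch; the input string is hypothetical, chosen to exercise text escaping and the voidElements self-closing path:

package main

import (
	"os"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	// Parse builds a *html.Node tree, wrapping the fragment in the implied
	// html/head/body elements.
	doc, err := html.Parse(strings.NewReader(`<p class="x">a < b<br></p>`))
	if err != nil {
		panic(err)
	}
	// Render walks the tree back out through render1: the raw '<' in the
	// text node is re-escaped as "&lt;" and the void <br> is written "<br/>".
	if err := html.Render(os.Stdout, doc); err != nil {
		panic(err)
	}
}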
+func (t TokenType) String() string { + switch t { + case ErrorToken: + return "Error" + case TextToken: + return "Text" + case StartTagToken: + return "StartTag" + case EndTagToken: + return "EndTag" + case SelfClosingTagToken: + return "SelfClosingTag" + case CommentToken: + return "Comment" + case DoctypeToken: + return "Doctype" + } + return "Invalid(" + strconv.Itoa(int(t)) + ")" +} + +// An Attribute is an attribute namespace-key-value triple. Namespace is +// non-empty for foreign attributes like xlink, Key is alphabetic (and hence +// does not contain escapable characters like '&', '<' or '>'), and Val is +// unescaped (it looks like "a<b" rather than "a&lt;b"). +// +// Namespace is only used by the parser, not the tokenizer. +type Attribute struct { + Namespace, Key, Val string +} + +// A Token consists of a TokenType and some Data (tag name for start and end +// tags, content for text, comments and doctypes). A tag Token may also contain +// a slice of Attributes. Data is unescaped for all Tokens (it looks like "a<b" +// rather than "a&lt;b"). For tag Tokens, DataAtom is the atom for Data, or +// zero if Data is not a known tag name. +type Token struct { + Type TokenType + DataAtom atom.Atom + Data string + Attr []Attribute +} + +// tagString returns a string representation of a tag Token's Data and Attr. +func (t Token) tagString() string { + if len(t.Attr) == 0 { + return t.Data + } + buf := bytes.NewBufferString(t.Data) + for _, a := range t.Attr { + buf.WriteByte(' ') + buf.WriteString(a.Key) + buf.WriteString(`="`) + escape(buf, a.Val) + buf.WriteByte('"') + } + return buf.String() +} + +// String returns a string representation of the Token. +func (t Token) String() string { + switch t.Type { + case ErrorToken: + return "" + case TextToken: + return EscapeString(t.Data) + case StartTagToken: + return "<" + t.tagString() + ">" + case EndTagToken: + return "</" + t.tagString() + ">" + case SelfClosingTagToken: + return "<" + t.tagString() + "/>" + case CommentToken: + return "<!--" + escapeCommentString(t.Data) + "-->" + case DoctypeToken: + return "<!DOCTYPE " + EscapeString(t.Data) + ">" + } + return "Invalid(" + strconv.Itoa(int(t.Type)) + ")" +} + +// span is a range of bytes in a Tokenizer's buffer. The start is inclusive, +// the end is exclusive. +type span struct { + start, end int +} + +// A Tokenizer returns a stream of HTML Tokens. +type Tokenizer struct { + // r is the source of the HTML text. + r io.Reader + // tt is the TokenType of the current token. + tt TokenType + // err is the first error encountered during tokenization. It is possible + // for tt != Error && err != nil to hold: this means that Next returned a + // valid token but the subsequent Next call will return an error token. + // For example, if the HTML text input was just "plain", then the first + // Next call would set z.err to io.EOF but return a TextToken, and all + // subsequent Next calls would return an ErrorToken. + // err is never reset. Once it becomes non-nil, it stays non-nil. + err error + // readErr is the error returned by the io.Reader r. It is separate from + // err because it is valid for an io.Reader to return (n int, err1 error) + // such that n > 0 && err1 != nil, and callers should always process the + // n > 0 bytes before considering the error err1. + readErr error + // buf[raw.start:raw.end] holds the raw bytes of the current token. + // buf[raw.end:] is buffered input that will yield future tokens. + raw span + buf []byte + // maxBuf limits the data buffered in buf. 
A value of 0 means unlimited. + maxBuf int + // buf[data.start:data.end] holds the raw bytes of the current token's data: + // a text token's text, a tag token's tag name, etc. + data span + // pendingAttr is the attribute key and value currently being tokenized. + // When complete, pendingAttr is pushed onto attr. nAttrReturned is + // incremented on each call to TagAttr. + pendingAttr [2]span + attr [][2]span + nAttrReturned int + // rawTag is the "script" in "</script>" that closes the next token. If + // non-empty, the subsequent call to Next will return a raw or RCDATA text + // token: one that treats "<p>" as text instead of an element. + // rawTag's contents are lower-cased. + rawTag string + // textIsRaw is whether the current text token's data is not escaped. + textIsRaw bool + // convertNUL is whether NUL bytes in the current token's data should + // be converted into \ufffd replacement characters. + convertNUL bool + // allowCDATA is whether CDATA sections are allowed in the current context. + allowCDATA bool +} + +// AllowCDATA sets whether or not the tokenizer recognizes <![CDATA[foo]]> as +// the text "foo". The default value is false, which means to recognize it as +// a bogus comment "<!-- [CDATA[foo]] -->" instead. +// +// Strictly speaking, an HTML5 compliant tokenizer should allow CDATA if and +// only if tokenizing foreign content, such as MathML and SVG. However, +// tracking foreign-contentness is difficult to do purely in the tokenizer, +// as opposed to the parser, due to HTML integration points: an <svg> element +// can contain a <foreignObject> that is foreign-to-SVG but not foreign-to- +// HTML. For strict compliance with the HTML5 tokenization algorithm, it is the +// responsibility of the user of a tokenizer to call AllowCDATA as appropriate. +// In practice, if using the tokenizer without caring whether MathML or SVG +// CDATA is text or comments, such as tokenizing HTML to find all the anchor +// text, it is acceptable to ignore this responsibility. +func (z *Tokenizer) AllowCDATA(allowCDATA bool) { + z.allowCDATA = allowCDATA +} + +// NextIsNotRawText instructs the tokenizer that the next token should not be +// considered as 'raw text'. Some elements, such as script and title elements, +// normally require the next token after the opening tag to be 'raw text' that +// has no child elements. For example, tokenizing "<title>a<b>c</b>d</title>" +// yields a start tag token for "<title>", a text token for "a<b>c</b>d", and +// an end tag token for "</title>". There are no distinct start tag or end tag +// tokens for the "<b>" and "</b>". +// +// This tokenizer implementation will generally look for raw text at the right +// times. Strictly speaking, an HTML5 compliant tokenizer should not look for +// raw text if in foreign content: <title> generally needs raw text, but a +// <title> inside an <svg> does not. Another example is that a <textarea> +// generally needs raw text, but a <textarea> is not allowed as an immediate +// child of a <select>; in normal parsing, a <textarea> implies </select>, but +// one cannot close the implicit element when parsing a <select>'s InnerHTML. +// Similarly to AllowCDATA, tracking the correct moment to override raw-text- +// ness is difficult to do purely in the tokenizer, as opposed to the parser. +// For strict compliance with the HTML5 tokenization algorithm, it is the +// responsibility of the user of a tokenizer to call NextIsNotRawText as +// appropriate. 
In practice, like AllowCDATA, it is acceptable to ignore this +// responsibility for basic usage. +// +// Note that this 'raw text' concept is different from the one offered by the +// Tokenizer.Raw method. +func (z *Tokenizer) NextIsNotRawText() { + z.rawTag = "" +} + +// Err returns the error associated with the most recent ErrorToken token. +// This is typically io.EOF, meaning the end of tokenization. +func (z *Tokenizer) Err() error { + if z.tt != ErrorToken { + return nil + } + return z.err +} + +// readByte returns the next byte from the input stream, doing a buffered read +// from z.r into z.buf if necessary. z.buf[z.raw.start:z.raw.end] remains a contiguous byte +// slice that holds all the bytes read so far for the current token. +// It sets z.err if the underlying reader returns an error. +// Pre-condition: z.err == nil. +func (z *Tokenizer) readByte() byte { + if z.raw.end >= len(z.buf) { + // Our buffer is exhausted and we have to read from z.r. Check if the + // previous read resulted in an error. + if z.readErr != nil { + z.err = z.readErr + return 0 + } + // We copy z.buf[z.raw.start:z.raw.end] to the beginning of z.buf. If the length + // z.raw.end - z.raw.start is more than half the capacity of z.buf, then we + // allocate a new buffer before the copy. + c := cap(z.buf) + d := z.raw.end - z.raw.start + var buf1 []byte + if 2*d > c { + buf1 = make([]byte, d, 2*c) + } else { + buf1 = z.buf[:d] + } + copy(buf1, z.buf[z.raw.start:z.raw.end]) + if x := z.raw.start; x != 0 { + // Adjust the data/attr spans to refer to the same contents after the copy. + z.data.start -= x + z.data.end -= x + z.pendingAttr[0].start -= x + z.pendingAttr[0].end -= x + z.pendingAttr[1].start -= x + z.pendingAttr[1].end -= x + for i := range z.attr { + z.attr[i][0].start -= x + z.attr[i][0].end -= x + z.attr[i][1].start -= x + z.attr[i][1].end -= x + } + } + z.raw.start, z.raw.end, z.buf = 0, d, buf1[:d] + // Now that we have copied the live bytes to the start of the buffer, + // we read from z.r into the remainder. + var n int + n, z.readErr = readAtLeastOneByte(z.r, buf1[d:cap(buf1)]) + if n == 0 { + z.err = z.readErr + return 0 + } + z.buf = buf1[:d+n] + } + x := z.buf[z.raw.end] + z.raw.end++ + if z.maxBuf > 0 && z.raw.end-z.raw.start >= z.maxBuf { + z.err = ErrBufferExceeded + return 0 + } + return x +} + +// Buffered returns a slice containing data buffered but not yet tokenized. +func (z *Tokenizer) Buffered() []byte { + return z.buf[z.raw.end:] +} + +// readAtLeastOneByte wraps an io.Reader so that reading cannot return (0, nil). +// It returns io.ErrNoProgress if the underlying r.Read method returns (0, nil) +// too many times in succession. +func readAtLeastOneByte(r io.Reader, b []byte) (int, error) { + for i := 0; i < 100; i++ { + if n, err := r.Read(b); n != 0 || err != nil { + return n, err + } + } + return 0, io.ErrNoProgress +} + +// skipWhiteSpace skips past any white space. +func (z *Tokenizer) skipWhiteSpace() { + if z.err != nil { + return + } + for { + c := z.readByte() + if z.err != nil { + return + } + switch c { + case ' ', '\n', '\r', '\t', '\f': + // No-op. + default: + z.raw.end-- + return + } + } +} + +// readRawOrRCDATA reads until the next "</foo>", where "foo" is z.rawTag and +// is typically something like "script" or "textarea". 
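+// (Those rules are the HTML5 "script data" escape states: a literal
+// "</script>" can be hidden inside a "<!--" ... "-->" comment within script
+// text, and the escaped and double-escaped states below track exactly those
+// transitions.)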
+func (z *Tokenizer) readRawOrRCDATA() { + if z.rawTag == "script" { + z.readScript() + z.textIsRaw = true + z.rawTag = "" + return + } +loop: + for { + c := z.readByte() + if z.err != nil { + break loop + } + if c != '<' { + continue loop + } + c = z.readByte() + if z.err != nil { + break loop + } + if c != '/' { + z.raw.end-- + continue loop + } + if z.readRawEndTag() || z.err != nil { + break loop + } + } + z.data.end = z.raw.end + // A textarea's or title's RCDATA can contain escaped entities. + z.textIsRaw = z.rawTag != "textarea" && z.rawTag != "title" + z.rawTag = "" +} + +// readRawEndTag attempts to read a tag like "</foo>", where "foo" is z.rawTag. +// If it succeeds, it backs up the input position to reconsume the tag and +// returns true. Otherwise it returns false. The opening "</" has already been +// consumed. +func (z *Tokenizer) readRawEndTag() bool { + for i := 0; i < len(z.rawTag); i++ { + c := z.readByte() + if z.err != nil { + return false + } + if c != z.rawTag[i] && c != z.rawTag[i]-('a'-'A') { + z.raw.end-- + return false + } + } + c := z.readByte() + if z.err != nil { + return false + } + switch c { + case ' ', '\n', '\r', '\t', '\f', '/', '>': + // The 3 is 2 for the leading "</" plus 1 for the trailing character c. + z.raw.end -= 3 + len(z.rawTag) + return true + } + z.raw.end-- + return false +} + +// readScript reads until the next </script> tag, following the byzantine +// rules for escaping/hiding the closing tag. +func (z *Tokenizer) readScript() { + defer func() { + z.data.end = z.raw.end + }() + var c byte + +scriptData: + c = z.readByte() + if z.err != nil { + return + } + if c == '<' { + goto scriptDataLessThanSign + } + goto scriptData + +scriptDataLessThanSign: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '/': + goto scriptDataEndTagOpen + case '!': + goto scriptDataEscapeStart + } + z.raw.end-- + goto scriptData + +scriptDataEndTagOpen: + if z.readRawEndTag() || z.err != nil { + return + } + goto scriptData + +scriptDataEscapeStart: + c = z.readByte() + if z.err != nil { + return + } + if c == '-' { + goto scriptDataEscapeStartDash + } + z.raw.end-- + goto scriptData + +scriptDataEscapeStartDash: + c = z.readByte() + if z.err != nil { + return + } + if c == '-' { + goto scriptDataEscapedDashDash + } + z.raw.end-- + goto scriptData + +scriptDataEscaped: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '-': + goto scriptDataEscapedDash + case '<': + goto scriptDataEscapedLessThanSign + } + goto scriptDataEscaped + +scriptDataEscapedDash: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '-': + goto scriptDataEscapedDashDash + case '<': + goto scriptDataEscapedLessThanSign + } + goto scriptDataEscaped + +scriptDataEscapedDashDash: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '-': + goto scriptDataEscapedDashDash + case '<': + goto scriptDataEscapedLessThanSign + case '>': + goto scriptData + } + goto scriptDataEscaped + +scriptDataEscapedLessThanSign: + c = z.readByte() + if z.err != nil { + return + } + if c == '/' { + goto scriptDataEscapedEndTagOpen + } + if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' { + goto scriptDataDoubleEscapeStart + } + z.raw.end-- + goto scriptData + +scriptDataEscapedEndTagOpen: + if z.readRawEndTag() || z.err != nil { + return + } + goto scriptDataEscaped + +scriptDataDoubleEscapeStart: + z.raw.end-- + for i := 0; i < len("script"); i++ { + c = z.readByte() + if z.err != nil { + return + } + if c != "script"[i] && c != 
"SCRIPT"[i] { + z.raw.end-- + goto scriptDataEscaped + } + } + c = z.readByte() + if z.err != nil { + return + } + switch c { + case ' ', '\n', '\r', '\t', '\f', '/', '>': + goto scriptDataDoubleEscaped + } + z.raw.end-- + goto scriptDataEscaped + +scriptDataDoubleEscaped: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '-': + goto scriptDataDoubleEscapedDash + case '<': + goto scriptDataDoubleEscapedLessThanSign + } + goto scriptDataDoubleEscaped + +scriptDataDoubleEscapedDash: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '-': + goto scriptDataDoubleEscapedDashDash + case '<': + goto scriptDataDoubleEscapedLessThanSign + } + goto scriptDataDoubleEscaped + +scriptDataDoubleEscapedDashDash: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '-': + goto scriptDataDoubleEscapedDashDash + case '<': + goto scriptDataDoubleEscapedLessThanSign + case '>': + goto scriptData + } + goto scriptDataDoubleEscaped + +scriptDataDoubleEscapedLessThanSign: + c = z.readByte() + if z.err != nil { + return + } + if c == '/' { + goto scriptDataDoubleEscapeEnd + } + z.raw.end-- + goto scriptDataDoubleEscaped + +scriptDataDoubleEscapeEnd: + if z.readRawEndTag() { + z.raw.end += len("</script>") + goto scriptDataEscaped + } + if z.err != nil { + return + } + goto scriptDataDoubleEscaped +} + +// readComment reads the next comment token starting with "<!--". The opening +// "<!--" has already been consumed. +func (z *Tokenizer) readComment() { + // When modifying this function, consider manually increasing the + // maxSuffixLen constant in func TestComments, from 6 to e.g. 9 or more. + // That increase should only be temporary, not committed, as it + // exponentially affects the test running time. + + z.data.start = z.raw.end + defer func() { + if z.data.end < z.data.start { + // It's a comment with no data, like <!-->. + z.data.end = z.data.start + } + }() + + var dashCount int + beginning := true + for { + c := z.readByte() + if z.err != nil { + z.data.end = z.calculateAbruptCommentDataEnd() + return + } + switch c { + case '-': + dashCount++ + continue + case '>': + if dashCount >= 2 || beginning { + z.data.end = z.raw.end - len("-->") + return + } + case '!': + if dashCount >= 2 { + c = z.readByte() + if z.err != nil { + z.data.end = z.calculateAbruptCommentDataEnd() + return + } else if c == '>' { + z.data.end = z.raw.end - len("--!>") + return + } else if c == '-' { + dashCount = 1 + beginning = false + continue + } + } + } + dashCount = 0 + beginning = false + } +} + +func (z *Tokenizer) calculateAbruptCommentDataEnd() int { + raw := z.Raw() + const prefixLen = len("<!--") + if len(raw) >= prefixLen { + raw = raw[prefixLen:] + if hasSuffix(raw, "--!") { + return z.raw.end - 3 + } else if hasSuffix(raw, "--") { + return z.raw.end - 2 + } else if hasSuffix(raw, "-") { + return z.raw.end - 1 + } + } + return z.raw.end +} + +func hasSuffix(b []byte, suffix string) bool { + if len(b) < len(suffix) { + return false + } + b = b[len(b)-len(suffix):] + for i := range b { + if b[i] != suffix[i] { + return false + } + } + return true +} + +// readUntilCloseAngle reads until the next ">". +func (z *Tokenizer) readUntilCloseAngle() { + z.data.start = z.raw.end + for { + c := z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return + } + if c == '>' { + z.data.end = z.raw.end - len(">") + return + } + } +} + +// readMarkupDeclaration reads the next token starting with "<!". 
It might be +// a "<!--comment-->", a "<!DOCTYPE foo>", a "<![CDATA[section]]>" or +// "<!a bogus comment". The opening "<!" has already been consumed. +func (z *Tokenizer) readMarkupDeclaration() TokenType { + z.data.start = z.raw.end + var c [2]byte + for i := 0; i < 2; i++ { + c[i] = z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return CommentToken + } + } + if c[0] == '-' && c[1] == '-' { + z.readComment() + return CommentToken + } + z.raw.end -= 2 + if z.readDoctype() { + return DoctypeToken + } + if z.allowCDATA && z.readCDATA() { + z.convertNUL = true + return TextToken + } + // It's a bogus comment. + z.readUntilCloseAngle() + return CommentToken +} + +// readDoctype attempts to read a doctype declaration and returns true if +// successful. The opening "<!" has already been consumed. +func (z *Tokenizer) readDoctype() bool { + const s = "DOCTYPE" + for i := 0; i < len(s); i++ { + c := z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return false + } + if c != s[i] && c != s[i]+('a'-'A') { + // Back up to read the fragment of "DOCTYPE" again. + z.raw.end = z.data.start + return false + } + } + if z.skipWhiteSpace(); z.err != nil { + z.data.start = z.raw.end + z.data.end = z.raw.end + return true + } + z.readUntilCloseAngle() + return true +} + +// readCDATA attempts to read a CDATA section and returns true if +// successful. The opening "<!" has already been consumed. +func (z *Tokenizer) readCDATA() bool { + const s = "[CDATA[" + for i := 0; i < len(s); i++ { + c := z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return false + } + if c != s[i] { + // Back up to read the fragment of "[CDATA[" again. + z.raw.end = z.data.start + return false + } + } + z.data.start = z.raw.end + brackets := 0 + for { + c := z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return true + } + switch c { + case ']': + brackets++ + case '>': + if brackets >= 2 { + z.data.end = z.raw.end - len("]]>") + return true + } + brackets = 0 + default: + brackets = 0 + } + } +} + +// startTagIn returns whether the start tag in z.buf[z.data.start:z.data.end] +// case-insensitively matches any element of ss. +func (z *Tokenizer) startTagIn(ss ...string) bool { +loop: + for _, s := range ss { + if z.data.end-z.data.start != len(s) { + continue loop + } + for i := 0; i < len(s); i++ { + c := z.buf[z.data.start+i] + if 'A' <= c && c <= 'Z' { + c += 'a' - 'A' + } + if c != s[i] { + continue loop + } + } + return true + } + return false +} + +// readStartTag reads the next start tag token. The opening "<a" has already +// been consumed, where 'a' means anything in [A-Za-z]. +func (z *Tokenizer) readStartTag() TokenType { + z.readTag(true) + if z.err != nil { + return ErrorToken + } + // Several tags flag the tokenizer's next token as raw. + c, raw := z.buf[z.data.start], false + if 'A' <= c && c <= 'Z' { + c += 'a' - 'A' + } + switch c { + case 'i': + raw = z.startTagIn("iframe") + case 'n': + raw = z.startTagIn("noembed", "noframes", "noscript") + case 'p': + raw = z.startTagIn("plaintext") + case 's': + raw = z.startTagIn("script", "style") + case 't': + raw = z.startTagIn("textarea", "title") + case 'x': + raw = z.startTagIn("xmp") + } + if raw { + z.rawTag = strings.ToLower(string(z.buf[z.data.start:z.data.end])) + } + // Look for a self-closing token like "<br/>". + if z.err == nil && z.buf[z.raw.end-2] == '/' { + return SelfClosingTagToken + } + return StartTagToken +} + +// readTag reads the next tag token and its attributes. 
If saveAttr, those +// attributes are saved in z.attr, otherwise z.attr is set to an empty slice. +// The opening "<a" or "</a" has already been consumed, where 'a' means anything +// in [A-Za-z]. +func (z *Tokenizer) readTag(saveAttr bool) { + z.attr = z.attr[:0] + z.nAttrReturned = 0 + // Read the tag name and attribute key/value pairs. + z.readTagName() + if z.skipWhiteSpace(); z.err != nil { + return + } + for { + c := z.readByte() + if z.err != nil || c == '>' { + break + } + z.raw.end-- + z.readTagAttrKey() + z.readTagAttrVal() + // Save pendingAttr if saveAttr and that attribute has a non-empty key. + if saveAttr && z.pendingAttr[0].start != z.pendingAttr[0].end { + z.attr = append(z.attr, z.pendingAttr) + } + if z.skipWhiteSpace(); z.err != nil { + break + } + } +} + +// readTagName sets z.data to the "div" in "<div k=v>". The reader (z.raw.end) +// is positioned such that the first byte of the tag name (the "d" in "<div") +// has already been consumed. +func (z *Tokenizer) readTagName() { + z.data.start = z.raw.end - 1 + for { + c := z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return + } + switch c { + case ' ', '\n', '\r', '\t', '\f': + z.data.end = z.raw.end - 1 + return + case '/', '>': + z.raw.end-- + z.data.end = z.raw.end + return + } + } +} + +// readTagAttrKey sets z.pendingAttr[0] to the "k" in "<div k=v>". +// Precondition: z.err == nil. +func (z *Tokenizer) readTagAttrKey() { + z.pendingAttr[0].start = z.raw.end + for { + c := z.readByte() + if z.err != nil { + z.pendingAttr[0].end = z.raw.end + return + } + switch c { + case '=': + if z.pendingAttr[0].start+1 == z.raw.end { + // WHATWG 13.2.5.32, if we see an equals sign before the attribute name + // begins, we treat it as a character in the attribute name and continue. + continue + } + fallthrough + case ' ', '\n', '\r', '\t', '\f', '/', '>': + // WHATWG 13.2.5.33 Attribute name state + // We need to reconsume the char in the after attribute name state to support the / character + z.raw.end-- + z.pendingAttr[0].end = z.raw.end + return + } + } +} + +// readTagAttrVal sets z.pendingAttr[1] to the "v" in "<div k=v>". +func (z *Tokenizer) readTagAttrVal() { + z.pendingAttr[1].start = z.raw.end + z.pendingAttr[1].end = z.raw.end + if z.skipWhiteSpace(); z.err != nil { + return + } + c := z.readByte() + if z.err != nil { + return + } + if c == '/' { + // WHATWG 13.2.5.34 After attribute name state + // U+002F SOLIDUS (/) - Switch to the self-closing start tag state. + return + } + if c != '=' { + z.raw.end-- + return + } + if z.skipWhiteSpace(); z.err != nil { + return + } + quote := z.readByte() + if z.err != nil { + return + } + switch quote { + case '>': + z.raw.end-- + return + + case '\'', '"': + z.pendingAttr[1].start = z.raw.end + for { + c := z.readByte() + if z.err != nil { + z.pendingAttr[1].end = z.raw.end + return + } + if c == quote { + z.pendingAttr[1].end = z.raw.end - 1 + return + } + } + + default: + z.pendingAttr[1].start = z.raw.end - 1 + for { + c := z.readByte() + if z.err != nil { + z.pendingAttr[1].end = z.raw.end + return + } + switch c { + case ' ', '\n', '\r', '\t', '\f': + z.pendingAttr[1].end = z.raw.end - 1 + return + case '>': + z.raw.end-- + z.pendingAttr[1].end = z.raw.end + return + } + } + } +} + +// Next scans the next token and returns its type. 
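+// It returns ErrorToken on failure, including io.EOF at the end of the input;
+// callers typically loop until ErrorToken and then inspect Err. The current
+// token remains accessible through Raw, Text, TagName, TagAttr and Token until
+// the next call to Next.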
+func (z *Tokenizer) Next() TokenType { + z.raw.start = z.raw.end + z.data.start = z.raw.end + z.data.end = z.raw.end + if z.err != nil { + z.tt = ErrorToken + return z.tt + } + if z.rawTag != "" { + if z.rawTag == "plaintext" { + // Read everything up to EOF. + for z.err == nil { + z.readByte() + } + z.data.end = z.raw.end + z.textIsRaw = true + } else { + z.readRawOrRCDATA() + } + if z.data.end > z.data.start { + z.tt = TextToken + z.convertNUL = true + return z.tt + } + } + z.textIsRaw = false + z.convertNUL = false + +loop: + for { + c := z.readByte() + if z.err != nil { + break loop + } + if c != '<' { + continue loop + } + + // Check if the '<' we have just read is part of a tag, comment + // or doctype. If not, it's part of the accumulated text token. + c = z.readByte() + if z.err != nil { + break loop + } + var tokenType TokenType + switch { + case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z': + tokenType = StartTagToken + case c == '/': + tokenType = EndTagToken + case c == '!' || c == '?': + // We use CommentToken to mean any of "<!--actual comments-->", + // "<!DOCTYPE declarations>" and "<?xml processing instructions?>". + tokenType = CommentToken + default: + // Reconsume the current character. + z.raw.end-- + continue + } + + // We have a non-text token, but we might have accumulated some text + // before that. If so, we return the text first, and return the non- + // text token on the subsequent call to Next. + if x := z.raw.end - len("<a"); z.raw.start < x { + z.raw.end = x + z.data.end = x + z.tt = TextToken + return z.tt + } + switch tokenType { + case StartTagToken: + z.tt = z.readStartTag() + return z.tt + case EndTagToken: + c = z.readByte() + if z.err != nil { + break loop + } + if c == '>' { + // "</>" does not generate a token at all. Generate an empty comment + // to allow passthrough clients to pick up the data using Raw. + // Reset the tokenizer state and start again. + z.tt = CommentToken + return z.tt + } + if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' { + z.readTag(false) + if z.err != nil { + z.tt = ErrorToken + } else { + z.tt = EndTagToken + } + return z.tt + } + z.raw.end-- + z.readUntilCloseAngle() + z.tt = CommentToken + return z.tt + case CommentToken: + if c == '!' { + z.tt = z.readMarkupDeclaration() + return z.tt + } + z.raw.end-- + z.readUntilCloseAngle() + z.tt = CommentToken + return z.tt + } + } + if z.raw.start < z.raw.end { + z.data.end = z.raw.end + z.tt = TextToken + return z.tt + } + z.tt = ErrorToken + return z.tt +} + +// Raw returns the unmodified text of the current token. Calling Next, Token, +// Text, TagName or TagAttr may change the contents of the returned slice. +// +// The token stream's raw bytes partition the byte stream (up until an +// ErrorToken). There are no overlaps or gaps between two consecutive token's +// raw bytes. One implication is that the byte offset of the current token is +// the sum of the lengths of all previous tokens' raw bytes. +func (z *Tokenizer) Raw() []byte { + return z.buf[z.raw.start:z.raw.end] +} + +// convertNewlines converts "\r" and "\r\n" in s to "\n". +// The conversion happens in place, but the resulting slice may be shorter. 
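+// For example, convertNewlines([]byte("a\r\nb\rc")) returns []byte("a\nb\nc"),
+// one byte shorter than its input.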
+func convertNewlines(s []byte) []byte { + for i, c := range s { + if c != '\r' { + continue + } + + src := i + 1 + if src >= len(s) || s[src] != '\n' { + s[i] = '\n' + continue + } + + dst := i + for src < len(s) { + if s[src] == '\r' { + if src+1 < len(s) && s[src+1] == '\n' { + src++ + } + s[dst] = '\n' + } else { + s[dst] = s[src] + } + src++ + dst++ + } + return s[:dst] + } + return s +} + +var ( + nul = []byte("\x00") + replacement = []byte("\ufffd") +) + +// Text returns the unescaped text of a text, comment or doctype token. The +// contents of the returned slice may change on the next call to Next. +func (z *Tokenizer) Text() []byte { + switch z.tt { + case TextToken, CommentToken, DoctypeToken: + s := z.buf[z.data.start:z.data.end] + z.data.start = z.raw.end + z.data.end = z.raw.end + s = convertNewlines(s) + if (z.convertNUL || z.tt == CommentToken) && bytes.Contains(s, nul) { + s = bytes.Replace(s, nul, replacement, -1) + } + if !z.textIsRaw { + s = unescape(s, false) + } + return s + } + return nil +} + +// TagName returns the lower-cased name of a tag token (the `img` out of +// `<IMG SRC="foo">`) and whether the tag has attributes. +// The contents of the returned slice may change on the next call to Next. +func (z *Tokenizer) TagName() (name []byte, hasAttr bool) { + if z.data.start < z.data.end { + switch z.tt { + case StartTagToken, EndTagToken, SelfClosingTagToken: + s := z.buf[z.data.start:z.data.end] + z.data.start = z.raw.end + z.data.end = z.raw.end + return lower(s), z.nAttrReturned < len(z.attr) + } + } + return nil, false +} + +// TagAttr returns the lower-cased key and unescaped value of the next unparsed +// attribute for the current tag token and whether there are more attributes. +// The contents of the returned slices may change on the next call to Next. +func (z *Tokenizer) TagAttr() (key, val []byte, moreAttr bool) { + if z.nAttrReturned < len(z.attr) { + switch z.tt { + case StartTagToken, SelfClosingTagToken: + x := z.attr[z.nAttrReturned] + z.nAttrReturned++ + key = z.buf[x[0].start:x[0].end] + val = z.buf[x[1].start:x[1].end] + return lower(key), unescape(convertNewlines(val), true), z.nAttrReturned < len(z.attr) + } + } + return nil, nil, false +} + +// Token returns the current Token. The result's Data and Attr values remain +// valid after subsequent Next calls. +func (z *Tokenizer) Token() Token { + t := Token{Type: z.tt} + switch z.tt { + case TextToken, CommentToken, DoctypeToken: + t.Data = string(z.Text()) + case StartTagToken, SelfClosingTagToken, EndTagToken: + name, moreAttr := z.TagName() + for moreAttr { + var key, val []byte + key, val, moreAttr = z.TagAttr() + t.Attr = append(t.Attr, Attribute{"", atom.String(key), string(val)}) + } + if a := atom.Lookup(name); a != 0 { + t.DataAtom, t.Data = a, a.String() + } else { + t.DataAtom, t.Data = 0, string(name) + } + } + return t +} + +// SetMaxBuf sets a limit on the amount of data buffered during tokenization. +// A value of 0 means unlimited. +func (z *Tokenizer) SetMaxBuf(n int) { + z.maxBuf = n +} + +// NewTokenizer returns a new HTML Tokenizer for the given Reader. +// The input is assumed to be UTF-8 encoded. +func NewTokenizer(r io.Reader) *Tokenizer { + return NewTokenizerFragment(r, "") +} + +// NewTokenizerFragment returns a new HTML Tokenizer for the given Reader, for +// tokenizing an existing element's InnerHTML fragment. contextTag is that +// element's tag, such as "div" or "iframe". 
+// +// For example, how the InnerHTML "a<b" is tokenized depends on whether it is +// for a <p> tag or a <script> tag. +// +// The input is assumed to be UTF-8 encoded. +func NewTokenizerFragment(r io.Reader, contextTag string) *Tokenizer { + z := &Tokenizer{ + r: r, + buf: make([]byte, 0, 4096), + } + if contextTag != "" { + switch s := strings.ToLower(contextTag); s { + case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "title", "textarea", "xmp": + z.rawTag = s + } + } + return z +}
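A minimal sketch of driving the tokenizer defined above (illustrative usage, not part of the vendored sources): it loops until ErrorToken, uses Err to tell io.EOF apart from a real failure, and calls Token once per iteration, since the accessors consume the current token's state.

package main

import (
	"fmt"
	"io"
	"log"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	// NewTokenizerFragment(r, "script") would instead treat the entire input
	// as raw text; NewTokenizer is the plain top-level entry point.
	z := html.NewTokenizer(strings.NewReader(`<p class="a">x &amp; y</p>`))
	for {
		if z.Next() == html.ErrorToken {
			if err := z.Err(); err != io.EOF {
				log.Fatal(err)
			}
			return // io.EOF means the end of tokenization
		}
		tok := z.Token() // call once per iteration; accessors consume state
		fmt.Printf("%s: %s\n", tok.Type, tok)
	}
}

diff --git a/vendor/golang.org/x/net/http2/config.go b/vendor/golang.org/x/net/http2/config.go index de58dfb8dc4..ca645d9a1af 100644 --- a/vendor/golang.org/x/net/http2/config.go +++ b/vendor/golang.org/x/net/http2/config.go @@ -60,7 +60,7 @@ func configFromServer(h1 *http.Server, h2 *Server) http2Config { return conf } -// configFromServer merges configuration settings from h2 and h2.t1.HTTP2 +// configFromTransport merges configuration settings from h2 and h2.t1.HTTP2 // (the net/http Transport). func configFromTransport(h2 *Transport) http2Config { conf := http2Config{ diff --git a/vendor/golang.org/x/net/http2/config_go124.go b/vendor/golang.org/x/net/http2/config_go124.go index e3784123c81..5b516c55fff 100644 --- a/vendor/golang.org/x/net/http2/config_go124.go +++ b/vendor/golang.org/x/net/http2/config_go124.go @@ -13,7 +13,7 @@ func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) { fillNetHTTPConfig(conf, srv.HTTP2) } -// fillNetHTTPServerConfig sets fields in conf from tr.HTTP2. +// fillNetHTTPTransportConfig sets fields in conf from tr.HTTP2. func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) { fillNetHTTPConfig(conf, tr.HTTP2) } diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 105c3b279c0..81faec7e75d 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -1490,7 +1490,7 @@ func (mh *MetaHeadersFrame) checkPseudos() error { pf := mh.PseudoFields() for i, hf := range pf { switch hf.Name { - case ":method", ":path", ":scheme", ":authority": + case ":method", ":path", ":scheme", ":authority", ":protocol": isRequest = true case ":status": isResponse = true @@ -1498,7 +1498,7 @@ func (mh *MetaHeadersFrame) checkPseudos() error { return pseudoHeaderError(hf.Name) } // Check for duplicates. - // This would be a bad algorithm, but N is 4. + // This would be a bad algorithm, but N is 5. // And this doesn't allocate.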
for _, hf2 := range pf[:i] { if hf.Name == hf2.Name { diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index 7688c356b7c..c7601c909ff 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -34,10 +34,11 @@ import ( ) var ( - VerboseLogs bool - logFrameWrites bool - logFrameReads bool - inTests bool + VerboseLogs bool + logFrameWrites bool + logFrameReads bool + inTests bool + disableExtendedConnectProtocol bool ) func init() { @@ -50,6 +51,9 @@ func init() { logFrameWrites = true logFrameReads = true } + if strings.Contains(e, "http2xconnect=0") { + disableExtendedConnectProtocol = true + } } const ( @@ -141,6 +145,10 @@ func (s Setting) Valid() error { if s.Val < 16384 || s.Val > 1<<24-1 { return ConnectionError(ErrCodeProtocol) } + case SettingEnableConnectProtocol: + if s.Val != 1 && s.Val != 0 { + return ConnectionError(ErrCodeProtocol) + } } return nil } @@ -150,21 +158,23 @@ func (s Setting) Valid() error { type SettingID uint16 const ( - SettingHeaderTableSize SettingID = 0x1 - SettingEnablePush SettingID = 0x2 - SettingMaxConcurrentStreams SettingID = 0x3 - SettingInitialWindowSize SettingID = 0x4 - SettingMaxFrameSize SettingID = 0x5 - SettingMaxHeaderListSize SettingID = 0x6 + SettingHeaderTableSize SettingID = 0x1 + SettingEnablePush SettingID = 0x2 + SettingMaxConcurrentStreams SettingID = 0x3 + SettingInitialWindowSize SettingID = 0x4 + SettingMaxFrameSize SettingID = 0x5 + SettingMaxHeaderListSize SettingID = 0x6 + SettingEnableConnectProtocol SettingID = 0x8 ) var settingName = map[SettingID]string{ - SettingHeaderTableSize: "HEADER_TABLE_SIZE", - SettingEnablePush: "ENABLE_PUSH", - SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", - SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", - SettingMaxFrameSize: "MAX_FRAME_SIZE", - SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", + SettingHeaderTableSize: "HEADER_TABLE_SIZE", + SettingEnablePush: "ENABLE_PUSH", + SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", + SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", + SettingMaxFrameSize: "MAX_FRAME_SIZE", + SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", + SettingEnableConnectProtocol: "ENABLE_CONNECT_PROTOCOL", } func (s SettingID) String() string { diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 832414b450c..b55547aec64 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -932,14 +932,18 @@ func (sc *serverConn) serve(conf http2Config) { sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) } + settings := writeSettings{ + {SettingMaxFrameSize, conf.MaxReadFrameSize}, + {SettingMaxConcurrentStreams, sc.advMaxStreams}, + {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, + {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize}, + {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)}, + } + if !disableExtendedConnectProtocol { + settings = append(settings, Setting{SettingEnableConnectProtocol, 1}) + } sc.writeFrame(FrameWriteRequest{ - write: writeSettings{ - {SettingMaxFrameSize, conf.MaxReadFrameSize}, - {SettingMaxConcurrentStreams, sc.advMaxStreams}, - {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, - {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize}, - {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)}, - }, + write: settings, }) sc.unackedSettings++ @@ -1801,6 +1805,9 @@ func (sc *serverConn) processSetting(s 
Setting) error { sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31 case SettingMaxHeaderListSize: sc.peerMaxHeaderListSize = s.Val + case SettingEnableConnectProtocol: + // Receipt of this parameter by a server does not + // have any impact. default: // Unknown setting: "An endpoint that receives a SETTINGS // frame with any unknown or unsupported identifier MUST @@ -2231,11 +2238,17 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res scheme: f.PseudoValue("scheme"), authority: f.PseudoValue("authority"), path: f.PseudoValue("path"), + protocol: f.PseudoValue("protocol"), + } + + // Extended CONNECT is disabled, so we should not see :protocol. + if disableExtendedConnectProtocol && rp.protocol != "" { + return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) + } isConnect := rp.method == "CONNECT" if isConnect { - if rp.path != "" || rp.scheme != "" || rp.authority == "" { + if rp.protocol == "" && (rp.path != "" || rp.scheme != "" || rp.authority == "") { return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) } } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") { @@ -2259,6 +2272,9 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res if rp.authority == "" { rp.authority = rp.header.Get("Host") } + if rp.protocol != "" { + rp.header.Set(":protocol", rp.protocol) + } rw, req, err := sc.newWriterAndRequestNoBody(st, rp) if err != nil { @@ -2285,6 +2301,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res type requestParam struct { method string scheme, authority, path string + protocol string header http.Header } @@ -2326,7 +2343,7 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r var url_ *url.URL var requestURI string - if rp.method == "CONNECT" { + if rp.method == "CONNECT" && rp.protocol == "" { url_ = &url.URL{Host: rp.authority} requestURI = rp.authority // mimic HTTP/1 server behavior } else { diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index f5968f44071..b2e2ed33739 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -368,25 +368,27 @@ type ClientConn struct { idleTimeout time.Duration // or 0 for never idleTimer timer - mu sync.Mutex // guards following - cond *sync.Cond // hold mu; broadcast on flow/closed changes - flow outflow // our conn-level flow control quota (cs.outflow is per stream) - inflow inflow // peer's conn-level flow control - doNotReuse bool // whether conn is marked to not be reused for any future requests - closing bool - closed bool - seenSettings bool // true if we've seen a settings frame, false otherwise - wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back - goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received - goAwayDebug string // goAway frame's debug data, retained as a string - streams map[uint32]*clientStream // client-initiated - streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip - nextStreamID uint32 - pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams - pings map[[8]byte]chan struct{} // in flight ping data to notification channel - br *bufio.Reader - lastActive time.Time - lastIdle time.Time // time last idle + mu sync.Mutex // guards following + cond *sync.Cond // hold mu; broadcast on
flow/closed changes + flow outflow // our conn-level flow control quota (cs.outflow is per stream) + inflow inflow // peer's conn-level flow control + doNotReuse bool // whether conn is marked to not be reused for any future requests + closing bool + closed bool + closedOnIdle bool // true if conn was closed for idleness + seenSettings bool // true if we've seen a settings frame, false otherwise + seenSettingsChan chan struct{} // closed when seenSettings is true or frame reading fails + wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back + goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received + goAwayDebug string // goAway frame's debug data, retained as a string + streams map[uint32]*clientStream // client-initiated + streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip + nextStreamID uint32 + pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams + pings map[[8]byte]chan struct{} // in flight ping data to notification channel + br *bufio.Reader + lastActive time.Time + lastIdle time.Time // time last idle // Settings from peer: (also guarded by wmu) maxFrameSize uint32 maxConcurrentStreams uint32 @@ -396,6 +398,17 @@ type ClientConn struct { initialStreamRecvWindowSize int32 readIdleTimeout time.Duration pingTimeout time.Duration + extendedConnectAllowed bool + + // rstStreamPingsBlocked works around an unfortunate gRPC behavior. + // gRPC strictly limits the number of PING frames that it will receive. + // The default is two pings per two hours, but the limit resets every time + // the gRPC endpoint sends a HEADERS or DATA frame. See golang/go#70575. + // + // rstStreamPingsBlocked is set after receiving a response to a PING frame + // bundled with an RST_STREAM (see pendingResets below), and cleared after + // receiving a HEADERS or DATA frame. + rstStreamPingsBlocked bool // pendingResets is the number of RST_STREAM frames we have sent to the peer, // without confirming that the peer has received them. When we send a RST_STREAM, @@ -819,6 +832,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. streams: make(map[uint32]*clientStream), singleUse: singleUse, + seenSettingsChan: make(chan struct{}), wantSettingsAck: true, readIdleTimeout: conf.SendPingTimeout, pingTimeout: conf.PingTimeout, @@ -1076,10 +1090,12 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { // If this connection has never been used for a request and is closed, // then let it take a request (which will fail). + // If the conn was closed for idleness, we're racing the idle timer; + // don't try to use the conn. (Issue #70515.) // // This avoids a situation where an error early in a connection's lifetime // goes unreported. - if cc.nextStreamID == 1 && cc.streamsReserved == 0 && cc.closed { + if cc.nextStreamID == 1 && cc.streamsReserved == 0 && cc.closed && !cc.closedOnIdle { st.canTakeNewRequest = true } @@ -1142,6 +1158,7 @@ func (cc *ClientConn) closeIfIdle() { return } cc.closed = true + cc.closedOnIdle = true nextID := cc.nextStreamID // TODO: do clients send GOAWAY too? maybe? Just Close: cc.mu.Unlock() @@ -1466,6 +1483,8 @@ func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream) cs.cleanupWriteRequest(err) } +var errExtendedConnectNotSupported = errors.New("net/http: extended connect not supported by peer") + // writeRequest sends a request. 
// // It returns nil after the request is written, the response read, @@ -1481,12 +1500,31 @@ func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStre return err } + // Wait for the first SETTINGS frame to be received; a server can change + // this value later, but we only wait for the first one. + var isExtendedConnect bool + if req.Method == "CONNECT" && req.Header.Get(":protocol") != "" { + isExtendedConnect = true + } + // Acquire the new-request lock by writing to reqHeaderMu. // This lock guards the critical section covering allocating a new stream ID // (requires mu) and creating the stream (requires wmu). if cc.reqHeaderMu == nil { panic("RoundTrip on uninitialized ClientConn") // for tests } + if isExtendedConnect { + select { + case <-cs.reqCancel: + return errRequestCanceled + case <-ctx.Done(): + return ctx.Err() + case <-cc.seenSettingsChan: + if !cc.extendedConnectAllowed { + return errExtendedConnectNotSupported + } + } + } select { case cc.reqHeaderMu <- struct{}{}: case <-cs.reqCancel:
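A minimal sketch (illustrative, not part of the vendored sources) of what the extended-CONNECT path above expects from a caller: the CONNECT method plus a ":protocol" pseudo-header, which the validateHeaders change below explicitly permits. The "websocket" protocol value and the URL are placeholders.

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	tr := &http2.Transport{}
	req, err := http.NewRequest("CONNECT", "https://example.com/chat", nil)
	if err != nil {
		log.Fatal(err)
	}
	// Selects the extended CONNECT branch of writeRequest: RoundTrip blocks
	// until the peer's first SETTINGS frame arrives and fails if the peer did
	// not advertise SETTINGS_ENABLE_CONNECT_PROTOCOL.
	req.Header.Set(":protocol", "websocket")
	res, err := tr.RoundTrip(req)
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()
	// res.Body is the read side of the established bidirectional stream.
}

@@ -1714,10 +1752,14 @@ func (cs *clientStream) cleanupWriteRequest(err error) { ping := false if !closeOnIdle { cc.mu.Lock() - if cc.pendingResets == 0 { - ping = true + // rstStreamPingsBlocked works around a gRPC behavior: + // see comment on the field for details. + if !cc.rstStreamPingsBlocked { + if cc.pendingResets == 0 { + ping = true + } + cc.pendingResets++ } - cc.pendingResets++ cc.mu.Unlock() } cc.writeStreamReset(cs.ID, ErrCodeCancel, ping, err) @@ -2030,7 +2072,7 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) func validateHeaders(hdrs http.Header) string { for k, vv := range hdrs { - if !httpguts.ValidHeaderFieldName(k) { + if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" { return fmt.Sprintf("name %q", k) } for _, v := range vv { @@ -2046,6 +2088,10 @@ var errNilRequestURL = errors.New("http2: Request.URI is nil") +func isNormalConnect(req *http.Request) bool { + return req.Method == "CONNECT" && req.Header.Get(":protocol") == "" +} + // requires cc.wmu be held. func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { cc.hbuf.Reset() @@ -2066,7 +2112,7 @@ } var path string - if req.Method != "CONNECT" { + if !isNormalConnect(req) { path = req.URL.RequestURI() if !validPseudoPath(path) { orig := path @@ -2103,7 +2149,7 @@ m = http.MethodGet } f(":method", m) - if req.Method != "CONNECT" { + if !isNormalConnect(req) { f(":path", path) f(":scheme", req.URL.Scheme) } @@ -2392,9 +2438,12 @@ func (rl *clientConnReadLoop) cleanup() { // This avoids a situation where new connections are constantly created, // added to the pool, fail, and are removed from the pool, without any error // being surfaced to the user.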
- const unusedWaitTime = 5 * time.Second + unusedWaitTime := 5 * time.Second + if cc.idleTimeout > 0 && unusedWaitTime > cc.idleTimeout { + unusedWaitTime = cc.idleTimeout + } idleTime := cc.t.now().Sub(cc.lastActive) - if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime { + if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime && !cc.closedOnIdle { cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() { cc.t.connPool().MarkDead(cc) }) @@ -2461,7 +2510,7 @@ func (rl *clientConnReadLoop) run() error { cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) } if se, ok := err.(StreamError); ok { - if cs := rl.streamByID(se.StreamID); cs != nil { + if cs := rl.streamByID(se.StreamID, notHeaderOrDataFrame); cs != nil { if se.Cause == nil { se.Cause = cc.fr.errDetail } @@ -2507,13 +2556,16 @@ func (rl *clientConnReadLoop) run() error { if VerboseLogs { cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err) } + if !cc.seenSettings { + close(cc.seenSettingsChan) + } return err } } } func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, headerOrDataFrame) if cs == nil { // We'd get here if we canceled a request while the // server had its response still in flight. So if this @@ -2842,7 +2894,7 @@ func (b transportResponseBody) Close() error { func (rl *clientConnReadLoop) processData(f *DataFrame) error { cc := rl.cc - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, headerOrDataFrame) data := f.Data() if cs == nil { cc.mu.Lock() @@ -2977,9 +3029,22 @@ func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { cs.abortStream(err) } -func (rl *clientConnReadLoop) streamByID(id uint32) *clientStream { +// Constants passed to streamByID for documentation purposes. +const ( + headerOrDataFrame = true + notHeaderOrDataFrame = false +) + +// streamByID returns the stream with the given id, or nil if no stream has that id. +// If headerOrData is true, it clears rstStreamPingsBlocked. +func (rl *clientConnReadLoop) streamByID(id uint32, headerOrData bool) *clientStream { rl.cc.mu.Lock() defer rl.cc.mu.Unlock() + if headerOrData { + // Work around an unfortunate gRPC behavior. + // See comment on ClientConn.rstStreamPingsBlocked for details. + rl.cc.rstStreamPingsBlocked = false + } cs := rl.cc.streams[id] if cs != nil && !cs.readAborted { return cs @@ -3073,6 +3138,21 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { case SettingHeaderTableSize: cc.henc.SetMaxDynamicTableSize(s.Val) cc.peerMaxHeaderTableSize = s.Val + case SettingEnableConnectProtocol: + if err := s.Valid(); err != nil { + return err + } + // If the peer wants to send us SETTINGS_ENABLE_CONNECT_PROTOCOL, + // we require that it do so in the first SETTINGS frame. + // + // When we attempt to use extended CONNECT, we wait for the first + // SETTINGS frame to see if the server supports it. If we let the + // server enable the feature with a later SETTINGS frame, then + // users will see inconsistent results depending on whether we've + // seen that frame or not. + if !cc.seenSettings { + cc.extendedConnectAllowed = s.Val == 1 + } default: cc.vlogf("Unhandled Setting: %v", s) } @@ -3090,6 +3170,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { // connection can establish to our default.
cc.maxConcurrentStreams = defaultMaxConcurrentStreams } + close(cc.seenSettingsChan) cc.seenSettings = true } @@ -3098,7 +3179,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { cc := rl.cc - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, notHeaderOrDataFrame) if f.StreamID != 0 && cs == nil { return nil } @@ -3127,7 +3208,7 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { } func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, notHeaderOrDataFrame) if cs == nil { // TODO: return error if server tries to RST_STREAM an idle stream return nil @@ -3205,6 +3286,7 @@ func (rl *clientConnReadLoop) processPing(f *PingFrame) error { if cc.pendingResets > 0 { // See clientStream.cleanupWriteRequest. cc.pendingResets = 0 + cc.rstStreamPingsBlocked = true cc.cond.Broadcast() } return nil diff --git a/vendor/golang.org/x/oauth2/LICENSE b/vendor/golang.org/x/oauth2/LICENSE index 6a66aea5eaf..2a7cf70da6e 100644 --- a/vendor/golang.org/x/oauth2/LICENSE +++ b/vendor/golang.org/x/oauth2/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md index 781770c2046..48dbb9d84c8 100644 --- a/vendor/golang.org/x/oauth2/README.md +++ b/vendor/golang.org/x/oauth2/README.md @@ -5,15 +5,6 @@ oauth2 package contains a client implementation for OAuth 2.0 spec. -## Installation - -~~~~ -go get golang.org/x/oauth2 -~~~~ - -Or you can manually git clone the repository to -`$(go env GOPATH)/src/golang.org/x/oauth2`. - See pkg.go.dev for further documentation and examples. * [pkg.go.dev/golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) @@ -33,7 +24,11 @@ The main issue tracker for the oauth2 repository is located at https://github.com/golang/oauth2/issues. This repository uses Gerrit for code changes. To learn how to submit changes to -this repository, see https://golang.org/doc/contribute.html. In particular: +this repository, see https://go.dev/doc/contribute. + +The git repository is https://go.googlesource.com/oauth2. + +Note: * Excluding trivial changes, all contributions should be connected to an existing issue. * API changes must go through the [change proposal process](https://go.dev/s/proposal-process) before they can be accepted. diff --git a/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go b/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go index 2459d069f73..51121a3d520 100644 --- a/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go +++ b/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go @@ -37,7 +37,7 @@ type Config struct { // URL. 
This is a constant specific to each server. TokenURL string - // Scope specifies optional requested permissions. + // Scopes specifies optional requested permissions. Scopes []string // EndpointParams specifies additional parameters for requests to the token endpoint. diff --git a/vendor/golang.org/x/oauth2/google/externalaccount/aws.go b/vendor/golang.org/x/oauth2/google/externalaccount/aws.go index ca27c2e98c9..55d59999e0e 100644 --- a/vendor/golang.org/x/oauth2/google/externalaccount/aws.go +++ b/vendor/golang.org/x/oauth2/google/externalaccount/aws.go @@ -28,7 +28,7 @@ import ( // AwsSecurityCredentials models AWS security credentials. type AwsSecurityCredentials struct { - // AccessKeyId is the AWS Access Key ID - Required. + // AccessKeyID is the AWS Access Key ID - Required. AccessKeyID string `json:"AccessKeyID"` // SecretAccessKey is the AWS Secret Access Key - Required. SecretAccessKey string `json:"SecretAccessKey"` diff --git a/vendor/golang.org/x/oauth2/google/externalaccount/basecredentials.go b/vendor/golang.org/x/oauth2/google/externalaccount/basecredentials.go index 6c81a68728e..ee34924e301 100644 --- a/vendor/golang.org/x/oauth2/google/externalaccount/basecredentials.go +++ b/vendor/golang.org/x/oauth2/google/externalaccount/basecredentials.go @@ -329,7 +329,7 @@ type SubjectTokenSupplier interface { // AwsRegion should return the AWS region or an error. AwsRegion(ctx context.Context, options SupplierOptions) (string, error) - // GetAwsSecurityCredentials should return a valid set of AwsSecurityCredentials or an error. + // AwsSecurityCredentials should return a valid set of AwsSecurityCredentials or an error. // The external account token source does not cache the returned security credentials, so caching // logic should be implemented in the supplier to prevent multiple requests for the same security credentials. AwsSecurityCredentials(ctx context.Context, options SupplierOptions) (*AwsSecurityCredentials, error) diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index 09f6a49b80a..74f052aa9fa 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -56,7 +56,7 @@ type Config struct { // the OAuth flow, after the resource owner's URLs. RedirectURL string - // Scope specifies optional requested permissions. + // Scopes specifies optional requested permissions. Scopes []string // authStyleCache caches which auth style to use when Endpoint.AuthStyle is diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index 5bbb3321748..109997d77ce 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -49,6 +49,13 @@ type Token struct { // mechanisms for that TokenSource will not be used. Expiry time.Time `json:"expiry,omitempty"` + // ExpiresIn is the OAuth2 wire format "expires_in" field, + // which specifies how many seconds later the token expires, + // relative to an unknown time base approximately around "now". + // It is the application's responsibility to populate + // `Expiry` from `ExpiresIn` when required. + ExpiresIn int64 `json:"expires_in,omitempty"` +
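// An illustrative sketch, not part of the vendored file: an application
// populating Expiry from ExpiresIn after decoding a token response, with
// "now" captured just before the request was sent:
//
//	if tok.Expiry.IsZero() && tok.ExpiresIn > 0 {
//		tok.Expiry = now.Add(time.Duration(tok.ExpiresIn) * time.Second)
//	}
// raw optionally contains extra metadata from the server // when updating a token.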
raw interface{} diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index 97cb916f2c9..be8c0020701 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -246,6 +246,18 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e return sendfile(outfd, infd, offset, count) } +func Dup3(oldfd, newfd, flags int) error { + if oldfd == newfd || flags&^O_CLOEXEC != 0 { + return EINVAL + } + how := F_DUP2FD + if flags&O_CLOEXEC != 0 { + how = F_DUP2FD_CLOEXEC + } + _, err := fcntl(oldfd, how, newfd) + return err +} + /* * Exposed directly */ diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index ccba391c9fb..6ebc48b3fec 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -321,6 +321,9 @@ const ( AUDIT_INTEGRITY_STATUS = 0x70a AUDIT_IPC = 0x517 AUDIT_IPC_SET_PERM = 0x51f + AUDIT_IPE_ACCESS = 0x58c + AUDIT_IPE_CONFIG_CHANGE = 0x58d + AUDIT_IPE_POLICY_LOAD = 0x58e AUDIT_KERNEL = 0x7d0 AUDIT_KERNEL_OTHER = 0x524 AUDIT_KERN_MODULE = 0x532 @@ -489,6 +492,7 @@ const ( BPF_F_ID = 0x20 BPF_F_NETFILTER_IP_DEFRAG = 0x1 BPF_F_QUERY_EFFECTIVE = 0x1 + BPF_F_REDIRECT_FLAGS = 0x19 BPF_F_REPLACE = 0x4 BPF_F_SLEEPABLE = 0x10 BPF_F_STRICT_ALIGNMENT = 0x1 @@ -1166,6 +1170,7 @@ const ( EXTA = 0xe EXTB = 0xf F2FS_SUPER_MAGIC = 0xf2f52010 + FALLOC_FL_ALLOCATE_RANGE = 0x0 FALLOC_FL_COLLAPSE_RANGE = 0x8 FALLOC_FL_INSERT_RANGE = 0x20 FALLOC_FL_KEEP_SIZE = 0x1 @@ -1799,6 +1804,8 @@ const ( LANDLOCK_ACCESS_NET_BIND_TCP = 0x1 LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2 LANDLOCK_CREATE_RULESET_VERSION = 0x1 + LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET = 0x1 + LANDLOCK_SCOPE_SIGNAL = 0x2 LINUX_REBOOT_CMD_CAD_OFF = 0x0 LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef LINUX_REBOOT_CMD_HALT = 0xcdef0123 @@ -1924,6 +1931,7 @@ const ( MNT_FORCE = 0x1 MNT_ID_REQ_SIZE_VER0 = 0x18 MNT_ID_REQ_SIZE_VER1 = 0x20 + MNT_NS_INFO_SIZE_VER0 = 0x10 MODULE_INIT_COMPRESSED_FILE = 0x4 MODULE_INIT_IGNORE_MODVERSIONS = 0x1 MODULE_INIT_IGNORE_VERMAGIC = 0x2 @@ -2970,6 +2978,7 @@ const ( RWF_WRITE_LIFE_NOT_SET = 0x0 SCHED_BATCH = 0x3 SCHED_DEADLINE = 0x6 + SCHED_EXT = 0x7 SCHED_FIFO = 0x1 SCHED_FLAG_ALL = 0x7f SCHED_FLAG_DL_OVERRUN = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 0c00cb3f3af..c0d45e32050 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -109,6 +109,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -297,6 +298,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -335,6 +338,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index dfb364554dd..c731d24f025 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ 
b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -109,6 +109,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -298,6 +299,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -336,6 +339,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index d46dcf78abc..680018a4a7c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -303,6 +304,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -341,6 +344,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 3af3248a7f2..a63909f308d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -112,6 +112,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -205,6 +206,7 @@ const ( PERF_EVENT_IOC_SET_BPF = 0x40042408 PERF_EVENT_IOC_SET_FILTER = 0x40082406 PERF_EVENT_IOC_SET_OUTPUT = 0x2405 + POE_MAGIC = 0x504f4530 PPPIOCATTACH = 0x4004743d PPPIOCATTCHAN = 0x40047438 PPPIOCBRIDGECHAN = 0x40047435 @@ -294,6 +296,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -332,6 +336,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 292bcf0283d..9b0a2573fe3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -109,6 +109,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -290,6 +291,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -328,6 +331,9 @@ const ( 
SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 782b7110fa1..958e6e0645a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 @@ -296,6 +297,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -334,6 +337,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 84973fd9271..50c7f25bd16 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 @@ -296,6 +297,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -334,6 +337,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 6d9cbc3b274..ced21d66d95 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 @@ -296,6 +297,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -334,6 +337,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 5f9fedbce02..226c0441902 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 @@ -296,6 +297,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 
0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -334,6 +337,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index bb0026ee0c4..3122737cd46 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x4000 ICANON = 0x100 IEXTEN = 0x400 @@ -351,6 +352,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -389,6 +392,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 46120db5c9a..eb5d3467edf 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x4000 ICANON = 0x100 IEXTEN = 0x400 @@ -355,6 +356,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -393,6 +396,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 5c951634fbe..e921ebc60b7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x4000 ICANON = 0x100 IEXTEN = 0x400 @@ -355,6 +356,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -393,6 +396,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 11a84d5af20..38ba81c55c1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 
0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -287,6 +288,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -325,6 +328,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index f78c4617cac..71f0400977b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -359,6 +360,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -397,6 +400,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index aeb777c3442..c44a313322c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -112,6 +112,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -350,6 +351,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x58 + SCM_DEVMEM_LINEAR = 0x57 SCM_TIMESTAMPING = 0x23 SCM_TIMESTAMPING_OPT_STATS = 0x38 SCM_TIMESTAMPING_PKTINFO = 0x3c @@ -436,6 +439,9 @@ const ( SO_CNX_ADVICE = 0x37 SO_COOKIE = 0x3b SO_DETACH_REUSEPORT_BPF = 0x47 + SO_DEVMEM_DMABUF = 0x58 + SO_DEVMEM_DONTNEED = 0x59 + SO_DEVMEM_LINEAR = 0x57 SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index d003c3d4378..17c53bd9b33 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -462,11 +462,14 @@ type FdSet struct { const ( SizeofIfMsghdr = 0x70 + SizeofIfMsghdr2 = 0xa0 SizeofIfData = 0x60 + SizeofIfData64 = 0x80 SizeofIfaMsghdr = 0x14 SizeofIfmaMsghdr = 0x10 SizeofIfmaMsghdr2 = 0x14 SizeofRtMsghdr = 0x5c + SizeofRtMsghdr2 = 0x5c SizeofRtMetrics = 0x38 ) @@ -480,6 +483,20 @@ type IfMsghdr struct { Data IfData } +type IfMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Snd_len int32 + Snd_maxlen int32 + Snd_drops int32 + Timer int32 + Data IfData64 +} + type IfData struct { Type uint8 Typelen uint8 @@ -512,6 +529,34 @@ type IfData struct { Reserved2 uint32 } +type IfData64 struct { + Type uint8 + Typelen uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Recvquota uint8 + Xmitquota uint8 + 
Unused1 uint8 + Mtu uint32 + Metric uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Recvtiming uint32 + Xmittiming uint32 + Lastchange Timeval32 +} + type IfaMsghdr struct { Msglen uint16 Version uint8 @@ -557,6 +602,21 @@ type RtMsghdr struct { Rmx RtMetrics } +type RtMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Flags int32 + Addrs int32 + Refcnt int32 + Parentflags int32 + Reserved int32 + Use int32 + Inits uint32 + Rmx RtMetrics +} + type RtMetrics struct { Locks uint32 Mtu uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 0d45a941aae..2392226a743 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -462,11 +462,14 @@ type FdSet struct { const ( SizeofIfMsghdr = 0x70 + SizeofIfMsghdr2 = 0xa0 SizeofIfData = 0x60 + SizeofIfData64 = 0x80 SizeofIfaMsghdr = 0x14 SizeofIfmaMsghdr = 0x10 SizeofIfmaMsghdr2 = 0x14 SizeofRtMsghdr = 0x5c + SizeofRtMsghdr2 = 0x5c SizeofRtMetrics = 0x38 ) @@ -480,6 +483,20 @@ type IfMsghdr struct { Data IfData } +type IfMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Snd_len int32 + Snd_maxlen int32 + Snd_drops int32 + Timer int32 + Data IfData64 +} + type IfData struct { Type uint8 Typelen uint8 @@ -512,6 +529,34 @@ type IfData struct { Reserved2 uint32 } +type IfData64 struct { + Type uint8 + Typelen uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Recvquota uint8 + Xmitquota uint8 + Unused1 uint8 + Mtu uint32 + Metric uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Recvtiming uint32 + Xmittiming uint32 + Lastchange Timeval32 +} + type IfaMsghdr struct { Msglen uint16 Version uint8 @@ -557,6 +602,21 @@ type RtMsghdr struct { Rmx RtMetrics } +type RtMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Flags int32 + Addrs int32 + Refcnt int32 + Parentflags int32 + Reserved int32 + Use int32 + Inits uint32 + Rmx RtMetrics +} + type RtMetrics struct { Locks uint32 Mtu uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 8daaf3faf4c..5537148dcbb 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -2594,8 +2594,8 @@ const ( SOF_TIMESTAMPING_BIND_PHC = 0x8000 SOF_TIMESTAMPING_OPT_ID_TCP = 0x10000 - SOF_TIMESTAMPING_LAST = 0x10000 - SOF_TIMESTAMPING_MASK = 0x1ffff + SOF_TIMESTAMPING_LAST = 0x20000 + SOF_TIMESTAMPING_MASK = 0x3ffff SCM_TSTAMP_SND = 0x0 SCM_TSTAMP_SCHED = 0x1 @@ -3541,7 +3541,7 @@ type Nhmsg struct { type NexthopGrp struct { Id uint32 Weight uint8 - Resvd1 uint8 + High uint8 Resvd2 uint16 } @@ -3802,7 +3802,7 @@ const ( ETHTOOL_MSG_PSE_GET = 0x24 ETHTOOL_MSG_PSE_SET = 0x25 ETHTOOL_MSG_RSS_GET = 0x26 - ETHTOOL_MSG_USER_MAX = 0x2c + ETHTOOL_MSG_USER_MAX = 0x2d ETHTOOL_MSG_KERNEL_NONE = 0x0 ETHTOOL_MSG_STRSET_GET_REPLY = 0x1 ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2 @@ -3842,7 +3842,7 @@ const ( ETHTOOL_MSG_MODULE_NTF = 0x24 ETHTOOL_MSG_PSE_GET_REPLY = 0x25 ETHTOOL_MSG_RSS_GET_REPLY = 0x26 - ETHTOOL_MSG_KERNEL_MAX = 0x2c + 
ETHTOOL_MSG_KERNEL_MAX = 0x2e ETHTOOL_FLAG_COMPACT_BITSETS = 0x1 ETHTOOL_FLAG_OMIT_REPLY = 0x2 ETHTOOL_FLAG_STATS = 0x4 @@ -3850,7 +3850,7 @@ const ( ETHTOOL_A_HEADER_DEV_INDEX = 0x1 ETHTOOL_A_HEADER_DEV_NAME = 0x2 ETHTOOL_A_HEADER_FLAGS = 0x3 - ETHTOOL_A_HEADER_MAX = 0x3 + ETHTOOL_A_HEADER_MAX = 0x4 ETHTOOL_A_BITSET_BIT_UNSPEC = 0x0 ETHTOOL_A_BITSET_BIT_INDEX = 0x1 ETHTOOL_A_BITSET_BIT_NAME = 0x2 @@ -4031,11 +4031,11 @@ const ( ETHTOOL_A_CABLE_RESULT_UNSPEC = 0x0 ETHTOOL_A_CABLE_RESULT_PAIR = 0x1 ETHTOOL_A_CABLE_RESULT_CODE = 0x2 - ETHTOOL_A_CABLE_RESULT_MAX = 0x2 + ETHTOOL_A_CABLE_RESULT_MAX = 0x3 ETHTOOL_A_CABLE_FAULT_LENGTH_UNSPEC = 0x0 ETHTOOL_A_CABLE_FAULT_LENGTH_PAIR = 0x1 ETHTOOL_A_CABLE_FAULT_LENGTH_CM = 0x2 - ETHTOOL_A_CABLE_FAULT_LENGTH_MAX = 0x2 + ETHTOOL_A_CABLE_FAULT_LENGTH_MAX = 0x3 ETHTOOL_A_CABLE_TEST_NTF_STATUS_UNSPEC = 0x0 ETHTOOL_A_CABLE_TEST_NTF_STATUS_STARTED = 0x1 ETHTOOL_A_CABLE_TEST_NTF_STATUS_COMPLETED = 0x2 @@ -4200,7 +4200,8 @@ type ( } PtpSysOffsetExtended struct { Samples uint32 - Rsv [3]uint32 + Clockid int32 + Rsv [2]uint32 Ts [25][3]PtpClockTime } PtpSysOffsetPrecise struct { @@ -4399,6 +4400,7 @@ const ( type LandlockRulesetAttr struct { Access_fs uint64 Access_net uint64 + Scoped uint64 } type LandlockPathBeneathAttr struct { diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go index 4e613cf6335..3ca814f54d4 100644 --- a/vendor/golang.org/x/sys/windows/dll_windows.go +++ b/vendor/golang.org/x/sys/windows/dll_windows.go @@ -43,8 +43,8 @@ type DLL struct { // LoadDLL loads DLL file into memory. // // Warning: using LoadDLL without an absolute path name is subject to -// DLL preloading attacks. To safely load a system DLL, use LazyDLL -// with System set to true, or use LoadLibraryEx directly. +// DLL preloading attacks. To safely load a system DLL, use [NewLazySystemDLL], +// or use [LoadLibraryEx] directly. func LoadDLL(name string) (dll *DLL, err error) { namep, err := UTF16PtrFromString(name) if err != nil { @@ -271,6 +271,9 @@ func (d *LazyDLL) NewProc(name string) *LazyProc { } // NewLazyDLL creates new LazyDLL associated with DLL file. +// +// Warning: using NewLazyDLL without an absolute path name is subject to +// DLL preloading attacks. To safely load a system DLL, use [NewLazySystemDLL]. 
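The doc changes above steer callers away from LoadDLL and NewLazyDLL toward NewLazySystemDLL, which resolves only against the Windows system directory and is therefore not subject to DLL preloading attacks. A minimal sketch of the recommended pattern; the GetTickCount64 procedure is only an illustration:

```go
//go:build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	// NewLazySystemDLL searches only the Windows system directory,
	// sidestepping DLL preloading from the current directory.
	kernel32 := windows.NewLazySystemDLL("kernel32.dll")

	// The DLL and the procedure are resolved lazily on first Call.
	getTickCount64 := kernel32.NewProc("GetTickCount64")
	ms, _, _ := getTickCount64.Call()
	fmt.Printf("system uptime: %d ms\n", ms)
}
```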
func NewLazyDLL(name string) *LazyDLL { return &LazyDLL{Name: name} } @@ -410,7 +413,3 @@ func loadLibraryEx(name string, system bool) (*DLL, error) { } return &DLL{Name: name, Handle: h}, nil } - -type errString string - -func (s errString) Error() string { return string(s) } diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 4510bfc3f5c..4a325438685 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -168,6 +168,8 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) [failretval==InvalidHandle] = CreateNamedPipeW //sys ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) //sys DisconnectNamedPipe(pipe Handle) (err error) +//sys GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err error) +//sys GetNamedPipeServerProcessId(pipe Handle, serverProcessID *uint32) (err error) //sys GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) //sys GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW //sys SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) = SetNamedPipeHandleState diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 51311e205ff..9d138de5fed 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -176,6 +176,7 @@ const ( WAIT_FAILED = 0xFFFFFFFF // Access rights for process. 
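The //sys declarations above add GetNamedPipeClientProcessId and GetNamedPipeServerProcessId. A hedged sketch of querying a pipe's peer PIDs; the pipe name is made up, and the client-side PID is only meaningful once a client has actually connected:

```go
//go:build windows

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/windows"
)

func main() {
	name, err := windows.UTF16PtrFromString(`\\.\pipe\example`)
	if err != nil {
		log.Fatal(err)
	}
	// Create a server end of a named pipe purely for demonstration.
	pipe, err := windows.CreateNamedPipe(name,
		windows.PIPE_ACCESS_DUPLEX, windows.PIPE_TYPE_BYTE,
		1, 4096, 4096, 0, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer windows.CloseHandle(pipe)

	// The server-side PID is available immediately and should be our own.
	var server uint32
	if err := windows.GetNamedPipeServerProcessId(pipe, &server); err != nil {
		log.Fatal(err)
	}
	fmt.Println("server pid:", server, "self:", windows.GetCurrentProcessId())
}
```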
+ PROCESS_ALL_ACCESS = 0xFFFF PROCESS_CREATE_PROCESS = 0x0080 PROCESS_CREATE_THREAD = 0x0002 PROCESS_DUP_HANDLE = 0x0040 diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 6f5252880ce..01c0716c2c4 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -280,8 +280,10 @@ var ( procGetMaximumProcessorCount = modkernel32.NewProc("GetMaximumProcessorCount") procGetModuleFileNameW = modkernel32.NewProc("GetModuleFileNameW") procGetModuleHandleExW = modkernel32.NewProc("GetModuleHandleExW") + procGetNamedPipeClientProcessId = modkernel32.NewProc("GetNamedPipeClientProcessId") procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") + procGetNamedPipeServerProcessId = modkernel32.NewProc("GetNamedPipeServerProcessId") procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult") procGetPriorityClass = modkernel32.NewProc("GetPriorityClass") procGetProcAddress = modkernel32.NewProc("GetProcAddress") @@ -1612,7 +1614,7 @@ func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, si } func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) { - r0, _, _ := syscall.SyscallN(procCancelMibChangeNotify2.Addr(), uintptr(notificationHandle)) + r0, _, _ := syscall.Syscall(procCancelMibChangeNotify2.Addr(), 1, uintptr(notificationHandle), 0, 0) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1652,7 +1654,7 @@ func GetIfEntry(pIfRow *MibIfRow) (errcode error) { } func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) { - r0, _, _ := syscall.SyscallN(procGetIfEntry2Ex.Addr(), uintptr(level), uintptr(unsafe.Pointer(row))) + r0, _, _ := syscall.Syscall(procGetIfEntry2Ex.Addr(), 2, uintptr(level), uintptr(unsafe.Pointer(row)), 0) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1660,7 +1662,7 @@ func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) { } func GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) { - r0, _, _ := syscall.SyscallN(procGetUnicastIpAddressEntry.Addr(), uintptr(unsafe.Pointer(row))) + r0, _, _ := syscall.Syscall(procGetUnicastIpAddressEntry.Addr(), 1, uintptr(unsafe.Pointer(row)), 0, 0) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1672,7 +1674,7 @@ func NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsa if initialNotification { _p0 = 1 } - r0, _, _ := syscall.SyscallN(procNotifyIpInterfaceChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle))) + r0, _, _ := syscall.Syscall6(procNotifyIpInterfaceChange.Addr(), 5, uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)), 0) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1684,7 +1686,7 @@ func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext if initialNotification { _p0 = 1 } - r0, _, _ := syscall.SyscallN(procNotifyUnicastIpAddressChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle))) + r0, _, _ := syscall.Syscall6(procNotifyUnicastIpAddressChange.Addr(), 5, uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)), 0) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -2446,6 +2448,14 @@ func GetModuleHandleEx(flags 
uint32, moduleName *uint16, module *Handle) (err er return } +func GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetNamedPipeClientProcessId.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(clientProcessID)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) if r1 == 0 { @@ -2462,6 +2472,14 @@ func GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint3 return } +func GetNamedPipeServerProcessId(pipe Handle, serverProcessID *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetNamedPipeServerProcessId.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(serverProcessID)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) { var _p0 uint32 if wait { diff --git a/vendor/golang.org/x/text/encoding/htmlindex/htmlindex.go b/vendor/golang.org/x/text/encoding/htmlindex/htmlindex.go new file mode 100644 index 00000000000..bdc7d15dda4 --- /dev/null +++ b/vendor/golang.org/x/text/encoding/htmlindex/htmlindex.go @@ -0,0 +1,86 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go + +// Package htmlindex maps character set encoding names to Encodings as +// recommended by the W3C for use in HTML 5. See http://www.w3.org/TR/encoding. +package htmlindex + +// TODO: perhaps have a "bare" version of the index (used by this package) that +// is not pre-loaded with all encodings. Global variables in encodings prevent +// the linker from being able to purge unneeded tables. This means that +// referencing all encodings, as this package does for the default index, links +// in all encodings unconditionally. +// +// This issue can be solved by either solving the linking issue (see +// https://github.com/golang/go/issues/6330) or refactoring the encoding tables +// (e.g. moving the tables to internal packages that do not use global +// variables). + +// TODO: allow canonicalizing names + +import ( + "errors" + "strings" + "sync" + + "golang.org/x/text/encoding" + "golang.org/x/text/encoding/internal/identifier" + "golang.org/x/text/language" +) + +var ( + errInvalidName = errors.New("htmlindex: invalid encoding name") + errUnknown = errors.New("htmlindex: unknown Encoding") + errUnsupported = errors.New("htmlindex: this encoding is not supported") +) + +var ( + matcherOnce sync.Once + matcher language.Matcher +) + +// LanguageDefault returns the canonical name of the default encoding for a +// given language. +func LanguageDefault(tag language.Tag) string { + matcherOnce.Do(func() { + tags := []language.Tag{} + for _, t := range strings.Split(locales, " ") { + tags = append(tags, language.MustParse(t)) + } + matcher = language.NewMatcher(tags, language.PreferSameScript(true)) + }) + _, i, _ := matcher.Match(tag) + return canonical[localeMap[i]] // Default is Windows-1252. 
+} + +// Get returns an Encoding for one of the names listed in +// http://www.w3.org/TR/encoding using the Default Index. Matching is case- +// insensitive. +func Get(name string) (encoding.Encoding, error) { + x, ok := nameMap[strings.ToLower(strings.TrimSpace(name))] + if !ok { + return nil, errInvalidName + } + return encodings[x], nil +} + +// Name reports the canonical name of the given Encoding. It will return +// an error if e is not associated with a supported encoding scheme. +func Name(e encoding.Encoding) (string, error) { + id, ok := e.(identifier.Interface) + if !ok { + return "", errUnknown + } + mib, _ := id.ID() + if mib == 0 { + return "", errUnknown + } + v, ok := mibMap[mib] + if !ok { + return "", errUnsupported + } + return canonical[v], nil +} diff --git a/vendor/golang.org/x/text/encoding/htmlindex/map.go b/vendor/golang.org/x/text/encoding/htmlindex/map.go new file mode 100644 index 00000000000..c61439045d0 --- /dev/null +++ b/vendor/golang.org/x/text/encoding/htmlindex/map.go @@ -0,0 +1,105 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package htmlindex + +import ( + "golang.org/x/text/encoding" + "golang.org/x/text/encoding/charmap" + "golang.org/x/text/encoding/internal/identifier" + "golang.org/x/text/encoding/japanese" + "golang.org/x/text/encoding/korean" + "golang.org/x/text/encoding/simplifiedchinese" + "golang.org/x/text/encoding/traditionalchinese" + "golang.org/x/text/encoding/unicode" +) + +// mibMap maps a MIB identifier to an htmlEncoding index. +var mibMap = map[identifier.MIB]htmlEncoding{ + identifier.UTF8: utf8, + identifier.UTF16BE: utf16be, + identifier.UTF16LE: utf16le, + identifier.IBM866: ibm866, + identifier.ISOLatin2: iso8859_2, + identifier.ISOLatin3: iso8859_3, + identifier.ISOLatin4: iso8859_4, + identifier.ISOLatinCyrillic: iso8859_5, + identifier.ISOLatinArabic: iso8859_6, + identifier.ISOLatinGreek: iso8859_7, + identifier.ISOLatinHebrew: iso8859_8, + identifier.ISO88598I: iso8859_8I, + identifier.ISOLatin6: iso8859_10, + identifier.ISO885913: iso8859_13, + identifier.ISO885914: iso8859_14, + identifier.ISO885915: iso8859_15, + identifier.ISO885916: iso8859_16, + identifier.KOI8R: koi8r, + identifier.KOI8U: koi8u, + identifier.Macintosh: macintosh, + identifier.MacintoshCyrillic: macintoshCyrillic, + identifier.Windows874: windows874, + identifier.Windows1250: windows1250, + identifier.Windows1251: windows1251, + identifier.Windows1252: windows1252, + identifier.Windows1253: windows1253, + identifier.Windows1254: windows1254, + identifier.Windows1255: windows1255, + identifier.Windows1256: windows1256, + identifier.Windows1257: windows1257, + identifier.Windows1258: windows1258, + identifier.XUserDefined: xUserDefined, + identifier.GBK: gbk, + identifier.GB18030: gb18030, + identifier.Big5: big5, + identifier.EUCPkdFmtJapanese: eucjp, + identifier.ISO2022JP: iso2022jp, + identifier.ShiftJIS: shiftJIS, + identifier.EUCKR: euckr, + identifier.Replacement: replacement, +} + +// encodings maps the internal htmlEncoding to an Encoding. +// TODO: consider using a reusable index in encoding/internal. 
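The newly vendored htmlindex package maps encoding labels from the WHATWG Encoding standard to Encodings. A small usage sketch; note that, per the tables above, legacy labels such as "latin1" (and even "ascii") are aliases of windows-1252:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/text/encoding/htmlindex"
	"golang.org/x/text/language"
)

func main() {
	// Lookup is case-insensitive and ignores surrounding whitespace.
	enc, err := htmlindex.Get("  Latin1 ")
	if err != nil {
		log.Fatal(err)
	}

	// Name reports the canonical label for an Encoding.
	name, err := htmlindex.Name(enc)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(name) // windows-1252

	// LanguageDefault picks the legacy default encoding for a language.
	fmt.Println(htmlindex.LanguageDefault(language.Japanese)) // shift_jis
}
```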
+var encodings = [numEncodings]encoding.Encoding{ + utf8: unicode.UTF8, + ibm866: charmap.CodePage866, + iso8859_2: charmap.ISO8859_2, + iso8859_3: charmap.ISO8859_3, + iso8859_4: charmap.ISO8859_4, + iso8859_5: charmap.ISO8859_5, + iso8859_6: charmap.ISO8859_6, + iso8859_7: charmap.ISO8859_7, + iso8859_8: charmap.ISO8859_8, + iso8859_8I: charmap.ISO8859_8I, + iso8859_10: charmap.ISO8859_10, + iso8859_13: charmap.ISO8859_13, + iso8859_14: charmap.ISO8859_14, + iso8859_15: charmap.ISO8859_15, + iso8859_16: charmap.ISO8859_16, + koi8r: charmap.KOI8R, + koi8u: charmap.KOI8U, + macintosh: charmap.Macintosh, + windows874: charmap.Windows874, + windows1250: charmap.Windows1250, + windows1251: charmap.Windows1251, + windows1252: charmap.Windows1252, + windows1253: charmap.Windows1253, + windows1254: charmap.Windows1254, + windows1255: charmap.Windows1255, + windows1256: charmap.Windows1256, + windows1257: charmap.Windows1257, + windows1258: charmap.Windows1258, + macintoshCyrillic: charmap.MacintoshCyrillic, + gbk: simplifiedchinese.GBK, + gb18030: simplifiedchinese.GB18030, + big5: traditionalchinese.Big5, + eucjp: japanese.EUCJP, + iso2022jp: japanese.ISO2022JP, + shiftJIS: japanese.ShiftJIS, + euckr: korean.EUCKR, + replacement: encoding.Replacement, + utf16be: unicode.UTF16(unicode.BigEndian, unicode.IgnoreBOM), + utf16le: unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM), + xUserDefined: charmap.XUserDefined, +} diff --git a/vendor/golang.org/x/text/encoding/htmlindex/tables.go b/vendor/golang.org/x/text/encoding/htmlindex/tables.go new file mode 100644 index 00000000000..9e6daa8965c --- /dev/null +++ b/vendor/golang.org/x/text/encoding/htmlindex/tables.go @@ -0,0 +1,362 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
+ +package htmlindex + +type htmlEncoding byte + +const ( + utf8 htmlEncoding = iota + ibm866 + iso8859_2 + iso8859_3 + iso8859_4 + iso8859_5 + iso8859_6 + iso8859_7 + iso8859_8 + iso8859_8I + iso8859_10 + iso8859_13 + iso8859_14 + iso8859_15 + iso8859_16 + koi8r + koi8u + macintosh + windows874 + windows1250 + windows1251 + windows1252 + windows1253 + windows1254 + windows1255 + windows1256 + windows1257 + windows1258 + macintoshCyrillic + gbk + gb18030 + big5 + eucjp + iso2022jp + shiftJIS + euckr + replacement + utf16be + utf16le + xUserDefined + numEncodings +) + +var canonical = [numEncodings]string{ + "utf-8", + "ibm866", + "iso-8859-2", + "iso-8859-3", + "iso-8859-4", + "iso-8859-5", + "iso-8859-6", + "iso-8859-7", + "iso-8859-8", + "iso-8859-8-i", + "iso-8859-10", + "iso-8859-13", + "iso-8859-14", + "iso-8859-15", + "iso-8859-16", + "koi8-r", + "koi8-u", + "macintosh", + "windows-874", + "windows-1250", + "windows-1251", + "windows-1252", + "windows-1253", + "windows-1254", + "windows-1255", + "windows-1256", + "windows-1257", + "windows-1258", + "x-mac-cyrillic", + "gbk", + "gb18030", + "big5", + "euc-jp", + "iso-2022-jp", + "shift_jis", + "euc-kr", + "replacement", + "utf-16be", + "utf-16le", + "x-user-defined", +} + +var nameMap = map[string]htmlEncoding{ + "unicode-1-1-utf-8": utf8, + "unicode11utf8": utf8, + "unicode20utf8": utf8, + "utf-8": utf8, + "utf8": utf8, + "x-unicode20utf8": utf8, + "866": ibm866, + "cp866": ibm866, + "csibm866": ibm866, + "ibm866": ibm866, + "csisolatin2": iso8859_2, + "iso-8859-2": iso8859_2, + "iso-ir-101": iso8859_2, + "iso8859-2": iso8859_2, + "iso88592": iso8859_2, + "iso_8859-2": iso8859_2, + "iso_8859-2:1987": iso8859_2, + "l2": iso8859_2, + "latin2": iso8859_2, + "csisolatin3": iso8859_3, + "iso-8859-3": iso8859_3, + "iso-ir-109": iso8859_3, + "iso8859-3": iso8859_3, + "iso88593": iso8859_3, + "iso_8859-3": iso8859_3, + "iso_8859-3:1988": iso8859_3, + "l3": iso8859_3, + "latin3": iso8859_3, + "csisolatin4": iso8859_4, + "iso-8859-4": iso8859_4, + "iso-ir-110": iso8859_4, + "iso8859-4": iso8859_4, + "iso88594": iso8859_4, + "iso_8859-4": iso8859_4, + "iso_8859-4:1988": iso8859_4, + "l4": iso8859_4, + "latin4": iso8859_4, + "csisolatincyrillic": iso8859_5, + "cyrillic": iso8859_5, + "iso-8859-5": iso8859_5, + "iso-ir-144": iso8859_5, + "iso8859-5": iso8859_5, + "iso88595": iso8859_5, + "iso_8859-5": iso8859_5, + "iso_8859-5:1988": iso8859_5, + "arabic": iso8859_6, + "asmo-708": iso8859_6, + "csiso88596e": iso8859_6, + "csiso88596i": iso8859_6, + "csisolatinarabic": iso8859_6, + "ecma-114": iso8859_6, + "iso-8859-6": iso8859_6, + "iso-8859-6-e": iso8859_6, + "iso-8859-6-i": iso8859_6, + "iso-ir-127": iso8859_6, + "iso8859-6": iso8859_6, + "iso88596": iso8859_6, + "iso_8859-6": iso8859_6, + "iso_8859-6:1987": iso8859_6, + "csisolatingreek": iso8859_7, + "ecma-118": iso8859_7, + "elot_928": iso8859_7, + "greek": iso8859_7, + "greek8": iso8859_7, + "iso-8859-7": iso8859_7, + "iso-ir-126": iso8859_7, + "iso8859-7": iso8859_7, + "iso88597": iso8859_7, + "iso_8859-7": iso8859_7, + "iso_8859-7:1987": iso8859_7, + "sun_eu_greek": iso8859_7, + "csiso88598e": iso8859_8, + "csisolatinhebrew": iso8859_8, + "hebrew": iso8859_8, + "iso-8859-8": iso8859_8, + "iso-8859-8-e": iso8859_8, + "iso-ir-138": iso8859_8, + "iso8859-8": iso8859_8, + "iso88598": iso8859_8, + "iso_8859-8": iso8859_8, + "iso_8859-8:1988": iso8859_8, + "visual": iso8859_8, + "csiso88598i": iso8859_8I, + "iso-8859-8-i": iso8859_8I, + "logical": iso8859_8I, + "csisolatin6": iso8859_10, + 
"iso-8859-10": iso8859_10, + "iso-ir-157": iso8859_10, + "iso8859-10": iso8859_10, + "iso885910": iso8859_10, + "l6": iso8859_10, + "latin6": iso8859_10, + "iso-8859-13": iso8859_13, + "iso8859-13": iso8859_13, + "iso885913": iso8859_13, + "iso-8859-14": iso8859_14, + "iso8859-14": iso8859_14, + "iso885914": iso8859_14, + "csisolatin9": iso8859_15, + "iso-8859-15": iso8859_15, + "iso8859-15": iso8859_15, + "iso885915": iso8859_15, + "iso_8859-15": iso8859_15, + "l9": iso8859_15, + "iso-8859-16": iso8859_16, + "cskoi8r": koi8r, + "koi": koi8r, + "koi8": koi8r, + "koi8-r": koi8r, + "koi8_r": koi8r, + "koi8-ru": koi8u, + "koi8-u": koi8u, + "csmacintosh": macintosh, + "mac": macintosh, + "macintosh": macintosh, + "x-mac-roman": macintosh, + "dos-874": windows874, + "iso-8859-11": windows874, + "iso8859-11": windows874, + "iso885911": windows874, + "tis-620": windows874, + "windows-874": windows874, + "cp1250": windows1250, + "windows-1250": windows1250, + "x-cp1250": windows1250, + "cp1251": windows1251, + "windows-1251": windows1251, + "x-cp1251": windows1251, + "ansi_x3.4-1968": windows1252, + "ascii": windows1252, + "cp1252": windows1252, + "cp819": windows1252, + "csisolatin1": windows1252, + "ibm819": windows1252, + "iso-8859-1": windows1252, + "iso-ir-100": windows1252, + "iso8859-1": windows1252, + "iso88591": windows1252, + "iso_8859-1": windows1252, + "iso_8859-1:1987": windows1252, + "l1": windows1252, + "latin1": windows1252, + "us-ascii": windows1252, + "windows-1252": windows1252, + "x-cp1252": windows1252, + "cp1253": windows1253, + "windows-1253": windows1253, + "x-cp1253": windows1253, + "cp1254": windows1254, + "csisolatin5": windows1254, + "iso-8859-9": windows1254, + "iso-ir-148": windows1254, + "iso8859-9": windows1254, + "iso88599": windows1254, + "iso_8859-9": windows1254, + "iso_8859-9:1989": windows1254, + "l5": windows1254, + "latin5": windows1254, + "windows-1254": windows1254, + "x-cp1254": windows1254, + "cp1255": windows1255, + "windows-1255": windows1255, + "x-cp1255": windows1255, + "cp1256": windows1256, + "windows-1256": windows1256, + "x-cp1256": windows1256, + "cp1257": windows1257, + "windows-1257": windows1257, + "x-cp1257": windows1257, + "cp1258": windows1258, + "windows-1258": windows1258, + "x-cp1258": windows1258, + "x-mac-cyrillic": macintoshCyrillic, + "x-mac-ukrainian": macintoshCyrillic, + "chinese": gbk, + "csgb2312": gbk, + "csiso58gb231280": gbk, + "gb2312": gbk, + "gb_2312": gbk, + "gb_2312-80": gbk, + "gbk": gbk, + "iso-ir-58": gbk, + "x-gbk": gbk, + "gb18030": gb18030, + "big5": big5, + "big5-hkscs": big5, + "cn-big5": big5, + "csbig5": big5, + "x-x-big5": big5, + "cseucpkdfmtjapanese": eucjp, + "euc-jp": eucjp, + "x-euc-jp": eucjp, + "csiso2022jp": iso2022jp, + "iso-2022-jp": iso2022jp, + "csshiftjis": shiftJIS, + "ms932": shiftJIS, + "ms_kanji": shiftJIS, + "shift-jis": shiftJIS, + "shift_jis": shiftJIS, + "sjis": shiftJIS, + "windows-31j": shiftJIS, + "x-sjis": shiftJIS, + "cseuckr": euckr, + "csksc56011987": euckr, + "euc-kr": euckr, + "iso-ir-149": euckr, + "korean": euckr, + "ks_c_5601-1987": euckr, + "ks_c_5601-1989": euckr, + "ksc5601": euckr, + "ksc_5601": euckr, + "windows-949": euckr, + "csiso2022kr": replacement, + "hz-gb-2312": replacement, + "iso-2022-cn": replacement, + "iso-2022-cn-ext": replacement, + "iso-2022-kr": replacement, + "replacement": replacement, + "unicodefffe": utf16be, + "utf-16be": utf16be, + "csunicode": utf16le, + "iso-10646-ucs-2": utf16le, + "ucs-2": utf16le, + "unicode": utf16le, + "unicodefeff": utf16le, 
+ "utf-16": utf16le, + "utf-16le": utf16le, + "x-user-defined": xUserDefined, +} + +var localeMap = []htmlEncoding{ + windows1252, // und_Latn + windows1256, // ar + windows1251, // ba + windows1251, // be + windows1251, // bg + windows1250, // cs + iso8859_7, // el + windows1257, // et + windows1256, // fa + windows1255, // he + windows1250, // hr + iso8859_2, // hu + shiftJIS, // ja + windows1251, // kk + euckr, // ko + windows1254, // ku + windows1251, // ky + windows1257, // lt + windows1257, // lv + windows1251, // mk + iso8859_2, // pl + windows1251, // ru + windows1251, // sah + windows1250, // sk + iso8859_2, // sl + windows1251, // sr + windows1251, // tg + windows874, // th + windows1254, // tr + windows1251, // tt + windows1251, // uk + windows1258, // vi + gb18030, // zh-hans + big5, // zh-hant +} + +const locales = "und_Latn ar ba be bg cs el et fa he hr hu ja kk ko ku ky lt lv mk pl ru sah sk sl sr tg th tr tt uk vi zh-hans zh-hant" diff --git a/vendor/golang.org/x/text/internal/language/common.go b/vendor/golang.org/x/text/internal/language/common.go new file mode 100644 index 00000000000..cdfdb749718 --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/common.go @@ -0,0 +1,16 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package language + +// This file contains code common to the maketables.go and the package code. + +// AliasType is the type of an alias in AliasMap. +type AliasType int8 + +const ( + Deprecated AliasType = iota + Macro + Legacy + + AliasTypeUnknown AliasType = -1 +) diff --git a/vendor/golang.org/x/text/internal/language/compact.go b/vendor/golang.org/x/text/internal/language/compact.go new file mode 100644 index 00000000000..46a0015074f --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/compact.go @@ -0,0 +1,29 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package language + +// CompactCoreInfo is a compact integer with the three core tags encoded. +type CompactCoreInfo uint32 + +// GetCompactCore generates a uint32 value that is guaranteed to be unique for +// different language, region, and script values. +func GetCompactCore(t Tag) (cci CompactCoreInfo, ok bool) { + if t.LangID > langNoIndexOffset { + return 0, false + } + cci |= CompactCoreInfo(t.LangID) << (8 + 12) + cci |= CompactCoreInfo(t.ScriptID) << 12 + cci |= CompactCoreInfo(t.RegionID) + return cci, true +} + +// Tag generates a tag from c. +func (c CompactCoreInfo) Tag() Tag { + return Tag{ + LangID: Language(c >> 20), + RegionID: Region(c & 0x3ff), + ScriptID: Script(c>>12) & 0xff, + } +} diff --git a/vendor/golang.org/x/text/internal/language/compact/compact.go b/vendor/golang.org/x/text/internal/language/compact/compact.go new file mode 100644 index 00000000000..1b36935ef7b --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/compact/compact.go @@ -0,0 +1,61 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package compact defines a compact representation of language tags. +// +// Common language tags (at least all for which locale information is defined +// in CLDR) are assigned a unique index. 
Each Tag is associated with such an +// ID for selecting language-related resources (such as translations) as well +// as one for selecting regional defaults (currency, number formatting, etc.) +// +// It may want to export this functionality at some point, but at this point +// this is only available for use within x/text. +package compact // import "golang.org/x/text/internal/language/compact" + +import ( + "sort" + "strings" + + "golang.org/x/text/internal/language" +) + +// ID is an integer identifying a single tag. +type ID uint16 + +func getCoreIndex(t language.Tag) (id ID, ok bool) { + cci, ok := language.GetCompactCore(t) + if !ok { + return 0, false + } + i := sort.Search(len(coreTags), func(i int) bool { + return cci <= coreTags[i] + }) + if i == len(coreTags) || coreTags[i] != cci { + return 0, false + } + return ID(i), true +} + +// Parent returns the ID of the parent or the root ID if id is already the root. +func (id ID) Parent() ID { + return parents[id] +} + +// Tag converts id to an internal language Tag. +func (id ID) Tag() language.Tag { + if int(id) >= len(coreTags) { + return specialTags[int(id)-len(coreTags)] + } + return coreTags[id].Tag() +} + +var specialTags []language.Tag + +func init() { + tags := strings.Split(specialTagsStr, " ") + specialTags = make([]language.Tag, len(tags)) + for i, t := range tags { + specialTags[i] = language.MustParse(t) + } +} diff --git a/vendor/golang.org/x/text/internal/language/compact/language.go b/vendor/golang.org/x/text/internal/language/compact/language.go new file mode 100644 index 00000000000..8c1b6666fb8 --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/compact/language.go @@ -0,0 +1,260 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go gen_index.go -output tables.go +//go:generate go run gen_parents.go + +package compact + +// TODO: Remove above NOTE after: +// - verifying that tables are dropped correctly (most notably matcher tables). + +import ( + "strings" + + "golang.org/x/text/internal/language" +) + +// Tag represents a BCP 47 language tag. It is used to specify an instance of a +// specific language or locale. All language tag values are guaranteed to be +// well-formed. +type Tag struct { + // NOTE: exported tags will become part of the public API. + language ID + locale ID + full fullTag // always a language.Tag for now. +} + +const _und = 0 + +type fullTag interface { + IsRoot() bool + Parent() language.Tag +} + +// Make a compact Tag from a fully specified internal language Tag. +func Make(t language.Tag) (tag Tag) { + if region := t.TypeForKey("rg"); len(region) == 6 && region[2:] == "zzzz" { + if r, err := language.ParseRegion(region[:2]); err == nil { + tFull := t + t, _ = t.SetTypeForKey("rg", "") + // TODO: should we not consider "va" for the language tag? + var exact1, exact2 bool + tag.language, exact1 = FromTag(t) + t.RegionID = r + tag.locale, exact2 = FromTag(t) + if !exact1 || !exact2 { + tag.full = tFull + } + return tag + } + } + lang, ok := FromTag(t) + tag.language = lang + tag.locale = lang + if !ok { + tag.full = t + } + return tag +} + +// Tag returns an internal language Tag version of this tag. 
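GetCompactCore above packs the three core subtags into a single uint32: the language index from bit 20 up, the script in bits 12 through 19, and the region in the low bits (Tag() masks 10 of them). A standalone sketch of the same layout; the type and function names here are illustrative, not the package's:

```go
package main

import "fmt"

// core mimics language.CompactCoreInfo: lang<<20 | script<<12 | region.
type core uint32

func pack(lang, script, region uint32) core {
	return core(lang<<20 | script<<12 | region)
}

// unpack mirrors CompactCoreInfo.Tag: 8 bits of script, 10 bits of region.
func (c core) unpack() (lang, script, region uint32) {
	return uint32(c >> 20), uint32(c>>12) & 0xff, uint32(c) & 0x3ff
}

func main() {
	c := pack(123, 45, 678)
	fmt.Println(c.unpack()) // hypothetical IDs round-trip: 123 45 678
}
```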
+func (t Tag) Tag() language.Tag { + if t.full != nil { + return t.full.(language.Tag) + } + tag := t.language.Tag() + if t.language != t.locale { + loc := t.locale.Tag() + tag, _ = tag.SetTypeForKey("rg", strings.ToLower(loc.RegionID.String())+"zzzz") + } + return tag +} + +// IsCompact reports whether this tag is fully defined in terms of ID. +func (t *Tag) IsCompact() bool { + return t.full == nil +} + +// MayHaveVariants reports whether a tag may have variants. If it returns false +// it is guaranteed the tag does not have variants. +func (t Tag) MayHaveVariants() bool { + return t.full != nil || int(t.language) >= len(coreTags) +} + +// MayHaveExtensions reports whether a tag may have extensions. If it returns +// false it is guaranteed the tag does not have them. +func (t Tag) MayHaveExtensions() bool { + return t.full != nil || + int(t.language) >= len(coreTags) || + t.language != t.locale +} + +// IsRoot returns true if t is equal to language "und". +func (t Tag) IsRoot() bool { + if t.full != nil { + return t.full.IsRoot() + } + return t.language == _und +} + +// Parent returns the CLDR parent of t. In CLDR, missing fields in data for a +// specific language are substituted with fields from the parent language. +// The parent for a language may change for newer versions of CLDR. +func (t Tag) Parent() Tag { + if t.full != nil { + return Make(t.full.Parent()) + } + if t.language != t.locale { + // Simulate stripping -u-rg-xxxxxx + return Tag{language: t.language, locale: t.language} + } + // TODO: use parent lookup table once cycle from internal package is + // removed. Probably by internalizing the table and declaring this fast + // enough. + // lang := compactID(internal.Parent(uint16(t.language))) + lang, _ := FromTag(t.language.Tag().Parent()) + return Tag{language: lang, locale: lang} +} + +// nextToken returns token t and the rest of the string. +func nextToken(s string) (t, tail string) { + p := strings.Index(s[1:], "-") + if p == -1 { + return s[1:], "" + } + p++ + return s[1:p], s[p:] +} + +// LanguageID returns an index, where 0 <= index < NumCompactTags, for tags +// for which data exists in the text repository.The index will change over time +// and should not be stored in persistent storage. If t does not match a compact +// index, exact will be false and the compact index will be returned for the +// first match after repeatedly taking the Parent of t. +func LanguageID(t Tag) (id ID, exact bool) { + return t.language, t.full == nil +} + +// RegionalID returns the ID for the regional variant of this tag. This index is +// used to indicate region-specific overrides, such as default currency, default +// calendar and week data, default time cycle, and default measurement system +// and unit preferences. +// +// For instance, the tag en-GB-u-rg-uszzzz specifies British English with US +// settings for currency, number formatting, etc. The CompactIndex for this tag +// will be that for en-GB, while the RegionalID will be the one corresponding to +// en-US. +func RegionalID(t Tag) (id ID, exact bool) { + return t.locale, t.full == nil +} + +// LanguageTag returns t stripped of regional variant indicators. +// +// At the moment this means it is stripped of a regional and variant subtag "rg" +// and "va" in the "u" extension. 
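RegionalID, described above, lets a tag such as en-GB-u-rg-uszzzz keep British English for language resources while taking US regional defaults. A sketch with the public x/text/language API showing how the "rg" key rides along; the commented outputs are my reading of the docs above, not verified against every CLDR version:

```go
package main

import (
	"fmt"

	"golang.org/x/text/language"
)

func main() {
	// British English, with US regional defaults (currency, number
	// formatting, calendar, ...) requested via the "rg" key.
	tag := language.MustParse("en-GB-u-rg-uszzzz")

	base, _ := tag.Base()
	region, _ := tag.Region()
	fmt.Println(base, region)         // en GB
	fmt.Println(tag.TypeForKey("rg")) // uszzzz
}
```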
+func (t Tag) LanguageTag() Tag { + if t.full == nil { + return Tag{language: t.language, locale: t.language} + } + tt := t.Tag() + tt.SetTypeForKey("rg", "") + tt.SetTypeForKey("va", "") + return Make(tt) +} + +// RegionalTag returns the regional variant of the tag. +// +// At the moment this means that the region is set from the regional subtag +// "rg" in the "u" extension. +func (t Tag) RegionalTag() Tag { + rt := Tag{language: t.locale, locale: t.locale} + if t.full == nil { + return rt + } + b := language.Builder{} + tag := t.Tag() + // tag, _ = tag.SetTypeForKey("rg", "") + b.SetTag(t.locale.Tag()) + if v := tag.Variants(); v != "" { + for _, v := range strings.Split(v, "-") { + b.AddVariant(v) + } + } + for _, e := range tag.Extensions() { + b.AddExt(e) + } + return t +} + +// FromTag reports closest matching ID for an internal language Tag. +func FromTag(t language.Tag) (id ID, exact bool) { + // TODO: perhaps give more frequent tags a lower index. + // TODO: we could make the indexes stable. This will excluded some + // possibilities for optimization, so don't do this quite yet. + exact = true + + b, s, r := t.Raw() + if t.HasString() { + if t.IsPrivateUse() { + // We have no entries for user-defined tags. + return 0, false + } + hasExtra := false + if t.HasVariants() { + if t.HasExtensions() { + build := language.Builder{} + build.SetTag(language.Tag{LangID: b, ScriptID: s, RegionID: r}) + build.AddVariant(t.Variants()) + exact = false + t = build.Make() + } + hasExtra = true + } else if _, ok := t.Extension('u'); ok { + // TODO: va may mean something else. Consider not considering it. + // Strip all but the 'va' entry. + old := t + variant := t.TypeForKey("va") + t = language.Tag{LangID: b, ScriptID: s, RegionID: r} + if variant != "" { + t, _ = t.SetTypeForKey("va", variant) + hasExtra = true + } + exact = old == t + } else { + exact = false + } + if hasExtra { + // We have some variants. + for i, s := range specialTags { + if s == t { + return ID(i + len(coreTags)), exact + } + } + exact = false + } + } + if x, ok := getCoreIndex(t); ok { + return x, exact + } + exact = false + if r != 0 && s == 0 { + // Deal with cases where an extra script is inserted for the region. + t, _ := t.Maximize() + if x, ok := getCoreIndex(t); ok { + return x, exact + } + } + for t = t.Parent(); t != root; t = t.Parent() { + // No variants specified: just compare core components. + // The key has the form lllssrrr, where l, s, and r are nibbles for + // respectively the langID, scriptID, and regionID. + if x, ok := getCoreIndex(t); ok { + return x, exact + } + } + return 0, exact +} + +var root = language.Tag{} diff --git a/vendor/golang.org/x/text/internal/language/compact/parents.go b/vendor/golang.org/x/text/internal/language/compact/parents.go new file mode 100644 index 00000000000..8d810723c75 --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/compact/parents.go @@ -0,0 +1,120 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package compact + +// parents maps a compact index of a tag to the compact index of the parent of +// this tag. 
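The parents table below encodes CLDR inheritance: the value at index i is the compact ID of tag i's parent, and ID 0 ("und") is the root. Locale-data lookup walks this chain until it finds an entry. An illustrative walk over a toy table; the IDs are made up and the real values live in tables.go:

```go
package main

import "fmt"

// A toy table in the same shape as the generated parents slice:
// index = tag ID, value = parent ID, 0 = root ("und").
var parents = []uint16{
	0, // 0: und (root)
	0, // 1: en     -> und
	1, // 2: en-001 -> en
	2, // 3: en-AU  -> en-001
}

// chain lists id and all of its ancestors, ending at the root.
func chain(id uint16) []uint16 {
	ids := []uint16{id}
	for id != 0 {
		id = parents[id]
		ids = append(ids, id)
	}
	return ids
}

func main() {
	fmt.Println(chain(3)) // [3 2 1 0]
}
```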
+var parents = []ID{ // 775 elements + // Entry 0 - 3F + 0x0000, 0x0000, 0x0001, 0x0001, 0x0000, 0x0004, 0x0000, 0x0006, + 0x0000, 0x0008, 0x0000, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, + 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, + 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, + 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x0000, + 0x0000, 0x0028, 0x0000, 0x002a, 0x0000, 0x002c, 0x0000, 0x0000, + 0x002f, 0x002e, 0x002e, 0x0000, 0x0033, 0x0000, 0x0035, 0x0000, + 0x0037, 0x0000, 0x0039, 0x0000, 0x003b, 0x0000, 0x0000, 0x003e, + // Entry 40 - 7F + 0x0000, 0x0040, 0x0040, 0x0000, 0x0043, 0x0043, 0x0000, 0x0046, + 0x0000, 0x0048, 0x0000, 0x0000, 0x004b, 0x004a, 0x004a, 0x0000, + 0x004f, 0x004f, 0x004f, 0x004f, 0x0000, 0x0054, 0x0054, 0x0000, + 0x0057, 0x0000, 0x0059, 0x0000, 0x005b, 0x0000, 0x005d, 0x005d, + 0x0000, 0x0060, 0x0000, 0x0062, 0x0000, 0x0064, 0x0000, 0x0066, + 0x0066, 0x0000, 0x0069, 0x0000, 0x006b, 0x006b, 0x006b, 0x006b, + 0x006b, 0x006b, 0x006b, 0x0000, 0x0073, 0x0000, 0x0075, 0x0000, + 0x0077, 0x0000, 0x0000, 0x007a, 0x0000, 0x007c, 0x0000, 0x007e, + // Entry 80 - BF + 0x0000, 0x0080, 0x0080, 0x0000, 0x0083, 0x0083, 0x0000, 0x0086, + 0x0087, 0x0087, 0x0087, 0x0086, 0x0088, 0x0087, 0x0087, 0x0087, + 0x0086, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0088, + 0x0087, 0x0087, 0x0087, 0x0087, 0x0088, 0x0087, 0x0088, 0x0087, + 0x0087, 0x0088, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, + 0x0087, 0x0087, 0x0087, 0x0086, 0x0087, 0x0087, 0x0087, 0x0087, + 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, + 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0086, 0x0087, 0x0086, + // Entry C0 - FF + 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, + 0x0088, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, + 0x0086, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0088, 0x0087, + 0x0087, 0x0088, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, + 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0086, 0x0086, 0x0087, + 0x0087, 0x0086, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0000, + 0x00ef, 0x0000, 0x00f1, 0x00f2, 0x00f2, 0x00f2, 0x00f2, 0x00f2, + 0x00f2, 0x00f2, 0x00f2, 0x00f2, 0x00f1, 0x00f2, 0x00f1, 0x00f1, + // Entry 100 - 13F + 0x00f2, 0x00f2, 0x00f1, 0x00f2, 0x00f2, 0x00f2, 0x00f2, 0x00f1, + 0x00f2, 0x00f2, 0x00f2, 0x00f2, 0x00f2, 0x00f2, 0x0000, 0x010e, + 0x0000, 0x0110, 0x0000, 0x0112, 0x0000, 0x0114, 0x0114, 0x0000, + 0x0117, 0x0117, 0x0117, 0x0117, 0x0000, 0x011c, 0x0000, 0x011e, + 0x0000, 0x0120, 0x0120, 0x0000, 0x0123, 0x0123, 0x0123, 0x0123, + 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, + 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, + 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, + // Entry 140 - 17F + 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, + 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, + 0x0123, 0x0123, 0x0000, 0x0152, 0x0000, 0x0154, 0x0000, 0x0156, + 0x0000, 0x0158, 0x0000, 0x015a, 0x0000, 0x015c, 0x015c, 0x015c, + 0x0000, 0x0160, 0x0000, 0x0000, 0x0163, 0x0000, 0x0165, 0x0000, + 0x0167, 0x0167, 0x0167, 0x0000, 0x016b, 0x0000, 0x016d, 0x0000, + 0x016f, 0x0000, 0x0171, 0x0171, 0x0000, 0x0174, 0x0000, 0x0176, + 0x0000, 0x0178, 0x0000, 0x017a, 0x0000, 0x017c, 0x0000, 0x017e, + // Entry 180 - 1BF + 0x0000, 0x0000, 0x0000, 0x0182, 0x0000, 0x0184, 0x0184, 0x0184, + 0x0184, 0x0000, 0x0000, 0x0000, 0x018b, 0x0000, 0x0000, 0x018e, + 0x0000, 0x0000, 0x0191, 0x0000, 0x0000, 0x0000, 0x0195, 0x0000, + 0x0197, 
0x0000, 0x0000, 0x019a, 0x0000, 0x0000, 0x019d, 0x0000, + 0x019f, 0x0000, 0x01a1, 0x0000, 0x01a3, 0x0000, 0x01a5, 0x0000, + 0x01a7, 0x0000, 0x01a9, 0x0000, 0x01ab, 0x0000, 0x01ad, 0x0000, + 0x01af, 0x0000, 0x01b1, 0x01b1, 0x0000, 0x01b4, 0x0000, 0x01b6, + 0x0000, 0x01b8, 0x0000, 0x01ba, 0x0000, 0x01bc, 0x0000, 0x0000, + // Entry 1C0 - 1FF + 0x01bf, 0x0000, 0x01c1, 0x0000, 0x01c3, 0x0000, 0x01c5, 0x0000, + 0x01c7, 0x0000, 0x01c9, 0x0000, 0x01cb, 0x01cb, 0x01cb, 0x01cb, + 0x0000, 0x01d0, 0x0000, 0x01d2, 0x01d2, 0x0000, 0x01d5, 0x0000, + 0x01d7, 0x0000, 0x01d9, 0x0000, 0x01db, 0x0000, 0x01dd, 0x0000, + 0x01df, 0x01df, 0x0000, 0x01e2, 0x0000, 0x01e4, 0x0000, 0x01e6, + 0x0000, 0x01e8, 0x0000, 0x01ea, 0x0000, 0x01ec, 0x0000, 0x01ee, + 0x0000, 0x01f0, 0x0000, 0x0000, 0x01f3, 0x0000, 0x01f5, 0x01f5, + 0x01f5, 0x0000, 0x01f9, 0x0000, 0x01fb, 0x0000, 0x01fd, 0x0000, + // Entry 200 - 23F + 0x01ff, 0x0000, 0x0000, 0x0202, 0x0000, 0x0204, 0x0204, 0x0000, + 0x0207, 0x0000, 0x0209, 0x0209, 0x0000, 0x020c, 0x020c, 0x0000, + 0x020f, 0x020f, 0x020f, 0x020f, 0x020f, 0x020f, 0x020f, 0x0000, + 0x0217, 0x0000, 0x0219, 0x0000, 0x021b, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0221, 0x0000, 0x0000, 0x0224, 0x0000, 0x0226, + 0x0226, 0x0000, 0x0229, 0x0000, 0x022b, 0x022b, 0x0000, 0x0000, + 0x022f, 0x022e, 0x022e, 0x0000, 0x0000, 0x0234, 0x0000, 0x0236, + 0x0000, 0x0238, 0x0000, 0x0244, 0x023a, 0x0244, 0x0244, 0x0244, + // Entry 240 - 27F + 0x0244, 0x0244, 0x0244, 0x0244, 0x023a, 0x0244, 0x0244, 0x0000, + 0x0247, 0x0247, 0x0247, 0x0000, 0x024b, 0x0000, 0x024d, 0x0000, + 0x024f, 0x024f, 0x0000, 0x0252, 0x0000, 0x0254, 0x0254, 0x0254, + 0x0254, 0x0254, 0x0254, 0x0000, 0x025b, 0x0000, 0x025d, 0x0000, + 0x025f, 0x0000, 0x0261, 0x0000, 0x0263, 0x0000, 0x0265, 0x0000, + 0x0000, 0x0268, 0x0268, 0x0268, 0x0000, 0x026c, 0x0000, 0x026e, + 0x0000, 0x0270, 0x0000, 0x0000, 0x0000, 0x0274, 0x0273, 0x0273, + 0x0000, 0x0278, 0x0000, 0x027a, 0x0000, 0x027c, 0x0000, 0x0000, + // Entry 280 - 2BF + 0x0000, 0x0000, 0x0281, 0x0000, 0x0000, 0x0284, 0x0000, 0x0286, + 0x0286, 0x0286, 0x0286, 0x0000, 0x028b, 0x028b, 0x028b, 0x0000, + 0x028f, 0x028f, 0x028f, 0x028f, 0x028f, 0x0000, 0x0295, 0x0295, + 0x0295, 0x0295, 0x0000, 0x0000, 0x0000, 0x0000, 0x029d, 0x029d, + 0x029d, 0x0000, 0x02a1, 0x02a1, 0x02a1, 0x02a1, 0x0000, 0x0000, + 0x02a7, 0x02a7, 0x02a7, 0x02a7, 0x0000, 0x02ac, 0x0000, 0x02ae, + 0x02ae, 0x0000, 0x02b1, 0x0000, 0x02b3, 0x0000, 0x02b5, 0x02b5, + 0x0000, 0x0000, 0x02b9, 0x0000, 0x0000, 0x0000, 0x02bd, 0x0000, + // Entry 2C0 - 2FF + 0x02bf, 0x02bf, 0x0000, 0x0000, 0x02c3, 0x0000, 0x02c5, 0x0000, + 0x02c7, 0x0000, 0x02c9, 0x0000, 0x02cb, 0x0000, 0x02cd, 0x02cd, + 0x0000, 0x0000, 0x02d1, 0x0000, 0x02d3, 0x02d0, 0x02d0, 0x0000, + 0x0000, 0x02d8, 0x02d7, 0x02d7, 0x0000, 0x0000, 0x02dd, 0x0000, + 0x02df, 0x0000, 0x02e1, 0x0000, 0x0000, 0x02e4, 0x0000, 0x02e6, + 0x0000, 0x0000, 0x02e9, 0x0000, 0x02eb, 0x0000, 0x02ed, 0x0000, + 0x02ef, 0x02ef, 0x0000, 0x0000, 0x02f3, 0x02f2, 0x02f2, 0x0000, + 0x02f7, 0x0000, 0x02f9, 0x02f9, 0x02f9, 0x02f9, 0x02f9, 0x0000, + // Entry 300 - 33F + 0x02ff, 0x0300, 0x02ff, 0x0000, 0x0303, 0x0051, 0x00e6, +} // Size: 1574 bytes + +// Total table size 1574 bytes (1KiB); checksum: 895AAF0B diff --git a/vendor/golang.org/x/text/internal/language/compact/tables.go b/vendor/golang.org/x/text/internal/language/compact/tables.go new file mode 100644 index 00000000000..a09ed198a5d --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/compact/tables.go @@ -0,0 +1,1015 @@ +// Code generated by running "go 
generate" in golang.org/x/text. DO NOT EDIT. + +package compact + +import "golang.org/x/text/internal/language" + +// CLDRVersion is the CLDR version from which the tables in this package are derived. +const CLDRVersion = "32" + +// NumCompactTags is the number of common tags. The maximum tag is +// NumCompactTags-1. +const NumCompactTags = 775 +const ( + undIndex ID = 0 + afIndex ID = 1 + afNAIndex ID = 2 + afZAIndex ID = 3 + agqIndex ID = 4 + agqCMIndex ID = 5 + akIndex ID = 6 + akGHIndex ID = 7 + amIndex ID = 8 + amETIndex ID = 9 + arIndex ID = 10 + ar001Index ID = 11 + arAEIndex ID = 12 + arBHIndex ID = 13 + arDJIndex ID = 14 + arDZIndex ID = 15 + arEGIndex ID = 16 + arEHIndex ID = 17 + arERIndex ID = 18 + arILIndex ID = 19 + arIQIndex ID = 20 + arJOIndex ID = 21 + arKMIndex ID = 22 + arKWIndex ID = 23 + arLBIndex ID = 24 + arLYIndex ID = 25 + arMAIndex ID = 26 + arMRIndex ID = 27 + arOMIndex ID = 28 + arPSIndex ID = 29 + arQAIndex ID = 30 + arSAIndex ID = 31 + arSDIndex ID = 32 + arSOIndex ID = 33 + arSSIndex ID = 34 + arSYIndex ID = 35 + arTDIndex ID = 36 + arTNIndex ID = 37 + arYEIndex ID = 38 + arsIndex ID = 39 + asIndex ID = 40 + asINIndex ID = 41 + asaIndex ID = 42 + asaTZIndex ID = 43 + astIndex ID = 44 + astESIndex ID = 45 + azIndex ID = 46 + azCyrlIndex ID = 47 + azCyrlAZIndex ID = 48 + azLatnIndex ID = 49 + azLatnAZIndex ID = 50 + basIndex ID = 51 + basCMIndex ID = 52 + beIndex ID = 53 + beBYIndex ID = 54 + bemIndex ID = 55 + bemZMIndex ID = 56 + bezIndex ID = 57 + bezTZIndex ID = 58 + bgIndex ID = 59 + bgBGIndex ID = 60 + bhIndex ID = 61 + bmIndex ID = 62 + bmMLIndex ID = 63 + bnIndex ID = 64 + bnBDIndex ID = 65 + bnINIndex ID = 66 + boIndex ID = 67 + boCNIndex ID = 68 + boINIndex ID = 69 + brIndex ID = 70 + brFRIndex ID = 71 + brxIndex ID = 72 + brxINIndex ID = 73 + bsIndex ID = 74 + bsCyrlIndex ID = 75 + bsCyrlBAIndex ID = 76 + bsLatnIndex ID = 77 + bsLatnBAIndex ID = 78 + caIndex ID = 79 + caADIndex ID = 80 + caESIndex ID = 81 + caFRIndex ID = 82 + caITIndex ID = 83 + ccpIndex ID = 84 + ccpBDIndex ID = 85 + ccpINIndex ID = 86 + ceIndex ID = 87 + ceRUIndex ID = 88 + cggIndex ID = 89 + cggUGIndex ID = 90 + chrIndex ID = 91 + chrUSIndex ID = 92 + ckbIndex ID = 93 + ckbIQIndex ID = 94 + ckbIRIndex ID = 95 + csIndex ID = 96 + csCZIndex ID = 97 + cuIndex ID = 98 + cuRUIndex ID = 99 + cyIndex ID = 100 + cyGBIndex ID = 101 + daIndex ID = 102 + daDKIndex ID = 103 + daGLIndex ID = 104 + davIndex ID = 105 + davKEIndex ID = 106 + deIndex ID = 107 + deATIndex ID = 108 + deBEIndex ID = 109 + deCHIndex ID = 110 + deDEIndex ID = 111 + deITIndex ID = 112 + deLIIndex ID = 113 + deLUIndex ID = 114 + djeIndex ID = 115 + djeNEIndex ID = 116 + dsbIndex ID = 117 + dsbDEIndex ID = 118 + duaIndex ID = 119 + duaCMIndex ID = 120 + dvIndex ID = 121 + dyoIndex ID = 122 + dyoSNIndex ID = 123 + dzIndex ID = 124 + dzBTIndex ID = 125 + ebuIndex ID = 126 + ebuKEIndex ID = 127 + eeIndex ID = 128 + eeGHIndex ID = 129 + eeTGIndex ID = 130 + elIndex ID = 131 + elCYIndex ID = 132 + elGRIndex ID = 133 + enIndex ID = 134 + en001Index ID = 135 + en150Index ID = 136 + enAGIndex ID = 137 + enAIIndex ID = 138 + enASIndex ID = 139 + enATIndex ID = 140 + enAUIndex ID = 141 + enBBIndex ID = 142 + enBEIndex ID = 143 + enBIIndex ID = 144 + enBMIndex ID = 145 + enBSIndex ID = 146 + enBWIndex ID = 147 + enBZIndex ID = 148 + enCAIndex ID = 149 + enCCIndex ID = 150 + enCHIndex ID = 151 + enCKIndex ID = 152 + enCMIndex ID = 153 + enCXIndex ID = 154 + enCYIndex ID = 155 + enDEIndex ID = 156 + enDGIndex ID = 157 + enDKIndex ID 
= 158 + enDMIndex ID = 159 + enERIndex ID = 160 + enFIIndex ID = 161 + enFJIndex ID = 162 + enFKIndex ID = 163 + enFMIndex ID = 164 + enGBIndex ID = 165 + enGDIndex ID = 166 + enGGIndex ID = 167 + enGHIndex ID = 168 + enGIIndex ID = 169 + enGMIndex ID = 170 + enGUIndex ID = 171 + enGYIndex ID = 172 + enHKIndex ID = 173 + enIEIndex ID = 174 + enILIndex ID = 175 + enIMIndex ID = 176 + enINIndex ID = 177 + enIOIndex ID = 178 + enJEIndex ID = 179 + enJMIndex ID = 180 + enKEIndex ID = 181 + enKIIndex ID = 182 + enKNIndex ID = 183 + enKYIndex ID = 184 + enLCIndex ID = 185 + enLRIndex ID = 186 + enLSIndex ID = 187 + enMGIndex ID = 188 + enMHIndex ID = 189 + enMOIndex ID = 190 + enMPIndex ID = 191 + enMSIndex ID = 192 + enMTIndex ID = 193 + enMUIndex ID = 194 + enMWIndex ID = 195 + enMYIndex ID = 196 + enNAIndex ID = 197 + enNFIndex ID = 198 + enNGIndex ID = 199 + enNLIndex ID = 200 + enNRIndex ID = 201 + enNUIndex ID = 202 + enNZIndex ID = 203 + enPGIndex ID = 204 + enPHIndex ID = 205 + enPKIndex ID = 206 + enPNIndex ID = 207 + enPRIndex ID = 208 + enPWIndex ID = 209 + enRWIndex ID = 210 + enSBIndex ID = 211 + enSCIndex ID = 212 + enSDIndex ID = 213 + enSEIndex ID = 214 + enSGIndex ID = 215 + enSHIndex ID = 216 + enSIIndex ID = 217 + enSLIndex ID = 218 + enSSIndex ID = 219 + enSXIndex ID = 220 + enSZIndex ID = 221 + enTCIndex ID = 222 + enTKIndex ID = 223 + enTOIndex ID = 224 + enTTIndex ID = 225 + enTVIndex ID = 226 + enTZIndex ID = 227 + enUGIndex ID = 228 + enUMIndex ID = 229 + enUSIndex ID = 230 + enVCIndex ID = 231 + enVGIndex ID = 232 + enVIIndex ID = 233 + enVUIndex ID = 234 + enWSIndex ID = 235 + enZAIndex ID = 236 + enZMIndex ID = 237 + enZWIndex ID = 238 + eoIndex ID = 239 + eo001Index ID = 240 + esIndex ID = 241 + es419Index ID = 242 + esARIndex ID = 243 + esBOIndex ID = 244 + esBRIndex ID = 245 + esBZIndex ID = 246 + esCLIndex ID = 247 + esCOIndex ID = 248 + esCRIndex ID = 249 + esCUIndex ID = 250 + esDOIndex ID = 251 + esEAIndex ID = 252 + esECIndex ID = 253 + esESIndex ID = 254 + esGQIndex ID = 255 + esGTIndex ID = 256 + esHNIndex ID = 257 + esICIndex ID = 258 + esMXIndex ID = 259 + esNIIndex ID = 260 + esPAIndex ID = 261 + esPEIndex ID = 262 + esPHIndex ID = 263 + esPRIndex ID = 264 + esPYIndex ID = 265 + esSVIndex ID = 266 + esUSIndex ID = 267 + esUYIndex ID = 268 + esVEIndex ID = 269 + etIndex ID = 270 + etEEIndex ID = 271 + euIndex ID = 272 + euESIndex ID = 273 + ewoIndex ID = 274 + ewoCMIndex ID = 275 + faIndex ID = 276 + faAFIndex ID = 277 + faIRIndex ID = 278 + ffIndex ID = 279 + ffCMIndex ID = 280 + ffGNIndex ID = 281 + ffMRIndex ID = 282 + ffSNIndex ID = 283 + fiIndex ID = 284 + fiFIIndex ID = 285 + filIndex ID = 286 + filPHIndex ID = 287 + foIndex ID = 288 + foDKIndex ID = 289 + foFOIndex ID = 290 + frIndex ID = 291 + frBEIndex ID = 292 + frBFIndex ID = 293 + frBIIndex ID = 294 + frBJIndex ID = 295 + frBLIndex ID = 296 + frCAIndex ID = 297 + frCDIndex ID = 298 + frCFIndex ID = 299 + frCGIndex ID = 300 + frCHIndex ID = 301 + frCIIndex ID = 302 + frCMIndex ID = 303 + frDJIndex ID = 304 + frDZIndex ID = 305 + frFRIndex ID = 306 + frGAIndex ID = 307 + frGFIndex ID = 308 + frGNIndex ID = 309 + frGPIndex ID = 310 + frGQIndex ID = 311 + frHTIndex ID = 312 + frKMIndex ID = 313 + frLUIndex ID = 314 + frMAIndex ID = 315 + frMCIndex ID = 316 + frMFIndex ID = 317 + frMGIndex ID = 318 + frMLIndex ID = 319 + frMQIndex ID = 320 + frMRIndex ID = 321 + frMUIndex ID = 322 + frNCIndex ID = 323 + frNEIndex ID = 324 + frPFIndex ID = 325 + frPMIndex ID = 326 + frREIndex ID = 327 + frRWIndex ID 
= 328 + frSCIndex ID = 329 + frSNIndex ID = 330 + frSYIndex ID = 331 + frTDIndex ID = 332 + frTGIndex ID = 333 + frTNIndex ID = 334 + frVUIndex ID = 335 + frWFIndex ID = 336 + frYTIndex ID = 337 + furIndex ID = 338 + furITIndex ID = 339 + fyIndex ID = 340 + fyNLIndex ID = 341 + gaIndex ID = 342 + gaIEIndex ID = 343 + gdIndex ID = 344 + gdGBIndex ID = 345 + glIndex ID = 346 + glESIndex ID = 347 + gswIndex ID = 348 + gswCHIndex ID = 349 + gswFRIndex ID = 350 + gswLIIndex ID = 351 + guIndex ID = 352 + guINIndex ID = 353 + guwIndex ID = 354 + guzIndex ID = 355 + guzKEIndex ID = 356 + gvIndex ID = 357 + gvIMIndex ID = 358 + haIndex ID = 359 + haGHIndex ID = 360 + haNEIndex ID = 361 + haNGIndex ID = 362 + hawIndex ID = 363 + hawUSIndex ID = 364 + heIndex ID = 365 + heILIndex ID = 366 + hiIndex ID = 367 + hiINIndex ID = 368 + hrIndex ID = 369 + hrBAIndex ID = 370 + hrHRIndex ID = 371 + hsbIndex ID = 372 + hsbDEIndex ID = 373 + huIndex ID = 374 + huHUIndex ID = 375 + hyIndex ID = 376 + hyAMIndex ID = 377 + idIndex ID = 378 + idIDIndex ID = 379 + igIndex ID = 380 + igNGIndex ID = 381 + iiIndex ID = 382 + iiCNIndex ID = 383 + inIndex ID = 384 + ioIndex ID = 385 + isIndex ID = 386 + isISIndex ID = 387 + itIndex ID = 388 + itCHIndex ID = 389 + itITIndex ID = 390 + itSMIndex ID = 391 + itVAIndex ID = 392 + iuIndex ID = 393 + iwIndex ID = 394 + jaIndex ID = 395 + jaJPIndex ID = 396 + jboIndex ID = 397 + jgoIndex ID = 398 + jgoCMIndex ID = 399 + jiIndex ID = 400 + jmcIndex ID = 401 + jmcTZIndex ID = 402 + jvIndex ID = 403 + jwIndex ID = 404 + kaIndex ID = 405 + kaGEIndex ID = 406 + kabIndex ID = 407 + kabDZIndex ID = 408 + kajIndex ID = 409 + kamIndex ID = 410 + kamKEIndex ID = 411 + kcgIndex ID = 412 + kdeIndex ID = 413 + kdeTZIndex ID = 414 + keaIndex ID = 415 + keaCVIndex ID = 416 + khqIndex ID = 417 + khqMLIndex ID = 418 + kiIndex ID = 419 + kiKEIndex ID = 420 + kkIndex ID = 421 + kkKZIndex ID = 422 + kkjIndex ID = 423 + kkjCMIndex ID = 424 + klIndex ID = 425 + klGLIndex ID = 426 + klnIndex ID = 427 + klnKEIndex ID = 428 + kmIndex ID = 429 + kmKHIndex ID = 430 + knIndex ID = 431 + knINIndex ID = 432 + koIndex ID = 433 + koKPIndex ID = 434 + koKRIndex ID = 435 + kokIndex ID = 436 + kokINIndex ID = 437 + ksIndex ID = 438 + ksINIndex ID = 439 + ksbIndex ID = 440 + ksbTZIndex ID = 441 + ksfIndex ID = 442 + ksfCMIndex ID = 443 + kshIndex ID = 444 + kshDEIndex ID = 445 + kuIndex ID = 446 + kwIndex ID = 447 + kwGBIndex ID = 448 + kyIndex ID = 449 + kyKGIndex ID = 450 + lagIndex ID = 451 + lagTZIndex ID = 452 + lbIndex ID = 453 + lbLUIndex ID = 454 + lgIndex ID = 455 + lgUGIndex ID = 456 + lktIndex ID = 457 + lktUSIndex ID = 458 + lnIndex ID = 459 + lnAOIndex ID = 460 + lnCDIndex ID = 461 + lnCFIndex ID = 462 + lnCGIndex ID = 463 + loIndex ID = 464 + loLAIndex ID = 465 + lrcIndex ID = 466 + lrcIQIndex ID = 467 + lrcIRIndex ID = 468 + ltIndex ID = 469 + ltLTIndex ID = 470 + luIndex ID = 471 + luCDIndex ID = 472 + luoIndex ID = 473 + luoKEIndex ID = 474 + luyIndex ID = 475 + luyKEIndex ID = 476 + lvIndex ID = 477 + lvLVIndex ID = 478 + masIndex ID = 479 + masKEIndex ID = 480 + masTZIndex ID = 481 + merIndex ID = 482 + merKEIndex ID = 483 + mfeIndex ID = 484 + mfeMUIndex ID = 485 + mgIndex ID = 486 + mgMGIndex ID = 487 + mghIndex ID = 488 + mghMZIndex ID = 489 + mgoIndex ID = 490 + mgoCMIndex ID = 491 + mkIndex ID = 492 + mkMKIndex ID = 493 + mlIndex ID = 494 + mlINIndex ID = 495 + mnIndex ID = 496 + mnMNIndex ID = 497 + moIndex ID = 498 + mrIndex ID = 499 + mrINIndex ID = 500 + msIndex ID = 501 + msBNIndex ID 
= 502 + msMYIndex ID = 503 + msSGIndex ID = 504 + mtIndex ID = 505 + mtMTIndex ID = 506 + muaIndex ID = 507 + muaCMIndex ID = 508 + myIndex ID = 509 + myMMIndex ID = 510 + mznIndex ID = 511 + mznIRIndex ID = 512 + nahIndex ID = 513 + naqIndex ID = 514 + naqNAIndex ID = 515 + nbIndex ID = 516 + nbNOIndex ID = 517 + nbSJIndex ID = 518 + ndIndex ID = 519 + ndZWIndex ID = 520 + ndsIndex ID = 521 + ndsDEIndex ID = 522 + ndsNLIndex ID = 523 + neIndex ID = 524 + neINIndex ID = 525 + neNPIndex ID = 526 + nlIndex ID = 527 + nlAWIndex ID = 528 + nlBEIndex ID = 529 + nlBQIndex ID = 530 + nlCWIndex ID = 531 + nlNLIndex ID = 532 + nlSRIndex ID = 533 + nlSXIndex ID = 534 + nmgIndex ID = 535 + nmgCMIndex ID = 536 + nnIndex ID = 537 + nnNOIndex ID = 538 + nnhIndex ID = 539 + nnhCMIndex ID = 540 + noIndex ID = 541 + nqoIndex ID = 542 + nrIndex ID = 543 + nsoIndex ID = 544 + nusIndex ID = 545 + nusSSIndex ID = 546 + nyIndex ID = 547 + nynIndex ID = 548 + nynUGIndex ID = 549 + omIndex ID = 550 + omETIndex ID = 551 + omKEIndex ID = 552 + orIndex ID = 553 + orINIndex ID = 554 + osIndex ID = 555 + osGEIndex ID = 556 + osRUIndex ID = 557 + paIndex ID = 558 + paArabIndex ID = 559 + paArabPKIndex ID = 560 + paGuruIndex ID = 561 + paGuruINIndex ID = 562 + papIndex ID = 563 + plIndex ID = 564 + plPLIndex ID = 565 + prgIndex ID = 566 + prg001Index ID = 567 + psIndex ID = 568 + psAFIndex ID = 569 + ptIndex ID = 570 + ptAOIndex ID = 571 + ptBRIndex ID = 572 + ptCHIndex ID = 573 + ptCVIndex ID = 574 + ptGQIndex ID = 575 + ptGWIndex ID = 576 + ptLUIndex ID = 577 + ptMOIndex ID = 578 + ptMZIndex ID = 579 + ptPTIndex ID = 580 + ptSTIndex ID = 581 + ptTLIndex ID = 582 + quIndex ID = 583 + quBOIndex ID = 584 + quECIndex ID = 585 + quPEIndex ID = 586 + rmIndex ID = 587 + rmCHIndex ID = 588 + rnIndex ID = 589 + rnBIIndex ID = 590 + roIndex ID = 591 + roMDIndex ID = 592 + roROIndex ID = 593 + rofIndex ID = 594 + rofTZIndex ID = 595 + ruIndex ID = 596 + ruBYIndex ID = 597 + ruKGIndex ID = 598 + ruKZIndex ID = 599 + ruMDIndex ID = 600 + ruRUIndex ID = 601 + ruUAIndex ID = 602 + rwIndex ID = 603 + rwRWIndex ID = 604 + rwkIndex ID = 605 + rwkTZIndex ID = 606 + sahIndex ID = 607 + sahRUIndex ID = 608 + saqIndex ID = 609 + saqKEIndex ID = 610 + sbpIndex ID = 611 + sbpTZIndex ID = 612 + sdIndex ID = 613 + sdPKIndex ID = 614 + sdhIndex ID = 615 + seIndex ID = 616 + seFIIndex ID = 617 + seNOIndex ID = 618 + seSEIndex ID = 619 + sehIndex ID = 620 + sehMZIndex ID = 621 + sesIndex ID = 622 + sesMLIndex ID = 623 + sgIndex ID = 624 + sgCFIndex ID = 625 + shIndex ID = 626 + shiIndex ID = 627 + shiLatnIndex ID = 628 + shiLatnMAIndex ID = 629 + shiTfngIndex ID = 630 + shiTfngMAIndex ID = 631 + siIndex ID = 632 + siLKIndex ID = 633 + skIndex ID = 634 + skSKIndex ID = 635 + slIndex ID = 636 + slSIIndex ID = 637 + smaIndex ID = 638 + smiIndex ID = 639 + smjIndex ID = 640 + smnIndex ID = 641 + smnFIIndex ID = 642 + smsIndex ID = 643 + snIndex ID = 644 + snZWIndex ID = 645 + soIndex ID = 646 + soDJIndex ID = 647 + soETIndex ID = 648 + soKEIndex ID = 649 + soSOIndex ID = 650 + sqIndex ID = 651 + sqALIndex ID = 652 + sqMKIndex ID = 653 + sqXKIndex ID = 654 + srIndex ID = 655 + srCyrlIndex ID = 656 + srCyrlBAIndex ID = 657 + srCyrlMEIndex ID = 658 + srCyrlRSIndex ID = 659 + srCyrlXKIndex ID = 660 + srLatnIndex ID = 661 + srLatnBAIndex ID = 662 + srLatnMEIndex ID = 663 + srLatnRSIndex ID = 664 + srLatnXKIndex ID = 665 + ssIndex ID = 666 + ssyIndex ID = 667 + stIndex ID = 668 + svIndex ID = 669 + svAXIndex ID = 670 + svFIIndex ID = 671 + svSEIndex ID = 
672 + swIndex ID = 673 + swCDIndex ID = 674 + swKEIndex ID = 675 + swTZIndex ID = 676 + swUGIndex ID = 677 + syrIndex ID = 678 + taIndex ID = 679 + taINIndex ID = 680 + taLKIndex ID = 681 + taMYIndex ID = 682 + taSGIndex ID = 683 + teIndex ID = 684 + teINIndex ID = 685 + teoIndex ID = 686 + teoKEIndex ID = 687 + teoUGIndex ID = 688 + tgIndex ID = 689 + tgTJIndex ID = 690 + thIndex ID = 691 + thTHIndex ID = 692 + tiIndex ID = 693 + tiERIndex ID = 694 + tiETIndex ID = 695 + tigIndex ID = 696 + tkIndex ID = 697 + tkTMIndex ID = 698 + tlIndex ID = 699 + tnIndex ID = 700 + toIndex ID = 701 + toTOIndex ID = 702 + trIndex ID = 703 + trCYIndex ID = 704 + trTRIndex ID = 705 + tsIndex ID = 706 + ttIndex ID = 707 + ttRUIndex ID = 708 + twqIndex ID = 709 + twqNEIndex ID = 710 + tzmIndex ID = 711 + tzmMAIndex ID = 712 + ugIndex ID = 713 + ugCNIndex ID = 714 + ukIndex ID = 715 + ukUAIndex ID = 716 + urIndex ID = 717 + urINIndex ID = 718 + urPKIndex ID = 719 + uzIndex ID = 720 + uzArabIndex ID = 721 + uzArabAFIndex ID = 722 + uzCyrlIndex ID = 723 + uzCyrlUZIndex ID = 724 + uzLatnIndex ID = 725 + uzLatnUZIndex ID = 726 + vaiIndex ID = 727 + vaiLatnIndex ID = 728 + vaiLatnLRIndex ID = 729 + vaiVaiiIndex ID = 730 + vaiVaiiLRIndex ID = 731 + veIndex ID = 732 + viIndex ID = 733 + viVNIndex ID = 734 + voIndex ID = 735 + vo001Index ID = 736 + vunIndex ID = 737 + vunTZIndex ID = 738 + waIndex ID = 739 + waeIndex ID = 740 + waeCHIndex ID = 741 + woIndex ID = 742 + woSNIndex ID = 743 + xhIndex ID = 744 + xogIndex ID = 745 + xogUGIndex ID = 746 + yavIndex ID = 747 + yavCMIndex ID = 748 + yiIndex ID = 749 + yi001Index ID = 750 + yoIndex ID = 751 + yoBJIndex ID = 752 + yoNGIndex ID = 753 + yueIndex ID = 754 + yueHansIndex ID = 755 + yueHansCNIndex ID = 756 + yueHantIndex ID = 757 + yueHantHKIndex ID = 758 + zghIndex ID = 759 + zghMAIndex ID = 760 + zhIndex ID = 761 + zhHansIndex ID = 762 + zhHansCNIndex ID = 763 + zhHansHKIndex ID = 764 + zhHansMOIndex ID = 765 + zhHansSGIndex ID = 766 + zhHantIndex ID = 767 + zhHantHKIndex ID = 768 + zhHantMOIndex ID = 769 + zhHantTWIndex ID = 770 + zuIndex ID = 771 + zuZAIndex ID = 772 + caESvalenciaIndex ID = 773 + enUSuvaposixIndex ID = 774 +) + +var coreTags = []language.CompactCoreInfo{ // 773 elements + // Entry 0 - 1F + 0x00000000, 0x01600000, 0x016000d3, 0x01600162, + 0x01c00000, 0x01c00052, 0x02100000, 0x02100081, + 0x02700000, 0x02700070, 0x03a00000, 0x03a00001, + 0x03a00023, 0x03a00039, 0x03a00063, 0x03a00068, + 0x03a0006c, 0x03a0006d, 0x03a0006e, 0x03a00098, + 0x03a0009c, 0x03a000a2, 0x03a000a9, 0x03a000ad, + 0x03a000b1, 0x03a000ba, 0x03a000bb, 0x03a000ca, + 0x03a000e2, 0x03a000ee, 0x03a000f4, 0x03a00109, + // Entry 20 - 3F + 0x03a0010c, 0x03a00116, 0x03a00118, 0x03a0011d, + 0x03a00121, 0x03a00129, 0x03a0015f, 0x04000000, + 0x04300000, 0x0430009a, 0x04400000, 0x04400130, + 0x04800000, 0x0480006f, 0x05800000, 0x05820000, + 0x05820032, 0x0585b000, 0x0585b032, 0x05e00000, + 0x05e00052, 0x07100000, 0x07100047, 0x07500000, + 0x07500163, 0x07900000, 0x07900130, 0x07e00000, + 0x07e00038, 0x08200000, 0x0a000000, 0x0a0000c4, + // Entry 40 - 5F + 0x0a500000, 0x0a500035, 0x0a50009a, 0x0a900000, + 0x0a900053, 0x0a90009a, 0x0b200000, 0x0b200079, + 0x0b500000, 0x0b50009a, 0x0b700000, 0x0b720000, + 0x0b720033, 0x0b75b000, 0x0b75b033, 0x0d700000, + 0x0d700022, 0x0d70006f, 0x0d700079, 0x0d70009f, + 0x0db00000, 0x0db00035, 0x0db0009a, 0x0dc00000, + 0x0dc00107, 0x0df00000, 0x0df00132, 0x0e500000, + 0x0e500136, 0x0e900000, 0x0e90009c, 0x0e90009d, + // Entry 60 - 7F + 0x0fa00000, 0x0fa0005f, 
0x0fe00000, 0x0fe00107, + 0x10000000, 0x1000007c, 0x10100000, 0x10100064, + 0x10100083, 0x10800000, 0x108000a5, 0x10d00000, + 0x10d0002e, 0x10d00036, 0x10d0004e, 0x10d00061, + 0x10d0009f, 0x10d000b3, 0x10d000b8, 0x11700000, + 0x117000d5, 0x11f00000, 0x11f00061, 0x12400000, + 0x12400052, 0x12800000, 0x12b00000, 0x12b00115, + 0x12d00000, 0x12d00043, 0x12f00000, 0x12f000a5, + // Entry 80 - 9F + 0x13000000, 0x13000081, 0x13000123, 0x13600000, + 0x1360005e, 0x13600088, 0x13900000, 0x13900001, + 0x1390001a, 0x13900025, 0x13900026, 0x1390002d, + 0x1390002e, 0x1390002f, 0x13900034, 0x13900036, + 0x1390003a, 0x1390003d, 0x13900042, 0x13900046, + 0x13900048, 0x13900049, 0x1390004a, 0x1390004e, + 0x13900050, 0x13900052, 0x1390005d, 0x1390005e, + 0x13900061, 0x13900062, 0x13900064, 0x13900065, + // Entry A0 - BF + 0x1390006e, 0x13900073, 0x13900074, 0x13900075, + 0x13900076, 0x1390007c, 0x1390007d, 0x13900080, + 0x13900081, 0x13900082, 0x13900084, 0x1390008b, + 0x1390008d, 0x1390008e, 0x13900097, 0x13900098, + 0x13900099, 0x1390009a, 0x1390009b, 0x139000a0, + 0x139000a1, 0x139000a5, 0x139000a8, 0x139000aa, + 0x139000ae, 0x139000b2, 0x139000b5, 0x139000b6, + 0x139000c0, 0x139000c1, 0x139000c7, 0x139000c8, + // Entry C0 - DF + 0x139000cb, 0x139000cc, 0x139000cd, 0x139000cf, + 0x139000d1, 0x139000d3, 0x139000d6, 0x139000d7, + 0x139000da, 0x139000de, 0x139000e0, 0x139000e1, + 0x139000e7, 0x139000e8, 0x139000e9, 0x139000ec, + 0x139000ed, 0x139000f1, 0x13900108, 0x1390010a, + 0x1390010b, 0x1390010c, 0x1390010d, 0x1390010e, + 0x1390010f, 0x13900110, 0x13900113, 0x13900118, + 0x1390011c, 0x1390011e, 0x13900120, 0x13900126, + // Entry E0 - FF + 0x1390012a, 0x1390012d, 0x1390012e, 0x13900130, + 0x13900132, 0x13900134, 0x13900136, 0x1390013a, + 0x1390013d, 0x1390013e, 0x13900140, 0x13900143, + 0x13900162, 0x13900163, 0x13900165, 0x13c00000, + 0x13c00001, 0x13e00000, 0x13e0001f, 0x13e0002c, + 0x13e0003f, 0x13e00041, 0x13e00048, 0x13e00051, + 0x13e00054, 0x13e00057, 0x13e0005a, 0x13e00066, + 0x13e00069, 0x13e0006a, 0x13e0006f, 0x13e00087, + // Entry 100 - 11F + 0x13e0008a, 0x13e00090, 0x13e00095, 0x13e000d0, + 0x13e000d9, 0x13e000e3, 0x13e000e5, 0x13e000e8, + 0x13e000ed, 0x13e000f2, 0x13e0011b, 0x13e00136, + 0x13e00137, 0x13e0013c, 0x14000000, 0x1400006b, + 0x14500000, 0x1450006f, 0x14600000, 0x14600052, + 0x14800000, 0x14800024, 0x1480009d, 0x14e00000, + 0x14e00052, 0x14e00085, 0x14e000ca, 0x14e00115, + 0x15100000, 0x15100073, 0x15300000, 0x153000e8, + // Entry 120 - 13F + 0x15800000, 0x15800064, 0x15800077, 0x15e00000, + 0x15e00036, 0x15e00037, 0x15e0003a, 0x15e0003b, + 0x15e0003c, 0x15e00049, 0x15e0004b, 0x15e0004c, + 0x15e0004d, 0x15e0004e, 0x15e0004f, 0x15e00052, + 0x15e00063, 0x15e00068, 0x15e00079, 0x15e0007b, + 0x15e0007f, 0x15e00085, 0x15e00086, 0x15e00087, + 0x15e00092, 0x15e000a9, 0x15e000b8, 0x15e000bb, + 0x15e000bc, 0x15e000bf, 0x15e000c0, 0x15e000c4, + // Entry 140 - 15F + 0x15e000c9, 0x15e000ca, 0x15e000cd, 0x15e000d4, + 0x15e000d5, 0x15e000e6, 0x15e000eb, 0x15e00103, + 0x15e00108, 0x15e0010b, 0x15e00115, 0x15e0011d, + 0x15e00121, 0x15e00123, 0x15e00129, 0x15e00140, + 0x15e00141, 0x15e00160, 0x16900000, 0x1690009f, + 0x16d00000, 0x16d000da, 0x16e00000, 0x16e00097, + 0x17e00000, 0x17e0007c, 0x19000000, 0x1900006f, + 0x1a300000, 0x1a30004e, 0x1a300079, 0x1a3000b3, + // Entry 160 - 17F + 0x1a400000, 0x1a40009a, 0x1a900000, 0x1ab00000, + 0x1ab000a5, 0x1ac00000, 0x1ac00099, 0x1b400000, + 0x1b400081, 0x1b4000d5, 0x1b4000d7, 0x1b800000, + 0x1b800136, 0x1bc00000, 0x1bc00098, 0x1be00000, + 0x1be0009a, 
0x1d100000, 0x1d100033, 0x1d100091, + 0x1d200000, 0x1d200061, 0x1d500000, 0x1d500093, + 0x1d700000, 0x1d700028, 0x1e100000, 0x1e100096, + 0x1e700000, 0x1e7000d7, 0x1ea00000, 0x1ea00053, + // Entry 180 - 19F + 0x1f300000, 0x1f500000, 0x1f800000, 0x1f80009e, + 0x1f900000, 0x1f90004e, 0x1f90009f, 0x1f900114, + 0x1f900139, 0x1fa00000, 0x1fb00000, 0x20000000, + 0x200000a3, 0x20300000, 0x20700000, 0x20700052, + 0x20800000, 0x20a00000, 0x20a00130, 0x20e00000, + 0x20f00000, 0x21000000, 0x2100007e, 0x21200000, + 0x21200068, 0x21600000, 0x21700000, 0x217000a5, + 0x21f00000, 0x22300000, 0x22300130, 0x22700000, + // Entry 1A0 - 1BF + 0x2270005b, 0x23400000, 0x234000c4, 0x23900000, + 0x239000a5, 0x24200000, 0x242000af, 0x24400000, + 0x24400052, 0x24500000, 0x24500083, 0x24600000, + 0x246000a5, 0x24a00000, 0x24a000a7, 0x25100000, + 0x2510009a, 0x25400000, 0x254000ab, 0x254000ac, + 0x25600000, 0x2560009a, 0x26a00000, 0x26a0009a, + 0x26b00000, 0x26b00130, 0x26d00000, 0x26d00052, + 0x26e00000, 0x26e00061, 0x27400000, 0x28100000, + // Entry 1C0 - 1DF + 0x2810007c, 0x28a00000, 0x28a000a6, 0x29100000, + 0x29100130, 0x29500000, 0x295000b8, 0x2a300000, + 0x2a300132, 0x2af00000, 0x2af00136, 0x2b500000, + 0x2b50002a, 0x2b50004b, 0x2b50004c, 0x2b50004d, + 0x2b800000, 0x2b8000b0, 0x2bf00000, 0x2bf0009c, + 0x2bf0009d, 0x2c000000, 0x2c0000b7, 0x2c200000, + 0x2c20004b, 0x2c400000, 0x2c4000a5, 0x2c500000, + 0x2c5000a5, 0x2c700000, 0x2c7000b9, 0x2d100000, + // Entry 1E0 - 1FF + 0x2d1000a5, 0x2d100130, 0x2e900000, 0x2e9000a5, + 0x2ed00000, 0x2ed000cd, 0x2f100000, 0x2f1000c0, + 0x2f200000, 0x2f2000d2, 0x2f400000, 0x2f400052, + 0x2ff00000, 0x2ff000c3, 0x30400000, 0x3040009a, + 0x30b00000, 0x30b000c6, 0x31000000, 0x31b00000, + 0x31b0009a, 0x31f00000, 0x31f0003e, 0x31f000d1, + 0x31f0010e, 0x32000000, 0x320000cc, 0x32500000, + 0x32500052, 0x33100000, 0x331000c5, 0x33a00000, + // Entry 200 - 21F + 0x33a0009d, 0x34100000, 0x34500000, 0x345000d3, + 0x34700000, 0x347000db, 0x34700111, 0x34e00000, + 0x34e00165, 0x35000000, 0x35000061, 0x350000da, + 0x35100000, 0x3510009a, 0x351000dc, 0x36700000, + 0x36700030, 0x36700036, 0x36700040, 0x3670005c, + 0x367000da, 0x36700117, 0x3670011c, 0x36800000, + 0x36800052, 0x36a00000, 0x36a000db, 0x36c00000, + 0x36c00052, 0x36f00000, 0x37500000, 0x37600000, + // Entry 220 - 23F + 0x37a00000, 0x38000000, 0x38000118, 0x38700000, + 0x38900000, 0x38900132, 0x39000000, 0x39000070, + 0x390000a5, 0x39500000, 0x3950009a, 0x39800000, + 0x3980007e, 0x39800107, 0x39d00000, 0x39d05000, + 0x39d050e9, 0x39d36000, 0x39d3609a, 0x3a100000, + 0x3b300000, 0x3b3000ea, 0x3bd00000, 0x3bd00001, + 0x3be00000, 0x3be00024, 0x3c000000, 0x3c00002a, + 0x3c000041, 0x3c00004e, 0x3c00005b, 0x3c000087, + // Entry 240 - 25F + 0x3c00008c, 0x3c0000b8, 0x3c0000c7, 0x3c0000d2, + 0x3c0000ef, 0x3c000119, 0x3c000127, 0x3c400000, + 0x3c40003f, 0x3c40006a, 0x3c4000e5, 0x3d400000, + 0x3d40004e, 0x3d900000, 0x3d90003a, 0x3dc00000, + 0x3dc000bd, 0x3dc00105, 0x3de00000, 0x3de00130, + 0x3e200000, 0x3e200047, 0x3e2000a6, 0x3e2000af, + 0x3e2000bd, 0x3e200107, 0x3e200131, 0x3e500000, + 0x3e500108, 0x3e600000, 0x3e600130, 0x3eb00000, + // Entry 260 - 27F + 0x3eb00107, 0x3ec00000, 0x3ec000a5, 0x3f300000, + 0x3f300130, 0x3fa00000, 0x3fa000e9, 0x3fc00000, + 0x3fd00000, 0x3fd00073, 0x3fd000db, 0x3fd0010d, + 0x3ff00000, 0x3ff000d2, 0x40100000, 0x401000c4, + 0x40200000, 0x4020004c, 0x40700000, 0x40800000, + 0x4085b000, 0x4085b0bb, 0x408eb000, 0x408eb0bb, + 0x40c00000, 0x40c000b4, 0x41200000, 0x41200112, + 0x41600000, 0x41600110, 0x41c00000, 0x41d00000, 
+ // Entry 280 - 29F + 0x41e00000, 0x41f00000, 0x41f00073, 0x42200000, + 0x42300000, 0x42300165, 0x42900000, 0x42900063, + 0x42900070, 0x429000a5, 0x42900116, 0x43100000, + 0x43100027, 0x431000c3, 0x4310014e, 0x43200000, + 0x43220000, 0x43220033, 0x432200be, 0x43220106, + 0x4322014e, 0x4325b000, 0x4325b033, 0x4325b0be, + 0x4325b106, 0x4325b14e, 0x43700000, 0x43a00000, + 0x43b00000, 0x44400000, 0x44400031, 0x44400073, + // Entry 2A0 - 2BF + 0x4440010d, 0x44500000, 0x4450004b, 0x445000a5, + 0x44500130, 0x44500132, 0x44e00000, 0x45000000, + 0x4500009a, 0x450000b4, 0x450000d1, 0x4500010e, + 0x46100000, 0x4610009a, 0x46400000, 0x464000a5, + 0x46400132, 0x46700000, 0x46700125, 0x46b00000, + 0x46b00124, 0x46f00000, 0x46f0006e, 0x46f00070, + 0x47100000, 0x47600000, 0x47600128, 0x47a00000, + 0x48000000, 0x48200000, 0x4820012a, 0x48a00000, + // Entry 2C0 - 2DF + 0x48a0005e, 0x48a0012c, 0x48e00000, 0x49400000, + 0x49400107, 0x4a400000, 0x4a4000d5, 0x4a900000, + 0x4a9000bb, 0x4ac00000, 0x4ac00053, 0x4ae00000, + 0x4ae00131, 0x4b400000, 0x4b40009a, 0x4b4000e9, + 0x4bc00000, 0x4bc05000, 0x4bc05024, 0x4bc20000, + 0x4bc20138, 0x4bc5b000, 0x4bc5b138, 0x4be00000, + 0x4be5b000, 0x4be5b0b5, 0x4bef4000, 0x4bef40b5, + 0x4c000000, 0x4c300000, 0x4c30013f, 0x4c900000, + // Entry 2E0 - 2FF + 0x4c900001, 0x4cc00000, 0x4cc00130, 0x4ce00000, + 0x4cf00000, 0x4cf0004e, 0x4e500000, 0x4e500115, + 0x4f200000, 0x4fb00000, 0x4fb00132, 0x50900000, + 0x50900052, 0x51200000, 0x51200001, 0x51800000, + 0x5180003b, 0x518000d7, 0x51f00000, 0x51f3b000, + 0x51f3b053, 0x51f3c000, 0x51f3c08e, 0x52800000, + 0x528000bb, 0x52900000, 0x5293b000, 0x5293b053, + 0x5293b08e, 0x5293b0c7, 0x5293b10e, 0x5293c000, + // Entry 300 - 31F + 0x5293c08e, 0x5293c0c7, 0x5293c12f, 0x52f00000, + 0x52f00162, +} // Size: 3116 bytes + +const specialTagsStr string = "ca-ES-valencia en-US-u-va-posix" + +// Total table size 3147 bytes (3KiB); checksum: 5A8FFFA5 diff --git a/vendor/golang.org/x/text/internal/language/compact/tags.go b/vendor/golang.org/x/text/internal/language/compact/tags.go new file mode 100644 index 00000000000..ca135d295ae --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/compact/tags.go @@ -0,0 +1,91 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package compact + +var ( + und = Tag{} + + Und Tag = Tag{} + + Afrikaans Tag = Tag{language: afIndex, locale: afIndex} + Amharic Tag = Tag{language: amIndex, locale: amIndex} + Arabic Tag = Tag{language: arIndex, locale: arIndex} + ModernStandardArabic Tag = Tag{language: ar001Index, locale: ar001Index} + Azerbaijani Tag = Tag{language: azIndex, locale: azIndex} + Bulgarian Tag = Tag{language: bgIndex, locale: bgIndex} + Bengali Tag = Tag{language: bnIndex, locale: bnIndex} + Catalan Tag = Tag{language: caIndex, locale: caIndex} + Czech Tag = Tag{language: csIndex, locale: csIndex} + Danish Tag = Tag{language: daIndex, locale: daIndex} + German Tag = Tag{language: deIndex, locale: deIndex} + Greek Tag = Tag{language: elIndex, locale: elIndex} + English Tag = Tag{language: enIndex, locale: enIndex} + AmericanEnglish Tag = Tag{language: enUSIndex, locale: enUSIndex} + BritishEnglish Tag = Tag{language: enGBIndex, locale: enGBIndex} + Spanish Tag = Tag{language: esIndex, locale: esIndex} + EuropeanSpanish Tag = Tag{language: esESIndex, locale: esESIndex} + LatinAmericanSpanish Tag = Tag{language: es419Index, locale: es419Index} + Estonian Tag = Tag{language: etIndex, locale: etIndex} + Persian Tag = Tag{language: faIndex, locale: faIndex} + Finnish Tag = Tag{language: fiIndex, locale: fiIndex} + Filipino Tag = Tag{language: filIndex, locale: filIndex} + French Tag = Tag{language: frIndex, locale: frIndex} + CanadianFrench Tag = Tag{language: frCAIndex, locale: frCAIndex} + Gujarati Tag = Tag{language: guIndex, locale: guIndex} + Hebrew Tag = Tag{language: heIndex, locale: heIndex} + Hindi Tag = Tag{language: hiIndex, locale: hiIndex} + Croatian Tag = Tag{language: hrIndex, locale: hrIndex} + Hungarian Tag = Tag{language: huIndex, locale: huIndex} + Armenian Tag = Tag{language: hyIndex, locale: hyIndex} + Indonesian Tag = Tag{language: idIndex, locale: idIndex} + Icelandic Tag = Tag{language: isIndex, locale: isIndex} + Italian Tag = Tag{language: itIndex, locale: itIndex} + Japanese Tag = Tag{language: jaIndex, locale: jaIndex} + Georgian Tag = Tag{language: kaIndex, locale: kaIndex} + Kazakh Tag = Tag{language: kkIndex, locale: kkIndex} + Khmer Tag = Tag{language: kmIndex, locale: kmIndex} + Kannada Tag = Tag{language: knIndex, locale: knIndex} + Korean Tag = Tag{language: koIndex, locale: koIndex} + Kirghiz Tag = Tag{language: kyIndex, locale: kyIndex} + Lao Tag = Tag{language: loIndex, locale: loIndex} + Lithuanian Tag = Tag{language: ltIndex, locale: ltIndex} + Latvian Tag = Tag{language: lvIndex, locale: lvIndex} + Macedonian Tag = Tag{language: mkIndex, locale: mkIndex} + Malayalam Tag = Tag{language: mlIndex, locale: mlIndex} + Mongolian Tag = Tag{language: mnIndex, locale: mnIndex} + Marathi Tag = Tag{language: mrIndex, locale: mrIndex} + Malay Tag = Tag{language: msIndex, locale: msIndex} + Burmese Tag = Tag{language: myIndex, locale: myIndex} + Nepali Tag = Tag{language: neIndex, locale: neIndex} + Dutch Tag = Tag{language: nlIndex, locale: nlIndex} + Norwegian Tag = Tag{language: noIndex, locale: noIndex} + Punjabi Tag = Tag{language: paIndex, locale: paIndex} + Polish Tag = Tag{language: plIndex, locale: plIndex} + Portuguese Tag = Tag{language: ptIndex, locale: ptIndex} + BrazilianPortuguese Tag = Tag{language: ptBRIndex, locale: ptBRIndex} + EuropeanPortuguese Tag = Tag{language: ptPTIndex, locale: ptPTIndex} + Romanian Tag = Tag{language: roIndex, locale: roIndex} + Russian Tag = Tag{language: ruIndex, locale: ruIndex} + Sinhala Tag = Tag{language: siIndex, locale: siIndex} 
+ Slovak Tag = Tag{language: skIndex, locale: skIndex} + Slovenian Tag = Tag{language: slIndex, locale: slIndex} + Albanian Tag = Tag{language: sqIndex, locale: sqIndex} + Serbian Tag = Tag{language: srIndex, locale: srIndex} + SerbianLatin Tag = Tag{language: srLatnIndex, locale: srLatnIndex} + Swedish Tag = Tag{language: svIndex, locale: svIndex} + Swahili Tag = Tag{language: swIndex, locale: swIndex} + Tamil Tag = Tag{language: taIndex, locale: taIndex} + Telugu Tag = Tag{language: teIndex, locale: teIndex} + Thai Tag = Tag{language: thIndex, locale: thIndex} + Turkish Tag = Tag{language: trIndex, locale: trIndex} + Ukrainian Tag = Tag{language: ukIndex, locale: ukIndex} + Urdu Tag = Tag{language: urIndex, locale: urIndex} + Uzbek Tag = Tag{language: uzIndex, locale: uzIndex} + Vietnamese Tag = Tag{language: viIndex, locale: viIndex} + Chinese Tag = Tag{language: zhIndex, locale: zhIndex} + SimplifiedChinese Tag = Tag{language: zhHansIndex, locale: zhHansIndex} + TraditionalChinese Tag = Tag{language: zhHantIndex, locale: zhHantIndex} + Zulu Tag = Tag{language: zuIndex, locale: zuIndex} +) diff --git a/vendor/golang.org/x/text/internal/language/compose.go b/vendor/golang.org/x/text/internal/language/compose.go new file mode 100644 index 00000000000..4ae78e0fa5f --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/compose.go @@ -0,0 +1,167 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package language + +import ( + "sort" + "strings" +) + +// A Builder allows constructing a Tag from individual components. +// Its main user is Compose in the top-level language package. +type Builder struct { + Tag Tag + + private string // the x extension + variants []string + extensions []string +} + +// Make returns a new Tag from the current settings. +func (b *Builder) Make() Tag { + t := b.Tag + + if len(b.extensions) > 0 || len(b.variants) > 0 { + sort.Sort(sortVariants(b.variants)) + sort.Strings(b.extensions) + + if b.private != "" { + b.extensions = append(b.extensions, b.private) + } + n := maxCoreSize + tokenLen(b.variants...) + tokenLen(b.extensions...) + buf := make([]byte, n) + p := t.genCoreBytes(buf) + t.pVariant = byte(p) + p += appendTokens(buf[p:], b.variants...) + t.pExt = uint16(p) + p += appendTokens(buf[p:], b.extensions...) + t.str = string(buf[:p]) + // We may not always need to remake the string, but when or when not + // to do so is rather tricky. + scan := makeScanner(buf[:p]) + t, _ = parse(&scan, "") + return t + + } else if b.private != "" { + t.str = b.private + t.RemakeString() + } + return t +} + +// SetTag copies all the settings from a given Tag. Any previously set values +// are discarded. +func (b *Builder) SetTag(t Tag) { + b.Tag.LangID = t.LangID + b.Tag.RegionID = t.RegionID + b.Tag.ScriptID = t.ScriptID + // TODO: optimize + b.variants = b.variants[:0] + if variants := t.Variants(); variants != "" { + for _, vr := range strings.Split(variants[1:], "-") { + b.variants = append(b.variants, vr) + } + } + b.extensions, b.private = b.extensions[:0], "" + for _, e := range t.Extensions() { + b.AddExt(e) + } +} + +// AddExt adds extension e to the tag. e must be a valid extension as returned +// by Tag.Extension. If the extension already exists, it will be discarded, +// except for a -u extension, where non-existing key-type pairs will be added.
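To make the -u merge rule above concrete, here is a small illustrative sequence (hypothetical inputs; the behavior follows directly from the AddExt body below):

	var b Builder
	b.AddExt("u-co-phonebk") // extensions: ["u-co-phonebk"]
	b.AddExt("u-nu-arabic")  // same 'u' singleton: merged into "u-co-phonebk-nu-arabic"
	b.AddExt("t-en")         // different singleton: stored as its own entry
	b.AddExt("x-private")    // 'x' is held aside and appended last by Make

In practice this Builder is not used directly; as noted above, it is driven by Compose in the public golang.org/x/text/language package.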
+func (b *Builder) AddExt(e string) { + if e[0] == 'x' { + if b.private == "" { + b.private = e + } + return + } + for i, s := range b.extensions { + if s[0] == e[0] { + if e[0] == 'u' { + b.extensions[i] += e[1:] + } + return + } + } + b.extensions = append(b.extensions, e) +} + +// SetExt sets the extension e to the tag. e must be a valid extension as +// returned by Tag.Extension. If the extension already exists, it will be +// overwritten, except for a -u extension, where the individual key-type pairs +// will be set. +func (b *Builder) SetExt(e string) { + if e[0] == 'x' { + b.private = e + return + } + for i, s := range b.extensions { + if s[0] == e[0] { + if e[0] == 'u' { + b.extensions[i] = e + s[1:] + } else { + b.extensions[i] = e + } + return + } + } + b.extensions = append(b.extensions, e) +} + +// AddVariant adds any number of variants. +func (b *Builder) AddVariant(v ...string) { + for _, v := range v { + if v != "" { + b.variants = append(b.variants, v) + } + } +} + +// ClearVariants removes any variants previously added, including those +// copied from a Tag in SetTag. +func (b *Builder) ClearVariants() { + b.variants = b.variants[:0] +} + +// ClearExtensions removes any extensions previously added, including those +// copied from a Tag in SetTag. +func (b *Builder) ClearExtensions() { + b.private = "" + b.extensions = b.extensions[:0] +} + +func tokenLen(token ...string) (n int) { + for _, t := range token { + n += len(t) + 1 + } + return +} + +func appendTokens(b []byte, token ...string) int { + p := 0 + for _, t := range token { + b[p] = '-' + copy(b[p+1:], t) + p += 1 + len(t) + } + return p +} + +type sortVariants []string + +func (s sortVariants) Len() int { + return len(s) +} + +func (s sortVariants) Swap(i, j int) { + s[j], s[i] = s[i], s[j] +} + +func (s sortVariants) Less(i, j int) bool { + return variantIndex[s[i]] < variantIndex[s[j]] +} diff --git a/vendor/golang.org/x/text/internal/language/coverage.go b/vendor/golang.org/x/text/internal/language/coverage.go new file mode 100644 index 00000000000..9b20b88feb8 --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/coverage.go @@ -0,0 +1,28 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package language + +// BaseLanguages returns the list of all supported base languages. It generates +// the list by traversing the internal structures. +func BaseLanguages() []Language { + base := make([]Language, 0, NumLanguages) + for i := 0; i < langNoIndexOffset; i++ { + // We included "und" already for the value 0. + if i != nonCanonicalUnd { + base = append(base, Language(i)) + } + } + i := langNoIndexOffset + for _, v := range langNoIndex { + for k := 0; k < 8; k++ { + if v&1 == 1 { + base = append(base, Language(i)) + } + v >>= 1 + i++ + } + } + return base +} diff --git a/vendor/golang.org/x/text/internal/language/language.go b/vendor/golang.org/x/text/internal/language/language.go new file mode 100644 index 00000000000..09d41c73670 --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/language.go @@ -0,0 +1,627 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:generate go run gen.go gen_common.go -output tables.go + +package language // import "golang.org/x/text/internal/language" + +// TODO: Remove above NOTE after: +// - verifying that tables are dropped correctly (most notably matcher tables). + +import ( + "errors" + "fmt" + "strings" +) + +const ( + // maxCoreSize is the maximum size of a BCP 47 tag without variants and + // extensions. Equals max lang (3) + script (4) + max reg (3) + 2 dashes. + maxCoreSize = 12 + + // max99thPercentileSize is a somewhat arbitrary buffer size that presumably + // is large enough to hold at least 99% of the BCP 47 tags. + max99thPercentileSize = 32 + + // maxSimpleUExtensionSize is the maximum size of a -u extension with one + // key-type pair. Equals len("-u-") + key (2) + dash + max value (8). + maxSimpleUExtensionSize = 14 +) + +// Tag represents a BCP 47 language tag. It is used to specify an instance of a +// specific language or locale. All language tag values are guaranteed to be +// well-formed. The zero value of Tag is Und. +type Tag struct { + // TODO: the following fields have the form TagTypeID. This name is chosen + // to allow refactoring the public package without conflicting with its + // Base, Script, and Region methods. Once the transition is fully completed + // the ID can be stripped from the name. + + LangID Language + RegionID Region + // TODO: we will soon run out of positions for ScriptID. Idea: instead of + // storing lang, region, and ScriptID codes, store only the compact index and + // have a lookup table from this code to its expansion. This greatly speeds + // up table lookup and speeds up common variant cases. + // This will also immediately free up 3 extra bytes. Also, the pVariant + // field can now be moved to the lookup table, as the compact index uniquely + // determines the offset of a possible variant. + ScriptID Script + pVariant byte // offset in str, includes preceding '-' + pExt uint16 // offset of first extension, includes preceding '-' + + // str is the string representation of the Tag. It will only be used if the + // tag has variants or extensions. + str string +} + +// Make is a convenience wrapper for Parse that omits the error. +// In case of an error, a sensible default is returned. +func Make(s string) Tag { + t, _ := Parse(s) + return t +} + +// Raw returns the raw base language, script and region, without making an +// attempt to infer their values. +// TODO: consider removing +func (t Tag) Raw() (b Language, s Script, r Region) { + return t.LangID, t.ScriptID, t.RegionID +} + +// equalTags compares language, script and region subtags only. +func (t Tag) equalTags(a Tag) bool { + return t.LangID == a.LangID && t.ScriptID == a.ScriptID && t.RegionID == a.RegionID +} + +// IsRoot returns true if t is equal to language "und". +func (t Tag) IsRoot() bool { + if int(t.pVariant) < len(t.str) { + return false + } + return t.equalTags(Und) +} + +// IsPrivateUse reports whether the Tag consists solely of a private use +// tag. +func (t Tag) IsPrivateUse() bool { + return t.str != "" && t.pVariant == 0 +} + +// RemakeString is used to update t.str in case lang, script or region changed. +// It is assumed that pExt and pVariant still point to the start of the +// respective parts.
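As an illustration of the packed representation described above, consider a hypothetical but well-formed tag with a variant chain and one extension (offsets follow from the field comments on pVariant and pExt; RemakeString below is what keeps them in sync):

	// str      = "sl-rozaj-biske-u-co-phonebk"
	//               ^2            ^14
	// pVariant = 2   -> str[pVariant:pExt] == "-rozaj-biske"   (variants)
	// pExt     = 14  -> str[pExt:]         == "-u-co-phonebk"  (extensions)

Both offsets include the preceding '-'; for tags without variants or extensions, str itself stays empty.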
+func (t *Tag) RemakeString() { + if t.str == "" { + return + } + extra := t.str[t.pVariant:] + if t.pVariant > 0 { + extra = extra[1:] + } + if t.equalTags(Und) && strings.HasPrefix(extra, "x-") { + t.str = extra + t.pVariant = 0 + t.pExt = 0 + return + } + var buf [max99thPercentileSize]byte // avoid extra memory allocation in most cases. + b := buf[:t.genCoreBytes(buf[:])] + if extra != "" { + diff := len(b) - int(t.pVariant) + b = append(b, '-') + b = append(b, extra...) + t.pVariant = uint8(int(t.pVariant) + diff) + t.pExt = uint16(int(t.pExt) + diff) + } else { + t.pVariant = uint8(len(b)) + t.pExt = uint16(len(b)) + } + t.str = string(b) +} + +// genCoreBytes writes a string for the base languages, script and region tags +// to the given buffer and returns the number of bytes written. It will never +// write more than maxCoreSize bytes. +func (t *Tag) genCoreBytes(buf []byte) int { + n := t.LangID.StringToBuf(buf[:]) + if t.ScriptID != 0 { + n += copy(buf[n:], "-") + n += copy(buf[n:], t.ScriptID.String()) + } + if t.RegionID != 0 { + n += copy(buf[n:], "-") + n += copy(buf[n:], t.RegionID.String()) + } + return n +} + +// String returns the canonical string representation of the language tag. +func (t Tag) String() string { + if t.str != "" { + return t.str + } + if t.ScriptID == 0 && t.RegionID == 0 { + return t.LangID.String() + } + buf := [maxCoreSize]byte{} + return string(buf[:t.genCoreBytes(buf[:])]) +} + +// MarshalText implements encoding.TextMarshaler. +func (t Tag) MarshalText() (text []byte, err error) { + if t.str != "" { + text = append(text, t.str...) + } else if t.ScriptID == 0 && t.RegionID == 0 { + text = append(text, t.LangID.String()...) + } else { + buf := [maxCoreSize]byte{} + text = buf[:t.genCoreBytes(buf[:])] + } + return text, nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (t *Tag) UnmarshalText(text []byte) error { + tag, err := Parse(string(text)) + *t = tag + return err +} + +// Variants returns the part of the tag holding all variants or the empty string +// if there are no variants defined. +func (t Tag) Variants() string { + if t.pVariant == 0 { + return "" + } + return t.str[t.pVariant:t.pExt] +} + +// VariantOrPrivateUseTags returns variants or private use tags. +func (t Tag) VariantOrPrivateUseTags() string { + if t.pExt > 0 { + return t.str[t.pVariant:t.pExt] + } + return t.str[t.pVariant:] +} + +// HasString reports whether this tag defines more than just the raw +// components. +func (t Tag) HasString() bool { + return t.str != "" +} + +// Parent returns the CLDR parent of t. In CLDR, missing fields in data for a +// specific language are substituted with fields from the parent language. +// The parent for a language may change for newer versions of CLDR. +func (t Tag) Parent() Tag { + if t.str != "" { + // Strip the variants and extensions. 
+ b, s, r := t.Raw() + t = Tag{LangID: b, ScriptID: s, RegionID: r} + if t.RegionID == 0 && t.ScriptID != 0 && t.LangID != 0 { + base, _ := addTags(Tag{LangID: t.LangID}) + if base.ScriptID == t.ScriptID { + return Tag{LangID: t.LangID} + } + } + return t + } + if t.LangID != 0 { + if t.RegionID != 0 { + maxScript := t.ScriptID + if maxScript == 0 { + max, _ := addTags(t) + maxScript = max.ScriptID + } + + for i := range parents { + if Language(parents[i].lang) == t.LangID && Script(parents[i].maxScript) == maxScript { + for _, r := range parents[i].fromRegion { + if Region(r) == t.RegionID { + return Tag{ + LangID: t.LangID, + ScriptID: Script(parents[i].script), + RegionID: Region(parents[i].toRegion), + } + } + } + } + } + + // Strip the script if it is the default one. + base, _ := addTags(Tag{LangID: t.LangID}) + if base.ScriptID != maxScript { + return Tag{LangID: t.LangID, ScriptID: maxScript} + } + return Tag{LangID: t.LangID} + } else if t.ScriptID != 0 { + // The parent for a base-script pair with a non-default script is + // "und" instead of the base language. + base, _ := addTags(Tag{LangID: t.LangID}) + if base.ScriptID != t.ScriptID { + return Und + } + return Tag{LangID: t.LangID} + } + } + return Und +} + +// ParseExtension parses s as an extension and returns it on success. +func ParseExtension(s string) (ext string, err error) { + defer func() { + if recover() != nil { + ext = "" + err = ErrSyntax + } + }() + + scan := makeScannerString(s) + var end int + if n := len(scan.token); n != 1 { + return "", ErrSyntax + } + scan.toLower(0, len(scan.b)) + end = parseExtension(&scan) + if end != len(s) { + return "", ErrSyntax + } + return string(scan.b), nil +} + +// HasVariants reports whether t has variants. +func (t Tag) HasVariants() bool { + return uint16(t.pVariant) < t.pExt +} + +// HasExtensions reports whether t has extensions. +func (t Tag) HasExtensions() bool { + return int(t.pExt) < len(t.str) +} + +// Extension returns the extension of type x for tag t. It will return +// false for ok if t does not have the requested extension. The returned +// extension will be invalid in this case. +func (t Tag) Extension(x byte) (ext string, ok bool) { + for i := int(t.pExt); i < len(t.str)-1; { + var ext string + i, ext = getExtension(t.str, i) + if ext[0] == x { + return ext, true + } + } + return "", false +} + +// Extensions returns all extensions of t. +func (t Tag) Extensions() []string { + e := []string{} + for i := int(t.pExt); i < len(t.str)-1; { + var ext string + i, ext = getExtension(t.str, i) + e = append(e, ext) + } + return e +} + +// TypeForKey returns the type associated with the given key, where key and type +// are of the allowed values defined for the Unicode locale extension ('u') in +// https://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers. +// TypeForKey will traverse the inheritance chain to get the correct value. +// +// If there are multiple types associated with a key, only the first will be +// returned. If there is no type associated with a key, it returns the empty +// string.
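A short usage sketch of these key-type semantics, via the public golang.org/x/text/language package whose TypeForKey and SetTypeForKey mirror the internal ones (outputs are what the documented behavior implies):

	package main

	import (
		"fmt"

		"golang.org/x/text/language"
	)

	func main() {
		t := language.MustParse("nl-u-co-phonebk")
		fmt.Println(t.TypeForKey("co")) // phonebk
		fmt.Println(t.TypeForKey("nu")) // "" (no type set for this key)

		t2, _ := t.SetTypeForKey("nu", "arabic")
		fmt.Println(t2) // nl-u-co-phonebk-nu-arabic (keys stay sorted)
	}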
+func (t Tag) TypeForKey(key string) string { + if _, start, end, _ := t.findTypeForKey(key); end != start { + s := t.str[start:end] + if p := strings.IndexByte(s, '-'); p >= 0 { + s = s[:p] + } + return s + } + return "" +} + +var ( + errPrivateUse = errors.New("cannot set a key on a private use tag") + errInvalidArguments = errors.New("invalid key or type") +) + +// SetTypeForKey returns a new Tag with the key set to type, where key and type +// are of the allowed values defined for the Unicode locale extension ('u') in +// https://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers. +// An empty value removes an existing pair with the same key. +func (t Tag) SetTypeForKey(key, value string) (Tag, error) { + if t.IsPrivateUse() { + return t, errPrivateUse + } + if len(key) != 2 { + return t, errInvalidArguments + } + + // Remove the setting if value is "". + if value == "" { + start, sep, end, _ := t.findTypeForKey(key) + if start != sep { + // Remove a possible empty extension. + switch { + case t.str[start-2] != '-': // has previous elements. + case end == len(t.str), // end of string + end+2 < len(t.str) && t.str[end+2] == '-': // end of extension + start -= 2 + } + if start == int(t.pVariant) && end == len(t.str) { + t.str = "" + t.pVariant, t.pExt = 0, 0 + } else { + t.str = fmt.Sprintf("%s%s", t.str[:start], t.str[end:]) + } + } + return t, nil + } + + if len(value) < 3 || len(value) > 8 { + return t, errInvalidArguments + } + + var ( + buf [maxCoreSize + maxSimpleUExtensionSize]byte + uStart int // start of the -u extension. + ) + + // Generate the tag string if needed. + if t.str == "" { + uStart = t.genCoreBytes(buf[:]) + buf[uStart] = '-' + uStart++ + } + + // Create new key-type pair and parse it to verify. + b := buf[uStart:] + copy(b, "u-") + copy(b[2:], key) + b[4] = '-' + b = b[:5+copy(b[5:], value)] + scan := makeScanner(b) + if parseExtensions(&scan); scan.err != nil { + return t, scan.err + } + + // Assemble the replacement string. + if t.str == "" { + t.pVariant, t.pExt = byte(uStart-1), uint16(uStart-1) + t.str = string(buf[:uStart+len(b)]) + } else { + s := t.str + start, sep, end, hasExt := t.findTypeForKey(key) + if start == sep { + if hasExt { + b = b[2:] + } + t.str = fmt.Sprintf("%s-%s%s", s[:sep], b, s[end:]) + } else { + t.str = fmt.Sprintf("%s-%s%s", s[:start+3], value, s[end:]) + } + } + return t, nil +} + +// findTypeForKey returns the start and end position for the type corresponding +// to key or the point at which to insert the key-value pair if the type +// wasn't found. The hasExt return value reports whether an -u extension was present. +// Note: the extensions are typically very small and are likely to contain +// only one key-type pair. +func (t Tag) findTypeForKey(key string) (start, sep, end int, hasExt bool) { + p := int(t.pExt) + if len(key) != 2 || p == len(t.str) || p == 0 { + return p, p, p, false + } + s := t.str + + // Find the correct extension. + for p++; s[p] != 'u'; p++ { + if s[p] > 'u' { + p-- + return p, p, p, false + } + if p = nextExtension(s, p); p == len(s) { + return len(s), len(s), len(s), false + } + } + // Proceed to the hyphen following the extension name. + p++ + + // curKey is the key currently being processed. + curKey := "" + + // Iterate over keys until we get the end of a section. 
+ for { + end = p + for p++; p < len(s) && s[p] != '-'; p++ { + } + n := p - end - 1 + if n <= 2 && curKey == key { + if sep < end { + sep++ + } + return start, sep, end, true + } + switch n { + case 0, // invalid string + 1: // next extension + return end, end, end, true + case 2: + // next key + curKey = s[end+1 : p] + if curKey > key { + return end, end, end, true + } + start = end + sep = p + } + } +} + +// ParseBase parses a 2- or 3-letter ISO 639 code. +// It returns a ValueError if s is a well-formed but unknown language identifier +// or another error if some other error occurred. +func ParseBase(s string) (l Language, err error) { + defer func() { + if recover() != nil { + l = 0 + err = ErrSyntax + } + }() + + if n := len(s); n < 2 || 3 < n { + return 0, ErrSyntax + } + var buf [3]byte + return getLangID(buf[:copy(buf[:], s)]) +} + +// ParseScript parses a 4-letter ISO 15924 code. +// It returns a ValueError if s is a well-formed but unknown script identifier +// or another error if some other error occurred. +func ParseScript(s string) (scr Script, err error) { + defer func() { + if recover() != nil { + scr = 0 + err = ErrSyntax + } + }() + + if len(s) != 4 { + return 0, ErrSyntax + } + var buf [4]byte + return getScriptID(script, buf[:copy(buf[:], s)]) +} + +// EncodeM49 returns the Region for the given UN M.49 code. +// It returns an error if r is not a valid code. +func EncodeM49(r int) (Region, error) { + return getRegionM49(r) +} + +// ParseRegion parses a 2- or 3-letter ISO 3166-1 or a UN M.49 code. +// It returns a ValueError if s is a well-formed but unknown region identifier +// or another error if some other error occurred. +func ParseRegion(s string) (r Region, err error) { + defer func() { + if recover() != nil { + r = 0 + err = ErrSyntax + } + }() + + if n := len(s); n < 2 || 3 < n { + return 0, ErrSyntax + } + var buf [3]byte + return getRegionID(buf[:copy(buf[:], s)]) +} + +// IsCountry returns whether this region is a country or autonomous area. This +// includes non-standard definitions from CLDR. +func (r Region) IsCountry() bool { + if r == 0 || r.IsGroup() || r.IsPrivateUse() && r != _XK { + return false + } + return true +} + +// IsGroup returns whether this region defines a collection of regions. This +// includes non-standard definitions from CLDR. +func (r Region) IsGroup() bool { + if r == 0 { + return false + } + return int(regionInclusion[r]) < len(regionContainment) +} + +// Contains returns whether Region c is contained by Region r. It returns true +// if c == r. +func (r Region) Contains(c Region) bool { + if r == c { + return true + } + g := regionInclusion[r] + if g >= nRegionGroups { + return false + } + m := regionContainment[g] + + d := regionInclusion[c] + b := regionInclusionBits[d] + + // A contained country may belong to multiple disjoint groups. Matching any + // of these indicates containment. If the contained region is a group, it + // must strictly be a subset. + if d >= nRegionGroups { + return b&m != 0 + } + return b&^m == 0 +} + +var errNoTLD = errors.New("language: region is not a valid ccTLD") + +// TLD returns the country code top-level domain (ccTLD). UK is returned for GB. +// In all other cases it returns either the region itself or an error. +// +// This method may return an error for a region for which there exists a +// canonical form with a ccTLD. To get that ccTLD canonicalize r first. The +// region will already be canonicalized if it was obtained from a Tag that was +// obtained using any of the default methods.
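A companion sketch for the Region helpers, including the TLD method defined next (public golang.org/x/text/language API; expected outputs follow from the documented behavior):

	package main

	import (
		"fmt"

		"golang.org/x/text/language"
	)

	func main() {
		gb := language.MustParseRegion("GB")
		tld, err := gb.TLD()
		fmt.Println(tld, err) // UK <nil>

		europe := language.MustParseRegion("150") // UN M.49 code for Europe
		de := language.MustParseRegion("DE")
		fmt.Println(europe.Contains(de)) // true
	}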
+func (r Region) TLD() (Region, error) { + // See http://en.wikipedia.org/wiki/Country_code_top-level_domain for the + // difference between ISO 3166-1 and IANA ccTLD. + if r == _GB { + r = _UK + } + if (r.typ() & ccTLD) == 0 { + return 0, errNoTLD + } + return r, nil +} + +// Canonicalize returns the region or a possible replacement if the region is +// deprecated. It will not return a replacement for deprecated regions that +// are split into multiple regions. +func (r Region) Canonicalize() Region { + if cr := normRegion(r); cr != 0 { + return cr + } + return r +} + +// Variant represents a registered variant of a language as defined by BCP 47. +type Variant struct { + ID uint8 + str string +} + +// ParseVariant parses and returns a Variant. An error is returned if s is not +// a valid variant. +func ParseVariant(s string) (v Variant, err error) { + defer func() { + if recover() != nil { + v = Variant{} + err = ErrSyntax + } + }() + + s = strings.ToLower(s) + if id, ok := variantIndex[s]; ok { + return Variant{id, s}, nil + } + return Variant{}, NewValueError([]byte(s)) +} + +// String returns the string representation of the variant. +func (v Variant) String() string { + return v.str +} diff --git a/vendor/golang.org/x/text/internal/language/lookup.go b/vendor/golang.org/x/text/internal/language/lookup.go new file mode 100644 index 00000000000..231b4fbdebf --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/lookup.go @@ -0,0 +1,412 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package language + +import ( + "bytes" + "fmt" + "sort" + "strconv" + + "golang.org/x/text/internal/tag" +) + +// findIndex tries to find the given tag in idx and returns a standardized error +// if it could not be found. +func findIndex(idx tag.Index, key []byte, form string) (index int, err error) { + if !tag.FixCase(form, key) { + return 0, ErrSyntax + } + i := idx.Index(key) + if i == -1 { + return 0, NewValueError(key) + } + return i, nil +} + +func searchUint(imap []uint16, key uint16) int { + return sort.Search(len(imap), func(i int) bool { + return imap[i] >= key + }) +} + +type Language uint16 + +// getLangID returns the langID of s if s is a canonical subtag +// or langUnknown if s is not a canonical subtag. +func getLangID(s []byte) (Language, error) { + if len(s) == 2 { + return getLangISO2(s) + } + return getLangISO3(s) +} + +// TODO language normalization as well as the AliasMaps could be moved to the +// higher level package, but it is a bit tricky to separate the generation. + +func (id Language) Canonicalize() (Language, AliasType) { + return normLang(id) +} + +// normLang returns the mapped langID of id according to mapping m. +func normLang(id Language) (Language, AliasType) { + k := sort.Search(len(AliasMap), func(i int) bool { + return AliasMap[i].From >= uint16(id) + }) + if k < len(AliasMap) && AliasMap[k].From == uint16(id) { + return Language(AliasMap[k].To), AliasTypes[k] + } + return id, AliasTypeUnknown +} + +// getLangISO2 returns the langID for the given 2-letter ISO language code +// or unknownLang if this does not exist. 
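The AliasMap consulted by normLang above is what lets deprecated and legacy codes be rewritten. A hedged sketch through the public package; language.All turns on every canonicalization, so the exact alias classification of a code does not matter here:

	package main

	import (
		"fmt"

		"golang.org/x/text/language"
	)

	func main() {
		// "iw" carries Preferred-Value "he" in the IANA registry.
		t, err := language.All.Parse("iw")
		fmt.Println(t, err) // he <nil>
	}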
+func getLangISO2(s []byte) (Language, error) { + if !tag.FixCase("zz", s) { + return 0, ErrSyntax + } + if i := lang.Index(s); i != -1 && lang.Elem(i)[3] != 0 { + return Language(i), nil + } + return 0, NewValueError(s) +} + +const base = 'z' - 'a' + 1 + +func strToInt(s []byte) uint { + v := uint(0) + for i := 0; i < len(s); i++ { + v *= base + v += uint(s[i] - 'a') + } + return v +} + +// converts the given integer to the original ASCII string passed to strToInt. +// len(s) must match the number of characters obtained. +func intToStr(v uint, s []byte) { + for i := len(s) - 1; i >= 0; i-- { + s[i] = byte(v%base) + 'a' + v /= base + } +} + +// getLangISO3 returns the langID for the given 3-letter ISO language code +// or unknownLang if this does not exist. +func getLangISO3(s []byte) (Language, error) { + if tag.FixCase("und", s) { + // first try to match canonical 3-letter entries + for i := lang.Index(s[:2]); i != -1; i = lang.Next(s[:2], i) { + if e := lang.Elem(i); e[3] == 0 && e[2] == s[2] { + // We treat "und" as special and always translate it to "unspecified". + // Note that ZZ and Zzzz are private use and are not treated as + // unspecified by default. + id := Language(i) + if id == nonCanonicalUnd { + return 0, nil + } + return id, nil + } + } + if i := altLangISO3.Index(s); i != -1 { + return Language(altLangIndex[altLangISO3.Elem(i)[3]]), nil + } + n := strToInt(s) + if langNoIndex[n/8]&(1<<(n%8)) != 0 { + return Language(n) + langNoIndexOffset, nil + } + // Check for non-canonical uses of ISO3. + for i := lang.Index(s[:1]); i != -1; i = lang.Next(s[:1], i) { + if e := lang.Elem(i); e[2] == s[1] && e[3] == s[2] { + return Language(i), nil + } + } + return 0, NewValueError(s) + } + return 0, ErrSyntax +} + +// StringToBuf writes the string to b and returns the number of bytes +// written. cap(b) must be >= 3. +func (id Language) StringToBuf(b []byte) int { + if id >= langNoIndexOffset { + intToStr(uint(id)-langNoIndexOffset, b[:3]) + return 3 + } else if id == 0 { + return copy(b, "und") + } + l := lang[id<<2:] + if l[3] == 0 { + return copy(b, l[:3]) + } + return copy(b, l[:2]) +} + +// String returns the BCP 47 representation of the langID. +// Use b as variable name, instead of id, to ensure the variable +// used is consistent with that of Base in which this type is embedded. +func (b Language) String() string { + if b == 0 { + return "und" + } else if b >= langNoIndexOffset { + b -= langNoIndexOffset + buf := [3]byte{} + intToStr(uint(b), buf[:]) + return string(buf[:]) + } + l := lang.Elem(int(b)) + if l[3] == 0 { + return l[:3] + } + return l[:2] +} + +// ISO3 returns the ISO 639-3 language code. +func (b Language) ISO3() string { + if b == 0 || b >= langNoIndexOffset { + return b.String() + } + l := lang.Elem(int(b)) + if l[3] == 0 { + return l[:3] + } else if l[2] == 0 { + return altLangISO3.Elem(int(l[3]))[:3] + } + // This allocation will only happen for 3-letter ISO codes + // that are non-canonical BCP 47 language identifiers. + return l[0:1] + l[2:4] +} + +// IsPrivateUse reports whether this language code is reserved for private use. +func (b Language) IsPrivateUse() bool { + return langPrivateStart <= b && b <= langPrivateEnd +} + +// SuppressScript returns the script marked as SuppressScript in the IANA +// language tag repository, or 0 if there is no such script. 
+func (b Language) SuppressScript() Script { + if b < langNoIndexOffset { + return Script(suppressScript[b]) + } + return 0 +} + +type Region uint16 + +// getRegionID returns the region id for s if s is a valid 2-letter region code +// or unknownRegion. +func getRegionID(s []byte) (Region, error) { + if len(s) == 3 { + if isAlpha(s[0]) { + return getRegionISO3(s) + } + if i, err := strconv.ParseUint(string(s), 10, 10); err == nil { + return getRegionM49(int(i)) + } + } + return getRegionISO2(s) +} + +// getRegionISO2 returns the regionID for the given 2-letter ISO country code +// or unknownRegion if this does not exist. +func getRegionISO2(s []byte) (Region, error) { + i, err := findIndex(regionISO, s, "ZZ") + if err != nil { + return 0, err + } + return Region(i) + isoRegionOffset, nil +} + +// getRegionISO3 returns the regionID for the given 3-letter ISO country code +// or unknownRegion if this does not exist. +func getRegionISO3(s []byte) (Region, error) { + if tag.FixCase("ZZZ", s) { + for i := regionISO.Index(s[:1]); i != -1; i = regionISO.Next(s[:1], i) { + if e := regionISO.Elem(i); e[2] == s[1] && e[3] == s[2] { + return Region(i) + isoRegionOffset, nil + } + } + for i := 0; i < len(altRegionISO3); i += 3 { + if tag.Compare(altRegionISO3[i:i+3], s) == 0 { + return Region(altRegionIDs[i/3]), nil + } + } + return 0, NewValueError(s) + } + return 0, ErrSyntax +} + +func getRegionM49(n int) (Region, error) { + if 0 < n && n <= 999 { + const ( + searchBits = 7 + regionBits = 9 + regionMask = 1<<regionBits - 1 + ) + idx := n >> searchBits + buf := fromM49[m49Index[idx]:m49Index[idx+1]] + val := uint16(n) << regionBits // we rely on bits shifting out + i := sort.Search(len(buf), func(i int) bool { + return buf[i] >= val + }) + if r := fromM49[int(m49Index[idx])+i]; r&^regionMask == val { + return Region(r & regionMask), nil + } + } + var e ValueError + fmt.Fprint(bytes.NewBuffer([]byte(e.v[:])), n) + return 0, e +} + +// normRegion returns a region if r is deprecated or 0 otherwise. +// TODO: consider supporting BYS (-> BLR), CSK (-> 200 or CZ), PHI (-> PHL) and AFI (-> DJ). +// TODO: consider mapping split up regions to new most populous one (like CLDR). +func normRegion(r Region) Region { + m := regionOldMap + k := sort.Search(len(m), func(i int) bool { + return m[i].From >= uint16(r) + }) + if k < len(m) && m[k].From == uint16(r) { + return Region(m[k].To) + } + return 0 +} + +const ( + iso3166UserAssigned = 1 << iota + ccTLD + bcp47Region +) + +func (r Region) typ() byte { + return regionTypes[r] +} + +// String returns the BCP 47 representation for the region. +// It returns "ZZ" for an unspecified region. +func (r Region) String() string { + if r < isoRegionOffset { + if r == 0 { + return "ZZ" + } + return fmt.Sprintf("%03d", r.M49()) + } + r -= isoRegionOffset + return regionISO.Elem(int(r))[:2] +} + +// ISO3 returns the 3-letter ISO code of r. +// Note that not all regions have a 3-letter ISO code. +// In such cases this method returns "ZZZ". +func (r Region) ISO3() string { + if r < isoRegionOffset { + return "ZZZ" + } + r -= isoRegionOffset + reg := regionISO.Elem(int(r)) + switch reg[2] { + case 0: + return altRegionISO3[reg[3]:][:3] + case ' ': + return "ZZZ" + } + return reg[0:1] + reg[2:4] +} + +// M49 returns the UN M.49 encoding of r, or 0 if this encoding +// is not defined for r. +func (r Region) M49() int { + return int(m49[r]) +} + +// IsPrivateUse reports whether r has the ISO 3166 User-assigned status. 
This +// may include private-use tags that are assigned by CLDR and used in this +// implementation. So IsPrivateUse and IsCountry can be simultaneously true. +func (r Region) IsPrivateUse() bool { + return r.typ()&iso3166UserAssigned != 0 +} + +type Script uint16 + +// getScriptID returns the script id for string s. It assumes that s +// is of the format [A-Z][a-z]{3}. +func getScriptID(idx tag.Index, s []byte) (Script, error) { + i, err := findIndex(idx, s, "Zzzz") + return Script(i), err +} + +// String returns the script code in title case. +// It returns "Zzzz" for an unspecified script. +func (s Script) String() string { + if s == 0 { + return "Zzzz" + } + return script.Elem(int(s)) +} + +// IsPrivateUse reports whether this script code is reserved for private use. +func (s Script) IsPrivateUse() bool { + return _Qaaa <= s && s <= _Qabx +} + +const ( + maxAltTaglen = len("en-US-POSIX") + maxLen = maxAltTaglen +) + +var ( + // grandfatheredMap holds a mapping from legacy and grandfathered tags to + // their base language or index to more elaborate tag. + grandfatheredMap = map[[maxLen]byte]int16{ + [maxLen]byte{'a', 'r', 't', '-', 'l', 'o', 'j', 'b', 'a', 'n'}: _jbo, // art-lojban + [maxLen]byte{'i', '-', 'a', 'm', 'i'}: _ami, // i-ami + [maxLen]byte{'i', '-', 'b', 'n', 'n'}: _bnn, // i-bnn + [maxLen]byte{'i', '-', 'h', 'a', 'k'}: _hak, // i-hak + [maxLen]byte{'i', '-', 'k', 'l', 'i', 'n', 'g', 'o', 'n'}: _tlh, // i-klingon + [maxLen]byte{'i', '-', 'l', 'u', 'x'}: _lb, // i-lux + [maxLen]byte{'i', '-', 'n', 'a', 'v', 'a', 'j', 'o'}: _nv, // i-navajo + [maxLen]byte{'i', '-', 'p', 'w', 'n'}: _pwn, // i-pwn + [maxLen]byte{'i', '-', 't', 'a', 'o'}: _tao, // i-tao + [maxLen]byte{'i', '-', 't', 'a', 'y'}: _tay, // i-tay + [maxLen]byte{'i', '-', 't', 's', 'u'}: _tsu, // i-tsu + [maxLen]byte{'n', 'o', '-', 'b', 'o', 'k'}: _nb, // no-bok + [maxLen]byte{'n', 'o', '-', 'n', 'y', 'n'}: _nn, // no-nyn + [maxLen]byte{'s', 'g', 'n', '-', 'b', 'e', '-', 'f', 'r'}: _sfb, // sgn-BE-FR + [maxLen]byte{'s', 'g', 'n', '-', 'b', 'e', '-', 'n', 'l'}: _vgt, // sgn-BE-NL + [maxLen]byte{'s', 'g', 'n', '-', 'c', 'h', '-', 'd', 'e'}: _sgg, // sgn-CH-DE + [maxLen]byte{'z', 'h', '-', 'g', 'u', 'o', 'y', 'u'}: _cmn, // zh-guoyu + [maxLen]byte{'z', 'h', '-', 'h', 'a', 'k', 'k', 'a'}: _hak, // zh-hakka + [maxLen]byte{'z', 'h', '-', 'm', 'i', 'n', '-', 'n', 'a', 'n'}: _nan, // zh-min-nan + [maxLen]byte{'z', 'h', '-', 'x', 'i', 'a', 'n', 'g'}: _hsn, // zh-xiang + + // Grandfathered tags with no modern replacement will be converted as + // follows: + [maxLen]byte{'c', 'e', 'l', '-', 'g', 'a', 'u', 'l', 'i', 's', 'h'}: -1, // cel-gaulish + [maxLen]byte{'e', 'n', '-', 'g', 'b', '-', 'o', 'e', 'd'}: -2, // en-GB-oed + [maxLen]byte{'i', '-', 'd', 'e', 'f', 'a', 'u', 'l', 't'}: -3, // i-default + [maxLen]byte{'i', '-', 'e', 'n', 'o', 'c', 'h', 'i', 'a', 'n'}: -4, // i-enochian + [maxLen]byte{'i', '-', 'm', 'i', 'n', 'g', 'o'}: -5, // i-mingo + [maxLen]byte{'z', 'h', '-', 'm', 'i', 'n'}: -6, // zh-min + + // CLDR-specific tag. 
+ [maxLen]byte{'r', 'o', 'o', 't'}: 0, // root + [maxLen]byte{'e', 'n', '-', 'u', 's', '-', 'p', 'o', 's', 'i', 'x'}: -7, // en_US_POSIX" + } + + altTagIndex = [...]uint8{0, 17, 31, 45, 61, 74, 86, 102} + + altTags = "xtg-x-cel-gaulishen-GB-oxendicten-x-i-defaultund-x-i-enochiansee-x-i-mingonan-x-zh-minen-US-u-va-posix" +) + +func grandfathered(s [maxAltTaglen]byte) (t Tag, ok bool) { + if v, ok := grandfatheredMap[s]; ok { + if v < 0 { + return Make(altTags[altTagIndex[-v-1]:altTagIndex[-v]]), true + } + t.LangID = Language(v) + return t, true + } + return t, false +} diff --git a/vendor/golang.org/x/text/internal/language/match.go b/vendor/golang.org/x/text/internal/language/match.go new file mode 100644 index 00000000000..75a2dbca764 --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/match.go @@ -0,0 +1,226 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package language + +import "errors" + +type scriptRegionFlags uint8 + +const ( + isList = 1 << iota + scriptInFrom + regionInFrom +) + +func (t *Tag) setUndefinedLang(id Language) { + if t.LangID == 0 { + t.LangID = id + } +} + +func (t *Tag) setUndefinedScript(id Script) { + if t.ScriptID == 0 { + t.ScriptID = id + } +} + +func (t *Tag) setUndefinedRegion(id Region) { + if t.RegionID == 0 || t.RegionID.Contains(id) { + t.RegionID = id + } +} + +// ErrMissingLikelyTagsData indicates no information was available +// to compute likely values of missing tags. +var ErrMissingLikelyTagsData = errors.New("missing likely tags data") + +// addLikelySubtags sets subtags to their most likely value, given the locale. +// In most cases this means setting fields for unknown values, but in some +// cases it may alter a value. It returns an ErrMissingLikelyTagsData error +// if the given locale cannot be expanded. +func (t Tag) addLikelySubtags() (Tag, error) { + id, err := addTags(t) + if err != nil { + return t, err + } else if id.equalTags(t) { + return t, nil + } + id.RemakeString() + return id, nil +} + +// specializeRegion attempts to specialize a group region. +func specializeRegion(t *Tag) bool { + if i := regionInclusion[t.RegionID]; i < nRegionGroups { + x := likelyRegionGroup[i] + if Language(x.lang) == t.LangID && Script(x.script) == t.ScriptID { + t.RegionID = Region(x.region) + } + return true + } + return false +} + +// Maximize returns a new tag with missing tags filled in. +func (t Tag) Maximize() (Tag, error) { + return addTags(t) +} + +func addTags(t Tag) (Tag, error) { + // We leave private use identifiers alone. + if t.IsPrivateUse() { + return t, nil + } + if t.ScriptID != 0 && t.RegionID != 0 { + if t.LangID != 0 { + // already fully specified + specializeRegion(&t) + return t, nil + } + // Search matches for und-script-region. Note that for these cases + // region will never be a group so there is no need to check for this. + list := likelyRegion[t.RegionID : t.RegionID+1] + if x := list[0]; x.flags&isList != 0 { + list = likelyRegionList[x.lang : x.lang+uint16(x.script)] + } + for _, x := range list { + // Deviating from the spec. See match_test.go for details. + if Script(x.script) == t.ScriptID { + t.setUndefinedLang(Language(x.lang)) + return t, nil + } + } + } + if t.LangID != 0 { + // Search matches for lang-script and lang-region, where lang != und. 
+ if t.LangID < langNoIndexOffset { + x := likelyLang[t.LangID] + if x.flags&isList != 0 { + list := likelyLangList[x.region : x.region+uint16(x.script)] + if t.ScriptID != 0 { + for _, x := range list { + if Script(x.script) == t.ScriptID && x.flags&scriptInFrom != 0 { + t.setUndefinedRegion(Region(x.region)) + return t, nil + } + } + } else if t.RegionID != 0 { + count := 0 + goodScript := true + tt := t + for _, x := range list { + // We visit all entries for which the script was not + // defined, including the ones where the region was not + // defined. This allows for proper disambiguation within + // regions. + if x.flags&scriptInFrom == 0 && t.RegionID.Contains(Region(x.region)) { + tt.RegionID = Region(x.region) + tt.setUndefinedScript(Script(x.script)) + goodScript = goodScript && tt.ScriptID == Script(x.script) + count++ + } + } + if count == 1 { + return tt, nil + } + // Even if we fail to find a unique Region, we might have + // an unambiguous script. + if goodScript { + t.ScriptID = tt.ScriptID + } + } + } + } + } else { + // Search matches for und-script. + if t.ScriptID != 0 { + x := likelyScript[t.ScriptID] + if x.region != 0 { + t.setUndefinedRegion(Region(x.region)) + t.setUndefinedLang(Language(x.lang)) + return t, nil + } + } + // Search matches for und-region. If und-script-region exists, it would + // have been found earlier. + if t.RegionID != 0 { + if i := regionInclusion[t.RegionID]; i < nRegionGroups { + x := likelyRegionGroup[i] + if x.region != 0 { + t.setUndefinedLang(Language(x.lang)) + t.setUndefinedScript(Script(x.script)) + t.RegionID = Region(x.region) + } + } else { + x := likelyRegion[t.RegionID] + if x.flags&isList != 0 { + x = likelyRegionList[x.lang] + } + if x.script != 0 && x.flags != scriptInFrom { + t.setUndefinedLang(Language(x.lang)) + t.setUndefinedScript(Script(x.script)) + return t, nil + } + } + } + } + + // Search matches for lang. + if t.LangID < langNoIndexOffset { + x := likelyLang[t.LangID] + if x.flags&isList != 0 { + x = likelyLangList[x.region] + } + if x.region != 0 { + t.setUndefinedScript(Script(x.script)) + t.setUndefinedRegion(Region(x.region)) + } + specializeRegion(&t) + if t.LangID == 0 { + t.LangID = _en // default language + } + return t, nil + } + return t, ErrMissingLikelyTagsData +} + +func (t *Tag) setTagsFrom(id Tag) { + t.LangID = id.LangID + t.ScriptID = id.ScriptID + t.RegionID = id.RegionID +} + +// minimize removes the region or script subtags from t such that +// t.addLikelySubtags() == t.minimize().addLikelySubtags(). +func (t Tag) minimize() (Tag, error) { + t, err := minimizeTags(t) + if err != nil { + return t, err + } + t.RemakeString() + return t, nil +} + +// minimizeTags mimics the behavior of the ICU 51 C implementation. +func minimizeTags(t Tag) (Tag, error) { + if t.equalTags(Und) { + return t, nil + } + max, err := addTags(t) + if err != nil { + return t, err + } + for _, id := range [...]Tag{ + {LangID: t.LangID}, + {LangID: t.LangID, RegionID: t.RegionID}, + {LangID: t.LangID, ScriptID: t.ScriptID}, + } { + if x, err := addTags(id); err == nil && max.equalTags(x) { + t.setTagsFrom(id) + break + } + } + return t, nil +} diff --git a/vendor/golang.org/x/text/internal/language/parse.go b/vendor/golang.org/x/text/internal/language/parse.go new file mode 100644 index 00000000000..aad1e0acf77 --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/parse.go @@ -0,0 +1,608 @@ +// Copyright 2013 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package language + +import ( + "bytes" + "errors" + "fmt" + "sort" + + "golang.org/x/text/internal/tag" +) + +// isAlpha returns true if the byte is not a digit. +// b must be an ASCII letter or digit. +func isAlpha(b byte) bool { + return b > '9' +} + +// isAlphaNum returns true if the string contains only ASCII letters or digits. +func isAlphaNum(s []byte) bool { + for _, c := range s { + if !('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9') { + return false + } + } + return true +} + +// ErrSyntax is returned by any of the parsing functions when the +// input is not well-formed, according to BCP 47. +// TODO: return the position at which the syntax error occurred? +var ErrSyntax = errors.New("language: tag is not well-formed") + +// ErrDuplicateKey is returned when a tag contains the same key twice with +// different values in the -u section. +var ErrDuplicateKey = errors.New("language: different values for same key in -u extension") + +// ValueError is returned by any of the parsing functions when the +// input is well-formed but the respective subtag is not recognized +// as a valid value. +type ValueError struct { + v [8]byte +} + +// NewValueError creates a new ValueError. +func NewValueError(tag []byte) ValueError { + var e ValueError + copy(e.v[:], tag) + return e +} + +func (e ValueError) tag() []byte { + n := bytes.IndexByte(e.v[:], 0) + if n == -1 { + n = 8 + } + return e.v[:n] +} + +// Error implements the error interface. +func (e ValueError) Error() string { + return fmt.Sprintf("language: subtag %q is well-formed but unknown", e.tag()) +} + +// Subtag returns the subtag for which the error occurred. +func (e ValueError) Subtag() string { + return string(e.tag()) +} + +// scanner is used to scan BCP 47 tokens, which are separated by _ or -. +type scanner struct { + b []byte + bytes [max99thPercentileSize]byte + token []byte + start int // start position of the current token + end int // end position of the current token + next int // next point for scan + err error + done bool +} + +func makeScannerString(s string) scanner { + scan := scanner{} + if len(s) <= len(scan.bytes) { + scan.b = scan.bytes[:copy(scan.bytes[:], s)] + } else { + scan.b = []byte(s) + } + scan.init() + return scan +} + +// makeScanner returns a scanner using b as the input buffer. +// b is not copied and may be modified by the scanner routines. +func makeScanner(b []byte) scanner { + scan := scanner{b: b} + scan.init() + return scan +} + +func (s *scanner) init() { + for i, c := range s.b { + if c == '_' { + s.b[i] = '-' + } + } + s.scan() +} + +// toLower converts the string between start and end to lower case. +func (s *scanner) toLower(start, end int) { + for i := start; i < end; i++ { + c := s.b[i] + if 'A' <= c && c <= 'Z' { + s.b[i] += 'a' - 'A' + } + } +} + +func (s *scanner) setError(e error) { + if s.err == nil || (e == ErrSyntax && s.err != ErrSyntax) { + s.err = e + } +} + +// resizeRange shrinks or grows the array at position oldStart such that +// a new string of size newSize can fit between oldStart and oldEnd. +// Sets the scan point to after the resized range.
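+//
+// A hedged worked example (not in the upstream source): growing a 2-byte
+// range to hold a 3-byte replacement shifts the tail of s.b right by one
+// byte, reallocating only when the result no longer fits in cap(s.b).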
+func (s *scanner) resizeRange(oldStart, oldEnd, newSize int) { + s.start = oldStart + if end := oldStart + newSize; end != oldEnd { + diff := end - oldEnd + var b []byte + if n := len(s.b) + diff; n > cap(s.b) { + b = make([]byte, n) + copy(b, s.b[:oldStart]) + } else { + b = s.b[:n] + } + copy(b[end:], s.b[oldEnd:]) + s.b = b + s.next = end + (s.next - s.end) + s.end = end + } +} + +// replace replaces the current token with repl. +func (s *scanner) replace(repl string) { + s.resizeRange(s.start, s.end, len(repl)) + copy(s.b[s.start:], repl) +} + +// gobble removes the current token from the input. +// Caller must call scan after calling gobble. +func (s *scanner) gobble(e error) { + s.setError(e) + if s.start == 0 { + s.b = s.b[:+copy(s.b, s.b[s.next:])] + s.end = 0 + } else { + s.b = s.b[:s.start-1+copy(s.b[s.start-1:], s.b[s.end:])] + s.end = s.start - 1 + } + s.next = s.start +} + +// deleteRange removes the given range from s.b before the current token. +func (s *scanner) deleteRange(start, end int) { + s.b = s.b[:start+copy(s.b[start:], s.b[end:])] + diff := end - start + s.next -= diff + s.start -= diff + s.end -= diff +} + +// scan parses the next token of a BCP 47 string. Tokens that are larger +// than 8 characters or include non-alphanumeric characters result in an error +// and are gobbled and removed from the output. +// It returns the end position of the last token consumed. +func (s *scanner) scan() (end int) { + end = s.end + s.token = nil + for s.start = s.next; s.next < len(s.b); { + i := bytes.IndexByte(s.b[s.next:], '-') + if i == -1 { + s.end = len(s.b) + s.next = len(s.b) + i = s.end - s.start + } else { + s.end = s.next + i + s.next = s.end + 1 + } + token := s.b[s.start:s.end] + if i < 1 || i > 8 || !isAlphaNum(token) { + s.gobble(ErrSyntax) + continue + } + s.token = token + return end + } + if n := len(s.b); n > 0 && s.b[n-1] == '-' { + s.setError(ErrSyntax) + s.b = s.b[:len(s.b)-1] + } + s.done = true + return end +} + +// acceptMinSize parses multiple tokens of the given size or greater. +// It returns the end position of the last token consumed. +func (s *scanner) acceptMinSize(min int) (end int) { + end = s.end + s.scan() + for ; len(s.token) >= min; s.scan() { + end = s.end + } + return end +} + +// Parse parses the given BCP 47 string and returns a valid Tag. If parsing +// failed it returns an error and any part of the tag that could be parsed. +// If parsing succeeded but an unknown value was found, it returns +// ValueError. The Tag returned in this case is just stripped of the unknown +// value. All other values are preserved. It accepts tags in the BCP 47 format +// and extensions to this standard defined in +// https://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers. +func Parse(s string) (t Tag, err error) { + // TODO: consider supporting old-style locale key-value pairs. + if s == "" { + return Und, ErrSyntax + } + defer func() { + if recover() != nil { + t = Und + err = ErrSyntax + return + } + }() + if len(s) <= maxAltTaglen { + b := [maxAltTaglen]byte{} + for i, c := range s { + // Generating invalid UTF-8 is okay as it won't match. 
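+ // For example (illustrative): "Art-Lojban" and "art_lojban" both
+ // normalize to "art-lojban" here before the grandfathered lookup below.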
+ if 'A' <= c && c <= 'Z' { + c += 'a' - 'A' + } else if c == '_' { + c = '-' + } + b[i] = byte(c) + } + if t, ok := grandfathered(b); ok { + return t, nil + } + } + scan := makeScannerString(s) + return parse(&scan, s) +} + +func parse(scan *scanner, s string) (t Tag, err error) { + t = Und + var end int + if n := len(scan.token); n <= 1 { + scan.toLower(0, len(scan.b)) + if n == 0 || scan.token[0] != 'x' { + return t, ErrSyntax + } + end = parseExtensions(scan) + } else if n >= 4 { + return Und, ErrSyntax + } else { // the usual case + t, end = parseTag(scan, true) + if n := len(scan.token); n == 1 { + t.pExt = uint16(end) + end = parseExtensions(scan) + } else if end < len(scan.b) { + scan.setError(ErrSyntax) + scan.b = scan.b[:end] + } + } + if int(t.pVariant) < len(scan.b) { + if end < len(s) { + s = s[:end] + } + if len(s) > 0 && tag.Compare(s, scan.b) == 0 { + t.str = s + } else { + t.str = string(scan.b) + } + } else { + t.pVariant, t.pExt = 0, 0 + } + return t, scan.err +} + +// parseTag parses language, script, region and variants. +// It returns a Tag and the end position in the input that was parsed. +// If doNorm is true, then <lang>-<extlang> will be normalized to <extlang>. +func parseTag(scan *scanner, doNorm bool) (t Tag, end int) { + var e error + // TODO: set an error if an unknown lang, script or region is encountered. + t.LangID, e = getLangID(scan.token) + scan.setError(e) + scan.replace(t.LangID.String()) + langStart := scan.start + end = scan.scan() + for len(scan.token) == 3 && isAlpha(scan.token[0]) { + // From http://tools.ietf.org/html/bcp47, <lang>-<extlang> tags are equivalent + // to a tag of the form <extlang>. + if doNorm { + lang, e := getLangID(scan.token) + if lang != 0 { + t.LangID = lang + langStr := lang.String() + copy(scan.b[langStart:], langStr) + scan.b[langStart+len(langStr)] = '-' + scan.start = langStart + len(langStr) + 1 + } + scan.gobble(e) + } + end = scan.scan() + } + if len(scan.token) == 4 && isAlpha(scan.token[0]) { + t.ScriptID, e = getScriptID(script, scan.token) + if t.ScriptID == 0 { + scan.gobble(e) + } + end = scan.scan() + } + if n := len(scan.token); n >= 2 && n <= 3 { + t.RegionID, e = getRegionID(scan.token) + if t.RegionID == 0 { + scan.gobble(e) + } else { + scan.replace(t.RegionID.String()) + } + end = scan.scan() + } + scan.toLower(scan.start, len(scan.b)) + t.pVariant = byte(end) + end = parseVariants(scan, end, t) + t.pExt = uint16(end) + return t, end +} + +var separator = []byte{'-'} + +// parseVariants scans tokens as long as each token is a valid variant string. +// Duplicate variants are removed. +func parseVariants(scan *scanner, end int, t Tag) int { + start := scan.start + varIDBuf := [4]uint8{} + variantBuf := [4][]byte{} + varID := varIDBuf[:0] + variant := variantBuf[:0] + last := -1 + needSort := false + for ; len(scan.token) >= 4; scan.scan() { + // TODO: measure the impact of needing this conversion and redesign + // the data structure if there is an issue. + v, ok := variantIndex[string(scan.token)] + if !ok { + // unknown variant + // TODO: allow user-defined variants? + scan.gobble(NewValueError(scan.token)) + continue + } + varID = append(varID, v) + variant = append(variant, scan.token) + if !needSort { + if last < int(v) { + last = int(v) + } else { + needSort = true + // There are no legal combinations of more than 7 variants + // (and this is by no means a useful sequence).
+ const maxVariants = 8 + if len(varID) > maxVariants { + break + } + } + } + end = scan.end + } + if needSort { + sort.Sort(variantsSort{varID, variant}) + k, l := 0, -1 + for i, v := range varID { + w := int(v) + if l == w { + // Remove duplicates. + continue + } + varID[k] = varID[i] + variant[k] = variant[i] + k++ + l = w + } + if str := bytes.Join(variant[:k], separator); len(str) == 0 { + end = start - 1 + } else { + scan.resizeRange(start, end, len(str)) + copy(scan.b[scan.start:], str) + end = scan.end + } + } + return end +} + +type variantsSort struct { + i []uint8 + v [][]byte +} + +func (s variantsSort) Len() int { + return len(s.i) +} + +func (s variantsSort) Swap(i, j int) { + s.i[i], s.i[j] = s.i[j], s.i[i] + s.v[i], s.v[j] = s.v[j], s.v[i] +} + +func (s variantsSort) Less(i, j int) bool { + return s.i[i] < s.i[j] +} + +type bytesSort struct { + b [][]byte + n int // first n bytes to compare +} + +func (b bytesSort) Len() int { + return len(b.b) +} + +func (b bytesSort) Swap(i, j int) { + b.b[i], b.b[j] = b.b[j], b.b[i] +} + +func (b bytesSort) Less(i, j int) bool { + for k := 0; k < b.n; k++ { + if b.b[i][k] == b.b[j][k] { + continue + } + return b.b[i][k] < b.b[j][k] + } + return false +} + +// parseExtensions parses and normalizes the extensions in the buffer. +// It returns the last position of scan.b that is part of any extension. +// It also trims scan.b to remove excess parts accordingly. +func parseExtensions(scan *scanner) int { + start := scan.start + exts := [][]byte{} + private := []byte{} + end := scan.end + for len(scan.token) == 1 { + extStart := scan.start + ext := scan.token[0] + end = parseExtension(scan) + extension := scan.b[extStart:end] + if len(extension) < 3 || (ext != 'x' && len(extension) < 4) { + scan.setError(ErrSyntax) + end = extStart + continue + } else if start == extStart && (ext == 'x' || scan.start == len(scan.b)) { + scan.b = scan.b[:end] + return end + } else if ext == 'x' { + private = extension + break + } + exts = append(exts, extension) + } + sort.Sort(bytesSort{exts, 1}) + if len(private) > 0 { + exts = append(exts, private) + } + scan.b = scan.b[:start] + if len(exts) > 0 { + scan.b = append(scan.b, bytes.Join(exts, separator)...) + } else if start > 0 { + // Strip trailing '-'. + scan.b = scan.b[:start-1] + } + return end +} + +// parseExtension parses a single extension and returns the position of +// the extension end. +func parseExtension(scan *scanner) int { + start, end := scan.start, scan.end + switch scan.token[0] { + case 'u': // https://www.ietf.org/rfc/rfc6067.txt + attrStart := end + scan.scan() + for last := []byte{}; len(scan.token) > 2; scan.scan() { + if bytes.Compare(scan.token, last) != -1 { + // Attributes are unsorted. Start over from scratch. + p := attrStart + 1 + scan.next = p + attrs := [][]byte{} + for scan.scan(); len(scan.token) > 2; scan.scan() { + attrs = append(attrs, scan.token) + end = scan.end + } + sort.Sort(bytesSort{attrs, 3}) + copy(scan.b[p:], bytes.Join(attrs, separator)) + break + } + last = scan.token + end = scan.end + } + // Scan key-type sequences. A key is of length 2 and may be followed + // by 0 or more "type" subtags from 3 to the maximum of 8 letters. 
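+ // For example (illustrative): in "u-ca-buddhist-nu-thai" the keys are
+ // "ca" and "nu", with types "buddhist" and "thai" respectively.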
+ var last, key []byte + for attrEnd := end; len(scan.token) == 2; last = key { + key = scan.token + end = scan.end + for scan.scan(); end < scan.end && len(scan.token) > 2; scan.scan() { + end = scan.end + } + // TODO: check key value validity + if bytes.Compare(key, last) != 1 || scan.err != nil { + // We have an invalid key or the keys are not sorted. + // Start scanning keys from scratch and reorder. + p := attrEnd + 1 + scan.next = p + keys := [][]byte{} + for scan.scan(); len(scan.token) == 2; { + keyStart := scan.start + end = scan.end + for scan.scan(); end < scan.end && len(scan.token) > 2; scan.scan() { + end = scan.end + } + keys = append(keys, scan.b[keyStart:end]) + } + sort.Stable(bytesSort{keys, 2}) + if n := len(keys); n > 0 { + k := 0 + for i := 1; i < n; i++ { + if !bytes.Equal(keys[k][:2], keys[i][:2]) { + k++ + keys[k] = keys[i] + } else if !bytes.Equal(keys[k], keys[i]) { + scan.setError(ErrDuplicateKey) + } + } + keys = keys[:k+1] + } + reordered := bytes.Join(keys, separator) + if e := p + len(reordered); e < end { + scan.deleteRange(e, end) + end = e + } + copy(scan.b[p:], reordered) + break + } + } + case 't': // https://www.ietf.org/rfc/rfc6497.txt + scan.scan() + if n := len(scan.token); n >= 2 && n <= 3 && isAlpha(scan.token[1]) { + _, end = parseTag(scan, false) + scan.toLower(start, end) + } + for len(scan.token) == 2 && !isAlpha(scan.token[1]) { + end = scan.acceptMinSize(3) + } + case 'x': + end = scan.acceptMinSize(1) + default: + end = scan.acceptMinSize(2) + } + return end +} + +// getExtension returns the name, body and end position of the extension. +func getExtension(s string, p int) (end int, ext string) { + if s[p] == '-' { + p++ + } + if s[p] == 'x' { + return len(s), s[p:] + } + end = nextExtension(s, p) + return end, s[p:end] +} + +// nextExtension finds the next extension within the string, searching +// for the -<char>- pattern from position p. +// In the vast majority of cases, language tags will have at most +// one extension and extensions tend to be small. +func nextExtension(s string, p int) int { + for n := len(s) - 3; p < n; { + if s[p] == '-' { + if s[p+2] == '-' { + return p + } + p += 3 + } else { + p++ + } + } + return len(s) +} diff --git a/vendor/golang.org/x/text/internal/language/tables.go b/vendor/golang.org/x/text/internal/language/tables.go new file mode 100644 index 00000000000..14167e74e40 --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/tables.go @@ -0,0 +1,3494 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package language + +import "golang.org/x/text/internal/tag" + +// CLDRVersion is the CLDR version from which the tables in this package are derived.
+const CLDRVersion = "32" + +const NumLanguages = 8798 + +const NumScripts = 261 + +const NumRegions = 358 + +type FromTo struct { + From uint16 + To uint16 +} + +const nonCanonicalUnd = 1201 +const ( + _af = 22 + _am = 39 + _ar = 58 + _az = 88 + _bg = 126 + _bn = 165 + _ca = 215 + _cs = 250 + _da = 257 + _de = 269 + _el = 310 + _en = 313 + _es = 318 + _et = 320 + _fa = 328 + _fi = 337 + _fil = 339 + _fr = 350 + _gu = 420 + _he = 444 + _hi = 446 + _hr = 465 + _hu = 469 + _hy = 471 + _id = 481 + _is = 504 + _it = 505 + _ja = 512 + _ka = 528 + _kk = 578 + _km = 586 + _kn = 593 + _ko = 596 + _ky = 650 + _lo = 696 + _lt = 704 + _lv = 711 + _mk = 767 + _ml = 772 + _mn = 779 + _mo = 784 + _mr = 795 + _ms = 799 + _mul = 806 + _my = 817 + _nb = 839 + _ne = 849 + _nl = 871 + _no = 879 + _pa = 925 + _pl = 947 + _pt = 960 + _ro = 988 + _ru = 994 + _sh = 1031 + _si = 1036 + _sk = 1042 + _sl = 1046 + _sq = 1073 + _sr = 1074 + _sv = 1092 + _sw = 1093 + _ta = 1104 + _te = 1121 + _th = 1131 + _tl = 1146 + _tn = 1152 + _tr = 1162 + _uk = 1198 + _ur = 1204 + _uz = 1212 + _vi = 1219 + _zh = 1321 + _zu = 1327 + _jbo = 515 + _ami = 1650 + _bnn = 2357 + _hak = 438 + _tlh = 14467 + _lb = 661 + _nv = 899 + _pwn = 12055 + _tao = 14188 + _tay = 14198 + _tsu = 14662 + _nn = 874 + _sfb = 13629 + _vgt = 15701 + _sgg = 13660 + _cmn = 3007 + _nan = 835 + _hsn = 467 +) + +const langPrivateStart = 0x2f72 + +const langPrivateEnd = 0x3179 + +// lang holds an alphabetically sorted list of ISO-639 language identifiers. +// All entries are 4 bytes. The index of the identifier (divided by 4) is the language tag. +// For 2-byte language identifiers, the two successive bytes have the following meaning: +// - if the first letter of the 2- and 3-letter ISO codes are the same: +// the second and third letter of the 3-letter ISO code. +// - otherwise: a 0 and a by 2 bits right-shifted index into altLangISO3. +// +// For 3-byte language identifiers the 4th byte is 0. 
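+// For example (an illustrative reading of the table below): the entry "aaar"
+// encodes the 2-letter code "aa" with ISO 639-3 "aar" (shared "a", then "ar"),
+// while "aai\x00" is a 3-letter-only identifier whose 4th byte is 0.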
+const lang tag.Index = "" + // Size: 5324 bytes + "---\x00aaaraai\x00aak\x00aau\x00abbkabi\x00abq\x00abr\x00abt\x00aby\x00a" + + "cd\x00ace\x00ach\x00ada\x00ade\x00adj\x00ady\x00adz\x00aeveaeb\x00aey" + + "\x00affragc\x00agd\x00agg\x00agm\x00ago\x00agq\x00aha\x00ahl\x00aho\x00a" + + "jg\x00akkaakk\x00ala\x00ali\x00aln\x00alt\x00ammhamm\x00amn\x00amo\x00am" + + "p\x00anrganc\x00ank\x00ann\x00any\x00aoj\x00aom\x00aoz\x00apc\x00apd\x00" + + "ape\x00apr\x00aps\x00apz\x00arraarc\x00arh\x00arn\x00aro\x00arq\x00ars" + + "\x00ary\x00arz\x00assmasa\x00ase\x00asg\x00aso\x00ast\x00ata\x00atg\x00a" + + "tj\x00auy\x00avvaavl\x00avn\x00avt\x00avu\x00awa\x00awb\x00awo\x00awx" + + "\x00ayymayb\x00azzebaakbal\x00ban\x00bap\x00bar\x00bas\x00bav\x00bax\x00" + + "bba\x00bbb\x00bbc\x00bbd\x00bbj\x00bbp\x00bbr\x00bcf\x00bch\x00bci\x00bc" + + "m\x00bcn\x00bco\x00bcq\x00bcu\x00bdd\x00beelbef\x00beh\x00bej\x00bem\x00" + + "bet\x00bew\x00bex\x00bez\x00bfd\x00bfq\x00bft\x00bfy\x00bgulbgc\x00bgn" + + "\x00bgx\x00bhihbhb\x00bhg\x00bhi\x00bhk\x00bhl\x00bho\x00bhy\x00biisbib" + + "\x00big\x00bik\x00bim\x00bin\x00bio\x00biq\x00bjh\x00bji\x00bjj\x00bjn" + + "\x00bjo\x00bjr\x00bjt\x00bjz\x00bkc\x00bkm\x00bkq\x00bku\x00bkv\x00blt" + + "\x00bmambmh\x00bmk\x00bmq\x00bmu\x00bnenbng\x00bnm\x00bnp\x00boodboj\x00" + + "bom\x00bon\x00bpy\x00bqc\x00bqi\x00bqp\x00bqv\x00brrebra\x00brh\x00brx" + + "\x00brz\x00bsosbsj\x00bsq\x00bss\x00bst\x00bto\x00btt\x00btv\x00bua\x00b" + + "uc\x00bud\x00bug\x00buk\x00bum\x00buo\x00bus\x00buu\x00bvb\x00bwd\x00bwr" + + "\x00bxh\x00bye\x00byn\x00byr\x00bys\x00byv\x00byx\x00bza\x00bze\x00bzf" + + "\x00bzh\x00bzw\x00caatcan\x00cbj\x00cch\x00ccp\x00ceheceb\x00cfa\x00cgg" + + "\x00chhachk\x00chm\x00cho\x00chp\x00chr\x00cja\x00cjm\x00cjv\x00ckb\x00c" + + "kl\x00cko\x00cky\x00cla\x00cme\x00cmg\x00cooscop\x00cps\x00crrecrh\x00cr" + + "j\x00crk\x00crl\x00crm\x00crs\x00csescsb\x00csw\x00ctd\x00cuhucvhvcyymda" + + "andad\x00daf\x00dag\x00dah\x00dak\x00dar\x00dav\x00dbd\x00dbq\x00dcc\x00" + + "ddn\x00deeuded\x00den\x00dga\x00dgh\x00dgi\x00dgl\x00dgr\x00dgz\x00dia" + + "\x00dje\x00dnj\x00dob\x00doi\x00dop\x00dow\x00dri\x00drs\x00dsb\x00dtm" + + "\x00dtp\x00dts\x00dty\x00dua\x00duc\x00dud\x00dug\x00dvivdva\x00dww\x00d" + + "yo\x00dyu\x00dzzodzg\x00ebu\x00eeweefi\x00egl\x00egy\x00eka\x00eky\x00el" + + "llema\x00emi\x00enngenn\x00enq\x00eopoeri\x00es\x00\x05esu\x00etstetr" + + "\x00ett\x00etu\x00etx\x00euusewo\x00ext\x00faasfaa\x00fab\x00fag\x00fai" + + "\x00fan\x00ffulffi\x00ffm\x00fiinfia\x00fil\x00fit\x00fjijflr\x00fmp\x00" + + "foaofod\x00fon\x00for\x00fpe\x00fqs\x00frrafrc\x00frp\x00frr\x00frs\x00f" + + "ub\x00fud\x00fue\x00fuf\x00fuh\x00fuq\x00fur\x00fuv\x00fuy\x00fvr\x00fyr" + + "ygalegaa\x00gaf\x00gag\x00gah\x00gaj\x00gam\x00gan\x00gaw\x00gay\x00gba" + + "\x00gbf\x00gbm\x00gby\x00gbz\x00gcr\x00gdlagde\x00gdn\x00gdr\x00geb\x00g" + + "ej\x00gel\x00gez\x00gfk\x00ggn\x00ghs\x00gil\x00gim\x00gjk\x00gjn\x00gju" + + "\x00gkn\x00gkp\x00gllgglk\x00gmm\x00gmv\x00gnrngnd\x00gng\x00god\x00gof" + + "\x00goi\x00gom\x00gon\x00gor\x00gos\x00got\x00grb\x00grc\x00grt\x00grw" + + "\x00gsw\x00guujgub\x00guc\x00gud\x00gur\x00guw\x00gux\x00guz\x00gvlvgvf" + + "\x00gvr\x00gvs\x00gwc\x00gwi\x00gwt\x00gyi\x00haauhag\x00hak\x00ham\x00h" + + "aw\x00haz\x00hbb\x00hdy\x00heebhhy\x00hiinhia\x00hif\x00hig\x00hih\x00hi" + + "l\x00hla\x00hlu\x00hmd\x00hmt\x00hnd\x00hne\x00hnj\x00hnn\x00hno\x00homo" + + "hoc\x00hoj\x00hot\x00hrrvhsb\x00hsn\x00htathuunhui\x00hyyehzerianaian" + + 
"\x00iar\x00iba\x00ibb\x00iby\x00ica\x00ich\x00idndidd\x00idi\x00idu\x00i" + + "eleife\x00igboigb\x00ige\x00iiiiijj\x00ikpkikk\x00ikt\x00ikw\x00ikx\x00i" + + "lo\x00imo\x00inndinh\x00iodoiou\x00iri\x00isslittaiukuiw\x00\x03iwm\x00i" + + "ws\x00izh\x00izi\x00japnjab\x00jam\x00jbo\x00jbu\x00jen\x00jgk\x00jgo" + + "\x00ji\x00\x06jib\x00jmc\x00jml\x00jra\x00jut\x00jvavjwavkaatkaa\x00kab" + + "\x00kac\x00kad\x00kai\x00kaj\x00kam\x00kao\x00kbd\x00kbm\x00kbp\x00kbq" + + "\x00kbx\x00kby\x00kcg\x00kck\x00kcl\x00kct\x00kde\x00kdh\x00kdl\x00kdt" + + "\x00kea\x00ken\x00kez\x00kfo\x00kfr\x00kfy\x00kgonkge\x00kgf\x00kgp\x00k" + + "ha\x00khb\x00khn\x00khq\x00khs\x00kht\x00khw\x00khz\x00kiikkij\x00kiu" + + "\x00kiw\x00kjuakjd\x00kjg\x00kjs\x00kjy\x00kkazkkc\x00kkj\x00klalkln\x00" + + "klq\x00klt\x00klx\x00kmhmkmb\x00kmh\x00kmo\x00kms\x00kmu\x00kmw\x00knank" + + "nf\x00knp\x00koorkoi\x00kok\x00kol\x00kos\x00koz\x00kpe\x00kpf\x00kpo" + + "\x00kpr\x00kpx\x00kqb\x00kqf\x00kqs\x00kqy\x00kraukrc\x00kri\x00krj\x00k" + + "rl\x00krs\x00kru\x00ksasksb\x00ksd\x00ksf\x00ksh\x00ksj\x00ksr\x00ktb" + + "\x00ktm\x00kto\x00kuurkub\x00kud\x00kue\x00kuj\x00kum\x00kun\x00kup\x00k" + + "us\x00kvomkvg\x00kvr\x00kvx\x00kw\x00\x01kwj\x00kwo\x00kxa\x00kxc\x00kxm" + + "\x00kxp\x00kxw\x00kxz\x00kyirkye\x00kyx\x00kzr\x00laatlab\x00lad\x00lag" + + "\x00lah\x00laj\x00las\x00lbtzlbe\x00lbu\x00lbw\x00lcm\x00lcp\x00ldb\x00l" + + "ed\x00lee\x00lem\x00lep\x00leq\x00leu\x00lez\x00lguglgg\x00liimlia\x00li" + + "d\x00lif\x00lig\x00lih\x00lij\x00lis\x00ljp\x00lki\x00lkt\x00lle\x00lln" + + "\x00lmn\x00lmo\x00lmp\x00lninlns\x00lnu\x00loaoloj\x00lok\x00lol\x00lor" + + "\x00los\x00loz\x00lrc\x00ltitltg\x00luublua\x00luo\x00luy\x00luz\x00lvav" + + "lwl\x00lzh\x00lzz\x00mad\x00maf\x00mag\x00mai\x00mak\x00man\x00mas\x00ma" + + "w\x00maz\x00mbh\x00mbo\x00mbq\x00mbu\x00mbw\x00mci\x00mcp\x00mcq\x00mcr" + + "\x00mcu\x00mda\x00mde\x00mdf\x00mdh\x00mdj\x00mdr\x00mdx\x00med\x00mee" + + "\x00mek\x00men\x00mer\x00met\x00meu\x00mfa\x00mfe\x00mfn\x00mfo\x00mfq" + + "\x00mglgmgh\x00mgl\x00mgo\x00mgp\x00mgy\x00mhahmhi\x00mhl\x00mirimif\x00" + + "min\x00mis\x00miw\x00mkkdmki\x00mkl\x00mkp\x00mkw\x00mlalmle\x00mlp\x00m" + + "ls\x00mmo\x00mmu\x00mmx\x00mnonmna\x00mnf\x00mni\x00mnw\x00moolmoa\x00mo" + + "e\x00moh\x00mos\x00mox\x00mpp\x00mps\x00mpt\x00mpx\x00mql\x00mrarmrd\x00" + + "mrj\x00mro\x00mssamtltmtc\x00mtf\x00mti\x00mtr\x00mua\x00mul\x00mur\x00m" + + "us\x00mva\x00mvn\x00mvy\x00mwk\x00mwr\x00mwv\x00mxc\x00mxm\x00myyamyk" + + "\x00mym\x00myv\x00myw\x00myx\x00myz\x00mzk\x00mzm\x00mzn\x00mzp\x00mzw" + + "\x00mzz\x00naaunac\x00naf\x00nah\x00nak\x00nan\x00nap\x00naq\x00nas\x00n" + + "bobnca\x00nce\x00ncf\x00nch\x00nco\x00ncu\x00nddendc\x00nds\x00neepneb" + + "\x00new\x00nex\x00nfr\x00ngdonga\x00ngb\x00ngl\x00nhb\x00nhe\x00nhw\x00n" + + "if\x00nii\x00nij\x00nin\x00niu\x00niy\x00niz\x00njo\x00nkg\x00nko\x00nll" + + "dnmg\x00nmz\x00nnnonnf\x00nnh\x00nnk\x00nnm\x00noornod\x00noe\x00non\x00" + + "nop\x00nou\x00nqo\x00nrblnrb\x00nsk\x00nsn\x00nso\x00nss\x00ntm\x00ntr" + + "\x00nui\x00nup\x00nus\x00nuv\x00nux\x00nvavnwb\x00nxq\x00nxr\x00nyyanym" + + "\x00nyn\x00nzi\x00occiogc\x00ojjiokr\x00okv\x00omrmong\x00onn\x00ons\x00" + + "opm\x00orrioro\x00oru\x00osssosa\x00ota\x00otk\x00ozm\x00paanpag\x00pal" + + "\x00pam\x00pap\x00pau\x00pbi\x00pcd\x00pcm\x00pdc\x00pdt\x00ped\x00peo" + + "\x00pex\x00pfl\x00phl\x00phn\x00pilipil\x00pip\x00pka\x00pko\x00plolpla" + + "\x00pms\x00png\x00pnn\x00pnt\x00pon\x00ppo\x00pra\x00prd\x00prg\x00psusp" + + 
"ss\x00ptorptp\x00puu\x00pwa\x00quuequc\x00qug\x00rai\x00raj\x00rao\x00rc" + + "f\x00rej\x00rel\x00res\x00rgn\x00rhg\x00ria\x00rif\x00rjs\x00rkt\x00rmoh" + + "rmf\x00rmo\x00rmt\x00rmu\x00rnunrna\x00rng\x00roonrob\x00rof\x00roo\x00r" + + "ro\x00rtm\x00ruusrue\x00rug\x00rw\x00\x04rwk\x00rwo\x00ryu\x00saansaf" + + "\x00sah\x00saq\x00sas\x00sat\x00sav\x00saz\x00sba\x00sbe\x00sbp\x00scrds" + + "ck\x00scl\x00scn\x00sco\x00scs\x00sdndsdc\x00sdh\x00semesef\x00seh\x00se" + + "i\x00ses\x00sgagsga\x00sgs\x00sgw\x00sgz\x00sh\x00\x02shi\x00shk\x00shn" + + "\x00shu\x00siinsid\x00sig\x00sil\x00sim\x00sjr\x00sklkskc\x00skr\x00sks" + + "\x00sllvsld\x00sli\x00sll\x00sly\x00smmosma\x00smi\x00smj\x00smn\x00smp" + + "\x00smq\x00sms\x00snnasnc\x00snk\x00snp\x00snx\x00sny\x00soomsok\x00soq" + + "\x00sou\x00soy\x00spd\x00spl\x00sps\x00sqqisrrpsrb\x00srn\x00srr\x00srx" + + "\x00ssswssd\x00ssg\x00ssy\x00stotstk\x00stq\x00suunsua\x00sue\x00suk\x00" + + "sur\x00sus\x00svweswwaswb\x00swc\x00swg\x00swp\x00swv\x00sxn\x00sxw\x00s" + + "yl\x00syr\x00szl\x00taamtaj\x00tal\x00tan\x00taq\x00tbc\x00tbd\x00tbf" + + "\x00tbg\x00tbo\x00tbw\x00tbz\x00tci\x00tcy\x00tdd\x00tdg\x00tdh\x00teelt" + + "ed\x00tem\x00teo\x00tet\x00tfi\x00tggktgc\x00tgo\x00tgu\x00thhathl\x00th" + + "q\x00thr\x00tiirtif\x00tig\x00tik\x00tim\x00tio\x00tiv\x00tkuktkl\x00tkr" + + "\x00tkt\x00tlgltlf\x00tlx\x00tly\x00tmh\x00tmy\x00tnsntnh\x00toontof\x00" + + "tog\x00toq\x00tpi\x00tpm\x00tpz\x00tqo\x00trurtru\x00trv\x00trw\x00tssot" + + "sd\x00tsf\x00tsg\x00tsj\x00tsw\x00ttatttd\x00tte\x00ttj\x00ttr\x00tts" + + "\x00ttt\x00tuh\x00tul\x00tum\x00tuq\x00tvd\x00tvl\x00tvu\x00twwitwh\x00t" + + "wq\x00txg\x00tyahtya\x00tyv\x00tzm\x00ubu\x00udm\x00ugiguga\x00ukkruli" + + "\x00umb\x00und\x00unr\x00unx\x00urrduri\x00urt\x00urw\x00usa\x00utr\x00u" + + "vh\x00uvl\x00uzzbvag\x00vai\x00van\x00veenvec\x00vep\x00viievic\x00viv" + + "\x00vls\x00vmf\x00vmw\x00voolvot\x00vro\x00vun\x00vut\x00walnwae\x00waj" + + "\x00wal\x00wan\x00war\x00wbp\x00wbq\x00wbr\x00wci\x00wer\x00wgi\x00whg" + + "\x00wib\x00wiu\x00wiv\x00wja\x00wji\x00wls\x00wmo\x00wnc\x00wni\x00wnu" + + "\x00woolwob\x00wos\x00wrs\x00wsk\x00wtm\x00wuu\x00wuv\x00wwa\x00xav\x00x" + + "bi\x00xcr\x00xes\x00xhhoxla\x00xlc\x00xld\x00xmf\x00xmn\x00xmr\x00xna" + + "\x00xnr\x00xog\x00xon\x00xpr\x00xrb\x00xsa\x00xsi\x00xsm\x00xsr\x00xwe" + + "\x00yam\x00yao\x00yap\x00yas\x00yat\x00yav\x00yay\x00yaz\x00yba\x00ybb" + + "\x00yby\x00yer\x00ygr\x00ygw\x00yiidyko\x00yle\x00ylg\x00yll\x00yml\x00y" + + "ooryon\x00yrb\x00yre\x00yrl\x00yss\x00yua\x00yue\x00yuj\x00yut\x00yuw" + + "\x00zahazag\x00zbl\x00zdj\x00zea\x00zgh\x00zhhozhx\x00zia\x00zlm\x00zmi" + + "\x00zne\x00zuulzxx\x00zza\x00\xff\xff\xff\xff" + +const langNoIndexOffset = 1330 + +// langNoIndex is a bit vector of all 3-letter language codes that are not used as an index +// in lookup tables. The language ids for these language codes are derived directly +// from the letters and are not consecutive. 
+// Size: 2197 bytes, 2197 elements +var langNoIndex = [2197]uint8{ + // Entry 0 - 3F + 0xff, 0xf8, 0xed, 0xfe, 0xeb, 0xd3, 0x3b, 0xd2, + 0xfb, 0xbf, 0x7a, 0xfa, 0x37, 0x1d, 0x3c, 0x57, + 0x6e, 0x97, 0x73, 0x38, 0xfb, 0xea, 0xbf, 0x70, + 0xad, 0x03, 0xff, 0xff, 0xcf, 0x05, 0x84, 0x72, + 0xe9, 0xbf, 0xfd, 0xbf, 0xbf, 0xf7, 0xfd, 0x77, + 0x0f, 0xff, 0xef, 0x6f, 0xff, 0xfb, 0xdf, 0xe2, + 0xc9, 0xf8, 0x7f, 0x7e, 0x4d, 0xbc, 0x0a, 0x6a, + 0x7c, 0xea, 0xe3, 0xfa, 0x7a, 0xbf, 0x67, 0xff, + // Entry 40 - 7F + 0xff, 0xff, 0xff, 0xdf, 0x2a, 0x54, 0x91, 0xc0, + 0x5d, 0xe3, 0x97, 0x14, 0x07, 0x20, 0xdd, 0xed, + 0x9f, 0x3f, 0xc9, 0x21, 0xf8, 0x3f, 0x94, 0x35, + 0x7c, 0x5f, 0xff, 0x5f, 0x8e, 0x6e, 0xdf, 0xff, + 0xff, 0xff, 0x55, 0x7c, 0xd3, 0xfd, 0xbf, 0xb5, + 0x7b, 0xdf, 0x7f, 0xf7, 0xca, 0xfe, 0xdb, 0xa3, + 0xa8, 0xff, 0x1f, 0x67, 0x7d, 0xeb, 0xef, 0xce, + 0xff, 0xff, 0x9f, 0xff, 0xb7, 0xef, 0xfe, 0xcf, + // Entry 80 - BF + 0xdb, 0xff, 0xf3, 0xcd, 0xfb, 0x7f, 0xff, 0xff, + 0xbb, 0xee, 0xf7, 0xbd, 0xdb, 0xff, 0x5f, 0xf7, + 0xfd, 0xf2, 0xfd, 0xff, 0x5e, 0x2f, 0x3b, 0xba, + 0x7e, 0xff, 0xff, 0xfe, 0xf7, 0xff, 0xdd, 0xff, + 0xfd, 0xdf, 0xfb, 0xfe, 0x9d, 0xb4, 0xd3, 0xff, + 0xef, 0xff, 0xdf, 0xf7, 0x7f, 0xb7, 0xfd, 0xd5, + 0xa5, 0x77, 0x40, 0xff, 0x9c, 0xc1, 0x41, 0x2c, + 0x08, 0x21, 0x41, 0x00, 0x50, 0x40, 0x00, 0x80, + // Entry C0 - FF + 0xfb, 0x4a, 0xf2, 0x9f, 0xb4, 0x42, 0x41, 0x96, + 0x1b, 0x14, 0x08, 0xf3, 0x2b, 0xe7, 0x17, 0x56, + 0x05, 0x7d, 0x0e, 0x1c, 0x37, 0x7f, 0xf3, 0xef, + 0x97, 0xff, 0x5d, 0x38, 0x64, 0x08, 0x00, 0x10, + 0xbc, 0x85, 0xaf, 0xdf, 0xff, 0xff, 0x7b, 0x35, + 0x3e, 0xc7, 0xc7, 0xdf, 0xff, 0x01, 0x81, 0x00, + 0xb0, 0x05, 0x80, 0x00, 0x20, 0x00, 0x00, 0x03, + 0x40, 0x00, 0x40, 0x92, 0x21, 0x50, 0xb1, 0x5d, + // Entry 100 - 13F + 0xfd, 0xdc, 0xbe, 0x5e, 0x00, 0x00, 0x02, 0x64, + 0x0d, 0x19, 0x41, 0xdf, 0x79, 0x22, 0x00, 0x00, + 0x00, 0x5e, 0x64, 0xdc, 0x24, 0xe5, 0xd9, 0xe3, + 0xfe, 0xff, 0xfd, 0xcb, 0x9f, 0x14, 0x41, 0x0c, + 0x86, 0x00, 0xd1, 0x00, 0xf0, 0xc7, 0x67, 0x5f, + 0x56, 0x99, 0x5e, 0xb5, 0x6c, 0xaf, 0x03, 0x00, + 0x02, 0x00, 0x00, 0x00, 0xc0, 0x37, 0xda, 0x56, + 0x90, 0x6d, 0x01, 0x2e, 0x96, 0x69, 0x20, 0xfb, + // Entry 140 - 17F + 0xff, 0x3f, 0x00, 0x00, 0x00, 0x01, 0x0c, 0x16, + 0x03, 0x00, 0x00, 0xb0, 0x14, 0x23, 0x50, 0x06, + 0x0a, 0x00, 0x01, 0x00, 0x00, 0x10, 0x11, 0x09, + 0x00, 0x00, 0x60, 0x10, 0x00, 0x00, 0x00, 0x10, + 0x00, 0x00, 0x44, 0x00, 0x00, 0x10, 0x00, 0x05, + 0x08, 0x00, 0x00, 0x05, 0x00, 0x80, 0x28, 0x04, + 0x00, 0x00, 0x40, 0xd5, 0x2d, 0x00, 0x64, 0x35, + 0x24, 0x52, 0xf4, 0xd5, 0xbf, 0x62, 0xc9, 0x03, + // Entry 180 - 1BF + 0x00, 0x80, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x04, 0x13, 0x39, 0x01, 0xdd, 0x57, 0x98, + 0x21, 0x18, 0x81, 0x08, 0x00, 0x01, 0x40, 0x82, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x40, 0x00, 0x44, 0x00, 0x00, 0x80, 0xea, + 0xa9, 0x39, 0x00, 0x02, 0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, + // Entry 1C0 - 1FF + 0x00, 0x03, 0x28, 0x05, 0x00, 0x00, 0x00, 0x00, + 0x04, 0x20, 0x04, 0xa6, 0x00, 0x04, 0x00, 0x00, + 0x81, 0x50, 0x00, 0x00, 0x00, 0x11, 0x84, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x55, + 0x02, 0x10, 0x08, 0x04, 0x00, 0x00, 0x00, 0x40, + 0x30, 0x83, 0x01, 0x00, 0x00, 0x00, 0x11, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x1e, 0xcd, 0xbf, 0x7a, 0xbf, + // Entry 200 - 23F + 0xdf, 0xc3, 0x83, 0x82, 0xc0, 0xfb, 0x57, 0x27, + 0xed, 0x55, 0xe7, 0x01, 0x00, 0x20, 0xb2, 0xc5, + 
0xa4, 0x45, 0x25, 0x9b, 0x02, 0xdf, 0xe1, 0xdf, + 0x03, 0x44, 0x08, 0x90, 0x01, 0x04, 0x81, 0xe3, + 0x92, 0x54, 0xdb, 0x28, 0xd3, 0x5f, 0xfe, 0x6d, + 0x79, 0xed, 0x1c, 0x7f, 0x04, 0x08, 0x00, 0x01, + 0x21, 0x12, 0x64, 0x5f, 0xdd, 0x0e, 0x85, 0x4f, + 0x40, 0x40, 0x00, 0x04, 0xf1, 0xfd, 0x3d, 0x54, + // Entry 240 - 27F + 0xe8, 0x03, 0xb4, 0x27, 0x23, 0x0d, 0x00, 0x00, + 0x20, 0x7b, 0x78, 0x02, 0x07, 0x84, 0x00, 0xf0, + 0xbb, 0x7e, 0x5a, 0x00, 0x18, 0x04, 0x81, 0x00, + 0x00, 0x00, 0x80, 0x10, 0x90, 0x1c, 0x01, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x10, 0x40, 0x00, 0x04, + 0x08, 0xa0, 0x70, 0xa5, 0x0c, 0x40, 0x00, 0x00, + 0x91, 0x24, 0x04, 0x68, 0x00, 0x20, 0x70, 0xff, + 0x7b, 0x7f, 0x70, 0x00, 0x05, 0x9b, 0xdd, 0x66, + // Entry 280 - 2BF + 0x03, 0x00, 0x11, 0x00, 0x00, 0x00, 0x40, 0x05, + 0xb5, 0xb6, 0x80, 0x08, 0x04, 0x00, 0x04, 0x51, + 0xe2, 0xef, 0xfd, 0x3f, 0x05, 0x09, 0x08, 0x05, + 0x40, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, + 0x0c, 0x00, 0x00, 0x00, 0x00, 0x81, 0x00, 0x60, + 0xe7, 0x48, 0x00, 0x81, 0x20, 0xc0, 0x05, 0x80, + 0x03, 0x00, 0x00, 0x00, 0x8c, 0x50, 0x40, 0x04, + 0x84, 0x47, 0x84, 0x40, 0x20, 0x10, 0x00, 0x20, + // Entry 2C0 - 2FF + 0x02, 0x50, 0x80, 0x11, 0x00, 0x99, 0x6c, 0xe2, + 0x50, 0x27, 0x1d, 0x11, 0x29, 0x0e, 0x59, 0xe9, + 0x33, 0x08, 0x00, 0x20, 0x04, 0x40, 0x10, 0x00, + 0x00, 0x00, 0x50, 0x44, 0x92, 0x49, 0xd6, 0x5d, + 0xa7, 0x81, 0x47, 0x97, 0xfb, 0x00, 0x10, 0x00, + 0x08, 0x00, 0x80, 0x00, 0x40, 0x04, 0x00, 0x01, + 0x02, 0x00, 0x01, 0x40, 0x80, 0x00, 0x40, 0x08, + 0xd8, 0xeb, 0xf6, 0x39, 0xc4, 0x8d, 0x12, 0x00, + // Entry 300 - 33F + 0x00, 0x0c, 0x04, 0x01, 0x20, 0x20, 0xdd, 0xa0, + 0x01, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, + 0x04, 0x10, 0xd0, 0x9d, 0x95, 0x13, 0x04, 0x80, + 0x00, 0x01, 0xd0, 0x16, 0x40, 0x00, 0x10, 0xb0, + 0x10, 0x62, 0x4c, 0xd2, 0x02, 0x01, 0x4a, 0x00, + 0x46, 0x04, 0x00, 0x08, 0x02, 0x00, 0x20, 0x80, + 0x00, 0x80, 0x06, 0x00, 0x08, 0x00, 0x00, 0x00, + 0x00, 0xf0, 0xd8, 0x6f, 0x15, 0x02, 0x08, 0x00, + // Entry 340 - 37F + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x10, 0x01, + 0x00, 0x10, 0x00, 0x00, 0x00, 0xf0, 0x84, 0xe3, + 0xdd, 0xbf, 0xf9, 0xf9, 0x3b, 0x7f, 0x7f, 0xdb, + 0xfd, 0xfc, 0xfe, 0xdf, 0xff, 0xfd, 0xff, 0xf6, + 0xfb, 0xfc, 0xf7, 0x1f, 0xff, 0xb3, 0x6c, 0xff, + 0xd9, 0xad, 0xdf, 0xfe, 0xef, 0xba, 0xdf, 0xff, + 0xff, 0xff, 0xb7, 0xdd, 0x7d, 0xbf, 0xab, 0x7f, + 0xfd, 0xfd, 0xdf, 0x2f, 0x9c, 0xdf, 0xf3, 0x6f, + // Entry 380 - 3BF + 0xdf, 0xdd, 0xff, 0xfb, 0xee, 0xd2, 0xab, 0x5f, + 0xd5, 0xdf, 0x7f, 0xff, 0xeb, 0xff, 0xe4, 0x4d, + 0xf9, 0xff, 0xfe, 0xf7, 0xfd, 0xdf, 0xfb, 0xbf, + 0xee, 0xdb, 0x6f, 0xef, 0xff, 0x7f, 0xff, 0xff, + 0xf7, 0x5f, 0xd3, 0x3b, 0xfd, 0xd9, 0xdf, 0xeb, + 0xbc, 0x08, 0x05, 0x24, 0xff, 0x07, 0x70, 0xfe, + 0xe6, 0x5e, 0x00, 0x08, 0x00, 0x83, 0x7d, 0x1f, + 0x06, 0xe6, 0x72, 0x60, 0xd1, 0x3c, 0x7f, 0x44, + // Entry 3C0 - 3FF + 0x02, 0x30, 0x9f, 0x7a, 0x16, 0xbd, 0x7f, 0x57, + 0xf2, 0xff, 0x31, 0xff, 0xf2, 0x1e, 0x90, 0xf7, + 0xf1, 0xf9, 0x45, 0x80, 0x01, 0x02, 0x00, 0x20, + 0x40, 0x54, 0x9f, 0x8a, 0xdf, 0xf9, 0x6e, 0x11, + 0x86, 0x51, 0xc0, 0xf3, 0xfb, 0x47, 0x40, 0x03, + 0x05, 0xd1, 0x50, 0x5c, 0x00, 0x40, 0x00, 0x10, + 0x04, 0x02, 0x00, 0x00, 0x0a, 0x00, 0x17, 0xd2, + 0xb9, 0xfd, 0xfc, 0xba, 0xfe, 0xef, 0xc7, 0xbe, + // Entry 400 - 43F + 0x53, 0x6f, 0xdf, 0xe7, 0xdb, 0x65, 0xbb, 0x7f, + 0xfa, 0xff, 0x77, 0xf3, 0xef, 0xbf, 0xfd, 0xf7, + 0xdf, 0xdf, 0x9b, 0x7f, 0xff, 0xff, 0x7f, 0x6f, + 0xf7, 0xfb, 0xeb, 0xdf, 0xbc, 0xff, 0xbf, 0x6b, + 0x7b, 0xfb, 0xff, 0xce, 0x76, 0xbd, 0xf7, 0xf7, + 0xdf, 0xdc, 0xf7, 0xf7, 0xff, 0xdf, 
0xf3, 0xfe, + 0xef, 0xff, 0xff, 0xff, 0xb6, 0x7f, 0x7f, 0xde, + 0xf7, 0xb9, 0xeb, 0x77, 0xff, 0xfb, 0xbf, 0xdf, + // Entry 440 - 47F + 0xfd, 0xfe, 0xfb, 0xff, 0xfe, 0xeb, 0x1f, 0x7d, + 0x2f, 0xfd, 0xb6, 0xb5, 0xa5, 0xfc, 0xff, 0xfd, + 0x7f, 0x4e, 0xbf, 0x8f, 0xae, 0xff, 0xee, 0xdf, + 0x7f, 0xf7, 0x73, 0x02, 0x02, 0x04, 0xfc, 0xf7, + 0xff, 0xb7, 0xd7, 0xef, 0xfe, 0xcd, 0xf5, 0xce, + 0xe2, 0x8e, 0xe7, 0xbf, 0xb7, 0xff, 0x56, 0xfd, + 0xcd, 0xff, 0xfb, 0xff, 0xdf, 0xd7, 0xea, 0xff, + 0xe5, 0x5f, 0x6d, 0x0f, 0xa7, 0x51, 0x06, 0xc4, + // Entry 480 - 4BF + 0x93, 0x50, 0x5d, 0xaf, 0xa6, 0xff, 0x99, 0xfb, + 0x63, 0x1d, 0x53, 0xff, 0xef, 0xb7, 0x35, 0x20, + 0x14, 0x00, 0x55, 0x51, 0xc2, 0x65, 0xf5, 0x41, + 0xe2, 0xff, 0xfc, 0xdf, 0x02, 0x85, 0xc5, 0x05, + 0x00, 0x22, 0x00, 0x74, 0x69, 0x10, 0x08, 0x05, + 0x41, 0x00, 0x01, 0x06, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x51, 0x20, 0x05, 0x04, 0x01, 0x00, 0x00, + 0x06, 0x11, 0x20, 0x00, 0x18, 0x01, 0x92, 0xf1, + // Entry 4C0 - 4FF + 0xfd, 0x47, 0x69, 0x06, 0x95, 0x06, 0x57, 0xed, + 0xfb, 0x4d, 0x1c, 0x6b, 0x83, 0x04, 0x62, 0x40, + 0x00, 0x11, 0x42, 0x00, 0x00, 0x00, 0x54, 0x83, + 0xb8, 0x4f, 0x10, 0x8e, 0x89, 0x46, 0xde, 0xf7, + 0x13, 0x31, 0x00, 0x20, 0x00, 0x00, 0x00, 0x90, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x10, 0x00, + 0x01, 0x00, 0x00, 0xf0, 0x5b, 0xf4, 0xbe, 0x3d, + 0xbe, 0xcf, 0xf7, 0xaf, 0x42, 0x04, 0x84, 0x41, + // Entry 500 - 53F + 0x30, 0xff, 0x79, 0x72, 0x04, 0x00, 0x00, 0x49, + 0x2d, 0x14, 0x27, 0x5f, 0xed, 0xf1, 0x3f, 0xe7, + 0x3f, 0x00, 0x00, 0x02, 0xc6, 0xa0, 0x1e, 0xf8, + 0xbb, 0xff, 0xfd, 0xfb, 0xb7, 0xfd, 0xe7, 0xf7, + 0xfd, 0xfc, 0xd5, 0xed, 0x47, 0xf4, 0x7e, 0x10, + 0x01, 0x01, 0x84, 0x6d, 0xff, 0xf7, 0xdd, 0xf9, + 0x5b, 0x05, 0x86, 0xed, 0xf5, 0x77, 0xbd, 0x3c, + 0x00, 0x00, 0x00, 0x42, 0x71, 0x42, 0x00, 0x40, + // Entry 540 - 57F + 0x00, 0x00, 0x01, 0x43, 0x19, 0x24, 0x08, 0x00, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + // Entry 580 - 5BF + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xab, 0xbd, 0xe7, 0x57, 0xee, 0x13, 0x5d, + 0x09, 0xc1, 0x40, 0x21, 0xfa, 0x17, 0x01, 0x80, + 0x00, 0x00, 0x00, 0x00, 0xf0, 0xce, 0xfb, 0xbf, + 0x00, 0x23, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, + 0x00, 0x30, 0x15, 0xa3, 0x10, 0x00, 0x00, 0x00, + 0x11, 0x04, 0x16, 0x00, 0x00, 0x02, 0x20, 0x81, + 0xa3, 0x01, 0x50, 0x00, 0x00, 0x83, 0x11, 0x40, + // Entry 5C0 - 5FF + 0x00, 0x00, 0x00, 0xf0, 0xdd, 0x7b, 0xbe, 0x02, + 0xaa, 0x10, 0x5d, 0x98, 0x52, 0x00, 0x80, 0x20, + 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x02, 0x02, + 0x3d, 0x40, 0x10, 0x02, 0x10, 0x61, 0x5a, 0x9d, + 0x31, 0x00, 0x00, 0x00, 0x01, 0x18, 0x02, 0x20, + 0x00, 0x00, 0x01, 0x00, 0x42, 0x00, 0x20, 0x00, + 0x00, 0x1f, 0xdf, 0xd2, 0xb9, 0xff, 0xfd, 0x3f, + 0x1f, 0x98, 0xcf, 0x9c, 0xff, 0xaf, 0x5f, 0xfe, + // Entry 600 - 63F + 0x7b, 0x4b, 0x40, 0x10, 0xe1, 0xfd, 0xaf, 0xd9, + 0xb7, 0xf6, 0xfb, 0xb3, 0xc7, 0xff, 0x6f, 0xf1, + 0x73, 0xb1, 0x7f, 0x9f, 0x7f, 0xbd, 0xfc, 0xb7, + 0xee, 0x1c, 0xfa, 0xcb, 0xef, 0xdd, 0xf9, 0xbd, + 0x6e, 0xae, 0x55, 0xfd, 0x6e, 0x81, 0x76, 0x9f, + 0xd4, 0x77, 0xf5, 0x7d, 0xfb, 0xff, 0xeb, 0xfe, + 0xbe, 0x5f, 0x46, 0x5b, 0xe9, 0x5f, 0x50, 0x18, + 0x02, 0xfa, 0xf7, 0x9d, 0x15, 0x97, 0x05, 0x0f, + // Entry 640 - 67F + 0x75, 0xc4, 0x7d, 0x81, 0x92, 0xf5, 0x57, 0x6c, + 
0xff, 0xe4, 0xef, 0x6f, 0xff, 0xfc, 0xdd, 0xde, + 0xfc, 0xfd, 0x76, 0x5f, 0x7a, 0x3f, 0x00, 0x98, + 0x02, 0xfb, 0xa3, 0xef, 0xf3, 0xd6, 0xf2, 0xff, + 0xb9, 0xda, 0x7d, 0xd0, 0x3e, 0x15, 0x7b, 0xb4, + 0xf5, 0x3e, 0xff, 0xff, 0xf1, 0xf7, 0xff, 0xe7, + 0x5f, 0xff, 0xff, 0x9e, 0xdf, 0xf6, 0xd7, 0xb9, + 0xef, 0x27, 0x80, 0xbb, 0xc5, 0xff, 0xff, 0xe3, + // Entry 680 - 6BF + 0x97, 0x9d, 0xbf, 0x9f, 0xf7, 0xc7, 0xfd, 0x37, + 0xce, 0x7f, 0x44, 0x1d, 0x73, 0x7f, 0xf8, 0xda, + 0x5d, 0xce, 0x7d, 0x06, 0xb9, 0xea, 0x79, 0xa0, + 0x1a, 0x20, 0x00, 0x30, 0x02, 0x04, 0x24, 0x08, + 0x04, 0x00, 0x00, 0x40, 0xd4, 0x02, 0x04, 0x00, + 0x00, 0x04, 0x00, 0x04, 0x00, 0x20, 0x09, 0x06, + 0x50, 0x00, 0x08, 0x00, 0x00, 0x00, 0x24, 0x00, + 0x04, 0x00, 0x10, 0xdc, 0x58, 0xd7, 0x0d, 0x0f, + // Entry 6C0 - 6FF + 0x54, 0x4d, 0xf1, 0x16, 0x44, 0xd5, 0x42, 0x08, + 0x40, 0x02, 0x00, 0x40, 0x00, 0x08, 0x00, 0x00, + 0x00, 0xdc, 0xfb, 0xcb, 0x0e, 0x58, 0x48, 0x41, + 0x24, 0x20, 0x04, 0x00, 0x30, 0x12, 0x40, 0x00, + 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x80, 0x10, 0x10, 0xab, + 0x6d, 0x93, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x80, 0x80, 0x25, 0x00, 0x00, + // Entry 700 - 73F + 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, + 0x80, 0x86, 0xc2, 0x00, 0x00, 0x01, 0x00, 0x01, + 0xff, 0x18, 0x02, 0x00, 0x02, 0xf0, 0xfd, 0x79, + 0x3b, 0x00, 0x25, 0x00, 0x00, 0x00, 0x02, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, + 0x03, 0x00, 0x09, 0x20, 0x00, 0x00, 0x01, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 740 - 77F + 0x00, 0x00, 0x00, 0xef, 0xd5, 0xfd, 0xcf, 0x7e, + 0xb0, 0x11, 0x00, 0x00, 0x00, 0x92, 0x01, 0x46, + 0xcd, 0xf9, 0x5c, 0x00, 0x01, 0x00, 0x30, 0x04, + 0x04, 0x55, 0x00, 0x01, 0x04, 0xf4, 0x3f, 0x4a, + 0x01, 0x00, 0x00, 0xb0, 0x80, 0x20, 0x55, 0x75, + 0x97, 0x7c, 0xdf, 0x31, 0xcc, 0x68, 0xd1, 0x03, + 0xd5, 0x57, 0x27, 0x14, 0x01, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x2c, 0xf7, 0xcb, 0x1f, 0x14, 0x60, + // Entry 780 - 7BF + 0x83, 0x68, 0x01, 0x10, 0x8b, 0x38, 0x8a, 0x01, + 0x00, 0x00, 0x20, 0x00, 0x24, 0x44, 0x00, 0x00, + 0x10, 0x03, 0x31, 0x02, 0x01, 0x00, 0x00, 0xf0, + 0xf5, 0xff, 0xd5, 0x97, 0xbc, 0x70, 0xd6, 0x78, + 0x78, 0x15, 0x50, 0x05, 0xa4, 0x84, 0xa9, 0x41, + 0x00, 0x00, 0x00, 0x6b, 0x39, 0x52, 0x74, 0x40, + 0xe8, 0x30, 0x90, 0x6a, 0x92, 0x00, 0x00, 0x02, + 0xff, 0xef, 0xff, 0x4b, 0x85, 0x53, 0xf4, 0xed, + // Entry 7C0 - 7FF + 0xdd, 0xbf, 0xf2, 0x5d, 0xc7, 0x0c, 0xd5, 0x42, + 0xfc, 0xff, 0xf7, 0x1f, 0x00, 0x80, 0x40, 0x56, + 0xcc, 0x16, 0x9e, 0xea, 0x35, 0x7d, 0xef, 0xff, + 0xbd, 0xa4, 0xaf, 0x01, 0x44, 0x18, 0x01, 0x4d, + 0x4e, 0x4a, 0x08, 0x50, 0x28, 0x30, 0xe0, 0x80, + 0x10, 0x20, 0x24, 0x00, 0xff, 0x2f, 0xd3, 0x60, + 0xfe, 0x01, 0x02, 0x88, 0x2a, 0x40, 0x16, 0x01, + 0x01, 0x15, 0x2b, 0x3c, 0x01, 0x00, 0x00, 0x10, + // Entry 800 - 83F + 0x90, 0x49, 0x41, 0x02, 0x02, 0x01, 0xe1, 0xbf, + 0xbf, 0x03, 0x00, 0x00, 0x10, 0xdc, 0xa3, 0xd1, + 0x40, 0x9c, 0x44, 0xdf, 0xf5, 0x8f, 0x66, 0xb3, + 0x55, 0x20, 0xd4, 0xc1, 0xd8, 0x30, 0x3d, 0x80, + 0x00, 0x00, 0x00, 0x04, 0xd4, 0x11, 0xc5, 0x84, + 0x2f, 0x50, 0x00, 0x22, 0x50, 0x6e, 0xbd, 0x93, + 0x07, 0x00, 0x20, 0x10, 0x84, 0xb2, 0x45, 0x10, + 0x06, 0x44, 0x00, 0x00, 0x12, 0x02, 0x11, 0x00, + // Entry 840 - 87F + 0xf0, 0xfb, 0xfd, 0x7f, 0x05, 0x00, 0x16, 0x89, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x03, + 0x00, 0x00, 0x00, 0x00, 0x03, 0x30, 0x02, 0x28, + 0x84, 0x00, 0x21, 0xc0, 0x23, 0x24, 0x00, 0x00, + 0x00, 0xcb, 0xe4, 0x3a, 0x46, 0x88, 
0x54, 0xf1, + 0xef, 0xff, 0x7f, 0x12, 0x01, 0x01, 0x84, 0x50, + 0x07, 0xfc, 0xff, 0xff, 0x0f, 0x01, 0x00, 0x40, + 0x10, 0x38, 0x01, 0x01, 0x1c, 0x12, 0x40, 0xe1, + // Entry 880 - 8BF + 0x76, 0x16, 0x08, 0x03, 0x10, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x24, + 0x0a, 0x00, 0x80, 0x00, 0x00, +} + +// altLangISO3 holds an alphabetically sorted list of 3-letter language code alternatives +// to 2-letter language codes that cannot be derived using the method described above. +// Each 3-letter code is followed by its 1-byte langID. +const altLangISO3 tag.Index = "---\x00cor\x00hbs\x01heb\x02kin\x03spa\x04yid\x05\xff\xff\xff\xff" + +// altLangIndex is used to convert indexes in altLangISO3 to langIDs. +// Size: 12 bytes, 6 elements +var altLangIndex = [6]uint16{ + 0x0281, 0x0407, 0x01fb, 0x03e5, 0x013e, 0x0208, +} + +// AliasMap maps langIDs to their suggested replacements. +// Size: 772 bytes, 193 elements +var AliasMap = [193]FromTo{ + 0: {From: 0x82, To: 0x88}, + 1: {From: 0x187, To: 0x1ae}, + 2: {From: 0x1f3, To: 0x1e1}, + 3: {From: 0x1fb, To: 0x1bc}, + 4: {From: 0x208, To: 0x512}, + 5: {From: 0x20f, To: 0x20e}, + 6: {From: 0x310, To: 0x3dc}, + 7: {From: 0x347, To: 0x36f}, + 8: {From: 0x407, To: 0x432}, + 9: {From: 0x47a, To: 0x153}, + 10: {From: 0x490, To: 0x451}, + 11: {From: 0x4a2, To: 0x21}, + 12: {From: 0x53e, To: 0x544}, + 13: {From: 0x58f, To: 0x12d}, + 14: {From: 0x62b, To: 0x34}, + 15: {From: 0x62f, To: 0x14}, + 16: {From: 0x630, To: 0x1eb1}, + 17: {From: 0x651, To: 0x431}, + 18: {From: 0x662, To: 0x431}, + 19: {From: 0x6ed, To: 0x3a}, + 20: {From: 0x6f8, To: 0x1d7}, + 21: {From: 0x709, To: 0x3625}, + 22: {From: 0x73e, To: 0x21a1}, + 23: {From: 0x7b3, To: 0x56}, + 24: {From: 0x7b9, To: 0x299b}, + 25: {From: 0x7c5, To: 0x58}, + 26: {From: 0x7e6, To: 0x145}, + 27: {From: 0x80c, To: 0x5a}, + 28: {From: 0x815, To: 0x8d}, + 29: {From: 0x87e, To: 0x810}, + 30: {From: 0x8a8, To: 0x8b7}, + 31: {From: 0x8c3, To: 0xee3}, + 32: {From: 0x8fa, To: 0x1dc}, + 33: {From: 0x9ef, To: 0x331}, + 34: {From: 0xa36, To: 0x2c5}, + 35: {From: 0xa3d, To: 0xbf}, + 36: {From: 0xabe, To: 0x3322}, + 37: {From: 0xb38, To: 0x529}, + 38: {From: 0xb75, To: 0x265a}, + 39: {From: 0xb7e, To: 0xbc3}, + 40: {From: 0xb9b, To: 0x44e}, + 41: {From: 0xbbc, To: 0x4229}, + 42: {From: 0xbbf, To: 0x529}, + 43: {From: 0xbfe, To: 0x2da7}, + 44: {From: 0xc2e, To: 0x3181}, + 45: {From: 0xcb9, To: 0xf3}, + 46: {From: 0xd08, To: 0xfa}, + 47: {From: 0xdc8, To: 0x11a}, + 48: {From: 0xdd7, To: 0x32d}, + 49: {From: 0xdf8, To: 0xdfb}, + 50: {From: 0xdfe, To: 0x531}, + 51: {From: 0xe01, To: 0xdf3}, + 52: {From: 0xedf, To: 0x205a}, + 53: {From: 0xee9, To: 0x222e}, + 54: {From: 0xeee, To: 0x2e9a}, + 55: {From: 0xf39, To: 0x367}, + 56: {From: 0x10d0, To: 0x140}, + 57: {From: 0x1104, To: 0x2d0}, + 58: {From: 0x11a0, To: 0x1ec}, + 59: {From: 0x1279, To: 0x21}, + 60: {From: 0x1424, To: 0x15e}, + 61: {From: 0x1470, To: 0x14e}, + 62: {From: 0x151f, To: 0xd9b}, + 63: {From: 0x1523, To: 0x390}, + 64: {From: 0x1532, To: 0x19f}, + 65: {From: 0x1580, To: 0x210}, + 66: {From: 0x1583, To: 0x10d}, + 67: {From: 0x15a3, To: 0x3caf}, + 68: {From: 0x1630, To: 0x222e}, + 69: {From: 0x166a, To: 0x19b}, + 70: {From: 0x16c8, To: 0x136}, + 71: {From: 0x1700, To: 0x29f8}, + 72: {From: 0x1718, To: 0x194}, + 73: {From: 0x1727, To: 0xf3f}, + 74: {From: 0x177a, To: 0x178}, + 75: {From: 0x1809, To: 0x17b6}, + 76: {From: 0x1816, To: 0x18f3}, + 77: {From: 0x188a, To: 0x436}, + 78: {From: 0x1979, To: 0x1d01}, + 79: {From: 0x1a74, To: 0x2bb0}, + 80: 
{From: 0x1a8a, To: 0x1f8}, + 81: {From: 0x1b5a, To: 0x1fa}, + 82: {From: 0x1b86, To: 0x1515}, + 83: {From: 0x1d64, To: 0x2c9b}, + 84: {From: 0x2038, To: 0x37b1}, + 85: {From: 0x203d, To: 0x20dd}, + 86: {From: 0x2042, To: 0x2e00}, + 87: {From: 0x205a, To: 0x30b}, + 88: {From: 0x20e3, To: 0x274}, + 89: {From: 0x20ee, To: 0x263}, + 90: {From: 0x20f2, To: 0x22d}, + 91: {From: 0x20f9, To: 0x256}, + 92: {From: 0x210f, To: 0x21eb}, + 93: {From: 0x2135, To: 0x27d}, + 94: {From: 0x2160, To: 0x913}, + 95: {From: 0x2199, To: 0x121}, + 96: {From: 0x21ce, To: 0x1561}, + 97: {From: 0x21e6, To: 0x504}, + 98: {From: 0x21f4, To: 0x49f}, + 99: {From: 0x21fb, To: 0x269}, + 100: {From: 0x222d, To: 0x121}, + 101: {From: 0x2237, To: 0x121}, + 102: {From: 0x2248, To: 0x217d}, + 103: {From: 0x2262, To: 0x92a}, + 104: {From: 0x2316, To: 0x3226}, + 105: {From: 0x236a, To: 0x2835}, + 106: {From: 0x2382, To: 0x3365}, + 107: {From: 0x2472, To: 0x2c7}, + 108: {From: 0x24e4, To: 0x2ff}, + 109: {From: 0x24f0, To: 0x2fa}, + 110: {From: 0x24fa, To: 0x31f}, + 111: {From: 0x2550, To: 0xb5b}, + 112: {From: 0x25a9, To: 0xe2}, + 113: {From: 0x263e, To: 0x2d0}, + 114: {From: 0x26c9, To: 0x26b4}, + 115: {From: 0x26f9, To: 0x3c8}, + 116: {From: 0x2727, To: 0x3caf}, + 117: {From: 0x2755, To: 0x6a4}, + 118: {From: 0x2765, To: 0x26b4}, + 119: {From: 0x2789, To: 0x4358}, + 120: {From: 0x27c9, To: 0x2001}, + 121: {From: 0x28ea, To: 0x27b1}, + 122: {From: 0x28ef, To: 0x2837}, + 123: {From: 0x28fe, To: 0xaa5}, + 124: {From: 0x2914, To: 0x351}, + 125: {From: 0x2986, To: 0x2da7}, + 126: {From: 0x29f0, To: 0x96b}, + 127: {From: 0x2b1a, To: 0x38d}, + 128: {From: 0x2bfc, To: 0x395}, + 129: {From: 0x2c3f, To: 0x3caf}, + 130: {From: 0x2ce1, To: 0x2201}, + 131: {From: 0x2cfc, To: 0x3be}, + 132: {From: 0x2d13, To: 0x597}, + 133: {From: 0x2d47, To: 0x148}, + 134: {From: 0x2d48, To: 0x148}, + 135: {From: 0x2dff, To: 0x2f1}, + 136: {From: 0x2e08, To: 0x19cc}, + 137: {From: 0x2e10, To: 0xc45}, + 138: {From: 0x2e1a, To: 0x2d95}, + 139: {From: 0x2e21, To: 0x292}, + 140: {From: 0x2e54, To: 0x7d}, + 141: {From: 0x2e65, To: 0x2282}, + 142: {From: 0x2e97, To: 0x1a4}, + 143: {From: 0x2ea0, To: 0x2e9b}, + 144: {From: 0x2eef, To: 0x2ed7}, + 145: {From: 0x3193, To: 0x3c4}, + 146: {From: 0x3366, To: 0x338e}, + 147: {From: 0x342a, To: 0x3dc}, + 148: {From: 0x34ee, To: 0x18d0}, + 149: {From: 0x35c8, To: 0x2c9b}, + 150: {From: 0x35e6, To: 0x412}, + 151: {From: 0x35f5, To: 0x24b}, + 152: {From: 0x360d, To: 0x1dc}, + 153: {From: 0x3658, To: 0x246}, + 154: {From: 0x3676, To: 0x3f4}, + 155: {From: 0x36fd, To: 0x445}, + 156: {From: 0x3747, To: 0x3b42}, + 157: {From: 0x37c0, To: 0x121}, + 158: {From: 0x3816, To: 0x38f2}, + 159: {From: 0x382a, To: 0x2b48}, + 160: {From: 0x382b, To: 0x2c9b}, + 161: {From: 0x382f, To: 0xa9}, + 162: {From: 0x3832, To: 0x3228}, + 163: {From: 0x386c, To: 0x39a6}, + 164: {From: 0x3892, To: 0x3fc0}, + 165: {From: 0x38a0, To: 0x45f}, + 166: {From: 0x38a5, To: 0x39d7}, + 167: {From: 0x38b4, To: 0x1fa4}, + 168: {From: 0x38b5, To: 0x2e9a}, + 169: {From: 0x38fa, To: 0x38f1}, + 170: {From: 0x395c, To: 0x47e}, + 171: {From: 0x3b4e, To: 0xd91}, + 172: {From: 0x3b78, To: 0x137}, + 173: {From: 0x3c99, To: 0x4bc}, + 174: {From: 0x3fbd, To: 0x100}, + 175: {From: 0x4208, To: 0xa91}, + 176: {From: 0x42be, To: 0x573}, + 177: {From: 0x42f9, To: 0x3f60}, + 178: {From: 0x4378, To: 0x25a}, + 179: {From: 0x43b8, To: 0xe6c}, + 180: {From: 0x43cd, To: 0x10f}, + 181: {From: 0x43d4, To: 0x4848}, + 182: {From: 0x44af, To: 0x3322}, + 183: {From: 0x44e3, To: 0x512}, + 
184: {From: 0x45ca, To: 0x2409}, + 185: {From: 0x45dd, To: 0x26dc}, + 186: {From: 0x4610, To: 0x48ae}, + 187: {From: 0x46ae, To: 0x46a0}, + 188: {From: 0x473e, To: 0x4745}, + 189: {From: 0x4817, To: 0x3503}, + 190: {From: 0x483b, To: 0x208b}, + 191: {From: 0x4916, To: 0x31f}, + 192: {From: 0x49a7, To: 0x523}, +} + +// Size: 193 bytes, 193 elements +var AliasTypes = [193]AliasType{ + // Entry 0 - 3F + 1, 0, 0, 0, 0, 0, 0, 1, 2, 2, 0, 1, 0, 0, 0, 0, + 1, 2, 1, 1, 2, 0, 0, 1, 0, 1, 2, 1, 1, 0, 0, 0, + 0, 2, 1, 1, 0, 2, 0, 0, 1, 0, 1, 0, 0, 1, 2, 1, + 1, 1, 1, 0, 0, 0, 0, 2, 1, 1, 1, 1, 2, 1, 0, 1, + // Entry 40 - 7F + 1, 2, 2, 0, 0, 1, 2, 0, 1, 0, 1, 1, 1, 1, 0, 0, + 2, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 2, 2, 2, 0, + 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, + // Entry 80 - BF + 1, 0, 0, 1, 0, 2, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, + 0, 1, 1, 2, 0, 0, 2, 0, 0, 1, 1, 1, 0, 0, 0, 0, + 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 2, 0, + 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, + // Entry C0 - FF + 1, +} + +const ( + _Latn = 91 + _Hani = 57 + _Hans = 59 + _Hant = 60 + _Qaaa = 149 + _Qaai = 157 + _Qabx = 198 + _Zinh = 255 + _Zyyy = 260 + _Zzzz = 261 +) + +// script is an alphabetically sorted list of ISO 15924 codes. The index +// of the script in the string, divided by 4, is the internal scriptID. +const script tag.Index = "" + // Size: 1052 bytes + "----AdlmAfakAghbAhomArabAranArmiArmnAvstBaliBamuBassBatkBengBhksBlisBopo" + + "BrahBraiBugiBuhdCakmCansCariChamCherChrsCirtCoptCpmnCprtCyrlCyrsDevaDiak" + + "DogrDsrtDuplEgydEgyhEgypElbaElymEthiGeokGeorGlagGongGonmGothGranGrekGujr" + + "GuruHanbHangHaniHanoHansHantHatrHebrHiraHluwHmngHmnpHrktHungIndsItalJamo" + + "JavaJpanJurcKaliKanaKawiKharKhmrKhojKitlKitsKndaKoreKpelKthiLanaLaooLatf" + + "LatgLatnLekeLepcLimbLinaLinbLisuLomaLyciLydiMahjMakaMandManiMarcMayaMedf" + + "MendMercMeroMlymModiMongMoonMrooMteiMultMymrNagmNandNarbNbatNewaNkdbNkgb" + + "NkooNshuOgamOlckOrkhOryaOsgeOsmaOugrPalmPaucPcunPelmPermPhagPhliPhlpPhlv" + + "PhnxPiqdPlrdPrtiPsinQaaaQaabQaacQaadQaaeQaafQaagQaahQaaiQaajQaakQaalQaam" + + "QaanQaaoQaapQaaqQaarQaasQaatQaauQaavQaawQaaxQaayQaazQabaQabbQabcQabdQabe" + + "QabfQabgQabhQabiQabjQabkQablQabmQabnQaboQabpQabqQabrQabsQabtQabuQabvQabw" + + "QabxRanjRjngRohgRoroRunrSamrSaraSarbSaurSgnwShawShrdShuiSiddSindSinhSogd" + + "SogoSoraSoyoSundSunuSyloSyrcSyreSyrjSyrnTagbTakrTaleTaluTamlTangTavtTelu" + + "TengTfngTglgThaaThaiTibtTirhTnsaTotoUgarVaiiVispVithWaraWchoWoleXpeoXsux" + + "YeziYiiiZanbZinhZmthZsyeZsymZxxxZyyyZzzz\xff\xff\xff\xff" + +// suppressScript is an index from langID to the dominant script for that language, +// if it exists. If a script is given, it should be suppressed from the language tag. 
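+// Editor's illustrative sketch (not part of the generated tables; the helper
+// names are invented): the scriptID encoding above can be read back with a
+// slice of the script index, and suppressScript is a plain array lookup:
+//
+//	// scriptCode returns the 4-letter ISO 15924 code for a scriptID,
+//	// e.g. scriptCode(_Latn) == "Latn", given _Latn == 91 above and the
+//	// rule that the code's offset in script divided by 4 is the scriptID.
+//	func scriptCode(id int) string {
+//		return string(script[id*4 : id*4+4])
+//	}
+//
+//	// dominantScript returns the scriptID to suppress for a langID,
+//	// or 0 when none is recorded.
+//	func dominantScript(lang uint16) uint8 {
+//		return suppressScript[lang]
+//	}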
+// Size: 1330 bytes, 1330 elements +var suppressScript = [1330]uint8{ + // Entry 0 - 3F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2c, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 40 - 7F + 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, + // Entry 80 - BF + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry C0 - FF + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 100 - 13F + 0x5b, 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xed, 0x00, 0x00, 0x00, 0x00, 0xef, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x34, 0x00, + 0x00, 0x5b, 0x00, 0x00, 0x5b, 0x00, 0x5b, 0x00, + // Entry 140 - 17F + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x5b, 0x00, 0x00, + 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x5b, 0x00, 0x00, 0x00, 0x5b, 0x00, 0x00, + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0x00, + 0x00, 0x5b, 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0x5b, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 180 - 1BF + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x5b, 0x00, 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x5b, 0x35, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x3e, 0x00, 0x22, 0x00, + // Entry 1C0 - 1FF + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x5b, 0x5b, 0x00, 0x5b, 0x5b, 0x00, 0x08, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00, + 0x5b, 0x5b, 0x00, 0x3e, 0x00, 0x00, 0x00, 0x00, + // Entry 200 - 23F + 0x49, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 
0x2e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 240 - 27F + 0x00, 0x00, 0x20, 0x00, 0x00, 0x5b, 0x00, 0x00, + 0x00, 0x00, 0x4f, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x53, 0x00, 0x00, 0x54, 0x00, 0x22, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 280 - 2BF + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0x00, 0x00, + 0x58, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 2C0 - 2FF + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x22, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + // Entry 300 - 33F + 0x00, 0x00, 0x00, 0x00, 0x6f, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x5b, + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0x00, + // Entry 340 - 37F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0x00, + 0x5b, 0x22, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, + 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00, 0x5b, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x5b, 0x00, + 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 380 - 3BF + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x83, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, + // Entry 3C0 - 3FF + 0x5b, 0x00, 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, + 0x00, 0x5b, 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x20, 0x00, 0x00, 0x5b, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 400 - 43F + 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xd6, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x5b, 0x00, + 0x00, 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, + 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, + 0x00, 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00, + // Entry 440 - 47F + 0x00, 0x00, 0x00, 0x00, 0x5b, 0x5b, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xe6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0xe9, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0xee, 0x00, 0x00, 0x00, 0x2c, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, + 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x5b, 0x00, + // Entry 480 - 4BF + 0x5b, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x5b, 0x00, + 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x5b, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 4C0 - 4FF + 0x5b, 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 500 - 53F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x3e, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, + 0x00, 0x00, +} + +const ( + _001 = 1 + _419 = 31 + _BR = 65 + _CA = 73 + _ES = 111 + _GB = 124 + _MD = 189 + _PT = 239 + _UK = 307 + _US = 310 + _ZZ = 358 + _XA = 324 + _XC = 326 + _XK = 334 +) + +// isoRegionOffset needs to be added to the index of regionISO to obtain the regionID +// for 2-letter ISO codes. (The first isoRegionOffset regionIDs are reserved for +// the UN.M49 codes used for groups.) +const isoRegionOffset = 32 + +// regionTypes defines the status of a region for various standards. 
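+// Editor's note (assumption: these flag values mirror the constants defined
+// elsewhere in golang.org/x/text/internal/language; they are not stated in
+// this file): each entry looks like a small bit set, roughly
+//
+//	const (
+//		iso3166UserAssigned = 1 << iota // 0x01
+//		ccTLD                           // 0x02
+//		bcp47Region                     // 0x04
+//	)
+//
+// so 0x06 would mark an ISO region that is both a ccTLD and a valid BCP 47
+// region, 0x05 a user-assigned BCP 47 region (the QA-QZ, XA-XZ, ZZ ranges),
+// and 0x00 a UN.M49 grouping with no ISO status.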
+// Size: 359 bytes, 359 elements +var regionTypes = [359]uint8{ + // Entry 0 - 3F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x05, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + // Entry 40 - 7F + 0x06, 0x06, 0x06, 0x06, 0x04, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x04, 0x04, 0x06, + 0x04, 0x00, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x04, 0x06, 0x04, 0x06, 0x06, 0x06, 0x06, 0x00, + 0x06, 0x04, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x04, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x00, 0x06, 0x04, 0x06, 0x06, 0x06, 0x06, 0x06, + // Entry 80 - BF + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x00, 0x04, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x00, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + // Entry C0 - FF + 0x06, 0x06, 0x00, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x00, 0x06, 0x06, 0x06, 0x06, 0x00, 0x06, 0x04, + 0x06, 0x06, 0x06, 0x06, 0x00, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x00, 0x06, 0x06, 0x00, 0x06, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + // Entry 100 - 13F + 0x05, 0x05, 0x05, 0x06, 0x00, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x04, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x02, 0x06, 0x04, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x00, 0x06, 0x06, 0x06, 0x06, + // Entry 140 - 17F + 0x06, 0x06, 0x00, 0x06, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x04, 0x06, + 0x06, 0x04, 0x06, 0x06, 0x04, 0x06, 0x05, +} + +// regionISO holds a list of alphabetically sorted 2-letter ISO region codes. +// Each 2-letter code is followed by two bytes with the following meaning: +// - [A-Z]{2}: the first letter of the 2-letter code plus these two +// letters form the 3-letter ISO code. +// - 0, n: index into altRegionISO3.
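+// Editor's illustrative sketch (invented helper; the published package
+// exposes this as Region.ISO3): decoding a 3-letter code from the packing
+// described above, where a 0 in the third byte redirects into altRegionISO3
+// and a space means no 3-letter code is assigned:
+//
+//	func iso3(regionID int) string {
+//		e := regionISO[(regionID-isoRegionOffset)*4:][:4]
+//		switch e[2] {
+//		case 0:
+//			return altRegionISO3[e[3]:][:3] // alt code at byte offset e[3]
+//		case ' ':
+//			return "ZZZ" // no 3-letter code
+//		}
+//		return string(e[0:1]) + string(e[2:4])
+//	}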
+const regionISO tag.Index = "" + // Size: 1312 bytes + "AAAAACSCADNDAEREAFFGAGTGAIIAALLBAMRMANNTAOGOAQTAARRGASSMATUTAUUSAWBWAXLA" + + "AZZEBAIHBBRBBDGDBEELBFFABGGRBHHRBIDIBJENBLLMBMMUBNRNBOOLBQESBRRABSHSBTTN" + + "BUURBVVTBWWABYLRBZLZCAANCCCKCDODCFAFCGOGCHHECIIVCKOKCLHLCMMRCNHNCOOLCPPT" + + "CQ CRRICS\x00\x00CTTECUUBCVPVCWUWCXXRCYYPCZZEDDDRDEEUDGGADJJIDKNKDMMADO" + + "OMDYHYDZZAEA ECCUEESTEGGYEHSHERRIESSPETTHEU\x00\x03EZ FIINFJJIFKLKFMSM" + + "FOROFQ\x00\x18FRRAFXXXGAABGBBRGDRDGEEOGFUFGGGYGHHAGIIBGLRLGMMBGNINGPLPGQ" + + "NQGRRCGS\x00\x06GTTMGUUMGWNBGYUYHKKGHMMDHNNDHRRVHTTIHUUNHVVOIC IDDNIERL" + + "ILSRIMMNINNDIOOTIQRQIRRNISSLITTAJEEYJMAMJOORJPPNJTTNKEENKGGZKHHMKIIRKM" + + "\x00\x09KNNAKP\x00\x0cKRORKWWTKY\x00\x0fKZAZLAAOLBBNLCCALIIELKKALRBRLSSO" + + "LTTULUUXLVVALYBYMAARMCCOMDDAMENEMFAFMGDGMHHLMIIDMKKDMLLIMMMRMNNGMOACMPNP" + + "MQTQMRRTMSSRMTLTMUUSMVDVMWWIMXEXMYYSMZOZNAAMNCCLNEERNFFKNGGANHHBNIICNLLD" + + "NOORNPPLNQ\x00\x1eNRRUNTTZNUIUNZZLOMMNPAANPCCIPEERPFYFPGNGPHHLPKAKPLOLPM" + + "\x00\x12PNCNPRRIPSSEPTRTPUUSPWLWPYRYPZCZQAATQMMMQNNNQOOOQPPPQQQQQRRRQSSS" + + "QTTTQU\x00\x03QVVVQWWWQXXXQYYYQZZZREEURHHOROOURS\x00\x15RUUSRWWASAAUSBLB" + + "SCYCSDDNSEWESGGPSHHNSIVNSJJMSKVKSLLESMMRSNENSOOMSRURSSSDSTTPSUUNSVLVSXXM" + + "SYYRSZWZTAAATCCATDCDTF\x00\x18TGGOTHHATJJKTKKLTLLSTMKMTNUNTOONTPMPTRURTT" + + "TOTVUVTWWNTZZAUAKRUGGAUK UMMIUN USSAUYRYUZZBVAATVCCTVDDRVEENVGGBVIIRVN" + + "NMVUUTWFLFWKAKWSSMXAAAXBBBXCCCXDDDXEEEXFFFXGGGXHHHXIIIXJJJXKKKXLLLXMMMXN" + + "NNXOOOXPPPXQQQXRRRXSSSXTTTXUUUXVVVXWWWXXXXXYYYXZZZYDMDYEEMYT\x00\x1bYUUG" + + "ZAAFZMMBZRARZWWEZZZZ\xff\xff\xff\xff" + +// altRegionISO3 holds a list of 3-letter region codes that cannot be +// mapped to 2-letter codes using the default algorithm. This is a short list. +const altRegionISO3 string = "SCGQUUSGSCOMPRKCYMSPMSRBATFMYTATN" + +// altRegionIDs holds a list of regionIDs the positions of which match those +// of the 3-letter ISO codes in altRegionISO3. +// Size: 22 bytes, 11 elements +var altRegionIDs = [11]uint16{ + 0x0058, 0x0071, 0x0089, 0x00a9, 0x00ab, 0x00ae, 0x00eb, 0x0106, + 0x0122, 0x0160, 0x00dd, +} + +// Size: 80 bytes, 20 elements +var regionOldMap = [20]FromTo{ + 0: {From: 0x44, To: 0xc5}, + 1: {From: 0x59, To: 0xa8}, + 2: {From: 0x60, To: 0x61}, + 3: {From: 0x67, To: 0x3b}, + 4: {From: 0x7a, To: 0x79}, + 5: {From: 0x94, To: 0x37}, + 6: {From: 0xa4, To: 0x134}, + 7: {From: 0xc2, To: 0x134}, + 8: {From: 0xd8, To: 0x140}, + 9: {From: 0xdd, To: 0x2b}, + 10: {From: 0xf0, To: 0x134}, + 11: {From: 0xf3, To: 0xe3}, + 12: {From: 0xfd, To: 0x71}, + 13: {From: 0x104, To: 0x165}, + 14: {From: 0x12b, To: 0x127}, + 15: {From: 0x133, To: 0x7c}, + 16: {From: 0x13b, To: 0x13f}, + 17: {From: 0x142, To: 0x134}, + 18: {From: 0x15e, To: 0x15f}, + 19: {From: 0x164, To: 0x4b}, +} + +// m49 maps regionIDs to UN.M49 codes. The first isoRegionOffset entries are +// codes indicating collections of regions. 
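+// Editor's note (illustration only): the mapping is a direct index, so the
+// UN.M49 number for a regionID is simply m49[regionID]; for example
+// m49[_US] == 840, while m49[_419] == 419 is one of the grouping codes held
+// in the first isoRegionOffset slots.
+//
+//	func unM49(regionID uint16) int16 { return m49[regionID] }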
+// Size: 718 bytes, 359 elements +var m49 = [359]int16{ + // Entry 0 - 3F + 0, 1, 2, 3, 5, 9, 11, 13, + 14, 15, 17, 18, 19, 21, 29, 30, + 34, 35, 39, 53, 54, 57, 61, 142, + 143, 145, 150, 151, 154, 155, 202, 419, + 958, 0, 20, 784, 4, 28, 660, 8, + 51, 530, 24, 10, 32, 16, 40, 36, + 533, 248, 31, 70, 52, 50, 56, 854, + 100, 48, 108, 204, 652, 60, 96, 68, + // Entry 40 - 7F + 535, 76, 44, 64, 104, 74, 72, 112, + 84, 124, 166, 180, 140, 178, 756, 384, + 184, 152, 120, 156, 170, 0, 0, 188, + 891, 296, 192, 132, 531, 162, 196, 203, + 278, 276, 0, 262, 208, 212, 214, 204, + 12, 0, 218, 233, 818, 732, 232, 724, + 231, 967, 0, 246, 242, 238, 583, 234, + 0, 250, 249, 266, 826, 308, 268, 254, + // Entry 80 - BF + 831, 288, 292, 304, 270, 324, 312, 226, + 300, 239, 320, 316, 624, 328, 344, 334, + 340, 191, 332, 348, 854, 0, 360, 372, + 376, 833, 356, 86, 368, 364, 352, 380, + 832, 388, 400, 392, 581, 404, 417, 116, + 296, 174, 659, 408, 410, 414, 136, 398, + 418, 422, 662, 438, 144, 430, 426, 440, + 442, 428, 434, 504, 492, 498, 499, 663, + // Entry C0 - FF + 450, 584, 581, 807, 466, 104, 496, 446, + 580, 474, 478, 500, 470, 480, 462, 454, + 484, 458, 508, 516, 540, 562, 574, 566, + 548, 558, 528, 578, 524, 10, 520, 536, + 570, 554, 512, 591, 0, 604, 258, 598, + 608, 586, 616, 666, 612, 630, 275, 620, + 581, 585, 600, 591, 634, 959, 960, 961, + 962, 963, 964, 965, 966, 967, 968, 969, + // Entry 100 - 13F + 970, 971, 972, 638, 716, 642, 688, 643, + 646, 682, 90, 690, 729, 752, 702, 654, + 705, 744, 703, 694, 674, 686, 706, 740, + 728, 678, 810, 222, 534, 760, 748, 0, + 796, 148, 260, 768, 764, 762, 772, 626, + 795, 788, 776, 626, 792, 780, 798, 158, + 834, 804, 800, 826, 581, 0, 840, 858, + 860, 336, 670, 704, 862, 92, 850, 704, + // Entry 140 - 17F + 548, 876, 581, 882, 973, 974, 975, 976, + 977, 978, 979, 980, 981, 982, 983, 984, + 985, 986, 987, 988, 989, 990, 991, 992, + 993, 994, 995, 996, 997, 998, 720, 887, + 175, 891, 710, 894, 180, 716, 999, +} + +// m49Index gives indexes into fromM49 based on the three most significant bits +// of a 10-bit UN.M49 code. To search a UN.M49 code in fromM49, search in +// +// fromM49[m49Index[msb3(code)]:m49Index[msb3(code)+1]] +// +// for an entry where the first 7 bits match the 7 lsb of the UN.M49 code. +// The region code is stored in the 9 lsb of the indexed value. +// Size: 18 bytes, 9 elements +var m49Index = [9]int16{ + 0, 59, 108, 143, 181, 220, 259, 291, + 333, +} + +// fromM49 contains entries to map UN.M49 codes to regions. See m49Index for details.
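+// Editor's illustrative sketch (invented helper name): the reverse lookup
+// that the m49Index comment above describes. m49Index buckets fromM49 by the
+// top 3 bits of the 10-bit code; each entry stores the code's low 7 bits in
+// its top 7 bits and the regionID in its low 9 bits:
+//
+//	func regionFromM49(code uint16) (uint16, bool) {
+//		b, e := m49Index[code>>7], m49Index[code>>7+1]
+//		for _, v := range fromM49[b:e] {
+//			if v>>9 == code&0x7f {
+//				return v & 0x1ff, true // e.g. code 840 (USA) -> 0x9136&0x1ff == _US
+//			}
+//		}
+//		return 0, false
+//	}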
+// Size: 666 bytes, 333 elements +var fromM49 = [333]uint16{ + // Entry 0 - 3F + 0x0201, 0x0402, 0x0603, 0x0824, 0x0a04, 0x1027, 0x1205, 0x142b, + 0x1606, 0x1868, 0x1a07, 0x1c08, 0x1e09, 0x202d, 0x220a, 0x240b, + 0x260c, 0x2822, 0x2a0d, 0x302a, 0x3825, 0x3a0e, 0x3c0f, 0x3e32, + 0x402c, 0x4410, 0x4611, 0x482f, 0x4e12, 0x502e, 0x5842, 0x6039, + 0x6435, 0x6628, 0x6834, 0x6a13, 0x6c14, 0x7036, 0x7215, 0x783d, + 0x7a16, 0x8043, 0x883f, 0x8c33, 0x9046, 0x9445, 0x9841, 0xa848, + 0xac9b, 0xb50a, 0xb93d, 0xc03e, 0xc838, 0xd0c5, 0xd83a, 0xe047, + 0xe8a7, 0xf052, 0xf849, 0x085b, 0x10ae, 0x184c, 0x1c17, 0x1e18, + // Entry 40 - 7F + 0x20b4, 0x2219, 0x2921, 0x2c1a, 0x2e1b, 0x3051, 0x341c, 0x361d, + 0x3853, 0x3d2f, 0x445d, 0x4c4a, 0x5454, 0x5ca9, 0x5f60, 0x644d, + 0x684b, 0x7050, 0x7857, 0x7e91, 0x805a, 0x885e, 0x941e, 0x965f, + 0x983b, 0xa064, 0xa865, 0xac66, 0xb46a, 0xbd1b, 0xc487, 0xcc70, + 0xce70, 0xd06e, 0xd26b, 0xd477, 0xdc75, 0xde89, 0xe474, 0xec73, + 0xf031, 0xf27a, 0xf479, 0xfc7f, 0x04e6, 0x0922, 0x0c63, 0x147b, + 0x187e, 0x1c84, 0x26ee, 0x2861, 0x2c60, 0x3061, 0x4081, 0x4882, + 0x50a8, 0x5888, 0x6083, 0x687d, 0x7086, 0x788b, 0x808a, 0x8885, + // Entry 80 - BF + 0x908d, 0x9892, 0x9c8f, 0xa139, 0xa890, 0xb08e, 0xb893, 0xc09e, + 0xc89a, 0xd096, 0xd89d, 0xe09c, 0xe897, 0xf098, 0xf89f, 0x004f, + 0x08a1, 0x10a3, 0x1caf, 0x20a2, 0x28a5, 0x30ab, 0x34ac, 0x3cad, + 0x42a6, 0x44b0, 0x461f, 0x4cb1, 0x54b6, 0x58b9, 0x5cb5, 0x64ba, + 0x6cb3, 0x70b7, 0x74b8, 0x7cc7, 0x84c0, 0x8ccf, 0x94d1, 0x9cce, + 0xa4c4, 0xaccc, 0xb4c9, 0xbcca, 0xc0cd, 0xc8d0, 0xd8bc, 0xe0c6, + 0xe4bd, 0xe6be, 0xe8cb, 0xf0bb, 0xf8d2, 0x00e2, 0x08d3, 0x10de, + 0x18dc, 0x20da, 0x2429, 0x265c, 0x2a30, 0x2d1c, 0x2e40, 0x30df, + // Entry C0 - FF + 0x38d4, 0x4940, 0x54e1, 0x5cd9, 0x64d5, 0x6cd7, 0x74e0, 0x7cd6, + 0x84db, 0x88c8, 0x8b34, 0x8e76, 0x90c1, 0x92f1, 0x94e9, 0x9ee3, + 0xace7, 0xb0f2, 0xb8e5, 0xc0e8, 0xc8ec, 0xd0ea, 0xd8ef, 0xe08c, + 0xe527, 0xeced, 0xf4f4, 0xfd03, 0x0505, 0x0707, 0x0d08, 0x183c, + 0x1d0f, 0x26aa, 0x2826, 0x2cb2, 0x2ebf, 0x34eb, 0x3d3a, 0x4514, + 0x4d19, 0x5509, 0x5d15, 0x6106, 0x650b, 0x6d13, 0x7d0e, 0x7f12, + 0x813f, 0x8310, 0x8516, 0x8d62, 0x9965, 0xa15e, 0xa86f, 0xb118, + 0xb30c, 0xb86d, 0xc10c, 0xc917, 0xd111, 0xd91e, 0xe10d, 0xe84e, + // Entry 100 - 13F + 0xf11d, 0xf525, 0xf924, 0x0123, 0x0926, 0x112a, 0x192d, 0x2023, + 0x2929, 0x312c, 0x3728, 0x3920, 0x3d2e, 0x4132, 0x4931, 0x4ec3, + 0x551a, 0x646c, 0x747c, 0x7e80, 0x80a0, 0x8299, 0x8530, 0x9136, + 0xa53e, 0xac37, 0xb537, 0xb938, 0xbd3c, 0xd941, 0xe543, 0xed5f, + 0xef5f, 0xf658, 0xfd63, 0x7c20, 0x7ef5, 0x80f6, 0x82f7, 0x84f8, + 0x86f9, 0x88fa, 0x8afb, 0x8cfc, 0x8e71, 0x90fe, 0x92ff, 0x9500, + 0x9701, 0x9902, 0x9b44, 0x9d45, 0x9f46, 0xa147, 0xa348, 0xa549, + 0xa74a, 0xa94b, 0xab4c, 0xad4d, 0xaf4e, 0xb14f, 0xb350, 0xb551, + // Entry 140 - 17F + 0xb752, 0xb953, 0xbb54, 0xbd55, 0xbf56, 0xc157, 0xc358, 0xc559, + 0xc75a, 0xc95b, 0xcb5c, 0xcd5d, 0xcf66, +} + +// Size: 2128 bytes +var variantIndex = map[string]uint8{ + "1606nict": 0x0, + "1694acad": 0x1, + "1901": 0x2, + "1959acad": 0x3, + "1994": 0x67, + "1996": 0x4, + "abl1943": 0x5, + "akuapem": 0x6, + "alalc97": 0x69, + "aluku": 0x7, + "ao1990": 0x8, + "aranes": 0x9, + "arevela": 0xa, + "arevmda": 0xb, + "arkaika": 0xc, + "asante": 0xd, + "auvern": 0xe, + "baku1926": 0xf, + "balanka": 0x10, + "barla": 0x11, + "basiceng": 0x12, + "bauddha": 0x13, + "bciav": 0x14, + "bcizbl": 0x15, + "biscayan": 0x16, + "biske": 0x62, + "bohoric": 0x17, + "boont": 0x18, + "bornholm": 0x19, + "cisaup": 0x1a, + "colb1945": 0x1b, + 
"cornu": 0x1c, + "creiss": 0x1d, + "dajnko": 0x1e, + "ekavsk": 0x1f, + "emodeng": 0x20, + "fonipa": 0x6a, + "fonkirsh": 0x6b, + "fonnapa": 0x6c, + "fonupa": 0x6d, + "fonxsamp": 0x6e, + "gallo": 0x21, + "gascon": 0x22, + "grclass": 0x23, + "grital": 0x24, + "grmistr": 0x25, + "hepburn": 0x26, + "heploc": 0x68, + "hognorsk": 0x27, + "hsistemo": 0x28, + "ijekavsk": 0x29, + "itihasa": 0x2a, + "ivanchov": 0x2b, + "jauer": 0x2c, + "jyutping": 0x2d, + "kkcor": 0x2e, + "kociewie": 0x2f, + "kscor": 0x30, + "laukika": 0x31, + "lemosin": 0x32, + "lengadoc": 0x33, + "lipaw": 0x63, + "ltg1929": 0x34, + "ltg2007": 0x35, + "luna1918": 0x36, + "metelko": 0x37, + "monoton": 0x38, + "ndyuka": 0x39, + "nedis": 0x3a, + "newfound": 0x3b, + "nicard": 0x3c, + "njiva": 0x64, + "nulik": 0x3d, + "osojs": 0x65, + "oxendict": 0x3e, + "pahawh2": 0x3f, + "pahawh3": 0x40, + "pahawh4": 0x41, + "pamaka": 0x42, + "peano": 0x43, + "petr1708": 0x44, + "pinyin": 0x45, + "polyton": 0x46, + "provenc": 0x47, + "puter": 0x48, + "rigik": 0x49, + "rozaj": 0x4a, + "rumgr": 0x4b, + "scotland": 0x4c, + "scouse": 0x4d, + "simple": 0x6f, + "solba": 0x66, + "sotav": 0x4e, + "spanglis": 0x4f, + "surmiran": 0x50, + "sursilv": 0x51, + "sutsilv": 0x52, + "synnejyl": 0x53, + "tarask": 0x54, + "tongyong": 0x55, + "tunumiit": 0x56, + "uccor": 0x57, + "ucrcor": 0x58, + "ulster": 0x59, + "unifon": 0x5a, + "vaidika": 0x5b, + "valencia": 0x5c, + "vallader": 0x5d, + "vecdruka": 0x5e, + "vivaraup": 0x5f, + "wadegile": 0x60, + "xsistemo": 0x61, +} + +// variantNumSpecialized is the number of specialized variants in variants. +const variantNumSpecialized = 105 + +// nRegionGroups is the number of region groups. +const nRegionGroups = 33 + +type likelyLangRegion struct { + lang uint16 + region uint16 +} + +// likelyScript is a lookup table, indexed by scriptID, for the most likely +// languages and regions given a script. 
+// Size: 1052 bytes, 263 elements +var likelyScript = [263]likelyLangRegion{ + 1: {lang: 0x14e, region: 0x85}, + 3: {lang: 0x2a2, region: 0x107}, + 4: {lang: 0x1f, region: 0x9a}, + 5: {lang: 0x3a, region: 0x6c}, + 7: {lang: 0x3b, region: 0x9d}, + 8: {lang: 0x1d7, region: 0x28}, + 9: {lang: 0x13, region: 0x9d}, + 10: {lang: 0x5b, region: 0x96}, + 11: {lang: 0x60, region: 0x52}, + 12: {lang: 0xb9, region: 0xb5}, + 13: {lang: 0x63, region: 0x96}, + 14: {lang: 0xa5, region: 0x35}, + 15: {lang: 0x3e9, region: 0x9a}, + 17: {lang: 0x529, region: 0x12f}, + 18: {lang: 0x3b1, region: 0x9a}, + 19: {lang: 0x15e, region: 0x79}, + 20: {lang: 0xc2, region: 0x96}, + 21: {lang: 0x9d, region: 0xe8}, + 22: {lang: 0xdb, region: 0x35}, + 23: {lang: 0xf3, region: 0x49}, + 24: {lang: 0x4f0, region: 0x12c}, + 25: {lang: 0xe7, region: 0x13f}, + 26: {lang: 0xe5, region: 0x136}, + 29: {lang: 0xf1, region: 0x6c}, + 31: {lang: 0x1a0, region: 0x5e}, + 32: {lang: 0x3e2, region: 0x107}, + 34: {lang: 0x1be, region: 0x9a}, + 38: {lang: 0x15e, region: 0x79}, + 41: {lang: 0x133, region: 0x6c}, + 42: {lang: 0x431, region: 0x27}, + 44: {lang: 0x27, region: 0x70}, + 46: {lang: 0x210, region: 0x7e}, + 47: {lang: 0xfe, region: 0x38}, + 49: {lang: 0x19b, region: 0x9a}, + 50: {lang: 0x19e, region: 0x131}, + 51: {lang: 0x3e9, region: 0x9a}, + 52: {lang: 0x136, region: 0x88}, + 53: {lang: 0x1a4, region: 0x9a}, + 54: {lang: 0x39d, region: 0x9a}, + 55: {lang: 0x529, region: 0x12f}, + 56: {lang: 0x254, region: 0xac}, + 57: {lang: 0x529, region: 0x53}, + 58: {lang: 0x1cb, region: 0xe8}, + 59: {lang: 0x529, region: 0x53}, + 60: {lang: 0x529, region: 0x12f}, + 61: {lang: 0x2fd, region: 0x9c}, + 62: {lang: 0x1bc, region: 0x98}, + 63: {lang: 0x200, region: 0xa3}, + 64: {lang: 0x1c5, region: 0x12c}, + 65: {lang: 0x1ca, region: 0xb0}, + 68: {lang: 0x1d5, region: 0x93}, + 70: {lang: 0x142, region: 0x9f}, + 71: {lang: 0x254, region: 0xac}, + 72: {lang: 0x20e, region: 0x96}, + 73: {lang: 0x200, region: 0xa3}, + 75: {lang: 0x135, region: 0xc5}, + 76: {lang: 0x200, region: 0xa3}, + 78: {lang: 0x3bb, region: 0xe9}, + 79: {lang: 0x24a, region: 0xa7}, + 80: {lang: 0x3fa, region: 0x9a}, + 83: {lang: 0x251, region: 0x9a}, + 84: {lang: 0x254, region: 0xac}, + 86: {lang: 0x88, region: 0x9a}, + 87: {lang: 0x370, region: 0x124}, + 88: {lang: 0x2b8, region: 0xb0}, + 93: {lang: 0x29f, region: 0x9a}, + 94: {lang: 0x2a8, region: 0x9a}, + 95: {lang: 0x28f, region: 0x88}, + 96: {lang: 0x1a0, region: 0x88}, + 97: {lang: 0x2ac, region: 0x53}, + 99: {lang: 0x4f4, region: 0x12c}, + 100: {lang: 0x4f5, region: 0x12c}, + 101: {lang: 0x1be, region: 0x9a}, + 103: {lang: 0x337, region: 0x9d}, + 104: {lang: 0x4f7, region: 0x53}, + 105: {lang: 0xa9, region: 0x53}, + 108: {lang: 0x2e8, region: 0x113}, + 109: {lang: 0x4f8, region: 0x10c}, + 110: {lang: 0x4f8, region: 0x10c}, + 111: {lang: 0x304, region: 0x9a}, + 112: {lang: 0x31b, region: 0x9a}, + 113: {lang: 0x30b, region: 0x53}, + 115: {lang: 0x31e, region: 0x35}, + 116: {lang: 0x30e, region: 0x9a}, + 117: {lang: 0x414, region: 0xe9}, + 118: {lang: 0x331, region: 0xc5}, + 121: {lang: 0x4f9, region: 0x109}, + 122: {lang: 0x3b, region: 0xa2}, + 123: {lang: 0x353, region: 0xdc}, + 126: {lang: 0x2d0, region: 0x85}, + 127: {lang: 0x52a, region: 0x53}, + 128: {lang: 0x403, region: 0x97}, + 129: {lang: 0x3ee, region: 0x9a}, + 130: {lang: 0x39b, region: 0xc6}, + 131: {lang: 0x395, region: 0x9a}, + 132: {lang: 0x399, region: 0x136}, + 133: {lang: 0x429, region: 0x116}, + 135: {lang: 0x3b, region: 0x11d}, + 136: {lang: 0xfd, region: 
0xc5}, + 139: {lang: 0x27d, region: 0x107}, + 140: {lang: 0x2c9, region: 0x53}, + 141: {lang: 0x39f, region: 0x9d}, + 142: {lang: 0x39f, region: 0x53}, + 144: {lang: 0x3ad, region: 0xb1}, + 146: {lang: 0x1c6, region: 0x53}, + 147: {lang: 0x4fd, region: 0x9d}, + 200: {lang: 0x3cb, region: 0x96}, + 203: {lang: 0x372, region: 0x10d}, + 204: {lang: 0x420, region: 0x98}, + 206: {lang: 0x4ff, region: 0x15f}, + 207: {lang: 0x3f0, region: 0x9a}, + 208: {lang: 0x45, region: 0x136}, + 209: {lang: 0x139, region: 0x7c}, + 210: {lang: 0x3e9, region: 0x9a}, + 212: {lang: 0x3e9, region: 0x9a}, + 213: {lang: 0x3fa, region: 0x9a}, + 214: {lang: 0x40c, region: 0xb4}, + 217: {lang: 0x433, region: 0x9a}, + 218: {lang: 0xef, region: 0xc6}, + 219: {lang: 0x43e, region: 0x96}, + 221: {lang: 0x44d, region: 0x35}, + 222: {lang: 0x44e, region: 0x9c}, + 226: {lang: 0x45a, region: 0xe8}, + 227: {lang: 0x11a, region: 0x9a}, + 228: {lang: 0x45e, region: 0x53}, + 229: {lang: 0x232, region: 0x53}, + 230: {lang: 0x450, region: 0x9a}, + 231: {lang: 0x4a5, region: 0x53}, + 232: {lang: 0x9f, region: 0x13f}, + 233: {lang: 0x461, region: 0x9a}, + 235: {lang: 0x528, region: 0xbb}, + 236: {lang: 0x153, region: 0xe8}, + 237: {lang: 0x128, region: 0xce}, + 238: {lang: 0x46b, region: 0x124}, + 239: {lang: 0xa9, region: 0x53}, + 240: {lang: 0x2ce, region: 0x9a}, + 243: {lang: 0x4ad, region: 0x11d}, + 244: {lang: 0x4be, region: 0xb5}, + 247: {lang: 0x1ce, region: 0x9a}, + 250: {lang: 0x3a9, region: 0x9d}, + 251: {lang: 0x22, region: 0x9c}, + 253: {lang: 0x1ea, region: 0x53}, + 254: {lang: 0xef, region: 0xc6}, +} + +type likelyScriptRegion struct { + region uint16 + script uint16 + flags uint8 +} + +// likelyLang is a lookup table, indexed by langID, for the most likely +// scripts and regions given incomplete information. If more entries exist for a +// given language, region and script are the index and size respectively +// of the list in likelyLangList. 
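+// Editor's illustrative sketch (assumptions: flag bit 0x1 plays the role of
+// upstream's isList flag, and likelyLangList's element type is
+// likelyScriptRegion; neither is stated in this hunk): reading an entry per
+// the comment above.
+//
+//	func likelyForLang(langID uint16) (region, script uint16, list []likelyScriptRegion) {
+//		x := likelyLang[langID]
+//		if x.flags&0x1 != 0 { // list entry: region = index, script = size
+//			return 0, 0, likelyLangList[x.region : x.region+x.script]
+//		}
+//		return x.region, x.script, nil
+//	}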
+// Size: 7980 bytes, 1330 elements +var likelyLang = [1330]likelyScriptRegion{ + 0: {region: 0x136, script: 0x5b, flags: 0x0}, + 1: {region: 0x70, script: 0x5b, flags: 0x0}, + 2: {region: 0x166, script: 0x5b, flags: 0x0}, + 3: {region: 0x166, script: 0x5b, flags: 0x0}, + 4: {region: 0x166, script: 0x5b, flags: 0x0}, + 5: {region: 0x7e, script: 0x20, flags: 0x0}, + 6: {region: 0x166, script: 0x5b, flags: 0x0}, + 7: {region: 0x166, script: 0x20, flags: 0x0}, + 8: {region: 0x81, script: 0x5b, flags: 0x0}, + 9: {region: 0x166, script: 0x5b, flags: 0x0}, + 10: {region: 0x166, script: 0x5b, flags: 0x0}, + 11: {region: 0x166, script: 0x5b, flags: 0x0}, + 12: {region: 0x96, script: 0x5b, flags: 0x0}, + 13: {region: 0x132, script: 0x5b, flags: 0x0}, + 14: {region: 0x81, script: 0x5b, flags: 0x0}, + 15: {region: 0x166, script: 0x5b, flags: 0x0}, + 16: {region: 0x166, script: 0x5b, flags: 0x0}, + 17: {region: 0x107, script: 0x20, flags: 0x0}, + 18: {region: 0x166, script: 0x5b, flags: 0x0}, + 19: {region: 0x9d, script: 0x9, flags: 0x0}, + 20: {region: 0x129, script: 0x5, flags: 0x0}, + 21: {region: 0x166, script: 0x5b, flags: 0x0}, + 22: {region: 0x162, script: 0x5b, flags: 0x0}, + 23: {region: 0x166, script: 0x5b, flags: 0x0}, + 24: {region: 0x166, script: 0x5b, flags: 0x0}, + 25: {region: 0x166, script: 0x5b, flags: 0x0}, + 26: {region: 0x166, script: 0x5b, flags: 0x0}, + 27: {region: 0x166, script: 0x5b, flags: 0x0}, + 28: {region: 0x52, script: 0x5b, flags: 0x0}, + 29: {region: 0x166, script: 0x5b, flags: 0x0}, + 30: {region: 0x166, script: 0x5b, flags: 0x0}, + 31: {region: 0x9a, script: 0x4, flags: 0x0}, + 32: {region: 0x166, script: 0x5b, flags: 0x0}, + 33: {region: 0x81, script: 0x5b, flags: 0x0}, + 34: {region: 0x9c, script: 0xfb, flags: 0x0}, + 35: {region: 0x166, script: 0x5b, flags: 0x0}, + 36: {region: 0x166, script: 0x5b, flags: 0x0}, + 37: {region: 0x14e, script: 0x5b, flags: 0x0}, + 38: {region: 0x107, script: 0x20, flags: 0x0}, + 39: {region: 0x70, script: 0x2c, flags: 0x0}, + 40: {region: 0x166, script: 0x5b, flags: 0x0}, + 41: {region: 0x166, script: 0x5b, flags: 0x0}, + 42: {region: 0xd7, script: 0x5b, flags: 0x0}, + 43: {region: 0x166, script: 0x5b, flags: 0x0}, + 45: {region: 0x166, script: 0x5b, flags: 0x0}, + 46: {region: 0x166, script: 0x5b, flags: 0x0}, + 47: {region: 0x166, script: 0x5b, flags: 0x0}, + 48: {region: 0x166, script: 0x5b, flags: 0x0}, + 49: {region: 0x166, script: 0x5b, flags: 0x0}, + 50: {region: 0x166, script: 0x5b, flags: 0x0}, + 51: {region: 0x96, script: 0x5b, flags: 0x0}, + 52: {region: 0x166, script: 0x5, flags: 0x0}, + 53: {region: 0x123, script: 0x5, flags: 0x0}, + 54: {region: 0x166, script: 0x5b, flags: 0x0}, + 55: {region: 0x166, script: 0x5b, flags: 0x0}, + 56: {region: 0x166, script: 0x5b, flags: 0x0}, + 57: {region: 0x166, script: 0x5b, flags: 0x0}, + 58: {region: 0x6c, script: 0x5, flags: 0x0}, + 59: {region: 0x0, script: 0x3, flags: 0x1}, + 60: {region: 0x166, script: 0x5b, flags: 0x0}, + 61: {region: 0x51, script: 0x5b, flags: 0x0}, + 62: {region: 0x3f, script: 0x5b, flags: 0x0}, + 63: {region: 0x68, script: 0x5, flags: 0x0}, + 65: {region: 0xbb, script: 0x5, flags: 0x0}, + 66: {region: 0x6c, script: 0x5, flags: 0x0}, + 67: {region: 0x9a, script: 0xe, flags: 0x0}, + 68: {region: 0x130, script: 0x5b, flags: 0x0}, + 69: {region: 0x136, script: 0xd0, flags: 0x0}, + 70: {region: 0x166, script: 0x5b, flags: 0x0}, + 71: {region: 0x166, script: 0x5b, flags: 0x0}, + 72: {region: 0x6f, script: 0x5b, flags: 0x0}, + 73: {region: 0x166, script: 0x5b, 
flags: 0x0}, + 74: {region: 0x166, script: 0x5b, flags: 0x0}, + 75: {region: 0x49, script: 0x5b, flags: 0x0}, + 76: {region: 0x166, script: 0x5b, flags: 0x0}, + 77: {region: 0x107, script: 0x20, flags: 0x0}, + 78: {region: 0x166, script: 0x5, flags: 0x0}, + 79: {region: 0x166, script: 0x5b, flags: 0x0}, + 80: {region: 0x166, script: 0x5b, flags: 0x0}, + 81: {region: 0x166, script: 0x5b, flags: 0x0}, + 82: {region: 0x9a, script: 0x22, flags: 0x0}, + 83: {region: 0x166, script: 0x5b, flags: 0x0}, + 84: {region: 0x166, script: 0x5b, flags: 0x0}, + 85: {region: 0x166, script: 0x5b, flags: 0x0}, + 86: {region: 0x3f, script: 0x5b, flags: 0x0}, + 87: {region: 0x166, script: 0x5b, flags: 0x0}, + 88: {region: 0x3, script: 0x5, flags: 0x1}, + 89: {region: 0x107, script: 0x20, flags: 0x0}, + 90: {region: 0xe9, script: 0x5, flags: 0x0}, + 91: {region: 0x96, script: 0x5b, flags: 0x0}, + 92: {region: 0xdc, script: 0x22, flags: 0x0}, + 93: {region: 0x2e, script: 0x5b, flags: 0x0}, + 94: {region: 0x52, script: 0x5b, flags: 0x0}, + 95: {region: 0x166, script: 0x5b, flags: 0x0}, + 96: {region: 0x52, script: 0xb, flags: 0x0}, + 97: {region: 0x166, script: 0x5b, flags: 0x0}, + 98: {region: 0x166, script: 0x5b, flags: 0x0}, + 99: {region: 0x96, script: 0x5b, flags: 0x0}, + 100: {region: 0x166, script: 0x5b, flags: 0x0}, + 101: {region: 0x52, script: 0x5b, flags: 0x0}, + 102: {region: 0x166, script: 0x5b, flags: 0x0}, + 103: {region: 0x166, script: 0x5b, flags: 0x0}, + 104: {region: 0x166, script: 0x5b, flags: 0x0}, + 105: {region: 0x166, script: 0x5b, flags: 0x0}, + 106: {region: 0x4f, script: 0x5b, flags: 0x0}, + 107: {region: 0x166, script: 0x5b, flags: 0x0}, + 108: {region: 0x166, script: 0x5b, flags: 0x0}, + 109: {region: 0x166, script: 0x5b, flags: 0x0}, + 110: {region: 0x166, script: 0x2c, flags: 0x0}, + 111: {region: 0x166, script: 0x5b, flags: 0x0}, + 112: {region: 0x166, script: 0x5b, flags: 0x0}, + 113: {region: 0x47, script: 0x20, flags: 0x0}, + 114: {region: 0x166, script: 0x5b, flags: 0x0}, + 115: {region: 0x166, script: 0x5b, flags: 0x0}, + 116: {region: 0x10c, script: 0x5, flags: 0x0}, + 117: {region: 0x163, script: 0x5b, flags: 0x0}, + 118: {region: 0x166, script: 0x5b, flags: 0x0}, + 119: {region: 0x96, script: 0x5b, flags: 0x0}, + 120: {region: 0x166, script: 0x5b, flags: 0x0}, + 121: {region: 0x130, script: 0x5b, flags: 0x0}, + 122: {region: 0x52, script: 0x5b, flags: 0x0}, + 123: {region: 0x9a, script: 0xe6, flags: 0x0}, + 124: {region: 0xe9, script: 0x5, flags: 0x0}, + 125: {region: 0x9a, script: 0x22, flags: 0x0}, + 126: {region: 0x38, script: 0x20, flags: 0x0}, + 127: {region: 0x9a, script: 0x22, flags: 0x0}, + 128: {region: 0xe9, script: 0x5, flags: 0x0}, + 129: {region: 0x12c, script: 0x34, flags: 0x0}, + 131: {region: 0x9a, script: 0x22, flags: 0x0}, + 132: {region: 0x166, script: 0x5b, flags: 0x0}, + 133: {region: 0x9a, script: 0x22, flags: 0x0}, + 134: {region: 0xe8, script: 0x5b, flags: 0x0}, + 135: {region: 0x166, script: 0x5b, flags: 0x0}, + 136: {region: 0x9a, script: 0x22, flags: 0x0}, + 137: {region: 0x166, script: 0x5b, flags: 0x0}, + 138: {region: 0x140, script: 0x5b, flags: 0x0}, + 139: {region: 0x166, script: 0x5b, flags: 0x0}, + 140: {region: 0x166, script: 0x5b, flags: 0x0}, + 141: {region: 0xe8, script: 0x5b, flags: 0x0}, + 142: {region: 0x166, script: 0x5b, flags: 0x0}, + 143: {region: 0xd7, script: 0x5b, flags: 0x0}, + 144: {region: 0x166, script: 0x5b, flags: 0x0}, + 145: {region: 0x166, script: 0x5b, flags: 0x0}, + 146: {region: 0x166, script: 0x5b, flags: 0x0}, + 
147: {region: 0x166, script: 0x2c, flags: 0x0}, + 148: {region: 0x9a, script: 0x22, flags: 0x0}, + 149: {region: 0x96, script: 0x5b, flags: 0x0}, + 150: {region: 0x166, script: 0x5b, flags: 0x0}, + 151: {region: 0x166, script: 0x5b, flags: 0x0}, + 152: {region: 0x115, script: 0x5b, flags: 0x0}, + 153: {region: 0x166, script: 0x5b, flags: 0x0}, + 154: {region: 0x166, script: 0x5b, flags: 0x0}, + 155: {region: 0x52, script: 0x5b, flags: 0x0}, + 156: {region: 0x166, script: 0x5b, flags: 0x0}, + 157: {region: 0xe8, script: 0x5b, flags: 0x0}, + 158: {region: 0x166, script: 0x5b, flags: 0x0}, + 159: {region: 0x13f, script: 0xe8, flags: 0x0}, + 160: {region: 0xc4, script: 0x5b, flags: 0x0}, + 161: {region: 0x166, script: 0x5b, flags: 0x0}, + 162: {region: 0x166, script: 0x5b, flags: 0x0}, + 163: {region: 0xc4, script: 0x5b, flags: 0x0}, + 164: {region: 0x166, script: 0x5b, flags: 0x0}, + 165: {region: 0x35, script: 0xe, flags: 0x0}, + 166: {region: 0x166, script: 0x5b, flags: 0x0}, + 167: {region: 0x166, script: 0x5b, flags: 0x0}, + 168: {region: 0x166, script: 0x5b, flags: 0x0}, + 169: {region: 0x53, script: 0xef, flags: 0x0}, + 170: {region: 0x166, script: 0x5b, flags: 0x0}, + 171: {region: 0x166, script: 0x5b, flags: 0x0}, + 172: {region: 0x166, script: 0x5b, flags: 0x0}, + 173: {region: 0x9a, script: 0xe, flags: 0x0}, + 174: {region: 0x166, script: 0x5b, flags: 0x0}, + 175: {region: 0x9d, script: 0x5, flags: 0x0}, + 176: {region: 0x166, script: 0x5b, flags: 0x0}, + 177: {region: 0x4f, script: 0x5b, flags: 0x0}, + 178: {region: 0x79, script: 0x5b, flags: 0x0}, + 179: {region: 0x9a, script: 0x22, flags: 0x0}, + 180: {region: 0xe9, script: 0x5, flags: 0x0}, + 181: {region: 0x9a, script: 0x22, flags: 0x0}, + 182: {region: 0x166, script: 0x5b, flags: 0x0}, + 183: {region: 0x33, script: 0x5b, flags: 0x0}, + 184: {region: 0x166, script: 0x5b, flags: 0x0}, + 185: {region: 0xb5, script: 0xc, flags: 0x0}, + 186: {region: 0x52, script: 0x5b, flags: 0x0}, + 187: {region: 0x166, script: 0x2c, flags: 0x0}, + 188: {region: 0xe8, script: 0x5b, flags: 0x0}, + 189: {region: 0x166, script: 0x5b, flags: 0x0}, + 190: {region: 0xe9, script: 0x22, flags: 0x0}, + 191: {region: 0x107, script: 0x20, flags: 0x0}, + 192: {region: 0x160, script: 0x5b, flags: 0x0}, + 193: {region: 0x166, script: 0x5b, flags: 0x0}, + 194: {region: 0x96, script: 0x5b, flags: 0x0}, + 195: {region: 0x166, script: 0x5b, flags: 0x0}, + 196: {region: 0x52, script: 0x5b, flags: 0x0}, + 197: {region: 0x166, script: 0x5b, flags: 0x0}, + 198: {region: 0x166, script: 0x5b, flags: 0x0}, + 199: {region: 0x166, script: 0x5b, flags: 0x0}, + 200: {region: 0x87, script: 0x5b, flags: 0x0}, + 201: {region: 0x166, script: 0x5b, flags: 0x0}, + 202: {region: 0x166, script: 0x5b, flags: 0x0}, + 203: {region: 0x166, script: 0x5b, flags: 0x0}, + 204: {region: 0x166, script: 0x5b, flags: 0x0}, + 205: {region: 0x6e, script: 0x2c, flags: 0x0}, + 206: {region: 0x166, script: 0x5b, flags: 0x0}, + 207: {region: 0x166, script: 0x5b, flags: 0x0}, + 208: {region: 0x52, script: 0x5b, flags: 0x0}, + 209: {region: 0x166, script: 0x5b, flags: 0x0}, + 210: {region: 0x166, script: 0x5b, flags: 0x0}, + 211: {region: 0xc4, script: 0x5b, flags: 0x0}, + 212: {region: 0x166, script: 0x5b, flags: 0x0}, + 213: {region: 0x166, script: 0x5b, flags: 0x0}, + 214: {region: 0x166, script: 0x5b, flags: 0x0}, + 215: {region: 0x6f, script: 0x5b, flags: 0x0}, + 216: {region: 0x166, script: 0x5b, flags: 0x0}, + 217: {region: 0x166, script: 0x5b, flags: 0x0}, + 218: {region: 0xd7, script: 0x5b, 
flags: 0x0}, + 219: {region: 0x35, script: 0x16, flags: 0x0}, + 220: {region: 0x107, script: 0x20, flags: 0x0}, + 221: {region: 0xe8, script: 0x5b, flags: 0x0}, + 222: {region: 0x166, script: 0x5b, flags: 0x0}, + 223: {region: 0x132, script: 0x5b, flags: 0x0}, + 224: {region: 0x8b, script: 0x5b, flags: 0x0}, + 225: {region: 0x76, script: 0x5b, flags: 0x0}, + 226: {region: 0x107, script: 0x20, flags: 0x0}, + 227: {region: 0x136, script: 0x5b, flags: 0x0}, + 228: {region: 0x49, script: 0x5b, flags: 0x0}, + 229: {region: 0x136, script: 0x1a, flags: 0x0}, + 230: {region: 0xa7, script: 0x5, flags: 0x0}, + 231: {region: 0x13f, script: 0x19, flags: 0x0}, + 232: {region: 0x166, script: 0x5b, flags: 0x0}, + 233: {region: 0x9c, script: 0x5, flags: 0x0}, + 234: {region: 0x166, script: 0x5b, flags: 0x0}, + 235: {region: 0x166, script: 0x5b, flags: 0x0}, + 236: {region: 0x166, script: 0x5b, flags: 0x0}, + 237: {region: 0x166, script: 0x5b, flags: 0x0}, + 238: {region: 0x166, script: 0x5b, flags: 0x0}, + 239: {region: 0xc6, script: 0xda, flags: 0x0}, + 240: {region: 0x79, script: 0x5b, flags: 0x0}, + 241: {region: 0x6c, script: 0x1d, flags: 0x0}, + 242: {region: 0xe8, script: 0x5b, flags: 0x0}, + 243: {region: 0x49, script: 0x17, flags: 0x0}, + 244: {region: 0x131, script: 0x20, flags: 0x0}, + 245: {region: 0x49, script: 0x17, flags: 0x0}, + 246: {region: 0x49, script: 0x17, flags: 0x0}, + 247: {region: 0x49, script: 0x17, flags: 0x0}, + 248: {region: 0x49, script: 0x17, flags: 0x0}, + 249: {region: 0x10b, script: 0x5b, flags: 0x0}, + 250: {region: 0x5f, script: 0x5b, flags: 0x0}, + 251: {region: 0xea, script: 0x5b, flags: 0x0}, + 252: {region: 0x49, script: 0x17, flags: 0x0}, + 253: {region: 0xc5, script: 0x88, flags: 0x0}, + 254: {region: 0x8, script: 0x2, flags: 0x1}, + 255: {region: 0x107, script: 0x20, flags: 0x0}, + 256: {region: 0x7c, script: 0x5b, flags: 0x0}, + 257: {region: 0x64, script: 0x5b, flags: 0x0}, + 258: {region: 0x166, script: 0x5b, flags: 0x0}, + 259: {region: 0x166, script: 0x5b, flags: 0x0}, + 260: {region: 0x166, script: 0x5b, flags: 0x0}, + 261: {region: 0x166, script: 0x5b, flags: 0x0}, + 262: {region: 0x136, script: 0x5b, flags: 0x0}, + 263: {region: 0x107, script: 0x20, flags: 0x0}, + 264: {region: 0xa5, script: 0x5b, flags: 0x0}, + 265: {region: 0x166, script: 0x5b, flags: 0x0}, + 266: {region: 0x166, script: 0x5b, flags: 0x0}, + 267: {region: 0x9a, script: 0x5, flags: 0x0}, + 268: {region: 0x166, script: 0x5b, flags: 0x0}, + 269: {region: 0x61, script: 0x5b, flags: 0x0}, + 270: {region: 0x166, script: 0x5b, flags: 0x0}, + 271: {region: 0x49, script: 0x5b, flags: 0x0}, + 272: {region: 0x166, script: 0x5b, flags: 0x0}, + 273: {region: 0x166, script: 0x5b, flags: 0x0}, + 274: {region: 0x166, script: 0x5b, flags: 0x0}, + 275: {region: 0x166, script: 0x5, flags: 0x0}, + 276: {region: 0x49, script: 0x5b, flags: 0x0}, + 277: {region: 0x166, script: 0x5b, flags: 0x0}, + 278: {region: 0x166, script: 0x5b, flags: 0x0}, + 279: {region: 0xd5, script: 0x5b, flags: 0x0}, + 280: {region: 0x4f, script: 0x5b, flags: 0x0}, + 281: {region: 0x166, script: 0x5b, flags: 0x0}, + 282: {region: 0x9a, script: 0x5, flags: 0x0}, + 283: {region: 0x166, script: 0x5b, flags: 0x0}, + 284: {region: 0x166, script: 0x5b, flags: 0x0}, + 285: {region: 0x166, script: 0x5b, flags: 0x0}, + 286: {region: 0x166, script: 0x2c, flags: 0x0}, + 287: {region: 0x61, script: 0x5b, flags: 0x0}, + 288: {region: 0xc4, script: 0x5b, flags: 0x0}, + 289: {region: 0xd1, script: 0x5b, flags: 0x0}, + 290: {region: 0x166, script: 
0x5b, flags: 0x0}, + 291: {region: 0xdc, script: 0x22, flags: 0x0}, + 292: {region: 0x52, script: 0x5b, flags: 0x0}, + 293: {region: 0x166, script: 0x5b, flags: 0x0}, + 294: {region: 0x166, script: 0x5b, flags: 0x0}, + 295: {region: 0x166, script: 0x5b, flags: 0x0}, + 296: {region: 0xce, script: 0xed, flags: 0x0}, + 297: {region: 0x166, script: 0x5b, flags: 0x0}, + 298: {region: 0x166, script: 0x5b, flags: 0x0}, + 299: {region: 0x115, script: 0x5b, flags: 0x0}, + 300: {region: 0x37, script: 0x5b, flags: 0x0}, + 301: {region: 0x43, script: 0xef, flags: 0x0}, + 302: {region: 0x166, script: 0x5b, flags: 0x0}, + 303: {region: 0xa5, script: 0x5b, flags: 0x0}, + 304: {region: 0x81, script: 0x5b, flags: 0x0}, + 305: {region: 0xd7, script: 0x5b, flags: 0x0}, + 306: {region: 0x9f, script: 0x5b, flags: 0x0}, + 307: {region: 0x6c, script: 0x29, flags: 0x0}, + 308: {region: 0x166, script: 0x5b, flags: 0x0}, + 309: {region: 0xc5, script: 0x4b, flags: 0x0}, + 310: {region: 0x88, script: 0x34, flags: 0x0}, + 311: {region: 0x166, script: 0x5b, flags: 0x0}, + 312: {region: 0x166, script: 0x5b, flags: 0x0}, + 313: {region: 0xa, script: 0x2, flags: 0x1}, + 314: {region: 0x166, script: 0x5b, flags: 0x0}, + 315: {region: 0x166, script: 0x5b, flags: 0x0}, + 316: {region: 0x1, script: 0x5b, flags: 0x0}, + 317: {region: 0x166, script: 0x5b, flags: 0x0}, + 318: {region: 0x6f, script: 0x5b, flags: 0x0}, + 319: {region: 0x136, script: 0x5b, flags: 0x0}, + 320: {region: 0x6b, script: 0x5b, flags: 0x0}, + 321: {region: 0x166, script: 0x5b, flags: 0x0}, + 322: {region: 0x9f, script: 0x46, flags: 0x0}, + 323: {region: 0x166, script: 0x5b, flags: 0x0}, + 324: {region: 0x166, script: 0x5b, flags: 0x0}, + 325: {region: 0x6f, script: 0x5b, flags: 0x0}, + 326: {region: 0x52, script: 0x5b, flags: 0x0}, + 327: {region: 0x6f, script: 0x5b, flags: 0x0}, + 328: {region: 0x9d, script: 0x5, flags: 0x0}, + 329: {region: 0x166, script: 0x5b, flags: 0x0}, + 330: {region: 0x166, script: 0x5b, flags: 0x0}, + 331: {region: 0x166, script: 0x5b, flags: 0x0}, + 332: {region: 0x166, script: 0x5b, flags: 0x0}, + 333: {region: 0x87, script: 0x5b, flags: 0x0}, + 334: {region: 0xc, script: 0x2, flags: 0x1}, + 335: {region: 0x166, script: 0x5b, flags: 0x0}, + 336: {region: 0xc4, script: 0x5b, flags: 0x0}, + 337: {region: 0x73, script: 0x5b, flags: 0x0}, + 338: {region: 0x10c, script: 0x5, flags: 0x0}, + 339: {region: 0xe8, script: 0x5b, flags: 0x0}, + 340: {region: 0x10d, script: 0x5b, flags: 0x0}, + 341: {region: 0x74, script: 0x5b, flags: 0x0}, + 342: {region: 0x166, script: 0x5b, flags: 0x0}, + 343: {region: 0x166, script: 0x5b, flags: 0x0}, + 344: {region: 0x77, script: 0x5b, flags: 0x0}, + 345: {region: 0x166, script: 0x5b, flags: 0x0}, + 346: {region: 0x3b, script: 0x5b, flags: 0x0}, + 347: {region: 0x166, script: 0x5b, flags: 0x0}, + 348: {region: 0x166, script: 0x5b, flags: 0x0}, + 349: {region: 0x166, script: 0x5b, flags: 0x0}, + 350: {region: 0x79, script: 0x5b, flags: 0x0}, + 351: {region: 0x136, script: 0x5b, flags: 0x0}, + 352: {region: 0x79, script: 0x5b, flags: 0x0}, + 353: {region: 0x61, script: 0x5b, flags: 0x0}, + 354: {region: 0x61, script: 0x5b, flags: 0x0}, + 355: {region: 0x52, script: 0x5, flags: 0x0}, + 356: {region: 0x141, script: 0x5b, flags: 0x0}, + 357: {region: 0x166, script: 0x5b, flags: 0x0}, + 358: {region: 0x85, script: 0x5b, flags: 0x0}, + 359: {region: 0x166, script: 0x5b, flags: 0x0}, + 360: {region: 0xd5, script: 0x5b, flags: 0x0}, + 361: {region: 0x9f, script: 0x5b, flags: 0x0}, + 362: {region: 0xd7, script: 
0x5b, flags: 0x0}, + 363: {region: 0x166, script: 0x5b, flags: 0x0}, + 364: {region: 0x10c, script: 0x5b, flags: 0x0}, + 365: {region: 0xda, script: 0x5b, flags: 0x0}, + 366: {region: 0x97, script: 0x5b, flags: 0x0}, + 367: {region: 0x81, script: 0x5b, flags: 0x0}, + 368: {region: 0x166, script: 0x5b, flags: 0x0}, + 369: {region: 0xbd, script: 0x5b, flags: 0x0}, + 370: {region: 0x166, script: 0x5b, flags: 0x0}, + 371: {region: 0x166, script: 0x5b, flags: 0x0}, + 372: {region: 0x166, script: 0x5b, flags: 0x0}, + 373: {region: 0x53, script: 0x3b, flags: 0x0}, + 374: {region: 0x166, script: 0x5b, flags: 0x0}, + 375: {region: 0x96, script: 0x5b, flags: 0x0}, + 376: {region: 0x166, script: 0x5b, flags: 0x0}, + 377: {region: 0x166, script: 0x5b, flags: 0x0}, + 378: {region: 0x9a, script: 0x22, flags: 0x0}, + 379: {region: 0x166, script: 0x5b, flags: 0x0}, + 380: {region: 0x9d, script: 0x5, flags: 0x0}, + 381: {region: 0x7f, script: 0x5b, flags: 0x0}, + 382: {region: 0x7c, script: 0x5b, flags: 0x0}, + 383: {region: 0x166, script: 0x5b, flags: 0x0}, + 384: {region: 0x166, script: 0x5b, flags: 0x0}, + 385: {region: 0x166, script: 0x5b, flags: 0x0}, + 386: {region: 0x166, script: 0x5b, flags: 0x0}, + 387: {region: 0x166, script: 0x5b, flags: 0x0}, + 388: {region: 0x166, script: 0x5b, flags: 0x0}, + 389: {region: 0x70, script: 0x2c, flags: 0x0}, + 390: {region: 0x166, script: 0x5b, flags: 0x0}, + 391: {region: 0xdc, script: 0x22, flags: 0x0}, + 392: {region: 0x166, script: 0x5b, flags: 0x0}, + 393: {region: 0xa8, script: 0x5b, flags: 0x0}, + 394: {region: 0x166, script: 0x5b, flags: 0x0}, + 395: {region: 0xe9, script: 0x5, flags: 0x0}, + 396: {region: 0x166, script: 0x5b, flags: 0x0}, + 397: {region: 0xe9, script: 0x5, flags: 0x0}, + 398: {region: 0x166, script: 0x5b, flags: 0x0}, + 399: {region: 0x166, script: 0x5b, flags: 0x0}, + 400: {region: 0x6f, script: 0x5b, flags: 0x0}, + 401: {region: 0x9d, script: 0x5, flags: 0x0}, + 402: {region: 0x166, script: 0x5b, flags: 0x0}, + 403: {region: 0x166, script: 0x2c, flags: 0x0}, + 404: {region: 0xf2, script: 0x5b, flags: 0x0}, + 405: {region: 0x166, script: 0x5b, flags: 0x0}, + 406: {region: 0x166, script: 0x5b, flags: 0x0}, + 407: {region: 0x166, script: 0x5b, flags: 0x0}, + 408: {region: 0x166, script: 0x2c, flags: 0x0}, + 409: {region: 0x166, script: 0x5b, flags: 0x0}, + 410: {region: 0x9a, script: 0x22, flags: 0x0}, + 411: {region: 0x9a, script: 0xe9, flags: 0x0}, + 412: {region: 0x96, script: 0x5b, flags: 0x0}, + 413: {region: 0xda, script: 0x5b, flags: 0x0}, + 414: {region: 0x131, script: 0x32, flags: 0x0}, + 415: {region: 0x166, script: 0x5b, flags: 0x0}, + 416: {region: 0xe, script: 0x2, flags: 0x1}, + 417: {region: 0x9a, script: 0xe, flags: 0x0}, + 418: {region: 0x166, script: 0x5b, flags: 0x0}, + 419: {region: 0x4e, script: 0x5b, flags: 0x0}, + 420: {region: 0x9a, script: 0x35, flags: 0x0}, + 421: {region: 0x41, script: 0x5b, flags: 0x0}, + 422: {region: 0x54, script: 0x5b, flags: 0x0}, + 423: {region: 0x166, script: 0x5b, flags: 0x0}, + 424: {region: 0x81, script: 0x5b, flags: 0x0}, + 425: {region: 0x166, script: 0x5b, flags: 0x0}, + 426: {region: 0x166, script: 0x5b, flags: 0x0}, + 427: {region: 0xa5, script: 0x5b, flags: 0x0}, + 428: {region: 0x99, script: 0x5b, flags: 0x0}, + 429: {region: 0x166, script: 0x5b, flags: 0x0}, + 430: {region: 0xdc, script: 0x22, flags: 0x0}, + 431: {region: 0x166, script: 0x5b, flags: 0x0}, + 432: {region: 0x166, script: 0x5, flags: 0x0}, + 433: {region: 0x49, script: 0x5b, flags: 0x0}, + 434: {region: 0x166, 
script: 0x5, flags: 0x0}, + 435: {region: 0x166, script: 0x5b, flags: 0x0}, + 436: {region: 0x10, script: 0x3, flags: 0x1}, + 437: {region: 0x166, script: 0x5b, flags: 0x0}, + 438: {region: 0x53, script: 0x3b, flags: 0x0}, + 439: {region: 0x166, script: 0x5b, flags: 0x0}, + 440: {region: 0x136, script: 0x5b, flags: 0x0}, + 441: {region: 0x24, script: 0x5, flags: 0x0}, + 442: {region: 0x166, script: 0x5b, flags: 0x0}, + 443: {region: 0x166, script: 0x2c, flags: 0x0}, + 444: {region: 0x98, script: 0x3e, flags: 0x0}, + 445: {region: 0x166, script: 0x5b, flags: 0x0}, + 446: {region: 0x9a, script: 0x22, flags: 0x0}, + 447: {region: 0x166, script: 0x5b, flags: 0x0}, + 448: {region: 0x74, script: 0x5b, flags: 0x0}, + 449: {region: 0x166, script: 0x5b, flags: 0x0}, + 450: {region: 0x166, script: 0x5b, flags: 0x0}, + 451: {region: 0xe8, script: 0x5b, flags: 0x0}, + 452: {region: 0x166, script: 0x5b, flags: 0x0}, + 453: {region: 0x12c, script: 0x40, flags: 0x0}, + 454: {region: 0x53, script: 0x92, flags: 0x0}, + 455: {region: 0x166, script: 0x5b, flags: 0x0}, + 456: {region: 0xe9, script: 0x5, flags: 0x0}, + 457: {region: 0x9a, script: 0x22, flags: 0x0}, + 458: {region: 0xb0, script: 0x41, flags: 0x0}, + 459: {region: 0xe8, script: 0x5b, flags: 0x0}, + 460: {region: 0xe9, script: 0x5, flags: 0x0}, + 461: {region: 0xe7, script: 0x5b, flags: 0x0}, + 462: {region: 0x9a, script: 0x22, flags: 0x0}, + 463: {region: 0x9a, script: 0x22, flags: 0x0}, + 464: {region: 0x166, script: 0x5b, flags: 0x0}, + 465: {region: 0x91, script: 0x5b, flags: 0x0}, + 466: {region: 0x61, script: 0x5b, flags: 0x0}, + 467: {region: 0x53, script: 0x3b, flags: 0x0}, + 468: {region: 0x92, script: 0x5b, flags: 0x0}, + 469: {region: 0x93, script: 0x5b, flags: 0x0}, + 470: {region: 0x166, script: 0x5b, flags: 0x0}, + 471: {region: 0x28, script: 0x8, flags: 0x0}, + 472: {region: 0xd3, script: 0x5b, flags: 0x0}, + 473: {region: 0x79, script: 0x5b, flags: 0x0}, + 474: {region: 0x166, script: 0x5b, flags: 0x0}, + 475: {region: 0x166, script: 0x5b, flags: 0x0}, + 476: {region: 0xd1, script: 0x5b, flags: 0x0}, + 477: {region: 0xd7, script: 0x5b, flags: 0x0}, + 478: {region: 0x166, script: 0x5b, flags: 0x0}, + 479: {region: 0x166, script: 0x5b, flags: 0x0}, + 480: {region: 0x166, script: 0x5b, flags: 0x0}, + 481: {region: 0x96, script: 0x5b, flags: 0x0}, + 482: {region: 0x166, script: 0x5b, flags: 0x0}, + 483: {region: 0x166, script: 0x5b, flags: 0x0}, + 484: {region: 0x166, script: 0x5b, flags: 0x0}, + 486: {region: 0x123, script: 0x5b, flags: 0x0}, + 487: {region: 0xd7, script: 0x5b, flags: 0x0}, + 488: {region: 0x166, script: 0x5b, flags: 0x0}, + 489: {region: 0x166, script: 0x5b, flags: 0x0}, + 490: {region: 0x53, script: 0xfd, flags: 0x0}, + 491: {region: 0x166, script: 0x5b, flags: 0x0}, + 492: {region: 0x136, script: 0x5b, flags: 0x0}, + 493: {region: 0x166, script: 0x5b, flags: 0x0}, + 494: {region: 0x49, script: 0x5b, flags: 0x0}, + 495: {region: 0x166, script: 0x5b, flags: 0x0}, + 496: {region: 0x166, script: 0x5b, flags: 0x0}, + 497: {region: 0xe8, script: 0x5b, flags: 0x0}, + 498: {region: 0x166, script: 0x5b, flags: 0x0}, + 499: {region: 0x96, script: 0x5b, flags: 0x0}, + 500: {region: 0x107, script: 0x20, flags: 0x0}, + 501: {region: 0x1, script: 0x5b, flags: 0x0}, + 502: {region: 0x166, script: 0x5b, flags: 0x0}, + 503: {region: 0x166, script: 0x5b, flags: 0x0}, + 504: {region: 0x9e, script: 0x5b, flags: 0x0}, + 505: {region: 0x9f, script: 0x5b, flags: 0x0}, + 506: {region: 0x49, script: 0x17, flags: 0x0}, + 507: {region: 
0x98, script: 0x3e, flags: 0x0}, + 508: {region: 0x166, script: 0x5b, flags: 0x0}, + 509: {region: 0x166, script: 0x5b, flags: 0x0}, + 510: {region: 0x107, script: 0x5b, flags: 0x0}, + 511: {region: 0x166, script: 0x5b, flags: 0x0}, + 512: {region: 0xa3, script: 0x49, flags: 0x0}, + 513: {region: 0x166, script: 0x5b, flags: 0x0}, + 514: {region: 0xa1, script: 0x5b, flags: 0x0}, + 515: {region: 0x1, script: 0x5b, flags: 0x0}, + 516: {region: 0x166, script: 0x5b, flags: 0x0}, + 517: {region: 0x166, script: 0x5b, flags: 0x0}, + 518: {region: 0x166, script: 0x5b, flags: 0x0}, + 519: {region: 0x52, script: 0x5b, flags: 0x0}, + 520: {region: 0x131, script: 0x3e, flags: 0x0}, + 521: {region: 0x166, script: 0x5b, flags: 0x0}, + 522: {region: 0x130, script: 0x5b, flags: 0x0}, + 523: {region: 0xdc, script: 0x22, flags: 0x0}, + 524: {region: 0x166, script: 0x5b, flags: 0x0}, + 525: {region: 0x64, script: 0x5b, flags: 0x0}, + 526: {region: 0x96, script: 0x5b, flags: 0x0}, + 527: {region: 0x96, script: 0x5b, flags: 0x0}, + 528: {region: 0x7e, script: 0x2e, flags: 0x0}, + 529: {region: 0x138, script: 0x20, flags: 0x0}, + 530: {region: 0x68, script: 0x5b, flags: 0x0}, + 531: {region: 0xc5, script: 0x5b, flags: 0x0}, + 532: {region: 0x166, script: 0x5b, flags: 0x0}, + 533: {region: 0x166, script: 0x5b, flags: 0x0}, + 534: {region: 0xd7, script: 0x5b, flags: 0x0}, + 535: {region: 0xa5, script: 0x5b, flags: 0x0}, + 536: {region: 0xc4, script: 0x5b, flags: 0x0}, + 537: {region: 0x107, script: 0x20, flags: 0x0}, + 538: {region: 0x166, script: 0x5b, flags: 0x0}, + 539: {region: 0x166, script: 0x5b, flags: 0x0}, + 540: {region: 0x166, script: 0x5b, flags: 0x0}, + 541: {region: 0x166, script: 0x5b, flags: 0x0}, + 542: {region: 0xd5, script: 0x5, flags: 0x0}, + 543: {region: 0xd7, script: 0x5b, flags: 0x0}, + 544: {region: 0x165, script: 0x5b, flags: 0x0}, + 545: {region: 0x166, script: 0x5b, flags: 0x0}, + 546: {region: 0x166, script: 0x5b, flags: 0x0}, + 547: {region: 0x130, script: 0x5b, flags: 0x0}, + 548: {region: 0x123, script: 0x5, flags: 0x0}, + 549: {region: 0x166, script: 0x5b, flags: 0x0}, + 550: {region: 0x124, script: 0xee, flags: 0x0}, + 551: {region: 0x5b, script: 0x5b, flags: 0x0}, + 552: {region: 0x52, script: 0x5b, flags: 0x0}, + 553: {region: 0x166, script: 0x5b, flags: 0x0}, + 554: {region: 0x4f, script: 0x5b, flags: 0x0}, + 555: {region: 0x9a, script: 0x22, flags: 0x0}, + 556: {region: 0x9a, script: 0x22, flags: 0x0}, + 557: {region: 0x4b, script: 0x5b, flags: 0x0}, + 558: {region: 0x96, script: 0x5b, flags: 0x0}, + 559: {region: 0x166, script: 0x5b, flags: 0x0}, + 560: {region: 0x41, script: 0x5b, flags: 0x0}, + 561: {region: 0x9a, script: 0x5b, flags: 0x0}, + 562: {region: 0x53, script: 0xe5, flags: 0x0}, + 563: {region: 0x9a, script: 0x22, flags: 0x0}, + 564: {region: 0xc4, script: 0x5b, flags: 0x0}, + 565: {region: 0x166, script: 0x5b, flags: 0x0}, + 566: {region: 0x9a, script: 0x76, flags: 0x0}, + 567: {region: 0xe9, script: 0x5, flags: 0x0}, + 568: {region: 0x166, script: 0x5b, flags: 0x0}, + 569: {region: 0xa5, script: 0x5b, flags: 0x0}, + 570: {region: 0x166, script: 0x5b, flags: 0x0}, + 571: {region: 0x12c, script: 0x5b, flags: 0x0}, + 572: {region: 0x166, script: 0x5b, flags: 0x0}, + 573: {region: 0xd3, script: 0x5b, flags: 0x0}, + 574: {region: 0x166, script: 0x5b, flags: 0x0}, + 575: {region: 0xb0, script: 0x58, flags: 0x0}, + 576: {region: 0x166, script: 0x5b, flags: 0x0}, + 577: {region: 0x166, script: 0x5b, flags: 0x0}, + 578: {region: 0x13, script: 0x6, flags: 0x1}, + 579: 
{region: 0x166, script: 0x5b, flags: 0x0}, + 580: {region: 0x52, script: 0x5b, flags: 0x0}, + 581: {region: 0x83, script: 0x5b, flags: 0x0}, + 582: {region: 0xa5, script: 0x5b, flags: 0x0}, + 583: {region: 0x166, script: 0x5b, flags: 0x0}, + 584: {region: 0x166, script: 0x5b, flags: 0x0}, + 585: {region: 0x166, script: 0x5b, flags: 0x0}, + 586: {region: 0xa7, script: 0x4f, flags: 0x0}, + 587: {region: 0x2a, script: 0x5b, flags: 0x0}, + 588: {region: 0x166, script: 0x5b, flags: 0x0}, + 589: {region: 0x166, script: 0x5b, flags: 0x0}, + 590: {region: 0x166, script: 0x5b, flags: 0x0}, + 591: {region: 0x166, script: 0x5b, flags: 0x0}, + 592: {region: 0x166, script: 0x5b, flags: 0x0}, + 593: {region: 0x9a, script: 0x53, flags: 0x0}, + 594: {region: 0x8c, script: 0x5b, flags: 0x0}, + 595: {region: 0x166, script: 0x5b, flags: 0x0}, + 596: {region: 0xac, script: 0x54, flags: 0x0}, + 597: {region: 0x107, script: 0x20, flags: 0x0}, + 598: {region: 0x9a, script: 0x22, flags: 0x0}, + 599: {region: 0x166, script: 0x5b, flags: 0x0}, + 600: {region: 0x76, script: 0x5b, flags: 0x0}, + 601: {region: 0x166, script: 0x5b, flags: 0x0}, + 602: {region: 0xb5, script: 0x5b, flags: 0x0}, + 603: {region: 0x166, script: 0x5b, flags: 0x0}, + 604: {region: 0x166, script: 0x5b, flags: 0x0}, + 605: {region: 0x166, script: 0x5b, flags: 0x0}, + 606: {region: 0x166, script: 0x5b, flags: 0x0}, + 607: {region: 0x166, script: 0x5b, flags: 0x0}, + 608: {region: 0x166, script: 0x5b, flags: 0x0}, + 609: {region: 0x166, script: 0x5b, flags: 0x0}, + 610: {region: 0x166, script: 0x2c, flags: 0x0}, + 611: {region: 0x166, script: 0x5b, flags: 0x0}, + 612: {region: 0x107, script: 0x20, flags: 0x0}, + 613: {region: 0x113, script: 0x5b, flags: 0x0}, + 614: {region: 0xe8, script: 0x5b, flags: 0x0}, + 615: {region: 0x107, script: 0x5b, flags: 0x0}, + 616: {region: 0x166, script: 0x5b, flags: 0x0}, + 617: {region: 0x9a, script: 0x22, flags: 0x0}, + 618: {region: 0x9a, script: 0x5, flags: 0x0}, + 619: {region: 0x130, script: 0x5b, flags: 0x0}, + 620: {region: 0x166, script: 0x5b, flags: 0x0}, + 621: {region: 0x52, script: 0x5b, flags: 0x0}, + 622: {region: 0x61, script: 0x5b, flags: 0x0}, + 623: {region: 0x166, script: 0x5b, flags: 0x0}, + 624: {region: 0x166, script: 0x5b, flags: 0x0}, + 625: {region: 0x166, script: 0x2c, flags: 0x0}, + 626: {region: 0x166, script: 0x5b, flags: 0x0}, + 627: {region: 0x166, script: 0x5b, flags: 0x0}, + 628: {region: 0x19, script: 0x3, flags: 0x1}, + 629: {region: 0x166, script: 0x5b, flags: 0x0}, + 630: {region: 0x166, script: 0x5b, flags: 0x0}, + 631: {region: 0x166, script: 0x5b, flags: 0x0}, + 632: {region: 0x166, script: 0x5b, flags: 0x0}, + 633: {region: 0x107, script: 0x20, flags: 0x0}, + 634: {region: 0x166, script: 0x5b, flags: 0x0}, + 635: {region: 0x166, script: 0x5b, flags: 0x0}, + 636: {region: 0x166, script: 0x5b, flags: 0x0}, + 637: {region: 0x107, script: 0x20, flags: 0x0}, + 638: {region: 0x166, script: 0x5b, flags: 0x0}, + 639: {region: 0x96, script: 0x5b, flags: 0x0}, + 640: {region: 0xe9, script: 0x5, flags: 0x0}, + 641: {region: 0x7c, script: 0x5b, flags: 0x0}, + 642: {region: 0x166, script: 0x5b, flags: 0x0}, + 643: {region: 0x166, script: 0x5b, flags: 0x0}, + 644: {region: 0x166, script: 0x5b, flags: 0x0}, + 645: {region: 0x166, script: 0x2c, flags: 0x0}, + 646: {region: 0x124, script: 0xee, flags: 0x0}, + 647: {region: 0xe9, script: 0x5, flags: 0x0}, + 648: {region: 0x166, script: 0x5b, flags: 0x0}, + 649: {region: 0x166, script: 0x5b, flags: 0x0}, + 650: {region: 0x1c, script: 0x5, 
flags: 0x1}, + 651: {region: 0x166, script: 0x5b, flags: 0x0}, + 652: {region: 0x166, script: 0x5b, flags: 0x0}, + 653: {region: 0x166, script: 0x5b, flags: 0x0}, + 654: {region: 0x139, script: 0x5b, flags: 0x0}, + 655: {region: 0x88, script: 0x5f, flags: 0x0}, + 656: {region: 0x98, script: 0x3e, flags: 0x0}, + 657: {region: 0x130, script: 0x5b, flags: 0x0}, + 658: {region: 0xe9, script: 0x5, flags: 0x0}, + 659: {region: 0x132, script: 0x5b, flags: 0x0}, + 660: {region: 0x166, script: 0x5b, flags: 0x0}, + 661: {region: 0xb8, script: 0x5b, flags: 0x0}, + 662: {region: 0x107, script: 0x20, flags: 0x0}, + 663: {region: 0x166, script: 0x5b, flags: 0x0}, + 664: {region: 0x96, script: 0x5b, flags: 0x0}, + 665: {region: 0x166, script: 0x5b, flags: 0x0}, + 666: {region: 0x53, script: 0xee, flags: 0x0}, + 667: {region: 0x166, script: 0x5b, flags: 0x0}, + 668: {region: 0x166, script: 0x5b, flags: 0x0}, + 669: {region: 0x166, script: 0x5b, flags: 0x0}, + 670: {region: 0x166, script: 0x5b, flags: 0x0}, + 671: {region: 0x9a, script: 0x5d, flags: 0x0}, + 672: {region: 0x166, script: 0x5b, flags: 0x0}, + 673: {region: 0x166, script: 0x5b, flags: 0x0}, + 674: {region: 0x107, script: 0x20, flags: 0x0}, + 675: {region: 0x132, script: 0x5b, flags: 0x0}, + 676: {region: 0x166, script: 0x5b, flags: 0x0}, + 677: {region: 0xda, script: 0x5b, flags: 0x0}, + 678: {region: 0x166, script: 0x5b, flags: 0x0}, + 679: {region: 0x166, script: 0x5b, flags: 0x0}, + 680: {region: 0x21, script: 0x2, flags: 0x1}, + 681: {region: 0x166, script: 0x5b, flags: 0x0}, + 682: {region: 0x166, script: 0x5b, flags: 0x0}, + 683: {region: 0x9f, script: 0x5b, flags: 0x0}, + 684: {region: 0x53, script: 0x61, flags: 0x0}, + 685: {region: 0x96, script: 0x5b, flags: 0x0}, + 686: {region: 0x9d, script: 0x5, flags: 0x0}, + 687: {region: 0x136, script: 0x5b, flags: 0x0}, + 688: {region: 0x166, script: 0x5b, flags: 0x0}, + 689: {region: 0x166, script: 0x5b, flags: 0x0}, + 690: {region: 0x9a, script: 0xe9, flags: 0x0}, + 691: {region: 0x9f, script: 0x5b, flags: 0x0}, + 692: {region: 0x166, script: 0x5b, flags: 0x0}, + 693: {region: 0x4b, script: 0x5b, flags: 0x0}, + 694: {region: 0x166, script: 0x5b, flags: 0x0}, + 695: {region: 0x166, script: 0x5b, flags: 0x0}, + 696: {region: 0xb0, script: 0x58, flags: 0x0}, + 697: {region: 0x166, script: 0x5b, flags: 0x0}, + 698: {region: 0x166, script: 0x5b, flags: 0x0}, + 699: {region: 0x4b, script: 0x5b, flags: 0x0}, + 700: {region: 0x166, script: 0x5b, flags: 0x0}, + 701: {region: 0x166, script: 0x5b, flags: 0x0}, + 702: {region: 0x163, script: 0x5b, flags: 0x0}, + 703: {region: 0x9d, script: 0x5, flags: 0x0}, + 704: {region: 0xb7, script: 0x5b, flags: 0x0}, + 705: {region: 0xb9, script: 0x5b, flags: 0x0}, + 706: {region: 0x4b, script: 0x5b, flags: 0x0}, + 707: {region: 0x4b, script: 0x5b, flags: 0x0}, + 708: {region: 0xa5, script: 0x5b, flags: 0x0}, + 709: {region: 0xa5, script: 0x5b, flags: 0x0}, + 710: {region: 0x9d, script: 0x5, flags: 0x0}, + 711: {region: 0xb9, script: 0x5b, flags: 0x0}, + 712: {region: 0x124, script: 0xee, flags: 0x0}, + 713: {region: 0x53, script: 0x3b, flags: 0x0}, + 714: {region: 0x12c, script: 0x5b, flags: 0x0}, + 715: {region: 0x96, script: 0x5b, flags: 0x0}, + 716: {region: 0x52, script: 0x5b, flags: 0x0}, + 717: {region: 0x9a, script: 0x22, flags: 0x0}, + 718: {region: 0x9a, script: 0x22, flags: 0x0}, + 719: {region: 0x96, script: 0x5b, flags: 0x0}, + 720: {region: 0x23, script: 0x3, flags: 0x1}, + 721: {region: 0xa5, script: 0x5b, flags: 0x0}, + 722: {region: 0x166, script: 
0x5b, flags: 0x0}, + 723: {region: 0xd0, script: 0x5b, flags: 0x0}, + 724: {region: 0x166, script: 0x5b, flags: 0x0}, + 725: {region: 0x166, script: 0x5b, flags: 0x0}, + 726: {region: 0x166, script: 0x5b, flags: 0x0}, + 727: {region: 0x166, script: 0x5b, flags: 0x0}, + 728: {region: 0x166, script: 0x5b, flags: 0x0}, + 729: {region: 0x166, script: 0x5b, flags: 0x0}, + 730: {region: 0x166, script: 0x5b, flags: 0x0}, + 731: {region: 0x166, script: 0x5b, flags: 0x0}, + 732: {region: 0x166, script: 0x5b, flags: 0x0}, + 733: {region: 0x166, script: 0x5b, flags: 0x0}, + 734: {region: 0x166, script: 0x5b, flags: 0x0}, + 735: {region: 0x166, script: 0x5, flags: 0x0}, + 736: {region: 0x107, script: 0x20, flags: 0x0}, + 737: {region: 0xe8, script: 0x5b, flags: 0x0}, + 738: {region: 0x166, script: 0x5b, flags: 0x0}, + 739: {region: 0x96, script: 0x5b, flags: 0x0}, + 740: {region: 0x166, script: 0x2c, flags: 0x0}, + 741: {region: 0x166, script: 0x5b, flags: 0x0}, + 742: {region: 0x166, script: 0x5b, flags: 0x0}, + 743: {region: 0x166, script: 0x5b, flags: 0x0}, + 744: {region: 0x113, script: 0x5b, flags: 0x0}, + 745: {region: 0xa5, script: 0x5b, flags: 0x0}, + 746: {region: 0x166, script: 0x5b, flags: 0x0}, + 747: {region: 0x166, script: 0x5b, flags: 0x0}, + 748: {region: 0x124, script: 0x5, flags: 0x0}, + 749: {region: 0xcd, script: 0x5b, flags: 0x0}, + 750: {region: 0x166, script: 0x5b, flags: 0x0}, + 751: {region: 0x166, script: 0x5b, flags: 0x0}, + 752: {region: 0x166, script: 0x5b, flags: 0x0}, + 753: {region: 0xc0, script: 0x5b, flags: 0x0}, + 754: {region: 0xd2, script: 0x5b, flags: 0x0}, + 755: {region: 0x166, script: 0x5b, flags: 0x0}, + 756: {region: 0x52, script: 0x5b, flags: 0x0}, + 757: {region: 0xdc, script: 0x22, flags: 0x0}, + 758: {region: 0x130, script: 0x5b, flags: 0x0}, + 759: {region: 0xc1, script: 0x5b, flags: 0x0}, + 760: {region: 0x166, script: 0x5b, flags: 0x0}, + 761: {region: 0x166, script: 0x5b, flags: 0x0}, + 762: {region: 0xe1, script: 0x5b, flags: 0x0}, + 763: {region: 0x166, script: 0x5b, flags: 0x0}, + 764: {region: 0x96, script: 0x5b, flags: 0x0}, + 765: {region: 0x9c, script: 0x3d, flags: 0x0}, + 766: {region: 0x166, script: 0x5b, flags: 0x0}, + 767: {region: 0xc3, script: 0x20, flags: 0x0}, + 768: {region: 0x166, script: 0x5, flags: 0x0}, + 769: {region: 0x166, script: 0x5b, flags: 0x0}, + 770: {region: 0x166, script: 0x5b, flags: 0x0}, + 771: {region: 0x166, script: 0x5b, flags: 0x0}, + 772: {region: 0x9a, script: 0x6f, flags: 0x0}, + 773: {region: 0x166, script: 0x5b, flags: 0x0}, + 774: {region: 0x166, script: 0x5b, flags: 0x0}, + 775: {region: 0x10c, script: 0x5b, flags: 0x0}, + 776: {region: 0x166, script: 0x5b, flags: 0x0}, + 777: {region: 0x166, script: 0x5b, flags: 0x0}, + 778: {region: 0x166, script: 0x5b, flags: 0x0}, + 779: {region: 0x26, script: 0x3, flags: 0x1}, + 780: {region: 0x166, script: 0x5b, flags: 0x0}, + 781: {region: 0x166, script: 0x5b, flags: 0x0}, + 782: {region: 0x9a, script: 0xe, flags: 0x0}, + 783: {region: 0xc5, script: 0x76, flags: 0x0}, + 785: {region: 0x166, script: 0x5b, flags: 0x0}, + 786: {region: 0x49, script: 0x5b, flags: 0x0}, + 787: {region: 0x49, script: 0x5b, flags: 0x0}, + 788: {region: 0x37, script: 0x5b, flags: 0x0}, + 789: {region: 0x166, script: 0x5b, flags: 0x0}, + 790: {region: 0x166, script: 0x5b, flags: 0x0}, + 791: {region: 0x166, script: 0x5b, flags: 0x0}, + 792: {region: 0x166, script: 0x5b, flags: 0x0}, + 793: {region: 0x166, script: 0x5b, flags: 0x0}, + 794: {region: 0x166, script: 0x5b, flags: 0x0}, + 795: 
{region: 0x9a, script: 0x22, flags: 0x0}, + 796: {region: 0xdc, script: 0x22, flags: 0x0}, + 797: {region: 0x107, script: 0x20, flags: 0x0}, + 798: {region: 0x35, script: 0x73, flags: 0x0}, + 799: {region: 0x29, script: 0x3, flags: 0x1}, + 800: {region: 0xcc, script: 0x5b, flags: 0x0}, + 801: {region: 0x166, script: 0x5b, flags: 0x0}, + 802: {region: 0x166, script: 0x5b, flags: 0x0}, + 803: {region: 0x166, script: 0x5b, flags: 0x0}, + 804: {region: 0x9a, script: 0x22, flags: 0x0}, + 805: {region: 0x52, script: 0x5b, flags: 0x0}, + 807: {region: 0x166, script: 0x5b, flags: 0x0}, + 808: {region: 0x136, script: 0x5b, flags: 0x0}, + 809: {region: 0x166, script: 0x5b, flags: 0x0}, + 810: {region: 0x166, script: 0x5b, flags: 0x0}, + 811: {region: 0xe9, script: 0x5, flags: 0x0}, + 812: {region: 0xc4, script: 0x5b, flags: 0x0}, + 813: {region: 0x9a, script: 0x22, flags: 0x0}, + 814: {region: 0x96, script: 0x5b, flags: 0x0}, + 815: {region: 0x165, script: 0x5b, flags: 0x0}, + 816: {region: 0x166, script: 0x5b, flags: 0x0}, + 817: {region: 0xc5, script: 0x76, flags: 0x0}, + 818: {region: 0x166, script: 0x5b, flags: 0x0}, + 819: {region: 0x166, script: 0x2c, flags: 0x0}, + 820: {region: 0x107, script: 0x20, flags: 0x0}, + 821: {region: 0x166, script: 0x5b, flags: 0x0}, + 822: {region: 0x132, script: 0x5b, flags: 0x0}, + 823: {region: 0x9d, script: 0x67, flags: 0x0}, + 824: {region: 0x166, script: 0x5b, flags: 0x0}, + 825: {region: 0x166, script: 0x5b, flags: 0x0}, + 826: {region: 0x9d, script: 0x5, flags: 0x0}, + 827: {region: 0x166, script: 0x5b, flags: 0x0}, + 828: {region: 0x166, script: 0x5b, flags: 0x0}, + 829: {region: 0x166, script: 0x5b, flags: 0x0}, + 830: {region: 0xde, script: 0x5b, flags: 0x0}, + 831: {region: 0x166, script: 0x5b, flags: 0x0}, + 832: {region: 0x166, script: 0x5b, flags: 0x0}, + 834: {region: 0x166, script: 0x5b, flags: 0x0}, + 835: {region: 0x53, script: 0x3b, flags: 0x0}, + 836: {region: 0x9f, script: 0x5b, flags: 0x0}, + 837: {region: 0xd3, script: 0x5b, flags: 0x0}, + 838: {region: 0x166, script: 0x5b, flags: 0x0}, + 839: {region: 0xdb, script: 0x5b, flags: 0x0}, + 840: {region: 0x166, script: 0x5b, flags: 0x0}, + 841: {region: 0x166, script: 0x5b, flags: 0x0}, + 842: {region: 0x166, script: 0x5b, flags: 0x0}, + 843: {region: 0xd0, script: 0x5b, flags: 0x0}, + 844: {region: 0x166, script: 0x5b, flags: 0x0}, + 845: {region: 0x166, script: 0x5b, flags: 0x0}, + 846: {region: 0x165, script: 0x5b, flags: 0x0}, + 847: {region: 0xd2, script: 0x5b, flags: 0x0}, + 848: {region: 0x61, script: 0x5b, flags: 0x0}, + 849: {region: 0xdc, script: 0x22, flags: 0x0}, + 850: {region: 0x166, script: 0x5b, flags: 0x0}, + 851: {region: 0xdc, script: 0x22, flags: 0x0}, + 852: {region: 0x166, script: 0x5b, flags: 0x0}, + 853: {region: 0x166, script: 0x5b, flags: 0x0}, + 854: {region: 0xd3, script: 0x5b, flags: 0x0}, + 855: {region: 0x166, script: 0x5b, flags: 0x0}, + 856: {region: 0x166, script: 0x5b, flags: 0x0}, + 857: {region: 0xd2, script: 0x5b, flags: 0x0}, + 858: {region: 0x166, script: 0x5b, flags: 0x0}, + 859: {region: 0xd0, script: 0x5b, flags: 0x0}, + 860: {region: 0xd0, script: 0x5b, flags: 0x0}, + 861: {region: 0x166, script: 0x5b, flags: 0x0}, + 862: {region: 0x166, script: 0x5b, flags: 0x0}, + 863: {region: 0x96, script: 0x5b, flags: 0x0}, + 864: {region: 0x166, script: 0x5b, flags: 0x0}, + 865: {region: 0xe0, script: 0x5b, flags: 0x0}, + 866: {region: 0x166, script: 0x5b, flags: 0x0}, + 867: {region: 0x166, script: 0x5b, flags: 0x0}, + 868: {region: 0x9a, script: 0x5b, flags: 
0x0}, + 869: {region: 0x166, script: 0x5b, flags: 0x0}, + 870: {region: 0x166, script: 0x5b, flags: 0x0}, + 871: {region: 0xda, script: 0x5b, flags: 0x0}, + 872: {region: 0x52, script: 0x5b, flags: 0x0}, + 873: {region: 0x166, script: 0x5b, flags: 0x0}, + 874: {region: 0xdb, script: 0x5b, flags: 0x0}, + 875: {region: 0x166, script: 0x5b, flags: 0x0}, + 876: {region: 0x52, script: 0x5b, flags: 0x0}, + 877: {region: 0x166, script: 0x5b, flags: 0x0}, + 878: {region: 0x166, script: 0x5b, flags: 0x0}, + 879: {region: 0xdb, script: 0x5b, flags: 0x0}, + 880: {region: 0x124, script: 0x57, flags: 0x0}, + 881: {region: 0x9a, script: 0x22, flags: 0x0}, + 882: {region: 0x10d, script: 0xcb, flags: 0x0}, + 883: {region: 0x166, script: 0x5b, flags: 0x0}, + 884: {region: 0x166, script: 0x5b, flags: 0x0}, + 885: {region: 0x85, script: 0x7e, flags: 0x0}, + 886: {region: 0x162, script: 0x5b, flags: 0x0}, + 887: {region: 0x166, script: 0x5b, flags: 0x0}, + 888: {region: 0x49, script: 0x17, flags: 0x0}, + 889: {region: 0x166, script: 0x5b, flags: 0x0}, + 890: {region: 0x162, script: 0x5b, flags: 0x0}, + 891: {region: 0x166, script: 0x5b, flags: 0x0}, + 892: {region: 0x166, script: 0x5b, flags: 0x0}, + 893: {region: 0x166, script: 0x5b, flags: 0x0}, + 894: {region: 0x166, script: 0x5b, flags: 0x0}, + 895: {region: 0x166, script: 0x5b, flags: 0x0}, + 896: {region: 0x118, script: 0x5b, flags: 0x0}, + 897: {region: 0x166, script: 0x5b, flags: 0x0}, + 898: {region: 0x166, script: 0x5b, flags: 0x0}, + 899: {region: 0x136, script: 0x5b, flags: 0x0}, + 900: {region: 0x166, script: 0x5b, flags: 0x0}, + 901: {region: 0x53, script: 0x5b, flags: 0x0}, + 902: {region: 0x166, script: 0x5b, flags: 0x0}, + 903: {region: 0xcf, script: 0x5b, flags: 0x0}, + 904: {region: 0x130, script: 0x5b, flags: 0x0}, + 905: {region: 0x132, script: 0x5b, flags: 0x0}, + 906: {region: 0x81, script: 0x5b, flags: 0x0}, + 907: {region: 0x79, script: 0x5b, flags: 0x0}, + 908: {region: 0x166, script: 0x5b, flags: 0x0}, + 910: {region: 0x166, script: 0x5b, flags: 0x0}, + 911: {region: 0x166, script: 0x5b, flags: 0x0}, + 912: {region: 0x70, script: 0x5b, flags: 0x0}, + 913: {region: 0x166, script: 0x5b, flags: 0x0}, + 914: {region: 0x166, script: 0x5b, flags: 0x0}, + 915: {region: 0x166, script: 0x5b, flags: 0x0}, + 916: {region: 0x166, script: 0x5b, flags: 0x0}, + 917: {region: 0x9a, script: 0x83, flags: 0x0}, + 918: {region: 0x166, script: 0x5b, flags: 0x0}, + 919: {region: 0x166, script: 0x5, flags: 0x0}, + 920: {region: 0x7e, script: 0x20, flags: 0x0}, + 921: {region: 0x136, script: 0x84, flags: 0x0}, + 922: {region: 0x166, script: 0x5, flags: 0x0}, + 923: {region: 0xc6, script: 0x82, flags: 0x0}, + 924: {region: 0x166, script: 0x5b, flags: 0x0}, + 925: {region: 0x2c, script: 0x3, flags: 0x1}, + 926: {region: 0xe8, script: 0x5b, flags: 0x0}, + 927: {region: 0x2f, script: 0x2, flags: 0x1}, + 928: {region: 0xe8, script: 0x5b, flags: 0x0}, + 929: {region: 0x30, script: 0x5b, flags: 0x0}, + 930: {region: 0xf1, script: 0x5b, flags: 0x0}, + 931: {region: 0x166, script: 0x5b, flags: 0x0}, + 932: {region: 0x79, script: 0x5b, flags: 0x0}, + 933: {region: 0xd7, script: 0x5b, flags: 0x0}, + 934: {region: 0x136, script: 0x5b, flags: 0x0}, + 935: {region: 0x49, script: 0x5b, flags: 0x0}, + 936: {region: 0x166, script: 0x5b, flags: 0x0}, + 937: {region: 0x9d, script: 0xfa, flags: 0x0}, + 938: {region: 0x166, script: 0x5b, flags: 0x0}, + 939: {region: 0x61, script: 0x5b, flags: 0x0}, + 940: {region: 0x166, script: 0x5, flags: 0x0}, + 941: {region: 0xb1, script: 
0x90, flags: 0x0}, + 943: {region: 0x166, script: 0x5b, flags: 0x0}, + 944: {region: 0x166, script: 0x5b, flags: 0x0}, + 945: {region: 0x9a, script: 0x12, flags: 0x0}, + 946: {region: 0xa5, script: 0x5b, flags: 0x0}, + 947: {region: 0xea, script: 0x5b, flags: 0x0}, + 948: {region: 0x166, script: 0x5b, flags: 0x0}, + 949: {region: 0x9f, script: 0x5b, flags: 0x0}, + 950: {region: 0x166, script: 0x5b, flags: 0x0}, + 951: {region: 0x166, script: 0x5b, flags: 0x0}, + 952: {region: 0x88, script: 0x34, flags: 0x0}, + 953: {region: 0x76, script: 0x5b, flags: 0x0}, + 954: {region: 0x166, script: 0x5b, flags: 0x0}, + 955: {region: 0xe9, script: 0x4e, flags: 0x0}, + 956: {region: 0x9d, script: 0x5, flags: 0x0}, + 957: {region: 0x1, script: 0x5b, flags: 0x0}, + 958: {region: 0x24, script: 0x5, flags: 0x0}, + 959: {region: 0x166, script: 0x5b, flags: 0x0}, + 960: {region: 0x41, script: 0x5b, flags: 0x0}, + 961: {region: 0x166, script: 0x5b, flags: 0x0}, + 962: {region: 0x7b, script: 0x5b, flags: 0x0}, + 963: {region: 0x166, script: 0x5b, flags: 0x0}, + 964: {region: 0xe5, script: 0x5b, flags: 0x0}, + 965: {region: 0x8a, script: 0x5b, flags: 0x0}, + 966: {region: 0x6a, script: 0x5b, flags: 0x0}, + 967: {region: 0x166, script: 0x5b, flags: 0x0}, + 968: {region: 0x9a, script: 0x22, flags: 0x0}, + 969: {region: 0x166, script: 0x5b, flags: 0x0}, + 970: {region: 0x103, script: 0x5b, flags: 0x0}, + 971: {region: 0x96, script: 0x5b, flags: 0x0}, + 972: {region: 0x166, script: 0x5b, flags: 0x0}, + 973: {region: 0x166, script: 0x5b, flags: 0x0}, + 974: {region: 0x9f, script: 0x5b, flags: 0x0}, + 975: {region: 0x166, script: 0x5, flags: 0x0}, + 976: {region: 0x9a, script: 0x5b, flags: 0x0}, + 977: {region: 0x31, script: 0x2, flags: 0x1}, + 978: {region: 0xdc, script: 0x22, flags: 0x0}, + 979: {region: 0x35, script: 0xe, flags: 0x0}, + 980: {region: 0x4e, script: 0x5b, flags: 0x0}, + 981: {region: 0x73, script: 0x5b, flags: 0x0}, + 982: {region: 0x4e, script: 0x5b, flags: 0x0}, + 983: {region: 0x9d, script: 0x5, flags: 0x0}, + 984: {region: 0x10d, script: 0x5b, flags: 0x0}, + 985: {region: 0x3a, script: 0x5b, flags: 0x0}, + 986: {region: 0x166, script: 0x5b, flags: 0x0}, + 987: {region: 0xd2, script: 0x5b, flags: 0x0}, + 988: {region: 0x105, script: 0x5b, flags: 0x0}, + 989: {region: 0x96, script: 0x5b, flags: 0x0}, + 990: {region: 0x130, script: 0x5b, flags: 0x0}, + 991: {region: 0x166, script: 0x5b, flags: 0x0}, + 992: {region: 0x166, script: 0x5b, flags: 0x0}, + 993: {region: 0x74, script: 0x5b, flags: 0x0}, + 994: {region: 0x107, script: 0x20, flags: 0x0}, + 995: {region: 0x131, script: 0x20, flags: 0x0}, + 996: {region: 0x10a, script: 0x5b, flags: 0x0}, + 997: {region: 0x108, script: 0x5b, flags: 0x0}, + 998: {region: 0x130, script: 0x5b, flags: 0x0}, + 999: {region: 0x166, script: 0x5b, flags: 0x0}, + 1000: {region: 0xa3, script: 0x4c, flags: 0x0}, + 1001: {region: 0x9a, script: 0x22, flags: 0x0}, + 1002: {region: 0x81, script: 0x5b, flags: 0x0}, + 1003: {region: 0x107, script: 0x20, flags: 0x0}, + 1004: {region: 0xa5, script: 0x5b, flags: 0x0}, + 1005: {region: 0x96, script: 0x5b, flags: 0x0}, + 1006: {region: 0x9a, script: 0x5b, flags: 0x0}, + 1007: {region: 0x115, script: 0x5b, flags: 0x0}, + 1008: {region: 0x9a, script: 0xcf, flags: 0x0}, + 1009: {region: 0x166, script: 0x5b, flags: 0x0}, + 1010: {region: 0x166, script: 0x5b, flags: 0x0}, + 1011: {region: 0x130, script: 0x5b, flags: 0x0}, + 1012: {region: 0x9f, script: 0x5b, flags: 0x0}, + 1013: {region: 0x9a, script: 0x22, flags: 0x0}, + 1014: {region: 
0x166, script: 0x5, flags: 0x0}, + 1015: {region: 0x9f, script: 0x5b, flags: 0x0}, + 1016: {region: 0x7c, script: 0x5b, flags: 0x0}, + 1017: {region: 0x49, script: 0x5b, flags: 0x0}, + 1018: {region: 0x33, script: 0x4, flags: 0x1}, + 1019: {region: 0x9f, script: 0x5b, flags: 0x0}, + 1020: {region: 0x9d, script: 0x5, flags: 0x0}, + 1021: {region: 0xdb, script: 0x5b, flags: 0x0}, + 1022: {region: 0x4f, script: 0x5b, flags: 0x0}, + 1023: {region: 0xd2, script: 0x5b, flags: 0x0}, + 1024: {region: 0xd0, script: 0x5b, flags: 0x0}, + 1025: {region: 0xc4, script: 0x5b, flags: 0x0}, + 1026: {region: 0x4c, script: 0x5b, flags: 0x0}, + 1027: {region: 0x97, script: 0x80, flags: 0x0}, + 1028: {region: 0xb7, script: 0x5b, flags: 0x0}, + 1029: {region: 0x166, script: 0x2c, flags: 0x0}, + 1030: {region: 0x166, script: 0x5b, flags: 0x0}, + 1032: {region: 0xbb, script: 0xeb, flags: 0x0}, + 1033: {region: 0x166, script: 0x5b, flags: 0x0}, + 1034: {region: 0xc5, script: 0x76, flags: 0x0}, + 1035: {region: 0x166, script: 0x5, flags: 0x0}, + 1036: {region: 0xb4, script: 0xd6, flags: 0x0}, + 1037: {region: 0x70, script: 0x5b, flags: 0x0}, + 1038: {region: 0x166, script: 0x5b, flags: 0x0}, + 1039: {region: 0x166, script: 0x5b, flags: 0x0}, + 1040: {region: 0x166, script: 0x5b, flags: 0x0}, + 1041: {region: 0x166, script: 0x5b, flags: 0x0}, + 1042: {region: 0x112, script: 0x5b, flags: 0x0}, + 1043: {region: 0x166, script: 0x5b, flags: 0x0}, + 1044: {region: 0xe9, script: 0x5, flags: 0x0}, + 1045: {region: 0x166, script: 0x5b, flags: 0x0}, + 1046: {region: 0x110, script: 0x5b, flags: 0x0}, + 1047: {region: 0x166, script: 0x5b, flags: 0x0}, + 1048: {region: 0xea, script: 0x5b, flags: 0x0}, + 1049: {region: 0x166, script: 0x5b, flags: 0x0}, + 1050: {region: 0x96, script: 0x5b, flags: 0x0}, + 1051: {region: 0x143, script: 0x5b, flags: 0x0}, + 1052: {region: 0x10d, script: 0x5b, flags: 0x0}, + 1054: {region: 0x10d, script: 0x5b, flags: 0x0}, + 1055: {region: 0x73, script: 0x5b, flags: 0x0}, + 1056: {region: 0x98, script: 0xcc, flags: 0x0}, + 1057: {region: 0x166, script: 0x5b, flags: 0x0}, + 1058: {region: 0x73, script: 0x5b, flags: 0x0}, + 1059: {region: 0x165, script: 0x5b, flags: 0x0}, + 1060: {region: 0x166, script: 0x5b, flags: 0x0}, + 1061: {region: 0xc4, script: 0x5b, flags: 0x0}, + 1062: {region: 0x166, script: 0x5b, flags: 0x0}, + 1063: {region: 0x166, script: 0x5b, flags: 0x0}, + 1064: {region: 0x166, script: 0x5b, flags: 0x0}, + 1065: {region: 0x116, script: 0x5b, flags: 0x0}, + 1066: {region: 0x166, script: 0x5b, flags: 0x0}, + 1067: {region: 0x166, script: 0x5b, flags: 0x0}, + 1068: {region: 0x124, script: 0xee, flags: 0x0}, + 1069: {region: 0x166, script: 0x5b, flags: 0x0}, + 1070: {region: 0x166, script: 0x5b, flags: 0x0}, + 1071: {region: 0x166, script: 0x5b, flags: 0x0}, + 1072: {region: 0x166, script: 0x5b, flags: 0x0}, + 1073: {region: 0x27, script: 0x5b, flags: 0x0}, + 1074: {region: 0x37, script: 0x5, flags: 0x1}, + 1075: {region: 0x9a, script: 0xd9, flags: 0x0}, + 1076: {region: 0x117, script: 0x5b, flags: 0x0}, + 1077: {region: 0x115, script: 0x5b, flags: 0x0}, + 1078: {region: 0x9a, script: 0x22, flags: 0x0}, + 1079: {region: 0x162, script: 0x5b, flags: 0x0}, + 1080: {region: 0x166, script: 0x5b, flags: 0x0}, + 1081: {region: 0x166, script: 0x5b, flags: 0x0}, + 1082: {region: 0x6e, script: 0x5b, flags: 0x0}, + 1083: {region: 0x162, script: 0x5b, flags: 0x0}, + 1084: {region: 0x166, script: 0x5b, flags: 0x0}, + 1085: {region: 0x61, script: 0x5b, flags: 0x0}, + 1086: {region: 0x96, script: 0x5b, 
flags: 0x0}, + 1087: {region: 0x166, script: 0x5b, flags: 0x0}, + 1088: {region: 0x166, script: 0x5b, flags: 0x0}, + 1089: {region: 0x130, script: 0x5b, flags: 0x0}, + 1090: {region: 0x166, script: 0x5b, flags: 0x0}, + 1091: {region: 0x85, script: 0x5b, flags: 0x0}, + 1092: {region: 0x10d, script: 0x5b, flags: 0x0}, + 1093: {region: 0x130, script: 0x5b, flags: 0x0}, + 1094: {region: 0x160, script: 0x5, flags: 0x0}, + 1095: {region: 0x4b, script: 0x5b, flags: 0x0}, + 1096: {region: 0x61, script: 0x5b, flags: 0x0}, + 1097: {region: 0x166, script: 0x5b, flags: 0x0}, + 1098: {region: 0x9a, script: 0x22, flags: 0x0}, + 1099: {region: 0x96, script: 0x5b, flags: 0x0}, + 1100: {region: 0x166, script: 0x5b, flags: 0x0}, + 1101: {region: 0x35, script: 0xe, flags: 0x0}, + 1102: {region: 0x9c, script: 0xde, flags: 0x0}, + 1103: {region: 0xea, script: 0x5b, flags: 0x0}, + 1104: {region: 0x9a, script: 0xe6, flags: 0x0}, + 1105: {region: 0xdc, script: 0x22, flags: 0x0}, + 1106: {region: 0x166, script: 0x5b, flags: 0x0}, + 1107: {region: 0x166, script: 0x5b, flags: 0x0}, + 1108: {region: 0x166, script: 0x5b, flags: 0x0}, + 1109: {region: 0x166, script: 0x5b, flags: 0x0}, + 1110: {region: 0x166, script: 0x5b, flags: 0x0}, + 1111: {region: 0x166, script: 0x5b, flags: 0x0}, + 1112: {region: 0x166, script: 0x5b, flags: 0x0}, + 1113: {region: 0x166, script: 0x5b, flags: 0x0}, + 1114: {region: 0xe8, script: 0x5b, flags: 0x0}, + 1115: {region: 0x166, script: 0x5b, flags: 0x0}, + 1116: {region: 0x166, script: 0x5b, flags: 0x0}, + 1117: {region: 0x9a, script: 0x53, flags: 0x0}, + 1118: {region: 0x53, script: 0xe4, flags: 0x0}, + 1119: {region: 0xdc, script: 0x22, flags: 0x0}, + 1120: {region: 0xdc, script: 0x22, flags: 0x0}, + 1121: {region: 0x9a, script: 0xe9, flags: 0x0}, + 1122: {region: 0x166, script: 0x5b, flags: 0x0}, + 1123: {region: 0x113, script: 0x5b, flags: 0x0}, + 1124: {region: 0x132, script: 0x5b, flags: 0x0}, + 1125: {region: 0x127, script: 0x5b, flags: 0x0}, + 1126: {region: 0x166, script: 0x5b, flags: 0x0}, + 1127: {region: 0x3c, script: 0x3, flags: 0x1}, + 1128: {region: 0x166, script: 0x5b, flags: 0x0}, + 1129: {region: 0x166, script: 0x5b, flags: 0x0}, + 1130: {region: 0x166, script: 0x5b, flags: 0x0}, + 1131: {region: 0x124, script: 0xee, flags: 0x0}, + 1132: {region: 0xdc, script: 0x22, flags: 0x0}, + 1133: {region: 0xdc, script: 0x22, flags: 0x0}, + 1134: {region: 0xdc, script: 0x22, flags: 0x0}, + 1135: {region: 0x70, script: 0x2c, flags: 0x0}, + 1136: {region: 0x166, script: 0x5b, flags: 0x0}, + 1137: {region: 0x6e, script: 0x2c, flags: 0x0}, + 1138: {region: 0x166, script: 0x5b, flags: 0x0}, + 1139: {region: 0x166, script: 0x5b, flags: 0x0}, + 1140: {region: 0x166, script: 0x5b, flags: 0x0}, + 1141: {region: 0xd7, script: 0x5b, flags: 0x0}, + 1142: {region: 0x128, script: 0x5b, flags: 0x0}, + 1143: {region: 0x126, script: 0x5b, flags: 0x0}, + 1144: {region: 0x32, script: 0x5b, flags: 0x0}, + 1145: {region: 0xdc, script: 0x22, flags: 0x0}, + 1146: {region: 0xe8, script: 0x5b, flags: 0x0}, + 1147: {region: 0x166, script: 0x5b, flags: 0x0}, + 1148: {region: 0x166, script: 0x5b, flags: 0x0}, + 1149: {region: 0x32, script: 0x5b, flags: 0x0}, + 1150: {region: 0xd5, script: 0x5b, flags: 0x0}, + 1151: {region: 0x166, script: 0x5b, flags: 0x0}, + 1152: {region: 0x162, script: 0x5b, flags: 0x0}, + 1153: {region: 0x166, script: 0x5b, flags: 0x0}, + 1154: {region: 0x12a, script: 0x5b, flags: 0x0}, + 1155: {region: 0x166, script: 0x5b, flags: 0x0}, + 1156: {region: 0xcf, script: 0x5b, flags: 0x0}, + 
1157: {region: 0x166, script: 0x5b, flags: 0x0}, + 1158: {region: 0xe7, script: 0x5b, flags: 0x0}, + 1159: {region: 0x166, script: 0x5b, flags: 0x0}, + 1160: {region: 0x166, script: 0x5b, flags: 0x0}, + 1161: {region: 0x166, script: 0x5b, flags: 0x0}, + 1162: {region: 0x12c, script: 0x5b, flags: 0x0}, + 1163: {region: 0x12c, script: 0x5b, flags: 0x0}, + 1164: {region: 0x12f, script: 0x5b, flags: 0x0}, + 1165: {region: 0x166, script: 0x5, flags: 0x0}, + 1166: {region: 0x162, script: 0x5b, flags: 0x0}, + 1167: {region: 0x88, script: 0x34, flags: 0x0}, + 1168: {region: 0xdc, script: 0x22, flags: 0x0}, + 1169: {region: 0xe8, script: 0x5b, flags: 0x0}, + 1170: {region: 0x43, script: 0xef, flags: 0x0}, + 1171: {region: 0x166, script: 0x5b, flags: 0x0}, + 1172: {region: 0x107, script: 0x20, flags: 0x0}, + 1173: {region: 0x166, script: 0x5b, flags: 0x0}, + 1174: {region: 0x166, script: 0x5b, flags: 0x0}, + 1175: {region: 0x132, script: 0x5b, flags: 0x0}, + 1176: {region: 0x166, script: 0x5b, flags: 0x0}, + 1177: {region: 0x124, script: 0xee, flags: 0x0}, + 1178: {region: 0x32, script: 0x5b, flags: 0x0}, + 1179: {region: 0x166, script: 0x5b, flags: 0x0}, + 1180: {region: 0x166, script: 0x5b, flags: 0x0}, + 1181: {region: 0xcf, script: 0x5b, flags: 0x0}, + 1182: {region: 0x166, script: 0x5b, flags: 0x0}, + 1183: {region: 0x166, script: 0x5b, flags: 0x0}, + 1184: {region: 0x12e, script: 0x5b, flags: 0x0}, + 1185: {region: 0x166, script: 0x5b, flags: 0x0}, + 1187: {region: 0x166, script: 0x5b, flags: 0x0}, + 1188: {region: 0xd5, script: 0x5b, flags: 0x0}, + 1189: {region: 0x53, script: 0xe7, flags: 0x0}, + 1190: {region: 0xe6, script: 0x5b, flags: 0x0}, + 1191: {region: 0x166, script: 0x5b, flags: 0x0}, + 1192: {region: 0x107, script: 0x20, flags: 0x0}, + 1193: {region: 0xbb, script: 0x5b, flags: 0x0}, + 1194: {region: 0x166, script: 0x5b, flags: 0x0}, + 1195: {region: 0x107, script: 0x20, flags: 0x0}, + 1196: {region: 0x3f, script: 0x4, flags: 0x1}, + 1197: {region: 0x11d, script: 0xf3, flags: 0x0}, + 1198: {region: 0x131, script: 0x20, flags: 0x0}, + 1199: {region: 0x76, script: 0x5b, flags: 0x0}, + 1200: {region: 0x2a, script: 0x5b, flags: 0x0}, + 1202: {region: 0x43, script: 0x3, flags: 0x1}, + 1203: {region: 0x9a, script: 0xe, flags: 0x0}, + 1204: {region: 0xe9, script: 0x5, flags: 0x0}, + 1205: {region: 0x166, script: 0x5b, flags: 0x0}, + 1206: {region: 0x166, script: 0x5b, flags: 0x0}, + 1207: {region: 0x166, script: 0x5b, flags: 0x0}, + 1208: {region: 0x166, script: 0x5b, flags: 0x0}, + 1209: {region: 0x166, script: 0x5b, flags: 0x0}, + 1210: {region: 0x166, script: 0x5b, flags: 0x0}, + 1211: {region: 0x166, script: 0x5b, flags: 0x0}, + 1212: {region: 0x46, script: 0x4, flags: 0x1}, + 1213: {region: 0x166, script: 0x5b, flags: 0x0}, + 1214: {region: 0xb5, script: 0xf4, flags: 0x0}, + 1215: {region: 0x166, script: 0x5b, flags: 0x0}, + 1216: {region: 0x162, script: 0x5b, flags: 0x0}, + 1217: {region: 0x9f, script: 0x5b, flags: 0x0}, + 1218: {region: 0x107, script: 0x5b, flags: 0x0}, + 1219: {region: 0x13f, script: 0x5b, flags: 0x0}, + 1220: {region: 0x11c, script: 0x5b, flags: 0x0}, + 1221: {region: 0x166, script: 0x5b, flags: 0x0}, + 1222: {region: 0x36, script: 0x5b, flags: 0x0}, + 1223: {region: 0x61, script: 0x5b, flags: 0x0}, + 1224: {region: 0xd2, script: 0x5b, flags: 0x0}, + 1225: {region: 0x1, script: 0x5b, flags: 0x0}, + 1226: {region: 0x107, script: 0x5b, flags: 0x0}, + 1227: {region: 0x6b, script: 0x5b, flags: 0x0}, + 1228: {region: 0x130, script: 0x5b, flags: 0x0}, + 1229: {region: 
0x166, script: 0x5b, flags: 0x0}, + 1230: {region: 0x36, script: 0x5b, flags: 0x0}, + 1231: {region: 0x4e, script: 0x5b, flags: 0x0}, + 1232: {region: 0x166, script: 0x5b, flags: 0x0}, + 1233: {region: 0x70, script: 0x2c, flags: 0x0}, + 1234: {region: 0x166, script: 0x5b, flags: 0x0}, + 1235: {region: 0xe8, script: 0x5b, flags: 0x0}, + 1236: {region: 0x2f, script: 0x5b, flags: 0x0}, + 1237: {region: 0x9a, script: 0xe9, flags: 0x0}, + 1238: {region: 0x9a, script: 0x22, flags: 0x0}, + 1239: {region: 0x166, script: 0x5b, flags: 0x0}, + 1240: {region: 0x166, script: 0x5b, flags: 0x0}, + 1241: {region: 0x166, script: 0x5b, flags: 0x0}, + 1242: {region: 0x166, script: 0x5b, flags: 0x0}, + 1243: {region: 0x166, script: 0x5b, flags: 0x0}, + 1244: {region: 0x166, script: 0x5b, flags: 0x0}, + 1245: {region: 0x166, script: 0x5b, flags: 0x0}, + 1246: {region: 0x166, script: 0x5b, flags: 0x0}, + 1247: {region: 0x166, script: 0x5b, flags: 0x0}, + 1248: {region: 0x141, script: 0x5b, flags: 0x0}, + 1249: {region: 0x166, script: 0x5b, flags: 0x0}, + 1250: {region: 0x166, script: 0x5b, flags: 0x0}, + 1251: {region: 0xa9, script: 0x5, flags: 0x0}, + 1252: {region: 0x166, script: 0x5b, flags: 0x0}, + 1253: {region: 0x115, script: 0x5b, flags: 0x0}, + 1254: {region: 0x166, script: 0x5b, flags: 0x0}, + 1255: {region: 0x166, script: 0x5b, flags: 0x0}, + 1256: {region: 0x166, script: 0x5b, flags: 0x0}, + 1257: {region: 0x166, script: 0x5b, flags: 0x0}, + 1258: {region: 0x9a, script: 0x22, flags: 0x0}, + 1259: {region: 0x53, script: 0x3b, flags: 0x0}, + 1260: {region: 0x166, script: 0x5b, flags: 0x0}, + 1261: {region: 0x166, script: 0x5b, flags: 0x0}, + 1262: {region: 0x41, script: 0x5b, flags: 0x0}, + 1263: {region: 0x166, script: 0x5b, flags: 0x0}, + 1264: {region: 0x12c, script: 0x18, flags: 0x0}, + 1265: {region: 0x166, script: 0x5b, flags: 0x0}, + 1266: {region: 0x162, script: 0x5b, flags: 0x0}, + 1267: {region: 0x166, script: 0x5b, flags: 0x0}, + 1268: {region: 0x12c, script: 0x63, flags: 0x0}, + 1269: {region: 0x12c, script: 0x64, flags: 0x0}, + 1270: {region: 0x7e, script: 0x2e, flags: 0x0}, + 1271: {region: 0x53, script: 0x68, flags: 0x0}, + 1272: {region: 0x10c, script: 0x6d, flags: 0x0}, + 1273: {region: 0x109, script: 0x79, flags: 0x0}, + 1274: {region: 0x9a, script: 0x22, flags: 0x0}, + 1275: {region: 0x132, script: 0x5b, flags: 0x0}, + 1276: {region: 0x166, script: 0x5b, flags: 0x0}, + 1277: {region: 0x9d, script: 0x93, flags: 0x0}, + 1278: {region: 0x166, script: 0x5b, flags: 0x0}, + 1279: {region: 0x15f, script: 0xce, flags: 0x0}, + 1280: {region: 0x166, script: 0x5b, flags: 0x0}, + 1281: {region: 0x166, script: 0x5b, flags: 0x0}, + 1282: {region: 0xdc, script: 0x22, flags: 0x0}, + 1283: {region: 0x166, script: 0x5b, flags: 0x0}, + 1284: {region: 0x166, script: 0x5b, flags: 0x0}, + 1285: {region: 0xd2, script: 0x5b, flags: 0x0}, + 1286: {region: 0x76, script: 0x5b, flags: 0x0}, + 1287: {region: 0x166, script: 0x5b, flags: 0x0}, + 1288: {region: 0x166, script: 0x5b, flags: 0x0}, + 1289: {region: 0x52, script: 0x5b, flags: 0x0}, + 1290: {region: 0x166, script: 0x5b, flags: 0x0}, + 1291: {region: 0x166, script: 0x5b, flags: 0x0}, + 1292: {region: 0x166, script: 0x5b, flags: 0x0}, + 1293: {region: 0x52, script: 0x5b, flags: 0x0}, + 1294: {region: 0x166, script: 0x5b, flags: 0x0}, + 1295: {region: 0x166, script: 0x5b, flags: 0x0}, + 1296: {region: 0x166, script: 0x5b, flags: 0x0}, + 1297: {region: 0x166, script: 0x5b, flags: 0x0}, + 1298: {region: 0x1, script: 0x3e, flags: 0x0}, + 1299: {region: 0x166, 
script: 0x5b, flags: 0x0}, + 1300: {region: 0x166, script: 0x5b, flags: 0x0}, + 1301: {region: 0x166, script: 0x5b, flags: 0x0}, + 1302: {region: 0x166, script: 0x5b, flags: 0x0}, + 1303: {region: 0x166, script: 0x5b, flags: 0x0}, + 1304: {region: 0xd7, script: 0x5b, flags: 0x0}, + 1305: {region: 0x166, script: 0x5b, flags: 0x0}, + 1306: {region: 0x166, script: 0x5b, flags: 0x0}, + 1307: {region: 0x166, script: 0x5b, flags: 0x0}, + 1308: {region: 0x41, script: 0x5b, flags: 0x0}, + 1309: {region: 0x166, script: 0x5b, flags: 0x0}, + 1310: {region: 0xd0, script: 0x5b, flags: 0x0}, + 1311: {region: 0x4a, script: 0x3, flags: 0x1}, + 1312: {region: 0x166, script: 0x5b, flags: 0x0}, + 1313: {region: 0x166, script: 0x5b, flags: 0x0}, + 1314: {region: 0x166, script: 0x5b, flags: 0x0}, + 1315: {region: 0x53, script: 0x5b, flags: 0x0}, + 1316: {region: 0x10c, script: 0x5b, flags: 0x0}, + 1318: {region: 0xa9, script: 0x5, flags: 0x0}, + 1319: {region: 0xda, script: 0x5b, flags: 0x0}, + 1320: {region: 0xbb, script: 0xeb, flags: 0x0}, + 1321: {region: 0x4d, script: 0x14, flags: 0x1}, + 1322: {region: 0x53, script: 0x7f, flags: 0x0}, + 1323: {region: 0x166, script: 0x5b, flags: 0x0}, + 1324: {region: 0x123, script: 0x5b, flags: 0x0}, + 1325: {region: 0xd1, script: 0x5b, flags: 0x0}, + 1326: {region: 0x166, script: 0x5b, flags: 0x0}, + 1327: {region: 0x162, script: 0x5b, flags: 0x0}, + 1329: {region: 0x12c, script: 0x5b, flags: 0x0}, +} + +// likelyLangList holds lists info associated with likelyLang. +// Size: 582 bytes, 97 elements +var likelyLangList = [97]likelyScriptRegion{ + 0: {region: 0x9d, script: 0x7, flags: 0x0}, + 1: {region: 0xa2, script: 0x7a, flags: 0x2}, + 2: {region: 0x11d, script: 0x87, flags: 0x2}, + 3: {region: 0x32, script: 0x5b, flags: 0x0}, + 4: {region: 0x9c, script: 0x5, flags: 0x4}, + 5: {region: 0x9d, script: 0x5, flags: 0x4}, + 6: {region: 0x107, script: 0x20, flags: 0x4}, + 7: {region: 0x9d, script: 0x5, flags: 0x2}, + 8: {region: 0x107, script: 0x20, flags: 0x0}, + 9: {region: 0x38, script: 0x2f, flags: 0x2}, + 10: {region: 0x136, script: 0x5b, flags: 0x0}, + 11: {region: 0x7c, script: 0xd1, flags: 0x2}, + 12: {region: 0x115, script: 0x5b, flags: 0x0}, + 13: {region: 0x85, script: 0x1, flags: 0x2}, + 14: {region: 0x5e, script: 0x1f, flags: 0x0}, + 15: {region: 0x88, script: 0x60, flags: 0x2}, + 16: {region: 0xd7, script: 0x5b, flags: 0x0}, + 17: {region: 0x52, script: 0x5, flags: 0x4}, + 18: {region: 0x10c, script: 0x5, flags: 0x4}, + 19: {region: 0xaf, script: 0x20, flags: 0x0}, + 20: {region: 0x24, script: 0x5, flags: 0x4}, + 21: {region: 0x53, script: 0x5, flags: 0x4}, + 22: {region: 0x9d, script: 0x5, flags: 0x4}, + 23: {region: 0xc6, script: 0x5, flags: 0x4}, + 24: {region: 0x53, script: 0x5, flags: 0x2}, + 25: {region: 0x12c, script: 0x5b, flags: 0x0}, + 26: {region: 0xb1, script: 0x5, flags: 0x4}, + 27: {region: 0x9c, script: 0x5, flags: 0x2}, + 28: {region: 0xa6, script: 0x20, flags: 0x0}, + 29: {region: 0x53, script: 0x5, flags: 0x4}, + 30: {region: 0x12c, script: 0x5b, flags: 0x4}, + 31: {region: 0x53, script: 0x5, flags: 0x2}, + 32: {region: 0x12c, script: 0x5b, flags: 0x2}, + 33: {region: 0xdc, script: 0x22, flags: 0x0}, + 34: {region: 0x9a, script: 0x5e, flags: 0x2}, + 35: {region: 0x84, script: 0x5b, flags: 0x0}, + 36: {region: 0x85, script: 0x7e, flags: 0x4}, + 37: {region: 0x85, script: 0x7e, flags: 0x2}, + 38: {region: 0xc6, script: 0x20, flags: 0x0}, + 39: {region: 0x53, script: 0x71, flags: 0x4}, + 40: {region: 0x53, script: 0x71, flags: 0x2}, + 41: 
{region: 0xd1, script: 0x5b, flags: 0x0}, + 42: {region: 0x4a, script: 0x5, flags: 0x4}, + 43: {region: 0x96, script: 0x5, flags: 0x4}, + 44: {region: 0x9a, script: 0x36, flags: 0x0}, + 45: {region: 0xe9, script: 0x5, flags: 0x4}, + 46: {region: 0xe9, script: 0x5, flags: 0x2}, + 47: {region: 0x9d, script: 0x8d, flags: 0x0}, + 48: {region: 0x53, script: 0x8e, flags: 0x2}, + 49: {region: 0xbb, script: 0xeb, flags: 0x0}, + 50: {region: 0xda, script: 0x5b, flags: 0x4}, + 51: {region: 0xe9, script: 0x5, flags: 0x0}, + 52: {region: 0x9a, script: 0x22, flags: 0x2}, + 53: {region: 0x9a, script: 0x50, flags: 0x2}, + 54: {region: 0x9a, script: 0xd5, flags: 0x2}, + 55: {region: 0x106, script: 0x20, flags: 0x0}, + 56: {region: 0xbe, script: 0x5b, flags: 0x4}, + 57: {region: 0x105, script: 0x5b, flags: 0x4}, + 58: {region: 0x107, script: 0x5b, flags: 0x4}, + 59: {region: 0x12c, script: 0x5b, flags: 0x4}, + 60: {region: 0x125, script: 0x20, flags: 0x0}, + 61: {region: 0xe9, script: 0x5, flags: 0x4}, + 62: {region: 0xe9, script: 0x5, flags: 0x2}, + 63: {region: 0x53, script: 0x5, flags: 0x0}, + 64: {region: 0xaf, script: 0x20, flags: 0x4}, + 65: {region: 0xc6, script: 0x20, flags: 0x4}, + 66: {region: 0xaf, script: 0x20, flags: 0x2}, + 67: {region: 0x9a, script: 0xe, flags: 0x0}, + 68: {region: 0xdc, script: 0x22, flags: 0x4}, + 69: {region: 0xdc, script: 0x22, flags: 0x2}, + 70: {region: 0x138, script: 0x5b, flags: 0x0}, + 71: {region: 0x24, script: 0x5, flags: 0x4}, + 72: {region: 0x53, script: 0x20, flags: 0x4}, + 73: {region: 0x24, script: 0x5, flags: 0x2}, + 74: {region: 0x8e, script: 0x3c, flags: 0x0}, + 75: {region: 0x53, script: 0x3b, flags: 0x4}, + 76: {region: 0x53, script: 0x3b, flags: 0x2}, + 77: {region: 0x53, script: 0x3b, flags: 0x0}, + 78: {region: 0x2f, script: 0x3c, flags: 0x4}, + 79: {region: 0x3e, script: 0x3c, flags: 0x4}, + 80: {region: 0x7c, script: 0x3c, flags: 0x4}, + 81: {region: 0x7f, script: 0x3c, flags: 0x4}, + 82: {region: 0x8e, script: 0x3c, flags: 0x4}, + 83: {region: 0x96, script: 0x3c, flags: 0x4}, + 84: {region: 0xc7, script: 0x3c, flags: 0x4}, + 85: {region: 0xd1, script: 0x3c, flags: 0x4}, + 86: {region: 0xe3, script: 0x3c, flags: 0x4}, + 87: {region: 0xe6, script: 0x3c, flags: 0x4}, + 88: {region: 0xe8, script: 0x3c, flags: 0x4}, + 89: {region: 0x117, script: 0x3c, flags: 0x4}, + 90: {region: 0x124, script: 0x3c, flags: 0x4}, + 91: {region: 0x12f, script: 0x3c, flags: 0x4}, + 92: {region: 0x136, script: 0x3c, flags: 0x4}, + 93: {region: 0x13f, script: 0x3c, flags: 0x4}, + 94: {region: 0x12f, script: 0x11, flags: 0x2}, + 95: {region: 0x12f, script: 0x37, flags: 0x2}, + 96: {region: 0x12f, script: 0x3c, flags: 0x2}, +} + +type likelyLangScript struct { + lang uint16 + script uint16 + flags uint8 +} + +// likelyRegion is a lookup table, indexed by regionID, for the most likely +// languages and scripts given incomplete information. If more entries exist +// for a given regionID, lang and script are the index and size respectively +// of the list in likelyRegionList. +// TODO: exclude containers and user-definable regions from the list. 
+// Size: 2154 bytes, 359 elements +var likelyRegion = [359]likelyLangScript{ + 34: {lang: 0xd7, script: 0x5b, flags: 0x0}, + 35: {lang: 0x3a, script: 0x5, flags: 0x0}, + 36: {lang: 0x0, script: 0x2, flags: 0x1}, + 39: {lang: 0x2, script: 0x2, flags: 0x1}, + 40: {lang: 0x4, script: 0x2, flags: 0x1}, + 42: {lang: 0x3c0, script: 0x5b, flags: 0x0}, + 43: {lang: 0x0, script: 0x5b, flags: 0x0}, + 44: {lang: 0x13e, script: 0x5b, flags: 0x0}, + 45: {lang: 0x41b, script: 0x5b, flags: 0x0}, + 46: {lang: 0x10d, script: 0x5b, flags: 0x0}, + 48: {lang: 0x367, script: 0x5b, flags: 0x0}, + 49: {lang: 0x444, script: 0x5b, flags: 0x0}, + 50: {lang: 0x58, script: 0x5b, flags: 0x0}, + 51: {lang: 0x6, script: 0x2, flags: 0x1}, + 53: {lang: 0xa5, script: 0xe, flags: 0x0}, + 54: {lang: 0x367, script: 0x5b, flags: 0x0}, + 55: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 56: {lang: 0x7e, script: 0x20, flags: 0x0}, + 57: {lang: 0x3a, script: 0x5, flags: 0x0}, + 58: {lang: 0x3d9, script: 0x5b, flags: 0x0}, + 59: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 60: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 62: {lang: 0x31f, script: 0x5b, flags: 0x0}, + 63: {lang: 0x13e, script: 0x5b, flags: 0x0}, + 64: {lang: 0x3a1, script: 0x5b, flags: 0x0}, + 65: {lang: 0x3c0, script: 0x5b, flags: 0x0}, + 67: {lang: 0x8, script: 0x2, flags: 0x1}, + 69: {lang: 0x0, script: 0x5b, flags: 0x0}, + 71: {lang: 0x71, script: 0x20, flags: 0x0}, + 73: {lang: 0x512, script: 0x3e, flags: 0x2}, + 74: {lang: 0x31f, script: 0x5, flags: 0x2}, + 75: {lang: 0x445, script: 0x5b, flags: 0x0}, + 76: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 77: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 78: {lang: 0x10d, script: 0x5b, flags: 0x0}, + 79: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 81: {lang: 0x13e, script: 0x5b, flags: 0x0}, + 82: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 83: {lang: 0xa, script: 0x4, flags: 0x1}, + 84: {lang: 0x13e, script: 0x5b, flags: 0x0}, + 85: {lang: 0x0, script: 0x5b, flags: 0x0}, + 87: {lang: 0x13e, script: 0x5b, flags: 0x0}, + 90: {lang: 0x13e, script: 0x5b, flags: 0x0}, + 91: {lang: 0x3c0, script: 0x5b, flags: 0x0}, + 92: {lang: 0x3a1, script: 0x5b, flags: 0x0}, + 94: {lang: 0xe, script: 0x2, flags: 0x1}, + 95: {lang: 0xfa, script: 0x5b, flags: 0x0}, + 97: {lang: 0x10d, script: 0x5b, flags: 0x0}, + 99: {lang: 0x1, script: 0x5b, flags: 0x0}, + 100: {lang: 0x101, script: 0x5b, flags: 0x0}, + 102: {lang: 0x13e, script: 0x5b, flags: 0x0}, + 104: {lang: 0x10, script: 0x2, flags: 0x1}, + 105: {lang: 0x13e, script: 0x5b, flags: 0x0}, + 106: {lang: 0x13e, script: 0x5b, flags: 0x0}, + 107: {lang: 0x140, script: 0x5b, flags: 0x0}, + 108: {lang: 0x3a, script: 0x5, flags: 0x0}, + 109: {lang: 0x3a, script: 0x5, flags: 0x0}, + 110: {lang: 0x46f, script: 0x2c, flags: 0x0}, + 111: {lang: 0x13e, script: 0x5b, flags: 0x0}, + 112: {lang: 0x12, script: 0x2, flags: 0x1}, + 114: {lang: 0x10d, script: 0x5b, flags: 0x0}, + 115: {lang: 0x151, script: 0x5b, flags: 0x0}, + 116: {lang: 0x1c0, script: 0x22, flags: 0x2}, + 119: {lang: 0x158, script: 0x5b, flags: 0x0}, + 121: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 123: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 124: {lang: 0x14, script: 0x2, flags: 0x1}, + 126: {lang: 0x16, script: 0x3, flags: 0x1}, + 127: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 129: {lang: 0x21, script: 0x5b, flags: 0x0}, + 131: {lang: 0x245, script: 0x5b, flags: 0x0}, + 133: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 134: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 135: {lang: 0x13e, script: 0x5b, flags: 0x0}, + 136: {lang: 0x19, script: 
0x2, flags: 0x1}, + 137: {lang: 0x0, script: 0x5b, flags: 0x0}, + 138: {lang: 0x13e, script: 0x5b, flags: 0x0}, + 140: {lang: 0x3c0, script: 0x5b, flags: 0x0}, + 142: {lang: 0x529, script: 0x3c, flags: 0x0}, + 143: {lang: 0x0, script: 0x5b, flags: 0x0}, + 144: {lang: 0x13e, script: 0x5b, flags: 0x0}, + 145: {lang: 0x1d1, script: 0x5b, flags: 0x0}, + 146: {lang: 0x1d4, script: 0x5b, flags: 0x0}, + 147: {lang: 0x1d5, script: 0x5b, flags: 0x0}, + 149: {lang: 0x13e, script: 0x5b, flags: 0x0}, + 150: {lang: 0x1b, script: 0x2, flags: 0x1}, + 152: {lang: 0x1bc, script: 0x3e, flags: 0x0}, + 154: {lang: 0x1d, script: 0x3, flags: 0x1}, + 156: {lang: 0x3a, script: 0x5, flags: 0x0}, + 157: {lang: 0x20, script: 0x2, flags: 0x1}, + 158: {lang: 0x1f8, script: 0x5b, flags: 0x0}, + 159: {lang: 0x1f9, script: 0x5b, flags: 0x0}, + 162: {lang: 0x3a, script: 0x5, flags: 0x0}, + 163: {lang: 0x200, script: 0x49, flags: 0x0}, + 165: {lang: 0x445, script: 0x5b, flags: 0x0}, + 166: {lang: 0x28a, script: 0x20, flags: 0x0}, + 167: {lang: 0x22, script: 0x3, flags: 0x1}, + 169: {lang: 0x25, script: 0x2, flags: 0x1}, + 171: {lang: 0x254, script: 0x54, flags: 0x0}, + 172: {lang: 0x254, script: 0x54, flags: 0x0}, + 173: {lang: 0x3a, script: 0x5, flags: 0x0}, + 175: {lang: 0x3e2, script: 0x20, flags: 0x0}, + 176: {lang: 0x27, script: 0x2, flags: 0x1}, + 177: {lang: 0x3a, script: 0x5, flags: 0x0}, + 179: {lang: 0x10d, script: 0x5b, flags: 0x0}, + 180: {lang: 0x40c, script: 0xd6, flags: 0x0}, + 182: {lang: 0x43b, script: 0x5b, flags: 0x0}, + 183: {lang: 0x2c0, script: 0x5b, flags: 0x0}, + 184: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 185: {lang: 0x2c7, script: 0x5b, flags: 0x0}, + 186: {lang: 0x3a, script: 0x5, flags: 0x0}, + 187: {lang: 0x29, script: 0x2, flags: 0x1}, + 188: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 189: {lang: 0x2b, script: 0x2, flags: 0x1}, + 190: {lang: 0x432, script: 0x5b, flags: 0x0}, + 191: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 192: {lang: 0x2f1, script: 0x5b, flags: 0x0}, + 195: {lang: 0x2d, script: 0x2, flags: 0x1}, + 196: {lang: 0xa0, script: 0x5b, flags: 0x0}, + 197: {lang: 0x2f, script: 0x2, flags: 0x1}, + 198: {lang: 0x31, script: 0x2, flags: 0x1}, + 199: {lang: 0x33, script: 0x2, flags: 0x1}, + 201: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 202: {lang: 0x35, script: 0x2, flags: 0x1}, + 204: {lang: 0x320, script: 0x5b, flags: 0x0}, + 205: {lang: 0x37, script: 0x3, flags: 0x1}, + 206: {lang: 0x128, script: 0xed, flags: 0x0}, + 208: {lang: 0x13e, script: 0x5b, flags: 0x0}, + 209: {lang: 0x31f, script: 0x5b, flags: 0x0}, + 210: {lang: 0x3c0, script: 0x5b, flags: 0x0}, + 211: {lang: 0x16, script: 0x5b, flags: 0x0}, + 212: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 213: {lang: 0x1b4, script: 0x5b, flags: 0x0}, + 215: {lang: 0x1b4, script: 0x5, flags: 0x2}, + 217: {lang: 0x13e, script: 0x5b, flags: 0x0}, + 218: {lang: 0x367, script: 0x5b, flags: 0x0}, + 219: {lang: 0x347, script: 0x5b, flags: 0x0}, + 220: {lang: 0x351, script: 0x22, flags: 0x0}, + 226: {lang: 0x3a, script: 0x5, flags: 0x0}, + 227: {lang: 0x13e, script: 0x5b, flags: 0x0}, + 229: {lang: 0x13e, script: 0x5b, flags: 0x0}, + 230: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 231: {lang: 0x486, script: 0x5b, flags: 0x0}, + 232: {lang: 0x153, script: 0x5b, flags: 0x0}, + 233: {lang: 0x3a, script: 0x3, flags: 0x1}, + 234: {lang: 0x3b3, script: 0x5b, flags: 0x0}, + 235: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 237: {lang: 0x13e, script: 0x5b, flags: 0x0}, + 238: {lang: 0x3a, script: 0x5, flags: 0x0}, + 239: {lang: 0x3c0, script: 0x5b, 
flags: 0x0}, + 241: {lang: 0x3a2, script: 0x5b, flags: 0x0}, + 242: {lang: 0x194, script: 0x5b, flags: 0x0}, + 244: {lang: 0x3a, script: 0x5, flags: 0x0}, + 259: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 261: {lang: 0x3d, script: 0x2, flags: 0x1}, + 262: {lang: 0x432, script: 0x20, flags: 0x0}, + 263: {lang: 0x3f, script: 0x2, flags: 0x1}, + 264: {lang: 0x3e5, script: 0x5b, flags: 0x0}, + 265: {lang: 0x3a, script: 0x5, flags: 0x0}, + 267: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 268: {lang: 0x3a, script: 0x5, flags: 0x0}, + 269: {lang: 0x41, script: 0x2, flags: 0x1}, + 272: {lang: 0x416, script: 0x5b, flags: 0x0}, + 273: {lang: 0x347, script: 0x5b, flags: 0x0}, + 274: {lang: 0x43, script: 0x2, flags: 0x1}, + 276: {lang: 0x1f9, script: 0x5b, flags: 0x0}, + 277: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 278: {lang: 0x429, script: 0x5b, flags: 0x0}, + 279: {lang: 0x367, script: 0x5b, flags: 0x0}, + 281: {lang: 0x3c0, script: 0x5b, flags: 0x0}, + 283: {lang: 0x13e, script: 0x5b, flags: 0x0}, + 285: {lang: 0x45, script: 0x2, flags: 0x1}, + 289: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 290: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 291: {lang: 0x47, script: 0x2, flags: 0x1}, + 292: {lang: 0x49, script: 0x3, flags: 0x1}, + 293: {lang: 0x4c, script: 0x2, flags: 0x1}, + 294: {lang: 0x477, script: 0x5b, flags: 0x0}, + 295: {lang: 0x3c0, script: 0x5b, flags: 0x0}, + 296: {lang: 0x476, script: 0x5b, flags: 0x0}, + 297: {lang: 0x4e, script: 0x2, flags: 0x1}, + 298: {lang: 0x482, script: 0x5b, flags: 0x0}, + 300: {lang: 0x50, script: 0x4, flags: 0x1}, + 302: {lang: 0x4a0, script: 0x5b, flags: 0x0}, + 303: {lang: 0x54, script: 0x2, flags: 0x1}, + 304: {lang: 0x445, script: 0x5b, flags: 0x0}, + 305: {lang: 0x56, script: 0x3, flags: 0x1}, + 306: {lang: 0x445, script: 0x5b, flags: 0x0}, + 310: {lang: 0x512, script: 0x3e, flags: 0x2}, + 311: {lang: 0x13e, script: 0x5b, flags: 0x0}, + 312: {lang: 0x4bc, script: 0x5b, flags: 0x0}, + 313: {lang: 0x1f9, script: 0x5b, flags: 0x0}, + 316: {lang: 0x13e, script: 0x5b, flags: 0x0}, + 319: {lang: 0x4c3, script: 0x5b, flags: 0x0}, + 320: {lang: 0x8a, script: 0x5b, flags: 0x0}, + 321: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 323: {lang: 0x41b, script: 0x5b, flags: 0x0}, + 334: {lang: 0x59, script: 0x2, flags: 0x1}, + 351: {lang: 0x3a, script: 0x5, flags: 0x0}, + 352: {lang: 0x5b, script: 0x2, flags: 0x1}, + 357: {lang: 0x423, script: 0x5b, flags: 0x0}, +} + +// likelyRegionList holds lists info associated with likelyRegion. 
+// Size: 558 bytes, 93 elements +var likelyRegionList = [93]likelyLangScript{ + 0: {lang: 0x148, script: 0x5, flags: 0x0}, + 1: {lang: 0x476, script: 0x5b, flags: 0x0}, + 2: {lang: 0x431, script: 0x5b, flags: 0x0}, + 3: {lang: 0x2ff, script: 0x20, flags: 0x0}, + 4: {lang: 0x1d7, script: 0x8, flags: 0x0}, + 5: {lang: 0x274, script: 0x5b, flags: 0x0}, + 6: {lang: 0xb7, script: 0x5b, flags: 0x0}, + 7: {lang: 0x432, script: 0x20, flags: 0x0}, + 8: {lang: 0x12d, script: 0xef, flags: 0x0}, + 9: {lang: 0x351, script: 0x22, flags: 0x0}, + 10: {lang: 0x529, script: 0x3b, flags: 0x0}, + 11: {lang: 0x4ac, script: 0x5, flags: 0x0}, + 12: {lang: 0x523, script: 0x5b, flags: 0x0}, + 13: {lang: 0x29a, script: 0xee, flags: 0x0}, + 14: {lang: 0x136, script: 0x34, flags: 0x0}, + 15: {lang: 0x48a, script: 0x5b, flags: 0x0}, + 16: {lang: 0x3a, script: 0x5, flags: 0x0}, + 17: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 18: {lang: 0x27, script: 0x2c, flags: 0x0}, + 19: {lang: 0x139, script: 0x5b, flags: 0x0}, + 20: {lang: 0x26a, script: 0x5, flags: 0x2}, + 21: {lang: 0x512, script: 0x3e, flags: 0x2}, + 22: {lang: 0x210, script: 0x2e, flags: 0x0}, + 23: {lang: 0x5, script: 0x20, flags: 0x0}, + 24: {lang: 0x274, script: 0x5b, flags: 0x0}, + 25: {lang: 0x136, script: 0x34, flags: 0x0}, + 26: {lang: 0x2ff, script: 0x20, flags: 0x0}, + 27: {lang: 0x1e1, script: 0x5b, flags: 0x0}, + 28: {lang: 0x31f, script: 0x5, flags: 0x0}, + 29: {lang: 0x1be, script: 0x22, flags: 0x0}, + 30: {lang: 0x4b4, script: 0x5, flags: 0x0}, + 31: {lang: 0x236, script: 0x76, flags: 0x0}, + 32: {lang: 0x148, script: 0x5, flags: 0x0}, + 33: {lang: 0x476, script: 0x5b, flags: 0x0}, + 34: {lang: 0x24a, script: 0x4f, flags: 0x0}, + 35: {lang: 0xe6, script: 0x5, flags: 0x0}, + 36: {lang: 0x226, script: 0xee, flags: 0x0}, + 37: {lang: 0x3a, script: 0x5, flags: 0x0}, + 38: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 39: {lang: 0x2b8, script: 0x58, flags: 0x0}, + 40: {lang: 0x226, script: 0xee, flags: 0x0}, + 41: {lang: 0x3a, script: 0x5, flags: 0x0}, + 42: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 43: {lang: 0x3dc, script: 0x5b, flags: 0x0}, + 44: {lang: 0x4ae, script: 0x20, flags: 0x0}, + 45: {lang: 0x2ff, script: 0x20, flags: 0x0}, + 46: {lang: 0x431, script: 0x5b, flags: 0x0}, + 47: {lang: 0x331, script: 0x76, flags: 0x0}, + 48: {lang: 0x213, script: 0x5b, flags: 0x0}, + 49: {lang: 0x30b, script: 0x20, flags: 0x0}, + 50: {lang: 0x242, script: 0x5, flags: 0x0}, + 51: {lang: 0x529, script: 0x3c, flags: 0x0}, + 52: {lang: 0x3c0, script: 0x5b, flags: 0x0}, + 53: {lang: 0x3a, script: 0x5, flags: 0x0}, + 54: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 55: {lang: 0x2ed, script: 0x5b, flags: 0x0}, + 56: {lang: 0x4b4, script: 0x5, flags: 0x0}, + 57: {lang: 0x88, script: 0x22, flags: 0x0}, + 58: {lang: 0x4b4, script: 0x5, flags: 0x0}, + 59: {lang: 0x4b4, script: 0x5, flags: 0x0}, + 60: {lang: 0xbe, script: 0x22, flags: 0x0}, + 61: {lang: 0x3dc, script: 0x5b, flags: 0x0}, + 62: {lang: 0x7e, script: 0x20, flags: 0x0}, + 63: {lang: 0x3e2, script: 0x20, flags: 0x0}, + 64: {lang: 0x267, script: 0x5b, flags: 0x0}, + 65: {lang: 0x444, script: 0x5b, flags: 0x0}, + 66: {lang: 0x512, script: 0x3e, flags: 0x0}, + 67: {lang: 0x412, script: 0x5b, flags: 0x0}, + 68: {lang: 0x4ae, script: 0x20, flags: 0x0}, + 69: {lang: 0x3a, script: 0x5, flags: 0x0}, + 70: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 71: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 72: {lang: 0x35, script: 0x5, flags: 0x0}, + 73: {lang: 0x46b, script: 0xee, flags: 0x0}, + 74: {lang: 0x2ec, script: 0x5, flags: 
0x0}, + 75: {lang: 0x30f, script: 0x76, flags: 0x0}, + 76: {lang: 0x467, script: 0x20, flags: 0x0}, + 77: {lang: 0x148, script: 0x5, flags: 0x0}, + 78: {lang: 0x3a, script: 0x5, flags: 0x0}, + 79: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 80: {lang: 0x48a, script: 0x5b, flags: 0x0}, + 81: {lang: 0x58, script: 0x5, flags: 0x0}, + 82: {lang: 0x219, script: 0x20, flags: 0x0}, + 83: {lang: 0x81, script: 0x34, flags: 0x0}, + 84: {lang: 0x529, script: 0x3c, flags: 0x0}, + 85: {lang: 0x48c, script: 0x5b, flags: 0x0}, + 86: {lang: 0x4ae, script: 0x20, flags: 0x0}, + 87: {lang: 0x512, script: 0x3e, flags: 0x0}, + 88: {lang: 0x3b3, script: 0x5b, flags: 0x0}, + 89: {lang: 0x431, script: 0x5b, flags: 0x0}, + 90: {lang: 0x432, script: 0x20, flags: 0x0}, + 91: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 92: {lang: 0x446, script: 0x5, flags: 0x0}, +} + +type likelyTag struct { + lang uint16 + region uint16 + script uint16 +} + +// Size: 198 bytes, 33 elements +var likelyRegionGroup = [33]likelyTag{ + 1: {lang: 0x139, region: 0xd7, script: 0x5b}, + 2: {lang: 0x139, region: 0x136, script: 0x5b}, + 3: {lang: 0x3c0, region: 0x41, script: 0x5b}, + 4: {lang: 0x139, region: 0x2f, script: 0x5b}, + 5: {lang: 0x139, region: 0xd7, script: 0x5b}, + 6: {lang: 0x13e, region: 0xd0, script: 0x5b}, + 7: {lang: 0x445, region: 0x130, script: 0x5b}, + 8: {lang: 0x3a, region: 0x6c, script: 0x5}, + 9: {lang: 0x445, region: 0x4b, script: 0x5b}, + 10: {lang: 0x139, region: 0x162, script: 0x5b}, + 11: {lang: 0x139, region: 0x136, script: 0x5b}, + 12: {lang: 0x139, region: 0x136, script: 0x5b}, + 13: {lang: 0x13e, region: 0x5a, script: 0x5b}, + 14: {lang: 0x529, region: 0x53, script: 0x3b}, + 15: {lang: 0x1be, region: 0x9a, script: 0x22}, + 16: {lang: 0x1e1, region: 0x96, script: 0x5b}, + 17: {lang: 0x1f9, region: 0x9f, script: 0x5b}, + 18: {lang: 0x139, region: 0x2f, script: 0x5b}, + 19: {lang: 0x139, region: 0xe7, script: 0x5b}, + 20: {lang: 0x139, region: 0x8b, script: 0x5b}, + 21: {lang: 0x41b, region: 0x143, script: 0x5b}, + 22: {lang: 0x529, region: 0x53, script: 0x3b}, + 23: {lang: 0x4bc, region: 0x138, script: 0x5b}, + 24: {lang: 0x3a, region: 0x109, script: 0x5}, + 25: {lang: 0x3e2, region: 0x107, script: 0x20}, + 26: {lang: 0x3e2, region: 0x107, script: 0x20}, + 27: {lang: 0x139, region: 0x7c, script: 0x5b}, + 28: {lang: 0x10d, region: 0x61, script: 0x5b}, + 29: {lang: 0x139, region: 0xd7, script: 0x5b}, + 30: {lang: 0x13e, region: 0x1f, script: 0x5b}, + 31: {lang: 0x139, region: 0x9b, script: 0x5b}, + 32: {lang: 0x139, region: 0x7c, script: 0x5b}, +} + +// Size: 264 bytes, 33 elements +var regionContainment = [33]uint64{ + // Entry 0 - 1F + 0x00000001ffffffff, 0x00000000200007a2, 0x0000000000003044, 0x0000000000000008, + 0x00000000803c0010, 0x0000000000000020, 0x0000000000000040, 0x0000000000000080, + 0x0000000000000100, 0x0000000000000200, 0x0000000000000400, 0x000000004000384c, + 0x0000000000001000, 0x0000000000002000, 0x0000000000004000, 0x0000000000008000, + 0x0000000000010000, 0x0000000000020000, 0x0000000000040000, 0x0000000000080000, + 0x0000000000100000, 0x0000000000200000, 0x0000000001c1c000, 0x0000000000800000, + 0x0000000001000000, 0x000000001e020000, 0x0000000004000000, 0x0000000008000000, + 0x0000000010000000, 0x00000000200006a0, 0x0000000040002048, 0x0000000080000000, + // Entry 20 - 3F + 0x0000000100000000, +} + +// regionInclusion maps region identifiers to sets of regions in regionInclusionBits, +// where each set holds all groupings that are directly connected in a region +// containment graph. 
+// Size: 359 bytes, 359 elements +var regionInclusion = [359]uint8{ + // Entry 0 - 3F + 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, + 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, + 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, + 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x26, 0x23, + 0x24, 0x26, 0x27, 0x22, 0x28, 0x29, 0x2a, 0x2b, + 0x26, 0x2c, 0x24, 0x23, 0x26, 0x25, 0x2a, 0x2d, + 0x2e, 0x24, 0x2f, 0x2d, 0x26, 0x30, 0x31, 0x28, + // Entry 40 - 7F + 0x26, 0x28, 0x26, 0x25, 0x31, 0x22, 0x32, 0x33, + 0x34, 0x30, 0x22, 0x27, 0x27, 0x27, 0x35, 0x2d, + 0x29, 0x28, 0x27, 0x36, 0x28, 0x22, 0x21, 0x34, + 0x23, 0x21, 0x26, 0x2d, 0x26, 0x22, 0x37, 0x2e, + 0x35, 0x2a, 0x22, 0x2f, 0x38, 0x26, 0x26, 0x21, + 0x39, 0x39, 0x28, 0x38, 0x39, 0x39, 0x2f, 0x3a, + 0x2f, 0x20, 0x21, 0x38, 0x3b, 0x28, 0x3c, 0x2c, + 0x21, 0x2a, 0x35, 0x27, 0x38, 0x26, 0x24, 0x28, + // Entry 80 - BF + 0x2c, 0x2d, 0x23, 0x30, 0x2d, 0x2d, 0x26, 0x27, + 0x3a, 0x22, 0x34, 0x3c, 0x2d, 0x28, 0x36, 0x22, + 0x34, 0x3a, 0x26, 0x2e, 0x21, 0x39, 0x31, 0x38, + 0x24, 0x2c, 0x25, 0x22, 0x24, 0x25, 0x2c, 0x3a, + 0x2c, 0x26, 0x24, 0x36, 0x21, 0x2f, 0x3d, 0x31, + 0x3c, 0x2f, 0x26, 0x36, 0x36, 0x24, 0x26, 0x3d, + 0x31, 0x24, 0x26, 0x35, 0x25, 0x2d, 0x32, 0x38, + 0x2a, 0x38, 0x39, 0x39, 0x35, 0x33, 0x23, 0x26, + // Entry C0 - FF + 0x2f, 0x3c, 0x21, 0x23, 0x2d, 0x31, 0x36, 0x36, + 0x3c, 0x26, 0x2d, 0x26, 0x3a, 0x2f, 0x25, 0x2f, + 0x34, 0x31, 0x2f, 0x32, 0x3b, 0x2d, 0x2b, 0x2d, + 0x21, 0x34, 0x2a, 0x2c, 0x25, 0x21, 0x3c, 0x24, + 0x29, 0x2b, 0x24, 0x34, 0x21, 0x28, 0x29, 0x3b, + 0x31, 0x25, 0x2e, 0x30, 0x29, 0x26, 0x24, 0x3a, + 0x21, 0x3c, 0x28, 0x21, 0x24, 0x21, 0x21, 0x1f, + 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, + // Entry 100 - 13F + 0x21, 0x21, 0x21, 0x2f, 0x21, 0x2e, 0x23, 0x33, + 0x2f, 0x24, 0x3b, 0x2f, 0x39, 0x38, 0x31, 0x2d, + 0x3a, 0x2c, 0x2e, 0x2d, 0x23, 0x2d, 0x2f, 0x28, + 0x2f, 0x27, 0x33, 0x34, 0x26, 0x24, 0x32, 0x22, + 0x26, 0x27, 0x22, 0x2d, 0x31, 0x3d, 0x29, 0x31, + 0x3d, 0x39, 0x29, 0x31, 0x24, 0x26, 0x29, 0x36, + 0x2f, 0x33, 0x2f, 0x21, 0x22, 0x21, 0x30, 0x28, + 0x3d, 0x23, 0x26, 0x21, 0x28, 0x26, 0x26, 0x31, + // Entry 140 - 17F + 0x3b, 0x29, 0x21, 0x29, 0x21, 0x21, 0x21, 0x21, + 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x23, 0x21, + 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, + 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x24, 0x24, + 0x2f, 0x23, 0x32, 0x2f, 0x27, 0x2f, 0x21, +} + +// regionInclusionBits is an array of bit vectors where every vector represents +// a set of region groupings. These sets are used to compute the distance +// between two regions for the purpose of language matching. 
+// Size: 584 bytes, 73 elements +var regionInclusionBits = [73]uint64{ + // Entry 0 - 1F + 0x0000000102400813, 0x00000000200007a3, 0x0000000000003844, 0x0000000040000808, + 0x00000000803c0011, 0x0000000020000022, 0x0000000040000844, 0x0000000020000082, + 0x0000000000000102, 0x0000000020000202, 0x0000000020000402, 0x000000004000384d, + 0x0000000000001804, 0x0000000040002804, 0x0000000000404000, 0x0000000000408000, + 0x0000000000410000, 0x0000000002020000, 0x0000000000040010, 0x0000000000080010, + 0x0000000000100010, 0x0000000000200010, 0x0000000001c1c001, 0x0000000000c00000, + 0x0000000001400000, 0x000000001e020001, 0x0000000006000000, 0x000000000a000000, + 0x0000000012000000, 0x00000000200006a2, 0x0000000040002848, 0x0000000080000010, + // Entry 20 - 3F + 0x0000000100000001, 0x0000000000000001, 0x0000000080000000, 0x0000000000020000, + 0x0000000001000000, 0x0000000000008000, 0x0000000000002000, 0x0000000000000200, + 0x0000000000000008, 0x0000000000200000, 0x0000000110000000, 0x0000000000040000, + 0x0000000008000000, 0x0000000000000020, 0x0000000104000000, 0x0000000000000080, + 0x0000000000001000, 0x0000000000010000, 0x0000000000000400, 0x0000000004000000, + 0x0000000000000040, 0x0000000010000000, 0x0000000000004000, 0x0000000101000000, + 0x0000000108000000, 0x0000000000000100, 0x0000000100020000, 0x0000000000080000, + 0x0000000000100000, 0x0000000000800000, 0x00000001ffffffff, 0x0000000122400fb3, + // Entry 40 - 5F + 0x00000001827c0813, 0x000000014240385f, 0x0000000103c1c813, 0x000000011e420813, + 0x0000000112000001, 0x0000000106000001, 0x0000000101400001, 0x000000010a000001, + 0x0000000102020001, +} + +// regionInclusionNext marks, for each entry in regionInclusionBits, the set of +// all groups that are reachable from the groups set in the respective entry. 
+// Size: 73 bytes, 73 elements +var regionInclusionNext = [73]uint8{ + // Entry 0 - 3F + 0x3e, 0x3f, 0x0b, 0x0b, 0x40, 0x01, 0x0b, 0x01, + 0x01, 0x01, 0x01, 0x41, 0x0b, 0x0b, 0x16, 0x16, + 0x16, 0x19, 0x04, 0x04, 0x04, 0x04, 0x42, 0x16, + 0x16, 0x43, 0x19, 0x19, 0x19, 0x01, 0x0b, 0x04, + 0x00, 0x00, 0x1f, 0x11, 0x18, 0x0f, 0x0d, 0x09, + 0x03, 0x15, 0x44, 0x12, 0x1b, 0x05, 0x45, 0x07, + 0x0c, 0x10, 0x0a, 0x1a, 0x06, 0x1c, 0x0e, 0x46, + 0x47, 0x08, 0x48, 0x13, 0x14, 0x17, 0x3e, 0x3e, + // Entry 40 - 7F + 0x3e, 0x3e, 0x3e, 0x3e, 0x43, 0x43, 0x42, 0x43, + 0x43, +} + +type parentRel struct { + lang uint16 + script uint16 + maxScript uint16 + toRegion uint16 + fromRegion []uint16 +} + +// Size: 414 bytes, 5 elements +var parents = [5]parentRel{ + 0: {lang: 0x139, script: 0x0, maxScript: 0x5b, toRegion: 0x1, fromRegion: []uint16{0x1a, 0x25, 0x26, 0x2f, 0x34, 0x36, 0x3d, 0x42, 0x46, 0x48, 0x49, 0x4a, 0x50, 0x52, 0x5d, 0x5e, 0x62, 0x65, 0x6e, 0x74, 0x75, 0x76, 0x7c, 0x7d, 0x80, 0x81, 0x82, 0x84, 0x8d, 0x8e, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0xa0, 0xa1, 0xa5, 0xa8, 0xaa, 0xae, 0xb2, 0xb5, 0xb6, 0xc0, 0xc7, 0xcb, 0xcc, 0xcd, 0xcf, 0xd1, 0xd3, 0xd6, 0xd7, 0xde, 0xe0, 0xe1, 0xe7, 0xe8, 0xe9, 0xec, 0xf1, 0x108, 0x10a, 0x10b, 0x10c, 0x10e, 0x10f, 0x113, 0x118, 0x11c, 0x11e, 0x120, 0x126, 0x12a, 0x12d, 0x12e, 0x130, 0x132, 0x13a, 0x13d, 0x140, 0x143, 0x162, 0x163, 0x165}}, + 1: {lang: 0x139, script: 0x0, maxScript: 0x5b, toRegion: 0x1a, fromRegion: []uint16{0x2e, 0x4e, 0x61, 0x64, 0x73, 0xda, 0x10d, 0x110}}, + 2: {lang: 0x13e, script: 0x0, maxScript: 0x5b, toRegion: 0x1f, fromRegion: []uint16{0x2c, 0x3f, 0x41, 0x48, 0x51, 0x54, 0x57, 0x5a, 0x66, 0x6a, 0x8a, 0x90, 0xd0, 0xd9, 0xe3, 0xe5, 0xed, 0xf2, 0x11b, 0x136, 0x137, 0x13c}}, + 3: {lang: 0x3c0, script: 0x0, maxScript: 0x5b, toRegion: 0xef, fromRegion: []uint16{0x2a, 0x4e, 0x5b, 0x87, 0x8c, 0xb8, 0xc7, 0xd2, 0x119, 0x127}}, + 4: {lang: 0x529, script: 0x3c, maxScript: 0x3c, toRegion: 0x8e, fromRegion: []uint16{0xc7}}, +} + +// Total table size 30466 bytes (29KiB); checksum: 7544152B diff --git a/vendor/golang.org/x/text/internal/language/tags.go b/vendor/golang.org/x/text/internal/language/tags.go new file mode 100644 index 00000000000..e7afd3188e6 --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/tags.go @@ -0,0 +1,48 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package language + +// MustParse is like Parse, but panics if the given BCP 47 tag cannot be parsed. +// It simplifies safe initialization of Tag values. +func MustParse(s string) Tag { + t, err := Parse(s) + if err != nil { + panic(err) + } + return t +} + +// MustParseBase is like ParseBase, but panics if the given base cannot be parsed. +// It simplifies safe initialization of Base values. +func MustParseBase(s string) Language { + b, err := ParseBase(s) + if err != nil { + panic(err) + } + return b +} + +// MustParseScript is like ParseScript, but panics if the given script cannot be +// parsed. It simplifies safe initialization of Script values. +func MustParseScript(s string) Script { + scr, err := ParseScript(s) + if err != nil { + panic(err) + } + return scr +} + +// MustParseRegion is like ParseRegion, but panics if the given region cannot be +// parsed. It simplifies safe initialization of Region values. +func MustParseRegion(s string) Region { + r, err := ParseRegion(s) + if err != nil { + panic(err) + } + return r +} + +// Und is the root language. 
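+// It is the zero value of Tag, so an uninitialized Tag also denotes the root +// language; a minimal sketch (illustrative): +// +// var fallback Tag // equivalent to Und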
+var Und Tag diff --git a/vendor/golang.org/x/text/internal/tag/tag.go b/vendor/golang.org/x/text/internal/tag/tag.go new file mode 100644 index 00000000000..b5d348891d8 --- /dev/null +++ b/vendor/golang.org/x/text/internal/tag/tag.go @@ -0,0 +1,100 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package tag contains functionality handling tags and related data. +package tag // import "golang.org/x/text/internal/tag" + +import "sort" + +// An Index converts tags to a compact numeric value. +// +// All elements are of size 4. Tags may be up to 4 bytes long. Excess bytes can +// be used to store additional information about the tag. +type Index string + +// Elem returns the element data at the given index. +func (s Index) Elem(x int) string { + return string(s[x*4 : x*4+4]) +} + +// Index reports the index of the given key or -1 if it could not be found. +// Only the first len(key) bytes from the start of the 4-byte entries will be +// considered for the search and the first match in Index will be returned. +func (s Index) Index(key []byte) int { + n := len(key) + // search the index of the first entry with an equal or higher value than + // key in s. + index := sort.Search(len(s)/4, func(i int) bool { + return cmp(s[i*4:i*4+n], key) != -1 + }) + i := index * 4 + if cmp(s[i:i+len(key)], key) != 0 { + return -1 + } + return index +} + +// Next finds the next occurrence of key after index x, which must have been +// obtained from a call to Index using the same key. It returns x+1 or -1. +func (s Index) Next(key []byte, x int) int { + if x++; x*4 < len(s) && cmp(s[x*4:x*4+len(key)], key) == 0 { + return x + } + return -1 +} + +// cmp returns an integer comparing a and b lexicographically. +func cmp(a Index, b []byte) int { + n := len(a) + if len(b) < n { + n = len(b) + } + for i, c := range b[:n] { + switch { + case a[i] > c: + return 1 + case a[i] < c: + return -1 + } + } + switch { + case len(a) < len(b): + return -1 + case len(a) > len(b): + return 1 + } + return 0 +} + +// Compare returns an integer comparing a and b lexicographically. +func Compare(a string, b []byte) int { + return cmp(Index(a), b) +} + +// FixCase reformats b to the same pattern of cases as form. +// It returns false if string b is malformed. +func FixCase(form string, b []byte) bool { + if len(form) != len(b) { + return false + } + for i, c := range b { + if form[i] <= 'Z' { + if c >= 'a' { + c -= 'z' - 'Z' + } + if c < 'A' || 'Z' < c { + return false + } + } else { + if c <= 'Z' { + c += 'z' - 'Z' + } + if c < 'a' || 'z' < c { + return false + } + } + b[i] = c + } + return true +} diff --git a/vendor/golang.org/x/text/language/coverage.go b/vendor/golang.org/x/text/language/coverage.go new file mode 100644 index 00000000000..a24fd1a4d69 --- /dev/null +++ b/vendor/golang.org/x/text/language/coverage.go @@ -0,0 +1,187 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package language + +import ( + "fmt" + "sort" + + "golang.org/x/text/internal/language" +) + +// The Coverage interface is used to define the level of coverage of an +// internationalization service. Note that not all types are supported by all +// services. As lists may be generated on the fly, it is recommended that users +// of a Coverage cache the results.
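+// +// A minimal sketch of querying coverage (illustrative; Supported is defined +// later in this file): +// +// var bases []Base = Supported.BaseLanguages() +// _ = bases // e.g. iterate to list every supported base language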
+type Coverage interface { + // Tags returns the list of supported tags. + Tags() []Tag + + // BaseLanguages returns the list of supported base languages. + BaseLanguages() []Base + + // Scripts returns the list of supported scripts. + Scripts() []Script + + // Regions returns the list of supported regions. + Regions() []Region +} + +var ( + // Supported defines a Coverage that lists all supported subtags. Tags + // always returns nil. + Supported Coverage = allSubtags{} +) + +// TODO: +// - Support Variants, numbering systems. +// - CLDR coverage levels. +// - Set of common tags defined in this package. + +type allSubtags struct{} + +// Regions returns the list of supported regions. As all regions are in a +// consecutive range, it simply returns a slice of numbers in increasing order. +// The "undefined" region is not returned. +func (s allSubtags) Regions() []Region { + reg := make([]Region, language.NumRegions) + for i := range reg { + reg[i] = Region{language.Region(i + 1)} + } + return reg +} + +// Scripts returns the list of supported scripts. As all scripts are in a +// consecutive range, it simply returns a slice of numbers in increasing order. +// The "undefined" script is not returned. +func (s allSubtags) Scripts() []Script { + scr := make([]Script, language.NumScripts) + for i := range scr { + scr[i] = Script{language.Script(i + 1)} + } + return scr +} + +// BaseLanguages returns the list of all supported base languages. It generates +// the list by traversing the internal structures. +func (s allSubtags) BaseLanguages() []Base { + bs := language.BaseLanguages() + base := make([]Base, len(bs)) + for i, b := range bs { + base[i] = Base{b} + } + return base +} + +// Tags always returns nil. +func (s allSubtags) Tags() []Tag { + return nil +} + +// coverage is used by NewCoverage which is used as a convenient way for +// creating Coverage implementations for partially defined data. Very often a +// package will only need to define a subset of slices. coverage provides a +// convenient way to do this. Moreover, packages using NewCoverage, instead of +// their own implementation, will not break if later new slice types are added. +type coverage struct { + tags func() []Tag + bases func() []Base + scripts func() []Script + regions func() []Region +} + +func (s *coverage) Tags() []Tag { + if s.tags == nil { + return nil + } + return s.tags() +} + +// bases implements sort.Interface and is used to sort base languages. +type bases []Base + +func (b bases) Len() int { + return len(b) +} + +func (b bases) Swap(i, j int) { + b[i], b[j] = b[j], b[i] +} + +func (b bases) Less(i, j int) bool { + return b[i].langID < b[j].langID +} + +// BaseLanguages returns the result from calling s.bases if it is specified or +// otherwise derives the set of supported base languages from tags. +func (s *coverage) BaseLanguages() []Base { + if s.bases == nil { + tags := s.Tags() + if len(tags) == 0 { + return nil + } + a := make([]Base, len(tags)) + for i, t := range tags { + a[i] = Base{language.Language(t.lang())} + } + sort.Sort(bases(a)) + k := 0 + for i := 1; i < len(a); i++ { + if a[k] != a[i] { + k++ + a[k] = a[i] + } + } + return a[:k+1] + } + return s.bases() +} + +func (s *coverage) Scripts() []Script { + if s.scripts == nil { + return nil + } + return s.scripts() +} + +func (s *coverage) Regions() []Region { + if s.regions == nil { + return nil + } + return s.regions() +} + +// NewCoverage returns a Coverage for the given lists. 
It is typically used by +// packages providing internationalization services to define their level of +// coverage. A list may be of type []T or func() []T, where T is either Tag, +// Base, Script or Region. The returned Coverage derives the value for Bases +// from Tags if no func or slice for []Base is specified. For other unspecified +// types the returned Coverage will return nil for the respective methods. +func NewCoverage(list ...interface{}) Coverage { + s := &coverage{} + for _, x := range list { + switch v := x.(type) { + case func() []Base: + s.bases = v + case func() []Script: + s.scripts = v + case func() []Region: + s.regions = v + case func() []Tag: + s.tags = v + case []Base: + s.bases = func() []Base { return v } + case []Script: + s.scripts = func() []Script { return v } + case []Region: + s.regions = func() []Region { return v } + case []Tag: + s.tags = func() []Tag { return v } + default: + panic(fmt.Sprintf("language: unsupported set type %T", v)) + } + } + return s +} diff --git a/vendor/golang.org/x/text/language/doc.go b/vendor/golang.org/x/text/language/doc.go new file mode 100644 index 00000000000..212b77c9068 --- /dev/null +++ b/vendor/golang.org/x/text/language/doc.go @@ -0,0 +1,98 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package language implements BCP 47 language tags and related functionality. +// +// The most important function of package language is to match a list of +// user-preferred languages to a list of supported languages. +// It relieves the developer of dealing with the complexity of this process +// and provides the user with the best experience +// (see https://blog.golang.org/matchlang). +// +// # Matching preferred against supported languages +// +// A Matcher for an application that supports English, Australian English, +// Danish, and standard Mandarin can be created as follows: +// +// var matcher = language.NewMatcher([]language.Tag{ +// language.English, // The first language is used as fallback. +// language.MustParse("en-AU"), +// language.Danish, +// language.Chinese, +// }) +// +// This list of supported languages is typically implied by the languages for +// which there exist translations of the user interface. +// +// User-preferred languages usually come as a comma-separated list of BCP 47 +// language tags. +// MatchStrings finds best matches for such strings: +// +// func handler(w http.ResponseWriter, r *http.Request) { +// lang, _ := r.Cookie("lang") +// accept := r.Header.Get("Accept-Language") +// tag, _ := language.MatchStrings(matcher, lang.String(), accept) +// +// // tag should now be used for the initialization of any +// // locale-specific service. +// } +// +// The Matcher's Match method can be used to match Tags directly. +// +// Matchers are aware of the intricacies of equivalence between languages, such +// as deprecated subtags, legacy tags, macro languages, mutual +// intelligibility between scripts and languages, and transparently passing +// BCP 47 user configuration. +// For instance, it will know that a reader of Danish can read Norwegian Bokmål +// and will know that Cantonese ("yue") is a good match for "zh-HK". +// +// # Using match results +// +// To guarantee a consistent user experience, it is important to +// use the same language tag for the selection of any locale-specific services.
+// For example, it is utterly confusing to substitute spelled-out numbers +// or dates in one language in text of another language. +// More subtly confusing is using the wrong sorting order or casing +// algorithm for a certain language. +// +// All the packages in x/text that provide locale-specific services +// (e.g. collate, cases) should be initialized with the tag that was +// obtained at the start of an interaction with the user. +// +// Note that the Tag returned by Match and MatchStrings may differ from any +// of the supported languages, as it may contain carried-over settings from +// the user tags. +// This may be inconvenient when your application has some additional +// locale-specific data for your supported languages. +// Match and MatchStrings both return the index of the matched supported tag +// to simplify associating such data with the matched tag. +// +// # Canonicalization +// +// If one uses the Matcher to compare languages, one does not need to +// worry about canonicalization. +// +// The meaning of a Tag varies per application. The language package +// therefore delays canonicalization and preserves information as much +// as possible. The Matcher, however, will always take into account that +// two different tags may represent the same language. +// +// By default, only legacy and deprecated tags are converted into their +// canonical equivalent. All other information is preserved. This approach makes +// the confidence scores more accurate and allows matchers to distinguish +// between variants that are otherwise lost. +// +// As a consequence, two tags that should be treated as identical according to +// BCP 47 or CLDR, like "en-Latn" and "en", will be represented differently. The +// Matcher handles such distinctions, though, and is aware of the +// equivalence relations. The CanonType type can be used to alter the +// canonicalization form. +// +// # References +// +// BCP 47 - Tags for Identifying Languages http://tools.ietf.org/html/bcp47 +package language // import "golang.org/x/text/language" + +// TODO: explanation on how to match languages for your own locale-specific +// service. diff --git a/vendor/golang.org/x/text/language/language.go b/vendor/golang.org/x/text/language/language.go new file mode 100644 index 00000000000..4d9c6612129 --- /dev/null +++ b/vendor/golang.org/x/text/language/language.go @@ -0,0 +1,605 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go -output tables.go + +package language + +// TODO: Remove above NOTE after: +// - verifying that tables are dropped correctly (most notably matcher tables). + +import ( + "strings" + + "golang.org/x/text/internal/language" + "golang.org/x/text/internal/language/compact" +) + +// Tag represents a BCP 47 language tag. It is used to specify an instance of a +// specific language or locale. All language tag values are guaranteed to be +// well-formed. +type Tag compact.Tag + +func makeTag(t language.Tag) (tag Tag) { + return Tag(compact.Make(t)) +} + +func (t *Tag) tag() language.Tag { + return (*compact.Tag)(t).Tag() +} + +func (t *Tag) isCompact() bool { + return (*compact.Tag)(t).IsCompact() +} + +// TODO: improve performance.
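+ +// The accessors below unwrap the compact representation to expose the base +// language, region and script IDs of the underlying internal tag.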
+func (t *Tag) lang() language.Language { return t.tag().LangID } +func (t *Tag) region() language.Region { return t.tag().RegionID } +func (t *Tag) script() language.Script { return t.tag().ScriptID } + +// Make is a convenience wrapper for Parse that omits the error. +// In case of an error, a sensible default is returned. +func Make(s string) Tag { + return Default.Make(s) +} + +// Make is a convenience wrapper for c.Parse that omits the error. +// In case of an error, a sensible default is returned. +func (c CanonType) Make(s string) Tag { + t, _ := c.Parse(s) + return t +} + +// Raw returns the raw base language, script and region, without making an +// attempt to infer their values. +func (t Tag) Raw() (b Base, s Script, r Region) { + tt := t.tag() + return Base{tt.LangID}, Script{tt.ScriptID}, Region{tt.RegionID} +} + +// IsRoot returns true if t is equal to language "und". +func (t Tag) IsRoot() bool { + return compact.Tag(t).IsRoot() +} + +// CanonType can be used to enable or disable various types of canonicalization. +type CanonType int + +const ( + // Replace deprecated base languages with their preferred replacements. + DeprecatedBase CanonType = 1 << iota + // Replace deprecated scripts with their preferred replacements. + DeprecatedScript + // Replace deprecated regions with their preferred replacements. + DeprecatedRegion + // Remove redundant scripts. + SuppressScript + // Normalize legacy encodings. This includes legacy languages defined in + // CLDR as well as bibliographic codes defined in ISO-639. + Legacy + // Map the dominant language of a macro language group to the macro language + // subtag. For example cmn -> zh. + Macro + // The CLDR flag should be used if full compatibility with CLDR is required. + // There are a few cases where language.Tag may differ from CLDR. To follow all + // of CLDR's suggestions, use All|CLDR. + CLDR + + // Raw can be used to Compose or Parse without Canonicalization. + Raw CanonType = 0 + + // Replace all deprecated tags with their preferred replacements. + Deprecated = DeprecatedBase | DeprecatedScript | DeprecatedRegion + + // All canonicalizations recommended by BCP 47. + BCP47 = Deprecated | SuppressScript + + // All canonicalizations. + All = BCP47 | Legacy | Macro + + // Default is the canonicalization used by Parse, Make and Compose. To + // preserve as much information as possible, canonicalizations that remove + // potentially valuable information are not included. The Matcher is + // designed to recognize similar tags that would be the same if + // they were canonicalized using All. + Default = Deprecated | Legacy + + canonLang = DeprecatedBase | Legacy | Macro + + // TODO: LikelyScript, LikelyRegion: suppress similar to ICU. +) + +// canonicalize returns the canonicalized equivalent of the tag and +// whether there was any change. +func canonicalize(c CanonType, t language.Tag) (language.Tag, bool) { + if c == Raw { + return t, false + } + changed := false + if c&SuppressScript != 0 { + if t.LangID.SuppressScript() == t.ScriptID { + t.ScriptID = 0 + changed = true + } + } + if c&canonLang != 0 { + for { + if l, aliasType := t.LangID.Canonicalize(); l != t.LangID { + switch aliasType { + case language.Legacy: + if c&Legacy != 0 { + if t.LangID == _sh && t.ScriptID == 0 { + t.ScriptID = _Latn + } + t.LangID = l + changed = true + } + case language.Macro: + if c&Macro != 0 { + // We deviate here from CLDR. The mapping "nb" -> "no" + // qualifies as a typical Macro language mapping. 
However, + // for legacy reasons, CLDR maps "no", the macro language + // code for Norwegian, to the dominant variant "nb". This + // change is currently under consideration for CLDR as well. + // See https://unicode.org/cldr/trac/ticket/2698 and also + // https://unicode.org/cldr/trac/ticket/1790 for some of the + // practical implications. TODO: this check could be removed + // if CLDR adopts this change. + if c&CLDR == 0 || t.LangID != _nb { + changed = true + t.LangID = l + } + } + case language.Deprecated: + if c&DeprecatedBase != 0 { + if t.LangID == _mo && t.RegionID == 0 { + t.RegionID = _MD + } + t.LangID = l + changed = true + // Other canonicalization types may still apply. + continue + } + } + } else if c&Legacy != 0 && t.LangID == _no && c&CLDR != 0 { + t.LangID = _nb + changed = true + } + break + } + } + if c&DeprecatedScript != 0 { + if t.ScriptID == _Qaai { + changed = true + t.ScriptID = _Zinh + } + } + if c&DeprecatedRegion != 0 { + if r := t.RegionID.Canonicalize(); r != t.RegionID { + changed = true + t.RegionID = r + } + } + return t, changed +} + +// Canonicalize returns the canonicalized equivalent of the tag. +func (c CanonType) Canonicalize(t Tag) (Tag, error) { + // First try fast path. + if t.isCompact() { + if _, changed := canonicalize(c, compact.Tag(t).Tag()); !changed { + return t, nil + } + } + // It is unlikely that one will canonicalize a tag after matching. So do + // a slow but simple approach here. + if tag, changed := canonicalize(c, t.tag()); changed { + tag.RemakeString() + return makeTag(tag), nil + } + return t, nil + +} + +// Confidence indicates the level of certainty for a given return value. +// For example, Serbian may be written in Cyrillic or Latin script. +// The confidence level indicates whether a value was explicitly specified, +// whether it is typically the only possible value, or whether there is +// an ambiguity. +type Confidence int + +const ( + No Confidence = iota // full confidence that there was no match + Low // most likely value picked out of a set of alternatives + High // value is generally assumed to be the correct match + Exact // exact match or explicitly specified value +) + +var confName = []string{"No", "Low", "High", "Exact"} + +func (c Confidence) String() string { + return confName[c] +} + +// String returns the canonical string representation of the language tag. +func (t Tag) String() string { + return t.tag().String() +} + +// MarshalText implements encoding.TextMarshaler. +func (t Tag) MarshalText() (text []byte, err error) { + return t.tag().MarshalText() +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (t *Tag) UnmarshalText(text []byte) error { + var tag language.Tag + err := tag.UnmarshalText(text) + *t = makeTag(tag) + return err +} + +// Base returns the base language of the language tag. If the base language is +// unspecified, an attempt will be made to infer it from the context. +// It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change. +func (t Tag) Base() (Base, Confidence) { + if b := t.lang(); b != 0 { + return Base{b}, Exact + } + tt := t.tag() + c := High + if tt.ScriptID == 0 && !tt.RegionID.IsCountry() { + c = Low + } + if tag, err := tt.Maximize(); err == nil && tag.LangID != 0 { + return Base{tag.LangID}, c + } + return Base{0}, No +} + +// Script infers the script for the language tag. If it was not explicitly given, it will infer +// a most likely candidate. 
+// If more than one script is commonly used for a language, the most likely one +// is returned with a low confidence indication. For example, it returns (Cyrl, Low) +// for Serbian. +// If a script cannot be inferred, (Zzzz, No) is returned. We do not use Zyyy (undetermined) +// as one would suspect from the IANA registry for BCP 47. In a Unicode context Zyyy marks +// common characters (like 1, 2, 3, '.', etc.) and is therefore more like multiple scripts. +// See https://www.unicode.org/reports/tr24/#Values for more details. Zzzz is also used for +// the unknown value in CLDR. (Zzzz, Exact) is returned if Zzzz was explicitly specified. +// Note that an inferred script is never guaranteed to be the correct one. Latin is +// almost exclusively used for Afrikaans, but Arabic has been used for some texts +// in the past. Also, the script that is commonly used may change over time. +// It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change. +func (t Tag) Script() (Script, Confidence) { + if scr := t.script(); scr != 0 { + return Script{scr}, Exact + } + tt := t.tag() + sc, c := language.Script(_Zzzz), No + if scr := tt.LangID.SuppressScript(); scr != 0 { + // Note: it is not always the case that a language with a suppress + // script value is only written in one script (e.g. kk, ms, pa). + if tt.RegionID == 0 { + return Script{scr}, High + } + sc, c = scr, High + } + if tag, err := tt.Maximize(); err == nil { + if tag.ScriptID != sc { + sc, c = tag.ScriptID, Low + } + } else { + tt, _ = canonicalize(Deprecated|Macro, tt) + if tag, err := tt.Maximize(); err == nil && tag.ScriptID != sc { + sc, c = tag.ScriptID, Low + } + } + return Script{sc}, c +} + +// Region returns the region for the language tag. If it was not explicitly given, it will +// infer a most likely candidate from the context. +// It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change. +func (t Tag) Region() (Region, Confidence) { + if r := t.region(); r != 0 { + return Region{r}, Exact + } + tt := t.tag() + if tt, err := tt.Maximize(); err == nil { + return Region{tt.RegionID}, Low // TODO: differentiate between high and low. + } + tt, _ = canonicalize(Deprecated|Macro, tt) + if tag, err := tt.Maximize(); err == nil { + return Region{tag.RegionID}, Low + } + return Region{_ZZ}, No // TODO: return world instead of undetermined? +} + +// Variants returns the variants specified explicitly for this language tag, +// or nil if no variant was specified. +func (t Tag) Variants() []Variant { + if !compact.Tag(t).MayHaveVariants() { + return nil + } + v := []Variant{} + x, str := "", t.tag().Variants() + for str != "" { + x, str = nextToken(str) + v = append(v, Variant{x}) + } + return v +} + +// Parent returns the CLDR parent of t. In CLDR, missing fields in data for a +// specific language are substituted with fields from the parent language. +// The parent for a language may change for newer versions of CLDR. +// +// Parent returns a tag for a less specific language that is mutually +// intelligible or Und if there is no such language. This may not be the same as +// simply stripping the last BCP 47 subtag. For instance, the parent of "zh-TW" +// is "zh-Hant", and the parent of "zh-Hant" is "und". +func (t Tag) Parent() Tag { + return Tag(compact.Tag(t).Parent()) +} + +// nextToken returns token t and the rest of the string.
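+// For example (following the implementation below), given "-abc-def" it +// returns ("abc", "-def"); the input is assumed to begin with a dash.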
+func nextToken(s string) (t, tail string) { + p := strings.Index(s[1:], "-") + if p == -1 { + return s[1:], "" + } + p++ + return s[1:p], s[p:] +} + +// Extension is a single BCP 47 extension. +type Extension struct { + s string +} + +// String returns the string representation of the extension, including the +// type tag. +func (e Extension) String() string { + return e.s +} + +// ParseExtension parses s as an extension and returns it on success. +func ParseExtension(s string) (e Extension, err error) { + ext, err := language.ParseExtension(s) + return Extension{ext}, err +} + +// Type returns the one-byte extension type of e. It returns 0 for the zero +// extension. +func (e Extension) Type() byte { + if e.s == "" { + return 0 + } + return e.s[0] +} + +// Tokens returns the list of tokens of e. +func (e Extension) Tokens() []string { + return strings.Split(e.s, "-") +} + +// Extension returns the extension of type x for tag t. It will return +// false for ok if t does not have the requested extension. The returned +// extension will be invalid in this case. +func (t Tag) Extension(x byte) (ext Extension, ok bool) { + if !compact.Tag(t).MayHaveExtensions() { + return Extension{}, false + } + e, ok := t.tag().Extension(x) + return Extension{e}, ok +} + +// Extensions returns all extensions of t. +func (t Tag) Extensions() []Extension { + if !compact.Tag(t).MayHaveExtensions() { + return nil + } + e := []Extension{} + for _, ext := range t.tag().Extensions() { + e = append(e, Extension{ext}) + } + return e +} + +// TypeForKey returns the type associated with the given key, where key and type +// are of the allowed values defined for the Unicode locale extension ('u') in +// https://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers. +// TypeForKey will traverse the inheritance chain to get the correct value. +// +// If there are multiple types associated with a key, only the first will be +// returned. If there is no type associated with a key, it returns the empty +// string. +func (t Tag) TypeForKey(key string) string { + if !compact.Tag(t).MayHaveExtensions() { + if key != "rg" && key != "va" { + return "" + } + } + return t.tag().TypeForKey(key) +} + +// SetTypeForKey returns a new Tag with the key set to type, where key and type +// are of the allowed values defined for the Unicode locale extension ('u') in +// https://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers. +// An empty value removes an existing pair with the same key. +func (t Tag) SetTypeForKey(key, value string) (Tag, error) { + tt, err := t.tag().SetTypeForKey(key, value) + return makeTag(tt), err +} + +// NumCompactTags is the number of compact tags. The maximum tag is +// NumCompactTags-1. +const NumCompactTags = compact.NumCompactTags + +// CompactIndex returns an index, where 0 <= index < NumCompactTags, for tags +// for which data exists in the text repository. The index will change over time +// and should not be stored in persistent storage. If t does not match a compact +// index, exact will be false and the compact index will be returned for the +// first match after repeatedly taking the Parent of t. +func CompactIndex(t Tag) (index int, exact bool) { + id, exact := compact.LanguageID(compact.Tag(t)) + return int(id), exact +} + +var root = language.Tag{} + +// Base is an ISO 639 language code, used for encoding the base language +// of a language tag. +type Base struct { + langID language.Language +} + +// ParseBase parses a 2- or 3-letter ISO 639 code.
+// It returns a ValueError if s is a well-formed but unknown language identifier +// or another error if another error occurred. +func ParseBase(s string) (Base, error) { + l, err := language.ParseBase(s) + return Base{l}, err +} + +// String returns the BCP 47 representation of the base language. +func (b Base) String() string { + return b.langID.String() +} + +// ISO3 returns the ISO 639-3 language code. +func (b Base) ISO3() string { + return b.langID.ISO3() +} + +// IsPrivateUse reports whether this language code is reserved for private use. +func (b Base) IsPrivateUse() bool { + return b.langID.IsPrivateUse() +} + +// Script is a 4-letter ISO 15924 code for representing scripts. +// It is idiomatically represented in title case. +type Script struct { + scriptID language.Script +} + +// ParseScript parses a 4-letter ISO 15924 code. +// It returns a ValueError if s is a well-formed but unknown script identifier +// or another error if another error occurred. +func ParseScript(s string) (Script, error) { + sc, err := language.ParseScript(s) + return Script{sc}, err +} + +// String returns the script code in title case. +// It returns "Zzzz" for an unspecified script. +func (s Script) String() string { + return s.scriptID.String() +} + +// IsPrivateUse reports whether this script code is reserved for private use. +func (s Script) IsPrivateUse() bool { + return s.scriptID.IsPrivateUse() +} + +// Region is an ISO 3166-1 or UN M.49 code for representing countries and regions. +type Region struct { + regionID language.Region +} + +// EncodeM49 returns the Region for the given UN M.49 code. +// It returns an error if r is not a valid code. +func EncodeM49(r int) (Region, error) { + rid, err := language.EncodeM49(r) + return Region{rid}, err +} + +// ParseRegion parses a 2- or 3-letter ISO 3166-1 or a UN M.49 code. +// It returns a ValueError if s is a well-formed but unknown region identifier +// or another error if another error occurred. +func ParseRegion(s string) (Region, error) { + r, err := language.ParseRegion(s) + return Region{r}, err +} + +// String returns the BCP 47 representation for the region. +// It returns "ZZ" for an unspecified region. +func (r Region) String() string { + return r.regionID.String() +} + +// ISO3 returns the 3-letter ISO code of r. +// Note that not all regions have a 3-letter ISO code. +// In such cases this method returns "ZZZ". +func (r Region) ISO3() string { + return r.regionID.ISO3() +} + +// M49 returns the UN M.49 encoding of r, or 0 if this encoding +// is not defined for r. +func (r Region) M49() int { + return r.regionID.M49() +} + +// IsPrivateUse reports whether r has the ISO 3166 User-assigned status. This +// may include private-use tags that are assigned by CLDR and used in this +// implementation. So IsPrivateUse and IsCountry can be simultaneously true. +func (r Region) IsPrivateUse() bool { + return r.regionID.IsPrivateUse() +} + +// IsCountry returns whether this region is a country or autonomous area. This +// includes non-standard definitions from CLDR. +func (r Region) IsCountry() bool { + return r.regionID.IsCountry() +} + +// IsGroup returns whether this region defines a collection of regions. This +// includes non-standard definitions from CLDR. +func (r Region) IsGroup() bool { + return r.regionID.IsGroup() +} + +// Contains returns whether Region c is contained by Region r. It returns true +// if c == r. 
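+// +// A hedged sketch (illustrative values; the containment relation itself comes +// from CLDR data): +// +// latam, _ := EncodeM49(419) // UN M.49 419: Latin America and the Caribbean +// mx, _ := ParseRegion("MX") +// _ = latam.Contains(mx) // expected to be true under CLDR containment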
+func (r Region) Contains(c Region) bool { + return r.regionID.Contains(c.regionID) +} + +// TLD returns the country code top-level domain (ccTLD). UK is returned for GB. +// In all other cases it returns either the region itself or an error. +// +// This method may return an error for a region for which there exists a +// canonical form with a ccTLD. To get that ccTLD canonicalize r first. The +// region will already be canonicalized if it was obtained from a Tag that was +// obtained using any of the default methods. +func (r Region) TLD() (Region, error) { + tld, err := r.regionID.TLD() + return Region{tld}, err +} + +// Canonicalize returns the region or a possible replacement if the region is +// deprecated. It will not return a replacement for deprecated regions that +// are split into multiple regions. +func (r Region) Canonicalize() Region { + return Region{r.regionID.Canonicalize()} +} + +// Variant represents a registered variant of a language as defined by BCP 47. +type Variant struct { + variant string +} + +// ParseVariant parses and returns a Variant. An error is returned if s is not +// a valid variant. +func ParseVariant(s string) (Variant, error) { + v, err := language.ParseVariant(s) + return Variant{v.String()}, err +} + +// String returns the string representation of the variant. +func (v Variant) String() string { + return v.variant +} diff --git a/vendor/golang.org/x/text/language/match.go b/vendor/golang.org/x/text/language/match.go new file mode 100644 index 00000000000..1153baf291c --- /dev/null +++ b/vendor/golang.org/x/text/language/match.go @@ -0,0 +1,735 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package language + +import ( + "errors" + "strings" + + "golang.org/x/text/internal/language" +) + +// A MatchOption configures a Matcher. +type MatchOption func(*matcher) + +// PreferSameScript will, in the absence of a match, cause the first preferred +// tag with the same script as a supported tag to match that supported tag. The +// default is currently true, but this may change in the future. +func PreferSameScript(preferSame bool) MatchOption { + return func(m *matcher) { m.preferSameScript = preferSame } +} + +// TODO(v1.0.0): consider making Matcher a concrete type, instead of interface. +// There doesn't seem to be too much need for multiple types. +// Making it a concrete type allows MatchStrings to be a method, which will +// improve its discoverability. + +// MatchStrings parses and matches the given strings until one of them matches +// the language in the Matcher. A string may be an Accept-Language header as +// handled by ParseAcceptLanguage. The default language is returned if no +// other language matched. +func MatchStrings(m Matcher, lang ...string) (tag Tag, index int) { + for _, accept := range lang { + desired, _, err := ParseAcceptLanguage(accept) + if err != nil { + continue + } + if tag, index, conf := m.Match(desired...); conf != No { + return tag, index + } + } + tag, index, _ = m.Match() + return +} + +// Matcher is the interface that wraps the Match method. +// +// Match returns the best match for any of the given tags, along with +// a unique index associated with the returned tag and a confidence +// score.
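+// +// A minimal usage sketch (illustrative tags): +// +// m := language.NewMatcher([]language.Tag{language.English, language.Danish}) +// tag, index, conf := m.Match(language.MustParse("da-DK")) +// // index identifies the matched supported tag; conf reports match quality.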
+type Matcher interface { + Match(t ...Tag) (tag Tag, index int, c Confidence) +} + +// Comprehends reports the confidence score for a speaker of a given language +// being able to comprehend the written form of an alternative language. +func Comprehends(speaker, alternative Tag) Confidence { + _, _, c := NewMatcher([]Tag{alternative}).Match(speaker) + return c +} + +// NewMatcher returns a Matcher that matches an ordered list of preferred tags +// against a list of supported tags based on written intelligibility, closeness +// of dialect, equivalence of subtags and various other rules. It is initialized +// with the list of supported tags. The first element is used as the default +// value in case no match is found. +// +// Its Match method matches the first of the given Tags to reach a certain +// confidence threshold. The tags passed to Match should therefore be specified +// in order of preference. Extensions are ignored for matching. +// +// The index returned by the Match method corresponds to the index of the +// matched tag in t, but is augmented with the Unicode extension ('u') of the +// corresponding preferred tag. This allows user locale options to be passed +// transparently. +func NewMatcher(t []Tag, options ...MatchOption) Matcher { + return newMatcher(t, options) +} + +func (m *matcher) Match(want ...Tag) (t Tag, index int, c Confidence) { + var tt language.Tag + match, w, c := m.getBest(want...) + if match != nil { + tt, index = match.tag, match.index + } else { + // TODO: this should be an option + tt = m.default_.tag + if m.preferSameScript { + outer: + for _, w := range want { + script, _ := w.Script() + if script.scriptID == 0 { + // Don't do anything if there is no script, such as with + // private subtags. + continue + } + for i, h := range m.supported { + if script.scriptID == h.maxScript { + tt, index = h.tag, i + break outer + } + } + } + } + // TODO: select first language tag based on script. + } + if w.RegionID != tt.RegionID && w.RegionID != 0 { + if w.RegionID != 0 && tt.RegionID != 0 && tt.RegionID.Contains(w.RegionID) { + tt.RegionID = w.RegionID + tt.RemakeString() + } else if r := w.RegionID.String(); len(r) == 2 { + // TODO: also filter macro and deprecated. + tt, _ = tt.SetTypeForKey("rg", strings.ToLower(r)+"zzzz") + } + } + // Copy options from the user-provided tag into the result tag. This is hard + // to do after the fact, so we do it here. + // TODO: add in alternative variants to -u-va-. + // TODO: add preferred region to -u-rg-. + if e := w.Extensions(); len(e) > 0 { + b := language.Builder{} + b.SetTag(tt) + for _, e := range e { + b.AddExt(e) + } + tt = b.Make() + } + return makeTag(tt), index, c +} + +// ErrMissingLikelyTagsData indicates no information was available +// to compute likely values of missing tags. +var ErrMissingLikelyTagsData = errors.New("missing likely tags data") + +// func (t *Tag) setTagsFrom(id Tag) { +// t.LangID = id.LangID +// t.ScriptID = id.ScriptID +// t.RegionID = id.RegionID +// } + +// Tag Matching +// CLDR defines an algorithm for finding the best match between two sets of language +// tags. The basic algorithm defines how to score a possible match and then find +// the match with the best score +// (see https://www.unicode.org/reports/tr35/#LanguageMatching). +// Using scoring has several disadvantages. The scoring obfuscates the importance of +// the various factors considered, making the algorithm harder to understand. Using +// scoring also requires the full score to be computed for each pair of tags.
+// +// We will use a different algorithm which aims to have the following properties: +// - clarity on the precedence of the various selection factors, and +// - improved performance by allowing early termination of a comparison. +// +// Matching algorithm (overview) +// Input: +// - supported: a set of supported tags +// - default: the default tag to return in case there is no match +// - desired: list of desired tags, ordered by preference, starting with +// the most-preferred. +// +// Algorithm: +// 1) Set the best match to the lowest confidence level +// 2) For each tag in "desired": +// a) For each tag in "supported": +// 1) compute the match between the two tags. +// 2) if the match is better than the previous best match, replace it +// with the new match. (see next section) +// b) if the current best match is Exact and pin is true, the result will be +// frozen to the language found thus far, although better matches may +// still be found for the same language. +// 3) If the best match so far is below a certain threshold, return "default". +// +// Ranking: +// We use two phases to determine whether one pair of tags is a better match +// than another pair of tags. First, we determine a rough confidence level. If the +// levels are different, the one with the higher confidence wins. +// Second, if the rough confidence levels are identical, we use a set of tie-breaker +// rules. +// +// The confidence level of matching a pair of tags is determined by finding the +// lowest confidence level of any matches of the corresponding subtags (the +// result is deemed as good as its weakest link). +// We define the following levels: +// Exact - An exact match of a subtag, before adding likely subtags. +// MaxExact - An exact match of a subtag, after adding likely subtags. +// [See Note 2]. +// High - High level of mutual intelligibility between different subtag +// variants. +// Low - Low level of mutual intelligibility between different subtag +// variants. +// No - No mutual intelligibility. +// +// The following levels can occur for each type of subtag: +// Base: Exact, MaxExact, High, Low, No +// Script: Exact, MaxExact [see Note 3], Low, No +// Region: Exact, MaxExact, High +// Variant: Exact, High +// Private: Exact, No +// +// Any result with a confidence level of Low or higher is deemed a possible match. +// Once a desired tag matches any of the supported tags with a level of MaxExact +// or higher, the next desired tag is not considered (see Step 2.b). +// Note that CLDR provides languageMatching data that defines close equivalence +// classes for base languages, scripts and regions. +// +// Tie-breaking +// If we get the same confidence level for two matches, we apply a sequence of +// tie-breaking rules. The first that succeeds defines the result. The rules are +// applied in the following order. +// 1) Original language was defined and was identical. +// 2) Original region was defined and was identical. +// 3) Distance between two maximized regions was the smallest. +// 4) Original script was defined and was identical. +// 5) Distance from want tag to have tag using the parent relation [see Note 5.] +// If there is still no winner after these rules are applied, the first match +// found wins. +// +// Notes: +// [2] In practice, as matching of Exact is done in a separate phase from +// matching the other levels, we reuse the Exact level to mean MaxExact in +// the second phase. As a consequence, we only need the levels defined by +// the Confidence type.
The MaxExact confidence level is mapped to High in +// the public API. +// [3] We do not differentiate between maximized script values that were derived +// from suppressScript versus most likely tag data. We determined that in +// ranking the two, one ranks just after the other. Moreover, the two cannot +// occur concurrently. As a consequence, they are identical for practical +// purposes. +// [4] In case of deprecated, macro-equivalents and legacy mappings, we assign +// the MaxExact level to allow iw vs he to still be a closer match than +// en-AU vs en-US, for example. +// [5] In CLDR a locale inherits fields that are unspecified for this locale +// from its parent. Therefore, if a locale is a parent of another locale, +// it is a strong measure for closeness, especially when no other tie +// breaker rule applies. One could also argue it is inconsistent, for +// example, when pt-AO matches pt (which CLDR equates with pt-BR), even +// though its parent is pt-PT according to the inheritance rules. +// +// Implementation Details: +// There are several performance considerations worth pointing out. Most notably, +// we preprocess as much as possible (within reason) at the time of creation of a +// matcher. This includes: +// - creating a per-language map, which includes data for the raw base language +// and its canonicalized variant (if applicable), +// - expanding entries for the equivalence classes defined in CLDR's +// languageMatch data. +// The per-language map ensures that typically only a very small number of tags +// need to be considered. The pre-expansion of canonicalized subtags and +// equivalence classes reduces the amount of map lookups that need to be done at +// runtime. + +// matcher keeps a set of supported language tags, indexed by language. +type matcher struct { + default_ *haveTag + supported []*haveTag + index map[language.Language]*matchHeader + passSettings bool + preferSameScript bool +} + +// matchHeader has the lists of tags for exact matches and matches based on +// maximized and canonicalized tags for a given language. +type matchHeader struct { + haveTags []*haveTag + original bool +} + +// haveTag holds a supported Tag and its maximized script and region. The maximized +// or canonicalized language is not stored as it is not needed during matching. +type haveTag struct { + tag language.Tag + + // index of this tag in the original list of supported tags. + index int + + // conf is the maximum confidence that can result from matching this haveTag. + // When conf < Exact this means it was inserted after applying a CLDR equivalence rule. + conf Confidence + + // Maximized region and script. + maxRegion language.Region + maxScript language.Script + + // altScript may be checked as an alternative match to maxScript. If altScript + // matches, the confidence level for this match is Low. Theoretically there + // could be multiple alternative scripts. This does not occur in practice. + altScript language.Script + + // nextMax is the index of the next haveTag with the same maximized tags. + nextMax uint16 +} + +func makeHaveTag(tag language.Tag, index int) (haveTag, language.Language) { + max := tag + if tag.LangID != 0 || tag.RegionID != 0 || tag.ScriptID != 0 { + max, _ = canonicalize(All, max) + max, _ = max.Maximize() + max.RemakeString() + } + return haveTag{tag, index, Exact, max.RegionID, max.ScriptID, altScript(max.LangID, max.ScriptID), 0}, max.LangID +} + +// altScript returns an alternative script that may match the given script with +// a low confidence. 
At the moment, the langMatch data allows for at most one +// script to map to another and we rely on this to keep the code simple. +func altScript(l language.Language, s language.Script) language.Script { + for _, alt := range matchScript { + // TODO: also match cases where language is not the same. + if (language.Language(alt.wantLang) == l || language.Language(alt.haveLang) == l) && + language.Script(alt.haveScript) == s { + return language.Script(alt.wantScript) + } + } + return 0 +} + +// addIfNew adds a haveTag to the list of tags only if it is a unique tag. +// Tags that have the same maximized values are linked by index. +func (h *matchHeader) addIfNew(n haveTag, exact bool) { + h.original = h.original || exact + // Don't add new exact matches. + for _, v := range h.haveTags { + if equalsRest(v.tag, n.tag) { + return + } + } + // Allow duplicate maximized tags, but create a linked list to allow quickly + // comparing the equivalents and bail out. + for i, v := range h.haveTags { + if v.maxScript == n.maxScript && + v.maxRegion == n.maxRegion && + v.tag.VariantOrPrivateUseTags() == n.tag.VariantOrPrivateUseTags() { + for h.haveTags[i].nextMax != 0 { + i = int(h.haveTags[i].nextMax) + } + h.haveTags[i].nextMax = uint16(len(h.haveTags)) + break + } + } + h.haveTags = append(h.haveTags, &n) +} + +// header returns the matchHeader for the given language. It creates one if +// it doesn't already exist. +func (m *matcher) header(l language.Language) *matchHeader { + if h := m.index[l]; h != nil { + return h + } + h := &matchHeader{} + m.index[l] = h + return h +} + +func toConf(d uint8) Confidence { + if d <= 10 { + return High + } + if d < 30 { + return Low + } + return No +} + +// newMatcher builds an index for the given supported tags and returns it as +// a matcher. It also expands the index by considering various equivalence classes +// for a given tag. +func newMatcher(supported []Tag, options []MatchOption) *matcher { + m := &matcher{ + index: make(map[language.Language]*matchHeader), + preferSameScript: true, + } + for _, o := range options { + o(m) + } + if len(supported) == 0 { + m.default_ = &haveTag{} + return m + } + // Add supported languages to the index. Add exact matches first to give + // them precedence. + for i, tag := range supported { + tt := tag.tag() + pair, _ := makeHaveTag(tt, i) + m.header(tt.LangID).addIfNew(pair, true) + m.supported = append(m.supported, &pair) + } + m.default_ = m.header(supported[0].lang()).haveTags[0] + // Keep these in two different loops to support the case that two equivalent + // languages are distinguished, such as iw and he. + for i, tag := range supported { + tt := tag.tag() + pair, max := makeHaveTag(tt, i) + if max != tt.LangID { + m.header(max).addIfNew(pair, true) + } + } + + // update is used to add indexes in the map for equivalent languages. + // update will only add entries to original indexes, thus not computing any + // transitive relations. + update := func(want, have uint16, conf Confidence) { + if hh := m.index[language.Language(have)]; hh != nil { + if !hh.original { + return + } + hw := m.header(language.Language(want)) + for _, ht := range hh.haveTags { + v := *ht + if conf < v.conf { + v.conf = conf + } + v.nextMax = 0 // this value needs to be recomputed + if v.altScript != 0 { + v.altScript = altScript(language.Language(want), v.maxScript) + } + hw.addIfNew(v, conf == Exact && hh.original) + } + } + } + + // Add entries for languages with mutual intelligibility as defined by CLDR's + // languageMatch data. 
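+ // As an illustrative reading of the data: an entry such as
+ // {want: w, have: h, distance: 0x8, oneway: false} registers h's tags under
+ // w and vice versa with confidence toConf(0x8), i.e. High; a one-way entry
+ // is registered in the want direction only.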
+ for _, ml := range matchLang { + update(ml.want, ml.have, toConf(ml.distance)) + if !ml.oneway { + update(ml.have, ml.want, toConf(ml.distance)) + } + } + + // Add entries for possible canonicalizations. This is an optimization to + // ensure that only one map lookup needs to be done at runtime per desired tag. + // First we match deprecated equivalents. If they are perfect equivalents + // (their canonicalization simply substitutes a different language code, but + // nothing else), the match confidence is Exact, otherwise it is High. + for i, lm := range language.AliasMap { + // If deprecated codes match and there is no fiddling with the script + // or region, we consider it an exact match. + conf := Exact + if language.AliasTypes[i] != language.Macro { + if !isExactEquivalent(language.Language(lm.From)) { + conf = High + } + update(lm.To, lm.From, conf) + } + update(lm.From, lm.To, conf) + } + return m +} + +// getBest gets the best matching tag in m for any of the given tags, taking into +// account the order of preference of the given tags. +func (m *matcher) getBest(want ...Tag) (got *haveTag, orig language.Tag, c Confidence) { + best := bestMatch{} + for i, ww := range want { + w := ww.tag() + var max language.Tag + // Check for exact match first. + h := m.index[w.LangID] + if w.LangID != 0 { + if h == nil { + continue + } + // Base language is defined. + max, _ = canonicalize(Legacy|Deprecated|Macro, w) + // A region that is added through canonicalization is stronger than + // a maximized region: set it in the original (e.g. mo -> ro-MD). + if w.RegionID != max.RegionID { + w.RegionID = max.RegionID + } + // TODO: should we do the same for scripts? + // See test case: en, sr, nl ; sh ; sr + max, _ = max.Maximize() + } else { + // Base language is not defined. + if h != nil { + for i := range h.haveTags { + have := h.haveTags[i] + if equalsRest(have.tag, w) { + return have, w, Exact + } + } + } + if w.ScriptID == 0 && w.RegionID == 0 { + // We skip all tags matching und for approximate matching, including + // private tags. + continue + } + max, _ = w.Maximize() + if h = m.index[max.LangID]; h == nil { + continue + } + } + pin := true + for _, t := range want[i+1:] { + if w.LangID == t.lang() { + pin = false + break + } + } + // Check for match based on maximized tag. + for i := range h.haveTags { + have := h.haveTags[i] + best.update(have, w, max.ScriptID, max.RegionID, pin) + if best.conf == Exact { + for have.nextMax != 0 { + have = h.haveTags[have.nextMax] + best.update(have, w, max.ScriptID, max.RegionID, pin) + } + return best.have, best.want, best.conf + } + } + } + if best.conf <= No { + if len(want) != 0 { + return nil, want[0].tag(), No + } + return nil, language.Tag{}, No + } + return best.have, best.want, best.conf +} + +// bestMatch accumulates the best match so far. +type bestMatch struct { + have *haveTag + want language.Tag + conf Confidence + pinnedRegion language.Region + pinLanguage bool + sameRegionGroup bool + // Cached results from applying tie-breaking rules. + origLang bool + origReg bool + paradigmReg bool + regGroupDist uint8 + origScript bool +} + +// update updates the existing best match if the new pair is considered to be a +// better match. To determine if the given pair is a better match, it first +// computes the rough confidence level. If this surpasses the current match, it +// will replace it and update the tie-breaker rule cache. If there is a tie, it +// proceeds with applying a series of tie-breaker rules. 
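+// The tie-breaker rules below are applied in a fixed order: matching original
+// language, matching original region, region group distance, paradigm region,
+// and finally matching original script.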
If there is no +// conclusive winner after applying the tie-breaker rules, it leaves the current +// match as the preferred match. +// +// If pin is true and have and tag are a strong match, it will henceforth only +// consider matches for this language. This corresponds to the idea that most +// users have a strong preference for the first defined language. A user can +// still prefer a second language over a dialect of the preferred language by +// explicitly specifying dialects, e.g. "en, nl, en-GB". In this case pin should +// be false. +func (m *bestMatch) update(have *haveTag, tag language.Tag, maxScript language.Script, maxRegion language.Region, pin bool) { + // Bail if the maximum attainable confidence is below that of the current best match. + c := have.conf + if c < m.conf { + return + } + // Don't change the language once we already have found an exact match. + if m.pinLanguage && tag.LangID != m.want.LangID { + return + } + // Pin the region group if we are comparing tags for the same language. + if tag.LangID == m.want.LangID && m.sameRegionGroup { + _, sameGroup := regionGroupDist(m.pinnedRegion, have.maxRegion, have.maxScript, m.want.LangID) + if !sameGroup { + return + } + } + if c == Exact && have.maxScript == maxScript { + // If there is another language and then another entry of this language, + // don't pin anything, otherwise pin the language. + m.pinLanguage = pin + } + if equalsRest(have.tag, tag) { + } else if have.maxScript != maxScript { + // There is usually very little comprehension between different scripts. + // In a few cases there may still be Low comprehension. This possibility + // is pre-computed and stored in have.altScript. + if Low < m.conf || have.altScript != maxScript { + return + } + c = Low + } else if have.maxRegion != maxRegion { + if High < c { + // There is usually a small difference between languages across regions. + c = High + } + } + + // We store the results of the computations of the tie-breaker rules along + // with the best match. There is no need to do the checks once we determine + // we have a winner, but we do still need to do the tie-breaker computations. + // We use "beaten" to keep track if we still need to do the checks. + beaten := false // true if the new pair defeats the current one. + if c != m.conf { + if c < m.conf { + return + } + beaten = true + } + + // Tie-breaker rules: + // We prefer if the pre-maximized language was specified and identical. + origLang := have.tag.LangID == tag.LangID && tag.LangID != 0 + if !beaten && m.origLang != origLang { + if m.origLang { + return + } + beaten = true + } + + // We prefer if the pre-maximized region was specified and identical. + origReg := have.tag.RegionID == tag.RegionID && tag.RegionID != 0 + if !beaten && m.origReg != origReg { + if m.origReg { + return + } + beaten = true + } + + regGroupDist, sameGroup := regionGroupDist(have.maxRegion, maxRegion, maxScript, tag.LangID) + if !beaten && m.regGroupDist != regGroupDist { + if regGroupDist > m.regGroupDist { + return + } + beaten = true + } + + paradigmReg := isParadigmLocale(tag.LangID, have.maxRegion) + if !beaten && m.paradigmReg != paradigmReg { + if !paradigmReg { + return + } + beaten = true + } + + // Next we prefer if the pre-maximized script was specified and identical. + origScript := have.tag.ScriptID == tag.ScriptID && tag.ScriptID != 0 + if !beaten && m.origScript != origScript { + if m.origScript { + return + } + beaten = true + } + + // Update m to the newly found best match. 
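+ // At this point every tie-breaker result has been computed; only a pair
+ // that won on at least one rule (beaten == true) replaces the current best.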
+ if beaten { + m.have = have + m.want = tag + m.conf = c + m.pinnedRegion = maxRegion + m.sameRegionGroup = sameGroup + m.origLang = origLang + m.origReg = origReg + m.paradigmReg = paradigmReg + m.origScript = origScript + m.regGroupDist = regGroupDist + } +} + +func isParadigmLocale(lang language.Language, r language.Region) bool { + for _, e := range paradigmLocales { + if language.Language(e[0]) == lang && (r == language.Region(e[1]) || r == language.Region(e[2])) { + return true + } + } + return false +} + +// regionGroupDist computes the distance between two regions based on their +// CLDR grouping. +func regionGroupDist(a, b language.Region, script language.Script, lang language.Language) (dist uint8, same bool) { + const defaultDistance = 4 + + aGroup := uint(regionToGroups[a]) << 1 + bGroup := uint(regionToGroups[b]) << 1 + for _, ri := range matchRegion { + if language.Language(ri.lang) == lang && (ri.script == 0 || language.Script(ri.script) == script) { + group := uint(1 << (ri.group &^ 0x80)) + if 0x80&ri.group == 0 { + if aGroup&bGroup&group != 0 { // Both regions are in the group. + return ri.distance, ri.distance == defaultDistance + } + } else { + if (aGroup|bGroup)&group == 0 { // Both regions are not in the group. + return ri.distance, ri.distance == defaultDistance + } + } + } + } + return defaultDistance, true +} + +// equalsRest compares everything except the language. +func equalsRest(a, b language.Tag) bool { + // TODO: don't include extensions in this comparison. To do this efficiently, + // though, we should handle private tags separately. + return a.ScriptID == b.ScriptID && a.RegionID == b.RegionID && a.VariantOrPrivateUseTags() == b.VariantOrPrivateUseTags() +} + +// isExactEquivalent returns true if canonicalizing the language will not alter +// the script or region of a tag. +func isExactEquivalent(l language.Language) bool { + for _, o := range notEquivalent { + if o == l { + return false + } + } + return true +} + +var notEquivalent []language.Language + +func init() { + // Create a list of all languages for which canonicalization may alter the + // script or region. + for _, lm := range language.AliasMap { + tag := language.Tag{LangID: language.Language(lm.From)} + if tag, _ = canonicalize(All, tag); tag.ScriptID != 0 || tag.RegionID != 0 { + notEquivalent = append(notEquivalent, language.Language(lm.From)) + } + } + // Maximize undefined regions of paradigm locales. + for i, v := range paradigmLocales { + t := language.Tag{LangID: language.Language(v[0])} + max, _ := t.Maximize() + if v[1] == 0 { + paradigmLocales[i][1] = uint16(max.RegionID) + } + if v[2] == 0 { + paradigmLocales[i][2] = uint16(max.RegionID) + } + } +} diff --git a/vendor/golang.org/x/text/language/parse.go b/vendor/golang.org/x/text/language/parse.go new file mode 100644 index 00000000000..4d57222e770 --- /dev/null +++ b/vendor/golang.org/x/text/language/parse.go @@ -0,0 +1,256 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package language + +import ( + "errors" + "sort" + "strconv" + "strings" + + "golang.org/x/text/internal/language" +) + +// ValueError is returned by any of the parsing functions when the +// input is well-formed but the respective subtag is not recognized +// as a valid value. +type ValueError interface { + error + + // Subtag returns the subtag for which the error occurred. 
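+ // For instance, for a well-formed tag with an unrecognized region, the
+ // offending region subtag is reported here.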
+ Subtag() string +} + +// Parse parses the given BCP 47 string and returns a valid Tag. If parsing +// failed it returns an error and any part of the tag that could be parsed. +// If parsing succeeded but an unknown value was found, it returns +// ValueError. The Tag returned in this case is just stripped of the unknown +// value. All other values are preserved. It accepts tags in the BCP 47 format +// and extensions to this standard defined in +// https://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers. +// The resulting tag is canonicalized using the default canonicalization type. +func Parse(s string) (t Tag, err error) { + return Default.Parse(s) +} + +// Parse parses the given BCP 47 string and returns a valid Tag. If parsing +// failed it returns an error and any part of the tag that could be parsed. +// If parsing succeeded but an unknown value was found, it returns +// ValueError. The Tag returned in this case is just stripped of the unknown +// value. All other values are preserved. It accepts tags in the BCP 47 format +// and extensions to this standard defined in +// https://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers. +// The resulting tag is canonicalized using the canonicalization type c. +func (c CanonType) Parse(s string) (t Tag, err error) { + defer func() { + if recover() != nil { + t = Tag{} + err = language.ErrSyntax + } + }() + + tt, err := language.Parse(s) + if err != nil { + return makeTag(tt), err + } + tt, changed := canonicalize(c, tt) + if changed { + tt.RemakeString() + } + return makeTag(tt), err +} + +// Compose creates a Tag from individual parts, which may be of type Tag, Base, +// Script, Region, Variant, []Variant, Extension, []Extension or error. If a +// Base, Script or Region or slice of type Variant or Extension is passed more +// than once, the latter will overwrite the former. Variants and Extensions are +// accumulated, but if two extensions of the same type are passed, the latter +// will replace the former. For -u extensions, though, the key-type pairs are +// added, where later values overwrite older ones. A Tag overwrites all former +// values and typically only makes sense as the first argument. The resulting +// tag is returned after canonicalizing using the Default CanonType. If one or +// more errors are encountered, one of the errors is returned. +func Compose(part ...interface{}) (t Tag, err error) { + return Default.Compose(part...) +} + +// Compose creates a Tag from individual parts, which may be of type Tag, Base, +// Script, Region, Variant, []Variant, Extension, []Extension or error. If a +// Base, Script or Region or slice of type Variant or Extension is passed more +// than once, the latter will overwrite the former. Variants and Extensions are +// accumulated, but if two extensions of the same type are passed, the latter +// will replace the former. For -u extensions, though, the key-type pairs are +// added, where later values overwrite older ones. A Tag overwrites all former +// values and typically only makes sense as the first argument. The resulting +// tag is returned after canonicalizing using CanonType c. If one or more errors +// are encountered, one of the errors is returned. 
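+//
+// A minimal sketch, using parsers defined in this package:
+//
+//  b, _ := ParseBase("en")
+//  r, _ := ParseRegion("US")
+//  t, _ := Default.Compose(b, r) // t is the tag en-US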
+func (c CanonType) Compose(part ...interface{}) (t Tag, err error) { + defer func() { + if recover() != nil { + t = Tag{} + err = language.ErrSyntax + } + }() + + var b language.Builder + if err = update(&b, part...); err != nil { + return und, err + } + b.Tag, _ = canonicalize(c, b.Tag) + return makeTag(b.Make()), err +} + +var errInvalidArgument = errors.New("invalid Extension or Variant") + +func update(b *language.Builder, part ...interface{}) (err error) { + for _, x := range part { + switch v := x.(type) { + case Tag: + b.SetTag(v.tag()) + case Base: + b.Tag.LangID = v.langID + case Script: + b.Tag.ScriptID = v.scriptID + case Region: + b.Tag.RegionID = v.regionID + case Variant: + if v.variant == "" { + err = errInvalidArgument + break + } + b.AddVariant(v.variant) + case Extension: + if v.s == "" { + err = errInvalidArgument + break + } + b.SetExt(v.s) + case []Variant: + b.ClearVariants() + for _, v := range v { + b.AddVariant(v.variant) + } + case []Extension: + b.ClearExtensions() + for _, e := range v { + b.SetExt(e.s) + } + // TODO: support parsing of raw strings based on morphology or just extensions? + case error: + if v != nil { + err = v + } + } + } + return +} + +var errInvalidWeight = errors.New("ParseAcceptLanguage: invalid weight") +var errTagListTooLarge = errors.New("tag list exceeds max length") + +// ParseAcceptLanguage parses the contents of an Accept-Language header as +// defined in http://www.ietf.org/rfc/rfc2616.txt and returns a list of Tags and +// a list of corresponding quality weights. It is more permissive than RFC 2616 +// and may return non-nil slices even if the input is not valid. +// The Tags will be sorted by highest weight first and then by first occurrence. +// Tags with a weight of zero will be dropped. An error will be returned if the +// input could not be parsed. +func ParseAcceptLanguage(s string) (tag []Tag, q []float32, err error) { + defer func() { + if recover() != nil { + tag = nil + q = nil + err = language.ErrSyntax + } + }() + + if strings.Count(s, "-") > 1000 { + return nil, nil, errTagListTooLarge + } + + var entry string + for s != "" { + if entry, s = split(s, ','); entry == "" { + continue + } + + entry, weight := split(entry, ';') + + // Scan the language. + t, err := Parse(entry) + if err != nil { + id, ok := acceptFallback[entry] + if !ok { + return nil, nil, err + } + t = makeTag(language.Tag{LangID: id}) + } + + // Scan the optional weight. + w := 1.0 + if weight != "" { + weight = consume(weight, 'q') + weight = consume(weight, '=') + // consume returns the empty string when a token could not be + // consumed, resulting in an error for ParseFloat. + if w, err = strconv.ParseFloat(weight, 32); err != nil { + return nil, nil, errInvalidWeight + } + // Drop tags with a quality weight of 0. + if w <= 0 { + continue + } + } + + tag = append(tag, t) + q = append(q, float32(w)) + } + sort.Stable(&tagSort{tag, q}) + return tag, q, nil +} + +// consume removes a leading token c from s and returns the result or the empty +// string if there is no such token. +func consume(s string, c byte) string { + if s == "" || s[0] != c { + return "" + } + return strings.TrimSpace(s[1:]) +} + +func split(s string, c byte) (head, tail string) { + if i := strings.IndexByte(s, c); i >= 0 { + return strings.TrimSpace(s[:i]), strings.TrimSpace(s[i+1:]) + } + return strings.TrimSpace(s), "" +} + +// Add hack mapping to deal with a small number of cases that occur +// in Accept-Language (with reasonable frequency). 
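+// These entries cover bare language names (rather than BCP 47 codes) that
+// clients are known to send; "*" is defined to match any language.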
+var acceptFallback = map[string]language.Language{ + "english": _en, + "deutsch": _de, + "italian": _it, + "french": _fr, + "*": _mul, // defined in the spec to match all languages. +} + +type tagSort struct { + tag []Tag + q []float32 +} + +func (s *tagSort) Len() int { + return len(s.q) +} + +func (s *tagSort) Less(i, j int) bool { + return s.q[i] > s.q[j] +} + +func (s *tagSort) Swap(i, j int) { + s.tag[i], s.tag[j] = s.tag[j], s.tag[i] + s.q[i], s.q[j] = s.q[j], s.q[i] +} diff --git a/vendor/golang.org/x/text/language/tables.go b/vendor/golang.org/x/text/language/tables.go new file mode 100644 index 00000000000..a6573dcb215 --- /dev/null +++ b/vendor/golang.org/x/text/language/tables.go @@ -0,0 +1,298 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package language + +// CLDRVersion is the CLDR version from which the tables in this package are derived. +const CLDRVersion = "32" + +const ( + _de = 269 + _en = 313 + _fr = 350 + _it = 505 + _mo = 784 + _no = 879 + _nb = 839 + _pt = 960 + _sh = 1031 + _mul = 806 + _und = 0 +) +const ( + _001 = 1 + _419 = 31 + _BR = 65 + _CA = 73 + _ES = 111 + _GB = 124 + _MD = 189 + _PT = 239 + _UK = 307 + _US = 310 + _ZZ = 358 + _XA = 324 + _XC = 326 + _XK = 334 +) +const ( + _Latn = 91 + _Hani = 57 + _Hans = 59 + _Hant = 60 + _Qaaa = 149 + _Qaai = 157 + _Qabx = 198 + _Zinh = 255 + _Zyyy = 260 + _Zzzz = 261 +) + +var regionToGroups = []uint8{ // 359 elements + // Entry 0 - 3F + 0x00, 0x00, 0x00, 0x04, 0x04, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0x00, 0x04, 0x04, 0x04, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x04, 0x00, + 0x00, 0x04, 0x00, 0x00, 0x04, 0x01, 0x00, 0x00, + 0x04, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x04, 0x04, 0x00, 0x04, + // Entry 40 - 7F + 0x04, 0x04, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x04, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x04, 0x00, 0x00, 0x04, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x04, 0x00, 0x04, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x04, 0x00, + 0x08, 0x00, 0x04, 0x00, 0x00, 0x08, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x04, + // Entry 80 - BF + 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x04, 0x00, + 0x00, 0x00, 0x04, 0x01, 0x00, 0x04, 0x02, 0x00, + 0x04, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x04, 0x00, + 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x08, 0x08, 0x00, 0x00, 0x00, 0x04, + // Entry C0 - FF + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x01, 0x04, 0x08, 0x04, 0x00, 0x00, 0x00, 0x00, + 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x04, 0x00, 0x04, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x04, 0x00, 0x05, 0x00, 0x00, + 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 100 - 13F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0x04, 0x04, 0x00, 0x00, 0x00, + 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x08, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x05, 0x04, + 0x00, 0x00, 0x04, 0x00, 0x04, 0x04, 0x05, 0x00, + // 
Entry 140 - 17F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +} // Size: 383 bytes + +var paradigmLocales = [][3]uint16{ // 3 elements + 0: [3]uint16{0x139, 0x0, 0x7c}, + 1: [3]uint16{0x13e, 0x0, 0x1f}, + 2: [3]uint16{0x3c0, 0x41, 0xef}, +} // Size: 42 bytes + +type mutualIntelligibility struct { + want uint16 + have uint16 + distance uint8 + oneway bool +} +type scriptIntelligibility struct { + wantLang uint16 + haveLang uint16 + wantScript uint8 + haveScript uint8 + distance uint8 +} +type regionIntelligibility struct { + lang uint16 + script uint8 + group uint8 + distance uint8 +} + +// matchLang holds pairs of langIDs of base languages that are typically +// mutually intelligible. Each pair is associated with a confidence and +// whether the intelligibility goes one or both ways. +var matchLang = []mutualIntelligibility{ // 113 elements + 0: {want: 0x1d1, have: 0xb7, distance: 0x4, oneway: false}, + 1: {want: 0x407, have: 0xb7, distance: 0x4, oneway: false}, + 2: {want: 0x407, have: 0x1d1, distance: 0x4, oneway: false}, + 3: {want: 0x407, have: 0x432, distance: 0x4, oneway: false}, + 4: {want: 0x43a, have: 0x1, distance: 0x4, oneway: false}, + 5: {want: 0x1a3, have: 0x10d, distance: 0x4, oneway: true}, + 6: {want: 0x295, have: 0x10d, distance: 0x4, oneway: true}, + 7: {want: 0x101, have: 0x36f, distance: 0x8, oneway: false}, + 8: {want: 0x101, have: 0x347, distance: 0x8, oneway: false}, + 9: {want: 0x5, have: 0x3e2, distance: 0xa, oneway: true}, + 10: {want: 0xd, have: 0x139, distance: 0xa, oneway: true}, + 11: {want: 0x16, have: 0x367, distance: 0xa, oneway: true}, + 12: {want: 0x21, have: 0x139, distance: 0xa, oneway: true}, + 13: {want: 0x56, have: 0x13e, distance: 0xa, oneway: true}, + 14: {want: 0x58, have: 0x3e2, distance: 0xa, oneway: true}, + 15: {want: 0x71, have: 0x3e2, distance: 0xa, oneway: true}, + 16: {want: 0x75, have: 0x139, distance: 0xa, oneway: true}, + 17: {want: 0x82, have: 0x1be, distance: 0xa, oneway: true}, + 18: {want: 0xa5, have: 0x139, distance: 0xa, oneway: true}, + 19: {want: 0xb2, have: 0x15e, distance: 0xa, oneway: true}, + 20: {want: 0xdd, have: 0x153, distance: 0xa, oneway: true}, + 21: {want: 0xe5, have: 0x139, distance: 0xa, oneway: true}, + 22: {want: 0xe9, have: 0x3a, distance: 0xa, oneway: true}, + 23: {want: 0xf0, have: 0x15e, distance: 0xa, oneway: true}, + 24: {want: 0xf9, have: 0x15e, distance: 0xa, oneway: true}, + 25: {want: 0x100, have: 0x139, distance: 0xa, oneway: true}, + 26: {want: 0x130, have: 0x139, distance: 0xa, oneway: true}, + 27: {want: 0x13c, have: 0x139, distance: 0xa, oneway: true}, + 28: {want: 0x140, have: 0x151, distance: 0xa, oneway: true}, + 29: {want: 0x145, have: 0x13e, distance: 0xa, oneway: true}, + 30: {want: 0x158, have: 0x101, distance: 0xa, oneway: true}, + 31: {want: 0x16d, have: 0x367, distance: 0xa, oneway: true}, + 32: {want: 0x16e, have: 0x139, distance: 0xa, oneway: true}, + 33: {want: 0x16f, have: 0x139, distance: 0xa, oneway: true}, + 34: {want: 0x17e, have: 0x139, distance: 0xa, oneway: true}, + 35: {want: 0x190, have: 0x13e, distance: 0xa, oneway: true}, + 36: {want: 0x194, have: 0x13e, distance: 0xa, oneway: true}, + 37: {want: 0x1a4, have: 0x1be, distance: 0xa, oneway: true}, + 38: {want: 0x1b4, have: 0x139, distance: 0xa, oneway: true}, + 39: {want: 0x1b8, have: 0x139, distance: 0xa, oneway: true}, + 40: 
{want: 0x1d4, have: 0x15e, distance: 0xa, oneway: true}, + 41: {want: 0x1d7, have: 0x3e2, distance: 0xa, oneway: true}, + 42: {want: 0x1d9, have: 0x139, distance: 0xa, oneway: true}, + 43: {want: 0x1e7, have: 0x139, distance: 0xa, oneway: true}, + 44: {want: 0x1f8, have: 0x139, distance: 0xa, oneway: true}, + 45: {want: 0x20e, have: 0x1e1, distance: 0xa, oneway: true}, + 46: {want: 0x210, have: 0x139, distance: 0xa, oneway: true}, + 47: {want: 0x22d, have: 0x15e, distance: 0xa, oneway: true}, + 48: {want: 0x242, have: 0x3e2, distance: 0xa, oneway: true}, + 49: {want: 0x24a, have: 0x139, distance: 0xa, oneway: true}, + 50: {want: 0x251, have: 0x139, distance: 0xa, oneway: true}, + 51: {want: 0x265, have: 0x139, distance: 0xa, oneway: true}, + 52: {want: 0x274, have: 0x48a, distance: 0xa, oneway: true}, + 53: {want: 0x28a, have: 0x3e2, distance: 0xa, oneway: true}, + 54: {want: 0x28e, have: 0x1f9, distance: 0xa, oneway: true}, + 55: {want: 0x2a3, have: 0x139, distance: 0xa, oneway: true}, + 56: {want: 0x2b5, have: 0x15e, distance: 0xa, oneway: true}, + 57: {want: 0x2b8, have: 0x139, distance: 0xa, oneway: true}, + 58: {want: 0x2be, have: 0x139, distance: 0xa, oneway: true}, + 59: {want: 0x2c3, have: 0x15e, distance: 0xa, oneway: true}, + 60: {want: 0x2ed, have: 0x139, distance: 0xa, oneway: true}, + 61: {want: 0x2f1, have: 0x15e, distance: 0xa, oneway: true}, + 62: {want: 0x2fa, have: 0x139, distance: 0xa, oneway: true}, + 63: {want: 0x2ff, have: 0x7e, distance: 0xa, oneway: true}, + 64: {want: 0x304, have: 0x139, distance: 0xa, oneway: true}, + 65: {want: 0x30b, have: 0x3e2, distance: 0xa, oneway: true}, + 66: {want: 0x31b, have: 0x1be, distance: 0xa, oneway: true}, + 67: {want: 0x31f, have: 0x1e1, distance: 0xa, oneway: true}, + 68: {want: 0x320, have: 0x139, distance: 0xa, oneway: true}, + 69: {want: 0x331, have: 0x139, distance: 0xa, oneway: true}, + 70: {want: 0x351, have: 0x139, distance: 0xa, oneway: true}, + 71: {want: 0x36a, have: 0x347, distance: 0xa, oneway: false}, + 72: {want: 0x36a, have: 0x36f, distance: 0xa, oneway: true}, + 73: {want: 0x37a, have: 0x139, distance: 0xa, oneway: true}, + 74: {want: 0x387, have: 0x139, distance: 0xa, oneway: true}, + 75: {want: 0x389, have: 0x139, distance: 0xa, oneway: true}, + 76: {want: 0x38b, have: 0x15e, distance: 0xa, oneway: true}, + 77: {want: 0x390, have: 0x139, distance: 0xa, oneway: true}, + 78: {want: 0x395, have: 0x139, distance: 0xa, oneway: true}, + 79: {want: 0x39d, have: 0x139, distance: 0xa, oneway: true}, + 80: {want: 0x3a5, have: 0x139, distance: 0xa, oneway: true}, + 81: {want: 0x3be, have: 0x139, distance: 0xa, oneway: true}, + 82: {want: 0x3c4, have: 0x13e, distance: 0xa, oneway: true}, + 83: {want: 0x3d4, have: 0x10d, distance: 0xa, oneway: true}, + 84: {want: 0x3d9, have: 0x139, distance: 0xa, oneway: true}, + 85: {want: 0x3e5, have: 0x15e, distance: 0xa, oneway: true}, + 86: {want: 0x3e9, have: 0x1be, distance: 0xa, oneway: true}, + 87: {want: 0x3fa, have: 0x139, distance: 0xa, oneway: true}, + 88: {want: 0x40c, have: 0x139, distance: 0xa, oneway: true}, + 89: {want: 0x423, have: 0x139, distance: 0xa, oneway: true}, + 90: {want: 0x429, have: 0x139, distance: 0xa, oneway: true}, + 91: {want: 0x431, have: 0x139, distance: 0xa, oneway: true}, + 92: {want: 0x43b, have: 0x139, distance: 0xa, oneway: true}, + 93: {want: 0x43e, have: 0x1e1, distance: 0xa, oneway: true}, + 94: {want: 0x445, have: 0x139, distance: 0xa, oneway: true}, + 95: {want: 0x450, have: 0x139, distance: 0xa, oneway: true}, + 96: {want: 0x461, have: 0x139, 
distance: 0xa, oneway: true}, + 97: {want: 0x467, have: 0x3e2, distance: 0xa, oneway: true}, + 98: {want: 0x46f, have: 0x139, distance: 0xa, oneway: true}, + 99: {want: 0x476, have: 0x3e2, distance: 0xa, oneway: true}, + 100: {want: 0x3883, have: 0x139, distance: 0xa, oneway: true}, + 101: {want: 0x480, have: 0x139, distance: 0xa, oneway: true}, + 102: {want: 0x482, have: 0x139, distance: 0xa, oneway: true}, + 103: {want: 0x494, have: 0x3e2, distance: 0xa, oneway: true}, + 104: {want: 0x49d, have: 0x139, distance: 0xa, oneway: true}, + 105: {want: 0x4ac, have: 0x529, distance: 0xa, oneway: true}, + 106: {want: 0x4b4, have: 0x139, distance: 0xa, oneway: true}, + 107: {want: 0x4bc, have: 0x3e2, distance: 0xa, oneway: true}, + 108: {want: 0x4e5, have: 0x15e, distance: 0xa, oneway: true}, + 109: {want: 0x4f2, have: 0x139, distance: 0xa, oneway: true}, + 110: {want: 0x512, have: 0x139, distance: 0xa, oneway: true}, + 111: {want: 0x518, have: 0x139, distance: 0xa, oneway: true}, + 112: {want: 0x52f, have: 0x139, distance: 0xa, oneway: true}, +} // Size: 702 bytes + +// matchScript holds pairs of scriptIDs where readers of one script +// can typically also read the other. Each is associated with a confidence. +var matchScript = []scriptIntelligibility{ // 26 elements + 0: {wantLang: 0x432, haveLang: 0x432, wantScript: 0x5b, haveScript: 0x20, distance: 0x5}, + 1: {wantLang: 0x432, haveLang: 0x432, wantScript: 0x20, haveScript: 0x5b, distance: 0x5}, + 2: {wantLang: 0x58, haveLang: 0x3e2, wantScript: 0x5b, haveScript: 0x20, distance: 0xa}, + 3: {wantLang: 0xa5, haveLang: 0x139, wantScript: 0xe, haveScript: 0x5b, distance: 0xa}, + 4: {wantLang: 0x1d7, haveLang: 0x3e2, wantScript: 0x8, haveScript: 0x20, distance: 0xa}, + 5: {wantLang: 0x210, haveLang: 0x139, wantScript: 0x2e, haveScript: 0x5b, distance: 0xa}, + 6: {wantLang: 0x24a, haveLang: 0x139, wantScript: 0x4f, haveScript: 0x5b, distance: 0xa}, + 7: {wantLang: 0x251, haveLang: 0x139, wantScript: 0x53, haveScript: 0x5b, distance: 0xa}, + 8: {wantLang: 0x2b8, haveLang: 0x139, wantScript: 0x58, haveScript: 0x5b, distance: 0xa}, + 9: {wantLang: 0x304, haveLang: 0x139, wantScript: 0x6f, haveScript: 0x5b, distance: 0xa}, + 10: {wantLang: 0x331, haveLang: 0x139, wantScript: 0x76, haveScript: 0x5b, distance: 0xa}, + 11: {wantLang: 0x351, haveLang: 0x139, wantScript: 0x22, haveScript: 0x5b, distance: 0xa}, + 12: {wantLang: 0x395, haveLang: 0x139, wantScript: 0x83, haveScript: 0x5b, distance: 0xa}, + 13: {wantLang: 0x39d, haveLang: 0x139, wantScript: 0x36, haveScript: 0x5b, distance: 0xa}, + 14: {wantLang: 0x3be, haveLang: 0x139, wantScript: 0x5, haveScript: 0x5b, distance: 0xa}, + 15: {wantLang: 0x3fa, haveLang: 0x139, wantScript: 0x5, haveScript: 0x5b, distance: 0xa}, + 16: {wantLang: 0x40c, haveLang: 0x139, wantScript: 0xd6, haveScript: 0x5b, distance: 0xa}, + 17: {wantLang: 0x450, haveLang: 0x139, wantScript: 0xe6, haveScript: 0x5b, distance: 0xa}, + 18: {wantLang: 0x461, haveLang: 0x139, wantScript: 0xe9, haveScript: 0x5b, distance: 0xa}, + 19: {wantLang: 0x46f, haveLang: 0x139, wantScript: 0x2c, haveScript: 0x5b, distance: 0xa}, + 20: {wantLang: 0x476, haveLang: 0x3e2, wantScript: 0x5b, haveScript: 0x20, distance: 0xa}, + 21: {wantLang: 0x4b4, haveLang: 0x139, wantScript: 0x5, haveScript: 0x5b, distance: 0xa}, + 22: {wantLang: 0x4bc, haveLang: 0x3e2, wantScript: 0x5b, haveScript: 0x20, distance: 0xa}, + 23: {wantLang: 0x512, haveLang: 0x139, wantScript: 0x3e, haveScript: 0x5b, distance: 0xa}, + 24: {wantLang: 0x529, haveLang: 0x529, wantScript: 0x3b, 
haveScript: 0x3c, distance: 0xf}, + 25: {wantLang: 0x529, haveLang: 0x529, wantScript: 0x3c, haveScript: 0x3b, distance: 0x13}, +} // Size: 232 bytes + +var matchRegion = []regionIntelligibility{ // 15 elements + 0: {lang: 0x3a, script: 0x0, group: 0x4, distance: 0x4}, + 1: {lang: 0x3a, script: 0x0, group: 0x84, distance: 0x4}, + 2: {lang: 0x139, script: 0x0, group: 0x1, distance: 0x4}, + 3: {lang: 0x139, script: 0x0, group: 0x81, distance: 0x4}, + 4: {lang: 0x13e, script: 0x0, group: 0x3, distance: 0x4}, + 5: {lang: 0x13e, script: 0x0, group: 0x83, distance: 0x4}, + 6: {lang: 0x3c0, script: 0x0, group: 0x3, distance: 0x4}, + 7: {lang: 0x3c0, script: 0x0, group: 0x83, distance: 0x4}, + 8: {lang: 0x529, script: 0x3c, group: 0x2, distance: 0x4}, + 9: {lang: 0x529, script: 0x3c, group: 0x82, distance: 0x4}, + 10: {lang: 0x3a, script: 0x0, group: 0x80, distance: 0x5}, + 11: {lang: 0x139, script: 0x0, group: 0x80, distance: 0x5}, + 12: {lang: 0x13e, script: 0x0, group: 0x80, distance: 0x5}, + 13: {lang: 0x3c0, script: 0x0, group: 0x80, distance: 0x5}, + 14: {lang: 0x529, script: 0x3c, group: 0x80, distance: 0x5}, +} // Size: 114 bytes + +// Total table size 1473 bytes (1KiB); checksum: 7BB90B5C diff --git a/vendor/golang.org/x/text/language/tags.go b/vendor/golang.org/x/text/language/tags.go new file mode 100644 index 00000000000..42ea7926660 --- /dev/null +++ b/vendor/golang.org/x/text/language/tags.go @@ -0,0 +1,145 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package language + +import "golang.org/x/text/internal/language/compact" + +// TODO: Various sets of commonly used tags and regions. + +// MustParse is like Parse, but panics if the given BCP 47 tag cannot be parsed. +// It simplifies safe initialization of Tag values. +func MustParse(s string) Tag { + t, err := Parse(s) + if err != nil { + panic(err) + } + return t +} + +// MustParse is like Parse, but panics if the given BCP 47 tag cannot be parsed. +// It simplifies safe initialization of Tag values. +func (c CanonType) MustParse(s string) Tag { + t, err := c.Parse(s) + if err != nil { + panic(err) + } + return t +} + +// MustParseBase is like ParseBase, but panics if the given base cannot be parsed. +// It simplifies safe initialization of Base values. +func MustParseBase(s string) Base { + b, err := ParseBase(s) + if err != nil { + panic(err) + } + return b +} + +// MustParseScript is like ParseScript, but panics if the given script cannot be +// parsed. It simplifies safe initialization of Script values. +func MustParseScript(s string) Script { + scr, err := ParseScript(s) + if err != nil { + panic(err) + } + return scr +} + +// MustParseRegion is like ParseRegion, but panics if the given region cannot be +// parsed. It simplifies safe initialization of Region values.
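+//
+// A minimal sketch:
+//
+//  var us = MustParseRegion("US") // panics if the region cannot be parsed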
+func MustParseRegion(s string) Region { + r, err := ParseRegion(s) + if err != nil { + panic(err) + } + return r +} + +var ( + und = Tag{} + + Und Tag = Tag{} + + Afrikaans Tag = Tag(compact.Afrikaans) + Amharic Tag = Tag(compact.Amharic) + Arabic Tag = Tag(compact.Arabic) + ModernStandardArabic Tag = Tag(compact.ModernStandardArabic) + Azerbaijani Tag = Tag(compact.Azerbaijani) + Bulgarian Tag = Tag(compact.Bulgarian) + Bengali Tag = Tag(compact.Bengali) + Catalan Tag = Tag(compact.Catalan) + Czech Tag = Tag(compact.Czech) + Danish Tag = Tag(compact.Danish) + German Tag = Tag(compact.German) + Greek Tag = Tag(compact.Greek) + English Tag = Tag(compact.English) + AmericanEnglish Tag = Tag(compact.AmericanEnglish) + BritishEnglish Tag = Tag(compact.BritishEnglish) + Spanish Tag = Tag(compact.Spanish) + EuropeanSpanish Tag = Tag(compact.EuropeanSpanish) + LatinAmericanSpanish Tag = Tag(compact.LatinAmericanSpanish) + Estonian Tag = Tag(compact.Estonian) + Persian Tag = Tag(compact.Persian) + Finnish Tag = Tag(compact.Finnish) + Filipino Tag = Tag(compact.Filipino) + French Tag = Tag(compact.French) + CanadianFrench Tag = Tag(compact.CanadianFrench) + Gujarati Tag = Tag(compact.Gujarati) + Hebrew Tag = Tag(compact.Hebrew) + Hindi Tag = Tag(compact.Hindi) + Croatian Tag = Tag(compact.Croatian) + Hungarian Tag = Tag(compact.Hungarian) + Armenian Tag = Tag(compact.Armenian) + Indonesian Tag = Tag(compact.Indonesian) + Icelandic Tag = Tag(compact.Icelandic) + Italian Tag = Tag(compact.Italian) + Japanese Tag = Tag(compact.Japanese) + Georgian Tag = Tag(compact.Georgian) + Kazakh Tag = Tag(compact.Kazakh) + Khmer Tag = Tag(compact.Khmer) + Kannada Tag = Tag(compact.Kannada) + Korean Tag = Tag(compact.Korean) + Kirghiz Tag = Tag(compact.Kirghiz) + Lao Tag = Tag(compact.Lao) + Lithuanian Tag = Tag(compact.Lithuanian) + Latvian Tag = Tag(compact.Latvian) + Macedonian Tag = Tag(compact.Macedonian) + Malayalam Tag = Tag(compact.Malayalam) + Mongolian Tag = Tag(compact.Mongolian) + Marathi Tag = Tag(compact.Marathi) + Malay Tag = Tag(compact.Malay) + Burmese Tag = Tag(compact.Burmese) + Nepali Tag = Tag(compact.Nepali) + Dutch Tag = Tag(compact.Dutch) + Norwegian Tag = Tag(compact.Norwegian) + Punjabi Tag = Tag(compact.Punjabi) + Polish Tag = Tag(compact.Polish) + Portuguese Tag = Tag(compact.Portuguese) + BrazilianPortuguese Tag = Tag(compact.BrazilianPortuguese) + EuropeanPortuguese Tag = Tag(compact.EuropeanPortuguese) + Romanian Tag = Tag(compact.Romanian) + Russian Tag = Tag(compact.Russian) + Sinhala Tag = Tag(compact.Sinhala) + Slovak Tag = Tag(compact.Slovak) + Slovenian Tag = Tag(compact.Slovenian) + Albanian Tag = Tag(compact.Albanian) + Serbian Tag = Tag(compact.Serbian) + SerbianLatin Tag = Tag(compact.SerbianLatin) + Swedish Tag = Tag(compact.Swedish) + Swahili Tag = Tag(compact.Swahili) + Tamil Tag = Tag(compact.Tamil) + Telugu Tag = Tag(compact.Telugu) + Thai Tag = Tag(compact.Thai) + Turkish Tag = Tag(compact.Turkish) + Ukrainian Tag = Tag(compact.Ukrainian) + Urdu Tag = Tag(compact.Urdu) + Uzbek Tag = Tag(compact.Uzbek) + Vietnamese Tag = Tag(compact.Vietnamese) + Chinese Tag = Tag(compact.Chinese) + SimplifiedChinese Tag = Tag(compact.SimplifiedChinese) + TraditionalChinese Tag = Tag(compact.TraditionalChinese) + Zulu Tag = Tag(compact.Zulu) +) diff --git a/vendor/golang.org/x/text/width/kind_string.go b/vendor/golang.org/x/text/width/kind_string.go new file mode 100644 index 00000000000..dd3febd43b2 --- /dev/null +++ b/vendor/golang.org/x/text/width/kind_string.go @@ -0,0 +1,28 @@ 
+// Code generated by "stringer -type=Kind"; DO NOT EDIT. + +package width + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[Neutral-0] + _ = x[EastAsianAmbiguous-1] + _ = x[EastAsianWide-2] + _ = x[EastAsianNarrow-3] + _ = x[EastAsianFullwidth-4] + _ = x[EastAsianHalfwidth-5] +} + +const _Kind_name = "NeutralEastAsianAmbiguousEastAsianWideEastAsianNarrowEastAsianFullwidthEastAsianHalfwidth" + +var _Kind_index = [...]uint8{0, 7, 25, 38, 53, 71, 89} + +func (i Kind) String() string { + if i < 0 || i >= Kind(len(_Kind_index)-1) { + return "Kind(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Kind_name[_Kind_index[i]:_Kind_index[i+1]] +} diff --git a/vendor/golang.org/x/text/width/tables10.0.0.go b/vendor/golang.org/x/text/width/tables10.0.0.go new file mode 100644 index 00000000000..07c1cb17af7 --- /dev/null +++ b/vendor/golang.org/x/text/width/tables10.0.0.go @@ -0,0 +1,1328 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +//go:build go1.10 && !go1.13 + +package width + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "10.0.0" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *widthTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return widthValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = widthIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. 
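+// Unlike lookup, no length or continuation-byte checks are performed.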
+func (t *widthTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return widthValues[c0] + } + i := widthIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = widthIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = widthIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *widthTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return widthValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = widthIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *widthTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return widthValues[c0] + } + i := widthIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = widthIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = widthIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// widthTrie. Total size: 14336 bytes (14.00 KiB). Checksum: c59df54630d3dc4a. +type widthTrie struct{} + +func newWidthTrie(i int) *widthTrie { + return &widthTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *widthTrie) lookupValue(n uint32, b byte) uint16 { + switch { + default: + return uint16(widthValues[n<<6+uint32(b)]) + } +} + +// widthValues: 101 blocks, 6464 entries, 12928 bytes +// The third block is the zero block. 
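+// Each block holds 64 entries: lookupValue returns widthValues[n<<6+b] for
+// block n and byte b.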
+var widthValues = [6464]uint16{ + // Block 0x0, offset 0x0 + 0x20: 0x6001, 0x21: 0x6002, 0x22: 0x6002, 0x23: 0x6002, + 0x24: 0x6002, 0x25: 0x6002, 0x26: 0x6002, 0x27: 0x6002, 0x28: 0x6002, 0x29: 0x6002, + 0x2a: 0x6002, 0x2b: 0x6002, 0x2c: 0x6002, 0x2d: 0x6002, 0x2e: 0x6002, 0x2f: 0x6002, + 0x30: 0x6002, 0x31: 0x6002, 0x32: 0x6002, 0x33: 0x6002, 0x34: 0x6002, 0x35: 0x6002, + 0x36: 0x6002, 0x37: 0x6002, 0x38: 0x6002, 0x39: 0x6002, 0x3a: 0x6002, 0x3b: 0x6002, + 0x3c: 0x6002, 0x3d: 0x6002, 0x3e: 0x6002, 0x3f: 0x6002, + // Block 0x1, offset 0x40 + 0x40: 0x6003, 0x41: 0x6003, 0x42: 0x6003, 0x43: 0x6003, 0x44: 0x6003, 0x45: 0x6003, + 0x46: 0x6003, 0x47: 0x6003, 0x48: 0x6003, 0x49: 0x6003, 0x4a: 0x6003, 0x4b: 0x6003, + 0x4c: 0x6003, 0x4d: 0x6003, 0x4e: 0x6003, 0x4f: 0x6003, 0x50: 0x6003, 0x51: 0x6003, + 0x52: 0x6003, 0x53: 0x6003, 0x54: 0x6003, 0x55: 0x6003, 0x56: 0x6003, 0x57: 0x6003, + 0x58: 0x6003, 0x59: 0x6003, 0x5a: 0x6003, 0x5b: 0x6003, 0x5c: 0x6003, 0x5d: 0x6003, + 0x5e: 0x6003, 0x5f: 0x6003, 0x60: 0x6004, 0x61: 0x6004, 0x62: 0x6004, 0x63: 0x6004, + 0x64: 0x6004, 0x65: 0x6004, 0x66: 0x6004, 0x67: 0x6004, 0x68: 0x6004, 0x69: 0x6004, + 0x6a: 0x6004, 0x6b: 0x6004, 0x6c: 0x6004, 0x6d: 0x6004, 0x6e: 0x6004, 0x6f: 0x6004, + 0x70: 0x6004, 0x71: 0x6004, 0x72: 0x6004, 0x73: 0x6004, 0x74: 0x6004, 0x75: 0x6004, + 0x76: 0x6004, 0x77: 0x6004, 0x78: 0x6004, 0x79: 0x6004, 0x7a: 0x6004, 0x7b: 0x6004, + 0x7c: 0x6004, 0x7d: 0x6004, 0x7e: 0x6004, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xe1: 0x2000, 0xe2: 0x6005, 0xe3: 0x6005, + 0xe4: 0x2000, 0xe5: 0x6006, 0xe6: 0x6005, 0xe7: 0x2000, 0xe8: 0x2000, + 0xea: 0x2000, 0xec: 0x6007, 0xed: 0x2000, 0xee: 0x2000, 0xef: 0x6008, + 0xf0: 0x2000, 0xf1: 0x2000, 0xf2: 0x2000, 0xf3: 0x2000, 0xf4: 0x2000, + 0xf6: 0x2000, 0xf7: 0x2000, 0xf8: 0x2000, 0xf9: 0x2000, 0xfa: 0x2000, + 0xfc: 0x2000, 0xfd: 0x2000, 0xfe: 0x2000, 0xff: 0x2000, + // Block 0x4, offset 0x100 + 0x106: 0x2000, + 0x110: 0x2000, + 0x117: 0x2000, + 0x118: 0x2000, + 0x11e: 0x2000, 0x11f: 0x2000, 0x120: 0x2000, 0x121: 0x2000, + 0x126: 0x2000, 0x128: 0x2000, 0x129: 0x2000, + 0x12a: 0x2000, 0x12c: 0x2000, 0x12d: 0x2000, + 0x130: 0x2000, 0x132: 0x2000, 0x133: 0x2000, + 0x137: 0x2000, 0x138: 0x2000, 0x139: 0x2000, 0x13a: 0x2000, + 0x13c: 0x2000, 0x13e: 0x2000, + // Block 0x5, offset 0x140 + 0x141: 0x2000, + 0x151: 0x2000, + 0x153: 0x2000, + 0x15b: 0x2000, + 0x166: 0x2000, 0x167: 0x2000, + 0x16b: 0x2000, + 0x171: 0x2000, 0x172: 0x2000, 0x173: 0x2000, + 0x178: 0x2000, + 0x17f: 0x2000, + // Block 0x6, offset 0x180 + 0x180: 0x2000, 0x181: 0x2000, 0x182: 0x2000, 0x184: 0x2000, + 0x188: 0x2000, 0x189: 0x2000, 0x18a: 0x2000, 0x18b: 0x2000, + 0x18d: 0x2000, + 0x192: 0x2000, 0x193: 0x2000, + 0x1a6: 0x2000, 0x1a7: 0x2000, + 0x1ab: 0x2000, + // Block 0x7, offset 0x1c0 + 0x1ce: 0x2000, 0x1d0: 0x2000, + 0x1d2: 0x2000, 0x1d4: 0x2000, 0x1d6: 0x2000, + 0x1d8: 0x2000, 0x1da: 0x2000, 0x1dc: 0x2000, + // Block 0x8, offset 0x200 + 0x211: 0x2000, + 0x221: 0x2000, + // Block 0x9, offset 0x240 + 0x244: 0x2000, + 0x247: 0x2000, 0x249: 0x2000, 0x24a: 0x2000, 0x24b: 0x2000, + 0x24d: 0x2000, 0x250: 0x2000, + 0x258: 0x2000, 0x259: 0x2000, 0x25a: 0x2000, 0x25b: 0x2000, 0x25d: 0x2000, + 0x25f: 0x2000, + // Block 0xa, offset 0x280 + 0x280: 0x2000, 0x281: 0x2000, 0x282: 0x2000, 0x283: 0x2000, 0x284: 0x2000, 0x285: 0x2000, + 0x286: 0x2000, 0x287: 0x2000, 0x288: 0x2000, 0x289: 0x2000, 0x28a: 0x2000, 0x28b: 0x2000, + 0x28c: 0x2000, 0x28d: 0x2000, 0x28e: 0x2000, 0x28f: 0x2000, 0x290: 0x2000, 0x291: 0x2000, + 0x292: 0x2000, 0x293: 
0x2000, 0x294: 0x2000, 0x295: 0x2000, 0x296: 0x2000, 0x297: 0x2000, + 0x298: 0x2000, 0x299: 0x2000, 0x29a: 0x2000, 0x29b: 0x2000, 0x29c: 0x2000, 0x29d: 0x2000, + 0x29e: 0x2000, 0x29f: 0x2000, 0x2a0: 0x2000, 0x2a1: 0x2000, 0x2a2: 0x2000, 0x2a3: 0x2000, + 0x2a4: 0x2000, 0x2a5: 0x2000, 0x2a6: 0x2000, 0x2a7: 0x2000, 0x2a8: 0x2000, 0x2a9: 0x2000, + 0x2aa: 0x2000, 0x2ab: 0x2000, 0x2ac: 0x2000, 0x2ad: 0x2000, 0x2ae: 0x2000, 0x2af: 0x2000, + 0x2b0: 0x2000, 0x2b1: 0x2000, 0x2b2: 0x2000, 0x2b3: 0x2000, 0x2b4: 0x2000, 0x2b5: 0x2000, + 0x2b6: 0x2000, 0x2b7: 0x2000, 0x2b8: 0x2000, 0x2b9: 0x2000, 0x2ba: 0x2000, 0x2bb: 0x2000, + 0x2bc: 0x2000, 0x2bd: 0x2000, 0x2be: 0x2000, 0x2bf: 0x2000, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x2000, 0x2c1: 0x2000, 0x2c2: 0x2000, 0x2c3: 0x2000, 0x2c4: 0x2000, 0x2c5: 0x2000, + 0x2c6: 0x2000, 0x2c7: 0x2000, 0x2c8: 0x2000, 0x2c9: 0x2000, 0x2ca: 0x2000, 0x2cb: 0x2000, + 0x2cc: 0x2000, 0x2cd: 0x2000, 0x2ce: 0x2000, 0x2cf: 0x2000, 0x2d0: 0x2000, 0x2d1: 0x2000, + 0x2d2: 0x2000, 0x2d3: 0x2000, 0x2d4: 0x2000, 0x2d5: 0x2000, 0x2d6: 0x2000, 0x2d7: 0x2000, + 0x2d8: 0x2000, 0x2d9: 0x2000, 0x2da: 0x2000, 0x2db: 0x2000, 0x2dc: 0x2000, 0x2dd: 0x2000, + 0x2de: 0x2000, 0x2df: 0x2000, 0x2e0: 0x2000, 0x2e1: 0x2000, 0x2e2: 0x2000, 0x2e3: 0x2000, + 0x2e4: 0x2000, 0x2e5: 0x2000, 0x2e6: 0x2000, 0x2e7: 0x2000, 0x2e8: 0x2000, 0x2e9: 0x2000, + 0x2ea: 0x2000, 0x2eb: 0x2000, 0x2ec: 0x2000, 0x2ed: 0x2000, 0x2ee: 0x2000, 0x2ef: 0x2000, + // Block 0xc, offset 0x300 + 0x311: 0x2000, + 0x312: 0x2000, 0x313: 0x2000, 0x314: 0x2000, 0x315: 0x2000, 0x316: 0x2000, 0x317: 0x2000, + 0x318: 0x2000, 0x319: 0x2000, 0x31a: 0x2000, 0x31b: 0x2000, 0x31c: 0x2000, 0x31d: 0x2000, + 0x31e: 0x2000, 0x31f: 0x2000, 0x320: 0x2000, 0x321: 0x2000, 0x323: 0x2000, + 0x324: 0x2000, 0x325: 0x2000, 0x326: 0x2000, 0x327: 0x2000, 0x328: 0x2000, 0x329: 0x2000, + 0x331: 0x2000, 0x332: 0x2000, 0x333: 0x2000, 0x334: 0x2000, 0x335: 0x2000, + 0x336: 0x2000, 0x337: 0x2000, 0x338: 0x2000, 0x339: 0x2000, 0x33a: 0x2000, 0x33b: 0x2000, + 0x33c: 0x2000, 0x33d: 0x2000, 0x33e: 0x2000, 0x33f: 0x2000, + // Block 0xd, offset 0x340 + 0x340: 0x2000, 0x341: 0x2000, 0x343: 0x2000, 0x344: 0x2000, 0x345: 0x2000, + 0x346: 0x2000, 0x347: 0x2000, 0x348: 0x2000, 0x349: 0x2000, + // Block 0xe, offset 0x380 + 0x381: 0x2000, + 0x390: 0x2000, 0x391: 0x2000, + 0x392: 0x2000, 0x393: 0x2000, 0x394: 0x2000, 0x395: 0x2000, 0x396: 0x2000, 0x397: 0x2000, + 0x398: 0x2000, 0x399: 0x2000, 0x39a: 0x2000, 0x39b: 0x2000, 0x39c: 0x2000, 0x39d: 0x2000, + 0x39e: 0x2000, 0x39f: 0x2000, 0x3a0: 0x2000, 0x3a1: 0x2000, 0x3a2: 0x2000, 0x3a3: 0x2000, + 0x3a4: 0x2000, 0x3a5: 0x2000, 0x3a6: 0x2000, 0x3a7: 0x2000, 0x3a8: 0x2000, 0x3a9: 0x2000, + 0x3aa: 0x2000, 0x3ab: 0x2000, 0x3ac: 0x2000, 0x3ad: 0x2000, 0x3ae: 0x2000, 0x3af: 0x2000, + 0x3b0: 0x2000, 0x3b1: 0x2000, 0x3b2: 0x2000, 0x3b3: 0x2000, 0x3b4: 0x2000, 0x3b5: 0x2000, + 0x3b6: 0x2000, 0x3b7: 0x2000, 0x3b8: 0x2000, 0x3b9: 0x2000, 0x3ba: 0x2000, 0x3bb: 0x2000, + 0x3bc: 0x2000, 0x3bd: 0x2000, 0x3be: 0x2000, 0x3bf: 0x2000, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x2000, 0x3c1: 0x2000, 0x3c2: 0x2000, 0x3c3: 0x2000, 0x3c4: 0x2000, 0x3c5: 0x2000, + 0x3c6: 0x2000, 0x3c7: 0x2000, 0x3c8: 0x2000, 0x3c9: 0x2000, 0x3ca: 0x2000, 0x3cb: 0x2000, + 0x3cc: 0x2000, 0x3cd: 0x2000, 0x3ce: 0x2000, 0x3cf: 0x2000, 0x3d1: 0x2000, + // Block 0x10, offset 0x400 + 0x400: 0x4000, 0x401: 0x4000, 0x402: 0x4000, 0x403: 0x4000, 0x404: 0x4000, 0x405: 0x4000, + 0x406: 0x4000, 0x407: 0x4000, 0x408: 0x4000, 0x409: 0x4000, 0x40a: 0x4000, 0x40b: 0x4000, + 0x40c: 0x4000, 
0x40d: 0x4000, 0x40e: 0x4000, 0x40f: 0x4000, 0x410: 0x4000, 0x411: 0x4000, + 0x412: 0x4000, 0x413: 0x4000, 0x414: 0x4000, 0x415: 0x4000, 0x416: 0x4000, 0x417: 0x4000, + 0x418: 0x4000, 0x419: 0x4000, 0x41a: 0x4000, 0x41b: 0x4000, 0x41c: 0x4000, 0x41d: 0x4000, + 0x41e: 0x4000, 0x41f: 0x4000, 0x420: 0x4000, 0x421: 0x4000, 0x422: 0x4000, 0x423: 0x4000, + 0x424: 0x4000, 0x425: 0x4000, 0x426: 0x4000, 0x427: 0x4000, 0x428: 0x4000, 0x429: 0x4000, + 0x42a: 0x4000, 0x42b: 0x4000, 0x42c: 0x4000, 0x42d: 0x4000, 0x42e: 0x4000, 0x42f: 0x4000, + 0x430: 0x4000, 0x431: 0x4000, 0x432: 0x4000, 0x433: 0x4000, 0x434: 0x4000, 0x435: 0x4000, + 0x436: 0x4000, 0x437: 0x4000, 0x438: 0x4000, 0x439: 0x4000, 0x43a: 0x4000, 0x43b: 0x4000, + 0x43c: 0x4000, 0x43d: 0x4000, 0x43e: 0x4000, 0x43f: 0x4000, + // Block 0x11, offset 0x440 + 0x440: 0x4000, 0x441: 0x4000, 0x442: 0x4000, 0x443: 0x4000, 0x444: 0x4000, 0x445: 0x4000, + 0x446: 0x4000, 0x447: 0x4000, 0x448: 0x4000, 0x449: 0x4000, 0x44a: 0x4000, 0x44b: 0x4000, + 0x44c: 0x4000, 0x44d: 0x4000, 0x44e: 0x4000, 0x44f: 0x4000, 0x450: 0x4000, 0x451: 0x4000, + 0x452: 0x4000, 0x453: 0x4000, 0x454: 0x4000, 0x455: 0x4000, 0x456: 0x4000, 0x457: 0x4000, + 0x458: 0x4000, 0x459: 0x4000, 0x45a: 0x4000, 0x45b: 0x4000, 0x45c: 0x4000, 0x45d: 0x4000, + 0x45e: 0x4000, 0x45f: 0x4000, + // Block 0x12, offset 0x480 + 0x490: 0x2000, + 0x493: 0x2000, 0x494: 0x2000, 0x495: 0x2000, 0x496: 0x2000, + 0x498: 0x2000, 0x499: 0x2000, 0x49c: 0x2000, 0x49d: 0x2000, + 0x4a0: 0x2000, 0x4a1: 0x2000, 0x4a2: 0x2000, + 0x4a4: 0x2000, 0x4a5: 0x2000, 0x4a6: 0x2000, 0x4a7: 0x2000, + 0x4b0: 0x2000, 0x4b2: 0x2000, 0x4b3: 0x2000, 0x4b5: 0x2000, + 0x4bb: 0x2000, + 0x4be: 0x2000, + // Block 0x13, offset 0x4c0 + 0x4f4: 0x2000, + 0x4ff: 0x2000, + // Block 0x14, offset 0x500 + 0x501: 0x2000, 0x502: 0x2000, 0x503: 0x2000, 0x504: 0x2000, + 0x529: 0xa009, + 0x52c: 0x2000, + // Block 0x15, offset 0x540 + 0x543: 0x2000, 0x545: 0x2000, + 0x549: 0x2000, + 0x553: 0x2000, 0x556: 0x2000, + 0x561: 0x2000, 0x562: 0x2000, + 0x566: 0x2000, + 0x56b: 0x2000, + // Block 0x16, offset 0x580 + 0x593: 0x2000, 0x594: 0x2000, + 0x59b: 0x2000, 0x59c: 0x2000, 0x59d: 0x2000, + 0x59e: 0x2000, 0x5a0: 0x2000, 0x5a1: 0x2000, 0x5a2: 0x2000, 0x5a3: 0x2000, + 0x5a4: 0x2000, 0x5a5: 0x2000, 0x5a6: 0x2000, 0x5a7: 0x2000, 0x5a8: 0x2000, 0x5a9: 0x2000, + 0x5aa: 0x2000, 0x5ab: 0x2000, + 0x5b0: 0x2000, 0x5b1: 0x2000, 0x5b2: 0x2000, 0x5b3: 0x2000, 0x5b4: 0x2000, 0x5b5: 0x2000, + 0x5b6: 0x2000, 0x5b7: 0x2000, 0x5b8: 0x2000, 0x5b9: 0x2000, + // Block 0x17, offset 0x5c0 + 0x5c9: 0x2000, + 0x5d0: 0x200a, 0x5d1: 0x200b, + 0x5d2: 0x200a, 0x5d3: 0x200c, 0x5d4: 0x2000, 0x5d5: 0x2000, 0x5d6: 0x2000, 0x5d7: 0x2000, + 0x5d8: 0x2000, 0x5d9: 0x2000, + 0x5f8: 0x2000, 0x5f9: 0x2000, + // Block 0x18, offset 0x600 + 0x612: 0x2000, 0x614: 0x2000, + 0x627: 0x2000, + // Block 0x19, offset 0x640 + 0x640: 0x2000, 0x642: 0x2000, 0x643: 0x2000, + 0x647: 0x2000, 0x648: 0x2000, 0x64b: 0x2000, + 0x64f: 0x2000, 0x651: 0x2000, + 0x655: 0x2000, + 0x65a: 0x2000, 0x65d: 0x2000, + 0x65e: 0x2000, 0x65f: 0x2000, 0x660: 0x2000, 0x663: 0x2000, + 0x665: 0x2000, 0x667: 0x2000, 0x668: 0x2000, 0x669: 0x2000, + 0x66a: 0x2000, 0x66b: 0x2000, 0x66c: 0x2000, 0x66e: 0x2000, + 0x674: 0x2000, 0x675: 0x2000, + 0x676: 0x2000, 0x677: 0x2000, + 0x67c: 0x2000, 0x67d: 0x2000, + // Block 0x1a, offset 0x680 + 0x688: 0x2000, + 0x68c: 0x2000, + 0x692: 0x2000, + 0x6a0: 0x2000, 0x6a1: 0x2000, + 0x6a4: 0x2000, 0x6a5: 0x2000, 0x6a6: 0x2000, 0x6a7: 0x2000, + 0x6aa: 0x2000, 0x6ab: 0x2000, 0x6ae: 0x2000, 0x6af: 0x2000, + // 
Block 0x1b, offset 0x6c0 + 0x6c2: 0x2000, 0x6c3: 0x2000, + 0x6c6: 0x2000, 0x6c7: 0x2000, + 0x6d5: 0x2000, + 0x6d9: 0x2000, + 0x6e5: 0x2000, + 0x6ff: 0x2000, + // Block 0x1c, offset 0x700 + 0x712: 0x2000, + 0x71a: 0x4000, 0x71b: 0x4000, + 0x729: 0x4000, + 0x72a: 0x4000, + // Block 0x1d, offset 0x740 + 0x769: 0x4000, + 0x76a: 0x4000, 0x76b: 0x4000, 0x76c: 0x4000, + 0x770: 0x4000, 0x773: 0x4000, + // Block 0x1e, offset 0x780 + 0x7a0: 0x2000, 0x7a1: 0x2000, 0x7a2: 0x2000, 0x7a3: 0x2000, + 0x7a4: 0x2000, 0x7a5: 0x2000, 0x7a6: 0x2000, 0x7a7: 0x2000, 0x7a8: 0x2000, 0x7a9: 0x2000, + 0x7aa: 0x2000, 0x7ab: 0x2000, 0x7ac: 0x2000, 0x7ad: 0x2000, 0x7ae: 0x2000, 0x7af: 0x2000, + 0x7b0: 0x2000, 0x7b1: 0x2000, 0x7b2: 0x2000, 0x7b3: 0x2000, 0x7b4: 0x2000, 0x7b5: 0x2000, + 0x7b6: 0x2000, 0x7b7: 0x2000, 0x7b8: 0x2000, 0x7b9: 0x2000, 0x7ba: 0x2000, 0x7bb: 0x2000, + 0x7bc: 0x2000, 0x7bd: 0x2000, 0x7be: 0x2000, 0x7bf: 0x2000, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x2000, 0x7c1: 0x2000, 0x7c2: 0x2000, 0x7c3: 0x2000, 0x7c4: 0x2000, 0x7c5: 0x2000, + 0x7c6: 0x2000, 0x7c7: 0x2000, 0x7c8: 0x2000, 0x7c9: 0x2000, 0x7ca: 0x2000, 0x7cb: 0x2000, + 0x7cc: 0x2000, 0x7cd: 0x2000, 0x7ce: 0x2000, 0x7cf: 0x2000, 0x7d0: 0x2000, 0x7d1: 0x2000, + 0x7d2: 0x2000, 0x7d3: 0x2000, 0x7d4: 0x2000, 0x7d5: 0x2000, 0x7d6: 0x2000, 0x7d7: 0x2000, + 0x7d8: 0x2000, 0x7d9: 0x2000, 0x7da: 0x2000, 0x7db: 0x2000, 0x7dc: 0x2000, 0x7dd: 0x2000, + 0x7de: 0x2000, 0x7df: 0x2000, 0x7e0: 0x2000, 0x7e1: 0x2000, 0x7e2: 0x2000, 0x7e3: 0x2000, + 0x7e4: 0x2000, 0x7e5: 0x2000, 0x7e6: 0x2000, 0x7e7: 0x2000, 0x7e8: 0x2000, 0x7e9: 0x2000, + 0x7eb: 0x2000, 0x7ec: 0x2000, 0x7ed: 0x2000, 0x7ee: 0x2000, 0x7ef: 0x2000, + 0x7f0: 0x2000, 0x7f1: 0x2000, 0x7f2: 0x2000, 0x7f3: 0x2000, 0x7f4: 0x2000, 0x7f5: 0x2000, + 0x7f6: 0x2000, 0x7f7: 0x2000, 0x7f8: 0x2000, 0x7f9: 0x2000, 0x7fa: 0x2000, 0x7fb: 0x2000, + 0x7fc: 0x2000, 0x7fd: 0x2000, 0x7fe: 0x2000, 0x7ff: 0x2000, + // Block 0x20, offset 0x800 + 0x800: 0x2000, 0x801: 0x2000, 0x802: 0x200d, 0x803: 0x2000, 0x804: 0x2000, 0x805: 0x2000, + 0x806: 0x2000, 0x807: 0x2000, 0x808: 0x2000, 0x809: 0x2000, 0x80a: 0x2000, 0x80b: 0x2000, + 0x80c: 0x2000, 0x80d: 0x2000, 0x80e: 0x2000, 0x80f: 0x2000, 0x810: 0x2000, 0x811: 0x2000, + 0x812: 0x2000, 0x813: 0x2000, 0x814: 0x2000, 0x815: 0x2000, 0x816: 0x2000, 0x817: 0x2000, + 0x818: 0x2000, 0x819: 0x2000, 0x81a: 0x2000, 0x81b: 0x2000, 0x81c: 0x2000, 0x81d: 0x2000, + 0x81e: 0x2000, 0x81f: 0x2000, 0x820: 0x2000, 0x821: 0x2000, 0x822: 0x2000, 0x823: 0x2000, + 0x824: 0x2000, 0x825: 0x2000, 0x826: 0x2000, 0x827: 0x2000, 0x828: 0x2000, 0x829: 0x2000, + 0x82a: 0x2000, 0x82b: 0x2000, 0x82c: 0x2000, 0x82d: 0x2000, 0x82e: 0x2000, 0x82f: 0x2000, + 0x830: 0x2000, 0x831: 0x2000, 0x832: 0x2000, 0x833: 0x2000, 0x834: 0x2000, 0x835: 0x2000, + 0x836: 0x2000, 0x837: 0x2000, 0x838: 0x2000, 0x839: 0x2000, 0x83a: 0x2000, 0x83b: 0x2000, + 0x83c: 0x2000, 0x83d: 0x2000, 0x83e: 0x2000, 0x83f: 0x2000, + // Block 0x21, offset 0x840 + 0x840: 0x2000, 0x841: 0x2000, 0x842: 0x2000, 0x843: 0x2000, 0x844: 0x2000, 0x845: 0x2000, + 0x846: 0x2000, 0x847: 0x2000, 0x848: 0x2000, 0x849: 0x2000, 0x84a: 0x2000, 0x84b: 0x2000, + 0x850: 0x2000, 0x851: 0x2000, + 0x852: 0x2000, 0x853: 0x2000, 0x854: 0x2000, 0x855: 0x2000, 0x856: 0x2000, 0x857: 0x2000, + 0x858: 0x2000, 0x859: 0x2000, 0x85a: 0x2000, 0x85b: 0x2000, 0x85c: 0x2000, 0x85d: 0x2000, + 0x85e: 0x2000, 0x85f: 0x2000, 0x860: 0x2000, 0x861: 0x2000, 0x862: 0x2000, 0x863: 0x2000, + 0x864: 0x2000, 0x865: 0x2000, 0x866: 0x2000, 0x867: 0x2000, 0x868: 0x2000, 0x869: 0x2000, + 0x86a: 
0x2000, 0x86b: 0x2000, 0x86c: 0x2000, 0x86d: 0x2000, 0x86e: 0x2000, 0x86f: 0x2000, + 0x870: 0x2000, 0x871: 0x2000, 0x872: 0x2000, 0x873: 0x2000, + // Block 0x22, offset 0x880 + 0x880: 0x2000, 0x881: 0x2000, 0x882: 0x2000, 0x883: 0x2000, 0x884: 0x2000, 0x885: 0x2000, + 0x886: 0x2000, 0x887: 0x2000, 0x888: 0x2000, 0x889: 0x2000, 0x88a: 0x2000, 0x88b: 0x2000, + 0x88c: 0x2000, 0x88d: 0x2000, 0x88e: 0x2000, 0x88f: 0x2000, + 0x892: 0x2000, 0x893: 0x2000, 0x894: 0x2000, 0x895: 0x2000, + 0x8a0: 0x200e, 0x8a1: 0x2000, 0x8a3: 0x2000, + 0x8a4: 0x2000, 0x8a5: 0x2000, 0x8a6: 0x2000, 0x8a7: 0x2000, 0x8a8: 0x2000, 0x8a9: 0x2000, + 0x8b2: 0x2000, 0x8b3: 0x2000, + 0x8b6: 0x2000, 0x8b7: 0x2000, + 0x8bc: 0x2000, 0x8bd: 0x2000, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x2000, 0x8c1: 0x2000, + 0x8c6: 0x2000, 0x8c7: 0x2000, 0x8c8: 0x2000, 0x8cb: 0x200f, + 0x8ce: 0x2000, 0x8cf: 0x2000, 0x8d0: 0x2000, 0x8d1: 0x2000, + 0x8e2: 0x2000, 0x8e3: 0x2000, + 0x8e4: 0x2000, 0x8e5: 0x2000, + 0x8ef: 0x2000, + 0x8fd: 0x4000, 0x8fe: 0x4000, + // Block 0x24, offset 0x900 + 0x905: 0x2000, + 0x906: 0x2000, 0x909: 0x2000, + 0x90e: 0x2000, 0x90f: 0x2000, + 0x914: 0x4000, 0x915: 0x4000, + 0x91c: 0x2000, + 0x91e: 0x2000, + // Block 0x25, offset 0x940 + 0x940: 0x2000, 0x942: 0x2000, + 0x948: 0x4000, 0x949: 0x4000, 0x94a: 0x4000, 0x94b: 0x4000, + 0x94c: 0x4000, 0x94d: 0x4000, 0x94e: 0x4000, 0x94f: 0x4000, 0x950: 0x4000, 0x951: 0x4000, + 0x952: 0x4000, 0x953: 0x4000, + 0x960: 0x2000, 0x961: 0x2000, 0x963: 0x2000, + 0x964: 0x2000, 0x965: 0x2000, 0x967: 0x2000, 0x968: 0x2000, 0x969: 0x2000, + 0x96a: 0x2000, 0x96c: 0x2000, 0x96d: 0x2000, 0x96f: 0x2000, + 0x97f: 0x4000, + // Block 0x26, offset 0x980 + 0x993: 0x4000, + 0x99e: 0x2000, 0x99f: 0x2000, 0x9a1: 0x4000, + 0x9aa: 0x4000, 0x9ab: 0x4000, + 0x9bd: 0x4000, 0x9be: 0x4000, 0x9bf: 0x2000, + // Block 0x27, offset 0x9c0 + 0x9c4: 0x4000, 0x9c5: 0x4000, + 0x9c6: 0x2000, 0x9c7: 0x2000, 0x9c8: 0x2000, 0x9c9: 0x2000, 0x9ca: 0x2000, 0x9cb: 0x2000, + 0x9cc: 0x2000, 0x9cd: 0x2000, 0x9ce: 0x4000, 0x9cf: 0x2000, 0x9d0: 0x2000, 0x9d1: 0x2000, + 0x9d2: 0x2000, 0x9d3: 0x2000, 0x9d4: 0x4000, 0x9d5: 0x2000, 0x9d6: 0x2000, 0x9d7: 0x2000, + 0x9d8: 0x2000, 0x9d9: 0x2000, 0x9da: 0x2000, 0x9db: 0x2000, 0x9dc: 0x2000, 0x9dd: 0x2000, + 0x9de: 0x2000, 0x9df: 0x2000, 0x9e0: 0x2000, 0x9e1: 0x2000, 0x9e3: 0x2000, + 0x9e8: 0x2000, 0x9e9: 0x2000, + 0x9ea: 0x4000, 0x9eb: 0x2000, 0x9ec: 0x2000, 0x9ed: 0x2000, 0x9ee: 0x2000, 0x9ef: 0x2000, + 0x9f0: 0x2000, 0x9f1: 0x2000, 0x9f2: 0x4000, 0x9f3: 0x4000, 0x9f4: 0x2000, 0x9f5: 0x4000, + 0x9f6: 0x2000, 0x9f7: 0x2000, 0x9f8: 0x2000, 0x9f9: 0x2000, 0x9fa: 0x4000, 0x9fb: 0x2000, + 0x9fc: 0x2000, 0x9fd: 0x4000, 0x9fe: 0x2000, 0x9ff: 0x2000, + // Block 0x28, offset 0xa00 + 0xa05: 0x4000, + 0xa0a: 0x4000, 0xa0b: 0x4000, + 0xa28: 0x4000, + 0xa3d: 0x2000, + // Block 0x29, offset 0xa40 + 0xa4c: 0x4000, 0xa4e: 0x4000, + 0xa53: 0x4000, 0xa54: 0x4000, 0xa55: 0x4000, 0xa57: 0x4000, + 0xa76: 0x2000, 0xa77: 0x2000, 0xa78: 0x2000, 0xa79: 0x2000, 0xa7a: 0x2000, 0xa7b: 0x2000, + 0xa7c: 0x2000, 0xa7d: 0x2000, 0xa7e: 0x2000, 0xa7f: 0x2000, + // Block 0x2a, offset 0xa80 + 0xa95: 0x4000, 0xa96: 0x4000, 0xa97: 0x4000, + 0xab0: 0x4000, + 0xabf: 0x4000, + // Block 0x2b, offset 0xac0 + 0xae6: 0x6000, 0xae7: 0x6000, 0xae8: 0x6000, 0xae9: 0x6000, + 0xaea: 0x6000, 0xaeb: 0x6000, 0xaec: 0x6000, 0xaed: 0x6000, + // Block 0x2c, offset 0xb00 + 0xb05: 0x6010, + 0xb06: 0x6011, + // Block 0x2d, offset 0xb40 + 0xb5b: 0x4000, 0xb5c: 0x4000, + // Block 0x2e, offset 0xb80 + 0xb90: 0x4000, + 0xb95: 0x4000, 0xb96: 
0x2000, 0xb97: 0x2000, + 0xb98: 0x2000, 0xb99: 0x2000, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x4000, 0xbc1: 0x4000, 0xbc2: 0x4000, 0xbc3: 0x4000, 0xbc4: 0x4000, 0xbc5: 0x4000, + 0xbc6: 0x4000, 0xbc7: 0x4000, 0xbc8: 0x4000, 0xbc9: 0x4000, 0xbca: 0x4000, 0xbcb: 0x4000, + 0xbcc: 0x4000, 0xbcd: 0x4000, 0xbce: 0x4000, 0xbcf: 0x4000, 0xbd0: 0x4000, 0xbd1: 0x4000, + 0xbd2: 0x4000, 0xbd3: 0x4000, 0xbd4: 0x4000, 0xbd5: 0x4000, 0xbd6: 0x4000, 0xbd7: 0x4000, + 0xbd8: 0x4000, 0xbd9: 0x4000, 0xbdb: 0x4000, 0xbdc: 0x4000, 0xbdd: 0x4000, + 0xbde: 0x4000, 0xbdf: 0x4000, 0xbe0: 0x4000, 0xbe1: 0x4000, 0xbe2: 0x4000, 0xbe3: 0x4000, + 0xbe4: 0x4000, 0xbe5: 0x4000, 0xbe6: 0x4000, 0xbe7: 0x4000, 0xbe8: 0x4000, 0xbe9: 0x4000, + 0xbea: 0x4000, 0xbeb: 0x4000, 0xbec: 0x4000, 0xbed: 0x4000, 0xbee: 0x4000, 0xbef: 0x4000, + 0xbf0: 0x4000, 0xbf1: 0x4000, 0xbf2: 0x4000, 0xbf3: 0x4000, 0xbf4: 0x4000, 0xbf5: 0x4000, + 0xbf6: 0x4000, 0xbf7: 0x4000, 0xbf8: 0x4000, 0xbf9: 0x4000, 0xbfa: 0x4000, 0xbfb: 0x4000, + 0xbfc: 0x4000, 0xbfd: 0x4000, 0xbfe: 0x4000, 0xbff: 0x4000, + // Block 0x30, offset 0xc00 + 0xc00: 0x4000, 0xc01: 0x4000, 0xc02: 0x4000, 0xc03: 0x4000, 0xc04: 0x4000, 0xc05: 0x4000, + 0xc06: 0x4000, 0xc07: 0x4000, 0xc08: 0x4000, 0xc09: 0x4000, 0xc0a: 0x4000, 0xc0b: 0x4000, + 0xc0c: 0x4000, 0xc0d: 0x4000, 0xc0e: 0x4000, 0xc0f: 0x4000, 0xc10: 0x4000, 0xc11: 0x4000, + 0xc12: 0x4000, 0xc13: 0x4000, 0xc14: 0x4000, 0xc15: 0x4000, 0xc16: 0x4000, 0xc17: 0x4000, + 0xc18: 0x4000, 0xc19: 0x4000, 0xc1a: 0x4000, 0xc1b: 0x4000, 0xc1c: 0x4000, 0xc1d: 0x4000, + 0xc1e: 0x4000, 0xc1f: 0x4000, 0xc20: 0x4000, 0xc21: 0x4000, 0xc22: 0x4000, 0xc23: 0x4000, + 0xc24: 0x4000, 0xc25: 0x4000, 0xc26: 0x4000, 0xc27: 0x4000, 0xc28: 0x4000, 0xc29: 0x4000, + 0xc2a: 0x4000, 0xc2b: 0x4000, 0xc2c: 0x4000, 0xc2d: 0x4000, 0xc2e: 0x4000, 0xc2f: 0x4000, + 0xc30: 0x4000, 0xc31: 0x4000, 0xc32: 0x4000, 0xc33: 0x4000, + // Block 0x31, offset 0xc40 + 0xc40: 0x4000, 0xc41: 0x4000, 0xc42: 0x4000, 0xc43: 0x4000, 0xc44: 0x4000, 0xc45: 0x4000, + 0xc46: 0x4000, 0xc47: 0x4000, 0xc48: 0x4000, 0xc49: 0x4000, 0xc4a: 0x4000, 0xc4b: 0x4000, + 0xc4c: 0x4000, 0xc4d: 0x4000, 0xc4e: 0x4000, 0xc4f: 0x4000, 0xc50: 0x4000, 0xc51: 0x4000, + 0xc52: 0x4000, 0xc53: 0x4000, 0xc54: 0x4000, 0xc55: 0x4000, + 0xc70: 0x4000, 0xc71: 0x4000, 0xc72: 0x4000, 0xc73: 0x4000, 0xc74: 0x4000, 0xc75: 0x4000, + 0xc76: 0x4000, 0xc77: 0x4000, 0xc78: 0x4000, 0xc79: 0x4000, 0xc7a: 0x4000, 0xc7b: 0x4000, + // Block 0x32, offset 0xc80 + 0xc80: 0x9012, 0xc81: 0x4013, 0xc82: 0x4014, 0xc83: 0x4000, 0xc84: 0x4000, 0xc85: 0x4000, + 0xc86: 0x4000, 0xc87: 0x4000, 0xc88: 0x4000, 0xc89: 0x4000, 0xc8a: 0x4000, 0xc8b: 0x4000, + 0xc8c: 0x4015, 0xc8d: 0x4015, 0xc8e: 0x4000, 0xc8f: 0x4000, 0xc90: 0x4000, 0xc91: 0x4000, + 0xc92: 0x4000, 0xc93: 0x4000, 0xc94: 0x4000, 0xc95: 0x4000, 0xc96: 0x4000, 0xc97: 0x4000, + 0xc98: 0x4000, 0xc99: 0x4000, 0xc9a: 0x4000, 0xc9b: 0x4000, 0xc9c: 0x4000, 0xc9d: 0x4000, + 0xc9e: 0x4000, 0xc9f: 0x4000, 0xca0: 0x4000, 0xca1: 0x4000, 0xca2: 0x4000, 0xca3: 0x4000, + 0xca4: 0x4000, 0xca5: 0x4000, 0xca6: 0x4000, 0xca7: 0x4000, 0xca8: 0x4000, 0xca9: 0x4000, + 0xcaa: 0x4000, 0xcab: 0x4000, 0xcac: 0x4000, 0xcad: 0x4000, 0xcae: 0x4000, 0xcaf: 0x4000, + 0xcb0: 0x4000, 0xcb1: 0x4000, 0xcb2: 0x4000, 0xcb3: 0x4000, 0xcb4: 0x4000, 0xcb5: 0x4000, + 0xcb6: 0x4000, 0xcb7: 0x4000, 0xcb8: 0x4000, 0xcb9: 0x4000, 0xcba: 0x4000, 0xcbb: 0x4000, + 0xcbc: 0x4000, 0xcbd: 0x4000, 0xcbe: 0x4000, + // Block 0x33, offset 0xcc0 + 0xcc1: 0x4000, 0xcc2: 0x4000, 0xcc3: 0x4000, 0xcc4: 0x4000, 0xcc5: 0x4000, + 0xcc6: 0x4000, 
0xcc7: 0x4000, 0xcc8: 0x4000, 0xcc9: 0x4000, 0xcca: 0x4000, 0xccb: 0x4000, + 0xccc: 0x4000, 0xccd: 0x4000, 0xcce: 0x4000, 0xccf: 0x4000, 0xcd0: 0x4000, 0xcd1: 0x4000, + 0xcd2: 0x4000, 0xcd3: 0x4000, 0xcd4: 0x4000, 0xcd5: 0x4000, 0xcd6: 0x4000, 0xcd7: 0x4000, + 0xcd8: 0x4000, 0xcd9: 0x4000, 0xcda: 0x4000, 0xcdb: 0x4000, 0xcdc: 0x4000, 0xcdd: 0x4000, + 0xcde: 0x4000, 0xcdf: 0x4000, 0xce0: 0x4000, 0xce1: 0x4000, 0xce2: 0x4000, 0xce3: 0x4000, + 0xce4: 0x4000, 0xce5: 0x4000, 0xce6: 0x4000, 0xce7: 0x4000, 0xce8: 0x4000, 0xce9: 0x4000, + 0xcea: 0x4000, 0xceb: 0x4000, 0xcec: 0x4000, 0xced: 0x4000, 0xcee: 0x4000, 0xcef: 0x4000, + 0xcf0: 0x4000, 0xcf1: 0x4000, 0xcf2: 0x4000, 0xcf3: 0x4000, 0xcf4: 0x4000, 0xcf5: 0x4000, + 0xcf6: 0x4000, 0xcf7: 0x4000, 0xcf8: 0x4000, 0xcf9: 0x4000, 0xcfa: 0x4000, 0xcfb: 0x4000, + 0xcfc: 0x4000, 0xcfd: 0x4000, 0xcfe: 0x4000, 0xcff: 0x4000, + // Block 0x34, offset 0xd00 + 0xd00: 0x4000, 0xd01: 0x4000, 0xd02: 0x4000, 0xd03: 0x4000, 0xd04: 0x4000, 0xd05: 0x4000, + 0xd06: 0x4000, 0xd07: 0x4000, 0xd08: 0x4000, 0xd09: 0x4000, 0xd0a: 0x4000, 0xd0b: 0x4000, + 0xd0c: 0x4000, 0xd0d: 0x4000, 0xd0e: 0x4000, 0xd0f: 0x4000, 0xd10: 0x4000, 0xd11: 0x4000, + 0xd12: 0x4000, 0xd13: 0x4000, 0xd14: 0x4000, 0xd15: 0x4000, 0xd16: 0x4000, + 0xd19: 0x4016, 0xd1a: 0x4017, 0xd1b: 0x4000, 0xd1c: 0x4000, 0xd1d: 0x4000, + 0xd1e: 0x4000, 0xd1f: 0x4000, 0xd20: 0x4000, 0xd21: 0x4018, 0xd22: 0x4019, 0xd23: 0x401a, + 0xd24: 0x401b, 0xd25: 0x401c, 0xd26: 0x401d, 0xd27: 0x401e, 0xd28: 0x401f, 0xd29: 0x4020, + 0xd2a: 0x4021, 0xd2b: 0x4022, 0xd2c: 0x4000, 0xd2d: 0x4010, 0xd2e: 0x4000, 0xd2f: 0x4023, + 0xd30: 0x4000, 0xd31: 0x4024, 0xd32: 0x4000, 0xd33: 0x4025, 0xd34: 0x4000, 0xd35: 0x4026, + 0xd36: 0x4000, 0xd37: 0x401a, 0xd38: 0x4000, 0xd39: 0x4027, 0xd3a: 0x4000, 0xd3b: 0x4028, + 0xd3c: 0x4000, 0xd3d: 0x4020, 0xd3e: 0x4000, 0xd3f: 0x4029, + // Block 0x35, offset 0xd40 + 0xd40: 0x4000, 0xd41: 0x402a, 0xd42: 0x4000, 0xd43: 0x402b, 0xd44: 0x402c, 0xd45: 0x4000, + 0xd46: 0x4017, 0xd47: 0x4000, 0xd48: 0x402d, 0xd49: 0x4000, 0xd4a: 0x402e, 0xd4b: 0x402f, + 0xd4c: 0x4030, 0xd4d: 0x4017, 0xd4e: 0x4016, 0xd4f: 0x4017, 0xd50: 0x4000, 0xd51: 0x4000, + 0xd52: 0x4031, 0xd53: 0x4000, 0xd54: 0x4000, 0xd55: 0x4031, 0xd56: 0x4000, 0xd57: 0x4000, + 0xd58: 0x4032, 0xd59: 0x4000, 0xd5a: 0x4000, 0xd5b: 0x4032, 0xd5c: 0x4000, 0xd5d: 0x4000, + 0xd5e: 0x4033, 0xd5f: 0x402e, 0xd60: 0x4034, 0xd61: 0x4035, 0xd62: 0x4034, 0xd63: 0x4036, + 0xd64: 0x4037, 0xd65: 0x4024, 0xd66: 0x4035, 0xd67: 0x4025, 0xd68: 0x4038, 0xd69: 0x4038, + 0xd6a: 0x4039, 0xd6b: 0x4039, 0xd6c: 0x403a, 0xd6d: 0x403a, 0xd6e: 0x4000, 0xd6f: 0x4035, + 0xd70: 0x4000, 0xd71: 0x4000, 0xd72: 0x403b, 0xd73: 0x403c, 0xd74: 0x4000, 0xd75: 0x4000, + 0xd76: 0x4000, 0xd77: 0x4000, 0xd78: 0x4000, 0xd79: 0x4000, 0xd7a: 0x4000, 0xd7b: 0x403d, + 0xd7c: 0x401c, 0xd7d: 0x4000, 0xd7e: 0x4000, 0xd7f: 0x4000, + // Block 0x36, offset 0xd80 + 0xd85: 0x4000, + 0xd86: 0x4000, 0xd87: 0x4000, 0xd88: 0x4000, 0xd89: 0x4000, 0xd8a: 0x4000, 0xd8b: 0x4000, + 0xd8c: 0x4000, 0xd8d: 0x4000, 0xd8e: 0x4000, 0xd8f: 0x4000, 0xd90: 0x4000, 0xd91: 0x4000, + 0xd92: 0x4000, 0xd93: 0x4000, 0xd94: 0x4000, 0xd95: 0x4000, 0xd96: 0x4000, 0xd97: 0x4000, + 0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000, + 0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000, + 0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000, + 0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000, 0xdae: 0x4000, + 0xdb1: 
0x403e, 0xdb2: 0x403e, 0xdb3: 0x403e, 0xdb4: 0x403e, 0xdb5: 0x403e, + 0xdb6: 0x403e, 0xdb7: 0x403e, 0xdb8: 0x403e, 0xdb9: 0x403e, 0xdba: 0x403e, 0xdbb: 0x403e, + 0xdbc: 0x403e, 0xdbd: 0x403e, 0xdbe: 0x403e, 0xdbf: 0x403e, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x4037, 0xdc1: 0x4037, 0xdc2: 0x4037, 0xdc3: 0x4037, 0xdc4: 0x4037, 0xdc5: 0x4037, + 0xdc6: 0x4037, 0xdc7: 0x4037, 0xdc8: 0x4037, 0xdc9: 0x4037, 0xdca: 0x4037, 0xdcb: 0x4037, + 0xdcc: 0x4037, 0xdcd: 0x4037, 0xdce: 0x4037, 0xdcf: 0x400e, 0xdd0: 0x403f, 0xdd1: 0x4040, + 0xdd2: 0x4041, 0xdd3: 0x4040, 0xdd4: 0x403f, 0xdd5: 0x4042, 0xdd6: 0x4043, 0xdd7: 0x4044, + 0xdd8: 0x4040, 0xdd9: 0x4041, 0xdda: 0x4040, 0xddb: 0x4045, 0xddc: 0x4009, 0xddd: 0x4045, + 0xdde: 0x4046, 0xddf: 0x4045, 0xde0: 0x4047, 0xde1: 0x400b, 0xde2: 0x400a, 0xde3: 0x400c, + 0xde4: 0x4048, 0xde5: 0x4000, 0xde6: 0x4000, 0xde7: 0x4000, 0xde8: 0x4000, 0xde9: 0x4000, + 0xdea: 0x4000, 0xdeb: 0x4000, 0xdec: 0x4000, 0xded: 0x4000, 0xdee: 0x4000, 0xdef: 0x4000, + 0xdf0: 0x4000, 0xdf1: 0x4000, 0xdf2: 0x4000, 0xdf3: 0x4000, 0xdf4: 0x4000, 0xdf5: 0x4000, + 0xdf6: 0x4000, 0xdf7: 0x4000, 0xdf8: 0x4000, 0xdf9: 0x4000, 0xdfa: 0x4000, 0xdfb: 0x4000, + 0xdfc: 0x4000, 0xdfd: 0x4000, 0xdfe: 0x4000, 0xdff: 0x4000, + // Block 0x38, offset 0xe00 + 0xe00: 0x4000, 0xe01: 0x4000, 0xe02: 0x4000, 0xe03: 0x4000, 0xe04: 0x4000, 0xe05: 0x4000, + 0xe06: 0x4000, 0xe07: 0x4000, 0xe08: 0x4000, 0xe09: 0x4000, 0xe0a: 0x4000, 0xe0b: 0x4000, + 0xe0c: 0x4000, 0xe0d: 0x4000, 0xe0e: 0x4000, 0xe10: 0x4000, 0xe11: 0x4000, + 0xe12: 0x4000, 0xe13: 0x4000, 0xe14: 0x4000, 0xe15: 0x4000, 0xe16: 0x4000, 0xe17: 0x4000, + 0xe18: 0x4000, 0xe19: 0x4000, 0xe1a: 0x4000, 0xe1b: 0x4000, 0xe1c: 0x4000, 0xe1d: 0x4000, + 0xe1e: 0x4000, 0xe1f: 0x4000, 0xe20: 0x4000, 0xe21: 0x4000, 0xe22: 0x4000, 0xe23: 0x4000, + 0xe24: 0x4000, 0xe25: 0x4000, 0xe26: 0x4000, 0xe27: 0x4000, 0xe28: 0x4000, 0xe29: 0x4000, + 0xe2a: 0x4000, 0xe2b: 0x4000, 0xe2c: 0x4000, 0xe2d: 0x4000, 0xe2e: 0x4000, 0xe2f: 0x4000, + 0xe30: 0x4000, 0xe31: 0x4000, 0xe32: 0x4000, 0xe33: 0x4000, 0xe34: 0x4000, 0xe35: 0x4000, + 0xe36: 0x4000, 0xe37: 0x4000, 0xe38: 0x4000, 0xe39: 0x4000, 0xe3a: 0x4000, + // Block 0x39, offset 0xe40 + 0xe40: 0x4000, 0xe41: 0x4000, 0xe42: 0x4000, 0xe43: 0x4000, 0xe44: 0x4000, 0xe45: 0x4000, + 0xe46: 0x4000, 0xe47: 0x4000, 0xe48: 0x4000, 0xe49: 0x4000, 0xe4a: 0x4000, 0xe4b: 0x4000, + 0xe4c: 0x4000, 0xe4d: 0x4000, 0xe4e: 0x4000, 0xe4f: 0x4000, 0xe50: 0x4000, 0xe51: 0x4000, + 0xe52: 0x4000, 0xe53: 0x4000, 0xe54: 0x4000, 0xe55: 0x4000, 0xe56: 0x4000, 0xe57: 0x4000, + 0xe58: 0x4000, 0xe59: 0x4000, 0xe5a: 0x4000, 0xe5b: 0x4000, 0xe5c: 0x4000, 0xe5d: 0x4000, + 0xe5e: 0x4000, 0xe5f: 0x4000, 0xe60: 0x4000, 0xe61: 0x4000, 0xe62: 0x4000, 0xe63: 0x4000, + 0xe70: 0x4000, 0xe71: 0x4000, 0xe72: 0x4000, 0xe73: 0x4000, 0xe74: 0x4000, 0xe75: 0x4000, + 0xe76: 0x4000, 0xe77: 0x4000, 0xe78: 0x4000, 0xe79: 0x4000, 0xe7a: 0x4000, 0xe7b: 0x4000, + 0xe7c: 0x4000, 0xe7d: 0x4000, 0xe7e: 0x4000, 0xe7f: 0x4000, + // Block 0x3a, offset 0xe80 + 0xe80: 0x4000, 0xe81: 0x4000, 0xe82: 0x4000, 0xe83: 0x4000, 0xe84: 0x4000, 0xe85: 0x4000, + 0xe86: 0x4000, 0xe87: 0x4000, 0xe88: 0x4000, 0xe89: 0x4000, 0xe8a: 0x4000, 0xe8b: 0x4000, + 0xe8c: 0x4000, 0xe8d: 0x4000, 0xe8e: 0x4000, 0xe8f: 0x4000, 0xe90: 0x4000, 0xe91: 0x4000, + 0xe92: 0x4000, 0xe93: 0x4000, 0xe94: 0x4000, 0xe95: 0x4000, 0xe96: 0x4000, 0xe97: 0x4000, + 0xe98: 0x4000, 0xe99: 0x4000, 0xe9a: 0x4000, 0xe9b: 0x4000, 0xe9c: 0x4000, 0xe9d: 0x4000, + 0xe9e: 0x4000, 0xea0: 0x4000, 0xea1: 0x4000, 0xea2: 0x4000, 0xea3: 0x4000, + 
0xea4: 0x4000, 0xea5: 0x4000, 0xea6: 0x4000, 0xea7: 0x4000, 0xea8: 0x4000, 0xea9: 0x4000, + 0xeaa: 0x4000, 0xeab: 0x4000, 0xeac: 0x4000, 0xead: 0x4000, 0xeae: 0x4000, 0xeaf: 0x4000, + 0xeb0: 0x4000, 0xeb1: 0x4000, 0xeb2: 0x4000, 0xeb3: 0x4000, 0xeb4: 0x4000, 0xeb5: 0x4000, + 0xeb6: 0x4000, 0xeb7: 0x4000, 0xeb8: 0x4000, 0xeb9: 0x4000, 0xeba: 0x4000, 0xebb: 0x4000, + 0xebc: 0x4000, 0xebd: 0x4000, 0xebe: 0x4000, 0xebf: 0x4000, + // Block 0x3b, offset 0xec0 + 0xec0: 0x4000, 0xec1: 0x4000, 0xec2: 0x4000, 0xec3: 0x4000, 0xec4: 0x4000, 0xec5: 0x4000, + 0xec6: 0x4000, 0xec7: 0x4000, 0xec8: 0x2000, 0xec9: 0x2000, 0xeca: 0x2000, 0xecb: 0x2000, + 0xecc: 0x2000, 0xecd: 0x2000, 0xece: 0x2000, 0xecf: 0x2000, 0xed0: 0x4000, 0xed1: 0x4000, + 0xed2: 0x4000, 0xed3: 0x4000, 0xed4: 0x4000, 0xed5: 0x4000, 0xed6: 0x4000, 0xed7: 0x4000, + 0xed8: 0x4000, 0xed9: 0x4000, 0xeda: 0x4000, 0xedb: 0x4000, 0xedc: 0x4000, 0xedd: 0x4000, + 0xede: 0x4000, 0xedf: 0x4000, 0xee0: 0x4000, 0xee1: 0x4000, 0xee2: 0x4000, 0xee3: 0x4000, + 0xee4: 0x4000, 0xee5: 0x4000, 0xee6: 0x4000, 0xee7: 0x4000, 0xee8: 0x4000, 0xee9: 0x4000, + 0xeea: 0x4000, 0xeeb: 0x4000, 0xeec: 0x4000, 0xeed: 0x4000, 0xeee: 0x4000, 0xeef: 0x4000, + 0xef0: 0x4000, 0xef1: 0x4000, 0xef2: 0x4000, 0xef3: 0x4000, 0xef4: 0x4000, 0xef5: 0x4000, + 0xef6: 0x4000, 0xef7: 0x4000, 0xef8: 0x4000, 0xef9: 0x4000, 0xefa: 0x4000, 0xefb: 0x4000, + 0xefc: 0x4000, 0xefd: 0x4000, 0xefe: 0x4000, 0xeff: 0x4000, + // Block 0x3c, offset 0xf00 + 0xf00: 0x4000, 0xf01: 0x4000, 0xf02: 0x4000, 0xf03: 0x4000, 0xf04: 0x4000, 0xf05: 0x4000, + 0xf06: 0x4000, 0xf07: 0x4000, 0xf08: 0x4000, 0xf09: 0x4000, 0xf0a: 0x4000, 0xf0b: 0x4000, + 0xf0c: 0x4000, 0xf0d: 0x4000, 0xf0e: 0x4000, 0xf0f: 0x4000, 0xf10: 0x4000, 0xf11: 0x4000, + 0xf12: 0x4000, 0xf13: 0x4000, 0xf14: 0x4000, 0xf15: 0x4000, 0xf16: 0x4000, 0xf17: 0x4000, + 0xf18: 0x4000, 0xf19: 0x4000, 0xf1a: 0x4000, 0xf1b: 0x4000, 0xf1c: 0x4000, 0xf1d: 0x4000, + 0xf1e: 0x4000, 0xf1f: 0x4000, 0xf20: 0x4000, 0xf21: 0x4000, 0xf22: 0x4000, 0xf23: 0x4000, + 0xf24: 0x4000, 0xf25: 0x4000, 0xf26: 0x4000, 0xf27: 0x4000, 0xf28: 0x4000, 0xf29: 0x4000, + 0xf2a: 0x4000, 0xf2b: 0x4000, 0xf2c: 0x4000, 0xf2d: 0x4000, 0xf2e: 0x4000, 0xf2f: 0x4000, + 0xf30: 0x4000, 0xf31: 0x4000, 0xf32: 0x4000, 0xf33: 0x4000, 0xf34: 0x4000, 0xf35: 0x4000, + 0xf36: 0x4000, 0xf37: 0x4000, 0xf38: 0x4000, 0xf39: 0x4000, 0xf3a: 0x4000, 0xf3b: 0x4000, + 0xf3c: 0x4000, 0xf3d: 0x4000, 0xf3e: 0x4000, + // Block 0x3d, offset 0xf40 + 0xf40: 0x4000, 0xf41: 0x4000, 0xf42: 0x4000, 0xf43: 0x4000, 0xf44: 0x4000, 0xf45: 0x4000, + 0xf46: 0x4000, 0xf47: 0x4000, 0xf48: 0x4000, 0xf49: 0x4000, 0xf4a: 0x4000, 0xf4b: 0x4000, + 0xf4c: 0x4000, 0xf50: 0x4000, 0xf51: 0x4000, + 0xf52: 0x4000, 0xf53: 0x4000, 0xf54: 0x4000, 0xf55: 0x4000, 0xf56: 0x4000, 0xf57: 0x4000, + 0xf58: 0x4000, 0xf59: 0x4000, 0xf5a: 0x4000, 0xf5b: 0x4000, 0xf5c: 0x4000, 0xf5d: 0x4000, + 0xf5e: 0x4000, 0xf5f: 0x4000, 0xf60: 0x4000, 0xf61: 0x4000, 0xf62: 0x4000, 0xf63: 0x4000, + 0xf64: 0x4000, 0xf65: 0x4000, 0xf66: 0x4000, 0xf67: 0x4000, 0xf68: 0x4000, 0xf69: 0x4000, + 0xf6a: 0x4000, 0xf6b: 0x4000, 0xf6c: 0x4000, 0xf6d: 0x4000, 0xf6e: 0x4000, 0xf6f: 0x4000, + 0xf70: 0x4000, 0xf71: 0x4000, 0xf72: 0x4000, 0xf73: 0x4000, 0xf74: 0x4000, 0xf75: 0x4000, + 0xf76: 0x4000, 0xf77: 0x4000, 0xf78: 0x4000, 0xf79: 0x4000, 0xf7a: 0x4000, 0xf7b: 0x4000, + 0xf7c: 0x4000, 0xf7d: 0x4000, 0xf7e: 0x4000, 0xf7f: 0x4000, + // Block 0x3e, offset 0xf80 + 0xf80: 0x4000, 0xf81: 0x4000, 0xf82: 0x4000, 0xf83: 0x4000, 0xf84: 0x4000, 0xf85: 0x4000, + 0xf86: 0x4000, + // Block 
0x3f, offset 0xfc0 + 0xfe0: 0x4000, 0xfe1: 0x4000, 0xfe2: 0x4000, 0xfe3: 0x4000, + 0xfe4: 0x4000, 0xfe5: 0x4000, 0xfe6: 0x4000, 0xfe7: 0x4000, 0xfe8: 0x4000, 0xfe9: 0x4000, + 0xfea: 0x4000, 0xfeb: 0x4000, 0xfec: 0x4000, 0xfed: 0x4000, 0xfee: 0x4000, 0xfef: 0x4000, + 0xff0: 0x4000, 0xff1: 0x4000, 0xff2: 0x4000, 0xff3: 0x4000, 0xff4: 0x4000, 0xff5: 0x4000, + 0xff6: 0x4000, 0xff7: 0x4000, 0xff8: 0x4000, 0xff9: 0x4000, 0xffa: 0x4000, 0xffb: 0x4000, + 0xffc: 0x4000, + // Block 0x40, offset 0x1000 + 0x1000: 0x4000, 0x1001: 0x4000, 0x1002: 0x4000, 0x1003: 0x4000, 0x1004: 0x4000, 0x1005: 0x4000, + 0x1006: 0x4000, 0x1007: 0x4000, 0x1008: 0x4000, 0x1009: 0x4000, 0x100a: 0x4000, 0x100b: 0x4000, + 0x100c: 0x4000, 0x100d: 0x4000, 0x100e: 0x4000, 0x100f: 0x4000, 0x1010: 0x4000, 0x1011: 0x4000, + 0x1012: 0x4000, 0x1013: 0x4000, 0x1014: 0x4000, 0x1015: 0x4000, 0x1016: 0x4000, 0x1017: 0x4000, + 0x1018: 0x4000, 0x1019: 0x4000, 0x101a: 0x4000, 0x101b: 0x4000, 0x101c: 0x4000, 0x101d: 0x4000, + 0x101e: 0x4000, 0x101f: 0x4000, 0x1020: 0x4000, 0x1021: 0x4000, 0x1022: 0x4000, 0x1023: 0x4000, + // Block 0x41, offset 0x1040 + 0x1040: 0x2000, 0x1041: 0x2000, 0x1042: 0x2000, 0x1043: 0x2000, 0x1044: 0x2000, 0x1045: 0x2000, + 0x1046: 0x2000, 0x1047: 0x2000, 0x1048: 0x2000, 0x1049: 0x2000, 0x104a: 0x2000, 0x104b: 0x2000, + 0x104c: 0x2000, 0x104d: 0x2000, 0x104e: 0x2000, 0x104f: 0x2000, 0x1050: 0x4000, 0x1051: 0x4000, + 0x1052: 0x4000, 0x1053: 0x4000, 0x1054: 0x4000, 0x1055: 0x4000, 0x1056: 0x4000, 0x1057: 0x4000, + 0x1058: 0x4000, 0x1059: 0x4000, + 0x1070: 0x4000, 0x1071: 0x4000, 0x1072: 0x4000, 0x1073: 0x4000, 0x1074: 0x4000, 0x1075: 0x4000, + 0x1076: 0x4000, 0x1077: 0x4000, 0x1078: 0x4000, 0x1079: 0x4000, 0x107a: 0x4000, 0x107b: 0x4000, + 0x107c: 0x4000, 0x107d: 0x4000, 0x107e: 0x4000, 0x107f: 0x4000, + // Block 0x42, offset 0x1080 + 0x1080: 0x4000, 0x1081: 0x4000, 0x1082: 0x4000, 0x1083: 0x4000, 0x1084: 0x4000, 0x1085: 0x4000, + 0x1086: 0x4000, 0x1087: 0x4000, 0x1088: 0x4000, 0x1089: 0x4000, 0x108a: 0x4000, 0x108b: 0x4000, + 0x108c: 0x4000, 0x108d: 0x4000, 0x108e: 0x4000, 0x108f: 0x4000, 0x1090: 0x4000, 0x1091: 0x4000, + 0x1092: 0x4000, 0x1094: 0x4000, 0x1095: 0x4000, 0x1096: 0x4000, 0x1097: 0x4000, + 0x1098: 0x4000, 0x1099: 0x4000, 0x109a: 0x4000, 0x109b: 0x4000, 0x109c: 0x4000, 0x109d: 0x4000, + 0x109e: 0x4000, 0x109f: 0x4000, 0x10a0: 0x4000, 0x10a1: 0x4000, 0x10a2: 0x4000, 0x10a3: 0x4000, + 0x10a4: 0x4000, 0x10a5: 0x4000, 0x10a6: 0x4000, 0x10a8: 0x4000, 0x10a9: 0x4000, + 0x10aa: 0x4000, 0x10ab: 0x4000, + // Block 0x43, offset 0x10c0 + 0x10c1: 0x9012, 0x10c2: 0x9012, 0x10c3: 0x9012, 0x10c4: 0x9012, 0x10c5: 0x9012, + 0x10c6: 0x9012, 0x10c7: 0x9012, 0x10c8: 0x9012, 0x10c9: 0x9012, 0x10ca: 0x9012, 0x10cb: 0x9012, + 0x10cc: 0x9012, 0x10cd: 0x9012, 0x10ce: 0x9012, 0x10cf: 0x9012, 0x10d0: 0x9012, 0x10d1: 0x9012, + 0x10d2: 0x9012, 0x10d3: 0x9012, 0x10d4: 0x9012, 0x10d5: 0x9012, 0x10d6: 0x9012, 0x10d7: 0x9012, + 0x10d8: 0x9012, 0x10d9: 0x9012, 0x10da: 0x9012, 0x10db: 0x9012, 0x10dc: 0x9012, 0x10dd: 0x9012, + 0x10de: 0x9012, 0x10df: 0x9012, 0x10e0: 0x9049, 0x10e1: 0x9049, 0x10e2: 0x9049, 0x10e3: 0x9049, + 0x10e4: 0x9049, 0x10e5: 0x9049, 0x10e6: 0x9049, 0x10e7: 0x9049, 0x10e8: 0x9049, 0x10e9: 0x9049, + 0x10ea: 0x9049, 0x10eb: 0x9049, 0x10ec: 0x9049, 0x10ed: 0x9049, 0x10ee: 0x9049, 0x10ef: 0x9049, + 0x10f0: 0x9049, 0x10f1: 0x9049, 0x10f2: 0x9049, 0x10f3: 0x9049, 0x10f4: 0x9049, 0x10f5: 0x9049, + 0x10f6: 0x9049, 0x10f7: 0x9049, 0x10f8: 0x9049, 0x10f9: 0x9049, 0x10fa: 0x9049, 0x10fb: 0x9049, + 0x10fc: 0x9049, 0x10fd: 0x9049, 
0x10fe: 0x9049, 0x10ff: 0x9049, + // Block 0x44, offset 0x1100 + 0x1100: 0x9049, 0x1101: 0x9049, 0x1102: 0x9049, 0x1103: 0x9049, 0x1104: 0x9049, 0x1105: 0x9049, + 0x1106: 0x9049, 0x1107: 0x9049, 0x1108: 0x9049, 0x1109: 0x9049, 0x110a: 0x9049, 0x110b: 0x9049, + 0x110c: 0x9049, 0x110d: 0x9049, 0x110e: 0x9049, 0x110f: 0x9049, 0x1110: 0x9049, 0x1111: 0x9049, + 0x1112: 0x9049, 0x1113: 0x9049, 0x1114: 0x9049, 0x1115: 0x9049, 0x1116: 0x9049, 0x1117: 0x9049, + 0x1118: 0x9049, 0x1119: 0x9049, 0x111a: 0x9049, 0x111b: 0x9049, 0x111c: 0x9049, 0x111d: 0x9049, + 0x111e: 0x9049, 0x111f: 0x904a, 0x1120: 0x904b, 0x1121: 0xb04c, 0x1122: 0xb04d, 0x1123: 0xb04d, + 0x1124: 0xb04e, 0x1125: 0xb04f, 0x1126: 0xb050, 0x1127: 0xb051, 0x1128: 0xb052, 0x1129: 0xb053, + 0x112a: 0xb054, 0x112b: 0xb055, 0x112c: 0xb056, 0x112d: 0xb057, 0x112e: 0xb058, 0x112f: 0xb059, + 0x1130: 0xb05a, 0x1131: 0xb05b, 0x1132: 0xb05c, 0x1133: 0xb05d, 0x1134: 0xb05e, 0x1135: 0xb05f, + 0x1136: 0xb060, 0x1137: 0xb061, 0x1138: 0xb062, 0x1139: 0xb063, 0x113a: 0xb064, 0x113b: 0xb065, + 0x113c: 0xb052, 0x113d: 0xb066, 0x113e: 0xb067, 0x113f: 0xb055, + // Block 0x45, offset 0x1140 + 0x1140: 0xb068, 0x1141: 0xb069, 0x1142: 0xb06a, 0x1143: 0xb06b, 0x1144: 0xb05a, 0x1145: 0xb056, + 0x1146: 0xb06c, 0x1147: 0xb06d, 0x1148: 0xb06b, 0x1149: 0xb06e, 0x114a: 0xb06b, 0x114b: 0xb06f, + 0x114c: 0xb06f, 0x114d: 0xb070, 0x114e: 0xb070, 0x114f: 0xb071, 0x1150: 0xb056, 0x1151: 0xb072, + 0x1152: 0xb073, 0x1153: 0xb072, 0x1154: 0xb074, 0x1155: 0xb073, 0x1156: 0xb075, 0x1157: 0xb075, + 0x1158: 0xb076, 0x1159: 0xb076, 0x115a: 0xb077, 0x115b: 0xb077, 0x115c: 0xb073, 0x115d: 0xb078, + 0x115e: 0xb079, 0x115f: 0xb067, 0x1160: 0xb07a, 0x1161: 0xb07b, 0x1162: 0xb07b, 0x1163: 0xb07b, + 0x1164: 0xb07b, 0x1165: 0xb07b, 0x1166: 0xb07b, 0x1167: 0xb07b, 0x1168: 0xb07b, 0x1169: 0xb07b, + 0x116a: 0xb07b, 0x116b: 0xb07b, 0x116c: 0xb07b, 0x116d: 0xb07b, 0x116e: 0xb07b, 0x116f: 0xb07b, + 0x1170: 0xb07c, 0x1171: 0xb07c, 0x1172: 0xb07c, 0x1173: 0xb07c, 0x1174: 0xb07c, 0x1175: 0xb07c, + 0x1176: 0xb07c, 0x1177: 0xb07c, 0x1178: 0xb07c, 0x1179: 0xb07c, 0x117a: 0xb07c, 0x117b: 0xb07c, + 0x117c: 0xb07c, 0x117d: 0xb07c, 0x117e: 0xb07c, + // Block 0x46, offset 0x1180 + 0x1182: 0xb07d, 0x1183: 0xb07e, 0x1184: 0xb07f, 0x1185: 0xb080, + 0x1186: 0xb07f, 0x1187: 0xb07e, 0x118a: 0xb081, 0x118b: 0xb082, + 0x118c: 0xb083, 0x118d: 0xb07f, 0x118e: 0xb080, 0x118f: 0xb07f, + 0x1192: 0xb084, 0x1193: 0xb085, 0x1194: 0xb084, 0x1195: 0xb086, 0x1196: 0xb084, 0x1197: 0xb087, + 0x119a: 0xb088, 0x119b: 0xb089, 0x119c: 0xb08a, + 0x11a0: 0x908b, 0x11a1: 0x908b, 0x11a2: 0x908c, 0x11a3: 0x908d, + 0x11a4: 0x908b, 0x11a5: 0x908e, 0x11a6: 0x908f, 0x11a8: 0xb090, 0x11a9: 0xb091, + 0x11aa: 0xb092, 0x11ab: 0xb091, 0x11ac: 0xb093, 0x11ad: 0xb094, 0x11ae: 0xb095, + 0x11bd: 0x2000, + // Block 0x47, offset 0x11c0 + 0x11e0: 0x4000, 0x11e1: 0x4000, + // Block 0x48, offset 0x1200 + 0x1200: 0x4000, 0x1201: 0x4000, 0x1202: 0x4000, 0x1203: 0x4000, 0x1204: 0x4000, 0x1205: 0x4000, + 0x1206: 0x4000, 0x1207: 0x4000, 0x1208: 0x4000, 0x1209: 0x4000, 0x120a: 0x4000, 0x120b: 0x4000, + 0x120c: 0x4000, 0x120d: 0x4000, 0x120e: 0x4000, 0x120f: 0x4000, 0x1210: 0x4000, 0x1211: 0x4000, + 0x1212: 0x4000, 0x1213: 0x4000, 0x1214: 0x4000, 0x1215: 0x4000, 0x1216: 0x4000, 0x1217: 0x4000, + 0x1218: 0x4000, 0x1219: 0x4000, 0x121a: 0x4000, 0x121b: 0x4000, 0x121c: 0x4000, 0x121d: 0x4000, + 0x121e: 0x4000, 0x121f: 0x4000, 0x1220: 0x4000, 0x1221: 0x4000, 0x1222: 0x4000, 0x1223: 0x4000, + 0x1224: 0x4000, 0x1225: 0x4000, 0x1226: 0x4000, 0x1227: 0x4000, 0x1228: 
0x4000, 0x1229: 0x4000, + 0x122a: 0x4000, 0x122b: 0x4000, 0x122c: 0x4000, + // Block 0x49, offset 0x1240 + 0x1240: 0x4000, 0x1241: 0x4000, 0x1242: 0x4000, 0x1243: 0x4000, 0x1244: 0x4000, 0x1245: 0x4000, + 0x1246: 0x4000, 0x1247: 0x4000, 0x1248: 0x4000, 0x1249: 0x4000, 0x124a: 0x4000, 0x124b: 0x4000, + 0x124c: 0x4000, 0x124d: 0x4000, 0x124e: 0x4000, 0x124f: 0x4000, 0x1250: 0x4000, 0x1251: 0x4000, + 0x1252: 0x4000, 0x1253: 0x4000, 0x1254: 0x4000, 0x1255: 0x4000, 0x1256: 0x4000, 0x1257: 0x4000, + 0x1258: 0x4000, 0x1259: 0x4000, 0x125a: 0x4000, 0x125b: 0x4000, 0x125c: 0x4000, 0x125d: 0x4000, + 0x125e: 0x4000, 0x125f: 0x4000, 0x1260: 0x4000, 0x1261: 0x4000, 0x1262: 0x4000, 0x1263: 0x4000, + 0x1264: 0x4000, 0x1265: 0x4000, 0x1266: 0x4000, 0x1267: 0x4000, 0x1268: 0x4000, 0x1269: 0x4000, + 0x126a: 0x4000, 0x126b: 0x4000, 0x126c: 0x4000, 0x126d: 0x4000, 0x126e: 0x4000, 0x126f: 0x4000, + 0x1270: 0x4000, 0x1271: 0x4000, 0x1272: 0x4000, + // Block 0x4a, offset 0x1280 + 0x1280: 0x4000, 0x1281: 0x4000, 0x1282: 0x4000, 0x1283: 0x4000, 0x1284: 0x4000, 0x1285: 0x4000, + 0x1286: 0x4000, 0x1287: 0x4000, 0x1288: 0x4000, 0x1289: 0x4000, 0x128a: 0x4000, 0x128b: 0x4000, + 0x128c: 0x4000, 0x128d: 0x4000, 0x128e: 0x4000, 0x128f: 0x4000, 0x1290: 0x4000, 0x1291: 0x4000, + 0x1292: 0x4000, 0x1293: 0x4000, 0x1294: 0x4000, 0x1295: 0x4000, 0x1296: 0x4000, 0x1297: 0x4000, + 0x1298: 0x4000, 0x1299: 0x4000, 0x129a: 0x4000, 0x129b: 0x4000, 0x129c: 0x4000, 0x129d: 0x4000, + 0x129e: 0x4000, + // Block 0x4b, offset 0x12c0 + 0x12f0: 0x4000, 0x12f1: 0x4000, 0x12f2: 0x4000, 0x12f3: 0x4000, 0x12f4: 0x4000, 0x12f5: 0x4000, + 0x12f6: 0x4000, 0x12f7: 0x4000, 0x12f8: 0x4000, 0x12f9: 0x4000, 0x12fa: 0x4000, 0x12fb: 0x4000, + 0x12fc: 0x4000, 0x12fd: 0x4000, 0x12fe: 0x4000, 0x12ff: 0x4000, + // Block 0x4c, offset 0x1300 + 0x1300: 0x4000, 0x1301: 0x4000, 0x1302: 0x4000, 0x1303: 0x4000, 0x1304: 0x4000, 0x1305: 0x4000, + 0x1306: 0x4000, 0x1307: 0x4000, 0x1308: 0x4000, 0x1309: 0x4000, 0x130a: 0x4000, 0x130b: 0x4000, + 0x130c: 0x4000, 0x130d: 0x4000, 0x130e: 0x4000, 0x130f: 0x4000, 0x1310: 0x4000, 0x1311: 0x4000, + 0x1312: 0x4000, 0x1313: 0x4000, 0x1314: 0x4000, 0x1315: 0x4000, 0x1316: 0x4000, 0x1317: 0x4000, + 0x1318: 0x4000, 0x1319: 0x4000, 0x131a: 0x4000, 0x131b: 0x4000, 0x131c: 0x4000, 0x131d: 0x4000, + 0x131e: 0x4000, 0x131f: 0x4000, 0x1320: 0x4000, 0x1321: 0x4000, 0x1322: 0x4000, 0x1323: 0x4000, + 0x1324: 0x4000, 0x1325: 0x4000, 0x1326: 0x4000, 0x1327: 0x4000, 0x1328: 0x4000, 0x1329: 0x4000, + 0x132a: 0x4000, 0x132b: 0x4000, 0x132c: 0x4000, 0x132d: 0x4000, 0x132e: 0x4000, 0x132f: 0x4000, + 0x1330: 0x4000, 0x1331: 0x4000, 0x1332: 0x4000, 0x1333: 0x4000, 0x1334: 0x4000, 0x1335: 0x4000, + 0x1336: 0x4000, 0x1337: 0x4000, 0x1338: 0x4000, 0x1339: 0x4000, 0x133a: 0x4000, 0x133b: 0x4000, + // Block 0x4d, offset 0x1340 + 0x1344: 0x4000, + // Block 0x4e, offset 0x1380 + 0x138f: 0x4000, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x2000, 0x13c1: 0x2000, 0x13c2: 0x2000, 0x13c3: 0x2000, 0x13c4: 0x2000, 0x13c5: 0x2000, + 0x13c6: 0x2000, 0x13c7: 0x2000, 0x13c8: 0x2000, 0x13c9: 0x2000, 0x13ca: 0x2000, + 0x13d0: 0x2000, 0x13d1: 0x2000, + 0x13d2: 0x2000, 0x13d3: 0x2000, 0x13d4: 0x2000, 0x13d5: 0x2000, 0x13d6: 0x2000, 0x13d7: 0x2000, + 0x13d8: 0x2000, 0x13d9: 0x2000, 0x13da: 0x2000, 0x13db: 0x2000, 0x13dc: 0x2000, 0x13dd: 0x2000, + 0x13de: 0x2000, 0x13df: 0x2000, 0x13e0: 0x2000, 0x13e1: 0x2000, 0x13e2: 0x2000, 0x13e3: 0x2000, + 0x13e4: 0x2000, 0x13e5: 0x2000, 0x13e6: 0x2000, 0x13e7: 0x2000, 0x13e8: 0x2000, 0x13e9: 0x2000, + 0x13ea: 0x2000, 0x13eb: 0x2000, 
0x13ec: 0x2000, 0x13ed: 0x2000, + 0x13f0: 0x2000, 0x13f1: 0x2000, 0x13f2: 0x2000, 0x13f3: 0x2000, 0x13f4: 0x2000, 0x13f5: 0x2000, + 0x13f6: 0x2000, 0x13f7: 0x2000, 0x13f8: 0x2000, 0x13f9: 0x2000, 0x13fa: 0x2000, 0x13fb: 0x2000, + 0x13fc: 0x2000, 0x13fd: 0x2000, 0x13fe: 0x2000, 0x13ff: 0x2000, + // Block 0x50, offset 0x1400 + 0x1400: 0x2000, 0x1401: 0x2000, 0x1402: 0x2000, 0x1403: 0x2000, 0x1404: 0x2000, 0x1405: 0x2000, + 0x1406: 0x2000, 0x1407: 0x2000, 0x1408: 0x2000, 0x1409: 0x2000, 0x140a: 0x2000, 0x140b: 0x2000, + 0x140c: 0x2000, 0x140d: 0x2000, 0x140e: 0x2000, 0x140f: 0x2000, 0x1410: 0x2000, 0x1411: 0x2000, + 0x1412: 0x2000, 0x1413: 0x2000, 0x1414: 0x2000, 0x1415: 0x2000, 0x1416: 0x2000, 0x1417: 0x2000, + 0x1418: 0x2000, 0x1419: 0x2000, 0x141a: 0x2000, 0x141b: 0x2000, 0x141c: 0x2000, 0x141d: 0x2000, + 0x141e: 0x2000, 0x141f: 0x2000, 0x1420: 0x2000, 0x1421: 0x2000, 0x1422: 0x2000, 0x1423: 0x2000, + 0x1424: 0x2000, 0x1425: 0x2000, 0x1426: 0x2000, 0x1427: 0x2000, 0x1428: 0x2000, 0x1429: 0x2000, + 0x1430: 0x2000, 0x1431: 0x2000, 0x1432: 0x2000, 0x1433: 0x2000, 0x1434: 0x2000, 0x1435: 0x2000, + 0x1436: 0x2000, 0x1437: 0x2000, 0x1438: 0x2000, 0x1439: 0x2000, 0x143a: 0x2000, 0x143b: 0x2000, + 0x143c: 0x2000, 0x143d: 0x2000, 0x143e: 0x2000, 0x143f: 0x2000, + // Block 0x51, offset 0x1440 + 0x1440: 0x2000, 0x1441: 0x2000, 0x1442: 0x2000, 0x1443: 0x2000, 0x1444: 0x2000, 0x1445: 0x2000, + 0x1446: 0x2000, 0x1447: 0x2000, 0x1448: 0x2000, 0x1449: 0x2000, 0x144a: 0x2000, 0x144b: 0x2000, + 0x144c: 0x2000, 0x144d: 0x2000, 0x144e: 0x4000, 0x144f: 0x2000, 0x1450: 0x2000, 0x1451: 0x4000, + 0x1452: 0x4000, 0x1453: 0x4000, 0x1454: 0x4000, 0x1455: 0x4000, 0x1456: 0x4000, 0x1457: 0x4000, + 0x1458: 0x4000, 0x1459: 0x4000, 0x145a: 0x4000, 0x145b: 0x2000, 0x145c: 0x2000, 0x145d: 0x2000, + 0x145e: 0x2000, 0x145f: 0x2000, 0x1460: 0x2000, 0x1461: 0x2000, 0x1462: 0x2000, 0x1463: 0x2000, + 0x1464: 0x2000, 0x1465: 0x2000, 0x1466: 0x2000, 0x1467: 0x2000, 0x1468: 0x2000, 0x1469: 0x2000, + 0x146a: 0x2000, 0x146b: 0x2000, 0x146c: 0x2000, + // Block 0x52, offset 0x1480 + 0x1480: 0x4000, 0x1481: 0x4000, 0x1482: 0x4000, + 0x1490: 0x4000, 0x1491: 0x4000, + 0x1492: 0x4000, 0x1493: 0x4000, 0x1494: 0x4000, 0x1495: 0x4000, 0x1496: 0x4000, 0x1497: 0x4000, + 0x1498: 0x4000, 0x1499: 0x4000, 0x149a: 0x4000, 0x149b: 0x4000, 0x149c: 0x4000, 0x149d: 0x4000, + 0x149e: 0x4000, 0x149f: 0x4000, 0x14a0: 0x4000, 0x14a1: 0x4000, 0x14a2: 0x4000, 0x14a3: 0x4000, + 0x14a4: 0x4000, 0x14a5: 0x4000, 0x14a6: 0x4000, 0x14a7: 0x4000, 0x14a8: 0x4000, 0x14a9: 0x4000, + 0x14aa: 0x4000, 0x14ab: 0x4000, 0x14ac: 0x4000, 0x14ad: 0x4000, 0x14ae: 0x4000, 0x14af: 0x4000, + 0x14b0: 0x4000, 0x14b1: 0x4000, 0x14b2: 0x4000, 0x14b3: 0x4000, 0x14b4: 0x4000, 0x14b5: 0x4000, + 0x14b6: 0x4000, 0x14b7: 0x4000, 0x14b8: 0x4000, 0x14b9: 0x4000, 0x14ba: 0x4000, 0x14bb: 0x4000, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x4000, 0x14c1: 0x4000, 0x14c2: 0x4000, 0x14c3: 0x4000, 0x14c4: 0x4000, 0x14c5: 0x4000, + 0x14c6: 0x4000, 0x14c7: 0x4000, 0x14c8: 0x4000, + 0x14d0: 0x4000, 0x14d1: 0x4000, + 0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000, + 0x14e4: 0x4000, 0x14e5: 0x4000, + // Block 0x54, offset 0x1500 + 0x1500: 0x4000, 0x1501: 0x4000, 0x1502: 0x4000, 0x1503: 0x4000, 0x1504: 0x4000, 0x1505: 0x4000, + 0x1506: 0x4000, 0x1507: 0x4000, 0x1508: 0x4000, 0x1509: 0x4000, 0x150a: 0x4000, 0x150b: 0x4000, + 0x150c: 0x4000, 0x150d: 0x4000, 0x150e: 0x4000, 0x150f: 0x4000, 0x1510: 0x4000, 0x1511: 0x4000, + 0x1512: 0x4000, 0x1513: 0x4000, 0x1514: 0x4000, 0x1515: 0x4000, 0x1516: 
0x4000, 0x1517: 0x4000, + 0x1518: 0x4000, 0x1519: 0x4000, 0x151a: 0x4000, 0x151b: 0x4000, 0x151c: 0x4000, 0x151d: 0x4000, + 0x151e: 0x4000, 0x151f: 0x4000, 0x1520: 0x4000, + 0x152d: 0x4000, 0x152e: 0x4000, 0x152f: 0x4000, + 0x1530: 0x4000, 0x1531: 0x4000, 0x1532: 0x4000, 0x1533: 0x4000, 0x1534: 0x4000, 0x1535: 0x4000, + 0x1537: 0x4000, 0x1538: 0x4000, 0x1539: 0x4000, 0x153a: 0x4000, 0x153b: 0x4000, + 0x153c: 0x4000, 0x153d: 0x4000, 0x153e: 0x4000, 0x153f: 0x4000, + // Block 0x55, offset 0x1540 + 0x1540: 0x4000, 0x1541: 0x4000, 0x1542: 0x4000, 0x1543: 0x4000, 0x1544: 0x4000, 0x1545: 0x4000, + 0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000, 0x154b: 0x4000, + 0x154c: 0x4000, 0x154d: 0x4000, 0x154e: 0x4000, 0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000, + 0x1552: 0x4000, 0x1553: 0x4000, 0x1554: 0x4000, 0x1555: 0x4000, 0x1556: 0x4000, 0x1557: 0x4000, + 0x1558: 0x4000, 0x1559: 0x4000, 0x155a: 0x4000, 0x155b: 0x4000, 0x155c: 0x4000, 0x155d: 0x4000, + 0x155e: 0x4000, 0x155f: 0x4000, 0x1560: 0x4000, 0x1561: 0x4000, 0x1562: 0x4000, 0x1563: 0x4000, + 0x1564: 0x4000, 0x1565: 0x4000, 0x1566: 0x4000, 0x1567: 0x4000, 0x1568: 0x4000, 0x1569: 0x4000, + 0x156a: 0x4000, 0x156b: 0x4000, 0x156c: 0x4000, 0x156d: 0x4000, 0x156e: 0x4000, 0x156f: 0x4000, + 0x1570: 0x4000, 0x1571: 0x4000, 0x1572: 0x4000, 0x1573: 0x4000, 0x1574: 0x4000, 0x1575: 0x4000, + 0x1576: 0x4000, 0x1577: 0x4000, 0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000, + 0x157c: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000, + // Block 0x56, offset 0x1580 + 0x1580: 0x4000, 0x1581: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000, + 0x1586: 0x4000, 0x1587: 0x4000, 0x1588: 0x4000, 0x1589: 0x4000, 0x158a: 0x4000, 0x158b: 0x4000, + 0x158c: 0x4000, 0x158d: 0x4000, 0x158e: 0x4000, 0x158f: 0x4000, 0x1590: 0x4000, 0x1591: 0x4000, + 0x1592: 0x4000, 0x1593: 0x4000, + 0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000, + 0x15a4: 0x4000, 0x15a5: 0x4000, 0x15a6: 0x4000, 0x15a7: 0x4000, 0x15a8: 0x4000, 0x15a9: 0x4000, + 0x15aa: 0x4000, 0x15ab: 0x4000, 0x15ac: 0x4000, 0x15ad: 0x4000, 0x15ae: 0x4000, 0x15af: 0x4000, + 0x15b0: 0x4000, 0x15b1: 0x4000, 0x15b2: 0x4000, 0x15b3: 0x4000, 0x15b4: 0x4000, 0x15b5: 0x4000, + 0x15b6: 0x4000, 0x15b7: 0x4000, 0x15b8: 0x4000, 0x15b9: 0x4000, 0x15ba: 0x4000, 0x15bb: 0x4000, + 0x15bc: 0x4000, 0x15bd: 0x4000, 0x15be: 0x4000, 0x15bf: 0x4000, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x4000, 0x15c1: 0x4000, 0x15c2: 0x4000, 0x15c3: 0x4000, 0x15c4: 0x4000, 0x15c5: 0x4000, + 0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000, + 0x15cf: 0x4000, 0x15d0: 0x4000, 0x15d1: 0x4000, + 0x15d2: 0x4000, 0x15d3: 0x4000, + 0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000, + 0x15e4: 0x4000, 0x15e5: 0x4000, 0x15e6: 0x4000, 0x15e7: 0x4000, 0x15e8: 0x4000, 0x15e9: 0x4000, + 0x15ea: 0x4000, 0x15eb: 0x4000, 0x15ec: 0x4000, 0x15ed: 0x4000, 0x15ee: 0x4000, 0x15ef: 0x4000, + 0x15f0: 0x4000, 0x15f4: 0x4000, + 0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000, + 0x15fc: 0x4000, 0x15fd: 0x4000, 0x15fe: 0x4000, 0x15ff: 0x4000, + // Block 0x58, offset 0x1600 + 0x1600: 0x4000, 0x1602: 0x4000, 0x1603: 0x4000, 0x1604: 0x4000, 0x1605: 0x4000, + 0x1606: 0x4000, 0x1607: 0x4000, 0x1608: 0x4000, 0x1609: 0x4000, 0x160a: 0x4000, 0x160b: 0x4000, + 0x160c: 0x4000, 0x160d: 0x4000, 0x160e: 0x4000, 0x160f: 0x4000, 0x1610: 0x4000, 0x1611: 0x4000, + 0x1612: 0x4000, 0x1613: 0x4000, 0x1614: 0x4000, 0x1615: 0x4000, 0x1616: 0x4000, 0x1617: 0x4000, + 0x1618: 
0x4000, 0x1619: 0x4000, 0x161a: 0x4000, 0x161b: 0x4000, 0x161c: 0x4000, 0x161d: 0x4000, + 0x161e: 0x4000, 0x161f: 0x4000, 0x1620: 0x4000, 0x1621: 0x4000, 0x1622: 0x4000, 0x1623: 0x4000, + 0x1624: 0x4000, 0x1625: 0x4000, 0x1626: 0x4000, 0x1627: 0x4000, 0x1628: 0x4000, 0x1629: 0x4000, + 0x162a: 0x4000, 0x162b: 0x4000, 0x162c: 0x4000, 0x162d: 0x4000, 0x162e: 0x4000, 0x162f: 0x4000, + 0x1630: 0x4000, 0x1631: 0x4000, 0x1632: 0x4000, 0x1633: 0x4000, 0x1634: 0x4000, 0x1635: 0x4000, + 0x1636: 0x4000, 0x1637: 0x4000, 0x1638: 0x4000, 0x1639: 0x4000, 0x163a: 0x4000, 0x163b: 0x4000, + 0x163c: 0x4000, 0x163d: 0x4000, 0x163e: 0x4000, 0x163f: 0x4000, + // Block 0x59, offset 0x1640 + 0x1640: 0x4000, 0x1641: 0x4000, 0x1642: 0x4000, 0x1643: 0x4000, 0x1644: 0x4000, 0x1645: 0x4000, + 0x1646: 0x4000, 0x1647: 0x4000, 0x1648: 0x4000, 0x1649: 0x4000, 0x164a: 0x4000, 0x164b: 0x4000, + 0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x164f: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000, + 0x1652: 0x4000, 0x1653: 0x4000, 0x1654: 0x4000, 0x1655: 0x4000, 0x1656: 0x4000, 0x1657: 0x4000, + 0x1658: 0x4000, 0x1659: 0x4000, 0x165a: 0x4000, 0x165b: 0x4000, 0x165c: 0x4000, 0x165d: 0x4000, + 0x165e: 0x4000, 0x165f: 0x4000, 0x1660: 0x4000, 0x1661: 0x4000, 0x1662: 0x4000, 0x1663: 0x4000, + 0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000, 0x1668: 0x4000, 0x1669: 0x4000, + 0x166a: 0x4000, 0x166b: 0x4000, 0x166c: 0x4000, 0x166d: 0x4000, 0x166e: 0x4000, 0x166f: 0x4000, + 0x1670: 0x4000, 0x1671: 0x4000, 0x1672: 0x4000, 0x1673: 0x4000, 0x1674: 0x4000, 0x1675: 0x4000, + 0x1676: 0x4000, 0x1677: 0x4000, 0x1678: 0x4000, 0x1679: 0x4000, 0x167a: 0x4000, 0x167b: 0x4000, + 0x167c: 0x4000, 0x167f: 0x4000, + // Block 0x5a, offset 0x1680 + 0x1680: 0x4000, 0x1681: 0x4000, 0x1682: 0x4000, 0x1683: 0x4000, 0x1684: 0x4000, 0x1685: 0x4000, + 0x1686: 0x4000, 0x1687: 0x4000, 0x1688: 0x4000, 0x1689: 0x4000, 0x168a: 0x4000, 0x168b: 0x4000, + 0x168c: 0x4000, 0x168d: 0x4000, 0x168e: 0x4000, 0x168f: 0x4000, 0x1690: 0x4000, 0x1691: 0x4000, + 0x1692: 0x4000, 0x1693: 0x4000, 0x1694: 0x4000, 0x1695: 0x4000, 0x1696: 0x4000, 0x1697: 0x4000, + 0x1698: 0x4000, 0x1699: 0x4000, 0x169a: 0x4000, 0x169b: 0x4000, 0x169c: 0x4000, 0x169d: 0x4000, + 0x169e: 0x4000, 0x169f: 0x4000, 0x16a0: 0x4000, 0x16a1: 0x4000, 0x16a2: 0x4000, 0x16a3: 0x4000, + 0x16a4: 0x4000, 0x16a5: 0x4000, 0x16a6: 0x4000, 0x16a7: 0x4000, 0x16a8: 0x4000, 0x16a9: 0x4000, + 0x16aa: 0x4000, 0x16ab: 0x4000, 0x16ac: 0x4000, 0x16ad: 0x4000, 0x16ae: 0x4000, 0x16af: 0x4000, + 0x16b0: 0x4000, 0x16b1: 0x4000, 0x16b2: 0x4000, 0x16b3: 0x4000, 0x16b4: 0x4000, 0x16b5: 0x4000, + 0x16b6: 0x4000, 0x16b7: 0x4000, 0x16b8: 0x4000, 0x16b9: 0x4000, 0x16ba: 0x4000, 0x16bb: 0x4000, + 0x16bc: 0x4000, 0x16bd: 0x4000, + // Block 0x5b, offset 0x16c0 + 0x16cb: 0x4000, + 0x16cc: 0x4000, 0x16cd: 0x4000, 0x16ce: 0x4000, 0x16d0: 0x4000, 0x16d1: 0x4000, + 0x16d2: 0x4000, 0x16d3: 0x4000, 0x16d4: 0x4000, 0x16d5: 0x4000, 0x16d6: 0x4000, 0x16d7: 0x4000, + 0x16d8: 0x4000, 0x16d9: 0x4000, 0x16da: 0x4000, 0x16db: 0x4000, 0x16dc: 0x4000, 0x16dd: 0x4000, + 0x16de: 0x4000, 0x16df: 0x4000, 0x16e0: 0x4000, 0x16e1: 0x4000, 0x16e2: 0x4000, 0x16e3: 0x4000, + 0x16e4: 0x4000, 0x16e5: 0x4000, 0x16e6: 0x4000, 0x16e7: 0x4000, + 0x16fa: 0x4000, + // Block 0x5c, offset 0x1700 + 0x1715: 0x4000, 0x1716: 0x4000, + 0x1724: 0x4000, + // Block 0x5d, offset 0x1740 + 0x177b: 0x4000, + 0x177c: 0x4000, 0x177d: 0x4000, 0x177e: 0x4000, 0x177f: 0x4000, + // Block 0x5e, offset 0x1780 + 0x1780: 0x4000, 0x1781: 0x4000, 0x1782: 0x4000, 0x1783: 0x4000, 0x1784: 0x4000, 
0x1785: 0x4000, + 0x1786: 0x4000, 0x1787: 0x4000, 0x1788: 0x4000, 0x1789: 0x4000, 0x178a: 0x4000, 0x178b: 0x4000, + 0x178c: 0x4000, 0x178d: 0x4000, 0x178e: 0x4000, 0x178f: 0x4000, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x4000, 0x17c1: 0x4000, 0x17c2: 0x4000, 0x17c3: 0x4000, 0x17c4: 0x4000, 0x17c5: 0x4000, + 0x17cc: 0x4000, 0x17d0: 0x4000, 0x17d1: 0x4000, + 0x17d2: 0x4000, + 0x17eb: 0x4000, 0x17ec: 0x4000, + 0x17f4: 0x4000, 0x17f5: 0x4000, + 0x17f6: 0x4000, 0x17f7: 0x4000, 0x17f8: 0x4000, + // Block 0x60, offset 0x1800 + 0x1810: 0x4000, 0x1811: 0x4000, + 0x1812: 0x4000, 0x1813: 0x4000, 0x1814: 0x4000, 0x1815: 0x4000, 0x1816: 0x4000, 0x1817: 0x4000, + 0x1818: 0x4000, 0x1819: 0x4000, 0x181a: 0x4000, 0x181b: 0x4000, 0x181c: 0x4000, 0x181d: 0x4000, + 0x181e: 0x4000, 0x181f: 0x4000, 0x1820: 0x4000, 0x1821: 0x4000, 0x1822: 0x4000, 0x1823: 0x4000, + 0x1824: 0x4000, 0x1825: 0x4000, 0x1826: 0x4000, 0x1827: 0x4000, 0x1828: 0x4000, 0x1829: 0x4000, + 0x182a: 0x4000, 0x182b: 0x4000, 0x182c: 0x4000, 0x182d: 0x4000, 0x182e: 0x4000, 0x182f: 0x4000, + 0x1830: 0x4000, 0x1831: 0x4000, 0x1832: 0x4000, 0x1833: 0x4000, 0x1834: 0x4000, 0x1835: 0x4000, + 0x1836: 0x4000, 0x1837: 0x4000, 0x1838: 0x4000, 0x1839: 0x4000, 0x183a: 0x4000, 0x183b: 0x4000, + 0x183c: 0x4000, 0x183d: 0x4000, 0x183e: 0x4000, + // Block 0x61, offset 0x1840 + 0x1840: 0x4000, 0x1841: 0x4000, 0x1842: 0x4000, 0x1843: 0x4000, 0x1844: 0x4000, 0x1845: 0x4000, + 0x1846: 0x4000, 0x1847: 0x4000, 0x1848: 0x4000, 0x1849: 0x4000, 0x184a: 0x4000, 0x184b: 0x4000, + 0x184c: 0x4000, 0x1850: 0x4000, 0x1851: 0x4000, + 0x1852: 0x4000, 0x1853: 0x4000, 0x1854: 0x4000, 0x1855: 0x4000, 0x1856: 0x4000, 0x1857: 0x4000, + 0x1858: 0x4000, 0x1859: 0x4000, 0x185a: 0x4000, 0x185b: 0x4000, 0x185c: 0x4000, 0x185d: 0x4000, + 0x185e: 0x4000, 0x185f: 0x4000, 0x1860: 0x4000, 0x1861: 0x4000, 0x1862: 0x4000, 0x1863: 0x4000, + 0x1864: 0x4000, 0x1865: 0x4000, 0x1866: 0x4000, 0x1867: 0x4000, 0x1868: 0x4000, 0x1869: 0x4000, + 0x186a: 0x4000, 0x186b: 0x4000, + // Block 0x62, offset 0x1880 + 0x1880: 0x4000, 0x1881: 0x4000, 0x1882: 0x4000, 0x1883: 0x4000, 0x1884: 0x4000, 0x1885: 0x4000, + 0x1886: 0x4000, 0x1887: 0x4000, 0x1888: 0x4000, 0x1889: 0x4000, 0x188a: 0x4000, 0x188b: 0x4000, + 0x188c: 0x4000, 0x188d: 0x4000, 0x188e: 0x4000, 0x188f: 0x4000, 0x1890: 0x4000, 0x1891: 0x4000, + 0x1892: 0x4000, 0x1893: 0x4000, 0x1894: 0x4000, 0x1895: 0x4000, 0x1896: 0x4000, 0x1897: 0x4000, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x4000, + 0x18d0: 0x4000, 0x18d1: 0x4000, + 0x18d2: 0x4000, 0x18d3: 0x4000, 0x18d4: 0x4000, 0x18d5: 0x4000, 0x18d6: 0x4000, 0x18d7: 0x4000, + 0x18d8: 0x4000, 0x18d9: 0x4000, 0x18da: 0x4000, 0x18db: 0x4000, 0x18dc: 0x4000, 0x18dd: 0x4000, + 0x18de: 0x4000, 0x18df: 0x4000, 0x18e0: 0x4000, 0x18e1: 0x4000, 0x18e2: 0x4000, 0x18e3: 0x4000, + 0x18e4: 0x4000, 0x18e5: 0x4000, 0x18e6: 0x4000, + // Block 0x64, offset 0x1900 + 0x1900: 0x2000, 0x1901: 0x2000, 0x1902: 0x2000, 0x1903: 0x2000, 0x1904: 0x2000, 0x1905: 0x2000, + 0x1906: 0x2000, 0x1907: 0x2000, 0x1908: 0x2000, 0x1909: 0x2000, 0x190a: 0x2000, 0x190b: 0x2000, + 0x190c: 0x2000, 0x190d: 0x2000, 0x190e: 0x2000, 0x190f: 0x2000, 0x1910: 0x2000, 0x1911: 0x2000, + 0x1912: 0x2000, 0x1913: 0x2000, 0x1914: 0x2000, 0x1915: 0x2000, 0x1916: 0x2000, 0x1917: 0x2000, + 0x1918: 0x2000, 0x1919: 0x2000, 0x191a: 0x2000, 0x191b: 0x2000, 0x191c: 0x2000, 0x191d: 0x2000, + 0x191e: 0x2000, 0x191f: 0x2000, 0x1920: 0x2000, 0x1921: 0x2000, 0x1922: 0x2000, 0x1923: 0x2000, + 0x1924: 0x2000, 0x1925: 0x2000, 0x1926: 0x2000, 0x1927: 0x2000, 0x1928: 0x2000, 
0x1929: 0x2000, + 0x192a: 0x2000, 0x192b: 0x2000, 0x192c: 0x2000, 0x192d: 0x2000, 0x192e: 0x2000, 0x192f: 0x2000, + 0x1930: 0x2000, 0x1931: 0x2000, 0x1932: 0x2000, 0x1933: 0x2000, 0x1934: 0x2000, 0x1935: 0x2000, + 0x1936: 0x2000, 0x1937: 0x2000, 0x1938: 0x2000, 0x1939: 0x2000, 0x193a: 0x2000, 0x193b: 0x2000, + 0x193c: 0x2000, 0x193d: 0x2000, +} + +// widthIndex: 22 blocks, 1408 entries, 1408 bytes +// Block 0 is the zero block. +var widthIndex = [1408]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc7: 0x05, + 0xc9: 0x06, 0xcb: 0x07, 0xcc: 0x08, 0xcd: 0x09, 0xce: 0x0a, 0xcf: 0x0b, + 0xd0: 0x0c, 0xd1: 0x0d, + 0xe1: 0x02, 0xe2: 0x03, 0xe3: 0x04, 0xe4: 0x05, 0xe5: 0x06, 0xe6: 0x06, 0xe7: 0x06, + 0xe8: 0x06, 0xe9: 0x06, 0xea: 0x07, 0xeb: 0x06, 0xec: 0x06, 0xed: 0x08, 0xee: 0x09, 0xef: 0x0a, + 0xf0: 0x0f, 0xf3: 0x12, 0xf4: 0x13, + // Block 0x4, offset 0x100 + 0x104: 0x0e, 0x105: 0x0f, + // Block 0x5, offset 0x140 + 0x140: 0x10, 0x141: 0x11, 0x142: 0x12, 0x144: 0x13, 0x145: 0x14, 0x146: 0x15, 0x147: 0x16, + 0x148: 0x17, 0x149: 0x18, 0x14a: 0x19, 0x14c: 0x1a, 0x14f: 0x1b, + 0x151: 0x1c, 0x152: 0x08, 0x153: 0x1d, 0x154: 0x1e, 0x155: 0x1f, 0x156: 0x20, 0x157: 0x21, + 0x158: 0x22, 0x159: 0x23, 0x15a: 0x24, 0x15b: 0x25, 0x15c: 0x26, 0x15d: 0x27, 0x15e: 0x28, 0x15f: 0x29, + 0x166: 0x2a, + 0x16c: 0x2b, 0x16d: 0x2c, + 0x17a: 0x2d, 0x17b: 0x2e, 0x17c: 0x0e, 0x17d: 0x0e, 0x17e: 0x0e, 0x17f: 0x2f, + // Block 0x6, offset 0x180 + 0x180: 0x30, 0x181: 0x31, 0x182: 0x32, 0x183: 0x33, 0x184: 0x34, 0x185: 0x35, 0x186: 0x36, 0x187: 0x37, + 0x188: 0x38, 0x189: 0x39, 0x18a: 0x0e, 0x18b: 0x3a, 0x18c: 0x0e, 0x18d: 0x0e, 0x18e: 0x0e, 0x18f: 0x0e, + 0x190: 0x0e, 0x191: 0x0e, 0x192: 0x0e, 0x193: 0x0e, 0x194: 0x0e, 0x195: 0x0e, 0x196: 0x0e, 0x197: 0x0e, + 0x198: 0x0e, 0x199: 0x0e, 0x19a: 0x0e, 0x19b: 0x0e, 0x19c: 0x0e, 0x19d: 0x0e, 0x19e: 0x0e, 0x19f: 0x0e, + 0x1a0: 0x0e, 0x1a1: 0x0e, 0x1a2: 0x0e, 0x1a3: 0x0e, 0x1a4: 0x0e, 0x1a5: 0x0e, 0x1a6: 0x0e, 0x1a7: 0x0e, + 0x1a8: 0x0e, 0x1a9: 0x0e, 0x1aa: 0x0e, 0x1ab: 0x0e, 0x1ac: 0x0e, 0x1ad: 0x0e, 0x1ae: 0x0e, 0x1af: 0x0e, + 0x1b0: 0x0e, 0x1b1: 0x0e, 0x1b2: 0x0e, 0x1b3: 0x0e, 0x1b4: 0x0e, 0x1b5: 0x0e, 0x1b6: 0x0e, 0x1b7: 0x0e, + 0x1b8: 0x0e, 0x1b9: 0x0e, 0x1ba: 0x0e, 0x1bb: 0x0e, 0x1bc: 0x0e, 0x1bd: 0x0e, 0x1be: 0x0e, 0x1bf: 0x0e, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0e, 0x1c1: 0x0e, 0x1c2: 0x0e, 0x1c3: 0x0e, 0x1c4: 0x0e, 0x1c5: 0x0e, 0x1c6: 0x0e, 0x1c7: 0x0e, + 0x1c8: 0x0e, 0x1c9: 0x0e, 0x1ca: 0x0e, 0x1cb: 0x0e, 0x1cc: 0x0e, 0x1cd: 0x0e, 0x1ce: 0x0e, 0x1cf: 0x0e, + 0x1d0: 0x0e, 0x1d1: 0x0e, 0x1d2: 0x0e, 0x1d3: 0x0e, 0x1d4: 0x0e, 0x1d5: 0x0e, 0x1d6: 0x0e, 0x1d7: 0x0e, + 0x1d8: 0x0e, 0x1d9: 0x0e, 0x1da: 0x0e, 0x1db: 0x0e, 0x1dc: 0x0e, 0x1dd: 0x0e, 0x1de: 0x0e, 0x1df: 0x0e, + 0x1e0: 0x0e, 0x1e1: 0x0e, 0x1e2: 0x0e, 0x1e3: 0x0e, 0x1e4: 0x0e, 0x1e5: 0x0e, 0x1e6: 0x0e, 0x1e7: 0x0e, + 0x1e8: 0x0e, 0x1e9: 0x0e, 0x1ea: 0x0e, 0x1eb: 0x0e, 0x1ec: 0x0e, 0x1ed: 0x0e, 0x1ee: 0x0e, 0x1ef: 0x0e, + 0x1f0: 0x0e, 0x1f1: 0x0e, 0x1f2: 0x0e, 0x1f3: 0x0e, 0x1f4: 0x0e, 0x1f5: 0x0e, 0x1f6: 0x0e, + 0x1f8: 0x0e, 0x1f9: 0x0e, 0x1fa: 0x0e, 0x1fb: 0x0e, 0x1fc: 0x0e, 0x1fd: 0x0e, 0x1fe: 0x0e, 0x1ff: 0x0e, + // Block 0x8, offset 0x200 + 0x200: 0x0e, 0x201: 0x0e, 0x202: 0x0e, 0x203: 0x0e, 0x204: 0x0e, 0x205: 0x0e, 0x206: 0x0e, 0x207: 0x0e, + 0x208: 0x0e, 0x209: 0x0e, 0x20a: 0x0e, 0x20b: 0x0e, 0x20c: 0x0e, 0x20d: 0x0e, 0x20e: 0x0e, 0x20f: 0x0e, + 0x210: 0x0e, 0x211: 0x0e, 0x212: 0x0e, 
0x213: 0x0e, 0x214: 0x0e, 0x215: 0x0e, 0x216: 0x0e, 0x217: 0x0e, + 0x218: 0x0e, 0x219: 0x0e, 0x21a: 0x0e, 0x21b: 0x0e, 0x21c: 0x0e, 0x21d: 0x0e, 0x21e: 0x0e, 0x21f: 0x0e, + 0x220: 0x0e, 0x221: 0x0e, 0x222: 0x0e, 0x223: 0x0e, 0x224: 0x0e, 0x225: 0x0e, 0x226: 0x0e, 0x227: 0x0e, + 0x228: 0x0e, 0x229: 0x0e, 0x22a: 0x0e, 0x22b: 0x0e, 0x22c: 0x0e, 0x22d: 0x0e, 0x22e: 0x0e, 0x22f: 0x0e, + 0x230: 0x0e, 0x231: 0x0e, 0x232: 0x0e, 0x233: 0x0e, 0x234: 0x0e, 0x235: 0x0e, 0x236: 0x0e, 0x237: 0x0e, + 0x238: 0x0e, 0x239: 0x0e, 0x23a: 0x0e, 0x23b: 0x0e, 0x23c: 0x0e, 0x23d: 0x0e, 0x23e: 0x0e, 0x23f: 0x0e, + // Block 0x9, offset 0x240 + 0x240: 0x0e, 0x241: 0x0e, 0x242: 0x0e, 0x243: 0x0e, 0x244: 0x0e, 0x245: 0x0e, 0x246: 0x0e, 0x247: 0x0e, + 0x248: 0x0e, 0x249: 0x0e, 0x24a: 0x0e, 0x24b: 0x0e, 0x24c: 0x0e, 0x24d: 0x0e, 0x24e: 0x0e, 0x24f: 0x0e, + 0x250: 0x0e, 0x251: 0x0e, 0x252: 0x3b, 0x253: 0x3c, + 0x265: 0x3d, + 0x270: 0x0e, 0x271: 0x0e, 0x272: 0x0e, 0x273: 0x0e, 0x274: 0x0e, 0x275: 0x0e, 0x276: 0x0e, 0x277: 0x0e, + 0x278: 0x0e, 0x279: 0x0e, 0x27a: 0x0e, 0x27b: 0x0e, 0x27c: 0x0e, 0x27d: 0x0e, 0x27e: 0x0e, 0x27f: 0x0e, + // Block 0xa, offset 0x280 + 0x280: 0x0e, 0x281: 0x0e, 0x282: 0x0e, 0x283: 0x0e, 0x284: 0x0e, 0x285: 0x0e, 0x286: 0x0e, 0x287: 0x0e, + 0x288: 0x0e, 0x289: 0x0e, 0x28a: 0x0e, 0x28b: 0x0e, 0x28c: 0x0e, 0x28d: 0x0e, 0x28e: 0x0e, 0x28f: 0x0e, + 0x290: 0x0e, 0x291: 0x0e, 0x292: 0x0e, 0x293: 0x0e, 0x294: 0x0e, 0x295: 0x0e, 0x296: 0x0e, 0x297: 0x0e, + 0x298: 0x0e, 0x299: 0x0e, 0x29a: 0x0e, 0x29b: 0x0e, 0x29c: 0x0e, 0x29d: 0x0e, 0x29e: 0x3e, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x08, 0x2c1: 0x08, 0x2c2: 0x08, 0x2c3: 0x08, 0x2c4: 0x08, 0x2c5: 0x08, 0x2c6: 0x08, 0x2c7: 0x08, + 0x2c8: 0x08, 0x2c9: 0x08, 0x2ca: 0x08, 0x2cb: 0x08, 0x2cc: 0x08, 0x2cd: 0x08, 0x2ce: 0x08, 0x2cf: 0x08, + 0x2d0: 0x08, 0x2d1: 0x08, 0x2d2: 0x08, 0x2d3: 0x08, 0x2d4: 0x08, 0x2d5: 0x08, 0x2d6: 0x08, 0x2d7: 0x08, + 0x2d8: 0x08, 0x2d9: 0x08, 0x2da: 0x08, 0x2db: 0x08, 0x2dc: 0x08, 0x2dd: 0x08, 0x2de: 0x08, 0x2df: 0x08, + 0x2e0: 0x08, 0x2e1: 0x08, 0x2e2: 0x08, 0x2e3: 0x08, 0x2e4: 0x08, 0x2e5: 0x08, 0x2e6: 0x08, 0x2e7: 0x08, + 0x2e8: 0x08, 0x2e9: 0x08, 0x2ea: 0x08, 0x2eb: 0x08, 0x2ec: 0x08, 0x2ed: 0x08, 0x2ee: 0x08, 0x2ef: 0x08, + 0x2f0: 0x08, 0x2f1: 0x08, 0x2f2: 0x08, 0x2f3: 0x08, 0x2f4: 0x08, 0x2f5: 0x08, 0x2f6: 0x08, 0x2f7: 0x08, + 0x2f8: 0x08, 0x2f9: 0x08, 0x2fa: 0x08, 0x2fb: 0x08, 0x2fc: 0x08, 0x2fd: 0x08, 0x2fe: 0x08, 0x2ff: 0x08, + // Block 0xc, offset 0x300 + 0x300: 0x08, 0x301: 0x08, 0x302: 0x08, 0x303: 0x08, 0x304: 0x08, 0x305: 0x08, 0x306: 0x08, 0x307: 0x08, + 0x308: 0x08, 0x309: 0x08, 0x30a: 0x08, 0x30b: 0x08, 0x30c: 0x08, 0x30d: 0x08, 0x30e: 0x08, 0x30f: 0x08, + 0x310: 0x08, 0x311: 0x08, 0x312: 0x08, 0x313: 0x08, 0x314: 0x08, 0x315: 0x08, 0x316: 0x08, 0x317: 0x08, + 0x318: 0x08, 0x319: 0x08, 0x31a: 0x08, 0x31b: 0x08, 0x31c: 0x08, 0x31d: 0x08, 0x31e: 0x08, 0x31f: 0x08, + 0x320: 0x08, 0x321: 0x08, 0x322: 0x08, 0x323: 0x08, 0x324: 0x0e, 0x325: 0x0e, 0x326: 0x0e, 0x327: 0x0e, + 0x328: 0x0e, 0x329: 0x0e, 0x32a: 0x0e, 0x32b: 0x0e, + 0x338: 0x3f, 0x339: 0x40, 0x33c: 0x41, 0x33d: 0x42, 0x33e: 0x43, 0x33f: 0x44, + // Block 0xd, offset 0x340 + 0x37f: 0x45, + // Block 0xe, offset 0x380 + 0x380: 0x0e, 0x381: 0x0e, 0x382: 0x0e, 0x383: 0x0e, 0x384: 0x0e, 0x385: 0x0e, 0x386: 0x0e, 0x387: 0x0e, + 0x388: 0x0e, 0x389: 0x0e, 0x38a: 0x0e, 0x38b: 0x0e, 0x38c: 0x0e, 0x38d: 0x0e, 0x38e: 0x0e, 0x38f: 0x0e, + 0x390: 0x0e, 0x391: 0x0e, 0x392: 0x0e, 0x393: 0x0e, 0x394: 0x0e, 0x395: 0x0e, 0x396: 0x0e, 0x397: 0x0e, + 0x398: 0x0e, 0x399: 0x0e, 0x39a: 
0x0e, 0x39b: 0x0e, 0x39c: 0x0e, 0x39d: 0x0e, 0x39e: 0x0e, 0x39f: 0x46, + 0x3a0: 0x0e, 0x3a1: 0x0e, 0x3a2: 0x0e, 0x3a3: 0x0e, 0x3a4: 0x0e, 0x3a5: 0x0e, 0x3a6: 0x0e, 0x3a7: 0x0e, + 0x3a8: 0x0e, 0x3a9: 0x0e, 0x3aa: 0x0e, 0x3ab: 0x47, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x0e, 0x3c1: 0x0e, 0x3c2: 0x0e, 0x3c3: 0x0e, 0x3c4: 0x48, 0x3c5: 0x49, 0x3c6: 0x0e, 0x3c7: 0x0e, + 0x3c8: 0x0e, 0x3c9: 0x0e, 0x3ca: 0x0e, 0x3cb: 0x4a, + // Block 0x10, offset 0x400 + 0x400: 0x4b, 0x403: 0x4c, 0x404: 0x4d, 0x405: 0x4e, 0x406: 0x4f, + 0x408: 0x50, 0x409: 0x51, 0x40c: 0x52, 0x40d: 0x53, 0x40e: 0x54, 0x40f: 0x55, + 0x410: 0x3a, 0x411: 0x56, 0x412: 0x0e, 0x413: 0x57, 0x414: 0x58, 0x415: 0x59, 0x416: 0x5a, 0x417: 0x5b, + 0x418: 0x0e, 0x419: 0x5c, 0x41a: 0x0e, 0x41b: 0x5d, + 0x424: 0x5e, 0x425: 0x5f, 0x426: 0x60, 0x427: 0x61, + // Block 0x11, offset 0x440 + 0x456: 0x0b, 0x457: 0x06, + 0x458: 0x0c, 0x45b: 0x0d, 0x45f: 0x0e, + 0x460: 0x06, 0x461: 0x06, 0x462: 0x06, 0x463: 0x06, 0x464: 0x06, 0x465: 0x06, 0x466: 0x06, 0x467: 0x06, + 0x468: 0x06, 0x469: 0x06, 0x46a: 0x06, 0x46b: 0x06, 0x46c: 0x06, 0x46d: 0x06, 0x46e: 0x06, 0x46f: 0x06, + 0x470: 0x06, 0x471: 0x06, 0x472: 0x06, 0x473: 0x06, 0x474: 0x06, 0x475: 0x06, 0x476: 0x06, 0x477: 0x06, + 0x478: 0x06, 0x479: 0x06, 0x47a: 0x06, 0x47b: 0x06, 0x47c: 0x06, 0x47d: 0x06, 0x47e: 0x06, 0x47f: 0x06, + // Block 0x12, offset 0x480 + 0x484: 0x08, 0x485: 0x08, 0x486: 0x08, 0x487: 0x09, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x08, 0x4c1: 0x08, 0x4c2: 0x08, 0x4c3: 0x08, 0x4c4: 0x08, 0x4c5: 0x08, 0x4c6: 0x08, 0x4c7: 0x08, + 0x4c8: 0x08, 0x4c9: 0x08, 0x4ca: 0x08, 0x4cb: 0x08, 0x4cc: 0x08, 0x4cd: 0x08, 0x4ce: 0x08, 0x4cf: 0x08, + 0x4d0: 0x08, 0x4d1: 0x08, 0x4d2: 0x08, 0x4d3: 0x08, 0x4d4: 0x08, 0x4d5: 0x08, 0x4d6: 0x08, 0x4d7: 0x08, + 0x4d8: 0x08, 0x4d9: 0x08, 0x4da: 0x08, 0x4db: 0x08, 0x4dc: 0x08, 0x4dd: 0x08, 0x4de: 0x08, 0x4df: 0x08, + 0x4e0: 0x08, 0x4e1: 0x08, 0x4e2: 0x08, 0x4e3: 0x08, 0x4e4: 0x08, 0x4e5: 0x08, 0x4e6: 0x08, 0x4e7: 0x08, + 0x4e8: 0x08, 0x4e9: 0x08, 0x4ea: 0x08, 0x4eb: 0x08, 0x4ec: 0x08, 0x4ed: 0x08, 0x4ee: 0x08, 0x4ef: 0x08, + 0x4f0: 0x08, 0x4f1: 0x08, 0x4f2: 0x08, 0x4f3: 0x08, 0x4f4: 0x08, 0x4f5: 0x08, 0x4f6: 0x08, 0x4f7: 0x08, + 0x4f8: 0x08, 0x4f9: 0x08, 0x4fa: 0x08, 0x4fb: 0x08, 0x4fc: 0x08, 0x4fd: 0x08, 0x4fe: 0x08, 0x4ff: 0x62, + // Block 0x14, offset 0x500 + 0x520: 0x10, + 0x530: 0x09, 0x531: 0x09, 0x532: 0x09, 0x533: 0x09, 0x534: 0x09, 0x535: 0x09, 0x536: 0x09, 0x537: 0x09, + 0x538: 0x09, 0x539: 0x09, 0x53a: 0x09, 0x53b: 0x09, 0x53c: 0x09, 0x53d: 0x09, 0x53e: 0x09, 0x53f: 0x11, + // Block 0x15, offset 0x540 + 0x540: 0x09, 0x541: 0x09, 0x542: 0x09, 0x543: 0x09, 0x544: 0x09, 0x545: 0x09, 0x546: 0x09, 0x547: 0x09, + 0x548: 0x09, 0x549: 0x09, 0x54a: 0x09, 0x54b: 0x09, 0x54c: 0x09, 0x54d: 0x09, 0x54e: 0x09, 0x54f: 0x11, +} + +// inverseData contains 4-byte entries of the following format: +// +// <length> <modified UTF-8-encoded rune> <0 padding> +// +// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the +// UTF-8 encoding of the original rune. Mappings often have the following +// pattern: +// +// A -> A (U+FF21 -> U+0041) +// B -> B (U+FF22 -> U+0042) +// ... +// +// By xor-ing the last byte the same entry can be shared by many mappings. This +// reduces the total number of distinct entries by about two thirds. +// The resulting entry for the aforementioned mappings is +// +// { 0x01, 0xE0, 0x00, 0x00 } +// +// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get +// +// E0 ^ A1 = 41. 
+// +// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get +// +// E0 ^ A2 = 42. +// +// Note that because of the xor-ing, the byte sequence stored in the entry is +// not valid UTF-8. +var inverseData = [150][4]byte{ + {0x00, 0x00, 0x00, 0x00}, + {0x03, 0xe3, 0x80, 0xa0}, + {0x03, 0xef, 0xbc, 0xa0}, + {0x03, 0xef, 0xbc, 0xe0}, + {0x03, 0xef, 0xbd, 0xe0}, + {0x03, 0xef, 0xbf, 0x02}, + {0x03, 0xef, 0xbf, 0x00}, + {0x03, 0xef, 0xbf, 0x0e}, + {0x03, 0xef, 0xbf, 0x0c}, + {0x03, 0xef, 0xbf, 0x0f}, + {0x03, 0xef, 0xbf, 0x39}, + {0x03, 0xef, 0xbf, 0x3b}, + {0x03, 0xef, 0xbf, 0x3f}, + {0x03, 0xef, 0xbf, 0x2a}, + {0x03, 0xef, 0xbf, 0x0d}, + {0x03, 0xef, 0xbf, 0x25}, + {0x03, 0xef, 0xbd, 0x1a}, + {0x03, 0xef, 0xbd, 0x26}, + {0x01, 0xa0, 0x00, 0x00}, + {0x03, 0xef, 0xbd, 0x25}, + {0x03, 0xef, 0xbd, 0x23}, + {0x03, 0xef, 0xbd, 0x2e}, + {0x03, 0xef, 0xbe, 0x07}, + {0x03, 0xef, 0xbe, 0x05}, + {0x03, 0xef, 0xbd, 0x06}, + {0x03, 0xef, 0xbd, 0x13}, + {0x03, 0xef, 0xbd, 0x0b}, + {0x03, 0xef, 0xbd, 0x16}, + {0x03, 0xef, 0xbd, 0x0c}, + {0x03, 0xef, 0xbd, 0x15}, + {0x03, 0xef, 0xbd, 0x0d}, + {0x03, 0xef, 0xbd, 0x1c}, + {0x03, 0xef, 0xbd, 0x02}, + {0x03, 0xef, 0xbd, 0x1f}, + {0x03, 0xef, 0xbd, 0x1d}, + {0x03, 0xef, 0xbd, 0x17}, + {0x03, 0xef, 0xbd, 0x08}, + {0x03, 0xef, 0xbd, 0x09}, + {0x03, 0xef, 0xbd, 0x0e}, + {0x03, 0xef, 0xbd, 0x04}, + {0x03, 0xef, 0xbd, 0x05}, + {0x03, 0xef, 0xbe, 0x3f}, + {0x03, 0xef, 0xbe, 0x00}, + {0x03, 0xef, 0xbd, 0x2c}, + {0x03, 0xef, 0xbe, 0x06}, + {0x03, 0xef, 0xbe, 0x0c}, + {0x03, 0xef, 0xbe, 0x0f}, + {0x03, 0xef, 0xbe, 0x0d}, + {0x03, 0xef, 0xbe, 0x0b}, + {0x03, 0xef, 0xbe, 0x19}, + {0x03, 0xef, 0xbe, 0x15}, + {0x03, 0xef, 0xbe, 0x11}, + {0x03, 0xef, 0xbe, 0x31}, + {0x03, 0xef, 0xbe, 0x33}, + {0x03, 0xef, 0xbd, 0x0f}, + {0x03, 0xef, 0xbe, 0x30}, + {0x03, 0xef, 0xbe, 0x3e}, + {0x03, 0xef, 0xbe, 0x32}, + {0x03, 0xef, 0xbe, 0x36}, + {0x03, 0xef, 0xbd, 0x14}, + {0x03, 0xef, 0xbe, 0x2e}, + {0x03, 0xef, 0xbd, 0x1e}, + {0x03, 0xef, 0xbe, 0x10}, + {0x03, 0xef, 0xbf, 0x13}, + {0x03, 0xef, 0xbf, 0x15}, + {0x03, 0xef, 0xbf, 0x17}, + {0x03, 0xef, 0xbf, 0x1f}, + {0x03, 0xef, 0xbf, 0x1d}, + {0x03, 0xef, 0xbf, 0x1b}, + {0x03, 0xef, 0xbf, 0x09}, + {0x03, 0xef, 0xbf, 0x0b}, + {0x03, 0xef, 0xbf, 0x37}, + {0x03, 0xef, 0xbe, 0x04}, + {0x01, 0xe0, 0x00, 0x00}, + {0x03, 0xe2, 0xa6, 0x1a}, + {0x03, 0xe2, 0xa6, 0x26}, + {0x03, 0xe3, 0x80, 0x23}, + {0x03, 0xe3, 0x80, 0x2e}, + {0x03, 0xe3, 0x80, 0x25}, + {0x03, 0xe3, 0x83, 0x1e}, + {0x03, 0xe3, 0x83, 0x14}, + {0x03, 0xe3, 0x82, 0x06}, + {0x03, 0xe3, 0x82, 0x0b}, + {0x03, 0xe3, 0x82, 0x0c}, + {0x03, 0xe3, 0x82, 0x0d}, + {0x03, 0xe3, 0x82, 0x02}, + {0x03, 0xe3, 0x83, 0x0f}, + {0x03, 0xe3, 0x83, 0x08}, + {0x03, 0xe3, 0x83, 0x09}, + {0x03, 0xe3, 0x83, 0x2c}, + {0x03, 0xe3, 0x83, 0x0c}, + {0x03, 0xe3, 0x82, 0x13}, + {0x03, 0xe3, 0x82, 0x16}, + {0x03, 0xe3, 0x82, 0x15}, + {0x03, 0xe3, 0x82, 0x1c}, + {0x03, 0xe3, 0x82, 0x1f}, + {0x03, 0xe3, 0x82, 0x1d}, + {0x03, 0xe3, 0x82, 0x1a}, + {0x03, 0xe3, 0x82, 0x17}, + {0x03, 0xe3, 0x82, 0x08}, + {0x03, 0xe3, 0x82, 0x09}, + {0x03, 0xe3, 0x82, 0x0e}, + {0x03, 0xe3, 0x82, 0x04}, + {0x03, 0xe3, 0x82, 0x05}, + {0x03, 0xe3, 0x82, 0x3f}, + {0x03, 0xe3, 0x83, 0x00}, + {0x03, 0xe3, 0x83, 0x06}, + {0x03, 0xe3, 0x83, 0x05}, + {0x03, 0xe3, 0x83, 0x0d}, + {0x03, 0xe3, 0x83, 0x0b}, + {0x03, 0xe3, 0x83, 0x07}, + {0x03, 0xe3, 0x83, 0x19}, + {0x03, 0xe3, 0x83, 0x15}, + {0x03, 0xe3, 0x83, 0x11}, + {0x03, 0xe3, 0x83, 0x31}, + {0x03, 0xe3, 0x83, 0x33}, + {0x03, 0xe3, 0x83, 0x30}, + {0x03, 0xe3, 0x83, 0x3e}, + {0x03, 0xe3, 0x83, 0x32}, + 
{0x03, 0xe3, 0x83, 0x36}, + {0x03, 0xe3, 0x83, 0x2e}, + {0x03, 0xe3, 0x82, 0x07}, + {0x03, 0xe3, 0x85, 0x04}, + {0x03, 0xe3, 0x84, 0x10}, + {0x03, 0xe3, 0x85, 0x30}, + {0x03, 0xe3, 0x85, 0x0d}, + {0x03, 0xe3, 0x85, 0x13}, + {0x03, 0xe3, 0x85, 0x15}, + {0x03, 0xe3, 0x85, 0x17}, + {0x03, 0xe3, 0x85, 0x1f}, + {0x03, 0xe3, 0x85, 0x1d}, + {0x03, 0xe3, 0x85, 0x1b}, + {0x03, 0xe3, 0x85, 0x09}, + {0x03, 0xe3, 0x85, 0x0f}, + {0x03, 0xe3, 0x85, 0x0b}, + {0x03, 0xe3, 0x85, 0x37}, + {0x03, 0xe3, 0x85, 0x3b}, + {0x03, 0xe3, 0x85, 0x39}, + {0x03, 0xe3, 0x85, 0x3f}, + {0x02, 0xc2, 0x02, 0x00}, + {0x02, 0xc2, 0x0e, 0x00}, + {0x02, 0xc2, 0x0c, 0x00}, + {0x02, 0xc2, 0x00, 0x00}, + {0x03, 0xe2, 0x82, 0x0f}, + {0x03, 0xe2, 0x94, 0x2a}, + {0x03, 0xe2, 0x86, 0x39}, + {0x03, 0xe2, 0x86, 0x3b}, + {0x03, 0xe2, 0x86, 0x3f}, + {0x03, 0xe2, 0x96, 0x0d}, + {0x03, 0xe2, 0x97, 0x25}, +} + +// Total table size 14936 bytes (14KiB) diff --git a/vendor/golang.org/x/text/width/tables11.0.0.go b/vendor/golang.org/x/text/width/tables11.0.0.go new file mode 100644 index 00000000000..89288b3dae3 --- /dev/null +++ b/vendor/golang.org/x/text/width/tables11.0.0.go @@ -0,0 +1,1340 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +//go:build go1.13 && !go1.14 + +package width + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "11.0.0" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *widthTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return widthValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = widthIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. 
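(Editor's illustration, not part of the vendored file.) The inverseData comment above describes each entry as <length> <modified UTF-8-encoded rune> <0 padding>, with the last data byte xor-ed against the last byte of the original rune's encoding. A minimal Go sketch of that decode step, under exactly those stated assumptions; decodeInverse, entry, and orig are our own names, not identifiers from the package:

    // decodeInverse recovers the UTF-8 bytes of the mapped rune from one
    // inverseData entry. entry is one row of the table above; orig is the
    // UTF-8 encoding of the rune being mapped.
    func decodeInverse(entry [4]byte, orig []byte) []byte {
        n := int(entry[0]) // first byte: length of the mapping in bytes
        if n == 0 {
            return nil // the all-zero entry carries no mapping
        }
        out := make([]byte, n)
        copy(out, entry[1:1+n])
        out[n-1] ^= orig[len(orig)-1] // undo the xor on the last byte
        return out
    }

Worked example from the comment above: applying entry {0x01, 0xE0, 0x00, 0x00} to U+FF21 (UTF-8 EF BC A1) gives 0xE0 ^ 0xA1 = 0x41, i.e. the single byte "A".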
+func (t *widthTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return widthValues[c0] + } + i := widthIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = widthIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = widthIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *widthTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return widthValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = widthIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *widthTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return widthValues[c0] + } + i := widthIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = widthIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = widthIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// widthTrie. Total size: 14336 bytes (14.00 KiB). Checksum: c0f7712776e71cd4. +type widthTrie struct{} + +func newWidthTrie(i int) *widthTrie { + return &widthTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *widthTrie) lookupValue(n uint32, b byte) uint16 { + switch { + default: + return uint16(widthValues[n<<6+uint32(b)]) + } +} + +// widthValues: 101 blocks, 6464 entries, 12928 bytes +// The third block is the zero block. 
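(Editor's illustration, not part of the vendored file.) The lookup and lookupValue functions above resolve a rune through a two-level trie: widthIndex is consulted once per continuation byte and widthValues holds 64-entry leaf blocks, which is why every step shifts by 6 bits. A sketch of the 3-byte path for U+FF21 (UTF-8 EF BC A1), assuming the widthValues table declared just below and the widthIndex table that appears later in this generated file; widthOfFF21 is our own name:

    // widthOfFF21 mirrors the 3-byte case of lookup() plus lookupValue()
    // for the fullwidth letter U+FF21 (bytes EF BC A1).
    func widthOfFF21() uint16 {
        i := widthIndex[0xEF]                         // level 1: indexed by the lead byte
        i = widthIndex[uint32(i)<<6+uint32(0xBC)]     // level 2: first continuation byte
        return widthValues[uint32(i)<<6+uint32(0xA1)] // leaf block: second continuation byte
    }

This is the same computation lookupValue performs with n = uint32(i) and b = 0xA1: widthValues[n<<6+uint32(b)].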
+var widthValues = [6464]uint16{ + // Block 0x0, offset 0x0 + 0x20: 0x6001, 0x21: 0x6002, 0x22: 0x6002, 0x23: 0x6002, + 0x24: 0x6002, 0x25: 0x6002, 0x26: 0x6002, 0x27: 0x6002, 0x28: 0x6002, 0x29: 0x6002, + 0x2a: 0x6002, 0x2b: 0x6002, 0x2c: 0x6002, 0x2d: 0x6002, 0x2e: 0x6002, 0x2f: 0x6002, + 0x30: 0x6002, 0x31: 0x6002, 0x32: 0x6002, 0x33: 0x6002, 0x34: 0x6002, 0x35: 0x6002, + 0x36: 0x6002, 0x37: 0x6002, 0x38: 0x6002, 0x39: 0x6002, 0x3a: 0x6002, 0x3b: 0x6002, + 0x3c: 0x6002, 0x3d: 0x6002, 0x3e: 0x6002, 0x3f: 0x6002, + // Block 0x1, offset 0x40 + 0x40: 0x6003, 0x41: 0x6003, 0x42: 0x6003, 0x43: 0x6003, 0x44: 0x6003, 0x45: 0x6003, + 0x46: 0x6003, 0x47: 0x6003, 0x48: 0x6003, 0x49: 0x6003, 0x4a: 0x6003, 0x4b: 0x6003, + 0x4c: 0x6003, 0x4d: 0x6003, 0x4e: 0x6003, 0x4f: 0x6003, 0x50: 0x6003, 0x51: 0x6003, + 0x52: 0x6003, 0x53: 0x6003, 0x54: 0x6003, 0x55: 0x6003, 0x56: 0x6003, 0x57: 0x6003, + 0x58: 0x6003, 0x59: 0x6003, 0x5a: 0x6003, 0x5b: 0x6003, 0x5c: 0x6003, 0x5d: 0x6003, + 0x5e: 0x6003, 0x5f: 0x6003, 0x60: 0x6004, 0x61: 0x6004, 0x62: 0x6004, 0x63: 0x6004, + 0x64: 0x6004, 0x65: 0x6004, 0x66: 0x6004, 0x67: 0x6004, 0x68: 0x6004, 0x69: 0x6004, + 0x6a: 0x6004, 0x6b: 0x6004, 0x6c: 0x6004, 0x6d: 0x6004, 0x6e: 0x6004, 0x6f: 0x6004, + 0x70: 0x6004, 0x71: 0x6004, 0x72: 0x6004, 0x73: 0x6004, 0x74: 0x6004, 0x75: 0x6004, + 0x76: 0x6004, 0x77: 0x6004, 0x78: 0x6004, 0x79: 0x6004, 0x7a: 0x6004, 0x7b: 0x6004, + 0x7c: 0x6004, 0x7d: 0x6004, 0x7e: 0x6004, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xe1: 0x2000, 0xe2: 0x6005, 0xe3: 0x6005, + 0xe4: 0x2000, 0xe5: 0x6006, 0xe6: 0x6005, 0xe7: 0x2000, 0xe8: 0x2000, + 0xea: 0x2000, 0xec: 0x6007, 0xed: 0x2000, 0xee: 0x2000, 0xef: 0x6008, + 0xf0: 0x2000, 0xf1: 0x2000, 0xf2: 0x2000, 0xf3: 0x2000, 0xf4: 0x2000, + 0xf6: 0x2000, 0xf7: 0x2000, 0xf8: 0x2000, 0xf9: 0x2000, 0xfa: 0x2000, + 0xfc: 0x2000, 0xfd: 0x2000, 0xfe: 0x2000, 0xff: 0x2000, + // Block 0x4, offset 0x100 + 0x106: 0x2000, + 0x110: 0x2000, + 0x117: 0x2000, + 0x118: 0x2000, + 0x11e: 0x2000, 0x11f: 0x2000, 0x120: 0x2000, 0x121: 0x2000, + 0x126: 0x2000, 0x128: 0x2000, 0x129: 0x2000, + 0x12a: 0x2000, 0x12c: 0x2000, 0x12d: 0x2000, + 0x130: 0x2000, 0x132: 0x2000, 0x133: 0x2000, + 0x137: 0x2000, 0x138: 0x2000, 0x139: 0x2000, 0x13a: 0x2000, + 0x13c: 0x2000, 0x13e: 0x2000, + // Block 0x5, offset 0x140 + 0x141: 0x2000, + 0x151: 0x2000, + 0x153: 0x2000, + 0x15b: 0x2000, + 0x166: 0x2000, 0x167: 0x2000, + 0x16b: 0x2000, + 0x171: 0x2000, 0x172: 0x2000, 0x173: 0x2000, + 0x178: 0x2000, + 0x17f: 0x2000, + // Block 0x6, offset 0x180 + 0x180: 0x2000, 0x181: 0x2000, 0x182: 0x2000, 0x184: 0x2000, + 0x188: 0x2000, 0x189: 0x2000, 0x18a: 0x2000, 0x18b: 0x2000, + 0x18d: 0x2000, + 0x192: 0x2000, 0x193: 0x2000, + 0x1a6: 0x2000, 0x1a7: 0x2000, + 0x1ab: 0x2000, + // Block 0x7, offset 0x1c0 + 0x1ce: 0x2000, 0x1d0: 0x2000, + 0x1d2: 0x2000, 0x1d4: 0x2000, 0x1d6: 0x2000, + 0x1d8: 0x2000, 0x1da: 0x2000, 0x1dc: 0x2000, + // Block 0x8, offset 0x200 + 0x211: 0x2000, + 0x221: 0x2000, + // Block 0x9, offset 0x240 + 0x244: 0x2000, + 0x247: 0x2000, 0x249: 0x2000, 0x24a: 0x2000, 0x24b: 0x2000, + 0x24d: 0x2000, 0x250: 0x2000, + 0x258: 0x2000, 0x259: 0x2000, 0x25a: 0x2000, 0x25b: 0x2000, 0x25d: 0x2000, + 0x25f: 0x2000, + // Block 0xa, offset 0x280 + 0x280: 0x2000, 0x281: 0x2000, 0x282: 0x2000, 0x283: 0x2000, 0x284: 0x2000, 0x285: 0x2000, + 0x286: 0x2000, 0x287: 0x2000, 0x288: 0x2000, 0x289: 0x2000, 0x28a: 0x2000, 0x28b: 0x2000, + 0x28c: 0x2000, 0x28d: 0x2000, 0x28e: 0x2000, 0x28f: 0x2000, 0x290: 0x2000, 0x291: 0x2000, + 0x292: 0x2000, 0x293: 
0x2000, 0x294: 0x2000, 0x295: 0x2000, 0x296: 0x2000, 0x297: 0x2000, + 0x298: 0x2000, 0x299: 0x2000, 0x29a: 0x2000, 0x29b: 0x2000, 0x29c: 0x2000, 0x29d: 0x2000, + 0x29e: 0x2000, 0x29f: 0x2000, 0x2a0: 0x2000, 0x2a1: 0x2000, 0x2a2: 0x2000, 0x2a3: 0x2000, + 0x2a4: 0x2000, 0x2a5: 0x2000, 0x2a6: 0x2000, 0x2a7: 0x2000, 0x2a8: 0x2000, 0x2a9: 0x2000, + 0x2aa: 0x2000, 0x2ab: 0x2000, 0x2ac: 0x2000, 0x2ad: 0x2000, 0x2ae: 0x2000, 0x2af: 0x2000, + 0x2b0: 0x2000, 0x2b1: 0x2000, 0x2b2: 0x2000, 0x2b3: 0x2000, 0x2b4: 0x2000, 0x2b5: 0x2000, + 0x2b6: 0x2000, 0x2b7: 0x2000, 0x2b8: 0x2000, 0x2b9: 0x2000, 0x2ba: 0x2000, 0x2bb: 0x2000, + 0x2bc: 0x2000, 0x2bd: 0x2000, 0x2be: 0x2000, 0x2bf: 0x2000, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x2000, 0x2c1: 0x2000, 0x2c2: 0x2000, 0x2c3: 0x2000, 0x2c4: 0x2000, 0x2c5: 0x2000, + 0x2c6: 0x2000, 0x2c7: 0x2000, 0x2c8: 0x2000, 0x2c9: 0x2000, 0x2ca: 0x2000, 0x2cb: 0x2000, + 0x2cc: 0x2000, 0x2cd: 0x2000, 0x2ce: 0x2000, 0x2cf: 0x2000, 0x2d0: 0x2000, 0x2d1: 0x2000, + 0x2d2: 0x2000, 0x2d3: 0x2000, 0x2d4: 0x2000, 0x2d5: 0x2000, 0x2d6: 0x2000, 0x2d7: 0x2000, + 0x2d8: 0x2000, 0x2d9: 0x2000, 0x2da: 0x2000, 0x2db: 0x2000, 0x2dc: 0x2000, 0x2dd: 0x2000, + 0x2de: 0x2000, 0x2df: 0x2000, 0x2e0: 0x2000, 0x2e1: 0x2000, 0x2e2: 0x2000, 0x2e3: 0x2000, + 0x2e4: 0x2000, 0x2e5: 0x2000, 0x2e6: 0x2000, 0x2e7: 0x2000, 0x2e8: 0x2000, 0x2e9: 0x2000, + 0x2ea: 0x2000, 0x2eb: 0x2000, 0x2ec: 0x2000, 0x2ed: 0x2000, 0x2ee: 0x2000, 0x2ef: 0x2000, + // Block 0xc, offset 0x300 + 0x311: 0x2000, + 0x312: 0x2000, 0x313: 0x2000, 0x314: 0x2000, 0x315: 0x2000, 0x316: 0x2000, 0x317: 0x2000, + 0x318: 0x2000, 0x319: 0x2000, 0x31a: 0x2000, 0x31b: 0x2000, 0x31c: 0x2000, 0x31d: 0x2000, + 0x31e: 0x2000, 0x31f: 0x2000, 0x320: 0x2000, 0x321: 0x2000, 0x323: 0x2000, + 0x324: 0x2000, 0x325: 0x2000, 0x326: 0x2000, 0x327: 0x2000, 0x328: 0x2000, 0x329: 0x2000, + 0x331: 0x2000, 0x332: 0x2000, 0x333: 0x2000, 0x334: 0x2000, 0x335: 0x2000, + 0x336: 0x2000, 0x337: 0x2000, 0x338: 0x2000, 0x339: 0x2000, 0x33a: 0x2000, 0x33b: 0x2000, + 0x33c: 0x2000, 0x33d: 0x2000, 0x33e: 0x2000, 0x33f: 0x2000, + // Block 0xd, offset 0x340 + 0x340: 0x2000, 0x341: 0x2000, 0x343: 0x2000, 0x344: 0x2000, 0x345: 0x2000, + 0x346: 0x2000, 0x347: 0x2000, 0x348: 0x2000, 0x349: 0x2000, + // Block 0xe, offset 0x380 + 0x381: 0x2000, + 0x390: 0x2000, 0x391: 0x2000, + 0x392: 0x2000, 0x393: 0x2000, 0x394: 0x2000, 0x395: 0x2000, 0x396: 0x2000, 0x397: 0x2000, + 0x398: 0x2000, 0x399: 0x2000, 0x39a: 0x2000, 0x39b: 0x2000, 0x39c: 0x2000, 0x39d: 0x2000, + 0x39e: 0x2000, 0x39f: 0x2000, 0x3a0: 0x2000, 0x3a1: 0x2000, 0x3a2: 0x2000, 0x3a3: 0x2000, + 0x3a4: 0x2000, 0x3a5: 0x2000, 0x3a6: 0x2000, 0x3a7: 0x2000, 0x3a8: 0x2000, 0x3a9: 0x2000, + 0x3aa: 0x2000, 0x3ab: 0x2000, 0x3ac: 0x2000, 0x3ad: 0x2000, 0x3ae: 0x2000, 0x3af: 0x2000, + 0x3b0: 0x2000, 0x3b1: 0x2000, 0x3b2: 0x2000, 0x3b3: 0x2000, 0x3b4: 0x2000, 0x3b5: 0x2000, + 0x3b6: 0x2000, 0x3b7: 0x2000, 0x3b8: 0x2000, 0x3b9: 0x2000, 0x3ba: 0x2000, 0x3bb: 0x2000, + 0x3bc: 0x2000, 0x3bd: 0x2000, 0x3be: 0x2000, 0x3bf: 0x2000, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x2000, 0x3c1: 0x2000, 0x3c2: 0x2000, 0x3c3: 0x2000, 0x3c4: 0x2000, 0x3c5: 0x2000, + 0x3c6: 0x2000, 0x3c7: 0x2000, 0x3c8: 0x2000, 0x3c9: 0x2000, 0x3ca: 0x2000, 0x3cb: 0x2000, + 0x3cc: 0x2000, 0x3cd: 0x2000, 0x3ce: 0x2000, 0x3cf: 0x2000, 0x3d1: 0x2000, + // Block 0x10, offset 0x400 + 0x400: 0x4000, 0x401: 0x4000, 0x402: 0x4000, 0x403: 0x4000, 0x404: 0x4000, 0x405: 0x4000, + 0x406: 0x4000, 0x407: 0x4000, 0x408: 0x4000, 0x409: 0x4000, 0x40a: 0x4000, 0x40b: 0x4000, + 0x40c: 0x4000, 
0x40d: 0x4000, 0x40e: 0x4000, 0x40f: 0x4000, 0x410: 0x4000, 0x411: 0x4000, + 0x412: 0x4000, 0x413: 0x4000, 0x414: 0x4000, 0x415: 0x4000, 0x416: 0x4000, 0x417: 0x4000, + 0x418: 0x4000, 0x419: 0x4000, 0x41a: 0x4000, 0x41b: 0x4000, 0x41c: 0x4000, 0x41d: 0x4000, + 0x41e: 0x4000, 0x41f: 0x4000, 0x420: 0x4000, 0x421: 0x4000, 0x422: 0x4000, 0x423: 0x4000, + 0x424: 0x4000, 0x425: 0x4000, 0x426: 0x4000, 0x427: 0x4000, 0x428: 0x4000, 0x429: 0x4000, + 0x42a: 0x4000, 0x42b: 0x4000, 0x42c: 0x4000, 0x42d: 0x4000, 0x42e: 0x4000, 0x42f: 0x4000, + 0x430: 0x4000, 0x431: 0x4000, 0x432: 0x4000, 0x433: 0x4000, 0x434: 0x4000, 0x435: 0x4000, + 0x436: 0x4000, 0x437: 0x4000, 0x438: 0x4000, 0x439: 0x4000, 0x43a: 0x4000, 0x43b: 0x4000, + 0x43c: 0x4000, 0x43d: 0x4000, 0x43e: 0x4000, 0x43f: 0x4000, + // Block 0x11, offset 0x440 + 0x440: 0x4000, 0x441: 0x4000, 0x442: 0x4000, 0x443: 0x4000, 0x444: 0x4000, 0x445: 0x4000, + 0x446: 0x4000, 0x447: 0x4000, 0x448: 0x4000, 0x449: 0x4000, 0x44a: 0x4000, 0x44b: 0x4000, + 0x44c: 0x4000, 0x44d: 0x4000, 0x44e: 0x4000, 0x44f: 0x4000, 0x450: 0x4000, 0x451: 0x4000, + 0x452: 0x4000, 0x453: 0x4000, 0x454: 0x4000, 0x455: 0x4000, 0x456: 0x4000, 0x457: 0x4000, + 0x458: 0x4000, 0x459: 0x4000, 0x45a: 0x4000, 0x45b: 0x4000, 0x45c: 0x4000, 0x45d: 0x4000, + 0x45e: 0x4000, 0x45f: 0x4000, + // Block 0x12, offset 0x480 + 0x490: 0x2000, + 0x493: 0x2000, 0x494: 0x2000, 0x495: 0x2000, 0x496: 0x2000, + 0x498: 0x2000, 0x499: 0x2000, 0x49c: 0x2000, 0x49d: 0x2000, + 0x4a0: 0x2000, 0x4a1: 0x2000, 0x4a2: 0x2000, + 0x4a4: 0x2000, 0x4a5: 0x2000, 0x4a6: 0x2000, 0x4a7: 0x2000, + 0x4b0: 0x2000, 0x4b2: 0x2000, 0x4b3: 0x2000, 0x4b5: 0x2000, + 0x4bb: 0x2000, + 0x4be: 0x2000, + // Block 0x13, offset 0x4c0 + 0x4f4: 0x2000, + 0x4ff: 0x2000, + // Block 0x14, offset 0x500 + 0x501: 0x2000, 0x502: 0x2000, 0x503: 0x2000, 0x504: 0x2000, + 0x529: 0xa009, + 0x52c: 0x2000, + // Block 0x15, offset 0x540 + 0x543: 0x2000, 0x545: 0x2000, + 0x549: 0x2000, + 0x553: 0x2000, 0x556: 0x2000, + 0x561: 0x2000, 0x562: 0x2000, + 0x566: 0x2000, + 0x56b: 0x2000, + // Block 0x16, offset 0x580 + 0x593: 0x2000, 0x594: 0x2000, + 0x59b: 0x2000, 0x59c: 0x2000, 0x59d: 0x2000, + 0x59e: 0x2000, 0x5a0: 0x2000, 0x5a1: 0x2000, 0x5a2: 0x2000, 0x5a3: 0x2000, + 0x5a4: 0x2000, 0x5a5: 0x2000, 0x5a6: 0x2000, 0x5a7: 0x2000, 0x5a8: 0x2000, 0x5a9: 0x2000, + 0x5aa: 0x2000, 0x5ab: 0x2000, + 0x5b0: 0x2000, 0x5b1: 0x2000, 0x5b2: 0x2000, 0x5b3: 0x2000, 0x5b4: 0x2000, 0x5b5: 0x2000, + 0x5b6: 0x2000, 0x5b7: 0x2000, 0x5b8: 0x2000, 0x5b9: 0x2000, + // Block 0x17, offset 0x5c0 + 0x5c9: 0x2000, + 0x5d0: 0x200a, 0x5d1: 0x200b, + 0x5d2: 0x200a, 0x5d3: 0x200c, 0x5d4: 0x2000, 0x5d5: 0x2000, 0x5d6: 0x2000, 0x5d7: 0x2000, + 0x5d8: 0x2000, 0x5d9: 0x2000, + 0x5f8: 0x2000, 0x5f9: 0x2000, + // Block 0x18, offset 0x600 + 0x612: 0x2000, 0x614: 0x2000, + 0x627: 0x2000, + // Block 0x19, offset 0x640 + 0x640: 0x2000, 0x642: 0x2000, 0x643: 0x2000, + 0x647: 0x2000, 0x648: 0x2000, 0x64b: 0x2000, + 0x64f: 0x2000, 0x651: 0x2000, + 0x655: 0x2000, + 0x65a: 0x2000, 0x65d: 0x2000, + 0x65e: 0x2000, 0x65f: 0x2000, 0x660: 0x2000, 0x663: 0x2000, + 0x665: 0x2000, 0x667: 0x2000, 0x668: 0x2000, 0x669: 0x2000, + 0x66a: 0x2000, 0x66b: 0x2000, 0x66c: 0x2000, 0x66e: 0x2000, + 0x674: 0x2000, 0x675: 0x2000, + 0x676: 0x2000, 0x677: 0x2000, + 0x67c: 0x2000, 0x67d: 0x2000, + // Block 0x1a, offset 0x680 + 0x688: 0x2000, + 0x68c: 0x2000, + 0x692: 0x2000, + 0x6a0: 0x2000, 0x6a1: 0x2000, + 0x6a4: 0x2000, 0x6a5: 0x2000, 0x6a6: 0x2000, 0x6a7: 0x2000, + 0x6aa: 0x2000, 0x6ab: 0x2000, 0x6ae: 0x2000, 0x6af: 0x2000, + // 
Block 0x1b, offset 0x6c0 + 0x6c2: 0x2000, 0x6c3: 0x2000, + 0x6c6: 0x2000, 0x6c7: 0x2000, + 0x6d5: 0x2000, + 0x6d9: 0x2000, + 0x6e5: 0x2000, + 0x6ff: 0x2000, + // Block 0x1c, offset 0x700 + 0x712: 0x2000, + 0x71a: 0x4000, 0x71b: 0x4000, + 0x729: 0x4000, + 0x72a: 0x4000, + // Block 0x1d, offset 0x740 + 0x769: 0x4000, + 0x76a: 0x4000, 0x76b: 0x4000, 0x76c: 0x4000, + 0x770: 0x4000, 0x773: 0x4000, + // Block 0x1e, offset 0x780 + 0x7a0: 0x2000, 0x7a1: 0x2000, 0x7a2: 0x2000, 0x7a3: 0x2000, + 0x7a4: 0x2000, 0x7a5: 0x2000, 0x7a6: 0x2000, 0x7a7: 0x2000, 0x7a8: 0x2000, 0x7a9: 0x2000, + 0x7aa: 0x2000, 0x7ab: 0x2000, 0x7ac: 0x2000, 0x7ad: 0x2000, 0x7ae: 0x2000, 0x7af: 0x2000, + 0x7b0: 0x2000, 0x7b1: 0x2000, 0x7b2: 0x2000, 0x7b3: 0x2000, 0x7b4: 0x2000, 0x7b5: 0x2000, + 0x7b6: 0x2000, 0x7b7: 0x2000, 0x7b8: 0x2000, 0x7b9: 0x2000, 0x7ba: 0x2000, 0x7bb: 0x2000, + 0x7bc: 0x2000, 0x7bd: 0x2000, 0x7be: 0x2000, 0x7bf: 0x2000, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x2000, 0x7c1: 0x2000, 0x7c2: 0x2000, 0x7c3: 0x2000, 0x7c4: 0x2000, 0x7c5: 0x2000, + 0x7c6: 0x2000, 0x7c7: 0x2000, 0x7c8: 0x2000, 0x7c9: 0x2000, 0x7ca: 0x2000, 0x7cb: 0x2000, + 0x7cc: 0x2000, 0x7cd: 0x2000, 0x7ce: 0x2000, 0x7cf: 0x2000, 0x7d0: 0x2000, 0x7d1: 0x2000, + 0x7d2: 0x2000, 0x7d3: 0x2000, 0x7d4: 0x2000, 0x7d5: 0x2000, 0x7d6: 0x2000, 0x7d7: 0x2000, + 0x7d8: 0x2000, 0x7d9: 0x2000, 0x7da: 0x2000, 0x7db: 0x2000, 0x7dc: 0x2000, 0x7dd: 0x2000, + 0x7de: 0x2000, 0x7df: 0x2000, 0x7e0: 0x2000, 0x7e1: 0x2000, 0x7e2: 0x2000, 0x7e3: 0x2000, + 0x7e4: 0x2000, 0x7e5: 0x2000, 0x7e6: 0x2000, 0x7e7: 0x2000, 0x7e8: 0x2000, 0x7e9: 0x2000, + 0x7eb: 0x2000, 0x7ec: 0x2000, 0x7ed: 0x2000, 0x7ee: 0x2000, 0x7ef: 0x2000, + 0x7f0: 0x2000, 0x7f1: 0x2000, 0x7f2: 0x2000, 0x7f3: 0x2000, 0x7f4: 0x2000, 0x7f5: 0x2000, + 0x7f6: 0x2000, 0x7f7: 0x2000, 0x7f8: 0x2000, 0x7f9: 0x2000, 0x7fa: 0x2000, 0x7fb: 0x2000, + 0x7fc: 0x2000, 0x7fd: 0x2000, 0x7fe: 0x2000, 0x7ff: 0x2000, + // Block 0x20, offset 0x800 + 0x800: 0x2000, 0x801: 0x2000, 0x802: 0x200d, 0x803: 0x2000, 0x804: 0x2000, 0x805: 0x2000, + 0x806: 0x2000, 0x807: 0x2000, 0x808: 0x2000, 0x809: 0x2000, 0x80a: 0x2000, 0x80b: 0x2000, + 0x80c: 0x2000, 0x80d: 0x2000, 0x80e: 0x2000, 0x80f: 0x2000, 0x810: 0x2000, 0x811: 0x2000, + 0x812: 0x2000, 0x813: 0x2000, 0x814: 0x2000, 0x815: 0x2000, 0x816: 0x2000, 0x817: 0x2000, + 0x818: 0x2000, 0x819: 0x2000, 0x81a: 0x2000, 0x81b: 0x2000, 0x81c: 0x2000, 0x81d: 0x2000, + 0x81e: 0x2000, 0x81f: 0x2000, 0x820: 0x2000, 0x821: 0x2000, 0x822: 0x2000, 0x823: 0x2000, + 0x824: 0x2000, 0x825: 0x2000, 0x826: 0x2000, 0x827: 0x2000, 0x828: 0x2000, 0x829: 0x2000, + 0x82a: 0x2000, 0x82b: 0x2000, 0x82c: 0x2000, 0x82d: 0x2000, 0x82e: 0x2000, 0x82f: 0x2000, + 0x830: 0x2000, 0x831: 0x2000, 0x832: 0x2000, 0x833: 0x2000, 0x834: 0x2000, 0x835: 0x2000, + 0x836: 0x2000, 0x837: 0x2000, 0x838: 0x2000, 0x839: 0x2000, 0x83a: 0x2000, 0x83b: 0x2000, + 0x83c: 0x2000, 0x83d: 0x2000, 0x83e: 0x2000, 0x83f: 0x2000, + // Block 0x21, offset 0x840 + 0x840: 0x2000, 0x841: 0x2000, 0x842: 0x2000, 0x843: 0x2000, 0x844: 0x2000, 0x845: 0x2000, + 0x846: 0x2000, 0x847: 0x2000, 0x848: 0x2000, 0x849: 0x2000, 0x84a: 0x2000, 0x84b: 0x2000, + 0x850: 0x2000, 0x851: 0x2000, + 0x852: 0x2000, 0x853: 0x2000, 0x854: 0x2000, 0x855: 0x2000, 0x856: 0x2000, 0x857: 0x2000, + 0x858: 0x2000, 0x859: 0x2000, 0x85a: 0x2000, 0x85b: 0x2000, 0x85c: 0x2000, 0x85d: 0x2000, + 0x85e: 0x2000, 0x85f: 0x2000, 0x860: 0x2000, 0x861: 0x2000, 0x862: 0x2000, 0x863: 0x2000, + 0x864: 0x2000, 0x865: 0x2000, 0x866: 0x2000, 0x867: 0x2000, 0x868: 0x2000, 0x869: 0x2000, + 0x86a: 
0x2000, 0x86b: 0x2000, 0x86c: 0x2000, 0x86d: 0x2000, 0x86e: 0x2000, 0x86f: 0x2000, + 0x870: 0x2000, 0x871: 0x2000, 0x872: 0x2000, 0x873: 0x2000, + // Block 0x22, offset 0x880 + 0x880: 0x2000, 0x881: 0x2000, 0x882: 0x2000, 0x883: 0x2000, 0x884: 0x2000, 0x885: 0x2000, + 0x886: 0x2000, 0x887: 0x2000, 0x888: 0x2000, 0x889: 0x2000, 0x88a: 0x2000, 0x88b: 0x2000, + 0x88c: 0x2000, 0x88d: 0x2000, 0x88e: 0x2000, 0x88f: 0x2000, + 0x892: 0x2000, 0x893: 0x2000, 0x894: 0x2000, 0x895: 0x2000, + 0x8a0: 0x200e, 0x8a1: 0x2000, 0x8a3: 0x2000, + 0x8a4: 0x2000, 0x8a5: 0x2000, 0x8a6: 0x2000, 0x8a7: 0x2000, 0x8a8: 0x2000, 0x8a9: 0x2000, + 0x8b2: 0x2000, 0x8b3: 0x2000, + 0x8b6: 0x2000, 0x8b7: 0x2000, + 0x8bc: 0x2000, 0x8bd: 0x2000, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x2000, 0x8c1: 0x2000, + 0x8c6: 0x2000, 0x8c7: 0x2000, 0x8c8: 0x2000, 0x8cb: 0x200f, + 0x8ce: 0x2000, 0x8cf: 0x2000, 0x8d0: 0x2000, 0x8d1: 0x2000, + 0x8e2: 0x2000, 0x8e3: 0x2000, + 0x8e4: 0x2000, 0x8e5: 0x2000, + 0x8ef: 0x2000, + 0x8fd: 0x4000, 0x8fe: 0x4000, + // Block 0x24, offset 0x900 + 0x905: 0x2000, + 0x906: 0x2000, 0x909: 0x2000, + 0x90e: 0x2000, 0x90f: 0x2000, + 0x914: 0x4000, 0x915: 0x4000, + 0x91c: 0x2000, + 0x91e: 0x2000, + // Block 0x25, offset 0x940 + 0x940: 0x2000, 0x942: 0x2000, + 0x948: 0x4000, 0x949: 0x4000, 0x94a: 0x4000, 0x94b: 0x4000, + 0x94c: 0x4000, 0x94d: 0x4000, 0x94e: 0x4000, 0x94f: 0x4000, 0x950: 0x4000, 0x951: 0x4000, + 0x952: 0x4000, 0x953: 0x4000, + 0x960: 0x2000, 0x961: 0x2000, 0x963: 0x2000, + 0x964: 0x2000, 0x965: 0x2000, 0x967: 0x2000, 0x968: 0x2000, 0x969: 0x2000, + 0x96a: 0x2000, 0x96c: 0x2000, 0x96d: 0x2000, 0x96f: 0x2000, + 0x97f: 0x4000, + // Block 0x26, offset 0x980 + 0x993: 0x4000, + 0x99e: 0x2000, 0x99f: 0x2000, 0x9a1: 0x4000, + 0x9aa: 0x4000, 0x9ab: 0x4000, + 0x9bd: 0x4000, 0x9be: 0x4000, 0x9bf: 0x2000, + // Block 0x27, offset 0x9c0 + 0x9c4: 0x4000, 0x9c5: 0x4000, + 0x9c6: 0x2000, 0x9c7: 0x2000, 0x9c8: 0x2000, 0x9c9: 0x2000, 0x9ca: 0x2000, 0x9cb: 0x2000, + 0x9cc: 0x2000, 0x9cd: 0x2000, 0x9ce: 0x4000, 0x9cf: 0x2000, 0x9d0: 0x2000, 0x9d1: 0x2000, + 0x9d2: 0x2000, 0x9d3: 0x2000, 0x9d4: 0x4000, 0x9d5: 0x2000, 0x9d6: 0x2000, 0x9d7: 0x2000, + 0x9d8: 0x2000, 0x9d9: 0x2000, 0x9da: 0x2000, 0x9db: 0x2000, 0x9dc: 0x2000, 0x9dd: 0x2000, + 0x9de: 0x2000, 0x9df: 0x2000, 0x9e0: 0x2000, 0x9e1: 0x2000, 0x9e3: 0x2000, + 0x9e8: 0x2000, 0x9e9: 0x2000, + 0x9ea: 0x4000, 0x9eb: 0x2000, 0x9ec: 0x2000, 0x9ed: 0x2000, 0x9ee: 0x2000, 0x9ef: 0x2000, + 0x9f0: 0x2000, 0x9f1: 0x2000, 0x9f2: 0x4000, 0x9f3: 0x4000, 0x9f4: 0x2000, 0x9f5: 0x4000, + 0x9f6: 0x2000, 0x9f7: 0x2000, 0x9f8: 0x2000, 0x9f9: 0x2000, 0x9fa: 0x4000, 0x9fb: 0x2000, + 0x9fc: 0x2000, 0x9fd: 0x4000, 0x9fe: 0x2000, 0x9ff: 0x2000, + // Block 0x28, offset 0xa00 + 0xa05: 0x4000, + 0xa0a: 0x4000, 0xa0b: 0x4000, + 0xa28: 0x4000, + 0xa3d: 0x2000, + // Block 0x29, offset 0xa40 + 0xa4c: 0x4000, 0xa4e: 0x4000, + 0xa53: 0x4000, 0xa54: 0x4000, 0xa55: 0x4000, 0xa57: 0x4000, + 0xa76: 0x2000, 0xa77: 0x2000, 0xa78: 0x2000, 0xa79: 0x2000, 0xa7a: 0x2000, 0xa7b: 0x2000, + 0xa7c: 0x2000, 0xa7d: 0x2000, 0xa7e: 0x2000, 0xa7f: 0x2000, + // Block 0x2a, offset 0xa80 + 0xa95: 0x4000, 0xa96: 0x4000, 0xa97: 0x4000, + 0xab0: 0x4000, + 0xabf: 0x4000, + // Block 0x2b, offset 0xac0 + 0xae6: 0x6000, 0xae7: 0x6000, 0xae8: 0x6000, 0xae9: 0x6000, + 0xaea: 0x6000, 0xaeb: 0x6000, 0xaec: 0x6000, 0xaed: 0x6000, + // Block 0x2c, offset 0xb00 + 0xb05: 0x6010, + 0xb06: 0x6011, + // Block 0x2d, offset 0xb40 + 0xb5b: 0x4000, 0xb5c: 0x4000, + // Block 0x2e, offset 0xb80 + 0xb90: 0x4000, + 0xb95: 0x4000, 0xb96: 
0x2000, 0xb97: 0x2000, + 0xb98: 0x2000, 0xb99: 0x2000, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x4000, 0xbc1: 0x4000, 0xbc2: 0x4000, 0xbc3: 0x4000, 0xbc4: 0x4000, 0xbc5: 0x4000, + 0xbc6: 0x4000, 0xbc7: 0x4000, 0xbc8: 0x4000, 0xbc9: 0x4000, 0xbca: 0x4000, 0xbcb: 0x4000, + 0xbcc: 0x4000, 0xbcd: 0x4000, 0xbce: 0x4000, 0xbcf: 0x4000, 0xbd0: 0x4000, 0xbd1: 0x4000, + 0xbd2: 0x4000, 0xbd3: 0x4000, 0xbd4: 0x4000, 0xbd5: 0x4000, 0xbd6: 0x4000, 0xbd7: 0x4000, + 0xbd8: 0x4000, 0xbd9: 0x4000, 0xbdb: 0x4000, 0xbdc: 0x4000, 0xbdd: 0x4000, + 0xbde: 0x4000, 0xbdf: 0x4000, 0xbe0: 0x4000, 0xbe1: 0x4000, 0xbe2: 0x4000, 0xbe3: 0x4000, + 0xbe4: 0x4000, 0xbe5: 0x4000, 0xbe6: 0x4000, 0xbe7: 0x4000, 0xbe8: 0x4000, 0xbe9: 0x4000, + 0xbea: 0x4000, 0xbeb: 0x4000, 0xbec: 0x4000, 0xbed: 0x4000, 0xbee: 0x4000, 0xbef: 0x4000, + 0xbf0: 0x4000, 0xbf1: 0x4000, 0xbf2: 0x4000, 0xbf3: 0x4000, 0xbf4: 0x4000, 0xbf5: 0x4000, + 0xbf6: 0x4000, 0xbf7: 0x4000, 0xbf8: 0x4000, 0xbf9: 0x4000, 0xbfa: 0x4000, 0xbfb: 0x4000, + 0xbfc: 0x4000, 0xbfd: 0x4000, 0xbfe: 0x4000, 0xbff: 0x4000, + // Block 0x30, offset 0xc00 + 0xc00: 0x4000, 0xc01: 0x4000, 0xc02: 0x4000, 0xc03: 0x4000, 0xc04: 0x4000, 0xc05: 0x4000, + 0xc06: 0x4000, 0xc07: 0x4000, 0xc08: 0x4000, 0xc09: 0x4000, 0xc0a: 0x4000, 0xc0b: 0x4000, + 0xc0c: 0x4000, 0xc0d: 0x4000, 0xc0e: 0x4000, 0xc0f: 0x4000, 0xc10: 0x4000, 0xc11: 0x4000, + 0xc12: 0x4000, 0xc13: 0x4000, 0xc14: 0x4000, 0xc15: 0x4000, 0xc16: 0x4000, 0xc17: 0x4000, + 0xc18: 0x4000, 0xc19: 0x4000, 0xc1a: 0x4000, 0xc1b: 0x4000, 0xc1c: 0x4000, 0xc1d: 0x4000, + 0xc1e: 0x4000, 0xc1f: 0x4000, 0xc20: 0x4000, 0xc21: 0x4000, 0xc22: 0x4000, 0xc23: 0x4000, + 0xc24: 0x4000, 0xc25: 0x4000, 0xc26: 0x4000, 0xc27: 0x4000, 0xc28: 0x4000, 0xc29: 0x4000, + 0xc2a: 0x4000, 0xc2b: 0x4000, 0xc2c: 0x4000, 0xc2d: 0x4000, 0xc2e: 0x4000, 0xc2f: 0x4000, + 0xc30: 0x4000, 0xc31: 0x4000, 0xc32: 0x4000, 0xc33: 0x4000, + // Block 0x31, offset 0xc40 + 0xc40: 0x4000, 0xc41: 0x4000, 0xc42: 0x4000, 0xc43: 0x4000, 0xc44: 0x4000, 0xc45: 0x4000, + 0xc46: 0x4000, 0xc47: 0x4000, 0xc48: 0x4000, 0xc49: 0x4000, 0xc4a: 0x4000, 0xc4b: 0x4000, + 0xc4c: 0x4000, 0xc4d: 0x4000, 0xc4e: 0x4000, 0xc4f: 0x4000, 0xc50: 0x4000, 0xc51: 0x4000, + 0xc52: 0x4000, 0xc53: 0x4000, 0xc54: 0x4000, 0xc55: 0x4000, + 0xc70: 0x4000, 0xc71: 0x4000, 0xc72: 0x4000, 0xc73: 0x4000, 0xc74: 0x4000, 0xc75: 0x4000, + 0xc76: 0x4000, 0xc77: 0x4000, 0xc78: 0x4000, 0xc79: 0x4000, 0xc7a: 0x4000, 0xc7b: 0x4000, + // Block 0x32, offset 0xc80 + 0xc80: 0x9012, 0xc81: 0x4013, 0xc82: 0x4014, 0xc83: 0x4000, 0xc84: 0x4000, 0xc85: 0x4000, + 0xc86: 0x4000, 0xc87: 0x4000, 0xc88: 0x4000, 0xc89: 0x4000, 0xc8a: 0x4000, 0xc8b: 0x4000, + 0xc8c: 0x4015, 0xc8d: 0x4015, 0xc8e: 0x4000, 0xc8f: 0x4000, 0xc90: 0x4000, 0xc91: 0x4000, + 0xc92: 0x4000, 0xc93: 0x4000, 0xc94: 0x4000, 0xc95: 0x4000, 0xc96: 0x4000, 0xc97: 0x4000, + 0xc98: 0x4000, 0xc99: 0x4000, 0xc9a: 0x4000, 0xc9b: 0x4000, 0xc9c: 0x4000, 0xc9d: 0x4000, + 0xc9e: 0x4000, 0xc9f: 0x4000, 0xca0: 0x4000, 0xca1: 0x4000, 0xca2: 0x4000, 0xca3: 0x4000, + 0xca4: 0x4000, 0xca5: 0x4000, 0xca6: 0x4000, 0xca7: 0x4000, 0xca8: 0x4000, 0xca9: 0x4000, + 0xcaa: 0x4000, 0xcab: 0x4000, 0xcac: 0x4000, 0xcad: 0x4000, 0xcae: 0x4000, 0xcaf: 0x4000, + 0xcb0: 0x4000, 0xcb1: 0x4000, 0xcb2: 0x4000, 0xcb3: 0x4000, 0xcb4: 0x4000, 0xcb5: 0x4000, + 0xcb6: 0x4000, 0xcb7: 0x4000, 0xcb8: 0x4000, 0xcb9: 0x4000, 0xcba: 0x4000, 0xcbb: 0x4000, + 0xcbc: 0x4000, 0xcbd: 0x4000, 0xcbe: 0x4000, + // Block 0x33, offset 0xcc0 + 0xcc1: 0x4000, 0xcc2: 0x4000, 0xcc3: 0x4000, 0xcc4: 0x4000, 0xcc5: 0x4000, + 0xcc6: 0x4000, 
0xcc7: 0x4000, 0xcc8: 0x4000, 0xcc9: 0x4000, 0xcca: 0x4000, 0xccb: 0x4000, + 0xccc: 0x4000, 0xccd: 0x4000, 0xcce: 0x4000, 0xccf: 0x4000, 0xcd0: 0x4000, 0xcd1: 0x4000, + 0xcd2: 0x4000, 0xcd3: 0x4000, 0xcd4: 0x4000, 0xcd5: 0x4000, 0xcd6: 0x4000, 0xcd7: 0x4000, + 0xcd8: 0x4000, 0xcd9: 0x4000, 0xcda: 0x4000, 0xcdb: 0x4000, 0xcdc: 0x4000, 0xcdd: 0x4000, + 0xcde: 0x4000, 0xcdf: 0x4000, 0xce0: 0x4000, 0xce1: 0x4000, 0xce2: 0x4000, 0xce3: 0x4000, + 0xce4: 0x4000, 0xce5: 0x4000, 0xce6: 0x4000, 0xce7: 0x4000, 0xce8: 0x4000, 0xce9: 0x4000, + 0xcea: 0x4000, 0xceb: 0x4000, 0xcec: 0x4000, 0xced: 0x4000, 0xcee: 0x4000, 0xcef: 0x4000, + 0xcf0: 0x4000, 0xcf1: 0x4000, 0xcf2: 0x4000, 0xcf3: 0x4000, 0xcf4: 0x4000, 0xcf5: 0x4000, + 0xcf6: 0x4000, 0xcf7: 0x4000, 0xcf8: 0x4000, 0xcf9: 0x4000, 0xcfa: 0x4000, 0xcfb: 0x4000, + 0xcfc: 0x4000, 0xcfd: 0x4000, 0xcfe: 0x4000, 0xcff: 0x4000, + // Block 0x34, offset 0xd00 + 0xd00: 0x4000, 0xd01: 0x4000, 0xd02: 0x4000, 0xd03: 0x4000, 0xd04: 0x4000, 0xd05: 0x4000, + 0xd06: 0x4000, 0xd07: 0x4000, 0xd08: 0x4000, 0xd09: 0x4000, 0xd0a: 0x4000, 0xd0b: 0x4000, + 0xd0c: 0x4000, 0xd0d: 0x4000, 0xd0e: 0x4000, 0xd0f: 0x4000, 0xd10: 0x4000, 0xd11: 0x4000, + 0xd12: 0x4000, 0xd13: 0x4000, 0xd14: 0x4000, 0xd15: 0x4000, 0xd16: 0x4000, + 0xd19: 0x4016, 0xd1a: 0x4017, 0xd1b: 0x4000, 0xd1c: 0x4000, 0xd1d: 0x4000, + 0xd1e: 0x4000, 0xd1f: 0x4000, 0xd20: 0x4000, 0xd21: 0x4018, 0xd22: 0x4019, 0xd23: 0x401a, + 0xd24: 0x401b, 0xd25: 0x401c, 0xd26: 0x401d, 0xd27: 0x401e, 0xd28: 0x401f, 0xd29: 0x4020, + 0xd2a: 0x4021, 0xd2b: 0x4022, 0xd2c: 0x4000, 0xd2d: 0x4010, 0xd2e: 0x4000, 0xd2f: 0x4023, + 0xd30: 0x4000, 0xd31: 0x4024, 0xd32: 0x4000, 0xd33: 0x4025, 0xd34: 0x4000, 0xd35: 0x4026, + 0xd36: 0x4000, 0xd37: 0x401a, 0xd38: 0x4000, 0xd39: 0x4027, 0xd3a: 0x4000, 0xd3b: 0x4028, + 0xd3c: 0x4000, 0xd3d: 0x4020, 0xd3e: 0x4000, 0xd3f: 0x4029, + // Block 0x35, offset 0xd40 + 0xd40: 0x4000, 0xd41: 0x402a, 0xd42: 0x4000, 0xd43: 0x402b, 0xd44: 0x402c, 0xd45: 0x4000, + 0xd46: 0x4017, 0xd47: 0x4000, 0xd48: 0x402d, 0xd49: 0x4000, 0xd4a: 0x402e, 0xd4b: 0x402f, + 0xd4c: 0x4030, 0xd4d: 0x4017, 0xd4e: 0x4016, 0xd4f: 0x4017, 0xd50: 0x4000, 0xd51: 0x4000, + 0xd52: 0x4031, 0xd53: 0x4000, 0xd54: 0x4000, 0xd55: 0x4031, 0xd56: 0x4000, 0xd57: 0x4000, + 0xd58: 0x4032, 0xd59: 0x4000, 0xd5a: 0x4000, 0xd5b: 0x4032, 0xd5c: 0x4000, 0xd5d: 0x4000, + 0xd5e: 0x4033, 0xd5f: 0x402e, 0xd60: 0x4034, 0xd61: 0x4035, 0xd62: 0x4034, 0xd63: 0x4036, + 0xd64: 0x4037, 0xd65: 0x4024, 0xd66: 0x4035, 0xd67: 0x4025, 0xd68: 0x4038, 0xd69: 0x4038, + 0xd6a: 0x4039, 0xd6b: 0x4039, 0xd6c: 0x403a, 0xd6d: 0x403a, 0xd6e: 0x4000, 0xd6f: 0x4035, + 0xd70: 0x4000, 0xd71: 0x4000, 0xd72: 0x403b, 0xd73: 0x403c, 0xd74: 0x4000, 0xd75: 0x4000, + 0xd76: 0x4000, 0xd77: 0x4000, 0xd78: 0x4000, 0xd79: 0x4000, 0xd7a: 0x4000, 0xd7b: 0x403d, + 0xd7c: 0x401c, 0xd7d: 0x4000, 0xd7e: 0x4000, 0xd7f: 0x4000, + // Block 0x36, offset 0xd80 + 0xd85: 0x4000, + 0xd86: 0x4000, 0xd87: 0x4000, 0xd88: 0x4000, 0xd89: 0x4000, 0xd8a: 0x4000, 0xd8b: 0x4000, + 0xd8c: 0x4000, 0xd8d: 0x4000, 0xd8e: 0x4000, 0xd8f: 0x4000, 0xd90: 0x4000, 0xd91: 0x4000, + 0xd92: 0x4000, 0xd93: 0x4000, 0xd94: 0x4000, 0xd95: 0x4000, 0xd96: 0x4000, 0xd97: 0x4000, + 0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000, + 0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000, + 0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000, + 0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000, 0xdae: 0x4000, 0xdaf: 
0x4000, + 0xdb1: 0x403e, 0xdb2: 0x403e, 0xdb3: 0x403e, 0xdb4: 0x403e, 0xdb5: 0x403e, + 0xdb6: 0x403e, 0xdb7: 0x403e, 0xdb8: 0x403e, 0xdb9: 0x403e, 0xdba: 0x403e, 0xdbb: 0x403e, + 0xdbc: 0x403e, 0xdbd: 0x403e, 0xdbe: 0x403e, 0xdbf: 0x403e, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x4037, 0xdc1: 0x4037, 0xdc2: 0x4037, 0xdc3: 0x4037, 0xdc4: 0x4037, 0xdc5: 0x4037, + 0xdc6: 0x4037, 0xdc7: 0x4037, 0xdc8: 0x4037, 0xdc9: 0x4037, 0xdca: 0x4037, 0xdcb: 0x4037, + 0xdcc: 0x4037, 0xdcd: 0x4037, 0xdce: 0x4037, 0xdcf: 0x400e, 0xdd0: 0x403f, 0xdd1: 0x4040, + 0xdd2: 0x4041, 0xdd3: 0x4040, 0xdd4: 0x403f, 0xdd5: 0x4042, 0xdd6: 0x4043, 0xdd7: 0x4044, + 0xdd8: 0x4040, 0xdd9: 0x4041, 0xdda: 0x4040, 0xddb: 0x4045, 0xddc: 0x4009, 0xddd: 0x4045, + 0xdde: 0x4046, 0xddf: 0x4045, 0xde0: 0x4047, 0xde1: 0x400b, 0xde2: 0x400a, 0xde3: 0x400c, + 0xde4: 0x4048, 0xde5: 0x4000, 0xde6: 0x4000, 0xde7: 0x4000, 0xde8: 0x4000, 0xde9: 0x4000, + 0xdea: 0x4000, 0xdeb: 0x4000, 0xdec: 0x4000, 0xded: 0x4000, 0xdee: 0x4000, 0xdef: 0x4000, + 0xdf0: 0x4000, 0xdf1: 0x4000, 0xdf2: 0x4000, 0xdf3: 0x4000, 0xdf4: 0x4000, 0xdf5: 0x4000, + 0xdf6: 0x4000, 0xdf7: 0x4000, 0xdf8: 0x4000, 0xdf9: 0x4000, 0xdfa: 0x4000, 0xdfb: 0x4000, + 0xdfc: 0x4000, 0xdfd: 0x4000, 0xdfe: 0x4000, 0xdff: 0x4000, + // Block 0x38, offset 0xe00 + 0xe00: 0x4000, 0xe01: 0x4000, 0xe02: 0x4000, 0xe03: 0x4000, 0xe04: 0x4000, 0xe05: 0x4000, + 0xe06: 0x4000, 0xe07: 0x4000, 0xe08: 0x4000, 0xe09: 0x4000, 0xe0a: 0x4000, 0xe0b: 0x4000, + 0xe0c: 0x4000, 0xe0d: 0x4000, 0xe0e: 0x4000, 0xe10: 0x4000, 0xe11: 0x4000, + 0xe12: 0x4000, 0xe13: 0x4000, 0xe14: 0x4000, 0xe15: 0x4000, 0xe16: 0x4000, 0xe17: 0x4000, + 0xe18: 0x4000, 0xe19: 0x4000, 0xe1a: 0x4000, 0xe1b: 0x4000, 0xe1c: 0x4000, 0xe1d: 0x4000, + 0xe1e: 0x4000, 0xe1f: 0x4000, 0xe20: 0x4000, 0xe21: 0x4000, 0xe22: 0x4000, 0xe23: 0x4000, + 0xe24: 0x4000, 0xe25: 0x4000, 0xe26: 0x4000, 0xe27: 0x4000, 0xe28: 0x4000, 0xe29: 0x4000, + 0xe2a: 0x4000, 0xe2b: 0x4000, 0xe2c: 0x4000, 0xe2d: 0x4000, 0xe2e: 0x4000, 0xe2f: 0x4000, + 0xe30: 0x4000, 0xe31: 0x4000, 0xe32: 0x4000, 0xe33: 0x4000, 0xe34: 0x4000, 0xe35: 0x4000, + 0xe36: 0x4000, 0xe37: 0x4000, 0xe38: 0x4000, 0xe39: 0x4000, 0xe3a: 0x4000, + // Block 0x39, offset 0xe40 + 0xe40: 0x4000, 0xe41: 0x4000, 0xe42: 0x4000, 0xe43: 0x4000, 0xe44: 0x4000, 0xe45: 0x4000, + 0xe46: 0x4000, 0xe47: 0x4000, 0xe48: 0x4000, 0xe49: 0x4000, 0xe4a: 0x4000, 0xe4b: 0x4000, + 0xe4c: 0x4000, 0xe4d: 0x4000, 0xe4e: 0x4000, 0xe4f: 0x4000, 0xe50: 0x4000, 0xe51: 0x4000, + 0xe52: 0x4000, 0xe53: 0x4000, 0xe54: 0x4000, 0xe55: 0x4000, 0xe56: 0x4000, 0xe57: 0x4000, + 0xe58: 0x4000, 0xe59: 0x4000, 0xe5a: 0x4000, 0xe5b: 0x4000, 0xe5c: 0x4000, 0xe5d: 0x4000, + 0xe5e: 0x4000, 0xe5f: 0x4000, 0xe60: 0x4000, 0xe61: 0x4000, 0xe62: 0x4000, 0xe63: 0x4000, + 0xe70: 0x4000, 0xe71: 0x4000, 0xe72: 0x4000, 0xe73: 0x4000, 0xe74: 0x4000, 0xe75: 0x4000, + 0xe76: 0x4000, 0xe77: 0x4000, 0xe78: 0x4000, 0xe79: 0x4000, 0xe7a: 0x4000, 0xe7b: 0x4000, + 0xe7c: 0x4000, 0xe7d: 0x4000, 0xe7e: 0x4000, 0xe7f: 0x4000, + // Block 0x3a, offset 0xe80 + 0xe80: 0x4000, 0xe81: 0x4000, 0xe82: 0x4000, 0xe83: 0x4000, 0xe84: 0x4000, 0xe85: 0x4000, + 0xe86: 0x4000, 0xe87: 0x4000, 0xe88: 0x4000, 0xe89: 0x4000, 0xe8a: 0x4000, 0xe8b: 0x4000, + 0xe8c: 0x4000, 0xe8d: 0x4000, 0xe8e: 0x4000, 0xe8f: 0x4000, 0xe90: 0x4000, 0xe91: 0x4000, + 0xe92: 0x4000, 0xe93: 0x4000, 0xe94: 0x4000, 0xe95: 0x4000, 0xe96: 0x4000, 0xe97: 0x4000, + 0xe98: 0x4000, 0xe99: 0x4000, 0xe9a: 0x4000, 0xe9b: 0x4000, 0xe9c: 0x4000, 0xe9d: 0x4000, + 0xe9e: 0x4000, 0xea0: 0x4000, 0xea1: 0x4000, 0xea2: 0x4000, 
0xea3: 0x4000, + 0xea4: 0x4000, 0xea5: 0x4000, 0xea6: 0x4000, 0xea7: 0x4000, 0xea8: 0x4000, 0xea9: 0x4000, + 0xeaa: 0x4000, 0xeab: 0x4000, 0xeac: 0x4000, 0xead: 0x4000, 0xeae: 0x4000, 0xeaf: 0x4000, + 0xeb0: 0x4000, 0xeb1: 0x4000, 0xeb2: 0x4000, 0xeb3: 0x4000, 0xeb4: 0x4000, 0xeb5: 0x4000, + 0xeb6: 0x4000, 0xeb7: 0x4000, 0xeb8: 0x4000, 0xeb9: 0x4000, 0xeba: 0x4000, 0xebb: 0x4000, + 0xebc: 0x4000, 0xebd: 0x4000, 0xebe: 0x4000, 0xebf: 0x4000, + // Block 0x3b, offset 0xec0 + 0xec0: 0x4000, 0xec1: 0x4000, 0xec2: 0x4000, 0xec3: 0x4000, 0xec4: 0x4000, 0xec5: 0x4000, + 0xec6: 0x4000, 0xec7: 0x4000, 0xec8: 0x2000, 0xec9: 0x2000, 0xeca: 0x2000, 0xecb: 0x2000, + 0xecc: 0x2000, 0xecd: 0x2000, 0xece: 0x2000, 0xecf: 0x2000, 0xed0: 0x4000, 0xed1: 0x4000, + 0xed2: 0x4000, 0xed3: 0x4000, 0xed4: 0x4000, 0xed5: 0x4000, 0xed6: 0x4000, 0xed7: 0x4000, + 0xed8: 0x4000, 0xed9: 0x4000, 0xeda: 0x4000, 0xedb: 0x4000, 0xedc: 0x4000, 0xedd: 0x4000, + 0xede: 0x4000, 0xedf: 0x4000, 0xee0: 0x4000, 0xee1: 0x4000, 0xee2: 0x4000, 0xee3: 0x4000, + 0xee4: 0x4000, 0xee5: 0x4000, 0xee6: 0x4000, 0xee7: 0x4000, 0xee8: 0x4000, 0xee9: 0x4000, + 0xeea: 0x4000, 0xeeb: 0x4000, 0xeec: 0x4000, 0xeed: 0x4000, 0xeee: 0x4000, 0xeef: 0x4000, + 0xef0: 0x4000, 0xef1: 0x4000, 0xef2: 0x4000, 0xef3: 0x4000, 0xef4: 0x4000, 0xef5: 0x4000, + 0xef6: 0x4000, 0xef7: 0x4000, 0xef8: 0x4000, 0xef9: 0x4000, 0xefa: 0x4000, 0xefb: 0x4000, + 0xefc: 0x4000, 0xefd: 0x4000, 0xefe: 0x4000, 0xeff: 0x4000, + // Block 0x3c, offset 0xf00 + 0xf00: 0x4000, 0xf01: 0x4000, 0xf02: 0x4000, 0xf03: 0x4000, 0xf04: 0x4000, 0xf05: 0x4000, + 0xf06: 0x4000, 0xf07: 0x4000, 0xf08: 0x4000, 0xf09: 0x4000, 0xf0a: 0x4000, 0xf0b: 0x4000, + 0xf0c: 0x4000, 0xf0d: 0x4000, 0xf0e: 0x4000, 0xf0f: 0x4000, 0xf10: 0x4000, 0xf11: 0x4000, + 0xf12: 0x4000, 0xf13: 0x4000, 0xf14: 0x4000, 0xf15: 0x4000, 0xf16: 0x4000, 0xf17: 0x4000, + 0xf18: 0x4000, 0xf19: 0x4000, 0xf1a: 0x4000, 0xf1b: 0x4000, 0xf1c: 0x4000, 0xf1d: 0x4000, + 0xf1e: 0x4000, 0xf1f: 0x4000, 0xf20: 0x4000, 0xf21: 0x4000, 0xf22: 0x4000, 0xf23: 0x4000, + 0xf24: 0x4000, 0xf25: 0x4000, 0xf26: 0x4000, 0xf27: 0x4000, 0xf28: 0x4000, 0xf29: 0x4000, + 0xf2a: 0x4000, 0xf2b: 0x4000, 0xf2c: 0x4000, 0xf2d: 0x4000, 0xf2e: 0x4000, 0xf2f: 0x4000, + 0xf30: 0x4000, 0xf31: 0x4000, 0xf32: 0x4000, 0xf33: 0x4000, 0xf34: 0x4000, 0xf35: 0x4000, + 0xf36: 0x4000, 0xf37: 0x4000, 0xf38: 0x4000, 0xf39: 0x4000, 0xf3a: 0x4000, 0xf3b: 0x4000, + 0xf3c: 0x4000, 0xf3d: 0x4000, 0xf3e: 0x4000, + // Block 0x3d, offset 0xf40 + 0xf40: 0x4000, 0xf41: 0x4000, 0xf42: 0x4000, 0xf43: 0x4000, 0xf44: 0x4000, 0xf45: 0x4000, + 0xf46: 0x4000, 0xf47: 0x4000, 0xf48: 0x4000, 0xf49: 0x4000, 0xf4a: 0x4000, 0xf4b: 0x4000, + 0xf4c: 0x4000, 0xf50: 0x4000, 0xf51: 0x4000, + 0xf52: 0x4000, 0xf53: 0x4000, 0xf54: 0x4000, 0xf55: 0x4000, 0xf56: 0x4000, 0xf57: 0x4000, + 0xf58: 0x4000, 0xf59: 0x4000, 0xf5a: 0x4000, 0xf5b: 0x4000, 0xf5c: 0x4000, 0xf5d: 0x4000, + 0xf5e: 0x4000, 0xf5f: 0x4000, 0xf60: 0x4000, 0xf61: 0x4000, 0xf62: 0x4000, 0xf63: 0x4000, + 0xf64: 0x4000, 0xf65: 0x4000, 0xf66: 0x4000, 0xf67: 0x4000, 0xf68: 0x4000, 0xf69: 0x4000, + 0xf6a: 0x4000, 0xf6b: 0x4000, 0xf6c: 0x4000, 0xf6d: 0x4000, 0xf6e: 0x4000, 0xf6f: 0x4000, + 0xf70: 0x4000, 0xf71: 0x4000, 0xf72: 0x4000, 0xf73: 0x4000, 0xf74: 0x4000, 0xf75: 0x4000, + 0xf76: 0x4000, 0xf77: 0x4000, 0xf78: 0x4000, 0xf79: 0x4000, 0xf7a: 0x4000, 0xf7b: 0x4000, + 0xf7c: 0x4000, 0xf7d: 0x4000, 0xf7e: 0x4000, 0xf7f: 0x4000, + // Block 0x3e, offset 0xf80 + 0xf80: 0x4000, 0xf81: 0x4000, 0xf82: 0x4000, 0xf83: 0x4000, 0xf84: 0x4000, 0xf85: 0x4000, + 0xf86: 
0x4000, + // Block 0x3f, offset 0xfc0 + 0xfe0: 0x4000, 0xfe1: 0x4000, 0xfe2: 0x4000, 0xfe3: 0x4000, + 0xfe4: 0x4000, 0xfe5: 0x4000, 0xfe6: 0x4000, 0xfe7: 0x4000, 0xfe8: 0x4000, 0xfe9: 0x4000, + 0xfea: 0x4000, 0xfeb: 0x4000, 0xfec: 0x4000, 0xfed: 0x4000, 0xfee: 0x4000, 0xfef: 0x4000, + 0xff0: 0x4000, 0xff1: 0x4000, 0xff2: 0x4000, 0xff3: 0x4000, 0xff4: 0x4000, 0xff5: 0x4000, + 0xff6: 0x4000, 0xff7: 0x4000, 0xff8: 0x4000, 0xff9: 0x4000, 0xffa: 0x4000, 0xffb: 0x4000, + 0xffc: 0x4000, + // Block 0x40, offset 0x1000 + 0x1000: 0x4000, 0x1001: 0x4000, 0x1002: 0x4000, 0x1003: 0x4000, 0x1004: 0x4000, 0x1005: 0x4000, + 0x1006: 0x4000, 0x1007: 0x4000, 0x1008: 0x4000, 0x1009: 0x4000, 0x100a: 0x4000, 0x100b: 0x4000, + 0x100c: 0x4000, 0x100d: 0x4000, 0x100e: 0x4000, 0x100f: 0x4000, 0x1010: 0x4000, 0x1011: 0x4000, + 0x1012: 0x4000, 0x1013: 0x4000, 0x1014: 0x4000, 0x1015: 0x4000, 0x1016: 0x4000, 0x1017: 0x4000, + 0x1018: 0x4000, 0x1019: 0x4000, 0x101a: 0x4000, 0x101b: 0x4000, 0x101c: 0x4000, 0x101d: 0x4000, + 0x101e: 0x4000, 0x101f: 0x4000, 0x1020: 0x4000, 0x1021: 0x4000, 0x1022: 0x4000, 0x1023: 0x4000, + // Block 0x41, offset 0x1040 + 0x1040: 0x2000, 0x1041: 0x2000, 0x1042: 0x2000, 0x1043: 0x2000, 0x1044: 0x2000, 0x1045: 0x2000, + 0x1046: 0x2000, 0x1047: 0x2000, 0x1048: 0x2000, 0x1049: 0x2000, 0x104a: 0x2000, 0x104b: 0x2000, + 0x104c: 0x2000, 0x104d: 0x2000, 0x104e: 0x2000, 0x104f: 0x2000, 0x1050: 0x4000, 0x1051: 0x4000, + 0x1052: 0x4000, 0x1053: 0x4000, 0x1054: 0x4000, 0x1055: 0x4000, 0x1056: 0x4000, 0x1057: 0x4000, + 0x1058: 0x4000, 0x1059: 0x4000, + 0x1070: 0x4000, 0x1071: 0x4000, 0x1072: 0x4000, 0x1073: 0x4000, 0x1074: 0x4000, 0x1075: 0x4000, + 0x1076: 0x4000, 0x1077: 0x4000, 0x1078: 0x4000, 0x1079: 0x4000, 0x107a: 0x4000, 0x107b: 0x4000, + 0x107c: 0x4000, 0x107d: 0x4000, 0x107e: 0x4000, 0x107f: 0x4000, + // Block 0x42, offset 0x1080 + 0x1080: 0x4000, 0x1081: 0x4000, 0x1082: 0x4000, 0x1083: 0x4000, 0x1084: 0x4000, 0x1085: 0x4000, + 0x1086: 0x4000, 0x1087: 0x4000, 0x1088: 0x4000, 0x1089: 0x4000, 0x108a: 0x4000, 0x108b: 0x4000, + 0x108c: 0x4000, 0x108d: 0x4000, 0x108e: 0x4000, 0x108f: 0x4000, 0x1090: 0x4000, 0x1091: 0x4000, + 0x1092: 0x4000, 0x1094: 0x4000, 0x1095: 0x4000, 0x1096: 0x4000, 0x1097: 0x4000, + 0x1098: 0x4000, 0x1099: 0x4000, 0x109a: 0x4000, 0x109b: 0x4000, 0x109c: 0x4000, 0x109d: 0x4000, + 0x109e: 0x4000, 0x109f: 0x4000, 0x10a0: 0x4000, 0x10a1: 0x4000, 0x10a2: 0x4000, 0x10a3: 0x4000, + 0x10a4: 0x4000, 0x10a5: 0x4000, 0x10a6: 0x4000, 0x10a8: 0x4000, 0x10a9: 0x4000, + 0x10aa: 0x4000, 0x10ab: 0x4000, + // Block 0x43, offset 0x10c0 + 0x10c1: 0x9012, 0x10c2: 0x9012, 0x10c3: 0x9012, 0x10c4: 0x9012, 0x10c5: 0x9012, + 0x10c6: 0x9012, 0x10c7: 0x9012, 0x10c8: 0x9012, 0x10c9: 0x9012, 0x10ca: 0x9012, 0x10cb: 0x9012, + 0x10cc: 0x9012, 0x10cd: 0x9012, 0x10ce: 0x9012, 0x10cf: 0x9012, 0x10d0: 0x9012, 0x10d1: 0x9012, + 0x10d2: 0x9012, 0x10d3: 0x9012, 0x10d4: 0x9012, 0x10d5: 0x9012, 0x10d6: 0x9012, 0x10d7: 0x9012, + 0x10d8: 0x9012, 0x10d9: 0x9012, 0x10da: 0x9012, 0x10db: 0x9012, 0x10dc: 0x9012, 0x10dd: 0x9012, + 0x10de: 0x9012, 0x10df: 0x9012, 0x10e0: 0x9049, 0x10e1: 0x9049, 0x10e2: 0x9049, 0x10e3: 0x9049, + 0x10e4: 0x9049, 0x10e5: 0x9049, 0x10e6: 0x9049, 0x10e7: 0x9049, 0x10e8: 0x9049, 0x10e9: 0x9049, + 0x10ea: 0x9049, 0x10eb: 0x9049, 0x10ec: 0x9049, 0x10ed: 0x9049, 0x10ee: 0x9049, 0x10ef: 0x9049, + 0x10f0: 0x9049, 0x10f1: 0x9049, 0x10f2: 0x9049, 0x10f3: 0x9049, 0x10f4: 0x9049, 0x10f5: 0x9049, + 0x10f6: 0x9049, 0x10f7: 0x9049, 0x10f8: 0x9049, 0x10f9: 0x9049, 0x10fa: 0x9049, 0x10fb: 0x9049, + 0x10fc: 0x9049, 
0x10fd: 0x9049, 0x10fe: 0x9049, 0x10ff: 0x9049, + // Block 0x44, offset 0x1100 + 0x1100: 0x9049, 0x1101: 0x9049, 0x1102: 0x9049, 0x1103: 0x9049, 0x1104: 0x9049, 0x1105: 0x9049, + 0x1106: 0x9049, 0x1107: 0x9049, 0x1108: 0x9049, 0x1109: 0x9049, 0x110a: 0x9049, 0x110b: 0x9049, + 0x110c: 0x9049, 0x110d: 0x9049, 0x110e: 0x9049, 0x110f: 0x9049, 0x1110: 0x9049, 0x1111: 0x9049, + 0x1112: 0x9049, 0x1113: 0x9049, 0x1114: 0x9049, 0x1115: 0x9049, 0x1116: 0x9049, 0x1117: 0x9049, + 0x1118: 0x9049, 0x1119: 0x9049, 0x111a: 0x9049, 0x111b: 0x9049, 0x111c: 0x9049, 0x111d: 0x9049, + 0x111e: 0x9049, 0x111f: 0x904a, 0x1120: 0x904b, 0x1121: 0xb04c, 0x1122: 0xb04d, 0x1123: 0xb04d, + 0x1124: 0xb04e, 0x1125: 0xb04f, 0x1126: 0xb050, 0x1127: 0xb051, 0x1128: 0xb052, 0x1129: 0xb053, + 0x112a: 0xb054, 0x112b: 0xb055, 0x112c: 0xb056, 0x112d: 0xb057, 0x112e: 0xb058, 0x112f: 0xb059, + 0x1130: 0xb05a, 0x1131: 0xb05b, 0x1132: 0xb05c, 0x1133: 0xb05d, 0x1134: 0xb05e, 0x1135: 0xb05f, + 0x1136: 0xb060, 0x1137: 0xb061, 0x1138: 0xb062, 0x1139: 0xb063, 0x113a: 0xb064, 0x113b: 0xb065, + 0x113c: 0xb052, 0x113d: 0xb066, 0x113e: 0xb067, 0x113f: 0xb055, + // Block 0x45, offset 0x1140 + 0x1140: 0xb068, 0x1141: 0xb069, 0x1142: 0xb06a, 0x1143: 0xb06b, 0x1144: 0xb05a, 0x1145: 0xb056, + 0x1146: 0xb06c, 0x1147: 0xb06d, 0x1148: 0xb06b, 0x1149: 0xb06e, 0x114a: 0xb06b, 0x114b: 0xb06f, + 0x114c: 0xb06f, 0x114d: 0xb070, 0x114e: 0xb070, 0x114f: 0xb071, 0x1150: 0xb056, 0x1151: 0xb072, + 0x1152: 0xb073, 0x1153: 0xb072, 0x1154: 0xb074, 0x1155: 0xb073, 0x1156: 0xb075, 0x1157: 0xb075, + 0x1158: 0xb076, 0x1159: 0xb076, 0x115a: 0xb077, 0x115b: 0xb077, 0x115c: 0xb073, 0x115d: 0xb078, + 0x115e: 0xb079, 0x115f: 0xb067, 0x1160: 0xb07a, 0x1161: 0xb07b, 0x1162: 0xb07b, 0x1163: 0xb07b, + 0x1164: 0xb07b, 0x1165: 0xb07b, 0x1166: 0xb07b, 0x1167: 0xb07b, 0x1168: 0xb07b, 0x1169: 0xb07b, + 0x116a: 0xb07b, 0x116b: 0xb07b, 0x116c: 0xb07b, 0x116d: 0xb07b, 0x116e: 0xb07b, 0x116f: 0xb07b, + 0x1170: 0xb07c, 0x1171: 0xb07c, 0x1172: 0xb07c, 0x1173: 0xb07c, 0x1174: 0xb07c, 0x1175: 0xb07c, + 0x1176: 0xb07c, 0x1177: 0xb07c, 0x1178: 0xb07c, 0x1179: 0xb07c, 0x117a: 0xb07c, 0x117b: 0xb07c, + 0x117c: 0xb07c, 0x117d: 0xb07c, 0x117e: 0xb07c, + // Block 0x46, offset 0x1180 + 0x1182: 0xb07d, 0x1183: 0xb07e, 0x1184: 0xb07f, 0x1185: 0xb080, + 0x1186: 0xb07f, 0x1187: 0xb07e, 0x118a: 0xb081, 0x118b: 0xb082, + 0x118c: 0xb083, 0x118d: 0xb07f, 0x118e: 0xb080, 0x118f: 0xb07f, + 0x1192: 0xb084, 0x1193: 0xb085, 0x1194: 0xb084, 0x1195: 0xb086, 0x1196: 0xb084, 0x1197: 0xb087, + 0x119a: 0xb088, 0x119b: 0xb089, 0x119c: 0xb08a, + 0x11a0: 0x908b, 0x11a1: 0x908b, 0x11a2: 0x908c, 0x11a3: 0x908d, + 0x11a4: 0x908b, 0x11a5: 0x908e, 0x11a6: 0x908f, 0x11a8: 0xb090, 0x11a9: 0xb091, + 0x11aa: 0xb092, 0x11ab: 0xb091, 0x11ac: 0xb093, 0x11ad: 0xb094, 0x11ae: 0xb095, + 0x11bd: 0x2000, + // Block 0x47, offset 0x11c0 + 0x11e0: 0x4000, 0x11e1: 0x4000, + // Block 0x48, offset 0x1200 + 0x1200: 0x4000, 0x1201: 0x4000, 0x1202: 0x4000, 0x1203: 0x4000, 0x1204: 0x4000, 0x1205: 0x4000, + 0x1206: 0x4000, 0x1207: 0x4000, 0x1208: 0x4000, 0x1209: 0x4000, 0x120a: 0x4000, 0x120b: 0x4000, + 0x120c: 0x4000, 0x120d: 0x4000, 0x120e: 0x4000, 0x120f: 0x4000, 0x1210: 0x4000, 0x1211: 0x4000, + 0x1212: 0x4000, 0x1213: 0x4000, 0x1214: 0x4000, 0x1215: 0x4000, 0x1216: 0x4000, 0x1217: 0x4000, + 0x1218: 0x4000, 0x1219: 0x4000, 0x121a: 0x4000, 0x121b: 0x4000, 0x121c: 0x4000, 0x121d: 0x4000, + 0x121e: 0x4000, 0x121f: 0x4000, 0x1220: 0x4000, 0x1221: 0x4000, 0x1222: 0x4000, 0x1223: 0x4000, + 0x1224: 0x4000, 0x1225: 0x4000, 0x1226: 0x4000, 0x1227: 
0x4000, 0x1228: 0x4000, 0x1229: 0x4000, + 0x122a: 0x4000, 0x122b: 0x4000, 0x122c: 0x4000, 0x122d: 0x4000, 0x122e: 0x4000, 0x122f: 0x4000, + 0x1230: 0x4000, 0x1231: 0x4000, + // Block 0x49, offset 0x1240 + 0x1240: 0x4000, 0x1241: 0x4000, 0x1242: 0x4000, 0x1243: 0x4000, 0x1244: 0x4000, 0x1245: 0x4000, + 0x1246: 0x4000, 0x1247: 0x4000, 0x1248: 0x4000, 0x1249: 0x4000, 0x124a: 0x4000, 0x124b: 0x4000, + 0x124c: 0x4000, 0x124d: 0x4000, 0x124e: 0x4000, 0x124f: 0x4000, 0x1250: 0x4000, 0x1251: 0x4000, + 0x1252: 0x4000, 0x1253: 0x4000, 0x1254: 0x4000, 0x1255: 0x4000, 0x1256: 0x4000, 0x1257: 0x4000, + 0x1258: 0x4000, 0x1259: 0x4000, 0x125a: 0x4000, 0x125b: 0x4000, 0x125c: 0x4000, 0x125d: 0x4000, + 0x125e: 0x4000, 0x125f: 0x4000, 0x1260: 0x4000, 0x1261: 0x4000, 0x1262: 0x4000, 0x1263: 0x4000, + 0x1264: 0x4000, 0x1265: 0x4000, 0x1266: 0x4000, 0x1267: 0x4000, 0x1268: 0x4000, 0x1269: 0x4000, + 0x126a: 0x4000, 0x126b: 0x4000, 0x126c: 0x4000, 0x126d: 0x4000, 0x126e: 0x4000, 0x126f: 0x4000, + 0x1270: 0x4000, 0x1271: 0x4000, 0x1272: 0x4000, + // Block 0x4a, offset 0x1280 + 0x1280: 0x4000, 0x1281: 0x4000, 0x1282: 0x4000, 0x1283: 0x4000, 0x1284: 0x4000, 0x1285: 0x4000, + 0x1286: 0x4000, 0x1287: 0x4000, 0x1288: 0x4000, 0x1289: 0x4000, 0x128a: 0x4000, 0x128b: 0x4000, + 0x128c: 0x4000, 0x128d: 0x4000, 0x128e: 0x4000, 0x128f: 0x4000, 0x1290: 0x4000, 0x1291: 0x4000, + 0x1292: 0x4000, 0x1293: 0x4000, 0x1294: 0x4000, 0x1295: 0x4000, 0x1296: 0x4000, 0x1297: 0x4000, + 0x1298: 0x4000, 0x1299: 0x4000, 0x129a: 0x4000, 0x129b: 0x4000, 0x129c: 0x4000, 0x129d: 0x4000, + 0x129e: 0x4000, + // Block 0x4b, offset 0x12c0 + 0x12f0: 0x4000, 0x12f1: 0x4000, 0x12f2: 0x4000, 0x12f3: 0x4000, 0x12f4: 0x4000, 0x12f5: 0x4000, + 0x12f6: 0x4000, 0x12f7: 0x4000, 0x12f8: 0x4000, 0x12f9: 0x4000, 0x12fa: 0x4000, 0x12fb: 0x4000, + 0x12fc: 0x4000, 0x12fd: 0x4000, 0x12fe: 0x4000, 0x12ff: 0x4000, + // Block 0x4c, offset 0x1300 + 0x1300: 0x4000, 0x1301: 0x4000, 0x1302: 0x4000, 0x1303: 0x4000, 0x1304: 0x4000, 0x1305: 0x4000, + 0x1306: 0x4000, 0x1307: 0x4000, 0x1308: 0x4000, 0x1309: 0x4000, 0x130a: 0x4000, 0x130b: 0x4000, + 0x130c: 0x4000, 0x130d: 0x4000, 0x130e: 0x4000, 0x130f: 0x4000, 0x1310: 0x4000, 0x1311: 0x4000, + 0x1312: 0x4000, 0x1313: 0x4000, 0x1314: 0x4000, 0x1315: 0x4000, 0x1316: 0x4000, 0x1317: 0x4000, + 0x1318: 0x4000, 0x1319: 0x4000, 0x131a: 0x4000, 0x131b: 0x4000, 0x131c: 0x4000, 0x131d: 0x4000, + 0x131e: 0x4000, 0x131f: 0x4000, 0x1320: 0x4000, 0x1321: 0x4000, 0x1322: 0x4000, 0x1323: 0x4000, + 0x1324: 0x4000, 0x1325: 0x4000, 0x1326: 0x4000, 0x1327: 0x4000, 0x1328: 0x4000, 0x1329: 0x4000, + 0x132a: 0x4000, 0x132b: 0x4000, 0x132c: 0x4000, 0x132d: 0x4000, 0x132e: 0x4000, 0x132f: 0x4000, + 0x1330: 0x4000, 0x1331: 0x4000, 0x1332: 0x4000, 0x1333: 0x4000, 0x1334: 0x4000, 0x1335: 0x4000, + 0x1336: 0x4000, 0x1337: 0x4000, 0x1338: 0x4000, 0x1339: 0x4000, 0x133a: 0x4000, 0x133b: 0x4000, + // Block 0x4d, offset 0x1340 + 0x1344: 0x4000, + // Block 0x4e, offset 0x1380 + 0x138f: 0x4000, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x2000, 0x13c1: 0x2000, 0x13c2: 0x2000, 0x13c3: 0x2000, 0x13c4: 0x2000, 0x13c5: 0x2000, + 0x13c6: 0x2000, 0x13c7: 0x2000, 0x13c8: 0x2000, 0x13c9: 0x2000, 0x13ca: 0x2000, + 0x13d0: 0x2000, 0x13d1: 0x2000, + 0x13d2: 0x2000, 0x13d3: 0x2000, 0x13d4: 0x2000, 0x13d5: 0x2000, 0x13d6: 0x2000, 0x13d7: 0x2000, + 0x13d8: 0x2000, 0x13d9: 0x2000, 0x13da: 0x2000, 0x13db: 0x2000, 0x13dc: 0x2000, 0x13dd: 0x2000, + 0x13de: 0x2000, 0x13df: 0x2000, 0x13e0: 0x2000, 0x13e1: 0x2000, 0x13e2: 0x2000, 0x13e3: 0x2000, + 0x13e4: 0x2000, 0x13e5: 0x2000, 
0x13e6: 0x2000, 0x13e7: 0x2000, 0x13e8: 0x2000, 0x13e9: 0x2000, + 0x13ea: 0x2000, 0x13eb: 0x2000, 0x13ec: 0x2000, 0x13ed: 0x2000, + 0x13f0: 0x2000, 0x13f1: 0x2000, 0x13f2: 0x2000, 0x13f3: 0x2000, 0x13f4: 0x2000, 0x13f5: 0x2000, + 0x13f6: 0x2000, 0x13f7: 0x2000, 0x13f8: 0x2000, 0x13f9: 0x2000, 0x13fa: 0x2000, 0x13fb: 0x2000, + 0x13fc: 0x2000, 0x13fd: 0x2000, 0x13fe: 0x2000, 0x13ff: 0x2000, + // Block 0x50, offset 0x1400 + 0x1400: 0x2000, 0x1401: 0x2000, 0x1402: 0x2000, 0x1403: 0x2000, 0x1404: 0x2000, 0x1405: 0x2000, + 0x1406: 0x2000, 0x1407: 0x2000, 0x1408: 0x2000, 0x1409: 0x2000, 0x140a: 0x2000, 0x140b: 0x2000, + 0x140c: 0x2000, 0x140d: 0x2000, 0x140e: 0x2000, 0x140f: 0x2000, 0x1410: 0x2000, 0x1411: 0x2000, + 0x1412: 0x2000, 0x1413: 0x2000, 0x1414: 0x2000, 0x1415: 0x2000, 0x1416: 0x2000, 0x1417: 0x2000, + 0x1418: 0x2000, 0x1419: 0x2000, 0x141a: 0x2000, 0x141b: 0x2000, 0x141c: 0x2000, 0x141d: 0x2000, + 0x141e: 0x2000, 0x141f: 0x2000, 0x1420: 0x2000, 0x1421: 0x2000, 0x1422: 0x2000, 0x1423: 0x2000, + 0x1424: 0x2000, 0x1425: 0x2000, 0x1426: 0x2000, 0x1427: 0x2000, 0x1428: 0x2000, 0x1429: 0x2000, + 0x1430: 0x2000, 0x1431: 0x2000, 0x1432: 0x2000, 0x1433: 0x2000, 0x1434: 0x2000, 0x1435: 0x2000, + 0x1436: 0x2000, 0x1437: 0x2000, 0x1438: 0x2000, 0x1439: 0x2000, 0x143a: 0x2000, 0x143b: 0x2000, + 0x143c: 0x2000, 0x143d: 0x2000, 0x143e: 0x2000, 0x143f: 0x2000, + // Block 0x51, offset 0x1440 + 0x1440: 0x2000, 0x1441: 0x2000, 0x1442: 0x2000, 0x1443: 0x2000, 0x1444: 0x2000, 0x1445: 0x2000, + 0x1446: 0x2000, 0x1447: 0x2000, 0x1448: 0x2000, 0x1449: 0x2000, 0x144a: 0x2000, 0x144b: 0x2000, + 0x144c: 0x2000, 0x144d: 0x2000, 0x144e: 0x4000, 0x144f: 0x2000, 0x1450: 0x2000, 0x1451: 0x4000, + 0x1452: 0x4000, 0x1453: 0x4000, 0x1454: 0x4000, 0x1455: 0x4000, 0x1456: 0x4000, 0x1457: 0x4000, + 0x1458: 0x4000, 0x1459: 0x4000, 0x145a: 0x4000, 0x145b: 0x2000, 0x145c: 0x2000, 0x145d: 0x2000, + 0x145e: 0x2000, 0x145f: 0x2000, 0x1460: 0x2000, 0x1461: 0x2000, 0x1462: 0x2000, 0x1463: 0x2000, + 0x1464: 0x2000, 0x1465: 0x2000, 0x1466: 0x2000, 0x1467: 0x2000, 0x1468: 0x2000, 0x1469: 0x2000, + 0x146a: 0x2000, 0x146b: 0x2000, 0x146c: 0x2000, + // Block 0x52, offset 0x1480 + 0x1480: 0x4000, 0x1481: 0x4000, 0x1482: 0x4000, + 0x1490: 0x4000, 0x1491: 0x4000, + 0x1492: 0x4000, 0x1493: 0x4000, 0x1494: 0x4000, 0x1495: 0x4000, 0x1496: 0x4000, 0x1497: 0x4000, + 0x1498: 0x4000, 0x1499: 0x4000, 0x149a: 0x4000, 0x149b: 0x4000, 0x149c: 0x4000, 0x149d: 0x4000, + 0x149e: 0x4000, 0x149f: 0x4000, 0x14a0: 0x4000, 0x14a1: 0x4000, 0x14a2: 0x4000, 0x14a3: 0x4000, + 0x14a4: 0x4000, 0x14a5: 0x4000, 0x14a6: 0x4000, 0x14a7: 0x4000, 0x14a8: 0x4000, 0x14a9: 0x4000, + 0x14aa: 0x4000, 0x14ab: 0x4000, 0x14ac: 0x4000, 0x14ad: 0x4000, 0x14ae: 0x4000, 0x14af: 0x4000, + 0x14b0: 0x4000, 0x14b1: 0x4000, 0x14b2: 0x4000, 0x14b3: 0x4000, 0x14b4: 0x4000, 0x14b5: 0x4000, + 0x14b6: 0x4000, 0x14b7: 0x4000, 0x14b8: 0x4000, 0x14b9: 0x4000, 0x14ba: 0x4000, 0x14bb: 0x4000, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x4000, 0x14c1: 0x4000, 0x14c2: 0x4000, 0x14c3: 0x4000, 0x14c4: 0x4000, 0x14c5: 0x4000, + 0x14c6: 0x4000, 0x14c7: 0x4000, 0x14c8: 0x4000, + 0x14d0: 0x4000, 0x14d1: 0x4000, + 0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000, + 0x14e4: 0x4000, 0x14e5: 0x4000, + // Block 0x54, offset 0x1500 + 0x1500: 0x4000, 0x1501: 0x4000, 0x1502: 0x4000, 0x1503: 0x4000, 0x1504: 0x4000, 0x1505: 0x4000, + 0x1506: 0x4000, 0x1507: 0x4000, 0x1508: 0x4000, 0x1509: 0x4000, 0x150a: 0x4000, 0x150b: 0x4000, + 0x150c: 0x4000, 0x150d: 0x4000, 0x150e: 0x4000, 0x150f: 0x4000, 0x1510: 
0x4000, 0x1511: 0x4000, + 0x1512: 0x4000, 0x1513: 0x4000, 0x1514: 0x4000, 0x1515: 0x4000, 0x1516: 0x4000, 0x1517: 0x4000, + 0x1518: 0x4000, 0x1519: 0x4000, 0x151a: 0x4000, 0x151b: 0x4000, 0x151c: 0x4000, 0x151d: 0x4000, + 0x151e: 0x4000, 0x151f: 0x4000, 0x1520: 0x4000, + 0x152d: 0x4000, 0x152e: 0x4000, 0x152f: 0x4000, + 0x1530: 0x4000, 0x1531: 0x4000, 0x1532: 0x4000, 0x1533: 0x4000, 0x1534: 0x4000, 0x1535: 0x4000, + 0x1537: 0x4000, 0x1538: 0x4000, 0x1539: 0x4000, 0x153a: 0x4000, 0x153b: 0x4000, + 0x153c: 0x4000, 0x153d: 0x4000, 0x153e: 0x4000, 0x153f: 0x4000, + // Block 0x55, offset 0x1540 + 0x1540: 0x4000, 0x1541: 0x4000, 0x1542: 0x4000, 0x1543: 0x4000, 0x1544: 0x4000, 0x1545: 0x4000, + 0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000, 0x154b: 0x4000, + 0x154c: 0x4000, 0x154d: 0x4000, 0x154e: 0x4000, 0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000, + 0x1552: 0x4000, 0x1553: 0x4000, 0x1554: 0x4000, 0x1555: 0x4000, 0x1556: 0x4000, 0x1557: 0x4000, + 0x1558: 0x4000, 0x1559: 0x4000, 0x155a: 0x4000, 0x155b: 0x4000, 0x155c: 0x4000, 0x155d: 0x4000, + 0x155e: 0x4000, 0x155f: 0x4000, 0x1560: 0x4000, 0x1561: 0x4000, 0x1562: 0x4000, 0x1563: 0x4000, + 0x1564: 0x4000, 0x1565: 0x4000, 0x1566: 0x4000, 0x1567: 0x4000, 0x1568: 0x4000, 0x1569: 0x4000, + 0x156a: 0x4000, 0x156b: 0x4000, 0x156c: 0x4000, 0x156d: 0x4000, 0x156e: 0x4000, 0x156f: 0x4000, + 0x1570: 0x4000, 0x1571: 0x4000, 0x1572: 0x4000, 0x1573: 0x4000, 0x1574: 0x4000, 0x1575: 0x4000, + 0x1576: 0x4000, 0x1577: 0x4000, 0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000, + 0x157c: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000, + // Block 0x56, offset 0x1580 + 0x1580: 0x4000, 0x1581: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000, + 0x1586: 0x4000, 0x1587: 0x4000, 0x1588: 0x4000, 0x1589: 0x4000, 0x158a: 0x4000, 0x158b: 0x4000, + 0x158c: 0x4000, 0x158d: 0x4000, 0x158e: 0x4000, 0x158f: 0x4000, 0x1590: 0x4000, 0x1591: 0x4000, + 0x1592: 0x4000, 0x1593: 0x4000, + 0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000, + 0x15a4: 0x4000, 0x15a5: 0x4000, 0x15a6: 0x4000, 0x15a7: 0x4000, 0x15a8: 0x4000, 0x15a9: 0x4000, + 0x15aa: 0x4000, 0x15ab: 0x4000, 0x15ac: 0x4000, 0x15ad: 0x4000, 0x15ae: 0x4000, 0x15af: 0x4000, + 0x15b0: 0x4000, 0x15b1: 0x4000, 0x15b2: 0x4000, 0x15b3: 0x4000, 0x15b4: 0x4000, 0x15b5: 0x4000, + 0x15b6: 0x4000, 0x15b7: 0x4000, 0x15b8: 0x4000, 0x15b9: 0x4000, 0x15ba: 0x4000, 0x15bb: 0x4000, + 0x15bc: 0x4000, 0x15bd: 0x4000, 0x15be: 0x4000, 0x15bf: 0x4000, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x4000, 0x15c1: 0x4000, 0x15c2: 0x4000, 0x15c3: 0x4000, 0x15c4: 0x4000, 0x15c5: 0x4000, + 0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000, + 0x15cf: 0x4000, 0x15d0: 0x4000, 0x15d1: 0x4000, + 0x15d2: 0x4000, 0x15d3: 0x4000, + 0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000, + 0x15e4: 0x4000, 0x15e5: 0x4000, 0x15e6: 0x4000, 0x15e7: 0x4000, 0x15e8: 0x4000, 0x15e9: 0x4000, + 0x15ea: 0x4000, 0x15eb: 0x4000, 0x15ec: 0x4000, 0x15ed: 0x4000, 0x15ee: 0x4000, 0x15ef: 0x4000, + 0x15f0: 0x4000, 0x15f4: 0x4000, + 0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000, + 0x15fc: 0x4000, 0x15fd: 0x4000, 0x15fe: 0x4000, 0x15ff: 0x4000, + // Block 0x58, offset 0x1600 + 0x1600: 0x4000, 0x1602: 0x4000, 0x1603: 0x4000, 0x1604: 0x4000, 0x1605: 0x4000, + 0x1606: 0x4000, 0x1607: 0x4000, 0x1608: 0x4000, 0x1609: 0x4000, 0x160a: 0x4000, 0x160b: 0x4000, + 0x160c: 0x4000, 0x160d: 0x4000, 0x160e: 0x4000, 0x160f: 0x4000, 0x1610: 0x4000, 0x1611: 0x4000, + 0x1612: 
0x4000, 0x1613: 0x4000, 0x1614: 0x4000, 0x1615: 0x4000, 0x1616: 0x4000, 0x1617: 0x4000, + 0x1618: 0x4000, 0x1619: 0x4000, 0x161a: 0x4000, 0x161b: 0x4000, 0x161c: 0x4000, 0x161d: 0x4000, + 0x161e: 0x4000, 0x161f: 0x4000, 0x1620: 0x4000, 0x1621: 0x4000, 0x1622: 0x4000, 0x1623: 0x4000, + 0x1624: 0x4000, 0x1625: 0x4000, 0x1626: 0x4000, 0x1627: 0x4000, 0x1628: 0x4000, 0x1629: 0x4000, + 0x162a: 0x4000, 0x162b: 0x4000, 0x162c: 0x4000, 0x162d: 0x4000, 0x162e: 0x4000, 0x162f: 0x4000, + 0x1630: 0x4000, 0x1631: 0x4000, 0x1632: 0x4000, 0x1633: 0x4000, 0x1634: 0x4000, 0x1635: 0x4000, + 0x1636: 0x4000, 0x1637: 0x4000, 0x1638: 0x4000, 0x1639: 0x4000, 0x163a: 0x4000, 0x163b: 0x4000, + 0x163c: 0x4000, 0x163d: 0x4000, 0x163e: 0x4000, 0x163f: 0x4000, + // Block 0x59, offset 0x1640 + 0x1640: 0x4000, 0x1641: 0x4000, 0x1642: 0x4000, 0x1643: 0x4000, 0x1644: 0x4000, 0x1645: 0x4000, + 0x1646: 0x4000, 0x1647: 0x4000, 0x1648: 0x4000, 0x1649: 0x4000, 0x164a: 0x4000, 0x164b: 0x4000, + 0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x164f: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000, + 0x1652: 0x4000, 0x1653: 0x4000, 0x1654: 0x4000, 0x1655: 0x4000, 0x1656: 0x4000, 0x1657: 0x4000, + 0x1658: 0x4000, 0x1659: 0x4000, 0x165a: 0x4000, 0x165b: 0x4000, 0x165c: 0x4000, 0x165d: 0x4000, + 0x165e: 0x4000, 0x165f: 0x4000, 0x1660: 0x4000, 0x1661: 0x4000, 0x1662: 0x4000, 0x1663: 0x4000, + 0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000, 0x1668: 0x4000, 0x1669: 0x4000, + 0x166a: 0x4000, 0x166b: 0x4000, 0x166c: 0x4000, 0x166d: 0x4000, 0x166e: 0x4000, 0x166f: 0x4000, + 0x1670: 0x4000, 0x1671: 0x4000, 0x1672: 0x4000, 0x1673: 0x4000, 0x1674: 0x4000, 0x1675: 0x4000, + 0x1676: 0x4000, 0x1677: 0x4000, 0x1678: 0x4000, 0x1679: 0x4000, 0x167a: 0x4000, 0x167b: 0x4000, + 0x167c: 0x4000, 0x167f: 0x4000, + // Block 0x5a, offset 0x1680 + 0x1680: 0x4000, 0x1681: 0x4000, 0x1682: 0x4000, 0x1683: 0x4000, 0x1684: 0x4000, 0x1685: 0x4000, + 0x1686: 0x4000, 0x1687: 0x4000, 0x1688: 0x4000, 0x1689: 0x4000, 0x168a: 0x4000, 0x168b: 0x4000, + 0x168c: 0x4000, 0x168d: 0x4000, 0x168e: 0x4000, 0x168f: 0x4000, 0x1690: 0x4000, 0x1691: 0x4000, + 0x1692: 0x4000, 0x1693: 0x4000, 0x1694: 0x4000, 0x1695: 0x4000, 0x1696: 0x4000, 0x1697: 0x4000, + 0x1698: 0x4000, 0x1699: 0x4000, 0x169a: 0x4000, 0x169b: 0x4000, 0x169c: 0x4000, 0x169d: 0x4000, + 0x169e: 0x4000, 0x169f: 0x4000, 0x16a0: 0x4000, 0x16a1: 0x4000, 0x16a2: 0x4000, 0x16a3: 0x4000, + 0x16a4: 0x4000, 0x16a5: 0x4000, 0x16a6: 0x4000, 0x16a7: 0x4000, 0x16a8: 0x4000, 0x16a9: 0x4000, + 0x16aa: 0x4000, 0x16ab: 0x4000, 0x16ac: 0x4000, 0x16ad: 0x4000, 0x16ae: 0x4000, 0x16af: 0x4000, + 0x16b0: 0x4000, 0x16b1: 0x4000, 0x16b2: 0x4000, 0x16b3: 0x4000, 0x16b4: 0x4000, 0x16b5: 0x4000, + 0x16b6: 0x4000, 0x16b7: 0x4000, 0x16b8: 0x4000, 0x16b9: 0x4000, 0x16ba: 0x4000, 0x16bb: 0x4000, + 0x16bc: 0x4000, 0x16bd: 0x4000, + // Block 0x5b, offset 0x16c0 + 0x16cb: 0x4000, + 0x16cc: 0x4000, 0x16cd: 0x4000, 0x16ce: 0x4000, 0x16d0: 0x4000, 0x16d1: 0x4000, + 0x16d2: 0x4000, 0x16d3: 0x4000, 0x16d4: 0x4000, 0x16d5: 0x4000, 0x16d6: 0x4000, 0x16d7: 0x4000, + 0x16d8: 0x4000, 0x16d9: 0x4000, 0x16da: 0x4000, 0x16db: 0x4000, 0x16dc: 0x4000, 0x16dd: 0x4000, + 0x16de: 0x4000, 0x16df: 0x4000, 0x16e0: 0x4000, 0x16e1: 0x4000, 0x16e2: 0x4000, 0x16e3: 0x4000, + 0x16e4: 0x4000, 0x16e5: 0x4000, 0x16e6: 0x4000, 0x16e7: 0x4000, + 0x16fa: 0x4000, + // Block 0x5c, offset 0x1700 + 0x1715: 0x4000, 0x1716: 0x4000, + 0x1724: 0x4000, + // Block 0x5d, offset 0x1740 + 0x177b: 0x4000, + 0x177c: 0x4000, 0x177d: 0x4000, 0x177e: 0x4000, 0x177f: 0x4000, + // Block 
0x5e, offset 0x1780 + 0x1780: 0x4000, 0x1781: 0x4000, 0x1782: 0x4000, 0x1783: 0x4000, 0x1784: 0x4000, 0x1785: 0x4000, + 0x1786: 0x4000, 0x1787: 0x4000, 0x1788: 0x4000, 0x1789: 0x4000, 0x178a: 0x4000, 0x178b: 0x4000, + 0x178c: 0x4000, 0x178d: 0x4000, 0x178e: 0x4000, 0x178f: 0x4000, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x4000, 0x17c1: 0x4000, 0x17c2: 0x4000, 0x17c3: 0x4000, 0x17c4: 0x4000, 0x17c5: 0x4000, + 0x17cc: 0x4000, 0x17d0: 0x4000, 0x17d1: 0x4000, + 0x17d2: 0x4000, + 0x17eb: 0x4000, 0x17ec: 0x4000, + 0x17f4: 0x4000, 0x17f5: 0x4000, + 0x17f6: 0x4000, 0x17f7: 0x4000, 0x17f8: 0x4000, 0x17f9: 0x4000, + // Block 0x60, offset 0x1800 + 0x1810: 0x4000, 0x1811: 0x4000, + 0x1812: 0x4000, 0x1813: 0x4000, 0x1814: 0x4000, 0x1815: 0x4000, 0x1816: 0x4000, 0x1817: 0x4000, + 0x1818: 0x4000, 0x1819: 0x4000, 0x181a: 0x4000, 0x181b: 0x4000, 0x181c: 0x4000, 0x181d: 0x4000, + 0x181e: 0x4000, 0x181f: 0x4000, 0x1820: 0x4000, 0x1821: 0x4000, 0x1822: 0x4000, 0x1823: 0x4000, + 0x1824: 0x4000, 0x1825: 0x4000, 0x1826: 0x4000, 0x1827: 0x4000, 0x1828: 0x4000, 0x1829: 0x4000, + 0x182a: 0x4000, 0x182b: 0x4000, 0x182c: 0x4000, 0x182d: 0x4000, 0x182e: 0x4000, 0x182f: 0x4000, + 0x1830: 0x4000, 0x1831: 0x4000, 0x1832: 0x4000, 0x1833: 0x4000, 0x1834: 0x4000, 0x1835: 0x4000, + 0x1836: 0x4000, 0x1837: 0x4000, 0x1838: 0x4000, 0x1839: 0x4000, 0x183a: 0x4000, 0x183b: 0x4000, + 0x183c: 0x4000, 0x183d: 0x4000, 0x183e: 0x4000, + // Block 0x61, offset 0x1840 + 0x1840: 0x4000, 0x1841: 0x4000, 0x1842: 0x4000, 0x1843: 0x4000, 0x1844: 0x4000, 0x1845: 0x4000, + 0x1846: 0x4000, 0x1847: 0x4000, 0x1848: 0x4000, 0x1849: 0x4000, 0x184a: 0x4000, 0x184b: 0x4000, + 0x184c: 0x4000, 0x184d: 0x4000, 0x184e: 0x4000, 0x184f: 0x4000, 0x1850: 0x4000, 0x1851: 0x4000, + 0x1852: 0x4000, 0x1853: 0x4000, 0x1854: 0x4000, 0x1855: 0x4000, 0x1856: 0x4000, 0x1857: 0x4000, + 0x1858: 0x4000, 0x1859: 0x4000, 0x185a: 0x4000, 0x185b: 0x4000, 0x185c: 0x4000, 0x185d: 0x4000, + 0x185e: 0x4000, 0x185f: 0x4000, 0x1860: 0x4000, 0x1861: 0x4000, 0x1862: 0x4000, 0x1863: 0x4000, + 0x1864: 0x4000, 0x1865: 0x4000, 0x1866: 0x4000, 0x1867: 0x4000, 0x1868: 0x4000, 0x1869: 0x4000, + 0x186a: 0x4000, 0x186b: 0x4000, 0x186c: 0x4000, 0x186d: 0x4000, 0x186e: 0x4000, 0x186f: 0x4000, + 0x1870: 0x4000, 0x1873: 0x4000, 0x1874: 0x4000, 0x1875: 0x4000, + 0x1876: 0x4000, 0x187a: 0x4000, + 0x187c: 0x4000, 0x187d: 0x4000, 0x187e: 0x4000, 0x187f: 0x4000, + // Block 0x62, offset 0x1880 + 0x1880: 0x4000, 0x1881: 0x4000, 0x1882: 0x4000, 0x1883: 0x4000, 0x1884: 0x4000, 0x1885: 0x4000, + 0x1886: 0x4000, 0x1887: 0x4000, 0x1888: 0x4000, 0x1889: 0x4000, 0x188a: 0x4000, 0x188b: 0x4000, + 0x188c: 0x4000, 0x188d: 0x4000, 0x188e: 0x4000, 0x188f: 0x4000, 0x1890: 0x4000, 0x1891: 0x4000, + 0x1892: 0x4000, 0x1893: 0x4000, 0x1894: 0x4000, 0x1895: 0x4000, 0x1896: 0x4000, 0x1897: 0x4000, + 0x1898: 0x4000, 0x1899: 0x4000, 0x189a: 0x4000, 0x189b: 0x4000, 0x189c: 0x4000, 0x189d: 0x4000, + 0x189e: 0x4000, 0x189f: 0x4000, 0x18a0: 0x4000, 0x18a1: 0x4000, 0x18a2: 0x4000, + 0x18b0: 0x4000, 0x18b1: 0x4000, 0x18b2: 0x4000, 0x18b3: 0x4000, 0x18b4: 0x4000, 0x18b5: 0x4000, + 0x18b6: 0x4000, 0x18b7: 0x4000, 0x18b8: 0x4000, 0x18b9: 0x4000, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x4000, 0x18c1: 0x4000, 0x18c2: 0x4000, + 0x18d0: 0x4000, 0x18d1: 0x4000, + 0x18d2: 0x4000, 0x18d3: 0x4000, 0x18d4: 0x4000, 0x18d5: 0x4000, 0x18d6: 0x4000, 0x18d7: 0x4000, + 0x18d8: 0x4000, 0x18d9: 0x4000, 0x18da: 0x4000, 0x18db: 0x4000, 0x18dc: 0x4000, 0x18dd: 0x4000, + 0x18de: 0x4000, 0x18df: 0x4000, 0x18e0: 0x4000, 0x18e1: 0x4000, 0x18e2: 0x4000, 
0x18e3: 0x4000, + 0x18e4: 0x4000, 0x18e5: 0x4000, 0x18e6: 0x4000, 0x18e7: 0x4000, 0x18e8: 0x4000, 0x18e9: 0x4000, + 0x18ea: 0x4000, 0x18eb: 0x4000, 0x18ec: 0x4000, 0x18ed: 0x4000, 0x18ee: 0x4000, 0x18ef: 0x4000, + 0x18f0: 0x4000, 0x18f1: 0x4000, 0x18f2: 0x4000, 0x18f3: 0x4000, 0x18f4: 0x4000, 0x18f5: 0x4000, + 0x18f6: 0x4000, 0x18f7: 0x4000, 0x18f8: 0x4000, 0x18f9: 0x4000, 0x18fa: 0x4000, 0x18fb: 0x4000, + 0x18fc: 0x4000, 0x18fd: 0x4000, 0x18fe: 0x4000, 0x18ff: 0x4000, + // Block 0x64, offset 0x1900 + 0x1900: 0x2000, 0x1901: 0x2000, 0x1902: 0x2000, 0x1903: 0x2000, 0x1904: 0x2000, 0x1905: 0x2000, + 0x1906: 0x2000, 0x1907: 0x2000, 0x1908: 0x2000, 0x1909: 0x2000, 0x190a: 0x2000, 0x190b: 0x2000, + 0x190c: 0x2000, 0x190d: 0x2000, 0x190e: 0x2000, 0x190f: 0x2000, 0x1910: 0x2000, 0x1911: 0x2000, + 0x1912: 0x2000, 0x1913: 0x2000, 0x1914: 0x2000, 0x1915: 0x2000, 0x1916: 0x2000, 0x1917: 0x2000, + 0x1918: 0x2000, 0x1919: 0x2000, 0x191a: 0x2000, 0x191b: 0x2000, 0x191c: 0x2000, 0x191d: 0x2000, + 0x191e: 0x2000, 0x191f: 0x2000, 0x1920: 0x2000, 0x1921: 0x2000, 0x1922: 0x2000, 0x1923: 0x2000, + 0x1924: 0x2000, 0x1925: 0x2000, 0x1926: 0x2000, 0x1927: 0x2000, 0x1928: 0x2000, 0x1929: 0x2000, + 0x192a: 0x2000, 0x192b: 0x2000, 0x192c: 0x2000, 0x192d: 0x2000, 0x192e: 0x2000, 0x192f: 0x2000, + 0x1930: 0x2000, 0x1931: 0x2000, 0x1932: 0x2000, 0x1933: 0x2000, 0x1934: 0x2000, 0x1935: 0x2000, + 0x1936: 0x2000, 0x1937: 0x2000, 0x1938: 0x2000, 0x1939: 0x2000, 0x193a: 0x2000, 0x193b: 0x2000, + 0x193c: 0x2000, 0x193d: 0x2000, +} + +// widthIndex: 22 blocks, 1408 entries, 1408 bytes +// Block 0 is the zero block. +var widthIndex = [1408]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc7: 0x05, + 0xc9: 0x06, 0xcb: 0x07, 0xcc: 0x08, 0xcd: 0x09, 0xce: 0x0a, 0xcf: 0x0b, + 0xd0: 0x0c, 0xd1: 0x0d, + 0xe1: 0x02, 0xe2: 0x03, 0xe3: 0x04, 0xe4: 0x05, 0xe5: 0x06, 0xe6: 0x06, 0xe7: 0x06, + 0xe8: 0x06, 0xe9: 0x06, 0xea: 0x07, 0xeb: 0x06, 0xec: 0x06, 0xed: 0x08, 0xee: 0x09, 0xef: 0x0a, + 0xf0: 0x0f, 0xf3: 0x12, 0xf4: 0x13, + // Block 0x4, offset 0x100 + 0x104: 0x0e, 0x105: 0x0f, + // Block 0x5, offset 0x140 + 0x140: 0x10, 0x141: 0x11, 0x142: 0x12, 0x144: 0x13, 0x145: 0x14, 0x146: 0x15, 0x147: 0x16, + 0x148: 0x17, 0x149: 0x18, 0x14a: 0x19, 0x14c: 0x1a, 0x14f: 0x1b, + 0x151: 0x1c, 0x152: 0x08, 0x153: 0x1d, 0x154: 0x1e, 0x155: 0x1f, 0x156: 0x20, 0x157: 0x21, + 0x158: 0x22, 0x159: 0x23, 0x15a: 0x24, 0x15b: 0x25, 0x15c: 0x26, 0x15d: 0x27, 0x15e: 0x28, 0x15f: 0x29, + 0x166: 0x2a, + 0x16c: 0x2b, 0x16d: 0x2c, + 0x17a: 0x2d, 0x17b: 0x2e, 0x17c: 0x0e, 0x17d: 0x0e, 0x17e: 0x0e, 0x17f: 0x2f, + // Block 0x6, offset 0x180 + 0x180: 0x30, 0x181: 0x31, 0x182: 0x32, 0x183: 0x33, 0x184: 0x34, 0x185: 0x35, 0x186: 0x36, 0x187: 0x37, + 0x188: 0x38, 0x189: 0x39, 0x18a: 0x0e, 0x18b: 0x3a, 0x18c: 0x0e, 0x18d: 0x0e, 0x18e: 0x0e, 0x18f: 0x0e, + 0x190: 0x0e, 0x191: 0x0e, 0x192: 0x0e, 0x193: 0x0e, 0x194: 0x0e, 0x195: 0x0e, 0x196: 0x0e, 0x197: 0x0e, + 0x198: 0x0e, 0x199: 0x0e, 0x19a: 0x0e, 0x19b: 0x0e, 0x19c: 0x0e, 0x19d: 0x0e, 0x19e: 0x0e, 0x19f: 0x0e, + 0x1a0: 0x0e, 0x1a1: 0x0e, 0x1a2: 0x0e, 0x1a3: 0x0e, 0x1a4: 0x0e, 0x1a5: 0x0e, 0x1a6: 0x0e, 0x1a7: 0x0e, + 0x1a8: 0x0e, 0x1a9: 0x0e, 0x1aa: 0x0e, 0x1ab: 0x0e, 0x1ac: 0x0e, 0x1ad: 0x0e, 0x1ae: 0x0e, 0x1af: 0x0e, + 0x1b0: 0x0e, 0x1b1: 0x0e, 0x1b2: 0x0e, 0x1b3: 0x0e, 0x1b4: 0x0e, 0x1b5: 0x0e, 0x1b6: 0x0e, 0x1b7: 0x0e, + 0x1b8: 0x0e, 0x1b9: 0x0e, 0x1ba: 0x0e, 0x1bb: 0x0e, 0x1bc: 0x0e, 0x1bd: 0x0e, 
0x1be: 0x0e, 0x1bf: 0x0e, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0e, 0x1c1: 0x0e, 0x1c2: 0x0e, 0x1c3: 0x0e, 0x1c4: 0x0e, 0x1c5: 0x0e, 0x1c6: 0x0e, 0x1c7: 0x0e, + 0x1c8: 0x0e, 0x1c9: 0x0e, 0x1ca: 0x0e, 0x1cb: 0x0e, 0x1cc: 0x0e, 0x1cd: 0x0e, 0x1ce: 0x0e, 0x1cf: 0x0e, + 0x1d0: 0x0e, 0x1d1: 0x0e, 0x1d2: 0x0e, 0x1d3: 0x0e, 0x1d4: 0x0e, 0x1d5: 0x0e, 0x1d6: 0x0e, 0x1d7: 0x0e, + 0x1d8: 0x0e, 0x1d9: 0x0e, 0x1da: 0x0e, 0x1db: 0x0e, 0x1dc: 0x0e, 0x1dd: 0x0e, 0x1de: 0x0e, 0x1df: 0x0e, + 0x1e0: 0x0e, 0x1e1: 0x0e, 0x1e2: 0x0e, 0x1e3: 0x0e, 0x1e4: 0x0e, 0x1e5: 0x0e, 0x1e6: 0x0e, 0x1e7: 0x0e, + 0x1e8: 0x0e, 0x1e9: 0x0e, 0x1ea: 0x0e, 0x1eb: 0x0e, 0x1ec: 0x0e, 0x1ed: 0x0e, 0x1ee: 0x0e, 0x1ef: 0x0e, + 0x1f0: 0x0e, 0x1f1: 0x0e, 0x1f2: 0x0e, 0x1f3: 0x0e, 0x1f4: 0x0e, 0x1f5: 0x0e, 0x1f6: 0x0e, + 0x1f8: 0x0e, 0x1f9: 0x0e, 0x1fa: 0x0e, 0x1fb: 0x0e, 0x1fc: 0x0e, 0x1fd: 0x0e, 0x1fe: 0x0e, 0x1ff: 0x0e, + // Block 0x8, offset 0x200 + 0x200: 0x0e, 0x201: 0x0e, 0x202: 0x0e, 0x203: 0x0e, 0x204: 0x0e, 0x205: 0x0e, 0x206: 0x0e, 0x207: 0x0e, + 0x208: 0x0e, 0x209: 0x0e, 0x20a: 0x0e, 0x20b: 0x0e, 0x20c: 0x0e, 0x20d: 0x0e, 0x20e: 0x0e, 0x20f: 0x0e, + 0x210: 0x0e, 0x211: 0x0e, 0x212: 0x0e, 0x213: 0x0e, 0x214: 0x0e, 0x215: 0x0e, 0x216: 0x0e, 0x217: 0x0e, + 0x218: 0x0e, 0x219: 0x0e, 0x21a: 0x0e, 0x21b: 0x0e, 0x21c: 0x0e, 0x21d: 0x0e, 0x21e: 0x0e, 0x21f: 0x0e, + 0x220: 0x0e, 0x221: 0x0e, 0x222: 0x0e, 0x223: 0x0e, 0x224: 0x0e, 0x225: 0x0e, 0x226: 0x0e, 0x227: 0x0e, + 0x228: 0x0e, 0x229: 0x0e, 0x22a: 0x0e, 0x22b: 0x0e, 0x22c: 0x0e, 0x22d: 0x0e, 0x22e: 0x0e, 0x22f: 0x0e, + 0x230: 0x0e, 0x231: 0x0e, 0x232: 0x0e, 0x233: 0x0e, 0x234: 0x0e, 0x235: 0x0e, 0x236: 0x0e, 0x237: 0x0e, + 0x238: 0x0e, 0x239: 0x0e, 0x23a: 0x0e, 0x23b: 0x0e, 0x23c: 0x0e, 0x23d: 0x0e, 0x23e: 0x0e, 0x23f: 0x0e, + // Block 0x9, offset 0x240 + 0x240: 0x0e, 0x241: 0x0e, 0x242: 0x0e, 0x243: 0x0e, 0x244: 0x0e, 0x245: 0x0e, 0x246: 0x0e, 0x247: 0x0e, + 0x248: 0x0e, 0x249: 0x0e, 0x24a: 0x0e, 0x24b: 0x0e, 0x24c: 0x0e, 0x24d: 0x0e, 0x24e: 0x0e, 0x24f: 0x0e, + 0x250: 0x0e, 0x251: 0x0e, 0x252: 0x3b, 0x253: 0x3c, + 0x265: 0x3d, + 0x270: 0x0e, 0x271: 0x0e, 0x272: 0x0e, 0x273: 0x0e, 0x274: 0x0e, 0x275: 0x0e, 0x276: 0x0e, 0x277: 0x0e, + 0x278: 0x0e, 0x279: 0x0e, 0x27a: 0x0e, 0x27b: 0x0e, 0x27c: 0x0e, 0x27d: 0x0e, 0x27e: 0x0e, 0x27f: 0x0e, + // Block 0xa, offset 0x280 + 0x280: 0x0e, 0x281: 0x0e, 0x282: 0x0e, 0x283: 0x0e, 0x284: 0x0e, 0x285: 0x0e, 0x286: 0x0e, 0x287: 0x0e, + 0x288: 0x0e, 0x289: 0x0e, 0x28a: 0x0e, 0x28b: 0x0e, 0x28c: 0x0e, 0x28d: 0x0e, 0x28e: 0x0e, 0x28f: 0x0e, + 0x290: 0x0e, 0x291: 0x0e, 0x292: 0x0e, 0x293: 0x0e, 0x294: 0x0e, 0x295: 0x0e, 0x296: 0x0e, 0x297: 0x0e, + 0x298: 0x0e, 0x299: 0x0e, 0x29a: 0x0e, 0x29b: 0x0e, 0x29c: 0x0e, 0x29d: 0x0e, 0x29e: 0x3e, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x08, 0x2c1: 0x08, 0x2c2: 0x08, 0x2c3: 0x08, 0x2c4: 0x08, 0x2c5: 0x08, 0x2c6: 0x08, 0x2c7: 0x08, + 0x2c8: 0x08, 0x2c9: 0x08, 0x2ca: 0x08, 0x2cb: 0x08, 0x2cc: 0x08, 0x2cd: 0x08, 0x2ce: 0x08, 0x2cf: 0x08, + 0x2d0: 0x08, 0x2d1: 0x08, 0x2d2: 0x08, 0x2d3: 0x08, 0x2d4: 0x08, 0x2d5: 0x08, 0x2d6: 0x08, 0x2d7: 0x08, + 0x2d8: 0x08, 0x2d9: 0x08, 0x2da: 0x08, 0x2db: 0x08, 0x2dc: 0x08, 0x2dd: 0x08, 0x2de: 0x08, 0x2df: 0x08, + 0x2e0: 0x08, 0x2e1: 0x08, 0x2e2: 0x08, 0x2e3: 0x08, 0x2e4: 0x08, 0x2e5: 0x08, 0x2e6: 0x08, 0x2e7: 0x08, + 0x2e8: 0x08, 0x2e9: 0x08, 0x2ea: 0x08, 0x2eb: 0x08, 0x2ec: 0x08, 0x2ed: 0x08, 0x2ee: 0x08, 0x2ef: 0x08, + 0x2f0: 0x08, 0x2f1: 0x08, 0x2f2: 0x08, 0x2f3: 0x08, 0x2f4: 0x08, 0x2f5: 0x08, 0x2f6: 0x08, 0x2f7: 0x08, + 0x2f8: 0x08, 0x2f9: 0x08, 0x2fa: 0x08, 0x2fb: 0x08, 
0x2fc: 0x08, 0x2fd: 0x08, 0x2fe: 0x08, 0x2ff: 0x08, + // Block 0xc, offset 0x300 + 0x300: 0x08, 0x301: 0x08, 0x302: 0x08, 0x303: 0x08, 0x304: 0x08, 0x305: 0x08, 0x306: 0x08, 0x307: 0x08, + 0x308: 0x08, 0x309: 0x08, 0x30a: 0x08, 0x30b: 0x08, 0x30c: 0x08, 0x30d: 0x08, 0x30e: 0x08, 0x30f: 0x08, + 0x310: 0x08, 0x311: 0x08, 0x312: 0x08, 0x313: 0x08, 0x314: 0x08, 0x315: 0x08, 0x316: 0x08, 0x317: 0x08, + 0x318: 0x08, 0x319: 0x08, 0x31a: 0x08, 0x31b: 0x08, 0x31c: 0x08, 0x31d: 0x08, 0x31e: 0x08, 0x31f: 0x08, + 0x320: 0x08, 0x321: 0x08, 0x322: 0x08, 0x323: 0x08, 0x324: 0x0e, 0x325: 0x0e, 0x326: 0x0e, 0x327: 0x0e, + 0x328: 0x0e, 0x329: 0x0e, 0x32a: 0x0e, 0x32b: 0x0e, + 0x338: 0x3f, 0x339: 0x40, 0x33c: 0x41, 0x33d: 0x42, 0x33e: 0x43, 0x33f: 0x44, + // Block 0xd, offset 0x340 + 0x37f: 0x45, + // Block 0xe, offset 0x380 + 0x380: 0x0e, 0x381: 0x0e, 0x382: 0x0e, 0x383: 0x0e, 0x384: 0x0e, 0x385: 0x0e, 0x386: 0x0e, 0x387: 0x0e, + 0x388: 0x0e, 0x389: 0x0e, 0x38a: 0x0e, 0x38b: 0x0e, 0x38c: 0x0e, 0x38d: 0x0e, 0x38e: 0x0e, 0x38f: 0x0e, + 0x390: 0x0e, 0x391: 0x0e, 0x392: 0x0e, 0x393: 0x0e, 0x394: 0x0e, 0x395: 0x0e, 0x396: 0x0e, 0x397: 0x0e, + 0x398: 0x0e, 0x399: 0x0e, 0x39a: 0x0e, 0x39b: 0x0e, 0x39c: 0x0e, 0x39d: 0x0e, 0x39e: 0x0e, 0x39f: 0x46, + 0x3a0: 0x0e, 0x3a1: 0x0e, 0x3a2: 0x0e, 0x3a3: 0x0e, 0x3a4: 0x0e, 0x3a5: 0x0e, 0x3a6: 0x0e, 0x3a7: 0x0e, + 0x3a8: 0x0e, 0x3a9: 0x0e, 0x3aa: 0x0e, 0x3ab: 0x47, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x0e, 0x3c1: 0x0e, 0x3c2: 0x0e, 0x3c3: 0x0e, 0x3c4: 0x48, 0x3c5: 0x49, 0x3c6: 0x0e, 0x3c7: 0x0e, + 0x3c8: 0x0e, 0x3c9: 0x0e, 0x3ca: 0x0e, 0x3cb: 0x4a, + // Block 0x10, offset 0x400 + 0x400: 0x4b, 0x403: 0x4c, 0x404: 0x4d, 0x405: 0x4e, 0x406: 0x4f, + 0x408: 0x50, 0x409: 0x51, 0x40c: 0x52, 0x40d: 0x53, 0x40e: 0x54, 0x40f: 0x55, + 0x410: 0x3a, 0x411: 0x56, 0x412: 0x0e, 0x413: 0x57, 0x414: 0x58, 0x415: 0x59, 0x416: 0x5a, 0x417: 0x5b, + 0x418: 0x0e, 0x419: 0x5c, 0x41a: 0x0e, 0x41b: 0x5d, + 0x424: 0x5e, 0x425: 0x5f, 0x426: 0x60, 0x427: 0x61, + // Block 0x11, offset 0x440 + 0x456: 0x0b, 0x457: 0x06, + 0x458: 0x0c, 0x45b: 0x0d, 0x45f: 0x0e, + 0x460: 0x06, 0x461: 0x06, 0x462: 0x06, 0x463: 0x06, 0x464: 0x06, 0x465: 0x06, 0x466: 0x06, 0x467: 0x06, + 0x468: 0x06, 0x469: 0x06, 0x46a: 0x06, 0x46b: 0x06, 0x46c: 0x06, 0x46d: 0x06, 0x46e: 0x06, 0x46f: 0x06, + 0x470: 0x06, 0x471: 0x06, 0x472: 0x06, 0x473: 0x06, 0x474: 0x06, 0x475: 0x06, 0x476: 0x06, 0x477: 0x06, + 0x478: 0x06, 0x479: 0x06, 0x47a: 0x06, 0x47b: 0x06, 0x47c: 0x06, 0x47d: 0x06, 0x47e: 0x06, 0x47f: 0x06, + // Block 0x12, offset 0x480 + 0x484: 0x08, 0x485: 0x08, 0x486: 0x08, 0x487: 0x09, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x08, 0x4c1: 0x08, 0x4c2: 0x08, 0x4c3: 0x08, 0x4c4: 0x08, 0x4c5: 0x08, 0x4c6: 0x08, 0x4c7: 0x08, + 0x4c8: 0x08, 0x4c9: 0x08, 0x4ca: 0x08, 0x4cb: 0x08, 0x4cc: 0x08, 0x4cd: 0x08, 0x4ce: 0x08, 0x4cf: 0x08, + 0x4d0: 0x08, 0x4d1: 0x08, 0x4d2: 0x08, 0x4d3: 0x08, 0x4d4: 0x08, 0x4d5: 0x08, 0x4d6: 0x08, 0x4d7: 0x08, + 0x4d8: 0x08, 0x4d9: 0x08, 0x4da: 0x08, 0x4db: 0x08, 0x4dc: 0x08, 0x4dd: 0x08, 0x4de: 0x08, 0x4df: 0x08, + 0x4e0: 0x08, 0x4e1: 0x08, 0x4e2: 0x08, 0x4e3: 0x08, 0x4e4: 0x08, 0x4e5: 0x08, 0x4e6: 0x08, 0x4e7: 0x08, + 0x4e8: 0x08, 0x4e9: 0x08, 0x4ea: 0x08, 0x4eb: 0x08, 0x4ec: 0x08, 0x4ed: 0x08, 0x4ee: 0x08, 0x4ef: 0x08, + 0x4f0: 0x08, 0x4f1: 0x08, 0x4f2: 0x08, 0x4f3: 0x08, 0x4f4: 0x08, 0x4f5: 0x08, 0x4f6: 0x08, 0x4f7: 0x08, + 0x4f8: 0x08, 0x4f9: 0x08, 0x4fa: 0x08, 0x4fb: 0x08, 0x4fc: 0x08, 0x4fd: 0x08, 0x4fe: 0x08, 0x4ff: 0x62, + // Block 0x14, offset 0x500 + 0x520: 0x10, + 0x530: 0x09, 0x531: 0x09, 0x532: 0x09, 
0x533: 0x09, 0x534: 0x09, 0x535: 0x09, 0x536: 0x09, 0x537: 0x09, + 0x538: 0x09, 0x539: 0x09, 0x53a: 0x09, 0x53b: 0x09, 0x53c: 0x09, 0x53d: 0x09, 0x53e: 0x09, 0x53f: 0x11, + // Block 0x15, offset 0x540 + 0x540: 0x09, 0x541: 0x09, 0x542: 0x09, 0x543: 0x09, 0x544: 0x09, 0x545: 0x09, 0x546: 0x09, 0x547: 0x09, + 0x548: 0x09, 0x549: 0x09, 0x54a: 0x09, 0x54b: 0x09, 0x54c: 0x09, 0x54d: 0x09, 0x54e: 0x09, 0x54f: 0x11, +} + +// inverseData contains 4-byte entries of the following format: +// +// <length> <modified UTF-8-encoded rune> <0 padding> +// +// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the +// UTF-8 encoding of the original rune. Mappings often have the following +// pattern: +// +// A -> A (U+FF21 -> U+0041) +// B -> B (U+FF22 -> U+0042) +// ... +// +// By xor-ing the last byte the same entry can be shared by many mappings. This +// reduces the total number of distinct entries by about two thirds. +// The resulting entry for the aforementioned mappings is +// +// { 0x01, 0xE0, 0x00, 0x00 } +// +// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get +// +// E0 ^ A1 = 41. +// +// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get +// +// E0 ^ A2 = 42. +// +// Note that because of the xor-ing, the byte sequence stored in the entry is +// not valid UTF-8. +var inverseData = [150][4]byte{ + {0x00, 0x00, 0x00, 0x00}, + {0x03, 0xe3, 0x80, 0xa0}, + {0x03, 0xef, 0xbc, 0xa0}, + {0x03, 0xef, 0xbc, 0xe0}, + {0x03, 0xef, 0xbd, 0xe0}, + {0x03, 0xef, 0xbf, 0x02}, + {0x03, 0xef, 0xbf, 0x00}, + {0x03, 0xef, 0xbf, 0x0e}, + {0x03, 0xef, 0xbf, 0x0c}, + {0x03, 0xef, 0xbf, 0x0f}, + {0x03, 0xef, 0xbf, 0x39}, + {0x03, 0xef, 0xbf, 0x3b}, + {0x03, 0xef, 0xbf, 0x3f}, + {0x03, 0xef, 0xbf, 0x2a}, + {0x03, 0xef, 0xbf, 0x0d}, + {0x03, 0xef, 0xbf, 0x25}, + {0x03, 0xef, 0xbd, 0x1a}, + {0x03, 0xef, 0xbd, 0x26}, + {0x01, 0xa0, 0x00, 0x00}, + {0x03, 0xef, 0xbd, 0x25}, + {0x03, 0xef, 0xbd, 0x23}, + {0x03, 0xef, 0xbd, 0x2e}, + {0x03, 0xef, 0xbe, 0x07}, + {0x03, 0xef, 0xbe, 0x05}, + {0x03, 0xef, 0xbd, 0x06}, + {0x03, 0xef, 0xbd, 0x13}, + {0x03, 0xef, 0xbd, 0x0b}, + {0x03, 0xef, 0xbd, 0x16}, + {0x03, 0xef, 0xbd, 0x0c}, + {0x03, 0xef, 0xbd, 0x15}, + {0x03, 0xef, 0xbd, 0x0d}, + {0x03, 0xef, 0xbd, 0x1c}, + {0x03, 0xef, 0xbd, 0x02}, + {0x03, 0xef, 0xbd, 0x1f}, + {0x03, 0xef, 0xbd, 0x1d}, + {0x03, 0xef, 0xbd, 0x17}, + {0x03, 0xef, 0xbd, 0x08}, + {0x03, 0xef, 0xbd, 0x09}, + {0x03, 0xef, 0xbd, 0x0e}, + {0x03, 0xef, 0xbd, 0x04}, + {0x03, 0xef, 0xbd, 0x05}, + {0x03, 0xef, 0xbe, 0x3f}, + {0x03, 0xef, 0xbe, 0x00}, + {0x03, 0xef, 0xbd, 0x2c}, + {0x03, 0xef, 0xbe, 0x06}, + {0x03, 0xef, 0xbe, 0x0c}, + {0x03, 0xef, 0xbe, 0x0f}, + {0x03, 0xef, 0xbe, 0x0d}, + {0x03, 0xef, 0xbe, 0x0b}, + {0x03, 0xef, 0xbe, 0x19}, + {0x03, 0xef, 0xbe, 0x15}, + {0x03, 0xef, 0xbe, 0x11}, + {0x03, 0xef, 0xbe, 0x31}, + {0x03, 0xef, 0xbe, 0x33}, + {0x03, 0xef, 0xbd, 0x0f}, + {0x03, 0xef, 0xbe, 0x30}, + {0x03, 0xef, 0xbe, 0x3e}, + {0x03, 0xef, 0xbe, 0x32}, + {0x03, 0xef, 0xbe, 0x36}, + {0x03, 0xef, 0xbd, 0x14}, + {0x03, 0xef, 0xbe, 0x2e}, + {0x03, 0xef, 0xbd, 0x1e}, + {0x03, 0xef, 0xbe, 0x10}, + {0x03, 0xef, 0xbf, 0x13}, + {0x03, 0xef, 0xbf, 0x15}, + {0x03, 0xef, 0xbf, 0x17}, + {0x03, 0xef, 0xbf, 0x1f}, + {0x03, 0xef, 0xbf, 0x1d}, + {0x03, 0xef, 0xbf, 0x1b}, + {0x03, 0xef, 0xbf, 0x09}, + {0x03, 0xef, 0xbf, 0x0b}, + {0x03, 0xef, 0xbf, 0x37}, + {0x03, 0xef, 0xbe, 0x04}, + {0x01, 0xe0, 0x00, 0x00}, + {0x03, 0xe2, 0xa6, 0x1a}, + {0x03, 0xe2, 0xa6, 0x26}, + {0x03, 0xe3, 0x80, 0x23}, + {0x03, 0xe3, 0x80, 0x2e}, + {0x03, 0xe3, 0x80, 0x25}, + 
{0x03, 0xe3, 0x83, 0x1e}, + {0x03, 0xe3, 0x83, 0x14}, + {0x03, 0xe3, 0x82, 0x06}, + {0x03, 0xe3, 0x82, 0x0b}, + {0x03, 0xe3, 0x82, 0x0c}, + {0x03, 0xe3, 0x82, 0x0d}, + {0x03, 0xe3, 0x82, 0x02}, + {0x03, 0xe3, 0x83, 0x0f}, + {0x03, 0xe3, 0x83, 0x08}, + {0x03, 0xe3, 0x83, 0x09}, + {0x03, 0xe3, 0x83, 0x2c}, + {0x03, 0xe3, 0x83, 0x0c}, + {0x03, 0xe3, 0x82, 0x13}, + {0x03, 0xe3, 0x82, 0x16}, + {0x03, 0xe3, 0x82, 0x15}, + {0x03, 0xe3, 0x82, 0x1c}, + {0x03, 0xe3, 0x82, 0x1f}, + {0x03, 0xe3, 0x82, 0x1d}, + {0x03, 0xe3, 0x82, 0x1a}, + {0x03, 0xe3, 0x82, 0x17}, + {0x03, 0xe3, 0x82, 0x08}, + {0x03, 0xe3, 0x82, 0x09}, + {0x03, 0xe3, 0x82, 0x0e}, + {0x03, 0xe3, 0x82, 0x04}, + {0x03, 0xe3, 0x82, 0x05}, + {0x03, 0xe3, 0x82, 0x3f}, + {0x03, 0xe3, 0x83, 0x00}, + {0x03, 0xe3, 0x83, 0x06}, + {0x03, 0xe3, 0x83, 0x05}, + {0x03, 0xe3, 0x83, 0x0d}, + {0x03, 0xe3, 0x83, 0x0b}, + {0x03, 0xe3, 0x83, 0x07}, + {0x03, 0xe3, 0x83, 0x19}, + {0x03, 0xe3, 0x83, 0x15}, + {0x03, 0xe3, 0x83, 0x11}, + {0x03, 0xe3, 0x83, 0x31}, + {0x03, 0xe3, 0x83, 0x33}, + {0x03, 0xe3, 0x83, 0x30}, + {0x03, 0xe3, 0x83, 0x3e}, + {0x03, 0xe3, 0x83, 0x32}, + {0x03, 0xe3, 0x83, 0x36}, + {0x03, 0xe3, 0x83, 0x2e}, + {0x03, 0xe3, 0x82, 0x07}, + {0x03, 0xe3, 0x85, 0x04}, + {0x03, 0xe3, 0x84, 0x10}, + {0x03, 0xe3, 0x85, 0x30}, + {0x03, 0xe3, 0x85, 0x0d}, + {0x03, 0xe3, 0x85, 0x13}, + {0x03, 0xe3, 0x85, 0x15}, + {0x03, 0xe3, 0x85, 0x17}, + {0x03, 0xe3, 0x85, 0x1f}, + {0x03, 0xe3, 0x85, 0x1d}, + {0x03, 0xe3, 0x85, 0x1b}, + {0x03, 0xe3, 0x85, 0x09}, + {0x03, 0xe3, 0x85, 0x0f}, + {0x03, 0xe3, 0x85, 0x0b}, + {0x03, 0xe3, 0x85, 0x37}, + {0x03, 0xe3, 0x85, 0x3b}, + {0x03, 0xe3, 0x85, 0x39}, + {0x03, 0xe3, 0x85, 0x3f}, + {0x02, 0xc2, 0x02, 0x00}, + {0x02, 0xc2, 0x0e, 0x00}, + {0x02, 0xc2, 0x0c, 0x00}, + {0x02, 0xc2, 0x00, 0x00}, + {0x03, 0xe2, 0x82, 0x0f}, + {0x03, 0xe2, 0x94, 0x2a}, + {0x03, 0xe2, 0x86, 0x39}, + {0x03, 0xe2, 0x86, 0x3b}, + {0x03, 0xe2, 0x86, 0x3f}, + {0x03, 0xe2, 0x96, 0x0d}, + {0x03, 0xe2, 0x97, 0x25}, +} + +// Total table size 14936 bytes (14KiB) diff --git a/vendor/golang.org/x/text/width/tables12.0.0.go b/vendor/golang.org/x/text/width/tables12.0.0.go new file mode 100644 index 00000000000..755ee912218 --- /dev/null +++ b/vendor/golang.org/x/text/width/tables12.0.0.go @@ -0,0 +1,1360 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +//go:build go1.14 && !go1.16 + +package width + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "12.0.0" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *widthTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return widthValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. 
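As a concrete illustration of the inverseData scheme documented above, here is a minimal Go sketch (not part of the generated file) that decodes one entry. The entry value and the U+FF21/U+FF22 mappings are taken directly from the comment above; decodeInverse is a hypothetical helper name, not an API of this package.

package main

import "fmt"

// decodeInverse reconstructs the UTF-8 bytes of a mapped rune from an
// inverseData-style entry and the UTF-8 encoding of the original rune.
// Layout: entry[0] is the length n, entry[1:1+n] holds the stored bytes,
// and the last stored byte is xor-ed with the last byte of the original
// encoding, which is what lets many mappings share a single entry.
func decodeInverse(entry [4]byte, orig []byte) []byte {
	n := int(entry[0])
	out := make([]byte, n)
	copy(out, entry[1:1+n])
	out[n-1] ^= orig[len(orig)-1]
	return out
}

func main() {
	// Shared entry for the fullwidth Latin letters, per the comment above.
	entry := [4]byte{0x01, 0xE0, 0x00, 0x00}
	// U+FF21 encodes as EF BC A1: E0 ^ A1 = 41, i.e. 'A'.
	fmt.Printf("%s\n", decodeInverse(entry, []byte("\uFF21")))
	// U+FF22 encodes as EF BC A2: E0 ^ A2 = 42, i.e. 'B'.
	fmt.Printf("%s\n", decodeInverse(entry, []byte("\uFF22")))
}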
+ } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = widthIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *widthTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return widthValues[c0] + } + i := widthIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = widthIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = widthIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *widthTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return widthValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = widthIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. 
+func (t *widthTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return widthValues[c0] + } + i := widthIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = widthIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = widthIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// widthTrie. Total size: 14720 bytes (14.38 KiB). Checksum: 3f4f2516ded5489b. +type widthTrie struct{} + +func newWidthTrie(i int) *widthTrie { + return &widthTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *widthTrie) lookupValue(n uint32, b byte) uint16 { + switch { + default: + return uint16(widthValues[n<<6+uint32(b)]) + } +} + +// widthValues: 104 blocks, 6656 entries, 13312 bytes +// The third block is the zero block. +var widthValues = [6656]uint16{ + // Block 0x0, offset 0x0 + 0x20: 0x6001, 0x21: 0x6002, 0x22: 0x6002, 0x23: 0x6002, + 0x24: 0x6002, 0x25: 0x6002, 0x26: 0x6002, 0x27: 0x6002, 0x28: 0x6002, 0x29: 0x6002, + 0x2a: 0x6002, 0x2b: 0x6002, 0x2c: 0x6002, 0x2d: 0x6002, 0x2e: 0x6002, 0x2f: 0x6002, + 0x30: 0x6002, 0x31: 0x6002, 0x32: 0x6002, 0x33: 0x6002, 0x34: 0x6002, 0x35: 0x6002, + 0x36: 0x6002, 0x37: 0x6002, 0x38: 0x6002, 0x39: 0x6002, 0x3a: 0x6002, 0x3b: 0x6002, + 0x3c: 0x6002, 0x3d: 0x6002, 0x3e: 0x6002, 0x3f: 0x6002, + // Block 0x1, offset 0x40 + 0x40: 0x6003, 0x41: 0x6003, 0x42: 0x6003, 0x43: 0x6003, 0x44: 0x6003, 0x45: 0x6003, + 0x46: 0x6003, 0x47: 0x6003, 0x48: 0x6003, 0x49: 0x6003, 0x4a: 0x6003, 0x4b: 0x6003, + 0x4c: 0x6003, 0x4d: 0x6003, 0x4e: 0x6003, 0x4f: 0x6003, 0x50: 0x6003, 0x51: 0x6003, + 0x52: 0x6003, 0x53: 0x6003, 0x54: 0x6003, 0x55: 0x6003, 0x56: 0x6003, 0x57: 0x6003, + 0x58: 0x6003, 0x59: 0x6003, 0x5a: 0x6003, 0x5b: 0x6003, 0x5c: 0x6003, 0x5d: 0x6003, + 0x5e: 0x6003, 0x5f: 0x6003, 0x60: 0x6004, 0x61: 0x6004, 0x62: 0x6004, 0x63: 0x6004, + 0x64: 0x6004, 0x65: 0x6004, 0x66: 0x6004, 0x67: 0x6004, 0x68: 0x6004, 0x69: 0x6004, + 0x6a: 0x6004, 0x6b: 0x6004, 0x6c: 0x6004, 0x6d: 0x6004, 0x6e: 0x6004, 0x6f: 0x6004, + 0x70: 0x6004, 0x71: 0x6004, 0x72: 0x6004, 0x73: 0x6004, 0x74: 0x6004, 0x75: 0x6004, + 0x76: 0x6004, 0x77: 0x6004, 0x78: 0x6004, 0x79: 0x6004, 0x7a: 0x6004, 0x7b: 0x6004, + 0x7c: 0x6004, 0x7d: 0x6004, 0x7e: 0x6004, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xe1: 0x2000, 0xe2: 0x6005, 0xe3: 0x6005, + 0xe4: 0x2000, 0xe5: 0x6006, 0xe6: 0x6005, 0xe7: 0x2000, 0xe8: 0x2000, + 0xea: 0x2000, 0xec: 0x6007, 0xed: 0x2000, 0xee: 0x2000, 0xef: 0x6008, + 0xf0: 0x2000, 0xf1: 0x2000, 0xf2: 0x2000, 0xf3: 0x2000, 0xf4: 0x2000, + 0xf6: 0x2000, 0xf7: 0x2000, 0xf8: 0x2000, 0xf9: 0x2000, 0xfa: 0x2000, + 0xfc: 0x2000, 0xfd: 0x2000, 0xfe: 0x2000, 0xff: 0x2000, + // Block 0x4, offset 0x100 + 0x106: 0x2000, + 0x110: 0x2000, + 0x117: 0x2000, + 0x118: 0x2000, + 0x11e: 0x2000, 0x11f: 0x2000, 0x120: 0x2000, 0x121: 0x2000, + 0x126: 0x2000, 0x128: 0x2000, 0x129: 0x2000, + 0x12a: 0x2000, 0x12c: 0x2000, 0x12d: 0x2000, + 0x130: 0x2000, 0x132: 0x2000, 0x133: 0x2000, + 0x137: 0x2000, 0x138: 0x2000, 0x139: 0x2000, 0x13a: 0x2000, + 0x13c: 0x2000, 0x13e: 0x2000, + // Block 0x5, offset 0x140 + 0x141: 0x2000, + 0x151: 0x2000, + 0x153: 0x2000, + 0x15b: 0x2000, + 0x166: 0x2000, 0x167: 0x2000, + 0x16b: 0x2000, + 0x171: 0x2000, 0x172: 0x2000, 0x173: 0x2000, + 0x178: 0x2000, + 0x17f: 0x2000, + // Block 0x6, offset 0x180 + 0x180: 0x2000, 0x181: 0x2000, 0x182: 
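The lookup helpers above walk a two-level trie: the lead byte selects a widthIndex block, each continuation byte is folded in via i<<6 + c, and lookupValue finally reads widthValues[n<<6+uint32(b)], so every block spans exactly 64 entries (which is why block k sits at byte offset k*64 in the block comments). Applications never call these internal functions directly; the following usage sketch assumes the standard public API of golang.org/x/text/width (LookupString, Properties.Kind, and the Narrow transformer).

package main

import (
	"fmt"

	"golang.org/x/text/width"
)

func main() {
	// LookupString mirrors the trie walk above: it returns the width
	// properties of the first rune in the string and the number of
	// bytes its UTF-8 encoding consumed.
	p, n := width.LookupString("\uFF21") // U+FF21 FULLWIDTH LATIN CAPITAL LETTER A
	fmt.Println(p.Kind(), n)             // EastAsianFullwidth 3

	// The inverseData mappings back transformers such as width.Narrow,
	// which fold fullwidth forms onto their canonical equivalents.
	fmt.Println(width.Narrow.String("\uFF21\uFF22\uFF23")) // ABC
}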
0x2000, 0x184: 0x2000, + 0x188: 0x2000, 0x189: 0x2000, 0x18a: 0x2000, 0x18b: 0x2000, + 0x18d: 0x2000, + 0x192: 0x2000, 0x193: 0x2000, + 0x1a6: 0x2000, 0x1a7: 0x2000, + 0x1ab: 0x2000, + // Block 0x7, offset 0x1c0 + 0x1ce: 0x2000, 0x1d0: 0x2000, + 0x1d2: 0x2000, 0x1d4: 0x2000, 0x1d6: 0x2000, + 0x1d8: 0x2000, 0x1da: 0x2000, 0x1dc: 0x2000, + // Block 0x8, offset 0x200 + 0x211: 0x2000, + 0x221: 0x2000, + // Block 0x9, offset 0x240 + 0x244: 0x2000, + 0x247: 0x2000, 0x249: 0x2000, 0x24a: 0x2000, 0x24b: 0x2000, + 0x24d: 0x2000, 0x250: 0x2000, + 0x258: 0x2000, 0x259: 0x2000, 0x25a: 0x2000, 0x25b: 0x2000, 0x25d: 0x2000, + 0x25f: 0x2000, + // Block 0xa, offset 0x280 + 0x280: 0x2000, 0x281: 0x2000, 0x282: 0x2000, 0x283: 0x2000, 0x284: 0x2000, 0x285: 0x2000, + 0x286: 0x2000, 0x287: 0x2000, 0x288: 0x2000, 0x289: 0x2000, 0x28a: 0x2000, 0x28b: 0x2000, + 0x28c: 0x2000, 0x28d: 0x2000, 0x28e: 0x2000, 0x28f: 0x2000, 0x290: 0x2000, 0x291: 0x2000, + 0x292: 0x2000, 0x293: 0x2000, 0x294: 0x2000, 0x295: 0x2000, 0x296: 0x2000, 0x297: 0x2000, + 0x298: 0x2000, 0x299: 0x2000, 0x29a: 0x2000, 0x29b: 0x2000, 0x29c: 0x2000, 0x29d: 0x2000, + 0x29e: 0x2000, 0x29f: 0x2000, 0x2a0: 0x2000, 0x2a1: 0x2000, 0x2a2: 0x2000, 0x2a3: 0x2000, + 0x2a4: 0x2000, 0x2a5: 0x2000, 0x2a6: 0x2000, 0x2a7: 0x2000, 0x2a8: 0x2000, 0x2a9: 0x2000, + 0x2aa: 0x2000, 0x2ab: 0x2000, 0x2ac: 0x2000, 0x2ad: 0x2000, 0x2ae: 0x2000, 0x2af: 0x2000, + 0x2b0: 0x2000, 0x2b1: 0x2000, 0x2b2: 0x2000, 0x2b3: 0x2000, 0x2b4: 0x2000, 0x2b5: 0x2000, + 0x2b6: 0x2000, 0x2b7: 0x2000, 0x2b8: 0x2000, 0x2b9: 0x2000, 0x2ba: 0x2000, 0x2bb: 0x2000, + 0x2bc: 0x2000, 0x2bd: 0x2000, 0x2be: 0x2000, 0x2bf: 0x2000, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x2000, 0x2c1: 0x2000, 0x2c2: 0x2000, 0x2c3: 0x2000, 0x2c4: 0x2000, 0x2c5: 0x2000, + 0x2c6: 0x2000, 0x2c7: 0x2000, 0x2c8: 0x2000, 0x2c9: 0x2000, 0x2ca: 0x2000, 0x2cb: 0x2000, + 0x2cc: 0x2000, 0x2cd: 0x2000, 0x2ce: 0x2000, 0x2cf: 0x2000, 0x2d0: 0x2000, 0x2d1: 0x2000, + 0x2d2: 0x2000, 0x2d3: 0x2000, 0x2d4: 0x2000, 0x2d5: 0x2000, 0x2d6: 0x2000, 0x2d7: 0x2000, + 0x2d8: 0x2000, 0x2d9: 0x2000, 0x2da: 0x2000, 0x2db: 0x2000, 0x2dc: 0x2000, 0x2dd: 0x2000, + 0x2de: 0x2000, 0x2df: 0x2000, 0x2e0: 0x2000, 0x2e1: 0x2000, 0x2e2: 0x2000, 0x2e3: 0x2000, + 0x2e4: 0x2000, 0x2e5: 0x2000, 0x2e6: 0x2000, 0x2e7: 0x2000, 0x2e8: 0x2000, 0x2e9: 0x2000, + 0x2ea: 0x2000, 0x2eb: 0x2000, 0x2ec: 0x2000, 0x2ed: 0x2000, 0x2ee: 0x2000, 0x2ef: 0x2000, + // Block 0xc, offset 0x300 + 0x311: 0x2000, + 0x312: 0x2000, 0x313: 0x2000, 0x314: 0x2000, 0x315: 0x2000, 0x316: 0x2000, 0x317: 0x2000, + 0x318: 0x2000, 0x319: 0x2000, 0x31a: 0x2000, 0x31b: 0x2000, 0x31c: 0x2000, 0x31d: 0x2000, + 0x31e: 0x2000, 0x31f: 0x2000, 0x320: 0x2000, 0x321: 0x2000, 0x323: 0x2000, + 0x324: 0x2000, 0x325: 0x2000, 0x326: 0x2000, 0x327: 0x2000, 0x328: 0x2000, 0x329: 0x2000, + 0x331: 0x2000, 0x332: 0x2000, 0x333: 0x2000, 0x334: 0x2000, 0x335: 0x2000, + 0x336: 0x2000, 0x337: 0x2000, 0x338: 0x2000, 0x339: 0x2000, 0x33a: 0x2000, 0x33b: 0x2000, + 0x33c: 0x2000, 0x33d: 0x2000, 0x33e: 0x2000, 0x33f: 0x2000, + // Block 0xd, offset 0x340 + 0x340: 0x2000, 0x341: 0x2000, 0x343: 0x2000, 0x344: 0x2000, 0x345: 0x2000, + 0x346: 0x2000, 0x347: 0x2000, 0x348: 0x2000, 0x349: 0x2000, + // Block 0xe, offset 0x380 + 0x381: 0x2000, + 0x390: 0x2000, 0x391: 0x2000, + 0x392: 0x2000, 0x393: 0x2000, 0x394: 0x2000, 0x395: 0x2000, 0x396: 0x2000, 0x397: 0x2000, + 0x398: 0x2000, 0x399: 0x2000, 0x39a: 0x2000, 0x39b: 0x2000, 0x39c: 0x2000, 0x39d: 0x2000, + 0x39e: 0x2000, 0x39f: 0x2000, 0x3a0: 0x2000, 0x3a1: 0x2000, 0x3a2: 0x2000, 0x3a3: 
0x2000, + 0x3a4: 0x2000, 0x3a5: 0x2000, 0x3a6: 0x2000, 0x3a7: 0x2000, 0x3a8: 0x2000, 0x3a9: 0x2000, + 0x3aa: 0x2000, 0x3ab: 0x2000, 0x3ac: 0x2000, 0x3ad: 0x2000, 0x3ae: 0x2000, 0x3af: 0x2000, + 0x3b0: 0x2000, 0x3b1: 0x2000, 0x3b2: 0x2000, 0x3b3: 0x2000, 0x3b4: 0x2000, 0x3b5: 0x2000, + 0x3b6: 0x2000, 0x3b7: 0x2000, 0x3b8: 0x2000, 0x3b9: 0x2000, 0x3ba: 0x2000, 0x3bb: 0x2000, + 0x3bc: 0x2000, 0x3bd: 0x2000, 0x3be: 0x2000, 0x3bf: 0x2000, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x2000, 0x3c1: 0x2000, 0x3c2: 0x2000, 0x3c3: 0x2000, 0x3c4: 0x2000, 0x3c5: 0x2000, + 0x3c6: 0x2000, 0x3c7: 0x2000, 0x3c8: 0x2000, 0x3c9: 0x2000, 0x3ca: 0x2000, 0x3cb: 0x2000, + 0x3cc: 0x2000, 0x3cd: 0x2000, 0x3ce: 0x2000, 0x3cf: 0x2000, 0x3d1: 0x2000, + // Block 0x10, offset 0x400 + 0x400: 0x4000, 0x401: 0x4000, 0x402: 0x4000, 0x403: 0x4000, 0x404: 0x4000, 0x405: 0x4000, + 0x406: 0x4000, 0x407: 0x4000, 0x408: 0x4000, 0x409: 0x4000, 0x40a: 0x4000, 0x40b: 0x4000, + 0x40c: 0x4000, 0x40d: 0x4000, 0x40e: 0x4000, 0x40f: 0x4000, 0x410: 0x4000, 0x411: 0x4000, + 0x412: 0x4000, 0x413: 0x4000, 0x414: 0x4000, 0x415: 0x4000, 0x416: 0x4000, 0x417: 0x4000, + 0x418: 0x4000, 0x419: 0x4000, 0x41a: 0x4000, 0x41b: 0x4000, 0x41c: 0x4000, 0x41d: 0x4000, + 0x41e: 0x4000, 0x41f: 0x4000, 0x420: 0x4000, 0x421: 0x4000, 0x422: 0x4000, 0x423: 0x4000, + 0x424: 0x4000, 0x425: 0x4000, 0x426: 0x4000, 0x427: 0x4000, 0x428: 0x4000, 0x429: 0x4000, + 0x42a: 0x4000, 0x42b: 0x4000, 0x42c: 0x4000, 0x42d: 0x4000, 0x42e: 0x4000, 0x42f: 0x4000, + 0x430: 0x4000, 0x431: 0x4000, 0x432: 0x4000, 0x433: 0x4000, 0x434: 0x4000, 0x435: 0x4000, + 0x436: 0x4000, 0x437: 0x4000, 0x438: 0x4000, 0x439: 0x4000, 0x43a: 0x4000, 0x43b: 0x4000, + 0x43c: 0x4000, 0x43d: 0x4000, 0x43e: 0x4000, 0x43f: 0x4000, + // Block 0x11, offset 0x440 + 0x440: 0x4000, 0x441: 0x4000, 0x442: 0x4000, 0x443: 0x4000, 0x444: 0x4000, 0x445: 0x4000, + 0x446: 0x4000, 0x447: 0x4000, 0x448: 0x4000, 0x449: 0x4000, 0x44a: 0x4000, 0x44b: 0x4000, + 0x44c: 0x4000, 0x44d: 0x4000, 0x44e: 0x4000, 0x44f: 0x4000, 0x450: 0x4000, 0x451: 0x4000, + 0x452: 0x4000, 0x453: 0x4000, 0x454: 0x4000, 0x455: 0x4000, 0x456: 0x4000, 0x457: 0x4000, + 0x458: 0x4000, 0x459: 0x4000, 0x45a: 0x4000, 0x45b: 0x4000, 0x45c: 0x4000, 0x45d: 0x4000, + 0x45e: 0x4000, 0x45f: 0x4000, + // Block 0x12, offset 0x480 + 0x490: 0x2000, + 0x493: 0x2000, 0x494: 0x2000, 0x495: 0x2000, 0x496: 0x2000, + 0x498: 0x2000, 0x499: 0x2000, 0x49c: 0x2000, 0x49d: 0x2000, + 0x4a0: 0x2000, 0x4a1: 0x2000, 0x4a2: 0x2000, + 0x4a4: 0x2000, 0x4a5: 0x2000, 0x4a6: 0x2000, 0x4a7: 0x2000, + 0x4b0: 0x2000, 0x4b2: 0x2000, 0x4b3: 0x2000, 0x4b5: 0x2000, + 0x4bb: 0x2000, + 0x4be: 0x2000, + // Block 0x13, offset 0x4c0 + 0x4f4: 0x2000, + 0x4ff: 0x2000, + // Block 0x14, offset 0x500 + 0x501: 0x2000, 0x502: 0x2000, 0x503: 0x2000, 0x504: 0x2000, + 0x529: 0xa009, + 0x52c: 0x2000, + // Block 0x15, offset 0x540 + 0x543: 0x2000, 0x545: 0x2000, + 0x549: 0x2000, + 0x553: 0x2000, 0x556: 0x2000, + 0x561: 0x2000, 0x562: 0x2000, + 0x566: 0x2000, + 0x56b: 0x2000, + // Block 0x16, offset 0x580 + 0x593: 0x2000, 0x594: 0x2000, + 0x59b: 0x2000, 0x59c: 0x2000, 0x59d: 0x2000, + 0x59e: 0x2000, 0x5a0: 0x2000, 0x5a1: 0x2000, 0x5a2: 0x2000, 0x5a3: 0x2000, + 0x5a4: 0x2000, 0x5a5: 0x2000, 0x5a6: 0x2000, 0x5a7: 0x2000, 0x5a8: 0x2000, 0x5a9: 0x2000, + 0x5aa: 0x2000, 0x5ab: 0x2000, + 0x5b0: 0x2000, 0x5b1: 0x2000, 0x5b2: 0x2000, 0x5b3: 0x2000, 0x5b4: 0x2000, 0x5b5: 0x2000, + 0x5b6: 0x2000, 0x5b7: 0x2000, 0x5b8: 0x2000, 0x5b9: 0x2000, + // Block 0x17, offset 0x5c0 + 0x5c9: 0x2000, + 0x5d0: 0x200a, 0x5d1: 0x200b, + 
0x5d2: 0x200a, 0x5d3: 0x200c, 0x5d4: 0x2000, 0x5d5: 0x2000, 0x5d6: 0x2000, 0x5d7: 0x2000, + 0x5d8: 0x2000, 0x5d9: 0x2000, + 0x5f8: 0x2000, 0x5f9: 0x2000, + // Block 0x18, offset 0x600 + 0x612: 0x2000, 0x614: 0x2000, + 0x627: 0x2000, + // Block 0x19, offset 0x640 + 0x640: 0x2000, 0x642: 0x2000, 0x643: 0x2000, + 0x647: 0x2000, 0x648: 0x2000, 0x64b: 0x2000, + 0x64f: 0x2000, 0x651: 0x2000, + 0x655: 0x2000, + 0x65a: 0x2000, 0x65d: 0x2000, + 0x65e: 0x2000, 0x65f: 0x2000, 0x660: 0x2000, 0x663: 0x2000, + 0x665: 0x2000, 0x667: 0x2000, 0x668: 0x2000, 0x669: 0x2000, + 0x66a: 0x2000, 0x66b: 0x2000, 0x66c: 0x2000, 0x66e: 0x2000, + 0x674: 0x2000, 0x675: 0x2000, + 0x676: 0x2000, 0x677: 0x2000, + 0x67c: 0x2000, 0x67d: 0x2000, + // Block 0x1a, offset 0x680 + 0x688: 0x2000, + 0x68c: 0x2000, + 0x692: 0x2000, + 0x6a0: 0x2000, 0x6a1: 0x2000, + 0x6a4: 0x2000, 0x6a5: 0x2000, 0x6a6: 0x2000, 0x6a7: 0x2000, + 0x6aa: 0x2000, 0x6ab: 0x2000, 0x6ae: 0x2000, 0x6af: 0x2000, + // Block 0x1b, offset 0x6c0 + 0x6c2: 0x2000, 0x6c3: 0x2000, + 0x6c6: 0x2000, 0x6c7: 0x2000, + 0x6d5: 0x2000, + 0x6d9: 0x2000, + 0x6e5: 0x2000, + 0x6ff: 0x2000, + // Block 0x1c, offset 0x700 + 0x712: 0x2000, + 0x71a: 0x4000, 0x71b: 0x4000, + 0x729: 0x4000, + 0x72a: 0x4000, + // Block 0x1d, offset 0x740 + 0x769: 0x4000, + 0x76a: 0x4000, 0x76b: 0x4000, 0x76c: 0x4000, + 0x770: 0x4000, 0x773: 0x4000, + // Block 0x1e, offset 0x780 + 0x7a0: 0x2000, 0x7a1: 0x2000, 0x7a2: 0x2000, 0x7a3: 0x2000, + 0x7a4: 0x2000, 0x7a5: 0x2000, 0x7a6: 0x2000, 0x7a7: 0x2000, 0x7a8: 0x2000, 0x7a9: 0x2000, + 0x7aa: 0x2000, 0x7ab: 0x2000, 0x7ac: 0x2000, 0x7ad: 0x2000, 0x7ae: 0x2000, 0x7af: 0x2000, + 0x7b0: 0x2000, 0x7b1: 0x2000, 0x7b2: 0x2000, 0x7b3: 0x2000, 0x7b4: 0x2000, 0x7b5: 0x2000, + 0x7b6: 0x2000, 0x7b7: 0x2000, 0x7b8: 0x2000, 0x7b9: 0x2000, 0x7ba: 0x2000, 0x7bb: 0x2000, + 0x7bc: 0x2000, 0x7bd: 0x2000, 0x7be: 0x2000, 0x7bf: 0x2000, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x2000, 0x7c1: 0x2000, 0x7c2: 0x2000, 0x7c3: 0x2000, 0x7c4: 0x2000, 0x7c5: 0x2000, + 0x7c6: 0x2000, 0x7c7: 0x2000, 0x7c8: 0x2000, 0x7c9: 0x2000, 0x7ca: 0x2000, 0x7cb: 0x2000, + 0x7cc: 0x2000, 0x7cd: 0x2000, 0x7ce: 0x2000, 0x7cf: 0x2000, 0x7d0: 0x2000, 0x7d1: 0x2000, + 0x7d2: 0x2000, 0x7d3: 0x2000, 0x7d4: 0x2000, 0x7d5: 0x2000, 0x7d6: 0x2000, 0x7d7: 0x2000, + 0x7d8: 0x2000, 0x7d9: 0x2000, 0x7da: 0x2000, 0x7db: 0x2000, 0x7dc: 0x2000, 0x7dd: 0x2000, + 0x7de: 0x2000, 0x7df: 0x2000, 0x7e0: 0x2000, 0x7e1: 0x2000, 0x7e2: 0x2000, 0x7e3: 0x2000, + 0x7e4: 0x2000, 0x7e5: 0x2000, 0x7e6: 0x2000, 0x7e7: 0x2000, 0x7e8: 0x2000, 0x7e9: 0x2000, + 0x7eb: 0x2000, 0x7ec: 0x2000, 0x7ed: 0x2000, 0x7ee: 0x2000, 0x7ef: 0x2000, + 0x7f0: 0x2000, 0x7f1: 0x2000, 0x7f2: 0x2000, 0x7f3: 0x2000, 0x7f4: 0x2000, 0x7f5: 0x2000, + 0x7f6: 0x2000, 0x7f7: 0x2000, 0x7f8: 0x2000, 0x7f9: 0x2000, 0x7fa: 0x2000, 0x7fb: 0x2000, + 0x7fc: 0x2000, 0x7fd: 0x2000, 0x7fe: 0x2000, 0x7ff: 0x2000, + // Block 0x20, offset 0x800 + 0x800: 0x2000, 0x801: 0x2000, 0x802: 0x200d, 0x803: 0x2000, 0x804: 0x2000, 0x805: 0x2000, + 0x806: 0x2000, 0x807: 0x2000, 0x808: 0x2000, 0x809: 0x2000, 0x80a: 0x2000, 0x80b: 0x2000, + 0x80c: 0x2000, 0x80d: 0x2000, 0x80e: 0x2000, 0x80f: 0x2000, 0x810: 0x2000, 0x811: 0x2000, + 0x812: 0x2000, 0x813: 0x2000, 0x814: 0x2000, 0x815: 0x2000, 0x816: 0x2000, 0x817: 0x2000, + 0x818: 0x2000, 0x819: 0x2000, 0x81a: 0x2000, 0x81b: 0x2000, 0x81c: 0x2000, 0x81d: 0x2000, + 0x81e: 0x2000, 0x81f: 0x2000, 0x820: 0x2000, 0x821: 0x2000, 0x822: 0x2000, 0x823: 0x2000, + 0x824: 0x2000, 0x825: 0x2000, 0x826: 0x2000, 0x827: 0x2000, 0x828: 0x2000, 0x829: 0x2000, + 
0x82a: 0x2000, 0x82b: 0x2000, 0x82c: 0x2000, 0x82d: 0x2000, 0x82e: 0x2000, 0x82f: 0x2000, + 0x830: 0x2000, 0x831: 0x2000, 0x832: 0x2000, 0x833: 0x2000, 0x834: 0x2000, 0x835: 0x2000, + 0x836: 0x2000, 0x837: 0x2000, 0x838: 0x2000, 0x839: 0x2000, 0x83a: 0x2000, 0x83b: 0x2000, + 0x83c: 0x2000, 0x83d: 0x2000, 0x83e: 0x2000, 0x83f: 0x2000, + // Block 0x21, offset 0x840 + 0x840: 0x2000, 0x841: 0x2000, 0x842: 0x2000, 0x843: 0x2000, 0x844: 0x2000, 0x845: 0x2000, + 0x846: 0x2000, 0x847: 0x2000, 0x848: 0x2000, 0x849: 0x2000, 0x84a: 0x2000, 0x84b: 0x2000, + 0x850: 0x2000, 0x851: 0x2000, + 0x852: 0x2000, 0x853: 0x2000, 0x854: 0x2000, 0x855: 0x2000, 0x856: 0x2000, 0x857: 0x2000, + 0x858: 0x2000, 0x859: 0x2000, 0x85a: 0x2000, 0x85b: 0x2000, 0x85c: 0x2000, 0x85d: 0x2000, + 0x85e: 0x2000, 0x85f: 0x2000, 0x860: 0x2000, 0x861: 0x2000, 0x862: 0x2000, 0x863: 0x2000, + 0x864: 0x2000, 0x865: 0x2000, 0x866: 0x2000, 0x867: 0x2000, 0x868: 0x2000, 0x869: 0x2000, + 0x86a: 0x2000, 0x86b: 0x2000, 0x86c: 0x2000, 0x86d: 0x2000, 0x86e: 0x2000, 0x86f: 0x2000, + 0x870: 0x2000, 0x871: 0x2000, 0x872: 0x2000, 0x873: 0x2000, + // Block 0x22, offset 0x880 + 0x880: 0x2000, 0x881: 0x2000, 0x882: 0x2000, 0x883: 0x2000, 0x884: 0x2000, 0x885: 0x2000, + 0x886: 0x2000, 0x887: 0x2000, 0x888: 0x2000, 0x889: 0x2000, 0x88a: 0x2000, 0x88b: 0x2000, + 0x88c: 0x2000, 0x88d: 0x2000, 0x88e: 0x2000, 0x88f: 0x2000, + 0x892: 0x2000, 0x893: 0x2000, 0x894: 0x2000, 0x895: 0x2000, + 0x8a0: 0x200e, 0x8a1: 0x2000, 0x8a3: 0x2000, + 0x8a4: 0x2000, 0x8a5: 0x2000, 0x8a6: 0x2000, 0x8a7: 0x2000, 0x8a8: 0x2000, 0x8a9: 0x2000, + 0x8b2: 0x2000, 0x8b3: 0x2000, + 0x8b6: 0x2000, 0x8b7: 0x2000, + 0x8bc: 0x2000, 0x8bd: 0x2000, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x2000, 0x8c1: 0x2000, + 0x8c6: 0x2000, 0x8c7: 0x2000, 0x8c8: 0x2000, 0x8cb: 0x200f, + 0x8ce: 0x2000, 0x8cf: 0x2000, 0x8d0: 0x2000, 0x8d1: 0x2000, + 0x8e2: 0x2000, 0x8e3: 0x2000, + 0x8e4: 0x2000, 0x8e5: 0x2000, + 0x8ef: 0x2000, + 0x8fd: 0x4000, 0x8fe: 0x4000, + // Block 0x24, offset 0x900 + 0x905: 0x2000, + 0x906: 0x2000, 0x909: 0x2000, + 0x90e: 0x2000, 0x90f: 0x2000, + 0x914: 0x4000, 0x915: 0x4000, + 0x91c: 0x2000, + 0x91e: 0x2000, + // Block 0x25, offset 0x940 + 0x940: 0x2000, 0x942: 0x2000, + 0x948: 0x4000, 0x949: 0x4000, 0x94a: 0x4000, 0x94b: 0x4000, + 0x94c: 0x4000, 0x94d: 0x4000, 0x94e: 0x4000, 0x94f: 0x4000, 0x950: 0x4000, 0x951: 0x4000, + 0x952: 0x4000, 0x953: 0x4000, + 0x960: 0x2000, 0x961: 0x2000, 0x963: 0x2000, + 0x964: 0x2000, 0x965: 0x2000, 0x967: 0x2000, 0x968: 0x2000, 0x969: 0x2000, + 0x96a: 0x2000, 0x96c: 0x2000, 0x96d: 0x2000, 0x96f: 0x2000, + 0x97f: 0x4000, + // Block 0x26, offset 0x980 + 0x993: 0x4000, + 0x99e: 0x2000, 0x99f: 0x2000, 0x9a1: 0x4000, + 0x9aa: 0x4000, 0x9ab: 0x4000, + 0x9bd: 0x4000, 0x9be: 0x4000, 0x9bf: 0x2000, + // Block 0x27, offset 0x9c0 + 0x9c4: 0x4000, 0x9c5: 0x4000, + 0x9c6: 0x2000, 0x9c7: 0x2000, 0x9c8: 0x2000, 0x9c9: 0x2000, 0x9ca: 0x2000, 0x9cb: 0x2000, + 0x9cc: 0x2000, 0x9cd: 0x2000, 0x9ce: 0x4000, 0x9cf: 0x2000, 0x9d0: 0x2000, 0x9d1: 0x2000, + 0x9d2: 0x2000, 0x9d3: 0x2000, 0x9d4: 0x4000, 0x9d5: 0x2000, 0x9d6: 0x2000, 0x9d7: 0x2000, + 0x9d8: 0x2000, 0x9d9: 0x2000, 0x9da: 0x2000, 0x9db: 0x2000, 0x9dc: 0x2000, 0x9dd: 0x2000, + 0x9de: 0x2000, 0x9df: 0x2000, 0x9e0: 0x2000, 0x9e1: 0x2000, 0x9e3: 0x2000, + 0x9e8: 0x2000, 0x9e9: 0x2000, + 0x9ea: 0x4000, 0x9eb: 0x2000, 0x9ec: 0x2000, 0x9ed: 0x2000, 0x9ee: 0x2000, 0x9ef: 0x2000, + 0x9f0: 0x2000, 0x9f1: 0x2000, 0x9f2: 0x4000, 0x9f3: 0x4000, 0x9f4: 0x2000, 0x9f5: 0x4000, + 0x9f6: 0x2000, 0x9f7: 0x2000, 0x9f8: 0x2000, 0x9f9: 
0x2000, 0x9fa: 0x4000, 0x9fb: 0x2000, + 0x9fc: 0x2000, 0x9fd: 0x4000, 0x9fe: 0x2000, 0x9ff: 0x2000, + // Block 0x28, offset 0xa00 + 0xa05: 0x4000, + 0xa0a: 0x4000, 0xa0b: 0x4000, + 0xa28: 0x4000, + 0xa3d: 0x2000, + // Block 0x29, offset 0xa40 + 0xa4c: 0x4000, 0xa4e: 0x4000, + 0xa53: 0x4000, 0xa54: 0x4000, 0xa55: 0x4000, 0xa57: 0x4000, + 0xa76: 0x2000, 0xa77: 0x2000, 0xa78: 0x2000, 0xa79: 0x2000, 0xa7a: 0x2000, 0xa7b: 0x2000, + 0xa7c: 0x2000, 0xa7d: 0x2000, 0xa7e: 0x2000, 0xa7f: 0x2000, + // Block 0x2a, offset 0xa80 + 0xa95: 0x4000, 0xa96: 0x4000, 0xa97: 0x4000, + 0xab0: 0x4000, + 0xabf: 0x4000, + // Block 0x2b, offset 0xac0 + 0xae6: 0x6000, 0xae7: 0x6000, 0xae8: 0x6000, 0xae9: 0x6000, + 0xaea: 0x6000, 0xaeb: 0x6000, 0xaec: 0x6000, 0xaed: 0x6000, + // Block 0x2c, offset 0xb00 + 0xb05: 0x6010, + 0xb06: 0x6011, + // Block 0x2d, offset 0xb40 + 0xb5b: 0x4000, 0xb5c: 0x4000, + // Block 0x2e, offset 0xb80 + 0xb90: 0x4000, + 0xb95: 0x4000, 0xb96: 0x2000, 0xb97: 0x2000, + 0xb98: 0x2000, 0xb99: 0x2000, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x4000, 0xbc1: 0x4000, 0xbc2: 0x4000, 0xbc3: 0x4000, 0xbc4: 0x4000, 0xbc5: 0x4000, + 0xbc6: 0x4000, 0xbc7: 0x4000, 0xbc8: 0x4000, 0xbc9: 0x4000, 0xbca: 0x4000, 0xbcb: 0x4000, + 0xbcc: 0x4000, 0xbcd: 0x4000, 0xbce: 0x4000, 0xbcf: 0x4000, 0xbd0: 0x4000, 0xbd1: 0x4000, + 0xbd2: 0x4000, 0xbd3: 0x4000, 0xbd4: 0x4000, 0xbd5: 0x4000, 0xbd6: 0x4000, 0xbd7: 0x4000, + 0xbd8: 0x4000, 0xbd9: 0x4000, 0xbdb: 0x4000, 0xbdc: 0x4000, 0xbdd: 0x4000, + 0xbde: 0x4000, 0xbdf: 0x4000, 0xbe0: 0x4000, 0xbe1: 0x4000, 0xbe2: 0x4000, 0xbe3: 0x4000, + 0xbe4: 0x4000, 0xbe5: 0x4000, 0xbe6: 0x4000, 0xbe7: 0x4000, 0xbe8: 0x4000, 0xbe9: 0x4000, + 0xbea: 0x4000, 0xbeb: 0x4000, 0xbec: 0x4000, 0xbed: 0x4000, 0xbee: 0x4000, 0xbef: 0x4000, + 0xbf0: 0x4000, 0xbf1: 0x4000, 0xbf2: 0x4000, 0xbf3: 0x4000, 0xbf4: 0x4000, 0xbf5: 0x4000, + 0xbf6: 0x4000, 0xbf7: 0x4000, 0xbf8: 0x4000, 0xbf9: 0x4000, 0xbfa: 0x4000, 0xbfb: 0x4000, + 0xbfc: 0x4000, 0xbfd: 0x4000, 0xbfe: 0x4000, 0xbff: 0x4000, + // Block 0x30, offset 0xc00 + 0xc00: 0x4000, 0xc01: 0x4000, 0xc02: 0x4000, 0xc03: 0x4000, 0xc04: 0x4000, 0xc05: 0x4000, + 0xc06: 0x4000, 0xc07: 0x4000, 0xc08: 0x4000, 0xc09: 0x4000, 0xc0a: 0x4000, 0xc0b: 0x4000, + 0xc0c: 0x4000, 0xc0d: 0x4000, 0xc0e: 0x4000, 0xc0f: 0x4000, 0xc10: 0x4000, 0xc11: 0x4000, + 0xc12: 0x4000, 0xc13: 0x4000, 0xc14: 0x4000, 0xc15: 0x4000, 0xc16: 0x4000, 0xc17: 0x4000, + 0xc18: 0x4000, 0xc19: 0x4000, 0xc1a: 0x4000, 0xc1b: 0x4000, 0xc1c: 0x4000, 0xc1d: 0x4000, + 0xc1e: 0x4000, 0xc1f: 0x4000, 0xc20: 0x4000, 0xc21: 0x4000, 0xc22: 0x4000, 0xc23: 0x4000, + 0xc24: 0x4000, 0xc25: 0x4000, 0xc26: 0x4000, 0xc27: 0x4000, 0xc28: 0x4000, 0xc29: 0x4000, + 0xc2a: 0x4000, 0xc2b: 0x4000, 0xc2c: 0x4000, 0xc2d: 0x4000, 0xc2e: 0x4000, 0xc2f: 0x4000, + 0xc30: 0x4000, 0xc31: 0x4000, 0xc32: 0x4000, 0xc33: 0x4000, + // Block 0x31, offset 0xc40 + 0xc40: 0x4000, 0xc41: 0x4000, 0xc42: 0x4000, 0xc43: 0x4000, 0xc44: 0x4000, 0xc45: 0x4000, + 0xc46: 0x4000, 0xc47: 0x4000, 0xc48: 0x4000, 0xc49: 0x4000, 0xc4a: 0x4000, 0xc4b: 0x4000, + 0xc4c: 0x4000, 0xc4d: 0x4000, 0xc4e: 0x4000, 0xc4f: 0x4000, 0xc50: 0x4000, 0xc51: 0x4000, + 0xc52: 0x4000, 0xc53: 0x4000, 0xc54: 0x4000, 0xc55: 0x4000, + 0xc70: 0x4000, 0xc71: 0x4000, 0xc72: 0x4000, 0xc73: 0x4000, 0xc74: 0x4000, 0xc75: 0x4000, + 0xc76: 0x4000, 0xc77: 0x4000, 0xc78: 0x4000, 0xc79: 0x4000, 0xc7a: 0x4000, 0xc7b: 0x4000, + // Block 0x32, offset 0xc80 + 0xc80: 0x9012, 0xc81: 0x4013, 0xc82: 0x4014, 0xc83: 0x4000, 0xc84: 0x4000, 0xc85: 0x4000, + 0xc86: 0x4000, 0xc87: 0x4000, 0xc88: 0x4000, 
0xc89: 0x4000, 0xc8a: 0x4000, 0xc8b: 0x4000, + 0xc8c: 0x4015, 0xc8d: 0x4015, 0xc8e: 0x4000, 0xc8f: 0x4000, 0xc90: 0x4000, 0xc91: 0x4000, + 0xc92: 0x4000, 0xc93: 0x4000, 0xc94: 0x4000, 0xc95: 0x4000, 0xc96: 0x4000, 0xc97: 0x4000, + 0xc98: 0x4000, 0xc99: 0x4000, 0xc9a: 0x4000, 0xc9b: 0x4000, 0xc9c: 0x4000, 0xc9d: 0x4000, + 0xc9e: 0x4000, 0xc9f: 0x4000, 0xca0: 0x4000, 0xca1: 0x4000, 0xca2: 0x4000, 0xca3: 0x4000, + 0xca4: 0x4000, 0xca5: 0x4000, 0xca6: 0x4000, 0xca7: 0x4000, 0xca8: 0x4000, 0xca9: 0x4000, + 0xcaa: 0x4000, 0xcab: 0x4000, 0xcac: 0x4000, 0xcad: 0x4000, 0xcae: 0x4000, 0xcaf: 0x4000, + 0xcb0: 0x4000, 0xcb1: 0x4000, 0xcb2: 0x4000, 0xcb3: 0x4000, 0xcb4: 0x4000, 0xcb5: 0x4000, + 0xcb6: 0x4000, 0xcb7: 0x4000, 0xcb8: 0x4000, 0xcb9: 0x4000, 0xcba: 0x4000, 0xcbb: 0x4000, + 0xcbc: 0x4000, 0xcbd: 0x4000, 0xcbe: 0x4000, + // Block 0x33, offset 0xcc0 + 0xcc1: 0x4000, 0xcc2: 0x4000, 0xcc3: 0x4000, 0xcc4: 0x4000, 0xcc5: 0x4000, + 0xcc6: 0x4000, 0xcc7: 0x4000, 0xcc8: 0x4000, 0xcc9: 0x4000, 0xcca: 0x4000, 0xccb: 0x4000, + 0xccc: 0x4000, 0xccd: 0x4000, 0xcce: 0x4000, 0xccf: 0x4000, 0xcd0: 0x4000, 0xcd1: 0x4000, + 0xcd2: 0x4000, 0xcd3: 0x4000, 0xcd4: 0x4000, 0xcd5: 0x4000, 0xcd6: 0x4000, 0xcd7: 0x4000, + 0xcd8: 0x4000, 0xcd9: 0x4000, 0xcda: 0x4000, 0xcdb: 0x4000, 0xcdc: 0x4000, 0xcdd: 0x4000, + 0xcde: 0x4000, 0xcdf: 0x4000, 0xce0: 0x4000, 0xce1: 0x4000, 0xce2: 0x4000, 0xce3: 0x4000, + 0xce4: 0x4000, 0xce5: 0x4000, 0xce6: 0x4000, 0xce7: 0x4000, 0xce8: 0x4000, 0xce9: 0x4000, + 0xcea: 0x4000, 0xceb: 0x4000, 0xcec: 0x4000, 0xced: 0x4000, 0xcee: 0x4000, 0xcef: 0x4000, + 0xcf0: 0x4000, 0xcf1: 0x4000, 0xcf2: 0x4000, 0xcf3: 0x4000, 0xcf4: 0x4000, 0xcf5: 0x4000, + 0xcf6: 0x4000, 0xcf7: 0x4000, 0xcf8: 0x4000, 0xcf9: 0x4000, 0xcfa: 0x4000, 0xcfb: 0x4000, + 0xcfc: 0x4000, 0xcfd: 0x4000, 0xcfe: 0x4000, 0xcff: 0x4000, + // Block 0x34, offset 0xd00 + 0xd00: 0x4000, 0xd01: 0x4000, 0xd02: 0x4000, 0xd03: 0x4000, 0xd04: 0x4000, 0xd05: 0x4000, + 0xd06: 0x4000, 0xd07: 0x4000, 0xd08: 0x4000, 0xd09: 0x4000, 0xd0a: 0x4000, 0xd0b: 0x4000, + 0xd0c: 0x4000, 0xd0d: 0x4000, 0xd0e: 0x4000, 0xd0f: 0x4000, 0xd10: 0x4000, 0xd11: 0x4000, + 0xd12: 0x4000, 0xd13: 0x4000, 0xd14: 0x4000, 0xd15: 0x4000, 0xd16: 0x4000, + 0xd19: 0x4016, 0xd1a: 0x4017, 0xd1b: 0x4000, 0xd1c: 0x4000, 0xd1d: 0x4000, + 0xd1e: 0x4000, 0xd1f: 0x4000, 0xd20: 0x4000, 0xd21: 0x4018, 0xd22: 0x4019, 0xd23: 0x401a, + 0xd24: 0x401b, 0xd25: 0x401c, 0xd26: 0x401d, 0xd27: 0x401e, 0xd28: 0x401f, 0xd29: 0x4020, + 0xd2a: 0x4021, 0xd2b: 0x4022, 0xd2c: 0x4000, 0xd2d: 0x4010, 0xd2e: 0x4000, 0xd2f: 0x4023, + 0xd30: 0x4000, 0xd31: 0x4024, 0xd32: 0x4000, 0xd33: 0x4025, 0xd34: 0x4000, 0xd35: 0x4026, + 0xd36: 0x4000, 0xd37: 0x401a, 0xd38: 0x4000, 0xd39: 0x4027, 0xd3a: 0x4000, 0xd3b: 0x4028, + 0xd3c: 0x4000, 0xd3d: 0x4020, 0xd3e: 0x4000, 0xd3f: 0x4029, + // Block 0x35, offset 0xd40 + 0xd40: 0x4000, 0xd41: 0x402a, 0xd42: 0x4000, 0xd43: 0x402b, 0xd44: 0x402c, 0xd45: 0x4000, + 0xd46: 0x4017, 0xd47: 0x4000, 0xd48: 0x402d, 0xd49: 0x4000, 0xd4a: 0x402e, 0xd4b: 0x402f, + 0xd4c: 0x4030, 0xd4d: 0x4017, 0xd4e: 0x4016, 0xd4f: 0x4017, 0xd50: 0x4000, 0xd51: 0x4000, + 0xd52: 0x4031, 0xd53: 0x4000, 0xd54: 0x4000, 0xd55: 0x4031, 0xd56: 0x4000, 0xd57: 0x4000, + 0xd58: 0x4032, 0xd59: 0x4000, 0xd5a: 0x4000, 0xd5b: 0x4032, 0xd5c: 0x4000, 0xd5d: 0x4000, + 0xd5e: 0x4033, 0xd5f: 0x402e, 0xd60: 0x4034, 0xd61: 0x4035, 0xd62: 0x4034, 0xd63: 0x4036, + 0xd64: 0x4037, 0xd65: 0x4024, 0xd66: 0x4035, 0xd67: 0x4025, 0xd68: 0x4038, 0xd69: 0x4038, + 0xd6a: 0x4039, 0xd6b: 0x4039, 0xd6c: 0x403a, 0xd6d: 0x403a, 0xd6e: 
0x4000, 0xd6f: 0x4035, + 0xd70: 0x4000, 0xd71: 0x4000, 0xd72: 0x403b, 0xd73: 0x403c, 0xd74: 0x4000, 0xd75: 0x4000, + 0xd76: 0x4000, 0xd77: 0x4000, 0xd78: 0x4000, 0xd79: 0x4000, 0xd7a: 0x4000, 0xd7b: 0x403d, + 0xd7c: 0x401c, 0xd7d: 0x4000, 0xd7e: 0x4000, 0xd7f: 0x4000, + // Block 0x36, offset 0xd80 + 0xd85: 0x4000, + 0xd86: 0x4000, 0xd87: 0x4000, 0xd88: 0x4000, 0xd89: 0x4000, 0xd8a: 0x4000, 0xd8b: 0x4000, + 0xd8c: 0x4000, 0xd8d: 0x4000, 0xd8e: 0x4000, 0xd8f: 0x4000, 0xd90: 0x4000, 0xd91: 0x4000, + 0xd92: 0x4000, 0xd93: 0x4000, 0xd94: 0x4000, 0xd95: 0x4000, 0xd96: 0x4000, 0xd97: 0x4000, + 0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000, + 0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000, + 0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000, + 0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000, 0xdae: 0x4000, 0xdaf: 0x4000, + 0xdb1: 0x403e, 0xdb2: 0x403e, 0xdb3: 0x403e, 0xdb4: 0x403e, 0xdb5: 0x403e, + 0xdb6: 0x403e, 0xdb7: 0x403e, 0xdb8: 0x403e, 0xdb9: 0x403e, 0xdba: 0x403e, 0xdbb: 0x403e, + 0xdbc: 0x403e, 0xdbd: 0x403e, 0xdbe: 0x403e, 0xdbf: 0x403e, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x4037, 0xdc1: 0x4037, 0xdc2: 0x4037, 0xdc3: 0x4037, 0xdc4: 0x4037, 0xdc5: 0x4037, + 0xdc6: 0x4037, 0xdc7: 0x4037, 0xdc8: 0x4037, 0xdc9: 0x4037, 0xdca: 0x4037, 0xdcb: 0x4037, + 0xdcc: 0x4037, 0xdcd: 0x4037, 0xdce: 0x4037, 0xdcf: 0x400e, 0xdd0: 0x403f, 0xdd1: 0x4040, + 0xdd2: 0x4041, 0xdd3: 0x4040, 0xdd4: 0x403f, 0xdd5: 0x4042, 0xdd6: 0x4043, 0xdd7: 0x4044, + 0xdd8: 0x4040, 0xdd9: 0x4041, 0xdda: 0x4040, 0xddb: 0x4045, 0xddc: 0x4009, 0xddd: 0x4045, + 0xdde: 0x4046, 0xddf: 0x4045, 0xde0: 0x4047, 0xde1: 0x400b, 0xde2: 0x400a, 0xde3: 0x400c, + 0xde4: 0x4048, 0xde5: 0x4000, 0xde6: 0x4000, 0xde7: 0x4000, 0xde8: 0x4000, 0xde9: 0x4000, + 0xdea: 0x4000, 0xdeb: 0x4000, 0xdec: 0x4000, 0xded: 0x4000, 0xdee: 0x4000, 0xdef: 0x4000, + 0xdf0: 0x4000, 0xdf1: 0x4000, 0xdf2: 0x4000, 0xdf3: 0x4000, 0xdf4: 0x4000, 0xdf5: 0x4000, + 0xdf6: 0x4000, 0xdf7: 0x4000, 0xdf8: 0x4000, 0xdf9: 0x4000, 0xdfa: 0x4000, 0xdfb: 0x4000, + 0xdfc: 0x4000, 0xdfd: 0x4000, 0xdfe: 0x4000, 0xdff: 0x4000, + // Block 0x38, offset 0xe00 + 0xe00: 0x4000, 0xe01: 0x4000, 0xe02: 0x4000, 0xe03: 0x4000, 0xe04: 0x4000, 0xe05: 0x4000, + 0xe06: 0x4000, 0xe07: 0x4000, 0xe08: 0x4000, 0xe09: 0x4000, 0xe0a: 0x4000, 0xe0b: 0x4000, + 0xe0c: 0x4000, 0xe0d: 0x4000, 0xe0e: 0x4000, 0xe10: 0x4000, 0xe11: 0x4000, + 0xe12: 0x4000, 0xe13: 0x4000, 0xe14: 0x4000, 0xe15: 0x4000, 0xe16: 0x4000, 0xe17: 0x4000, + 0xe18: 0x4000, 0xe19: 0x4000, 0xe1a: 0x4000, 0xe1b: 0x4000, 0xe1c: 0x4000, 0xe1d: 0x4000, + 0xe1e: 0x4000, 0xe1f: 0x4000, 0xe20: 0x4000, 0xe21: 0x4000, 0xe22: 0x4000, 0xe23: 0x4000, + 0xe24: 0x4000, 0xe25: 0x4000, 0xe26: 0x4000, 0xe27: 0x4000, 0xe28: 0x4000, 0xe29: 0x4000, + 0xe2a: 0x4000, 0xe2b: 0x4000, 0xe2c: 0x4000, 0xe2d: 0x4000, 0xe2e: 0x4000, 0xe2f: 0x4000, + 0xe30: 0x4000, 0xe31: 0x4000, 0xe32: 0x4000, 0xe33: 0x4000, 0xe34: 0x4000, 0xe35: 0x4000, + 0xe36: 0x4000, 0xe37: 0x4000, 0xe38: 0x4000, 0xe39: 0x4000, 0xe3a: 0x4000, + // Block 0x39, offset 0xe40 + 0xe40: 0x4000, 0xe41: 0x4000, 0xe42: 0x4000, 0xe43: 0x4000, 0xe44: 0x4000, 0xe45: 0x4000, + 0xe46: 0x4000, 0xe47: 0x4000, 0xe48: 0x4000, 0xe49: 0x4000, 0xe4a: 0x4000, 0xe4b: 0x4000, + 0xe4c: 0x4000, 0xe4d: 0x4000, 0xe4e: 0x4000, 0xe4f: 0x4000, 0xe50: 0x4000, 0xe51: 0x4000, + 0xe52: 0x4000, 0xe53: 0x4000, 0xe54: 0x4000, 0xe55: 0x4000, 0xe56: 0x4000, 0xe57: 0x4000, + 0xe58: 0x4000, 0xe59: 0x4000, 
0xe5a: 0x4000, 0xe5b: 0x4000, 0xe5c: 0x4000, 0xe5d: 0x4000, + 0xe5e: 0x4000, 0xe5f: 0x4000, 0xe60: 0x4000, 0xe61: 0x4000, 0xe62: 0x4000, 0xe63: 0x4000, + 0xe70: 0x4000, 0xe71: 0x4000, 0xe72: 0x4000, 0xe73: 0x4000, 0xe74: 0x4000, 0xe75: 0x4000, + 0xe76: 0x4000, 0xe77: 0x4000, 0xe78: 0x4000, 0xe79: 0x4000, 0xe7a: 0x4000, 0xe7b: 0x4000, + 0xe7c: 0x4000, 0xe7d: 0x4000, 0xe7e: 0x4000, 0xe7f: 0x4000, + // Block 0x3a, offset 0xe80 + 0xe80: 0x4000, 0xe81: 0x4000, 0xe82: 0x4000, 0xe83: 0x4000, 0xe84: 0x4000, 0xe85: 0x4000, + 0xe86: 0x4000, 0xe87: 0x4000, 0xe88: 0x4000, 0xe89: 0x4000, 0xe8a: 0x4000, 0xe8b: 0x4000, + 0xe8c: 0x4000, 0xe8d: 0x4000, 0xe8e: 0x4000, 0xe8f: 0x4000, 0xe90: 0x4000, 0xe91: 0x4000, + 0xe92: 0x4000, 0xe93: 0x4000, 0xe94: 0x4000, 0xe95: 0x4000, 0xe96: 0x4000, 0xe97: 0x4000, + 0xe98: 0x4000, 0xe99: 0x4000, 0xe9a: 0x4000, 0xe9b: 0x4000, 0xe9c: 0x4000, 0xe9d: 0x4000, + 0xe9e: 0x4000, 0xea0: 0x4000, 0xea1: 0x4000, 0xea2: 0x4000, 0xea3: 0x4000, + 0xea4: 0x4000, 0xea5: 0x4000, 0xea6: 0x4000, 0xea7: 0x4000, 0xea8: 0x4000, 0xea9: 0x4000, + 0xeaa: 0x4000, 0xeab: 0x4000, 0xeac: 0x4000, 0xead: 0x4000, 0xeae: 0x4000, 0xeaf: 0x4000, + 0xeb0: 0x4000, 0xeb1: 0x4000, 0xeb2: 0x4000, 0xeb3: 0x4000, 0xeb4: 0x4000, 0xeb5: 0x4000, + 0xeb6: 0x4000, 0xeb7: 0x4000, 0xeb8: 0x4000, 0xeb9: 0x4000, 0xeba: 0x4000, 0xebb: 0x4000, + 0xebc: 0x4000, 0xebd: 0x4000, 0xebe: 0x4000, 0xebf: 0x4000, + // Block 0x3b, offset 0xec0 + 0xec0: 0x4000, 0xec1: 0x4000, 0xec2: 0x4000, 0xec3: 0x4000, 0xec4: 0x4000, 0xec5: 0x4000, + 0xec6: 0x4000, 0xec7: 0x4000, 0xec8: 0x2000, 0xec9: 0x2000, 0xeca: 0x2000, 0xecb: 0x2000, + 0xecc: 0x2000, 0xecd: 0x2000, 0xece: 0x2000, 0xecf: 0x2000, 0xed0: 0x4000, 0xed1: 0x4000, + 0xed2: 0x4000, 0xed3: 0x4000, 0xed4: 0x4000, 0xed5: 0x4000, 0xed6: 0x4000, 0xed7: 0x4000, + 0xed8: 0x4000, 0xed9: 0x4000, 0xeda: 0x4000, 0xedb: 0x4000, 0xedc: 0x4000, 0xedd: 0x4000, + 0xede: 0x4000, 0xedf: 0x4000, 0xee0: 0x4000, 0xee1: 0x4000, 0xee2: 0x4000, 0xee3: 0x4000, + 0xee4: 0x4000, 0xee5: 0x4000, 0xee6: 0x4000, 0xee7: 0x4000, 0xee8: 0x4000, 0xee9: 0x4000, + 0xeea: 0x4000, 0xeeb: 0x4000, 0xeec: 0x4000, 0xeed: 0x4000, 0xeee: 0x4000, 0xeef: 0x4000, + 0xef0: 0x4000, 0xef1: 0x4000, 0xef2: 0x4000, 0xef3: 0x4000, 0xef4: 0x4000, 0xef5: 0x4000, + 0xef6: 0x4000, 0xef7: 0x4000, 0xef8: 0x4000, 0xef9: 0x4000, 0xefa: 0x4000, 0xefb: 0x4000, + 0xefc: 0x4000, 0xefd: 0x4000, 0xefe: 0x4000, 0xeff: 0x4000, + // Block 0x3c, offset 0xf00 + 0xf00: 0x4000, 0xf01: 0x4000, 0xf02: 0x4000, 0xf03: 0x4000, 0xf04: 0x4000, 0xf05: 0x4000, + 0xf06: 0x4000, 0xf07: 0x4000, 0xf08: 0x4000, 0xf09: 0x4000, 0xf0a: 0x4000, 0xf0b: 0x4000, + 0xf0c: 0x4000, 0xf0d: 0x4000, 0xf0e: 0x4000, 0xf0f: 0x4000, 0xf10: 0x4000, 0xf11: 0x4000, + 0xf12: 0x4000, 0xf13: 0x4000, 0xf14: 0x4000, 0xf15: 0x4000, 0xf16: 0x4000, 0xf17: 0x4000, + 0xf18: 0x4000, 0xf19: 0x4000, 0xf1a: 0x4000, 0xf1b: 0x4000, 0xf1c: 0x4000, 0xf1d: 0x4000, + 0xf1e: 0x4000, 0xf1f: 0x4000, 0xf20: 0x4000, 0xf21: 0x4000, 0xf22: 0x4000, 0xf23: 0x4000, + 0xf24: 0x4000, 0xf25: 0x4000, 0xf26: 0x4000, 0xf27: 0x4000, 0xf28: 0x4000, 0xf29: 0x4000, + 0xf2a: 0x4000, 0xf2b: 0x4000, 0xf2c: 0x4000, 0xf2d: 0x4000, 0xf2e: 0x4000, 0xf2f: 0x4000, + 0xf30: 0x4000, 0xf31: 0x4000, 0xf32: 0x4000, 0xf33: 0x4000, 0xf34: 0x4000, 0xf35: 0x4000, + 0xf36: 0x4000, 0xf37: 0x4000, 0xf38: 0x4000, 0xf39: 0x4000, 0xf3a: 0x4000, 0xf3b: 0x4000, + 0xf3c: 0x4000, 0xf3d: 0x4000, 0xf3e: 0x4000, + // Block 0x3d, offset 0xf40 + 0xf40: 0x4000, 0xf41: 0x4000, 0xf42: 0x4000, 0xf43: 0x4000, 0xf44: 0x4000, 0xf45: 0x4000, + 0xf46: 0x4000, 0xf47: 
0x4000, 0xf48: 0x4000, 0xf49: 0x4000, 0xf4a: 0x4000, 0xf4b: 0x4000, + 0xf4c: 0x4000, 0xf50: 0x4000, 0xf51: 0x4000, + 0xf52: 0x4000, 0xf53: 0x4000, 0xf54: 0x4000, 0xf55: 0x4000, 0xf56: 0x4000, 0xf57: 0x4000, + 0xf58: 0x4000, 0xf59: 0x4000, 0xf5a: 0x4000, 0xf5b: 0x4000, 0xf5c: 0x4000, 0xf5d: 0x4000, + 0xf5e: 0x4000, 0xf5f: 0x4000, 0xf60: 0x4000, 0xf61: 0x4000, 0xf62: 0x4000, 0xf63: 0x4000, + 0xf64: 0x4000, 0xf65: 0x4000, 0xf66: 0x4000, 0xf67: 0x4000, 0xf68: 0x4000, 0xf69: 0x4000, + 0xf6a: 0x4000, 0xf6b: 0x4000, 0xf6c: 0x4000, 0xf6d: 0x4000, 0xf6e: 0x4000, 0xf6f: 0x4000, + 0xf70: 0x4000, 0xf71: 0x4000, 0xf72: 0x4000, 0xf73: 0x4000, 0xf74: 0x4000, 0xf75: 0x4000, + 0xf76: 0x4000, 0xf77: 0x4000, 0xf78: 0x4000, 0xf79: 0x4000, 0xf7a: 0x4000, 0xf7b: 0x4000, + 0xf7c: 0x4000, 0xf7d: 0x4000, 0xf7e: 0x4000, 0xf7f: 0x4000, + // Block 0x3e, offset 0xf80 + 0xf80: 0x4000, 0xf81: 0x4000, 0xf82: 0x4000, 0xf83: 0x4000, 0xf84: 0x4000, 0xf85: 0x4000, + 0xf86: 0x4000, + // Block 0x3f, offset 0xfc0 + 0xfe0: 0x4000, 0xfe1: 0x4000, 0xfe2: 0x4000, 0xfe3: 0x4000, + 0xfe4: 0x4000, 0xfe5: 0x4000, 0xfe6: 0x4000, 0xfe7: 0x4000, 0xfe8: 0x4000, 0xfe9: 0x4000, + 0xfea: 0x4000, 0xfeb: 0x4000, 0xfec: 0x4000, 0xfed: 0x4000, 0xfee: 0x4000, 0xfef: 0x4000, + 0xff0: 0x4000, 0xff1: 0x4000, 0xff2: 0x4000, 0xff3: 0x4000, 0xff4: 0x4000, 0xff5: 0x4000, + 0xff6: 0x4000, 0xff7: 0x4000, 0xff8: 0x4000, 0xff9: 0x4000, 0xffa: 0x4000, 0xffb: 0x4000, + 0xffc: 0x4000, + // Block 0x40, offset 0x1000 + 0x1000: 0x4000, 0x1001: 0x4000, 0x1002: 0x4000, 0x1003: 0x4000, 0x1004: 0x4000, 0x1005: 0x4000, + 0x1006: 0x4000, 0x1007: 0x4000, 0x1008: 0x4000, 0x1009: 0x4000, 0x100a: 0x4000, 0x100b: 0x4000, + 0x100c: 0x4000, 0x100d: 0x4000, 0x100e: 0x4000, 0x100f: 0x4000, 0x1010: 0x4000, 0x1011: 0x4000, + 0x1012: 0x4000, 0x1013: 0x4000, 0x1014: 0x4000, 0x1015: 0x4000, 0x1016: 0x4000, 0x1017: 0x4000, + 0x1018: 0x4000, 0x1019: 0x4000, 0x101a: 0x4000, 0x101b: 0x4000, 0x101c: 0x4000, 0x101d: 0x4000, + 0x101e: 0x4000, 0x101f: 0x4000, 0x1020: 0x4000, 0x1021: 0x4000, 0x1022: 0x4000, 0x1023: 0x4000, + // Block 0x41, offset 0x1040 + 0x1040: 0x2000, 0x1041: 0x2000, 0x1042: 0x2000, 0x1043: 0x2000, 0x1044: 0x2000, 0x1045: 0x2000, + 0x1046: 0x2000, 0x1047: 0x2000, 0x1048: 0x2000, 0x1049: 0x2000, 0x104a: 0x2000, 0x104b: 0x2000, + 0x104c: 0x2000, 0x104d: 0x2000, 0x104e: 0x2000, 0x104f: 0x2000, 0x1050: 0x4000, 0x1051: 0x4000, + 0x1052: 0x4000, 0x1053: 0x4000, 0x1054: 0x4000, 0x1055: 0x4000, 0x1056: 0x4000, 0x1057: 0x4000, + 0x1058: 0x4000, 0x1059: 0x4000, + 0x1070: 0x4000, 0x1071: 0x4000, 0x1072: 0x4000, 0x1073: 0x4000, 0x1074: 0x4000, 0x1075: 0x4000, + 0x1076: 0x4000, 0x1077: 0x4000, 0x1078: 0x4000, 0x1079: 0x4000, 0x107a: 0x4000, 0x107b: 0x4000, + 0x107c: 0x4000, 0x107d: 0x4000, 0x107e: 0x4000, 0x107f: 0x4000, + // Block 0x42, offset 0x1080 + 0x1080: 0x4000, 0x1081: 0x4000, 0x1082: 0x4000, 0x1083: 0x4000, 0x1084: 0x4000, 0x1085: 0x4000, + 0x1086: 0x4000, 0x1087: 0x4000, 0x1088: 0x4000, 0x1089: 0x4000, 0x108a: 0x4000, 0x108b: 0x4000, + 0x108c: 0x4000, 0x108d: 0x4000, 0x108e: 0x4000, 0x108f: 0x4000, 0x1090: 0x4000, 0x1091: 0x4000, + 0x1092: 0x4000, 0x1094: 0x4000, 0x1095: 0x4000, 0x1096: 0x4000, 0x1097: 0x4000, + 0x1098: 0x4000, 0x1099: 0x4000, 0x109a: 0x4000, 0x109b: 0x4000, 0x109c: 0x4000, 0x109d: 0x4000, + 0x109e: 0x4000, 0x109f: 0x4000, 0x10a0: 0x4000, 0x10a1: 0x4000, 0x10a2: 0x4000, 0x10a3: 0x4000, + 0x10a4: 0x4000, 0x10a5: 0x4000, 0x10a6: 0x4000, 0x10a8: 0x4000, 0x10a9: 0x4000, + 0x10aa: 0x4000, 0x10ab: 0x4000, + // Block 0x43, offset 0x10c0 + 0x10c1: 0x9012, 0x10c2: 
0x9012, 0x10c3: 0x9012, 0x10c4: 0x9012, 0x10c5: 0x9012, + 0x10c6: 0x9012, 0x10c7: 0x9012, 0x10c8: 0x9012, 0x10c9: 0x9012, 0x10ca: 0x9012, 0x10cb: 0x9012, + 0x10cc: 0x9012, 0x10cd: 0x9012, 0x10ce: 0x9012, 0x10cf: 0x9012, 0x10d0: 0x9012, 0x10d1: 0x9012, + 0x10d2: 0x9012, 0x10d3: 0x9012, 0x10d4: 0x9012, 0x10d5: 0x9012, 0x10d6: 0x9012, 0x10d7: 0x9012, + 0x10d8: 0x9012, 0x10d9: 0x9012, 0x10da: 0x9012, 0x10db: 0x9012, 0x10dc: 0x9012, 0x10dd: 0x9012, + 0x10de: 0x9012, 0x10df: 0x9012, 0x10e0: 0x9049, 0x10e1: 0x9049, 0x10e2: 0x9049, 0x10e3: 0x9049, + 0x10e4: 0x9049, 0x10e5: 0x9049, 0x10e6: 0x9049, 0x10e7: 0x9049, 0x10e8: 0x9049, 0x10e9: 0x9049, + 0x10ea: 0x9049, 0x10eb: 0x9049, 0x10ec: 0x9049, 0x10ed: 0x9049, 0x10ee: 0x9049, 0x10ef: 0x9049, + 0x10f0: 0x9049, 0x10f1: 0x9049, 0x10f2: 0x9049, 0x10f3: 0x9049, 0x10f4: 0x9049, 0x10f5: 0x9049, + 0x10f6: 0x9049, 0x10f7: 0x9049, 0x10f8: 0x9049, 0x10f9: 0x9049, 0x10fa: 0x9049, 0x10fb: 0x9049, + 0x10fc: 0x9049, 0x10fd: 0x9049, 0x10fe: 0x9049, 0x10ff: 0x9049, + // Block 0x44, offset 0x1100 + 0x1100: 0x9049, 0x1101: 0x9049, 0x1102: 0x9049, 0x1103: 0x9049, 0x1104: 0x9049, 0x1105: 0x9049, + 0x1106: 0x9049, 0x1107: 0x9049, 0x1108: 0x9049, 0x1109: 0x9049, 0x110a: 0x9049, 0x110b: 0x9049, + 0x110c: 0x9049, 0x110d: 0x9049, 0x110e: 0x9049, 0x110f: 0x9049, 0x1110: 0x9049, 0x1111: 0x9049, + 0x1112: 0x9049, 0x1113: 0x9049, 0x1114: 0x9049, 0x1115: 0x9049, 0x1116: 0x9049, 0x1117: 0x9049, + 0x1118: 0x9049, 0x1119: 0x9049, 0x111a: 0x9049, 0x111b: 0x9049, 0x111c: 0x9049, 0x111d: 0x9049, + 0x111e: 0x9049, 0x111f: 0x904a, 0x1120: 0x904b, 0x1121: 0xb04c, 0x1122: 0xb04d, 0x1123: 0xb04d, + 0x1124: 0xb04e, 0x1125: 0xb04f, 0x1126: 0xb050, 0x1127: 0xb051, 0x1128: 0xb052, 0x1129: 0xb053, + 0x112a: 0xb054, 0x112b: 0xb055, 0x112c: 0xb056, 0x112d: 0xb057, 0x112e: 0xb058, 0x112f: 0xb059, + 0x1130: 0xb05a, 0x1131: 0xb05b, 0x1132: 0xb05c, 0x1133: 0xb05d, 0x1134: 0xb05e, 0x1135: 0xb05f, + 0x1136: 0xb060, 0x1137: 0xb061, 0x1138: 0xb062, 0x1139: 0xb063, 0x113a: 0xb064, 0x113b: 0xb065, + 0x113c: 0xb052, 0x113d: 0xb066, 0x113e: 0xb067, 0x113f: 0xb055, + // Block 0x45, offset 0x1140 + 0x1140: 0xb068, 0x1141: 0xb069, 0x1142: 0xb06a, 0x1143: 0xb06b, 0x1144: 0xb05a, 0x1145: 0xb056, + 0x1146: 0xb06c, 0x1147: 0xb06d, 0x1148: 0xb06b, 0x1149: 0xb06e, 0x114a: 0xb06b, 0x114b: 0xb06f, + 0x114c: 0xb06f, 0x114d: 0xb070, 0x114e: 0xb070, 0x114f: 0xb071, 0x1150: 0xb056, 0x1151: 0xb072, + 0x1152: 0xb073, 0x1153: 0xb072, 0x1154: 0xb074, 0x1155: 0xb073, 0x1156: 0xb075, 0x1157: 0xb075, + 0x1158: 0xb076, 0x1159: 0xb076, 0x115a: 0xb077, 0x115b: 0xb077, 0x115c: 0xb073, 0x115d: 0xb078, + 0x115e: 0xb079, 0x115f: 0xb067, 0x1160: 0xb07a, 0x1161: 0xb07b, 0x1162: 0xb07b, 0x1163: 0xb07b, + 0x1164: 0xb07b, 0x1165: 0xb07b, 0x1166: 0xb07b, 0x1167: 0xb07b, 0x1168: 0xb07b, 0x1169: 0xb07b, + 0x116a: 0xb07b, 0x116b: 0xb07b, 0x116c: 0xb07b, 0x116d: 0xb07b, 0x116e: 0xb07b, 0x116f: 0xb07b, + 0x1170: 0xb07c, 0x1171: 0xb07c, 0x1172: 0xb07c, 0x1173: 0xb07c, 0x1174: 0xb07c, 0x1175: 0xb07c, + 0x1176: 0xb07c, 0x1177: 0xb07c, 0x1178: 0xb07c, 0x1179: 0xb07c, 0x117a: 0xb07c, 0x117b: 0xb07c, + 0x117c: 0xb07c, 0x117d: 0xb07c, 0x117e: 0xb07c, + // Block 0x46, offset 0x1180 + 0x1182: 0xb07d, 0x1183: 0xb07e, 0x1184: 0xb07f, 0x1185: 0xb080, + 0x1186: 0xb07f, 0x1187: 0xb07e, 0x118a: 0xb081, 0x118b: 0xb082, + 0x118c: 0xb083, 0x118d: 0xb07f, 0x118e: 0xb080, 0x118f: 0xb07f, + 0x1192: 0xb084, 0x1193: 0xb085, 0x1194: 0xb084, 0x1195: 0xb086, 0x1196: 0xb084, 0x1197: 0xb087, + 0x119a: 0xb088, 0x119b: 0xb089, 0x119c: 0xb08a, + 0x11a0: 0x908b, 0x11a1: 0x908b, 
0x11a2: 0x908c, 0x11a3: 0x908d, + 0x11a4: 0x908b, 0x11a5: 0x908e, 0x11a6: 0x908f, 0x11a8: 0xb090, 0x11a9: 0xb091, + 0x11aa: 0xb092, 0x11ab: 0xb091, 0x11ac: 0xb093, 0x11ad: 0xb094, 0x11ae: 0xb095, + 0x11bd: 0x2000, + // Block 0x47, offset 0x11c0 + 0x11e0: 0x4000, 0x11e1: 0x4000, 0x11e2: 0x4000, 0x11e3: 0x4000, + // Block 0x48, offset 0x1200 + 0x1200: 0x4000, 0x1201: 0x4000, 0x1202: 0x4000, 0x1203: 0x4000, 0x1204: 0x4000, 0x1205: 0x4000, + 0x1206: 0x4000, 0x1207: 0x4000, 0x1208: 0x4000, 0x1209: 0x4000, 0x120a: 0x4000, 0x120b: 0x4000, + 0x120c: 0x4000, 0x120d: 0x4000, 0x120e: 0x4000, 0x120f: 0x4000, 0x1210: 0x4000, 0x1211: 0x4000, + 0x1212: 0x4000, 0x1213: 0x4000, 0x1214: 0x4000, 0x1215: 0x4000, 0x1216: 0x4000, 0x1217: 0x4000, + 0x1218: 0x4000, 0x1219: 0x4000, 0x121a: 0x4000, 0x121b: 0x4000, 0x121c: 0x4000, 0x121d: 0x4000, + 0x121e: 0x4000, 0x121f: 0x4000, 0x1220: 0x4000, 0x1221: 0x4000, 0x1222: 0x4000, 0x1223: 0x4000, + 0x1224: 0x4000, 0x1225: 0x4000, 0x1226: 0x4000, 0x1227: 0x4000, 0x1228: 0x4000, 0x1229: 0x4000, + 0x122a: 0x4000, 0x122b: 0x4000, 0x122c: 0x4000, 0x122d: 0x4000, 0x122e: 0x4000, 0x122f: 0x4000, + 0x1230: 0x4000, 0x1231: 0x4000, 0x1232: 0x4000, 0x1233: 0x4000, 0x1234: 0x4000, 0x1235: 0x4000, + 0x1236: 0x4000, 0x1237: 0x4000, + // Block 0x49, offset 0x1240 + 0x1240: 0x4000, 0x1241: 0x4000, 0x1242: 0x4000, 0x1243: 0x4000, 0x1244: 0x4000, 0x1245: 0x4000, + 0x1246: 0x4000, 0x1247: 0x4000, 0x1248: 0x4000, 0x1249: 0x4000, 0x124a: 0x4000, 0x124b: 0x4000, + 0x124c: 0x4000, 0x124d: 0x4000, 0x124e: 0x4000, 0x124f: 0x4000, 0x1250: 0x4000, 0x1251: 0x4000, + 0x1252: 0x4000, 0x1253: 0x4000, 0x1254: 0x4000, 0x1255: 0x4000, 0x1256: 0x4000, 0x1257: 0x4000, + 0x1258: 0x4000, 0x1259: 0x4000, 0x125a: 0x4000, 0x125b: 0x4000, 0x125c: 0x4000, 0x125d: 0x4000, + 0x125e: 0x4000, 0x125f: 0x4000, 0x1260: 0x4000, 0x1261: 0x4000, 0x1262: 0x4000, 0x1263: 0x4000, + 0x1264: 0x4000, 0x1265: 0x4000, 0x1266: 0x4000, 0x1267: 0x4000, 0x1268: 0x4000, 0x1269: 0x4000, + 0x126a: 0x4000, 0x126b: 0x4000, 0x126c: 0x4000, 0x126d: 0x4000, 0x126e: 0x4000, 0x126f: 0x4000, + 0x1270: 0x4000, 0x1271: 0x4000, 0x1272: 0x4000, + // Block 0x4a, offset 0x1280 + 0x1280: 0x4000, 0x1281: 0x4000, 0x1282: 0x4000, 0x1283: 0x4000, 0x1284: 0x4000, 0x1285: 0x4000, + 0x1286: 0x4000, 0x1287: 0x4000, 0x1288: 0x4000, 0x1289: 0x4000, 0x128a: 0x4000, 0x128b: 0x4000, + 0x128c: 0x4000, 0x128d: 0x4000, 0x128e: 0x4000, 0x128f: 0x4000, 0x1290: 0x4000, 0x1291: 0x4000, + 0x1292: 0x4000, 0x1293: 0x4000, 0x1294: 0x4000, 0x1295: 0x4000, 0x1296: 0x4000, 0x1297: 0x4000, + 0x1298: 0x4000, 0x1299: 0x4000, 0x129a: 0x4000, 0x129b: 0x4000, 0x129c: 0x4000, 0x129d: 0x4000, + 0x129e: 0x4000, + // Block 0x4b, offset 0x12c0 + 0x12d0: 0x4000, 0x12d1: 0x4000, + 0x12d2: 0x4000, + 0x12e4: 0x4000, 0x12e5: 0x4000, 0x12e6: 0x4000, 0x12e7: 0x4000, + 0x12f0: 0x4000, 0x12f1: 0x4000, 0x12f2: 0x4000, 0x12f3: 0x4000, 0x12f4: 0x4000, 0x12f5: 0x4000, + 0x12f6: 0x4000, 0x12f7: 0x4000, 0x12f8: 0x4000, 0x12f9: 0x4000, 0x12fa: 0x4000, 0x12fb: 0x4000, + 0x12fc: 0x4000, 0x12fd: 0x4000, 0x12fe: 0x4000, 0x12ff: 0x4000, + // Block 0x4c, offset 0x1300 + 0x1300: 0x4000, 0x1301: 0x4000, 0x1302: 0x4000, 0x1303: 0x4000, 0x1304: 0x4000, 0x1305: 0x4000, + 0x1306: 0x4000, 0x1307: 0x4000, 0x1308: 0x4000, 0x1309: 0x4000, 0x130a: 0x4000, 0x130b: 0x4000, + 0x130c: 0x4000, 0x130d: 0x4000, 0x130e: 0x4000, 0x130f: 0x4000, 0x1310: 0x4000, 0x1311: 0x4000, + 0x1312: 0x4000, 0x1313: 0x4000, 0x1314: 0x4000, 0x1315: 0x4000, 0x1316: 0x4000, 0x1317: 0x4000, + 0x1318: 0x4000, 0x1319: 0x4000, 0x131a: 0x4000, 0x131b: 
0x4000, 0x131c: 0x4000, 0x131d: 0x4000, + 0x131e: 0x4000, 0x131f: 0x4000, 0x1320: 0x4000, 0x1321: 0x4000, 0x1322: 0x4000, 0x1323: 0x4000, + 0x1324: 0x4000, 0x1325: 0x4000, 0x1326: 0x4000, 0x1327: 0x4000, 0x1328: 0x4000, 0x1329: 0x4000, + 0x132a: 0x4000, 0x132b: 0x4000, 0x132c: 0x4000, 0x132d: 0x4000, 0x132e: 0x4000, 0x132f: 0x4000, + 0x1330: 0x4000, 0x1331: 0x4000, 0x1332: 0x4000, 0x1333: 0x4000, 0x1334: 0x4000, 0x1335: 0x4000, + 0x1336: 0x4000, 0x1337: 0x4000, 0x1338: 0x4000, 0x1339: 0x4000, 0x133a: 0x4000, 0x133b: 0x4000, + // Block 0x4d, offset 0x1340 + 0x1344: 0x4000, + // Block 0x4e, offset 0x1380 + 0x138f: 0x4000, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x2000, 0x13c1: 0x2000, 0x13c2: 0x2000, 0x13c3: 0x2000, 0x13c4: 0x2000, 0x13c5: 0x2000, + 0x13c6: 0x2000, 0x13c7: 0x2000, 0x13c8: 0x2000, 0x13c9: 0x2000, 0x13ca: 0x2000, + 0x13d0: 0x2000, 0x13d1: 0x2000, + 0x13d2: 0x2000, 0x13d3: 0x2000, 0x13d4: 0x2000, 0x13d5: 0x2000, 0x13d6: 0x2000, 0x13d7: 0x2000, + 0x13d8: 0x2000, 0x13d9: 0x2000, 0x13da: 0x2000, 0x13db: 0x2000, 0x13dc: 0x2000, 0x13dd: 0x2000, + 0x13de: 0x2000, 0x13df: 0x2000, 0x13e0: 0x2000, 0x13e1: 0x2000, 0x13e2: 0x2000, 0x13e3: 0x2000, + 0x13e4: 0x2000, 0x13e5: 0x2000, 0x13e6: 0x2000, 0x13e7: 0x2000, 0x13e8: 0x2000, 0x13e9: 0x2000, + 0x13ea: 0x2000, 0x13eb: 0x2000, 0x13ec: 0x2000, 0x13ed: 0x2000, + 0x13f0: 0x2000, 0x13f1: 0x2000, 0x13f2: 0x2000, 0x13f3: 0x2000, 0x13f4: 0x2000, 0x13f5: 0x2000, + 0x13f6: 0x2000, 0x13f7: 0x2000, 0x13f8: 0x2000, 0x13f9: 0x2000, 0x13fa: 0x2000, 0x13fb: 0x2000, + 0x13fc: 0x2000, 0x13fd: 0x2000, 0x13fe: 0x2000, 0x13ff: 0x2000, + // Block 0x50, offset 0x1400 + 0x1400: 0x2000, 0x1401: 0x2000, 0x1402: 0x2000, 0x1403: 0x2000, 0x1404: 0x2000, 0x1405: 0x2000, + 0x1406: 0x2000, 0x1407: 0x2000, 0x1408: 0x2000, 0x1409: 0x2000, 0x140a: 0x2000, 0x140b: 0x2000, + 0x140c: 0x2000, 0x140d: 0x2000, 0x140e: 0x2000, 0x140f: 0x2000, 0x1410: 0x2000, 0x1411: 0x2000, + 0x1412: 0x2000, 0x1413: 0x2000, 0x1414: 0x2000, 0x1415: 0x2000, 0x1416: 0x2000, 0x1417: 0x2000, + 0x1418: 0x2000, 0x1419: 0x2000, 0x141a: 0x2000, 0x141b: 0x2000, 0x141c: 0x2000, 0x141d: 0x2000, + 0x141e: 0x2000, 0x141f: 0x2000, 0x1420: 0x2000, 0x1421: 0x2000, 0x1422: 0x2000, 0x1423: 0x2000, + 0x1424: 0x2000, 0x1425: 0x2000, 0x1426: 0x2000, 0x1427: 0x2000, 0x1428: 0x2000, 0x1429: 0x2000, + 0x1430: 0x2000, 0x1431: 0x2000, 0x1432: 0x2000, 0x1433: 0x2000, 0x1434: 0x2000, 0x1435: 0x2000, + 0x1436: 0x2000, 0x1437: 0x2000, 0x1438: 0x2000, 0x1439: 0x2000, 0x143a: 0x2000, 0x143b: 0x2000, + 0x143c: 0x2000, 0x143d: 0x2000, 0x143e: 0x2000, 0x143f: 0x2000, + // Block 0x51, offset 0x1440 + 0x1440: 0x2000, 0x1441: 0x2000, 0x1442: 0x2000, 0x1443: 0x2000, 0x1444: 0x2000, 0x1445: 0x2000, + 0x1446: 0x2000, 0x1447: 0x2000, 0x1448: 0x2000, 0x1449: 0x2000, 0x144a: 0x2000, 0x144b: 0x2000, + 0x144c: 0x2000, 0x144d: 0x2000, 0x144e: 0x4000, 0x144f: 0x2000, 0x1450: 0x2000, 0x1451: 0x4000, + 0x1452: 0x4000, 0x1453: 0x4000, 0x1454: 0x4000, 0x1455: 0x4000, 0x1456: 0x4000, 0x1457: 0x4000, + 0x1458: 0x4000, 0x1459: 0x4000, 0x145a: 0x4000, 0x145b: 0x2000, 0x145c: 0x2000, 0x145d: 0x2000, + 0x145e: 0x2000, 0x145f: 0x2000, 0x1460: 0x2000, 0x1461: 0x2000, 0x1462: 0x2000, 0x1463: 0x2000, + 0x1464: 0x2000, 0x1465: 0x2000, 0x1466: 0x2000, 0x1467: 0x2000, 0x1468: 0x2000, 0x1469: 0x2000, + 0x146a: 0x2000, 0x146b: 0x2000, 0x146c: 0x2000, + // Block 0x52, offset 0x1480 + 0x1480: 0x4000, 0x1481: 0x4000, 0x1482: 0x4000, + 0x1490: 0x4000, 0x1491: 0x4000, + 0x1492: 0x4000, 0x1493: 0x4000, 0x1494: 0x4000, 0x1495: 0x4000, 0x1496: 0x4000, 0x1497: 0x4000, + 
0x1498: 0x4000, 0x1499: 0x4000, 0x149a: 0x4000, 0x149b: 0x4000, 0x149c: 0x4000, 0x149d: 0x4000, + 0x149e: 0x4000, 0x149f: 0x4000, 0x14a0: 0x4000, 0x14a1: 0x4000, 0x14a2: 0x4000, 0x14a3: 0x4000, + 0x14a4: 0x4000, 0x14a5: 0x4000, 0x14a6: 0x4000, 0x14a7: 0x4000, 0x14a8: 0x4000, 0x14a9: 0x4000, + 0x14aa: 0x4000, 0x14ab: 0x4000, 0x14ac: 0x4000, 0x14ad: 0x4000, 0x14ae: 0x4000, 0x14af: 0x4000, + 0x14b0: 0x4000, 0x14b1: 0x4000, 0x14b2: 0x4000, 0x14b3: 0x4000, 0x14b4: 0x4000, 0x14b5: 0x4000, + 0x14b6: 0x4000, 0x14b7: 0x4000, 0x14b8: 0x4000, 0x14b9: 0x4000, 0x14ba: 0x4000, 0x14bb: 0x4000, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x4000, 0x14c1: 0x4000, 0x14c2: 0x4000, 0x14c3: 0x4000, 0x14c4: 0x4000, 0x14c5: 0x4000, + 0x14c6: 0x4000, 0x14c7: 0x4000, 0x14c8: 0x4000, + 0x14d0: 0x4000, 0x14d1: 0x4000, + 0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000, + 0x14e4: 0x4000, 0x14e5: 0x4000, + // Block 0x54, offset 0x1500 + 0x1500: 0x4000, 0x1501: 0x4000, 0x1502: 0x4000, 0x1503: 0x4000, 0x1504: 0x4000, 0x1505: 0x4000, + 0x1506: 0x4000, 0x1507: 0x4000, 0x1508: 0x4000, 0x1509: 0x4000, 0x150a: 0x4000, 0x150b: 0x4000, + 0x150c: 0x4000, 0x150d: 0x4000, 0x150e: 0x4000, 0x150f: 0x4000, 0x1510: 0x4000, 0x1511: 0x4000, + 0x1512: 0x4000, 0x1513: 0x4000, 0x1514: 0x4000, 0x1515: 0x4000, 0x1516: 0x4000, 0x1517: 0x4000, + 0x1518: 0x4000, 0x1519: 0x4000, 0x151a: 0x4000, 0x151b: 0x4000, 0x151c: 0x4000, 0x151d: 0x4000, + 0x151e: 0x4000, 0x151f: 0x4000, 0x1520: 0x4000, + 0x152d: 0x4000, 0x152e: 0x4000, 0x152f: 0x4000, + 0x1530: 0x4000, 0x1531: 0x4000, 0x1532: 0x4000, 0x1533: 0x4000, 0x1534: 0x4000, 0x1535: 0x4000, + 0x1537: 0x4000, 0x1538: 0x4000, 0x1539: 0x4000, 0x153a: 0x4000, 0x153b: 0x4000, + 0x153c: 0x4000, 0x153d: 0x4000, 0x153e: 0x4000, 0x153f: 0x4000, + // Block 0x55, offset 0x1540 + 0x1540: 0x4000, 0x1541: 0x4000, 0x1542: 0x4000, 0x1543: 0x4000, 0x1544: 0x4000, 0x1545: 0x4000, + 0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000, 0x154b: 0x4000, + 0x154c: 0x4000, 0x154d: 0x4000, 0x154e: 0x4000, 0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000, + 0x1552: 0x4000, 0x1553: 0x4000, 0x1554: 0x4000, 0x1555: 0x4000, 0x1556: 0x4000, 0x1557: 0x4000, + 0x1558: 0x4000, 0x1559: 0x4000, 0x155a: 0x4000, 0x155b: 0x4000, 0x155c: 0x4000, 0x155d: 0x4000, + 0x155e: 0x4000, 0x155f: 0x4000, 0x1560: 0x4000, 0x1561: 0x4000, 0x1562: 0x4000, 0x1563: 0x4000, + 0x1564: 0x4000, 0x1565: 0x4000, 0x1566: 0x4000, 0x1567: 0x4000, 0x1568: 0x4000, 0x1569: 0x4000, + 0x156a: 0x4000, 0x156b: 0x4000, 0x156c: 0x4000, 0x156d: 0x4000, 0x156e: 0x4000, 0x156f: 0x4000, + 0x1570: 0x4000, 0x1571: 0x4000, 0x1572: 0x4000, 0x1573: 0x4000, 0x1574: 0x4000, 0x1575: 0x4000, + 0x1576: 0x4000, 0x1577: 0x4000, 0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000, + 0x157c: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000, + // Block 0x56, offset 0x1580 + 0x1580: 0x4000, 0x1581: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000, + 0x1586: 0x4000, 0x1587: 0x4000, 0x1588: 0x4000, 0x1589: 0x4000, 0x158a: 0x4000, 0x158b: 0x4000, + 0x158c: 0x4000, 0x158d: 0x4000, 0x158e: 0x4000, 0x158f: 0x4000, 0x1590: 0x4000, 0x1591: 0x4000, + 0x1592: 0x4000, 0x1593: 0x4000, + 0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000, + 0x15a4: 0x4000, 0x15a5: 0x4000, 0x15a6: 0x4000, 0x15a7: 0x4000, 0x15a8: 0x4000, 0x15a9: 0x4000, + 0x15aa: 0x4000, 0x15ab: 0x4000, 0x15ac: 0x4000, 0x15ad: 0x4000, 0x15ae: 0x4000, 0x15af: 0x4000, + 0x15b0: 0x4000, 0x15b1: 0x4000, 0x15b2: 0x4000, 0x15b3: 0x4000, 0x15b4: 0x4000, 0x15b5: 0x4000, + 
0x15b6: 0x4000, 0x15b7: 0x4000, 0x15b8: 0x4000, 0x15b9: 0x4000, 0x15ba: 0x4000, 0x15bb: 0x4000, + 0x15bc: 0x4000, 0x15bd: 0x4000, 0x15be: 0x4000, 0x15bf: 0x4000, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x4000, 0x15c1: 0x4000, 0x15c2: 0x4000, 0x15c3: 0x4000, 0x15c4: 0x4000, 0x15c5: 0x4000, + 0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000, + 0x15cf: 0x4000, 0x15d0: 0x4000, 0x15d1: 0x4000, + 0x15d2: 0x4000, 0x15d3: 0x4000, + 0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000, + 0x15e4: 0x4000, 0x15e5: 0x4000, 0x15e6: 0x4000, 0x15e7: 0x4000, 0x15e8: 0x4000, 0x15e9: 0x4000, + 0x15ea: 0x4000, 0x15eb: 0x4000, 0x15ec: 0x4000, 0x15ed: 0x4000, 0x15ee: 0x4000, 0x15ef: 0x4000, + 0x15f0: 0x4000, 0x15f4: 0x4000, + 0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000, + 0x15fc: 0x4000, 0x15fd: 0x4000, 0x15fe: 0x4000, 0x15ff: 0x4000, + // Block 0x58, offset 0x1600 + 0x1600: 0x4000, 0x1602: 0x4000, 0x1603: 0x4000, 0x1604: 0x4000, 0x1605: 0x4000, + 0x1606: 0x4000, 0x1607: 0x4000, 0x1608: 0x4000, 0x1609: 0x4000, 0x160a: 0x4000, 0x160b: 0x4000, + 0x160c: 0x4000, 0x160d: 0x4000, 0x160e: 0x4000, 0x160f: 0x4000, 0x1610: 0x4000, 0x1611: 0x4000, + 0x1612: 0x4000, 0x1613: 0x4000, 0x1614: 0x4000, 0x1615: 0x4000, 0x1616: 0x4000, 0x1617: 0x4000, + 0x1618: 0x4000, 0x1619: 0x4000, 0x161a: 0x4000, 0x161b: 0x4000, 0x161c: 0x4000, 0x161d: 0x4000, + 0x161e: 0x4000, 0x161f: 0x4000, 0x1620: 0x4000, 0x1621: 0x4000, 0x1622: 0x4000, 0x1623: 0x4000, + 0x1624: 0x4000, 0x1625: 0x4000, 0x1626: 0x4000, 0x1627: 0x4000, 0x1628: 0x4000, 0x1629: 0x4000, + 0x162a: 0x4000, 0x162b: 0x4000, 0x162c: 0x4000, 0x162d: 0x4000, 0x162e: 0x4000, 0x162f: 0x4000, + 0x1630: 0x4000, 0x1631: 0x4000, 0x1632: 0x4000, 0x1633: 0x4000, 0x1634: 0x4000, 0x1635: 0x4000, + 0x1636: 0x4000, 0x1637: 0x4000, 0x1638: 0x4000, 0x1639: 0x4000, 0x163a: 0x4000, 0x163b: 0x4000, + 0x163c: 0x4000, 0x163d: 0x4000, 0x163e: 0x4000, 0x163f: 0x4000, + // Block 0x59, offset 0x1640 + 0x1640: 0x4000, 0x1641: 0x4000, 0x1642: 0x4000, 0x1643: 0x4000, 0x1644: 0x4000, 0x1645: 0x4000, + 0x1646: 0x4000, 0x1647: 0x4000, 0x1648: 0x4000, 0x1649: 0x4000, 0x164a: 0x4000, 0x164b: 0x4000, + 0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x164f: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000, + 0x1652: 0x4000, 0x1653: 0x4000, 0x1654: 0x4000, 0x1655: 0x4000, 0x1656: 0x4000, 0x1657: 0x4000, + 0x1658: 0x4000, 0x1659: 0x4000, 0x165a: 0x4000, 0x165b: 0x4000, 0x165c: 0x4000, 0x165d: 0x4000, + 0x165e: 0x4000, 0x165f: 0x4000, 0x1660: 0x4000, 0x1661: 0x4000, 0x1662: 0x4000, 0x1663: 0x4000, + 0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000, 0x1668: 0x4000, 0x1669: 0x4000, + 0x166a: 0x4000, 0x166b: 0x4000, 0x166c: 0x4000, 0x166d: 0x4000, 0x166e: 0x4000, 0x166f: 0x4000, + 0x1670: 0x4000, 0x1671: 0x4000, 0x1672: 0x4000, 0x1673: 0x4000, 0x1674: 0x4000, 0x1675: 0x4000, + 0x1676: 0x4000, 0x1677: 0x4000, 0x1678: 0x4000, 0x1679: 0x4000, 0x167a: 0x4000, 0x167b: 0x4000, + 0x167c: 0x4000, 0x167f: 0x4000, + // Block 0x5a, offset 0x1680 + 0x1680: 0x4000, 0x1681: 0x4000, 0x1682: 0x4000, 0x1683: 0x4000, 0x1684: 0x4000, 0x1685: 0x4000, + 0x1686: 0x4000, 0x1687: 0x4000, 0x1688: 0x4000, 0x1689: 0x4000, 0x168a: 0x4000, 0x168b: 0x4000, + 0x168c: 0x4000, 0x168d: 0x4000, 0x168e: 0x4000, 0x168f: 0x4000, 0x1690: 0x4000, 0x1691: 0x4000, + 0x1692: 0x4000, 0x1693: 0x4000, 0x1694: 0x4000, 0x1695: 0x4000, 0x1696: 0x4000, 0x1697: 0x4000, + 0x1698: 0x4000, 0x1699: 0x4000, 0x169a: 0x4000, 0x169b: 0x4000, 0x169c: 0x4000, 0x169d: 0x4000, + 0x169e: 0x4000, 0x169f: 0x4000, 0x16a0: 
0x4000, 0x16a1: 0x4000, 0x16a2: 0x4000, 0x16a3: 0x4000, + 0x16a4: 0x4000, 0x16a5: 0x4000, 0x16a6: 0x4000, 0x16a7: 0x4000, 0x16a8: 0x4000, 0x16a9: 0x4000, + 0x16aa: 0x4000, 0x16ab: 0x4000, 0x16ac: 0x4000, 0x16ad: 0x4000, 0x16ae: 0x4000, 0x16af: 0x4000, + 0x16b0: 0x4000, 0x16b1: 0x4000, 0x16b2: 0x4000, 0x16b3: 0x4000, 0x16b4: 0x4000, 0x16b5: 0x4000, + 0x16b6: 0x4000, 0x16b7: 0x4000, 0x16b8: 0x4000, 0x16b9: 0x4000, 0x16ba: 0x4000, 0x16bb: 0x4000, + 0x16bc: 0x4000, 0x16bd: 0x4000, + // Block 0x5b, offset 0x16c0 + 0x16cb: 0x4000, + 0x16cc: 0x4000, 0x16cd: 0x4000, 0x16ce: 0x4000, 0x16d0: 0x4000, 0x16d1: 0x4000, + 0x16d2: 0x4000, 0x16d3: 0x4000, 0x16d4: 0x4000, 0x16d5: 0x4000, 0x16d6: 0x4000, 0x16d7: 0x4000, + 0x16d8: 0x4000, 0x16d9: 0x4000, 0x16da: 0x4000, 0x16db: 0x4000, 0x16dc: 0x4000, 0x16dd: 0x4000, + 0x16de: 0x4000, 0x16df: 0x4000, 0x16e0: 0x4000, 0x16e1: 0x4000, 0x16e2: 0x4000, 0x16e3: 0x4000, + 0x16e4: 0x4000, 0x16e5: 0x4000, 0x16e6: 0x4000, 0x16e7: 0x4000, + 0x16fa: 0x4000, + // Block 0x5c, offset 0x1700 + 0x1715: 0x4000, 0x1716: 0x4000, + 0x1724: 0x4000, + // Block 0x5d, offset 0x1740 + 0x177b: 0x4000, + 0x177c: 0x4000, 0x177d: 0x4000, 0x177e: 0x4000, 0x177f: 0x4000, + // Block 0x5e, offset 0x1780 + 0x1780: 0x4000, 0x1781: 0x4000, 0x1782: 0x4000, 0x1783: 0x4000, 0x1784: 0x4000, 0x1785: 0x4000, + 0x1786: 0x4000, 0x1787: 0x4000, 0x1788: 0x4000, 0x1789: 0x4000, 0x178a: 0x4000, 0x178b: 0x4000, + 0x178c: 0x4000, 0x178d: 0x4000, 0x178e: 0x4000, 0x178f: 0x4000, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x4000, 0x17c1: 0x4000, 0x17c2: 0x4000, 0x17c3: 0x4000, 0x17c4: 0x4000, 0x17c5: 0x4000, + 0x17cc: 0x4000, 0x17d0: 0x4000, 0x17d1: 0x4000, + 0x17d2: 0x4000, 0x17d5: 0x4000, + 0x17eb: 0x4000, 0x17ec: 0x4000, + 0x17f4: 0x4000, 0x17f5: 0x4000, + 0x17f6: 0x4000, 0x17f7: 0x4000, 0x17f8: 0x4000, 0x17f9: 0x4000, 0x17fa: 0x4000, + // Block 0x60, offset 0x1800 + 0x1820: 0x4000, 0x1821: 0x4000, 0x1822: 0x4000, 0x1823: 0x4000, + 0x1824: 0x4000, 0x1825: 0x4000, 0x1826: 0x4000, 0x1827: 0x4000, 0x1828: 0x4000, 0x1829: 0x4000, + 0x182a: 0x4000, 0x182b: 0x4000, + // Block 0x61, offset 0x1840 + 0x184d: 0x4000, 0x184e: 0x4000, 0x184f: 0x4000, 0x1850: 0x4000, 0x1851: 0x4000, + 0x1852: 0x4000, 0x1853: 0x4000, 0x1854: 0x4000, 0x1855: 0x4000, 0x1856: 0x4000, 0x1857: 0x4000, + 0x1858: 0x4000, 0x1859: 0x4000, 0x185a: 0x4000, 0x185b: 0x4000, 0x185c: 0x4000, 0x185d: 0x4000, + 0x185e: 0x4000, 0x185f: 0x4000, 0x1860: 0x4000, 0x1861: 0x4000, 0x1862: 0x4000, 0x1863: 0x4000, + 0x1864: 0x4000, 0x1865: 0x4000, 0x1866: 0x4000, 0x1867: 0x4000, 0x1868: 0x4000, 0x1869: 0x4000, + 0x186a: 0x4000, 0x186b: 0x4000, 0x186c: 0x4000, 0x186d: 0x4000, 0x186e: 0x4000, 0x186f: 0x4000, + 0x1870: 0x4000, 0x1871: 0x4000, 0x1872: 0x4000, 0x1873: 0x4000, 0x1874: 0x4000, 0x1875: 0x4000, + 0x1876: 0x4000, 0x1877: 0x4000, 0x1878: 0x4000, 0x1879: 0x4000, 0x187a: 0x4000, 0x187b: 0x4000, + 0x187c: 0x4000, 0x187d: 0x4000, 0x187e: 0x4000, 0x187f: 0x4000, + // Block 0x62, offset 0x1880 + 0x1880: 0x4000, 0x1881: 0x4000, 0x1882: 0x4000, 0x1883: 0x4000, 0x1884: 0x4000, 0x1885: 0x4000, + 0x1886: 0x4000, 0x1887: 0x4000, 0x1888: 0x4000, 0x1889: 0x4000, 0x188a: 0x4000, 0x188b: 0x4000, + 0x188c: 0x4000, 0x188d: 0x4000, 0x188e: 0x4000, 0x188f: 0x4000, 0x1890: 0x4000, 0x1891: 0x4000, + 0x1892: 0x4000, 0x1893: 0x4000, 0x1894: 0x4000, 0x1895: 0x4000, 0x1896: 0x4000, 0x1897: 0x4000, + 0x1898: 0x4000, 0x1899: 0x4000, 0x189a: 0x4000, 0x189b: 0x4000, 0x189c: 0x4000, 0x189d: 0x4000, + 0x189e: 0x4000, 0x189f: 0x4000, 0x18a0: 0x4000, 0x18a1: 0x4000, 0x18a2: 0x4000, 0x18a3: 
0x4000, + 0x18a4: 0x4000, 0x18a5: 0x4000, 0x18a6: 0x4000, 0x18a7: 0x4000, 0x18a8: 0x4000, 0x18a9: 0x4000, + 0x18aa: 0x4000, 0x18ab: 0x4000, 0x18ac: 0x4000, 0x18ad: 0x4000, 0x18ae: 0x4000, 0x18af: 0x4000, + 0x18b0: 0x4000, 0x18b1: 0x4000, 0x18b3: 0x4000, 0x18b4: 0x4000, 0x18b5: 0x4000, + 0x18b6: 0x4000, 0x18ba: 0x4000, 0x18bb: 0x4000, + 0x18bc: 0x4000, 0x18bd: 0x4000, 0x18be: 0x4000, 0x18bf: 0x4000, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x4000, 0x18c1: 0x4000, 0x18c2: 0x4000, 0x18c3: 0x4000, 0x18c4: 0x4000, 0x18c5: 0x4000, + 0x18c6: 0x4000, 0x18c7: 0x4000, 0x18c8: 0x4000, 0x18c9: 0x4000, 0x18ca: 0x4000, 0x18cb: 0x4000, + 0x18cc: 0x4000, 0x18cd: 0x4000, 0x18ce: 0x4000, 0x18cf: 0x4000, 0x18d0: 0x4000, 0x18d1: 0x4000, + 0x18d2: 0x4000, 0x18d3: 0x4000, 0x18d4: 0x4000, 0x18d5: 0x4000, 0x18d6: 0x4000, 0x18d7: 0x4000, + 0x18d8: 0x4000, 0x18d9: 0x4000, 0x18da: 0x4000, 0x18db: 0x4000, 0x18dc: 0x4000, 0x18dd: 0x4000, + 0x18de: 0x4000, 0x18df: 0x4000, 0x18e0: 0x4000, 0x18e1: 0x4000, 0x18e2: 0x4000, + 0x18e5: 0x4000, 0x18e6: 0x4000, 0x18e7: 0x4000, 0x18e8: 0x4000, 0x18e9: 0x4000, + 0x18ea: 0x4000, 0x18ee: 0x4000, 0x18ef: 0x4000, + 0x18f0: 0x4000, 0x18f1: 0x4000, 0x18f2: 0x4000, 0x18f3: 0x4000, 0x18f4: 0x4000, 0x18f5: 0x4000, + 0x18f6: 0x4000, 0x18f7: 0x4000, 0x18f8: 0x4000, 0x18f9: 0x4000, 0x18fa: 0x4000, 0x18fb: 0x4000, + 0x18fc: 0x4000, 0x18fd: 0x4000, 0x18fe: 0x4000, 0x18ff: 0x4000, + // Block 0x64, offset 0x1900 + 0x1900: 0x4000, 0x1901: 0x4000, 0x1902: 0x4000, 0x1903: 0x4000, 0x1904: 0x4000, 0x1905: 0x4000, + 0x1906: 0x4000, 0x1907: 0x4000, 0x1908: 0x4000, 0x1909: 0x4000, 0x190a: 0x4000, + 0x190d: 0x4000, 0x190e: 0x4000, 0x190f: 0x4000, 0x1910: 0x4000, 0x1911: 0x4000, + 0x1912: 0x4000, 0x1913: 0x4000, 0x1914: 0x4000, 0x1915: 0x4000, 0x1916: 0x4000, 0x1917: 0x4000, + 0x1918: 0x4000, 0x1919: 0x4000, 0x191a: 0x4000, 0x191b: 0x4000, 0x191c: 0x4000, 0x191d: 0x4000, + 0x191e: 0x4000, 0x191f: 0x4000, 0x1920: 0x4000, 0x1921: 0x4000, 0x1922: 0x4000, 0x1923: 0x4000, + 0x1924: 0x4000, 0x1925: 0x4000, 0x1926: 0x4000, 0x1927: 0x4000, 0x1928: 0x4000, 0x1929: 0x4000, + 0x192a: 0x4000, 0x192b: 0x4000, 0x192c: 0x4000, 0x192d: 0x4000, 0x192e: 0x4000, 0x192f: 0x4000, + 0x1930: 0x4000, 0x1931: 0x4000, 0x1932: 0x4000, 0x1933: 0x4000, 0x1934: 0x4000, 0x1935: 0x4000, + 0x1936: 0x4000, 0x1937: 0x4000, 0x1938: 0x4000, 0x1939: 0x4000, 0x193a: 0x4000, 0x193b: 0x4000, + 0x193c: 0x4000, 0x193d: 0x4000, 0x193e: 0x4000, 0x193f: 0x4000, + // Block 0x65, offset 0x1940 + 0x1970: 0x4000, 0x1971: 0x4000, 0x1972: 0x4000, 0x1973: 0x4000, + 0x1978: 0x4000, 0x1979: 0x4000, 0x197a: 0x4000, + // Block 0x66, offset 0x1980 + 0x1980: 0x4000, 0x1981: 0x4000, 0x1982: 0x4000, + 0x1990: 0x4000, 0x1991: 0x4000, + 0x1992: 0x4000, 0x1993: 0x4000, 0x1994: 0x4000, 0x1995: 0x4000, + // Block 0x67, offset 0x19c0 + 0x19c0: 0x2000, 0x19c1: 0x2000, 0x19c2: 0x2000, 0x19c3: 0x2000, 0x19c4: 0x2000, 0x19c5: 0x2000, + 0x19c6: 0x2000, 0x19c7: 0x2000, 0x19c8: 0x2000, 0x19c9: 0x2000, 0x19ca: 0x2000, 0x19cb: 0x2000, + 0x19cc: 0x2000, 0x19cd: 0x2000, 0x19ce: 0x2000, 0x19cf: 0x2000, 0x19d0: 0x2000, 0x19d1: 0x2000, + 0x19d2: 0x2000, 0x19d3: 0x2000, 0x19d4: 0x2000, 0x19d5: 0x2000, 0x19d6: 0x2000, 0x19d7: 0x2000, + 0x19d8: 0x2000, 0x19d9: 0x2000, 0x19da: 0x2000, 0x19db: 0x2000, 0x19dc: 0x2000, 0x19dd: 0x2000, + 0x19de: 0x2000, 0x19df: 0x2000, 0x19e0: 0x2000, 0x19e1: 0x2000, 0x19e2: 0x2000, 0x19e3: 0x2000, + 0x19e4: 0x2000, 0x19e5: 0x2000, 0x19e6: 0x2000, 0x19e7: 0x2000, 0x19e8: 0x2000, 0x19e9: 0x2000, + 0x19ea: 0x2000, 0x19eb: 0x2000, 0x19ec: 0x2000, 0x19ed: 0x2000, 
0x19ee: 0x2000, 0x19ef: 0x2000, + 0x19f0: 0x2000, 0x19f1: 0x2000, 0x19f2: 0x2000, 0x19f3: 0x2000, 0x19f4: 0x2000, 0x19f5: 0x2000, + 0x19f6: 0x2000, 0x19f7: 0x2000, 0x19f8: 0x2000, 0x19f9: 0x2000, 0x19fa: 0x2000, 0x19fb: 0x2000, + 0x19fc: 0x2000, 0x19fd: 0x2000, +} + +// widthIndex: 22 blocks, 1408 entries, 1408 bytes +// Block 0 is the zero block. +var widthIndex = [1408]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc7: 0x05, + 0xc9: 0x06, 0xcb: 0x07, 0xcc: 0x08, 0xcd: 0x09, 0xce: 0x0a, 0xcf: 0x0b, + 0xd0: 0x0c, 0xd1: 0x0d, + 0xe1: 0x02, 0xe2: 0x03, 0xe3: 0x04, 0xe4: 0x05, 0xe5: 0x06, 0xe6: 0x06, 0xe7: 0x06, + 0xe8: 0x06, 0xe9: 0x06, 0xea: 0x07, 0xeb: 0x06, 0xec: 0x06, 0xed: 0x08, 0xee: 0x09, 0xef: 0x0a, + 0xf0: 0x0f, 0xf3: 0x12, 0xf4: 0x13, + // Block 0x4, offset 0x100 + 0x104: 0x0e, 0x105: 0x0f, + // Block 0x5, offset 0x140 + 0x140: 0x10, 0x141: 0x11, 0x142: 0x12, 0x144: 0x13, 0x145: 0x14, 0x146: 0x15, 0x147: 0x16, + 0x148: 0x17, 0x149: 0x18, 0x14a: 0x19, 0x14c: 0x1a, 0x14f: 0x1b, + 0x151: 0x1c, 0x152: 0x08, 0x153: 0x1d, 0x154: 0x1e, 0x155: 0x1f, 0x156: 0x20, 0x157: 0x21, + 0x158: 0x22, 0x159: 0x23, 0x15a: 0x24, 0x15b: 0x25, 0x15c: 0x26, 0x15d: 0x27, 0x15e: 0x28, 0x15f: 0x29, + 0x166: 0x2a, + 0x16c: 0x2b, 0x16d: 0x2c, + 0x17a: 0x2d, 0x17b: 0x2e, 0x17c: 0x0e, 0x17d: 0x0e, 0x17e: 0x0e, 0x17f: 0x2f, + // Block 0x6, offset 0x180 + 0x180: 0x30, 0x181: 0x31, 0x182: 0x32, 0x183: 0x33, 0x184: 0x34, 0x185: 0x35, 0x186: 0x36, 0x187: 0x37, + 0x188: 0x38, 0x189: 0x39, 0x18a: 0x0e, 0x18b: 0x3a, 0x18c: 0x0e, 0x18d: 0x0e, 0x18e: 0x0e, 0x18f: 0x0e, + 0x190: 0x0e, 0x191: 0x0e, 0x192: 0x0e, 0x193: 0x0e, 0x194: 0x0e, 0x195: 0x0e, 0x196: 0x0e, 0x197: 0x0e, + 0x198: 0x0e, 0x199: 0x0e, 0x19a: 0x0e, 0x19b: 0x0e, 0x19c: 0x0e, 0x19d: 0x0e, 0x19e: 0x0e, 0x19f: 0x0e, + 0x1a0: 0x0e, 0x1a1: 0x0e, 0x1a2: 0x0e, 0x1a3: 0x0e, 0x1a4: 0x0e, 0x1a5: 0x0e, 0x1a6: 0x0e, 0x1a7: 0x0e, + 0x1a8: 0x0e, 0x1a9: 0x0e, 0x1aa: 0x0e, 0x1ab: 0x0e, 0x1ac: 0x0e, 0x1ad: 0x0e, 0x1ae: 0x0e, 0x1af: 0x0e, + 0x1b0: 0x0e, 0x1b1: 0x0e, 0x1b2: 0x0e, 0x1b3: 0x0e, 0x1b4: 0x0e, 0x1b5: 0x0e, 0x1b6: 0x0e, 0x1b7: 0x0e, + 0x1b8: 0x0e, 0x1b9: 0x0e, 0x1ba: 0x0e, 0x1bb: 0x0e, 0x1bc: 0x0e, 0x1bd: 0x0e, 0x1be: 0x0e, 0x1bf: 0x0e, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0e, 0x1c1: 0x0e, 0x1c2: 0x0e, 0x1c3: 0x0e, 0x1c4: 0x0e, 0x1c5: 0x0e, 0x1c6: 0x0e, 0x1c7: 0x0e, + 0x1c8: 0x0e, 0x1c9: 0x0e, 0x1ca: 0x0e, 0x1cb: 0x0e, 0x1cc: 0x0e, 0x1cd: 0x0e, 0x1ce: 0x0e, 0x1cf: 0x0e, + 0x1d0: 0x0e, 0x1d1: 0x0e, 0x1d2: 0x0e, 0x1d3: 0x0e, 0x1d4: 0x0e, 0x1d5: 0x0e, 0x1d6: 0x0e, 0x1d7: 0x0e, + 0x1d8: 0x0e, 0x1d9: 0x0e, 0x1da: 0x0e, 0x1db: 0x0e, 0x1dc: 0x0e, 0x1dd: 0x0e, 0x1de: 0x0e, 0x1df: 0x0e, + 0x1e0: 0x0e, 0x1e1: 0x0e, 0x1e2: 0x0e, 0x1e3: 0x0e, 0x1e4: 0x0e, 0x1e5: 0x0e, 0x1e6: 0x0e, 0x1e7: 0x0e, + 0x1e8: 0x0e, 0x1e9: 0x0e, 0x1ea: 0x0e, 0x1eb: 0x0e, 0x1ec: 0x0e, 0x1ed: 0x0e, 0x1ee: 0x0e, 0x1ef: 0x0e, + 0x1f0: 0x0e, 0x1f1: 0x0e, 0x1f2: 0x0e, 0x1f3: 0x0e, 0x1f4: 0x0e, 0x1f5: 0x0e, 0x1f6: 0x0e, + 0x1f8: 0x0e, 0x1f9: 0x0e, 0x1fa: 0x0e, 0x1fb: 0x0e, 0x1fc: 0x0e, 0x1fd: 0x0e, 0x1fe: 0x0e, 0x1ff: 0x0e, + // Block 0x8, offset 0x200 + 0x200: 0x0e, 0x201: 0x0e, 0x202: 0x0e, 0x203: 0x0e, 0x204: 0x0e, 0x205: 0x0e, 0x206: 0x0e, 0x207: 0x0e, + 0x208: 0x0e, 0x209: 0x0e, 0x20a: 0x0e, 0x20b: 0x0e, 0x20c: 0x0e, 0x20d: 0x0e, 0x20e: 0x0e, 0x20f: 0x0e, + 0x210: 0x0e, 0x211: 0x0e, 0x212: 0x0e, 0x213: 0x0e, 0x214: 0x0e, 0x215: 0x0e, 0x216: 0x0e, 0x217: 0x0e, + 0x218: 0x0e, 0x219: 
0x0e, 0x21a: 0x0e, 0x21b: 0x0e, 0x21c: 0x0e, 0x21d: 0x0e, 0x21e: 0x0e, 0x21f: 0x0e, + 0x220: 0x0e, 0x221: 0x0e, 0x222: 0x0e, 0x223: 0x0e, 0x224: 0x0e, 0x225: 0x0e, 0x226: 0x0e, 0x227: 0x0e, + 0x228: 0x0e, 0x229: 0x0e, 0x22a: 0x0e, 0x22b: 0x0e, 0x22c: 0x0e, 0x22d: 0x0e, 0x22e: 0x0e, 0x22f: 0x0e, + 0x230: 0x0e, 0x231: 0x0e, 0x232: 0x0e, 0x233: 0x0e, 0x234: 0x0e, 0x235: 0x0e, 0x236: 0x0e, 0x237: 0x0e, + 0x238: 0x0e, 0x239: 0x0e, 0x23a: 0x0e, 0x23b: 0x0e, 0x23c: 0x0e, 0x23d: 0x0e, 0x23e: 0x0e, 0x23f: 0x0e, + // Block 0x9, offset 0x240 + 0x240: 0x0e, 0x241: 0x0e, 0x242: 0x0e, 0x243: 0x0e, 0x244: 0x0e, 0x245: 0x0e, 0x246: 0x0e, 0x247: 0x0e, + 0x248: 0x0e, 0x249: 0x0e, 0x24a: 0x0e, 0x24b: 0x0e, 0x24c: 0x0e, 0x24d: 0x0e, 0x24e: 0x0e, 0x24f: 0x0e, + 0x250: 0x0e, 0x251: 0x0e, 0x252: 0x3b, 0x253: 0x3c, + 0x265: 0x3d, + 0x270: 0x0e, 0x271: 0x0e, 0x272: 0x0e, 0x273: 0x0e, 0x274: 0x0e, 0x275: 0x0e, 0x276: 0x0e, 0x277: 0x0e, + 0x278: 0x0e, 0x279: 0x0e, 0x27a: 0x0e, 0x27b: 0x0e, 0x27c: 0x0e, 0x27d: 0x0e, 0x27e: 0x0e, 0x27f: 0x0e, + // Block 0xa, offset 0x280 + 0x280: 0x0e, 0x281: 0x0e, 0x282: 0x0e, 0x283: 0x0e, 0x284: 0x0e, 0x285: 0x0e, 0x286: 0x0e, 0x287: 0x0e, + 0x288: 0x0e, 0x289: 0x0e, 0x28a: 0x0e, 0x28b: 0x0e, 0x28c: 0x0e, 0x28d: 0x0e, 0x28e: 0x0e, 0x28f: 0x0e, + 0x290: 0x0e, 0x291: 0x0e, 0x292: 0x0e, 0x293: 0x0e, 0x294: 0x0e, 0x295: 0x0e, 0x296: 0x0e, 0x297: 0x0e, + 0x298: 0x0e, 0x299: 0x0e, 0x29a: 0x0e, 0x29b: 0x0e, 0x29c: 0x0e, 0x29d: 0x0e, 0x29e: 0x3e, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x08, 0x2c1: 0x08, 0x2c2: 0x08, 0x2c3: 0x08, 0x2c4: 0x08, 0x2c5: 0x08, 0x2c6: 0x08, 0x2c7: 0x08, + 0x2c8: 0x08, 0x2c9: 0x08, 0x2ca: 0x08, 0x2cb: 0x08, 0x2cc: 0x08, 0x2cd: 0x08, 0x2ce: 0x08, 0x2cf: 0x08, + 0x2d0: 0x08, 0x2d1: 0x08, 0x2d2: 0x08, 0x2d3: 0x08, 0x2d4: 0x08, 0x2d5: 0x08, 0x2d6: 0x08, 0x2d7: 0x08, + 0x2d8: 0x08, 0x2d9: 0x08, 0x2da: 0x08, 0x2db: 0x08, 0x2dc: 0x08, 0x2dd: 0x08, 0x2de: 0x08, 0x2df: 0x08, + 0x2e0: 0x08, 0x2e1: 0x08, 0x2e2: 0x08, 0x2e3: 0x08, 0x2e4: 0x08, 0x2e5: 0x08, 0x2e6: 0x08, 0x2e7: 0x08, + 0x2e8: 0x08, 0x2e9: 0x08, 0x2ea: 0x08, 0x2eb: 0x08, 0x2ec: 0x08, 0x2ed: 0x08, 0x2ee: 0x08, 0x2ef: 0x08, + 0x2f0: 0x08, 0x2f1: 0x08, 0x2f2: 0x08, 0x2f3: 0x08, 0x2f4: 0x08, 0x2f5: 0x08, 0x2f6: 0x08, 0x2f7: 0x08, + 0x2f8: 0x08, 0x2f9: 0x08, 0x2fa: 0x08, 0x2fb: 0x08, 0x2fc: 0x08, 0x2fd: 0x08, 0x2fe: 0x08, 0x2ff: 0x08, + // Block 0xc, offset 0x300 + 0x300: 0x08, 0x301: 0x08, 0x302: 0x08, 0x303: 0x08, 0x304: 0x08, 0x305: 0x08, 0x306: 0x08, 0x307: 0x08, + 0x308: 0x08, 0x309: 0x08, 0x30a: 0x08, 0x30b: 0x08, 0x30c: 0x08, 0x30d: 0x08, 0x30e: 0x08, 0x30f: 0x08, + 0x310: 0x08, 0x311: 0x08, 0x312: 0x08, 0x313: 0x08, 0x314: 0x08, 0x315: 0x08, 0x316: 0x08, 0x317: 0x08, + 0x318: 0x08, 0x319: 0x08, 0x31a: 0x08, 0x31b: 0x08, 0x31c: 0x08, 0x31d: 0x08, 0x31e: 0x08, 0x31f: 0x08, + 0x320: 0x08, 0x321: 0x08, 0x322: 0x08, 0x323: 0x08, 0x324: 0x0e, 0x325: 0x0e, 0x326: 0x0e, 0x327: 0x0e, + 0x328: 0x0e, 0x329: 0x0e, 0x32a: 0x0e, 0x32b: 0x0e, + 0x338: 0x3f, 0x339: 0x40, 0x33c: 0x41, 0x33d: 0x42, 0x33e: 0x43, 0x33f: 0x44, + // Block 0xd, offset 0x340 + 0x37f: 0x45, + // Block 0xe, offset 0x380 + 0x380: 0x0e, 0x381: 0x0e, 0x382: 0x0e, 0x383: 0x0e, 0x384: 0x0e, 0x385: 0x0e, 0x386: 0x0e, 0x387: 0x0e, + 0x388: 0x0e, 0x389: 0x0e, 0x38a: 0x0e, 0x38b: 0x0e, 0x38c: 0x0e, 0x38d: 0x0e, 0x38e: 0x0e, 0x38f: 0x0e, + 0x390: 0x0e, 0x391: 0x0e, 0x392: 0x0e, 0x393: 0x0e, 0x394: 0x0e, 0x395: 0x0e, 0x396: 0x0e, 0x397: 0x0e, + 0x398: 0x0e, 0x399: 0x0e, 0x39a: 0x0e, 0x39b: 0x0e, 0x39c: 0x0e, 0x39d: 0x0e, 0x39e: 0x0e, 0x39f: 0x46, + 0x3a0: 0x0e, 
0x3a1: 0x0e, 0x3a2: 0x0e, 0x3a3: 0x0e, 0x3a4: 0x0e, 0x3a5: 0x0e, 0x3a6: 0x0e, 0x3a7: 0x0e, + 0x3a8: 0x0e, 0x3a9: 0x0e, 0x3aa: 0x0e, 0x3ab: 0x47, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x0e, 0x3c1: 0x0e, 0x3c2: 0x0e, 0x3c3: 0x0e, 0x3c4: 0x48, 0x3c5: 0x49, 0x3c6: 0x0e, 0x3c7: 0x0e, + 0x3c8: 0x0e, 0x3c9: 0x0e, 0x3ca: 0x0e, 0x3cb: 0x4a, + // Block 0x10, offset 0x400 + 0x400: 0x4b, 0x403: 0x4c, 0x404: 0x4d, 0x405: 0x4e, 0x406: 0x4f, + 0x408: 0x50, 0x409: 0x51, 0x40c: 0x52, 0x40d: 0x53, 0x40e: 0x54, 0x40f: 0x55, + 0x410: 0x3a, 0x411: 0x56, 0x412: 0x0e, 0x413: 0x57, 0x414: 0x58, 0x415: 0x59, 0x416: 0x5a, 0x417: 0x5b, + 0x418: 0x0e, 0x419: 0x5c, 0x41a: 0x0e, 0x41b: 0x5d, 0x41f: 0x5e, + 0x424: 0x5f, 0x425: 0x60, 0x426: 0x61, 0x427: 0x62, + 0x429: 0x63, 0x42a: 0x64, + // Block 0x11, offset 0x440 + 0x456: 0x0b, 0x457: 0x06, + 0x458: 0x0c, 0x45b: 0x0d, 0x45f: 0x0e, + 0x460: 0x06, 0x461: 0x06, 0x462: 0x06, 0x463: 0x06, 0x464: 0x06, 0x465: 0x06, 0x466: 0x06, 0x467: 0x06, + 0x468: 0x06, 0x469: 0x06, 0x46a: 0x06, 0x46b: 0x06, 0x46c: 0x06, 0x46d: 0x06, 0x46e: 0x06, 0x46f: 0x06, + 0x470: 0x06, 0x471: 0x06, 0x472: 0x06, 0x473: 0x06, 0x474: 0x06, 0x475: 0x06, 0x476: 0x06, 0x477: 0x06, + 0x478: 0x06, 0x479: 0x06, 0x47a: 0x06, 0x47b: 0x06, 0x47c: 0x06, 0x47d: 0x06, 0x47e: 0x06, 0x47f: 0x06, + // Block 0x12, offset 0x480 + 0x484: 0x08, 0x485: 0x08, 0x486: 0x08, 0x487: 0x09, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x08, 0x4c1: 0x08, 0x4c2: 0x08, 0x4c3: 0x08, 0x4c4: 0x08, 0x4c5: 0x08, 0x4c6: 0x08, 0x4c7: 0x08, + 0x4c8: 0x08, 0x4c9: 0x08, 0x4ca: 0x08, 0x4cb: 0x08, 0x4cc: 0x08, 0x4cd: 0x08, 0x4ce: 0x08, 0x4cf: 0x08, + 0x4d0: 0x08, 0x4d1: 0x08, 0x4d2: 0x08, 0x4d3: 0x08, 0x4d4: 0x08, 0x4d5: 0x08, 0x4d6: 0x08, 0x4d7: 0x08, + 0x4d8: 0x08, 0x4d9: 0x08, 0x4da: 0x08, 0x4db: 0x08, 0x4dc: 0x08, 0x4dd: 0x08, 0x4de: 0x08, 0x4df: 0x08, + 0x4e0: 0x08, 0x4e1: 0x08, 0x4e2: 0x08, 0x4e3: 0x08, 0x4e4: 0x08, 0x4e5: 0x08, 0x4e6: 0x08, 0x4e7: 0x08, + 0x4e8: 0x08, 0x4e9: 0x08, 0x4ea: 0x08, 0x4eb: 0x08, 0x4ec: 0x08, 0x4ed: 0x08, 0x4ee: 0x08, 0x4ef: 0x08, + 0x4f0: 0x08, 0x4f1: 0x08, 0x4f2: 0x08, 0x4f3: 0x08, 0x4f4: 0x08, 0x4f5: 0x08, 0x4f6: 0x08, 0x4f7: 0x08, + 0x4f8: 0x08, 0x4f9: 0x08, 0x4fa: 0x08, 0x4fb: 0x08, 0x4fc: 0x08, 0x4fd: 0x08, 0x4fe: 0x08, 0x4ff: 0x65, + // Block 0x14, offset 0x500 + 0x520: 0x10, + 0x530: 0x09, 0x531: 0x09, 0x532: 0x09, 0x533: 0x09, 0x534: 0x09, 0x535: 0x09, 0x536: 0x09, 0x537: 0x09, + 0x538: 0x09, 0x539: 0x09, 0x53a: 0x09, 0x53b: 0x09, 0x53c: 0x09, 0x53d: 0x09, 0x53e: 0x09, 0x53f: 0x11, + // Block 0x15, offset 0x540 + 0x540: 0x09, 0x541: 0x09, 0x542: 0x09, 0x543: 0x09, 0x544: 0x09, 0x545: 0x09, 0x546: 0x09, 0x547: 0x09, + 0x548: 0x09, 0x549: 0x09, 0x54a: 0x09, 0x54b: 0x09, 0x54c: 0x09, 0x54d: 0x09, 0x54e: 0x09, 0x54f: 0x11, +} + +// inverseData contains 4-byte entries of the following format: +// +// <length> <modified UTF-8-encoded rune> <0 padding> +// +// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the +// UTF-8 encoding of the original rune. Mappings often have the following +// pattern: +// +// A -> A (U+FF21 -> U+0041) +// B -> B (U+FF22 -> U+0042) +// ... +// +// By xor-ing the last byte the same entry can be shared by many mappings. This +// reduces the total number of distinct entries by about two thirds. +// The resulting entry for the aforementioned mappings is +// +// { 0x01, 0xE0, 0x00, 0x00 } +// +// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get +// +// E0 ^ A1 = 41. +// +// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get +// +// E0 ^ A2 = 42. 
+// +// Note that because of the xor-ing, the byte sequence stored in the entry is +// not valid UTF-8. +var inverseData = [150][4]byte{ + {0x00, 0x00, 0x00, 0x00}, + {0x03, 0xe3, 0x80, 0xa0}, + {0x03, 0xef, 0xbc, 0xa0}, + {0x03, 0xef, 0xbc, 0xe0}, + {0x03, 0xef, 0xbd, 0xe0}, + {0x03, 0xef, 0xbf, 0x02}, + {0x03, 0xef, 0xbf, 0x00}, + {0x03, 0xef, 0xbf, 0x0e}, + {0x03, 0xef, 0xbf, 0x0c}, + {0x03, 0xef, 0xbf, 0x0f}, + {0x03, 0xef, 0xbf, 0x39}, + {0x03, 0xef, 0xbf, 0x3b}, + {0x03, 0xef, 0xbf, 0x3f}, + {0x03, 0xef, 0xbf, 0x2a}, + {0x03, 0xef, 0xbf, 0x0d}, + {0x03, 0xef, 0xbf, 0x25}, + {0x03, 0xef, 0xbd, 0x1a}, + {0x03, 0xef, 0xbd, 0x26}, + {0x01, 0xa0, 0x00, 0x00}, + {0x03, 0xef, 0xbd, 0x25}, + {0x03, 0xef, 0xbd, 0x23}, + {0x03, 0xef, 0xbd, 0x2e}, + {0x03, 0xef, 0xbe, 0x07}, + {0x03, 0xef, 0xbe, 0x05}, + {0x03, 0xef, 0xbd, 0x06}, + {0x03, 0xef, 0xbd, 0x13}, + {0x03, 0xef, 0xbd, 0x0b}, + {0x03, 0xef, 0xbd, 0x16}, + {0x03, 0xef, 0xbd, 0x0c}, + {0x03, 0xef, 0xbd, 0x15}, + {0x03, 0xef, 0xbd, 0x0d}, + {0x03, 0xef, 0xbd, 0x1c}, + {0x03, 0xef, 0xbd, 0x02}, + {0x03, 0xef, 0xbd, 0x1f}, + {0x03, 0xef, 0xbd, 0x1d}, + {0x03, 0xef, 0xbd, 0x17}, + {0x03, 0xef, 0xbd, 0x08}, + {0x03, 0xef, 0xbd, 0x09}, + {0x03, 0xef, 0xbd, 0x0e}, + {0x03, 0xef, 0xbd, 0x04}, + {0x03, 0xef, 0xbd, 0x05}, + {0x03, 0xef, 0xbe, 0x3f}, + {0x03, 0xef, 0xbe, 0x00}, + {0x03, 0xef, 0xbd, 0x2c}, + {0x03, 0xef, 0xbe, 0x06}, + {0x03, 0xef, 0xbe, 0x0c}, + {0x03, 0xef, 0xbe, 0x0f}, + {0x03, 0xef, 0xbe, 0x0d}, + {0x03, 0xef, 0xbe, 0x0b}, + {0x03, 0xef, 0xbe, 0x19}, + {0x03, 0xef, 0xbe, 0x15}, + {0x03, 0xef, 0xbe, 0x11}, + {0x03, 0xef, 0xbe, 0x31}, + {0x03, 0xef, 0xbe, 0x33}, + {0x03, 0xef, 0xbd, 0x0f}, + {0x03, 0xef, 0xbe, 0x30}, + {0x03, 0xef, 0xbe, 0x3e}, + {0x03, 0xef, 0xbe, 0x32}, + {0x03, 0xef, 0xbe, 0x36}, + {0x03, 0xef, 0xbd, 0x14}, + {0x03, 0xef, 0xbe, 0x2e}, + {0x03, 0xef, 0xbd, 0x1e}, + {0x03, 0xef, 0xbe, 0x10}, + {0x03, 0xef, 0xbf, 0x13}, + {0x03, 0xef, 0xbf, 0x15}, + {0x03, 0xef, 0xbf, 0x17}, + {0x03, 0xef, 0xbf, 0x1f}, + {0x03, 0xef, 0xbf, 0x1d}, + {0x03, 0xef, 0xbf, 0x1b}, + {0x03, 0xef, 0xbf, 0x09}, + {0x03, 0xef, 0xbf, 0x0b}, + {0x03, 0xef, 0xbf, 0x37}, + {0x03, 0xef, 0xbe, 0x04}, + {0x01, 0xe0, 0x00, 0x00}, + {0x03, 0xe2, 0xa6, 0x1a}, + {0x03, 0xe2, 0xa6, 0x26}, + {0x03, 0xe3, 0x80, 0x23}, + {0x03, 0xe3, 0x80, 0x2e}, + {0x03, 0xe3, 0x80, 0x25}, + {0x03, 0xe3, 0x83, 0x1e}, + {0x03, 0xe3, 0x83, 0x14}, + {0x03, 0xe3, 0x82, 0x06}, + {0x03, 0xe3, 0x82, 0x0b}, + {0x03, 0xe3, 0x82, 0x0c}, + {0x03, 0xe3, 0x82, 0x0d}, + {0x03, 0xe3, 0x82, 0x02}, + {0x03, 0xe3, 0x83, 0x0f}, + {0x03, 0xe3, 0x83, 0x08}, + {0x03, 0xe3, 0x83, 0x09}, + {0x03, 0xe3, 0x83, 0x2c}, + {0x03, 0xe3, 0x83, 0x0c}, + {0x03, 0xe3, 0x82, 0x13}, + {0x03, 0xe3, 0x82, 0x16}, + {0x03, 0xe3, 0x82, 0x15}, + {0x03, 0xe3, 0x82, 0x1c}, + {0x03, 0xe3, 0x82, 0x1f}, + {0x03, 0xe3, 0x82, 0x1d}, + {0x03, 0xe3, 0x82, 0x1a}, + {0x03, 0xe3, 0x82, 0x17}, + {0x03, 0xe3, 0x82, 0x08}, + {0x03, 0xe3, 0x82, 0x09}, + {0x03, 0xe3, 0x82, 0x0e}, + {0x03, 0xe3, 0x82, 0x04}, + {0x03, 0xe3, 0x82, 0x05}, + {0x03, 0xe3, 0x82, 0x3f}, + {0x03, 0xe3, 0x83, 0x00}, + {0x03, 0xe3, 0x83, 0x06}, + {0x03, 0xe3, 0x83, 0x05}, + {0x03, 0xe3, 0x83, 0x0d}, + {0x03, 0xe3, 0x83, 0x0b}, + {0x03, 0xe3, 0x83, 0x07}, + {0x03, 0xe3, 0x83, 0x19}, + {0x03, 0xe3, 0x83, 0x15}, + {0x03, 0xe3, 0x83, 0x11}, + {0x03, 0xe3, 0x83, 0x31}, + {0x03, 0xe3, 0x83, 0x33}, + {0x03, 0xe3, 0x83, 0x30}, + {0x03, 0xe3, 0x83, 0x3e}, + {0x03, 0xe3, 0x83, 0x32}, + {0x03, 0xe3, 0x83, 0x36}, + {0x03, 0xe3, 0x83, 0x2e}, + {0x03, 0xe3, 0x82, 0x07}, + 
{0x03, 0xe3, 0x85, 0x04}, + {0x03, 0xe3, 0x84, 0x10}, + {0x03, 0xe3, 0x85, 0x30}, + {0x03, 0xe3, 0x85, 0x0d}, + {0x03, 0xe3, 0x85, 0x13}, + {0x03, 0xe3, 0x85, 0x15}, + {0x03, 0xe3, 0x85, 0x17}, + {0x03, 0xe3, 0x85, 0x1f}, + {0x03, 0xe3, 0x85, 0x1d}, + {0x03, 0xe3, 0x85, 0x1b}, + {0x03, 0xe3, 0x85, 0x09}, + {0x03, 0xe3, 0x85, 0x0f}, + {0x03, 0xe3, 0x85, 0x0b}, + {0x03, 0xe3, 0x85, 0x37}, + {0x03, 0xe3, 0x85, 0x3b}, + {0x03, 0xe3, 0x85, 0x39}, + {0x03, 0xe3, 0x85, 0x3f}, + {0x02, 0xc2, 0x02, 0x00}, + {0x02, 0xc2, 0x0e, 0x00}, + {0x02, 0xc2, 0x0c, 0x00}, + {0x02, 0xc2, 0x00, 0x00}, + {0x03, 0xe2, 0x82, 0x0f}, + {0x03, 0xe2, 0x94, 0x2a}, + {0x03, 0xe2, 0x86, 0x39}, + {0x03, 0xe2, 0x86, 0x3b}, + {0x03, 0xe2, 0x86, 0x3f}, + {0x03, 0xe2, 0x96, 0x0d}, + {0x03, 0xe2, 0x97, 0x25}, +} + +// Total table size 15320 bytes (14KiB) diff --git a/vendor/golang.org/x/text/width/tables13.0.0.go b/vendor/golang.org/x/text/width/tables13.0.0.go new file mode 100644 index 00000000000..40c169edf68 --- /dev/null +++ b/vendor/golang.org/x/text/width/tables13.0.0.go @@ -0,0 +1,1361 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +//go:build go1.16 && !go1.21 + +package width + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "13.0.0" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *widthTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return widthValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = widthIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. 
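[Editor's note] The inverseData encoding above is compact but easy to misread. The following is a minimal, runnable sketch — not part of the generated file; decodeInverse and its signature are hypothetical — of how a 4-byte entry recovers the mapped rune, following the layout described in the table's comment (<length> <modified UTF-8 bytes> <0 padding>, last byte xor-ed with the last byte of the original rune's UTF-8 encoding).

package main

import "fmt"

// decodeInverse recovers the UTF-8 bytes of the mapped rune from a 4-byte
// inverseData-style entry plus the UTF-8 bytes of the original rune.
// Because the stored last byte was xor-ed with the original's last byte,
// xor-ing again restores a valid UTF-8 sequence.
func decodeInverse(entry [4]byte, orig []byte) []byte {
	n := int(entry[0]) // length of the encoded replacement
	if n == 0 {
		return nil // the zero entry maps to nothing
	}
	out := make([]byte, n)
	copy(out, entry[1:1+n])
	out[n-1] ^= orig[len(orig)-1] // undo the xor using the original's last byte
	return out
}

func main() {
	// U+FF21 (FULLWIDTH LATIN CAPITAL LETTER A) is EF BC A1 in UTF-8.
	// With the shared entry {0x01, 0xE0, 0x00, 0x00}: E0 ^ A1 = 41 = 'A'.
	fmt.Printf("%q\n", decodeInverse([4]byte{0x01, 0xE0, 0x00, 0x00}, []byte("\uFF21")))
}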
+func (t *widthTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return widthValues[c0] + } + i := widthIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = widthIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = widthIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *widthTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return widthValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = widthIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *widthTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return widthValues[c0] + } + i := widthIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = widthIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = widthIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// widthTrie. Total size: 14848 bytes (14.50 KiB). Checksum: 17e24343536472f6. +type widthTrie struct{} + +func newWidthTrie(i int) *widthTrie { + return &widthTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *widthTrie) lookupValue(n uint32, b byte) uint16 { + switch { + default: + return uint16(widthValues[n<<6+uint32(b)]) + } +} + +// widthValues: 105 blocks, 6720 entries, 13440 bytes +// The third block is the zero block. 
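[Editor's note] Since widthTrie and its lookup methods are unexported, they can only be exercised from inside the width package. A minimal sketch of the intended call pattern — widthOf is a hypothetical in-package helper, not part of the generated file:

// widthOf illustrates the call pattern: lookup consumes the first rune of s
// and reports the generated trie value for it together with the rune's size
// in bytes. A size of 0 signals that s ends mid-rune and more input is needed.
func widthOf(s []byte) (v uint16, sz int) {
	t := newWidthTrie(0) // the generated trie is stateless; the argument is ignored
	return t.lookup(s)   // e.g. the 3-byte fullwidth 'A' (U+FF21) yields sz == 3
}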
+var widthValues = [6720]uint16{ + // Block 0x0, offset 0x0 + 0x20: 0x6001, 0x21: 0x6002, 0x22: 0x6002, 0x23: 0x6002, + 0x24: 0x6002, 0x25: 0x6002, 0x26: 0x6002, 0x27: 0x6002, 0x28: 0x6002, 0x29: 0x6002, + 0x2a: 0x6002, 0x2b: 0x6002, 0x2c: 0x6002, 0x2d: 0x6002, 0x2e: 0x6002, 0x2f: 0x6002, + 0x30: 0x6002, 0x31: 0x6002, 0x32: 0x6002, 0x33: 0x6002, 0x34: 0x6002, 0x35: 0x6002, + 0x36: 0x6002, 0x37: 0x6002, 0x38: 0x6002, 0x39: 0x6002, 0x3a: 0x6002, 0x3b: 0x6002, + 0x3c: 0x6002, 0x3d: 0x6002, 0x3e: 0x6002, 0x3f: 0x6002, + // Block 0x1, offset 0x40 + 0x40: 0x6003, 0x41: 0x6003, 0x42: 0x6003, 0x43: 0x6003, 0x44: 0x6003, 0x45: 0x6003, + 0x46: 0x6003, 0x47: 0x6003, 0x48: 0x6003, 0x49: 0x6003, 0x4a: 0x6003, 0x4b: 0x6003, + 0x4c: 0x6003, 0x4d: 0x6003, 0x4e: 0x6003, 0x4f: 0x6003, 0x50: 0x6003, 0x51: 0x6003, + 0x52: 0x6003, 0x53: 0x6003, 0x54: 0x6003, 0x55: 0x6003, 0x56: 0x6003, 0x57: 0x6003, + 0x58: 0x6003, 0x59: 0x6003, 0x5a: 0x6003, 0x5b: 0x6003, 0x5c: 0x6003, 0x5d: 0x6003, + 0x5e: 0x6003, 0x5f: 0x6003, 0x60: 0x6004, 0x61: 0x6004, 0x62: 0x6004, 0x63: 0x6004, + 0x64: 0x6004, 0x65: 0x6004, 0x66: 0x6004, 0x67: 0x6004, 0x68: 0x6004, 0x69: 0x6004, + 0x6a: 0x6004, 0x6b: 0x6004, 0x6c: 0x6004, 0x6d: 0x6004, 0x6e: 0x6004, 0x6f: 0x6004, + 0x70: 0x6004, 0x71: 0x6004, 0x72: 0x6004, 0x73: 0x6004, 0x74: 0x6004, 0x75: 0x6004, + 0x76: 0x6004, 0x77: 0x6004, 0x78: 0x6004, 0x79: 0x6004, 0x7a: 0x6004, 0x7b: 0x6004, + 0x7c: 0x6004, 0x7d: 0x6004, 0x7e: 0x6004, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xe1: 0x2000, 0xe2: 0x6005, 0xe3: 0x6005, + 0xe4: 0x2000, 0xe5: 0x6006, 0xe6: 0x6005, 0xe7: 0x2000, 0xe8: 0x2000, + 0xea: 0x2000, 0xec: 0x6007, 0xed: 0x2000, 0xee: 0x2000, 0xef: 0x6008, + 0xf0: 0x2000, 0xf1: 0x2000, 0xf2: 0x2000, 0xf3: 0x2000, 0xf4: 0x2000, + 0xf6: 0x2000, 0xf7: 0x2000, 0xf8: 0x2000, 0xf9: 0x2000, 0xfa: 0x2000, + 0xfc: 0x2000, 0xfd: 0x2000, 0xfe: 0x2000, 0xff: 0x2000, + // Block 0x4, offset 0x100 + 0x106: 0x2000, + 0x110: 0x2000, + 0x117: 0x2000, + 0x118: 0x2000, + 0x11e: 0x2000, 0x11f: 0x2000, 0x120: 0x2000, 0x121: 0x2000, + 0x126: 0x2000, 0x128: 0x2000, 0x129: 0x2000, + 0x12a: 0x2000, 0x12c: 0x2000, 0x12d: 0x2000, + 0x130: 0x2000, 0x132: 0x2000, 0x133: 0x2000, + 0x137: 0x2000, 0x138: 0x2000, 0x139: 0x2000, 0x13a: 0x2000, + 0x13c: 0x2000, 0x13e: 0x2000, + // Block 0x5, offset 0x140 + 0x141: 0x2000, + 0x151: 0x2000, + 0x153: 0x2000, + 0x15b: 0x2000, + 0x166: 0x2000, 0x167: 0x2000, + 0x16b: 0x2000, + 0x171: 0x2000, 0x172: 0x2000, 0x173: 0x2000, + 0x178: 0x2000, + 0x17f: 0x2000, + // Block 0x6, offset 0x180 + 0x180: 0x2000, 0x181: 0x2000, 0x182: 0x2000, 0x184: 0x2000, + 0x188: 0x2000, 0x189: 0x2000, 0x18a: 0x2000, 0x18b: 0x2000, + 0x18d: 0x2000, + 0x192: 0x2000, 0x193: 0x2000, + 0x1a6: 0x2000, 0x1a7: 0x2000, + 0x1ab: 0x2000, + // Block 0x7, offset 0x1c0 + 0x1ce: 0x2000, 0x1d0: 0x2000, + 0x1d2: 0x2000, 0x1d4: 0x2000, 0x1d6: 0x2000, + 0x1d8: 0x2000, 0x1da: 0x2000, 0x1dc: 0x2000, + // Block 0x8, offset 0x200 + 0x211: 0x2000, + 0x221: 0x2000, + // Block 0x9, offset 0x240 + 0x244: 0x2000, + 0x247: 0x2000, 0x249: 0x2000, 0x24a: 0x2000, 0x24b: 0x2000, + 0x24d: 0x2000, 0x250: 0x2000, + 0x258: 0x2000, 0x259: 0x2000, 0x25a: 0x2000, 0x25b: 0x2000, 0x25d: 0x2000, + 0x25f: 0x2000, + // Block 0xa, offset 0x280 + 0x280: 0x2000, 0x281: 0x2000, 0x282: 0x2000, 0x283: 0x2000, 0x284: 0x2000, 0x285: 0x2000, + 0x286: 0x2000, 0x287: 0x2000, 0x288: 0x2000, 0x289: 0x2000, 0x28a: 0x2000, 0x28b: 0x2000, + 0x28c: 0x2000, 0x28d: 0x2000, 0x28e: 0x2000, 0x28f: 0x2000, 0x290: 0x2000, 0x291: 0x2000, + 0x292: 0x2000, 0x293: 
0x2000, 0x294: 0x2000, 0x295: 0x2000, 0x296: 0x2000, 0x297: 0x2000, + 0x298: 0x2000, 0x299: 0x2000, 0x29a: 0x2000, 0x29b: 0x2000, 0x29c: 0x2000, 0x29d: 0x2000, + 0x29e: 0x2000, 0x29f: 0x2000, 0x2a0: 0x2000, 0x2a1: 0x2000, 0x2a2: 0x2000, 0x2a3: 0x2000, + 0x2a4: 0x2000, 0x2a5: 0x2000, 0x2a6: 0x2000, 0x2a7: 0x2000, 0x2a8: 0x2000, 0x2a9: 0x2000, + 0x2aa: 0x2000, 0x2ab: 0x2000, 0x2ac: 0x2000, 0x2ad: 0x2000, 0x2ae: 0x2000, 0x2af: 0x2000, + 0x2b0: 0x2000, 0x2b1: 0x2000, 0x2b2: 0x2000, 0x2b3: 0x2000, 0x2b4: 0x2000, 0x2b5: 0x2000, + 0x2b6: 0x2000, 0x2b7: 0x2000, 0x2b8: 0x2000, 0x2b9: 0x2000, 0x2ba: 0x2000, 0x2bb: 0x2000, + 0x2bc: 0x2000, 0x2bd: 0x2000, 0x2be: 0x2000, 0x2bf: 0x2000, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x2000, 0x2c1: 0x2000, 0x2c2: 0x2000, 0x2c3: 0x2000, 0x2c4: 0x2000, 0x2c5: 0x2000, + 0x2c6: 0x2000, 0x2c7: 0x2000, 0x2c8: 0x2000, 0x2c9: 0x2000, 0x2ca: 0x2000, 0x2cb: 0x2000, + 0x2cc: 0x2000, 0x2cd: 0x2000, 0x2ce: 0x2000, 0x2cf: 0x2000, 0x2d0: 0x2000, 0x2d1: 0x2000, + 0x2d2: 0x2000, 0x2d3: 0x2000, 0x2d4: 0x2000, 0x2d5: 0x2000, 0x2d6: 0x2000, 0x2d7: 0x2000, + 0x2d8: 0x2000, 0x2d9: 0x2000, 0x2da: 0x2000, 0x2db: 0x2000, 0x2dc: 0x2000, 0x2dd: 0x2000, + 0x2de: 0x2000, 0x2df: 0x2000, 0x2e0: 0x2000, 0x2e1: 0x2000, 0x2e2: 0x2000, 0x2e3: 0x2000, + 0x2e4: 0x2000, 0x2e5: 0x2000, 0x2e6: 0x2000, 0x2e7: 0x2000, 0x2e8: 0x2000, 0x2e9: 0x2000, + 0x2ea: 0x2000, 0x2eb: 0x2000, 0x2ec: 0x2000, 0x2ed: 0x2000, 0x2ee: 0x2000, 0x2ef: 0x2000, + // Block 0xc, offset 0x300 + 0x311: 0x2000, + 0x312: 0x2000, 0x313: 0x2000, 0x314: 0x2000, 0x315: 0x2000, 0x316: 0x2000, 0x317: 0x2000, + 0x318: 0x2000, 0x319: 0x2000, 0x31a: 0x2000, 0x31b: 0x2000, 0x31c: 0x2000, 0x31d: 0x2000, + 0x31e: 0x2000, 0x31f: 0x2000, 0x320: 0x2000, 0x321: 0x2000, 0x323: 0x2000, + 0x324: 0x2000, 0x325: 0x2000, 0x326: 0x2000, 0x327: 0x2000, 0x328: 0x2000, 0x329: 0x2000, + 0x331: 0x2000, 0x332: 0x2000, 0x333: 0x2000, 0x334: 0x2000, 0x335: 0x2000, + 0x336: 0x2000, 0x337: 0x2000, 0x338: 0x2000, 0x339: 0x2000, 0x33a: 0x2000, 0x33b: 0x2000, + 0x33c: 0x2000, 0x33d: 0x2000, 0x33e: 0x2000, 0x33f: 0x2000, + // Block 0xd, offset 0x340 + 0x340: 0x2000, 0x341: 0x2000, 0x343: 0x2000, 0x344: 0x2000, 0x345: 0x2000, + 0x346: 0x2000, 0x347: 0x2000, 0x348: 0x2000, 0x349: 0x2000, + // Block 0xe, offset 0x380 + 0x381: 0x2000, + 0x390: 0x2000, 0x391: 0x2000, + 0x392: 0x2000, 0x393: 0x2000, 0x394: 0x2000, 0x395: 0x2000, 0x396: 0x2000, 0x397: 0x2000, + 0x398: 0x2000, 0x399: 0x2000, 0x39a: 0x2000, 0x39b: 0x2000, 0x39c: 0x2000, 0x39d: 0x2000, + 0x39e: 0x2000, 0x39f: 0x2000, 0x3a0: 0x2000, 0x3a1: 0x2000, 0x3a2: 0x2000, 0x3a3: 0x2000, + 0x3a4: 0x2000, 0x3a5: 0x2000, 0x3a6: 0x2000, 0x3a7: 0x2000, 0x3a8: 0x2000, 0x3a9: 0x2000, + 0x3aa: 0x2000, 0x3ab: 0x2000, 0x3ac: 0x2000, 0x3ad: 0x2000, 0x3ae: 0x2000, 0x3af: 0x2000, + 0x3b0: 0x2000, 0x3b1: 0x2000, 0x3b2: 0x2000, 0x3b3: 0x2000, 0x3b4: 0x2000, 0x3b5: 0x2000, + 0x3b6: 0x2000, 0x3b7: 0x2000, 0x3b8: 0x2000, 0x3b9: 0x2000, 0x3ba: 0x2000, 0x3bb: 0x2000, + 0x3bc: 0x2000, 0x3bd: 0x2000, 0x3be: 0x2000, 0x3bf: 0x2000, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x2000, 0x3c1: 0x2000, 0x3c2: 0x2000, 0x3c3: 0x2000, 0x3c4: 0x2000, 0x3c5: 0x2000, + 0x3c6: 0x2000, 0x3c7: 0x2000, 0x3c8: 0x2000, 0x3c9: 0x2000, 0x3ca: 0x2000, 0x3cb: 0x2000, + 0x3cc: 0x2000, 0x3cd: 0x2000, 0x3ce: 0x2000, 0x3cf: 0x2000, 0x3d1: 0x2000, + // Block 0x10, offset 0x400 + 0x400: 0x4000, 0x401: 0x4000, 0x402: 0x4000, 0x403: 0x4000, 0x404: 0x4000, 0x405: 0x4000, + 0x406: 0x4000, 0x407: 0x4000, 0x408: 0x4000, 0x409: 0x4000, 0x40a: 0x4000, 0x40b: 0x4000, + 0x40c: 0x4000, 
0x40d: 0x4000, 0x40e: 0x4000, 0x40f: 0x4000, 0x410: 0x4000, 0x411: 0x4000, + 0x412: 0x4000, 0x413: 0x4000, 0x414: 0x4000, 0x415: 0x4000, 0x416: 0x4000, 0x417: 0x4000, + 0x418: 0x4000, 0x419: 0x4000, 0x41a: 0x4000, 0x41b: 0x4000, 0x41c: 0x4000, 0x41d: 0x4000, + 0x41e: 0x4000, 0x41f: 0x4000, 0x420: 0x4000, 0x421: 0x4000, 0x422: 0x4000, 0x423: 0x4000, + 0x424: 0x4000, 0x425: 0x4000, 0x426: 0x4000, 0x427: 0x4000, 0x428: 0x4000, 0x429: 0x4000, + 0x42a: 0x4000, 0x42b: 0x4000, 0x42c: 0x4000, 0x42d: 0x4000, 0x42e: 0x4000, 0x42f: 0x4000, + 0x430: 0x4000, 0x431: 0x4000, 0x432: 0x4000, 0x433: 0x4000, 0x434: 0x4000, 0x435: 0x4000, + 0x436: 0x4000, 0x437: 0x4000, 0x438: 0x4000, 0x439: 0x4000, 0x43a: 0x4000, 0x43b: 0x4000, + 0x43c: 0x4000, 0x43d: 0x4000, 0x43e: 0x4000, 0x43f: 0x4000, + // Block 0x11, offset 0x440 + 0x440: 0x4000, 0x441: 0x4000, 0x442: 0x4000, 0x443: 0x4000, 0x444: 0x4000, 0x445: 0x4000, + 0x446: 0x4000, 0x447: 0x4000, 0x448: 0x4000, 0x449: 0x4000, 0x44a: 0x4000, 0x44b: 0x4000, + 0x44c: 0x4000, 0x44d: 0x4000, 0x44e: 0x4000, 0x44f: 0x4000, 0x450: 0x4000, 0x451: 0x4000, + 0x452: 0x4000, 0x453: 0x4000, 0x454: 0x4000, 0x455: 0x4000, 0x456: 0x4000, 0x457: 0x4000, + 0x458: 0x4000, 0x459: 0x4000, 0x45a: 0x4000, 0x45b: 0x4000, 0x45c: 0x4000, 0x45d: 0x4000, + 0x45e: 0x4000, 0x45f: 0x4000, + // Block 0x12, offset 0x480 + 0x490: 0x2000, + 0x493: 0x2000, 0x494: 0x2000, 0x495: 0x2000, 0x496: 0x2000, + 0x498: 0x2000, 0x499: 0x2000, 0x49c: 0x2000, 0x49d: 0x2000, + 0x4a0: 0x2000, 0x4a1: 0x2000, 0x4a2: 0x2000, + 0x4a4: 0x2000, 0x4a5: 0x2000, 0x4a6: 0x2000, 0x4a7: 0x2000, + 0x4b0: 0x2000, 0x4b2: 0x2000, 0x4b3: 0x2000, 0x4b5: 0x2000, + 0x4bb: 0x2000, + 0x4be: 0x2000, + // Block 0x13, offset 0x4c0 + 0x4f4: 0x2000, + 0x4ff: 0x2000, + // Block 0x14, offset 0x500 + 0x501: 0x2000, 0x502: 0x2000, 0x503: 0x2000, 0x504: 0x2000, + 0x529: 0xa009, + 0x52c: 0x2000, + // Block 0x15, offset 0x540 + 0x543: 0x2000, 0x545: 0x2000, + 0x549: 0x2000, + 0x553: 0x2000, 0x556: 0x2000, + 0x561: 0x2000, 0x562: 0x2000, + 0x566: 0x2000, + 0x56b: 0x2000, + // Block 0x16, offset 0x580 + 0x593: 0x2000, 0x594: 0x2000, + 0x59b: 0x2000, 0x59c: 0x2000, 0x59d: 0x2000, + 0x59e: 0x2000, 0x5a0: 0x2000, 0x5a1: 0x2000, 0x5a2: 0x2000, 0x5a3: 0x2000, + 0x5a4: 0x2000, 0x5a5: 0x2000, 0x5a6: 0x2000, 0x5a7: 0x2000, 0x5a8: 0x2000, 0x5a9: 0x2000, + 0x5aa: 0x2000, 0x5ab: 0x2000, + 0x5b0: 0x2000, 0x5b1: 0x2000, 0x5b2: 0x2000, 0x5b3: 0x2000, 0x5b4: 0x2000, 0x5b5: 0x2000, + 0x5b6: 0x2000, 0x5b7: 0x2000, 0x5b8: 0x2000, 0x5b9: 0x2000, + // Block 0x17, offset 0x5c0 + 0x5c9: 0x2000, + 0x5d0: 0x200a, 0x5d1: 0x200b, + 0x5d2: 0x200a, 0x5d3: 0x200c, 0x5d4: 0x2000, 0x5d5: 0x2000, 0x5d6: 0x2000, 0x5d7: 0x2000, + 0x5d8: 0x2000, 0x5d9: 0x2000, + 0x5f8: 0x2000, 0x5f9: 0x2000, + // Block 0x18, offset 0x600 + 0x612: 0x2000, 0x614: 0x2000, + 0x627: 0x2000, + // Block 0x19, offset 0x640 + 0x640: 0x2000, 0x642: 0x2000, 0x643: 0x2000, + 0x647: 0x2000, 0x648: 0x2000, 0x64b: 0x2000, + 0x64f: 0x2000, 0x651: 0x2000, + 0x655: 0x2000, + 0x65a: 0x2000, 0x65d: 0x2000, + 0x65e: 0x2000, 0x65f: 0x2000, 0x660: 0x2000, 0x663: 0x2000, + 0x665: 0x2000, 0x667: 0x2000, 0x668: 0x2000, 0x669: 0x2000, + 0x66a: 0x2000, 0x66b: 0x2000, 0x66c: 0x2000, 0x66e: 0x2000, + 0x674: 0x2000, 0x675: 0x2000, + 0x676: 0x2000, 0x677: 0x2000, + 0x67c: 0x2000, 0x67d: 0x2000, + // Block 0x1a, offset 0x680 + 0x688: 0x2000, + 0x68c: 0x2000, + 0x692: 0x2000, + 0x6a0: 0x2000, 0x6a1: 0x2000, + 0x6a4: 0x2000, 0x6a5: 0x2000, 0x6a6: 0x2000, 0x6a7: 0x2000, + 0x6aa: 0x2000, 0x6ab: 0x2000, 0x6ae: 0x2000, 0x6af: 0x2000, + // 
Block 0x1b, offset 0x6c0 + 0x6c2: 0x2000, 0x6c3: 0x2000, + 0x6c6: 0x2000, 0x6c7: 0x2000, + 0x6d5: 0x2000, + 0x6d9: 0x2000, + 0x6e5: 0x2000, + 0x6ff: 0x2000, + // Block 0x1c, offset 0x700 + 0x712: 0x2000, + 0x71a: 0x4000, 0x71b: 0x4000, + 0x729: 0x4000, + 0x72a: 0x4000, + // Block 0x1d, offset 0x740 + 0x769: 0x4000, + 0x76a: 0x4000, 0x76b: 0x4000, 0x76c: 0x4000, + 0x770: 0x4000, 0x773: 0x4000, + // Block 0x1e, offset 0x780 + 0x7a0: 0x2000, 0x7a1: 0x2000, 0x7a2: 0x2000, 0x7a3: 0x2000, + 0x7a4: 0x2000, 0x7a5: 0x2000, 0x7a6: 0x2000, 0x7a7: 0x2000, 0x7a8: 0x2000, 0x7a9: 0x2000, + 0x7aa: 0x2000, 0x7ab: 0x2000, 0x7ac: 0x2000, 0x7ad: 0x2000, 0x7ae: 0x2000, 0x7af: 0x2000, + 0x7b0: 0x2000, 0x7b1: 0x2000, 0x7b2: 0x2000, 0x7b3: 0x2000, 0x7b4: 0x2000, 0x7b5: 0x2000, + 0x7b6: 0x2000, 0x7b7: 0x2000, 0x7b8: 0x2000, 0x7b9: 0x2000, 0x7ba: 0x2000, 0x7bb: 0x2000, + 0x7bc: 0x2000, 0x7bd: 0x2000, 0x7be: 0x2000, 0x7bf: 0x2000, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x2000, 0x7c1: 0x2000, 0x7c2: 0x2000, 0x7c3: 0x2000, 0x7c4: 0x2000, 0x7c5: 0x2000, + 0x7c6: 0x2000, 0x7c7: 0x2000, 0x7c8: 0x2000, 0x7c9: 0x2000, 0x7ca: 0x2000, 0x7cb: 0x2000, + 0x7cc: 0x2000, 0x7cd: 0x2000, 0x7ce: 0x2000, 0x7cf: 0x2000, 0x7d0: 0x2000, 0x7d1: 0x2000, + 0x7d2: 0x2000, 0x7d3: 0x2000, 0x7d4: 0x2000, 0x7d5: 0x2000, 0x7d6: 0x2000, 0x7d7: 0x2000, + 0x7d8: 0x2000, 0x7d9: 0x2000, 0x7da: 0x2000, 0x7db: 0x2000, 0x7dc: 0x2000, 0x7dd: 0x2000, + 0x7de: 0x2000, 0x7df: 0x2000, 0x7e0: 0x2000, 0x7e1: 0x2000, 0x7e2: 0x2000, 0x7e3: 0x2000, + 0x7e4: 0x2000, 0x7e5: 0x2000, 0x7e6: 0x2000, 0x7e7: 0x2000, 0x7e8: 0x2000, 0x7e9: 0x2000, + 0x7eb: 0x2000, 0x7ec: 0x2000, 0x7ed: 0x2000, 0x7ee: 0x2000, 0x7ef: 0x2000, + 0x7f0: 0x2000, 0x7f1: 0x2000, 0x7f2: 0x2000, 0x7f3: 0x2000, 0x7f4: 0x2000, 0x7f5: 0x2000, + 0x7f6: 0x2000, 0x7f7: 0x2000, 0x7f8: 0x2000, 0x7f9: 0x2000, 0x7fa: 0x2000, 0x7fb: 0x2000, + 0x7fc: 0x2000, 0x7fd: 0x2000, 0x7fe: 0x2000, 0x7ff: 0x2000, + // Block 0x20, offset 0x800 + 0x800: 0x2000, 0x801: 0x2000, 0x802: 0x200d, 0x803: 0x2000, 0x804: 0x2000, 0x805: 0x2000, + 0x806: 0x2000, 0x807: 0x2000, 0x808: 0x2000, 0x809: 0x2000, 0x80a: 0x2000, 0x80b: 0x2000, + 0x80c: 0x2000, 0x80d: 0x2000, 0x80e: 0x2000, 0x80f: 0x2000, 0x810: 0x2000, 0x811: 0x2000, + 0x812: 0x2000, 0x813: 0x2000, 0x814: 0x2000, 0x815: 0x2000, 0x816: 0x2000, 0x817: 0x2000, + 0x818: 0x2000, 0x819: 0x2000, 0x81a: 0x2000, 0x81b: 0x2000, 0x81c: 0x2000, 0x81d: 0x2000, + 0x81e: 0x2000, 0x81f: 0x2000, 0x820: 0x2000, 0x821: 0x2000, 0x822: 0x2000, 0x823: 0x2000, + 0x824: 0x2000, 0x825: 0x2000, 0x826: 0x2000, 0x827: 0x2000, 0x828: 0x2000, 0x829: 0x2000, + 0x82a: 0x2000, 0x82b: 0x2000, 0x82c: 0x2000, 0x82d: 0x2000, 0x82e: 0x2000, 0x82f: 0x2000, + 0x830: 0x2000, 0x831: 0x2000, 0x832: 0x2000, 0x833: 0x2000, 0x834: 0x2000, 0x835: 0x2000, + 0x836: 0x2000, 0x837: 0x2000, 0x838: 0x2000, 0x839: 0x2000, 0x83a: 0x2000, 0x83b: 0x2000, + 0x83c: 0x2000, 0x83d: 0x2000, 0x83e: 0x2000, 0x83f: 0x2000, + // Block 0x21, offset 0x840 + 0x840: 0x2000, 0x841: 0x2000, 0x842: 0x2000, 0x843: 0x2000, 0x844: 0x2000, 0x845: 0x2000, + 0x846: 0x2000, 0x847: 0x2000, 0x848: 0x2000, 0x849: 0x2000, 0x84a: 0x2000, 0x84b: 0x2000, + 0x850: 0x2000, 0x851: 0x2000, + 0x852: 0x2000, 0x853: 0x2000, 0x854: 0x2000, 0x855: 0x2000, 0x856: 0x2000, 0x857: 0x2000, + 0x858: 0x2000, 0x859: 0x2000, 0x85a: 0x2000, 0x85b: 0x2000, 0x85c: 0x2000, 0x85d: 0x2000, + 0x85e: 0x2000, 0x85f: 0x2000, 0x860: 0x2000, 0x861: 0x2000, 0x862: 0x2000, 0x863: 0x2000, + 0x864: 0x2000, 0x865: 0x2000, 0x866: 0x2000, 0x867: 0x2000, 0x868: 0x2000, 0x869: 0x2000, + 0x86a: 
0x2000, 0x86b: 0x2000, 0x86c: 0x2000, 0x86d: 0x2000, 0x86e: 0x2000, 0x86f: 0x2000, + 0x870: 0x2000, 0x871: 0x2000, 0x872: 0x2000, 0x873: 0x2000, + // Block 0x22, offset 0x880 + 0x880: 0x2000, 0x881: 0x2000, 0x882: 0x2000, 0x883: 0x2000, 0x884: 0x2000, 0x885: 0x2000, + 0x886: 0x2000, 0x887: 0x2000, 0x888: 0x2000, 0x889: 0x2000, 0x88a: 0x2000, 0x88b: 0x2000, + 0x88c: 0x2000, 0x88d: 0x2000, 0x88e: 0x2000, 0x88f: 0x2000, + 0x892: 0x2000, 0x893: 0x2000, 0x894: 0x2000, 0x895: 0x2000, + 0x8a0: 0x200e, 0x8a1: 0x2000, 0x8a3: 0x2000, + 0x8a4: 0x2000, 0x8a5: 0x2000, 0x8a6: 0x2000, 0x8a7: 0x2000, 0x8a8: 0x2000, 0x8a9: 0x2000, + 0x8b2: 0x2000, 0x8b3: 0x2000, + 0x8b6: 0x2000, 0x8b7: 0x2000, + 0x8bc: 0x2000, 0x8bd: 0x2000, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x2000, 0x8c1: 0x2000, + 0x8c6: 0x2000, 0x8c7: 0x2000, 0x8c8: 0x2000, 0x8cb: 0x200f, + 0x8ce: 0x2000, 0x8cf: 0x2000, 0x8d0: 0x2000, 0x8d1: 0x2000, + 0x8e2: 0x2000, 0x8e3: 0x2000, + 0x8e4: 0x2000, 0x8e5: 0x2000, + 0x8ef: 0x2000, + 0x8fd: 0x4000, 0x8fe: 0x4000, + // Block 0x24, offset 0x900 + 0x905: 0x2000, + 0x906: 0x2000, 0x909: 0x2000, + 0x90e: 0x2000, 0x90f: 0x2000, + 0x914: 0x4000, 0x915: 0x4000, + 0x91c: 0x2000, + 0x91e: 0x2000, + // Block 0x25, offset 0x940 + 0x940: 0x2000, 0x942: 0x2000, + 0x948: 0x4000, 0x949: 0x4000, 0x94a: 0x4000, 0x94b: 0x4000, + 0x94c: 0x4000, 0x94d: 0x4000, 0x94e: 0x4000, 0x94f: 0x4000, 0x950: 0x4000, 0x951: 0x4000, + 0x952: 0x4000, 0x953: 0x4000, + 0x960: 0x2000, 0x961: 0x2000, 0x963: 0x2000, + 0x964: 0x2000, 0x965: 0x2000, 0x967: 0x2000, 0x968: 0x2000, 0x969: 0x2000, + 0x96a: 0x2000, 0x96c: 0x2000, 0x96d: 0x2000, 0x96f: 0x2000, + 0x97f: 0x4000, + // Block 0x26, offset 0x980 + 0x993: 0x4000, + 0x99e: 0x2000, 0x99f: 0x2000, 0x9a1: 0x4000, + 0x9aa: 0x4000, 0x9ab: 0x4000, + 0x9bd: 0x4000, 0x9be: 0x4000, 0x9bf: 0x2000, + // Block 0x27, offset 0x9c0 + 0x9c4: 0x4000, 0x9c5: 0x4000, + 0x9c6: 0x2000, 0x9c7: 0x2000, 0x9c8: 0x2000, 0x9c9: 0x2000, 0x9ca: 0x2000, 0x9cb: 0x2000, + 0x9cc: 0x2000, 0x9cd: 0x2000, 0x9ce: 0x4000, 0x9cf: 0x2000, 0x9d0: 0x2000, 0x9d1: 0x2000, + 0x9d2: 0x2000, 0x9d3: 0x2000, 0x9d4: 0x4000, 0x9d5: 0x2000, 0x9d6: 0x2000, 0x9d7: 0x2000, + 0x9d8: 0x2000, 0x9d9: 0x2000, 0x9da: 0x2000, 0x9db: 0x2000, 0x9dc: 0x2000, 0x9dd: 0x2000, + 0x9de: 0x2000, 0x9df: 0x2000, 0x9e0: 0x2000, 0x9e1: 0x2000, 0x9e3: 0x2000, + 0x9e8: 0x2000, 0x9e9: 0x2000, + 0x9ea: 0x4000, 0x9eb: 0x2000, 0x9ec: 0x2000, 0x9ed: 0x2000, 0x9ee: 0x2000, 0x9ef: 0x2000, + 0x9f0: 0x2000, 0x9f1: 0x2000, 0x9f2: 0x4000, 0x9f3: 0x4000, 0x9f4: 0x2000, 0x9f5: 0x4000, + 0x9f6: 0x2000, 0x9f7: 0x2000, 0x9f8: 0x2000, 0x9f9: 0x2000, 0x9fa: 0x4000, 0x9fb: 0x2000, + 0x9fc: 0x2000, 0x9fd: 0x4000, 0x9fe: 0x2000, 0x9ff: 0x2000, + // Block 0x28, offset 0xa00 + 0xa05: 0x4000, + 0xa0a: 0x4000, 0xa0b: 0x4000, + 0xa28: 0x4000, + 0xa3d: 0x2000, + // Block 0x29, offset 0xa40 + 0xa4c: 0x4000, 0xa4e: 0x4000, + 0xa53: 0x4000, 0xa54: 0x4000, 0xa55: 0x4000, 0xa57: 0x4000, + 0xa76: 0x2000, 0xa77: 0x2000, 0xa78: 0x2000, 0xa79: 0x2000, 0xa7a: 0x2000, 0xa7b: 0x2000, + 0xa7c: 0x2000, 0xa7d: 0x2000, 0xa7e: 0x2000, 0xa7f: 0x2000, + // Block 0x2a, offset 0xa80 + 0xa95: 0x4000, 0xa96: 0x4000, 0xa97: 0x4000, + 0xab0: 0x4000, + 0xabf: 0x4000, + // Block 0x2b, offset 0xac0 + 0xae6: 0x6000, 0xae7: 0x6000, 0xae8: 0x6000, 0xae9: 0x6000, + 0xaea: 0x6000, 0xaeb: 0x6000, 0xaec: 0x6000, 0xaed: 0x6000, + // Block 0x2c, offset 0xb00 + 0xb05: 0x6010, + 0xb06: 0x6011, + // Block 0x2d, offset 0xb40 + 0xb5b: 0x4000, 0xb5c: 0x4000, + // Block 0x2e, offset 0xb80 + 0xb90: 0x4000, + 0xb95: 0x4000, 0xb96: 
0x2000, 0xb97: 0x2000, + 0xb98: 0x2000, 0xb99: 0x2000, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x4000, 0xbc1: 0x4000, 0xbc2: 0x4000, 0xbc3: 0x4000, 0xbc4: 0x4000, 0xbc5: 0x4000, + 0xbc6: 0x4000, 0xbc7: 0x4000, 0xbc8: 0x4000, 0xbc9: 0x4000, 0xbca: 0x4000, 0xbcb: 0x4000, + 0xbcc: 0x4000, 0xbcd: 0x4000, 0xbce: 0x4000, 0xbcf: 0x4000, 0xbd0: 0x4000, 0xbd1: 0x4000, + 0xbd2: 0x4000, 0xbd3: 0x4000, 0xbd4: 0x4000, 0xbd5: 0x4000, 0xbd6: 0x4000, 0xbd7: 0x4000, + 0xbd8: 0x4000, 0xbd9: 0x4000, 0xbdb: 0x4000, 0xbdc: 0x4000, 0xbdd: 0x4000, + 0xbde: 0x4000, 0xbdf: 0x4000, 0xbe0: 0x4000, 0xbe1: 0x4000, 0xbe2: 0x4000, 0xbe3: 0x4000, + 0xbe4: 0x4000, 0xbe5: 0x4000, 0xbe6: 0x4000, 0xbe7: 0x4000, 0xbe8: 0x4000, 0xbe9: 0x4000, + 0xbea: 0x4000, 0xbeb: 0x4000, 0xbec: 0x4000, 0xbed: 0x4000, 0xbee: 0x4000, 0xbef: 0x4000, + 0xbf0: 0x4000, 0xbf1: 0x4000, 0xbf2: 0x4000, 0xbf3: 0x4000, 0xbf4: 0x4000, 0xbf5: 0x4000, + 0xbf6: 0x4000, 0xbf7: 0x4000, 0xbf8: 0x4000, 0xbf9: 0x4000, 0xbfa: 0x4000, 0xbfb: 0x4000, + 0xbfc: 0x4000, 0xbfd: 0x4000, 0xbfe: 0x4000, 0xbff: 0x4000, + // Block 0x30, offset 0xc00 + 0xc00: 0x4000, 0xc01: 0x4000, 0xc02: 0x4000, 0xc03: 0x4000, 0xc04: 0x4000, 0xc05: 0x4000, + 0xc06: 0x4000, 0xc07: 0x4000, 0xc08: 0x4000, 0xc09: 0x4000, 0xc0a: 0x4000, 0xc0b: 0x4000, + 0xc0c: 0x4000, 0xc0d: 0x4000, 0xc0e: 0x4000, 0xc0f: 0x4000, 0xc10: 0x4000, 0xc11: 0x4000, + 0xc12: 0x4000, 0xc13: 0x4000, 0xc14: 0x4000, 0xc15: 0x4000, 0xc16: 0x4000, 0xc17: 0x4000, + 0xc18: 0x4000, 0xc19: 0x4000, 0xc1a: 0x4000, 0xc1b: 0x4000, 0xc1c: 0x4000, 0xc1d: 0x4000, + 0xc1e: 0x4000, 0xc1f: 0x4000, 0xc20: 0x4000, 0xc21: 0x4000, 0xc22: 0x4000, 0xc23: 0x4000, + 0xc24: 0x4000, 0xc25: 0x4000, 0xc26: 0x4000, 0xc27: 0x4000, 0xc28: 0x4000, 0xc29: 0x4000, + 0xc2a: 0x4000, 0xc2b: 0x4000, 0xc2c: 0x4000, 0xc2d: 0x4000, 0xc2e: 0x4000, 0xc2f: 0x4000, + 0xc30: 0x4000, 0xc31: 0x4000, 0xc32: 0x4000, 0xc33: 0x4000, + // Block 0x31, offset 0xc40 + 0xc40: 0x4000, 0xc41: 0x4000, 0xc42: 0x4000, 0xc43: 0x4000, 0xc44: 0x4000, 0xc45: 0x4000, + 0xc46: 0x4000, 0xc47: 0x4000, 0xc48: 0x4000, 0xc49: 0x4000, 0xc4a: 0x4000, 0xc4b: 0x4000, + 0xc4c: 0x4000, 0xc4d: 0x4000, 0xc4e: 0x4000, 0xc4f: 0x4000, 0xc50: 0x4000, 0xc51: 0x4000, + 0xc52: 0x4000, 0xc53: 0x4000, 0xc54: 0x4000, 0xc55: 0x4000, + 0xc70: 0x4000, 0xc71: 0x4000, 0xc72: 0x4000, 0xc73: 0x4000, 0xc74: 0x4000, 0xc75: 0x4000, + 0xc76: 0x4000, 0xc77: 0x4000, 0xc78: 0x4000, 0xc79: 0x4000, 0xc7a: 0x4000, 0xc7b: 0x4000, + // Block 0x32, offset 0xc80 + 0xc80: 0x9012, 0xc81: 0x4013, 0xc82: 0x4014, 0xc83: 0x4000, 0xc84: 0x4000, 0xc85: 0x4000, + 0xc86: 0x4000, 0xc87: 0x4000, 0xc88: 0x4000, 0xc89: 0x4000, 0xc8a: 0x4000, 0xc8b: 0x4000, + 0xc8c: 0x4015, 0xc8d: 0x4015, 0xc8e: 0x4000, 0xc8f: 0x4000, 0xc90: 0x4000, 0xc91: 0x4000, + 0xc92: 0x4000, 0xc93: 0x4000, 0xc94: 0x4000, 0xc95: 0x4000, 0xc96: 0x4000, 0xc97: 0x4000, + 0xc98: 0x4000, 0xc99: 0x4000, 0xc9a: 0x4000, 0xc9b: 0x4000, 0xc9c: 0x4000, 0xc9d: 0x4000, + 0xc9e: 0x4000, 0xc9f: 0x4000, 0xca0: 0x4000, 0xca1: 0x4000, 0xca2: 0x4000, 0xca3: 0x4000, + 0xca4: 0x4000, 0xca5: 0x4000, 0xca6: 0x4000, 0xca7: 0x4000, 0xca8: 0x4000, 0xca9: 0x4000, + 0xcaa: 0x4000, 0xcab: 0x4000, 0xcac: 0x4000, 0xcad: 0x4000, 0xcae: 0x4000, 0xcaf: 0x4000, + 0xcb0: 0x4000, 0xcb1: 0x4000, 0xcb2: 0x4000, 0xcb3: 0x4000, 0xcb4: 0x4000, 0xcb5: 0x4000, + 0xcb6: 0x4000, 0xcb7: 0x4000, 0xcb8: 0x4000, 0xcb9: 0x4000, 0xcba: 0x4000, 0xcbb: 0x4000, + 0xcbc: 0x4000, 0xcbd: 0x4000, 0xcbe: 0x4000, + // Block 0x33, offset 0xcc0 + 0xcc1: 0x4000, 0xcc2: 0x4000, 0xcc3: 0x4000, 0xcc4: 0x4000, 0xcc5: 0x4000, + 0xcc6: 0x4000, 
0xcc7: 0x4000, 0xcc8: 0x4000, 0xcc9: 0x4000, 0xcca: 0x4000, 0xccb: 0x4000, + 0xccc: 0x4000, 0xccd: 0x4000, 0xcce: 0x4000, 0xccf: 0x4000, 0xcd0: 0x4000, 0xcd1: 0x4000, + 0xcd2: 0x4000, 0xcd3: 0x4000, 0xcd4: 0x4000, 0xcd5: 0x4000, 0xcd6: 0x4000, 0xcd7: 0x4000, + 0xcd8: 0x4000, 0xcd9: 0x4000, 0xcda: 0x4000, 0xcdb: 0x4000, 0xcdc: 0x4000, 0xcdd: 0x4000, + 0xcde: 0x4000, 0xcdf: 0x4000, 0xce0: 0x4000, 0xce1: 0x4000, 0xce2: 0x4000, 0xce3: 0x4000, + 0xce4: 0x4000, 0xce5: 0x4000, 0xce6: 0x4000, 0xce7: 0x4000, 0xce8: 0x4000, 0xce9: 0x4000, + 0xcea: 0x4000, 0xceb: 0x4000, 0xcec: 0x4000, 0xced: 0x4000, 0xcee: 0x4000, 0xcef: 0x4000, + 0xcf0: 0x4000, 0xcf1: 0x4000, 0xcf2: 0x4000, 0xcf3: 0x4000, 0xcf4: 0x4000, 0xcf5: 0x4000, + 0xcf6: 0x4000, 0xcf7: 0x4000, 0xcf8: 0x4000, 0xcf9: 0x4000, 0xcfa: 0x4000, 0xcfb: 0x4000, + 0xcfc: 0x4000, 0xcfd: 0x4000, 0xcfe: 0x4000, 0xcff: 0x4000, + // Block 0x34, offset 0xd00 + 0xd00: 0x4000, 0xd01: 0x4000, 0xd02: 0x4000, 0xd03: 0x4000, 0xd04: 0x4000, 0xd05: 0x4000, + 0xd06: 0x4000, 0xd07: 0x4000, 0xd08: 0x4000, 0xd09: 0x4000, 0xd0a: 0x4000, 0xd0b: 0x4000, + 0xd0c: 0x4000, 0xd0d: 0x4000, 0xd0e: 0x4000, 0xd0f: 0x4000, 0xd10: 0x4000, 0xd11: 0x4000, + 0xd12: 0x4000, 0xd13: 0x4000, 0xd14: 0x4000, 0xd15: 0x4000, 0xd16: 0x4000, + 0xd19: 0x4016, 0xd1a: 0x4017, 0xd1b: 0x4000, 0xd1c: 0x4000, 0xd1d: 0x4000, + 0xd1e: 0x4000, 0xd1f: 0x4000, 0xd20: 0x4000, 0xd21: 0x4018, 0xd22: 0x4019, 0xd23: 0x401a, + 0xd24: 0x401b, 0xd25: 0x401c, 0xd26: 0x401d, 0xd27: 0x401e, 0xd28: 0x401f, 0xd29: 0x4020, + 0xd2a: 0x4021, 0xd2b: 0x4022, 0xd2c: 0x4000, 0xd2d: 0x4010, 0xd2e: 0x4000, 0xd2f: 0x4023, + 0xd30: 0x4000, 0xd31: 0x4024, 0xd32: 0x4000, 0xd33: 0x4025, 0xd34: 0x4000, 0xd35: 0x4026, + 0xd36: 0x4000, 0xd37: 0x401a, 0xd38: 0x4000, 0xd39: 0x4027, 0xd3a: 0x4000, 0xd3b: 0x4028, + 0xd3c: 0x4000, 0xd3d: 0x4020, 0xd3e: 0x4000, 0xd3f: 0x4029, + // Block 0x35, offset 0xd40 + 0xd40: 0x4000, 0xd41: 0x402a, 0xd42: 0x4000, 0xd43: 0x402b, 0xd44: 0x402c, 0xd45: 0x4000, + 0xd46: 0x4017, 0xd47: 0x4000, 0xd48: 0x402d, 0xd49: 0x4000, 0xd4a: 0x402e, 0xd4b: 0x402f, + 0xd4c: 0x4030, 0xd4d: 0x4017, 0xd4e: 0x4016, 0xd4f: 0x4017, 0xd50: 0x4000, 0xd51: 0x4000, + 0xd52: 0x4031, 0xd53: 0x4000, 0xd54: 0x4000, 0xd55: 0x4031, 0xd56: 0x4000, 0xd57: 0x4000, + 0xd58: 0x4032, 0xd59: 0x4000, 0xd5a: 0x4000, 0xd5b: 0x4032, 0xd5c: 0x4000, 0xd5d: 0x4000, + 0xd5e: 0x4033, 0xd5f: 0x402e, 0xd60: 0x4034, 0xd61: 0x4035, 0xd62: 0x4034, 0xd63: 0x4036, + 0xd64: 0x4037, 0xd65: 0x4024, 0xd66: 0x4035, 0xd67: 0x4025, 0xd68: 0x4038, 0xd69: 0x4038, + 0xd6a: 0x4039, 0xd6b: 0x4039, 0xd6c: 0x403a, 0xd6d: 0x403a, 0xd6e: 0x4000, 0xd6f: 0x4035, + 0xd70: 0x4000, 0xd71: 0x4000, 0xd72: 0x403b, 0xd73: 0x403c, 0xd74: 0x4000, 0xd75: 0x4000, + 0xd76: 0x4000, 0xd77: 0x4000, 0xd78: 0x4000, 0xd79: 0x4000, 0xd7a: 0x4000, 0xd7b: 0x403d, + 0xd7c: 0x401c, 0xd7d: 0x4000, 0xd7e: 0x4000, 0xd7f: 0x4000, + // Block 0x36, offset 0xd80 + 0xd85: 0x4000, + 0xd86: 0x4000, 0xd87: 0x4000, 0xd88: 0x4000, 0xd89: 0x4000, 0xd8a: 0x4000, 0xd8b: 0x4000, + 0xd8c: 0x4000, 0xd8d: 0x4000, 0xd8e: 0x4000, 0xd8f: 0x4000, 0xd90: 0x4000, 0xd91: 0x4000, + 0xd92: 0x4000, 0xd93: 0x4000, 0xd94: 0x4000, 0xd95: 0x4000, 0xd96: 0x4000, 0xd97: 0x4000, + 0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000, + 0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000, + 0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000, + 0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000, 0xdae: 0x4000, 0xdaf: 
0x4000, + 0xdb1: 0x403e, 0xdb2: 0x403e, 0xdb3: 0x403e, 0xdb4: 0x403e, 0xdb5: 0x403e, + 0xdb6: 0x403e, 0xdb7: 0x403e, 0xdb8: 0x403e, 0xdb9: 0x403e, 0xdba: 0x403e, 0xdbb: 0x403e, + 0xdbc: 0x403e, 0xdbd: 0x403e, 0xdbe: 0x403e, 0xdbf: 0x403e, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x4037, 0xdc1: 0x4037, 0xdc2: 0x4037, 0xdc3: 0x4037, 0xdc4: 0x4037, 0xdc5: 0x4037, + 0xdc6: 0x4037, 0xdc7: 0x4037, 0xdc8: 0x4037, 0xdc9: 0x4037, 0xdca: 0x4037, 0xdcb: 0x4037, + 0xdcc: 0x4037, 0xdcd: 0x4037, 0xdce: 0x4037, 0xdcf: 0x400e, 0xdd0: 0x403f, 0xdd1: 0x4040, + 0xdd2: 0x4041, 0xdd3: 0x4040, 0xdd4: 0x403f, 0xdd5: 0x4042, 0xdd6: 0x4043, 0xdd7: 0x4044, + 0xdd8: 0x4040, 0xdd9: 0x4041, 0xdda: 0x4040, 0xddb: 0x4045, 0xddc: 0x4009, 0xddd: 0x4045, + 0xdde: 0x4046, 0xddf: 0x4045, 0xde0: 0x4047, 0xde1: 0x400b, 0xde2: 0x400a, 0xde3: 0x400c, + 0xde4: 0x4048, 0xde5: 0x4000, 0xde6: 0x4000, 0xde7: 0x4000, 0xde8: 0x4000, 0xde9: 0x4000, + 0xdea: 0x4000, 0xdeb: 0x4000, 0xdec: 0x4000, 0xded: 0x4000, 0xdee: 0x4000, 0xdef: 0x4000, + 0xdf0: 0x4000, 0xdf1: 0x4000, 0xdf2: 0x4000, 0xdf3: 0x4000, 0xdf4: 0x4000, 0xdf5: 0x4000, + 0xdf6: 0x4000, 0xdf7: 0x4000, 0xdf8: 0x4000, 0xdf9: 0x4000, 0xdfa: 0x4000, 0xdfb: 0x4000, + 0xdfc: 0x4000, 0xdfd: 0x4000, 0xdfe: 0x4000, 0xdff: 0x4000, + // Block 0x38, offset 0xe00 + 0xe00: 0x4000, 0xe01: 0x4000, 0xe02: 0x4000, 0xe03: 0x4000, 0xe04: 0x4000, 0xe05: 0x4000, + 0xe06: 0x4000, 0xe07: 0x4000, 0xe08: 0x4000, 0xe09: 0x4000, 0xe0a: 0x4000, 0xe0b: 0x4000, + 0xe0c: 0x4000, 0xe0d: 0x4000, 0xe0e: 0x4000, 0xe10: 0x4000, 0xe11: 0x4000, + 0xe12: 0x4000, 0xe13: 0x4000, 0xe14: 0x4000, 0xe15: 0x4000, 0xe16: 0x4000, 0xe17: 0x4000, + 0xe18: 0x4000, 0xe19: 0x4000, 0xe1a: 0x4000, 0xe1b: 0x4000, 0xe1c: 0x4000, 0xe1d: 0x4000, + 0xe1e: 0x4000, 0xe1f: 0x4000, 0xe20: 0x4000, 0xe21: 0x4000, 0xe22: 0x4000, 0xe23: 0x4000, + 0xe24: 0x4000, 0xe25: 0x4000, 0xe26: 0x4000, 0xe27: 0x4000, 0xe28: 0x4000, 0xe29: 0x4000, + 0xe2a: 0x4000, 0xe2b: 0x4000, 0xe2c: 0x4000, 0xe2d: 0x4000, 0xe2e: 0x4000, 0xe2f: 0x4000, + 0xe30: 0x4000, 0xe31: 0x4000, 0xe32: 0x4000, 0xe33: 0x4000, 0xe34: 0x4000, 0xe35: 0x4000, + 0xe36: 0x4000, 0xe37: 0x4000, 0xe38: 0x4000, 0xe39: 0x4000, 0xe3a: 0x4000, 0xe3b: 0x4000, + 0xe3c: 0x4000, 0xe3d: 0x4000, 0xe3e: 0x4000, 0xe3f: 0x4000, + // Block 0x39, offset 0xe40 + 0xe40: 0x4000, 0xe41: 0x4000, 0xe42: 0x4000, 0xe43: 0x4000, 0xe44: 0x4000, 0xe45: 0x4000, + 0xe46: 0x4000, 0xe47: 0x4000, 0xe48: 0x4000, 0xe49: 0x4000, 0xe4a: 0x4000, 0xe4b: 0x4000, + 0xe4c: 0x4000, 0xe4d: 0x4000, 0xe4e: 0x4000, 0xe4f: 0x4000, 0xe50: 0x4000, 0xe51: 0x4000, + 0xe52: 0x4000, 0xe53: 0x4000, 0xe54: 0x4000, 0xe55: 0x4000, 0xe56: 0x4000, 0xe57: 0x4000, + 0xe58: 0x4000, 0xe59: 0x4000, 0xe5a: 0x4000, 0xe5b: 0x4000, 0xe5c: 0x4000, 0xe5d: 0x4000, + 0xe5e: 0x4000, 0xe5f: 0x4000, 0xe60: 0x4000, 0xe61: 0x4000, 0xe62: 0x4000, 0xe63: 0x4000, + 0xe70: 0x4000, 0xe71: 0x4000, 0xe72: 0x4000, 0xe73: 0x4000, 0xe74: 0x4000, 0xe75: 0x4000, + 0xe76: 0x4000, 0xe77: 0x4000, 0xe78: 0x4000, 0xe79: 0x4000, 0xe7a: 0x4000, 0xe7b: 0x4000, + 0xe7c: 0x4000, 0xe7d: 0x4000, 0xe7e: 0x4000, 0xe7f: 0x4000, + // Block 0x3a, offset 0xe80 + 0xe80: 0x4000, 0xe81: 0x4000, 0xe82: 0x4000, 0xe83: 0x4000, 0xe84: 0x4000, 0xe85: 0x4000, + 0xe86: 0x4000, 0xe87: 0x4000, 0xe88: 0x4000, 0xe89: 0x4000, 0xe8a: 0x4000, 0xe8b: 0x4000, + 0xe8c: 0x4000, 0xe8d: 0x4000, 0xe8e: 0x4000, 0xe8f: 0x4000, 0xe90: 0x4000, 0xe91: 0x4000, + 0xe92: 0x4000, 0xe93: 0x4000, 0xe94: 0x4000, 0xe95: 0x4000, 0xe96: 0x4000, 0xe97: 0x4000, + 0xe98: 0x4000, 0xe99: 0x4000, 0xe9a: 0x4000, 0xe9b: 0x4000, 0xe9c: 0x4000, 
0xe9d: 0x4000, + 0xe9e: 0x4000, 0xea0: 0x4000, 0xea1: 0x4000, 0xea2: 0x4000, 0xea3: 0x4000, + 0xea4: 0x4000, 0xea5: 0x4000, 0xea6: 0x4000, 0xea7: 0x4000, 0xea8: 0x4000, 0xea9: 0x4000, + 0xeaa: 0x4000, 0xeab: 0x4000, 0xeac: 0x4000, 0xead: 0x4000, 0xeae: 0x4000, 0xeaf: 0x4000, + 0xeb0: 0x4000, 0xeb1: 0x4000, 0xeb2: 0x4000, 0xeb3: 0x4000, 0xeb4: 0x4000, 0xeb5: 0x4000, + 0xeb6: 0x4000, 0xeb7: 0x4000, 0xeb8: 0x4000, 0xeb9: 0x4000, 0xeba: 0x4000, 0xebb: 0x4000, + 0xebc: 0x4000, 0xebd: 0x4000, 0xebe: 0x4000, 0xebf: 0x4000, + // Block 0x3b, offset 0xec0 + 0xec0: 0x4000, 0xec1: 0x4000, 0xec2: 0x4000, 0xec3: 0x4000, 0xec4: 0x4000, 0xec5: 0x4000, + 0xec6: 0x4000, 0xec7: 0x4000, 0xec8: 0x2000, 0xec9: 0x2000, 0xeca: 0x2000, 0xecb: 0x2000, + 0xecc: 0x2000, 0xecd: 0x2000, 0xece: 0x2000, 0xecf: 0x2000, 0xed0: 0x4000, 0xed1: 0x4000, + 0xed2: 0x4000, 0xed3: 0x4000, 0xed4: 0x4000, 0xed5: 0x4000, 0xed6: 0x4000, 0xed7: 0x4000, + 0xed8: 0x4000, 0xed9: 0x4000, 0xeda: 0x4000, 0xedb: 0x4000, 0xedc: 0x4000, 0xedd: 0x4000, + 0xede: 0x4000, 0xedf: 0x4000, 0xee0: 0x4000, 0xee1: 0x4000, 0xee2: 0x4000, 0xee3: 0x4000, + 0xee4: 0x4000, 0xee5: 0x4000, 0xee6: 0x4000, 0xee7: 0x4000, 0xee8: 0x4000, 0xee9: 0x4000, + 0xeea: 0x4000, 0xeeb: 0x4000, 0xeec: 0x4000, 0xeed: 0x4000, 0xeee: 0x4000, 0xeef: 0x4000, + 0xef0: 0x4000, 0xef1: 0x4000, 0xef2: 0x4000, 0xef3: 0x4000, 0xef4: 0x4000, 0xef5: 0x4000, + 0xef6: 0x4000, 0xef7: 0x4000, 0xef8: 0x4000, 0xef9: 0x4000, 0xefa: 0x4000, 0xefb: 0x4000, + 0xefc: 0x4000, 0xefd: 0x4000, 0xefe: 0x4000, 0xeff: 0x4000, + // Block 0x3c, offset 0xf00 + 0xf00: 0x4000, 0xf01: 0x4000, 0xf02: 0x4000, 0xf03: 0x4000, 0xf04: 0x4000, 0xf05: 0x4000, + 0xf06: 0x4000, 0xf07: 0x4000, 0xf08: 0x4000, 0xf09: 0x4000, 0xf0a: 0x4000, 0xf0b: 0x4000, + 0xf0c: 0x4000, 0xf10: 0x4000, 0xf11: 0x4000, + 0xf12: 0x4000, 0xf13: 0x4000, 0xf14: 0x4000, 0xf15: 0x4000, 0xf16: 0x4000, 0xf17: 0x4000, + 0xf18: 0x4000, 0xf19: 0x4000, 0xf1a: 0x4000, 0xf1b: 0x4000, 0xf1c: 0x4000, 0xf1d: 0x4000, + 0xf1e: 0x4000, 0xf1f: 0x4000, 0xf20: 0x4000, 0xf21: 0x4000, 0xf22: 0x4000, 0xf23: 0x4000, + 0xf24: 0x4000, 0xf25: 0x4000, 0xf26: 0x4000, 0xf27: 0x4000, 0xf28: 0x4000, 0xf29: 0x4000, + 0xf2a: 0x4000, 0xf2b: 0x4000, 0xf2c: 0x4000, 0xf2d: 0x4000, 0xf2e: 0x4000, 0xf2f: 0x4000, + 0xf30: 0x4000, 0xf31: 0x4000, 0xf32: 0x4000, 0xf33: 0x4000, 0xf34: 0x4000, 0xf35: 0x4000, + 0xf36: 0x4000, 0xf37: 0x4000, 0xf38: 0x4000, 0xf39: 0x4000, 0xf3a: 0x4000, 0xf3b: 0x4000, + 0xf3c: 0x4000, 0xf3d: 0x4000, 0xf3e: 0x4000, 0xf3f: 0x4000, + // Block 0x3d, offset 0xf40 + 0xf40: 0x4000, 0xf41: 0x4000, 0xf42: 0x4000, 0xf43: 0x4000, 0xf44: 0x4000, 0xf45: 0x4000, + 0xf46: 0x4000, + // Block 0x3e, offset 0xf80 + 0xfa0: 0x4000, 0xfa1: 0x4000, 0xfa2: 0x4000, 0xfa3: 0x4000, + 0xfa4: 0x4000, 0xfa5: 0x4000, 0xfa6: 0x4000, 0xfa7: 0x4000, 0xfa8: 0x4000, 0xfa9: 0x4000, + 0xfaa: 0x4000, 0xfab: 0x4000, 0xfac: 0x4000, 0xfad: 0x4000, 0xfae: 0x4000, 0xfaf: 0x4000, + 0xfb0: 0x4000, 0xfb1: 0x4000, 0xfb2: 0x4000, 0xfb3: 0x4000, 0xfb4: 0x4000, 0xfb5: 0x4000, + 0xfb6: 0x4000, 0xfb7: 0x4000, 0xfb8: 0x4000, 0xfb9: 0x4000, 0xfba: 0x4000, 0xfbb: 0x4000, + 0xfbc: 0x4000, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x4000, 0xfc1: 0x4000, 0xfc2: 0x4000, 0xfc3: 0x4000, 0xfc4: 0x4000, 0xfc5: 0x4000, + 0xfc6: 0x4000, 0xfc7: 0x4000, 0xfc8: 0x4000, 0xfc9: 0x4000, 0xfca: 0x4000, 0xfcb: 0x4000, + 0xfcc: 0x4000, 0xfcd: 0x4000, 0xfce: 0x4000, 0xfcf: 0x4000, 0xfd0: 0x4000, 0xfd1: 0x4000, + 0xfd2: 0x4000, 0xfd3: 0x4000, 0xfd4: 0x4000, 0xfd5: 0x4000, 0xfd6: 0x4000, 0xfd7: 0x4000, + 0xfd8: 0x4000, 0xfd9: 0x4000, 0xfda: 
0x4000, 0xfdb: 0x4000, 0xfdc: 0x4000, 0xfdd: 0x4000, + 0xfde: 0x4000, 0xfdf: 0x4000, 0xfe0: 0x4000, 0xfe1: 0x4000, 0xfe2: 0x4000, 0xfe3: 0x4000, + // Block 0x40, offset 0x1000 + 0x1000: 0x2000, 0x1001: 0x2000, 0x1002: 0x2000, 0x1003: 0x2000, 0x1004: 0x2000, 0x1005: 0x2000, + 0x1006: 0x2000, 0x1007: 0x2000, 0x1008: 0x2000, 0x1009: 0x2000, 0x100a: 0x2000, 0x100b: 0x2000, + 0x100c: 0x2000, 0x100d: 0x2000, 0x100e: 0x2000, 0x100f: 0x2000, 0x1010: 0x4000, 0x1011: 0x4000, + 0x1012: 0x4000, 0x1013: 0x4000, 0x1014: 0x4000, 0x1015: 0x4000, 0x1016: 0x4000, 0x1017: 0x4000, + 0x1018: 0x4000, 0x1019: 0x4000, + 0x1030: 0x4000, 0x1031: 0x4000, 0x1032: 0x4000, 0x1033: 0x4000, 0x1034: 0x4000, 0x1035: 0x4000, + 0x1036: 0x4000, 0x1037: 0x4000, 0x1038: 0x4000, 0x1039: 0x4000, 0x103a: 0x4000, 0x103b: 0x4000, + 0x103c: 0x4000, 0x103d: 0x4000, 0x103e: 0x4000, 0x103f: 0x4000, + // Block 0x41, offset 0x1040 + 0x1040: 0x4000, 0x1041: 0x4000, 0x1042: 0x4000, 0x1043: 0x4000, 0x1044: 0x4000, 0x1045: 0x4000, + 0x1046: 0x4000, 0x1047: 0x4000, 0x1048: 0x4000, 0x1049: 0x4000, 0x104a: 0x4000, 0x104b: 0x4000, + 0x104c: 0x4000, 0x104d: 0x4000, 0x104e: 0x4000, 0x104f: 0x4000, 0x1050: 0x4000, 0x1051: 0x4000, + 0x1052: 0x4000, 0x1054: 0x4000, 0x1055: 0x4000, 0x1056: 0x4000, 0x1057: 0x4000, + 0x1058: 0x4000, 0x1059: 0x4000, 0x105a: 0x4000, 0x105b: 0x4000, 0x105c: 0x4000, 0x105d: 0x4000, + 0x105e: 0x4000, 0x105f: 0x4000, 0x1060: 0x4000, 0x1061: 0x4000, 0x1062: 0x4000, 0x1063: 0x4000, + 0x1064: 0x4000, 0x1065: 0x4000, 0x1066: 0x4000, 0x1068: 0x4000, 0x1069: 0x4000, + 0x106a: 0x4000, 0x106b: 0x4000, + // Block 0x42, offset 0x1080 + 0x1081: 0x9012, 0x1082: 0x9012, 0x1083: 0x9012, 0x1084: 0x9012, 0x1085: 0x9012, + 0x1086: 0x9012, 0x1087: 0x9012, 0x1088: 0x9012, 0x1089: 0x9012, 0x108a: 0x9012, 0x108b: 0x9012, + 0x108c: 0x9012, 0x108d: 0x9012, 0x108e: 0x9012, 0x108f: 0x9012, 0x1090: 0x9012, 0x1091: 0x9012, + 0x1092: 0x9012, 0x1093: 0x9012, 0x1094: 0x9012, 0x1095: 0x9012, 0x1096: 0x9012, 0x1097: 0x9012, + 0x1098: 0x9012, 0x1099: 0x9012, 0x109a: 0x9012, 0x109b: 0x9012, 0x109c: 0x9012, 0x109d: 0x9012, + 0x109e: 0x9012, 0x109f: 0x9012, 0x10a0: 0x9049, 0x10a1: 0x9049, 0x10a2: 0x9049, 0x10a3: 0x9049, + 0x10a4: 0x9049, 0x10a5: 0x9049, 0x10a6: 0x9049, 0x10a7: 0x9049, 0x10a8: 0x9049, 0x10a9: 0x9049, + 0x10aa: 0x9049, 0x10ab: 0x9049, 0x10ac: 0x9049, 0x10ad: 0x9049, 0x10ae: 0x9049, 0x10af: 0x9049, + 0x10b0: 0x9049, 0x10b1: 0x9049, 0x10b2: 0x9049, 0x10b3: 0x9049, 0x10b4: 0x9049, 0x10b5: 0x9049, + 0x10b6: 0x9049, 0x10b7: 0x9049, 0x10b8: 0x9049, 0x10b9: 0x9049, 0x10ba: 0x9049, 0x10bb: 0x9049, + 0x10bc: 0x9049, 0x10bd: 0x9049, 0x10be: 0x9049, 0x10bf: 0x9049, + // Block 0x43, offset 0x10c0 + 0x10c0: 0x9049, 0x10c1: 0x9049, 0x10c2: 0x9049, 0x10c3: 0x9049, 0x10c4: 0x9049, 0x10c5: 0x9049, + 0x10c6: 0x9049, 0x10c7: 0x9049, 0x10c8: 0x9049, 0x10c9: 0x9049, 0x10ca: 0x9049, 0x10cb: 0x9049, + 0x10cc: 0x9049, 0x10cd: 0x9049, 0x10ce: 0x9049, 0x10cf: 0x9049, 0x10d0: 0x9049, 0x10d1: 0x9049, + 0x10d2: 0x9049, 0x10d3: 0x9049, 0x10d4: 0x9049, 0x10d5: 0x9049, 0x10d6: 0x9049, 0x10d7: 0x9049, + 0x10d8: 0x9049, 0x10d9: 0x9049, 0x10da: 0x9049, 0x10db: 0x9049, 0x10dc: 0x9049, 0x10dd: 0x9049, + 0x10de: 0x9049, 0x10df: 0x904a, 0x10e0: 0x904b, 0x10e1: 0xb04c, 0x10e2: 0xb04d, 0x10e3: 0xb04d, + 0x10e4: 0xb04e, 0x10e5: 0xb04f, 0x10e6: 0xb050, 0x10e7: 0xb051, 0x10e8: 0xb052, 0x10e9: 0xb053, + 0x10ea: 0xb054, 0x10eb: 0xb055, 0x10ec: 0xb056, 0x10ed: 0xb057, 0x10ee: 0xb058, 0x10ef: 0xb059, + 0x10f0: 0xb05a, 0x10f1: 0xb05b, 0x10f2: 0xb05c, 0x10f3: 0xb05d, 0x10f4: 0xb05e, 0x10f5: 
0xb05f, + 0x10f6: 0xb060, 0x10f7: 0xb061, 0x10f8: 0xb062, 0x10f9: 0xb063, 0x10fa: 0xb064, 0x10fb: 0xb065, + 0x10fc: 0xb052, 0x10fd: 0xb066, 0x10fe: 0xb067, 0x10ff: 0xb055, + // Block 0x44, offset 0x1100 + 0x1100: 0xb068, 0x1101: 0xb069, 0x1102: 0xb06a, 0x1103: 0xb06b, 0x1104: 0xb05a, 0x1105: 0xb056, + 0x1106: 0xb06c, 0x1107: 0xb06d, 0x1108: 0xb06b, 0x1109: 0xb06e, 0x110a: 0xb06b, 0x110b: 0xb06f, + 0x110c: 0xb06f, 0x110d: 0xb070, 0x110e: 0xb070, 0x110f: 0xb071, 0x1110: 0xb056, 0x1111: 0xb072, + 0x1112: 0xb073, 0x1113: 0xb072, 0x1114: 0xb074, 0x1115: 0xb073, 0x1116: 0xb075, 0x1117: 0xb075, + 0x1118: 0xb076, 0x1119: 0xb076, 0x111a: 0xb077, 0x111b: 0xb077, 0x111c: 0xb073, 0x111d: 0xb078, + 0x111e: 0xb079, 0x111f: 0xb067, 0x1120: 0xb07a, 0x1121: 0xb07b, 0x1122: 0xb07b, 0x1123: 0xb07b, + 0x1124: 0xb07b, 0x1125: 0xb07b, 0x1126: 0xb07b, 0x1127: 0xb07b, 0x1128: 0xb07b, 0x1129: 0xb07b, + 0x112a: 0xb07b, 0x112b: 0xb07b, 0x112c: 0xb07b, 0x112d: 0xb07b, 0x112e: 0xb07b, 0x112f: 0xb07b, + 0x1130: 0xb07c, 0x1131: 0xb07c, 0x1132: 0xb07c, 0x1133: 0xb07c, 0x1134: 0xb07c, 0x1135: 0xb07c, + 0x1136: 0xb07c, 0x1137: 0xb07c, 0x1138: 0xb07c, 0x1139: 0xb07c, 0x113a: 0xb07c, 0x113b: 0xb07c, + 0x113c: 0xb07c, 0x113d: 0xb07c, 0x113e: 0xb07c, + // Block 0x45, offset 0x1140 + 0x1142: 0xb07d, 0x1143: 0xb07e, 0x1144: 0xb07f, 0x1145: 0xb080, + 0x1146: 0xb07f, 0x1147: 0xb07e, 0x114a: 0xb081, 0x114b: 0xb082, + 0x114c: 0xb083, 0x114d: 0xb07f, 0x114e: 0xb080, 0x114f: 0xb07f, + 0x1152: 0xb084, 0x1153: 0xb085, 0x1154: 0xb084, 0x1155: 0xb086, 0x1156: 0xb084, 0x1157: 0xb087, + 0x115a: 0xb088, 0x115b: 0xb089, 0x115c: 0xb08a, + 0x1160: 0x908b, 0x1161: 0x908b, 0x1162: 0x908c, 0x1163: 0x908d, + 0x1164: 0x908b, 0x1165: 0x908e, 0x1166: 0x908f, 0x1168: 0xb090, 0x1169: 0xb091, + 0x116a: 0xb092, 0x116b: 0xb091, 0x116c: 0xb093, 0x116d: 0xb094, 0x116e: 0xb095, + 0x117d: 0x2000, + // Block 0x46, offset 0x1180 + 0x11a0: 0x4000, 0x11a1: 0x4000, 0x11a2: 0x4000, 0x11a3: 0x4000, + 0x11a4: 0x4000, + 0x11b0: 0x4000, 0x11b1: 0x4000, + // Block 0x47, offset 0x11c0 + 0x11c0: 0x4000, 0x11c1: 0x4000, 0x11c2: 0x4000, 0x11c3: 0x4000, 0x11c4: 0x4000, 0x11c5: 0x4000, + 0x11c6: 0x4000, 0x11c7: 0x4000, 0x11c8: 0x4000, 0x11c9: 0x4000, 0x11ca: 0x4000, 0x11cb: 0x4000, + 0x11cc: 0x4000, 0x11cd: 0x4000, 0x11ce: 0x4000, 0x11cf: 0x4000, 0x11d0: 0x4000, 0x11d1: 0x4000, + 0x11d2: 0x4000, 0x11d3: 0x4000, 0x11d4: 0x4000, 0x11d5: 0x4000, 0x11d6: 0x4000, 0x11d7: 0x4000, + 0x11d8: 0x4000, 0x11d9: 0x4000, 0x11da: 0x4000, 0x11db: 0x4000, 0x11dc: 0x4000, 0x11dd: 0x4000, + 0x11de: 0x4000, 0x11df: 0x4000, 0x11e0: 0x4000, 0x11e1: 0x4000, 0x11e2: 0x4000, 0x11e3: 0x4000, + 0x11e4: 0x4000, 0x11e5: 0x4000, 0x11e6: 0x4000, 0x11e7: 0x4000, 0x11e8: 0x4000, 0x11e9: 0x4000, + 0x11ea: 0x4000, 0x11eb: 0x4000, 0x11ec: 0x4000, 0x11ed: 0x4000, 0x11ee: 0x4000, 0x11ef: 0x4000, + 0x11f0: 0x4000, 0x11f1: 0x4000, 0x11f2: 0x4000, 0x11f3: 0x4000, 0x11f4: 0x4000, 0x11f5: 0x4000, + 0x11f6: 0x4000, 0x11f7: 0x4000, + // Block 0x48, offset 0x1200 + 0x1200: 0x4000, 0x1201: 0x4000, 0x1202: 0x4000, 0x1203: 0x4000, 0x1204: 0x4000, 0x1205: 0x4000, + 0x1206: 0x4000, 0x1207: 0x4000, 0x1208: 0x4000, 0x1209: 0x4000, 0x120a: 0x4000, 0x120b: 0x4000, + 0x120c: 0x4000, 0x120d: 0x4000, 0x120e: 0x4000, 0x120f: 0x4000, 0x1210: 0x4000, 0x1211: 0x4000, + 0x1212: 0x4000, 0x1213: 0x4000, 0x1214: 0x4000, 0x1215: 0x4000, + // Block 0x49, offset 0x1240 + 0x1240: 0x4000, 0x1241: 0x4000, 0x1242: 0x4000, 0x1243: 0x4000, 0x1244: 0x4000, 0x1245: 0x4000, + 0x1246: 0x4000, 0x1247: 0x4000, 0x1248: 0x4000, + // Block 0x4a, offset 0x1280 
+ 0x1280: 0x4000, 0x1281: 0x4000, 0x1282: 0x4000, 0x1283: 0x4000, 0x1284: 0x4000, 0x1285: 0x4000, + 0x1286: 0x4000, 0x1287: 0x4000, 0x1288: 0x4000, 0x1289: 0x4000, 0x128a: 0x4000, 0x128b: 0x4000, + 0x128c: 0x4000, 0x128d: 0x4000, 0x128e: 0x4000, 0x128f: 0x4000, 0x1290: 0x4000, 0x1291: 0x4000, + 0x1292: 0x4000, 0x1293: 0x4000, 0x1294: 0x4000, 0x1295: 0x4000, 0x1296: 0x4000, 0x1297: 0x4000, + 0x1298: 0x4000, 0x1299: 0x4000, 0x129a: 0x4000, 0x129b: 0x4000, 0x129c: 0x4000, 0x129d: 0x4000, + 0x129e: 0x4000, + // Block 0x4b, offset 0x12c0 + 0x12d0: 0x4000, 0x12d1: 0x4000, + 0x12d2: 0x4000, + 0x12e4: 0x4000, 0x12e5: 0x4000, 0x12e6: 0x4000, 0x12e7: 0x4000, + 0x12f0: 0x4000, 0x12f1: 0x4000, 0x12f2: 0x4000, 0x12f3: 0x4000, 0x12f4: 0x4000, 0x12f5: 0x4000, + 0x12f6: 0x4000, 0x12f7: 0x4000, 0x12f8: 0x4000, 0x12f9: 0x4000, 0x12fa: 0x4000, 0x12fb: 0x4000, + 0x12fc: 0x4000, 0x12fd: 0x4000, 0x12fe: 0x4000, 0x12ff: 0x4000, + // Block 0x4c, offset 0x1300 + 0x1300: 0x4000, 0x1301: 0x4000, 0x1302: 0x4000, 0x1303: 0x4000, 0x1304: 0x4000, 0x1305: 0x4000, + 0x1306: 0x4000, 0x1307: 0x4000, 0x1308: 0x4000, 0x1309: 0x4000, 0x130a: 0x4000, 0x130b: 0x4000, + 0x130c: 0x4000, 0x130d: 0x4000, 0x130e: 0x4000, 0x130f: 0x4000, 0x1310: 0x4000, 0x1311: 0x4000, + 0x1312: 0x4000, 0x1313: 0x4000, 0x1314: 0x4000, 0x1315: 0x4000, 0x1316: 0x4000, 0x1317: 0x4000, + 0x1318: 0x4000, 0x1319: 0x4000, 0x131a: 0x4000, 0x131b: 0x4000, 0x131c: 0x4000, 0x131d: 0x4000, + 0x131e: 0x4000, 0x131f: 0x4000, 0x1320: 0x4000, 0x1321: 0x4000, 0x1322: 0x4000, 0x1323: 0x4000, + 0x1324: 0x4000, 0x1325: 0x4000, 0x1326: 0x4000, 0x1327: 0x4000, 0x1328: 0x4000, 0x1329: 0x4000, + 0x132a: 0x4000, 0x132b: 0x4000, 0x132c: 0x4000, 0x132d: 0x4000, 0x132e: 0x4000, 0x132f: 0x4000, + 0x1330: 0x4000, 0x1331: 0x4000, 0x1332: 0x4000, 0x1333: 0x4000, 0x1334: 0x4000, 0x1335: 0x4000, + 0x1336: 0x4000, 0x1337: 0x4000, 0x1338: 0x4000, 0x1339: 0x4000, 0x133a: 0x4000, 0x133b: 0x4000, + // Block 0x4d, offset 0x1340 + 0x1344: 0x4000, + // Block 0x4e, offset 0x1380 + 0x138f: 0x4000, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x2000, 0x13c1: 0x2000, 0x13c2: 0x2000, 0x13c3: 0x2000, 0x13c4: 0x2000, 0x13c5: 0x2000, + 0x13c6: 0x2000, 0x13c7: 0x2000, 0x13c8: 0x2000, 0x13c9: 0x2000, 0x13ca: 0x2000, + 0x13d0: 0x2000, 0x13d1: 0x2000, + 0x13d2: 0x2000, 0x13d3: 0x2000, 0x13d4: 0x2000, 0x13d5: 0x2000, 0x13d6: 0x2000, 0x13d7: 0x2000, + 0x13d8: 0x2000, 0x13d9: 0x2000, 0x13da: 0x2000, 0x13db: 0x2000, 0x13dc: 0x2000, 0x13dd: 0x2000, + 0x13de: 0x2000, 0x13df: 0x2000, 0x13e0: 0x2000, 0x13e1: 0x2000, 0x13e2: 0x2000, 0x13e3: 0x2000, + 0x13e4: 0x2000, 0x13e5: 0x2000, 0x13e6: 0x2000, 0x13e7: 0x2000, 0x13e8: 0x2000, 0x13e9: 0x2000, + 0x13ea: 0x2000, 0x13eb: 0x2000, 0x13ec: 0x2000, 0x13ed: 0x2000, + 0x13f0: 0x2000, 0x13f1: 0x2000, 0x13f2: 0x2000, 0x13f3: 0x2000, 0x13f4: 0x2000, 0x13f5: 0x2000, + 0x13f6: 0x2000, 0x13f7: 0x2000, 0x13f8: 0x2000, 0x13f9: 0x2000, 0x13fa: 0x2000, 0x13fb: 0x2000, + 0x13fc: 0x2000, 0x13fd: 0x2000, 0x13fe: 0x2000, 0x13ff: 0x2000, + // Block 0x50, offset 0x1400 + 0x1400: 0x2000, 0x1401: 0x2000, 0x1402: 0x2000, 0x1403: 0x2000, 0x1404: 0x2000, 0x1405: 0x2000, + 0x1406: 0x2000, 0x1407: 0x2000, 0x1408: 0x2000, 0x1409: 0x2000, 0x140a: 0x2000, 0x140b: 0x2000, + 0x140c: 0x2000, 0x140d: 0x2000, 0x140e: 0x2000, 0x140f: 0x2000, 0x1410: 0x2000, 0x1411: 0x2000, + 0x1412: 0x2000, 0x1413: 0x2000, 0x1414: 0x2000, 0x1415: 0x2000, 0x1416: 0x2000, 0x1417: 0x2000, + 0x1418: 0x2000, 0x1419: 0x2000, 0x141a: 0x2000, 0x141b: 0x2000, 0x141c: 0x2000, 0x141d: 0x2000, + 0x141e: 0x2000, 0x141f: 0x2000, 
0x1420: 0x2000, 0x1421: 0x2000, 0x1422: 0x2000, 0x1423: 0x2000, + 0x1424: 0x2000, 0x1425: 0x2000, 0x1426: 0x2000, 0x1427: 0x2000, 0x1428: 0x2000, 0x1429: 0x2000, + 0x1430: 0x2000, 0x1431: 0x2000, 0x1432: 0x2000, 0x1433: 0x2000, 0x1434: 0x2000, 0x1435: 0x2000, + 0x1436: 0x2000, 0x1437: 0x2000, 0x1438: 0x2000, 0x1439: 0x2000, 0x143a: 0x2000, 0x143b: 0x2000, + 0x143c: 0x2000, 0x143d: 0x2000, 0x143e: 0x2000, 0x143f: 0x2000, + // Block 0x51, offset 0x1440 + 0x1440: 0x2000, 0x1441: 0x2000, 0x1442: 0x2000, 0x1443: 0x2000, 0x1444: 0x2000, 0x1445: 0x2000, + 0x1446: 0x2000, 0x1447: 0x2000, 0x1448: 0x2000, 0x1449: 0x2000, 0x144a: 0x2000, 0x144b: 0x2000, + 0x144c: 0x2000, 0x144d: 0x2000, 0x144e: 0x4000, 0x144f: 0x2000, 0x1450: 0x2000, 0x1451: 0x4000, + 0x1452: 0x4000, 0x1453: 0x4000, 0x1454: 0x4000, 0x1455: 0x4000, 0x1456: 0x4000, 0x1457: 0x4000, + 0x1458: 0x4000, 0x1459: 0x4000, 0x145a: 0x4000, 0x145b: 0x2000, 0x145c: 0x2000, 0x145d: 0x2000, + 0x145e: 0x2000, 0x145f: 0x2000, 0x1460: 0x2000, 0x1461: 0x2000, 0x1462: 0x2000, 0x1463: 0x2000, + 0x1464: 0x2000, 0x1465: 0x2000, 0x1466: 0x2000, 0x1467: 0x2000, 0x1468: 0x2000, 0x1469: 0x2000, + 0x146a: 0x2000, 0x146b: 0x2000, 0x146c: 0x2000, + // Block 0x52, offset 0x1480 + 0x1480: 0x4000, 0x1481: 0x4000, 0x1482: 0x4000, + 0x1490: 0x4000, 0x1491: 0x4000, + 0x1492: 0x4000, 0x1493: 0x4000, 0x1494: 0x4000, 0x1495: 0x4000, 0x1496: 0x4000, 0x1497: 0x4000, + 0x1498: 0x4000, 0x1499: 0x4000, 0x149a: 0x4000, 0x149b: 0x4000, 0x149c: 0x4000, 0x149d: 0x4000, + 0x149e: 0x4000, 0x149f: 0x4000, 0x14a0: 0x4000, 0x14a1: 0x4000, 0x14a2: 0x4000, 0x14a3: 0x4000, + 0x14a4: 0x4000, 0x14a5: 0x4000, 0x14a6: 0x4000, 0x14a7: 0x4000, 0x14a8: 0x4000, 0x14a9: 0x4000, + 0x14aa: 0x4000, 0x14ab: 0x4000, 0x14ac: 0x4000, 0x14ad: 0x4000, 0x14ae: 0x4000, 0x14af: 0x4000, + 0x14b0: 0x4000, 0x14b1: 0x4000, 0x14b2: 0x4000, 0x14b3: 0x4000, 0x14b4: 0x4000, 0x14b5: 0x4000, + 0x14b6: 0x4000, 0x14b7: 0x4000, 0x14b8: 0x4000, 0x14b9: 0x4000, 0x14ba: 0x4000, 0x14bb: 0x4000, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x4000, 0x14c1: 0x4000, 0x14c2: 0x4000, 0x14c3: 0x4000, 0x14c4: 0x4000, 0x14c5: 0x4000, + 0x14c6: 0x4000, 0x14c7: 0x4000, 0x14c8: 0x4000, + 0x14d0: 0x4000, 0x14d1: 0x4000, + 0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000, + 0x14e4: 0x4000, 0x14e5: 0x4000, + // Block 0x54, offset 0x1500 + 0x1500: 0x4000, 0x1501: 0x4000, 0x1502: 0x4000, 0x1503: 0x4000, 0x1504: 0x4000, 0x1505: 0x4000, + 0x1506: 0x4000, 0x1507: 0x4000, 0x1508: 0x4000, 0x1509: 0x4000, 0x150a: 0x4000, 0x150b: 0x4000, + 0x150c: 0x4000, 0x150d: 0x4000, 0x150e: 0x4000, 0x150f: 0x4000, 0x1510: 0x4000, 0x1511: 0x4000, + 0x1512: 0x4000, 0x1513: 0x4000, 0x1514: 0x4000, 0x1515: 0x4000, 0x1516: 0x4000, 0x1517: 0x4000, + 0x1518: 0x4000, 0x1519: 0x4000, 0x151a: 0x4000, 0x151b: 0x4000, 0x151c: 0x4000, 0x151d: 0x4000, + 0x151e: 0x4000, 0x151f: 0x4000, 0x1520: 0x4000, + 0x152d: 0x4000, 0x152e: 0x4000, 0x152f: 0x4000, + 0x1530: 0x4000, 0x1531: 0x4000, 0x1532: 0x4000, 0x1533: 0x4000, 0x1534: 0x4000, 0x1535: 0x4000, + 0x1537: 0x4000, 0x1538: 0x4000, 0x1539: 0x4000, 0x153a: 0x4000, 0x153b: 0x4000, + 0x153c: 0x4000, 0x153d: 0x4000, 0x153e: 0x4000, 0x153f: 0x4000, + // Block 0x55, offset 0x1540 + 0x1540: 0x4000, 0x1541: 0x4000, 0x1542: 0x4000, 0x1543: 0x4000, 0x1544: 0x4000, 0x1545: 0x4000, + 0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000, 0x154b: 0x4000, + 0x154c: 0x4000, 0x154d: 0x4000, 0x154e: 0x4000, 0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000, + 0x1552: 0x4000, 0x1553: 0x4000, 0x1554: 0x4000, 0x1555: 
0x4000, 0x1556: 0x4000, 0x1557: 0x4000, + 0x1558: 0x4000, 0x1559: 0x4000, 0x155a: 0x4000, 0x155b: 0x4000, 0x155c: 0x4000, 0x155d: 0x4000, + 0x155e: 0x4000, 0x155f: 0x4000, 0x1560: 0x4000, 0x1561: 0x4000, 0x1562: 0x4000, 0x1563: 0x4000, + 0x1564: 0x4000, 0x1565: 0x4000, 0x1566: 0x4000, 0x1567: 0x4000, 0x1568: 0x4000, 0x1569: 0x4000, + 0x156a: 0x4000, 0x156b: 0x4000, 0x156c: 0x4000, 0x156d: 0x4000, 0x156e: 0x4000, 0x156f: 0x4000, + 0x1570: 0x4000, 0x1571: 0x4000, 0x1572: 0x4000, 0x1573: 0x4000, 0x1574: 0x4000, 0x1575: 0x4000, + 0x1576: 0x4000, 0x1577: 0x4000, 0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000, + 0x157c: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000, + // Block 0x56, offset 0x1580 + 0x1580: 0x4000, 0x1581: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000, + 0x1586: 0x4000, 0x1587: 0x4000, 0x1588: 0x4000, 0x1589: 0x4000, 0x158a: 0x4000, 0x158b: 0x4000, + 0x158c: 0x4000, 0x158d: 0x4000, 0x158e: 0x4000, 0x158f: 0x4000, 0x1590: 0x4000, 0x1591: 0x4000, + 0x1592: 0x4000, 0x1593: 0x4000, + 0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000, + 0x15a4: 0x4000, 0x15a5: 0x4000, 0x15a6: 0x4000, 0x15a7: 0x4000, 0x15a8: 0x4000, 0x15a9: 0x4000, + 0x15aa: 0x4000, 0x15ab: 0x4000, 0x15ac: 0x4000, 0x15ad: 0x4000, 0x15ae: 0x4000, 0x15af: 0x4000, + 0x15b0: 0x4000, 0x15b1: 0x4000, 0x15b2: 0x4000, 0x15b3: 0x4000, 0x15b4: 0x4000, 0x15b5: 0x4000, + 0x15b6: 0x4000, 0x15b7: 0x4000, 0x15b8: 0x4000, 0x15b9: 0x4000, 0x15ba: 0x4000, 0x15bb: 0x4000, + 0x15bc: 0x4000, 0x15bd: 0x4000, 0x15be: 0x4000, 0x15bf: 0x4000, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x4000, 0x15c1: 0x4000, 0x15c2: 0x4000, 0x15c3: 0x4000, 0x15c4: 0x4000, 0x15c5: 0x4000, + 0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000, + 0x15cf: 0x4000, 0x15d0: 0x4000, 0x15d1: 0x4000, + 0x15d2: 0x4000, 0x15d3: 0x4000, + 0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000, + 0x15e4: 0x4000, 0x15e5: 0x4000, 0x15e6: 0x4000, 0x15e7: 0x4000, 0x15e8: 0x4000, 0x15e9: 0x4000, + 0x15ea: 0x4000, 0x15eb: 0x4000, 0x15ec: 0x4000, 0x15ed: 0x4000, 0x15ee: 0x4000, 0x15ef: 0x4000, + 0x15f0: 0x4000, 0x15f4: 0x4000, + 0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000, + 0x15fc: 0x4000, 0x15fd: 0x4000, 0x15fe: 0x4000, 0x15ff: 0x4000, + // Block 0x58, offset 0x1600 + 0x1600: 0x4000, 0x1601: 0x4000, 0x1602: 0x4000, 0x1603: 0x4000, 0x1604: 0x4000, 0x1605: 0x4000, + 0x1606: 0x4000, 0x1607: 0x4000, 0x1608: 0x4000, 0x1609: 0x4000, 0x160a: 0x4000, 0x160b: 0x4000, + 0x160c: 0x4000, 0x160d: 0x4000, 0x160e: 0x4000, 0x160f: 0x4000, 0x1610: 0x4000, 0x1611: 0x4000, + 0x1612: 0x4000, 0x1613: 0x4000, 0x1614: 0x4000, 0x1615: 0x4000, 0x1616: 0x4000, 0x1617: 0x4000, + 0x1618: 0x4000, 0x1619: 0x4000, 0x161a: 0x4000, 0x161b: 0x4000, 0x161c: 0x4000, 0x161d: 0x4000, + 0x161e: 0x4000, 0x161f: 0x4000, 0x1620: 0x4000, 0x1621: 0x4000, 0x1622: 0x4000, 0x1623: 0x4000, + 0x1624: 0x4000, 0x1625: 0x4000, 0x1626: 0x4000, 0x1627: 0x4000, 0x1628: 0x4000, 0x1629: 0x4000, + 0x162a: 0x4000, 0x162b: 0x4000, 0x162c: 0x4000, 0x162d: 0x4000, 0x162e: 0x4000, 0x162f: 0x4000, + 0x1630: 0x4000, 0x1631: 0x4000, 0x1632: 0x4000, 0x1633: 0x4000, 0x1634: 0x4000, 0x1635: 0x4000, + 0x1636: 0x4000, 0x1637: 0x4000, 0x1638: 0x4000, 0x1639: 0x4000, 0x163a: 0x4000, 0x163b: 0x4000, + 0x163c: 0x4000, 0x163d: 0x4000, 0x163e: 0x4000, + // Block 0x59, offset 0x1640 + 0x1640: 0x4000, 0x1642: 0x4000, 0x1643: 0x4000, 0x1644: 0x4000, 0x1645: 0x4000, + 0x1646: 0x4000, 0x1647: 0x4000, 0x1648: 0x4000, 0x1649: 0x4000, 0x164a: 0x4000, 0x164b: 
0x4000, + 0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x164f: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000, + 0x1652: 0x4000, 0x1653: 0x4000, 0x1654: 0x4000, 0x1655: 0x4000, 0x1656: 0x4000, 0x1657: 0x4000, + 0x1658: 0x4000, 0x1659: 0x4000, 0x165a: 0x4000, 0x165b: 0x4000, 0x165c: 0x4000, 0x165d: 0x4000, + 0x165e: 0x4000, 0x165f: 0x4000, 0x1660: 0x4000, 0x1661: 0x4000, 0x1662: 0x4000, 0x1663: 0x4000, + 0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000, 0x1668: 0x4000, 0x1669: 0x4000, + 0x166a: 0x4000, 0x166b: 0x4000, 0x166c: 0x4000, 0x166d: 0x4000, 0x166e: 0x4000, 0x166f: 0x4000, + 0x1670: 0x4000, 0x1671: 0x4000, 0x1672: 0x4000, 0x1673: 0x4000, 0x1674: 0x4000, 0x1675: 0x4000, + 0x1676: 0x4000, 0x1677: 0x4000, 0x1678: 0x4000, 0x1679: 0x4000, 0x167a: 0x4000, 0x167b: 0x4000, + 0x167c: 0x4000, 0x167d: 0x4000, 0x167e: 0x4000, 0x167f: 0x4000, + // Block 0x5a, offset 0x1680 + 0x1680: 0x4000, 0x1681: 0x4000, 0x1682: 0x4000, 0x1683: 0x4000, 0x1684: 0x4000, 0x1685: 0x4000, + 0x1686: 0x4000, 0x1687: 0x4000, 0x1688: 0x4000, 0x1689: 0x4000, 0x168a: 0x4000, 0x168b: 0x4000, + 0x168c: 0x4000, 0x168d: 0x4000, 0x168e: 0x4000, 0x168f: 0x4000, 0x1690: 0x4000, 0x1691: 0x4000, + 0x1692: 0x4000, 0x1693: 0x4000, 0x1694: 0x4000, 0x1695: 0x4000, 0x1696: 0x4000, 0x1697: 0x4000, + 0x1698: 0x4000, 0x1699: 0x4000, 0x169a: 0x4000, 0x169b: 0x4000, 0x169c: 0x4000, 0x169d: 0x4000, + 0x169e: 0x4000, 0x169f: 0x4000, 0x16a0: 0x4000, 0x16a1: 0x4000, 0x16a2: 0x4000, 0x16a3: 0x4000, + 0x16a4: 0x4000, 0x16a5: 0x4000, 0x16a6: 0x4000, 0x16a7: 0x4000, 0x16a8: 0x4000, 0x16a9: 0x4000, + 0x16aa: 0x4000, 0x16ab: 0x4000, 0x16ac: 0x4000, 0x16ad: 0x4000, 0x16ae: 0x4000, 0x16af: 0x4000, + 0x16b0: 0x4000, 0x16b1: 0x4000, 0x16b2: 0x4000, 0x16b3: 0x4000, 0x16b4: 0x4000, 0x16b5: 0x4000, + 0x16b6: 0x4000, 0x16b7: 0x4000, 0x16b8: 0x4000, 0x16b9: 0x4000, 0x16ba: 0x4000, 0x16bb: 0x4000, + 0x16bc: 0x4000, 0x16bf: 0x4000, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x4000, 0x16c1: 0x4000, 0x16c2: 0x4000, 0x16c3: 0x4000, 0x16c4: 0x4000, 0x16c5: 0x4000, + 0x16c6: 0x4000, 0x16c7: 0x4000, 0x16c8: 0x4000, 0x16c9: 0x4000, 0x16ca: 0x4000, 0x16cb: 0x4000, + 0x16cc: 0x4000, 0x16cd: 0x4000, 0x16ce: 0x4000, 0x16cf: 0x4000, 0x16d0: 0x4000, 0x16d1: 0x4000, + 0x16d2: 0x4000, 0x16d3: 0x4000, 0x16d4: 0x4000, 0x16d5: 0x4000, 0x16d6: 0x4000, 0x16d7: 0x4000, + 0x16d8: 0x4000, 0x16d9: 0x4000, 0x16da: 0x4000, 0x16db: 0x4000, 0x16dc: 0x4000, 0x16dd: 0x4000, + 0x16de: 0x4000, 0x16df: 0x4000, 0x16e0: 0x4000, 0x16e1: 0x4000, 0x16e2: 0x4000, 0x16e3: 0x4000, + 0x16e4: 0x4000, 0x16e5: 0x4000, 0x16e6: 0x4000, 0x16e7: 0x4000, 0x16e8: 0x4000, 0x16e9: 0x4000, + 0x16ea: 0x4000, 0x16eb: 0x4000, 0x16ec: 0x4000, 0x16ed: 0x4000, 0x16ee: 0x4000, 0x16ef: 0x4000, + 0x16f0: 0x4000, 0x16f1: 0x4000, 0x16f2: 0x4000, 0x16f3: 0x4000, 0x16f4: 0x4000, 0x16f5: 0x4000, + 0x16f6: 0x4000, 0x16f7: 0x4000, 0x16f8: 0x4000, 0x16f9: 0x4000, 0x16fa: 0x4000, 0x16fb: 0x4000, + 0x16fc: 0x4000, 0x16fd: 0x4000, + // Block 0x5c, offset 0x1700 + 0x170b: 0x4000, + 0x170c: 0x4000, 0x170d: 0x4000, 0x170e: 0x4000, 0x1710: 0x4000, 0x1711: 0x4000, + 0x1712: 0x4000, 0x1713: 0x4000, 0x1714: 0x4000, 0x1715: 0x4000, 0x1716: 0x4000, 0x1717: 0x4000, + 0x1718: 0x4000, 0x1719: 0x4000, 0x171a: 0x4000, 0x171b: 0x4000, 0x171c: 0x4000, 0x171d: 0x4000, + 0x171e: 0x4000, 0x171f: 0x4000, 0x1720: 0x4000, 0x1721: 0x4000, 0x1722: 0x4000, 0x1723: 0x4000, + 0x1724: 0x4000, 0x1725: 0x4000, 0x1726: 0x4000, 0x1727: 0x4000, + 0x173a: 0x4000, + // Block 0x5d, offset 0x1740 + 0x1755: 0x4000, 0x1756: 0x4000, + 0x1764: 0x4000, + // Block 
0x5e, offset 0x1780 + 0x17bb: 0x4000, + 0x17bc: 0x4000, 0x17bd: 0x4000, 0x17be: 0x4000, 0x17bf: 0x4000, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x4000, 0x17c1: 0x4000, 0x17c2: 0x4000, 0x17c3: 0x4000, 0x17c4: 0x4000, 0x17c5: 0x4000, + 0x17c6: 0x4000, 0x17c7: 0x4000, 0x17c8: 0x4000, 0x17c9: 0x4000, 0x17ca: 0x4000, 0x17cb: 0x4000, + 0x17cc: 0x4000, 0x17cd: 0x4000, 0x17ce: 0x4000, 0x17cf: 0x4000, + // Block 0x60, offset 0x1800 + 0x1800: 0x4000, 0x1801: 0x4000, 0x1802: 0x4000, 0x1803: 0x4000, 0x1804: 0x4000, 0x1805: 0x4000, + 0x180c: 0x4000, 0x1810: 0x4000, 0x1811: 0x4000, + 0x1812: 0x4000, 0x1815: 0x4000, 0x1816: 0x4000, 0x1817: 0x4000, + 0x182b: 0x4000, 0x182c: 0x4000, + 0x1834: 0x4000, 0x1835: 0x4000, + 0x1836: 0x4000, 0x1837: 0x4000, 0x1838: 0x4000, 0x1839: 0x4000, 0x183a: 0x4000, 0x183b: 0x4000, + 0x183c: 0x4000, + // Block 0x61, offset 0x1840 + 0x1860: 0x4000, 0x1861: 0x4000, 0x1862: 0x4000, 0x1863: 0x4000, + 0x1864: 0x4000, 0x1865: 0x4000, 0x1866: 0x4000, 0x1867: 0x4000, 0x1868: 0x4000, 0x1869: 0x4000, + 0x186a: 0x4000, 0x186b: 0x4000, + // Block 0x62, offset 0x1880 + 0x188c: 0x4000, 0x188d: 0x4000, 0x188e: 0x4000, 0x188f: 0x4000, 0x1890: 0x4000, 0x1891: 0x4000, + 0x1892: 0x4000, 0x1893: 0x4000, 0x1894: 0x4000, 0x1895: 0x4000, 0x1896: 0x4000, 0x1897: 0x4000, + 0x1898: 0x4000, 0x1899: 0x4000, 0x189a: 0x4000, 0x189b: 0x4000, 0x189c: 0x4000, 0x189d: 0x4000, + 0x189e: 0x4000, 0x189f: 0x4000, 0x18a0: 0x4000, 0x18a1: 0x4000, 0x18a2: 0x4000, 0x18a3: 0x4000, + 0x18a4: 0x4000, 0x18a5: 0x4000, 0x18a6: 0x4000, 0x18a7: 0x4000, 0x18a8: 0x4000, 0x18a9: 0x4000, + 0x18aa: 0x4000, 0x18ab: 0x4000, 0x18ac: 0x4000, 0x18ad: 0x4000, 0x18ae: 0x4000, 0x18af: 0x4000, + 0x18b0: 0x4000, 0x18b1: 0x4000, 0x18b2: 0x4000, 0x18b3: 0x4000, 0x18b4: 0x4000, 0x18b5: 0x4000, + 0x18b6: 0x4000, 0x18b7: 0x4000, 0x18b8: 0x4000, 0x18b9: 0x4000, 0x18ba: 0x4000, + 0x18bc: 0x4000, 0x18bd: 0x4000, 0x18be: 0x4000, 0x18bf: 0x4000, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x4000, 0x18c1: 0x4000, 0x18c2: 0x4000, 0x18c3: 0x4000, 0x18c4: 0x4000, 0x18c5: 0x4000, + 0x18c7: 0x4000, 0x18c8: 0x4000, 0x18c9: 0x4000, 0x18ca: 0x4000, 0x18cb: 0x4000, + 0x18cc: 0x4000, 0x18cd: 0x4000, 0x18ce: 0x4000, 0x18cf: 0x4000, 0x18d0: 0x4000, 0x18d1: 0x4000, + 0x18d2: 0x4000, 0x18d3: 0x4000, 0x18d4: 0x4000, 0x18d5: 0x4000, 0x18d6: 0x4000, 0x18d7: 0x4000, + 0x18d8: 0x4000, 0x18d9: 0x4000, 0x18da: 0x4000, 0x18db: 0x4000, 0x18dc: 0x4000, 0x18dd: 0x4000, + 0x18de: 0x4000, 0x18df: 0x4000, 0x18e0: 0x4000, 0x18e1: 0x4000, 0x18e2: 0x4000, 0x18e3: 0x4000, + 0x18e4: 0x4000, 0x18e5: 0x4000, 0x18e6: 0x4000, 0x18e7: 0x4000, 0x18e8: 0x4000, 0x18e9: 0x4000, + 0x18ea: 0x4000, 0x18eb: 0x4000, 0x18ec: 0x4000, 0x18ed: 0x4000, 0x18ee: 0x4000, 0x18ef: 0x4000, + 0x18f0: 0x4000, 0x18f1: 0x4000, 0x18f2: 0x4000, 0x18f3: 0x4000, 0x18f4: 0x4000, 0x18f5: 0x4000, + 0x18f6: 0x4000, 0x18f7: 0x4000, 0x18f8: 0x4000, 0x18fa: 0x4000, 0x18fb: 0x4000, + 0x18fc: 0x4000, 0x18fd: 0x4000, 0x18fe: 0x4000, 0x18ff: 0x4000, + // Block 0x64, offset 0x1900 + 0x1900: 0x4000, 0x1901: 0x4000, 0x1902: 0x4000, 0x1903: 0x4000, 0x1904: 0x4000, 0x1905: 0x4000, + 0x1906: 0x4000, 0x1907: 0x4000, 0x1908: 0x4000, 0x1909: 0x4000, 0x190a: 0x4000, 0x190b: 0x4000, + 0x190d: 0x4000, 0x190e: 0x4000, 0x190f: 0x4000, 0x1910: 0x4000, 0x1911: 0x4000, + 0x1912: 0x4000, 0x1913: 0x4000, 0x1914: 0x4000, 0x1915: 0x4000, 0x1916: 0x4000, 0x1917: 0x4000, + 0x1918: 0x4000, 0x1919: 0x4000, 0x191a: 0x4000, 0x191b: 0x4000, 0x191c: 0x4000, 0x191d: 0x4000, + 0x191e: 0x4000, 0x191f: 0x4000, 0x1920: 0x4000, 0x1921: 0x4000, 0x1922: 0x4000, 
0x1923: 0x4000, + 0x1924: 0x4000, 0x1925: 0x4000, 0x1926: 0x4000, 0x1927: 0x4000, 0x1928: 0x4000, 0x1929: 0x4000, + 0x192a: 0x4000, 0x192b: 0x4000, 0x192c: 0x4000, 0x192d: 0x4000, 0x192e: 0x4000, 0x192f: 0x4000, + 0x1930: 0x4000, 0x1931: 0x4000, 0x1932: 0x4000, 0x1933: 0x4000, 0x1934: 0x4000, 0x1935: 0x4000, + 0x1936: 0x4000, 0x1937: 0x4000, 0x1938: 0x4000, 0x1939: 0x4000, 0x193a: 0x4000, 0x193b: 0x4000, + 0x193c: 0x4000, 0x193d: 0x4000, 0x193e: 0x4000, 0x193f: 0x4000, + // Block 0x65, offset 0x1940 + 0x1970: 0x4000, 0x1971: 0x4000, 0x1972: 0x4000, 0x1973: 0x4000, 0x1974: 0x4000, + 0x1978: 0x4000, 0x1979: 0x4000, 0x197a: 0x4000, + // Block 0x66, offset 0x1980 + 0x1980: 0x4000, 0x1981: 0x4000, 0x1982: 0x4000, 0x1983: 0x4000, 0x1984: 0x4000, 0x1985: 0x4000, + 0x1986: 0x4000, + 0x1990: 0x4000, 0x1991: 0x4000, + 0x1992: 0x4000, 0x1993: 0x4000, 0x1994: 0x4000, 0x1995: 0x4000, 0x1996: 0x4000, 0x1997: 0x4000, + 0x1998: 0x4000, 0x1999: 0x4000, 0x199a: 0x4000, 0x199b: 0x4000, 0x199c: 0x4000, 0x199d: 0x4000, + 0x199e: 0x4000, 0x199f: 0x4000, 0x19a0: 0x4000, 0x19a1: 0x4000, 0x19a2: 0x4000, 0x19a3: 0x4000, + 0x19a4: 0x4000, 0x19a5: 0x4000, 0x19a6: 0x4000, 0x19a7: 0x4000, 0x19a8: 0x4000, + 0x19b0: 0x4000, 0x19b1: 0x4000, 0x19b2: 0x4000, 0x19b3: 0x4000, 0x19b4: 0x4000, 0x19b5: 0x4000, + 0x19b6: 0x4000, + // Block 0x67, offset 0x19c0 + 0x19c0: 0x4000, 0x19c1: 0x4000, 0x19c2: 0x4000, + 0x19d0: 0x4000, 0x19d1: 0x4000, + 0x19d2: 0x4000, 0x19d3: 0x4000, 0x19d4: 0x4000, 0x19d5: 0x4000, 0x19d6: 0x4000, + // Block 0x68, offset 0x1a00 + 0x1a00: 0x2000, 0x1a01: 0x2000, 0x1a02: 0x2000, 0x1a03: 0x2000, 0x1a04: 0x2000, 0x1a05: 0x2000, + 0x1a06: 0x2000, 0x1a07: 0x2000, 0x1a08: 0x2000, 0x1a09: 0x2000, 0x1a0a: 0x2000, 0x1a0b: 0x2000, + 0x1a0c: 0x2000, 0x1a0d: 0x2000, 0x1a0e: 0x2000, 0x1a0f: 0x2000, 0x1a10: 0x2000, 0x1a11: 0x2000, + 0x1a12: 0x2000, 0x1a13: 0x2000, 0x1a14: 0x2000, 0x1a15: 0x2000, 0x1a16: 0x2000, 0x1a17: 0x2000, + 0x1a18: 0x2000, 0x1a19: 0x2000, 0x1a1a: 0x2000, 0x1a1b: 0x2000, 0x1a1c: 0x2000, 0x1a1d: 0x2000, + 0x1a1e: 0x2000, 0x1a1f: 0x2000, 0x1a20: 0x2000, 0x1a21: 0x2000, 0x1a22: 0x2000, 0x1a23: 0x2000, + 0x1a24: 0x2000, 0x1a25: 0x2000, 0x1a26: 0x2000, 0x1a27: 0x2000, 0x1a28: 0x2000, 0x1a29: 0x2000, + 0x1a2a: 0x2000, 0x1a2b: 0x2000, 0x1a2c: 0x2000, 0x1a2d: 0x2000, 0x1a2e: 0x2000, 0x1a2f: 0x2000, + 0x1a30: 0x2000, 0x1a31: 0x2000, 0x1a32: 0x2000, 0x1a33: 0x2000, 0x1a34: 0x2000, 0x1a35: 0x2000, + 0x1a36: 0x2000, 0x1a37: 0x2000, 0x1a38: 0x2000, 0x1a39: 0x2000, 0x1a3a: 0x2000, 0x1a3b: 0x2000, + 0x1a3c: 0x2000, 0x1a3d: 0x2000, +} + +// widthIndex: 22 blocks, 1408 entries, 1408 bytes +// Block 0 is the zero block. 
+var widthIndex = [1408]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc7: 0x05, + 0xc9: 0x06, 0xcb: 0x07, 0xcc: 0x08, 0xcd: 0x09, 0xce: 0x0a, 0xcf: 0x0b, + 0xd0: 0x0c, 0xd1: 0x0d, + 0xe1: 0x02, 0xe2: 0x03, 0xe3: 0x04, 0xe4: 0x05, 0xe5: 0x06, 0xe6: 0x06, 0xe7: 0x06, + 0xe8: 0x06, 0xe9: 0x06, 0xea: 0x07, 0xeb: 0x06, 0xec: 0x06, 0xed: 0x08, 0xee: 0x09, 0xef: 0x0a, + 0xf0: 0x0f, 0xf3: 0x12, 0xf4: 0x13, + // Block 0x4, offset 0x100 + 0x104: 0x0e, 0x105: 0x0f, + // Block 0x5, offset 0x140 + 0x140: 0x10, 0x141: 0x11, 0x142: 0x12, 0x144: 0x13, 0x145: 0x14, 0x146: 0x15, 0x147: 0x16, + 0x148: 0x17, 0x149: 0x18, 0x14a: 0x19, 0x14c: 0x1a, 0x14f: 0x1b, + 0x151: 0x1c, 0x152: 0x08, 0x153: 0x1d, 0x154: 0x1e, 0x155: 0x1f, 0x156: 0x20, 0x157: 0x21, + 0x158: 0x22, 0x159: 0x23, 0x15a: 0x24, 0x15b: 0x25, 0x15c: 0x26, 0x15d: 0x27, 0x15e: 0x28, 0x15f: 0x29, + 0x166: 0x2a, + 0x16c: 0x2b, 0x16d: 0x2c, + 0x17a: 0x2d, 0x17b: 0x2e, 0x17c: 0x0e, 0x17d: 0x0e, 0x17e: 0x0e, 0x17f: 0x2f, + // Block 0x6, offset 0x180 + 0x180: 0x30, 0x181: 0x31, 0x182: 0x32, 0x183: 0x33, 0x184: 0x34, 0x185: 0x35, 0x186: 0x36, 0x187: 0x37, + 0x188: 0x38, 0x189: 0x39, 0x18a: 0x0e, 0x18b: 0x0e, 0x18c: 0x0e, 0x18d: 0x0e, 0x18e: 0x0e, 0x18f: 0x0e, + 0x190: 0x0e, 0x191: 0x0e, 0x192: 0x0e, 0x193: 0x0e, 0x194: 0x0e, 0x195: 0x0e, 0x196: 0x0e, 0x197: 0x0e, + 0x198: 0x0e, 0x199: 0x0e, 0x19a: 0x0e, 0x19b: 0x0e, 0x19c: 0x0e, 0x19d: 0x0e, 0x19e: 0x0e, 0x19f: 0x0e, + 0x1a0: 0x0e, 0x1a1: 0x0e, 0x1a2: 0x0e, 0x1a3: 0x0e, 0x1a4: 0x0e, 0x1a5: 0x0e, 0x1a6: 0x0e, 0x1a7: 0x0e, + 0x1a8: 0x0e, 0x1a9: 0x0e, 0x1aa: 0x0e, 0x1ab: 0x0e, 0x1ac: 0x0e, 0x1ad: 0x0e, 0x1ae: 0x0e, 0x1af: 0x0e, + 0x1b0: 0x0e, 0x1b1: 0x0e, 0x1b2: 0x0e, 0x1b3: 0x0e, 0x1b4: 0x0e, 0x1b5: 0x0e, 0x1b6: 0x0e, 0x1b7: 0x0e, + 0x1b8: 0x0e, 0x1b9: 0x0e, 0x1ba: 0x0e, 0x1bb: 0x0e, 0x1bc: 0x0e, 0x1bd: 0x0e, 0x1be: 0x0e, 0x1bf: 0x0e, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0e, 0x1c1: 0x0e, 0x1c2: 0x0e, 0x1c3: 0x0e, 0x1c4: 0x0e, 0x1c5: 0x0e, 0x1c6: 0x0e, 0x1c7: 0x0e, + 0x1c8: 0x0e, 0x1c9: 0x0e, 0x1ca: 0x0e, 0x1cb: 0x0e, 0x1cc: 0x0e, 0x1cd: 0x0e, 0x1ce: 0x0e, 0x1cf: 0x0e, + 0x1d0: 0x0e, 0x1d1: 0x0e, 0x1d2: 0x0e, 0x1d3: 0x0e, 0x1d4: 0x0e, 0x1d5: 0x0e, 0x1d6: 0x0e, 0x1d7: 0x0e, + 0x1d8: 0x0e, 0x1d9: 0x0e, 0x1da: 0x0e, 0x1db: 0x0e, 0x1dc: 0x0e, 0x1dd: 0x0e, 0x1de: 0x0e, 0x1df: 0x0e, + 0x1e0: 0x0e, 0x1e1: 0x0e, 0x1e2: 0x0e, 0x1e3: 0x0e, 0x1e4: 0x0e, 0x1e5: 0x0e, 0x1e6: 0x0e, 0x1e7: 0x0e, + 0x1e8: 0x0e, 0x1e9: 0x0e, 0x1ea: 0x0e, 0x1eb: 0x0e, 0x1ec: 0x0e, 0x1ed: 0x0e, 0x1ee: 0x0e, 0x1ef: 0x0e, + 0x1f0: 0x0e, 0x1f1: 0x0e, 0x1f2: 0x0e, 0x1f3: 0x0e, 0x1f4: 0x0e, 0x1f5: 0x0e, 0x1f6: 0x0e, + 0x1f8: 0x0e, 0x1f9: 0x0e, 0x1fa: 0x0e, 0x1fb: 0x0e, 0x1fc: 0x0e, 0x1fd: 0x0e, 0x1fe: 0x0e, 0x1ff: 0x0e, + // Block 0x8, offset 0x200 + 0x200: 0x0e, 0x201: 0x0e, 0x202: 0x0e, 0x203: 0x0e, 0x204: 0x0e, 0x205: 0x0e, 0x206: 0x0e, 0x207: 0x0e, + 0x208: 0x0e, 0x209: 0x0e, 0x20a: 0x0e, 0x20b: 0x0e, 0x20c: 0x0e, 0x20d: 0x0e, 0x20e: 0x0e, 0x20f: 0x0e, + 0x210: 0x0e, 0x211: 0x0e, 0x212: 0x0e, 0x213: 0x0e, 0x214: 0x0e, 0x215: 0x0e, 0x216: 0x0e, 0x217: 0x0e, + 0x218: 0x0e, 0x219: 0x0e, 0x21a: 0x0e, 0x21b: 0x0e, 0x21c: 0x0e, 0x21d: 0x0e, 0x21e: 0x0e, 0x21f: 0x0e, + 0x220: 0x0e, 0x221: 0x0e, 0x222: 0x0e, 0x223: 0x0e, 0x224: 0x0e, 0x225: 0x0e, 0x226: 0x0e, 0x227: 0x0e, + 0x228: 0x0e, 0x229: 0x0e, 0x22a: 0x0e, 0x22b: 0x0e, 0x22c: 0x0e, 0x22d: 0x0e, 0x22e: 0x0e, 0x22f: 0x0e, + 0x230: 0x0e, 0x231: 0x0e, 0x232: 0x0e, 0x233: 0x0e, 
0x234: 0x0e, 0x235: 0x0e, 0x236: 0x0e, 0x237: 0x0e, + 0x238: 0x0e, 0x239: 0x0e, 0x23a: 0x0e, 0x23b: 0x0e, 0x23c: 0x0e, 0x23d: 0x0e, 0x23e: 0x0e, 0x23f: 0x0e, + // Block 0x9, offset 0x240 + 0x240: 0x0e, 0x241: 0x0e, 0x242: 0x0e, 0x243: 0x0e, 0x244: 0x0e, 0x245: 0x0e, 0x246: 0x0e, 0x247: 0x0e, + 0x248: 0x0e, 0x249: 0x0e, 0x24a: 0x0e, 0x24b: 0x0e, 0x24c: 0x0e, 0x24d: 0x0e, 0x24e: 0x0e, 0x24f: 0x0e, + 0x250: 0x0e, 0x251: 0x0e, 0x252: 0x3a, 0x253: 0x3b, + 0x265: 0x3c, + 0x270: 0x0e, 0x271: 0x0e, 0x272: 0x0e, 0x273: 0x0e, 0x274: 0x0e, 0x275: 0x0e, 0x276: 0x0e, 0x277: 0x0e, + 0x278: 0x0e, 0x279: 0x0e, 0x27a: 0x0e, 0x27b: 0x0e, 0x27c: 0x0e, 0x27d: 0x0e, 0x27e: 0x0e, 0x27f: 0x0e, + // Block 0xa, offset 0x280 + 0x280: 0x0e, 0x281: 0x0e, 0x282: 0x0e, 0x283: 0x0e, 0x284: 0x0e, 0x285: 0x0e, 0x286: 0x0e, 0x287: 0x0e, + 0x288: 0x0e, 0x289: 0x0e, 0x28a: 0x0e, 0x28b: 0x0e, 0x28c: 0x0e, 0x28d: 0x0e, 0x28e: 0x0e, 0x28f: 0x0e, + 0x290: 0x0e, 0x291: 0x0e, 0x292: 0x0e, 0x293: 0x0e, 0x294: 0x0e, 0x295: 0x0e, 0x296: 0x0e, 0x297: 0x0e, + 0x298: 0x0e, 0x299: 0x0e, 0x29a: 0x0e, 0x29b: 0x0e, 0x29c: 0x0e, 0x29d: 0x0e, 0x29e: 0x3d, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x08, 0x2c1: 0x08, 0x2c2: 0x08, 0x2c3: 0x08, 0x2c4: 0x08, 0x2c5: 0x08, 0x2c6: 0x08, 0x2c7: 0x08, + 0x2c8: 0x08, 0x2c9: 0x08, 0x2ca: 0x08, 0x2cb: 0x08, 0x2cc: 0x08, 0x2cd: 0x08, 0x2ce: 0x08, 0x2cf: 0x08, + 0x2d0: 0x08, 0x2d1: 0x08, 0x2d2: 0x08, 0x2d3: 0x08, 0x2d4: 0x08, 0x2d5: 0x08, 0x2d6: 0x08, 0x2d7: 0x08, + 0x2d8: 0x08, 0x2d9: 0x08, 0x2da: 0x08, 0x2db: 0x08, 0x2dc: 0x08, 0x2dd: 0x08, 0x2de: 0x08, 0x2df: 0x08, + 0x2e0: 0x08, 0x2e1: 0x08, 0x2e2: 0x08, 0x2e3: 0x08, 0x2e4: 0x08, 0x2e5: 0x08, 0x2e6: 0x08, 0x2e7: 0x08, + 0x2e8: 0x08, 0x2e9: 0x08, 0x2ea: 0x08, 0x2eb: 0x08, 0x2ec: 0x08, 0x2ed: 0x08, 0x2ee: 0x08, 0x2ef: 0x08, + 0x2f0: 0x08, 0x2f1: 0x08, 0x2f2: 0x08, 0x2f3: 0x08, 0x2f4: 0x08, 0x2f5: 0x08, 0x2f6: 0x08, 0x2f7: 0x08, + 0x2f8: 0x08, 0x2f9: 0x08, 0x2fa: 0x08, 0x2fb: 0x08, 0x2fc: 0x08, 0x2fd: 0x08, 0x2fe: 0x08, 0x2ff: 0x08, + // Block 0xc, offset 0x300 + 0x300: 0x08, 0x301: 0x08, 0x302: 0x08, 0x303: 0x08, 0x304: 0x08, 0x305: 0x08, 0x306: 0x08, 0x307: 0x08, + 0x308: 0x08, 0x309: 0x08, 0x30a: 0x08, 0x30b: 0x08, 0x30c: 0x08, 0x30d: 0x08, 0x30e: 0x08, 0x30f: 0x08, + 0x310: 0x08, 0x311: 0x08, 0x312: 0x08, 0x313: 0x08, 0x314: 0x08, 0x315: 0x08, 0x316: 0x08, 0x317: 0x08, + 0x318: 0x08, 0x319: 0x08, 0x31a: 0x08, 0x31b: 0x08, 0x31c: 0x08, 0x31d: 0x08, 0x31e: 0x08, 0x31f: 0x08, + 0x320: 0x08, 0x321: 0x08, 0x322: 0x08, 0x323: 0x08, 0x324: 0x0e, 0x325: 0x0e, 0x326: 0x0e, 0x327: 0x0e, + 0x328: 0x0e, 0x329: 0x0e, 0x32a: 0x0e, 0x32b: 0x0e, + 0x338: 0x3e, 0x339: 0x3f, 0x33c: 0x40, 0x33d: 0x41, 0x33e: 0x42, 0x33f: 0x43, + // Block 0xd, offset 0x340 + 0x37f: 0x44, + // Block 0xe, offset 0x380 + 0x380: 0x0e, 0x381: 0x0e, 0x382: 0x0e, 0x383: 0x0e, 0x384: 0x0e, 0x385: 0x0e, 0x386: 0x0e, 0x387: 0x0e, + 0x388: 0x0e, 0x389: 0x0e, 0x38a: 0x0e, 0x38b: 0x0e, 0x38c: 0x0e, 0x38d: 0x0e, 0x38e: 0x0e, 0x38f: 0x0e, + 0x390: 0x0e, 0x391: 0x0e, 0x392: 0x0e, 0x393: 0x0e, 0x394: 0x0e, 0x395: 0x0e, 0x396: 0x0e, 0x397: 0x0e, + 0x398: 0x0e, 0x399: 0x0e, 0x39a: 0x0e, 0x39b: 0x0e, 0x39c: 0x0e, 0x39d: 0x0e, 0x39e: 0x0e, 0x39f: 0x45, + 0x3a0: 0x0e, 0x3a1: 0x0e, 0x3a2: 0x0e, 0x3a3: 0x0e, 0x3a4: 0x0e, 0x3a5: 0x0e, 0x3a6: 0x0e, 0x3a7: 0x0e, + 0x3a8: 0x0e, 0x3a9: 0x0e, 0x3aa: 0x0e, 0x3ab: 0x0e, 0x3ac: 0x0e, 0x3ad: 0x0e, 0x3ae: 0x0e, 0x3af: 0x0e, + 0x3b0: 0x0e, 0x3b1: 0x0e, 0x3b2: 0x0e, 0x3b3: 0x46, 0x3b4: 0x47, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x0e, 0x3c1: 0x0e, 0x3c2: 0x0e, 0x3c3: 0x0e, 
0x3c4: 0x48, 0x3c5: 0x49, 0x3c6: 0x0e, 0x3c7: 0x0e, + 0x3c8: 0x0e, 0x3c9: 0x0e, 0x3ca: 0x0e, 0x3cb: 0x4a, + // Block 0x10, offset 0x400 + 0x400: 0x4b, 0x403: 0x4c, 0x404: 0x4d, 0x405: 0x4e, 0x406: 0x4f, + 0x408: 0x50, 0x409: 0x51, 0x40c: 0x52, 0x40d: 0x53, 0x40e: 0x54, 0x40f: 0x55, + 0x410: 0x56, 0x411: 0x57, 0x412: 0x0e, 0x413: 0x58, 0x414: 0x59, 0x415: 0x5a, 0x416: 0x5b, 0x417: 0x5c, + 0x418: 0x0e, 0x419: 0x5d, 0x41a: 0x0e, 0x41b: 0x5e, 0x41f: 0x5f, + 0x424: 0x60, 0x425: 0x61, 0x426: 0x0e, 0x427: 0x62, + 0x429: 0x63, 0x42a: 0x64, 0x42b: 0x65, + // Block 0x11, offset 0x440 + 0x456: 0x0b, 0x457: 0x06, + 0x458: 0x0c, 0x45b: 0x0d, 0x45f: 0x0e, + 0x460: 0x06, 0x461: 0x06, 0x462: 0x06, 0x463: 0x06, 0x464: 0x06, 0x465: 0x06, 0x466: 0x06, 0x467: 0x06, + 0x468: 0x06, 0x469: 0x06, 0x46a: 0x06, 0x46b: 0x06, 0x46c: 0x06, 0x46d: 0x06, 0x46e: 0x06, 0x46f: 0x06, + 0x470: 0x06, 0x471: 0x06, 0x472: 0x06, 0x473: 0x06, 0x474: 0x06, 0x475: 0x06, 0x476: 0x06, 0x477: 0x06, + 0x478: 0x06, 0x479: 0x06, 0x47a: 0x06, 0x47b: 0x06, 0x47c: 0x06, 0x47d: 0x06, 0x47e: 0x06, 0x47f: 0x06, + // Block 0x12, offset 0x480 + 0x484: 0x08, 0x485: 0x08, 0x486: 0x08, 0x487: 0x09, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x08, 0x4c1: 0x08, 0x4c2: 0x08, 0x4c3: 0x08, 0x4c4: 0x08, 0x4c5: 0x08, 0x4c6: 0x08, 0x4c7: 0x08, + 0x4c8: 0x08, 0x4c9: 0x08, 0x4ca: 0x08, 0x4cb: 0x08, 0x4cc: 0x08, 0x4cd: 0x08, 0x4ce: 0x08, 0x4cf: 0x08, + 0x4d0: 0x08, 0x4d1: 0x08, 0x4d2: 0x08, 0x4d3: 0x08, 0x4d4: 0x08, 0x4d5: 0x08, 0x4d6: 0x08, 0x4d7: 0x08, + 0x4d8: 0x08, 0x4d9: 0x08, 0x4da: 0x08, 0x4db: 0x08, 0x4dc: 0x08, 0x4dd: 0x08, 0x4de: 0x08, 0x4df: 0x08, + 0x4e0: 0x08, 0x4e1: 0x08, 0x4e2: 0x08, 0x4e3: 0x08, 0x4e4: 0x08, 0x4e5: 0x08, 0x4e6: 0x08, 0x4e7: 0x08, + 0x4e8: 0x08, 0x4e9: 0x08, 0x4ea: 0x08, 0x4eb: 0x08, 0x4ec: 0x08, 0x4ed: 0x08, 0x4ee: 0x08, 0x4ef: 0x08, + 0x4f0: 0x08, 0x4f1: 0x08, 0x4f2: 0x08, 0x4f3: 0x08, 0x4f4: 0x08, 0x4f5: 0x08, 0x4f6: 0x08, 0x4f7: 0x08, + 0x4f8: 0x08, 0x4f9: 0x08, 0x4fa: 0x08, 0x4fb: 0x08, 0x4fc: 0x08, 0x4fd: 0x08, 0x4fe: 0x08, 0x4ff: 0x66, + // Block 0x14, offset 0x500 + 0x520: 0x10, + 0x530: 0x09, 0x531: 0x09, 0x532: 0x09, 0x533: 0x09, 0x534: 0x09, 0x535: 0x09, 0x536: 0x09, 0x537: 0x09, + 0x538: 0x09, 0x539: 0x09, 0x53a: 0x09, 0x53b: 0x09, 0x53c: 0x09, 0x53d: 0x09, 0x53e: 0x09, 0x53f: 0x11, + // Block 0x15, offset 0x540 + 0x540: 0x09, 0x541: 0x09, 0x542: 0x09, 0x543: 0x09, 0x544: 0x09, 0x545: 0x09, 0x546: 0x09, 0x547: 0x09, + 0x548: 0x09, 0x549: 0x09, 0x54a: 0x09, 0x54b: 0x09, 0x54c: 0x09, 0x54d: 0x09, 0x54e: 0x09, 0x54f: 0x11, +} + +// inverseData contains 4-byte entries of the following format: +// +// <length> <modified UTF-8-encoded rune> <0 padding> +// +// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the +// UTF-8 encoding of the original rune. Mappings often have the following +// pattern: +// +// A -> A (U+FF21 -> U+0041) +// B -> B (U+FF22 -> U+0042) +// ... +// +// By xor-ing the last byte the same entry can be shared by many mappings. This +// reduces the total number of distinct entries by about two thirds. +// The resulting entry for the aforementioned mappings is +// +// { 0x01, 0xE0, 0x00, 0x00 } +// +// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get +// +// E0 ^ A1 = 41. +// +// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get +// +// E0 ^ A2 = 42. +// +// Note that because of the xor-ing, the byte sequence stored in the entry is +// not valid UTF-8. 
+var inverseData = [150][4]byte{ + {0x00, 0x00, 0x00, 0x00}, + {0x03, 0xe3, 0x80, 0xa0}, + {0x03, 0xef, 0xbc, 0xa0}, + {0x03, 0xef, 0xbc, 0xe0}, + {0x03, 0xef, 0xbd, 0xe0}, + {0x03, 0xef, 0xbf, 0x02}, + {0x03, 0xef, 0xbf, 0x00}, + {0x03, 0xef, 0xbf, 0x0e}, + {0x03, 0xef, 0xbf, 0x0c}, + {0x03, 0xef, 0xbf, 0x0f}, + {0x03, 0xef, 0xbf, 0x39}, + {0x03, 0xef, 0xbf, 0x3b}, + {0x03, 0xef, 0xbf, 0x3f}, + {0x03, 0xef, 0xbf, 0x2a}, + {0x03, 0xef, 0xbf, 0x0d}, + {0x03, 0xef, 0xbf, 0x25}, + {0x03, 0xef, 0xbd, 0x1a}, + {0x03, 0xef, 0xbd, 0x26}, + {0x01, 0xa0, 0x00, 0x00}, + {0x03, 0xef, 0xbd, 0x25}, + {0x03, 0xef, 0xbd, 0x23}, + {0x03, 0xef, 0xbd, 0x2e}, + {0x03, 0xef, 0xbe, 0x07}, + {0x03, 0xef, 0xbe, 0x05}, + {0x03, 0xef, 0xbd, 0x06}, + {0x03, 0xef, 0xbd, 0x13}, + {0x03, 0xef, 0xbd, 0x0b}, + {0x03, 0xef, 0xbd, 0x16}, + {0x03, 0xef, 0xbd, 0x0c}, + {0x03, 0xef, 0xbd, 0x15}, + {0x03, 0xef, 0xbd, 0x0d}, + {0x03, 0xef, 0xbd, 0x1c}, + {0x03, 0xef, 0xbd, 0x02}, + {0x03, 0xef, 0xbd, 0x1f}, + {0x03, 0xef, 0xbd, 0x1d}, + {0x03, 0xef, 0xbd, 0x17}, + {0x03, 0xef, 0xbd, 0x08}, + {0x03, 0xef, 0xbd, 0x09}, + {0x03, 0xef, 0xbd, 0x0e}, + {0x03, 0xef, 0xbd, 0x04}, + {0x03, 0xef, 0xbd, 0x05}, + {0x03, 0xef, 0xbe, 0x3f}, + {0x03, 0xef, 0xbe, 0x00}, + {0x03, 0xef, 0xbd, 0x2c}, + {0x03, 0xef, 0xbe, 0x06}, + {0x03, 0xef, 0xbe, 0x0c}, + {0x03, 0xef, 0xbe, 0x0f}, + {0x03, 0xef, 0xbe, 0x0d}, + {0x03, 0xef, 0xbe, 0x0b}, + {0x03, 0xef, 0xbe, 0x19}, + {0x03, 0xef, 0xbe, 0x15}, + {0x03, 0xef, 0xbe, 0x11}, + {0x03, 0xef, 0xbe, 0x31}, + {0x03, 0xef, 0xbe, 0x33}, + {0x03, 0xef, 0xbd, 0x0f}, + {0x03, 0xef, 0xbe, 0x30}, + {0x03, 0xef, 0xbe, 0x3e}, + {0x03, 0xef, 0xbe, 0x32}, + {0x03, 0xef, 0xbe, 0x36}, + {0x03, 0xef, 0xbd, 0x14}, + {0x03, 0xef, 0xbe, 0x2e}, + {0x03, 0xef, 0xbd, 0x1e}, + {0x03, 0xef, 0xbe, 0x10}, + {0x03, 0xef, 0xbf, 0x13}, + {0x03, 0xef, 0xbf, 0x15}, + {0x03, 0xef, 0xbf, 0x17}, + {0x03, 0xef, 0xbf, 0x1f}, + {0x03, 0xef, 0xbf, 0x1d}, + {0x03, 0xef, 0xbf, 0x1b}, + {0x03, 0xef, 0xbf, 0x09}, + {0x03, 0xef, 0xbf, 0x0b}, + {0x03, 0xef, 0xbf, 0x37}, + {0x03, 0xef, 0xbe, 0x04}, + {0x01, 0xe0, 0x00, 0x00}, + {0x03, 0xe2, 0xa6, 0x1a}, + {0x03, 0xe2, 0xa6, 0x26}, + {0x03, 0xe3, 0x80, 0x23}, + {0x03, 0xe3, 0x80, 0x2e}, + {0x03, 0xe3, 0x80, 0x25}, + {0x03, 0xe3, 0x83, 0x1e}, + {0x03, 0xe3, 0x83, 0x14}, + {0x03, 0xe3, 0x82, 0x06}, + {0x03, 0xe3, 0x82, 0x0b}, + {0x03, 0xe3, 0x82, 0x0c}, + {0x03, 0xe3, 0x82, 0x0d}, + {0x03, 0xe3, 0x82, 0x02}, + {0x03, 0xe3, 0x83, 0x0f}, + {0x03, 0xe3, 0x83, 0x08}, + {0x03, 0xe3, 0x83, 0x09}, + {0x03, 0xe3, 0x83, 0x2c}, + {0x03, 0xe3, 0x83, 0x0c}, + {0x03, 0xe3, 0x82, 0x13}, + {0x03, 0xe3, 0x82, 0x16}, + {0x03, 0xe3, 0x82, 0x15}, + {0x03, 0xe3, 0x82, 0x1c}, + {0x03, 0xe3, 0x82, 0x1f}, + {0x03, 0xe3, 0x82, 0x1d}, + {0x03, 0xe3, 0x82, 0x1a}, + {0x03, 0xe3, 0x82, 0x17}, + {0x03, 0xe3, 0x82, 0x08}, + {0x03, 0xe3, 0x82, 0x09}, + {0x03, 0xe3, 0x82, 0x0e}, + {0x03, 0xe3, 0x82, 0x04}, + {0x03, 0xe3, 0x82, 0x05}, + {0x03, 0xe3, 0x82, 0x3f}, + {0x03, 0xe3, 0x83, 0x00}, + {0x03, 0xe3, 0x83, 0x06}, + {0x03, 0xe3, 0x83, 0x05}, + {0x03, 0xe3, 0x83, 0x0d}, + {0x03, 0xe3, 0x83, 0x0b}, + {0x03, 0xe3, 0x83, 0x07}, + {0x03, 0xe3, 0x83, 0x19}, + {0x03, 0xe3, 0x83, 0x15}, + {0x03, 0xe3, 0x83, 0x11}, + {0x03, 0xe3, 0x83, 0x31}, + {0x03, 0xe3, 0x83, 0x33}, + {0x03, 0xe3, 0x83, 0x30}, + {0x03, 0xe3, 0x83, 0x3e}, + {0x03, 0xe3, 0x83, 0x32}, + {0x03, 0xe3, 0x83, 0x36}, + {0x03, 0xe3, 0x83, 0x2e}, + {0x03, 0xe3, 0x82, 0x07}, + {0x03, 0xe3, 0x85, 0x04}, + {0x03, 0xe3, 0x84, 0x10}, + {0x03, 0xe3, 0x85, 0x30}, + {0x03, 0xe3, 0x85, 
0x0d}, + {0x03, 0xe3, 0x85, 0x13}, + {0x03, 0xe3, 0x85, 0x15}, + {0x03, 0xe3, 0x85, 0x17}, + {0x03, 0xe3, 0x85, 0x1f}, + {0x03, 0xe3, 0x85, 0x1d}, + {0x03, 0xe3, 0x85, 0x1b}, + {0x03, 0xe3, 0x85, 0x09}, + {0x03, 0xe3, 0x85, 0x0f}, + {0x03, 0xe3, 0x85, 0x0b}, + {0x03, 0xe3, 0x85, 0x37}, + {0x03, 0xe3, 0x85, 0x3b}, + {0x03, 0xe3, 0x85, 0x39}, + {0x03, 0xe3, 0x85, 0x3f}, + {0x02, 0xc2, 0x02, 0x00}, + {0x02, 0xc2, 0x0e, 0x00}, + {0x02, 0xc2, 0x0c, 0x00}, + {0x02, 0xc2, 0x00, 0x00}, + {0x03, 0xe2, 0x82, 0x0f}, + {0x03, 0xe2, 0x94, 0x2a}, + {0x03, 0xe2, 0x86, 0x39}, + {0x03, 0xe2, 0x86, 0x3b}, + {0x03, 0xe2, 0x86, 0x3f}, + {0x03, 0xe2, 0x96, 0x0d}, + {0x03, 0xe2, 0x97, 0x25}, +} + +// Total table size 15448 bytes (15KiB) diff --git a/vendor/golang.org/x/text/width/tables15.0.0.go b/vendor/golang.org/x/text/width/tables15.0.0.go new file mode 100644 index 00000000000..2b85289675e --- /dev/null +++ b/vendor/golang.org/x/text/width/tables15.0.0.go @@ -0,0 +1,1367 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +//go:build go1.21 + +package width + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "15.0.0" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *widthTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return widthValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = widthIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. 
+func (t *widthTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return widthValues[c0] + } + i := widthIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = widthIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = widthIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *widthTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return widthValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = widthIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *widthTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return widthValues[c0] + } + i := widthIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = widthIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = widthIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// widthTrie. Total size: 14912 bytes (14.56 KiB). Checksum: 4468b6cd178303d2. +type widthTrie struct{} + +func newWidthTrie(i int) *widthTrie { + return &widthTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *widthTrie) lookupValue(n uint32, b byte) uint16 { + switch { + default: + return uint16(widthValues[n<<6+uint32(b)]) + } +} + +// widthValues: 105 blocks, 6720 entries, 13440 bytes +// The third block is the zero block. 
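These generated tables are not meant to be called directly; they back the small documented API of golang.org/x/text/width. As a hedged illustration (not part of this diff), the sketch below uses the package's public entry points — LookupString, Properties.Kind, Properties.Narrow, and the Narrow transformer — which are served by the widthIndex/widthValues trie and the inverseData mappings generated here:

package main

import (
	"fmt"

	"golang.org/x/text/width"
)

func main() {
	// LookupString walks the widthIndex/widthValues trie one UTF-8
	// byte at a time and reports the properties of the first rune
	// plus its encoded size in bytes.
	p, sz := width.LookupString("Ａ") // U+FF21 FULLWIDTH LATIN CAPITAL LETTER A
	fmt.Println(p.Kind(), sz)         // EastAsianFullwidth 3

	// The narrow/wide/fold counterparts of a rune come from the
	// inverseData table in this file.
	fmt.Printf("%c\n", p.Narrow()) // A

	// The package-level transformers apply the same mapping to a
	// whole string.
	fmt.Println(width.Narrow.String("ＡＢＣ")) // ABC
}
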
+var widthValues = [6720]uint16{ + // Block 0x0, offset 0x0 + 0x20: 0x6001, 0x21: 0x6002, 0x22: 0x6002, 0x23: 0x6002, + 0x24: 0x6002, 0x25: 0x6002, 0x26: 0x6002, 0x27: 0x6002, 0x28: 0x6002, 0x29: 0x6002, + 0x2a: 0x6002, 0x2b: 0x6002, 0x2c: 0x6002, 0x2d: 0x6002, 0x2e: 0x6002, 0x2f: 0x6002, + 0x30: 0x6002, 0x31: 0x6002, 0x32: 0x6002, 0x33: 0x6002, 0x34: 0x6002, 0x35: 0x6002, + 0x36: 0x6002, 0x37: 0x6002, 0x38: 0x6002, 0x39: 0x6002, 0x3a: 0x6002, 0x3b: 0x6002, + 0x3c: 0x6002, 0x3d: 0x6002, 0x3e: 0x6002, 0x3f: 0x6002, + // Block 0x1, offset 0x40 + 0x40: 0x6003, 0x41: 0x6003, 0x42: 0x6003, 0x43: 0x6003, 0x44: 0x6003, 0x45: 0x6003, + 0x46: 0x6003, 0x47: 0x6003, 0x48: 0x6003, 0x49: 0x6003, 0x4a: 0x6003, 0x4b: 0x6003, + 0x4c: 0x6003, 0x4d: 0x6003, 0x4e: 0x6003, 0x4f: 0x6003, 0x50: 0x6003, 0x51: 0x6003, + 0x52: 0x6003, 0x53: 0x6003, 0x54: 0x6003, 0x55: 0x6003, 0x56: 0x6003, 0x57: 0x6003, + 0x58: 0x6003, 0x59: 0x6003, 0x5a: 0x6003, 0x5b: 0x6003, 0x5c: 0x6003, 0x5d: 0x6003, + 0x5e: 0x6003, 0x5f: 0x6003, 0x60: 0x6004, 0x61: 0x6004, 0x62: 0x6004, 0x63: 0x6004, + 0x64: 0x6004, 0x65: 0x6004, 0x66: 0x6004, 0x67: 0x6004, 0x68: 0x6004, 0x69: 0x6004, + 0x6a: 0x6004, 0x6b: 0x6004, 0x6c: 0x6004, 0x6d: 0x6004, 0x6e: 0x6004, 0x6f: 0x6004, + 0x70: 0x6004, 0x71: 0x6004, 0x72: 0x6004, 0x73: 0x6004, 0x74: 0x6004, 0x75: 0x6004, + 0x76: 0x6004, 0x77: 0x6004, 0x78: 0x6004, 0x79: 0x6004, 0x7a: 0x6004, 0x7b: 0x6004, + 0x7c: 0x6004, 0x7d: 0x6004, 0x7e: 0x6004, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xe1: 0x2000, 0xe2: 0x6005, 0xe3: 0x6005, + 0xe4: 0x2000, 0xe5: 0x6006, 0xe6: 0x6005, 0xe7: 0x2000, 0xe8: 0x2000, + 0xea: 0x2000, 0xec: 0x6007, 0xed: 0x2000, 0xee: 0x2000, 0xef: 0x6008, + 0xf0: 0x2000, 0xf1: 0x2000, 0xf2: 0x2000, 0xf3: 0x2000, 0xf4: 0x2000, + 0xf6: 0x2000, 0xf7: 0x2000, 0xf8: 0x2000, 0xf9: 0x2000, 0xfa: 0x2000, + 0xfc: 0x2000, 0xfd: 0x2000, 0xfe: 0x2000, 0xff: 0x2000, + // Block 0x4, offset 0x100 + 0x106: 0x2000, + 0x110: 0x2000, + 0x117: 0x2000, + 0x118: 0x2000, + 0x11e: 0x2000, 0x11f: 0x2000, 0x120: 0x2000, 0x121: 0x2000, + 0x126: 0x2000, 0x128: 0x2000, 0x129: 0x2000, + 0x12a: 0x2000, 0x12c: 0x2000, 0x12d: 0x2000, + 0x130: 0x2000, 0x132: 0x2000, 0x133: 0x2000, + 0x137: 0x2000, 0x138: 0x2000, 0x139: 0x2000, 0x13a: 0x2000, + 0x13c: 0x2000, 0x13e: 0x2000, + // Block 0x5, offset 0x140 + 0x141: 0x2000, + 0x151: 0x2000, + 0x153: 0x2000, + 0x15b: 0x2000, + 0x166: 0x2000, 0x167: 0x2000, + 0x16b: 0x2000, + 0x171: 0x2000, 0x172: 0x2000, 0x173: 0x2000, + 0x178: 0x2000, + 0x17f: 0x2000, + // Block 0x6, offset 0x180 + 0x180: 0x2000, 0x181: 0x2000, 0x182: 0x2000, 0x184: 0x2000, + 0x188: 0x2000, 0x189: 0x2000, 0x18a: 0x2000, 0x18b: 0x2000, + 0x18d: 0x2000, + 0x192: 0x2000, 0x193: 0x2000, + 0x1a6: 0x2000, 0x1a7: 0x2000, + 0x1ab: 0x2000, + // Block 0x7, offset 0x1c0 + 0x1ce: 0x2000, 0x1d0: 0x2000, + 0x1d2: 0x2000, 0x1d4: 0x2000, 0x1d6: 0x2000, + 0x1d8: 0x2000, 0x1da: 0x2000, 0x1dc: 0x2000, + // Block 0x8, offset 0x200 + 0x211: 0x2000, + 0x221: 0x2000, + // Block 0x9, offset 0x240 + 0x244: 0x2000, + 0x247: 0x2000, 0x249: 0x2000, 0x24a: 0x2000, 0x24b: 0x2000, + 0x24d: 0x2000, 0x250: 0x2000, + 0x258: 0x2000, 0x259: 0x2000, 0x25a: 0x2000, 0x25b: 0x2000, 0x25d: 0x2000, + 0x25f: 0x2000, + // Block 0xa, offset 0x280 + 0x280: 0x2000, 0x281: 0x2000, 0x282: 0x2000, 0x283: 0x2000, 0x284: 0x2000, 0x285: 0x2000, + 0x286: 0x2000, 0x287: 0x2000, 0x288: 0x2000, 0x289: 0x2000, 0x28a: 0x2000, 0x28b: 0x2000, + 0x28c: 0x2000, 0x28d: 0x2000, 0x28e: 0x2000, 0x28f: 0x2000, 0x290: 0x2000, 0x291: 0x2000, + 0x292: 0x2000, 0x293: 
0x2000, 0x294: 0x2000, 0x295: 0x2000, 0x296: 0x2000, 0x297: 0x2000, + 0x298: 0x2000, 0x299: 0x2000, 0x29a: 0x2000, 0x29b: 0x2000, 0x29c: 0x2000, 0x29d: 0x2000, + 0x29e: 0x2000, 0x29f: 0x2000, 0x2a0: 0x2000, 0x2a1: 0x2000, 0x2a2: 0x2000, 0x2a3: 0x2000, + 0x2a4: 0x2000, 0x2a5: 0x2000, 0x2a6: 0x2000, 0x2a7: 0x2000, 0x2a8: 0x2000, 0x2a9: 0x2000, + 0x2aa: 0x2000, 0x2ab: 0x2000, 0x2ac: 0x2000, 0x2ad: 0x2000, 0x2ae: 0x2000, 0x2af: 0x2000, + 0x2b0: 0x2000, 0x2b1: 0x2000, 0x2b2: 0x2000, 0x2b3: 0x2000, 0x2b4: 0x2000, 0x2b5: 0x2000, + 0x2b6: 0x2000, 0x2b7: 0x2000, 0x2b8: 0x2000, 0x2b9: 0x2000, 0x2ba: 0x2000, 0x2bb: 0x2000, + 0x2bc: 0x2000, 0x2bd: 0x2000, 0x2be: 0x2000, 0x2bf: 0x2000, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x2000, 0x2c1: 0x2000, 0x2c2: 0x2000, 0x2c3: 0x2000, 0x2c4: 0x2000, 0x2c5: 0x2000, + 0x2c6: 0x2000, 0x2c7: 0x2000, 0x2c8: 0x2000, 0x2c9: 0x2000, 0x2ca: 0x2000, 0x2cb: 0x2000, + 0x2cc: 0x2000, 0x2cd: 0x2000, 0x2ce: 0x2000, 0x2cf: 0x2000, 0x2d0: 0x2000, 0x2d1: 0x2000, + 0x2d2: 0x2000, 0x2d3: 0x2000, 0x2d4: 0x2000, 0x2d5: 0x2000, 0x2d6: 0x2000, 0x2d7: 0x2000, + 0x2d8: 0x2000, 0x2d9: 0x2000, 0x2da: 0x2000, 0x2db: 0x2000, 0x2dc: 0x2000, 0x2dd: 0x2000, + 0x2de: 0x2000, 0x2df: 0x2000, 0x2e0: 0x2000, 0x2e1: 0x2000, 0x2e2: 0x2000, 0x2e3: 0x2000, + 0x2e4: 0x2000, 0x2e5: 0x2000, 0x2e6: 0x2000, 0x2e7: 0x2000, 0x2e8: 0x2000, 0x2e9: 0x2000, + 0x2ea: 0x2000, 0x2eb: 0x2000, 0x2ec: 0x2000, 0x2ed: 0x2000, 0x2ee: 0x2000, 0x2ef: 0x2000, + // Block 0xc, offset 0x300 + 0x311: 0x2000, + 0x312: 0x2000, 0x313: 0x2000, 0x314: 0x2000, 0x315: 0x2000, 0x316: 0x2000, 0x317: 0x2000, + 0x318: 0x2000, 0x319: 0x2000, 0x31a: 0x2000, 0x31b: 0x2000, 0x31c: 0x2000, 0x31d: 0x2000, + 0x31e: 0x2000, 0x31f: 0x2000, 0x320: 0x2000, 0x321: 0x2000, 0x323: 0x2000, + 0x324: 0x2000, 0x325: 0x2000, 0x326: 0x2000, 0x327: 0x2000, 0x328: 0x2000, 0x329: 0x2000, + 0x331: 0x2000, 0x332: 0x2000, 0x333: 0x2000, 0x334: 0x2000, 0x335: 0x2000, + 0x336: 0x2000, 0x337: 0x2000, 0x338: 0x2000, 0x339: 0x2000, 0x33a: 0x2000, 0x33b: 0x2000, + 0x33c: 0x2000, 0x33d: 0x2000, 0x33e: 0x2000, 0x33f: 0x2000, + // Block 0xd, offset 0x340 + 0x340: 0x2000, 0x341: 0x2000, 0x343: 0x2000, 0x344: 0x2000, 0x345: 0x2000, + 0x346: 0x2000, 0x347: 0x2000, 0x348: 0x2000, 0x349: 0x2000, + // Block 0xe, offset 0x380 + 0x381: 0x2000, + 0x390: 0x2000, 0x391: 0x2000, + 0x392: 0x2000, 0x393: 0x2000, 0x394: 0x2000, 0x395: 0x2000, 0x396: 0x2000, 0x397: 0x2000, + 0x398: 0x2000, 0x399: 0x2000, 0x39a: 0x2000, 0x39b: 0x2000, 0x39c: 0x2000, 0x39d: 0x2000, + 0x39e: 0x2000, 0x39f: 0x2000, 0x3a0: 0x2000, 0x3a1: 0x2000, 0x3a2: 0x2000, 0x3a3: 0x2000, + 0x3a4: 0x2000, 0x3a5: 0x2000, 0x3a6: 0x2000, 0x3a7: 0x2000, 0x3a8: 0x2000, 0x3a9: 0x2000, + 0x3aa: 0x2000, 0x3ab: 0x2000, 0x3ac: 0x2000, 0x3ad: 0x2000, 0x3ae: 0x2000, 0x3af: 0x2000, + 0x3b0: 0x2000, 0x3b1: 0x2000, 0x3b2: 0x2000, 0x3b3: 0x2000, 0x3b4: 0x2000, 0x3b5: 0x2000, + 0x3b6: 0x2000, 0x3b7: 0x2000, 0x3b8: 0x2000, 0x3b9: 0x2000, 0x3ba: 0x2000, 0x3bb: 0x2000, + 0x3bc: 0x2000, 0x3bd: 0x2000, 0x3be: 0x2000, 0x3bf: 0x2000, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x2000, 0x3c1: 0x2000, 0x3c2: 0x2000, 0x3c3: 0x2000, 0x3c4: 0x2000, 0x3c5: 0x2000, + 0x3c6: 0x2000, 0x3c7: 0x2000, 0x3c8: 0x2000, 0x3c9: 0x2000, 0x3ca: 0x2000, 0x3cb: 0x2000, + 0x3cc: 0x2000, 0x3cd: 0x2000, 0x3ce: 0x2000, 0x3cf: 0x2000, 0x3d1: 0x2000, + // Block 0x10, offset 0x400 + 0x400: 0x4000, 0x401: 0x4000, 0x402: 0x4000, 0x403: 0x4000, 0x404: 0x4000, 0x405: 0x4000, + 0x406: 0x4000, 0x407: 0x4000, 0x408: 0x4000, 0x409: 0x4000, 0x40a: 0x4000, 0x40b: 0x4000, + 0x40c: 0x4000, 
0x40d: 0x4000, 0x40e: 0x4000, 0x40f: 0x4000, 0x410: 0x4000, 0x411: 0x4000, + 0x412: 0x4000, 0x413: 0x4000, 0x414: 0x4000, 0x415: 0x4000, 0x416: 0x4000, 0x417: 0x4000, + 0x418: 0x4000, 0x419: 0x4000, 0x41a: 0x4000, 0x41b: 0x4000, 0x41c: 0x4000, 0x41d: 0x4000, + 0x41e: 0x4000, 0x41f: 0x4000, 0x420: 0x4000, 0x421: 0x4000, 0x422: 0x4000, 0x423: 0x4000, + 0x424: 0x4000, 0x425: 0x4000, 0x426: 0x4000, 0x427: 0x4000, 0x428: 0x4000, 0x429: 0x4000, + 0x42a: 0x4000, 0x42b: 0x4000, 0x42c: 0x4000, 0x42d: 0x4000, 0x42e: 0x4000, 0x42f: 0x4000, + 0x430: 0x4000, 0x431: 0x4000, 0x432: 0x4000, 0x433: 0x4000, 0x434: 0x4000, 0x435: 0x4000, + 0x436: 0x4000, 0x437: 0x4000, 0x438: 0x4000, 0x439: 0x4000, 0x43a: 0x4000, 0x43b: 0x4000, + 0x43c: 0x4000, 0x43d: 0x4000, 0x43e: 0x4000, 0x43f: 0x4000, + // Block 0x11, offset 0x440 + 0x440: 0x4000, 0x441: 0x4000, 0x442: 0x4000, 0x443: 0x4000, 0x444: 0x4000, 0x445: 0x4000, + 0x446: 0x4000, 0x447: 0x4000, 0x448: 0x4000, 0x449: 0x4000, 0x44a: 0x4000, 0x44b: 0x4000, + 0x44c: 0x4000, 0x44d: 0x4000, 0x44e: 0x4000, 0x44f: 0x4000, 0x450: 0x4000, 0x451: 0x4000, + 0x452: 0x4000, 0x453: 0x4000, 0x454: 0x4000, 0x455: 0x4000, 0x456: 0x4000, 0x457: 0x4000, + 0x458: 0x4000, 0x459: 0x4000, 0x45a: 0x4000, 0x45b: 0x4000, 0x45c: 0x4000, 0x45d: 0x4000, + 0x45e: 0x4000, 0x45f: 0x4000, + // Block 0x12, offset 0x480 + 0x490: 0x2000, + 0x493: 0x2000, 0x494: 0x2000, 0x495: 0x2000, 0x496: 0x2000, + 0x498: 0x2000, 0x499: 0x2000, 0x49c: 0x2000, 0x49d: 0x2000, + 0x4a0: 0x2000, 0x4a1: 0x2000, 0x4a2: 0x2000, + 0x4a4: 0x2000, 0x4a5: 0x2000, 0x4a6: 0x2000, 0x4a7: 0x2000, + 0x4b0: 0x2000, 0x4b2: 0x2000, 0x4b3: 0x2000, 0x4b5: 0x2000, + 0x4bb: 0x2000, + 0x4be: 0x2000, + // Block 0x13, offset 0x4c0 + 0x4f4: 0x2000, + 0x4ff: 0x2000, + // Block 0x14, offset 0x500 + 0x501: 0x2000, 0x502: 0x2000, 0x503: 0x2000, 0x504: 0x2000, + 0x529: 0xa009, + 0x52c: 0x2000, + // Block 0x15, offset 0x540 + 0x543: 0x2000, 0x545: 0x2000, + 0x549: 0x2000, + 0x553: 0x2000, 0x556: 0x2000, + 0x561: 0x2000, 0x562: 0x2000, + 0x566: 0x2000, + 0x56b: 0x2000, + // Block 0x16, offset 0x580 + 0x593: 0x2000, 0x594: 0x2000, + 0x59b: 0x2000, 0x59c: 0x2000, 0x59d: 0x2000, + 0x59e: 0x2000, 0x5a0: 0x2000, 0x5a1: 0x2000, 0x5a2: 0x2000, 0x5a3: 0x2000, + 0x5a4: 0x2000, 0x5a5: 0x2000, 0x5a6: 0x2000, 0x5a7: 0x2000, 0x5a8: 0x2000, 0x5a9: 0x2000, + 0x5aa: 0x2000, 0x5ab: 0x2000, + 0x5b0: 0x2000, 0x5b1: 0x2000, 0x5b2: 0x2000, 0x5b3: 0x2000, 0x5b4: 0x2000, 0x5b5: 0x2000, + 0x5b6: 0x2000, 0x5b7: 0x2000, 0x5b8: 0x2000, 0x5b9: 0x2000, + // Block 0x17, offset 0x5c0 + 0x5c9: 0x2000, + 0x5d0: 0x200a, 0x5d1: 0x200b, + 0x5d2: 0x200a, 0x5d3: 0x200c, 0x5d4: 0x2000, 0x5d5: 0x2000, 0x5d6: 0x2000, 0x5d7: 0x2000, + 0x5d8: 0x2000, 0x5d9: 0x2000, + 0x5f8: 0x2000, 0x5f9: 0x2000, + // Block 0x18, offset 0x600 + 0x612: 0x2000, 0x614: 0x2000, + 0x627: 0x2000, + // Block 0x19, offset 0x640 + 0x640: 0x2000, 0x642: 0x2000, 0x643: 0x2000, + 0x647: 0x2000, 0x648: 0x2000, 0x64b: 0x2000, + 0x64f: 0x2000, 0x651: 0x2000, + 0x655: 0x2000, + 0x65a: 0x2000, 0x65d: 0x2000, + 0x65e: 0x2000, 0x65f: 0x2000, 0x660: 0x2000, 0x663: 0x2000, + 0x665: 0x2000, 0x667: 0x2000, 0x668: 0x2000, 0x669: 0x2000, + 0x66a: 0x2000, 0x66b: 0x2000, 0x66c: 0x2000, 0x66e: 0x2000, + 0x674: 0x2000, 0x675: 0x2000, + 0x676: 0x2000, 0x677: 0x2000, + 0x67c: 0x2000, 0x67d: 0x2000, + // Block 0x1a, offset 0x680 + 0x688: 0x2000, + 0x68c: 0x2000, + 0x692: 0x2000, + 0x6a0: 0x2000, 0x6a1: 0x2000, + 0x6a4: 0x2000, 0x6a5: 0x2000, 0x6a6: 0x2000, 0x6a7: 0x2000, + 0x6aa: 0x2000, 0x6ab: 0x2000, 0x6ae: 0x2000, 0x6af: 0x2000, + // 
Block 0x1b, offset 0x6c0 + 0x6c2: 0x2000, 0x6c3: 0x2000, + 0x6c6: 0x2000, 0x6c7: 0x2000, + 0x6d5: 0x2000, + 0x6d9: 0x2000, + 0x6e5: 0x2000, + 0x6ff: 0x2000, + // Block 0x1c, offset 0x700 + 0x712: 0x2000, + 0x71a: 0x4000, 0x71b: 0x4000, + 0x729: 0x4000, + 0x72a: 0x4000, + // Block 0x1d, offset 0x740 + 0x769: 0x4000, + 0x76a: 0x4000, 0x76b: 0x4000, 0x76c: 0x4000, + 0x770: 0x4000, 0x773: 0x4000, + // Block 0x1e, offset 0x780 + 0x7a0: 0x2000, 0x7a1: 0x2000, 0x7a2: 0x2000, 0x7a3: 0x2000, + 0x7a4: 0x2000, 0x7a5: 0x2000, 0x7a6: 0x2000, 0x7a7: 0x2000, 0x7a8: 0x2000, 0x7a9: 0x2000, + 0x7aa: 0x2000, 0x7ab: 0x2000, 0x7ac: 0x2000, 0x7ad: 0x2000, 0x7ae: 0x2000, 0x7af: 0x2000, + 0x7b0: 0x2000, 0x7b1: 0x2000, 0x7b2: 0x2000, 0x7b3: 0x2000, 0x7b4: 0x2000, 0x7b5: 0x2000, + 0x7b6: 0x2000, 0x7b7: 0x2000, 0x7b8: 0x2000, 0x7b9: 0x2000, 0x7ba: 0x2000, 0x7bb: 0x2000, + 0x7bc: 0x2000, 0x7bd: 0x2000, 0x7be: 0x2000, 0x7bf: 0x2000, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x2000, 0x7c1: 0x2000, 0x7c2: 0x2000, 0x7c3: 0x2000, 0x7c4: 0x2000, 0x7c5: 0x2000, + 0x7c6: 0x2000, 0x7c7: 0x2000, 0x7c8: 0x2000, 0x7c9: 0x2000, 0x7ca: 0x2000, 0x7cb: 0x2000, + 0x7cc: 0x2000, 0x7cd: 0x2000, 0x7ce: 0x2000, 0x7cf: 0x2000, 0x7d0: 0x2000, 0x7d1: 0x2000, + 0x7d2: 0x2000, 0x7d3: 0x2000, 0x7d4: 0x2000, 0x7d5: 0x2000, 0x7d6: 0x2000, 0x7d7: 0x2000, + 0x7d8: 0x2000, 0x7d9: 0x2000, 0x7da: 0x2000, 0x7db: 0x2000, 0x7dc: 0x2000, 0x7dd: 0x2000, + 0x7de: 0x2000, 0x7df: 0x2000, 0x7e0: 0x2000, 0x7e1: 0x2000, 0x7e2: 0x2000, 0x7e3: 0x2000, + 0x7e4: 0x2000, 0x7e5: 0x2000, 0x7e6: 0x2000, 0x7e7: 0x2000, 0x7e8: 0x2000, 0x7e9: 0x2000, + 0x7eb: 0x2000, 0x7ec: 0x2000, 0x7ed: 0x2000, 0x7ee: 0x2000, 0x7ef: 0x2000, + 0x7f0: 0x2000, 0x7f1: 0x2000, 0x7f2: 0x2000, 0x7f3: 0x2000, 0x7f4: 0x2000, 0x7f5: 0x2000, + 0x7f6: 0x2000, 0x7f7: 0x2000, 0x7f8: 0x2000, 0x7f9: 0x2000, 0x7fa: 0x2000, 0x7fb: 0x2000, + 0x7fc: 0x2000, 0x7fd: 0x2000, 0x7fe: 0x2000, 0x7ff: 0x2000, + // Block 0x20, offset 0x800 + 0x800: 0x2000, 0x801: 0x2000, 0x802: 0x200d, 0x803: 0x2000, 0x804: 0x2000, 0x805: 0x2000, + 0x806: 0x2000, 0x807: 0x2000, 0x808: 0x2000, 0x809: 0x2000, 0x80a: 0x2000, 0x80b: 0x2000, + 0x80c: 0x2000, 0x80d: 0x2000, 0x80e: 0x2000, 0x80f: 0x2000, 0x810: 0x2000, 0x811: 0x2000, + 0x812: 0x2000, 0x813: 0x2000, 0x814: 0x2000, 0x815: 0x2000, 0x816: 0x2000, 0x817: 0x2000, + 0x818: 0x2000, 0x819: 0x2000, 0x81a: 0x2000, 0x81b: 0x2000, 0x81c: 0x2000, 0x81d: 0x2000, + 0x81e: 0x2000, 0x81f: 0x2000, 0x820: 0x2000, 0x821: 0x2000, 0x822: 0x2000, 0x823: 0x2000, + 0x824: 0x2000, 0x825: 0x2000, 0x826: 0x2000, 0x827: 0x2000, 0x828: 0x2000, 0x829: 0x2000, + 0x82a: 0x2000, 0x82b: 0x2000, 0x82c: 0x2000, 0x82d: 0x2000, 0x82e: 0x2000, 0x82f: 0x2000, + 0x830: 0x2000, 0x831: 0x2000, 0x832: 0x2000, 0x833: 0x2000, 0x834: 0x2000, 0x835: 0x2000, + 0x836: 0x2000, 0x837: 0x2000, 0x838: 0x2000, 0x839: 0x2000, 0x83a: 0x2000, 0x83b: 0x2000, + 0x83c: 0x2000, 0x83d: 0x2000, 0x83e: 0x2000, 0x83f: 0x2000, + // Block 0x21, offset 0x840 + 0x840: 0x2000, 0x841: 0x2000, 0x842: 0x2000, 0x843: 0x2000, 0x844: 0x2000, 0x845: 0x2000, + 0x846: 0x2000, 0x847: 0x2000, 0x848: 0x2000, 0x849: 0x2000, 0x84a: 0x2000, 0x84b: 0x2000, + 0x850: 0x2000, 0x851: 0x2000, + 0x852: 0x2000, 0x853: 0x2000, 0x854: 0x2000, 0x855: 0x2000, 0x856: 0x2000, 0x857: 0x2000, + 0x858: 0x2000, 0x859: 0x2000, 0x85a: 0x2000, 0x85b: 0x2000, 0x85c: 0x2000, 0x85d: 0x2000, + 0x85e: 0x2000, 0x85f: 0x2000, 0x860: 0x2000, 0x861: 0x2000, 0x862: 0x2000, 0x863: 0x2000, + 0x864: 0x2000, 0x865: 0x2000, 0x866: 0x2000, 0x867: 0x2000, 0x868: 0x2000, 0x869: 0x2000, + 0x86a: 
0x2000, 0x86b: 0x2000, 0x86c: 0x2000, 0x86d: 0x2000, 0x86e: 0x2000, 0x86f: 0x2000, + 0x870: 0x2000, 0x871: 0x2000, 0x872: 0x2000, 0x873: 0x2000, + // Block 0x22, offset 0x880 + 0x880: 0x2000, 0x881: 0x2000, 0x882: 0x2000, 0x883: 0x2000, 0x884: 0x2000, 0x885: 0x2000, + 0x886: 0x2000, 0x887: 0x2000, 0x888: 0x2000, 0x889: 0x2000, 0x88a: 0x2000, 0x88b: 0x2000, + 0x88c: 0x2000, 0x88d: 0x2000, 0x88e: 0x2000, 0x88f: 0x2000, + 0x892: 0x2000, 0x893: 0x2000, 0x894: 0x2000, 0x895: 0x2000, + 0x8a0: 0x200e, 0x8a1: 0x2000, 0x8a3: 0x2000, + 0x8a4: 0x2000, 0x8a5: 0x2000, 0x8a6: 0x2000, 0x8a7: 0x2000, 0x8a8: 0x2000, 0x8a9: 0x2000, + 0x8b2: 0x2000, 0x8b3: 0x2000, + 0x8b6: 0x2000, 0x8b7: 0x2000, + 0x8bc: 0x2000, 0x8bd: 0x2000, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x2000, 0x8c1: 0x2000, + 0x8c6: 0x2000, 0x8c7: 0x2000, 0x8c8: 0x2000, 0x8cb: 0x200f, + 0x8ce: 0x2000, 0x8cf: 0x2000, 0x8d0: 0x2000, 0x8d1: 0x2000, + 0x8e2: 0x2000, 0x8e3: 0x2000, + 0x8e4: 0x2000, 0x8e5: 0x2000, + 0x8ef: 0x2000, + 0x8fd: 0x4000, 0x8fe: 0x4000, + // Block 0x24, offset 0x900 + 0x905: 0x2000, + 0x906: 0x2000, 0x909: 0x2000, + 0x90e: 0x2000, 0x90f: 0x2000, + 0x914: 0x4000, 0x915: 0x4000, + 0x91c: 0x2000, + 0x91e: 0x2000, + // Block 0x25, offset 0x940 + 0x940: 0x2000, 0x942: 0x2000, + 0x948: 0x4000, 0x949: 0x4000, 0x94a: 0x4000, 0x94b: 0x4000, + 0x94c: 0x4000, 0x94d: 0x4000, 0x94e: 0x4000, 0x94f: 0x4000, 0x950: 0x4000, 0x951: 0x4000, + 0x952: 0x4000, 0x953: 0x4000, + 0x960: 0x2000, 0x961: 0x2000, 0x963: 0x2000, + 0x964: 0x2000, 0x965: 0x2000, 0x967: 0x2000, 0x968: 0x2000, 0x969: 0x2000, + 0x96a: 0x2000, 0x96c: 0x2000, 0x96d: 0x2000, 0x96f: 0x2000, + 0x97f: 0x4000, + // Block 0x26, offset 0x980 + 0x993: 0x4000, + 0x99e: 0x2000, 0x99f: 0x2000, 0x9a1: 0x4000, + 0x9aa: 0x4000, 0x9ab: 0x4000, + 0x9bd: 0x4000, 0x9be: 0x4000, 0x9bf: 0x2000, + // Block 0x27, offset 0x9c0 + 0x9c4: 0x4000, 0x9c5: 0x4000, + 0x9c6: 0x2000, 0x9c7: 0x2000, 0x9c8: 0x2000, 0x9c9: 0x2000, 0x9ca: 0x2000, 0x9cb: 0x2000, + 0x9cc: 0x2000, 0x9cd: 0x2000, 0x9ce: 0x4000, 0x9cf: 0x2000, 0x9d0: 0x2000, 0x9d1: 0x2000, + 0x9d2: 0x2000, 0x9d3: 0x2000, 0x9d4: 0x4000, 0x9d5: 0x2000, 0x9d6: 0x2000, 0x9d7: 0x2000, + 0x9d8: 0x2000, 0x9d9: 0x2000, 0x9da: 0x2000, 0x9db: 0x2000, 0x9dc: 0x2000, 0x9dd: 0x2000, + 0x9de: 0x2000, 0x9df: 0x2000, 0x9e0: 0x2000, 0x9e1: 0x2000, 0x9e3: 0x2000, + 0x9e8: 0x2000, 0x9e9: 0x2000, + 0x9ea: 0x4000, 0x9eb: 0x2000, 0x9ec: 0x2000, 0x9ed: 0x2000, 0x9ee: 0x2000, 0x9ef: 0x2000, + 0x9f0: 0x2000, 0x9f1: 0x2000, 0x9f2: 0x4000, 0x9f3: 0x4000, 0x9f4: 0x2000, 0x9f5: 0x4000, + 0x9f6: 0x2000, 0x9f7: 0x2000, 0x9f8: 0x2000, 0x9f9: 0x2000, 0x9fa: 0x4000, 0x9fb: 0x2000, + 0x9fc: 0x2000, 0x9fd: 0x4000, 0x9fe: 0x2000, 0x9ff: 0x2000, + // Block 0x28, offset 0xa00 + 0xa05: 0x4000, + 0xa0a: 0x4000, 0xa0b: 0x4000, + 0xa28: 0x4000, + 0xa3d: 0x2000, + // Block 0x29, offset 0xa40 + 0xa4c: 0x4000, 0xa4e: 0x4000, + 0xa53: 0x4000, 0xa54: 0x4000, 0xa55: 0x4000, 0xa57: 0x4000, + 0xa76: 0x2000, 0xa77: 0x2000, 0xa78: 0x2000, 0xa79: 0x2000, 0xa7a: 0x2000, 0xa7b: 0x2000, + 0xa7c: 0x2000, 0xa7d: 0x2000, 0xa7e: 0x2000, 0xa7f: 0x2000, + // Block 0x2a, offset 0xa80 + 0xa95: 0x4000, 0xa96: 0x4000, 0xa97: 0x4000, + 0xab0: 0x4000, + 0xabf: 0x4000, + // Block 0x2b, offset 0xac0 + 0xae6: 0x6000, 0xae7: 0x6000, 0xae8: 0x6000, 0xae9: 0x6000, + 0xaea: 0x6000, 0xaeb: 0x6000, 0xaec: 0x6000, 0xaed: 0x6000, + // Block 0x2c, offset 0xb00 + 0xb05: 0x6010, + 0xb06: 0x6011, + // Block 0x2d, offset 0xb40 + 0xb5b: 0x4000, 0xb5c: 0x4000, + // Block 0x2e, offset 0xb80 + 0xb90: 0x4000, + 0xb95: 0x4000, 0xb96: 
0x2000, 0xb97: 0x2000, + 0xb98: 0x2000, 0xb99: 0x2000, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x4000, 0xbc1: 0x4000, 0xbc2: 0x4000, 0xbc3: 0x4000, 0xbc4: 0x4000, 0xbc5: 0x4000, + 0xbc6: 0x4000, 0xbc7: 0x4000, 0xbc8: 0x4000, 0xbc9: 0x4000, 0xbca: 0x4000, 0xbcb: 0x4000, + 0xbcc: 0x4000, 0xbcd: 0x4000, 0xbce: 0x4000, 0xbcf: 0x4000, 0xbd0: 0x4000, 0xbd1: 0x4000, + 0xbd2: 0x4000, 0xbd3: 0x4000, 0xbd4: 0x4000, 0xbd5: 0x4000, 0xbd6: 0x4000, 0xbd7: 0x4000, + 0xbd8: 0x4000, 0xbd9: 0x4000, 0xbdb: 0x4000, 0xbdc: 0x4000, 0xbdd: 0x4000, + 0xbde: 0x4000, 0xbdf: 0x4000, 0xbe0: 0x4000, 0xbe1: 0x4000, 0xbe2: 0x4000, 0xbe3: 0x4000, + 0xbe4: 0x4000, 0xbe5: 0x4000, 0xbe6: 0x4000, 0xbe7: 0x4000, 0xbe8: 0x4000, 0xbe9: 0x4000, + 0xbea: 0x4000, 0xbeb: 0x4000, 0xbec: 0x4000, 0xbed: 0x4000, 0xbee: 0x4000, 0xbef: 0x4000, + 0xbf0: 0x4000, 0xbf1: 0x4000, 0xbf2: 0x4000, 0xbf3: 0x4000, 0xbf4: 0x4000, 0xbf5: 0x4000, + 0xbf6: 0x4000, 0xbf7: 0x4000, 0xbf8: 0x4000, 0xbf9: 0x4000, 0xbfa: 0x4000, 0xbfb: 0x4000, + 0xbfc: 0x4000, 0xbfd: 0x4000, 0xbfe: 0x4000, 0xbff: 0x4000, + // Block 0x30, offset 0xc00 + 0xc00: 0x4000, 0xc01: 0x4000, 0xc02: 0x4000, 0xc03: 0x4000, 0xc04: 0x4000, 0xc05: 0x4000, + 0xc06: 0x4000, 0xc07: 0x4000, 0xc08: 0x4000, 0xc09: 0x4000, 0xc0a: 0x4000, 0xc0b: 0x4000, + 0xc0c: 0x4000, 0xc0d: 0x4000, 0xc0e: 0x4000, 0xc0f: 0x4000, 0xc10: 0x4000, 0xc11: 0x4000, + 0xc12: 0x4000, 0xc13: 0x4000, 0xc14: 0x4000, 0xc15: 0x4000, 0xc16: 0x4000, 0xc17: 0x4000, + 0xc18: 0x4000, 0xc19: 0x4000, 0xc1a: 0x4000, 0xc1b: 0x4000, 0xc1c: 0x4000, 0xc1d: 0x4000, + 0xc1e: 0x4000, 0xc1f: 0x4000, 0xc20: 0x4000, 0xc21: 0x4000, 0xc22: 0x4000, 0xc23: 0x4000, + 0xc24: 0x4000, 0xc25: 0x4000, 0xc26: 0x4000, 0xc27: 0x4000, 0xc28: 0x4000, 0xc29: 0x4000, + 0xc2a: 0x4000, 0xc2b: 0x4000, 0xc2c: 0x4000, 0xc2d: 0x4000, 0xc2e: 0x4000, 0xc2f: 0x4000, + 0xc30: 0x4000, 0xc31: 0x4000, 0xc32: 0x4000, 0xc33: 0x4000, + // Block 0x31, offset 0xc40 + 0xc40: 0x4000, 0xc41: 0x4000, 0xc42: 0x4000, 0xc43: 0x4000, 0xc44: 0x4000, 0xc45: 0x4000, + 0xc46: 0x4000, 0xc47: 0x4000, 0xc48: 0x4000, 0xc49: 0x4000, 0xc4a: 0x4000, 0xc4b: 0x4000, + 0xc4c: 0x4000, 0xc4d: 0x4000, 0xc4e: 0x4000, 0xc4f: 0x4000, 0xc50: 0x4000, 0xc51: 0x4000, + 0xc52: 0x4000, 0xc53: 0x4000, 0xc54: 0x4000, 0xc55: 0x4000, + 0xc70: 0x4000, 0xc71: 0x4000, 0xc72: 0x4000, 0xc73: 0x4000, 0xc74: 0x4000, 0xc75: 0x4000, + 0xc76: 0x4000, 0xc77: 0x4000, 0xc78: 0x4000, 0xc79: 0x4000, 0xc7a: 0x4000, 0xc7b: 0x4000, + // Block 0x32, offset 0xc80 + 0xc80: 0x9012, 0xc81: 0x4013, 0xc82: 0x4014, 0xc83: 0x4000, 0xc84: 0x4000, 0xc85: 0x4000, + 0xc86: 0x4000, 0xc87: 0x4000, 0xc88: 0x4000, 0xc89: 0x4000, 0xc8a: 0x4000, 0xc8b: 0x4000, + 0xc8c: 0x4015, 0xc8d: 0x4015, 0xc8e: 0x4000, 0xc8f: 0x4000, 0xc90: 0x4000, 0xc91: 0x4000, + 0xc92: 0x4000, 0xc93: 0x4000, 0xc94: 0x4000, 0xc95: 0x4000, 0xc96: 0x4000, 0xc97: 0x4000, + 0xc98: 0x4000, 0xc99: 0x4000, 0xc9a: 0x4000, 0xc9b: 0x4000, 0xc9c: 0x4000, 0xc9d: 0x4000, + 0xc9e: 0x4000, 0xc9f: 0x4000, 0xca0: 0x4000, 0xca1: 0x4000, 0xca2: 0x4000, 0xca3: 0x4000, + 0xca4: 0x4000, 0xca5: 0x4000, 0xca6: 0x4000, 0xca7: 0x4000, 0xca8: 0x4000, 0xca9: 0x4000, + 0xcaa: 0x4000, 0xcab: 0x4000, 0xcac: 0x4000, 0xcad: 0x4000, 0xcae: 0x4000, 0xcaf: 0x4000, + 0xcb0: 0x4000, 0xcb1: 0x4000, 0xcb2: 0x4000, 0xcb3: 0x4000, 0xcb4: 0x4000, 0xcb5: 0x4000, + 0xcb6: 0x4000, 0xcb7: 0x4000, 0xcb8: 0x4000, 0xcb9: 0x4000, 0xcba: 0x4000, 0xcbb: 0x4000, + 0xcbc: 0x4000, 0xcbd: 0x4000, 0xcbe: 0x4000, + // Block 0x33, offset 0xcc0 + 0xcc1: 0x4000, 0xcc2: 0x4000, 0xcc3: 0x4000, 0xcc4: 0x4000, 0xcc5: 0x4000, + 0xcc6: 0x4000, 
0xcc7: 0x4000, 0xcc8: 0x4000, 0xcc9: 0x4000, 0xcca: 0x4000, 0xccb: 0x4000, + 0xccc: 0x4000, 0xccd: 0x4000, 0xcce: 0x4000, 0xccf: 0x4000, 0xcd0: 0x4000, 0xcd1: 0x4000, + 0xcd2: 0x4000, 0xcd3: 0x4000, 0xcd4: 0x4000, 0xcd5: 0x4000, 0xcd6: 0x4000, 0xcd7: 0x4000, + 0xcd8: 0x4000, 0xcd9: 0x4000, 0xcda: 0x4000, 0xcdb: 0x4000, 0xcdc: 0x4000, 0xcdd: 0x4000, + 0xcde: 0x4000, 0xcdf: 0x4000, 0xce0: 0x4000, 0xce1: 0x4000, 0xce2: 0x4000, 0xce3: 0x4000, + 0xce4: 0x4000, 0xce5: 0x4000, 0xce6: 0x4000, 0xce7: 0x4000, 0xce8: 0x4000, 0xce9: 0x4000, + 0xcea: 0x4000, 0xceb: 0x4000, 0xcec: 0x4000, 0xced: 0x4000, 0xcee: 0x4000, 0xcef: 0x4000, + 0xcf0: 0x4000, 0xcf1: 0x4000, 0xcf2: 0x4000, 0xcf3: 0x4000, 0xcf4: 0x4000, 0xcf5: 0x4000, + 0xcf6: 0x4000, 0xcf7: 0x4000, 0xcf8: 0x4000, 0xcf9: 0x4000, 0xcfa: 0x4000, 0xcfb: 0x4000, + 0xcfc: 0x4000, 0xcfd: 0x4000, 0xcfe: 0x4000, 0xcff: 0x4000, + // Block 0x34, offset 0xd00 + 0xd00: 0x4000, 0xd01: 0x4000, 0xd02: 0x4000, 0xd03: 0x4000, 0xd04: 0x4000, 0xd05: 0x4000, + 0xd06: 0x4000, 0xd07: 0x4000, 0xd08: 0x4000, 0xd09: 0x4000, 0xd0a: 0x4000, 0xd0b: 0x4000, + 0xd0c: 0x4000, 0xd0d: 0x4000, 0xd0e: 0x4000, 0xd0f: 0x4000, 0xd10: 0x4000, 0xd11: 0x4000, + 0xd12: 0x4000, 0xd13: 0x4000, 0xd14: 0x4000, 0xd15: 0x4000, 0xd16: 0x4000, + 0xd19: 0x4016, 0xd1a: 0x4017, 0xd1b: 0x4000, 0xd1c: 0x4000, 0xd1d: 0x4000, + 0xd1e: 0x4000, 0xd1f: 0x4000, 0xd20: 0x4000, 0xd21: 0x4018, 0xd22: 0x4019, 0xd23: 0x401a, + 0xd24: 0x401b, 0xd25: 0x401c, 0xd26: 0x401d, 0xd27: 0x401e, 0xd28: 0x401f, 0xd29: 0x4020, + 0xd2a: 0x4021, 0xd2b: 0x4022, 0xd2c: 0x4000, 0xd2d: 0x4010, 0xd2e: 0x4000, 0xd2f: 0x4023, + 0xd30: 0x4000, 0xd31: 0x4024, 0xd32: 0x4000, 0xd33: 0x4025, 0xd34: 0x4000, 0xd35: 0x4026, + 0xd36: 0x4000, 0xd37: 0x401a, 0xd38: 0x4000, 0xd39: 0x4027, 0xd3a: 0x4000, 0xd3b: 0x4028, + 0xd3c: 0x4000, 0xd3d: 0x4020, 0xd3e: 0x4000, 0xd3f: 0x4029, + // Block 0x35, offset 0xd40 + 0xd40: 0x4000, 0xd41: 0x402a, 0xd42: 0x4000, 0xd43: 0x402b, 0xd44: 0x402c, 0xd45: 0x4000, + 0xd46: 0x4017, 0xd47: 0x4000, 0xd48: 0x402d, 0xd49: 0x4000, 0xd4a: 0x402e, 0xd4b: 0x402f, + 0xd4c: 0x4030, 0xd4d: 0x4017, 0xd4e: 0x4016, 0xd4f: 0x4017, 0xd50: 0x4000, 0xd51: 0x4000, + 0xd52: 0x4031, 0xd53: 0x4000, 0xd54: 0x4000, 0xd55: 0x4031, 0xd56: 0x4000, 0xd57: 0x4000, + 0xd58: 0x4032, 0xd59: 0x4000, 0xd5a: 0x4000, 0xd5b: 0x4032, 0xd5c: 0x4000, 0xd5d: 0x4000, + 0xd5e: 0x4033, 0xd5f: 0x402e, 0xd60: 0x4034, 0xd61: 0x4035, 0xd62: 0x4034, 0xd63: 0x4036, + 0xd64: 0x4037, 0xd65: 0x4024, 0xd66: 0x4035, 0xd67: 0x4025, 0xd68: 0x4038, 0xd69: 0x4038, + 0xd6a: 0x4039, 0xd6b: 0x4039, 0xd6c: 0x403a, 0xd6d: 0x403a, 0xd6e: 0x4000, 0xd6f: 0x4035, + 0xd70: 0x4000, 0xd71: 0x4000, 0xd72: 0x403b, 0xd73: 0x403c, 0xd74: 0x4000, 0xd75: 0x4000, + 0xd76: 0x4000, 0xd77: 0x4000, 0xd78: 0x4000, 0xd79: 0x4000, 0xd7a: 0x4000, 0xd7b: 0x403d, + 0xd7c: 0x401c, 0xd7d: 0x4000, 0xd7e: 0x4000, 0xd7f: 0x4000, + // Block 0x36, offset 0xd80 + 0xd85: 0x4000, + 0xd86: 0x4000, 0xd87: 0x4000, 0xd88: 0x4000, 0xd89: 0x4000, 0xd8a: 0x4000, 0xd8b: 0x4000, + 0xd8c: 0x4000, 0xd8d: 0x4000, 0xd8e: 0x4000, 0xd8f: 0x4000, 0xd90: 0x4000, 0xd91: 0x4000, + 0xd92: 0x4000, 0xd93: 0x4000, 0xd94: 0x4000, 0xd95: 0x4000, 0xd96: 0x4000, 0xd97: 0x4000, + 0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000, + 0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000, + 0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000, + 0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000, 0xdae: 0x4000, 0xdaf: 
0x4000, + 0xdb1: 0x403e, 0xdb2: 0x403e, 0xdb3: 0x403e, 0xdb4: 0x403e, 0xdb5: 0x403e, + 0xdb6: 0x403e, 0xdb7: 0x403e, 0xdb8: 0x403e, 0xdb9: 0x403e, 0xdba: 0x403e, 0xdbb: 0x403e, + 0xdbc: 0x403e, 0xdbd: 0x403e, 0xdbe: 0x403e, 0xdbf: 0x403e, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x4037, 0xdc1: 0x4037, 0xdc2: 0x4037, 0xdc3: 0x4037, 0xdc4: 0x4037, 0xdc5: 0x4037, + 0xdc6: 0x4037, 0xdc7: 0x4037, 0xdc8: 0x4037, 0xdc9: 0x4037, 0xdca: 0x4037, 0xdcb: 0x4037, + 0xdcc: 0x4037, 0xdcd: 0x4037, 0xdce: 0x4037, 0xdcf: 0x400e, 0xdd0: 0x403f, 0xdd1: 0x4040, + 0xdd2: 0x4041, 0xdd3: 0x4040, 0xdd4: 0x403f, 0xdd5: 0x4042, 0xdd6: 0x4043, 0xdd7: 0x4044, + 0xdd8: 0x4040, 0xdd9: 0x4041, 0xdda: 0x4040, 0xddb: 0x4045, 0xddc: 0x4009, 0xddd: 0x4045, + 0xdde: 0x4046, 0xddf: 0x4045, 0xde0: 0x4047, 0xde1: 0x400b, 0xde2: 0x400a, 0xde3: 0x400c, + 0xde4: 0x4048, 0xde5: 0x4000, 0xde6: 0x4000, 0xde7: 0x4000, 0xde8: 0x4000, 0xde9: 0x4000, + 0xdea: 0x4000, 0xdeb: 0x4000, 0xdec: 0x4000, 0xded: 0x4000, 0xdee: 0x4000, 0xdef: 0x4000, + 0xdf0: 0x4000, 0xdf1: 0x4000, 0xdf2: 0x4000, 0xdf3: 0x4000, 0xdf4: 0x4000, 0xdf5: 0x4000, + 0xdf6: 0x4000, 0xdf7: 0x4000, 0xdf8: 0x4000, 0xdf9: 0x4000, 0xdfa: 0x4000, 0xdfb: 0x4000, + 0xdfc: 0x4000, 0xdfd: 0x4000, 0xdfe: 0x4000, 0xdff: 0x4000, + // Block 0x38, offset 0xe00 + 0xe00: 0x4000, 0xe01: 0x4000, 0xe02: 0x4000, 0xe03: 0x4000, 0xe04: 0x4000, 0xe05: 0x4000, + 0xe06: 0x4000, 0xe07: 0x4000, 0xe08: 0x4000, 0xe09: 0x4000, 0xe0a: 0x4000, 0xe0b: 0x4000, + 0xe0c: 0x4000, 0xe0d: 0x4000, 0xe0e: 0x4000, 0xe10: 0x4000, 0xe11: 0x4000, + 0xe12: 0x4000, 0xe13: 0x4000, 0xe14: 0x4000, 0xe15: 0x4000, 0xe16: 0x4000, 0xe17: 0x4000, + 0xe18: 0x4000, 0xe19: 0x4000, 0xe1a: 0x4000, 0xe1b: 0x4000, 0xe1c: 0x4000, 0xe1d: 0x4000, + 0xe1e: 0x4000, 0xe1f: 0x4000, 0xe20: 0x4000, 0xe21: 0x4000, 0xe22: 0x4000, 0xe23: 0x4000, + 0xe24: 0x4000, 0xe25: 0x4000, 0xe26: 0x4000, 0xe27: 0x4000, 0xe28: 0x4000, 0xe29: 0x4000, + 0xe2a: 0x4000, 0xe2b: 0x4000, 0xe2c: 0x4000, 0xe2d: 0x4000, 0xe2e: 0x4000, 0xe2f: 0x4000, + 0xe30: 0x4000, 0xe31: 0x4000, 0xe32: 0x4000, 0xe33: 0x4000, 0xe34: 0x4000, 0xe35: 0x4000, + 0xe36: 0x4000, 0xe37: 0x4000, 0xe38: 0x4000, 0xe39: 0x4000, 0xe3a: 0x4000, 0xe3b: 0x4000, + 0xe3c: 0x4000, 0xe3d: 0x4000, 0xe3e: 0x4000, 0xe3f: 0x4000, + // Block 0x39, offset 0xe40 + 0xe40: 0x4000, 0xe41: 0x4000, 0xe42: 0x4000, 0xe43: 0x4000, 0xe44: 0x4000, 0xe45: 0x4000, + 0xe46: 0x4000, 0xe47: 0x4000, 0xe48: 0x4000, 0xe49: 0x4000, 0xe4a: 0x4000, 0xe4b: 0x4000, + 0xe4c: 0x4000, 0xe4d: 0x4000, 0xe4e: 0x4000, 0xe4f: 0x4000, 0xe50: 0x4000, 0xe51: 0x4000, + 0xe52: 0x4000, 0xe53: 0x4000, 0xe54: 0x4000, 0xe55: 0x4000, 0xe56: 0x4000, 0xe57: 0x4000, + 0xe58: 0x4000, 0xe59: 0x4000, 0xe5a: 0x4000, 0xe5b: 0x4000, 0xe5c: 0x4000, 0xe5d: 0x4000, + 0xe5e: 0x4000, 0xe5f: 0x4000, 0xe60: 0x4000, 0xe61: 0x4000, 0xe62: 0x4000, 0xe63: 0x4000, + 0xe70: 0x4000, 0xe71: 0x4000, 0xe72: 0x4000, 0xe73: 0x4000, 0xe74: 0x4000, 0xe75: 0x4000, + 0xe76: 0x4000, 0xe77: 0x4000, 0xe78: 0x4000, 0xe79: 0x4000, 0xe7a: 0x4000, 0xe7b: 0x4000, + 0xe7c: 0x4000, 0xe7d: 0x4000, 0xe7e: 0x4000, 0xe7f: 0x4000, + // Block 0x3a, offset 0xe80 + 0xe80: 0x4000, 0xe81: 0x4000, 0xe82: 0x4000, 0xe83: 0x4000, 0xe84: 0x4000, 0xe85: 0x4000, + 0xe86: 0x4000, 0xe87: 0x4000, 0xe88: 0x4000, 0xe89: 0x4000, 0xe8a: 0x4000, 0xe8b: 0x4000, + 0xe8c: 0x4000, 0xe8d: 0x4000, 0xe8e: 0x4000, 0xe8f: 0x4000, 0xe90: 0x4000, 0xe91: 0x4000, + 0xe92: 0x4000, 0xe93: 0x4000, 0xe94: 0x4000, 0xe95: 0x4000, 0xe96: 0x4000, 0xe97: 0x4000, + 0xe98: 0x4000, 0xe99: 0x4000, 0xe9a: 0x4000, 0xe9b: 0x4000, 0xe9c: 0x4000, 
0xe9d: 0x4000, + 0xe9e: 0x4000, 0xea0: 0x4000, 0xea1: 0x4000, 0xea2: 0x4000, 0xea3: 0x4000, + 0xea4: 0x4000, 0xea5: 0x4000, 0xea6: 0x4000, 0xea7: 0x4000, 0xea8: 0x4000, 0xea9: 0x4000, + 0xeaa: 0x4000, 0xeab: 0x4000, 0xeac: 0x4000, 0xead: 0x4000, 0xeae: 0x4000, 0xeaf: 0x4000, + 0xeb0: 0x4000, 0xeb1: 0x4000, 0xeb2: 0x4000, 0xeb3: 0x4000, 0xeb4: 0x4000, 0xeb5: 0x4000, + 0xeb6: 0x4000, 0xeb7: 0x4000, 0xeb8: 0x4000, 0xeb9: 0x4000, 0xeba: 0x4000, 0xebb: 0x4000, + 0xebc: 0x4000, 0xebd: 0x4000, 0xebe: 0x4000, 0xebf: 0x4000, + // Block 0x3b, offset 0xec0 + 0xec0: 0x4000, 0xec1: 0x4000, 0xec2: 0x4000, 0xec3: 0x4000, 0xec4: 0x4000, 0xec5: 0x4000, + 0xec6: 0x4000, 0xec7: 0x4000, 0xec8: 0x2000, 0xec9: 0x2000, 0xeca: 0x2000, 0xecb: 0x2000, + 0xecc: 0x2000, 0xecd: 0x2000, 0xece: 0x2000, 0xecf: 0x2000, 0xed0: 0x4000, 0xed1: 0x4000, + 0xed2: 0x4000, 0xed3: 0x4000, 0xed4: 0x4000, 0xed5: 0x4000, 0xed6: 0x4000, 0xed7: 0x4000, + 0xed8: 0x4000, 0xed9: 0x4000, 0xeda: 0x4000, 0xedb: 0x4000, 0xedc: 0x4000, 0xedd: 0x4000, + 0xede: 0x4000, 0xedf: 0x4000, 0xee0: 0x4000, 0xee1: 0x4000, 0xee2: 0x4000, 0xee3: 0x4000, + 0xee4: 0x4000, 0xee5: 0x4000, 0xee6: 0x4000, 0xee7: 0x4000, 0xee8: 0x4000, 0xee9: 0x4000, + 0xeea: 0x4000, 0xeeb: 0x4000, 0xeec: 0x4000, 0xeed: 0x4000, 0xeee: 0x4000, 0xeef: 0x4000, + 0xef0: 0x4000, 0xef1: 0x4000, 0xef2: 0x4000, 0xef3: 0x4000, 0xef4: 0x4000, 0xef5: 0x4000, + 0xef6: 0x4000, 0xef7: 0x4000, 0xef8: 0x4000, 0xef9: 0x4000, 0xefa: 0x4000, 0xefb: 0x4000, + 0xefc: 0x4000, 0xefd: 0x4000, 0xefe: 0x4000, 0xeff: 0x4000, + // Block 0x3c, offset 0xf00 + 0xf00: 0x4000, 0xf01: 0x4000, 0xf02: 0x4000, 0xf03: 0x4000, 0xf04: 0x4000, 0xf05: 0x4000, + 0xf06: 0x4000, 0xf07: 0x4000, 0xf08: 0x4000, 0xf09: 0x4000, 0xf0a: 0x4000, 0xf0b: 0x4000, + 0xf0c: 0x4000, 0xf10: 0x4000, 0xf11: 0x4000, + 0xf12: 0x4000, 0xf13: 0x4000, 0xf14: 0x4000, 0xf15: 0x4000, 0xf16: 0x4000, 0xf17: 0x4000, + 0xf18: 0x4000, 0xf19: 0x4000, 0xf1a: 0x4000, 0xf1b: 0x4000, 0xf1c: 0x4000, 0xf1d: 0x4000, + 0xf1e: 0x4000, 0xf1f: 0x4000, 0xf20: 0x4000, 0xf21: 0x4000, 0xf22: 0x4000, 0xf23: 0x4000, + 0xf24: 0x4000, 0xf25: 0x4000, 0xf26: 0x4000, 0xf27: 0x4000, 0xf28: 0x4000, 0xf29: 0x4000, + 0xf2a: 0x4000, 0xf2b: 0x4000, 0xf2c: 0x4000, 0xf2d: 0x4000, 0xf2e: 0x4000, 0xf2f: 0x4000, + 0xf30: 0x4000, 0xf31: 0x4000, 0xf32: 0x4000, 0xf33: 0x4000, 0xf34: 0x4000, 0xf35: 0x4000, + 0xf36: 0x4000, 0xf37: 0x4000, 0xf38: 0x4000, 0xf39: 0x4000, 0xf3a: 0x4000, 0xf3b: 0x4000, + 0xf3c: 0x4000, 0xf3d: 0x4000, 0xf3e: 0x4000, 0xf3f: 0x4000, + // Block 0x3d, offset 0xf40 + 0xf40: 0x4000, 0xf41: 0x4000, 0xf42: 0x4000, 0xf43: 0x4000, 0xf44: 0x4000, 0xf45: 0x4000, + 0xf46: 0x4000, + // Block 0x3e, offset 0xf80 + 0xfa0: 0x4000, 0xfa1: 0x4000, 0xfa2: 0x4000, 0xfa3: 0x4000, + 0xfa4: 0x4000, 0xfa5: 0x4000, 0xfa6: 0x4000, 0xfa7: 0x4000, 0xfa8: 0x4000, 0xfa9: 0x4000, + 0xfaa: 0x4000, 0xfab: 0x4000, 0xfac: 0x4000, 0xfad: 0x4000, 0xfae: 0x4000, 0xfaf: 0x4000, + 0xfb0: 0x4000, 0xfb1: 0x4000, 0xfb2: 0x4000, 0xfb3: 0x4000, 0xfb4: 0x4000, 0xfb5: 0x4000, + 0xfb6: 0x4000, 0xfb7: 0x4000, 0xfb8: 0x4000, 0xfb9: 0x4000, 0xfba: 0x4000, 0xfbb: 0x4000, + 0xfbc: 0x4000, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x4000, 0xfc1: 0x4000, 0xfc2: 0x4000, 0xfc3: 0x4000, 0xfc4: 0x4000, 0xfc5: 0x4000, + 0xfc6: 0x4000, 0xfc7: 0x4000, 0xfc8: 0x4000, 0xfc9: 0x4000, 0xfca: 0x4000, 0xfcb: 0x4000, + 0xfcc: 0x4000, 0xfcd: 0x4000, 0xfce: 0x4000, 0xfcf: 0x4000, 0xfd0: 0x4000, 0xfd1: 0x4000, + 0xfd2: 0x4000, 0xfd3: 0x4000, 0xfd4: 0x4000, 0xfd5: 0x4000, 0xfd6: 0x4000, 0xfd7: 0x4000, + 0xfd8: 0x4000, 0xfd9: 0x4000, 0xfda: 
0x4000, 0xfdb: 0x4000, 0xfdc: 0x4000, 0xfdd: 0x4000, + 0xfde: 0x4000, 0xfdf: 0x4000, 0xfe0: 0x4000, 0xfe1: 0x4000, 0xfe2: 0x4000, 0xfe3: 0x4000, + // Block 0x40, offset 0x1000 + 0x1000: 0x2000, 0x1001: 0x2000, 0x1002: 0x2000, 0x1003: 0x2000, 0x1004: 0x2000, 0x1005: 0x2000, + 0x1006: 0x2000, 0x1007: 0x2000, 0x1008: 0x2000, 0x1009: 0x2000, 0x100a: 0x2000, 0x100b: 0x2000, + 0x100c: 0x2000, 0x100d: 0x2000, 0x100e: 0x2000, 0x100f: 0x2000, 0x1010: 0x4000, 0x1011: 0x4000, + 0x1012: 0x4000, 0x1013: 0x4000, 0x1014: 0x4000, 0x1015: 0x4000, 0x1016: 0x4000, 0x1017: 0x4000, + 0x1018: 0x4000, 0x1019: 0x4000, + 0x1030: 0x4000, 0x1031: 0x4000, 0x1032: 0x4000, 0x1033: 0x4000, 0x1034: 0x4000, 0x1035: 0x4000, + 0x1036: 0x4000, 0x1037: 0x4000, 0x1038: 0x4000, 0x1039: 0x4000, 0x103a: 0x4000, 0x103b: 0x4000, + 0x103c: 0x4000, 0x103d: 0x4000, 0x103e: 0x4000, 0x103f: 0x4000, + // Block 0x41, offset 0x1040 + 0x1040: 0x4000, 0x1041: 0x4000, 0x1042: 0x4000, 0x1043: 0x4000, 0x1044: 0x4000, 0x1045: 0x4000, + 0x1046: 0x4000, 0x1047: 0x4000, 0x1048: 0x4000, 0x1049: 0x4000, 0x104a: 0x4000, 0x104b: 0x4000, + 0x104c: 0x4000, 0x104d: 0x4000, 0x104e: 0x4000, 0x104f: 0x4000, 0x1050: 0x4000, 0x1051: 0x4000, + 0x1052: 0x4000, 0x1054: 0x4000, 0x1055: 0x4000, 0x1056: 0x4000, 0x1057: 0x4000, + 0x1058: 0x4000, 0x1059: 0x4000, 0x105a: 0x4000, 0x105b: 0x4000, 0x105c: 0x4000, 0x105d: 0x4000, + 0x105e: 0x4000, 0x105f: 0x4000, 0x1060: 0x4000, 0x1061: 0x4000, 0x1062: 0x4000, 0x1063: 0x4000, + 0x1064: 0x4000, 0x1065: 0x4000, 0x1066: 0x4000, 0x1068: 0x4000, 0x1069: 0x4000, + 0x106a: 0x4000, 0x106b: 0x4000, + // Block 0x42, offset 0x1080 + 0x1081: 0x9012, 0x1082: 0x9012, 0x1083: 0x9012, 0x1084: 0x9012, 0x1085: 0x9012, + 0x1086: 0x9012, 0x1087: 0x9012, 0x1088: 0x9012, 0x1089: 0x9012, 0x108a: 0x9012, 0x108b: 0x9012, + 0x108c: 0x9012, 0x108d: 0x9012, 0x108e: 0x9012, 0x108f: 0x9012, 0x1090: 0x9012, 0x1091: 0x9012, + 0x1092: 0x9012, 0x1093: 0x9012, 0x1094: 0x9012, 0x1095: 0x9012, 0x1096: 0x9012, 0x1097: 0x9012, + 0x1098: 0x9012, 0x1099: 0x9012, 0x109a: 0x9012, 0x109b: 0x9012, 0x109c: 0x9012, 0x109d: 0x9012, + 0x109e: 0x9012, 0x109f: 0x9012, 0x10a0: 0x9049, 0x10a1: 0x9049, 0x10a2: 0x9049, 0x10a3: 0x9049, + 0x10a4: 0x9049, 0x10a5: 0x9049, 0x10a6: 0x9049, 0x10a7: 0x9049, 0x10a8: 0x9049, 0x10a9: 0x9049, + 0x10aa: 0x9049, 0x10ab: 0x9049, 0x10ac: 0x9049, 0x10ad: 0x9049, 0x10ae: 0x9049, 0x10af: 0x9049, + 0x10b0: 0x9049, 0x10b1: 0x9049, 0x10b2: 0x9049, 0x10b3: 0x9049, 0x10b4: 0x9049, 0x10b5: 0x9049, + 0x10b6: 0x9049, 0x10b7: 0x9049, 0x10b8: 0x9049, 0x10b9: 0x9049, 0x10ba: 0x9049, 0x10bb: 0x9049, + 0x10bc: 0x9049, 0x10bd: 0x9049, 0x10be: 0x9049, 0x10bf: 0x9049, + // Block 0x43, offset 0x10c0 + 0x10c0: 0x9049, 0x10c1: 0x9049, 0x10c2: 0x9049, 0x10c3: 0x9049, 0x10c4: 0x9049, 0x10c5: 0x9049, + 0x10c6: 0x9049, 0x10c7: 0x9049, 0x10c8: 0x9049, 0x10c9: 0x9049, 0x10ca: 0x9049, 0x10cb: 0x9049, + 0x10cc: 0x9049, 0x10cd: 0x9049, 0x10ce: 0x9049, 0x10cf: 0x9049, 0x10d0: 0x9049, 0x10d1: 0x9049, + 0x10d2: 0x9049, 0x10d3: 0x9049, 0x10d4: 0x9049, 0x10d5: 0x9049, 0x10d6: 0x9049, 0x10d7: 0x9049, + 0x10d8: 0x9049, 0x10d9: 0x9049, 0x10da: 0x9049, 0x10db: 0x9049, 0x10dc: 0x9049, 0x10dd: 0x9049, + 0x10de: 0x9049, 0x10df: 0x904a, 0x10e0: 0x904b, 0x10e1: 0xb04c, 0x10e2: 0xb04d, 0x10e3: 0xb04d, + 0x10e4: 0xb04e, 0x10e5: 0xb04f, 0x10e6: 0xb050, 0x10e7: 0xb051, 0x10e8: 0xb052, 0x10e9: 0xb053, + 0x10ea: 0xb054, 0x10eb: 0xb055, 0x10ec: 0xb056, 0x10ed: 0xb057, 0x10ee: 0xb058, 0x10ef: 0xb059, + 0x10f0: 0xb05a, 0x10f1: 0xb05b, 0x10f2: 0xb05c, 0x10f3: 0xb05d, 0x10f4: 0xb05e, 0x10f5: 
0xb05f, + 0x10f6: 0xb060, 0x10f7: 0xb061, 0x10f8: 0xb062, 0x10f9: 0xb063, 0x10fa: 0xb064, 0x10fb: 0xb065, + 0x10fc: 0xb052, 0x10fd: 0xb066, 0x10fe: 0xb067, 0x10ff: 0xb055, + // Block 0x44, offset 0x1100 + 0x1100: 0xb068, 0x1101: 0xb069, 0x1102: 0xb06a, 0x1103: 0xb06b, 0x1104: 0xb05a, 0x1105: 0xb056, + 0x1106: 0xb06c, 0x1107: 0xb06d, 0x1108: 0xb06b, 0x1109: 0xb06e, 0x110a: 0xb06b, 0x110b: 0xb06f, + 0x110c: 0xb06f, 0x110d: 0xb070, 0x110e: 0xb070, 0x110f: 0xb071, 0x1110: 0xb056, 0x1111: 0xb072, + 0x1112: 0xb073, 0x1113: 0xb072, 0x1114: 0xb074, 0x1115: 0xb073, 0x1116: 0xb075, 0x1117: 0xb075, + 0x1118: 0xb076, 0x1119: 0xb076, 0x111a: 0xb077, 0x111b: 0xb077, 0x111c: 0xb073, 0x111d: 0xb078, + 0x111e: 0xb079, 0x111f: 0xb067, 0x1120: 0xb07a, 0x1121: 0xb07b, 0x1122: 0xb07b, 0x1123: 0xb07b, + 0x1124: 0xb07b, 0x1125: 0xb07b, 0x1126: 0xb07b, 0x1127: 0xb07b, 0x1128: 0xb07b, 0x1129: 0xb07b, + 0x112a: 0xb07b, 0x112b: 0xb07b, 0x112c: 0xb07b, 0x112d: 0xb07b, 0x112e: 0xb07b, 0x112f: 0xb07b, + 0x1130: 0xb07c, 0x1131: 0xb07c, 0x1132: 0xb07c, 0x1133: 0xb07c, 0x1134: 0xb07c, 0x1135: 0xb07c, + 0x1136: 0xb07c, 0x1137: 0xb07c, 0x1138: 0xb07c, 0x1139: 0xb07c, 0x113a: 0xb07c, 0x113b: 0xb07c, + 0x113c: 0xb07c, 0x113d: 0xb07c, 0x113e: 0xb07c, + // Block 0x45, offset 0x1140 + 0x1142: 0xb07d, 0x1143: 0xb07e, 0x1144: 0xb07f, 0x1145: 0xb080, + 0x1146: 0xb07f, 0x1147: 0xb07e, 0x114a: 0xb081, 0x114b: 0xb082, + 0x114c: 0xb083, 0x114d: 0xb07f, 0x114e: 0xb080, 0x114f: 0xb07f, + 0x1152: 0xb084, 0x1153: 0xb085, 0x1154: 0xb084, 0x1155: 0xb086, 0x1156: 0xb084, 0x1157: 0xb087, + 0x115a: 0xb088, 0x115b: 0xb089, 0x115c: 0xb08a, + 0x1160: 0x908b, 0x1161: 0x908b, 0x1162: 0x908c, 0x1163: 0x908d, + 0x1164: 0x908b, 0x1165: 0x908e, 0x1166: 0x908f, 0x1168: 0xb090, 0x1169: 0xb091, + 0x116a: 0xb092, 0x116b: 0xb091, 0x116c: 0xb093, 0x116d: 0xb094, 0x116e: 0xb095, + 0x117d: 0x2000, + // Block 0x46, offset 0x1180 + 0x11a0: 0x4000, 0x11a1: 0x4000, 0x11a2: 0x4000, 0x11a3: 0x4000, + 0x11a4: 0x4000, + 0x11b0: 0x4000, 0x11b1: 0x4000, + // Block 0x47, offset 0x11c0 + 0x11c0: 0x4000, 0x11c1: 0x4000, 0x11c2: 0x4000, 0x11c3: 0x4000, 0x11c4: 0x4000, 0x11c5: 0x4000, + 0x11c6: 0x4000, 0x11c7: 0x4000, 0x11c8: 0x4000, 0x11c9: 0x4000, 0x11ca: 0x4000, 0x11cb: 0x4000, + 0x11cc: 0x4000, 0x11cd: 0x4000, 0x11ce: 0x4000, 0x11cf: 0x4000, 0x11d0: 0x4000, 0x11d1: 0x4000, + 0x11d2: 0x4000, 0x11d3: 0x4000, 0x11d4: 0x4000, 0x11d5: 0x4000, 0x11d6: 0x4000, 0x11d7: 0x4000, + 0x11d8: 0x4000, 0x11d9: 0x4000, 0x11da: 0x4000, 0x11db: 0x4000, 0x11dc: 0x4000, 0x11dd: 0x4000, + 0x11de: 0x4000, 0x11df: 0x4000, 0x11e0: 0x4000, 0x11e1: 0x4000, 0x11e2: 0x4000, 0x11e3: 0x4000, + 0x11e4: 0x4000, 0x11e5: 0x4000, 0x11e6: 0x4000, 0x11e7: 0x4000, 0x11e8: 0x4000, 0x11e9: 0x4000, + 0x11ea: 0x4000, 0x11eb: 0x4000, 0x11ec: 0x4000, 0x11ed: 0x4000, 0x11ee: 0x4000, 0x11ef: 0x4000, + 0x11f0: 0x4000, 0x11f1: 0x4000, 0x11f2: 0x4000, 0x11f3: 0x4000, 0x11f4: 0x4000, 0x11f5: 0x4000, + 0x11f6: 0x4000, 0x11f7: 0x4000, + // Block 0x48, offset 0x1200 + 0x1200: 0x4000, 0x1201: 0x4000, 0x1202: 0x4000, 0x1203: 0x4000, 0x1204: 0x4000, 0x1205: 0x4000, + 0x1206: 0x4000, 0x1207: 0x4000, 0x1208: 0x4000, 0x1209: 0x4000, 0x120a: 0x4000, 0x120b: 0x4000, + 0x120c: 0x4000, 0x120d: 0x4000, 0x120e: 0x4000, 0x120f: 0x4000, 0x1210: 0x4000, 0x1211: 0x4000, + 0x1212: 0x4000, 0x1213: 0x4000, 0x1214: 0x4000, 0x1215: 0x4000, + // Block 0x49, offset 0x1240 + 0x1240: 0x4000, 0x1241: 0x4000, 0x1242: 0x4000, 0x1243: 0x4000, 0x1244: 0x4000, 0x1245: 0x4000, + 0x1246: 0x4000, 0x1247: 0x4000, 0x1248: 0x4000, + // Block 0x4a, offset 0x1280 
+ 0x12b0: 0x4000, 0x12b1: 0x4000, 0x12b2: 0x4000, 0x12b3: 0x4000, 0x12b5: 0x4000, + 0x12b6: 0x4000, 0x12b7: 0x4000, 0x12b8: 0x4000, 0x12b9: 0x4000, 0x12ba: 0x4000, 0x12bb: 0x4000, + 0x12bd: 0x4000, 0x12be: 0x4000, + // Block 0x4b, offset 0x12c0 + 0x12c0: 0x4000, 0x12c1: 0x4000, 0x12c2: 0x4000, 0x12c3: 0x4000, 0x12c4: 0x4000, 0x12c5: 0x4000, + 0x12c6: 0x4000, 0x12c7: 0x4000, 0x12c8: 0x4000, 0x12c9: 0x4000, 0x12ca: 0x4000, 0x12cb: 0x4000, + 0x12cc: 0x4000, 0x12cd: 0x4000, 0x12ce: 0x4000, 0x12cf: 0x4000, 0x12d0: 0x4000, 0x12d1: 0x4000, + 0x12d2: 0x4000, 0x12d3: 0x4000, 0x12d4: 0x4000, 0x12d5: 0x4000, 0x12d6: 0x4000, 0x12d7: 0x4000, + 0x12d8: 0x4000, 0x12d9: 0x4000, 0x12da: 0x4000, 0x12db: 0x4000, 0x12dc: 0x4000, 0x12dd: 0x4000, + 0x12de: 0x4000, 0x12df: 0x4000, 0x12e0: 0x4000, 0x12e1: 0x4000, 0x12e2: 0x4000, + 0x12f2: 0x4000, + // Block 0x4c, offset 0x1300 + 0x1310: 0x4000, 0x1311: 0x4000, + 0x1312: 0x4000, 0x1315: 0x4000, + 0x1324: 0x4000, 0x1325: 0x4000, 0x1326: 0x4000, 0x1327: 0x4000, + 0x1330: 0x4000, 0x1331: 0x4000, 0x1332: 0x4000, 0x1333: 0x4000, 0x1334: 0x4000, 0x1335: 0x4000, + 0x1336: 0x4000, 0x1337: 0x4000, 0x1338: 0x4000, 0x1339: 0x4000, 0x133a: 0x4000, 0x133b: 0x4000, + 0x133c: 0x4000, 0x133d: 0x4000, 0x133e: 0x4000, 0x133f: 0x4000, + // Block 0x4d, offset 0x1340 + 0x1340: 0x4000, 0x1341: 0x4000, 0x1342: 0x4000, 0x1343: 0x4000, 0x1344: 0x4000, 0x1345: 0x4000, + 0x1346: 0x4000, 0x1347: 0x4000, 0x1348: 0x4000, 0x1349: 0x4000, 0x134a: 0x4000, 0x134b: 0x4000, + 0x134c: 0x4000, 0x134d: 0x4000, 0x134e: 0x4000, 0x134f: 0x4000, 0x1350: 0x4000, 0x1351: 0x4000, + 0x1352: 0x4000, 0x1353: 0x4000, 0x1354: 0x4000, 0x1355: 0x4000, 0x1356: 0x4000, 0x1357: 0x4000, + 0x1358: 0x4000, 0x1359: 0x4000, 0x135a: 0x4000, 0x135b: 0x4000, 0x135c: 0x4000, 0x135d: 0x4000, + 0x135e: 0x4000, 0x135f: 0x4000, 0x1360: 0x4000, 0x1361: 0x4000, 0x1362: 0x4000, 0x1363: 0x4000, + 0x1364: 0x4000, 0x1365: 0x4000, 0x1366: 0x4000, 0x1367: 0x4000, 0x1368: 0x4000, 0x1369: 0x4000, + 0x136a: 0x4000, 0x136b: 0x4000, 0x136c: 0x4000, 0x136d: 0x4000, 0x136e: 0x4000, 0x136f: 0x4000, + 0x1370: 0x4000, 0x1371: 0x4000, 0x1372: 0x4000, 0x1373: 0x4000, 0x1374: 0x4000, 0x1375: 0x4000, + 0x1376: 0x4000, 0x1377: 0x4000, 0x1378: 0x4000, 0x1379: 0x4000, 0x137a: 0x4000, 0x137b: 0x4000, + // Block 0x4e, offset 0x1380 + 0x1384: 0x4000, + // Block 0x4f, offset 0x13c0 + 0x13cf: 0x4000, + // Block 0x50, offset 0x1400 + 0x1400: 0x2000, 0x1401: 0x2000, 0x1402: 0x2000, 0x1403: 0x2000, 0x1404: 0x2000, 0x1405: 0x2000, + 0x1406: 0x2000, 0x1407: 0x2000, 0x1408: 0x2000, 0x1409: 0x2000, 0x140a: 0x2000, + 0x1410: 0x2000, 0x1411: 0x2000, + 0x1412: 0x2000, 0x1413: 0x2000, 0x1414: 0x2000, 0x1415: 0x2000, 0x1416: 0x2000, 0x1417: 0x2000, + 0x1418: 0x2000, 0x1419: 0x2000, 0x141a: 0x2000, 0x141b: 0x2000, 0x141c: 0x2000, 0x141d: 0x2000, + 0x141e: 0x2000, 0x141f: 0x2000, 0x1420: 0x2000, 0x1421: 0x2000, 0x1422: 0x2000, 0x1423: 0x2000, + 0x1424: 0x2000, 0x1425: 0x2000, 0x1426: 0x2000, 0x1427: 0x2000, 0x1428: 0x2000, 0x1429: 0x2000, + 0x142a: 0x2000, 0x142b: 0x2000, 0x142c: 0x2000, 0x142d: 0x2000, + 0x1430: 0x2000, 0x1431: 0x2000, 0x1432: 0x2000, 0x1433: 0x2000, 0x1434: 0x2000, 0x1435: 0x2000, + 0x1436: 0x2000, 0x1437: 0x2000, 0x1438: 0x2000, 0x1439: 0x2000, 0x143a: 0x2000, 0x143b: 0x2000, + 0x143c: 0x2000, 0x143d: 0x2000, 0x143e: 0x2000, 0x143f: 0x2000, + // Block 0x51, offset 0x1440 + 0x1440: 0x2000, 0x1441: 0x2000, 0x1442: 0x2000, 0x1443: 0x2000, 0x1444: 0x2000, 0x1445: 0x2000, + 0x1446: 0x2000, 0x1447: 0x2000, 0x1448: 0x2000, 0x1449: 0x2000, 0x144a: 0x2000, 0x144b: 
0x2000, + 0x144c: 0x2000, 0x144d: 0x2000, 0x144e: 0x2000, 0x144f: 0x2000, 0x1450: 0x2000, 0x1451: 0x2000, + 0x1452: 0x2000, 0x1453: 0x2000, 0x1454: 0x2000, 0x1455: 0x2000, 0x1456: 0x2000, 0x1457: 0x2000, + 0x1458: 0x2000, 0x1459: 0x2000, 0x145a: 0x2000, 0x145b: 0x2000, 0x145c: 0x2000, 0x145d: 0x2000, + 0x145e: 0x2000, 0x145f: 0x2000, 0x1460: 0x2000, 0x1461: 0x2000, 0x1462: 0x2000, 0x1463: 0x2000, + 0x1464: 0x2000, 0x1465: 0x2000, 0x1466: 0x2000, 0x1467: 0x2000, 0x1468: 0x2000, 0x1469: 0x2000, + 0x1470: 0x2000, 0x1471: 0x2000, 0x1472: 0x2000, 0x1473: 0x2000, 0x1474: 0x2000, 0x1475: 0x2000, + 0x1476: 0x2000, 0x1477: 0x2000, 0x1478: 0x2000, 0x1479: 0x2000, 0x147a: 0x2000, 0x147b: 0x2000, + 0x147c: 0x2000, 0x147d: 0x2000, 0x147e: 0x2000, 0x147f: 0x2000, + // Block 0x52, offset 0x1480 + 0x1480: 0x2000, 0x1481: 0x2000, 0x1482: 0x2000, 0x1483: 0x2000, 0x1484: 0x2000, 0x1485: 0x2000, + 0x1486: 0x2000, 0x1487: 0x2000, 0x1488: 0x2000, 0x1489: 0x2000, 0x148a: 0x2000, 0x148b: 0x2000, + 0x148c: 0x2000, 0x148d: 0x2000, 0x148e: 0x4000, 0x148f: 0x2000, 0x1490: 0x2000, 0x1491: 0x4000, + 0x1492: 0x4000, 0x1493: 0x4000, 0x1494: 0x4000, 0x1495: 0x4000, 0x1496: 0x4000, 0x1497: 0x4000, + 0x1498: 0x4000, 0x1499: 0x4000, 0x149a: 0x4000, 0x149b: 0x2000, 0x149c: 0x2000, 0x149d: 0x2000, + 0x149e: 0x2000, 0x149f: 0x2000, 0x14a0: 0x2000, 0x14a1: 0x2000, 0x14a2: 0x2000, 0x14a3: 0x2000, + 0x14a4: 0x2000, 0x14a5: 0x2000, 0x14a6: 0x2000, 0x14a7: 0x2000, 0x14a8: 0x2000, 0x14a9: 0x2000, + 0x14aa: 0x2000, 0x14ab: 0x2000, 0x14ac: 0x2000, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x4000, 0x14c1: 0x4000, 0x14c2: 0x4000, + 0x14d0: 0x4000, 0x14d1: 0x4000, + 0x14d2: 0x4000, 0x14d3: 0x4000, 0x14d4: 0x4000, 0x14d5: 0x4000, 0x14d6: 0x4000, 0x14d7: 0x4000, + 0x14d8: 0x4000, 0x14d9: 0x4000, 0x14da: 0x4000, 0x14db: 0x4000, 0x14dc: 0x4000, 0x14dd: 0x4000, + 0x14de: 0x4000, 0x14df: 0x4000, 0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000, + 0x14e4: 0x4000, 0x14e5: 0x4000, 0x14e6: 0x4000, 0x14e7: 0x4000, 0x14e8: 0x4000, 0x14e9: 0x4000, + 0x14ea: 0x4000, 0x14eb: 0x4000, 0x14ec: 0x4000, 0x14ed: 0x4000, 0x14ee: 0x4000, 0x14ef: 0x4000, + 0x14f0: 0x4000, 0x14f1: 0x4000, 0x14f2: 0x4000, 0x14f3: 0x4000, 0x14f4: 0x4000, 0x14f5: 0x4000, + 0x14f6: 0x4000, 0x14f7: 0x4000, 0x14f8: 0x4000, 0x14f9: 0x4000, 0x14fa: 0x4000, 0x14fb: 0x4000, + // Block 0x54, offset 0x1500 + 0x1500: 0x4000, 0x1501: 0x4000, 0x1502: 0x4000, 0x1503: 0x4000, 0x1504: 0x4000, 0x1505: 0x4000, + 0x1506: 0x4000, 0x1507: 0x4000, 0x1508: 0x4000, + 0x1510: 0x4000, 0x1511: 0x4000, + 0x1520: 0x4000, 0x1521: 0x4000, 0x1522: 0x4000, 0x1523: 0x4000, + 0x1524: 0x4000, 0x1525: 0x4000, + // Block 0x55, offset 0x1540 + 0x1540: 0x4000, 0x1541: 0x4000, 0x1542: 0x4000, 0x1543: 0x4000, 0x1544: 0x4000, 0x1545: 0x4000, + 0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000, 0x154b: 0x4000, + 0x154c: 0x4000, 0x154d: 0x4000, 0x154e: 0x4000, 0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000, + 0x1552: 0x4000, 0x1553: 0x4000, 0x1554: 0x4000, 0x1555: 0x4000, 0x1556: 0x4000, 0x1557: 0x4000, + 0x1558: 0x4000, 0x1559: 0x4000, 0x155a: 0x4000, 0x155b: 0x4000, 0x155c: 0x4000, 0x155d: 0x4000, + 0x155e: 0x4000, 0x155f: 0x4000, 0x1560: 0x4000, + 0x156d: 0x4000, 0x156e: 0x4000, 0x156f: 0x4000, + 0x1570: 0x4000, 0x1571: 0x4000, 0x1572: 0x4000, 0x1573: 0x4000, 0x1574: 0x4000, 0x1575: 0x4000, + 0x1577: 0x4000, 0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000, + 0x157c: 0x4000, 0x157d: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000, + // Block 0x56, offset 0x1580 + 0x1580: 
0x4000, 0x1581: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000, + 0x1586: 0x4000, 0x1587: 0x4000, 0x1588: 0x4000, 0x1589: 0x4000, 0x158a: 0x4000, 0x158b: 0x4000, + 0x158c: 0x4000, 0x158d: 0x4000, 0x158e: 0x4000, 0x158f: 0x4000, 0x1590: 0x4000, 0x1591: 0x4000, + 0x1592: 0x4000, 0x1593: 0x4000, 0x1594: 0x4000, 0x1595: 0x4000, 0x1596: 0x4000, 0x1597: 0x4000, + 0x1598: 0x4000, 0x1599: 0x4000, 0x159a: 0x4000, 0x159b: 0x4000, 0x159c: 0x4000, 0x159d: 0x4000, + 0x159e: 0x4000, 0x159f: 0x4000, 0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000, + 0x15a4: 0x4000, 0x15a5: 0x4000, 0x15a6: 0x4000, 0x15a7: 0x4000, 0x15a8: 0x4000, 0x15a9: 0x4000, + 0x15aa: 0x4000, 0x15ab: 0x4000, 0x15ac: 0x4000, 0x15ad: 0x4000, 0x15ae: 0x4000, 0x15af: 0x4000, + 0x15b0: 0x4000, 0x15b1: 0x4000, 0x15b2: 0x4000, 0x15b3: 0x4000, 0x15b4: 0x4000, 0x15b5: 0x4000, + 0x15b6: 0x4000, 0x15b7: 0x4000, 0x15b8: 0x4000, 0x15b9: 0x4000, 0x15ba: 0x4000, 0x15bb: 0x4000, + 0x15bc: 0x4000, 0x15be: 0x4000, 0x15bf: 0x4000, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x4000, 0x15c1: 0x4000, 0x15c2: 0x4000, 0x15c3: 0x4000, 0x15c4: 0x4000, 0x15c5: 0x4000, + 0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000, 0x15cb: 0x4000, + 0x15cc: 0x4000, 0x15cd: 0x4000, 0x15ce: 0x4000, 0x15cf: 0x4000, 0x15d0: 0x4000, 0x15d1: 0x4000, + 0x15d2: 0x4000, 0x15d3: 0x4000, + 0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000, + 0x15e4: 0x4000, 0x15e5: 0x4000, 0x15e6: 0x4000, 0x15e7: 0x4000, 0x15e8: 0x4000, 0x15e9: 0x4000, + 0x15ea: 0x4000, 0x15eb: 0x4000, 0x15ec: 0x4000, 0x15ed: 0x4000, 0x15ee: 0x4000, 0x15ef: 0x4000, + 0x15f0: 0x4000, 0x15f1: 0x4000, 0x15f2: 0x4000, 0x15f3: 0x4000, 0x15f4: 0x4000, 0x15f5: 0x4000, + 0x15f6: 0x4000, 0x15f7: 0x4000, 0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000, + 0x15fc: 0x4000, 0x15fd: 0x4000, 0x15fe: 0x4000, 0x15ff: 0x4000, + // Block 0x58, offset 0x1600 + 0x1600: 0x4000, 0x1601: 0x4000, 0x1602: 0x4000, 0x1603: 0x4000, 0x1604: 0x4000, 0x1605: 0x4000, + 0x1606: 0x4000, 0x1607: 0x4000, 0x1608: 0x4000, 0x1609: 0x4000, 0x160a: 0x4000, + 0x160f: 0x4000, 0x1610: 0x4000, 0x1611: 0x4000, + 0x1612: 0x4000, 0x1613: 0x4000, + 0x1620: 0x4000, 0x1621: 0x4000, 0x1622: 0x4000, 0x1623: 0x4000, + 0x1624: 0x4000, 0x1625: 0x4000, 0x1626: 0x4000, 0x1627: 0x4000, 0x1628: 0x4000, 0x1629: 0x4000, + 0x162a: 0x4000, 0x162b: 0x4000, 0x162c: 0x4000, 0x162d: 0x4000, 0x162e: 0x4000, 0x162f: 0x4000, + 0x1630: 0x4000, 0x1634: 0x4000, + 0x1638: 0x4000, 0x1639: 0x4000, 0x163a: 0x4000, 0x163b: 0x4000, + 0x163c: 0x4000, 0x163d: 0x4000, 0x163e: 0x4000, 0x163f: 0x4000, + // Block 0x59, offset 0x1640 + 0x1640: 0x4000, 0x1641: 0x4000, 0x1642: 0x4000, 0x1643: 0x4000, 0x1644: 0x4000, 0x1645: 0x4000, + 0x1646: 0x4000, 0x1647: 0x4000, 0x1648: 0x4000, 0x1649: 0x4000, 0x164a: 0x4000, 0x164b: 0x4000, + 0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x164f: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000, + 0x1652: 0x4000, 0x1653: 0x4000, 0x1654: 0x4000, 0x1655: 0x4000, 0x1656: 0x4000, 0x1657: 0x4000, + 0x1658: 0x4000, 0x1659: 0x4000, 0x165a: 0x4000, 0x165b: 0x4000, 0x165c: 0x4000, 0x165d: 0x4000, + 0x165e: 0x4000, 0x165f: 0x4000, 0x1660: 0x4000, 0x1661: 0x4000, 0x1662: 0x4000, 0x1663: 0x4000, + 0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000, 0x1668: 0x4000, 0x1669: 0x4000, + 0x166a: 0x4000, 0x166b: 0x4000, 0x166c: 0x4000, 0x166d: 0x4000, 0x166e: 0x4000, 0x166f: 0x4000, + 0x1670: 0x4000, 0x1671: 0x4000, 0x1672: 0x4000, 0x1673: 0x4000, 0x1674: 0x4000, 0x1675: 0x4000, + 0x1676: 
0x4000, 0x1677: 0x4000, 0x1678: 0x4000, 0x1679: 0x4000, 0x167a: 0x4000, 0x167b: 0x4000, + 0x167c: 0x4000, 0x167d: 0x4000, 0x167e: 0x4000, + // Block 0x5a, offset 0x1680 + 0x1680: 0x4000, 0x1682: 0x4000, 0x1683: 0x4000, 0x1684: 0x4000, 0x1685: 0x4000, + 0x1686: 0x4000, 0x1687: 0x4000, 0x1688: 0x4000, 0x1689: 0x4000, 0x168a: 0x4000, 0x168b: 0x4000, + 0x168c: 0x4000, 0x168d: 0x4000, 0x168e: 0x4000, 0x168f: 0x4000, 0x1690: 0x4000, 0x1691: 0x4000, + 0x1692: 0x4000, 0x1693: 0x4000, 0x1694: 0x4000, 0x1695: 0x4000, 0x1696: 0x4000, 0x1697: 0x4000, + 0x1698: 0x4000, 0x1699: 0x4000, 0x169a: 0x4000, 0x169b: 0x4000, 0x169c: 0x4000, 0x169d: 0x4000, + 0x169e: 0x4000, 0x169f: 0x4000, 0x16a0: 0x4000, 0x16a1: 0x4000, 0x16a2: 0x4000, 0x16a3: 0x4000, + 0x16a4: 0x4000, 0x16a5: 0x4000, 0x16a6: 0x4000, 0x16a7: 0x4000, 0x16a8: 0x4000, 0x16a9: 0x4000, + 0x16aa: 0x4000, 0x16ab: 0x4000, 0x16ac: 0x4000, 0x16ad: 0x4000, 0x16ae: 0x4000, 0x16af: 0x4000, + 0x16b0: 0x4000, 0x16b1: 0x4000, 0x16b2: 0x4000, 0x16b3: 0x4000, 0x16b4: 0x4000, 0x16b5: 0x4000, + 0x16b6: 0x4000, 0x16b7: 0x4000, 0x16b8: 0x4000, 0x16b9: 0x4000, 0x16ba: 0x4000, 0x16bb: 0x4000, + 0x16bc: 0x4000, 0x16bd: 0x4000, 0x16be: 0x4000, 0x16bf: 0x4000, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x4000, 0x16c1: 0x4000, 0x16c2: 0x4000, 0x16c3: 0x4000, 0x16c4: 0x4000, 0x16c5: 0x4000, + 0x16c6: 0x4000, 0x16c7: 0x4000, 0x16c8: 0x4000, 0x16c9: 0x4000, 0x16ca: 0x4000, 0x16cb: 0x4000, + 0x16cc: 0x4000, 0x16cd: 0x4000, 0x16ce: 0x4000, 0x16cf: 0x4000, 0x16d0: 0x4000, 0x16d1: 0x4000, + 0x16d2: 0x4000, 0x16d3: 0x4000, 0x16d4: 0x4000, 0x16d5: 0x4000, 0x16d6: 0x4000, 0x16d7: 0x4000, + 0x16d8: 0x4000, 0x16d9: 0x4000, 0x16da: 0x4000, 0x16db: 0x4000, 0x16dc: 0x4000, 0x16dd: 0x4000, + 0x16de: 0x4000, 0x16df: 0x4000, 0x16e0: 0x4000, 0x16e1: 0x4000, 0x16e2: 0x4000, 0x16e3: 0x4000, + 0x16e4: 0x4000, 0x16e5: 0x4000, 0x16e6: 0x4000, 0x16e7: 0x4000, 0x16e8: 0x4000, 0x16e9: 0x4000, + 0x16ea: 0x4000, 0x16eb: 0x4000, 0x16ec: 0x4000, 0x16ed: 0x4000, 0x16ee: 0x4000, 0x16ef: 0x4000, + 0x16f0: 0x4000, 0x16f1: 0x4000, 0x16f2: 0x4000, 0x16f3: 0x4000, 0x16f4: 0x4000, 0x16f5: 0x4000, + 0x16f6: 0x4000, 0x16f7: 0x4000, 0x16f8: 0x4000, 0x16f9: 0x4000, 0x16fa: 0x4000, 0x16fb: 0x4000, + 0x16fc: 0x4000, 0x16ff: 0x4000, + // Block 0x5c, offset 0x1700 + 0x1700: 0x4000, 0x1701: 0x4000, 0x1702: 0x4000, 0x1703: 0x4000, 0x1704: 0x4000, 0x1705: 0x4000, + 0x1706: 0x4000, 0x1707: 0x4000, 0x1708: 0x4000, 0x1709: 0x4000, 0x170a: 0x4000, 0x170b: 0x4000, + 0x170c: 0x4000, 0x170d: 0x4000, 0x170e: 0x4000, 0x170f: 0x4000, 0x1710: 0x4000, 0x1711: 0x4000, + 0x1712: 0x4000, 0x1713: 0x4000, 0x1714: 0x4000, 0x1715: 0x4000, 0x1716: 0x4000, 0x1717: 0x4000, + 0x1718: 0x4000, 0x1719: 0x4000, 0x171a: 0x4000, 0x171b: 0x4000, 0x171c: 0x4000, 0x171d: 0x4000, + 0x171e: 0x4000, 0x171f: 0x4000, 0x1720: 0x4000, 0x1721: 0x4000, 0x1722: 0x4000, 0x1723: 0x4000, + 0x1724: 0x4000, 0x1725: 0x4000, 0x1726: 0x4000, 0x1727: 0x4000, 0x1728: 0x4000, 0x1729: 0x4000, + 0x172a: 0x4000, 0x172b: 0x4000, 0x172c: 0x4000, 0x172d: 0x4000, 0x172e: 0x4000, 0x172f: 0x4000, + 0x1730: 0x4000, 0x1731: 0x4000, 0x1732: 0x4000, 0x1733: 0x4000, 0x1734: 0x4000, 0x1735: 0x4000, + 0x1736: 0x4000, 0x1737: 0x4000, 0x1738: 0x4000, 0x1739: 0x4000, 0x173a: 0x4000, 0x173b: 0x4000, + 0x173c: 0x4000, 0x173d: 0x4000, + // Block 0x5d, offset 0x1740 + 0x174b: 0x4000, + 0x174c: 0x4000, 0x174d: 0x4000, 0x174e: 0x4000, 0x1750: 0x4000, 0x1751: 0x4000, + 0x1752: 0x4000, 0x1753: 0x4000, 0x1754: 0x4000, 0x1755: 0x4000, 0x1756: 0x4000, 0x1757: 0x4000, + 0x1758: 0x4000, 0x1759: 0x4000, 
0x175a: 0x4000, 0x175b: 0x4000, 0x175c: 0x4000, 0x175d: 0x4000, + 0x175e: 0x4000, 0x175f: 0x4000, 0x1760: 0x4000, 0x1761: 0x4000, 0x1762: 0x4000, 0x1763: 0x4000, + 0x1764: 0x4000, 0x1765: 0x4000, 0x1766: 0x4000, 0x1767: 0x4000, + 0x177a: 0x4000, + // Block 0x5e, offset 0x1780 + 0x1795: 0x4000, 0x1796: 0x4000, + 0x17a4: 0x4000, + // Block 0x5f, offset 0x17c0 + 0x17fb: 0x4000, + 0x17fc: 0x4000, 0x17fd: 0x4000, 0x17fe: 0x4000, 0x17ff: 0x4000, + // Block 0x60, offset 0x1800 + 0x1800: 0x4000, 0x1801: 0x4000, 0x1802: 0x4000, 0x1803: 0x4000, 0x1804: 0x4000, 0x1805: 0x4000, + 0x1806: 0x4000, 0x1807: 0x4000, 0x1808: 0x4000, 0x1809: 0x4000, 0x180a: 0x4000, 0x180b: 0x4000, + 0x180c: 0x4000, 0x180d: 0x4000, 0x180e: 0x4000, 0x180f: 0x4000, + // Block 0x61, offset 0x1840 + 0x1840: 0x4000, 0x1841: 0x4000, 0x1842: 0x4000, 0x1843: 0x4000, 0x1844: 0x4000, 0x1845: 0x4000, + 0x184c: 0x4000, 0x1850: 0x4000, 0x1851: 0x4000, + 0x1852: 0x4000, 0x1855: 0x4000, 0x1856: 0x4000, 0x1857: 0x4000, + 0x185c: 0x4000, 0x185d: 0x4000, + 0x185e: 0x4000, 0x185f: 0x4000, + 0x186b: 0x4000, 0x186c: 0x4000, + 0x1874: 0x4000, 0x1875: 0x4000, + 0x1876: 0x4000, 0x1877: 0x4000, 0x1878: 0x4000, 0x1879: 0x4000, 0x187a: 0x4000, 0x187b: 0x4000, + 0x187c: 0x4000, + // Block 0x62, offset 0x1880 + 0x18a0: 0x4000, 0x18a1: 0x4000, 0x18a2: 0x4000, 0x18a3: 0x4000, + 0x18a4: 0x4000, 0x18a5: 0x4000, 0x18a6: 0x4000, 0x18a7: 0x4000, 0x18a8: 0x4000, 0x18a9: 0x4000, + 0x18aa: 0x4000, 0x18ab: 0x4000, + 0x18b0: 0x4000, + // Block 0x63, offset 0x18c0 + 0x18cc: 0x4000, 0x18cd: 0x4000, 0x18ce: 0x4000, 0x18cf: 0x4000, 0x18d0: 0x4000, 0x18d1: 0x4000, + 0x18d2: 0x4000, 0x18d3: 0x4000, 0x18d4: 0x4000, 0x18d5: 0x4000, 0x18d6: 0x4000, 0x18d7: 0x4000, + 0x18d8: 0x4000, 0x18d9: 0x4000, 0x18da: 0x4000, 0x18db: 0x4000, 0x18dc: 0x4000, 0x18dd: 0x4000, + 0x18de: 0x4000, 0x18df: 0x4000, 0x18e0: 0x4000, 0x18e1: 0x4000, 0x18e2: 0x4000, 0x18e3: 0x4000, + 0x18e4: 0x4000, 0x18e5: 0x4000, 0x18e6: 0x4000, 0x18e7: 0x4000, 0x18e8: 0x4000, 0x18e9: 0x4000, + 0x18ea: 0x4000, 0x18eb: 0x4000, 0x18ec: 0x4000, 0x18ed: 0x4000, 0x18ee: 0x4000, 0x18ef: 0x4000, + 0x18f0: 0x4000, 0x18f1: 0x4000, 0x18f2: 0x4000, 0x18f3: 0x4000, 0x18f4: 0x4000, 0x18f5: 0x4000, + 0x18f6: 0x4000, 0x18f7: 0x4000, 0x18f8: 0x4000, 0x18f9: 0x4000, 0x18fa: 0x4000, + 0x18fc: 0x4000, 0x18fd: 0x4000, 0x18fe: 0x4000, 0x18ff: 0x4000, + // Block 0x64, offset 0x1900 + 0x1900: 0x4000, 0x1901: 0x4000, 0x1902: 0x4000, 0x1903: 0x4000, 0x1904: 0x4000, 0x1905: 0x4000, + 0x1907: 0x4000, 0x1908: 0x4000, 0x1909: 0x4000, 0x190a: 0x4000, 0x190b: 0x4000, + 0x190c: 0x4000, 0x190d: 0x4000, 0x190e: 0x4000, 0x190f: 0x4000, 0x1910: 0x4000, 0x1911: 0x4000, + 0x1912: 0x4000, 0x1913: 0x4000, 0x1914: 0x4000, 0x1915: 0x4000, 0x1916: 0x4000, 0x1917: 0x4000, + 0x1918: 0x4000, 0x1919: 0x4000, 0x191a: 0x4000, 0x191b: 0x4000, 0x191c: 0x4000, 0x191d: 0x4000, + 0x191e: 0x4000, 0x191f: 0x4000, 0x1920: 0x4000, 0x1921: 0x4000, 0x1922: 0x4000, 0x1923: 0x4000, + 0x1924: 0x4000, 0x1925: 0x4000, 0x1926: 0x4000, 0x1927: 0x4000, 0x1928: 0x4000, 0x1929: 0x4000, + 0x192a: 0x4000, 0x192b: 0x4000, 0x192c: 0x4000, 0x192d: 0x4000, 0x192e: 0x4000, 0x192f: 0x4000, + 0x1930: 0x4000, 0x1931: 0x4000, 0x1932: 0x4000, 0x1933: 0x4000, 0x1934: 0x4000, 0x1935: 0x4000, + 0x1936: 0x4000, 0x1937: 0x4000, 0x1938: 0x4000, 0x1939: 0x4000, 0x193a: 0x4000, 0x193b: 0x4000, + 0x193c: 0x4000, 0x193d: 0x4000, 0x193e: 0x4000, 0x193f: 0x4000, + // Block 0x65, offset 0x1940 + 0x1970: 0x4000, 0x1971: 0x4000, 0x1972: 0x4000, 0x1973: 0x4000, 0x1974: 0x4000, 0x1975: 0x4000, + 0x1976: 0x4000, 
0x1977: 0x4000, 0x1978: 0x4000, 0x1979: 0x4000, 0x197a: 0x4000, 0x197b: 0x4000, + 0x197c: 0x4000, + // Block 0x66, offset 0x1980 + 0x1980: 0x4000, 0x1981: 0x4000, 0x1982: 0x4000, 0x1983: 0x4000, 0x1984: 0x4000, 0x1985: 0x4000, + 0x1986: 0x4000, 0x1987: 0x4000, 0x1988: 0x4000, + 0x1990: 0x4000, 0x1991: 0x4000, + 0x1992: 0x4000, 0x1993: 0x4000, 0x1994: 0x4000, 0x1995: 0x4000, 0x1996: 0x4000, 0x1997: 0x4000, + 0x1998: 0x4000, 0x1999: 0x4000, 0x199a: 0x4000, 0x199b: 0x4000, 0x199c: 0x4000, 0x199d: 0x4000, + 0x199e: 0x4000, 0x199f: 0x4000, 0x19a0: 0x4000, 0x19a1: 0x4000, 0x19a2: 0x4000, 0x19a3: 0x4000, + 0x19a4: 0x4000, 0x19a5: 0x4000, 0x19a6: 0x4000, 0x19a7: 0x4000, 0x19a8: 0x4000, 0x19a9: 0x4000, + 0x19aa: 0x4000, 0x19ab: 0x4000, 0x19ac: 0x4000, 0x19ad: 0x4000, 0x19ae: 0x4000, 0x19af: 0x4000, + 0x19b0: 0x4000, 0x19b1: 0x4000, 0x19b2: 0x4000, 0x19b3: 0x4000, 0x19b4: 0x4000, 0x19b5: 0x4000, + 0x19b6: 0x4000, 0x19b7: 0x4000, 0x19b8: 0x4000, 0x19b9: 0x4000, 0x19ba: 0x4000, 0x19bb: 0x4000, + 0x19bc: 0x4000, 0x19bd: 0x4000, 0x19bf: 0x4000, + // Block 0x67, offset 0x19c0 + 0x19c0: 0x4000, 0x19c1: 0x4000, 0x19c2: 0x4000, 0x19c3: 0x4000, 0x19c4: 0x4000, 0x19c5: 0x4000, + 0x19ce: 0x4000, 0x19cf: 0x4000, 0x19d0: 0x4000, 0x19d1: 0x4000, + 0x19d2: 0x4000, 0x19d3: 0x4000, 0x19d4: 0x4000, 0x19d5: 0x4000, 0x19d6: 0x4000, 0x19d7: 0x4000, + 0x19d8: 0x4000, 0x19d9: 0x4000, 0x19da: 0x4000, 0x19db: 0x4000, + 0x19e0: 0x4000, 0x19e1: 0x4000, 0x19e2: 0x4000, 0x19e3: 0x4000, + 0x19e4: 0x4000, 0x19e5: 0x4000, 0x19e6: 0x4000, 0x19e7: 0x4000, 0x19e8: 0x4000, + 0x19f0: 0x4000, 0x19f1: 0x4000, 0x19f2: 0x4000, 0x19f3: 0x4000, 0x19f4: 0x4000, 0x19f5: 0x4000, + 0x19f6: 0x4000, 0x19f7: 0x4000, 0x19f8: 0x4000, + // Block 0x68, offset 0x1a00 + 0x1a00: 0x2000, 0x1a01: 0x2000, 0x1a02: 0x2000, 0x1a03: 0x2000, 0x1a04: 0x2000, 0x1a05: 0x2000, + 0x1a06: 0x2000, 0x1a07: 0x2000, 0x1a08: 0x2000, 0x1a09: 0x2000, 0x1a0a: 0x2000, 0x1a0b: 0x2000, + 0x1a0c: 0x2000, 0x1a0d: 0x2000, 0x1a0e: 0x2000, 0x1a0f: 0x2000, 0x1a10: 0x2000, 0x1a11: 0x2000, + 0x1a12: 0x2000, 0x1a13: 0x2000, 0x1a14: 0x2000, 0x1a15: 0x2000, 0x1a16: 0x2000, 0x1a17: 0x2000, + 0x1a18: 0x2000, 0x1a19: 0x2000, 0x1a1a: 0x2000, 0x1a1b: 0x2000, 0x1a1c: 0x2000, 0x1a1d: 0x2000, + 0x1a1e: 0x2000, 0x1a1f: 0x2000, 0x1a20: 0x2000, 0x1a21: 0x2000, 0x1a22: 0x2000, 0x1a23: 0x2000, + 0x1a24: 0x2000, 0x1a25: 0x2000, 0x1a26: 0x2000, 0x1a27: 0x2000, 0x1a28: 0x2000, 0x1a29: 0x2000, + 0x1a2a: 0x2000, 0x1a2b: 0x2000, 0x1a2c: 0x2000, 0x1a2d: 0x2000, 0x1a2e: 0x2000, 0x1a2f: 0x2000, + 0x1a30: 0x2000, 0x1a31: 0x2000, 0x1a32: 0x2000, 0x1a33: 0x2000, 0x1a34: 0x2000, 0x1a35: 0x2000, + 0x1a36: 0x2000, 0x1a37: 0x2000, 0x1a38: 0x2000, 0x1a39: 0x2000, 0x1a3a: 0x2000, 0x1a3b: 0x2000, + 0x1a3c: 0x2000, 0x1a3d: 0x2000, +} + +// widthIndex: 23 blocks, 1472 entries, 1472 bytes +// Block 0 is the zero block. 
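+// As a sketch of how this index is consumed (mirroring the generated lookup
+// methods in this package; see lookupUnsafe in tables9.0.0.go below), a
+// 3-byte rune [c0 c1 c2] is resolved by chaining index blocks:
+//
+//	i := widthIndex[c0]                       // lead byte selects an index block
+//	i = widthIndex[uint32(i)<<6+uint32(c1)]   // continuation byte refines it
+//	v := widthValues[uint32(i)<<6+uint32(c2)] // last byte yields the value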
+var widthIndex = [1472]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc7: 0x05, + 0xc9: 0x06, 0xcb: 0x07, 0xcc: 0x08, 0xcd: 0x09, 0xce: 0x0a, 0xcf: 0x0b, + 0xd0: 0x0c, 0xd1: 0x0d, + 0xe1: 0x02, 0xe2: 0x03, 0xe3: 0x04, 0xe4: 0x05, 0xe5: 0x06, 0xe6: 0x06, 0xe7: 0x06, + 0xe8: 0x06, 0xe9: 0x06, 0xea: 0x07, 0xeb: 0x06, 0xec: 0x06, 0xed: 0x08, 0xee: 0x09, 0xef: 0x0a, + 0xf0: 0x10, 0xf3: 0x13, 0xf4: 0x14, + // Block 0x4, offset 0x100 + 0x104: 0x0e, 0x105: 0x0f, + // Block 0x5, offset 0x140 + 0x140: 0x10, 0x141: 0x11, 0x142: 0x12, 0x144: 0x13, 0x145: 0x14, 0x146: 0x15, 0x147: 0x16, + 0x148: 0x17, 0x149: 0x18, 0x14a: 0x19, 0x14c: 0x1a, 0x14f: 0x1b, + 0x151: 0x1c, 0x152: 0x08, 0x153: 0x1d, 0x154: 0x1e, 0x155: 0x1f, 0x156: 0x20, 0x157: 0x21, + 0x158: 0x22, 0x159: 0x23, 0x15a: 0x24, 0x15b: 0x25, 0x15c: 0x26, 0x15d: 0x27, 0x15e: 0x28, 0x15f: 0x29, + 0x166: 0x2a, + 0x16c: 0x2b, 0x16d: 0x2c, + 0x17a: 0x2d, 0x17b: 0x2e, 0x17c: 0x0e, 0x17d: 0x0e, 0x17e: 0x0e, 0x17f: 0x2f, + // Block 0x6, offset 0x180 + 0x180: 0x30, 0x181: 0x31, 0x182: 0x32, 0x183: 0x33, 0x184: 0x34, 0x185: 0x35, 0x186: 0x36, 0x187: 0x37, + 0x188: 0x38, 0x189: 0x39, 0x18a: 0x0e, 0x18b: 0x0e, 0x18c: 0x0e, 0x18d: 0x0e, 0x18e: 0x0e, 0x18f: 0x0e, + 0x190: 0x0e, 0x191: 0x0e, 0x192: 0x0e, 0x193: 0x0e, 0x194: 0x0e, 0x195: 0x0e, 0x196: 0x0e, 0x197: 0x0e, + 0x198: 0x0e, 0x199: 0x0e, 0x19a: 0x0e, 0x19b: 0x0e, 0x19c: 0x0e, 0x19d: 0x0e, 0x19e: 0x0e, 0x19f: 0x0e, + 0x1a0: 0x0e, 0x1a1: 0x0e, 0x1a2: 0x0e, 0x1a3: 0x0e, 0x1a4: 0x0e, 0x1a5: 0x0e, 0x1a6: 0x0e, 0x1a7: 0x0e, + 0x1a8: 0x0e, 0x1a9: 0x0e, 0x1aa: 0x0e, 0x1ab: 0x0e, 0x1ac: 0x0e, 0x1ad: 0x0e, 0x1ae: 0x0e, 0x1af: 0x0e, + 0x1b0: 0x0e, 0x1b1: 0x0e, 0x1b2: 0x0e, 0x1b3: 0x0e, 0x1b4: 0x0e, 0x1b5: 0x0e, 0x1b6: 0x0e, 0x1b7: 0x0e, + 0x1b8: 0x0e, 0x1b9: 0x0e, 0x1ba: 0x0e, 0x1bb: 0x0e, 0x1bc: 0x0e, 0x1bd: 0x0e, 0x1be: 0x0e, 0x1bf: 0x0e, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0e, 0x1c1: 0x0e, 0x1c2: 0x0e, 0x1c3: 0x0e, 0x1c4: 0x0e, 0x1c5: 0x0e, 0x1c6: 0x0e, 0x1c7: 0x0e, + 0x1c8: 0x0e, 0x1c9: 0x0e, 0x1ca: 0x0e, 0x1cb: 0x0e, 0x1cc: 0x0e, 0x1cd: 0x0e, 0x1ce: 0x0e, 0x1cf: 0x0e, + 0x1d0: 0x0e, 0x1d1: 0x0e, 0x1d2: 0x0e, 0x1d3: 0x0e, 0x1d4: 0x0e, 0x1d5: 0x0e, 0x1d6: 0x0e, 0x1d7: 0x0e, + 0x1d8: 0x0e, 0x1d9: 0x0e, 0x1da: 0x0e, 0x1db: 0x0e, 0x1dc: 0x0e, 0x1dd: 0x0e, 0x1de: 0x0e, 0x1df: 0x0e, + 0x1e0: 0x0e, 0x1e1: 0x0e, 0x1e2: 0x0e, 0x1e3: 0x0e, 0x1e4: 0x0e, 0x1e5: 0x0e, 0x1e6: 0x0e, 0x1e7: 0x0e, + 0x1e8: 0x0e, 0x1e9: 0x0e, 0x1ea: 0x0e, 0x1eb: 0x0e, 0x1ec: 0x0e, 0x1ed: 0x0e, 0x1ee: 0x0e, 0x1ef: 0x0e, + 0x1f0: 0x0e, 0x1f1: 0x0e, 0x1f2: 0x0e, 0x1f3: 0x0e, 0x1f4: 0x0e, 0x1f5: 0x0e, 0x1f6: 0x0e, + 0x1f8: 0x0e, 0x1f9: 0x0e, 0x1fa: 0x0e, 0x1fb: 0x0e, 0x1fc: 0x0e, 0x1fd: 0x0e, 0x1fe: 0x0e, 0x1ff: 0x0e, + // Block 0x8, offset 0x200 + 0x200: 0x0e, 0x201: 0x0e, 0x202: 0x0e, 0x203: 0x0e, 0x204: 0x0e, 0x205: 0x0e, 0x206: 0x0e, 0x207: 0x0e, + 0x208: 0x0e, 0x209: 0x0e, 0x20a: 0x0e, 0x20b: 0x0e, 0x20c: 0x0e, 0x20d: 0x0e, 0x20e: 0x0e, 0x20f: 0x0e, + 0x210: 0x0e, 0x211: 0x0e, 0x212: 0x0e, 0x213: 0x0e, 0x214: 0x0e, 0x215: 0x0e, 0x216: 0x0e, 0x217: 0x0e, + 0x218: 0x0e, 0x219: 0x0e, 0x21a: 0x0e, 0x21b: 0x0e, 0x21c: 0x0e, 0x21d: 0x0e, 0x21e: 0x0e, 0x21f: 0x0e, + 0x220: 0x0e, 0x221: 0x0e, 0x222: 0x0e, 0x223: 0x0e, 0x224: 0x0e, 0x225: 0x0e, 0x226: 0x0e, 0x227: 0x0e, + 0x228: 0x0e, 0x229: 0x0e, 0x22a: 0x0e, 0x22b: 0x0e, 0x22c: 0x0e, 0x22d: 0x0e, 0x22e: 0x0e, 0x22f: 0x0e, + 0x230: 0x0e, 0x231: 0x0e, 0x232: 0x0e, 0x233: 0x0e, 
0x234: 0x0e, 0x235: 0x0e, 0x236: 0x0e, 0x237: 0x0e, + 0x238: 0x0e, 0x239: 0x0e, 0x23a: 0x0e, 0x23b: 0x0e, 0x23c: 0x0e, 0x23d: 0x0e, 0x23e: 0x0e, 0x23f: 0x0e, + // Block 0x9, offset 0x240 + 0x240: 0x0e, 0x241: 0x0e, 0x242: 0x0e, 0x243: 0x0e, 0x244: 0x0e, 0x245: 0x0e, 0x246: 0x0e, 0x247: 0x0e, + 0x248: 0x0e, 0x249: 0x0e, 0x24a: 0x0e, 0x24b: 0x0e, 0x24c: 0x0e, 0x24d: 0x0e, 0x24e: 0x0e, 0x24f: 0x0e, + 0x250: 0x0e, 0x251: 0x0e, 0x252: 0x3a, 0x253: 0x3b, + 0x265: 0x3c, + 0x270: 0x0e, 0x271: 0x0e, 0x272: 0x0e, 0x273: 0x0e, 0x274: 0x0e, 0x275: 0x0e, 0x276: 0x0e, 0x277: 0x0e, + 0x278: 0x0e, 0x279: 0x0e, 0x27a: 0x0e, 0x27b: 0x0e, 0x27c: 0x0e, 0x27d: 0x0e, 0x27e: 0x0e, 0x27f: 0x0e, + // Block 0xa, offset 0x280 + 0x280: 0x0e, 0x281: 0x0e, 0x282: 0x0e, 0x283: 0x0e, 0x284: 0x0e, 0x285: 0x0e, 0x286: 0x0e, 0x287: 0x0e, + 0x288: 0x0e, 0x289: 0x0e, 0x28a: 0x0e, 0x28b: 0x0e, 0x28c: 0x0e, 0x28d: 0x0e, 0x28e: 0x0e, 0x28f: 0x0e, + 0x290: 0x0e, 0x291: 0x0e, 0x292: 0x0e, 0x293: 0x0e, 0x294: 0x0e, 0x295: 0x0e, 0x296: 0x0e, 0x297: 0x0e, + 0x298: 0x0e, 0x299: 0x0e, 0x29a: 0x0e, 0x29b: 0x0e, 0x29c: 0x0e, 0x29d: 0x0e, 0x29e: 0x3d, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x08, 0x2c1: 0x08, 0x2c2: 0x08, 0x2c3: 0x08, 0x2c4: 0x08, 0x2c5: 0x08, 0x2c6: 0x08, 0x2c7: 0x08, + 0x2c8: 0x08, 0x2c9: 0x08, 0x2ca: 0x08, 0x2cb: 0x08, 0x2cc: 0x08, 0x2cd: 0x08, 0x2ce: 0x08, 0x2cf: 0x08, + 0x2d0: 0x08, 0x2d1: 0x08, 0x2d2: 0x08, 0x2d3: 0x08, 0x2d4: 0x08, 0x2d5: 0x08, 0x2d6: 0x08, 0x2d7: 0x08, + 0x2d8: 0x08, 0x2d9: 0x08, 0x2da: 0x08, 0x2db: 0x08, 0x2dc: 0x08, 0x2dd: 0x08, 0x2de: 0x08, 0x2df: 0x08, + 0x2e0: 0x08, 0x2e1: 0x08, 0x2e2: 0x08, 0x2e3: 0x08, 0x2e4: 0x08, 0x2e5: 0x08, 0x2e6: 0x08, 0x2e7: 0x08, + 0x2e8: 0x08, 0x2e9: 0x08, 0x2ea: 0x08, 0x2eb: 0x08, 0x2ec: 0x08, 0x2ed: 0x08, 0x2ee: 0x08, 0x2ef: 0x08, + 0x2f0: 0x08, 0x2f1: 0x08, 0x2f2: 0x08, 0x2f3: 0x08, 0x2f4: 0x08, 0x2f5: 0x08, 0x2f6: 0x08, 0x2f7: 0x08, + 0x2f8: 0x08, 0x2f9: 0x08, 0x2fa: 0x08, 0x2fb: 0x08, 0x2fc: 0x08, 0x2fd: 0x08, 0x2fe: 0x08, 0x2ff: 0x08, + // Block 0xc, offset 0x300 + 0x300: 0x08, 0x301: 0x08, 0x302: 0x08, 0x303: 0x08, 0x304: 0x08, 0x305: 0x08, 0x306: 0x08, 0x307: 0x08, + 0x308: 0x08, 0x309: 0x08, 0x30a: 0x08, 0x30b: 0x08, 0x30c: 0x08, 0x30d: 0x08, 0x30e: 0x08, 0x30f: 0x08, + 0x310: 0x08, 0x311: 0x08, 0x312: 0x08, 0x313: 0x08, 0x314: 0x08, 0x315: 0x08, 0x316: 0x08, 0x317: 0x08, + 0x318: 0x08, 0x319: 0x08, 0x31a: 0x08, 0x31b: 0x08, 0x31c: 0x08, 0x31d: 0x08, 0x31e: 0x08, 0x31f: 0x08, + 0x320: 0x08, 0x321: 0x08, 0x322: 0x08, 0x323: 0x08, 0x324: 0x0e, 0x325: 0x0e, 0x326: 0x0e, 0x327: 0x0e, + 0x328: 0x0e, 0x329: 0x0e, 0x32a: 0x0e, 0x32b: 0x0e, + 0x338: 0x3e, 0x339: 0x3f, 0x33c: 0x40, 0x33d: 0x41, 0x33e: 0x42, 0x33f: 0x43, + // Block 0xd, offset 0x340 + 0x37f: 0x44, + // Block 0xe, offset 0x380 + 0x380: 0x0e, 0x381: 0x0e, 0x382: 0x0e, 0x383: 0x0e, 0x384: 0x0e, 0x385: 0x0e, 0x386: 0x0e, 0x387: 0x0e, + 0x388: 0x0e, 0x389: 0x0e, 0x38a: 0x0e, 0x38b: 0x0e, 0x38c: 0x0e, 0x38d: 0x0e, 0x38e: 0x0e, 0x38f: 0x0e, + 0x390: 0x0e, 0x391: 0x0e, 0x392: 0x0e, 0x393: 0x0e, 0x394: 0x0e, 0x395: 0x0e, 0x396: 0x0e, 0x397: 0x0e, + 0x398: 0x0e, 0x399: 0x0e, 0x39a: 0x0e, 0x39b: 0x0e, 0x39c: 0x0e, 0x39d: 0x0e, 0x39e: 0x0e, 0x39f: 0x45, + 0x3a0: 0x0e, 0x3a1: 0x0e, 0x3a2: 0x0e, 0x3a3: 0x0e, 0x3a4: 0x0e, 0x3a5: 0x0e, 0x3a6: 0x0e, 0x3a7: 0x0e, + 0x3a8: 0x0e, 0x3a9: 0x0e, 0x3aa: 0x0e, 0x3ab: 0x0e, 0x3ac: 0x0e, 0x3ad: 0x0e, 0x3ae: 0x0e, 0x3af: 0x0e, + 0x3b0: 0x0e, 0x3b1: 0x0e, 0x3b2: 0x0e, 0x3b3: 0x46, 0x3b4: 0x47, + // Block 0xf, offset 0x3c0 + 0x3ff: 0x48, + // Block 0x10, offset 0x400 + 0x400: 0x0e, 
0x401: 0x0e, 0x402: 0x0e, 0x403: 0x0e, 0x404: 0x49, 0x405: 0x4a, 0x406: 0x0e, 0x407: 0x0e, + 0x408: 0x0e, 0x409: 0x0e, 0x40a: 0x0e, 0x40b: 0x4b, + // Block 0x11, offset 0x440 + 0x440: 0x4c, 0x443: 0x4d, 0x444: 0x4e, 0x445: 0x4f, 0x446: 0x50, + 0x448: 0x51, 0x449: 0x52, 0x44c: 0x53, 0x44d: 0x54, 0x44e: 0x55, 0x44f: 0x56, + 0x450: 0x57, 0x451: 0x58, 0x452: 0x0e, 0x453: 0x59, 0x454: 0x5a, 0x455: 0x5b, 0x456: 0x5c, 0x457: 0x5d, + 0x458: 0x0e, 0x459: 0x5e, 0x45a: 0x0e, 0x45b: 0x5f, 0x45f: 0x60, + 0x464: 0x61, 0x465: 0x62, 0x466: 0x0e, 0x467: 0x0e, + 0x469: 0x63, 0x46a: 0x64, 0x46b: 0x65, + // Block 0x12, offset 0x480 + 0x496: 0x0b, 0x497: 0x06, + 0x498: 0x0c, 0x49a: 0x0d, 0x49b: 0x0e, 0x49f: 0x0f, + 0x4a0: 0x06, 0x4a1: 0x06, 0x4a2: 0x06, 0x4a3: 0x06, 0x4a4: 0x06, 0x4a5: 0x06, 0x4a6: 0x06, 0x4a7: 0x06, + 0x4a8: 0x06, 0x4a9: 0x06, 0x4aa: 0x06, 0x4ab: 0x06, 0x4ac: 0x06, 0x4ad: 0x06, 0x4ae: 0x06, 0x4af: 0x06, + 0x4b0: 0x06, 0x4b1: 0x06, 0x4b2: 0x06, 0x4b3: 0x06, 0x4b4: 0x06, 0x4b5: 0x06, 0x4b6: 0x06, 0x4b7: 0x06, + 0x4b8: 0x06, 0x4b9: 0x06, 0x4ba: 0x06, 0x4bb: 0x06, 0x4bc: 0x06, 0x4bd: 0x06, 0x4be: 0x06, 0x4bf: 0x06, + // Block 0x13, offset 0x4c0 + 0x4c4: 0x08, 0x4c5: 0x08, 0x4c6: 0x08, 0x4c7: 0x09, + // Block 0x14, offset 0x500 + 0x500: 0x08, 0x501: 0x08, 0x502: 0x08, 0x503: 0x08, 0x504: 0x08, 0x505: 0x08, 0x506: 0x08, 0x507: 0x08, + 0x508: 0x08, 0x509: 0x08, 0x50a: 0x08, 0x50b: 0x08, 0x50c: 0x08, 0x50d: 0x08, 0x50e: 0x08, 0x50f: 0x08, + 0x510: 0x08, 0x511: 0x08, 0x512: 0x08, 0x513: 0x08, 0x514: 0x08, 0x515: 0x08, 0x516: 0x08, 0x517: 0x08, + 0x518: 0x08, 0x519: 0x08, 0x51a: 0x08, 0x51b: 0x08, 0x51c: 0x08, 0x51d: 0x08, 0x51e: 0x08, 0x51f: 0x08, + 0x520: 0x08, 0x521: 0x08, 0x522: 0x08, 0x523: 0x08, 0x524: 0x08, 0x525: 0x08, 0x526: 0x08, 0x527: 0x08, + 0x528: 0x08, 0x529: 0x08, 0x52a: 0x08, 0x52b: 0x08, 0x52c: 0x08, 0x52d: 0x08, 0x52e: 0x08, 0x52f: 0x08, + 0x530: 0x08, 0x531: 0x08, 0x532: 0x08, 0x533: 0x08, 0x534: 0x08, 0x535: 0x08, 0x536: 0x08, 0x537: 0x08, + 0x538: 0x08, 0x539: 0x08, 0x53a: 0x08, 0x53b: 0x08, 0x53c: 0x08, 0x53d: 0x08, 0x53e: 0x08, 0x53f: 0x66, + // Block 0x15, offset 0x540 + 0x560: 0x11, + 0x570: 0x09, 0x571: 0x09, 0x572: 0x09, 0x573: 0x09, 0x574: 0x09, 0x575: 0x09, 0x576: 0x09, 0x577: 0x09, + 0x578: 0x09, 0x579: 0x09, 0x57a: 0x09, 0x57b: 0x09, 0x57c: 0x09, 0x57d: 0x09, 0x57e: 0x09, 0x57f: 0x12, + // Block 0x16, offset 0x580 + 0x580: 0x09, 0x581: 0x09, 0x582: 0x09, 0x583: 0x09, 0x584: 0x09, 0x585: 0x09, 0x586: 0x09, 0x587: 0x09, + 0x588: 0x09, 0x589: 0x09, 0x58a: 0x09, 0x58b: 0x09, 0x58c: 0x09, 0x58d: 0x09, 0x58e: 0x09, 0x58f: 0x12, +} + +// inverseData contains 4-byte entries of the following format: +// +// <length> <modified UTF-8-encoded rune> <0 padding> +// +// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the +// UTF-8 encoding of the original rune. Mappings often have the following +// pattern: +// +// A -> A (U+FF21 -> U+0041) +// B -> B (U+FF22 -> U+0042) +// ... +// +// By xor-ing the last byte the same entry can be shared by many mappings. This +// reduces the total number of distinct entries by about two thirds. +// The resulting entry for the aforementioned mappings is +// +// { 0x01, 0xE0, 0x00, 0x00 } +// +// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get +// +// E0 ^ A1 = 41. +// +// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get +// +// E0 ^ A2 = 42. +// +// Note that because of the xor-ing, the byte sequence stored in the entry is +// not valid UTF-8. 
+var inverseData = [150][4]byte{ + {0x00, 0x00, 0x00, 0x00}, + {0x03, 0xe3, 0x80, 0xa0}, + {0x03, 0xef, 0xbc, 0xa0}, + {0x03, 0xef, 0xbc, 0xe0}, + {0x03, 0xef, 0xbd, 0xe0}, + {0x03, 0xef, 0xbf, 0x02}, + {0x03, 0xef, 0xbf, 0x00}, + {0x03, 0xef, 0xbf, 0x0e}, + {0x03, 0xef, 0xbf, 0x0c}, + {0x03, 0xef, 0xbf, 0x0f}, + {0x03, 0xef, 0xbf, 0x39}, + {0x03, 0xef, 0xbf, 0x3b}, + {0x03, 0xef, 0xbf, 0x3f}, + {0x03, 0xef, 0xbf, 0x2a}, + {0x03, 0xef, 0xbf, 0x0d}, + {0x03, 0xef, 0xbf, 0x25}, + {0x03, 0xef, 0xbd, 0x1a}, + {0x03, 0xef, 0xbd, 0x26}, + {0x01, 0xa0, 0x00, 0x00}, + {0x03, 0xef, 0xbd, 0x25}, + {0x03, 0xef, 0xbd, 0x23}, + {0x03, 0xef, 0xbd, 0x2e}, + {0x03, 0xef, 0xbe, 0x07}, + {0x03, 0xef, 0xbe, 0x05}, + {0x03, 0xef, 0xbd, 0x06}, + {0x03, 0xef, 0xbd, 0x13}, + {0x03, 0xef, 0xbd, 0x0b}, + {0x03, 0xef, 0xbd, 0x16}, + {0x03, 0xef, 0xbd, 0x0c}, + {0x03, 0xef, 0xbd, 0x15}, + {0x03, 0xef, 0xbd, 0x0d}, + {0x03, 0xef, 0xbd, 0x1c}, + {0x03, 0xef, 0xbd, 0x02}, + {0x03, 0xef, 0xbd, 0x1f}, + {0x03, 0xef, 0xbd, 0x1d}, + {0x03, 0xef, 0xbd, 0x17}, + {0x03, 0xef, 0xbd, 0x08}, + {0x03, 0xef, 0xbd, 0x09}, + {0x03, 0xef, 0xbd, 0x0e}, + {0x03, 0xef, 0xbd, 0x04}, + {0x03, 0xef, 0xbd, 0x05}, + {0x03, 0xef, 0xbe, 0x3f}, + {0x03, 0xef, 0xbe, 0x00}, + {0x03, 0xef, 0xbd, 0x2c}, + {0x03, 0xef, 0xbe, 0x06}, + {0x03, 0xef, 0xbe, 0x0c}, + {0x03, 0xef, 0xbe, 0x0f}, + {0x03, 0xef, 0xbe, 0x0d}, + {0x03, 0xef, 0xbe, 0x0b}, + {0x03, 0xef, 0xbe, 0x19}, + {0x03, 0xef, 0xbe, 0x15}, + {0x03, 0xef, 0xbe, 0x11}, + {0x03, 0xef, 0xbe, 0x31}, + {0x03, 0xef, 0xbe, 0x33}, + {0x03, 0xef, 0xbd, 0x0f}, + {0x03, 0xef, 0xbe, 0x30}, + {0x03, 0xef, 0xbe, 0x3e}, + {0x03, 0xef, 0xbe, 0x32}, + {0x03, 0xef, 0xbe, 0x36}, + {0x03, 0xef, 0xbd, 0x14}, + {0x03, 0xef, 0xbe, 0x2e}, + {0x03, 0xef, 0xbd, 0x1e}, + {0x03, 0xef, 0xbe, 0x10}, + {0x03, 0xef, 0xbf, 0x13}, + {0x03, 0xef, 0xbf, 0x15}, + {0x03, 0xef, 0xbf, 0x17}, + {0x03, 0xef, 0xbf, 0x1f}, + {0x03, 0xef, 0xbf, 0x1d}, + {0x03, 0xef, 0xbf, 0x1b}, + {0x03, 0xef, 0xbf, 0x09}, + {0x03, 0xef, 0xbf, 0x0b}, + {0x03, 0xef, 0xbf, 0x37}, + {0x03, 0xef, 0xbe, 0x04}, + {0x01, 0xe0, 0x00, 0x00}, + {0x03, 0xe2, 0xa6, 0x1a}, + {0x03, 0xe2, 0xa6, 0x26}, + {0x03, 0xe3, 0x80, 0x23}, + {0x03, 0xe3, 0x80, 0x2e}, + {0x03, 0xe3, 0x80, 0x25}, + {0x03, 0xe3, 0x83, 0x1e}, + {0x03, 0xe3, 0x83, 0x14}, + {0x03, 0xe3, 0x82, 0x06}, + {0x03, 0xe3, 0x82, 0x0b}, + {0x03, 0xe3, 0x82, 0x0c}, + {0x03, 0xe3, 0x82, 0x0d}, + {0x03, 0xe3, 0x82, 0x02}, + {0x03, 0xe3, 0x83, 0x0f}, + {0x03, 0xe3, 0x83, 0x08}, + {0x03, 0xe3, 0x83, 0x09}, + {0x03, 0xe3, 0x83, 0x2c}, + {0x03, 0xe3, 0x83, 0x0c}, + {0x03, 0xe3, 0x82, 0x13}, + {0x03, 0xe3, 0x82, 0x16}, + {0x03, 0xe3, 0x82, 0x15}, + {0x03, 0xe3, 0x82, 0x1c}, + {0x03, 0xe3, 0x82, 0x1f}, + {0x03, 0xe3, 0x82, 0x1d}, + {0x03, 0xe3, 0x82, 0x1a}, + {0x03, 0xe3, 0x82, 0x17}, + {0x03, 0xe3, 0x82, 0x08}, + {0x03, 0xe3, 0x82, 0x09}, + {0x03, 0xe3, 0x82, 0x0e}, + {0x03, 0xe3, 0x82, 0x04}, + {0x03, 0xe3, 0x82, 0x05}, + {0x03, 0xe3, 0x82, 0x3f}, + {0x03, 0xe3, 0x83, 0x00}, + {0x03, 0xe3, 0x83, 0x06}, + {0x03, 0xe3, 0x83, 0x05}, + {0x03, 0xe3, 0x83, 0x0d}, + {0x03, 0xe3, 0x83, 0x0b}, + {0x03, 0xe3, 0x83, 0x07}, + {0x03, 0xe3, 0x83, 0x19}, + {0x03, 0xe3, 0x83, 0x15}, + {0x03, 0xe3, 0x83, 0x11}, + {0x03, 0xe3, 0x83, 0x31}, + {0x03, 0xe3, 0x83, 0x33}, + {0x03, 0xe3, 0x83, 0x30}, + {0x03, 0xe3, 0x83, 0x3e}, + {0x03, 0xe3, 0x83, 0x32}, + {0x03, 0xe3, 0x83, 0x36}, + {0x03, 0xe3, 0x83, 0x2e}, + {0x03, 0xe3, 0x82, 0x07}, + {0x03, 0xe3, 0x85, 0x04}, + {0x03, 0xe3, 0x84, 0x10}, + {0x03, 0xe3, 0x85, 0x30}, + {0x03, 0xe3, 0x85, 
0x0d}, + {0x03, 0xe3, 0x85, 0x13}, + {0x03, 0xe3, 0x85, 0x15}, + {0x03, 0xe3, 0x85, 0x17}, + {0x03, 0xe3, 0x85, 0x1f}, + {0x03, 0xe3, 0x85, 0x1d}, + {0x03, 0xe3, 0x85, 0x1b}, + {0x03, 0xe3, 0x85, 0x09}, + {0x03, 0xe3, 0x85, 0x0f}, + {0x03, 0xe3, 0x85, 0x0b}, + {0x03, 0xe3, 0x85, 0x37}, + {0x03, 0xe3, 0x85, 0x3b}, + {0x03, 0xe3, 0x85, 0x39}, + {0x03, 0xe3, 0x85, 0x3f}, + {0x02, 0xc2, 0x02, 0x00}, + {0x02, 0xc2, 0x0e, 0x00}, + {0x02, 0xc2, 0x0c, 0x00}, + {0x02, 0xc2, 0x00, 0x00}, + {0x03, 0xe2, 0x82, 0x0f}, + {0x03, 0xe2, 0x94, 0x2a}, + {0x03, 0xe2, 0x86, 0x39}, + {0x03, 0xe2, 0x86, 0x3b}, + {0x03, 0xe2, 0x86, 0x3f}, + {0x03, 0xe2, 0x96, 0x0d}, + {0x03, 0xe2, 0x97, 0x25}, +} + +// Total table size 15512 bytes (15KiB) diff --git a/vendor/golang.org/x/text/width/tables9.0.0.go b/vendor/golang.org/x/text/width/tables9.0.0.go new file mode 100644 index 00000000000..d981330a9f4 --- /dev/null +++ b/vendor/golang.org/x/text/width/tables9.0.0.go @@ -0,0 +1,1296 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +//go:build !go1.10 + +package width + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "9.0.0" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *widthTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return widthValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = widthIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. 
+func (t *widthTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return widthValues[c0] + } + i := widthIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = widthIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = widthIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *widthTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return widthValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = widthIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *widthTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return widthValues[c0] + } + i := widthIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = widthIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = widthIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// widthTrie. Total size: 14080 bytes (13.75 KiB). Checksum: 3b8aeb3dc03667a3. +type widthTrie struct{} + +func newWidthTrie(i int) *widthTrie { + return &widthTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *widthTrie) lookupValue(n uint32, b byte) uint16 { + switch { + default: + return uint16(widthValues[n<<6+uint32(b)]) + } +} + +// widthValues: 99 blocks, 6336 entries, 12672 bytes +// The third block is the zero block. 
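+// For context (an illustrative sketch, assuming the exported API defined in
+// the package's non-generated files), these tables back lookups such as:
+//
+//	p, _ := width.LookupString("\uFF21") // FULLWIDTH LATIN CAPITAL LETTER A
+//	fmt.Println(p.Kind())                // EastAsianFullwidth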
+var widthValues = [6336]uint16{ + // Block 0x0, offset 0x0 + 0x20: 0x6001, 0x21: 0x6002, 0x22: 0x6002, 0x23: 0x6002, + 0x24: 0x6002, 0x25: 0x6002, 0x26: 0x6002, 0x27: 0x6002, 0x28: 0x6002, 0x29: 0x6002, + 0x2a: 0x6002, 0x2b: 0x6002, 0x2c: 0x6002, 0x2d: 0x6002, 0x2e: 0x6002, 0x2f: 0x6002, + 0x30: 0x6002, 0x31: 0x6002, 0x32: 0x6002, 0x33: 0x6002, 0x34: 0x6002, 0x35: 0x6002, + 0x36: 0x6002, 0x37: 0x6002, 0x38: 0x6002, 0x39: 0x6002, 0x3a: 0x6002, 0x3b: 0x6002, + 0x3c: 0x6002, 0x3d: 0x6002, 0x3e: 0x6002, 0x3f: 0x6002, + // Block 0x1, offset 0x40 + 0x40: 0x6003, 0x41: 0x6003, 0x42: 0x6003, 0x43: 0x6003, 0x44: 0x6003, 0x45: 0x6003, + 0x46: 0x6003, 0x47: 0x6003, 0x48: 0x6003, 0x49: 0x6003, 0x4a: 0x6003, 0x4b: 0x6003, + 0x4c: 0x6003, 0x4d: 0x6003, 0x4e: 0x6003, 0x4f: 0x6003, 0x50: 0x6003, 0x51: 0x6003, + 0x52: 0x6003, 0x53: 0x6003, 0x54: 0x6003, 0x55: 0x6003, 0x56: 0x6003, 0x57: 0x6003, + 0x58: 0x6003, 0x59: 0x6003, 0x5a: 0x6003, 0x5b: 0x6003, 0x5c: 0x6003, 0x5d: 0x6003, + 0x5e: 0x6003, 0x5f: 0x6003, 0x60: 0x6004, 0x61: 0x6004, 0x62: 0x6004, 0x63: 0x6004, + 0x64: 0x6004, 0x65: 0x6004, 0x66: 0x6004, 0x67: 0x6004, 0x68: 0x6004, 0x69: 0x6004, + 0x6a: 0x6004, 0x6b: 0x6004, 0x6c: 0x6004, 0x6d: 0x6004, 0x6e: 0x6004, 0x6f: 0x6004, + 0x70: 0x6004, 0x71: 0x6004, 0x72: 0x6004, 0x73: 0x6004, 0x74: 0x6004, 0x75: 0x6004, + 0x76: 0x6004, 0x77: 0x6004, 0x78: 0x6004, 0x79: 0x6004, 0x7a: 0x6004, 0x7b: 0x6004, + 0x7c: 0x6004, 0x7d: 0x6004, 0x7e: 0x6004, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xe1: 0x2000, 0xe2: 0x6005, 0xe3: 0x6005, + 0xe4: 0x2000, 0xe5: 0x6006, 0xe6: 0x6005, 0xe7: 0x2000, 0xe8: 0x2000, + 0xea: 0x2000, 0xec: 0x6007, 0xed: 0x2000, 0xee: 0x2000, 0xef: 0x6008, + 0xf0: 0x2000, 0xf1: 0x2000, 0xf2: 0x2000, 0xf3: 0x2000, 0xf4: 0x2000, + 0xf6: 0x2000, 0xf7: 0x2000, 0xf8: 0x2000, 0xf9: 0x2000, 0xfa: 0x2000, + 0xfc: 0x2000, 0xfd: 0x2000, 0xfe: 0x2000, 0xff: 0x2000, + // Block 0x4, offset 0x100 + 0x106: 0x2000, + 0x110: 0x2000, + 0x117: 0x2000, + 0x118: 0x2000, + 0x11e: 0x2000, 0x11f: 0x2000, 0x120: 0x2000, 0x121: 0x2000, + 0x126: 0x2000, 0x128: 0x2000, 0x129: 0x2000, + 0x12a: 0x2000, 0x12c: 0x2000, 0x12d: 0x2000, + 0x130: 0x2000, 0x132: 0x2000, 0x133: 0x2000, + 0x137: 0x2000, 0x138: 0x2000, 0x139: 0x2000, 0x13a: 0x2000, + 0x13c: 0x2000, 0x13e: 0x2000, + // Block 0x5, offset 0x140 + 0x141: 0x2000, + 0x151: 0x2000, + 0x153: 0x2000, + 0x15b: 0x2000, + 0x166: 0x2000, 0x167: 0x2000, + 0x16b: 0x2000, + 0x171: 0x2000, 0x172: 0x2000, 0x173: 0x2000, + 0x178: 0x2000, + 0x17f: 0x2000, + // Block 0x6, offset 0x180 + 0x180: 0x2000, 0x181: 0x2000, 0x182: 0x2000, 0x184: 0x2000, + 0x188: 0x2000, 0x189: 0x2000, 0x18a: 0x2000, 0x18b: 0x2000, + 0x18d: 0x2000, + 0x192: 0x2000, 0x193: 0x2000, + 0x1a6: 0x2000, 0x1a7: 0x2000, + 0x1ab: 0x2000, + // Block 0x7, offset 0x1c0 + 0x1ce: 0x2000, 0x1d0: 0x2000, + 0x1d2: 0x2000, 0x1d4: 0x2000, 0x1d6: 0x2000, + 0x1d8: 0x2000, 0x1da: 0x2000, 0x1dc: 0x2000, + // Block 0x8, offset 0x200 + 0x211: 0x2000, + 0x221: 0x2000, + // Block 0x9, offset 0x240 + 0x244: 0x2000, + 0x247: 0x2000, 0x249: 0x2000, 0x24a: 0x2000, 0x24b: 0x2000, + 0x24d: 0x2000, 0x250: 0x2000, + 0x258: 0x2000, 0x259: 0x2000, 0x25a: 0x2000, 0x25b: 0x2000, 0x25d: 0x2000, + 0x25f: 0x2000, + // Block 0xa, offset 0x280 + 0x280: 0x2000, 0x281: 0x2000, 0x282: 0x2000, 0x283: 0x2000, 0x284: 0x2000, 0x285: 0x2000, + 0x286: 0x2000, 0x287: 0x2000, 0x288: 0x2000, 0x289: 0x2000, 0x28a: 0x2000, 0x28b: 0x2000, + 0x28c: 0x2000, 0x28d: 0x2000, 0x28e: 0x2000, 0x28f: 0x2000, 0x290: 0x2000, 0x291: 0x2000, + 0x292: 0x2000, 0x293: 
0x2000, 0x294: 0x2000, 0x295: 0x2000, 0x296: 0x2000, 0x297: 0x2000, + 0x298: 0x2000, 0x299: 0x2000, 0x29a: 0x2000, 0x29b: 0x2000, 0x29c: 0x2000, 0x29d: 0x2000, + 0x29e: 0x2000, 0x29f: 0x2000, 0x2a0: 0x2000, 0x2a1: 0x2000, 0x2a2: 0x2000, 0x2a3: 0x2000, + 0x2a4: 0x2000, 0x2a5: 0x2000, 0x2a6: 0x2000, 0x2a7: 0x2000, 0x2a8: 0x2000, 0x2a9: 0x2000, + 0x2aa: 0x2000, 0x2ab: 0x2000, 0x2ac: 0x2000, 0x2ad: 0x2000, 0x2ae: 0x2000, 0x2af: 0x2000, + 0x2b0: 0x2000, 0x2b1: 0x2000, 0x2b2: 0x2000, 0x2b3: 0x2000, 0x2b4: 0x2000, 0x2b5: 0x2000, + 0x2b6: 0x2000, 0x2b7: 0x2000, 0x2b8: 0x2000, 0x2b9: 0x2000, 0x2ba: 0x2000, 0x2bb: 0x2000, + 0x2bc: 0x2000, 0x2bd: 0x2000, 0x2be: 0x2000, 0x2bf: 0x2000, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x2000, 0x2c1: 0x2000, 0x2c2: 0x2000, 0x2c3: 0x2000, 0x2c4: 0x2000, 0x2c5: 0x2000, + 0x2c6: 0x2000, 0x2c7: 0x2000, 0x2c8: 0x2000, 0x2c9: 0x2000, 0x2ca: 0x2000, 0x2cb: 0x2000, + 0x2cc: 0x2000, 0x2cd: 0x2000, 0x2ce: 0x2000, 0x2cf: 0x2000, 0x2d0: 0x2000, 0x2d1: 0x2000, + 0x2d2: 0x2000, 0x2d3: 0x2000, 0x2d4: 0x2000, 0x2d5: 0x2000, 0x2d6: 0x2000, 0x2d7: 0x2000, + 0x2d8: 0x2000, 0x2d9: 0x2000, 0x2da: 0x2000, 0x2db: 0x2000, 0x2dc: 0x2000, 0x2dd: 0x2000, + 0x2de: 0x2000, 0x2df: 0x2000, 0x2e0: 0x2000, 0x2e1: 0x2000, 0x2e2: 0x2000, 0x2e3: 0x2000, + 0x2e4: 0x2000, 0x2e5: 0x2000, 0x2e6: 0x2000, 0x2e7: 0x2000, 0x2e8: 0x2000, 0x2e9: 0x2000, + 0x2ea: 0x2000, 0x2eb: 0x2000, 0x2ec: 0x2000, 0x2ed: 0x2000, 0x2ee: 0x2000, 0x2ef: 0x2000, + // Block 0xc, offset 0x300 + 0x311: 0x2000, + 0x312: 0x2000, 0x313: 0x2000, 0x314: 0x2000, 0x315: 0x2000, 0x316: 0x2000, 0x317: 0x2000, + 0x318: 0x2000, 0x319: 0x2000, 0x31a: 0x2000, 0x31b: 0x2000, 0x31c: 0x2000, 0x31d: 0x2000, + 0x31e: 0x2000, 0x31f: 0x2000, 0x320: 0x2000, 0x321: 0x2000, 0x323: 0x2000, + 0x324: 0x2000, 0x325: 0x2000, 0x326: 0x2000, 0x327: 0x2000, 0x328: 0x2000, 0x329: 0x2000, + 0x331: 0x2000, 0x332: 0x2000, 0x333: 0x2000, 0x334: 0x2000, 0x335: 0x2000, + 0x336: 0x2000, 0x337: 0x2000, 0x338: 0x2000, 0x339: 0x2000, 0x33a: 0x2000, 0x33b: 0x2000, + 0x33c: 0x2000, 0x33d: 0x2000, 0x33e: 0x2000, 0x33f: 0x2000, + // Block 0xd, offset 0x340 + 0x340: 0x2000, 0x341: 0x2000, 0x343: 0x2000, 0x344: 0x2000, 0x345: 0x2000, + 0x346: 0x2000, 0x347: 0x2000, 0x348: 0x2000, 0x349: 0x2000, + // Block 0xe, offset 0x380 + 0x381: 0x2000, + 0x390: 0x2000, 0x391: 0x2000, + 0x392: 0x2000, 0x393: 0x2000, 0x394: 0x2000, 0x395: 0x2000, 0x396: 0x2000, 0x397: 0x2000, + 0x398: 0x2000, 0x399: 0x2000, 0x39a: 0x2000, 0x39b: 0x2000, 0x39c: 0x2000, 0x39d: 0x2000, + 0x39e: 0x2000, 0x39f: 0x2000, 0x3a0: 0x2000, 0x3a1: 0x2000, 0x3a2: 0x2000, 0x3a3: 0x2000, + 0x3a4: 0x2000, 0x3a5: 0x2000, 0x3a6: 0x2000, 0x3a7: 0x2000, 0x3a8: 0x2000, 0x3a9: 0x2000, + 0x3aa: 0x2000, 0x3ab: 0x2000, 0x3ac: 0x2000, 0x3ad: 0x2000, 0x3ae: 0x2000, 0x3af: 0x2000, + 0x3b0: 0x2000, 0x3b1: 0x2000, 0x3b2: 0x2000, 0x3b3: 0x2000, 0x3b4: 0x2000, 0x3b5: 0x2000, + 0x3b6: 0x2000, 0x3b7: 0x2000, 0x3b8: 0x2000, 0x3b9: 0x2000, 0x3ba: 0x2000, 0x3bb: 0x2000, + 0x3bc: 0x2000, 0x3bd: 0x2000, 0x3be: 0x2000, 0x3bf: 0x2000, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x2000, 0x3c1: 0x2000, 0x3c2: 0x2000, 0x3c3: 0x2000, 0x3c4: 0x2000, 0x3c5: 0x2000, + 0x3c6: 0x2000, 0x3c7: 0x2000, 0x3c8: 0x2000, 0x3c9: 0x2000, 0x3ca: 0x2000, 0x3cb: 0x2000, + 0x3cc: 0x2000, 0x3cd: 0x2000, 0x3ce: 0x2000, 0x3cf: 0x2000, 0x3d1: 0x2000, + // Block 0x10, offset 0x400 + 0x400: 0x4000, 0x401: 0x4000, 0x402: 0x4000, 0x403: 0x4000, 0x404: 0x4000, 0x405: 0x4000, + 0x406: 0x4000, 0x407: 0x4000, 0x408: 0x4000, 0x409: 0x4000, 0x40a: 0x4000, 0x40b: 0x4000, + 0x40c: 0x4000, 
0x40d: 0x4000, 0x40e: 0x4000, 0x40f: 0x4000, 0x410: 0x4000, 0x411: 0x4000, + 0x412: 0x4000, 0x413: 0x4000, 0x414: 0x4000, 0x415: 0x4000, 0x416: 0x4000, 0x417: 0x4000, + 0x418: 0x4000, 0x419: 0x4000, 0x41a: 0x4000, 0x41b: 0x4000, 0x41c: 0x4000, 0x41d: 0x4000, + 0x41e: 0x4000, 0x41f: 0x4000, 0x420: 0x4000, 0x421: 0x4000, 0x422: 0x4000, 0x423: 0x4000, + 0x424: 0x4000, 0x425: 0x4000, 0x426: 0x4000, 0x427: 0x4000, 0x428: 0x4000, 0x429: 0x4000, + 0x42a: 0x4000, 0x42b: 0x4000, 0x42c: 0x4000, 0x42d: 0x4000, 0x42e: 0x4000, 0x42f: 0x4000, + 0x430: 0x4000, 0x431: 0x4000, 0x432: 0x4000, 0x433: 0x4000, 0x434: 0x4000, 0x435: 0x4000, + 0x436: 0x4000, 0x437: 0x4000, 0x438: 0x4000, 0x439: 0x4000, 0x43a: 0x4000, 0x43b: 0x4000, + 0x43c: 0x4000, 0x43d: 0x4000, 0x43e: 0x4000, 0x43f: 0x4000, + // Block 0x11, offset 0x440 + 0x440: 0x4000, 0x441: 0x4000, 0x442: 0x4000, 0x443: 0x4000, 0x444: 0x4000, 0x445: 0x4000, + 0x446: 0x4000, 0x447: 0x4000, 0x448: 0x4000, 0x449: 0x4000, 0x44a: 0x4000, 0x44b: 0x4000, + 0x44c: 0x4000, 0x44d: 0x4000, 0x44e: 0x4000, 0x44f: 0x4000, 0x450: 0x4000, 0x451: 0x4000, + 0x452: 0x4000, 0x453: 0x4000, 0x454: 0x4000, 0x455: 0x4000, 0x456: 0x4000, 0x457: 0x4000, + 0x458: 0x4000, 0x459: 0x4000, 0x45a: 0x4000, 0x45b: 0x4000, 0x45c: 0x4000, 0x45d: 0x4000, + 0x45e: 0x4000, 0x45f: 0x4000, + // Block 0x12, offset 0x480 + 0x490: 0x2000, + 0x493: 0x2000, 0x494: 0x2000, 0x495: 0x2000, 0x496: 0x2000, + 0x498: 0x2000, 0x499: 0x2000, 0x49c: 0x2000, 0x49d: 0x2000, + 0x4a0: 0x2000, 0x4a1: 0x2000, 0x4a2: 0x2000, + 0x4a4: 0x2000, 0x4a5: 0x2000, 0x4a6: 0x2000, 0x4a7: 0x2000, + 0x4b0: 0x2000, 0x4b2: 0x2000, 0x4b3: 0x2000, 0x4b5: 0x2000, + 0x4bb: 0x2000, + 0x4be: 0x2000, + // Block 0x13, offset 0x4c0 + 0x4f4: 0x2000, + 0x4ff: 0x2000, + // Block 0x14, offset 0x500 + 0x501: 0x2000, 0x502: 0x2000, 0x503: 0x2000, 0x504: 0x2000, + 0x529: 0xa009, + 0x52c: 0x2000, + // Block 0x15, offset 0x540 + 0x543: 0x2000, 0x545: 0x2000, + 0x549: 0x2000, + 0x553: 0x2000, 0x556: 0x2000, + 0x561: 0x2000, 0x562: 0x2000, + 0x566: 0x2000, + 0x56b: 0x2000, + // Block 0x16, offset 0x580 + 0x593: 0x2000, 0x594: 0x2000, + 0x59b: 0x2000, 0x59c: 0x2000, 0x59d: 0x2000, + 0x59e: 0x2000, 0x5a0: 0x2000, 0x5a1: 0x2000, 0x5a2: 0x2000, 0x5a3: 0x2000, + 0x5a4: 0x2000, 0x5a5: 0x2000, 0x5a6: 0x2000, 0x5a7: 0x2000, 0x5a8: 0x2000, 0x5a9: 0x2000, + 0x5aa: 0x2000, 0x5ab: 0x2000, + 0x5b0: 0x2000, 0x5b1: 0x2000, 0x5b2: 0x2000, 0x5b3: 0x2000, 0x5b4: 0x2000, 0x5b5: 0x2000, + 0x5b6: 0x2000, 0x5b7: 0x2000, 0x5b8: 0x2000, 0x5b9: 0x2000, + // Block 0x17, offset 0x5c0 + 0x5c9: 0x2000, + 0x5d0: 0x200a, 0x5d1: 0x200b, + 0x5d2: 0x200a, 0x5d3: 0x200c, 0x5d4: 0x2000, 0x5d5: 0x2000, 0x5d6: 0x2000, 0x5d7: 0x2000, + 0x5d8: 0x2000, 0x5d9: 0x2000, + 0x5f8: 0x2000, 0x5f9: 0x2000, + // Block 0x18, offset 0x600 + 0x612: 0x2000, 0x614: 0x2000, + 0x627: 0x2000, + // Block 0x19, offset 0x640 + 0x640: 0x2000, 0x642: 0x2000, 0x643: 0x2000, + 0x647: 0x2000, 0x648: 0x2000, 0x64b: 0x2000, + 0x64f: 0x2000, 0x651: 0x2000, + 0x655: 0x2000, + 0x65a: 0x2000, 0x65d: 0x2000, + 0x65e: 0x2000, 0x65f: 0x2000, 0x660: 0x2000, 0x663: 0x2000, + 0x665: 0x2000, 0x667: 0x2000, 0x668: 0x2000, 0x669: 0x2000, + 0x66a: 0x2000, 0x66b: 0x2000, 0x66c: 0x2000, 0x66e: 0x2000, + 0x674: 0x2000, 0x675: 0x2000, + 0x676: 0x2000, 0x677: 0x2000, + 0x67c: 0x2000, 0x67d: 0x2000, + // Block 0x1a, offset 0x680 + 0x688: 0x2000, + 0x68c: 0x2000, + 0x692: 0x2000, + 0x6a0: 0x2000, 0x6a1: 0x2000, + 0x6a4: 0x2000, 0x6a5: 0x2000, 0x6a6: 0x2000, 0x6a7: 0x2000, + 0x6aa: 0x2000, 0x6ab: 0x2000, 0x6ae: 0x2000, 0x6af: 0x2000, + // 
Block 0x1b, offset 0x6c0 + 0x6c2: 0x2000, 0x6c3: 0x2000, + 0x6c6: 0x2000, 0x6c7: 0x2000, + 0x6d5: 0x2000, + 0x6d9: 0x2000, + 0x6e5: 0x2000, + 0x6ff: 0x2000, + // Block 0x1c, offset 0x700 + 0x712: 0x2000, + 0x71a: 0x4000, 0x71b: 0x4000, + 0x729: 0x4000, + 0x72a: 0x4000, + // Block 0x1d, offset 0x740 + 0x769: 0x4000, + 0x76a: 0x4000, 0x76b: 0x4000, 0x76c: 0x4000, + 0x770: 0x4000, 0x773: 0x4000, + // Block 0x1e, offset 0x780 + 0x7a0: 0x2000, 0x7a1: 0x2000, 0x7a2: 0x2000, 0x7a3: 0x2000, + 0x7a4: 0x2000, 0x7a5: 0x2000, 0x7a6: 0x2000, 0x7a7: 0x2000, 0x7a8: 0x2000, 0x7a9: 0x2000, + 0x7aa: 0x2000, 0x7ab: 0x2000, 0x7ac: 0x2000, 0x7ad: 0x2000, 0x7ae: 0x2000, 0x7af: 0x2000, + 0x7b0: 0x2000, 0x7b1: 0x2000, 0x7b2: 0x2000, 0x7b3: 0x2000, 0x7b4: 0x2000, 0x7b5: 0x2000, + 0x7b6: 0x2000, 0x7b7: 0x2000, 0x7b8: 0x2000, 0x7b9: 0x2000, 0x7ba: 0x2000, 0x7bb: 0x2000, + 0x7bc: 0x2000, 0x7bd: 0x2000, 0x7be: 0x2000, 0x7bf: 0x2000, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x2000, 0x7c1: 0x2000, 0x7c2: 0x2000, 0x7c3: 0x2000, 0x7c4: 0x2000, 0x7c5: 0x2000, + 0x7c6: 0x2000, 0x7c7: 0x2000, 0x7c8: 0x2000, 0x7c9: 0x2000, 0x7ca: 0x2000, 0x7cb: 0x2000, + 0x7cc: 0x2000, 0x7cd: 0x2000, 0x7ce: 0x2000, 0x7cf: 0x2000, 0x7d0: 0x2000, 0x7d1: 0x2000, + 0x7d2: 0x2000, 0x7d3: 0x2000, 0x7d4: 0x2000, 0x7d5: 0x2000, 0x7d6: 0x2000, 0x7d7: 0x2000, + 0x7d8: 0x2000, 0x7d9: 0x2000, 0x7da: 0x2000, 0x7db: 0x2000, 0x7dc: 0x2000, 0x7dd: 0x2000, + 0x7de: 0x2000, 0x7df: 0x2000, 0x7e0: 0x2000, 0x7e1: 0x2000, 0x7e2: 0x2000, 0x7e3: 0x2000, + 0x7e4: 0x2000, 0x7e5: 0x2000, 0x7e6: 0x2000, 0x7e7: 0x2000, 0x7e8: 0x2000, 0x7e9: 0x2000, + 0x7eb: 0x2000, 0x7ec: 0x2000, 0x7ed: 0x2000, 0x7ee: 0x2000, 0x7ef: 0x2000, + 0x7f0: 0x2000, 0x7f1: 0x2000, 0x7f2: 0x2000, 0x7f3: 0x2000, 0x7f4: 0x2000, 0x7f5: 0x2000, + 0x7f6: 0x2000, 0x7f7: 0x2000, 0x7f8: 0x2000, 0x7f9: 0x2000, 0x7fa: 0x2000, 0x7fb: 0x2000, + 0x7fc: 0x2000, 0x7fd: 0x2000, 0x7fe: 0x2000, 0x7ff: 0x2000, + // Block 0x20, offset 0x800 + 0x800: 0x2000, 0x801: 0x2000, 0x802: 0x200d, 0x803: 0x2000, 0x804: 0x2000, 0x805: 0x2000, + 0x806: 0x2000, 0x807: 0x2000, 0x808: 0x2000, 0x809: 0x2000, 0x80a: 0x2000, 0x80b: 0x2000, + 0x80c: 0x2000, 0x80d: 0x2000, 0x80e: 0x2000, 0x80f: 0x2000, 0x810: 0x2000, 0x811: 0x2000, + 0x812: 0x2000, 0x813: 0x2000, 0x814: 0x2000, 0x815: 0x2000, 0x816: 0x2000, 0x817: 0x2000, + 0x818: 0x2000, 0x819: 0x2000, 0x81a: 0x2000, 0x81b: 0x2000, 0x81c: 0x2000, 0x81d: 0x2000, + 0x81e: 0x2000, 0x81f: 0x2000, 0x820: 0x2000, 0x821: 0x2000, 0x822: 0x2000, 0x823: 0x2000, + 0x824: 0x2000, 0x825: 0x2000, 0x826: 0x2000, 0x827: 0x2000, 0x828: 0x2000, 0x829: 0x2000, + 0x82a: 0x2000, 0x82b: 0x2000, 0x82c: 0x2000, 0x82d: 0x2000, 0x82e: 0x2000, 0x82f: 0x2000, + 0x830: 0x2000, 0x831: 0x2000, 0x832: 0x2000, 0x833: 0x2000, 0x834: 0x2000, 0x835: 0x2000, + 0x836: 0x2000, 0x837: 0x2000, 0x838: 0x2000, 0x839: 0x2000, 0x83a: 0x2000, 0x83b: 0x2000, + 0x83c: 0x2000, 0x83d: 0x2000, 0x83e: 0x2000, 0x83f: 0x2000, + // Block 0x21, offset 0x840 + 0x840: 0x2000, 0x841: 0x2000, 0x842: 0x2000, 0x843: 0x2000, 0x844: 0x2000, 0x845: 0x2000, + 0x846: 0x2000, 0x847: 0x2000, 0x848: 0x2000, 0x849: 0x2000, 0x84a: 0x2000, 0x84b: 0x2000, + 0x850: 0x2000, 0x851: 0x2000, + 0x852: 0x2000, 0x853: 0x2000, 0x854: 0x2000, 0x855: 0x2000, 0x856: 0x2000, 0x857: 0x2000, + 0x858: 0x2000, 0x859: 0x2000, 0x85a: 0x2000, 0x85b: 0x2000, 0x85c: 0x2000, 0x85d: 0x2000, + 0x85e: 0x2000, 0x85f: 0x2000, 0x860: 0x2000, 0x861: 0x2000, 0x862: 0x2000, 0x863: 0x2000, + 0x864: 0x2000, 0x865: 0x2000, 0x866: 0x2000, 0x867: 0x2000, 0x868: 0x2000, 0x869: 0x2000, + 0x86a: 
0x2000, 0x86b: 0x2000, 0x86c: 0x2000, 0x86d: 0x2000, 0x86e: 0x2000, 0x86f: 0x2000, + 0x870: 0x2000, 0x871: 0x2000, 0x872: 0x2000, 0x873: 0x2000, + // Block 0x22, offset 0x880 + 0x880: 0x2000, 0x881: 0x2000, 0x882: 0x2000, 0x883: 0x2000, 0x884: 0x2000, 0x885: 0x2000, + 0x886: 0x2000, 0x887: 0x2000, 0x888: 0x2000, 0x889: 0x2000, 0x88a: 0x2000, 0x88b: 0x2000, + 0x88c: 0x2000, 0x88d: 0x2000, 0x88e: 0x2000, 0x88f: 0x2000, + 0x892: 0x2000, 0x893: 0x2000, 0x894: 0x2000, 0x895: 0x2000, + 0x8a0: 0x200e, 0x8a1: 0x2000, 0x8a3: 0x2000, + 0x8a4: 0x2000, 0x8a5: 0x2000, 0x8a6: 0x2000, 0x8a7: 0x2000, 0x8a8: 0x2000, 0x8a9: 0x2000, + 0x8b2: 0x2000, 0x8b3: 0x2000, + 0x8b6: 0x2000, 0x8b7: 0x2000, + 0x8bc: 0x2000, 0x8bd: 0x2000, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x2000, 0x8c1: 0x2000, + 0x8c6: 0x2000, 0x8c7: 0x2000, 0x8c8: 0x2000, 0x8cb: 0x200f, + 0x8ce: 0x2000, 0x8cf: 0x2000, 0x8d0: 0x2000, 0x8d1: 0x2000, + 0x8e2: 0x2000, 0x8e3: 0x2000, + 0x8e4: 0x2000, 0x8e5: 0x2000, + 0x8ef: 0x2000, + 0x8fd: 0x4000, 0x8fe: 0x4000, + // Block 0x24, offset 0x900 + 0x905: 0x2000, + 0x906: 0x2000, 0x909: 0x2000, + 0x90e: 0x2000, 0x90f: 0x2000, + 0x914: 0x4000, 0x915: 0x4000, + 0x91c: 0x2000, + 0x91e: 0x2000, + // Block 0x25, offset 0x940 + 0x940: 0x2000, 0x942: 0x2000, + 0x948: 0x4000, 0x949: 0x4000, 0x94a: 0x4000, 0x94b: 0x4000, + 0x94c: 0x4000, 0x94d: 0x4000, 0x94e: 0x4000, 0x94f: 0x4000, 0x950: 0x4000, 0x951: 0x4000, + 0x952: 0x4000, 0x953: 0x4000, + 0x960: 0x2000, 0x961: 0x2000, 0x963: 0x2000, + 0x964: 0x2000, 0x965: 0x2000, 0x967: 0x2000, 0x968: 0x2000, 0x969: 0x2000, + 0x96a: 0x2000, 0x96c: 0x2000, 0x96d: 0x2000, 0x96f: 0x2000, + 0x97f: 0x4000, + // Block 0x26, offset 0x980 + 0x993: 0x4000, + 0x99e: 0x2000, 0x99f: 0x2000, 0x9a1: 0x4000, + 0x9aa: 0x4000, 0x9ab: 0x4000, + 0x9bd: 0x4000, 0x9be: 0x4000, 0x9bf: 0x2000, + // Block 0x27, offset 0x9c0 + 0x9c4: 0x4000, 0x9c5: 0x4000, + 0x9c6: 0x2000, 0x9c7: 0x2000, 0x9c8: 0x2000, 0x9c9: 0x2000, 0x9ca: 0x2000, 0x9cb: 0x2000, + 0x9cc: 0x2000, 0x9cd: 0x2000, 0x9ce: 0x4000, 0x9cf: 0x2000, 0x9d0: 0x2000, 0x9d1: 0x2000, + 0x9d2: 0x2000, 0x9d3: 0x2000, 0x9d4: 0x4000, 0x9d5: 0x2000, 0x9d6: 0x2000, 0x9d7: 0x2000, + 0x9d8: 0x2000, 0x9d9: 0x2000, 0x9da: 0x2000, 0x9db: 0x2000, 0x9dc: 0x2000, 0x9dd: 0x2000, + 0x9de: 0x2000, 0x9df: 0x2000, 0x9e0: 0x2000, 0x9e1: 0x2000, 0x9e3: 0x2000, + 0x9e8: 0x2000, 0x9e9: 0x2000, + 0x9ea: 0x4000, 0x9eb: 0x2000, 0x9ec: 0x2000, 0x9ed: 0x2000, 0x9ee: 0x2000, 0x9ef: 0x2000, + 0x9f0: 0x2000, 0x9f1: 0x2000, 0x9f2: 0x4000, 0x9f3: 0x4000, 0x9f4: 0x2000, 0x9f5: 0x4000, + 0x9f6: 0x2000, 0x9f7: 0x2000, 0x9f8: 0x2000, 0x9f9: 0x2000, 0x9fa: 0x4000, 0x9fb: 0x2000, + 0x9fc: 0x2000, 0x9fd: 0x4000, 0x9fe: 0x2000, 0x9ff: 0x2000, + // Block 0x28, offset 0xa00 + 0xa05: 0x4000, + 0xa0a: 0x4000, 0xa0b: 0x4000, + 0xa28: 0x4000, + 0xa3d: 0x2000, + // Block 0x29, offset 0xa40 + 0xa4c: 0x4000, 0xa4e: 0x4000, + 0xa53: 0x4000, 0xa54: 0x4000, 0xa55: 0x4000, 0xa57: 0x4000, + 0xa76: 0x2000, 0xa77: 0x2000, 0xa78: 0x2000, 0xa79: 0x2000, 0xa7a: 0x2000, 0xa7b: 0x2000, + 0xa7c: 0x2000, 0xa7d: 0x2000, 0xa7e: 0x2000, 0xa7f: 0x2000, + // Block 0x2a, offset 0xa80 + 0xa95: 0x4000, 0xa96: 0x4000, 0xa97: 0x4000, + 0xab0: 0x4000, + 0xabf: 0x4000, + // Block 0x2b, offset 0xac0 + 0xae6: 0x6000, 0xae7: 0x6000, 0xae8: 0x6000, 0xae9: 0x6000, + 0xaea: 0x6000, 0xaeb: 0x6000, 0xaec: 0x6000, 0xaed: 0x6000, + // Block 0x2c, offset 0xb00 + 0xb05: 0x6010, + 0xb06: 0x6011, + // Block 0x2d, offset 0xb40 + 0xb5b: 0x4000, 0xb5c: 0x4000, + // Block 0x2e, offset 0xb80 + 0xb90: 0x4000, + 0xb95: 0x4000, 0xb96: 
0x2000, 0xb97: 0x2000, + 0xb98: 0x2000, 0xb99: 0x2000, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x4000, 0xbc1: 0x4000, 0xbc2: 0x4000, 0xbc3: 0x4000, 0xbc4: 0x4000, 0xbc5: 0x4000, + 0xbc6: 0x4000, 0xbc7: 0x4000, 0xbc8: 0x4000, 0xbc9: 0x4000, 0xbca: 0x4000, 0xbcb: 0x4000, + 0xbcc: 0x4000, 0xbcd: 0x4000, 0xbce: 0x4000, 0xbcf: 0x4000, 0xbd0: 0x4000, 0xbd1: 0x4000, + 0xbd2: 0x4000, 0xbd3: 0x4000, 0xbd4: 0x4000, 0xbd5: 0x4000, 0xbd6: 0x4000, 0xbd7: 0x4000, + 0xbd8: 0x4000, 0xbd9: 0x4000, 0xbdb: 0x4000, 0xbdc: 0x4000, 0xbdd: 0x4000, + 0xbde: 0x4000, 0xbdf: 0x4000, 0xbe0: 0x4000, 0xbe1: 0x4000, 0xbe2: 0x4000, 0xbe3: 0x4000, + 0xbe4: 0x4000, 0xbe5: 0x4000, 0xbe6: 0x4000, 0xbe7: 0x4000, 0xbe8: 0x4000, 0xbe9: 0x4000, + 0xbea: 0x4000, 0xbeb: 0x4000, 0xbec: 0x4000, 0xbed: 0x4000, 0xbee: 0x4000, 0xbef: 0x4000, + 0xbf0: 0x4000, 0xbf1: 0x4000, 0xbf2: 0x4000, 0xbf3: 0x4000, 0xbf4: 0x4000, 0xbf5: 0x4000, + 0xbf6: 0x4000, 0xbf7: 0x4000, 0xbf8: 0x4000, 0xbf9: 0x4000, 0xbfa: 0x4000, 0xbfb: 0x4000, + 0xbfc: 0x4000, 0xbfd: 0x4000, 0xbfe: 0x4000, 0xbff: 0x4000, + // Block 0x30, offset 0xc00 + 0xc00: 0x4000, 0xc01: 0x4000, 0xc02: 0x4000, 0xc03: 0x4000, 0xc04: 0x4000, 0xc05: 0x4000, + 0xc06: 0x4000, 0xc07: 0x4000, 0xc08: 0x4000, 0xc09: 0x4000, 0xc0a: 0x4000, 0xc0b: 0x4000, + 0xc0c: 0x4000, 0xc0d: 0x4000, 0xc0e: 0x4000, 0xc0f: 0x4000, 0xc10: 0x4000, 0xc11: 0x4000, + 0xc12: 0x4000, 0xc13: 0x4000, 0xc14: 0x4000, 0xc15: 0x4000, 0xc16: 0x4000, 0xc17: 0x4000, + 0xc18: 0x4000, 0xc19: 0x4000, 0xc1a: 0x4000, 0xc1b: 0x4000, 0xc1c: 0x4000, 0xc1d: 0x4000, + 0xc1e: 0x4000, 0xc1f: 0x4000, 0xc20: 0x4000, 0xc21: 0x4000, 0xc22: 0x4000, 0xc23: 0x4000, + 0xc24: 0x4000, 0xc25: 0x4000, 0xc26: 0x4000, 0xc27: 0x4000, 0xc28: 0x4000, 0xc29: 0x4000, + 0xc2a: 0x4000, 0xc2b: 0x4000, 0xc2c: 0x4000, 0xc2d: 0x4000, 0xc2e: 0x4000, 0xc2f: 0x4000, + 0xc30: 0x4000, 0xc31: 0x4000, 0xc32: 0x4000, 0xc33: 0x4000, + // Block 0x31, offset 0xc40 + 0xc40: 0x4000, 0xc41: 0x4000, 0xc42: 0x4000, 0xc43: 0x4000, 0xc44: 0x4000, 0xc45: 0x4000, + 0xc46: 0x4000, 0xc47: 0x4000, 0xc48: 0x4000, 0xc49: 0x4000, 0xc4a: 0x4000, 0xc4b: 0x4000, + 0xc4c: 0x4000, 0xc4d: 0x4000, 0xc4e: 0x4000, 0xc4f: 0x4000, 0xc50: 0x4000, 0xc51: 0x4000, + 0xc52: 0x4000, 0xc53: 0x4000, 0xc54: 0x4000, 0xc55: 0x4000, + 0xc70: 0x4000, 0xc71: 0x4000, 0xc72: 0x4000, 0xc73: 0x4000, 0xc74: 0x4000, 0xc75: 0x4000, + 0xc76: 0x4000, 0xc77: 0x4000, 0xc78: 0x4000, 0xc79: 0x4000, 0xc7a: 0x4000, 0xc7b: 0x4000, + // Block 0x32, offset 0xc80 + 0xc80: 0x9012, 0xc81: 0x4013, 0xc82: 0x4014, 0xc83: 0x4000, 0xc84: 0x4000, 0xc85: 0x4000, + 0xc86: 0x4000, 0xc87: 0x4000, 0xc88: 0x4000, 0xc89: 0x4000, 0xc8a: 0x4000, 0xc8b: 0x4000, + 0xc8c: 0x4015, 0xc8d: 0x4015, 0xc8e: 0x4000, 0xc8f: 0x4000, 0xc90: 0x4000, 0xc91: 0x4000, + 0xc92: 0x4000, 0xc93: 0x4000, 0xc94: 0x4000, 0xc95: 0x4000, 0xc96: 0x4000, 0xc97: 0x4000, + 0xc98: 0x4000, 0xc99: 0x4000, 0xc9a: 0x4000, 0xc9b: 0x4000, 0xc9c: 0x4000, 0xc9d: 0x4000, + 0xc9e: 0x4000, 0xc9f: 0x4000, 0xca0: 0x4000, 0xca1: 0x4000, 0xca2: 0x4000, 0xca3: 0x4000, + 0xca4: 0x4000, 0xca5: 0x4000, 0xca6: 0x4000, 0xca7: 0x4000, 0xca8: 0x4000, 0xca9: 0x4000, + 0xcaa: 0x4000, 0xcab: 0x4000, 0xcac: 0x4000, 0xcad: 0x4000, 0xcae: 0x4000, 0xcaf: 0x4000, + 0xcb0: 0x4000, 0xcb1: 0x4000, 0xcb2: 0x4000, 0xcb3: 0x4000, 0xcb4: 0x4000, 0xcb5: 0x4000, + 0xcb6: 0x4000, 0xcb7: 0x4000, 0xcb8: 0x4000, 0xcb9: 0x4000, 0xcba: 0x4000, 0xcbb: 0x4000, + 0xcbc: 0x4000, 0xcbd: 0x4000, 0xcbe: 0x4000, + // Block 0x33, offset 0xcc0 + 0xcc1: 0x4000, 0xcc2: 0x4000, 0xcc3: 0x4000, 0xcc4: 0x4000, 0xcc5: 0x4000, + 0xcc6: 0x4000, 
0xcc7: 0x4000, 0xcc8: 0x4000, 0xcc9: 0x4000, 0xcca: 0x4000, 0xccb: 0x4000, + 0xccc: 0x4000, 0xccd: 0x4000, 0xcce: 0x4000, 0xccf: 0x4000, 0xcd0: 0x4000, 0xcd1: 0x4000, + 0xcd2: 0x4000, 0xcd3: 0x4000, 0xcd4: 0x4000, 0xcd5: 0x4000, 0xcd6: 0x4000, 0xcd7: 0x4000, + 0xcd8: 0x4000, 0xcd9: 0x4000, 0xcda: 0x4000, 0xcdb: 0x4000, 0xcdc: 0x4000, 0xcdd: 0x4000, + 0xcde: 0x4000, 0xcdf: 0x4000, 0xce0: 0x4000, 0xce1: 0x4000, 0xce2: 0x4000, 0xce3: 0x4000, + 0xce4: 0x4000, 0xce5: 0x4000, 0xce6: 0x4000, 0xce7: 0x4000, 0xce8: 0x4000, 0xce9: 0x4000, + 0xcea: 0x4000, 0xceb: 0x4000, 0xcec: 0x4000, 0xced: 0x4000, 0xcee: 0x4000, 0xcef: 0x4000, + 0xcf0: 0x4000, 0xcf1: 0x4000, 0xcf2: 0x4000, 0xcf3: 0x4000, 0xcf4: 0x4000, 0xcf5: 0x4000, + 0xcf6: 0x4000, 0xcf7: 0x4000, 0xcf8: 0x4000, 0xcf9: 0x4000, 0xcfa: 0x4000, 0xcfb: 0x4000, + 0xcfc: 0x4000, 0xcfd: 0x4000, 0xcfe: 0x4000, 0xcff: 0x4000, + // Block 0x34, offset 0xd00 + 0xd00: 0x4000, 0xd01: 0x4000, 0xd02: 0x4000, 0xd03: 0x4000, 0xd04: 0x4000, 0xd05: 0x4000, + 0xd06: 0x4000, 0xd07: 0x4000, 0xd08: 0x4000, 0xd09: 0x4000, 0xd0a: 0x4000, 0xd0b: 0x4000, + 0xd0c: 0x4000, 0xd0d: 0x4000, 0xd0e: 0x4000, 0xd0f: 0x4000, 0xd10: 0x4000, 0xd11: 0x4000, + 0xd12: 0x4000, 0xd13: 0x4000, 0xd14: 0x4000, 0xd15: 0x4000, 0xd16: 0x4000, + 0xd19: 0x4016, 0xd1a: 0x4017, 0xd1b: 0x4000, 0xd1c: 0x4000, 0xd1d: 0x4000, + 0xd1e: 0x4000, 0xd1f: 0x4000, 0xd20: 0x4000, 0xd21: 0x4018, 0xd22: 0x4019, 0xd23: 0x401a, + 0xd24: 0x401b, 0xd25: 0x401c, 0xd26: 0x401d, 0xd27: 0x401e, 0xd28: 0x401f, 0xd29: 0x4020, + 0xd2a: 0x4021, 0xd2b: 0x4022, 0xd2c: 0x4000, 0xd2d: 0x4010, 0xd2e: 0x4000, 0xd2f: 0x4023, + 0xd30: 0x4000, 0xd31: 0x4024, 0xd32: 0x4000, 0xd33: 0x4025, 0xd34: 0x4000, 0xd35: 0x4026, + 0xd36: 0x4000, 0xd37: 0x401a, 0xd38: 0x4000, 0xd39: 0x4027, 0xd3a: 0x4000, 0xd3b: 0x4028, + 0xd3c: 0x4000, 0xd3d: 0x4020, 0xd3e: 0x4000, 0xd3f: 0x4029, + // Block 0x35, offset 0xd40 + 0xd40: 0x4000, 0xd41: 0x402a, 0xd42: 0x4000, 0xd43: 0x402b, 0xd44: 0x402c, 0xd45: 0x4000, + 0xd46: 0x4017, 0xd47: 0x4000, 0xd48: 0x402d, 0xd49: 0x4000, 0xd4a: 0x402e, 0xd4b: 0x402f, + 0xd4c: 0x4030, 0xd4d: 0x4017, 0xd4e: 0x4016, 0xd4f: 0x4017, 0xd50: 0x4000, 0xd51: 0x4000, + 0xd52: 0x4031, 0xd53: 0x4000, 0xd54: 0x4000, 0xd55: 0x4031, 0xd56: 0x4000, 0xd57: 0x4000, + 0xd58: 0x4032, 0xd59: 0x4000, 0xd5a: 0x4000, 0xd5b: 0x4032, 0xd5c: 0x4000, 0xd5d: 0x4000, + 0xd5e: 0x4033, 0xd5f: 0x402e, 0xd60: 0x4034, 0xd61: 0x4035, 0xd62: 0x4034, 0xd63: 0x4036, + 0xd64: 0x4037, 0xd65: 0x4024, 0xd66: 0x4035, 0xd67: 0x4025, 0xd68: 0x4038, 0xd69: 0x4038, + 0xd6a: 0x4039, 0xd6b: 0x4039, 0xd6c: 0x403a, 0xd6d: 0x403a, 0xd6e: 0x4000, 0xd6f: 0x4035, + 0xd70: 0x4000, 0xd71: 0x4000, 0xd72: 0x403b, 0xd73: 0x403c, 0xd74: 0x4000, 0xd75: 0x4000, + 0xd76: 0x4000, 0xd77: 0x4000, 0xd78: 0x4000, 0xd79: 0x4000, 0xd7a: 0x4000, 0xd7b: 0x403d, + 0xd7c: 0x401c, 0xd7d: 0x4000, 0xd7e: 0x4000, 0xd7f: 0x4000, + // Block 0x36, offset 0xd80 + 0xd85: 0x4000, + 0xd86: 0x4000, 0xd87: 0x4000, 0xd88: 0x4000, 0xd89: 0x4000, 0xd8a: 0x4000, 0xd8b: 0x4000, + 0xd8c: 0x4000, 0xd8d: 0x4000, 0xd8e: 0x4000, 0xd8f: 0x4000, 0xd90: 0x4000, 0xd91: 0x4000, + 0xd92: 0x4000, 0xd93: 0x4000, 0xd94: 0x4000, 0xd95: 0x4000, 0xd96: 0x4000, 0xd97: 0x4000, + 0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000, + 0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000, + 0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000, + 0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000, + 0xdb1: 0x403e, 0xdb2: 
0x403e, 0xdb3: 0x403e, 0xdb4: 0x403e, 0xdb5: 0x403e, + 0xdb6: 0x403e, 0xdb7: 0x403e, 0xdb8: 0x403e, 0xdb9: 0x403e, 0xdba: 0x403e, 0xdbb: 0x403e, + 0xdbc: 0x403e, 0xdbd: 0x403e, 0xdbe: 0x403e, 0xdbf: 0x403e, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x4037, 0xdc1: 0x4037, 0xdc2: 0x4037, 0xdc3: 0x4037, 0xdc4: 0x4037, 0xdc5: 0x4037, + 0xdc6: 0x4037, 0xdc7: 0x4037, 0xdc8: 0x4037, 0xdc9: 0x4037, 0xdca: 0x4037, 0xdcb: 0x4037, + 0xdcc: 0x4037, 0xdcd: 0x4037, 0xdce: 0x4037, 0xdcf: 0x400e, 0xdd0: 0x403f, 0xdd1: 0x4040, + 0xdd2: 0x4041, 0xdd3: 0x4040, 0xdd4: 0x403f, 0xdd5: 0x4042, 0xdd6: 0x4043, 0xdd7: 0x4044, + 0xdd8: 0x4040, 0xdd9: 0x4041, 0xdda: 0x4040, 0xddb: 0x4045, 0xddc: 0x4009, 0xddd: 0x4045, + 0xdde: 0x4046, 0xddf: 0x4045, 0xde0: 0x4047, 0xde1: 0x400b, 0xde2: 0x400a, 0xde3: 0x400c, + 0xde4: 0x4048, 0xde5: 0x4000, 0xde6: 0x4000, 0xde7: 0x4000, 0xde8: 0x4000, 0xde9: 0x4000, + 0xdea: 0x4000, 0xdeb: 0x4000, 0xdec: 0x4000, 0xded: 0x4000, 0xdee: 0x4000, 0xdef: 0x4000, + 0xdf0: 0x4000, 0xdf1: 0x4000, 0xdf2: 0x4000, 0xdf3: 0x4000, 0xdf4: 0x4000, 0xdf5: 0x4000, + 0xdf6: 0x4000, 0xdf7: 0x4000, 0xdf8: 0x4000, 0xdf9: 0x4000, 0xdfa: 0x4000, 0xdfb: 0x4000, + 0xdfc: 0x4000, 0xdfd: 0x4000, 0xdfe: 0x4000, 0xdff: 0x4000, + // Block 0x38, offset 0xe00 + 0xe00: 0x4000, 0xe01: 0x4000, 0xe02: 0x4000, 0xe03: 0x4000, 0xe04: 0x4000, 0xe05: 0x4000, + 0xe06: 0x4000, 0xe07: 0x4000, 0xe08: 0x4000, 0xe09: 0x4000, 0xe0a: 0x4000, 0xe0b: 0x4000, + 0xe0c: 0x4000, 0xe0d: 0x4000, 0xe0e: 0x4000, 0xe10: 0x4000, 0xe11: 0x4000, + 0xe12: 0x4000, 0xe13: 0x4000, 0xe14: 0x4000, 0xe15: 0x4000, 0xe16: 0x4000, 0xe17: 0x4000, + 0xe18: 0x4000, 0xe19: 0x4000, 0xe1a: 0x4000, 0xe1b: 0x4000, 0xe1c: 0x4000, 0xe1d: 0x4000, + 0xe1e: 0x4000, 0xe1f: 0x4000, 0xe20: 0x4000, 0xe21: 0x4000, 0xe22: 0x4000, 0xe23: 0x4000, + 0xe24: 0x4000, 0xe25: 0x4000, 0xe26: 0x4000, 0xe27: 0x4000, 0xe28: 0x4000, 0xe29: 0x4000, + 0xe2a: 0x4000, 0xe2b: 0x4000, 0xe2c: 0x4000, 0xe2d: 0x4000, 0xe2e: 0x4000, 0xe2f: 0x4000, + 0xe30: 0x4000, 0xe31: 0x4000, 0xe32: 0x4000, 0xe33: 0x4000, 0xe34: 0x4000, 0xe35: 0x4000, + 0xe36: 0x4000, 0xe37: 0x4000, 0xe38: 0x4000, 0xe39: 0x4000, 0xe3a: 0x4000, + // Block 0x39, offset 0xe40 + 0xe40: 0x4000, 0xe41: 0x4000, 0xe42: 0x4000, 0xe43: 0x4000, 0xe44: 0x4000, 0xe45: 0x4000, + 0xe46: 0x4000, 0xe47: 0x4000, 0xe48: 0x4000, 0xe49: 0x4000, 0xe4a: 0x4000, 0xe4b: 0x4000, + 0xe4c: 0x4000, 0xe4d: 0x4000, 0xe4e: 0x4000, 0xe4f: 0x4000, 0xe50: 0x4000, 0xe51: 0x4000, + 0xe52: 0x4000, 0xe53: 0x4000, 0xe54: 0x4000, 0xe55: 0x4000, 0xe56: 0x4000, 0xe57: 0x4000, + 0xe58: 0x4000, 0xe59: 0x4000, 0xe5a: 0x4000, 0xe5b: 0x4000, 0xe5c: 0x4000, 0xe5d: 0x4000, + 0xe5e: 0x4000, 0xe5f: 0x4000, 0xe60: 0x4000, 0xe61: 0x4000, 0xe62: 0x4000, 0xe63: 0x4000, + 0xe70: 0x4000, 0xe71: 0x4000, 0xe72: 0x4000, 0xe73: 0x4000, 0xe74: 0x4000, 0xe75: 0x4000, + 0xe76: 0x4000, 0xe77: 0x4000, 0xe78: 0x4000, 0xe79: 0x4000, 0xe7a: 0x4000, 0xe7b: 0x4000, + 0xe7c: 0x4000, 0xe7d: 0x4000, 0xe7e: 0x4000, 0xe7f: 0x4000, + // Block 0x3a, offset 0xe80 + 0xe80: 0x4000, 0xe81: 0x4000, 0xe82: 0x4000, 0xe83: 0x4000, 0xe84: 0x4000, 0xe85: 0x4000, + 0xe86: 0x4000, 0xe87: 0x4000, 0xe88: 0x4000, 0xe89: 0x4000, 0xe8a: 0x4000, 0xe8b: 0x4000, + 0xe8c: 0x4000, 0xe8d: 0x4000, 0xe8e: 0x4000, 0xe8f: 0x4000, 0xe90: 0x4000, 0xe91: 0x4000, + 0xe92: 0x4000, 0xe93: 0x4000, 0xe94: 0x4000, 0xe95: 0x4000, 0xe96: 0x4000, 0xe97: 0x4000, + 0xe98: 0x4000, 0xe99: 0x4000, 0xe9a: 0x4000, 0xe9b: 0x4000, 0xe9c: 0x4000, 0xe9d: 0x4000, + 0xe9e: 0x4000, 0xea0: 0x4000, 0xea1: 0x4000, 0xea2: 0x4000, 0xea3: 0x4000, + 0xea4: 0x4000, 
0xea5: 0x4000, 0xea6: 0x4000, 0xea7: 0x4000, 0xea8: 0x4000, 0xea9: 0x4000, + 0xeaa: 0x4000, 0xeab: 0x4000, 0xeac: 0x4000, 0xead: 0x4000, 0xeae: 0x4000, 0xeaf: 0x4000, + 0xeb0: 0x4000, 0xeb1: 0x4000, 0xeb2: 0x4000, 0xeb3: 0x4000, 0xeb4: 0x4000, 0xeb5: 0x4000, + 0xeb6: 0x4000, 0xeb7: 0x4000, 0xeb8: 0x4000, 0xeb9: 0x4000, 0xeba: 0x4000, 0xebb: 0x4000, + 0xebc: 0x4000, 0xebd: 0x4000, 0xebe: 0x4000, 0xebf: 0x4000, + // Block 0x3b, offset 0xec0 + 0xec0: 0x4000, 0xec1: 0x4000, 0xec2: 0x4000, 0xec3: 0x4000, 0xec4: 0x4000, 0xec5: 0x4000, + 0xec6: 0x4000, 0xec7: 0x4000, 0xec8: 0x2000, 0xec9: 0x2000, 0xeca: 0x2000, 0xecb: 0x2000, + 0xecc: 0x2000, 0xecd: 0x2000, 0xece: 0x2000, 0xecf: 0x2000, 0xed0: 0x4000, 0xed1: 0x4000, + 0xed2: 0x4000, 0xed3: 0x4000, 0xed4: 0x4000, 0xed5: 0x4000, 0xed6: 0x4000, 0xed7: 0x4000, + 0xed8: 0x4000, 0xed9: 0x4000, 0xeda: 0x4000, 0xedb: 0x4000, 0xedc: 0x4000, 0xedd: 0x4000, + 0xede: 0x4000, 0xedf: 0x4000, 0xee0: 0x4000, 0xee1: 0x4000, 0xee2: 0x4000, 0xee3: 0x4000, + 0xee4: 0x4000, 0xee5: 0x4000, 0xee6: 0x4000, 0xee7: 0x4000, 0xee8: 0x4000, 0xee9: 0x4000, + 0xeea: 0x4000, 0xeeb: 0x4000, 0xeec: 0x4000, 0xeed: 0x4000, 0xeee: 0x4000, 0xeef: 0x4000, + 0xef0: 0x4000, 0xef1: 0x4000, 0xef2: 0x4000, 0xef3: 0x4000, 0xef4: 0x4000, 0xef5: 0x4000, + 0xef6: 0x4000, 0xef7: 0x4000, 0xef8: 0x4000, 0xef9: 0x4000, 0xefa: 0x4000, 0xefb: 0x4000, + 0xefc: 0x4000, 0xefd: 0x4000, 0xefe: 0x4000, 0xeff: 0x4000, + // Block 0x3c, offset 0xf00 + 0xf00: 0x4000, 0xf01: 0x4000, 0xf02: 0x4000, 0xf03: 0x4000, 0xf04: 0x4000, 0xf05: 0x4000, + 0xf06: 0x4000, 0xf07: 0x4000, 0xf08: 0x4000, 0xf09: 0x4000, 0xf0a: 0x4000, 0xf0b: 0x4000, + 0xf0c: 0x4000, 0xf0d: 0x4000, 0xf0e: 0x4000, 0xf0f: 0x4000, 0xf10: 0x4000, 0xf11: 0x4000, + 0xf12: 0x4000, 0xf13: 0x4000, 0xf14: 0x4000, 0xf15: 0x4000, 0xf16: 0x4000, 0xf17: 0x4000, + 0xf18: 0x4000, 0xf19: 0x4000, 0xf1a: 0x4000, 0xf1b: 0x4000, 0xf1c: 0x4000, 0xf1d: 0x4000, + 0xf1e: 0x4000, 0xf1f: 0x4000, 0xf20: 0x4000, 0xf21: 0x4000, 0xf22: 0x4000, 0xf23: 0x4000, + 0xf24: 0x4000, 0xf25: 0x4000, 0xf26: 0x4000, 0xf27: 0x4000, 0xf28: 0x4000, 0xf29: 0x4000, + 0xf2a: 0x4000, 0xf2b: 0x4000, 0xf2c: 0x4000, 0xf2d: 0x4000, 0xf2e: 0x4000, 0xf2f: 0x4000, + 0xf30: 0x4000, 0xf31: 0x4000, 0xf32: 0x4000, 0xf33: 0x4000, 0xf34: 0x4000, 0xf35: 0x4000, + 0xf36: 0x4000, 0xf37: 0x4000, 0xf38: 0x4000, 0xf39: 0x4000, 0xf3a: 0x4000, 0xf3b: 0x4000, + 0xf3c: 0x4000, 0xf3d: 0x4000, 0xf3e: 0x4000, + // Block 0x3d, offset 0xf40 + 0xf40: 0x4000, 0xf41: 0x4000, 0xf42: 0x4000, 0xf43: 0x4000, 0xf44: 0x4000, 0xf45: 0x4000, + 0xf46: 0x4000, 0xf47: 0x4000, 0xf48: 0x4000, 0xf49: 0x4000, 0xf4a: 0x4000, 0xf4b: 0x4000, + 0xf4c: 0x4000, 0xf50: 0x4000, 0xf51: 0x4000, + 0xf52: 0x4000, 0xf53: 0x4000, 0xf54: 0x4000, 0xf55: 0x4000, 0xf56: 0x4000, 0xf57: 0x4000, + 0xf58: 0x4000, 0xf59: 0x4000, 0xf5a: 0x4000, 0xf5b: 0x4000, 0xf5c: 0x4000, 0xf5d: 0x4000, + 0xf5e: 0x4000, 0xf5f: 0x4000, 0xf60: 0x4000, 0xf61: 0x4000, 0xf62: 0x4000, 0xf63: 0x4000, + 0xf64: 0x4000, 0xf65: 0x4000, 0xf66: 0x4000, 0xf67: 0x4000, 0xf68: 0x4000, 0xf69: 0x4000, + 0xf6a: 0x4000, 0xf6b: 0x4000, 0xf6c: 0x4000, 0xf6d: 0x4000, 0xf6e: 0x4000, 0xf6f: 0x4000, + 0xf70: 0x4000, 0xf71: 0x4000, 0xf72: 0x4000, 0xf73: 0x4000, 0xf74: 0x4000, 0xf75: 0x4000, + 0xf76: 0x4000, 0xf77: 0x4000, 0xf78: 0x4000, 0xf79: 0x4000, 0xf7a: 0x4000, 0xf7b: 0x4000, + 0xf7c: 0x4000, 0xf7d: 0x4000, 0xf7e: 0x4000, 0xf7f: 0x4000, + // Block 0x3e, offset 0xf80 + 0xf80: 0x4000, 0xf81: 0x4000, 0xf82: 0x4000, 0xf83: 0x4000, 0xf84: 0x4000, 0xf85: 0x4000, + 0xf86: 0x4000, + // Block 0x3f, offset 
0xfc0 + 0xfe0: 0x4000, 0xfe1: 0x4000, 0xfe2: 0x4000, 0xfe3: 0x4000, + 0xfe4: 0x4000, 0xfe5: 0x4000, 0xfe6: 0x4000, 0xfe7: 0x4000, 0xfe8: 0x4000, 0xfe9: 0x4000, + 0xfea: 0x4000, 0xfeb: 0x4000, 0xfec: 0x4000, 0xfed: 0x4000, 0xfee: 0x4000, 0xfef: 0x4000, + 0xff0: 0x4000, 0xff1: 0x4000, 0xff2: 0x4000, 0xff3: 0x4000, 0xff4: 0x4000, 0xff5: 0x4000, + 0xff6: 0x4000, 0xff7: 0x4000, 0xff8: 0x4000, 0xff9: 0x4000, 0xffa: 0x4000, 0xffb: 0x4000, + 0xffc: 0x4000, + // Block 0x40, offset 0x1000 + 0x1000: 0x4000, 0x1001: 0x4000, 0x1002: 0x4000, 0x1003: 0x4000, 0x1004: 0x4000, 0x1005: 0x4000, + 0x1006: 0x4000, 0x1007: 0x4000, 0x1008: 0x4000, 0x1009: 0x4000, 0x100a: 0x4000, 0x100b: 0x4000, + 0x100c: 0x4000, 0x100d: 0x4000, 0x100e: 0x4000, 0x100f: 0x4000, 0x1010: 0x4000, 0x1011: 0x4000, + 0x1012: 0x4000, 0x1013: 0x4000, 0x1014: 0x4000, 0x1015: 0x4000, 0x1016: 0x4000, 0x1017: 0x4000, + 0x1018: 0x4000, 0x1019: 0x4000, 0x101a: 0x4000, 0x101b: 0x4000, 0x101c: 0x4000, 0x101d: 0x4000, + 0x101e: 0x4000, 0x101f: 0x4000, 0x1020: 0x4000, 0x1021: 0x4000, 0x1022: 0x4000, 0x1023: 0x4000, + // Block 0x41, offset 0x1040 + 0x1040: 0x2000, 0x1041: 0x2000, 0x1042: 0x2000, 0x1043: 0x2000, 0x1044: 0x2000, 0x1045: 0x2000, + 0x1046: 0x2000, 0x1047: 0x2000, 0x1048: 0x2000, 0x1049: 0x2000, 0x104a: 0x2000, 0x104b: 0x2000, + 0x104c: 0x2000, 0x104d: 0x2000, 0x104e: 0x2000, 0x104f: 0x2000, 0x1050: 0x4000, 0x1051: 0x4000, + 0x1052: 0x4000, 0x1053: 0x4000, 0x1054: 0x4000, 0x1055: 0x4000, 0x1056: 0x4000, 0x1057: 0x4000, + 0x1058: 0x4000, 0x1059: 0x4000, + 0x1070: 0x4000, 0x1071: 0x4000, 0x1072: 0x4000, 0x1073: 0x4000, 0x1074: 0x4000, 0x1075: 0x4000, + 0x1076: 0x4000, 0x1077: 0x4000, 0x1078: 0x4000, 0x1079: 0x4000, 0x107a: 0x4000, 0x107b: 0x4000, + 0x107c: 0x4000, 0x107d: 0x4000, 0x107e: 0x4000, 0x107f: 0x4000, + // Block 0x42, offset 0x1080 + 0x1080: 0x4000, 0x1081: 0x4000, 0x1082: 0x4000, 0x1083: 0x4000, 0x1084: 0x4000, 0x1085: 0x4000, + 0x1086: 0x4000, 0x1087: 0x4000, 0x1088: 0x4000, 0x1089: 0x4000, 0x108a: 0x4000, 0x108b: 0x4000, + 0x108c: 0x4000, 0x108d: 0x4000, 0x108e: 0x4000, 0x108f: 0x4000, 0x1090: 0x4000, 0x1091: 0x4000, + 0x1092: 0x4000, 0x1094: 0x4000, 0x1095: 0x4000, 0x1096: 0x4000, 0x1097: 0x4000, + 0x1098: 0x4000, 0x1099: 0x4000, 0x109a: 0x4000, 0x109b: 0x4000, 0x109c: 0x4000, 0x109d: 0x4000, + 0x109e: 0x4000, 0x109f: 0x4000, 0x10a0: 0x4000, 0x10a1: 0x4000, 0x10a2: 0x4000, 0x10a3: 0x4000, + 0x10a4: 0x4000, 0x10a5: 0x4000, 0x10a6: 0x4000, 0x10a8: 0x4000, 0x10a9: 0x4000, + 0x10aa: 0x4000, 0x10ab: 0x4000, + // Block 0x43, offset 0x10c0 + 0x10c1: 0x9012, 0x10c2: 0x9012, 0x10c3: 0x9012, 0x10c4: 0x9012, 0x10c5: 0x9012, + 0x10c6: 0x9012, 0x10c7: 0x9012, 0x10c8: 0x9012, 0x10c9: 0x9012, 0x10ca: 0x9012, 0x10cb: 0x9012, + 0x10cc: 0x9012, 0x10cd: 0x9012, 0x10ce: 0x9012, 0x10cf: 0x9012, 0x10d0: 0x9012, 0x10d1: 0x9012, + 0x10d2: 0x9012, 0x10d3: 0x9012, 0x10d4: 0x9012, 0x10d5: 0x9012, 0x10d6: 0x9012, 0x10d7: 0x9012, + 0x10d8: 0x9012, 0x10d9: 0x9012, 0x10da: 0x9012, 0x10db: 0x9012, 0x10dc: 0x9012, 0x10dd: 0x9012, + 0x10de: 0x9012, 0x10df: 0x9012, 0x10e0: 0x9049, 0x10e1: 0x9049, 0x10e2: 0x9049, 0x10e3: 0x9049, + 0x10e4: 0x9049, 0x10e5: 0x9049, 0x10e6: 0x9049, 0x10e7: 0x9049, 0x10e8: 0x9049, 0x10e9: 0x9049, + 0x10ea: 0x9049, 0x10eb: 0x9049, 0x10ec: 0x9049, 0x10ed: 0x9049, 0x10ee: 0x9049, 0x10ef: 0x9049, + 0x10f0: 0x9049, 0x10f1: 0x9049, 0x10f2: 0x9049, 0x10f3: 0x9049, 0x10f4: 0x9049, 0x10f5: 0x9049, + 0x10f6: 0x9049, 0x10f7: 0x9049, 0x10f8: 0x9049, 0x10f9: 0x9049, 0x10fa: 0x9049, 0x10fb: 0x9049, + 0x10fc: 0x9049, 0x10fd: 0x9049, 0x10fe: 0x9049, 
0x10ff: 0x9049, + // Block 0x44, offset 0x1100 + 0x1100: 0x9049, 0x1101: 0x9049, 0x1102: 0x9049, 0x1103: 0x9049, 0x1104: 0x9049, 0x1105: 0x9049, + 0x1106: 0x9049, 0x1107: 0x9049, 0x1108: 0x9049, 0x1109: 0x9049, 0x110a: 0x9049, 0x110b: 0x9049, + 0x110c: 0x9049, 0x110d: 0x9049, 0x110e: 0x9049, 0x110f: 0x9049, 0x1110: 0x9049, 0x1111: 0x9049, + 0x1112: 0x9049, 0x1113: 0x9049, 0x1114: 0x9049, 0x1115: 0x9049, 0x1116: 0x9049, 0x1117: 0x9049, + 0x1118: 0x9049, 0x1119: 0x9049, 0x111a: 0x9049, 0x111b: 0x9049, 0x111c: 0x9049, 0x111d: 0x9049, + 0x111e: 0x9049, 0x111f: 0x904a, 0x1120: 0x904b, 0x1121: 0xb04c, 0x1122: 0xb04d, 0x1123: 0xb04d, + 0x1124: 0xb04e, 0x1125: 0xb04f, 0x1126: 0xb050, 0x1127: 0xb051, 0x1128: 0xb052, 0x1129: 0xb053, + 0x112a: 0xb054, 0x112b: 0xb055, 0x112c: 0xb056, 0x112d: 0xb057, 0x112e: 0xb058, 0x112f: 0xb059, + 0x1130: 0xb05a, 0x1131: 0xb05b, 0x1132: 0xb05c, 0x1133: 0xb05d, 0x1134: 0xb05e, 0x1135: 0xb05f, + 0x1136: 0xb060, 0x1137: 0xb061, 0x1138: 0xb062, 0x1139: 0xb063, 0x113a: 0xb064, 0x113b: 0xb065, + 0x113c: 0xb052, 0x113d: 0xb066, 0x113e: 0xb067, 0x113f: 0xb055, + // Block 0x45, offset 0x1140 + 0x1140: 0xb068, 0x1141: 0xb069, 0x1142: 0xb06a, 0x1143: 0xb06b, 0x1144: 0xb05a, 0x1145: 0xb056, + 0x1146: 0xb06c, 0x1147: 0xb06d, 0x1148: 0xb06b, 0x1149: 0xb06e, 0x114a: 0xb06b, 0x114b: 0xb06f, + 0x114c: 0xb06f, 0x114d: 0xb070, 0x114e: 0xb070, 0x114f: 0xb071, 0x1150: 0xb056, 0x1151: 0xb072, + 0x1152: 0xb073, 0x1153: 0xb072, 0x1154: 0xb074, 0x1155: 0xb073, 0x1156: 0xb075, 0x1157: 0xb075, + 0x1158: 0xb076, 0x1159: 0xb076, 0x115a: 0xb077, 0x115b: 0xb077, 0x115c: 0xb073, 0x115d: 0xb078, + 0x115e: 0xb079, 0x115f: 0xb067, 0x1160: 0xb07a, 0x1161: 0xb07b, 0x1162: 0xb07b, 0x1163: 0xb07b, + 0x1164: 0xb07b, 0x1165: 0xb07b, 0x1166: 0xb07b, 0x1167: 0xb07b, 0x1168: 0xb07b, 0x1169: 0xb07b, + 0x116a: 0xb07b, 0x116b: 0xb07b, 0x116c: 0xb07b, 0x116d: 0xb07b, 0x116e: 0xb07b, 0x116f: 0xb07b, + 0x1170: 0xb07c, 0x1171: 0xb07c, 0x1172: 0xb07c, 0x1173: 0xb07c, 0x1174: 0xb07c, 0x1175: 0xb07c, + 0x1176: 0xb07c, 0x1177: 0xb07c, 0x1178: 0xb07c, 0x1179: 0xb07c, 0x117a: 0xb07c, 0x117b: 0xb07c, + 0x117c: 0xb07c, 0x117d: 0xb07c, 0x117e: 0xb07c, + // Block 0x46, offset 0x1180 + 0x1182: 0xb07d, 0x1183: 0xb07e, 0x1184: 0xb07f, 0x1185: 0xb080, + 0x1186: 0xb07f, 0x1187: 0xb07e, 0x118a: 0xb081, 0x118b: 0xb082, + 0x118c: 0xb083, 0x118d: 0xb07f, 0x118e: 0xb080, 0x118f: 0xb07f, + 0x1192: 0xb084, 0x1193: 0xb085, 0x1194: 0xb084, 0x1195: 0xb086, 0x1196: 0xb084, 0x1197: 0xb087, + 0x119a: 0xb088, 0x119b: 0xb089, 0x119c: 0xb08a, + 0x11a0: 0x908b, 0x11a1: 0x908b, 0x11a2: 0x908c, 0x11a3: 0x908d, + 0x11a4: 0x908b, 0x11a5: 0x908e, 0x11a6: 0x908f, 0x11a8: 0xb090, 0x11a9: 0xb091, + 0x11aa: 0xb092, 0x11ab: 0xb091, 0x11ac: 0xb093, 0x11ad: 0xb094, 0x11ae: 0xb095, + 0x11bd: 0x2000, + // Block 0x47, offset 0x11c0 + 0x11e0: 0x4000, + // Block 0x48, offset 0x1200 + 0x1200: 0x4000, 0x1201: 0x4000, 0x1202: 0x4000, 0x1203: 0x4000, 0x1204: 0x4000, 0x1205: 0x4000, + 0x1206: 0x4000, 0x1207: 0x4000, 0x1208: 0x4000, 0x1209: 0x4000, 0x120a: 0x4000, 0x120b: 0x4000, + 0x120c: 0x4000, 0x120d: 0x4000, 0x120e: 0x4000, 0x120f: 0x4000, 0x1210: 0x4000, 0x1211: 0x4000, + 0x1212: 0x4000, 0x1213: 0x4000, 0x1214: 0x4000, 0x1215: 0x4000, 0x1216: 0x4000, 0x1217: 0x4000, + 0x1218: 0x4000, 0x1219: 0x4000, 0x121a: 0x4000, 0x121b: 0x4000, 0x121c: 0x4000, 0x121d: 0x4000, + 0x121e: 0x4000, 0x121f: 0x4000, 0x1220: 0x4000, 0x1221: 0x4000, 0x1222: 0x4000, 0x1223: 0x4000, + 0x1224: 0x4000, 0x1225: 0x4000, 0x1226: 0x4000, 0x1227: 0x4000, 0x1228: 0x4000, 0x1229: 0x4000, + 0x122a: 
0x4000, 0x122b: 0x4000, 0x122c: 0x4000, + // Block 0x49, offset 0x1240 + 0x1240: 0x4000, 0x1241: 0x4000, 0x1242: 0x4000, 0x1243: 0x4000, 0x1244: 0x4000, 0x1245: 0x4000, + 0x1246: 0x4000, 0x1247: 0x4000, 0x1248: 0x4000, 0x1249: 0x4000, 0x124a: 0x4000, 0x124b: 0x4000, + 0x124c: 0x4000, 0x124d: 0x4000, 0x124e: 0x4000, 0x124f: 0x4000, 0x1250: 0x4000, 0x1251: 0x4000, + 0x1252: 0x4000, 0x1253: 0x4000, 0x1254: 0x4000, 0x1255: 0x4000, 0x1256: 0x4000, 0x1257: 0x4000, + 0x1258: 0x4000, 0x1259: 0x4000, 0x125a: 0x4000, 0x125b: 0x4000, 0x125c: 0x4000, 0x125d: 0x4000, + 0x125e: 0x4000, 0x125f: 0x4000, 0x1260: 0x4000, 0x1261: 0x4000, 0x1262: 0x4000, 0x1263: 0x4000, + 0x1264: 0x4000, 0x1265: 0x4000, 0x1266: 0x4000, 0x1267: 0x4000, 0x1268: 0x4000, 0x1269: 0x4000, + 0x126a: 0x4000, 0x126b: 0x4000, 0x126c: 0x4000, 0x126d: 0x4000, 0x126e: 0x4000, 0x126f: 0x4000, + 0x1270: 0x4000, 0x1271: 0x4000, 0x1272: 0x4000, + // Block 0x4a, offset 0x1280 + 0x1280: 0x4000, 0x1281: 0x4000, + // Block 0x4b, offset 0x12c0 + 0x12c4: 0x4000, + // Block 0x4c, offset 0x1300 + 0x130f: 0x4000, + // Block 0x4d, offset 0x1340 + 0x1340: 0x2000, 0x1341: 0x2000, 0x1342: 0x2000, 0x1343: 0x2000, 0x1344: 0x2000, 0x1345: 0x2000, + 0x1346: 0x2000, 0x1347: 0x2000, 0x1348: 0x2000, 0x1349: 0x2000, 0x134a: 0x2000, + 0x1350: 0x2000, 0x1351: 0x2000, + 0x1352: 0x2000, 0x1353: 0x2000, 0x1354: 0x2000, 0x1355: 0x2000, 0x1356: 0x2000, 0x1357: 0x2000, + 0x1358: 0x2000, 0x1359: 0x2000, 0x135a: 0x2000, 0x135b: 0x2000, 0x135c: 0x2000, 0x135d: 0x2000, + 0x135e: 0x2000, 0x135f: 0x2000, 0x1360: 0x2000, 0x1361: 0x2000, 0x1362: 0x2000, 0x1363: 0x2000, + 0x1364: 0x2000, 0x1365: 0x2000, 0x1366: 0x2000, 0x1367: 0x2000, 0x1368: 0x2000, 0x1369: 0x2000, + 0x136a: 0x2000, 0x136b: 0x2000, 0x136c: 0x2000, 0x136d: 0x2000, + 0x1370: 0x2000, 0x1371: 0x2000, 0x1372: 0x2000, 0x1373: 0x2000, 0x1374: 0x2000, 0x1375: 0x2000, + 0x1376: 0x2000, 0x1377: 0x2000, 0x1378: 0x2000, 0x1379: 0x2000, 0x137a: 0x2000, 0x137b: 0x2000, + 0x137c: 0x2000, 0x137d: 0x2000, 0x137e: 0x2000, 0x137f: 0x2000, + // Block 0x4e, offset 0x1380 + 0x1380: 0x2000, 0x1381: 0x2000, 0x1382: 0x2000, 0x1383: 0x2000, 0x1384: 0x2000, 0x1385: 0x2000, + 0x1386: 0x2000, 0x1387: 0x2000, 0x1388: 0x2000, 0x1389: 0x2000, 0x138a: 0x2000, 0x138b: 0x2000, + 0x138c: 0x2000, 0x138d: 0x2000, 0x138e: 0x2000, 0x138f: 0x2000, 0x1390: 0x2000, 0x1391: 0x2000, + 0x1392: 0x2000, 0x1393: 0x2000, 0x1394: 0x2000, 0x1395: 0x2000, 0x1396: 0x2000, 0x1397: 0x2000, + 0x1398: 0x2000, 0x1399: 0x2000, 0x139a: 0x2000, 0x139b: 0x2000, 0x139c: 0x2000, 0x139d: 0x2000, + 0x139e: 0x2000, 0x139f: 0x2000, 0x13a0: 0x2000, 0x13a1: 0x2000, 0x13a2: 0x2000, 0x13a3: 0x2000, + 0x13a4: 0x2000, 0x13a5: 0x2000, 0x13a6: 0x2000, 0x13a7: 0x2000, 0x13a8: 0x2000, 0x13a9: 0x2000, + 0x13b0: 0x2000, 0x13b1: 0x2000, 0x13b2: 0x2000, 0x13b3: 0x2000, 0x13b4: 0x2000, 0x13b5: 0x2000, + 0x13b6: 0x2000, 0x13b7: 0x2000, 0x13b8: 0x2000, 0x13b9: 0x2000, 0x13ba: 0x2000, 0x13bb: 0x2000, + 0x13bc: 0x2000, 0x13bd: 0x2000, 0x13be: 0x2000, 0x13bf: 0x2000, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x2000, 0x13c1: 0x2000, 0x13c2: 0x2000, 0x13c3: 0x2000, 0x13c4: 0x2000, 0x13c5: 0x2000, + 0x13c6: 0x2000, 0x13c7: 0x2000, 0x13c8: 0x2000, 0x13c9: 0x2000, 0x13ca: 0x2000, 0x13cb: 0x2000, + 0x13cc: 0x2000, 0x13cd: 0x2000, 0x13ce: 0x4000, 0x13cf: 0x2000, 0x13d0: 0x2000, 0x13d1: 0x4000, + 0x13d2: 0x4000, 0x13d3: 0x4000, 0x13d4: 0x4000, 0x13d5: 0x4000, 0x13d6: 0x4000, 0x13d7: 0x4000, + 0x13d8: 0x4000, 0x13d9: 0x4000, 0x13da: 0x4000, 0x13db: 0x2000, 0x13dc: 0x2000, 0x13dd: 0x2000, + 0x13de: 0x2000, 
0x13df: 0x2000, 0x13e0: 0x2000, 0x13e1: 0x2000, 0x13e2: 0x2000, 0x13e3: 0x2000, + 0x13e4: 0x2000, 0x13e5: 0x2000, 0x13e6: 0x2000, 0x13e7: 0x2000, 0x13e8: 0x2000, 0x13e9: 0x2000, + 0x13ea: 0x2000, 0x13eb: 0x2000, 0x13ec: 0x2000, + // Block 0x50, offset 0x1400 + 0x1400: 0x4000, 0x1401: 0x4000, 0x1402: 0x4000, + 0x1410: 0x4000, 0x1411: 0x4000, + 0x1412: 0x4000, 0x1413: 0x4000, 0x1414: 0x4000, 0x1415: 0x4000, 0x1416: 0x4000, 0x1417: 0x4000, + 0x1418: 0x4000, 0x1419: 0x4000, 0x141a: 0x4000, 0x141b: 0x4000, 0x141c: 0x4000, 0x141d: 0x4000, + 0x141e: 0x4000, 0x141f: 0x4000, 0x1420: 0x4000, 0x1421: 0x4000, 0x1422: 0x4000, 0x1423: 0x4000, + 0x1424: 0x4000, 0x1425: 0x4000, 0x1426: 0x4000, 0x1427: 0x4000, 0x1428: 0x4000, 0x1429: 0x4000, + 0x142a: 0x4000, 0x142b: 0x4000, 0x142c: 0x4000, 0x142d: 0x4000, 0x142e: 0x4000, 0x142f: 0x4000, + 0x1430: 0x4000, 0x1431: 0x4000, 0x1432: 0x4000, 0x1433: 0x4000, 0x1434: 0x4000, 0x1435: 0x4000, + 0x1436: 0x4000, 0x1437: 0x4000, 0x1438: 0x4000, 0x1439: 0x4000, 0x143a: 0x4000, 0x143b: 0x4000, + // Block 0x51, offset 0x1440 + 0x1440: 0x4000, 0x1441: 0x4000, 0x1442: 0x4000, 0x1443: 0x4000, 0x1444: 0x4000, 0x1445: 0x4000, + 0x1446: 0x4000, 0x1447: 0x4000, 0x1448: 0x4000, + 0x1450: 0x4000, 0x1451: 0x4000, + // Block 0x52, offset 0x1480 + 0x1480: 0x4000, 0x1481: 0x4000, 0x1482: 0x4000, 0x1483: 0x4000, 0x1484: 0x4000, 0x1485: 0x4000, + 0x1486: 0x4000, 0x1487: 0x4000, 0x1488: 0x4000, 0x1489: 0x4000, 0x148a: 0x4000, 0x148b: 0x4000, + 0x148c: 0x4000, 0x148d: 0x4000, 0x148e: 0x4000, 0x148f: 0x4000, 0x1490: 0x4000, 0x1491: 0x4000, + 0x1492: 0x4000, 0x1493: 0x4000, 0x1494: 0x4000, 0x1495: 0x4000, 0x1496: 0x4000, 0x1497: 0x4000, + 0x1498: 0x4000, 0x1499: 0x4000, 0x149a: 0x4000, 0x149b: 0x4000, 0x149c: 0x4000, 0x149d: 0x4000, + 0x149e: 0x4000, 0x149f: 0x4000, 0x14a0: 0x4000, + 0x14ad: 0x4000, 0x14ae: 0x4000, 0x14af: 0x4000, + 0x14b0: 0x4000, 0x14b1: 0x4000, 0x14b2: 0x4000, 0x14b3: 0x4000, 0x14b4: 0x4000, 0x14b5: 0x4000, + 0x14b7: 0x4000, 0x14b8: 0x4000, 0x14b9: 0x4000, 0x14ba: 0x4000, 0x14bb: 0x4000, + 0x14bc: 0x4000, 0x14bd: 0x4000, 0x14be: 0x4000, 0x14bf: 0x4000, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x4000, 0x14c1: 0x4000, 0x14c2: 0x4000, 0x14c3: 0x4000, 0x14c4: 0x4000, 0x14c5: 0x4000, + 0x14c6: 0x4000, 0x14c7: 0x4000, 0x14c8: 0x4000, 0x14c9: 0x4000, 0x14ca: 0x4000, 0x14cb: 0x4000, + 0x14cc: 0x4000, 0x14cd: 0x4000, 0x14ce: 0x4000, 0x14cf: 0x4000, 0x14d0: 0x4000, 0x14d1: 0x4000, + 0x14d2: 0x4000, 0x14d3: 0x4000, 0x14d4: 0x4000, 0x14d5: 0x4000, 0x14d6: 0x4000, 0x14d7: 0x4000, + 0x14d8: 0x4000, 0x14d9: 0x4000, 0x14da: 0x4000, 0x14db: 0x4000, 0x14dc: 0x4000, 0x14dd: 0x4000, + 0x14de: 0x4000, 0x14df: 0x4000, 0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000, + 0x14e4: 0x4000, 0x14e5: 0x4000, 0x14e6: 0x4000, 0x14e7: 0x4000, 0x14e8: 0x4000, 0x14e9: 0x4000, + 0x14ea: 0x4000, 0x14eb: 0x4000, 0x14ec: 0x4000, 0x14ed: 0x4000, 0x14ee: 0x4000, 0x14ef: 0x4000, + 0x14f0: 0x4000, 0x14f1: 0x4000, 0x14f2: 0x4000, 0x14f3: 0x4000, 0x14f4: 0x4000, 0x14f5: 0x4000, + 0x14f6: 0x4000, 0x14f7: 0x4000, 0x14f8: 0x4000, 0x14f9: 0x4000, 0x14fa: 0x4000, 0x14fb: 0x4000, + 0x14fc: 0x4000, 0x14fe: 0x4000, 0x14ff: 0x4000, + // Block 0x54, offset 0x1500 + 0x1500: 0x4000, 0x1501: 0x4000, 0x1502: 0x4000, 0x1503: 0x4000, 0x1504: 0x4000, 0x1505: 0x4000, + 0x1506: 0x4000, 0x1507: 0x4000, 0x1508: 0x4000, 0x1509: 0x4000, 0x150a: 0x4000, 0x150b: 0x4000, + 0x150c: 0x4000, 0x150d: 0x4000, 0x150e: 0x4000, 0x150f: 0x4000, 0x1510: 0x4000, 0x1511: 0x4000, + 0x1512: 0x4000, 0x1513: 0x4000, + 0x1520: 0x4000, 0x1521: 
0x4000, 0x1522: 0x4000, 0x1523: 0x4000, + 0x1524: 0x4000, 0x1525: 0x4000, 0x1526: 0x4000, 0x1527: 0x4000, 0x1528: 0x4000, 0x1529: 0x4000, + 0x152a: 0x4000, 0x152b: 0x4000, 0x152c: 0x4000, 0x152d: 0x4000, 0x152e: 0x4000, 0x152f: 0x4000, + 0x1530: 0x4000, 0x1531: 0x4000, 0x1532: 0x4000, 0x1533: 0x4000, 0x1534: 0x4000, 0x1535: 0x4000, + 0x1536: 0x4000, 0x1537: 0x4000, 0x1538: 0x4000, 0x1539: 0x4000, 0x153a: 0x4000, 0x153b: 0x4000, + 0x153c: 0x4000, 0x153d: 0x4000, 0x153e: 0x4000, 0x153f: 0x4000, + // Block 0x55, offset 0x1540 + 0x1540: 0x4000, 0x1541: 0x4000, 0x1542: 0x4000, 0x1543: 0x4000, 0x1544: 0x4000, 0x1545: 0x4000, + 0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000, + 0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000, + 0x1552: 0x4000, 0x1553: 0x4000, + 0x1560: 0x4000, 0x1561: 0x4000, 0x1562: 0x4000, 0x1563: 0x4000, + 0x1564: 0x4000, 0x1565: 0x4000, 0x1566: 0x4000, 0x1567: 0x4000, 0x1568: 0x4000, 0x1569: 0x4000, + 0x156a: 0x4000, 0x156b: 0x4000, 0x156c: 0x4000, 0x156d: 0x4000, 0x156e: 0x4000, 0x156f: 0x4000, + 0x1570: 0x4000, 0x1574: 0x4000, + 0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000, + 0x157c: 0x4000, 0x157d: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000, + // Block 0x56, offset 0x1580 + 0x1580: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000, + 0x1586: 0x4000, 0x1587: 0x4000, 0x1588: 0x4000, 0x1589: 0x4000, 0x158a: 0x4000, 0x158b: 0x4000, + 0x158c: 0x4000, 0x158d: 0x4000, 0x158e: 0x4000, 0x158f: 0x4000, 0x1590: 0x4000, 0x1591: 0x4000, + 0x1592: 0x4000, 0x1593: 0x4000, 0x1594: 0x4000, 0x1595: 0x4000, 0x1596: 0x4000, 0x1597: 0x4000, + 0x1598: 0x4000, 0x1599: 0x4000, 0x159a: 0x4000, 0x159b: 0x4000, 0x159c: 0x4000, 0x159d: 0x4000, + 0x159e: 0x4000, 0x159f: 0x4000, 0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000, + 0x15a4: 0x4000, 0x15a5: 0x4000, 0x15a6: 0x4000, 0x15a7: 0x4000, 0x15a8: 0x4000, 0x15a9: 0x4000, + 0x15aa: 0x4000, 0x15ab: 0x4000, 0x15ac: 0x4000, 0x15ad: 0x4000, 0x15ae: 0x4000, 0x15af: 0x4000, + 0x15b0: 0x4000, 0x15b1: 0x4000, 0x15b2: 0x4000, 0x15b3: 0x4000, 0x15b4: 0x4000, 0x15b5: 0x4000, + 0x15b6: 0x4000, 0x15b7: 0x4000, 0x15b8: 0x4000, 0x15b9: 0x4000, 0x15ba: 0x4000, 0x15bb: 0x4000, + 0x15bc: 0x4000, 0x15bd: 0x4000, 0x15be: 0x4000, 0x15bf: 0x4000, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x4000, 0x15c1: 0x4000, 0x15c2: 0x4000, 0x15c3: 0x4000, 0x15c4: 0x4000, 0x15c5: 0x4000, + 0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000, 0x15cb: 0x4000, + 0x15cc: 0x4000, 0x15cd: 0x4000, 0x15ce: 0x4000, 0x15cf: 0x4000, 0x15d0: 0x4000, 0x15d1: 0x4000, + 0x15d2: 0x4000, 0x15d3: 0x4000, 0x15d4: 0x4000, 0x15d5: 0x4000, 0x15d6: 0x4000, 0x15d7: 0x4000, + 0x15d8: 0x4000, 0x15d9: 0x4000, 0x15da: 0x4000, 0x15db: 0x4000, 0x15dc: 0x4000, 0x15dd: 0x4000, + 0x15de: 0x4000, 0x15df: 0x4000, 0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000, + 0x15e4: 0x4000, 0x15e5: 0x4000, 0x15e6: 0x4000, 0x15e7: 0x4000, 0x15e8: 0x4000, 0x15e9: 0x4000, + 0x15ea: 0x4000, 0x15eb: 0x4000, 0x15ec: 0x4000, 0x15ed: 0x4000, 0x15ee: 0x4000, 0x15ef: 0x4000, + 0x15f0: 0x4000, 0x15f1: 0x4000, 0x15f2: 0x4000, 0x15f3: 0x4000, 0x15f4: 0x4000, 0x15f5: 0x4000, + 0x15f6: 0x4000, 0x15f7: 0x4000, 0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000, + 0x15fc: 0x4000, 0x15ff: 0x4000, + // Block 0x58, offset 0x1600 + 0x1600: 0x4000, 0x1601: 0x4000, 0x1602: 0x4000, 0x1603: 0x4000, 0x1604: 0x4000, 0x1605: 0x4000, + 0x1606: 0x4000, 0x1607: 0x4000, 0x1608: 0x4000, 0x1609: 0x4000, 0x160a: 0x4000, 0x160b: 0x4000, 
+ 0x160c: 0x4000, 0x160d: 0x4000, 0x160e: 0x4000, 0x160f: 0x4000, 0x1610: 0x4000, 0x1611: 0x4000, + 0x1612: 0x4000, 0x1613: 0x4000, 0x1614: 0x4000, 0x1615: 0x4000, 0x1616: 0x4000, 0x1617: 0x4000, + 0x1618: 0x4000, 0x1619: 0x4000, 0x161a: 0x4000, 0x161b: 0x4000, 0x161c: 0x4000, 0x161d: 0x4000, + 0x161e: 0x4000, 0x161f: 0x4000, 0x1620: 0x4000, 0x1621: 0x4000, 0x1622: 0x4000, 0x1623: 0x4000, + 0x1624: 0x4000, 0x1625: 0x4000, 0x1626: 0x4000, 0x1627: 0x4000, 0x1628: 0x4000, 0x1629: 0x4000, + 0x162a: 0x4000, 0x162b: 0x4000, 0x162c: 0x4000, 0x162d: 0x4000, 0x162e: 0x4000, 0x162f: 0x4000, + 0x1630: 0x4000, 0x1631: 0x4000, 0x1632: 0x4000, 0x1633: 0x4000, 0x1634: 0x4000, 0x1635: 0x4000, + 0x1636: 0x4000, 0x1637: 0x4000, 0x1638: 0x4000, 0x1639: 0x4000, 0x163a: 0x4000, 0x163b: 0x4000, + 0x163c: 0x4000, 0x163d: 0x4000, + // Block 0x59, offset 0x1640 + 0x164b: 0x4000, + 0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000, + 0x1652: 0x4000, 0x1653: 0x4000, 0x1654: 0x4000, 0x1655: 0x4000, 0x1656: 0x4000, 0x1657: 0x4000, + 0x1658: 0x4000, 0x1659: 0x4000, 0x165a: 0x4000, 0x165b: 0x4000, 0x165c: 0x4000, 0x165d: 0x4000, + 0x165e: 0x4000, 0x165f: 0x4000, 0x1660: 0x4000, 0x1661: 0x4000, 0x1662: 0x4000, 0x1663: 0x4000, + 0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000, + 0x167a: 0x4000, + // Block 0x5a, offset 0x1680 + 0x1695: 0x4000, 0x1696: 0x4000, + 0x16a4: 0x4000, + // Block 0x5b, offset 0x16c0 + 0x16fb: 0x4000, + 0x16fc: 0x4000, 0x16fd: 0x4000, 0x16fe: 0x4000, 0x16ff: 0x4000, + // Block 0x5c, offset 0x1700 + 0x1700: 0x4000, 0x1701: 0x4000, 0x1702: 0x4000, 0x1703: 0x4000, 0x1704: 0x4000, 0x1705: 0x4000, + 0x1706: 0x4000, 0x1707: 0x4000, 0x1708: 0x4000, 0x1709: 0x4000, 0x170a: 0x4000, 0x170b: 0x4000, + 0x170c: 0x4000, 0x170d: 0x4000, 0x170e: 0x4000, 0x170f: 0x4000, + // Block 0x5d, offset 0x1740 + 0x1740: 0x4000, 0x1741: 0x4000, 0x1742: 0x4000, 0x1743: 0x4000, 0x1744: 0x4000, 0x1745: 0x4000, + 0x174c: 0x4000, 0x1750: 0x4000, 0x1751: 0x4000, + 0x1752: 0x4000, + 0x176b: 0x4000, 0x176c: 0x4000, + 0x1774: 0x4000, 0x1775: 0x4000, + 0x1776: 0x4000, + // Block 0x5e, offset 0x1780 + 0x1790: 0x4000, 0x1791: 0x4000, + 0x1792: 0x4000, 0x1793: 0x4000, 0x1794: 0x4000, 0x1795: 0x4000, 0x1796: 0x4000, 0x1797: 0x4000, + 0x1798: 0x4000, 0x1799: 0x4000, 0x179a: 0x4000, 0x179b: 0x4000, 0x179c: 0x4000, 0x179d: 0x4000, + 0x179e: 0x4000, 0x17a0: 0x4000, 0x17a1: 0x4000, 0x17a2: 0x4000, 0x17a3: 0x4000, + 0x17a4: 0x4000, 0x17a5: 0x4000, 0x17a6: 0x4000, 0x17a7: 0x4000, + 0x17b0: 0x4000, 0x17b3: 0x4000, 0x17b4: 0x4000, 0x17b5: 0x4000, + 0x17b6: 0x4000, 0x17b7: 0x4000, 0x17b8: 0x4000, 0x17b9: 0x4000, 0x17ba: 0x4000, 0x17bb: 0x4000, + 0x17bc: 0x4000, 0x17bd: 0x4000, 0x17be: 0x4000, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x4000, 0x17c1: 0x4000, 0x17c2: 0x4000, 0x17c3: 0x4000, 0x17c4: 0x4000, 0x17c5: 0x4000, + 0x17c6: 0x4000, 0x17c7: 0x4000, 0x17c8: 0x4000, 0x17c9: 0x4000, 0x17ca: 0x4000, 0x17cb: 0x4000, + 0x17d0: 0x4000, 0x17d1: 0x4000, + 0x17d2: 0x4000, 0x17d3: 0x4000, 0x17d4: 0x4000, 0x17d5: 0x4000, 0x17d6: 0x4000, 0x17d7: 0x4000, + 0x17d8: 0x4000, 0x17d9: 0x4000, 0x17da: 0x4000, 0x17db: 0x4000, 0x17dc: 0x4000, 0x17dd: 0x4000, + 0x17de: 0x4000, + // Block 0x60, offset 0x1800 + 0x1800: 0x4000, 0x1801: 0x4000, 0x1802: 0x4000, 0x1803: 0x4000, 0x1804: 0x4000, 0x1805: 0x4000, + 0x1806: 0x4000, 0x1807: 0x4000, 0x1808: 0x4000, 0x1809: 0x4000, 0x180a: 0x4000, 0x180b: 0x4000, + 0x180c: 0x4000, 0x180d: 0x4000, 0x180e: 0x4000, 0x180f: 0x4000, 0x1810: 0x4000, 0x1811: 0x4000, + // Block 0x61, offset 0x1840 
+ 0x1840: 0x4000, + // Block 0x62, offset 0x1880 + 0x1880: 0x2000, 0x1881: 0x2000, 0x1882: 0x2000, 0x1883: 0x2000, 0x1884: 0x2000, 0x1885: 0x2000, + 0x1886: 0x2000, 0x1887: 0x2000, 0x1888: 0x2000, 0x1889: 0x2000, 0x188a: 0x2000, 0x188b: 0x2000, + 0x188c: 0x2000, 0x188d: 0x2000, 0x188e: 0x2000, 0x188f: 0x2000, 0x1890: 0x2000, 0x1891: 0x2000, + 0x1892: 0x2000, 0x1893: 0x2000, 0x1894: 0x2000, 0x1895: 0x2000, 0x1896: 0x2000, 0x1897: 0x2000, + 0x1898: 0x2000, 0x1899: 0x2000, 0x189a: 0x2000, 0x189b: 0x2000, 0x189c: 0x2000, 0x189d: 0x2000, + 0x189e: 0x2000, 0x189f: 0x2000, 0x18a0: 0x2000, 0x18a1: 0x2000, 0x18a2: 0x2000, 0x18a3: 0x2000, + 0x18a4: 0x2000, 0x18a5: 0x2000, 0x18a6: 0x2000, 0x18a7: 0x2000, 0x18a8: 0x2000, 0x18a9: 0x2000, + 0x18aa: 0x2000, 0x18ab: 0x2000, 0x18ac: 0x2000, 0x18ad: 0x2000, 0x18ae: 0x2000, 0x18af: 0x2000, + 0x18b0: 0x2000, 0x18b1: 0x2000, 0x18b2: 0x2000, 0x18b3: 0x2000, 0x18b4: 0x2000, 0x18b5: 0x2000, + 0x18b6: 0x2000, 0x18b7: 0x2000, 0x18b8: 0x2000, 0x18b9: 0x2000, 0x18ba: 0x2000, 0x18bb: 0x2000, + 0x18bc: 0x2000, 0x18bd: 0x2000, +} + +// widthIndex: 22 blocks, 1408 entries, 1408 bytes +// Block 0 is the zero block. +var widthIndex = [1408]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc7: 0x05, + 0xc9: 0x06, 0xcb: 0x07, 0xcc: 0x08, 0xcd: 0x09, 0xce: 0x0a, 0xcf: 0x0b, + 0xd0: 0x0c, 0xd1: 0x0d, + 0xe1: 0x02, 0xe2: 0x03, 0xe3: 0x04, 0xe4: 0x05, 0xe5: 0x06, 0xe6: 0x06, 0xe7: 0x06, + 0xe8: 0x06, 0xe9: 0x06, 0xea: 0x07, 0xeb: 0x06, 0xec: 0x06, 0xed: 0x08, 0xee: 0x09, 0xef: 0x0a, + 0xf0: 0x0f, 0xf3: 0x12, 0xf4: 0x13, + // Block 0x4, offset 0x100 + 0x104: 0x0e, 0x105: 0x0f, + // Block 0x5, offset 0x140 + 0x140: 0x10, 0x141: 0x11, 0x142: 0x12, 0x144: 0x13, 0x145: 0x14, 0x146: 0x15, 0x147: 0x16, + 0x148: 0x17, 0x149: 0x18, 0x14a: 0x19, 0x14c: 0x1a, 0x14f: 0x1b, + 0x151: 0x1c, 0x152: 0x08, 0x153: 0x1d, 0x154: 0x1e, 0x155: 0x1f, 0x156: 0x20, 0x157: 0x21, + 0x158: 0x22, 0x159: 0x23, 0x15a: 0x24, 0x15b: 0x25, 0x15c: 0x26, 0x15d: 0x27, 0x15e: 0x28, 0x15f: 0x29, + 0x166: 0x2a, + 0x16c: 0x2b, 0x16d: 0x2c, + 0x17a: 0x2d, 0x17b: 0x2e, 0x17c: 0x0e, 0x17d: 0x0e, 0x17e: 0x0e, 0x17f: 0x2f, + // Block 0x6, offset 0x180 + 0x180: 0x30, 0x181: 0x31, 0x182: 0x32, 0x183: 0x33, 0x184: 0x34, 0x185: 0x35, 0x186: 0x36, 0x187: 0x37, + 0x188: 0x38, 0x189: 0x39, 0x18a: 0x0e, 0x18b: 0x3a, 0x18c: 0x0e, 0x18d: 0x0e, 0x18e: 0x0e, 0x18f: 0x0e, + 0x190: 0x0e, 0x191: 0x0e, 0x192: 0x0e, 0x193: 0x0e, 0x194: 0x0e, 0x195: 0x0e, 0x196: 0x0e, 0x197: 0x0e, + 0x198: 0x0e, 0x199: 0x0e, 0x19a: 0x0e, 0x19b: 0x0e, 0x19c: 0x0e, 0x19d: 0x0e, 0x19e: 0x0e, 0x19f: 0x0e, + 0x1a0: 0x0e, 0x1a1: 0x0e, 0x1a2: 0x0e, 0x1a3: 0x0e, 0x1a4: 0x0e, 0x1a5: 0x0e, 0x1a6: 0x0e, 0x1a7: 0x0e, + 0x1a8: 0x0e, 0x1a9: 0x0e, 0x1aa: 0x0e, 0x1ab: 0x0e, 0x1ac: 0x0e, 0x1ad: 0x0e, 0x1ae: 0x0e, 0x1af: 0x0e, + 0x1b0: 0x0e, 0x1b1: 0x0e, 0x1b2: 0x0e, 0x1b3: 0x0e, 0x1b4: 0x0e, 0x1b5: 0x0e, 0x1b6: 0x0e, 0x1b7: 0x0e, + 0x1b8: 0x0e, 0x1b9: 0x0e, 0x1ba: 0x0e, 0x1bb: 0x0e, 0x1bc: 0x0e, 0x1bd: 0x0e, 0x1be: 0x0e, 0x1bf: 0x0e, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0e, 0x1c1: 0x0e, 0x1c2: 0x0e, 0x1c3: 0x0e, 0x1c4: 0x0e, 0x1c5: 0x0e, 0x1c6: 0x0e, 0x1c7: 0x0e, + 0x1c8: 0x0e, 0x1c9: 0x0e, 0x1ca: 0x0e, 0x1cb: 0x0e, 0x1cc: 0x0e, 0x1cd: 0x0e, 0x1ce: 0x0e, 0x1cf: 0x0e, + 0x1d0: 0x0e, 0x1d1: 0x0e, 0x1d2: 0x0e, 0x1d3: 0x0e, 0x1d4: 0x0e, 0x1d5: 0x0e, 0x1d6: 0x0e, 0x1d7: 0x0e, + 0x1d8: 0x0e, 0x1d9: 0x0e, 0x1da: 0x0e, 0x1db: 0x0e, 0x1dc: 0x0e, 0x1dd: 0x0e, 
0x1de: 0x0e, 0x1df: 0x0e, + 0x1e0: 0x0e, 0x1e1: 0x0e, 0x1e2: 0x0e, 0x1e3: 0x0e, 0x1e4: 0x0e, 0x1e5: 0x0e, 0x1e6: 0x0e, 0x1e7: 0x0e, + 0x1e8: 0x0e, 0x1e9: 0x0e, 0x1ea: 0x0e, 0x1eb: 0x0e, 0x1ec: 0x0e, 0x1ed: 0x0e, 0x1ee: 0x0e, 0x1ef: 0x0e, + 0x1f0: 0x0e, 0x1f1: 0x0e, 0x1f2: 0x0e, 0x1f3: 0x0e, 0x1f4: 0x0e, 0x1f5: 0x0e, 0x1f6: 0x0e, + 0x1f8: 0x0e, 0x1f9: 0x0e, 0x1fa: 0x0e, 0x1fb: 0x0e, 0x1fc: 0x0e, 0x1fd: 0x0e, 0x1fe: 0x0e, 0x1ff: 0x0e, + // Block 0x8, offset 0x200 + 0x200: 0x0e, 0x201: 0x0e, 0x202: 0x0e, 0x203: 0x0e, 0x204: 0x0e, 0x205: 0x0e, 0x206: 0x0e, 0x207: 0x0e, + 0x208: 0x0e, 0x209: 0x0e, 0x20a: 0x0e, 0x20b: 0x0e, 0x20c: 0x0e, 0x20d: 0x0e, 0x20e: 0x0e, 0x20f: 0x0e, + 0x210: 0x0e, 0x211: 0x0e, 0x212: 0x0e, 0x213: 0x0e, 0x214: 0x0e, 0x215: 0x0e, 0x216: 0x0e, 0x217: 0x0e, + 0x218: 0x0e, 0x219: 0x0e, 0x21a: 0x0e, 0x21b: 0x0e, 0x21c: 0x0e, 0x21d: 0x0e, 0x21e: 0x0e, 0x21f: 0x0e, + 0x220: 0x0e, 0x221: 0x0e, 0x222: 0x0e, 0x223: 0x0e, 0x224: 0x0e, 0x225: 0x0e, 0x226: 0x0e, 0x227: 0x0e, + 0x228: 0x0e, 0x229: 0x0e, 0x22a: 0x0e, 0x22b: 0x0e, 0x22c: 0x0e, 0x22d: 0x0e, 0x22e: 0x0e, 0x22f: 0x0e, + 0x230: 0x0e, 0x231: 0x0e, 0x232: 0x0e, 0x233: 0x0e, 0x234: 0x0e, 0x235: 0x0e, 0x236: 0x0e, 0x237: 0x0e, + 0x238: 0x0e, 0x239: 0x0e, 0x23a: 0x0e, 0x23b: 0x0e, 0x23c: 0x0e, 0x23d: 0x0e, 0x23e: 0x0e, 0x23f: 0x0e, + // Block 0x9, offset 0x240 + 0x240: 0x0e, 0x241: 0x0e, 0x242: 0x0e, 0x243: 0x0e, 0x244: 0x0e, 0x245: 0x0e, 0x246: 0x0e, 0x247: 0x0e, + 0x248: 0x0e, 0x249: 0x0e, 0x24a: 0x0e, 0x24b: 0x0e, 0x24c: 0x0e, 0x24d: 0x0e, 0x24e: 0x0e, 0x24f: 0x0e, + 0x250: 0x0e, 0x251: 0x0e, 0x252: 0x3b, 0x253: 0x3c, + 0x265: 0x3d, + 0x270: 0x0e, 0x271: 0x0e, 0x272: 0x0e, 0x273: 0x0e, 0x274: 0x0e, 0x275: 0x0e, 0x276: 0x0e, 0x277: 0x0e, + 0x278: 0x0e, 0x279: 0x0e, 0x27a: 0x0e, 0x27b: 0x0e, 0x27c: 0x0e, 0x27d: 0x0e, 0x27e: 0x0e, 0x27f: 0x0e, + // Block 0xa, offset 0x280 + 0x280: 0x0e, 0x281: 0x0e, 0x282: 0x0e, 0x283: 0x0e, 0x284: 0x0e, 0x285: 0x0e, 0x286: 0x0e, 0x287: 0x0e, + 0x288: 0x0e, 0x289: 0x0e, 0x28a: 0x0e, 0x28b: 0x0e, 0x28c: 0x0e, 0x28d: 0x0e, 0x28e: 0x0e, 0x28f: 0x0e, + 0x290: 0x0e, 0x291: 0x0e, 0x292: 0x0e, 0x293: 0x0e, 0x294: 0x0e, 0x295: 0x0e, 0x296: 0x0e, 0x297: 0x0e, + 0x298: 0x0e, 0x299: 0x0e, 0x29a: 0x0e, 0x29b: 0x0e, 0x29c: 0x0e, 0x29d: 0x0e, 0x29e: 0x3e, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x08, 0x2c1: 0x08, 0x2c2: 0x08, 0x2c3: 0x08, 0x2c4: 0x08, 0x2c5: 0x08, 0x2c6: 0x08, 0x2c7: 0x08, + 0x2c8: 0x08, 0x2c9: 0x08, 0x2ca: 0x08, 0x2cb: 0x08, 0x2cc: 0x08, 0x2cd: 0x08, 0x2ce: 0x08, 0x2cf: 0x08, + 0x2d0: 0x08, 0x2d1: 0x08, 0x2d2: 0x08, 0x2d3: 0x08, 0x2d4: 0x08, 0x2d5: 0x08, 0x2d6: 0x08, 0x2d7: 0x08, + 0x2d8: 0x08, 0x2d9: 0x08, 0x2da: 0x08, 0x2db: 0x08, 0x2dc: 0x08, 0x2dd: 0x08, 0x2de: 0x08, 0x2df: 0x08, + 0x2e0: 0x08, 0x2e1: 0x08, 0x2e2: 0x08, 0x2e3: 0x08, 0x2e4: 0x08, 0x2e5: 0x08, 0x2e6: 0x08, 0x2e7: 0x08, + 0x2e8: 0x08, 0x2e9: 0x08, 0x2ea: 0x08, 0x2eb: 0x08, 0x2ec: 0x08, 0x2ed: 0x08, 0x2ee: 0x08, 0x2ef: 0x08, + 0x2f0: 0x08, 0x2f1: 0x08, 0x2f2: 0x08, 0x2f3: 0x08, 0x2f4: 0x08, 0x2f5: 0x08, 0x2f6: 0x08, 0x2f7: 0x08, + 0x2f8: 0x08, 0x2f9: 0x08, 0x2fa: 0x08, 0x2fb: 0x08, 0x2fc: 0x08, 0x2fd: 0x08, 0x2fe: 0x08, 0x2ff: 0x08, + // Block 0xc, offset 0x300 + 0x300: 0x08, 0x301: 0x08, 0x302: 0x08, 0x303: 0x08, 0x304: 0x08, 0x305: 0x08, 0x306: 0x08, 0x307: 0x08, + 0x308: 0x08, 0x309: 0x08, 0x30a: 0x08, 0x30b: 0x08, 0x30c: 0x08, 0x30d: 0x08, 0x30e: 0x08, 0x30f: 0x08, + 0x310: 0x08, 0x311: 0x08, 0x312: 0x08, 0x313: 0x08, 0x314: 0x08, 0x315: 0x08, 0x316: 0x08, 0x317: 0x08, + 0x318: 0x08, 0x319: 0x08, 0x31a: 0x08, 0x31b: 0x08, 
0x31c: 0x08, 0x31d: 0x08, 0x31e: 0x08, 0x31f: 0x08, + 0x320: 0x08, 0x321: 0x08, 0x322: 0x08, 0x323: 0x08, 0x324: 0x0e, 0x325: 0x0e, 0x326: 0x0e, 0x327: 0x0e, + 0x328: 0x0e, 0x329: 0x0e, 0x32a: 0x0e, 0x32b: 0x0e, + 0x338: 0x3f, 0x339: 0x40, 0x33c: 0x41, 0x33d: 0x42, 0x33e: 0x43, 0x33f: 0x44, + // Block 0xd, offset 0x340 + 0x37f: 0x45, + // Block 0xe, offset 0x380 + 0x380: 0x0e, 0x381: 0x0e, 0x382: 0x0e, 0x383: 0x0e, 0x384: 0x0e, 0x385: 0x0e, 0x386: 0x0e, 0x387: 0x0e, + 0x388: 0x0e, 0x389: 0x0e, 0x38a: 0x0e, 0x38b: 0x0e, 0x38c: 0x0e, 0x38d: 0x0e, 0x38e: 0x0e, 0x38f: 0x0e, + 0x390: 0x0e, 0x391: 0x0e, 0x392: 0x0e, 0x393: 0x0e, 0x394: 0x0e, 0x395: 0x0e, 0x396: 0x0e, 0x397: 0x0e, + 0x398: 0x0e, 0x399: 0x0e, 0x39a: 0x0e, 0x39b: 0x0e, 0x39c: 0x0e, 0x39d: 0x0e, 0x39e: 0x0e, 0x39f: 0x46, + 0x3a0: 0x0e, 0x3a1: 0x0e, 0x3a2: 0x0e, 0x3a3: 0x0e, 0x3a4: 0x0e, 0x3a5: 0x0e, 0x3a6: 0x0e, 0x3a7: 0x0e, + 0x3a8: 0x0e, 0x3a9: 0x0e, 0x3aa: 0x0e, 0x3ab: 0x47, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x48, + // Block 0x10, offset 0x400 + 0x400: 0x49, 0x403: 0x4a, 0x404: 0x4b, 0x405: 0x4c, 0x406: 0x4d, + 0x408: 0x4e, 0x409: 0x4f, 0x40c: 0x50, 0x40d: 0x51, 0x40e: 0x52, 0x40f: 0x53, + 0x410: 0x3a, 0x411: 0x54, 0x412: 0x0e, 0x413: 0x55, 0x414: 0x56, 0x415: 0x57, 0x416: 0x58, 0x417: 0x59, + 0x418: 0x0e, 0x419: 0x5a, 0x41a: 0x0e, 0x41b: 0x5b, + 0x424: 0x5c, 0x425: 0x5d, 0x426: 0x5e, 0x427: 0x5f, + // Block 0x11, offset 0x440 + 0x456: 0x0b, 0x457: 0x06, + 0x458: 0x0c, 0x45b: 0x0d, 0x45f: 0x0e, + 0x460: 0x06, 0x461: 0x06, 0x462: 0x06, 0x463: 0x06, 0x464: 0x06, 0x465: 0x06, 0x466: 0x06, 0x467: 0x06, + 0x468: 0x06, 0x469: 0x06, 0x46a: 0x06, 0x46b: 0x06, 0x46c: 0x06, 0x46d: 0x06, 0x46e: 0x06, 0x46f: 0x06, + 0x470: 0x06, 0x471: 0x06, 0x472: 0x06, 0x473: 0x06, 0x474: 0x06, 0x475: 0x06, 0x476: 0x06, 0x477: 0x06, + 0x478: 0x06, 0x479: 0x06, 0x47a: 0x06, 0x47b: 0x06, 0x47c: 0x06, 0x47d: 0x06, 0x47e: 0x06, 0x47f: 0x06, + // Block 0x12, offset 0x480 + 0x484: 0x08, 0x485: 0x08, 0x486: 0x08, 0x487: 0x09, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x08, 0x4c1: 0x08, 0x4c2: 0x08, 0x4c3: 0x08, 0x4c4: 0x08, 0x4c5: 0x08, 0x4c6: 0x08, 0x4c7: 0x08, + 0x4c8: 0x08, 0x4c9: 0x08, 0x4ca: 0x08, 0x4cb: 0x08, 0x4cc: 0x08, 0x4cd: 0x08, 0x4ce: 0x08, 0x4cf: 0x08, + 0x4d0: 0x08, 0x4d1: 0x08, 0x4d2: 0x08, 0x4d3: 0x08, 0x4d4: 0x08, 0x4d5: 0x08, 0x4d6: 0x08, 0x4d7: 0x08, + 0x4d8: 0x08, 0x4d9: 0x08, 0x4da: 0x08, 0x4db: 0x08, 0x4dc: 0x08, 0x4dd: 0x08, 0x4de: 0x08, 0x4df: 0x08, + 0x4e0: 0x08, 0x4e1: 0x08, 0x4e2: 0x08, 0x4e3: 0x08, 0x4e4: 0x08, 0x4e5: 0x08, 0x4e6: 0x08, 0x4e7: 0x08, + 0x4e8: 0x08, 0x4e9: 0x08, 0x4ea: 0x08, 0x4eb: 0x08, 0x4ec: 0x08, 0x4ed: 0x08, 0x4ee: 0x08, 0x4ef: 0x08, + 0x4f0: 0x08, 0x4f1: 0x08, 0x4f2: 0x08, 0x4f3: 0x08, 0x4f4: 0x08, 0x4f5: 0x08, 0x4f6: 0x08, 0x4f7: 0x08, + 0x4f8: 0x08, 0x4f9: 0x08, 0x4fa: 0x08, 0x4fb: 0x08, 0x4fc: 0x08, 0x4fd: 0x08, 0x4fe: 0x08, 0x4ff: 0x60, + // Block 0x14, offset 0x500 + 0x520: 0x10, + 0x530: 0x09, 0x531: 0x09, 0x532: 0x09, 0x533: 0x09, 0x534: 0x09, 0x535: 0x09, 0x536: 0x09, 0x537: 0x09, + 0x538: 0x09, 0x539: 0x09, 0x53a: 0x09, 0x53b: 0x09, 0x53c: 0x09, 0x53d: 0x09, 0x53e: 0x09, 0x53f: 0x11, + // Block 0x15, offset 0x540 + 0x540: 0x09, 0x541: 0x09, 0x542: 0x09, 0x543: 0x09, 0x544: 0x09, 0x545: 0x09, 0x546: 0x09, 0x547: 0x09, + 0x548: 0x09, 0x549: 0x09, 0x54a: 0x09, 0x54b: 0x09, 0x54c: 0x09, 0x54d: 0x09, 0x54e: 0x09, 0x54f: 0x11, +} + +// inverseData contains 4-byte entries of the following format: +// +// <length> <modified UTF-8-encoded rune> <0 padding> +// +// The last byte of the UTF-8-encoded rune is xor-ed 
with the last byte of the +// UTF-8 encoding of the original rune. Mappings often have the following +// pattern: +// +// A -> A (U+FF21 -> U+0041) +// B -> B (U+FF22 -> U+0042) +// ... +// +// By xor-ing the last byte the same entry can be shared by many mappings. This +// reduces the total number of distinct entries by about two thirds. +// The resulting entry for the aforementioned mappings is +// +// { 0x01, 0xE0, 0x00, 0x00 } +// +// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get +// +// E0 ^ A1 = 41. +// +// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get +// +// E0 ^ A2 = 42. +// +// Note that because of the xor-ing, the byte sequence stored in the entry is +// not valid UTF-8. +var inverseData = [150][4]byte{ + {0x00, 0x00, 0x00, 0x00}, + {0x03, 0xe3, 0x80, 0xa0}, + {0x03, 0xef, 0xbc, 0xa0}, + {0x03, 0xef, 0xbc, 0xe0}, + {0x03, 0xef, 0xbd, 0xe0}, + {0x03, 0xef, 0xbf, 0x02}, + {0x03, 0xef, 0xbf, 0x00}, + {0x03, 0xef, 0xbf, 0x0e}, + {0x03, 0xef, 0xbf, 0x0c}, + {0x03, 0xef, 0xbf, 0x0f}, + {0x03, 0xef, 0xbf, 0x39}, + {0x03, 0xef, 0xbf, 0x3b}, + {0x03, 0xef, 0xbf, 0x3f}, + {0x03, 0xef, 0xbf, 0x2a}, + {0x03, 0xef, 0xbf, 0x0d}, + {0x03, 0xef, 0xbf, 0x25}, + {0x03, 0xef, 0xbd, 0x1a}, + {0x03, 0xef, 0xbd, 0x26}, + {0x01, 0xa0, 0x00, 0x00}, + {0x03, 0xef, 0xbd, 0x25}, + {0x03, 0xef, 0xbd, 0x23}, + {0x03, 0xef, 0xbd, 0x2e}, + {0x03, 0xef, 0xbe, 0x07}, + {0x03, 0xef, 0xbe, 0x05}, + {0x03, 0xef, 0xbd, 0x06}, + {0x03, 0xef, 0xbd, 0x13}, + {0x03, 0xef, 0xbd, 0x0b}, + {0x03, 0xef, 0xbd, 0x16}, + {0x03, 0xef, 0xbd, 0x0c}, + {0x03, 0xef, 0xbd, 0x15}, + {0x03, 0xef, 0xbd, 0x0d}, + {0x03, 0xef, 0xbd, 0x1c}, + {0x03, 0xef, 0xbd, 0x02}, + {0x03, 0xef, 0xbd, 0x1f}, + {0x03, 0xef, 0xbd, 0x1d}, + {0x03, 0xef, 0xbd, 0x17}, + {0x03, 0xef, 0xbd, 0x08}, + {0x03, 0xef, 0xbd, 0x09}, + {0x03, 0xef, 0xbd, 0x0e}, + {0x03, 0xef, 0xbd, 0x04}, + {0x03, 0xef, 0xbd, 0x05}, + {0x03, 0xef, 0xbe, 0x3f}, + {0x03, 0xef, 0xbe, 0x00}, + {0x03, 0xef, 0xbd, 0x2c}, + {0x03, 0xef, 0xbe, 0x06}, + {0x03, 0xef, 0xbe, 0x0c}, + {0x03, 0xef, 0xbe, 0x0f}, + {0x03, 0xef, 0xbe, 0x0d}, + {0x03, 0xef, 0xbe, 0x0b}, + {0x03, 0xef, 0xbe, 0x19}, + {0x03, 0xef, 0xbe, 0x15}, + {0x03, 0xef, 0xbe, 0x11}, + {0x03, 0xef, 0xbe, 0x31}, + {0x03, 0xef, 0xbe, 0x33}, + {0x03, 0xef, 0xbd, 0x0f}, + {0x03, 0xef, 0xbe, 0x30}, + {0x03, 0xef, 0xbe, 0x3e}, + {0x03, 0xef, 0xbe, 0x32}, + {0x03, 0xef, 0xbe, 0x36}, + {0x03, 0xef, 0xbd, 0x14}, + {0x03, 0xef, 0xbe, 0x2e}, + {0x03, 0xef, 0xbd, 0x1e}, + {0x03, 0xef, 0xbe, 0x10}, + {0x03, 0xef, 0xbf, 0x13}, + {0x03, 0xef, 0xbf, 0x15}, + {0x03, 0xef, 0xbf, 0x17}, + {0x03, 0xef, 0xbf, 0x1f}, + {0x03, 0xef, 0xbf, 0x1d}, + {0x03, 0xef, 0xbf, 0x1b}, + {0x03, 0xef, 0xbf, 0x09}, + {0x03, 0xef, 0xbf, 0x0b}, + {0x03, 0xef, 0xbf, 0x37}, + {0x03, 0xef, 0xbe, 0x04}, + {0x01, 0xe0, 0x00, 0x00}, + {0x03, 0xe2, 0xa6, 0x1a}, + {0x03, 0xe2, 0xa6, 0x26}, + {0x03, 0xe3, 0x80, 0x23}, + {0x03, 0xe3, 0x80, 0x2e}, + {0x03, 0xe3, 0x80, 0x25}, + {0x03, 0xe3, 0x83, 0x1e}, + {0x03, 0xe3, 0x83, 0x14}, + {0x03, 0xe3, 0x82, 0x06}, + {0x03, 0xe3, 0x82, 0x0b}, + {0x03, 0xe3, 0x82, 0x0c}, + {0x03, 0xe3, 0x82, 0x0d}, + {0x03, 0xe3, 0x82, 0x02}, + {0x03, 0xe3, 0x83, 0x0f}, + {0x03, 0xe3, 0x83, 0x08}, + {0x03, 0xe3, 0x83, 0x09}, + {0x03, 0xe3, 0x83, 0x2c}, + {0x03, 0xe3, 0x83, 0x0c}, + {0x03, 0xe3, 0x82, 0x13}, + {0x03, 0xe3, 0x82, 0x16}, + {0x03, 0xe3, 0x82, 0x15}, + {0x03, 0xe3, 0x82, 0x1c}, + {0x03, 0xe3, 0x82, 0x1f}, + {0x03, 0xe3, 0x82, 0x1d}, + {0x03, 0xe3, 0x82, 0x1a}, + {0x03, 0xe3, 0x82, 0x17}, + {0x03, 0xe3, 0x82, 0x08}, + {0x03, 0xe3, 
0x82, 0x09}, + {0x03, 0xe3, 0x82, 0x0e}, + {0x03, 0xe3, 0x82, 0x04}, + {0x03, 0xe3, 0x82, 0x05}, + {0x03, 0xe3, 0x82, 0x3f}, + {0x03, 0xe3, 0x83, 0x00}, + {0x03, 0xe3, 0x83, 0x06}, + {0x03, 0xe3, 0x83, 0x05}, + {0x03, 0xe3, 0x83, 0x0d}, + {0x03, 0xe3, 0x83, 0x0b}, + {0x03, 0xe3, 0x83, 0x07}, + {0x03, 0xe3, 0x83, 0x19}, + {0x03, 0xe3, 0x83, 0x15}, + {0x03, 0xe3, 0x83, 0x11}, + {0x03, 0xe3, 0x83, 0x31}, + {0x03, 0xe3, 0x83, 0x33}, + {0x03, 0xe3, 0x83, 0x30}, + {0x03, 0xe3, 0x83, 0x3e}, + {0x03, 0xe3, 0x83, 0x32}, + {0x03, 0xe3, 0x83, 0x36}, + {0x03, 0xe3, 0x83, 0x2e}, + {0x03, 0xe3, 0x82, 0x07}, + {0x03, 0xe3, 0x85, 0x04}, + {0x03, 0xe3, 0x84, 0x10}, + {0x03, 0xe3, 0x85, 0x30}, + {0x03, 0xe3, 0x85, 0x0d}, + {0x03, 0xe3, 0x85, 0x13}, + {0x03, 0xe3, 0x85, 0x15}, + {0x03, 0xe3, 0x85, 0x17}, + {0x03, 0xe3, 0x85, 0x1f}, + {0x03, 0xe3, 0x85, 0x1d}, + {0x03, 0xe3, 0x85, 0x1b}, + {0x03, 0xe3, 0x85, 0x09}, + {0x03, 0xe3, 0x85, 0x0f}, + {0x03, 0xe3, 0x85, 0x0b}, + {0x03, 0xe3, 0x85, 0x37}, + {0x03, 0xe3, 0x85, 0x3b}, + {0x03, 0xe3, 0x85, 0x39}, + {0x03, 0xe3, 0x85, 0x3f}, + {0x02, 0xc2, 0x02, 0x00}, + {0x02, 0xc2, 0x0e, 0x00}, + {0x02, 0xc2, 0x0c, 0x00}, + {0x02, 0xc2, 0x00, 0x00}, + {0x03, 0xe2, 0x82, 0x0f}, + {0x03, 0xe2, 0x94, 0x2a}, + {0x03, 0xe2, 0x86, 0x39}, + {0x03, 0xe2, 0x86, 0x3b}, + {0x03, 0xe2, 0x86, 0x3f}, + {0x03, 0xe2, 0x96, 0x0d}, + {0x03, 0xe2, 0x97, 0x25}, +} + +// Total table size 14680 bytes (14KiB) diff --git a/vendor/golang.org/x/text/width/transform.go b/vendor/golang.org/x/text/width/transform.go new file mode 100644 index 00000000000..0049f700a2f --- /dev/null +++ b/vendor/golang.org/x/text/width/transform.go @@ -0,0 +1,239 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package width + +import ( + "unicode/utf8" + + "golang.org/x/text/transform" +) + +type foldTransform struct { + transform.NopResetter +} + +func (foldTransform) Span(src []byte, atEOF bool) (n int, err error) { + for n < len(src) { + if src[n] < utf8.RuneSelf { + // ASCII fast path. + for n++; n < len(src) && src[n] < utf8.RuneSelf; n++ { + } + continue + } + v, size := trie.lookup(src[n:]) + if size == 0 { // incomplete UTF-8 encoding + if !atEOF { + err = transform.ErrShortSrc + } else { + n = len(src) + } + break + } + if elem(v)&tagNeedsFold != 0 { + err = transform.ErrEndOfSpan + break + } + n += size + } + return n, err +} + +func (foldTransform) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + for nSrc < len(src) { + if src[nSrc] < utf8.RuneSelf { + // ASCII fast path. 
+ start, end := nSrc, len(src) + if d := len(dst) - nDst; d < end-start { + end = nSrc + d + } + for nSrc++; nSrc < end && src[nSrc] < utf8.RuneSelf; nSrc++ { + } + n := copy(dst[nDst:], src[start:nSrc]) + if nDst += n; nDst == len(dst) { + nSrc = start + n + if nSrc == len(src) { + return nDst, nSrc, nil + } + if src[nSrc] < utf8.RuneSelf { + return nDst, nSrc, transform.ErrShortDst + } + } + continue + } + v, size := trie.lookup(src[nSrc:]) + if size == 0 { // incomplete UTF-8 encoding + if !atEOF { + return nDst, nSrc, transform.ErrShortSrc + } + size = 1 // gobble 1 byte + } + if elem(v)&tagNeedsFold == 0 { + if size != copy(dst[nDst:], src[nSrc:nSrc+size]) { + return nDst, nSrc, transform.ErrShortDst + } + nDst += size + } else { + data := inverseData[byte(v)] + if len(dst)-nDst < int(data[0]) { + return nDst, nSrc, transform.ErrShortDst + } + i := 1 + for end := int(data[0]); i < end; i++ { + dst[nDst] = data[i] + nDst++ + } + dst[nDst] = data[i] ^ src[nSrc+size-1] + nDst++ + } + nSrc += size + } + return nDst, nSrc, nil +} + +type narrowTransform struct { + transform.NopResetter +} + +func (narrowTransform) Span(src []byte, atEOF bool) (n int, err error) { + for n < len(src) { + if src[n] < utf8.RuneSelf { + // ASCII fast path. + for n++; n < len(src) && src[n] < utf8.RuneSelf; n++ { + } + continue + } + v, size := trie.lookup(src[n:]) + if size == 0 { // incomplete UTF-8 encoding + if !atEOF { + err = transform.ErrShortSrc + } else { + n = len(src) + } + break + } + if k := elem(v).kind(); byte(v) == 0 || k != EastAsianFullwidth && k != EastAsianWide && k != EastAsianAmbiguous { + } else { + err = transform.ErrEndOfSpan + break + } + n += size + } + return n, err +} + +func (narrowTransform) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + for nSrc < len(src) { + if src[nSrc] < utf8.RuneSelf { + // ASCII fast path. + start, end := nSrc, len(src) + if d := len(dst) - nDst; d < end-start { + end = nSrc + d + } + for nSrc++; nSrc < end && src[nSrc] < utf8.RuneSelf; nSrc++ { + } + n := copy(dst[nDst:], src[start:nSrc]) + if nDst += n; nDst == len(dst) { + nSrc = start + n + if nSrc == len(src) { + return nDst, nSrc, nil + } + if src[nSrc] < utf8.RuneSelf { + return nDst, nSrc, transform.ErrShortDst + } + } + continue + } + v, size := trie.lookup(src[nSrc:]) + if size == 0 { // incomplete UTF-8 encoding + if !atEOF { + return nDst, nSrc, transform.ErrShortSrc + } + size = 1 // gobble 1 byte + } + if k := elem(v).kind(); byte(v) == 0 || k != EastAsianFullwidth && k != EastAsianWide && k != EastAsianAmbiguous { + if size != copy(dst[nDst:], src[nSrc:nSrc+size]) { + return nDst, nSrc, transform.ErrShortDst + } + nDst += size + } else { + data := inverseData[byte(v)] + if len(dst)-nDst < int(data[0]) { + return nDst, nSrc, transform.ErrShortDst + } + i := 1 + for end := int(data[0]); i < end; i++ { + dst[nDst] = data[i] + nDst++ + } + dst[nDst] = data[i] ^ src[nSrc+size-1] + nDst++ + } + nSrc += size + } + return nDst, nSrc, nil +} + +type wideTransform struct { + transform.NopResetter +} + +func (wideTransform) Span(src []byte, atEOF bool) (n int, err error) { + for n < len(src) { + // TODO: Consider ASCII fast path. Special-casing ASCII handling can + // reduce the ns/op of BenchmarkWideASCII by about 30%. This is probably + // not enough to warrant the extra code and complexity. 
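+ // Note that, unlike in foldTransform and narrowTransform above, ASCII bytes + // could not simply be skipped in bulk here: printable ASCII is EastAsianNarrow + // and has fullwidth variants, so a dedicated fast path would still have to + // report the end of the span for such bytes rather than stepping over them.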
+ v, size := trie.lookup(src[n:]) + if size == 0 { // incomplete UTF-8 encoding + if !atEOF { + err = transform.ErrShortSrc + } else { + n = len(src) + } + break + } + if k := elem(v).kind(); byte(v) == 0 || k != EastAsianHalfwidth && k != EastAsianNarrow { + } else { + err = transform.ErrEndOfSpan + break + } + n += size + } + return n, err +} + +func (wideTransform) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + for nSrc < len(src) { + // TODO: Consider ASCII fast path. Special-casing ASCII handling can + // reduce the ns/op of BenchmarkWideASCII by about 30%. This is probably + // not enough to warrant the extra code and complexity. + v, size := trie.lookup(src[nSrc:]) + if size == 0 { // incomplete UTF-8 encoding + if !atEOF { + return nDst, nSrc, transform.ErrShortSrc + } + size = 1 // gobble 1 byte + } + if k := elem(v).kind(); byte(v) == 0 || k != EastAsianHalfwidth && k != EastAsianNarrow { + if size != copy(dst[nDst:], src[nSrc:nSrc+size]) { + return nDst, nSrc, transform.ErrShortDst + } + nDst += size + } else { + data := inverseData[byte(v)] + if len(dst)-nDst < int(data[0]) { + return nDst, nSrc, transform.ErrShortDst + } + i := 1 + for end := int(data[0]); i < end; i++ { + dst[nDst] = data[i] + nDst++ + } + dst[nDst] = data[i] ^ src[nSrc+size-1] + nDst++ + } + nSrc += size + } + return nDst, nSrc, nil +} diff --git a/vendor/golang.org/x/text/width/trieval.go b/vendor/golang.org/x/text/width/trieval.go new file mode 100644 index 00000000000..ca8e45fd19e --- /dev/null +++ b/vendor/golang.org/x/text/width/trieval.go @@ -0,0 +1,30 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package width + +// elem is an entry of the width trie. The high byte is used to encode the type +// of the rune. The low byte is used to store the index to a mapping entry in +// the inverseData array. +type elem uint16 + +const ( + tagNeutral elem = iota << typeShift + tagAmbiguous + tagWide + tagNarrow + tagFullwidth + tagHalfwidth +) + +const ( + numTypeBits = 3 + typeShift = 16 - numTypeBits + + // tagNeedsFold is true for all fullwidth and halfwidth runes except for + // the Won sign U+20A9. + tagNeedsFold = 0x1000 + + // The Korean Won sign is halfwidth, but SHOULD NOT be mapped to a wide + // variant. + wonSign rune = 0x20A9 +) diff --git a/vendor/golang.org/x/text/width/width.go b/vendor/golang.org/x/text/width/width.go new file mode 100644 index 00000000000..29c7509be7c --- /dev/null +++ b/vendor/golang.org/x/text/width/width.go @@ -0,0 +1,206 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate stringer -type=Kind +//go:generate go run gen.go gen_common.go gen_trieval.go + +// Package width provides functionality for handling different widths in text. +// +// Wide characters behave like ideographs; they tend to allow line breaks after +// each character and remain upright in vertical text layout. Narrow characters +// are kept together in words or runs that are rotated sideways in vertical text +// layout. +// +// For more information, see https://unicode.org/reports/tr11/. +package width // import "golang.org/x/text/width" + +import ( + "unicode/utf8" + + "golang.org/x/text/transform" +) + +// TODO +// 1) Reduce table size by compressing blocks. +// 2) API proposition for computing display length +// (approximation, fixed pitch only). +// 3) Implement display length. 
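As a quick orientation before the API below, a minimal usage sketch (assuming the vendored golang.org/x/text module and its stringer-generated kind_string.go, which prints the Kind names): it exercises LookupRune, Properties.Kind and Folded, and the Fold, Widen, and Narrow transformers defined later in this file. The Ａ→A fold is the same U+FF21→U+0041 mapping worked through in the inverseData comment above.

package main

import (
	"fmt"

	"golang.org/x/text/width"
)

func main() {
	// U+FF21 FULLWIDTH LATIN CAPITAL LETTER A folds to plain 'A' (U+0041),
	// via the inverseData xor trick documented above.
	p := width.LookupRune('Ａ')
	fmt.Println(p.Kind())          // EastAsianFullwidth
	fmt.Printf("%c\n", p.Folded()) // A

	// The package-level transformers map whole strings.
	fmt.Println(width.Fold.String("Ｇｏ"))   // Go
	fmt.Println(width.Widen.String("Go"))   // Ｇｏ
	fmt.Println(width.Narrow.String("Ｇｏ")) // Go
}

Folded (and likewise Narrow and Wide) returns 0 for a rune that is already in the requested form, so callers typically check the result before substituting.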
+ +// Kind indicates the type of width property as defined in https://unicode.org/reports/tr11/. +type Kind int + +const ( + // Neutral characters do not occur in legacy East Asian character sets. + Neutral Kind = iota + + // EastAsianAmbiguous characters can be sometimes wide and sometimes + // narrow and require additional information not contained in the character + // code to further resolve their width. + EastAsianAmbiguous + + // EastAsianWide characters are wide in their usual form. They occur only in + // the context of East Asian typography. These runes may have explicit + // halfwidth counterparts. + EastAsianWide + + // EastAsianNarrow characters are narrow in their usual form. They often have + // fullwidth counterparts. + EastAsianNarrow + + // Note: there exist Narrow runes that do not have fullwidth or wide + // counterparts, despite what the definition says (e.g. U+27E6). + + // EastAsianFullwidth characters have a compatibility decomposition of type + // wide that maps to a narrow counterpart. + EastAsianFullwidth + + // EastAsianHalfwidth characters have a compatibility decomposition of type + // narrow that maps to a wide or ambiguous counterpart, plus U+20A9 ₩ WON + // SIGN. + EastAsianHalfwidth + + // Note: there exist runes that have a halfwidth counterpart but are + // classified as Ambiguous, rather than wide (e.g. U+2190). +) + +// TODO: the generated tries need to return size 1 for invalid runes for the +// width to be computed correctly (each byte should render width 1). + +var trie = newWidthTrie(0) + +// Lookup reports the Properties of the first rune in b and the number of bytes +// of its UTF-8 encoding. +func Lookup(b []byte) (p Properties, size int) { + v, sz := trie.lookup(b) + return Properties{elem(v), b[sz-1]}, sz +} + +// LookupString reports the Properties of the first rune in s and the number of +// bytes of its UTF-8 encoding. +func LookupString(s string) (p Properties, size int) { + v, sz := trie.lookupString(s) + return Properties{elem(v), s[sz-1]}, sz +} + +// LookupRune reports the Properties of rune r. +func LookupRune(r rune) Properties { + var buf [4]byte + n := utf8.EncodeRune(buf[:], r) + v, _ := trie.lookup(buf[:n]) + last := byte(r) + if r >= utf8.RuneSelf { + last = 0x80 + byte(r&0x3f) + } + return Properties{elem(v), last} +} + +// Properties provides access to width properties of a rune. +type Properties struct { + elem elem + last byte +} + +func (e elem) kind() Kind { + return Kind(e >> typeShift) +} + +// Kind returns the Kind of a rune as defined in Unicode TR #11. +// See https://unicode.org/reports/tr11/ for more details. +func (p Properties) Kind() Kind { + return p.elem.kind() +} + +// Folded returns the folded variant of a rune or 0 if the rune is canonical. +func (p Properties) Folded() rune { + if p.elem&tagNeedsFold != 0 { + buf := inverseData[byte(p.elem)] + buf[buf[0]] ^= p.last + r, _ := utf8.DecodeRune(buf[1 : 1+buf[0]]) + return r + } + return 0 +} + +// Narrow returns the narrow variant of a rune or 0 if the rune is already +// narrow or doesn't have a narrow variant. +func (p Properties) Narrow() rune { + if k := p.elem.kind(); byte(p.elem) != 0 && (k == EastAsianFullwidth || k == EastAsianWide || k == EastAsianAmbiguous) { + buf := inverseData[byte(p.elem)] + buf[buf[0]] ^= p.last + r, _ := utf8.DecodeRune(buf[1 : 1+buf[0]]) + return r + } + return 0 +} + +// Wide returns the wide variant of a rune or 0 if the rune is already +// wide or doesn't have a wide variant.
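+// For example, the wide variant of 'A' (U+0041) is 'Ａ' (U+FF21), and the wide +// variant of the halfwidth katakana 'ｱ' (U+FF71) is 'ア' (U+30A2).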
+func (p Properties) Wide() rune { + if k := p.elem.kind(); byte(p.elem) != 0 && (k == EastAsianHalfwidth || k == EastAsianNarrow) { + buf := inverseData[byte(p.elem)] + buf[buf[0]] ^= p.last + r, _ := utf8.DecodeRune(buf[1 : 1+buf[0]]) + return r + } + return 0 +} + +// TODO for Properties: +// - Add Fullwidth/Halfwidth or Inverted methods for computing variants +// mapping. +// - Add width information (including information on non-spacing runes). + +// Transformer implements the transform.Transformer interface. +type Transformer struct { + t transform.SpanningTransformer +} + +// Reset implements the transform.Transformer interface. +func (t Transformer) Reset() { t.t.Reset() } + +// Transform implements the transform.Transformer interface. +func (t Transformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + return t.t.Transform(dst, src, atEOF) +} + +// Span implements the transform.SpanningTransformer interface. +func (t Transformer) Span(src []byte, atEOF bool) (n int, err error) { + return t.t.Span(src, atEOF) +} + +// Bytes returns a new byte slice with the result of applying t to b. +func (t Transformer) Bytes(b []byte) []byte { + b, _, _ = transform.Bytes(t, b) + return b +} + +// String returns a string with the result of applying t to s. +func (t Transformer) String(s string) string { + s, _, _ = transform.String(t, s) + return s +} + +var ( + // Fold is a transform that maps all runes to their canonical width. + // + // Note that the NFKC and NFKD transforms in golang.org/x/text/unicode/norm + // provide a more generic folding mechanism. + Fold Transformer = Transformer{foldTransform{}} + + // Widen is a transform that maps runes to their wide variant, if + // available. + Widen Transformer = Transformer{wideTransform{}} + + // Narrow is a transform that maps runes to their narrow variant, if + // available. + Narrow Transformer = Transformer{narrowTransform{}} +) + +// TODO: Consider the following options: +// - Treat Ambiguous runes that have a halfwidth counterpart as wide, or some +// generalized variant of this. +// - Consider a wide Won character to be the default width (or some generalized +// variant of this). +// - Filter the set of characters that gets converted (the preferred approach is +// to allow applying filters to transforms). diff --git a/vendor/golang.org/x/time/LICENSE b/vendor/golang.org/x/time/LICENSE index 6a66aea5eaf..2a7cf70da6e 100644 --- a/vendor/golang.org/x/time/LICENSE +++ b/vendor/golang.org/x/time/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index 8f6c7f493f8..93a798ab637 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -99,8 +99,9 @@ func (lim *Limiter) Tokens() float64 { // bursts of at most b tokens. 
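// // A new Limiter starts with a full bucket, so up to b calls to Allow succeed // immediately; the tokens initialization below makes this explicit, which in // turn lets reserveN (next hunk) drop its special case for a zero Limit: a // bucket with Limit 0 never refills, so the initial b tokens are all it will // ever hand out.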
func NewLimiter(r Limit, b int) *Limiter { return &Limiter{ - limit: r, - burst: b, + limit: r, + burst: b, + tokens: float64(b), } } @@ -344,18 +345,6 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) tokens: n, timeToAct: t, } - } else if lim.limit == 0 { - var ok bool - if lim.burst >= n { - ok = true - lim.burst -= n - } - return Reservation{ - ok: ok, - lim: lim, - tokens: lim.burst, - timeToAct: t, - } } t, tokens := lim.advance(t) diff --git a/vendor/golang.org/x/tools/LICENSE b/vendor/golang.org/x/tools/LICENSE index 6a66aea5eaf..2a7cf70da6e 100644 --- a/vendor/golang.org/x/tools/LICENSE +++ b/vendor/golang.org/x/tools/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go index 137cc8df1d8..f3ab0a2e126 100644 --- a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go +++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -2,22 +2,64 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package gcexportdata provides functions for locating, reading, and -// writing export data files containing type information produced by the -// gc compiler. This package supports go1.7 export data format and all -// later versions. -// -// Although it might seem convenient for this package to live alongside -// go/types in the standard library, this would cause version skew -// problems for developer tools that use it, since they must be able to -// consume the outputs of the gc compiler both before and after a Go -// update such as from Go 1.7 to Go 1.8. Because this package lives in -// golang.org/x/tools, sites can update their version of this repo some -// time before the Go 1.8 release and rebuild and redeploy their -// developer tools, which will then be able to consume both Go 1.7 and -// Go 1.8 export data files, so they will work before and after the -// Go update. (See discussion at https://golang.org/issue/15651.) -package gcexportdata // import "golang.org/x/tools/go/gcexportdata" +// Package gcexportdata provides functions for reading and writing +// export data, which is a serialized description of the API of a Go +// package including the names, kinds, types, and locations of all +// exported declarations. +// +// The standard Go compiler (cmd/compile) writes an export data file +// for each package it compiles, which it later reads when compiling +// packages that import the earlier one. The compiler must thus +// contain logic to both write and read export data. +// (See the "Export" section in the cmd/compile/README file.) +// +// The [Read] function in this package can read files produced by the +// compiler, producing [go/types] data structures. 
As a matter of +// policy, Read supports export data files produced by only the last +// two Go releases plus tip; see https://go.dev/issue/68898. The +// export data files produced by the compiler contain additional +// details related to generics, inlining, and other optimizations that +// cannot be decoded by the [Read] function. +// +// In files written by the compiler, the export data is not at the +// start of the file. Before calling Read, use [NewReader] to locate +// the desired portion of the file. +// +// The [Write] function in this package encodes the exported API of a +// Go package ([types.Package]) as a file. Such files can be later +// decoded by Read, but cannot be consumed by the compiler. +// +// # Future changes +// +// Although Read supports the formats written by both Write and the +// compiler, the two are quite different, and there is an open +// proposal (https://go.dev/issue/69491) to separate these APIs. +// +// Under that proposal, this package would ultimately provide only the +// Read operation for compiler export data, which must be defined in +// this module (golang.org/x/tools), not in the standard library, to +// avoid version skew for developer tools that need to read compiler +// export data both before and after a Go release, such as from Go +// 1.23 to Go 1.24. Because this package lives in the tools module, +// clients can update their version of the module some time before the +// Go 1.24 release and rebuild and redeploy their tools, which will +// then be able to consume both Go 1.23 and Go 1.24 export data files, +// so they will work before and after the Go update. (See discussion +// at https://go.dev/issue/15651.) +// +// The operations to import and export [go/types] data structures +// would be defined in the go/types package as Import and Export. +// [Write] would (eventually) delegate to Export, +// and [Read], when it detects a file produced by Export, +// would delegate to Import. +// +// # Deprecations +// +// The [NewImporter] and [Find] functions are deprecated and should +// not be used in new code. The [WriteBundle] and [ReadBundle] +// functions are experimental, and there is an open proposal to +// deprecate them (https://go.dev/issue/69573). +package gcexportdata import ( "bufio" @@ -100,6 +142,11 @@ func readAll(r io.Reader) ([]byte, error) { // Read reads export data from in, decodes it, and returns type // information for the package. // +// Read is capable of reading export data produced by [Write] at the +// same source code version, or by the last two Go releases (plus tip) +// of the standard Go compiler. Reading files from older compilers may +// produce an error. +// // The package path (effectively its linker symbol prefix) is // specified by path, since unlike the package name, this information // may not be recorded in the export data. @@ -128,14 +175,26 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, // (from "version"). Select appropriate importer. if len(data) > 0 { switch data[0] { - case 'v', 'c', 'd': // binary, till go1.10 + case 'v', 'c', 'd': + // binary, produced by cmd/compile till go1.10 return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) - case 'i': // indexed, till go1.19 + case 'i': + // indexed, produced by cmd/compile till go1.19, + // and also by [Write]. 
+ // + // If proposal #69491 is accepted, go/types + // serialization will be implemented by + // types.Export, to which Write would eventually + // delegate (explicitly dropping any pretence at + // inter-version Write-Read compatibility). + // This [Read] function would delegate to types.Import + // when it detects that the file was produced by Export. _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) return pkg, err - case 'u': // unified, from go1.20 + case 'u': + // unified, produced by cmd/compile since go1.20 _, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path) return pkg, err diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go index 3531ac8f5fc..f1931d10eeb 100644 --- a/vendor/golang.org/x/tools/go/packages/doc.go +++ b/vendor/golang.org/x/tools/go/packages/doc.go @@ -64,7 +64,7 @@ graph using the Imports fields. The Load function can be configured by passing a pointer to a Config as the first argument. A nil Config is equivalent to the zero Config, which -causes Load to run in LoadFiles mode, collecting minimal information. +causes Load to run in [LoadFiles] mode, collecting minimal information. See the documentation for type Config for details. As noted earlier, the Config.Mode controls the amount of detail @@ -72,14 +72,14 @@ reported about the loaded packages. See the documentation for type LoadMode for details. Most tools should pass their command-line arguments (after any flags) -uninterpreted to [Load], so that it can interpret them +uninterpreted to Load, so that it can interpret them according to the conventions of the underlying build system. See the Example function for typical usage. # The driver protocol -[Load] may be used to load Go packages even in Go projects that use +Load may be used to load Go packages even in Go projects that use alternative build systems, by installing an appropriate "driver" program for the build system and specifying its location in the GOPACKAGESDRIVER environment variable. @@ -97,6 +97,15 @@ JSON-encoded [DriverRequest] message providing additional information is written to the driver's standard input. The driver must write a JSON-encoded [DriverResponse] message to its standard output. (This message differs from the JSON schema produced by 'go list'.) + +The value of the PWD environment variable seen by the driver process +is the preferred name of its working directory. (The working directory +may have other aliases due to symbolic links; see the comment on the +Dir field of [exec.Cmd] for related information.) +When the driver process emits in its response the name of a file +that is a descendant of this directory, it must use an absolute path +that has the value of PWD as a prefix, to ensure that the returned +filenames satisfy the original query. */ package packages // import "golang.org/x/tools/go/packages" diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go index c2b4b711b59..96db9daf314 100644 --- a/vendor/golang.org/x/tools/go/packages/external.go +++ b/vendor/golang.org/x/tools/go/packages/external.go @@ -79,10 +79,10 @@ type DriverResponse struct { // driver is the type for functions that query the build system for the // packages named by the patterns. 
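(Aside, not part of the diff: the driver protocol described above is small enough to sketch. A hypothetical GOPACKAGESDRIVER program that declines every query, so the tool falls back to its default 'go list' behavior, could look like this; DriverRequest and DriverResponse are the exported types the protocol names, with patterns arriving as command-line arguments and the request body on stdin.)

    package main

    import (
        "encoding/json"
        "log"
        "os"

        "golang.org/x/tools/go/packages"
    )

    func main() {
        var req packages.DriverRequest
        if err := json.NewDecoder(os.Stdin).Decode(&req); err != nil {
            log.Fatal(err)
        }
        // NotHandled tells the caller to use its own build system logic.
        resp := packages.DriverResponse{NotHandled: true}
        if err := json.NewEncoder(os.Stdout).Encode(&resp); err != nil {
            log.Fatal(err)
        }
    }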
-type driver func(cfg *Config, patterns ...string) (*DriverResponse, error) +type driver func(cfg *Config, patterns []string) (*DriverResponse, error) // findExternalDriver returns the file path of a tool that supplies -// the build system package structure, or "" if not found." +// the build system package structure, or "" if not found. // If GOPACKAGESDRIVER is set in the environment findExternalTool returns its // value, otherwise it searches for a binary named gopackagesdriver on the PATH. func findExternalDriver(cfg *Config) driver { @@ -103,7 +103,7 @@ func findExternalDriver(cfg *Config) driver { return nil } } - return func(cfg *Config, words ...string) (*DriverResponse, error) { + return func(cfg *Config, patterns []string) (*DriverResponse, error) { req, err := json.Marshal(DriverRequest{ Mode: cfg.Mode, Env: cfg.Env, @@ -117,7 +117,7 @@ func findExternalDriver(cfg *Config) driver { buf := new(bytes.Buffer) stderr := new(bytes.Buffer) - cmd := exec.CommandContext(cfg.Context, tool, words...) + cmd := exec.CommandContext(cfg.Context, tool, patterns...) cmd.Dir = cfg.Dir // The cwd gets resolved to the real path. On Darwin, where // /tmp is a symlink, this breaks anything that expects the diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index 1a3a5b44f5c..76f910ecec9 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -80,6 +80,12 @@ type golistState struct { cfg *Config ctx context.Context + runner *gocommand.Runner + + // overlay is the JSON file that encodes the Config.Overlay + // mapping, used by 'go list -overlay=...'. + overlay string + envOnce sync.Once goEnvError error goEnv map[string]string @@ -127,7 +133,10 @@ func (state *golistState) mustGetEnv() map[string]string { // goListDriver uses the go list command to interpret the patterns and produce // the build system package structure. // See driver for more details. -func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error) { +// +// overlay is the JSON file that encodes the cfg.Overlay +// mapping, used by 'go list -overlay=...' +func goListDriver(cfg *Config, runner *gocommand.Runner, overlay string, patterns []string) (_ *DriverResponse, err error) { // Make sure that any asynchronous go commands are killed when we return. parentCtx := cfg.Context if parentCtx == nil { @@ -142,13 +151,15 @@ func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error cfg: cfg, ctx: ctx, vendorDirs: map[string]bool{}, + overlay: overlay, + runner: runner, } // Fill in response.Sizes asynchronously if necessary. - if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { + if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 { errCh := make(chan error) go func() { - compiler, arch, err := getSizesForArgs(ctx, state.cfgInvocation(), cfg.gocmdRunner) + compiler, arch, err := getSizesForArgs(ctx, state.cfgInvocation(), runner) response.dr.Compiler = compiler response.dr.Arch = arch errCh <- err @@ -681,7 +692,7 @@ func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool { // getGoVersion returns the effective minor version of the go command. 
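(Aside, not part of the diff: the overlay plumbing above is internal; from the caller's side nothing changes. A sketch of a Load call that supplies unsaved file contents via Config.Overlay, with made-up paths; note the Mode spells out Need bits rather than the deprecated Load* aggregates.)

    package main

    import (
        "fmt"
        "log"

        "golang.org/x/tools/go/packages"
    )

    func main() {
        cfg := &packages.Config{
            Mode: packages.NeedName | packages.NeedFiles | packages.NeedSyntax,
            Dir:  "/work/p", // assumed module directory
            Overlay: map[string][]byte{
                // Unsaved contents for a file under /work/p.
                "/work/p/p.go": []byte("package p\n\nfunc Hello() {}\n"),
            },
        }
        pkgs, err := packages.Load(cfg, ".")
        if err != nil {
            log.Fatal(err)
        }
        for _, pkg := range pkgs {
            fmt.Println(pkg.ID, pkg.GoFiles)
        }
    }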
func (state *golistState) getGoVersion() (int, error) { state.goVersionOnce.Do(func() { - state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.cfg.gocmdRunner) + state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.runner) }) return state.goVersion, state.goVersionError } @@ -751,7 +762,7 @@ func jsonFlag(cfg *Config, goVersion int) string { } } addFields("Name", "ImportPath", "Error") // These fields are always needed - if cfg.Mode&NeedFiles != 0 || cfg.Mode&NeedTypes != 0 { + if cfg.Mode&NeedFiles != 0 || cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 { addFields("Dir", "GoFiles", "IgnoredGoFiles", "IgnoredOtherFiles", "CFiles", "CgoFiles", "CXXFiles", "MFiles", "HFiles", "FFiles", "SFiles", "SwigFiles", "SwigCXXFiles", "SysoFiles") @@ -759,7 +770,7 @@ func jsonFlag(cfg *Config, goVersion int) string { addFields("TestGoFiles", "XTestGoFiles") } } - if cfg.Mode&NeedTypes != 0 { + if cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 { // CompiledGoFiles seems to be required for the test case TestCgoNoSyntax, // even when -compiled isn't passed in. // TODO(#52435): Should we make the test ask for -compiled, or automatically @@ -840,7 +851,7 @@ func (state *golistState) cfgInvocation() gocommand.Invocation { Env: cfg.Env, Logf: cfg.Logf, WorkingDir: cfg.Dir, - Overlay: cfg.goListOverlayFile, + Overlay: state.overlay, } } @@ -851,11 +862,8 @@ func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, inv := state.cfgInvocation() inv.Verb = verb inv.Args = args - gocmdRunner := cfg.gocmdRunner - if gocmdRunner == nil { - gocmdRunner = &gocommand.Runner{} - } - stdout, stderr, friendlyErr, err := gocmdRunner.RunRaw(cfg.Context, inv) + + stdout, stderr, friendlyErr, err := state.runner.RunRaw(cfg.Context, inv) if err != nil { // Check for 'go' executable not being found. if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound { @@ -879,6 +887,12 @@ func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, return nil, friendlyErr } + // Return an error if 'go list' failed due to missing tools in + // $GOROOT/pkg/tool/$GOOS_$GOARCH (#69606). + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), `go: no such tool`) { + return nil, friendlyErr + } + // Is there an error running the C compiler in cgo? This will be reported in the "Error" field // and should be suppressed by go list -e. 
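(Aside, not part of the diff: the loadmode_string.go change just below rewrites LoadMode.String to name every known bit and render any unnamed residue in hex. A sketch of the strings the new implementation produces, assuming that code:)

    package main

    import (
        "fmt"

        "golang.org/x/tools/go/packages"
    )

    func main() {
        fmt.Println(packages.LoadMode(0))                         // LoadMode(0)
        fmt.Println(packages.NeedName)                            // NeedName
        fmt.Println(packages.NeedName | packages.NeedFiles)       // (NeedName|NeedFiles)
        fmt.Println(packages.NeedName | packages.LoadMode(1<<30)) // (NeedName|0x40000000)
    }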
// diff --git a/vendor/golang.org/x/tools/go/packages/loadmode_string.go b/vendor/golang.org/x/tools/go/packages/loadmode_string.go index 5c080d21b54..5fcad6ea6db 100644 --- a/vendor/golang.org/x/tools/go/packages/loadmode_string.go +++ b/vendor/golang.org/x/tools/go/packages/loadmode_string.go @@ -9,49 +9,46 @@ import ( "strings" ) -var allModes = []LoadMode{ - NeedName, - NeedFiles, - NeedCompiledGoFiles, - NeedImports, - NeedDeps, - NeedExportFile, - NeedTypes, - NeedSyntax, - NeedTypesInfo, - NeedTypesSizes, +var modes = [...]struct { + mode LoadMode + name string +}{ + {NeedName, "NeedName"}, + {NeedFiles, "NeedFiles"}, + {NeedCompiledGoFiles, "NeedCompiledGoFiles"}, + {NeedImports, "NeedImports"}, + {NeedDeps, "NeedDeps"}, + {NeedExportFile, "NeedExportFile"}, + {NeedTypes, "NeedTypes"}, + {NeedSyntax, "NeedSyntax"}, + {NeedTypesInfo, "NeedTypesInfo"}, + {NeedTypesSizes, "NeedTypesSizes"}, + {NeedModule, "NeedModule"}, + {NeedEmbedFiles, "NeedEmbedFiles"}, + {NeedEmbedPatterns, "NeedEmbedPatterns"}, } -var modeStrings = []string{ - "NeedName", - "NeedFiles", - "NeedCompiledGoFiles", - "NeedImports", - "NeedDeps", - "NeedExportFile", - "NeedTypes", - "NeedSyntax", - "NeedTypesInfo", - "NeedTypesSizes", -} - -func (mod LoadMode) String() string { - m := mod - if m == 0 { +func (mode LoadMode) String() string { + if mode == 0 { return "LoadMode(0)" } var out []string - for i, x := range allModes { - if x > m { - break + // named bits + for _, item := range modes { + if (mode & item.mode) != 0 { + mode ^= item.mode + out = append(out, item.name) } - if (m & x) != 0 { - out = append(out, modeStrings[i]) - m = m ^ x + } + // unnamed residue + if mode != 0 { + if out == nil { + return fmt.Sprintf("LoadMode(%#x)", int(mode)) } + out = append(out, fmt.Sprintf("%#x", int(mode))) } - if m != 0 { - out = append(out, "Unknown") + if len(out) == 1 { + return out[0] } - return fmt.Sprintf("LoadMode(%s)", strings.Join(out, "|")) + return "(" + strings.Join(out, "|") + ")" } diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 34306ddd390..2ecc64238e8 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -16,13 +16,13 @@ import ( "go/scanner" "go/token" "go/types" - "io" "log" "os" "path/filepath" "runtime" "strings" "sync" + "sync/atomic" "time" "golang.org/x/sync/errgroup" @@ -31,7 +31,6 @@ import ( "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/packagesinternal" "golang.org/x/tools/internal/typesinternal" - "golang.org/x/tools/internal/versions" ) // A LoadMode controls the amount of detail to return when loading. @@ -46,18 +45,17 @@ import ( // // Unfortunately there are a number of open bugs related to // interactions among the LoadMode bits: -// - https://github.com/golang/go/issues/48226 -// - https://github.com/golang/go/issues/56633 -// - https://github.com/golang/go/issues/56677 -// - https://github.com/golang/go/issues/58726 -// - https://github.com/golang/go/issues/63517 +// - https://github.com/golang/go/issues/56633 +// - https://github.com/golang/go/issues/56677 +// - https://github.com/golang/go/issues/58726 +// - https://github.com/golang/go/issues/63517 type LoadMode int const ( // NeedName adds Name and PkgPath. NeedName LoadMode = 1 << iota - // NeedFiles adds GoFiles and OtherFiles. + // NeedFiles adds GoFiles, OtherFiles, and IgnoredFiles NeedFiles // NeedCompiledGoFiles adds CompiledGoFiles. 
@@ -76,10 +74,10 @@ const ( // NeedTypes adds Types, Fset, and IllTyped. NeedTypes - // NeedSyntax adds Syntax. + // NeedSyntax adds Syntax and Fset. NeedSyntax - // NeedTypesInfo adds TypesInfo. + // NeedTypesInfo adds TypesInfo and Fset. NeedTypesInfo // NeedTypesSizes adds TypesSizes. @@ -104,25 +102,37 @@ const ( // NeedEmbedPatterns adds EmbedPatterns. NeedEmbedPatterns + + // Be sure to update loadmode_string.go when adding new items! ) const ( + // LoadFiles loads the name and file names for the initial packages. + // // Deprecated: LoadFiles exists for historical compatibility // and should not be used. Please directly specify the needed fields using the Need values. LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles + // LoadImports loads the name, file names, and import mapping for the initial packages. + // // Deprecated: LoadImports exists for historical compatibility // and should not be used. Please directly specify the needed fields using the Need values. LoadImports = LoadFiles | NeedImports + // LoadTypes loads exported type information for the initial packages. + // // Deprecated: LoadTypes exists for historical compatibility // and should not be used. Please directly specify the needed fields using the Need values. LoadTypes = LoadImports | NeedTypes | NeedTypesSizes + // LoadSyntax loads typed syntax for the initial packages. + // // Deprecated: LoadSyntax exists for historical compatibility // and should not be used. Please directly specify the needed fields using the Need values. LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo + // LoadAllSyntax loads typed syntax for the initial packages and all dependencies. + // // Deprecated: LoadAllSyntax exists for historical compatibility // and should not be used. Please directly specify the needed fields using the Need values. LoadAllSyntax = LoadSyntax | NeedDeps @@ -134,13 +144,7 @@ const ( // A Config specifies details about how packages should be loaded. // The zero value is a valid configuration. // -// Calls to Load do not modify this struct. -// -// TODO(adonovan): #67702: this is currently false: in fact, -// calls to [Load] do not modify the public fields of this struct, but -// may modify hidden fields, so concurrent calls to [Load] must not -// use the same Config. But perhaps we should reestablish the -// documented invariant. +// Calls to [Load] do not modify this struct. type Config struct { // Mode controls the level of information returned for each package. Mode LoadMode @@ -171,19 +175,10 @@ type Config struct { // Env []string - // gocmdRunner guards go command calls from concurrency errors. - gocmdRunner *gocommand.Runner - // BuildFlags is a list of command-line flags to be passed through to // the build system's query tool. BuildFlags []string - // modFile will be used for -modfile in go command invocations. - modFile string - - // modFlag will be used for -modfile in go command invocations. - modFlag string - // Fset provides source position information for syntax trees and types. // If Fset is nil, Load will use a new fileset, but preserve Fset's value. Fset *token.FileSet @@ -230,21 +225,24 @@ type Config struct { // drivers may vary in their level of support for overlays. Overlay map[string][]byte - // goListOverlayFile is the JSON file that encodes the Overlay - // mapping, used by 'go list -overlay=...' - goListOverlayFile string + // -- Hidden configuration fields only for use in x/tools -- + + // modFile will be used for -modfile in go command invocations. 
+ modFile string + + // modFlag will be used for -modfile in go command invocations. + modFlag string } // Load loads and returns the Go packages named by the given patterns. // -// Config specifies loading options; -// nil behaves the same as an empty Config. +// The cfg parameter specifies loading options; nil behaves the same as an empty [Config]. // // The [Config.Mode] field is a set of bits that determine what kinds // of information should be computed and returned. Modes that require // more information tend to be slower. See [LoadMode] for details // and important caveats. Its zero value is equivalent to -// NeedName | NeedFiles | NeedCompiledGoFiles. +// [NeedName] | [NeedFiles] | [NeedCompiledGoFiles]. // // Each call to Load returns a new set of [Package] instances. // The Packages and their Imports form a directed acyclic graph. @@ -261,7 +259,7 @@ type Config struct { // Errors associated with a particular package are recorded in the // corresponding Package's Errors list, and do not cause Load to // return an error. Clients may need to handle such errors before -// proceeding with further analysis. The PrintErrors function is +// proceeding with further analysis. The [PrintErrors] function is // provided for convenient display of all errors. func Load(cfg *Config, patterns ...string) ([]*Package, error) { ld := newLoader(cfg) @@ -324,21 +322,24 @@ func defaultDriver(cfg *Config, patterns ...string) (*DriverResponse, bool, erro } else if !response.NotHandled { return response, true, nil } - // (fall through) + // not handled: fall through } // go list fallback - // + // Write overlays once, as there are many calls // to 'go list' (one per chunk plus others too). - overlay, cleanupOverlay, err := gocommand.WriteOverlays(cfg.Overlay) + overlayFile, cleanupOverlay, err := gocommand.WriteOverlays(cfg.Overlay) if err != nil { return nil, false, err } defer cleanupOverlay() - cfg.goListOverlayFile = overlay - response, err := callDriverOnChunks(goListDriver, cfg, chunks) + var runner gocommand.Runner // (shared across many 'go list' calls) + driver := func(cfg *Config, patterns []string) (*DriverResponse, error) { + return goListDriver(cfg, &runner, overlayFile, patterns) + } + response, err := callDriverOnChunks(driver, cfg, chunks) if err != nil { return nil, false, err } @@ -376,16 +377,14 @@ func splitIntoChunks(patterns []string, argMax int) ([][]string, error) { func callDriverOnChunks(driver driver, cfg *Config, chunks [][]string) (*DriverResponse, error) { if len(chunks) == 0 { - return driver(cfg) + return driver(cfg, nil) } responses := make([]*DriverResponse, len(chunks)) errNotHandled := errors.New("driver returned NotHandled") var g errgroup.Group for i, chunk := range chunks { - i := i - chunk := chunk g.Go(func() (err error) { - responses[i], err = driver(cfg, chunk...) 
+ responses[i], err = driver(cfg, chunk) } @@ -682,18 +681,19 @@ func (p *Package) String() string { return p.ID } // loaderPackage augments Package with state used during the loading phase type loaderPackage struct { *Package - importErrors map[string]error // maps each bad import to its error - loadOnce sync.Once - color uint8 // for cycle detection - needsrc bool // load from source (Mode >= LoadTypes) - needtypes bool // type information is either requested or depended on - initial bool // package was matched by a pattern - goVersion int // minor version number of go command on PATH + importErrors map[string]error // maps each bad import to its error + preds []*loaderPackage // packages that import this one + unfinishedSuccs atomic.Int32 // number of direct imports not yet loaded + color uint8 // for cycle detection + needsrc bool // load from source (Mode >= LoadTypes) + needtypes bool // type information is either requested or depended on + initial bool // package was matched by a pattern + goVersion int // minor version number of go command on PATH } // loader holds the working state of a single call to load. type loader struct { - pkgs map[string]*loaderPackage + pkgs map[string]*loaderPackage // keyed by Package.ID Config sizes types.Sizes // non-nil if needed by mode parseCache map[string]*parseValue @@ -739,9 +739,6 @@ func newLoader(cfg *Config) *loader { if ld.Config.Env == nil { ld.Config.Env = os.Environ() } - if ld.Config.gocmdRunner == nil { - ld.Config.gocmdRunner = &gocommand.Runner{} - } if ld.Context == nil { ld.Context = context.Background() } @@ -755,7 +752,7 @@ func newLoader(cfg *Config) *loader { ld.requestedMode = ld.Mode ld.Mode = impliedLoadMode(ld.Mode) - if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { + if ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0 { if ld.Fset == nil { ld.Fset = token.NewFileSet() } @@ -764,6 +761,7 @@ func newLoader(cfg *Config) *loader { // because we load source if export data is missing. if ld.ParseFile == nil { ld.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) { + // We implicitly promise to keep doing ast.Object resolution. :( const mode = parser.AllErrors | parser.ParseComments return parser.ParseFile(fset, filename, src, mode) } @@ -795,7 +793,7 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { exportDataInvalid := len(ld.Overlay) > 0 || pkg.ExportFile == "" && pkg.PkgPath != "unsafe" // This package needs type information if the caller requested types and the package is // either a root, or it's a non-root and the user requested dependencies ... - needtypes := (ld.Mode&NeedTypes|NeedTypesInfo != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) + needtypes := (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) // This package needs source if the call requested source (or types info, which implies source) // and the package is either a root, or it's a non-root and the user requested dependencies... needsrc := ((ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) || @@ -820,9 +818,10 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { } } - if ld.Mode&NeedImports != 0 { - // Materialize the import graph. - + // Materialize the import graph if it is needed (NeedImports), + // or if we'll be using loadPackages (Need{Syntax|Types|TypesInfo}).
+ var leaves []*loaderPackage // packages with no unfinished successors + if ld.Mode&(NeedImports|NeedSyntax|NeedTypes|NeedTypesInfo) != 0 { const ( white = 0 // new grey = 1 // in progress @@ -841,63 +840,76 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { // dependency on a package that does. These are the only packages // for which we load source code. var stack []*loaderPackage - var visit func(lpkg *loaderPackage) bool - visit = func(lpkg *loaderPackage) bool { - switch lpkg.color { - case black: - return lpkg.needsrc - case grey: + var visit func(from, lpkg *loaderPackage) bool + visit = func(from, lpkg *loaderPackage) bool { + if lpkg.color == grey { panic("internal error: grey node") } - lpkg.color = grey - stack = append(stack, lpkg) // push - stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports - lpkg.Imports = make(map[string]*Package, len(stubs)) - for importPath, ipkg := range stubs { - var importErr error - imp := ld.pkgs[ipkg.ID] - if imp == nil { - // (includes package "C" when DisableCgo) - importErr = fmt.Errorf("missing package: %q", ipkg.ID) - } else if imp.color == grey { - importErr = fmt.Errorf("import cycle: %s", stack) + if lpkg.color == white { + lpkg.color = grey + stack = append(stack, lpkg) // push + stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports + lpkg.Imports = make(map[string]*Package, len(stubs)) + for importPath, ipkg := range stubs { + var importErr error + imp := ld.pkgs[ipkg.ID] + if imp == nil { + // (includes package "C" when DisableCgo) + importErr = fmt.Errorf("missing package: %q", ipkg.ID) + } else if imp.color == grey { + importErr = fmt.Errorf("import cycle: %s", stack) + } + if importErr != nil { + if lpkg.importErrors == nil { + lpkg.importErrors = make(map[string]error) + } + lpkg.importErrors[importPath] = importErr + continue + } + + if visit(lpkg, imp) { + lpkg.needsrc = true + } + lpkg.Imports[importPath] = imp.Package } - if importErr != nil { - if lpkg.importErrors == nil { - lpkg.importErrors = make(map[string]error) + + // -- postorder -- + + // Complete type information is required for the + // immediate dependencies of each source package. + if lpkg.needsrc && ld.Mode&NeedTypes != 0 { + for _, ipkg := range lpkg.Imports { + ld.pkgs[ipkg.ID].needtypes = true } - lpkg.importErrors[importPath] = importErr - continue } - if visit(imp) { - lpkg.needsrc = true + // NeedTypeSizes causes TypeSizes to be set even + // on packages for which types aren't needed. + if ld.Mode&NeedTypesSizes != 0 { + lpkg.TypesSizes = ld.sizes } - lpkg.Imports[importPath] = imp.Package - } - // Complete type information is required for the - // immediate dependencies of each source package. - if lpkg.needsrc && ld.Mode&NeedTypes != 0 { - for _, ipkg := range lpkg.Imports { - ld.pkgs[ipkg.ID].needtypes = true + // Add packages with no imports directly to the queue of leaves. + if len(lpkg.Imports) == 0 { + leaves = append(leaves, lpkg) } + + stack = stack[:len(stack)-1] // pop + lpkg.color = black } - // NeedTypeSizes causes TypeSizes to be set even - // on packages for which types aren't needed. - if ld.Mode&NeedTypesSizes != 0 { - lpkg.TypesSizes = ld.sizes + // Add edge from predecessor. + if from != nil { + from.unfinishedSuccs.Add(+1) // incref + lpkg.preds = append(lpkg.preds, from) } - stack = stack[:len(stack)-1] // pop - lpkg.color = black return lpkg.needsrc } // For each initial package, create its import DAG. 
for _, lpkg := range initial { - visit(lpkg) + visit(nil, lpkg) } } else { @@ -910,16 +922,45 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { // Load type data and syntax if needed, starting at // the initial packages (roots of the import DAG). - if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { - var wg sync.WaitGroup - for _, lpkg := range initial { - wg.Add(1) - go func(lpkg *loaderPackage) { - ld.loadRecursive(lpkg) - wg.Done() - }(lpkg) + if ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0 { + + // We avoid using g.SetLimit to limit concurrency as + // it makes g.Go stop accepting work, which prevents + // workers from enqueuing, and thus finishing, and thus + // allowing the group to make progress: deadlock. + // + // Instead we use the ioLimit and cpuLimit semaphores. + g, _ := errgroup.WithContext(ld.Context) + + // enqueue adds a package to the type-checking queue. + // It must have no unfinished successors. + var enqueue func(*loaderPackage) + enqueue = func(lpkg *loaderPackage) { + g.Go(func() error { + // Parse and type-check. + ld.loadPackage(lpkg) + + // Notify each waiting predecessor, + // and enqueue it when it becomes a leaf. + for _, pred := range lpkg.preds { + if pred.unfinishedSuccs.Add(-1) == 0 { // decref + enqueue(pred) + } + } + + return nil + }) + } + + // Load leaves first, adding new packages + // to the queue as they become leaves. + for _, leaf := range leaves { + enqueue(leaf) + } + + if err := g.Wait(); err != nil { + return nil, err // cancelled } - wg.Wait() } // If the context is done, return its error and @@ -961,12 +1002,14 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { } if ld.requestedMode&NeedTypes == 0 { ld.pkgs[i].Types = nil - ld.pkgs[i].Fset = nil ld.pkgs[i].IllTyped = false } if ld.requestedMode&NeedSyntax == 0 { ld.pkgs[i].Syntax = nil } + if ld.requestedMode&(NeedSyntax|NeedTypes|NeedTypesInfo) == 0 { + ld.pkgs[i].Fset = nil + } if ld.requestedMode&NeedTypesInfo == 0 { ld.pkgs[i].TypesInfo = nil } @@ -981,31 +1024,10 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { return result, nil } -// loadRecursive loads the specified package and its dependencies, -// recursively, in parallel, in topological order. -// It is atomic and idempotent. -// Precondition: ld.Mode&NeedTypes. -func (ld *loader) loadRecursive(lpkg *loaderPackage) { - lpkg.loadOnce.Do(func() { - // Load the direct dependencies, in parallel. - var wg sync.WaitGroup - for _, ipkg := range lpkg.Imports { - imp := ld.pkgs[ipkg.ID] - wg.Add(1) - go func(imp *loaderPackage) { - ld.loadRecursive(imp) - wg.Done() - }(imp) - } - wg.Wait() - ld.loadPackage(lpkg) - }) -} - -// loadPackage loads the specified package. +// loadPackage loads/parses/typechecks the specified package. // It must be called only once per Package, // after immediate dependencies are loaded. -// Precondition: ld.Mode & NeedTypes. +// Precondition: ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0. func (ld *loader) loadPackage(lpkg *loaderPackage) { if lpkg.PkgPath == "unsafe" { // Fill in the blanks to avoid surprises. @@ -1041,6 +1063,10 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { if !lpkg.needtypes && !lpkg.needsrc { return } + + // TODO(adonovan): this condition looks wrong: + // I think it should be lpkg.needtypes && !lpkg.needsrc, + // so that NeedSyntax without NeedTypes can be satisfied by export data.
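(Aside, not part of the diff: the scheduling pattern introduced above, reverse edges plus an atomic count of unfinished dependencies, with leaves enqueued first, is worth seeing in isolation. The node type and four-node DAG below are invented for illustration.)

    package main

    import (
        "fmt"
        "sync/atomic"

        "golang.org/x/sync/errgroup"
    )

    // node is an invented stand-in for loaderPackage: it records its
    // dependents (preds) and an atomic count of unfinished dependencies.
    type node struct {
        name            string
        preds           []*node
        unfinishedSuccs atomic.Int32
    }

    // depend records that n depends on dep.
    func depend(n, dep *node) {
        n.unfinishedSuccs.Add(1) // incref
        dep.preds = append(dep.preds, n)
    }

    func main() {
        a, b, c, d := &node{name: "a"}, &node{name: "b"}, &node{name: "c"}, &node{name: "d"}
        depend(b, a) // b imports a
        depend(c, a) // c imports a
        depend(d, b) // d imports b
        depend(d, c) // d imports c

        var g errgroup.Group
        var enqueue func(*node)
        enqueue = func(n *node) {
            g.Go(func() error {
                fmt.Println("process", n.name) // stand-in for parse/type-check
                for _, pred := range n.preds {
                    if pred.unfinishedSuccs.Add(-1) == 0 { // decref
                        enqueue(pred) // pred just became a leaf
                    }
                }
                return nil
            })
        }
        enqueue(a) // a is the only leaf: it depends on nothing
        if err := g.Wait(); err != nil {
            panic(err)
        }
    }

Because each g.Go callback may itself call g.Go, bounding concurrency with g.SetLimit would deadlock, which is exactly why the loader bounds work with the ioLimit/cpuLimit semaphores instead.)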
if !lpkg.needsrc { if err := ld.loadFromExportData(lpkg); err != nil { lpkg.Errors = append(lpkg.Errors, Error{ @@ -1146,7 +1172,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { } lpkg.Syntax = files - if ld.Config.Mode&NeedTypes == 0 { + if ld.Config.Mode&(NeedTypes|NeedTypesInfo) == 0 { return } @@ -1157,16 +1183,20 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { return } - lpkg.TypesInfo = &types.Info{ - Types: make(map[ast.Expr]types.TypeAndValue), - Defs: make(map[*ast.Ident]types.Object), - Uses: make(map[*ast.Ident]types.Object), - Implicits: make(map[ast.Node]types.Object), - Instances: make(map[*ast.Ident]types.Instance), - Scopes: make(map[ast.Node]*types.Scope), - Selections: make(map[*ast.SelectorExpr]*types.Selection), + // Populate TypesInfo only if needed, as it + // causes the type checker to work much harder. + if ld.Config.Mode&NeedTypesInfo != 0 { + lpkg.TypesInfo = &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Instances: make(map[*ast.Ident]types.Instance), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + FileVersions: make(map[*ast.File]string), + } } - versions.InitFileVersions(lpkg.TypesInfo) lpkg.TypesSizes = ld.sizes importer := importerFunc(func(path string) (*types.Package, error) { @@ -1219,6 +1249,10 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { } } + // Type-checking is CPU intensive. + cpuLimit <- unit{} // acquire a token + defer func() { <-cpuLimit }() // release a token + typErr := types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax) lpkg.importErrors = nil // no longer needed @@ -1283,8 +1317,11 @@ type importerFunc func(path string) (*types.Package, error) func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) } // We use a counting semaphore to limit -// the number of parallel I/O calls per process. -var ioLimit = make(chan bool, 20) +// the number of parallel I/O calls or CPU threads per process. +var ( + ioLimit = make(chan unit, 20) + cpuLimit = make(chan unit, runtime.GOMAXPROCS(0)) +) func (ld *loader) parseFile(filename string) (*ast.File, error) { ld.parseCacheMu.Lock() @@ -1301,20 +1338,28 @@ func (ld *loader) parseFile(filename string) (*ast.File, error) { var src []byte for f, contents := range ld.Config.Overlay { + // TODO(adonovan): Inefficient for large overlays. + // Do an exact name-based map lookup + // (for nonexistent files) followed by a + // FileID-based map lookup (for existing ones). if sameFile(f, filename) { src = contents + break } } var err error if src == nil { - ioLimit <- true // wait + ioLimit <- unit{} // acquire a token src, err = os.ReadFile(filename) - <-ioLimit // signal + <-ioLimit // release a token } if err != nil { v.err = err } else { + // Parsing is CPU intensive. + cpuLimit <- unit{} // acquire a token v.f, v.err = ld.ParseFile(ld.Fset, filename, src) + <-cpuLimit // release a token } close(v.ready) @@ -1329,18 +1374,21 @@ func (ld *loader) parseFile(filename string) (*ast.File, error) { // Because files are scanned in parallel, the token.Pos // positions of the resulting ast.Files are not ordered. 
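(Aside, not part of the diff: the ioLimit/cpuLimit channels above are counting semaphores, buffered channels whose capacity is the concurrency bound. A standalone sketch of the idiom:)

    package main

    import (
        "fmt"
        "runtime"
        "sync"
    )

    type unit struct{}

    // At most GOMAXPROCS tokens may be held at once.
    var cpuLimit = make(chan unit, runtime.GOMAXPROCS(0))

    func main() {
        var wg sync.WaitGroup
        for i := 0; i < 100; i++ {
            wg.Add(1)
            go func(i int) {
                defer wg.Done()
                cpuLimit <- unit{}            // acquire a token
                defer func() { <-cpuLimit }() // release a token
                fmt.Println("working on", i)  // CPU-bound work goes here
            }(i)
        }
        wg.Wait()
    }

Unlike an errgroup limit, a full semaphore merely blocks goroutines that are already running, so new work can always be enqueued.)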
func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) { - var wg sync.WaitGroup - n := len(filenames) - parsed := make([]*ast.File, n) - errors := make([]error, n) - for i, file := range filenames { - wg.Add(1) - go func(i int, filename string) { + var ( + n = len(filenames) + parsed = make([]*ast.File, n) + errors = make([]error, n) + ) + var g errgroup.Group + for i, filename := range filenames { + // This creates goroutines unnecessarily in the + // cache-hit case, but that case is uncommon. + g.Go(func() error { parsed[i], errors[i] = ld.parseFile(filename) - wg.Done() - }(i, file) + return nil + }) } - wg.Wait() + g.Wait() // Eliminate nils, preserving order. var o int @@ -1499,6 +1547,10 @@ func impliedLoadMode(loadMode LoadMode) LoadMode { // All these things require knowing the import graph. loadMode |= NeedImports } + if loadMode&NeedTypes != 0 { + // Types require the GoVersion from Module. + loadMode |= NeedModule + } return loadMode } @@ -1507,4 +1559,4 @@ func usesExportData(cfg *Config) bool { return cfg.Mode&NeedExportFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0 } -var _ interface{} = io.Discard // assert build toolchain is go1.16 or later +type unit struct{} diff --git a/vendor/golang.org/x/tools/go/packages/visit.go b/vendor/golang.org/x/tools/go/packages/visit.go index a1dcc40b727..df14ffd94dc 100644 --- a/vendor/golang.org/x/tools/go/packages/visit.go +++ b/vendor/golang.org/x/tools/go/packages/visit.go @@ -49,11 +49,20 @@ func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) { // PrintErrors returns the number of errors printed. func PrintErrors(pkgs []*Package) int { var n int + errModules := make(map[*Module]bool) Visit(pkgs, nil, func(pkg *Package) { for _, err := range pkg.Errors { fmt.Fprintln(os.Stderr, err) n++ } + + // Print pkg.Module.Error once if present. + mod := pkg.Module + if mod != nil && mod.Error != nil && !errModules[mod] { + errModules[mod] = true + fmt.Fprintln(os.Stderr, mod.Error.Err) + n++ + } }) return n } diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go index d648c3d071b..16ed3c1780b 100644 --- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -51,7 +51,7 @@ type Path string // // PO package->object Package.Scope.Lookup // OT object->type Object.Type -// TT type->type Type.{Elem,Key,{,{,Recv}Type}Params,Results,Underlying} [EKPRUTrC] +// TT type->type Type.{Elem,Key,{,{,Recv}Type}Params,Results,Underlying,Rhs} [EKPRUTrCa] // TO type->object Type.{At,Field,Method,Obj} [AFMO] // // All valid paths start with a package and end at an object @@ -63,7 +63,7 @@ type Path string // - The only PO operator is Package.Scope.Lookup, which requires an identifier. // - The only OT operator is Object.Type, // which we encode as '.' because dot cannot appear in an identifier. -// - The TT operators are encoded as [EKPRUTrC]; +// - The TT operators are encoded as [EKPRUTrCa]; // two of these ({,Recv}TypeParams) require an integer operand, // which is encoded as a string of decimal digits. 
// - The TO operators are encoded as [AFMO]; @@ -106,6 +106,7 @@ const ( opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature) opRecvTypeParam = 'r' // .RecvTypeParams.At(i) (Signature) opConstraint = 'C' // .Constraint() (TypeParam) + opRhs = 'a' // .Rhs() (Alias) // type->object operators opAt = 'A' // .At(i) (Tuple) @@ -227,7 +228,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { // Reject obviously non-viable cases. switch obj := obj.(type) { case *types.TypeName: - if _, ok := aliases.Unalias(obj.Type()).(*types.TypeParam); !ok { + if _, ok := types.Unalias(obj.Type()).(*types.TypeParam); !ok { // With the exception of type parameters, only package-level type names // have a path. return "", fmt.Errorf("no path for %v", obj) @@ -279,21 +280,26 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { path = append(path, opType) T := o.Type() - - if tname.IsAlias() { - // type alias - if r := find(obj, T, path, nil); r != nil { + if alias, ok := T.(*types.Alias); ok { + if r := findTypeParam(obj, aliases.TypeParams(alias), path, opTypeParam); r != nil { return Path(r), nil } - } else { - if named, _ := T.(*types.Named); named != nil { - if r := findTypeParam(obj, named.TypeParams(), path, opTypeParam, nil); r != nil { - // generic named type - return Path(r), nil - } + if r := find(obj, aliases.Rhs(alias), append(path, opRhs)); r != nil { + return Path(r), nil + } + + } else if tname.IsAlias() { + // legacy alias + if r := find(obj, T, path); r != nil { + return Path(r), nil } + + } else if named, ok := T.(*types.Named); ok { // defined (named) type - if r := find(obj, T.Underlying(), append(path, opUnderlying), nil); r != nil { + if r := findTypeParam(obj, named.TypeParams(), path, opTypeParam); r != nil { + return Path(r), nil + } + if r := find(obj, named.Underlying(), append(path, opUnderlying)); r != nil { return Path(r), nil } } @@ -306,7 +312,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { if _, ok := o.(*types.TypeName); !ok { if o.Exported() { // exported non-type (const, var, func) - if r := find(obj, o.Type(), append(path, opType), nil); r != nil { + if r := find(obj, o.Type(), append(path, opType)); r != nil { return Path(r), nil } } @@ -314,7 +320,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { } // Inspect declared methods of defined types. - if T, ok := aliases.Unalias(o.Type()).(*types.Named); ok { + if T, ok := types.Unalias(o.Type()).(*types.Named); ok { path = append(path, opType) // The method index here is always with respect // to the underlying go/types data structures, @@ -326,7 +332,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { if m == obj { return Path(path2), nil // found declared method } - if r := find(obj, m.Type(), append(path2, opType), nil); r != nil { + if r := find(obj, m.Type(), append(path2, opType)); r != nil { return Path(r), nil } } @@ -441,46 +447,64 @@ func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) { // // The seen map is used to short circuit cycles through type parameters. If // nil, it will be allocated as necessary. -func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte { +// +// The seenMethods map is used internally to short circuit cycles through +// interface methods, such as occur in the following example: +// +// type I interface { f() interface{I} } +// +// See golang/go#68046 for details. 
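(Aside, not part of the diff: a round-trip sketch of the package's two halves, Encoder.For to compute a path and Object to resolve it again. It assumes the compiler importer can load export data for "fmt" in the current environment.)

    package main

    import (
        "fmt"
        "go/importer"
        "log"

        "golang.org/x/tools/go/types/objectpath"
    )

    func main() {
        pkg, err := importer.Default().Import("fmt") // assumes "fmt" export data is available
        if err != nil {
            log.Fatal(err)
        }
        obj := pkg.Scope().Lookup("Println")

        var enc objectpath.Encoder
        path, err := enc.For(obj)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(path) // e.g. "Println"

        back, err := objectpath.Object(pkg, path)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(back == obj) // true: the path round-trips
    }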
+func find(obj types.Object, T types.Type, path []byte) []byte { + return (&finder{obj: obj}).find(T, path) +} + +// finder closes over search state for a call to find. +type finder struct { + obj types.Object // the sought object + seenTParamNames map[*types.TypeName]bool // for cycle breaking through type parameters + seenMethods map[*types.Func]bool // for cycle breaking through recursive interfaces +} + +func (f *finder) find(T types.Type, path []byte) []byte { switch T := T.(type) { - case *aliases.Alias: - return find(obj, aliases.Unalias(T), path, seen) + case *types.Alias: + return f.find(types.Unalias(T), path) case *types.Basic, *types.Named: // Named types belonging to pkg were handled already, // so T must belong to another package. No path. return nil case *types.Pointer: - return find(obj, T.Elem(), append(path, opElem), seen) + return f.find(T.Elem(), append(path, opElem)) case *types.Slice: - return find(obj, T.Elem(), append(path, opElem), seen) + return f.find(T.Elem(), append(path, opElem)) case *types.Array: - return find(obj, T.Elem(), append(path, opElem), seen) + return f.find(T.Elem(), append(path, opElem)) case *types.Chan: - return find(obj, T.Elem(), append(path, opElem), seen) + return f.find(T.Elem(), append(path, opElem)) case *types.Map: - if r := find(obj, T.Key(), append(path, opKey), seen); r != nil { + if r := f.find(T.Key(), append(path, opKey)); r != nil { return r } - return find(obj, T.Elem(), append(path, opElem), seen) + return f.find(T.Elem(), append(path, opElem)) case *types.Signature: - if r := findTypeParam(obj, T.RecvTypeParams(), path, opRecvTypeParam, nil); r != nil { + if r := f.findTypeParam(T.RecvTypeParams(), path, opRecvTypeParam); r != nil { return r } - if r := findTypeParam(obj, T.TypeParams(), path, opTypeParam, seen); r != nil { + if r := f.findTypeParam(T.TypeParams(), path, opTypeParam); r != nil { return r } - if r := find(obj, T.Params(), append(path, opParams), seen); r != nil { + if r := f.find(T.Params(), append(path, opParams)); r != nil { return r } - return find(obj, T.Results(), append(path, opResults), seen) + return f.find(T.Results(), append(path, opResults)) case *types.Struct: for i := 0; i < T.NumFields(); i++ { fld := T.Field(i) path2 := appendOpArg(path, opField, i) - if fld == obj { + if fld == f.obj { return path2 // found field var } - if r := find(obj, fld.Type(), append(path2, opType), seen); r != nil { + if r := f.find(fld.Type(), append(path2, opType)); r != nil { return r } } @@ -489,10 +513,10 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] for i := 0; i < T.Len(); i++ { v := T.At(i) path2 := appendOpArg(path, opAt, i) - if v == obj { + if v == f.obj { return path2 // found param/result var } - if r := find(obj, v.Type(), append(path2, opType), seen); r != nil { + if r := f.find(v.Type(), append(path2, opType)); r != nil { return r } } @@ -500,28 +524,35 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] case *types.Interface: for i := 0; i < T.NumMethods(); i++ { m := T.Method(i) + if f.seenMethods[m] { + return nil + } path2 := appendOpArg(path, opMethod, i) - if m == obj { + if m == f.obj { return path2 // found interface method } - if r := find(obj, m.Type(), append(path2, opType), seen); r != nil { + if f.seenMethods == nil { + f.seenMethods = make(map[*types.Func]bool) + } + f.seenMethods[m] = true + if r := f.find(m.Type(), append(path2, opType)); r != nil { return r } } return nil case *types.TypeParam: name := T.Obj() - if name 
== obj { - return append(path, opObj) - } - if seen[name] { + if f.seenTParamNames[name] { return nil } - if seen == nil { - seen = make(map[*types.TypeName]bool) + if name == f.obj { + return append(path, opObj) + } + if f.seenTParamNames == nil { + f.seenTParamNames = make(map[*types.TypeName]bool) } - seen[name] = true - if r := find(obj, T.Constraint(), append(path, opConstraint), seen); r != nil { + f.seenTParamNames[name] = true + if r := f.find(T.Constraint(), append(path, opConstraint)); r != nil { return r } return nil @@ -529,11 +560,15 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] panic(T) } -func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, op byte, seen map[*types.TypeName]bool) []byte { +func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, op byte) []byte { + return (&finder{obj: obj}).findTypeParam(list, path, op) +} + +func (f *finder) findTypeParam(list *types.TypeParamList, path []byte, op byte) []byte { for i := 0; i < list.Len(); i++ { tparam := list.At(i) path2 := appendOpArg(path, op, i) - if r := find(obj, tparam, path2, seen); r != nil { + if r := f.find(tparam, path2); r != nil { return r } } @@ -620,7 +655,7 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { // Inv: t != nil, obj == nil - t = aliases.Unalias(t) + t = types.Unalias(t) switch code { case opElem: hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map @@ -657,6 +692,16 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { } t = named.Underlying() + case opRhs: + if alias, ok := t.(*types.Alias); ok { + t = aliases.Rhs(alias) + } else if false && aliases.Enabled() { + // The Enabled check is too expensive, so for now we + // simply assume that aliases are not enabled. + // TODO(adonovan): replace with "if true {" when go1.24 is assured. + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want alias)", code, t, t) + } + case opTypeParam: hasTypeParams, ok := t.(hasTypeParams) // Named, Signature if !ok { diff --git a/vendor/golang.org/x/tools/go/types/typeutil/callee.go b/vendor/golang.org/x/tools/go/types/typeutil/callee.go new file mode 100644 index 00000000000..754380351e8 --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/callee.go @@ -0,0 +1,68 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeutil + +import ( + "go/ast" + "go/types" + + "golang.org/x/tools/internal/typeparams" +) + +// Callee returns the named target of a function call, if any: +// a function, method, builtin, or variable. +// +// Functions and methods may potentially have type parameters. +func Callee(info *types.Info, call *ast.CallExpr) types.Object { + fun := ast.Unparen(call.Fun) + + // Look through type instantiation if necessary. + isInstance := false + switch fun.(type) { + case *ast.IndexExpr, *ast.IndexListExpr: + // When extracting the callee from an *IndexExpr, we need to check that + // it is a *types.Func and not a *types.Var. + // Example: Don't match a slice m within the expression `m[0]()`. 
+ isInstance = true + fun, _, _, _ = typeparams.UnpackIndexExpr(fun) + } + + var obj types.Object + switch fun := fun.(type) { + case *ast.Ident: + obj = info.Uses[fun] // type, var, builtin, or declared func + case *ast.SelectorExpr: + if sel, ok := info.Selections[fun]; ok { + obj = sel.Obj() // method or field + } else { + obj = info.Uses[fun.Sel] // qualified identifier? + } + } + if _, ok := obj.(*types.TypeName); ok { + return nil // T(x) is a conversion, not a call + } + // A Func is required to match instantiations. + if _, ok := obj.(*types.Func); isInstance && !ok { + return nil // Was not a Func. + } + return obj +} + +// StaticCallee returns the target (function or method) of a static function +// call, if any. It returns nil for calls to builtins. +// +// Note: for calls of instantiated functions and methods, StaticCallee returns +// the corresponding generic function or method on the generic type. +func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func { + if f, ok := Callee(info, call).(*types.Func); ok && !interfaceMethod(f) { + return f + } + return nil +} + +func interfaceMethod(f *types.Func) bool { + recv := f.Type().(*types.Signature).Recv() + return recv != nil && types.IsInterface(recv.Type()) +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/imports.go b/vendor/golang.org/x/tools/go/types/typeutil/imports.go new file mode 100644 index 00000000000..b81ce0c330f --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/imports.go @@ -0,0 +1,30 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeutil + +import "go/types" + +// Dependencies returns all dependencies of the specified packages. +// +// Dependent packages appear in topological order: if package P imports +// package Q, Q appears earlier than P in the result. +// The algorithm follows import statements in the order they +// appear in the source code, so the result is a total order. +func Dependencies(pkgs ...*types.Package) []*types.Package { + var result []*types.Package + seen := make(map[*types.Package]bool) + var visit func(pkgs []*types.Package) + visit = func(pkgs []*types.Package) { + for _, p := range pkgs { + if !seen[p] { + seen[p] = true + visit(p.Imports()) + result = append(result, p) + } + } + } + visit(pkgs) + return result +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/map.go b/vendor/golang.org/x/tools/go/types/typeutil/map.go new file mode 100644 index 00000000000..8d824f7140f --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/map.go @@ -0,0 +1,517 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package typeutil defines various utilities for types, such as Map, +// a mapping from types.Type to any values. +package typeutil // import "golang.org/x/tools/go/types/typeutil" + +import ( + "bytes" + "fmt" + "go/types" + "reflect" + + "golang.org/x/tools/internal/typeparams" +) + +// Map is a hash-table-based mapping from types (types.Type) to +// arbitrary any values. The concrete types that implement +// the Type interface are pointers. Since they are not canonicalized, +// == cannot be used to check for equivalence, and thus we cannot +// simply use a Go map. +// +// Just as with map[K]V, a nil *Map is a valid empty map. +// +// Not thread-safe. 
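(Aside, not part of the diff: a usage sketch of the Map documented above. The zero value is ready to use, and structurally identical keys find the same entry, which a plain Go map keyed by == would miss.)

    package main

    import (
        "fmt"
        "go/types"

        "golang.org/x/tools/go/types/typeutil"
    )

    func main() {
        var m typeutil.Map // the zero value is ready to use
        key := types.NewSlice(types.Typ[types.Int])
        m.Set(key, "a []int")

        // A distinct but structurally identical key hits the same entry.
        other := types.NewSlice(types.Typ[types.Int])
        fmt.Println(key == other) // false: *types.Slice values are not canonicalized
        fmt.Println(m.At(other))  // a []int
        fmt.Println(m.Len())      // 1
    }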
+type Map struct { + hasher Hasher // shared by many Maps + table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused + length int // number of map entries +} + +// entry is an entry (key/value association) in a hash bucket. +type entry struct { + key types.Type + value any +} + +// SetHasher sets the hasher used by Map. +// +// All Hashers are functionally equivalent but contain internal state +// used to cache the results of hashing previously seen types. +// +// A single Hasher created by MakeHasher() may be shared among many +// Maps. This is recommended if the instances have many keys in +// common, as it will amortize the cost of hash computation. +// +// A Hasher may grow without bound as new types are seen. Even when a +// type is deleted from the map, the Hasher never shrinks, since other +// types in the map may reference the deleted type indirectly. +// +// Hashers are not thread-safe, and read-only operations such as +// Map.Lookup require updates to the hasher, so a full Mutex lock (not a +// read-lock) is required around all Map operations if a shared +// hasher is accessed from multiple threads. +// +// If SetHasher is not called, the Map will create a private hasher at +// the first call to Insert. +func (m *Map) SetHasher(hasher Hasher) { + m.hasher = hasher +} + +// Delete removes the entry with the given key, if any. +// It returns true if the entry was found. +func (m *Map) Delete(key types.Type) bool { + if m != nil && m.table != nil { + hash := m.hasher.Hash(key) + bucket := m.table[hash] + for i, e := range bucket { + if e.key != nil && types.Identical(key, e.key) { + // We can't compact the bucket as it + // would disturb iterators. + bucket[i] = entry{} + m.length-- + return true + } + } + } + return false +} + +// At returns the map entry for the given key. +// The result is nil if the entry is not present. +func (m *Map) At(key types.Type) any { + if m != nil && m.table != nil { + for _, e := range m.table[m.hasher.Hash(key)] { + if e.key != nil && types.Identical(key, e.key) { + return e.value + } + } + } + return nil +} + +// Set sets the map entry for key to val, +// and returns the previous entry, if any. +func (m *Map) Set(key types.Type, value any) (prev any) { + if m.table != nil { + hash := m.hasher.Hash(key) + bucket := m.table[hash] + var hole *entry + for i, e := range bucket { + if e.key == nil { + hole = &bucket[i] + } else if types.Identical(key, e.key) { + prev = e.value + bucket[i].value = value + return + } + } + + if hole != nil { + *hole = entry{key, value} // overwrite deleted entry + } else { + m.table[hash] = append(bucket, entry{key, value}) + } + } else { + if m.hasher.memo == nil { + m.hasher = MakeHasher() + } + hash := m.hasher.Hash(key) + m.table = map[uint32][]entry{hash: {entry{key, value}}} + } + + m.length++ + return +} + +// Len returns the number of map entries. +func (m *Map) Len() int { + if m != nil { + return m.length + } + return 0 +} + +// Iterate calls function f on each entry in the map in unspecified order. +// +// If f should mutate the map, Iterate provides the same guarantees as +// Go maps: if f deletes a map entry that Iterate has not yet reached, +// f will not be invoked for it, but if f inserts a map entry that +// Iterate has not yet reached, whether or not f will be invoked for +// it is unspecified.
+func (m *Map) Iterate(f func(key types.Type, value any)) { + if m != nil { + for _, bucket := range m.table { + for _, e := range bucket { + if e.key != nil { + f(e.key, e.value) + } + } + } + } +} + +// Keys returns a new slice containing the set of map keys. +// The order is unspecified. +func (m *Map) Keys() []types.Type { + keys := make([]types.Type, 0, m.Len()) + m.Iterate(func(key types.Type, _ any) { + keys = append(keys, key) + }) + return keys +} + +func (m *Map) toString(values bool) string { + if m == nil { + return "{}" + } + var buf bytes.Buffer + fmt.Fprint(&buf, "{") + sep := "" + m.Iterate(func(key types.Type, value any) { + fmt.Fprint(&buf, sep) + sep = ", " + fmt.Fprint(&buf, key) + if values { + fmt.Fprintf(&buf, ": %q", value) + } + }) + fmt.Fprint(&buf, "}") + return buf.String() +} + +// String returns a string representation of the map's entries. +// Values are printed using fmt.Sprintf("%v", v). +// Order is unspecified. +func (m *Map) String() string { + return m.toString(true) +} + +// KeysString returns a string representation of the map's key set. +// Order is unspecified. +func (m *Map) KeysString() string { + return m.toString(false) +} + +//////////////////////////////////////////////////////////////////////// +// Hasher + +// A Hasher maps each type to its hash value. +// For efficiency, a hasher uses memoization; thus its memory +// footprint grows monotonically over time. +// Hashers are not thread-safe. +// Hashers have reference semantics. +// Call MakeHasher to create a Hasher. +type Hasher struct { + memo map[types.Type]uint32 + + // ptrMap records pointer identity. + ptrMap map[any]uint32 + + // sigTParams holds type parameters from the signature being hashed. + // Signatures are considered identical modulo renaming of type parameters, so + // within the scope of a signature type the identity of the signature's type + // parameters is just their index. + // + // Since the language does not currently support referring to uninstantiated + // generic types or functions, and instantiated signatures do not have type + // parameter lists, we should never encounter a second non-empty type + // parameter list when hashing a generic signature. + sigTParams *types.TypeParamList +} + +// MakeHasher returns a new Hasher instance. +func MakeHasher() Hasher { + return Hasher{ + memo: make(map[types.Type]uint32), + ptrMap: make(map[any]uint32), + sigTParams: nil, + } +} + +// Hash computes a hash value for the given type t such that +// Identical(t, t') => Hash(t) == Hash(t'). +func (h Hasher) Hash(t types.Type) uint32 { + hash, ok := h.memo[t] + if !ok { + hash = h.hashFor(t) + h.memo[t] = hash + } + return hash +} + +// hashString computes the Fowler–Noll–Vo hash of s. +func hashString(s string) uint32 { + var h uint32 + for i := 0; i < len(s); i++ { + h ^= uint32(s[i]) + h *= 16777619 + } + return h +} + +// hashFor computes the hash of t. +func (h Hasher) hashFor(t types.Type) uint32 { + // See Identical for rationale. 
+ switch t := t.(type) { + case *types.Basic: + return uint32(t.Kind()) + + case *types.Alias: + return h.Hash(types.Unalias(t)) + + case *types.Array: + return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem()) + + case *types.Slice: + return 9049 + 2*h.Hash(t.Elem()) + + case *types.Struct: + var hash uint32 = 9059 + for i, n := 0, t.NumFields(); i < n; i++ { + f := t.Field(i) + if f.Anonymous() { + hash += 8861 + } + hash += hashString(t.Tag(i)) + hash += hashString(f.Name()) // (ignore f.Pkg) + hash += h.Hash(f.Type()) + } + return hash + + case *types.Pointer: + return 9067 + 2*h.Hash(t.Elem()) + + case *types.Signature: + var hash uint32 = 9091 + if t.Variadic() { + hash *= 8863 + } + + // Use a separate hasher for types inside of the signature, where type + // parameter identity is modified to be (index, constraint). We must use a + // new memo for this hasher as type identity may be affected by this + // masking. For example, in func[T any](*T), the identity of *T depends on + // whether we are mapping the argument in isolation, or recursively as part + // of hashing the signature. + // + // We should never encounter a generic signature while hashing another + // generic signature, but defensively set sigTParams only if h.mask is + // unset. + tparams := t.TypeParams() + if h.sigTParams == nil && tparams.Len() != 0 { + h = Hasher{ + // There may be something more efficient than discarding the existing + // memo, but it would require detecting whether types are 'tainted' by + // references to type parameters. + memo: make(map[types.Type]uint32), + // Re-using ptrMap ensures that pointer identity is preserved in this + // hasher. + ptrMap: h.ptrMap, + sigTParams: tparams, + } + } + + for i := 0; i < tparams.Len(); i++ { + tparam := tparams.At(i) + hash += 7 * h.Hash(tparam.Constraint()) + } + + return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results()) + + case *types.Union: + return h.hashUnion(t) + + case *types.Interface: + // Interfaces are identical if they have the same set of methods, with + // identical names and types, and they have the same set of type + // restrictions. See go/types.identical for more details. + var hash uint32 = 9103 + + // Hash methods. + for i, n := 0, t.NumMethods(); i < n; i++ { + // Method order is not significant. + // Ignore m.Pkg(). + m := t.Method(i) + // Use shallow hash on method signature to + // avoid anonymous interface cycles. + hash += 3*hashString(m.Name()) + 5*h.shallowHash(m.Type()) + } + + // Hash type restrictions. + terms, err := typeparams.InterfaceTermSet(t) + // if err != nil t has invalid type restrictions. + if err == nil { + hash += h.hashTermSet(terms) + } + + return hash + + case *types.Map: + return 9109 + 2*h.Hash(t.Key()) + 3*h.Hash(t.Elem()) + + case *types.Chan: + return 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem()) + + case *types.Named: + hash := h.hashPtr(t.Obj()) + targs := t.TypeArgs() + for i := 0; i < targs.Len(); i++ { + targ := targs.At(i) + hash += 2 * h.Hash(targ) + } + return hash + + case *types.TypeParam: + return h.hashTypeParam(t) + + case *types.Tuple: + return h.hashTuple(t) + } + + panic(fmt.Sprintf("%T: %v", t, t)) +} + +func (h Hasher) hashTuple(tuple *types.Tuple) uint32 { + // See go/types.identicalTypes for rationale. + n := tuple.Len() + hash := 9137 + 2*uint32(n) + for i := 0; i < n; i++ { + hash += 3 * h.Hash(tuple.At(i).Type()) + } + return hash +} + +func (h Hasher) hashUnion(t *types.Union) uint32 { + // Hash type restrictions. 
+	terms, err := typeparams.UnionTermSet(t)
+	// If err != nil, t has invalid type restrictions. Fall back on a non-zero
+	// hash.
+	if err != nil {
+		return 9151
+	}
+	return h.hashTermSet(terms)
+}
+
+func (h Hasher) hashTermSet(terms []*types.Term) uint32 {
+	hash := 9157 + 2*uint32(len(terms))
+	for _, term := range terms {
+		// Term order is not significant.
+		termHash := h.Hash(term.Type())
+		if term.Tilde() {
+			termHash *= 9161
+		}
+		hash += 3 * termHash
+	}
+	return hash
+}
+
+// hashTypeParam returns a hash of the type parameter t, with a hash value
+// depending on whether t is contained in h.sigTParams.
+//
+// If h.sigTParams is set and contains t, then we are in the process of hashing
+// a signature, and the hash value of t must depend only on t's index and
+// constraint: signatures are considered identical modulo type parameter
+// renaming. To avoid infinite recursion, we only hash the type parameter
+// index, and rely on types.Identical to handle signatures where constraints
+// are not identical.
+//
+// Otherwise the hash of t depends only on t's pointer identity.
+func (h Hasher) hashTypeParam(t *types.TypeParam) uint32 {
+	if h.sigTParams != nil {
+		i := t.Index()
+		if i >= 0 && i < h.sigTParams.Len() && t == h.sigTParams.At(i) {
+			return 9173 + 3*uint32(i)
+		}
+	}
+	return h.hashPtr(t.Obj())
+}
+
+// hashPtr hashes the pointer identity of ptr. It uses h.ptrMap to ensure that
+// pointer values are not dependent on the GC.
+func (h Hasher) hashPtr(ptr any) uint32 {
+	if hash, ok := h.ptrMap[ptr]; ok {
+		return hash
+	}
+	hash := uint32(reflect.ValueOf(ptr).Pointer())
+	h.ptrMap[ptr] = hash
+	return hash
+}
+
+// shallowHash computes a hash of t without looking at any of its
+// element Types, to avoid potential anonymous cycles in the types of
+// interface methods.
+//
+// When an unnamed non-empty interface type appears anywhere among the
+// arguments or results of an interface method, there is a potential
+// for endless recursion. Consider:
+//
+//	type X interface { m() []*interface { X } }
+//
+// The problem is that the Methods of the interface in m's result type
+// include m itself; there is no mention of the named type X that
+// might help us break the cycle.
+// (See comment in go/types.identical, case *Interface, for more.)
+func (h Hasher) shallowHash(t types.Type) uint32 {
+	// t is the type of an interface method (Signature),
+	// its params or results (Tuples), or their immediate
+	// elements (mostly Slice, Pointer, Basic, Named),
+	// so there's no need to optimize anything else.
+	switch t := t.(type) {
+	case *types.Alias:
+		return h.shallowHash(types.Unalias(t))
+
+	case *types.Signature:
+		var hash uint32 = 604171
+		if t.Variadic() {
+			hash *= 971767
+		}
+		// The Signature/Tuple recursion is always finite
+		// and invariably shallow.
+ return hash + 1062599*h.shallowHash(t.Params()) + 1282529*h.shallowHash(t.Results()) + + case *types.Tuple: + n := t.Len() + hash := 9137 + 2*uint32(n) + for i := 0; i < n; i++ { + hash += 53471161 * h.shallowHash(t.At(i).Type()) + } + return hash + + case *types.Basic: + return 45212177 * uint32(t.Kind()) + + case *types.Array: + return 1524181 + 2*uint32(t.Len()) + + case *types.Slice: + return 2690201 + + case *types.Struct: + return 3326489 + + case *types.Pointer: + return 4393139 + + case *types.Union: + return 562448657 + + case *types.Interface: + return 2124679 // no recursion here + + case *types.Map: + return 9109 + + case *types.Chan: + return 9127 + + case *types.Named: + return h.hashPtr(t.Obj()) + + case *types.TypeParam: + return h.hashPtr(t.Obj()) + } + panic(fmt.Sprintf("shallowHash: %T: %v", t, t)) +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go b/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go new file mode 100644 index 00000000000..f7666028fe5 --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go @@ -0,0 +1,71 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements a cache of method sets. + +package typeutil + +import ( + "go/types" + "sync" +) + +// A MethodSetCache records the method set of each type T for which +// MethodSet(T) is called so that repeat queries are fast. +// The zero value is a ready-to-use cache instance. +type MethodSetCache struct { + mu sync.Mutex + named map[*types.Named]struct{ value, pointer *types.MethodSet } // method sets for named N and *N + others map[types.Type]*types.MethodSet // all other types +} + +// MethodSet returns the method set of type T. It is thread-safe. +// +// If cache is nil, this function is equivalent to types.NewMethodSet(T). +// Utility functions can thus expose an optional *MethodSetCache +// parameter to clients that care about performance. +func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet { + if cache == nil { + return types.NewMethodSet(T) + } + cache.mu.Lock() + defer cache.mu.Unlock() + + switch T := types.Unalias(T).(type) { + case *types.Named: + return cache.lookupNamed(T).value + + case *types.Pointer: + if N, ok := types.Unalias(T.Elem()).(*types.Named); ok { + return cache.lookupNamed(N).pointer + } + } + + // all other types + // (The map uses pointer equivalence, not type identity.) + mset := cache.others[T] + if mset == nil { + mset = types.NewMethodSet(T) + if cache.others == nil { + cache.others = make(map[types.Type]*types.MethodSet) + } + cache.others[T] = mset + } + return mset +} + +func (cache *MethodSetCache) lookupNamed(named *types.Named) struct{ value, pointer *types.MethodSet } { + if cache.named == nil { + cache.named = make(map[*types.Named]struct{ value, pointer *types.MethodSet }) + } + // Avoid recomputing mset(*T) for each distinct Pointer + // instance whose underlying type is a named type. 
+ msets, ok := cache.named[named] + if !ok { + msets.value = types.NewMethodSet(named) + msets.pointer = types.NewMethodSet(types.NewPointer(named)) + cache.named[named] = msets + } + return msets +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/ui.go b/vendor/golang.org/x/tools/go/types/typeutil/ui.go new file mode 100644 index 00000000000..9dda6a25df7 --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/ui.go @@ -0,0 +1,53 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeutil + +// This file defines utilities for user interfaces that display types. + +import ( + "go/types" +) + +// IntuitiveMethodSet returns the intuitive method set of a type T, +// which is the set of methods you can call on an addressable value of +// that type. +// +// The result always contains MethodSet(T), and is exactly MethodSet(T) +// for interface types and for pointer-to-concrete types. +// For all other concrete types T, the result additionally +// contains each method belonging to *T if there is no identically +// named method on T itself. +// +// This corresponds to user intuition about method sets; +// this function is intended only for user interfaces. +// +// The order of the result is as for types.MethodSet(T). +func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection { + isPointerToConcrete := func(T types.Type) bool { + ptr, ok := types.Unalias(T).(*types.Pointer) + return ok && !types.IsInterface(ptr.Elem()) + } + + var result []*types.Selection + mset := msets.MethodSet(T) + if types.IsInterface(T) || isPointerToConcrete(T) { + for i, n := 0, mset.Len(); i < n; i++ { + result = append(result, mset.At(i)) + } + } else { + // T is some other concrete type. + // Report methods of T and *T, preferring those of T. + pmset := msets.MethodSet(types.NewPointer(T)) + for i, n := 0, pmset.Len(); i < n; i++ { + meth := pmset.At(i) + if m := mset.Lookup(meth.Obj().Pkg(), meth.Obj().Name()); m != nil { + meth = m + } + result = append(result, meth) + } + + } + return result +} diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases.go b/vendor/golang.org/x/tools/internal/aliases/aliases.go index c24c2eee457..b9425f5a209 100644 --- a/vendor/golang.org/x/tools/internal/aliases/aliases.go +++ b/vendor/golang.org/x/tools/internal/aliases/aliases.go @@ -22,11 +22,17 @@ import ( // GODEBUG=gotypesalias=... by invoking the type checker. The Enabled // function is expensive and should be called once per task (e.g. // package import), not once per call to NewAlias. -func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type) *types.TypeName { +// +// Precondition: enabled || len(tparams)==0. +// If materialized aliases are disabled, there must not be any type parameters. 
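
The MethodSetCache and IntuitiveMethodSet helpers added above can be exercised as follows. This is a minimal sketch, not part of the vendored code: it type-checks a small hypothetical package p and queries both the strict method set of T and the "intuitive" one that also includes *T's methods.

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

const src = `package p
type T struct{}
func (T) Value()    {}
func (*T) Pointer() {}
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	pkg, err := new(types.Config).Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	T := pkg.Scope().Lookup("T").Type()

	var cache typeutil.MethodSetCache          // the zero value is ready to use
	fmt.Println(cache.MethodSet(T).Len())      // 1: only Value is in T's method set
	for _, sel := range typeutil.IntuitiveMethodSet(T, &cache) {
		fmt.Println(sel.Obj().Name())      // Pointer, Value (order of MethodSet(*T))
	}
}
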
+func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type, tparams []*types.TypeParam) *types.TypeName { if enabled { tname := types.NewTypeName(pos, pkg, name, nil) - newAlias(tname, rhs) + SetTypeParams(types.NewAlias(tname, rhs), tparams) return tname } + if len(tparams) > 0 { + panic("cannot create an alias with type parameters when gotypesalias is not enabled") + } return types.NewTypeName(pos, pkg, name, rhs) } diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go deleted file mode 100644 index c027b9f315f..00000000000 --- a/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.22 -// +build !go1.22 - -package aliases - -import ( - "go/types" -) - -// Alias is a placeholder for a go/types.Alias for <=1.21. -// It will never be created by go/types. -type Alias struct{} - -func (*Alias) String() string { panic("unreachable") } -func (*Alias) Underlying() types.Type { panic("unreachable") } -func (*Alias) Obj() *types.TypeName { panic("unreachable") } -func Rhs(alias *Alias) types.Type { panic("unreachable") } - -// Unalias returns the type t for go <=1.21. -func Unalias(t types.Type) types.Type { return t } - -func newAlias(name *types.TypeName, rhs types.Type) *Alias { panic("unreachable") } - -// Enabled reports whether [NewAlias] should create [types.Alias] types. -// -// Before go1.22, this function always returns false. -func Enabled() bool { return false } diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go index b3299548419..7716a3331db 100644 --- a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go +++ b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.22 -// +build go1.22 - package aliases import ( @@ -14,31 +11,51 @@ import ( "go/types" ) -// Alias is an alias of types.Alias. -type Alias = types.Alias - // Rhs returns the type on the right-hand side of the alias declaration. -func Rhs(alias *Alias) types.Type { +func Rhs(alias *types.Alias) types.Type { if alias, ok := any(alias).(interface{ Rhs() types.Type }); ok { return alias.Rhs() // go1.23+ } // go1.22's Alias didn't have the Rhs method, // so Unalias is the best we can do. - return Unalias(alias) + return types.Unalias(alias) +} + +// TypeParams returns the type parameter list of the alias. +func TypeParams(alias *types.Alias) *types.TypeParamList { + if alias, ok := any(alias).(interface{ TypeParams() *types.TypeParamList }); ok { + return alias.TypeParams() // go1.23+ + } + return nil +} + +// SetTypeParams sets the type parameters of the alias type. +func SetTypeParams(alias *types.Alias, tparams []*types.TypeParam) { + if alias, ok := any(alias).(interface { + SetTypeParams(tparams []*types.TypeParam) + }); ok { + alias.SetTypeParams(tparams) // go1.23+ + } else if len(tparams) > 0 { + panic("cannot set type parameters of an Alias type in go1.22") + } +} + +// TypeArgs returns the type arguments used to instantiate the Alias type. 
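
The Rhs, TypeParams, and SetTypeParams accessors above all rely on the same forward-compatibility idiom: assert the value against an ad-hoc interface, so that the code compiles against go1.22's types.Alias yet picks up the go1.23 methods when they exist. A stripped-down sketch of the idiom follows; the names here are invented for illustration.

package main

import "fmt"

// widget stands in for a library type that may gain methods in newer
// releases (here, *types.Alias gaining Rhs in go1.23).
type widget struct{}

func describe(w any) string {
	// The assertion succeeds only if the dynamic type actually
	// implements the newer method; otherwise we fall back.
	if w, ok := w.(interface{ Rhs() string }); ok {
		return w.Rhs()
	}
	return "Rhs unavailable; using fallback"
}

func main() { fmt.Println(describe(widget{})) }
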
+func TypeArgs(alias *types.Alias) *types.TypeList { + if alias, ok := any(alias).(interface{ TypeArgs() *types.TypeList }); ok { + return alias.TypeArgs() // go1.23+ + } + return nil // empty (go1.22) } -// Unalias is a wrapper of types.Unalias. -func Unalias(t types.Type) types.Type { return types.Unalias(t) } - -// newAlias is an internal alias around types.NewAlias. -// Direct usage is discouraged as the moment. -// Try to use NewAlias instead. -func newAlias(tname *types.TypeName, rhs types.Type) *Alias { - a := types.NewAlias(tname, rhs) - // TODO(go.dev/issue/65455): Remove kludgy workaround to set a.actual as a side-effect. - Unalias(a) - return a +// Origin returns the generic Alias type of which alias is an instance. +// If alias is not an instance of a generic alias, Origin returns alias. +func Origin(alias *types.Alias) *types.Alias { + if alias, ok := any(alias).(interface{ Origin() *types.Alias }); ok { + return alias.Origin() // go1.23+ + } + return alias // not an instance of a generic alias (go1.22) } // Enabled reports whether [NewAlias] should create [types.Alias] types. @@ -56,7 +73,7 @@ func Enabled() bool { // many tests. Therefore any attempt to cache the result // is just incorrect. fset := token.NewFileSet() - f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", 0) + f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", parser.SkipObjectResolution) pkg, _ := new(types.Config).Check("p", fset, []*ast.File{f}, nil) _, enabled := pkg.Scope().Lookup("A").Type().(*types.Alias) return enabled diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go index d98b0db2a9a..d79a605ed13 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go @@ -87,64 +87,3 @@ func chanDir(d int) types.ChanDir { return 0 } } - -var predeclOnce sync.Once -var predecl []types.Type // initialized lazily - -func predeclared() []types.Type { - predeclOnce.Do(func() { - // initialize lazily to be sure that all - // elements have been initialized before - predecl = []types.Type{ // basic types - types.Typ[types.Bool], - types.Typ[types.Int], - types.Typ[types.Int8], - types.Typ[types.Int16], - types.Typ[types.Int32], - types.Typ[types.Int64], - types.Typ[types.Uint], - types.Typ[types.Uint8], - types.Typ[types.Uint16], - types.Typ[types.Uint32], - types.Typ[types.Uint64], - types.Typ[types.Uintptr], - types.Typ[types.Float32], - types.Typ[types.Float64], - types.Typ[types.Complex64], - types.Typ[types.Complex128], - types.Typ[types.String], - - // basic type aliases - types.Universe.Lookup("byte").Type(), - types.Universe.Lookup("rune").Type(), - - // error - types.Universe.Lookup("error").Type(), - - // untyped types - types.Typ[types.UntypedBool], - types.Typ[types.UntypedInt], - types.Typ[types.UntypedRune], - types.Typ[types.UntypedFloat], - types.Typ[types.UntypedComplex], - types.Typ[types.UntypedString], - types.Typ[types.UntypedNil], - - // package unsafe - types.Typ[types.UnsafePointer], - - // invalid type - types.Typ[types.Invalid], // only appears in packages with errors - - // used internally by gc; never used by this package or in .a files - anyType{}, - } - predecl = append(predecl, additionalPredeclared()...) 
- }) - return predecl -} - -type anyType struct{} - -func (t anyType) Underlying() types.Type { return t } -func (t anyType) String() string { return "any" } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go index 39df91124a4..e6c5d51f8e5 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go @@ -232,14 +232,19 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func // Select appropriate importer. if len(data) > 0 { switch data[0] { - case 'v', 'c', 'd': // binary, till go1.10 + case 'v', 'c', 'd': + // binary: emitted by cmd/compile till go1.10; obsolete. return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) - case 'i': // indexed, till go1.19 + case 'i': + // indexed: emitted by cmd/compile till go1.19; + // now used only for serializing go/types. + // See https://github.com/golang/go/issues/69491. _, pkg, err := IImportData(fset, packages, data[1:], id) return pkg, err - case 'u': // unified, from go1.20 + case 'u': + // unified: emitted by cmd/compile since go1.20. _, pkg, err := UImportData(fset, packages, data[1:size], id) return pkg, err diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go index deeb67f315a..7dfc31a37d7 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go @@ -2,9 +2,227 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Indexed binary package export. -// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go; -// see that file for specification of the format. +// Indexed package export. +// +// The indexed export data format is an evolution of the previous +// binary export data format. Its chief contribution is introducing an +// index table, which allows efficient random access of individual +// declarations and inline function bodies. In turn, this allows +// avoiding unnecessary work for compilation units that import large +// packages. +// +// +// The top-level data format is structured as: +// +// Header struct { +// Tag byte // 'i' +// Version uvarint +// StringSize uvarint +// DataSize uvarint +// } +// +// Strings [StringSize]byte +// Data [DataSize]byte +// +// MainIndex []struct{ +// PkgPath stringOff +// PkgName stringOff +// PkgHeight uvarint +// +// Decls []struct{ +// Name stringOff +// Offset declOff +// } +// } +// +// Fingerprint [8]byte +// +// uvarint means a uint64 written out using uvarint encoding. +// +// []T means a uvarint followed by that many T objects. In other +// words: +// +// Len uvarint +// Elems [Len]T +// +// stringOff means a uvarint that indicates an offset within the +// Strings section. At that offset is another uvarint, followed by +// that many bytes, which form the string value. +// +// declOff means a uvarint that indicates an offset within the Data +// section where the associated declaration can be found. 
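
The header layout described above can be decoded with nothing more than encoding/binary's Uvarint. A minimal sketch follows; the byte values are invented for illustration.

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Header: tag 'i', then Version, StringSize, DataSize as uvarints.
	data := []byte{'i', 2, 5, 10} // hypothetical header
	if data[0] != 'i' {
		panic("not indexed export data")
	}
	rest := data[1:]
	for _, field := range []string{"Version", "StringSize", "DataSize"} {
		v, n := binary.Uvarint(rest) // value and number of bytes consumed
		rest = rest[n:]
		fmt.Printf("%s = %d\n", field, v)
	}
}
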
+//
+// There are five kinds of declarations, distinguished by their first
+// byte:
+//
+//	type Var struct {
+//		Tag  byte // 'V'
+//		Pos  Pos
+//		Type typeOff
+//	}
+//
+//	type Func struct {
+//		Tag        byte // 'F' or 'G'
+//		Pos        Pos
+//		TypeParams []typeOff // only present if Tag == 'G'
+//		Signature  Signature
+//	}
+//
+//	type Const struct {
+//		Tag   byte // 'C'
+//		Pos   Pos
+//		Value Value
+//	}
+//
+//	type Type struct {
+//		Tag        byte // 'T' or 'U'
+//		Pos        Pos
+//		TypeParams []typeOff // only present if Tag == 'U'
+//		Underlying typeOff
+//
+//		Methods []struct{ // omitted if Underlying is an interface type
+//			Pos       Pos
+//			Name      stringOff
+//			Recv      Param
+//			Signature Signature
+//		}
+//	}
+//
+//	type Alias struct {
+//		Tag        byte // 'A' or 'B'
+//		Pos        Pos
+//		TypeParams []typeOff // only present if Tag == 'B'
+//		Type       typeOff
+//	}
+//
+//	// "Automatic" declaration of each typeparam
+//	type TypeParam struct {
+//		Tag        byte // 'P'
+//		Pos        Pos
+//		Implicit   bool
+//		Constraint typeOff
+//	}
+//
+// typeOff means a uvarint that either indicates a predeclared type,
+// or an offset into the Data section. If the uvarint is less than
+// predeclReserved, then it indicates the index into the predeclared
+// types list (see predeclared in bexport.go for order). Otherwise,
+// subtracting predeclReserved yields the offset of a type descriptor.
+//
+// Value means a type, kind, and type-specific value. See
+// (*exportWriter).value for details.
+//
+// There are twelve kinds of type descriptors, distinguished by an itag:
+//
+//	type DefinedType struct {
+//		Tag     itag // definedType
+//		Name    stringOff
+//		PkgPath stringOff
+//	}
+//
+//	type PointerType struct {
+//		Tag  itag // pointerType
+//		Elem typeOff
+//	}
+//
+//	type SliceType struct {
+//		Tag  itag // sliceType
+//		Elem typeOff
+//	}
+//
+//	type ArrayType struct {
+//		Tag  itag // arrayType
+//		Len  uint64
+//		Elem typeOff
+//	}
+//
+//	type ChanType struct {
+//		Tag  itag   // chanType
+//		Dir  uint64 // 1 RecvOnly; 2 SendOnly; 3 SendRecv
+//		Elem typeOff
+//	}
+//
+//	type MapType struct {
+//		Tag  itag // mapType
+//		Key  typeOff
+//		Elem typeOff
+//	}
+//
+//	type FuncType struct {
+//		Tag       itag // signatureType
+//		PkgPath   stringOff
+//		Signature Signature
+//	}
+//
+//	type StructType struct {
+//		Tag     itag // structType
+//		PkgPath stringOff
+//		Fields []struct {
+//			Pos      Pos
+//			Name     stringOff
+//			Type     typeOff
+//			Embedded bool
+//			Note     stringOff
+//		}
+//	}
+//
+//	type InterfaceType struct {
+//		Tag     itag // interfaceType
+//		PkgPath stringOff
+//		Embeddeds []struct {
+//			Pos  Pos
+//			Type typeOff
+//		}
+//		Methods []struct {
+//			Pos       Pos
+//			Name      stringOff
+//			Signature Signature
+//		}
+//	}
+//
+//	// Reference to a type param declaration
+//	type TypeParamType struct {
+//		Tag     itag // typeParamType
+//		Name    stringOff
+//		PkgPath stringOff
+//	}
+//
+//	// Instantiation of a generic type (like List[T2] or List[int])
+//	type InstanceType struct {
+//		Tag      itag // instanceType
+//		Pos      Pos
+//		TypeArgs []typeOff
+//		BaseType typeOff
+//	}
+//
+//	type UnionType struct {
+//		Tag   itag // unionType
+//		Terms []struct {
+//			tilde bool
+//			Type  typeOff
+//		}
+//	}
+//
+//	type Signature struct {
+//		Params   []Param
+//		Results  []Param
+//		Variadic bool // omitted if Results is empty
+//	}
+//
+//	type Param struct {
+//		Pos  Pos
+//		Name stringOff
+//		Type typeOff
+//	}
+//
+// Pos encodes a file:line:column triple, incorporating a simple delta
+// encoding scheme within a data object. See exportWriter.pos for
+// details.
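
Following the stringOff convention in the spec above, resolving a string reference is a two-step read: jump to the offset within the Strings section, read a uvarint length, then take that many bytes. A self-contained sketch, with invented section contents:

package main

import (
	"encoding/binary"
	"fmt"
)

// stringAt resolves a stringOff against the Strings section.
func stringAt(strings []byte, off uint64) string {
	n, k := binary.Uvarint(strings[off:]) // length prefix
	start := off + uint64(k)
	return string(strings[start : start+n])
}

func main() {
	// Two length-prefixed strings: "go" at offset 0, "types" at offset 3.
	section := []byte{2, 'g', 'o', 5, 't', 'y', 'p', 'e', 's'}
	fmt.Println(stringAt(section, 0), stringAt(section, 3)) // go types
}
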
 package gcimporter
 
@@ -24,11 +242,30 @@ import (
 
 	"golang.org/x/tools/go/types/objectpath"
 	"golang.org/x/tools/internal/aliases"
-	"golang.org/x/tools/internal/tokeninternal"
 )
 
 // IExportShallow encodes "shallow" export data for the specified package.
 //
+// For types, we use "shallow" export data. Historically, the Go
+// compiler always produced a summary of the types for a given package
+// that included types from other packages that it indirectly
+// referenced: "deep" export data. This had the advantage that the
+// compiler (and analogous tools such as gopls) need only load one
+// file per direct import. However, it meant that the files tended to
+// get larger based on the level of the package in the import
+// graph. For example, higher-level packages in the kubernetes module
+// have over 1MB of "deep" export data, even when they have almost no
+// content of their own, merely because they mention a major type that
+// references many others. In pathological cases the export data was
+// 300x larger than the source for a package due to this quadratic
+// growth.
+//
+// "Shallow" export data means that the serialized types describe only
+// a single package. If those types mention types from other packages,
+// the type checker may need to request additional packages beyond
+// just the direct imports. Type information for the entire transitive
+// closure of imports is provided (lazily) by the DAG.
+//
 // No promises are made about the encoding other than that it can be decoded by
 // the same version of IExportShallow. If you plan to save export data in the
 // file system, be sure to include a cryptographic digest of the executable in
@@ -51,8 +288,8 @@ func IExportShallow(fset *token.FileSet, pkg *types.Package, reportf ReportFunc)
 }
 
 // IImportShallow decodes "shallow" types.Package data encoded by
-// IExportShallow in the same executable. This function cannot import data from
-// cmd/compile or gcexportdata.Write.
+// [IExportShallow] in the same executable. This function cannot import data
+// from cmd/compile or gcexportdata.Write.
 //
 // The importer calls getPackages to obtain package symbols for all
 // packages mentioned in the export data, including the one being
@@ -223,7 +460,7 @@ func (p *iexporter) encodeFile(w *intWriter, file *token.File, needed []uint64)
 	// Sort the set of needed offsets. Duplicates are harmless.
sort.Slice(needed, func(i, j int) bool { return needed[i] < needed[j] }) - lines := tokeninternal.GetLines(file) // byte offset of each line start + lines := file.Lines() // byte offset of each line start w.uint64(uint64(len(lines))) // Rather than record the entire array of line start offsets, @@ -507,13 +744,13 @@ func (p *iexporter) doDecl(obj types.Object) { case *types.TypeName: t := obj.Type() - if tparam, ok := aliases.Unalias(t).(*types.TypeParam); ok { + if tparam, ok := types.Unalias(t).(*types.TypeParam); ok { w.tag(typeParamTag) w.pos(obj.Pos()) constraint := tparam.Constraint() if p.version >= iexportVersionGo1_18 { implicit := false - if iface, _ := aliases.Unalias(constraint).(*types.Interface); iface != nil { + if iface, _ := types.Unalias(constraint).(*types.Interface); iface != nil { implicit = iface.IsImplicit() } w.bool(implicit) @@ -523,9 +760,22 @@ func (p *iexporter) doDecl(obj types.Object) { } if obj.IsAlias() { - w.tag(aliasTag) + alias, materialized := t.(*types.Alias) // may fail when aliases are not enabled + + var tparams *types.TypeParamList + if materialized { + tparams = aliases.TypeParams(alias) + } + if tparams.Len() == 0 { + w.tag(aliasTag) + } else { + w.tag(genericAliasTag) + } w.pos(obj.Pos()) - if alias, ok := t.(*aliases.Alias); ok { + if tparams.Len() > 0 { + w.tparamList(obj.Name(), tparams, obj.Pkg()) + } + if materialized { // Preserve materialized aliases, // even of non-exported types. t = aliases.Rhs(alias) @@ -744,8 +994,14 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { }() } switch t := t.(type) { - case *aliases.Alias: - // TODO(adonovan): support parameterized aliases, following *types.Named. + case *types.Alias: + if targs := aliases.TypeArgs(t); targs.Len() > 0 { + w.startType(instanceType) + w.pos(t.Obj().Pos()) + w.typeList(targs, pkg) + w.typ(aliases.Origin(t), pkg) + return + } w.startType(aliasType) w.qualifiedType(t.Obj()) @@ -854,7 +1110,7 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { for i := 0; i < n; i++ { ft := t.EmbeddedType(i) tPkg := pkg - if named, _ := aliases.Unalias(ft).(*types.Named); named != nil { + if named, _ := types.Unalias(ft).(*types.Named); named != nil { w.pos(named.Obj().Pos()) } else { w.pos(token.NoPos) diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index 136aa03653c..e260c0e8dbf 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // Indexed package import. -// See cmd/compile/internal/gc/iexport.go for the export data format. +// See iexport.go for the export data format. // This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go. @@ -53,6 +53,7 @@ const ( iexportVersionPosCol = 1 iexportVersionGo1_18 = 2 iexportVersionGenerics = 2 + iexportVersion = iexportVersionGenerics iexportVersionCurrent = 2 ) @@ -540,7 +541,7 @@ func canReuse(def *types.Named, rhs types.Type) bool { if def == nil { return true } - iface, _ := aliases.Unalias(rhs).(*types.Interface) + iface, _ := types.Unalias(rhs).(*types.Interface) if iface == nil { return true } @@ -557,19 +558,28 @@ type importReader struct { prevColumn int64 } +// markBlack is redefined in iimport_go123.go, to work around golang/go#69912. +// +// If TypeNames are not marked black (in the sense of go/types cycle +// detection), they may be mutated when dot-imported. 
Fix this by punching a +// hole through the type, when compiling with Go 1.23. (The bug has been fixed +// for 1.24, but the fix was not worth back-porting). +var markBlack = func(name *types.TypeName) {} + func (r *importReader) obj(name string) { tag := r.byte() pos := r.pos() switch tag { - case aliasTag: + case aliasTag, genericAliasTag: + var tparams []*types.TypeParam + if tag == genericAliasTag { + tparams = r.tparamList() + } typ := r.typ() - // TODO(adonovan): support generic aliases: - // if tag == genericAliasTag { - // tparams := r.tparamList() - // alias.SetTypeParams(tparams) - // } - r.declare(aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ)) + obj := aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ, tparams) + markBlack(obj) // workaround for golang/go#69912 + r.declare(obj) case constTag: typ, val := r.value() @@ -589,6 +599,9 @@ func (r *importReader) obj(name string) { // declaration before recursing. obj := types.NewTypeName(pos, r.currPkg, name, nil) named := types.NewNamed(obj, nil, nil) + + markBlack(obj) // workaround for golang/go#69912 + // Declare obj before calling r.tparamList, so the new type name is recognized // if used in the constraint of one of its own typeparams (see #48280). r.declare(obj) @@ -615,7 +628,7 @@ func (r *importReader) obj(name string) { if targs.Len() > 0 { rparams = make([]*types.TypeParam, targs.Len()) for i := range rparams { - rparams[i] = aliases.Unalias(targs.At(i)).(*types.TypeParam) + rparams[i] = types.Unalias(targs.At(i)).(*types.TypeParam) } } msig := r.signature(recv, rparams, nil) @@ -645,7 +658,7 @@ func (r *importReader) obj(name string) { } constraint := r.typ() if implicit { - iface, _ := aliases.Unalias(constraint).(*types.Interface) + iface, _ := types.Unalias(constraint).(*types.Interface) if iface == nil { errorf("non-interface constraint marked implicit") } @@ -852,7 +865,7 @@ func (r *importReader) typ() types.Type { } func isInterface(t types.Type) bool { - _, ok := aliases.Unalias(t).(*types.Interface) + _, ok := types.Unalias(t).(*types.Interface) return ok } @@ -862,7 +875,7 @@ func (r *importReader) string() string { return r.p.stringAt(r.uint64()) } func (r *importReader) doType(base *types.Named) (res types.Type) { k := r.kind() if debug { - r.p.trace("importing type %d (base: %s)", k, base) + r.p.trace("importing type %d (base: %v)", k, base) r.p.indent++ defer func() { r.p.indent-- @@ -959,7 +972,7 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { methods[i] = method } - typ := newInterface(methods, embeddeds) + typ := types.NewInterfaceType(methods, embeddeds) r.p.interfaceList = append(r.p.interfaceList, typ) return typ @@ -1051,7 +1064,7 @@ func (r *importReader) tparamList() []*types.TypeParam { for i := range xs { // Note: the standard library importer is tolerant of nil types here, // though would panic in SetTypeParams. - xs[i] = aliases.Unalias(r.typ()).(*types.TypeParam) + xs[i] = types.Unalias(r.typ()).(*types.TypeParam) } return xs } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go new file mode 100644 index 00000000000..7586bfaca60 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go @@ -0,0 +1,53 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build go1.22 && !go1.24 + +package gcimporter + +import ( + "go/token" + "go/types" + "unsafe" +) + +// TODO(rfindley): delete this workaround once go1.24 is assured. + +func init() { + // Update markBlack so that it correctly sets the color + // of imported TypeNames. + // + // See the doc comment for markBlack for details. + + type color uint32 + const ( + white color = iota + black + grey + ) + type object struct { + _ *types.Scope + _ token.Pos + _ *types.Package + _ string + _ types.Type + _ uint32 + color_ color + _ token.Pos + } + type typeName struct { + object + } + + // If the size of types.TypeName changes, this will fail to compile. + const delta = int64(unsafe.Sizeof(typeName{})) - int64(unsafe.Sizeof(types.TypeName{})) + var _ [-delta * delta]int + + markBlack = func(obj *types.TypeName) { + type uP = unsafe.Pointer + var ptr *typeName + *(*uP)(uP(&ptr)) = uP(obj) + ptr.color_ = black + } +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go b/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go deleted file mode 100644 index 8b163e3d058..00000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.11 -// +build !go1.11 - -package gcimporter - -import "go/types" - -func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { - named := make([]*types.Named, len(embeddeds)) - for i, e := range embeddeds { - var ok bool - named[i], ok = e.(*types.Named) - if !ok { - panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11") - } - } - return types.NewInterface(methods, named) -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go b/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go deleted file mode 100644 index 49984f40fd8..00000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.11 -// +build go1.11 - -package gcimporter - -import "go/types" - -func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { - return types.NewInterfaceType(methods, embeddeds) -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go b/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go new file mode 100644 index 00000000000..907c8557a54 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go @@ -0,0 +1,91 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gcimporter + +import ( + "go/types" + "sync" +) + +// predecl is a cache for the predeclared types in types.Universe. +// +// Cache a distinct result based on the runtime value of any. +// The pointer value of the any type varies based on GODEBUG settings. 
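
The "var _ [-delta * delta]int" line in iimport_go122.go above is a compile-time size assertion: an array type with a negative length does not compile, and -delta*delta is negative exactly when delta is non-zero. A standalone sketch of the trick, using two placeholder structs rather than types.TypeName:

package main

import (
	"fmt"
	"unsafe"
)

type original struct{ a, b uint64 } // stands in for the real library type
type mirror struct{ a, b uint64 }   // stands in for the local copy of its layout

// delta is 0 when the layouts agree; any drift makes the array
// length below negative, and the package fails to compile.
const delta = int64(unsafe.Sizeof(mirror{})) - int64(unsafe.Sizeof(original{}))

var _ [-delta * delta]int

func main() { fmt.Println("sizes match:", unsafe.Sizeof(mirror{})) }
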
+var predeclMu sync.Mutex +var predecl map[types.Type][]types.Type + +func predeclared() []types.Type { + anyt := types.Universe.Lookup("any").Type() + + predeclMu.Lock() + defer predeclMu.Unlock() + + if pre, ok := predecl[anyt]; ok { + return pre + } + + if predecl == nil { + predecl = make(map[types.Type][]types.Type) + } + + decls := []types.Type{ // basic types + types.Typ[types.Bool], + types.Typ[types.Int], + types.Typ[types.Int8], + types.Typ[types.Int16], + types.Typ[types.Int32], + types.Typ[types.Int64], + types.Typ[types.Uint], + types.Typ[types.Uint8], + types.Typ[types.Uint16], + types.Typ[types.Uint32], + types.Typ[types.Uint64], + types.Typ[types.Uintptr], + types.Typ[types.Float32], + types.Typ[types.Float64], + types.Typ[types.Complex64], + types.Typ[types.Complex128], + types.Typ[types.String], + + // basic type aliases + types.Universe.Lookup("byte").Type(), + types.Universe.Lookup("rune").Type(), + + // error + types.Universe.Lookup("error").Type(), + + // untyped types + types.Typ[types.UntypedBool], + types.Typ[types.UntypedInt], + types.Typ[types.UntypedRune], + types.Typ[types.UntypedFloat], + types.Typ[types.UntypedComplex], + types.Typ[types.UntypedString], + types.Typ[types.UntypedNil], + + // package unsafe + types.Typ[types.UnsafePointer], + + // invalid type + types.Typ[types.Invalid], // only appears in packages with errors + + // used internally by gc; never used by this package or in .a files + anyType{}, + + // comparable + types.Universe.Lookup("comparable").Type(), + + // any + anyt, + } + + predecl[anyt] = decls + return decls +} + +type anyType struct{} + +func (t anyType) Underlying() types.Type { return t } +func (t anyType) String() string { return "any" } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go b/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go deleted file mode 100644 index 0cd3b91b65a..00000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gcimporter - -import "go/types" - -const iexportVersion = iexportVersionGenerics - -// additionalPredeclared returns additional predeclared types in go.1.18. -func additionalPredeclared() []types.Type { - return []types.Type{ - // comparable - types.Universe.Lookup("comparable").Type(), - - // any - types.Universe.Lookup("any").Type(), - } -} - -// See cmd/compile/internal/types.SplitVargenSuffix. -func splitVargenSuffix(name string) (base, suffix string) { - i := len(name) - for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' { - i-- - } - const dot = "·" - if i >= len(dot) && name[i-len(dot):i] == dot { - i -= len(dot) - return name[:i], name[i:] - } - return name, "" -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go deleted file mode 100644 index 38b624cadab..00000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !goexperiment.unified -// +build !goexperiment.unified - -package gcimporter - -const unifiedIR = false diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go deleted file mode 100644 index b5118d0b3a5..00000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build goexperiment.unified -// +build goexperiment.unified - -package gcimporter - -const unifiedIR = true diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go index 2c077068877..1db408613c9 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go @@ -52,8 +52,7 @@ func (pr *pkgReader) later(fn func()) { // See cmd/compile/internal/noder.derivedInfo. type derivedInfo struct { - idx pkgbits.Index - needed bool + idx pkgbits.Index } // See cmd/compile/internal/noder.typeInfo. @@ -110,13 +109,17 @@ func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[st r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic) pkg := r.pkg() - r.Bool() // has init + if r.Version().Has(pkgbits.HasInit) { + r.Bool() + } for i, n := 0, r.Len(); i < n; i++ { // As if r.obj(), but avoiding the Scope.Lookup call, // to avoid eager loading of imports. r.Sync(pkgbits.SyncObject) - assert(!r.Bool()) + if r.Version().Has(pkgbits.DerivedFuncInstance) { + assert(!r.Bool()) + } r.p.objIdx(r.Reloc(pkgbits.RelocObj)) assert(r.Len() == 0) } @@ -165,7 +168,7 @@ type readerDict struct { // tparams is a slice of the constructed TypeParams for the element. tparams []*types.TypeParam - // devived is a slice of types derived from tparams, which may be + // derived is a slice of types derived from tparams, which may be // instantiated while reading the current element. derived []derivedInfo derivedTypes []types.Type // lazily instantiated from derived @@ -471,7 +474,9 @@ func (r *reader) param() *types.Var { func (r *reader) obj() (types.Object, []types.Type) { r.Sync(pkgbits.SyncObject) - assert(!r.Bool()) + if r.Version().Has(pkgbits.DerivedFuncInstance) { + assert(!r.Bool()) + } pkg, name := r.p.objIdx(r.Reloc(pkgbits.RelocObj)) obj := pkgScope(pkg).Lookup(name) @@ -525,8 +530,12 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { case pkgbits.ObjAlias: pos := r.pos() + var tparams []*types.TypeParam + if r.Version().Has(pkgbits.AliasTypeParamNames) { + tparams = r.typeParamNames() + } typ := r.typ() - declare(aliases.NewAlias(r.p.aliases, pos, objPkg, objName, typ)) + declare(aliases.NewAlias(r.p.aliases, pos, objPkg, objName, typ, tparams)) case pkgbits.ObjConst: pos := r.pos() @@ -553,7 +562,7 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { // If the underlying type is an interface, we need to // duplicate its methods so we can replace the receiver // parameter's type (#49906). 
-		if iface, ok := aliases.Unalias(underlying).(*types.Interface); ok && iface.NumExplicitMethods() != 0 {
+		if iface, ok := types.Unalias(underlying).(*types.Interface); ok && iface.NumExplicitMethods() != 0 {
 			methods := make([]*types.Func, iface.NumExplicitMethods())
 			for i := range methods {
 				fn := iface.ExplicitMethod(i)
@@ -632,7 +641,10 @@ func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict {
 		dict.derived = make([]derivedInfo, r.Len())
 		dict.derivedTypes = make([]types.Type, len(dict.derived))
 		for i := range dict.derived {
-			dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()}
+			dict.derived[i] = derivedInfo{idx: r.Reloc(pkgbits.RelocType)}
+			if r.Version().Has(pkgbits.DerivedInfoNeeded) {
+				assert(!r.Bool())
+			}
 		}
 
 		pr.retireReader(r)
@@ -726,3 +738,17 @@ func pkgScope(pkg *types.Package) *types.Scope {
 	}
 	return types.Universe
 }
+
+// See cmd/compile/internal/types.SplitVargenSuffix.
+func splitVargenSuffix(name string) (base, suffix string) {
+	i := len(name)
+	for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' {
+		i--
+	}
+	const dot = "·"
+	if i >= len(dot) && name[i-len(dot):i] == dot {
+		i -= len(dot)
+		return name[:i], name[i:]
+	}
+	return name, ""
+}
diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go
index 2e59ff8558c..e333efc87f9 100644
--- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go
+++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go
@@ -16,7 +16,6 @@ import (
 	"os"
 	"os/exec"
 	"path/filepath"
-	"reflect"
 	"regexp"
 	"runtime"
 	"strconv"
@@ -250,16 +249,13 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
 	cmd.Stdout = stdout
 	cmd.Stderr = stderr
 
-	// cmd.WaitDelay was added only in go1.20 (see #50436).
-	if waitDelay := reflect.ValueOf(cmd).Elem().FieldByName("WaitDelay"); waitDelay.IsValid() {
-		// https://go.dev/issue/59541: don't wait forever copying stderr
-		// after the command has exited.
-		// After CL 484741 we copy stdout manually, so we we'll stop reading that as
-		// soon as ctx is done. However, we also don't want to wait around forever
-		// for stderr. Give a much-longer-than-reasonable delay and then assume that
-		// something has wedged in the kernel or runtime.
-		waitDelay.Set(reflect.ValueOf(30 * time.Second))
-	}
+	// https://go.dev/issue/59541: don't wait forever copying stderr
+	// after the command has exited.
+	// After CL 484741 we copy stdout manually, so we'll stop reading that as
+	// soon as ctx is done. However, we also don't want to wait around forever
+	// for stderr. Give a much-longer-than-reasonable delay and then assume that
+	// something has wedged in the kernel or runtime.
+	cmd.WaitDelay = 30 * time.Second
 
 	// The cwd gets resolved to the real path. On Darwin, where
 	// /tmp is a symlink, this breaks anything that expects the
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go
index 2acd85851e3..f6cb37c5c3d 100644
--- a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go
+++ b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go
@@ -21,10 +21,7 @@ import (
 // export data.
 type PkgDecoder struct {
 	// version is the file format version.
-	version uint32
-
-	// aliases determines whether types.Aliases should be created
-	aliases bool
+	version Version
 
 	// sync indicates whether the file uses sync markers.
sync bool @@ -71,12 +68,9 @@ func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync } // NewPkgDecoder returns a PkgDecoder initialized to read the Unified // IR export data from input. pkgPath is the package path for the // compilation unit that produced the export data. -// -// TODO(mdempsky): Remove pkgPath parameter; unneeded since CL 391014. func NewPkgDecoder(pkgPath, input string) PkgDecoder { pr := PkgDecoder{ pkgPath: pkgPath, - //aliases: aliases.Enabled(), } // TODO(mdempsky): Implement direct indexing of input string to @@ -84,14 +78,15 @@ func NewPkgDecoder(pkgPath, input string) PkgDecoder { r := strings.NewReader(input) - assert(binary.Read(r, binary.LittleEndian, &pr.version) == nil) + var ver uint32 + assert(binary.Read(r, binary.LittleEndian, &ver) == nil) + pr.version = Version(ver) - switch pr.version { - default: - panic(fmt.Errorf("unsupported version: %v", pr.version)) - case 0: - // no flags - case 1: + if pr.version >= numVersions { + panic(fmt.Errorf("cannot decode %q, export data version %d is greater than maximum supported version %d", pkgPath, pr.version, numVersions-1)) + } + + if pr.version.Has(Flags) { var flags uint32 assert(binary.Read(r, binary.LittleEndian, &flags) == nil) pr.sync = flags&flagSyncMarkers != 0 @@ -106,7 +101,9 @@ func NewPkgDecoder(pkgPath, input string) PkgDecoder { assert(err == nil) pr.elemData = input[pos:] - assert(len(pr.elemData)-8 == int(pr.elemEnds[len(pr.elemEnds)-1])) + + const fingerprintSize = 8 + assert(len(pr.elemData)-fingerprintSize == int(pr.elemEnds[len(pr.elemEnds)-1])) return pr } @@ -140,7 +137,7 @@ func (pr *PkgDecoder) AbsIdx(k RelocKind, idx Index) int { absIdx += int(pr.elemEndsEnds[k-1]) } if absIdx >= int(pr.elemEndsEnds[k]) { - errorf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds) + panicf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds) } return absIdx } @@ -197,9 +194,7 @@ func (pr *PkgDecoder) NewDecoderRaw(k RelocKind, idx Index) Decoder { Idx: idx, } - // TODO(mdempsky) r.data.Reset(...) after #44505 is resolved. - r.Data = *strings.NewReader(pr.DataIdx(k, idx)) - + r.Data.Reset(pr.DataIdx(k, idx)) r.Sync(SyncRelocs) r.Relocs = make([]RelocEnt, r.Len()) for i := range r.Relocs { @@ -248,7 +243,7 @@ type Decoder struct { func (r *Decoder) checkErr(err error) { if err != nil { - errorf("unexpected decoding error: %w", err) + panicf("unexpected decoding error: %w", err) } } @@ -519,3 +514,6 @@ func (pr *PkgDecoder) PeekObj(idx Index) (string, string, CodeObj) { return path, name, tag } + +// Version reports the version of the bitstream. +func (w *Decoder) Version() Version { return w.common.version } diff --git a/vendor/golang.org/x/tools/internal/pkgbits/encoder.go b/vendor/golang.org/x/tools/internal/pkgbits/encoder.go index 6482617a4fc..c17a12399d0 100644 --- a/vendor/golang.org/x/tools/internal/pkgbits/encoder.go +++ b/vendor/golang.org/x/tools/internal/pkgbits/encoder.go @@ -12,18 +12,15 @@ import ( "io" "math/big" "runtime" + "strings" ) -// currentVersion is the current version number. -// -// - v0: initial prototype -// -// - v1: adds the flags uint32 word -const currentVersion uint32 = 1 - // A PkgEncoder provides methods for encoding a package's Unified IR // export data. type PkgEncoder struct { + // version of the bitstream. + version Version + // elems holds the bitstream for previously encoded elements. 
elems [numRelocs][]string @@ -47,8 +44,9 @@ func (pw *PkgEncoder) SyncMarkers() bool { return pw.syncFrames >= 0 } // export data files, but can help diagnosing desync errors in // higher-level Unified IR reader/writer code. If syncFrames is // negative, then sync markers are omitted entirely. -func NewPkgEncoder(syncFrames int) PkgEncoder { +func NewPkgEncoder(version Version, syncFrames int) PkgEncoder { return PkgEncoder{ + version: version, stringsIdx: make(map[string]Index), syncFrames: syncFrames, } @@ -64,13 +62,15 @@ func (pw *PkgEncoder) DumpTo(out0 io.Writer) (fingerprint [8]byte) { assert(binary.Write(out, binary.LittleEndian, x) == nil) } - writeUint32(currentVersion) + writeUint32(uint32(pw.version)) - var flags uint32 - if pw.SyncMarkers() { - flags |= flagSyncMarkers + if pw.version.Has(Flags) { + var flags uint32 + if pw.SyncMarkers() { + flags |= flagSyncMarkers + } + writeUint32(flags) } - writeUint32(flags) // Write elemEndsEnds. var sum uint32 @@ -159,7 +159,7 @@ type Encoder struct { // Flush finalizes the element's bitstream and returns its Index. func (w *Encoder) Flush() Index { - var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved + var sb strings.Builder // Backup the data so we write the relocations at the front. var tmp bytes.Buffer @@ -189,7 +189,7 @@ func (w *Encoder) Flush() Index { func (w *Encoder) checkErr(err error) { if err != nil { - errorf("unexpected encoding error: %v", err) + panicf("unexpected encoding error: %v", err) } } @@ -320,8 +320,14 @@ func (w *Encoder) Code(c Code) { // section (if not already present), and then writing a relocation // into the element bitstream. func (w *Encoder) String(s string) { + w.StringRef(w.p.StringIdx(s)) +} + +// StringRef writes a reference to the given index, which must be a +// previously encoded string value. +func (w *Encoder) StringRef(idx Index) { w.Sync(SyncString) - w.Reloc(RelocString, w.p.StringIdx(s)) + w.Reloc(RelocString, idx) } // Strings encodes and writes a variable-length slice of strings into @@ -348,7 +354,7 @@ func (w *Encoder) Value(val constant.Value) { func (w *Encoder) scalar(val constant.Value) { switch v := constant.Val(val).(type) { default: - errorf("unhandled %v (%v)", val, val.Kind()) + panicf("unhandled %v (%v)", val, val.Kind()) case bool: w.Code(ValBool) w.Bool(v) @@ -381,3 +387,6 @@ func (w *Encoder) bigFloat(v *big.Float) { b := v.Append(nil, 'p', -1) w.String(string(b)) // TODO: More efficient encoding. } + +// Version reports the version of the bitstream. +func (w *Encoder) Version() Version { return w.p.version } diff --git a/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go b/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go deleted file mode 100644 index 5294f6a63ed..00000000000 --- a/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !go1.7 -// +build !go1.7 - -// TODO(mdempsky): Remove after #44505 is resolved - -package pkgbits - -import "runtime" - -func walkFrames(pcs []uintptr, visit frameVisitor) { - for _, pc := range pcs { - fn := runtime.FuncForPC(pc) - file, line := fn.FileLine(pc) - - visit(file, line, fn.Name(), pc-fn.Entry()) - } -} diff --git a/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go b/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go deleted file mode 100644 index 2324ae7adfe..00000000000 --- a/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.7 -// +build go1.7 - -package pkgbits - -import "runtime" - -// walkFrames calls visit for each call frame represented by pcs. -// -// pcs should be a slice of PCs, as returned by runtime.Callers. -func walkFrames(pcs []uintptr, visit frameVisitor) { - if len(pcs) == 0 { - return - } - - frames := runtime.CallersFrames(pcs) - for { - frame, more := frames.Next() - visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry) - if !more { - return - } - } -} diff --git a/vendor/golang.org/x/tools/internal/pkgbits/support.go b/vendor/golang.org/x/tools/internal/pkgbits/support.go index ad26d3b28ca..50534a29553 100644 --- a/vendor/golang.org/x/tools/internal/pkgbits/support.go +++ b/vendor/golang.org/x/tools/internal/pkgbits/support.go @@ -12,6 +12,6 @@ func assert(b bool) { } } -func errorf(format string, args ...interface{}) { +func panicf(format string, args ...any) { panic(fmt.Errorf(format, args...)) } diff --git a/vendor/golang.org/x/tools/internal/pkgbits/sync.go b/vendor/golang.org/x/tools/internal/pkgbits/sync.go index 5bd51ef7170..1520b73afb9 100644 --- a/vendor/golang.org/x/tools/internal/pkgbits/sync.go +++ b/vendor/golang.org/x/tools/internal/pkgbits/sync.go @@ -6,6 +6,7 @@ package pkgbits import ( "fmt" + "runtime" "strings" ) @@ -23,6 +24,24 @@ func fmtFrames(pcs ...uintptr) []string { type frameVisitor func(file string, line int, name string, offset uintptr) +// walkFrames calls visit for each call frame represented by pcs. +// +// pcs should be a slice of PCs, as returned by runtime.Callers. +func walkFrames(pcs []uintptr, visit frameVisitor) { + if len(pcs) == 0 { + return + } + + frames := runtime.CallersFrames(pcs) + for { + frame, more := frames.Next() + visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry) + if !more { + return + } + } +} + // SyncMarker is an enum type that represents markers that may be // written to export data to ensure the reader and writer stay // synchronized. 
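
walkFrames above is the standard runtime.CallersFrames loop. The same pattern in isolation, as a minimal runnable sketch:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	pcs := make([]uintptr, 8)
	n := runtime.Callers(1, pcs) // skip runtime.Callers itself
	frames := runtime.CallersFrames(pcs[:n])
	for {
		frame, more := frames.Next()
		fmt.Printf("%s:%d %s +0x%x\n",
			frame.File, frame.Line, frame.Function, frame.PC-frame.Entry)
		if !more {
			break
		}
	}
}
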
@@ -110,4 +129,8 @@ const ( SyncStmtsEnd SyncLabel SyncOptLabel + + SyncMultiExpr + SyncRType + SyncConvRTTI ) diff --git a/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go b/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go index 4a5b0ca5f2f..582ad56d3e0 100644 --- a/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go +++ b/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go @@ -74,11 +74,14 @@ func _() { _ = x[SyncStmtsEnd-64] _ = x[SyncLabel-65] _ = x[SyncOptLabel-66] + _ = x[SyncMultiExpr-67] + _ = x[SyncRType-68] + _ = x[SyncConvRTTI-69] } -const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabel" +const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabelMultiExprRTypeConvRTTI" -var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458} +var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458, 467, 472, 480} func (i SyncMarker) String() string { i -= 1 diff --git a/vendor/golang.org/x/tools/internal/pkgbits/version.go b/vendor/golang.org/x/tools/internal/pkgbits/version.go new file mode 100644 index 00000000000..53af9df22b3 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/pkgbits/version.go @@ -0,0 +1,85 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +// Version indicates a version of a unified IR bitstream. +// Each Version indicates the addition, removal, or change of +// new data in the bitstream. +// +// These are serialized to disk and the interpretation remains fixed. +type Version uint32 + +const ( + // V0: initial prototype. + // + // All data that is not assigned a Field is in version V0 + // and has not been deprecated. + V0 Version = iota + + // V1: adds the Flags uint32 word + V1 + + // V2: removes unused legacy fields and supports type parameters for aliases. 
+	//   - remove the legacy "has init" bool from the public root
+	//   - remove obj's "derived func instance" bool
+	//   - add a TypeParamNames field to ObjAlias
+	//   - remove derived info "needed" bool
+	V2
+
+	numVersions = iota
+)
+
+// Field denotes a unit of data in the serialized unified IR bitstream.
+// It is conceptually like a field in a structure.
+//
+// We only really need Fields when the data may or may not be present
+// in a stream based on the Version of the bitstream.
+//
+// Unlike much of pkgbits, Fields are not serialized and
+// can change values as needed.
+type Field int
+
+const (
+	// Flags is a uint32 in the header of a bitstream
+	// that is used to indicate whether optional features are enabled.
+	Flags Field = iota
+
+	// Deprecated: HasInit was a bool indicating whether a package
+	// has any init functions.
+	HasInit
+
+	// Deprecated: DerivedFuncInstance was a bool indicating
+	// whether an object was a function instance.
+	DerivedFuncInstance
+
+	// ObjAlias has a list of TypeParamNames.
+	AliasTypeParamNames
+
+	// Deprecated: DerivedInfoNeeded was a bool indicating
+	// whether a type was a derived type.
+	DerivedInfoNeeded
+
+	numFields = iota
+)
+
+// introduced is the version a field was added.
+var introduced = [numFields]Version{
+	Flags:               V1,
+	AliasTypeParamNames: V2,
+}
+
+// removed is the version a field was removed in, or 0 for fields
+// that have not yet been deprecated.
+// (So removed[f]-1 is the last version it is included in.)
+var removed = [numFields]Version{
+	HasInit:             V2,
+	DerivedFuncInstance: V2,
+	DerivedInfoNeeded:   V2,
+}
+
+// Has reports whether field f is present in a bitstream at version v.
+func (v Version) Has(f Field) bool {
+	return introduced[f] <= v && (v < removed[f] || removed[f] == V0)
+}
diff --git a/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/vendor/golang.org/x/tools/internal/stdlib/manifest.go
index a928acf29fa..cdaac9ab34d 100644
--- a/vendor/golang.org/x/tools/internal/stdlib/manifest.go
+++ b/vendor/golang.org/x/tools/internal/stdlib/manifest.go
@@ -951,7 +951,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"ParseSessionState", Func, 21},
 		{"QUICClient", Func, 21},
 		{"QUICConfig", Type, 21},
-		{"QUICConfig.EnableStoreSessionEvent", Field, 23},
+		{"QUICConfig.EnableSessionEvents", Field, 23},
 		{"QUICConfig.TLSConfig", Field, 21},
 		{"QUICConn", Type, 21},
 		{"QUICEncryptionLevel", Type, 21},
diff --git a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go
deleted file mode 100644
index ff9437a36cd..00000000000
--- a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// package tokeninternal provides access to some internal features of the token
-// package.
-package tokeninternal
-
-import (
-	"fmt"
-	"go/token"
-	"sort"
-	"sync"
-	"unsafe"
-)
-
-// GetLines returns the table of line-start offsets from a token.File.
-func GetLines(file *token.File) []int {
-	// token.File has a Lines method on Go 1.21 and later.
-	if file, ok := (interface{})(file).(interface{ Lines() []int }); ok {
-		return file.Lines()
-	}
-
-	// This declaration must match that of token.File.
-	// This creates a risk of dependency skew.
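An aside on the Version/Field scheme added in version.go above: field presence reduces to a half-open range check, introduced[f] <= v < removed[f], with removed[f] == V0 standing for "never removed". A self-contained sketch of the pattern (the constant names mirror the vendored code, but the program itself is illustrative, not part of it):

package main

import "fmt"

type Version uint32
type Field int

const (
	V0 Version = iota
	V1
	V2
)

const (
	Flags               Field = iota // added in V1
	HasInit                          // present through V1, removed in V2
	AliasTypeParamNames              // added in V2
	numFields = iota
)

var introduced = [numFields]Version{Flags: V1, AliasTypeParamNames: V2}
var removed = [numFields]Version{HasInit: V2}

// Has reports whether field f is present in a bitstream at version v.
func (v Version) Has(f Field) bool {
	return introduced[f] <= v && (v < removed[f] || removed[f] == V0)
}

func main() {
	fmt.Println(V0.Has(Flags))               // false: Flags arrives in V1
	fmt.Println(V2.Has(Flags))               // true: never removed
	fmt.Println(V1.Has(HasInit))             // true: last version carrying it
	fmt.Println(V2.Has(HasInit))             // false: removed in V2
	fmt.Println(V2.Has(AliasTypeParamNames)) // true
}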
- // For now we check that the size of the two - // declarations is the same, on the (fragile) assumption - // that future changes would add fields. - type tokenFile119 struct { - _ string - _ int - _ int - mu sync.Mutex // we're not complete monsters - lines []int - _ []struct{} - } - - if unsafe.Sizeof(*file) != unsafe.Sizeof(tokenFile119{}) { - panic("unexpected token.File size") - } - var ptr *tokenFile119 - type uP = unsafe.Pointer - *(*uP)(uP(&ptr)) = uP(file) - ptr.mu.Lock() - defer ptr.mu.Unlock() - return ptr.lines -} - -// AddExistingFiles adds the specified files to the FileSet if they -// are not already present. It panics if any pair of files in the -// resulting FileSet would overlap. -func AddExistingFiles(fset *token.FileSet, files []*token.File) { - // Punch through the FileSet encapsulation. - type tokenFileSet struct { - // This type remained essentially consistent from go1.16 to go1.21. - mutex sync.RWMutex - base int - files []*token.File - _ *token.File // changed to atomic.Pointer[token.File] in go1.19 - } - - // If the size of token.FileSet changes, this will fail to compile. - const delta = int64(unsafe.Sizeof(tokenFileSet{})) - int64(unsafe.Sizeof(token.FileSet{})) - var _ [-delta * delta]int - - type uP = unsafe.Pointer - var ptr *tokenFileSet - *(*uP)(uP(&ptr)) = uP(fset) - ptr.mutex.Lock() - defer ptr.mutex.Unlock() - - // Merge and sort. - newFiles := append(ptr.files, files...) - sort.Slice(newFiles, func(i, j int) bool { - return newFiles[i].Base() < newFiles[j].Base() - }) - - // Reject overlapping files. - // Discard adjacent identical files. - out := newFiles[:0] - for i, file := range newFiles { - if i > 0 { - prev := newFiles[i-1] - if file == prev { - continue - } - if prev.Base()+prev.Size()+1 > file.Base() { - panic(fmt.Sprintf("file %s (%d-%d) overlaps with file %s (%d-%d)", - prev.Name(), prev.Base(), prev.Base()+prev.Size(), - file.Name(), file.Base(), file.Base()+file.Size())) - } - } - out = append(out, file) - } - newFiles = out - - ptr.files = newFiles - - // Advance FileSet.Base(). - if len(newFiles) > 0 { - last := newFiles[len(newFiles)-1] - newBase := last.Base() + last.Size() + 1 - if ptr.base < newBase { - ptr.base = newBase - } - } -} - -// FileSetFor returns a new FileSet containing a sequence of new Files with -// the same base, size, and line as the input files, for use in APIs that -// require a FileSet. -// -// Precondition: the input files must be non-overlapping, and sorted in order -// of their Base. -func FileSetFor(files ...*token.File) *token.FileSet { - fset := token.NewFileSet() - for _, f := range files { - f2 := fset.AddFile(f.Name(), f.Base(), f.Size()) - lines := GetLines(f) - f2.SetLines(lines) - } - return fset -} - -// CloneFileSet creates a new FileSet holding all files in fset. It does not -// create copies of the token.Files in fset: they are added to the resulting -// FileSet unmodified. -func CloneFileSet(fset *token.FileSet) *token.FileSet { - var files []*token.File - fset.Iterate(func(f *token.File) bool { - files = append(files, f) - return true - }) - newFileSet := token.NewFileSet() - AddExistingFiles(newFileSet, files) - return newFileSet -} diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go new file mode 100644 index 00000000000..0b84acc5c7f --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/common.go @@ -0,0 +1,140 @@ +// Copyright 2021 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package typeparams contains common utilities for writing tools that +// interact with generic Go code, as introduced with Go 1.18. It +// supplements the standard library APIs. Notably, the StructuralTerms +// API computes a minimal representation of the structural +// restrictions on a type parameter. +// +// An external version of these APIs is available in the +// golang.org/x/exp/typeparams module. +package typeparams + +import ( + "go/ast" + "go/token" + "go/types" +) + +// UnpackIndexExpr extracts data from AST nodes that represent index +// expressions. +// +// For an ast.IndexExpr, the resulting indices slice will contain exactly one +// index expression. For an ast.IndexListExpr (go1.18+), it may have a variable +// number of index expressions. +// +// For nodes that don't represent index expressions, the first return value of +// UnpackIndexExpr will be nil. +func UnpackIndexExpr(n ast.Node) (x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) { + switch e := n.(type) { + case *ast.IndexExpr: + return e.X, e.Lbrack, []ast.Expr{e.Index}, e.Rbrack + case *ast.IndexListExpr: + return e.X, e.Lbrack, e.Indices, e.Rbrack + } + return nil, token.NoPos, nil, token.NoPos +} + +// PackIndexExpr returns an *ast.IndexExpr or *ast.IndexListExpr, depending on +// the cardinality of indices. Calling PackIndexExpr with len(indices) == 0 +// will panic. +func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) ast.Expr { + switch len(indices) { + case 0: + panic("empty indices") + case 1: + return &ast.IndexExpr{ + X: x, + Lbrack: lbrack, + Index: indices[0], + Rbrack: rbrack, + } + default: + return &ast.IndexListExpr{ + X: x, + Lbrack: lbrack, + Indices: indices, + Rbrack: rbrack, + } + } +} + +// IsTypeParam reports whether t is a type parameter (or an alias of one). +func IsTypeParam(t types.Type) bool { + _, ok := types.Unalias(t).(*types.TypeParam) + return ok +} + +// GenericAssignableTo is a generalization of types.AssignableTo that +// implements the following rule for uninstantiated generic types: +// +// If V and T are generic named types, then V is considered assignable to T if, +// for every possible instantiation of V[A_1, ..., A_N], the instantiation +// T[A_1, ..., A_N] is valid and V[A_1, ..., A_N] implements T[A_1, ..., A_N]. +// +// If T has structural constraints, they must be satisfied by V. +// +// For example, consider the following type declarations: +// +// type Interface[T any] interface { +// Accept(T) +// } +// +// type Container[T any] struct { +// Element T +// } +// +// func (c Container[T]) Accept(t T) { c.Element = t } +// +// In this case, GenericAssignableTo reports that instantiations of Container +// are assignable to the corresponding instantiation of Interface. +func GenericAssignableTo(ctxt *types.Context, V, T types.Type) bool { + V = types.Unalias(V) + T = types.Unalias(T) + + // If V and T are not both named, or do not have matching non-empty type + // parameter lists, fall back on types.AssignableTo. 
+ + VN, Vnamed := V.(*types.Named) + TN, Tnamed := T.(*types.Named) + if !Vnamed || !Tnamed { + return types.AssignableTo(V, T) + } + + vtparams := VN.TypeParams() + ttparams := TN.TypeParams() + if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || VN.TypeArgs().Len() != 0 || TN.TypeArgs().Len() != 0 { + return types.AssignableTo(V, T) + } + + // V and T have the same (non-zero) number of type params. Instantiate both + // with the type parameters of V. This must always succeed for V, and will + // succeed for T if and only if the type set of each type parameter of V is a + // subset of the type set of the corresponding type parameter of T, meaning + // that every instantiation of V corresponds to a valid instantiation of T. + + // Minor optimization: ensure we share a context across the two + // instantiations below. + if ctxt == nil { + ctxt = types.NewContext() + } + + var targs []types.Type + for i := 0; i < vtparams.Len(); i++ { + targs = append(targs, vtparams.At(i)) + } + + vinst, err := types.Instantiate(ctxt, V, targs, true) + if err != nil { + panic("type parameters should satisfy their own constraints") + } + + tinst, err := types.Instantiate(ctxt, T, targs, true) + if err != nil { + return false + } + + return types.AssignableTo(vinst, tinst) +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/vendor/golang.org/x/tools/internal/typeparams/coretype.go new file mode 100644 index 00000000000..6e83c6fb1a2 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/coretype.go @@ -0,0 +1,150 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeparams + +import ( + "fmt" + "go/types" +) + +// CoreType returns the core type of T or nil if T does not have a core type. +// +// See https://go.dev/ref/spec#Core_types for the definition of a core type. +func CoreType(T types.Type) types.Type { + U := T.Underlying() + if _, ok := U.(*types.Interface); !ok { + return U // for non-interface types, + } + + terms, err := NormalTerms(U) + if len(terms) == 0 || err != nil { + // len(terms) -> empty type set of interface. + // err != nil => U is invalid, exceeds complexity bounds, or has an empty type set. + return nil // no core type. + } + + U = terms[0].Type().Underlying() + var identical int // i in [0,identical) => Identical(U, terms[i].Type().Underlying()) + for identical = 1; identical < len(terms); identical++ { + if !types.Identical(U, terms[identical].Type().Underlying()) { + break + } + } + + if identical == len(terms) { + // https://go.dev/ref/spec#Core_types + // "There is a single type U which is the underlying type of all types in the type set of T" + return U + } + ch, ok := U.(*types.Chan) + if !ok { + return nil // no core type as identical < len(terms) and U is not a channel. + } + // https://go.dev/ref/spec#Core_types + // "the type chan E if T contains only bidirectional channels, or the type chan<- E or + // <-chan E depending on the direction of the directional channels present." + for chans := identical; chans < len(terms); chans++ { + curr, ok := terms[chans].Type().Underlying().(*types.Chan) + if !ok { + return nil + } + if !types.Identical(ch.Elem(), curr.Elem()) { + return nil // channel elements are not identical. + } + if ch.Dir() == types.SendRecv { + // ch is bidirectional. We can safely always use curr's direction. 
+ ch = curr + } else if curr.Dir() != types.SendRecv && ch.Dir() != curr.Dir() { + // ch and curr are not bidirectional and not the same direction. + return nil + } + } + return ch +} + +// NormalTerms returns a slice of terms representing the normalized structural +// type restrictions of a type, if any. +// +// For all types other than *types.TypeParam, *types.Interface, and +// *types.Union, this is just a single term with Tilde() == false and +// Type() == typ. For *types.TypeParam, *types.Interface, and *types.Union, see +// below. +// +// Structural type restrictions of a type parameter are created via +// non-interface types embedded in its constraint interface (directly, or via a +// chain of interface embeddings). For example, in the declaration type +// T[P interface{~int; m()}] int the structural restriction of the type +// parameter P is ~int. +// +// With interface embedding and unions, the specification of structural type +// restrictions may be arbitrarily complex. For example, consider the +// following: +// +// type A interface{ ~string|~[]byte } +// +// type B interface{ int|string } +// +// type C interface { ~string|~int } +// +// type T[P interface{ A|B; C }] int +// +// In this example, the structural type restriction of P is ~string|int: A|B +// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, +// which when intersected with C (~string|~int) yields ~string|int. +// +// NormalTerms computes these expansions and reductions, producing a +// "normalized" form of the embeddings. A structural restriction is normalized +// if it is a single union containing no interface terms, and is minimal in the +// sense that removing any term changes the set of types satisfying the +// constraint. It is left as a proof for the reader that, modulo sorting, there +// is exactly one such normalized form. +// +// Because the minimal representation always takes this form, NormalTerms +// returns a slice of tilde terms corresponding to the terms of the union in +// the normalized structural restriction. An error is returned if the type is +// invalid, exceeds complexity bounds, or has an empty type set. In the latter +// case, NormalTerms returns ErrEmptyTypeSet. +// +// NormalTerms makes no guarantees about the order of terms, except that it +// is deterministic. +func NormalTerms(typ types.Type) ([]*types.Term, error) { + switch typ := typ.Underlying().(type) { + case *types.TypeParam: + return StructuralTerms(typ) + case *types.Union: + return UnionTermSet(typ) + case *types.Interface: + return InterfaceTermSet(typ) + default: + return []*types.Term{types.NewTerm(false, typ)}, nil + } +} + +// Deref returns the type of the variable pointed to by t, +// if t's core type is a pointer; otherwise it returns t. +// +// Do not assume that Deref(T)==T implies T is not a pointer: +// consider "type T *T", for example. +// +// TODO(adonovan): ideally this would live in typesinternal, but that +// creates an import cycle. Move there when we melt this package down. +func Deref(t types.Type) types.Type { + if ptr, ok := CoreType(t).(*types.Pointer); ok { + return ptr.Elem() + } + return t +} + +// MustDeref returns the type of the variable pointed to by t. +// It panics if t's core type is not a pointer. +// +// TODO(adonovan): ideally this would live in typesinternal, but that +// creates an import cycle. Move there when we melt this package down. 
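+//
+// For ordinary (non-generic) types the core type is just the underlying
+// type, so Deref and MustDeref amount to one level of pointer unwrapping.
+// A hypothetical usage sketch:
+//
+//	p := types.NewPointer(types.Typ[types.Int])
+//	_ = Deref(p)                       // int
+//	_ = Deref(types.Typ[types.String]) // string: not a pointer, returned as-is
+//	_ = MustDeref(p)                   // int
+//	// MustDeref(types.Typ[types.String]) would panic.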
+func MustDeref(t types.Type) types.Type { + if ptr, ok := CoreType(t).(*types.Pointer); ok { + return ptr.Elem() + } + panic(fmt.Sprintf("%v is not a pointer", t)) +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/free.go b/vendor/golang.org/x/tools/internal/typeparams/free.go new file mode 100644 index 00000000000..0ade5c2949e --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/free.go @@ -0,0 +1,131 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeparams + +import ( + "go/types" + + "golang.org/x/tools/internal/aliases" +) + +// Free is a memoization of the set of free type parameters within a +// type. It makes a sequence of calls to [Free.Has] for overlapping +// types more efficient. The zero value is ready for use. +// +// NOTE: Adapted from go/types/infer.go. If it is later exported, factor. +type Free struct { + seen map[types.Type]bool +} + +// Has reports whether the specified type has a free type parameter. +func (w *Free) Has(typ types.Type) (res bool) { + // detect cycles + if x, ok := w.seen[typ]; ok { + return x + } + if w.seen == nil { + w.seen = make(map[types.Type]bool) + } + w.seen[typ] = false + defer func() { + w.seen[typ] = res + }() + + switch t := typ.(type) { + case nil, *types.Basic: // TODO(gri) should nil be handled here? + break + + case *types.Alias: + if aliases.TypeParams(t).Len() > aliases.TypeArgs(t).Len() { + return true // This is an uninstantiated Alias. + } + // The expansion of an alias can have free type parameters, + // whether or not the alias itself has type parameters: + // + // func _[K comparable]() { + // type Set = map[K]bool // free(Set) = {K} + // type MapTo[V] = map[K]V // free(Map[foo]) = {V} + // } + // + // So, we must Unalias. + return w.Has(types.Unalias(t)) + + case *types.Array: + return w.Has(t.Elem()) + + case *types.Slice: + return w.Has(t.Elem()) + + case *types.Struct: + for i, n := 0, t.NumFields(); i < n; i++ { + if w.Has(t.Field(i).Type()) { + return true + } + } + + case *types.Pointer: + return w.Has(t.Elem()) + + case *types.Tuple: + n := t.Len() + for i := 0; i < n; i++ { + if w.Has(t.At(i).Type()) { + return true + } + } + + case *types.Signature: + // t.tparams may not be nil if we are looking at a signature + // of a generic function type (or an interface method) that is + // part of the type we're testing. We don't care about these type + // parameters. + // Similarly, the receiver of a method may declare (rather than + // use) type parameters, we don't care about those either. + // Thus, we only need to look at the input and result parameters. + return w.Has(t.Params()) || w.Has(t.Results()) + + case *types.Interface: + for i, n := 0, t.NumMethods(); i < n; i++ { + if w.Has(t.Method(i).Type()) { + return true + } + } + terms, err := InterfaceTermSet(t) + if err != nil { + return false // ill typed + } + for _, term := range terms { + if w.Has(term.Type()) { + return true + } + } + + case *types.Map: + return w.Has(t.Key()) || w.Has(t.Elem()) + + case *types.Chan: + return w.Has(t.Elem()) + + case *types.Named: + args := t.TypeArgs() + if params := t.TypeParams(); params.Len() > args.Len() { + return true // this is an uninstantiated named type. 
+ } + for i, n := 0, args.Len(); i < n; i++ { + if w.Has(args.At(i)) { + return true + } + } + return w.Has(t.Underlying()) // recurse for types local to parameterized functions + + case *types.TypeParam: + return true + + default: + panic(t) // unreachable + } + + return false +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/vendor/golang.org/x/tools/internal/typeparams/normalize.go new file mode 100644 index 00000000000..93c80fdc96c --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/normalize.go @@ -0,0 +1,218 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeparams + +import ( + "errors" + "fmt" + "go/types" + "os" + "strings" +) + +//go:generate go run copytermlist.go + +const debug = false + +var ErrEmptyTypeSet = errors.New("empty type set") + +// StructuralTerms returns a slice of terms representing the normalized +// structural type restrictions of a type parameter, if any. +// +// Structural type restrictions of a type parameter are created via +// non-interface types embedded in its constraint interface (directly, or via a +// chain of interface embeddings). For example, in the declaration +// +// type T[P interface{~int; m()}] int +// +// the structural restriction of the type parameter P is ~int. +// +// With interface embedding and unions, the specification of structural type +// restrictions may be arbitrarily complex. For example, consider the +// following: +// +// type A interface{ ~string|~[]byte } +// +// type B interface{ int|string } +// +// type C interface { ~string|~int } +// +// type T[P interface{ A|B; C }] int +// +// In this example, the structural type restriction of P is ~string|int: A|B +// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, +// which when intersected with C (~string|~int) yields ~string|int. +// +// StructuralTerms computes these expansions and reductions, producing a +// "normalized" form of the embeddings. A structural restriction is normalized +// if it is a single union containing no interface terms, and is minimal in the +// sense that removing any term changes the set of types satisfying the +// constraint. It is left as a proof for the reader that, modulo sorting, there +// is exactly one such normalized form. +// +// Because the minimal representation always takes this form, StructuralTerms +// returns a slice of tilde terms corresponding to the terms of the union in +// the normalized structural restriction. An error is returned if the +// constraint interface is invalid, exceeds complexity bounds, or has an empty +// type set. In the latter case, StructuralTerms returns ErrEmptyTypeSet. +// +// StructuralTerms makes no guarantees about the order of terms, except that it +// is deterministic. +func StructuralTerms(tparam *types.TypeParam) ([]*types.Term, error) { + constraint := tparam.Constraint() + if constraint == nil { + return nil, fmt.Errorf("%s has nil constraint", tparam) + } + iface, _ := constraint.Underlying().(*types.Interface) + if iface == nil { + return nil, fmt.Errorf("constraint is %T, not *types.Interface", constraint.Underlying()) + } + return InterfaceTermSet(iface) +} + +// InterfaceTermSet computes the normalized terms for a constraint interface, +// returning an error if the term set cannot be computed or is empty. In the +// latter case, the error will be ErrEmptyTypeSet. 
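+//
+// As a worked instance of the normalization described above, given
+//
+//	type A interface{ ~string|~[]byte }
+//	type C interface{ ~string|~int }
+//
+// the term set of interface{ A; C } is the intersection of the two
+// unions: ~string ∩ ~string yields ~string, while every other pairing
+// is disjoint, so the normalized result is the single term ~string.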
+// +// See the documentation of StructuralTerms for more information on +// normalization. +func InterfaceTermSet(iface *types.Interface) ([]*types.Term, error) { + return computeTermSet(iface) +} + +// UnionTermSet computes the normalized terms for a union, returning an error +// if the term set cannot be computed or is empty. In the latter case, the +// error will be ErrEmptyTypeSet. +// +// See the documentation of StructuralTerms for more information on +// normalization. +func UnionTermSet(union *types.Union) ([]*types.Term, error) { + return computeTermSet(union) +} + +func computeTermSet(typ types.Type) ([]*types.Term, error) { + tset, err := computeTermSetInternal(typ, make(map[types.Type]*termSet), 0) + if err != nil { + return nil, err + } + if tset.terms.isEmpty() { + return nil, ErrEmptyTypeSet + } + if tset.terms.isAll() { + return nil, nil + } + var terms []*types.Term + for _, term := range tset.terms { + terms = append(terms, types.NewTerm(term.tilde, term.typ)) + } + return terms, nil +} + +// A termSet holds the normalized set of terms for a given type. +// +// The name termSet is intentionally distinct from 'type set': a type set is +// all types that implement a type (and includes method restrictions), whereas +// a term set just represents the structural restrictions on a type. +type termSet struct { + complete bool + terms termlist +} + +func indentf(depth int, format string, args ...interface{}) { + fmt.Fprintf(os.Stderr, strings.Repeat(".", depth)+format+"\n", args...) +} + +func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth int) (res *termSet, err error) { + if t == nil { + panic("nil type") + } + + if debug { + indentf(depth, "%s", t.String()) + defer func() { + if err != nil { + indentf(depth, "=> %s", err) + } else { + indentf(depth, "=> %s", res.terms.String()) + } + }() + } + + const maxTermCount = 100 + if tset, ok := seen[t]; ok { + if !tset.complete { + return nil, fmt.Errorf("cycle detected in the declaration of %s", t) + } + return tset, nil + } + + // Mark the current type as seen to avoid infinite recursion. + tset := new(termSet) + defer func() { + tset.complete = true + }() + seen[t] = tset + + switch u := t.Underlying().(type) { + case *types.Interface: + // The term set of an interface is the intersection of the term sets of its + // embedded types. + tset.terms = allTermlist + for i := 0; i < u.NumEmbeddeds(); i++ { + embedded := u.EmbeddedType(i) + if _, ok := embedded.Underlying().(*types.TypeParam); ok { + return nil, fmt.Errorf("invalid embedded type %T", embedded) + } + tset2, err := computeTermSetInternal(embedded, seen, depth+1) + if err != nil { + return nil, err + } + tset.terms = tset.terms.intersect(tset2.terms) + } + case *types.Union: + // The term set of a union is the union of term sets of its terms. + tset.terms = nil + for i := 0; i < u.Len(); i++ { + t := u.Term(i) + var terms termlist + switch t.Type().Underlying().(type) { + case *types.Interface: + tset2, err := computeTermSetInternal(t.Type(), seen, depth+1) + if err != nil { + return nil, err + } + terms = tset2.terms + case *types.TypeParam, *types.Union: + // A stand-alone type parameter or union is not permitted as union + // term. 
+			return nil, fmt.Errorf("invalid union term %T", t)
+		default:
+			if t.Type() == types.Typ[types.Invalid] {
+				continue
+			}
+			terms = termlist{{t.Tilde(), t.Type()}}
+		}
+		tset.terms = tset.terms.union(terms)
+		if len(tset.terms) > maxTermCount {
+			return nil, fmt.Errorf("exceeded max term count %d", maxTermCount)
+		}
+	}
+	case *types.TypeParam:
+		panic("unreachable")
+	default:
+		// For all other types, the term set is just a single non-tilde term
+		// holding the type itself.
+		if u != types.Typ[types.Invalid] {
+			tset.terms = termlist{{false, t}}
+		}
+	}
+	return tset, nil
+}
+
+// under is a facade for the go/types internal function of the same name. It is
+// used by typeterm.go.
+func under(t types.Type) types.Type {
+	return t.Underlying()
+}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/termlist.go b/vendor/golang.org/x/tools/internal/typeparams/termlist.go
new file mode 100644
index 00000000000..cbd12f80131
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/termlist.go
@@ -0,0 +1,163 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by copytermlist.go DO NOT EDIT.
+
+package typeparams
+
+import (
+	"bytes"
+	"go/types"
+)
+
+// A termlist represents the type set represented by the union
+// t1 ∪ t2 ∪ ... ∪ tn of the type sets of the terms t1 to tn.
+// A termlist is in normal form if all terms are disjoint.
+// termlist operations don't require the operands to be in
+// normal form.
+type termlist []*term
+
+// allTermlist represents the set of all types.
+// It is in normal form.
+var allTermlist = termlist{new(term)}
+
+// String prints the termlist exactly (without normalization).
+func (xl termlist) String() string {
+	if len(xl) == 0 {
+		return "∅"
+	}
+	var buf bytes.Buffer
+	for i, x := range xl {
+		if i > 0 {
+			buf.WriteString(" | ")
+		}
+		buf.WriteString(x.String())
+	}
+	return buf.String()
+}
+
+// isEmpty reports whether the termlist xl represents the empty set of types.
+func (xl termlist) isEmpty() bool {
+	// If there's a non-nil term, the entire list is not empty.
+	// If the termlist is in normal form, this requires at most
+	// one iteration.
+	for _, x := range xl {
+		if x != nil {
+			return false
+		}
+	}
+	return true
+}
+
+// isAll reports whether the termlist xl represents the set of all types.
+func (xl termlist) isAll() bool {
+	// If there's a 𝓤 term, the entire list is 𝓤.
+	// If the termlist is in normal form, this requires at most
+	// one iteration.
+	for _, x := range xl {
+		if x != nil && x.typ == nil {
+			return true
+		}
+	}
+	return false
+}
+
+// norm returns the normal form of xl.
+func (xl termlist) norm() termlist {
+	// Quadratic algorithm, but good enough for now.
+	// TODO(gri) fix asymptotic performance
+	used := make([]bool, len(xl))
+	var rl termlist
+	for i, xi := range xl {
+		if xi == nil || used[i] {
+			continue
+		}
+		for j := i + 1; j < len(xl); j++ {
+			xj := xl[j]
+			if xj == nil || used[j] {
+				continue
+			}
+			if u1, u2 := xi.union(xj); u2 == nil {
+				// If we encounter a 𝓤 term, the entire list is 𝓤.
+				// Exit early.
+				// (Note that this is not just an optimization;
+				// if we continue, we may end up with a 𝓤 term
+				// and other terms and the result would not be
+				// in normal form.)
+ if u1.typ == nil { + return allTermlist + } + xi = u1 + used[j] = true // xj is now unioned into xi - ignore it in future iterations + } + } + rl = append(rl, xi) + } + return rl +} + +// union returns the union xl ∪ yl. +func (xl termlist) union(yl termlist) termlist { + return append(xl, yl...).norm() +} + +// intersect returns the intersection xl ∩ yl. +func (xl termlist) intersect(yl termlist) termlist { + if xl.isEmpty() || yl.isEmpty() { + return nil + } + + // Quadratic algorithm, but good enough for now. + // TODO(gri) fix asymptotic performance + var rl termlist + for _, x := range xl { + for _, y := range yl { + if r := x.intersect(y); r != nil { + rl = append(rl, r) + } + } + } + return rl.norm() +} + +// equal reports whether xl and yl represent the same type set. +func (xl termlist) equal(yl termlist) bool { + // TODO(gri) this should be more efficient + return xl.subsetOf(yl) && yl.subsetOf(xl) +} + +// includes reports whether t ∈ xl. +func (xl termlist) includes(t types.Type) bool { + for _, x := range xl { + if x.includes(t) { + return true + } + } + return false +} + +// supersetOf reports whether y ⊆ xl. +func (xl termlist) supersetOf(y *term) bool { + for _, x := range xl { + if y.subsetOf(x) { + return true + } + } + return false +} + +// subsetOf reports whether xl ⊆ yl. +func (xl termlist) subsetOf(yl termlist) bool { + if yl.isEmpty() { + return xl.isEmpty() + } + + // each term x of xl must be a subset of yl + for _, x := range xl { + if !yl.supersetOf(x) { + return false // x is not a subset yl + } + } + return true +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go new file mode 100644 index 00000000000..7350bb702a1 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go @@ -0,0 +1,169 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by copytermlist.go DO NOT EDIT. + +package typeparams + +import "go/types" + +// A term describes elementary type sets: +// +// ∅: (*term)(nil) == ∅ // set of no types (empty set) +// 𝓤: &term{} == 𝓤 // set of all types (𝓤niverse) +// T: &term{false, T} == {T} // set of type T +// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t +type term struct { + tilde bool // valid if typ != nil + typ types.Type +} + +func (x *term) String() string { + switch { + case x == nil: + return "∅" + case x.typ == nil: + return "𝓤" + case x.tilde: + return "~" + x.typ.String() + default: + return x.typ.String() + } +} + +// equal reports whether x and y represent the same type set. +func (x *term) equal(y *term) bool { + // easy cases + switch { + case x == nil || y == nil: + return x == y + case x.typ == nil || y.typ == nil: + return x.typ == y.typ + } + // ∅ ⊂ x, y ⊂ 𝓤 + + return x.tilde == y.tilde && types.Identical(x.typ, y.typ) +} + +// union returns the union x ∪ y: zero, one, or two non-nil terms. 
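+//
+// A few hypothetical worked cases, with t = int:
+//
+//	union(~int, int)   == (~int, nil)   // ~int absorbs int
+//	union(int, ~int)   == (~int, nil)
+//	union(int, string) == (int, string) // disjoint sets remain two terms
+//	union(∅, ~int)     == (~int, nil)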
+func (x *term) union(y *term) (_, _ *term) { + // easy cases + switch { + case x == nil && y == nil: + return nil, nil // ∅ ∪ ∅ == ∅ + case x == nil: + return y, nil // ∅ ∪ y == y + case y == nil: + return x, nil // x ∪ ∅ == x + case x.typ == nil: + return x, nil // 𝓤 ∪ y == 𝓤 + case y.typ == nil: + return y, nil // x ∪ 𝓤 == 𝓤 + } + // ∅ ⊂ x, y ⊂ 𝓤 + + if x.disjoint(y) { + return x, y // x ∪ y == (x, y) if x ∩ y == ∅ + } + // x.typ == y.typ + + // ~t ∪ ~t == ~t + // ~t ∪ T == ~t + // T ∪ ~t == ~t + // T ∪ T == T + if x.tilde || !y.tilde { + return x, nil + } + return y, nil +} + +// intersect returns the intersection x ∩ y. +func (x *term) intersect(y *term) *term { + // easy cases + switch { + case x == nil || y == nil: + return nil // ∅ ∩ y == ∅ and ∩ ∅ == ∅ + case x.typ == nil: + return y // 𝓤 ∩ y == y + case y.typ == nil: + return x // x ∩ 𝓤 == x + } + // ∅ ⊂ x, y ⊂ 𝓤 + + if x.disjoint(y) { + return nil // x ∩ y == ∅ if x ∩ y == ∅ + } + // x.typ == y.typ + + // ~t ∩ ~t == ~t + // ~t ∩ T == T + // T ∩ ~t == T + // T ∩ T == T + if !x.tilde || y.tilde { + return x + } + return y +} + +// includes reports whether t ∈ x. +func (x *term) includes(t types.Type) bool { + // easy cases + switch { + case x == nil: + return false // t ∈ ∅ == false + case x.typ == nil: + return true // t ∈ 𝓤 == true + } + // ∅ ⊂ x ⊂ 𝓤 + + u := t + if x.tilde { + u = under(u) + } + return types.Identical(x.typ, u) +} + +// subsetOf reports whether x ⊆ y. +func (x *term) subsetOf(y *term) bool { + // easy cases + switch { + case x == nil: + return true // ∅ ⊆ y == true + case y == nil: + return false // x ⊆ ∅ == false since x != ∅ + case y.typ == nil: + return true // x ⊆ 𝓤 == true + case x.typ == nil: + return false // 𝓤 ⊆ y == false since y != 𝓤 + } + // ∅ ⊂ x, y ⊂ 𝓤 + + if x.disjoint(y) { + return false // x ⊆ y == false if x ∩ y == ∅ + } + // x.typ == y.typ + + // ~t ⊆ ~t == true + // ~t ⊆ T == false + // T ⊆ ~t == true + // T ⊆ T == true + return !x.tilde || y.tilde +} + +// disjoint reports whether x ∩ y == ∅. +// x.typ and y.typ must not be nil. +func (x *term) disjoint(y *term) bool { + if debug && (x.typ == nil || y.typ == nil) { + panic("invalid argument(s)") + } + ux := x.typ + if y.tilde { + ux = under(ux) + } + uy := y.typ + if x.tilde { + uy = under(uy) + } + return !types.Identical(ux, uy) +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/element.go b/vendor/golang.org/x/tools/internal/typesinternal/element.go new file mode 100644 index 00000000000..4957f021641 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/element.go @@ -0,0 +1,133 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +import ( + "fmt" + "go/types" + + "golang.org/x/tools/go/types/typeutil" +) + +// ForEachElement calls f for type T and each type reachable from its +// type through reflection. It does this by recursively stripping off +// type constructors; in addition, for each named type N, the type *N +// is added to the result as it may have additional methods. +// +// The caller must provide an initially empty set used to de-duplicate +// identical types, potentially across multiple calls to ForEachElement. +// (Its final value holds all the elements seen, matching the arguments +// passed to f.) +// +// TODO(adonovan): share/harmonize with go/callgraph/rta. 
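+//
+// A hypothetical call site, with rtypes and msets owned by the caller so
+// that de-duplication extends across successive calls:
+//
+//	var rtypes typeutil.Map
+//	var msets typeutil.MethodSetCache
+//	ForEachElement(&rtypes, &msets, T, func(t types.Type) {
+//		fmt.Println(t) // each type reachable from T via reflection
+//	})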
+func ForEachElement(rtypes *typeutil.Map, msets *typeutil.MethodSetCache, T types.Type, f func(types.Type)) { + var visit func(T types.Type, skip bool) + visit = func(T types.Type, skip bool) { + if !skip { + if seen, _ := rtypes.Set(T, true).(bool); seen { + return // de-dup + } + + f(T) // notify caller of new element type + } + + // Recursion over signatures of each method. + tmset := msets.MethodSet(T) + for i := 0; i < tmset.Len(); i++ { + sig := tmset.At(i).Type().(*types.Signature) + // It is tempting to call visit(sig, false) + // but, as noted in golang.org/cl/65450043, + // the Signature.Recv field is ignored by + // types.Identical and typeutil.Map, which + // is confusing at best. + // + // More importantly, the true signature rtype + // reachable from a method using reflection + // has no receiver but an extra ordinary parameter. + // For the Read method of io.Reader we want: + // func(Reader, []byte) (int, error) + // but here sig is: + // func([]byte) (int, error) + // with .Recv = Reader (though it is hard to + // notice because it doesn't affect Signature.String + // or types.Identical). + // + // TODO(adonovan): construct and visit the correct + // non-method signature with an extra parameter + // (though since unnamed func types have no methods + // there is essentially no actual demand for this). + // + // TODO(adonovan): document whether or not it is + // safe to skip non-exported methods (as RTA does). + visit(sig.Params(), true) // skip the Tuple + visit(sig.Results(), true) // skip the Tuple + } + + switch T := T.(type) { + case *types.Alias: + visit(types.Unalias(T), skip) // emulates the pre-Alias behavior + + case *types.Basic: + // nop + + case *types.Interface: + // nop---handled by recursion over method set. + + case *types.Pointer: + visit(T.Elem(), false) + + case *types.Slice: + visit(T.Elem(), false) + + case *types.Chan: + visit(T.Elem(), false) + + case *types.Map: + visit(T.Key(), false) + visit(T.Elem(), false) + + case *types.Signature: + if T.Recv() != nil { + panic(fmt.Sprintf("Signature %s has Recv %s", T, T.Recv())) + } + visit(T.Params(), true) // skip the Tuple + visit(T.Results(), true) // skip the Tuple + + case *types.Named: + // A pointer-to-named type can be derived from a named + // type via reflection. It may have methods too. + visit(types.NewPointer(T), false) + + // Consider 'type T struct{S}' where S has methods. + // Reflection provides no way to get from T to struct{S}, + // only to S, so the method set of struct{S} is unwanted, + // so set 'skip' flag during recursion. + visit(T.Underlying(), true) // skip the unnamed type + + case *types.Array: + visit(T.Elem(), false) + + case *types.Struct: + for i, n := 0, T.NumFields(); i < n; i++ { + // TODO(adonovan): document whether or not + // it is safe to skip non-exported fields. + visit(T.Field(i).Type(), false) + } + + case *types.Tuple: + for i, n := 0, T.Len(); i < n; i++ { + visit(T.At(i).Type(), false) + } + + case *types.TypeParam, *types.Union: + // forEachReachable must not be called on parameterized types. 
+ panic(T) + + default: + panic(T) + } + } + visit(T, false) +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go index 834e05381ce..131caab2847 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go @@ -838,7 +838,7 @@ const ( // InvalidCap occurs when an argument to the cap built-in function is not of // supported type. // - // See https://golang.org/ref/spec#Lengthand_capacity for information on + // See https://golang.org/ref/spec#Length_and_capacity for information on // which underlying types are supported as arguments to cap and len. // // Example: @@ -859,7 +859,7 @@ const ( // InvalidCopy occurs when the arguments are not of slice type or do not // have compatible type. // - // See https://golang.org/ref/spec#Appendingand_copying_slices for more + // See https://golang.org/ref/spec#Appending_and_copying_slices for more // information on the type requirements for the copy built-in. // // Example: @@ -897,7 +897,7 @@ const ( // InvalidLen occurs when an argument to the len built-in function is not of // supported type. // - // See https://golang.org/ref/spec#Lengthand_capacity for information on + // See https://golang.org/ref/spec#Length_and_capacity for information on // which underlying types are supported as arguments to cap and len. // // Example: @@ -914,7 +914,7 @@ const ( // InvalidMake occurs when make is called with an unsupported type argument. // - // See https://golang.org/ref/spec#Makingslices_maps_and_channels for + // See https://golang.org/ref/spec#Making_slices_maps_and_channels for // information on the types that may be created using make. // // Example: diff --git a/vendor/golang.org/x/tools/internal/typesinternal/recv.go b/vendor/golang.org/x/tools/internal/typesinternal/recv.go index fea7c8b75e8..ba6f4f4ebd5 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/recv.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/recv.go @@ -6,8 +6,6 @@ package typesinternal import ( "go/types" - - "golang.org/x/tools/internal/aliases" ) // ReceiverNamed returns the named type (if any) associated with the @@ -15,11 +13,11 @@ import ( // It also reports whether a Pointer was present. func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) { t := recv.Type() - if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok { + if ptr, ok := types.Unalias(t).(*types.Pointer); ok { isPtr = true t = ptr.Elem() } - named, _ = aliases.Unalias(t).(*types.Named) + named, _ = types.Unalias(t).(*types.Named) return } @@ -36,7 +34,7 @@ func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) { // indirection from the type, regardless of named types (analogous to // a LOAD instruction). 
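 //
 // For example (a hypothetical sketch):
 //
 //	Unpointer(*T)  == T
 //	Unpointer(**T) == *T   // only one level of indirection is removed
 //	Unpointer(int) == int  // non-pointers pass through unchanged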
func Unpointer(t types.Type) types.Type { - if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok { + if ptr, ok := types.Unalias(t).(*types.Pointer); ok { return ptr.Elem() } return t diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go index 83923286120..df3ea521254 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/types.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go @@ -11,6 +11,8 @@ import ( "go/types" "reflect" "unsafe" + + "golang.org/x/tools/internal/aliases" ) func SetUsesCgo(conf *types.Config) bool { @@ -63,3 +65,57 @@ func NameRelativeTo(pkg *types.Package) types.Qualifier { return other.Name() } } + +// A NamedOrAlias is a [types.Type] that is named (as +// defined by the spec) and capable of bearing type parameters: it +// abstracts aliases ([types.Alias]) and defined types +// ([types.Named]). +// +// Every type declared by an explicit "type" declaration is a +// NamedOrAlias. (Built-in type symbols may additionally +// have type [types.Basic], which is not a NamedOrAlias, +// though the spec regards them as "named".) +// +// NamedOrAlias cannot expose the Origin method, because +// [types.Alias.Origin] and [types.Named.Origin] have different +// (covariant) result types; use [Origin] instead. +type NamedOrAlias interface { + types.Type + Obj() *types.TypeName +} + +// TypeParams is a light shim around t.TypeParams(). +// (go/types.Alias).TypeParams requires >= 1.23. +func TypeParams(t NamedOrAlias) *types.TypeParamList { + switch t := t.(type) { + case *types.Alias: + return aliases.TypeParams(t) + case *types.Named: + return t.TypeParams() + } + return nil +} + +// TypeArgs is a light shim around t.TypeArgs(). +// (go/types.Alias).TypeArgs requires >= 1.23. +func TypeArgs(t NamedOrAlias) *types.TypeList { + switch t := t.(type) { + case *types.Alias: + return aliases.TypeArgs(t) + case *types.Named: + return t.TypeArgs() + } + return nil +} + +// Origin returns the generic type of the Named or Alias type t if it +// is instantiated, otherwise it returns t. +func Origin(t NamedOrAlias) NamedOrAlias { + switch t := t.(type) { + case *types.Alias: + return aliases.Origin(t) + case *types.Named: + return t.Origin() + } + return t +} diff --git a/vendor/golang.org/x/tools/internal/versions/constraint.go b/vendor/golang.org/x/tools/internal/versions/constraint.go new file mode 100644 index 00000000000..179063d4848 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/constraint.go @@ -0,0 +1,13 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package versions + +import "go/build/constraint" + +// ConstraintGoVersion is constraint.GoVersion (if built with go1.21+). +// Otherwise nil. +// +// Deprecate once x/tools is after go1.21. +var ConstraintGoVersion func(x constraint.Expr) string diff --git a/vendor/golang.org/x/tools/internal/versions/constraint_go121.go b/vendor/golang.org/x/tools/internal/versions/constraint_go121.go new file mode 100644 index 00000000000..38011407d5f --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/constraint_go121.go @@ -0,0 +1,14 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build go1.21 +// +build go1.21 + +package versions + +import "go/build/constraint" + +func init() { + ConstraintGoVersion = constraint.GoVersion +} diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain.go b/vendor/golang.org/x/tools/internal/versions/toolchain.go deleted file mode 100644 index 377bf7a53b4..00000000000 --- a/vendor/golang.org/x/tools/internal/versions/toolchain.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package versions - -// toolchain is maximum version (<1.22) that the go toolchain used -// to build the current tool is known to support. -// -// When a tool is built with >=1.22, the value of toolchain is unused. -// -// x/tools does not support building with go <1.18. So we take this -// as the minimum possible maximum. -var toolchain string = Go1_18 diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go deleted file mode 100644 index f65beed9d83..00000000000 --- a/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.19 -// +build go1.19 - -package versions - -func init() { - if Compare(toolchain, Go1_19) < 0 { - toolchain = Go1_19 - } -} diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go deleted file mode 100644 index 1a9efa126cd..00000000000 --- a/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.20 -// +build go1.20 - -package versions - -func init() { - if Compare(toolchain, Go1_20) < 0 { - toolchain = Go1_20 - } -} diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go deleted file mode 100644 index b7ef216dfec..00000000000 --- a/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.21 -// +build go1.21 - -package versions - -func init() { - if Compare(toolchain, Go1_21) < 0 { - toolchain = Go1_21 - } -} diff --git a/vendor/golang.org/x/tools/internal/versions/types.go b/vendor/golang.org/x/tools/internal/versions/types.go index 562eef21fa2..0fc10ce4eb5 100644 --- a/vendor/golang.org/x/tools/internal/versions/types.go +++ b/vendor/golang.org/x/tools/internal/versions/types.go @@ -5,15 +5,29 @@ package versions import ( + "go/ast" "go/types" ) -// GoVersion returns the Go version of the type package. -// It returns zero if no version can be determined. -func GoVersion(pkg *types.Package) string { - // TODO(taking): x/tools can call GoVersion() [from 1.21] after 1.25. - if pkg, ok := any(pkg).(interface{ GoVersion() string }); ok { - return pkg.GoVersion() +// FileVersion returns a file's Go version. 
+// The reported version is an unknown Future version if a
+// version cannot be determined.
+func FileVersion(info *types.Info, file *ast.File) string {
+	// In tools built with Go >= 1.22, the Go version of a file
+	// follows a cascade of sources:
+	// 1) types.Info.FileVersion, which follows the cascade:
+	//   1.a) file version (ast.File.GoVersion),
+	//   1.b) the package version (types.Config.GoVersion), or
+	// 2) is some unknown Future version.
+	//
+	// File versions require a valid package version to be provided to types
+	// in Config.GoVersion. Config.GoVersion is either from the package's module
+	// or the toolchain (go run). This value should be provided by go/packages
+	// or unitchecker.Config.GoVersion.
+	if v := info.FileVersions[file]; IsValid(v) {
+		return v
 	}
-	return ""
+	// Note: we could instead return runtime.Version() [if valid].
+	// This would act as a max version on what a tool can support.
+	return Future
 }
diff --git a/vendor/golang.org/x/tools/internal/versions/types_go121.go b/vendor/golang.org/x/tools/internal/versions/types_go121.go
deleted file mode 100644
index b4345d3349e..00000000000
--- a/vendor/golang.org/x/tools/internal/versions/types_go121.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.22
-// +build !go1.22
-
-package versions
-
-import (
-	"go/ast"
-	"go/types"
-)
-
-// FileVersion returns a language version (<=1.21) derived from runtime.Version()
-// or an unknown future version.
-func FileVersion(info *types.Info, file *ast.File) string {
-	// In x/tools built with Go <= 1.21, we do not have Info.FileVersions
-	// available. We use a go version derived from the toolchain used to
-	// compile the tool by default.
-	// This will be <= go1.21. We take this as the maximum version that
-	// this tool can support.
-	//
-	// There are no features currently in x/tools that need to tell fine grained
-	// differences for versions <1.22.
-	return toolchain
-}
-
-// InitFileVersions is a noop when compiled with this Go version.
-func InitFileVersions(*types.Info) {}
diff --git a/vendor/golang.org/x/tools/internal/versions/types_go122.go b/vendor/golang.org/x/tools/internal/versions/types_go122.go
deleted file mode 100644
index aac5db62c98..00000000000
--- a/vendor/golang.org/x/tools/internal/versions/types_go122.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.22
-// +build go1.22
-
-package versions
-
-import (
-	"go/ast"
-	"go/types"
-)
-
-// FileVersion returns a file's Go version.
-// The reported version is an unknown Future version if a
-// version cannot be determined.
-func FileVersion(info *types.Info, file *ast.File) string {
-	// In tools built with Go >= 1.22, the Go version of a file
-	// follow a cascades of sources:
-	// 1) types.Info.FileVersion, which follows the cascade:
-	//   1.a) file version (ast.File.GoVersion),
-	//   1.b) the package version (types.Config.GoVersion), or
-	// 2) is some unknown Future version.
-	//
-	// File versions require a valid package version to be provided to types
-	// in Config.GoVersion. Config.GoVersion is either from the package's module
-	// or the toolchain (go run). This value should be provided by go/packages
-	// or unitchecker.Config.GoVersion.
- if v := info.FileVersions[file]; IsValid(v) { - return v - } - // Note: we could instead return runtime.Version() [if valid]. - // This would act as a max version on what a tool can support. - return Future -} - -// InitFileVersions initializes info to record Go versions for Go files. -func InitFileVersions(info *types.Info) { - info.FileVersions = make(map[*ast.File]string) -} diff --git a/vendor/gonum.org/v1/gonum/AUTHORS b/vendor/gonum.org/v1/gonum/AUTHORS index 932c529804d..8e5896dbf1c 100644 --- a/vendor/gonum.org/v1/gonum/AUTHORS +++ b/vendor/gonum.org/v1/gonum/AUTHORS @@ -34,6 +34,7 @@ Davor Kapsa <davor.kapsa@gmail.com> DeepMind Technologies Delaney Gillilan <delaneygillilan@gmail.com> Dezmond Goff <goff.dezmond@gmail.com> +Dirk Müller <dirk@dmllr.de> Dong-hee Na <donghee.na92@gmail.com> Dustin Spicuzza <dustin@virtualroadside.com> Egon Elbre <egonelbre@gmail.com> @@ -50,6 +51,7 @@ Francesc Campoy <campoy@golang.org> Google Inc Gustaf Johansson <gustaf@pinon.se> Hossein Zolfi <hossein.zolfi@gmail.com> +Huang Peng Fei <huangpengfei@outlook.com> Iakov Davydov <iakov.davydov@unil.ch> Igor Mikushkin <igor.mikushkin@gmail.com> Iskander Sharipov <quasilyte@gmail.com> @@ -73,6 +75,7 @@ Joseph Watson <jtwatson@linux-consulting.us> Josh Wilson <josh.craig.wilson@gmail.com> Julien Roland <juroland@gmail.com> Kai Trukenmüller <ktye78@gmail.com> +Kendall Marcus <knowmost@outlook.com> Kent English <kent.english@gmail.com> Kevin C. Zimmerman <kevinczimmerman@gmail.com> Kirill Motkov <motkov.kirill@gmail.com> @@ -121,6 +124,8 @@ The University of Minnesota The University of Washington Thomas Berg <tomfuture@gmail.com> Tobin Harding <me@tobin.cc> +Tom Payne <twpayne@gmail.com> +Tristan Nicholls <tvk.nicholls@gmail.com> Valentin Deleplace <deleplace2015@gmail.com> Vincent Thiery <vjmthiery@gmail.com> Vladimír Chalupecký <vladimir.chalupecky@gmail.com> diff --git a/vendor/gonum.org/v1/gonum/CONTRIBUTORS b/vendor/gonum.org/v1/gonum/CONTRIBUTORS index 711ad614bd3..e367595b925 100644 --- a/vendor/gonum.org/v1/gonum/CONTRIBUTORS +++ b/vendor/gonum.org/v1/gonum/CONTRIBUTORS @@ -42,6 +42,7 @@ David Samborski <bloggingarrow@gmail.com> Davor Kapsa <davor.kapsa@gmail.com> Delaney Gillilan <delaneygillilan@gmail.com> Dezmond Goff <goff.dezmond@gmail.com> +Dirk Müller <dirk@dmllr.de> Dong-hee Na <donghee.na92@gmail.com> Dustin Spicuzza <dustin@virtualroadside.com> Egon Elbre <egonelbre@gmail.com> @@ -57,6 +58,7 @@ Fazlul Shahriar <fshahriar@gmail.com> Francesc Campoy <campoy@golang.org> Gustaf Johansson <gustaf@pinon.se> Hossein Zolfi <hossein.zolfi@gmail.com> +Huang Peng Fei <huangpengfei@outlook.com> Iakov Davydov <iakov.davydov@unil.ch> Igor Mikushkin <igor.mikushkin@gmail.com> Iskander Sharipov <quasilyte@gmail.com> @@ -81,6 +83,7 @@ Joseph Watson <jtwatson@linux-consulting.us> Josh Wilson <josh.craig.wilson@gmail.com> Julien Roland <juroland@gmail.com> Kai Trukenmüller <ktye78@gmail.com> +Kendall Marcus <knowmost@outlook.com> Kent English <kent.english@gmail.com> Kevin C. 
Zimmerman <kevinczimmerman@gmail.com> Kirill Motkov <motkov.kirill@gmail.com> @@ -124,6 +127,8 @@ Takeshi Yoneda <cz.rk.t0415y.g@gmail.com> Tamir Hyman <hyman.tamir@gmail.com> Thomas Berg <tomfuture@gmail.com> Tobin Harding <me@tobin.cc> +Tom Payne <twpayne@gmail.com> +Tristan Nicholls <tvk.nicholls@gmail.com> Valentin Deleplace <deleplace2015@gmail.com> Vincent Thiery <vjmthiery@gmail.com> Vladimír Chalupecký <vladimir.chalupecky@gmail.com> diff --git a/vendor/gonum.org/v1/gonum/blas/blas64/conv.go b/vendor/gonum.org/v1/gonum/blas/blas64/conv.go index 6cc6517f1b9..695557d13a9 100644 --- a/vendor/gonum.org/v1/gonum/blas/blas64/conv.go +++ b/vendor/gonum.org/v1/gonum/blas/blas64/conv.go @@ -261,17 +261,3 @@ func (t TriangularBand) From(a TriangularBandCols) { } dst.From(src) } - -func min(a, b int) int { - if a < b { - return a - } - return b -} - -func max(a, b int) int { - if a > b { - return a - } - return b -} diff --git a/vendor/gonum.org/v1/gonum/blas/cblas128/conv.go b/vendor/gonum.org/v1/gonum/blas/cblas128/conv.go index c459e1d87e3..bfafb96efcd 100644 --- a/vendor/gonum.org/v1/gonum/blas/cblas128/conv.go +++ b/vendor/gonum.org/v1/gonum/blas/cblas128/conv.go @@ -263,17 +263,3 @@ func (t TriangularBand) From(a TriangularBandCols) { } dst.From(src) } - -func min(a, b int) int { - if a < b { - return a - } - return b -} - -func max(a, b int) int { - if a > b { - return a - } - return b -} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/gonum.go b/vendor/gonum.org/v1/gonum/blas/gonum/gonum.go index 61a8b8b5d0b..5a5c1110121 100644 --- a/vendor/gonum.org/v1/gonum/blas/gonum/gonum.go +++ b/vendor/gonum.org/v1/gonum/blas/gonum/gonum.go @@ -21,20 +21,6 @@ const ( minParBlock = 4 // minimum number of blocks needed to go parallel ) -func max(a, b int) int { - if a > b { - return a - } - return b -} - -func min(a, b int) int { - if a > b { - return b - } - return a -} - // blocks returns the number of divisions of the dimension length with the given // block size. func blocks(dim, bsize int) int { diff --git a/vendor/gonum.org/v1/gonum/floats/floats.go b/vendor/gonum.org/v1/gonum/floats/floats.go index 5db73a0573a..68c4e65c7e2 100644 --- a/vendor/gonum.org/v1/gonum/floats/floats.go +++ b/vendor/gonum.org/v1/gonum/floats/floats.go @@ -7,6 +7,7 @@ package floats import ( "errors" "math" + "slices" "sort" "gonum.org/v1/gonum/floats/scalar" @@ -633,10 +634,10 @@ func Prod(s []float64) float64 { } // Reverse reverses the order of elements in the slice. +// +// Deprecated: This function simply calls [slices.Reverse]. func Reverse(s []float64) { - for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { - s[i], s[j] = s[j], s[i] - } + slices.Reverse(s) } // Same returns true when the input slices have the same length and all diff --git a/vendor/gonum.org/v1/gonum/graph/internal/ordered/doc.go b/vendor/gonum.org/v1/gonum/graph/internal/ordered/doc.go deleted file mode 100644 index 563df6f2e65..00000000000 --- a/vendor/gonum.org/v1/gonum/graph/internal/ordered/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ordered provides common sort ordering types. 
-package ordered // import "gonum.org/v1/gonum/graph/internal/ordered" diff --git a/vendor/gonum.org/v1/gonum/graph/internal/ordered/sort.go b/vendor/gonum.org/v1/gonum/graph/internal/ordered/sort.go deleted file mode 100644 index 1661125c2c7..00000000000 --- a/vendor/gonum.org/v1/gonum/graph/internal/ordered/sort.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ordered - -import ( - "sort" - - "gonum.org/v1/gonum/graph" -) - -// ByID sorts a slice of graph.Node by ID. -func ByID(n []graph.Node) { - sort.Slice(n, func(i, j int) bool { return n[i].ID() < n[j].ID() }) -} - -// BySliceValues sorts a slice of []int64 lexically by the values of the -// []int64. -func BySliceValues(c [][]int64) { - sort.Slice(c, func(i, j int) bool { - a, b := c[i], c[j] - l := len(a) - if len(b) < l { - l = len(b) - } - for k, v := range a[:l] { - if v < b[k] { - return true - } - if v > b[k] { - return false - } - } - return len(a) < len(b) - }) -} - -// BySliceIDs sorts a slice of []graph.Node lexically by the IDs of the -// []graph.Node. -func BySliceIDs(c [][]graph.Node) { - sort.Slice(c, func(i, j int) bool { - a, b := c[i], c[j] - l := len(a) - if len(b) < l { - l = len(b) - } - for k, v := range a[:l] { - if v.ID() < b[k].ID() { - return true - } - if v.ID() > b[k].ID() { - return false - } - } - return len(a) < len(b) - }) -} - -// Int64s sorts a slice of int64. -func Int64s(s []int64) { - sort.Slice(s, func(i, j int) bool { return s[i] < s[j] }) -} - -// LinesByIDs sort a slice of graph.LinesByIDs lexically by the From IDs, -// then by the To IDs, finally by the Line IDs. -func LinesByIDs(n []graph.Line) { - sort.Slice(n, func(i, j int) bool { - a, b := n[i], n[j] - if a.From().ID() != b.From().ID() { - return a.From().ID() < b.From().ID() - } - if a.To().ID() != b.To().ID() { - return a.To().ID() < b.To().ID() - } - return n[i].ID() < n[j].ID() - }) -} - -// Reverse reverses the order of nodes. -func Reverse(nodes []graph.Node) { - for i, j := 0, len(nodes)-1; i < j; i, j = i+1, j-1 { - nodes[i], nodes[j] = nodes[j], nodes[i] - } -} diff --git a/vendor/gonum.org/v1/gonum/graph/internal/set/same.go b/vendor/gonum.org/v1/gonum/graph/internal/set/same.go index a7031e4a1b7..90189f09ea0 100644 --- a/vendor/gonum.org/v1/gonum/graph/internal/set/same.go +++ b/vendor/gonum.org/v1/gonum/graph/internal/set/same.go @@ -23,15 +23,6 @@ func same(a, b Nodes) bool { // hash maps are passed as a pointer to a runtime Hmap struct. A map is // not seen by the runtime as a pointer though, so we use unsafe to get // the maps' pointer values to compare. -func intsSame(a, b Ints) bool { - return *(*uintptr)(unsafe.Pointer(&a)) == *(*uintptr)(unsafe.Pointer(&b)) -} - -// int64sSame determines whether two sets are backed by the same store. In the -// current implementation using hash maps it makes use of the fact that -// hash maps are passed as a pointer to a runtime Hmap struct. A map is -// not seen by the runtime as a pointer though, so we use unsafe to get -// the maps' pointer values to compare. 
-func int64sSame(a, b Int64s) bool { +func intsSame[T Int](a, b Ints[T]) bool { return *(*uintptr)(unsafe.Pointer(&a)) == *(*uintptr)(unsafe.Pointer(&b)) } diff --git a/vendor/gonum.org/v1/gonum/graph/internal/set/same_appengine.go b/vendor/gonum.org/v1/gonum/graph/internal/set/same_appengine.go index 368823d4c83..ae8aa4d71e6 100644 --- a/vendor/gonum.org/v1/gonum/graph/internal/set/same_appengine.go +++ b/vendor/gonum.org/v1/gonum/graph/internal/set/same_appengine.go @@ -23,15 +23,6 @@ func same(a, b Nodes) bool { // hash maps are passed as a pointer to a runtime Hmap struct. A map is // not seen by the runtime as a pointer though, so we use reflect to get // the maps' pointer values to compare. -func intsSame(a, b Ints) bool { - return reflect.ValueOf(a).Pointer() == reflect.ValueOf(b).Pointer() -} - -// int64sSame determines whether two sets are backed by the same store. In the -// current implementation using hash maps it makes use of the fact that -// hash maps are passed as a pointer to a runtime Hmap struct. A map is -// not seen by the runtime as a pointer though, so we use reflect to get -// the maps' pointer values to compare. -func int64sSame(a, b Int64s) bool { +func intsSame[T Int](a, b Ints[T]) bool { return reflect.ValueOf(a).Pointer() == reflect.ValueOf(b).Pointer() } diff --git a/vendor/gonum.org/v1/gonum/graph/internal/set/set.go b/vendor/gonum.org/v1/gonum/graph/internal/set/set.go index 8067e281677..2175e480be3 100644 --- a/vendor/gonum.org/v1/gonum/graph/internal/set/set.go +++ b/vendor/gonum.org/v1/gonum/graph/internal/set/set.go @@ -6,36 +6,34 @@ package set import "gonum.org/v1/gonum/graph" -// Ints is a set of int identifiers. -type Ints map[int]struct{} +type Int interface{ ~int | ~int64 } -// The simple accessor methods for Ints are provided to allow ease of -// implementation change should the need arise. +type Ints[T Int] map[T]struct{} // Add inserts an element into the set. -func (s Ints) Add(e int) { +func (s Ints[T]) Add(e T) { s[e] = struct{}{} } // Has reports the existence of the element in the set. -func (s Ints) Has(e int) bool { +func (s Ints[T]) Has(e T) bool { _, ok := s[e] return ok } // Remove deletes the specified element from the set. -func (s Ints) Remove(e int) { +func (s Ints[T]) Remove(e T) { delete(s, e) } // Count reports the number of elements stored in the set. -func (s Ints) Count() int { +func (s Ints[T]) Count() int { return len(s) } // IntsEqual reports set equality between the parameters. Sets are equal if // and only if they have the same elements. -func IntsEqual(a, b Ints) bool { +func IntsEqual[T Int](a, b Ints[T]) bool { if intsSame(a, b) { return true } @@ -53,53 +51,6 @@ func IntsEqual(a, b Ints) bool { return true } -// Int64s is a set of int64 identifiers. -type Int64s map[int64]struct{} - -// The simple accessor methods for Ints are provided to allow ease of -// implementation change should the need arise. - -// Add inserts an element into the set. -func (s Int64s) Add(e int64) { - s[e] = struct{}{} -} - -// Has reports the existence of the element in the set. -func (s Int64s) Has(e int64) bool { - _, ok := s[e] - return ok -} - -// Remove deletes the specified element from the set. -func (s Int64s) Remove(e int64) { - delete(s, e) -} - -// Count reports the number of elements stored in the set. -func (s Int64s) Count() int { - return len(s) -} - -// Int64sEqual reports set equality between the parameters. Sets are equal if -// and only if they have the same elements. 
-func Int64sEqual(a, b Int64s) bool { - if int64sSame(a, b) { - return true - } - - if len(a) != len(b) { - return false - } - - for e := range a { - if _, ok := b[e]; !ok { - return false - } - } - - return true -} - // Nodes is a set of nodes keyed in their integer identifiers. type Nodes map[int64]graph.Node diff --git a/vendor/gonum.org/v1/gonum/graph/iterator/lines_map_safe.go b/vendor/gonum.org/v1/gonum/graph/iterator/lines_map_safe.go index 0a7414f7d72..3f2d799f973 100644 --- a/vendor/gonum.org/v1/gonum/graph/iterator/lines_map_safe.go +++ b/vendor/gonum.org/v1/gonum/graph/iterator/lines_map_safe.go @@ -16,10 +16,11 @@ import ( // Lines implements the graph.Lines interfaces. // The iteration order of Lines is randomized. type Lines struct { - lines reflect.Value - iter *reflect.MapIter - pos int - curr graph.Line + iter reflect.MapIter + pos, len int + curr graph.Line + value reflect.Value + lines reflect.Value } // NewLines returns a Lines initialized with the provided lines, a @@ -30,23 +31,26 @@ type Lines struct { // the call to NewLines. func NewLines(lines map[int64]graph.Line) *Lines { rv := reflect.ValueOf(lines) - return &Lines{lines: rv, iter: rv.MapRange()} + l := &Lines{lines: rv, len: len(lines)} + l.iter.Reset(rv) + l.value = reflect.ValueOf(&l.curr).Elem() + return l } // Len returns the remaining number of lines to be iterated over. func (l *Lines) Len() int { - return l.lines.Len() - l.pos + return l.len - l.pos } // Next returns whether the next call of Line will return a valid line. func (l *Lines) Next() bool { - if l.pos >= l.lines.Len() { + if l.pos >= l.len { return false } ok := l.iter.Next() if ok { l.pos++ - l.curr = l.iter.Value().Interface().(graph.Line) + l.value.SetIterValue(&l.iter) } return ok } @@ -61,7 +65,7 @@ func (l *Lines) Line() graph.Line { func (l *Lines) Reset() { l.curr = nil l.pos = 0 - l.iter = l.lines.MapRange() + l.iter.Reset(l.lines) } // LineSlice returns all the remaining lines in the iterator and advances @@ -73,19 +77,21 @@ func (l *Lines) LineSlice() []graph.Line { } lines := make([]graph.Line, 0, l.Len()) for l.iter.Next() { - lines = append(lines, l.iter.Value().Interface().(graph.Line)) + l.value.SetIterValue(&l.iter) + lines = append(lines, l.curr) } - l.pos = l.lines.Len() + l.pos = l.len return lines } // WeightedLines implements the graph.WeightedLines interfaces. // The iteration order of WeightedLines is randomized. type WeightedLines struct { - lines reflect.Value - iter *reflect.MapIter - pos int - curr graph.WeightedLine + iter reflect.MapIter + pos, len int + curr graph.WeightedLine + value reflect.Value + lines reflect.Value } // NewWeightedLines returns a WeightedLines initialized with the provided lines, a @@ -96,23 +102,26 @@ type WeightedLines struct { // the call to NewWeightedLines. func NewWeightedLines(lines map[int64]graph.WeightedLine) *WeightedLines { rv := reflect.ValueOf(lines) - return &WeightedLines{lines: rv, iter: rv.MapRange()} + l := &WeightedLines{lines: rv, len: len(lines)} + l.iter.Reset(rv) + l.value = reflect.ValueOf(&l.curr).Elem() + return l } // Len returns the remaining number of lines to be iterated over. func (l *WeightedLines) Len() int { - return l.lines.Len() - l.pos + return l.len - l.pos } // Next returns whether the next call of WeightedLine will return a valid line. 
func (l *WeightedLines) Next() bool { - if l.pos >= l.lines.Len() { + if l.pos >= l.len { return false } ok := l.iter.Next() if ok { l.pos++ - l.curr = l.iter.Value().Interface().(graph.WeightedLine) + l.value.SetIterValue(&l.iter) } return ok } @@ -127,7 +136,7 @@ func (l *WeightedLines) WeightedLine() graph.WeightedLine { func (l *WeightedLines) Reset() { l.curr = nil l.pos = 0 - l.iter = l.lines.MapRange() + l.iter.Reset(l.lines) } // WeightedLineSlice returns all the remaining lines in the iterator and advances @@ -139,8 +148,9 @@ func (l *WeightedLines) WeightedLineSlice() []graph.WeightedLine { } lines := make([]graph.WeightedLine, 0, l.Len()) for l.iter.Next() { - lines = append(lines, l.iter.Value().Interface().(graph.WeightedLine)) + l.value.SetIterValue(&l.iter) + lines = append(lines, l.curr) } - l.pos = l.lines.Len() + l.pos = l.len return lines } diff --git a/vendor/gonum.org/v1/gonum/graph/iterator/nodes_map_safe.go b/vendor/gonum.org/v1/gonum/graph/iterator/nodes_map_safe.go index 1181545f56b..e79d8a850d4 100644 --- a/vendor/gonum.org/v1/gonum/graph/iterator/nodes_map_safe.go +++ b/vendor/gonum.org/v1/gonum/graph/iterator/nodes_map_safe.go @@ -16,10 +16,11 @@ import ( // Nodes implements the graph.Nodes interfaces. // The iteration order of Nodes is randomized. type Nodes struct { - nodes reflect.Value - iter *reflect.MapIter - pos int - curr graph.Node + iter reflect.MapIter + pos, len int + curr graph.Node + value reflect.Value + nodes reflect.Value } // NewNodes returns a Nodes initialized with the provided nodes, a @@ -30,23 +31,26 @@ type Nodes struct { // the call to NewNodes. func NewNodes(nodes map[int64]graph.Node) *Nodes { rv := reflect.ValueOf(nodes) - return &Nodes{nodes: rv, iter: rv.MapRange()} + n := &Nodes{nodes: rv, len: len(nodes)} + n.iter.Reset(rv) + n.value = reflect.ValueOf(&n.curr).Elem() + return n } // Len returns the remaining number of nodes to be iterated over. func (n *Nodes) Len() int { - return n.nodes.Len() - n.pos + return n.len - n.pos } // Next returns whether the next call of Node will return a valid node. func (n *Nodes) Next() bool { - if n.pos >= n.nodes.Len() { + if n.pos >= n.len { return false } ok := n.iter.Next() if ok { n.pos++ - n.curr = n.iter.Value().Interface().(graph.Node) + n.value.SetIterValue(&n.iter) } return ok } @@ -73,20 +77,21 @@ func (n *Nodes) NodeSlice() []graph.Node { } nodes := make([]graph.Node, 0, n.Len()) for n.iter.Next() { - nodes = append(nodes, n.iter.Value().Interface().(graph.Node)) + n.value.SetIterValue(&n.iter) + nodes = append(nodes, n.curr) } - n.pos = n.nodes.Len() + n.pos = n.len return nodes } // NodesByEdge implements the graph.Nodes interfaces. // The iteration order of Nodes is randomized. type NodesByEdge struct { - nodes map[int64]graph.Node - edges reflect.Value - iter *reflect.MapIter - pos int - curr graph.Node + iter reflect.MapIter + pos, len int + edges reflect.Value + curr graph.Node + nodes map[int64]graph.Node } // NewNodesByEdge returns a NodesByEdge initialized with the @@ -100,7 +105,9 @@ type NodesByEdge struct { // is mutated after the call to NewNodes. 
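The iterator rewrite above embeds reflect.MapIter by value, re-arms it with Reset, and copies elements out with Value.SetIterValue rather than iter.Value().Interface(), which removes one *MapIter allocation per Reset and one interface allocation per element. A standalone sketch of the technique using only the standard library (Go 1.18+):

	package main

	import (
		"fmt"
		"reflect"
	)

	func main() {
		m := map[int64]string{1: "a", 2: "b"}
		rv := reflect.ValueOf(m)

		var iter reflect.MapIter // held by value, as in the Lines and Nodes structs above
		iter.Reset(rv)           // arm the iterator without allocating a *MapIter

		var curr string
		dst := reflect.ValueOf(&curr).Elem() // plays the role of the cached value field
		for iter.Next() {
			dst.SetIterValue(&iter) // writes the map value straight into curr
			fmt.Println(iter.Key().Int(), curr)
		}

		iter.Reset(rv) // a later pass reuses the same iterator, as the Reset methods above do
	}
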
func NewNodesByEdge(nodes map[int64]graph.Node, edges map[int64]graph.Edge) *NodesByEdge { rv := reflect.ValueOf(edges) - return &NodesByEdge{nodes: nodes, edges: rv, iter: rv.MapRange()} + n := &NodesByEdge{nodes: nodes, len: len(edges), edges: rv} + n.iter.Reset(rv) + return n } // NewNodesByWeightedEdge returns a NodesByEdge initialized with the @@ -114,7 +121,9 @@ func NewNodesByEdge(nodes map[int64]graph.Node, edges map[int64]graph.Edge) *Nod // is mutated after the call to NewNodes. func NewNodesByWeightedEdge(nodes map[int64]graph.Node, edges map[int64]graph.WeightedEdge) *NodesByEdge { rv := reflect.ValueOf(edges) - return &NodesByEdge{nodes: nodes, edges: rv, iter: rv.MapRange()} + n := &NodesByEdge{nodes: nodes, len: len(edges), edges: rv} + n.iter.Reset(rv) + return n } // NewNodesByLines returns a NodesByEdge initialized with the @@ -128,7 +137,9 @@ func NewNodesByWeightedEdge(nodes map[int64]graph.Node, edges map[int64]graph.We // is mutated after the call to NewNodes. func NewNodesByLines(nodes map[int64]graph.Node, lines map[int64]map[int64]graph.Line) *NodesByEdge { rv := reflect.ValueOf(lines) - return &NodesByEdge{nodes: nodes, edges: rv, iter: rv.MapRange()} + n := &NodesByEdge{nodes: nodes, len: len(lines), edges: rv} + n.iter.Reset(rv) + return n } // NewNodesByWeightedLines returns a NodesByEdge initialized with the @@ -142,17 +153,19 @@ func NewNodesByLines(nodes map[int64]graph.Node, lines map[int64]map[int64]graph // is mutated after the call to NewNodes. func NewNodesByWeightedLines(nodes map[int64]graph.Node, lines map[int64]map[int64]graph.WeightedLine) *NodesByEdge { rv := reflect.ValueOf(lines) - return &NodesByEdge{nodes: nodes, edges: rv, iter: rv.MapRange()} + n := &NodesByEdge{nodes: nodes, len: len(lines), edges: rv} + n.iter.Reset(rv) + return n } // Len returns the remaining number of nodes to be iterated over. func (n *NodesByEdge) Len() int { - return n.edges.Len() - n.pos + return n.len - n.pos } // Next returns whether the next call of Node will return a valid node. func (n *NodesByEdge) Next() bool { - if n.pos >= n.edges.Len() { + if n.pos >= n.len { return false } ok := n.iter.Next() @@ -173,7 +186,7 @@ func (n *NodesByEdge) Node() graph.Node { func (n *NodesByEdge) Reset() { n.curr = nil n.pos = 0 - n.iter = n.edges.MapRange() + n.iter.Reset(n.edges) } // NodeSlice returns all the remaining nodes in the iterator and advances @@ -185,8 +198,9 @@ func (n *NodesByEdge) NodeSlice() []graph.Node { } nodes := make([]graph.Node, 0, n.Len()) for n.iter.Next() { - nodes = append(nodes, n.nodes[n.iter.Key().Int()]) + n.curr = n.nodes[n.iter.Key().Int()] + nodes = append(nodes, n.curr) } - n.pos = n.edges.Len() + n.pos = n.len return nodes } diff --git a/vendor/gonum.org/v1/gonum/graph/set/uid/uid.go b/vendor/gonum.org/v1/gonum/graph/set/uid/uid.go index ce14c76551f..75074425012 100644 --- a/vendor/gonum.org/v1/gonum/graph/set/uid/uid.go +++ b/vendor/gonum.org/v1/gonum/graph/set/uid/uid.go @@ -17,12 +17,12 @@ const Max = math.MaxInt64 // Set implements available ID storage. type Set struct { maxID int64 - used, free set.Int64s + used, free set.Ints[int64] } // NewSet returns a new Set. func NewSet() *Set { - return &Set{maxID: -1, used: make(set.Int64s), free: make(set.Int64s)} + return &Set{maxID: -1, used: make(set.Ints[int64]), free: make(set.Ints[int64])} } // NewID returns a new unique ID. 
The ID returned is not considered used diff --git a/vendor/gonum.org/v1/gonum/graph/simple/dense_directed_matrix.go b/vendor/gonum.org/v1/gonum/graph/simple/dense_directed_matrix.go index dbe5dc82ad7..34ce7b0bcb4 100644 --- a/vendor/gonum.org/v1/gonum/graph/simple/dense_directed_matrix.go +++ b/vendor/gonum.org/v1/gonum/graph/simple/dense_directed_matrix.go @@ -6,8 +6,8 @@ package simple import ( "gonum.org/v1/gonum/graph" - "gonum.org/v1/gonum/graph/internal/ordered" "gonum.org/v1/gonum/graph/iterator" + "gonum.org/v1/gonum/internal/order" "gonum.org/v1/gonum/mat" ) @@ -60,7 +60,7 @@ func NewDirectedMatrix(n int, init, self, absent float64) *DirectedMatrix { // specifies the cost of self connection, and absent specifies the weight // returned for absent edges. func NewDirectedMatrixFrom(nodes []graph.Node, init, self, absent float64) *DirectedMatrix { - ordered.ByID(nodes) + order.ByID(nodes) for i, n := range nodes { if int64(i) != n.ID() { panic("simple: non-contiguous node IDs") diff --git a/vendor/gonum.org/v1/gonum/graph/simple/dense_undirected_matrix.go b/vendor/gonum.org/v1/gonum/graph/simple/dense_undirected_matrix.go index 27fc5a27a57..e97e6afb484 100644 --- a/vendor/gonum.org/v1/gonum/graph/simple/dense_undirected_matrix.go +++ b/vendor/gonum.org/v1/gonum/graph/simple/dense_undirected_matrix.go @@ -6,8 +6,8 @@ package simple import ( "gonum.org/v1/gonum/graph" - "gonum.org/v1/gonum/graph/internal/ordered" "gonum.org/v1/gonum/graph/iterator" + "gonum.org/v1/gonum/internal/order" "gonum.org/v1/gonum/mat" ) @@ -60,7 +60,7 @@ func NewUndirectedMatrix(n int, init, self, absent float64) *UndirectedMatrix { // specifies the cost of self connection, and absent specifies the weight // returned for absent edges. func NewUndirectedMatrixFrom(nodes []graph.Node, init, self, absent float64) *UndirectedMatrix { - ordered.ByID(nodes) + order.ByID(nodes) for i, n := range nodes { if int64(i) != n.ID() { panic("simple: non-contiguous node IDs") diff --git a/vendor/gonum.org/v1/gonum/graph/topo/bron_kerbosch.go b/vendor/gonum.org/v1/gonum/graph/topo/bron_kerbosch.go index 83fdb5bdf8b..e190c260320 100644 --- a/vendor/gonum.org/v1/gonum/graph/topo/bron_kerbosch.go +++ b/vendor/gonum.org/v1/gonum/graph/topo/bron_kerbosch.go @@ -5,8 +5,9 @@ package topo import ( + "slices" + "gonum.org/v1/gonum/graph" - "gonum.org/v1/gonum/graph/internal/ordered" "gonum.org/v1/gonum/graph/internal/set" ) @@ -15,7 +16,7 @@ import ( func DegeneracyOrdering(g graph.Undirected) (order []graph.Node, cores [][]graph.Node) { order, offsets := degeneracyOrdering(g) - ordered.Reverse(order) + slices.Reverse(order) cores = make([][]graph.Node, len(offsets)) offset := len(order) for i, n := range offsets { @@ -145,7 +146,7 @@ func BronKerbosch(g graph.Undirected) [][]graph.Node { x := set.NewNodes() var bk bronKerbosch order, _ := degeneracyOrdering(g) - ordered.Reverse(order) + slices.Reverse(order) for _, v := range order { neighbours := graph.NodesOf(g.From(v.ID())) nv := set.NewNodesSize(len(neighbours)) diff --git a/vendor/gonum.org/v1/gonum/graph/topo/clique_graph.go b/vendor/gonum.org/v1/gonum/graph/topo/clique_graph.go index 202fa953f94..2223ca3b747 100644 --- a/vendor/gonum.org/v1/gonum/graph/topo/clique_graph.go +++ b/vendor/gonum.org/v1/gonum/graph/topo/clique_graph.go @@ -6,8 +6,8 @@ package topo import ( "gonum.org/v1/gonum/graph" - "gonum.org/v1/gonum/graph/internal/ordered" "gonum.org/v1/gonum/graph/internal/set" + "gonum.org/v1/gonum/internal/order" ) // Builder is a pure topological graph construction type. 
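The set generification threaded through these hunks collapses the duplicated Ints (int) and Int64s (int64) map types into one generic Ints[T] under the Int constraint, so call sites such as uid.Set above simply name the key type. Since gonum.org/v1/gonum/graph/internal/set is internal and cannot be imported from outside the module, the shape of the API is reproduced standalone here purely for illustration:

	package main

	import "fmt"

	type Int interface{ ~int | ~int64 }

	type Ints[T Int] map[T]struct{}

	func (s Ints[T]) Add(e T)      { s[e] = struct{}{} }
	func (s Ints[T]) Has(e T) bool { _, ok := s[e]; return ok }

	func main() {
		used := make(Ints[int64]) // was make(set.Int64s) before this change
		used.Add(7)
		free := make(Ints[int]) // the same generic type now covers int keys
		free.Add(1)
		fmt.Println(used.Has(7), free.Has(1)) // true true
	}
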
@@ -26,9 +26,9 @@ func CliqueGraph(dst Builder, g graph.Undirected) { // Construct a consistent view of cliques in g. Sorting costs // us a little, but not as much as the cliques themselves. for _, c := range cliques { - ordered.ByID(c) + order.ByID(c) } - ordered.BySliceIDs(cliques) + order.BySliceIDs(cliques) cliqueNodes := make(cliqueNodeSets, len(cliques)) for id, c := range cliques { @@ -59,7 +59,7 @@ func CliqueGraph(dst Builder, g graph.Undirected) { for _, n := range set.IntersectionOfNodes(uc.nodes, vc.nodes) { edgeNodes = append(edgeNodes, n) } - ordered.ByID(edgeNodes) + order.ByID(edgeNodes) } dst.SetEdge(CliqueGraphEdge{from: uc.Clique, to: vc.Clique, nodes: edgeNodes}) diff --git a/vendor/gonum.org/v1/gonum/graph/topo/johnson_cycles.go b/vendor/gonum.org/v1/gonum/graph/topo/johnson_cycles.go index 6c90f60ab53..0d8407b0411 100644 --- a/vendor/gonum.org/v1/gonum/graph/topo/johnson_cycles.go +++ b/vendor/gonum.org/v1/gonum/graph/topo/johnson_cycles.go @@ -6,9 +6,9 @@ package topo import ( "gonum.org/v1/gonum/graph" - "gonum.org/v1/gonum/graph/internal/ordered" "gonum.org/v1/gonum/graph/internal/set" "gonum.org/v1/gonum/graph/iterator" + "gonum.org/v1/gonum/internal/order" ) // johnson implements Johnson's "Finding all the elementary @@ -17,8 +17,8 @@ import ( // Comments in the johnson methods are kept in sync with the comments // and labels from the paper. type johnson struct { - adjacent johnsonGraph // SCC adjacency list. - b []set.Ints // Johnson's "B-list". + adjacent johnsonGraph // SCC adjacency list. + b []set.Ints[int] // Johnson's "B-list". blocked []bool s int @@ -32,7 +32,7 @@ func DirectedCyclesIn(g graph.Directed) [][]graph.Node { jg := johnsonGraphFrom(g) j := johnson{ adjacent: jg, - b: make([]set.Ints, len(jg.orig)), + b: make([]set.Ints[int], len(jg.orig)), blocked: make([]bool, len(jg.orig)), } @@ -57,7 +57,7 @@ func DirectedCyclesIn(g graph.Directed) [][]graph.Node { } if len(j.adjacent.succ[v.ID()]) > 0 { j.blocked[i] = false - j.b[i] = make(set.Ints) + j.b[i] = make(set.Ints[int]) } } //L3: @@ -125,20 +125,20 @@ type johnsonGraph struct { orig []graph.Node index map[int64]int - nodes set.Int64s - succ map[int64]set.Int64s + nodes set.Ints[int64] + succ map[int64]set.Ints[int64] } // johnsonGraphFrom returns a deep copy of the graph g. 
func johnsonGraphFrom(g graph.Directed) johnsonGraph { nodes := graph.NodesOf(g.Nodes()) - ordered.ByID(nodes) + order.ByID(nodes) c := johnsonGraph{ orig: nodes, index: make(map[int64]int, len(nodes)), - nodes: make(set.Int64s, len(nodes)), - succ: make(map[int64]set.Int64s), + nodes: make(set.Ints[int64], len(nodes)), + succ: make(map[int64]set.Ints[int64]), } for i, u := range nodes { uid := u.ID() @@ -147,7 +147,7 @@ func johnsonGraphFrom(g graph.Directed) johnsonGraph { for to.Next() { v := to.Node() if c.succ[uid] == nil { - c.succ[uid] = make(set.Int64s) + c.succ[uid] = make(set.Ints[int64]) c.nodes.Add(uid) } c.nodes.Add(v.ID()) @@ -207,8 +207,8 @@ func (g johnsonGraph) sccSubGraph(sccs [][]graph.Node, min int) johnsonGraph { sub := johnsonGraph{ orig: g.orig, index: g.index, - nodes: make(set.Int64s), - succ: make(map[int64]set.Int64s), + nodes: make(set.Ints[int64]), + succ: make(map[int64]set.Ints[int64]), } var n int @@ -221,7 +221,7 @@ func (g johnsonGraph) sccSubGraph(sccs [][]graph.Node, min int) johnsonGraph { for _, v := range scc { if _, ok := g.succ[u.ID()][v.ID()]; ok { if sub.succ[u.ID()] == nil { - sub.succ[u.ID()] = make(set.Int64s) + sub.succ[u.ID()] = make(set.Ints[int64]) sub.nodes.Add(u.ID()) } sub.nodes.Add(v.ID()) diff --git a/vendor/gonum.org/v1/gonum/graph/topo/paton_cycles.go b/vendor/gonum.org/v1/gonum/graph/topo/paton_cycles.go index 525f2983f73..2e1ce4cb7ba 100644 --- a/vendor/gonum.org/v1/gonum/graph/topo/paton_cycles.go +++ b/vendor/gonum.org/v1/gonum/graph/topo/paton_cycles.go @@ -17,7 +17,7 @@ func UndirectedCyclesIn(g graph.Undirected) [][]graph.Node { // https://doi.org/10.1145/363219.363232 var cycles [][]graph.Node - done := make(set.Int64s) + done := make(set.Ints[int64]) var tree linear.NodeStack nodes := g.Nodes() for nodes.Next() { @@ -30,7 +30,7 @@ func UndirectedCyclesIn(g graph.Undirected) [][]graph.Node { tree = tree[:0] tree.Push(n) - from := sets{id: set.Int64s{}} + from := sets{id: set.Ints[int64]{}} to := map[int64]graph.Node{id: n} for tree.Len() != 0 { @@ -68,12 +68,12 @@ func UndirectedCyclesIn(g graph.Undirected) [][]graph.Node { return cycles } -type sets map[int64]set.Int64s +type sets map[int64]set.Ints[int64] func (s sets) add(uid, vid int64) { e, ok := s[vid] if !ok { - e = make(set.Int64s) + e = make(set.Ints[int64]) s[vid] = e } e.Add(uid) diff --git a/vendor/gonum.org/v1/gonum/graph/topo/tarjan.go b/vendor/gonum.org/v1/gonum/graph/topo/tarjan.go index ab479a92d7f..31b165199a7 100644 --- a/vendor/gonum.org/v1/gonum/graph/topo/tarjan.go +++ b/vendor/gonum.org/v1/gonum/graph/topo/tarjan.go @@ -6,10 +6,11 @@ package topo import ( "fmt" + "slices" "gonum.org/v1/gonum/graph" - "gonum.org/v1/gonum/graph/internal/ordered" "gonum.org/v1/gonum/graph/internal/set" + "gonum.org/v1/gonum/internal/order" ) // Unorderable is an error containing sets of unorderable graph.Nodes. @@ -29,7 +30,7 @@ func (e Unorderable) Error() string { return fmt.Sprintf("topo: no topological ordering: cyclic components: %v", [][]graph.Node(e)) } -func lexical(nodes []graph.Node) { ordered.ByID(nodes) } +func lexical(nodes []graph.Node) { order.ByID(nodes) } // Sort performs a topological sort of the directed graph g returning the 'from' to 'to' // sort order. 
If a topological ordering is not possible, an Unorderable error is returned @@ -70,12 +71,10 @@ func sortedFrom(sccs [][]graph.Node, order func([]graph.Node)) ([]graph.Node, er } var err error if sc != nil { - for i, j := 0, len(sc)-1; i < j; i, j = i+1, j-1 { - sc[i], sc[j] = sc[j], sc[i] - } + slices.Reverse(sc) err = sc } - ordered.Reverse(sorted) + slices.Reverse(sorted) return sorted, err } @@ -100,12 +99,12 @@ func tarjanSCCstabilized(g graph.Directed, order func([]graph.Node)) [][]graph.N } } else { order(nodes) - ordered.Reverse(nodes) + slices.Reverse(nodes) succ = func(id int64) []graph.Node { to := graph.NodesOf(g.From(id)) order(to) - ordered.Reverse(to) + slices.Reverse(to) return to } } @@ -115,7 +114,7 @@ func tarjanSCCstabilized(g graph.Directed, order func([]graph.Node)) [][]graph.N indexTable: make(map[int64]int, len(nodes)), lowLink: make(map[int64]int, len(nodes)), - onStack: make(set.Int64s), + onStack: make(set.Ints[int64]), } for _, v := range nodes { if t.indexTable[v.ID()] == 0 { @@ -135,7 +134,7 @@ type tarjan struct { index int indexTable map[int64]int lowLink map[int64]int - onStack set.Int64s + onStack set.Ints[int64] stack []graph.Node @@ -187,10 +186,3 @@ func (t *tarjan) strongconnect(v graph.Node) { t.sccs = append(t.sccs, scc) } } - -func min(a, b int) int { - if a < b { - return a - } - return b -} diff --git a/vendor/gonum.org/v1/gonum/graph/topo/topo.go b/vendor/gonum.org/v1/gonum/graph/topo/topo.go index a76accb3186..344972798ca 100644 --- a/vendor/gonum.org/v1/gonum/graph/topo/topo.go +++ b/vendor/gonum.org/v1/gonum/graph/topo/topo.go @@ -6,8 +6,8 @@ package topo import ( "gonum.org/v1/gonum/graph" - "gonum.org/v1/gonum/graph/internal/ordered" "gonum.org/v1/gonum/graph/traverse" + "gonum.org/v1/gonum/internal/order" ) // IsPathIn returns whether path is a path in g. @@ -80,8 +80,8 @@ func Equal(a, b graph.Graph) bool { aNodeSlice := graph.NodesOf(aNodes) bNodeSlice := graph.NodesOf(bNodes) - ordered.ByID(aNodeSlice) - ordered.ByID(bNodeSlice) + order.ByID(aNodeSlice) + order.ByID(bNodeSlice) for i, aU := range aNodeSlice { id := aU.ID() if id != bNodeSlice[i].ID() { @@ -96,8 +96,8 @@ func Equal(a, b graph.Graph) bool { aAdjacent := graph.NodesOf(toA) bAdjacent := graph.NodesOf(toB) - ordered.ByID(aAdjacent) - ordered.ByID(bAdjacent) + order.ByID(aAdjacent) + order.ByID(bAdjacent) for i, aV := range aAdjacent { id := aV.ID() if id != bAdjacent[i].ID() { diff --git a/vendor/gonum.org/v1/gonum/graph/traverse/traverse.go b/vendor/gonum.org/v1/gonum/graph/traverse/traverse.go index 2a4efb66a60..bc4f9596c38 100644 --- a/vendor/gonum.org/v1/gonum/graph/traverse/traverse.go +++ b/vendor/gonum.org/v1/gonum/graph/traverse/traverse.go @@ -39,7 +39,7 @@ type BreadthFirst struct { Traverse func(graph.Edge) bool queue linear.NodeQueue - visited set.Int64s + visited set.Ints[int64] } // Walk performs a breadth-first traversal of the graph g starting from the given node, @@ -49,7 +49,7 @@ type BreadthFirst struct { // non-nil, it is called with each node the first time it is visited. 
func (b *BreadthFirst) Walk(g Graph, from graph.Node, until func(n graph.Node, d int) bool) graph.Node { if b.visited == nil { - b.visited = make(set.Int64s) + b.visited = make(set.Ints[int64]) } b.queue.Enqueue(from) if b.Visit != nil && !b.visited.Has(from.ID()) { @@ -147,7 +147,7 @@ type DepthFirst struct { Traverse func(graph.Edge) bool stack linear.NodeStack - visited set.Int64s + visited set.Ints[int64] } // Walk performs a depth-first traversal of the graph g starting from the given node, @@ -157,7 +157,7 @@ type DepthFirst struct { // is called with each node the first time it is visited. func (d *DepthFirst) Walk(g Graph, from graph.Node, until func(graph.Node) bool) graph.Node { if d.visited == nil { - d.visited = make(set.Int64s) + d.visited = make(set.Ints[int64]) } d.stack.Push(from) for d.stack.Len() != 0 { diff --git a/vendor/gonum.org/v1/gonum/internal/order/doc.go b/vendor/gonum.org/v1/gonum/internal/order/doc.go new file mode 100644 index 00000000000..6ed2f38c6b2 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/order/doc.go @@ -0,0 +1,6 @@ +// Copyright ©2024 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package order provides common sorting functions. +package order // import "gonum.org/v1/gonum/internal/order" diff --git a/vendor/gonum.org/v1/gonum/internal/order/order.go b/vendor/gonum.org/v1/gonum/internal/order/order.go new file mode 100644 index 00000000000..7baaec38c78 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/order/order.go @@ -0,0 +1,60 @@ +// Copyright ©2024 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package order + +import ( + "cmp" + "slices" + "sort" + + "gonum.org/v1/gonum/graph" +) + +// ByID sorts a slice of graph.Node by ID. +func ByID[S ~[]E, E graph.Node](n S) { + sort.Slice(n, func(i, j int) bool { return n[i].ID() < n[j].ID() }) +} + +// BySliceValues sorts a slice of []cmp.Ordered lexically by the values of +// the []cmp.Ordered. +func BySliceValues[S ~[]E, E cmp.Ordered](c []S) { + slices.SortFunc(c, func(a, b S) int { + l := min(len(a), len(b)) + for k, v := range a[:l] { + if n := cmp.Compare(v, b[k]); n != 0 { + return n + } + } + return cmp.Compare(len(a), len(b)) + }) +} + +// BySliceIDs sorts a slice of []graph.Node lexically by the IDs of the +// []graph.Node. +func BySliceIDs(c [][]graph.Node) { + slices.SortFunc(c, func(a, b []graph.Node) int { + l := min(len(a), len(b)) + for k, v := range a[:l] { + if n := cmp.Compare(v.ID(), b[k].ID()); n != 0 { + return n + } + } + return cmp.Compare(len(a), len(b)) + }) +} + +// LinesByIDs sorts a slice of graph.Line lexically by the From IDs, +// then by the To IDs, finally by the Line IDs.
+func LinesByIDs(n []graph.Line) { + slices.SortFunc(n, func(a, b graph.Line) int { + if n := cmp.Compare(a.From().ID(), b.From().ID()); n != 0 { + return n + } + if n := cmp.Compare(a.To().ID(), b.To().ID()); n != 0 { + return n + } + return cmp.Compare(a.ID(), b.ID()) + }) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/lapack.go b/vendor/gonum.org/v1/gonum/lapack/gonum/lapack.go index fef4f5583de..5daefc584dd 100644 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/lapack.go +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/lapack.go @@ -13,20 +13,6 @@ type Implementation struct{} var _ lapack.Float64 = Implementation{} -func min(a, b int) int { - if a < b { - return a - } - return b -} - -func max(a, b int) int { - if a > b { - return a - } - return b -} - func abs(a int) int { if a < 0 { return -a diff --git a/vendor/gonum.org/v1/gonum/lapack/lapack.go b/vendor/gonum.org/v1/gonum/lapack/lapack.go index 60b5d0d363b..60ef1c244a6 100644 --- a/vendor/gonum.org/v1/gonum/lapack/lapack.go +++ b/vendor/gonum.org/v1/gonum/lapack/lapack.go @@ -221,7 +221,7 @@ const ( EVSelected EVHowMany = 'S' // Compute selected right and/or left eigenvectors. ) -// MaximizeNormX specifies the heuristic method for computing a contribution to +// MaximizeNormXJob specifies the heuristic method for computing a contribution to // the reciprocal Dif-estimate in Dlatdf. type MaximizeNormXJob byte diff --git a/vendor/gonum.org/v1/gonum/lapack/lapack64/lapack64.go b/vendor/gonum.org/v1/gonum/lapack/lapack64/lapack64.go index d0afab119cd..1b4c1734a15 100644 --- a/vendor/gonum.org/v1/gonum/lapack/lapack64/lapack64.go +++ b/vendor/gonum.org/v1/gonum/lapack/lapack64/lapack64.go @@ -27,13 +27,6 @@ type Tridiagonal struct { DU []float64 } -func max(a, b int) int { - if a > b { - return a - } - return b -} - // Potrf computes the Cholesky factorization of a. // The factorization has the form // diff --git a/vendor/gonum.org/v1/gonum/mat/lu.go b/vendor/gonum.org/v1/gonum/mat/lu.go index 18ed3dab636..b530ada7e5c 100644 --- a/vendor/gonum.org/v1/gonum/mat/lu.go +++ b/vendor/gonum.org/v1/gonum/mat/lu.go @@ -237,6 +237,8 @@ func (lu *LU) RowPivots(dst []int) []int { return dst } +// Pivot returns the row pivots of the receiver. +// // Deprecated: Use RowPivots instead. func (lu *LU) Pivot(dst []int) []int { return lu.RowPivots(dst) diff --git a/vendor/gonum.org/v1/gonum/mat/matrix.go b/vendor/gonum.org/v1/gonum/mat/matrix.go index 9fc372c71ed..2d67bbe081e 100644 --- a/vendor/gonum.org/v1/gonum/mat/matrix.go +++ b/vendor/gonum.org/v1/gonum/mat/matrix.go @@ -962,20 +962,6 @@ func Trace(a Matrix) float64 { return v } -func min(a, b int) int { - if a < b { - return a - } - return b -} - -func max(a, b int) int { - if a > b { - return a - } - return b -} - // use returns a float64 slice with l elements, using f if it // has the necessary capacity, otherwise creating a new slice. func use(f []float64, l int) []float64 { diff --git a/vendor/gonum.org/v1/gonum/mat/qr.go b/vendor/gonum.org/v1/gonum/mat/qr.go index af99dbcaa15..7f8fec8f6f6 100644 --- a/vendor/gonum.org/v1/gonum/mat/qr.go +++ b/vendor/gonum.org/v1/gonum/mat/qr.go @@ -31,8 +31,13 @@ func (qr *QR) Dims() (r, c int) { return qr.qr.Dims() } -// At returns the element at row i, column j. +// At returns the element at row i, column j. At will panic if the receiver +// does not contain a successful factorization. 
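The new order package above is built on the Go 1.21 cmp/slices idiom: slices.SortFunc with a three-way cmp.Compare comparator replaces sort.Slice's boolean less function, and a single generic BySliceValues covers what previously needed per-type copies. A standalone sketch of the same lexical-order comparator:

	package main

	import (
		"cmp"
		"fmt"
		"slices"
	)

	func main() {
		c := [][]int64{{2, 1}, {1, 9}, {1, 2, 3}, {1, 2}}
		// Lexical order over slices, with the shorter slice first on ties:
		// the rule BySliceValues implements.
		slices.SortFunc(c, func(a, b []int64) int {
			l := min(len(a), len(b))
			for k, v := range a[:l] {
				if n := cmp.Compare(v, b[k]); n != 0 {
					return n
				}
			}
			return cmp.Compare(len(a), len(b))
		})
		fmt.Println(c) // [[1 2] [1 2 3] [1 9] [2 1]]
	}
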
func (qr *QR) At(i, j int) float64 { + if !qr.isValid() { + panic(badQR) + } + m, n := qr.Dims() if uint(i) >= uint(m) { panic(ErrRowAccess) @@ -41,6 +46,20 @@ func (qr *QR) At(i, j int) float64 { panic(ErrColAccess) } + if qr.q == nil || qr.q.IsEmpty() { + // Calculate Qi, Q i-th row + qi := getFloat64s(m, true) + qr.qRowTo(i, qi) + + // Compute QR(i,j) + var val float64 + for k := 0; k <= j; k++ { + val += qi[k] * qr.qr.at(k, j) + } + putFloat64s(qi) + return val + } + var val float64 for k := 0; k <= j; k++ { val += qr.q.at(i, k) * qr.qr.at(k, j) @@ -48,6 +67,25 @@ func (qr *QR) At(i, j int) float64 { return val } +// qRowTo extracts the i-th row of the orthonormal matrix Q from a QR +// decomposition. +func (qr *QR) qRowTo(i int, dst []float64) { + c := blas64.General{ + Rows: 1, + Cols: len(dst), + Stride: len(dst), + Data: dst, + } + c.Data[i] = 1 // C is the i-th unit vector + + // Construct Qi from the elementary reflectors: Qi = C * (H(1) H(2) ... H(nTau)) + work := []float64{0} + lapack64.Ormqr(blas.Right, blas.NoTrans, qr.qr.mat, qr.tau, c, work, -1) + work = getFloat64s(int(work[0]), false) + lapack64.Ormqr(blas.Right, blas.NoTrans, qr.qr.mat, qr.tau, c, work, len(work)) + putFloat64s(work) +} + // T performs an implicit transpose by returning the receiver inside a // Transpose. func (qr *QR) T() Matrix { @@ -98,7 +136,9 @@ func (qr *QR) factorize(a Matrix, norm lapack.MatrixNorm) { lapack64.Geqrf(qr.qr.mat, qr.tau, work, len(work)) putFloat64s(work) qr.updateCond(norm) - qr.updateQ() + if qr.q != nil { + qr.q.Reset() + } } func (qr *QR) updateQ() { @@ -149,7 +189,7 @@ func (qr *QR) RTo(dst *Dense) { dst.ReuseAs(r, c) } else { r2, c2 := dst.Dims() - if c != r2 || c != c2 { + if r != r2 || c != c2 { panic(ErrShape) } } @@ -192,6 +232,10 @@ func (qr *QR) QTo(dst *Dense) { panic(ErrShape) } } + + if qr.q == nil || qr.q.IsEmpty() { + qr.updateQ() + } dst.Copy(qr.q) } diff --git a/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/google.golang.org/api/googleapi/googleapi.go index b5e38c66282..c3e8a4f5911 100644 --- a/vendor/google.golang.org/api/googleapi/googleapi.go +++ b/vendor/google.golang.org/api/googleapi/googleapi.go @@ -145,22 +145,40 @@ func CheckResponse(res *http.Response) error { } slurp, err := io.ReadAll(res.Body) if err == nil { - jerr := new(errorReply) - err = json.Unmarshal(slurp, jerr) - if err == nil && jerr.Error != nil { - if jerr.Error.Code == 0 { - jerr.Error.Code = res.StatusCode - } - jerr.Error.Body = string(slurp) - jerr.Error.Header = res.Header - return jerr.Error - } + return CheckResponseWithBody(res, slurp) } return &Error{ Code: res.StatusCode, Body: string(slurp), Header: res.Header, } + +} + +// CheckResponseWithBody returns an error (of type *Error) if the response +// status code is not 2xx. Distinct from CheckResponse to allow for checking +// a previously-read body to maintain error detail content. 
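The CheckResponseWithBody helper documented above (and defined just below) lets a caller consume the response body once, for logging or retry classification for instance, while still getting the structured *googleapi.Error that CheckResponse produces. A hypothetical fragment, assuming res is an *http.Response obtained elsewhere and the usual io and googleapi imports:

	body, err := io.ReadAll(res.Body)
	res.Body.Close()
	if err != nil {
		return err
	}
	if err := googleapi.CheckResponseWithBody(res, body); err != nil {
		return err // a *googleapi.Error carrying Code, Body, and Header
	}
	// body remains available for decoding on the success path
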
+func CheckResponseWithBody(res *http.Response, body []byte) error { + if res.StatusCode >= 200 && res.StatusCode <= 299 { + return nil + } + + jerr := new(errorReply) + err := json.Unmarshal(body, jerr) + if err == nil && jerr.Error != nil { + if jerr.Error.Code == 0 { + jerr.Error.Code = res.StatusCode + } + jerr.Error.Body = string(body) + jerr.Error.Header = res.Header + return jerr.Error + } + + return &Error{ + Code: res.StatusCode, + Body: string(body), + Header: res.Header, + } } // IsNotModified reports whether err is the result of the @@ -200,7 +218,17 @@ var WithDataWrapper = MarshalStyle(true) // WithoutDataWrapper marshals JSON without a {"data": ...} wrapper. var WithoutDataWrapper = MarshalStyle(false) +// JSONReader is like JSONBuffer, but returns an io.Reader instead. func (wrap MarshalStyle) JSONReader(v interface{}) (io.Reader, error) { + buf, err := wrap.JSONBuffer(v) + if err != nil { + return nil, err + } + return buf, nil +} + +// JSONBuffer encodes v, wrapping it in a {"data": ...} object when the marshal style requires it, and returns the underlying buffer. +func (wrap MarshalStyle) JSONBuffer(v interface{}) (*bytes.Buffer, error) { buf := new(bytes.Buffer) if wrap { buf.Write([]byte(`{"data": `)) @@ -259,6 +287,20 @@ func ChunkSize(size int) MediaOption { return chunkSizeOption(size) } +type chunkTransferTimeoutOption time.Duration + +func (cd chunkTransferTimeoutOption) setOptions(o *MediaOptions) { + o.ChunkTransferTimeout = time.Duration(cd) +} + +// ChunkTransferTimeout returns a MediaOption which sets a per-chunk +// transfer timeout for resumable uploads. If a single chunk has been +// attempting to upload for longer than this time, the in-flight request is canceled and retried. +// The default is no per-chunk timeout. +func ChunkTransferTimeout(timeout time.Duration) MediaOption { + return chunkTransferTimeoutOption(timeout) +} + type chunkRetryDeadlineOption time.Duration func (cd chunkRetryDeadlineOption) setOptions(o *MediaOptions) { @@ -283,6 +325,7 @@ type MediaOptions struct { ForceEmptyContentType bool ChunkSize int ChunkRetryDeadline time.Duration + ChunkTransferTimeout time.Duration } // ProcessMediaOptions stores options from opts in a MediaOptions. diff --git a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-api.json b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-api.json index 633d334df40..b2633247179 100644 --- a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-api.json +++ b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-api.json @@ -165,6 +165,28 @@ "https://www.googleapis.com/auth/cloud-platform" ] }, + "getAllowedLocations": { + "description": "Returns the trust boundary info for a given service account.", + "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}/allowedLocations", + "httpMethod": "GET", + "id": "iamcredentials.projects.serviceAccounts.getAllowedLocations", + "parameterOrder": [ + "name" + ], + "parameters": { + "name": { + "description": "Required.
Resource name of service account.", + "location": "path", + "pattern": "^projects/[^/]+/serviceAccounts/[^/]+$", + "required": true, + "type": "string" + } + }, + "path": "v1/{+name}/allowedLocations", + "response": { + "$ref": "ServiceAccountAllowedLocations" + } + }, "signBlob": { "description": "Signs a blob using a service account's system-managed private key.", "flatPath": "v1/projects/{projectsId}/serviceAccounts/{serviceAccountsId}:signBlob", @@ -226,7 +248,7 @@ } } }, - "revision": "20240227", + "revision": "20241024", "rootUrl": "https://iamcredentials.googleapis.com/", "schemas": { "GenerateAccessTokenRequest": { @@ -300,6 +322,26 @@ }, "type": "object" }, + "ServiceAccountAllowedLocations": { + "description": "Represents a list of allowed locations for given service account.", + "id": "ServiceAccountAllowedLocations", + "properties": { + "encodedLocations": { + "description": "Output only. The hex encoded bitmap of the trust boundary locations", + "readOnly": true, + "type": "string" + }, + "locations": { + "description": "Output only. The human readable trust boundary locations. For example, [\"us-central1\", \"europe-west1\"]", + "items": { + "type": "string" + }, + "readOnly": true, + "type": "array" + } + }, + "type": "object" + }, "SignBlobRequest": { "id": "SignBlobRequest", "properties": { diff --git a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go index f0c69484583..559cab1385b 100644 --- a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go +++ b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC. +// Copyright 2025 Google LLC. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -57,11 +57,13 @@ import ( "errors" "fmt" "io" + "log/slog" "net/http" "net/url" "strconv" "strings" + "github.com/googleapis/gax-go/v2/internallog" googleapi "google.golang.org/api/googleapi" internal "google.golang.org/api/internal" gensupport "google.golang.org/api/internal/gensupport" @@ -85,6 +87,7 @@ var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint var _ = internal.Version +var _ = internallog.New const apiId = "iamcredentials:v1" const apiName = "iamcredentials" @@ -115,7 +118,8 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err if err != nil { return nil, err } - s, err := New(client) + s := &Service{client: client, BasePath: basePath, logger: internaloption.GetLogger(opts)} + s.Projects = NewProjectsService(s) if err != nil { return nil, err } @@ -134,13 +138,12 @@ func New(client *http.Client) (*Service, error) { if client == nil { return nil, errors.New("client is nil") } - s := &Service{client: client, BasePath: basePath} - s.Projects = NewProjectsService(s) - return s, nil + return NewService(context.Background(), option.WithHTTPClient(client)) } type Service struct { client *http.Client + logger *slog.Logger BasePath string // API endpoint base URL UserAgent string // optional additional User-Agent fragment @@ -306,6 +309,36 @@ func (s GenerateIdTokenResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } +// ServiceAccountAllowedLocations: Represents a list of allowed locations for +// given service account. +type ServiceAccountAllowedLocations struct { + // EncodedLocations: Output only. 
The hex encoded bitmap of the trust boundary + // locations + EncodedLocations string `json:"encodedLocations,omitempty"` + // Locations: Output only. The human readable trust boundary locations. For + // example, ["us-central1", "europe-west1"] + Locations []string `json:"locations,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the server. + googleapi.ServerResponse `json:"-"` + // ForceSendFields is a list of field names (e.g. "EncodedLocations") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "EncodedLocations") to include in + // API requests with the JSON null value. By default, fields with empty values + // are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s ServiceAccountAllowedLocations) MarshalJSON() ([]byte, error) { + type NoMethod ServiceAccountAllowedLocations + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + type SignBlobRequest struct { // Delegates: The sequence of service accounts in a delegation chain. Each // service account must be granted the `roles/iam.serviceAccountTokenCreator` @@ -494,8 +527,7 @@ func (c *ProjectsServiceAccountsGenerateAccessTokenCall) Header() http.Header { func (c *ProjectsServiceAccountsGenerateAccessTokenCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.generateaccesstokenrequest) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.generateaccesstokenrequest) if err != nil { return nil, err } @@ -511,6 +543,7 @@ func (c *ProjectsServiceAccountsGenerateAccessTokenCall) doRequest(alt string) ( googleapi.Expand(req.URL, map[string]string{ "name": c.name, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "iamcredentials.projects.serviceAccounts.generateAccessToken", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -546,9 +579,11 @@ func (c *ProjectsServiceAccountsGenerateAccessTokenCall) Do(opts ...googleapi.Ca }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "iamcredentials.projects.serviceAccounts.generateAccessToken", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -599,8 +634,7 @@ func (c *ProjectsServiceAccountsGenerateIdTokenCall) Header() http.Header { func (c *ProjectsServiceAccountsGenerateIdTokenCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.generateidtokenrequest) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.generateidtokenrequest) if err != nil { return nil, err } @@ -616,6 +650,7 @@ func (c *ProjectsServiceAccountsGenerateIdTokenCall) doRequest(alt string) (*htt googleapi.Expand(req.URL, map[string]string{ "name": 
c.name, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "iamcredentials.projects.serviceAccounts.generateIdToken", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -651,9 +686,122 @@ func (c *ProjectsServiceAccountsGenerateIdTokenCall) Do(opts ...googleapi.CallOp }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "iamcredentials.projects.serviceAccounts.generateIdToken", "response", internallog.HTTPResponse(res, b)) + return ret, nil +} + +type ProjectsServiceAccountsGetAllowedLocationsCall struct { + s *Service + name string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetAllowedLocations: Returns the trust boundary info for a given service +// account. +// +// - name: Resource name of service account. +func (r *ProjectsServiceAccountsService) GetAllowedLocations(name string) *ProjectsServiceAccountsGetAllowedLocationsCall { + c := &ProjectsServiceAccountsGetAllowedLocationsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.name = name + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ProjectsServiceAccountsGetAllowedLocationsCall) Fields(s ...googleapi.Field) *ProjectsServiceAccountsGetAllowedLocationsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets an optional parameter which makes the operation fail if the +// object's ETag matches the given value. This is useful for getting updates +// only after the object has changed since the last request. +func (c *ProjectsServiceAccountsGetAllowedLocationsCall) IfNoneMatch(entityTag string) *ProjectsServiceAccountsGetAllowedLocationsCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ProjectsServiceAccountsGetAllowedLocationsCall) Context(ctx context.Context) *ProjectsServiceAccountsGetAllowedLocationsCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ProjectsServiceAccountsGetAllowedLocationsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsServiceAccountsGetAllowedLocationsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}/allowedLocations") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, nil) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "name": c.name, + }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "iamcredentials.projects.serviceAccounts.getAllowedLocations", "request", internallog.HTTPRequest(req, nil)) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "iamcredentials.projects.serviceAccounts.getAllowedLocations" call. +// Any non-2xx status code is an error. Response headers are in either +// *ServiceAccountAllowedLocations.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *ProjectsServiceAccountsGetAllowedLocationsCall) Do(opts ...googleapi.CallOption) (*ServiceAccountAllowedLocations, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ServiceAccountAllowedLocations{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { + return nil, err + } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "iamcredentials.projects.serviceAccounts.getAllowedLocations", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -704,8 +852,7 @@ func (c *ProjectsServiceAccountsSignBlobCall) Header() http.Header { func (c *ProjectsServiceAccountsSignBlobCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.signblobrequest) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.signblobrequest) if err != nil { return nil, err } @@ -721,6 +868,7 @@ func (c *ProjectsServiceAccountsSignBlobCall) doRequest(alt string) (*http.Respo googleapi.Expand(req.URL, map[string]string{ "name": c.name, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "iamcredentials.projects.serviceAccounts.signBlob", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -756,9 +904,11 @@ func (c *ProjectsServiceAccountsSignBlobCall) Do(opts ...googleapi.CallOption) ( }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "iamcredentials.projects.serviceAccounts.signBlob", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -809,8 +959,7 @@ func (c *ProjectsServiceAccountsSignJwtCall) Header() http.Header { func (c *ProjectsServiceAccountsSignJwtCall) doRequest(alt string) (*http.Response, error) { reqHeaders := 
gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.signjwtrequest) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.signjwtrequest) if err != nil { return nil, err } @@ -826,6 +975,7 @@ func (c *ProjectsServiceAccountsSignJwtCall) doRequest(alt string) (*http.Respon googleapi.Expand(req.URL, map[string]string{ "name": c.name, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "iamcredentials.projects.serviceAccounts.signJwt", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -861,8 +1011,10 @@ func (c *ProjectsServiceAccountsSignJwtCall) Do(opts ...googleapi.CallOption) (* }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "iamcredentials.projects.serviceAccounts.signJwt", "response", internallog.HTTPResponse(res, b)) return ret, nil } diff --git a/vendor/google.golang.org/api/idtoken/cache.go b/vendor/google.golang.org/api/idtoken/cache.go deleted file mode 100644 index ed57d55f650..00000000000 --- a/vendor/google.golang.org/api/idtoken/cache.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2020 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package idtoken - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "strconv" - "strings" - "sync" - "time" -) - -type cachingClient struct { - client *http.Client - - // clock optionally specifies a func to return the current time. - // If nil, time.Now is used. - clock func() time.Time - - mu sync.Mutex - certs map[string]*cachedResponse -} - -func newCachingClient(client *http.Client) *cachingClient { - return &cachingClient{ - client: client, - certs: make(map[string]*cachedResponse, 2), - } -} - -type cachedResponse struct { - resp *certResponse - exp time.Time -} - -func (c *cachingClient) getCert(ctx context.Context, url string) (*certResponse, error) { - if response, ok := c.get(url); ok { - return response, nil - } - req, err := http.NewRequest(http.MethodGet, url, nil) - if err != nil { - return nil, err - } - req = req.WithContext(ctx) - resp, err := c.client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("idtoken: unable to retrieve cert, got status code %d", resp.StatusCode) - } - - certResp := &certResponse{} - if err := json.NewDecoder(resp.Body).Decode(certResp); err != nil { - return nil, err - - } - c.set(url, certResp, resp.Header) - return certResp, nil -} - -func (c *cachingClient) now() time.Time { - if c.clock != nil { - return c.clock() - } - return time.Now() -} - -func (c *cachingClient) get(url string) (*certResponse, bool) { - c.mu.Lock() - defer c.mu.Unlock() - cachedResp, ok := c.certs[url] - if !ok { - return nil, false - } - if c.now().After(cachedResp.exp) { - return nil, false - } - return cachedResp.resp, true -} - -func (c *cachingClient) set(url string, resp *certResponse, headers http.Header) { - exp := c.calculateExpireTime(headers) - c.mu.Lock() - c.certs[url] = &cachedResponse{resp: resp, exp: exp} - c.mu.Unlock() -} - -// calculateExpireTime will determine the expire time for the cache based on -// HTTP headers. 
If there is any difficulty reading the headers the fallback is -// to set the cache to expire now. -func (c *cachingClient) calculateExpireTime(headers http.Header) time.Time { - var maxAge int - cc := strings.Split(headers.Get("cache-control"), ",") - for _, v := range cc { - if strings.Contains(v, "max-age") { - ss := strings.Split(v, "=") - if len(ss) < 2 { - return c.now() - } - ma, err := strconv.Atoi(ss[1]) - if err != nil { - return c.now() - } - maxAge = ma - } - } - a := headers.Get("age") - if a == "" { - return c.now().Add(time.Duration(maxAge) * time.Second) - } - age, err := strconv.Atoi(a) - if err != nil { - return c.now() - } - return c.now().Add(time.Duration(maxAge-age) * time.Second) -} diff --git a/vendor/google.golang.org/api/idtoken/compute.go b/vendor/google.golang.org/api/idtoken/compute.go deleted file mode 100644 index 222396cb7c0..00000000000 --- a/vendor/google.golang.org/api/idtoken/compute.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2020 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package idtoken - -import ( - "fmt" - "net/url" - "time" - - "cloud.google.com/go/compute/metadata" - "golang.org/x/oauth2" - - "google.golang.org/api/internal" -) - -// computeTokenSource checks if this code is being run on GCE. If it is, it will -// use the metadata service to build a TokenSource that fetches ID tokens. -func computeTokenSource(audience string, ds *internal.DialSettings) (oauth2.TokenSource, error) { - if ds.CustomClaims != nil { - return nil, fmt.Errorf("idtoken: WithCustomClaims can't be used with the metadata service, please provide a service account if you would like to use this feature") - } - ts := computeIDTokenSource{ - audience: audience, - } - tok, err := ts.Token() - if err != nil { - return nil, err - } - return oauth2.ReuseTokenSource(tok, ts), nil -} - -type computeIDTokenSource struct { - audience string -} - -func (c computeIDTokenSource) Token() (*oauth2.Token, error) { - v := url.Values{} - v.Set("audience", c.audience) - v.Set("format", "full") - urlSuffix := "instance/service-accounts/default/identity?" + v.Encode() - res, err := metadata.Get(urlSuffix) - if err != nil { - return nil, err - } - if res == "" { - return nil, fmt.Errorf("idtoken: invalid response from metadata service") - } - return &oauth2.Token{ - AccessToken: res, - TokenType: "bearer", - // Compute tokens are valid for one hour, leave a little buffer - Expiry: time.Now().Add(55 * time.Minute), - }, nil -} diff --git a/vendor/google.golang.org/api/idtoken/doc.go b/vendor/google.golang.org/api/idtoken/doc.go deleted file mode 100644 index 1b1effdd69a..00000000000 --- a/vendor/google.golang.org/api/idtoken/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright 2020 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package idtoken provides utilities for creating authenticated transports with -// ID Tokens for Google HTTP APIs. It also provides methods to validate Google -// issued ID tokens. -package idtoken diff --git a/vendor/google.golang.org/api/idtoken/idtoken.go b/vendor/google.golang.org/api/idtoken/idtoken.go deleted file mode 100644 index 477164f904b..00000000000 --- a/vendor/google.golang.org/api/idtoken/idtoken.go +++ /dev/null @@ -1,252 +0,0 @@ -// Copyright 2020 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
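// ----------------------------------------------------------------------------
// Editor's note: a minimal, self-contained sketch of the freshness math used
// by the deleted calculateExpireTime above: expiry = now + (max-age - Age),
// falling back to "expire immediately" whenever a header fails to parse. The
// header values below are hypothetical; this is an illustration, not part of
// the vendored change.
package main

import (
	"fmt"
	"net/http"
	"strconv"
	"strings"
	"time"
)

func expireTime(now time.Time, h http.Header) time.Time {
	maxAge := 0
	for _, part := range strings.Split(h.Get("Cache-Control"), ",") {
		if v, ok := strings.CutPrefix(strings.TrimSpace(part), "max-age="); ok {
			n, err := strconv.Atoi(v)
			if err != nil {
				return now // unparsable: treat the entry as already expired
			}
			maxAge = n
		}
	}
	age := 0
	if a := h.Get("Age"); a != "" {
		n, err := strconv.Atoi(a)
		if err != nil {
			return now
		}
		age = n
	}
	return now.Add(time.Duration(maxAge-age) * time.Second)
}

func main() {
	h := http.Header{}
	h.Set("Cache-Control", "public, max-age=3600")
	h.Set("Age", "600")
	// A cert fetched 10 minutes ago with a 1h max-age stays cached ~50 more minutes.
	fmt.Println(expireTime(time.Now(), h))
}
// ----------------------------------------------------------------------------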
- -package idtoken - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "path/filepath" - "strings" - - "cloud.google.com/go/compute/metadata" - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" - - newidtoken "cloud.google.com/go/auth/credentials/idtoken" - "cloud.google.com/go/auth/oauth2adapt" - "google.golang.org/api/impersonate" - "google.golang.org/api/internal" - "google.golang.org/api/option" - "google.golang.org/api/option/internaloption" - htransport "google.golang.org/api/transport/http" -) - -// ClientOption is aliased so relevant options are easily found in the docs. - -// ClientOption is for configuring a Google API client or transport. -type ClientOption = option.ClientOption - -type credentialsType int - -const ( - unknownCredType credentialsType = iota - serviceAccount - impersonatedServiceAccount - externalAccount -) - -// NewClient creates a HTTP Client that automatically adds an ID token to each -// request via an Authorization header. The token will have the audience -// provided and be configured with the supplied options. The parameter audience -// may not be empty. -func NewClient(ctx context.Context, audience string, opts ...ClientOption) (*http.Client, error) { - var ds internal.DialSettings - for _, opt := range opts { - opt.Apply(&ds) - } - if err := ds.Validate(); err != nil { - return nil, err - } - if ds.NoAuth { - return nil, fmt.Errorf("idtoken: option.WithoutAuthentication not supported") - } - if ds.APIKey != "" { - return nil, fmt.Errorf("idtoken: option.WithAPIKey not supported") - } - if ds.TokenSource != nil { - return nil, fmt.Errorf("idtoken: option.WithTokenSource not supported") - } - - ts, err := NewTokenSource(ctx, audience, opts...) - if err != nil { - return nil, err - } - // Skip DialSettings validation so added TokenSource will not conflict with user - // provided credentials. - opts = append(opts, option.WithTokenSource(ts), internaloption.SkipDialSettingsValidation()) - httpTransport := http.DefaultTransport.(*http.Transport).Clone() - httpTransport.MaxIdleConnsPerHost = 100 - t, err := htransport.NewTransport(ctx, httpTransport, opts...) - if err != nil { - return nil, err - } - return &http.Client{Transport: t}, nil -} - -// NewTokenSource creates a TokenSource that returns ID tokens with the audience -// provided and configured with the supplied options. The parameter audience may -// not be empty. 
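// ----------------------------------------------------------------------------
// Editor's note: a minimal usage sketch for the NewClient/NewTokenSource API
// shown above (this package is being dropped from vendor in this diff). The
// audience and URL are hypothetical placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/api/idtoken"
)

func main() {
	ctx := context.Background()
	// Every request from this client carries an ID token minted for the audience.
	client, err := idtoken.NewClient(ctx, "https://my-service.example.com")
	if err != nil {
		log.Fatal(err)
	}
	resp, err := client.Get("https://my-service.example.com/api")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
// ----------------------------------------------------------------------------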
-func NewTokenSource(ctx context.Context, audience string, opts ...ClientOption) (oauth2.TokenSource, error) { - if audience == "" { - return nil, fmt.Errorf("idtoken: must supply a non-empty audience") - } - var ds internal.DialSettings - for _, opt := range opts { - opt.Apply(&ds) - } - if err := ds.Validate(); err != nil { - return nil, err - } - if ds.TokenSource != nil { - return nil, fmt.Errorf("idtoken: option.WithTokenSource not supported") - } - if ds.ImpersonationConfig != nil { - return nil, fmt.Errorf("idtoken: option.WithImpersonatedCredentials not supported") - } - if ds.IsNewAuthLibraryEnabled() { - return newTokenSourceNewAuth(ctx, audience, &ds) - } - return newTokenSource(ctx, audience, &ds) -} - -func newTokenSourceNewAuth(ctx context.Context, audience string, ds *internal.DialSettings) (oauth2.TokenSource, error) { - if ds.AuthCredentials != nil { - return nil, fmt.Errorf("idtoken: option.WithTokenProvider not supported") - } - creds, err := newidtoken.NewCredentials(&newidtoken.Options{ - Audience: audience, - CustomClaims: ds.CustomClaims, - CredentialsFile: ds.CredentialsFile, - CredentialsJSON: ds.CredentialsJSON, - Client: oauth2.NewClient(ctx, nil), - }) - if err != nil { - return nil, err - } - return oauth2adapt.TokenSourceFromTokenProvider(creds), nil -} - -func newTokenSource(ctx context.Context, audience string, ds *internal.DialSettings) (oauth2.TokenSource, error) { - creds, err := internal.Creds(ctx, ds) - if err != nil { - return nil, err - } - if len(creds.JSON) > 0 { - return tokenSourceFromBytes(ctx, creds.JSON, audience, ds) - } - // If internal.Creds did not return a response with JSON fallback to the - // metadata service as the creds.TokenSource is not an ID token. - if metadata.OnGCE() { - return computeTokenSource(audience, ds) - } - return nil, fmt.Errorf("idtoken: couldn't find any credentials") -} - -func tokenSourceFromBytes(ctx context.Context, data []byte, audience string, ds *internal.DialSettings) (oauth2.TokenSource, error) { - allowedType, err := getAllowedType(data) - if err != nil { - return nil, err - } - switch allowedType { - case serviceAccount: - cfg, err := google.JWTConfigFromJSON(data, ds.GetScopes()...) - if err != nil { - return nil, err - } - customClaims := ds.CustomClaims - if customClaims == nil { - customClaims = make(map[string]interface{}) - } - customClaims["target_audience"] = audience - - cfg.PrivateClaims = customClaims - cfg.UseIDToken = true - - ts := cfg.TokenSource(ctx) - tok, err := ts.Token() - if err != nil { - return nil, err - } - return oauth2.ReuseTokenSource(tok, ts), nil - case impersonatedServiceAccount, externalAccount: - type url struct { - ServiceAccountImpersonationURL string `json:"service_account_impersonation_url"` - } - var accountURL *url - if err := json.Unmarshal(data, &accountURL); err != nil { - return nil, err - } - account := filepath.Base(accountURL.ServiceAccountImpersonationURL) - account = strings.Split(account, ":")[0] - - config := impersonate.IDTokenConfig{ - Audience: audience, - TargetPrincipal: account, - IncludeEmail: true, - } - ts, err := impersonate.IDTokenSource(ctx, config, option.WithCredentialsJSON(data)) - if err != nil { - return nil, err - } - return ts, nil - default: - return nil, fmt.Errorf("idtoken: unsupported credentials type") - } -} - -// getAllowedType returns the credentials type of type credentialsType, and an error. 
-// allowed types are "service_account", "impersonated_service_account", and "external_account" -func getAllowedType(data []byte) (credentialsType, error) { - var t credentialsType - if len(data) == 0 { - return t, fmt.Errorf("idtoken: credential provided is 0 bytes") - } - var f struct { - Type string `json:"type"` - } - if err := json.Unmarshal(data, &f); err != nil { - return t, err - } - t = parseCredType(f.Type) - return t, nil -} - -func parseCredType(typeString string) credentialsType { - switch typeString { - case "service_account": - return serviceAccount - case "impersonated_service_account": - return impersonatedServiceAccount - case "external_account": - return externalAccount - default: - return unknownCredType - } -} - -// WithCustomClaims optionally specifies custom private claims for an ID token. -func WithCustomClaims(customClaims map[string]interface{}) ClientOption { - return withCustomClaims(customClaims) -} - -type withCustomClaims map[string]interface{} - -func (w withCustomClaims) Apply(o *internal.DialSettings) { - o.CustomClaims = w -} - -// WithCredentialsFile returns a ClientOption that authenticates -// API calls with the given service account or refresh token JSON -// credentials file. -func WithCredentialsFile(filename string) ClientOption { - return option.WithCredentialsFile(filename) -} - -// WithCredentialsJSON returns a ClientOption that authenticates -// API calls with the given service account or refresh token JSON -// credentials. -func WithCredentialsJSON(p []byte) ClientOption { - return option.WithCredentialsJSON(p) -} - -// WithHTTPClient returns a ClientOption that specifies the HTTP client to use -// as the basis of communications. This option may only be used with services -// that support HTTP as their communication transport. When used, the -// WithHTTPClient option takes precedence over all other supplied options. -func WithHTTPClient(client *http.Client) ClientOption { - return option.WithHTTPClient(client) -} diff --git a/vendor/google.golang.org/api/idtoken/validate.go b/vendor/google.golang.org/api/idtoken/validate.go deleted file mode 100644 index 47d314f50e5..00000000000 --- a/vendor/google.golang.org/api/idtoken/validate.go +++ /dev/null @@ -1,334 +0,0 @@ -// Copyright 2020 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package idtoken - -import ( - "context" - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rsa" - "crypto/sha256" - "encoding/base64" - "encoding/json" - "fmt" - "math/big" - "net/http" - "strings" - "time" - - "google.golang.org/api/option" - "google.golang.org/api/option/internaloption" - htransport "google.golang.org/api/transport/http" -) - -const ( - es256KeySize int = 32 - googleIAPCertsURL string = "https://www.gstatic.com/iap/verify/public_key-jwk" - googleSACertsURL string = "https://www.googleapis.com/oauth2/v3/certs" -) - -var ( - defaultValidator = &Validator{client: newCachingClient(http.DefaultClient)} - // now aliases time.Now for testing. - now = time.Now -) - -func defaultValidatorOpts() []ClientOption { - return []ClientOption{ - internaloption.WithDefaultScopes("https://www.googleapis.com/auth/cloud-platform"), - option.WithoutAuthentication(), - } -} - -// Payload represents a decoded payload of an ID Token. 
-type Payload struct { - Issuer string `json:"iss"` - Audience string `json:"aud"` - Expires int64 `json:"exp"` - IssuedAt int64 `json:"iat"` - Subject string `json:"sub,omitempty"` - Claims map[string]interface{} `json:"-"` -} - -// jwt represents the segments of a jwt and exposes convenience methods for -// working with the different segments. -type jwt struct { - header string - payload string - signature string -} - -// jwtHeader represents a parsed JWT's header segment. -type jwtHeader struct { - Algorithm string `json:"alg"` - Type string `json:"typ"` - KeyID string `json:"kid"` -} - -// certResponse represents a list of JWKs. It is the format returned from known -// Google cert endpoints. -type certResponse struct { - Keys []jwk `json:"keys"` -} - -// jwk is a simplified representation of a standard jwk. It only includes the -// fields used by Google's cert endpoints. -type jwk struct { - Alg string `json:"alg"` - Crv string `json:"crv"` - Kid string `json:"kid"` - Kty string `json:"kty"` - Use string `json:"use"` - E string `json:"e"` - N string `json:"n"` - X string `json:"x"` - Y string `json:"y"` -} - -// Validator provides a way to validate Google ID Tokens with a user provided -// http.Client. -type Validator struct { - client *cachingClient -} - -// NewValidator creates a Validator that uses the options provided to configure -// the internal http.Client that will be used to make requests to fetch JWKs. -func NewValidator(ctx context.Context, opts ...ClientOption) (*Validator, error) { - opts = append(defaultValidatorOpts(), opts...) - client, _, err := htransport.NewClient(ctx, opts...) - if err != nil { - return nil, err - } - return &Validator{client: newCachingClient(client)}, nil -} - -// Validate is used to validate the provided idToken with a known Google cert -// URL. If audience is not empty the audience claim of the Token is validated. -// Upon successful validation a parsed token Payload is returned allowing the -// caller to validate any additional claims. -func (v *Validator) Validate(ctx context.Context, idToken string, audience string) (*Payload, error) { - return v.validate(ctx, idToken, audience) -} - -// Validate is used to validate the provided idToken with a known Google cert -// URL. If audience is not empty the audience claim of the Token is validated. -// Upon successful validation a parsed token Payload is returned allowing the -// caller to validate any additional claims. -func Validate(ctx context.Context, idToken string, audience string) (*Payload, error) { - // TODO(codyoss): consider adding a check revoked version of the api. See: https://pkg.go.dev/firebase.google.com/go/auth?tab=doc#Client.VerifyIDTokenAndCheckRevoked - return defaultValidator.validate(ctx, idToken, audience) -} - -// ParsePayload parses the given token and returns its payload. -// -// Warning: This function does not validate the token prior to parsing it. -// -// ParsePayload is primarily meant to be used to inspect a token's payload. This is -// useful when validation fails and the payload needs to be inspected. -// -// Note: A successful Validate() invocation with the same token will return an -// identical payload. 
-func ParsePayload(idToken string) (*Payload, error) { - jwt, err := parseJWT(idToken) - if err != nil { - return nil, err - } - return jwt.parsedPayload() -} - -func (v *Validator) validate(ctx context.Context, idToken string, audience string) (*Payload, error) { - jwt, err := parseJWT(idToken) - if err != nil { - return nil, err - } - header, err := jwt.parsedHeader() - if err != nil { - return nil, err - } - payload, err := jwt.parsedPayload() - if err != nil { - return nil, err - } - sig, err := jwt.decodedSignature() - if err != nil { - return nil, err - } - - if audience != "" && payload.Audience != audience { - return nil, fmt.Errorf("idtoken: audience provided does not match aud claim in the JWT") - } - - if now().Unix() > payload.Expires { - return nil, fmt.Errorf("idtoken: token expired: now=%v, expires=%v", now().Unix(), payload.Expires) - } - - switch header.Algorithm { - case "RS256": - if err := v.validateRS256(ctx, header.KeyID, jwt.hashedContent(), sig); err != nil { - return nil, err - } - case "ES256": - if err := v.validateES256(ctx, header.KeyID, jwt.hashedContent(), sig); err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("idtoken: expected JWT signed with RS256 or ES256 but found %q", header.Algorithm) - } - - return payload, nil -} - -func (v *Validator) validateRS256(ctx context.Context, keyID string, hashedContent []byte, sig []byte) error { - certResp, err := v.client.getCert(ctx, googleSACertsURL) - if err != nil { - return err - } - j, err := findMatchingKey(certResp, keyID) - if err != nil { - return err - } - dn, err := decode(j.N) - if err != nil { - return err - } - de, err := decode(j.E) - if err != nil { - return err - } - - pk := &rsa.PublicKey{ - N: new(big.Int).SetBytes(dn), - E: int(new(big.Int).SetBytes(de).Int64()), - } - return rsa.VerifyPKCS1v15(pk, crypto.SHA256, hashedContent, sig) -} - -func (v *Validator) validateES256(ctx context.Context, keyID string, hashedContent []byte, sig []byte) error { - certResp, err := v.client.getCert(ctx, googleIAPCertsURL) - if err != nil { - return err - } - j, err := findMatchingKey(certResp, keyID) - if err != nil { - return err - } - dx, err := decode(j.X) - if err != nil { - return err - } - dy, err := decode(j.Y) - if err != nil { - return err - } - - pk := &ecdsa.PublicKey{ - Curve: elliptic.P256(), - X: new(big.Int).SetBytes(dx), - Y: new(big.Int).SetBytes(dy), - } - r := big.NewInt(0).SetBytes(sig[:es256KeySize]) - s := big.NewInt(0).SetBytes(sig[es256KeySize:]) - if valid := ecdsa.Verify(pk, hashedContent, r, s); !valid { - return fmt.Errorf("idtoken: ES256 signature not valid") - } - return nil -} - -func findMatchingKey(response *certResponse, keyID string) (*jwk, error) { - if response == nil { - return nil, fmt.Errorf("idtoken: cert response is nil") - } - for _, v := range response.Keys { - if v.Kid == keyID { - return &v, nil - } - } - return nil, fmt.Errorf("idtoken: could not find matching cert keyId for the token provided") -} - -func parseJWT(idToken string) (*jwt, error) { - segments := strings.Split(idToken, ".") - if len(segments) != 3 { - return nil, fmt.Errorf("idtoken: invalid token, token must have three segments; found %d", len(segments)) - } - return &jwt{ - header: segments[0], - payload: segments[1], - signature: segments[2], - }, nil -} - -// decodedHeader base64 decodes the header segment. 
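// ----------------------------------------------------------------------------
// Editor's note: a sketch of the validation entry points above. The token and
// audience are hypothetical; ParsePayload is shown only as the documented
// "inspect after failure" escape hatch, since it performs no verification.
package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/api/idtoken"
)

func main() {
	ctx := context.Background()
	const token = "eyJhbGciOi..." // hypothetical compact JWT
	payload, err := idtoken.Validate(ctx, token, "https://my-service.example.com")
	if err != nil {
		// Unverified inspection of the claims to aid debugging.
		if p, perr := idtoken.ParsePayload(token); perr == nil {
			log.Printf("unverified issuer: %s audience: %s", p.Issuer, p.Audience)
		}
		log.Fatal(err)
	}
	fmt.Println("verified subject:", payload.Subject)
}
// ----------------------------------------------------------------------------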
-func (j *jwt) decodedHeader() ([]byte, error) { - dh, err := decode(j.header) - if err != nil { - return nil, fmt.Errorf("idtoken: unable to decode JWT header: %v", err) - } - return dh, nil -} - -// decodedPayload base64 decodes the payload segment. -func (j *jwt) decodedPayload() ([]byte, error) { - p, err := decode(j.payload) - if err != nil { - return nil, fmt.Errorf("idtoken: unable to decode JWT payload: %v", err) - } - return p, nil -} - -// decodedSignature base64 decodes the signature segment. -func (j *jwt) decodedSignature() ([]byte, error) { - p, err := decode(j.signature) - if err != nil { - return nil, fmt.Errorf("idtoken: unable to decode JWT signature: %v", err) - } - return p, nil -} - -// parsedHeader returns a struct representing a JWT header. -func (j *jwt) parsedHeader() (jwtHeader, error) { - var h jwtHeader - dh, err := j.decodedHeader() - if err != nil { - return h, err - } - err = json.Unmarshal(dh, &h) - if err != nil { - return h, fmt.Errorf("idtoken: unable to unmarshal JWT header: %v", err) - } - return h, nil -} - -// parsedPayload returns a struct representing a JWT payload. -func (j *jwt) parsedPayload() (*Payload, error) { - var p Payload - dp, err := j.decodedPayload() - if err != nil { - return nil, err - } - if err := json.Unmarshal(dp, &p); err != nil { - return nil, fmt.Errorf("idtoken: unable to unmarshal JWT payload: %v", err) - } - if err := json.Unmarshal(dp, &p.Claims); err != nil { - return nil, fmt.Errorf("idtoken: unable to unmarshal JWT payload claims: %v", err) - } - return &p, nil -} - -// hashedContent gets the SHA256 checksum for verification of the JWT. -func (j *jwt) hashedContent() []byte { - signedContent := j.header + "." + j.payload - hashed := sha256.Sum256([]byte(signedContent)) - return hashed[:] -} - -func (j *jwt) String() string { - return fmt.Sprintf("%s.%s.%s", j.header, j.payload, j.signature) -} - -func decode(s string) ([]byte, error) { - return base64.RawURLEncoding.DecodeString(s) -} diff --git a/vendor/google.golang.org/api/impersonate/doc.go b/vendor/google.golang.org/api/impersonate/doc.go deleted file mode 100644 index 155ef70e15a..00000000000 --- a/vendor/google.golang.org/api/impersonate/doc.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package impersonate is used to impersonate Google Credentials. -// -// # Required IAM roles -// -// In order to impersonate a service account the base service account must have -// the Service Account Token Creator role, roles/iam.serviceAccountTokenCreator, -// on the service account being impersonated. See -// https://cloud.google.com/iam/docs/understanding-service-accounts. -// -// Optionally, delegates can be used during impersonation if the base service -// account lacks the token creator role on the target. When using delegates, -// each service account must be granted roles/iam.serviceAccountTokenCreator -// on the next service account in the delegation chain. -// -// For example, if a base service account of SA1 is trying to impersonate target -// service account SA2 while using delegate service accounts DSA1 and DSA2, -// the following must be true: -// -// 1. Base service account SA1 has roles/iam.serviceAccountTokenCreator on -// DSA1. -// 2. DSA1 has roles/iam.serviceAccountTokenCreator on DSA2. -// 3. DSA2 has roles/iam.serviceAccountTokenCreator on target SA2. 
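// ----------------------------------------------------------------------------
// Editor's note: a usage sketch matching the SA1 -> DSA1 -> DSA2 -> SA2
// delegation example above, using the IDTokenSource API defined in the
// idtoken.go file deleted below. All emails and the audience are hypothetical.
package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/api/impersonate"
)

func main() {
	ctx := context.Background()
	ts, err := impersonate.IDTokenSource(ctx, impersonate.IDTokenConfig{
		Audience:        "https://my-service.example.com",
		TargetPrincipal: "sa2@my-project.iam.gserviceaccount.com",
		Delegates: []string{
			"dsa1@my-project.iam.gserviceaccount.com",
			"dsa2@my-project.iam.gserviceaccount.com",
		},
		IncludeEmail: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	tok, err := ts.Token()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(tok.Expiry) // impersonated ID tokens are good for one hour
}
// ----------------------------------------------------------------------------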
-// -// If the base credential is an authorized user and not a service account, or if -// the option WithQuotaProject is set, the target service account must have a -// role that grants the serviceusage.services.use permission such as -// roles/serviceusage.serviceUsageConsumer. -package impersonate diff --git a/vendor/google.golang.org/api/impersonate/idtoken.go b/vendor/google.golang.org/api/impersonate/idtoken.go deleted file mode 100644 index 9000b8e0f61..00000000000 --- a/vendor/google.golang.org/api/impersonate/idtoken.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package impersonate - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "time" - - "golang.org/x/oauth2" - "google.golang.org/api/option" - htransport "google.golang.org/api/transport/http" -) - -// IDTokenConfig for generating an impersonated ID token. -type IDTokenConfig struct { - // Audience is the `aud` field for the token, such as an API endpoint the - // token will grant access to. Required. - Audience string - // TargetPrincipal is the email address of the service account to - // impersonate. Required. - TargetPrincipal string - // IncludeEmail includes the service account's email in the token. The - // resulting token will include both an `email` and `email_verified` - // claim. - IncludeEmail bool - // Delegates are the service account email addresses in a delegation chain. - // Each service account must be granted roles/iam.serviceAccountTokenCreator - // on the next service account in the chain. Optional. - Delegates []string -} - -// IDTokenSource creates an impersonated TokenSource that returns ID tokens -// configured with the provided config and using credentials loaded from -// Application Default Credentials as the base credentials. The tokens provided -// by the source are valid for one hour and are automatically refreshed. -func IDTokenSource(ctx context.Context, config IDTokenConfig, opts ...option.ClientOption) (oauth2.TokenSource, error) { - if config.Audience == "" { - return nil, fmt.Errorf("impersonate: an audience must be provided") - } - if config.TargetPrincipal == "" { - return nil, fmt.Errorf("impersonate: a target service account must be provided") - } - - clientOpts := append(defaultClientOptions(), opts...) - client, _, err := htransport.NewClient(ctx, clientOpts...) 
- if err != nil { - return nil, err - } - - its := impersonatedIDTokenSource{ - client: client, - targetPrincipal: config.TargetPrincipal, - audience: config.Audience, - includeEmail: config.IncludeEmail, - } - for _, v := range config.Delegates { - its.delegates = append(its.delegates, formatIAMServiceAccountName(v)) - } - return oauth2.ReuseTokenSource(nil, its), nil -} - -type generateIDTokenRequest struct { - Audience string `json:"audience"` - IncludeEmail bool `json:"includeEmail"` - Delegates []string `json:"delegates,omitempty"` -} - -type generateIDTokenResponse struct { - Token string `json:"token"` -} - -type impersonatedIDTokenSource struct { - client *http.Client - - targetPrincipal string - audience string - includeEmail bool - delegates []string -} - -func (i impersonatedIDTokenSource) Token() (*oauth2.Token, error) { - now := time.Now() - genIDTokenReq := generateIDTokenRequest{ - Audience: i.audience, - IncludeEmail: i.includeEmail, - Delegates: i.delegates, - } - bodyBytes, err := json.Marshal(genIDTokenReq) - if err != nil { - return nil, fmt.Errorf("impersonate: unable to marshal request: %v", err) - } - - url := fmt.Sprintf("%s/v1/%s:generateIdToken", iamCredentailsEndpoint, formatIAMServiceAccountName(i.targetPrincipal)) - req, err := http.NewRequest("POST", url, bytes.NewReader(bodyBytes)) - if err != nil { - return nil, fmt.Errorf("impersonate: unable to create request: %v", err) - } - req.Header.Set("Content-Type", "application/json") - resp, err := i.client.Do(req) - if err != nil { - return nil, fmt.Errorf("impersonate: unable to generate ID token: %v", err) - } - defer resp.Body.Close() - body, err := io.ReadAll(io.LimitReader(resp.Body, 1<<20)) - if err != nil { - return nil, fmt.Errorf("impersonate: unable to read body: %v", err) - } - if c := resp.StatusCode; c < 200 || c > 299 { - return nil, fmt.Errorf("impersonate: status code %d: %s", c, body) - } - - var generateIDTokenResp generateIDTokenResponse - if err := json.Unmarshal(body, &generateIDTokenResp); err != nil { - return nil, fmt.Errorf("impersonate: unable to parse response: %v", err) - } - return &oauth2.Token{ - AccessToken: generateIDTokenResp.Token, - // Generated ID tokens are good for one hour. - Expiry: now.Add(1 * time.Hour), - }, nil -} diff --git a/vendor/google.golang.org/api/impersonate/impersonate.go b/vendor/google.golang.org/api/impersonate/impersonate.go deleted file mode 100644 index a09698aded1..00000000000 --- a/vendor/google.golang.org/api/impersonate/impersonate.go +++ /dev/null @@ -1,209 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package impersonate - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "time" - - "golang.org/x/oauth2" - "google.golang.org/api/internal" - "google.golang.org/api/option" - "google.golang.org/api/option/internaloption" - htransport "google.golang.org/api/transport/http" -) - -var ( - iamCredentailsEndpoint = "https://iamcredentials.googleapis.com" - oauth2Endpoint = "https://oauth2.googleapis.com" - errMissingTargetPrincipal = errors.New("impersonate: a target service account must be provided") - errMissingScopes = errors.New("impersonate: scopes must be provided") - errLifetimeOverMax = errors.New("impersonate: max lifetime is 12 hours") - errUniverseNotSupportedDomainWideDelegation = errors.New("impersonate: service account user is configured for the credential. 
" + - "Domain-wide delegation is not supported in universes other than googleapis.com") -) - -// CredentialsConfig for generating impersonated credentials. -type CredentialsConfig struct { - // TargetPrincipal is the email address of the service account to - // impersonate. Required. - TargetPrincipal string - // Scopes that the impersonated credential should have. Required. - Scopes []string - // Delegates are the service account email addresses in a delegation chain. - // Each service account must be granted roles/iam.serviceAccountTokenCreator - // on the next service account in the chain. Optional. - Delegates []string - // Lifetime is the amount of time until the impersonated token expires. If - // unset the token's lifetime will be one hour and be automatically - // refreshed. If set the token may have a max lifetime of one hour and will - // not be refreshed. Service accounts that have been added to an org policy - // with constraints/iam.allowServiceAccountCredentialLifetimeExtension may - // request a token lifetime of up to 12 hours. Optional. - Lifetime time.Duration - // Subject is the sub field of a JWT. This field should only be set if you - // wish to impersonate as a user. This feature is useful when using domain - // wide delegation. Optional. - Subject string -} - -// defaultClientOptions ensures the base credentials will work with the IAM -// Credentials API if no scope or audience is set by the user. -func defaultClientOptions() []option.ClientOption { - return []option.ClientOption{ - internaloption.WithDefaultAudience("https://iamcredentials.googleapis.com/"), - internaloption.WithDefaultScopes("https://www.googleapis.com/auth/cloud-platform"), - } -} - -// CredentialsTokenSource returns an impersonated CredentialsTokenSource configured with the provided -// config and using credentials loaded from Application Default Credentials as -// the base credentials. -func CredentialsTokenSource(ctx context.Context, config CredentialsConfig, opts ...option.ClientOption) (oauth2.TokenSource, error) { - if config.TargetPrincipal == "" { - return nil, errMissingTargetPrincipal - } - if len(config.Scopes) == 0 { - return nil, errMissingScopes - } - if config.Lifetime.Hours() > 12 { - return nil, errLifetimeOverMax - } - - var isStaticToken bool - // Default to the longest acceptable value of one hour as the token will - // be refreshed automatically if not set. - lifetime := 3600 * time.Second - if config.Lifetime != 0 { - lifetime = config.Lifetime - // Don't auto-refresh token if a lifetime is configured. - isStaticToken = true - } - - clientOpts := append(defaultClientOptions(), opts...) - client, _, err := htransport.NewClient(ctx, clientOpts...) - if err != nil { - return nil, err - } - // If a subject is specified a domain-wide delegation auth-flow is initiated - // to impersonate as the provided subject (user). 
- if config.Subject != "" { - settings, err := newSettings(clientOpts) - if err != nil { - return nil, err - } - if !settings.IsUniverseDomainGDU() { - return nil, errUniverseNotSupportedDomainWideDelegation - } - return user(ctx, config, client, lifetime, isStaticToken) - } - - its := impersonatedTokenSource{ - client: client, - targetPrincipal: config.TargetPrincipal, - lifetime: fmt.Sprintf("%.fs", lifetime.Seconds()), - } - for _, v := range config.Delegates { - its.delegates = append(its.delegates, formatIAMServiceAccountName(v)) - } - its.scopes = make([]string, len(config.Scopes)) - copy(its.scopes, config.Scopes) - - if isStaticToken { - tok, err := its.Token() - if err != nil { - return nil, err - } - return oauth2.StaticTokenSource(tok), nil - } - return oauth2.ReuseTokenSource(nil, its), nil -} - -func newSettings(opts []option.ClientOption) (*internal.DialSettings, error) { - var o internal.DialSettings - for _, opt := range opts { - opt.Apply(&o) - } - if err := o.Validate(); err != nil { - return nil, err - } - - return &o, nil -} - -func formatIAMServiceAccountName(name string) string { - return fmt.Sprintf("projects/-/serviceAccounts/%s", name) -} - -type generateAccessTokenReq struct { - Delegates []string `json:"delegates,omitempty"` - Lifetime string `json:"lifetime,omitempty"` - Scope []string `json:"scope,omitempty"` -} - -type generateAccessTokenResp struct { - AccessToken string `json:"accessToken"` - ExpireTime string `json:"expireTime"` -} - -type impersonatedTokenSource struct { - client *http.Client - - targetPrincipal string - lifetime string - scopes []string - delegates []string -} - -// Token returns an impersonated Token. -func (i impersonatedTokenSource) Token() (*oauth2.Token, error) { - reqBody := generateAccessTokenReq{ - Delegates: i.delegates, - Lifetime: i.lifetime, - Scope: i.scopes, - } - b, err := json.Marshal(reqBody) - if err != nil { - return nil, fmt.Errorf("impersonate: unable to marshal request: %v", err) - } - url := fmt.Sprintf("%s/v1/%s:generateAccessToken", iamCredentailsEndpoint, formatIAMServiceAccountName(i.targetPrincipal)) - req, err := http.NewRequest("POST", url, bytes.NewReader(b)) - if err != nil { - return nil, fmt.Errorf("impersonate: unable to create request: %v", err) - } - req.Header.Set("Content-Type", "application/json") - - resp, err := i.client.Do(req) - if err != nil { - return nil, fmt.Errorf("impersonate: unable to generate access token: %v", err) - } - defer resp.Body.Close() - body, err := io.ReadAll(io.LimitReader(resp.Body, 1<<20)) - if err != nil { - return nil, fmt.Errorf("impersonate: unable to read body: %v", err) - } - if c := resp.StatusCode; c < 200 || c > 299 { - return nil, fmt.Errorf("impersonate: status code %d: %s", c, body) - } - - var accessTokenResp generateAccessTokenResp - if err := json.Unmarshal(body, &accessTokenResp); err != nil { - return nil, fmt.Errorf("impersonate: unable to parse response: %v", err) - } - expiry, err := time.Parse(time.RFC3339, accessTokenResp.ExpireTime) - if err != nil { - return nil, fmt.Errorf("impersonate: unable to parse expiry: %v", err) - } - return &oauth2.Token{ - AccessToken: accessTokenResp.AccessToken, - Expiry: expiry, - }, nil -} diff --git a/vendor/google.golang.org/api/impersonate/user.go b/vendor/google.golang.org/api/impersonate/user.go deleted file mode 100644 index fa4e0b43b97..00000000000 --- a/vendor/google.golang.org/api/impersonate/user.go +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package impersonate - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "strings" - "time" - - "golang.org/x/oauth2" -) - -// user provides an auth flow for domain-wide delegation, setting -// CredentialsConfig.Subject to be the impersonated user. -func user(ctx context.Context, c CredentialsConfig, client *http.Client, lifetime time.Duration, isStaticToken bool) (oauth2.TokenSource, error) { - u := userTokenSource{ - client: client, - targetPrincipal: c.TargetPrincipal, - subject: c.Subject, - lifetime: lifetime, - } - u.delegates = make([]string, len(c.Delegates)) - for i, v := range c.Delegates { - u.delegates[i] = formatIAMServiceAccountName(v) - } - u.scopes = make([]string, len(c.Scopes)) - copy(u.scopes, c.Scopes) - if isStaticToken { - tok, err := u.Token() - if err != nil { - return nil, err - } - return oauth2.StaticTokenSource(tok), nil - } - return oauth2.ReuseTokenSource(nil, u), nil -} - -type claimSet struct { - Iss string `json:"iss"` - Scope string `json:"scope,omitempty"` - Sub string `json:"sub,omitempty"` - Aud string `json:"aud"` - Iat int64 `json:"iat"` - Exp int64 `json:"exp"` -} - -type signJWTRequest struct { - Payload string `json:"payload"` - Delegates []string `json:"delegates,omitempty"` -} - -type signJWTResponse struct { - // KeyID is the key used to sign the JWT. - KeyID string `json:"keyId"` - // SignedJwt contains the automatically generated header; the - // client-supplied payload; and the signature, which is generated using - // the key referenced by the `kid` field in the header. - SignedJWT string `json:"signedJwt"` -} - -type exchangeTokenResponse struct { - AccessToken string `json:"access_token"` - TokenType string `json:"token_type"` - ExpiresIn int64 `json:"expires_in"` -} - -type userTokenSource struct { - client *http.Client - - targetPrincipal string - subject string - scopes []string - lifetime time.Duration - delegates []string -} - -func (u userTokenSource) Token() (*oauth2.Token, error) { - signedJWT, err := u.signJWT() - if err != nil { - return nil, err - } - return u.exchangeToken(signedJWT) -} - -func (u userTokenSource) signJWT() (string, error) { - now := time.Now() - exp := now.Add(u.lifetime) - claims := claimSet{ - Iss: u.targetPrincipal, - Scope: strings.Join(u.scopes, " "), - Sub: u.subject, - Aud: fmt.Sprintf("%s/token", oauth2Endpoint), - Iat: now.Unix(), - Exp: exp.Unix(), - } - payloadBytes, err := json.Marshal(claims) - if err != nil { - return "", fmt.Errorf("impersonate: unable to marshal claims: %v", err) - } - signJWTReq := signJWTRequest{ - Payload: string(payloadBytes), - Delegates: u.delegates, - } - - bodyBytes, err := json.Marshal(signJWTReq) - if err != nil { - return "", fmt.Errorf("impersonate: unable to marshal request: %v", err) - } - reqURL := fmt.Sprintf("%s/v1/%s:signJwt", iamCredentailsEndpoint, formatIAMServiceAccountName(u.targetPrincipal)) - req, err := http.NewRequest("POST", reqURL, bytes.NewReader(bodyBytes)) - if err != nil { - return "", fmt.Errorf("impersonate: unable to create request: %v", err) - } - req.Header.Set("Content-Type", "application/json") - rawResp, err := u.client.Do(req) - if err != nil { - return "", fmt.Errorf("impersonate: unable to sign JWT: %v", err) - } - body, err := io.ReadAll(io.LimitReader(rawResp.Body, 1<<20)) - if err != nil { - return "", fmt.Errorf("impersonate: unable to read body: %v", err) - } - if c := 
rawResp.StatusCode; c < 200 || c > 299 { - return "", fmt.Errorf("impersonate: status code %d: %s", c, body) - } - - var signJWTResp signJWTResponse - if err := json.Unmarshal(body, &signJWTResp); err != nil { - return "", fmt.Errorf("impersonate: unable to parse response: %v", err) - } - return signJWTResp.SignedJWT, nil -} - -func (u userTokenSource) exchangeToken(signedJWT string) (*oauth2.Token, error) { - now := time.Now() - v := url.Values{} - v.Set("grant_type", "assertion") - v.Set("assertion_type", "http://oauth.net/grant_type/jwt/1.0/bearer") - v.Set("assertion", signedJWT) - rawResp, err := u.client.PostForm(fmt.Sprintf("%s/token", oauth2Endpoint), v) - if err != nil { - return nil, fmt.Errorf("impersonate: unable to exchange token: %v", err) - } - body, err := io.ReadAll(io.LimitReader(rawResp.Body, 1<<20)) - if err != nil { - return nil, fmt.Errorf("impersonate: unable to read body: %v", err) - } - if c := rawResp.StatusCode; c < 200 || c > 299 { - return nil, fmt.Errorf("impersonate: status code %d: %s", c, body) - } - - var tokenResp exchangeTokenResponse - if err := json.Unmarshal(body, &tokenResp); err != nil { - return nil, fmt.Errorf("impersonate: unable to parse response: %v", err) - } - - return &oauth2.Token{ - AccessToken: tokenResp.AccessToken, - TokenType: tokenResp.TokenType, - Expiry: now.Add(time.Second * time.Duration(tokenResp.ExpiresIn)), - }, nil -} diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go index 5ea555ed01a..86861e24383 100644 --- a/vendor/google.golang.org/api/internal/creds.go +++ b/vendor/google.golang.org/api/internal/creds.go @@ -15,6 +15,7 @@ import ( "os" "time" + "cloud.google.com/go/auth" "cloud.google.com/go/auth/credentials" "cloud.google.com/go/auth/oauth2adapt" "golang.org/x/oauth2" @@ -30,7 +31,7 @@ const quotaProjectEnvVar = "GOOGLE_CLOUD_QUOTA_PROJECT" // it returns default credential information. func Creds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) { if ds.IsNewAuthLibraryEnabled() { - return credsNewAuth(ctx, ds) + return credsNewAuth(ds) } creds, err := baseCreds(ctx, ds) if err != nil { @@ -42,6 +43,30 @@ func Creds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) { return creds, nil } +// AuthCreds returns [cloud.google.com/go/auth.Credentials] based on credentials +// options provided via [option.ClientOption], including legacy oauth2/google +// options. If there are no applicable options, then it returns the result of +// [cloud.google.com/go/auth/credentials.DetectDefault]. +func AuthCreds(ctx context.Context, settings *DialSettings) (*auth.Credentials, error) { + if settings.AuthCredentials != nil { + return settings.AuthCredentials, nil + } + // Support oauth2/google options + var oauth2Creds *google.Credentials + if settings.InternalCredentials != nil { + oauth2Creds = settings.InternalCredentials + } else if settings.Credentials != nil { + oauth2Creds = settings.Credentials + } else if settings.TokenSource != nil { + oauth2Creds = &google.Credentials{TokenSource: settings.TokenSource} + } + if oauth2Creds != nil { + return oauth2adapt.AuthCredentialsFromOauth2Credentials(oauth2Creds), nil + } + + return detectDefaultFromDialSettings(settings) +} + // GetOAuth2Configuration determines configurations for the OAuth2 transport, which is separate from the API transport. // The OAuth2 transport and endpoint will be configured for mTLS if applicable. 
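// ----------------------------------------------------------------------------
// Editor's note: a sketch of the oauth2 -> new-auth bridging performed by the
// AuthCreds helper added above, via the public oauth2adapt package. The static
// token is a stand-in for real credentials.
package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/auth/oauth2adapt"
	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
)

func main() {
	ctx := context.Background()
	// Legacy-style credentials wrapping a plain oauth2.TokenSource.
	oauth2Creds := &google.Credentials{
		TokenSource: oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "fake-token"}),
	}
	// Adapt them for APIs that expect cloud.google.com/go/auth credentials.
	authCreds := oauth2adapt.AuthCredentialsFromOauth2Credentials(oauth2Creds)
	tok, err := authCreds.Token(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(tok.Value) // "fake-token"
}
// ----------------------------------------------------------------------------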
func GetOAuth2Configuration(ctx context.Context, settings *DialSettings) (string, *http.Client, error) { @@ -62,7 +87,7 @@ func GetOAuth2Configuration(ctx context.Context, settings *DialSettings) (string return tokenURL, oauth2Client, nil } -func credsNewAuth(ctx context.Context, settings *DialSettings) (*google.Credentials, error) { +func credsNewAuth(settings *DialSettings) (*google.Credentials, error) { // Preserve old options behavior if settings.InternalCredentials != nil { return settings.InternalCredentials, nil @@ -76,6 +101,14 @@ func credsNewAuth(ctx context.Context, settings *DialSettings) (*google.Credenti return oauth2adapt.Oauth2CredentialsFromAuthCredentials(settings.AuthCredentials), nil } + creds, err := detectDefaultFromDialSettings(settings) + if err != nil { + return nil, err + } + return oauth2adapt.Oauth2CredentialsFromAuthCredentials(creds), nil +} + +func detectDefaultFromDialSettings(settings *DialSettings) (*auth.Credentials, error) { var useSelfSignedJWT bool var aud string var scopes []string @@ -100,24 +133,14 @@ func credsNewAuth(ctx context.Context, settings *DialSettings) (*google.Credenti aud = settings.DefaultAudience } - tokenURL, oauth2Client, err := GetOAuth2Configuration(ctx, settings) - if err != nil { - return nil, err - } - creds, err := credentials.DetectDefault(&credentials.DetectOptions{ + return credentials.DetectDefault(&credentials.DetectOptions{ Scopes: scopes, Audience: aud, CredentialsFile: settings.CredentialsFile, CredentialsJSON: settings.CredentialsJSON, UseSelfSignedJWT: useSelfSignedJWT, - TokenURL: tokenURL, - Client: oauth2Client, + Logger: settings.Logger, }) - if err != nil { - return nil, err - } - - return oauth2adapt.Oauth2CredentialsFromAuthCredentials(creds), nil } func baseCreds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) { @@ -302,14 +325,3 @@ func baseTransport() *http.Transport { ExpectContinueTimeout: 1 * time.Second, } } - -// ErrUniverseNotMatch composes an error string from the provided universe -// domain sources (DialSettings and Credentials, respectively). -func ErrUniverseNotMatch(settingsUD, credsUD string) error { - return fmt.Errorf( - "the configured universe domain (%q) does not match the universe "+ - "domain found in the credentials (%q). If you haven't configured "+ - "WithUniverseDomain explicitly, \"googleapis.com\" is the default", - settingsUD, - credsUD) -} diff --git a/vendor/google.golang.org/api/internal/gensupport/media.go b/vendor/google.golang.org/api/internal/gensupport/media.go index c048a57084b..8c7435de3e6 100644 --- a/vendor/google.golang.org/api/internal/gensupport/media.go +++ b/vendor/google.golang.org/api/internal/gensupport/media.go @@ -135,13 +135,14 @@ func PrepareUpload(media io.Reader, chunkSize int) (r io.Reader, mb *MediaBuffer // code only. type MediaInfo struct { // At most one of Media and MediaBuffer will be set. - media io.Reader - buffer *MediaBuffer - singleChunk bool - mType string - size int64 // mediaSize, if known. Used only for calls to progressUpdater_. - progressUpdater googleapi.ProgressUpdater - chunkRetryDeadline time.Duration + media io.Reader + buffer *MediaBuffer + singleChunk bool + mType string + size int64 // mediaSize, if known. Used only for calls to progressUpdater_. + progressUpdater googleapi.ProgressUpdater + chunkRetryDeadline time.Duration + chunkTransferTimeout time.Duration } // NewInfoFromMedia should be invoked from the Media method of a call. 
It returns a @@ -157,6 +158,7 @@ func NewInfoFromMedia(r io.Reader, options []googleapi.MediaOption) *MediaInfo { } } mi.chunkRetryDeadline = opts.ChunkRetryDeadline + mi.chunkTransferTimeout = opts.ChunkTransferTimeout mi.media, mi.buffer, mi.singleChunk = PrepareUpload(r, opts.ChunkSize) return mi } @@ -198,6 +200,9 @@ func (mi *MediaInfo) UploadType() string { // UploadRequest sets up an HTTP request for media upload. It adds headers // as necessary, and returns a replacement for the body and a function for http.Request.GetBody. func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newBody io.Reader, getBody func() (io.ReadCloser, error), cleanup func()) { + if body == nil { + body = new(bytes.Buffer) + } cleanup = func() {} if mi == nil { return body, nil, cleanup @@ -294,7 +299,8 @@ func (mi *MediaInfo) ResumableUpload(locURI string) *ResumableUpload { mi.progressUpdater(curr, mi.size) } }, - ChunkRetryDeadline: mi.chunkRetryDeadline, + ChunkRetryDeadline: mi.chunkRetryDeadline, + ChunkTransferTimeout: mi.chunkTransferTimeout, } } diff --git a/vendor/google.golang.org/api/internal/gensupport/resumable.go b/vendor/google.golang.org/api/internal/gensupport/resumable.go index f828ddb60e6..d74fe2a2998 100644 --- a/vendor/google.golang.org/api/internal/gensupport/resumable.go +++ b/vendor/google.golang.org/api/internal/gensupport/resumable.go @@ -43,6 +43,10 @@ type ResumableUpload struct { // retries should happen. ChunkRetryDeadline time.Duration + // ChunkTransferTimeout configures the per-chunk transfer timeout. If a chunk upload stalls for longer than + // this duration, the upload will be retried. + ChunkTransferTimeout time.Duration + // Track current request invocation ID and attempt count for retry metrics // and idempotency headers. invocationID string @@ -160,6 +164,8 @@ func (rx *ResumableUpload) transferChunk(ctx context.Context) (*http.Response, e // and calls the returned functions after the request returns (see send.go). // rx is private to the auto-generated API code. // Exactly one of resp or err will be nil. If resp is non-nil, the caller must call resp.Body.Close. +// Upload does not parse the response into the error on a non 200 response; +// it is the caller's responsibility to call resp.Body.Close. func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err error) { // There are a couple of cases where it's possible for err and resp to both @@ -241,13 +247,47 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err default: } - resp, err = rx.transferChunk(ctx) + // rCtx is derived from a context with a defined transferTimeout with non-zero value. + // If a particular request exceeds this transfer time for getting response, the rCtx deadline will be exceeded, + // triggering a retry of the request. + var rCtx context.Context + var cancel context.CancelFunc + + rCtx = ctx + if rx.ChunkTransferTimeout != 0 { + rCtx, cancel = context.WithTimeout(ctx, rx.ChunkTransferTimeout) + } + + // We close the response's body here, since we definitely will not + // return `resp` now. If we close it before the select case above, a + // timer may fire and cause us to return a response with a closed body + // (in which case, the caller will not get the error message in the body). 
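// ----------------------------------------------------------------------------
// Editor's note: a sketch of how a caller would opt in to the per-chunk
// transfer timeout plumbed through MediaInfo and ResumableUpload above,
// assuming the matching googleapi.ChunkTransferTimeout MediaOption from this
// library version. Bucket and file names are hypothetical.
package main

import (
	"context"
	"log"
	"os"
	"time"

	"google.golang.org/api/googleapi"
	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	svc, err := storage.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	f, err := os.Open("large-file.bin") // hypothetical input
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Retry any chunk that stalls for more than 30s instead of letting a dead
	// connection hang the entire resumable upload.
	_, err = svc.Objects.Insert("my-bucket", &storage.Object{Name: "large-file.bin"}).
		Media(f,
			googleapi.ChunkSize(8*1024*1024),
			googleapi.ChunkTransferTimeout(30*time.Second)).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
}
// ----------------------------------------------------------------------------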
+ if resp != nil && resp.Body != nil { + // Read the body to EOF - if the Body is not both read to EOF and closed, + // the Client's underlying RoundTripper may not be able to re-use the + // persistent TCP connection to the server for a subsequent "keep-alive" request. + // See https://pkg.go.dev/net/http#Client.Do + io.Copy(io.Discard, resp.Body) + resp.Body.Close() + } + resp, err = rx.transferChunk(rCtx) var status int if resp != nil { status = resp.StatusCode } + // The upload should be retried if the rCtx is canceled due to a timeout. + select { + case <-rCtx.Done(): + if rx.ChunkTransferTimeout != 0 && errors.Is(rCtx.Err(), context.DeadlineExceeded) { + // Cancel the context for rCtx + cancel() + continue + } + default: + } + // Check if we should retry the request. if !errorFunc(status, err) { quitAfterTimer.Stop() @@ -256,15 +296,11 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err rx.attempts++ pause = bo.Pause() - if resp != nil && resp.Body != nil { - resp.Body.Close() - } } // If the chunk was uploaded successfully, but there's still // more to go, upload the next chunk without any delay. if statusResumeIncomplete(resp) { - resp.Body.Close() continue } diff --git a/vendor/google.golang.org/api/internal/gensupport/send.go b/vendor/google.golang.org/api/internal/gensupport/send.go index f6716134ebf..1c91f147abe 100644 --- a/vendor/google.golang.org/api/internal/gensupport/send.go +++ b/vendor/google.golang.org/api/internal/gensupport/send.go @@ -9,6 +9,7 @@ import ( "encoding/json" "errors" "fmt" + "io" "net/http" "strings" "time" @@ -222,3 +223,19 @@ func DecodeResponse(target interface{}, res *http.Response) error { } return json.NewDecoder(res.Body).Decode(target) } + +// DecodeResponseBytes decodes the body of res into target and returns bytes read +// from the body. If there is no body, target is unchanged. +func DecodeResponseBytes(target interface{}, res *http.Response) ([]byte, error) { + if res.StatusCode == http.StatusNoContent { + return nil, nil + } + b, err := io.ReadAll(res.Body) + if err != nil { + return nil, err + } + if err := json.Unmarshal(b, target); err != nil { + return nil, err + } + return b, nil +} diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go index edba49af499..4f5b1a0ebea 100644 --- a/vendor/google.golang.org/api/internal/settings.go +++ b/vendor/google.golang.org/api/internal/settings.go @@ -8,6 +8,7 @@ package internal import ( "crypto/tls" "errors" + "log/slog" "net/http" "os" "strconv" @@ -62,6 +63,7 @@ type DialSettings struct { AllowNonDefaultServiceAccount bool DefaultUniverseDomain string UniverseDomain string + Logger *slog.Logger // Google API system parameters. For more information please read: // https://cloud.google.com/apis/docs/system-parameters QuotaProject string @@ -70,6 +72,9 @@ type DialSettings struct { // New Auth library Options AuthCredentials *auth.Credentials EnableNewAuthLibrary bool + + // TODO(b/372244283): Remove after b/358175516 has been fixed + EnableAsyncRefreshDryRun func() } // GetScopes returns the user-provided scopes, if set, or else falls back to the @@ -204,8 +209,7 @@ func (ds *DialSettings) IsUniverseDomainGDU() bool { } // GetUniverseDomain returns the default service domain for a given Cloud -// universe, from google.Credentials, for comparison with the value returned by -// (*DialSettings).GetUniverseDomain. This wrapper function should be removed +// universe, from google.Credentials. 
This wrapper function should be removed // to close https://github.com/googleapis/google-api-go-client/issues/2399. func GetUniverseDomain(creds *google.Credentials) (string, error) { timer := time.NewTimer(time.Second) diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go index e2edf688d4a..e55b0e5b710 100644 --- a/vendor/google.golang.org/api/internal/version.go +++ b/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library. -const Version = "0.188.0" +const Version = "0.218.0" diff --git a/vendor/google.golang.org/api/option/internaloption/internaloption.go b/vendor/google.golang.org/api/option/internaloption/internaloption.go index e6b5c102555..c63c0c194ae 100644 --- a/vendor/google.golang.org/api/option/internaloption/internaloption.go +++ b/vendor/google.golang.org/api/option/internaloption/internaloption.go @@ -6,6 +6,11 @@ package internaloption import ( + "context" + "log/slog" + + "cloud.google.com/go/auth" + "github.com/googleapis/gax-go/v2/internallog" "golang.org/x/oauth2/google" "google.golang.org/api/internal" "google.golang.org/api/option" @@ -206,9 +211,79 @@ func (w enableNewAuthLibrary) Apply(o *internal.DialSettings) { o.EnableNewAuthLibrary = bool(w) } +// EnableAsyncRefreshDryRun returns a ClientOption that specifies if libraries in this +// module should asynchronously refresh auth token in parallel to sync refresh. +// +// This option can be used to determine whether refreshing the token asynchronously +// prior to its actual expiry works without any issues in a particular environment. +// +// The errHandler function will be called when there is an error while refreshing +// the token asynchronously. +// +// This is an EXPERIMENTAL option and will be removed in the future. +// TODO(b/372244283): Remove after b/358175516 has been fixed +func EnableAsyncRefreshDryRun(errHandler func()) option.ClientOption { + return enableAsyncRefreshDryRun{ + errHandler: errHandler, + } +} + +// TODO(b/372244283): Remove after b/358175516 has been fixed +type enableAsyncRefreshDryRun struct { + errHandler func() +} + +// TODO(b/372244283): Remove after b/358175516 has been fixed +func (w enableAsyncRefreshDryRun) Apply(o *internal.DialSettings) { + o.EnableAsyncRefreshDryRun = w.errHandler +} + // EmbeddableAdapter is a no-op option.ClientOption that allows libraries to // create their own client options by embedding this type into their own // client-specific option wrapper. See example for usage. type EmbeddableAdapter struct{} func (*EmbeddableAdapter) Apply(_ *internal.DialSettings) {} + +// GetLogger is a helper for client libraries to extract the [slog.Logger] from +// the provided options or return a default logger if one is not found. +// +// It should only be used internally by generated clients. This is an EXPERIMENTAL API +// and may be changed or removed in the future. 
+func GetLogger(opts []option.ClientOption) *slog.Logger { + var ds internal.DialSettings + for _, opt := range opts { + opt.Apply(&ds) + } + return internallog.New(ds.Logger) +} + +// AuthCreds returns [cloud.google.com/go/auth.Credentials] using the following +// options provided via [option.ClientOption], including legacy oauth2/google +// options, in this order: +// +// * [option.WithAuthCredentials] +// * [option/internaloption.WithCredentials] (internal use only) +// * [option.WithCredentials] +// * [option.WithTokenSource] +// +// If there are no applicable credentials options, then it passes the +// following options to [cloud.google.com/go/auth/credentials.DetectDefault] and +// returns the result: +// +// * [option.WithAudiences] +// * [option.WithCredentialsFile] +// * [option.WithCredentialsJSON] +// * [option.WithScopes] +// * [option/internaloption.WithDefaultScopes] (internal use only) +// * [option/internaloption.EnableJwtWithScope] (internal use only) +// +// This function should only be used internally by generated clients. This is an +// EXPERIMENTAL API and may be changed or removed in the future. +func AuthCreds(ctx context.Context, opts []option.ClientOption) (*auth.Credentials, error) { + var ds internal.DialSettings + for _, opt := range opts { + opt.Apply(&ds) + } + return internal.AuthCreds(ctx, &ds) +} diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go index 23aba01b628..eb54813aae6 100644 --- a/vendor/google.golang.org/api/option/option.go +++ b/vendor/google.golang.org/api/option/option.go @@ -7,6 +7,7 @@ package option import ( "crypto/tls" + "log/slog" "net/http" "cloud.google.com/go/auth" @@ -359,8 +360,6 @@ func (w withAuthCredentials) Apply(o *internal.DialSettings) { } // WithUniverseDomain returns a ClientOption that sets the universe domain. -// -// This is an EXPERIMENTAL API and may be changed or removed in the future. func WithUniverseDomain(ud string) ClientOption { return withUniverseDomain(ud) } @@ -370,3 +369,17 @@ type withUniverseDomain string func (w withUniverseDomain) Apply(o *internal.DialSettings) { o.UniverseDomain = string(w) } + +// WithLogger returns a ClientOption that sets the logger used throughout the +// client library call stack. If this option is provided it takes precedence +// over the value set in GOOGLE_SDK_GO_LOGGING_LEVEL. Specifying this option +// enables logging at the provided logger's configured level. 
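// ----------------------------------------------------------------------------
// Editor's note: a sketch of wiring a slog.Logger into a generated client via
// the WithLogger option documented above. With a debug-level handler, the
// "api request" / "api response" records added throughout this diff become
// visible. The storage client is just a convenient example.
package main

import (
	"context"
	"log"
	"log/slog"
	"os"

	"google.golang.org/api/option"
	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	logger := slog.New(slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{
		Level: slog.LevelDebug, // surface DebugContext request/response records
	}))
	svc, err := storage.NewService(ctx, option.WithLogger(logger))
	if err != nil {
		log.Fatal(err)
	}
	_ = svc // subsequent calls emit structured request/response debug logs
}
// ----------------------------------------------------------------------------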
+func WithLogger(l *slog.Logger) ClientOption { + return withLogger{l} +} + +type withLogger struct{ l *slog.Logger } + +func (w withLogger) Apply(o *internal.DialSettings) { + o.Logger = w.l +} diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json index 9c06c5ede4e..992c4c0145a 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-api.json +++ b/vendor/google.golang.org/api/storage/v1/storage-api.json @@ -32,6 +32,11 @@ "endpointUrl": "https://storage.europe-west3.rep.googleapis.com/", "location": "europe-west3" }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.europe-west8.rep.googleapis.com/", + "location": "europe-west8" + }, { "description": "Regional Endpoint", "endpointUrl": "https://storage.europe-west9.rep.googleapis.com/", @@ -41,9 +46,54 @@ "description": "Regional Endpoint", "endpointUrl": "https://storage.me-central2.rep.googleapis.com/", "location": "me-central2" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-central1.rep.googleapis.com/", + "location": "us-central1" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-east1.rep.googleapis.com/", + "location": "us-east1" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-east4.rep.googleapis.com/", + "location": "us-east4" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-east5.rep.googleapis.com/", + "location": "us-east5" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-south1.rep.googleapis.com/", + "location": "us-south1" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-west1.rep.googleapis.com/", + "location": "us-west1" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-west2.rep.googleapis.com/", + "location": "us-west2" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-west3.rep.googleapis.com/", + "location": "us-west3" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-west4.rep.googleapis.com/", + "location": "us-west4" } ], - "etag": "\"39393931363036383932333134343736343437\"", + "etag": "\"3133343838373034343130353038353234313337\"", "icons": { "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" @@ -560,7 +610,7 @@ "buckets": { "methods": { "delete": { - "description": "Permanently deletes an empty bucket.", + "description": "Deletes an empty bucket. Deletions are permanent unless soft delete is enabled on the bucket.", "httpMethod": "DELETE", "id": "storage.buckets.delete", "parameterOrder": [ @@ -612,6 +662,12 @@ "required": true, "type": "string" }, + "generation": { + "description": "If present, specifies the generation of the bucket. This is required if softDeleted is true.", + "format": "int64", + "location": "query", + "type": "string" + }, "ifMetagenerationMatch": { "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", "format": "int64", @@ -637,6 +693,11 @@ "location": "query", "type": "string" }, + "softDeleted": { + "description": "If true, return the soft-deleted version of this bucket. The default is false. 
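The option.go hunk just above introduces option.WithLogger, the public half of the slog plumbing that the regenerated storage client later in this diff consumes. A minimal sketch of wiring it up, assuming only the APIs added in this diff; the project ID and handler settings are illustrative:

package main

import (
	"context"
	"log"
	"log/slog"
	"os"

	"google.golang.org/api/option"
	storage "google.golang.org/api/storage/v1"
)

func main() {
	// The generated client logs via DebugContext, so the handler must have
	// debug level enabled for the "api request"/"api response" records to show.
	logger := slog.New(slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug}))
	svc, err := storage.NewService(context.Background(), option.WithLogger(logger))
	if err != nil {
		log.Fatal(err)
	}
	// Each call now emits paired request/response debug records.
	if _, err := svc.Buckets.List("my-project").Do(); err != nil { // "my-project" is a placeholder
		log.Fatal(err)
	}
}

Per the WithLogger doc comment above, an explicitly supplied logger takes precedence over the GOOGLE_SDK_GO_LOGGING_LEVEL environment variable.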
For more information, see [Soft Delete](https://cloud.google.com/storage/docs/soft-delete).", + "location": "query", + "type": "boolean" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -860,6 +921,11 @@ "location": "query", "type": "string" }, + "softDeleted": { + "description": "If true, only soft-deleted bucket versions will be returned. The default is false. For more information, see [Soft Delete](https://cloud.google.com/storage/docs/soft-delete).", + "location": "query", + "type": "boolean" + }, "userProject": { "description": "The project to be billed for this request.", "location": "query", @@ -1013,6 +1079,85 @@ "https://www.googleapis.com/auth/devstorage.full_control" ] }, + "relocate": { + "description": "Initiates a long-running Relocate Bucket operation on the specified bucket.", + "httpMethod": "POST", + "id": "storage.buckets.relocate", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket to be moved.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/relocate", + "request": { + "$ref": "RelocateBucketRequest" + }, + "response": { + "$ref": "GoogleLongrunningOperation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "restore": { + "description": "Restores a soft-deleted bucket.", + "httpMethod": "POST", + "id": "storage.buckets.restore", + "parameterOrder": [ + "bucket", + "generation" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "generation": { + "description": "Generation of a bucket.", + "format": "int64", + "location": "query", + "required": true, + "type": "string" + }, + "projection": { + "description": "Set of properties to return. Defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit owner, acl and defaultObjectAcl properties." + ], + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/restore", + "response": { + "$ref": "Bucket" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, "setIamPolicy": { "description": "Updates an IAM policy for the specified bucket.", "httpMethod": "PUT", @@ -2778,8 +2923,13 @@ "location": "query", "type": "string" }, + "restoreToken": { + "description": "Restore token used to differentiate soft-deleted objects with the same name and generation. Only applicable for hierarchical namespace buckets and if softDeleted is set to true. This parameter is optional, and is only required in the rare case when there are multiple soft-deleted objects with the same name and generation.", + "location": "query", + "type": "string" + }, "softDeleted": { - "description": "If true, only soft-deleted object versions will be listed. The default is false. For more information, see Soft Delete.", + "description": "If true, only soft-deleted object versions will be listed. The default is false. 
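The buckets.get, buckets.list, and buckets.restore changes above extend soft delete from objects to whole buckets: a soft-deleted bucket keeps a generation and can be fetched or restored until its hardDeleteTime. A sketch of that flow with the generated client; the SoftDeleted and Generation setters on BucketsGetCall appear later in this diff, while the Buckets.Restore(bucket, generation) signature is an assumption read off the JSON definition above:

package main

import storage "google.golang.org/api/storage/v1"

// restoreSoftDeletedBucket fetches a soft-deleted bucket version and restores it.
// svc is assumed to be a *storage.Service; the name and generation are illustrative.
func restoreSoftDeletedBucket(svc *storage.Service) error {
	const name = "my-bucket"
	const gen = int64(1736370000000000) // hypothetical bucket generation
	// generation is required whenever softDeleted is true.
	if _, err := svc.Buckets.Get(name).SoftDeleted(true).Generation(gen).Do(); err != nil {
		return err
	}
	// Restore the soft-deleted version identified by its generation (assumed method).
	_, err := svc.Buckets.Restore(name, gen).Do()
	return err
}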
For more information, see [Soft Delete](https://cloud.google.com/storage/docs/soft-delete).", "location": "query", "type": "boolean" }, @@ -3041,7 +3191,7 @@ "type": "string" }, "softDeleted": { - "description": "If true, only soft-deleted object versions will be listed. The default is false. For more information, see Soft Delete.", + "description": "If true, only soft-deleted object versions will be listed. The default is false. For more information, see [Soft Delete](https://cloud.google.com/storage/docs/soft-delete).", "location": "query", "type": "boolean" }, @@ -3056,7 +3206,7 @@ "type": "string" }, "versions": { - "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.", + "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see [Object Versioning](https://cloud.google.com/storage/docs/object-versioning).", "location": "query", "type": "boolean" } @@ -3074,6 +3224,98 @@ ], "supportsSubscription": true }, + "move": { + "description": "Moves the source object to the destination object in the same bucket.", + "httpMethod": "POST", + "id": "storage.objects.move", + "parameterOrder": [ + "bucket", + "sourceObject", + "destinationObject" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which the object resides.", + "location": "path", + "required": true, + "type": "string" + }, + "destinationObject": { + "description": "Name of the destination object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + "location": "path", + "required": true, + "type": "string" + }, + "ifGenerationMatch": { + "description": "Makes the operation conditional on whether the destination object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object. `ifGenerationMatch` and `ifGenerationNotMatch` conditions are mutually exclusive: it's an error for both of them to be set in the request.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifGenerationNotMatch": { + "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.`ifGenerationMatch` and `ifGenerationNotMatch` conditions are mutually exclusive: it's an error for both of them to be set in the request.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value. `ifMetagenerationMatch` and `ifMetagenerationNotMatch` conditions are mutually exclusive: it's an error for both of them to be set in the request.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value. 
`ifMetagenerationMatch` and `ifMetagenerationNotMatch` conditions are mutually exclusive: it's an error for both of them to be set in the request.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifSourceGenerationMatch": { + "description": "Makes the operation conditional on whether the source object's current generation matches the given value. `ifSourceGenerationMatch` and `ifSourceGenerationNotMatch` conditions are mutually exclusive: it's an error for both of them to be set in the request.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifSourceGenerationNotMatch": { + "description": "Makes the operation conditional on whether the source object's current generation does not match the given value. `ifSourceGenerationMatch` and `ifSourceGenerationNotMatch` conditions are mutually exclusive: it's an error for both of them to be set in the request.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifSourceMetagenerationMatch": { + "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value. `ifSourceMetagenerationMatch` and `ifSourceMetagenerationNotMatch` conditions are mutually exclusive: it's an error for both of them to be set in the request.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifSourceMetagenerationNotMatch": { + "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value. `ifSourceMetagenerationMatch` and `ifSourceMetagenerationNotMatch` conditions are mutually exclusive: it's an error for both of them to be set in the request.", + "format": "int64", + "location": "query", + "type": "string" + }, + "sourceObject": { + "description": "Name of the source object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{sourceObject}/moveTo/o/{destinationObject}", + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, "patch": { "description": "Patches an object's metadata.", "httpMethod": "PATCH", @@ -3235,7 +3477,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -3253,6 +3495,11 @@ "location": "query", "type": "string" }, + "restoreToken": { + "description": "Restore token used to differentiate soft-deleted objects with the same name and generation. Only applicable for hierarchical namespace buckets.
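storage.objects.move, added above, renames an object within a bucket in a single server-side call instead of a rewrite-then-delete pair, and carries the full set of source and destination preconditions. A sketch against the generated client; the Objects.Move(bucket, sourceObject, destinationObject) method and its setter names are assumptions inferred from the JSON id and the usual codegen pattern:

package main

import storage "google.golang.org/api/storage/v1"

// moveObject renames an object in place. svc is assumed to be a
// *storage.Service; bucket and object names are illustrative.
func moveObject(svc *storage.Service) (*storage.Object, error) {
	return svc.Objects.Move("my-bucket", "logs/old-name.txt", "logs/new-name.txt").
		// Per the parameter description above, 0 means: succeed only if no
		// live destination object already exists.
		IfGenerationMatch(0).
		Do()
}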
This parameter is optional, and is only required in the rare case when there are multiple soft-deleted objects with the same name and generation.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -3704,7 +3951,7 @@ "type": "string" }, "versions": { - "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.", + "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see [Object Versioning](https://cloud.google.com/storage/docs/object-versioning).", "location": "query", "type": "boolean" } @@ -3730,6 +3977,38 @@ }, "operations": { "methods": { + "advanceRelocateBucket": { + "description": "Starts asynchronous advancement of the relocate bucket operation in the case of required write downtime, to allow it to lock the bucket at the source location, and proceed with the bucket location swap. The server makes a best effort to advance the relocate bucket operation, but success is not guaranteed.", + "httpMethod": "POST", + "id": "storage.buckets.operations.advanceRelocateBucket", + "parameterOrder": [ + "bucket", + "operationId" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket to advance the relocate for.", + "location": "path", + "required": true, + "type": "string" + }, + "operationId": { + "description": "ID of the operation resource.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/operations/{operationId}/advanceRelocateBucket", + "request": { + "$ref": "AdvanceRelocateBucketOperationRequest" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, "cancel": { "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed.", "httpMethod": "POST", @@ -4006,7 +4285,7 @@ ] }, "update": { - "description": "Updates the state of an HMAC key. See the HMAC Key resource descriptor for valid states.", + "description": "Updates the state of an HMAC key. See the [HMAC Key resource descriptor](https://cloud.google.com/storage/docs/json_api/v1/projects/hmacKeys/update#request-body) for valid states.", "httpMethod": "PUT", "id": "storage.projects.hmacKeys.update", "parameterOrder": [ @@ -4085,9 +4364,26 @@ } } }, - "revision": "20240625", + "revision": "20241206", "rootUrl": "https://storage.googleapis.com/", "schemas": { + "AdvanceRelocateBucketOperationRequest": { + "description": "An AdvanceRelocateBucketOperation request.", + "id": "AdvanceRelocateBucketOperationRequest", + "properties": { + "expireTime": { + "description": "Specifies the time when the relocation will revert to the sync stage if the relocation hasn't succeeded.", + "format": "date-time", + "type": "string" + }, + "ttl": { + "description": "Specifies the duration after which the relocation will revert to the sync stage if the relocation hasn't succeeded. 
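Relocation is modeled as a long-running operation: buckets.relocate starts it, and the new operations.advanceRelocateBucket call above lets the caller explicitly enter the write-downtime phase, with ttl/expireTime bounding how long the operation may stay there before reverting to the sync stage. A sketch using the RelocateBucketRequest and AdvanceRelocateBucketOperationRequest types defined later in this diff; the Buckets.Relocate and Operations.AdvanceRelocateBucket method names, and the use of the operation's Name as its ID, are assumptions inferred from the JSON definitions:

package main

import storage "google.golang.org/api/storage/v1"

// relocateBucket validates, starts, and then advances a bucket relocation.
// svc is assumed to be a *storage.Service; names and values are illustrative.
func relocateBucket(svc *storage.Service) error {
	req := &storage.RelocateBucketRequest{
		DestinationLocation: "US-CENTRAL1",
		ValidateOnly:        true, // dry run: validate without relocating
	}
	if _, err := svc.Buckets.Relocate("my-bucket", req).Do(); err != nil {
		return err
	}
	req.ValidateOnly = false
	op, err := svc.Buckets.Relocate("my-bucket", req).Do()
	if err != nil {
		return err
	}
	// Opt into the write-downtime phase; per the schema above, ttl falls back
	// to a default of 12h when omitted.
	adv := &storage.AdvanceRelocateBucketOperationRequest{Ttl: "3600s"}
	return svc.Operations.AdvanceRelocateBucket("my-bucket", op.Name, adv).Do()
}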
Optional, if not supplied, a default value of 12h will be used.", + "format": "google-duration", + "type": "string" + } + }, + "type": "object" + }, "AnywhereCache": { "description": "An Anywhere Cache instance.", "id": "AnywhereCache", @@ -4293,6 +4589,16 @@ "description": "HTTP 1.1 Entity tag for the bucket.", "type": "string" }, + "generation": { + "description": "The generation of this bucket.", + "format": "int64", + "type": "string" + }, + "hardDeleteTime": { + "description": "The hard delete time of the bucket in RFC 3339 format.", + "format": "date-time", + "type": "string" + }, "hierarchicalNamespace": { "description": "The bucket's hierarchical namespace configuration.", "properties": { @@ -4347,6 +4653,49 @@ "description": "The ID of the bucket. For buckets, the id and name properties are the same.", "type": "string" }, + "ipFilter": { + "description": "The bucket's IP filter configuration. Specifies the network sources that are allowed to access the operations on the bucket, as well as its underlying objects. Only enforced when the mode is set to 'Enabled'.", + "properties": { + "mode": { + "description": "The mode of the IP filter. Valid values are 'Enabled' and 'Disabled'.", + "type": "string" + }, + "publicNetworkSource": { + "description": "The public network source of the bucket's IP filter.", + "properties": { + "allowedIpCidrRanges": { + "description": "The list of public IPv4, IPv6 cidr ranges that are allowed to access the bucket.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "vpcNetworkSources": { + "description": "The list of [VPC network](https://cloud.google.com/vpc/docs/vpc) sources of the bucket's IP filter.", + "items": { + "properties": { + "allowedIpCidrRanges": { + "description": "The list of IPv4, IPv6 cidr ranges subnetworks that are allowed to access the bucket.", + "items": { + "type": "string" + }, + "type": "array" + }, + "network": { + "description": "Name of the network. Format: projects/{PROJECT_ID}/global/networks/{NETWORK_NAME}", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + }, "kind": { "default": "storage#bucket", "description": "The kind of item this is. For buckets, this is always storage#bucket.", @@ -4361,7 +4710,7 @@ "type": "object" }, "lifecycle": { - "description": "The bucket's lifecycle configuration. See lifecycle management for more information.", + "description": "The bucket's lifecycle configuration. See [Lifecycle Management](https://cloud.google.com/storage/docs/lifecycle) for more information.", "properties": { "rule": { "description": "A lifecycle management rule, which is made of an action to take and the condition(s) under which the action will be taken.", @@ -4460,7 +4809,7 @@ "type": "object" }, "location": { - "description": "The location of the bucket. Object data for objects in the bucket resides in physical storage within this region. Defaults to US. See the developer's guide for the authoritative list.", + "description": "The location of the bucket. Object data for objects in the bucket resides in physical storage within this region. Defaults to US. See the [Developer's Guide](https://cloud.google.com/storage/docs/locations) for the authoritative list.", "type": "string" }, "locationType": { @@ -4548,6 +4897,10 @@ "description": "The Recovery Point Objective (RPO) of this bucket. 
Set to ASYNC_TURBO to turn on Turbo Replication on a bucket.", "type": "string" }, + "satisfiesPZI": { + "description": "Reserved for future use.", + "type": "boolean" + }, "satisfiesPZS": { "description": "Reserved for future use.", "type": "boolean" @@ -4572,8 +4925,13 @@ }, "type": "object" }, + "softDeleteTime": { + "description": "The soft delete time of the bucket in RFC 3339 format.", + "format": "date-time", + "type": "string" + }, "storageClass": { - "description": "The bucket's default storage class, used whenever no storageClass is specified for a newly-created object. This defines how objects in the bucket are stored and determines the SLA and the cost of storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD, NEARLINE, COLDLINE, ARCHIVE, and DURABLE_REDUCED_AVAILABILITY. If this value is not specified when the bucket is created, it will default to STANDARD. For more information, see storage classes.", + "description": "The bucket's default storage class, used whenever no storageClass is specified for a newly-created object. This defines how objects in the bucket are stored and determines the SLA and the cost of storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD, NEARLINE, COLDLINE, ARCHIVE, and DURABLE_REDUCED_AVAILABILITY. If this value is not specified when the bucket is created, it will default to STANDARD. For more information, see [Storage Classes](https://cloud.google.com/storage/docs/storage-classes).", "type": "string" }, "timeCreated": { @@ -4597,7 +4955,7 @@ "type": "object" }, "website": { - "description": "The bucket's website configuration, controlling how the service behaves when accessing bucket contents as a web site. See the Static Website Examples for more information.", + "description": "The bucket's website configuration, controlling how the service behaves when accessing bucket contents as a web site. See the [Static Website Examples](https://cloud.google.com/storage/docs/static-website) for more information.", "properties": { "mainPageSuffix": { "description": "If the requested object path is missing, the service will ensure the path has a trailing '/', append this suffix, and attempt to retrieve the resulting object. This allows the creation of index.html objects to represent directory pages.", @@ -5392,7 +5750,7 @@ "type": "string" }, "crc32c": { - "description": "CRC32c checksum, as described in RFC 4960, Appendix B; encoded using base64 in big-endian byte order. For more information about using the CRC32c checksum, see Hashes and ETags: Best Practices.", + "description": "CRC32c checksum, as described in RFC 4960, Appendix B; encoded using base64 in big-endian byte order. For more information about using the CRC32c checksum, see [Data Validation and Change Detection](https://cloud.google.com/storage/docs/data-validation).", "type": "string" }, "customTime": { @@ -5446,7 +5804,7 @@ "type": "string" }, "md5Hash": { - "description": "MD5 hash of the data; encoded using base64. For more information about using the MD5 hash, see Hashes and ETags: Best Practices.", + "description": "MD5 hash of the data; encoded using base64. For more information about using the MD5 hash, see [Data Validation and Change Detection](https://cloud.google.com/storage/docs/data-validation).", "type": "string" }, "mediaLink": { @@ -5484,6 +5842,10 @@ }, "type": "object" }, + "restoreToken": { + "description": "Restore token used to differentiate deleted objects with the same name and generation. 
This field is only returned for deleted objects in hierarchical namespace buckets.", + "type": "string" + }, "retention": { "description": "A collection of object level retention parameters.", "properties": { @@ -5536,6 +5898,11 @@ "format": "date-time", "type": "string" }, + "timeFinalized": { + "description": "The time when the object was finalized.", + "format": "date-time", + "type": "string" + }, "timeStorageClassUpdated": { "description": "The time at which the object's storage class was last changed. When the object is initially created, it will be set to timeCreated.", "format": "date-time", @@ -5752,6 +6119,34 @@ }, "type": "object" }, + "RelocateBucketRequest": { + "description": "A Relocate Bucket request.", + "id": "RelocateBucketRequest", + "properties": { + "destinationCustomPlacementConfig": { + "description": "The bucket's new custom placement configuration if relocating to a Custom Dual Region.", + "properties": { + "dataLocations": { + "description": "The list of regional locations in which data is placed.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "destinationLocation": { + "description": "The new location the bucket will be relocated to.", + "type": "string" + }, + "validateOnly": { + "description": "If true, validate the operation, but do not actually relocate the bucket.", + "type": "boolean" + } + }, + "type": "object" + }, "RewriteResponse": { "description": "A rewrite response.", "id": "RewriteResponse", diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go index cfa73e2e38d..89f08a8d98b 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC. +// Copyright 2025 Google LLC. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -64,12 +64,14 @@ import ( "errors" "fmt" "io" + "log/slog" "net/http" "net/url" "strconv" "strings" "github.com/googleapis/gax-go/v2" + "github.com/googleapis/gax-go/v2/internallog" googleapi "google.golang.org/api/googleapi" internal "google.golang.org/api/internal" gensupport "google.golang.org/api/internal/gensupport" @@ -93,6 +95,8 @@ var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint var _ = internal.Version +var _ = internallog.New +var _ = gax.Version const apiId = "storage:v1" const apiName = "storage" @@ -133,11 +137,24 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) opts = append(opts, internaloption.WithDefaultEndpointTemplate(basePathTemplate)) opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) + opts = append(opts, internaloption.EnableNewAuthLibrary()) client, endpoint, err := htransport.NewClient(ctx, opts...) 
if err != nil { return nil, err } - s, err := New(client) + s := &Service{client: client, BasePath: basePath, logger: internaloption.GetLogger(opts)} + s.AnywhereCaches = NewAnywhereCachesService(s) + s.BucketAccessControls = NewBucketAccessControlsService(s) + s.Buckets = NewBucketsService(s) + s.Channels = NewChannelsService(s) + s.DefaultObjectAccessControls = NewDefaultObjectAccessControlsService(s) + s.Folders = NewFoldersService(s) + s.ManagedFolders = NewManagedFoldersService(s) + s.Notifications = NewNotificationsService(s) + s.ObjectAccessControls = NewObjectAccessControlsService(s) + s.Objects = NewObjectsService(s) + s.Operations = NewOperationsService(s) + s.Projects = NewProjectsService(s) if err != nil { return nil, err } @@ -156,24 +173,12 @@ func New(client *http.Client) (*Service, error) { if client == nil { return nil, errors.New("client is nil") } - s := &Service{client: client, BasePath: basePath} - s.AnywhereCaches = NewAnywhereCachesService(s) - s.BucketAccessControls = NewBucketAccessControlsService(s) - s.Buckets = NewBucketsService(s) - s.Channels = NewChannelsService(s) - s.DefaultObjectAccessControls = NewDefaultObjectAccessControlsService(s) - s.Folders = NewFoldersService(s) - s.ManagedFolders = NewManagedFoldersService(s) - s.Notifications = NewNotificationsService(s) - s.ObjectAccessControls = NewObjectAccessControlsService(s) - s.Objects = NewObjectsService(s) - s.Operations = NewOperationsService(s) - s.Projects = NewProjectsService(s) - return s, nil + return NewService(context.Background(), option.WithHTTPClient(client)) } type Service struct { client *http.Client + logger *slog.Logger BasePath string // API endpoint base URL UserAgent string // optional additional User-Agent fragment @@ -341,6 +346,34 @@ type ProjectsServiceAccountService struct { s *Service } +// AdvanceRelocateBucketOperationRequest: An AdvanceRelocateBucketOperation +// request. +type AdvanceRelocateBucketOperationRequest struct { + // ExpireTime: Specifies the time when the relocation will revert to the sync + // stage if the relocation hasn't succeeded. + ExpireTime string `json:"expireTime,omitempty"` + // Ttl: Specifies the duration after which the relocation will revert to the + // sync stage if the relocation hasn't succeeded. Optional, if not supplied, a + // default value of 12h will be used. + Ttl string `json:"ttl,omitempty"` + // ForceSendFields is a list of field names (e.g. "ExpireTime") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "ExpireTime") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s AdvanceRelocateBucketOperationRequest) MarshalJSON() ([]byte, error) { + type NoMethod AdvanceRelocateBucketOperationRequest + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + // AnywhereCache: An Anywhere Cache instance. type AnywhereCache struct { // AdmissionPolicy: The cache-level entry admission policy. @@ -459,6 +492,10 @@ type Bucket struct { Encryption *BucketEncryption `json:"encryption,omitempty"` // Etag: HTTP 1.1 Entity tag for the bucket. 
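One behavioral detail worth calling out in the constructor hunk above: the deprecated New(client) no longer duplicates the sub-service wiring but delegates to NewService with option.WithHTTPClient, so both entry points now pick up the EnableNewAuthLibrary default and the new logger field identically. For illustration:

package main

import (
	"context"
	"net/http"

	"google.golang.org/api/option"
	storage "google.golang.org/api/storage/v1"
)

func constructors() {
	httpClient := &http.Client{}
	// Deprecated path: now a thin wrapper that calls NewService internally
	// (with context.Background()).
	legacy, _ := storage.New(httpClient)
	// Preferred path; equivalent wiring, and accepts further ClientOptions.
	current, _ := storage.NewService(context.Background(), option.WithHTTPClient(httpClient))
	_, _ = legacy, current
}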
Etag string `json:"etag,omitempty"` + // Generation: The generation of this bucket. + Generation int64 `json:"generation,omitempty,string"` + // HardDeleteTime: The hard delete time of the bucket in RFC 3339 format. + HardDeleteTime string `json:"hardDeleteTime,omitempty"` // HierarchicalNamespace: The bucket's hierarchical namespace configuration. HierarchicalNamespace *BucketHierarchicalNamespace `json:"hierarchicalNamespace,omitempty"` // IamConfiguration: The bucket's IAM configuration. @@ -466,16 +503,21 @@ type Bucket struct { // Id: The ID of the bucket. For buckets, the id and name properties are the // same. Id string `json:"id,omitempty"` + // IpFilter: The bucket's IP filter configuration. Specifies the network + // sources that are allowed to access the operations on the bucket, as well as + // its underlying objects. Only enforced when the mode is set to 'Enabled'. + IpFilter *BucketIpFilter `json:"ipFilter,omitempty"` // Kind: The kind of item this is. For buckets, this is always storage#bucket. Kind string `json:"kind,omitempty"` // Labels: User-provided labels, in key/value pairs. Labels map[string]string `json:"labels,omitempty"` - // Lifecycle: The bucket's lifecycle configuration. See lifecycle management - // for more information. + // Lifecycle: The bucket's lifecycle configuration. See Lifecycle Management + // (https://cloud.google.com/storage/docs/lifecycle) for more information. Lifecycle *BucketLifecycle `json:"lifecycle,omitempty"` // Location: The location of the bucket. Object data for objects in the bucket // resides in physical storage within this region. Defaults to US. See the - // developer's guide for the authoritative list. + // Developer's Guide (https://cloud.google.com/storage/docs/locations) for the + // authoritative list. Location string `json:"location,omitempty"` // LocationType: The type of the bucket location. LocationType string `json:"locationType,omitempty"` @@ -506,6 +548,8 @@ type Bucket struct { // Rpo: The Recovery Point Objective (RPO) of this bucket. Set to ASYNC_TURBO // to turn on Turbo Replication on a bucket. Rpo string `json:"rpo,omitempty"` + // SatisfiesPZI: Reserved for future use. + SatisfiesPZI bool `json:"satisfiesPZI,omitempty"` // SatisfiesPZS: Reserved for future use. SatisfiesPZS bool `json:"satisfiesPZS,omitempty"` // SelfLink: The URI of this bucket. @@ -514,13 +558,16 @@ type Bucket struct { // of time that soft-deleted objects will be retained, and cannot be // permanently deleted. SoftDeletePolicy *BucketSoftDeletePolicy `json:"softDeletePolicy,omitempty"` + // SoftDeleteTime: The soft delete time of the bucket in RFC 3339 format. + SoftDeleteTime string `json:"softDeleteTime,omitempty"` // StorageClass: The bucket's default storage class, used whenever no // storageClass is specified for a newly-created object. This defines how // objects in the bucket are stored and determines the SLA and the cost of // storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD, NEARLINE, // COLDLINE, ARCHIVE, and DURABLE_REDUCED_AVAILABILITY. If this value is not // specified when the bucket is created, it will default to STANDARD. For more - // information, see storage classes. + // information, see Storage Classes + // (https://cloud.google.com/storage/docs/storage-classes). StorageClass string `json:"storageClass,omitempty"` // TimeCreated: The creation time of the bucket in RFC 3339 format. 
TimeCreated string `json:"timeCreated,omitempty"` @@ -530,7 +577,8 @@ type Bucket struct { Versioning *BucketVersioning `json:"versioning,omitempty"` // Website: The bucket's website configuration, controlling how the service // behaves when accessing bucket contents as a web site. See the Static Website - // Examples for more information. + // Examples (https://cloud.google.com/storage/docs/static-website) for more + // information. Website *BucketWebsite `json:"website,omitempty"` // ServerResponse contains the HTTP response code and headers from the server. @@ -803,8 +851,87 @@ func (s BucketIamConfigurationUniformBucketLevelAccess) MarshalJSON() ([]byte, e return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// BucketLifecycle: The bucket's lifecycle configuration. See lifecycle -// management for more information. +// BucketIpFilter: The bucket's IP filter configuration. Specifies the network +// sources that are allowed to access the operations on the bucket, as well as +// its underlying objects. Only enforced when the mode is set to 'Enabled'. +type BucketIpFilter struct { + // Mode: The mode of the IP filter. Valid values are 'Enabled' and 'Disabled'. + Mode string `json:"mode,omitempty"` + // PublicNetworkSource: The public network source of the bucket's IP filter. + PublicNetworkSource *BucketIpFilterPublicNetworkSource `json:"publicNetworkSource,omitempty"` + // VpcNetworkSources: The list of VPC network + // (https://cloud.google.com/vpc/docs/vpc) sources of the bucket's IP filter. + VpcNetworkSources []*BucketIpFilterVpcNetworkSources `json:"vpcNetworkSources,omitempty"` + // ForceSendFields is a list of field names (e.g. "Mode") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "Mode") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s BucketIpFilter) MarshalJSON() ([]byte, error) { + type NoMethod BucketIpFilter + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// BucketIpFilterPublicNetworkSource: The public network source of the bucket's +// IP filter. +type BucketIpFilterPublicNetworkSource struct { + // AllowedIpCidrRanges: The list of public IPv4, IPv6 cidr ranges that are + // allowed to access the bucket. + AllowedIpCidrRanges []string `json:"allowedIpCidrRanges,omitempty"` + // ForceSendFields is a list of field names (e.g. "AllowedIpCidrRanges") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "AllowedIpCidrRanges") to include + // in API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. 
+ NullFields []string `json:"-"` +} + +func (s BucketIpFilterPublicNetworkSource) MarshalJSON() ([]byte, error) { + type NoMethod BucketIpFilterPublicNetworkSource + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +type BucketIpFilterVpcNetworkSources struct { + // AllowedIpCidrRanges: The list of IPv4, IPv6 cidr ranges subnetworks that are + // allowed to access the bucket. + AllowedIpCidrRanges []string `json:"allowedIpCidrRanges,omitempty"` + // Network: Name of the network. Format: + // projects/{PROJECT_ID}/global/networks/{NETWORK_NAME} + Network string `json:"network,omitempty"` + // ForceSendFields is a list of field names (e.g. "AllowedIpCidrRanges") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "AllowedIpCidrRanges") to include + // in API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s BucketIpFilterVpcNetworkSources) MarshalJSON() ([]byte, error) { + type NoMethod BucketIpFilterVpcNetworkSources + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// BucketLifecycle: The bucket's lifecycle configuration. See Lifecycle +// Management (https://cloud.google.com/storage/docs/lifecycle) for more +// information. type BucketLifecycle struct { // Rule: A lifecycle management rule, which is made of an action to take and // the condition(s) under which the action will be taken. @@ -1116,7 +1243,8 @@ func (s BucketVersioning) MarshalJSON() ([]byte, error) { // BucketWebsite: The bucket's website configuration, controlling how the // service behaves when accessing bucket contents as a web site. See the Static -// Website Examples for more information. +// Website Examples (https://cloud.google.com/storage/docs/static-website) for +// more information. type BucketWebsite struct { // MainPageSuffix: If the requested object path is missing, the service will // ensure the path has a trailing '/', append this suffix, and attempt to @@ -2083,7 +2211,8 @@ type Object struct { ContentType string `json:"contentType,omitempty"` // Crc32c: CRC32c checksum, as described in RFC 4960, Appendix B; encoded using // base64 in big-endian byte order. For more information about using the CRC32c - // checksum, see Hashes and ETags: Best Practices. + // checksum, see Data Validation and Change Detection + // (https://cloud.google.com/storage/docs/data-validation). Crc32c string `json:"crc32c,omitempty"` // CustomTime: A timestamp in RFC 3339 format specified by the user for an // object. @@ -2121,7 +2250,8 @@ type Object struct { // request to fail with status code 400 - Bad Request. KmsKeyName string `json:"kmsKeyName,omitempty"` // Md5Hash: MD5 hash of the data; encoded using base64. For more information - // about using the MD5 hash, see Hashes and ETags: Best Practices. + // about using the MD5 hash, see Data Validation and Change Detection + // (https://cloud.google.com/storage/docs/data-validation). Md5Hash string `json:"md5Hash,omitempty"` // MediaLink: Media download link. 
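The BucketIpFilter types above map one-to-one onto the ipFilter schema added to storage-api.json earlier in this diff: a mode switch plus public and per-VPC CIDR allowlists. A sketch of turning a filter on through the usual bucket update path; the CIDR ranges, project, and network names are illustrative, and Buckets.Patch is assumed to be the appropriate write call:

package main

import storage "google.golang.org/api/storage/v1"

// enableIPFilter restricts bucket access to the listed network sources.
// svc is assumed to be a *storage.Service.
func enableIPFilter(svc *storage.Service) error {
	b := &storage.Bucket{
		IpFilter: &storage.BucketIpFilter{
			Mode: "Enabled", // the filter is only enforced while Mode is 'Enabled'
			PublicNetworkSource: &storage.BucketIpFilterPublicNetworkSource{
				AllowedIpCidrRanges: []string{"203.0.113.0/24"},
			},
			VpcNetworkSources: []*storage.BucketIpFilterVpcNetworkSources{{
				Network:             "projects/my-project/global/networks/my-vpc",
				AllowedIpCidrRanges: []string{"10.0.0.0/8"},
			}},
		},
	}
	_, err := svc.Buckets.Patch("my-bucket", b).Do()
	return err
}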
MediaLink string `json:"mediaLink,omitempty"` @@ -2137,6 +2267,10 @@ type Object struct { // Owner: The owner of the object. This will always be the uploader of the // object. Owner *ObjectOwner `json:"owner,omitempty"` + // RestoreToken: Restore token used to differentiate deleted objects with the + // same name and generation. This field is only returned for deleted objects in + // hierarchical namespace buckets. + RestoreToken string `json:"restoreToken,omitempty"` // Retention: A collection of object level retention parameters. Retention *ObjectRetention `json:"retention,omitempty"` // RetentionExpirationTime: A server-determined value that specifies the @@ -2169,6 +2303,8 @@ type Object struct { // format. Will be returned if and only if this version of the object has been // deleted. TimeDeleted string `json:"timeDeleted,omitempty"` + // TimeFinalized: The time when the object was finalized. + TimeFinalized string `json:"timeFinalized,omitempty"` // TimeStorageClassUpdated: The time at which the object's storage class was // last changed. When the object is initially created, it will be set to // timeCreated. @@ -2540,6 +2676,59 @@ func (s PolicyBindings) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } +// RelocateBucketRequest: A Relocate Bucket request. +type RelocateBucketRequest struct { + // DestinationCustomPlacementConfig: The bucket's new custom placement + // configuration if relocating to a Custom Dual Region. + DestinationCustomPlacementConfig *RelocateBucketRequestDestinationCustomPlacementConfig `json:"destinationCustomPlacementConfig,omitempty"` + // DestinationLocation: The new location the bucket will be relocated to. + DestinationLocation string `json:"destinationLocation,omitempty"` + // ValidateOnly: If true, validate the operation, but do not actually relocate + // the bucket. + ValidateOnly bool `json:"validateOnly,omitempty"` + // ForceSendFields is a list of field names (e.g. + // "DestinationCustomPlacementConfig") to unconditionally include in API + // requests. By default, fields with empty or default values are omitted from + // API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. + // "DestinationCustomPlacementConfig") to include in API requests with the JSON + // null value. By default, fields with empty values are omitted from API + // requests. See https://pkg.go.dev/google.golang.org/api#hdr-NullFields for + // more details. + NullFields []string `json:"-"` +} + +func (s RelocateBucketRequest) MarshalJSON() ([]byte, error) { + type NoMethod RelocateBucketRequest + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + +// RelocateBucketRequestDestinationCustomPlacementConfig: The bucket's new +// custom placement configuration if relocating to a Custom Dual Region. +type RelocateBucketRequestDestinationCustomPlacementConfig struct { + // DataLocations: The list of regional locations in which data is placed. + DataLocations []string `json:"dataLocations,omitempty"` + // ForceSendFields is a list of field names (e.g. "DataLocations") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. 
+ ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. "DataLocations") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s RelocateBucketRequestDestinationCustomPlacementConfig) MarshalJSON() ([]byte, error) { + type NoMethod RelocateBucketRequestDestinationCustomPlacementConfig + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + // RewriteResponse: A rewrite response. type RewriteResponse struct { // Done: true if the copy is finished; otherwise, false if the copy is in @@ -2706,12 +2895,11 @@ func (c *AnywhereCachesDisableCall) Header() http.Header { func (c *AnywhereCachesDisableCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCaches/{anywhereCacheId}/disable") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("POST", urls, nil) if err != nil { return nil, err } @@ -2720,6 +2908,7 @@ func (c *AnywhereCachesDisableCall) doRequest(alt string) (*http.Response, error "bucket": c.bucket, "anywhereCacheId": c.anywhereCacheId, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.anywhereCaches.disable", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -2754,9 +2943,11 @@ func (c *AnywhereCachesDisableCall) Do(opts ...googleapi.CallOption) (*AnywhereC }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.anywhereCaches.disable", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -2817,12 +3008,11 @@ func (c *AnywhereCachesGetCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCaches/{anywhereCacheId}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -2831,6 +3021,7 @@ func (c *AnywhereCachesGetCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "anywhereCacheId": c.anywhereCacheId, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.anywhereCaches.get", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -2865,9 +3056,11 @@ func (c *AnywhereCachesGetCall) Do(opts ...googleapi.CallOption) (*AnywhereCache }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.anywhereCaches.get", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -2915,8 +3108,7 @@ func (c *AnywhereCachesInsertCall) Header() http.Header { func (c *AnywhereCachesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.anywherecache) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.anywherecache) if err != nil { return nil, err } @@ -2932,6 +3124,7 @@ func (c *AnywhereCachesInsertCall) doRequest(alt string) (*http.Response, error) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.anywhereCaches.insert", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -2967,9 +3160,11 @@ func (c *AnywhereCachesInsertCall) Do(opts ...googleapi.CallOption) (*GoogleLong }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.anywhereCaches.insert", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -3042,12 +3237,11 @@ func (c *AnywhereCachesListCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCaches") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -3055,6 +3249,7 @@ func (c *AnywhereCachesListCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.anywhereCaches.list", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -3089,9 +3284,11 @@ func (c *AnywhereCachesListCall) Do(opts ...googleapi.CallOption) (*AnywhereCach }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.anywhereCaches.list", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -3161,12 +3358,11 @@ func (c *AnywhereCachesPauseCall) Header() http.Header { func (c *AnywhereCachesPauseCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCaches/{anywhereCacheId}/pause") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("POST", urls, nil) if err != nil { return nil, err } @@ -3175,6 +3371,7 @@ func (c *AnywhereCachesPauseCall) doRequest(alt string) (*http.Response, error) "bucket": c.bucket, "anywhereCacheId": c.anywhereCacheId, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.anywhereCaches.pause", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -3209,9 +3406,11 @@ func (c *AnywhereCachesPauseCall) Do(opts ...googleapi.CallOption) (*AnywhereCac }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.anywhereCaches.pause", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -3260,12 +3459,11 @@ func (c *AnywhereCachesResumeCall) Header() http.Header { func (c *AnywhereCachesResumeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCaches/{anywhereCacheId}/resume") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("POST", urls, nil) if err != nil { return nil, err } @@ -3274,6 +3472,7 @@ func (c *AnywhereCachesResumeCall) doRequest(alt string) (*http.Response, error) "bucket": c.bucket, "anywhereCacheId": c.anywhereCacheId, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.anywhereCaches.resume", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -3308,9 +3507,11 @@ func (c *AnywhereCachesResumeCall) Do(opts ...googleapi.CallOption) (*AnywhereCa }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.anywhereCaches.resume", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -3362,8 +3563,7 @@ func (c *AnywhereCachesUpdateCall) Header() http.Header { func (c *AnywhereCachesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.anywherecache) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.anywherecache) if err != nil { return nil, err } @@ -3380,6 +3580,7 @@ func (c *AnywhereCachesUpdateCall) doRequest(alt string) (*http.Response, error) "bucket": c.bucket, "anywhereCacheId": c.anywhereCacheId, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.anywhereCaches.update", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -3415,9 +3616,11 @@ func (c *AnywhereCachesUpdateCall) Do(opts ...googleapi.CallOption) (*GoogleLong }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.anywhereCaches.update", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -3476,12 +3679,11 @@ func (c *BucketAccessControlsDeleteCall) Header() http.Header { func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("DELETE", urls, nil) if err != nil { return nil, err } @@ -3490,6 +3692,7 @@ func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, "bucket": c.bucket, "entity": c.entity, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.delete", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -3504,6 +3707,7 @@ func (c *BucketAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error if err := googleapi.CheckResponse(res); err != nil { return gensupport.WrapError(err) } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.delete", "response", internallog.HTTPResponse(res, nil)) return nil } @@ -3573,12 +3777,11 @@ func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, err if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -3587,6 +3790,7 @@ func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, err "bucket": c.bucket, "entity": c.entity, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.get", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -3622,9 +3826,11 @@ func (c *BucketAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*BucketA }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.get", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -3679,8 +3885,7 @@ func (c *BucketAccessControlsInsertCall) Header() http.Header { func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.bucketaccesscontrol) if err != nil { return nil, err } @@ -3696,6 +3901,7 @@ func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.insert", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -3731,9 +3937,11 @@ func (c *BucketAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Buck }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.insert", 
"response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -3798,12 +4006,11 @@ func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, er if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -3811,6 +4018,7 @@ func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, er googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.list", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -3846,9 +4054,11 @@ func (c *BucketAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Bucket }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.list", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -3908,8 +4118,7 @@ func (c *BucketAccessControlsPatchCall) Header() http.Header { func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.bucketaccesscontrol) if err != nil { return nil, err } @@ -3926,6 +4135,7 @@ func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, e "bucket": c.bucket, "entity": c.entity, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.patch", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -3961,9 +4171,11 @@ func (c *BucketAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*Bucke }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.patch", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -4023,8 +4235,7 @@ func (c *BucketAccessControlsUpdateCall) Header() http.Header { func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.bucketaccesscontrol) if err != nil { return nil, err } @@ -4041,6 +4252,7 @@ func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, "bucket": c.bucket, "entity": c.entity, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.update", "request", internallog.HTTPRequest(req, 
body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -4076,9 +4288,11 @@ func (c *BucketAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Buck }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.update", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -4090,7 +4304,8 @@ type BucketsDeleteCall struct { header_ http.Header } -// Delete: Permanently deletes an empty bucket. +// Delete: Deletes an empty bucket. Deletions are permanent unless soft delete +// is enabled on the bucket. // // - bucket: Name of a bucket. func (r *BucketsService) Delete(bucket string) *BucketsDeleteCall { @@ -4146,12 +4361,11 @@ func (c *BucketsDeleteCall) Header() http.Header { func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("DELETE", urls, nil) if err != nil { return nil, err } @@ -4159,6 +4373,7 @@ func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.delete", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -4173,6 +4388,7 @@ func (c *BucketsDeleteCall) Do(opts ...googleapi.CallOption) error { if err := googleapi.CheckResponse(res); err != nil { return gensupport.WrapError(err) } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.delete", "response", internallog.HTTPResponse(res, nil)) return nil } @@ -4194,6 +4410,13 @@ func (r *BucketsService) Get(bucket string) *BucketsGetCall { return c } +// Generation sets the optional parameter "generation": If present, specifies +// the generation of the bucket. This is required if softDeleted is true. +func (c *BucketsGetCall) Generation(generation int64) *BucketsGetCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + // IfMetagenerationMatch sets the optional parameter "ifMetagenerationMatch": // Makes the return of the bucket metadata conditional on whether the bucket's // current metageneration matches the given value. @@ -4223,6 +4446,15 @@ func (c *BucketsGetCall) Projection(projection string) *BucketsGetCall { return c } +// SoftDeleted sets the optional parameter "softDeleted": If true, return the +// soft-deleted version of this bucket. The default is false. For more +// information, see Soft Delete +// (https://cloud.google.com/storage/docs/soft-delete). +func (c *BucketsGetCall) SoftDeleted(softDeleted bool) *BucketsGetCall { + c.urlParams_.Set("softDeleted", fmt.Sprint(softDeleted)) + return c +} + // UserProject sets the optional parameter "userProject": The project to be // billed for this request. Required for Requester Pays buckets. 
func (c *BucketsGetCall) UserProject(userProject string) *BucketsGetCall { @@ -4266,12 +4498,11 @@ func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -4279,6 +4510,7 @@ func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.get", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -4313,9 +4545,11 @@ func (c *BucketsGetCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.get", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -4389,12 +4623,11 @@ func (c *BucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -4402,6 +4635,7 @@ func (c *BucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.getIamPolicy", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -4436,9 +4670,11 @@ func (c *BucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.getIamPolicy", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -4505,12 +4741,11 @@ func (c *BucketsGetStorageLayoutCall) doRequest(alt string) (*http.Response, err if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/storageLayout") urls += "?" 
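
The two parameters added to BucketsGetCall compose in the usual builder style. A short usage sketch, where svc is an initialized *storage.Service and the bucket name and generation are placeholders:

    func getSoftDeletedBucket(svc *storage.Service) (*storage.Bucket, error) {
        // Per the parameter docs above, generation is required whenever
        // softDeleted is true.
        return svc.Buckets.Get("my-bucket").
            SoftDeleted(true).
            Generation(1234567890).
            Do()
    }
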
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -4518,6 +4753,7 @@ func (c *BucketsGetStorageLayoutCall) doRequest(alt string) (*http.Response, err googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.getStorageLayout", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -4553,9 +4789,11 @@ func (c *BucketsGetStorageLayoutCall) Do(opts ...googleapi.CallOption) (*BucketS }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.getStorageLayout", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -4686,8 +4924,7 @@ func (c *BucketsInsertCall) Header() http.Header { func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.bucket) if err != nil { return nil, err } @@ -4700,6 +4937,7 @@ func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) { return nil, err } req.Header = reqHeaders + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.insert", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -4734,9 +4972,11 @@ func (c *BucketsInsertCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.insert", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -4791,6 +5031,15 @@ func (c *BucketsListCall) Projection(projection string) *BucketsListCall { return c } +// SoftDeleted sets the optional parameter "softDeleted": If true, only +// soft-deleted bucket versions will be returned. The default is false. For +// more information, see Soft Delete +// (https://cloud.google.com/storage/docs/soft-delete). +func (c *BucketsListCall) SoftDeleted(softDeleted bool) *BucketsListCall { + c.urlParams_.Set("softDeleted", fmt.Sprint(softDeleted)) + return c +} + // UserProject sets the optional parameter "userProject": The project to be // billed for this request. func (c *BucketsListCall) UserProject(userProject string) *BucketsListCall { @@ -4834,16 +5083,16 @@ func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b") urls += "?" 
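
BucketsListCall gains the same softDeleted switch; when set, only soft-deleted bucket versions are returned. A sketch with a placeholder project ID:

    func listSoftDeletedBuckets(svc *storage.Service) (*storage.Buckets, error) {
        return svc.Buckets.List("my-project").SoftDeleted(true).Do()
    }
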
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } req.Header = reqHeaders + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.list", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -4878,9 +5127,11 @@ func (c *BucketsListCall) Do(opts ...googleapi.CallOption) (*Buckets, error) { }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.list", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -4957,12 +5208,11 @@ func (c *BucketsLockRetentionPolicyCall) Header() http.Header { func (c *BucketsLockRetentionPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/lockRetentionPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("POST", urls, nil) if err != nil { return nil, err } @@ -4970,6 +5220,7 @@ func (c *BucketsLockRetentionPolicyCall) doRequest(alt string) (*http.Response, googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.lockRetentionPolicy", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -5004,9 +5255,11 @@ func (c *BucketsLockRetentionPolicyCall) Do(opts ...googleapi.CallOption) (*Buck }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.lockRetentionPolicy", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -5148,8 +5401,7 @@ func (c *BucketsPatchCall) Header() http.Header { func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.bucket2) if err != nil { return nil, err } @@ -5165,6 +5417,7 @@ func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.patch", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -5199,9 +5452,234 @@ func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { + return nil, err + } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.patch", "response", internallog.HTTPResponse(res, b)) + 
return ret, nil +} + +type BucketsRelocateCall struct { + s *Service + bucket string + relocatebucketrequest *RelocateBucketRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Relocate: Initiates a long-running Relocate Bucket operation on the +// specified bucket. +// +// - bucket: Name of the bucket to be moved. +func (r *BucketsService) Relocate(bucket string, relocatebucketrequest *RelocateBucketRequest) *BucketsRelocateCall { + c := &BucketsRelocateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.relocatebucketrequest = relocatebucketrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *BucketsRelocateCall) Fields(s ...googleapi.Field) *BucketsRelocateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *BucketsRelocateCall) Context(ctx context.Context) *BucketsRelocateCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *BucketsRelocateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketsRelocateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.relocatebucketrequest) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/relocate") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.relocate", "request", internallog.HTTPRequest(req, body.Bytes())) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.relocate" call. +// Any non-2xx status code is an error. Response headers are in either +// *GoogleLongrunningOperation.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *BucketsRelocateCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &GoogleLongrunningOperation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { + return nil, err + } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.relocate", "response", internallog.HTTPResponse(res, b)) + return ret, nil +} + +type BucketsRestoreCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Restore: Restores a soft-deleted bucket. +// +// - bucket: Name of a bucket. +// - generation: Generation of a bucket. +func (r *BucketsService) Restore(bucket string, generation int64) *BucketsRestoreCall { + c := &BucketsRestoreCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// Projection sets the optional parameter "projection": Set of properties to +// return. Defaults to full. +// +// Possible values: +// +// "full" - Include all properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. +func (c *BucketsRestoreCall) Projection(projection string) *BucketsRestoreCall { + c.urlParams_.Set("projection", projection) + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *BucketsRestoreCall) UserProject(userProject string) *BucketsRestoreCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *BucketsRestoreCall) Fields(s ...googleapi.Field) *BucketsRestoreCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *BucketsRestoreCall) Context(ctx context.Context) *BucketsRestoreCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *BucketsRestoreCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketsRestoreCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/restore") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, nil) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.restore", "request", internallog.HTTPRequest(req, nil)) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.restore" call. +// Any non-2xx status code is an error. Response headers are in either +// *Bucket.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *BucketsRestoreCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Bucket{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.restore", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -5256,8 +5734,7 @@ func (c *BucketsSetIamPolicyCall) Header() http.Header { func (c *BucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.policy) if err != nil { return nil, err } @@ -5273,6 +5750,7 @@ func (c *BucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.setIamPolicy", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -5307,9 +5785,11 @@ func (c *BucketsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.setIamPolicy", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -5377,12 +5857,11 @@ func (c *BucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, e if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam/testPermissions") urls += "?" 
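
storage.buckets.restore is the counterpart: a bodyless POST (hence the nil request body and the nil bytes passed to the request log above) that turns a soft-deleted generation back into a live bucket. A sketch with placeholder values:

    func restoreBucket(svc *storage.Service) (*storage.Bucket, error) {
        // generation identifies which soft-deleted version to bring back.
        return svc.Buckets.Restore("my-bucket", 1234567890).Do()
    }
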
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -5390,6 +5869,7 @@ func (c *BucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, e googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.testIamPermissions", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -5425,9 +5905,11 @@ func (c *BucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.testIamPermissions", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -5569,8 +6051,7 @@ func (c *BucketsUpdateCall) Header() http.Header { func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.bucket2) if err != nil { return nil, err } @@ -5586,6 +6067,7 @@ func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.update", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -5620,9 +6102,11 @@ func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.update", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -5666,8 +6150,7 @@ func (c *ChannelsStopCall) Header() http.Header { func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.channel) if err != nil { return nil, err } @@ -5680,6 +6163,7 @@ func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { return nil, err } req.Header = reqHeaders + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.channels.stop", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -5694,6 +6178,7 @@ func (c *ChannelsStopCall) Do(opts ...googleapi.CallOption) error { if err := googleapi.CheckResponse(res); err != nil { return gensupport.WrapError(err) } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.channels.stop", "response", internallog.HTTPResponse(res, nil)) return nil } @@ -5752,12 +6237,11 @@ func (c *DefaultObjectAccessControlsDeleteCall) Header() http.Header { func (c 
*DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("DELETE", urls, nil) if err != nil { return nil, err } @@ -5766,6 +6250,7 @@ func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Res "bucket": c.bucket, "entity": c.entity, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.delete", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -5780,6 +6265,7 @@ func (c *DefaultObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return gensupport.WrapError(err) } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.delete", "response", internallog.HTTPResponse(res, nil)) return nil } @@ -5850,12 +6336,11 @@ func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Respon if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -5864,6 +6349,7 @@ func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Respon "bucket": c.bucket, "entity": c.entity, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.get", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -5899,9 +6385,11 @@ func (c *DefaultObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (* }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.get", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -5956,8 +6444,7 @@ func (c *DefaultObjectAccessControlsInsertCall) Header() http.Header { func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.objectaccesscontrol) if err != nil { return nil, err } @@ -5973,6 +6460,7 @@ func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Res googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.insert", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, 
req) } @@ -6008,9 +6496,11 @@ func (c *DefaultObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.insert", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -6091,12 +6581,11 @@ func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Respo if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -6104,6 +6593,7 @@ func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Respo googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.list", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -6139,9 +6629,11 @@ func (c *DefaultObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) ( }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.list", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -6201,8 +6693,7 @@ func (c *DefaultObjectAccessControlsPatchCall) Header() http.Header { func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.objectaccesscontrol) if err != nil { return nil, err } @@ -6219,6 +6710,7 @@ func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Resp "bucket": c.bucket, "entity": c.entity, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.patch", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -6254,9 +6746,11 @@ func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.patch", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -6316,8 +6810,7 @@ func (c *DefaultObjectAccessControlsUpdateCall) Header() http.Header { func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := 
googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.objectaccesscontrol) if err != nil { return nil, err } @@ -6334,6 +6827,7 @@ func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Res "bucket": c.bucket, "entity": c.entity, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.update", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -6369,9 +6863,11 @@ func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.update", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -6436,12 +6932,11 @@ func (c *FoldersDeleteCall) Header() http.Header { func (c *FoldersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/folders/{folder}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("DELETE", urls, nil) if err != nil { return nil, err } @@ -6450,6 +6945,7 @@ func (c *FoldersDeleteCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "folder": c.folder, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.folders.delete", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -6464,6 +6960,7 @@ func (c *FoldersDeleteCall) Do(opts ...googleapi.CallOption) error { if err := googleapi.CheckResponse(res); err != nil { return gensupport.WrapError(err) } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.folders.delete", "response", internallog.HTTPResponse(res, nil)) return nil } @@ -6542,12 +7039,11 @@ func (c *FoldersGetCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/folders/{folder}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -6556,6 +7052,7 @@ func (c *FoldersGetCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "folder": c.folder, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.folders.get", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -6590,9 +7087,11 @@ func (c *FoldersGetCall) Do(opts ...googleapi.CallOption) (*Folder, error) { }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.folders.get", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -6648,8 +7147,7 @@ func (c *FoldersInsertCall) Header() http.Header { func (c *FoldersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.folder) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.folder) if err != nil { return nil, err } @@ -6665,6 +7163,7 @@ func (c *FoldersInsertCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.folders.insert", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -6699,9 +7198,11 @@ func (c *FoldersInsertCall) Do(opts ...googleapi.CallOption) (*Folder, error) { }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.folders.insert", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -6809,12 +7310,11 @@ func (c *FoldersListCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/folders") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -6822,6 +7322,7 @@ func (c *FoldersListCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.folders.list", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -6856,9 +7357,11 @@ func (c *FoldersListCall) Do(opts ...googleapi.CallOption) (*Folders, error) { }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.folders.list", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -6948,12 +7451,11 @@ func (c *FoldersRenameCall) Header() http.Header { func (c *FoldersRenameCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/folders/{sourceFolder}/renameTo/folders/{destinationFolder}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("POST", urls, nil) if err != nil { return nil, err } @@ -6963,6 +7465,7 @@ func (c *FoldersRenameCall) doRequest(alt string) (*http.Response, error) { "sourceFolder": c.sourceFolder, "destinationFolder": c.destinationFolder, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.folders.rename", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -6998,9 +7501,11 @@ func (c *FoldersRenameCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunning }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.folders.rename", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -7074,12 +7579,11 @@ func (c *ManagedFoldersDeleteCall) Header() http.Header { func (c *ManagedFoldersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders/{managedFolder}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("DELETE", urls, nil) if err != nil { return nil, err } @@ -7088,6 +7592,7 @@ func (c *ManagedFoldersDeleteCall) doRequest(alt string) (*http.Response, error) "bucket": c.bucket, "managedFolder": c.managedFolder, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.managedFolders.delete", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -7102,6 +7607,7 @@ func (c *ManagedFoldersDeleteCall) Do(opts ...googleapi.CallOption) error { if err := googleapi.CheckResponse(res); err != nil { return gensupport.WrapError(err) } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.managedFolders.delete", "response", internallog.HTTPResponse(res, nil)) return nil } @@ -7179,12 +7685,11 @@ func (c *ManagedFoldersGetCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders/{managedFolder}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -7193,6 +7698,7 @@ func (c *ManagedFoldersGetCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "managedFolder": c.managedFolder, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.managedFolders.get", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -7227,9 +7733,11 @@ func (c *ManagedFoldersGetCall) Do(opts ...googleapi.CallOption) (*ManagedFolder }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.managedFolders.get", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -7306,12 +7814,11 @@ func (c *ManagedFoldersGetIamPolicyCall) doRequest(alt string) (*http.Response, if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders/{managedFolder}/iam") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -7320,6 +7827,7 @@ func (c *ManagedFoldersGetIamPolicyCall) doRequest(alt string) (*http.Response, "bucket": c.bucket, "managedFolder": c.managedFolder, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.managedFolders.getIamPolicy", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -7354,9 +7862,11 @@ func (c *ManagedFoldersGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Poli }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.managedFolders.getIamPolicy", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -7404,8 +7914,7 @@ func (c *ManagedFoldersInsertCall) Header() http.Header { func (c *ManagedFoldersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.managedfolder) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.managedfolder) if err != nil { return nil, err } @@ -7421,6 +7930,7 @@ func (c *ManagedFoldersInsertCall) doRequest(alt string) (*http.Response, error) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.managedFolders.insert", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -7455,9 +7965,11 @@ func (c *ManagedFoldersInsertCall) Do(opts ...googleapi.CallOption) (*ManagedFol }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.managedFolders.insert", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -7536,12 +8048,11 @@ func (c *ManagedFoldersListCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -7549,6 +8060,7 @@ func (c *ManagedFoldersListCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.managedFolders.list", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -7583,9 +8095,11 @@ func (c *ManagedFoldersListCall) Do(opts ...googleapi.CallOption) (*ManagedFolde }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.managedFolders.list", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -7664,8 +8178,7 @@ func (c *ManagedFoldersSetIamPolicyCall) Header() http.Header { func (c *ManagedFoldersSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.policy) if err != nil { return nil, err } @@ -7682,6 +8195,7 @@ func (c *ManagedFoldersSetIamPolicyCall) doRequest(alt string) (*http.Response, "bucket": c.bucket, "managedFolder": c.managedFolder, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.managedFolders.setIamPolicy", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -7716,9 +8230,11 @@ func (c *ManagedFoldersSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Poli }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.managedFolders.setIamPolicy", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -7789,12 +8305,11 @@ func (c *ManagedFoldersTestIamPermissionsCall) doRequest(alt string) (*http.Resp if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders/{managedFolder}/iam/testPermissions") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -7803,6 +8318,7 @@ func (c *ManagedFoldersTestIamPermissionsCall) doRequest(alt string) (*http.Resp "bucket": c.bucket, "managedFolder": c.managedFolder, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.managedFolders.testIamPermissions", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -7838,9 +8354,11 @@ func (c *ManagedFoldersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.managedFolders.testIamPermissions", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -7896,12 +8414,11 @@ func (c *NotificationsDeleteCall) Header() http.Header { func (c *NotificationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs/{notification}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("DELETE", urls, nil) if err != nil { return nil, err } @@ -7910,6 +8427,7 @@ func (c *NotificationsDeleteCall) doRequest(alt string) (*http.Response, error) "bucket": c.bucket, "notification": c.notification, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.notifications.delete", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -7924,6 +8442,7 @@ func (c *NotificationsDeleteCall) Do(opts ...googleapi.CallOption) error { if err := googleapi.CheckResponse(res); err != nil { return gensupport.WrapError(err) } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.notifications.delete", "response", internallog.HTTPResponse(res, nil)) return nil } @@ -7991,12 +8510,11 @@ func (c *NotificationsGetCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs/{notification}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -8005,6 +8523,7 @@ func (c *NotificationsGetCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "notification": c.notification, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.notifications.get", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -8039,9 +8558,11 @@ func (c *NotificationsGetCall) Do(opts ...googleapi.CallOption) (*Notification, }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.notifications.get", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -8096,8 +8617,7 @@ func (c *NotificationsInsertCall) Header() http.Header { func (c *NotificationsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.notification) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.notification) if err != nil { return nil, err } @@ -8113,6 +8633,7 @@ func (c *NotificationsInsertCall) doRequest(alt string) (*http.Response, error) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.notifications.insert", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -8147,9 +8668,11 @@ func (c *NotificationsInsertCall) Do(opts ...googleapi.CallOption) (*Notificatio }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.notifications.insert", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -8214,12 +8737,11 @@ func (c *NotificationsListCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -8227,6 +8749,7 @@ func (c *NotificationsListCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.notifications.list", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -8261,9 +8784,11 @@ func (c *NotificationsListCall) Do(opts ...googleapi.CallOption) (*Notifications }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.notifications.list", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -8335,12 +8860,11 @@ func (c *ObjectAccessControlsDeleteCall) Header() http.Header { func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("DELETE", urls, nil) if err != nil { return nil, err } @@ -8350,6 +8874,7 @@ func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, "object": c.object, "entity": c.entity, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objectAccessControls.delete", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -8364,6 +8889,7 @@ func (c *ObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error if err := googleapi.CheckResponse(res); err != nil { return gensupport.WrapError(err) } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objectAccessControls.delete", "response", internallog.HTTPResponse(res, nil)) return nil } @@ -8446,12 +8972,11 @@ func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, err if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -8461,6 +8986,7 @@ func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, err "object": c.object, "entity": c.entity, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objectAccessControls.get", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -8496,9 +9022,11 @@ func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectA }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objectAccessControls.get", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -8566,8 +9094,7 @@ func (c *ObjectAccessControlsInsertCall) Header() http.Header { func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.objectaccesscontrol) if err != nil { return nil, err } @@ -8584,6 +9111,7 @@ func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, "bucket": c.bucket, "object": c.object, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objectAccessControls.insert", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -8619,9 +9147,11 @@ func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Obje }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objectAccessControls.insert", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -8699,12 +9229,11 @@ func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, er if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -8713,6 +9242,7 @@ func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, er "bucket": c.bucket, "object": c.object, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objectAccessControls.list", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -8748,9 +9278,11 @@ func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Object }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objectAccessControls.list", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -8823,8 +9355,7 @@ func (c *ObjectAccessControlsPatchCall) Header() http.Header { func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.objectaccesscontrol) if err != nil { return nil, err } @@ -8842,6 +9373,7 @@ func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, e "object": c.object, "entity": c.entity, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objectAccessControls.patch", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -8877,9 +9409,11 @@ func (c *ObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*Objec }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objectAccessControls.patch", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -8952,8 +9486,7 @@ func (c *ObjectAccessControlsUpdateCall) Header() http.Header { func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.objectaccesscontrol) if err != nil { return nil, err } @@ -8971,6 +9504,7 @@ func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, "object": c.object, "entity": c.entity, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objectAccessControls.update", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -9006,9 +9540,11 @@ func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Obje }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", 
"storage.objectAccessControls.update", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -9057,8 +9593,7 @@ func (c *ObjectsBulkRestoreCall) Header() http.Header { func (c *ObjectsBulkRestoreCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bulkrestoreobjectsrequest) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.bulkrestoreobjectsrequest) if err != nil { return nil, err } @@ -9074,6 +9609,7 @@ func (c *ObjectsBulkRestoreCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.bulkRestore", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -9109,9 +9645,11 @@ func (c *ObjectsBulkRestoreCall) Do(opts ...googleapi.CallOption) (*GoogleLongru }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.bulkRestore", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -9231,8 +9769,7 @@ func (c *ObjectsComposeCall) Header() http.Header { func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.composerequest) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.composerequest) if err != nil { return nil, err } @@ -9249,6 +9786,7 @@ func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) { "destinationBucket": c.destinationBucket, "destinationObject": c.destinationObject, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.compose", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -9283,9 +9821,11 @@ func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) { }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.compose", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -9487,8 +10027,7 @@ func (c *ObjectsCopyCall) Header() http.Header { func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.object) if err != nil { return nil, err } @@ -9507,6 +10046,7 @@ func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) { "destinationBucket": c.destinationBucket, "destinationObject": c.destinationObject, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.copy", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, 
c.s.client, req) } @@ -9541,9 +10081,11 @@ func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) { }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.copy", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -9646,12 +10188,11 @@ func (c *ObjectsDeleteCall) Header() http.Header { func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("DELETE", urls, nil) if err != nil { return nil, err } @@ -9660,6 +10201,7 @@ func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "object": c.object, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.delete", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -9674,6 +10216,7 @@ func (c *ObjectsDeleteCall) Do(opts ...googleapi.CallOption) error { if err := googleapi.CheckResponse(res); err != nil { return gensupport.WrapError(err) } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.delete", "response", internallog.HTTPResponse(res, nil)) return nil } @@ -9755,9 +10298,21 @@ func (c *ObjectsGetCall) Projection(projection string) *ObjectsGetCall { return c } +// RestoreToken sets the optional parameter "restoreToken": Restore token used +// to differentiate soft-deleted objects with the same name and generation. +// Only applicable for hierarchical namespace buckets and if softDeleted is set +// to true. This parameter is optional, and is only required in the rare case +// when there are multiple soft-deleted objects with the same name and +// generation. +func (c *ObjectsGetCall) RestoreToken(restoreToken string) *ObjectsGetCall { + c.urlParams_.Set("restoreToken", restoreToken) + return c +} + // SoftDeleted sets the optional parameter "softDeleted": If true, only // soft-deleted object versions will be listed. The default is false. For more -// information, see Soft Delete. +// information, see Soft Delete +// (https://cloud.google.com/storage/docs/soft-delete). func (c *ObjectsGetCall) SoftDeleted(softDeleted bool) *ObjectsGetCall { c.urlParams_.Set("softDeleted", fmt.Sprint(softDeleted)) return c @@ -9806,12 +10361,11 @@ func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -9820,6 +10374,7 @@ func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "object": c.object, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.get", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -9870,9 +10425,11 @@ func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) { }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.get", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -9950,12 +10507,11 @@ func (c *ObjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -9964,6 +10520,7 @@ func (c *ObjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) "bucket": c.bucket, "object": c.object, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.getIamPolicy", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -9998,9 +10555,11 @@ func (c *ObjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.getIamPolicy", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -10232,8 +10791,7 @@ func (c *ObjectsInsertCall) Header() http.Header { func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.object) if err != nil { return nil, err } @@ -10244,14 +10802,10 @@ func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { urls = googleapi.ResolveRelative(c.s.BasePath, "/upload/storage/v1/b/{bucket}/o") c.urlParams_.Set("uploadType", c.mediaInfo_.UploadType()) } - if body == nil { - body = new(bytes.Buffer) - reqHeaders.Set("Content-Type", "application/json") - } - body, getBody, cleanup := c.mediaInfo_.UploadRequest(reqHeaders, body) + newBody, getBody, cleanup := c.mediaInfo_.UploadRequest(reqHeaders, body) defer cleanup() urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("POST", urls, newBody) if err != nil { return nil, err } @@ -10260,6 +10814,7 @@ func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.insert", "request", internallog.HTTPRequest(req, body.Bytes())) if c.retry != nil { return gensupport.SendRequestWithRetry(c.ctx_, c.s.client, req, c.retry) } @@ -10315,9 +10870,11 @@ func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.insert", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -10420,7 +10977,8 @@ func (c *ObjectsListCall) Projection(projection string) *ObjectsListCall { // SoftDeleted sets the optional parameter "softDeleted": If true, only // soft-deleted object versions will be listed. The default is false. For more -// information, see Soft Delete. +// information, see Soft Delete +// (https://cloud.google.com/storage/docs/soft-delete). func (c *ObjectsListCall) SoftDeleted(softDeleted bool) *ObjectsListCall { c.urlParams_.Set("softDeleted", fmt.Sprint(softDeleted)) return c @@ -10444,7 +11002,8 @@ func (c *ObjectsListCall) UserProject(userProject string) *ObjectsListCall { // Versions sets the optional parameter "versions": If true, lists all versions // of an object as distinct results. The default is false. For more -// information, see Object Versioning. +// information, see Object Versioning +// (https://cloud.google.com/storage/docs/object-versioning). func (c *ObjectsListCall) Versions(versions bool) *ObjectsListCall { c.urlParams_.Set("versions", fmt.Sprint(versions)) return c @@ -10486,12 +11045,11 @@ func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -10499,6 +11057,7 @@ func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.list", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -10533,9 +11092,11 @@ func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) { }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.list", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -10560,6 +11121,207 @@ func (c *ObjectsListCall) Pages(ctx context.Context, f func(*Objects) error) err } } +type ObjectsMoveCall struct { + s *Service + bucket string + sourceObject string + destinationObject string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Move: Moves the source object to the destination object in the same bucket. +// +// - bucket: Name of the bucket in which the object resides. +// - destinationObject: Name of the destination object. For information about +// how to URL encode object names to be path safe, see Encoding URI Path +// Parts (https://cloud.google.com/storage/docs/request-endpoints#encoding). +// - sourceObject: Name of the source object. For information about how to URL +// encode object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +func (r *ObjectsService) Move(bucket string, sourceObject string, destinationObject string) *ObjectsMoveCall { + c := &ObjectsMoveCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.sourceObject = sourceObject + c.destinationObject = destinationObject + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": Makes the +// operation conditional on whether the destination object's current generation +// matches the given value. Setting to 0 makes the operation succeed only if +// there are no live versions of the object. `ifGenerationMatch` and +// `ifGenerationNotMatch` conditions are mutually exclusive: it's an error for +// both of them to be set in the request. +func (c *ObjectsMoveCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsMoveCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter "ifGenerationNotMatch": +// Makes the operation conditional on whether the destination object's current +// generation does not match the given value. If no live object exists, the +// precondition fails. Setting to 0 makes the operation succeed only if there +// is a live version of the object.`ifGenerationMatch` and +// `ifGenerationNotMatch` conditions are mutually exclusive: it's an error for +// both of them to be set in the request. 
+func (c *ObjectsMoveCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsMoveCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter "ifMetagenerationMatch": +// Makes the operation conditional on whether the destination object's current +// metageneration matches the given value. `ifMetagenerationMatch` and +// `ifMetagenerationNotMatch` conditions are mutually exclusive: it's an error +// for both of them to be set in the request. +func (c *ObjectsMoveCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsMoveCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on whether the +// destination object's current metageneration does not match the given value. +// `ifMetagenerationMatch` and `ifMetagenerationNotMatch` conditions are +// mutually exclusive: it's an error for both of them to be set in the request. +func (c *ObjectsMoveCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsMoveCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// IfSourceGenerationMatch sets the optional parameter +// "ifSourceGenerationMatch": Makes the operation conditional on whether the +// source object's current generation matches the given value. +// `ifSourceGenerationMatch` and `ifSourceGenerationNotMatch` conditions are +// mutually exclusive: it's an error for both of them to be set in the request. +func (c *ObjectsMoveCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsMoveCall { + c.urlParams_.Set("ifSourceGenerationMatch", fmt.Sprint(ifSourceGenerationMatch)) + return c +} + +// IfSourceGenerationNotMatch sets the optional parameter +// "ifSourceGenerationNotMatch": Makes the operation conditional on whether the +// source object's current generation does not match the given value. +// `ifSourceGenerationMatch` and `ifSourceGenerationNotMatch` conditions are +// mutually exclusive: it's an error for both of them to be set in the request. +func (c *ObjectsMoveCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsMoveCall { + c.urlParams_.Set("ifSourceGenerationNotMatch", fmt.Sprint(ifSourceGenerationNotMatch)) + return c +} + +// IfSourceMetagenerationMatch sets the optional parameter +// "ifSourceMetagenerationMatch": Makes the operation conditional on whether +// the source object's current metageneration matches the given value. +// `ifSourceMetagenerationMatch` and `ifSourceMetagenerationNotMatch` +// conditions are mutually exclusive: it's an error for both of them to be set +// in the request. +func (c *ObjectsMoveCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsMoveCall { + c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch)) + return c +} + +// IfSourceMetagenerationNotMatch sets the optional parameter +// "ifSourceMetagenerationNotMatch": Makes the operation conditional on whether +// the source object's current metageneration does not match the given value. +// `ifSourceMetagenerationMatch` and `ifSourceMetagenerationNotMatch` +// conditions are mutually exclusive: it's an error for both of them to be set +// in the request. 
+func (c *ObjectsMoveCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsMoveCall { + c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *ObjectsMoveCall) UserProject(userProject string) *ObjectsMoveCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ObjectsMoveCall) Fields(s ...googleapi.Field) *ObjectsMoveCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ObjectsMoveCall) Context(ctx context.Context) *ObjectsMoveCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ObjectsMoveCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsMoveCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{sourceObject}/moveTo/o/{destinationObject}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, nil) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "sourceObject": c.sourceObject, + "destinationObject": c.destinationObject, + }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.move", "request", internallog.HTTPRequest(req, nil)) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.move" call. +// Any non-2xx status code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ObjectsMoveCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { + return nil, err + } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.move", "response", internallog.HTTPResponse(res, b)) + return ret, nil +} + type ObjectsPatchCall struct { s *Service bucket string @@ -10710,8 +11472,7 @@ func (c *ObjectsPatchCall) Header() http.Header { func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.object2) if err != nil { return nil, err } @@ -10728,6 +11489,7 @@ func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "object": c.object, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.patch", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -10762,9 +11524,11 @@ func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) { }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.patch", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -10782,7 +11546,8 @@ type ObjectsRestoreCall struct { // - bucket: Name of the bucket in which the object resides. // - generation: Selects a specific revision of this object. // - object: Name of the object. For information about how to URL encode object -// names to be path safe, see Encoding URI Path Parts. +// names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). func (r *ObjectsService) Restore(bucket string, object string, generation int64) *ObjectsRestoreCall { c := &ObjectsRestoreCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -10846,6 +11611,16 @@ func (c *ObjectsRestoreCall) Projection(projection string) *ObjectsRestoreCall { return c } +// RestoreToken sets the optional parameter "restoreToken": Restore token used +// to differentiate soft-deleted objects with the same name and generation. Only +// applicable for hierarchical namespace buckets. This parameter is optional, +// and is only required in the rare case when there are multiple soft-deleted +// objects with the same name and generation. +func (c *ObjectsRestoreCall) RestoreToken(restoreToken string) *ObjectsRestoreCall { + c.urlParams_.Set("restoreToken", restoreToken) + return c +} + // UserProject sets the optional parameter "userProject": The project to be // billed for this request. Required for Requester Pays buckets.
func (c *ObjectsRestoreCall) UserProject(userProject string) *ObjectsRestoreCall { @@ -10878,12 +11653,11 @@ func (c *ObjectsRestoreCall) Header() http.Header { func (c *ObjectsRestoreCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/restore") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("POST", urls, nil) if err != nil { return nil, err } @@ -10892,6 +11666,7 @@ func (c *ObjectsRestoreCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "object": c.object, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.restore", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -10926,9 +11701,11 @@ func (c *ObjectsRestoreCall) Do(opts ...googleapi.CallOption) (*Object, error) { }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.restore", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -11152,8 +11929,7 @@ func (c *ObjectsRewriteCall) Header() http.Header { func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.object) if err != nil { return nil, err } @@ -11172,6 +11948,7 @@ func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) { "destinationBucket": c.destinationBucket, "destinationObject": c.destinationObject, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.rewrite", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -11207,9 +11984,11 @@ func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.rewrite", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -11277,8 +12056,7 @@ func (c *ObjectsSetIamPolicyCall) Header() http.Header { func (c *ObjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.policy) if err != nil { return nil, err } @@ -11295,6 +12073,7 @@ func (c *ObjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) "bucket": c.bucket, "object": c.object, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.setIamPolicy", "request", internallog.HTTPRequest(req, body.Bytes())) return 
gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -11329,9 +12108,11 @@ func (c *ObjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.setIamPolicy", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -11412,12 +12193,11 @@ func (c *ObjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, e if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam/testPermissions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -11426,6 +12206,7 @@ func (c *ObjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, e "bucket": c.bucket, "object": c.object, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.testIamPermissions", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -11461,9 +12242,11 @@ func (c *ObjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.testIamPermissions", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -11617,8 +12400,7 @@ func (c *ObjectsUpdateCall) Header() http.Header { func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.object2) if err != nil { return nil, err } @@ -11635,6 +12417,7 @@ func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "object": c.object, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.update", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -11669,9 +12452,11 @@ func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) { }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.update", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -11775,7 +12560,8 @@ func (c *ObjectsWatchAllCall) UserProject(userProject string) *ObjectsWatchAllCa // Versions sets the optional parameter "versions": If true, lists all versions // of an object as distinct results. The default is false. For more -// information, see Object Versioning. +// information, see Object Versioning +// (https://cloud.google.com/storage/docs/object-versioning). 
func (c *ObjectsWatchAllCall) Versions(versions bool) *ObjectsWatchAllCall { c.urlParams_.Set("versions", fmt.Sprint(versions)) return c @@ -11806,8 +12592,7 @@ func (c *ObjectsWatchAllCall) Header() http.Header { func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.channel) if err != nil { return nil, err } @@ -11823,6 +12608,7 @@ func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.watchAll", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -11857,12 +12643,101 @@ func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.watchAll", "response", internallog.HTTPResponse(res, b)) return ret, nil } +type OperationsAdvanceRelocateBucketCall struct { + s *Service + bucket string + operationId string + advancerelocatebucketoperationrequest *AdvanceRelocateBucketOperationRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// AdvanceRelocateBucket: Starts asynchronous advancement of the relocate +// bucket operation in the case of required write downtime, to allow it to lock +// the bucket at the source location, and proceed with the bucket location +// swap. The server makes a best effort to advance the relocate bucket +// operation, but success is not guaranteed. +// +// - bucket: Name of the bucket to advance the relocate for. +// - operationId: ID of the operation resource. +func (r *OperationsService) AdvanceRelocateBucket(bucket string, operationId string, advancerelocatebucketoperationrequest *AdvanceRelocateBucketOperationRequest) *OperationsAdvanceRelocateBucketCall { + c := &OperationsAdvanceRelocateBucketCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.operationId = operationId + c.advancerelocatebucketoperationrequest = advancerelocatebucketoperationrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *OperationsAdvanceRelocateBucketCall) Fields(s ...googleapi.Field) *OperationsAdvanceRelocateBucketCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *OperationsAdvanceRelocateBucketCall) Context(ctx context.Context) *OperationsAdvanceRelocateBucketCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. 
+func (c *OperationsAdvanceRelocateBucketCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OperationsAdvanceRelocateBucketCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.advancerelocatebucketoperationrequest) + if err != nil { + return nil, err + } + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/operations/{operationId}/advanceRelocateBucket") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "operationId": c.operationId, + }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.operations.advanceRelocateBucket", "request", internallog.HTTPRequest(req, body.Bytes())) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.operations.advanceRelocateBucket" call. +func (c *OperationsAdvanceRelocateBucketCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.operations.advanceRelocateBucket", "response", internallog.HTTPResponse(res, nil)) + return nil +} + type OperationsCancelCall struct { s *Service bucket string @@ -11910,12 +12785,11 @@ func (c *OperationsCancelCall) Header() http.Header { func (c *OperationsCancelCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/operations/{operationId}/cancel") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("POST", urls, nil) if err != nil { return nil, err } @@ -11924,6 +12798,7 @@ func (c *OperationsCancelCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "operationId": c.operationId, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.operations.cancel", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -11938,6 +12813,7 @@ func (c *OperationsCancelCall) Do(opts ...googleapi.CallOption) error { if err := googleapi.CheckResponse(res); err != nil { return gensupport.WrapError(err) } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.operations.cancel", "response", internallog.HTTPResponse(res, nil)) return nil } @@ -11998,12 +12874,11 @@ func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/operations/{operationId}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -12012,6 +12887,7 @@ func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "operationId": c.operationId, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.operations.get", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -12047,9 +12923,11 @@ func (c *OperationsGetCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunning }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.operations.get", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -12131,12 +13009,11 @@ func (c *OperationsListCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/operations") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -12144,6 +13021,7 @@ func (c *OperationsListCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.operations.list", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -12179,9 +13057,11 @@ func (c *OperationsListCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunnin }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.operations.list", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -12257,12 +13137,11 @@ func (c *ProjectsHmacKeysCreateCall) Header() http.Header { func (c *ProjectsHmacKeysCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/hmacKeys") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("POST", urls, nil) if err != nil { return nil, err } @@ -12270,6 +13149,7 @@ func (c *ProjectsHmacKeysCreateCall) doRequest(alt string) (*http.Response, erro googleapi.Expand(req.URL, map[string]string{ "projectId": c.projectId, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.projects.hmacKeys.create", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -12304,9 +13184,11 @@ func (c *ProjectsHmacKeysCreateCall) Do(opts ...googleapi.CallOption) (*HmacKey, }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.projects.hmacKeys.create", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -12362,12 +13244,11 @@ func (c *ProjectsHmacKeysDeleteCall) Header() http.Header { func (c *ProjectsHmacKeysDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/hmacKeys/{accessId}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("DELETE", urls, nil) if err != nil { return nil, err } @@ -12376,6 +13257,7 @@ func (c *ProjectsHmacKeysDeleteCall) doRequest(alt string) (*http.Response, erro "projectId": c.projectId, "accessId": c.accessId, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.projects.hmacKeys.delete", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -12390,6 +13272,7 @@ func (c *ProjectsHmacKeysDeleteCall) Do(opts ...googleapi.CallOption) error { if err := googleapi.CheckResponse(res); err != nil { return gensupport.WrapError(err) } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.projects.hmacKeys.delete", "response", internallog.HTTPResponse(res, nil)) return nil } @@ -12457,12 +13340,11 @@ func (c *ProjectsHmacKeysGetCall) doRequest(alt string) (*http.Response, error) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/hmacKeys/{accessId}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -12471,6 +13353,7 @@ func (c *ProjectsHmacKeysGetCall) doRequest(alt string) (*http.Response, error) "projectId": c.projectId, "accessId": c.accessId, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.projects.hmacKeys.get", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -12506,9 +13389,11 @@ func (c *ProjectsHmacKeysGetCall) Do(opts ...googleapi.CallOption) (*HmacKeyMeta }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.projects.hmacKeys.get", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -12605,12 +13490,11 @@ func (c *ProjectsHmacKeysListCall) doRequest(alt string) (*http.Response, error) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/hmacKeys") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -12618,6 +13502,7 @@ func (c *ProjectsHmacKeysListCall) doRequest(alt string) (*http.Response, error) googleapi.Expand(req.URL, map[string]string{ "projectId": c.projectId, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.projects.hmacKeys.list", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -12653,9 +13538,11 @@ func (c *ProjectsHmacKeysListCall) Do(opts ...googleapi.CallOption) (*HmacKeysMe }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.projects.hmacKeys.list", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -12691,7 +13578,9 @@ type ProjectsHmacKeysUpdateCall struct { } // Update: Updates the state of an HMAC key. See the HMAC Key resource -// descriptor for valid states. +// descriptor +// (https://cloud.google.com/storage/docs/json_api/v1/projects/hmacKeys/update#request-body) +// for valid states. // // - accessId: Name of the HMAC key being updated. // - projectId: Project ID owning the service account of the updated key. 
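Note: every hunk above applies the same mechanical pattern — one DebugContext record via c.s.logger before gensupport.SendRequest, and another after the response body is decoded by gensupport.DecodeResponseBytes. Those records only surface if the caller wires a debug-level logger into the client. A minimal sketch of doing so, assuming the option.WithLogger client option that accompanies this generation of google.golang.org/api (the project ID, handler choice, and output stream are illustrative):

package main

import (
	"context"
	"log"
	"log/slog"
	"os"

	"google.golang.org/api/option"
	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	// The generated "api request" / "api response" records are emitted at
	// slog.LevelDebug, so the handler must opt in to that level.
	logger := slog.New(slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug}))
	// option.WithLogger is assumed to be available in this client version.
	svc, err := storage.NewService(ctx, option.WithLogger(logger))
	if err != nil {
		log.Fatal(err)
	}
	// Any call now logs its HTTP request and response at debug level.
	if _, err := svc.Buckets.List("example-project").Context(ctx).Do(); err != nil {
		log.Fatal(err)
	}
}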
@@ -12735,8 +13624,7 @@ func (c *ProjectsHmacKeysUpdateCall) Header() http.Header { func (c *ProjectsHmacKeysUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.hmackeymetadata) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.hmackeymetadata) if err != nil { return nil, err } @@ -12753,6 +13641,7 @@ func (c *ProjectsHmacKeysUpdateCall) doRequest(alt string) (*http.Response, erro "projectId": c.projectId, "accessId": c.accessId, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.projects.hmacKeys.update", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -12788,9 +13677,11 @@ func (c *ProjectsHmacKeysUpdateCall) Do(opts ...googleapi.CallOption) (*HmacKeyM }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.projects.hmacKeys.update", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -12856,12 +13747,11 @@ func (c *ProjectsServiceAccountGetCall) doRequest(alt string) (*http.Response, e if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/serviceAccount") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -12869,6 +13759,7 @@ func (c *ProjectsServiceAccountGetCall) doRequest(alt string) (*http.Response, e googleapi.Expand(req.URL, map[string]string{ "projectId": c.projectId, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.projects.serviceAccount.get", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -12903,8 +13794,10 @@ func (c *ProjectsServiceAccountGetCall) Do(opts ...googleapi.CallOption) (*Servi }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.projects.serviceAccount.get", "response", internallog.HTTPResponse(res, b)) return ret, nil } diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go index 12ebb0a11e7..a354d223d31 100644 --- a/vendor/google.golang.org/api/transport/grpc/dial.go +++ b/vendor/google.golang.org/api/transport/grpc/dial.go @@ -22,7 +22,6 @@ import ( "cloud.google.com/go/auth/grpctransport" "cloud.google.com/go/auth/oauth2adapt" "cloud.google.com/go/compute/metadata" - "go.opencensus.io/plugin/ocgrpc" "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" "golang.org/x/oauth2" "golang.org/x/time/rate" @@ -53,6 +52,9 @@ var logRateLimiter = rate.Sometimes{Interval: 1 * time.Second} // Assign to var for unit test replacement var dialContext = grpc.DialContext +// Assign to var for unit test replacement +var dialContextNewAuth = 
grpctransport.Dial + // otelStatsHandler is a singleton otelgrpc.clientHandler to be used across // all dial connections to avoid the memory leak documented in // https://github.com/open-telemetry/opentelemetry-go-contrib/issues/4226 @@ -218,27 +220,22 @@ func dialPoolNewAuth(ctx context.Context, secure bool, poolSize int, ds *interna defaultEndpointTemplate = ds.DefaultEndpoint } - tokenURL, oauth2Client, err := internal.GetOAuth2Configuration(ctx, ds) - if err != nil { - return nil, err - } - - pool, err := grpctransport.Dial(ctx, secure, &grpctransport.Options{ + pool, err := dialContextNewAuth(ctx, secure, &grpctransport.Options{ DisableTelemetry: ds.TelemetryDisabled, DisableAuthentication: ds.NoAuth, Endpoint: ds.Endpoint, Metadata: metadata, - GRPCDialOpts: ds.GRPCDialOpts, + GRPCDialOpts: prepareDialOptsNewAuth(ds), PoolSize: poolSize, Credentials: creds, + ClientCertProvider: ds.ClientCertSource, APIKey: ds.APIKey, DetectOpts: &credentials.DetectOptions{ Scopes: ds.Scopes, Audience: aud, CredentialsFile: ds.CredentialsFile, CredentialsJSON: ds.CredentialsJSON, - TokenURL: tokenURL, - Client: oauth2Client, + Logger: ds.Logger, }, InternalOptions: &grpctransport.InternalOptions{ EnableNonDefaultSAForDirectPath: ds.AllowNonDefaultServiceAccount, @@ -251,10 +248,55 @@ func dialPoolNewAuth(ctx context.Context, secure bool, poolSize int, ds *interna DefaultScopes: ds.DefaultScopes, SkipValidation: skipValidation, }, + UniverseDomain: ds.UniverseDomain, + Logger: ds.Logger, }) return pool, err } +func prepareDialOptsNewAuth(ds *internal.DialSettings) []grpc.DialOption { + var opts []grpc.DialOption + if ds.UserAgent != "" { + opts = append(opts, grpc.WithUserAgent(ds.UserAgent)) + } + + return append(opts, ds.GRPCDialOpts...) +} + +// dryRunAsync is a wrapper for oauth2.TokenSource that performs a sync refresh +// after an async refresh. Token generated by async refresh is not used. +// +// This is an EXPERIMENTAL feature and may be removed or changed in the future. +// It is a temporary struct to determine if the async refresh +// is working properly. +// TODO(b/372244283): Remove after b/358175516 has been fixed +type dryRunAsync struct { + asyncTokenSource oauth2.TokenSource + syncTokenSource oauth2.TokenSource + errHandler func() +} + +// TODO(b/372244283): Remove after b/358175516 has been fixed +func newDryRunAsync(ts oauth2.TokenSource, errHandler func()) dryRunAsync { + tp := auth.NewCachedTokenProvider(oauth2adapt.TokenProviderFromTokenSource(ts), nil) + asyncTs := oauth2adapt.TokenSourceFromTokenProvider(tp) + return dryRunAsync{ + syncTokenSource: ts, + asyncTokenSource: asyncTs, + errHandler: errHandler, + } +} + +// Token returns a token or an error. +// TODO(b/372244283): Remove after b/358175516 has been fixed +func (async dryRunAsync) Token() (*oauth2.Token, error) { + _, err := async.asyncTokenSource.Token() + if err != nil { + async.errHandler() + } + return async.syncTokenSource.Token() +} + func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.ClientConn, error) { if o.HTTPClient != nil { return nil, errors.New("unsupported HTTP client specified") @@ -291,19 +333,14 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C if err != nil { return nil, err } - if o.TokenSource == nil { - // We only validate non-tokensource creds, as TokenSource-based credentials - // don't propagate universe. 
- credsUniverseDomain, err := internal.GetUniverseDomain(creds) - if err != nil { - return nil, err - } - if o.GetUniverseDomain() != credsUniverseDomain { - return nil, internal.ErrUniverseNotMatch(o.GetUniverseDomain(), credsUniverseDomain) - } + + ts := creds.TokenSource + // TODO(b/372244283): Remove after b/358175516 has been fixed + if o.EnableAsyncRefreshDryRun != nil { + ts = newDryRunAsync(ts, o.EnableAsyncRefreshDryRun) } grpcOpts = append(grpcOpts, grpc.WithPerRPCCredentials(grpcTokenSource{ - TokenSource: oauth.TokenSource{TokenSource: creds.TokenSource}, + TokenSource: oauth.TokenSource{TokenSource: ts}, quotaProject: internal.GetQuotaProject(creds, o.QuotaProject), requestReason: o.RequestReason, })) @@ -349,7 +386,6 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C // Add tracing, but before the other options, so that clients can override the // gRPC stats handler. // This assumes that gRPC options are processed in order, left to right. - grpcOpts = addOCStatsHandler(grpcOpts, o) grpcOpts = addOpenTelemetryStatsHandler(grpcOpts, o) grpcOpts = append(grpcOpts, o.GRPCDialOpts...) if o.UserAgent != "" { @@ -359,13 +395,6 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C return dialContext(ctx, endpoint, grpcOpts...) } -func addOCStatsHandler(opts []grpc.DialOption, settings *internal.DialSettings) []grpc.DialOption { - if settings.TelemetryDisabled { - return opts - } - return append(opts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{})) -} - func addOpenTelemetryStatsHandler(opts []grpc.DialOption, settings *internal.DialSettings) []grpc.DialOption { if settings.TelemetryDisabled { return opts diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go index a36e24315ba..6b7ea74ba41 100644 --- a/vendor/google.golang.org/api/transport/http/dial.go +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -19,7 +19,6 @@ import ( "cloud.google.com/go/auth/credentials" "cloud.google.com/go/auth/httptransport" "cloud.google.com/go/auth/oauth2adapt" - "go.opencensus.io/plugin/ochttp" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" "golang.org/x/net/http2" "golang.org/x/oauth2" @@ -27,7 +26,6 @@ import ( "google.golang.org/api/internal" "google.golang.org/api/internal/cert" "google.golang.org/api/option" - "google.golang.org/api/transport/http/internal/propagation" ) // NewClient returns an HTTP client for use communicating with a Google cloud @@ -107,10 +105,6 @@ func newClientNewAuth(ctx context.Context, base http.RoundTripper, ds *internal. if ds.RequestReason != "" { headers.Set("X-goog-request-reason", ds.RequestReason) } - tokenURL, oauth2Client, err := internal.GetOAuth2Configuration(ctx, ds) - if err != nil { - return nil, err - } client, err := httptransport.NewClient(&httptransport.Options{ DisableTelemetry: ds.TelemetryDisabled, DisableAuthentication: ds.NoAuth, @@ -125,8 +119,7 @@ func newClientNewAuth(ctx context.Context, base http.RoundTripper, ds *internal. Audience: aud, CredentialsFile: ds.CredentialsFile, CredentialsJSON: ds.CredentialsJSON, - TokenURL: tokenURL, - Client: oauth2Client, + Logger: ds.Logger, }, InternalOptions: &httptransport.InternalOptions{ EnableJWTWithScope: ds.EnableJwtWithScope, @@ -136,6 +129,8 @@ func newClientNewAuth(ctx context.Context, base http.RoundTripper, ds *internal. 
DefaultScopes: ds.DefaultScopes, SkipValidation: skipValidation, }, + UniverseDomain: ds.UniverseDomain, + Logger: ds.Logger, }) if err != nil { return nil, err @@ -170,10 +165,7 @@ func newTransport(ctx context.Context, base http.RoundTripper, settings *interna requestReason: settings.RequestReason, } var trans http.RoundTripper = paramTransport - // Give OpenTelemetry precedence over OpenCensus in case user configuration - // causes both to write the same header (`X-Cloud-Trace-Context`). trans = addOpenTelemetryTransport(trans, settings) - trans = addOCTransport(trans, settings) switch { case settings.NoAuth: // Do nothing. @@ -188,17 +180,6 @@ func newTransport(ctx context.Context, base http.RoundTripper, settings *interna if err != nil { return nil, err } - if settings.TokenSource == nil { - // We only validate non-tokensource creds, as TokenSource-based credentials - // don't propagate universe. - credsUniverseDomain, err := internal.GetUniverseDomain(creds) - if err != nil { - return nil, err - } - if settings.GetUniverseDomain() != credsUniverseDomain { - return nil, internal.ErrUniverseNotMatch(settings.GetUniverseDomain(), credsUniverseDomain) - } - } paramTransport.quotaProject = internal.GetQuotaProject(creds, settings.QuotaProject) ts := creds.TokenSource if settings.ImpersonationConfig == nil && settings.TokenSource != nil { @@ -325,16 +306,6 @@ func addOpenTelemetryTransport(trans http.RoundTripper, settings *internal.DialS return otelhttp.NewTransport(trans) } -func addOCTransport(trans http.RoundTripper, settings *internal.DialSettings) http.RoundTripper { - if settings.TelemetryDisabled { - return trans - } - return &ochttp.Transport{ - Base: trans, - Propagation: &propagation.HTTPFormat{}, - } -} - // clonedTransport returns the given RoundTripper as a cloned *http.Transport. // It returns nil if the RoundTripper can't be cloned or coerced to // *http.Transport. diff --git a/vendor/google.golang.org/api/transport/http/internal/propagation/http.go b/vendor/google.golang.org/api/transport/http/internal/propagation/http.go deleted file mode 100644 index ba7512aa26d..00000000000 --- a/vendor/google.golang.org/api/transport/http/internal/propagation/http.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2018 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.8 -// +build go1.8 - -// Package propagation implements X-Cloud-Trace-Context header propagation used -// by Google Cloud products. -package propagation - -import ( - "encoding/binary" - "encoding/hex" - "fmt" - "net/http" - "strconv" - "strings" - - "go.opencensus.io/trace" - "go.opencensus.io/trace/propagation" -) - -const ( - httpHeaderMaxSize = 200 - httpHeader = `X-Cloud-Trace-Context` -) - -var _ propagation.HTTPFormat = (*HTTPFormat)(nil) - -// HTTPFormat implements propagation.HTTPFormat to propagate -// traces in HTTP headers for Google Cloud Platform and Stackdriver Trace. -type HTTPFormat struct{} - -// SpanContextFromRequest extracts a Stackdriver Trace span context from incoming requests. -func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) { - h := req.Header.Get(httpHeader) - // See https://cloud.google.com/trace/docs/faq for the header HTTPFormat. - // Return if the header is empty or missing, or if the header is unreasonably - // large, to avoid making unnecessary copies of a large string. 
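The other theme in these transport hunks is the removal of OpenCensus instrumentation: the ocgrpc stats handler, the ochttp transport wrapper, and the X-Cloud-Trace-Context propagation package (deleted in full above) all go away, leaving OpenTelemetry as the sole tracing path. A hedged sketch of the surviving shape, mirroring addOpenTelemetryTransport; telemetryDisabled stands in for settings.TelemetryDisabled, and only otelhttp.NewTransport (which the diff itself retains) is used.

```go
// Minimal sketch of the OpenTelemetry-only transport wiring that replaces
// the removed OpenCensus (ochttp) wrapping.
package main

import (
	"fmt"
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

// wrapTransport leaves the chain untouched when telemetry is off; otherwise
// it layers the otelhttp transport on top so each outgoing request gets a span.
func wrapTransport(base http.RoundTripper, telemetryDisabled bool) http.RoundTripper {
	if telemetryDisabled {
		return base
	}
	return otelhttp.NewTransport(base)
}

func main() {
	client := &http.Client{Transport: wrapTransport(http.DefaultTransport, false)}
	fmt.Printf("client transport: %T\n", client.Transport) // *otelhttp.Transport
}
```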
- if h == "" || len(h) > httpHeaderMaxSize { - return trace.SpanContext{}, false - } - - // Parse the trace id field. - slash := strings.Index(h, `/`) - if slash == -1 { - return trace.SpanContext{}, false - } - tid, h := h[:slash], h[slash+1:] - - buf, err := hex.DecodeString(tid) - if err != nil { - return trace.SpanContext{}, false - } - copy(sc.TraceID[:], buf) - - // Parse the span id field. - spanstr := h - semicolon := strings.Index(h, `;`) - if semicolon != -1 { - spanstr, h = h[:semicolon], h[semicolon+1:] - } - sid, err := strconv.ParseUint(spanstr, 10, 64) - if err != nil { - return trace.SpanContext{}, false - } - binary.BigEndian.PutUint64(sc.SpanID[:], sid) - - // Parse the options field, options field is optional. - if !strings.HasPrefix(h, "o=") { - return sc, true - } - o, err := strconv.ParseUint(h[2:], 10, 64) - if err != nil { - return trace.SpanContext{}, false - } - sc.TraceOptions = trace.TraceOptions(o) - return sc, true -} - -// SpanContextToRequest modifies the given request to include a Stackdriver Trace header. -func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { - sid := binary.BigEndian.Uint64(sc.SpanID[:]) - header := fmt.Sprintf("%s/%d;o=%d", hex.EncodeToString(sc.TraceID[:]), sid, int64(sc.TraceOptions)) - req.Header.Set(httpHeader, header) -} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go index 636edb460a4..4a9fce53c44 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go @@ -180,6 +180,8 @@ type CommonLanguageSettings struct { ReferenceDocsUri string `protobuf:"bytes,1,opt,name=reference_docs_uri,json=referenceDocsUri,proto3" json:"reference_docs_uri,omitempty"` // The destination where API teams want this client library to be published. Destinations []ClientLibraryDestination `protobuf:"varint,2,rep,packed,name=destinations,proto3,enum=google.api.ClientLibraryDestination" json:"destinations,omitempty"` + // Configuration for which RPCs should be generated in the GAPIC client. + SelectiveGapicGeneration *SelectiveGapicGeneration `protobuf:"bytes,3,opt,name=selective_gapic_generation,json=selectiveGapicGeneration,proto3" json:"selective_gapic_generation,omitempty"` } func (x *CommonLanguageSettings) Reset() { @@ -229,6 +231,13 @@ func (x *CommonLanguageSettings) GetDestinations() []ClientLibraryDestination { return nil } +func (x *CommonLanguageSettings) GetSelectiveGapicGeneration() *SelectiveGapicGeneration { + if x != nil { + return x.SelectiveGapicGeneration + } + return nil +} + // Details about how and where to publish client libraries. type ClientLibrarySettings struct { state protoimpl.MessageState @@ -719,6 +728,8 @@ type PythonSettings struct { // Some settings. Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` + // Experimental features to be included during client library generation. 
+ ExperimentalFeatures *PythonSettings_ExperimentalFeatures `protobuf:"bytes,2,opt,name=experimental_features,json=experimentalFeatures,proto3" json:"experimental_features,omitempty"` } func (x *PythonSettings) Reset() { @@ -760,6 +771,13 @@ func (x *PythonSettings) GetCommon() *CommonLanguageSettings { return nil } +func (x *PythonSettings) GetExperimentalFeatures() *PythonSettings_ExperimentalFeatures { + if x != nil { + return x.ExperimentalFeatures + } + return nil +} + // Settings for Node client libraries. type NodeSettings struct { state protoimpl.MessageState @@ -975,6 +993,16 @@ type GoSettings struct { // Some settings. Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` + // Map of service names to renamed services. Keys are the package relative + // service names and values are the name to be used for the service client + // and call options. + // + // publishing: + // + // go_settings: + // renamed_services: + // Publisher: TopicAdmin + RenamedServices map[string]string `protobuf:"bytes,2,rep,name=renamed_services,json=renamedServices,proto3" json:"renamed_services,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *GoSettings) Reset() { @@ -1016,6 +1044,13 @@ func (x *GoSettings) GetCommon() *CommonLanguageSettings { return nil } +func (x *GoSettings) GetRenamedServices() map[string]string { + if x != nil { + return x.RenamedServices + } + return nil +} + // Describes the generator configuration for a method. type MethodSettings struct { state protoimpl.MessageState @@ -1024,6 +1059,13 @@ type MethodSettings struct { // The fully qualified name of the method, for which the options below apply. // This is used to find the method to apply the options. + // + // Example: + // + // publishing: + // method_settings: + // - selector: google.storage.control.v2.StorageControl.CreateFolder + // # method settings for CreateFolder... Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"` // Describes settings to use for long-running operations when generating // API methods for RPCs. 
Complements RPCs that use the annotations in @@ -1033,15 +1075,12 @@ type MethodSettings struct { // // publishing: // method_settings: - // - selector: google.cloud.speech.v2.Speech.BatchRecognize - // long_running: - // initial_poll_delay: - // seconds: 60 # 1 minute - // poll_delay_multiplier: 1.5 - // max_poll_delay: - // seconds: 360 # 6 minutes - // total_poll_timeout: - // seconds: 54000 # 90 minutes + // - selector: google.cloud.speech.v2.Speech.BatchRecognize + // long_running: + // initial_poll_delay: 60s # 1 minute + // poll_delay_multiplier: 1.5 + // max_poll_delay: 360s # 6 minutes + // total_poll_timeout: 54000s # 90 minutes LongRunning *MethodSettings_LongRunning `protobuf:"bytes,2,opt,name=long_running,json=longRunning,proto3" json:"long_running,omitempty"` // List of top-level fields of the request message, that should be // automatically populated by the client libraries based on their @@ -1051,9 +1090,9 @@ type MethodSettings struct { // // publishing: // method_settings: - // - selector: google.example.v1.ExampleService.CreateExample - // auto_populated_fields: - // - request_id + // - selector: google.example.v1.ExampleService.CreateExample + // auto_populated_fields: + // - request_id AutoPopulatedFields []string `protobuf:"bytes,3,rep,name=auto_populated_fields,json=autoPopulatedFields,proto3" json:"auto_populated_fields,omitempty"` } @@ -1110,6 +1149,123 @@ func (x *MethodSettings) GetAutoPopulatedFields() []string { return nil } +// This message is used to configure the generation of a subset of the RPCs in +// a service for client libraries. +type SelectiveGapicGeneration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An allowlist of the fully qualified names of RPCs that should be included + // on public client surfaces. + Methods []string `protobuf:"bytes,1,rep,name=methods,proto3" json:"methods,omitempty"` +} + +func (x *SelectiveGapicGeneration) Reset() { + *x = SelectiveGapicGeneration{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SelectiveGapicGeneration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SelectiveGapicGeneration) ProtoMessage() {} + +func (x *SelectiveGapicGeneration) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SelectiveGapicGeneration.ProtoReflect.Descriptor instead. +func (*SelectiveGapicGeneration) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{12} +} + +func (x *SelectiveGapicGeneration) GetMethods() []string { + if x != nil { + return x.Methods + } + return nil +} + +// Experimental features to be included during client library generation. +// These fields will be deprecated once the feature graduates and is enabled +// by default. +type PythonSettings_ExperimentalFeatures struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Enables generation of asynchronous REST clients if `rest` transport is + // enabled. By default, asynchronous REST clients will not be generated. 
+ // This feature will be enabled by default 1 month after launching the + // feature in preview packages. + RestAsyncIoEnabled bool `protobuf:"varint,1,opt,name=rest_async_io_enabled,json=restAsyncIoEnabled,proto3" json:"rest_async_io_enabled,omitempty"` + // Enables generation of protobuf code using new types that are more + // Pythonic which are included in `protobuf>=5.29.x`. This feature will be + // enabled by default 1 month after launching the feature in preview + // packages. + ProtobufPythonicTypesEnabled bool `protobuf:"varint,2,opt,name=protobuf_pythonic_types_enabled,json=protobufPythonicTypesEnabled,proto3" json:"protobuf_pythonic_types_enabled,omitempty"` +} + +func (x *PythonSettings_ExperimentalFeatures) Reset() { + *x = PythonSettings_ExperimentalFeatures{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PythonSettings_ExperimentalFeatures) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PythonSettings_ExperimentalFeatures) ProtoMessage() {} + +func (x *PythonSettings_ExperimentalFeatures) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PythonSettings_ExperimentalFeatures.ProtoReflect.Descriptor instead. +func (*PythonSettings_ExperimentalFeatures) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{6, 0} +} + +func (x *PythonSettings_ExperimentalFeatures) GetRestAsyncIoEnabled() bool { + if x != nil { + return x.RestAsyncIoEnabled + } + return false +} + +func (x *PythonSettings_ExperimentalFeatures) GetProtobufPythonicTypesEnabled() bool { + if x != nil { + return x.ProtobufPythonicTypesEnabled + } + return false +} + // Describes settings to use when generating API methods that use the // long-running operation pattern. 
// All default values below are from those used in the client library @@ -1138,7 +1294,7 @@ type MethodSettings_LongRunning struct { func (x *MethodSettings_LongRunning) Reset() { *x = MethodSettings_LongRunning{} if protoimpl.UnsafeEnabled { - mi := &file_google_api_client_proto_msgTypes[15] + mi := &file_google_api_client_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1151,7 +1307,7 @@ func (x *MethodSettings_LongRunning) String() string { func (*MethodSettings_LongRunning) ProtoMessage() {} func (x *MethodSettings_LongRunning) ProtoReflect() protoreflect.Message { - mi := &file_google_api_client_proto_msgTypes[15] + mi := &file_google_api_client_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1339,7 +1495,7 @@ var file_google_api_client_proto_rawDesc = []byte{ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x94, 0x01, 0x0a, 0x16, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf8, 0x01, 0x0a, 0x16, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x30, 0x0a, 0x12, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, @@ -1348,240 +1504,275 @@ var file_google_api_client_proto_rawDesc = []byte{ 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x0c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x93, 0x05, - 0x0a, 0x15, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x3a, 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, - 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x2c, 0x0a, - 0x12, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x65, 0x6e, - 0x75, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x65, 0x73, 0x74, 0x4e, - 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x45, 0x6e, 0x75, 0x6d, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6a, - 0x61, 0x76, 0x61, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x15, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6a, 0x61, - 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x63, 0x70, - 0x70, 
0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x70, - 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x63, 0x70, 0x70, 0x53, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x70, 0x68, 0x70, 0x5f, 0x73, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x70, 0x68, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, - 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6e, 0x6f, 0x64, 0x65, 0x5f, - 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4e, 0x6f, 0x64, 0x65, - 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6e, 0x6f, 0x64, 0x65, 0x53, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x64, 0x6f, 0x74, 0x6e, 0x65, 0x74, - 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, - 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x64, 0x6f, 0x74, - 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x72, - 0x75, 0x62, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1b, 0x20, 0x01, + 0x0c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x62, 0x0a, + 0x1a, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x67, 0x61, 0x70, 0x69, 0x63, + 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x47, 0x61, 0x70, 0x69, 0x63, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x18, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x76, 0x65, 0x47, 0x61, 0x70, 0x69, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x22, 0x93, 0x05, 0x0a, 0x15, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, + 0x61, 0x72, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3a, 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, + 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, + 0x74, 0x61, 0x67, 0x65, 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, + 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x69, + 0x63, 0x5f, 0x65, 0x6e, 0x75, 
0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, + 0x65, 0x73, 0x74, 0x4e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x45, 0x6e, 0x75, 0x6d, 0x73, 0x12, + 0x3d, 0x0a, 0x0d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x52, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, + 0x0a, 0x0c, 0x63, 0x70, 0x70, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x16, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x43, 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x63, + 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x70, 0x68, + 0x70, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x68, + 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x70, 0x68, 0x70, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, + 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, + 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x70, 0x79, 0x74, + 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6e, + 0x6f, 0x64, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x72, 0x75, - 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x67, 0x6f, - 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x6f, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x67, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, - 0x6e, 0x67, 0x73, 0x22, 0xf4, 0x04, 0x0a, 0x0a, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x69, - 0x6e, 0x67, 0x12, 0x43, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x6e, 0x65, 0x77, 0x5f, 0x69, - 0x73, 0x73, 0x75, 0x65, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x65, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x6e, 0x65, 0x77, 0x49, 0x73, 0x73, 0x75, 0x65, 0x55, 0x72, 0x69, 0x12, 0x2b, 0x0a, 0x11, 0x64, - 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, - 0x18, 0x66, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x70, 0x69, 0x5f, - 0x73, 0x68, 0x6f, 0x72, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 
0x65, 0x18, 0x67, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x61, 0x70, 0x69, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, - 0x0a, 0x0c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x68, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x4c, 0x61, 0x62, 0x65, - 0x6c, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x69, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x14, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x47, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x54, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x64, 0x6f, 0x63, 0x5f, 0x74, - 0x61, 0x67, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x6a, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0c, 0x64, 0x6f, 0x63, 0x54, 0x61, 0x67, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x49, 0x0a, - 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x6b, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, - 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6f, 0x72, 0x67, 0x61, - 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4c, 0x0a, 0x10, 0x6c, 0x69, 0x62, 0x72, - 0x61, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x6d, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x49, 0x0a, 0x21, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x5f, - 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, - 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x6e, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x1e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, - 0x65, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, - 0x69, 0x12, 0x47, 0x0a, 0x20, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, - 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x6f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1d, 0x72, 0x65, 0x73, - 0x74, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, - 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x22, 0x9a, 0x02, 0x0a, 0x0c, 0x4a, - 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6c, - 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x50, 0x61, 0x63, - 0x6b, 0x61, 0x67, 0x65, 0x12, 0x5f, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, - 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4a, - 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 
0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, - 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6e, 0x6f, + 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x64, 0x6f, + 0x74, 0x6e, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1a, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, + 0x0e, 0x64, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, + 0x3d, 0x0a, 0x0d, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x18, 0x1b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x52, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x37, + 0x0a, 0x0b, 0x67, 0x6f, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1c, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x67, 0x6f, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x22, 0xf4, 0x04, 0x0a, 0x0a, 0x50, 0x75, 0x62, 0x6c, + 0x69, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x12, 0x43, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x6d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x6e, + 0x65, 0x77, 0x5f, 0x69, 0x73, 0x73, 0x75, 0x65, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x65, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x65, 0x77, 0x49, 0x73, 0x73, 0x75, 0x65, 0x55, 0x72, 0x69, 0x12, + 0x2b, 0x0a, 0x11, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x75, 0x72, 0x69, 0x18, 0x66, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x6f, 0x63, 0x75, + 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x12, 0x24, 0x0a, 0x0e, + 0x61, 0x70, 0x69, 0x5f, 0x73, 0x68, 0x6f, 0x72, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x67, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x70, 0x69, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 0x18, 0x68, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, + 0x65, 0x72, 0x5f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x73, 0x18, + 0x69, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72, + 0x47, 0x69, 0x74, 0x68, 0x75, 0x62, 0x54, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x64, + 0x6f, 0x63, 0x5f, 0x74, 0x61, 0x67, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x6a, 0x20, + 0x01, 
0x28, 0x09, 0x52, 0x0c, 0x64, 0x6f, 0x63, 0x54, 0x61, 0x67, 0x50, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x12, 0x49, 0x0a, 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x6b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, + 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, + 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4c, 0x0a, 0x10, + 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x18, 0x6d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, + 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, + 0x72, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x49, 0x0a, 0x21, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, + 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, + 0x6e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x65, 0x66, 0x65, + 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x12, 0x47, 0x0a, 0x20, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65, + 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x6f, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x1d, 0x72, 0x65, 0x73, 0x74, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f, + 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x22, 0x9a, + 0x02, 0x0a, 0x0c, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, + 0x27, 0x0a, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, + 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, + 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x5f, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, + 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, + 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x1a, 0x44, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x14, 0x0a, 0x05, 
0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x49, 0x0a, 0x0b, 0x43, + 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, + 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x49, 0x0a, 0x0b, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x1a, 0x44, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, - 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x49, 0x0a, 0x0b, 0x43, 0x70, 0x70, 0x53, 0x65, + 0x6e, 0x22, 0xc5, 0x02, 0x0a, 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x12, 0x64, 0x0a, 0x15, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, + 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, + 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x45, 0x78, 0x70, 0x65, + 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, + 0x52, 0x14, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, 0x90, 0x01, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x65, 0x72, + 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, + 0x31, 0x0a, 0x15, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x6f, + 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, + 0x72, 0x65, 0x73, 0x74, 0x41, 0x73, 0x79, 0x6e, 0x63, 0x49, 0x6f, 0x45, 0x6e, 0x61, 0x62, 0x6c, + 0x65, 0x64, 0x12, 0x45, 0x0a, 0x1f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x5f, 0x70, + 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x69, 0x63, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x5f, 0x65, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1c, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x69, 0x63, 0x54, 0x79, 0x70, + 0x65, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 
0x4a, 0x0a, 0x0c, 0x4e, 0x6f, 0x64, + 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, + 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, + 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, + 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, + 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x72, + 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, + 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x67, 0x6e, 0x6f, + 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, + 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x16, + 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72, + 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, + 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72, 0x69, 0x74, + 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, 0x42, 0x0a, + 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 
0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x22, 0x49, 0x0a, 0x0b, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x6f, 0x6e, 0x22, 0xe4, 0x01, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4c, 0x0a, - 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, - 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, - 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4a, 0x0a, 0x0c, 0x4e, - 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, - 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, - 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e, - 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, - 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, - 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, - 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, - 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 
0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, - 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, - 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x67, - 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x38, - 0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61, 0x6e, 0x64, - 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72, - 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, - 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, 0x62, 0x79, - 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, - 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, - 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xc2, - 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, - 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x49, 0x0a, - 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, - 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, - 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75, 0x74, 0x6f, - 0x5f, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x50, 0x6f, 0x70, - 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x94, 0x02, 0x0a, - 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, 0x0a, 0x12, - 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, - 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, - 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, - 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x4d, - 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, 0x61, 0x78, - 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x56, 0x0a, + 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, + 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc2, 0x03, 0x0a, 0x0e, 0x4d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, + 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x49, 0x0a, 0x0c, 0x6c, 0x6f, 0x6e, 0x67, + 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, + 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, + 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x70, 0x75, + 0x6c, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 
0x18, 0x03, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, + 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x94, 0x02, 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, + 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, + 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, + 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, + 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, + 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, + 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, + 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, + 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, + 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, + 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6d, 0x61, - 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, 0x74, 0x6f, - 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, - 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, - 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, - 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, - 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, 0x53, - 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03, 0x12, 0x0f, - 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, 0x04, 0x12, - 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x07, 0x0a, - 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, - 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18, 0x43, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, - 
0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, 0x41, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, 0x12, 0x13, 0x0a, - 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x52, - 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x6d, - 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x3a, 0x43, - 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1f, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x48, - 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x63, 0x6f, - 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x61, 0x75, - 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b, 0x61, 0x70, 0x69, 0x5f, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab, 0xfa, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x69, - 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, - 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, - 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, + 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0x34, + 0x0a, 0x18, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x47, 0x61, 0x70, 0x69, 0x63, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x73, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, + 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 
0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, + 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, 0x4f, + 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, + 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03, 0x12, + 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, 0x04, + 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x07, + 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x45, 0x52, + 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18, 0x43, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, + 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, 0x41, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, 0x12, 0x13, + 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, + 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x69, + 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x3a, + 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x12, + 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x63, + 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x61, + 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b, 0x61, 0x70, 0x69, + 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab, 0xfa, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, + 0x69, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( @@ -1597,69 +1788,75 @@ func file_google_api_client_proto_rawDescGZIP() []byte { } var file_google_api_client_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 16) +var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 19) var file_google_api_client_proto_goTypes = []interface{}{ - (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization - (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination - (*CommonLanguageSettings)(nil), // 2: google.api.CommonLanguageSettings - (*ClientLibrarySettings)(nil), // 3: google.api.ClientLibrarySettings - (*Publishing)(nil), // 4: google.api.Publishing - (*JavaSettings)(nil), // 5: google.api.JavaSettings - (*CppSettings)(nil), // 6: google.api.CppSettings - (*PhpSettings)(nil), // 7: google.api.PhpSettings - (*PythonSettings)(nil), // 8: google.api.PythonSettings - (*NodeSettings)(nil), // 9: google.api.NodeSettings - (*DotnetSettings)(nil), // 10: google.api.DotnetSettings - (*RubySettings)(nil), // 11: google.api.RubySettings - (*GoSettings)(nil), // 12: google.api.GoSettings - (*MethodSettings)(nil), // 13: google.api.MethodSettings - nil, // 14: google.api.JavaSettings.ServiceClassNamesEntry - nil, // 15: google.api.DotnetSettings.RenamedServicesEntry - nil, // 16: google.api.DotnetSettings.RenamedResourcesEntry - (*MethodSettings_LongRunning)(nil), // 17: google.api.MethodSettings.LongRunning - (api.LaunchStage)(0), // 18: google.api.LaunchStage - (*durationpb.Duration)(nil), // 19: google.protobuf.Duration - (*descriptorpb.MethodOptions)(nil), // 20: google.protobuf.MethodOptions - (*descriptorpb.ServiceOptions)(nil), // 21: google.protobuf.ServiceOptions + (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization + (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination + (*CommonLanguageSettings)(nil), // 2: google.api.CommonLanguageSettings + (*ClientLibrarySettings)(nil), // 3: google.api.ClientLibrarySettings + (*Publishing)(nil), // 4: google.api.Publishing + (*JavaSettings)(nil), // 5: google.api.JavaSettings + (*CppSettings)(nil), // 6: google.api.CppSettings + (*PhpSettings)(nil), // 7: google.api.PhpSettings + (*PythonSettings)(nil), // 8: google.api.PythonSettings + (*NodeSettings)(nil), // 9: google.api.NodeSettings + (*DotnetSettings)(nil), // 10: google.api.DotnetSettings + (*RubySettings)(nil), // 11: google.api.RubySettings + (*GoSettings)(nil), // 12: google.api.GoSettings + (*MethodSettings)(nil), // 13: google.api.MethodSettings + (*SelectiveGapicGeneration)(nil), // 14: google.api.SelectiveGapicGeneration + nil, // 15: google.api.JavaSettings.ServiceClassNamesEntry + (*PythonSettings_ExperimentalFeatures)(nil), // 16: google.api.PythonSettings.ExperimentalFeatures + nil, // 17: google.api.DotnetSettings.RenamedServicesEntry + nil, // 18: google.api.DotnetSettings.RenamedResourcesEntry + nil, // 19: google.api.GoSettings.RenamedServicesEntry + (*MethodSettings_LongRunning)(nil), // 20: google.api.MethodSettings.LongRunning + (api.LaunchStage)(0), // 21: google.api.LaunchStage + (*durationpb.Duration)(nil), 
// 22: google.protobuf.Duration + (*descriptorpb.MethodOptions)(nil), // 23: google.protobuf.MethodOptions + (*descriptorpb.ServiceOptions)(nil), // 24: google.protobuf.ServiceOptions } var file_google_api_client_proto_depIdxs = []int32{ 1, // 0: google.api.CommonLanguageSettings.destinations:type_name -> google.api.ClientLibraryDestination - 18, // 1: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage - 5, // 2: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings - 6, // 3: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings - 7, // 4: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings - 8, // 5: google.api.ClientLibrarySettings.python_settings:type_name -> google.api.PythonSettings - 9, // 6: google.api.ClientLibrarySettings.node_settings:type_name -> google.api.NodeSettings - 10, // 7: google.api.ClientLibrarySettings.dotnet_settings:type_name -> google.api.DotnetSettings - 11, // 8: google.api.ClientLibrarySettings.ruby_settings:type_name -> google.api.RubySettings - 12, // 9: google.api.ClientLibrarySettings.go_settings:type_name -> google.api.GoSettings - 13, // 10: google.api.Publishing.method_settings:type_name -> google.api.MethodSettings - 0, // 11: google.api.Publishing.organization:type_name -> google.api.ClientLibraryOrganization - 3, // 12: google.api.Publishing.library_settings:type_name -> google.api.ClientLibrarySettings - 14, // 13: google.api.JavaSettings.service_class_names:type_name -> google.api.JavaSettings.ServiceClassNamesEntry - 2, // 14: google.api.JavaSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 15: google.api.CppSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 16: google.api.PhpSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 17: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 18: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 19: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings - 15, // 20: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry - 16, // 21: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry - 2, // 22: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 23: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings - 17, // 24: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning - 19, // 25: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration - 19, // 26: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration - 19, // 27: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration - 20, // 28: google.api.method_signature:extendee -> google.protobuf.MethodOptions - 21, // 29: google.api.default_host:extendee -> google.protobuf.ServiceOptions - 21, // 30: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions - 21, // 31: google.api.api_version:extendee -> google.protobuf.ServiceOptions - 32, // [32:32] is the sub-list for method output_type - 32, // [32:32] is the sub-list for method input_type - 32, // [32:32] is the sub-list for extension type_name - 28, // [28:32] is the sub-list for extension extendee - 0, // [0:28] is the sub-list 
for field type_name + 14, // 1: google.api.CommonLanguageSettings.selective_gapic_generation:type_name -> google.api.SelectiveGapicGeneration + 21, // 2: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage + 5, // 3: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings + 6, // 4: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings + 7, // 5: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings + 8, // 6: google.api.ClientLibrarySettings.python_settings:type_name -> google.api.PythonSettings + 9, // 7: google.api.ClientLibrarySettings.node_settings:type_name -> google.api.NodeSettings + 10, // 8: google.api.ClientLibrarySettings.dotnet_settings:type_name -> google.api.DotnetSettings + 11, // 9: google.api.ClientLibrarySettings.ruby_settings:type_name -> google.api.RubySettings + 12, // 10: google.api.ClientLibrarySettings.go_settings:type_name -> google.api.GoSettings + 13, // 11: google.api.Publishing.method_settings:type_name -> google.api.MethodSettings + 0, // 12: google.api.Publishing.organization:type_name -> google.api.ClientLibraryOrganization + 3, // 13: google.api.Publishing.library_settings:type_name -> google.api.ClientLibrarySettings + 15, // 14: google.api.JavaSettings.service_class_names:type_name -> google.api.JavaSettings.ServiceClassNamesEntry + 2, // 15: google.api.JavaSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 16: google.api.CppSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 17: google.api.PhpSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 18: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings + 16, // 19: google.api.PythonSettings.experimental_features:type_name -> google.api.PythonSettings.ExperimentalFeatures + 2, // 20: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 21: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings + 17, // 22: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry + 18, // 23: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry + 2, // 24: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 25: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings + 19, // 26: google.api.GoSettings.renamed_services:type_name -> google.api.GoSettings.RenamedServicesEntry + 20, // 27: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning + 22, // 28: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration + 22, // 29: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration + 22, // 30: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration + 23, // 31: google.api.method_signature:extendee -> google.protobuf.MethodOptions + 24, // 32: google.api.default_host:extendee -> google.protobuf.ServiceOptions + 24, // 33: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions + 24, // 34: google.api.api_version:extendee -> google.protobuf.ServiceOptions + 35, // [35:35] is the sub-list for method output_type + 35, // [35:35] is the sub-list for method input_type + 35, // [35:35] is the sub-list for extension type_name + 31, // [31:35] is the sub-list for extension extendee + 
0, // [0:31] is the sub-list for field type_name } func init() { file_google_api_client_proto_init() } @@ -1812,7 +2009,31 @@ func file_google_api_client_proto_init() { return nil } } - file_google_api_client_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_google_api_client_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SelectiveGapicGeneration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PythonSettings_ExperimentalFeatures); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MethodSettings_LongRunning); i { case 0: return &v.state @@ -1831,7 +2052,7 @@ func file_google_api_client_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_api_client_proto_rawDesc, NumEnums: 2, - NumMessages: 16, + NumMessages: 19, NumExtensions: 4, NumServices: 0, }, diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go index d339dfb02ac..a462e7d0132 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go @@ -121,6 +121,11 @@ type FieldInfo struct { // any API consumer, just documents the API's format for the field it is // applied to. Format FieldInfo_Format `protobuf:"varint,1,opt,name=format,proto3,enum=google.api.FieldInfo_Format" json:"format,omitempty"` + // The type(s) that the annotated, generic field may represent. + // + // Currently, this must only be used on fields of type `google.protobuf.Any`. + // Supporting other generic types may be considered in the future. + ReferencedTypes []*TypeReference `protobuf:"bytes,2,rep,name=referenced_types,json=referencedTypes,proto3" json:"referenced_types,omitempty"` } func (x *FieldInfo) Reset() { @@ -162,6 +167,70 @@ func (x *FieldInfo) GetFormat() FieldInfo_Format { return FieldInfo_FORMAT_UNSPECIFIED } +func (x *FieldInfo) GetReferencedTypes() []*TypeReference { + if x != nil { + return x.ReferencedTypes + } + return nil +} + +// A reference to a message type, for use in [FieldInfo][google.api.FieldInfo]. +type TypeReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the type that the annotated, generic field may represent. + // If the type is in the same protobuf package, the value can be the simple + // message name e.g., `"MyMessage"`. Otherwise, the value must be the + // fully-qualified message name e.g., `"google.library.v1.Book"`. + // + // If the type(s) are unknown to the service (e.g. the field accepts generic + // user input), use the wildcard `"*"` to denote this behavior. + // + // See [AIP-202](https://google.aip.dev/202#type-references) for more details. 
+ TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` +} + +func (x *TypeReference) Reset() { + *x = TypeReference{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_field_info_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TypeReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TypeReference) ProtoMessage() {} + +func (x *TypeReference) ProtoReflect() protoreflect.Message { + mi := &file_google_api_field_info_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TypeReference.ProtoReflect.Descriptor instead. +func (*TypeReference) Descriptor() ([]byte, []int) { + return file_google_api_field_info_proto_rawDescGZIP(), []int{1} +} + +func (x *TypeReference) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + var file_google_api_field_info_proto_extTypes = []protoimpl.ExtensionInfo{ { ExtendedType: (*descriptorpb.FieldOptions)(nil), @@ -185,6 +254,13 @@ var ( // string actual_ip_address = 4 [ // (google.api.field_info).format = IPV4_OR_IPV6 // ]; + // google.protobuf.Any generic_field = 5 [ + // (google.api.field_info).referenced_types = {type_name: "ActualType"}, + // (google.api.field_info).referenced_types = {type_name: "OtherType"}, + // ]; + // google.protobuf.Any generic_user_input = 5 [ + // (google.api.field_info).referenced_types = {type_name: "*"}, + // ]; // // optional google.api.FieldInfo field_info = 291403980; E_FieldInfo = &file_google_api_field_info_proto_extTypes[0] @@ -197,30 +273,37 @@ var file_google_api_field_info_proto_rawDesc = []byte{ 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x94, 0x01, 0x0a, 0x09, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xda, 0x01, 0x0a, 0x09, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x34, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, - 0x2e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, - 0x51, 0x0a, 0x06, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x16, 0x0a, 0x12, 0x46, 0x4f, 0x52, - 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x09, 0x0a, 0x05, 0x55, 0x55, 0x49, 0x44, 0x34, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, - 0x49, 0x50, 0x56, 0x34, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x50, 0x56, 0x36, 0x10, 0x03, - 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x50, 0x56, 0x34, 0x5f, 0x4f, 0x52, 0x5f, 0x49, 0x50, 0x56, 0x36, - 0x10, 0x04, 0x3a, 0x57, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f, - 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0xcc, 0xf1, 0xf9, 0x8a, 0x01, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, - 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x6c, 0x0a, 0x0e, 0x63, - 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, - 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x2e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, + 0x44, 0x0a, 0x10, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x64, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x66, 0x65, 0x72, + 0x65, 0x6e, 0x63, 0x65, 0x52, 0x0f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x64, + 0x54, 0x79, 0x70, 0x65, 0x73, 0x22, 0x51, 0x0a, 0x06, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, + 0x16, 0x0a, 0x12, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x55, 0x55, 0x49, 0x44, 0x34, + 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x50, 0x56, 0x34, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, + 0x49, 0x50, 0x56, 0x36, 0x10, 0x03, 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x50, 0x56, 0x34, 0x5f, 0x4f, + 0x52, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x10, 0x04, 0x22, 0x2c, 0x0a, 0x0d, 0x54, 0x79, 0x70, 0x65, + 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, + 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x3a, 0x57, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, + 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0xcc, 0xf1, 0xf9, 0x8a, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x42, + 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x42, 0x0e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, + 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -236,21 
+319,23 @@ func file_google_api_field_info_proto_rawDescGZIP() []byte { } var file_google_api_field_info_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_google_api_field_info_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_api_field_info_proto_msgTypes = make([]protoimpl.MessageInfo, 2) var file_google_api_field_info_proto_goTypes = []interface{}{ (FieldInfo_Format)(0), // 0: google.api.FieldInfo.Format (*FieldInfo)(nil), // 1: google.api.FieldInfo - (*descriptorpb.FieldOptions)(nil), // 2: google.protobuf.FieldOptions + (*TypeReference)(nil), // 2: google.api.TypeReference + (*descriptorpb.FieldOptions)(nil), // 3: google.protobuf.FieldOptions } var file_google_api_field_info_proto_depIdxs = []int32{ 0, // 0: google.api.FieldInfo.format:type_name -> google.api.FieldInfo.Format - 2, // 1: google.api.field_info:extendee -> google.protobuf.FieldOptions - 1, // 2: google.api.field_info:type_name -> google.api.FieldInfo - 3, // [3:3] is the sub-list for method output_type - 3, // [3:3] is the sub-list for method input_type - 2, // [2:3] is the sub-list for extension type_name - 1, // [1:2] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name + 2, // 1: google.api.FieldInfo.referenced_types:type_name -> google.api.TypeReference + 3, // 2: google.api.field_info:extendee -> google.protobuf.FieldOptions + 1, // 3: google.api.field_info:type_name -> google.api.FieldInfo + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 3, // [3:4] is the sub-list for extension type_name + 2, // [2:3] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name } func init() { file_google_api_field_info_proto_init() } @@ -271,6 +356,18 @@ func file_google_api_field_info_proto_init() { return nil } } + file_google_api_field_info_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TypeReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ @@ -278,7 +375,7 @@ func file_google_api_field_info_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_api_field_info_proto_rawDesc, NumEnums: 1, - NumMessages: 1, + NumMessages: 2, NumExtensions: 1, NumServices: 0, }, diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go index 76ea76df330..ffb5838cb18 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go @@ -102,7 +102,7 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { return false } -// # gRPC Transcoding +// gRPC Transcoding // // gRPC Transcoding is a feature for mapping between a gRPC method and one or // more HTTP REST endpoints. 
It allows developers to build a single API service @@ -143,9 +143,8 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // This enables an HTTP REST to gRPC mapping as below: // -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")` +// - HTTP: `GET /v1/messages/123456` +// - gRPC: `GetMessage(name: "messages/123456")` // // Any fields in the request message which are not bound by the path template // automatically become HTTP query parameters if there is no HTTP request body. @@ -169,11 +168,9 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // This enables a HTTP JSON to RPC mapping as below: // -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | -// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: -// "foo"))` +// - HTTP: `GET /v1/messages/123456?revision=2&sub.subfield=foo` +// - gRPC: `GetMessage(message_id: "123456" revision: 2 sub: +// SubMessage(subfield: "foo"))` // // Note that fields which are mapped to URL query parameters must have a // primitive type or a repeated primitive type or a non-repeated message type. @@ -203,10 +200,8 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // representation of the JSON in the request body is determined by // protos JSON encoding: // -// HTTP | gRPC -// -----|----- -// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: -// "123456" message { text: "Hi!" })` +// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }` +// - gRPC: `UpdateMessage(message_id: "123456" message { text: "Hi!" })` // // The special name `*` can be used in the body mapping to define that // every field not bound by the path template should be mapped to the @@ -228,10 +223,8 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // The following HTTP JSON to RPC mapping is enabled: // -// HTTP | gRPC -// -----|----- -// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: -// "123456" text: "Hi!")` +// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }` +// - gRPC: `UpdateMessage(message_id: "123456" text: "Hi!")` // // Note that when using `*` in the body mapping, it is not possible to // have HTTP parameters, as all fields not bound by the path end in @@ -259,13 +252,13 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // This enables the following two alternative HTTP JSON to RPC mappings: // -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` -// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: -// "123456")` +// - HTTP: `GET /v1/messages/123456` +// - gRPC: `GetMessage(message_id: "123456")` // -// ## Rules for HTTP mapping +// - HTTP: `GET /v1/users/me/messages/123456` +// - gRPC: `GetMessage(user_id: "me" message_id: "123456")` +// +// # Rules for HTTP mapping // // 1. Leaf request fields (recursive expansion nested messages in the request // message) are classified into three categories: @@ -284,7 +277,7 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // request body, all // fields are passed via URL path and URL query parameters. // -// ### Path template syntax +// Path template syntax // // Template = "/" Segments [ Verb ] ; // Segments = Segment { "/" Segment } ; @@ -323,7 +316,7 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // Document](https://developers.google.com/discovery/v1/reference/apis) as // `{+var}`. 
// -// ## Using gRPC API Service Configuration +// # Using gRPC API Service Configuration // // gRPC API Service Configuration (service config) is a configuration language // for configuring a gRPC service to become a user-facing product. The @@ -338,15 +331,14 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // specified in the service config will override any matching transcoding // configuration in the proto. // -// Example: +// The following example selects a gRPC method and applies an `HttpRule` to it: // // http: // rules: -// # Selects a gRPC method and applies HttpRule to it. // - selector: example.v1.Messaging.GetMessage // get: /v1/messages/{message_id}/{sub.subfield} // -// ## Special notes +// # Special notes // // When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the // proto to JSON conversion must follow the [proto3 diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go index 7a3fd93fcd9..b5db279aebf 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go @@ -253,8 +253,13 @@ type ResourceDescriptor struct { History ResourceDescriptor_History `protobuf:"varint,4,opt,name=history,proto3,enum=google.api.ResourceDescriptor_History" json:"history,omitempty"` // The plural name used in the resource name and permission names, such as // 'projects' for the resource name of 'projects/{project}' and the permission - // name of 'cloudresourcemanager.googleapis.com/projects.get'. It is the same - // concept of the `plural` field in k8s CRD spec + // name of 'cloudresourcemanager.googleapis.com/projects.get'. One exception + // to this is for Nested Collections that have stuttering names, as defined + // in [AIP-122](https://google.aip.dev/122#nested-collections), where the + // collection ID in the resource name pattern does not necessarily directly + // match the `plural` value. + // + // It is the same concept of the `plural` field in k8s CRD spec // https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/ // // Note: The plural form is required even for singleton resources. 
See diff --git a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go index e7d3805e365..f388426b08f 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go @@ -159,14 +159,14 @@ var file_google_api_httpbody_proto_rawDesc = []byte{ 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, - 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x68, 0x0a, 0x0e, 0x63, + 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x65, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0d, 0x48, 0x74, 0x74, 0x70, 0x42, 0x6f, 0x64, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, - 0x64, 0x79, 0x3b, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, 0x64, 0x79, 0xf8, 0x01, 0x01, 0xa2, 0x02, - 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x64, 0x79, 0x3b, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, 0x64, 0x79, 0xa2, 0x02, 0x04, 0x47, 0x41, + 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go index 3e562182792..3cd9a5bb8e6 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go @@ -80,11 +80,12 @@ type ErrorInfo struct { Domain string `protobuf:"bytes,2,opt,name=domain,proto3" json:"domain,omitempty"` // Additional structured details about this error. // - // Keys should match /[a-zA-Z0-9-_]/ and be limited to 64 characters in + // Keys must match a regular expression of `[a-z][a-zA-Z0-9-_]+` but should + // ideally be lowerCamelCase. Also, they must be limited to 64 characters in // length. When identifying the current value of an exceeded limit, the units // should be contained in the key, not the value. For example, rather than - // {"instanceLimit": "100/request"}, should be returned as, - // {"instanceLimitPerRequest": "100"}, if the client exceeds the number of + // `{"instanceLimit": "100/request"}`, should be returned as, + // `{"instanceLimitPerRequest": "100"}`, if the client exceeds the number of // instances that can be created in a single (batch) request. Metadata map[string]string `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } @@ -870,6 +871,16 @@ type BadRequest_FieldViolation struct { Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"` // A description of why the request element is bad. 
Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + // The reason of the field-level error. This is a constant value that + // identifies the proximate cause of the field-level error. It should + // uniquely identify the type of the FieldViolation within the scope of the + // google.rpc.ErrorInfo.domain. This should be at most 63 + // characters and match a regular expression of `[A-Z][A-Z0-9_]+[A-Z0-9]`, + // which represents UPPER_SNAKE_CASE. + Reason string `protobuf:"bytes,3,opt,name=reason,proto3" json:"reason,omitempty"` + // Provides a localized error message for field-level errors that is safe to + // return to the API consumer. + LocalizedMessage *LocalizedMessage `protobuf:"bytes,4,opt,name=localized_message,json=localizedMessage,proto3" json:"localized_message,omitempty"` } func (x *BadRequest_FieldViolation) Reset() { @@ -918,6 +929,20 @@ func (x *BadRequest_FieldViolation) GetDescription() string { return "" } +func (x *BadRequest_FieldViolation) GetReason() string { + if x != nil { + return x.Reason + } + return "" +} + +func (x *BadRequest_FieldViolation) GetLocalizedMessage() *LocalizedMessage { + if x != nil { + return x.LocalizedMessage + } + return nil +} + // Describes a URL link. type Help_Link struct { state protoimpl.MessageState @@ -1026,51 +1051,57 @@ var file_google_rpc_error_details_proto_rawDesc = []byte{ 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa8, 0x01, 0x0a, 0x0a, 0x42, 0x61, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x8c, 0x02, 0x0a, 0x0a, 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x10, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x48, 0x0a, 0x0e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, - 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4f, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, - 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61, - 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, - 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 
0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6f, 0x0a, 0x04, 0x48, 0x65, 0x6c, 0x70, - 0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x6c, - 0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x1a, 0x3a, 0x0a, - 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x44, 0x0a, 0x10, 0x4c, 0x6f, 0x63, - 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, - 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, - 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, - 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, - 0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x72, 0x70, - 0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x3b, 0x65, 0x72, 0x72, - 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xab, 0x01, 0x0a, 0x0e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, + 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x49, 0x0a, + 0x11, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, + 
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, + 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x4f, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, + 0x67, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a, 0x0c, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6f, 0x0a, 0x04, + 0x48, 0x65, 0x6c, 0x70, 0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, + 0x2e, 0x48, 0x65, 0x6c, 0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, + 0x73, 0x1a, 0x3a, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, + 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x44, 0x0a, + 0x10, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x42, 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, + 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, + 0x3b, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02, 0x03, 0x52, 0x50, + 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1111,11 +1142,12 @@ var file_google_rpc_error_details_proto_depIdxs = []int32{ 12, // 3: google.rpc.PreconditionFailure.violations:type_name -> 
google.rpc.PreconditionFailure.Violation 13, // 4: google.rpc.BadRequest.field_violations:type_name -> google.rpc.BadRequest.FieldViolation 14, // 5: google.rpc.Help.links:type_name -> google.rpc.Help.Link - 6, // [6:6] is the sub-list for method output_type - 6, // [6:6] is the sub-list for method input_type - 6, // [6:6] is the sub-list for extension type_name - 6, // [6:6] is the sub-list for extension extendee - 0, // [0:6] is the sub-list for field type_name + 9, // 6: google.rpc.BadRequest.FieldViolation.localized_message:type_name -> google.rpc.LocalizedMessage + 7, // [7:7] is the sub-list for method output_type + 7, // [7:7] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name } func init() { file_google_rpc_error_details_proto_init() } diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index 0854d298e41..d9bfa6e1e7c 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -4,7 +4,7 @@ We definitely welcome your patches and contributions to gRPC! Please read the gR organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md) and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding. -If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) +If you are new to GitHub, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) ## Legal requirements @@ -25,8 +25,8 @@ How to get your contributions merged smoothly and quickly. is a great place to start. These issues are well-documented and usually can be resolved with a single pull request. -- If you are adding a new file, make sure it has the copyright message template - at the top as a comment. You can copy over the message from an existing file +- If you are adding a new file, make sure it has the copyright message template + at the top as a comment. You can copy over the message from an existing file and update the year. - The grpc package should only depend on standard Go packages and a small number @@ -39,12 +39,12 @@ How to get your contributions merged smoothly and quickly. proposal](https://github.com/grpc/proposal). - Provide a good **PR description** as a record of **what** change is being made - and **why** it was made. Link to a github issue if it exists. + and **why** it was made. Link to a GitHub issue if it exists. -- If you want to fix formatting or style, consider whether your changes are an - obvious improvement or might be considered a personal preference. If a style - change is based on preference, it likely will not be accepted. If it corrects - widely agreed-upon anti-patterns, then please do create a PR and explain the +- If you want to fix formatting or style, consider whether your changes are an + obvious improvement or might be considered a personal preference. If a style + change is based on preference, it likely will not be accepted. If it corrects + widely agreed-upon anti-patterns, then please do create a PR and explain the benefits of the change. 
- Unless your PR is trivial, you should expect there will be reviewer comments diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md index 6a8a07781ae..5d4096d46a0 100644 --- a/vendor/google.golang.org/grpc/MAINTAINERS.md +++ b/vendor/google.golang.org/grpc/MAINTAINERS.md @@ -9,21 +9,28 @@ for general contribution guidelines. ## Maintainers (in alphabetical order) +- [aranjans](https://github.com/aranjans), Google LLC +- [arjan-bal](https://github.com/arjan-bal), Google LLC +- [arvindbr8](https://github.com/arvindbr8), Google LLC - [atollena](https://github.com/atollena), Datadog, Inc. -- [cesarghali](https://github.com/cesarghali), Google LLC - [dfawley](https://github.com/dfawley), Google LLC - [easwars](https://github.com/easwars), Google LLC -- [menghanl](https://github.com/menghanl), Google LLC -- [srini100](https://github.com/srini100), Google LLC +- [erm-g](https://github.com/erm-g), Google LLC +- [gtcooke94](https://github.com/gtcooke94), Google LLC +- [purnesh42h](https://github.com/purnesh42h), Google LLC +- [zasweq](https://github.com/zasweq), Google LLC ## Emeritus Maintainers (in alphabetical order) -- [adelez](https://github.com/adelez), Google LLC -- [canguler](https://github.com/canguler), Google LLC -- [iamqizhao](https://github.com/iamqizhao), Google LLC -- [jadekler](https://github.com/jadekler), Google LLC -- [jtattermusch](https://github.com/jtattermusch), Google LLC -- [lyuxuan](https://github.com/lyuxuan), Google LLC -- [makmukhi](https://github.com/makmukhi), Google LLC -- [matt-kwong](https://github.com/matt-kwong), Google LLC -- [nicolasnoble](https://github.com/nicolasnoble), Google LLC -- [yongni](https://github.com/yongni), Google LLC +- [adelez](https://github.com/adelez) +- [canguler](https://github.com/canguler) +- [cesarghali](https://github.com/cesarghali) +- [iamqizhao](https://github.com/iamqizhao) +- [jeanbza](https://github.com/jeanbza) +- [jtattermusch](https://github.com/jtattermusch) +- [lyuxuan](https://github.com/lyuxuan) +- [makmukhi](https://github.com/makmukhi) +- [matt-kwong](https://github.com/matt-kwong) +- [menghanl](https://github.com/menghanl) +- [nicolasnoble](https://github.com/nicolasnoble) +- [srini100](https://github.com/srini100) +- [yongni](https://github.com/yongni) diff --git a/vendor/google.golang.org/grpc/SECURITY.md b/vendor/google.golang.org/grpc/SECURITY.md index be6e108705c..abab279379b 100644 --- a/vendor/google.golang.org/grpc/SECURITY.md +++ b/vendor/google.golang.org/grpc/SECURITY.md @@ -1,3 +1,3 @@ # Security Policy -For information on gRPC Security Policy and reporting potentional security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md). +For information on gRPC Security Policy and reporting potential security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md). diff --git a/vendor/google.golang.org/grpc/backoff/backoff.go b/vendor/google.golang.org/grpc/backoff/backoff.go index 0787d0b50ce..d7b40b7cb66 100644 --- a/vendor/google.golang.org/grpc/backoff/backoff.go +++ b/vendor/google.golang.org/grpc/backoff/backoff.go @@ -39,7 +39,7 @@ type Config struct { MaxDelay time.Duration } -// DefaultConfig is a backoff configuration with the default values specfied +// DefaultConfig is a backoff configuration with the default values specified // at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. 
// // This should be useful for callers who want to configure backoff with diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index f391744f729..382ad694110 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -30,6 +30,7 @@ import ( "google.golang.org/grpc/channelz" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" + estats "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" "google.golang.org/grpc/metadata" @@ -74,6 +75,8 @@ func unregisterForTesting(name string) { func init() { internal.BalancerUnregister = unregisterForTesting + internal.ConnectedAddress = connectedAddress + internal.SetConnectedAddress = setConnectedAddress } // Get returns the resolver builder registered with the given name. @@ -92,54 +95,6 @@ func Get(name string) Builder { return nil } -// A SubConn represents a single connection to a gRPC backend service. -// -// Each SubConn contains a list of addresses. -// -// All SubConns start in IDLE, and will not try to connect. To trigger the -// connecting, Balancers must call Connect. If a connection re-enters IDLE, -// Balancers must call Connect again to trigger a new connection attempt. -// -// gRPC will try to connect to the addresses in sequence, and stop trying the -// remainder once the first connection is successful. If an attempt to connect -// to all addresses encounters an error, the SubConn will enter -// TRANSIENT_FAILURE for a backoff period, and then transition to IDLE. -// -// Once established, if a connection is lost, the SubConn will transition -// directly to IDLE. -// -// This interface is to be implemented by gRPC. Users should not need their own -// implementation of this interface. For situations like testing, any -// implementations should embed this interface. This allows gRPC to add new -// methods to this interface. -type SubConn interface { - // UpdateAddresses updates the addresses used in this SubConn. - // gRPC checks if currently-connected address is still in the new list. - // If it's in the list, the connection will be kept. - // If it's not in the list, the connection will gracefully closed, and - // a new connection will be created. - // - // This will trigger a state transition for the SubConn. - // - // Deprecated: this method will be removed. Create new SubConns for new - // addresses instead. - UpdateAddresses([]resolver.Address) - // Connect starts the connecting for this SubConn. - Connect() - // GetOrBuildProducer returns a reference to the existing Producer for this - // ProducerBuilder in this SubConn, or, if one does not currently exist, - // creates a new one and returns it. Returns a close function which must - // be called when the Producer is no longer needed. - GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) - // Shutdown shuts down the SubConn gracefully. Any started RPCs will be - // allowed to complete. No future calls should be made on the SubConn. - // One final state update will be delivered to the StateListener (or - // UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to - // indicate the shutdown operation. This may be delivered before - // in-progress RPCs are complete and the actual connection is closed. - Shutdown() -} - // NewSubConnOptions contains options to create new SubConn. 
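A quick orientation for this hunk: the SubConn interface itself moves out of balancer.go (upstream keeps it elsewhere in the same package), but the lifecycle it documents is unchanged. A minimal sketch of how a balancer implementation drives it, assuming a hypothetical ClientConn cc and resolved address addr (names invented for illustration):

    sc, err := cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{
        // StateListener is invoked with IDLE/CONNECTING/READY/TRANSIENT_FAILURE updates.
        StateListener: func(s balancer.SubConnState) { /* react to s.ConnectivityState */ },
    })
    if err != nil {
        return err
    }
    // SubConns start in IDLE; nothing dials until Connect is called.
    sc.Connect()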
type NewSubConnOptions struct { // CredsBundle is the credentials bundle that will be used in the created @@ -243,6 +198,10 @@ type BuildOptions struct { // same resolver.Target as passed to the resolver. See the documentation for // the resolver.Target type for details about what it contains. Target resolver.Target + // MetricsRecorder is the metrics recorder that balancers can use to record + // metrics. Balancer implementations which do not register metrics on + // metrics registry and record on them can ignore this field. + MetricsRecorder estats.MetricsRecorder } // Builder creates a balancer. @@ -403,15 +362,6 @@ type ExitIdler interface { ExitIdle() } -// SubConnState describes the state of a SubConn. -type SubConnState struct { - // ConnectivityState is the connectivity state of the SubConn. - ConnectivityState connectivity.State - // ConnectionError is set if the ConnectivityState is TransientFailure, - // describing the reason the SubConn failed. Otherwise, it is nil. - ConnectionError error -} - // ClientConnState describes the state of a ClientConn relevant to the // balancer. type ClientConnState struct { @@ -424,20 +374,3 @@ type ClientConnState struct { // ErrBadResolverState may be returned by UpdateClientConnState to indicate a // problem with the provided name resolver data. var ErrBadResolverState = errors.New("bad resolver state") - -// A ProducerBuilder is a simple constructor for a Producer. It is used by the -// SubConn to create producers when needed. -type ProducerBuilder interface { - // Build creates a Producer. The first parameter is always a - // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the - // associated SubConn), but is declared as `any` to avoid a dependency - // cycle. Should also return a close function that will be called when all - // references to the Producer have been given up. - Build(grpcClientConnInterface any) (p Producer, close func()) -} - -// A Producer is a type shared among potentially many consumers. It is -// associated with a SubConn, and an implementation will typically contain -// other methods to provide additional functionality, e.g. configuration or -// subscription registration. -type Producer any diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index a7f1eeec8e6..d5ed172ae69 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -36,7 +36,7 @@ type baseBuilder struct { config Config } -func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { +func (bb *baseBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { bal := &baseBalancer{ cc: cc, pickerBuilder: bb.pickerBuilder, @@ -133,7 +133,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { } } // If resolver state contains no addresses, return an error so ClientConn - // will trigger re-resolve. Also records this as an resolver error, so when + // will trigger re-resolve. Also records this as a resolver error, so when // the overall state turns transient failure, the error message will have // the zero address information. if len(s.ResolverState.Addresses) == 0 { @@ -259,6 +259,6 @@ type errPicker struct { err error // Pick() always returns this err. 
} -func (p *errPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { +func (p *errPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { return balancer.PickResult{}, p.err } diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go index 0adc98866c0..3f274482c74 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go @@ -19,8 +19,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 +// protoc-gen-go v1.35.1 +// protoc v5.27.1 // source: grpc/lb/v1/load_balancer.proto package grpc_lb_v1 @@ -55,11 +55,9 @@ type LoadBalanceRequest struct { func (x *LoadBalanceRequest) Reset() { *x = LoadBalanceRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *LoadBalanceRequest) String() string { @@ -70,7 +68,7 @@ func (*LoadBalanceRequest) ProtoMessage() {} func (x *LoadBalanceRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -139,11 +137,9 @@ type InitialLoadBalanceRequest struct { func (x *InitialLoadBalanceRequest) Reset() { *x = InitialLoadBalanceRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *InitialLoadBalanceRequest) String() string { @@ -154,7 +150,7 @@ func (*InitialLoadBalanceRequest) ProtoMessage() {} func (x *InitialLoadBalanceRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -190,11 +186,9 @@ type ClientStatsPerToken struct { func (x *ClientStatsPerToken) Reset() { *x = ClientStatsPerToken{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ClientStatsPerToken) String() string { @@ -205,7 +199,7 @@ func (*ClientStatsPerToken) ProtoMessage() {} func (x *ClientStatsPerToken) ProtoReflect() protoreflect.Message { mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -258,11 +252,9 @@ type ClientStats struct { func (x *ClientStats) Reset() { *x = ClientStats{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[3] 
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ClientStats) String() string { @@ -273,7 +265,7 @@ func (*ClientStats) ProtoMessage() {} func (x *ClientStats) ProtoReflect() protoreflect.Message { mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -345,11 +337,9 @@ type LoadBalanceResponse struct { func (x *LoadBalanceResponse) Reset() { *x = LoadBalanceResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *LoadBalanceResponse) String() string { @@ -360,7 +350,7 @@ func (*LoadBalanceResponse) ProtoMessage() {} func (x *LoadBalanceResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -438,11 +428,9 @@ type FallbackResponse struct { func (x *FallbackResponse) Reset() { *x = FallbackResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FallbackResponse) String() string { @@ -453,7 +441,7 @@ func (*FallbackResponse) ProtoMessage() {} func (x *FallbackResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -481,11 +469,9 @@ type InitialLoadBalanceResponse struct { func (x *InitialLoadBalanceResponse) Reset() { *x = InitialLoadBalanceResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *InitialLoadBalanceResponse) String() string { @@ -496,7 +482,7 @@ func (*InitialLoadBalanceResponse) ProtoMessage() {} func (x *InitialLoadBalanceResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -532,11 +518,9 @@ type ServerList struct { func (x *ServerList) Reset() { *x = ServerList{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[7] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServerList) String() string { @@ -547,7 +531,7 @@ func (*ServerList) ProtoMessage() {} func (x *ServerList) ProtoReflect() protoreflect.Message { mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -597,11 +581,9 @@ type Server struct { func (x *Server) Reset() { *x = Server{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Server) String() string { @@ -612,7 +594,7 @@ func (*Server) ProtoMessage() {} func (x *Server) ProtoReflect() protoreflect.Message { mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -780,7 +762,7 @@ func file_grpc_lb_v1_load_balancer_proto_rawDescGZIP() []byte { } var file_grpc_lb_v1_load_balancer_proto_msgTypes = make([]protoimpl.MessageInfo, 9) -var file_grpc_lb_v1_load_balancer_proto_goTypes = []interface{}{ +var file_grpc_lb_v1_load_balancer_proto_goTypes = []any{ (*LoadBalanceRequest)(nil), // 0: grpc.lb.v1.LoadBalanceRequest (*InitialLoadBalanceRequest)(nil), // 1: grpc.lb.v1.InitialLoadBalanceRequest (*ClientStatsPerToken)(nil), // 2: grpc.lb.v1.ClientStatsPerToken @@ -817,121 +799,11 @@ func file_grpc_lb_v1_load_balancer_proto_init() { if File_grpc_lb_v1_load_balancer_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_grpc_lb_v1_load_balancer_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LoadBalanceRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lb_v1_load_balancer_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*InitialLoadBalanceRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lb_v1_load_balancer_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ClientStatsPerToken); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lb_v1_load_balancer_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ClientStats); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lb_v1_load_balancer_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LoadBalanceResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lb_v1_load_balancer_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FallbackResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_grpc_lb_v1_load_balancer_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*InitialLoadBalanceResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lb_v1_load_balancer_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServerList); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lb_v1_load_balancer_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Server); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_grpc_lb_v1_load_balancer_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_grpc_lb_v1_load_balancer_proto_msgTypes[0].OneofWrappers = []any{ (*LoadBalanceRequest_InitialRequest)(nil), (*LoadBalanceRequest_ClientStats)(nil), } - file_grpc_lb_v1_load_balancer_proto_msgTypes[4].OneofWrappers = []interface{}{ + file_grpc_lb_v1_load_balancer_proto_msgTypes[4].OneofWrappers = []any{ (*LoadBalanceResponse_InitialResponse)(nil), (*LoadBalanceResponse_ServerList)(nil), (*LoadBalanceResponse_FallbackResponse)(nil), diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go index 57a792a7b48..84e6a25056b 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go @@ -19,8 +19,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.4.0 -// - protoc v4.25.2 +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.27.1 // source: grpc/lb/v1/load_balancer.proto package grpc_lb_v1 @@ -34,8 +34,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.62.0 or later. -const _ = grpc.SupportPackageIsVersion8 +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 const ( LoadBalancer_BalanceLoad_FullMethodName = "/grpc.lb.v1.LoadBalancer/BalanceLoad" @@ -46,7 +46,7 @@ const ( // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type LoadBalancerClient interface { // Bidirectional rpc to get a list of servers. - BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) + BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[LoadBalanceRequest, LoadBalanceResponse], error) } type loadBalancerClient struct { @@ -57,53 +57,38 @@ func NewLoadBalancerClient(cc grpc.ClientConnInterface) LoadBalancerClient { return &loadBalancerClient{cc} } -func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) { +func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[LoadBalanceRequest, LoadBalanceResponse], error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) stream, err := c.cc.NewStream(ctx, &LoadBalancer_ServiceDesc.Streams[0], LoadBalancer_BalanceLoad_FullMethodName, cOpts...) 
if err != nil { return nil, err } - x := &loadBalancerBalanceLoadClient{ClientStream: stream} + x := &grpc.GenericClientStream[LoadBalanceRequest, LoadBalanceResponse]{ClientStream: stream} return x, nil } -type LoadBalancer_BalanceLoadClient interface { - Send(*LoadBalanceRequest) error - Recv() (*LoadBalanceResponse, error) - grpc.ClientStream -} - -type loadBalancerBalanceLoadClient struct { - grpc.ClientStream -} - -func (x *loadBalancerBalanceLoadClient) Send(m *LoadBalanceRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *loadBalancerBalanceLoadClient) Recv() (*LoadBalanceResponse, error) { - m := new(LoadBalanceResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type LoadBalancer_BalanceLoadClient = grpc.BidiStreamingClient[LoadBalanceRequest, LoadBalanceResponse] // LoadBalancerServer is the server API for LoadBalancer service. // All implementations should embed UnimplementedLoadBalancerServer -// for forward compatibility +// for forward compatibility. type LoadBalancerServer interface { // Bidirectional rpc to get a list of servers. - BalanceLoad(LoadBalancer_BalanceLoadServer) error + BalanceLoad(grpc.BidiStreamingServer[LoadBalanceRequest, LoadBalanceResponse]) error } -// UnimplementedLoadBalancerServer should be embedded to have forward compatible implementations. -type UnimplementedLoadBalancerServer struct { -} +// UnimplementedLoadBalancerServer should be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedLoadBalancerServer struct{} -func (UnimplementedLoadBalancerServer) BalanceLoad(LoadBalancer_BalanceLoadServer) error { +func (UnimplementedLoadBalancerServer) BalanceLoad(grpc.BidiStreamingServer[LoadBalanceRequest, LoadBalanceResponse]) error { return status.Errorf(codes.Unimplemented, "method BalanceLoad not implemented") } +func (UnimplementedLoadBalancerServer) testEmbeddedByValue() {} // UnsafeLoadBalancerServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to LoadBalancerServer will @@ -113,34 +98,22 @@ type UnsafeLoadBalancerServer interface { } func RegisterLoadBalancerServer(s grpc.ServiceRegistrar, srv LoadBalancerServer) { + // If the following call panics, it indicates UnimplementedLoadBalancerServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. 
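A sketch of what this registration-time probe enforces, with a hypothetical myServer type (not part of this diff): embedding UnimplementedLoadBalancerServer by value keeps the zero value usable, while embedding the pointer type would leave it nil and trip the panic described above.

    type myServer struct {
        grpc_lb_v1.UnimplementedLoadBalancerServer // embedded by value: safe
        // fields of the real implementation ...
    }

    // By contrast, embedding *grpc_lb_v1.UnimplementedLoadBalancerServer as a
    // nil pointer would panic when testEmbeddedByValue is called.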
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&LoadBalancer_ServiceDesc, srv) } func _LoadBalancer_BalanceLoad_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(LoadBalancerServer).BalanceLoad(&loadBalancerBalanceLoadServer{ServerStream: stream}) -} - -type LoadBalancer_BalanceLoadServer interface { - Send(*LoadBalanceResponse) error - Recv() (*LoadBalanceRequest, error) - grpc.ServerStream -} - -type loadBalancerBalanceLoadServer struct { - grpc.ServerStream + return srv.(LoadBalancerServer).BalanceLoad(&grpc.GenericServerStream[LoadBalanceRequest, LoadBalanceResponse]{ServerStream: stream}) } -func (x *loadBalancerBalanceLoadServer) Send(m *LoadBalanceResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *loadBalancerBalanceLoadServer) Recv() (*LoadBalanceRequest, error) { - m := new(LoadBalanceRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type LoadBalancer_BalanceLoadServer = grpc.BidiStreamingServer[LoadBalanceRequest, LoadBalanceResponse] // LoadBalancer_ServiceDesc is the grpc.ServiceDesc for LoadBalancer service. // It's only intended for direct use with grpc.RegisterService, diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go index 47a3e938dcf..0770b88e96d 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go @@ -197,7 +197,7 @@ type lbBalancer struct { // manualResolver is used in the remote LB ClientConn inside grpclb. When // resolved address updates are received by grpclb, filtered updates will be - // send to remote LB ClientConn through this resolver. + // sent to remote LB ClientConn through this resolver. manualResolver *manual.Resolver // The ClientConn to talk to the remote balancer. ccRemoteLB *remoteBalancerCCWrapper @@ -219,7 +219,7 @@ type lbBalancer struct { // All backends addresses, with metadata set to nil. This list contains all // backend addresses in the same order and with the same duplicates as in // serverlist. When generating picker, a SubConn slice with the same order - // but with only READY SCs will be gerenated. + // but with only READY SCs will be generated. backendAddrsWithoutMetadata []resolver.Address // Roundrobin functionalities. 
state connectivity.State diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go index 671bc663fcb..9ff07522d78 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go @@ -19,7 +19,7 @@ package grpclb import ( - "math/rand" + rand "math/rand/v2" "sync" "sync/atomic" @@ -112,7 +112,7 @@ type rrPicker struct { func newRRPicker(readySCs []balancer.SubConn) *rrPicker { return &rrPicker{ subConns: readySCs, - subConnsNext: rand.Intn(len(readySCs)), + subConnsNext: rand.IntN(len(readySCs)), } } @@ -147,7 +147,7 @@ func newLBPicker(serverList []*lbpb.Server, readySCs []balancer.SubConn, stats * return &lbPicker{ serverList: serverList, subConns: readySCs, - subConnsNext: rand.Intn(len(readySCs)), + subConnsNext: rand.IntN(len(readySCs)), stats: stats, } } diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go b/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go new file mode 100644 index 00000000000..7d66cb491c4 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go @@ -0,0 +1,35 @@ +/* + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains code internal to the pickfirst package. +package internal + +import ( + rand "math/rand/v2" + "time" +) + +var ( + // RandShuffle pseudo-randomizes the order of addresses. + RandShuffle = rand.Shuffle + // TimeAfterFunc allows mocking the timer for testing connection delay + // related functionality. + TimeAfterFunc = func(d time.Duration, f func()) func() { + timer := time.AfterFunc(d, f) + return func() { timer.Stop() } + } +) diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go index 07527603f1d..ea8899818c2 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go @@ -23,21 +23,26 @@ import ( "encoding/json" "errors" "fmt" - "math/rand" + rand "math/rand/v2" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/pickfirst/internal" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/envconfig" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" + + _ "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf" // For automatically registering the new pickfirst if required. 
) func init() { + if envconfig.NewPickFirstEnabled { + return + } balancer.Register(pickfirstBuilder{}) - internal.ShuffleAddressListForTesting = func(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } } var logger = grpclog.Component("pick-first-lb") @@ -50,7 +55,7 @@ const ( type pickfirstBuilder struct{} -func (pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { +func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { b := &pickfirstBalancer{cc: cc} b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) return b @@ -103,10 +108,13 @@ func (b *pickfirstBalancer) ResolverError(err error) { }) } +// Shuffler is an interface for shuffling an address list. type Shuffler interface { ShuffleAddressListForTesting(n int, swap func(i, j int)) } +// ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n +// is the number of elements. swap swaps the elements with indexes i and j. func ShuffleAddressListForTesting(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { @@ -140,7 +148,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // within each endpoint. - A61 if cfg.ShuffleAddressList { endpoints = append([]resolver.Endpoint{}, endpoints...) - internal.ShuffleAddressListForTesting.(func(int, func(int, int)))(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) + internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) } // "Flatten the list by concatenating the ordered list of addresses for each @@ -155,7 +163,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // Endpoints not set, process addresses until we migrate resolver // emissions fully to Endpoints. The top channel does wrap emitted // addresses with endpoints, however some balancers such as weighted - // target do not forwarrd the corresponding correct endpoints down/split + // target do not forward the corresponding correct endpoints down/split // endpoints properly. Once all balancers correctly forward endpoints // down, can delete this else conditional. addrs = state.ResolverState.Addresses diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go new file mode 100644 index 00000000000..2fc0a71f944 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go @@ -0,0 +1,911 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package pickfirstleaf contains the pick_first load balancing policy which +// will be the universal leaf policy after dualstack changes are implemented. 
+// +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. +package pickfirstleaf + +import ( + "encoding/json" + "errors" + "fmt" + "net" + "net/netip" + "sync" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/pickfirst/internal" + "google.golang.org/grpc/connectivity" + expstats "google.golang.org/grpc/experimental/stats" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/envconfig" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +func init() { + if envconfig.NewPickFirstEnabled { + // Register as the default pick_first balancer. + Name = "pick_first" + } + balancer.Register(pickfirstBuilder{}) +} + +// enableHealthListenerKeyType is a unique key type used in resolver attributes +// to indicate whether the health listener usage is enabled. +type enableHealthListenerKeyType struct{} + +var ( + logger = grpclog.Component("pick-first-leaf-lb") + // Name is the name of the pick_first_leaf balancer. + // It is changed to "pick_first" in init() if this balancer is to be + // registered as the default pickfirst. + Name = "pick_first_leaf" + disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ + Name: "grpc.lb.pick_first.disconnections", + Description: "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.", + Unit: "disconnection", + Labels: []string{"grpc.target"}, + Default: false, + }) + connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ + Name: "grpc.lb.pick_first.connection_attempts_succeeded", + Description: "EXPERIMENTAL. Number of successful connection attempts.", + Unit: "attempt", + Labels: []string{"grpc.target"}, + Default: false, + }) + connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ + Name: "grpc.lb.pick_first.connection_attempts_failed", + Description: "EXPERIMENTAL. Number of failed connection attempts.", + Unit: "attempt", + Labels: []string{"grpc.target"}, + Default: false, + }) +) + +const ( + // TODO: change to pick-first when this becomes the default pick_first policy. + logPrefix = "[pick-first-leaf-lb %p] " + // connectionDelayInterval is the time to wait for during the happy eyeballs + // pass before starting the next connection attempt. + connectionDelayInterval = 250 * time.Millisecond +) + +type ipAddrFamily int + +const ( + // ipAddrFamilyUnknown represents strings that can't be parsed as an IP + // address. + ipAddrFamilyUnknown ipAddrFamily = iota + ipAddrFamilyV4 + ipAddrFamilyV6 +) + +type pickfirstBuilder struct{} + +func (pickfirstBuilder) Build(cc balancer.ClientConn, bo balancer.BuildOptions) balancer.Balancer { + b := &pickfirstBalancer{ + cc: cc, + target: bo.Target.String(), + metricsRecorder: bo.MetricsRecorder, // ClientConn will always create a Metrics Recorder. 
+
+		subConns:              resolver.NewAddressMap(),
+		state:                 connectivity.Connecting,
+		cancelConnectionTimer: func() {},
+	}
+	b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b))
+	return b
+}
+
+func (b pickfirstBuilder) Name() string {
+	return Name
+}
+
+func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
+	var cfg pfConfig
+	if err := json.Unmarshal(js, &cfg); err != nil {
+		return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err)
+	}
+	return cfg, nil
+}
+
+// EnableHealthListener updates the state to configure pickfirst for using a
+// generic health listener.
+func EnableHealthListener(state resolver.State) resolver.State {
+	state.Attributes = state.Attributes.WithValue(enableHealthListenerKeyType{}, true)
+	return state
+}
+
+type pfConfig struct {
+	serviceconfig.LoadBalancingConfig `json:"-"`
+
+	// If set to true, instructs the LB policy to shuffle the order of the list
+	// of endpoints received from the name resolver before attempting to
+	// connect to them.
+	ShuffleAddressList bool `json:"shuffleAddressList"`
+}
+
+// scData keeps track of the current state of the subConn.
+// It is not safe for concurrent access.
+type scData struct {
+	// The following fields are initialized at build time and read-only after
+	// that.
+	subConn balancer.SubConn
+	addr    resolver.Address
+
+	rawConnectivityState connectivity.State
+	// The effective connectivity state based on raw connectivity, health state
+	// and after following sticky TransientFailure behaviour defined in A62.
+	effectiveState              connectivity.State
+	lastErr                     error
+	connectionFailedInFirstPass bool
+}
+
+func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) {
+	sd := &scData{
+		rawConnectivityState: connectivity.Idle,
+		effectiveState:       connectivity.Idle,
+		addr:                 addr,
+	}
+	sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{
+		StateListener: func(state balancer.SubConnState) {
+			b.updateSubConnState(sd, state)
+		},
+	})
+	if err != nil {
+		return nil, err
+	}
+	sd.subConn = sc
+	return sd, nil
+}
+
+type pickfirstBalancer struct {
+	// The following fields are initialized at build time and read-only after
+	// that and therefore do not need to be guarded by a mutex.
+	logger          *internalgrpclog.PrefixLogger
+	cc              balancer.ClientConn
+	target          string
+	metricsRecorder expstats.MetricsRecorder // guaranteed to be non-nil
+
+	// The mutex is used to ensure synchronization of updates triggered
+	// from the idle picker and the already serialized resolver and
+	// SubConn state updates.
+	mu sync.Mutex
+	// State reported to the channel based on SubConn states and resolver
+	// updates.
+	state connectivity.State
+	// scData for active subconns mapped by address.
+	subConns              *resolver.AddressMap
+	addressList           addressList
+	firstPass             bool
+	numTF                 int
+	cancelConnectionTimer func()
+	healthCheckingEnabled bool
+}
+
+// ResolverError is called by the ClientConn when the name resolver produces
+// an error or when pickfirst determined the resolver update to be invalid.
+func (b *pickfirstBalancer) ResolverError(err error) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	b.resolverErrorLocked(err)
+}
+
+func (b *pickfirstBalancer) resolverErrorLocked(err error) {
+	if b.logger.V(2) {
+		b.logger.Infof("Received error from the name resolver: %v", err)
+	}
+
+	// The picker will not change since the balancer does not currently
+	// report an error. If the balancer hasn't received a single good resolver
+	// update yet, transition to TRANSIENT_FAILURE.
+	if b.state != connectivity.TransientFailure && b.addressList.size() > 0 {
+		if b.logger.V(2) {
+			b.logger.Infof("Ignoring resolver error because balancer is using a previous good update.")
+		}
+		return
+	}
+
+	b.updateBalancerState(balancer.State{
+		ConnectivityState: connectivity.TransientFailure,
+		Picker:            &picker{err: fmt.Errorf("name resolver error: %v", err)},
+	})
+}
+
+func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	b.cancelConnectionTimer()
+	if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 {
+		// Cleanup state pertaining to the previous resolver state.
+		// Treat an empty address list like an error by calling b.ResolverError.
+		b.closeSubConnsLocked()
+		b.addressList.updateAddrs(nil)
+		b.resolverErrorLocked(errors.New("produced zero addresses"))
+		return balancer.ErrBadResolverState
+	}
+	b.healthCheckingEnabled = state.ResolverState.Attributes.Value(enableHealthListenerKeyType{}) != nil
+	cfg, ok := state.BalancerConfig.(pfConfig)
+	if state.BalancerConfig != nil && !ok {
+		return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState)
+	}
+
+	if b.logger.V(2) {
+		b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState))
+	}
+
+	var newAddrs []resolver.Address
+	if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 {
+		// Perform the optional shuffling described in gRFC A62. The shuffling
+		// will change the order of endpoints but not touch the order of the
+		// addresses within each endpoint. - A61
+		if cfg.ShuffleAddressList {
+			endpoints = append([]resolver.Endpoint{}, endpoints...)
+			internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
+		}
+
+		// "Flatten the list by concatenating the ordered list of addresses for
+		// each of the endpoints, in order." - A61
+		for _, endpoint := range endpoints {
+			newAddrs = append(newAddrs, endpoint.Addresses...)
+		}
+	} else {
+		// Endpoints not set, process addresses until we migrate resolver
+		// emissions fully to Endpoints. The top channel does wrap emitted
+		// addresses with endpoints, however some balancers such as weighted
+		// target do not forward the corresponding correct endpoints down/split
+		// endpoints properly. Once all balancers correctly forward endpoints
+		// down, can delete this else conditional.
+		newAddrs = state.ResolverState.Addresses
+		if cfg.ShuffleAddressList {
+			// Shuffle the copied address list itself; in this branch
+			// endpoints is empty, so shuffling it would be a no-op.
+			newAddrs = append([]resolver.Address{}, newAddrs...)
+			internal.RandShuffle(len(newAddrs), func(i, j int) { newAddrs[i], newAddrs[j] = newAddrs[j], newAddrs[i] })
+		}
+	}
+
+	// If an address appears in multiple endpoints or in the same endpoint
+	// multiple times, we keep it only once. We will create only one SubConn
+	// for the address because an AddressMap is used to store SubConns.
+	// Not de-duplicating would result in attempting to connect to the same
+	// SubConn multiple times in the same pass. We don't want this.
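To make the two normalization steps below concrete, a small worked example with hypothetical addresses (a1 and a2 IPv4, b1 IPv6):

    in:                   [a1, a1, a2, b1]  // duplicate from overlapping endpoints
    deDupAddresses(in):   [a1, a2, b1]      // first occurrence wins
    interleaveAddresses:  [a1, b1, a2]      // families alternate, first family leads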
+ newAddrs = deDupAddresses(newAddrs) + newAddrs = interleaveAddresses(newAddrs) + + prevAddr := b.addressList.currentAddress() + prevSCData, found := b.subConns.Get(prevAddr) + prevAddrsCount := b.addressList.size() + isPrevRawConnectivityStateReady := found && prevSCData.(*scData).rawConnectivityState == connectivity.Ready + b.addressList.updateAddrs(newAddrs) + + // If the previous ready SubConn exists in new address list, + // keep this connection and don't create new SubConns. + if isPrevRawConnectivityStateReady && b.addressList.seekTo(prevAddr) { + return nil + } + + b.reconcileSubConnsLocked(newAddrs) + // If it's the first resolver update or the balancer was already READY + // (but the new address list does not contain the ready SubConn) or + // CONNECTING, enter CONNECTING. + // We may be in TRANSIENT_FAILURE due to a previous empty address list, + // we should still enter CONNECTING because the sticky TF behaviour + // mentioned in A62 applies only when the TRANSIENT_FAILURE is reported + // due to connectivity failures. + if isPrevRawConnectivityStateReady || b.state == connectivity.Connecting || prevAddrsCount == 0 { + // Start connection attempt at first address. + b.forceUpdateConcludedStateLocked(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + b.startFirstPassLocked() + } else if b.state == connectivity.TransientFailure { + // If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until + // we're READY. See A62. + b.startFirstPassLocked() + } + return nil +} + +// UpdateSubConnState is unused as a StateListener is always registered when +// creating SubConns. +func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state) +} + +func (b *pickfirstBalancer) Close() { + b.mu.Lock() + defer b.mu.Unlock() + b.closeSubConnsLocked() + b.cancelConnectionTimer() + b.state = connectivity.Shutdown +} + +// ExitIdle moves the balancer out of idle state. It can be called concurrently +// by the idlePicker and clientConn so access to variables should be +// synchronized. +func (b *pickfirstBalancer) ExitIdle() { + b.mu.Lock() + defer b.mu.Unlock() + if b.state == connectivity.Idle { + b.startFirstPassLocked() + } +} + +func (b *pickfirstBalancer) startFirstPassLocked() { + b.firstPass = true + b.numTF = 0 + // Reset the connection attempt record for existing SubConns. + for _, sd := range b.subConns.Values() { + sd.(*scData).connectionFailedInFirstPass = false + } + b.requestConnectionLocked() +} + +func (b *pickfirstBalancer) closeSubConnsLocked() { + for _, sd := range b.subConns.Values() { + sd.(*scData).subConn.Shutdown() + } + b.subConns = resolver.NewAddressMap() +} + +// deDupAddresses ensures that each address appears only once in the slice. +func deDupAddresses(addrs []resolver.Address) []resolver.Address { + seenAddrs := resolver.NewAddressMap() + retAddrs := []resolver.Address{} + + for _, addr := range addrs { + if _, ok := seenAddrs.Get(addr); ok { + continue + } + retAddrs = append(retAddrs, addr) + } + return retAddrs +} + +// interleaveAddresses interleaves addresses of both families (IPv4 and IPv6) +// as per RFC-8305 section 4. 
+// Whichever address family is first in the list is followed by an address of
+// the other address family; that is, if the first address in the list is IPv6,
+// then the first IPv4 address should be moved up in the list to be second in
+// the list. It doesn't support configuring "First Address Family Count", i.e.
+// there will always be a single member of the first address family at the
+// beginning of the interleaved list.
+// Addresses that are neither IPv4 nor IPv6 are treated as part of a third
+// "unknown" family for interleaving.
+// See: https://datatracker.ietf.org/doc/html/rfc8305#autoid-6
+func interleaveAddresses(addrs []resolver.Address) []resolver.Address {
+	familyAddrsMap := map[ipAddrFamily][]resolver.Address{}
+	interleavingOrder := []ipAddrFamily{}
+	for _, addr := range addrs {
+		family := addressFamily(addr.Addr)
+		if _, found := familyAddrsMap[family]; !found {
+			interleavingOrder = append(interleavingOrder, family)
+		}
+		familyAddrsMap[family] = append(familyAddrsMap[family], addr)
+	}
+
+	interleavedAddrs := make([]resolver.Address, 0, len(addrs))
+
+	for curFamilyIdx := 0; len(interleavedAddrs) < len(addrs); curFamilyIdx = (curFamilyIdx + 1) % len(interleavingOrder) {
+		// Some IP types may have fewer addresses than others, so we look for
+		// the next type that has a remaining member to add to the interleaved
+		// list.
+		family := interleavingOrder[curFamilyIdx]
+		remainingMembers := familyAddrsMap[family]
+		if len(remainingMembers) > 0 {
+			interleavedAddrs = append(interleavedAddrs, remainingMembers[0])
+			familyAddrsMap[family] = remainingMembers[1:]
+		}
+	}
+
+	return interleavedAddrs
+}
+
+// addressFamily returns the ipAddrFamily after parsing the address string.
+// If the address isn't of the format "ip-address:port", it returns
+// ipAddrFamilyUnknown. The address may be valid even if it's not an IP when
+// using a resolver like passthrough where the address may be a hostname in
+// some format that the dialer can resolve.
+func addressFamily(address string) ipAddrFamily {
+	// Parse the IP after removing the port.
+	host, _, err := net.SplitHostPort(address)
+	if err != nil {
+		return ipAddrFamilyUnknown
+	}
+	ip, err := netip.ParseAddr(host)
+	if err != nil {
+		return ipAddrFamilyUnknown
+	}
+	switch {
+	case ip.Is4() || ip.Is4In6():
+		return ipAddrFamilyV4
+	case ip.Is6():
+		return ipAddrFamilyV6
+	default:
+		return ipAddrFamilyUnknown
+	}
+}
+
+// reconcileSubConnsLocked updates the active subchannels based on a new address
+// list from the resolver. It does this by:
+//   - closing subchannels: any existing subchannels associated with addresses
+//     that are no longer in the updated list are shut down.
+//   - removing subchannels: entries for these closed subchannels are removed
+//     from the subchannel map.
+//
+// This ensures that the subchannel map accurately reflects the current set of
+// addresses received from the name resolver.
+func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) {
+	newAddrsMap := resolver.NewAddressMap()
+	for _, addr := range newAddrs {
+		newAddrsMap.Set(addr, true)
+	}
+
+	for _, oldAddr := range b.subConns.Keys() {
+		if _, ok := newAddrsMap.Get(oldAddr); ok {
+			continue
+		}
+		val, _ := b.subConns.Get(oldAddr)
+		val.(*scData).subConn.Shutdown()
+		b.subConns.Delete(oldAddr)
+	}
+}
+
+// shutdownRemainingLocked shuts down remaining SubConns. Called when a SubConn
+// becomes ready, which means that all other SubConns must be shut down.
+func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) { + b.cancelConnectionTimer() + for _, v := range b.subConns.Values() { + sd := v.(*scData) + if sd.subConn != selected.subConn { + sd.subConn.Shutdown() + } + } + b.subConns = resolver.NewAddressMap() + b.subConns.Set(selected.addr, selected) +} + +// requestConnectionLocked starts connecting on the subchannel corresponding to +// the current address. If no subchannel exists, one is created. If the current +// subchannel is in TransientFailure, a connection to the next address is +// attempted until a subchannel is found. +func (b *pickfirstBalancer) requestConnectionLocked() { + if !b.addressList.isValid() { + return + } + var lastErr error + for valid := true; valid; valid = b.addressList.increment() { + curAddr := b.addressList.currentAddress() + sd, ok := b.subConns.Get(curAddr) + if !ok { + var err error + // We want to assign the new scData to sd from the outer scope, + // hence we can't use := below. + sd, err = b.newSCData(curAddr) + if err != nil { + // This should never happen, unless the clientConn is being shut + // down. + if b.logger.V(2) { + b.logger.Infof("Failed to create a subConn for address %v: %v", curAddr.String(), err) + } + // Do nothing, the LB policy will be closed soon. + return + } + b.subConns.Set(curAddr, sd) + } + + scd := sd.(*scData) + switch scd.rawConnectivityState { + case connectivity.Idle: + scd.subConn.Connect() + b.scheduleNextConnectionLocked() + return + case connectivity.TransientFailure: + // The SubConn is being re-used and failed during a previous pass + // over the addressList. It has not completed backoff yet. + // Mark it as having failed and try the next address. + scd.connectionFailedInFirstPass = true + lastErr = scd.lastErr + continue + case connectivity.Connecting: + // Wait for the connection attempt to complete or the timer to fire + // before attempting the next address. + b.scheduleNextConnectionLocked() + return + default: + b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", scd.rawConnectivityState) + return + + } + } + + // All the remaining addresses in the list are in TRANSIENT_FAILURE, end the + // first pass if possible. + b.endFirstPassIfPossibleLocked(lastErr) +} + +func (b *pickfirstBalancer) scheduleNextConnectionLocked() { + b.cancelConnectionTimer() + if !b.addressList.hasNext() { + return + } + curAddr := b.addressList.currentAddress() + cancelled := false // Access to this is protected by the balancer's mutex. + closeFn := internal.TimeAfterFunc(connectionDelayInterval, func() { + b.mu.Lock() + defer b.mu.Unlock() + // If the scheduled task is cancelled while acquiring the mutex, return. + if cancelled { + return + } + if b.logger.V(2) { + b.logger.Infof("Happy Eyeballs timer expired while waiting for connection to %q.", curAddr.Addr) + } + if b.addressList.increment() { + b.requestConnectionLocked() + } + }) + // Access to the cancellation callback held by the balancer is guarded by + // the balancer's mutex, so it's safe to set the boolean from the callback. + b.cancelConnectionTimer = sync.OnceFunc(func() { + cancelled = true + closeFn() + }) +} + +func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) { + b.mu.Lock() + defer b.mu.Unlock() + oldState := sd.rawConnectivityState + sd.rawConnectivityState = newState.ConnectivityState + // Previously relevant SubConns can still callback with state updates. 
+	// To prevent pickers from returning these obsolete SubConns, this logic
+	// is included to check if the current list of active SubConns includes this
+	// SubConn.
+	if !b.isActiveSCData(sd) {
+		return
+	}
+	if newState.ConnectivityState == connectivity.Shutdown {
+		sd.effectiveState = connectivity.Shutdown
+		return
+	}
+
+	// Record a connection attempt when exiting CONNECTING.
+	if newState.ConnectivityState == connectivity.TransientFailure {
+		sd.connectionFailedInFirstPass = true
+		connectionAttemptsFailedMetric.Record(b.metricsRecorder, 1, b.target)
+	}
+
+	if newState.ConnectivityState == connectivity.Ready {
+		connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target)
+		b.shutdownRemainingLocked(sd)
+		if !b.addressList.seekTo(sd.addr) {
+			// This should not fail as we should have only one SubConn after
+			// entering READY. The SubConn should be present in the addressList.
+			b.logger.Errorf("Address %q not found in address list %v", sd.addr, b.addressList.addresses)
+			return
+		}
+		if !b.healthCheckingEnabled {
+			if b.logger.V(2) {
+				b.logger.Infof("SubConn %p reported connectivity state READY and the health listener is disabled. Transitioning SubConn to READY.", sd.subConn)
+			}
+
+			sd.effectiveState = connectivity.Ready
+			b.updateBalancerState(balancer.State{
+				ConnectivityState: connectivity.Ready,
+				Picker:            &picker{result: balancer.PickResult{SubConn: sd.subConn}},
+			})
+			return
+		}
+		if b.logger.V(2) {
+			b.logger.Infof("SubConn %p reported connectivity state READY. Registering health listener.", sd.subConn)
+		}
+		// Send a CONNECTING update to take the SubConn out of sticky-TF if
+		// required.
+		sd.effectiveState = connectivity.Connecting
+		b.updateBalancerState(balancer.State{
+			ConnectivityState: connectivity.Connecting,
+			Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
+		})
+		sd.subConn.RegisterHealthListener(func(scs balancer.SubConnState) {
+			b.updateSubConnHealthState(sd, scs)
+		})
+		return
+	}
+
+	// If the LB policy is READY, and it receives a subchannel state change,
+	// it means that the READY subchannel has failed.
+	// A SubConn can also transition from CONNECTING directly to IDLE when
+	// a transport is successfully created, but the connection fails
+	// before the SubConn can send the notification for READY. We treat
+	// this as a successful connection and transition to IDLE.
+	// TODO: https://github.com/grpc/grpc-go/issues/7862 - Remove the second
+	// part of the if condition below once the issue is fixed.
+	if oldState == connectivity.Ready || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) {
+		// Once a transport fails, the balancer enters IDLE and starts from
+		// the first address when the picker is used.
+		b.shutdownRemainingLocked(sd)
+		sd.effectiveState = newState.ConnectivityState
+		// A READY SubConn can be interspersed between CONNECTING and IDLE;
+		// account for that.
+		if oldState == connectivity.Connecting {
+			// A known issue (https://github.com/grpc/grpc-go/issues/7862)
+			// causes a race that prevents the READY state change notification.
+			// This works around it.
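A note on the metric calls in this function: the counter handles registered at the top of this file record through the channel-provided MetricsRecorder, and the label values passed to Record must line up positionally with the descriptor's Labels (here the single grpc.target label), as in:

    connectionAttemptsFailedMetric.Record(b.metricsRecorder, 1, b.target)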
+ connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target) + } + disconnectionsMetric.Record(b.metricsRecorder, 1, b.target) + b.addressList.reset() + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Idle, + Picker: &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)}, + }) + return + } + + if b.firstPass { + switch newState.ConnectivityState { + case connectivity.Connecting: + // The effective state can be in either IDLE, CONNECTING or + // TRANSIENT_FAILURE. If it's TRANSIENT_FAILURE, stay in + // TRANSIENT_FAILURE until it's READY. See A62. + if sd.effectiveState != connectivity.TransientFailure { + sd.effectiveState = connectivity.Connecting + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + } + case connectivity.TransientFailure: + sd.lastErr = newState.ConnectionError + sd.effectiveState = connectivity.TransientFailure + // Since we're re-using common SubConns while handling resolver + // updates, we could receive an out of turn TRANSIENT_FAILURE from + // a pass over the previous address list. Happy Eyeballs will also + // cause out of order updates to arrive. + + if curAddr := b.addressList.currentAddress(); equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) { + b.cancelConnectionTimer() + if b.addressList.increment() { + b.requestConnectionLocked() + return + } + } + + // End the first pass if we've seen a TRANSIENT_FAILURE from all + // SubConns once. + b.endFirstPassIfPossibleLocked(newState.ConnectionError) + } + return + } + + // We have finished the first pass, keep re-connecting failing SubConns. + switch newState.ConnectivityState { + case connectivity.TransientFailure: + b.numTF = (b.numTF + 1) % b.subConns.Len() + sd.lastErr = newState.ConnectionError + if b.numTF%b.subConns.Len() == 0 { + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: newState.ConnectionError}, + }) + } + // We don't need to request re-resolution since the SubConn already + // does that before reporting TRANSIENT_FAILURE. + // TODO: #7534 - Move re-resolution requests from SubConn into + // pick_first. + case connectivity.Idle: + sd.subConn.Connect() + } +} + +// endFirstPassIfPossibleLocked ends the first happy-eyeballs pass if all the +// addresses are tried and their SubConns have reported a failure. +func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) { + // An optimization to avoid iterating over the entire SubConn map. + if b.addressList.isValid() { + return + } + // Connect() has been called on all the SubConns. The first pass can be + // ended if all the SubConns have reported a failure. + for _, v := range b.subConns.Values() { + sd := v.(*scData) + if !sd.connectionFailedInFirstPass { + return + } + } + b.firstPass = false + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: lastErr}, + }) + // Start re-connecting all the SubConns that are already in IDLE. 
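+ // SubConns still in TRANSIENT_FAILURE re-enter IDLE on their own once
+ // their backoff expires; the IDLE case in updateSubConnState then calls
+ // Connect() on them as well.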
+ for _, v := range b.subConns.Values() {
+ sd := v.(*scData)
+ if sd.rawConnectivityState == connectivity.Idle {
+ sd.subConn.Connect()
+ }
+ }
+}
+
+func (b *pickfirstBalancer) isActiveSCData(sd *scData) bool {
+ activeSD, found := b.subConns.Get(sd.addr)
+ return found && activeSD == sd
+}
+
+func (b *pickfirstBalancer) updateSubConnHealthState(sd *scData, state balancer.SubConnState) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ // Previously relevant SubConns can still call back with state updates.
+ // To prevent pickers from returning these obsolete SubConns, this logic
+ // is included to check if the current list of active SubConns includes
+ // this SubConn.
+ if !b.isActiveSCData(sd) {
+ return
+ }
+ sd.effectiveState = state.ConnectivityState
+ switch state.ConnectivityState {
+ case connectivity.Ready:
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.Ready,
+ Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}},
+ })
+ case connectivity.TransientFailure:
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.TransientFailure,
+ Picker: &picker{err: fmt.Errorf("pickfirst: health check failure: %v", state.ConnectionError)},
+ })
+ case connectivity.Connecting:
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.Connecting,
+ Picker: &picker{err: balancer.ErrNoSubConnAvailable},
+ })
+ default:
+ b.logger.Errorf("Got unexpected health update for SubConn %p: %v", sd.subConn, state)
+ }
+}
+
+// updateBalancerState stores the state reported to the channel and calls
+// ClientConn.UpdateState(). As an optimization, it avoids sending duplicate
+// updates to the channel.
+func (b *pickfirstBalancer) updateBalancerState(newState balancer.State) {
+ // In case of TRANSIENT_FAILURE, allow the picker to be updated so that the
+ // reported connectivity error stays current; in all other cases don't send
+ // duplicate state updates.
+ if newState.ConnectivityState == b.state && b.state != connectivity.TransientFailure {
+ return
+ }
+ b.forceUpdateConcludedStateLocked(newState)
+}
+
+// forceUpdateConcludedStateLocked stores the state reported to the channel and
+// calls ClientConn.UpdateState().
+// A separate function is defined to force-update the ClientConn state because
+// the channel doesn't assume that LB policies start in CONNECTING; it relies
+// on the LB policy to send an initial CONNECTING update.
+func (b *pickfirstBalancer) forceUpdateConcludedStateLocked(newState balancer.State) {
+ b.state = newState.ConnectivityState
+ b.cc.UpdateState(newState)
+}
+
+type picker struct {
+ result balancer.PickResult
+ err error
+}
+
+func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
+ return p.result, p.err
+}
+
+// idlePicker is used when the SubConn is IDLE and kicks the SubConn into
+// CONNECTING when Pick is called.
+type idlePicker struct {
+ exitIdle func()
+}
+
+func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
+ i.exitIdle()
+ return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
+}
+
+// addressList manages sequentially iterating over addresses present in a list
+// of endpoints. It provides a one-dimensional view of the addresses present in
+// the endpoints.
+// This type is not safe for concurrent access.
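+//
+// A minimal usage sketch (illustrative only; tryConnect is a hypothetical
+// helper, not part of this package):
+//
+//	al := addressList{}
+//	al.updateAddrs(flatAddrs) // places the cursor at index 0
+//	for al.isValid() {
+//		tryConnect(al.currentAddress())
+//		if !al.increment() {
+//			break // moved past the end of the list
+//		}
+//	}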
+type addressList struct {
+ addresses []resolver.Address
+ idx int
+}
+
+func (al *addressList) isValid() bool {
+ return al.idx < len(al.addresses)
+}
+
+func (al *addressList) size() int {
+ return len(al.addresses)
+}
+
+// increment moves to the next index in the address list.
+// This method returns false if it went off the list, true otherwise.
+func (al *addressList) increment() bool {
+ if !al.isValid() {
+ return false
+ }
+ al.idx++
+ return al.idx < len(al.addresses)
+}
+
+// currentAddress returns the current address pointed to in the addressList.
+// If the list is in an invalid state, it returns an empty address instead.
+func (al *addressList) currentAddress() resolver.Address {
+ if !al.isValid() {
+ return resolver.Address{}
+ }
+ return al.addresses[al.idx]
+}
+
+func (al *addressList) reset() {
+ al.idx = 0
+}
+
+func (al *addressList) updateAddrs(addrs []resolver.Address) {
+ al.addresses = addrs
+ al.reset()
+}
+
+// seekTo returns false if the needle was not found and the current index was
+// left unchanged.
+func (al *addressList) seekTo(needle resolver.Address) bool {
+ for ai, addr := range al.addresses {
+ if !equalAddressIgnoringBalAttributes(&addr, &needle) {
+ continue
+ }
+ al.idx = ai
+ return true
+ }
+ return false
+}
+
+// hasNext returns true if incrementing the addressList keeps it within the
+// list, i.e. the current address is not the last one. If the list has already
+// moved past the end, it returns false.
+func (al *addressList) hasNext() bool {
+ if !al.isValid() {
+ return false
+ }
+ return al.idx+1 < len(al.addresses)
+}
+
+// equalAddressIgnoringBalAttributes returns true if a and b are considered
+// equal. This is different from the Equal method on the resolver.Address type
+// which considers all fields to determine equality. Here, we only consider
+// fields that are meaningful to the SubConn.
+func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
+ return a.Addr == b.Addr && a.ServerName == b.ServerName &&
+ a.Attributes.Equal(b.Attributes) &&
+ a.Metadata == b.Metadata
+}
diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
index 260255d31b6..80a42d22510 100644
--- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
+++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
@@ -22,7 +22,7 @@ package roundrobin
 import (
- "math/rand"
+ rand "math/rand/v2"
 "sync/atomic"
 "google.golang.org/grpc/balancer"
@@ -60,7 +60,7 @@ func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
 // Start at a random index, as the same RR balancer rebuilds a new
 // picker when SubConn states change, and we don't want to apply excess
 // load to the first server in the list.
- next: uint32(rand.Intn(len(scs))),
+ next: uint32(rand.IntN(len(scs))),
 }
}
diff --git a/vendor/google.golang.org/grpc/balancer/subconn.go b/vendor/google.golang.org/grpc/balancer/subconn.go
new file mode 100644
index 00000000000..ea27c4fa767
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer/subconn.go
@@ -0,0 +1,134 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package balancer
+
+import (
+ "google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/internal"
+ "google.golang.org/grpc/resolver"
+)
+
+// A SubConn represents a single connection to a gRPC backend service.
+//
+// All SubConns start in IDLE, and will not try to connect. To trigger a
+// connection attempt, Balancers must call Connect.
+//
+// If the connection attempt fails, the SubConn will transition to
+// TRANSIENT_FAILURE for a backoff period, and then return to IDLE. If the
+// connection attempt succeeds, it will transition to READY.
+//
+// If a READY SubConn becomes disconnected, the SubConn will transition to IDLE.
+//
+// If a connection re-enters IDLE, Balancers must call Connect again to trigger
+// a new connection attempt.
+//
+// Each SubConn contains a list of addresses. gRPC will try to connect to the
+// addresses in sequence, and stop trying the remainder once the first
+// connection is successful. However, this behavior is deprecated. SubConns
+// should only use a single address.
+//
+// NOTICE: This interface is intended to be implemented by gRPC, or intercepted
+// by custom load balancing policies. Users should not need their own complete
+// implementation of this interface -- they should always delegate to a SubConn
+// returned by ClientConn.NewSubConn() by embedding it in their implementations.
+// An embedded SubConn must never be nil, or runtime panics will occur.
+type SubConn interface {
+ // UpdateAddresses updates the addresses used in this SubConn.
+ // gRPC checks if the currently connected address is still in the new list.
+ // If it's in the list, the connection will be kept.
+ // If it's not in the list, the connection will gracefully close, and
+ // a new connection will be created.
+ //
+ // This will trigger a state transition for the SubConn.
+ //
+ // Deprecated: this method will be removed. Create new SubConns for new
+ // addresses instead.
+ UpdateAddresses([]resolver.Address)
+ // Connect starts connecting for this SubConn.
+ Connect()
+ // GetOrBuildProducer returns a reference to the existing Producer for this
+ // ProducerBuilder in this SubConn, or, if one does not currently exist,
+ // creates a new one and returns it. Returns a close function which may be
+ // called when the Producer is no longer needed. Otherwise the producer
+ // will automatically be closed upon connection loss or subchannel close.
+ // Should only be called on a SubConn in state Ready. Otherwise the
+ // producer will be unable to create streams.
+ GetOrBuildProducer(ProducerBuilder) (p Producer, close func())
+ // Shutdown shuts down the SubConn gracefully. Any started RPCs will be
+ // allowed to complete. No future calls should be made on the SubConn.
+ // One final state update will be delivered to the StateListener (or
+ // UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to
+ // indicate the shutdown operation. This may be delivered before
+ // in-progress RPCs are complete and the actual connection is closed.
+ Shutdown() + // RegisterHealthListener registers a health listener that receives health + // updates for a Ready SubConn. Only one health listener can be registered + // at a time. A health listener should be registered each time the SubConn's + // connectivity state changes to READY. Registering a health listener when + // the connectivity state is not READY may result in undefined behaviour. + // This method must not be called synchronously while handling an update + // from a previously registered health listener. + RegisterHealthListener(func(SubConnState)) + // EnforceSubConnEmbedding is included to force implementers to embed + // another implementation of this interface, allowing gRPC to add methods + // without breaking users. + internal.EnforceSubConnEmbedding +} + +// A ProducerBuilder is a simple constructor for a Producer. It is used by the +// SubConn to create producers when needed. +type ProducerBuilder interface { + // Build creates a Producer. The first parameter is always a + // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the + // associated SubConn), but is declared as `any` to avoid a dependency + // cycle. Build also returns a close function that will be called when all + // references to the Producer have been given up for a SubConn, or when a + // connectivity state change occurs on the SubConn. The close function + // should always block until all asynchronous cleanup work is completed. + Build(grpcClientConnInterface any) (p Producer, close func()) +} + +// SubConnState describes the state of a SubConn. +type SubConnState struct { + // ConnectivityState is the connectivity state of the SubConn. + ConnectivityState connectivity.State + // ConnectionError is set if the ConnectivityState is TransientFailure, + // describing the reason the SubConn failed. Otherwise, it is nil. + ConnectionError error + // connectedAddr contains the connected address when ConnectivityState is + // Ready. Otherwise, it is indeterminate. + connectedAddress resolver.Address +} + +// connectedAddress returns the connected address for a SubConnState. The +// address is only valid if the state is READY. +func connectedAddress(scs SubConnState) resolver.Address { + return scs.connectedAddress +} + +// setConnectedAddress sets the connected address for a SubConnState. +func setConnectedAddress(scs *SubConnState, addr resolver.Address) { + scs.connectedAddress = addr +} + +// A Producer is a type shared among potentially many consumers. It is +// associated with a SubConn, and an implementation will typically contain +// other methods to provide additional functionality, e.g. configuration or +// subscription registration. +type Producer any diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go index 4161fdf47a8..905817b5fc7 100644 --- a/vendor/google.golang.org/grpc/balancer_wrapper.go +++ b/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -24,13 +24,18 @@ import ( "sync" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" + "google.golang.org/grpc/status" ) +var setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address)) + // ccBalancerWrapper sits between the ClientConn and the Balancer. 
//
// ccBalancerWrapper implements methods corresponding to the ones on the
@@ -79,6 +84,7 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper {
 CustomUserAgent: cc.dopts.copts.UserAgent,
 ChannelzParent: cc.channelz,
 Target: cc.parsedTarget,
+ MetricsRecorder: cc.metricsRecorderList,
 },
 serializer: grpcsync.NewCallbackSerializer(ctx),
 serializerCancel: cancel,
@@ -92,7 +98,7 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper {
// it is safe to call into the balancer here.
func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
 errCh := make(chan error)
- ok := ccb.serializer.Schedule(func(ctx context.Context) {
+ uccs := func(ctx context.Context) {
 defer close(errCh)
 if ctx.Err() != nil || ccb.balancer == nil {
 return
@@ -107,17 +113,23 @@ func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat
 logger.Infof("error from balancer.UpdateClientConnState: %v", err)
 }
 errCh <- err
- })
- if !ok {
- return nil
 }
+ onFailure := func() { close(errCh) }
+
+ // UpdateClientConnState can race with Close, and when the latter wins, the
+ // serializer is closed, and the attempt to schedule the callback will fail.
+ // It is acceptable to ignore this failure. But since we want to handle the
+ // state update in a blocking fashion (when we successfully schedule the
+ // callback), we have to use the ScheduleOr method and not the TrySchedule
+ // method on the serializer.
+ ccb.serializer.ScheduleOr(uccs, onFailure)
 return <-errCh
}

// resolverError is invoked by grpc to push a resolver error to the underlying
// balancer. The call to the balancer is executed from the serializer.
func (ccb *ccBalancerWrapper) resolverError(err error) {
- ccb.serializer.Schedule(func(ctx context.Context) {
+ ccb.serializer.TrySchedule(func(ctx context.Context) {
 if ctx.Err() != nil || ccb.balancer == nil {
 return
@@ -133,7 +145,7 @@ func (ccb *ccBalancerWrapper) close() {
 ccb.closed = true
 ccb.mu.Unlock()
 channelz.Info(logger, ccb.cc.channelz, "ccBalancerWrapper: closing")
- ccb.serializer.Schedule(func(context.Context) {
+ ccb.serializer.TrySchedule(func(context.Context) {
 if ccb.balancer == nil {
 return
@@ -145,7 +157,7 @@ func (ccb *ccBalancerWrapper) close() {

// exitIdle invokes the balancer's exitIdle method in the serializer.
func (ccb *ccBalancerWrapper) exitIdle() {
- ccb.serializer.Schedule(func(ctx context.Context) {
+ ccb.serializer.TrySchedule(func(ctx context.Context) {
 if ctx.Err() != nil || ccb.balancer == nil {
 return
@@ -177,12 +189,13 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer
 ac: ac,
 producers: make(map[balancer.ProducerBuilder]*refCountedProducer),
 stateListener: opts.StateListener,
+ healthData: newHealthData(connectivity.Idle),
 }
 ac.acbw = acbw
 return acbw, nil
}

-func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
+func (ccb *ccBalancerWrapper) RemoveSubConn(balancer.SubConn) {
 // The graceful switch balancer will never call this.
 logger.Error("ccb RemoveSubConn called unexpectedly")
}
@@ -242,25 +255,70 @@ func (ccb *ccBalancerWrapper) Target() string {
// acBalancerWrapper is a wrapper on top of ac for balancers.
// It implements balancer.SubConn interface.
type acBalancerWrapper struct { + internal.EnforceSubConnEmbedding ac *addrConn // read-only ccb *ccBalancerWrapper // read-only stateListener func(balancer.SubConnState) - mu sync.Mutex - producers map[balancer.ProducerBuilder]*refCountedProducer + producersMu sync.Mutex + producers map[balancer.ProducerBuilder]*refCountedProducer + + // Access to healthData is protected by healthMu. + healthMu sync.Mutex + // healthData is stored as a pointer to detect when the health listener is + // dropped or updated. This is required as closures can't be compared for + // equality. + healthData *healthData +} + +// healthData holds data related to health state reporting. +type healthData struct { + // connectivityState stores the most recent connectivity state delivered + // to the LB policy. This is stored to avoid sending updates when the + // SubConn has already exited connectivity state READY. + connectivityState connectivity.State +} + +func newHealthData(s connectivity.State) *healthData { + return &healthData{connectivityState: s} } // updateState is invoked by grpc to push a subConn state update to the // underlying balancer. -func (acbw *acBalancerWrapper) updateState(s connectivity.State, err error) { - acbw.ccb.serializer.Schedule(func(ctx context.Context) { +func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolver.Address, err error) { + acbw.ccb.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil || acbw.ccb.balancer == nil { return } + // Invalidate all producers on any state change. + acbw.closeProducers() + // Even though it is optional for balancers, gracefulswitch ensures // opts.StateListener is set, so this cannot ever be nil. // TODO: delete this comment when UpdateSubConnState is removed. - acbw.stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) + scs := balancer.SubConnState{ConnectivityState: s, ConnectionError: err} + if s == connectivity.Ready { + setConnectedAddress(&scs, curAddr) + } + // Invalidate the health listener by updating the healthData. + acbw.healthMu.Lock() + // A race may occur if a health listener is registered soon after the + // connectivity state is set but before the stateListener is called. + // Two cases may arise: + // 1. The new state is not READY: RegisterHealthListener has checks to + // ensure no updates are sent when the connectivity state is not + // READY. + // 2. The new state is READY: This means that the old state wasn't Ready. + // The RegisterHealthListener API mentions that a health listener + // must not be registered when a SubConn is not ready to avoid such + // races. When this happens, the LB policy would get health updates + // on the old listener. When the LB policy registers a new listener + // on receiving the connectivity update, the health updates will be + // sent to the new health listener. + acbw.healthData = newHealthData(scs.ConnectivityState) + acbw.healthMu.Unlock() + + acbw.stateListener(scs) }) } @@ -277,6 +335,7 @@ func (acbw *acBalancerWrapper) Connect() { } func (acbw *acBalancerWrapper) Shutdown() { + acbw.closeProducers() acbw.ccb.cc.removeAddrConn(acbw.ac, errConnDrain) } @@ -284,9 +343,10 @@ func (acbw *acBalancerWrapper) Shutdown() { // ready, blocks until it is or ctx expires. Returns an error when the context // expires or the addrConn is shut down. 
func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { - transport, err := acbw.ac.getTransport(ctx) - if err != nil { - return nil, err + transport := acbw.ac.getReadyTransport() + if transport == nil { + return nil, status.Errorf(codes.Unavailable, "SubConn state is not Ready") + } return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...) } @@ -311,15 +371,15 @@ type refCountedProducer struct { } func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) { - acbw.mu.Lock() - defer acbw.mu.Unlock() + acbw.producersMu.Lock() + defer acbw.producersMu.Unlock() // Look up existing producer from this builder. pData := acbw.producers[pb] if pData == nil { // Not found; create a new one and add it to the producers map. - p, close := pb.Build(acbw) - pData = &refCountedProducer{producer: p, close: close} + p, closeFn := pb.Build(acbw) + pData = &refCountedProducer{producer: p, close: closeFn} acbw.producers[pb] = pData } // Account for this new reference. @@ -329,13 +389,64 @@ func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) ( // and delete the refCountedProducer from the map if the total reference // count goes to zero. unref := func() { - acbw.mu.Lock() + acbw.producersMu.Lock() + // If closeProducers has already closed this producer instance, refs is + // set to 0, so the check after decrementing will never pass, and the + // producer will not be double-closed. pData.refs-- if pData.refs == 0 { defer pData.close() // Run outside the acbw mutex delete(acbw.producers, pb) } - acbw.mu.Unlock() + acbw.producersMu.Unlock() } return pData.producer, grpcsync.OnceFunc(unref) } + +func (acbw *acBalancerWrapper) closeProducers() { + acbw.producersMu.Lock() + defer acbw.producersMu.Unlock() + for pb, pData := range acbw.producers { + pData.refs = 0 + pData.close() + delete(acbw.producers, pb) + } +} + +// RegisterHealthListener accepts a health listener from the LB policy. It sends +// updates to the health listener as long as the SubConn's connectivity state +// doesn't change and a new health listener is not registered. To invalidate +// the currently registered health listener, acbw updates the healthData. If a +// nil listener is registered, the active health listener is dropped. +func (acbw *acBalancerWrapper) RegisterHealthListener(listener func(balancer.SubConnState)) { + acbw.healthMu.Lock() + defer acbw.healthMu.Unlock() + // listeners should not be registered when the connectivity state + // isn't Ready. This may happen when the balancer registers a listener + // after the connectivityState is updated, but before it is notified + // of the update. + if acbw.healthData.connectivityState != connectivity.Ready { + return + } + // Replace the health data to stop sending updates to any previously + // registered health listeners. + hd := newHealthData(connectivity.Ready) + acbw.healthData = hd + if listener == nil { + return + } + + acbw.ccb.serializer.TrySchedule(func(ctx context.Context) { + if ctx.Err() != nil || acbw.ccb.balancer == nil { + return + } + // Don't send updates if a new listener is registered. 
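+ // healthData is replaced wholesale (never mutated) on every
+ // connectivity change and on every RegisterHealthListener call, so
+ // comparing pointers below is sufficient to detect staleness.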
+ acbw.healthMu.Lock() + defer acbw.healthMu.Unlock() + curHD := acbw.healthData + if curHD != hd { + return + } + listener(balancer.SubConnState{ConnectivityState: connectivity.Ready}) + }) +} diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 63c639e4fe9..9e9d0806995 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,8 +18,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 +// protoc-gen-go v1.35.1 +// protoc v5.27.1 // source: grpc/binlog/v1/binarylog.proto package grpc_binarylog_v1 @@ -274,11 +274,9 @@ type GrpcLogEntry struct { func (x *GrpcLogEntry) Reset() { *x = GrpcLogEntry{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GrpcLogEntry) String() string { @@ -289,7 +287,7 @@ func (*GrpcLogEntry) ProtoMessage() {} func (x *GrpcLogEntry) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -440,11 +438,9 @@ type ClientHeader struct { func (x *ClientHeader) Reset() { *x = ClientHeader{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ClientHeader) String() string { @@ -455,7 +451,7 @@ func (*ClientHeader) ProtoMessage() {} func (x *ClientHeader) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -509,11 +505,9 @@ type ServerHeader struct { func (x *ServerHeader) Reset() { *x = ServerHeader{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServerHeader) String() string { @@ -524,7 +518,7 @@ func (*ServerHeader) ProtoMessage() {} func (x *ServerHeader) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -565,11 +559,9 @@ type Trailer struct { func (x *Trailer) Reset() { *x = Trailer{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Trailer) String() string { @@ -580,7 +572,7 @@ func (*Trailer) ProtoMessage() {} func (x *Trailer) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -638,11 +630,9 @@ type Message struct { func (x *Message) Reset() { *x = Message{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Message) String() string { @@ -653,7 +643,7 @@ func (*Message) ProtoMessage() {} func (x *Message) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -713,11 +703,9 @@ type Metadata struct { func (x *Metadata) Reset() { *x = Metadata{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Metadata) String() string { @@ -728,7 +716,7 @@ func (*Metadata) ProtoMessage() {} func (x *Metadata) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -762,11 +750,9 @@ type MetadataEntry struct { func (x *MetadataEntry) Reset() { *x = MetadataEntry{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MetadataEntry) String() string { @@ -777,7 +763,7 @@ func (*MetadataEntry) ProtoMessage() {} func (x *MetadataEntry) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -820,11 +806,9 @@ type Address struct { func (x *Address) Reset() { *x = Address{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Address) String() string { @@ -835,7 +819,7 @@ func (*Address) ProtoMessage() {} func (x *Address) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if 
ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1015,7 +999,7 @@ func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte { var file_grpc_binlog_v1_binarylog_proto_enumTypes = make([]protoimpl.EnumInfo, 3) var file_grpc_binlog_v1_binarylog_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_grpc_binlog_v1_binarylog_proto_goTypes = []interface{}{ +var file_grpc_binlog_v1_binarylog_proto_goTypes = []any{ (GrpcLogEntry_EventType)(0), // 0: grpc.binarylog.v1.GrpcLogEntry.EventType (GrpcLogEntry_Logger)(0), // 1: grpc.binarylog.v1.GrpcLogEntry.Logger (Address_Type)(0), // 2: grpc.binarylog.v1.Address.Type @@ -1057,105 +1041,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { if File_grpc_binlog_v1_binarylog_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GrpcLogEntry); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ClientHeader); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServerHeader); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Trailer); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Message); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Metadata); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MetadataEntry); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Address); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []any{ (*GrpcLogEntry_ClientHeader)(nil), (*GrpcLogEntry_ServerHeader)(nil), (*GrpcLogEntry_Message)(nil), diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 423be7b43b0..4f57b55434f 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -24,6 +24,7 @@ import ( "fmt" "math" "net/url" + "slices" "strings" "sync" "sync/atomic" @@ -39,6 +40,7 @@ import ( 
"google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/idle" iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/stats" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" @@ -194,8 +196,11 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz) cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers) + cc.metricsRecorderList = stats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers) + cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc. cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout) + return cc, nil } @@ -590,13 +595,14 @@ type ClientConn struct { cancel context.CancelFunc // Cancelled on close. // The following are initialized at dial time, and are read-only after that. - target string // User's dial target. - parsedTarget resolver.Target // See initParsedTargetAndResolverBuilder(). - authority string // See initAuthority(). - dopts dialOptions // Default and user specified dial options. - channelz *channelz.Channel // Channelz object. - resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder(). - idlenessMgr *idle.Manager + target string // User's dial target. + parsedTarget resolver.Target // See initParsedTargetAndResolverBuilder(). + authority string // See initAuthority(). + dopts dialOptions // Default and user specified dial options. + channelz *channelz.Channel // Channelz object. + resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder(). + idlenessMgr *idle.Manager + metricsRecorderList *stats.MetricsRecorderList // The following provide their own synchronization, and therefore don't // require cc.mu to be held to access them. @@ -626,11 +632,6 @@ type ClientConn struct { // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. A true value is returned in former case and false in latter. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool { ch := cc.csMgr.getNotifyChan() if cc.csMgr.getState() != sourceState { @@ -645,11 +646,6 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec } // GetState returns the connectivity.State of ClientConn. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a later -// release. func (cc *ClientConn) GetState() connectivity.State { return cc.csMgr.getState() } @@ -779,10 +775,7 @@ func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) } } - var balCfg serviceconfig.LoadBalancingConfig - if cc.sc != nil && cc.sc.lbConfig != nil { - balCfg = cc.sc.lbConfig - } + balCfg := cc.sc.lbConfig bw := cc.balancerWrapper cc.mu.Unlock() @@ -812,17 +805,11 @@ func (cc *ClientConn) applyFailingLBLocked(sc *serviceconfig.ParseResult) { cc.csMgr.updateState(connectivity.TransientFailure) } -// Makes a copy of the input addresses slice and clears out the balancer -// attributes field. Addresses are passed during subconn creation and address -// update operations. 
In both cases, we will clear the balancer attributes by
-// calling this function, and therefore we will be able to use the Equal method
-// provided by the resolver.Address type for comparison.
-func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Address {
+// Makes a copy of the input addresses slice. Addresses are passed during
+// subconn creation and address update operations.
+func copyAddresses(in []resolver.Address) []resolver.Address {
 out := make([]resolver.Address, len(in))
- for i := range in {
- out[i] = in[i]
- out[i].BalancerAttributes = nil
- }
+ copy(out, in)
 return out
}
@@ -837,12 +824,11 @@ func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer.
 ac := &addrConn{
 state: connectivity.Idle,
 cc: cc,
- addrs: copyAddressesWithoutBalancerAttributes(addrs),
+ addrs: copyAddresses(addrs),
 scopts: opts,
 dopts: cc.dopts,
 channelz: channelz.RegisterSubChannel(cc.channelz, ""),
 resetBackoff: make(chan struct{}),
- stateChan: make(chan struct{}),
 }
 ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
 // Start with our address set to the first address; this may be updated if
@@ -918,28 +904,29 @@ func (ac *addrConn) connect() error {
 ac.mu.Unlock()
 return nil
 }
- ac.mu.Unlock()
- ac.resetTransport()
+ ac.resetTransportAndUnlock()
 return nil
}

-func equalAddresses(a, b []resolver.Address) bool {
- if len(a) != len(b) {
- return false
- }
- for i, v := range a {
- if !v.Equal(b[i]) {
- return false
- }
- }
- return true
+// equalAddressIgnoringBalAttributes returns true if a and b are considered equal.
+// This is different from the Equal method on the resolver.Address type which
+// considers all fields to determine equality. Here, we only consider fields
+// that are meaningful to the subConn.
+func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
+ return a.Addr == b.Addr && a.ServerName == b.ServerName &&
+ a.Attributes.Equal(b.Attributes) &&
+ a.Metadata == b.Metadata
+}
+
+func equalAddressesIgnoringBalAttributes(a, b []resolver.Address) bool {
+ return slices.EqualFunc(a, b, func(a, b resolver.Address) bool { return equalAddressIgnoringBalAttributes(&a, &b) })
}

// updateAddrs updates ac.addrs with the new addresses list and handles active
// connections or connection attempts.
func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
- addrs = copyAddressesWithoutBalancerAttributes(addrs)
+ addrs = copyAddresses(addrs)
 limit := len(addrs)
 if limit > 5 {
 limit = 5
@@ -947,7 +934,7 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
 channelz.Infof(logger, ac.channelz, "addrConn: updateAddrs addrs (%d of %d): %v", limit, len(addrs), addrs[:limit])
 ac.mu.Lock()
- if equalAddresses(ac.addrs, addrs) {
+ if equalAddressesIgnoringBalAttributes(ac.addrs, addrs) {
 ac.mu.Unlock()
 return
 }
@@ -966,7 +953,7 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
 // Try to find the connected address.
 for _, a := range addrs {
 a.ServerName = ac.cc.getServerName(a)
- if a.Equal(ac.curAddr) {
+ if equalAddressIgnoringBalAttributes(&a, &ac.curAddr) {
 // We are connected to a valid address, so do nothing but
 // update the addresses.
 ac.mu.Unlock()
@@ -992,11 +979,9 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
 ac.updateConnectivityState(connectivity.Idle, nil)
 }
- ac.mu.Unlock()
-
 // Since we were connecting/connected, we should start a new connection
 // attempt.
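+ // Note: ac.mu is still held here; resetTransportAndUnlock takes
+ // ownership of the lock and releases it.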
- go ac.resetTransport() + go ac.resetTransportAndUnlock() } // getServerName determines the serverName to be used in the connection @@ -1152,10 +1137,15 @@ func (cc *ClientConn) Close() error { <-cc.resolverWrapper.serializer.Done() <-cc.balancerWrapper.serializer.Done() - + var wg sync.WaitGroup for ac := range conns { - ac.tearDown(ErrClientConnClosing) + wg.Add(1) + go func(ac *addrConn) { + defer wg.Done() + ac.tearDown(ErrClientConnClosing) + }(ac) } + wg.Wait() cc.addTraceEvent("deleted") // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add // trace reference to the entity being deleted, and thus prevent it from being @@ -1190,8 +1180,7 @@ type addrConn struct { addrs []resolver.Address // All addresses that the resolver resolved to. // Use updateConnectivityState for updating addrConn's connectivity state. - state connectivity.State - stateChan chan struct{} // closed and recreated on every state change. + state connectivity.State backoffIdx int // Needs to be stateful for resetConnectBackoff. resetBackoff chan struct{} @@ -1204,9 +1193,6 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) if ac.state == s { return } - // When changing states, reset the state change channel. - close(ac.stateChan) - ac.stateChan = make(chan struct{}) ac.state = s ac.channelz.ChannelMetrics.State.Store(&s) if lastErr == nil { @@ -1214,7 +1200,7 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) } else { channelz.Infof(logger, ac.channelz, "Subchannel Connectivity change to %v, last error: %s", s, lastErr) } - ac.acbw.updateState(s, lastErr) + ac.acbw.updateState(s, ac.curAddr, lastErr) } // adjustParams updates parameters used to create transports upon @@ -1231,8 +1217,10 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) { } } -func (ac *addrConn) resetTransport() { - ac.mu.Lock() +// resetTransportAndUnlock unconditionally connects the addrConn. +// +// ac.mu must be held by the caller, and this function will guarantee it is released. +func (ac *addrConn) resetTransportAndUnlock() { acCtx := ac.ctx if acCtx.Err() != nil { ac.mu.Unlock() @@ -1263,6 +1251,8 @@ func (ac *addrConn) resetTransport() { ac.mu.Unlock() if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil { + // TODO: #7534 - Move re-resolution requests into the pick_first LB policy + // to ensure one resolution request per pass instead of per subconn failure. ac.cc.resolveNow(resolver.ResolveNowOptions{}) ac.mu.Lock() if acCtx.Err() != nil { @@ -1304,7 +1294,7 @@ func (ac *addrConn) resetTransport() { ac.mu.Unlock() } -// tryAllAddrs tries to creates a connection to the addresses, and stop when at +// tryAllAddrs tries to create a connection to the addresses, and stop when at // the first successful one. It returns an error if no address was successfully // connected, or updates ac appropriately with the new transport. 
func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error { @@ -1381,7 +1371,7 @@ func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, defer cancel() copts.ChannelzParent = ac.channelz - newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose) + newTr, err := transport.NewHTTP2Client(connectCtx, ac.cc.ctx, addr, copts, onClose) if err != nil { if logger.V(2) { logger.Infof("Creating new client transport to %q: %v", addr, err) @@ -1455,7 +1445,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { if !ac.scopts.HealthCheckEnabled { return } - healthCheckFunc := ac.cc.dopts.healthCheckFunc + healthCheckFunc := internal.HealthCheckFunc if healthCheckFunc == nil { // The health package is not imported to set health check function. // @@ -1487,7 +1477,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { } // Start the health checking stream. go func() { - err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) + err := healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) if err != nil { if status.Code(err) == codes.Unimplemented { channelz.Error(logger, ac.channelz, "Subchannel health check is unimplemented at server side, thus health check is disabled") @@ -1516,29 +1506,6 @@ func (ac *addrConn) getReadyTransport() transport.ClientTransport { return nil } -// getTransport waits until the addrconn is ready and returns the transport. -// If the context expires first, returns an appropriate status. If the -// addrConn is stopped first, returns an Unavailable status error. -func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) { - for ctx.Err() == nil { - ac.mu.Lock() - t, state, sc := ac.transport, ac.state, ac.stateChan - ac.mu.Unlock() - if state == connectivity.Ready { - return t, nil - } - if state == connectivity.Shutdown { - return nil, status.Errorf(codes.Unavailable, "SubConn shutting down") - } - - select { - case <-ctx.Done(): - case <-sc: - } - } - return nil, status.FromContextError(ctx.Err()).Err() -} - // tearDown starts to tear down the addrConn. // // Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct @@ -1585,7 +1552,7 @@ func (ac *addrConn) tearDown(err error) { } else { // Hard close the transport when the channel is entering idle or is // being shutdown. In the case where the channel is being shutdown, - // closing of transports is also taken care of by cancelation of cc.ctx. + // closing of transports is also taken care of by cancellation of cc.ctx. // But in the case where the channel is entering idle, we need to // explicitly close the transports here. Instead of distinguishing // between these two cases, it is simpler to close the transport diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go index 411e3dfd47c..959c2f99d4a 100644 --- a/vendor/google.golang.org/grpc/codec.go +++ b/vendor/google.golang.org/grpc/codec.go @@ -21,18 +21,73 @@ package grpc import ( "google.golang.org/grpc/encoding" _ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto" + "google.golang.org/grpc/mem" ) -// baseCodec contains the functionality of both Codec and encoding.Codec, but -// omits the name/string, which vary between the two and are not needed for -// anything besides the registry in the encoding package. 
+// baseCodec captures the new encoding.CodecV2 interface without the Name
+// function, allowing it to be implemented by older Codec and encoding.Codec
+// implementations. The omitted Name function is only needed for the registry in
+// the encoding package and is not part of the core functionality.
type baseCodec interface {
- Marshal(v any) ([]byte, error)
- Unmarshal(data []byte, v any) error
+ Marshal(v any) (mem.BufferSlice, error)
+ Unmarshal(data mem.BufferSlice, v any) error
+}
+
+// getCodec returns an encoding.CodecV2 for the codec of the given name (if
+// registered). It first checks the V1 registry with encoding.GetCodec and, if
+// a codec is registered there, wraps it with newCodecV1Bridge to turn it into
+// an encoding.CodecV2. Otherwise, it returns the result of checking the V2
+// registry with encoding.GetCodecV2, which is nil if nothing is registered.
+func getCodec(name string) encoding.CodecV2 {
+ if codecV1 := encoding.GetCodec(name); codecV1 != nil {
+ return newCodecV1Bridge(codecV1)
+ }
+
+ return encoding.GetCodecV2(name)
+}
+
+func newCodecV0Bridge(c Codec) baseCodec {
+ return codecV0Bridge{codec: c}
+}
+
+func newCodecV1Bridge(c encoding.Codec) encoding.CodecV2 {
+ return codecV1Bridge{
+ codecV0Bridge: codecV0Bridge{codec: c},
+ name: c.Name(),
+ }
+}
+
+var _ baseCodec = codecV0Bridge{}
+
+type codecV0Bridge struct {
+ codec interface {
+ Marshal(v any) ([]byte, error)
+ Unmarshal(data []byte, v any) error
+ }
+}
+
+func (c codecV0Bridge) Marshal(v any) (mem.BufferSlice, error) {
+ data, err := c.codec.Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ return mem.BufferSlice{mem.SliceBuffer(data)}, nil
+}
+
+func (c codecV0Bridge) Unmarshal(data mem.BufferSlice, v any) (err error) {
+ return c.codec.Unmarshal(data.Materialize(), v)
}

-var _ baseCodec = Codec(nil)
-var _ baseCodec = encoding.Codec(nil)
+var _ encoding.CodecV2 = codecV1Bridge{}
+
+type codecV1Bridge struct {
+ codecV0Bridge
+ name string
+}
+
+func (c codecV1Bridge) Name() string {
+ return c.name
+}

// Codec defines the interface gRPC uses to encode and decode messages.
// Note that implementations of this interface must be thread safe;
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go
index 43726e877b8..7e4bfee8886 100644
--- a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go
@@ -49,7 +49,7 @@ func (k KeySizeError) Error() string {
// newRekeyAEAD creates a new instance of aes128gcm with rekeying.
// The key argument should be 44 bytes, the first 32 bytes are used as a key
-// for HKDF-expand and the remainining 12 bytes are used as a random mask for
+// for HKDF-expand and the remaining 12 bytes are used as a random mask for
// the counter.
func newRekeyAEAD(key []byte) (*rekeyAEAD, error) {
 k := len(key)
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go
index 6a9035ea254..b5bbb5497aa 100644
--- a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go
@@ -51,7 +51,7 @@ type aes128gcmRekey struct {
// NewAES128GCMRekey creates an instance that uses aes128gcm with rekeying
// for ALTS record.
The key argument should be 44 bytes, the first 32 bytes
-// are used as a key for HKDF-expand and the remainining 12 bytes are used
+// are used as a key for HKDF-expand and the remaining 12 bytes are used
// as a random mask for the counter.
func NewAES128GCMRekey(side core.Side, key []byte) (ALTSRecordCrypto, error) {
 inCounter := NewInCounter(side, overflowLenAES128GCMRekey)
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go
index 0d64fb37a12..f1ea7bb2081 100644
--- a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go
@@ -266,10 +266,3 @@ func (p *conn) Write(b []byte) (n int, err error) {
 }
 return n, nil
}
-
-func min(a, b int) int {
- if a < b {
- return a
- }
- return b
-}
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go
index 6c867dd8501..50721f690ac 100644
--- a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go
@@ -128,7 +128,7 @@ type altsHandshaker struct {
// NewClientHandshaker creates a core.Handshaker that performs a client-side
// ALTS handshake by acting as a proxy between the peer and the ALTS handshaker
// service in the metadata server.
-func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ClientHandshakerOptions) (core.Handshaker, error) {
+func NewClientHandshaker(_ context.Context, conn *grpc.ClientConn, c net.Conn, opts *ClientHandshakerOptions) (core.Handshaker, error) {
 return &altsHandshaker{
 stream: nil,
 conn: c,
@@ -141,7 +141,7 @@ func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn,
// NewServerHandshaker creates a core.Handshaker that performs a server-side
// ALTS handshake by acting as a proxy between the peer and the ALTS handshaker
// service in the metadata server.
-func NewServerHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ServerHandshakerOptions) (core.Handshaker, error) {
+func NewServerHandshaker(_ context.Context, conn *grpc.ClientConn, c net.Conn, opts *ServerHandshakerOptions) (core.Handshaker, error) {
 return &altsHandshaker{
 stream: nil,
 conn: c,
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go
index e1cdafb980c..fbfde5d047f 100644
--- a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go
@@ -34,8 +34,6 @@ var (
 // to a corresponding connection to a hypervisor handshaker service
 // instance.
 hsConnMap = make(map[string]*grpc.ClientConn)
- // hsDialer will be reassigned in tests.
- hsDialer = grpc.Dial
)

// Dial dials the handshake service in the hypervisor. If a connection has
@@ -49,8 +47,10 @@ func Dial(hsAddress string) (*grpc.ClientConn, error) {
 if !ok {
 // Create a new connection to the handshaker service. Note that
 // this connection stays open until the application is closed.
+ // Disable the service config to avoid unnecessary TXT record lookups that
+ // cause timeouts with some versions of systemd-resolved.
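+ // (Service config over DNS is delivered via TXT records; disabling
+ // it with WithDisableServiceConfig avoids those lookups entirely.)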
var err error - hsConn, err = hsDialer(hsAddress, grpc.WithTransportCredentials(insecure.NewCredentials())) + hsConn, err = grpc.Dial(hsAddress, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDisableServiceConfig()) if err != nil { return nil, err } diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go index 38cb5cf0d74..83d23f65aa5 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 +// protoc-gen-go v1.35.1 +// protoc v5.27.1 // source: grpc/gcp/altscontext.proto package grpc_gcp @@ -60,11 +60,9 @@ type AltsContext struct { func (x *AltsContext) Reset() { *x = AltsContext{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_altscontext_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_gcp_altscontext_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *AltsContext) String() string { @@ -75,7 +73,7 @@ func (*AltsContext) ProtoMessage() {} func (x *AltsContext) ProtoReflect() protoreflect.Message { mi := &file_grpc_gcp_altscontext_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -201,7 +199,7 @@ func file_grpc_gcp_altscontext_proto_rawDescGZIP() []byte { } var file_grpc_gcp_altscontext_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_grpc_gcp_altscontext_proto_goTypes = []interface{}{ +var file_grpc_gcp_altscontext_proto_goTypes = []any{ (*AltsContext)(nil), // 0: grpc.gcp.AltsContext nil, // 1: grpc.gcp.AltsContext.PeerAttributesEntry (SecurityLevel)(0), // 2: grpc.gcp.SecurityLevel @@ -224,20 +222,6 @@ func file_grpc_gcp_altscontext_proto_init() { return } file_grpc_gcp_transport_security_common_proto_init() - if !protoimpl.UnsafeEnabled { - file_grpc_gcp_altscontext_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AltsContext); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go index 55fc7f65f10..915b36df821 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 +// protoc-gen-go v1.35.1 +// protoc v5.27.1 // source: grpc/gcp/handshaker.proto package grpc_gcp @@ -154,11 +154,9 @@ type Endpoint struct { func (x *Endpoint) Reset() { *x = Endpoint{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_gcp_handshaker_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Endpoint) String() string { @@ -169,7 +167,7 @@ func (*Endpoint) ProtoMessage() {} func (x *Endpoint) ProtoReflect() protoreflect.Message { mi := &file_grpc_gcp_handshaker_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -221,11 +219,9 @@ type Identity struct { func (x *Identity) Reset() { *x = Identity{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_gcp_handshaker_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Identity) String() string { @@ -236,7 +232,7 @@ func (*Identity) ProtoMessage() {} func (x *Identity) ProtoReflect() protoreflect.Message { mi := &file_grpc_gcp_handshaker_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -340,11 +336,9 @@ type StartClientHandshakeReq struct { func (x *StartClientHandshakeReq) Reset() { *x = StartClientHandshakeReq{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_gcp_handshaker_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *StartClientHandshakeReq) String() string { @@ -355,7 +349,7 @@ func (*StartClientHandshakeReq) ProtoMessage() {} func (x *StartClientHandshakeReq) ProtoReflect() protoreflect.Message { mi := &file_grpc_gcp_handshaker_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -467,11 +461,9 @@ type ServerHandshakeParameters struct { func (x *ServerHandshakeParameters) Reset() { *x = ServerHandshakeParameters{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_gcp_handshaker_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServerHandshakeParameters) String() string { @@ -482,7 +474,7 @@ func (*ServerHandshakeParameters) ProtoMessage() {} func (x *ServerHandshakeParameters) ProtoReflect() protoreflect.Message { mi := &file_grpc_gcp_handshaker_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -533,7 +525,7 @@ type StartServerHandshakeReq struct { // to handshake_parameters is the integer value of HandshakeProtocol enum. 
HandshakeParameters map[int32]*ServerHandshakeParameters `protobuf:"bytes,2,rep,name=handshake_parameters,json=handshakeParameters,proto3" json:"handshake_parameters,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Bytes in out_frames returned from the peer's HandshakerResp. It is possible - // that the peer's out_frames are split into multiple HandshakReq messages. + // that the peer's out_frames are split into multiple HandshakeReq messages. InBytes []byte `protobuf:"bytes,3,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` // (Optional) Local endpoint information of the connection to the client, // such as local IP address, port number, and network protocol. @@ -549,11 +541,9 @@ type StartServerHandshakeReq struct { func (x *StartServerHandshakeReq) Reset() { *x = StartServerHandshakeReq{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_gcp_handshaker_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *StartServerHandshakeReq) String() string { @@ -564,7 +554,7 @@ func (*StartServerHandshakeReq) ProtoMessage() {} func (x *StartServerHandshakeReq) ProtoReflect() protoreflect.Message { mi := &file_grpc_gcp_handshaker_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -645,11 +635,9 @@ type NextHandshakeMessageReq struct { func (x *NextHandshakeMessageReq) Reset() { *x = NextHandshakeMessageReq{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_gcp_handshaker_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NextHandshakeMessageReq) String() string { @@ -660,7 +648,7 @@ func (*NextHandshakeMessageReq) ProtoMessage() {} func (x *NextHandshakeMessageReq) ProtoReflect() protoreflect.Message { mi := &file_grpc_gcp_handshaker_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -704,11 +692,9 @@ type HandshakerReq struct { func (x *HandshakerReq) Reset() { *x = HandshakerReq{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_gcp_handshaker_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HandshakerReq) String() string { @@ -719,7 +705,7 @@ func (*HandshakerReq) ProtoMessage() {} func (x *HandshakerReq) ProtoReflect() protoreflect.Message { mi := &file_grpc_gcp_handshaker_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -816,11 +802,9 @@ type HandshakerResult struct { func (x *HandshakerResult) Reset() { *x = HandshakerResult{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := 
&file_grpc_gcp_handshaker_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HandshakerResult) String() string { @@ -831,7 +815,7 @@ func (*HandshakerResult) ProtoMessage() {} func (x *HandshakerResult) ProtoReflect() protoreflect.Message { mi := &file_grpc_gcp_handshaker_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -915,11 +899,9 @@ type HandshakerStatus struct { func (x *HandshakerStatus) Reset() { *x = HandshakerStatus{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_gcp_handshaker_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HandshakerStatus) String() string { @@ -930,7 +912,7 @@ func (*HandshakerStatus) ProtoMessage() {} func (x *HandshakerStatus) ProtoReflect() protoreflect.Message { mi := &file_grpc_gcp_handshaker_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -983,11 +965,9 @@ type HandshakerResp struct { func (x *HandshakerResp) Reset() { *x = HandshakerResp{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_gcp_handshaker_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HandshakerResp) String() string { @@ -998,7 +978,7 @@ func (*HandshakerResp) ProtoMessage() {} func (x *HandshakerResp) ProtoReflect() protoreflect.Message { mi := &file_grpc_gcp_handshaker_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1071,7 +1051,7 @@ var file_grpc_gcp_handshaker_proto_rawDesc = []byte{ 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, - 0x22, 0xf6, 0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x22, 0xfb, 0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x12, 0x5b, 0x0a, 0x1b, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, @@ -1108,139 +1088,140 @@ var file_grpc_gcp_handshaker_proto_rawDesc = []byte{ 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x72, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x72, 0x61, - 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x61, 0x63, - 
0x63, 0x65, 0x73, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xaa, 0x01, 0x0a, 0x19, 0x53, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, - 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, - 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, - 0x6c, 0x73, 0x12, 0x3d, 0x0a, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, - 0x52, 0x0f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, - 0x73, 0x12, 0x19, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x48, 0x00, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, - 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xa5, 0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, + 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x26, 0x0a, 0x0c, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0x80, 0x01, + 0x01, 0x52, 0x0b, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xaf, + 0x01, 0x0a, 0x19, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, + 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x29, 0x0a, 0x10, + 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x12, 0x3d, 0x0a, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x1e, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0x80, 0x01, 0x01, 0x48, 0x00, 0x52, 0x05, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x22, 0xa5, 0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x12, 0x33, 0x0a, 0x15, + 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x61, 0x70, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x73, 0x12, 0x6d, 0x0a, 0x14, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x70, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x3a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, - 0x65, 0x71, 0x12, 0x33, 0x0a, 0x15, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x70, 0x72, 
0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x12, 0x6d, 0x0a, 0x14, 0x68, 0x61, 0x6e, 0x64, 0x73, - 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, - 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, - 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, - 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x13, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, - 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, - 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, - 0x73, 0x12, 0x39, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, - 0x69, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0d, 0x6c, - 0x6f, 0x63, 0x61, 0x6c, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0f, - 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, - 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0e, 0x72, 0x65, 0x6d, 0x6f, 0x74, - 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x40, 0x0a, 0x0c, 0x72, 0x70, 0x63, - 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0b, - 0x72, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, - 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x07, 0x20, + 0x65, 0x71, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x68, 0x61, 0x6e, + 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, + 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0e, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45, + 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x45, 0x6e, + 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x52, 0x0e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x12, 0x40, 0x0a, 0x0c, 0x72, 
0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x72, 0x70, 0x63, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, + 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, + 0x61, 0x78, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x1a, 0x6b, 0x0a, 0x18, 0x48, + 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x39, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x67, 0x63, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, + 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x62, 0x0a, 0x17, 0x4e, 0x65, 0x78, 0x74, + 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x52, 0x65, 0x71, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x2c, + 0x0a, 0x12, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, + 0x79, 0x5f, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x10, 0x6e, 0x65, 0x74, 0x77, + 0x6f, 0x72, 0x6b, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4d, 0x73, 0x22, 0xe5, 0x01, 0x0a, + 0x0d, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x12, 0x46, + 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, + 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, + 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x46, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, + 0x00, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x37, + 0x0a, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x4e, 0x65, 0x78, 0x74, 0x48, 0x61, 0x6e, 0x64, + 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x48, + 0x00, 0x52, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f, + 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x9a, 0x03, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, + 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x31, 0x0a, 0x14, 0x61, 0x70, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 
0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x27, 0x0a, 0x0f, + 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6b, 0x65, 0x79, 0x44, 0x61, 0x74, 0x61, + 0x12, 0x37, 0x0a, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0c, 0x70, 0x65, 0x65, + 0x72, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x39, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x63, 0x68, 0x61, + 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0f, 0x6b, 0x65, 0x65, 0x70, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x4f, 0x70, 0x65, 0x6e, + 0x12, 0x49, 0x0a, 0x11, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0f, 0x70, 0x65, 0x65, 0x72, + 0x52, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, + 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a, - 0x65, 0x1a, 0x6b, 0x0a, 0x18, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, - 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x39, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, - 0x65, 0x72, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x62, - 0x0a, 0x17, 0x4e, 0x65, 0x78, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, - 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, - 0x79, 0x74, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, - 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x10, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, - 
0x4d, 0x73, 0x22, 0xe5, 0x01, 0x0a, 0x0d, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, - 0x72, 0x52, 0x65, 0x71, 0x12, 0x46, 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, - 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, - 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x46, 0x0a, 0x0c, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, - 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, - 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, - 0x74, 0x61, 0x72, 0x74, 0x12, 0x37, 0x0a, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x4e, 0x65, - 0x78, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x42, 0x0b, 0x0a, - 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x9a, 0x03, 0x0a, 0x10, 0x48, - 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, - 0x31, 0x0a, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61, - 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, - 0x6f, 0x6c, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x63, - 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x6b, - 0x65, 0x79, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6b, - 0x65, 0x79, 0x44, 0x61, 0x74, 0x61, 0x12, 0x37, 0x0a, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x52, 0x0c, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, - 0x39, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, - 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, - 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x6b, 0x65, - 0x65, 0x70, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x6b, 0x65, 0x65, 0x70, 0x43, 0x68, 0x61, 0x6e, 0x6e, - 0x65, 0x6c, 0x4f, 0x70, 0x65, 0x6e, 0x12, 0x49, 0x0a, 0x11, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x72, - 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, - 0x50, 0x72, 0x6f, 0x74, 
0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x52, 0x0f, 0x70, 0x65, 0x65, 0x72, 0x52, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, - 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x72, - 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x40, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73, - 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, - 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, - 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0xbe, 0x01, 0x0a, 0x0e, 0x48, 0x61, - 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1d, 0x0a, 0x0a, - 0x6f, 0x75, 0x74, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x09, 0x6f, 0x75, 0x74, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x62, - 0x79, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x62, 0x79, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, - 0x65, 0x64, 0x12, 0x32, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, - 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, - 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, - 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2a, 0x4a, 0x0a, 0x11, 0x48, 0x61, - 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, - 0x22, 0x0a, 0x1e, 0x48, 0x41, 0x4e, 0x44, 0x53, 0x48, 0x41, 0x4b, 0x45, 0x5f, 0x50, 0x52, 0x4f, - 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, - 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x4c, 0x53, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, - 0x41, 0x4c, 0x54, 0x53, 0x10, 0x02, 0x2a, 0x45, 0x0a, 0x0f, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, - 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x20, 0x0a, 0x1c, 0x4e, 0x45, 0x54, - 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x55, 0x4e, - 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54, - 0x43, 0x50, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x44, 0x50, 0x10, 0x02, 0x32, 0x5b, 0x0a, - 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x12, 0x46, 0x0a, 0x0b, 0x44, 0x6f, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, - 0x65, 0x12, 0x17, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, - 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x1a, 0x18, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, - 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x6b, 0x0a, 0x15, 0x69, 0x6f, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x61, 0x6c, 
0x74, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x42, 0x0f, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x63, - 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x2f, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, - 0x72, 0x70, 0x63, 0x5f, 0x67, 0x63, 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x22, 0x40, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, + 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x73, 0x22, 0xbe, 0x01, 0x0a, 0x0e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, + 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1d, 0x0a, 0x0a, 0x6f, 0x75, 0x74, 0x5f, 0x66, 0x72, + 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x6f, 0x75, 0x74, 0x46, + 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x63, + 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x12, 0x32, 0x0a, 0x06, + 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, + 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x12, 0x32, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, + 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x2a, 0x4a, 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, + 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x22, 0x0a, 0x1e, 0x48, 0x41, 0x4e, + 0x44, 0x53, 0x48, 0x41, 0x4b, 0x45, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, + 0x03, 0x54, 0x4c, 0x53, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x41, 0x4c, 0x54, 0x53, 0x10, 0x02, + 0x2a, 0x45, 0x0a, 0x0f, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x12, 0x20, 0x0a, 0x1c, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x50, + 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x10, 0x01, 0x12, 0x07, + 0x0a, 0x03, 0x55, 0x44, 0x50, 0x10, 0x02, 0x32, 0x5b, 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73, + 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x46, 0x0a, 0x0b, + 0x44, 0x6f, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x12, 0x17, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, + 0x72, 0x52, 0x65, 0x71, 0x1a, 0x18, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 
0x63, 0x70, 0x2e, + 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, + 0x28, 0x01, 0x30, 0x01, 0x42, 0x6b, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x61, 0x6c, 0x74, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x42, 0x0f, 0x48, + 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x61, 0x6c, 0x73, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x67, 0x63, + 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1257,7 +1238,7 @@ func file_grpc_gcp_handshaker_proto_rawDescGZIP() []byte { var file_grpc_gcp_handshaker_proto_enumTypes = make([]protoimpl.EnumInfo, 2) var file_grpc_gcp_handshaker_proto_msgTypes = make([]protoimpl.MessageInfo, 12) -var file_grpc_gcp_handshaker_proto_goTypes = []interface{}{ +var file_grpc_gcp_handshaker_proto_goTypes = []any{ (HandshakeProtocol)(0), // 0: grpc.gcp.HandshakeProtocol (NetworkProtocol)(0), // 1: grpc.gcp.NetworkProtocol (*Endpoint)(nil), // 2: grpc.gcp.Endpoint @@ -1312,134 +1293,12 @@ func file_grpc_gcp_handshaker_proto_init() { return } file_grpc_gcp_transport_security_common_proto_init() - if !protoimpl.UnsafeEnabled { - file_grpc_gcp_handshaker_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Endpoint); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_gcp_handshaker_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Identity); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_gcp_handshaker_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StartClientHandshakeReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_gcp_handshaker_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServerHandshakeParameters); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_gcp_handshaker_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StartServerHandshakeReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_gcp_handshaker_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NextHandshakeMessageReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_gcp_handshaker_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HandshakerReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_gcp_handshaker_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HandshakerResult); i { - case 0: 
- return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_gcp_handshaker_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HandshakerStatus); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_gcp_handshaker_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HandshakerResp); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_grpc_gcp_handshaker_proto_msgTypes[1].OneofWrappers = []interface{}{ + file_grpc_gcp_handshaker_proto_msgTypes[1].OneofWrappers = []any{ (*Identity_ServiceAccount)(nil), (*Identity_Hostname)(nil), } - file_grpc_gcp_handshaker_proto_msgTypes[3].OneofWrappers = []interface{}{} - file_grpc_gcp_handshaker_proto_msgTypes[6].OneofWrappers = []interface{}{ + file_grpc_gcp_handshaker_proto_msgTypes[3].OneofWrappers = []any{} + file_grpc_gcp_handshaker_proto_msgTypes[6].OneofWrappers = []any{ (*HandshakerReq_ClientStart)(nil), (*HandshakerReq_ServerStart)(nil), (*HandshakerReq_Next)(nil), diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go index 358074b6494..34443b1d2dc 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.4.0 -// - protoc v4.25.2 +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.27.1 // source: grpc/gcp/handshaker.proto package grpc_gcp @@ -75,7 +75,7 @@ type HandshakerService_DoHandshakeClient = grpc.BidiStreamingClient[HandshakerRe // HandshakerServiceServer is the server API for HandshakerService service. // All implementations must embed UnimplementedHandshakerServiceServer -// for forward compatibility +// for forward compatibility. type HandshakerServiceServer interface { // Handshaker service accepts a stream of handshaker request, returning a // stream of handshaker response. Client is expected to send exactly one @@ -87,14 +87,18 @@ type HandshakerServiceServer interface { mustEmbedUnimplementedHandshakerServiceServer() } -// UnimplementedHandshakerServiceServer must be embedded to have forward compatible implementations. -type UnimplementedHandshakerServiceServer struct { -} +// UnimplementedHandshakerServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedHandshakerServiceServer struct{} func (UnimplementedHandshakerServiceServer) DoHandshake(grpc.BidiStreamingServer[HandshakerReq, HandshakerResp]) error { return status.Errorf(codes.Unimplemented, "method DoHandshake not implemented") } func (UnimplementedHandshakerServiceServer) mustEmbedUnimplementedHandshakerServiceServer() {} +func (UnimplementedHandshakerServiceServer) testEmbeddedByValue() {} // UnsafeHandshakerServiceServer may be embedded to opt out of forward compatibility for this service. 
// Use of this interface is not recommended, as added methods to HandshakerServiceServer will @@ -104,6 +108,13 @@ type UnsafeHandshakerServiceServer interface { } func RegisterHandshakerServiceServer(s grpc.ServiceRegistrar, srv HandshakerServiceServer) { + // If the following call panics, it indicates UnimplementedHandshakerServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&HandshakerService_ServiceDesc, srv) } diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go index 18cc9cfbd59..e9676db4b52 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 +// protoc-gen-go v1.35.1 +// protoc v5.27.1 // source: grpc/gcp/transport_security_common.proto package grpc_gcp @@ -102,11 +102,9 @@ type RpcProtocolVersions struct { func (x *RpcProtocolVersions) Reset() { *x = RpcProtocolVersions{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RpcProtocolVersions) String() string { @@ -117,7 +115,7 @@ func (*RpcProtocolVersions) ProtoMessage() {} func (x *RpcProtocolVersions) ProtoReflect() protoreflect.Message { mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -158,11 +156,9 @@ type RpcProtocolVersions_Version struct { func (x *RpcProtocolVersions_Version) Reset() { *x = RpcProtocolVersions_Version{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RpcProtocolVersions_Version) String() string { @@ -173,7 +169,7 @@ func (*RpcProtocolVersions_Version) ProtoMessage() {} func (x *RpcProtocolVersions_Version) ProtoReflect() protoreflect.Message { mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -253,7 +249,7 @@ func file_grpc_gcp_transport_security_common_proto_rawDescGZIP() []byte { var file_grpc_gcp_transport_security_common_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_grpc_gcp_transport_security_common_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var 
file_grpc_gcp_transport_security_common_proto_goTypes = []interface{}{ +var file_grpc_gcp_transport_security_common_proto_goTypes = []any{ (SecurityLevel)(0), // 0: grpc.gcp.SecurityLevel (*RpcProtocolVersions)(nil), // 1: grpc.gcp.RpcProtocolVersions (*RpcProtocolVersions_Version)(nil), // 2: grpc.gcp.RpcProtocolVersions.Version @@ -273,32 +269,6 @@ func file_grpc_gcp_transport_security_common_proto_init() { if File_grpc_gcp_transport_security_common_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_grpc_gcp_transport_security_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RpcProtocolVersions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_gcp_transport_security_common_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RpcProtocolVersions_Version); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/grpc/credentials/google/google.go b/vendor/google.golang.org/grpc/credentials/google/google.go index fbdf7dc2997..5a9c9461f0e 100644 --- a/vendor/google.golang.org/grpc/credentials/google/google.go +++ b/vendor/google.golang.org/grpc/credentials/google/google.go @@ -22,7 +22,6 @@ package google import ( "context" "fmt" - "time" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/alts" @@ -31,7 +30,7 @@ import ( "google.golang.org/grpc/internal" ) -const tokenRequestTimeout = 30 * time.Second +const defaultCloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" var logger = grpclog.Component("credentials") @@ -39,6 +38,9 @@ var logger = grpclog.Component("credentials") type DefaultCredentialsOptions struct { // PerRPCCreds is a per RPC credentials that is passed to a bundle. PerRPCCreds credentials.PerRPCCredentials + // ALTSPerRPCCreds is a per RPC credentials that, if specified, will + // supersede PerRPCCreds above for, and only for, ALTS connections. + ALTSPerRPCCreds credentials.PerRPCCredentials } // NewDefaultCredentialsWithOptions returns a credentials bundle that is @@ -47,14 +49,21 @@ type DefaultCredentialsOptions struct { // This API is experimental. func NewDefaultCredentialsWithOptions(opts DefaultCredentialsOptions) credentials.Bundle { if opts.PerRPCCreds == nil { - ctx, cancel := context.WithTimeout(context.Background(), tokenRequestTimeout) - defer cancel() var err error - opts.PerRPCCreds, err = newADC(ctx) + // If the ADC ends up being Compute Engine Credentials, this context + // won't be used. Otherwise, the context dictates all the subsequent + // token requests via HTTP. So we cannot have any deadline or timeout.
+ opts.PerRPCCreds, err = newADC(context.TODO()) if err != nil { logger.Warningf("NewDefaultCredentialsWithOptions: failed to create application oauth: %v", err) } } + if opts.ALTSPerRPCCreds != nil { + opts.PerRPCCreds = &dualPerRPCCreds{ + perRPCCreds: opts.PerRPCCreds, + altsPerRPCCreds: opts.ALTSPerRPCCreds, + } + } c := &creds{opts: opts} bundle, err := c.NewWithMode(internal.CredsBundleModeFallback) if err != nil { @@ -113,7 +122,7 @@ var ( return alts.NewClientCreds(alts.DefaultClientOptions()) } newADC = func(ctx context.Context) (credentials.PerRPCCredentials, error) { - return oauth.NewApplicationDefault(ctx) + return oauth.NewApplicationDefault(ctx, defaultCloudPlatformScope) } ) @@ -143,3 +152,27 @@ func (c *creds) NewWithMode(mode string) (credentials.Bundle, error) { return newCreds, nil } + +// dualPerRPCCreds implements credentials.PerRPCCredentials by embedding the +// fallback PerRPCCredentials and the ALTS one. It picks one of them based on +// the channel type. +type dualPerRPCCreds struct { + perRPCCreds credentials.PerRPCCredentials + altsPerRPCCreds credentials.PerRPCCredentials +} + +func (d *dualPerRPCCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + ri, ok := credentials.RequestInfoFromContext(ctx) + if !ok { + return nil, fmt.Errorf("request info not found from context") + } + if authType := ri.AuthInfo.AuthType(); authType == "alts" { + return d.altsPerRPCCreds.GetRequestMetadata(ctx, uri...) + } + // This ensures backward compatibility even if authType is not "tls". + return d.perRPCCreds.GetRequestMetadata(ctx, uri...) +} + +func (d *dualPerRPCCreds) RequireTransportSecurity() bool { + return d.altsPerRPCCreds.RequireTransportSecurity() || d.perRPCCreds.RequireTransportSecurity() +} diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go index 82bee1443bf..4c805c64462 100644 --- a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go +++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go @@ -40,7 +40,7 @@ func NewCredentials() credentials.TransportCredentials { // NoSecurity. type insecureTC struct{} -func (insecureTC) ClientHandshake(ctx context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) { +func (insecureTC) ClientHandshake(_ context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) { return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil } diff --git a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go index d475cbc0894..328b838ed1f 100644 --- a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go +++ b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go @@ -38,7 +38,7 @@ type TokenSource struct { } // GetRequestMetadata gets the request metadata as a map from a TokenSource.
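For illustration only (not part of the patch): a sketch of how a caller might wire up the new ALTSPerRPCCreds field added to credentials/google above. The scope-less ADC call and all names here are assumptions for the example, not prescribed usage.

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/google"
	"google.golang.org/grpc/credentials/oauth"
)

func main() {
	// ADC-backed per-RPC credentials intended to apply only on ALTS
	// connections; every other transport keeps the bundle's default creds.
	altsCreds, err := oauth.NewApplicationDefault(context.Background())
	if err != nil {
		log.Fatalf("ADC: %v", err)
	}
	bundle := google.NewDefaultCredentialsWithOptions(google.DefaultCredentialsOptions{
		ALTSPerRPCCreds: altsCreds,
	})
	_ = grpc.WithCredentialsBundle(bundle) // pass this option when dialing
}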
-func (ts TokenSource) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { +func (ts TokenSource) GetRequestMetadata(ctx context.Context, _ ...string) (map[string]string, error) { token, err := ts.Token() if err != nil { return nil, err @@ -127,7 +127,7 @@ func NewOauthAccess(token *oauth2.Token) credentials.PerRPCCredentials { return oauthAccess{token: *token} } -func (oa oauthAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { +func (oa oauthAccess) GetRequestMetadata(ctx context.Context, _ ...string) (map[string]string, error) { ri, _ := credentials.RequestInfoFromContext(ctx) if err := credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity); err != nil { return nil, fmt.Errorf("unable to transfer oauthAccess PerRPCCredentials: %v", err) @@ -156,7 +156,7 @@ type serviceAccount struct { t *oauth2.Token } -func (s *serviceAccount) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { +func (s *serviceAccount) GetRequestMetadata(ctx context.Context, _ ...string) (map[string]string, error) { s.mu.Lock() defer s.mu.Unlock() if !s.t.Valid() { diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 4114358545e..e163a473df9 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -200,25 +200,40 @@ var tls12ForbiddenCipherSuites = map[uint16]struct{}{ // NewTLS uses c to construct a TransportCredentials based on TLS. func NewTLS(c *tls.Config) TransportCredentials { - tc := &tlsCreds{credinternal.CloneTLSConfig(c)} - tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos) + config := applyDefaults(c) + if config.GetConfigForClient != nil { + oldFn := config.GetConfigForClient + config.GetConfigForClient = func(hello *tls.ClientHelloInfo) (*tls.Config, error) { + cfgForClient, err := oldFn(hello) + if err != nil || cfgForClient == nil { + return cfgForClient, err + } + return applyDefaults(cfgForClient), nil + } + } + return &tlsCreds{config: config} +} + +func applyDefaults(c *tls.Config) *tls.Config { + config := credinternal.CloneTLSConfig(c) + config.NextProtos = credinternal.AppendH2ToNextProtos(config.NextProtos) // If the user did not configure a MinVersion and did not configure a // MaxVersion < 1.2, use MinVersion=1.2, which is required by // https://datatracker.ietf.org/doc/html/rfc7540#section-9.2 - if tc.config.MinVersion == 0 && (tc.config.MaxVersion == 0 || tc.config.MaxVersion >= tls.VersionTLS12) { - tc.config.MinVersion = tls.VersionTLS12 + if config.MinVersion == 0 && (config.MaxVersion == 0 || config.MaxVersion >= tls.VersionTLS12) { + config.MinVersion = tls.VersionTLS12 } // If the user did not configure CipherSuites, use all "secure" cipher // suites reported by the TLS package, but remove some explicitly forbidden // by https://datatracker.ietf.org/doc/html/rfc7540#appendix-A - if tc.config.CipherSuites == nil { + if config.CipherSuites == nil { for _, cs := range tls.CipherSuites() { if _, ok := tls12ForbiddenCipherSuites[cs.ID]; !ok { - tc.config.CipherSuites = append(tc.config.CipherSuites, cs.ID) + config.CipherSuites = append(config.CipherSuites, cs.ID) } } } - return tc + return config } // NewClientTLSFromCert constructs TLS credentials from the provided root diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index f5453d48a53..7494ae591f1 100644 
--- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -33,6 +33,7 @@ import ( "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" ) @@ -60,7 +61,7 @@ func init() { internal.WithBinaryLogger = withBinaryLogger internal.JoinDialOptions = newJoinDialOption internal.DisableGlobalDialOptions = newDisableGlobalDialOptions - internal.WithRecvBufferPool = withRecvBufferPool + internal.WithBufferPool = withBufferPool } // dialOptions configure a Dial call. dialOptions are set by the DialOption @@ -86,13 +87,11 @@ type dialOptions struct { disableServiceConfig bool disableRetry bool disableHealthCheck bool - healthCheckFunc internal.HealthChecker minConnectTimeout func() time.Duration defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. defaultServiceConfigRawJSON *string resolvers []resolver.Builder idleTimeout time.Duration - recvBufferPool SharedBufferPool defaultScheme string maxCallAttempts int } @@ -436,7 +435,7 @@ func WithTimeout(d time.Duration) DialOption { // option to true from the Control field. For a concrete example of how to do // this, see internal.NetDialerWithTCPKeepalive(). // -// For more information, please see [issue 23459] in the Go github repo. +// For more information, please see [issue 23459] in the Go GitHub repo. // // [issue 23459]: https://github.com/golang/go/issues/23459 func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption { @@ -445,10 +444,6 @@ func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOp }) } -func init() { - internal.WithHealthCheckFunc = withHealthCheckFunc -} - // WithDialer returns a DialOption that specifies a function to use for dialing // network addresses. If FailOnNonTempDialError() is set to true, and an error // is returned by f, gRPC checks the error's Temporary() method to decide if it @@ -518,6 +513,8 @@ func WithUserAgent(s string) DialOption { // WithKeepaliveParams returns a DialOption that specifies keepalive parameters // for the client transport. +// +// Keepalive is disabled by default. func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { if kp.Time < internal.KeepaliveMinPingTime { logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime) @@ -660,16 +657,6 @@ func WithDisableHealthCheck() DialOption { }) } -// withHealthCheckFunc replaces the default health check function with the -// provided one. It makes tests easier to change the health check function. -// -// For testing purpose only. 
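The doc line added to WithKeepaliveParams is worth calling out: client keepalive is off unless explicitly configured, and Time values below the enforced minimum are raised with the warning shown above. A hedged usage sketch — the target address and timings are hypothetical, and overly aggressive pings can get the connection closed by the server's enforcement policy:

```go
package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/keepalive"
)

func main() {
	conn, err := grpc.NewClient("localhost:50051", // hypothetical target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithKeepaliveParams(keepalive.ClientParameters{
			Time:                30 * time.Second, // ping after 30s of inactivity
			Timeout:             10 * time.Second, // wait 10s for the ping ack
			PermitWithoutStream: true,             // ping even with no active RPCs
		}),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```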
-func withHealthCheckFunc(f internal.HealthChecker) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.healthCheckFunc = f - }) -} - func defaultDialOptions() dialOptions { return dialOptions{ copts: transport.ConnectOptions{ @@ -677,11 +664,10 @@ func defaultDialOptions() dialOptions { WriteBufferSize: defaultWriteBufSize, UseProxy: true, UserAgent: grpcUA, + BufferPool: mem.DefaultBufferPool(), }, bs: internalbackoff.DefaultExponential, - healthCheckFunc: internal.HealthCheckFunc, idleTimeout: 30 * time.Minute, - recvBufferPool: nopBufferPool{}, defaultScheme: "dns", maxCallAttempts: defaultMaxCallAttempts, } @@ -758,25 +744,8 @@ func WithMaxCallAttempts(n int) DialOption { }) } -// WithRecvBufferPool returns a DialOption that configures the ClientConn -// to use the provided shared buffer pool for parsing incoming messages. Depending -// on the application's workload, this could result in reduced memory allocation. -// -// If you are unsure about how to implement a memory pool but want to utilize one, -// begin with grpc.NewSharedBufferPool. -// -// Note: The shared buffer pool feature will not be active if any of the following -// options are used: WithStatsHandler, EnableTracing, or binary logging. In such -// cases, the shared buffer pool will be ignored. -// -// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in -// v1.60.0 or later. -func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption { - return withRecvBufferPool(bufferPool) -} - -func withRecvBufferPool(bufferPool SharedBufferPool) DialOption { +func withBufferPool(bufferPool mem.BufferPool) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.recvBufferPool = bufferPool + o.copts.BufferPool = bufferPool }) } diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go index 0022859ad74..e7b532b6f80 100644 --- a/vendor/google.golang.org/grpc/doc.go +++ b/vendor/google.golang.org/grpc/doc.go @@ -16,7 +16,7 @@ * */ -//go:generate ./regenerate.sh +//go:generate ./scripts/regenerate.sh /* Package grpc implements an RPC system called gRPC. diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 5ebf88d7147..11d0ae142c4 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -94,7 +94,7 @@ type Codec interface { Name() string } -var registeredCodecs = make(map[string]Codec) +var registeredCodecs = make(map[string]any) // RegisterCodec registers the provided Codec for use with all gRPC clients and // servers. @@ -126,5 +126,6 @@ func RegisterCodec(codec Codec) { // // The content-subtype is expected to be lowercase. func GetCodec(contentSubtype string) Codec { - return registeredCodecs[contentSubtype] + c, _ := registeredCodecs[contentSubtype].(Codec) + return c } diff --git a/vendor/google.golang.org/grpc/encoding/encoding_v2.go b/vendor/google.golang.org/grpc/encoding/encoding_v2.go new file mode 100644 index 00000000000..074c5e234a7 --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/encoding_v2.go @@ -0,0 +1,81 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package encoding + +import ( + "strings" + + "google.golang.org/grpc/mem" +) + +// CodecV2 defines the interface gRPC uses to encode and decode messages. Note +// that implementations of this interface must be thread safe; a CodecV2's +// methods can be called from concurrent goroutines. +type CodecV2 interface { + // Marshal returns the wire format of v. The buffers in the returned + // [mem.BufferSlice] must have at least one reference each, which will be freed + // by gRPC when they are no longer needed. + Marshal(v any) (out mem.BufferSlice, err error) + // Unmarshal parses the wire format into v. Note that data will be freed as soon + // as this function returns. If the codec wishes to guarantee access to the data + // after this function, it must take its own reference that it frees when it is + // no longer needed. + Unmarshal(data mem.BufferSlice, v any) error + // Name returns the name of the Codec implementation. The returned string + // will be used as part of content type in transmission. The result must be + // static; the result cannot change between calls. + Name() string +} + +// RegisterCodecV2 registers the provided CodecV2 for use with all gRPC clients and +// servers. +// +// The CodecV2 will be stored and looked up by result of its Name() method, which +// should match the content-subtype of the encoding handled by the CodecV2. This +// is case-insensitive, and is stored and looked up as lowercase. If the +// result of calling Name() is an empty string, RegisterCodecV2 will panic. See +// Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If both a Codec and CodecV2 are registered with the same name, the CodecV2 +// will be used. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Codecs are +// registered with the same name, the one registered last will take effect. +func RegisterCodecV2(codec CodecV2) { + if codec == nil { + panic("cannot register a nil CodecV2") + } + if codec.Name() == "" { + panic("cannot register CodecV2 with empty string result for Name()") + } + contentSubtype := strings.ToLower(codec.Name()) + registeredCodecs[contentSubtype] = codec +} + +// GetCodecV2 gets a registered CodecV2 by content-subtype, or nil if no CodecV2 is +// registered for the content-subtype. +// +// The content-subtype is expected to be lowercase. +func GetCodecV2(contentSubtype string) CodecV2 { + c, _ := registeredCodecs[contentSubtype].(CodecV2) + return c +} diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go index 66d5cdf03ec..ceec319dd2f 100644 --- a/vendor/google.golang.org/grpc/encoding/proto/proto.go +++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -1,6 +1,6 @@ /* * - * Copyright 2018 gRPC authors. + * Copyright 2024 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
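Since registeredCodecs now holds `any`, Codec and CodecV2 implementations share one registry keyed by lowercase content-subtype, and GetCodec/GetCodecV2 recover the right generation with a comma-ok type assertion. To make the new surface concrete, here is a sketch of a hypothetical pass-through CodecV2 for raw []byte payloads, built only from the mem helpers visible in this diff; the "bytes" name and the package are invented for illustration, not part of gRPC:

```go
package bytescodec

import (
	"fmt"

	"google.golang.org/grpc/encoding"
	"google.golang.org/grpc/mem"
)

// bytesCodecV2 is a hypothetical CodecV2 that passes []byte payloads through
// unchanged.
type bytesCodecV2 struct{}

func init() {
	// Looked up by content-subtype "bytes"; must run at init time.
	encoding.RegisterCodecV2(&bytesCodecV2{})
}

func (*bytesCodecV2) Marshal(v any) (mem.BufferSlice, error) {
	b, ok := v.([]byte)
	if !ok {
		return nil, fmt.Errorf("bytes codec: got %T, want []byte", v)
	}
	// Wrap the caller's slice without copying; gRPC frees the buffers when it
	// no longer needs them.
	return mem.BufferSlice{mem.SliceBuffer(b)}, nil
}

func (*bytesCodecV2) Unmarshal(data mem.BufferSlice, v any) error {
	dst, ok := v.(*[]byte)
	if !ok {
		return fmt.Errorf("bytes codec: got %T, want *[]byte", v)
	}
	// data is freed as soon as Unmarshal returns, so copy out of the buffers.
	buf := data.MaterializeToBuffer(mem.DefaultBufferPool())
	defer buf.Free()
	*dst = append([]byte(nil), buf.ReadOnlyData()...)
	return nil
}

func (*bytesCodecV2) Name() string { return "bytes" }
```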
@@ -24,6 +24,7 @@ import (
 	"fmt"
 
 	"google.golang.org/grpc/encoding"
+	"google.golang.org/grpc/mem"
 	"google.golang.org/protobuf/proto"
 	"google.golang.org/protobuf/protoadapt"
 )
@@ -32,28 +33,51 @@ import (
 const Name = "proto"
 
 func init() {
-	encoding.RegisterCodec(codec{})
+	encoding.RegisterCodecV2(&codecV2{})
 }
 
-// codec is a Codec implementation with protobuf. It is the default codec for gRPC.
-type codec struct{}
+// codecV2 is a CodecV2 implementation with protobuf. It is the default codec
+// for gRPC.
+type codecV2 struct{}
 
-func (codec) Marshal(v any) ([]byte, error) {
+func (c *codecV2) Marshal(v any) (data mem.BufferSlice, err error) {
 	vv := messageV2Of(v)
 	if vv == nil {
-		return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v)
+		return nil, fmt.Errorf("proto: failed to marshal, message is %T, want proto.Message", v)
 	}
-	return proto.Marshal(vv)
+	size := proto.Size(vv)
+	if mem.IsBelowBufferPoolingThreshold(size) {
+		buf, err := proto.Marshal(vv)
+		if err != nil {
+			return nil, err
+		}
+		data = append(data, mem.SliceBuffer(buf))
+	} else {
+		pool := mem.DefaultBufferPool()
+		buf := pool.Get(size)
+		if _, err := (proto.MarshalOptions{}).MarshalAppend((*buf)[:0], vv); err != nil {
+			pool.Put(buf)
+			return nil, err
+		}
+		data = append(data, mem.NewBuffer(buf, pool))
+	}
+
+	return data, nil
 }
 
-func (codec) Unmarshal(data []byte, v any) error {
+func (c *codecV2) Unmarshal(data mem.BufferSlice, v any) (err error) {
 	vv := messageV2Of(v)
 	if vv == nil {
 		return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v)
 	}
-	return proto.Unmarshal(data, vv)
+	buf := data.MaterializeToBuffer(mem.DefaultBufferPool())
+	defer buf.Free()
+	// TODO: Upgrade proto.Unmarshal to support mem.BufferSlice. Right now, it's not
+	// really possible without a major overhaul of the proto package, but the
+	// vtprotobuf library may be able to support this.
+	return proto.Unmarshal(buf.ReadOnlyData(), vv)
 }
 
 func messageV2Of(v any) proto.Message {
@@ -67,6 +91,6 @@ func messageV2Of(v any) proto.Message {
 	return nil
 }
 
-func (codec) Name() string {
+func (c *codecV2) Name() string {
 	return Name
 }
diff --git a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
new file mode 100644
index 00000000000..ad75313a18e
--- /dev/null
+++ b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
@@ -0,0 +1,270 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package stats
+
+import (
+	"maps"
+
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal"
+	"google.golang.org/grpc/stats"
+)
+
+func init() {
+	internal.SnapshotMetricRegistryForTesting = snapshotMetricsRegistryForTesting
+}
+
+var logger = grpclog.Component("metrics-registry")
+
+// DefaultMetrics are the default metrics registered through global metrics
+// registry. This is written to at initialization time only, and is read only
+// after initialization.
+var DefaultMetrics = stats.NewMetricSet()
+
+// MetricDescriptor is the data for a registered metric.
+type MetricDescriptor struct {
+	// The name of this metric. This name must be unique across the whole binary
+	// (including any per call metrics). See
+	// https://github.com/grpc/proposal/blob/master/A79-non-per-call-metrics-architecture.md#metric-instrument-naming-conventions
+	// for metric naming conventions.
+	Name string
+	// The description of this metric.
+	Description string
+	// The unit (e.g. entries, seconds) of this metric.
+	Unit string
+	// The required label keys for this metric. These are intended to be
+	// attached to metrics emitted from a stats handler.
+	Labels []string
+	// The optional label keys for this metric. These are intended to be
+	// attached to metrics emitted from a stats handler if configured.
+	OptionalLabels []string
+	// Whether this metric is on by default.
+	Default bool
+	// The type of metric. This is set by the metric registry, and not intended
+	// to be set by a component registering a metric.
+	Type MetricType
+	// Bounds are the bounds of this metric. This only applies to histogram
+	// metrics. If unset or set with length 0, stats handlers will fall back to
+	// default bounds.
+	Bounds []float64
+}
+
+// MetricType is the type of metric.
+type MetricType int
+
+// Type of metric supported by this instrument registry.
+const (
+	MetricTypeIntCount MetricType = iota
+	MetricTypeFloatCount
+	MetricTypeIntHisto
+	MetricTypeFloatHisto
+	MetricTypeIntGauge
+)
+
+// Int64CountHandle is a typed handle for an int count metric. This handle
+// is passed at the recording point in order to know which metric to record
+// on.
+type Int64CountHandle MetricDescriptor
+
+// Descriptor returns the int64 count handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Int64CountHandle) Descriptor() *MetricDescriptor {
+	return (*MetricDescriptor)(h)
+}
+
+// Record records the int64 count value on the metrics recorder provided.
+func (h *Int64CountHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
+	recorder.RecordInt64Count(h, incr, labels...)
+}
+
+// Float64CountHandle is a typed handle for a float count metric. This handle is
+// passed at the recording point in order to know which metric to record on.
+type Float64CountHandle MetricDescriptor
+
+// Descriptor returns the float64 count handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Float64CountHandle) Descriptor() *MetricDescriptor {
+	return (*MetricDescriptor)(h)
+}
+
+// Record records the float64 count value on the metrics recorder provided.
+func (h *Float64CountHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) {
+	recorder.RecordFloat64Count(h, incr, labels...)
+}
+
+// Int64HistoHandle is a typed handle for an int histogram metric. This handle
+// is passed at the recording point in order to know which metric to record on.
+type Int64HistoHandle MetricDescriptor
+
+// Descriptor returns the int64 histo handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Int64HistoHandle) Descriptor() *MetricDescriptor {
+	return (*MetricDescriptor)(h)
+}
+
+// Record records the int64 histo value on the metrics recorder provided.
+func (h *Int64HistoHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
+	recorder.RecordInt64Histo(h, incr, labels...)
+}
+
+// Float64HistoHandle is a typed handle for a float histogram metric. This
+// handle is passed at the recording point in order to know which metric to
+// record on.
+type Float64HistoHandle MetricDescriptor
+
+// Descriptor returns the float64 histo handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Float64HistoHandle) Descriptor() *MetricDescriptor {
+	return (*MetricDescriptor)(h)
+}
+
+// Record records the float64 histo value on the metrics recorder provided.
+func (h *Float64HistoHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) {
+	recorder.RecordFloat64Histo(h, incr, labels...)
+}
+
+// Int64GaugeHandle is a typed handle for an int gauge metric. This handle is
+// passed at the recording point in order to know which metric to record on.
+type Int64GaugeHandle MetricDescriptor
+
+// Descriptor returns the int64 gauge handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Int64GaugeHandle) Descriptor() *MetricDescriptor {
+	return (*MetricDescriptor)(h)
+}
+
+// Record records the int64 gauge value on the metrics recorder provided.
+func (h *Int64GaugeHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
+	recorder.RecordInt64Gauge(h, incr, labels...)
+}
+
+// registeredMetrics are the registered metric descriptor names.
+var registeredMetrics = make(map[string]bool)
+
+// metricsRegistry contains all of the registered metrics.
+//
+// This is written to only at init time, and read only after that.
+var metricsRegistry = make(map[string]*MetricDescriptor)
+
+// DescriptorForMetric returns the MetricDescriptor from the global registry.
+//
+// Returns nil if MetricDescriptor not present.
+func DescriptorForMetric(metricName string) *MetricDescriptor {
+	return metricsRegistry[metricName]
+}
+
+func registerMetric(metricName string, def bool) {
+	if registeredMetrics[metricName] {
+		logger.Fatalf("metric %v already registered", metricName)
+	}
+	registeredMetrics[metricName] = true
+	if def {
+		DefaultMetrics = DefaultMetrics.Add(metricName)
+	}
+}
+
+// RegisterInt64Count registers the metric description onto the global registry.
+// It returns a typed handle to use for recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64Count(descriptor MetricDescriptor) *Int64CountHandle {
+	registerMetric(descriptor.Name, descriptor.Default)
+	descriptor.Type = MetricTypeIntCount
+	descPtr := &descriptor
+	metricsRegistry[descriptor.Name] = descPtr
+	return (*Int64CountHandle)(descPtr)
+}
+
+// RegisterFloat64Count registers the metric description onto the global
+// registry. It returns a typed handle to use for recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterFloat64Count(descriptor MetricDescriptor) *Float64CountHandle {
+	registerMetric(descriptor.Name, descriptor.Default)
+	descriptor.Type = MetricTypeFloatCount
+	descPtr := &descriptor
+	metricsRegistry[descriptor.Name] = descPtr
+	return (*Float64CountHandle)(descPtr)
+}
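The Register* helpers here all follow the same shape: panic on duplicate names, stamp the MetricType, and hand back a typed handle that is just the descriptor under a different type. A small hedged sketch of the intended flow, with a hypothetical metric name; the MetricsRecorder is normally supplied to a component by its stats-handler plumbing rather than constructed by hand:

```go
package example

import (
	estats "google.golang.org/grpc/experimental/stats"
)

// A hypothetical metric registered at package-init time, as the NOTE above
// requires; duplicate names would make registerMetric log fatally.
var pickCount = estats.RegisterInt64Count(estats.MetricDescriptor{
	Name:        "grpc.example.picks", // hypothetical name
	Description: "Number of picker invocations.",
	Unit:        "pick",
	Labels:      []string{"grpc.target"},
	Default:     true, // also added to DefaultMetrics
})

// record shows the call shape: label values follow the order of the Labels
// declared on the descriptor.
func record(r estats.MetricsRecorder, target string) {
	pickCount.Record(r, 1, target)
}
```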
+// RegisterInt64Histo registers the metric description onto the global registry.
+// It returns a typed handle to use for recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64Histo(descriptor MetricDescriptor) *Int64HistoHandle {
+	registerMetric(descriptor.Name, descriptor.Default)
+	descriptor.Type = MetricTypeIntHisto
+	descPtr := &descriptor
+	metricsRegistry[descriptor.Name] = descPtr
+	return (*Int64HistoHandle)(descPtr)
+}
+
+// RegisterFloat64Histo registers the metric description onto the global
+// registry. It returns a typed handle to use for recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterFloat64Histo(descriptor MetricDescriptor) *Float64HistoHandle {
+	registerMetric(descriptor.Name, descriptor.Default)
+	descriptor.Type = MetricTypeFloatHisto
+	descPtr := &descriptor
+	metricsRegistry[descriptor.Name] = descPtr
+	return (*Float64HistoHandle)(descPtr)
+}
+
+// RegisterInt64Gauge registers the metric description onto the global registry.
+// It returns a typed handle to use for recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64Gauge(descriptor MetricDescriptor) *Int64GaugeHandle {
+	registerMetric(descriptor.Name, descriptor.Default)
+	descriptor.Type = MetricTypeIntGauge
+	descPtr := &descriptor
+	metricsRegistry[descriptor.Name] = descPtr
+	return (*Int64GaugeHandle)(descPtr)
+}
+
+// snapshotMetricsRegistryForTesting snapshots the global data of the metrics
+// registry. Returns a cleanup function that sets the metrics registry to its
+// original state.
+func snapshotMetricsRegistryForTesting() func() {
+	oldDefaultMetrics := DefaultMetrics
+	oldRegisteredMetrics := registeredMetrics
+	oldMetricsRegistry := metricsRegistry
+
+	registeredMetrics = make(map[string]bool)
+	metricsRegistry = make(map[string]*MetricDescriptor)
+	maps.Copy(registeredMetrics, oldRegisteredMetrics)
+	maps.Copy(metricsRegistry, oldMetricsRegistry)
+
+	return func() {
+		DefaultMetrics = oldDefaultMetrics
+		registeredMetrics = oldRegisteredMetrics
+		metricsRegistry = oldMetricsRegistry
+	}
+}
diff --git a/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/vendor/google.golang.org/grpc/experimental/stats/metrics.go
new file mode 100644
index 00000000000..ee1423605ab
--- /dev/null
+++ b/vendor/google.golang.org/grpc/experimental/stats/metrics.go
@@ -0,0 +1,54 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package stats contains experimental metrics/stats APIs.
+package stats
+
+import "google.golang.org/grpc/stats"
+
+// MetricsRecorder records measurements on metrics derived from the metric
+// registry.
+type MetricsRecorder interface { + // RecordInt64Count records the measurement alongside labels on the int + // count associated with the provided handle. + RecordInt64Count(handle *Int64CountHandle, incr int64, labels ...string) + // RecordFloat64Count records the measurement alongside labels on the float + // count associated with the provided handle. + RecordFloat64Count(handle *Float64CountHandle, incr float64, labels ...string) + // RecordInt64Histo records the measurement alongside labels on the int + // histo associated with the provided handle. + RecordInt64Histo(handle *Int64HistoHandle, incr int64, labels ...string) + // RecordFloat64Histo records the measurement alongside labels on the float + // histo associated with the provided handle. + RecordFloat64Histo(handle *Float64HistoHandle, incr float64, labels ...string) + // RecordInt64Gauge records the measurement alongside labels on the int + // gauge associated with the provided handle. + RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string) +} + +// Metrics is an experimental legacy alias of the now-stable stats.MetricSet. +// Metrics will be deleted in a future release. +type Metrics = stats.MetricSet + +// Metric was replaced by direct usage of strings. +type Metric = string + +// NewMetrics is an experimental legacy alias of the now-stable +// stats.NewMetricSet. NewMetrics will be deleted in a future release. +func NewMetrics(metrics ...Metric) *Metrics { + return stats.NewMetricSet(metrics...) +} diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go index ac73c9ced25..f1ae080dcb8 100644 --- a/vendor/google.golang.org/grpc/grpclog/component.go +++ b/vendor/google.golang.org/grpc/grpclog/component.go @@ -20,8 +20,6 @@ package grpclog import ( "fmt" - - "google.golang.org/grpc/internal/grpclog" ) // componentData records the settings for a component. @@ -33,22 +31,22 @@ var cache = map[string]*componentData{} func (c *componentData) InfoDepth(depth int, args ...any) { args = append([]any{"[" + string(c.name) + "]"}, args...) - grpclog.InfoDepth(depth+1, args...) + InfoDepth(depth+1, args...) } func (c *componentData) WarningDepth(depth int, args ...any) { args = append([]any{"[" + string(c.name) + "]"}, args...) - grpclog.WarningDepth(depth+1, args...) + WarningDepth(depth+1, args...) } func (c *componentData) ErrorDepth(depth int, args ...any) { args = append([]any{"[" + string(c.name) + "]"}, args...) - grpclog.ErrorDepth(depth+1, args...) + ErrorDepth(depth+1, args...) } func (c *componentData) FatalDepth(depth int, args ...any) { args = append([]any{"[" + string(c.name) + "]"}, args...) - grpclog.FatalDepth(depth+1, args...) + FatalDepth(depth+1, args...) } func (c *componentData) Info(args ...any) { diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go index 16928c9cb99..db320105e64 100644 --- a/vendor/google.golang.org/grpc/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -18,18 +18,15 @@ // Package grpclog defines logging for grpc. // -// All logs in transport and grpclb packages only go to verbose level 2. -// All logs in other packages in grpc are logged in spite of the verbosity level. -// -// In the default logger, -// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL, -// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL. 
-package grpclog // import "google.golang.org/grpc/grpclog" +// In the default logger, severity level can be set by environment variable +// GRPC_GO_LOG_SEVERITY_LEVEL, verbosity level can be set by +// GRPC_GO_LOG_VERBOSITY_LEVEL. +package grpclog import ( "os" - "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/grpclog/internal" ) func init() { @@ -38,58 +35,58 @@ func init() { // V reports whether verbosity level l is at least the requested verbose level. func V(l int) bool { - return grpclog.Logger.V(l) + return internal.LoggerV2Impl.V(l) } // Info logs to the INFO log. func Info(args ...any) { - grpclog.Logger.Info(args...) + internal.LoggerV2Impl.Info(args...) } // Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. func Infof(format string, args ...any) { - grpclog.Logger.Infof(format, args...) + internal.LoggerV2Impl.Infof(format, args...) } // Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. func Infoln(args ...any) { - grpclog.Logger.Infoln(args...) + internal.LoggerV2Impl.Infoln(args...) } // Warning logs to the WARNING log. func Warning(args ...any) { - grpclog.Logger.Warning(args...) + internal.LoggerV2Impl.Warning(args...) } // Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. func Warningf(format string, args ...any) { - grpclog.Logger.Warningf(format, args...) + internal.LoggerV2Impl.Warningf(format, args...) } // Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. func Warningln(args ...any) { - grpclog.Logger.Warningln(args...) + internal.LoggerV2Impl.Warningln(args...) } // Error logs to the ERROR log. func Error(args ...any) { - grpclog.Logger.Error(args...) + internal.LoggerV2Impl.Error(args...) } // Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. func Errorf(format string, args ...any) { - grpclog.Logger.Errorf(format, args...) + internal.LoggerV2Impl.Errorf(format, args...) } // Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. func Errorln(args ...any) { - grpclog.Logger.Errorln(args...) + internal.LoggerV2Impl.Errorln(args...) } // Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. // It calls os.Exit() with exit code 1. func Fatal(args ...any) { - grpclog.Logger.Fatal(args...) + internal.LoggerV2Impl.Fatal(args...) // Make sure fatal logs will exit. os.Exit(1) } @@ -97,15 +94,15 @@ func Fatal(args ...any) { // Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. // It calls os.Exit() with exit code 1. func Fatalf(format string, args ...any) { - grpclog.Logger.Fatalf(format, args...) + internal.LoggerV2Impl.Fatalf(format, args...) // Make sure fatal logs will exit. os.Exit(1) } // Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. -// It calle os.Exit()) with exit code 1. +// It calls os.Exit() with exit code 1. func Fatalln(args ...any) { - grpclog.Logger.Fatalln(args...) + internal.LoggerV2Impl.Fatalln(args...) // Make sure fatal logs will exit. os.Exit(1) } @@ -114,19 +111,76 @@ func Fatalln(args ...any) { // // Deprecated: use Info. func Print(args ...any) { - grpclog.Logger.Info(args...) + internal.LoggerV2Impl.Info(args...) } // Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. // // Deprecated: use Infof. func Printf(format string, args ...any) { - grpclog.Logger.Infof(format, args...) + internal.LoggerV2Impl.Infof(format, args...) 
} // Println prints to the logger. Arguments are handled in the manner of fmt.Println. // // Deprecated: use Infoln. func Println(args ...any) { - grpclog.Logger.Infoln(args...) + internal.LoggerV2Impl.Infoln(args...) +} + +// InfoDepth logs to the INFO log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func InfoDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.InfoDepth(depth, args...) + } else { + internal.LoggerV2Impl.Infoln(args...) + } +} + +// WarningDepth logs to the WARNING log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WarningDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.WarningDepth(depth, args...) + } else { + internal.LoggerV2Impl.Warningln(args...) + } +} + +// ErrorDepth logs to the ERROR log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ErrorDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.ErrorDepth(depth, args...) + } else { + internal.LoggerV2Impl.Errorln(args...) + } +} + +// FatalDepth logs to the FATAL log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func FatalDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.FatalDepth(depth, args...) + } else { + internal.LoggerV2Impl.Fatalln(args...) + } + os.Exit(1) } diff --git a/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go b/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go new file mode 100644 index 00000000000..59c03bc14c2 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go @@ -0,0 +1,26 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains functionality internal to the grpclog package. +package internal + +// LoggerV2Impl is the logger used for the non-depth log functions. +var LoggerV2Impl LoggerV2 + +// DepthLoggerV2Impl is the logger used for the depth log functions. +var DepthLoggerV2Impl DepthLoggerV2 diff --git a/vendor/google.golang.org/grpc/grpclog/internal/logger.go b/vendor/google.golang.org/grpc/grpclog/internal/logger.go new file mode 100644 index 00000000000..e524fdd40b2 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/internal/logger.go @@ -0,0 +1,87 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+// Logger mimics golang's standard Logger as an interface.
+//
+// Deprecated: use LoggerV2.
+type Logger interface {
+	Fatal(args ...any)
+	Fatalf(format string, args ...any)
+	Fatalln(args ...any)
+	Print(args ...any)
+	Printf(format string, args ...any)
+	Println(args ...any)
+}
+
+// LoggerWrapper wraps Logger into a LoggerV2.
+type LoggerWrapper struct {
+	Logger
+}
+
+// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
+func (l *LoggerWrapper) Info(args ...any) {
+	l.Logger.Print(args...)
+}
+
+// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
+func (l *LoggerWrapper) Infoln(args ...any) {
+	l.Logger.Println(args...)
+}
+
+// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
+func (l *LoggerWrapper) Infof(format string, args ...any) {
+	l.Logger.Printf(format, args...)
+}
+
+// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
+func (l *LoggerWrapper) Warning(args ...any) {
+	l.Logger.Print(args...)
+}
+
+// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
+func (l *LoggerWrapper) Warningln(args ...any) {
+	l.Logger.Println(args...)
+}
+
+// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
+func (l *LoggerWrapper) Warningf(format string, args ...any) {
+	l.Logger.Printf(format, args...)
+}
+
+// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
+func (l *LoggerWrapper) Error(args ...any) {
+	l.Logger.Print(args...)
+}
+
+// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
+func (l *LoggerWrapper) Errorln(args ...any) {
+	l.Logger.Println(args...)
+}
+
+// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
+func (l *LoggerWrapper) Errorf(format string, args ...any) {
+	l.Logger.Printf(format, args...)
+}
+
+// V reports whether verbosity level l is at least the requested verbose level.
+func (*LoggerWrapper) V(int) bool {
+	// Returns true for all verbose levels.
+	return true
+}
diff --git a/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go
new file mode 100644
index 00000000000..ed90060c3cb
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go
@@ -0,0 +1,267 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * + */ + +package internal + +import ( + "encoding/json" + "fmt" + "io" + "log" + "os" +) + +// LoggerV2 does underlying logging work for grpclog. +type LoggerV2 interface { + // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. + Info(args ...any) + // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. + Infoln(args ...any) + // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. + Infof(format string, args ...any) + // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. + Warning(args ...any) + // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. + Warningln(args ...any) + // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. + Warningf(format string, args ...any) + // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. + Error(args ...any) + // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + Errorln(args ...any) + // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + Errorf(format string, args ...any) + // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatal(args ...any) + // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalln(args ...any) + // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalf(format string, args ...any) + // V reports whether verbosity level l is at least the requested verbose level. + V(l int) bool +} + +// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements +// DepthLoggerV2, the below functions will be called with the appropriate stack +// depth set for trivial functions the logger may ignore. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type DepthLoggerV2 interface { + LoggerV2 + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. + InfoDepth(depth int, args ...any) + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. + WarningDepth(depth int, args ...any) + // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. + ErrorDepth(depth int, args ...any) + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. + FatalDepth(depth int, args ...any) +} + +const ( + // infoLog indicates Info severity. + infoLog int = iota + // warningLog indicates Warning severity. + warningLog + // errorLog indicates Error severity. + errorLog + // fatalLog indicates Fatal severity. + fatalLog +) + +// severityName contains the string representation of each severity. +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// sprintf is fmt.Sprintf. +// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily. 
+var sprintf = fmt.Sprintf + +// sprint is fmt.Sprint. +// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily. +var sprint = fmt.Sprint + +// sprintln is fmt.Sprintln. +// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily. +var sprintln = fmt.Sprintln + +// exit is os.Exit. +// This var exists to make it possible to test functions calling os.Exit. +var exit = os.Exit + +// loggerT is the default logger used by grpclog. +type loggerT struct { + m []*log.Logger + v int + jsonFormat bool +} + +func (g *loggerT) output(severity int, s string) { + sevStr := severityName[severity] + if !g.jsonFormat { + g.m[severity].Output(2, sevStr+": "+s) + return + } + // TODO: we can also include the logging component, but that needs more + // (API) changes. + b, _ := json.Marshal(map[string]string{ + "severity": sevStr, + "message": s, + }) + g.m[severity].Output(2, string(b)) +} + +func (g *loggerT) printf(severity int, format string, args ...any) { + // Note the discard check is duplicated in each print func, rather than in + // output, to avoid the expensive Sprint calls. + // De-duplicating this by moving to output would be a significant performance regression! + if lg := g.m[severity]; lg.Writer() == io.Discard { + return + } + g.output(severity, sprintf(format, args...)) +} + +func (g *loggerT) print(severity int, v ...any) { + if lg := g.m[severity]; lg.Writer() == io.Discard { + return + } + g.output(severity, sprint(v...)) +} + +func (g *loggerT) println(severity int, v ...any) { + if lg := g.m[severity]; lg.Writer() == io.Discard { + return + } + g.output(severity, sprintln(v...)) +} + +func (g *loggerT) Info(args ...any) { + g.print(infoLog, args...) +} + +func (g *loggerT) Infoln(args ...any) { + g.println(infoLog, args...) +} + +func (g *loggerT) Infof(format string, args ...any) { + g.printf(infoLog, format, args...) +} + +func (g *loggerT) Warning(args ...any) { + g.print(warningLog, args...) +} + +func (g *loggerT) Warningln(args ...any) { + g.println(warningLog, args...) +} + +func (g *loggerT) Warningf(format string, args ...any) { + g.printf(warningLog, format, args...) +} + +func (g *loggerT) Error(args ...any) { + g.print(errorLog, args...) +} + +func (g *loggerT) Errorln(args ...any) { + g.println(errorLog, args...) +} + +func (g *loggerT) Errorf(format string, args ...any) { + g.printf(errorLog, format, args...) +} + +func (g *loggerT) Fatal(args ...any) { + g.print(fatalLog, args...) + exit(1) +} + +func (g *loggerT) Fatalln(args ...any) { + g.println(fatalLog, args...) + exit(1) +} + +func (g *loggerT) Fatalf(format string, args ...any) { + g.printf(fatalLog, format, args...) + exit(1) +} + +func (g *loggerT) V(l int) bool { + return l <= g.v +} + +// LoggerV2Config configures the LoggerV2 implementation. +type LoggerV2Config struct { + // Verbosity sets the verbosity level of the logger. + Verbosity int + // FormatJSON controls whether the logger should output logs in JSON format. + FormatJSON bool +} + +// combineLoggers returns a combined logger for both higher & lower severity logs, +// or only one if the other is io.Discard. +// +// This uses io.Discard instead of io.MultiWriter when all loggers +// are set to io.Discard. Both this package and the standard log package have +// significant optimizations for io.Discard, which io.MultiWriter lacks (as of +// this writing). 
+func combineLoggers(lower, higher io.Writer) io.Writer { + if lower == io.Discard { + return higher + } + if higher == io.Discard { + return lower + } + return io.MultiWriter(lower, higher) +} + +// NewLoggerV2 creates a new LoggerV2 instance with the provided configuration. +// The infoW, warningW, and errorW writers are used to write log messages of +// different severity levels. +func NewLoggerV2(infoW, warningW, errorW io.Writer, c LoggerV2Config) LoggerV2 { + flag := log.LstdFlags + if c.FormatJSON { + flag = 0 + } + + warningW = combineLoggers(infoW, warningW) + errorW = combineLoggers(errorW, warningW) + + fatalW := errorW + + m := []*log.Logger{ + log.New(infoW, "", flag), + log.New(warningW, "", flag), + log.New(errorW, "", flag), + log.New(fatalW, "", flag), + } + return &loggerT{m: m, v: c.Verbosity, jsonFormat: c.FormatJSON} +} diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go index b1674d8267c..4b203585707 100644 --- a/vendor/google.golang.org/grpc/grpclog/logger.go +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -18,70 +18,17 @@ package grpclog -import "google.golang.org/grpc/internal/grpclog" +import "google.golang.org/grpc/grpclog/internal" // Logger mimics golang's standard Logger as an interface. // // Deprecated: use LoggerV2. -type Logger interface { - Fatal(args ...any) - Fatalf(format string, args ...any) - Fatalln(args ...any) - Print(args ...any) - Printf(format string, args ...any) - Println(args ...any) -} +type Logger internal.Logger // SetLogger sets the logger that is used in grpc. Call only from // init() functions. // // Deprecated: use SetLoggerV2. func SetLogger(l Logger) { - grpclog.Logger = &loggerWrapper{Logger: l} -} - -// loggerWrapper wraps Logger into a LoggerV2. -type loggerWrapper struct { - Logger -} - -func (g *loggerWrapper) Info(args ...any) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Infoln(args ...any) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Infof(format string, args ...any) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) Warning(args ...any) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Warningln(args ...any) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Warningf(format string, args ...any) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) Error(args ...any) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Errorln(args ...any) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Errorf(format string, args ...any) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) V(l int) bool { - // Returns true for all verbose level. - return true + internal.LoggerV2Impl = &internal.LoggerWrapper{Logger: l} } diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go index ecfd36d7130..892dc13d164 100644 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -19,52 +19,16 @@ package grpclog import ( - "encoding/json" - "fmt" "io" - "log" "os" "strconv" "strings" - "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/grpclog/internal" ) // LoggerV2 does underlying logging work for grpclog. -type LoggerV2 interface { - // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. - Info(args ...any) - // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. 
- Infoln(args ...any) - // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. - Infof(format string, args ...any) - // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. - Warning(args ...any) - // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. - Warningln(args ...any) - // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. - Warningf(format string, args ...any) - // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. - Error(args ...any) - // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - Errorln(args ...any) - // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - Errorf(format string, args ...any) - // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatal(args ...any) - // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatalln(args ...any) - // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatalf(format string, args ...any) - // V reports whether verbosity level l is at least the requested verbose level. - V(l int) bool -} +type LoggerV2 internal.LoggerV2 // SetLoggerV2 sets logger that is used in grpc to a V2 logger. // Not mutex-protected, should be called before any gRPC functions. @@ -72,34 +36,8 @@ func SetLoggerV2(l LoggerV2) { if _, ok := l.(*componentData); ok { panic("cannot use component logger as grpclog logger") } - grpclog.Logger = l - grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2) -} - -const ( - // infoLog indicates Info severity. - infoLog int = iota - // warningLog indicates Warning severity. - warningLog - // errorLog indicates Error severity. - errorLog - // fatalLog indicates Fatal severity. - fatalLog -) - -// severityName contains the string representation of each severity. -var severityName = []string{ - infoLog: "INFO", - warningLog: "WARNING", - errorLog: "ERROR", - fatalLog: "FATAL", -} - -// loggerT is the default logger used by grpclog. -type loggerT struct { - m []*log.Logger - v int - jsonFormat bool + internal.LoggerV2Impl = l + internal.DepthLoggerV2Impl, _ = l.(internal.DepthLoggerV2) } // NewLoggerV2 creates a loggerV2 with the provided writers. @@ -108,32 +46,13 @@ type loggerT struct { // Warning logs will be written to warningW and infoW. // Info logs will be written to infoW. func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { - return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{}) + return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{}) } // NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and // verbosity level. 
func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { - return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{verbose: v}) -} - -type loggerV2Config struct { - verbose int - jsonFormat bool -} - -func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) LoggerV2 { - var m []*log.Logger - flag := log.LstdFlags - if c.jsonFormat { - flag = 0 - } - m = append(m, log.New(infoW, "", flag)) - m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) - ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. - m = append(m, log.New(ew, "", flag)) - m = append(m, log.New(ew, "", flag)) - return &loggerT{m: m, v: c.verbose, jsonFormat: c.jsonFormat} + return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{Verbosity: v}) } // newLoggerV2 creates a loggerV2 to be used as default logger. @@ -161,80 +80,10 @@ func newLoggerV2() LoggerV2 { jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json") - return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{ - verbose: v, - jsonFormat: jsonFormat, - }) -} - -func (g *loggerT) output(severity int, s string) { - sevStr := severityName[severity] - if !g.jsonFormat { - g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) - return - } - // TODO: we can also include the logging component, but that needs more - // (API) changes. - b, _ := json.Marshal(map[string]string{ - "severity": sevStr, - "message": s, + return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{ + Verbosity: v, + FormatJSON: jsonFormat, }) - g.m[severity].Output(2, string(b)) -} - -func (g *loggerT) Info(args ...any) { - g.output(infoLog, fmt.Sprint(args...)) -} - -func (g *loggerT) Infoln(args ...any) { - g.output(infoLog, fmt.Sprintln(args...)) -} - -func (g *loggerT) Infof(format string, args ...any) { - g.output(infoLog, fmt.Sprintf(format, args...)) -} - -func (g *loggerT) Warning(args ...any) { - g.output(warningLog, fmt.Sprint(args...)) -} - -func (g *loggerT) Warningln(args ...any) { - g.output(warningLog, fmt.Sprintln(args...)) -} - -func (g *loggerT) Warningf(format string, args ...any) { - g.output(warningLog, fmt.Sprintf(format, args...)) -} - -func (g *loggerT) Error(args ...any) { - g.output(errorLog, fmt.Sprint(args...)) -} - -func (g *loggerT) Errorln(args ...any) { - g.output(errorLog, fmt.Sprintln(args...)) -} - -func (g *loggerT) Errorf(format string, args ...any) { - g.output(errorLog, fmt.Sprintf(format, args...)) -} - -func (g *loggerT) Fatal(args ...any) { - g.output(fatalLog, fmt.Sprint(args...)) - os.Exit(1) -} - -func (g *loggerT) Fatalln(args ...any) { - g.output(fatalLog, fmt.Sprintln(args...)) - os.Exit(1) -} - -func (g *loggerT) Fatalf(format string, args ...any) { - g.output(fatalLog, fmt.Sprintf(format, args...)) - os.Exit(1) -} - -func (g *loggerT) V(l int) bool { - return l <= g.v } // DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements @@ -245,14 +94,4 @@ func (g *loggerT) V(l int) bool { // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. -type DepthLoggerV2 interface { - LoggerV2 - // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. - InfoDepth(depth int, args ...any) - // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. 
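For orientation, newLoggerV2 above derives the default logger from the GRPC_GO_LOG_SEVERITY_LEVEL, GRPC_GO_LOG_VERBOSITY_LEVEL, and GRPC_GO_LOG_FORMATTER environment variables; the same effect can be had explicitly through the public constructors (SetLoggerV2 is not mutex-protected, so call it before any other gRPC function). A sketch roughly equivalent to severity=warning with verbosity 2:

```go
package main

import (
	"io"
	"os"

	"google.golang.org/grpc/grpclog"
)

func main() {
	// Discard INFO, keep WARNING and ERROR on stderr (FATAL shares the error
	// writer), and answer V(l) as true for l <= 2.
	grpclog.SetLoggerV2(grpclog.NewLoggerV2WithVerbosity(
		io.Discard, // info
		os.Stderr,  // warning
		os.Stderr,  // error
		2,
	))
	grpclog.Warning("this is written to stderr")
	grpclog.Info("this is discarded")
}
```

Routing the info writer to io.Discard also exercises the fast path noted above: the print helpers skip the fmt.Sprint call entirely when a severity's writer is io.Discard.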
- WarningDepth(depth int, args ...any) - // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. - ErrorDepth(depth int, args ...any) - // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. - FatalDepth(depth int, args ...any) -} +type DepthLoggerV2 internal.DepthLoggerV2 diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index 38b88350735..26e16d91924 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 +// protoc-gen-go v1.35.1 +// protoc v5.27.1 // source: grpc/health/v1/health.proto package grpc_health_v1 @@ -99,11 +99,9 @@ type HealthCheckRequest struct { func (x *HealthCheckRequest) Reset() { *x = HealthCheckRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_health_v1_health_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_health_v1_health_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HealthCheckRequest) String() string { @@ -114,7 +112,7 @@ func (*HealthCheckRequest) ProtoMessage() {} func (x *HealthCheckRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_health_v1_health_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -146,11 +144,9 @@ type HealthCheckResponse struct { func (x *HealthCheckResponse) Reset() { *x = HealthCheckResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_health_v1_health_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_health_v1_health_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HealthCheckResponse) String() string { @@ -161,7 +157,7 @@ func (*HealthCheckResponse) ProtoMessage() {} func (x *HealthCheckResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_health_v1_health_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -237,7 +233,7 @@ func file_grpc_health_v1_health_proto_rawDescGZIP() []byte { var file_grpc_health_v1_health_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_grpc_health_v1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_grpc_health_v1_health_proto_goTypes = []interface{}{ +var file_grpc_health_v1_health_proto_goTypes = []any{ (HealthCheckResponse_ServingStatus)(0), // 0: grpc.health.v1.HealthCheckResponse.ServingStatus (*HealthCheckRequest)(nil), // 1: grpc.health.v1.HealthCheckRequest (*HealthCheckResponse)(nil), // 2: grpc.health.v1.HealthCheckResponse @@ -260,32 +256,6 @@ func file_grpc_health_v1_health_proto_init() { if File_grpc_health_v1_health_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HealthCheckRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - 
case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HealthCheckResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go index 51b736ba06e..f96b8ab4927 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.4.0 -// - protoc v4.25.2 +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.27.1 // source: grpc/health/v1/health.proto package grpc_health_v1 @@ -32,8 +32,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.62.0 or later. -const _ = grpc.SupportPackageIsVersion8 +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 const ( Health_Check_FullMethodName = "/grpc.health.v1.Health/Check" @@ -73,7 +73,7 @@ type HealthClient interface { // should assume this method is not supported and should not retry the // call. If the call terminates with any other status (including OK), // clients should retry the call with appropriate exponential backoff. - Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) + Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error) } type healthClient struct { @@ -94,13 +94,13 @@ func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts . return out, nil } -func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) { +func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, cOpts...) if err != nil { return nil, err } - x := &healthWatchClient{ClientStream: stream} + x := &grpc.GenericClientStream[HealthCheckRequest, HealthCheckResponse]{ClientStream: stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -110,26 +110,12 @@ func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts . return x, nil } -type Health_WatchClient interface { - Recv() (*HealthCheckResponse, error) - grpc.ClientStream -} - -type healthWatchClient struct { - grpc.ClientStream -} - -func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) { - m := new(HealthCheckResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type Health_WatchClient = grpc.ServerStreamingClient[HealthCheckResponse] // HealthServer is the server API for Health service. 
// All implementations should embed UnimplementedHealthServer -// for forward compatibility +// for forward compatibility. // // Health is gRPC's mechanism for checking whether a server is able to handle // RPCs. Its semantics are documented in @@ -160,19 +146,23 @@ type HealthServer interface { // should assume this method is not supported and should not retry the // call. If the call terminates with any other status (including OK), // clients should retry the call with appropriate exponential backoff. - Watch(*HealthCheckRequest, Health_WatchServer) error + Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error } -// UnimplementedHealthServer should be embedded to have forward compatible implementations. -type UnimplementedHealthServer struct { -} +// UnimplementedHealthServer should be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedHealthServer struct{} func (UnimplementedHealthServer) Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Check not implemented") } -func (UnimplementedHealthServer) Watch(*HealthCheckRequest, Health_WatchServer) error { +func (UnimplementedHealthServer) Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error { return status.Errorf(codes.Unimplemented, "method Watch not implemented") } +func (UnimplementedHealthServer) testEmbeddedByValue() {} // UnsafeHealthServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to HealthServer will @@ -182,6 +172,13 @@ type UnsafeHealthServer interface { } func RegisterHealthServer(s grpc.ServiceRegistrar, srv HealthServer) { + // If the following call panics, it indicates UnimplementedHealthServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&Health_ServiceDesc, srv) } @@ -208,21 +205,11 @@ func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { if err := stream.RecvMsg(m); err != nil { return err } - return srv.(HealthServer).Watch(m, &healthWatchServer{ServerStream: stream}) -} - -type Health_WatchServer interface { - Send(*HealthCheckResponse) error - grpc.ServerStream + return srv.(HealthServer).Watch(m, &grpc.GenericServerStream[HealthCheckRequest, HealthCheckResponse]{ServerStream: stream}) } -type healthWatchServer struct { - grpc.ServerStream -} - -func (x *healthWatchServer) Send(m *HealthCheckResponse) error { - return x.ServerStream.SendMsg(m) -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type Health_WatchServer = grpc.ServerStreamingServer[HealthCheckResponse] // Health_ServiceDesc is the grpc.ServiceDesc for Health service. 
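Two of the regenerated-API changes above are visible to callers: servers now embed UnimplementedHealthServer by value (a nil pointer embed trips the new registration-time check), and Health_WatchClient is now a plain alias for grpc.ServerStreamingClient[HealthCheckResponse]. A minimal sketch of user code against the regenerated package, assuming a recent grpc-go; the names healthService and watchHealth are illustrative only and not part of this diff:

```go
package main

import (
	"context"
	"io"
	"log"

	"google.golang.org/grpc"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

// healthService embeds UnimplementedHealthServer by value; embedding a nil
// *pointer* would now panic inside RegisterHealthServer.
type healthService struct {
	healthpb.UnimplementedHealthServer
}

func (healthService) Check(context.Context, *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
	return &healthpb.HealthCheckResponse{Status: healthpb.HealthCheckResponse_SERVING}, nil
}

// watchHealth drains the Watch stream. The returned stream's static type is
// grpc.ServerStreamingClient[HealthCheckResponse]; the old Health_WatchClient
// name still works because it is now an alias for that type.
func watchHealth(ctx context.Context, cc *grpc.ClientConn) error {
	stream, err := healthpb.NewHealthClient(cc).Watch(ctx, &healthpb.HealthCheckRequest{})
	if err != nil {
		return err
	}
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		log.Printf("serving status: %v", resp.GetStatus())
	}
}

func main() {
	srv := grpc.NewServer()
	healthpb.RegisterHealthServer(srv, healthService{})
	// Listener setup and dialing are elided; watchHealth runs client-side.
}
```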
// It's only intended for direct use with grpc.RegisterService, diff --git a/vendor/google.golang.org/grpc/health/server.go b/vendor/google.golang.org/grpc/health/server.go index cce6312d77f..d4b4b708159 100644 --- a/vendor/google.golang.org/grpc/health/server.go +++ b/vendor/google.golang.org/grpc/health/server.go @@ -51,7 +51,7 @@ func NewServer() *Server { } // Check implements `service Health`. -func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) { +func (s *Server) Check(_ context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) { s.mu.RLock() defer s.mu.RUnlock() if servingStatus, ok := s.statusMap[in.Service]; ok { diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go index b15cf482d29..b6ae7f25850 100644 --- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go +++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go @@ -25,7 +25,7 @@ package backoff import ( "context" "errors" - "math/rand" + rand "math/rand/v2" "time" grpcbackoff "google.golang.org/grpc/backoff" diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go index 13821a92660..85540f86a73 100644 --- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go @@ -33,6 +33,8 @@ type lbConfig struct { childConfig serviceconfig.LoadBalancingConfig } +// ChildName returns the name of the child balancer of the gracefulswitch +// Balancer. func ChildName(l serviceconfig.LoadBalancingConfig) string { return l.(*lbConfig).childBuilder.Name() } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index aa4505a871d..9669328914a 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -106,7 +106,7 @@ func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry } // Log creates a proto binary log entry, and logs it to the sink. -func (ml *TruncatingMethodLogger) Log(ctx context.Context, c LogEntryConfig) { +func (ml *TruncatingMethodLogger) Log(_ context.Context, c LogEntryConfig) { ml.sink.Write(ml.Build(c)) } diff --git a/vendor/google.golang.org/grpc/internal/channelz/channel.go b/vendor/google.golang.org/grpc/internal/channelz/channel.go index d7e9e1d54ec..3ec662799a8 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/channel.go +++ b/vendor/google.golang.org/grpc/internal/channelz/channel.go @@ -43,6 +43,8 @@ type Channel struct { // Non-zero traceRefCount means the trace of this channel cannot be deleted. traceRefCount int32 + // ChannelMetrics holds connectivity state, target and call metrics for the + // channel within channelz. ChannelMetrics ChannelMetrics } @@ -50,6 +52,8 @@ type Channel struct { // nesting. func (c *Channel) channelzIdentifier() {} +// String returns a string representation of the Channel, including its parent +// entity and ID. func (c *Channel) String() string { if c.Parent == nil { return fmt.Sprintf("Channel #%d", c.ID) @@ -61,24 +65,31 @@ func (c *Channel) id() int64 { return c.ID } +// SubChans returns a copy of the map of sub-channels associated with the +// Channel. 
func (c *Channel) SubChans() map[int64]string { db.mu.RLock() defer db.mu.RUnlock() return copyMap(c.subChans) } +// NestedChans returns a copy of the map of nested channels associated with the +// Channel. func (c *Channel) NestedChans() map[int64]string { db.mu.RLock() defer db.mu.RUnlock() return copyMap(c.nestedChans) } +// Trace returns a copy of the Channel's trace data. func (c *Channel) Trace() *ChannelTrace { db.mu.RLock() defer db.mu.RUnlock() return c.trace.copy() } +// ChannelMetrics holds connectivity state, target and call metrics for the +// channel within channelz. type ChannelMetrics struct { // The current connectivity state of the channel. State atomic.Pointer[connectivity.State] @@ -136,12 +147,16 @@ func strFromPointer(s *string) string { return *s } +// String returns a string representation of the ChannelMetrics, including its +// state, target, and call metrics. func (c *ChannelMetrics) String() string { return fmt.Sprintf("State: %v, Target: %s, CallsStarted: %v, CallsSucceeded: %v, CallsFailed: %v, LastCallStartedTimestamp: %v", c.State.Load(), strFromPointer(c.Target.Load()), c.CallsStarted.Load(), c.CallsSucceeded.Load(), c.CallsFailed.Load(), c.LastCallStartedTimestamp.Load(), ) } +// NewChannelMetricForTesting creates a new instance of ChannelMetrics with +// specified initial values for testing purposes. func NewChannelMetricForTesting(state connectivity.State, target string, started, succeeded, failed, timestamp int64) *ChannelMetrics { c := &ChannelMetrics{} c.State.Store(&state) diff --git a/vendor/google.golang.org/grpc/internal/channelz/channelmap.go b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go index dfe18b08925..64c791953d0 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/channelmap.go +++ b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go @@ -46,7 +46,7 @@ type entry interface { // channelMap is the storage data structure for channelz. // -// Methods of channelMap can be divided in two two categories with respect to +// Methods of channelMap can be divided into two categories with respect to // locking. // // 1. Methods acquire the global lock. @@ -234,13 +234,6 @@ func copyMap(m map[int64]string) map[int64]string { return n } -func min(a, b int) int { - if a < b { - return a - } - return b -} - func (c *channelMap) getTopChannels(id int64, maxResults int) ([]*Channel, bool) { if maxResults <= 0 { maxResults = EntriesPerPage diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index 03e24e1507a..078bb81238b 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -33,7 +33,7 @@ var ( // outside this package except by tests. IDGen IDGenerator - db *channelMap = newChannelMap() + db = newChannelMap() // EntriesPerPage defines the number of channelz entries to be shown on a web page. EntriesPerPage = 50 curState int32 diff --git a/vendor/google.golang.org/grpc/internal/channelz/server.go b/vendor/google.golang.org/grpc/internal/channelz/server.go index cdfc49d6eac..b5a82499299 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/server.go +++ b/vendor/google.golang.org/grpc/internal/channelz/server.go @@ -59,6 +59,8 @@ func NewServerMetricsForTesting(started, succeeded, failed, timestamp int64) *Se return sm } +// CopyFrom copies the metrics data from the provided ServerMetrics +// instance into the current instance. 
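The unexported min helper deleted from channelmap.go above needs no replacement because Go 1.21 promoted min (and max) to predeclared builtins; a quick standalone illustration:

```go
package main

import "fmt"

func main() {
	// Since Go 1.21, min and max are predeclared for ordered types,
	// so local helpers like channelMap's min can simply be deleted.
	fmt.Println(min(3, 5), max(3, 5)) // prints: 3 5
}
```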
func (sm *ServerMetrics) CopyFrom(o *ServerMetrics) { sm.CallsStarted.Store(o.CallsStarted.Load()) sm.CallsSucceeded.Store(o.CallsSucceeded.Load()) diff --git a/vendor/google.golang.org/grpc/internal/channelz/socket.go b/vendor/google.golang.org/grpc/internal/channelz/socket.go index fa64834b25d..90103847c5f 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/socket.go +++ b/vendor/google.golang.org/grpc/internal/channelz/socket.go @@ -70,13 +70,18 @@ type EphemeralSocketMetrics struct { RemoteFlowControlWindow int64 } +// SocketType represents the type of socket. type SocketType string +// SocketType can be one of these. const ( SocketTypeNormal = "NormalSocket" SocketTypeListen = "ListenSocket" ) +// Socket represents a socket within channelz which includes socket +// metrics and data related to socket activity and provides methods +// for managing and interacting with sockets. type Socket struct { Entity SocketType SocketType @@ -100,6 +105,8 @@ type Socket struct { Security credentials.ChannelzSecurityValue } +// String returns a string representation of the Socket, including its parent +// entity, socket type, and ID. func (ls *Socket) String() string { return fmt.Sprintf("%s %s #%d", ls.Parent, ls.SocketType, ls.ID) } diff --git a/vendor/google.golang.org/grpc/internal/channelz/subchannel.go b/vendor/google.golang.org/grpc/internal/channelz/subchannel.go index 3b88e4cba8e..b20802e6e96 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/subchannel.go +++ b/vendor/google.golang.org/grpc/internal/channelz/subchannel.go @@ -47,12 +47,14 @@ func (sc *SubChannel) id() int64 { return sc.ID } +// Sockets returns a copy of the sockets map associated with the SubChannel. func (sc *SubChannel) Sockets() map[int64]string { db.mu.RLock() defer db.mu.RUnlock() return copyMap(sc.sockets) } +// Trace returns a copy of the ChannelTrace associated with the SubChannel. func (sc *SubChannel) Trace() *ChannelTrace { db.mu.RLock() defer db.mu.RUnlock() diff --git a/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go index d1ed8df6a51..0e6e18e185c 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go @@ -35,13 +35,13 @@ type SocketOptionData struct { // Getsockopt defines the function to get socket options requested by channelz. // It is to be passed to syscall.RawConn.Control(). // Windows OS doesn't support Socket Option -func (s *SocketOptionData) Getsockopt(fd uintptr) { +func (s *SocketOptionData) Getsockopt(uintptr) { once.Do(func() { logger.Warning("Channelz: socket options are not supported on non-linux environments") }) } // GetSocketOption gets the socket option info of the conn. -func GetSocketOption(c any) *SocketOptionData { +func GetSocketOption(any) *SocketOptionData { return nil } diff --git a/vendor/google.golang.org/grpc/internal/channelz/trace.go b/vendor/google.golang.org/grpc/internal/channelz/trace.go index 36b86740323..2bffe477768 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/trace.go +++ b/vendor/google.golang.org/grpc/internal/channelz/trace.go @@ -79,13 +79,21 @@ type TraceEvent struct { Parent *TraceEvent } +// ChannelTrace provides tracing information for a channel. +// It tracks various events and metadata related to the channel's lifecycle +// and operations. 
type ChannelTrace struct {
-	cm          *channelMap
-	clearCalled bool
+	cm          *channelMap
+	clearCalled bool
+	// The time when the trace was created.
 	CreationTime time.Time
-	EventNum     int64
-	mu           sync.Mutex
-	Events       []*traceEvent
+	// A counter for the number of events recorded in the
+	// trace.
+	EventNum int64
+	mu       sync.Mutex
+	// A slice of traceEvent pointers representing the events recorded for
+	// this channel.
+	Events []*traceEvent
 }
 
 func (c *ChannelTrace) copy() *ChannelTrace {
@@ -175,6 +183,7 @@ var refChannelTypeToString = map[RefChannelType]string{
 	RefNormalSocket: "NormalSocket",
 }
 
+// String returns a string representation of the RefChannelType.
 func (r RefChannelType) String() string {
 	return refChannelTypeToString[r]
 }
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
index d9064871394..6e7dd6b7727 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
@@ -45,7 +45,16 @@ var (
 	// option is present for backward compatibility. This option may be overridden
 	// by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true"
 	// or "false".
-	EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", false)
+	EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true)
+	// XDSFallbackSupport is the env variable that controls whether support for
+	// xDS fallback is turned on. If this is unset or is false, only the first
+	// xDS server in the list of server configs will be used.
+	XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", false)
+	// NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used
+	// instead of the existing pickfirst implementation. This can be enabled by
+	// setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST"
+	// to "true".
+	NewPickFirstEnabled = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", false)
 )
 
 func boolFromEnv(envVar string, def bool) bool {
diff --git a/vendor/google.golang.org/grpc/internal/experimental.go b/vendor/google.golang.org/grpc/internal/experimental.go
index 7f7044e1731..7617be21589 100644
--- a/vendor/google.golang.org/grpc/internal/experimental.go
+++ b/vendor/google.golang.org/grpc/internal/experimental.go
@@ -18,11 +18,11 @@
 package internal
 
 var (
-	// WithRecvBufferPool is implemented by the grpc package and returns a dial
+	// WithBufferPool is implemented by the grpc package and returns a dial
 	// option to configure a shared buffer pool for a grpc.ClientConn.
-	WithRecvBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption
+	WithBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption
 
-	// RecvBufferPool is implemented by the grpc package and returns a server
+	// BufferPool is implemented by the grpc package and returns a server
 	// option to configure a shared buffer pool for a grpc.Server.
- RecvBufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption + BufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption ) diff --git a/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go b/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go index 6717b757f80..43423d8ad9a 100644 --- a/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go +++ b/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go @@ -62,9 +62,9 @@ func isRunningOnGCE(manufacturer []byte, goos string) bool { name = strings.TrimSpace(name) return name == "Google" || name == "Google Compute Engine" case "windows": - name = strings.Replace(name, " ", "", -1) - name = strings.Replace(name, "\n", "", -1) - name = strings.Replace(name, "\r", "", -1) + name = strings.ReplaceAll(name, " ", "") + name = strings.ReplaceAll(name, "\n", "") + name = strings.ReplaceAll(name, "\r", "") return name == "Google" default: return false diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go deleted file mode 100644 index bfc45102ab2..00000000000 --- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go +++ /dev/null @@ -1,126 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package grpclog (internal) defines depth logging for grpc. -package grpclog - -import ( - "os" -) - -// Logger is the logger used for the non-depth log functions. -var Logger LoggerV2 - -// DepthLogger is the logger used for the depth log functions. -var DepthLogger DepthLoggerV2 - -// InfoDepth logs to the INFO log at the specified depth. -func InfoDepth(depth int, args ...any) { - if DepthLogger != nil { - DepthLogger.InfoDepth(depth, args...) - } else { - Logger.Infoln(args...) - } -} - -// WarningDepth logs to the WARNING log at the specified depth. -func WarningDepth(depth int, args ...any) { - if DepthLogger != nil { - DepthLogger.WarningDepth(depth, args...) - } else { - Logger.Warningln(args...) - } -} - -// ErrorDepth logs to the ERROR log at the specified depth. -func ErrorDepth(depth int, args ...any) { - if DepthLogger != nil { - DepthLogger.ErrorDepth(depth, args...) - } else { - Logger.Errorln(args...) - } -} - -// FatalDepth logs to the FATAL log at the specified depth. -func FatalDepth(depth int, args ...any) { - if DepthLogger != nil { - DepthLogger.FatalDepth(depth, args...) - } else { - Logger.Fatalln(args...) - } - os.Exit(1) -} - -// LoggerV2 does underlying logging work for grpclog. -// This is a copy of the LoggerV2 defined in the external grpclog package. It -// is defined here to avoid a circular dependency. -type LoggerV2 interface { - // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. - Info(args ...any) - // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. - Infoln(args ...any) - // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. 
- Infof(format string, args ...any) - // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. - Warning(args ...any) - // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. - Warningln(args ...any) - // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. - Warningf(format string, args ...any) - // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. - Error(args ...any) - // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - Errorln(args ...any) - // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - Errorf(format string, args ...any) - // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatal(args ...any) - // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatalln(args ...any) - // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatalf(format string, args ...any) - // V reports whether verbosity level l is at least the requested verbose level. - V(l int) bool -} - -// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements -// DepthLoggerV2, the below functions will be called with the appropriate stack -// depth set for trivial functions the logger may ignore. -// This is a copy of the DepthLoggerV2 defined in the external grpclog package. -// It is defined here to avoid a circular dependency. -// -// # Experimental -// -// Notice: This type is EXPERIMENTAL and may be changed or removed in a -// later release. -type DepthLoggerV2 interface { - // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. - InfoDepth(depth int, args ...any) - // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. - WarningDepth(depth int, args ...any) - // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. - ErrorDepth(depth int, args ...any) - // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. - FatalDepth(depth int, args ...any) -} diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go deleted file mode 100644 index faa998de763..00000000000 --- a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go +++ /dev/null @@ -1,93 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package grpclog - -import ( - "fmt" -) - -// PrefixLogger does logging with a prefix. -// -// Logging method on a nil logs without any prefix. -type PrefixLogger struct { - logger DepthLoggerV2 - prefix string -} - -// Infof does info logging. -func (pl *PrefixLogger) Infof(format string, args ...any) { - if pl != nil { - // Handle nil, so the tests can pass in a nil logger. - format = pl.prefix + format - pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) - return - } - InfoDepth(1, fmt.Sprintf(format, args...)) -} - -// Warningf does warning logging. -func (pl *PrefixLogger) Warningf(format string, args ...any) { - if pl != nil { - format = pl.prefix + format - pl.logger.WarningDepth(1, fmt.Sprintf(format, args...)) - return - } - WarningDepth(1, fmt.Sprintf(format, args...)) -} - -// Errorf does error logging. -func (pl *PrefixLogger) Errorf(format string, args ...any) { - if pl != nil { - format = pl.prefix + format - pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...)) - return - } - ErrorDepth(1, fmt.Sprintf(format, args...)) -} - -// Debugf does info logging at verbose level 2. -func (pl *PrefixLogger) Debugf(format string, args ...any) { - // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe - // rewrite PrefixLogger a little to ensure that we don't use the global - // `Logger` here, and instead use the `logger` field. - if !Logger.V(2) { - return - } - if pl != nil { - // Handle nil, so the tests can pass in a nil logger. - format = pl.prefix + format - pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) - return - } - InfoDepth(1, fmt.Sprintf(format, args...)) - -} - -// V reports whether verbosity level l is at least the requested verbose level. -func (pl *PrefixLogger) V(l int) bool { - // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe - // rewrite PrefixLogger a little to ensure that we don't use the global - // `Logger` here, and instead use the `logger` field. - return Logger.V(l) -} - -// NewPrefixLogger creates a prefix logger with the given prefix. -func NewPrefixLogger(logger DepthLoggerV2, prefix string) *PrefixLogger { - return &PrefixLogger{logger: logger, prefix: prefix} -} diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go new file mode 100644 index 00000000000..092ad187a2c --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go @@ -0,0 +1,79 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpclog provides logging functionality for internal gRPC packages, +// outside of the functionality provided by the external `grpclog` package. +package grpclog + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" +) + +// PrefixLogger does logging with a prefix. +// +// Logging method on a nil logs without any prefix. +type PrefixLogger struct { + logger grpclog.DepthLoggerV2 + prefix string +} + +// Infof does info logging. 
+func (pl *PrefixLogger) Infof(format string, args ...any) { + if pl != nil { + // Handle nil, so the tests can pass in a nil logger. + format = pl.prefix + format + pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) + return + } + grpclog.InfoDepth(1, fmt.Sprintf(format, args...)) +} + +// Warningf does warning logging. +func (pl *PrefixLogger) Warningf(format string, args ...any) { + if pl != nil { + format = pl.prefix + format + pl.logger.WarningDepth(1, fmt.Sprintf(format, args...)) + return + } + grpclog.WarningDepth(1, fmt.Sprintf(format, args...)) +} + +// Errorf does error logging. +func (pl *PrefixLogger) Errorf(format string, args ...any) { + if pl != nil { + format = pl.prefix + format + pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...)) + return + } + grpclog.ErrorDepth(1, fmt.Sprintf(format, args...)) +} + +// V reports whether verbosity level l is at least the requested verbose level. +func (pl *PrefixLogger) V(l int) bool { + if pl != nil { + return pl.logger.V(l) + } + return true +} + +// NewPrefixLogger creates a prefix logger with the given prefix. +func NewPrefixLogger(logger grpclog.DepthLoggerV2, prefix string) *PrefixLogger { + return &PrefixLogger{logger: logger, prefix: prefix} +} diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go index f7f40a16ace..8e8e861280a 100644 --- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go +++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go @@ -53,16 +53,28 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { return cs } -// Schedule adds a callback to be scheduled after existing callbacks are run. +// TrySchedule tries to schedule the provided callback function f to be +// executed in the order it was added. This is a best-effort operation. If the +// context passed to NewCallbackSerializer was canceled before this method is +// called, the callback will not be scheduled. // // Callbacks are expected to honor the context when performing any blocking // operations, and should return early when the context is canceled. +func (cs *CallbackSerializer) TrySchedule(f func(ctx context.Context)) { + cs.callbacks.Put(f) +} + +// ScheduleOr schedules the provided callback function f to be executed in the +// order it was added. If the context passed to NewCallbackSerializer has been +// canceled before this method is called, the onFailure callback will be +// executed inline instead. // -// Return value indicates if the callback was successfully added to the list of -// callbacks to be executed by the serializer. It is not possible to add -// callbacks once the context passed to NewCallbackSerializer is cancelled. -func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { - return cs.callbacks.Put(f) == nil +// Callbacks are expected to honor the context when performing any blocking +// operations, and should return early when the context is canceled. 
+func (cs *CallbackSerializer) ScheduleOr(f func(ctx context.Context), onFailure func()) { + if cs.callbacks.Put(f) != nil { + onFailure() + } } func (cs *CallbackSerializer) run(ctx context.Context) { diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go index aef8cec1ab0..6d8c2f518df 100644 --- a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go +++ b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go @@ -77,7 +77,7 @@ func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) { if ps.msg != nil { msg := ps.msg - ps.cs.Schedule(func(context.Context) { + ps.cs.TrySchedule(func(context.Context) { ps.mu.Lock() defer ps.mu.Unlock() if !ps.subscribers[sub] { @@ -103,7 +103,7 @@ func (ps *PubSub) Publish(msg any) { ps.msg = msg for sub := range ps.subscribers { s := sub - ps.cs.Schedule(func(context.Context) { + ps.cs.TrySchedule(func(context.Context) { ps.mu.Lock() defer ps.mu.Unlock() if !ps.subscribers[s] { diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/vendor/google.golang.org/grpc/internal/grpcutil/method.go index ec62b4775e5..683d1955c6a 100644 --- a/vendor/google.golang.org/grpc/internal/grpcutil/method.go +++ b/vendor/google.golang.org/grpc/internal/grpcutil/method.go @@ -39,7 +39,7 @@ func ParseMethod(methodName string) (service, method string, _ error) { } // baseContentType is the base content-type for gRPC. This is a valid -// content-type on it's own, but can also include a content-subtype such as +// content-type on its own, but can also include a content-subtype such as // "proto" as a suffix after "+" or ";". See // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests // for more details. diff --git a/vendor/google.golang.org/grpc/internal/idle/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go index fe49cb74c55..2c13ee9dac7 100644 --- a/vendor/google.golang.org/grpc/internal/idle/idle.go +++ b/vendor/google.golang.org/grpc/internal/idle/idle.go @@ -182,6 +182,7 @@ func (m *Manager) tryEnterIdleMode() bool { return true } +// EnterIdleModeForTesting instructs the channel to enter idle mode. func (m *Manager) EnterIdleModeForTesting() { m.tryEnterIdleMode() } @@ -225,7 +226,7 @@ func (m *Manager) ExitIdleMode() error { // came in and OnCallBegin() noticed that the calls count is negative. // - Channel is in idle mode, and multiple new RPCs come in at the same // time, all of them notice a negative calls count in OnCallBegin and get - // here. The first one to get the lock would got the channel to exit idle. + // here. The first one to get the lock would get the channel to exit idle. // - Channel is not in idle mode, and the user calls Connect which calls // m.ExitIdleMode. // @@ -266,6 +267,7 @@ func (m *Manager) isClosed() bool { return atomic.LoadInt32(&m.closed) == 1 } +// Close stops the timer associated with the Manager, if it exists. 
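To recap the callback_serializer.go change above: the old boolean-returning Schedule is split into a best-effort TrySchedule and ScheduleOr, which takes an explicit failure callback. A sketch of the two call patterns follows; grpcsync is an internal package, so this compiles only inside the grpc-go module and is shown purely to illustrate the semantics:

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/internal/grpcsync" // internal package; illustration only
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	cs := grpcsync.NewCallbackSerializer(ctx)

	// Best effort: silently dropped if ctx was already canceled, matching
	// the old call sites that ignored Schedule's return value.
	cs.TrySchedule(func(context.Context) { fmt.Println("ran in order") })

	// Explicit failure path: onFailure runs inline if scheduling fails.
	cs.ScheduleOr(
		func(context.Context) { fmt.Println("also ran in order") },
		func() { fmt.Println("serializer already closed") },
	)
}
```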
func (m *Manager) Close() { atomic.StoreInt32(&m.closed, 1) diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 5d665398692..3afc1813440 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -29,8 +29,6 @@ import ( ) var ( - // WithHealthCheckFunc is set by dialoptions.go - WithHealthCheckFunc any // func (HealthChecker) DialOption // HealthCheckFunc is used to provide client-side LB channel health checking HealthCheckFunc HealthChecker // BalancerUnregister is exported by package balancer to unregister a balancer. @@ -149,6 +147,20 @@ var ( // other features, including the CSDS service. NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error) + // NewXDSResolverWithClientForTesting creates a new xDS resolver builder + // using the provided xDS client instead of creating a new one using the + // bootstrap configuration specified by the supported environment variables. + // The resolver.Builder is meant to be used in conjunction with the + // grpc.WithResolvers DialOption. The resolver.Builder does not take + // ownership of the provided xDS client and it is the responsibility of the + // caller to close the client when no longer required. + // + // Testing Only + // + // This function should ONLY be used for testing and may not work with some + // other features, including the CSDS service. + NewXDSResolverWithClientForTesting any // func(xdsclient.XDSClient) (resolver.Builder, error) + // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster // Specifier Plugin for testing purposes, regardless of the XDSRLS environment // variable. @@ -183,7 +195,7 @@ var ( // GRPCResolverSchemeExtraMetadata determines when gRPC will add extra // metadata to RPCs. - GRPCResolverSchemeExtraMetadata string = "xds" + GRPCResolverSchemeExtraMetadata = "xds" // EnterIdleModeForTesting gets the ClientConn to enter IDLE mode. EnterIdleModeForTesting any // func(*grpc.ClientConn) @@ -191,6 +203,8 @@ var ( // ExitIdleModeForTesting gets the ClientConn to exit IDLE mode. ExitIdleModeForTesting any // func(*grpc.ClientConn) error + // ChannelzTurnOffForTesting disables the Channelz service for testing + // purposes. ChannelzTurnOffForTesting func() // TriggerXDSResourceNotFoundForTesting causes the provided xDS Client to @@ -203,11 +217,27 @@ var ( // UserSetDefaultScheme is set to true if the user has overridden the // default resolver scheme. - UserSetDefaultScheme bool = false + UserSetDefaultScheme = false - // ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n - // is the number of elements. swap swaps the elements with indexes i and j. - ShuffleAddressListForTesting any // func(n int, swap func(i, j int)) + // ConnectedAddress returns the connected address for a SubConnState. The + // address is only valid if the state is READY. + ConnectedAddress any // func (scs SubConnState) resolver.Address + + // SetConnectedAddress sets the connected address for a SubConnState. + SetConnectedAddress any // func(scs *SubConnState, addr resolver.Address) + + // SnapshotMetricRegistryForTesting snapshots the global data of the metric + // registry. Returns a cleanup function that sets the metric registry to its + // original state. Only called in testing functions. + SnapshotMetricRegistryForTesting func() func() + + // SetDefaultBufferPoolForTesting updates the default buffer pool, for + // testing purposes. 
+ SetDefaultBufferPoolForTesting any // func(mem.BufferPool) + + // SetBufferPoolingThresholdForTesting updates the buffer pooling threshold, for + // testing purposes. + SetBufferPoolingThresholdForTesting any // func(int) ) // HealthChecker defines the signature of the client-side LB channel health @@ -215,7 +245,7 @@ var ( // // The implementation is expected to create a health checking RPC stream by // calling newStream(), watch for the health status of serviceName, and report -// it's health back by calling setConnectivityState(). +// its health back by calling setConnectivityState(). // // The health checking protocol is defined at: // https://github.com/grpc/grpc/blob/master/doc/health-checking.md @@ -237,3 +267,9 @@ const ( // It currently has an experimental suffix which would be removed once // end-to-end testing of the policy is completed. const RLSLoadBalancingPolicyName = "rls_experimental" + +// EnforceSubConnEmbedding is used to enforce proper SubConn implementation +// embedding. +type EnforceSubConnEmbedding interface { + enforceSubConnEmbedding() +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 4552db16b02..ba5c5a95d0d 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -24,8 +24,9 @@ import ( "context" "encoding/json" "fmt" - "math/rand" + rand "math/rand/v2" "net" + "net/netip" "os" "strconv" "strings" @@ -122,7 +123,7 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts } // IP address. - if ipAddr, ok := formatIP(host); ok { + if ipAddr, err := formatIP(host); err == nil { addr := []resolver.Address{{Addr: ipAddr + ":" + port}} cc.UpdateState(resolver.State{Addresses: addr}) return deadResolver{}, nil @@ -177,7 +178,7 @@ type dnsResolver struct { // finished. Otherwise, data race will be possible. [Race Example] in // dns_resolver_test we replace the real lookup functions with mocked ones to // facilitate testing. If Close() doesn't wait for watcher() goroutine - // finishes, race detector sometimes will warns lookup (READ the lookup + // finishes, race detector sometimes will warn lookup (READ the lookup // function pointers) inside watcher() goroutine has data race with // replaceNetFunc (WRITE the lookup function pointers). wg sync.WaitGroup @@ -237,7 +238,9 @@ func (d *dnsResolver) watcher() { } func (d *dnsResolver) lookupSRV(ctx context.Context) ([]resolver.Address, error) { - if !EnableSRVLookups { + // Skip this particular host to avoid timeouts with some versions of + // systemd-resolved. + if !EnableSRVLookups || d.host == "metadata.google.internal." 
{ return nil, nil } var newAddrs []resolver.Address @@ -258,9 +261,9 @@ func (d *dnsResolver) lookupSRV(ctx context.Context) ([]resolver.Address, error) return nil, err } for _, a := range lbAddrs { - ip, ok := formatIP(a) - if !ok { - return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) + ip, err := formatIP(a) + if err != nil { + return nil, fmt.Errorf("dns: error parsing A record IP address %v: %v", a, err) } addr := ip + ":" + strconv.Itoa(int(s.Port)) newAddrs = append(newAddrs, resolver.Address{Addr: addr, ServerName: s.Target}) @@ -320,9 +323,9 @@ func (d *dnsResolver) lookupHost(ctx context.Context) ([]resolver.Address, error } newAddrs := make([]resolver.Address, 0, len(addrs)) for _, a := range addrs { - ip, ok := formatIP(a) - if !ok { - return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) + ip, err := formatIP(a) + if err != nil { + return nil, fmt.Errorf("dns: error parsing A record IP address %v: %v", a, err) } addr := ip + ":" + d.port newAddrs = append(newAddrs, resolver.Address{Addr: addr}) @@ -349,19 +352,19 @@ func (d *dnsResolver) lookup() (*resolver.State, error) { return &state, nil } -// formatIP returns ok = false if addr is not a valid textual representation of -// an IP address. If addr is an IPv4 address, return the addr and ok = true. +// formatIP returns an error if addr is not a valid textual representation of +// an IP address. If addr is an IPv4 address, return the addr and error = nil. // If addr is an IPv6 address, return the addr enclosed in square brackets and -// ok = true. -func formatIP(addr string) (addrIP string, ok bool) { - ip := net.ParseIP(addr) - if ip == nil { - return "", false +// error = nil. +func formatIP(addr string) (string, error) { + ip, err := netip.ParseAddr(addr) + if err != nil { + return "", err } - if ip.To4() != nil { - return addr, true + if ip.Is4() { + return addr, nil } - return "[" + addr + "]", true + return "[" + addr + "]", nil } // parseTarget takes the user input target string and default port, returns @@ -377,7 +380,7 @@ func parseTarget(target, defaultPort string) (host, port string, err error) { if target == "" { return "", "", internal.ErrMissingAddr } - if ip := net.ParseIP(target); ip != nil { + if _, err := netip.ParseAddr(target); err == nil { // target is an IPv4 or IPv6(without brackets) address return target, defaultPort, nil } @@ -425,7 +428,7 @@ func chosenByPercentage(a *int) bool { if a == nil { return true } - return rand.Intn(100)+1 <= *a + return rand.IntN(100)+1 <= *a } func canaryingSC(js string) string { diff --git a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go index afac56572ad..b901c7bace5 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go +++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go @@ -55,7 +55,7 @@ func (r *passthroughResolver) start() { r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}}) } -func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {} +func (*passthroughResolver) ResolveNow(resolver.ResolveNowOptions) {} func (*passthroughResolver) Close() {} diff --git a/vendor/google.golang.org/grpc/internal/stats/labels.go b/vendor/google.golang.org/grpc/internal/stats/labels.go new file mode 100644 index 00000000000..fd33af51ae8 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/stats/labels.go @@ -0,0 +1,42 @@ +/* + 
*
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package stats provides internal stats related functionality.
+package stats
+
+import "context"
+
+// Labels are the labels for metrics.
+type Labels struct {
+	// TelemetryLabels are the telemetry labels to record.
+	TelemetryLabels map[string]string
+}
+
+type labelsKey struct{}
+
+// GetLabels returns the Labels stored in the context, or nil if none are present.
+func GetLabels(ctx context.Context) *Labels {
+	labels, _ := ctx.Value(labelsKey{}).(*Labels)
+	return labels
+}
+
+// SetLabels sets the Labels in the context.
+func SetLabels(ctx context.Context, labels *Labels) context.Context {
+	// could also append
+	return context.WithValue(ctx, labelsKey{}, labels)
+}
diff --git a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
new file mode 100644
index 00000000000..79044657be1
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package stats
+
+import (
+	"fmt"
+
+	estats "google.golang.org/grpc/experimental/stats"
+	"google.golang.org/grpc/stats"
+)
+
+// MetricsRecorderList forwards Record calls to all of its metricsRecorders.
+//
+// It panics if the label values provided in a Record call do not match the
+// number of label keys.
+type MetricsRecorderList struct {
+	// metricsRecorders are the metrics recorders this list will forward to.
+	metricsRecorders []estats.MetricsRecorder
+}
+
+// NewMetricsRecorderList creates a new metric recorder list with all the stats
+// handlers provided which implement the MetricsRecorder interface.
+// If none of the provided stats handlers implement the MetricsRecorder
+// interface, the returned MetricsRecorderList is a no-op.
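The new labels.go above is a small context-plumbing helper. A hedged sketch of how code inside the module might attach and later read telemetry labels (internal package, illustration only; the label key is made up):

```go
package main

import (
	"context"
	"fmt"

	istats "google.golang.org/grpc/internal/stats" // internal package; illustration only
)

func main() {
	// Attach labels to a context...
	ctx := istats.SetLabels(context.Background(), &istats.Labels{
		TelemetryLabels: map[string]string{"example.label": "value"}, // hypothetical key
	})
	// ...and read them back later, e.g. from a stats handler.
	if l := istats.GetLabels(ctx); l != nil {
		fmt.Println(l.TelemetryLabels["example.label"])
	}
}
```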
+func NewMetricsRecorderList(shs []stats.Handler) *MetricsRecorderList { + var mrs []estats.MetricsRecorder + for _, sh := range shs { + if mr, ok := sh.(estats.MetricsRecorder); ok { + mrs = append(mrs, mr) + } + } + return &MetricsRecorderList{ + metricsRecorders: mrs, + } +} + +func verifyLabels(desc *estats.MetricDescriptor, labelsRecv ...string) { + if got, want := len(labelsRecv), len(desc.Labels)+len(desc.OptionalLabels); got != want { + panic(fmt.Sprintf("Received %d labels in call to record metric %q, but expected %d.", got, desc.Name, want)) + } +} + +// RecordInt64Count records the measurement alongside labels on the int +// count associated with the provided handle. +func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, incr int64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordInt64Count(handle, incr, labels...) + } +} + +// RecordFloat64Count records the measurement alongside labels on the float +// count associated with the provided handle. +func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordFloat64Count(handle, incr, labels...) + } +} + +// RecordInt64Histo records the measurement alongside labels on the int +// histo associated with the provided handle. +func (l *MetricsRecorderList) RecordInt64Histo(handle *estats.Int64HistoHandle, incr int64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordInt64Histo(handle, incr, labels...) + } +} + +// RecordFloat64Histo records the measurement alongside labels on the float +// histo associated with the provided handle. +func (l *MetricsRecorderList) RecordFloat64Histo(handle *estats.Float64HistoHandle, incr float64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordFloat64Histo(handle, incr, labels...) + } +} + +// RecordInt64Gauge records the measurement alongside labels on the int +// gauge associated with the provided handle. +func (l *MetricsRecorderList) RecordInt64Gauge(handle *estats.Int64GaugeHandle, incr int64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordInt64Gauge(handle, incr, labels...) + } +} diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go index c7dbc820595..1186f1e9a9a 100644 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -138,17 +138,19 @@ func (s *Status) WithDetails(details ...protoadapt.MessageV1) (*Status, error) { // s.Code() != OK implies that s.Proto() != nil. p := s.Proto() for _, detail := range details { - any, err := anypb.New(protoadapt.MessageV2Of(detail)) + m, err := anypb.New(protoadapt.MessageV2Of(detail)) if err != nil { return nil, err } - p.Details = append(p.Details, any) + p.Details = append(p.Details, m) } return &Status{s: p}, nil } // Details returns a slice of details messages attached to the status. // If a detail cannot be decoded, the error is returned in place of the detail. 
+// If the detail can be decoded, the proto message returned is of the same
+// type that was given to WithDetails().
 func (s *Status) Details() []any {
 	if s == nil || s.s == nil {
 		return nil
@@ -160,7 +162,38 @@
 			details = append(details, err)
 			continue
 		}
-		details = append(details, detail)
+		// The call to MessageV1Of is required to unwrap the proto message if
+		// it implemented only the MessageV1 API. The proto message would have
+		// been wrapped in a V2 wrapper in Status.WithDetails. V2 messages are
+		// added to a global registry used by any.UnmarshalNew().
+		// MessageV1Of has the following behaviour:
+		// 1. If the given message is a wrapped MessageV1, it returns the
+		//    unwrapped value.
+		// 2. If the given message already implements MessageV1, it returns it
+		//    as is.
+		// 3. Else, it wraps the MessageV2 in a MessageV1 wrapper.
+		//
+		// Since the Status.WithDetails() API only accepts MessageV1, calling
+		// MessageV1Of ensures we return the same type that was given to
+		// WithDetails:
+		// * If the given type implemented only MessageV1, the unwrapping from
+		//   point 1 above will restore the type.
+		// * If the given type implemented both MessageV1 and MessageV2, point 2
+		//   above will ensure no wrapping is performed.
+		// * If the given type implemented only MessageV2 and was wrapped using
+		//   MessageV1Of before passing to WithDetails(), it would be unwrapped
+		//   in WithDetails by calling MessageV2Of(). Point 3 above will ensure
+		//   that the type is wrapped in a MessageV1 wrapper again before
+		//   returning. Note that protoc-gen-go doesn't generate code which
+		//   implements ONLY MessageV2 at the time of writing.
+		//
+		// NOTE: Status details can also be added using the FromProto method.
+		// This could theoretically allow passing a Detail message that only
+		// implements the V2 API. In such a case the message will be wrapped in
+		// a MessageV1 wrapper when fetched using Details().
+		// Since protoc-gen-go generates only code that implements both V1 and
+		// V2 APIs for backward compatibility, this is not a concern.
+		details = append(details, protoadapt.MessageV1Of(detail))
 	}
 	return details
 }
diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
index 999f52cd75b..54c24c2ff38 100644
--- a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
+++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
@@ -58,20 +58,20 @@ func GetRusage() *Rusage {
 
 // CPUTimeDiff returns the differences of user CPU time and system CPU time used
 // between two Rusage structs. It is a no-op function for non-linux environments.
-func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
+func CPUTimeDiff(*Rusage, *Rusage) (float64, float64) {
 	log()
 	return 0, 0
 }
 
 // SetTCPUserTimeout is a no-op function under non-linux environments.
-func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
+func SetTCPUserTimeout(net.Conn, time.Duration) error {
 	log()
 	return nil
 }
 
 // GetTCPUserTimeout is a no-op function under non-linux environments.
// A negative return value indicates the operation is not supported -func GetTCPUserTimeout(conn net.Conn) (int, error) { +func GetTCPUserTimeout(net.Conn) (int, error) { log() return -1, nil } diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go index 078137b7fd7..7e7aaa54636 100644 --- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go @@ -44,7 +44,7 @@ func NetDialerWithTCPKeepalive() *net.Dialer { // combination of unconditionally enabling TCP keepalives here, and // disabling the overriding of TCP keepalive parameters by setting the // KeepAlive field to a negative value above, results in OS defaults for - // the TCP keealive interval and time parameters. + // the TCP keepalive interval and time parameters. Control: func(_, _ string, c syscall.RawConn) error { return c.Control(func(fd uintptr) { unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1) diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go index fd7d43a8907..d5c1085eeae 100644 --- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go @@ -44,7 +44,7 @@ func NetDialerWithTCPKeepalive() *net.Dialer { // combination of unconditionally enabling TCP keepalives here, and // disabling the overriding of TCP keepalive parameters by setting the // KeepAlive field to a negative value above, results in OS defaults for - // the TCP keealive interval and time parameters. + // the TCP keepalive interval and time parameters. Control: func(_, _ string, c syscall.RawConn) error { return c.Control(func(fd uintptr) { windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1) diff --git a/vendor/google.golang.org/grpc/internal/transport/client_stream.go b/vendor/google.golang.org/grpc/internal/transport/client_stream.go new file mode 100644 index 00000000000..8ed347c5419 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/client_stream.go @@ -0,0 +1,144 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "sync/atomic" + + "golang.org/x/net/http2" + "google.golang.org/grpc/mem" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// ClientStream implements streaming functionality for a gRPC client. +type ClientStream struct { + *Stream // Embed for common stream functionality. + + ct *http2Client + done chan struct{} // closed at the end of stream to unblock writers. + doneFunc func() // invoked at the end of stream. + + headerChan chan struct{} // closed to indicate the end of header metadata. + headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. 
+ // headerValid indicates whether a valid header was received. Only + // meaningful after headerChan is closed (always call waitOnHeader() before + // reading its value). + headerValid bool + header metadata.MD // the received header metadata + noHeaders bool // set if the client never received headers (set only after the stream is done). + + bytesReceived atomic.Bool // indicates whether any bytes have been received on this stream + unprocessed atomic.Bool // set if the server sends a refused stream or GOAWAY including this stream + + status *status.Status // the status error received from the server +} + +// Read reads an n-byte message from the input stream. +func (s *ClientStream) Read(n int) (mem.BufferSlice, error) { + b, err := s.Stream.read(n) + if err == nil { + s.ct.incrMsgRecv() + } + return b, err +} + +// Close closes the stream and propagates err to any readers. +func (s *ClientStream) Close(err error) { + var ( + rst bool + rstCode http2.ErrCode + ) + if err != nil { + rst = true + rstCode = http2.ErrCodeCancel + } + s.ct.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false) +} + +// Write writes the hdr and data bytes to the output stream. +func (s *ClientStream) Write(hdr []byte, data mem.BufferSlice, opts *WriteOptions) error { + return s.ct.write(s, hdr, data, opts) +} + +// BytesReceived indicates whether any bytes have been received on this stream. +func (s *ClientStream) BytesReceived() bool { + return s.bytesReceived.Load() +} + +// Unprocessed indicates whether the server did not process this stream -- +// i.e. it sent a refused stream or GOAWAY including this stream ID. +func (s *ClientStream) Unprocessed() bool { + return s.unprocessed.Load() +} + +func (s *ClientStream) waitOnHeader() { + select { + case <-s.ctx.Done(): + // Close the stream to prevent headers/trailers from changing after + // this function returns. + s.Close(ContextErr(s.ctx.Err())) + // headerChan could possibly not be closed yet if closeStream raced + // with operateHeaders; wait until it is closed explicitly here. + <-s.headerChan + case <-s.headerChan: + } +} + +// RecvCompress returns the compression algorithm applied to the inbound +// message. It is an empty string if there is no compression applied. +func (s *ClientStream) RecvCompress() string { + s.waitOnHeader() + return s.recvCompress +} + +// Done returns a channel which is closed when it receives the final status +// from the server. +func (s *ClientStream) Done() <-chan struct{} { + return s.done +} + +// Header returns the header metadata of the stream. Acquires the key-value +// pairs of header metadata once it is available. It blocks until i) the +// metadata is ready or ii) there is no header metadata or iii) the stream is +// canceled/expired. +func (s *ClientStream) Header() (metadata.MD, error) { + s.waitOnHeader() + + if !s.headerValid || s.noHeaders { + return nil, s.status.Err() + } + + return s.header.Copy(), nil +} + +// TrailersOnly blocks until a header or trailers-only frame is received and +// then returns true if the stream was trailers-only. If the stream ends +// before headers are received, it returns true. +func (s *ClientStream) TrailersOnly() bool { + s.waitOnHeader() + return s.noHeaders +} + +// Status returns the status received from the server. +// Status can be read safely only after the stream has ended, +// that is, after Done() is closed.
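+// +// Illustrative usage (editor's sketch, not part of the upstream change): +// +//	<-s.Done() +//	st := s.Status() // safe to read once Done() is closed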
+func (s *ClientStream) Status() *status.Status { + return s.status +} diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index 3deadfb4a20..ef72fbb3a01 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -32,6 +32,7 @@ import ( "golang.org/x/net/http2/hpack" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/mem" "google.golang.org/grpc/status" ) @@ -148,9 +149,9 @@ type dataFrame struct { streamID uint32 endStream bool h []byte - d []byte + reader mem.Reader // onEachWrite is called every time - // a part of d is written out. + // a part of data is written out. onEachWrite func() } @@ -289,18 +290,22 @@ func (l *outStreamList) dequeue() *outStream { } // controlBuffer is a way to pass information to loopy. -// Information is passed as specific struct types called control frames. -// A control frame not only represents data, messages or headers to be sent out -// but can also be used to instruct loopy to update its internal state. -// It shouldn't be confused with an HTTP2 frame, although some of the control frames -// like dataFrame and headerFrame do go out on wire as HTTP2 frames. +// +// Information is passed as specific struct types called control frames. A +// control frame not only represents data, messages or headers to be sent out +// but can also be used to instruct loopy to update its internal state. It +// shouldn't be confused with an HTTP2 frame, although some of the control +// frames like dataFrame and headerFrame do go out on wire as HTTP2 frames. type controlBuffer struct { - ch chan struct{} - done <-chan struct{} + wakeupCh chan struct{} // Unblocks readers waiting for something to read. + done <-chan struct{} // Closed when the transport is done. + + // Mutex guards all the fields below, except trfChan which can be read + // atomically without holding mu. mu sync.Mutex - consumerWaiting bool - list *itemList - err error + consumerWaiting bool // True when readers are blocked waiting for new data. + closed bool // True when the controlbuf is finished. + list *itemList // List of queued control frames. // transportResponseFrames counts the number of queued items that represent // the response of an action initiated by the peer. trfChan is created @@ -308,47 +313,59 @@ type controlBuffer struct { // closed and nilled when transportResponseFrames drops below the // threshold. Both fields are protected by mu. transportResponseFrames int - trfChan atomic.Value // chan struct{} + trfChan atomic.Pointer[chan struct{}] } func newControlBuffer(done <-chan struct{}) *controlBuffer { return &controlBuffer{ - ch: make(chan struct{}, 1), - list: &itemList{}, - done: done, + wakeupCh: make(chan struct{}, 1), + list: &itemList{}, + done: done, } } -// throttle blocks if there are too many incomingSettings/cleanupStreams in the -// controlbuf. +// throttle blocks if there are too many frames in the control buf that +// represent the response of an action initiated by the peer, like +// incomingSettings, cleanupStreams, etc. func (c *controlBuffer) throttle() { - ch, _ := c.trfChan.Load().(chan struct{}) - if ch != nil { + if ch := c.trfChan.Load(); ch != nil { select { - case <-ch: + case <-(*ch): case <-c.done: } } } +// put adds an item to the controlbuf.
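+// It returns ErrConnClosing if the controlbuf has already been closed via +// finish().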
func (c *controlBuffer) put(it cbItem) error { _, err := c.executeAndPut(nil, it) return err } +// executeAndPut runs f, and if the return value is true, adds the given item to +// the controlbuf. The item could be nil, in which case, this method simply +// executes f and does not add the item to the controlbuf. +// +// The first return value indicates whether the item was successfully added to +// the control buffer. A non-nil error, specifically ErrConnClosing, is returned +// if the control buffer is already closed. func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) { - var wakeUp bool c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return false, c.err + defer c.mu.Unlock() + + if c.closed { + return false, ErrConnClosing } if f != nil { if !f() { // f wasn't successful - c.mu.Unlock() return false, nil } } + if it == nil { + return true, nil + } + + var wakeUp bool if c.consumerWaiting { wakeUp = true c.consumerWaiting = false @@ -359,98 +376,102 @@ func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) { if c.transportResponseFrames == maxQueuedTransportResponseFrames { // We are adding the frame that puts us over the threshold; create // a throttling channel. - c.trfChan.Store(make(chan struct{})) + ch := make(chan struct{}) + c.trfChan.Store(&ch) } } - c.mu.Unlock() if wakeUp { select { - case c.ch <- struct{}{}: + case c.wakeupCh <- struct{}{}: default: } } return true, nil } -// Note argument f should never be nil. -func (c *controlBuffer) execute(f func(it any) bool, it any) (bool, error) { - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return false, c.err - } - if !f(it) { // f wasn't successful - c.mu.Unlock() - return false, nil - } - c.mu.Unlock() - return true, nil -} - +// get returns the next control frame from the control buffer. If block is true +// **and** there are no control frames in the control buffer, the call blocks +// until one of the conditions is met: there is a frame to return or the +// transport is closed. func (c *controlBuffer) get(block bool) (any, error) { for { c.mu.Lock() - if c.err != nil { + frame, err := c.getOnceLocked() + if frame != nil || err != nil || !block { + // If we read a frame or an error, we can return to the caller. The + // call to getOnceLocked() returns a nil frame and a nil error if + // there is nothing to read, and in that case, if the caller asked + // us not to block, we can return now as well. c.mu.Unlock() - return nil, c.err - } - if !c.list.isEmpty() { - h := c.list.dequeue().(cbItem) - if h.isTransportResponseFrame() { - if c.transportResponseFrames == maxQueuedTransportResponseFrames { - // We are removing the frame that put us over the - // threshold; close and clear the throttling channel. - ch := c.trfChan.Load().(chan struct{}) - close(ch) - c.trfChan.Store((chan struct{})(nil)) - } - c.transportResponseFrames-- - } - c.mu.Unlock() - return h, nil - } - if !block { - c.mu.Unlock() - return nil, nil + return frame, err } c.consumerWaiting = true c.mu.Unlock() + + // Release the lock above and wait to be woken up. select { - case <-c.ch: + case <-c.wakeupCh: case <-c.done: return nil, errors.New("transport closed by client") } } } +// Callers must not use this method, but should instead use get(). +// +// Caller must hold c.mu. 
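+// getOnceLocked returns a nil frame and a nil error when there is nothing to +// dequeue, and ErrConnClosing once the controlbuf has been closed.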
+func (c *controlBuffer) getOnceLocked() (any, error) { + if c.closed { + return nil, ErrConnClosing + } + if c.list.isEmpty() { + return nil, nil + } + h := c.list.dequeue().(cbItem) + if h.isTransportResponseFrame() { + if c.transportResponseFrames == maxQueuedTransportResponseFrames { + // We are removing the frame that put us over the + // threshold; close and clear the throttling channel. + ch := c.trfChan.Swap(nil) + close(*ch) + } + c.transportResponseFrames-- + } + return h, nil +} + +// finish closes the control buffer, cleaning up any streams that have queued +// header frames. Once this method returns, no more frames can be added to the +// control buffer, and attempts to do so will return ErrConnClosing. func (c *controlBuffer) finish() { c.mu.Lock() - if c.err != nil { - c.mu.Unlock() + defer c.mu.Unlock() + + if c.closed { return } - c.err = ErrConnClosing + c.closed = true // There may be headers for streams in the control buffer. // These streams need to be cleaned out since the transport // is still not aware of these yet. for head := c.list.dequeueAll(); head != nil; head = head.next { - hdr, ok := head.it.(*headerFrame) - if !ok { - continue - } - if hdr.onOrphaned != nil { // It will be nil on the server-side. - hdr.onOrphaned(ErrConnClosing) + switch v := head.it.(type) { + case *headerFrame: + if v.onOrphaned != nil { // It will be nil on the server-side. + v.onOrphaned(ErrConnClosing) + } + case *dataFrame: + _ = v.reader.Close() } } + // In case throttle() is currently in flight, it needs to be unblocked. // Otherwise, the transport may not close, since the transport is closed by // the reader encountering the connection error. - ch, _ := c.trfChan.Load().(chan struct{}) + ch := c.trfChan.Swap(nil) if ch != nil { - close(ch) + close(*ch) } - c.trfChan.Store((chan struct{})(nil)) - c.mu.Unlock() } type side int @@ -466,7 +487,7 @@ const ( // stream maintains a queue of data frames; as loopy receives data frames // it gets added to the queue of the relevant stream. // Loopy goes over this list of active streams by processing one node every iteration, -// thereby closely resemebling to a round-robin scheduling over all streams. While +// thereby closely resembling a round-robin scheduling over all streams. While // processing a stream, loopy writes out data bytes from this stream capped by the min // of http2MaxFrameLen, connection-level flow control and stream-level flow control. type loopyWriter struct { @@ -490,12 +511,13 @@ type loopyWriter struct { draining bool conn net.Conn logger *grpclog.PrefixLogger + bufferPool mem.BufferPool // Side-specific handlers ssGoAwayHandler func(*goAway) (bool, error) } -func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error)) *loopyWriter { +func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error), bufferPool mem.BufferPool) *loopyWriter { var buf bytes.Buffer l := &loopyWriter{ side: s, @@ -511,6 +533,7 @@ func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimato conn: conn, logger: logger, ssGoAwayHandler: goAwayHandler, + bufferPool: bufferPool, } return l } @@ -768,6 +791,11 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { // not be established yet.
delete(l.estdStreams, c.streamID) str.deleteSelf() + for head := str.itl.dequeueAll(); head != nil; head = head.next { + if df, ok := head.it.(*dataFrame); ok { + _ = df.reader.Close() + } + } } if c.rst { // If RST_STREAM needs to be sent. if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil { @@ -903,16 +931,18 @@ func (l *loopyWriter) processData() (bool, error) { dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item in this stream. // A data item is represented by a dataFrame, since it later translates into // multiple HTTP2 data frames. - // Every dataFrame has two buffers; h that keeps grpc-message header and d that is actual data. - // As an optimization to keep wire traffic low, data from d is copied to h to make as big as the - // maximum possible HTTP2 frame size. + // Every dataFrame has two buffers; h that keeps the grpc-message header and + // data that is the actual message. As an optimization to keep wire traffic + // low, bytes from data are copied to h to make it as big as the maximum + // possible HTTP2 frame size. - if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame + if len(dataItem.h) == 0 && dataItem.reader.Remaining() == 0 { // Empty data frame // Client sends out empty data frame with endStream = true if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil { return false, err } str.itl.dequeue() // remove the empty data item from stream + _ = dataItem.reader.Close() if str.itl.isEmpty() { str.state = empty } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers. @@ -927,9 +957,7 @@ } return false, nil } - var ( - buf []byte - ) + // Figure out the maximum size we can send maxSize := http2MaxFrameLen if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control. @@ -943,43 +971,50 @@ } // Compute how much of the header and data we can send within quota and max frame length hSize := min(maxSize, len(dataItem.h)) - dSize := min(maxSize-hSize, len(dataItem.d)) - if hSize != 0 { - if dSize == 0 { - buf = dataItem.h - } else { - // We can add some data to grpc message header to distribute bytes more equally across frames. - // Copy on the stack to avoid generating garbage - var localBuf [http2MaxFrameLen]byte - copy(localBuf[:hSize], dataItem.h) - copy(localBuf[hSize:], dataItem.d[:dSize]) - buf = localBuf[:hSize+dSize] - } + dSize := min(maxSize-hSize, dataItem.reader.Remaining()) + remainingBytes := len(dataItem.h) + dataItem.reader.Remaining() - hSize - dSize + size := hSize + dSize + + var buf *[]byte + + if hSize != 0 && dSize == 0 { + buf = &dataItem.h } else { - buf = dataItem.d - } + // Note: this is only necessary because the http2.Framer does not support + // partially writing a frame, so the sequence must be materialized into a buffer. + // TODO: Revisit once https://github.com/golang/go/issues/66655 is addressed. + pool := l.bufferPool + if pool == nil { + // Note that this is only supposed to be nil in tests. Otherwise, stream is + // always initialized with a BufferPool.
+ pool = mem.DefaultBufferPool() + } + buf = pool.Get(size) + defer pool.Put(buf) - size := hSize + dSize + copy((*buf)[:hSize], dataItem.h) + _, _ = dataItem.reader.Read((*buf)[hSize:]) + } // Now that outgoing flow controls are checked we can replenish str's write quota str.wq.replenish(size) var endStream bool // If this is the last data message on this stream and all of it can be written in this iteration. - if dataItem.endStream && len(dataItem.h)+len(dataItem.d) <= size { + if dataItem.endStream && remainingBytes == 0 { endStream = true } if dataItem.onEachWrite != nil { dataItem.onEachWrite() } - if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil { + if err := l.framer.fr.WriteData(dataItem.streamID, endStream, (*buf)[:size]); err != nil { return false, err } str.bytesOutStanding += size l.sendQuota -= uint32(size) dataItem.h = dataItem.h[hSize:] - dataItem.d = dataItem.d[dSize:] - if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out. + if remainingBytes == 0 { // All the data from that message was written out. + _ = dataItem.reader.Close() str.itl.dequeue() } if str.itl.isEmpty() { @@ -998,10 +1033,3 @@ func (l *loopyWriter) processData() (bool, error) { } return false, nil } - -func min(a, b int) int { - if a < b { - return a - } - return b -} diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go index 97198c51588..dfc0f224ec8 100644 --- a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go +++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go @@ -92,14 +92,11 @@ func (f *trInFlow) newLimit(n uint32) uint32 { func (f *trInFlow) onData(n uint32) uint32 { f.unacked += n - if f.unacked >= f.limit/4 { - w := f.unacked - f.unacked = 0 + if f.unacked < f.limit/4 { f.updateEffectiveWindowSize() - return w + return 0 } - f.updateEffectiveWindowSize() - return 0 + return f.reset() } func (f *trInFlow) reset() uint32 { diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 4a3ddce29a4..d9305a65d88 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -24,7 +24,6 @@ package transport import ( - "bytes" "context" "errors" "fmt" @@ -40,6 +39,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -50,7 +50,7 @@ import ( // NewServerHandlerTransport returns a ServerTransport handling gRPC from // inside an http.Handler, or writes an HTTP error to w and returns an error. // It requires that the http Server supports HTTP/2. 
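+// +// A minimal sketch of the updated call site (editor's illustration, not part +// of the upstream change; the trailing mem.BufferPool argument is new): +// +//	st, err := NewServerHandlerTransport(w, r, nil, mem.DefaultBufferPool())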
-func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) { +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) { if r.Method != http.MethodPost { w.Header().Set("Allow", http.MethodPost) msg := fmt.Sprintf("invalid gRPC request method %q", r.Method) @@ -98,6 +98,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s contentType: contentType, contentSubtype: contentSubtype, stats: stats, + bufferPool: bufferPool, } st.logger = prefixLoggerForServerHandlerTransport(st) @@ -171,6 +172,8 @@ type serverHandlerTransport struct { stats []stats.Handler logger *grpclog.PrefixLogger + + bufferPool mem.BufferPool } func (ht *serverHandlerTransport) Close(err error) { @@ -222,7 +225,7 @@ func (ht *serverHandlerTransport) do(fn func()) error { } } -func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error { +func (ht *serverHandlerTransport) writeStatus(s *ServerStream, st *status.Status) error { ht.writeStatusMu.Lock() defer ht.writeStatusMu.Unlock() @@ -244,6 +247,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro } s.hdrMu.Lock() + defer s.hdrMu.Unlock() if p := st.Proto(); p != nil && len(p.Details) > 0 { delete(s.trailer, grpcStatusDetailsBinHeader) stBytes, err := proto.Marshal(p) @@ -268,7 +272,6 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro } } } - s.hdrMu.Unlock() }) if err == nil { // transport has not been closed @@ -286,14 +289,14 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro // writePendingHeaders sets common and custom headers on the first // write call (Write, WriteHeader, or WriteStatus) -func (ht *serverHandlerTransport) writePendingHeaders(s *Stream) { +func (ht *serverHandlerTransport) writePendingHeaders(s *ServerStream) { ht.writeCommonHeaders(s) ht.writeCustomHeaders(s) } // writeCommonHeaders sets common headers on the first write // call (Write, WriteHeader, or WriteStatus). -func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { +func (ht *serverHandlerTransport) writeCommonHeaders(s *ServerStream) { h := ht.rw.Header() h["Date"] = nil // suppress Date to make tests happy; TODO: restore h.Set("Content-Type", ht.contentType) @@ -314,7 +317,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { // writeCustomHeaders sets custom headers set on the stream via SetHeader // on the first write call (Write, WriteHeader, or WriteStatus) -func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { +func (ht *serverHandlerTransport) writeCustomHeaders(s *ServerStream) { h := ht.rw.Header() s.hdrMu.Lock() @@ -330,19 +333,31 @@ func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { s.hdrMu.Unlock() } -func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { +func (ht *serverHandlerTransport) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ *WriteOptions) error { + // Always take a reference because otherwise there is no guarantee the data will + // be available after this function returns. This is what callers to Write + // expect. 
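+ // (The reference taken below is released either by the deferred data.Free() + // inside do(), or by the explicit data.Free() on the error path.)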
+ data.Ref() headersWritten := s.updateHeaderSent() - return ht.do(func() { + err := ht.do(func() { + defer data.Free() if !headersWritten { ht.writePendingHeaders(s) } ht.rw.Write(hdr) - ht.rw.Write(data) + for _, b := range data { + _, _ = ht.rw.Write(b.ReadOnlyData()) + } ht.rw.(http.Flusher).Flush() }) + if err != nil { + data.Free() + return err + } + return nil } -func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { +func (ht *serverHandlerTransport) writeHeader(s *ServerStream, md metadata.MD) error { if err := s.SetHeader(md); err != nil { return err } @@ -370,7 +385,7 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { return err } -func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*Stream)) { +func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*ServerStream)) { // With this transport type there will be exactly 1 stream: this HTTP request. var cancel context.CancelFunc if ht.timeoutSet { @@ -393,20 +408,22 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream ctx = metadata.NewIncomingContext(ctx, ht.headerMD) req := ht.req - s := &Stream{ - id: 0, // irrelevant - ctx: ctx, - requestRead: func(int) {}, + s := &ServerStream{ + Stream: &Stream{ + id: 0, // irrelevant + ctx: ctx, + requestRead: func(int) {}, + buf: newRecvBuffer(), + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), + contentSubtype: ht.contentSubtype, + }, cancel: cancel, - buf: newRecvBuffer(), st: ht, - method: req.URL.Path, - recvCompress: req.Header.Get("grpc-encoding"), - contentSubtype: ht.contentSubtype, headerWireLength: 0, // won't have access to header wire length until golang/go#18997. 
} s.trReader = &transportReader{ - reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, + reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf}, windowHandler: func(int) {}, } @@ -415,21 +432,19 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream go func() { defer close(readerDone) - // TODO: minimize garbage, optimize recvBuffer code/ownership - const readSize = 8196 - for buf := make([]byte, readSize); ; { - n, err := req.Body.Read(buf) + for { + buf := ht.bufferPool.Get(http2MaxFrameLen) + n, err := req.Body.Read(*buf) if n > 0 { - s.buf.put(recvMsg{buffer: bytes.NewBuffer(buf[:n:n])}) - buf = buf[n:] + *buf = (*buf)[:n] + s.buf.put(recvMsg{buffer: mem.NewBuffer(buf, ht.bufferPool)}) + } else { + ht.bufferPool.Put(buf) } if err != nil { s.buf.put(recvMsg{err: mapRecvMsgError(err)}) return } - if len(buf) == 0 { - buf = make([]byte, readSize) - } } }() @@ -458,11 +473,9 @@ func (ht *serverHandlerTransport) runStream() { } } -func (ht *serverHandlerTransport) IncrMsgSent() {} - -func (ht *serverHandlerTransport) IncrMsgRecv() {} +func (ht *serverHandlerTransport) incrMsgRecv() {} -func (ht *serverHandlerTransport) Drain(debugData string) { +func (ht *serverHandlerTransport) Drain(string) { panic("Drain() is not implemented") } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 3c63c706986..f323ab7f45a 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -47,6 +47,7 @@ import ( isyscall "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/internal/transport/networktype" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" @@ -59,6 +60,8 @@ import ( // atomically. var clientConnectionCounter uint64 +var goAwayLoopyWriterTimeout = 5 * time.Second + var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool)) // http2Client implements the ClientTransport interface with HTTP2. @@ -83,9 +86,9 @@ type http2Client struct { writerDone chan struct{} // sync point to enable testing. // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor) // that the server sent GoAway on this transport. - goAway chan struct{} - - framer *framer + goAway chan struct{} + keepaliveDone chan struct{} // Closed when the keepalive goroutine exits. + framer *framer // controlBuf delivers all the control related tasks (e.g., window // updates, reset streams, and various settings) to the controller. // Do not access controlBuf with mu held. @@ -120,7 +123,7 @@ type http2Client struct { mu sync.Mutex // guard the following variables nextID uint32 state transportState - activeStreams map[uint32]*Stream + activeStreams map[uint32]*ClientStream // prevGoAway ID records the Last-Stream-ID in the previous GOAway frame. 
prevGoAwayID uint32 // goAwayReason records the http2.ErrCode and debug data received with the @@ -144,7 +147,7 @@ type http2Client struct { onClose func(GoAwayReason) - bufferPool *bufferPool + bufferPool mem.BufferPool connectionID uint64 logger *grpclog.PrefixLogger @@ -196,10 +199,10 @@ func isTemporary(err error) bool { return true } -// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 +// NewHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 // and starts to receive messages on it. Non-nil error returns if construction // fails. -func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ *http2Client, err error) { +func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ ClientTransport, err error) { scheme := "http" ctx, cancel := context.WithCancel(ctx) defer func() { @@ -229,7 +232,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } }(conn) - // The following defer and goroutine monitor the connectCtx for cancelation + // The following defer and goroutine monitor the connectCtx for cancellation // and deadline. On context expiration, the connection is hard closed and // this function will naturally fail as a result. Otherwise, the defer // waits for the goroutine to exit to prevent the context from being @@ -332,10 +335,11 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts readerDone: make(chan struct{}), writerDone: make(chan struct{}), goAway: make(chan struct{}), + keepaliveDone: make(chan struct{}), framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize), fc: &trInFlow{limit: uint32(icwz)}, scheme: scheme, - activeStreams: make(map[uint32]*Stream), + activeStreams: make(map[uint32]*ClientStream), isSecure: isSecure, perRPCCreds: perRPCCreds, kp: kp, @@ -346,7 +350,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts streamQuota: defaultMaxStreamsClient, streamsQuotaAvailable: make(chan struct{}, 1), keepaliveEnabled: keepaliveEnabled, - bufferPool: newBufferPool(), + bufferPool: opts.BufferPool, onClose: onClose, } var czSecurity credentials.ChannelzSecurityValue @@ -463,7 +467,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts return nil, err } go func() { - t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler) + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool) if err := t.loopy.run(); !isIOError(err) { // Immediately close the connection, as the loopy writer returns // when there are no more active streams and we were draining (the @@ -476,17 +480,19 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts return t, nil } -func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { +func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientStream { // TODO(zhaoq): Handle uint32 overflow of Stream.id. 
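+ // ClientStream now embeds *Stream: state shared with the server side lives + // on Stream, while client-only fields such as headerChan, done and doneFunc + // stay on ClientStream.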
- s := &Stream{ - ct: t, - done: make(chan struct{}), - method: callHdr.Method, - sendCompress: callHdr.SendCompress, - buf: newRecvBuffer(), - headerChan: make(chan struct{}), - contentSubtype: callHdr.ContentSubtype, - doneFunc: callHdr.DoneFunc, + s := &ClientStream{ + Stream: &Stream{ + method: callHdr.Method, + sendCompress: callHdr.SendCompress, + buf: newRecvBuffer(), + contentSubtype: callHdr.ContentSubtype, + }, + ct: t, + done: make(chan struct{}), + headerChan: make(chan struct{}), + doneFunc: callHdr.DoneFunc, } s.wq = newWriteQuota(defaultWriteQuota, s.done) s.requestRead = func(n int) { @@ -502,9 +508,8 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { ctxDone: s.ctx.Done(), recv: s.buf, closeStream: func(err error) { - t.CloseStream(s, err) + s.Close(err) }, - freeBuffer: t.bufferPool.put, }, windowHandler: func(n int) { t.updateWindow(s, uint32(n)) @@ -525,8 +530,9 @@ func (t *http2Client) getPeer() *peer.Peer { // to be the last frame loopy writes to the transport. func (t *http2Client) outgoingGoAwayHandler(g *goAway) (bool, error) { t.mu.Lock() - defer t.mu.Unlock() - if err := t.framer.fr.WriteGoAway(t.nextID-2, http2.ErrCodeNo, g.debugData); err != nil { + maxStreamID := t.nextID - 2 + t.mu.Unlock() + if err := t.framer.fr.WriteGoAway(maxStreamID, http2.ErrCodeNo, g.debugData); err != nil { return false, err } return false, g.closeConn @@ -593,12 +599,6 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) for k, v := range callAuthData { headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } - if b := stats.OutgoingTags(ctx); b != nil { - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)}) - } - if b := stats.OutgoingTrace(ctx); b != nil { - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)}) - } if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok { var k string @@ -734,7 +734,7 @@ func (e NewStreamError) Error() string { // NewStream creates a stream and registers it into the transport as "active" // streams. All non-nil errors returned will be *NewStreamError. -func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) { +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientStream, error) { ctx = peer.NewContext(ctx, t.getPeer()) // ServerName field of the resolver returned address takes precedence over @@ -759,7 +759,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return } // The stream was unprocessed by the server. - atomic.StoreUint32(&s.unprocessed, 1) + s.unprocessed.Store(true) s.write(recvMsg{err: err}) close(s.done) // If headerChan isn't closed, then close it. @@ -770,7 +770,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, hdr := &headerFrame{ hf: headerFields, endStream: false, - initStream: func(id uint32) error { + initStream: func(uint32) error { t.mu.Lock() // TODO: handle transport closure in loopy instead and remove this // initStream is never called when transport is draining. @@ -904,21 +904,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return s, nil } -// CloseStream clears the footprint of a stream when the stream is not needed any more. -// This must not be executed in reader's goroutine. 
-func (t *http2Client) CloseStream(s *Stream, err error) { - var ( - rst bool - rstCode http2.ErrCode - ) - if err != nil { - rst = true - rstCode = http2.ErrCodeCancel - } - t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false) -} - -func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) { +func (t *http2Client) closeStream(s *ClientStream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) { // Set stream status to done. if s.swapState(streamDone) == streamDone { // If it was already done, return. If multiple closeStream calls @@ -983,6 +969,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. // only once on a transport. Once it is called, the transport should not be // accessed anymore. func (t *http2Client) Close(err error) { + t.conn.SetWriteDeadline(time.Now().Add(time.Second * 10)) t.mu.Lock() // Make sure we only close once. if t.state == closing { @@ -1005,18 +992,33 @@ func (t *http2Client) Close(err error) { // should unblock it so that the goroutine eventually exits. t.kpDormancyCond.Signal() } + // Append info about previous goaways if there were any, since this may be important + // for understanding the root cause for this connection to be closed. + goAwayDebugMessage := t.goAwayDebugMessage t.mu.Unlock() + // Per HTTP/2 spec, a GOAWAY frame must be sent before closing the - // connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY. + // connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY. It + // also waits for loopyWriter to be closed with a timer to avoid the + // long blocking in case the connection is blackholed, i.e. TCP is + // just stuck. t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte("client transport shutdown"), closeConn: err}) - <-t.writerDone + timer := time.NewTimer(goAwayLoopyWriterTimeout) + defer timer.Stop() + select { + case <-t.writerDone: // success + case <-timer.C: + t.logger.Infof("Failed to write a GOAWAY frame as part of connection close after %s. Giving up and closing the transport.", goAwayLoopyWriterTimeout) + } t.cancel() t.conn.Close() + // Waits for the reader and keepalive goroutines to exit before returning to + // ensure all resources are cleaned up before Close can return. + <-t.readerDone + if t.keepaliveEnabled { + <-t.keepaliveDone + } channelz.RemoveEntry(t.channelz.ID) - // Append info about previous goaways if there were any, since this may be important - // for understanding the root cause for this connection to be closed. - _, goAwayDebugMessage := t.GetGoAwayReason() - var st *status.Status if len(goAwayDebugMessage) > 0 { st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage) @@ -1065,30 +1067,40 @@ func (t *http2Client) GracefulClose() { // Write formats the data into HTTP2 data frame(s) and sends it out. The caller // should proceed only if Write returns nil. -func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { +func (t *http2Client) write(s *ClientStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error { + reader := data.Reader() + if opts.Last { // If it's the last message, update stream state. 
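+ // (Ownership of reader passes to the dataFrame only once controlBuf.put + // succeeds; every early-return path below closes it explicitly.)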
if !s.compareAndSwapState(streamActive, streamWriteDone) { + _ = reader.Close() return errStreamDone } } else if s.getState() != streamActive { + _ = reader.Close() return errStreamDone } df := &dataFrame{ streamID: s.id, endStream: opts.Last, h: hdr, - d: data, + reader: reader, } - if hdr != nil || data != nil { // If it's not an empty data frame, check quota. - if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + if hdr != nil || df.reader.Remaining() != 0 { // If it's not an empty data frame, check quota. + if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil { + _ = reader.Close() return err } } - return t.controlBuf.put(df) + if err := t.controlBuf.put(df); err != nil { + _ = reader.Close() + return err + } + t.incrMsgSent() + return nil } -func (t *http2Client) getStream(f http2.Frame) *Stream { +func (t *http2Client) getStream(f http2.Frame) *ClientStream { t.mu.Lock() s := t.activeStreams[f.Header().StreamID] t.mu.Unlock() @@ -1098,7 +1110,7 @@ func (t *http2Client) getStream(f http2.Frame) *Stream { // adjustWindow sends out extra window update over the initial window size // of stream if the application is requesting data larger in size than // the window. -func (t *http2Client) adjustWindow(s *Stream, n uint32) { +func (t *http2Client) adjustWindow(s *ClientStream, n uint32) { if w := s.fc.maybeAdjust(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) } @@ -1107,7 +1119,7 @@ func (t *http2Client) adjustWindow(s *Stream, n uint32) { // updateWindow adjusts the inbound quota for the stream. // Window updates will be sent out when the cumulative quota // exceeds the corresponding threshold. -func (t *http2Client) updateWindow(s *Stream, n uint32) { +func (t *http2Client) updateWindow(s *ClientStream, n uint32) { if w := s.fc.onRead(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) } @@ -1190,10 +1202,13 @@ func (t *http2Client) handleData(f *http2.DataFrame) { // guarantee f.Data() is consumed before the arrival of next frame. // Can this copy be eliminated? if len(f.Data()) > 0 { - buffer := t.bufferPool.get() - buffer.Reset() - buffer.Write(f.Data()) - s.write(recvMsg{buffer: buffer}) + pool := t.bufferPool + if pool == nil { + // Note that this is only supposed to be nil in tests. Otherwise, stream is + // always initialized with a BufferPool. + pool = mem.DefaultBufferPool() + } + s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)}) } } // The server has closed the stream without sending trailers. Record that @@ -1210,7 +1225,7 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { } if f.ErrCode == http2.ErrCodeRefusedStream { // The stream was unprocessed by the server. - atomic.StoreUint32(&s.unprocessed, 1) + s.unprocessed.Store(true) } statusCode, ok := http2ErrConvTab[f.ErrCode] if !ok { @@ -1222,7 +1237,7 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { if statusCode == codes.Canceled { if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) { // Our deadline was already exceeded, and that was likely the cause - // of this cancelation. Alter the status code accordingly. + // of this cancellation. Alter the status code accordingly. 
statusCode = codes.DeadlineExceeded } } @@ -1291,11 +1306,11 @@ func (t *http2Client) handlePing(f *http2.PingFrame) { t.controlBuf.put(pingAck) } -func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { +func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) error { t.mu.Lock() if t.state == closing { t.mu.Unlock() - return + return nil } if f.ErrCode == http2.ErrCodeEnhanceYourCalm && string(f.DebugData()) == "too_many_pings" { // When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug @@ -1307,8 +1322,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { id := f.LastStreamID if id > 0 && id%2 == 0 { t.mu.Unlock() - t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered numbered stream id: %v", id)) - return + return connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id) } // A client can receive multiple GoAways from the server (see // https://github.com/grpc/grpc-go/issues/1387). The idea is that the first @@ -1325,8 +1339,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { // If there are multiple GoAways the first one should always have an ID greater than the following ones. if id > t.prevGoAwayID { t.mu.Unlock() - t.Close(connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID)) - return + return connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID) } default: t.setGoAwayReason(f) @@ -1350,15 +1363,14 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { t.prevGoAwayID = id if len(t.activeStreams) == 0 { t.mu.Unlock() - t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams")) - return + return connectionErrorf(true, nil, "received goaway and there are no active streams") } - streamsToClose := make([]*Stream, 0) + streamsToClose := make([]*ClientStream, 0) for streamID, stream := range t.activeStreams { if streamID > id && streamID <= upperLimit { // The stream was unprocessed by the server. - atomic.StoreUint32(&stream.unprocessed, 1) + stream.unprocessed.Store(true) streamsToClose = append(streamsToClose, stream) } } @@ -1368,6 +1380,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { for _, stream := range streamsToClose { t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) } + return nil } // setGoAwayReason sets the value of t.goAwayReason based @@ -1409,7 +1422,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } endStream := frame.StreamEnded() - atomic.StoreUint32(&s.bytesReceived, 1) + s.bytesReceived.Store(true) initialHeader := atomic.LoadUint32(&s.headerChanClosed) == 0 if !initialHeader && !endStream { @@ -1603,7 +1616,13 @@ func (t *http2Client) readServerPreface() error { // network connection. If the server preface is not read successfully, an // error is pushed to errCh; otherwise errCh is closed with no error. func (t *http2Client) reader(errCh chan<- error) { - defer close(t.readerDone) + var errClose error + defer func() { + close(t.readerDone) + if errClose != nil { + t.Close(errClose) + } + }() if err := t.readServerPreface(); err != nil { errCh <- err @@ -1642,11 +1661,10 @@ func (t *http2Client) reader(errCh chan<- error) { t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false) } continue - } else { - // Transport error. 
- t.Close(connectionErrorf(true, err, "error reading from server: %v", err)) - return } + // Transport error. + errClose = connectionErrorf(true, err, "error reading from server: %v", err) + return } switch frame := frame.(type) { case *http2.MetaHeadersFrame: @@ -1660,7 +1678,7 @@ func (t *http2Client) reader(errCh chan<- error) { case *http2.PingFrame: t.handlePing(frame) case *http2.GoAwayFrame: - t.handleGoAway(frame) + errClose = t.handleGoAway(frame) case *http2.WindowUpdateFrame: t.handleWindowUpdate(frame) default: @@ -1671,15 +1689,15 @@ func (t *http2Client) reader(errCh chan<- error) { } } -func minTime(a, b time.Duration) time.Duration { - if a < b { - return a - } - return b -} - // keepalive running in a separate goroutine makes sure the connection is alive by sending pings. func (t *http2Client) keepalive() { + var err error + defer func() { + close(t.keepaliveDone) + if err != nil { + t.Close(err) + } + }() p := &ping{data: [8]byte{}} // True iff a ping has been sent, and no data has been received since then. outstandingPing := false @@ -1703,7 +1721,7 @@ func (t *http2Client) keepalive() { continue } if outstandingPing && timeoutLeft <= 0 { - t.Close(connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout")) + err = connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout") return } t.mu.Lock() @@ -1745,7 +1763,7 @@ func (t *http2Client) keepalive() { // timeoutLeft. This will ensure that we wait only for kp.Time // before sending out the next ping (for cases where the ping is // acked). - sleepDuration := minTime(t.kp.Time, timeoutLeft) + sleepDuration := min(t.kp.Time, timeoutLeft) timeoutLeft -= sleepDuration timer.Reset(sleepDuration) case <-t.ctx.Done(): @@ -1774,14 +1792,18 @@ func (t *http2Client) socketMetrics() *channelz.EphemeralSocketMetrics { func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr } -func (t *http2Client) IncrMsgSent() { - t.channelz.SocketMetrics.MessagesSent.Add(1) - t.channelz.SocketMetrics.LastMessageSentTimestamp.Store(time.Now().UnixNano()) +func (t *http2Client) incrMsgSent() { + if channelz.IsOn() { + t.channelz.SocketMetrics.MessagesSent.Add(1) + t.channelz.SocketMetrics.LastMessageSentTimestamp.Store(time.Now().UnixNano()) + } } -func (t *http2Client) IncrMsgRecv() { - t.channelz.SocketMetrics.MessagesReceived.Add(1) - t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano()) +func (t *http2Client) incrMsgRecv() { + if channelz.IsOn() { + t.channelz.SocketMetrics.MessagesReceived.Add(1) + t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano()) + } } func (t *http2Client) getOutFlowWindow() int64 { diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index b7091165b50..0055fddd7ec 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -25,7 +25,7 @@ import ( "fmt" "io" "math" - "math/rand" + rand "math/rand/v2" "net" "net/http" "strconv" @@ -39,6 +39,7 @@ import ( "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/syscall" + "google.golang.org/grpc/mem" "google.golang.org/protobuf/proto" "google.golang.org/grpc/codes" @@ -110,7 +111,7 @@ type http2Server struct { // already initialized since draining is already underway. 
drainEvent *grpcsync.Event state transportState - activeStreams map[uint32]*Stream + activeStreams map[uint32]*ServerStream // idle is the time instant when the connection went idle. // This is either the beginning of the connection or when the number of // RPCs go down to 0. @@ -119,7 +120,7 @@ type http2Server struct { // Fields below are for channelz metric collection. channelz *channelz.Socket - bufferPool *bufferPool + bufferPool mem.BufferPool connectionID uint64 @@ -255,13 +256,13 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, inTapHandle: config.InTapHandle, fc: &trInFlow{limit: uint32(icwz)}, state: reachable, - activeStreams: make(map[uint32]*Stream), + activeStreams: make(map[uint32]*ServerStream), stats: config.StatsHandlers, kp: kp, idle: time.Now(), kep: kep, initialWindowSize: iwz, - bufferPool: newBufferPool(), + bufferPool: config.BufferPool, } var czSecurity credentials.ChannelzSecurityValue if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok { @@ -330,7 +331,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, t.handleSettings(sf) go func() { - t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler) + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool) err := t.loopy.run() close(t.loopyWriterDone) if !isIOError(err) { @@ -358,7 +359,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, // operateHeaders takes action on the decoded headers. Returns an error if fatal // error encountered and transport needs to close, otherwise returns nil. -func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*Stream)) error { +func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*ServerStream)) error { // Acquire max stream ID lock for entire duration t.maxStreamMu.Lock() defer t.maxStreamMu.Unlock() @@ -384,11 +385,13 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade t.maxStreamID = streamID buf := newRecvBuffer() - s := &Stream{ - id: streamID, + s := &ServerStream{ + Stream: &Stream{ + id: streamID, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + }, st: t, - buf: buf, - fc: &inFlow{limit: uint32(t.initialWindowSize)}, headerWireLength: int(frame.Header().Length), } var ( @@ -536,12 +539,6 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade // Attach the received metadata to the context. 
if len(mdata) > 0 { s.ctx = metadata.NewIncomingContext(s.ctx, mdata) - if statsTags := mdata["grpc-tags-bin"]; len(statsTags) > 0 { - s.ctx = stats.SetIncomingTags(s.ctx, []byte(statsTags[len(statsTags)-1])) - } - if statsTrace := mdata["grpc-trace-bin"]; len(statsTrace) > 0 { - s.ctx = stats.SetIncomingTrace(s.ctx, []byte(statsTrace[len(statsTrace)-1])) - } } t.mu.Lock() if t.state != reachable { @@ -613,10 +610,9 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) s.trReader = &transportReader{ reader: &recvBufferReader{ - ctx: s.ctx, - ctxDone: s.ctxDone, - recv: s.buf, - freeBuffer: t.bufferPool.put, + ctx: s.ctx, + ctxDone: s.ctxDone, + recv: s.buf, }, windowHandler: func(n int) { t.updateWindow(s, uint32(n)) @@ -634,7 +630,7 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade // HandleStreams receives incoming streams using the given handler. This is // typically run in a separate goroutine. // traceCtx attaches trace to ctx and returns the new context. -func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { +func (t *http2Server) HandleStreams(ctx context.Context, handle func(*ServerStream)) { defer func() { close(t.readerDone) <-t.loopyWriterDone @@ -698,7 +694,7 @@ func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { } } -func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { +func (t *http2Server) getStream(f http2.Frame) (*ServerStream, bool) { t.mu.Lock() defer t.mu.Unlock() if t.activeStreams == nil { @@ -716,7 +712,7 @@ func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { // adjustWindow sends out extra window update over the initial window size // of stream if the application is requesting data larger in size than // the window. -func (t *http2Server) adjustWindow(s *Stream, n uint32) { +func (t *http2Server) adjustWindow(s *ServerStream, n uint32) { if w := s.fc.maybeAdjust(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) } @@ -726,7 +722,7 @@ func (t *http2Server) adjustWindow(s *Stream, n uint32) { // updateWindow adjusts the inbound quota for the stream and the transport. // Window updates will deliver to the controller for sending when // the cumulative quota exceeds the corresponding threshold. -func (t *http2Server) updateWindow(s *Stream, n uint32) { +func (t *http2Server) updateWindow(s *ServerStream, n uint32) { if w := s.fc.onRead(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w, @@ -813,10 +809,13 @@ func (t *http2Server) handleData(f *http2.DataFrame) { // guarantee f.Data() is consumed before the arrival of next frame. // Can this copy be eliminated? if len(f.Data()) > 0 { - buffer := t.bufferPool.get() - buffer.Reset() - buffer.Write(f.Data()) - s.write(recvMsg{buffer: buffer}) + pool := t.bufferPool + if pool == nil { + // Note that this is only supposed to be nil in tests. Otherwise, stream is + // always initialized with a BufferPool. 
+ pool = mem.DefaultBufferPool() + } + s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)}) } } if f.StreamEnded() { @@ -960,7 +959,7 @@ func (t *http2Server) checkForHeaderListSize(it any) bool { return true } -func (t *http2Server) streamContextErr(s *Stream) error { +func (t *http2Server) streamContextErr(s *ServerStream) error { select { case <-t.done: return ErrConnClosing @@ -970,7 +969,7 @@ } // WriteHeader sends the header metadata md back to the client. -func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { +func (t *http2Server) writeHeader(s *ServerStream, md metadata.MD) error { s.hdrMu.Lock() defer s.hdrMu.Unlock() if s.getState() == streamDone { @@ -1003,7 +1002,7 @@ func (t *http2Server) setResetPingStrikes() { atomic.StoreUint32(&t.resetPingStrikes, 1) } -func (t *http2Server) writeHeaderLocked(s *Stream) error { +func (t *http2Server) writeHeaderLocked(s *ServerStream) error { // TODO(mmukhi): Benchmark if the performance gets better if we count the metadata and other header fields // first and create a slice of that exact size. headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else. @@ -1043,7 +1042,7 @@ // No further I/O operations can be performed on this stream. // TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early // OK is adopted. -func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { +func (t *http2Server) writeStatus(s *ServerStream, st *status.Status) error { s.hdrMu.Lock() defer s.hdrMu.Unlock() @@ -1089,7 +1088,9 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { onWrite: t.setResetPingStrikes, } - success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader) + success, err := t.controlBuf.executeAndPut(func() bool { + return t.checkForHeaderListSize(trailingHeader) + }, nil) if !success { if err != nil { return err @@ -1112,27 +1113,38 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { // Write converts the data into HTTP2 data frame and sends it out. Non-nil error // is returned if it fails (e.g., framing error, transport error). -func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { +func (t *http2Server) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ *WriteOptions) error { + reader := data.Reader() + if !s.isHeaderSent() { // Headers haven't been written yet. - if err := t.WriteHeader(s, nil); err != nil { + if err := t.writeHeader(s, nil); err != nil { + _ = reader.Close() return err } } else { // Writing headers checks for this condition. if s.getState() == streamDone { + _ = reader.Close() return t.streamContextErr(s) } } + df := &dataFrame{ streamID: s.id, h: hdr, - d: data, + reader: reader, onEachWrite: t.setResetPingStrikes, } - if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil { + _ = reader.Close() return t.streamContextErr(s) } - return t.controlBuf.put(df) + if err := t.controlBuf.put(df); err != nil { + _ = reader.Close() + return err + } + t.incrMsgSent() + return nil } // keepalive running in a separate goroutine does the following: @@ -1223,7 +1235,7 @@ func (t *http2Server) keepalive() { // timeoutLeft.
This will ensure that we wait only for kp.Time // before sending out the next ping (for cases where the ping is // acked). - sleepDuration := minTime(t.kp.Time, kpTimeoutLeft) + sleepDuration := min(t.kp.Time, kpTimeoutLeft) kpTimeoutLeft -= sleepDuration kpTimer.Reset(sleepDuration) case <-t.done: @@ -1261,7 +1273,7 @@ func (t *http2Server) Close(err error) { } // deleteStream deletes the stream s from transport's active streams. -func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { +func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) { t.mu.Lock() if _, ok := t.activeStreams[s.id]; ok { @@ -1282,7 +1294,7 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { } // finishStream closes the stream and puts the trailing headerFrame into controlbuf. -func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { +func (t *http2Server) finishStream(s *ServerStream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { // In case stream sending and receiving are invoked in separate // goroutines (e.g., bi-directional streaming), cancel needs to be // called to interrupt the potential blocking on other goroutines. @@ -1306,7 +1318,7 @@ func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, h } // closeStream clears the footprint of a stream when the stream is not needed any more. -func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) { +func (t *http2Server) closeStream(s *ServerStream, rst bool, rstCode http2.ErrCode, eosReceived bool) { // In case stream sending and receiving are invoked in separate // goroutines (e.g., bi-directional streaming), cancel needs to be // called to interrupt the potential blocking on other goroutines. @@ -1400,14 +1412,18 @@ func (t *http2Server) socketMetrics() *channelz.EphemeralSocketMetrics { } } -func (t *http2Server) IncrMsgSent() { - t.channelz.SocketMetrics.MessagesSent.Add(1) - t.channelz.SocketMetrics.LastMessageSentTimestamp.Add(1) +func (t *http2Server) incrMsgSent() { + if channelz.IsOn() { + t.channelz.SocketMetrics.MessagesSent.Add(1) + t.channelz.SocketMetrics.LastMessageSentTimestamp.Add(1) + } } -func (t *http2Server) IncrMsgRecv() { - t.channelz.SocketMetrics.MessagesReceived.Add(1) - t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Add(1) +func (t *http2Server) incrMsgRecv() { + if channelz.IsOn() { + t.channelz.SocketMetrics.MessagesReceived.Add(1) + t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Add(1) + } } func (t *http2Server) getOutFlowWindow() int64 { @@ -1440,7 +1456,7 @@ func getJitter(v time.Duration) time.Duration { } // Generate a jitter between +/- 10% of the value. r := int64(v / 10) - j := rand.Int63n(2*r) - r + j := rand.Int64N(2*r) - r return time.Duration(j) } diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index 39cef3bd442..3613d7b6481 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -317,28 +317,32 @@ func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter { return w } -func (w *bufWriter) Write(b []byte) (n int, err error) { +func (w *bufWriter) Write(b []byte) (int, error) { if w.err != nil { return 0, w.err } if w.batchSize == 0 { // Buffer has been disabled. 
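The keepalive and jitter hunks above swap the local minTime helper for Go 1.21's built-in generic min, and math/rand's Int63n for math/rand/v2's Int64N. A sketch of just the jitter computation (the surrounding guard for an infinite duration is elided here):

```go
package main

import (
	"fmt"
	rand "math/rand/v2"
	"time"
)

// Uniform jitter in [-r, r) where r = v/10, as in getJitter.
func jitter(v time.Duration) time.Duration {
	r := int64(v / 10)
	return time.Duration(rand.Int64N(2*r) - r)
}

func main() {
	fmt.Println(jitter(10 * time.Second)) // e.g. -312ms
}
```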
- n, err = w.conn.Write(b) + n, err := w.conn.Write(b) return n, toIOError(err) } if w.buf == nil { b := w.pool.Get().(*[]byte) w.buf = *b } + written := 0 for len(b) > 0 { - nn := copy(w.buf[w.offset:], b) - b = b[nn:] - w.offset += nn - n += nn - if w.offset >= w.batchSize { - err = w.flushKeepBuffer() + copied := copy(w.buf[w.offset:], b) + b = b[copied:] + written += copied + w.offset += copied + if w.offset < w.batchSize { + continue + } + if err := w.flushKeepBuffer(); err != nil { + return written, err } } - return n, err + return written, nil } func (w *bufWriter) Flush() error { @@ -389,7 +393,7 @@ type framer struct { fr *http2.Framer } -var writeBufferPoolMap map[int]*sync.Pool = make(map[int]*sync.Pool) +var writeBufferPoolMap = make(map[int]*sync.Pool) var writeBufferMutex sync.Mutex func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer { diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go index 24fa1032574..54b22443654 100644 --- a/vendor/google.golang.org/grpc/internal/transport/proxy.go +++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go @@ -107,8 +107,14 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri } return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump) } - - return &bufConn{Conn: conn, r: r}, nil + // The buffer could contain extra bytes from the target server, so we can't + // discard it. However, in many cases where the server waits for the client + // to send the first message (e.g. when TLS is being used), the buffer will + // be empty, so we can avoid the overhead of reading through this buffer. + if r.Buffered() != 0 { + return &bufConn{Conn: conn, r: r}, nil + } + return conn, nil } // proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy diff --git a/vendor/google.golang.org/grpc/internal/transport/server_stream.go b/vendor/google.golang.org/grpc/internal/transport/server_stream.go new file mode 100644 index 00000000000..a22a9015149 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/server_stream.go @@ -0,0 +1,178 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "context" + "errors" + "strings" + "sync" + "sync/atomic" + + "google.golang.org/grpc/mem" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// ServerStream implements streaming functionality for a gRPC server. +type ServerStream struct { + *Stream // Embed for common stream functionality. + + st internalServerTransport + ctxDone <-chan struct{} // closed at the end of stream. Cache of ctx.Done() (for performance) + cancel context.CancelFunc // invoked at the end of stream to cancel ctx. + + // Holds compressor names passed in grpc-accept-encoding metadata from the + // client. 
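The proxy.go change above returns the raw conn whenever the bufio.Reader drained nothing past the CONNECT response, avoiding the bufConn wrapper on every subsequent read. The bufio behavior it relies on, in isolation (the HTTP bytes are illustrative):

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	r := bufio.NewReader(strings.NewReader("HTTP/1.1 200 OK\r\n\r\nextra"))
	r.ReadString('\n') // status line
	r.ReadString('\n') // blank line ending the headers
	// Bytes read from the conn but not yet consumed are still buffered;
	// when this is zero the reader wrapper can be dropped entirely.
	fmt.Println(r.Buffered()) // 5 ("extra")
}
```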
+ clientAdvertisedCompressors string + headerWireLength int + + // hdrMu protects outgoing header and trailer metadata. + hdrMu sync.Mutex + header metadata.MD // the outgoing header metadata. Updated by WriteHeader. + headerSent atomic.Bool // atomically set when the headers are sent out. +} + +// Read reads an n byte message from the input stream. +func (s *ServerStream) Read(n int) (mem.BufferSlice, error) { + b, err := s.Stream.read(n) + if err == nil { + s.st.incrMsgRecv() + } + return b, err +} + +// SendHeader sends the header metadata for the given stream. +func (s *ServerStream) SendHeader(md metadata.MD) error { + return s.st.writeHeader(s, md) +} + +// Write writes the hdr and data bytes to the output stream. +func (s *ServerStream) Write(hdr []byte, data mem.BufferSlice, opts *WriteOptions) error { + return s.st.write(s, hdr, data, opts) +} + +// WriteStatus sends the status of a stream to the client. WriteStatus is +// the final call made on a stream and always occurs. +func (s *ServerStream) WriteStatus(st *status.Status) error { + return s.st.writeStatus(s, st) +} + +// isHeaderSent indicates whether headers have been sent. +func (s *ServerStream) isHeaderSent() bool { + return s.headerSent.Load() +} + +// updateHeaderSent updates headerSent and returns true +// if it was already set. +func (s *ServerStream) updateHeaderSent() bool { + return s.headerSent.Swap(true) +} + +// RecvCompress returns the compression algorithm applied to the inbound +// message. It is empty string if there is no compression applied. +func (s *ServerStream) RecvCompress() string { + return s.recvCompress +} + +// SendCompress returns the send compressor name. +func (s *ServerStream) SendCompress() string { + return s.sendCompress +} + +// ContentSubtype returns the content-subtype for a request. For example, a +// content-subtype of "proto" will result in a content-type of +// "application/grpc+proto". This will always be lowercase. See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +func (s *ServerStream) ContentSubtype() string { + return s.contentSubtype +} + +// SetSendCompress sets the compression algorithm to the stream. +func (s *ServerStream) SetSendCompress(name string) error { + if s.isHeaderSent() || s.getState() == streamDone { + return errors.New("transport: set send compressor called after headers sent or stream done") + } + + s.sendCompress = name + return nil +} + +// SetContext sets the context of the stream. This will be deleted once the +// stats handler callouts all move to gRPC layer. +func (s *ServerStream) SetContext(ctx context.Context) { + s.ctx = ctx +} + +// ClientAdvertisedCompressors returns the compressor names advertised by the +// client via grpc-accept-encoding header. +func (s *ServerStream) ClientAdvertisedCompressors() []string { + values := strings.Split(s.clientAdvertisedCompressors, ",") + for i, v := range values { + values[i] = strings.TrimSpace(v) + } + return values +} + +// Header returns the header metadata of the stream. It returns the out header +// after t.WriteHeader is called. It does not block and must not be called +// until after WriteHeader. +func (s *ServerStream) Header() (metadata.MD, error) { + // Return the header in stream. It will be the out + // header after t.WriteHeader is called. + return s.header.Copy(), nil +} + +// HeaderWireLength returns the size of the headers of the stream as received +// from the wire. 
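The new ServerStream keeps the raw grpc-accept-encoding value and only splits it when ClientAdvertisedCompressors is called. The parsing contract, extracted into a standalone sketch:

```go
package main

import (
	"fmt"
	"strings"
)

// Mirrors ServerStream.ClientAdvertisedCompressors: the header value is
// comma-separated and may carry whitespace, so each name is trimmed.
func parseAdvertisedCompressors(v string) []string {
	values := strings.Split(v, ",")
	for i, s := range values {
		values[i] = strings.TrimSpace(s)
	}
	return values
}

func main() {
	fmt.Println(parseAdvertisedCompressors("gzip, deflate ,snappy"))
	// [gzip deflate snappy]
}
```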
+func (s *ServerStream) HeaderWireLength() int { + return s.headerWireLength +} + +// SetHeader sets the header metadata. This can be called multiple times. +// This should not be called in parallel to other data writes. +func (s *ServerStream) SetHeader(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + if s.isHeaderSent() || s.getState() == streamDone { + return ErrIllegalHeaderWrite + } + s.hdrMu.Lock() + s.header = metadata.Join(s.header, md) + s.hdrMu.Unlock() + return nil +} + +// SetTrailer sets the trailer metadata which will be sent with the RPC status +// by the server. This can be called multiple times. +// This should not be called parallel to other data writes. +func (s *ServerStream) SetTrailer(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + if s.getState() == streamDone { + return ErrIllegalHeaderWrite + } + s.hdrMu.Lock() + s.trailer = metadata.Join(s.trailer, md) + s.hdrMu.Unlock() + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 4b39c0ade97..2859b87755f 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -22,13 +22,11 @@ package transport import ( - "bytes" "context" "errors" "fmt" "io" "net" - "strings" "sync" "sync/atomic" "time" @@ -37,9 +35,9 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" - "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" "google.golang.org/grpc/tap" @@ -47,32 +45,10 @@ import ( const logLevel = 2 -type bufferPool struct { - pool sync.Pool -} - -func newBufferPool() *bufferPool { - return &bufferPool{ - pool: sync.Pool{ - New: func() any { - return new(bytes.Buffer) - }, - }, - } -} - -func (p *bufferPool) get() *bytes.Buffer { - return p.pool.Get().(*bytes.Buffer) -} - -func (p *bufferPool) put(b *bytes.Buffer) { - p.pool.Put(b) -} - // recvMsg represents the received msg from the transport. All transport // protocol specific info has been removed. type recvMsg struct { - buffer *bytes.Buffer + buffer mem.Buffer // nil: received some data // io.EOF: stream is completed. data is nil. // other non-nil error: transport failure. data is nil. @@ -102,6 +78,9 @@ func newRecvBuffer() *recvBuffer { func (b *recvBuffer) put(r recvMsg) { b.mu.Lock() if b.err != nil { + // drop the buffer on the floor. Since b.err is not nil, any subsequent reads + // will always return an error, making this buffer inaccessible. + r.buffer.Free() b.mu.Unlock() // An error had occurred earlier, don't accept more // data or errors. @@ -148,45 +127,70 @@ type recvBufferReader struct { ctx context.Context ctxDone <-chan struct{} // cache of ctx.Done() (for performance). recv *recvBuffer - last *bytes.Buffer // Stores the remaining data in the previous calls. + last mem.Buffer // Stores the remaining data in the previous calls. err error - freeBuffer func(*bytes.Buffer) } -// Read reads the next len(p) bytes from last. If last is drained, it tries to -// read additional data from recv. It blocks if there no additional data available -// in recv. If Read returns any non-nil error, it will continue to return that error. 
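recvMsg now carries a reference-counted mem.Buffer, which is why recvBuffer.put above must Free any buffer it drops after an error: an unread reference would otherwise keep the backing array out of the pool. The lifecycle, assuming the mem package from this diff:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	pool := mem.DefaultBufferPool()
	buf := mem.NewBuffer(pool.Get(2048), pool) // refcount 1

	buf.Ref()  // hand a reference to a second holder
	buf.Free() // first holder done; refcount 1, data still valid
	fmt.Println(buf.Len()) // 2048
	buf.Free() // last reference: backing array returns to the pool
}
```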
-func (r *recvBufferReader) Read(p []byte) (n int, err error) { +func (r *recvBufferReader) ReadMessageHeader(header []byte) (n int, err error) { if r.err != nil { return 0, r.err } if r.last != nil { - // Read remaining data left in last call. - copied, _ := r.last.Read(p) - if r.last.Len() == 0 { - r.freeBuffer(r.last) + n, r.last = mem.ReadUnsafe(header, r.last) + return n, nil + } + if r.closeStream != nil { + n, r.err = r.readMessageHeaderClient(header) + } else { + n, r.err = r.readMessageHeader(header) + } + return n, r.err +} + +// Read reads the next n bytes from last. If last is drained, it tries to read +// additional data from recv. It blocks if there no additional data available in +// recv. If Read returns any non-nil error, it will continue to return that +// error. +func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) { + if r.err != nil { + return nil, r.err + } + if r.last != nil { + buf = r.last + if r.last.Len() > n { + buf, r.last = mem.SplitUnsafe(buf, n) + } else { r.last = nil } - return copied, nil + return buf, nil } if r.closeStream != nil { - n, r.err = r.readClient(p) + buf, r.err = r.readClient(n) } else { - n, r.err = r.read(p) + buf, r.err = r.read(n) } - return n, r.err + return buf, r.err } -func (r *recvBufferReader) read(p []byte) (n int, err error) { +func (r *recvBufferReader) readMessageHeader(header []byte) (n int, err error) { select { case <-r.ctxDone: return 0, ContextErr(r.ctx.Err()) case m := <-r.recv.get(): - return r.readAdditional(m, p) + return r.readMessageHeaderAdditional(m, header) } } -func (r *recvBufferReader) readClient(p []byte) (n int, err error) { +func (r *recvBufferReader) read(n int) (buf mem.Buffer, err error) { + select { + case <-r.ctxDone: + return nil, ContextErr(r.ctx.Err()) + case m := <-r.recv.get(): + return r.readAdditional(m, n) + } +} + +func (r *recvBufferReader) readMessageHeaderClient(header []byte) (n int, err error) { // If the context is canceled, then closes the stream with nil metadata. // closeStream writes its error parameter to r.recv as a recvMsg. // r.readAdditional acts on that message and returns the necessary error. @@ -207,25 +211,67 @@ func (r *recvBufferReader) readClient(p []byte) (n int, err error) { // faster. r.closeStream(ContextErr(r.ctx.Err())) m := <-r.recv.get() - return r.readAdditional(m, p) + return r.readMessageHeaderAdditional(m, header) case m := <-r.recv.get(): - return r.readAdditional(m, p) + return r.readMessageHeaderAdditional(m, header) } } -func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) { +func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) { + // If the context is canceled, then closes the stream with nil metadata. + // closeStream writes its error parameter to r.recv as a recvMsg. + // r.readAdditional acts on that message and returns the necessary error. + select { + case <-r.ctxDone: + // Note that this adds the ctx error to the end of recv buffer, and + // reads from the head. This will delay the error until recv buffer is + // empty, thus will delay ctx cancellation in Recv(). + // + // It's done this way to fix a race between ctx cancel and trailer. The + // race was, stream.Recv() may return ctx error if ctxDone wins the + // race, but stream.Trailer() may return a non-nil md because the stream + // was not marked as done when trailer is received. This closeStream + // call will mark stream as done, thus fix the race. + // + // TODO: delaying ctx error seems like a unnecessary side effect. 
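ReadMessageHeader fills a caller-owned slice directly, plausibly so that the fixed-size message prefix never needs a pooled buffer. Assuming the standard 5-byte gRPC-over-HTTP/2 message prefix (one compressed-flag byte followed by a big-endian uint32 length, per the PROTOCOL-HTTP2 spec the doc comments cite), decoding it looks like:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	header := []byte{0x00, 0x00, 0x00, 0x04, 0xD2} // flag + big-endian length
	compressed := header[0] == 1
	length := binary.BigEndian.Uint32(header[1:5])
	fmt.Println(compressed, length) // false 1234
}
```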
What + // we really want is to mark the stream as done, and return ctx error + // faster. + r.closeStream(ContextErr(r.ctx.Err())) + m := <-r.recv.get() + return r.readAdditional(m, n) + case m := <-r.recv.get(): + return r.readAdditional(m, n) + } +} + +func (r *recvBufferReader) readMessageHeaderAdditional(m recvMsg, header []byte) (n int, err error) { r.recv.load() if m.err != nil { + if m.buffer != nil { + m.buffer.Free() + } return 0, m.err } - copied, _ := m.buffer.Read(p) - if m.buffer.Len() == 0 { - r.freeBuffer(m.buffer) - r.last = nil - } else { - r.last = m.buffer + + n, r.last = mem.ReadUnsafe(header, m.buffer) + + return n, nil +} + +func (r *recvBufferReader) readAdditional(m recvMsg, n int) (b mem.Buffer, err error) { + r.recv.load() + if m.err != nil { + if m.buffer != nil { + m.buffer.Free() + } + return nil, m.err } - return copied, nil + + if m.buffer.Len() > n { + m.buffer, r.last = mem.SplitUnsafe(m.buffer, n) + } + + return m.buffer, nil } type streamState uint32 @@ -240,73 +286,26 @@ const ( // Stream represents an RPC in the transport layer. type Stream struct { id uint32 - st ServerTransport // nil for client side Stream - ct *http2Client // nil for server side Stream - ctx context.Context // the associated context of the stream - cancel context.CancelFunc // always nil for client side Stream - done chan struct{} // closed at the end of stream to unblock writers. On the client side. - doneFunc func() // invoked at the end of stream on client side. - ctxDone <-chan struct{} // same as done chan but for server side. Cache of ctx.Done() (for performance) - method string // the associated RPC method of the stream + ctx context.Context // the associated context of the stream + method string // the associated RPC method of the stream recvCompress string sendCompress string buf *recvBuffer - trReader io.Reader + trReader *transportReader fc *inFlow wq *writeQuota - // Holds compressor names passed in grpc-accept-encoding metadata from the - // client. This is empty for the client side stream. - clientAdvertisedCompressors string // Callback to state application's intentions to read data. This // is used to adjust flow control, if needed. requestRead func(int) - headerChan chan struct{} // closed to indicate the end of header metadata. - headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. - // headerValid indicates whether a valid header was received. Only - // meaningful after headerChan is closed (always call waitOnHeader() before - // reading its value). Not valid on server side. - headerValid bool - headerWireLength int // Only set on server side. - - // hdrMu protects header and trailer metadata on the server-side. - hdrMu sync.Mutex - // On client side, header keeps the received header metadata. - // - // On server side, header keeps the header set by SetHeader(). The complete - // header will merged into this after t.WriteHeader() is called. - header metadata.MD - trailer metadata.MD // the key-value map of trailer metadata. - - noHeaders bool // set if the client never received headers (set only after the stream is done). - - // On the server-side, headerSent is atomically set to 1 when the headers are sent out. - headerSent uint32 - state streamState - // On client-side it is the status error received from the server. - // On server-side it is unused. 
- status *status.Status - - bytesReceived uint32 // indicates whether any bytes have been received on this stream - unprocessed uint32 // set if the server sends a refused stream or GOAWAY including this stream - // contentSubtype is the content-subtype for requests. // this must be lowercase or the behavior is undefined. contentSubtype string -} - -// isHeaderSent is only valid on the server-side. -func (s *Stream) isHeaderSent() bool { - return atomic.LoadUint32(&s.headerSent) == 1 -} -// updateHeaderSent updates headerSent and returns true -// if it was already set. It is valid only on server-side. -func (s *Stream) updateHeaderSent() bool { - return atomic.SwapUint32(&s.headerSent, 1) == 1 + trailer metadata.MD // the key-value map of trailer metadata. } func (s *Stream) swapState(st streamState) streamState { @@ -321,110 +320,12 @@ func (s *Stream) getState() streamState { return streamState(atomic.LoadUint32((*uint32)(&s.state))) } -func (s *Stream) waitOnHeader() { - if s.headerChan == nil { - // On the server headerChan is always nil since a stream originates - // only after having received headers. - return - } - select { - case <-s.ctx.Done(): - // Close the stream to prevent headers/trailers from changing after - // this function returns. - s.ct.CloseStream(s, ContextErr(s.ctx.Err())) - // headerChan could possibly not be closed yet if closeStream raced - // with operateHeaders; wait until it is closed explicitly here. - <-s.headerChan - case <-s.headerChan: - } -} - -// RecvCompress returns the compression algorithm applied to the inbound -// message. It is empty string if there is no compression applied. -func (s *Stream) RecvCompress() string { - s.waitOnHeader() - return s.recvCompress -} - -// SetSendCompress sets the compression algorithm to the stream. -func (s *Stream) SetSendCompress(name string) error { - if s.isHeaderSent() || s.getState() == streamDone { - return errors.New("transport: set send compressor called after headers sent or stream done") - } - - s.sendCompress = name - return nil -} - -// SendCompress returns the send compressor name. -func (s *Stream) SendCompress() string { - return s.sendCompress -} - -// ClientAdvertisedCompressors returns the compressor names advertised by the -// client via grpc-accept-encoding header. -func (s *Stream) ClientAdvertisedCompressors() []string { - values := strings.Split(s.clientAdvertisedCompressors, ",") - for i, v := range values { - values[i] = strings.TrimSpace(v) - } - return values -} - -// Done returns a channel which is closed when it receives the final status -// from the server. -func (s *Stream) Done() <-chan struct{} { - return s.done -} - -// Header returns the header metadata of the stream. -// -// On client side, it acquires the key-value pairs of header metadata once it is -// available. It blocks until i) the metadata is ready or ii) there is no header -// metadata or iii) the stream is canceled/expired. -// -// On server side, it returns the out header after t.WriteHeader is called. It -// does not block and must not be called until after WriteHeader. -func (s *Stream) Header() (metadata.MD, error) { - if s.headerChan == nil { - // On server side, return the header in stream. It will be the out - // header after t.WriteHeader is called. 
- return s.header.Copy(), nil - } - s.waitOnHeader() - - if !s.headerValid || s.noHeaders { - return nil, s.status.Err() - } - - return s.header.Copy(), nil -} - -// TrailersOnly blocks until a header or trailers-only frame is received and -// then returns true if the stream was trailers-only. If the stream ends -// before headers are received, returns true, nil. Client-side only. -func (s *Stream) TrailersOnly() bool { - s.waitOnHeader() - return s.noHeaders -} - -// Trailer returns the cached trailer metedata. Note that if it is not called -// after the entire stream is done, it could return an empty MD. Client -// side only. +// Trailer returns the cached trailer metadata. Note that if it is not called +// after the entire stream is done, it could return an empty MD. // It can be safely read only after stream has ended that is either read // or write have returned io.EOF. func (s *Stream) Trailer() metadata.MD { - c := s.trailer.Copy() - return c -} - -// ContentSubtype returns the content-subtype for a request. For example, a -// content-subtype of "proto" will result in a content-type of -// "application/grpc+proto". This will always be lowercase. See -// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for -// more details. -func (s *Stream) ContentSubtype() string { - return s.contentSubtype + return s.trailer.Copy() } // Context returns the context of the stream. @@ -432,114 +333,104 @@ func (s *Stream) Context() context.Context { return s.ctx } -// SetContext sets the context of the stream. This will be deleted once the -// stats handler callouts all move to gRPC layer. -func (s *Stream) SetContext(ctx context.Context) { - s.ctx = ctx -} - // Method returns the method for the stream. func (s *Stream) Method() string { return s.method } -// Status returns the status received from the server. -// Status can be read safely only after the stream has ended, -// that is, after Done() is closed. -func (s *Stream) Status() *status.Status { - return s.status -} - -// HeaderWireLength returns the size of the headers of the stream as received -// from the wire. Valid only on the server. -func (s *Stream) HeaderWireLength() int { - return s.headerWireLength -} - -// SetHeader sets the header metadata. This can be called multiple times. -// Server side only. -// This should not be called in parallel to other data writes. -func (s *Stream) SetHeader(md metadata.MD) error { - if md.Len() == 0 { - return nil - } - if s.isHeaderSent() || s.getState() == streamDone { - return ErrIllegalHeaderWrite - } - s.hdrMu.Lock() - s.header = metadata.Join(s.header, md) - s.hdrMu.Unlock() - return nil -} - -// SendHeader sends the given header metadata. The given metadata is -// combined with any metadata set by previous calls to SetHeader and -// then written to the transport stream. -func (s *Stream) SendHeader(md metadata.MD) error { - return s.st.WriteHeader(s, md) +func (s *Stream) write(m recvMsg) { + s.buf.put(m) } -// SetTrailer sets the trailer metadata which will be sent with the RPC status -// by the server. This can be called multiple times. Server side only. -// This should not be called parallel to other data writes. -func (s *Stream) SetTrailer(md metadata.MD) error { - if md.Len() == 0 { - return nil +// ReadMessageHeader reads data into the provided header slice from the stream. +// It first checks if there was an error during a previous read operation and +// returns it if present. It then requests a read operation for the length of +// the header. 
It continues to read from the stream until the entire header +// slice is filled or an error occurs. If an `io.EOF` error is encountered with +// partially read data, it is converted to `io.ErrUnexpectedEOF` to indicate an +// unexpected end of the stream. The method returns any error encountered during +// the read process or nil if the header was successfully read. +func (s *Stream) ReadMessageHeader(header []byte) (err error) { + // Don't request a read if there was an error earlier + if er := s.trReader.er; er != nil { + return er } - if s.getState() == streamDone { - return ErrIllegalHeaderWrite + s.requestRead(len(header)) + for len(header) != 0 { + n, err := s.trReader.ReadMessageHeader(header) + header = header[n:] + if len(header) == 0 { + err = nil + } + if err != nil { + if n > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } } - s.hdrMu.Lock() - s.trailer = metadata.Join(s.trailer, md) - s.hdrMu.Unlock() return nil } -func (s *Stream) write(m recvMsg) { - s.buf.put(m) -} - -// Read reads all p bytes from the wire for this stream. -func (s *Stream) Read(p []byte) (n int, err error) { +// Read reads n bytes from the wire for this stream. +func (s *Stream) read(n int) (data mem.BufferSlice, err error) { // Don't request a read if there was an error earlier - if er := s.trReader.(*transportReader).er; er != nil { - return 0, er + if er := s.trReader.er; er != nil { + return nil, er + } + s.requestRead(n) + for n != 0 { + buf, err := s.trReader.Read(n) + var bufLen int + if buf != nil { + bufLen = buf.Len() + } + n -= bufLen + if n == 0 { + err = nil + } + if err != nil { + if bufLen > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + data.Free() + return nil, err + } + data = append(data, buf) } - s.requestRead(len(p)) - return io.ReadFull(s.trReader, p) + return data, nil } -// tranportReader reads all the data available for this Stream from the transport and +// transportReader reads all the data available for this Stream from the transport and // passes them into the decoder, which converts them into a gRPC message stream. // The error is io.EOF when the stream is done or another non-nil error if // the stream broke. type transportReader struct { - reader io.Reader + reader *recvBufferReader // The handler to control the window update procedure for both this // particular stream and the associated transport. windowHandler func(int) er error } -func (t *transportReader) Read(p []byte) (n int, err error) { - n, err = t.reader.Read(p) +func (t *transportReader) ReadMessageHeader(header []byte) (int, error) { + n, err := t.reader.ReadMessageHeader(header) if err != nil { t.er = err - return + return 0, err } t.windowHandler(n) - return + return n, nil } -// BytesReceived indicates whether any bytes have been received on this stream. -func (s *Stream) BytesReceived() bool { - return atomic.LoadUint32(&s.bytesReceived) == 1 -} - -// Unprocessed indicates whether the server did not process this stream -- -// i.e. it sent a refused stream or GOAWAY including this stream ID. 
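Stream.ReadMessageHeader and Stream.read both normalize an EOF that interrupts a partially read message into io.ErrUnexpectedEOF. The same conversion in self-contained form; iotest.DataErrReader delivers the EOF together with the final bytes, which is how the partial-read case arises:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"testing/iotest"
)

func fill(r io.Reader, p []byte) error {
	for len(p) != 0 {
		n, err := r.Read(p)
		p = p[n:]
		if len(p) == 0 {
			return nil // got everything; a trailing EOF is not an error
		}
		if err != nil {
			if n > 0 && err == io.EOF {
				err = io.ErrUnexpectedEOF // EOF mid-message
			}
			return err
		}
	}
	return nil
}

func main() {
	r := iotest.DataErrReader(bytes.NewReader([]byte{1, 2}))
	fmt.Println(fill(r, make([]byte, 5))) // unexpected EOF
}
```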
-func (s *Stream) Unprocessed() bool { - return atomic.LoadUint32(&s.unprocessed) == 1 +func (t *transportReader) Read(n int) (mem.Buffer, error) { + buf, err := t.reader.Read(n) + if err != nil { + t.er = err + return buf, err + } + t.windowHandler(buf.Len()) + return buf, nil } // GoString is implemented by Stream so context.String() won't @@ -574,6 +465,7 @@ type ServerConfig struct { ChannelzParent *channelz.Server MaxHeaderListSize *uint32 HeaderTableSize *uint32 + BufferPool mem.BufferPool } // ConnectOptions covers all relevant options for communicating with the server. @@ -612,17 +504,13 @@ type ConnectOptions struct { MaxHeaderListSize *uint32 // UseProxy specifies if a proxy should be used. UseProxy bool + // The mem.BufferPool to use when reading/writing to the wire. + BufferPool mem.BufferPool } -// NewClientTransport establishes the transport with the required ConnectOptions -// and returns it to the caller. -func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (ClientTransport, error) { - return newHTTP2Client(connectCtx, ctx, addr, opts, onClose) -} - -// Options provides additional hints and information for message +// WriteOptions provides additional hints and information for message // transmission. -type Options struct { +type WriteOptions struct { // Last indicates whether this write is the last piece for // this stream. Last bool @@ -671,18 +559,8 @@ type ClientTransport interface { // It does not block. GracefulClose() - // Write sends the data for the given stream. A nil stream indicates - // the write is to be performed on the transport as a whole. - Write(s *Stream, hdr []byte, data []byte, opts *Options) error - // NewStream creates a Stream for an RPC. - NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) - - // CloseStream clears the footprint of a stream when the stream is - // not needed any more. The err indicates the error incurred when - // CloseStream is called. Must be called when a stream is finished - // unless the associated transport is closing. - CloseStream(stream *Stream, err error) + NewStream(ctx context.Context, callHdr *CallHdr) (*ClientStream, error) // Error returns a channel that is closed when some I/O error // happens. Typically the caller should have a goroutine to monitor @@ -702,12 +580,6 @@ type ClientTransport interface { // RemoteAddr returns the remote network address. RemoteAddr() net.Addr - - // IncrMsgSent increments the number of message sent through this transport. - IncrMsgSent() - - // IncrMsgRecv increments the number of message received through this transport. - IncrMsgRecv() } // ServerTransport is the common interface for all gRPC server-side transport @@ -717,19 +589,7 @@ type ClientTransport interface { // Write methods for a given Stream will be called serially. type ServerTransport interface { // HandleStreams receives incoming streams using the given handler. - HandleStreams(context.Context, func(*Stream)) - - // WriteHeader sends the header metadata for the given stream. - // WriteHeader may not be called on all streams. - WriteHeader(s *Stream, md metadata.MD) error - - // Write sends the data for the given stream. - // Write may not be called on all streams. - Write(s *Stream, hdr []byte, data []byte, opts *Options) error - - // WriteStatus sends the status of a stream to the client. WriteStatus is - // the final call made on a stream and always occurs. 
- WriteStatus(s *Stream, st *status.Status) error + HandleStreams(context.Context, func(*ServerStream)) // Close tears down the transport. Once it is called, the transport // should not be accessed any more. All the pending streams and their @@ -741,12 +601,14 @@ type ServerTransport interface { // Drain notifies the client this ServerTransport stops accepting new RPCs. Drain(debugData string) +} - // IncrMsgSent increments the number of message sent through this transport. - IncrMsgSent() - - // IncrMsgRecv increments the number of message received through this transport. - IncrMsgRecv() +type internalServerTransport interface { + ServerTransport + writeHeader(s *ServerStream, md metadata.MD) error + write(s *ServerStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error + writeStatus(s *ServerStream, st *status.Status) error + incrMsgRecv() } // connectionErrorf creates an ConnectionError with the specified error description. @@ -798,7 +660,7 @@ var ( // connection is draining. This could be caused by goaway or balancer // removing the address. errStreamDrain = status.Error(codes.Unavailable, "the connection is draining") - // errStreamDone is returned from write at the client side to indiacte application + // errStreamDone is returned from write at the client side to indicate application // layer of an error. errStreamDone = errors.New("the stream is done") // StatusGoAway indicates that the server sent a GOAWAY that included this diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go index 34d31b5e7d3..eb42b19fb99 100644 --- a/vendor/google.golang.org/grpc/keepalive/keepalive.go +++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go @@ -34,15 +34,29 @@ type ClientParameters struct { // After a duration of this time if the client doesn't see any activity it // pings the server to see if the transport is still alive. // If set below 10s, a minimum value of 10s will be used instead. - Time time.Duration // The current default value is infinity. + // + // Note that gRPC servers have a default EnforcementPolicy.MinTime of 5 + // minutes (which means the client shouldn't ping more frequently than every + // 5 minutes). + // + // Though not ideal, it's not a strong requirement for Time to be less than + // EnforcementPolicy.MinTime. Time will automatically double if the server + // disconnects due to its enforcement policy. + // + // For more details, see + // https://github.com/grpc/proposal/blob/master/A8-client-side-keepalive.md + Time time.Duration // After having pinged for keepalive check, the client waits for a duration // of Timeout and if no activity is seen even after that the connection is // closed. - Timeout time.Duration // The current default value is 20 seconds. + // + // If keepalive is enabled, and this value is not explicitly set, the default + // is 20 seconds. + Timeout time.Duration // If true, client sends keepalive pings even with no active RPCs. If false, // when there are no active RPCs, Time and Timeout will be ignored and no // keepalive pings will be sent. - PermitWithoutStream bool // false by default. 
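The expanded ClientParameters comments below spell out the interaction with the server's EnforcementPolicy. A sketch of a client configured per that guidance; the target address is a placeholder and insecure credentials are for illustration only:

```go
package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/keepalive"
)

func main() {
	// Time stays at or above the server's default 5-minute enforcement
	// minimum, as the updated comments advise.
	conn, err := grpc.NewClient("example.local:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithKeepaliveParams(keepalive.ClientParameters{
			Time:                5 * time.Minute,
			Timeout:             20 * time.Second,
			PermitWithoutStream: true,
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```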
+ PermitWithoutStream bool } // ServerParameters is used to set keepalive and max-age parameters on the diff --git a/vendor/google.golang.org/grpc/mem/buffer_pool.go b/vendor/google.golang.org/grpc/mem/buffer_pool.go new file mode 100644 index 00000000000..c37c58c0233 --- /dev/null +++ b/vendor/google.golang.org/grpc/mem/buffer_pool.go @@ -0,0 +1,194 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package mem + +import ( + "sort" + "sync" + + "google.golang.org/grpc/internal" +) + +// BufferPool is a pool of buffers that can be shared and reused, resulting in +// decreased memory allocation. +type BufferPool interface { + // Get returns a buffer with specified length from the pool. + Get(length int) *[]byte + + // Put returns a buffer to the pool. + Put(*[]byte) +} + +var defaultBufferPoolSizes = []int{ + 256, + 4 << 10, // 4KB (go page size) + 16 << 10, // 16KB (max HTTP/2 frame size used by gRPC) + 32 << 10, // 32KB (default buffer size for io.Copy) + 1 << 20, // 1MB +} + +var defaultBufferPool BufferPool + +func init() { + defaultBufferPool = NewTieredBufferPool(defaultBufferPoolSizes...) + + internal.SetDefaultBufferPoolForTesting = func(pool BufferPool) { + defaultBufferPool = pool + } + + internal.SetBufferPoolingThresholdForTesting = func(threshold int) { + bufferPoolingThreshold = threshold + } +} + +// DefaultBufferPool returns the current default buffer pool. It is a BufferPool +// created with NewBufferPool that uses a set of default sizes optimized for +// expected workflows. +func DefaultBufferPool() BufferPool { + return defaultBufferPool +} + +// NewTieredBufferPool returns a BufferPool implementation that uses multiple +// underlying pools of the given pool sizes. +func NewTieredBufferPool(poolSizes ...int) BufferPool { + sort.Ints(poolSizes) + pools := make([]*sizedBufferPool, len(poolSizes)) + for i, s := range poolSizes { + pools[i] = newSizedBufferPool(s) + } + return &tieredBufferPool{ + sizedPools: pools, + } +} + +// tieredBufferPool implements the BufferPool interface with multiple tiers of +// buffer pools for different sizes of buffers. +type tieredBufferPool struct { + sizedPools []*sizedBufferPool + fallbackPool simpleBufferPool +} + +func (p *tieredBufferPool) Get(size int) *[]byte { + return p.getPool(size).Get(size) +} + +func (p *tieredBufferPool) Put(buf *[]byte) { + p.getPool(cap(*buf)).Put(buf) +} + +func (p *tieredBufferPool) getPool(size int) BufferPool { + poolIdx := sort.Search(len(p.sizedPools), func(i int) bool { + return p.sizedPools[i].defaultSize >= size + }) + + if poolIdx == len(p.sizedPools) { + return &p.fallbackPool + } + + return p.sizedPools[poolIdx] +} + +// sizedBufferPool is a BufferPool implementation that is optimized for specific +// buffer sizes. For example, HTTP/2 frames within gRPC have a default max size +// of 16kb and a sizedBufferPool can be configured to only return buffers with a +// capacity of 16kb. 
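How the tiered pool routes a request, as a runnable sketch: a Get is served by the smallest tier whose size fits, and the returned slice keeps that tier's capacity while being re-sliced to the requested length.

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	// Same tiers as defaultBufferPoolSizes: 256B, 4KiB, 16KiB, 32KiB, 1MiB.
	pool := mem.NewTieredBufferPool(256, 4<<10, 16<<10, 32<<10, 1<<20)

	buf := pool.Get(3000) // falls into the 4KiB tier
	fmt.Println(len(*buf), cap(*buf)) // 3000 4096
	pool.Put(buf)
}
```

Requests larger than the biggest tier fall through to the simpleBufferPool defined just below, so Get never fails outright.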
Note that however it does not support returning larger +// buffers and in fact panics if such a buffer is requested. Because of this, +// this BufferPool implementation is not meant to be used on its own and rather +// is intended to be embedded in a tieredBufferPool such that Get is only +// invoked when the required size is smaller than or equal to defaultSize. +type sizedBufferPool struct { + pool sync.Pool + defaultSize int +} + +func (p *sizedBufferPool) Get(size int) *[]byte { + buf := p.pool.Get().(*[]byte) + b := *buf + clear(b[:cap(b)]) + *buf = b[:size] + return buf +} + +func (p *sizedBufferPool) Put(buf *[]byte) { + if cap(*buf) < p.defaultSize { + // Ignore buffers that are too small to fit in the pool. Otherwise, when + // Get is called it will panic as it tries to index outside the bounds + // of the buffer. + return + } + p.pool.Put(buf) +} + +func newSizedBufferPool(size int) *sizedBufferPool { + return &sizedBufferPool{ + pool: sync.Pool{ + New: func() any { + buf := make([]byte, size) + return &buf + }, + }, + defaultSize: size, + } +} + +var _ BufferPool = (*simpleBufferPool)(nil) + +// simpleBufferPool is an implementation of the BufferPool interface that +// attempts to pool buffers with a sync.Pool. When Get is invoked, it tries to +// acquire a buffer from the pool but if that buffer is too small, it returns it +// to the pool and creates a new one. +type simpleBufferPool struct { + pool sync.Pool +} + +func (p *simpleBufferPool) Get(size int) *[]byte { + bs, ok := p.pool.Get().(*[]byte) + if ok && cap(*bs) >= size { + *bs = (*bs)[:size] + return bs + } + + // A buffer was pulled from the pool, but it is too small. Put it back in + // the pool and create one large enough. + if ok { + p.pool.Put(bs) + } + + b := make([]byte, size) + return &b +} + +func (p *simpleBufferPool) Put(buf *[]byte) { + p.pool.Put(buf) +} + +var _ BufferPool = NopBufferPool{} + +// NopBufferPool is a buffer pool that returns new buffers without pooling. +type NopBufferPool struct{} + +// Get returns a buffer with specified length from the pool. +func (NopBufferPool) Get(length int) *[]byte { + b := make([]byte, length) + return &b +} + +// Put returns a buffer to the pool. +func (NopBufferPool) Put(*[]byte) { +} diff --git a/vendor/google.golang.org/grpc/mem/buffer_slice.go b/vendor/google.golang.org/grpc/mem/buffer_slice.go new file mode 100644 index 00000000000..65002e2cc85 --- /dev/null +++ b/vendor/google.golang.org/grpc/mem/buffer_slice.go @@ -0,0 +1,281 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package mem + +import ( + "io" +) + +const ( + // 32 KiB is what io.Copy uses. + readAllBufSize = 32 * 1024 +) + +// BufferSlice offers a means to represent data that spans one or more Buffer +// instances. A BufferSlice is meant to be immutable after creation, and methods +// like Ref create and return copies of the slice. This is why all methods have +// value receivers rather than pointer receivers. 
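NopBufferPool, defined above, opts out of pooling entirely: every Get allocates fresh and Put discards. A sketch of using it in place of the default pool, for example when chasing a suspected buffer-reuse bug at the cost of GC pressure:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	var pool mem.BufferPool = mem.NopBufferPool{}
	buf := pool.Get(1 << 10)
	fmt.Println(len(*buf)) // 1024
	pool.Put(buf) // no-op; the slice is left for the GC
}
```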
+// +// Note that any of the methods that read the underlying buffers such as Ref, +// Len or CopyTo etc., will panic if any underlying buffers have already been +// freed. It is recommended to not directly interact with any of the underlying +// buffers directly, rather such interactions should be mediated through the +// various methods on this type. +// +// By convention, any APIs that return (mem.BufferSlice, error) should reduce +// the burden on the caller by never returning a mem.BufferSlice that needs to +// be freed if the error is non-nil, unless explicitly stated. +type BufferSlice []Buffer + +// Len returns the sum of the length of all the Buffers in this slice. +// +// # Warning +// +// Invoking the built-in len on a BufferSlice will return the number of buffers +// in the slice, and *not* the value returned by this function. +func (s BufferSlice) Len() int { + var length int + for _, b := range s { + length += b.Len() + } + return length +} + +// Ref invokes Ref on each buffer in the slice. +func (s BufferSlice) Ref() { + for _, b := range s { + b.Ref() + } +} + +// Free invokes Buffer.Free() on each Buffer in the slice. +func (s BufferSlice) Free() { + for _, b := range s { + b.Free() + } +} + +// CopyTo copies each of the underlying Buffer's data into the given buffer, +// returning the number of bytes copied. Has the same semantics as the copy +// builtin in that it will copy as many bytes as it can, stopping when either dst +// is full or s runs out of data, returning the minimum of s.Len() and len(dst). +func (s BufferSlice) CopyTo(dst []byte) int { + off := 0 + for _, b := range s { + off += copy(dst[off:], b.ReadOnlyData()) + } + return off +} + +// Materialize concatenates all the underlying Buffer's data into a single +// contiguous buffer using CopyTo. +func (s BufferSlice) Materialize() []byte { + l := s.Len() + if l == 0 { + return nil + } + out := make([]byte, l) + s.CopyTo(out) + return out +} + +// MaterializeToBuffer functions like Materialize except that it writes the data +// to a single Buffer pulled from the given BufferPool. +// +// As a special case, if the input BufferSlice only actually has one Buffer, this +// function simply increases the refcount before returning said Buffer. Freeing this +// buffer won't release it until the BufferSlice is itself released. +func (s BufferSlice) MaterializeToBuffer(pool BufferPool) Buffer { + if len(s) == 1 { + s[0].Ref() + return s[0] + } + sLen := s.Len() + if sLen == 0 { + return emptyBuffer{} + } + buf := pool.Get(sLen) + s.CopyTo(*buf) + return NewBuffer(buf, pool) +} + +// Reader returns a new Reader for the input slice after taking references to +// each underlying buffer. +func (s BufferSlice) Reader() Reader { + s.Ref() + return &sliceReader{ + data: s, + len: s.Len(), + } +} + +// Reader exposes a BufferSlice's data as an io.Reader, allowing it to interface +// with other parts systems. It also provides an additional convenience method +// Remaining(), which returns the number of unread bytes remaining in the slice. +// Buffers will be freed as they are read. +type Reader interface { + io.Reader + io.ByteReader + // Close frees the underlying BufferSlice and never returns an error. Subsequent + // calls to Read will return (0, io.EOF). + Close() error + // Remaining returns the number of unread bytes remaining in the slice. + Remaining() int +} + +type sliceReader struct { + data BufferSlice + len int + // The index into data[0].ReadOnlyData(). 
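The Len-versus-len distinction called out in the doc comment above is easy to trip over; a runnable sketch using small SliceBuffers (which need no pool):

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	s := mem.BufferSlice{
		mem.SliceBuffer("hello, "),
		mem.SliceBuffer("world"),
	}
	// Built-in len counts buffers; the Len method sums their bytes.
	fmt.Println(len(s), s.Len()) // 2 12
	fmt.Println(string(s.Materialize())) // hello, world
}
```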
+ bufferIdx int +} + +func (r *sliceReader) Remaining() int { + return r.len +} + +func (r *sliceReader) Close() error { + r.data.Free() + r.data = nil + r.len = 0 + return nil +} + +func (r *sliceReader) freeFirstBufferIfEmpty() bool { + if len(r.data) == 0 || r.bufferIdx != len(r.data[0].ReadOnlyData()) { + return false + } + + r.data[0].Free() + r.data = r.data[1:] + r.bufferIdx = 0 + return true +} + +func (r *sliceReader) Read(buf []byte) (n int, _ error) { + if r.len == 0 { + return 0, io.EOF + } + + for len(buf) != 0 && r.len != 0 { + // Copy as much as possible from the first Buffer in the slice into the + // given byte slice. + data := r.data[0].ReadOnlyData() + copied := copy(buf, data[r.bufferIdx:]) + r.len -= copied // Reduce len by the number of bytes copied. + r.bufferIdx += copied // Increment the buffer index. + n += copied // Increment the total number of bytes read. + buf = buf[copied:] // Shrink the given byte slice. + + // If we have copied all the data from the first Buffer, free it and advance to + // the next in the slice. + r.freeFirstBufferIfEmpty() + } + + return n, nil +} + +func (r *sliceReader) ReadByte() (byte, error) { + if r.len == 0 { + return 0, io.EOF + } + + // There may be any number of empty buffers in the slice, clear them all until a + // non-empty buffer is reached. This is guaranteed to exit since r.len is not 0. + for r.freeFirstBufferIfEmpty() { + } + + b := r.data[0].ReadOnlyData()[r.bufferIdx] + r.len-- + r.bufferIdx++ + // Free the first buffer in the slice if the last byte was read + r.freeFirstBufferIfEmpty() + return b, nil +} + +var _ io.Writer = (*writer)(nil) + +type writer struct { + buffers *BufferSlice + pool BufferPool +} + +func (w *writer) Write(p []byte) (n int, err error) { + b := Copy(p, w.pool) + *w.buffers = append(*w.buffers, b) + return b.Len(), nil +} + +// NewWriter wraps the given BufferSlice and BufferPool to implement the +// io.Writer interface. Every call to Write copies the contents of the given +// buffer into a new Buffer pulled from the given pool and the Buffer is +// added to the given BufferSlice. +func NewWriter(buffers *BufferSlice, pool BufferPool) io.Writer { + return &writer{buffers: buffers, pool: pool} +} + +// ReadAll reads from r until an error or EOF and returns the data it read. +// A successful call returns err == nil, not err == EOF. Because ReadAll is +// defined to read from src until EOF, it does not treat an EOF from Read +// as an error to be reported. +// +// Important: A failed call returns a non-nil error and may also return +// partially read buffers. It is the responsibility of the caller to free the +// BufferSlice returned, or its memory will not be reused. +func ReadAll(r io.Reader, pool BufferPool) (BufferSlice, error) { + var result BufferSlice + if wt, ok := r.(io.WriterTo); ok { + // This is more optimal since wt knows the size of chunks it wants to + // write and, hence, we can allocate buffers of an optimal size to fit + // them. E.g. might be a single big chunk, and we wouldn't chop it + // into pieces. + w := NewWriter(&result, pool) + _, err := wt.WriteTo(w) + return result, err + } +nextBuffer: + for { + buf := pool.Get(readAllBufSize) + // We asked for 32KiB but may have been given a bigger buffer. + // Use all of it if that's the case. 
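NewWriter, defined above, appends one pooled Buffer per Write call, which is what makes the io.WriterTo fast path in ReadAll effective. A minimal sketch:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	pool := mem.DefaultBufferPool()
	var buffers mem.BufferSlice
	w := mem.NewWriter(&buffers, pool)

	// Each Write lands in its own Buffer appended to the slice.
	fmt.Fprint(w, "hello, ")
	fmt.Fprint(w, "world")
	defer buffers.Free()

	fmt.Println(len(buffers), string(buffers.Materialize())) // 2 hello, world
}
```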
+ *buf = (*buf)[:cap(*buf)] + usedCap := 0 + for { + n, err := r.Read((*buf)[usedCap:]) + usedCap += n + if err != nil { + if usedCap == 0 { + // Nothing in this buf, put it back + pool.Put(buf) + } else { + *buf = (*buf)[:usedCap] + result = append(result, NewBuffer(buf, pool)) + } + if err == io.EOF { + err = nil + } + return result, err + } + if len(*buf) == usedCap { + result = append(result, NewBuffer(buf, pool)) + continue nextBuffer + } + } + } +} diff --git a/vendor/google.golang.org/grpc/mem/buffers.go b/vendor/google.golang.org/grpc/mem/buffers.go new file mode 100644 index 00000000000..ecbf0b9a73e --- /dev/null +++ b/vendor/google.golang.org/grpc/mem/buffers.go @@ -0,0 +1,268 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package mem provides utilities that facilitate memory reuse in byte slices +// that are used as buffers. +// +// # Experimental +// +// Notice: All APIs in this package are EXPERIMENTAL and may be changed or +// removed in a later release. +package mem + +import ( + "fmt" + "sync" + "sync/atomic" +) + +// A Buffer represents a reference counted piece of data (in bytes) that can be +// acquired by a call to NewBuffer() or Copy(). A reference to a Buffer may be +// released by calling Free(), which invokes the free function given at creation +// only after all references are released. +// +// Note that a Buffer is not safe for concurrent access and instead each +// goroutine should use its own reference to the data, which can be acquired via +// a call to Ref(). +// +// Attempts to access the underlying data after releasing the reference to the +// Buffer will panic. +type Buffer interface { + // ReadOnlyData returns the underlying byte slice. Note that it is undefined + // behavior to modify the contents of this slice in any way. + ReadOnlyData() []byte + // Ref increases the reference counter for this Buffer. + Ref() + // Free decrements this Buffer's reference counter and frees the underlying + // byte slice if the counter reaches 0 as a result of this call. + Free() + // Len returns the Buffer's size. + Len() int + + split(n int) (left, right Buffer) + read(buf []byte) (int, Buffer) +} + +var ( + bufferPoolingThreshold = 1 << 10 + + bufferObjectPool = sync.Pool{New: func() any { return new(buffer) }} + refObjectPool = sync.Pool{New: func() any { return new(atomic.Int32) }} +) + +// IsBelowBufferPoolingThreshold returns true if the given size is less than or +// equal to the threshold for buffer pooling. This is used to determine whether +// to pool buffers or allocate them directly. +func IsBelowBufferPoolingThreshold(size int) bool { + return size <= bufferPoolingThreshold +} + +type buffer struct { + origData *[]byte + data []byte + refs *atomic.Int32 + pool BufferPool +} + +func newBuffer() *buffer { + return bufferObjectPool.Get().(*buffer) +} + +// NewBuffer creates a new Buffer from the given data, initializing the reference +// counter to 1. 
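Calling ReadAll from user code, per its ownership contract above: the caller must Free the returned BufferSlice even on the happy path, or pooled memory is never reused. A strings.Reader also exercises the io.WriterTo fast path:

```go
package main

import (
	"fmt"
	"strings"

	"google.golang.org/grpc/mem"
)

func main() {
	pool := mem.DefaultBufferPool()
	data, err := mem.ReadAll(strings.NewReader("streamed body"), pool)
	if err != nil {
		panic(err)
	}
	defer data.Free() // caller owns the returned BufferSlice

	fmt.Println(string(data.Materialize())) // streamed body
}
```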
The data will then be returned to the given pool when all +// references to the returned Buffer are released. As a special case to avoid +// additional allocations, if the given buffer pool is nil, the returned buffer +// will be a "no-op" Buffer where invoking Buffer.Free() does nothing and the +// underlying data is never freed. +// +// Note that the backing array of the given data is not copied. +func NewBuffer(data *[]byte, pool BufferPool) Buffer { + // Use the buffer's capacity instead of the length, otherwise buffers may + // not be reused under certain conditions. For example, if a large buffer + // is acquired from the pool, but fewer bytes than the buffering threshold + // are written to it, the buffer will not be returned to the pool. + if pool == nil || IsBelowBufferPoolingThreshold(cap(*data)) { + return (SliceBuffer)(*data) + } + b := newBuffer() + b.origData = data + b.data = *data + b.pool = pool + b.refs = refObjectPool.Get().(*atomic.Int32) + b.refs.Add(1) + return b +} + +// Copy creates a new Buffer from the given data, initializing the reference +// counter to 1. +// +// It acquires a []byte from the given pool and copies over the backing array +// of the given data. The []byte acquired from the pool is returned to the +// pool when all references to the returned Buffer are released. +func Copy(data []byte, pool BufferPool) Buffer { + if IsBelowBufferPoolingThreshold(len(data)) { + buf := make(SliceBuffer, len(data)) + copy(buf, data) + return buf + } + + buf := pool.Get(len(data)) + copy(*buf, data) + return NewBuffer(buf, pool) +} + +func (b *buffer) ReadOnlyData() []byte { + if b.refs == nil { + panic("Cannot read freed buffer") + } + return b.data +} + +func (b *buffer) Ref() { + if b.refs == nil { + panic("Cannot ref freed buffer") + } + b.refs.Add(1) +} + +func (b *buffer) Free() { + if b.refs == nil { + panic("Cannot free freed buffer") + } + + refs := b.refs.Add(-1) + switch { + case refs > 0: + return + case refs == 0: + if b.pool != nil { + b.pool.Put(b.origData) + } + + refObjectPool.Put(b.refs) + b.origData = nil + b.data = nil + b.refs = nil + b.pool = nil + bufferObjectPool.Put(b) + default: + panic("Cannot free freed buffer") + } +} + +func (b *buffer) Len() int { + return len(b.ReadOnlyData()) +} + +func (b *buffer) split(n int) (Buffer, Buffer) { + if b.refs == nil { + panic("Cannot split freed buffer") + } + + b.refs.Add(1) + split := newBuffer() + split.origData = b.origData + split.data = b.data[n:] + split.refs = b.refs + split.pool = b.pool + + b.data = b.data[:n] + + return b, split +} + +func (b *buffer) read(buf []byte) (int, Buffer) { + if b.refs == nil { + panic("Cannot read freed buffer") + } + + n := copy(buf, b.data) + if n == len(b.data) { + b.Free() + return n, nil + } + + b.data = b.data[n:] + return n, b +} + +func (b *buffer) String() string { + return fmt.Sprintf("mem.Buffer(%p, data: %p, length: %d)", b, b.ReadOnlyData(), len(b.ReadOnlyData())) +} + +// ReadUnsafe reads bytes from the given Buffer into the provided slice. +// It does not perform safety checks. +func ReadUnsafe(dst []byte, buf Buffer) (int, Buffer) { + return buf.read(dst) +} + +// SplitUnsafe modifies the receiver to point to the first n bytes while it +// returns a new reference to the remaining bytes. The returned Buffer +// functions just like a normal reference acquired using Ref(). 
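The pooling-threshold special case in NewBuffer and Copy above is observable from outside the package: small data comes back as a SliceBuffer whose Ref and Free are no-ops, skipping refcounting overhead entirely. A sketch, assuming the default 1KiB threshold set in buffers.go:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	pool := mem.DefaultBufferPool()

	small := mem.Copy([]byte("tiny"), pool) // 4B, below the threshold
	_, isSlice := small.(mem.SliceBuffer)
	fmt.Println(isSlice) // true: no refcounting, Free is a no-op

	big := mem.NewBuffer(pool.Get(4096), pool) // above threshold: refcounted
	defer big.Free()
	fmt.Println(big.Len()) // 4096
}
```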
+func SplitUnsafe(buf Buffer, n int) (left, right Buffer) {
+	return buf.split(n)
+}
+
+type emptyBuffer struct{}
+
+func (e emptyBuffer) ReadOnlyData() []byte {
+	return nil
+}
+
+func (e emptyBuffer) Ref()  {}
+func (e emptyBuffer) Free() {}
+
+func (e emptyBuffer) Len() int {
+	return 0
+}
+
+func (e emptyBuffer) split(int) (left, right Buffer) {
+	return e, e
+}
+
+func (e emptyBuffer) read([]byte) (int, Buffer) {
+	return 0, e
+}
+
+// SliceBuffer is a Buffer implementation that wraps a byte slice. It provides
+// methods for reading, splitting, and managing the byte slice.
+type SliceBuffer []byte
+
+// ReadOnlyData returns the byte slice.
+func (s SliceBuffer) ReadOnlyData() []byte { return s }
+
+// Ref is a noop implementation of Ref.
+func (s SliceBuffer) Ref() {}
+
+// Free is a noop implementation of Free.
+func (s SliceBuffer) Free() {}
+
+// Len returns the length of the underlying byte slice.
+func (s SliceBuffer) Len() int { return len(s) }
+
+func (s SliceBuffer) split(n int) (left, right Buffer) {
+	return s[:n], s[n:]
+}
+
+func (s SliceBuffer) read(buf []byte) (int, Buffer) {
+	n := copy(buf, s)
+	if n == len(s) {
+		return n, nil
+	}
+	return n, s[n:]
+}
diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go
index 1e9485fd6e2..d2e15253bbf 100644
--- a/vendor/google.golang.org/grpc/metadata/metadata.go
+++ b/vendor/google.golang.org/grpc/metadata/metadata.go
@@ -213,11 +213,6 @@ func FromIncomingContext(ctx context.Context) (MD, bool) {
 // ValueFromIncomingContext returns the metadata value corresponding to the metadata
 // key from the incoming metadata if it exists. Keys are matched in a case insensitive
 // manner.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
 func ValueFromIncomingContext(ctx context.Context, key string) []string {
 	md, ok := ctx.Value(mdIncomingKey{}).(MD)
 	if !ok {
@@ -228,7 +223,7 @@ func ValueFromIncomingContext(ctx context.Context, key string) []string {
 		return copyOf(v)
 	}
 	for k, v := range md {
-		// Case insenitive comparison: MD is a map, and there's no guarantee
+		// Case insensitive comparison: MD is a map, and there's no guarantee
 		// that the MD attached to the context is created using our helper
 		// functions.
 		if strings.EqualFold(k, key) {
diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go
index 73bd6336433..ee0ff969af4 100644
--- a/vendor/google.golang.org/grpc/preloader.go
+++ b/vendor/google.golang.org/grpc/preloader.go
@@ -20,6 +20,7 @@ package grpc
 
 import (
 	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/mem"
 	"google.golang.org/grpc/status"
 )
 
@@ -31,9 +32,10 @@ import (
 // later release.
 type PreparedMsg struct {
 	// Struct for preparing msg before sending them
-	encodedData []byte
+	encodedData mem.BufferSlice
 	hdr         []byte
-	payload     []byte
+	payload     mem.BufferSlice
+	pf          payloadFormat
 }
 
 // Encode marshals and compresses the message using the codec and compressor for the stream.
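The PreparedMsg hunk above switches `encodedData` and `payload` from plain `[]byte` to `mem.BufferSlice`, and the `Encode` hunk that follows materializes and frees those slices. Here is a minimal sketch of the reference-counted lifecycle this relies on, built only from the `mem` APIs added in this diff (the payload size and variable names are illustrative, not taken from the vendored code):

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	pool := mem.DefaultBufferPool()

	// 4 KiB exceeds the 1 KiB pooling threshold, so Copy acquires the
	// backing []byte from the pool and returns a ref-counted Buffer
	// (smaller payloads come back as a plain SliceBuffer, whose Ref and
	// Free are no-ops).
	payload := make([]byte, 4096)
	buf := mem.Copy(payload, pool) // refcount: 1

	// A second consumer takes its own reference before touching the data.
	buf.Ref() // refcount: 2
	done := make(chan struct{})
	go func() {
		defer close(done)
		defer buf.Free() // refcount back to 1
		fmt.Println("shared length:", buf.Len())
	}()
	<-done

	// Materialize flattens a BufferSlice into one contiguous, independent
	// []byte; Free then drops the last reference and returns the pooled
	// memory.
	bs := mem.BufferSlice{buf}
	flat := bs.Materialize()
	bs.Free() // refcount: 0 -> backing array goes back to the pool
	fmt.Println("materialized length:", len(flat))
}
```

The Encode change below follows the same detach-then-free pattern: `Materialize()` gives the `PreparedMsg` bytes it owns outright, so the message remains valid after the pooled buffers behind `data` and `compData` are freed.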
@@ -57,11 +59,27 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error { if err != nil { return err } - p.encodedData = data - compData, err := compress(data, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp) + + materializedData := data.Materialize() + data.Free() + p.encodedData = mem.BufferSlice{mem.SliceBuffer(materializedData)} + + // TODO: it should be possible to grab the bufferPool from the underlying + // stream implementation with a type cast to its actual type (such as + // addrConnStream) and accessing the buffer pool directly. + var compData mem.BufferSlice + compData, p.pf, err = compress(p.encodedData, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp, mem.DefaultBufferPool()) if err != nil { return err } - p.hdr, p.payload = msgHeader(data, compData) + + if p.pf.isCompressed() { + materializedCompData := compData.Materialize() + compData.Free() + compData = mem.BufferSlice{mem.SliceBuffer(materializedCompData)} + } + + p.hdr, p.payload = msgHeader(p.encodedData, compData, p.pf) + return nil } diff --git a/vendor/google.golang.org/grpc/reflection/README.md b/vendor/google.golang.org/grpc/reflection/README.md deleted file mode 100644 index 9ace83ccb67..00000000000 --- a/vendor/google.golang.org/grpc/reflection/README.md +++ /dev/null @@ -1,18 +0,0 @@ -# Reflection - -Package reflection implements server reflection service. - -The service implemented is defined in: https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1/reflection.proto. - -To register server reflection on a gRPC server: -```go -import "google.golang.org/grpc/reflection" - -s := grpc.NewServer() -pb.RegisterYourOwnServer(s, &server{}) - -// Register reflection service on gRPC server. -reflection.Register(s) - -s.Serve(lis) -``` diff --git a/vendor/google.golang.org/grpc/reflection/adapt.go b/vendor/google.golang.org/grpc/reflection/adapt.go deleted file mode 100644 index 6997e474031..00000000000 --- a/vendor/google.golang.org/grpc/reflection/adapt.go +++ /dev/null @@ -1,57 +0,0 @@ -/* - * - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package reflection - -import ( - "google.golang.org/grpc/reflection/internal" - - v1reflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1" - v1reflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1" - v1alphareflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" -) - -// asV1Alpha returns an implementation of the v1alpha version of the reflection -// interface that delegates all calls to the given v1 version. 
-func asV1Alpha(svr v1reflectiongrpc.ServerReflectionServer) v1alphareflectiongrpc.ServerReflectionServer { - return v1AlphaServerImpl{svr: svr} -} - -type v1AlphaServerImpl struct { - svr v1reflectiongrpc.ServerReflectionServer -} - -func (s v1AlphaServerImpl) ServerReflectionInfo(stream v1alphareflectiongrpc.ServerReflection_ServerReflectionInfoServer) error { - return s.svr.ServerReflectionInfo(v1AlphaServerStreamAdapter{stream}) -} - -type v1AlphaServerStreamAdapter struct { - v1alphareflectiongrpc.ServerReflection_ServerReflectionInfoServer -} - -func (s v1AlphaServerStreamAdapter) Send(response *v1reflectionpb.ServerReflectionResponse) error { - return s.ServerReflection_ServerReflectionInfoServer.Send(internal.V1ToV1AlphaResponse(response)) -} - -func (s v1AlphaServerStreamAdapter) Recv() (*v1reflectionpb.ServerReflectionRequest, error) { - resp, err := s.ServerReflection_ServerReflectionInfoServer.Recv() - if err != nil { - return nil, err - } - return internal.V1AlphaToV1Request(resp), nil -} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go deleted file mode 100644 index 666eda8e5f3..00000000000 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go +++ /dev/null @@ -1,953 +0,0 @@ -// Copyright 2016 The gRPC Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Service exported by server reflection. A more complete description of how -// server reflection works can be found at -// https://github.com/grpc/grpc/blob/master/doc/server-reflection.md -// -// The canonical version of this proto can be found at -// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 -// source: grpc/reflection/v1/reflection.proto - -package grpc_reflection_v1 - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// The message sent by the client when calling ServerReflectionInfo method. -type ServerReflectionRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` - // To use reflection service, the client should set one of the following - // fields in message_request. The server distinguishes requests by their - // defined field and then handles them using corresponding methods. 
- // - // Types that are assignable to MessageRequest: - // - // *ServerReflectionRequest_FileByFilename - // *ServerReflectionRequest_FileContainingSymbol - // *ServerReflectionRequest_FileContainingExtension - // *ServerReflectionRequest_AllExtensionNumbersOfType - // *ServerReflectionRequest_ListServices - MessageRequest isServerReflectionRequest_MessageRequest `protobuf_oneof:"message_request"` -} - -func (x *ServerReflectionRequest) Reset() { - *x = ServerReflectionRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ServerReflectionRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServerReflectionRequest) ProtoMessage() {} - -func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServerReflectionRequest.ProtoReflect.Descriptor instead. -func (*ServerReflectionRequest) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{0} -} - -func (x *ServerReflectionRequest) GetHost() string { - if x != nil { - return x.Host - } - return "" -} - -func (m *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_MessageRequest { - if m != nil { - return m.MessageRequest - } - return nil -} - -func (x *ServerReflectionRequest) GetFileByFilename() string { - if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileByFilename); ok { - return x.FileByFilename - } - return "" -} - -func (x *ServerReflectionRequest) GetFileContainingSymbol() string { - if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingSymbol); ok { - return x.FileContainingSymbol - } - return "" -} - -func (x *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest { - if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingExtension); ok { - return x.FileContainingExtension - } - return nil -} - -func (x *ServerReflectionRequest) GetAllExtensionNumbersOfType() string { - if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_AllExtensionNumbersOfType); ok { - return x.AllExtensionNumbersOfType - } - return "" -} - -func (x *ServerReflectionRequest) GetListServices() string { - if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_ListServices); ok { - return x.ListServices - } - return "" -} - -type isServerReflectionRequest_MessageRequest interface { - isServerReflectionRequest_MessageRequest() -} - -type ServerReflectionRequest_FileByFilename struct { - // Find a proto file by the file name. - FileByFilename string `protobuf:"bytes,3,opt,name=file_by_filename,json=fileByFilename,proto3,oneof"` -} - -type ServerReflectionRequest_FileContainingSymbol struct { - // Find the proto file that declares the given fully-qualified symbol name. - // This field should be a fully-qualified symbol name - // (e.g. <package>.<service>[.<method>] or <package>.<type>). 
- FileContainingSymbol string `protobuf:"bytes,4,opt,name=file_containing_symbol,json=fileContainingSymbol,proto3,oneof"` -} - -type ServerReflectionRequest_FileContainingExtension struct { - // Find the proto file which defines an extension extending the given - // message type with the given field number. - FileContainingExtension *ExtensionRequest `protobuf:"bytes,5,opt,name=file_containing_extension,json=fileContainingExtension,proto3,oneof"` -} - -type ServerReflectionRequest_AllExtensionNumbersOfType struct { - // Finds the tag numbers used by all known extensions of the given message - // type, and appends them to ExtensionNumberResponse in an undefined order. - // Its corresponding method is best-effort: it's not guaranteed that the - // reflection service will implement this method, and it's not guaranteed - // that this method will provide all extensions. Returns - // StatusCode::UNIMPLEMENTED if it's not implemented. - // This field should be a fully-qualified type name. The format is - // <package>.<type> - AllExtensionNumbersOfType string `protobuf:"bytes,6,opt,name=all_extension_numbers_of_type,json=allExtensionNumbersOfType,proto3,oneof"` -} - -type ServerReflectionRequest_ListServices struct { - // List the full names of registered services. The content will not be - // checked. - ListServices string `protobuf:"bytes,7,opt,name=list_services,json=listServices,proto3,oneof"` -} - -func (*ServerReflectionRequest_FileByFilename) isServerReflectionRequest_MessageRequest() {} - -func (*ServerReflectionRequest_FileContainingSymbol) isServerReflectionRequest_MessageRequest() {} - -func (*ServerReflectionRequest_FileContainingExtension) isServerReflectionRequest_MessageRequest() {} - -func (*ServerReflectionRequest_AllExtensionNumbersOfType) isServerReflectionRequest_MessageRequest() { -} - -func (*ServerReflectionRequest_ListServices) isServerReflectionRequest_MessageRequest() {} - -// The type name and extension number sent by the client when requesting -// file_containing_extension. -type ExtensionRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Fully-qualified type name. The format should be <package>.<type> - ContainingType string `protobuf:"bytes,1,opt,name=containing_type,json=containingType,proto3" json:"containing_type,omitempty"` - ExtensionNumber int32 `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` -} - -func (x *ExtensionRequest) Reset() { - *x = ExtensionRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ExtensionRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ExtensionRequest) ProtoMessage() {} - -func (x *ExtensionRequest) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ExtensionRequest.ProtoReflect.Descriptor instead. 
-func (*ExtensionRequest) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{1} -} - -func (x *ExtensionRequest) GetContainingType() string { - if x != nil { - return x.ContainingType - } - return "" -} - -func (x *ExtensionRequest) GetExtensionNumber() int32 { - if x != nil { - return x.ExtensionNumber - } - return 0 -} - -// The message sent by the server to answer ServerReflectionInfo method. -type ServerReflectionResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"` - OriginalRequest *ServerReflectionRequest `protobuf:"bytes,2,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"` - // The server sets one of the following fields according to the message_request - // in the request. - // - // Types that are assignable to MessageResponse: - // - // *ServerReflectionResponse_FileDescriptorResponse - // *ServerReflectionResponse_AllExtensionNumbersResponse - // *ServerReflectionResponse_ListServicesResponse - // *ServerReflectionResponse_ErrorResponse - MessageResponse isServerReflectionResponse_MessageResponse `protobuf_oneof:"message_response"` -} - -func (x *ServerReflectionResponse) Reset() { - *x = ServerReflectionResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ServerReflectionResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServerReflectionResponse) ProtoMessage() {} - -func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServerReflectionResponse.ProtoReflect.Descriptor instead. 
-func (*ServerReflectionResponse) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{2} -} - -func (x *ServerReflectionResponse) GetValidHost() string { - if x != nil { - return x.ValidHost - } - return "" -} - -func (x *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest { - if x != nil { - return x.OriginalRequest - } - return nil -} - -func (m *ServerReflectionResponse) GetMessageResponse() isServerReflectionResponse_MessageResponse { - if m != nil { - return m.MessageResponse - } - return nil -} - -func (x *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorResponse { - if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_FileDescriptorResponse); ok { - return x.FileDescriptorResponse - } - return nil -} - -func (x *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNumberResponse { - if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_AllExtensionNumbersResponse); ok { - return x.AllExtensionNumbersResponse - } - return nil -} - -func (x *ServerReflectionResponse) GetListServicesResponse() *ListServiceResponse { - if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ListServicesResponse); ok { - return x.ListServicesResponse - } - return nil -} - -func (x *ServerReflectionResponse) GetErrorResponse() *ErrorResponse { - if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ErrorResponse); ok { - return x.ErrorResponse - } - return nil -} - -type isServerReflectionResponse_MessageResponse interface { - isServerReflectionResponse_MessageResponse() -} - -type ServerReflectionResponse_FileDescriptorResponse struct { - // This message is used to answer file_by_filename, file_containing_symbol, - // file_containing_extension requests with transitive dependencies. - // As the repeated label is not allowed in oneof fields, we use a - // FileDescriptorResponse message to encapsulate the repeated fields. - // The reflection service is allowed to avoid sending FileDescriptorProtos - // that were previously sent in response to earlier requests in the stream. - FileDescriptorResponse *FileDescriptorResponse `protobuf:"bytes,4,opt,name=file_descriptor_response,json=fileDescriptorResponse,proto3,oneof"` -} - -type ServerReflectionResponse_AllExtensionNumbersResponse struct { - // This message is used to answer all_extension_numbers_of_type requests. - AllExtensionNumbersResponse *ExtensionNumberResponse `protobuf:"bytes,5,opt,name=all_extension_numbers_response,json=allExtensionNumbersResponse,proto3,oneof"` -} - -type ServerReflectionResponse_ListServicesResponse struct { - // This message is used to answer list_services requests. - ListServicesResponse *ListServiceResponse `protobuf:"bytes,6,opt,name=list_services_response,json=listServicesResponse,proto3,oneof"` -} - -type ServerReflectionResponse_ErrorResponse struct { - // This message is used when an error occurs. 
- ErrorResponse *ErrorResponse `protobuf:"bytes,7,opt,name=error_response,json=errorResponse,proto3,oneof"` -} - -func (*ServerReflectionResponse_FileDescriptorResponse) isServerReflectionResponse_MessageResponse() { -} - -func (*ServerReflectionResponse_AllExtensionNumbersResponse) isServerReflectionResponse_MessageResponse() { -} - -func (*ServerReflectionResponse_ListServicesResponse) isServerReflectionResponse_MessageResponse() {} - -func (*ServerReflectionResponse_ErrorResponse) isServerReflectionResponse_MessageResponse() {} - -// Serialized FileDescriptorProto messages sent by the server answering -// a file_by_filename, file_containing_symbol, or file_containing_extension -// request. -type FileDescriptorResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Serialized FileDescriptorProto messages. We avoid taking a dependency on - // descriptor.proto, which uses proto2 only features, by making them opaque - // bytes instead. - FileDescriptorProto [][]byte `protobuf:"bytes,1,rep,name=file_descriptor_proto,json=fileDescriptorProto,proto3" json:"file_descriptor_proto,omitempty"` -} - -func (x *FileDescriptorResponse) Reset() { - *x = FileDescriptorResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *FileDescriptorResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FileDescriptorResponse) ProtoMessage() {} - -func (x *FileDescriptorResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FileDescriptorResponse.ProtoReflect.Descriptor instead. -func (*FileDescriptorResponse) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{3} -} - -func (x *FileDescriptorResponse) GetFileDescriptorProto() [][]byte { - if x != nil { - return x.FileDescriptorProto - } - return nil -} - -// A list of extension numbers sent by the server answering -// all_extension_numbers_of_type request. -type ExtensionNumberResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Full name of the base type, including the package name. 
The format - // is <package>.<type> - BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName,proto3" json:"base_type_name,omitempty"` - ExtensionNumber []int32 `protobuf:"varint,2,rep,packed,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` -} - -func (x *ExtensionNumberResponse) Reset() { - *x = ExtensionNumberResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ExtensionNumberResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ExtensionNumberResponse) ProtoMessage() {} - -func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ExtensionNumberResponse.ProtoReflect.Descriptor instead. -func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{4} -} - -func (x *ExtensionNumberResponse) GetBaseTypeName() string { - if x != nil { - return x.BaseTypeName - } - return "" -} - -func (x *ExtensionNumberResponse) GetExtensionNumber() []int32 { - if x != nil { - return x.ExtensionNumber - } - return nil -} - -// A list of ServiceResponse sent by the server answering list_services request. -type ListServiceResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The information of each service may be expanded in the future, so we use - // ServiceResponse message to encapsulate it. - Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service,proto3" json:"service,omitempty"` -} - -func (x *ListServiceResponse) Reset() { - *x = ListServiceResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListServiceResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListServiceResponse) ProtoMessage() {} - -func (x *ListServiceResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListServiceResponse.ProtoReflect.Descriptor instead. -func (*ListServiceResponse) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{5} -} - -func (x *ListServiceResponse) GetService() []*ServiceResponse { - if x != nil { - return x.Service - } - return nil -} - -// The information of a single service used by ListServiceResponse to answer -// list_services request. -type ServiceResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Full name of a registered service, including its package name. 
The format - // is <package>.<service> - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *ServiceResponse) Reset() { - *x = ServiceResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ServiceResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServiceResponse) ProtoMessage() {} - -func (x *ServiceResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServiceResponse.ProtoReflect.Descriptor instead. -func (*ServiceResponse) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{6} -} - -func (x *ServiceResponse) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// The error code and error message sent by the server when an error occurs. -type ErrorResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // This field uses the error codes defined in grpc::StatusCode. - ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` - ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` -} - -func (x *ErrorResponse) Reset() { - *x = ErrorResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ErrorResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ErrorResponse) ProtoMessage() {} - -func (x *ErrorResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ErrorResponse.ProtoReflect.Descriptor instead. 
-func (*ErrorResponse) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{7} -} - -func (x *ErrorResponse) GetErrorCode() int32 { - if x != nil { - return x.ErrorCode - } - return 0 -} - -func (x *ErrorResponse) GetErrorMessage() string { - if x != nil { - return x.ErrorMessage - } - return "" -} - -var File_grpc_reflection_v1_reflection_proto protoreflect.FileDescriptor - -var file_grpc_reflection_v1_reflection_proto_rawDesc = []byte{ - 0x0a, 0x23, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x22, 0xf3, 0x02, 0x0a, 0x17, 0x53, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x10, 0x66, 0x69, 0x6c, - 0x65, 0x5f, 0x62, 0x79, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0e, 0x66, 0x69, 0x6c, 0x65, 0x42, 0x79, 0x46, 0x69, 0x6c, - 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x16, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, - 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x14, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x12, 0x62, 0x0a, - 0x19, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, - 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x24, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x17, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, - 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x42, 0x0a, 0x1d, 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x19, 0x61, 0x6c, 0x6c, 0x45, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x4f, - 0x66, 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x0d, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, - 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x11, 0x0a, 0x0f, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, - 0x66, 0x0a, 0x10, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, - 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, - 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, - 0x65, 0x78, 0x74, 0x65, 
0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0xae, 0x04, 0x0a, 0x18, 0x53, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x6f, - 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, - 0x6f, 0x73, 0x74, 0x12, 0x56, 0x0a, 0x10, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0f, 0x6f, 0x72, 0x69, 0x67, - 0x69, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x66, 0x0a, 0x18, 0x66, - 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x72, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x76, 0x31, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x16, 0x66, 0x69, 0x6c, - 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x72, 0x0a, 0x1e, 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x72, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, - 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x1b, 0x61, 0x6c, 0x6c, 0x45, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5f, 0x0a, 0x16, 0x6c, 0x69, 0x73, 0x74, 0x5f, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, - 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, - 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x48, 0x00, 0x52, 0x14, 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x0e, 0x65, 0x72, 0x72, 0x6f, - 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x12, 0x0a, 0x10, 
0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, - 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4c, 0x0a, 0x16, 0x46, 0x69, 0x6c, 0x65, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0c, 0x52, 0x13, 0x66, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x54, - 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, - 0x65, 0x72, 0x22, 0x54, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x07, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, - 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x25, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, - 0x53, 0x0a, 0x0d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, - 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x32, 0x89, 0x01, 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, - 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x75, 0x0a, 0x14, 0x53, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, - 0x6f, 0x12, 0x2b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, - 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, - 0x42, 0x66, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x15, 0x53, 
0x65, 0x72, 0x76, 0x65, - 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x50, 0x01, 0x5a, 0x34, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, - 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x72, 0x65, 0x66, 0x6c, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_grpc_reflection_v1_reflection_proto_rawDescOnce sync.Once - file_grpc_reflection_v1_reflection_proto_rawDescData = file_grpc_reflection_v1_reflection_proto_rawDesc -) - -func file_grpc_reflection_v1_reflection_proto_rawDescGZIP() []byte { - file_grpc_reflection_v1_reflection_proto_rawDescOnce.Do(func() { - file_grpc_reflection_v1_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_reflection_v1_reflection_proto_rawDescData) - }) - return file_grpc_reflection_v1_reflection_proto_rawDescData -} - -var file_grpc_reflection_v1_reflection_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_grpc_reflection_v1_reflection_proto_goTypes = []interface{}{ - (*ServerReflectionRequest)(nil), // 0: grpc.reflection.v1.ServerReflectionRequest - (*ExtensionRequest)(nil), // 1: grpc.reflection.v1.ExtensionRequest - (*ServerReflectionResponse)(nil), // 2: grpc.reflection.v1.ServerReflectionResponse - (*FileDescriptorResponse)(nil), // 3: grpc.reflection.v1.FileDescriptorResponse - (*ExtensionNumberResponse)(nil), // 4: grpc.reflection.v1.ExtensionNumberResponse - (*ListServiceResponse)(nil), // 5: grpc.reflection.v1.ListServiceResponse - (*ServiceResponse)(nil), // 6: grpc.reflection.v1.ServiceResponse - (*ErrorResponse)(nil), // 7: grpc.reflection.v1.ErrorResponse -} -var file_grpc_reflection_v1_reflection_proto_depIdxs = []int32{ - 1, // 0: grpc.reflection.v1.ServerReflectionRequest.file_containing_extension:type_name -> grpc.reflection.v1.ExtensionRequest - 0, // 1: grpc.reflection.v1.ServerReflectionResponse.original_request:type_name -> grpc.reflection.v1.ServerReflectionRequest - 3, // 2: grpc.reflection.v1.ServerReflectionResponse.file_descriptor_response:type_name -> grpc.reflection.v1.FileDescriptorResponse - 4, // 3: grpc.reflection.v1.ServerReflectionResponse.all_extension_numbers_response:type_name -> grpc.reflection.v1.ExtensionNumberResponse - 5, // 4: grpc.reflection.v1.ServerReflectionResponse.list_services_response:type_name -> grpc.reflection.v1.ListServiceResponse - 7, // 5: grpc.reflection.v1.ServerReflectionResponse.error_response:type_name -> grpc.reflection.v1.ErrorResponse - 6, // 6: grpc.reflection.v1.ListServiceResponse.service:type_name -> grpc.reflection.v1.ServiceResponse - 0, // 7: grpc.reflection.v1.ServerReflection.ServerReflectionInfo:input_type -> grpc.reflection.v1.ServerReflectionRequest - 2, // 8: grpc.reflection.v1.ServerReflection.ServerReflectionInfo:output_type -> grpc.reflection.v1.ServerReflectionResponse - 8, // [8:9] is the sub-list for method output_type - 7, // [7:8] is the sub-list for method input_type - 7, // [7:7] is the sub-list for extension type_name - 7, // [7:7] is the sub-list for extension extendee - 0, // [0:7] is the sub-list for field type_name -} - -func init() { file_grpc_reflection_v1_reflection_proto_init() } -func file_grpc_reflection_v1_reflection_proto_init() { - if File_grpc_reflection_v1_reflection_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - 
file_grpc_reflection_v1_reflection_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServerReflectionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1_reflection_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExtensionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1_reflection_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServerReflectionResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1_reflection_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FileDescriptorResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1_reflection_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExtensionNumberResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1_reflection_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListServiceResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1_reflection_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServiceResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1_reflection_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ErrorResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_grpc_reflection_v1_reflection_proto_msgTypes[0].OneofWrappers = []interface{}{ - (*ServerReflectionRequest_FileByFilename)(nil), - (*ServerReflectionRequest_FileContainingSymbol)(nil), - (*ServerReflectionRequest_FileContainingExtension)(nil), - (*ServerReflectionRequest_AllExtensionNumbersOfType)(nil), - (*ServerReflectionRequest_ListServices)(nil), - } - file_grpc_reflection_v1_reflection_proto_msgTypes[2].OneofWrappers = []interface{}{ - (*ServerReflectionResponse_FileDescriptorResponse)(nil), - (*ServerReflectionResponse_AllExtensionNumbersResponse)(nil), - (*ServerReflectionResponse_ListServicesResponse)(nil), - (*ServerReflectionResponse_ErrorResponse)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_grpc_reflection_v1_reflection_proto_rawDesc, - NumEnums: 0, - NumMessages: 8, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_grpc_reflection_v1_reflection_proto_goTypes, - DependencyIndexes: file_grpc_reflection_v1_reflection_proto_depIdxs, - MessageInfos: file_grpc_reflection_v1_reflection_proto_msgTypes, - }.Build() - File_grpc_reflection_v1_reflection_proto = out.File - file_grpc_reflection_v1_reflection_proto_rawDesc = nil - 
file_grpc_reflection_v1_reflection_proto_goTypes = nil - file_grpc_reflection_v1_reflection_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go deleted file mode 100644 index 17d21fde22a..00000000000 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright 2016 The gRPC Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Service exported by server reflection. A more complete description of how -// server reflection works can be found at -// https://github.com/grpc/grpc/blob/master/doc/server-reflection.md -// -// The canonical version of this proto can be found at -// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto - -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.4.0 -// - protoc v4.25.2 -// source: grpc/reflection/v1/reflection.proto - -package grpc_reflection_v1 - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.62.0 or later. -const _ = grpc.SupportPackageIsVersion8 - -const ( - ServerReflection_ServerReflectionInfo_FullMethodName = "/grpc.reflection.v1.ServerReflection/ServerReflectionInfo" -) - -// ServerReflectionClient is the client API for ServerReflection service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type ServerReflectionClient interface { - // The reflection service is structured as a bidirectional stream, ensuring - // all related requests go to a single server. - ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) -} - -type serverReflectionClient struct { - cc grpc.ClientConnInterface -} - -func NewServerReflectionClient(cc grpc.ClientConnInterface) ServerReflectionClient { - return &serverReflectionClient{cc} -} - -func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], ServerReflection_ServerReflectionInfo_FullMethodName, cOpts...) 
- if err != nil { - return nil, err - } - x := &serverReflectionServerReflectionInfoClient{ClientStream: stream} - return x, nil -} - -type ServerReflection_ServerReflectionInfoClient interface { - Send(*ServerReflectionRequest) error - Recv() (*ServerReflectionResponse, error) - grpc.ClientStream -} - -type serverReflectionServerReflectionInfoClient struct { - grpc.ClientStream -} - -func (x *serverReflectionServerReflectionInfoClient) Send(m *ServerReflectionRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *serverReflectionServerReflectionInfoClient) Recv() (*ServerReflectionResponse, error) { - m := new(ServerReflectionResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// ServerReflectionServer is the server API for ServerReflection service. -// All implementations should embed UnimplementedServerReflectionServer -// for forward compatibility -type ServerReflectionServer interface { - // The reflection service is structured as a bidirectional stream, ensuring - // all related requests go to a single server. - ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error -} - -// UnimplementedServerReflectionServer should be embedded to have forward compatible implementations. -type UnimplementedServerReflectionServer struct { -} - -func (UnimplementedServerReflectionServer) ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error { - return status.Errorf(codes.Unimplemented, "method ServerReflectionInfo not implemented") -} - -// UnsafeServerReflectionServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to ServerReflectionServer will -// result in compilation errors. -type UnsafeServerReflectionServer interface { - mustEmbedUnimplementedServerReflectionServer() -} - -func RegisterServerReflectionServer(s grpc.ServiceRegistrar, srv ServerReflectionServer) { - s.RegisterService(&ServerReflection_ServiceDesc, srv) -} - -func _ServerReflection_ServerReflectionInfo_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(ServerReflectionServer).ServerReflectionInfo(&serverReflectionServerReflectionInfoServer{ServerStream: stream}) -} - -type ServerReflection_ServerReflectionInfoServer interface { - Send(*ServerReflectionResponse) error - Recv() (*ServerReflectionRequest, error) - grpc.ServerStream -} - -type serverReflectionServerReflectionInfoServer struct { - grpc.ServerStream -} - -func (x *serverReflectionServerReflectionInfoServer) Send(m *ServerReflectionResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *serverReflectionServerReflectionInfoServer) Recv() (*ServerReflectionRequest, error) { - m := new(ServerReflectionRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// ServerReflection_ServiceDesc is the grpc.ServiceDesc for ServerReflection service. 
-// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var ServerReflection_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "grpc.reflection.v1.ServerReflection", - HandlerType: (*ServerReflectionServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "ServerReflectionInfo", - Handler: _ServerReflection_ServerReflectionInfo_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "grpc/reflection/v1/reflection.proto", -} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go deleted file mode 100644 index cd032acefca..00000000000 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go +++ /dev/null @@ -1,1028 +0,0 @@ -// Copyright 2016 The gRPC Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// Service exported by server reflection - -// Warning: this entire file is deprecated. Use this instead: -// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 -// grpc/reflection/v1alpha/reflection.proto is a deprecated file. - -package grpc_reflection_v1alpha - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// The message sent by the client when calling ServerReflectionInfo method. -// -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -type ServerReflectionRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. - Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` - // To use reflection service, the client should set one of the following - // fields in message_request. The server distinguishes requests by their - // defined field and then handles them using corresponding methods. 
- // - // Types that are assignable to MessageRequest: - // - // *ServerReflectionRequest_FileByFilename - // *ServerReflectionRequest_FileContainingSymbol - // *ServerReflectionRequest_FileContainingExtension - // *ServerReflectionRequest_AllExtensionNumbersOfType - // *ServerReflectionRequest_ListServices - MessageRequest isServerReflectionRequest_MessageRequest `protobuf_oneof:"message_request"` -} - -func (x *ServerReflectionRequest) Reset() { - *x = ServerReflectionRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ServerReflectionRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServerReflectionRequest) ProtoMessage() {} - -func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServerReflectionRequest.ProtoReflect.Descriptor instead. -func (*ServerReflectionRequest) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{0} -} - -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -func (x *ServerReflectionRequest) GetHost() string { - if x != nil { - return x.Host - } - return "" -} - -func (m *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_MessageRequest { - if m != nil { - return m.MessageRequest - } - return nil -} - -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -func (x *ServerReflectionRequest) GetFileByFilename() string { - if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileByFilename); ok { - return x.FileByFilename - } - return "" -} - -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -func (x *ServerReflectionRequest) GetFileContainingSymbol() string { - if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingSymbol); ok { - return x.FileContainingSymbol - } - return "" -} - -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -func (x *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest { - if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingExtension); ok { - return x.FileContainingExtension - } - return nil -} - -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -func (x *ServerReflectionRequest) GetAllExtensionNumbersOfType() string { - if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_AllExtensionNumbersOfType); ok { - return x.AllExtensionNumbersOfType - } - return "" -} - -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
-func (x *ServerReflectionRequest) GetListServices() string { - if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_ListServices); ok { - return x.ListServices - } - return "" -} - -type isServerReflectionRequest_MessageRequest interface { - isServerReflectionRequest_MessageRequest() -} - -type ServerReflectionRequest_FileByFilename struct { - // Find a proto file by the file name. - // - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. - FileByFilename string `protobuf:"bytes,3,opt,name=file_by_filename,json=fileByFilename,proto3,oneof"` -} - -type ServerReflectionRequest_FileContainingSymbol struct { - // Find the proto file that declares the given fully-qualified symbol name. - // This field should be a fully-qualified symbol name - // (e.g. <package>.<service>[.<method>] or <package>.<type>). - // - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. - FileContainingSymbol string `protobuf:"bytes,4,opt,name=file_containing_symbol,json=fileContainingSymbol,proto3,oneof"` -} - -type ServerReflectionRequest_FileContainingExtension struct { - // Find the proto file which defines an extension extending the given - // message type with the given field number. - // - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. - FileContainingExtension *ExtensionRequest `protobuf:"bytes,5,opt,name=file_containing_extension,json=fileContainingExtension,proto3,oneof"` -} - -type ServerReflectionRequest_AllExtensionNumbersOfType struct { - // Finds the tag numbers used by all known extensions of extendee_type, and - // appends them to ExtensionNumberResponse in an undefined order. - // Its corresponding method is best-effort: it's not guaranteed that the - // reflection service will implement this method, and it's not guaranteed - // that this method will provide all extensions. Returns - // StatusCode::UNIMPLEMENTED if it's not implemented. - // This field should be a fully-qualified type name. The format is - // <package>.<type> - // - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. - AllExtensionNumbersOfType string `protobuf:"bytes,6,opt,name=all_extension_numbers_of_type,json=allExtensionNumbersOfType,proto3,oneof"` -} - -type ServerReflectionRequest_ListServices struct { - // List the full names of registered services. The content will not be - // checked. - // - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. - ListServices string `protobuf:"bytes,7,opt,name=list_services,json=listServices,proto3,oneof"` -} - -func (*ServerReflectionRequest_FileByFilename) isServerReflectionRequest_MessageRequest() {} - -func (*ServerReflectionRequest_FileContainingSymbol) isServerReflectionRequest_MessageRequest() {} - -func (*ServerReflectionRequest_FileContainingExtension) isServerReflectionRequest_MessageRequest() {} - -func (*ServerReflectionRequest_AllExtensionNumbersOfType) isServerReflectionRequest_MessageRequest() { -} - -func (*ServerReflectionRequest_ListServices) isServerReflectionRequest_MessageRequest() {} - -// The type name and extension number sent by the client when requesting -// file_containing_extension. -// -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
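To make the oneof wiring concrete: a caller sets exactly one of the wrapper types above on MessageRequest, and the server dispatches on whichever variant it finds. A minimal sketch under that reading, using only the generated v1alpha types from this file (host value and symbol are placeholders):

package example

import (
	v1alphapb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
)

// symbolRequest asks the server for the file declaring a fully-qualified symbol.
func symbolRequest(symbol string) *v1alphapb.ServerReflectionRequest {
	return &v1alphapb.ServerReflectionRequest{
		Host: "localhost", // placeholder
		// Exactly one wrapper type may be set; the other variants stay nil.
		MessageRequest: &v1alphapb.ServerReflectionRequest_FileContainingSymbol{
			FileContainingSymbol: symbol, // e.g. "<package>.<service>"
		},
	}
}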
-type ExtensionRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Fully-qualified type name. The format should be <package>.<type> - // - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. - ContainingType string `protobuf:"bytes,1,opt,name=containing_type,json=containingType,proto3" json:"containing_type,omitempty"` - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. - ExtensionNumber int32 `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` -} - -func (x *ExtensionRequest) Reset() { - *x = ExtensionRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ExtensionRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ExtensionRequest) ProtoMessage() {} - -func (x *ExtensionRequest) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ExtensionRequest.ProtoReflect.Descriptor instead. -func (*ExtensionRequest) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{1} -} - -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -func (x *ExtensionRequest) GetContainingType() string { - if x != nil { - return x.ContainingType - } - return "" -} - -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -func (x *ExtensionRequest) GetExtensionNumber() int32 { - if x != nil { - return x.ExtensionNumber - } - return 0 -} - -// The message sent by the server to answer ServerReflectionInfo method. -// -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -type ServerReflectionResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. - ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"` - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. - OriginalRequest *ServerReflectionRequest `protobuf:"bytes,2,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"` - // The server set one of the following fields according to the message_request - // in the request. 
- // - // Types that are assignable to MessageResponse: - // - // *ServerReflectionResponse_FileDescriptorResponse - // *ServerReflectionResponse_AllExtensionNumbersResponse - // *ServerReflectionResponse_ListServicesResponse - // *ServerReflectionResponse_ErrorResponse - MessageResponse isServerReflectionResponse_MessageResponse `protobuf_oneof:"message_response"` -} - -func (x *ServerReflectionResponse) Reset() { - *x = ServerReflectionResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ServerReflectionResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServerReflectionResponse) ProtoMessage() {} - -func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServerReflectionResponse.ProtoReflect.Descriptor instead. -func (*ServerReflectionResponse) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{2} -} - -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -func (x *ServerReflectionResponse) GetValidHost() string { - if x != nil { - return x.ValidHost - } - return "" -} - -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -func (x *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest { - if x != nil { - return x.OriginalRequest - } - return nil -} - -func (m *ServerReflectionResponse) GetMessageResponse() isServerReflectionResponse_MessageResponse { - if m != nil { - return m.MessageResponse - } - return nil -} - -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -func (x *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorResponse { - if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_FileDescriptorResponse); ok { - return x.FileDescriptorResponse - } - return nil -} - -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -func (x *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNumberResponse { - if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_AllExtensionNumbersResponse); ok { - return x.AllExtensionNumbersResponse - } - return nil -} - -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -func (x *ServerReflectionResponse) GetListServicesResponse() *ListServiceResponse { - if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ListServicesResponse); ok { - return x.ListServicesResponse - } - return nil -} - -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
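Symmetrically, a client consumes MessageResponse with a type switch over the wrapper types; a brief sketch (same v1alphapb alias as in the previous example, with placeholder handling bodies):

func handleResponse(resp *v1alphapb.ServerReflectionResponse) {
	switch mr := resp.GetMessageResponse().(type) {
	case *v1alphapb.ServerReflectionResponse_FileDescriptorResponse:
		// Serialized FileDescriptorProtos, including transitive dependencies.
		_ = mr.FileDescriptorResponse.GetFileDescriptorProto()
	case *v1alphapb.ServerReflectionResponse_AllExtensionNumbersResponse:
		_ = mr.AllExtensionNumbersResponse.GetExtensionNumber()
	case *v1alphapb.ServerReflectionResponse_ListServicesResponse:
		for _, svc := range mr.ListServicesResponse.GetService() {
			_ = svc.GetName() // full name, e.g. "<package>.<service>"
		}
	case *v1alphapb.ServerReflectionResponse_ErrorResponse:
		// An error code in grpc::StatusCode terms, plus a message.
		_ = mr.ErrorResponse.GetErrorCode()
	}
}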
-func (x *ServerReflectionResponse) GetErrorResponse() *ErrorResponse {
-	if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ErrorResponse); ok {
-		return x.ErrorResponse
-	}
-	return nil
-}
-
-type isServerReflectionResponse_MessageResponse interface {
-	isServerReflectionResponse_MessageResponse()
-}
-
-type ServerReflectionResponse_FileDescriptorResponse struct {
-	// This message is used to answer file_by_filename, file_containing_symbol,
-	// file_containing_extension requests with transitive dependencies. As
-	// the repeated label is not allowed in oneof fields, we use a
-	// FileDescriptorResponse message to encapsulate the repeated fields.
-	// The reflection service is allowed to avoid sending FileDescriptorProtos
-	// that were previously sent in response to earlier requests in the stream.
-	//
-	// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
-	FileDescriptorResponse *FileDescriptorResponse `protobuf:"bytes,4,opt,name=file_descriptor_response,json=fileDescriptorResponse,proto3,oneof"`
-}
-
-type ServerReflectionResponse_AllExtensionNumbersResponse struct {
-	// This message is used to answer all_extension_numbers_of_type request.
-	//
-	// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
-	AllExtensionNumbersResponse *ExtensionNumberResponse `protobuf:"bytes,5,opt,name=all_extension_numbers_response,json=allExtensionNumbersResponse,proto3,oneof"`
-}
-
-type ServerReflectionResponse_ListServicesResponse struct {
-	// This message is used to answer list_services request.
-	//
-	// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
-	ListServicesResponse *ListServiceResponse `protobuf:"bytes,6,opt,name=list_services_response,json=listServicesResponse,proto3,oneof"`
-}
-
-type ServerReflectionResponse_ErrorResponse struct {
-	// This message is used when an error occurs.
-	//
-	// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
-	ErrorResponse *ErrorResponse `protobuf:"bytes,7,opt,name=error_response,json=errorResponse,proto3,oneof"`
-}
-
-func (*ServerReflectionResponse_FileDescriptorResponse) isServerReflectionResponse_MessageResponse() {
-}
-
-func (*ServerReflectionResponse_AllExtensionNumbersResponse) isServerReflectionResponse_MessageResponse() {
-}
-
-func (*ServerReflectionResponse_ListServicesResponse) isServerReflectionResponse_MessageResponse() {}
-
-func (*ServerReflectionResponse_ErrorResponse) isServerReflectionResponse_MessageResponse() {}
-
-// Serialized FileDescriptorProto messages sent by the server answering
-// a file_by_filename, file_containing_symbol, or file_containing_extension
-// request.
-//
-// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
-type FileDescriptorResponse struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
-	// Serialized FileDescriptorProto messages. We avoid taking a dependency on
-	// descriptor.proto, which uses proto2 only features, by making them opaque
-	// bytes instead.
-	//
-	// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
- FileDescriptorProto [][]byte `protobuf:"bytes,1,rep,name=file_descriptor_proto,json=fileDescriptorProto,proto3" json:"file_descriptor_proto,omitempty"` -} - -func (x *FileDescriptorResponse) Reset() { - *x = FileDescriptorResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *FileDescriptorResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FileDescriptorResponse) ProtoMessage() {} - -func (x *FileDescriptorResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FileDescriptorResponse.ProtoReflect.Descriptor instead. -func (*FileDescriptorResponse) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{3} -} - -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -func (x *FileDescriptorResponse) GetFileDescriptorProto() [][]byte { - if x != nil { - return x.FileDescriptorProto - } - return nil -} - -// A list of extension numbers sent by the server answering -// all_extension_numbers_of_type request. -// -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -type ExtensionNumberResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Full name of the base type, including the package name. The format - // is <package>.<type> - // - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. - BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName,proto3" json:"base_type_name,omitempty"` - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. - ExtensionNumber []int32 `protobuf:"varint,2,rep,packed,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` -} - -func (x *ExtensionNumberResponse) Reset() { - *x = ExtensionNumberResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ExtensionNumberResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ExtensionNumberResponse) ProtoMessage() {} - -func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ExtensionNumberResponse.ProtoReflect.Descriptor instead. -func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{4} -} - -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
-func (x *ExtensionNumberResponse) GetBaseTypeName() string { - if x != nil { - return x.BaseTypeName - } - return "" -} - -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -func (x *ExtensionNumberResponse) GetExtensionNumber() []int32 { - if x != nil { - return x.ExtensionNumber - } - return nil -} - -// A list of ServiceResponse sent by the server answering list_services request. -// -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -type ListServiceResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The information of each service may be expanded in the future, so we use - // ServiceResponse message to encapsulate it. - // - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. - Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service,proto3" json:"service,omitempty"` -} - -func (x *ListServiceResponse) Reset() { - *x = ListServiceResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListServiceResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListServiceResponse) ProtoMessage() {} - -func (x *ListServiceResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListServiceResponse.ProtoReflect.Descriptor instead. -func (*ListServiceResponse) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{5} -} - -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -func (x *ListServiceResponse) GetService() []*ServiceResponse { - if x != nil { - return x.Service - } - return nil -} - -// The information of a single service used by ListServiceResponse to answer -// list_services request. -// -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -type ServiceResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Full name of a registered service, including its package name. The format - // is <package>.<service> - // - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *ServiceResponse) Reset() { - *x = ServiceResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ServiceResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServiceResponse) ProtoMessage() {} - -func (x *ServiceResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServiceResponse.ProtoReflect.Descriptor instead. -func (*ServiceResponse) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{6} -} - -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -func (x *ServiceResponse) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// The error code and error message sent by the server when an error occurs. -// -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -type ErrorResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // This field uses the error codes defined in grpc::StatusCode. - // - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. - ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. - ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` -} - -func (x *ErrorResponse) Reset() { - *x = ErrorResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ErrorResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ErrorResponse) ProtoMessage() {} - -func (x *ErrorResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ErrorResponse.ProtoReflect.Descriptor instead. -func (*ErrorResponse) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{7} -} - -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -func (x *ErrorResponse) GetErrorCode() int32 { - if x != nil { - return x.ErrorCode - } - return 0 -} - -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
-func (x *ErrorResponse) GetErrorMessage() string { - if x != nil { - return x.ErrorMessage - } - return "" -} - -var File_grpc_reflection_v1alpha_reflection_proto protoreflect.FileDescriptor - -var file_grpc_reflection_v1alpha_reflection_proto_rawDesc = []byte{ - 0x0a, 0x28, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, - 0x70, 0x68, 0x61, 0x22, 0xf8, 0x02, 0x0a, 0x17, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, - 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, - 0x6f, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x10, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x62, 0x79, 0x5f, 0x66, - 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, - 0x0e, 0x66, 0x69, 0x6c, 0x65, 0x42, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x36, 0x0a, 0x16, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, - 0x6e, 0x67, 0x5f, 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, - 0x00, 0x52, 0x14, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, - 0x67, 0x53, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x12, 0x67, 0x0a, 0x19, 0x66, 0x69, 0x6c, 0x65, 0x5f, - 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x17, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x12, 0x42, 0x0a, 0x1d, 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x19, 0x61, 0x6c, 0x6c, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x4f, 0x66, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x0d, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x6c, - 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x11, 0x0a, 0x0f, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x66, - 0x0a, 0x10, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 
0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0xc7, 0x04, 0x0a, 0x18, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x6f, 0x73, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x6f, - 0x73, 0x74, 0x12, 0x5b, 0x0a, 0x10, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, - 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0f, - 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x6b, 0x0a, 0x18, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x65, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x48, 0x00, 0x52, 0x16, 0x66, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x77, 0x0a, 0x1e, - 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, - 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x1b, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x64, 0x0a, 0x16, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, - 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, - 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x14, 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4f, 0x0a, 0x0e, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, 0x72, - 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 
0x12, 0x0a, 0x10, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x4c, 0x0a, 0x16, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x66, 0x69, - 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x13, 0x66, 0x69, 0x6c, 0x65, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, - 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, - 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x62, 0x61, 0x73, - 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x54, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, - 0x29, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, - 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x59, 0x0a, 0x13, 0x4c, 0x69, - 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x42, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x07, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x25, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x53, 0x0a, 0x0d, - 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, - 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x0d, - 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x32, 0x93, 0x01, 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7f, 0x0a, 0x14, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x30, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, - 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0x73, 0x0a, 0x1a, 0x69, 0x6f, 0x2e, 0x67, 0x72, - 0x70, 
0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, - 0x61, 0x6c, 0x70, 0x68, 0x61, 0x42, 0x15, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, - 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x39, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, - 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0xb8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_grpc_reflection_v1alpha_reflection_proto_rawDescOnce sync.Once - file_grpc_reflection_v1alpha_reflection_proto_rawDescData = file_grpc_reflection_v1alpha_reflection_proto_rawDesc -) - -func file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP() []byte { - file_grpc_reflection_v1alpha_reflection_proto_rawDescOnce.Do(func() { - file_grpc_reflection_v1alpha_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_reflection_v1alpha_reflection_proto_rawDescData) - }) - return file_grpc_reflection_v1alpha_reflection_proto_rawDescData -} - -var file_grpc_reflection_v1alpha_reflection_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_grpc_reflection_v1alpha_reflection_proto_goTypes = []interface{}{ - (*ServerReflectionRequest)(nil), // 0: grpc.reflection.v1alpha.ServerReflectionRequest - (*ExtensionRequest)(nil), // 1: grpc.reflection.v1alpha.ExtensionRequest - (*ServerReflectionResponse)(nil), // 2: grpc.reflection.v1alpha.ServerReflectionResponse - (*FileDescriptorResponse)(nil), // 3: grpc.reflection.v1alpha.FileDescriptorResponse - (*ExtensionNumberResponse)(nil), // 4: grpc.reflection.v1alpha.ExtensionNumberResponse - (*ListServiceResponse)(nil), // 5: grpc.reflection.v1alpha.ListServiceResponse - (*ServiceResponse)(nil), // 6: grpc.reflection.v1alpha.ServiceResponse - (*ErrorResponse)(nil), // 7: grpc.reflection.v1alpha.ErrorResponse -} -var file_grpc_reflection_v1alpha_reflection_proto_depIdxs = []int32{ - 1, // 0: grpc.reflection.v1alpha.ServerReflectionRequest.file_containing_extension:type_name -> grpc.reflection.v1alpha.ExtensionRequest - 0, // 1: grpc.reflection.v1alpha.ServerReflectionResponse.original_request:type_name -> grpc.reflection.v1alpha.ServerReflectionRequest - 3, // 2: grpc.reflection.v1alpha.ServerReflectionResponse.file_descriptor_response:type_name -> grpc.reflection.v1alpha.FileDescriptorResponse - 4, // 3: grpc.reflection.v1alpha.ServerReflectionResponse.all_extension_numbers_response:type_name -> grpc.reflection.v1alpha.ExtensionNumberResponse - 5, // 4: grpc.reflection.v1alpha.ServerReflectionResponse.list_services_response:type_name -> grpc.reflection.v1alpha.ListServiceResponse - 7, // 5: grpc.reflection.v1alpha.ServerReflectionResponse.error_response:type_name -> grpc.reflection.v1alpha.ErrorResponse - 6, // 6: grpc.reflection.v1alpha.ListServiceResponse.service:type_name -> grpc.reflection.v1alpha.ServiceResponse - 0, // 7: grpc.reflection.v1alpha.ServerReflection.ServerReflectionInfo:input_type -> grpc.reflection.v1alpha.ServerReflectionRequest - 2, // 8: grpc.reflection.v1alpha.ServerReflection.ServerReflectionInfo:output_type -> grpc.reflection.v1alpha.ServerReflectionResponse - 8, // [8:9] is the sub-list for method output_type - 7, // [7:8] is the sub-list for method input_type - 7, // [7:7] is the sub-list for extension 
type_name - 7, // [7:7] is the sub-list for extension extendee - 0, // [0:7] is the sub-list for field type_name -} - -func init() { file_grpc_reflection_v1alpha_reflection_proto_init() } -func file_grpc_reflection_v1alpha_reflection_proto_init() { - if File_grpc_reflection_v1alpha_reflection_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServerReflectionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExtensionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServerReflectionResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FileDescriptorResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExtensionNumberResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListServiceResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServiceResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ErrorResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].OneofWrappers = []interface{}{ - (*ServerReflectionRequest_FileByFilename)(nil), - (*ServerReflectionRequest_FileContainingSymbol)(nil), - (*ServerReflectionRequest_FileContainingExtension)(nil), - (*ServerReflectionRequest_AllExtensionNumbersOfType)(nil), - (*ServerReflectionRequest_ListServices)(nil), - } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].OneofWrappers = []interface{}{ - (*ServerReflectionResponse_FileDescriptorResponse)(nil), - (*ServerReflectionResponse_AllExtensionNumbersResponse)(nil), - (*ServerReflectionResponse_ListServicesResponse)(nil), - (*ServerReflectionResponse_ErrorResponse)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_grpc_reflection_v1alpha_reflection_proto_rawDesc, - NumEnums: 0, - 
NumMessages: 8, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_grpc_reflection_v1alpha_reflection_proto_goTypes, - DependencyIndexes: file_grpc_reflection_v1alpha_reflection_proto_depIdxs, - MessageInfos: file_grpc_reflection_v1alpha_reflection_proto_msgTypes, - }.Build() - File_grpc_reflection_v1alpha_reflection_proto = out.File - file_grpc_reflection_v1alpha_reflection_proto_rawDesc = nil - file_grpc_reflection_v1alpha_reflection_proto_goTypes = nil - file_grpc_reflection_v1alpha_reflection_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go deleted file mode 100644 index 93886e38216..00000000000 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2016 The gRPC Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// Service exported by server reflection - -// Warning: this entire file is deprecated. Use this instead: -// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto - -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.4.0 -// - protoc v4.25.2 -// grpc/reflection/v1alpha/reflection.proto is a deprecated file. - -package grpc_reflection_v1alpha - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.62.0 or later. -const _ = grpc.SupportPackageIsVersion8 - -const ( - ServerReflection_ServerReflectionInfo_FullMethodName = "/grpc.reflection.v1alpha.ServerReflection/ServerReflectionInfo" -) - -// ServerReflectionClient is the client API for ServerReflection service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type ServerReflectionClient interface { - // The reflection service is structured as a bidirectional stream, ensuring - // all related requests go to a single server. - ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) -} - -type serverReflectionClient struct { - cc grpc.ClientConnInterface -} - -func NewServerReflectionClient(cc grpc.ClientConnInterface) ServerReflectionClient { - return &serverReflectionClient{cc} -} - -func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], ServerReflection_ServerReflectionInfo_FullMethodName, cOpts...) 
- if err != nil { - return nil, err - } - x := &serverReflectionServerReflectionInfoClient{ClientStream: stream} - return x, nil -} - -type ServerReflection_ServerReflectionInfoClient interface { - Send(*ServerReflectionRequest) error - Recv() (*ServerReflectionResponse, error) - grpc.ClientStream -} - -type serverReflectionServerReflectionInfoClient struct { - grpc.ClientStream -} - -func (x *serverReflectionServerReflectionInfoClient) Send(m *ServerReflectionRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *serverReflectionServerReflectionInfoClient) Recv() (*ServerReflectionResponse, error) { - m := new(ServerReflectionResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// ServerReflectionServer is the server API for ServerReflection service. -// All implementations should embed UnimplementedServerReflectionServer -// for forward compatibility -type ServerReflectionServer interface { - // The reflection service is structured as a bidirectional stream, ensuring - // all related requests go to a single server. - ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error -} - -// UnimplementedServerReflectionServer should be embedded to have forward compatible implementations. -type UnimplementedServerReflectionServer struct { -} - -func (UnimplementedServerReflectionServer) ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error { - return status.Errorf(codes.Unimplemented, "method ServerReflectionInfo not implemented") -} - -// UnsafeServerReflectionServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to ServerReflectionServer will -// result in compilation errors. -type UnsafeServerReflectionServer interface { - mustEmbedUnimplementedServerReflectionServer() -} - -func RegisterServerReflectionServer(s grpc.ServiceRegistrar, srv ServerReflectionServer) { - s.RegisterService(&ServerReflection_ServiceDesc, srv) -} - -func _ServerReflection_ServerReflectionInfo_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(ServerReflectionServer).ServerReflectionInfo(&serverReflectionServerReflectionInfoServer{ServerStream: stream}) -} - -type ServerReflection_ServerReflectionInfoServer interface { - Send(*ServerReflectionResponse) error - Recv() (*ServerReflectionRequest, error) - grpc.ServerStream -} - -type serverReflectionServerReflectionInfoServer struct { - grpc.ServerStream -} - -func (x *serverReflectionServerReflectionInfoServer) Send(m *ServerReflectionResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *serverReflectionServerReflectionInfoServer) Recv() (*ServerReflectionRequest, error) { - m := new(ServerReflectionRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// ServerReflection_ServiceDesc is the grpc.ServiceDesc for ServerReflection service. 
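Putting the generated client surface together, one full round trip on the bidirectional stream looks roughly like this (a sketch with minimal error handling, assuming an already-dialed *grpc.ClientConn):

package example

import (
	"context"

	"google.golang.org/grpc"
	v1alphapb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
)

// listServices sends one list_services request and reads one response.
func listServices(ctx context.Context, cc *grpc.ClientConn) ([]string, error) {
	stream, err := v1alphapb.NewServerReflectionClient(cc).ServerReflectionInfo(ctx)
	if err != nil {
		return nil, err
	}
	req := &v1alphapb.ServerReflectionRequest{
		MessageRequest: &v1alphapb.ServerReflectionRequest_ListServices{ListServices: "*"},
	}
	if err := stream.Send(req); err != nil {
		return nil, err
	}
	resp, err := stream.Recv()
	if err != nil {
		return nil, err
	}
	var names []string
	for _, svc := range resp.GetListServicesResponse().GetService() {
		names = append(names, svc.GetName())
	}
	return names, stream.CloseSend()
}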
-// It's only intended for direct use with grpc.RegisterService,
-// and not to be introspected or modified (even as a copy)
-var ServerReflection_ServiceDesc = grpc.ServiceDesc{
-	ServiceName: "grpc.reflection.v1alpha.ServerReflection",
-	HandlerType: (*ServerReflectionServer)(nil),
-	Methods:     []grpc.MethodDesc{},
-	Streams: []grpc.StreamDesc{
-		{
-			StreamName:    "ServerReflectionInfo",
-			Handler:       _ServerReflection_ServerReflectionInfo_Handler,
-			ServerStreams: true,
-			ClientStreams: true,
-		},
-	},
-	Metadata: "grpc/reflection/v1alpha/reflection.proto",
-}
diff --git a/vendor/google.golang.org/grpc/reflection/internal/internal.go b/vendor/google.golang.org/grpc/reflection/internal/internal.go
deleted file mode 100644
index 36ee6507507..00000000000
--- a/vendor/google.golang.org/grpc/reflection/internal/internal.go
+++ /dev/null
@@ -1,436 +0,0 @@
-/*
- *
- * Copyright 2024 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package internal contains code that is shared by both the reflection package
-// and the test package. The packages are split in this way in order to avoid a
-// dependency on the deprecated package github.com/golang/protobuf.
-package internal
-
-import (
-	"io"
-	"sort"
-
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/status"
-	"google.golang.org/protobuf/proto"
-	"google.golang.org/protobuf/reflect/protodesc"
-	"google.golang.org/protobuf/reflect/protoreflect"
-	"google.golang.org/protobuf/reflect/protoregistry"
-
-	v1reflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1"
-	v1reflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1"
-	v1alphareflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
-	v1alphareflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
-)
-
-// ServiceInfoProvider is an interface used to retrieve metadata about the
-// services to expose.
-type ServiceInfoProvider interface {
-	GetServiceInfo() map[string]grpc.ServiceInfo
-}
-
-// ExtensionResolver is the interface used to query details about extensions.
-// This interface is satisfied by protoregistry.GlobalTypes.
-type ExtensionResolver interface {
-	protoregistry.ExtensionTypeResolver
-	RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool)
-}
-
-// ServerReflectionServer is the server API for ServerReflection service.
-type ServerReflectionServer struct {
-	v1alphareflectiongrpc.UnimplementedServerReflectionServer
-	S            ServiceInfoProvider
-	DescResolver protodesc.Resolver
-	ExtResolver  ExtensionResolver
-}
-
-// FileDescWithDependencies returns a slice of serialized fileDescriptors in
-// wire format ([]byte). The fileDescriptors will include fd and all the
-// transitive dependencies of fd with names not in sentFileDescriptors.
-func (s *ServerReflectionServer) FileDescWithDependencies(fd protoreflect.FileDescriptor, sentFileDescriptors map[string]bool) ([][]byte, error) { - if fd.IsPlaceholder() { - // If the given root file is a placeholder, treat it - // as missing instead of serializing it. - return nil, protoregistry.NotFound - } - var r [][]byte - queue := []protoreflect.FileDescriptor{fd} - for len(queue) > 0 { - currentfd := queue[0] - queue = queue[1:] - if currentfd.IsPlaceholder() { - // Skip any missing files in the dependency graph. - continue - } - if sent := sentFileDescriptors[currentfd.Path()]; len(r) == 0 || !sent { - sentFileDescriptors[currentfd.Path()] = true - fdProto := protodesc.ToFileDescriptorProto(currentfd) - currentfdEncoded, err := proto.Marshal(fdProto) - if err != nil { - return nil, err - } - r = append(r, currentfdEncoded) - } - for i := 0; i < currentfd.Imports().Len(); i++ { - queue = append(queue, currentfd.Imports().Get(i)) - } - } - return r, nil -} - -// FileDescEncodingContainingSymbol finds the file descriptor containing the -// given symbol, finds all of its previously unsent transitive dependencies, -// does marshalling on them, and returns the marshalled result. The given symbol -// can be a type, a service or a method. -func (s *ServerReflectionServer) FileDescEncodingContainingSymbol(name string, sentFileDescriptors map[string]bool) ([][]byte, error) { - d, err := s.DescResolver.FindDescriptorByName(protoreflect.FullName(name)) - if err != nil { - return nil, err - } - return s.FileDescWithDependencies(d.ParentFile(), sentFileDescriptors) -} - -// FileDescEncodingContainingExtension finds the file descriptor containing -// given extension, finds all of its previously unsent transitive dependencies, -// does marshalling on them, and returns the marshalled result. -func (s *ServerReflectionServer) FileDescEncodingContainingExtension(typeName string, extNum int32, sentFileDescriptors map[string]bool) ([][]byte, error) { - xt, err := s.ExtResolver.FindExtensionByNumber(protoreflect.FullName(typeName), protoreflect.FieldNumber(extNum)) - if err != nil { - return nil, err - } - return s.FileDescWithDependencies(xt.TypeDescriptor().ParentFile(), sentFileDescriptors) -} - -// AllExtensionNumbersForTypeName returns all extension numbers for the given type. -func (s *ServerReflectionServer) AllExtensionNumbersForTypeName(name string) ([]int32, error) { - var numbers []int32 - s.ExtResolver.RangeExtensionsByMessage(protoreflect.FullName(name), func(xt protoreflect.ExtensionType) bool { - numbers = append(numbers, int32(xt.TypeDescriptor().Number())) - return true - }) - sort.Slice(numbers, func(i, j int) bool { - return numbers[i] < numbers[j] - }) - if len(numbers) == 0 { - // maybe return an error if given type name is not known - if _, err := s.DescResolver.FindDescriptorByName(protoreflect.FullName(name)); err != nil { - return nil, err - } - } - return numbers, nil -} - -// ListServices returns the names of services this server exposes. -func (s *ServerReflectionServer) ListServices() []*v1reflectionpb.ServiceResponse { - serviceInfo := s.S.GetServiceInfo() - resp := make([]*v1reflectionpb.ServiceResponse, 0, len(serviceInfo)) - for svc := range serviceInfo { - resp = append(resp, &v1reflectionpb.ServiceResponse{Name: svc}) - } - sort.Slice(resp, func(i, j int) bool { - return resp[i].Name < resp[j].Name - }) - return resp -} - -// ServerReflectionInfo is the reflection service handler. 
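The traversal above is the core of the service: a breadth-first walk over imports, deduplicated by file path, with the root file always included. A compact restatement of the same walk, listing paths in send order instead of marshalling (illustration only; the real method also tracks sentFileDescriptors across requests in the stream):

package example

import (
	"google.golang.org/protobuf/reflect/protoreflect"
)

// dependencyClosure returns fd's path followed by the paths of all its
// transitive imports, each at most once, skipping placeholder files.
func dependencyClosure(fd protoreflect.FileDescriptor) []string {
	seen := make(map[string]bool)
	var order []string
	queue := []protoreflect.FileDescriptor{fd}
	for len(queue) > 0 {
		cur := queue[0]
		queue = queue[1:]
		if cur.IsPlaceholder() || seen[cur.Path()] {
			continue
		}
		seen[cur.Path()] = true
		order = append(order, cur.Path())
		for i := 0; i < cur.Imports().Len(); i++ {
			queue = append(queue, cur.Imports().Get(i))
		}
	}
	return order
}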
-func (s *ServerReflectionServer) ServerReflectionInfo(stream v1reflectiongrpc.ServerReflection_ServerReflectionInfoServer) error { - sentFileDescriptors := make(map[string]bool) - for { - in, err := stream.Recv() - if err == io.EOF { - return nil - } - if err != nil { - return err - } - - out := &v1reflectionpb.ServerReflectionResponse{ - ValidHost: in.Host, - OriginalRequest: in, - } - switch req := in.MessageRequest.(type) { - case *v1reflectionpb.ServerReflectionRequest_FileByFilename: - var b [][]byte - fd, err := s.DescResolver.FindFileByPath(req.FileByFilename) - if err == nil { - b, err = s.FileDescWithDependencies(fd, sentFileDescriptors) - } - if err != nil { - out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1reflectionpb.ErrorResponse{ - ErrorCode: int32(codes.NotFound), - ErrorMessage: err.Error(), - }, - } - } else { - out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, - } - } - case *v1reflectionpb.ServerReflectionRequest_FileContainingSymbol: - b, err := s.FileDescEncodingContainingSymbol(req.FileContainingSymbol, sentFileDescriptors) - if err != nil { - out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1reflectionpb.ErrorResponse{ - ErrorCode: int32(codes.NotFound), - ErrorMessage: err.Error(), - }, - } - } else { - out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, - } - } - case *v1reflectionpb.ServerReflectionRequest_FileContainingExtension: - typeName := req.FileContainingExtension.ContainingType - extNum := req.FileContainingExtension.ExtensionNumber - b, err := s.FileDescEncodingContainingExtension(typeName, extNum, sentFileDescriptors) - if err != nil { - out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1reflectionpb.ErrorResponse{ - ErrorCode: int32(codes.NotFound), - ErrorMessage: err.Error(), - }, - } - } else { - out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, - } - } - case *v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: - extNums, err := s.AllExtensionNumbersForTypeName(req.AllExtensionNumbersOfType) - if err != nil { - out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1reflectionpb.ErrorResponse{ - ErrorCode: int32(codes.NotFound), - ErrorMessage: err.Error(), - }, - } - } else { - out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse{ - AllExtensionNumbersResponse: &v1reflectionpb.ExtensionNumberResponse{ - BaseTypeName: req.AllExtensionNumbersOfType, - ExtensionNumber: extNums, - }, - } - } - case *v1reflectionpb.ServerReflectionRequest_ListServices: - out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ListServicesResponse{ - ListServicesResponse: &v1reflectionpb.ListServiceResponse{ - Service: s.ListServices(), - }, - } - default: - return status.Errorf(codes.InvalidArgument, "invalid MessageRequest: %v", in.MessageRequest) - } - - if err := stream.Send(out); err != nil { - return err - } - } -} - -// V1ToV1AlphaResponse converts a v1 ServerReflectionResponse to a v1alpha. 
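None of this is normally invoked by hand: applications opt in through the public reflection package, which in current grpc-go registers this shared handler under both the v1 and v1alpha service names. A typical server setup (the application-service registration line is a hypothetical placeholder):

package example

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/reflection"
)

func newServer() *grpc.Server {
	s := grpc.NewServer()
	// pb.RegisterYourServiceServer(s, &yourService{}) // hypothetical service
	reflection.Register(s)
	return s
}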
-func V1ToV1AlphaResponse(v1 *v1reflectionpb.ServerReflectionResponse) *v1alphareflectionpb.ServerReflectionResponse { - var v1alpha v1alphareflectionpb.ServerReflectionResponse - v1alpha.ValidHost = v1.ValidHost - if v1.OriginalRequest != nil { - v1alpha.OriginalRequest = V1ToV1AlphaRequest(v1.OriginalRequest) - } - switch mr := v1.MessageResponse.(type) { - case *v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse: - if mr != nil { - v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &v1alphareflectionpb.FileDescriptorResponse{ - FileDescriptorProto: mr.FileDescriptorResponse.GetFileDescriptorProto(), - }, - } - } - case *v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse: - if mr != nil { - v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse{ - AllExtensionNumbersResponse: &v1alphareflectionpb.ExtensionNumberResponse{ - BaseTypeName: mr.AllExtensionNumbersResponse.GetBaseTypeName(), - ExtensionNumber: mr.AllExtensionNumbersResponse.GetExtensionNumber(), - }, - } - } - case *v1reflectionpb.ServerReflectionResponse_ListServicesResponse: - if mr != nil { - svcs := make([]*v1alphareflectionpb.ServiceResponse, len(mr.ListServicesResponse.GetService())) - for i, svc := range mr.ListServicesResponse.GetService() { - svcs[i] = &v1alphareflectionpb.ServiceResponse{ - Name: svc.GetName(), - } - } - v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_ListServicesResponse{ - ListServicesResponse: &v1alphareflectionpb.ListServiceResponse{ - Service: svcs, - }, - } - } - case *v1reflectionpb.ServerReflectionResponse_ErrorResponse: - if mr != nil { - v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1alphareflectionpb.ErrorResponse{ - ErrorCode: mr.ErrorResponse.GetErrorCode(), - ErrorMessage: mr.ErrorResponse.GetErrorMessage(), - }, - } - } - default: - // no value set - } - return &v1alpha -} - -// V1AlphaToV1Request converts a v1alpha ServerReflectionRequest to a v1. 
-func V1AlphaToV1Request(v1alpha *v1alphareflectionpb.ServerReflectionRequest) *v1reflectionpb.ServerReflectionRequest { - var v1 v1reflectionpb.ServerReflectionRequest - v1.Host = v1alpha.Host - switch mr := v1alpha.MessageRequest.(type) { - case *v1alphareflectionpb.ServerReflectionRequest_FileByFilename: - v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileByFilename{ - FileByFilename: mr.FileByFilename, - } - case *v1alphareflectionpb.ServerReflectionRequest_FileContainingSymbol: - v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileContainingSymbol{ - FileContainingSymbol: mr.FileContainingSymbol, - } - case *v1alphareflectionpb.ServerReflectionRequest_FileContainingExtension: - if mr.FileContainingExtension != nil { - v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileContainingExtension{ - FileContainingExtension: &v1reflectionpb.ExtensionRequest{ - ContainingType: mr.FileContainingExtension.GetContainingType(), - ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(), - }, - } - } - case *v1alphareflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: - v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType{ - AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType, - } - case *v1alphareflectionpb.ServerReflectionRequest_ListServices: - v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_ListServices{ - ListServices: mr.ListServices, - } - default: - // no value set - } - return &v1 -} - -// V1ToV1AlphaRequest converts a v1 ServerReflectionRequest to a v1alpha. -func V1ToV1AlphaRequest(v1 *v1reflectionpb.ServerReflectionRequest) *v1alphareflectionpb.ServerReflectionRequest { - var v1alpha v1alphareflectionpb.ServerReflectionRequest - v1alpha.Host = v1.Host - switch mr := v1.MessageRequest.(type) { - case *v1reflectionpb.ServerReflectionRequest_FileByFilename: - if mr != nil { - v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileByFilename{ - FileByFilename: mr.FileByFilename, - } - } - case *v1reflectionpb.ServerReflectionRequest_FileContainingSymbol: - if mr != nil { - v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileContainingSymbol{ - FileContainingSymbol: mr.FileContainingSymbol, - } - } - case *v1reflectionpb.ServerReflectionRequest_FileContainingExtension: - if mr != nil { - v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileContainingExtension{ - FileContainingExtension: &v1alphareflectionpb.ExtensionRequest{ - ContainingType: mr.FileContainingExtension.GetContainingType(), - ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(), - }, - } - } - case *v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: - if mr != nil { - v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType{ - AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType, - } - } - case *v1reflectionpb.ServerReflectionRequest_ListServices: - if mr != nil { - v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_ListServices{ - ListServices: mr.ListServices, - } - } - default: - // no value set - } - return &v1alpha -} - -// V1AlphaToV1Response converts a v1alpha ServerReflectionResponse to a v1. 
-func V1AlphaToV1Response(v1alpha *v1alphareflectionpb.ServerReflectionResponse) *v1reflectionpb.ServerReflectionResponse { - var v1 v1reflectionpb.ServerReflectionResponse - v1.ValidHost = v1alpha.ValidHost - if v1alpha.OriginalRequest != nil { - v1.OriginalRequest = V1AlphaToV1Request(v1alpha.OriginalRequest) - } - switch mr := v1alpha.MessageResponse.(type) { - case *v1alphareflectionpb.ServerReflectionResponse_FileDescriptorResponse: - if mr != nil { - v1.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{ - FileDescriptorProto: mr.FileDescriptorResponse.GetFileDescriptorProto(), - }, - } - } - case *v1alphareflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse: - if mr != nil { - v1.MessageResponse = &v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse{ - AllExtensionNumbersResponse: &v1reflectionpb.ExtensionNumberResponse{ - BaseTypeName: mr.AllExtensionNumbersResponse.GetBaseTypeName(), - ExtensionNumber: mr.AllExtensionNumbersResponse.GetExtensionNumber(), - }, - } - } - case *v1alphareflectionpb.ServerReflectionResponse_ListServicesResponse: - if mr != nil { - svcs := make([]*v1reflectionpb.ServiceResponse, len(mr.ListServicesResponse.GetService())) - for i, svc := range mr.ListServicesResponse.GetService() { - svcs[i] = &v1reflectionpb.ServiceResponse{ - Name: svc.GetName(), - } - } - v1.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ListServicesResponse{ - ListServicesResponse: &v1reflectionpb.ListServiceResponse{ - Service: svcs, - }, - } - } - case *v1alphareflectionpb.ServerReflectionResponse_ErrorResponse: - if mr != nil { - v1.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1reflectionpb.ErrorResponse{ - ErrorCode: mr.ErrorResponse.GetErrorCode(), - ErrorMessage: mr.ErrorResponse.GetErrorMessage(), - }, - } - } - default: - // no value set - } - return &v1 -} diff --git a/vendor/google.golang.org/grpc/reflection/serverreflection.go b/vendor/google.golang.org/grpc/reflection/serverreflection.go deleted file mode 100644 index 13a94e2dd2e..00000000000 --- a/vendor/google.golang.org/grpc/reflection/serverreflection.go +++ /dev/null @@ -1,160 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/* -Package reflection implements server reflection service. - -The service implemented is defined in: -https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1alpha/reflection.proto. - -To register server reflection on a gRPC server: - - import "google.golang.org/grpc/reflection" - - s := grpc.NewServer() - pb.RegisterYourOwnServer(s, &server{}) - - // Register reflection service on gRPC server. 
- reflection.Register(s) - - s.Serve(lis) -*/ -package reflection // import "google.golang.org/grpc/reflection" - -import ( - "google.golang.org/grpc" - "google.golang.org/grpc/reflection/internal" - "google.golang.org/protobuf/reflect/protodesc" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" - - v1reflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1" - v1alphareflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" -) - -// GRPCServer is the interface provided by a gRPC server. It is implemented by -// *grpc.Server, but could also be implemented by other concrete types. It acts -// as a registry, for accumulating the services exposed by the server. -type GRPCServer interface { - grpc.ServiceRegistrar - ServiceInfoProvider -} - -var _ GRPCServer = (*grpc.Server)(nil) - -// Register registers the server reflection service on the given gRPC server. -// Both the v1 and v1alpha versions are registered. -func Register(s GRPCServer) { - svr := NewServerV1(ServerOptions{Services: s}) - v1alphareflectiongrpc.RegisterServerReflectionServer(s, asV1Alpha(svr)) - v1reflectiongrpc.RegisterServerReflectionServer(s, svr) -} - -// RegisterV1 registers only the v1 version of the server reflection service -// on the given gRPC server. Many clients may only support v1alpha so most -// users should use Register instead, at least until clients have upgraded. -func RegisterV1(s GRPCServer) { - svr := NewServerV1(ServerOptions{Services: s}) - v1reflectiongrpc.RegisterServerReflectionServer(s, svr) -} - -// ServiceInfoProvider is an interface used to retrieve metadata about the -// services to expose. -// -// The reflection service is only interested in the service names, but the -// signature is this way so that *grpc.Server implements it. So it is okay -// for a custom implementation to return zero values for the -// grpc.ServiceInfo values in the map. -// -// # Experimental -// -// Notice: This type is EXPERIMENTAL and may be changed or removed in a -// later release. -type ServiceInfoProvider interface { - GetServiceInfo() map[string]grpc.ServiceInfo -} - -// ExtensionResolver is the interface used to query details about extensions. -// This interface is satisfied by protoregistry.GlobalTypes. -// -// # Experimental -// -// Notice: This type is EXPERIMENTAL and may be changed or removed in a -// later release. -type ExtensionResolver interface { - protoregistry.ExtensionTypeResolver - RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool) -} - -// ServerOptions represents the options used to construct a reflection server. -// -// # Experimental -// -// Notice: This type is EXPERIMENTAL and may be changed or removed in a -// later release. -type ServerOptions struct { - // The source of advertised RPC services. If not specified, the reflection - // server will report an empty list when asked to list services. - // - // This value will typically be a *grpc.Server. But the set of advertised - // services can be customized by wrapping a *grpc.Server or using an - // alternate implementation that returns a custom set of service names. - Services ServiceInfoProvider - // Optional resolver used to load descriptors. If not specified, - // protoregistry.GlobalFiles will be used. - DescriptorResolver protodesc.Resolver - // Optional resolver used to query for known extensions. If not specified, - // protoregistry.GlobalTypes will be used. 
- ExtensionResolver ExtensionResolver -} - -// NewServer returns a reflection server implementation using the given options. -// This can be used to customize behavior of the reflection service. Most usages -// should prefer to use Register instead. For backwards compatibility reasons, -// this returns the v1alpha version of the reflection server. For a v1 version -// of the reflection server, see NewServerV1. -// -// # Experimental -// -// Notice: This function is EXPERIMENTAL and may be changed or removed in a -// later release. -func NewServer(opts ServerOptions) v1alphareflectiongrpc.ServerReflectionServer { - return asV1Alpha(NewServerV1(opts)) -} - -// NewServerV1 returns a reflection server implementation using the given options. -// This can be used to customize behavior of the reflection service. Most usages -// should prefer to use Register instead. -// -// # Experimental -// -// Notice: This function is EXPERIMENTAL and may be changed or removed in a -// later release. -func NewServerV1(opts ServerOptions) v1reflectiongrpc.ServerReflectionServer { - if opts.DescriptorResolver == nil { - opts.DescriptorResolver = protoregistry.GlobalFiles - } - if opts.ExtensionResolver == nil { - opts.ExtensionResolver = protoregistry.GlobalTypes - } - return &internal.ServerReflectionServer{ - S: opts.Services, - DescResolver: opts.DescriptorResolver, - ExtResolver: opts.ExtensionResolver, - } -} diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh deleted file mode 100644 index 3edca296c22..00000000000 --- a/vendor/google.golang.org/grpc/regenerate.sh +++ /dev/null @@ -1,123 +0,0 @@ -#!/bin/bash -# Copyright 2020 gRPC authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -eu -o pipefail - -WORKDIR=$(mktemp -d) - -function finish { - rm -rf "$WORKDIR" -} -trap finish EXIT - -export GOBIN=${WORKDIR}/bin -export PATH=${GOBIN}:${PATH} -mkdir -p ${GOBIN} - -echo "remove existing generated files" -# grpc_testing_not_regenerate/*.pb.go is not re-generated, -# see grpc_testing_not_regenerate/README.md for details. -rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testing_not_regenerate') - -echo "go install google.golang.org/protobuf/cmd/protoc-gen-go" -(cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go) - -echo "go install cmd/protoc-gen-go-grpc" -(cd cmd/protoc-gen-go-grpc && go install .) 
- -echo "git clone https://github.com/grpc/grpc-proto" -git clone --quiet https://github.com/grpc/grpc-proto ${WORKDIR}/grpc-proto - -echo "git clone https://github.com/protocolbuffers/protobuf" -git clone --quiet https://github.com/protocolbuffers/protobuf ${WORKDIR}/protobuf - -# Pull in code.proto as a proto dependency -mkdir -p ${WORKDIR}/googleapis/google/rpc -echo "curl https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto" -curl --silent https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto > ${WORKDIR}/googleapis/google/rpc/code.proto - -mkdir -p ${WORKDIR}/out - -# Generates sources without the embed requirement -LEGACY_SOURCES=( - ${WORKDIR}/grpc-proto/grpc/binlog/v1/binarylog.proto - ${WORKDIR}/grpc-proto/grpc/channelz/v1/channelz.proto - ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto - ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto - profiling/proto/service.proto - ${WORKDIR}/grpc-proto/grpc/reflection/v1alpha/reflection.proto - ${WORKDIR}/grpc-proto/grpc/reflection/v1/reflection.proto -) - -# Generates only the new gRPC Service symbols -SOURCES=( - $(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^profiling/proto/service.proto$') - ${WORKDIR}/grpc-proto/grpc/gcp/altscontext.proto - ${WORKDIR}/grpc-proto/grpc/gcp/handshaker.proto - ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto - ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto - ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto - ${WORKDIR}/grpc-proto/grpc/testing/*.proto - ${WORKDIR}/grpc-proto/grpc/core/*.proto -) - -# These options of the form 'Mfoo.proto=bar' instruct the codegen to use an -# import path of 'bar' in the generated code when 'foo.proto' is imported in -# one of the sources. -# -# Note that the protos listed here are all for testing purposes. All protos to -# be used externally should have a go_package option (and they don't need to be -# listed here). -OPTS=Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\ -Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/messages.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/worker_service.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/control.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/test.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/payloads.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing - -for src in ${SOURCES[@]}; do - echo "protoc ${src}" - protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},use_generic_streams_experimental=true:${WORKDIR}/out \ - -I"." \ - -I${WORKDIR}/grpc-proto \ - -I${WORKDIR}/googleapis \ - -I${WORKDIR}/protobuf/src \ - ${src} -done - -for src in ${LEGACY_SOURCES[@]}; do - echo "protoc ${src}" - protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},require_unimplemented_servers=false:${WORKDIR}/out \ - -I"." \ - -I${WORKDIR}/grpc-proto \ - -I${WORKDIR}/googleapis \ - -I${WORKDIR}/protobuf/src \ - ${src} -done - -# The go_package option in grpc/lookup/v1/rls.proto doesn't match the -# current location. Move it into the right place. 
-mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 -mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 - -# grpc_testing_not_regenerate/*.pb.go are not re-generated, -# see grpc_testing_not_regenerate/README.md for details. -rm ${WORKDIR}/out/google.golang.org/grpc/reflection/test/grpc_testing_not_regenerate/*.pb.go - -cp -R ${WORKDIR}/out/google.golang.org/grpc/* . diff --git a/vendor/google.golang.org/grpc/resolver/manual/manual.go b/vendor/google.golang.org/grpc/resolver/manual/manual.go index f2efa2a2cb5..09e864a89d3 100644 --- a/vendor/google.golang.org/grpc/resolver/manual/manual.go +++ b/vendor/google.golang.org/grpc/resolver/manual/manual.go @@ -76,9 +76,11 @@ func (r *Resolver) InitialState(s resolver.State) { // Build returns itself for Resolver, because it's both a builder and a resolver. func (r *Resolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { - r.BuildCallback(target, cc, opts) r.mu.Lock() defer r.mu.Unlock() + // Call BuildCallback after locking to avoid a race when UpdateState + // or ReportError is called before Build returns. + r.BuildCallback(target, cc, opts) r.CC = cc if r.lastSeenState != nil { err := r.CC.UpdateState(*r.lastSeenState) diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index 202854511b8..8eb1cf3bcfa 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -22,6 +22,7 @@ package resolver import ( "context" + "errors" "fmt" "net" "net/url" @@ -237,8 +238,8 @@ type ClientConn interface { // UpdateState can be omitted. UpdateState(State) error // ReportError notifies the ClientConn that the Resolver encountered an - // error. The ClientConn will notify the load balancer and begin calling - // ResolveNow on the Resolver with exponential backoff. + // error. The ClientConn then forwards this error to the load balancing + // policy. ReportError(error) // NewAddress is called by resolver to notify ClientConn a new list // of resolved addresses. @@ -330,3 +331,20 @@ type AuthorityOverrider interface { // typically in line, and must keep it unchanged. OverrideAuthority(Target) string } + +// ValidateEndpoints validates endpoints from a petiole policy's perspective. +// Petiole policies should call this before calling into their children. See +// [gRPC A61](https://github.com/grpc/proposal/blob/master/A61-IPv4-IPv6-dualstack-backends.md) +// for details. +func ValidateEndpoints(endpoints []Endpoint) error { + if len(endpoints) == 0 { + return errors.New("endpoints list is empty") + } + + for _, endpoint := range endpoints { + for range endpoint.Addresses { + return nil + } + } + return errors.New("endpoints list contains no addresses") +} diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go index c5fb45236fa..23bb3fb2582 100644 --- a/vendor/google.golang.org/grpc/resolver_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_wrapper.go @@ -66,7 +66,7 @@ func newCCResolverWrapper(cc *ClientConn) *ccResolverWrapper { // any newly created ccResolverWrapper, except that close may be called instead. 
func (ccr *ccResolverWrapper) start() error { errCh := make(chan error) - ccr.serializer.Schedule(func(ctx context.Context) { + ccr.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil { return } @@ -85,7 +85,7 @@ func (ccr *ccResolverWrapper) start() error { } func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { - ccr.serializer.Schedule(func(ctx context.Context) { + ccr.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil || ccr.resolver == nil { return } @@ -102,7 +102,7 @@ func (ccr *ccResolverWrapper) close() { ccr.closed = true ccr.mu.Unlock() - ccr.serializer.Schedule(func(context.Context) { + ccr.serializer.TrySchedule(func(context.Context) { if ccr.resolver == nil { return } @@ -177,6 +177,9 @@ func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.P // addChannelzTraceEvent adds a channelz trace event containing the new // state received from resolver implementations. func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { + if !logger.V(0) && !channelz.IsOn() { + return + } var updates []string var oldSC, newSC *ServiceConfig var oldOK, newOK bool diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index fdd49e6e915..9fac2b08b48 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -19,7 +19,6 @@ package grpc import ( - "bytes" "compress/gzip" "context" "encoding/binary" @@ -35,6 +34,7 @@ import ( "google.golang.org/grpc/encoding" "google.golang.org/grpc/encoding/proto" "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -220,8 +220,8 @@ type HeaderCallOption struct { HeaderAddr *metadata.MD } -func (o HeaderCallOption) before(c *callInfo) error { return nil } -func (o HeaderCallOption) after(c *callInfo, attempt *csAttempt) { +func (o HeaderCallOption) before(*callInfo) error { return nil } +func (o HeaderCallOption) after(_ *callInfo, attempt *csAttempt) { *o.HeaderAddr, _ = attempt.s.Header() } @@ -242,8 +242,8 @@ type TrailerCallOption struct { TrailerAddr *metadata.MD } -func (o TrailerCallOption) before(c *callInfo) error { return nil } -func (o TrailerCallOption) after(c *callInfo, attempt *csAttempt) { +func (o TrailerCallOption) before(*callInfo) error { return nil } +func (o TrailerCallOption) after(_ *callInfo, attempt *csAttempt) { *o.TrailerAddr = attempt.s.Trailer() } @@ -264,24 +264,20 @@ type PeerCallOption struct { PeerAddr *peer.Peer } -func (o PeerCallOption) before(c *callInfo) error { return nil } -func (o PeerCallOption) after(c *callInfo, attempt *csAttempt) { +func (o PeerCallOption) before(*callInfo) error { return nil } +func (o PeerCallOption) after(_ *callInfo, attempt *csAttempt) { if x, ok := peer.FromContext(attempt.s.Context()); ok { *o.PeerAddr = *x } } -// WaitForReady configures the action to take when an RPC is attempted on broken -// connections or unreachable servers. If waitForReady is false and the -// connection is in the TRANSIENT_FAILURE state, the RPC will fail -// immediately. Otherwise, the RPC client will block the call until a -// connection is available (or the call is canceled or times out) and will -// retry the call if it fails due to a transient error. gRPC will not retry if -// data was written to the wire unless the server indicates it did not process -// the data. 
Please refer to -// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md. +// WaitForReady configures the RPC's behavior when the client is in +// TRANSIENT_FAILURE, which occurs when all addresses fail to connect. If +// waitForReady is false, the RPC will fail immediately. Otherwise, the client +// will wait until a connection becomes available or the RPC's deadline is +// reached. // -// By default, RPCs don't "wait for ready". +// By default, RPCs do not "wait for ready". func WaitForReady(waitForReady bool) CallOption { return FailFastCallOption{FailFast: !waitForReady} } @@ -308,7 +304,7 @@ func (o FailFastCallOption) before(c *callInfo) error { c.failFast = o.FailFast return nil } -func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o FailFastCallOption) after(*callInfo, *csAttempt) {} // OnFinish returns a CallOption that configures a callback to be called when // the call completes. The error passed to the callback is the status of the @@ -343,7 +339,7 @@ func (o OnFinishCallOption) before(c *callInfo) error { return nil } -func (o OnFinishCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o OnFinishCallOption) after(*callInfo, *csAttempt) {} // MaxCallRecvMsgSize returns a CallOption which sets the maximum message size // in bytes the client can receive. If this is not set, gRPC uses the default @@ -367,7 +363,7 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { c.maxReceiveMessageSize = &o.MaxRecvMsgSize return nil } -func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o MaxRecvMsgSizeCallOption) after(*callInfo, *csAttempt) {} // MaxCallSendMsgSize returns a CallOption which sets the maximum message size // in bytes the client can send. If this is not set, gRPC uses the default @@ -391,7 +387,7 @@ func (o MaxSendMsgSizeCallOption) before(c *callInfo) error { c.maxSendMessageSize = &o.MaxSendMsgSize return nil } -func (o MaxSendMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o MaxSendMsgSizeCallOption) after(*callInfo, *csAttempt) {} // PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials // for a call. @@ -414,7 +410,7 @@ func (o PerRPCCredsCallOption) before(c *callInfo) error { c.creds = o.Creds return nil } -func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o PerRPCCredsCallOption) after(*callInfo, *csAttempt) {} // UseCompressor returns a CallOption which sets the compressor used when // sending the request. If WithCompressor is also set, UseCompressor has @@ -442,7 +438,7 @@ func (o CompressorCallOption) before(c *callInfo) error { c.compressorType = o.CompressorType return nil } -func (o CompressorCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o CompressorCallOption) after(*callInfo, *csAttempt) {} // CallContentSubtype returns a CallOption that will set the content-subtype // for a call. For example, if content-subtype is "json", the Content-Type over @@ -479,7 +475,7 @@ func (o ContentSubtypeCallOption) before(c *callInfo) error { c.contentSubtype = o.ContentSubtype return nil } -func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o ContentSubtypeCallOption) after(*callInfo, *csAttempt) {} // ForceCodec returns a CallOption that will set codec to be used for all // request and response messages for a call. 
The result of calling Name() will @@ -515,10 +511,50 @@ type ForceCodecCallOption struct { } func (o ForceCodecCallOption) before(c *callInfo) error { - c.codec = o.Codec + c.codec = newCodecV1Bridge(o.Codec) return nil } -func (o ForceCodecCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o ForceCodecCallOption) after(*callInfo, *csAttempt) {} + +// ForceCodecV2 returns a CallOption that will set codec to be used for all +// request and response messages for a call. The result of calling Name() will +// be used as the content-subtype after converting to lowercase, unless +// CallContentSubtype is also used. +// +// See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. Also see the documentation on RegisterCodec and +// CallContentSubtype for more details on the interaction between Codec and +// content-subtype. +// +// This function is provided for advanced users; prefer to use only +// CallContentSubtype to select a registered codec instead. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceCodecV2(codec encoding.CodecV2) CallOption { + return ForceCodecV2CallOption{CodecV2: codec} +} + +// ForceCodecV2CallOption is a CallOption that indicates the codec used for +// marshaling messages. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ForceCodecV2CallOption struct { + CodecV2 encoding.CodecV2 +} + +func (o ForceCodecV2CallOption) before(c *callInfo) error { + c.codec = o.CodecV2 + return nil +} + +func (o ForceCodecV2CallOption) after(*callInfo, *csAttempt) {} // CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of // an encoding.Codec. @@ -540,10 +576,10 @@ type CustomCodecCallOption struct { } func (o CustomCodecCallOption) before(c *callInfo) error { - c.codec = o.Codec + c.codec = newCodecV0Bridge(o.Codec) return nil } -func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o CustomCodecCallOption) after(*callInfo, *csAttempt) {} // MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory // used for buffering this RPC's requests for retry purposes. @@ -571,7 +607,7 @@ func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error { c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize return nil } -func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o MaxRetryRPCBufferSizeCallOption) after(*callInfo, *csAttempt) {} // The format of the payload: compressed or not? type payloadFormat uint8 @@ -581,19 +617,28 @@ const ( compressionMade payloadFormat = 1 // compressed ) +func (pf payloadFormat) isCompressed() bool { + return pf == compressionMade +} + +type streamReader interface { + ReadMessageHeader(header []byte) error + Read(n int) (mem.BufferSlice, error) +} + // parser reads complete gRPC messages from the underlying reader. type parser struct { // r is the underlying reader. // See the comment on recvMsg for the permissible // error types. - r io.Reader + r streamReader // The header of a gRPC message. Find more detail at // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md header [5]byte - // recvBufferPool is the pool of shared receive buffers. - recvBufferPool SharedBufferPool + // bufferPool is the pool of shared receive buffers. + bufferPool mem.BufferPool } // recvMsg reads a complete gRPC message from the stream. 
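The ForceCodecV2 option introduced in the hunk above accepts an encoding.CodecV2, which marshals directly into the mem.BufferSlice type the reworked parser below also produces. A minimal sketch of such a codec, assuming the CodecV2 interface shape (Marshal returning a mem.BufferSlice, Unmarshal consuming one, plus Name) and reusing the mem.SliceBuffer and Materialize helpers visible elsewhere in this diff; protoCodecV2 is a name invented for illustration:

// Sketch only: a hypothetical CodecV2 wrapping the standard protobuf codec.
package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
	"google.golang.org/protobuf/proto"
)

type protoCodecV2 struct{}

func (protoCodecV2) Marshal(v any) (mem.BufferSlice, error) {
	m, ok := v.(proto.Message)
	if !ok {
		return nil, fmt.Errorf("failed to marshal: %T is not a proto.Message", v)
	}
	b, err := proto.Marshal(m)
	if err != nil {
		return nil, err
	}
	// mem.SliceBuffer adapts a plain []byte to the Buffer interface
	// (the same adaptation recvAndDecompress uses for dc.Do output).
	return mem.BufferSlice{mem.SliceBuffer(b)}, nil
}

func (protoCodecV2) Unmarshal(data mem.BufferSlice, v any) error {
	m, ok := v.(proto.Message)
	if !ok {
		return fmt.Errorf("failed to unmarshal: %T is not a proto.Message", v)
	}
	// Materialize flattens a possibly multi-buffer slice into one []byte.
	return proto.Unmarshal(data.Materialize(), m)
}

func (protoCodecV2) Name() string { return "proto" }

A call could then opt in with grpc.ForceCodecV2(protoCodecV2{}) as a CallOption, with the caveat stated in the option's own docs that CallContentSubtype is the preferred way to select a registered codec.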
@@ -608,39 +653,38 @@ type parser struct { // - an error from the status package // // No other error values or types must be returned, which also means -// that the underlying io.Reader must not return an incompatible +// that the underlying streamReader must not return an incompatible // error. -func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) { - if _, err := p.r.Read(p.header[:]); err != nil { +func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSlice, error) { + err := p.r.ReadMessageHeader(p.header[:]) + if err != nil { return 0, nil, err } - pf = payloadFormat(p.header[0]) + pf := payloadFormat(p.header[0]) length := binary.BigEndian.Uint32(p.header[1:]) - if length == 0 { - return pf, nil, nil - } if int64(length) > int64(maxInt) { return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt) } if int(length) > maxReceiveMessageSize { return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) } - msg = p.recvBufferPool.Get(int(length)) - if _, err := p.r.Read(msg); err != nil { + + data, err := p.r.Read(int(length)) + if err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF } return 0, nil, err } - return pf, msg, nil + return pf, data, nil } // encode serializes msg and returns a buffer containing the message, or an // error if it is too large to be transmitted by grpc. If msg is nil, it // generates an empty message. -func encode(c baseCodec, msg any) ([]byte, error) { +func encode(c baseCodec, msg any) (mem.BufferSlice, error) { if msg == nil { // NOTE: typed nils will not be caught by this check return nil, nil } @@ -648,7 +692,8 @@ func encode(c baseCodec, msg any) ([]byte, error) { if err != nil { return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) } - if uint(len(b)) > math.MaxUint32 { + if uint(b.Len()) > math.MaxUint32 { + b.Free() return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b)) } return b, nil @@ -659,34 +704,41 @@ func encode(c baseCodec, msg any) ([]byte, error) { // indicating no compression was done. // // TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor. 
-func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) { - if compressor == nil && cp == nil { - return nil, nil - } - if len(in) == 0 { - return nil, nil +func compress(in mem.BufferSlice, cp Compressor, compressor encoding.Compressor, pool mem.BufferPool) (mem.BufferSlice, payloadFormat, error) { + if (compressor == nil && cp == nil) || in.Len() == 0 { + return nil, compressionNone, nil } + var out mem.BufferSlice + w := mem.NewWriter(&out, pool) wrapErr := func(err error) error { + out.Free() return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) } - cbuf := &bytes.Buffer{} if compressor != nil { - z, err := compressor.Compress(cbuf) + z, err := compressor.Compress(w) if err != nil { - return nil, wrapErr(err) + return nil, 0, wrapErr(err) } - if _, err := z.Write(in); err != nil { - return nil, wrapErr(err) + for _, b := range in { + if _, err := z.Write(b.ReadOnlyData()); err != nil { + return nil, 0, wrapErr(err) + } } if err := z.Close(); err != nil { - return nil, wrapErr(err) + return nil, 0, wrapErr(err) } } else { - if err := cp.Do(cbuf, in); err != nil { - return nil, wrapErr(err) + // This is obviously really inefficient since it fully materializes the data, but + // there is no way around this with the old Compressor API. At least it attempts + // to return the buffer to the provider, in the hopes it can be reused (maybe + // even by a subsequent call to this very function). + buf := in.MaterializeToBuffer(pool) + defer buf.Free() + if err := cp.Do(w, buf.ReadOnlyData()); err != nil { + return nil, 0, wrapErr(err) } } - return cbuf.Bytes(), nil + return out, compressionMade, nil } const ( @@ -697,33 +749,36 @@ const ( // msgHeader returns a 5-byte header for the message being transmitted and the // payload, which is compData if non-nil or data otherwise. 
-func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { +func msgHeader(data, compData mem.BufferSlice, pf payloadFormat) (hdr []byte, payload mem.BufferSlice) { hdr = make([]byte, headerLen) - if compData != nil { - hdr[0] = byte(compressionMade) - data = compData + hdr[0] = byte(pf) + + var length uint32 + if pf.isCompressed() { + length = uint32(compData.Len()) + payload = compData } else { - hdr[0] = byte(compressionNone) + length = uint32(data.Len()) + payload = data } // Write length of payload into buf - binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data))) - return hdr, data + binary.BigEndian.PutUint32(hdr[payloadLen:], length) + return hdr, payload } -func outPayload(client bool, msg any, data, payload []byte, t time.Time) *stats.OutPayload { +func outPayload(client bool, msg any, dataLength, payloadLength int, t time.Time) *stats.OutPayload { return &stats.OutPayload{ Client: client, Payload: msg, - Data: data, - Length: len(data), - WireLength: len(payload) + headerLen, - CompressedLength: len(payload), + Length: dataLength, + WireLength: payloadLength + headerLen, + CompressedLength: payloadLength, SentTime: t, } } -func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status { +func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool, isServer bool) *status.Status { switch pf { case compressionNone: case compressionMade: @@ -731,7 +786,10 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding") } if !haveCompressor { - return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + if isServer { + return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + } + return status.Newf(codes.Internal, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) } default: return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf) @@ -741,104 +799,110 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool type payloadInfo struct { compressedLength int // The compressed length got from wire. - uncompressedBytes []byte + uncompressedBytes mem.BufferSlice +} + +func (p *payloadInfo) free() { + if p != nil && p.uncompressedBytes != nil { + p.uncompressedBytes.Free() + } } // recvAndDecompress reads a message from the stream, decompressing it if necessary. // // Cancelling the returned cancel function releases the buffer back to the pool. So the caller should cancel as soon as // the buffer is no longer needed. -func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, -) (uncompressedBuf []byte, cancel func(), err error) { - pf, compressedBuf, err := p.recvMsg(maxReceiveMessageSize) +// TODO: Refactor this function to reduce the number of arguments. 
+// See: https://google.github.io/styleguide/go/best-practices.html#function-argument-lists +func recvAndDecompress(p *parser, s recvCompressor, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool, +) (out mem.BufferSlice, err error) { + pf, compressed, err := p.recvMsg(maxReceiveMessageSize) if err != nil { - return nil, nil, err + return nil, err } - if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { - return nil, nil, st.Err() + compressedLength := compressed.Len() + + if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil, isServer); st != nil { + compressed.Free() + return nil, st.Err() } var size int - if pf == compressionMade { + if pf.isCompressed() { + defer compressed.Free() + // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, // use this decompressor as the default. if dc != nil { - uncompressedBuf, err = dc.Do(bytes.NewReader(compressedBuf)) + var uncompressedBuf []byte + uncompressedBuf, err = dc.Do(compressed.Reader()) + if err == nil { + out = mem.BufferSlice{mem.SliceBuffer(uncompressedBuf)} + } size = len(uncompressedBuf) } else { - uncompressedBuf, size, err = decompress(compressor, compressedBuf, maxReceiveMessageSize) + out, size, err = decompress(compressor, compressed, maxReceiveMessageSize, p.bufferPool) } if err != nil { - return nil, nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) } if size > maxReceiveMessageSize { + out.Free() // TODO: Revisit the error code. Currently keep it consistent with java // implementation. - return nil, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) + return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) } } else { - uncompressedBuf = compressedBuf + out = compressed } if payInfo != nil { - payInfo.compressedLength = len(compressedBuf) - payInfo.uncompressedBytes = uncompressedBuf - - cancel = func() {} - } else { - cancel = func() { - p.recvBufferPool.Put(&compressedBuf) - } + payInfo.compressedLength = compressedLength + out.Ref() + payInfo.uncompressedBytes = out } - return uncompressedBuf, cancel, nil + return out, nil } // Using compressor, decompress d, returning data and size. // Optionally, if data will be over maxReceiveMessageSize, just return the size. -func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize int) ([]byte, int, error) { - dcReader, err := compressor.Decompress(bytes.NewReader(d)) +func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMessageSize int, pool mem.BufferPool) (mem.BufferSlice, int, error) { + dcReader, err := compressor.Decompress(d.Reader()) if err != nil { return nil, 0, err } - if sizer, ok := compressor.(interface { - DecompressedSize(compressedBytes []byte) int - }); ok { - if size := sizer.DecompressedSize(d); size >= 0 { - if size > maxReceiveMessageSize { - return nil, size, nil - } - // size is used as an estimate to size the buffer, but we - // will read more data if available. - // +MinRead so ReadFrom will not reallocate if size is correct. 
- // - // TODO: If we ensure that the buffer size is the same as the DecompressedSize, - // we can also utilize the recv buffer pool here. - buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) - bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) - return buf.Bytes(), int(bytesRead), err - } + + out, err := mem.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1), pool) + if err != nil { + out.Free() + return nil, 0, err } - // Read from LimitReader with limit max+1. So if the underlying - // reader is over limit, the result will be bigger than max. - d, err = io.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) - return d, len(d), err + return out, out.Len(), nil +} + +type recvCompressor interface { + RecvCompress() string } // For the two compressor parameters, both should not be set, but if they are, // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? -func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { - buf, cancel, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) +func recv(p *parser, c baseCodec, s recvCompressor, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error { + data, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor, isServer) if err != nil { return err } - defer cancel() - if err := c.Unmarshal(buf, m); err != nil { + // If the codec wants its own reference to the data, it can get it. Otherwise, always + // free the buffers. + defer data.Free() + + if err := c.Unmarshal(data, m); err != nil { return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err) } + return nil } @@ -941,7 +1005,7 @@ func setCallInfoCodec(c *callInfo) error { // encoding.Codec (Name vs. String method name). We only support // setting content subtype from encoding.Codec to avoid a behavior // change with the deprecated version. - if ec, ok := c.codec.(encoding.Codec); ok { + if ec, ok := c.codec.(encoding.CodecV2); ok { c.contentSubtype = strings.ToLower(ec.Name()) } } @@ -950,12 +1014,12 @@ func setCallInfoCodec(c *callInfo) error { if c.contentSubtype == "" { // No codec specified in CallOptions; use proto by default. 
- c.codec = encoding.GetCodec(proto.Name) + c.codec = getCodec(proto.Name) return nil } // c.contentSubtype is already lowercased in CallContentSubtype - c.codec = encoding.GetCodec(c.contentSubtype) + c.codec = getCodec(c.contentSubtype) if c.codec == nil { return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype) } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 89f8e4792bf..16065a027ae 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -45,6 +45,7 @@ import ( "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -80,18 +81,19 @@ func init() { } internal.BinaryLogger = binaryLogger internal.JoinServerOptions = newJoinServerOption - internal.RecvBufferPool = recvBufferPool + internal.BufferPool = bufferPool } var statusOK = status.New(codes.OK, "") var logger = grpclog.Component("core") -type methodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error) +// MethodHandler is a function type that processes a unary RPC method call. +type MethodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error) // MethodDesc represents an RPC service's method specification. type MethodDesc struct { MethodName string - Handler methodHandler + Handler MethodHandler } // ServiceDesc represents an RPC service's specification. @@ -170,7 +172,7 @@ type serverOptions struct { maxHeaderListSize *uint32 headerTableSize *uint32 numServerWorkers uint32 - recvBufferPool SharedBufferPool + bufferPool mem.BufferPool waitForHandlers bool } @@ -181,7 +183,7 @@ var defaultServerOptions = serverOptions{ connectionTimeout: 120 * time.Second, writeBufferSize: defaultWriteBufSize, readBufferSize: defaultReadBufSize, - recvBufferPool: nopBufferPool{}, + bufferPool: mem.DefaultBufferPool(), } var globalServerOptions []ServerOption @@ -313,7 +315,7 @@ func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption { // Will be supported throughout 1.x. func CustomCodec(codec Codec) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.codec = codec + o.codec = newCodecV0Bridge(codec) }) } @@ -342,7 +344,22 @@ func CustomCodec(codec Codec) ServerOption { // later release. func ForceServerCodec(codec encoding.Codec) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.codec = codec + o.codec = newCodecV1Bridge(codec) + }) +} + +// ForceServerCodecV2 is the equivalent of ForceServerCodec, but for the new +// CodecV2 interface. +// +// Will be supported throughout 1.x. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceServerCodecV2(codecV2 encoding.CodecV2) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.codec = codecV2 }) } @@ -592,26 +609,9 @@ func WaitForHandlers(w bool) ServerOption { }) } -// RecvBufferPool returns a ServerOption that configures the server -// to use the provided shared buffer pool for parsing incoming messages. Depending -// on the application's workload, this could result in reduced memory allocation. 
-// -// If you are unsure about how to implement a memory pool but want to utilize one, -// begin with grpc.NewSharedBufferPool. -// -// Note: The shared buffer pool feature will not be active if any of the following -// options are used: StatsHandler, EnableTracing, or binary logging. In such -// cases, the shared buffer pool will be ignored. -// -// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in -// v1.60.0 or later. -func RecvBufferPool(bufferPool SharedBufferPool) ServerOption { - return recvBufferPool(bufferPool) -} - -func recvBufferPool(bufferPool SharedBufferPool) ServerOption { +func bufferPool(bufferPool mem.BufferPool) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.recvBufferPool = bufferPool + o.bufferPool = bufferPool }) } @@ -622,8 +622,8 @@ func recvBufferPool(bufferPool SharedBufferPool) ServerOption { // workload (assuming a QPS of a few thousand requests/sec). const serverWorkerResetThreshold = 1 << 16 -// serverWorkers blocks on a *transport.Stream channel forever and waits for -// data to be fed by serveStreams. This allows multiple requests to be +// serverWorker blocks on a *transport.ServerStream channel forever and waits +// for data to be fed by serveStreams. This allows multiple requests to be // processed by the same goroutine, removing the need for expensive stack // re-allocations (see the runtime.morestack problem [1]). // @@ -980,6 +980,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { ChannelzParent: s.channelz, MaxHeaderListSize: s.opts.maxHeaderListSize, HeaderTableSize: s.opts.headerTableSize, + BufferPool: s.opts.bufferPool, } st, err := transport.NewServerTransport(c, config) if err != nil { @@ -1020,7 +1021,7 @@ func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, }() streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) - st.HandleStreams(ctx, func(stream *transport.Stream) { + st.HandleStreams(ctx, func(stream *transport.ServerStream) { s.handlersWG.Add(1) streamQuota.acquire() f := func() { @@ -1072,7 +1073,7 @@ var _ http.Handler = (*Server)(nil) // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { - st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers) + st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers, s.opts.bufferPool) if err != nil { // Errors returned from transport.NewServerHandlerTransport have // already been written to w. 
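The hunks above replace the deprecated SharedBufferPool plumbing with mem.BufferPool, threading s.opts.bufferPool into the HTTP/2 transport and the ServeHTTP handler. A sketch of the reference-counting contract the later hunks rely on (payInfo.free, out.Ref, d.Free), assuming mem.BufferPool exposes Get(int) *[]byte and that mem.NewBuffer ties a pooled slice to its pool:

package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	pool := mem.DefaultBufferPool()

	// Borrow a pooled backing array and wrap it in a ref-counted Buffer.
	data := pool.Get(1024)
	copy(*data, "payload bytes")
	buf := mem.NewBuffer(data, pool)

	buf.Ref() // a second holder, e.g. a stats handler keeping the payload

	fmt.Println(len(buf.ReadOnlyData())) // 1024

	buf.Free() // releases the second reference
	buf.Free() // last reference: the backing array returns to the pool
}

Each Free drops one reference and only the final one recycles the memory, which is why sendResponse below defers Free on both data and compData but not on payload, payload being an alias of one of the two.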
@@ -1136,26 +1137,41 @@ func (s *Server) incrCallsFailed() { s.channelz.ServerMetrics.CallsFailed.Add(1) } -func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { +func (s *Server) sendResponse(ctx context.Context, stream *transport.ServerStream, msg any, cp Compressor, opts *transport.WriteOptions, comp encoding.Compressor) error { data, err := encode(s.getCodec(stream.ContentSubtype()), msg) if err != nil { channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err) return err } - compData, err := compress(data, cp, comp) + + compData, pf, err := compress(data, cp, comp, s.opts.bufferPool) if err != nil { + data.Free() channelz.Error(logger, s.channelz, "grpc: server failed to compress response: ", err) return err } - hdr, payload := msgHeader(data, compData) + + hdr, payload := msgHeader(data, compData, pf) + + defer func() { + compData.Free() + data.Free() + // payload does not need to be freed here, it is either data or compData, both of + // which are already freed. + }() + + dataLen := data.Len() + payloadLen := payload.Len() // TODO(dfawley): should we be checking len(data) instead? - if len(payload) > s.opts.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize) + if payloadLen > s.opts.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", payloadLen, s.opts.maxSendMessageSize) } - err = t.Write(stream, hdr, payload, opts) + err = stream.Write(hdr, payload, opts) if err == nil { - for _, sh := range s.opts.statsHandlers { - sh.HandleRPC(ctx, outPayload(false, msg, data, payload, time.Now())) + if len(s.opts.statsHandlers) != 0 { + for _, sh := range s.opts.statsHandlers { + sh.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now())) + } } } return err @@ -1197,7 +1213,7 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info } } -func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { +func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { shs := s.opts.statsHandlers if len(shs) != 0 || trInfo != nil || channelz.IsOn() { if channelz.IsOn() { @@ -1305,7 +1321,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor decomp = encoding.GetCompressor(rc) if decomp == nil { st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) - t.WriteStatus(stream, st) + stream.WriteStatus(st) return st.Err() } } @@ -1334,37 +1350,34 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor var payInfo *payloadInfo if len(shs) != 0 || len(binlogs) != 0 { payInfo = &payloadInfo{} + defer payInfo.free() } - d, cancel, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) + d, err := recvAndDecompress(&parser{r: stream, bufferPool: s.opts.bufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp, true) if err != nil { - if e := t.WriteStatus(stream, status.Convert(err)); e != nil { + if e := 
stream.WriteStatus(status.Convert(err)); e != nil { channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } return err } - if channelz.IsOn() { - t.IncrMsgRecv() - } + defer d.Free() df := func(v any) error { - defer cancel() - if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } + for _, sh := range shs { sh.HandleRPC(ctx, &stats.InPayload{ RecvTime: time.Now(), Payload: v, - Length: len(d), + Length: d.Len(), WireLength: payInfo.compressedLength + headerLen, CompressedLength: payInfo.compressedLength, - Data: d, }) } if len(binlogs) != 0 { cm := &binarylog.ClientMessage{ - Message: d, + Message: d.Materialize(), } for _, binlog := range binlogs { binlog.Log(ctx, cm) @@ -1389,7 +1402,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor trInfo.tr.LazyLog(stringer(appStatus.Message()), true) trInfo.tr.SetError() } - if e := t.WriteStatus(stream, appStatus); e != nil { + if e := stream.WriteStatus(appStatus); e != nil { channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } if len(binlogs) != 0 { @@ -1416,20 +1429,20 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor if trInfo != nil { trInfo.tr.LazyLog(stringer("OK"), false) } - opts := &transport.Options{Last: true} + opts := &transport.WriteOptions{Last: true} // Server handler could have set new compressor by calling SetSendCompressor. // In case it is set, we need to use it for compressing outbound message. if stream.SendCompress() != sendCompressorName { comp = encoding.GetCompressor(stream.SendCompress()) } - if err := s.sendResponse(ctx, t, stream, reply, cp, opts, comp); err != nil { + if err := s.sendResponse(ctx, stream, reply, cp, opts, comp); err != nil { if err == io.EOF { // The entire stream is done (for unary RPC only). return err } if sts, ok := status.FromError(err); ok { - if e := t.WriteStatus(stream, sts); e != nil { + if e := stream.WriteStatus(sts); e != nil { channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } } else { @@ -1469,9 +1482,6 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor binlog.Log(ctx, sm) } } - if channelz.IsOn() { - t.IncrMsgSent() - } if trInfo != nil { trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) } @@ -1487,7 +1497,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor binlog.Log(ctx, st) } } - return t.WriteStatus(stream, statusOK) + return stream.WriteStatus(statusOK) } // chainStreamServerInterceptors chains all stream server interceptors into one. 
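msgHeader in the hunks above emits gRPC's length-prefixed wire framing: one payloadFormat byte followed by a big-endian uint32 payload length, for a headerLen of 5 as described in PROTOCOL-HTTP2.md. A self-contained sketch of that layout, with frame as an invented helper name:

package main

import (
	"encoding/binary"
	"fmt"
)

// frame prepends the 5-byte gRPC message header: a compressed-flag byte
// (0 = compressionNone, 1 = compressionMade) and a big-endian length.
func frame(payload []byte, compressed bool) []byte {
	hdr := make([]byte, 5)
	if compressed {
		hdr[0] = 1
	}
	binary.BigEndian.PutUint32(hdr[1:], uint32(len(payload)))
	return append(hdr, payload...)
}

func main() {
	fmt.Printf("% x\n", frame([]byte("abc"), false)) // 00 00 00 00 03 61 62 63
}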
@@ -1526,7 +1536,7 @@ func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, inf } } -func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { +func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { if channelz.IsOn() { s.incrCallsStarted() } @@ -1546,9 +1556,8 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran ctx = NewContextWithServerTransportStream(ctx, stream) ss := &serverStream{ ctx: ctx, - t: t, s: stream, - p: &parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, + p: &parser{r: stream, bufferPool: s.opts.bufferPool}, codec: s.getCodec(stream.ContentSubtype()), maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, @@ -1633,7 +1642,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran ss.decomp = encoding.GetCompressor(rc) if ss.decomp == nil { st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) - t.WriteStatus(ss.s, st) + ss.s.WriteStatus(st) return st.Err() } } @@ -1702,7 +1711,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran binlog.Log(ctx, st) } } - t.WriteStatus(ss.s, appStatus) + ss.s.WriteStatus(appStatus) // TODO: Should we log an error from WriteStatus here and below? return appErr } @@ -1720,10 +1729,10 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran binlog.Log(ctx, st) } } - return t.WriteStatus(ss.s, statusOK) + return ss.s.WriteStatus(statusOK) } -func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) { +func (s *Server) handleStream(t transport.ServerTransport, stream *transport.ServerStream) { ctx := stream.Context() ctx = contextWithServer(ctx, s) var ti *traceInfo @@ -1753,7 +1762,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str ti.tr.SetError() } errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) - if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { + if err := stream.WriteStatus(status.New(codes.Unimplemented, errDesc)); err != nil { if ti != nil { ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ti.tr.SetError() @@ -1768,17 +1777,20 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str service := sm[:pos] method := sm[pos+1:] - md, _ := metadata.FromIncomingContext(ctx) - for _, sh := range s.opts.statsHandlers { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) - sh.HandleRPC(ctx, &stats.InHeader{ - FullMethod: stream.Method(), - RemoteAddr: t.Peer().Addr, - LocalAddr: t.Peer().LocalAddr, - Compression: stream.RecvCompress(), - WireLength: stream.HeaderWireLength(), - Header: md, - }) + // FromIncomingContext is expensive: skip if there are no statsHandlers + if len(s.opts.statsHandlers) > 0 { + md, _ := metadata.FromIncomingContext(ctx) + for _, sh := range s.opts.statsHandlers { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) + sh.HandleRPC(ctx, &stats.InHeader{ + FullMethod: stream.Method(), + RemoteAddr: t.Peer().Addr, + LocalAddr: t.Peer().LocalAddr, + Compression: stream.RecvCompress(), + WireLength: stream.HeaderWireLength(), + Header: md, + }) + } } // To have calls in 
stream callouts work. Will delete once all stats handler // calls come from the gRPC layer. @@ -1787,17 +1799,17 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str srv, knownService := s.services[service] if knownService { if md, ok := srv.methods[method]; ok { - s.processUnaryRPC(ctx, t, stream, srv, md, ti) + s.processUnaryRPC(ctx, stream, srv, md, ti) return } if sd, ok := srv.streams[method]; ok { - s.processStreamingRPC(ctx, t, stream, srv, sd, ti) + s.processStreamingRPC(ctx, stream, srv, sd, ti) return } } // Unknown service, or known server unknown method. if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { - s.processStreamingRPC(ctx, t, stream, nil, unknownDesc, ti) + s.processStreamingRPC(ctx, stream, nil, unknownDesc, ti) return } var errDesc string @@ -1810,7 +1822,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str ti.tr.LazyPrintf("%s", errDesc) ti.tr.SetError() } - if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { + if err := stream.WriteStatus(status.New(codes.Unimplemented, errDesc)); err != nil { if ti != nil { ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ti.tr.SetError() @@ -1963,12 +1975,12 @@ func (s *Server) getCodec(contentSubtype string) baseCodec { return s.opts.codec } if contentSubtype == "" { - return encoding.GetCodec(proto.Name) + return getCodec(proto.Name) } - codec := encoding.GetCodec(contentSubtype) + codec := getCodec(contentSubtype) if codec == nil { logger.Warningf("Unsupported codec %q. Defaulting to %q for now. This will start to fail in future releases.", contentSubtype, proto.Name) - return encoding.GetCodec(proto.Name) + return getCodec(proto.Name) } return codec } @@ -2085,7 +2097,7 @@ func SendHeader(ctx context.Context, md metadata.MD) error { // Notice: This function is EXPERIMENTAL and may be changed or removed in a // later release. func SetSendCompressor(ctx context.Context, name string) error { - stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.ServerStream) if !ok || stream == nil { return fmt.Errorf("failed to fetch the stream from the given context") } @@ -2107,7 +2119,7 @@ func SetSendCompressor(ctx context.Context, name string) error { // Notice: This function is EXPERIMENTAL and may be changed or removed in a // later release. 
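Both experimental helpers above now type-assert `*transport.ServerStream` from the context (`ClientSupportedCompressors` continues directly below). A hypothetical handler-side use of the pair, assuming the gzip compressor has been registered; only the two grpc calls are taken from the API in this diff:

```go
import (
	"context"

	"google.golang.org/grpc"
	_ "google.golang.org/grpc/encoding/gzip" // registers the "gzip" compressor
)

// negotiateCompression is an illustrative helper name, not part of the diff.
func negotiateCompression(ctx context.Context) {
	names, err := grpc.ClientSupportedCompressors(ctx)
	if err != nil {
		return // not running inside a server RPC context
	}
	for _, name := range names {
		if name == "gzip" {
			// Best effort: overrides the outbound compressor for this RPC only.
			_ = grpc.SetSendCompressor(ctx, "gzip")
			return
		}
	}
}
```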
func ClientSupportedCompressors(ctx context.Context) ([]string, error) { - stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.ServerStream) if !ok || stream == nil { return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx) } diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index 2671c5ef69f..7e83027d199 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -168,6 +168,7 @@ func init() { return parseServiceConfig(js, defaultMaxCallAttempts) } } + func parseServiceConfig(js string, maxAttempts int) *serviceconfig.ParseResult { if len(js) == 0 { return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")} @@ -297,7 +298,7 @@ func convertRetryPolicy(jrp *jsonRetryPolicy, maxAttempts int) (p *internalservi return rp, nil } -func min(a, b *int) *int { +func minPointers(a, b *int) *int { if *a < *b { return a } @@ -309,7 +310,7 @@ func getMaxSize(mcMax, doptMax *int, defaultVal int) *int { return &defaultVal } if mcMax != nil && doptMax != nil { - return min(mcMax, doptMax) + return minPointers(mcMax, doptMax) } if mcMax != nil { return mcMax diff --git a/vendor/google.golang.org/grpc/shared_buffer_pool.go b/vendor/google.golang.org/grpc/shared_buffer_pool.go deleted file mode 100644 index 48a64cfe8e2..00000000000 --- a/vendor/google.golang.org/grpc/shared_buffer_pool.go +++ /dev/null @@ -1,154 +0,0 @@ -/* - * - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import "sync" - -// SharedBufferPool is a pool of buffers that can be shared, resulting in -// decreased memory allocation. Currently, in gRPC-go, it is only utilized -// for parsing incoming messages. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -type SharedBufferPool interface { - // Get returns a buffer with specified length from the pool. - // - // The returned byte slice may be not zero initialized. - Get(length int) []byte - - // Put returns a buffer to the pool. - Put(*[]byte) -} - -// NewSharedBufferPool creates a simple SharedBufferPool with buckets -// of different sizes to optimize memory usage. This prevents the pool from -// wasting large amounts of memory, even when handling messages of varying sizes. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -func NewSharedBufferPool() SharedBufferPool { - return &simpleSharedBufferPool{ - pools: [poolArraySize]simpleSharedBufferChildPool{ - newBytesPool(level0PoolMaxSize), - newBytesPool(level1PoolMaxSize), - newBytesPool(level2PoolMaxSize), - newBytesPool(level3PoolMaxSize), - newBytesPool(level4PoolMaxSize), - newBytesPool(0), - }, - } -} - -// simpleSharedBufferPool is a simple implementation of SharedBufferPool. 
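The file deleted here implemented size-bucketed buffer recycling on top of sync.Pool; the `mem.BufferPool` plumbing visible in the hunks above supersedes it. A compact sketch of the bucketing idea, using the same 16 B–1 MB levels as the removed code (type and function names are illustrative):

```go
package main

import "sync"

var bucketSizes = []int{16, 256, 4 << 10, 64 << 10, 1 << 20}

type tieredPool struct{ pools []sync.Pool }

func newTieredPool() *tieredPool {
	p := &tieredPool{pools: make([]sync.Pool, len(bucketSizes))}
	for i, size := range bucketSizes {
		size := size // capture per-iteration value for the closure
		p.pools[i].New = func() any { b := make([]byte, size); return &b }
	}
	return p
}

// Get returns a buffer of at least the requested length from the smallest
// bucket that fits, falling back to a plain allocation above the largest.
func (p *tieredPool) Get(length int) []byte {
	for i, size := range bucketSizes {
		if length <= size {
			buf := p.pools[i].Get().(*[]byte)
			if cap(*buf) < length { // guard against undersized recycled buffers
				p.pools[i].Put(buf)
				return make([]byte, length)
			}
			return (*buf)[:length]
		}
	}
	return make([]byte, length)
}

// Put files the buffer under the bucket matching its capacity.
func (p *tieredPool) Put(buf *[]byte) {
	for i := range bucketSizes {
		if cap(*buf) <= bucketSizes[i] {
			p.pools[i].Put(buf)
			return
		}
	}
}
```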
-type simpleSharedBufferPool struct { - pools [poolArraySize]simpleSharedBufferChildPool -} - -func (p *simpleSharedBufferPool) Get(size int) []byte { - return p.pools[p.poolIdx(size)].Get(size) -} - -func (p *simpleSharedBufferPool) Put(bs *[]byte) { - p.pools[p.poolIdx(cap(*bs))].Put(bs) -} - -func (p *simpleSharedBufferPool) poolIdx(size int) int { - switch { - case size <= level0PoolMaxSize: - return level0PoolIdx - case size <= level1PoolMaxSize: - return level1PoolIdx - case size <= level2PoolMaxSize: - return level2PoolIdx - case size <= level3PoolMaxSize: - return level3PoolIdx - case size <= level4PoolMaxSize: - return level4PoolIdx - default: - return levelMaxPoolIdx - } -} - -const ( - level0PoolMaxSize = 16 // 16 B - level1PoolMaxSize = level0PoolMaxSize * 16 // 256 B - level2PoolMaxSize = level1PoolMaxSize * 16 // 4 KB - level3PoolMaxSize = level2PoolMaxSize * 16 // 64 KB - level4PoolMaxSize = level3PoolMaxSize * 16 // 1 MB -) - -const ( - level0PoolIdx = iota - level1PoolIdx - level2PoolIdx - level3PoolIdx - level4PoolIdx - levelMaxPoolIdx - poolArraySize -) - -type simpleSharedBufferChildPool interface { - Get(size int) []byte - Put(any) -} - -type bufferPool struct { - sync.Pool - - defaultSize int -} - -func (p *bufferPool) Get(size int) []byte { - bs := p.Pool.Get().(*[]byte) - - if cap(*bs) < size { - p.Pool.Put(bs) - - return make([]byte, size) - } - - return (*bs)[:size] -} - -func newBytesPool(size int) simpleSharedBufferChildPool { - return &bufferPool{ - Pool: sync.Pool{ - New: func() any { - bs := make([]byte, size) - return &bs - }, - }, - defaultSize: size, - } -} - -// nopBufferPool is a buffer pool just makes new buffer without pooling. -type nopBufferPool struct { -} - -func (nopBufferPool) Get(length int) []byte { - return make([]byte, length) -} - -func (nopBufferPool) Put(*[]byte) { -} diff --git a/vendor/google.golang.org/grpc/stats/metrics.go b/vendor/google.golang.org/grpc/stats/metrics.go new file mode 100644 index 00000000000..641c8e9794a --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/metrics.go @@ -0,0 +1,81 @@ +/* + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package stats + +import "maps" + +// MetricSet is a set of metrics to record. Once created, MetricSet is immutable, +// however Add and Remove can make copies with specific metrics added or +// removed, respectively. +// +// Do not construct directly; use NewMetricSet instead. +type MetricSet struct { + // metrics are the set of metrics to initialize. + metrics map[string]bool +} + +// NewMetricSet returns a MetricSet containing metricNames. +func NewMetricSet(metricNames ...string) *MetricSet { + newMetrics := make(map[string]bool) + for _, metric := range metricNames { + newMetrics[metric] = true + } + return &MetricSet{metrics: newMetrics} +} + +// Metrics returns the metrics set. The returned map is read-only and must not +// be modified. 
+func (m *MetricSet) Metrics() map[string]bool { + return m.metrics +} + +// Add adds the metricNames to the metrics set and returns a new copy with the +// additional metrics. +func (m *MetricSet) Add(metricNames ...string) *MetricSet { + newMetrics := make(map[string]bool) + for metric := range m.metrics { + newMetrics[metric] = true + } + + for _, metric := range metricNames { + newMetrics[metric] = true + } + return &MetricSet{metrics: newMetrics} +} + +// Join joins the metrics passed in with the metrics set, and returns a new copy +// with the merged metrics. +func (m *MetricSet) Join(metrics *MetricSet) *MetricSet { + newMetrics := make(map[string]bool) + maps.Copy(newMetrics, m.metrics) + maps.Copy(newMetrics, metrics.metrics) + return &MetricSet{metrics: newMetrics} +} + +// Remove removes the metricNames from the metrics set and returns a new copy +// with the metrics removed. +func (m *MetricSet) Remove(metricNames ...string) *MetricSet { + newMetrics := make(map[string]bool) + for metric := range m.metrics { + newMetrics[metric] = true + } + + for _, metric := range metricNames { + delete(newMetrics, metric) + } + return &MetricSet{metrics: newMetrics} +} diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index fdb0bd65182..6f20d2d5486 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -77,9 +77,6 @@ type InPayload struct { // the call to HandleRPC which provides the InPayload returns and must be // copied if needed later. Payload any - // Data is the serialized message payload. - // Deprecated: Data will be removed in the next release. - Data []byte // Length is the size of the uncompressed payload data. Does not include any // framing (gRPC or HTTP/2). @@ -150,9 +147,6 @@ type OutPayload struct { // the call to HandleRPC which provides the OutPayload returns and must be // copied if needed later. Payload any - // Data is the serialized message payload. - // Deprecated: Data will be removed in the next release. - Data []byte // Length is the size of the uncompressed payload data. Does not include any // framing (gRPC or HTTP/2). Length int @@ -266,84 +260,42 @@ func (s *ConnEnd) IsClient() bool { return s.Client } func (s *ConnEnd) isConnStats() {} -type incomingTagsKey struct{} -type outgoingTagsKey struct{} - // SetTags attaches stats tagging data to the context, which will be sent in // the outgoing RPC with the header grpc-tags-bin. Subsequent calls to // SetTags will overwrite the values from earlier calls. // -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. +// Deprecated: set the `grpc-tags-bin` header in the metadata instead. func SetTags(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, outgoingTagsKey{}, b) + return metadata.AppendToOutgoingContext(ctx, "grpc-tags-bin", string(b)) } // Tags returns the tags from the context for the inbound RPC. // -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. +// Deprecated: obtain the `grpc-tags-bin` header from metadata instead. 
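As the deprecation notices here say, the rewritten SetTags/Tags are now thin wrappers over ordinary metadata. A sketch of doing the same directly — the header key and the "last value wins" read come straight from the hunks above, while the function names are illustrative:

```go
import (
	"context"

	"google.golang.org/grpc/metadata"
)

// attachTags: client side, equivalent to the rewritten SetTags.
func attachTags(ctx context.Context, tagBytes []byte) context.Context {
	return metadata.AppendToOutgoingContext(ctx, "grpc-tags-bin", string(tagBytes))
}

// readTags: server side, equivalent to the rewritten Tags.
func readTags(ctx context.Context) []byte {
	vals := metadata.ValueFromIncomingContext(ctx, "grpc-tags-bin")
	if len(vals) == 0 {
		return nil
	}
	return []byte(vals[len(vals)-1]) // last value wins
}
```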
func Tags(ctx context.Context) []byte { - b, _ := ctx.Value(incomingTagsKey{}).([]byte) - return b -} - -// SetIncomingTags attaches stats tagging data to the context, to be read by -// the application (not sent in outgoing RPCs). -// -// This is intended for gRPC-internal use ONLY. -func SetIncomingTags(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, incomingTagsKey{}, b) -} - -// OutgoingTags returns the tags from the context for the outbound RPC. -// -// This is intended for gRPC-internal use ONLY. -func OutgoingTags(ctx context.Context) []byte { - b, _ := ctx.Value(outgoingTagsKey{}).([]byte) - return b + traceValues := metadata.ValueFromIncomingContext(ctx, "grpc-tags-bin") + if len(traceValues) == 0 { + return nil + } + return []byte(traceValues[len(traceValues)-1]) } -type incomingTraceKey struct{} -type outgoingTraceKey struct{} - // SetTrace attaches stats tagging data to the context, which will be sent in // the outgoing RPC with the header grpc-trace-bin. Subsequent calls to // SetTrace will overwrite the values from earlier calls. // -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. +// Deprecated: set the `grpc-trace-bin` header in the metadata instead. func SetTrace(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, outgoingTraceKey{}, b) + return metadata.AppendToOutgoingContext(ctx, "grpc-trace-bin", string(b)) } // Trace returns the trace from the context for the inbound RPC. // -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. +// Deprecated: obtain the `grpc-trace-bin` header from metadata instead. func Trace(ctx context.Context) []byte { - b, _ := ctx.Value(incomingTraceKey{}).([]byte) - return b -} - -// SetIncomingTrace attaches stats tagging data to the context, to be read by -// the application (not sent in outgoing RPCs). It is intended for -// gRPC-internal use. -func SetIncomingTrace(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, incomingTraceKey{}, b) -} - -// OutgoingTrace returns the trace from the context for the outbound RPC. It is -// intended for gRPC-internal use. 
-func OutgoingTrace(ctx context.Context) []byte { - b, _ := ctx.Value(outgoingTraceKey{}).([]byte) - return b + traceValues := metadata.ValueFromIncomingContext(ctx, "grpc-trace-bin") + if len(traceValues) == 0 { + return nil + } + return []byte(traceValues[len(traceValues)-1]) } diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 8051ef5b514..17e2267b332 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -23,7 +23,7 @@ import ( "errors" "io" "math" - "math/rand" + rand "math/rand/v2" "strconv" "sync" "time" @@ -41,6 +41,7 @@ import ( "google.golang.org/grpc/internal/serviceconfig" istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -112,7 +113,9 @@ type ClientStream interface { // SendMsg is generally called by generated code. On error, SendMsg aborts // the stream. If the error was generated by the client, the status is // returned directly; otherwise, io.EOF is returned and the status of - // the stream may be discovered using RecvMsg. + // the stream may be discovered using RecvMsg. For unary or server-streaming + // RPCs (StreamDesc.ClientStreams is false), a nil error is returned + // unconditionally. // // SendMsg blocks until: // - There is sufficient flow control to schedule m with the transport, or @@ -215,7 +218,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth var mc serviceconfig.MethodConfig var onCommit func() - var newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) { + newStream := func(ctx context.Context, done func()) (iresolver.ClientStream, error) { return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, opts...) } @@ -359,7 +362,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client cs.attempt = a return nil } - if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil { + if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) }); err != nil { return nil, err } @@ -517,7 +520,7 @@ func (a *csAttempt) newStream() error { } a.s = s a.ctx = s.Context() - a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool} + a.p = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool} return nil } @@ -566,10 +569,15 @@ type clientStream struct { // place where we need to check if the attempt is nil. attempt *csAttempt // TODO(hedging): hedging will have multiple attempts simultaneously. - committed bool // active attempt committed for retry? - onCommit func() - buffer []func(a *csAttempt) error // operations to replay on retry - bufferSize int // current size of buffer + committed bool // active attempt committed for retry? 
+ onCommit func() + replayBuffer []replayOp // operations to replay on retry + replayBufferSize int // current size of replayBuffer +} + +type replayOp struct { + op func(a *csAttempt) error + cleanup func() } // csAttempt implements a single transport stream attempt within a @@ -578,7 +586,7 @@ type csAttempt struct { ctx context.Context cs *clientStream t transport.ClientTransport - s *transport.Stream + s *transport.ClientStream p *parser pickResult balancer.PickResult @@ -607,7 +615,12 @@ func (cs *clientStream) commitAttemptLocked() { cs.onCommit() } cs.committed = true - cs.buffer = nil + for _, op := range cs.replayBuffer { + if op.cleanup != nil { + op.cleanup() + } + } + cs.replayBuffer = nil } func (cs *clientStream) commitAttempt() { @@ -695,11 +708,10 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) { cs.numRetriesSincePushback = 0 } else { fact := math.Pow(rp.BackoffMultiplier, float64(cs.numRetriesSincePushback)) - cur := float64(rp.InitialBackoff) * fact - if max := float64(rp.MaxBackoff); cur > max { - cur = max - } - dur = time.Duration(rand.Int63n(int64(cur))) + cur := min(float64(rp.InitialBackoff)*fact, float64(rp.MaxBackoff)) + // Apply jitter by multiplying with a random factor between 0.8 and 1.2 + cur *= 0.8 + 0.4*rand.Float64() + dur = time.Duration(int64(cur)) cs.numRetriesSincePushback++ } @@ -732,7 +744,7 @@ func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error { // the stream is canceled. return err } - // Note that the first op in the replay buffer always sets cs.attempt + // Note that the first op in replayBuffer always sets cs.attempt // if it is able to pick a transport and create a stream. if lastErr = cs.replayBufferLocked(attempt); lastErr == nil { return nil @@ -761,7 +773,7 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) // already be status errors. return toRPCErr(op(cs.attempt)) } - if len(cs.buffer) == 0 { + if len(cs.replayBuffer) == 0 { // For the first op, which controls creation of the stream and // assigns cs.attempt, we need to create a new attempt inline // before executing the first op. On subsequent ops, the attempt @@ -851,25 +863,26 @@ func (cs *clientStream) Trailer() metadata.MD { } func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error { - for _, f := range cs.buffer { - if err := f(attempt); err != nil { + for _, f := range cs.replayBuffer { + if err := f.op(attempt); err != nil { return err } } return nil } -func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) { +func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error, cleanup func()) { // Note: we still will buffer if retry is disabled (for transparent retries). 
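The retry hunk above replaces the `rand.Int63n` draw over [0, cur) with multiplicative jitter of 0.8–1.2×. A worked example of the new computation, using made-up retry-policy values (the real ones come from the method's service config):

```go
package main

import (
	"fmt"
	"math"
	rand "math/rand/v2"
	"time"
)

func main() {
	// Illustrative values for InitialBackoff, BackoffMultiplier, MaxBackoff.
	initial, multiplier, maxBackoff := 100*time.Millisecond, 2.0, 5*time.Second
	retries := 3 // numRetriesSincePushback

	cur := min(float64(initial)*math.Pow(multiplier, float64(retries)), float64(maxBackoff))
	cur *= 0.8 + 0.4*rand.Float64() // uniform jitter in [0.8, 1.2)
	fmt.Println(time.Duration(int64(cur))) // 800ms base => roughly 640ms-960ms
}
```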
if cs.committed { return } - cs.bufferSize += sz - if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize { + cs.replayBufferSize += sz + if cs.replayBufferSize > cs.callInfo.maxRetryRPCBufferSize { cs.commitAttemptLocked() + cleanup() return } - cs.buffer = append(cs.buffer, op) + cs.replayBuffer = append(cs.replayBuffer, replayOp{op: op, cleanup: cleanup}) } func (cs *clientStream) SendMsg(m any) (err error) { @@ -891,23 +904,50 @@ func (cs *clientStream) SendMsg(m any) (err error) { } // load hdr, payload, data - hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp) + hdr, data, payload, pf, err := prepareMsg(m, cs.codec, cs.cp, cs.comp, cs.cc.dopts.copts.BufferPool) if err != nil { return err } + defer func() { + data.Free() + // only free payload if compression was made, and therefore it is a different set + // of buffers from data. + if pf.isCompressed() { + payload.Free() + } + }() + + dataLen := data.Len() + payloadLen := payload.Len() // TODO(dfawley): should we be checking len(data) instead? - if len(payload) > *cs.callInfo.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize) + if payloadLen > *cs.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, *cs.callInfo.maxSendMessageSize) } + + // always take an extra ref in case data == payload (i.e. when the data isn't + // compressed). The original ref will always be freed by the deferred free above. + payload.Ref() op := func(a *csAttempt) error { - return a.sendMsg(m, hdr, payload, data) + return a.sendMsg(m, hdr, payload, dataLen, payloadLen) + } + + // onSuccess is invoked when the op is captured for a subsequent retry. If the + // stream was established by a previous message and therefore retries are + // disabled, onSuccess will not be invoked, and payloadRef can be freed + // immediately. + onSuccessCalled := false + err = cs.withRetry(op, func() { + cs.bufferForRetryLocked(len(hdr)+payloadLen, op, payload.Free) + onSuccessCalled = true + }) + if !onSuccessCalled { + payload.Free() } - err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) }) if len(cs.binlogs) != 0 && err == nil { cm := &binarylog.ClientMessage{ OnClientSide: true, - Message: data, + Message: data.Materialize(), } for _, binlog := range cs.binlogs { binlog.Log(cs.ctx, cm) @@ -924,6 +964,7 @@ func (cs *clientStream) RecvMsg(m any) error { var recvInfo *payloadInfo if len(cs.binlogs) != 0 { recvInfo = &payloadInfo{} + defer recvInfo.free() } err := cs.withRetry(func(a *csAttempt) error { return a.recvMsg(m, recvInfo) @@ -931,7 +972,7 @@ func (cs *clientStream) RecvMsg(m any) error { if len(cs.binlogs) != 0 && err == nil { sm := &binarylog.ServerMessage{ OnClientSide: true, - Message: recvInfo.uncompressedBytes, + Message: recvInfo.uncompressedBytes.Materialize(), } for _, binlog := range cs.binlogs { binlog.Log(cs.ctx, sm) @@ -951,14 +992,14 @@ func (cs *clientStream) CloseSend() error { } cs.sentLast = true op := func(a *csAttempt) error { - a.t.Write(a.s, nil, nil, &transport.Options{Last: true}) + a.s.Write(nil, nil, &transport.WriteOptions{Last: true}) // Always return nil; io.EOF is the only error that might make sense // instead, but there is no need to signal the client to call RecvMsg // as the only use left for the stream after CloseSend is to call // RecvMsg. This also matches historical behavior. 
return nil } - cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }) + cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) }) if len(cs.binlogs) != 0 { chc := &binarylog.ClientHalfClose{ OnClientSide: true, @@ -1034,7 +1075,7 @@ func (cs *clientStream) finish(err error) { cs.cancel() } -func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error { +func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength, payloadLength int) error { cs := a.cs if a.trInfo != nil { a.mu.Lock() @@ -1043,7 +1084,7 @@ func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error { } a.mu.Unlock() } - if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil { + if err := a.s.Write(hdr, payld, &transport.WriteOptions{Last: !cs.desc.ClientStreams}); err != nil { if !cs.desc.ClientStreams { // For non-client-streaming RPCs, we return nil instead of EOF on error // because the generated code requires it. finish is not called; RecvMsg() @@ -1052,11 +1093,10 @@ func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error { } return io.EOF } - for _, sh := range a.statsHandlers { - sh.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now())) - } - if channelz.IsOn() { - a.t.IncrMsgSent() + if len(a.statsHandlers) != 0 { + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now())) + } } return nil } @@ -1065,6 +1105,7 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { cs := a.cs if len(a.statsHandlers) != 0 && payInfo == nil { payInfo = &payloadInfo{} + defer payInfo.free() } if !a.decompSet { @@ -1083,8 +1124,7 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { // Only initialize this state once per stream. a.decompSet = true } - err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp) - if err != nil { + if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp, false); err != nil { if err == io.EOF { if statusErr := a.s.Status().Err(); statusErr != nil { return statusErr @@ -1103,33 +1143,26 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { } for _, sh := range a.statsHandlers { sh.HandleRPC(a.ctx, &stats.InPayload{ - Client: true, - RecvTime: time.Now(), - Payload: m, - // TODO truncate large payload. - Data: payInfo.uncompressedBytes, + Client: true, + RecvTime: time.Now(), + Payload: m, WireLength: payInfo.compressedLength + headerLen, CompressedLength: payInfo.compressedLength, - Length: len(payInfo.uncompressedBytes), + Length: payInfo.uncompressedBytes.Len(), }) } - if channelz.IsOn() { - a.t.IncrMsgRecv() - } if cs.desc.ServerStreams { // Subsequent messages should be received by subsequent RecvMsg calls. return nil } // Special handling for non-server-stream rpcs. // This recv expects EOF or errors, so we don't collect inPayload. 
- err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp) - if err == nil { - return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>")) - } - if err == io.EOF { + if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp, false); err == io.EOF { return a.s.Status().Err() // non-server streaming Recv returns nil on success + } else if err != nil { + return toRPCErr(err) } - return toRPCErr(err) + return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>")) } func (a *csAttempt) finish(err error) { @@ -1145,7 +1178,7 @@ func (a *csAttempt) finish(err error) { } var tr metadata.MD if a.s != nil { - a.t.CloseStream(a.s, err) + a.s.Close(err) tr = a.s.Trailer() } @@ -1185,12 +1218,12 @@ func (a *csAttempt) finish(err error) { a.mu.Unlock() } -// newClientStream creates a ClientStream with the specified transport, on the +// newNonRetryClientStream creates a ClientStream with the specified transport, on the // given addrConn. // // It's expected that the given transport is either the same one in addrConn, or // is already closed. To avoid race, transport is specified separately, instead -// of using ac.transpot. +// of using ac.transport. // // Main difference between this and ClientConn.NewStream: // - no retry @@ -1276,7 +1309,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin return nil, err } as.s = s - as.p = &parser{r: s, recvBufferPool: ac.dopts.recvBufferPool} + as.p = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool} ac.incrCallsStarted() if desc != unaryStreamDesc { // Listen on stream context to cleanup when the stream context is @@ -1302,7 +1335,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin } type addrConnStream struct { - s *transport.Stream + s *transport.ClientStream ac *addrConn callHdr *transport.CallHdr cancel context.CancelFunc @@ -1342,7 +1375,7 @@ func (as *addrConnStream) CloseSend() error { } as.sentLast = true - as.t.Write(as.s, nil, nil, &transport.Options{Last: true}) + as.s.Write(nil, nil, &transport.WriteOptions{Last: true}) // Always return nil; io.EOF is the only error that might make sense // instead, but there is no need to signal the client to call RecvMsg // as the only use left for the stream after CloseSend is to call @@ -1373,17 +1406,26 @@ func (as *addrConnStream) SendMsg(m any) (err error) { } // load hdr, payload, data - hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp) + hdr, data, payload, pf, err := prepareMsg(m, as.codec, as.cp, as.comp, as.ac.dopts.copts.BufferPool) if err != nil { return err } + defer func() { + data.Free() + // only free payload if compression was made, and therefore it is a different set + // of buffers from data. + if pf.isCompressed() { + payload.Free() + } + }() + // TODO(dfawley): should we be checking len(data) instead? - if len(payld) > *as.callInfo.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize) + if payload.Len() > *as.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. 
%d)", payload.Len(), *as.callInfo.maxSendMessageSize) } - if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { + if err := as.s.Write(hdr, payload, &transport.WriteOptions{Last: !as.desc.ClientStreams}); err != nil { if !as.desc.ClientStreams { // For non-client-streaming RPCs, we return nil instead of EOF on error // because the generated code requires it. finish is not called; RecvMsg() @@ -1393,9 +1435,6 @@ func (as *addrConnStream) SendMsg(m any) (err error) { return io.EOF } - if channelz.IsOn() { - as.t.IncrMsgSent() - } return nil } @@ -1423,8 +1462,7 @@ func (as *addrConnStream) RecvMsg(m any) (err error) { // Only initialize this state once per stream. as.decompSet = true } - err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp) - if err != nil { + if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err != nil { if err == io.EOF { if statusErr := as.s.Status().Err(); statusErr != nil { return statusErr @@ -1434,9 +1472,6 @@ func (as *addrConnStream) RecvMsg(m any) (err error) { return toRPCErr(err) } - if channelz.IsOn() { - as.t.IncrMsgRecv() - } if as.desc.ServerStreams { // Subsequent messages should be received by subsequent RecvMsg calls. return nil @@ -1444,14 +1479,12 @@ func (as *addrConnStream) RecvMsg(m any) (err error) { // Special handling for non-server-stream rpcs. // This recv expects EOF or errors, so we don't collect inPayload. - err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp) - if err == nil { - return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>")) - } - if err == io.EOF { + if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err == io.EOF { return as.s.Status().Err() // non-server streaming Recv returns nil on success + } else if err != nil { + return toRPCErr(err) } - return toRPCErr(err) + return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>")) } func (as *addrConnStream) finish(err error) { @@ -1466,7 +1499,7 @@ func (as *addrConnStream) finish(err error) { err = nil } if as.s != nil { - as.t.CloseStream(as.s, err) + as.s.Close(err) } if err != nil { @@ -1533,8 +1566,7 @@ type ServerStream interface { // serverStream implements a server side Stream. type serverStream struct { ctx context.Context - t transport.ServerTransport - s *transport.Stream + s *transport.ServerStream p *parser codec baseCodec @@ -1584,7 +1616,7 @@ func (ss *serverStream) SendHeader(md metadata.MD) error { return status.Error(codes.Internal, err.Error()) } - err = ss.t.WriteHeader(ss.s, md) + err = ss.s.SendHeader(md) if len(ss.binlogs) != 0 && !ss.serverHeaderBinlogged { h, _ := ss.s.Header() sh := &binarylog.ServerHeader{ @@ -1624,7 +1656,7 @@ func (ss *serverStream) SendMsg(m any) (err error) { } if err != nil && err != io.EOF { st, _ := status.FromError(toRPCErr(err)) - ss.t.WriteStatus(ss.s, st) + ss.s.WriteStatus(st) // Non-user specified status was sent out. This should be an error // case (as a server side Cancel maybe). // @@ -1632,9 +1664,6 @@ func (ss *serverStream) SendMsg(m any) (err error) { // status from the service handler, we will log that error instead. // This behavior is similar to an interceptor. } - if channelz.IsOn() && err == nil { - ss.t.IncrMsgSent() - } }() // Server handler could have set new compressor by calling SetSendCompressor. 
@@ -1645,18 +1674,31 @@ func (ss *serverStream) SendMsg(m any) (err error) { } // load hdr, payload, data - hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp) + hdr, data, payload, pf, err := prepareMsg(m, ss.codec, ss.cp, ss.comp, ss.p.bufferPool) if err != nil { return err } + defer func() { + data.Free() + // only free payload if compression was made, and therefore it is a different set + // of buffers from data. + if pf.isCompressed() { + payload.Free() + } + }() + + dataLen := data.Len() + payloadLen := payload.Len() + // TODO(dfawley): should we be checking len(data) instead? - if len(payload) > ss.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize) + if payloadLen > ss.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, ss.maxSendMessageSize) } - if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { + if err := ss.s.Write(hdr, payload, &transport.WriteOptions{Last: false}); err != nil { return toRPCErr(err) } + if len(ss.binlogs) != 0 { if !ss.serverHeaderBinlogged { h, _ := ss.s.Header() @@ -1669,7 +1711,7 @@ func (ss *serverStream) SendMsg(m any) (err error) { } } sm := &binarylog.ServerMessage{ - Message: data, + Message: data.Materialize(), } for _, binlog := range ss.binlogs { binlog.Log(ss.ctx, sm) @@ -1677,7 +1719,7 @@ func (ss *serverStream) SendMsg(m any) (err error) { } if len(ss.statsHandler) != 0 { for _, sh := range ss.statsHandler { - sh.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) + sh.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now())) } } return nil @@ -1699,7 +1741,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) { } if err != nil && err != io.EOF { st, _ := status.FromError(toRPCErr(err)) - ss.t.WriteStatus(ss.s, st) + ss.s.WriteStatus(st) // Non-user specified status was sent out. This should be an error // case (as a server side Cancel maybe). // @@ -1707,15 +1749,13 @@ func (ss *serverStream) RecvMsg(m any) (err error) { // status from the service handler, we will log that error instead. // This behavior is similar to an interceptor. } - if channelz.IsOn() && err == nil { - ss.t.IncrMsgRecv() - } }() var payInfo *payloadInfo if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 { payInfo = &payloadInfo{} + defer payInfo.free() } - if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil { + if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp, true); err != nil { if err == io.EOF { if len(ss.binlogs) != 0 { chc := &binarylog.ClientHalfClose{} @@ -1733,11 +1773,9 @@ func (ss *serverStream) RecvMsg(m any) (err error) { if len(ss.statsHandler) != 0 { for _, sh := range ss.statsHandler { sh.HandleRPC(ss.s.Context(), &stats.InPayload{ - RecvTime: time.Now(), - Payload: m, - // TODO truncate large payload. 
- Data: payInfo.uncompressedBytes, - Length: len(payInfo.uncompressedBytes), + RecvTime: time.Now(), + Payload: m, + Length: payInfo.uncompressedBytes.Len(), WireLength: payInfo.compressedLength + headerLen, CompressedLength: payInfo.compressedLength, }) @@ -1745,7 +1783,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) { } if len(ss.binlogs) != 0 { cm := &binarylog.ClientMessage{ - Message: payInfo.uncompressedBytes, + Message: payInfo.uncompressedBytes.Materialize(), } for _, binlog := range ss.binlogs { binlog.Log(ss.ctx, cm) @@ -1760,23 +1798,26 @@ func MethodFromServerStream(stream ServerStream) (string, bool) { return Method(stream.Context()) } -// prepareMsg returns the hdr, payload and data -// using the compressors passed or using the -// passed preparedmsg +// prepareMsg returns the hdr, payload and data using the compressors passed or +// using the passed preparedmsg. The returned payloadFormat (pf) indicates whether +// compression was applied and therefore whether the payload needs to be freed in +// addition to the returned data. Freeing the payload when pf does not indicate +// compression can lead to undefined behavior. +func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor, pool mem.BufferPool) (hdr []byte, data, payload mem.BufferSlice, pf payloadFormat, err error) { if preparedMsg, ok := m.(*PreparedMsg); ok { - return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil + return preparedMsg.hdr, preparedMsg.encodedData, preparedMsg.payload, preparedMsg.pf, nil } // The input interface is not a prepared msg. // Marshal and Compress the data at this point data, err = encode(codec, m) if err != nil { - return nil, nil, nil, err + return nil, nil, nil, 0, err } - compData, err := compress(data, cp, comp) + compData, pf, err := compress(data, cp, comp, pool) if err != nil { - return nil, nil, nil, err + data.Free() + return nil, nil, nil, 0, err } - hdr, payload = msgHeader(data, compData) - return hdr, payload, data, nil + hdr, payload = msgHeader(data, compData, pf) + return hdr, data, payload, pf, nil } diff --git a/vendor/google.golang.org/grpc/stream_interfaces.go b/vendor/google.golang.org/grpc/stream_interfaces.go index 8b813529c0c..0037fee0bd7 100644 --- a/vendor/google.golang.org/grpc/stream_interfaces.go +++ b/vendor/google.golang.org/grpc/stream_interfaces.go @@ -22,15 +22,35 @@ package grpc // request, many responses) RPC. It is generic over the type of the response // message. It is used in generated code. type ServerStreamingClient[Res any] interface { + // Recv receives the next response message from the server. The client may + // repeatedly call Recv to read messages from the response stream. If + // io.EOF is returned, the stream has terminated with an OK status. Any + // other error is compatible with the status package and indicates the + // RPC's status code and message. Recv() (*Res, error) + + // ClientStream is embedded to provide Context, Header, and Trailer + // functionality. No other methods in the ClientStream should be called + // directly. ClientStream } // ServerStreamingServer represents the server side of a server-streaming (one // request, many responses) RPC. It is generic over the type of the response // message. It is used in generated code. 
+// +// To terminate the response stream, return from the handler method and return +// an error from the status package, or use nil to indicate an OK status code. type ServerStreamingServer[Res any] interface { + // Send sends a response message to the client. The server handler may + // call Send multiple times to send multiple messages to the client. An + // error is returned if the stream was terminated unexpectedly, and the + // handler method should return, as the stream is no longer usable. Send(*Res) error + + // ServerStream is embedded to provide Context, SetHeader, SendHeader, and + // SetTrailer functionality. No other methods in the ServerStream should + // be called directly. ServerStream } @@ -39,8 +59,22 @@ type ServerStreamingServer[Res any] interface { // message stream and the type of the unary response message. It is used in // generated code. type ClientStreamingClient[Req any, Res any] interface { + // Send sends a request message to the server. The client may call Send + // multiple times to send multiple messages to the server. On error, Send + // aborts the stream. If the error was generated by the client, the status + // is returned directly. Otherwise, io.EOF is returned, and the status of + // the stream may be discovered using CloseAndRecv(). Send(*Req) error + + // CloseAndRecv closes the request stream and waits for the server's + // response. This method must be called once and only once after sending + // all request messages. Any error returned is implemented by the status + // package. CloseAndRecv() (*Res, error) + + // ClientStream is embedded to provide Context, Header, and Trailer + // functionality. No other methods in the ClientStream should be called + // directly. ClientStream } @@ -48,9 +82,28 @@ type ClientStreamingClient[Req any, Res any] interface { // requests, one response) RPC. It is generic over both the type of the request // message stream and the type of the unary response message. It is used in // generated code. +// +// To terminate the RPC, call SendAndClose and return nil from the method +// handler or do not call SendAndClose and return an error from the status +// package. type ClientStreamingServer[Req any, Res any] interface { + // Recv receives the next request message from the client. The server may + // repeatedly call Recv to read messages from the request stream. If + // io.EOF is returned, it indicates the client called CloseAndRecv on its + // ClientStreamingClient. Any other error indicates the stream was + // terminated unexpectedly, and the handler method should return, as the + // stream is no longer usable. Recv() (*Req, error) + + // SendAndClose sends a single response message to the client and closes + // the stream. This method must be called once and only once after all + // request messages have been processed. Recv should not be called after + // calling SendAndClose. SendAndClose(*Res) error + + // ServerStream is embedded to provide Context, SetHeader, SendHeader, and + // SetTrailer functionality. No other methods in the ServerStream should + // be called directly. ServerStream } @@ -59,8 +112,23 @@ type ClientStreamingServer[Req any, Res any] interface { // request message stream and the type of the response message stream. It is // used in generated code. type BidiStreamingClient[Req any, Res any] interface { + // Send sends a request message to the server. The client may call Send + // multiple times to send multiple messages to the server. On error, Send + // aborts the stream. 
If the error was generated by the client, the status + // is returned directly. Otherwise, io.EOF is returned, and the status of + // the stream may be discovered using Recv(). Send(*Req) error + + // Recv receives the next response message from the server. The client may + // repeatedly call Recv to read messages from the response stream. If + // io.EOF is returned, the stream has terminated with an OK status. Any + // other error is compatible with the status package and indicates the + // RPC's status code and message. Recv() (*Res, error) + + // ClientStream is embedded to provide Context, Header, Trailer, and + // CloseSend functionality. No other methods in the ClientStream should be + // called directly. ClientStream } @@ -68,9 +136,27 @@ type BidiStreamingClient[Req any, Res any] interface { // (many requests, many responses) RPC. It is generic over both the type of the // request message stream and the type of the response message stream. It is // used in generated code. +// +// To terminate the stream, return from the handler method and return +// an error from the status package, or use nil to indicate an OK status code. type BidiStreamingServer[Req any, Res any] interface { + // Recv receives the next request message from the client. The server may + // repeatedly call Recv to read messages from the request stream. If + // io.EOF is returned, it indicates the client called CloseSend on its + // BidiStreamingClient. Any other error indicates the stream was + // terminated unexpectedly, and the handler method should return, as the + // stream is no longer usable. Recv() (*Req, error) + + // Send sends a response message to the client. The server handler may + // call Send multiple times to send multiple messages to the client. An + // error is returned if the stream was terminated unexpectedly, and the + // handler method should return, as the stream is no longer usable. Send(*Res) error + + // ServerStream is embedded to provide Context, SetHeader, SendHeader, and + // SetTrailer functionality. No other methods in the ServerStream should + // be called directly. ServerStream } diff --git a/vendor/google.golang.org/grpc/test/bufconn/bufconn.go b/vendor/google.golang.org/grpc/test/bufconn/bufconn.go index 3f77f4876eb..e6eb4feebb9 100644 --- a/vendor/google.golang.org/grpc/test/bufconn/bufconn.go +++ b/vendor/google.golang.org/grpc/test/bufconn/bufconn.go @@ -109,7 +109,7 @@ type pipe struct { mu sync.Mutex // buf contains the data in the pipe. It is a ring buffer of fixed capacity, - // with r and w pointing to the offset to read and write, respsectively. + // with r and w pointing to the offset to read and write, respectively. // // Data is read between [r, w) and written to [w, r), wrapping around the end // of the slice if necessary. diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index bafaef99be9..d2bba7f3d9e 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. 
-const Version = "1.65.0" +const Version = "1.69.4" diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go index bb2966e3b4c..cffdfda9619 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -192,7 +192,7 @@ func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) erro fd = fieldDescs.ByTextName(name) } } - if flags.ProtoLegacy { + if flags.ProtoLegacyWeak { if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() { fd = nil // reset since the weak reference is not linked in } @@ -351,7 +351,7 @@ func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect. panic(fmt.Sprintf("unmarshalScalar: invalid scalar kind %v", kind)) } - return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) + return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v field %v: %v", kind, fd.JSONName(), tok.RawString()) } func unmarshalInt(tok json.Token, bitSize int) (protoreflect.Value, bool) { diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go index 29846df222c..0e72d85378b 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go @@ -216,9 +216,7 @@ func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, proto } v := m.Get(fd) - isProto2Scalar := fd.Syntax() == protoreflect.Proto2 && fd.Default().IsValid() - isSingularMessage := fd.Cardinality() != protoreflect.Repeated && fd.Message() != nil - if isProto2Scalar || isSingularMessage { + if fd.HasPresence() { if m.skipNull { continue } diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go index 4b177c8206f..e9fe1039437 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go @@ -348,7 +348,11 @@ func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m protoreflect.Messa switch tok.Kind() { case json.ObjectClose: if !found { - return d.newError(tok.Pos(), `missing "value" field`) + // We tolerate an omitted `value` field with the google.protobuf.Empty Well-Known-Type, + // for compatibility with other proto runtimes that have interpreted the spec differently. 
+ if m.Descriptor().FullName() != genid.Empty_message_fullname { + return d.newError(tok.Pos(), `missing "value" field`) + } } return nil diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index 24bc98ac422..d972a3d98ed 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -185,7 +185,7 @@ func (d decoder) unmarshalMessage(m protoreflect.Message, checkDelims bool) erro } else if xtErr != nil && xtErr != protoregistry.NotFound { return d.newError(tok.Pos(), "unable to resolve [%s]: %v", tok.RawString(), xtErr) } - if flags.ProtoLegacy { + if flags.ProtoLegacyWeak { if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() { fd = nil // reset since the weak reference is not linked in } diff --git a/vendor/google.golang.org/protobuf/internal/descopts/options.go b/vendor/google.golang.org/protobuf/internal/descopts/options.go index 8401be8c84f..024ffebd3dd 100644 --- a/vendor/google.golang.org/protobuf/internal/descopts/options.go +++ b/vendor/google.golang.org/protobuf/internal/descopts/options.go @@ -9,7 +9,7 @@ // dependency on the descriptor proto package). package descopts -import pref "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" // These variables are set by the init function in descriptor.pb.go via logic // in internal/filetype. In other words, so long as the descriptor proto package @@ -17,13 +17,13 @@ import pref "google.golang.org/protobuf/reflect/protoreflect" // // Each variable is populated with a nil pointer to the options struct. var ( - File pref.ProtoMessage - Enum pref.ProtoMessage - EnumValue pref.ProtoMessage - Message pref.ProtoMessage - Field pref.ProtoMessage - Oneof pref.ProtoMessage - ExtensionRange pref.ProtoMessage - Service pref.ProtoMessage - Method pref.ProtoMessage + File protoreflect.ProtoMessage + Enum protoreflect.ProtoMessage + EnumValue protoreflect.ProtoMessage + Message protoreflect.ProtoMessage + Field protoreflect.ProtoMessage + Oneof protoreflect.ProtoMessage + ExtensionRange protoreflect.ProtoMessage + Service protoreflect.ProtoMessage + Method protoreflect.ProtoMessage ) diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb index ff6a38360ad..5a57ef6f3c8 100644 Binary files a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb and b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb differ diff --git a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go index 029a6a12d74..bf1aba0e851 100644 --- a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go +++ b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go @@ -5,9 +5,14 @@ // Package editionssupport defines constants for editions that are supported. package editionssupport -import descriptorpb "google.golang.org/protobuf/types/descriptorpb" +import "google.golang.org/protobuf/types/descriptorpb" const ( Minimum = descriptorpb.Edition_EDITION_PROTO2 Maximum = descriptorpb.Edition_EDITION_2023 + + // MaximumKnown is the maximum edition that is known to Go Protobuf, but not + // declared as supported. 
In other words: end users cannot use it, but + // testprotos inside Go Protobuf can. + MaximumKnown = descriptorpb.Edition_EDITION_2024 ) diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go deleted file mode 100644 index fbcd349207d..00000000000 --- a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.13 -// +build !go1.13 - -package errors - -import "reflect" - -// Is is a copy of Go 1.13's errors.Is for use with older Go versions. -func Is(err, target error) bool { - if target == nil { - return err == target - } - - isComparable := reflect.TypeOf(target).Comparable() - for { - if isComparable && err == target { - return true - } - if x, ok := err.(interface{ Is(error) bool }); ok && x.Is(target) { - return true - } - if err = unwrap(err); err == nil { - return false - } - } -} - -func unwrap(err error) error { - u, ok := err.(interface { - Unwrap() error - }) - if !ok { - return nil - } - return u.Unwrap() -} diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go deleted file mode 100644 index 5e72f1cde9e..00000000000 --- a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.13 -// +build go1.13 - -package errors - -import "errors" - -// Is is errors.Is. -func Is(err, target error) bool { return errors.Is(err, target) } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index df53ff40b25..378b826faa6 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -32,6 +32,7 @@ const ( EditionProto2 Edition = 998 EditionProto3 Edition = 999 Edition2023 Edition = 1000 + Edition2024 Edition = 1001 EditionUnsupported Edition = 100000 ) @@ -77,31 +78,48 @@ type ( Locations SourceLocations } + // EditionFeatures is a frequently-instantiated struct, so please take care + // to minimize padding when adding new fields to this struct (add them in + // the right place/order). EditionFeatures struct { + // StripEnumPrefix determines if the plugin generates enum value + // constants as-is, with their prefix stripped, or both variants. 
+ StripEnumPrefix int + + // IsFieldPresence is true if field_presence is EXPLICIT + // https://protobuf.dev/editions/features/#field_presence + IsFieldPresence bool + + // IsLegacyRequired is true if field_presence is LEGACY_REQUIRED + // https://protobuf.dev/editions/features/#field_presence + IsLegacyRequired bool + + // IsOpenEnum is true if enum_type is OPEN + // https://protobuf.dev/editions/features/#enum_type + IsOpenEnum bool + + // IsPacked is true if repeated_field_encoding is PACKED + // https://protobuf.dev/editions/features/#repeated_field_encoding + IsPacked bool + + // IsUTF8Validated is true if utf_validation is VERIFY + // https://protobuf.dev/editions/features/#utf8_validation + IsUTF8Validated bool + + // IsDelimitedEncoded is true if message_encoding is DELIMITED + // https://protobuf.dev/editions/features/#message_encoding + IsDelimitedEncoded bool + + // IsJSONCompliant is true if json_format is ALLOW + // https://protobuf.dev/editions/features/#json_format + IsJSONCompliant bool + + // GenerateLegacyUnmarshalJSON determines if the plugin generates the // UnmarshalJSON([]byte) error method for enums. GenerateLegacyUnmarshalJSON bool + // APILevel controls which API (Open, Hybrid or Opaque) should be used + // for generated code (.pb.go files). + APILevel int } ) @@ -258,6 +276,7 @@ type ( StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto IsWeak bool // promoted from google.protobuf.FieldOptions + IsLazy bool // promoted from google.protobuf.FieldOptions Default defaultValue ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields Enum protoreflect.EnumDescriptor @@ -351,6 +370,7 @@ func (fd *Field) IsPacked() bool { } func (fd *Field) IsExtension() bool { return false } func (fd *Field) IsWeak() bool { return fd.L1.IsWeak } +func (fd *Field) IsLazy() bool { return fd.L1.IsLazy } func (fd *Field) IsList() bool { return fd.Cardinality() == protoreflect.Repeated && !fd.IsMap() } func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() } func (fd *Field) MapKey() protoreflect.FieldDescriptor { @@ -425,6 +445,7 @@ type ( Extendee protoreflect.MessageDescriptor Cardinality protoreflect.Cardinality Kind protoreflect.Kind + IsLazy bool EditionFeatures EditionFeatures } ExtensionL2 struct { @@ -465,6 +486,7 @@ func (xd *Extension) IsPacked() bool { } func (xd *Extension) IsExtension() bool { return true } func (xd *Extension) IsWeak() bool { return false } +func (xd *Extension) IsLazy() bool { return xd.L1.IsLazy } func (xd *Extension) IsList() bool { return xd.Cardinality() == protoreflect.Repeated } func (xd *Extension) IsMap() bool { return false } func (xd *Extension) MapKey() protoreflect.FieldDescriptor { return nil } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go index 8a57d60b08c..d2f549497eb 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -495,6 +495,8 @@ func (xd *Extension) unmarshalOptions(b []byte) { switch num { case genid.FieldOptions_Packed_field_number: xd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) + case genid.FieldOptions_Lazy_field_number: + xd.L1.IsLazy = protowire.DecodeBool(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index e56c91a8dbe..67a51b327c5 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -504,6 +504,8 @@ func (fd *Field) unmarshalOptions(b []byte) { fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) case genid.FieldOptions_Weak_field_number: fd.L1.IsWeak = protowire.DecodeBool(v) + case genid.FieldOptions_Lazy_field_number: + fd.L1.IsLazy = protowire.DecodeBool(v) case FieldOptions_EnforceUTF8: fd.L1.EditionFeatures.IsUTF8Validated = protowire.DecodeBool(v) } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go index 11f5f356b66..10132c9b384 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go @@ -32,6 +32,14 @@ func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures { v, m := protowire.ConsumeVarint(b) b = b[m:] parent.GenerateLegacyUnmarshalJSON = protowire.DecodeBool(v) + case genid.GoFeatures_ApiLevel_field_number: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + parent.APILevel = int(v) + case genid.GoFeatures_StripEnumPrefix_field_number: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + parent.StripEnumPrefix = int(v) default: panic(fmt.Sprintf("unknown field number %d while unmarshalling GoFeatures", num)) } @@ -68,7 +76,7 @@ func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures { v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number: + case genid.FeatureSet_Go_ext_number: parent = unmarshalGoFeature(v, parent) } } diff --git a/vendor/google.golang.org/protobuf/internal/flags/flags.go b/vendor/google.golang.org/protobuf/internal/flags/flags.go index 58372dd3485..5cb3ee70f91 100644 --- a/vendor/google.golang.org/protobuf/internal/flags/flags.go +++ b/vendor/google.golang.org/protobuf/internal/flags/flags.go @@ -22,3 +22,8 @@ const ProtoLegacy = protoLegacy // extension fields at unmarshal time, but defers creating the message // structure until the extension is first accessed. const LazyUnmarshalExtensions = ProtoLegacy + +// ProtoLegacyWeak specifies whether to enable support for weak fields. +// This flag was split out of ProtoLegacy in preparation for removing +// support for weak fields (independent of the other protolegacy features). +const ProtoLegacyWeak = ProtoLegacy diff --git a/vendor/google.golang.org/protobuf/internal/genid/doc.go b/vendor/google.golang.org/protobuf/internal/genid/doc.go index 45ccd01211c..d9b9d916a20 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/doc.go +++ b/vendor/google.golang.org/protobuf/internal/genid/doc.go @@ -6,6 +6,6 @@ // and the well-known types.
package genid -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" const GoogleProtobuf_package protoreflect.FullName = "google.protobuf" diff --git a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go index 9a652a2b424..f5ee7f5c2b7 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go @@ -12,20 +12,59 @@ import ( const File_google_protobuf_go_features_proto = "google/protobuf/go_features.proto" -// Names for google.protobuf.GoFeatures. +// Names for pb.GoFeatures. const ( GoFeatures_message_name protoreflect.Name = "GoFeatures" - GoFeatures_message_fullname protoreflect.FullName = "google.protobuf.GoFeatures" + GoFeatures_message_fullname protoreflect.FullName = "pb.GoFeatures" ) -// Field names for google.protobuf.GoFeatures. +// Field names for pb.GoFeatures. const ( GoFeatures_LegacyUnmarshalJsonEnum_field_name protoreflect.Name = "legacy_unmarshal_json_enum" + GoFeatures_ApiLevel_field_name protoreflect.Name = "api_level" + GoFeatures_StripEnumPrefix_field_name protoreflect.Name = "strip_enum_prefix" - GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "google.protobuf.GoFeatures.legacy_unmarshal_json_enum" + GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "pb.GoFeatures.legacy_unmarshal_json_enum" + GoFeatures_ApiLevel_field_fullname protoreflect.FullName = "pb.GoFeatures.api_level" + GoFeatures_StripEnumPrefix_field_fullname protoreflect.FullName = "pb.GoFeatures.strip_enum_prefix" ) -// Field numbers for google.protobuf.GoFeatures. +// Field numbers for pb.GoFeatures. const ( GoFeatures_LegacyUnmarshalJsonEnum_field_number protoreflect.FieldNumber = 1 + GoFeatures_ApiLevel_field_number protoreflect.FieldNumber = 2 + GoFeatures_StripEnumPrefix_field_number protoreflect.FieldNumber = 3 +) + +// Full and short names for pb.GoFeatures.APILevel. +const ( + GoFeatures_APILevel_enum_fullname = "pb.GoFeatures.APILevel" + GoFeatures_APILevel_enum_name = "APILevel" +) + +// Enum values for pb.GoFeatures.APILevel. +const ( + GoFeatures_API_LEVEL_UNSPECIFIED_enum_value = 0 + GoFeatures_API_OPEN_enum_value = 1 + GoFeatures_API_HYBRID_enum_value = 2 + GoFeatures_API_OPAQUE_enum_value = 3 +) + +// Full and short names for pb.GoFeatures.StripEnumPrefix. +const ( + GoFeatures_StripEnumPrefix_enum_fullname = "pb.GoFeatures.StripEnumPrefix" + GoFeatures_StripEnumPrefix_enum_name = "StripEnumPrefix" +) + +// Enum values for pb.GoFeatures.StripEnumPrefix. 
+const ( + GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED_enum_value = 0 + GoFeatures_STRIP_ENUM_PREFIX_KEEP_enum_value = 1 + GoFeatures_STRIP_ENUM_PREFIX_GENERATE_BOTH_enum_value = 2 + GoFeatures_STRIP_ENUM_PREFIX_STRIP_enum_value = 3 +) + +// Extension numbers +const ( + FeatureSet_Go_ext_number protoreflect.FieldNumber = 1002 ) diff --git a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go index 8f9ea02ff2a..bef5a25fbbf 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go +++ b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go @@ -4,7 +4,7 @@ package genid -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" // Generic field names and numbers for synthetic map entry messages. const ( diff --git a/vendor/google.golang.org/protobuf/internal/genid/name.go b/vendor/google.golang.org/protobuf/internal/genid/name.go new file mode 100644 index 00000000000..224f339302f --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/name.go @@ -0,0 +1,12 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package genid + +const ( + NoUnkeyedLiteral_goname = "noUnkeyedLiteral" + NoUnkeyedLiteralA_goname = "XXX_NoUnkeyedLiteral" + + BuilderSuffix_goname = "_builder" +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go index 429384b85b0..9404270de0b 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go +++ b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go @@ -4,7 +4,7 @@ package genid -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" // Generic field name and number for messages in wrappers.proto. const ( diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/api_export_opaque.go new file mode 100644 index 00000000000..6075d6f6967 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/api_export_opaque.go @@ -0,0 +1,128 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "strconv" + "sync/atomic" + "unsafe" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +func (Export) UnmarshalField(msg any, fieldNum int32) { + UnmarshalField(msg.(protoreflect.ProtoMessage).ProtoReflect(), protoreflect.FieldNumber(fieldNum)) +} + +// Present checks the presence set for a certain field number (zero +// based, ordered by appearance in original proto file). part is +// a pointer to the correct element in the bitmask array, num is the +// field number unaltered. Example (field number 70 -> part = +// &m.XXX_presence[2], num = 70) +func (Export) Present(part *uint32, num uint32) bool { + // This hook will read an unprotected shadow presence set if + // we're running under the race detector + raceDetectHookPresent(part, num) + return atomic.LoadUint32(part)&(1<<(num%32)) > 0 +} + +// SetPresent adds a field to the presence set. part is a pointer to +// the relevant element in the array and num is the field number +// unaltered. size is the number of fields in the protocol +// buffer.
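A minimal sketch of the indexing contract these helpers share, mirroring the Present implementation above (words stands in for the generated XXX_presence array): zero-based field index n lives at bit n%32 of word n/32, which is why the example maps index 70 to word 2, bit 6.

	import "sync/atomic"

	// present reports whether zero-based field index n is set; callers of
	// Export.Present pass part = &words[n/32] and num = n.
	func present(words []uint32, n uint32) bool {
		return atomic.LoadUint32(&words[n/32])&(1<<(n%32)) != 0
	}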
+func (Export) SetPresent(part *uint32, num uint32, size uint32) { + // This hook will mutate an unprotected shadow presence set if + // we're running under the race detector + raceDetectHookSetPresent(part, num, presenceSize(size)) + for { + old := atomic.LoadUint32(part) + if atomic.CompareAndSwapUint32(part, old, old|(1<<(num%32))) { + return + } + } +} + +// SetPresentNonAtomic is like SetPresent, but operates non-atomically. +// It is meant for use by builder methods, where the message is known not +// to be accessible yet by other goroutines. +func (Export) SetPresentNonAtomic(part *uint32, num uint32, size uint32) { + // This hook will mutate an unprotected shadow presence set if + // we're running under the race detector + raceDetectHookSetPresent(part, num, presenceSize(size)) + *part |= 1 << (num % 32) +} + +// ClearPresent removes a field from the presence set. part is a +// pointer to the relevant element in the presence array and num is +// the field number unaltered. +func (Export) ClearPresent(part *uint32, num uint32) { + // This hook will mutate an unprotected shadow presence set if + // we're running under the race detector + raceDetectHookClearPresent(part, num) + for { + old := atomic.LoadUint32(part) + if atomic.CompareAndSwapUint32(part, old, old&^(1<<(num%32))) { + return + } + } +} + +// interfaceToPointer takes a pointer to an empty interface whose value is a +// pointer type, and converts it into a "pointer" that points to the same +// target +func interfaceToPointer(i *any) pointer { + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} +} + +func (p pointer) atomicGetPointer() pointer { + return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))} +} + +func (p pointer) atomicSetPointer(q pointer) { + atomic.StorePointer((*unsafe.Pointer)(p.p), q.p) +} + +// AtomicCheckPointerIsNil takes an interface (which is a pointer to a +// pointer) and returns true if the pointed-to pointer is nil (using an +// atomic load). This function is inlineable and, on x86, just becomes a +// simple load and compare. +func (Export) AtomicCheckPointerIsNil(ptr any) bool { + return interfaceToPointer(&ptr).atomicGetPointer().IsNil() +} + +// AtomicSetPointer takes two interfaces (first is a pointer to a pointer, +// second is a pointer) and atomically sets the second pointer into location +// referenced by first pointer. Unfortunately, atomicSetPointer() does not inline +// (even on x86), so this does not become a simple store on x86. +func (Export) AtomicSetPointer(dstPtr, valPtr any) { + interfaceToPointer(&dstPtr).atomicSetPointer(interfaceToPointer(&valPtr)) +} + +// AtomicLoadPointer loads the pointer at the location pointed at by src, +// and stores that pointer value into the location pointed at by dst. +func (Export) AtomicLoadPointer(ptr Pointer, dst Pointer) { + *(*unsafe.Pointer)(unsafe.Pointer(dst)) = atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(ptr))) +} + +// AtomicInitializePointer makes ptr and dst point to the same value. +// +// If *ptr is a nil pointer, it sets *ptr = *dst. +// +// If *ptr is a non-nil pointer, it sets *dst = *ptr.
+func (Export) AtomicInitializePointer(ptr Pointer, dst Pointer) { + if !atomic.CompareAndSwapPointer((*unsafe.Pointer)(ptr), unsafe.Pointer(nil), *(*unsafe.Pointer)(dst)) { + *(*unsafe.Pointer)(unsafe.Pointer(dst)) = atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(ptr))) + } +} + +// MessageFieldStringOf returns the field formatted as a string, +// either as the field name if resolvable otherwise as a decimal string. +func (Export) MessageFieldStringOf(md protoreflect.MessageDescriptor, n protoreflect.FieldNumber) string { + fd := md.Fields().ByNumber(n) + if fd != nil { + return string(fd.Name()) + } + return strconv.Itoa(int(n)) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/bitmap.go b/vendor/google.golang.org/protobuf/internal/impl/bitmap.go new file mode 100644 index 00000000000..ea276547cd6 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/bitmap.go @@ -0,0 +1,34 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !race + +package impl + +// There is no additional data as we're not running under race detector. +type RaceDetectHookData struct{} + +// Empty stubs for when not using the race detector. Calls to these from index.go should be optimized away. +func (presence) raceDetectHookPresent(num uint32) {} +func (presence) raceDetectHookSetPresent(num uint32, size presenceSize) {} +func (presence) raceDetectHookClearPresent(num uint32) {} +func (presence) raceDetectHookAllocAndCopy(src presence) {} + +// raceDetectHookPresent is called by the generated file interface +// (*proto.internalFuncs) Present to optionally read an unprotected +// shadow bitmap when race detection is enabled. In regular code it is +// a noop. +func raceDetectHookPresent(field *uint32, num uint32) {} + +// raceDetectHookSetPresent is called by the generated file interface +// (*proto.internalFuncs) SetPresent to optionally write an unprotected +// shadow bitmap when race detection is enabled. In regular code it is +// a noop. +func raceDetectHookSetPresent(field *uint32, num uint32, size presenceSize) {} + +// raceDetectHookClearPresent is called by the generated file interface +// (*proto.internalFuncs) ClearPresent to optionally write an unprotected +// shadow bitmap when race detection is enabled. In regular code it is +// a noop. +func raceDetectHookClearPresent(field *uint32, num uint32) {} diff --git a/vendor/google.golang.org/protobuf/internal/impl/bitmap_race.go b/vendor/google.golang.org/protobuf/internal/impl/bitmap_race.go new file mode 100644 index 00000000000..e9a27583aeb --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/bitmap_race.go @@ -0,0 +1,126 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build race + +package impl + +// When running under race detector, we add a presence map of bytes, that we can access +// in the hook functions so that we trigger the race detection whenever we have concurrent +// Read-Writes or Write-Writes. The race detector does not otherwise detect invalid concurrent +// access to lazy fields as all updates of bitmaps and pointers are done using atomic operations. +type RaceDetectHookData struct { + shadowPresence *[]byte +} + +// Hooks for presence bitmap operations that allocate, read and write the shadowPresence +// using non-atomic operations. 
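The hooks below intentionally access the shadow bytes without synchronization, so that concurrent misuse of the presence APIs becomes a plain data race the detector can report. A distilled version of the pattern that -race then flags (sp stands in for the shadow slice):

	sp := make([]byte, 8)
	go func() { sp[3] = 1 }() // unsynchronized write, as in the SetPresent hook
	_ = sp[3]                 // concurrent read, as in the Present hook: reported by -race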
+func (data *RaceDetectHookData) raceDetectHookAlloc(size presenceSize) { + sp := make([]byte, size) + atomicStoreShadowPresence(&data.shadowPresence, &sp) +} + +func (p presence) raceDetectHookPresent(num uint32) { + data := p.toRaceDetectData() + if data == nil { + return + } + sp := atomicLoadShadowPresence(&data.shadowPresence) + if sp != nil { + _ = (*sp)[num] + } +} + +func (p presence) raceDetectHookSetPresent(num uint32, size presenceSize) { + data := p.toRaceDetectData() + if data == nil { + return + } + sp := atomicLoadShadowPresence(&data.shadowPresence) + if sp == nil { + data.raceDetectHookAlloc(size) + sp = atomicLoadShadowPresence(&data.shadowPresence) + } + (*sp)[num] = 1 +} + +func (p presence) raceDetectHookClearPresent(num uint32) { + data := p.toRaceDetectData() + if data == nil { + return + } + sp := atomicLoadShadowPresence(&data.shadowPresence) + if sp != nil { + (*sp)[num] = 0 + + } +} + +// raceDetectHookAllocAndCopy allocates a new shadowPresence slice at the destination presence p and copies +// shadowPresence bytes from the source presence q to it. +func (p presence) raceDetectHookAllocAndCopy(q presence) { + sData := q.toRaceDetectData() + dData := p.toRaceDetectData() + if sData == nil { + return + } + srcSp := atomicLoadShadowPresence(&sData.shadowPresence) + if srcSp == nil { + atomicStoreShadowPresence(&dData.shadowPresence, nil) + return + } + n := len(*srcSp) + dSlice := make([]byte, n) + atomicStoreShadowPresence(&dData.shadowPresence, &dSlice) + for i := 0; i < n; i++ { + dSlice[i] = (*srcSp)[i] + } +} + +// raceDetectHookPresent is called by the generated file interface +// (*proto.internalFuncs) Present to optionally read an unprotected +// shadow bitmap when race detection is enabled. In regular code it is +// a noop. +func raceDetectHookPresent(field *uint32, num uint32) { + data := findPointerToRaceDetectData(field, num) + if data == nil { + return + } + sp := atomicLoadShadowPresence(&data.shadowPresence) + if sp != nil { + _ = (*sp)[num] + } +} + +// raceDetectHookSetPresent is called by the generated file interface +// (*proto.internalFuncs) SetPresent to optionally write an unprotected +// shadow bitmap when race detection is enabled. In regular code it is +// a noop. +func raceDetectHookSetPresent(field *uint32, num uint32, size presenceSize) { + data := findPointerToRaceDetectData(field, num) + if data == nil { + return + } + sp := atomicLoadShadowPresence(&data.shadowPresence) + if sp == nil { + data.raceDetectHookAlloc(size) + sp = atomicLoadShadowPresence(&data.shadowPresence) + } + (*sp)[num] = 1 +} + +// raceDetectHookClearPresent is called by the generated file interface +// (*proto.internalFuncs) ClearPresent to optionally write an unprotected +// shadow bitmap when race detection is enabled. In regular code it is +// a noop.
+func raceDetectHookClearPresent(field *uint32, num uint32) { + data := findPointerToRaceDetectData(field, num) + if data == nil { + return + } + sp := atomicLoadShadowPresence(&data.shadowPresence) + if sp != nil { + (*sp)[num] = 0 + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go index f29e6a8fa88..fe2c719ce40 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go +++ b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go @@ -35,6 +35,12 @@ func (mi *MessageInfo) checkInitializedPointer(p pointer) error { } return nil } + + var presence presence + if mi.presenceOffset.IsValid() { + presence = p.Apply(mi.presenceOffset).PresenceInfo() + } + if mi.extensionOffset.IsValid() { e := p.Apply(mi.extensionOffset).Extensions() if err := mi.isInitExtensions(e); err != nil { @@ -45,6 +51,33 @@ func (mi *MessageInfo) checkInitializedPointer(p pointer) error { if !f.isRequired && f.funcs.isInit == nil { continue } + + if f.presenceIndex != noPresence { + if !presence.Present(f.presenceIndex) { + if f.isRequired { + return errors.RequiredNotSet(string(mi.Desc.Fields().ByNumber(f.num).FullName())) + } + continue + } + if f.funcs.isInit != nil { + f.mi.init() + if f.mi.needsInitCheck { + if f.isLazy && p.Apply(f.offset).AtomicGetPointer().IsNil() { + lazy := *p.Apply(mi.lazyOffset).LazyInfoPtr() + if !lazy.AllowedPartial() { + // Nothing to see here, it was checked on unmarshal + continue + } + mi.lazyUnmarshal(p, f.num) + } + if err := f.funcs.isInit(p.Apply(f.offset), f); err != nil { + return err + } + } + } + continue + } + fptr := p.Apply(f.offset) if f.isPointer && fptr.Elem().IsNil() { if f.isRequired { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go index 4bb0a7a20ce..0d5b546e0ee 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go @@ -67,7 +67,6 @@ type lazyExtensionValue struct { xi *extensionFieldInfo value protoreflect.Value b []byte - fn func() protoreflect.Value } type ExtensionField struct { @@ -158,10 +157,9 @@ func (f *ExtensionField) lazyInit() { } f.lazy.value = val } else { - f.lazy.value = f.lazy.fn() + panic("No support for lazy fns for ExtensionField") } f.lazy.xi = nil - f.lazy.fn = nil f.lazy.b = nil atomic.StoreUint32(&f.lazy.atomicOnce, 1) } @@ -174,13 +172,6 @@ func (f *ExtensionField) Set(t protoreflect.ExtensionType, v protoreflect.Value) f.lazy = nil } -// SetLazy sets the type and a value that is to be lazily evaluated upon first use. -// This must not be called concurrently. -func (f *ExtensionField) SetLazy(t protoreflect.ExtensionType, fn func() protoreflect.Value) { - f.typ = t - f.lazy = &lazyExtensionValue{fn: fn} -} - // Value returns the value of the extension field. // This may be called concurrently. 
func (f *ExtensionField) Value() protoreflect.Value { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go index 78ee47e44b9..7c1f66c8c19 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go @@ -65,6 +65,9 @@ func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si if err != nil { return out, err } + if cf.funcs.isInit == nil { + out.initialized = true + } vi.Set(vw) return out, nil } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field_opaque.go new file mode 100644 index 00000000000..76818ea252f --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field_opaque.go @@ -0,0 +1,264 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/reflect/protoreflect" +) + +func makeOpaqueMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) { + mi := getMessageInfo(ft) + if mi == nil { + panic(fmt.Sprintf("invalid field: %v: unsupported message type %v", fd.FullName(), ft)) + } + switch fd.Kind() { + case protoreflect.MessageKind: + return mi, pointerCoderFuncs{ + size: sizeOpaqueMessage, + marshal: appendOpaqueMessage, + unmarshal: consumeOpaqueMessage, + isInit: isInitOpaqueMessage, + merge: mergeOpaqueMessage, + } + case protoreflect.GroupKind: + return mi, pointerCoderFuncs{ + size: sizeOpaqueGroup, + marshal: appendOpaqueGroup, + unmarshal: consumeOpaqueGroup, + isInit: isInitOpaqueMessage, + merge: mergeOpaqueMessage, + } + } + panic("unexpected field kind") +} + +func sizeOpaqueMessage(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + return protowire.SizeBytes(f.mi.sizePointer(p.AtomicGetPointer(), opts)) + f.tagsize +} + +func appendOpaqueMessage(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + mp := p.AtomicGetPointer() + calculatedSize := f.mi.sizePointer(mp, opts) + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(calculatedSize)) + before := len(b) + b, err := f.mi.marshalAppendPointer(b, mp, opts) + if measuredSize := len(b) - before; calculatedSize != measuredSize && err == nil { + return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize) + } + return b, err +} + +func consumeOpaqueMessage(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + mp := p.AtomicGetPointer() + if mp.IsNil() { + mp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))) + } + o, err := f.mi.unmarshalPointer(v, mp, 0, opts) + if err != nil { + return out, err + } + out.n = n + out.initialized = o.initialized + return out, nil +} + +func isInitOpaqueMessage(p pointer, f *coderFieldInfo) error { + mp := p.AtomicGetPointer() + if mp.IsNil() { + return nil + } + return f.mi.checkInitializedPointer(mp) +} + +func mergeOpaqueMessage(dst, src pointer, f 
*coderFieldInfo, opts mergeOptions) { + dstmp := dst.AtomicGetPointer() + if dstmp.IsNil() { + dstmp = dst.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))) + } + f.mi.mergePointer(dstmp, src.AtomicGetPointer(), opts) +} + +func sizeOpaqueGroup(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + return 2*f.tagsize + f.mi.sizePointer(p.AtomicGetPointer(), opts) +} + +func appendOpaqueGroup(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, f.wiretag) // start group + b, err := f.mi.marshalAppendPointer(b, p.AtomicGetPointer(), opts) + b = protowire.AppendVarint(b, f.wiretag+1) // end group + return b, err +} + +func consumeOpaqueGroup(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.StartGroupType { + return out, errUnknown + } + mp := p.AtomicGetPointer() + if mp.IsNil() { + mp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))) + } + o, e := f.mi.unmarshalPointer(b, mp, f.num, opts) + return o, e +} + +func makeOpaqueRepeatedMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) { + if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice { + panic(fmt.Sprintf("invalid field: %v: unsupported type for opaque repeated message: %v", fd.FullName(), ft)) + } + mt := ft.Elem().Elem() // *[]*T -> *T + mi := getMessageInfo(mt) + if mi == nil { + panic(fmt.Sprintf("invalid field: %v: unsupported message type %v", fd.FullName(), mt)) + } + switch fd.Kind() { + case protoreflect.MessageKind: + return mi, pointerCoderFuncs{ + size: sizeOpaqueMessageSlice, + marshal: appendOpaqueMessageSlice, + unmarshal: consumeOpaqueMessageSlice, + isInit: isInitOpaqueMessageSlice, + merge: mergeOpaqueMessageSlice, + } + case protoreflect.GroupKind: + return mi, pointerCoderFuncs{ + size: sizeOpaqueGroupSlice, + marshal: appendOpaqueGroupSlice, + unmarshal: consumeOpaqueGroupSlice, + isInit: isInitOpaqueMessageSlice, + merge: mergeOpaqueMessageSlice, + } + } + panic("unexpected field kind") +} + +func sizeOpaqueMessageSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := p.AtomicGetPointer().PointerSlice() + n := 0 + for _, v := range s { + n += protowire.SizeBytes(f.mi.sizePointer(v, opts)) + f.tagsize + } + return n +} + +func appendOpaqueMessageSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := p.AtomicGetPointer().PointerSlice() + var err error + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + siz := f.mi.sizePointer(v, opts) + b = protowire.AppendVarint(b, uint64(siz)) + before := len(b) + b, err = f.mi.marshalAppendPointer(b, v, opts) + if err != nil { + return b, err + } + if measuredSize := len(b) - before; siz != measuredSize { + return nil, errors.MismatchedSizeCalculation(siz, measuredSize) + } + } + return b, nil +} + +func consumeOpaqueMessageSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + mp := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())) + o, err := f.mi.unmarshalPointer(v, mp, 0, opts) + if err != nil { + return out, err + } + sp := p.AtomicGetPointer() + if sp.IsNil() { + sp = 
p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem()))) + } + sp.AppendPointerSlice(mp) + out.n = n + out.initialized = o.initialized + return out, nil +} + +func isInitOpaqueMessageSlice(p pointer, f *coderFieldInfo) error { + sp := p.AtomicGetPointer() + if sp.IsNil() { + return nil + } + s := sp.PointerSlice() + for _, v := range s { + if err := f.mi.checkInitializedPointer(v); err != nil { + return err + } + } + return nil +} + +func mergeOpaqueMessageSlice(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + ds := dst.AtomicGetPointer() + if ds.IsNil() { + ds = dst.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem()))) + } + for _, sp := range src.AtomicGetPointer().PointerSlice() { + dm := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())) + f.mi.mergePointer(dm, sp, opts) + ds.AppendPointerSlice(dm) + } +} + +func sizeOpaqueGroupSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := p.AtomicGetPointer().PointerSlice() + n := 0 + for _, v := range s { + n += 2*f.tagsize + f.mi.sizePointer(v, opts) + } + return n +} + +func appendOpaqueGroupSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := p.AtomicGetPointer().PointerSlice() + var err error + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) // start group + b, err = f.mi.marshalAppendPointer(b, v, opts) + if err != nil { + return b, err + } + b = protowire.AppendVarint(b, f.wiretag+1) // end group + } + return b, nil +} + +func consumeOpaqueGroupSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.StartGroupType { + return out, errUnknown + } + mp := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())) + out, err = f.mi.unmarshalPointer(b, mp, f.num, opts) + if err != nil { + return out, err + } + sp := p.AtomicGetPointer() + if sp.IsNil() { + sp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem()))) + } + sp.AppendPointerSlice(mp) + return out, err +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go index fb35f0bae9c..229c6980138 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go @@ -94,7 +94,7 @@ func sizeMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalO return 0 } n := 0 - iter := mapRange(mapv) + iter := mapv.MapRange() for iter.Next() { key := mapi.conv.keyConv.PBValueOf(iter.Key()).MapKey() keySize := mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts) @@ -281,7 +281,7 @@ func appendMap(b []byte, mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, o if opts.Deterministic() { return appendMapDeterministic(b, mapv, mapi, f, opts) } - iter := mapRange(mapv) + iter := mapv.MapRange() for iter.Next() { var err error b = protowire.AppendVarint(b, f.wiretag) @@ -328,7 +328,7 @@ func isInitMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo) error { if !mi.needsInitCheck { return nil } - iter := mapRange(mapv) + iter := mapv.MapRange() for iter.Next() { val := pointerOfValue(iter.Value()) if err := mi.checkInitializedPointer(val); err != nil { @@ -336,7 +336,7 @@ func isInitMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo) error { } } } else { - iter := mapRange(mapv) + iter := mapv.MapRange() for iter.Next() { val := mapi.conv.valConv.PBValueOf(iter.Value()) if err := mapi.valFuncs.isInit(val); err != nil { 
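These codec_map.go hunks drop the mapRange indirection in favor of calling reflect.Value.MapRange directly; MapRange has been in the standard library since Go 1.12, which is also why the two build-tagged shim files are deleted just below. A minimal usage sketch:

	package main

	import (
		"fmt"
		"reflect"
	)

	func main() {
		m := reflect.ValueOf(map[string]int{"a": 1, "b": 2})
		for iter := m.MapRange(); iter.Next(); {
			fmt.Println(iter.Key(), iter.Value()) // iteration order is unspecified
		}
	}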
@@ -356,7 +356,7 @@ func mergeMap(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { if dstm.IsNil() { dstm.Set(reflect.MakeMap(f.ft)) } - iter := mapRange(srcm) + iter := srcm.MapRange() for iter.Next() { dstm.SetMapIndex(iter.Key(), iter.Value()) } @@ -371,7 +371,7 @@ func mergeMapOfBytes(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { if dstm.IsNil() { dstm.Set(reflect.MakeMap(f.ft)) } - iter := mapRange(srcm) + iter := srcm.MapRange() for iter.Next() { dstm.SetMapIndex(iter.Key(), reflect.ValueOf(append(emptyBuf[:], iter.Value().Bytes()...))) } @@ -386,7 +386,7 @@ func mergeMapOfMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { if dstm.IsNil() { dstm.Set(reflect.MakeMap(f.ft)) } - iter := mapRange(srcm) + iter := srcm.MapRange() for iter.Next() { val := reflect.New(f.ft.Elem().Elem()) if f.mi != nil { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go deleted file mode 100644 index 4b15493f2f4..00000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.12 -// +build !go1.12 - -package impl - -import "reflect" - -type mapIter struct { - v reflect.Value - keys []reflect.Value -} - -// mapRange provides a less-efficient equivalent to -// the Go 1.12 reflect.Value.MapRange method. -func mapRange(v reflect.Value) *mapIter { - return &mapIter{v: v} -} - -func (i *mapIter) Next() bool { - if i.keys == nil { - i.keys = i.v.MapKeys() - } else { - i.keys = i.keys[1:] - } - return len(i.keys) > 0 -} - -func (i *mapIter) Key() reflect.Value { - return i.keys[0] -} - -func (i *mapIter) Value() reflect.Value { - return i.v.MapIndex(i.keys[0]) -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go deleted file mode 100644 index 0b31b66eaf8..00000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build go1.12 -// +build go1.12 - -package impl - -import "reflect" - -func mapRange(v reflect.Value) *reflect.MapIter { return v.MapRange() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go index 6b2fdbb739a..111d95833d4 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go @@ -32,6 +32,10 @@ type coderMessageInfo struct { needsInitCheck bool isMessageSet bool numRequiredFields uint8 + + lazyOffset offset + presenceOffset offset + presenceSize presenceSize } type coderFieldInfo struct { @@ -45,12 +49,19 @@ type coderFieldInfo struct { tagsize int // size of the varint-encoded tag isPointer bool // true if IsNil may be called on the struct field isRequired bool // true if field is required + + isLazy bool + presenceIndex uint32 } +const noPresence = 0xffffffff + func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { mi.sizecacheOffset = invalidOffset mi.unknownOffset = invalidOffset mi.extensionOffset = invalidOffset + mi.lazyOffset = invalidOffset + mi.presenceOffset = si.presenceOffset if si.sizecacheOffset.IsValid() && si.sizecacheType == sizecacheType { mi.sizecacheOffset = si.sizecacheOffset @@ -107,12 +118,12 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { }, } case isOneof: - fieldOffset = offsetOf(fs, mi.Exporter) + fieldOffset = offsetOf(fs) case fd.IsWeak(): fieldOffset = si.weakOffset funcs = makeWeakMessageFieldCoder(fd) default: - fieldOffset = offsetOf(fs, mi.Exporter) + fieldOffset = offsetOf(fs) childMessage, funcs = fieldCoder(fd, ft) } cf := &preallocFields[i] @@ -127,6 +138,8 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { validation: newFieldValidationInfo(mi, si, fd, ft), isPointer: fd.Cardinality() == protoreflect.Repeated || fd.HasPresence(), isRequired: fd.Cardinality() == protoreflect.Required, + + presenceIndex: noPresence, } mi.orderedCoderFields = append(mi.orderedCoderFields, cf) mi.coderFields[cf.num] = cf @@ -189,6 +202,9 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { if mi.methods.Merge == nil { mi.methods.Merge = mi.merge } + if mi.methods.Equal == nil { + mi.methods.Equal = equal + } } // getUnknownBytes returns a *[]byte for the unknown fields. diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go new file mode 100644 index 00000000000..f81d7d0db9a --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go @@ -0,0 +1,156 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package impl + +import ( + "fmt" + "reflect" + "sort" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/order" + "google.golang.org/protobuf/reflect/protoreflect" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +func (mi *MessageInfo) makeOpaqueCoderMethods(t reflect.Type, si opaqueStructInfo) { + mi.sizecacheOffset = si.sizecacheOffset + mi.unknownOffset = si.unknownOffset + mi.unknownPtrKind = si.unknownType.Kind() == reflect.Ptr + mi.extensionOffset = si.extensionOffset + mi.lazyOffset = si.lazyOffset + mi.presenceOffset = si.presenceOffset + + mi.coderFields = make(map[protowire.Number]*coderFieldInfo) + fields := mi.Desc.Fields() + for i := 0; i < fields.Len(); i++ { + fd := fields.Get(i) + + fs := si.fieldsByNumber[fd.Number()] + if fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic() { + fs = si.oneofsByName[fd.ContainingOneof().Name()] + } + ft := fs.Type + var wiretag uint64 + if !fd.IsPacked() { + wiretag = protowire.EncodeTag(fd.Number(), wireTypes[fd.Kind()]) + } else { + wiretag = protowire.EncodeTag(fd.Number(), protowire.BytesType) + } + var fieldOffset offset + var funcs pointerCoderFuncs + var childMessage *MessageInfo + switch { + case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): + fieldOffset = offsetOf(fs) + case fd.IsWeak(): + fieldOffset = si.weakOffset + funcs = makeWeakMessageFieldCoder(fd) + case fd.Message() != nil && !fd.IsMap(): + fieldOffset = offsetOf(fs) + if fd.IsList() { + childMessage, funcs = makeOpaqueRepeatedMessageFieldCoder(fd, ft) + } else { + childMessage, funcs = makeOpaqueMessageFieldCoder(fd, ft) + } + default: + fieldOffset = offsetOf(fs) + childMessage, funcs = fieldCoder(fd, ft) + } + cf := &coderFieldInfo{ + num: fd.Number(), + offset: fieldOffset, + wiretag: wiretag, + ft: ft, + tagsize: protowire.SizeVarint(wiretag), + funcs: funcs, + mi: childMessage, + validation: newFieldValidationInfo(mi, si.structInfo, fd, ft), + isPointer: (fd.Cardinality() == protoreflect.Repeated || + fd.Kind() == protoreflect.MessageKind || + fd.Kind() == protoreflect.GroupKind), + isRequired: fd.Cardinality() == protoreflect.Required, + presenceIndex: noPresence, + } + + // TODO: Use presence for all fields. + // + // In some cases, such as maps, presence means only "might be set" rather + // than "is definitely set", but every field should have a presence bit to + // permit us to skip over definitely-unset fields at marshal time. 
+ + var hasPresence bool + hasPresence, cf.isLazy = usePresenceForField(si, fd) + + if hasPresence { + cf.presenceIndex, mi.presenceSize = presenceIndex(mi.Desc, fd) + } + + mi.orderedCoderFields = append(mi.orderedCoderFields, cf) + mi.coderFields[cf.num] = cf + } + for i, oneofs := 0, mi.Desc.Oneofs(); i < oneofs.Len(); i++ { + if od := oneofs.Get(i); !od.IsSynthetic() { + mi.initOneofFieldCoders(od, si.structInfo) + } + } + if messageset.IsMessageSet(mi.Desc) { + if !mi.extensionOffset.IsValid() { + panic(fmt.Sprintf("%v: MessageSet with no extensions field", mi.Desc.FullName())) + } + if !mi.unknownOffset.IsValid() { + panic(fmt.Sprintf("%v: MessageSet with no unknown field", mi.Desc.FullName())) + } + mi.isMessageSet = true + } + sort.Slice(mi.orderedCoderFields, func(i, j int) bool { + return mi.orderedCoderFields[i].num < mi.orderedCoderFields[j].num + }) + + var maxDense protoreflect.FieldNumber + for _, cf := range mi.orderedCoderFields { + if cf.num >= 16 && cf.num >= 2*maxDense { + break + } + maxDense = cf.num + } + mi.denseCoderFields = make([]*coderFieldInfo, maxDense+1) + for _, cf := range mi.orderedCoderFields { + if int(cf.num) > len(mi.denseCoderFields) { + break + } + mi.denseCoderFields[cf.num] = cf + } + + // To preserve compatibility with historic wire output, marshal oneofs last. + if mi.Desc.Oneofs().Len() > 0 { + sort.Slice(mi.orderedCoderFields, func(i, j int) bool { + fi := fields.ByNumber(mi.orderedCoderFields[i].num) + fj := fields.ByNumber(mi.orderedCoderFields[j].num) + return order.LegacyFieldOrder(fi, fj) + }) + } + + mi.needsInitCheck = needsInitCheck(mi.Desc) + if mi.methods.Marshal == nil && mi.methods.Size == nil { + mi.methods.Flags |= piface.SupportMarshalDeterministic + mi.methods.Marshal = mi.marshal + mi.methods.Size = mi.size + } + if mi.methods.Unmarshal == nil { + mi.methods.Flags |= piface.SupportUnmarshalDiscardUnknown + mi.methods.Unmarshal = mi.unmarshal + } + if mi.methods.CheckInitialized == nil { + mi.methods.CheckInitialized = mi.checkInitialized + } + if mi.methods.Merge == nil { + mi.methods.Merge = mi.merge + } + if mi.methods.Equal == nil { + mi.methods.Equal = equal + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go deleted file mode 100644 index 145c577bd6b..00000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build purego || appengine -// +build purego appengine - -package impl - -import ( - "reflect" - - "google.golang.org/protobuf/encoding/protowire" -) - -func sizeEnum(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { - v := p.v.Elem().Int() - return f.tagsize + protowire.SizeVarint(uint64(v)) -} - -func appendEnum(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := p.v.Elem().Int() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(v)) - return b, nil -} - -func consumeEnum(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return out, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - p.v.Elem().SetInt(int64(v)) - out.n = n - return out, nil -} - -func mergeEnum(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - dst.v.Elem().Set(src.v.Elem()) -} - -var coderEnum = pointerCoderFuncs{ - size: sizeEnum, - marshal: appendEnum, - unmarshal: consumeEnum, - merge: mergeEnum, -} - -func sizeEnumNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - if p.v.Elem().Int() == 0 { - return 0 - } - return sizeEnum(p, f, opts) -} - -func appendEnumNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - if p.v.Elem().Int() == 0 { - return b, nil - } - return appendEnum(b, p, f, opts) -} - -func mergeEnumNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - if src.v.Elem().Int() != 0 { - dst.v.Elem().Set(src.v.Elem()) - } -} - -var coderEnumNoZero = pointerCoderFuncs{ - size: sizeEnumNoZero, - marshal: appendEnumNoZero, - unmarshal: consumeEnum, - merge: mergeEnumNoZero, -} - -func sizeEnumPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - return sizeEnum(pointer{p.v.Elem()}, f, opts) -} - -func appendEnumPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - return appendEnum(b, pointer{p.v.Elem()}, f, opts) -} - -func consumeEnumPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return out, errUnknown - } - if p.v.Elem().IsNil() { - p.v.Elem().Set(reflect.New(p.v.Elem().Type().Elem())) - } - return consumeEnum(b, pointer{p.v.Elem()}, wtyp, f, opts) -} - -func mergeEnumPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - if !src.v.Elem().IsNil() { - v := reflect.New(dst.v.Type().Elem().Elem()) - v.Elem().Set(src.v.Elem().Elem()) - dst.v.Elem().Set(v) - } -} - -var coderEnumPtr = pointerCoderFuncs{ - size: sizeEnumPtr, - marshal: appendEnumPtr, - unmarshal: consumeEnumPtr, - merge: mergeEnumPtr, -} - -func sizeEnumSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := p.v.Elem() - for i, llen := 0, s.Len(); i < llen; i++ { - size += protowire.SizeVarint(uint64(s.Index(i).Int())) + f.tagsize - } - return size -} - -func appendEnumSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := p.v.Elem() - for i, llen := 0, s.Len(); i < llen; i++ { - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(s.Index(i).Int())) - } - return b, nil -} - -func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - s := p.v.Elem() - if wtyp == protowire.BytesType { - b, n := protowire.ConsumeBytes(b) - if n 
< 0 { - return out, errDecode - } - for len(b) > 0 { - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - rv := reflect.New(s.Type().Elem()).Elem() - rv.SetInt(int64(v)) - s.Set(reflect.Append(s, rv)) - b = b[n:] - } - out.n = n - return out, nil - } - if wtyp != protowire.VarintType { - return out, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - rv := reflect.New(s.Type().Elem()).Elem() - rv.SetInt(int64(v)) - s.Set(reflect.Append(s, rv)) - out.n = n - return out, nil -} - -func mergeEnumSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - dst.v.Elem().Set(reflect.AppendSlice(dst.v.Elem(), src.v.Elem())) -} - -var coderEnumSlice = pointerCoderFuncs{ - size: sizeEnumSlice, - marshal: appendEnumSlice, - unmarshal: consumeEnumSlice, - merge: mergeEnumSlice, -} - -func sizeEnumPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := p.v.Elem() - llen := s.Len() - if llen == 0 { - return 0 - } - n := 0 - for i := 0; i < llen; i++ { - n += protowire.SizeVarint(uint64(s.Index(i).Int())) - } - return f.tagsize + protowire.SizeBytes(n) -} - -func appendEnumPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := p.v.Elem() - llen := s.Len() - if llen == 0 { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - n := 0 - for i := 0; i < llen; i++ { - n += protowire.SizeVarint(uint64(s.Index(i).Int())) - } - b = protowire.AppendVarint(b, uint64(n)) - for i := 0; i < llen; i++ { - b = protowire.AppendVarint(b, uint64(s.Index(i).Int())) - } - return b, nil -} - -var coderEnumPackedSlice = pointerCoderFuncs{ - size: sizeEnumPackedSlice, - marshal: appendEnumPackedSlice, - unmarshal: consumeEnumSlice, - merge: mergeEnumSlice, -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go index 757642e23c9..077712c2c5a 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine -// +build !purego,!appengine - package impl // When using unsafe pointers, we can just treat enum values as int32s. diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go index e06ece55a26..f72ddd882f3 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go @@ -322,7 +322,7 @@ func (c *stringConverter) PBValueOf(v reflect.Value) protoreflect.Value { return protoreflect.ValueOfString(v.Convert(stringType).String()) } func (c *stringConverter) GoValueOf(v protoreflect.Value) reflect.Value { - // pref.Value.String never panics, so we go through an interface + // protoreflect.Value.String never panics, so we go through an interface // conversion here to check the type. 
s := v.Interface().(string) if c.goType.Kind() == reflect.Slice && s == "" { diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go index 304244a651d..e4580b3ac2e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go @@ -101,7 +101,7 @@ func (ms *mapReflect) Mutable(k protoreflect.MapKey) protoreflect.Value { return v } func (ms *mapReflect) Range(f func(protoreflect.MapKey, protoreflect.Value) bool) { - iter := mapRange(ms.v) + iter := ms.v.MapRange() for iter.Next() { k := ms.keyConv.PBValueOf(iter.Key()).MapKey() v := ms.valConv.PBValueOf(iter.Value()) diff --git a/vendor/google.golang.org/protobuf/internal/impl/decode.go b/vendor/google.golang.org/protobuf/internal/impl/decode.go index cda0520c275..e0dd21fa5f4 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/decode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/decode.go @@ -34,6 +34,8 @@ func (o unmarshalOptions) Options() proto.UnmarshalOptions { AllowPartial: true, DiscardUnknown: o.DiscardUnknown(), Resolver: o.resolver, + + NoLazyDecoding: o.NoLazyDecoding(), } } @@ -41,13 +43,26 @@ func (o unmarshalOptions) DiscardUnknown() bool { return o.flags&protoiface.UnmarshalDiscardUnknown != 0 } -func (o unmarshalOptions) IsDefault() bool { - return o.flags == 0 && o.resolver == protoregistry.GlobalTypes +func (o unmarshalOptions) AliasBuffer() bool { return o.flags&protoiface.UnmarshalAliasBuffer != 0 } +func (o unmarshalOptions) Validated() bool { return o.flags&protoiface.UnmarshalValidated != 0 } +func (o unmarshalOptions) NoLazyDecoding() bool { + return o.flags&protoiface.UnmarshalNoLazyDecoding != 0 +} + +func (o unmarshalOptions) CanBeLazy() bool { + if o.resolver != protoregistry.GlobalTypes { + return false + } + // We ignore the UnmarshalInvalidateSizeCache even though it's not in the default set + return (o.flags & ^(protoiface.UnmarshalAliasBuffer | protoiface.UnmarshalValidated | protoiface.UnmarshalCheckRequired)) == 0 } var lazyUnmarshalOptions = unmarshalOptions{ resolver: protoregistry.GlobalTypes, - depth: protowire.DefaultRecursionLimit, + + flags: protoiface.UnmarshalAliasBuffer | protoiface.UnmarshalValidated, + + depth: protowire.DefaultRecursionLimit, } type unmarshalOutput struct { @@ -94,9 +109,30 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire. if flags.ProtoLegacy && mi.isMessageSet { return unmarshalMessageSet(mi, b, p, opts) } + + lazyDecoding := LazyEnabled() // default + if opts.NoLazyDecoding() { + lazyDecoding = false // explicitly disabled + } + if mi.lazyOffset.IsValid() && lazyDecoding { + return mi.unmarshalPointerLazy(b, p, groupTag, opts) + } + return mi.unmarshalPointerEager(b, p, groupTag, opts) +} + +// unmarshalPointerEager is the message unmarshalling function for all messages that are not lazy. +// The corresponding function for Lazy is in google_lazy.go. +func (mi *MessageInfo) unmarshalPointerEager(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) { + initialized := true var requiredMask uint64 var exts *map[int32]ExtensionField + + var presence presence + if mi.presenceOffset.IsValid() { + presence = p.Apply(mi.presenceOffset).PresenceInfo() + } + start := len(b) for len(b) > 0 { // Parse the tag (field number and wire type). 
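The hunk above wires the new lazy-decoding controls through to the exported options and gates the lazy path on them. From the caller's side this looks roughly like the following sketch, where pb.M is a hypothetical generated message and data its wire bytes:

	m := &pb.M{}
	opts := proto.UnmarshalOptions{NoLazyDecoding: true} // force the eager path
	if err := opts.Unmarshal(data, m); err != nil {
		// handle the error
	}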
@@ -154,6 +190,11 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire. if f.funcs.isInit != nil && !o.initialized { initialized = false } + + if f.presenceIndex != noPresence { + presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize) + } + default: // Possible extension. if exts == nil && mi.extensionOffset.IsValid() { @@ -222,7 +263,7 @@ func (mi *MessageInfo) unmarshalExtension(b []byte, num protowire.Number, wtyp p return out, errUnknown } if flags.LazyUnmarshalExtensions { - if opts.IsDefault() && x.canLazy(xt) { + if opts.CanBeLazy() && x.canLazy(xt) { out, valid := skipExtension(b, xi, num, wtyp, opts) switch valid { case ValidationValid: @@ -270,6 +311,13 @@ func skipExtension(b []byte, xi *extensionFieldInfo, num protowire.Number, wtyp if n < 0 { return out, ValidationUnknown } + + if opts.Validated() { + out.initialized = true + out.n = n + return out, ValidationValid + } + out, st := xi.validation.mi.validate(v, 0, opts) out.n = n return out, st diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go index febd2122472..b2e212291d6 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/encode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go @@ -10,7 +10,8 @@ import ( "sync/atomic" "google.golang.org/protobuf/internal/flags" - proto "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/internal/protolazy" + "google.golang.org/protobuf/proto" piface "google.golang.org/protobuf/runtime/protoiface" ) @@ -71,11 +72,39 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int e := p.Apply(mi.extensionOffset).Extensions() size += mi.sizeExtensions(e, opts) } + + var lazy **protolazy.XXX_lazyUnmarshalInfo + var presence presence + if mi.presenceOffset.IsValid() { + presence = p.Apply(mi.presenceOffset).PresenceInfo() + if mi.lazyOffset.IsValid() { + lazy = p.Apply(mi.lazyOffset).LazyInfoPtr() + } + } + for _, f := range mi.orderedCoderFields { if f.funcs.size == nil { continue } fptr := p.Apply(f.offset) + + if f.presenceIndex != noPresence { + if !presence.Present(f.presenceIndex) { + continue + } + + if f.isLazy && fptr.AtomicGetPointer().IsNil() { + if lazyFields(opts) { + size += (*lazy).SizeField(uint32(f.num)) + continue + } else { + mi.lazyUnmarshal(p, f.num) + } + } + size += f.funcs.size(fptr, f, opts) + continue + } + if f.isPointer && fptr.Elem().IsNil() { continue } @@ -134,11 +163,52 @@ func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOpt return b, err } } + + var lazy **protolazy.XXX_lazyUnmarshalInfo + var presence presence + if mi.presenceOffset.IsValid() { + presence = p.Apply(mi.presenceOffset).PresenceInfo() + if mi.lazyOffset.IsValid() { + lazy = p.Apply(mi.lazyOffset).LazyInfoPtr() + } + } + for _, f := range mi.orderedCoderFields { if f.funcs.marshal == nil { continue } fptr := p.Apply(f.offset) + + if f.presenceIndex != noPresence { + if !presence.Present(f.presenceIndex) { + continue + } + if f.isLazy { + // Be careful, this field needs to be read atomically, like for a get + if f.isPointer && fptr.AtomicGetPointer().IsNil() { + if lazyFields(opts) { + b, _ = (*lazy).AppendField(b, uint32(f.num)) + continue + } else { + mi.lazyUnmarshal(p, f.num) + } + } + + b, err = f.funcs.marshal(b, fptr, f, opts) + if err != nil { + return b, err + } + continue + } else if f.isPointer && fptr.Elem().IsNil() { + continue + } + b, err = f.funcs.marshal(b, fptr, f, opts) + if err != 
nil { + return b, err + } + continue + } + if f.isPointer && fptr.Elem().IsNil() { continue } @@ -163,6 +233,14 @@ func fullyLazyExtensions(opts marshalOptions) bool { return opts.flags&piface.MarshalDeterministic == 0 } +// lazyFields returns true if we should attempt to keep fields lazy over size and marshal. +func lazyFields(opts marshalOptions) bool { + // When deterministic marshaling is requested, force an unmarshal for lazy + // fields to produce a deterministic result, instead of passing through + // bytes lazily that may or may not match what Go Protobuf would produce. + return opts.flags&piface.MarshalDeterministic == 0 +} + func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marshalOptions) (n int) { if ext == nil { return 0 diff --git a/vendor/google.golang.org/protobuf/internal/impl/equal.go b/vendor/google.golang.org/protobuf/internal/impl/equal.go new file mode 100644 index 00000000000..9f6c32a7d8c --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/equal.go @@ -0,0 +1,224 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "bytes" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +func equal(in protoiface.EqualInput) protoiface.EqualOutput { + return protoiface.EqualOutput{Equal: equalMessage(in.MessageA, in.MessageB)} +} + +// equalMessage is a fast-path variant of protoreflect.equalMessage. +// It takes advantage of the internal messageState type to avoid +// unnecessary allocations and type assertions. +func equalMessage(mx, my protoreflect.Message) bool { + if mx == nil || my == nil { + return mx == my + } + if mx.Descriptor() != my.Descriptor() { + return false + } + + msx, ok := mx.(*messageState) + if !ok { + return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my)) + } + msy, ok := my.(*messageState) + if !ok { + return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my)) + } + + mi := msx.messageInfo() + miy := msy.messageInfo() + if mi != miy { + return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my)) + } + mi.init() + // Compare regular fields. + // Modified Message.Range code that compares two messages of the same type + // while going over the fields. + for _, ri := range mi.rangeInfos { + var fd protoreflect.FieldDescriptor + var vx, vy protoreflect.Value + + switch ri := ri.(type) { + case *fieldInfo: + hx := ri.has(msx.pointer()) + hy := ri.has(msy.pointer()) + if hx != hy { + return false + } + if !hx { + continue + } + fd = ri.fieldDesc + vx = ri.get(msx.pointer()) + vy = ri.get(msy.pointer()) + case *oneofInfo: + fnx := ri.which(msx.pointer()) + fny := ri.which(msy.pointer()) + if fnx != fny { + return false + } + if fnx <= 0 { + continue + } + fi := mi.fields[fnx] + fd = fi.fieldDesc + vx = fi.get(msx.pointer()) + vy = fi.get(msy.pointer()) + } + + if !equalValue(fd, vx, vy) { + return false + } + } + + // Compare extensions. + // This is more complicated because mx or my could have empty/nil extension maps; + // however, some populated extension map values are equal to nil extension maps.
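+	// For example, an extension whose value is an empty list compares equal
+	// to the extension being absent altogether, which is why both loops
+	// below special-case empty lists instead of treating a missing map
+	// entry as an immediate inequality.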
+ emx := mi.extensionMap(msx.pointer()) + emy := mi.extensionMap(msy.pointer()) + if emx != nil { + for k, x := range *emx { + xd := x.Type().TypeDescriptor() + xv := x.Value() + var y ExtensionField + ok := false + if emy != nil { + y, ok = (*emy)[k] + } + // We need to treat empty lists as equal to nil values + if emy == nil || !ok { + if xd.IsList() && xv.List().Len() == 0 { + continue + } + return false + } + + if !equalValue(xd, xv, y.Value()) { + return false + } + } + } + if emy != nil { + // emy may have extensions emx does not have; check them as well + for k, y := range *emy { + if emx != nil { + // emx has the field, so we already checked it + if _, ok := (*emx)[k]; ok { + continue + } + } + // Empty lists are equal to nil + if y.Type().TypeDescriptor().IsList() && y.Value().List().Len() == 0 { + continue + } + + // Can't be equal if the extension is populated + return false + } + } + + return equalUnknown(mx.GetUnknown(), my.GetUnknown()) +} + +func equalValue(fd protoreflect.FieldDescriptor, vx, vy protoreflect.Value) bool { + // slow path + if fd.Kind() != protoreflect.MessageKind { + return vx.Equal(vy) + } + + // fast path special cases + if fd.IsMap() { + if fd.MapValue().Kind() == protoreflect.MessageKind { + return equalMessageMap(vx.Map(), vy.Map()) + } + return vx.Equal(vy) + } + + if fd.IsList() { + return equalMessageList(vx.List(), vy.List()) + } + + return equalMessage(vx.Message(), vy.Message()) +} + +// Mostly copied from protoreflect.equalMap. +// This variant only works for messages as map types. +// All other map types should be handled via Value.Equal. +func equalMessageMap(mx, my protoreflect.Map) bool { + if mx.Len() != my.Len() { + return false + } + equal := true + mx.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool { + if !my.Has(k) { + equal = false + return false + } + vy := my.Get(k) + equal = equalMessage(vx.Message(), vy.Message()) + return equal + }) + return equal +} + +// Mostly copied from protoreflect.equalList. +// The only change is the usage of equalMessage instead of protoreflect.equalValue. +func equalMessageList(lx, ly protoreflect.List) bool { + if lx.Len() != ly.Len() { + return false + } + for i := 0; i < lx.Len(); i++ { + // We only operate on messages here since equalValue will not call us in any other case. + if !equalMessage(lx.Get(i).Message(), ly.Get(i).Message()) { + return false + } + } + return true +} + +// equalUnknown compares unknown fields by direct comparison on the raw bytes +// of each individual field number. +// Copied from protoreflect.equalUnknown. +func equalUnknown(x, y protoreflect.RawFields) bool { + if len(x) != len(y) { + return false + } + if bytes.Equal([]byte(x), []byte(y)) { + return true + } + + mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields) + my := make(map[protoreflect.FieldNumber]protoreflect.RawFields) + for len(x) > 0 { + fnum, _, n := protowire.ConsumeField(x) + mx[fnum] = append(mx[fnum], x[:n]...) + x = x[n:] + } + for len(y) > 0 { + fnum, _, n := protowire.ConsumeField(y) + my[fnum] = append(my[fnum], y[:n]...)
+ y = y[n:] + } + if len(mx) != len(my) { + return false + } + + for k, v1 := range mx { + if v2, ok := my[k]; !ok || !bytes.Equal([]byte(v1), []byte(v2)) { + return false + } + } + + return true +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/lazy.go b/vendor/google.golang.org/protobuf/internal/impl/lazy.go new file mode 100644 index 00000000000..e8fb6c35b4a --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/lazy.go @@ -0,0 +1,433 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "math/bits" + "os" + "reflect" + "sort" + "sync/atomic" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/protolazy" + "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +var enableLazy int32 = func() int32 { + if os.Getenv("GOPROTODEBUG") == "nolazy" { + return 0 + } + return 1 +}() + +// EnableLazyUnmarshal enables lazy unmarshaling. +func EnableLazyUnmarshal(enable bool) { + if enable { + atomic.StoreInt32(&enableLazy, 1) + return + } + atomic.StoreInt32(&enableLazy, 0) +} + +// LazyEnabled reports whether lazy unmarshaling is currently enabled. +func LazyEnabled() bool { + return atomic.LoadInt32(&enableLazy) != 0 +} + +// UnmarshalField unmarshals a field in a message. +func UnmarshalField(m interface{}, num protowire.Number) { + switch m := m.(type) { + case *messageState: + m.messageInfo().lazyUnmarshal(m.pointer(), num) + case *messageReflectWrapper: + m.messageInfo().lazyUnmarshal(m.pointer(), num) + default: + panic(fmt.Sprintf("unsupported wrapper type %T", m)) + } +} + +func (mi *MessageInfo) lazyUnmarshal(p pointer, num protoreflect.FieldNumber) { + var f *coderFieldInfo + if int(num) < len(mi.denseCoderFields) { + f = mi.denseCoderFields[num] + } else { + f = mi.coderFields[num] + } + if f == nil { + panic(fmt.Sprintf("lazyUnmarshal: no field info for %v.%v", mi.Desc.FullName(), num)) + } + lazy := *p.Apply(mi.lazyOffset).LazyInfoPtr() + start, end, found, _, multipleEntries := lazy.FindFieldInProto(uint32(num)) + if !found && multipleEntries == nil { + panic(fmt.Sprintf("lazyUnmarshal: can't find field data for %v.%v", mi.Desc.FullName(), num)) + } + // The actual pointer in the message cannot be set until the whole struct is filled in; otherwise we will have races. + // Create another pointer and set it atomically if we won the race and the pointer in the original message is still nil. + fp := pointerOfValue(reflect.New(f.ft)) + if multipleEntries != nil { + for _, entry := range multipleEntries { + mi.unmarshalField(lazy.Buffer()[entry.Start:entry.End], fp, f, lazy, lazy.UnmarshalFlags()) + } + } else { + mi.unmarshalField(lazy.Buffer()[start:end], fp, f, lazy, lazy.UnmarshalFlags()) + } + p.Apply(f.offset).AtomicSetPointerIfNil(fp.Elem()) +} + +func (mi *MessageInfo) unmarshalField(b []byte, p pointer, f *coderFieldInfo, lazyInfo *protolazy.XXX_lazyUnmarshalInfo, flags piface.UnmarshalInputFlags) error { + opts := lazyUnmarshalOptions + opts.flags |= flags + for len(b) > 0 { + // Parse the tag (field number and wire type).
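+		// Fast path: tags that fit in one or two bytes (field numbers up to
+		// 2047) are decoded inline; longer varints fall back to
+		// protowire.ConsumeVarint.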
+ var tag uint64 + if b[0] < 0x80 { + tag = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + tag = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + tag, n = protowire.ConsumeVarint(b) + if n < 0 { + return errors.New("invalid wire data") + } + b = b[n:] + } + var num protowire.Number + if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) { + return errors.New("invalid wire data") + } else { + num = protowire.Number(n) + } + wtyp := protowire.Type(tag & 7) + if num == f.num { + o, err := f.funcs.unmarshal(b, p, wtyp, f, opts) + if err == nil { + b = b[o.n:] + continue + } + if err != errUnknown { + return err + } + } + n := protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return errors.New("invalid wire data") + } + b = b[n:] + } + return nil +} + +func (mi *MessageInfo) skipField(b []byte, f *coderFieldInfo, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, _ ValidationStatus) { + fmi := f.validation.mi + if fmi == nil { + fd := mi.Desc.Fields().ByNumber(f.num) + if fd == nil || !fd.IsWeak() { + return out, ValidationUnknown + } + messageName := fd.Message().FullName() + messageType, err := preg.GlobalTypes.FindMessageByName(messageName) + if err != nil { + return out, ValidationUnknown + } + var ok bool + fmi, ok = messageType.(*MessageInfo) + if !ok { + return out, ValidationUnknown + } + } + fmi.init() + switch f.validation.typ { + case validationTypeMessage: + if wtyp != protowire.BytesType { + return out, ValidationWrongWireType + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, ValidationInvalid + } + out, st := fmi.validate(v, 0, opts) + out.n = n + return out, st + case validationTypeGroup: + if wtyp != protowire.StartGroupType { + return out, ValidationWrongWireType + } + out, st := fmi.validate(b, f.num, opts) + return out, st + default: + return out, ValidationUnknown + } +} + +// unmarshalPointerLazy is similar to unmarshalPointerEager, but it +// specifically handles lazy unmarshaling. It expects lazyOffset and +// presenceOffset to both be valid. +func (mi *MessageInfo) unmarshalPointerLazy(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) { + initialized := true + var requiredMask uint64 + var lazy **protolazy.XXX_lazyUnmarshalInfo + var presence presence + var lazyIndex []protolazy.IndexEntry + var lastNum protowire.Number + outOfOrder := false + lazyDecode := false + presence = p.Apply(mi.presenceOffset).PresenceInfo() + lazy = p.Apply(mi.lazyOffset).LazyInfoPtr() + if !presence.AnyPresent(mi.presenceSize) { + if opts.CanBeLazy() { + // If the message contains existing data, we need to merge into it. + // Lazy unmarshaling doesn't merge, so only enable it when the + // message is empty (no presence bits are set). + lazyDecode = true + if *lazy == nil { + *lazy = &protolazy.XXX_lazyUnmarshalInfo{} + } + (*lazy).SetUnmarshalFlags(opts.flags) + if !opts.AliasBuffer() { + // Make a copy of the buffer for lazy unmarshaling. + // Set the AliasBuffer flag so recursive unmarshal + // operations reuse the copy. + b = append([]byte{}, b...) + opts.flags |= piface.UnmarshalAliasBuffer + } + (*lazy).SetBuffer(b) + } + } + // Track special handling of lazy fields. + // + // In the common case, all fields are lazyValidateOnly (and lazyFields remains nil). + // In the event that validation for a field fails, this map tracks handling of the field.
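+	//
+	// A field starts out as lazyValidateOnly. If validation is inconclusive
+	// (ValidationUnknown), the field is promoted to lazyUnmarshalNow on its
+	// first occurrence, or to lazyUnmarshalLater when an earlier occurrence
+	// of the same field was already skipped.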
+ type lazyAction uint8 + const ( + lazyValidateOnly lazyAction = iota // validate the field only + lazyUnmarshalNow // eagerly unmarshal the field + lazyUnmarshalLater // unmarshal the field after the message is fully processed + ) + var lazyFields map[*coderFieldInfo]lazyAction + var exts *map[int32]ExtensionField + start := len(b) + pos := 0 + for len(b) > 0 { + // Parse the tag (field number and wire type). + var tag uint64 + if b[0] < 0x80 { + tag = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + tag = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + tag, n = protowire.ConsumeVarint(b) + if n < 0 { + return out, errDecode + } + b = b[n:] + } + var num protowire.Number + if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) { + return out, errors.New("invalid field number") + } else { + num = protowire.Number(n) + } + wtyp := protowire.Type(tag & 7) + + if wtyp == protowire.EndGroupType { + if num != groupTag { + return out, errors.New("mismatching end group marker") + } + groupTag = 0 + break + } + + var f *coderFieldInfo + if int(num) < len(mi.denseCoderFields) { + f = mi.denseCoderFields[num] + } else { + f = mi.coderFields[num] + } + var n int + err := errUnknown + discardUnknown := false + Field: + switch { + case f != nil: + if f.funcs.unmarshal == nil { + break + } + if f.isLazy && lazyDecode { + switch { + case lazyFields == nil || lazyFields[f] == lazyValidateOnly: + // Attempt to validate this field and leave it for later lazy unmarshaling. + o, valid := mi.skipField(b, f, wtyp, opts) + switch valid { + case ValidationValid: + // Skip over the valid field and continue. + err = nil + presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize) + requiredMask |= f.validation.requiredBit + if !o.initialized { + initialized = false + } + n = o.n + break Field + case ValidationInvalid: + return out, errors.New("invalid proto wire format") + case ValidationWrongWireType: + break Field + case ValidationUnknown: + if lazyFields == nil { + lazyFields = make(map[*coderFieldInfo]lazyAction) + } + if presence.Present(f.presenceIndex) { + // We were unable to determine if the field is valid or not, + // and we've already skipped over at least one instance of this + // field. Clear the presence bit (so if we stop decoding early, + // we don't leave a partially-initialized field around) and flag + // the field for unmarshaling before we return. + presence.ClearPresent(f.presenceIndex) + lazyFields[f] = lazyUnmarshalLater + discardUnknown = true + break Field + } else { + // We were unable to determine if the field is valid or not, + // but this is the first time we've seen it. Flag it as needing + // eager unmarshaling and fall through to the eager unmarshal case below. + lazyFields[f] = lazyUnmarshalNow + } + } + case lazyFields[f] == lazyUnmarshalLater: + // This field will be unmarshaled in a separate pass below. + // Skip over it here. + discardUnknown = true + break Field + default: + // Eagerly unmarshal the field. 
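+				// (lazyFields[f] == lazyUnmarshalNow: fall through and let
+				// the regular unmarshal path below decode this occurrence
+				// eagerly.)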
+ } + } + if f.isLazy && !lazyDecode && presence.Present(f.presenceIndex) { + if p.Apply(f.offset).AtomicGetPointer().IsNil() { + mi.lazyUnmarshal(p, f.num) + } + } + var o unmarshalOutput + o, err = f.funcs.unmarshal(b, p.Apply(f.offset), wtyp, f, opts) + n = o.n + if err != nil { + break + } + requiredMask |= f.validation.requiredBit + if f.funcs.isInit != nil && !o.initialized { + initialized = false + } + if f.presenceIndex != noPresence { + presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize) + } + default: + // Possible extension. + if exts == nil && mi.extensionOffset.IsValid() { + exts = p.Apply(mi.extensionOffset).Extensions() + if *exts == nil { + *exts = make(map[int32]ExtensionField) + } + } + if exts == nil { + break + } + var o unmarshalOutput + o, err = mi.unmarshalExtension(b, num, wtyp, *exts, opts) + if err != nil { + break + } + n = o.n + if !o.initialized { + initialized = false + } + } + if err != nil { + if err != errUnknown { + return out, err + } + n = protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return out, errDecode + } + if !discardUnknown && !opts.DiscardUnknown() && mi.unknownOffset.IsValid() { + u := mi.mutableUnknownBytes(p) + *u = protowire.AppendTag(*u, num, wtyp) + *u = append(*u, b[:n]...) + } + } + b = b[n:] + end := start - len(b) + if lazyDecode && f != nil && f.isLazy { + if num != lastNum { + lazyIndex = append(lazyIndex, protolazy.IndexEntry{ + FieldNum: uint32(num), + Start: uint32(pos), + End: uint32(end), + }) + } else { + i := len(lazyIndex) - 1 + lazyIndex[i].End = uint32(end) + lazyIndex[i].MultipleContiguous = true + } + } + if num < lastNum { + outOfOrder = true + } + pos = end + lastNum = num + } + if groupTag != 0 { + return out, errors.New("missing end group marker") + } + if lazyFields != nil { + // Some fields failed validation, and now need to be unmarshaled. 
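+		// These are the fields flagged lazyUnmarshalLater above: their bytes
+		// were skipped during the main pass, so re-scan the retained lazy
+		// buffer and decode them before returning.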
+ for f, action := range lazyFields { + if action != lazyUnmarshalLater { + continue + } + initialized = false + if *lazy == nil { + *lazy = &protolazy.XXX_lazyUnmarshalInfo{} + } + if err := mi.unmarshalField((*lazy).Buffer(), p.Apply(f.offset), f, *lazy, opts.flags); err != nil { + return out, err + } + presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize) + } + } + if lazyDecode { + if outOfOrder { + sort.Slice(lazyIndex, func(i, j int) bool { + return lazyIndex[i].FieldNum < lazyIndex[j].FieldNum || + (lazyIndex[i].FieldNum == lazyIndex[j].FieldNum && + lazyIndex[i].Start < lazyIndex[j].Start) + }) + } + if *lazy == nil { + *lazy = &protolazy.XXX_lazyUnmarshalInfo{} + } + + (*lazy).SetIndex(lazyIndex) + } + if mi.numRequiredFields > 0 && bits.OnesCount64(requiredMask) != int(mi.numRequiredFields) { + initialized = false + } + if initialized { + out.initialized = true + } + out.n = start - len(b) + return out, nil +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go index 6e8677ee633..b6849d66927 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go @@ -160,6 +160,7 @@ func (x placeholderExtension) HasPresence() bool func (x placeholderExtension) HasOptionalKeyword() bool { return false } func (x placeholderExtension) IsExtension() bool { return true } func (x placeholderExtension) IsWeak() bool { return false } +func (x placeholderExtension) IsLazy() bool { return false } func (x placeholderExtension) IsPacked() bool { return false } func (x placeholderExtension) IsList() bool { return false } func (x placeholderExtension) IsMap() bool { return false } diff --git a/vendor/google.golang.org/protobuf/internal/impl/merge.go b/vendor/google.golang.org/protobuf/internal/impl/merge.go index 7e65f64f28e..8ffdce67d34 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/merge.go +++ b/vendor/google.golang.org/protobuf/internal/impl/merge.go @@ -41,11 +41,38 @@ func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) { if src.IsNil() { return } + + var presenceSrc presence + var presenceDst presence + if mi.presenceOffset.IsValid() { + presenceSrc = src.Apply(mi.presenceOffset).PresenceInfo() + presenceDst = dst.Apply(mi.presenceOffset).PresenceInfo() + } + for _, f := range mi.orderedCoderFields { if f.funcs.merge == nil { continue } sfptr := src.Apply(f.offset) + + if f.presenceIndex != noPresence { + if !presenceSrc.Present(f.presenceIndex) { + continue + } + dfptr := dst.Apply(f.offset) + if f.isLazy { + if sfptr.AtomicGetPointer().IsNil() { + mi.lazyUnmarshal(src, f.num) + } + if presenceDst.Present(f.presenceIndex) && dfptr.AtomicGetPointer().IsNil() { + mi.lazyUnmarshal(dst, f.num) + } + } + f.funcs.merge(dst.Apply(f.offset), sfptr, f, opts) + presenceDst.SetPresentUnatomic(f.presenceIndex, mi.presenceSize) + continue + } + if f.isPointer && sfptr.Elem().IsNil() { continue } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index 019399d454d..d1f79b4224f 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -30,8 +30,8 @@ type MessageInfo struct { // Desc is the underlying message descriptor type and must be populated. 
Desc protoreflect.MessageDescriptor - // Exporter must be provided in a purego environment in order to provide - // access to unexported fields. + // Deprecated: Exporter will be removed the next time we bump + // protoimpl.GenVersion. See https://github.com/golang/protobuf/issues/1640 Exporter exporter // OneofWrappers is list of pointers to oneof wrapper struct types. @@ -79,6 +79,9 @@ func (mi *MessageInfo) initOnce() { if mi.initDone == 1 { return } + if opaqueInitHook(mi) { + return + } t := mi.GoReflectType if t.Kind() != reflect.Ptr && t.Elem().Kind() != reflect.Struct { @@ -133,6 +136,9 @@ type structInfo struct { extensionOffset offset extensionType reflect.Type + lazyOffset offset + presenceOffset offset + fieldsByNumber map[protoreflect.FieldNumber]reflect.StructField oneofsByName map[protoreflect.Name]reflect.StructField oneofWrappersByType map[reflect.Type]protoreflect.FieldNumber @@ -145,6 +151,8 @@ func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo { weakOffset: invalidOffset, unknownOffset: invalidOffset, extensionOffset: invalidOffset, + lazyOffset: invalidOffset, + presenceOffset: invalidOffset, fieldsByNumber: map[protoreflect.FieldNumber]reflect.StructField{}, oneofsByName: map[protoreflect.Name]reflect.StructField{}, @@ -157,24 +165,28 @@ fieldLoop: switch f := t.Field(i); f.Name { case genid.SizeCache_goname, genid.SizeCacheA_goname: if f.Type == sizecacheType { - si.sizecacheOffset = offsetOf(f, mi.Exporter) + si.sizecacheOffset = offsetOf(f) si.sizecacheType = f.Type } case genid.WeakFields_goname, genid.WeakFieldsA_goname: if f.Type == weakFieldsType { - si.weakOffset = offsetOf(f, mi.Exporter) + si.weakOffset = offsetOf(f) si.weakType = f.Type } case genid.UnknownFields_goname, genid.UnknownFieldsA_goname: if f.Type == unknownFieldsAType || f.Type == unknownFieldsBType { - si.unknownOffset = offsetOf(f, mi.Exporter) + si.unknownOffset = offsetOf(f) si.unknownType = f.Type } case genid.ExtensionFields_goname, genid.ExtensionFieldsA_goname, genid.ExtensionFieldsB_goname: if f.Type == extensionFieldsType { - si.extensionOffset = offsetOf(f, mi.Exporter) + si.extensionOffset = offsetOf(f) si.extensionType = f.Type } + case "lazyFields", "XXX_lazyUnmarshalInfo": + si.lazyOffset = offsetOf(f) + case "XXX_presence": + si.presenceOffset = offsetOf(f) default: for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") { if len(s) > 0 && strings.Trim(s, "0123456789") == "" { diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go new file mode 100644 index 00000000000..d8dcd788636 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go @@ -0,0 +1,632 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "math" + "reflect" + "strings" + "sync/atomic" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +type opaqueStructInfo struct { + structInfo +} + +// isOpaque determines whether a protobuf message type is on the Opaque API. It +// checks whether the type is a Go struct that protoc-gen-go would generate. +// +// This function only detects newly generated messages from the v2 +// implementation of protoc-gen-go. It is unable to classify generated messages +// that are too old or those that are generated by a different generator +// such as protoc-gen-gogo. 
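+//
+// As a purely illustrative sketch (not actual generated code), an opaque
+// message struct is expected to begin with a tagged field along the lines of:
+//
+//	type M struct {
+//		state protoimpl.MessageState `protogen:"opaque.v1"`
+//		// ...
+//	}
+//
+// Only the "opaque." prefix of the tag value matters to this check; the
+// exact suffix is whatever the generator emits.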
+func isOpaque(t reflect.Type) bool { + // The current detection mechanism is to simply check the first field + // for a struct tag with the "protogen" key. + if t.Kind() == reflect.Struct && t.NumField() > 0 { + pgt := t.Field(0).Tag.Get("protogen") + return strings.HasPrefix(pgt, "opaque.") + } + return false +} + +func opaqueInitHook(mi *MessageInfo) bool { + mt := mi.GoReflectType.Elem() + si := opaqueStructInfo{ + structInfo: mi.makeStructInfo(mt), + } + + if !isOpaque(mt) { + return false + } + + defer atomic.StoreUint32(&mi.initDone, 1) + + mi.fields = map[protoreflect.FieldNumber]*fieldInfo{} + fds := mi.Desc.Fields() + for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + fs := si.fieldsByNumber[fd.Number()] + var fi fieldInfo + usePresence, _ := usePresenceForField(si, fd) + + switch { + case fd.IsWeak(): + // Weak fields are no different for opaque. + fi = fieldInfoForWeakMessage(fd, si.weakOffset) + case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): + // Oneofs are no different for opaque. + fi = fieldInfoForOneof(fd, si.oneofsByName[fd.ContainingOneof().Name()], mi.Exporter, si.oneofWrappersByNumber[fd.Number()]) + case fd.IsMap(): + fi = mi.fieldInfoForMapOpaque(si, fd, fs) + case fd.IsList() && fd.Message() == nil && usePresence: + fi = mi.fieldInfoForScalarListOpaque(si, fd, fs) + case fd.IsList() && fd.Message() == nil: + // Proto3 lists without presence can use same access methods as open structs. + fi = fieldInfoForList(fd, fs, mi.Exporter) + case fd.IsList() && usePresence: + fi = mi.fieldInfoForMessageListOpaque(si, fd, fs) + case fd.IsList(): + // Proto3 opaque message lists that do not need a presence bitmap. + // Different representation than the open struct, but the same logic. + fi = mi.fieldInfoForMessageListOpaqueNoPresence(si, fd, fs) + case fd.Message() != nil && usePresence: + fi = mi.fieldInfoForMessageOpaque(si, fd, fs) + case fd.Message() != nil: + // Proto3 messages without presence can use same access methods as open structs. + fi = fieldInfoForMessage(fd, fs, mi.Exporter) + default: + fi = mi.fieldInfoForScalarOpaque(si, fd, fs) + } + mi.fields[fd.Number()] = &fi + } + mi.oneofs = map[protoreflect.Name]*oneofInfo{} + for i := 0; i < mi.Desc.Oneofs().Len(); i++ { + od := mi.Desc.Oneofs().Get(i) + mi.oneofs[od.Name()] = makeOneofInfoOpaque(mi, od, si.structInfo, mi.Exporter) + } + + mi.denseFields = make([]*fieldInfo, fds.Len()*2) + for i := 0; i < fds.Len(); i++ { + if fd := fds.Get(i); int(fd.Number()) < len(mi.denseFields) { + mi.denseFields[fd.Number()] = mi.fields[fd.Number()] + } + } + + for i := 0; i < fds.Len(); { + fd := fds.Get(i) + if od := fd.ContainingOneof(); od != nil && !fd.ContainingOneof().IsSynthetic() { + mi.rangeInfos = append(mi.rangeInfos, mi.oneofs[od.Name()]) + i += od.Fields().Len() + } else { + mi.rangeInfos = append(mi.rangeInfos, mi.fields[fd.Number()]) + i++ + } + } + + mi.makeExtensionFieldsFunc(mt, si.structInfo) + mi.makeUnknownFieldsFunc(mt, si.structInfo) + mi.makeOpaqueCoderMethods(mt, si) + mi.makeFieldTypes(si.structInfo) + + return true +} + +func makeOneofInfoOpaque(mi *MessageInfo, od protoreflect.OneofDescriptor, si structInfo, x exporter) *oneofInfo { + oi := &oneofInfo{oneofDesc: od} + if od.IsSynthetic() { + fd := od.Fields().Get(0) + index, _ := presenceIndex(mi.Desc, fd) + oi.which = func(p pointer) protoreflect.FieldNumber { + if p.IsNil() { + return 0 + } + if !mi.present(p, index) { + return 0 + } + return od.Fields().Get(0).Number() + } + return oi + } + // Dispatch to non-opaque oneof implementation for 
non-synthetic oneofs. + return makeOneofInfo(od, si, x) +} + +func (mi *MessageInfo) fieldInfoForMapOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo { + ft := fs.Type + if ft.Kind() != reflect.Map { + panic(fmt.Sprintf("invalid type: got %v, want map kind", ft)) + } + fieldOffset := offsetOf(fs) + conv := NewConverter(ft, fd) + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + // Don't bother checking presence bits, since we need to + // look at the map length even if the presence bit is set. + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + return rv.Len() > 0 + }, + clear: func(p pointer) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(reflect.Zero(rv.Type())) + }, + get: func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.Len() == 0 { + return conv.Zero() + } + return conv.PBValueOf(rv) + }, + set: func(p pointer, v protoreflect.Value) { + pv := conv.GoValueOf(v) + if pv.IsNil() { + panic(fmt.Sprintf("invalid value: setting map field to read-only value")) + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(pv) + }, + mutable: func(p pointer) protoreflect.Value { + v := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if v.IsNil() { + v.Set(reflect.MakeMap(fs.Type)) + } + return conv.PBValueOf(v) + }, + newField: func() protoreflect.Value { + return conv.New() + }, + } +} + +func (mi *MessageInfo) fieldInfoForScalarListOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo { + ft := fs.Type + if ft.Kind() != reflect.Slice { + panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft)) + } + conv := NewConverter(reflect.PtrTo(ft), fd) + fieldOffset := offsetOf(fs) + index, _ := presenceIndex(mi.Desc, fd) + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + return rv.Len() > 0 + }, + clear: func(p pointer) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(reflect.Zero(rv.Type())) + }, + get: func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type) + if rv.Elem().Len() == 0 { + return conv.Zero() + } + return conv.PBValueOf(rv) + }, + set: func(p pointer, v protoreflect.Value) { + pv := conv.GoValueOf(v) + if pv.IsNil() { + panic(fmt.Sprintf("invalid value: setting repeated field to read-only value")) + } + mi.setPresent(p, index) + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(pv.Elem()) + }, + mutable: func(p pointer) protoreflect.Value { + mi.setPresent(p, index) + return conv.PBValueOf(p.Apply(fieldOffset).AsValueOf(fs.Type)) + }, + newField: func() protoreflect.Value { + return conv.New() + }, + } +} + +func (mi *MessageInfo) fieldInfoForMessageListOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo { + ft := fs.Type + if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice { + panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft)) + } + conv := NewConverter(ft, fd) + fieldOffset := offsetOf(fs) + index, _ := presenceIndex(mi.Desc, fd) + fieldNumber := fd.Number() + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + if !mi.present(p, index) { + return false + } + sp := 
p.Apply(fieldOffset).AtomicGetPointer() + if sp.IsNil() { + // Lazily unmarshal this field. + mi.lazyUnmarshal(p, fieldNumber) + sp = p.Apply(fieldOffset).AtomicGetPointer() + } + rv := sp.AsValueOf(fs.Type.Elem()) + return rv.Elem().Len() > 0 + }, + clear: func(p pointer) { + fp := p.Apply(fieldOffset) + sp := fp.AtomicGetPointer() + if sp.IsNil() { + sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem()))) + mi.setPresent(p, index) + } + rv := sp.AsValueOf(fs.Type.Elem()) + rv.Elem().Set(reflect.Zero(rv.Type().Elem())) + }, + get: func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + if !mi.present(p, index) { + return conv.Zero() + } + sp := p.Apply(fieldOffset).AtomicGetPointer() + if sp.IsNil() { + // Lazily unmarshal this field. + mi.lazyUnmarshal(p, fieldNumber) + sp = p.Apply(fieldOffset).AtomicGetPointer() + } + rv := sp.AsValueOf(fs.Type.Elem()) + if rv.Elem().Len() == 0 { + return conv.Zero() + } + return conv.PBValueOf(rv) + }, + set: func(p pointer, v protoreflect.Value) { + fp := p.Apply(fieldOffset) + sp := fp.AtomicGetPointer() + if sp.IsNil() { + sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem()))) + mi.setPresent(p, index) + } + rv := sp.AsValueOf(fs.Type.Elem()) + val := conv.GoValueOf(v) + if val.IsNil() { + panic(fmt.Sprintf("invalid value: setting repeated field to read-only value")) + } else { + rv.Elem().Set(val.Elem()) + } + }, + mutable: func(p pointer) protoreflect.Value { + fp := p.Apply(fieldOffset) + sp := fp.AtomicGetPointer() + if sp.IsNil() { + if mi.present(p, index) { + // Lazily unmarshal this field. + mi.lazyUnmarshal(p, fieldNumber) + sp = p.Apply(fieldOffset).AtomicGetPointer() + } else { + sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem()))) + mi.setPresent(p, index) + } + } + rv := sp.AsValueOf(fs.Type.Elem()) + return conv.PBValueOf(rv) + }, + newField: func() protoreflect.Value { + return conv.New() + }, + } +} + +func (mi *MessageInfo) fieldInfoForMessageListOpaqueNoPresence(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo { + ft := fs.Type + if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice { + panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft)) + } + conv := NewConverter(ft, fd) + fieldOffset := offsetOf(fs) + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + sp := p.Apply(fieldOffset).AtomicGetPointer() + if sp.IsNil() { + return false + } + rv := sp.AsValueOf(fs.Type.Elem()) + return rv.Elem().Len() > 0 + }, + clear: func(p pointer) { + sp := p.Apply(fieldOffset).AtomicGetPointer() + if !sp.IsNil() { + rv := sp.AsValueOf(fs.Type.Elem()) + rv.Elem().Set(reflect.Zero(rv.Type().Elem())) + } + }, + get: func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + sp := p.Apply(fieldOffset).AtomicGetPointer() + if sp.IsNil() { + return conv.Zero() + } + rv := sp.AsValueOf(fs.Type.Elem()) + if rv.Elem().Len() == 0 { + return conv.Zero() + } + return conv.PBValueOf(rv) + }, + set: func(p pointer, v protoreflect.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() { + rv.Set(reflect.New(fs.Type.Elem())) + } + val := conv.GoValueOf(v) + if val.IsNil() { + panic(fmt.Sprintf("invalid value: setting repeated field to read-only value")) + } else { + rv.Elem().Set(val.Elem()) + } + }, + mutable: func(p pointer) protoreflect.Value { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() { + 
rv.Set(reflect.New(fs.Type.Elem())) + } + return conv.PBValueOf(rv) + }, + newField: func() protoreflect.Value { + return conv.New() + }, + } +} + +func (mi *MessageInfo) fieldInfoForScalarOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo { + ft := fs.Type + nullable := fd.HasPresence() + if oneof := fd.ContainingOneof(); oneof != nil && oneof.IsSynthetic() { + nullable = true + } + deref := false + if nullable && ft.Kind() == reflect.Ptr { + ft = ft.Elem() + deref = true + } + conv := NewConverter(ft, fd) + fieldOffset := offsetOf(fs) + index, _ := presenceIndex(mi.Desc, fd) + var getter func(p pointer) protoreflect.Value + if !nullable { + getter = getterForDirectScalar(fd, fs, conv, fieldOffset) + } else { + getter = getterForOpaqueNullableScalar(mi, index, fd, fs, conv, fieldOffset) + } + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + if nullable { + return mi.present(p, index) + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + switch rv.Kind() { + case reflect.Bool: + return rv.Bool() + case reflect.Int32, reflect.Int64: + return rv.Int() != 0 + case reflect.Uint32, reflect.Uint64: + return rv.Uint() != 0 + case reflect.Float32, reflect.Float64: + return rv.Float() != 0 || math.Signbit(rv.Float()) + case reflect.String, reflect.Slice: + return rv.Len() > 0 + default: + panic(fmt.Sprintf("invalid type: %v", rv.Type())) // should never happen + } + }, + clear: func(p pointer) { + if nullable { + mi.clearPresent(p, index) + } + // This is only valuable for bytes and strings, but we do it unconditionally. + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(reflect.Zero(rv.Type())) + }, + get: getter, + // TODO: Implement unsafe fast path for set? + set: func(p pointer, v protoreflect.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if deref { + if rv.IsNil() { + rv.Set(reflect.New(ft)) + } + rv = rv.Elem() + } + + rv.Set(conv.GoValueOf(v)) + if nullable && rv.Kind() == reflect.Slice && rv.IsNil() { + rv.Set(emptyBytes) + } + if nullable { + mi.setPresent(p, index) + } + }, + newField: func() protoreflect.Value { + return conv.New() + }, + } +} + +func (mi *MessageInfo) fieldInfoForMessageOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo { + ft := fs.Type + conv := NewConverter(ft, fd) + fieldOffset := offsetOf(fs) + index, _ := presenceIndex(mi.Desc, fd) + fieldNumber := fd.Number() + elemType := fs.Type.Elem() + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + return mi.present(p, index) + }, + clear: func(p pointer) { + mi.clearPresent(p, index) + p.Apply(fieldOffset).AtomicSetNilPointer() + }, + get: func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + fp := p.Apply(fieldOffset) + mp := fp.AtomicGetPointer() + if mp.IsNil() { + // Lazily unmarshal this field. + mi.lazyUnmarshal(p, fieldNumber) + mp = fp.AtomicGetPointer() + } + rv := mp.AsValueOf(elemType) + return conv.PBValueOf(rv) + }, + set: func(p pointer, v protoreflect.Value) { + val := pointerOfValue(conv.GoValueOf(v)) + if val.IsNil() { + panic("invalid nil pointer") + } + p.Apply(fieldOffset).AtomicSetPointer(val) + mi.setPresent(p, index) + }, + mutable: func(p pointer) protoreflect.Value { + fp := p.Apply(fieldOffset) + mp := fp.AtomicGetPointer() + if mp.IsNil() { + if mi.present(p, index) { + // Lazily unmarshal this field. 
+ mi.lazyUnmarshal(p, fieldNumber) + mp = fp.AtomicGetPointer() + } else { + mp = pointerOfValue(conv.GoValueOf(conv.New())) + fp.AtomicSetPointer(mp) + mi.setPresent(p, index) + } + } + return conv.PBValueOf(mp.AsValueOf(fs.Type.Elem())) + }, + newMessage: func() protoreflect.Message { + return conv.New().Message() + }, + newField: func() protoreflect.Value { + return conv.New() + }, + } +} + +// A presenceList wraps a List, updating presence bits as necessary when the +// list contents change. +type presenceList struct { + pvalueList + setPresence func(bool) +} +type pvalueList interface { + protoreflect.List + //Unwrapper +} + +func (list presenceList) Append(v protoreflect.Value) { + list.pvalueList.Append(v) + list.setPresence(true) +} +func (list presenceList) Truncate(i int) { + list.pvalueList.Truncate(i) + list.setPresence(i > 0) +} + +// presenceIndex returns the index to pass to presence functions. +// +// TODO: field.Desc.Index() would be simpler, and would give space to record the presence of oneof fields. +func presenceIndex(md protoreflect.MessageDescriptor, fd protoreflect.FieldDescriptor) (uint32, presenceSize) { + found := false + var index, numIndices uint32 + for i := 0; i < md.Fields().Len(); i++ { + f := md.Fields().Get(i) + if f == fd { + found = true + index = numIndices + } + if f.ContainingOneof() == nil || isLastOneofField(f) { + numIndices++ + } + } + if !found { + panic(fmt.Sprintf("BUG: %v not in %v", fd.Name(), md.FullName())) + } + return index, presenceSize(numIndices) +} + +func isLastOneofField(fd protoreflect.FieldDescriptor) bool { + fields := fd.ContainingOneof().Fields() + return fields.Get(fields.Len()-1) == fd +} + +func (mi *MessageInfo) setPresent(p pointer, index uint32) { + p.Apply(mi.presenceOffset).PresenceInfo().SetPresent(index, mi.presenceSize) +} + +func (mi *MessageInfo) clearPresent(p pointer, index uint32) { + p.Apply(mi.presenceOffset).PresenceInfo().ClearPresent(index) +} + +func (mi *MessageInfo) present(p pointer, index uint32) bool { + return p.Apply(mi.presenceOffset).PresenceInfo().Present(index) +} + +// usePresenceForField implements the somewhat intricate logic of when +// the presence bitmap is used for a field. The main logic is that a +// field that is optional or that can be lazy will use the presence +// bit; maps, weak fields, and non-synthetic oneofs never do. It also +// records if the field can ever be lazy, which is true when the +// descriptor marks the field lazy and it is a message or a slice of +// messages. A field that is lazy will always need a presence bit. +// Oneofs are not lazy and do not use presence, unless they are a +// synthetic oneof, which is a proto3 optional field. For proto3 +// optionals, we use the presence bit and they can also be lazy when +// applicable (a message). +func usePresenceForField(si opaqueStructInfo, fd protoreflect.FieldDescriptor) (usePresence, canBeLazy bool) { + hasLazyField := fd.(interface{ IsLazy() bool }).IsLazy() + + // Non-oneof scalar fields with explicit field presence use the presence array.
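+	// (The switch below fills in the rest: weak fields, maps, and real
+	// oneofs never use presence; message and group fields use presence
+	// exactly when they are lazy; all remaining fields use presence when
+	// they are explicit-presence scalars, or lazy fields with presence.)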
+ usesPresenceArray := fd.HasPresence() && fd.Message() == nil && (fd.ContainingOneof() == nil || fd.ContainingOneof().IsSynthetic()) + switch { + case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): + return false, false + case fd.IsWeak(): + return false, false + case fd.IsMap(): + return false, false + case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind: + return hasLazyField, hasLazyField + default: + return usesPresenceArray || (hasLazyField && fd.HasPresence()), false + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_opaque_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_opaque_gen.go new file mode 100644 index 00000000000..a69825699ab --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/message_opaque_gen.go @@ -0,0 +1,132 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. + +package impl + +import ( + "reflect" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +func getterForOpaqueNullableScalar(mi *MessageInfo, index uint32, fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value { + ft := fs.Type + if ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + if fd.Kind() == protoreflect.EnumKind { + // Enums for nullable opaque types. + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + return conv.PBValueOf(rv) + } + } + switch ft.Kind() { + case reflect.Bool: + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Bool() + return protoreflect.ValueOfBool(*x) + } + case reflect.Int32: + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Int32() + return protoreflect.ValueOfInt32(*x) + } + case reflect.Uint32: + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Uint32() + return protoreflect.ValueOfUint32(*x) + } + case reflect.Int64: + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Int64() + return protoreflect.ValueOfInt64(*x) + } + case reflect.Uint64: + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Uint64() + return protoreflect.ValueOfUint64(*x) + } + case reflect.Float32: + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Float32() + return protoreflect.ValueOfFloat32(*x) + } + case reflect.Float64: + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Float64() + return protoreflect.ValueOfFloat64(*x) + } + case reflect.String: + if fd.Kind() == protoreflect.BytesKind { + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).StringPtr() + if *x == nil { + return conv.Zero() + } + if len(**x) == 0 { + return 
protoreflect.ValueOfBytes(nil) + } + return protoreflect.ValueOfBytes([]byte(**x)) + } + } + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).StringPtr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfString(**x) + } + case reflect.Slice: + if fd.Kind() == protoreflect.StringKind { + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Bytes() + return protoreflect.ValueOfString(string(*x)) + } + } + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Bytes() + return protoreflect.ValueOfBytes(*x) + } + } + panic("unexpected protobuf kind: " + ft.Kind().String()) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go index ecb4623d701..31c19b54f8d 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go @@ -205,6 +205,11 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) { case fd.IsList(): if fd.Enum() != nil || fd.Message() != nil { ft = fs.Type.Elem() + + if ft.Kind() == reflect.Slice { + ft = ft.Elem() + } + } isMessage = fd.Message() != nil case fd.Enum() != nil: diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go index 986322b195a..3cd1fbc21fb 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go @@ -76,7 +76,7 @@ func fieldInfoForOneof(fd protoreflect.FieldDescriptor, fs reflect.StructField, isMessage := fd.Message() != nil // TODO: Implement unsafe fast path? - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) return fieldInfo{ // NOTE: The logic below intentionally assumes that oneof fields are // well-formatted. That is, the oneof interface never contains a @@ -152,7 +152,7 @@ func fieldInfoForMap(fd protoreflect.FieldDescriptor, fs reflect.StructField, x conv := NewConverter(ft, fd) // TODO: Implement unsafe fast path? - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) return fieldInfo{ fieldDesc: fd, has: func(p pointer) bool { @@ -205,7 +205,7 @@ func fieldInfoForList(fd protoreflect.FieldDescriptor, fs reflect.StructField, x conv := NewConverter(reflect.PtrTo(ft), fd) // TODO: Implement unsafe fast path? - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) return fieldInfo{ fieldDesc: fd, has: func(p pointer) bool { @@ -256,6 +256,7 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, ft := fs.Type nullable := fd.HasPresence() isBytes := ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 + var getter func(p pointer) protoreflect.Value if nullable { if ft.Kind() != reflect.Ptr && ft.Kind() != reflect.Slice { // This never occurs for generated message types. 
@@ -268,19 +269,25 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, } } conv := NewConverter(ft, fd) + fieldOffset := offsetOf(fs) + + // Generate specialized getter functions to avoid going through reflect.Value + if nullable { + getter = getterForNullableScalar(fd, fs, conv, fieldOffset) + } else { + getter = getterForDirectScalar(fd, fs, conv, fieldOffset) + } - // TODO: Implement unsafe fast path? - fieldOffset := offsetOf(fs, x) return fieldInfo{ fieldDesc: fd, has: func(p pointer) bool { if p.IsNil() { return false } - rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() if nullable { - return !rv.IsNil() + return !p.Apply(fieldOffset).Elem().IsNil() } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() switch rv.Kind() { case reflect.Bool: return rv.Bool() @@ -300,21 +307,8 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() rv.Set(reflect.Zero(rv.Type())) }, - get: func(p pointer) protoreflect.Value { - if p.IsNil() { - return conv.Zero() - } - rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() - if nullable { - if rv.IsNil() { - return conv.Zero() - } - if rv.Kind() == reflect.Ptr { - rv = rv.Elem() - } - } - return conv.PBValueOf(rv) - }, + get: getter, + // TODO: Implement unsafe fast path for set? set: func(p pointer, v protoreflect.Value) { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() if nullable && rv.Kind() == reflect.Ptr { @@ -339,7 +333,7 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, } func fieldInfoForWeakMessage(fd protoreflect.FieldDescriptor, weakOffset offset) fieldInfo { - if !flags.ProtoLegacy { + if !flags.ProtoLegacyWeak { panic("no support for proto1 weak fields") } @@ -416,7 +410,7 @@ func fieldInfoForMessage(fd protoreflect.FieldDescriptor, fs reflect.StructField conv := NewConverter(ft, fd) // TODO: Implement unsafe fast path? - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) return fieldInfo{ fieldDesc: fd, has: func(p pointer) bool { @@ -425,7 +419,7 @@ func fieldInfoForMessage(fd protoreflect.FieldDescriptor, fs reflect.StructField } rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() if fs.Type.Kind() != reflect.Ptr { - return !isZero(rv) + return !rv.IsZero() } return !rv.IsNil() }, @@ -472,7 +466,7 @@ func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) * oi := &oneofInfo{oneofDesc: od} if od.IsSynthetic() { fs := si.fieldsByNumber[od.Fields().Get(0).Number()] - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) oi.which = func(p pointer) protoreflect.FieldNumber { if p.IsNil() { return 0 @@ -485,7 +479,7 @@ func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) * } } else { fs := si.oneofsByName[od.Name()] - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) oi.which = func(p pointer) protoreflect.FieldNumber { if p.IsNil() { return 0 @@ -503,41 +497,3 @@ func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) * } return oi } - -// isZero is identical to reflect.Value.IsZero. -// TODO: Remove this when Go1.13 is the minimally supported Go version. 
-func isZero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return math.Float64bits(v.Float()) == 0 - case reflect.Complex64, reflect.Complex128: - c := v.Complex() - return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0 - case reflect.Array: - for i := 0; i < v.Len(); i++ { - if !isZero(v.Index(i)) { - return false - } - } - return true - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer: - return v.IsNil() - case reflect.String: - return v.Len() == 0 - case reflect.Struct: - for i := 0; i < v.NumField(); i++ { - if !isZero(v.Field(i)) { - return false - } - } - return true - default: - panic(&reflect.ValueError{Method: "reflect.Value.IsZero", Kind: v.Kind()}) - } -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field_gen.go new file mode 100644 index 00000000000..af5e063a1e8 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field_gen.go @@ -0,0 +1,273 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. + +package impl + +import ( + "reflect" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +func getterForNullableScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value { + ft := fs.Type + if ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + if fd.Kind() == protoreflect.EnumKind { + elemType := fs.Type.Elem() + // Enums for nullable types. 
+ return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).Elem().AsValueOf(elemType) + if rv.IsNil() { + return conv.Zero() + } + return conv.PBValueOf(rv.Elem()) + } + } + switch ft.Kind() { + case reflect.Bool: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).BoolPtr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfBool(**x) + } + case reflect.Int32: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Int32Ptr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfInt32(**x) + } + case reflect.Uint32: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Uint32Ptr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfUint32(**x) + } + case reflect.Int64: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Int64Ptr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfInt64(**x) + } + case reflect.Uint64: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Uint64Ptr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfUint64(**x) + } + case reflect.Float32: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Float32Ptr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfFloat32(**x) + } + case reflect.Float64: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Float64Ptr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfFloat64(**x) + } + case reflect.String: + if fd.Kind() == protoreflect.BytesKind { + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).StringPtr() + if *x == nil { + return conv.Zero() + } + if len(**x) == 0 { + return protoreflect.ValueOfBytes(nil) + } + return protoreflect.ValueOfBytes([]byte(**x)) + } + } + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).StringPtr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfString(**x) + } + case reflect.Slice: + if fd.Kind() == protoreflect.StringKind { + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Bytes() + if len(*x) == 0 { + return conv.Zero() + } + return protoreflect.ValueOfString(string(*x)) + } + } + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Bytes() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfBytes(*x) + } + } + panic("unexpected protobuf kind: " + ft.Kind().String()) +} + +func getterForDirectScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value { + ft := fs.Type + if fd.Kind() == protoreflect.EnumKind { + // Enums for non nullable types. 
+ return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + return conv.PBValueOf(rv) + } + } + switch ft.Kind() { + case reflect.Bool: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Bool() + return protoreflect.ValueOfBool(*x) + } + case reflect.Int32: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Int32() + return protoreflect.ValueOfInt32(*x) + } + case reflect.Uint32: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Uint32() + return protoreflect.ValueOfUint32(*x) + } + case reflect.Int64: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Int64() + return protoreflect.ValueOfInt64(*x) + } + case reflect.Uint64: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Uint64() + return protoreflect.ValueOfUint64(*x) + } + case reflect.Float32: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Float32() + return protoreflect.ValueOfFloat32(*x) + } + case reflect.Float64: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Float64() + return protoreflect.ValueOfFloat64(*x) + } + case reflect.String: + if fd.Kind() == protoreflect.BytesKind { + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).String() + if len(*x) == 0 { + return protoreflect.ValueOfBytes(nil) + } + return protoreflect.ValueOfBytes([]byte(*x)) + } + } + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).String() + return protoreflect.ValueOfString(*x) + } + case reflect.Slice: + if fd.Kind() == protoreflect.StringKind { + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Bytes() + return protoreflect.ValueOfString(string(*x)) + } + } + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Bytes() + return protoreflect.ValueOfBytes(*x) + } + } + panic("unexpected protobuf kind: " + ft.Kind().String()) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go deleted file mode 100644 index da685e8a29d..00000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego || appengine -// +build purego appengine - -package impl - -import ( - "fmt" - "reflect" - "sync" -) - -const UnsafeEnabled = false - -// Pointer is an opaque pointer type. -type Pointer any - -// offset represents the offset to a struct field, accessible from a pointer. -// The offset is the field index into a struct. -type offset struct { - index int - export exporter -} - -// offsetOf returns a field offset for the struct field. 
-func offsetOf(f reflect.StructField, x exporter) offset { - if len(f.Index) != 1 { - panic("embedded structs are not supported") - } - if f.PkgPath == "" { - return offset{index: f.Index[0]} // field is already exported - } - if x == nil { - panic("exporter must be provided for unexported field") - } - return offset{index: f.Index[0], export: x} -} - -// IsValid reports whether the offset is valid. -func (f offset) IsValid() bool { return f.index >= 0 } - -// invalidOffset is an invalid field offset. -var invalidOffset = offset{index: -1} - -// zeroOffset is a noop when calling pointer.Apply. -var zeroOffset = offset{index: 0} - -// pointer is an abstract representation of a pointer to a struct or field. -type pointer struct{ v reflect.Value } - -// pointerOf returns p as a pointer. -func pointerOf(p Pointer) pointer { - return pointerOfIface(p) -} - -// pointerOfValue returns v as a pointer. -func pointerOfValue(v reflect.Value) pointer { - return pointer{v: v} -} - -// pointerOfIface returns the pointer portion of an interface. -func pointerOfIface(v any) pointer { - return pointer{v: reflect.ValueOf(v)} -} - -// IsNil reports whether the pointer is nil. -func (p pointer) IsNil() bool { - return p.v.IsNil() -} - -// Apply adds an offset to the pointer to derive a new pointer -// to a specified field. The current pointer must be pointing at a struct. -func (p pointer) Apply(f offset) pointer { - if f.export != nil { - if v := reflect.ValueOf(f.export(p.v.Interface(), f.index)); v.IsValid() { - return pointer{v: v} - } - } - return pointer{v: p.v.Elem().Field(f.index).Addr()} -} - -// AsValueOf treats p as a pointer to an object of type t and returns the value. -// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t)) -func (p pointer) AsValueOf(t reflect.Type) reflect.Value { - if got := p.v.Type().Elem(); got != t { - panic(fmt.Sprintf("invalid type: got %v, want %v", got, t)) - } - return p.v -} - -// AsIfaceOf treats p as a pointer to an object of type t and returns the value. 
-// It is equivalent to p.AsValueOf(t).Interface() -func (p pointer) AsIfaceOf(t reflect.Type) any { - return p.AsValueOf(t).Interface() -} - -func (p pointer) Bool() *bool { return p.v.Interface().(*bool) } -func (p pointer) BoolPtr() **bool { return p.v.Interface().(**bool) } -func (p pointer) BoolSlice() *[]bool { return p.v.Interface().(*[]bool) } -func (p pointer) Int32() *int32 { return p.v.Interface().(*int32) } -func (p pointer) Int32Ptr() **int32 { return p.v.Interface().(**int32) } -func (p pointer) Int32Slice() *[]int32 { return p.v.Interface().(*[]int32) } -func (p pointer) Int64() *int64 { return p.v.Interface().(*int64) } -func (p pointer) Int64Ptr() **int64 { return p.v.Interface().(**int64) } -func (p pointer) Int64Slice() *[]int64 { return p.v.Interface().(*[]int64) } -func (p pointer) Uint32() *uint32 { return p.v.Interface().(*uint32) } -func (p pointer) Uint32Ptr() **uint32 { return p.v.Interface().(**uint32) } -func (p pointer) Uint32Slice() *[]uint32 { return p.v.Interface().(*[]uint32) } -func (p pointer) Uint64() *uint64 { return p.v.Interface().(*uint64) } -func (p pointer) Uint64Ptr() **uint64 { return p.v.Interface().(**uint64) } -func (p pointer) Uint64Slice() *[]uint64 { return p.v.Interface().(*[]uint64) } -func (p pointer) Float32() *float32 { return p.v.Interface().(*float32) } -func (p pointer) Float32Ptr() **float32 { return p.v.Interface().(**float32) } -func (p pointer) Float32Slice() *[]float32 { return p.v.Interface().(*[]float32) } -func (p pointer) Float64() *float64 { return p.v.Interface().(*float64) } -func (p pointer) Float64Ptr() **float64 { return p.v.Interface().(**float64) } -func (p pointer) Float64Slice() *[]float64 { return p.v.Interface().(*[]float64) } -func (p pointer) String() *string { return p.v.Interface().(*string) } -func (p pointer) StringPtr() **string { return p.v.Interface().(**string) } -func (p pointer) StringSlice() *[]string { return p.v.Interface().(*[]string) } -func (p pointer) Bytes() *[]byte { return p.v.Interface().(*[]byte) } -func (p pointer) BytesPtr() **[]byte { return p.v.Interface().(**[]byte) } -func (p pointer) BytesSlice() *[][]byte { return p.v.Interface().(*[][]byte) } -func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.v.Interface().(*WeakFields)) } -func (p pointer) Extensions() *map[int32]ExtensionField { - return p.v.Interface().(*map[int32]ExtensionField) -} - -func (p pointer) Elem() pointer { - return pointer{v: p.v.Elem()} -} - -// PointerSlice copies []*T from p as a new []pointer. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) PointerSlice() []pointer { - // TODO: reconsider this - if p.v.IsNil() { - return nil - } - n := p.v.Elem().Len() - s := make([]pointer, n) - for i := 0; i < n; i++ { - s[i] = pointer{v: p.v.Elem().Index(i)} - } - return s -} - -// AppendPointerSlice appends v to p, which must be a []*T. -func (p pointer) AppendPointerSlice(v pointer) { - sp := p.v.Elem() - sp.Set(reflect.Append(sp, v.v)) -} - -// SetPointer sets *p to v. -func (p pointer) SetPointer(v pointer) { - p.v.Elem().Set(v.v) -} - -func growSlice(p pointer, addCap int) { - // TODO: Once we only support Go 1.20 and newer, use reflect.Grow. 
- in := p.v.Elem() - out := reflect.MakeSlice(in.Type(), in.Len(), in.Len()+addCap) - reflect.Copy(out, in) - p.v.Elem().Set(out) -} - -func (p pointer) growBoolSlice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growInt32Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growUint32Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growInt64Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growUint64Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growFloat64Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growFloat32Slice(addCap int) { - growSlice(p, addCap) -} - -func (Export) MessageStateOf(p Pointer) *messageState { panic("not supported") } -func (ms *messageState) pointer() pointer { panic("not supported") } -func (ms *messageState) messageInfo() *MessageInfo { panic("not supported") } -func (ms *messageState) LoadMessageInfo() *MessageInfo { panic("not supported") } -func (ms *messageState) StoreMessageInfo(mi *MessageInfo) { panic("not supported") } - -type atomicNilMessage struct { - once sync.Once - m messageReflectWrapper -} - -func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper { - m.once.Do(func() { - m.m.p = pointerOfIface(reflect.Zero(mi.GoReflectType).Interface()) - m.m.mi = mi - }) - return &m.m -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index 5f20ca5d8ab..6bed45e35c2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -2,15 +2,14 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine -// +build !purego,!appengine - package impl import ( "reflect" "sync/atomic" "unsafe" + + "google.golang.org/protobuf/internal/protolazy" ) const UnsafeEnabled = true @@ -23,7 +22,7 @@ type Pointer unsafe.Pointer type offset uintptr // offsetOf returns a field offset for the struct field. -func offsetOf(f reflect.StructField, x exporter) offset { +func offsetOf(f reflect.StructField) offset { return offset(f.Offset) } @@ -114,6 +113,13 @@ func (p pointer) BytesPtr() **[]byte { return (**[]byte)(p.p) func (p pointer) BytesSlice() *[][]byte { return (*[][]byte)(p.p) } func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.p) } func (p pointer) Extensions() *map[int32]ExtensionField { return (*map[int32]ExtensionField)(p.p) } +func (p pointer) LazyInfoPtr() **protolazy.XXX_lazyUnmarshalInfo { + return (**protolazy.XXX_lazyUnmarshalInfo)(p.p) +} + +func (p pointer) PresenceInfo() presence { + return presence{P: p.p} +} func (p pointer) Elem() pointer { return pointer{p: *(*unsafe.Pointer)(p.p)} diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe_opaque.go new file mode 100644 index 00000000000..38aa7b7dcf2 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe_opaque.go @@ -0,0 +1,42 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package impl + +import ( + "sync/atomic" + "unsafe" +) + +func (p pointer) AtomicGetPointer() pointer { + return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))} +} + +func (p pointer) AtomicSetPointer(v pointer) { + atomic.StorePointer((*unsafe.Pointer)(p.p), v.p) +} + +func (p pointer) AtomicSetNilPointer() { + atomic.StorePointer((*unsafe.Pointer)(p.p), unsafe.Pointer(nil)) +} + +func (p pointer) AtomicSetPointerIfNil(v pointer) pointer { + if atomic.CompareAndSwapPointer((*unsafe.Pointer)(p.p), unsafe.Pointer(nil), v.p) { + return v + } + return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))} +} + +type atomicV1MessageInfo struct{ p Pointer } + +func (mi *atomicV1MessageInfo) Get() Pointer { + return Pointer(atomic.LoadPointer((*unsafe.Pointer)(&mi.p))) +} + +func (mi *atomicV1MessageInfo) SetIfNil(p Pointer) Pointer { + if atomic.CompareAndSwapPointer((*unsafe.Pointer)(&mi.p), nil, unsafe.Pointer(p)) { + return p + } + return mi.Get() +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/presence.go b/vendor/google.golang.org/protobuf/internal/impl/presence.go new file mode 100644 index 00000000000..914cb1deda2 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/presence.go @@ -0,0 +1,142 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "sync/atomic" + "unsafe" +) + +// presenceSize represents the size of a presence set, which should be the largest index of the set+1 +type presenceSize uint32 + +// presence is the internal representation of the bitmap array in a generated protobuf +type presence struct { + // This is a pointer to the beginning of an array of uint32 + P unsafe.Pointer +} + +func (p presence) toElem(num uint32) (ret *uint32) { + const ( + bitsPerByte = 8 + siz = unsafe.Sizeof(*ret) + ) + // p.P points to an array of uint32, num is the bit in this array that the + // caller wants to check/manipulate. Calculate the index in the array that + // contains this specific bit. E.g.: 76 / 32 = 2 (integer division). + offset := uintptr(num) / (siz * bitsPerByte) * siz + return (*uint32)(unsafe.Pointer(uintptr(p.P) + offset)) +} + +// Present checks for the presence of a specific field number in a presence set. +func (p presence) Present(num uint32) bool { + if p.P == nil { + return false + } + return Export{}.Present(p.toElem(num), num) +} + +// SetPresent adds presence for a specific field number in a presence set. +func (p presence) SetPresent(num uint32, size presenceSize) { + Export{}.SetPresent(p.toElem(num), num, uint32(size)) +} + +// SetPresentUnatomic adds presence for a specific field number in a presence set without using +// atomic operations. Only to be called during unmarshaling. +func (p presence) SetPresentUnatomic(num uint32, size presenceSize) { + Export{}.SetPresentNonAtomic(p.toElem(num), num, uint32(size)) +} + +// ClearPresent removes presence for a specific field number in a presence set. +func (p presence) ClearPresent(num uint32) { + Export{}.ClearPresent(p.toElem(num), num) +} + +// LoadPresenceCache (together with PresentInCache) allows for a +// cached version of checking for presence without re-reading the word +// for every field. It is optimized for efficiency and assumes no +// simultaneous mutation of the presence set (or at least does not have +// a problem with simultaneous mutation giving inconsistent results).
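+//
+// A hypothetical iteration over field numbers 1..n that reuses the cached
+// word (p, n, cachedElem, and cur are illustrative names, not part of the
+// upstream change):
+//
+//	var cachedElem uint32
+//	cur := p.LoadPresenceCache()
+//	for num := uint32(1); num <= n; num++ {
+//		if p.PresentInCache(num, &cachedElem, &cur) {
+//			// field num is present
+//		}
+//	}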
+func (p presence) LoadPresenceCache() (current uint32) { + if p.P == nil { + return 0 + } + return atomic.LoadUint32((*uint32)(p.P)) +} + +// PresentInCache reads presence from a cached word in the presence +// bitmap. It loads a new word into the cache if the bit falls outside +// the cached word. This is for really fast iteration through bitmaps in cases +// where we either know that the bitmap will not be altered, or we +// don't care about inconsistencies caused by simultaneous writes. +func (p presence) PresentInCache(num uint32, cachedElement *uint32, current *uint32) bool { + if num/32 != *cachedElement { + o := uintptr(num/32) * unsafe.Sizeof(uint32(0)) + q := (*uint32)(unsafe.Pointer(uintptr(p.P) + o)) + *current = atomic.LoadUint32(q) + *cachedElement = num / 32 + } + return (*current & (1 << (num % 32))) > 0 +} + +// AnyPresent checks if any field is marked as present in the bitmap. +func (p presence) AnyPresent(size presenceSize) bool { + n := uintptr((size + 31) / 32) + for j := uintptr(0); j < n; j++ { + o := j * unsafe.Sizeof(uint32(0)) + q := (*uint32)(unsafe.Pointer(uintptr(p.P) + o)) + b := atomic.LoadUint32(q) + if b > 0 { + return true + } + } + return false +} + +// toRaceDetectData finds the preceding RaceDetectHookData in a +// message by using pointer arithmetic. As the type of the presence +// set (bitmap) varies with the number of fields in the protobuf, we +// cannot have a struct type containing the array and the +// RaceDetectHookData. Instead, the RaceDetectHookData is placed +// immediately before the bitmap array, and we find it by walking +// backwards in the struct. +// +// This method is only called from the race-detect version of the code, +// so RaceDetectHookData is never an empty struct. +func (p presence) toRaceDetectData() *RaceDetectHookData { + var template struct { + d RaceDetectHookData + a [1]uint32 + } + o := (uintptr(unsafe.Pointer(&template.a)) - uintptr(unsafe.Pointer(&template.d))) + return (*RaceDetectHookData)(unsafe.Pointer(uintptr(p.P) - o)) +} + +func atomicLoadShadowPresence(p **[]byte) *[]byte { + return (*[]byte)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreShadowPresence(p **[]byte, v *[]byte) { + atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(p)), nil, unsafe.Pointer(v)) +} + +// findPointerToRaceDetectData finds the preceding RaceDetectHookData +// in a message by using pointer arithmetic. For the methods called +// directly from generated code, we don't have a pointer to the +// beginning of the presence set, but a pointer inside the array. As +// we know the index of the bit we're manipulating (num), we can +// calculate which element of the array ptr is pointing to. With that +// information we find the preceding RaceDetectHookData and can +// manipulate the shadow bitmap. +// +// This method is only called from the race-detect version of the +// code, so RaceDetectHookData is never an empty struct.
+func findPointerToRaceDetectData(ptr *uint32, num uint32) *RaceDetectHookData { + var template struct { + d RaceDetectHookData + a [1]uint32 + } + o := (uintptr(unsafe.Pointer(&template.a)) - uintptr(unsafe.Pointer(&template.d))) + uintptr(num/32)*unsafe.Sizeof(uint32(0)) + return (*RaceDetectHookData)(unsafe.Pointer(uintptr(unsafe.Pointer(ptr)) - o)) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/validate.go b/vendor/google.golang.org/protobuf/internal/impl/validate.go index a24e6bbd7a5..b534a3d6dbb 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/validate.go +++ b/vendor/google.golang.org/protobuf/internal/impl/validate.go @@ -37,6 +37,10 @@ const ( // ValidationValid indicates that unmarshaling the message will succeed. ValidationValid + + // ValidationWrongWireType indicates that a validated field does not have + // the expected wire type. + ValidationWrongWireType ) func (v ValidationStatus) String() string { @@ -149,11 +153,23 @@ func newValidationInfo(fd protoreflect.FieldDescriptor, ft reflect.Type) validat switch fd.Kind() { case protoreflect.MessageKind: vi.typ = validationTypeMessage + + if ft.Kind() == reflect.Ptr { + // Repeated opaque message fields are *[]*T. + ft = ft.Elem() + } + if ft.Kind() == reflect.Slice { vi.mi = getMessageInfo(ft.Elem()) } case protoreflect.GroupKind: vi.typ = validationTypeGroup + + if ft.Kind() == reflect.Ptr { + // Repeated opaque message fields are *[]*T. + ft = ft.Elem() + } + if ft.Kind() == reflect.Slice { vi.mi = getMessageInfo(ft.Elem()) } diff --git a/vendor/google.golang.org/protobuf/internal/protolazy/bufferreader.go b/vendor/google.golang.org/protobuf/internal/protolazy/bufferreader.go new file mode 100644 index 00000000000..82e5cab4aad --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/protolazy/bufferreader.go @@ -0,0 +1,364 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Helper code for parsing a protocol buffer + +package protolazy + +import ( + "errors" + "fmt" + "io" + + "google.golang.org/protobuf/encoding/protowire" +) + +// BufferReader is a structure encapsulating a protobuf and a current position +type BufferReader struct { + Buf []byte + Pos int +} + +// NewBufferReader creates a new BufferReader from a protobuf +func NewBufferReader(buf []byte) BufferReader { + return BufferReader{Buf: buf, Pos: 0} +} + +var errOutOfBounds = errors.New("protobuf decoding: out of bounds") +var errOverflow = errors.New("proto: integer overflow") + +func (b *BufferReader) DecodeVarintSlow() (x uint64, err error) { + i := b.Pos + l := len(b.Buf) + + for shift := uint(0); shift < 64; shift += 7 { + if i >= l { + err = io.ErrUnexpectedEOF + return + } + v := b.Buf[i] + i++ + x |= (uint64(v) & 0x7F) << shift + if v < 0x80 { + b.Pos = i + return + } + } + + // The number is too large to represent in a 64-bit value.
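+	// (Worked example of the encoding itself: 300 is 0xAC 0x02 on the wire,
+	// since (0xAC&0x7F)|(0x02<<7) = 44+256 = 300. A uint64 needs at most ten
+	// varint bytes, so a continuation bit on the tenth byte is an overflow.)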
+ err = errOverflow + return +} + +// decodeVarint decodes a varint at the current position +func (b *BufferReader) DecodeVarint() (x uint64, err error) { + i := b.Pos + buf := b.Buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + b.Pos++ + return uint64(buf[i]), nil + } else if len(buf)-i < 10 { + return b.DecodeVarintSlow() + } + + var v uint64 + // we already checked the first byte + x = uint64(buf[i]) & 127 + i++ + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 7 + if v < 128 { + goto done + } + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 14 + if v < 128 { + goto done + } + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 21 + if v < 128 { + goto done + } + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 28 + if v < 128 { + goto done + } + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 35 + if v < 128 { + goto done + } + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 42 + if v < 128 { + goto done + } + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 49 + if v < 128 { + goto done + } + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 56 + if v < 128 { + goto done + } + + v = uint64(buf[i]) + i++ + x |= (v & 127) << 63 + if v < 128 { + goto done + } + + return 0, errOverflow + +done: + b.Pos = i + return +} + +// decodeVarint32 decodes a varint32 at the current position +func (b *BufferReader) DecodeVarint32() (x uint32, err error) { + i := b.Pos + buf := b.Buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + b.Pos++ + return uint32(buf[i]), nil + } else if len(buf)-i < 5 { + v, err := b.DecodeVarintSlow() + return uint32(v), err + } + + var v uint32 + // we already checked the first byte + x = uint32(buf[i]) & 127 + i++ + + v = uint32(buf[i]) + i++ + x |= (v & 127) << 7 + if v < 128 { + goto done + } + + v = uint32(buf[i]) + i++ + x |= (v & 127) << 14 + if v < 128 { + goto done + } + + v = uint32(buf[i]) + i++ + x |= (v & 127) << 21 + if v < 128 { + goto done + } + + v = uint32(buf[i]) + i++ + x |= (v & 127) << 28 + if v < 128 { + goto done + } + + return 0, errOverflow + +done: + b.Pos = i + return +} + +// skipValue skips a value in the protobuf, based on the specified tag +func (b *BufferReader) SkipValue(tag uint32) (err error) { + wireType := tag & 0x7 + switch protowire.Type(wireType) { + case protowire.VarintType: + err = b.SkipVarint() + case protowire.Fixed64Type: + err = b.SkipFixed64() + case protowire.BytesType: + var n uint32 + n, err = b.DecodeVarint32() + if err == nil { + err = b.Skip(int(n)) + } + case protowire.StartGroupType: + err = b.SkipGroup(tag) + case protowire.Fixed32Type: + err = b.SkipFixed32() + default: + err = fmt.Errorf("Unexpected wire type (%d)", wireType) + } + return +} + +// skipGroup skips a group with the specified tag. 
It executes efficiently using a tag stack. +func (b *BufferReader) SkipGroup(tag uint32) (err error) { + tagStack := make([]uint32, 0, 16) + tagStack = append(tagStack, tag) + var n uint32 + for len(tagStack) > 0 { + tag, err = b.DecodeVarint32() + if err != nil { + return err + } + switch protowire.Type(tag & 0x7) { + case protowire.VarintType: + err = b.SkipVarint() + case protowire.Fixed64Type: + err = b.Skip(8) + case protowire.BytesType: + n, err = b.DecodeVarint32() + if err == nil { + err = b.Skip(int(n)) + } + case protowire.StartGroupType: + tagStack = append(tagStack, tag) + case protowire.Fixed32Type: + err = b.SkipFixed32() + case protowire.EndGroupType: + if protoFieldNumber(tagStack[len(tagStack)-1]) == protoFieldNumber(tag) { + tagStack = tagStack[:len(tagStack)-1] + } else { + err = fmt.Errorf("end group tag %d does not match begin group tag %d at pos %d", + protoFieldNumber(tag), protoFieldNumber(tagStack[len(tagStack)-1]), b.Pos) + } + } + if err != nil { + return err + } + } + return nil +} + +// skipVarint efficiently skips a varint +func (b *BufferReader) SkipVarint() (err error) { + i := b.Pos + + if len(b.Buf)-i < 10 { + // Use DecodeVarintSlow() to check for buffer overflow, but ignore result + if _, err := b.DecodeVarintSlow(); err != nil { + return err + } + return nil + } + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + i++ + + if b.Buf[i] < 0x80 { + goto out + } + return errOverflow + +out: + b.Pos = i + 1 + return nil +} + +// skip skips the specified number of bytes +func (b *BufferReader) Skip(n int) (err error) { + if len(b.Buf) < b.Pos+n { + return io.ErrUnexpectedEOF + } + b.Pos += n + return +} + +// skipFixed64 skips a fixed64 +func (b *BufferReader) SkipFixed64() (err error) { + return b.Skip(8) +} + +// skipFixed32 skips a fixed32 +func (b *BufferReader) SkipFixed32() (err error) { + return b.Skip(4) +} + +// skipBytes skips a set of bytes +func (b *BufferReader) SkipBytes() (err error) { + n, err := b.DecodeVarint32() + if err != nil { + return err + } + return b.Skip(int(n)) +} + +// Done returns whether we are at the end of the protobuf +func (b *BufferReader) Done() bool { + return b.Pos == len(b.Buf) +} + +// Remaining returns how many bytes remain +func (b *BufferReader) Remaining() int { + return len(b.Buf) - b.Pos +} diff --git a/vendor/google.golang.org/protobuf/internal/protolazy/lazy.go b/vendor/google.golang.org/protobuf/internal/protolazy/lazy.go new file mode 100644 index 00000000000..ff4d4834bbc --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/protolazy/lazy.go @@ -0,0 +1,359 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protolazy contains internal data structures for lazy message decoding.
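+//
+// In broad strokes: the original wire bytes of a lazy field are retained
+// together with an index of field positions (see IndexEntry below), and the
+// submessage is only decoded when it is first accessed.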
+package protolazy + +import ( + "fmt" + "sort" + + "google.golang.org/protobuf/encoding/protowire" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +// IndexEntry is the structure for an index of the fields in a message of a +// proto (not descending to sub-messages) +type IndexEntry struct { + FieldNum uint32 + // first byte of this tag/field + Start uint32 + // first byte after a contiguous sequence of bytes for this tag/field, which could + // include a single encoding of the field, or multiple encodings for the field + End uint32 + // True if this protobuf segment includes multiple encodings of the field + MultipleContiguous bool +} + +// XXX_lazyUnmarshalInfo has information about a particular lazily decoded message +// +// Deprecated: Do not use. This will be deleted in the near future. +type XXX_lazyUnmarshalInfo struct { + // Index of fields and their positions in the protobuf for this + // message. Make index be a pointer to a slice so it can be updated + // atomically. The index pointer is only set once (lazily when/if + // the index is first needed), and must always be SET and LOADED + // ATOMICALLY. + index *[]IndexEntry + // The protobuf associated with this lazily decoded message. It is + // only set during proto.Unmarshal(). It doesn't need to be set and + // loaded atomically, since any simultaneous set (Unmarshal) and read + // (during a get) would already be a race in the app code. + Protobuf []byte + // The flags present when Unmarshal was originally called for this particular message + unmarshalFlags piface.UnmarshalInputFlags +} + +// The Buffer and SetBuffer methods let v2/internal/impl interact with +// XXX_lazyUnmarshalInfo via an interface, to avoid an import cycle. + +// Buffer returns the lazy unmarshal buffer. +// +// Deprecated: Do not use. This will be deleted in the near future. +func (lazy *XXX_lazyUnmarshalInfo) Buffer() []byte { + return lazy.Protobuf +} + +// SetBuffer sets the lazy unmarshal buffer. +// +// Deprecated: Do not use. This will be deleted in the near future. +func (lazy *XXX_lazyUnmarshalInfo) SetBuffer(b []byte) { + lazy.Protobuf = b +} + +// SetUnmarshalFlags is called to set a copy of the original unmarshalInputFlags. +// The flags should reflect how Unmarshal was called. +func (lazy *XXX_lazyUnmarshalInfo) SetUnmarshalFlags(f piface.UnmarshalInputFlags) { + lazy.unmarshalFlags = f +} + +// UnmarshalFlags returns the original unmarshalInputFlags. +func (lazy *XXX_lazyUnmarshalInfo) UnmarshalFlags() piface.UnmarshalInputFlags { + return lazy.unmarshalFlags +} + +// AllowedPartial returns true if the user originally unmarshalled this message with +// AllowPartial set to true. +func (lazy *XXX_lazyUnmarshalInfo) AllowedPartial() bool { + return (lazy.unmarshalFlags & piface.UnmarshalCheckRequired) == 0 +} + +func protoFieldNumber(tag uint32) uint32 { + return tag >> 3 +} + +// buildIndex builds an index of the specified protobuf, returning the index +// array and an error.
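+//
+// For example (a sketch, with e1 and e2 standing for whatever byte offsets
+// the encodings end at): a buffer holding one encoding of field 1 followed
+// by two contiguous encodings of field 3 would yield
+//
+//	[]IndexEntry{
+//		{FieldNum: 1, Start: 0, End: e1},
+//		{FieldNum: 3, Start: e1, End: e2, MultipleContiguous: true},
+//	}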
+func buildIndex(buf []byte) ([]IndexEntry, error) { + index := make([]IndexEntry, 0, 16) + var lastProtoFieldNum uint32 + var outOfOrder bool + + var r BufferReader = NewBufferReader(buf) + + for !r.Done() { + var tag uint32 + var err error + var curPos = r.Pos + // INLINED: tag, err = r.DecodeVarint32() + { + i := r.Pos + buf := r.Buf + + if i >= len(buf) { + return nil, errOutOfBounds + } else if buf[i] < 0x80 { + r.Pos++ + tag = uint32(buf[i]) + } else if r.Remaining() < 5 { + var v uint64 + v, err = r.DecodeVarintSlow() + tag = uint32(v) + } else { + var v uint32 + // we already checked the first byte + tag = uint32(buf[i]) & 127 + i++ + + v = uint32(buf[i]) + i++ + tag |= (v & 127) << 7 + if v < 128 { + goto done + } + + v = uint32(buf[i]) + i++ + tag |= (v & 127) << 14 + if v < 128 { + goto done + } + + v = uint32(buf[i]) + i++ + tag |= (v & 127) << 21 + if v < 128 { + goto done + } + + v = uint32(buf[i]) + i++ + tag |= (v & 127) << 28 + if v < 128 { + goto done + } + + return nil, errOutOfBounds + + done: + r.Pos = i + } + } + // DONE: tag, err = r.DecodeVarint32() + + fieldNum := protoFieldNumber(tag) + if fieldNum < lastProtoFieldNum { + outOfOrder = true + } + + // Skip the current value -- will skip over an entire group as well. + // INLINED: err = r.SkipValue(tag) + wireType := tag & 0x7 + switch protowire.Type(wireType) { + case protowire.VarintType: + // INLINED: err = r.SkipVarint() + i := r.Pos + + if len(r.Buf)-i < 10 { + // Use DecodeVarintSlow() to skip while + // checking for buffer overflow, but ignore result + _, err = r.DecodeVarintSlow() + goto out2 + } + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + return nil, errOverflow + out: + r.Pos = i + 1 + // DONE: err = r.SkipVarint() + case protowire.Fixed64Type: + err = r.SkipFixed64() + case protowire.BytesType: + var n uint32 + n, err = r.DecodeVarint32() + if err == nil { + err = r.Skip(int(n)) + } + case protowire.StartGroupType: + err = r.SkipGroup(tag) + case protowire.Fixed32Type: + err = r.SkipFixed32() + default: + err = fmt.Errorf("Unexpected wire type (%d)", wireType) + } + // DONE: err = r.SkipValue(tag) + + out2: + if err != nil { + return nil, err + } + if fieldNum != lastProtoFieldNum { + index = append(index, IndexEntry{FieldNum: fieldNum, + Start: uint32(curPos), + End: uint32(r.Pos)}, + ) + } else { + index[len(index)-1].End = uint32(r.Pos) + index[len(index)-1].MultipleContiguous = true + } + lastProtoFieldNum = fieldNum + } + if outOfOrder { + sort.Slice(index, func(i, j int) bool { + return index[i].FieldNum < index[j].FieldNum || + (index[i].FieldNum == index[j].FieldNum && + index[i].Start < index[j].Start) + }) + } + return index, nil +} + +func (lazy *XXX_lazyUnmarshalInfo) SizeField(num uint32) (size int) { + start, end, found, _, multipleEntries := lazy.FindFieldInProto(num) + if multipleEntries != nil { + for _, entry := range multipleEntries { + size += int(entry.End - entry.Start) + } + return size + } + if !found { + return 0 + } + return int(end - start) +} + +func (lazy *XXX_lazyUnmarshalInfo) AppendField(b []byte, num uint32) ([]byte, bool) { + start, end, found, _, multipleEntries := 
lazy.FindFieldInProto(num) + if multipleEntries != nil { + for _, entry := range multipleEntries { + b = append(b, lazy.Protobuf[entry.Start:entry.End]...) + } + return b, true + } + if !found { + return nil, false + } + b = append(b, lazy.Protobuf[start:end]...) + return b, true +} + +func (lazy *XXX_lazyUnmarshalInfo) SetIndex(index []IndexEntry) { + atomicStoreIndex(&lazy.index, &index) +} + +// FindFieldInProto looks for field fieldNum in lazyUnmarshalInfo information +// (including protobuf), returns startOffset/endOffset/found. +func (lazy *XXX_lazyUnmarshalInfo) FindFieldInProto(fieldNum uint32) (start, end uint32, found, multipleContiguous bool, multipleEntries []IndexEntry) { + if lazy.Protobuf == nil { + // There is no backing protobuf for this message -- it was made from a builder + return 0, 0, false, false, nil + } + index := atomicLoadIndex(&lazy.index) + if index == nil { + r, err := buildIndex(lazy.Protobuf) + if err != nil { + panic(fmt.Sprintf("findFieldInfo: error building index when looking for field %d: %v", fieldNum, err)) + } + // lazy.index is a pointer to the slice returned by buildIndex + index = &r + atomicStoreIndex(&lazy.index, index) + } + return lookupField(index, fieldNum) +} + +// lookupField uses the index to return the offset at which the indicated +// field starts, the offset immediately after the field ends (including all +// instances of a repeated field), and bools indicating whether the field was +// found and whether there are multiple encodings of the field in the byte range. +// +// To handle the uncommon case where there are repeated encodings for the same +// field which are not consecutive in the protobuf (so we need to return +// multiple start/end offsets), we also return a slice multipleEntries. If +// multipleEntries is non-nil, then multiple entries were found, and the +// values in the slice should be used, rather than start/end/found. +func lookupField(indexp *[]IndexEntry, fieldNum uint32) (start, end uint32, found bool, multipleContiguous bool, multipleEntries []IndexEntry) { + // The pointer indexp to the index was already loaded atomically. + // The slice is uniquely associated with the pointer, so it doesn't + // need to be loaded atomically. + index := *indexp + for i, entry := range index { + if fieldNum == entry.FieldNum { + if i < len(index)-1 && entry.FieldNum == index[i+1].FieldNum { + // Handle the uncommon case where there are + // repeated entries for the same field which + // are not contiguous in the protobuf. + multiple := make([]IndexEntry, 1, 2) + multiple[0] = IndexEntry{fieldNum, entry.Start, entry.End, entry.MultipleContiguous} + i++ + for i < len(index) && index[i].FieldNum == fieldNum { + multiple = append(multiple, IndexEntry{fieldNum, index[i].Start, index[i].End, index[i].MultipleContiguous}) + i++ + } + return 0, 0, false, false, multiple + + } + return entry.Start, entry.End, true, entry.MultipleContiguous, nil + } + if fieldNum < entry.FieldNum { + return 0, 0, false, false, nil + } + } + return 0, 0, false, false, nil +} diff --git a/vendor/google.golang.org/protobuf/internal/protolazy/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/protolazy/pointer_unsafe.go new file mode 100644 index 00000000000..dc2a64ca643 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/protolazy/pointer_unsafe.go @@ -0,0 +1,17 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +package protolazy + +import ( + "sync/atomic" + "unsafe" +) + +func atomicLoadIndex(p **[]IndexEntry) *[]IndexEntry { + return (*[]IndexEntry)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreIndex(p **[]IndexEntry, v *[]IndexEntry) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go deleted file mode 100644 index a1f6f333860..00000000000 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego || appengine -// +build purego appengine - -package strs - -import pref "google.golang.org/protobuf/reflect/protoreflect" - -func UnsafeString(b []byte) string { - return string(b) -} - -func UnsafeBytes(s string) []byte { - return []byte(s) -} - -type Builder struct{} - -func (*Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName { - return prefix.Append(name) -} - -func (*Builder) MakeString(b []byte) string { - return string(b) -} diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go index a008acd0908..832a7988f14 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && !go1.21 -// +build !purego,!appengine,!go1.21 +//go:build !go1.21 package strs diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go index 60166f2ba3c..1ffddf6877a 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && go1.21 -// +build !purego,!appengine,go1.21 +//go:build go1.21 package strs diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index dbbf1f6862c..f5c06280fe3 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -51,8 +51,8 @@ import ( // 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 34 - Patch = 2 + Minor = 36 + Patch = 3 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go index d75a6534c1b..e28d7acb378 100644 --- a/vendor/google.golang.org/protobuf/proto/decode.go +++ b/vendor/google.golang.org/protobuf/proto/decode.go @@ -47,6 +47,12 @@ type UnmarshalOptions struct { // RecursionLimit limits how deeply messages may be nested. // If zero, a default limit is applied. RecursionLimit int + + // + // NoLazyDecoding turns off lazy decoding, which otherwise is enabled by + // default. 
Lazy decoding only affects submessages (annotated with [lazy = + // true] in the .proto file) within messages that use the Opaque API. + NoLazyDecoding bool } // Unmarshal parses the wire-format message in b and places the result in m. @@ -104,6 +110,16 @@ func (o UnmarshalOptions) unmarshal(b []byte, m protoreflect.Message) (out proto if o.DiscardUnknown { in.Flags |= protoiface.UnmarshalDiscardUnknown } + + if !allowPartial { + // This does not affect how current unmarshal functions work; it just allows them + // to record this for the lazy decoding case. + in.Flags |= protoiface.UnmarshalCheckRequired + } + if o.NoLazyDecoding { + in.Flags |= protoiface.UnmarshalNoLazyDecoding + } + out, err = methods.Unmarshal(in) } else { o.RecursionLimit-- @@ -156,7 +172,7 @@ func (o UnmarshalOptions) unmarshalMessageSlow(b []byte, m protoreflect.Message) var err error if fd == nil { err = errUnknown - } else if flags.ProtoLegacy { + } else if flags.ProtoLegacyWeak { if fd.IsWeak() && fd.Message().IsPlaceholder() { err = errUnknown // weak referent is not linked in } diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go index 1f847bcc358..f0473c5869a 100644 --- a/vendor/google.golang.org/protobuf/proto/encode.go +++ b/vendor/google.golang.org/protobuf/proto/encode.go @@ -63,7 +63,8 @@ type MarshalOptions struct { // options (except for UseCachedSize itself). // // 2. The message and all its submessages have not changed in any - // way since the Size call. + // way since the Size call. For lazily decoded messages, accessing + // a message results in decoding the message, which is a change. // // If either of these invariants is violated, // the results are undefined and may include panics or corrupted output. diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go index 1a0be1b03c7..c36d4a9cd75 100644 --- a/vendor/google.golang.org/protobuf/proto/equal.go +++ b/vendor/google.golang.org/protobuf/proto/equal.go @@ -8,6 +8,7 @@ import ( "reflect" "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" ) // Equal reports whether two messages are equal, @@ -51,6 +52,14 @@ func Equal(x, y Message) bool { if mx.IsValid() != my.IsValid() { return false } + + // Only one of the messages needs to implement the fast-path for it to work. + pmx := protoMethods(mx) + pmy := protoMethods(my) + if pmx != nil && pmy != nil && pmx.Equal != nil && pmy.Equal != nil { + return pmx.Equal(protoiface.EqualInput{MessageA: mx, MessageB: my}).Equal + } + vx := protoreflect.ValueOfMessage(mx) vy := protoreflect.ValueOfMessage(my) return vx.Equal(vy) diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go index d248f292846..78445d116f7 100644 --- a/vendor/google.golang.org/protobuf/proto/extension.go +++ b/vendor/google.golang.org/protobuf/proto/extension.go @@ -39,6 +39,48 @@ func ClearExtension(m Message, xt protoreflect.ExtensionType) { // If the field is unpopulated, it returns the default value for // scalars and an immutable, empty value for lists or messages. // It panics if xt does not extend m. +// +// The type of the value is dependent on the field type of the extension.
+// For extensions generated by protoc-gen-go, the Go type is as follows: +// +// ╔═══════════════════╤═════════════════════════╗ +// ║ Go type │ Protobuf kind ║ +// ╠═══════════════════╪═════════════════════════╣ +// ║ bool │ bool ║ +// ║ int32 │ int32, sint32, sfixed32 ║ +// ║ int64 │ int64, sint64, sfixed64 ║ +// ║ uint32 │ uint32, fixed32 ║ +// ║ uint64 │ uint64, fixed64 ║ +// ║ float32 │ float ║ +// ║ float64 │ double ║ +// ║ string │ string ║ +// ║ []byte │ bytes ║ +// ║ protoreflect.Enum │ enum ║ +// ║ proto.Message │ message, group ║ +// ╚═══════════════════╧═════════════════════════╝ +// +// The protoreflect.Enum and proto.Message types are the concrete Go type +// associated with the named enum or message. Repeated fields are represented +// using a Go slice of the base element type. +// +// If a generated extension descriptor variable is directly passed to +// GetExtension, then the call should be followed immediately by a +// type assertion to the expected output value. For example: +// +// mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage) +// +// This pattern enables static analysis tools to verify that the asserted type +// matches the Go type associated with the extension field and +// also enables a possible future migration to a type-safe extension API. +// +// Since singular messages are the most common extension type, the pattern of +// calling HasExtension followed by GetExtension may be simplified to: +// +// if mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage); mm != nil { +// ... // make use of mm +// } +// +// The mm variable is non-nil if and only if HasExtension reports true. func GetExtension(m Message, xt protoreflect.ExtensionType) any { // Treat nil message interface as an empty message; return the default. if m == nil { @@ -51,6 +93,35 @@ func GetExtension(m Message, xt protoreflect.ExtensionType) any { // SetExtension stores the value of an extension field. // It panics if m is invalid, xt does not extend m, or if type of v // is invalid for the specified extension field. +// +// The type of the value is dependent on the field type of the extension. +// For extensions generated by protoc-gen-go, the Go type is as follows: +// +// ╔═══════════════════╤═════════════════════════╗ +// ║ Go type │ Protobuf kind ║ +// ╠═══════════════════╪═════════════════════════╣ +// ║ bool │ bool ║ +// ║ int32 │ int32, sint32, sfixed32 ║ +// ║ int64 │ int64, sint64, sfixed64 ║ +// ║ uint32 │ uint32, fixed32 ║ +// ║ uint64 │ uint64, fixed64 ║ +// ║ float32 │ float ║ +// ║ float64 │ double ║ +// ║ string │ string ║ +// ║ []byte │ bytes ║ +// ║ protoreflect.Enum │ enum ║ +// ║ proto.Message │ message, group ║ +// ╚═══════════════════╧═════════════════════════╝ +// +// The protoreflect.Enum and proto.Message types are the concrete Go type +// associated with the named enum or message. Repeated fields are represented +// using a Go slice of the base element type. +// +// If a generated extension descriptor variable is directly passed to +// SetExtension (e.g., foopb.E_MyExtension), then the value should be a +// concrete type that matches the expected Go type for the extension descriptor +// so that static analysis tools can verify type correctness. +// This also enables a possible future migration to a type-safe extension API. 
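+//
+// For example (a hypothetical call mirroring the GetExtension pattern
+// documented above):
+//
+//	proto.SetExtension(m, foopb.E_MyExtension, &foopb.MyMessage{})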
func SetExtension(m Message, xt protoreflect.ExtensionType, v any) { xd := xt.TypeDescriptor() pv := xt.ValueOf(v) diff --git a/vendor/google.golang.org/protobuf/proto/size.go b/vendor/google.golang.org/protobuf/proto/size.go index 052fb5ae313..c8675806c65 100644 --- a/vendor/google.golang.org/protobuf/proto/size.go +++ b/vendor/google.golang.org/protobuf/proto/size.go @@ -12,11 +12,19 @@ import ( ) // Size returns the size in bytes of the wire-format encoding of m. +// +// Note that Size might return more bytes than Marshal will write in the case of +// lazily decoded messages that arrive in non-minimal wire format: see +// https://protobuf.dev/reference/go/size/ for more details. func Size(m Message) int { return MarshalOptions{}.Size(m) } // Size returns the size in bytes of the wire-format encoding of m. +// +// Note that Size might return more bytes than Marshal will write in the case of +// lazily decoded messages that arrive in non-minimal wire format: see +// https://protobuf.dev/reference/go/size/ for more details. func (o MarshalOptions) Size(m Message) int { // Treat a nil message interface as an empty message; nothing to output. if m == nil { diff --git a/vendor/google.golang.org/protobuf/proto/wrapperopaque.go b/vendor/google.golang.org/protobuf/proto/wrapperopaque.go new file mode 100644 index 00000000000..267fd0f1f62 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/wrapperopaque.go @@ -0,0 +1,80 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +// ValueOrNil returns nil if has is false, or a pointer to a new variable +// containing the value returned by the specified getter. +// +// This function is similar to the wrappers (proto.Int32(), proto.String(), +// etc.), but is generic (works for any field type) and works with the hasser +// and getter of a field, as opposed to a value. +// +// This is convenient when populating builder fields. +// +// Example: +// +// hop := attr.GetDirectHop() +// injectedRoute := ripb.InjectedRoute_builder{ +// Prefixes: route.GetPrefixes(), +// NextHop: proto.ValueOrNil(hop.HasAddress(), hop.GetAddress), +// } +func ValueOrNil[T any](has bool, getter func() T) *T { + if !has { + return nil + } + v := getter() + return &v +} + +// ValueOrDefault returns the protobuf message val if val is not nil, otherwise +// it returns a pointer to an empty val message. +// +// This function allows for translating code from the old Open Struct API to the +// new Opaque API. +// +// The old Open Struct API represented oneof fields with a wrapper struct: +// +// var signedImg *accountpb.SignedImage +// profile := &accountpb.Profile{ +// // The Avatar oneof will be set, with an empty SignedImage. +// Avatar: &accountpb.Profile_SignedImage{signedImg}, +// } +// +// The new Opaque API treats oneof fields like regular fields; there are no more +// wrapper structs: +// +// var signedImg *accountpb.SignedImage +// profile := &accountpb.Profile{} +// profile.SetSignedImage(signedImg) +// +// For convenience, the Opaque API also offers Builders, which allow for a +// direct translation of struct initialization. However, because Builders use +// nilness to represent field presence (but there is no non-nil wrapper struct +// anymore), Builders cannot distinguish between an unset oneof and a set oneof +// with a nil message.
The above code would need to be translated with the help of the +// ValueOrDefault function to retain the same behavior: +// +// var signedImg *accountpb.SignedImage +// return &accountpb.Profile_builder{ +// SignedImage: proto.ValueOrDefault(signedImg), +// }.Build() +func ValueOrDefault[T interface { + *P + Message +}, P any](val T) T { + if val == nil { + return T(new(P)) + } + return val +} + +// ValueOrDefaultBytes is like ValueOrDefault but for working with fields of +// type []byte. +func ValueOrDefaultBytes(val []byte) []byte { + if val == nil { + return []byte{} + } + return val +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go index 8fbecb4f58d..69a05050917 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go @@ -13,6 +13,8 @@ package protodesc import ( + "strings" + "google.golang.org/protobuf/internal/editionssupport" "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/internal/filedesc" @@ -102,13 +104,17 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot default: return nil, errors.New("invalid syntax: %q", fd.GetSyntax()) } - if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < editionssupport.Minimum || fd.GetEdition() > editionssupport.Maximum) { - return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition()) - } f.L1.Path = fd.GetName() if f.L1.Path == "" { return nil, errors.New("file path must be populated") } + if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < editionssupport.Minimum || fd.GetEdition() > editionssupport.Maximum) { + // Allow cmd/protoc-gen-go/testdata to use any edition for easier + // testing of upcoming edition features.
+ if !strings.HasPrefix(fd.GetName(), "cmd/protoc-gen-go/testdata/") { + return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition()) + } + } f.L1.Package = protoreflect.FullName(fd.GetPackage()) if !f.L1.Package.IsValid() && f.L1.Package != "" { return nil, errors.New("invalid package: %q", f.L1.Package) diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go index 85617554272..ebcb4a8ab13 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -150,6 +150,7 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc opts = proto.Clone(opts).(*descriptorpb.FieldOptions) f.L1.Options = func() protoreflect.ProtoMessage { return opts } f.L1.IsWeak = opts.GetWeak() + f.L1.IsLazy = opts.GetLazy() if opts.Packed != nil { f.L1.EditionFeatures.IsPacked = opts.GetPacked() } @@ -214,6 +215,9 @@ func (r descsByName) initExtensionDeclarations(xds []*descriptorpb.FieldDescript if xd.JsonName != nil { x.L2.StringName.InitJSON(xd.GetJsonName()) } + if x.L1.Kind == protoreflect.MessageKind && x.L1.EditionFeatures.IsDelimitedEncoded { + x.L1.Kind = protoreflect.GroupKind + } } return xs, nil } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go index 6de31c2ebdb..5eaf652176c 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go @@ -149,7 +149,7 @@ func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds return errors.New("message field %q under proto3 optional semantics must be within a single element oneof", f.FullName()) } } - if f.IsWeak() && !flags.ProtoLegacy { + if f.IsWeak() && !flags.ProtoLegacyWeak { return errors.New("message field %q is a weak field, which is a legacy proto1 feature that is no longer supported", f.FullName()) } if f.IsWeak() && (!f.HasPresence() || !isOptionalMessage(f) || f.ContainingOneof() != nil) { diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go index 804830eda36..f55b0369596 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go @@ -14,7 +14,7 @@ import ( "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/types/descriptorpb" - gofeaturespb "google.golang.org/protobuf/types/gofeaturespb" + "google.golang.org/protobuf/types/gofeaturespb" ) var defaults = &descriptorpb.FeatureSetDefaults{} @@ -43,6 +43,8 @@ func toEditionProto(ed filedesc.Edition) descriptorpb.Edition { return descriptorpb.Edition_EDITION_PROTO3 case filedesc.Edition2023: return descriptorpb.Edition_EDITION_2023 + case filedesc.Edition2024: + return descriptorpb.Edition_EDITION_2024 default: panic(fmt.Sprintf("unknown value for edition: %v", ed)) } @@ -123,10 +125,27 @@ func mergeEditionFeatures(parentDesc protoreflect.Descriptor, child *descriptorp parentFS.IsJSONCompliant = *jf == descriptorpb.FeatureSet_ALLOW } - if goFeatures, ok := proto.GetExtension(child, gofeaturespb.E_Go).(*gofeaturespb.GoFeatures); ok && goFeatures != nil { - if luje := goFeatures.LegacyUnmarshalJsonEnum; luje != 
nil { - parentFS.GenerateLegacyUnmarshalJSON = *luje - } + // We must not use proto.GetExtension(child, gofeaturespb.E_Go) + // because that only works for messages we generated, but not for + // dynamicpb messages. See golang/protobuf#1669. + goFeatures := child.ProtoReflect().Get(gofeaturespb.E_Go.TypeDescriptor()) + if !goFeatures.IsValid() { + return parentFS + } + // gf.Interface() could be *dynamicpb.Message or *gofeaturespb.GoFeatures. + gf := goFeatures.Message() + fields := gf.Descriptor().Fields() + + if fd := fields.ByName("legacy_unmarshal_json_enum"); gf.Has(fd) { + parentFS.GenerateLegacyUnmarshalJSON = gf.Get(fd).Bool() + } + + if fd := fields.ByName("strip_enum_prefix"); gf.Has(fd) { + parentFS.StripEnumPrefix = int(gf.Get(fd).Enum()) + } + + if fd := fields.ByName("api_level"); gf.Has(fd) { + parentFS.APILevel = int(gf.Get(fd).Enum()) } return parentFS diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go index d5d5af6ebed..742cb518c40 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go @@ -23,6 +23,7 @@ type ( Unmarshal func(unmarshalInput) (unmarshalOutput, error) Merge func(mergeInput) mergeOutput CheckInitialized func(checkInitializedInput) (checkInitializedOutput, error) + Equal func(equalInput) equalOutput } supportFlags = uint64 sizeInput = struct { @@ -75,4 +76,13 @@ type ( checkInitializedOutput = struct { pragma.NoUnkeyedLiterals } + equalInput = struct { + pragma.NoUnkeyedLiterals + MessageA Message + MessageB Message + } + equalOutput = struct { + pragma.NoUnkeyedLiterals + Equal bool + } ) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go index a7b0d06ff32..a4b78acef68 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go @@ -152,7 +152,7 @@ type Message interface { // This method may return nil. // // The returned methods type is identical to - // google.golang.org/protobuf/runtime/protoiface.Methods. + // [google.golang.org/protobuf/runtime/protoiface.Methods]. // Consult the protoiface package documentation for details. ProtoMethods() *methods } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go deleted file mode 100644 index 75f83a2af03..00000000000 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego || appengine -// +build purego appengine - -package protoreflect - -import "google.golang.org/protobuf/internal/pragma" - -type valueType int - -const ( - nilType valueType = iota - boolType - int32Type - int64Type - uint32Type - uint64Type - float32Type - float64Type - stringType - bytesType - enumType - ifaceType -) - -// value is a union where only one type can be represented at a time. -// This uses a distinct field for each type. This is type safe in Go, but -// occupies more memory than necessary (72B). 
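// --- Editorial aside (illustrative sketch, not part of the upstream diff) ---
// The mergeEditionFeatures rewrite above replaces proto.GetExtension with a
// protoreflect lookup so that dynamicpb messages (whose concrete Go type is
// not *gofeaturespb.GoFeatures) are handled too; see golang/protobuf#1669.
// The same pattern, condensed (imports: google.golang.org/protobuf/proto,
// .../types/gofeaturespb):
func legacyUnmarshalJSONEnum(child proto.Message) (value, ok bool) {
	v := child.ProtoReflect().Get(gofeaturespb.E_Go.TypeDescriptor())
	if !v.IsValid() {
		return false, false
	}
	gf := v.Message() // *gofeaturespb.GoFeatures or *dynamicpb.Message
	fd := gf.Descriptor().Fields().ByName("legacy_unmarshal_json_enum")
	if fd == nil || !gf.Has(fd) {
		return false, false
	}
	return gf.Get(fd).Bool(), true
}
// --- end aside ---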
-type value struct { - pragma.DoNotCompare // 0B - - typ valueType // 8B - num uint64 // 8B - str string // 16B - bin []byte // 24B - iface any // 16B -} - -func valueOfString(v string) Value { - return Value{typ: stringType, str: v} -} -func valueOfBytes(v []byte) Value { - return Value{typ: bytesType, bin: v} -} -func valueOfIface(v any) Value { - return Value{typ: ifaceType, iface: v} -} - -func (v Value) getString() string { - return v.str -} -func (v Value) getBytes() []byte { - return v.bin -} -func (v Value) getIface() any { - return v.iface -} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go index 7f3583ead81..0015fcb35d8 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && !go1.21 -// +build !purego,!appengine,!go1.21 +//go:build !go1.21 package protoreflect diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go index f7d386990a0..479527b58dd 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && go1.21 -// +build !purego,!appengine,go1.21 +//go:build go1.21 package protoreflect diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go index 44cf467d884..28e9e9f0397 100644 --- a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go +++ b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go @@ -39,6 +39,9 @@ type Methods = struct { // CheckInitialized returns an error if any required fields in the message are not set. CheckInitialized func(CheckInitializedInput) (CheckInitializedOutput, error) + + // Equal compares two messages and returns EqualOutput.Equal == true if they are equal. + Equal func(EqualInput) EqualOutput } // SupportFlags indicate support for optional features. @@ -119,6 +122,22 @@ type UnmarshalInputFlags = uint8 const ( UnmarshalDiscardUnknown UnmarshalInputFlags = 1 << iota + + // UnmarshalAliasBuffer permits unmarshal operations to alias the input buffer. + // The unmarshaller must not modify the contents of the buffer. + UnmarshalAliasBuffer + + // UnmarshalValidated indicates that validation has already been + // performed on the input buffer. + UnmarshalValidated + + // UnmarshalCheckRequired is set if this unmarshal operation ultimately will care if required fields are + // initialized. + UnmarshalCheckRequired + + // UnmarshalNoLazyDecoding is set if this unmarshal operation should not use + // lazy decoding, even when otherwise available. + UnmarshalNoLazyDecoding ) // UnmarshalOutputFlags are output from the Unmarshal method. @@ -166,3 +185,18 @@ type CheckInitializedInput = struct { type CheckInitializedOutput = struct { pragma.NoUnkeyedLiterals } + +// EqualInput is input to the Equal method. 
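// --- Editorial aside (speculative sketch, not part of the upstream diff) ---
// One plausible way the new UnmarshalInputFlags above combine inside a
// fast-path Unmarshal implementation (the real consumer lives in
// internal/impl; import: google.golang.org/protobuf/runtime/protoiface):
func mayDecodeLazily(in protoiface.UnmarshalInput) bool {
	// Lazy decoding retains references into the input buffer, so it is only
	// sound when the caller permits aliasing and has not disabled laziness.
	return in.Flags&protoiface.UnmarshalAliasBuffer != 0 &&
		in.Flags&protoiface.UnmarshalNoLazyDecoding == 0
}
// --- end aside ---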
+type EqualInput = struct { + pragma.NoUnkeyedLiterals + + MessageA protoreflect.Message + MessageB protoreflect.Message +} + +// EqualOutput is output from the Equal method. +type EqualOutput = struct { + pragma.NoUnkeyedLiterals + + Equal bool +} diff --git a/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go b/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go index 4a1ab7fb3de..93df1b569bb 100644 --- a/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go +++ b/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go @@ -15,6 +15,7 @@ import ( "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/internal/filetype" "google.golang.org/protobuf/internal/impl" + "google.golang.org/protobuf/internal/protolazy" ) // UnsafeEnabled specifies whether package unsafe can be used. @@ -39,6 +40,9 @@ type ( ExtensionFieldV1 = impl.ExtensionField Pointer = impl.Pointer + + LazyUnmarshalInfo = *protolazy.XXX_lazyUnmarshalInfo + RaceDetectHookData = impl.RaceDetectHookData ) var X impl.Export diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index 9403eb07507..a551e7ae945 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -69,7 +69,7 @@ const ( Edition_EDITION_2023 Edition = 1000 Edition_EDITION_2024 Edition = 1001 // Placeholder editions for testing feature resolution. These should not be - // used or relyed on outside of tests. + // used or relied on outside of tests. Edition_EDITION_1_TEST_ONLY Edition = 1 Edition_EDITION_2_TEST_ONLY Edition = 2 Edition_EDITION_99997_TEST_ONLY Edition = 99997 @@ -577,8 +577,6 @@ func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { } // If set to RETENTION_SOURCE, the option will be omitted from the binary. -// Note: as of January 2023, support for this is in progress and does not yet -// have an effect (b/264593489). type FieldOptions_OptionRetention int32 const ( @@ -640,8 +638,7 @@ func (FieldOptions_OptionRetention) EnumDescriptor() ([]byte, []int) { // This indicates the types of entities that the field may apply to when used // as an option. If it is unset, then the field may be freely used as an -// option on any kind of entity. Note: as of January 2023, support for this is -// in progress and does not yet have an effect (b/264593489). +// option on any kind of entity. type FieldOptions_OptionTargetType int32 const ( @@ -1208,20 +1205,18 @@ func (GeneratedCodeInfo_Annotation_Semantic) EnumDescriptor() ([]byte, []int) { // The protocol compiler can output a FileDescriptorSet containing the .proto // files it parses. 
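// --- Editorial aside (illustrative sketch, not part of the upstream diff) ---
// How a caller can use the new Equal fast path: ProtoMethods() returns a
// value whose type is identical to protoiface.Methods, and Equal may be nil
// for messages generated before this change, so a reflection-based fallback
// is kept (imports: google.golang.org/protobuf/proto, .../runtime/protoiface):
func fastEqual(a, b proto.Message) bool {
	ma, mb := a.ProtoReflect(), b.ProtoReflect()
	if ms := ma.ProtoMethods(); ms != nil && ms.Equal != nil {
		return ms.Equal(protoiface.EqualInput{MessageA: ma, MessageB: mb}).Equal
	}
	return proto.Equal(a, b) // generic, reflection-based comparison
}
// --- end aside ---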
type FileDescriptorSet struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FileDescriptorSet) Reset() { *x = FileDescriptorSet{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileDescriptorSet) String() string { @@ -1232,7 +1227,7 @@ func (*FileDescriptorSet) ProtoMessage() {} func (x *FileDescriptorSet) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1256,12 +1251,9 @@ func (x *FileDescriptorSet) GetFile() []*FileDescriptorProto { // Describes a complete .proto file. type FileDescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // file name, relative to root of source tree - Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` // e.g. "foo", "foo.bar", etc. + state protoimpl.MessageState `protogen:"open.v1"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // file name, relative to root of source tree + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` // e.g. "foo", "foo.bar", etc. // Names of files imported by this file. Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` // Indexes of the public imported files in the dependency list above. @@ -1286,16 +1278,16 @@ type FileDescriptorProto struct { // If `edition` is present, this value must be "editions". Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` // The edition of the proto file. 
- Edition *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + Edition *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FileDescriptorProto) Reset() { *x = FileDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileDescriptorProto) String() string { @@ -1306,7 +1298,7 @@ func (*FileDescriptorProto) ProtoMessage() {} func (x *FileDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1414,10 +1406,7 @@ func (x *FileDescriptorProto) GetEdition() Edition { // Describes a message type. type DescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` @@ -1429,16 +1418,16 @@ type DescriptorProto struct { ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` // Reserved field names, which may not be used by fields in the same message. // A given name may only be reserved once. - ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *DescriptorProto) Reset() { *x = DescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescriptorProto) String() string { @@ -1449,7 +1438,7 @@ func (*DescriptorProto) ProtoMessage() {} func (x *DescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1535,11 +1524,7 @@ func (x *DescriptorProto) GetReservedName() []string { } type ExtensionRangeOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` // For external users: DO NOT USE. We are in the process of open sourcing @@ -1551,7 +1536,10 @@ type ExtensionRangeOptions struct { // The verification state of the range. // TODO: flip the default to DECLARATION once all empty ranges // are marked as UNVERIFIED. - Verification *ExtensionRangeOptions_VerificationState `protobuf:"varint,3,opt,name=verification,enum=google.protobuf.ExtensionRangeOptions_VerificationState,def=1" json:"verification,omitempty"` + Verification *ExtensionRangeOptions_VerificationState `protobuf:"varint,3,opt,name=verification,enum=google.protobuf.ExtensionRangeOptions_VerificationState,def=1" json:"verification,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for ExtensionRangeOptions fields. @@ -1561,11 +1549,9 @@ const ( func (x *ExtensionRangeOptions) Reset() { *x = ExtensionRangeOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExtensionRangeOptions) String() string { @@ -1576,7 +1562,7 @@ func (*ExtensionRangeOptions) ProtoMessage() {} func (x *ExtensionRangeOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1621,10 +1607,7 @@ func (x *ExtensionRangeOptions) GetVerification() ExtensionRangeOptions_Verifica // Describes a field within a message. type FieldDescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` @@ -1676,15 +1659,15 @@ type FieldDescriptorProto struct { // Proto2 optional fields do not set this flag, because they already indicate // optional with `LABEL_OPTIONAL`. 
Proto3Optional *bool `protobuf:"varint,17,opt,name=proto3_optional,json=proto3Optional" json:"proto3_optional,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FieldDescriptorProto) Reset() { *x = FieldDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldDescriptorProto) String() string { @@ -1695,7 +1678,7 @@ func (*FieldDescriptorProto) ProtoMessage() {} func (x *FieldDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1789,21 +1772,18 @@ func (x *FieldDescriptorProto) GetProto3Optional() bool { // Describes a oneof. type OneofDescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + sizeCache protoimpl.SizeCache } func (x *OneofDescriptorProto) Reset() { *x = OneofDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *OneofDescriptorProto) String() string { @@ -1814,7 +1794,7 @@ func (*OneofDescriptorProto) ProtoMessage() {} func (x *OneofDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1845,10 +1825,7 @@ func (x *OneofDescriptorProto) GetOptions() *OneofOptions { // Describes an enum type. type EnumDescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` @@ -1858,16 +1835,16 @@ type EnumDescriptorProto struct { ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` // Reserved enum value names, which may not be reused. A given name may only // be reserved once. 
- ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *EnumDescriptorProto) Reset() { *x = EnumDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumDescriptorProto) String() string { @@ -1878,7 +1855,7 @@ func (*EnumDescriptorProto) ProtoMessage() {} func (x *EnumDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1930,22 +1907,19 @@ func (x *EnumDescriptorProto) GetReservedName() []string { // Describes a value within an enum. type EnumValueDescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` - Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + sizeCache protoimpl.SizeCache } func (x *EnumValueDescriptorProto) Reset() { *x = EnumValueDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumValueDescriptorProto) String() string { @@ -1956,7 +1930,7 @@ func (*EnumValueDescriptorProto) ProtoMessage() {} func (x *EnumValueDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1994,22 +1968,19 @@ func (x *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { // Describes a service. 
type ServiceDescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` - Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ServiceDescriptorProto) Reset() { *x = ServiceDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServiceDescriptorProto) String() string { @@ -2020,7 +1991,7 @@ func (*ServiceDescriptorProto) ProtoMessage() {} func (x *ServiceDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2058,11 +2029,8 @@ func (x *ServiceDescriptorProto) GetOptions() *ServiceOptions { // Describes a method of a service. type MethodDescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // Input and output type names. These are resolved in the same way as // FieldDescriptorProto.type_name, but must refer to a message type. InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` @@ -2072,6 +2040,8 @@ type MethodDescriptorProto struct { ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` // Identifies if server streams multiple server messages ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for MethodDescriptorProto fields. 
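// --- Editorial aside (illustrative sketch, not part of the upstream diff) ---
// Every message in this generated file receives the same mechanical rewrite:
//   - the state field gains a `protogen:"open.v1"` tag, recording that the
//     struct uses the open (exported-fields) API introduced alongside the
//     opaque API work;
//   - the unknownFields/sizeCache/extensionFields bookkeeping moves below the
//     exported fields;
//   - Reset and ProtoReflect drop the protoimpl.UnsafeEnabled branch, since
//     the purego/appengine build mode was removed (see the value_unsafe_go1xx
//     build-tag changes earlier in this diff).
// Reduced to a hypothetical one-field message, the new shape is:
type ExampleMessage struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	Name  *string                `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
	// Internal bookkeeping now always trails the exported fields.
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}
// --- end aside ---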
@@ -2082,11 +2052,9 @@ const ( func (x *MethodDescriptorProto) Reset() { *x = MethodDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MethodDescriptorProto) String() string { @@ -2097,7 +2065,7 @@ func (*MethodDescriptorProto) ProtoMessage() {} func (x *MethodDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2155,11 +2123,7 @@ func (x *MethodDescriptorProto) GetServerStreaming() bool { } type FileOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` // Sets the Java package where classes generated from this .proto will be // placed. By default, the proto package is used, but this is often // inappropriate because proto packages do not normally start with backwards @@ -2251,6 +2215,9 @@ type FileOptions struct { // The parser stores options it doesn't recognize here. // See the documentation for the "Options" section above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for FileOptions fields. @@ -2267,11 +2234,9 @@ const ( func (x *FileOptions) Reset() { *x = FileOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileOptions) String() string { @@ -2282,7 +2247,7 @@ func (*FileOptions) ProtoMessage() {} func (x *FileOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2446,11 +2411,7 @@ func (x *FileOptions) GetUninterpretedOption() []*UninterpretedOption { } type MessageOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` // Set true to use the old proto1 MessageSet wire format for extensions. // This is provided for backwards-compatibility with the MessageSet wire // format. You should not use this for any other reason: It's less @@ -2523,6 +2484,9 @@ type MessageOptions struct { Features *FeatureSet `protobuf:"bytes,12,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for MessageOptions fields. @@ -2534,11 +2498,9 @@ const ( func (x *MessageOptions) Reset() { *x = MessageOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MessageOptions) String() string { @@ -2549,7 +2511,7 @@ func (*MessageOptions) ProtoMessage() {} func (x *MessageOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2615,17 +2577,14 @@ func (x *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { } type FieldOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` + // NOTE: ctype is deprecated. Use `features.(pb.cpp).string_type` instead. // The ctype option instructs the C++ code generator to use a different // representation of the field than it normally would. See the specific // options below. This option is only implemented to support use of // [ctype=CORD] and [ctype=STRING] (the default) on non-repeated fields of - // type "bytes" in the open source release -- sorry, we'll try to include - // other types in a future version! + // type "bytes" in the open source release. + // TODO: make ctype actually deprecated. Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` // The packed option can be enabled for repeated primitive fields to enable // a more efficient representation on the wire. Rather than repeatedly @@ -2692,6 +2651,9 @@ type FieldOptions struct { FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,22,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for FieldOptions fields. 
@@ -2707,11 +2669,9 @@ const ( func (x *FieldOptions) Reset() { *x = FieldOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldOptions) String() string { @@ -2722,7 +2682,7 @@ func (*FieldOptions) ProtoMessage() {} func (x *FieldOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2836,24 +2796,21 @@ func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { } type OneofOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` // Any features defined in the specific edition. Features *FeatureSet `protobuf:"bytes,1,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *OneofOptions) Reset() { *x = OneofOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *OneofOptions) String() string { @@ -2864,7 +2821,7 @@ func (*OneofOptions) ProtoMessage() {} func (x *OneofOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2894,11 +2851,7 @@ func (x *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { } type EnumOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` // Set this option to true to allow mapping different tag names to the same // value. AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` @@ -2920,6 +2873,9 @@ type EnumOptions struct { Features *FeatureSet `protobuf:"bytes,7,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for EnumOptions fields. 
@@ -2929,11 +2885,9 @@ const ( func (x *EnumOptions) Reset() { *x = EnumOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumOptions) String() string { @@ -2944,7 +2898,7 @@ func (*EnumOptions) ProtoMessage() {} func (x *EnumOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2996,11 +2950,7 @@ func (x *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { } type EnumValueOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` // Is this enum value deprecated? // Depending on the target platform, this can emit Deprecated annotations // for the enum value, or it will be completely ignored; in the very least, @@ -3016,6 +2966,9 @@ type EnumValueOptions struct { FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,4,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for EnumValueOptions fields. @@ -3026,11 +2979,9 @@ const ( func (x *EnumValueOptions) Reset() { *x = EnumValueOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumValueOptions) String() string { @@ -3041,7 +2992,7 @@ func (*EnumValueOptions) ProtoMessage() {} func (x *EnumValueOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3092,11 +3043,7 @@ func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { } type ServiceOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` // Any features defined in the specific edition. Features *FeatureSet `protobuf:"bytes,34,opt,name=features" json:"features,omitempty"` // Is this service deprecated? @@ -3106,6 +3053,9 @@ type ServiceOptions struct { Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for ServiceOptions fields. @@ -3115,11 +3065,9 @@ const ( func (x *ServiceOptions) Reset() { *x = ServiceOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServiceOptions) String() string { @@ -3130,7 +3078,7 @@ func (*ServiceOptions) ProtoMessage() {} func (x *ServiceOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3167,11 +3115,7 @@ func (x *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { } type MethodOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` // Is this method deprecated? // Depending on the target platform, this can emit Deprecated annotations // for the method, or it will be completely ignored; in the very least, @@ -3182,6 +3126,9 @@ type MethodOptions struct { Features *FeatureSet `protobuf:"bytes,35,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for MethodOptions fields. @@ -3192,11 +3139,9 @@ const ( func (x *MethodOptions) Reset() { *x = MethodOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MethodOptions) String() string { @@ -3207,7 +3152,7 @@ func (*MethodOptions) ProtoMessage() {} func (x *MethodOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3257,11 +3202,8 @@ func (x *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { // or produced by Descriptor::CopyTo()) will never have UninterpretedOptions // in them. 
type UninterpretedOption struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` // The value of the uninterpreted option, in whatever type the tokenizer // identified it as during parsing. Exactly one of these should be set. IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` @@ -3270,15 +3212,15 @@ type UninterpretedOption struct { DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *UninterpretedOption) Reset() { *x = UninterpretedOption{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UninterpretedOption) String() string { @@ -3289,7 +3231,7 @@ func (*UninterpretedOption) ProtoMessage() {} func (x *UninterpretedOption) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3360,26 +3302,23 @@ func (x *UninterpretedOption) GetAggregateValue() string { // be designed and implemented to handle this, hopefully before we ever hit a // conflict here. 
type FeatureSet struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` FieldPresence *FeatureSet_FieldPresence `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"` EnumType *FeatureSet_EnumType `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"` RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"` Utf8Validation *FeatureSet_Utf8Validation `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"` MessageEncoding *FeatureSet_MessageEncoding `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"` JsonFormat *FeatureSet_JsonFormat `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FeatureSet) Reset() { *x = FeatureSet{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FeatureSet) String() string { @@ -3390,7 +3329,7 @@ func (*FeatureSet) ProtoMessage() {} func (x *FeatureSet) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3452,10 +3391,7 @@ func (x *FeatureSet) GetJsonFormat() FeatureSet_JsonFormat { // feature resolution. The resolution with this object becomes a simple search // for the closest matching edition, followed by proto merges. type FeatureSetDefaults struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Defaults []*FeatureSetDefaults_FeatureSetEditionDefault `protobuf:"bytes,1,rep,name=defaults" json:"defaults,omitempty"` // The minimum supported edition (inclusive) when this was constructed. // Editions before this will not have defaults. @@ -3463,15 +3399,15 @@ type FeatureSetDefaults struct { // The maximum known edition (inclusive) when this was constructed. Editions // after this will not have reliable defaults. 
MaximumEdition *Edition `protobuf:"varint,5,opt,name=maximum_edition,json=maximumEdition,enum=google.protobuf.Edition" json:"maximum_edition,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FeatureSetDefaults) Reset() { *x = FeatureSetDefaults{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FeatureSetDefaults) String() string { @@ -3482,7 +3418,7 @@ func (*FeatureSetDefaults) ProtoMessage() {} func (x *FeatureSetDefaults) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3521,10 +3457,7 @@ func (x *FeatureSetDefaults) GetMaximumEdition() Edition { // Encapsulates information about the original source file from which a // FileDescriptorProto was generated. type SourceCodeInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // A Location identifies a piece of source code in a .proto file which // corresponds to a particular definition. This information is intended // to be useful to IDEs, code indexers, documentation generators, and similar @@ -3573,16 +3506,17 @@ type SourceCodeInfo struct { // - Code which tries to interpret locations should probably be designed to // ignore those that it doesn't understand, as more types of locations could // be recorded in the future. - Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *SourceCodeInfo) Reset() { *x = SourceCodeInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SourceCodeInfo) String() string { @@ -3593,7 +3527,7 @@ func (*SourceCodeInfo) ProtoMessage() {} func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3619,22 +3553,19 @@ func (x *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { // file. A GeneratedCodeInfo message is associated with only one generated // source file, but may contain references to different source .proto files. type GeneratedCodeInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // An Annotation connects some span of text in generated code to an element // of its generating .proto file. 
- Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GeneratedCodeInfo) Reset() { *x = GeneratedCodeInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GeneratedCodeInfo) String() string { @@ -3645,7 +3576,7 @@ func (*GeneratedCodeInfo) ProtoMessage() {} func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3668,22 +3599,19 @@ func (x *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { } type DescriptorProto_ExtensionRange struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive. + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` unknownFields protoimpl.UnknownFields - - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive. - Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + sizeCache protoimpl.SizeCache } func (x *DescriptorProto_ExtensionRange) Reset() { *x = DescriptorProto_ExtensionRange{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescriptorProto_ExtensionRange) String() string { @@ -3694,7 +3622,7 @@ func (*DescriptorProto_ExtensionRange) ProtoMessage() {} func (x *DescriptorProto_ExtensionRange) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3734,21 +3662,18 @@ func (x *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { // fields or extension ranges in the same message. Reserved ranges may // not overlap. type DescriptorProto_ReservedRange struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive. unknownFields protoimpl.UnknownFields - - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. 
- End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive. + sizeCache protoimpl.SizeCache } func (x *DescriptorProto_ReservedRange) Reset() { *x = DescriptorProto_ReservedRange{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescriptorProto_ReservedRange) String() string { @@ -3759,7 +3684,7 @@ func (*DescriptorProto_ReservedRange) ProtoMessage() {} func (x *DescriptorProto_ReservedRange) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3789,10 +3714,7 @@ func (x *DescriptorProto_ReservedRange) GetEnd() int32 { } type ExtensionRangeOptions_Declaration struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The extension number declared within the extension range. Number *int32 `protobuf:"varint,1,opt,name=number" json:"number,omitempty"` // The fully-qualified name of the extension field. There must be a leading @@ -3808,16 +3730,16 @@ type ExtensionRangeOptions_Declaration struct { Reserved *bool `protobuf:"varint,5,opt,name=reserved" json:"reserved,omitempty"` // If true, indicates that the extension must be defined as repeated. // Otherwise the extension must be defined as optional. - Repeated *bool `protobuf:"varint,6,opt,name=repeated" json:"repeated,omitempty"` + Repeated *bool `protobuf:"varint,6,opt,name=repeated" json:"repeated,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ExtensionRangeOptions_Declaration) Reset() { *x = ExtensionRangeOptions_Declaration{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExtensionRangeOptions_Declaration) String() string { @@ -3828,7 +3750,7 @@ func (*ExtensionRangeOptions_Declaration) ProtoMessage() {} func (x *ExtensionRangeOptions_Declaration) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3885,21 +3807,18 @@ func (x *ExtensionRangeOptions_Declaration) GetRepeated() bool { // is inclusive such that it can appropriately represent the entire int32 // domain. type EnumDescriptorProto_EnumReservedRange struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Inclusive. unknownFields protoimpl.UnknownFields - - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. 
- End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Inclusive. + sizeCache protoimpl.SizeCache } func (x *EnumDescriptorProto_EnumReservedRange) Reset() { *x = EnumDescriptorProto_EnumReservedRange{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumDescriptorProto_EnumReservedRange) String() string { @@ -3910,7 +3829,7 @@ func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[26] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3940,21 +3859,18 @@ func (x *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { } type FieldOptions_EditionDefault struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` // Textproto value. unknownFields protoimpl.UnknownFields - - Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` - Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` // Textproto value. + sizeCache protoimpl.SizeCache } func (x *FieldOptions_EditionDefault) Reset() { *x = FieldOptions_EditionDefault{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldOptions_EditionDefault) String() string { @@ -3965,7 +3881,7 @@ func (*FieldOptions_EditionDefault) ProtoMessage() {} func (x *FieldOptions_EditionDefault) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[27] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3996,10 +3912,7 @@ func (x *FieldOptions_EditionDefault) GetValue() string { // Information about the support window of a feature. type FieldOptions_FeatureSupport struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The edition that this feature was first available in. In editions // earlier than this one, the default assigned to EDITION_LEGACY will be // used, and proto files will not be able to override it. @@ -4014,15 +3927,15 @@ type FieldOptions_FeatureSupport struct { // this one, the last default assigned will be used, and proto files will // not be able to override it. 
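	//
	// A minimal sketch (illustrative, not part of the generated file), assuming
	// the usual generated Get<Field> accessors for this message
	// (GetEditionIntroduced, GetEditionRemoved), of how a caller might test
	// whether a feature is usable in a given edition per the window described
	// above:
	//
	//	func supportedIn(fs *FieldOptions_FeatureSupport, e Edition) bool {
	//		// Getters are nil-safe; an unset edition reports the zero value.
	//		if fs.GetEditionIntroduced() != 0 && e < fs.GetEditionIntroduced() {
	//			return false
	//		}
	//		return fs.GetEditionRemoved() == 0 || e < fs.GetEditionRemoved()
	//	}
	//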
EditionRemoved *Edition `protobuf:"varint,4,opt,name=edition_removed,json=editionRemoved,enum=google.protobuf.Edition" json:"edition_removed,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FieldOptions_FeatureSupport) Reset() { *x = FieldOptions_FeatureSupport{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldOptions_FeatureSupport) String() string { @@ -4033,7 +3946,7 @@ func (*FieldOptions_FeatureSupport) ProtoMessage() {} func (x *FieldOptions_FeatureSupport) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[28] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4082,21 +3995,18 @@ func (x *FieldOptions_FeatureSupport) GetEditionRemoved() Edition { // E.g.,{ ["foo", false], ["bar.baz", true], ["moo", false] } represents // "foo.(bar.baz).moo". type UninterpretedOption_NamePart struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` unknownFields protoimpl.UnknownFields - - NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` - IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` + sizeCache protoimpl.SizeCache } func (x *UninterpretedOption_NamePart) Reset() { *x = UninterpretedOption_NamePart{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UninterpretedOption_NamePart) String() string { @@ -4107,7 +4017,7 @@ func (*UninterpretedOption_NamePart) ProtoMessage() {} func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[29] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4141,24 +4051,21 @@ func (x *UninterpretedOption_NamePart) GetIsExtension() bool { // the defaults at the closest matching edition ordered at or before it should // be used. This field must be in strict ascending order by edition. type FeatureSetDefaults_FeatureSetEditionDefault struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` // Defaults of features that can be overridden in this edition. 
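	//
	// A minimal sketch (illustrative, not part of the generated file), assuming
	// the enclosing FeatureSetDefaults message with its usual generated
	// GetDefaults() accessor, of selecting the entry for a target edition under
	// the strict-ascending-order contract described above:
	//
	//	func defaultsFor(ds *FeatureSetDefaults, e Edition) *FeatureSetDefaults_FeatureSetEditionDefault {
	//		var best *FeatureSetDefaults_FeatureSetEditionDefault
	//		for _, d := range ds.GetDefaults() {
	//			if d.GetEdition() <= e {
	//				best = d // keep the closest edition at or before the target
	//			}
	//		}
	//		return best
	//	}
	//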
OverridableFeatures *FeatureSet `protobuf:"bytes,4,opt,name=overridable_features,json=overridableFeatures" json:"overridable_features,omitempty"` // Defaults of features that can't be overridden in this edition. FixedFeatures *FeatureSet `protobuf:"bytes,5,opt,name=fixed_features,json=fixedFeatures" json:"fixed_features,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() { *x = FeatureSetDefaults_FeatureSetEditionDefault{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string { @@ -4169,7 +4076,7 @@ func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {} func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[30] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4206,10 +4113,7 @@ func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetFixedFeatures() *Featur } type SourceCodeInfo_Location struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Identifies which part of the FileDescriptorProto was defined at this // location. // @@ -4301,15 +4205,15 @@ type SourceCodeInfo_Location struct { LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *SourceCodeInfo_Location) Reset() { *x = SourceCodeInfo_Location{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SourceCodeInfo_Location) String() string { @@ -4320,7 +4224,7 @@ func (*SourceCodeInfo_Location) ProtoMessage() {} func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[31] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4371,10 +4275,7 @@ func (x *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { } type GeneratedCodeInfo_Annotation struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Identifies the element in the original source .proto file. This field // is formatted the same as SourceCodeInfo.Location.path. 
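	//
	// For illustration (hypothetical code, not part of the generated file):
	// the path is read as (field number, index) pairs against the descriptor
	// tree, so [4, 0, 2, 3] means field 4 of FileDescriptorProto
	// (message_type), element 0, then field 2 of DescriptorProto (field),
	// element 3 — i.e. the fourth field of the first message:
	//
	//	path := []int32{4, 0, 2, 3}
	//	msg := fd.GetMessageType()[path[1]] // fd is a *FileDescriptorProto
	//	fld := msg.GetField()[path[3]]
	//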
Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` @@ -4386,17 +4287,17 @@ type GeneratedCodeInfo_Annotation struct { // Identifies the ending offset in bytes in the generated code that // relates to the identified object. The end offset should be one past // the last relevant byte (so the length of the text = end - begin). - End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` - Semantic *GeneratedCodeInfo_Annotation_Semantic `protobuf:"varint,5,opt,name=semantic,enum=google.protobuf.GeneratedCodeInfo_Annotation_Semantic" json:"semantic,omitempty"` + End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + Semantic *GeneratedCodeInfo_Annotation_Semantic `protobuf:"varint,5,opt,name=semantic,enum=google.protobuf.GeneratedCodeInfo_Annotation_Semantic" json:"semantic,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GeneratedCodeInfo_Annotation) Reset() { *x = GeneratedCodeInfo_Annotation{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GeneratedCodeInfo_Annotation) String() string { @@ -4407,7 +4308,7 @@ func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[32] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4463,494 +4364,474 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x22, 0x4d, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x62, 0x75, 0x66, 0x22, 0x5b, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x38, 0x0a, 0x04, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69, - 0x6c, 0x65, 0x22, 0x98, 0x05, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, - 0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x65, - 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x65, - 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x75, 0x62, 0x6c, - 0x69, 0x63, 0x5f, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0a, 0x20, - 0x03, 0x28, 0x05, 0x52, 0x10, 0x70, 0x75, 0x62, 
0x6c, 0x69, 0x63, 0x44, 0x65, 0x70, 0x65, 0x6e, - 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x65, 0x61, 0x6b, 0x5f, 0x64, 0x65, - 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0e, - 0x77, 0x65, 0x61, 0x6b, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x43, - 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, - 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, - 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, - 0x6f, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, - 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, - 0x0a, 0x0f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 
0x6c, 0x64, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, - 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, - 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, - 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x6c, 0x65, 0x2a, 0x0c, 0x08, 0x80, 0xec, 0xca, 0xff, 0x01, 0x10, 0x81, 0xec, 0xca, 0xff, 0x01, + 0x22, 0x98, 0x05, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, + 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, + 0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x65, + 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, + 0x5f, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0a, 0x20, 0x03, 0x28, + 0x05, 0x52, 0x10, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, + 0x6e, 0x63, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x65, 0x61, 0x6b, 0x5f, 0x64, 0x65, 0x70, 0x65, + 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0e, 0x77, 0x65, + 0x61, 0x6b, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x43, 0x0a, 0x0c, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x07, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, - 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, - 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, - 0x5f, 0x64, 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, - 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, - 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, + 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, + 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, - 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, - 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x52, 0x61, 0x6e, 0x67, 
0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, - 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, - 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xcc, 0x04, 0x0a, 0x15, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, - 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, - 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, - 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x12, 0x6d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, - 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x3a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x42, 0x03, 0x88, - 0x01, 0x02, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x1a, 0x94, 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 
0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, - 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, - 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, - 0x64, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, - 0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, - 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, - 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, + 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, + 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, + 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, 0x0a, 0x0f, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, + 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, + 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, + 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 
0x72, 0x6f, 0x74, + 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, + 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, + 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, + 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, + 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, + 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, + 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xcc, 0x04, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 
0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, 0x0b, 0x64, + 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, + 0x6d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x3a, + 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x42, 0x03, 0x88, 0x01, 0x02, + 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x94, + 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, + 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, + 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x4a, + 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45, + 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, + 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07, + 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 
0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, - 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, + 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, + 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, - 0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, - 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, - 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, - 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, - 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, + 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, + 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 
0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, + 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, + 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, + 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, + 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, + 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, + 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, + 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, + 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, + 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, + 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, + 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, + 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, + 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, + 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, + 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, + 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, + 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, + 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, + 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, + 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 
0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, + 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, + 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, - 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, - 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, - 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, - 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, - 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, - 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, - 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, - 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, - 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, - 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, - 0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, - 0x49, 0x4e, 
0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, - 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, - 0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, - 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, - 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14, - 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, - 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, + 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, + 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, + 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, + 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, - 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, - 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, - 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, - 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, - 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, - 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, + 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 
0x3e, 0x0a, 0x06, 0x6d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, + 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, + 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, + 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, + 0x67, 0x22, 0xad, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, + 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, + 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, + 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, + 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, + 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, + 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, + 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 
0x14, + 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, + 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, + 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, + 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, + 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, + 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, + 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, + 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, + 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, + 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, + 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, + 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, + 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, + 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, + 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, + 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, + 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, + 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, + 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, + 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, + 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x73, 
0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, + 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73, + 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, + 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, + 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, + 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, + 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, + 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, - 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, - 0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, - 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x69, 0x6e, 0x67, 0x22, 0xad, 0x09, 0x0a, 
0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, - 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, - 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, - 0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, - 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, - 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, - 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, - 0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, - 0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, - 0x5f, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, - 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, - 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, - 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, - 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, - 0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, - 0x76, 0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, - 0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, - 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, - 0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, - 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, - 0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, - 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, - 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, - 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, - 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, - 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, - 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 
0x64, 0x18, 0x17, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, - 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, - 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, - 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, - 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, - 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, - 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, - 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, - 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, - 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, - 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, - 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, - 0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, - 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, - 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, - 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, - 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, - 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, - 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, - 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, - 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 
0x80, - 0x02, 0x4a, 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x52, 0x14, 0x70, - 0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x22, 0xf4, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, - 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, - 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, - 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, - 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, - 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, - 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, - 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, - 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, - 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, - 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, - 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, - 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x9d, 0x0d, 0x0a, 0x0c, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, - 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, - 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, - 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, - 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, - 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, - 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, - 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, - 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, - 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, - 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, - 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, - 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, - 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, + 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, + 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, + 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, + 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, + 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x52, 0x14, 0x70, 0x68, 0x70, + 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x22, 0xf4, 0x03, 0x0a, 0x0e, 0x4d, 
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, + 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, + 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, + 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, + 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, + 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, + 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, + 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, + 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, + 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, + 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, + 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, + 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x9d, 0x0d, 0x0a, 0x0c, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, + 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 
0x65, 0x12, 0x16, 0x0a, 0x06, + 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, + 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, - 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, - 0x10, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, - 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, + 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, + 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28, + 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, + 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, + 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, + 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, + 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, + 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, - 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, - 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, - 0x55, 0x0a, 0x0f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, - 0x72, 0x74, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, - 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, - 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 
- 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, - 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, - 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a, - 0x0e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, - 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f, - 0x64, 0x75, 0x63, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, - 0x74, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, - 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x77, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, - 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, - 0x6e, 0x67, 0x12, 0x41, 0x0a, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, - 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, - 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, - 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, - 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, - 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, - 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, - 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x15, 0x0a, 0x11, 
0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, - 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, - 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, - 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, - 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, - 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, - 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, - 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, - 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, - 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, - 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, - 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, - 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, - 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, - 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, - 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, - 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, - 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, + 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, + 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, + 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, + 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, + 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x55, 0x0a, + 0x0f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, + 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, + 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, + 0x70, 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, - 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, - 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, - 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, - 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, - 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, - 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, - 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x5a, + 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 
0x65, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a, 0x0e, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x47, 0x0a, + 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x64, 0x75, + 0x63, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x72, + 0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, + 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x77, + 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, + 0x12, 0x41, 0x0a, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x6d, 0x6f, + 0x76, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, + 0x76, 0x65, 0x64, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, + 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, + 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, + 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, + 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, + 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, + 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, + 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, + 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, + 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, + 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, + 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, + 0x5f, 
0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f, + 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, + 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, + 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, + 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14, + 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, + 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07, + 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10, + 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, + 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, + 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, @@ -4959,274 +4840,296 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, - 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xd8, 0x02, - 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, + 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, + 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, + 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, + 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, + 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, + 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 
0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42, + 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, + 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, + 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, + 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, + 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, + 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xd8, 0x02, 0x0a, 0x10, + 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, + 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, + 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, + 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x55, 0x0a, 0x0f, 0x66, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, + 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, + 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, + 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, 
0x72, 0x76, 0x69, + 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, - 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, - 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x55, 0x0a, 0x0f, - 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, - 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, - 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, + 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x99, + 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, + 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, + 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, 0x65, 0x6d, 0x70, + 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x22, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, + 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, + 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, + 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, + 0x72, 
0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, - 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, + 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, + 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, + 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, + 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a, + 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, + 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, + 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, + 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, + 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, + 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 
0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, + 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, + 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, + 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x0a, 0x0a, 0x0a, 0x46, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64, + 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x3f, 0x88, 0x01, 0x01, 0x98, + 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, + 0x49, 0x54, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, + 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, + 0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0d, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x6c, 0x0a, 0x09, 0x65, 0x6e, + 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, - 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, - 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, - 0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, 0x65, - 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x22, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, - 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 
0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, - 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6d, - 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, 0x08, - 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, - 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, - 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, - 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, - 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, - 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, - 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, - 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, - 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, - 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, - 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, - 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, - 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 
0x61, 0x74, - 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, - 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, - 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, - 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, - 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, - 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x0a, 0x0a, 0x0a, 0x46, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x3f, 0x88, 0x01, - 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, - 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, 0x4c, - 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, - 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0d, 0x66, - 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x6c, 0x0a, 0x09, - 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, - 0x6d, 0x54, 0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, - 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, - 0x09, 0x12, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, - 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x17, 0x72, - 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, - 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, - 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, - 0x2d, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, - 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, - 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x15, - 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, - 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 
0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, - 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, - 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0xe6, - 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0xb2, - 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x26, 0x88, 0x01, - 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, - 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xb2, 0x01, - 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, - 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, - 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, - 0x6d, 0x61, 0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, 0x98, 0x01, - 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, - 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, - 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0a, - 0x6a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, - 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, - 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, - 0x43, 0x49, 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, - 0x54, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, - 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, - 0x50, 0x45, 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, - 0x02, 0x22, 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, - 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, - 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, - 0x0a, 0x0a, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, - 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 
0x44, 0x10, 0x02, 0x22, 0x49, 0x0a, 0x0e, 0x55, 0x74, 0x66, - 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, - 0x54, 0x46, 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, - 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, - 0x46, 0x59, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x04, - 0x08, 0x01, 0x10, 0x01, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, - 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, - 0x47, 0x45, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, - 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, - 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, - 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, - 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, - 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, - 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, - 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, - 0x54, 0x10, 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0x8b, 0x4e, 0x2a, 0x06, 0x08, 0x8b, 0x4e, - 0x10, 0x90, 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, - 0x10, 0xe8, 0x07, 0x22, 0xef, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, - 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, - 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x54, + 0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, + 0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x09, 0x12, + 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x08, + 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x17, 0x72, 0x65, 0x70, + 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x2d, 0x88, + 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, + 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, 0x41, 0x43, + 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x15, 0x72, 0x65, + 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, + 0x69, 0x6e, 0x67, 0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, - 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, - 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, + 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0x84, 0x07, 0xa2, + 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, + 0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65, + 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, - 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, - 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, - 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xf8, 0x01, 0x0a, 0x18, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x14, 0x6f, - 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x26, 0x88, 0x01, 0x01, 0x98, + 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, + 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0x84, 0x07, 0xb2, 0x01, 0x03, 0x08, + 0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, + 0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, - 0x62, 0x6c, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x0e, 
0x66, - 0x69, 0x78, 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, - 0x52, 0x0d, 0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x4a, - 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x08, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, - 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, - 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, - 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, - 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, - 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, - 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, - 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, - 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, - 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, - 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, - 0xd0, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, - 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, - 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, - 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, - 0x69, 0x6e, 0x12, 
0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x03, 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, - 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, - 0x6e, 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, - 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, - 0x10, 0x02, 0x2a, 0xa7, 0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, - 0x0a, 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, - 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, - 0x45, 0x47, 0x41, 0x43, 0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, - 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, - 0xe7, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, - 0x32, 0x33, 0x10, 0xe8, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x32, 0x30, 0x32, 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, - 0x01, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, - 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, - 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, - 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, - 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, - 0x4e, 0x4c, 0x59, 0x10, 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13, - 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, - 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 
0x46, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, + 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, + 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x41, 0x4c, + 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0a, 0x6a, 0x73, + 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, 0x49, 0x45, + 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, + 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, + 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, 0x51, 0x55, + 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45, + 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x02, 0x22, + 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, 0x50, 0x45, + 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, + 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, + 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, + 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x49, 0x0a, 0x0e, 0x55, 0x74, 0x66, 0x38, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x54, 0x46, + 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, + 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, + 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x04, 0x08, 0x01, + 0x10, 0x01, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, + 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, + 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, + 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, + 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49, + 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, + 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, + 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47, + 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, + 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0x8b, 0x4e, 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90, + 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 
0xe8, + 0x07, 0x22, 0xef, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, + 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, + 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, + 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xf8, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x14, 0x6f, 0x76, 0x65, + 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, + 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x0e, 0x66, 0x69, 0x78, + 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x0d, + 0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x4a, 0x04, 0x08, + 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x22, 0xb5, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, + 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 
0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, + 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, + 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, + 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, + 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, + 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, + 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, + 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, + 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2a, 0x0c, 0x08, + 0x80, 0xec, 0xca, 0xff, 0x01, 0x10, 0x81, 0xec, 0xca, 0xff, 0x01, 0x22, 0xd0, 0x02, 0x0a, 0x11, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, + 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10, + 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, + 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, + 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x61, + 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, + 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, + 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x02, 0x2a, 0xa7, + 0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 
0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x44, + 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, 0x45, 0x47, 0x41, 0x43, + 0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, 0xe7, 0x07, 0x12, 0x11, + 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, 0x33, 0x10, 0xe8, + 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, + 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, 0x17, 0x0a, + 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, + 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, + 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, + 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, + 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, + 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, + 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, + 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, + 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, + 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, } var ( @@ -5385,424 +5288,6 @@ func file_google_protobuf_descriptor_proto_init() { if File_google_protobuf_descriptor_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*FileDescriptorSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*FileDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*DescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*ExtensionRangeOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*FieldDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*OneofDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*EnumDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*EnumValueDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*ServiceDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v any, i int) any { - switch v := v.(*MethodDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v any, i int) any { - switch v := v.(*FileOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v any, i int) any { - switch v := v.(*MessageOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v any, i int) any { - switch v := v.(*FieldOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v any, i int) any { - switch v := v.(*OneofOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v any, i int) any { - switch v := v.(*EnumOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v any, i int) any { - switch v := v.(*EnumValueOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return 
&v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v any, i int) any { - switch v := v.(*ServiceOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v any, i int) any { - switch v := v.(*MethodOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v any, i int) any { - switch v := v.(*UninterpretedOption); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v any, i int) any { - switch v := v.(*FeatureSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v any, i int) any { - switch v := v.(*FeatureSetDefaults); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v any, i int) any { - switch v := v.(*SourceCodeInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v any, i int) any { - switch v := v.(*GeneratedCodeInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v any, i int) any { - switch v := v.(*DescriptorProto_ExtensionRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v any, i int) any { - switch v := v.(*DescriptorProto_ReservedRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v any, i int) any { - switch v := v.(*ExtensionRangeOptions_Declaration); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v any, i int) any { - switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v any, i int) any { - switch v := v.(*FieldOptions_EditionDefault); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v any, i int) any { - switch v := v.(*FieldOptions_FeatureSupport); i { - case 0: - return &v.state - case 1: - return 
&v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v any, i int) any { - switch v := v.(*UninterpretedOption_NamePart); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v any, i int) any { - switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v any, i int) any { - switch v := v.(*SourceCodeInfo_Location); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[32].Exporter = func(v any, i int) any { - switch v := v.(*GeneratedCodeInfo_Annotation); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go index a2ca940c50f..e0b72eaf922 100644 --- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go +++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go @@ -18,22 +18,150 @@ import ( sync "sync" ) -type GoFeatures struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +type GoFeatures_APILevel int32 + +const ( + // API_LEVEL_UNSPECIFIED results in selecting the OPEN API, + // but needs to be a separate value to distinguish between + // an explicitly set api level or a missing api level. + GoFeatures_API_LEVEL_UNSPECIFIED GoFeatures_APILevel = 0 + GoFeatures_API_OPEN GoFeatures_APILevel = 1 + GoFeatures_API_HYBRID GoFeatures_APILevel = 2 + GoFeatures_API_OPAQUE GoFeatures_APILevel = 3 +) + +// Enum value maps for GoFeatures_APILevel. +var ( + GoFeatures_APILevel_name = map[int32]string{ + 0: "API_LEVEL_UNSPECIFIED", + 1: "API_OPEN", + 2: "API_HYBRID", + 3: "API_OPAQUE", + } + GoFeatures_APILevel_value = map[string]int32{ + "API_LEVEL_UNSPECIFIED": 0, + "API_OPEN": 1, + "API_HYBRID": 2, + "API_OPAQUE": 3, + } +) + +func (x GoFeatures_APILevel) Enum() *GoFeatures_APILevel { + p := new(GoFeatures_APILevel) + *p = x + return p +} + +func (x GoFeatures_APILevel) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GoFeatures_APILevel) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_go_features_proto_enumTypes[0].Descriptor() +} + +func (GoFeatures_APILevel) Type() protoreflect.EnumType { + return &file_google_protobuf_go_features_proto_enumTypes[0] +} + +func (x GoFeatures_APILevel) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *GoFeatures_APILevel) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = GoFeatures_APILevel(num) + return nil +} + +// Deprecated: Use GoFeatures_APILevel.Descriptor instead. 
+func (GoFeatures_APILevel) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_go_features_proto_rawDescGZIP(), []int{0, 0} +} + +type GoFeatures_StripEnumPrefix int32 + +const ( + GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED GoFeatures_StripEnumPrefix = 0 + GoFeatures_STRIP_ENUM_PREFIX_KEEP GoFeatures_StripEnumPrefix = 1 + GoFeatures_STRIP_ENUM_PREFIX_GENERATE_BOTH GoFeatures_StripEnumPrefix = 2 + GoFeatures_STRIP_ENUM_PREFIX_STRIP GoFeatures_StripEnumPrefix = 3 +) + +// Enum value maps for GoFeatures_StripEnumPrefix. +var ( + GoFeatures_StripEnumPrefix_name = map[int32]string{ + 0: "STRIP_ENUM_PREFIX_UNSPECIFIED", + 1: "STRIP_ENUM_PREFIX_KEEP", + 2: "STRIP_ENUM_PREFIX_GENERATE_BOTH", + 3: "STRIP_ENUM_PREFIX_STRIP", + } + GoFeatures_StripEnumPrefix_value = map[string]int32{ + "STRIP_ENUM_PREFIX_UNSPECIFIED": 0, + "STRIP_ENUM_PREFIX_KEEP": 1, + "STRIP_ENUM_PREFIX_GENERATE_BOTH": 2, + "STRIP_ENUM_PREFIX_STRIP": 3, + } +) + +func (x GoFeatures_StripEnumPrefix) Enum() *GoFeatures_StripEnumPrefix { + p := new(GoFeatures_StripEnumPrefix) + *p = x + return p +} + +func (x GoFeatures_StripEnumPrefix) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GoFeatures_StripEnumPrefix) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_go_features_proto_enumTypes[1].Descriptor() +} + +func (GoFeatures_StripEnumPrefix) Type() protoreflect.EnumType { + return &file_google_protobuf_go_features_proto_enumTypes[1] +} + +func (x GoFeatures_StripEnumPrefix) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *GoFeatures_StripEnumPrefix) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = GoFeatures_StripEnumPrefix(num) + return nil +} +// Deprecated: Use GoFeatures_StripEnumPrefix.Descriptor instead. +func (GoFeatures_StripEnumPrefix) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_go_features_proto_rawDescGZIP(), []int{0, 1} +} + +type GoFeatures struct { + state protoimpl.MessageState `protogen:"open.v1"` // Whether or not to generate the deprecated UnmarshalJSON method for enums. + // Can only be true for proto using the Open Struct api. LegacyUnmarshalJsonEnum *bool `protobuf:"varint,1,opt,name=legacy_unmarshal_json_enum,json=legacyUnmarshalJsonEnum" json:"legacy_unmarshal_json_enum,omitempty"` + // One of OPEN, HYBRID or OPAQUE. 
+ ApiLevel *GoFeatures_APILevel `protobuf:"varint,2,opt,name=api_level,json=apiLevel,enum=pb.GoFeatures_APILevel" json:"api_level,omitempty"` + StripEnumPrefix *GoFeatures_StripEnumPrefix `protobuf:"varint,3,opt,name=strip_enum_prefix,json=stripEnumPrefix,enum=pb.GoFeatures_StripEnumPrefix" json:"strip_enum_prefix,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GoFeatures) Reset() { *x = GoFeatures{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_go_features_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_go_features_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GoFeatures) String() string { @@ -44,7 +172,7 @@ func (*GoFeatures) ProtoMessage() {} func (x *GoFeatures) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_go_features_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -66,6 +194,20 @@ func (x *GoFeatures) GetLegacyUnmarshalJsonEnum() bool { return false } +func (x *GoFeatures) GetApiLevel() GoFeatures_APILevel { + if x != nil && x.ApiLevel != nil { + return *x.ApiLevel + } + return GoFeatures_API_LEVEL_UNSPECIFIED +} + +func (x *GoFeatures) GetStripEnumPrefix() GoFeatures_StripEnumPrefix { + if x != nil && x.StripEnumPrefix != nil { + return *x.StripEnumPrefix + } + return GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED +} + var file_google_protobuf_go_features_proto_extTypes = []protoimpl.ExtensionInfo{ { ExtendedType: (*descriptorpb.FeatureSet)(nil), @@ -90,7 +232,7 @@ var file_google_protobuf_go_features_proto_rawDesc = []byte{ 0x66, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcd, 0x01, 0x0a, 0x0a, 0x47, 0x6f, + 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xab, 0x05, 0x0a, 0x0a, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0xbe, 0x01, 0x0a, 0x1a, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x80, 0x01, @@ -103,14 +245,44 @@ var file_google_protobuf_go_features_proto_rawDesc = []byte{ 0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x61, 0x20, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x20, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, - 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12, - 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 
0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x70, 0x62, + 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x12, 0x74, 0x0a, 0x09, 0x61, 0x70, 0x69, + 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x70, + 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x41, 0x50, 0x49, + 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x42, 0x3e, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x01, + 0xa2, 0x01, 0x1a, 0x12, 0x15, 0x41, 0x50, 0x49, 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55, + 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0f, + 0x12, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x41, 0x51, 0x55, 0x45, 0x18, 0xe9, 0x07, 0xb2, + 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x08, 0x61, 0x70, 0x69, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, + 0x7c, 0x0a, 0x11, 0x73, 0x74, 0x72, 0x69, 0x70, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x70, 0x72, + 0x65, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x70, 0x62, 0x2e, + 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x70, + 0x45, 0x6e, 0x75, 0x6d, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x42, 0x30, 0x88, 0x01, 0x01, 0x98, + 0x01, 0x06, 0x98, 0x01, 0x07, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x1b, 0x12, 0x16, 0x53, 0x54, 0x52, + 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x4b, + 0x45, 0x45, 0x50, 0x18, 0x84, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe9, 0x07, 0x52, 0x0f, 0x73, 0x74, + 0x72, 0x69, 0x70, 0x45, 0x6e, 0x75, 0x6d, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x22, 0x53, 0x0a, + 0x08, 0x41, 0x50, 0x49, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x19, 0x0a, 0x15, 0x41, 0x50, 0x49, + 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, + 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x45, 0x4e, + 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x48, 0x59, 0x42, 0x52, 0x49, 0x44, + 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x41, 0x51, 0x55, 0x45, + 0x10, 0x03, 0x22, 0x92, 0x01, 0x0a, 0x0f, 0x53, 0x74, 0x72, 0x69, 0x70, 0x45, 0x6e, 0x75, 0x6d, + 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f, + 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x54, 0x52, + 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x4b, + 0x45, 0x45, 0x50, 0x10, 0x01, 0x12, 0x23, 0x0a, 0x1f, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f, 0x45, + 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x52, + 0x41, 0x54, 0x45, 0x5f, 0x42, 0x4f, 0x54, 0x48, 0x10, 0x02, 0x12, 0x1b, 0x0a, 0x17, 0x53, 0x54, + 0x52, 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, + 0x53, 0x54, 0x52, 0x49, 0x50, 0x10, 0x03, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12, 0x1b, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 
0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x70, 0x62, } var ( @@ -125,19 +297,24 @@ func file_google_protobuf_go_features_proto_rawDescGZIP() []byte { return file_google_protobuf_go_features_proto_rawDescData } +var file_google_protobuf_go_features_proto_enumTypes = make([]protoimpl.EnumInfo, 2) var file_google_protobuf_go_features_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_google_protobuf_go_features_proto_goTypes = []any{ - (*GoFeatures)(nil), // 0: pb.GoFeatures - (*descriptorpb.FeatureSet)(nil), // 1: google.protobuf.FeatureSet + (GoFeatures_APILevel)(0), // 0: pb.GoFeatures.APILevel + (GoFeatures_StripEnumPrefix)(0), // 1: pb.GoFeatures.StripEnumPrefix + (*GoFeatures)(nil), // 2: pb.GoFeatures + (*descriptorpb.FeatureSet)(nil), // 3: google.protobuf.FeatureSet } var file_google_protobuf_go_features_proto_depIdxs = []int32{ - 1, // 0: pb.go:extendee -> google.protobuf.FeatureSet - 0, // 1: pb.go:type_name -> pb.GoFeatures - 2, // [2:2] is the sub-list for method output_type - 2, // [2:2] is the sub-list for method input_type - 1, // [1:2] is the sub-list for extension type_name - 0, // [0:1] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name + 0, // 0: pb.GoFeatures.api_level:type_name -> pb.GoFeatures.APILevel + 1, // 1: pb.GoFeatures.strip_enum_prefix:type_name -> pb.GoFeatures.StripEnumPrefix + 3, // 2: pb.go:extendee -> google.protobuf.FeatureSet + 2, // 3: pb.go:type_name -> pb.GoFeatures + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 3, // [3:4] is the sub-list for extension type_name + 2, // [2:3] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name } func init() { file_google_protobuf_go_features_proto_init() } @@ -145,32 +322,19 @@ func file_google_protobuf_go_features_proto_init() { if File_google_protobuf_go_features_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_go_features_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*GoFeatures); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_protobuf_go_features_proto_rawDesc, - NumEnums: 0, + NumEnums: 2, NumMessages: 1, NumExtensions: 1, NumServices: 0, }, GoTypes: file_google_protobuf_go_features_proto_goTypes, DependencyIndexes: file_google_protobuf_go_features_proto_depIdxs, + EnumInfos: file_google_protobuf_go_features_proto_enumTypes, MessageInfos: file_google_protobuf_go_features_proto_msgTypes, ExtensionInfos: file_google_protobuf_go_features_proto_extTypes, }.Build() diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 7172b43d383..191552cce09 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -210,10 +210,7 @@ import ( // "value": "1.212s" // } type Any struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState 
`protogen:"open.v1"` // A URL/resource name that uniquely identifies the type of the serialized // protocol buffer message. This string must contain at least // one "/" character. The last segment of the URL's path must represent @@ -244,7 +241,9 @@ type Any struct { // used with implementation specific semantics. TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` // Must be a valid serialized protocol buffer of the above specified type. - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // New marshals src into a new Any instance. @@ -368,11 +367,9 @@ func (x *Any) UnmarshalNew() (proto.Message, error) { func (x *Any) Reset() { *x = Any{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_any_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_any_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Any) String() string { @@ -383,7 +380,7 @@ func (*Any) ProtoMessage() {} func (x *Any) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_any_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -461,20 +458,6 @@ func file_google_protobuf_any_proto_init() { if File_google_protobuf_any_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Any); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go index 1b71bcd910a..34d76e6cd93 100644 --- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -141,10 +141,7 @@ import ( // be expressed in JSON format as "3.000000001s", and 3 seconds and 1 // microsecond should be expressed in JSON format as "3.000001s". type Duration struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Signed seconds of the span of time. Must be from -315,576,000,000 // to +315,576,000,000 inclusive. Note: these bounds are computed from: // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years @@ -155,7 +152,9 @@ type Duration struct { // of one second or more, a non-zero value for the `nanos` field must be // of the same sign as the `seconds` field. Must be from -999,999,999 // to +999,999,999 inclusive. - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // New constructs a new Duration from the provided time.Duration. 
@@ -245,11 +244,9 @@ func (x *Duration) check() uint { func (x *Duration) Reset() { *x = Duration{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_duration_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_duration_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Duration) String() string { @@ -260,7 +257,7 @@ func (*Duration) ProtoMessage() {} func (x *Duration) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_duration_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -339,20 +336,6 @@ func file_google_protobuf_duration_proto_init() { if File_google_protobuf_duration_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Duration); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go index d87b4fb8281..7fcdd382cc9 100644 --- a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go @@ -48,18 +48,16 @@ import ( // rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); // } type Empty struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Empty) Reset() { *x = Empty{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_empty_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_empty_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Empty) String() string { @@ -70,7 +68,7 @@ func (*Empty) ProtoMessage() {} func (x *Empty) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_empty_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -131,20 +129,6 @@ func file_google_protobuf_empty_proto_init() { if File_google_protobuf_empty_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_empty_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Empty); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go index ac1e91bb6dd..e5d7da38c20 100644 --- a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go @@ -284,12 +284,11 @@ import ( // request should verify the included field paths, and 
return an // `INVALID_ARGUMENT` error if any path is unmappable. type FieldMask struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The set of field mask paths. - Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"` + Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // New constructs a field mask from a list of paths and verifies that @@ -467,11 +466,9 @@ func rangeFields(path string, f func(field string) bool) bool { func (x *FieldMask) Reset() { *x = FieldMask{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_field_mask_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_field_mask_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldMask) String() string { @@ -482,7 +479,7 @@ func (*FieldMask) ProtoMessage() {} func (x *FieldMask) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_field_mask_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -553,20 +550,6 @@ func file_google_protobuf_field_mask_proto_init() { if File_google_protobuf_field_mask_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_field_mask_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*FieldMask); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go index d45361cbc72..f2c53ea337c 100644 --- a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go @@ -120,6 +120,7 @@ package structpb import ( base64 "encoding/base64" + json "encoding/json" protojson "google.golang.org/protobuf/encoding/protojson" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" @@ -186,12 +187,11 @@ func (NullValue) EnumDescriptor() ([]byte, []int) { // // The JSON representation for `Struct` is JSON object. type Struct struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Unordered map of dynamically typed values. - Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // NewStruct constructs a Struct from a general-purpose Go map. 
@@ -233,11 +233,9 @@ func (x *Struct) UnmarshalJSON(b []byte) error { func (x *Struct) Reset() { *x = Struct{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_struct_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_struct_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Struct) String() string { @@ -248,7 +246,7 @@ func (*Struct) ProtoMessage() {} func (x *Struct) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_struct_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -277,13 +275,10 @@ func (x *Struct) GetFields() map[string]*Value { // // The JSON representation for `Value` is JSON value. type Value struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The kind of value. // - // Types that are assignable to Kind: + // Types that are valid to be assigned to Kind: // // *Value_NullValue // *Value_NumberValue @@ -291,24 +286,27 @@ type Value struct { // *Value_BoolValue // *Value_StructValue // *Value_ListValue - Kind isValue_Kind `protobuf_oneof:"kind"` + Kind isValue_Kind `protobuf_oneof:"kind"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // NewValue constructs a Value from a general-purpose Go interface. // -// ╔════════════════════════╤════════════════════════════════════════════╗ -// ║ Go type │ Conversion ║ -// ╠════════════════════════╪════════════════════════════════════════════╣ -// ║ nil │ stored as NullValue ║ -// ║ bool │ stored as BoolValue ║ -// ║ int, int32, int64 │ stored as NumberValue ║ -// ║ uint, uint32, uint64 │ stored as NumberValue ║ -// ║ float32, float64 │ stored as NumberValue ║ -// ║ string │ stored as StringValue; must be valid UTF-8 ║ -// ║ []byte │ stored as StringValue; base64-encoded ║ -// ║ map[string]any │ stored as StructValue ║ -// ║ []any │ stored as ListValue ║ -// ╚════════════════════════╧════════════════════════════════════════════╝ +// ╔═══════════════════════════════════════╤════════════════════════════════════════════╗ +// ║ Go type │ Conversion ║ +// ╠═══════════════════════════════════════╪════════════════════════════════════════════╣ +// ║ nil │ stored as NullValue ║ +// ║ bool │ stored as BoolValue ║ +// ║ int, int8, int16, int32, int64 │ stored as NumberValue ║ +// ║ uint, uint8, uint16, uint32, uint64 │ stored as NumberValue ║ +// ║ float32, float64 │ stored as NumberValue ║ +// ║ json.Number │ stored as NumberValue ║ +// ║ string │ stored as StringValue; must be valid UTF-8 ║ +// ║ []byte │ stored as StringValue; base64-encoded ║ +// ║ map[string]any │ stored as StructValue ║ +// ║ []any │ stored as ListValue ║ +// ╚═══════════════════════════════════════╧════════════════════════════════════════════╝ // // When converting an int64 or uint64 to a NumberValue, numeric precision loss // is possible since they are stored as a float64. 
@@ -320,12 +318,20 @@ func NewValue(v any) (*Value, error) { return NewBoolValue(v), nil case int: return NewNumberValue(float64(v)), nil + case int8: + return NewNumberValue(float64(v)), nil + case int16: + return NewNumberValue(float64(v)), nil case int32: return NewNumberValue(float64(v)), nil case int64: return NewNumberValue(float64(v)), nil case uint: return NewNumberValue(float64(v)), nil + case uint8: + return NewNumberValue(float64(v)), nil + case uint16: + return NewNumberValue(float64(v)), nil case uint32: return NewNumberValue(float64(v)), nil case uint64: @@ -334,6 +340,12 @@ func NewValue(v any) (*Value, error) { return NewNumberValue(float64(v)), nil case float64: return NewNumberValue(float64(v)), nil + case json.Number: + n, err := v.Float64() + if err != nil { + return nil, protoimpl.X.NewError("invalid number format %q, expected a float64: %v", v, err) + } + return NewNumberValue(n), nil case string: if !utf8.ValidString(v) { return nil, protoimpl.X.NewError("invalid UTF-8 in string: %q", v) @@ -441,11 +453,9 @@ func (x *Value) UnmarshalJSON(b []byte) error { func (x *Value) Reset() { *x = Value{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_struct_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_struct_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Value) String() string { @@ -456,7 +466,7 @@ func (*Value) ProtoMessage() {} func (x *Value) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_struct_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -471,51 +481,63 @@ func (*Value) Descriptor() ([]byte, []int) { return file_google_protobuf_struct_proto_rawDescGZIP(), []int{1} } -func (m *Value) GetKind() isValue_Kind { - if m != nil { - return m.Kind +func (x *Value) GetKind() isValue_Kind { + if x != nil { + return x.Kind } return nil } func (x *Value) GetNullValue() NullValue { - if x, ok := x.GetKind().(*Value_NullValue); ok { - return x.NullValue + if x != nil { + if x, ok := x.Kind.(*Value_NullValue); ok { + return x.NullValue + } } return NullValue_NULL_VALUE } func (x *Value) GetNumberValue() float64 { - if x, ok := x.GetKind().(*Value_NumberValue); ok { - return x.NumberValue + if x != nil { + if x, ok := x.Kind.(*Value_NumberValue); ok { + return x.NumberValue + } } return 0 } func (x *Value) GetStringValue() string { - if x, ok := x.GetKind().(*Value_StringValue); ok { - return x.StringValue + if x != nil { + if x, ok := x.Kind.(*Value_StringValue); ok { + return x.StringValue + } } return "" } func (x *Value) GetBoolValue() bool { - if x, ok := x.GetKind().(*Value_BoolValue); ok { - return x.BoolValue + if x != nil { + if x, ok := x.Kind.(*Value_BoolValue); ok { + return x.BoolValue + } } return false } func (x *Value) GetStructValue() *Struct { - if x, ok := x.GetKind().(*Value_StructValue); ok { - return x.StructValue + if x != nil { + if x, ok := x.Kind.(*Value_StructValue); ok { + return x.StructValue + } } return nil } func (x *Value) GetListValue() *ListValue { - if x, ok := x.GetKind().(*Value_ListValue); ok { - return x.ListValue + if x != nil { + if x, ok := x.Kind.(*Value_ListValue); ok { + return x.ListValue + } } return nil } @@ -570,12 +592,11 @@ func (*Value_ListValue) isValue_Kind() {} // // The JSON representation for `ListValue` 
is JSON array. type ListValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Repeated field of dynamically typed values. - Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // NewList constructs a ListValue from a general-purpose Go slice. @@ -613,11 +634,9 @@ func (x *ListValue) UnmarshalJSON(b []byte) error { func (x *ListValue) Reset() { *x = ListValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_struct_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_struct_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ListValue) String() string { @@ -628,7 +647,7 @@ func (*ListValue) ProtoMessage() {} func (x *ListValue) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_struct_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -742,44 +761,6 @@ func file_google_protobuf_struct_proto_init() { if File_google_protobuf_struct_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_struct_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Struct); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_struct_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*Value); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_struct_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*ListValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []any{ (*Value_NullValue)(nil), (*Value_NumberValue)(nil), diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index 83a5a645b08..9550109aa3b 100644 --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -170,10 +170,7 @@ import ( // http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime() // ) to obtain a formatter capable of generating timestamps in this format. type Timestamp struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Represents seconds of UTC time since Unix epoch // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to // 9999-12-31T23:59:59Z inclusive. @@ -182,7 +179,9 @@ type Timestamp struct { // second values with fractions must still have non-negative nanos values // that count forward in time. Must be from 0 to 999,999,999 // inclusive. 
- Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Now constructs a new Timestamp from the current time. @@ -254,11 +253,9 @@ func (x *Timestamp) check() uint { func (x *Timestamp) Reset() { *x = Timestamp{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_timestamp_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_timestamp_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Timestamp) String() string { @@ -269,7 +266,7 @@ func (*Timestamp) ProtoMessage() {} func (x *Timestamp) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_timestamp_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -348,20 +345,6 @@ func file_google_protobuf_timestamp_proto_init() { if File_google_protobuf_timestamp_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Timestamp); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go index e473f826aa3..15b424ec12c 100644 --- a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go @@ -54,12 +54,11 @@ import ( // // The JSON representation for `DoubleValue` is JSON number. type DoubleValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The double value. - Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Double stores v in a new DoubleValue and returns a pointer to it. @@ -69,11 +68,9 @@ func Double(v float64) *DoubleValue { func (x *DoubleValue) Reset() { *x = DoubleValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DoubleValue) String() string { @@ -84,7 +81,7 @@ func (*DoubleValue) ProtoMessage() {} func (x *DoubleValue) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -110,12 +107,11 @@ func (x *DoubleValue) GetValue() float64 { // // The JSON representation for `FloatValue` is JSON number. 
type FloatValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The float value. - Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` + Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Float stores v in a new FloatValue and returns a pointer to it. @@ -125,11 +121,9 @@ func Float(v float32) *FloatValue { func (x *FloatValue) Reset() { *x = FloatValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FloatValue) String() string { @@ -140,7 +134,7 @@ func (*FloatValue) ProtoMessage() {} func (x *FloatValue) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -166,12 +160,11 @@ func (x *FloatValue) GetValue() float32 { // // The JSON representation for `Int64Value` is JSON string. type Int64Value struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The int64 value. - Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Int64 stores v in a new Int64Value and returns a pointer to it. @@ -181,11 +174,9 @@ func Int64(v int64) *Int64Value { func (x *Int64Value) Reset() { *x = Int64Value{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Int64Value) String() string { @@ -196,7 +187,7 @@ func (*Int64Value) ProtoMessage() {} func (x *Int64Value) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -222,12 +213,11 @@ func (x *Int64Value) GetValue() int64 { // // The JSON representation for `UInt64Value` is JSON string. type UInt64Value struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The uint64 value. - Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // UInt64 stores v in a new UInt64Value and returns a pointer to it. 
@@ -237,11 +227,9 @@ func UInt64(v uint64) *UInt64Value { func (x *UInt64Value) Reset() { *x = UInt64Value{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UInt64Value) String() string { @@ -252,7 +240,7 @@ func (*UInt64Value) ProtoMessage() {} func (x *UInt64Value) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -278,12 +266,11 @@ func (x *UInt64Value) GetValue() uint64 { // // The JSON representation for `Int32Value` is JSON number. type Int32Value struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The int32 value. - Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Int32 stores v in a new Int32Value and returns a pointer to it. @@ -293,11 +280,9 @@ func Int32(v int32) *Int32Value { func (x *Int32Value) Reset() { *x = Int32Value{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Int32Value) String() string { @@ -308,7 +293,7 @@ func (*Int32Value) ProtoMessage() {} func (x *Int32Value) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -334,12 +319,11 @@ func (x *Int32Value) GetValue() int32 { // // The JSON representation for `UInt32Value` is JSON number. type UInt32Value struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The uint32 value. - Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // UInt32 stores v in a new UInt32Value and returns a pointer to it. 
@@ -349,11 +333,9 @@ func UInt32(v uint32) *UInt32Value { func (x *UInt32Value) Reset() { *x = UInt32Value{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UInt32Value) String() string { @@ -364,7 +346,7 @@ func (*UInt32Value) ProtoMessage() {} func (x *UInt32Value) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -390,12 +372,11 @@ func (x *UInt32Value) GetValue() uint32 { // // The JSON representation for `BoolValue` is JSON `true` and `false`. type BoolValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The bool value. - Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Bool stores v in a new BoolValue and returns a pointer to it. @@ -405,11 +386,9 @@ func Bool(v bool) *BoolValue { func (x *BoolValue) Reset() { *x = BoolValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *BoolValue) String() string { @@ -420,7 +399,7 @@ func (*BoolValue) ProtoMessage() {} func (x *BoolValue) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -446,12 +425,11 @@ func (x *BoolValue) GetValue() bool { // // The JSON representation for `StringValue` is JSON string. type StringValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The string value. - Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // String stores v in a new StringValue and returns a pointer to it. 
@@ -461,11 +439,9 @@ func String(v string) *StringValue { func (x *StringValue) Reset() { *x = StringValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *StringValue) String() string { @@ -476,7 +452,7 @@ func (*StringValue) ProtoMessage() {} func (x *StringValue) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -502,12 +478,11 @@ func (x *StringValue) GetValue() string { // // The JSON representation for `BytesValue` is JSON string. type BytesValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The bytes value. - Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Bytes stores v in a new BytesValue and returns a pointer to it. @@ -517,11 +492,9 @@ func Bytes(v []byte) *BytesValue { func (x *BytesValue) Reset() { *x = BytesValue{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_wrappers_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_wrappers_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *BytesValue) String() string { @@ -532,7 +505,7 @@ func (*BytesValue) ProtoMessage() {} func (x *BytesValue) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_wrappers_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -629,116 +602,6 @@ func file_google_protobuf_wrappers_proto_init() { if File_google_protobuf_wrappers_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_wrappers_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*DoubleValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*FloatValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*Int64Value); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*UInt64Value); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*Int32Value); i { - case 0: - return &v.state - 
case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*UInt32Value); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*BoolValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*StringValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_wrappers_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*BytesValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/modules.txt b/vendor/modules.txt index c1c41a0b42e..10295b5f213 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -4,12 +4,10 @@ cloud.google.com/go/internal cloud.google.com/go/internal/optional cloud.google.com/go/internal/trace cloud.google.com/go/internal/version -# cloud.google.com/go/auth v0.7.0 -## explicit; go 1.20 +# cloud.google.com/go/auth v0.14.0 +## explicit; go 1.22 cloud.google.com/go/auth cloud.google.com/go/auth/credentials -cloud.google.com/go/auth/credentials/idtoken -cloud.google.com/go/auth/credentials/impersonate cloud.google.com/go/auth/credentials/internal/externalaccount cloud.google.com/go/auth/credentials/internal/externalaccountuser cloud.google.com/go/auth/credentials/internal/gdch @@ -18,17 +16,18 @@ cloud.google.com/go/auth/credentials/internal/stsexchange cloud.google.com/go/auth/grpctransport cloud.google.com/go/auth/httptransport cloud.google.com/go/auth/internal +cloud.google.com/go/auth/internal/compute cloud.google.com/go/auth/internal/credsfile cloud.google.com/go/auth/internal/jwt cloud.google.com/go/auth/internal/transport cloud.google.com/go/auth/internal/transport/cert -# cloud.google.com/go/auth/oauth2adapt v0.2.2 -## explicit; go 1.19 +# cloud.google.com/go/auth/oauth2adapt v0.2.7 +## explicit; go 1.22 cloud.google.com/go/auth/oauth2adapt -# cloud.google.com/go/compute/metadata v0.5.0 -## explicit; go 1.20 +# cloud.google.com/go/compute/metadata v0.6.0 +## explicit; go 1.21 cloud.google.com/go/compute/metadata -# cloud.google.com/go/iam v1.1.10 +# cloud.google.com/go/iam v1.1.11 ## explicit; go 1.20 cloud.google.com/go/iam cloud.google.com/go/iam/apiv1/iampb @@ -38,7 +37,7 @@ cloud.google.com/go/storage cloud.google.com/go/storage/internal cloud.google.com/go/storage/internal/apiv2 cloud.google.com/go/storage/internal/apiv2/storagepb -# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 +# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/azcore github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource @@ -60,7 +59,7 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming github.com/Azure/azure-sdk-for-go/sdk/azcore/to github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing -# github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 +# 
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.1 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/azidentity github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal @@ -73,7 +72,7 @@ github.com/Azure/azure-sdk-for-go/sdk/internal/log github.com/Azure/azure-sdk-for-go/sdk/internal/poller github.com/Azure/azure-sdk-for-go/sdk/internal/temporal github.com/Azure/azure-sdk-for-go/sdk/internal/uuid -# github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 +# github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob @@ -88,7 +87,7 @@ github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service -# github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 +# github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2 ## explicit; go 1.18 github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential @@ -111,38 +110,44 @@ github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version github.com/AzureAD/microsoft-authentication-library-for-go/apps/public -# github.com/IBM/sarama v1.43.2 -## explicit; go 1.19 +# github.com/IBM/sarama v1.45.0 +## explicit; go 1.21 github.com/IBM/sarama -# github.com/VividCortex/gohistogram v1.0.0 -## explicit -github.com/VividCortex/gohistogram -# github.com/alecthomas/kong v0.8.0 -## explicit; go 1.18 +# github.com/alecthomas/kong v1.6.1 +## explicit; go 1.20 github.com/alecthomas/kong # github.com/alecthomas/participle/v2 v2.1.1 ## explicit; go 1.18 github.com/alecthomas/participle/v2 github.com/alecthomas/participle/v2/lexer -# github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 +# github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b ## explicit; go 1.15 github.com/alecthomas/units -# github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a +# github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302 ## explicit github.com/alicebob/gopher-json -# github.com/alicebob/miniredis/v2 v2.21.0 -## explicit; go 1.14 +# github.com/alicebob/miniredis/v2 v2.34.0 +## explicit; go 1.17 github.com/alicebob/miniredis/v2 +github.com/alicebob/miniredis/v2/fpconv github.com/alicebob/miniredis/v2/geohash github.com/alicebob/miniredis/v2/hyperloglog github.com/alicebob/miniredis/v2/metro +github.com/alicebob/miniredis/v2/proto github.com/alicebob/miniredis/v2/server +github.com/alicebob/miniredis/v2/size # github.com/andybalholm/brotli v1.1.1 ## explicit; go 1.13 github.com/andybalholm/brotli github.com/andybalholm/brotli/matchfinder -# github.com/apache/thrift v0.20.0 -## explicit; go 1.21 +# github.com/antchfx/xmlquery v1.4.3 +## explicit; go 1.14 +github.com/antchfx/xmlquery +# github.com/antchfx/xpath v1.3.3 +## explicit; go 1.14 +github.com/antchfx/xpath +# github.com/apache/thrift v0.21.0 +## explicit; go 1.22.0 github.com/apache/thrift/lib/go/thrift # github.com/armon/go-metrics v0.4.1 ## explicit; go 1.12 @@ -151,7 +156,10 @@ github.com/armon/go-metrics/prometheus # github.com/asaskevich/govalidator 
v0.0.0-20230301143203-a9d515a09cc2 ## explicit; go 1.13 github.com/asaskevich/govalidator -# github.com/aws/aws-sdk-go v1.55.5 +# github.com/aws/aws-msk-iam-sasl-signer-go v1.0.0 +## explicit; go 1.17 +github.com/aws/aws-msk-iam-sasl-signer-go/signer +# github.com/aws/aws-sdk-go v1.55.6 ## explicit; go 1.19 github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/arn @@ -206,8 +214,89 @@ github.com/aws/aws-sdk-go/service/sts github.com/aws/aws-sdk-go/service/sts/stsiface # github.com/aws/aws-sdk-go-v2 v1.22.2 ## explicit; go 1.19 +github.com/aws/aws-sdk-go-v2/aws +github.com/aws/aws-sdk-go-v2/aws/defaults +github.com/aws/aws-sdk-go-v2/aws/middleware +github.com/aws/aws-sdk-go-v2/aws/protocol/query +github.com/aws/aws-sdk-go-v2/aws/protocol/restjson +github.com/aws/aws-sdk-go-v2/aws/protocol/xml +github.com/aws/aws-sdk-go-v2/aws/ratelimit +github.com/aws/aws-sdk-go-v2/aws/retry +github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 +github.com/aws/aws-sdk-go-v2/aws/signer/v4 +github.com/aws/aws-sdk-go-v2/aws/transport/http +github.com/aws/aws-sdk-go-v2/internal/auth +github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn +github.com/aws/aws-sdk-go-v2/internal/rand +github.com/aws/aws-sdk-go-v2/internal/sdk +github.com/aws/aws-sdk-go-v2/internal/sdkio +github.com/aws/aws-sdk-go-v2/internal/shareddefaults +github.com/aws/aws-sdk-go-v2/internal/strings +github.com/aws/aws-sdk-go-v2/internal/sync/singleflight +github.com/aws/aws-sdk-go-v2/internal/timeconv # github.com/aws/aws-sdk-go-v2/config v1.24.0 ## explicit; go 1.19 +github.com/aws/aws-sdk-go-v2/config +# github.com/aws/aws-sdk-go-v2/credentials v1.15.2 +## explicit; go 1.19 +github.com/aws/aws-sdk-go-v2/credentials +github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds +github.com/aws/aws-sdk-go-v2/credentials/endpointcreds +github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client +github.com/aws/aws-sdk-go-v2/credentials/processcreds +github.com/aws/aws-sdk-go-v2/credentials/ssocreds +github.com/aws/aws-sdk-go-v2/credentials/stscreds +# github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.3 +## explicit; go 1.19 +github.com/aws/aws-sdk-go-v2/feature/ec2/imds +github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config +# github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.2 +## explicit; go 1.19 +github.com/aws/aws-sdk-go-v2/internal/configsources +# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.2 +## explicit; go 1.19 +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 +# github.com/aws/aws-sdk-go-v2/internal/ini v1.7.0 +## explicit; go 1.19 +github.com/aws/aws-sdk-go-v2/internal/ini +# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.2 +## explicit; go 1.19 +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url +# github.com/aws/aws-sdk-go-v2/service/sso v1.17.1 +## explicit; go 1.19 +github.com/aws/aws-sdk-go-v2/service/sso +github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints +github.com/aws/aws-sdk-go-v2/service/sso/types +# github.com/aws/aws-sdk-go-v2/service/ssooidc v1.19.1 +## explicit; go 1.19 +github.com/aws/aws-sdk-go-v2/service/ssooidc +github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints +github.com/aws/aws-sdk-go-v2/service/ssooidc/types +# github.com/aws/aws-sdk-go-v2/service/sts v1.25.1 +## explicit; go 1.19 +github.com/aws/aws-sdk-go-v2/service/sts +github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints +github.com/aws/aws-sdk-go-v2/service/sts/types +# github.com/aws/smithy-go v1.16.0 +## explicit; go 1.19 +github.com/aws/smithy-go 
+github.com/aws/smithy-go/auth/bearer +github.com/aws/smithy-go/context +github.com/aws/smithy-go/document +github.com/aws/smithy-go/encoding +github.com/aws/smithy-go/encoding/httpbinding +github.com/aws/smithy-go/encoding/json +github.com/aws/smithy-go/encoding/xml +github.com/aws/smithy-go/endpoints +github.com/aws/smithy-go/internal/sync/singleflight +github.com/aws/smithy-go/io +github.com/aws/smithy-go/logging +github.com/aws/smithy-go/middleware +github.com/aws/smithy-go/ptr +github.com/aws/smithy-go/rand +github.com/aws/smithy-go/time +github.com/aws/smithy-go/transport/http +github.com/aws/smithy-go/transport/http/internal/io # github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 ## explicit; go 1.20 github.com/bboreham/go-loser @@ -225,9 +314,6 @@ github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1 github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1 github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1 github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1 -# github.com/cespare/xxhash v1.1.0 -## explicit -github.com/cespare/xxhash # github.com/cespare/xxhash/v2 v2.3.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 @@ -258,7 +344,7 @@ github.com/drone/envsubst/path # github.com/dustin/go-humanize v1.0.1 ## explicit; go 1.16 github.com/dustin/go-humanize -# github.com/eapache/go-resiliency v1.6.0 +# github.com/eapache/go-resiliency v1.7.0 ## explicit; go 1.13 github.com/eapache/go-resiliency/breaker # github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 @@ -267,13 +353,30 @@ github.com/eapache/go-xerial-snappy # github.com/eapache/queue v1.1.0 ## explicit github.com/eapache/queue +# github.com/ebitengine/purego v0.8.1 +## explicit; go 1.18 +github.com/ebitengine/purego +github.com/ebitengine/purego/internal/cgo +github.com/ebitengine/purego/internal/fakecgo +github.com/ebitengine/purego/internal/strings # github.com/edsrzf/mmap-go v1.1.0 ## explicit; go 1.17 github.com/edsrzf/mmap-go +# github.com/elastic/go-grok v0.3.1 +## explicit; go 1.21.0 +github.com/elastic/go-grok +github.com/elastic/go-grok/dev-tools/mage +github.com/elastic/go-grok/dev-tools/mage/gotool +github.com/elastic/go-grok/patterns +# github.com/elastic/lunes v0.1.0 +## explicit; go 1.21.0 +github.com/elastic/lunes +github.com/elastic/lunes/dev-tools/mage +github.com/elastic/lunes/dev-tools/mage/gotool # github.com/evanphx/json-patch v5.9.0+incompatible ## explicit github.com/evanphx/json-patch -# github.com/expr-lang/expr v1.16.2 +# github.com/expr-lang/expr v1.16.9 ## explicit; go 1.18 github.com/expr-lang/expr github.com/expr-lang/expr/ast @@ -300,18 +403,13 @@ github.com/fatih/color # github.com/felixge/httpsnoop v1.0.4 ## explicit; go 1.13 github.com/felixge/httpsnoop -# github.com/fsnotify/fsnotify v1.7.0 +# github.com/fsnotify/fsnotify v1.8.0 ## explicit; go 1.17 github.com/fsnotify/fsnotify +github.com/fsnotify/fsnotify/internal # github.com/go-ini/ini v1.67.0 ## explicit github.com/go-ini/ini -# github.com/go-kit/kit v0.13.0 -## explicit; go 1.17 -github.com/go-kit/kit/metrics -github.com/go-kit/kit/metrics/expvar -github.com/go-kit/kit/metrics/generic -github.com/go-kit/kit/metrics/internal/lv # github.com/go-kit/log v0.2.1 ## explicit; go 1.17 github.com/go-kit/log @@ -377,9 +475,10 @@ github.com/go-redis/redis/v8/internal/util # github.com/go-test/deep v1.1.1 ## explicit; go 1.16 github.com/go-test/deep -# github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 +# github.com/go-viper/mapstructure/v2 v2.2.1 ## 
explicit; go 1.18 github.com/go-viper/mapstructure/v2 +github.com/go-viper/mapstructure/v2/internal/errors # github.com/gobwas/glob v0.2.3 ## explicit github.com/gobwas/glob @@ -390,7 +489,7 @@ github.com/gobwas/glob/syntax/ast github.com/gobwas/glob/syntax/lexer github.com/gobwas/glob/util/runes github.com/gobwas/glob/util/strings -# github.com/goccy/go-json v0.10.3 +# github.com/goccy/go-json v0.10.4 ## explicit; go 1.19 github.com/goccy/go-json github.com/goccy/go-json/internal/decoder @@ -419,8 +518,8 @@ github.com/gogo/status # github.com/golang-jwt/jwt/v5 v5.2.1 ## explicit; go 1.18 github.com/golang-jwt/jwt/v5 -# github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da -## explicit +# github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 +## explicit; go 1.20 github.com/golang/groupcache/lru # github.com/golang/protobuf v1.5.4 ## explicit; go 1.17 @@ -443,8 +542,8 @@ github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value -# github.com/google/s2a-go v0.1.7 -## explicit; go 1.19 +# github.com/google/s2a-go v0.1.9 +## explicit; go 1.20 github.com/google/s2a-go github.com/google/s2a-go/fallback github.com/google/s2a-go/internal/authinfo @@ -469,19 +568,21 @@ github.com/google/s2a-go/stream # github.com/google/uuid v1.6.0 ## explicit github.com/google/uuid -# github.com/googleapis/enterprise-certificate-proxy v0.3.2 +# github.com/googleapis/enterprise-certificate-proxy v0.3.4 ## explicit; go 1.19 github.com/googleapis/enterprise-certificate-proxy/client github.com/googleapis/enterprise-certificate-proxy/client/util -# github.com/googleapis/gax-go/v2 v2.13.0 -## explicit; go 1.20 +# github.com/googleapis/gax-go/v2 v2.14.1 +## explicit; go 1.21 github.com/googleapis/gax-go/v2 github.com/googleapis/gax-go/v2/apierror github.com/googleapis/gax-go/v2/apierror/internal/proto github.com/googleapis/gax-go/v2/callctx github.com/googleapis/gax-go/v2/internal -# github.com/gorilla/handlers v1.5.1 -## explicit; go 1.14 +github.com/googleapis/gax-go/v2/internallog +github.com/googleapis/gax-go/v2/internallog/internal +# github.com/gorilla/handlers v1.5.2 +## explicit; go 1.20 github.com/gorilla/handlers # github.com/gorilla/mux v1.8.1 ## explicit; go 1.20 @@ -526,6 +627,7 @@ github.com/grafana/dskit/signals github.com/grafana/dskit/spanlogger github.com/grafana/dskit/spanprofiler github.com/grafana/dskit/tenant +github.com/grafana/dskit/test github.com/grafana/dskit/tracing github.com/grafana/dskit/user # github.com/grafana/e2e v0.1.1 @@ -533,8 +635,8 @@ github.com/grafana/dskit/user github.com/grafana/e2e github.com/grafana/e2e/db github.com/grafana/e2e/images -# github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56 -## explicit; go 1.18 +# github.com/grafana/gomemcache v0.0.0-20241016125027-0a5bcc5aef40 +## explicit; go 1.22 github.com/grafana/gomemcache/memcache # github.com/grafana/pyroscope-go/godeltaprof v0.1.8 ## explicit; go 1.18 @@ -548,16 +650,12 @@ github.com/grafana/regexp/syntax # github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 ## explicit; go 1.14 github.com/grpc-ecosystem/go-grpc-middleware -github.com/grpc-ecosystem/go-grpc-middleware/logging -github.com/grpc-ecosystem/go-grpc-middleware/logging/settable -github.com/grpc-ecosystem/go-grpc-middleware/logging/zap -github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap -github.com/grpc-ecosystem/go-grpc-middleware/retry -github.com/grpc-ecosystem/go-grpc-middleware/tags 
-github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils -github.com/grpc-ecosystem/go-grpc-middleware/util/metautils -# github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 -## explicit; go 1.20 +# github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0 +## explicit; go 1.21 +github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry +github.com/grpc-ecosystem/go-grpc-middleware/v2/metadata +# github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.0 +## explicit; go 1.22 github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule github.com/grpc-ecosystem/grpc-gateway/v2/runtime github.com/grpc-ecosystem/grpc-gateway/v2/utilities @@ -582,13 +680,6 @@ github.com/hashicorp/go-msgpack/codec # github.com/hashicorp/go-multierror v1.1.1 ## explicit; go 1.13 github.com/hashicorp/go-multierror -# github.com/hashicorp/go-plugin v1.6.0 -## explicit; go 1.17 -github.com/hashicorp/go-plugin -github.com/hashicorp/go-plugin/internal/cmdrunner -github.com/hashicorp/go-plugin/internal/grpcmux -github.com/hashicorp/go-plugin/internal/plugin -github.com/hashicorp/go-plugin/runner # github.com/hashicorp/go-rootcerts v1.0.2 ## explicit; go 1.12 github.com/hashicorp/go-rootcerts @@ -603,6 +694,7 @@ github.com/hashicorp/go-uuid github.com/hashicorp/go-version # github.com/hashicorp/golang-lru v1.0.2 ## explicit; go 1.12 +github.com/hashicorp/golang-lru github.com/hashicorp/golang-lru/simplelru # github.com/hashicorp/golang-lru/v2 v2.0.7 ## explicit; go 1.18 @@ -627,64 +719,48 @@ github.com/hashicorp/memberlist # github.com/hashicorp/serf v0.10.1 ## explicit; go 1.12 github.com/hashicorp/serf/coordinate -# github.com/hashicorp/yamux v0.1.1 -## explicit; go 1.15 -github.com/hashicorp/yamux # github.com/iancoleman/strcase v0.3.0 ## explicit; go 1.16 github.com/iancoleman/strcase # github.com/inconshreveable/mousetrap v1.1.0 ## explicit; go 1.18 github.com/inconshreveable/mousetrap -# github.com/jaegertracing/jaeger v1.57.0 -## explicit; go 1.21 +# github.com/jaegertracing/jaeger v1.65.0 +## explicit; go 1.22.7 github.com/jaegertracing/jaeger/cmd/agent/app/configmanager github.com/jaegertracing/jaeger/cmd/agent/app/configmanager/grpc github.com/jaegertracing/jaeger/cmd/agent/app/customtransport -github.com/jaegertracing/jaeger/cmd/agent/app/httpserver github.com/jaegertracing/jaeger/cmd/agent/app/processors github.com/jaegertracing/jaeger/cmd/agent/app/reporter github.com/jaegertracing/jaeger/cmd/agent/app/reporter/grpc github.com/jaegertracing/jaeger/cmd/agent/app/servers github.com/jaegertracing/jaeger/cmd/agent/app/servers/thriftudp github.com/jaegertracing/jaeger/cmd/all-in-one/setupcontext -github.com/jaegertracing/jaeger/cmd/collector/app/sampling/model -github.com/jaegertracing/jaeger/cmd/collector/app/sampling/strategystore github.com/jaegertracing/jaeger/cmd/collector/app/sanitizer/zipkin github.com/jaegertracing/jaeger/cmd/internal/flags -github.com/jaegertracing/jaeger/internal/metrics/expvar github.com/jaegertracing/jaeger/internal/metrics/metricsbuilder +github.com/jaegertracing/jaeger/internal/metrics/otelmetrics github.com/jaegertracing/jaeger/internal/metrics/prometheus github.com/jaegertracing/jaeger/model -github.com/jaegertracing/jaeger/model/converter/json github.com/jaegertracing/jaeger/model/converter/thrift/jaeger github.com/jaegertracing/jaeger/model/converter/thrift/zipkin -github.com/jaegertracing/jaeger/model/json -github.com/jaegertracing/jaeger/pkg/bearertoken -github.com/jaegertracing/jaeger/pkg/clientcfg/clientcfghttp github.com/jaegertracing/jaeger/pkg/config/tlscfg 
github.com/jaegertracing/jaeger/pkg/discovery github.com/jaegertracing/jaeger/pkg/discovery/grpcresolver -github.com/jaegertracing/jaeger/pkg/distributedlock -github.com/jaegertracing/jaeger/pkg/fswatcher github.com/jaegertracing/jaeger/pkg/gogocodec github.com/jaegertracing/jaeger/pkg/healthcheck github.com/jaegertracing/jaeger/pkg/metrics github.com/jaegertracing/jaeger/pkg/netutils github.com/jaegertracing/jaeger/pkg/recoveryhandler +github.com/jaegertracing/jaeger/pkg/telemetry github.com/jaegertracing/jaeger/pkg/version github.com/jaegertracing/jaeger/plugin/storage/grpc/shared github.com/jaegertracing/jaeger/ports github.com/jaegertracing/jaeger/proto-gen/api_v2 -github.com/jaegertracing/jaeger/proto-gen/api_v2/metrics github.com/jaegertracing/jaeger/proto-gen/storage_v1 -github.com/jaegertracing/jaeger/storage github.com/jaegertracing/jaeger/storage/dependencystore -github.com/jaegertracing/jaeger/storage/metricsstore -github.com/jaegertracing/jaeger/storage/samplingstore github.com/jaegertracing/jaeger/storage/spanstore github.com/jaegertracing/jaeger/thrift-gen/agent -github.com/jaegertracing/jaeger/thrift-gen/baggage github.com/jaegertracing/jaeger/thrift-gen/jaeger github.com/jaegertracing/jaeger/thrift-gen/sampling github.com/jaegertracing/jaeger/thrift-gen/zipkincore @@ -734,8 +810,8 @@ github.com/jcmturner/gokrb5/v8/types ## explicit; go 1.13 github.com/jcmturner/rpc/v2/mstypes github.com/jcmturner/rpc/v2/ndr -# github.com/jedib0t/go-pretty/v6 v6.2.4 -## explicit; go 1.13 +# github.com/jedib0t/go-pretty/v6 v6.6.5 +## explicit; go 1.17 github.com/jedib0t/go-pretty/v6/table github.com/jedib0t/go-pretty/v6/text # github.com/jmespath/go-jmespath v0.4.0 @@ -774,23 +850,27 @@ github.com/klauspost/compress/snappy github.com/klauspost/compress/zlib github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd/internal/xxhash -# github.com/klauspost/cpuid/v2 v2.2.8 -## explicit; go 1.15 +# github.com/klauspost/cpuid/v2 v2.2.9 +## explicit; go 1.20 github.com/klauspost/cpuid/v2 # github.com/knadh/koanf v1.5.0 ## explicit; go 1.12 github.com/knadh/koanf/maps github.com/knadh/koanf/providers/confmap -# github.com/knadh/koanf/v2 v2.1.1 +# github.com/knadh/koanf/v2 v2.1.2 ## explicit; go 1.18 github.com/knadh/koanf/v2 # github.com/kylelemons/godebug v1.1.0 ## explicit; go 1.11 github.com/kylelemons/godebug/diff github.com/kylelemons/godebug/pretty -# github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c +# github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 ## explicit; go 1.16 github.com/lufia/plan9stats +# github.com/magefile/mage v1.15.0 +## explicit; go 1.12 +github.com/magefile/mage/mg +github.com/magefile/mage/sh # github.com/magiconair/properties v1.8.7 ## explicit; go 1.19 github.com/magiconair/properties @@ -814,7 +894,7 @@ github.com/miekg/dns # github.com/minio/md5-simd v1.1.2 ## explicit; go 1.14 github.com/minio/md5-simd -# github.com/minio/minio-go/v7 v7.0.80 +# github.com/minio/minio-go/v7 v7.0.84 ## explicit; go 1.22 github.com/minio/minio-go/v7 github.com/minio/minio-go/v7/pkg/cors @@ -834,9 +914,6 @@ github.com/mitchellh/copystructure # github.com/mitchellh/go-homedir v1.1.0 ## explicit github.com/mitchellh/go-homedir -# github.com/mitchellh/go-testing-interface v1.0.0 -## explicit -github.com/mitchellh/go-testing-interface # github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c ## explicit; go 1.14 github.com/mitchellh/mapstructure @@ -852,35 +929,31 @@ github.com/modern-go/reflect2 # github.com/mostynb/go-grpc-compression 
v1.2.3 ## explicit; go 1.17 github.com/mostynb/go-grpc-compression/internal/snappy +github.com/mostynb/go-grpc-compression/internal/zstd github.com/mostynb/go-grpc-compression/nonclobbering/snappy +github.com/mostynb/go-grpc-compression/nonclobbering/zstd # github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 ## explicit github.com/munnerz/goautoneg # github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f ## explicit github.com/mwitkow/go-conntrack -# github.com/oklog/run v1.1.0 -## explicit; go 1.13 -github.com/oklog/run # github.com/oklog/ulid v1.3.1 ## explicit github.com/oklog/ulid # github.com/olekukonko/tablewriter v0.0.5 ## explicit; go 1.12 github.com/olekukonko/tablewriter -# github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.102.0 -## explicit; go 1.21.0 +# github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.118.0 +## explicit; go 1.22.7 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/internal/metadata -# github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter v0.102.0 -## explicit; go 1.21.0 +# github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter v0.118.0 +## explicit; go 1.22.7 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/internal/metadata -# github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.102.0 -## explicit; go 1.21.0 -github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/localhostgate -# github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.102.0 -## explicit; go 1.21.0 +# github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.118.0 +## explicit; go 1.22.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/idutils github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/occonventions github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/parseutils @@ -889,8 +962,8 @@ github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/ github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/timeutils/internal/ctimefmt github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/tracetranslator github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/traceutil -# github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.97.0 -## explicit; go 1.21 +# github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.118.0 +## explicit; go 1.22.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterconfig github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterexpr @@ -902,69 +975,74 @@ github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filter github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/regexp github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/strict github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterspan -# 
github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.102.0 -## explicit; go 1.21.0 +# github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.118.0 +## explicit; go 1.22.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka/awsmsk -# github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.102.0 -## explicit; go 1.21.0 +# github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.118.0 +## explicit; go 1.22.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent -# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.102.0 -## explicit; go 1.21.0 +# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.118.0 +## explicit; go 1.22.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal -# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.97.0 -## explicit; go 1.21 +# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic v0.118.0 +## explicit; go 1.22.0 +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic +# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.118.0 +## explicit; go 1.22.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/logging github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/internal/ottlcommon github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs -# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.102.0 -## explicit; go 1.21.0 +# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.118.0 +## explicit; go 1.22.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil -# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.102.0 -## explicit; go 1.21.0 +# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.118.0 +## explicit; go 1.22.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure -# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.102.0 -## explicit; go 1.21.0 +# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.118.0 +## explicit; go 1.22.7 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger -# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.102.0 -## explicit; go 1.21.0 +# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.118.0 +## explicit; go 1.22.0 
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus -# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.102.0 -## explicit; go 1.21.0 +# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.118.0 +## explicit; go 1.22.7 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/internal/zipkin github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/zipkinv1 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/zipkinv2 -# github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.97.0 -## explicit; go 1.21 +# github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.118.0 +## explicit; go 1.22.0 github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/internal/metadata -# github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.102.0 -## explicit; go 1.21.0 +# github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.118.0 +## explicit; go 1.22.7 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/internal/metadata -# github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.102.0 -## explicit; go 1.21.0 +# github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.118.0 +## explicit; go 1.22.7 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/internal/metadata -# github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver v0.102.0 -## explicit; go 1.21.0 +# github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver v0.118.0 +## explicit; go 1.22.7 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/internal/metadata github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/internal/ocmetrics github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/internal/octrace -# github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.102.0 -## explicit; go 1.21.0 +# github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.118.0 +## explicit; go 1.22.7 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/internal/metadata -# github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e -## explicit; go 1.11 +# github.com/opentracing-contrib/go-grpc v0.1.0 +## explicit; go 1.22.7 github.com/opentracing-contrib/go-grpc # github.com/opentracing-contrib/go-stdlib v1.0.0 ## explicit; go 1.14 @@ -979,7 +1057,7 @@ github.com/opentracing/opentracing-go/log github.com/openzipkin/zipkin-go/model github.com/openzipkin/zipkin-go/proto/zipkin_proto3 github.com/openzipkin/zipkin-go/reporter -# github.com/parquet-go/parquet-go v0.23.1-0.20241011155651-6446d1d0d2fe +# github.com/parquet-go/parquet-go v0.24.0 ## explicit; go 1.22 github.com/parquet-go/parquet-go 
github.com/parquet-go/parquet-go/bloom @@ -1008,14 +1086,14 @@ github.com/parquet-go/parquet-go/internal/bytealg github.com/parquet-go/parquet-go/internal/debug github.com/parquet-go/parquet-go/internal/unsafecast github.com/parquet-go/parquet-go/sparse -# github.com/pelletier/go-toml/v2 v2.1.0 -## explicit; go 1.16 +# github.com/pelletier/go-toml/v2 v2.2.3 +## explicit; go 1.21.0 github.com/pelletier/go-toml/v2 github.com/pelletier/go-toml/v2/internal/characters github.com/pelletier/go-toml/v2/internal/danger github.com/pelletier/go-toml/v2/internal/tracker github.com/pelletier/go-toml/v2/unstable -# github.com/pierrec/lz4/v4 v4.1.21 +# github.com/pierrec/lz4/v4 v4.1.22 ## explicit; go 1.14 github.com/pierrec/lz4/v4 github.com/pierrec/lz4/v4/internal/lz4block @@ -1034,14 +1112,16 @@ github.com/pkg/errors # github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 ## explicit github.com/pmezard/go-difflib/difflib -# github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c +# github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 ## explicit; go 1.14 github.com/power-devops/perfstat # github.com/prometheus/alertmanager v0.27.0 ## explicit; go 1.21 github.com/prometheus/alertmanager/api/v2/models -# github.com/prometheus/client_golang v1.19.1 +# github.com/prometheus/client_golang v1.20.5 ## explicit; go 1.20 +github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil +github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/collectors github.com/prometheus/client_golang/prometheus/collectors/version @@ -1055,8 +1135,8 @@ github.com/prometheus/client_golang/prometheus/testutil/promlint/validations # github.com/prometheus/client_model v0.6.1 ## explicit; go 1.19 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.55.0 -## explicit; go 1.20 +# github.com/prometheus/common v0.62.0 +## explicit; go 1.21 github.com/prometheus/common/config github.com/prometheus/common/expfmt github.com/prometheus/common/helpers/templates @@ -1074,7 +1154,7 @@ github.com/prometheus/exporter-toolkit/web github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/prometheus/prometheus v0.54.0 +# github.com/prometheus/prometheus v0.54.1 ## explicit; go 1.21.0 github.com/prometheus/prometheus/config github.com/prometheus/prometheus/discovery @@ -1129,19 +1209,19 @@ github.com/prometheus/prometheus/util/strutil github.com/prometheus/prometheus/util/testutil github.com/prometheus/prometheus/util/zeropool github.com/prometheus/prometheus/web/api/v1 -# github.com/prometheus/statsd_exporter v0.26.0 -## explicit; go 1.18 +# github.com/prometheus/statsd_exporter v0.26.1 +## explicit; go 1.20 github.com/prometheus/statsd_exporter/pkg/level # github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 ## explicit github.com/rcrowley/go-metrics -# github.com/relvacode/iso8601 v1.4.0 +# github.com/relvacode/iso8601 v1.6.0 ## explicit; go 1.13 github.com/relvacode/iso8601 # github.com/rivo/uniseg v0.4.7 ## explicit; go 1.18 github.com/rivo/uniseg -# github.com/rs/cors v1.11.0 +# github.com/rs/cors v1.11.1 ## explicit; go 1.13 github.com/rs/cors github.com/rs/cors/internal @@ -1163,17 +1243,14 @@ github.com/segmentio/fasthash/fnv1a # github.com/sercand/kuberesolver/v5 v5.1.1 ## explicit; go 1.18 github.com/sercand/kuberesolver/v5 -# github.com/shirou/gopsutil/v3 v3.24.4 
-## explicit; go 1.15 -github.com/shirou/gopsutil/v3/common -github.com/shirou/gopsutil/v3/cpu -github.com/shirou/gopsutil/v3/internal/common -github.com/shirou/gopsutil/v3/mem -github.com/shirou/gopsutil/v3/net -github.com/shirou/gopsutil/v3/process -# github.com/shoenig/go-m1cpu v0.1.6 -## explicit; go 1.20 -github.com/shoenig/go-m1cpu +# github.com/shirou/gopsutil/v4 v4.24.12 +## explicit; go 1.18 +github.com/shirou/gopsutil/v4/common +github.com/shirou/gopsutil/v4/cpu +github.com/shirou/gopsutil/v4/internal/common +github.com/shirou/gopsutil/v4/mem +github.com/shirou/gopsutil/v4/net +github.com/shirou/gopsutil/v4/process # github.com/soheilhy/cmux v0.1.5 ## explicit; go 1.11 github.com/soheilhy/cmux @@ -1197,14 +1274,14 @@ github.com/spf13/afero/mem # github.com/spf13/cast v1.6.0 ## explicit; go 1.19 github.com/spf13/cast -# github.com/spf13/cobra v1.8.0 +# github.com/spf13/cobra v1.8.1 ## explicit; go 1.15 github.com/spf13/cobra # github.com/spf13/pflag v1.0.5 ## explicit; go 1.12 github.com/spf13/pflag -# github.com/spf13/viper v1.18.2 -## explicit; go 1.18 +# github.com/spf13/viper v1.19.0 +## explicit; go 1.20 github.com/spf13/viper github.com/spf13/viper/internal/encoding github.com/spf13/viper/internal/encoding/dotenv @@ -1222,20 +1299,49 @@ github.com/stoewer/parquet-cli/pkg/output # github.com/stretchr/objx v0.5.2 ## explicit; go 1.20 github.com/stretchr/objx -# github.com/stretchr/testify v1.9.0 +# github.com/stretchr/testify v1.10.0 ## explicit; go 1.17 github.com/stretchr/testify/assert +github.com/stretchr/testify/assert/yaml github.com/stretchr/testify/mock github.com/stretchr/testify/require # github.com/subosito/gotenv v1.6.0 ## explicit; go 1.18 github.com/subosito/gotenv -# github.com/tklauser/go-sysconf v0.3.12 -## explicit; go 1.13 +# github.com/tklauser/go-sysconf v0.3.14 +## explicit; go 1.18 github.com/tklauser/go-sysconf -# github.com/tklauser/numcpus v0.6.1 -## explicit; go 1.13 +# github.com/tklauser/numcpus v0.8.0 +## explicit; go 1.18 github.com/tklauser/numcpus +# github.com/twmb/franz-go v1.18.1 +## explicit; go 1.21 +github.com/twmb/franz-go/pkg/kbin +github.com/twmb/franz-go/pkg/kerr +github.com/twmb/franz-go/pkg/kgo +github.com/twmb/franz-go/pkg/kgo/internal/sticky +github.com/twmb/franz-go/pkg/kversion +github.com/twmb/franz-go/pkg/sasl +github.com/twmb/franz-go/pkg/sasl/plain +# github.com/twmb/franz-go/pkg/kadm v1.14.0 +## explicit; go 1.21 +github.com/twmb/franz-go/pkg/kadm +# github.com/twmb/franz-go/pkg/kfake v0.0.0-20241202133023-293b7c4c56bb +## explicit; go 1.21 +github.com/twmb/franz-go/pkg/kfake +# github.com/twmb/franz-go/pkg/kmsg v1.9.0 +## explicit; go 1.21 +github.com/twmb/franz-go/pkg/kmsg +github.com/twmb/franz-go/pkg/kmsg/internal/kbin +# github.com/twmb/franz-go/plugin/kotel v1.5.0 +## explicit; go 1.21 +github.com/twmb/franz-go/plugin/kotel +# github.com/twmb/franz-go/plugin/kprom v1.1.0 +## explicit; go 1.18 +github.com/twmb/franz-go/plugin/kprom +# github.com/ua-parser/uap-go v0.0.0-20241012191800-bbb40edc15aa +## explicit +github.com/ua-parser/uap-go/uaparser # github.com/uber-go/atomic v1.4.0 ## explicit github.com/uber-go/atomic @@ -1278,7 +1384,7 @@ github.com/xdg-go/scram # github.com/xdg-go/stringprep v1.0.4 ## explicit; go 1.11 github.com/xdg-go/stringprep -# github.com/yuin/gopher-lua v0.0.0-20220504180219-658193537a64 +# github.com/yuin/gopher-lua v1.1.1 ## explicit; go 1.17 github.com/yuin/gopher-lua github.com/yuin/gopher-lua/ast @@ -1287,7 +1393,7 @@ github.com/yuin/gopher-lua/pm # github.com/yusufpapurcu/wmi v1.2.4 ## 
explicit; go 1.16 github.com/yusufpapurcu/wmi -# go.etcd.io/etcd/api/v3 v3.5.10 +# go.etcd.io/etcd/api/v3 v3.5.12 ## explicit; go 1.20 go.etcd.io/etcd/api/v3/authpb go.etcd.io/etcd/api/v3/etcdserverpb @@ -1295,7 +1401,7 @@ go.etcd.io/etcd/api/v3/membershippb go.etcd.io/etcd/api/v3/mvccpb go.etcd.io/etcd/api/v3/v3rpc/rpctypes go.etcd.io/etcd/api/v3/version -# go.etcd.io/etcd/client/pkg/v3 v3.5.10 +# go.etcd.io/etcd/client/pkg/v3 v3.5.12 ## explicit; go 1.20 go.etcd.io/etcd/client/pkg/v3/fileutil go.etcd.io/etcd/client/pkg/v3/logutil @@ -1303,7 +1409,7 @@ go.etcd.io/etcd/client/pkg/v3/systemd go.etcd.io/etcd/client/pkg/v3/tlsutil go.etcd.io/etcd/client/pkg/v3/transport go.etcd.io/etcd/client/pkg/v3/types -# go.etcd.io/etcd/client/v3 v3.5.10 +# go.etcd.io/etcd/client/v3 v3.5.12 ## explicit; go 1.20 go.etcd.io/etcd/client/v3 go.etcd.io/etcd/client/v3/credentials @@ -1322,140 +1428,161 @@ go.mongodb.org/mongo-driver/x/bsonx/bsoncore ## explicit; go 1.13 go.opencensus.io go.opencensus.io/internal -go.opencensus.io/internal/tagencoding go.opencensus.io/metric/metricdata go.opencensus.io/metric/metricproducer -go.opencensus.io/plugin/ocgrpc -go.opencensus.io/plugin/ochttp -go.opencensus.io/plugin/ochttp/propagation/b3 go.opencensus.io/resource go.opencensus.io/resource/resourcekeys -go.opencensus.io/stats -go.opencensus.io/stats/internal -go.opencensus.io/stats/view -go.opencensus.io/tag go.opencensus.io/trace go.opencensus.io/trace/internal -go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate -# go.opentelemetry.io/collector v0.102.1 -## explicit; go 1.21.0 -go.opentelemetry.io/collector/client -go.opentelemetry.io/collector/internal/fanoutconsumer +# go.opentelemetry.io/auto/sdk v1.1.0 +## explicit; go 1.22.0 +go.opentelemetry.io/auto/sdk +go.opentelemetry.io/auto/sdk/internal/telemetry +# go.opentelemetry.io/collector v0.118.0 +## explicit; go 1.22.0 go.opentelemetry.io/collector/internal/httphelper -go.opentelemetry.io/collector/internal/localhostgate -go.opentelemetry.io/collector/internal/obsreportconfig -go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics -go.opentelemetry.io/collector/internal/sharedcomponent -# go.opentelemetry.io/collector/component v0.102.1 -## explicit; go 1.21.0 +# go.opentelemetry.io/collector/client v1.24.0 +## explicit; go 1.22.0 +go.opentelemetry.io/collector/client +# go.opentelemetry.io/collector/component v0.118.0 +## explicit; go 1.22.0 go.opentelemetry.io/collector/component +# go.opentelemetry.io/collector/component/componentstatus v0.118.0 +## explicit; go 1.22.0 +go.opentelemetry.io/collector/component/componentstatus +# go.opentelemetry.io/collector/component/componenttest v0.118.0 +## explicit; go 1.22.0 go.opentelemetry.io/collector/component/componenttest -# go.opentelemetry.io/collector/config/configauth v0.102.1 -## explicit; go 1.21.0 +# go.opentelemetry.io/collector/config/configauth v0.118.0 +## explicit; go 1.22.0 go.opentelemetry.io/collector/config/configauth -# go.opentelemetry.io/collector/config/configcompression v1.9.0 -## explicit; go 1.21.0 +# go.opentelemetry.io/collector/config/configcompression v1.24.0 +## explicit; go 1.22.0 go.opentelemetry.io/collector/config/configcompression -# go.opentelemetry.io/collector/config/configgrpc v0.102.1 -## explicit; go 1.21.0 +# go.opentelemetry.io/collector/config/configgrpc v0.118.0 +## explicit; go 1.22.0 go.opentelemetry.io/collector/config/configgrpc -go.opentelemetry.io/collector/config/configgrpc/internal -# go.opentelemetry.io/collector/config/confighttp 
v0.102.1 -## explicit; go 1.21.0 +# go.opentelemetry.io/collector/config/confighttp v0.118.0 +## explicit; go 1.22.0 go.opentelemetry.io/collector/config/confighttp -# go.opentelemetry.io/collector/config/confignet v0.102.1 -## explicit; go 1.21.0 +go.opentelemetry.io/collector/config/confighttp/internal +# go.opentelemetry.io/collector/config/confignet v1.24.0 +## explicit; go 1.22.0 go.opentelemetry.io/collector/config/confignet -# go.opentelemetry.io/collector/config/configopaque v1.18.0 +# go.opentelemetry.io/collector/config/configopaque v1.24.0 ## explicit; go 1.22.0 go.opentelemetry.io/collector/config/configopaque -# go.opentelemetry.io/collector/config/configretry v0.102.1 -## explicit; go 1.21.0 +# go.opentelemetry.io/collector/config/configretry v1.24.0 +## explicit; go 1.22.0 go.opentelemetry.io/collector/config/configretry -# go.opentelemetry.io/collector/config/configtelemetry v0.102.1 -## explicit; go 1.21.0 +# go.opentelemetry.io/collector/config/configtelemetry v0.118.0 +## explicit; go 1.22.0 go.opentelemetry.io/collector/config/configtelemetry -# go.opentelemetry.io/collector/config/configtls v1.18.0 +# go.opentelemetry.io/collector/config/configtls v1.24.0 ## explicit; go 1.22.0 go.opentelemetry.io/collector/config/configtls -# go.opentelemetry.io/collector/config/internal v0.102.1 -## explicit; go 1.21.0 -go.opentelemetry.io/collector/config/internal -# go.opentelemetry.io/collector/confmap v0.102.1 -## explicit; go 1.21.0 +# go.opentelemetry.io/collector/confmap v1.24.0 +## explicit; go 1.22.0 go.opentelemetry.io/collector/confmap -go.opentelemetry.io/collector/confmap/internal/envvar go.opentelemetry.io/collector/confmap/internal/mapstructure -go.opentelemetry.io/collector/confmap/provider/internal -go.opentelemetry.io/collector/confmap/provider/internal/configurablehttpprovider -# go.opentelemetry.io/collector/confmap/converter/expandconverter v0.102.1 -## explicit; go 1.21.0 -go.opentelemetry.io/collector/confmap/converter/expandconverter -# go.opentelemetry.io/collector/confmap/provider/envprovider v0.102.1 -## explicit; go 1.21.0 -go.opentelemetry.io/collector/confmap/provider/envprovider -# go.opentelemetry.io/collector/confmap/provider/fileprovider v0.102.1 -## explicit; go 1.21.0 -go.opentelemetry.io/collector/confmap/provider/fileprovider -# go.opentelemetry.io/collector/confmap/provider/httpprovider v0.102.1 -## explicit; go 1.21.0 -go.opentelemetry.io/collector/confmap/provider/httpprovider -# go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.102.1 -## explicit; go 1.21.0 -go.opentelemetry.io/collector/confmap/provider/httpsprovider -# go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.102.1 -## explicit; go 1.21.0 -go.opentelemetry.io/collector/confmap/provider/yamlprovider -# go.opentelemetry.io/collector/connector v0.102.1 -## explicit; go 1.21.0 +# go.opentelemetry.io/collector/connector v0.118.0 +## explicit; go 1.22.0 go.opentelemetry.io/collector/connector -# go.opentelemetry.io/collector/consumer v0.102.1 -## explicit; go 1.21.0 +go.opentelemetry.io/collector/connector/internal +# go.opentelemetry.io/collector/connector/connectortest v0.118.0 +## explicit; go 1.22.0 +go.opentelemetry.io/collector/connector/connectortest +# go.opentelemetry.io/collector/connector/xconnector v0.118.0 +## explicit; go 1.22.0 +go.opentelemetry.io/collector/connector/xconnector +# go.opentelemetry.io/collector/consumer v1.24.0 +## explicit; go 1.22.0 go.opentelemetry.io/collector/consumer +go.opentelemetry.io/collector/consumer/internal +# 
go.opentelemetry.io/collector/consumer/consumererror v0.118.0 +## explicit; go 1.22.0 go.opentelemetry.io/collector/consumer/consumererror +go.opentelemetry.io/collector/consumer/consumererror/internal +# go.opentelemetry.io/collector/consumer/consumererror/xconsumererror v0.118.0 +## explicit; go 1.22.0 +go.opentelemetry.io/collector/consumer/consumererror/xconsumererror +# go.opentelemetry.io/collector/consumer/consumertest v0.118.0 +## explicit; go 1.22.0 go.opentelemetry.io/collector/consumer/consumertest -# go.opentelemetry.io/collector/exporter v0.102.1 -## explicit; go 1.21.0 +# go.opentelemetry.io/collector/consumer/xconsumer v0.118.0 +## explicit; go 1.22.0 +go.opentelemetry.io/collector/consumer/xconsumer +# go.opentelemetry.io/collector/exporter v0.118.0 +## explicit; go 1.22.0 go.opentelemetry.io/collector/exporter go.opentelemetry.io/collector/exporter/exporterbatcher go.opentelemetry.io/collector/exporter/exporterhelper +go.opentelemetry.io/collector/exporter/exporterhelper/internal go.opentelemetry.io/collector/exporter/exporterhelper/internal/metadata go.opentelemetry.io/collector/exporter/exporterqueue -go.opentelemetry.io/collector/exporter/exportertest +go.opentelemetry.io/collector/exporter/internal go.opentelemetry.io/collector/exporter/internal/experr go.opentelemetry.io/collector/exporter/internal/queue -# go.opentelemetry.io/collector/exporter/otlpexporter v0.102.1 -## explicit; go 1.21.0 +go.opentelemetry.io/collector/exporter/internal/requesttest +# go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper v0.118.0 +## explicit; go 1.22.0 +go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper +# go.opentelemetry.io/collector/exporter/exportertest v0.118.0 +## explicit; go 1.22.0 +go.opentelemetry.io/collector/exporter/exportertest +# go.opentelemetry.io/collector/exporter/otlpexporter v0.118.0 +## explicit; go 1.22.0 go.opentelemetry.io/collector/exporter/otlpexporter go.opentelemetry.io/collector/exporter/otlpexporter/internal/metadata -# go.opentelemetry.io/collector/extension v0.102.1 -## explicit; go 1.21.0 +# go.opentelemetry.io/collector/exporter/otlphttpexporter v0.118.0 +## explicit; go 1.22.0 +go.opentelemetry.io/collector/exporter/otlphttpexporter +go.opentelemetry.io/collector/exporter/otlphttpexporter/internal/metadata +# go.opentelemetry.io/collector/exporter/xexporter v0.118.0 +## explicit; go 1.22.0 +go.opentelemetry.io/collector/exporter/xexporter +# go.opentelemetry.io/collector/extension v0.118.0 +## explicit; go 1.22.0 go.opentelemetry.io/collector/extension -go.opentelemetry.io/collector/extension/experimental/storage -# go.opentelemetry.io/collector/extension/auth v0.102.1 -## explicit; go 1.21.0 +# go.opentelemetry.io/collector/extension/auth v0.118.0 +## explicit; go 1.22.0 go.opentelemetry.io/collector/extension/auth -# go.opentelemetry.io/collector/featuregate v1.9.0 -## explicit; go 1.21.0 +# go.opentelemetry.io/collector/extension/extensioncapabilities v0.118.0 +## explicit; go 1.22.0 +go.opentelemetry.io/collector/extension/extensioncapabilities +# go.opentelemetry.io/collector/extension/extensiontest v0.118.0 +## explicit; go 1.22.0 +go.opentelemetry.io/collector/extension/extensiontest +# go.opentelemetry.io/collector/extension/xextension v0.118.0 +## explicit; go 1.22.0 +go.opentelemetry.io/collector/extension/xextension/storage +# go.opentelemetry.io/collector/featuregate v1.24.0 +## explicit; go 1.22.0 go.opentelemetry.io/collector/featuregate -# go.opentelemetry.io/collector/otelcol v0.102.1 -## 
explicit; go 1.21.0 +# go.opentelemetry.io/collector/internal/fanoutconsumer v0.118.0 +## explicit; go 1.22.0 +go.opentelemetry.io/collector/internal/fanoutconsumer +# go.opentelemetry.io/collector/internal/sharedcomponent v0.118.0 +## explicit; go 1.22.0 +go.opentelemetry.io/collector/internal/sharedcomponent +# go.opentelemetry.io/collector/otelcol v0.118.0 +## explicit; go 1.22.0 go.opentelemetry.io/collector/otelcol go.opentelemetry.io/collector/otelcol/internal/configunmarshaler go.opentelemetry.io/collector/otelcol/internal/grpclog -# go.opentelemetry.io/collector/pdata v1.12.0 -## explicit; go 1.21.0 +# go.opentelemetry.io/collector/pdata v1.24.0 +## explicit; go 1.22.0 go.opentelemetry.io/collector/pdata/internal go.opentelemetry.io/collector/pdata/internal/data go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1 go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1 -go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1experimental +go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1 go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1 go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1 go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1 -go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1experimental +go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1 go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1 go.opentelemetry.io/collector/pdata/internal/json @@ -1467,70 +1594,107 @@ go.opentelemetry.io/collector/pdata/pmetric go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp go.opentelemetry.io/collector/pdata/ptrace go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp -# go.opentelemetry.io/collector/processor v0.102.1 -## explicit; go 1.21.0 +# go.opentelemetry.io/collector/pdata/pprofile v0.118.0 +## explicit; go 1.22.0 +go.opentelemetry.io/collector/pdata/pprofile +go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp +# go.opentelemetry.io/collector/pdata/testdata v0.118.0 +## explicit; go 1.22.0 +go.opentelemetry.io/collector/pdata/testdata +# go.opentelemetry.io/collector/pipeline v0.118.0 +## explicit; go 1.22.0 +go.opentelemetry.io/collector/pipeline +go.opentelemetry.io/collector/pipeline/internal/globalsignal +# go.opentelemetry.io/collector/pipeline/xpipeline v0.118.0 +## explicit; go 1.22.0 +go.opentelemetry.io/collector/pipeline/xpipeline +# go.opentelemetry.io/collector/processor v0.118.0 +## explicit; go 1.22.0 go.opentelemetry.io/collector/processor +go.opentelemetry.io/collector/processor/internal go.opentelemetry.io/collector/processor/processorhelper go.opentelemetry.io/collector/processor/processorhelper/internal/metadata -# go.opentelemetry.io/collector/receiver v0.102.1 -## explicit; go 1.21.0 +# go.opentelemetry.io/collector/processor/processortest v0.118.0 +## explicit; go 1.22.0 +go.opentelemetry.io/collector/processor/processortest +# go.opentelemetry.io/collector/processor/xprocessor v0.118.0 +## explicit; go 1.22.0 +go.opentelemetry.io/collector/processor/xprocessor +# go.opentelemetry.io/collector/receiver v0.118.0 +## explicit; go 1.22.0 go.opentelemetry.io/collector/receiver go.opentelemetry.io/collector/receiver/receiverhelper 
+go.opentelemetry.io/collector/receiver/receiverhelper/internal go.opentelemetry.io/collector/receiver/receiverhelper/internal/metadata -go.opentelemetry.io/collector/receiver/receivertest -# go.opentelemetry.io/collector/receiver/otlpreceiver v0.102.1 -## explicit; go 1.21.0 +# go.opentelemetry.io/collector/receiver/otlpreceiver v0.118.0 +## explicit; go 1.22.0 go.opentelemetry.io/collector/receiver/otlpreceiver go.opentelemetry.io/collector/receiver/otlpreceiver/internal/errors go.opentelemetry.io/collector/receiver/otlpreceiver/internal/logs go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metadata go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metrics +go.opentelemetry.io/collector/receiver/otlpreceiver/internal/profiles go.opentelemetry.io/collector/receiver/otlpreceiver/internal/trace -# go.opentelemetry.io/collector/semconv v0.105.0 -## explicit; go 1.21.0 +# go.opentelemetry.io/collector/receiver/receivertest v0.118.0 +## explicit; go 1.22.0 +go.opentelemetry.io/collector/receiver/receivertest +# go.opentelemetry.io/collector/receiver/xreceiver v0.118.0 +## explicit; go 1.22.0 +go.opentelemetry.io/collector/receiver/xreceiver +# go.opentelemetry.io/collector/semconv v0.118.0 +## explicit; go 1.22.0 +go.opentelemetry.io/collector/semconv/v1.12.0 go.opentelemetry.io/collector/semconv/v1.13.0 +go.opentelemetry.io/collector/semconv/v1.16.0 go.opentelemetry.io/collector/semconv/v1.18.0 +go.opentelemetry.io/collector/semconv/v1.25.0 +go.opentelemetry.io/collector/semconv/v1.26.0 +go.opentelemetry.io/collector/semconv/v1.27.0 go.opentelemetry.io/collector/semconv/v1.6.1 go.opentelemetry.io/collector/semconv/v1.9.0 -# go.opentelemetry.io/collector/service v0.102.1 -## explicit; go 1.21.0 +# go.opentelemetry.io/collector/service v0.118.0 +## explicit; go 1.22.0 go.opentelemetry.io/collector/service go.opentelemetry.io/collector/service/extensions +go.opentelemetry.io/collector/service/internal/builders go.opentelemetry.io/collector/service/internal/capabilityconsumer go.opentelemetry.io/collector/service/internal/components go.opentelemetry.io/collector/service/internal/graph +go.opentelemetry.io/collector/service/internal/metadata go.opentelemetry.io/collector/service/internal/proctelemetry go.opentelemetry.io/collector/service/internal/resource -go.opentelemetry.io/collector/service/internal/servicetelemetry go.opentelemetry.io/collector/service/internal/status go.opentelemetry.io/collector/service/internal/zpages go.opentelemetry.io/collector/service/pipelines go.opentelemetry.io/collector/service/telemetry -go.opentelemetry.io/collector/service/telemetry/internal -# go.opentelemetry.io/contrib/bridges/prometheus v0.53.0 -## explicit; go 1.21 +go.opentelemetry.io/collector/service/telemetry/internal/otelinit +# go.opentelemetry.io/contrib/bridges/otelzap v0.8.0 +## explicit; go 1.22.0 +go.opentelemetry.io/contrib/bridges/otelzap +# go.opentelemetry.io/contrib/bridges/prometheus v0.59.0 +## explicit; go 1.22.0 go.opentelemetry.io/contrib/bridges/prometheus -# go.opentelemetry.io/contrib/config v0.7.0 -## explicit; go 1.21 +# go.opentelemetry.io/contrib/config v0.10.0 +## explicit; go 1.22 go.opentelemetry.io/contrib/config -# go.opentelemetry.io/contrib/exporters/autoexport v0.53.0 -## explicit; go 1.21 +# go.opentelemetry.io/contrib/exporters/autoexport v0.59.0 +## explicit; go 1.22.0 go.opentelemetry.io/contrib/exporters/autoexport -# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 -## explicit; go 1.21 +# 
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 +## explicit; go 1.22.0 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal -# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 -## explicit; go 1.22 +# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 +## explicit; go 1.22.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil -# go.opentelemetry.io/contrib/propagators/b3 v1.27.0 -## explicit; go 1.21 +# go.opentelemetry.io/contrib/propagators/b3 v1.33.0 +## explicit; go 1.22.0 go.opentelemetry.io/contrib/propagators/b3 -# go.opentelemetry.io/otel v1.31.0 -## explicit; go 1.22 +# go.opentelemetry.io/otel v1.34.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute go.opentelemetry.io/otel/baggage @@ -1541,19 +1705,20 @@ go.opentelemetry.io/otel/internal/baggage go.opentelemetry.io/otel/internal/global go.opentelemetry.io/otel/propagation go.opentelemetry.io/otel/semconv/v1.17.0 +go.opentelemetry.io/otel/semconv/v1.18.0 go.opentelemetry.io/otel/semconv/v1.20.0 go.opentelemetry.io/otel/semconv/v1.21.0 go.opentelemetry.io/otel/semconv/v1.25.0 go.opentelemetry.io/otel/semconv/v1.26.0 -# go.opentelemetry.io/otel/bridge/opencensus v1.27.0 -## explicit; go 1.21 +# go.opentelemetry.io/otel/bridge/opencensus v1.34.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/bridge/opencensus go.opentelemetry.io/otel/bridge/opencensus/internal go.opentelemetry.io/otel/bridge/opencensus/internal/oc2otel go.opentelemetry.io/otel/bridge/opencensus/internal/ocmetric go.opentelemetry.io/otel/bridge/opencensus/internal/otel2oc -# go.opentelemetry.io/otel/bridge/opentracing v1.26.0 -## explicit; go 1.21 +# go.opentelemetry.io/otel/bridge/opentracing v1.34.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/bridge/opentracing go.opentelemetry.io/otel/bridge/opentracing/migration # go.opentelemetry.io/otel/exporters/jaeger v1.17.0 @@ -1563,69 +1728,76 @@ go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift -# go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.4.0 -## explicit; go 1.21 +# go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.10.0 +## explicit; go 1.22.0 +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform +# go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.10.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/retry go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/transform -# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0 -## explicit; go 1.21 +# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.34.0 +## explicit; go 1.22.0 
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform -# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.28.0 -## explicit; go 1.21 +# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.34.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform -# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 -## explicit; go 1.21 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform -# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 -## explicit; go 1.21 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry -# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 -## explicit; go 1.21 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.34.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry -# go.opentelemetry.io/otel/exporters/prometheus v0.50.0 -## explicit; go 1.21 +# go.opentelemetry.io/otel/exporters/prometheus v0.56.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/exporters/prometheus -# go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.4.0 -## explicit; go 1.21 +# go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.10.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/exporters/stdout/stdoutlog -# go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.28.0 -## explicit; go 1.21 +# go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.34.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/exporters/stdout/stdoutmetric -# go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 -## explicit; go 1.21 +# go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.34.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/exporters/stdout/stdouttrace -# go.opentelemetry.io/otel/log v0.4.0 -## explicit; go 1.21 +# 
go.opentelemetry.io/otel/log v0.10.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/log go.opentelemetry.io/otel/log/embedded +go.opentelemetry.io/otel/log/global +go.opentelemetry.io/otel/log/internal/global go.opentelemetry.io/otel/log/noop -# go.opentelemetry.io/otel/metric v1.31.0 -## explicit; go 1.22 +# go.opentelemetry.io/otel/metric v1.34.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded go.opentelemetry.io/otel/metric/noop -# go.opentelemetry.io/otel/sdk v1.31.0 -## explicit; go 1.22 +# go.opentelemetry.io/otel/sdk v1.34.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/sdk go.opentelemetry.io/otel/sdk/instrumentation go.opentelemetry.io/otel/sdk/internal/env @@ -1633,24 +1805,25 @@ go.opentelemetry.io/otel/sdk/internal/x go.opentelemetry.io/otel/sdk/resource go.opentelemetry.io/otel/sdk/trace go.opentelemetry.io/otel/sdk/trace/tracetest -# go.opentelemetry.io/otel/sdk/log v0.4.0 -## explicit; go 1.21 +# go.opentelemetry.io/otel/sdk/log v0.10.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/sdk/log -# go.opentelemetry.io/otel/sdk/metric v1.28.0 -## explicit; go 1.21 +go.opentelemetry.io/otel/sdk/log/internal/x +# go.opentelemetry.io/otel/sdk/metric v1.34.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/sdk/metric +go.opentelemetry.io/otel/sdk/metric/exemplar go.opentelemetry.io/otel/sdk/metric/internal go.opentelemetry.io/otel/sdk/metric/internal/aggregate -go.opentelemetry.io/otel/sdk/metric/internal/exemplar go.opentelemetry.io/otel/sdk/metric/internal/x go.opentelemetry.io/otel/sdk/metric/metricdata -# go.opentelemetry.io/otel/trace v1.31.0 -## explicit; go 1.22 +# go.opentelemetry.io/otel/trace v1.34.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded go.opentelemetry.io/otel/trace/noop -# go.opentelemetry.io/proto/otlp v1.3.1 -## explicit; go 1.17 +# go.opentelemetry.io/proto/otlp v1.5.0 +## explicit; go 1.22.0 go.opentelemetry.io/proto/otlp/collector/logs/v1 go.opentelemetry.io/proto/otlp/collector/metrics/v1 go.opentelemetry.io/proto/otlp/collector/trace/v1 @@ -1681,7 +1854,7 @@ go.uber.org/zap/internal/pool go.uber.org/zap/internal/stacktrace go.uber.org/zap/zapcore go.uber.org/zap/zapgrpc -# golang.org/x/crypto v0.29.0 +# golang.org/x/crypto v0.32.0 ## explicit; go 1.20 golang.org/x/crypto/argon2 golang.org/x/crypto/bcrypt @@ -1698,20 +1871,24 @@ golang.org/x/crypto/md4 golang.org/x/crypto/pbkdf2 golang.org/x/crypto/pkcs12 golang.org/x/crypto/pkcs12/internal/rc2 -# golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 -## explicit; go 1.20 +# golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f +## explicit; go 1.22.0 golang.org/x/exp/constraints golang.org/x/exp/maps golang.org/x/exp/slices golang.org/x/exp/slog golang.org/x/exp/slog/internal golang.org/x/exp/slog/internal/buffer -# golang.org/x/mod v0.19.0 -## explicit; go 1.18 +# golang.org/x/mod v0.22.0 +## explicit; go 1.22.0 golang.org/x/mod/semver -# golang.org/x/net v0.31.0 +# golang.org/x/net v0.34.0 ## explicit; go 1.18 golang.org/x/net/bpf +golang.org/x/net/context +golang.org/x/net/html +golang.org/x/net/html/atom +golang.org/x/net/html/charset golang.org/x/net/http/httpguts golang.org/x/net/http/httpproxy golang.org/x/net/http2 @@ -1728,7 +1905,7 @@ golang.org/x/net/netutil golang.org/x/net/proxy golang.org/x/net/publicsuffix golang.org/x/net/trace -# golang.org/x/oauth2 v0.21.0 +# golang.org/x/oauth2 v0.25.0 ## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/authhandler @@ -1741,11 +1918,11 @@ 
golang.org/x/oauth2/google/internal/stsexchange golang.org/x/oauth2/internal golang.org/x/oauth2/jws golang.org/x/oauth2/jwt -# golang.org/x/sync v0.9.0 +# golang.org/x/sync v0.10.0 ## explicit; go 1.18 golang.org/x/sync/errgroup golang.org/x/sync/semaphore -# golang.org/x/sys v0.27.0 +# golang.org/x/sys v0.29.0 ## explicit; go 1.18 golang.org/x/sys/cpu golang.org/x/sys/unix @@ -1753,10 +1930,11 @@ golang.org/x/sys/windows golang.org/x/sys/windows/registry golang.org/x/sys/windows/svc golang.org/x/sys/windows/svc/eventlog -# golang.org/x/text v0.20.0 +# golang.org/x/text v0.21.0 ## explicit; go 1.18 golang.org/x/text/encoding golang.org/x/text/encoding/charmap +golang.org/x/text/encoding/htmlindex golang.org/x/text/encoding/ianaindex golang.org/x/text/encoding/internal golang.org/x/text/encoding/internal/identifier @@ -1765,20 +1943,26 @@ golang.org/x/text/encoding/korean golang.org/x/text/encoding/simplifiedchinese golang.org/x/text/encoding/traditionalchinese golang.org/x/text/encoding/unicode +golang.org/x/text/internal/language +golang.org/x/text/internal/language/compact +golang.org/x/text/internal/tag golang.org/x/text/internal/utf8internal +golang.org/x/text/language golang.org/x/text/runes golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/time v0.5.0 +golang.org/x/text/width +# golang.org/x/time v0.9.0 ## explicit; go 1.18 golang.org/x/time/rate -# golang.org/x/tools v0.23.0 -## explicit; go 1.19 +# golang.org/x/tools v0.27.0 +## explicit; go 1.22.0 golang.org/x/tools/go/gcexportdata golang.org/x/tools/go/packages golang.org/x/tools/go/types/objectpath +golang.org/x/tools/go/types/typeutil golang.org/x/tools/internal/aliases golang.org/x/tools/internal/event golang.org/x/tools/internal/event/core @@ -1789,11 +1973,11 @@ golang.org/x/tools/internal/gocommand golang.org/x/tools/internal/packagesinternal golang.org/x/tools/internal/pkgbits golang.org/x/tools/internal/stdlib -golang.org/x/tools/internal/tokeninternal +golang.org/x/tools/internal/typeparams golang.org/x/tools/internal/typesinternal golang.org/x/tools/internal/versions -# gonum.org/v1/gonum v0.15.0 -## explicit; go 1.21 +# gonum.org/v1/gonum v0.15.1 +## explicit; go 1.22 gonum.org/v1/gonum/blas gonum.org/v1/gonum/blas/blas64 gonum.org/v1/gonum/blas/cblas128 @@ -1802,7 +1986,6 @@ gonum.org/v1/gonum/floats gonum.org/v1/gonum/floats/scalar gonum.org/v1/gonum/graph gonum.org/v1/gonum/graph/internal/linear -gonum.org/v1/gonum/graph/internal/ordered gonum.org/v1/gonum/graph/internal/set gonum.org/v1/gonum/graph/iterator gonum.org/v1/gonum/graph/set/uid @@ -1815,17 +1998,16 @@ gonum.org/v1/gonum/internal/asm/f32 gonum.org/v1/gonum/internal/asm/f64 gonum.org/v1/gonum/internal/cmplx64 gonum.org/v1/gonum/internal/math32 +gonum.org/v1/gonum/internal/order gonum.org/v1/gonum/lapack gonum.org/v1/gonum/lapack/gonum gonum.org/v1/gonum/lapack/lapack64 gonum.org/v1/gonum/mat -# google.golang.org/api v0.188.0 -## explicit; go 1.20 +# google.golang.org/api v0.218.0 +## explicit; go 1.22 google.golang.org/api/googleapi google.golang.org/api/googleapi/transport google.golang.org/api/iamcredentials/v1 -google.golang.org/api/idtoken -google.golang.org/api/impersonate google.golang.org/api/internal google.golang.org/api/internal/cert google.golang.org/api/internal/gensupport @@ -1838,23 +2020,22 @@ google.golang.org/api/storage/v1 google.golang.org/api/transport google.golang.org/api/transport/grpc google.golang.org/api/transport/http 
-google.golang.org/api/transport/http/internal/propagation # google.golang.org/genproto v0.0.0-20240708141625-4ad9e859172b ## explicit; go 1.20 google.golang.org/genproto/googleapis/type/date google.golang.org/genproto/googleapis/type/expr -# google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d -## explicit; go 1.20 +# google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f +## explicit; go 1.22 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/api/httpbody -# google.golang.org/genproto/googleapis/rpc v0.0.0-20240711142825-46eb208f015d -## explicit; go 1.20 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f +## explicit; go 1.22 google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.65.0 -## explicit; go 1.21 +# google.golang.org/grpc v1.69.4 +## explicit; go 1.22 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff @@ -1864,6 +2045,8 @@ google.golang.org/grpc/balancer/grpclb google.golang.org/grpc/balancer/grpclb/grpc_lb_v1 google.golang.org/grpc/balancer/grpclb/state google.golang.org/grpc/balancer/pickfirst +google.golang.org/grpc/balancer/pickfirst/internal +google.golang.org/grpc/balancer/pickfirst/pickfirstleaf google.golang.org/grpc/balancer/roundrobin google.golang.org/grpc/binarylog/grpc_binarylog_v1 google.golang.org/grpc/channelz @@ -1883,7 +2066,9 @@ google.golang.org/grpc/credentials/oauth google.golang.org/grpc/encoding google.golang.org/grpc/encoding/gzip google.golang.org/grpc/encoding/proto +google.golang.org/grpc/experimental/stats google.golang.org/grpc/grpclog +google.golang.org/grpc/grpclog/internal google.golang.org/grpc/health google.golang.org/grpc/health/grpc_health_v1 google.golang.org/grpc/internal @@ -1908,18 +2093,16 @@ google.golang.org/grpc/internal/resolver/dns/internal google.golang.org/grpc/internal/resolver/passthrough google.golang.org/grpc/internal/resolver/unix google.golang.org/grpc/internal/serviceconfig +google.golang.org/grpc/internal/stats google.golang.org/grpc/internal/status google.golang.org/grpc/internal/syscall google.golang.org/grpc/internal/transport google.golang.org/grpc/internal/transport/networktype google.golang.org/grpc/internal/xds google.golang.org/grpc/keepalive +google.golang.org/grpc/mem google.golang.org/grpc/metadata google.golang.org/grpc/peer -google.golang.org/grpc/reflection -google.golang.org/grpc/reflection/grpc_reflection_v1 -google.golang.org/grpc/reflection/grpc_reflection_v1alpha -google.golang.org/grpc/reflection/internal google.golang.org/grpc/resolver google.golang.org/grpc/resolver/dns google.golang.org/grpc/resolver/manual @@ -1928,8 +2111,8 @@ google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap google.golang.org/grpc/test/bufconn -# google.golang.org/protobuf v1.34.2 -## explicit; go 1.20 +# google.golang.org/protobuf v1.36.3 +## explicit; go 1.21 google.golang.org/protobuf/encoding/protodelim google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext @@ -1952,6 +2135,7 @@ google.golang.org/protobuf/internal/genid google.golang.org/protobuf/internal/impl google.golang.org/protobuf/internal/order google.golang.org/protobuf/internal/pragma +google.golang.org/protobuf/internal/protolazy google.golang.org/protobuf/internal/set 
google.golang.org/protobuf/internal/strs google.golang.org/protobuf/internal/version